Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm

* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (76 commits)
  [ARM] 4002/1: S3C24XX: leave parent IRQs unmasked
  [ARM] 4001/1: S3C24XX: shorten reboot time
  [ARM] 3983/2: remove unused argument to __bug()
  [ARM] 4000/1: Osiris: add third serial port in
  [ARM] 3999/1: RX3715: suspend to RAM support
  [ARM] 3998/1: VR1000: LED platform devices
  [ARM] 3995/1: iop13xx: add iop13xx support
  [ARM] 3968/1: iop13xx: add iop13xx_defconfig
  [ARM] Update mach-types
  [ARM] Allow gcc to optimise arm_add_memory a little more
  [ARM] 3991/1: i.MX/MX1 high resolution time source
  [ARM] 3990/1: i.MX/MX1 more precise PLL decode
  [ARM] 3986/1: H1940: suspend to RAM support
  [ARM] 3985/1: ixp4xx clocksource cleanup
  [ARM] 3984/1: ixp4xx/nslu2: Fix disk LED numbering (take 2)
  [ARM] 3994/1: ixp23xx: fix handling of pci master aborts
  [ARM] 3981/1: sched_clock for PXA2xx
  [ARM] 3980/1: extend the ARM Versatile sched_clock implementation from 32 to 63 bit
  [ARM] 3979/1: extend the SA11x0 sched_clock implementation from 32 to 63 bit period
  [ARM] 3978/1: macro to provide a 63-bit value from a 32-bit hardware counter
  ...
diff --git a/CREDITS b/CREDITS
index ccd4f9f..d088008 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2598,6 +2598,9 @@
 S: Prague 8
 S: 182 00 Czech Republic
 
+N: Rick Payne
+D: RFC2385 Support for TCP
+
 N: Barak A. Pearlmutter
 E: bap@cs.unm.edu
 W: http://www.cs.unm.edu/~bap/
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 02457ec..f08ca95 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -104,8 +104,6 @@
 	- request_firmware() hotplug interface info.
 floppy.txt
 	- notes and driver options for the floppy disk driver.
-ftape.txt
-	- notes about the floppy tape device driver.
 hayes-esp.txt
 	- info on using the Hayes ESP serial driver.
 highuid.txt
diff --git a/Documentation/Changes b/Documentation/Changes
index abee7f5..73a8617 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -201,7 +201,7 @@
 ----
 udev is a userspace application for populating /dev dynamically with
 only entries for devices actually present.  udev replaces the basic
-functionality of devfs, while allowing persistant device naming for
+functionality of devfs, while allowing persistent device naming for
 devices.
 
 FUSE
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 2ffb0d6..805db4b 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -77,7 +77,7 @@
 Many drivers need lots of small dma-coherent memory regions for DMA
 descriptors or I/O buffers.  Rather than allocating in units of a page
 or more using dma_alloc_coherent(), you can use DMA pools.  These work
-much like a kmem_cache_t, except that they use the dma-coherent allocator
+much like a struct kmem_cache, except that they use the dma-coherent allocator
 not __get_free_pages().  Also, they understand common hardware constraints
 for alignment, like queue heads needing to be aligned on N byte boundaries.
 
@@ -94,7 +94,7 @@
 for use with a given device.  It must be called in a context which
 can sleep.
 
-The "name" is for diagnostics (like a kmem_cache_t name); dev and size
+The "name" is for diagnostics (like a struct kmem_cache name); dev and size
 are like what you'd pass to dma_alloc_coherent().  The device's hardware
 alignment requirement for this type of data is "align" (which is expressed
 in bytes, and must be a power of two).  If your device has no boundary
@@ -431,10 +431,10 @@
 dma_alloc_noncoherent()).
 
 int
-dma_is_consistent(dma_addr_t dma_handle)
+dma_is_consistent(struct device *dev, dma_addr_t dma_handle)
 
-returns true if the memory pointed to by the dma_handle is actually
-consistent.
+returns true if the device dev is performing consistent DMA on the memory
+area pointed to by the dma_handle.
 
 int
 dma_get_cache_alignment(void)
@@ -459,7 +459,7 @@
 memory you intend to sync partially.
 
 void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 
 Do a partial sync of memory that was allocated by
@@ -489,7 +489,7 @@
 flags can be or'd together and are
 
 DMA_MEMORY_MAP - request that the memory returned from
-dma_alloc_coherent() be directly writeable.
+dma_alloc_coherent() be directly writable.
 
 DMA_MEMORY_IO - request that the memory returned from
 dma_alloc_coherent() be addressable using read/write/memcpy_toio etc.
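
As a minimal usage sketch of the dma_pool interface described in the hunks
above (the pool name, buffer size and alignment below are purely illustrative,
not taken from any existing driver):

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Create a pool of small coherent buffers for "dev", allocate one
 * descriptor from it, and release everything again. */
static int example_dma_pool_usage(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	/* 64-byte buffers, aligned to 16 bytes, no boundary constraint */
	pool = dma_pool_create("example-desc", dev, 64, 16, 0);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!vaddr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand "dma" to the hardware, touch the memory through "vaddr" ... */

	dma_pool_free(pool, vaddr, dma);
	dma_pool_destroy(pool);
	return 0;
}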
diff --git a/Documentation/DMA-ISA-LPC.txt b/Documentation/DMA-ISA-LPC.txt
index 705f6be..e767805 100644
--- a/Documentation/DMA-ISA-LPC.txt
+++ b/Documentation/DMA-ISA-LPC.txt
@@ -110,7 +110,7 @@
 
 Once the DMA transfer is finished (or timed out) you should disable
 the channel again. You should also check get_dma_residue() to make
-sure that all data has been transfered.
+sure that all data has been transferred.
 
 Example:
 
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index db9499a..36526a1 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -190,9 +190,13 @@
 ###
 # Help targets as used by the top-level makefile
 dochelp:
-	@echo  '  Linux kernel internal documentation in different formats:'
-	@echo  '  xmldocs (XML DocBook), psdocs (Postscript), pdfdocs (PDF)'
-	@echo  '  htmldocs (HTML), mandocs (man pages, use installmandocs to install)'
+	@echo  ' Linux kernel internal documentation in different formats:'
+	@echo  '  htmldocs        - HTML'
+	@echo  '  installmandocs  - install man pages generated by mandocs'
+	@echo  '  mandocs         - man pages'
+	@echo  '  pdfdocs         - PDF'
+	@echo  '  psdocs          - Postscript'
+	@echo  '  xmldocs         - XML DocBook'
 
 ###
 # Temporary files left by various tools
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index a166675..ca09491 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -418,9 +418,35 @@
 !Idrivers/parport/daisy.c
   </chapter>
 
-  <chapter id="viddev">
-     <title>Video4Linux</title>
-!Edrivers/media/video/videodev.c
+  <chapter id="message_devices">
+	<title>Message-based devices</title>
+     <sect1><title>Fusion message devices</title>
+!Edrivers/message/fusion/mptbase.c
+!Idrivers/message/fusion/mptbase.c
+!Edrivers/message/fusion/mptscsih.c
+!Idrivers/message/fusion/mptscsih.c
+!Idrivers/message/fusion/mptctl.c
+!Idrivers/message/fusion/mptspi.c
+!Idrivers/message/fusion/mptfc.c
+!Idrivers/message/fusion/mptlan.c
+     </sect1>
+     <sect1><title>I2O message devices</title>
+!Iinclude/linux/i2o.h
+!Idrivers/message/i2o/core.h
+!Edrivers/message/i2o/iop.c
+!Idrivers/message/i2o/iop.c
+!Idrivers/message/i2o/config-osm.c
+!Edrivers/message/i2o/exec-osm.c
+!Idrivers/message/i2o/exec-osm.c
+!Idrivers/message/i2o/bus-osm.c
+!Edrivers/message/i2o/device.c
+!Idrivers/message/i2o/device.c
+!Idrivers/message/i2o/driver.c
+!Idrivers/message/i2o/pci.c
+!Idrivers/message/i2o/i2o_block.c
+!Idrivers/message/i2o/i2o_scsi.c
+!Idrivers/message/i2o/i2o_proc.c
+     </sect1>
   </chapter>
 
   <chapter id="snddev">
diff --git a/Documentation/DocBook/writing_usb_driver.tmpl b/Documentation/DocBook/writing_usb_driver.tmpl
index 07cd34c..d4188d4 100644
--- a/Documentation/DocBook/writing_usb_driver.tmpl
+++ b/Documentation/DocBook/writing_usb_driver.tmpl
@@ -345,8 +345,7 @@
         usb_buffer_free (dev->udev, dev->bulk_out_size,
             dev->bulk_out_buffer,
             dev->write_urb->transfer_dma);
-    if (dev->write_urb != NULL)
-        usb_free_urb (dev->write_urb);
+    usb_free_urb (dev->write_urb);
     kfree (dev);
 }
   </programlisting>
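
The hunk above drops the NULL check because usb_free_urb() already tolerates
a NULL argument; a minimal sketch of the resulting idiom (the helper name is
illustrative only):

#include <linux/usb.h>

/* Cleanup helper relying on usb_free_urb()'s built-in NULL handling,
 * mirroring the simplification shown in the hunk above. */
static void example_cleanup(struct urb *urb)
{
	usb_free_urb(urb);	/* safe even when urb is NULL */
}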
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 0e3924e..24dc3fc 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -365,6 +365,7 @@
        regshifts=<shift1>,<shift2>,...
        slave_addrs=<addr1>,<addr2>,...
        force_kipmid=<enable1>,<enable2>,...
+       unload_when_empty=[0|1]
 
 Each of these except si_trydefaults is a list, the first item for the
 first interface, second item for the second interface, etc.
@@ -416,6 +417,11 @@
 or users that don't want the daemon (don't need the performance, don't
 want the CPU hit) can disable it.
 
+If unload_when_empty is set to 1, the driver will be unloaded if it
+doesn't find any interfaces or all the interfaces fail to work.  The
+default is 1.  Setting it to 0 is useful with the hotmod interface, but
+is obviously only meaningful for modules.
+
 When compiled into the kernel, the parameters can be specified on the
 kernel command line as:
 
@@ -441,6 +447,25 @@
 interrupts enabled, the driver will run VERY slowly.  Don't blame me,
 these interfaces suck.
 
+The driver supports a hot add and remove of interfaces.  This way,
+interfaces can be added or removed after the kernel is up and running.
+This is done using /sys/modules/ipmi_si/hotmod, which is a write-only
+parameter.  You write a string to this interface.  The string has the
+format:
+   <op1>[:op2[:op3...]]
+The "op"s are:
+   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
+You can specify more than one interface on the line.  The "opt"s are:
+   rsp=<regspacing>
+   rsi=<regsize>
+   rsh=<regshift>
+   irq=<irq>
+   ipmb=<ipmb slave addr>
+and these have the same meanings as discussed above.  Note that you
+can also use this on the kernel command line for a more compact format
+for specifying an interface.  Note that when removing an interface,
+only the first three parameters (si type, address type, and address)
+are used for the comparison.  Any options are ignored for removing.
 
 The SMBus Driver
 ----------------
@@ -502,7 +527,10 @@
 
   modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
       preaction=<preaction type> preop=<preop type> start_now=x
-      nowayout=x
+      nowayout=x ifnum_to_use=n
+
+ifnum_to_use specifies which interface the watchdog timer should use.
+The default is -1, which means to pick the first one registered.
 
 The timeout is the number of seconds to the action, and the pretimeout
 is the amount of seconds before the reset that the pre-timeout panic will
@@ -624,5 +652,9 @@
 in /proc/sys/dev/ipmi/poweroff_powercycle.  Note that if the system
 does not support power cycling, it will always do the power off.
 
+The "ifnum_to_use" parameter specifies which interface the poweroff
+code should use.  The default is -1, which means to pick the first one
+registered.
+
 Note that if you have ACPI enabled, the system will prefer using ACPI to
 power off.
diff --git a/Documentation/MSI-HOWTO.txt b/Documentation/MSI-HOWTO.txt
index 5c34910..d389388 100644
--- a/Documentation/MSI-HOWTO.txt
+++ b/Documentation/MSI-HOWTO.txt
@@ -219,7 +219,7 @@
 Note that the pre-assigned IOAPIC dev->irq is valid only if the device
 operates in PIN-IRQ assertion mode. In MSI-X mode, any attempt at
 using dev->irq by the device driver to request for interrupt service
-may result unpredictabe behavior.
+may result in unpredictable behavior.
 
 For each MSI-X vector granted, a device driver is responsible for calling
 other functions like request_irq(), enable_irq(), etc. to enable
diff --git a/Documentation/accounting/taskstats.txt b/Documentation/accounting/taskstats.txt
index 92ebf29..ff06b73 100644
--- a/Documentation/accounting/taskstats.txt
+++ b/Documentation/accounting/taskstats.txt
@@ -96,9 +96,9 @@
 a pid/tgid will be followed by some stats.
 
 b) TASKSTATS_TYPE_PID/TGID: attribute whose payload is the pid/tgid whose stats
-is being returned.
+are being returned.
 
-c) TASKSTATS_TYPE_STATS: attribute with a struct taskstsats as payload. The
+c) TASKSTATS_TYPE_STATS: attribute with a struct taskstats as payload. The
 same structure is used for both per-pid and per-tgid stats.
 
 3. New message sent by kernel whenever a task exits. The payload consists of a
@@ -122,12 +122,12 @@
 
 However, maintaining per-process, in addition to per-task stats, within the
 kernel has space and time overheads. To address this, the taskstats code
-accumalates each exiting task's statistics into a process-wide data structure.
-When the last task of a process exits, the process level data accumalated also
+accumulates each exiting task's statistics into a process-wide data structure.
+When the last task of a process exits, the process level data accumulated also
 gets sent to userspace (along with the per-task data).
 
 When a user queries to get per-tgid data, the sum of all other live threads in
-the group is added up and added to the accumalated total for previously exited
+the group is added up and added to the accumulated total for previously exited
 threads of the same thread group.
 
 Extending taskstats
diff --git a/Documentation/block/as-iosched.txt b/Documentation/block/as-iosched.txt
index e2a66f8..a598fe1 100644
--- a/Documentation/block/as-iosched.txt
+++ b/Documentation/block/as-iosched.txt
@@ -24,8 +24,10 @@
 Selecting IO schedulers
 -----------------------
 To choose IO schedulers at boot time, use the argument 'elevator=deadline'.
-'noop' and 'as' (the default) are also available. IO schedulers are assigned
-globally at boot time only presently.
+'noop', 'as' and 'cfq' (the default) are also available. IO schedulers are
+assigned globally at boot time only presently. It's also possible to change
+the IO scheduler for a given device on the fly, as described in
+Documentation/block/switching-sched.txt.
 
 
 Anticipatory IO scheduler Policies
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 34bf8f6..c6c9a9c 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -183,7 +183,7 @@
 modified to accomplish a direct page -> bus translation, without requiring
 a virtual address mapping (unlike the earlier scheme of virtual address
 -> bus translation). So this works uniformly for high-memory pages (which
-do not have a correponding kernel virtual address space mapping) and
+do not have a corresponding kernel virtual address space mapping) and
 low-memory pages.
 
 Note: Please refer to DMA-mapping.txt for a discussion on PCI high mem DMA
@@ -391,7 +391,7 @@
 on to the generic block layer, only to be merged by the i/o scheduler
 when the underlying device was capable of handling the i/o in one shot.
 Also, using the buffer head as an i/o structure for i/os that didn't originate
-from the buffer cache unecessarily added to the weight of the descriptors
+from the buffer cache unnecessarily added to the weight of the descriptors
 which were generated for each such chunk.
 
 The following were some of the goals and expectations considered in the
@@ -403,14 +403,14 @@
     for raw i/o.
 ii. Ability to represent high-memory buffers (which do not have a virtual
     address mapping in kernel address space).
-iii.Ability to represent large i/os w/o unecessarily breaking them up (i.e
+iii.Ability to represent large i/os w/o unnecessarily breaking them up (i.e
     greater than PAGE_SIZE chunks in one shot)
 iv. At the same time, ability to retain independent identity of i/os from
     different sources or i/o units requiring individual completion (e.g. for
     latency reasons)
 v.  Ability to represent an i/o involving multiple physical memory segments
     (including non-page aligned page fragments, as specified via readv/writev)
-    without unecessarily breaking it up, if the underlying device is capable of
+    without unnecessarily breaking it up, if the underlying device is capable of
     handling it.
 vi. Preferably should be based on a memory descriptor structure that can be
     passed around different types of subsystems or layers, maybe even
@@ -1013,7 +1013,7 @@
 i. Binary tree
 AS and deadline i/o schedulers use red black binary trees for disk position
 sorting and searching, and a fifo linked list for time-based searching. This
-gives good scalability and good availablility of information. Requests are
+gives good scalability and good availability of information. Requests are
 almost always dispatched in disk sort order, so a cache is kept of the next
 request in sort order to prevent binary tree lookups.
 
diff --git a/Documentation/cpu-freq/cpufreq-nforce2.txt b/Documentation/cpu-freq/cpufreq-nforce2.txt
index 9188337..babce13 100644
--- a/Documentation/cpu-freq/cpufreq-nforce2.txt
+++ b/Documentation/cpu-freq/cpufreq-nforce2.txt
@@ -1,7 +1,7 @@
 
-The cpufreq-nforce2 driver changes the FSB on nVidia nForce2 plattforms.
+The cpufreq-nforce2 driver changes the FSB on nVidia nForce2 platforms.
 
-This works better than on other plattforms, because the FSB of the CPU
+This works better than on other platforms, because the FSB of the CPU
 can be controlled independently from the PCI/AGP clock.
 
 The module has two options:
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index 4868c34..cc60d29 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -54,8 +54,8 @@
 
 ia64 and x86_64 use the number of disabled local apics in ACPI tables MADT
 to determine the number of potentially hot-pluggable cpus. The implementation
-should only rely on this to count the #of cpus, but *MUST* not rely on the
-apicid values in those tables for disabled apics. In the event BIOS doesnt
+should only rely on this to count the # of cpus, but *MUST* not rely on the
+apicid values in those tables for disabled apics. In the event BIOS doesn't
 mark such hot-pluggable cpus as disabled entries, one could use this
 parameter "additional_cpus=x" to represent those cpus in the cpu_possible_map.
 
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 28c4f79..8de132a 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -3,7 +3,7 @@
 
 	     Maintained by Torben Mathiasen <device@lanana.org>
 
-		      Last revised: 15 May 2006
+		      Last revised: 29 November 2006
 
 This list is the Linux Device List, the official registry of allocated
 device numbers and /dev directory nodes for the Linux operating
@@ -92,8 +92,9 @@
 		  7 = /dev/full		Returns ENOSPC on write
 		  8 = /dev/random	Nondeterministic random number gen.
 		  9 = /dev/urandom	Faster, less secure random number gen.
-		 10 = /dev/aio		Asyncronous I/O notification interface
+		 10 = /dev/aio		Asynchronous I/O notification interface
 		 11 = /dev/kmsg		Writes to this come out as printk's
+
   1 block	RAM disk
 		  0 = /dev/ram0		First RAM disk
 		  1 = /dev/ram1		Second RAM disk
@@ -122,7 +123,7 @@
 		devices are on major 128 and above and use the PTY
 		master multiplex (/dev/ptmx) to acquire a PTY on
 		demand.
-  
+
   2 block	Floppy disks
 		  0 = /dev/fd0		Controller 0, drive 0, autodetect
 		  1 = /dev/fd1		Controller 0, drive 1, autodetect
@@ -257,7 +258,7 @@
 		129 = /dev/vcsa1	tty1 text/attribute contents
 		    ...
 		191 = /dev/vcsa63	tty63 text/attribute contents
-	
+
 		NOTE: These devices permit both read and write access.
 
   7 block	Loopback devices
@@ -411,7 +412,7 @@
 		207 = /dev/video/em8300_sp	EM8300 DVD decoder subpicture
 		208 = /dev/compaq/cpqphpc	Compaq PCI Hot Plug Controller
 		209 = /dev/compaq/cpqrid	Compaq Remote Insight Driver
-		210 = /dev/impi/bt	IMPI coprocessor block transfer	
+		210 = /dev/impi/bt	IMPI coprocessor block transfer
 		211 = /dev/impi/smic	IMPI coprocessor stream interface
 		212 = /dev/watchdogs/0	First watchdog device
 		213 = /dev/watchdogs/1	Second watchdog device
@@ -506,6 +507,7 @@
 		 33 = /dev/patmgr1	Sequencer patch manager
 		 34 = /dev/midi02	Third MIDI port
 		 50 = /dev/midi03	Fourth MIDI port
+
  14 block	BIOS harddrive callback support {2.6}
 		  0 = /dev/dos_hda	First BIOS harddrive whole disk
 		 64 = /dev/dos_hdb	Second BIOS harddrive whole disk
@@ -527,6 +529,7 @@
 
  16 char	Non-SCSI scanners
 		  0 = /dev/gs4500	Genius 4500 handheld scanner
+
  16 block	GoldStar CD-ROM
 		  0 = /dev/gscd		GoldStar CD-ROM
 
@@ -548,6 +551,7 @@
 		  0 = /dev/ttyC0	First Cyclades port
 		    ...
 		 31 = /dev/ttyC31	32nd Cyclades port
+
  19 block	"Double" compressed disk
 		  0 = /dev/double0	First compressed disk
 		    ...
@@ -563,6 +567,7 @@
 		  0 = /dev/cub0		Callout device for ttyC0
 		    ...
 		 31 = /dev/cub31	Callout device for ttyC31
+
  20 block	Hitachi CD-ROM (under development)
 		  0 = /dev/hitcd	Hitachi CD-ROM
 
@@ -582,7 +587,7 @@
 
 		This device is used on the ARM-based Acorn RiscPC.
 		Partitions are handled the same way as for IDE disks
-		(see major number 3). 
+		(see major number 3).
 
  22 char	Digiboard serial card
 		  0 = /dev/ttyD0	First Digiboard port
@@ -591,7 +596,7 @@
  22 block	Second IDE hard disk/CD-ROM interface
 		  0 = /dev/hdc		Master: whole disk (or CD-ROM)
 		 64 = /dev/hdd		Slave: whole disk (or CD-ROM)
-		
+
 		Partitions are handled the same way as for the first
 		interface (see major number 3).
 
@@ -639,6 +644,7 @@
 
  26 char	Quanta WinVision frame grabber {2.6}
 		  0 = /dev/wvisfgrab	Quanta WinVision frame grabber
+
  26 block	Second Matsushita (Panasonic/SoundBlaster) CD-ROM
 		  0 = /dev/sbpcd4	Panasonic CD-ROM controller 1 unit 0
 		  1 = /dev/sbpcd5	Panasonic CD-ROM controller 1 unit 1
@@ -670,6 +676,7 @@
 		 37 = /dev/nrawqft1	Unit 1, no rewind-on-close, no file marks
 		 38 = /dev/nrawqft2	Unit 2, no rewind-on-close, no file marks
 		 39 = /dev/nrawqft3	Unit 3, no rewind-on-close, no file marks
+
  27 block	Third Matsushita (Panasonic/SoundBlaster) CD-ROM
 		  0 = /dev/sbpcd8	Panasonic CD-ROM controller 2 unit 0
 		  1 = /dev/sbpcd9	Panasonic CD-ROM controller 2 unit 1
@@ -681,6 +688,7 @@
 		  1 = /dev/staliomem1	Second Stallion card I/O memory
 		  2 = /dev/staliomem2	Third Stallion card I/O memory
 		  3 = /dev/staliomem3	Fourth Stallion card I/O memory
+
  28 char	Atari SLM ACSI laser printer (68k/Atari)
 		  0 = /dev/slm0		First SLM laser printer
 		  1 = /dev/slm1		Second SLM laser printer
@@ -690,6 +698,7 @@
 		  1 = /dev/sbpcd13	Panasonic CD-ROM controller 3 unit 1
 		  2 = /dev/sbpcd14	Panasonic CD-ROM controller 3 unit 2
 		  3 = /dev/sbpcd15	Panasonic CD-ROM controller 3 unit 3
+
  28 block	ACSI disk (68k/Atari)
 		  0 = /dev/ada		First ACSI disk whole disk
 		 16 = /dev/adb		Second ACSI disk whole disk
@@ -750,6 +759,7 @@
  31 char	MPU-401 MIDI
 		  0 = /dev/mpu401data	MPU-401 data port
 		  1 = /dev/mpu401stat	MPU-401 status port
+
  31 block	ROM/flash memory card
 		  0 = /dev/rom0		First ROM card (rw)
 		      ...
@@ -801,7 +811,7 @@
  34 block	Fourth IDE hard disk/CD-ROM interface
 		  0 = /dev/hdg		Master: whole disk (or CD-ROM)
 		 64 = /dev/hdh		Slave: whole disk (or CD-ROM)
-		
+
 		Partitions are handled the same way as for the first
 		interface (see major number 3).
 
@@ -818,6 +828,7 @@
 		129 = /dev/smpte1	Second MIDI port, SMPTE timed
 		130 = /dev/smpte2	Third MIDI port, SMPTE timed
 		131 = /dev/smpte3	Fourth MIDI port, SMPTE timed
+
  35 block	Slow memory ramdisk
 		  0 = /dev/slram	Slow memory ramdisk
 
@@ -828,6 +839,7 @@
 		 16 = /dev/tap0		First Ethertap device
 		    ...
 		 31 = /dev/tap15	16th Ethertap device
+
  36 block	MCA ESDI hard disk
 		  0 = /dev/eda		First ESDI disk whole disk
 		 64 = /dev/edb		Second ESDI disk whole disk
@@ -882,6 +894,7 @@
 
  40 char	Matrox Meteor frame grabber {2.6}
 		  0 = /dev/mmetfgrab	Matrox Meteor frame grabber
+
  40 block	Syquest EZ135 parallel port removable drive
 		  0 = /dev/eza		Parallel EZ135 drive, whole disk
 
@@ -893,6 +906,7 @@
 
  41 char	Yet Another Micro Monitor
 		  0 = /dev/yamm		Yet Another Micro Monitor
+
  41 block	MicroSolutions BackPack parallel port CD-ROM
 		  0 = /dev/bpcd		BackPack CD-ROM
 
@@ -901,6 +915,7 @@
 		the parallel port ATAPI CD-ROM driver at major number 46.
 
  42 char	Demo/sample use
+
  42 block	Demo/sample use
 
 		This number is intended for use in sample code, as
@@ -918,6 +933,7 @@
 		  0 = /dev/ttyI0	First virtual modem
 		    ...
 		 63 = /dev/ttyI63	64th virtual modem
+
  43 block	Network block devices
 		  0 = /dev/nb0		First network block device
 		  1 = /dev/nb1		Second network block device
@@ -934,12 +950,13 @@
 		  0 = /dev/cui0		Callout device for ttyI0
 		    ...
 		 63 = /dev/cui63	Callout device for ttyI63
+
  44 block	Flash Translation Layer (FTL) filesystems
 		  0 = /dev/ftla		FTL on first Memory Technology Device
 		 16 = /dev/ftlb		FTL on second Memory Technology Device
 		 32 = /dev/ftlc		FTL on third Memory Technology Device
 		    ...
-		240 = /dev/ftlp		FTL on 16th Memory Technology Device 
+		240 = /dev/ftlp		FTL on 16th Memory Technology Device
 
 		Partitions are handled in the same way as for IDE
 		disks (see major number 3) except that the partition
@@ -958,6 +975,7 @@
 		191 = /dev/ippp63	64th SyncPPP device
 
 		255 = /dev/isdninfo	ISDN monitor interface
+
  45 block	Parallel port IDE disk devices
 		  0 = /dev/pda		First parallel port IDE disk
 		 16 = /dev/pdb		Second parallel port IDE disk
@@ -1044,6 +1062,7 @@
 		  1 = /dev/dcbri1	Second DataComm card
 		  2 = /dev/dcbri2	Third DataComm card
 		  3 = /dev/dcbri3	Fourth DataComm card
+
  52 block	Mylex DAC960 PCI RAID controller; fifth controller
 		  0 = /dev/rd/c4d0	First disk, whole disk
 		  8 = /dev/rd/c4d1	Second disk, whole disk
@@ -1093,7 +1112,8 @@
 
  55 char	DSP56001 digital signal processor
 		  0 = /dev/dsp56k	First DSP56001
- 55 block	Mylex DAC960 PCI RAID controller; eigth controller
+
+ 55 block	Mylex DAC960 PCI RAID controller; eighth controller
 		  0 = /dev/rd/c7d0	First disk, whole disk
 		  8 = /dev/rd/c7d1	Second disk, whole disk
 		    ...
@@ -1130,6 +1150,7 @@
 		  0 = /dev/cup0		Callout device for ttyP0
 		  1 = /dev/cup1		Callout device for ttyP1
 		    ...
+
  58 block	Reserved for logical volume manager
 
  59 char	sf firewall package
@@ -1149,6 +1170,7 @@
 		NAMING CONFLICT -- PROPOSED REVISED NAME /dev/rpda0 etc
 
  60-63 char	LOCAL/EXPERIMENTAL USE
+
  60-63 block	LOCAL/EXPERIMENTAL USE
 		Allocated for local/experimental use.  For devices not
 		assigned official numbers, these ranges should be
@@ -1434,7 +1456,6 @@
 		DAC960 (see major number 48) except that the limit on
 		partitions is 15.
 
-
  78 char	PAM Software's multimodem boards
 		  0 = /dev/ttyM0	First PAM modem
 		  1 = /dev/ttyM1	Second PAM modem
@@ -1450,13 +1471,12 @@
 		DAC960 (see major number 48) except that the limit on
 		partitions is 15.
 
-
  79 char	PAM Software's multimodem boards - alternate devices
 		  0 = /dev/cum0		Callout device for ttyM0
 		  1 = /dev/cum1		Callout device for ttyM1
 		    ...
 
- 79 block	Compaq Intelligent Drive Array, eigth controller
+ 79 block	Compaq Intelligent Drive Array, eighth controller
 		  0 = /dev/ida/c7d0	First logical drive whole disk
 		 16 = /dev/ida/c7d1	Second logical drive whole disk
 		    ...
@@ -1466,7 +1486,6 @@
 		DAC960 (see major number 48) except that the limit on
 		partitions is 15.
 
-
  80 char	Photometrics AT200 CCD camera
 		  0 = /dev/at200	Photometrics AT200 CCD camera
 
@@ -1679,7 +1698,7 @@
 		  1 = /dev/dcxx1	Second capture card
 		    ...
 
- 94 block IBM S/390 DASD block storage
+ 94 block	IBM S/390 DASD block storage
     		  0 = /dev/dasda First DASD device, major
     		  1 = /dev/dasda1 First DASD device, block 1
 	    	  2 = /dev/dasda2 First DASD device, block 2
@@ -1695,7 +1714,7 @@
 		  1 = /dev/ipnat	NAT control device/log file
 		  2 = /dev/ipstate	State information log file
 		  3 = /dev/ipauth	Authentication control device/log file
-		    ...		
+		    ...
 
  96 char	Parallel port ATAPI tape devices
 		  0 = /dev/pt0		First parallel port ATAPI tape
@@ -1705,7 +1724,7 @@
 		129 = /dev/npt1		Second p.p. ATAPI tape, no rewind
 		    ...
 
- 96 block Inverse NAND Flash Translation Layer
+ 96 block	Inverse NAND Flash Translation Layer
 		  0 = /dev/inftla First INFTL layer
 		 16 = /dev/inftlb Second INFTL layer
 		    ...
@@ -1900,7 +1919,7 @@
 		  1 = /dev/av1		Second A/V card
 		    ...
 
-111 block	Compaq Next Generation Drive Array, eigth controller
+111 block	Compaq Next Generation Drive Array, eighth controller
 		  0 = /dev/cciss/c7d0	First logical drive, whole disk
 		 16 = /dev/cciss/c7d1	Second logical drive, whole disk
 		    ...
@@ -1937,7 +1956,6 @@
 		    ...
 
 113 block	IBM iSeries virtual CD-ROM
-
 		  0 = /dev/iseries/vcda	First virtual CD-ROM
 		  1 = /dev/iseries/vcdb	Second virtual CD-ROM
 		    ...
@@ -2059,11 +2077,12 @@
 		    ...
 
 119 char	VMware virtual network control
-		  0 = /dev/vnet0	1st virtual network
-		  1 = /dev/vnet1	2nd virtual network
+		  0 = /dev/vmnet0	1st virtual network
+		  1 = /dev/vmnet1	2nd virtual network
 		    ...
 
 120-127 char	LOCAL/EXPERIMENTAL USE
+
 120-127 block	LOCAL/EXPERIMENTAL USE
 		Allocated for local/experimental use.  For devices not
 		assigned official numbers, these ranges should be
@@ -2075,7 +2094,6 @@
 		nodes; instead they should be accessed through the
 		/dev/ptmx cloning interface.
 
-
 128 block       SCSI disk devices (128-143)
                   0 = /dev/sddy         129th SCSI disk whole disk
                  16 = /dev/sddz         130th SCSI disk whole disk
@@ -2087,7 +2105,6 @@
 		disks (see major number 3) except that the limit on
 		partitions is 15.
 
-
 129 block       SCSI disk devices (144-159)
                   0 = /dev/sdeo         145th SCSI disk whole disk
                  16 = /dev/sdep         146th SCSI disk whole disk
@@ -2123,7 +2140,6 @@
 		disks (see major number 3) except that the limit on
 		partitions is 15.
 
-
 132 block       SCSI disk devices (192-207)
                   0 = /dev/sdgk         193rd SCSI disk whole disk
                  16 = /dev/sdgl         194th SCSI disk whole disk
@@ -2135,7 +2151,6 @@
 		disks (see major number 3) except that the limit on
 		partitions is 15.
 
-
 133 block       SCSI disk devices (208-223)
                   0 = /dev/sdha         209th SCSI disk whole disk
                  16 = /dev/sdhb         210th SCSI disk whole disk
@@ -2147,7 +2162,6 @@
 		disks (see major number 3) except that the limit on
 		partitions is 15.
 
-
 134 block       SCSI disk devices (224-239)
                   0 = /dev/sdhq         225th SCSI disk whole disk
                  16 = /dev/sdhr         226th SCSI disk whole disk
@@ -2159,7 +2173,6 @@
 		disks (see major number 3) except that the limit on
 		partitions is 15.
 
-
 135 block       SCSI disk devices (240-255)
                   0 = /dev/sdig         241st SCSI disk whole disk
                  16 = /dev/sdih         242nd SCSI disk whole disk
@@ -2171,7 +2184,6 @@
 		disks (see major number 3) except that the limit on
 		partitions is 15.
 
-
 136-143 char	Unix98 PTY slaves
 		  0 = /dev/pts/0	First Unix98 pseudo-TTY
 		  1 = /dev/pts/1	Second Unix98 pesudo-TTY
@@ -2384,6 +2396,7 @@
 		    ...
 
 159 char	RESERVED
+
 159 block	RESERVED
 
 160 char	General Purpose Instrument Bus (GPIB)
@@ -2427,7 +2440,7 @@
 
 		Partitions are handled in the same way as for IDE
 		disks (see major number 3) except that the limit on
-		partitions is 31. 
+		partitions is 31.
 
 162 char	Raw block device interface
 		  0 = /dev/rawctl	Raw I/O control device
@@ -2483,7 +2496,6 @@
 
 171 char	Reserved for IEEE 1394 (Firewire)
 
-
 172 char	Moxa Intellio serial card
 		  0 = /dev/ttyMX0	First Moxa port
 		  1 = /dev/ttyMX1	Second Moxa port
@@ -2543,9 +2555,6 @@
 		 64 = /dev/usb/rio500	Diamond Rio 500
 		 65 = /dev/usb/usblcd	USBLCD Interface (info@usblcd.de)
 		 66 = /dev/usb/cpad0	Synaptics cPad (mouse/LCD)
-		 67 = /dev/usb/adutux0	1st Ontrak ADU device
-		    ...
-		 76 = /dev/usb/adutux10	10th Ontrak ADU device
 		 96 = /dev/usb/hiddev0	1st USB HID device
 		    ...
 		111 = /dev/usb/hiddev15	16th USB HID device
@@ -2558,7 +2567,7 @@
 		132 = /dev/usb/idmouse	ID Mouse (fingerprint scanner) device
 		133 = /dev/usb/sisusbvga1	First SiSUSB VGA device
 		    ...
-		140 = /dev/usb/sisusbvga8	Eigth SISUSB VGA device
+		140 = /dev/usb/sisusbvga8	Eighth SISUSB VGA device
 		144 = /dev/usb/lcd	USB LCD device
 		160 = /dev/usb/legousbtower0	1st USB Legotower device
 		    ...
@@ -2571,7 +2580,7 @@
 		  0 = /dev/uba		First USB block device
 		  8 = /dev/ubb		Second USB block device
 		 16 = /dev/ubc		Third USB block device
-		    ...
+ 		    ...
 
 181 char	Conrad Electronic parallel port radio clocks
 		  0 = /dev/pcfclock0	First Conrad radio clock
@@ -2657,7 +2666,7 @@
 		 32 = /dev/mvideo/status2	Third device
 		    ...
 		    ...
-		240 = /dev/mvideo/status15	16th device 
+		240 = /dev/mvideo/status15	16th device
 		    ...
 
 195 char	Nvidia graphics devices
@@ -2795,6 +2804,10 @@
 		    ...
 		 185 = /dev/ttyNX15		Hilscher netX serial port 15
 		 186 = /dev/ttyJ0		JTAG1 DCC protocol based serial port emulation
+		 187 = /dev/ttyUL0		Xilinx uartlite - port 0
+		    ...
+		 190 = /dev/ttyUL3		Xilinx uartlite - port 3
+		 191 = /dev/xvc0		Xen virtual console - port 0
 
 205 char	Low-density serial ports (alternate device)
 		  0 = /dev/culu0		Callout device for ttyLU0
@@ -2832,7 +2845,6 @@
 		 82 = /dev/cuvr0		Callout device for ttyVR0
 		 83 = /dev/cuvr1		Callout device for ttyVR1
 
-
 206 char	OnStream SC-x0 tape devices
 		  0 = /dev/osst0		First OnStream SCSI tape, mode 0
 		  1 = /dev/osst1		Second OnStream SCSI tape, mode 0
@@ -2922,7 +2934,6 @@
 		    ...
 
 212 char	LinuxTV.org DVB driver subsystem
-
 		  0 = /dev/dvb/adapter0/video0    first video decoder of first card
 		  1 = /dev/dvb/adapter0/audio0    first audio decoder of first card
 		  2 = /dev/dvb/adapter0/sec0      (obsolete/unused)
@@ -3008,9 +3019,9 @@
 		  2 = /dev/3270/tub2		Second 3270 terminal
 		    ...
 
-229 char	IBM iSeries virtual console
-		  0 = /dev/iseries/vtty0	First console port
-		  1 = /dev/iseries/vtty1	Second console port
+229 char	IBM iSeries/pSeries virtual console
+		  0 = /dev/hvc0			First console port
+		  1 = /dev/hvc1			Second console port
 		    ...
 
 230 char	IBM iSeries virtual tape
@@ -3083,12 +3094,14 @@
 234-239		UNASSIGNED
 
 240-254 char	LOCAL/EXPERIMENTAL USE
+
 240-254 block	LOCAL/EXPERIMENTAL USE
 		Allocated for local/experimental use.  For devices not
 		assigned official numbers, these ranges should be
 		used in order to avoid conflicting with future assignments.
 
 255 char	RESERVED
+
 255 block	RESERVED
 
 		This major is reserved to assist the expansion to a
@@ -3115,7 +3128,20 @@
 257 char	Phoenix Technologies Cryptographic Services Driver
 		  0 = /dev/ptlsec	Crypto Services Driver
 
+257 block	SSFDC Flash Translation Layer filesystem
+		  0 = /dev/ssfdca	First SSFDC layer
+		  8 = /dev/ssfdcb	Second SSFDC layer
+		 16 = /dev/ssfdcc	Third SSFDC layer
+		 24 = /dev/ssfdcd	4th SSFDC layer
+		 32 = /dev/ssfdce	5th SSFDC layer
+		 40 = /dev/ssfdcf	6th SSFDC layer
+		 48 = /dev/ssfdcg	7th SSFDC layer
+		 56 = /dev/ssfdch	8th SSFDC layer
 
+258 block	ROM/Flash read-only translation layer
+		  0 = /dev/blockrom0	First ROM card's translation layer interface
+		  1 = /dev/blockrom1	Second ROM card's translation layer interface
+		  ...
 
  ****	ADDITIONAL /dev DIRECTORY ENTRIES
 
diff --git a/Documentation/driver-model/platform.txt b/Documentation/driver-model/platform.txt
index 5eee3e0..9f0bc3b 100644
--- a/Documentation/driver-model/platform.txt
+++ b/Documentation/driver-model/platform.txt
@@ -1,99 +1,131 @@
 Platform Devices and Drivers
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+See <linux/platform_device.h> for the driver model interface to the
+platform bus:  platform_device, and platform_driver.  This pseudo-bus
+is used to connect devices on busses with minimal infrastructure,
+like those used to integrate peripherals on many system-on-chip
+processors, or some "legacy" PC interconnects; as opposed to large
+formally specified ones like PCI or USB.
+
 
 Platform devices
 ~~~~~~~~~~~~~~~~
 Platform devices are devices that typically appear as autonomous
 entities in the system. This includes legacy port-based devices and
-host bridges to peripheral buses. 
+host bridges to peripheral buses, and most controllers integrated
+into system-on-chip platforms.  What they usually have in common
+is direct addressing from a CPU bus.  Rarely, a platform_device will
+be connected through a segment of some other kind of bus; but its
+registers will still be directly addressable.
+
+Platform devices are given a name, used in driver binding, and a
+list of resources such as addresses and IRQs.
+
+struct platform_device {
+	const char	*name;
+	u32		id;
+	struct device	dev;
+	u32		num_resources;
+	struct resource	*resource;
+};
 
 
 Platform drivers
 ~~~~~~~~~~~~~~~~
-Drivers for platform devices are typically very simple and
-unstructured. Either the device was present at a particular I/O port
-and the driver was loaded, or it was not. There was no possibility
-of hotplugging or alternative discovery besides probing at a specific
-I/O address and expecting a specific response.
+Platform drivers follow the standard driver model convention, where
+discovery/enumeration is handled outside the drivers, and drivers
+provide probe() and remove() methods.  They support power management
+and shutdown notifications using the standard conventions.
+
+struct platform_driver {
+	int (*probe)(struct platform_device *);
+	int (*remove)(struct platform_device *);
+	void (*shutdown)(struct platform_device *);
+	int (*suspend)(struct platform_device *, pm_message_t state);
+	int (*suspend_late)(struct platform_device *, pm_message_t state);
+	int (*resume_early)(struct platform_device *);
+	int (*resume)(struct platform_device *);
+	struct device_driver driver;
+};
+
+Note that probe() should generally verify that the specified device hardware
+actually exists; sometimes platform setup code can't be sure.  The probing
+can use device resources, including clocks, and device platform_data.
+
+Platform drivers register themselves the normal way:
+
+	int platform_driver_register(struct platform_driver *drv);
+
+Or, in common situations where the device is known not to be hot-pluggable,
+the probe() routine can live in an init section to reduce the driver's
+runtime memory footprint:
+
+	int platform_driver_probe(struct platform_driver *drv,
+			  int (*probe)(struct platform_device *))
 
 
-Other Architectures, Modern Firmware, and new Platforms
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-These devices are not always at the legacy I/O ports. This is true on
-other architectures and on some modern architectures. In most cases,
-the drivers are modified to discover the devices at other well-known
-ports for the given platform. However, the firmware in these systems
-does usually know where exactly these devices reside, and in some
-cases, it's the only way of discovering them. 
+Device Enumeration
+~~~~~~~~~~~~~~~~~~
+As a rule, platform-specific (and often board-specific) setup code will
+register platform devices:
+
+	int platform_device_register(struct platform_device *pdev);
+
+	int platform_add_devices(struct platform_device **pdevs, int ndev);
+
+The general rule is to register only those devices that actually exist,
+but in some cases extra devices might be registered.  For example, a kernel
+might be configured to work with an external network adapter that might not
+be populated on all boards, or likewise to work with an integrated controller
+that some boards might not hook up to any peripherals.
+
+In some cases, boot firmware will export tables describing the devices
+that are populated on a given board.   Without such tables, often the
+only way for system setup code to set up the correct devices is to build
+a kernel for a specific target board.  Such board-specific kernels are
+common with embedded and custom systems development.
+
+In many cases, the memory and IRQ resources associated with the platform
+device are not enough to let the device's driver work.  Board setup code
+will often use the device's platform_data field to supply that
+additional information.
+
+Embedded systems frequently need one or more clocks for platform devices,
+which are normally kept off until they're actively needed (to save power).
+System setup also associates those clocks with the device, so that
+calls to clk_get(&pdev->dev, clock_name) return them as needed.
 
 
-The Platform Bus
-~~~~~~~~~~~~~~~~
-A platform bus has been created to deal with these issues. First and
-foremost, it groups all the legacy devices under a common bus, and
-gives them a common parent if they don't already have one. 
+Device Naming and Driver Binding
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The platform_device.dev.bus_id is the canonical name for the devices.
+It's built from two components:
 
-But, besides the organizational benefits, the platform bus can also
-accommodate firmware-based enumeration. 
+    * platform_device.name ... which is also used for driver matching.
 
+    * platform_device.id ... the device instance number, or else "-1"
+      to indicate there's only one.
 
-Device Discovery
-~~~~~~~~~~~~~~~~
-The platform bus has no concept of probing for devices. Devices
-discovery is left up to either the legacy drivers or the
-firmware. These entities are expected to notify the platform of
-devices that it discovers via the bus's add() callback:
+These are concatenated, so name/id "serial"/0 indicates bus_id "serial.0", and
+"serial"/3 indicates bus_id "serial.3"; both would use the platform_driver
+named "serial".  A name/id of "my_rtc"/-1 would be bus_id "my_rtc" (no
+instance id) and would use the platform_driver called "my_rtc".
 
-	platform_bus.add(parent,bus_id).
+Driver binding is performed automatically by the driver core, invoking
+driver probe() after finding a match between device and driver.  If the
+probe() succeeds, the driver and device are bound as usual.  There are
+three different ways to find such a match:
 
+    - Whenever a device is registered, the drivers for that bus are
+      checked for matches.  Platform devices should be registered very
+      early during system boot.
 
-Bus IDs
-~~~~~~~
-Bus IDs are the canonical names for the devices. There is no globally
-standard addressing mechanism for legacy devices. In the IA-32 world,
-we have Pnp IDs to use, as well as the legacy I/O ports. However,
-neither tell what the device really is or have any meaning on other
-platforms. 
+    - When a driver is registered using platform_driver_register(), all
+      unbound devices on that bus are checked for matches.  Drivers
+      usually register later during booting, or by module loading.
 
-Since both PnP IDs and the legacy I/O ports (and other standard I/O
-ports for specific devices) have a 1:1 mapping, we map the
-platform-specific name or identifier to a generic name (at least
-within the scope of the kernel).
-
-For example, a serial driver might find a device at I/O 0x3f8. The
-ACPI firmware might also discover a device with PnP ID (_HID)
-PNP0501. Both correspond to the same device and should be mapped to the
-canonical name 'serial'. 
-
-The bus_id field should be a concatenation of the canonical name and
-the instance of that type of device. For example, the device at I/O
-port 0x3f8 should have a bus_id of "serial0". This places the
-responsibility of enumerating devices of a particular type up to the
-discovery mechanism. But, they are the entity that should know best
-(as opposed to the platform bus driver).
-
-
-Drivers 
-~~~~~~~
-Drivers for platform devices should have a name that is the same as
-the canonical name of the devices they support. This allows the
-platform bus driver to do simple matching with the basic data
-structures to determine if a driver supports a certain device. 
-
-For example, a legacy serial driver should have a name of 'serial' and
-register itself with the platform bus. 
-
-
-Driver Binding
-~~~~~~~~~~~~~~
-Legacy drivers assume they are bound to the device once they start up
-and probe an I/O port. Divorcing them from this will be a difficult
-process. However, that shouldn't prevent us from implementing
-firmware-based enumeration. 
-
-The firmware should notify the platform bus about devices before the
-legacy drivers have had a chance to load. Once the drivers are loaded,
-they driver model core will attempt to bind the driver to any
-previously-discovered devices. Once that has happened, it will be free
-to discover any other devices it pleases.
+    - Registering a driver using platform_driver_probe() works just like
+      using platform_driver_register(), except that the driver won't
+      be probed later if another device registers.  (Which is OK, since
+      this interface is only for use with non-hotpluggable devices.)
 
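
A minimal sketch of a platform_driver following the conventions described in
the rewritten platform.txt above; the "example-serial" name, the resource
handling and the error codes are illustrative only:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	/* Verify that board setup code really provided the resources. */
	if (!regs || irq < 0)
		return -ENODEV;

	/* ... ioremap regs, request_irq(irq, ...), read pdev->dev.platform_data ... */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* ... undo whatever example_probe() set up ... */
	return 0;
}

static struct platform_driver example_driver = {
	.probe		= example_probe,
	.remove		= example_remove,
	.driver		= {
		.name	= "example-serial",	/* matched against platform_device.name */
		.owner	= THIS_MODULE,
	},
};

static int __init example_init(void)
{
	return platform_driver_register(&example_driver);
}

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);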
diff --git a/Documentation/driver-model/porting.txt b/Documentation/driver-model/porting.txt
index 98b233c..92d86f7 100644
--- a/Documentation/driver-model/porting.txt
+++ b/Documentation/driver-model/porting.txt
@@ -92,7 +92,7 @@
 describing the relationship the device has to other entities. 
 
 
-- Embedd a struct device in the bus-specific device type. 
+- Embed a struct device in the bus-specific device type. 
 
 
 struct pci_dev {
diff --git a/Documentation/dvb/ci.txt b/Documentation/dvb/ci.txt
index 531239b..2ecd834 100644
--- a/Documentation/dvb/ci.txt
+++ b/Documentation/dvb/ci.txt
@@ -71,7 +71,7 @@
 The disadvantage is that the driver/hardware has to manage the rest. For
 the application programmer it would be as simple as sending/receiving an
 array to/from the CI ioctls as defined in the Linux DVB API. No changes
-have been made in the API to accomodate this feature.
+have been made in the API to accommodate this feature.
 
 
 * Why the need for another CI interface ?
@@ -102,7 +102,7 @@
 implemented by most applications. Hence this area is revisited.
 
 This CI interface is quite different in the case that it tries to
-accomodate all other CI based devices, that fall into the other categories
+accommodate all other CI based devices that fall into the other categories.
 
 This means that this CI interface handles the EN50221 style tags in the
 Application layer only and no session management is taken care of by the
diff --git a/Documentation/eisa.txt b/Documentation/eisa.txt
index 6a099ed..60e361b 100644
--- a/Documentation/eisa.txt
+++ b/Documentation/eisa.txt
@@ -62,7 +62,7 @@
 bus_base_addr : slot 0 address on this bus
 slots	      : max slot number to probe
 force_probe   : Probe even when slot 0 is empty (no EISA mainboard)
-dma_mask      : Default DMA mask. Usualy the bridge device dma_mask.
+dma_mask      : Default DMA mask. Usually the bridge device dma_mask.
 bus_nr	      : unique bus id, set by eisa_root_register
 
 ** Driver :
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index d52c4aa..226ecf2 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -70,18 +70,6 @@
 
 ---------------------------
 
-What:	ip_queue and ip6_queue (old ipv4-only and ipv6-only netfilter queue)
-When:	December 2005
-Why:	This interface has been obsoleted by the new layer3-independent
-	"nfnetlink_queue".  The Kernel interface is compatible, so the old
-	ip[6]tables "QUEUE" targets still work and will transparently handle
-	all packets into nfnetlink queue number 0.  Userspace users will have
-	to link against API-compatible library on top of libnfnetlink_queue 
-	instead of the current 'libipq'.
-Who:	Harald Welte <laforge@netfilter.org>
-
----------------------------
-
 What:	remove EXPORT_SYMBOL(kernel_thread)
 When:	August 2006
 Files:	arch/*/kernel/*_ksyms.c
@@ -227,21 +215,6 @@
 
 ---------------------------
 
-What:	frame diverter
-When:	November 2006
-Why:	The frame diverter is included in most distribution kernels, but is
-	broken. It does not correctly handle many things:
-	- IPV6
-	- non-linear skb's
-	- network device RCU on removal
-	- input frames not correctly checked for protocol errors
-	It also adds allocation overhead even if not enabled.
-	It is not clear if anyone is still using it.
-Who:	Stephen Hemminger <shemminger@osdl.org>
-
----------------------------
-
-
 What:	PHYSDEVPATH, PHYSDEVBUS, PHYSDEVDRIVER in the uevent environment
 When:	October 2008
 Why:	The stacking of class devices makes these values misleading and
@@ -261,10 +234,11 @@
 
 ---------------------------
 
-What:	ftape
-When:	2.6.20
-Why:	Orphaned for ages.  SMP bugs long unfixed.  Few users left
-	in the world.
-Who:	Jeff Garzik <jeff@garzik.org>
+What:	IPv4 only connection tracking/NAT/helpers
+When:	2.6.22
+Why:	The new layer 3 independent connection tracking replaces the old
+	IPv4 only version. After some stabilization of the new code the
+	old one will be removed.
+Who:	Patrick McHardy <kaber@trash.net>
 
 ---------------------------
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index eb1a6ca..790ef6f 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -124,7 +124,7 @@
 write_super_lockfs:	?
 unlockfs:		?
 statfs:			no	no	no
-remount_fs:		no	yes	maybe		(see below)
+remount_fs:		yes	yes	maybe		(see below)
 clear_inode:		no
 umount_begin:		yes	no	no
 show_options:		no				(vfsmount->sem)
diff --git a/Documentation/filesystems/adfs.txt b/Documentation/filesystems/adfs.txt
index 060abb0..9e8811f 100644
--- a/Documentation/filesystems/adfs.txt
+++ b/Documentation/filesystems/adfs.txt
@@ -3,7 +3,7 @@
 
   uid=nnn	All files in the partition will be owned by
 		user id nnn.  Default 0 (root).
-  gid=nnn	All files in the partition willbe in group
+  gid=nnn	All files in the partition will be in group
 		nnn.  Default 0 (root).
   ownmask=nnn	The permission mask for ADFS 'owner' permissions
 		will be nnn.  Default 0700.
diff --git a/Documentation/filesystems/configfs/configfs.txt b/Documentation/filesystems/configfs/configfs.txt
index c3a7afb..b34cdb5 100644
--- a/Documentation/filesystems/configfs/configfs.txt
+++ b/Documentation/filesystems/configfs/configfs.txt
@@ -209,7 +209,7 @@
 
 [struct config_group]
 
-A config_item cannot live in a vaccum.  The only way one can be created
+A config_item cannot live in a vacuum.  The only way one can be created
 is via mkdir(2) on a config_group.  This will trigger creation of a
 child item.
 
@@ -275,7 +275,7 @@
 
 [struct configfs_subsystem]
 
-A subsystem must register itself, ususally at module_init time.  This
+A subsystem must register itself, usually at module_init time.  This
 tells configfs to make the subsystem appear in the file tree.
 
 	struct configfs_subsystem {
diff --git a/Documentation/filesystems/fuse.txt b/Documentation/filesystems/fuse.txt
index a584f05..345392c 100644
--- a/Documentation/filesystems/fuse.txt
+++ b/Documentation/filesystems/fuse.txt
@@ -51,6 +51,22 @@
 
   http://fuse.sourceforge.net/
 
+Filesystem type
+~~~~~~~~~~~~~~~
+
+The filesystem type given to mount(2) can be one of the following:
+
+'fuse'
+
+  This is the usual way to mount a FUSE filesystem.  The first
+  argument of the mount system call may contain an arbitrary string,
+  which is not interpreted by the kernel.
+
+'fuseblk'
+
+  The filesystem is block device based.  The first argument of the
+  mount system call is interpreted as the name of the device.
+
 Mount options
 ~~~~~~~~~~~~~
 
@@ -94,6 +110,11 @@
   The default is infinite.  Note that the size of read requests is
   limited anyway to 32 pages (which is 128kbyte on i386).
 
+'blksize=N'
+
+  Set the block size for the filesystem.  The default is 512.  This
+  option is only valid for 'fuseblk' type mounts.
+
 Control filesystem
 ~~~~~~~~~~~~~~~~~~
 
@@ -111,7 +132,7 @@
 
  'waiting'
 
-  The number of requests which are waiting to be transfered to
+  The number of requests which are waiting to be transferred to
   userspace or being processed by the filesystem daemon.  If there is
   no filesystem activity and 'waiting' is non-zero, then the
   filesystem is hung or deadlocked.
@@ -136,7 +157,7 @@
 
   2) If the request is not yet sent to userspace AND the signal is not
      fatal, then an 'interrupted' flag is set for the request.  When
-     the request has been successfully transfered to userspace and
+     the request has been successfully transferred to userspace and
      this flag is set, an INTERRUPT request is queued.
 
   3) If the request is already sent to userspace, then an INTERRUPT
diff --git a/Documentation/filesystems/hpfs.txt b/Documentation/filesystems/hpfs.txt
index 33dc360..38aba03 100644
--- a/Documentation/filesystems/hpfs.txt
+++ b/Documentation/filesystems/hpfs.txt
@@ -274,7 +274,7 @@
      Fixed race-condition in buffer code - it is in all filesystems in Linux;
         when reading device (cat /dev/hda) while creating files on it, files
         could be damaged
-2.02 Woraround for bug in breada in Linux. breada could cause accesses beyond
+2.02 Workaround for bug in breada in Linux. breada could cause accesses beyond
         end of partition
 2.03 Char, block devices and pipes are correctly created
      Fixed non-crashing race in unlink (Alexander Viro)
diff --git a/Documentation/filesystems/ntfs.txt b/Documentation/filesystems/ntfs.txt
index 35f105b..13ba649 100644
--- a/Documentation/filesystems/ntfs.txt
+++ b/Documentation/filesystems/ntfs.txt
@@ -337,7 +337,7 @@
 this (note all values are in 512-byte sectors):
 
 --- cut here ---
-# Ofs Size   Raid   Log  Number Region Should Number Source  Start Taget  Start
+# Ofs Size   Raid   Log  Number Region Should Number Source  Start Target Start
 # in  of the type   type of log size   sync?  of     Device  in    Device in
 # vol volume		 params		     mirrors	     Device	  Device
 0    2056320 mirror core 2	16     nosync 2	   /dev/hda1 0   /dev/hdb1 0
@@ -599,7 +599,7 @@
 	- Major bug fixes for reading files and volumes in corner cases which
 	  were being hit by Windows 2k/XP users.
 2.1.2:
-	- Major bug fixes aleviating the hangs in statfs experienced by some
+	- Major bug fixes alleviating the hangs in statfs experienced by some
 	  users.
 2.1.1:
 	- Update handling of compressed files so people no longer get the
diff --git a/Documentation/filesystems/ocfs2.txt b/Documentation/filesystems/ocfs2.txt
index 4389c68..af6defd 100644
--- a/Documentation/filesystems/ocfs2.txt
+++ b/Documentation/filesystems/ocfs2.txt
@@ -30,7 +30,7 @@
 Features which OCFS2 does not support yet:
 	- sparse files
 	- extended attributes
-	- shared writeable mmap
+	- shared writable mmap
 	- loopback is supported, but data written will not
 	  be cluster coherent.
 	- quotas
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 3355e69..72af5de 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1220,9 +1220,9 @@
 you probably should increase the lower_zone_protection setting.
 
 The units of this tunable are fairly vague.  It is approximately equal
-to "megabytes".  So setting lower_zone_protection=100 will protect around 100
+to "megabytes," so setting lower_zone_protection=100 will protect around 100
 megabytes of the lowmem zone from user allocations.  It will also make
-those 100 megabytes unavaliable for use by applications and by
+those 100 megabytes unavailable for use by applications and by
 pagecache, so there is a cost.
 
 The effects of this tunable may be observed by monitoring
@@ -1538,10 +1538,10 @@
 tcp_ecn
 -------
 
-This file controls the use of the ECN bit in the IPv4 headers, this is a new
+This file controls the use of the ECN bit in the IPv4 headers. This is a new
 feature about Explicit Congestion Notification, but some routers and firewalls
-block trafic that has this bit set, so it could be necessary to echo 0 to
-/proc/sys/net/ipv4/tcp_ecn, if you want to talk to this sites. For more info
+block traffic that has this bit set, so it could be necessary to echo 0 to
+/proc/sys/net/ipv4/tcp_ecn if you want to talk to these sites. For more info
 you could read RFC2481.
 
 tcp_retrans_collapse
diff --git a/Documentation/filesystems/spufs.txt b/Documentation/filesystems/spufs.txt
index 982645a..1343d11 100644
--- a/Documentation/filesystems/spufs.txt
+++ b/Documentation/filesystems/spufs.txt
@@ -210,7 +210,7 @@
    /signal2
        The two signal notification channels of an SPU.  These  are  read-write
        files  that  operate  on  a 32 bit word.  Writing to one of these files
-       triggers an interrupt on the SPU. The  value  writting  to  the  signal
+       triggers an interrupt on the SPU.  The  value  written  to  the  signal
        files can be read from the SPU through a channel read or from host user
        space through the file.  After the value has been read by the  SPU,  it
        is  reset  to zero.  The possible operations on an open signal1 or sig-
diff --git a/Documentation/filesystems/sysv-fs.txt b/Documentation/filesystems/sysv-fs.txt
index d817224..253b50d 100644
--- a/Documentation/filesystems/sysv-fs.txt
+++ b/Documentation/filesystems/sysv-fs.txt
@@ -1,11 +1,8 @@
-This is the implementation of the SystemV/Coherent filesystem for Linux.
 It implements all of
   - Xenix FS,
   - SystemV/386 FS,
   - Coherent FS.
 
-This is version beta 4.
-
 To install:
 * Answer the 'System V and Coherent filesystem support' question with 'y'
   when configuring the kernel.
@@ -28,11 +25,173 @@
   for this FS on hard disk yet.
 
 
-Please report any bugs and suggestions to
-  Bruno Haible <haible@ma2s2.mathematik.uni-karlsruhe.de>
-  Pascal Haible <haible@izfm.uni-stuttgart.de>
-  Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
+These filesystems are rather similar. Here is a comparison with Minix FS:
 
-Bruno Haible
-<haible@ma2s2.mathematik.uni-karlsruhe.de>
+* Linux fdisk reports on partitions
+  - Minix FS     0x81 Linux/Minix
+  - Xenix FS     ??
+  - SystemV FS   ??
+  - Coherent FS  0x08 AIX bootable
 
+* Size of a block or zone (data allocation unit on disk)
+  - Minix FS     1024
+  - Xenix FS     1024 (also 512 ??)
+  - SystemV FS   1024 (also 512 and 2048)
+  - Coherent FS   512
+
+* General layout: all have one boot block, one super block and
+  separate areas for inodes and for directories/data.
+  On SystemV Release 2 FS (e.g. Microport) the first track is reserved and
+  all the block numbers (including the super block) are offset by one track.
+
+* Byte ordering of "short" (16 bit entities) on disk:
+  - Minix FS     little endian  0 1
+  - Xenix FS     little endian  0 1
+  - SystemV FS   little endian  0 1
+  - Coherent FS  little endian  0 1
+  Of course, this affects only the file system, not the data of files on it!
+
+* Byte ordering of "long" (32 bit entities) on disk:
+  - Minix FS     little endian  0 1 2 3
+  - Xenix FS     little endian  0 1 2 3
+  - SystemV FS   little endian  0 1 2 3
+  - Coherent FS  PDP-11         2 3 0 1
+  Of course, this affects only the file system, not the data of files on it!
+
+* Inode on disk: "short", 0 means non-existent, the root dir ino is:
+  - Minix FS                            1
+  - Xenix FS, SystemV FS, Coherent FS   2
+
+* Maximum number of hard links to a file:
+  - Minix FS     250
+  - Xenix FS     ??
+  - SystemV FS   ??
+  - Coherent FS  >=10000
+
+* Free inode management:
+  - Minix FS                             a bitmap
+  - Xenix FS, SystemV FS, Coherent FS
+      There is a cache of a certain number of free inodes in the super-block.
+      When it is exhausted, new free inodes are found using a linear search.
+
+* Free block management:
+  - Minix FS                             a bitmap
+  - Xenix FS, SystemV FS, Coherent FS
+      Free blocks are organized in a "free list". This may be a misleading
+      term, since it is not true that every free block contains a pointer
+      to the next free block. Rather, the free blocks are organized in chunks
+      of limited size, and every now and then a free block contains pointers
+      to the free blocks pertaining to the next chunk; the first of these
+      contains pointers and so on. The list terminates with a "block number"
+      0 on Xenix FS and SystemV FS, with a block zeroed out on Coherent FS.
+
+* Super-block location:
+  - Minix FS     block 1 = bytes 1024..2047
+  - Xenix FS     block 1 = bytes 1024..2047
+  - SystemV FS   bytes 512..1023
+  - Coherent FS  block 1 = bytes 512..1023
+
+* Super-block layout:
+  - Minix FS
+                    unsigned short s_ninodes;
+                    unsigned short s_nzones;
+                    unsigned short s_imap_blocks;
+                    unsigned short s_zmap_blocks;
+                    unsigned short s_firstdatazone;
+                    unsigned short s_log_zone_size;
+                    unsigned long s_max_size;
+                    unsigned short s_magic;
+  - Xenix FS, SystemV FS, Coherent FS
+                    unsigned short s_firstdatazone;
+                    unsigned long  s_nzones;
+                    unsigned short s_fzone_count;
+                    unsigned long  s_fzones[NICFREE];
+                    unsigned short s_finode_count;
+                    unsigned short s_finodes[NICINOD];
+                    char           s_flock;
+                    char           s_ilock;
+                    char           s_modified;
+                    char           s_rdonly;
+                    unsigned long  s_time;
+                    short          s_dinfo[4]; -- SystemV FS only
+                    unsigned long  s_free_zones;
+                    unsigned short s_free_inodes;
+                    short          s_dinfo[4]; -- Xenix FS only
+                    unsigned short s_interleave_m,s_interleave_n; -- Coherent FS only
+                    char           s_fname[6];
+                    char           s_fpack[6];
+    then they differ considerably:
+        Xenix FS
+                    char           s_clean;
+                    char           s_fill[371];
+                    long           s_magic;
+                    long           s_type;
+        SystemV FS
+                    long           s_fill[12 or 14];
+                    long           s_state;
+                    long           s_magic;
+                    long           s_type;
+        Coherent FS
+                    unsigned long  s_unique;
+    Note that Coherent FS has no magic.
+
+* Inode layout:
+  - Minix FS
+                    unsigned short i_mode;
+                    unsigned short i_uid;
+                    unsigned long  i_size;
+                    unsigned long  i_time;
+                    unsigned char  i_gid;
+                    unsigned char  i_nlinks;
+                    unsigned short i_zone[7+1+1];
+  - Xenix FS, SystemV FS, Coherent FS
+                    unsigned short i_mode;
+                    unsigned short i_nlink;
+                    unsigned short i_uid;
+                    unsigned short i_gid;
+                    unsigned long  i_size;
+                    unsigned char  i_zone[3*(10+1+1+1)];
+                    unsigned long  i_atime;
+                    unsigned long  i_mtime;
+                    unsigned long  i_ctime;
+
+* Regular file data blocks are organized as
+  - Minix FS
+               7 direct blocks
+               1 indirect block (pointers to blocks)
+               1 double-indirect block (pointer to pointers to blocks)
+  - Xenix FS, SystemV FS, Coherent FS
+              10 direct blocks
+               1 indirect block (pointers to blocks)
+               1 double-indirect block (pointer to pointers to blocks)
+               1 triple-indirect block (pointer to pointers to pointers to blocks)
+
+* Inode size, inodes per block
+  - Minix FS        32   32
+  - Xenix FS        64   16
+  - SystemV FS      64   16
+  - Coherent FS     64    8
+
+* Directory entry on disk
+  - Minix FS
+                    unsigned short inode;
+                    char name[14/30];
+  - Xenix FS, SystemV FS, Coherent FS
+                    unsigned short inode;
+                    char name[14];
+
+* Dir entry size, dir entries per block
+  - Minix FS     16/32    64/32
+  - Xenix FS     16       64
+  - SystemV FS   16       64
+  - Coherent FS  16       32
+
+* How to implement symbolic links such that the host fsck doesn't scream:
+  - Minix FS     normal
+  - Xenix FS     kludge: as regular files with  chmod 1000
+  - SystemV FS   ??
+  - Coherent FS  kludge: as regular files with  chmod 1000
+
+
+Notation: We often speak of a "block" but mean a zone (the allocation unit)
+and not the disk driver's notion of "block".
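To make the comparison above concrete, the Xenix/SystemV/Coherent directory entry from the table can be written as a small C structure; the struct and field names below are chosen for illustration and are not taken from the kernel sources.

#include <stdint.h>

/* On-disk directory entry shared by Xenix FS, SystemV FS and Coherent FS,
 * as listed in the comparison above (names are illustrative only). */
struct sysv_dir_entry {
	uint16_t inode;		/* 0 means "unused slot" */
	char     name[14];	/* not necessarily NUL-terminated */
};

/* sizeof(struct sysv_dir_entry) == 16, which matches the table:
 * a 1024-byte Xenix/SystemV block holds 64 entries, a 512-byte
 * Coherent block holds 32. */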
diff --git a/Documentation/ftape.txt b/Documentation/ftape.txt
deleted file mode 100644
index 7d8bb33..0000000
--- a/Documentation/ftape.txt
+++ /dev/null
@@ -1,307 +0,0 @@
-Intro
-=====
-
-This file describes some issues involved when using the "ftape"
-floppy tape device driver that comes with the Linux kernel.
-
-ftape has a home page at
-
-http://ftape.dot-heine.de/
-
-which contains further information about ftape. Please cross check
-this WWW address against the address given (if any) in the MAINTAINERS
-file located in the top level directory of the Linux kernel source
-tree.
-
-NOTE: This is an unmaintained set of drivers, and it is not guaranteed to work.
-If you are interested in taking over maintenance, contact Claus-Justus Heine
-<ch@dot-heine.de>, the former maintainer.
-
-Contents
-========
-
-A minus 1: Ftape documentation
-
-A. Changes
-   1. Goal
-   2. I/O Block Size
-   3. Write Access when not at EOD (End Of Data) or BOT (Begin Of Tape)
-   4. Formatting
-   5. Interchanging cartridges with other operating systems
-
-B. Debugging Output
-   1. Introduction
-   2. Tuning the debugging output
-
-C. Boot and load time configuration
-   1. Setting boot time parameters
-   2. Module load time parameters
-   3. Ftape boot- and load time options
-   4. Example kernel parameter setting
-   5. Example module parameter setting
-
-D. Support and contacts
-
-*******************************************************************************
-
-A minus 1. Ftape documentation
-==============================
-
-Unluckily, the ftape-HOWTO is out of date. This really needs to be
-changed. Up to date documentation as well as recent development
-versions of ftape and useful links to related topics can be found at
-the ftape home page at
-
-http://ftape.dot-heine.de/
-
-*******************************************************************************
-
-A. Changes
-==========
-
-1. Goal
-   ~~~~
-   The goal of all that incompatibilities was to give ftape an interface
-   that resembles the interface provided by SCSI tape drives as close
-   as possible. Thus any Unix backup program that is known to work
-   with SCSI tape drives should also work.
-
-   The concept of a fixed block size for read/write transfers is
-   rather unrelated to this SCSI tape compatibility at the file system
-   interface level. It developed out of a feature of zftape, a
-   block wise user transparent on-the-fly compression. That compression
-   support will not be dropped in future releases for compatibility
-   reasons with previous releases of zftape.
-
-2. I/O Block Size
-   ~~~~~~~~~~~~~~
-   The block size defaults to 10k which is the default block size of
-   GNU tar.
-
-   The block size can be tuned either during kernel configuration or
-   at runtime with the MTIOCTOP ioctl using the MTSETBLK operation
-   (i.e. do "mt -f /dev/qft0" setblk #BLKSZ). A block size of 0
-   switches to variable block size mode i.e. "mt setblk 0" switches
-   off the block size restriction. However, this disables zftape's
-   built in on-the-fly compression which doesn't work with variable
-   block size mode.
-
-   The BLKSZ parameter must be given as a byte count and must be a
-   multiple of 32k or 0, i.e. use "mt setblk 32768" to switch to a
-   block size of 32k.
-
-   The typical symptom of a block size mismatch is an "invalid
-   argument" error message.
-
-3. Write Access when not at EOD (End Of Data) or BOT (Begin Of Tape)
-   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-   zftape (the file system interface of ftape-3.x) denies write access
-   to the tape cartridge when it isn't positioned either at BOT or
-   EOD.
-
-4. Formatting
-   ~~~~~~~~~~
-   ftape DOES support formatting of floppy tape cartridges. You need the
-   `ftformat' program that is shipped with the modules version of ftape.
-   Please get the latest version of ftape from
-
-   ftp://sunsite.unc.edu/pub/Linux/kernel/tapes
-
-   or from the ftape home page at
-
-   http://ftape.dot-heine.de/
-
-   `ftformat' is contained in the `./contrib/' subdirectory of that
-   separate ftape package.
-
-5. Interchanging cartridges with other operating systems
-   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-   The internal emulation of Unix tape device file marks has changed
-   completely. ftape now uses the volume table segment as specified
-   by the QIC-40/80/3010/3020/113 standards to emulate file marks. As
-   a consequence there is limited support to interchange cartridges
-   with other operating systems.
-
-   To be more precise: ftape will detect volumes written by other OS's
-   programs and other OS's programs will detect volumes written by
-   ftape.
-
-   However, it isn't possible to extract the data dumped to the tape
-   by some MSDOS program with ftape. This exceeds the scope of a
-   kernel device driver. If you need such functionality, then go ahead
-   and write a user space utility that is able to do that. ftape already
-   provides all kernel level support necessary to do that.
-
-*******************************************************************************
-
-B. Debugging Output
-   ================
-
-1. Introduction
-   ~~~~~~~~~~~~
-   The ftape driver can be very noisy in that is can print lots of
-   debugging messages to the kernel log files and the system console.
-   While this is useful for debugging it might be annoying during
-   normal use and enlarges the size of the driver by several kilobytes.
-
-   To reduce the size of the driver you can trim the maximal amount of
-   debugging information available during kernel configuration. Please
-   refer to the kernel configuration script and its on-line help
-   functionality.
-
-   The amount of debugging output maps to the "tracing" boot time
-   option and the "ft_tracing" modules option as follows:
-
-   0              bugs
-   1              + errors (with call-stack dump)
-   2              + warnings
-   3              + information
-   4              + more information
-   5              + program flow
-   6              + fdc/dma info
-   7              + data flow
-   8              + everything else
-
-2. Tuning the debugging output
-   ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-   To reduce the amount of debugging output printed to the system
-   console you can
-
-   i)  trim the debugging output at run-time with
-
-       mt -f /dev/nqft0 setdensity #DBGLVL
-
-       where "#DBGLVL" is a number between 0 and 9
-
-   ii) trim the debugging output at module load time with
-
-       modprobe ftape ft_tracing=#DBGLVL
-
-       Of course, this applies only if you have configured ftape to be
-       compiled as a module.
-
-   iii) trim the debugging output during system boot time. Add the
-       following to the kernel command line:
-
-       ftape=#DBGLVL,tracing
-
-       Please refer also to the next section if you don't know how to
-       set boot time parameters.
-
-*******************************************************************************
-
-C. Boot and load time configuration
-   ================================
-
-1. Setting boot time parameters
-   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 
-   Assuming that you use lilo, the LI)nux LO)ader, boot time kernel
-   parameters can be set by adding a line
-
-   append some_kernel_boot_time_parameter
-
-   to `/etc/lilo.conf' or at real boot time by typing in the options
-   at the prompt provided by LILO. I can't give you advice on how to
-   specify those parameters with other loaders as I don't use them.
-
-   For ftape, each "some_kernel_boot_time_parameter" looks like
-   "ftape=value,option". As an example, the debugging output can be
-   increased with
-
-   ftape=4,tracing
-
-   NOTE: the value precedes the option name.
-
-2. Module load time parameters
-   ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-   Module parameters can be specified either directly when invoking
-   the program 'modprobe' at the shell prompt:
-
-   modprobe ftape ft_tracing=4
-
-   or by editing the file `/etc/modprobe.conf' in which case they take
-   effect each time when the module is loaded with `modprobe' (please
-   refer to the respective manual pages). Thus, you should add a line
-
-   options ftape ft_tracing=4
-
-   to `/etc/modprobe.conf` if you intend to increase the debugging
-   output of the driver.
-
-
-3. Ftape boot- and load time options
-   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-   i.   Controlling the amount of debugging output
-        DBGLVL has to be replaced by a number between 0 and 8.
-
-        module                 |  kernel command line
-        -----------------------|----------------------
-        ft_tracing=DBGLVL      |  ftape=DBGLVL,tracing
-
-   ii.  Hardware setup
-	BASE is the base address of your floppy disk controller,
-        IRQ and DMA give its interrupt and DMA channel, respectively.
-        BOOL is an integer, "0" means "no"; any other value means
-	"yes". You don't need to specify anything if connecting your tape
-        drive to the standard floppy disk controller. All of these
-	values have reasonable defaults. The defaults can be modified
-	during kernel configuration, i.e. while running "make config",
-	"make menuconfig" or "make xconfig" in the top level directory
-	of the Linux kernel source tree. Please refer also to the on
-	line documentation provided during that kernel configuration
-	process.
-
-	ft_probe_fc10 is set to a non-zero value if you wish for ftape to
-	probe for a Colorado FC-10 or FC-20 controller.
-
-	ft_mach2 is set to a non-zero value if you wish for ftape to probe
-	for a Mountain MACH-2 controller.
-
-        module                 |  kernel command line
-        -----------------------|----------------------
-        ft_fdc_base=BASE       |  ftape=BASE,ioport
-        ft_fdc_irq=IRQ         |  ftape=IRQ,irq
-        ft_fdc_dma=DMA         |  ftape=DMA,dma
-        ft_probe_fc10=BOOL     |  ftape=BOOL,fc10
-        ft_mach2=BOOL          |  ftape=BOOL,mach2
-        ft_fdc_threshold=THR   |  ftape=THR,threshold
-        ft_fdc_rate_limit=RATE |  ftape=RATE,datarate
-
-4. Example kernel parameter setting
-   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 
-   To configure ftape to probe for a Colorado FC-10/FC-20 controller
-   and to increase the amount of debugging output a little bit, add
-   the following line to `/etc/lilo.conf':
-
-   append ftape=1,fc10 ftape=4,tracing
-
-5. Example module parameter setting
-   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-   To do the same, but with ftape compiled as a loadable kernel
-   module, add the following line to `/etc/modprobe.conf':
-
-   options ftape ft_probe_fc10=1 ft_tracing=4
-
-*******************************************************************************
-
-D. Support and contacts
-   ====================
-
-   Ftape is distributed under the GNU General Public License. There is
-   absolutely no warranty for this software. However, you can reach
-   the current maintainer of the ftape package under the email address
-   given in the MAINTAINERS file which is located in the top level
-   directory of the Linux kernel source tree. There you'll find also
-   the relevant mailing list to use as a discussion forum and the web
-   page to query for the most recent documentation, related work and
-   development versions of ftape.
-
-   Changelog:
-   ==========
-
-~1996:		Original Document
-
-10-24-2004:	General cleanup and updating, noting additional module options.
-		James Nelson <james4765@gmail.com>
diff --git a/Documentation/fujitsu/frv/gdbstub.txt b/Documentation/fujitsu/frv/gdbstub.txt
index 6ce5aa9..9304fb3 100644
--- a/Documentation/fujitsu/frv/gdbstub.txt
+++ b/Documentation/fujitsu/frv/gdbstub.txt
@@ -59,7 +59,7 @@
 Then build as usual, download to the board and execute. Note that if
 "Immediate activation" was selected, then the kernel will wait for GDB to
 attach. If not, then the kernel will boot immediately and GDB will have to
-interupt it or wait for an exception to occur if before doing anything with
+interrupt it or wait for an exception to occur before doing anything with
 the kernel.
 
 
diff --git a/Documentation/fujitsu/frv/kernel-ABI.txt b/Documentation/fujitsu/frv/kernel-ABI.txt
index 8b0a5fc..aaa1cec 100644
--- a/Documentation/fujitsu/frv/kernel-ABI.txt
+++ b/Documentation/fujitsu/frv/kernel-ABI.txt
@@ -156,7 +156,7 @@
 almost completely self-contained. The only external code used is the
 sprintf family of functions.
 
-Futhermore, break.S is so complicated because single-step mode does not
+Furthermore, break.S is so complicated because single-step mode does not
 switch off on entry to an exception. That means unless manually disabled,
 single-stepping will blithely go on stepping into things like interrupts.
 See gdbstub.txt for more information.
diff --git a/Documentation/i386/boot.txt b/Documentation/i386/boot.txt
index c51314b..9575de3 100644
--- a/Documentation/i386/boot.txt
+++ b/Documentation/i386/boot.txt
@@ -2,7 +2,7 @@
 		     ----------------------------
 
 		    H. Peter Anvin <hpa@zytor.com>
-			Last update 2005-09-02
+			Last update 2006-11-17
 
 On the i386 platform, the Linux kernel uses a rather complicated boot
 convention.  This has evolved partially due to historical aspects, as
@@ -35,6 +35,8 @@
 		initrd address available to the bootloader.
 
 Protocol 2.04:	(Kernel 2.6.14) Extend the syssize field to four bytes.
+Protocol 2.05:	(Kernel 2.6.20) Make protected mode kernel relocatable.
+		Introduce relocatable_kernel and kernel_alignment fields.
 
 
 **** MEMORY LAYOUT
@@ -129,6 +131,8 @@
 0226/2	N/A	pad1		Unused
 0228/4	2.02+	cmd_line_ptr	32-bit pointer to the kernel command line
 022C/4	2.03+	initrd_addr_max	Highest legal initrd address
+0230/4	2.05+	kernel_alignment Physical addr alignment required for kernel
+0234/1	2.05+	relocatable_kernel Whether kernel is relocatable or not
 
 (1) For backwards compatibility, if the setup_sects field contains 0, the
     real value is 4.
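As a minimal sketch of how a loader might consume the two fields added by protocol 2.05 (offsets 0x230 and 0x234 in the table above): the code below is not from the kernel tree, and reading the protocol version word from offset 0x206 of the real-mode header is an assumption made for this example.

#include <stdint.h>

/* Sketch only: pick up the protocol 2.05 fields from a buffer holding
 * the real-mode kernel header.  Offsets 0x230/0x234 are from the table
 * above; the version word at 0x206 is an assumption for this example. */
struct boot_205_fields {
	uint32_t kernel_alignment;	/* 0x230: required physical alignment */
	uint8_t  relocatable_kernel;	/* 0x234: non-zero if relocatable */
};

static struct boot_205_fields read_boot_205(const uint8_t *hdr)
{
	struct boot_205_fields f = { 0, 0 };
	uint16_t version = hdr[0x206] | (uint16_t)hdr[0x207] << 8;

	if (version >= 0x0205) {
		f.kernel_alignment = hdr[0x230] |
				     (uint32_t)hdr[0x231] << 8 |
				     (uint32_t)hdr[0x232] << 16 |
				     (uint32_t)hdr[0x233] << 24;
		f.relocatable_kernel = hdr[0x234];
	}
	return f;
}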
diff --git a/Documentation/ide.txt b/Documentation/ide.txt
index 0bf38ba..786c3a7 100644
--- a/Documentation/ide.txt
+++ b/Documentation/ide.txt
@@ -390,5 +390,5 @@
 Wed Apr 17 22:52:44 CEST 2002 edited by Marcin Dalecki, the current
 maintainer.
 
-Wed Aug 20 22:31:29 CEST 2003 updated ide boot uptions to current ide.c
+Wed Aug 20 22:31:29 CEST 2003 updated ide boot options to current ide.c
 comments at 2.6.0-test4 time. Maciej Soltysiak <solt@dns.toxicfilms.tv>
diff --git a/Documentation/input/amijoy.txt b/Documentation/input/amijoy.txt
index 4f0e89d..7dc4f17 100644
--- a/Documentation/input/amijoy.txt
+++ b/Documentation/input/amijoy.txt
@@ -91,8 +91,8 @@
          |   1    | M0HQ     | JOY0DAT Horizontal Clock (quadrature)   |
          |   2    | M0V      | JOY0DAT Vertical Clock                  |
          |   3    | M0VQ     | JOY0DAT Vertical Clock  (quadrature)    |
-         |   4    | M1V      | JOY1DAT Horizontall Clock               |
-         |   5    | M1VQ     | JOY1DAT Horizontall Clock (quadrature)  |
+         |   4    | M1V      | JOY1DAT Horizontal Clock                |
+         |   5    | M1VQ     | JOY1DAT Horizontal Clock (quadrature)   |
          |   6    | M1V      | JOY1DAT Vertical Clock                  |
          |   7    | M1VQ     | JOY1DAT Vertical Clock (quadrature)     |
          +--------+----------+-----------------------------------------+
diff --git a/Documentation/input/atarikbd.txt b/Documentation/input/atarikbd.txt
index 1e7e585..668f4d0 100644
--- a/Documentation/input/atarikbd.txt
+++ b/Documentation/input/atarikbd.txt
@@ -103,7 +103,7 @@
 
 5.1 Joystick Event Reporting
 
-In this mode, the ikbd generates a record whever the joystick position is
+In this mode, the ikbd generates a record whenever the joystick position is
 changed (i.e. for each opening or closing of a joystick switch or trigger).
 
 The joystick event record is two bytes of the form:
@@ -277,8 +277,8 @@
 9.7 SET MOUSE SCALE
 
     0x0C
-    X                   ; horizontal mouse ticks per internel X
-    Y                   ; vertical mouse ticks per internel Y
+    X                   ; horizontal mouse ticks per internal X
+    Y                   ; vertical mouse ticks per internal Y
 
 This command sets the scale factor for the ABSOLUTE MOUSE POSITIONING mode.
 In this mode, the specified number of mouse phase changes ('clicks') must
@@ -323,7 +323,7 @@
     0x0F
 
 This command makes the origin of the Y axis to be at the bottom of the
-logical coordinate system internel to the ikbd for all relative or absolute
+logical coordinate system internal to the ikbd for all relative or absolute
 mouse motion. This causes mouse motion toward the user to be negative in sign
 and away from the user to be positive.
 
@@ -597,8 +597,8 @@
 
 10. SCAN CODES
 
-The key scan codes return by the ikbd are chosen to simplify the
-implementaion of GSX.
+The key scan codes returned by the ikbd are chosen to simplify the
+implementation of GSX.
 
 GSX Standard Keyboard Mapping.
 
diff --git a/Documentation/input/yealink.txt b/Documentation/input/yealink.txt
index 0a8c97e..5360e43 100644
--- a/Documentation/input/yealink.txt
+++ b/Documentation/input/yealink.txt
@@ -134,7 +134,7 @@
   888888888888
   Linux Rocks!
 
-Writing to /sys/../lineX will set the coresponding LCD line.
+Writing to /sys/../lineX will set the corresponding LCD line.
  - Excess characters are ignored.
  - If less characters are written than allowed, the remaining digits are
    unchanged.
diff --git a/Documentation/ioctl/cdrom.txt b/Documentation/ioctl/cdrom.txt
index 8ec32cc..62d4af4 100644
--- a/Documentation/ioctl/cdrom.txt
+++ b/Documentation/ioctl/cdrom.txt
@@ -735,7 +735,7 @@
 	    Ok, this is where problems start.  The current interface for
 	    the CDROM_DISC_STATUS ioctl is flawed.  It makes the false
 	    assumption that CDs are all CDS_DATA_1 or all CDS_AUDIO, etc.
-	    Unfortunatly, while this is often the case, it is also
+	    Unfortunately, while this is often the case, it is also
 	    very common for CDs to have some tracks with data, and some
 	    tracks with audio.	Just because I feel like it, I declare
 	    the following to be the best way to cope.  If the CD has
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 50f4edd..4b3d671 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -227,9 +227,9 @@
 	be included in a library, lib.a.
 	All objects listed with lib-y are combined in a single
 	library for that directory.
-	Objects that are listed in obj-y and additionaly listed in
-	lib-y will not be included in the library, since they will anyway
-	be accessible.
+	Objects that are listed in obj-y and additionally listed in
+	lib-y will not be included in the library, since they will
+	be accessible anyway.
 	For consistency, objects listed in lib-m will be included in lib.a.
 
 	Note that the same kbuild makefile may list files to be built-in
@@ -535,7 +535,7 @@
 	Host programs can be made up based on composite objects.
 	The syntax used to define composite objects for host programs is
 	similar to the syntax used for kernel objects.
-	$(<executeable>-objs) lists all objects used to link the final
+	$(<executable>-objs) lists all objects used to link the final
 	executable.
 
 	Example:
@@ -1022,7 +1022,7 @@
 	In this example, there are two possible targets, requiring different
 	options to the linker. The linker options are specified using the
 	LDFLAGS_$@ syntax - one for each potential target.
-	$(targets) are assinged all potential targets, by which kbuild knows
+	$(targets) are assigned all potential targets, by which kbuild knows
 	the targets and will:
 		1) check for commandline changes
 		2) delete target during make clean
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 6747384..b79bcdf 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -557,9 +557,6 @@
 	floppy=		[HW]
 			See Documentation/floppy.txt.
 
-	ftape=		[HW] Floppy Tape subsystem debugging options.
-			See Documentation/ftape.txt.
-
 	gamecon.map[2|3]=
 			[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
 			support via parallel port (up to 5 devices per port)
@@ -602,8 +599,6 @@
 
 	hugepages=	[HW,IA-32,IA-64] Maximal number of HugeTLB pages.
 
-	noirqbalance	[IA-32,SMP,KNL] Disable kernel irq balancing
-
 	i8042.direct	[HW] Put keyboard port into non-translated mode
 	i8042.dumbkbd	[HW] Pretend that controller can only read data from
 			     keyboard and cannot control its state
@@ -653,6 +648,10 @@
 	idle=		[HW]
 			Format: idle=poll or idle=halt
 
+	ignore_loglevel	[KNL]
+			Ignore loglevel setting - this will print /all/
+			kernel messages to the console. Useful for debugging.
+
 	ihash_entries=	[KNL]
 			Set number of hash buckets for inode cache.
 
@@ -717,7 +716,12 @@
 			Format: <RDP>,<reset>,<pci_scan>,<verbosity>
 
 	isolcpus=	[KNL,SMP] Isolate CPUs from the general scheduler.
-			Format: <cpu number>,...,<cpu number>
+			Format:
+			<cpu number>,...,<cpu number>
+			or
+			<cpu number>-<cpu number>  (must be a positive range in ascending order)
+			or a mixture
+			<cpu number>,...,<cpu number>-<cpu number>
 			This option can be used to specify one or more CPUs
 			to isolate from the general SMP balancing and scheduling
 			algorithms. The only way to move a process onto or off
@@ -1015,6 +1019,10 @@
 			emulation library even if a 387 maths coprocessor
 			is present.
 
+	noaliencache	[MM, NUMA] Disables the allocation of alien caches in
+			the slab allocator.  Saves per-node memory, but will
+			impact performance on real NUMA hardware.
+
 	noalign		[KNL,ARM]
 
 	noapic		[SMP,APIC] Tells the kernel to not make use of any
@@ -1055,9 +1063,14 @@
 			in certain environments such as networked servers or
 			real-time systems.
 
+	noirqbalance	[IA-32,SMP,KNL] Disable kernel irq balancing
+
 	noirqdebug	[IA-32] Disables the code which attempts to detect and
 			disable unhandled interrupt sources.
 
+	no_timer_check	[IA-32,X86_64,APIC] Disables the code which tests for
+			broken timer IRQ sources.
+
 	noisapnp	[ISAPNP] Disables ISA PnP code.
 
 	noinitrd	[RAM] Tells the kernel not to load any configured
@@ -1288,6 +1301,7 @@
 			Param: "schedule" - profile schedule points.
 			Param: <number> - step/bucket size as a power of 2 for
 				statistical time based profiling.
+			Param: "sleep" - profile D-state sleeping (millisecs)
 
 	processor.max_cstate=	[HW,ACPI]
 			Limit processor to maximum C-state
@@ -1369,6 +1383,12 @@
 	resume=		[SWSUSP]
 			Specify the partition device for software suspend
 
+	resume_offset=	[SWSUSP]
+			Specify the offset from the beginning of the partition
+			given by "resume=" at which the swap header is located,
+			in <PAGE_SIZE> units (needed only for swap files).
+			See  Documentation/power/swsusp-and-swap-files.txt
+
 	rhash_entries=	[KNL,NET]
 			Set number of hash buckets for route cache
 
@@ -1419,6 +1439,11 @@
 
 	scsi_logging=	[SCSI]
 
+	scsi_mod.scan=	[SCSI] sync (default) scans SCSI busses as they are
+			discovered.  async scans them in kernel threads,
+			allowing boot to proceed.  none ignores them, expecting
+			user space to do the scan.
+
 	selinux		[SELINUX] Disable or enable SELinux at boot time.
 			Format: { "0" | "1" }
 			See security/selinux/Kconfig help text.
@@ -1730,6 +1755,9 @@
 	norandmaps	Don't use address space randomization
 			Equivalent to echo 0 > /proc/sys/kernel/randomize_va_space
 
+	unwind_debug=N	N > 0 will enable dwarf2 unwinder debugging
+			This is useful to get more information on why
+			you got a "dwarf2 unwinder stuck" message.
 
 ______________________________________________________________________
 
diff --git a/Documentation/keys.txt b/Documentation/keys.txt
index 3da586b..60c665d 100644
--- a/Documentation/keys.txt
+++ b/Documentation/keys.txt
@@ -304,7 +304,7 @@
 	R	Revoked
 	D	Dead
 	Q	Contributes to user's quota
-	U	Under contruction by callback to userspace
+	U	Under construction by callback to userspace
 	N	Negative key
 
      This file must be enabled at kernel configuration time as it allows anyone
diff --git a/Documentation/laptop-mode.txt b/Documentation/laptop-mode.txt
index c487186..6f639e3 100644
--- a/Documentation/laptop-mode.txt
+++ b/Documentation/laptop-mode.txt
@@ -121,7 +121,7 @@
 MAX_AGE:
 
 Maximum time, in seconds, of hard drive spindown time that you are
-confortable with. Worst case, it's possible that you could lose this
+comfortable with. Worst case, it's possible that you could lose this
 amount of work if your battery fails while you're in laptop mode.
 
 MINIMUM_BATTERY_MINUTES:
@@ -235,7 +235,7 @@
 
 --------------------CONFIG FILE BEGIN-------------------------------------------
 # Maximum time, in seconds, of hard drive spindown time that you are
-# confortable with. Worst case, it's possible that you could lose this
+# comfortable with. Worst case, it's possible that you could lose this
 # amount of work if your battery fails you while in laptop mode.
 #MAX_AGE=600
 
@@ -350,7 +350,7 @@
 # set defaults instead:
 
 # Maximum time, in seconds, of hard drive spindown time that you are
-# confortable with. Worst case, it's possible that you could lose this
+# comfortable with. Worst case, it's possible that you could lose this
 # amount of work if your battery fails you while in laptop mode.
 MAX_AGE=${MAX_AGE:-'600'}
 
@@ -699,7 +699,7 @@
 Dax Kelson submitted this so that the ACPI acpid daemon will
 kick off the laptop_mode script and run hdparm. The part that
 automatically disables laptop mode when the battery is low was
-writen by Jan Topinski.
+written by Jan Topinski.
 
 -----------------/etc/acpi/events/ac_adapter BEGIN------------------------------
 event=ac_adapter
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 7751704..58408dd 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -212,7 +212,7 @@
 
 	STORE *X = c, d = LOAD *X
 
-     (Loads and stores overlap if they are targetted at overlapping pieces of
+     (Loads and stores overlap if they are targeted at overlapping pieces of
      memory).
 
 And there are a number of things that _must_ or _must_not_ be assumed:
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index b1181ce..e06b6e3 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -58,6 +58,8 @@
 	- FORE Systems PCA-200E/SBA-200E ATM NIC driver info.
 framerelay.txt
 	- info on using Frame Relay/Data Link Connection Identifier (DLCI).
+generic_netlink.txt
+	- info on Generic Netlink
 ip-sysctl.txt
 	- /proc/sys/net/ipv4/* variables
 ip_dynaddr.txt
diff --git a/Documentation/networking/NAPI_HOWTO.txt b/Documentation/networking/NAPI_HOWTO.txt
index 93af3e8..fb8dc64 100644
--- a/Documentation/networking/NAPI_HOWTO.txt
+++ b/Documentation/networking/NAPI_HOWTO.txt
@@ -95,8 +95,8 @@
 		Move all to dev->poll()
 
 C) Ability to detect new work correctly.
-NAPI works by shutting down event interrupts when theres work and
-turning them on when theres none. 
+NAPI works by shutting down event interrupts when there's work and
+turning them on when there's none. 
 New packets might show up in the small window while interrupts were being 
 re-enabled (refer to appendix 2).  A packet might sneak in during the period 
 we are enabling interrupts. We only get to know about such a packet when the 
@@ -114,7 +114,7 @@
 only one CPU can pick the initial interrupt and hence the initial
 netif_rx_schedule(dev);
 - The core layer invokes devices to send packets in a round robin format.
-This implies receive is totaly lockless because of the guarantee only that 
+This implies receive is totally lockless because of the guarantee that only 
 one CPU is executing it.
 -  contention can only be the result of some other CPU accessing the rx
 ring. This happens only in close() and suspend() (when these methods
@@ -510,7 +510,7 @@
 			an interrupt will be generated */
                         goto done;
 	}
-	/* done! at least thats what it looks like ;->
+	/* done! at least that's what it looks like ;->
 	if new packets came in after our last check on status bits
 	they'll be caught by the while check and we go back and clear them 
 	since we havent exceeded our quota */
@@ -535,11 +535,11 @@
         * 1. it can race with disabling irqs in irq handler (which are done to 
 	* schedule polls)
         * 2. it can race with dis/enabling irqs in other poll threads
-        * 3. if an irq raised after the begining of the outer  beginning 
-        * loop(marked in the code above), it will be immediately
+        * 3. if an irq is raised after the beginning of the outer loop
+        * (marked in the code above), it will be immediately
         * triggered here.
         *
-        * Summarizing: the logic may results in some redundant irqs both
+        * Summarizing: the logic may result in some redundant irqs both
         * due to races in masking and due to too late acking of already
         * processed irqs. The good news: no events are ever lost.
         */
@@ -601,7 +601,7 @@
 	
 5) dev->close() and dev->suspend() issues
 ==========================================
-The driver writter neednt worry about this. The top net layer takes
+The driver writer needn't worry about this; the top net layer takes
 care of it.
 
 6) Adding new Stats to /proc 
@@ -622,9 +622,9 @@
 packets fast enough i.e send a pause only when you run out of rx buffers.
 Note FC in itself is a good solution but we have found it to not be
 much of a commodity feature (both in NICs and switches) and hence falls
-under the same category as using NIC based mitigation. Also experiments
-indicate that its much harder to resolve the resource allocation
-issue (aka lazy receiving that NAPI offers) and hence quantify its usefullness
+under the same category as using NIC based mitigation. Also, experiments
+indicate that it's much harder to resolve the resource allocation
+issue (aka lazy receiving that NAPI offers) and hence quantifying its usefulness
 proved harder. In any case, FC works even better with NAPI but is not
 necessary.
 
@@ -678,10 +678,10 @@
 CSR5 bit of interest is only the rx status. 
 If you look at the last if statement: 
 you just finished grabbing all the packets from the rx ring .. you check if
-status bit says theres more packets just in ... it says none; you then
+status bit says there are more packets just in ... it says none; you then
 enable rx interrupts again; if a new packet just came in during this check,
 we are counting that CSR5 will be set in that small window of opportunity
-and that by re-enabling interrupts, we would actually triger an interrupt
+and that by re-enabling interrupts, we would actually trigger an interrupt
 to register the new packet for processing.
 
 [The above description nay be very verbose, if you have better wording 
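The race described in the hunks above (a packet arriving between the final status check and the re-enabling of interrupts) is easier to see as code. The sketch below is hypothetical: the hw_* helpers and the example_priv type are invented for illustration and do not correspond to any real driver API.

/* Hypothetical sketch of the NAPI "re-enable, then re-check" pattern
 * discussed above.  Everything here is invented for illustration. */
struct example_priv;				/* opaque per-device state */

extern int  hw_rx_pending(struct example_priv *p);   /* packets queued? */
extern int  rx_poll_one(struct example_priv *p);     /* process one packet */
extern void hw_enable_rx_irq(struct example_priv *p);
extern void hw_disable_rx_irq(struct example_priv *p);

static int example_poll(struct example_priv *priv, int budget)
{
	int done = 0;

restart:
	while (done < budget && hw_rx_pending(priv))
		done += rx_poll_one(priv);	/* hand one packet to the stack */

	if (done >= budget)
		return done;			/* quota exhausted: stay in polling mode */

	hw_enable_rx_irq(priv);			/* looks like we are finished ... */

	/* ... but a packet may have sneaked in between the last status check
	 * and the irq enable; if so, go back instead of losing the event. */
	if (hw_rx_pending(priv)) {
		hw_disable_rx_irq(priv);
		goto restart;
	}
	return done;
}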
diff --git a/Documentation/networking/cs89x0.txt b/Documentation/networking/cs89x0.txt
index 6489647..6387d3d 100644
--- a/Documentation/networking/cs89x0.txt
+++ b/Documentation/networking/cs89x0.txt
@@ -248,7 +248,7 @@
    with device probing.  To avoid this behaviour, add one
    to the `io=' module parameter.  This doesn't actually change
    the I/O address, but it is a flag to tell the driver
-   topartially initialise the hardware before trying to
+   to partially initialise the hardware before trying to
    identify the card.  This could be dangerous if you are
    not sure that there is a cs89x0 card at the provided address.
 
@@ -620,8 +620,8 @@
                                                 12       Mouse (PS/2)                              
 Memory Address  Device                          13       Math Coprocessor
 --------------  ---------------------           14       Hard Disk controller
-A000-BFFF	EGA Graphics Adpater
-A000-C7FF	VGA Graphics Adpater
+A000-BFFF	EGA Graphics Adapter
+A000-C7FF	VGA Graphics Adapter
 B000-BFFF	Mono Graphics Adapter
 B800-BFFF	Color Graphics Adapter
 E000-FFFF	AT BIOS
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index 74563b3..dda1588 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -19,21 +19,17 @@
 
 It has a base protocol and pluggable congestion control IDs (CCIDs).
 
-It is at draft RFC status and the homepage for DCCP as a protocol is at:
-	http://www.icir.org/kohler/dcp/
+It is at experimental RFC status and the homepage for DCCP as a protocol is at:
+	http://www.read.cs.ucla.edu/dccp/
 
 Missing features
 ================
 
 The DCCP implementation does not currently have all the features that are in
-the draft RFC.
+the RFC.
 
-In particular the following are missing:
-- CCID2 support
-- feature negotiation
-
-When testing against other implementations it appears that elapsed time
-options are not coded compliant to the specification.
+The known bugs are at:
+	http://linux-net.osdl.org/index.php/TODO#DCCP
 
 Socket options
 ==============
@@ -47,12 +43,70 @@
 is present). Connecting sockets set at most one service option; for
 listening sockets, multiple service codes can be specified.
 
+DCCP_SOCKOPT_SEND_CSCOV and DCCP_SOCKOPT_RECV_CSCOV are used for setting the
+partial checksum coverage (RFC 4340, sec. 9.2). The default is that checksums
+always cover the entire packet and that only fully covered application data is
+accepted by the receiver. Hence, when using this feature on the sender, it must
+be enabled at the receiver, too, with a suitable choice of CsCov.
+
+DCCP_SOCKOPT_SEND_CSCOV sets the sender checksum coverage. Values in the
+	range 0..15 are acceptable. The default setting is 0 (full coverage);
+	values from 1 to 15 indicate partial coverage.
+DCCP_SOCKOPT_RECV_CSCOV is for the receiver and has a different meaning: it
+	sets a threshold, where again values 0..15 are acceptable. The default
+	of 0 means that all packets with a partial coverage will be discarded.
+	Values in the range 1..15 indicate that packets with minimally such a
+	coverage value are also acceptable. The higher the number, the more
+	restrictive this setting (see [RFC 4340, sec. 9.2.1]).
+
+Sysctl variables
+================
+Several DCCP default parameters can be managed by the following sysctls
+(sysctl net.dccp.default or /proc/sys/net/dccp/default):
+
+request_retries
+	The number of active connection initiation retries (the number of
+	Requests minus one) before timing out. In addition, it also governs
+	the behaviour of the other, passive side: this variable also sets
+	the number of times DCCP repeats sending a Response when the initial
+	handshake does not progress from RESPOND to OPEN (i.e. when no Ack
+	is received after the initial Request).  This value should be greater
+	than 0; a value below 10 is suggested. Analogue of tcp_syn_retries.
+
+retries1
+	How often a DCCP Response is retransmitted until the listening DCCP
+	side considers its connecting peer dead. Analogue of tcp_retries1.
+
+retries2
+	The number of times a general DCCP packet is retransmitted. This has
+	importance for retransmitted acknowledgments and feature negotiation;
+	data packets are never retransmitted. Analogue of tcp_retries2.
+
+send_ndp = 1
+	Whether or not to send NDP count options (sec. 7.7.2).
+
+send_ackvec = 1
+	Whether or not to send Ack Vector options (sec. 11.5).
+
+ack_ratio = 2
+	The default Ack Ratio (sec. 11.3) to use.
+
+tx_ccid = 2
+	Default CCID for the sender-receiver half-connection.
+
+rx_ccid = 2
+	Default CCID for the receiver-sender half-connection.
+
+seq_window = 100
+	The initial sequence window (sec. 7.5.2).
+
+tx_qlen = 5
+	The size of the transmit buffer in packets. A value of 0 corresponds
+	to an unbounded transmit buffer.
+
 Notes
 =====
 
-SELinux does not yet have support for DCCP. You will need to turn it off or
-else you will get EACCES.
-
-DCCP does not travel through NAT successfully at present. This is because
-the checksum covers the psuedo-header as per TCP and UDP. It should be
-relatively trivial to add Linux NAT support for DCCP.
+DCCP does not travel through NAT successfully at present on many boxes. This is
+because the checksum covers the pseudo-header as per TCP and UDP. Linux NAT
+support for DCCP has been added.
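For the checksum-coverage socket options documented above, a user-space sketch could look like the following. DCCP_SOCKOPT_SEND_CSCOV and DCCP_SOCKOPT_RECV_CSCOV come from <linux/dccp.h>; the fallback definition of SOL_DCCP (269) is only a convenience in case older userspace headers lack it, and the wrapper function itself is an illustration, not an established helper.

#include <sys/socket.h>
#include <linux/dccp.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269
#endif

static int dccp_set_cscov(int sk, int cscov /* 0 = full, 1..15 = partial */)
{
	/* sender: checksum only part of each outgoing packet */
	if (setsockopt(sk, SOL_DCCP, DCCP_SOCKOPT_SEND_CSCOV,
		       &cscov, sizeof(cscov)) < 0)
		return -1;

	/* receiver: accept packets whose coverage is at least cscov */
	return setsockopt(sk, SOL_DCCP, DCCP_SOCKOPT_RECV_CSCOV,
			  &cscov, sizeof(cscov));
}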
diff --git a/Documentation/networking/e1000.txt b/Documentation/networking/e1000.txt
index 5c0a5cc..61b171c 100644
--- a/Documentation/networking/e1000.txt
+++ b/Documentation/networking/e1000.txt
@@ -1,7 +1,7 @@
 Linux* Base Driver for the Intel(R) PRO/1000 Family of Adapters
 ===============================================================
 
-November 15, 2005
+September 26, 2006
 
 
 Contents
@@ -9,6 +9,7 @@
 
 - In This Release
 - Identifying Your Adapter
+- Building and Installation
 - Command Line Parameters
 - Speed and Duplex Configuration
 - Additional Configurations
@@ -41,6 +42,9 @@
 Instructions on updating ethtool can be found in the section "Additional
 Configurations" later in this document.
 
+NOTE: The Intel(R) 82562v 10/100 Network Connection only provides 10/100
+support.
+
 
 Identifying Your Adapter
 ========================
@@ -51,28 +55,27 @@
     http://support.intel.com/support/network/adapter/pro100/21397.htm
 
 For the latest Intel network drivers for Linux, refer to the following
-website. In the search field, enter your adapter name or type, or use the
+website.  In the search field, enter your adapter name or type, or use the
 networking link on the left to search for your adapter:
 
     http://downloadfinder.intel.com/scripts-df/support_intel.asp
 
 
-Command Line Parameters =======================
+Command Line Parameters
+=======================
 
 If the driver is built as a module, the  following optional parameters
-are used by entering them on the command line with the modprobe or insmod
-command using this syntax:
+are used by entering them on the command line with the modprobe command
+using this syntax:
 
      modprobe e1000 [<option>=<VAL1>,<VAL2>,...]
 
-     insmod e1000 [<option>=<VAL1>,<VAL2>,...]
-
 For example, with two PRO/1000 PCI adapters, entering:
 
-     insmod e1000 TxDescriptors=80,128
+     modprobe e1000 TxDescriptors=80,128
 
-loads the e1000 driver with 80 TX descriptors for the first adapter and 128
-TX descriptors for the second adapter.
+loads the e1000 driver with 80 TX descriptors for the first adapter and
+128 TX descriptors for the second adapter.
 
 The default value for each parameter is generally the recommended setting,
 unless otherwise noted.
@@ -87,7 +90,7 @@
         http://www.intel.com/design/network/applnots/ap450.htm
 
         A descriptor describes a data buffer and attributes related to
-        the data buffer. This information is accessed by the hardware.
+        the data buffer.  This information is accessed by the hardware.
 
 
 AutoNeg
@@ -96,9 +99,9 @@
 Valid Range:   0x01-0x0F, 0x20-0x2F
 Default Value: 0x2F
 
-This parameter is a bit mask that specifies which speed and duplex
-settings the board advertises. When this parameter is used, the Speed
-and Duplex parameters must not be specified.
+This parameter is a bit-mask that specifies the speed and duplex settings
+advertised by the adapter.  When this parameter is used, the Speed and
+Duplex parameters must not be specified.
 
 NOTE:  Refer to the Speed and Duplex section of this readme for more
        information on the AutoNeg parameter.
@@ -110,14 +113,15 @@
 Valid Range:   0-2 (0=auto-negotiate, 1=half, 2=full)
 Default Value: 0
 
-Defines the direction in which data is allowed to flow. Can be either
-one or two-directional. If both Duplex and the link partner are set to
-auto-negotiate, the board auto-detects the correct duplex. If the link
-partner is forced (either full or half), Duplex defaults to half-duplex.
+This defines the direction in which data is allowed to flow.  Can be
+either one or two-directional.  If both Duplex and the link partner are
+set to auto-negotiate, the board auto-detects the correct duplex.  If the
+link partner is forced (either full or half), Duplex defaults to half-
+duplex.
 
 
 FlowControl
-----------
+-----------
 Valid Range:   0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
 Default Value: Reads flow control settings from the EEPROM
 
@@ -127,57 +131,107 @@
 
 InterruptThrottleRate
 ---------------------
-(not supported on Intel 82542, 82543 or 82544-based adapters)
-Valid Range:   100-100000 (0=off, 1=dynamic)
-Default Value: 8000
+(not supported on Intel(R) 82542, 82543 or 82544-based adapters)
+Valid Range:   0,1,3,100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+Default Value: 3
 
-This value represents the maximum number of interrupts per second the
-controller generates. InterruptThrottleRate is another setting used in
-interrupt moderation. Dynamic mode uses a heuristic algorithm to adjust
-InterruptThrottleRate based on the current traffic load.
+The driver can limit the number of interrupts per second that the adapter
+will generate for incoming packets. It does this by writing a value to the
+adapter that is based on the maximum number of interrupts that the adapter
+will generate per second.
+
+Setting InterruptThrottleRate to a value greater than or equal to 100
+will program the adapter to send out a maximum of that many interrupts
+per second, even if more packets have come in. This reduces interrupt
+load on the system and can lower CPU utilization under heavy load,
+but will increase latency as packets are not processed as quickly.
+
+The default behaviour of the driver previously assumed a static 
+InterruptThrottleRate value of 8000, providing a good fallback value for 
+all traffic types, but lacking in small packet performance and latency.
+The hardware can handle many more small packets per second, however, and
+for this reason an adaptive interrupt moderation algorithm was implemented.
+
+Since 7.3.x, the driver has two adaptive modes (setting 1 or 3) in which
+it dynamically adjusts the InterruptThrottleRate value based on the traffic 
+that it receives. After determining the type of incoming traffic in the last
+timeframe, it will adjust the InterruptThrottleRate to an appropriate value 
+for that traffic.
+
+The algorithm classifies the incoming traffic every interval into
+classes.  Once the class is determined, the InterruptThrottleRate value is 
+adjusted to suit that traffic type the best. There are three classes defined: 
+"Bulk traffic", for large amounts of packets of normal size; "Low latency",
+for small amounts of traffic and/or a significant percentage of small
+packets; and "Lowest latency", for almost completely small packets or 
+minimal traffic.
+
+In dynamic conservative mode, the InterruptThrottleRate value is set to 4000 
+for traffic that falls in class "Bulk traffic". If traffic falls in the "Low 
+latency" or "Lowest latency" class, the InterruptThrottleRate is increased 
+stepwise to 20000. This default mode is suitable for most applications.
+
+For situations where low latency is vital such as cluster or
+grid computing, the algorithm can reduce latency even more when
+InterruptThrottleRate is set to mode 1. In this mode, which operates
+the same as mode 3, the InterruptThrottleRate will be increased stepwise to 
+70000 for traffic in class "Lowest latency".
+
+Setting InterruptThrottleRate to 0 turns off any interrupt moderation
+and may improve small packet latency, but is generally not suitable
+for bulk throughput traffic.
 
 NOTE:  InterruptThrottleRate takes precedence over the TxAbsIntDelay and
-       RxAbsIntDelay parameters. In other words, minimizing the receive
+       RxAbsIntDelay parameters.  In other words, minimizing the receive
        and/or transmit absolute delays does not force the controller to
        generate more interrupts than what the Interrupt Throttle Rate
        allows.
 
-CAUTION:  If you are using the Intel PRO/1000 CT Network Connection
+CAUTION:  If you are using the Intel(R) PRO/1000 CT Network Connection
           (controller 82547), setting InterruptThrottleRate to a value
           greater than 75,000, may hang (stop transmitting) adapters
-          under certain network conditions. If this occurs a NETDEV
-          WATCHDOG message is logged in the system event log. In
+          under certain network conditions.  If this occurs a NETDEV
+          WATCHDOG message is logged in the system event log.  In
           addition, the controller is automatically reset, restoring
-          the network connection. To eliminate the potential for the
+          the network connection.  To eliminate the potential for the
           hang, ensure that InterruptThrottleRate is set no greater
           than 75,000 and is not set to 0.
 
 NOTE:  When e1000 is loaded with default settings and multiple adapters
        are in use simultaneously, the CPU utilization may increase non-
-       linearly. In order to limit the CPU utilization without impacting
+       linearly.  In order to limit the CPU utilization without impacting
        the overall throughput, we recommend that you load the driver as
        follows:
 
-           insmod e1000.o InterruptThrottleRate=3000,3000,3000
+           modprobe e1000 InterruptThrottleRate=3000,3000,3000
 
        This sets the InterruptThrottleRate to 3000 interrupts/sec for
-       the first, second, and third instances of the driver. The range
+       the first, second, and third instances of the driver.  The range
        of 2000 to 3000 interrupts per second works on a majority of
        systems and is a good starting point, but the optimal value will
-       be platform-specific. If CPU utilization is not a concern, use
+       be platform-specific.  If CPU utilization is not a concern, use
        RX_POLLING (NAPI) and default driver settings.
 
 
+
 RxDescriptors
 -------------
 Valid Range:   80-256 for 82542 and 82543-based adapters
                80-4096 for all other supported adapters
 Default Value: 256
 
-This value specifies the number of receive descriptors allocated by the
-driver. Increasing this value allows the driver to buffer more incoming
-packets.  Each descriptor is 16 bytes.  A receive buffer is also
-allocated for each descriptor and is 2048.
+This value specifies the number of receive buffer descriptors allocated
+by the driver.  Increasing this value allows the driver to buffer more
+incoming packets, at the expense of increased system memory utilization.
+
+Each descriptor is 16 bytes.  A receive buffer is also allocated for each
+descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending 
+on the MTU setting. The maximum MTU size is 16110.
+
+NOTE:  MTU designates the frame size.  It only needs to be set for Jumbo 
+       Frames.  Depending on the available system resources, the request 
+       for a higher number of receive descriptors may be denied.  In this 
+       case, use a lower number.
 
 
 RxIntDelay
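To summarize the adaptive InterruptThrottleRate behaviour described in the hunk above, the policy can be thought of as choosing an interrupts-per-second ceiling from the traffic class of the last interval. The toy function below illustrates that description only; it is not the e1000 driver source.

/* Toy illustration of the adaptive InterruptThrottleRate policy
 * described above.  Mode 3 is "dynamic conservative", mode 1 is
 * "dynamic"; the returned value is an interrupts-per-second ceiling. */
enum itr_traffic_class { BULK_TRAFFIC, LOW_LATENCY, LOWEST_LATENCY };

static unsigned int pick_itr(enum itr_traffic_class tc, int mode)
{
	switch (tc) {
	case BULK_TRAFFIC:
		return 4000;		/* large amounts of normal-sized packets */
	case LOW_LATENCY:
		return 20000;		/* small packets or light traffic */
	case LOWEST_LATENCY:
		/* mode 1 may step all the way up to 70000 for this class;
		 * mode 3 stays at 20000 */
		return mode == 1 ? 70000 : 20000;
	}
	return 8000;			/* the old static default */
}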
@@ -187,17 +241,17 @@
 
 This value delays the generation of receive interrupts in units of 1.024
 microseconds.  Receive interrupt reduction can improve CPU efficiency if
-properly tuned for specific network traffic. Increasing this value adds
+properly tuned for specific network traffic.  Increasing this value adds
 extra latency to frame reception and can end up decreasing the throughput
-of TCP traffic. If the system is reporting dropped receives, this value
+of TCP traffic.  If the system is reporting dropped receives, this value
 may be set too high, causing the driver to run out of available receive
 descriptors.
 
 CAUTION:  When setting RxIntDelay to a value other than 0, adapters may
-          hang (stop transmitting) under certain network conditions. If
+          hang (stop transmitting) under certain network conditions.  If
           this occurs a NETDEV WATCHDOG message is logged in the system
-          event log. In addition, the controller is automatically reset,
-          restoring the network connection. To eliminate the potential
+          event log.  In addition, the controller is automatically reset,
+          restoring the network connection.  To eliminate the potential
           for the hang ensure that RxIntDelay is set to 0.
 
 
@@ -208,7 +262,7 @@
 Default Value: 128
 
 This value, in units of 1.024 microseconds, limits the delay in which a
-receive interrupt is generated. Useful only if RxIntDelay is non-zero,
+receive interrupt is generated.  Useful only if RxIntDelay is non-zero,
 this value ensures that an interrupt is generated after the initial
 packet is received within the set amount of time.  Proper tuning,
 along with RxIntDelay, may improve traffic throughput in specific network
@@ -222,9 +276,9 @@
 Default Value:  0 (auto-negotiate at all supported speeds)
 
 Speed forces the line speed to the specified value in megabits per second
-(Mbps). If this parameter is not specified or is set to 0 and the link
+(Mbps).  If this parameter is not specified or is set to 0 and the link
 partner is set to auto-negotiate, the board will auto-detect the correct
-speed. Duplex should also be set when Speed is set to either 10 or 100.
+speed.  Duplex should also be set when Speed is set to either 10 or 100.
 
 
 TxDescriptors
@@ -234,7 +288,7 @@
 Default Value: 256
 
 This value is the number of transmit descriptors allocated by the driver.
-Increasing this value allows the driver to queue more transmits. Each
+Increasing this value allows the driver to queue more transmits.  Each
 descriptor is 16 bytes.
 
 NOTE:  Depending on the available system resources, the request for a
@@ -248,8 +302,8 @@
 Default Value: 64
 
 This value delays the generation of transmit interrupts in units of
-1.024 microseconds. Transmit interrupt reduction can improve CPU
-efficiency if properly tuned for specific network traffic. If the
+1.024 microseconds.  Transmit interrupt reduction can improve CPU
+efficiency if properly tuned for specific network traffic.  If the
 system is reporting dropped transmits, this value may be set too high
 causing the driver to run out of available transmit descriptors.
 
@@ -261,7 +315,7 @@
 Default Value: 64
 
 This value, in units of 1.024 microseconds, limits the delay in which a
-transmit interrupt is generated. Useful only if TxIntDelay is non-zero,
+transmit interrupt is generated.  Useful only if TxIntDelay is non-zero,
 this value ensures that an interrupt is generated after the initial
 packet is sent on the wire within the set amount of time.  Proper tuning,
 along with TxIntDelay, may improve traffic throughput in specific
@@ -288,15 +342,15 @@
 
 For copper-based boards, the keywords interact as follows:
 
-  The default operation is auto-negotiate. The board advertises all
+  The default operation is auto-negotiate.  The board advertises all
   supported speed and duplex combinations, and it links at the highest
   common speed and duplex mode IF the link partner is set to auto-negotiate.
 
   If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps
   is advertised (The 1000BaseT spec requires auto-negotiation.)
 
-  If Speed = 10 or 100, then both Speed and Duplex should be set. Auto-
-  negotiation is disabled, and the AutoNeg parameter is ignored. Partner
+  If Speed = 10 or 100, then both Speed and Duplex should be set.  Auto-
+  negotiation is disabled, and the AutoNeg parameter is ignored.  Partner
   SHOULD also be forced.
 
 The AutoNeg parameter is used when more control is required over the
@@ -304,7 +358,7 @@
 speed and duplex combinations are advertised during the auto-negotiation
 process.
 
-The parameter may be specified as either a decimal or hexidecimal value as
+The parameter may be specified as either a decimal or hexadecimal value as
 determined by the bitmap below.
 
 Bit position   7      6      5       4       3      2      1       0
@@ -337,20 +391,19 @@
 
   Configuring the Driver on Different Distributions
   -------------------------------------------------
-
   Configuring a network driver to load properly when the system is started
-  is distribution dependent. Typically, the configuration process involves
+  is distribution dependent.  Typically, the configuration process involves
   adding an alias line to /etc/modules.conf or /etc/modprobe.conf as well
-  as editing other system startup scripts and/or configuration files. Many
+  as editing other system startup scripts and/or configuration files.  Many
   popular Linux distributions ship with tools to make these changes for you.
   To learn the proper way to configure a network device for your system,
-  refer to your distribution documentation. If during this process you are
+  refer to your distribution documentation.  If during this process you are
   asked for the driver or module name, the name for the Linux Base Driver
-  for the Intel PRO/1000 Family of Adapters is e1000.
+  for the Intel(R) PRO/1000 Family of Adapters is e1000.
 
   As an example, if you install the e1000 driver for two PRO/1000 adapters
   (eth0 and eth1) and set the speed and duplex to 10full and 100half, add
-  the following to modules.conf or modprobe.conf:
+  the following to modules.conf or modprobe.conf:
 
        alias eth0 e1000
        alias eth1 e1000
@@ -358,9 +411,8 @@
 
   Viewing Link Messages
   ---------------------
-
   Link messages will not be displayed to the console if the distribution is
-  restricting system messages. In order to see network driver link messages
+  restricting system messages.  In order to see network driver link messages
   on your console, set dmesg to eight by entering the following:
 
        dmesg -n 8
@@ -369,11 +421,9 @@
 
   Jumbo Frames
   ------------
-
-  The driver supports Jumbo Frames for all adapters except 82542 and
-  82573-based adapters. Jumbo Frames support is enabled by changing the
-  MTU to a value larger than the default of 1500. Use the ifconfig command
-  to increase the MTU size. For example:
+  Jumbo Frames support is enabled by changing the MTU to a value larger than
+  the default of 1500.  Use the ifconfig command to increase the MTU size.
+  For example:
 
        ifconfig eth<x> mtu 9000 up
 
@@ -390,26 +440,49 @@
 
   - To enable Jumbo Frames, increase the MTU size on the interface beyond
     1500.
-  - The maximum MTU setting for Jumbo Frames is 16110. This value coincides
+
+  - The maximum MTU setting for Jumbo Frames is 16110.  This value coincides
     with the maximum Jumbo Frames size of 16128.
+
   - Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or
     loss of link.
+
   - Some Intel gigabit adapters that support Jumbo Frames have a frame size
     limit of 9238 bytes, with a corresponding MTU size limit of 9216 bytes.
-    The adapters with this limitation are based on the Intel 82571EB and
-    82572EI controllers, which correspond to these product names:
-     Intel® PRO/1000 PT Dual Port Server Adapter
-     Intel® PRO/1000 PF Dual Port Server Adapter
-     Intel® PRO/1000 PT Server Adapter
-     Intel® PRO/1000 PT Desktop Adapter
-     Intel® PRO/1000 PF Server Adapter
+    The adapters with this limitation are based on the Intel(R) 82571EB,
+    82572EI, 82573L and 80003ES2LAN controllers.  These correspond to the
+    following product names:
+     Intel(R) PRO/1000 PT Server Adapter
+     Intel(R) PRO/1000 PT Desktop Adapter
+     Intel(R) PRO/1000 PT Network Connection
+     Intel(R) PRO/1000 PT Dual Port Server Adapter
+     Intel(R) PRO/1000 PT Dual Port Network Connection
+     Intel(R) PRO/1000 PF Server Adapter
+     Intel(R) PRO/1000 PF Network Connection
+     Intel(R) PRO/1000 PF Dual Port Server Adapter
+     Intel(R) PRO/1000 PB Server Connection
+     Intel(R) PRO/1000 PL Network Connection
+     Intel(R) PRO/1000 EB Network Connection with I/O Acceleration
+     Intel(R) PRO/1000 EB Backplane Connection with I/O Acceleration
+     Intel(R) PRO/1000 PT Quad Port Server Adapter
 
-  - The Intel PRO/1000 PM Network Connection does not support jumbo frames.
+  - Adapters based on the Intel(R) 82542 and 82573V/E controllers do not
+    support Jumbo Frames.  These correspond to the following product names:
+     Intel(R) PRO/1000 Gigabit Server Adapter
+     Intel(R) PRO/1000 PM Network Connection
+
+  - The following adapters do not support Jumbo Frames:
+     Intel(R) 82562V 10/100 Network Connection
+     Intel(R) 82566DM Gigabit Network Connection
+     Intel(R) 82566DC Gigabit Network Connection
+     Intel(R) 82566MM Gigabit Network Connection
+     Intel(R) 82566MC Gigabit Network Connection
+     Intel(R) 82562GT 10/100 Network Connection
+     Intel(R) 82562G 10/100 Network Connection
 
 
   Ethtool
   -------
-
   The driver utilizes the ethtool interface for driver configuration and
   diagnostics, as well as displaying statistical information.  Ethtool
   version 1.6 or later is required for this functionality.
@@ -417,15 +490,14 @@
   The latest release of ethtool can be found from
   http://sourceforge.net/projects/gkernel.
 
-  NOTE: Ethtool 1.6 only supports a limited set of ethtool options. Support
+  NOTE: Ethtool 1.6 only supports a limited set of ethtool options.  Support
   for a more complete ethtool feature set can be enabled by upgrading
   ethtool to ethtool-1.8.1.
 
   Enabling Wake on LAN* (WoL)
   ---------------------------
-
-  WoL is configured through the Ethtool* utility. Ethtool is included with
-  all versions of Red Hat after Red Hat 7.2. For other Linux distributions,
+  WoL is configured through the Ethtool* utility.  Ethtool is included with
+  all versions of Red Hat after Red Hat 7.2.  For other Linux distributions,
   download and install Ethtool from the following website:
   http://sourceforge.net/projects/gkernel.
 
@@ -436,11 +508,17 @@
   For this driver version, in order to enable WoL, the e1000 driver must be
   loaded when shutting down or rebooting the system.
 
+  Wake On LAN is only supported on port A for the following devices:
+  Intel(R) PRO/1000 PT Dual Port Network Connection
+  Intel(R) PRO/1000 PT Dual Port Server Connection
+  Intel(R) PRO/1000 PT Dual Port Server Adapter
+  Intel(R) PRO/1000 PF Dual Port Server Adapter
+  Intel(R) PRO/1000 PT Quad Port Server Adapter 
+
   NAPI
   ----
-
-  NAPI (Rx polling mode) is supported in the e1000 driver. NAPI is enabled
-  or disabled based on the configuration of the kernel. To override
+  NAPI (Rx polling mode) is supported in the e1000 driver.  NAPI is enabled
+  or disabled based on the configuration of the kernel.  To override
   the default, use the following compile-time flags.
 
   To enable NAPI, compile the driver module, passing in a configuration option:
@@ -457,88 +535,105 @@
 Known Issues
 ============
 
-  Jumbo Frames System Requirement
-  -------------------------------
+Dropped Receive Packets on Half-duplex 10/100 Networks
+------------------------------------------------------
+If you have an Intel PCI Express adapter running at 10 Mbps or 100 Mbps,
+half-duplex, you may observe occasional dropped receive packets.  There are
+no workarounds for this problem in this network configuration.  The network
+must be updated to operate in full-duplex and/or at 1000 Mbps only.
 
-  Memory allocation failures have been observed on Linux systems with 64 MB
-  of RAM or less that are running Jumbo Frames. If you are using Jumbo
-  Frames, your system may require more than the advertised minimum
-  requirement of 64 MB of system memory.
+Jumbo Frames System Requirement
+-------------------------------
+Memory allocation failures have been observed on Linux systems with 64 MB
+of RAM or less that are running Jumbo Frames.  If you are using Jumbo
+Frames, your system may require more than the advertised minimum
+requirement of 64 MB of system memory.
 
-  Performance Degradation with Jumbo Frames
-  -----------------------------------------
+Performance Degradation with Jumbo Frames
+-----------------------------------------
+Degradation in throughput performance may be observed in some Jumbo frames
+environments.  If this is observed, increasing the application's socket
+buffer size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values
+may help.  See the specific application manual and
+/usr/src/linux*/Documentation/networking/ip-sysctl.txt for more details.
 
-  Degradation in throughput performance may be observed in some Jumbo frames
-  environments. If this is observed, increasing the application's socket
-  buffer size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values
-  may help. See the specific application manual and
-  /usr/src/linux*/Documentation/
-  networking/ip-sysctl.txt for more details.
+Jumbo Frames on Foundry BigIron 8000 switch
+-------------------------------------------
+There is a known issue using Jumbo frames when connected to a Foundry
+BigIron 8000 switch.  This is a 3rd party limitation.  If you experience
+loss of packets, lower the MTU size.
 
-  Jumbo frames on Foundry BigIron 8000 switch
-  -------------------------------------------
-  There is a known issue using Jumbo frames when connected to a Foundry
-  BigIron 8000 switch. This is a 3rd party limitation. If you experience
-  loss of packets, lower the MTU size.
+Allocating Rx Buffers when Using Jumbo Frames 
+---------------------------------------------
+Allocating Rx buffers when using Jumbo Frames on 2.6.x kernels may fail if 
+the available memory is heavily fragmented. This issue may be seen with PCI-X 
+adapters or with packet split disabled. This can be reduced or eliminated 
+by changing the amount of available memory for receive buffer allocation, by
+increasing /proc/sys/vm/min_free_kbytes. 
 
-  Multiple Interfaces on Same Ethernet Broadcast Network
-  ------------------------------------------------------
+Multiple Interfaces on Same Ethernet Broadcast Network
+------------------------------------------------------
+Due to the default ARP behavior on Linux, it is not possible to have
+one system on two IP networks in the same Ethernet broadcast domain
+(non-partitioned switch) behave as expected.  All Ethernet interfaces
+will respond to IP traffic for any IP address assigned to the system.
+This results in unbalanced receive traffic.
 
-  Due to the default ARP behavior on Linux, it is not possible to have
-  one system on two IP networks in the same Ethernet broadcast domain
-  (non-partitioned switch) behave as expected. All Ethernet interfaces
-  will respond to IP traffic for any IP address assigned to the system.
-  This results in unbalanced receive traffic.
+If you have multiple interfaces in a server, either turn on ARP
+filtering by entering:
 
-  If you have multiple interfaces in a server, either turn on ARP
-  filtering by entering:
+    echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
+(this only works if your kernel's version is higher than 2.4.5),
 
-      echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
-  (this only works if your kernel's version is higher than 2.4.5),
+NOTE: This setting is not saved across reboots.  The configuration
+change can be made permanent by adding the line:
+    net.ipv4.conf.all.arp_filter = 1
+to the file /etc/sysctl.conf
 
-  NOTE: This setting is not saved across reboots. The configuration
-  change can be made permanent by adding the line:
-      net.ipv4.conf.all.arp_filter = 1
-  to the file /etc/sysctl.conf
+      or,
 
-        or,
+install the interfaces in separate broadcast domains (either in
+different switches or in a switch partitioned to VLANs).
 
-  install the interfaces in separate broadcast domains (either in
-  different switches or in a switch partitioned to VLANs).
+82541/82547 can't link or are slow to link with some link partners
+-----------------------------------------------------------------
+There is a known compatibility issue with 82541/82547 and some
+low-end switches where the link will not be established, or will
+be slow to establish.  In particular, these switches are known to
+be incompatible with 82541/82547:
 
-  82541/82547 can't link or are slow to link with some link partners
-  -----------------------------------------------------------------
+    Planex FXG-08TE
+    I-O Data ETG-SH8
 
-  There is a known compatibility issue with 82541/82547 and some
-  low-end switches where the link will not be established, or will
-  be slow to establish.  In particular, these switches are known to
-  be incompatible with 82541/82547:
+To work around this issue, the driver can be compiled with an override
+of the PHY's master/slave setting.  Forcing master or forcing slave
+mode will improve time-to-link.
 
-      Planex FXG-08TE
-      I-O Data ETG-SH8
+    # make CFLAGS_EXTRA=-DE1000_MASTER_SLAVE=<n>
 
-  To workaround this issue, the driver can be compiled with an override
-  of the PHY's master/slave setting.  Forcing master or forcing slave
-  mode will improve time-to-link.
+Where <n> is:
 
-      # make EXTRA_CFLAGS=-DE1000_MASTER_SLAVE=<n>
+    0 = Hardware default
+    1 = Master mode
+    2 = Slave mode
+    3 = Auto master/slave
 
-  Where <n> is:
+Disable rx flow control with ethtool
+------------------------------------
+In order to disable receive flow control using ethtool, you must turn
+off auto-negotiation on the same command line.
 
-      0 = Hardware default
-      1 = Master mode
-      2 = Slave mode
-      3 = Auto master/slave
+For example:
 
-  Disable rx flow control with ethtool
-  ------------------------------------
+   ethtool -A eth? autoneg off rx off
 
-  In order to disable receive flow control using ethtool, you must turn
-  off auto-negotiation on the same command line.
-
-  For example:
-
-     ethtool -A eth? autoneg off rx off
+Unplugging network cable while ethtool -p is running
+----------------------------------------------------
+In kernel versions 2.5.50 and later (including the 2.6 kernel), unplugging
+the network cable while ethtool -p is running will cause the system to
+become unresponsive to keyboard commands, except for control-alt-delete.
+Restarting the system appears to be the only remedy.
 
 
 Support
@@ -548,24 +643,10 @@
 
     http://support.intel.com
 
-    or the Intel Wired Networking project hosted by Sourceforge at:
+or the Intel Wired Networking project hosted by Sourceforge at:
 
     http://sourceforge.net/projects/e1000
 
 If an issue is identified with the released source code on the supported
 kernel with a supported adapter, email the specific information related
-to the issue to e1000-devel@lists.sourceforge.net
-
-
-License
-=======
-
-This software program is released under the terms of a license agreement
-between you ('Licensee') and Intel. Do not use or load this software or any
-associated materials (collectively, the 'Software') until you have carefully
-read the full terms and conditions of the file COPYING located in this software
-package. By loading or using the Software, you agree to the terms of this
-Agreement. If you do not agree with the terms of this Agreement, do not
-install or use the Software.
-
-* Other names and brands may be claimed as the property of others.
+to the issue to e1000-devel@lists.sf.net
diff --git a/Documentation/networking/generic_netlink.txt b/Documentation/networking/generic_netlink.txt
new file mode 100644
index 0000000..d4f8b8b
--- /dev/null
+++ b/Documentation/networking/generic_netlink.txt
@@ -0,0 +1,3 @@
+A wiki document on how to use Generic Netlink can be found here:
+
+ * http://linux-net.osdl.org/index.php/Generic_Netlink_HOWTO
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index fd3c0c0..a0f6842 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -101,6 +101,11 @@
 
 TCP variables: 
 
+somaxconn - INTEGER
+	Limit of socket listen() backlog, known in userspace as SOMAXCONN.
+	Defaults to 128.  See also tcp_max_syn_backlog for additional tuning
+	for TCP sockets.
+
 tcp_abc - INTEGER
 	Controls Appropriate Byte Count (ABC) defined in RFC3465.
 	ABC is a way of increasing congestion window (cwnd) more slowly
@@ -112,15 +117,68 @@
 		  of two segments to compensate for delayed acknowledgments.
 	Default: 0 (off)
 
-tcp_syn_retries - INTEGER
-	Number of times initial SYNs for an active TCP connection attempt
-	will be retransmitted. Should not be higher than 255. Default value
-	is 5, which corresponds to ~180seconds.
+tcp_abort_on_overflow - BOOLEAN
+	If the listening service is too slow to accept new connections,
+	reset them. The default state is FALSE, meaning that if an overflow
+	occurred due to a burst, the connection will recover. Enable this
+	option _only_ if you are really sure that the listening daemon
+	cannot be tuned to accept connections faster. Enabling this
+	option can harm the clients of your server.
 
-tcp_synack_retries - INTEGER
-	Number of times SYNACKs for a passive TCP connection attempt will
-	be retransmitted. Should not be higher than 255. Default value
-	is 5, which corresponds to ~180seconds.
+tcp_adv_win_scale - INTEGER
+	Count buffering overhead as bytes/2^tcp_adv_win_scale
+	(if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
+	if it is <= 0.
+	Default: 2
+
+tcp_allowed_congestion_control - STRING
+	Show/set the congestion control choices available to non-privileged
+	processes. The list is a subset of those listed in
+	tcp_available_congestion_control.
+	Default is "reno" and the default setting (tcp_congestion_control).
+
+tcp_app_win - INTEGER
+	Reserve max(window/2^tcp_app_win, mss) of window for application
+	buffer. Value 0 is special, it means that nothing is reserved.
+	Default: 31
+
+tcp_available_congestion_control - STRING
+	Shows the available congestion control choices that are registered.
+	More congestion control algorithms may be available as modules,
+	but not loaded.
+
+tcp_congestion_control - STRING
+	Set the congestion control algorithm to be used for new
+	connections. The algorithm "reno" is always available, but
+	additional choices may be available based on kernel configuration.
+	Default is set as part of kernel configuration.
+
+tcp_dsack - BOOLEAN
+	Allows TCP to send "duplicate" SACKs.
+
+tcp_ecn - BOOLEAN
+	Enable Explicit Congestion Notification in TCP.
+
+tcp_fack - BOOLEAN
+	Enable FACK congestion avoidance and fast retransmission.
+	The value is not used, if tcp_sack is not enabled.
+
+tcp_fin_timeout - INTEGER
+	Time to hold a socket in state FIN-WAIT-2, if it was closed
+	by our side. The peer can be broken and never close its side,
+	or even die unexpectedly. The default value is 60sec.
+	The usual value used in 2.2 was 180 seconds; you may restore
+	it, but remember that even if your machine is a lightly loaded
+	WEB server, you risk overflowing memory with kilotons of dead
+	sockets. FIN-WAIT-2 sockets are less dangerous than FIN-WAIT-1,
+	because they eat a maximum of 1.5K of memory, but they tend
+	to live longer. Cf. tcp_max_orphans.
+
+tcp_frto - BOOLEAN
+	Enables F-RTO, an enhanced recovery algorithm for TCP retransmission
+	timeouts.  It is particularly beneficial in wireless environments
+	where packet loss is typically due to random radio interference
+	rather than intermediate router congestion.
 
 tcp_keepalive_time - INTEGER
 	How often TCP sends out keepalive messages when keepalive is enabled.
@@ -136,54 +194,13 @@
 	after probes started. Default value: 75sec i.e. connection
 	will be aborted after ~11 minutes of retries.
 
-tcp_retries1 - INTEGER
-	How many times to retry before deciding that something is wrong
-	and it is necessary to report this suspicion to network layer.
-	Minimal RFC value is 3, it is default, which corresponds
-	to ~3sec-8min depending on RTO.
-
-tcp_retries2 - INTEGER
-	How may times to retry before killing alive TCP connection.
-	RFC1122 says that the limit should be longer than 100 sec.
-	It is too small number.	Default value 15 corresponds to ~13-30min
-	depending on RTO.
-
-tcp_orphan_retries - INTEGER
-	How may times to retry before killing TCP connection, closed
-	by our side. Default value 7 corresponds to ~50sec-16min
-	depending on RTO. If you machine is loaded WEB server,
-	you should think about lowering this value, such sockets
-	may consume significant resources. Cf. tcp_max_orphans.
-
-tcp_fin_timeout - INTEGER
-	Time to hold socket in state FIN-WAIT-2, if it was closed
-	by our side. Peer can be broken and never close its side,
-	or even died unexpectedly. Default value is 60sec.
-	Usual value used in 2.2 was 180 seconds, you may restore
-	it, but remember that if your machine is even underloaded WEB server,
-	you risk to overflow memory with kilotons of dead sockets,
-	FIN-WAIT-2 sockets are less dangerous than FIN-WAIT-1,
-	because they eat maximum 1.5K of memory, but they tend
-	to live longer.	Cf. tcp_max_orphans.
-
-tcp_max_tw_buckets - INTEGER
-	Maximal number of timewait sockets held by system simultaneously.
-	If this number is exceeded time-wait socket is immediately destroyed
-	and warning is printed. This limit exists only to prevent
-	simple DoS attacks, you _must_ not lower the limit artificially,
-	but rather increase it (probably, after increasing installed memory),
-	if network conditions require more than default value.
-
-tcp_tw_recycle - BOOLEAN
-	Enable fast recycling TIME-WAIT sockets. Default value is 0.
-	It should not be changed without advice/request of technical
-	experts.
-
-tcp_tw_reuse - BOOLEAN
-	Allow to reuse TIME-WAIT sockets for new connections when it is
-	safe from protocol viewpoint. Default value is 0.
-	It should not be changed without advice/request of technical
-	experts.
+tcp_low_latency - BOOLEAN
+	If set, the TCP stack makes decisions that prefer lower
+	latency as opposed to higher throughput.  By default, this
+	option is not set meaning that higher throughput is preferred.
+	An example of an application where this default should be
+	changed would be a Beowulf compute cluster.
+	Default: 0
 
 tcp_max_orphans - INTEGER
 	Maximal number of TCP sockets not attached to any user file handle,
@@ -197,41 +214,6 @@
 	more aggressively. Let me to remind again: each orphan eats
 	up to ~64K of unswappable memory.
 
-tcp_abort_on_overflow - BOOLEAN
-	If listening service is too slow to accept new connections,
-	reset them. Default state is FALSE. It means that if overflow
-	occurred due to a burst, connection will recover. Enable this
-	option _only_ if you are really sure that listening daemon
-	cannot be tuned to accept connections faster. Enabling this
-	option can harm clients of your server.
-
-tcp_syncookies - BOOLEAN
-	Only valid when the kernel was compiled with CONFIG_SYNCOOKIES
-	Send out syncookies when the syn backlog queue of a socket 
-	overflows. This is to prevent against the common 'syn flood attack'
-	Default: FALSE
-
-	Note, that syncookies is fallback facility.
-	It MUST NOT be used to help highly loaded servers to stand
-	against legal connection rate. If you see synflood warnings
-	in your logs, but investigation	shows that they occur
-	because of overload with legal connections, you should tune
-	another parameters until this warning disappear.
-	See: tcp_max_syn_backlog, tcp_synack_retries, tcp_abort_on_overflow.
-
-	syncookies seriously violate TCP protocol, do not allow
-	to use TCP extensions, can result in serious degradation
-	of some services (f.e. SMTP relaying), visible not by you,
-	but your clients and relays, contacting you. While you see
-	synflood warnings in logs not being really flooded, your server
-	is seriously misconfigured.
-
-tcp_stdurg - BOOLEAN
-	Use the Host requirements interpretation of the TCP urg pointer field.
-	Most hosts use the older BSD interpretation, so if you turn this on
-	Linux might not communicate correctly with them.	
-	Default: FALSE 
-	
 tcp_max_syn_backlog - INTEGER
 	Maximal number of remembered connection requests, which are
 	still did not receive an acknowledgment from connecting client.
@@ -239,24 +221,34 @@
 	and 128 for low memory machines. If server suffers of overload,
 	try to increase this number.
 
-tcp_window_scaling - BOOLEAN
-	Enable window scaling as defined in RFC1323.
+tcp_max_tw_buckets - INTEGER
+	Maximal number of timewait sockets held by the system simultaneously.
+	If this number is exceeded, the time-wait socket is immediately
+	destroyed and a warning is printed. This limit exists only to prevent
+	simple DoS attacks; you _must_ not lower the limit artificially,
+	but rather increase it (probably, after increasing installed memory),
+	if network conditions require more than the default value.
 
-tcp_timestamps - BOOLEAN
-	Enable timestamps as defined in RFC1323.
+tcp_mem - vector of 3 INTEGERs: min, pressure, max
+	min: below this number of pages TCP is not bothered about its
+	memory appetite.
 
-tcp_sack - BOOLEAN
-	Enable select acknowledgments (SACKS).
+	pressure: when amount of memory allocated by TCP exceeds this number
+	of pages, TCP moderates its memory consumption and enters memory
+	pressure mode, which is exited when memory consumption falls
+	under "min".
 
-tcp_fack - BOOLEAN
-	Enable FACK congestion avoidance and fast retransmission.
-	The value is not used, if tcp_sack is not enabled.
+	max: number of pages allowed for queueing by all TCP sockets.
 
-tcp_dsack - BOOLEAN
-	Allows TCP to send "duplicate" SACKs.
+	Defaults are calculated at boot time from amount of available
+	memory.
 
-tcp_ecn - BOOLEAN
-	Enable Explicit Congestion Notification in TCP.
+tcp_orphan_retries - INTEGER
+	How many times to retry before killing a TCP connection closed
+	by our side. The default value of 7 corresponds to ~50sec-16min
+	depending on RTO. If your machine is a loaded WEB server,
+	you should think about lowering this value, as such sockets
+	may consume significant resources. Cf. tcp_max_orphans.
 
 tcp_reordering - INTEGER
 	Maximal reordering of packets in a TCP stream.
@@ -267,20 +259,23 @@
 	On retransmit try to send bigger packets to work around bugs in
 	certain TCP stacks.
 
-tcp_wmem - vector of 3 INTEGERs: min, default, max
-	min: Amount of memory reserved for send buffers for TCP socket.
-	Each TCP socket has rights to use it due to fact of its birth.
-	Default: 4K
+tcp_retries1 - INTEGER
+	How many times to retry before deciding that something is wrong
+	and it is necessary to report this suspicion to the network layer.
+	The minimal RFC value is 3; it is the default, which corresponds
+	to ~3sec-8min depending on RTO.
 
-	default: Amount of memory allowed for send buffers for TCP socket
-	by default. This value overrides net.core.wmem_default used
-	by other protocols, it is usually lower than net.core.wmem_default.
-	Default: 16K
+tcp_retries2 - INTEGER
+	How many times to retry before killing an alive TCP connection.
+	RFC1122 says that the limit should be longer than 100 sec.
+	That is too small a number. The default value of 15 corresponds
+	to ~13-30min depending on RTO.
 
-	max: Maximal amount of memory allowed for automatically selected
-	send buffers for TCP socket. This value does not override
-	net.core.wmem_max, "static" selection via SO_SNDBUF does not use this.
-	Default: 128K
+tcp_rfc1337 - BOOLEAN
+	If set, the TCP stack behaves in conformance with RFC1337. If unset,
+	we do not conform to the RFC, but prevent TCP TIME_WAIT
+	assassination.
+	Default: 0
 
 tcp_rmem - vector of 3 INTEGERs: min, default, max
 	min: Minimal size of receive buffer used by TCP sockets.
@@ -299,74 +294,8 @@
 	net.core.rmem_max, "static" selection via SO_RCVBUF does not use this.
 	Default: 87380*2 bytes.
 
-tcp_mem - vector of 3 INTEGERs: min, pressure, max
-	min: below this number of pages TCP is not bothered about its
-	memory appetite.
-
-	pressure: when amount of memory allocated by TCP exceeds this number
-	of pages, TCP moderates its memory consumption and enters memory
-	pressure mode, which is exited when memory consumption falls
-	under "min".
-
-	max: number of pages allowed for queueing by all TCP sockets.
-
-	Defaults are calculated at boot time from amount of available
-	memory.
-
-tcp_app_win - INTEGER
-	Reserve max(window/2^tcp_app_win, mss) of window for application
-	buffer. Value 0 is special, it means that nothing is reserved.
-	Default: 31
-
-tcp_adv_win_scale - INTEGER
-	Count buffering overhead as bytes/2^tcp_adv_win_scale
-	(if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
-	if it is <= 0.
-	Default: 2
-
-tcp_rfc1337 - BOOLEAN
-	If set, the TCP stack behaves conforming to RFC1337. If unset,
-	we are not conforming to RFC, but prevent TCP TIME_WAIT
-	assassination.   
-	Default: 0
-
-tcp_low_latency - BOOLEAN
-	If set, the TCP stack makes decisions that prefer lower
-	latency as opposed to higher throughput.  By default, this
-	option is not set meaning that higher throughput is preferred.
-	An example of an application where this default should be
-	changed would be a Beowulf compute cluster.
-	Default: 0
-
-tcp_tso_win_divisor - INTEGER
-       This allows control over what percentage of the congestion window
-       can be consumed by a single TSO frame.
-       The setting of this parameter is a choice between burstiness and
-       building larger TSO frames.
-       Default: 3
-
-tcp_frto - BOOLEAN
-	Enables F-RTO, an enhanced recovery algorithm for TCP retransmission
-	timeouts.  It is particularly beneficial in wireless environments
-	where packet loss is typically due to random radio interference
-	rather than intermediate router congestion.
-
-tcp_congestion_control - STRING
-	Set the congestion control algorithm to be used for new
-	connections. The algorithm "reno" is always available, but
-	additional choices may be available based on kernel configuration.
-
-somaxconn - INTEGER
-	Limit of socket listen() backlog, known in userspace as SOMAXCONN.
-	Defaults to 128.  See also tcp_max_syn_backlog for additional tuning
-	for TCP sockets.
-
-tcp_workaround_signed_windows - BOOLEAN
-	If set, assume no receipt of a window scaling option means the
-	remote TCP is broken and treats the window as a signed quantity.
-	If unset, assume the remote TCP is not broken even if we do
-	not receive a window scaling option from them.
-	Default: 0
+tcp_sack - BOOLEAN
+	Enable selective acknowledgments (SACKs).
 
 tcp_slow_start_after_idle - BOOLEAN
 	If set, provide RFC2861 behavior and time out the congestion
@@ -375,6 +304,89 @@
 	be timed out after an idle period.
 	Default: 1
 
+tcp_stdurg - BOOLEAN
+	Use the Host requirements interpretation of the TCP urg pointer field.
+	Most hosts use the older BSD interpretation, so if you turn this on
+	Linux might not communicate correctly with them.
+	Default: FALSE
+
+tcp_synack_retries - INTEGER
+	Number of times SYNACKs for a passive TCP connection attempt will
+	be retransmitted. Should not be higher than 255. Default value
+	is 5, which corresponds to ~180 seconds.
+
+tcp_syncookies - BOOLEAN
+	Only valid when the kernel was compiled with CONFIG_SYNCOOKIES.
+	Send out syncookies when the syn backlog queue of a socket
+	overflows. This is to protect against the common 'syn flood attack'.
+	Default: FALSE
+
+	Note that syncookies are a fallback facility.
+	They MUST NOT be used to help highly loaded servers to stand
+	against a legal connection rate. If you see synflood warnings
+	in your logs, but investigation shows that they occur
+	because of overload with legal connections, you should tune
+	other parameters until this warning disappears.
+	See: tcp_max_syn_backlog, tcp_synack_retries, tcp_abort_on_overflow.
+
+	syncookies seriously violate the TCP protocol, do not allow
+	the use of TCP extensions, and can result in serious degradation
+	of some services (e.g. SMTP relaying), visible not to you,
+	but to your clients and relays contacting you. If you see
+	synflood warnings in your logs without really being flooded,
+	your server is seriously misconfigured.
+
+tcp_syn_retries - INTEGER
+	Number of times initial SYNs for an active TCP connection attempt
+	will be retransmitted. Should not be higher than 255. Default value
+	is 5, which corresponds to ~180 seconds.
+
+tcp_timestamps - BOOLEAN
+	Enable timestamps as defined in RFC1323.
+
+tcp_tso_win_divisor - INTEGER
+	This allows control over what percentage of the congestion window
+	can be consumed by a single TSO frame.
+	The setting of this parameter is a choice between burstiness and
+	building larger TSO frames.
+	Default: 3
+
+tcp_tw_recycle - BOOLEAN
+	Enable fast recycling of TIME-WAIT sockets. The default value is 0.
+	It should not be changed without advice/request of technical
+	experts.
+
+tcp_tw_reuse - BOOLEAN
+	Allow reuse of TIME-WAIT sockets for new connections when it is
+	safe from a protocol viewpoint. The default value is 0.
+	It should not be changed without advice/request of technical
+	experts.
+
+tcp_window_scaling - BOOLEAN
+	Enable window scaling as defined in RFC1323.
+
+tcp_wmem - vector of 3 INTEGERs: min, default, max
+	min: Amount of memory reserved for send buffers for a TCP socket.
+	Each TCP socket has the right to use it from the moment it is created.
+	Default: 4K
+
+	default: Amount of memory allowed for send buffers for a TCP socket
+	by default. This value overrides net.core.wmem_default used
+	by other protocols; it is usually lower than net.core.wmem_default.
+	Default: 16K
+
+	max: Maximal amount of memory allowed for automatically selected
+	send buffers for a TCP socket. This value does not override
+	net.core.wmem_max; "static" selection via SO_SNDBUF does not use this.
+	Default: 128K
+
+tcp_workaround_signed_windows - BOOLEAN
+	If set, assume no receipt of a window scaling option means the
+	remote TCP is broken and treats the window as a signed quantity.
+	If unset, assume the remote TCP is not broken even if we do
+	not receive a window scaling option from them.
+	Default: 0
+
 CIPSOv4 Variables:
 
 cipso_cache_enable - BOOLEAN
@@ -974,4 +986,3 @@
 slot_timeout FIXME
 warn_noreply_time FIXME
 
-$Id: ip-sysctl.txt,v 1.20 2001/12/13 09:00:18 davem Exp $
diff --git a/Documentation/networking/iphase.txt b/Documentation/networking/iphase.txt
index 493203a..55eac4a 100644
--- a/Documentation/networking/iphase.txt
+++ b/Documentation/networking/iphase.txt
@@ -81,7 +81,7 @@
     1M. The RAM size decides the number of buffers and buffer size. The default 
     size and number of buffers are set as following: 
 
-          Totol    Rx RAM   Tx RAM   Rx Buf   Tx Buf   Rx buf   Tx buf
+          Total    Rx RAM   Tx RAM   Rx Buf   Tx Buf   Rx buf   Tx buf
          RAM size   size     size     size     size      cnt      cnt
          --------  ------   ------   ------   ------   ------   ------
            128K      64K      64K      10K      10K       6        6
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 12a008a..5a232d9 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -284,7 +284,7 @@
 -------------------
 
 If you check the source code you will see that what I draw here as a frame
-is not only the link level frame. At the begining of each frame there is a 
+is not only the link level frame. At the beginning of each frame there is a 
 header called struct tpacket_hdr used in PACKET_MMAP to hold link level's frame
 meta information like timestamp. So what we draw here a frame it's really 
 the following (from include/linux/if_packet.h):
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
index 29ccae4..0bc95ea 100644
--- a/Documentation/networking/phy.txt
+++ b/Documentation/networking/phy.txt
@@ -1,7 +1,7 @@
 
 -------
 PHY Abstraction Layer
-(Updated 2005-07-21)
+(Updated 2006-11-30)
 
 Purpose
 
@@ -97,11 +97,12 @@
  
  Next, you need to know the device name of the PHY connected to this device. 
  The name will look something like, "phy0:0", where the first number is the
- bus id, and the second is the PHY's address on that bus.
+ bus id, and the second is the PHY's address on that bus.  Typically,
+ the bus is responsible for making its ID unique.
  
  Now, to connect, just call this function:
  
-   phydev = phy_connect(dev, phy_name, &adjust_link, flags);
+   phydev = phy_connect(dev, phy_name, &adjust_link, flags, interface);
 
  phydev is a pointer to the phy_device structure which represents the PHY.  If
  phy_connect is successful, it will return the pointer.  dev, here, is the
@@ -115,6 +116,10 @@
  This is useful if the system has put hardware restrictions on
  the PHY/controller, of which the PHY needs to be aware.
 
+ interface is a u32 which specifies the connection type used
+ between the controller and the PHY.  Examples are GMII, MII,
+ RGMII, and SGMII.  For a full list, see include/linux/phy.h
+
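+ For illustration (a sketch only; the PHY name and flags value are
+ placeholders, and PHY_INTERFACE_MODE_RGMII is just one of the
+ phy_interface_t values listed in include/linux/phy.h), a MAC wired to its
+ PHY over RGMII might connect like this:
+
+   phydev = phy_connect(dev, "phy0:0", &adjust_link, 0,
+                        PHY_INTERFACE_MODE_RGMII);
+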
  Now just make sure that phydev->supported and phydev->advertising have any
  values pruned from them which don't make sense for your controller (a 10/100
  controller may be connected to a gigabit capable PHY, so you would need to
@@ -191,7 +196,7 @@
    start, or disables then frees them for stop.
 
  struct phy_device * phy_attach(struct net_device *dev, const char *phy_id,
-		 u32 flags);
+		 u32 flags, phy_interface_t interface);
 
    Attaches a network device to a particular PHY, binding the PHY to a generic
    driver if none was found during bus initialization.  Passes in
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt
index c8eee23..c6cf4a3 100644
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -63,8 +63,8 @@
 Result: OK: 13101142(c12220741+d880401) usec, 10000000 (60byte,0frags)
   763292pps 390Mb/sec (390805504bps) errors: 39664
 
-Confguring threads and devices
-==============================
+Configuring threads and devices
+===============================
 This is done via the /proc interface easiest done via pgset in the scripts
 
 Examples:
@@ -116,7 +116,7 @@
 					 there must be no spaces between the
 					 arguments. Leading zeros are required.
 					 Do not set the bottom of stack bit,
-					 thats done automatically. If you do
+					 that's done automatically. If you do
 					 set the bottom of stack bit, that
 					 indicates that you want to randomly
 					 generate that address and the flag
diff --git a/Documentation/networking/proc_net_tcp.txt b/Documentation/networking/proc_net_tcp.txt
index 59cb915..5e21f7c 100644
--- a/Documentation/networking/proc_net_tcp.txt
+++ b/Documentation/networking/proc_net_tcp.txt
@@ -25,7 +25,7 @@
 
    1000        0 54165785 4 cd1e6040 25 4 27 3 -1
     |          |    |     |    |     |  | |  | |--> slow start size threshold, 
-    |          |    |     |    |     |  | |  |      or -1 if the treshold
+    |          |    |     |    |     |  | |  |      or -1 if the threshold
     |          |    |     |    |     |  | |  |      is >= 0xFFFF
     |          |    |     |    |     |  | |  |----> sending congestion window
     |          |    |     |    |     |  | |-------> (ack.quick<<1)|ack.pingpong
diff --git a/Documentation/networking/sk98lin.txt b/Documentation/networking/sk98lin.txt
index 4e1cc74..8590a95 100644
--- a/Documentation/networking/sk98lin.txt
+++ b/Documentation/networking/sk98lin.txt
@@ -346,7 +346,7 @@
       depending on the load of the system. If the driver detects that the
       system load is too high, the driver tries to shield the system against 
       too much network load by enabling interrupt moderation. If - at a later
-      time - the CPU utilizaton decreases again (or if the network load is 
+      time - the CPU utilization decreases again (or if the network load is 
       negligible) the interrupt moderation will automatically be disabled.
 
 Interrupt moderation should be used when the driver has to handle one or more
diff --git a/Documentation/networking/slicecom.txt b/Documentation/networking/slicecom.txt
index 2f04c92..32d3b91 100644
--- a/Documentation/networking/slicecom.txt
+++ b/Documentation/networking/slicecom.txt
@@ -126,7 +126,7 @@
 
 Though the options below are to be set on a single interface, they apply to the
 whole board. The restriction, to use them on 'UP' interfaces, is because the 
-command sequence below could lead to unpredicable results.
+command sequence below could lead to unpredictable results.
 
 	# echo 0        >boardnum
 	# echo internal >clock_source
diff --git a/Documentation/networking/udplite.txt b/Documentation/networking/udplite.txt
new file mode 100644
index 0000000..dd6f46b
--- /dev/null
+++ b/Documentation/networking/udplite.txt
@@ -0,0 +1,281 @@
+  ===========================================================================
+                      The UDP-Lite protocol (RFC 3828)
+  ===========================================================================
+
+
+  UDP-Lite is a Standards-Track IETF transport protocol whose characteristic
+  is a variable-length checksum. This has advantages for transport of multimedia
+  (video, VoIP) over wireless networks, as partly damaged packets can still be
+  fed into the codec instead of being discarded due to a failed checksum test.
+
+  This file briefly describes the existing kernel support and the socket API.
+  For in-depth information, you can consult:
+
+   o The UDP-Lite Homepage: http://www.erg.abdn.ac.uk/users/gerrit/udp-lite/
+       From here you can also download some example application source code.
+
+   o The UDP-Lite HOWTO on
+       http://www.erg.abdn.ac.uk/users/gerrit/udp-lite/files/UDP-Lite-HOWTO.txt
+
+   o The Wireshark UDP-Lite WiKi (with capture files):
+       http://wiki.wireshark.org/Lightweight_User_Datagram_Protocol
+
+   o The Protocol Spec, RFC 3828, http://www.ietf.org/rfc/rfc3828.txt
+
+
+  I) APPLICATIONS
+
+  Several applications have been ported successfully to UDP-Lite. Ethereal
+  (now called wireshark) has UDP-Litev4/v6 support by default. The tarball on
+
+   http://www.erg.abdn.ac.uk/users/gerrit/udp-lite/files/udplite_linux.tar.gz
+
+  has source code for several v4/v6 client-server and network testing examples.
+
+  Porting applications to UDP-Lite is straightforward: only socket level and
+  IPPROTO need to be changed; senders additionally set the checksum coverage
+  length (default = header length = 8). Details are in the next section.
+
+
+  II) PROGRAMMING API
+
+  UDP-Lite provides a connectionless, unreliable datagram service and hence
+  uses the same socket type as UDP. In fact, porting from UDP to UDP-Lite is
+  very easy: simply add `IPPROTO_UDPLITE' as the last argument of the socket(2)
+  call so that the statement looks like:
+
+      s = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
+
+                      or, respectively,
+
+      s = socket(PF_INET6, SOCK_DGRAM, IPPROTO_UDPLITE);
+
+  With just the above change you are able to run UDP-Lite services or connect
+  to UDP-Lite servers. The kernel will assume that you are not interested in
+  using partial checksum coverage and so emulate UDP mode (full coverage).
+
+  Making use of the partial checksum coverage facilities requires setting a
+  single socket option, which takes an integer specifying the coverage length:
+
+    * Sender checksum coverage: UDPLITE_SEND_CSCOV
+
+      For example,
+
+        int val = 20;
+        setsockopt(s, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &val, sizeof(int));
+
+      sets the checksum coverage length to 20 bytes (12b data + 8b header).
+      Of each packet only the first 20 bytes (plus the pseudo-header) will be
+      checksummed. This is useful for RTP applications which have a 12-byte
+      base header.
+
+
+    * Receiver checksum coverage: UDPLITE_RECV_CSCOV
+
+      This option is the receiver-side analogue. It is truly optional, i.e. not
+      required to enable traffic with partial checksum coverage. Its function is
+      that of a traffic filter: when enabled, it instructs the kernel to drop
+      all packets which have a coverage _less_ than this value. For example, if
+      RTP and UDP headers are to be protected, a receiver can enforce that only
+      packets with a minimum coverage of 20 are admitted:
+
+        int min = 20;
+        setsockopt(s, SOL_UDPLITE, UDPLITE_RECV_CSCOV, &min, sizeof(int));
+
+  The calls to getsockopt(2) are analogous. Being an extension and not a stand-
+  alone protocol, all socket options known from UDP can be used in exactly the
+  same manner as before, e.g. UDP_CORK or UDP_ENCAP.
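+
+  To illustrate (a sketch; the variable names here are arbitrary), the send
+  coverage currently in effect can be read back with getsockopt(2), whose
+  last argument is a pointer to the option length:
+
+      int val;
+      socklen_t len = sizeof(val);
+
+      getsockopt(s, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &val, &len);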
+
+  A detailed discussion of UDP-Lite checksum coverage options is in section IV.
+
+
+  III) HEADER FILES
+
+  The socket API requires support through header files in /usr/include:
+
+    * /usr/include/netinet/in.h
+        to define IPPROTO_UDPLITE
+
+    * /usr/include/netinet/udplite.h
+        for UDP-Lite header fields and protocol constants
+
+  For testing purposes, the following can serve as a `mini' header file:
+
+    #define IPPROTO_UDPLITE       136
+    #define SOL_UDPLITE           136
+    #define UDPLITE_SEND_CSCOV     10
+    #define UDPLITE_RECV_CSCOV     11
+
+  Ready-made header files for various distros are in the UDP-Lite tarball.
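+
+  Putting sections II and III together, a minimal sender could look like the
+  sketch below (error handling omitted; it assumes the four #defines of the
+  `mini' header above are visible, and the destination address, port and
+  coverage value are arbitrary placeholders):
+
+    #include <string.h>
+    #include <arpa/inet.h>
+    #include <sys/socket.h>
+
+    int main(void)
+    {
+        struct sockaddr_in dst;
+        int cov = 20;
+        int s   = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
+
+        /* checksum the UDP-Lite header plus the first 12 payload bytes */
+        setsockopt(s, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
+
+        memset(&dst, 0, sizeof(dst));
+        dst.sin_family      = AF_INET;
+        dst.sin_port        = htons(10000);
+        dst.sin_addr.s_addr = inet_addr("127.0.0.1");
+
+        sendto(s, "some payload", 12, 0,
+               (struct sockaddr *)&dst, sizeof(dst));
+        return 0;
+    }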
+
+
+  IV) KERNEL BEHAVIOUR WITH REGARD TO THE VARIOUS SOCKET OPTIONS
+
+  To enable debugging messages, the log level needs to be set to 8, as most
+  messages use the KERN_DEBUG level (7).
+
+  1) Sender Socket Options
+
+  If the sender specifies a value of 0 as coverage length, the module
+  assumes full coverage, and transmits a packet with a coverage length of 0
+  and a checksum computed accordingly.  If the sender specifies a coverage
+  < 8 that is different from 0, the kernel assumes the default value of 8.
+  Finally, if the specified coverage length exceeds the packet length, the
+  packet length is used instead as the coverage length.
+
+  2) Receiver Socket Options
+
+  The receiver specifies the minimum value of the coverage length it
+  is willing to accept.  A value of 0 here indicates that the receiver
+  always wants the whole of the packet covered. In this case, all
+  partially covered packets are dropped and an error is logged.
+
+  It is not possible to specify illegal values (< 0, or between 1 and 7); in
+  these cases the default of 8 is assumed.
+
+  All packets arriving with a coverage value less than the specified
+  threshold are discarded; these events are also logged.
+
+  3) Disabling the Checksum Computation
+
+  On both sender and receiver, checksumming will always be performed
+  and cannot be disabled using SO_NO_CHECK. Thus
+
+        setsockopt(sockfd, SOL_SOCKET, SO_NO_CHECK,  ... );
+
+  will always be ignored, while the value of
+
+        getsockopt(sockfd, SOL_SOCKET, SO_NO_CHECK, &value, ...);
+
+  is meaningless (as in TCP). Packets with a zero checksum field are
+  illegal (cf. RFC 3828, sec. 3.1) and will be silently discarded.
+
+  4) Fragmentation
+
+  The checksum computation respects both buffer size and MTU. The size
+  of UDP-Lite packets is determined by the size of the send buffer. The
+  minimum size of the send buffer is 2048 (defined as SOCK_MIN_SNDBUF
+  in include/net/sock.h), the default value is configurable as
+  net.core.wmem_default or via setting the SO_SNDBUF socket(7)
+  option. The maximum upper bound for the send buffer is determined
+  by net.core.wmem_max.
+
+  Given a payload size larger than the send buffer size, UDP-Lite will
+  split the payload into several individual packets, filling up the
+  send buffer size in each case.
+
+  The precise value also depends on the interface MTU. The interface MTU,
+  in turn, may trigger IP fragmentation. In this case, the generated
+  UDP-Lite packet is split into several IP packets, of which only the
+  first one contains the L4 header.
+
+  The send buffer size has implications on the checksum coverage length.
+  Consider the following example:
+
+  Payload: 1536 bytes          Send Buffer:     1024 bytes
+  MTU:     1500 bytes          Coverage Length:  856 bytes
+
+  UDP-Lite will ship the 1536 bytes in two separate packets:
+
+  Packet 1: 1024 payload + 8 byte header + 20 byte IP header = 1052 bytes
+  Packet 2:  512 payload + 8 byte header + 20 byte IP header =  540 bytes
+
+  The checksum coverage of the first packet spans the UDP-Lite header and
+  848 bytes of the payload; the second packet is fully covered. Note
+  that for the second packet, the coverage length exceeds the packet
+  length. The kernel always re-adjusts the coverage length to the packet
+  length in such cases.
+
+  As an example of what happens when one UDP-Lite packet is split into
+  several tiny fragments, consider the following example.
+
+  Payload: 1024 bytes            Send buffer size: 1024 bytes
+  MTU:      300 bytes            Coverage length:   575 bytes
+
+  +-+-----------+--------------+--------------+--------------+
+  |8|    272    |      280     |     280      |     280      |
+  +-+-----------+--------------+--------------+--------------+
+               280            560            840           1032
+                                    ^
+  *****checksum coverage*************
+
+  The UDP-Lite module generates one 1032 byte packet (1024 + 8 byte
+  header). According to the interface MTU, these are split into 4 IP
+  packets (280 byte IP payload + 20 byte IP header). The kernel module
+  sums the contents of the entire first two fragments, plus 15 bytes of
+  the third fragment, before releasing the fragments to the IP module.
+
+  To see the analogous case for IPv6 fragmentation, consider a link
+  MTU of 1280 bytes and a write buffer of 3356 bytes. If the checksum
+  coverage is less than 1232 bytes (MTU minus IPv6/fragment header
+  lengths), only the first fragment needs to be considered. When using
+  larger checksum coverage lengths, each eligible fragment needs to be
+  checksummed. Suppose we have a checksum coverage of 3062. The buffer
+  of 3356 bytes will be split into the following fragments:
+
+    Fragment 1: 1280 bytes carrying  1232 bytes of UDP-Lite data
+    Fragment 2: 1280 bytes carrying  1232 bytes of UDP-Lite data
+    Fragment 3:  948 bytes carrying   900 bytes of UDP-Lite data
+
+  The first two fragments have to be checksummed in full, of the last
+  fragment only 598 (= 3062 - 2*1232) bytes are checksummed.
+
+  While it is important that such cases are dealt with correctly, they
+  are (annoyingly) rare: UDP-Lite is designed for optimising multimedia
+  performance over wireless (or generally noisy) links and thus smaller
+  coverage lengths are likely to be expected.
+
+
+  V) UDP-LITE RUNTIME STATISTICS AND THEIR MEANING
+
+  Exceptional and error conditions are logged to syslog at the KERN_DEBUG
+  level.  Live statistics about UDP-Lite are available in /proc/net/snmp
+  and can (with newer versions of netstat) be viewed using
+
+                            netstat -svu
+
+  This displays UDP-Lite statistics variables, whose meaning is as follows.
+
+   InDatagrams:     Total number of received datagrams.
+
+   NoPorts:         Number of packets received to an unknown port.
+                    These cases are counted separately (not as InErrors).
+
+   InErrors:        Number of erroneous UDP-Lite packets. Errors include:
+                      * internal socket queue receive errors
+                      * packet too short (less than 8 bytes or stated
+                        coverage length exceeds received length)
+                      * xfrm4_policy_check() returned with error
+                      * application has specified larger min. coverage
+                        length than that of incoming packet
+                      * checksum coverage violated
+                      * bad checksum
+
+   OutDatagrams:    Total number of sent datagrams.
+
+   These statistics derive from the UDP MIB (RFC 2013).
+
+
+  VI) IPTABLES
+
+  There is packet match support for UDP-Lite as well as support for the
+  LOG target. If you copy and paste the following line into /etc/protocols,
+
+  udplite 136     UDP-Lite        # UDP-Lite [RFC 3828]
+
+  then
+              iptables -A INPUT -p udplite -j LOG
+
+  will produce logging output to syslog. Dropping and rejecting packets also works.
+
+
+  VII) MAINTAINER ADDRESS
+
+  The UDP-Lite patch was developed at
+                    University of Aberdeen
+                    Electronics Research Group
+                    Department of Engineering
+                    Fraser Noble Building
+                    Aberdeen AB24 3UE; UK
+  The current maintainer is Gerrit Renker, <gerrit@erg.abdn.ac.uk>. Initial
+  code was developed by William  Stanislaus, <william@erg.abdn.ac.uk>.
diff --git a/Documentation/networking/wan-router.txt b/Documentation/networking/wan-router.txt
index 0cf6541..653978d 100644
--- a/Documentation/networking/wan-router.txt
+++ b/Documentation/networking/wan-router.txt
@@ -412,7 +412,7 @@
 
 beta3-2.1.4 Jul 2000		o X25 M_BIT Problem fix.
 				o Added the Multi-Port PPP
-				  Updated utilites for the Multi-Port PPP.
+				  Updated utilities for the Multi-Port PPP.
 
 2.1.4	Aut 2000
 				o In X25API:
@@ -444,13 +444,13 @@
 					
 				o Cpipemon
 					- Added set FT1 commands to the cpipemon. Thus CSU/DSU
-					  configuraiton can be performed using cpipemon.
+					  configuration can be performed using cpipemon.
 					  All systems that cannot run cfgft1 GUI utility should
 					  use cpipemon to configure the on board CSU/DSU.
 
 
 				o Keyboard Led Monitor/Debugger
-					- A new utilty /usr/sbin/wpkbdmon uses keyboard leds
+					- A new utility /usr/sbin/wpkbdmon uses keyboard leds
 					  to convey operational statistic information of the 
 					  Sangoma WANPIPE cards.
 					NUM_LOCK    = Line State  (On=connected,    Off=disconnected)
@@ -464,7 +464,7 @@
 					- Appropriate number of devices are dynamically loaded 
 					  based on the number of Sangoma cards found.
 
-					  Note: The kernel configuraiton option 
+					  Note: The kernel configuration option 
 						CONFIG_WANPIPE_CARDS has been taken out.
 					
 				o Fixed the Frame Relay and Chdlc network interfaces so they are
diff --git a/Documentation/networking/xfrm_sync.txt b/Documentation/networking/xfrm_sync.txt
index 8be626f..d7aac9d 100644
--- a/Documentation/networking/xfrm_sync.txt
+++ b/Documentation/networking/xfrm_sync.txt
@@ -47,10 +47,13 @@
 
    struct xfrm_aevent_id {
              struct xfrm_usersa_id           sa_id;
+             xfrm_address_t                  saddr;
              __u32                           flags;
+             __u32                           reqid;
    };
 
-xfrm_usersa_id in this message layout identifies the SA.
+The unique SA is identified by the combination of xfrm_usersa_id,
+reqid and saddr.
 
 flags are used to indicate different things. The possible
 flags are:
diff --git a/Documentation/pnp.txt b/Documentation/pnp.txt
index 9ff966b..28037aa 100644
--- a/Documentation/pnp.txt
+++ b/Documentation/pnp.txt
@@ -184,7 +184,7 @@
 Please note that the character 'X' can be used as a wild card in the function
 portion (last four characters).
 ex:
-	/* Unkown PnP modems */
+	/* Unknown PnP modems */
 	{	"PNPCXXX",		UNKNOWN_DEV	},
 
 Supported PnP card IDs can optionally be defined.
diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt
index 24edf25..c750f9f 100644
--- a/Documentation/power/pci.txt
+++ b/Documentation/power/pci.txt
@@ -153,7 +153,7 @@
 	events, which is implicit if it doesn't even support it in the first
 	place).
 
-	Note that the PMC Register in the device's PM Capabilties has a bitmask
+	Note that the PMC Register in the device's PM Capabilities has a bitmask
 	of the states it supports generating PME# from. D3hot is bit 3 and
 	D3cold is bit 4. So, while a value of 4 as the state may not seem
 	semantically correct, it is. 
@@ -268,7 +268,7 @@
 some non-standard way of generating a wake event on sleep.)
 
 Bits 15:11 of the PMC (Power Mgmt Capabilities) Register in a device's
-PM Capabilties describe what power states the device supports generating a 
+PM Capabilities describe what power states the device supports generating a 
 wake event from:
 
 +------------------+
diff --git a/Documentation/power/s2ram.txt b/Documentation/power/s2ram.txt
new file mode 100644
index 0000000..b05f512
--- /dev/null
+++ b/Documentation/power/s2ram.txt
@@ -0,0 +1,56 @@
+			How to get s2ram working
+			~~~~~~~~~~~~~~~~~~~~~~~~
+			2006 Linus Torvalds
+			2006 Pavel Machek
+
+1) Check suspend.sf.net; the s2ram program there has a long whitelist of
+   "known ok" machines, along with tricks to use on each one.
+
+2) If that does not help, try reading tricks.txt and
+   video.txt. Perhaps the problem is as simple as a broken module, and
+   a simple module unload can fix it.
+
+3) You can use Linus' TRACE_RESUME infrastructure, described below.
+
+		      Using TRACE_RESUME
+		      ~~~~~~~~~~~~~~~~~~
+
+I've been working at making the machines I have able to STR, and almost
+always it's a driver that is buggy. Thank God for the suspend/resume
+debugging - the thing that Chuck tried to disable. That's often the _only_
+way to debug these things, and it's actually pretty powerful (but
+time-consuming - having to insert TRACE_RESUME() markers into the device
+driver that doesn't resume and recompile and reboot).
+
+Anyway, the way to debug this for people who are interested (have a
+machine that doesn't boot) is:
+
+ - enable PM_DEBUG, and PM_TRACE
+
+ - use a script like this:
+
+	#!/bin/sh
+	sync
+	echo 1 > /sys/power/pm_trace
+	echo mem > /sys/power/state
+
+   to suspend
+
+ - if it doesn't come back up (which is usually the problem), reboot by
+   holding the power button down, and look at the dmesg output for things
+   like
+
+	Magic number: 4:156:725
+	hash matches drivers/base/power/resume.c:28
+	hash matches device 0000:01:00.0
+
+   which means that the last trace event was just before trying to resume
+   device 0000:01:00.0. Then figure out what driver is controlling that
+   device (lspci and /sys/devices/pci* is your friend), and see if you can
+   fix it, disable it, or trace into its resume function.
+
+For example, the above happens to be the VGA device on my EVO, which I
+used to run with "radeonfb" (it's an ATI Radeon mobility). It turns out
+that "radeonfb" simply cannot resume that device - it tries to set the
+PLL's, and it just _hangs_. Using the regular VGA console and letting X
+resume it instead works fine.
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt
index 3e5e5d3..0931a33 100644
--- a/Documentation/power/states.txt
+++ b/Documentation/power/states.txt
@@ -62,7 +62,7 @@
 inconvenience, this method requires minimal work by the kernel, since
 the firmware will also handle restoring memory contents on resume. 
 
-If the kernel is responsible for persistantly saving state, a mechanism 
+If the kernel is responsible for persistently saving state, a mechanism
 called 'swsusp' (Swap Suspend) is used to write memory contents to
 free swap space. swsusp has some restrictive requirements, but should
 work in most cases. Some, albeit outdated, documentation can be found
diff --git a/Documentation/power/swsusp-and-swap-files.txt b/Documentation/power/swsusp-and-swap-files.txt
new file mode 100644
index 0000000..06f911a
--- /dev/null
+++ b/Documentation/power/swsusp-and-swap-files.txt
@@ -0,0 +1,60 @@
+Using swap files with software suspend (swsusp)
+	(C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+
+The Linux kernel handles swap files almost in the same way as it handles swap
+partitions and there are only two differences between these two types of swap
+areas:
+(1) swap files need not be contiguous,
+(2) the header of a swap file is not in the first block of the partition that
+holds it.  From the swsusp's point of view (1) is not a problem, because it is
+already taken care of by the swap-handling code, but (2) has to be taken into
+consideration.
+
+In principle the location of a swap file's header may be determined with the
+help of the appropriate filesystem driver.  Unfortunately, however, it requires the
+filesystem holding the swap file to be mounted, and if this filesystem is
+journaled, it cannot be mounted during resume from disk.  For this reason to
+identify a swap file swsusp uses the name of the partition that holds the file
+and the offset from the beginning of the partition at which the swap file's
+header is located.  For convenience, this offset is expressed in <PAGE_SIZE>
+units.
+
+In order to use a swap file with swsusp, you need to:
+
+1) Create the swap file and make it active, eg.
+
+# dd if=/dev/zero of=<swap_file_path> bs=1024 count=<swap_file_size_in_k>
+# mkswap <swap_file_path>
+# swapon <swap_file_path>
+
+2) Use an application that will bmap the swap file with the help of the
+FIBMAP ioctl and determine the location of the file's swap header, as the
+offset, in <PAGE_SIZE> units, from the beginning of the partition which
+holds the swap file (a minimal sketch of such a tool appears at the end of
+this file).
+
+3) Add the following parameters to the kernel command line:
+
+resume=<swap_file_partition> resume_offset=<swap_file_offset>
+
+where <swap_file_partition> is the partition on which the swap file is located
+and <swap_file_offset> is the offset of the swap header determined by the
+application in 2) (of course, this step may be carried out automatically
+by the same application that determines the swap file's header offset using
+the FIBMAP ioctl).
+
+OR
+
+Use a userland suspend application that will set the partition and offset
+with the help of the SNAPSHOT_SET_SWAP_AREA ioctl described in
+Documentation/power/userland-swsusp.txt (this is the only method to suspend
+to a swap file allowing the resume to be initiated from an initrd or initramfs
+image).
+
+Now, swsusp will use the swap file in the same way in which it would use a swap
+partition.  In particular, the swap file has to be active (ie. be present in
+/proc/swaps) so that it can be used for suspending.
+
+Note that if the swap file used for suspending is deleted and recreated,
+the location of its header need not be the same as before.  Thus every time
+this happens the value of the "resume_offset=" kernel command line parameter
+has to be updated.
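+
+For illustration, here is a minimal sketch of the kind of tool mentioned in
+step 2 above.  It is illustrative only (error handling and the consistency
+checks a real tool would perform are abbreviated, and it must be run as
+root); it maps logical block 0 of the swap file with the FIBMAP ioctl and
+prints the resulting "resume_offset=" value:
+
+	#include <stdio.h>
+	#include <fcntl.h>
+	#include <unistd.h>
+	#include <sys/ioctl.h>
+	#include <linux/fs.h>		/* FIBMAP, FIGETBSZ */
+
+	int main(int argc, char **argv)
+	{
+		int fd, blksize, block = 0;	/* logical block 0 holds the header */
+		long pagesize = sysconf(_SC_PAGESIZE);
+
+		if (argc != 2) {
+			fprintf(stderr, "usage: %s <swap_file_path>\n", argv[0]);
+			return 1;
+		}
+		fd = open(argv[1], O_RDONLY);
+		if (fd < 0 ||
+		    ioctl(fd, FIGETBSZ, &blksize) < 0 ||	/* fs block size  */
+		    ioctl(fd, FIBMAP, &block) < 0) {		/* physical block */
+			perror(argv[1]);
+			return 1;
+		}
+		/* the offset is expressed in <PAGE_SIZE> units from the start
+		 * of the partition that holds the swap file */
+		printf("resume_offset=%llu\n",
+		       (unsigned long long)block * blksize / pagesize);
+		return 0;
+	}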
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index 9ea2208..0761ff6 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -153,7 +153,7 @@
 
 If the thread is needed for writing the image to storage, you should
 instead set the PF_NOFREEZE process flag when creating the thread (and
-be very carefull).
+be very careful).
 
 
 Q: What is the difference between "platform", "shutdown" and
@@ -297,20 +297,12 @@
 suspend image to prevent sensitive data from being stolen after
 resume.
 
-Q: Why can't we suspend to a swap file?
+Q: Can I suspend to a swap file?
 
-A: Because accessing swap file needs the filesystem mounted, and
-filesystem might do something wrong (like replaying the journal)
-during mount.
-
-There are few ways to get that fixed:
-
-1) Probably could be solved by modifying every filesystem to support
-some kind of "really read-only!" option. Patches welcome.
-
-2) suspend2 gets around that by storing absolute positions in on-disk
-image (and blocksize), with resume parameter pointing directly to
-suspend header.
+A: Generally, yes, you can.  However, it requires you to use the "resume=" and
+"resume_offset=" kernel command line parameters, so the resume from a swap file
+cannot be initiated from an initrd or initramfs image.  See
+swsusp-and-swap-files.txt for details.
 
 Q: Is there a maximum system RAM size that is supported by swsusp?
 
diff --git a/Documentation/power/userland-swsusp.txt b/Documentation/power/userland-swsusp.txt
index 64755e9..000556c 100644
--- a/Documentation/power/userland-swsusp.txt
+++ b/Documentation/power/userland-swsusp.txt
@@ -9,9 +9,8 @@
 Now, to use the userland interface for software suspend you need special
 utilities that will read/write the system memory snapshot from/to the
 kernel.  Such utilities are available, for example, from
-<http://www.sisk.pl/kernel/utilities/suspend>.  You may want to have
-a look at them if you are going to develop your own suspend/resume
-utilities.
+<http://suspend.sourceforge.net>.  You may want to have a look at them if you
+are going to develop your own suspend/resume utilities.
 
 The interface consists of a character device providing the open(),
 release(), read(), and write() operations as well as several ioctl()
@@ -21,9 +20,9 @@
 
 The device can be open either for reading or for writing.  If open for
 reading, it is considered to be in the suspend mode.  Otherwise it is
-assumed to be in the resume mode.  The device cannot be open for reading
-and writing.  It is also impossible to have the device open more than once
-at a time.
+assumed to be in the resume mode.  The device cannot be open for simultaneous
+reading and writing.  It is also impossible to have the device open more than
+once at a time.
 
 The ioctl() commands recognized by the device are:
 
@@ -69,9 +68,46 @@
 SNAPSHOT_SET_SWAP_FILE - set the resume partition (the last ioctl() argument
 	should specify the device's major and minor numbers in the old
 	two-byte format, as returned by the stat() function in the .st_rdev
-	member of the stat structure); it is recommended to always use this
-	call, because the code to set the resume partition could be removed from
-	future kernels
+	member of the stat structure)
+
+SNAPSHOT_SET_SWAP_AREA - set the resume partition and the offset (in <PAGE_SIZE>
+	units) from the beginning of the partition at which the swap header is
+	located (the last ioctl() argument should point to a struct
+	resume_swap_area, as defined in kernel/power/power.h, containing the
+	resume device specification, as for the SNAPSHOT_SET_SWAP_FILE ioctl(),
+	and the offset); for swap partitions the offset is always 0, but it is
+	different from zero for swap files (please see
+	Documentation/power/swsusp-and-swap-files.txt for details).
+	The SNAPSHOT_SET_SWAP_AREA ioctl() is considered a replacement for
+	SNAPSHOT_SET_SWAP_FILE, which is regarded as obsolete.  It is
+	recommended to always use this call, because the code to set the resume
+	partition may be removed from future kernels (a usage sketch follows
+	the list of ioctl()s below)
+
+SNAPSHOT_S2RAM - suspend to RAM; using this call causes the kernel to
+	immediately enter the suspend-to-RAM state, so this call must always
+	be preceded by the SNAPSHOT_FREEZE call and it is also necessary
+	to use the SNAPSHOT_UNFREEZE call after the system wakes up.  This call
+	is needed to implement the suspend-to-both mechanism in which the
+	suspend image is first created, as though the system had been suspended
+	to disk, and then the system is suspended to RAM (this makes it possible
+	to resume the system from RAM if there's enough battery power or restore
+	its state on the basis of the saved suspend image otherwise)
+
+SNAPSHOT_PMOPS - enable the usage of the pmops->prepare, pmops->enter and
+	pmops->finish methods (the in-kernel swsusp knows these as the "platform
+	method") which are needed on many machines to (among other things) speed up
+	the resume by letting the BIOS skip some steps or to let the system
+	recognise the correct state of the hardware after the resume (in
+	particular on many machines this ensures that unplugged AC
+	adapters get correctly detected and that kacpid does not run wild after
+	the resume).  The last ioctl() argument can take one of the three
+	values, defined in kernel/power/power.h:
+	PMOPS_PREPARE - make the kernel carry out the
+		pm_ops->prepare(PM_SUSPEND_DISK) operation
+	PMOPS_ENTER - make the kernel power off the system by calling
+		pm_ops->enter(PM_SUSPEND_DISK)
+	PMOPS_FINISH - make the kernel carry out the
+		pm_ops->finish(PM_SUSPEND_DISK) operation
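+
+As an illustration only (not a definitive implementation), a suspend utility
+might use SNAPSHOT_SET_SWAP_AREA roughly as sketched below.  The sketch
+assumes that the struct resume_swap_area and SNAPSHOT_* definitions have been
+copied from kernel/power/power.h, that the snapshot device node is available
+(commonly created as /dev/snapshot), and that the structure's field names
+follow the description given above:
+
+	#include <fcntl.h>
+	#include <unistd.h>
+	#include <sys/stat.h>
+	#include <sys/ioctl.h>
+	#include "power.h"	/* assumed local copy of kernel/power/power.h bits */
+
+	/* tell the kernel where the swap header lives: resume partition plus
+	 * offset in <PAGE_SIZE> units (0 for a plain swap partition) */
+	static int set_swap_area(int snapshot_fd, const char *resume_part,
+				 unsigned long long offset)
+	{
+		struct resume_swap_area swap_area;
+		struct stat st;
+
+		if (stat(resume_part, &st) < 0)
+			return -1;
+		swap_area.dev = st.st_rdev;	/* as returned by stat(), see above */
+		swap_area.offset = offset;
+		return ioctl(snapshot_fd, SNAPSHOT_SET_SWAP_AREA, &swap_area);
+	}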
 
 The device's read() operation can be used to transfer the snapshot image from
 the kernel.  It has the following limitations:
@@ -91,10 +127,12 @@
 still frozen when the device is being closed).
 
 Currently it is assumed that the userland utilities reading/writing the
-snapshot image from/to the kernel will use a swap partition, called the resume
-partition, as storage space.  However, this is not really required, as they
-can use, for example, a special (blank) suspend partition or a file on a partition
-that is unmounted before SNAPSHOT_ATOMIC_SNAPSHOT and mounted afterwards.
+snapshot image from/to the kernel will use a swap partition, called the resume
+partition, or a swap file as storage space (if a swap file is used, the resume
+partition is the partition that holds this file).  However, this is not really
+required, as they can use, for example, a special (blank) suspend partition or
+a file on a partition that is unmounted before SNAPSHOT_ATOMIC_SNAPSHOT and
+mounted afterwards.
 
 These utilities SHOULD NOT make any assumptions regarding the ordering of
 data within the snapshot image, except for the image header that MAY be
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index 27b457c..b3bd366 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -6,6 +6,8 @@
     IBM Corp.
 (c) 2005 Becky Bruce <becky.bruce at freescale.com>,
     Freescale Semiconductor, FSL SOC and 32-bit additions
+(c) 2006 MontaVista Software, Inc.
+    Flash chip node definition
 
    May 18, 2005: Rev 0.1 - Initial draft, no chapter III yet.
 
@@ -33,13 +35,13 @@
                          - Change version 16 format to always align
                            property data to 4 bytes. Since tokens are
                            already aligned, that means no specific
-                           required alignement between property size
+                           required alignment between property size
                            and property data. The old style variable
                            alignment would make it impossible to do
                            "simple" insertion of properties using
                            memove (thanks Milton for
                            noticing). Updated kernel patch as well
-			 - Correct a few more alignement constraints
+			 - Correct a few more alignment constraints
 			 - Add a chapter about the device-tree
                            compiler and the textural representation of
                            the tree that can be "compiled" by dtc.
@@ -854,7 +856,7 @@
       console device if any. Typically, if you have serial devices on
       your board, you may want to put the full path to the one set as
       the default console in the firmware here, for the kernel to pick
-      it up as it's own default console. If you look at the funciton
+      it up as its own default console. If you look at the function
       set_preferred_console() in arch/ppc64/kernel/setup.c, you'll see
       that the kernel tries to find out the default console and has
       knowledge of various types like 8250 serial ports. You may want
@@ -1124,7 +1126,7 @@
 	- interrupt-parent : contains the phandle of the interrupt
           controller which handles interrupts for this device
 	- interrupts : a list of tuples representing the interrupt
-          number and the interrupt sense and level for each interupt
+          number and the interrupt sense and level for each interrupt
           for this device.
 
 This information is used by the kernel to build the interrupt table
@@ -1693,6 +1695,43 @@
 		};
 	};
 
+    g) Flash chip nodes
+
+    Flash chips (Memory Technology Devices) are often used for solid state
+    file systems on embedded devices.
+
+    Required properties:
+
+     - device_type : has to be "rom"
+     - compatible : Should specify what this ROM device is compatible with
+       (i.e. "onenand"). Currently, this is most likely to be "direct-mapped"
+       (which corresponds to the MTD physmap mapping driver).
+     - regs : Offset and length of the register set (or memory mapping) for
+       the device.
+
+    Recommended properties :
+
+     - bank-width : Width of the flash data bus in bytes. Required
+       for the NOR flashes (compatible == "direct-mapped" and others) ONLY.
+     - partitions : Several pairs of 32-bit values where the first value is
+       the partition's offset from the start of the device and the second one
+       is the partition size in bytes with the LSB used to signify a read only
+       partition (so, the partition size should always be an even number).
+     - partition-names : The list of concatenated zero terminated strings
+       representing the partition names.
+
+   Example:
+
+ 	flash@ff000000 {
+ 		device_type = "rom";
+ 		compatible = "direct-mapped";
+ 		regs = <ff000000 01000000>;
+ 		bank-width = <4>;
+ 		partitions = <00000000 00f80000
+ 			      00f80000 00080001>;
+ 		partition-names = "fs\0firmware";
+ 	};
+
    More devices will be defined as this spec matures.
 
 
diff --git a/Documentation/powerpc/mpc52xx-device-tree-bindings.txt b/Documentation/powerpc/mpc52xx-device-tree-bindings.txt
new file mode 100644
index 0000000..d077d76
--- /dev/null
+++ b/Documentation/powerpc/mpc52xx-device-tree-bindings.txt
@@ -0,0 +1,189 @@
+MPC52xx Device Tree Bindings
+----------------------------
+
+(c) 2006 Secret Lab Technologies Ltd
+Grant Likely <grant.likely at secretlab.ca>
+
+I - Introduction
+================
+Boards supported by the arch/powerpc architecture require a device tree to be
+passed by the boot loader to the kernel at boot time.  The device tree
+describes what devices are present on the board and how they are
+connected.  The device tree can either be passed as a binary blob (as
+described in Documentation/powerpc/booting-without-of.txt), or passed
+by Open Firmware (IEEE 1275) compatible firmware using an OF compatible
+client interface API.
+
+This document specifies the requirements on the device-tree for mpc52xx
+based boards.  These requirements are above and beyond the details
+specified in either the OpenFirmware spec or booting-without-of.txt
+
+All new mpc52xx-based boards are expected to match this document.  In
+cases where this document is not sufficient to support a new board port,
+this document should be updated as part of adding the new board support.
+
+II - Philosophy
+===============
+The core of this document is naming convention.  The whole point of
+defining this convention is to reduce or eliminate the number of
+special cases required to support a 52xx board.  If all 52xx boards
+follow the same convention, then generic 52xx support code will work
+rather than coding special cases for each new board.
+
+This section tries to capture the thought process behind why the naming
+convention is what it is.
+
+1. Node names
+-------------
+There is strong convention/requirements already established for children
+of the root node.  'cpus' describes the processor cores, 'memory'
+describes memory, and 'chosen' provides boot configuration.  Other nodes
+are added to describe devices attached to the processor local bus.
+Following convention already established with other system-on-chip
+processors, MPC52xx boards must have an 'soc5200' node as a child of the
+root node.
+
+The soc5200 node holds child nodes for all on chip devices.  Child nodes
+are typically named after the configured function.  ie. the FEC node is
+named 'ethernet', and a PSC in uart mode is named 'serial'.
+
+2. device_type property
+-----------------------
+Similar to the node name convention above, the device_type reflects the
+configured function of a device.  ie. 'serial' for a uart and 'spi' for
+an spi controller.  However, while node names *should* reflect the
+configured function, device_type *must* match the configured function
+exactly.
+
+3. compatible property
+----------------------
+Since device_type isn't enough to match devices to drivers, there also
+needs to be a naming convention for the compatible property.  Compatible
+is a list of device descriptions sorted from specific to generic.  For
+the mpc52xx, the required format for each compatible value is
+<chip>-<device>[-<mode>].  At a minimum, the list shall contain two
+items: the first specifying the exact chip, and the second specifying
+mpc52xx for the chip.
+
+ie. ethernet on mpc5200b: compatible = "mpc5200b-ethernet\0mpc52xx-ethernet"
+
+The idea here is that most drivers will match to the most generic field
+in the compatible list (mpc52xx-*), but can also test the more specific
+field for enabling bug fixes or extra features.
+
+Modal devices, like PSCs, also append the configured function to the
+end of the compatible field.  ie. A PSC in i2s mode would specify
+"mpc52xx-psc-i2s", not "mpc52xx-i2s".  This convention is chosen to
+avoid naming conflicts with non-psc devices providing the same
+function.  For example, "mpc52xx-spi" and "mpc52xx-psc-spi" describe
+the mpc5200 simple spi device and a PSC spi mode respectively.
+
+If the soc device is more generic and present on other SOCs, the
+compatible property can specify the more generic device type also.
+
+ie. mscan: compatible = "mpc5200-mscan\0mpc52xx-mscan\0fsl,mscan";
+
+At the time of writing, the exact chip may be either 'mpc5200' or
+'mpc5200b'.
+
+Device drivers should always try to match as generically as possible.
+
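+As a hedged illustration (the table and identifier names here are
+hypothetical, not part of this binding), a Linux driver would typically bind
+against the generic compatible value and only check the more specific one
+when it needs a chip-dependent workaround:
+
+	#include <linux/mod_devicetable.h>
+
+	/* matches both "mpc5200-psc-uart" and "mpc5200b-psc-uart" nodes,
+	 * because they list "mpc52xx-psc-uart" as their generic fallback */
+	static struct of_device_id mpc52xx_psc_uart_match[] = {
+		{ .compatible = "mpc52xx-psc-uart", },
+		{ /* sentinel */ },
+	};
+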
+III - Structure
+===============
+The device tree for an mpc52xx board follows the structure defined in
+booting-without-of.txt with the following additional notes:
+
+0) the root node
+----------------
+Typical root description node; see booting-without-of
+
+1) The cpus node
+----------------
+The cpus node follows the basic layout described in booting-without-of.
+The bus-frequency property holds the XLB bus frequency
+The clock-frequency property holds the core frequency
+
+2) The memory node
+------------------
+Typical memory description node; see booting-without-of.
+
+3) The soc5200 node
+-------------------
+This node describes the on chip SOC peripherals.  Every mpc52xx based
+board will have this node, and as such there is a common naming
+convention for SOC devices.
+
+Required properties:
+name			type		description
+----			----		-----------
+device_type		string		must be "soc"
+ranges			int		should be <0 baseaddr baseaddr+10000>
+reg			int		must be <baseaddr 10000>
+
+Recommended properties:
+name			type		description
+----			----		-----------
+compatible		string		should be "<chip>-soc\0mpc52xx-soc"
+					ie. "mpc5200b-soc\0mpc52xx-soc"
+#interrupt-cells	int		must be <3>.  If it is not defined
+					here then it must be defined in every
+					soc device node.
+bus-frequency		int		IPB bus frequency in HZ.  Clock rate
+					used by most of the soc devices.
+					Defining it here avoids needing it
+					added to every device node.
+
+4) soc5200 child nodes
+----------------------
+Any on chip SOC devices available to Linux must appear as soc5200 child nodes.
+
+Note: in the tables below, '*' matches all <chip> values.  ie.
+*-pic would translate to "mpc5200-pic\0mpc52xx-pic"
+
+Required soc5200 child nodes:
+name		device_type		compatible	Description
+----		-----------		----------	-----------
+cdm@<addr>	cdm			*-cmd		Clock Distribution
+pic@<addr>	interrupt-controller	*-pic		need an interrupt
+							controller to boot
+bestcomm@<addr>	dma-controller		*-bestcomm	52xx pic also requires
+							the bestcomm device
+
+Recommended soc5200 child nodes; populate as needed for your board
+name		device_type	compatible	Description
+----		-----------	----------	-----------
+gpt@<addr>	gpt		*-gpt		General purpose timers
+rtc@<addr>	rtc		*-rtc		Real time clock
+mscan@<addr>	mscan		*-mscan		CAN bus controller
+pci@<addr>	pci		*-pci		PCI bridge
+serial@<addr>	serial		*-psc-uart	PSC in serial mode
+i2s@<addr>	i2s		*-psc-i2s	PSC in i2s mode
+ac97@<addr>	ac97		*-psc-ac97	PSC in ac97 mode
+spi@<addr>	spi		*-psc-spi	PSC in spi mode
+irda@<addr>	irda		*-psc-irda	PSC in IrDA mode
+spi@<addr>	spi		*-spi		MPC52xx spi device
+ethernet@<addr>	network		*-fec		MPC52xx ethernet device
+ata@<addr>	ata		*-ata		IDE ATA interface
+i2c@<addr>	i2c		*-i2c		I2C controller
+usb@<addr>	usb-ohci-be	*-ohci,ohci-be	USB controller
+xlb@<addr>	xlb		*-xlb		XLB arbitrator
+
+IV - Extra Notes
+================
+
+1. Interrupt mapping
+--------------------
+The mpc52xx pic driver splits hardware IRQ numbers into two levels.  The
+split reflects the layout of the PIC hardware itself, which groups
+interrupts into one of three groups: CRIT, MAIN or PERP.  Also, the
+Bestcomm dma engine has its own set of interrupt sources which are
+cascaded off peripheral interrupt 0, which the driver interprets as a
+fourth group, SDMA.
+
+The interrupts property for device nodes using the mpc52xx pic consists
+of three cells: <L1 L2 level>
+
+    L1 := [CRIT=0, MAIN=1, PERP=2, SDMA=3]
+    L2 := interrupt number; directly mapped from the value in the
+          "ICTL PerStat, MainStat, CritStat Encoded Register"
+    level := [LEVEL_HIGH=0, EDGE_RISING=1, EDGE_FALLING=2, LEVEL_LOW=3]
diff --git a/Documentation/robust-futex-ABI.txt b/Documentation/robust-futex-ABI.txt
index 8529a17..535f69f 100644
--- a/Documentation/robust-futex-ABI.txt
+++ b/Documentation/robust-futex-ABI.txt
@@ -170,7 +170,7 @@
  1) the 'head' pointer or an subsequent linked list pointer
     is not a valid address of a user space word
  2) the calculated location of the 'lock word' (address plus
-    'offset') is not the valud address of a 32 bit user space
+    'offset') is not the valid address of a 32 bit user space
     word
  3) if the list contains more than 1 million (subject to
     future kernel configuration changes) elements.
diff --git a/Documentation/robust-futexes.txt b/Documentation/robust-futexes.txt
index 76e8064..0a9446a 100644
--- a/Documentation/robust-futexes.txt
+++ b/Documentation/robust-futexes.txt
@@ -181,7 +181,7 @@
 So there is virtually zero overhead for tasks not using robust futexes,
 and even for robust futex users, there is only one extra syscall per
 thread lifetime, and the cleanup operation, if it happens, is fast and
-straightforward. The kernel doesnt have any internal distinction between
+straightforward. The kernel doesn't have any internal distinction between
 robust and normal futexes.
 
 If a futex is found to be held at exit time, the kernel sets the
diff --git a/Documentation/s390/CommonIO b/Documentation/s390/CommonIO
index d684a6a..22f82f2 100644
--- a/Documentation/s390/CommonIO
+++ b/Documentation/s390/CommonIO
@@ -74,7 +74,7 @@
 
   Note: While already known devices can be added to the list of devices to be
         ignored, there will be no effect on then. However, if such a device
-        disappears and then reappeares, it will then be ignored.
+	disappears and then reappears, it will then be ignored.
 
   For example,
 	"echo add 0.0.a000-0.0.accc, 0.0.af00-0.0.afff > /proc/cio_ignore"
@@ -82,7 +82,7 @@
   devices.
 
   The devices can be specified either by bus id (0.0.abcd) or, for 2.4 backward
-  compatibilty, by the device number in hexadecimal (0xabcd or abcd).
+  compatibility, by the device number in hexadecimal (0xabcd or abcd).
 
 
 * /proc/s390dbf/cio_*/ (S/390 debug feature)
diff --git a/Documentation/s390/Debugging390.txt b/Documentation/s390/Debugging390.txt
index 4dd25ee..3f9ddbc 100644
--- a/Documentation/s390/Debugging390.txt
+++ b/Documentation/s390/Debugging390.txt
@@ -7,7 +7,7 @@
 
 Overview of Document:
 =====================
-This document is intended to give an good overview of how to debug 
+This document is intended to give a good overview of how to debug
 Linux for s/390 & z/Architecture. It isn't intended as a complete reference & not a
 tutorial on the fundamentals of C & assembly. It doesn't go into
 390 IO in any detail. It is intended to complement the documents in the
@@ -300,7 +300,7 @@
 but only mess with 2 segment indices each time we mess with
 a PMD.
 
-3) As z/Architecture supports upto a massive 5-level page table lookup we 
+3) As z/Architecture supports up to a massive 5-level page table lookup we
 can only use 3 currently on Linux ( as this is all the generic kernel
 currently supports ) however this may change in future
 this allows us to access ( according to my sums )
@@ -502,7 +502,7 @@
 ------
 1) The only requirement is that registers which are used
 by the callee are saved, e.g. the compiler is perfectly
-capible of using r11 for purposes other than a frame a
+capable of using r11 for purposes other than a
 frame pointer if a frame pointer is not needed.
 2) In functions with variable arguments e.g. printf the calling procedure 
 is identical to one without variable arguments & the same number of 
@@ -846,7 +846,7 @@
 instead if the code isn't compiled -g, as it is much faster:
 objdump --disassemble-all --syms vmlinux > vmlinux.lst  
 
-As hard drive space is valuble most of us use the following approach.
+As hard drive space is valuable most of us use the following approach.
 1) Look at the emitted psw on the console to find the crash address in the kernel.
 2) Look at the file System.map ( in the linux directory ) produced when building 
 the kernel to find the closest address less than the current PSW to find the
@@ -902,7 +902,7 @@
 to a file & on the screen.
 
 Q. What use is it ?
-A. You can used it to find out what files a particular program opens.
+A. You can use it to find out what files a particular program opens.
 
 
 
@@ -911,7 +911,7 @@
 If you wanted to know does ping work but didn't have the source 
 strace ping -c 1 127.0.0.1  
 & then look at the man pages for each of the syscalls below,
-( In fact this is sometimes easier than looking at some spagetti
+( In fact this is sometimes easier than looking at some spaghetti
 source which conditionally compiles for several architectures ).
 Not everything that it throws out needs to make sense immediately.
 
@@ -1037,7 +1037,7 @@
 
 Performance Debugging
 =====================
-gcc is capible of compiling in profiling code just add the -p option
+gcc is capable of compiling in profiling code; just add the -p option
 to the CFLAGS, this obviously affects program size & performance.
 This can be used by the gprof gnu profiling tool or the
 gcov the gnu code coverage tool ( code coverage is a means of testing
@@ -1419,7 +1419,7 @@
 To issue a command to a particular cpu try cpu <cpu number> e.g.
 CPU 01 TR I R 2000.3000
 If you are running on a guest with several cpus & you have a IO related problem
-& cannot follow the flow of code but you know it isnt smp related.
+& cannot follow the flow of code but you know it isn't smp related.
 from the bash prompt issue
 shutdown -h now or halt.
 do a Q CPUS to find out how many cpus you have
@@ -1602,7 +1602,7 @@
 our 3rd return address is 8001085A
 
 as the 04B52002 looks suspiciously like rubbish it is fair to assume that the kernel entry routines
-for the sake of optimisation dont set up a backchain.
+for the sake of optimisation don't set up a backchain.
 
 now look at System.map to see if the addresses make any sense.
 
@@ -1638,11 +1638,11 @@
 
 Unlike other bus architectures modern 390 systems do their IO using mostly
 fibre optics & devices such as tapes & disks can be shared between several mainframes,
-also S390 can support upto 65536 devices while a high end PC based system might be choking 
+also S390 can support up to 65536 devices while a high end PC based system might be choking
 with around 64. Here is some of the common IO terminology
 
 Subchannel:
-This is the logical number most IO commands use to talk to an IO device there can be upto
+This is the logical number most IO commands use to talk to an IO device there can be up to
 0x10000 (65536) of these in a configuration typically there is a few hundred. Under VM
 for simplicity they are allocated contiguously, however on the native hardware they are not
 they typically stay consistent between boots provided no new hardware is inserted or removed.
@@ -1651,7 +1651,7 @@
 TEST SUBCHANNEL ) we use this as the ID of the device we wish to talk to, the most
 important of these instructions are START SUBCHANNEL ( to start IO ), TEST SUBCHANNEL ( to check
 whether the IO completed successfully ), & HALT SUBCHANNEL ( to kill IO ), a subchannel
-can have up to 8 channel paths to a device this offers redunancy if one is not available.
+can have up to 8 channel paths to a device this offers redundancy if one is not available.
 
 
 Device Number:
@@ -1659,7 +1659,7 @@
 also they are made up of a CHPID ( Channel Path ID, the most significant 8 bits ) 
 & another lsb 8 bits. These remain static even if more devices are inserted or removed
 from the hardware, there is a 1 to 1 mapping between Subchannels & Device Numbers provided
-devices arent inserted or removed.
+devices aren't inserted or removed.
 
 Channel Control Words:
 CCWS are linked lists of instructions initially pointed to by an operation request block (ORB),
@@ -1674,7 +1674,7 @@
 from which you receive an Interruption response block (IRB). If you get channel & device end 
 status in the IRB without channel checks etc. your IO probably went okay. If you didn't you
 probably need a doctor to examine the IRB & extended status word etc.
-If an error occurs, more sophistocated control units have a facitity known as
+If an error occurs, more sophisticated control units have a facility known as
 concurrent sense this means that if an error occurs Extended sense information will
 be presented in the Extended status word in the IRB if not you have to issue a
 subsequent SENSE CCW command after the test subchannel. 
@@ -1749,7 +1749,7 @@
 This byte wide Parallel channel path/bus has parity & data on the "Bus" cable 
 & control lines on the "Tag" cable. These can operate in byte multiplex mode for
 sharing between several slow devices or burst mode & monopolize the channel for the
-whole burst. Upto 256 devices can be addressed  on one of these cables. These cables are
+whole burst. Up to 256 devices can be addressed  on one of these cables. These cables are
 about one inch in diameter. The maximum unextended length supported by these cables is
 125 Meters but this can be extended up to 2km with a fibre optic channel extended 
 such as a 3044. The maximum burst speed supported is 4.5 megabytes per second however
@@ -1759,7 +1759,7 @@
 
 ESCON if fibre optic it is also called FICON 
 Was introduced by IBM in 1990. Has 2 fibre optic cables & uses either leds or lasers
-for communication at a signaling rate of upto 200 megabits/sec. As 10bits are transferred
+for communication at a signaling rate of up to 200 megabits/sec. As 10bits are transferred
 for every 8 bits info this drops to 160 megabits/sec & to 18.6 Megabytes/sec once
 control info & CRC are added. ESCON only operates in burst mode.
  
@@ -1767,7 +1767,7 @@
 known as XDF ( extended distance facility ). This can be further extended by using an
 ESCON director which triples the above mentioned ranges. Unlike Bus & Tag as ESCON is
 serial it uses a packet switching architecture the standard Bus & Tag control protocol
-is however present within the packets. Upto 256 devices can be attached to each control 
+is however present within the packets. Up to 256 devices can be attached to each control
 unit that uses one of these interfaces.
 
 Common 390 Devices include:
@@ -2050,7 +2050,7 @@
 
 directory:
 Adds directories to be searched for source if gdb cannot find the source.
-(note it is a bit sensititive about slashes) 
+(note it is a bit sensitive about slashes)
 e.g. To add the root of the filesystem to the searchpath do
 directory //
 
@@ -2152,7 +2152,7 @@
 current working directory.
 This is very useful in that a customer can mail a core dump to a technical support department
 & the technical support department can reconstruct what happened.
-Provided the have an identical copy of this program with debugging symbols compiled in & 
+Provided they have an identical copy of this program with debugging symbols compiled in &
 the source base of this build is available.
 In short it is far more useful than something like a crash log could ever hope to be.
 
diff --git a/Documentation/s390/cds.txt b/Documentation/s390/cds.txt
index 32a96cc..05a2b4f 100644
--- a/Documentation/s390/cds.txt
+++ b/Documentation/s390/cds.txt
@@ -98,7 +98,7 @@
 Linux/390 common device support (CDS) provides to allow for device specific
 driver implementations on the IBM ESA/390 hardware platform. Those interfaces
 intend to provide the functionality required by every device driver
-implementaion to allow to drive a specific hardware device on the ESA/390
+implementation to allow it to drive a specific hardware device on the ESA/390
 platform. Some of the interface routines are specific to Linux/390 and some
 of them can be found on other Linux platforms implementations too.
 Miscellaneous function prototypes, data declarations, and macro definitions
@@ -114,7 +114,7 @@
 provides a unified view of the devices physically attached to the systems.
 Though the ESA/390 hardware platform knows about a huge variety of different
 peripheral attachments like disk devices (aka. DASDs), tapes, communication
-controllers, etc. they can all by accessed by a well defined access method and
+controllers, etc. they can all be accessed by a well defined access method and
 they are presenting I/O completion a unified way : I/O interruptions. Every
 single device is uniquely identified to the system by a so called subchannel,
 where the ESA/390 architecture allows for 64k devices be attached.
@@ -338,7 +338,7 @@
 The ccw_device_start() function returns :
 
       0 - successful completion or request successfully initiated
--EBUSY  - The device is currently processing a previous I/O request, or ther is
+-EBUSY	- The device is currently processing a previous I/O request, or there is
           a status pending at the device.
 -ENODEV - cdev is invalid, the device is not operational or the ccw_device is
           not online.
@@ -361,7 +361,7 @@
 -EIO:       the common I/O layer terminated the request due to an error state
 
 If the concurrent sense flag in the extended status word in the irb is set, the
-field irb->scsw.count describes the numer of device specific sense bytes
+field irb->scsw.count describes the number of device specific sense bytes
 available in the extended control word irb->scsw.ecw[0]. No device sensing by
 the device driver itself is required.
 
@@ -410,7 +410,7 @@
 
 The device driver is allowed to issue the next ccw_device_start() call from
 within its interrupt handler already. It is not required to schedule a
-bottom-half, unless an non deterministically long running error recovery procedure
+bottom-half, unless a non deterministically long running error recovery procedure
 or similar needs to be scheduled. During I/O processing the Linux/390 generic
 I/O device driver support has already obtained the IRQ lock, i.e. the handler
 must not try to obtain it again when calling ccw_device_start() or we end in a
@@ -431,7 +431,7 @@
 case all I/O interruptions are presented to the device driver until final
 status is recognized.
 
-If a device is able to recover from asynchronosly presented I/O errors, it can
+If a device is able to recover from asynchronously presented I/O errors, it can
 perform overlapping I/O using the DOIO_EARLY_NOTIFICATION flag. While some
 devices always report channel-end and device-end together, with a single
 interrupt, others present primary status (channel-end) when the channel is
diff --git a/Documentation/s390/crypto/crypto-API.txt b/Documentation/s390/crypto/crypto-API.txt
index 29dee79..71ae6ca 100644
--- a/Documentation/s390/crypto/crypto-API.txt
+++ b/Documentation/s390/crypto/crypto-API.txt
@@ -17,8 +17,8 @@
 2. Probing for availability of MSA
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 It should be possible to use Kernels with the z990 crypto implementations both
-on machines with MSA available an on those without MSA (pre z990 or z990
-without MSA). Therefore a simple probing mechanisms has been implemented:
+on machines with MSA available and on those without MSA (pre z990 or z990
+without MSA). Therefore a simple probing mechanism has been implemented:
 In the init function of each crypto module the availability of MSA and of the
 respective crypto algorithm in particular will be tested. If the algorithm is
 available the module will load and register its algorithm with the crypto API.
@@ -26,7 +26,7 @@
 If the respective crypto algorithm is not available, the init function will
 return -ENOSYS. In that case a fallback to the standard software implementation
 of the crypto algorithm must be taken ( -> the standard crypto modules are
-also build when compiling the kernel).
+also built when compiling the kernel).
 
 
 3. Ensuring z990 crypto module preference
@@ -75,8 +75,8 @@
 
 - SHA1 Digest Algorithm [sha1 -> sha1_z990]
 - DES Encrypt/Decrypt Algorithm (64bit key) [des -> des_z990]
-- Tripple DES Encrypt/Decrypt Algorithm (128bit key) [des3_ede128 -> des_z990]
-- Tripple DES Encrypt/Decrypt Algorithm (192bit key) [des3_ede -> des_z990]
+- Triple DES Encrypt/Decrypt Algorithm (128bit key) [des3_ede128 -> des_z990]
+- Triple DES Encrypt/Decrypt Algorithm (192bit key) [des3_ede -> des_z990]
 
 In order to load, for example, the sha1_z990 module when the sha1 algorithm is
 requested (see 3.2.) add 'alias sha1 sha1_z990' to /etc/modprobe.conf.
diff --git a/Documentation/s390/s390dbf.txt b/Documentation/s390/s390dbf.txt
index 000230c..0eb7c58 100644
--- a/Documentation/s390/s390dbf.txt
+++ b/Documentation/s390/s390dbf.txt
@@ -36,7 +36,7 @@
 that the records which describe the origin of the exception are not
 overwritten when a wrap around for the current area occurs.
 
-The debug areas itselve are also ordered in form of a ring buffer. 
+The debug areas themselves are also ordered in the form of a ring buffer.
 When an exception is thrown in the last debug area, the following debug 
 entries are then written again in the very first area.
 
@@ -55,7 +55,7 @@
 the debugfs-filesystem. Under the toplevel directory "s390dbf" there is
 a directory for each registered component, which is named like the
 corresponding component. The debugfs normally should be mounted to
-/sys/kernel/debug therefore the debug feature can be accessed unter
+/sys/kernel/debug therefore the debug feature can be accessed under
 /sys/kernel/debug/s390dbf.
 
 The content of the directories are files which represent different views
@@ -87,11 +87,11 @@
 globally. The first possibility is to use the "debug_active" sysctl. If
 set to 1 the debug feature is running. If "debug_active" is set to 0 the
 debug feature is turned off.
-The second trigger which stops the debug feature is an kernel oops.
+The second trigger which stops the debug feature is a kernel oops.
 That prevents the debug feature from overwriting debug information that
 happened before the oops. After an oops you can reactivate the debug feature
 by piping 1 to /proc/sys/s390dbf/debug_active. Nevertheless, its not
-suggested to use an oopsed kernel in an production environment.
+suggested to use an oopsed kernel in a production environment.
 If you want to disallow the deactivation of the debug feature, you can use
 the "debug_stoppable" sysctl. If you set "debug_stoppable" to 0 the debug
 feature cannot be stopped. If the debug feature is already stopped, it
diff --git a/Documentation/scsi/aic79xx.txt b/Documentation/scsi/aic79xx.txt
index 904d49e..6aa9a89 100644
--- a/Documentation/scsi/aic79xx.txt
+++ b/Documentation/scsi/aic79xx.txt
@@ -127,7 +127,7 @@
         - Correct a reference to free'ed memory during controller
           shutdown.
         - Reset the bus on an SE->LVD change.  This is required
-          to reset our transcievers.
+          to reset our transceivers.
 
    1.3.5 (March 24th, 2003)
         - Fix a few register window mode bugs.
@@ -169,7 +169,7 @@
    1.3.0 (January 21st, 2003)
         - Full regression testing for all U320 products completed.
         - Added abort and target/lun reset error recovery handler and
-          interrupt coalessing.
+          interrupt coalescing.
 
    1.2.0 (November 14th, 2002)
         - Added support for Domain Validation
diff --git a/Documentation/scsi/aic7xxx_old.txt b/Documentation/scsi/aic7xxx_old.txt
index c92f447..05667e7 100644
--- a/Documentation/scsi/aic7xxx_old.txt
+++ b/Documentation/scsi/aic7xxx_old.txt
@@ -256,7 +256,7 @@
 	      En/Disable High Byte LVD Termination
 
 	The upper 2 bits that deal with LVD termination only apply to Ultra2
-	controllers.  Futhermore, due to the current Ultra2 controller
+	controllers.  Furthermore, due to the current Ultra2 controller
 	designs, these bits are tied together such that setting either bit
 	enables both low and high byte LVD termination.  It is not possible
 	to only set high or low byte LVD termination in this manner.  This is
@@ -436,7 +436,7 @@
     the commas to periods, insmod won't interpret this as more than one
     string and write junk into our binary image.  I consider it a bug in
     the insmod program that even if you wrap your string in quotes (quotes
-    that pass the shell mind you and that insmod sees) it still treates
+    that pass the shell mind you and that insmod sees) it still treats
     a comma inside of those quotes as starting a new variable, resulting
     in memory scribbles if you don't switch the commas to periods.
 
diff --git a/Documentation/scsi/ibmmca.txt b/Documentation/scsi/ibmmca.txt
index 35f6b8e..9707941 100644
--- a/Documentation/scsi/ibmmca.txt
+++ b/Documentation/scsi/ibmmca.txt
@@ -461,7 +461,7 @@
       This needs the RD-Bit to be disabled on IM_OTHER_SCSI_CMD_CMD which 
       allows data to be written from the system to the device. It is a
       necessary step to be allowed to set blocksize of SCSI-tape-drives and 
-      the tape-speed, whithout confusing the SCSI-Subsystem.
+      the tape-speed, without confusing the SCSI-Subsystem.
    2) The recognition of a tape is included in the check_devices routine.
       This is done by checking for TYPE_TAPE, that is already defined in
       the kernel-scsi-environment. The markup of a tape is done in the 
@@ -710,8 +710,8 @@
       of troubles with some controllers and after I wanted to apply some
       extensions, it jumped out in the same situation, on my w/cache, as like 
       on D. Weinehalls' Model 56, having integrated SCSI. This gave me the 
-      descissive hint to move the code-part out and declare it global. Now,
-      it seems to work by far much better an more stable. Let us see, what
+      decisive hint to move the code-part out and declare it global. Now
+      it seems to work far better and more stable. Let us see what
       the world thinks of it...
    3) By the way, only Sony DAT-drives seem to show density code 0x13. A
       test with a HP drive gave right results, so the problem is vendor-
@@ -822,10 +822,10 @@
    A long period of collecting bugreports from all corners of the world
    now lead to the following corrections to the code:
    1) SCSI-2 F/W support crashed with a COMMAND ERROR. The reason for this 
-      was, that it is possible to disbale Fast-SCSI for the external bus.
-      The feature-control command, where this crash appeared regularly tried
+      was that it is possible to disable Fast-SCSI for the external bus.
+      The feature-control command, where this crash appeared regularly, tried
       to set the maximum speed of 10MHz synchronous transfer speed and that
-      reports a COMMAND ERROR, if external bus Fast-SCSI is disabled. Now,
+      reports a COMMAND ERROR if external bus Fast-SCSI is disabled. Now,
       the feature-command probes down from maximum speed until the adapter 
       stops to complain, which is at the same time the maximum possible
       speed selected in the reference program. So, F/W external can run at
@@ -920,7 +920,7 @@
       completed in such a way, that they are now completely conform to the
       demands in the technical description of IBM. Main candidates were the
       DEVICE_INQUIRY, REQUEST_SENSE and DEVICE_CAPACITY commands. They must
-      be tranferred by bypassing the internal command buffer of the adapter
+      be transferred by bypassing the internal command buffer of the adapter
       or else the response can be a random result. GET_POS_INFO would be more
       safe in usage, if one could use the SUPRESS_EXCEPTION_SHORT, but this
       is not allowed by the technical references of IBM. (Sorry, folks, the
diff --git a/Documentation/scsi/in2000.txt b/Documentation/scsi/in2000.txt
index 80f1040..c3e2a90 100644
--- a/Documentation/scsi/in2000.txt
+++ b/Documentation/scsi/in2000.txt
@@ -24,7 +24,7 @@
 UPDATE NEWS: version 1.31 - 6 Jul 97
 
    Fixed a bug that caused incorrect SCSI status bytes to be
-   returned from commands sent to LUN's greater than 0. This
+   returned from commands sent to LUNs greater than 0. This
    means that CDROM changers work now! Fixed a bug in the
    handling of command-line arguments when loaded as a module.
    Also put all the header data in in2000.h where it belongs.
diff --git a/Documentation/scsi/libsas.txt b/Documentation/scsi/libsas.txt
index 9e2078b..aa54f54 100644
--- a/Documentation/scsi/libsas.txt
+++ b/Documentation/scsi/libsas.txt
@@ -393,7 +393,7 @@
 	task_proto -- _one_ of enum sas_proto
 	scatter -- pointer to scatter gather list array
 	num_scatter -- number of elements in scatter
-	total_xfer_len -- total number of bytes expected to be transfered
+	total_xfer_len -- total number of bytes expected to be transferred
 	data_dir -- PCI_DMA_...
 	task_done -- callback when the task has finished execution
 };
diff --git a/Documentation/scsi/ncr53c8xx.txt b/Documentation/scsi/ncr53c8xx.txt
index 58ad8db..caf10b1 100644
--- a/Documentation/scsi/ncr53c8xx.txt
+++ b/Documentation/scsi/ncr53c8xx.txt
@@ -115,7 +115,7 @@
 
           ftp://ftp.symbios.com/
 
-Usefull SCSI tools written by Eric Youngdale are available at tsx-11:
+Useful SCSI tools written by Eric Youngdale are available at tsx-11:
 
           ftp://tsx-11.mit.edu/pub/linux/ALPHA/scsi/scsiinfo-X.Y.tar.gz
           ftp://tsx-11.mit.edu/pub/linux/ALPHA/scsi/scsidev-X.Y.tar.gz
diff --git a/Documentation/scsi/scsi-changer.txt b/Documentation/scsi/scsi-changer.txt
index d74bbd2..032399b 100644
--- a/Documentation/scsi/scsi-changer.txt
+++ b/Documentation/scsi/scsi-changer.txt
@@ -88,7 +88,7 @@
 device [ try "dmesg" if you don't see anything ] and should show up in
 /proc/devices. If not....  some changers use ID ? / LUN 0 for the
 device and ID ? / LUN 1 for the robot mechanism. But Linux does *not*
-look for LUN's other than 0 as default, becauce there are to many
+look for LUNs other than 0 by default, because there are too many
 broken devices. So you can try:
 
   1) echo "scsi add-single-device 0 0 ID 1" > /proc/scsi/scsi
@@ -107,7 +107,7 @@
 strings then.
 
 You can display these messages with the dmesg command (or check the
-logfiles).  If you email me some question becauce of a problem with the
+logfiles).  If you email me some question because of a problem with the
 driver, please include these messages.
 
 
diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
index b964eef..7acbebb 100644
--- a/Documentation/scsi/scsi_eh.txt
+++ b/Documentation/scsi/scsi_eh.txt
@@ -75,7 +75,7 @@
 
  - otherwise
 	scsi_eh_scmd_add(scmd, 0) is invoked for the command.  See
-	[1-3] for details of this funciton.
+	[1-3] for details of this function.
 
 
 [1-2-2] Completing a scmd w/ timeout
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
index 75a535a..6f70f2b 100644
--- a/Documentation/scsi/scsi_mid_low_api.txt
+++ b/Documentation/scsi/scsi_mid_low_api.txt
@@ -375,7 +375,6 @@
    scsi_add_device - creates new scsi device (lu) instance
    scsi_add_host - perform sysfs registration and set up transport class
    scsi_adjust_queue_depth - change the queue depth on a SCSI device
-   scsi_assign_lock - replace default host_lock with given lock
    scsi_bios_ptable - return copy of block device's partition table
    scsi_block_requests - prevent further commands being queued to given host
    scsi_deactivate_tcq - turn off tag command queueing
@@ -489,20 +488,6 @@
 
 
 /**
- * scsi_assign_lock - replace default host_lock with given lock
- * @shost: a pointer to a scsi host instance
- * @lock: pointer to lock to replace host_lock for this host
- *
- *      Returns nothing
- *
- *      Might block: no
- *
- *      Defined in: include/scsi/scsi_host.h .
- **/
-void scsi_assign_lock(struct Scsi_Host *shost, spinlock_t *lock)
-
-
-/**
  * scsi_bios_ptable - return copy of block device's partition table
  * @dev:        pointer to block device
  *
@@ -1366,17 +1351,11 @@
 Each struct Scsi_Host instance has a spin_lock called struct 
 Scsi_Host::default_lock which is initialized in scsi_host_alloc() [found in 
 hosts.c]. Within the same function the struct Scsi_Host::host_lock pointer
-is initialized to point at default_lock with the scsi_assign_lock() function.
-Thereafter lock and unlock operations performed by the mid level use the
-struct Scsi_Host::host_lock pointer.
+is initialized to point at default_lock.  Thereafter lock and unlock
+operations performed by the mid level use the struct Scsi_Host::host_lock
+pointer.  Previously drivers could override the host_lock pointer but
+this is not allowed anymore.
 
-LLDs can override the use of struct Scsi_Host::default_lock by
-using scsi_assign_lock(). The earliest opportunity to do this would
-be in the detect() function after it has invoked scsi_register(). It
-could be replaced by a coarser grain lock (e.g. per driver) or a
-lock of equal granularity (i.e. per host). Using finer grain locks 
-(e.g. per SCSI device) may be possible by juggling locks in
-queuecommand().
 
 Autosense
 =========
diff --git a/Documentation/scsi/st.txt b/Documentation/scsi/st.txt
index 5ff65b1..3c12422 100644
--- a/Documentation/scsi/st.txt
+++ b/Documentation/scsi/st.txt
@@ -261,7 +261,7 @@
 used instead of the equal mark. The definition is prepended by the
 string st=. Here is an example:
 
-	st=buffer_kbs:64,write_threhold_kbs:60
+	st=buffer_kbs:64,write_threshold_kbs:60
 
 The following syntax used by the old kernel versions is also supported:
 
diff --git a/Documentation/scsi/sym53c8xx_2.txt b/Documentation/scsi/sym53c8xx_2.txt
index 26c8a08..2c1745a 100644
--- a/Documentation/scsi/sym53c8xx_2.txt
+++ b/Documentation/scsi/sym53c8xx_2.txt
@@ -609,7 +609,7 @@
 be sure I will receive it.  Obviously, a bug in the driver code is
 possible.
 
-  My cyrrent email address: Gerard Roudier <groudier@free.fr>
+  My current email address: Gerard Roudier <groudier@free.fr>
 
 Allowing disconnections is important if you use several devices on
 your SCSI bus but often causes problems with buggy devices.
diff --git a/Documentation/sharedsubtree.txt b/Documentation/sharedsubtree.txt
index 2d8f403..ccf1ceb 100644
--- a/Documentation/sharedsubtree.txt
+++ b/Documentation/sharedsubtree.txt
@@ -942,13 +942,13 @@
 	->mnt_slave
 	->mnt_master
 
-	->mnt_share links togather all the mount to/from which this vfsmount
+	->mnt_share links together all the mount to/from which this vfsmount
 		send/receives propagation events.
 
 	->mnt_slave_list links all the mounts to which this vfsmount propagates
 		to.
 
-	->mnt_slave links togather all the slaves that its master vfsmount
+	->mnt_slave links together all the slaves that its master vfsmount
 		propagates to.
 
 	->mnt_master points to the master vfsmount from which this vfsmount
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index 3472d9c..9fef210 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -955,7 +955,7 @@
 		  dmx6fire, dsp24, dsp24_value, dsp24_71, ez8,
 		  phase88, mediastation
     omni	- Omni I/O support for MidiMan M-Audio Delta44/66
-    cs8427_timeout - reset timeout for the CS8427 chip (S/PDIF transciever)
+    cs8427_timeout - reset timeout for the CS8427 chip (S/PDIF transceiver)
                      in msec resolution, default value is 500 (0.5 sec)
 
     This module supports multiple cards and autoprobe. Note: The consumer part
diff --git a/Documentation/sound/alsa/Audigy-mixer.txt b/Documentation/sound/alsa/Audigy-mixer.txt
index 5132fd9..7f10dc6 100644
--- a/Documentation/sound/alsa/Audigy-mixer.txt
+++ b/Documentation/sound/alsa/Audigy-mixer.txt
@@ -6,7 +6,7 @@
 
 The EMU10K2 chips have a DSP part which can be programmed to support 
 various ways of sample processing, which is described here.
-(This acticle does not deal with the overall functionality of the 
+(This article does not deal with the overall functionality of the 
 EMU10K2 chips. See the manuals section for further details.)
 
 The ALSA driver programs this portion of chip by default code
diff --git a/Documentation/sound/alsa/SB-Live-mixer.txt b/Documentation/sound/alsa/SB-Live-mixer.txt
index 651adaf..f5639d4 100644
--- a/Documentation/sound/alsa/SB-Live-mixer.txt
+++ b/Documentation/sound/alsa/SB-Live-mixer.txt
@@ -5,7 +5,7 @@
 
 The EMU10K1 chips have a DSP part which can be programmed to support
 various ways of sample processing, which is described here.
-(This acticle does not deal with the overall functionality of the 
+(This article does not deal with the overall functionality of the 
 EMU10K1 chips. See the manuals section for further details.)
 
 The ALSA driver programs this portion of chip by default code
diff --git a/Documentation/stable_api_nonsense.txt b/Documentation/stable_api_nonsense.txt
index f39c9d7..a2afca3 100644
--- a/Documentation/stable_api_nonsense.txt
+++ b/Documentation/stable_api_nonsense.txt
@@ -62,9 +62,6 @@
       - different structures can contain different fields
       - Some functions may not be implemented at all, (i.e. some locks
 	compile away to nothing for non-SMP builds.)
-      - Parameter passing of variables from function to function can be
-	done in different ways (the CONFIG_REGPARM option controls
-	this.)
       - Memory within the kernel can be aligned in different ways,
 	depending on the build options.
   - Linux runs on a wide range of different processor architectures.
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index 02a4812..c815c52 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -50,7 +50,7 @@
    Contact the kernel security team for more details on this procedure.
 
 
-Review committe:
+Review committee:
 
  - This is made up of a number of kernel developers who have volunteered for
    this task, and a few that haven't.
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 5c3a519..aa986a3 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -146,7 +146,7 @@
 	readable by root only. This allows the end user to remove
 	such a dump but not access it directly. For security reasons
 	core dumps in this mode will not overwrite one another or
-	other files. This mode is appropriate when adminstrators are
+	other files. This mode is appropriate when administrators are
 	attempting to debug problems in a normal environment.
 
 ==============================================================
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 0bc7f1e..5922e84 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -27,6 +27,7 @@
 - hotplug
 - java-appletviewer           [ binfmt_java, obsolete ]
 - java-interpreter            [ binfmt_java, obsolete ]
+- kstack_depth_to_print       [ X86 only ]
 - l2cr                        [ PPC only ]
 - modprobe                    ==> Documentation/kmod.txt
 - msgmax
@@ -170,6 +171,13 @@
 
 ==============================================================
 
+kstack_depth_to_print: (X86 only)
+
+Controls the number of words to print when dumping the raw
+kernel stack.
+
+==============================================================
+
 osrelease, ostype & version:
 
 # cat osrelease
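
kstack_depth_to_print is an ordinary integer sysctl, so it can also be changed
at run time through /proc/sys/kernel/kstack_depth_to_print.  A small hedged
sketch (the value 64 is just an example, not a recommendation):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/kstack_depth_to_print", "w");

            if (!f)
                    return 1;
            /* dump 64 words of the raw kernel stack in subsequent oopses */
            fprintf(f, "%d\n", 64);
            return fclose(f) ? 1 : 0;
    }
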
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 20d0d79..e96a341 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -129,7 +129,7 @@
 
 zone_reclaim_mode:
 
-Zone_reclaim_mode allows to set more or less agressive approaches to
+Zone_reclaim_mode allows someone to set more or less aggressive approaches to
 reclaim memory when a zone runs out of memory. If it is set to zero then no
 zone reclaim occurs. Allocations will be satisfied from other zones / nodes
 in the system.
diff --git a/Documentation/uml/UserModeLinux-HOWTO.txt b/Documentation/uml/UserModeLinux-HOWTO.txt
index b60590e..628013f 100644
--- a/Documentation/uml/UserModeLinux-HOWTO.txt
+++ b/Documentation/uml/UserModeLinux-HOWTO.txt
@@ -1477,7 +1477,7 @@
 
 
 
-  Making it world-writeable looks bad, but it seems not to be
+  Making it world-writable looks bad, but it seems not to be
   exploitable as a security hole.  However, it does allow anyone to cre-
   ate useless tap devices (useless because they can't configure them),
   which is a DOS attack.  A somewhat more secure alternative would to be
diff --git a/Documentation/usb/hiddev.txt b/Documentation/usb/hiddev.txt
index 6a79075..6e8c9f1 100644
--- a/Documentation/usb/hiddev.txt
+++ b/Documentation/usb/hiddev.txt
@@ -8,7 +8,7 @@
 examples for this are power devices (especially uninterruptable power
 supplies) and monitor control on higher end monitors.
 
-To support these disparite requirements, the Linux USB system provides
+To support these disparate requirements, the Linux USB system provides
 HID events to two separate interfaces:
 * the input subsystem, which converts HID events into normal input
 device interfaces (such as keyboard, mouse and joystick) and a
diff --git a/Documentation/usb/rio.txt b/Documentation/usb/rio.txt
index ab21db4..aee715a 100644
--- a/Documentation/usb/rio.txt
+++ b/Documentation/usb/rio.txt
@@ -24,10 +24,10 @@
 inconsequential.
 
 It seems that the Rio has a problem when sending .mp3 with low batteries.
-I suggest when the batteries are low and want to transfer stuff that you
+I suggest when the batteries are low and you want to transfer stuff that you
 replace it with a fresh one. In my case, what happened is I lost two 16kb
 blocks (they are no longer usable to store information to it). But I don't
-know if thats normal or not. It could simply be a problem with the flash 
+know if that's normal or not; it could simply be a problem with the flash 
 memory.
 
 In an extreme case, I left my Rio playing overnight and the batteries wore 
diff --git a/Documentation/usb/usb-serial.txt b/Documentation/usb/usb-serial.txt
index 50436e1..d61f6e7 100644
--- a/Documentation/usb/usb-serial.txt
+++ b/Documentation/usb/usb-serial.txt
@@ -175,7 +175,7 @@
   
   Current status:
     The USA-18X, USA-28X, USA-19, USA-19W and USA-49W are supported and
-    have been pretty throughly tested at various baud rates with 8-N-1
+    have been pretty thoroughly tested at various baud rates with 8-N-1
     character settings.  Other character lengths and parity setups are
     presently untested.
 
@@ -253,7 +253,7 @@
 	together without hacking the adapter to set the line high.
 
 	The driver is smp safe.  Performance with the driver is rather low when using
-	it for transfering files.  This is being worked on, but I would be willing to
+	it for transferring files.  This is being worked on, but I would be willing to
 	accept patches.  An urb queue or packet buffer would likely fit the bill here.
 
 	If you have any questions, problems, patches, feature requests, etc. you can
@@ -297,7 +297,7 @@
       Parity       N,E,O,M,S
       Handshake    None, Software (XON/XOFF), Hardware (CTSRTS,CTSDTR)*
       Break        Set and clear
-      Line contrl  Input/Output query and control **
+      Line control Input/Output query and control **
 
       *  Hardware input flow control is only enabled for firmware
          levels above 2.06.  Read source code comments describing Belkin
@@ -309,7 +309,7 @@
          automatic hardware flow control.
 
   TO DO List:
-    -- Add true modem contol line query capability.  Currently tracks the
+    -- Add true modem control line query capability.  Currently tracks the
        states reported by the interrupt and the states requested.
     -- Add error reporting back to application for UART error conditions.
     -- Add support for flush ioctls.
diff --git a/Documentation/watchdog/watchdog-api.txt b/Documentation/watchdog/watchdog-api.txt
index 7e8ae83..8d16f6f 100644
--- a/Documentation/watchdog/watchdog-api.txt
+++ b/Documentation/watchdog/watchdog-api.txt
@@ -214,7 +214,7 @@
 
 Finally the SETOPTIONS ioctl can be used to control some aspects of
 the cards operation; right now the pcwd driver is the only one
-supporting thiss ioctl.
+supporting this ioctl.
 
     int options = 0;
     ioctl(fd, WDIOC_SETOPTIONS, options);
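
Building on the snippet above, a hedged sketch of pausing and resuming the
card with the standard WDIOS_* flags from <linux/watchdog.h>.  The text above
passes the value directly, while many drivers read the argument through a
pointer; passing &options is assumed here and should be checked against the
driver in use.

    #include <sys/ioctl.h>
    #include <linux/watchdog.h>

    static int watchdog_pause(int fd)
    {
            int options = WDIOS_DISABLECARD;

            return ioctl(fd, WDIOC_SETOPTIONS, &options);
    }

    static int watchdog_resume(int fd)
    {
            int options = WDIOS_ENABLECARD;

            return ioctl(fd, WDIOC_SETOPTIONS, &options);
    }
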
diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt
index f3c57f4..dbdcaf6 100644
--- a/Documentation/x86_64/boot-options.txt
+++ b/Documentation/x86_64/boot-options.txt
@@ -52,10 +52,6 @@
 		 apicmaintimer. Useful when your PIT timer is totally
 		 broken.
 
-   disable_8254_timer / enable_8254_timer
-		 Enable interrupt 0 timer routing over the 8254 in addition to over
-	         the IO-APIC. The kernel tries to set a sensible default.
-
 Early Console
 
    syntax: earlyprintk=vga
@@ -183,7 +179,7 @@
 IOMMU
 
  iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]][,merge]
-         [,forcesac][,fullflush][,nomerge][,noaperture]
+         [,forcesac][,fullflush][,nomerge][,noaperture][,calgary]
    size  set size of iommu (in bytes)
    noagp don't initialize the AGP driver and use full aperture.
    off   don't use the IOMMU
@@ -204,6 +200,7 @@
 	    buffering.
    nodac    Forbid DMA >4GB
    panic    Always panic when IOMMU overflows
+   calgary  Use the Calgary IOMMU if it is available
 
   swiotlb=pages[,force]
 
diff --git a/MAINTAINERS b/MAINTAINERS
index deae374..89ef018 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -155,16 +155,16 @@
 S:	Maintained
 
 9P FILE SYSTEM
-P:      Eric Van Hensbergen
-M:      ericvh@gmail.com
-P:      Ron Minnich
-M:      rminnich@lanl.gov
-P:      Latchesar Ionkov
-M:      lucho@ionkov.net
-L:      v9fs-developer@lists.sourceforge.net
-W:      http://v9fs.sf.net
-T:      git kernel.org:/pub/scm/linux/kernel/ericvh/v9fs.git
-S:      Maintained
+P:	Eric Van Hensbergen
+M:	ericvh@gmail.com
+P:	Ron Minnich
+M:	rminnich@lanl.gov
+P:	Latchesar Ionkov
+M:	lucho@ionkov.net
+L:	v9fs-developer@lists.sourceforge.net
+W:	http://v9fs.sf.net
+T:	git kernel.org:/pub/scm/linux/kernel/ericvh/v9fs.git
+S:	Maintained
 
 A2232 SERIAL BOARD DRIVER
 P:	Enver Haase
@@ -290,8 +290,8 @@
 S:	Maintained for 2.4; PCI support for 2.6.
 
 AMD GEODE PROCESSOR/CHIPSET SUPPORT
-P:      Jordan Crouse
-M:      info-linux@geode.amd.com
+P:	Jordan Crouse
+M:	info-linux@geode.amd.com
 L:	info-linux@geode.amd.com
 W:	http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
 S:	Supported
@@ -439,6 +439,13 @@
 W:	http://linux-atm.sourceforge.net
 S:	Maintained
 
+ATMEL MACB ETHERNET DRIVER
+P:	Atmel AVR32 Support Team
+M:	avr32@atmel.com
+P:	Haavard Skinnemoen
+M:	hskinnemoen@atmel.com
+S:	Supported
+
 ATMEL WIRELESS DRIVER
 P:	Simon Kelley
 M:	simon@thekelleys.org.uk
@@ -608,13 +615,13 @@
 S:	Maintained
 
 BONDING DRIVER
-P:   Chad Tindel
-M:   ctindel@users.sourceforge.net
-P:   Jay Vosburgh
-M:   fubar@us.ibm.com
-L:   bonding-devel@lists.sourceforge.net
-W:   http://sourceforge.net/projects/bonding/
-S:   Supported
+P:	Chad Tindel
+M:	ctindel@users.sourceforge.net
+P:	Jay Vosburgh
+M:	fubar@us.ibm.com
+L:	bonding-devel@lists.sourceforge.net
+W:	http://sourceforge.net/projects/bonding/
+S:	Supported
 
 BROADBAND PROCESSOR ARCHITECTURE
 P:	Arnd Bergmann
@@ -751,8 +758,8 @@
 S:	Supported
 
 CRAMFS FILESYSTEM
-W:     http://sourceforge.net/projects/cramfs/
-S:     Orphan
+W:	http://sourceforge.net/projects/cramfs/
+S:	Orphan
 
 CRIS PORT
 P:	Mikael Starvik
@@ -1061,11 +1068,11 @@
 S:	Maintained
 
 EMULEX LPFC FC SCSI DRIVER
-P:      James Smart
-M:      james.smart@emulex.com
-L:      linux-scsi@vger.kernel.org
-W:      http://sourceforge.net/projects/lpfcxxxx
-S:      Supported
+P:	James Smart
+M:	james.smart@emulex.com
+L:	linux-scsi@vger.kernel.org
+W:	http://sourceforge.net/projects/lpfcxxxx
+S:	Supported
 
 EPSON 1355 FRAMEBUFFER DRIVER
 P:	Christopher Hoover
@@ -1091,13 +1098,19 @@
 S:	Maintained
 
 EXT2 FILE SYSTEM
-L:	ext2-devel@lists.sourceforge.net
+L:	linux-ext4@vger.kernel.org
 S:	Maintained
 
 EXT3 FILE SYSTEM
 P:	Stephen Tweedie, Andrew Morton
 M:	sct@redhat.com, akpm@osdl.org, adilger@clusterfs.com
-L:	ext2-devel@lists.sourceforge.net
+L:	linux-ext4@vger.kernel.org
+S:	Maintained
+
+EXT4 FILE SYSTEM
+P:	Stephen Tweedie, Andrew Morton
+M:	sct@redhat.com, akpm@osdl.org, adilger@clusterfs.com
+L:	linux-ext4@vger.kernel.org
 S:	Maintained
 
 F71805F HARDWARE MONITORING DRIVER
@@ -1166,11 +1179,6 @@
 M:	dhowells@redhat.com
 S:	Maintained
 
-FTAPE/QIC-117
-L:	linux-tape@vger.kernel.org
-W:	http://sourceforge.net/projects/ftape
-S:	Orphan
-
 FUSE: FILESYSTEM IN USERSPACE
 P:	Miklos Szeredi
 M:	miklos@szeredi.hu
@@ -1219,7 +1227,8 @@
 P:	Jean Delvare
 M:	khali@linux-fr.org
 L:	lm-sensors@lm-sensors.org
-W:	http://www.lm-sensors.nu/
+W:	http://www.lm-sensors.org/
+T:	quilt http://khali.linux-fr.org/devel/linux-2.6/jdelvare-hwmon/
 S:	Maintained
 
 HARDWARE RANDOM NUMBER GENERATOR CORE
@@ -1345,8 +1354,7 @@
 P:	Jean Delvare
 M:	khali@linux-fr.org
 L:	i2c@lm-sensors.org
-W:	http://www.lm-sensors.nu/
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	quilt http://khali.linux-fr.org/devel/linux-2.6/jdelvare-i2c/
 S:	Maintained
 
 I2O
@@ -1502,16 +1510,16 @@
 S:	Maintained
 
 INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
-P:      Sylvain Meyer
-M:      sylvain.meyer@worldonline.fr
-L:      linux-fbdev-devel@lists.sourceforge.net
-S:      Maintained
+P:	Sylvain Meyer
+M:	sylvain.meyer@worldonline.fr
+L:	linux-fbdev-devel@lists.sourceforge.net
+S:	Maintained
 
 INTEL 810/815 FRAMEBUFFER DRIVER
-P:      Antonino Daplas
-M:      adaplas@pol.net
-L:      linux-fbdev-devel@lists.sourceforge.net
-S:      Maintained
+P:	Antonino Daplas
+M:	adaplas@pol.net
+L:	linux-fbdev-devel@lists.sourceforge.net
+S:	Maintained
 
 INTEL APIC/IOAPIC, LOWLEVEL X86 SMP SUPPORT
 P:	Ingo Molnar
@@ -1678,7 +1686,7 @@
 JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
 P:	Stephen Tweedie, Andrew Morton
 M:	sct@redhat.com, akpm@osdl.org
-L:	ext2-devel@lists.sourceforge.net
+L:	linux-ext4@vger.kernel.org
 S:	Maintained
 
 K8TEMP HARDWARE MONITORING DRIVER
@@ -1837,11 +1845,11 @@
 S:	Maintained
 
 LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX
-P:     Kumar Gala
-M:     galak@kernel.crashing.org
-W:     http://www.penguinppc.org/
-L:     linuxppc-embedded@ozlabs.org
-S:     Maintained
+P:	Kumar Gala
+M:	galak@kernel.crashing.org
+W:	http://www.penguinppc.org/
+L:	linuxppc-embedded@ozlabs.org
+S:	Maintained
 
 LINUX FOR POWERPC PA SEMI PWRFICIENT
 P:	Olof Johansson
@@ -1940,10 +1948,10 @@
 S: 	Supported
 
 MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7
-P: Michael Kerrisk
-M: mtk-manpages@gmx.net
-W: ftp://ftp.kernel.org/pub/linux/docs/manpages
-S: Maintained
+P:	Michael Kerrisk
+M:	mtk-manpages@gmx.net
+W:	ftp://ftp.kernel.org/pub/linux/docs/manpages
+S:	Maintained
 
 MARVELL MV643XX ETHERNET DRIVER
 P:	Dale Farnsworth
@@ -1960,11 +1968,11 @@
 S:	Maintained
 
 MEGARAID SCSI DRIVERS
-P:     Neela Syam Kolli
-M:     Neela.Kolli@engenio.com
-S:     linux-scsi@vger.kernel.org
-W:     http://megaraid.lsilogic.com
-S:     Maintained
+P:	Neela Syam Kolli
+M:	Neela.Kolli@engenio.com
+S:	linux-scsi@vger.kernel.org
+W:	http://megaraid.lsilogic.com
+S:	Maintained
 
 MEMORY MANAGEMENT
 L:	linux-mm@kvack.org
@@ -2139,6 +2147,13 @@
 T:	git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git
 S:	Maintained
 
+NETXEN (1/10) GbE SUPPORT
+P:	Amit S. Kale
+M:	amitkale@netxen.com
+L:	netdev@vger.kernel.org
+W:	http://www.netxen.com
+S:	Supported
+
 IPVS
 P:	Wensong Zhang
 M:	wensong@linux-vs.org
@@ -2193,10 +2208,10 @@
 S:	Maintained
 
 NVIDIA (rivafb and nvidiafb) FRAMEBUFFER DRIVER
-P:      Antonino Daplas
-M:      adaplas@pol.net
-L:      linux-fbdev-devel@lists.sourceforge.net
-S:      Maintained
+P:	Antonino Daplas
+M:	adaplas@pol.net
+L:	linux-fbdev-devel@lists.sourceforge.net
+S:	Maintained
 
 OPENCORES I2C BUS DRIVER
 P:	Peter Korsgaard
@@ -2436,6 +2451,13 @@
 W:	http://www.pnd-pc.demon.co.uk/promise/
 S:	Maintained
 
+PS3 PLATFORM SUPPORT
+P:	Geoff Levand
+M:	geoffrey.levand@am.sony.com
+L:	linuxppc-dev@ozlabs.org
+L:	cbe-oss-dev@ozlabs.org
+S:	Supported
+
 PVRUSB2 VIDEO4LINUX DRIVER
 P:	Mike Isely
 M:	isely@pobox.com
@@ -2546,10 +2568,10 @@
 S:	Orphan
 
 S3 SAVAGE FRAMEBUFFER DRIVER
-P:      Antonino Daplas
-M:      adaplas@pol.net
-L:      linux-fbdev-devel@lists.sourceforge.net
-S:      Maintained
+P:	Antonino Daplas
+M:	adaplas@pol.net
+L:	linux-fbdev-devel@lists.sourceforge.net
+S:	Maintained
 
 S390
 P:	Martin Schwidefsky
@@ -2630,10 +2652,10 @@
 S:	Maintained
 
 SCTP PROTOCOL
-P: Sridhar Samudrala
-M: sri@us.ibm.com
-L: lksctp-developers@lists.sourceforge.net
-S: Supported
+P:	Sridhar Samudrala
+M:	sri@us.ibm.com
+L:	lksctp-developers@lists.sourceforge.net
+S:	Supported
 
 SCx200 CPU SUPPORT
 P:	Jim Cromie
@@ -2801,9 +2823,9 @@
 S:	Maintained
 
 Telecom Clock Driver for MCPL0010
-P: Mark Gross
-M: mark.gross@intel.com
-S: Supported
+P:	Mark Gross
+M:	mark.gross@intel.com
+S:	Supported
 
 TENSILICA XTENSA PORT (xtensa):
 P:	Chris Zankel
@@ -2904,7 +2926,6 @@
 SUN3/3X
 P:	Sam Creasey
 M:	sammy@sammy.net
-L:	sun3-list@redhat.com
 W:	http://sammy.net/sun3/
 S:	Maintained
 
@@ -2950,9 +2971,9 @@
 S:	Maintained
 
 TI PARALLEL LINK CABLE DRIVER
-P:     Romain Lievin
-M:     roms@lpg.ticalc.org
-S:     Maintained
+P:	Romain Lievin
+M:	roms@lpg.ticalc.org
+S:	Maintained
 
 TIPC NETWORK LAYER
 P:	Per Liden
@@ -3002,12 +3023,12 @@
 S:	Maintained
 
 TRIVIAL PATCHES
-P:      Adrian Bunk
-M:      trivial@kernel.org
-L:      linux-kernel@vger.kernel.org
-W:      http://www.kernel.org/pub/linux/kernel/people/bunk/trivial/
-T:      git kernel.org:/pub/scm/linux/kernel/git/bunk/trivial.git
-S:      Maintained
+P:	Adrian Bunk
+M:	trivial@kernel.org
+L:	linux-kernel@vger.kernel.org
+W:	http://www.kernel.org/pub/linux/kernel/people/bunk/trivial/
+T:	git kernel.org:/pub/scm/linux/kernel/git/bunk/trivial.git
+S:	Maintained
 
 TMS380 TOKEN-RING NETWORK DRIVER
 P:	Adam Fritzler
@@ -3445,6 +3466,12 @@
 T:	git git://oss.sgi.com:8090/xfs/xfs-2.6
 S:	Supported
 
+XILINX UARTLITE SERIAL DRIVER
+P:	Peter Korsgaard
+M:	jacmet@sunsite.dk
+L:	linux-serial@vger.kernel.org
+S:	Maintained
+
 X86 3-LEVEL PAGING (PAE) SUPPORT
 P:	Ingo Molnar
 M:	mingo@redhat.com
diff --git a/README b/README
index 3e26472..c055615 100644
--- a/README
+++ b/README
@@ -1,4 +1,4 @@
-	Linux kernel release 2.6.xx <http://kernel.org>
+	Linux kernel release 2.6.xx <http://kernel.org/>
 
 These are the release notes for Linux version 2.6.  Read them carefully,
 as they tell you what this is all about, explain how to install the
@@ -22,15 +22,17 @@
 
   Although originally developed first for 32-bit x86-based PCs (386 or higher),
   today Linux also runs on (at least) the Compaq Alpha AXP, Sun SPARC and
-  UltraSPARC, Motorola 68000, PowerPC, PowerPC64, ARM, Hitachi SuperH,
+  UltraSPARC, Motorola 68000, PowerPC, PowerPC64, ARM, Hitachi SuperH, Cell,
   IBM S/390, MIPS, HP PA-RISC, Intel IA-64, DEC VAX, AMD x86-64, AXIS CRIS,
-  and Renesas M32R architectures.
+  Xtensa, AVR32 and Renesas M32R architectures.
 
   Linux is easily portable to most general-purpose 32- or 64-bit architectures
   as long as they have a paged memory management unit (PMMU) and a port of the
   GNU C compiler (gcc) (part of The GNU Compiler Collection, GCC). Linux has
   also been ported to a number of architectures without a PMMU, although
   functionality is then obviously somewhat limited.
+  Linux has also been ported to itself. You can now run the kernel as a
+  userspace application - this is called UserMode Linux (UML).
 
 DOCUMENTATION:
 
@@ -113,6 +115,7 @@
    version 2.6.12.2 and want to jump to 2.6.12.3, you must first
    reverse the 2.6.12.2 patch (that is, patch -R) _before_ applying
    the 2.6.12.3 patch.
+   You can read more on this in Documentation/applying-patches.txt
 
  - Make sure you have no stale .o files and dependencies lying around:
 
@@ -161,6 +164,7 @@
    only ask you for the answers to new questions.
 
  - Alternate configuration commands are:
+	"make config"      Plain text interface.
 	"make menuconfig"  Text based color menus, radiolists & dialogs.
 	"make xconfig"     X windows (Qt) based configuration tool.
 	"make gconfig"     X windows (Gtk) based configuration tool.
@@ -303,8 +307,9 @@
 
  - If you compiled the kernel with CONFIG_KALLSYMS you can send the dump
    as is, otherwise you will have to use the "ksymoops" program to make
-   sense of the dump.  This utility can be downloaded from
-   ftp://ftp.<country>.kernel.org/pub/linux/utils/kernel/ksymoops.
+   sense of the dump (but compiling with CONFIG_KALLSYMS is usually preferred).
+   This utility can be downloaded from
+   ftp://ftp.<country>.kernel.org/pub/linux/utils/kernel/ksymoops/ .
    Alternately you can do the dump lookup by hand:
 
  - In debugging dumps like the above, it helps enormously if you can
@@ -336,7 +341,7 @@
 
    If you for some reason cannot do the above (you have a pre-compiled
    kernel image or similar), telling me as much about your setup as
-   possible will help. 
+   possible will help.  Please read the REPORTING-BUGS document for details.
 
  - Alternately, you can use gdb on a running kernel. (read-only; i.e. you
    cannot change values or set break points.) To do this, first compile the
diff --git a/REPORTING-BUGS b/REPORTING-BUGS
index f9da827..ac02e42 100644
--- a/REPORTING-BUGS
+++ b/REPORTING-BUGS
@@ -40,7 +40,9 @@
 [1.] One line summary of the problem:
 [2.] Full description of the problem/report:
 [3.] Keywords (i.e., modules, networking, kernel):
-[4.] Kernel version (from /proc/version):
+[4.] Kernel information
+[4.1.] Kernel version (from /proc/version):
+[4.2.] Kernel .config file:
 [5.] Most recent kernel version which did not have the bug:
 [6.] Output of Oops.. message (if applicable) with symbolic information
      resolved (see Documentation/oops-tracing.txt)
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index ffb7d54..3c10b9a 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -516,10 +516,11 @@
 		if (bus == 0 && dfn == 0) {
 			hose = pci_isa_hose;
 		} else {
-			dev = pci_find_slot(bus, dfn);
+			dev = pci_get_bus_and_slot(bus, dfn);
 			if (!dev)
 				return -ENODEV;
 			hose = dev->sysdata;
+			pci_dev_put(dev);
 		}
 	}
 
diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c
index b8b817f..910b43c 100644
--- a/arch/alpha/kernel/sys_miata.c
+++ b/arch/alpha/kernel/sys_miata.c
@@ -183,11 +183,15 @@
 
 	if((slot == 7) && (PCI_FUNC(dev->devfn) == 3)) {
 		u8 irq=0;
-
-		if(pci_read_config_byte(pci_find_slot(dev->bus->number, dev->devfn & ~(7)), 0x40,&irq)!=PCIBIOS_SUCCESSFUL)
+		struct pci_dev *pdev = pci_get_slot(dev->bus, dev->devfn & ~7);
+		if(pdev == NULL || pci_read_config_byte(pdev, 0x40,&irq) != PCIBIOS_SUCCESSFUL) {
+			pci_dev_put(pdev);
 			return -1;
-		else	
+		}
+		else	{
+			pci_dev_put(pdev);
 			return irq;
+		}
 	}
 
 	return COMMON_TABLE_LOOKUP;
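
The pci_find_slot() conversions above share one pattern: pci_get_bus_and_slot()
and pci_get_slot() return a device with its reference count raised, and the
caller must drop that reference with pci_dev_put() once it is done with the
device.  A hedged sketch of the pattern, with a made-up helper name:

    #include <linux/pci.h>

    static void *example_get_sysdata(unsigned int bus, unsigned int devfn)
    {
            struct pci_dev *dev = pci_get_bus_and_slot(bus, devfn);
            void *sysdata;

            if (!dev)
                    return NULL;
            sysdata = dev->sysdata;
            pci_dev_put(dev);       /* drop the reference taken by the lookup */
            return sysdata;
    }
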
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 93744ba..e7594a7 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -200,7 +200,7 @@
 	bus = pci_scan_bus(0, alpha_mv.pci_ops, hose);
 	hose->bus = bus;
 
-	irongate = pci_find_slot(0, 0);
+	irongate = pci_get_bus_and_slot(0, 0);
 	bus->self = irongate;
 	bus->resource[1] = &irongate_mem;
 
diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c
index 89044e6..ab3761c 100644
--- a/arch/alpha/lib/checksum.c
+++ b/arch/alpha/lib/checksum.c
@@ -41,28 +41,25 @@
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented.
  */
-unsigned short int csum_tcpudp_magic(unsigned long saddr,
-				   unsigned long daddr,
+__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 				   unsigned short len,
 				   unsigned short proto,
-				   unsigned int sum)
+				   __wsum sum)
 {
-	return ~from64to16(saddr + daddr + sum +
-		((unsigned long) ntohs(len) << 16) +
-		((unsigned long) proto << 8));
+	return (__force __sum16)~from64to16(
+		(__force u64)saddr + (__force u64)daddr +
+		(__force u64)sum + ((len + proto) << 8));
 }
 
-unsigned int csum_tcpudp_nofold(unsigned long saddr,
-				   unsigned long daddr,
+__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 				   unsigned short len,
 				   unsigned short proto,
-				   unsigned int sum)
+				   __wsum sum)
 {
 	unsigned long result;
 
-	result = (saddr + daddr + sum +
-		  ((unsigned long) ntohs(len) << 16) +
-		  ((unsigned long) proto << 8));
+	result = (__force u64)saddr + (__force u64)daddr +
+		 (__force u64)sum + ((len + proto) << 8);
 
 	/* Fold down to 32-bits so we don't lose in the typedef-less 
 	   network stack.  */
@@ -70,7 +67,7 @@
 	result = (result & 0xffffffff) + (result >> 32);
 	/* 33 to 32 */
 	result = (result & 0xffffffff) + (result >> 32);
-	return result;
+	return (__force __wsum)result;
 }
 
 /*
@@ -146,9 +143,9 @@
  *	This is a version of ip_compute_csum() optimized for IP headers,
  *	which always checksum on 4 octet boundaries.
  */
-unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
-	return ~do_csum(iph,ihl*4);
+	return (__force __sum16)~do_csum(iph,ihl*4);
 }
 
 /*
@@ -163,15 +160,15 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
 {
 	unsigned long result = do_csum(buff, len);
 
 	/* add in old sum, and carry.. */
-	result += sum;
+	result += (__force u32)sum;
 	/* 32+c bits -> 32 bits */
 	result = (result & 0xffffffff) + (result >> 32);
-	return result;
+	return (__force __wsum)result;
 }
 
 EXPORT_SYMBOL(csum_partial);
@@ -180,7 +177,7 @@
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-unsigned short ip_compute_csum(unsigned char * buff, int len)
+__sum16 ip_compute_csum(const void *buff, int len)
 {
-	return ~from64to16(do_csum(buff,len));
+	return (__force __sum16)~from64to16(do_csum(buff,len));
 }
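
The annotations above let sparse distinguish partial (__wsum) from folded
(__sum16) checksums, with __force marking the few places where a value crosses
between the annotated and plain-integer worlds.  As a hedged illustration (an
assumption about how the helpers compose, not part of this patch), the folded
pseudo-header checksum is the nofold variant passed through csum_fold():

    #include <net/checksum.h>

    static __sum16 example_pseudo_csum(__be32 saddr, __be32 daddr,
                                       unsigned short len,
                                       unsigned short proto, __wsum sum)
    {
            return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
    }
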
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index a37948f..4ca75c7 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -329,11 +329,11 @@
 	return checksum;
 }
 
-static unsigned int
-do_csum_partial_copy_from_user(const char __user *src, char *dst, int len,
-			       unsigned int sum, int *errp)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+			       __wsum sum, int *errp)
 {
-	unsigned long checksum = (unsigned) sum;
+	unsigned long checksum = (__force u32) sum;
 	unsigned long soff = 7 & (unsigned long) src;
 	unsigned long doff = 7 & (unsigned long) dst;
 
@@ -367,25 +367,12 @@
 		}
 		checksum = from64to16 (checksum);
 	}
-	return checksum;
+	return (__force __wsum)checksum;
 }
 
-unsigned int
-csum_partial_copy_from_user(const char __user *src, char *dst, int len,
-			    unsigned int sum, int *errp)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
-	if (!access_ok(VERIFY_READ, src, len)) {
-		*errp = -EFAULT;
-		memset(dst, 0, len);
-		return sum;
-	}
-
-	return do_csum_partial_copy_from_user(src, dst, len, sum, errp);
-}
-
-unsigned int
-csum_partial_copy_nocheck(const char __user *src, char *dst, int len,
-			  unsigned int sum)
-{
-	return do_csum_partial_copy_from_user(src, dst, len, sum, NULL);
+	return csum_partial_copy_from_user((__force const void __user *)src,
+			dst, len, sum, NULL);
 }
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 8871529..8aa9db8 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -108,7 +108,7 @@
 
 	/* If we're in an interrupt context, or have no user context,
 	   we must not take the fault.  */
-	if (!mm || in_interrupt())
+	if (!mm || in_atomic())
 		goto no_context;
 
 #ifdef CONFIG_ALPHA_LARGE_VMALLOC
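
Several fault handlers in this merge switch from in_interrupt() to
in_atomic(): the latter also covers regions that merely have preemption
disabled (for example between pagefault_disable() and pagefault_enable()),
where sleeping on a fault would be a bug.  A hedged sketch of the guard, with
a hypothetical helper:

    #include <linux/hardirq.h>
    #include <linux/sched.h>

    static int example_may_handle_fault(struct mm_struct *mm)
    {
            /* No user context, or atomic context (IRQ, preempt-disabled,
             * pagefault_disable()): the handler must not sleep here. */
            if (in_atomic() || !mm)
                    return 0;
            return 1;
    }
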
diff --git a/arch/arm/common/sharpsl_pm.c b/arch/arm/common/sharpsl_pm.c
index 605dedf..b359974 100644
--- a/arch/arm/common/sharpsl_pm.c
+++ b/arch/arm/common/sharpsl_pm.c
@@ -60,16 +60,16 @@
 static int sharpsl_fatal_check(void);
 static int sharpsl_average_value(int ad);
 static void sharpsl_average_clear(void);
-static void sharpsl_charge_toggle(void *private_);
-static void sharpsl_battery_thread(void *private_);
+static void sharpsl_charge_toggle(struct work_struct *private_);
+static void sharpsl_battery_thread(struct work_struct *private_);
 
 
 /*
  * Variables
  */
 struct sharpsl_pm_status sharpsl_pm;
-DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
-DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle);
+DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread);
 DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
 
 
@@ -116,7 +116,7 @@
 EXPORT_SYMBOL(sharpsl_battery_kick);
 
 
-static void sharpsl_battery_thread(void *private_)
+static void sharpsl_battery_thread(struct work_struct *private_)
 {
 	int voltage, percent, apm_status, i = 0;
 
@@ -128,7 +128,7 @@
 	/* Corgi cannot confirm when battery fully charged so periodically kick! */
 	if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON)
 			&& time_after(jiffies, sharpsl_pm.charge_start_time +  SHARPSL_CHARGE_ON_TIME_INTERVAL))
-		schedule_work(&toggle_charger);
+		schedule_delayed_work(&toggle_charger, 0);
 
 	while(1) {
 		voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
@@ -212,7 +212,7 @@
 	sharpsl_pm_led(SHARPSL_LED_OFF);
 	sharpsl_pm.charge_mode = CHRG_OFF;
 
-	schedule_work(&sharpsl_bat);
+	schedule_delayed_work(&sharpsl_bat, 0);
 }
 
 static void sharpsl_charge_error(void)
@@ -222,7 +222,7 @@
 	sharpsl_pm.charge_mode = CHRG_ERROR;
 }
 
-static void sharpsl_charge_toggle(void *private_)
+static void sharpsl_charge_toggle(struct work_struct *private_)
 {
 	dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies);
 
@@ -254,7 +254,7 @@
 	else if (sharpsl_pm.charge_mode == CHRG_ON)
 		sharpsl_charge_off();
 
-	schedule_work(&sharpsl_bat);
+	schedule_delayed_work(&sharpsl_bat, 0);
 }
 
 
@@ -279,10 +279,10 @@
 			sharpsl_charge_off();
 	} else if (sharpsl_pm.full_count < 2) {
 		dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
-		schedule_work(&toggle_charger);
+		schedule_delayed_work(&toggle_charger, 0);
 	} else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
 		dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
-		schedule_work(&toggle_charger);
+		schedule_delayed_work(&toggle_charger, 0);
 	} else {
 		sharpsl_charge_off();
 		sharpsl_pm.charge_mode = CHRG_DONE;
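
The conversions above follow the reworked workqueue API: handlers receive a
struct work_struct * instead of a void *, delayed items are declared with
DECLARE_DELAYED_WORK()/INIT_DELAYED_WORK(), and a handler that needs its
context recovers it with container_of().  A minimal sketch of the new style,
assuming a made-up context structure:

    #include <linux/workqueue.h>

    struct example_ctx {
            int pending;
            struct delayed_work work;
    };

    static void example_handler(struct work_struct *work)
    {
            struct example_ctx *ctx =
                    container_of(work, struct example_ctx, work.work);

            ctx->pending = 0;       /* placeholder for the real processing */
    }

    /* during initialisation:
     *      INIT_DELAYED_WORK(&ctx->work, example_handler);
     *      schedule_delayed_work(&ctx->work, 0);
     */
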
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index f2b1d61..3843d3b 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -11,6 +11,7 @@
 #include <linux/signal.h>
 #include <linux/ptrace.h>
 #include <linux/personality.h>
+#include <linux/freezer.h>
 
 #include <asm/elf.h>
 #include <asm/cacheflush.h>
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 57f23b4..e316bd9 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -133,7 +133,7 @@
              into the kernel and we can use the standard read[bwl]/write[bwl]
              macros. This is the preferred method due to speed but it
              limits the system to just 64MB of PCI memory. This can be 
-             problamatic if using video cards and other memory-heavy devices.
+             problematic if using video cards and other memory-heavy devices.
           
           2) If > 64MB of memory space is required, the IXP4xx can be 
 	     configured to use indirect registers to access PCI This allows 
diff --git a/arch/arm/mach-lh7a40x/Kconfig b/arch/arm/mach-lh7a40x/Kconfig
index 147b019..6f4c6a1 100644
--- a/arch/arm/mach-lh7a40x/Kconfig
+++ b/arch/arm/mach-lh7a40x/Kconfig
@@ -8,7 +8,7 @@
 	help
 	  Say Y here if you are using the Sharp KEV7A400 development
 	  board.  This hardware is discontinued, so I'd be very
-	  suprised if you wanted this option.
+	  surprised if you wanted this option.
 
 config MACH_LPD7A400
 	bool "LPD7A400 Card Engine"
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index f225a08..9d2346f 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -323,7 +323,8 @@
 
 	cancel_delayed_work(&irda_config->gpio_expa);
 	PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
-	schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+	schedule_delayed_work(&irda_config->gpio_expa, 0);
 
 	return 0;
 }
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index dbc555d..cbe909b 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -74,7 +74,7 @@
 	.rows		= 8,
 	.cols		= 8,
 	.keymap		= nokia770_keymap,
-	.keymapsize	= ARRAY_SIZE(nokia770_keymap)
+	.keymapsize	= ARRAY_SIZE(nokia770_keymap),
 	.delay		= 4,
 };
 
@@ -191,7 +191,7 @@
 		printk("HP connected\n");
 }
 
-static void codec_delayed_power_down(void *arg)
+static void codec_delayed_power_down(struct work_struct *work)
 {
 	down(&audio_pwr_sem);
 	if (audio_pwr_state == -1)
@@ -200,7 +200,7 @@
 	up(&audio_pwr_sem);
 }
 
-static DECLARE_WORK(codec_power_down_work, codec_delayed_power_down, NULL);
+static DECLARE_DELAYED_WORK(codec_power_down_work, codec_delayed_power_down);
 
 static void nokia770_audio_pwr_down(void)
 {
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index a611c3b..6dcd10a 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -55,7 +55,7 @@
 
 /*-------------------------------------------------------------------------*/
 
-#if	defined(CONFIG_OMAP_RTC) || defined(CONFIG_OMAP_RTC)
+#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
 
 #define	OMAP_RTC_BASE		0xfffb4800
 
diff --git a/arch/arm/mach-omap1/leds-osk.c b/arch/arm/mach-omap1/leds-osk.c
index 3b29e59..0cbf1b0 100644
--- a/arch/arm/mach-omap1/leds-osk.c
+++ b/arch/arm/mach-omap1/leds-osk.c
@@ -35,7 +35,7 @@
 
 static u8 tps_leds_change;
 
-static void tps_work(void *unused)
+static void tps_work(struct work_struct *unused)
 {
 	for (;;) {
 		u8	leds;
@@ -61,7 +61,7 @@
 	}
 }
 
-static DECLARE_WORK(work, tps_work, NULL);
+static DECLARE_WORK(work, tps_work);
 
 #ifdef	CONFIG_OMAP_OSK_MISTRAL
 
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 26a95a6..3b1ad1d 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -206,7 +206,8 @@
 
 	cancel_delayed_work(&irda_config->gpio_expa);
 	PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
-	schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+	schedule_delayed_work(&irda_config->gpio_expa, 0);
 
 	return 0;
 }
diff --git a/arch/arm/mach-pxa/akita-ioexp.c b/arch/arm/mach-pxa/akita-ioexp.c
index 1b39874..12d2fe0 100644
--- a/arch/arm/mach-pxa/akita-ioexp.c
+++ b/arch/arm/mach-pxa/akita-ioexp.c
@@ -36,11 +36,11 @@
 
 static int max7310_write(struct i2c_client *client, int address, int data);
 static struct i2c_client max7310_template;
-static void akita_ioexp_work(void *private_);
+static void akita_ioexp_work(struct work_struct *private_);
 
 static struct device *akita_ioexp_device;
 static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT;
-DECLARE_WORK(akita_ioexp, akita_ioexp_work, NULL);
+DECLARE_WORK(akita_ioexp, akita_ioexp_work);
 
 
 /*
@@ -158,7 +158,7 @@
 EXPORT_SYMBOL(akita_set_ioexp);
 EXPORT_SYMBOL(akita_reset_ioexp);
 
-static void akita_ioexp_work(void *private_)
+static void akita_ioexp_work(struct work_struct *private_)
 {
 	if (akita_ioexp_device)
 		max7310_set_ouputs(akita_ioexp_device, ioexp_output_value);
diff --git a/arch/arm/mach-s3c2410/Kconfig b/arch/arm/mach-s3c2410/Kconfig
index 08b2f30..9f46bf3 100644
--- a/arch/arm/mach-s3c2410/Kconfig
+++ b/arch/arm/mach-s3c2410/Kconfig
@@ -98,7 +98,7 @@
 config MACH_S3C2413
 	bool
 	help
-	  Internal node for S3C2413 verison of SMDK2413, so that
+	  Internal node for S3C2413 version of SMDK2413, so that
 	  machine_is_s3c2413() will work when MACH_SMDK2413 is
 	  selected
 
diff --git a/arch/arm/mach-s3c2410/dma.c b/arch/arm/mach-s3c2410/dma.c
index 3d211dc..01abb0a 100644
--- a/arch/arm/mach-s3c2410/dma.c
+++ b/arch/arm/mach-s3c2410/dma.c
@@ -40,7 +40,7 @@
 
 /* io map for dma */
 static void __iomem *dma_base;
-static kmem_cache_t *dma_kmem;
+static struct kmem_cache *dma_kmem;
 
 struct s3c24xx_dma_selection dma_sel;
 
@@ -1271,7 +1271,7 @@
 
 /* kmem cache implementation */
 
-static void s3c2410_dma_cache_ctor(void *p, kmem_cache_t *c, unsigned long f)
+static void s3c2410_dma_cache_ctor(void *p, struct kmem_cache *c, unsigned long f)
 {
 	memset(p, 0, sizeof(struct s3c2410_dma_buf));
 }
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 125cb3f..aade2f7 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -197,7 +197,7 @@
 	select CPU_CP15_MPU
 	help
 	  ARM940T is a member of the ARM9TDMI family of general-
-	  purpose microprocessors with MPU and seperate 4KB
+	  purpose microprocessors with MPU and separate 4KB
 	  instruction and 4KB data cases, each with a 4-word line
 	  length.
 
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5e658a8..9fd6d2e 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -230,7 +230,7 @@
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
 	/*
diff --git a/arch/arm26/kernel/ecard.c b/arch/arm26/kernel/ecard.c
index 047d0a4..43dd41b 100644
--- a/arch/arm26/kernel/ecard.c
+++ b/arch/arm26/kernel/ecard.c
@@ -620,12 +620,10 @@
 	struct ex_ecid cid;
 	int i, rc = -ENOMEM;
 
-	ec = kmalloc(sizeof(ecard_t), GFP_KERNEL);
+	ec = kzalloc(sizeof(ecard_t), GFP_KERNEL);
 	if (!ec)
 		goto nomem;
 
-	memset(ec, 0, sizeof(ecard_t));
-
 	ec->slot_no	= slot;
 	ec->type        = type;
 	ec->irq		= NO_IRQ;
diff --git a/arch/arm26/mm/fault.c b/arch/arm26/mm/fault.c
index a1f6d8a..93c0cee 100644
--- a/arch/arm26/mm/fault.c
+++ b/arch/arm26/mm/fault.c
@@ -215,7 +215,7 @@
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
 	down_read(&mm->mmap_sem);
diff --git a/arch/arm26/mm/memc.c b/arch/arm26/mm/memc.c
index 34def63..f290158 100644
--- a/arch/arm26/mm/memc.c
+++ b/arch/arm26/mm/memc.c
@@ -24,7 +24,7 @@
 
 #define MEMC_TABLE_SIZE (256*sizeof(unsigned long))
 
-kmem_cache_t *pte_cache, *pgd_cache;
+struct kmem_cache *pte_cache, *pgd_cache;
 int page_nr;
 
 /*
@@ -162,12 +162,12 @@
 {
 }
 
-static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
+static void pte_cache_ctor(void *pte, struct kmem_cache *cache, unsigned long flags)
 {
 	memzero(pte, sizeof(pte_t) * PTRS_PER_PTE);
 }
 
-static void pgd_cache_ctor(void *pgd, kmem_cache_t *cache, unsigned long flags)
+static void pgd_cache_ctor(void *pgd, struct kmem_cache *cache, unsigned long flags)
 {
 	memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t));
 }
diff --git a/arch/avr32/kernel/kprobes.c b/arch/avr32/kernel/kprobes.c
index ca41fc1..d0abbca 100644
--- a/arch/avr32/kernel/kprobes.c
+++ b/arch/avr32/kernel/kprobes.c
@@ -154,6 +154,7 @@
 	return 1;
 
 no_kprobe:
+	preempt_enable_no_resched();
 	return ret;
 }
 
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c
index 3309665..0ec14854 100644
--- a/arch/avr32/kernel/signal.c
+++ b/arch/avr32/kernel/signal.c
@@ -15,7 +15,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 #include <asm/ucontext.h>
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c
index 44ab8a7..b68d669 100644
--- a/arch/avr32/mm/dma-coherent.c
+++ b/arch/avr32/mm/dma-coherent.c
@@ -11,7 +11,7 @@
 #include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 
-void dma_cache_sync(void *vaddr, size_t size, int direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
 {
 	/*
 	 * No need to sync an uncached area
diff --git a/arch/cris/arch-v10/Kconfig b/arch/cris/arch-v10/Kconfig
index 44eb1b9..c7ea9ef 100644
--- a/arch/cris/arch-v10/Kconfig
+++ b/arch/cris/arch-v10/Kconfig
@@ -323,7 +323,7 @@
 	depends on ETRAX_ARCH_V10
 	default "95a6"
 	help
-	  Waitstates for SRAM, Flash and peripherials (not DRAM).  95f8 is a
+	  Waitstates for SRAM, Flash and peripherals (not DRAM).  95f8 is a
 	  good choice for most Axis products...
 
 config ETRAX_DEF_R_BUS_CONFIG
diff --git a/arch/cris/arch-v10/drivers/Kconfig b/arch/cris/arch-v10/drivers/Kconfig
index 734d5f3..e7e724bc 100644
--- a/arch/cris/arch-v10/drivers/Kconfig
+++ b/arch/cris/arch-v10/drivers/Kconfig
@@ -839,7 +839,7 @@
 	default "0"
 	help
 	  This controls the initial value of the trickle charge register.
-	  0 = disabled (use this if you are unsure or have a non rechargable battery)
+	  0 = disabled (use this if you are unsure or have a non rechargeable battery)
 	  Otherwise the following values can be OR:ed together to control the
 	  charge current:
 	  1 = 2kohm, 2 = 4kohm, 3 = 4kohm
diff --git a/arch/cris/arch-v10/drivers/eeprom.c b/arch/cris/arch-v10/drivers/eeprom.c
index 6e1f191..284ebfd 100644
--- a/arch/cris/arch-v10/drivers/eeprom.c
+++ b/arch/cris/arch-v10/drivers/eeprom.c
@@ -1,7 +1,7 @@
 /*!*****************************************************************************
 *!
-*!  Implements an interface for i2c compatible eeproms to run under linux.
-*!  Supports 2k, 8k(?) and 16k. Uses adaptive timing adjustents by
+*!  Implements an interface for i2c compatible eeproms to run under Linux.
+*!  Supports 2k, 8k(?) and 16k. Uses adaptive timing adjustments by
 *!  Johan.Adolfsson@axis.com
 *!
 *!  Probing results:
@@ -51,7 +51,7 @@
 *!  Revision 1.8  2001/06/15 13:24:29  jonashg
 *!  * Added verification of pointers from userspace in read and write.
 *!  * Made busy counter volatile.
-*!  * Added define for inital write delay.
+*!  * Added define for initial write delay.
 *!  * Removed warnings by using loff_t instead of unsigned long.
 *!
 *!  Revision 1.7  2001/06/14 15:26:54  jonashg
diff --git a/arch/cris/arch-v10/drivers/i2c.c b/arch/cris/arch-v10/drivers/i2c.c
index 6114596..092c724 100644
--- a/arch/cris/arch-v10/drivers/i2c.c
+++ b/arch/cris/arch-v10/drivers/i2c.c
@@ -47,7 +47,7 @@
 *! Update Port B register and shadow even when running with hardware support
 *!   to avoid glitches when reading bits
 *! Never set direction to out in i2c_inbyte
-*! Removed incorrect clock togling at end of i2c_inbyte
+*! Removed incorrect clock toggling at end of i2c_inbyte
 *!
 *! Revision 1.8  2002/08/13 06:31:53  starvik
 *! Made SDA and SCL line configurable
diff --git a/arch/cris/arch-v10/kernel/kgdb.c b/arch/cris/arch-v10/kernel/kgdb.c
index 34528da..07628a1 100644
--- a/arch/cris/arch-v10/kernel/kgdb.c
+++ b/arch/cris/arch-v10/kernel/kgdb.c
@@ -33,7 +33,7 @@
 *!
 *! Revision 1.2  2002/11/19 14:35:24  starvik
 *! Changes from linux 2.4
-*! Changed struct initializer syntax to the currently prefered notation
+*! Changed struct initializer syntax to the currently preferred notation
 *!
 *! Revision 1.1  2001/12/17 13:59:27  bjornw
 *! Initial revision
diff --git a/arch/cris/arch-v10/lib/old_checksum.c b/arch/cris/arch-v10/lib/old_checksum.c
index 22a6f0a..497634a 100644
--- a/arch/cris/arch-v10/lib/old_checksum.c
+++ b/arch/cris/arch-v10/lib/old_checksum.c
@@ -47,39 +47,41 @@
 
 #include <asm/delay.h>
 
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+__wsum csum_partial(const void *p, int len, __wsum __sum)
 {
-  /*
-   * Experiments with ethernet and slip connections show that buff
-   * is aligned on either a 2-byte or 4-byte boundary.
-   */
-  const unsigned char *endMarker = buff + len;
-  const unsigned char *marker = endMarker - (len % 16);
+	u32 sum = (__force u32)__sum;
+	const u16 *buff = p;
+	/*
+	* Experiments with ethernet and slip connections show that buff
+	* is aligned on either a 2-byte or 4-byte boundary.
+	*/
+	const void *endMarker = p + len;
+	const void *marker = endMarker - (len % 16);
 #if 0
-  if((int)buff & 0x3)
-    printk("unaligned buff %p\n", buff);
-  __delay(900); /* extra delay of 90 us to test performance hit */
+	if((int)buff & 0x3)
+		printk("unaligned buff %p\n", buff);
+	__delay(900); /* extra delay of 90 us to test performance hit */
 #endif
-  BITON;
-  while (buff < marker) {
-    sum += *((unsigned short *)buff)++;
-    sum += *((unsigned short *)buff)++;
-    sum += *((unsigned short *)buff)++;
-    sum += *((unsigned short *)buff)++;
-    sum += *((unsigned short *)buff)++;
-    sum += *((unsigned short *)buff)++;
-    sum += *((unsigned short *)buff)++;
-    sum += *((unsigned short *)buff)++;
-  }
-  marker = endMarker - (len % 2);
-  while(buff < marker) {
-    sum += *((unsigned short *)buff)++;
-  }
-  if(endMarker - buff > 0) {
-    sum += *buff;                 /* add extra byte seperately */
-  }
-  BITOFF;
-  return(sum);
+	BITON;
+	while (buff < marker) {
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+	}
+	marker = endMarker - (len % 2);
+	while (buff < marker)
+		sum += *buff++;
+
+	if (endMarker > buff)
+		sum += *(const u8 *)buff;	/* add extra byte separately */
+
+	BITOFF;
+	return (__force __wsum)sum;
 }
 
 EXPORT_SYMBOL(csum_partial);
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index a33097f..f64624f 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -88,7 +88,7 @@
 	help
 	  Enables the DMA7 input channel for ser0 (ttyS0).
 	  If you do not enable DMA, an interrupt for each character will be
-	  used when receiveing data.
+	  used when receiving data.
 	  Normally you want to use DMA, unless you use the DMA channel for
 	  something else.
 
@@ -157,7 +157,7 @@
 	help
 	  Enables the DMA5 input channel for ser1 (ttyS1).
 	  If you do not enable DMA, an interrupt for each character will be
-	  used when receiveing data.
+	  used when receiving data.
 	  Normally you want this on, unless you use the DMA channel for
 	  something else.
 
@@ -228,7 +228,7 @@
 	help
 	  Enables the DMA3 input channel for ser2 (ttyS2).
 	  If you do not enable DMA, an interrupt for each character will be
-	  used when receiveing data.
+	  used when receiving data.
 	  Normally you want to use DMA, unless you use the DMA channel for
 	  something else.
 
@@ -297,7 +297,7 @@
 	help
 	  Enables the DMA9 input channel for ser3 (ttyS3).
 	  If you do not enable DMA, an interrupt for each character will be
-	  used when receiveing data.
+	  used when receiving data.
 	  Normally you want to use DMA, unless you use the DMA channel for
 	  something else.
 
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 934c510..c73e91f 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -232,7 +232,7 @@
 	 * context, we must not take the fault..
 	 */
 
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
 	down_read(&mm->mmap_sem);
diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c
index eae874a..14f64b0 100644
--- a/arch/frv/kernel/futex.c
+++ b/arch/frv/kernel/futex.c
@@ -10,9 +10,9 @@
  */
 
 #include <linux/futex.h>
+#include <linux/uaccess.h>
 #include <asm/futex.h>
 #include <asm/errno.h>
-#include <asm/uaccess.h>
 
 /*
  * the various futex operations; MMU fault checking is ignored under no-MMU
@@ -200,7 +200,7 @@
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -223,7 +223,7 @@
 		break;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
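
The futex hunk above replaces the raw inc/dec_preempt_count() pair with
pagefault_disable()/pagefault_enable() from <linux/uaccess.h>, which bracket a
region where a user access must fail with -EFAULT rather than sleep.  A hedged
sketch of that bracket around a single user read, using a hypothetical helper:

    #include <linux/uaccess.h>

    static int example_read_user_atomic(int __user *uaddr, int *val)
    {
            int ret;

            pagefault_disable();
            ret = __get_user(*val, uaddr);  /* -EFAULT instead of sleeping */
            pagefault_enable();

            return ret;
    }
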
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
index a8c61da..1a5eb6c 100644
--- a/arch/frv/kernel/setup.c
+++ b/arch/frv/kernel/setup.c
@@ -947,7 +947,7 @@
 	if (LOADER_TYPE && INITRD_START) {
 		if (INITRD_START + INITRD_SIZE <= (low_top_pfn << PAGE_SHIFT)) {
 			reserve_bootmem(INITRD_START, INITRD_SIZE);
-			initrd_start = INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
+			initrd_start = INITRD_START + PAGE_OFFSET;
 			initrd_end = initrd_start + INITRD_SIZE;
 		}
 		else {
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index b8a5882..85baeae 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -21,7 +21,7 @@
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
 #include <linux/personality.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
diff --git a/arch/frv/lib/checksum.c b/arch/frv/lib/checksum.c
index 20e7dfc..44e16d5 100644
--- a/arch/frv/lib/checksum.c
+++ b/arch/frv/lib/checksum.c
@@ -32,7 +32,6 @@
    of the assembly has to go. */
 
 #include <net/checksum.h>
-#include <asm/checksum.h>
 #include <linux/module.h>
 
 static inline unsigned short from32to16(unsigned long x)
@@ -105,15 +104,15 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
 {
 	unsigned int result = do_csum(buff, len);
 
 	/* add in old sum, and carry.. */
-	result += sum;
-	if (sum > result)
+	result += (__force u32)sum;
+	if ((__force u32)sum > result)
 		result += 1;
-	return result;
+	return (__force __wsum)result;
 }
 
 EXPORT_SYMBOL(csum_partial);
@@ -122,9 +121,9 @@
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-unsigned short ip_compute_csum(const unsigned char * buff, int len)
+__sum16 ip_compute_csum(const void *buff, int len)
 {
-	return ~do_csum(buff, len);
+	return (__force __sum16)~do_csum(buff, len);
 }
 
 EXPORT_SYMBOL(ip_compute_csum);
@@ -132,9 +131,9 @@
 /*
  * copy from fs while checksumming, otherwise like csum_partial
  */
-unsigned int
-csum_partial_copy_from_user(const char __user *src, char *dst,
-			    int len, int sum, int *csum_err)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst,
+			    int len, __wsum sum, int *csum_err)
 {
 	int rem;
 
@@ -157,11 +156,11 @@
 /*
  * copy from ds while checksumming, otherwise like csum_partial
  */
-unsigned int
-csum_partial_copy(const char *src, char *dst, int len, int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
 	memcpy(dst, src, len);
 	return csum_partial(dst, len, sum);
 }
 
-EXPORT_SYMBOL(csum_partial_copy);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 8b3eb50..3f12296 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -78,7 +78,7 @@
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
 	down_read(&mm->mmap_sem);
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index f76dd03..19b13be 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -18,7 +18,7 @@
 #include <asm/cacheflush.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
-kmem_cache_t *pgd_cache;
+struct kmem_cache *pgd_cache;
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
@@ -100,7 +100,7 @@
 		set_page_private(next, (unsigned long) pprev);
 }
 
-void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
 	unsigned long flags;
 
@@ -120,7 +120,7 @@
 }
 
 /* never called when PTRS_PER_PMD > 1 */
-void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
 	unsigned long flags; /* can be called from interrupt context */
 
diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c
index 9b4be05..d1b1526 100644
--- a/arch/h8300/kernel/h8300_ksyms.c
+++ b/arch/h8300/kernel/h8300_ksyms.c
@@ -39,7 +39,7 @@
 EXPORT_SYMBOL(disable_irq);
 
 /* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
 
 /* The following are special because they're not called
    explicitly (the C compiler generates them).  Fortunately,
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index 1077b71..6adf8f4 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -116,7 +116,7 @@
 #endif
 #else
 	if ((memory_end < CONFIG_BLKDEV_RESERVE_ADDRESS) && 
-	    (memory_end > CONFIG_BLKDEV_RESERVE_ADDRESS)
+	    (memory_end > CONFIG_BLKDEV_RESERVE_ADDRESS))
 	    /* overlap userarea */
 	    memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS; 
 #endif
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index 7787f70..0295560 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -38,7 +38,7 @@
 #include <linux/personality.h>
 #include <linux/tty.h>
 #include <linux/binfmts.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 
 #include <asm/setup.h>
 #include <asm/uaccess.h>
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S
index 756325d..f05288b 100644
--- a/arch/h8300/kernel/vmlinux.lds.S
+++ b/arch/h8300/kernel/vmlinux.lds.S
@@ -70,6 +70,7 @@
 #endif
         .text :
 	{
+	_text = .;
 #if defined(CONFIG_ROMKERNEL)
 	*(.int_redirect)
 #endif
diff --git a/arch/h8300/lib/checksum.c b/arch/h8300/lib/checksum.c
index 5aa688d..bdc5b03 100644
--- a/arch/h8300/lib/checksum.c
+++ b/arch/h8300/lib/checksum.c
@@ -96,9 +96,9 @@
  *	This is a version of ip_compute_csum() optimized for IP headers,
  *	which always checksum on 4 octet boundaries.
  */
-unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
-	return ~do_csum(iph,ihl*4);
+	return (__force __sum16)~do_csum(iph,ihl*4);
 }
 
 /*
@@ -113,15 +113,19 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+/*
+ * Egads...  That thing apparently assumes that *all* checksums it ever sees will
+ * be folded.  Very likely a bug.
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum)
 {
 	unsigned int result = do_csum(buff, len);
 
 	/* add in old sum, and carry.. */
-	result += sum;
+	result += (__force u32)sum;
 	/* 16+c bits -> 16 bits */
 	result = (result & 0xffff) + (result >> 16);
-	return result;
+	return (__force __wsum)result;
 }
 
 EXPORT_SYMBOL(csum_partial);
@@ -130,20 +134,21 @@
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-unsigned short ip_compute_csum(const unsigned char * buff, int len)
+__sum16 ip_compute_csum(const void *buff, int len)
 {
-	return ~do_csum(buff,len);
+	return (__force __sum16)~do_csum(buff,len);
 }
 
 /*
  * copy from fs while checksumming, otherwise like csum_partial
  */
 
-unsigned int
-csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *csum_err)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+			    __wsum sum, int *csum_err)
 {
 	if (csum_err) *csum_err = 0;
-	memcpy(dst, src, len);
+	memcpy(dst, (__force const void *)src, len);
 	return csum_partial(dst, len, sum);
 }
 
@@ -151,8 +156,8 @@
  * copy from ds while checksumming, otherwise like csum_partial
  */
 
-unsigned int
-csum_partial_copy(const char *src, char *dst, int len, int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
 	memcpy(dst, src, len);
 	return csum_partial(dst, len, sum);
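
The "16+c bits -> 16 bits" comment above is terse; as a standalone illustration (not part of the patch, the function name is made up), the usual way to fold a 32-bit one's-complement sum down to the final 16-bit checksum is:

	/* Fold a 32-bit one's-complement sum to 16 bits and invert it. */
	static unsigned short csum_fold_sketch(unsigned int sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries into the low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);	/* a second fold catches any remaining carry */
		return (unsigned short)~sum;
	}

csum_partial() above gets away with a single fold only because do_csum() is assumed to hand it an already-folded value, which is exactly what the in-line comment there questions.
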
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 8ff1c6f..ea70359 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -182,6 +182,17 @@
 
 endchoice
 
+config PARAVIRT
+	bool "Paravirtualization support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  Paravirtualization is a way of running multiple instances of
+	  Linux on the same machine, under a hypervisor.  This option
+	  changes the kernel so it can modify itself when it is run
+	  under a hypervisor, improving performance significantly.
+	  However, when run without a hypervisor the kernel is
+	  theoretically slower.  If in doubt, say N.
+
 config ACPI_SRAT
 	bool
 	default y
@@ -443,7 +454,8 @@
 
 choice
 	prompt "High Memory Support"
-	default NOHIGHMEM
+	default HIGHMEM4G if !X86_NUMAQ
+	default HIGHMEM64G if X86_NUMAQ
 
 config NOHIGHMEM
 	bool "off"
@@ -710,20 +722,6 @@
 	depends on (((X86_SUMMIT || X86_GENERICARCH) && NUMA) || (X86 && EFI))
 	default y
 
-config REGPARM
-	bool "Use register arguments"
-	default y
-	help
-	Compile the kernel with -mregparm=3. This instructs gcc to use
-	a more efficient function call ABI which passes the first three
-	arguments of a function call via registers, which results in denser
-	and faster code.
-
-	If this option is disabled, then the default ABI of passing
-	arguments via the stack is used.
-
-	If unsure, say Y.
-
 config SECCOMP
 	bool "Enable seccomp to safely compute untrusted bytecode"
 	depends on PROC_FS
@@ -773,23 +771,39 @@
           PHYSICAL_START.
 	  For more details see Documentation/kdump/kdump.txt
 
-config PHYSICAL_START
-	hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
-
-	default "0x1000000" if CRASH_DUMP
-	default "0x100000"
+config RELOCATABLE
+	bool "Build a relocatable kernel(EXPERIMENTAL)"
+	depends on EXPERIMENTAL
 	help
-	  This gives the physical address where the kernel is loaded. Normally
-	  for regular kernels this value is 0x100000 (1MB). But in the case
-	  of kexec on panic the fail safe kernel needs to run at a different
-	  address than the panic-ed kernel. This option is used to set the load
-	  address for kernels used to capture crash dump on being kexec'ed
-	  after panic. The default value for crash dump kernels is
-	  0x1000000 (16MB). This can also be set based on the "X" value as
-	  specified in the "crashkernel=YM@XM" command line boot parameter
-	  passed to the panic-ed kernel. Typically this parameter is set as
-	  crashkernel=64M@16M. Please take a look at
-	  Documentation/kdump/kdump.txt for more details about crash dumps.
+	  This builds a kernel image that retains relocation information
+	  so it can be loaded someplace besides the default 1MB.
+	  The relocations tend to make the kernel binary about 10% larger,
+	  but they are discarded at runtime.
+
+	  One use is for the kexec on panic case where the recovery kernel
+	  must live at a different physical address than the primary
+	  kernel.
+
+config PHYSICAL_ALIGN
+	hex "Alignment value to which kernel should be aligned"
+	default "0x100000"
+	range 0x2000 0x400000
+	help
+	  This value puts an alignment restriction on the physical address
+	  at which the kernel is loaded and run. The kernel is compiled for
+	  an address which meets this alignment restriction.
+
+	  If the bootloader loads the kernel at a non-aligned address and
+	  CONFIG_RELOCATABLE is set, the kernel will move itself to the
+	  nearest address aligned to this value and run from there.
+
+	  If the bootloader loads the kernel at a non-aligned address and
+	  CONFIG_RELOCATABLE is not set, the kernel will ignore the run-time
+	  load address, decompress itself to the address it has been
+	  compiled for, and run from there. The address for which the kernel
+	  is compiled already meets this alignment restriction, so the end
+	  result is that the kernel always runs from a suitably aligned
+	  physical address.
 
 	  Don't change this unless you know what you are doing.
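
For reference, the rounding described in the PHYSICAL_ALIGN help text above is the usual power-of-two align-up; a minimal C sketch (illustrative only, assuming the configured value is a power of two, which is what the decompressor relies on):

	/* Round addr up to the next multiple of align (align must be a power of two). */
	static unsigned long align_up(unsigned long addr, unsigned long align)
	{
		return (addr + align - 1) & ~(align - 1);
	}

This mirrors the addl/andl pair applied to the load address in arch/i386/boot/compressed/head.S below.
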
 
diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
index fc4f2ab..821fd26 100644
--- a/arch/i386/Kconfig.cpu
+++ b/arch/i386/Kconfig.cpu
@@ -103,8 +103,15 @@
 	  Select this for Intel Pentium M (not Pentium-4 M)
 	  notebook chips.
 
+config MCORE2
+	bool "Core 2/newer Xeon"
+	help
+	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and 53xx)
+	  CPUs. You can distinguish newer from older Xeons by the CPU family
+	  in /proc/cpuinfo. Newer ones have 6.
+
 config MPENTIUM4
-	bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
+	bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
 	help
 	  Select this for Intel Pentium 4 chips.  This includes the
 	  Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
@@ -229,7 +236,7 @@
 	default "7" if MPENTIUM4 || X86_GENERIC
 	default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
 	default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
-	default "6" if MK7 || MK8 || MPENTIUMM
+	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2
 
 config RWSEM_GENERIC_SPINLOCK
 	bool
@@ -287,17 +294,17 @@
 
 config X86_GOOD_APIC
 	bool
-	depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON
+	depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON || MCORE2
 	default y
 
 config X86_INTEL_USERCOPY
 	bool
-	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON
+	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
 	default y
 
 config X86_USE_PPRO_CHECKSUM
 	bool
-	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX
+	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2
 	default y
 
 config X86_USE_3DNOW
@@ -312,5 +319,5 @@
 
 config X86_TSC
 	bool
-	depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX) && !X86_NUMAQ
+	depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ
 	default y
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
index b31c080..f68cc6f 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -85,4 +85,14 @@
           option saves about 4k and might cause you much additional grey
           hair.
 
+config DEBUG_PARAVIRT
+	bool "Enable some paravirtualization debugging"
+	default y
+	depends on PARAVIRT && DEBUG_KERNEL
+	help
+	  Currently deliberately clobbers regs which are allowed to be
+	  clobbered in inlined paravirt hooks, even in native mode.
+	  If turning this off solves a problem, then DISABLE_INTERRUPTS() or
+	  ENABLE_INTERRUPTS() is lying about what registers can be clobbered.
+
 endmenu
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 0677908..f7ac1ae 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -26,10 +26,12 @@
 
 LDFLAGS		:= -m elf_i386
 OBJCOPYFLAGS	:= -O binary -R .note -R .comment -S
-LDFLAGS_vmlinux :=
+ifdef CONFIG_RELOCATABLE
+LDFLAGS_vmlinux := --emit-relocs
+endif
 CHECKFLAGS	+= -D__i386__
 
-CFLAGS += -pipe -msoft-float
+CFLAGS += -pipe -msoft-float -mregparm=3
 
 # prevent gcc from keeping the stack 16 byte aligned
 CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
@@ -37,8 +39,6 @@
 # CPU-specific tuning. Anything which can be shared with UML should go here.
 include $(srctree)/arch/i386/Makefile.cpu
 
-cflags-$(CONFIG_REGPARM) += -mregparm=3
-
 # temporary until string.h is fixed
 cflags-y += -ffreestanding
 
diff --git a/arch/i386/Makefile.cpu b/arch/i386/Makefile.cpu
index a11befb..a32c031 100644
--- a/arch/i386/Makefile.cpu
+++ b/arch/i386/Makefile.cpu
@@ -32,6 +32,7 @@
 cflags-$(CONFIG_MWINCHIP3D)	+= $(call cc-option,-march=winchip2,-march=i586)
 cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-option,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
 cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
+cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call cc-option,-mtune=core2,$(call cc-option,-mtune=generic,-mtune=i686))
 
 # AMD Elan support
 cflags-$(CONFIG_X86_ELAN)	+= -march=i486
diff --git a/arch/i386/boot/compressed/Makefile b/arch/i386/boot/compressed/Makefile
index 258ea95..a661217 100644
--- a/arch/i386/boot/compressed/Makefile
+++ b/arch/i386/boot/compressed/Makefile
@@ -4,22 +4,42 @@
 # create a compressed vmlinux image from the original vmlinux
 #
 
-targets		:= vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o
+targets		:= vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o \
+			vmlinux.bin.all vmlinux.relocs
 EXTRA_AFLAGS	:= -traditional
 
-LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -e startup_32
+LDFLAGS_vmlinux := -T
+CFLAGS_misc.o += -fPIC
+hostprogs-y	:= relocs
 
-$(obj)/vmlinux: $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o FORCE
+$(obj)/vmlinux: $(src)/vmlinux.lds $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o FORCE
 	$(call if_changed,ld)
 	@:
 
 $(obj)/vmlinux.bin: vmlinux FORCE
 	$(call if_changed,objcopy)
 
+quiet_cmd_relocs = RELOCS  $@
+      cmd_relocs = $(obj)/relocs $< > $@;$(obj)/relocs --abs-relocs $<
+$(obj)/vmlinux.relocs: vmlinux $(obj)/relocs FORCE
+	$(call if_changed,relocs)
+
+vmlinux.bin.all-y := $(obj)/vmlinux.bin
+vmlinux.bin.all-$(CONFIG_RELOCATABLE) += $(obj)/vmlinux.relocs
+quiet_cmd_relocbin = BUILD   $@
+      cmd_relocbin = cat $(filter-out FORCE,$^) > $@
+$(obj)/vmlinux.bin.all: $(vmlinux.bin.all-y) FORCE
+	$(call if_changed,relocbin)
+
+ifdef CONFIG_RELOCATABLE
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE
+	$(call if_changed,gzip)
+else
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,gzip)
+endif
 
 LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
 
-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
+$(obj)/piggy.o: $(src)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
 	$(call if_changed,ld)
diff --git a/arch/i386/boot/compressed/head.S b/arch/i386/boot/compressed/head.S
index b5893e4..f395a4b 100644
--- a/arch/i386/boot/compressed/head.S
+++ b/arch/i386/boot/compressed/head.S
@@ -26,9 +26,11 @@
 #include <linux/linkage.h>
 #include <asm/segment.h>
 #include <asm/page.h>
+#include <asm/boot.h>
 
+.section ".text.head"
 	.globl startup_32
-	
+
 startup_32:
 	cld
 	cli
@@ -37,93 +39,142 @@
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
+	movl %eax,%ss
 
-	lss stack_start,%esp
-	xorl %eax,%eax
-1:	incl %eax		# check that A20 really IS enabled
-	movl %eax,0x000000	# loop forever if it isn't
-	cmpl %eax,0x100000
-	je 1b
+/* Calculate the delta between where we were compiled to run at
+ * and where we were actually loaded at.  This can only be done
+ * with a short local call on x86.  Nothing else will tell us what
+ * address we are running at.  The reserved chunk of the real-mode
+ * data at 0x34-0x3f is used as the stack for this calculation.
+ * Only 4 bytes are needed.
+ */
+	leal 0x40(%esi), %esp
+	call 1f
+1:	popl %ebp
+	subl $1b, %ebp
+
+/* %ebp contains the address we are loaded at by the boot loader and %ebx
+ * contains the address where we should move the kernel image temporarily
+ * for safe in-place decompression.
+ */
+
+#ifdef CONFIG_RELOCATABLE
+	movl 	%ebp, %ebx
+	addl    $(CONFIG_PHYSICAL_ALIGN - 1), %ebx
+	andl    $(~(CONFIG_PHYSICAL_ALIGN - 1)), %ebx
+#else
+	movl $LOAD_PHYSICAL_ADDR, %ebx
+#endif
+
+	/* Replace the compressed data size with the uncompressed size */
+	subl input_len(%ebp), %ebx
+	movl output_len(%ebp), %eax
+	addl %eax, %ebx
+	/* Add 8 bytes for every 32K input block */
+	shrl $12, %eax
+	addl %eax, %ebx
+	/* Add 32K + 18 bytes of extra slack */
+	addl $(32768 + 18), %ebx
+	/* Align on a 4K boundary */
+	addl $4095, %ebx
+	andl $~4095, %ebx
+
+/* Copy the compressed kernel to the end of our buffer
+ * where decompression in place becomes safe.
+ */
+	pushl %esi
+	leal _end(%ebp), %esi
+	leal _end(%ebx), %edi
+	movl $(_end - startup_32), %ecx
+	std
+	rep
+	movsb
+	cld
+	popl %esi
+
+/* Compute the kernel start address.
+ */
+#ifdef CONFIG_RELOCATABLE
+	addl    $(CONFIG_PHYSICAL_ALIGN - 1), %ebp
+	andl    $(~(CONFIG_PHYSICAL_ALIGN - 1)), %ebp
+#else
+	movl	$LOAD_PHYSICAL_ADDR, %ebp
+#endif
 
 /*
- * Initialize eflags.  Some BIOS's leave bits like NT set.  This would
- * confuse the debugger if this code is traced.
- * XXX - best to initialize before switching to protected mode.
+ * Jump to the relocated address.
  */
-	pushl $0
-	popfl
+	leal relocated(%ebx), %eax
+	jmp *%eax
+.section ".text"
+relocated:
+
 /*
  * Clear BSS
  */
 	xorl %eax,%eax
-	movl $_edata,%edi
-	movl $_end,%ecx
+	leal _edata(%ebx),%edi
+	leal _end(%ebx), %ecx
 	subl %edi,%ecx
 	cld
 	rep
 	stosb
+
+/*
+ * Setup the stack for the decompressor
+ */
+	leal stack_end(%ebx), %esp
+
 /*
  * Do the decompression, and jump to the new kernel..
  */
-	subl $16,%esp	# place for structure on the stack
-	movl %esp,%eax
+	movl output_len(%ebx), %eax
+	pushl %eax
+	pushl %ebp	# output address
+	movl input_len(%ebx), %eax
+	pushl %eax	# input_len
+	leal input_data(%ebx), %eax
+	pushl %eax	# input_data
+	leal _end(%ebx), %eax
+	pushl %eax	# end of the image as third argument
 	pushl %esi	# real mode pointer as second arg
-	pushl %eax	# address of structure as first arg
 	call decompress_kernel
-	orl  %eax,%eax 
-	jnz  3f
-	popl %esi	# discard address
-	popl %esi	# real mode pointer
-	xorl %ebx,%ebx
-	ljmp $(__BOOT_CS), $__PHYSICAL_START
+	addl $20, %esp
+	popl %ecx
+
+#ifdef CONFIG_RELOCATABLE
+/* Find the address of the relocations.
+ */
+	movl %ebp, %edi
+	addl %ecx, %edi
+
+/* Calculate the delta between where vmlinux was compiled to run
+ * and where it was actually loaded.
+ */
+	movl %ebp, %ebx
+	subl $LOAD_PHYSICAL_ADDR, %ebx
+	jz   2f		/* Nothing to be done if loaded at compiled addr. */
+/*
+ * Process relocations.
+ */
+
+1:	subl $4, %edi
+	movl 0(%edi), %ecx
+	testl %ecx, %ecx
+	jz 2f
+	addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
+	jmp 1b
+2:
+#endif
 
 /*
- * We come here, if we were loaded high.
- * We need to move the move-in-place routine down to 0x1000
- * and then start it with the buffer addresses in registers,
- * which we got from the stack.
+ * Jump to the decompressed kernel.
  */
-3:
-	movl $move_routine_start,%esi
-	movl $0x1000,%edi
-	movl $move_routine_end,%ecx
-	subl %esi,%ecx
-	addl $3,%ecx
-	shrl $2,%ecx
-	cld
-	rep
-	movsl
-
-	popl %esi	# discard the address
-	popl %ebx	# real mode pointer
-	popl %esi	# low_buffer_start
-	popl %ecx	# lcount
-	popl %edx	# high_buffer_start
-	popl %eax	# hcount
-	movl $__PHYSICAL_START,%edi
-	cli		# make sure we don't get interrupted
-	ljmp $(__BOOT_CS), $0x1000 # and jump to the move routine
-
-/*
- * Routine (template) for moving the decompressed kernel in place,
- * if we were high loaded. This _must_ PIC-code !
- */
-move_routine_start:
-	movl %ecx,%ebp
-	shrl $2,%ecx
-	rep
-	movsl
-	movl %ebp,%ecx
-	andl $3,%ecx
-	rep
-	movsb
-	movl %edx,%esi
-	movl %eax,%ecx	# NOTE: rep movsb won't move if %ecx == 0
-	addl $3,%ecx
-	shrl $2,%ecx
-	rep
-	movsl
-	movl %ebx,%esi	# Restore setup pointer
 	xorl %ebx,%ebx
-	ljmp $(__BOOT_CS), $__PHYSICAL_START
-move_routine_end:
+	jmp *%ebp
+
+.bss
+.balign 4
+stack:
+	.fill 4096, 1, 0
+stack_end:
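
A C-level restatement of the buffer arithmetic in the new startup_32 code above (a sketch with made-up names; the 32768 + 18 slack is justified by the comment added to misc.c below):

	/* Base address to which the compressed image is moved so that
	 * decompression in place cannot overrun it.
	 */
	static unsigned long copy_base(unsigned long aligned_load_addr,
				       unsigned long input_len,
				       unsigned long output_len)
	{
		unsigned long base = aligned_load_addr;

		base -= input_len;		/* swap the compressed size ...     */
		base += output_len;		/* ... for the uncompressed size    */
		base += output_len >> 12;	/* 8 bytes per 32K input block      */
		base += 32768 + 18;		/* worst-case block + gzip overhead */
		base = (base + 4095) & ~4095UL;	/* round up to a 4K boundary        */
		return base;
	}
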
diff --git a/arch/i386/boot/compressed/misc.c b/arch/i386/boot/compressed/misc.c
index b2ccd54..1ce7017 100644
--- a/arch/i386/boot/compressed/misc.c
+++ b/arch/i386/boot/compressed/misc.c
@@ -9,11 +9,94 @@
  * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
  */
 
+#undef CONFIG_PARAVIRT
 #include <linux/linkage.h>
 #include <linux/vmalloc.h>
 #include <linux/screen_info.h>
 #include <asm/io.h>
 #include <asm/page.h>
+#include <asm/boot.h>
+
+/* WARNING!!
+ * This code is compiled with -fPIC and it is relocated dynamically
+ * at run time, but no relocation processing is performed.
+ * This means that it is not safe to place pointers in static structures.
+ */
+
+/*
+ * Getting to provably safe in-place decompression is hard.
+ * Worst case behaviours need to be analyzed.
+ * Background information:
+ *
+ * The file layout is:
+ *    magic[2]
+ *    method[1]
+ *    flags[1]
+ *    timestamp[4]
+ *    extraflags[1]
+ *    os[1]
+ *    compressed data blocks[N]
+ *    crc[4] orig_len[4]
+ *
+ * resulting in 18 bytes of non-compressed data overhead.
+ *
+ * Files are divided into blocks:
+ * 1 bit (last block flag)
+ * 2 bits (block type)
+ *
+ * A block occurs every 32K-1 bytes or when 50% compression has been achieved.
+ * The smallest block type encoding is always used.
+ *
+ * stored:
+ *    32 bits length in bytes.
+ *
+ * fixed:
+ *    magic fixed tree.
+ *    symbols.
+ *
+ * dynamic:
+ *    dynamic tree encoding.
+ *    symbols.
+ *
+ *
+ * The buffer for decompression in place is the length of the
+ * uncompressed data, plus a small amount extra to keep the algorithm safe.
+ * The compressed data is placed at the end of the buffer.  The output
+ * pointer is placed at the start of the buffer and the input pointer
+ * is placed where the compressed data starts.  Problems will occur
+ * when the output pointer overruns the input pointer.
+ *
+ * The output pointer can only overrun the input pointer if the input
+ * pointer is moving faster than the output pointer.  A condition only
+ * triggered by data whose compressed form is larger than the uncompressed
+ * form.
+ *
+ * The worst case at the block level is a growth of the compressed data
+ * of 5 bytes per 32767 bytes.
+ *
+ * The worst case internal to a compressed block is very hard to figure.
+ * The worst case can at least be bounded by having one bit that represents
+ * 32764 bytes and then all of the rest of the bytes representing the very
+ * very last byte.
+ *
+ * All of which is enough to compute an amount of extra data that is required
+ * to be safe.  To avoid problems at the block level allocating 5 extra bytes
+ * per 32767 bytes of data is sufficient.  To avoid problems internal to a block
+ * adding an extra 32767 bytes (the worst case uncompressed block size) is
+ * sufficient, to ensure that in the worst case the decompressed data for a
+ * block will stop the byte before the compressed data for a block begins.
+ * To avoid problems with the compressed data's meta information an extra 18
+ * bytes are needed.  Leading to the formula:
+ *
+ * extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size.
+ *
+ * Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
+ * Adding 32768 instead of 32767 just makes for round numbers.
+ * Adding the decompressor_size is necessary as it must live after all
+ * of the data as well.  Last I measured the decompressor is about 14K:
+ * 10K of actual data and 4K of bss.
+ *
+ */
 
 /*
  * gzip declarations
@@ -30,15 +113,20 @@
 typedef unsigned short ush;
 typedef unsigned long  ulg;
 
-#define WSIZE 0x8000		/* Window size must be at least 32k, */
-				/* and a power of two */
+#define WSIZE 0x80000000	/* Window size must be at least 32k,
+				 * and a power of two.
+				 * We don't actually have a window, just
+				 * a huge output buffer, so I report
+				 * a 2GB window size, as that should
+				 * always be larger than our output buffer.
+				 */
 
-static uch *inbuf;	     /* input buffer */
-static uch window[WSIZE];    /* Sliding window buffer */
+static uch *inbuf;	/* input buffer */
+static uch *window;	/* Sliding window buffer, (and final output buffer) */
 
-static unsigned insize = 0;  /* valid bytes in inbuf */
-static unsigned inptr = 0;   /* index of next byte to be processed in inbuf */
-static unsigned outcnt = 0;  /* bytes in output buffer */
+static unsigned insize;  /* valid bytes in inbuf */
+static unsigned inptr;   /* index of next byte to be processed in inbuf */
+static unsigned outcnt;  /* bytes in output buffer */
 
 /* gzip flag byte */
 #define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
@@ -89,8 +177,6 @@
 extern int input_len;
 
 static long bytes_out = 0;
-static uch *output_data;
-static unsigned long output_ptr = 0;
 
 static void *malloc(int size);
 static void free(void *where);
@@ -100,24 +186,17 @@
 
 static void putstr(const char *);
 
-extern int end;
-static long free_mem_ptr = (long)&end;
-static long free_mem_end_ptr;
+static unsigned long free_mem_ptr;
+static unsigned long free_mem_end_ptr;
 
-#define INPLACE_MOVE_ROUTINE  0x1000
-#define LOW_BUFFER_START      0x2000
-#define LOW_BUFFER_MAX       0x90000
 #define HEAP_SIZE             0x3000
-static unsigned int low_buffer_end, low_buffer_size;
-static int high_loaded =0;
-static uch *high_buffer_start /* = (uch *)(((ulg)&end) + HEAP_SIZE)*/;
 
 static char *vidmem = (char *)0xb8000;
 static int vidport;
 static int lines, cols;
 
 #ifdef CONFIG_X86_NUMAQ
-static void * xquad_portio = NULL;
+void *xquad_portio;
 #endif
 
 #include "../../../../lib/inflate.c"
@@ -151,7 +230,7 @@
 
 static void gzip_release(void **ptr)
 {
-	free_mem_ptr = (long) *ptr;
+	free_mem_ptr = (unsigned long) *ptr;
 }
  
 static void scroll(void)
@@ -179,7 +258,7 @@
 				y--;
 			}
 		} else {
-			vidmem [ ( x + cols * y ) * 2 ] = c; 
+			vidmem [ ( x + cols * y ) * 2 ] = c;
 			if ( ++x >= cols ) {
 				x = 0;
 				if ( ++y >= lines ) {
@@ -224,58 +303,31 @@
  */
 static int fill_inbuf(void)
 {
-	if (insize != 0) {
-		error("ran out of input data");
-	}
-
-	inbuf = input_data;
-	insize = input_len;
-	inptr = 1;
-	return inbuf[0];
+	error("ran out of input data");
+	return 0;
 }
 
 /* ===========================================================================
  * Write the output window window[0..outcnt-1] and update crc and bytes_out.
  * (Used for the decompressed data only.)
  */
-static void flush_window_low(void)
-{
-    ulg c = crc;         /* temporary variable */
-    unsigned n;
-    uch *in, *out, ch;
-    
-    in = window;
-    out = &output_data[output_ptr]; 
-    for (n = 0; n < outcnt; n++) {
-	    ch = *out++ = *in++;
-	    c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
-    }
-    crc = c;
-    bytes_out += (ulg)outcnt;
-    output_ptr += (ulg)outcnt;
-    outcnt = 0;
-}
-
-static void flush_window_high(void)
-{
-    ulg c = crc;         /* temporary variable */
-    unsigned n;
-    uch *in,  ch;
-    in = window;
-    for (n = 0; n < outcnt; n++) {
-	ch = *output_data++ = *in++;
-	if ((ulg)output_data == low_buffer_end) output_data=high_buffer_start;
-	c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
-    }
-    crc = c;
-    bytes_out += (ulg)outcnt;
-    outcnt = 0;
-}
-
 static void flush_window(void)
 {
-	if (high_loaded) flush_window_high();
-	else flush_window_low();
+	/* With my window equal to my output buffer
+	 * I only need to compute the crc here.
+	 */
+	ulg c = crc;         /* temporary variable */
+	unsigned n;
+	uch *in, ch;
+
+	in = window;
+	for (n = 0; n < outcnt; n++) {
+		ch = *in++;
+		c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
+	}
+	crc = c;
+	bytes_out += (ulg)outcnt;
+	outcnt = 0;
 }
 
 static void error(char *x)
@@ -287,66 +339,8 @@
 	while(1);	/* Halt */
 }
 
-#define STACK_SIZE (4096)
-
-long user_stack [STACK_SIZE];
-
-struct {
-	long * a;
-	short b;
-	} stack_start = { & user_stack [STACK_SIZE] , __BOOT_DS };
-
-static void setup_normal_output_buffer(void)
-{
-#ifdef STANDARD_MEMORY_BIOS_CALL
-	if (RM_EXT_MEM_K < 1024) error("Less than 2MB of memory");
-#else
-	if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
-#endif
-	output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */
-	free_mem_end_ptr = (long)real_mode;
-}
-
-struct moveparams {
-	uch *low_buffer_start;  int lcount;
-	uch *high_buffer_start; int hcount;
-};
-
-static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
-{
-	high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE);
-#ifdef STANDARD_MEMORY_BIOS_CALL
-	if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
-#else
-	if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
-#endif	
-	mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START;
-	low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX
-	  ? LOW_BUFFER_MAX : (unsigned int)real_mode) & ~0xfff;
-	low_buffer_size = low_buffer_end - LOW_BUFFER_START;
-	high_loaded = 1;
-	free_mem_end_ptr = (long)high_buffer_start;
-	if ( (__PHYSICAL_START + low_buffer_size) > ((ulg)high_buffer_start)) {
-		high_buffer_start = (uch *)(__PHYSICAL_START + low_buffer_size);
-		mv->hcount = 0; /* say: we need not to move high_buffer */
-	}
-	else mv->hcount = -1;
-	mv->high_buffer_start = high_buffer_start;
-}
-
-static void close_output_buffer_if_we_run_high(struct moveparams *mv)
-{
-	if (bytes_out > low_buffer_size) {
-		mv->lcount = low_buffer_size;
-		if (mv->hcount)
-			mv->hcount = bytes_out - low_buffer_size;
-	} else {
-		mv->lcount = bytes_out;
-		mv->hcount = 0;
-	}
-}
-
-asmlinkage int decompress_kernel(struct moveparams *mv, void *rmode)
+asmlinkage void decompress_kernel(void *rmode, unsigned long end,
+			uch *input_data, unsigned long input_len, uch *output)
 {
 	real_mode = rmode;
 
@@ -361,13 +355,25 @@
 	lines = RM_SCREEN_INFO.orig_video_lines;
 	cols = RM_SCREEN_INFO.orig_video_cols;
 
-	if (free_mem_ptr < 0x100000) setup_normal_output_buffer();
-	else setup_output_buffer_if_we_run_high(mv);
+	window = output;  	/* Output buffer (Normally at 1M) */
+	free_mem_ptr     = end;	/* Heap  */
+	free_mem_end_ptr = end + HEAP_SIZE;
+	inbuf  = input_data;	/* Input buffer */
+	insize = input_len;
+	inptr  = 0;
+
+	if ((u32)output & (CONFIG_PHYSICAL_ALIGN -1))
+		error("Destination address not CONFIG_PHYSICAL_ALIGN aligned");
+	if (end > ((-__PAGE_OFFSET-(512 <<20)-1) & 0x7fffffff))
+		error("Destination address too large");
+#ifndef CONFIG_RELOCATABLE
+	if ((u32)output != LOAD_PHYSICAL_ADDR)
+		error("Wrong destination address");
+#endif
 
 	makecrc();
 	putstr("Uncompressing Linux... ");
 	gunzip();
 	putstr("Ok, booting the kernel.\n");
-	if (high_loaded) close_output_buffer_if_we_run_high(mv);
-	return high_loaded;
+	return;
 }
diff --git a/arch/i386/boot/compressed/relocs.c b/arch/i386/boot/compressed/relocs.c
new file mode 100644
index 0000000..468da89
--- /dev/null
+++ b/arch/i386/boot/compressed/relocs.c
@@ -0,0 +1,625 @@
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <elf.h>
+#include <byteswap.h>
+#define USE_BSD
+#include <endian.h>
+
+#define MAX_SHDRS 100
+static Elf32_Ehdr ehdr;
+static Elf32_Shdr shdr[MAX_SHDRS];
+static Elf32_Sym  *symtab[MAX_SHDRS];
+static Elf32_Rel  *reltab[MAX_SHDRS];
+static char *strtab[MAX_SHDRS];
+static unsigned long reloc_count, reloc_idx;
+static unsigned long *relocs;
+
+/*
+ * The following symbols have been audited. Their values are constant and do
+ * not change if the bzImage is loaded at a different physical address than
+ * the address for which it has been compiled. Don't warn the user about
+ * absolute relocations present w.r.t. these symbols.
+ */
+static const char* safe_abs_relocs[] = {
+		"__kernel_vsyscall",
+		"__kernel_rt_sigreturn",
+		"__kernel_sigreturn",
+		"SYSENTER_RETURN",
+};
+
+static int is_safe_abs_reloc(const char* sym_name)
+{
+	int i, array_size;
+
+	array_size = sizeof(safe_abs_relocs)/sizeof(char*);
+
+	for(i = 0; i < array_size; i++) {
+		if (!strcmp(sym_name, safe_abs_relocs[i]))
+			/* Match found */
+			return 1;
+	}
+	return 0;
+}
+
+static void die(char *fmt, ...)
+{
+	va_list ap;
+	va_start(ap, fmt);
+	vfprintf(stderr, fmt, ap);
+	va_end(ap);
+	exit(1);
+}
+
+static const char *sym_type(unsigned type)
+{
+	static const char *type_name[] = {
+#define SYM_TYPE(X) [X] = #X
+		SYM_TYPE(STT_NOTYPE),
+		SYM_TYPE(STT_OBJECT),
+		SYM_TYPE(STT_FUNC),
+		SYM_TYPE(STT_SECTION),
+		SYM_TYPE(STT_FILE),
+		SYM_TYPE(STT_COMMON),
+		SYM_TYPE(STT_TLS),
+#undef SYM_TYPE
+	};
+	const char *name = "unknown sym type name";
+	if (type < sizeof(type_name)/sizeof(type_name[0])) {
+		name = type_name[type];
+	}
+	return name;
+}
+
+static const char *sym_bind(unsigned bind)
+{
+	static const char *bind_name[] = {
+#define SYM_BIND(X) [X] = #X
+		SYM_BIND(STB_LOCAL),
+		SYM_BIND(STB_GLOBAL),
+		SYM_BIND(STB_WEAK),
+#undef SYM_BIND
+	};
+	const char *name = "unknown sym bind name";
+	if (bind < sizeof(bind_name)/sizeof(bind_name[0])) {
+		name = bind_name[bind];
+	}
+	return name;
+}
+
+static const char *sym_visibility(unsigned visibility)
+{
+	static const char *visibility_name[] = {
+#define SYM_VISIBILITY(X) [X] = #X
+		SYM_VISIBILITY(STV_DEFAULT),
+		SYM_VISIBILITY(STV_INTERNAL),
+		SYM_VISIBILITY(STV_HIDDEN),
+		SYM_VISIBILITY(STV_PROTECTED),
+#undef SYM_VISIBILITY
+	};
+	const char *name = "unknown sym visibility name";
+	if (visibility < sizeof(visibility_name)/sizeof(visibility_name[0])) {
+		name = visibility_name[visibility];
+	}
+	return name;
+}
+
+static const char *rel_type(unsigned type)
+{
+	static const char *type_name[] = {
+#define REL_TYPE(X) [X] = #X
+		REL_TYPE(R_386_NONE),
+		REL_TYPE(R_386_32),
+		REL_TYPE(R_386_PC32),
+		REL_TYPE(R_386_GOT32),
+		REL_TYPE(R_386_PLT32),
+		REL_TYPE(R_386_COPY),
+		REL_TYPE(R_386_GLOB_DAT),
+		REL_TYPE(R_386_JMP_SLOT),
+		REL_TYPE(R_386_RELATIVE),
+		REL_TYPE(R_386_GOTOFF),
+		REL_TYPE(R_386_GOTPC),
+#undef REL_TYPE
+	};
+	const char *name = "unknown type rel type name";
+	if (type < sizeof(type_name)/sizeof(type_name[0])) {
+		name = type_name[type];
+	}
+	return name;
+}
+
+static const char *sec_name(unsigned shndx)
+{
+	const char *sec_strtab;
+	const char *name;
+	sec_strtab = strtab[ehdr.e_shstrndx];
+	name = "<noname>";
+	if (shndx < ehdr.e_shnum) {
+		name = sec_strtab + shdr[shndx].sh_name;
+	}
+	else if (shndx == SHN_ABS) {
+		name = "ABSOLUTE";
+	}
+	else if (shndx == SHN_COMMON) {
+		name = "COMMON";
+	}
+	return name;
+}
+
+static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym)
+{
+	const char *name;
+	name = "<noname>";
+	if (sym->st_name) {
+		name = sym_strtab + sym->st_name;
+	}
+	else {
+		name = sec_name(sym->st_shndx);
+	}
+	return name;
+}
+
+
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define le16_to_cpu(val) (val)
+#define le32_to_cpu(val) (val)
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+#define le16_to_cpu(val) bswap_16(val)
+#define le32_to_cpu(val) bswap_32(val)
+#endif
+
+static uint16_t elf16_to_cpu(uint16_t val)
+{
+	return le16_to_cpu(val);
+}
+
+static uint32_t elf32_to_cpu(uint32_t val)
+{
+	return le32_to_cpu(val);
+}
+
+static void read_ehdr(FILE *fp)
+{
+	if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) {
+		die("Cannot read ELF header: %s\n",
+			strerror(errno));
+	}
+	if (memcmp(ehdr.e_ident, ELFMAG, 4) != 0) {
+		die("No ELF magic\n");
+	}
+	if (ehdr.e_ident[EI_CLASS] != ELFCLASS32) {
+		die("Not a 32 bit executable\n");
+	}
+	if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB) {
+		die("Not a LSB ELF executable\n");
+	}
+	if (ehdr.e_ident[EI_VERSION] != EV_CURRENT) {
+		die("Unknown ELF version\n");
+	}
+	/* Convert the fields to native endian */
+	ehdr.e_type      = elf16_to_cpu(ehdr.e_type);
+	ehdr.e_machine   = elf16_to_cpu(ehdr.e_machine);
+	ehdr.e_version   = elf32_to_cpu(ehdr.e_version);
+	ehdr.e_entry     = elf32_to_cpu(ehdr.e_entry);
+	ehdr.e_phoff     = elf32_to_cpu(ehdr.e_phoff);
+	ehdr.e_shoff     = elf32_to_cpu(ehdr.e_shoff);
+	ehdr.e_flags     = elf32_to_cpu(ehdr.e_flags);
+	ehdr.e_ehsize    = elf16_to_cpu(ehdr.e_ehsize);
+	ehdr.e_phentsize = elf16_to_cpu(ehdr.e_phentsize);
+	ehdr.e_phnum     = elf16_to_cpu(ehdr.e_phnum);
+	ehdr.e_shentsize = elf16_to_cpu(ehdr.e_shentsize);
+	ehdr.e_shnum     = elf16_to_cpu(ehdr.e_shnum);
+	ehdr.e_shstrndx  = elf16_to_cpu(ehdr.e_shstrndx);
+
+	if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) {
+		die("Unsupported ELF header type\n");
+	}
+	if (ehdr.e_machine != EM_386) {
+		die("Not for x86\n");
+	}
+	if (ehdr.e_version != EV_CURRENT) {
+		die("Unknown ELF version\n");
+	}
+	if (ehdr.e_ehsize != sizeof(Elf32_Ehdr)) {
+		die("Bad Elf header size\n");
+	}
+	if (ehdr.e_phentsize != sizeof(Elf32_Phdr)) {
+		die("Bad program header entry\n");
+	}
+	if (ehdr.e_shentsize != sizeof(Elf32_Shdr)) {
+		die("Bad section header entry\n");
+	}
+	if (ehdr.e_shstrndx >= ehdr.e_shnum) {
+		die("String table index out of bounds\n");
+	}
+}
+
+static void read_shdrs(FILE *fp)
+{
+	int i;
+	if (ehdr.e_shnum > MAX_SHDRS) {
+		die("%d section headers supported: %d\n",
+			ehdr.e_shnum, MAX_SHDRS);
+	}
+	if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) {
+		die("Seek to %d failed: %s\n",
+			ehdr.e_shoff, strerror(errno));
+	}
+	if (fread(&shdr, sizeof(shdr[0]), ehdr.e_shnum, fp) != ehdr.e_shnum) {
+		die("Cannot read ELF section headers: %s\n",
+			strerror(errno));
+	}
+	for(i = 0; i < ehdr.e_shnum; i++) {
+		shdr[i].sh_name      = elf32_to_cpu(shdr[i].sh_name);
+		shdr[i].sh_type      = elf32_to_cpu(shdr[i].sh_type);
+		shdr[i].sh_flags     = elf32_to_cpu(shdr[i].sh_flags);
+		shdr[i].sh_addr      = elf32_to_cpu(shdr[i].sh_addr);
+		shdr[i].sh_offset    = elf32_to_cpu(shdr[i].sh_offset);
+		shdr[i].sh_size      = elf32_to_cpu(shdr[i].sh_size);
+		shdr[i].sh_link      = elf32_to_cpu(shdr[i].sh_link);
+		shdr[i].sh_info      = elf32_to_cpu(shdr[i].sh_info);
+		shdr[i].sh_addralign = elf32_to_cpu(shdr[i].sh_addralign);
+		shdr[i].sh_entsize   = elf32_to_cpu(shdr[i].sh_entsize);
+	}
+
+}
+
+static void read_strtabs(FILE *fp)
+{
+	int i;
+	for(i = 0; i < ehdr.e_shnum; i++) {
+		if (shdr[i].sh_type != SHT_STRTAB) {
+			continue;
+		}
+		strtab[i] = malloc(shdr[i].sh_size);
+		if (!strtab[i]) {
+			die("malloc of %d bytes for strtab failed\n",
+				shdr[i].sh_size);
+		}
+		if (fseek(fp, shdr[i].sh_offset, SEEK_SET) < 0) {
+			die("Seek to %d failed: %s\n",
+				shdr[i].sh_offset, strerror(errno));
+		}
+		if (fread(strtab[i], 1, shdr[i].sh_size, fp) != shdr[i].sh_size) {
+			die("Cannot read symbol table: %s\n",
+				strerror(errno));
+		}
+	}
+}
+
+static void read_symtabs(FILE *fp)
+{
+	int i,j;
+	for(i = 0; i < ehdr.e_shnum; i++) {
+		if (shdr[i].sh_type != SHT_SYMTAB) {
+			continue;
+		}
+		symtab[i] = malloc(shdr[i].sh_size);
+		if (!symtab[i]) {
+			die("malloc of %d bytes for symtab failed\n",
+				shdr[i].sh_size);
+		}
+		if (fseek(fp, shdr[i].sh_offset, SEEK_SET) < 0) {
+			die("Seek to %d failed: %s\n",
+				shdr[i].sh_offset, strerror(errno));
+		}
+		if (fread(symtab[i], 1, shdr[i].sh_size, fp) != shdr[i].sh_size) {
+			die("Cannot read symbol table: %s\n",
+				strerror(errno));
+		}
+		for(j = 0; j < shdr[i].sh_size/sizeof(symtab[i][0]); j++) {
+			symtab[i][j].st_name  = elf32_to_cpu(symtab[i][j].st_name);
+			symtab[i][j].st_value = elf32_to_cpu(symtab[i][j].st_value);
+			symtab[i][j].st_size  = elf32_to_cpu(symtab[i][j].st_size);
+			symtab[i][j].st_shndx = elf16_to_cpu(symtab[i][j].st_shndx);
+		}
+	}
+}
+
+
+static void read_relocs(FILE *fp)
+{
+	int i,j;
+	for(i = 0; i < ehdr.e_shnum; i++) {
+		if (shdr[i].sh_type != SHT_REL) {
+			continue;
+		}
+		reltab[i] = malloc(shdr[i].sh_size);
+		if (!reltab[i]) {
+			die("malloc of %d bytes for relocs failed\n",
+				shdr[i].sh_size);
+		}
+		if (fseek(fp, shdr[i].sh_offset, SEEK_SET) < 0) {
+			die("Seek to %d failed: %s\n",
+				shdr[i].sh_offset, strerror(errno));
+		}
+		if (fread(reltab[i], 1, shdr[i].sh_size, fp) != shdr[i].sh_size) {
+			die("Cannot read symbol table: %s\n",
+				strerror(errno));
+		}
+		for(j = 0; j < shdr[i].sh_size/sizeof(reltab[0][0]); j++) {
+			reltab[i][j].r_offset = elf32_to_cpu(reltab[i][j].r_offset);
+			reltab[i][j].r_info   = elf32_to_cpu(reltab[i][j].r_info);
+		}
+	}
+}
+
+
+static void print_absolute_symbols(void)
+{
+	int i;
+	printf("Absolute symbols\n");
+	printf(" Num:    Value Size  Type       Bind        Visibility  Name\n");
+	for(i = 0; i < ehdr.e_shnum; i++) {
+		char *sym_strtab;
+		Elf32_Sym *sh_symtab;
+		int j;
+		if (shdr[i].sh_type != SHT_SYMTAB) {
+			continue;
+		}
+		sh_symtab = symtab[i];
+		sym_strtab = strtab[shdr[i].sh_link];
+		for(j = 0; j < shdr[i].sh_size/sizeof(symtab[0][0]); j++) {
+			Elf32_Sym *sym;
+			const char *name;
+			sym = &symtab[i][j];
+			name = sym_name(sym_strtab, sym);
+			if (sym->st_shndx != SHN_ABS) {
+				continue;
+			}
+			printf("%5d %08x %5d %10s %10s %12s %s\n",
+				j, sym->st_value, sym->st_size,
+				sym_type(ELF32_ST_TYPE(sym->st_info)),
+				sym_bind(ELF32_ST_BIND(sym->st_info)),
+				sym_visibility(ELF32_ST_VISIBILITY(sym->st_other)),
+				name);
+		}
+	}
+	printf("\n");
+}
+
+static void print_absolute_relocs(void)
+{
+	int i, printed = 0;
+
+	for(i = 0; i < ehdr.e_shnum; i++) {
+		char *sym_strtab;
+		Elf32_Sym *sh_symtab;
+		unsigned sec_applies, sec_symtab;
+		int j;
+		if (shdr[i].sh_type != SHT_REL) {
+			continue;
+		}
+		sec_symtab  = shdr[i].sh_link;
+		sec_applies = shdr[i].sh_info;
+		if (!(shdr[sec_applies].sh_flags & SHF_ALLOC)) {
+			continue;
+		}
+		sh_symtab = symtab[sec_symtab];
+		sym_strtab = strtab[shdr[sec_symtab].sh_link];
+		for(j = 0; j < shdr[i].sh_size/sizeof(reltab[0][0]); j++) {
+			Elf32_Rel *rel;
+			Elf32_Sym *sym;
+			const char *name;
+			rel = &reltab[i][j];
+			sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
+			name = sym_name(sym_strtab, sym);
+			if (sym->st_shndx != SHN_ABS) {
+				continue;
+			}
+
+			/* Absolute symbols are not relocated if bzImage is
+			 * loaded at a non-compiled address. Display a warning
+			 * to the user at compile time about the absolute
+			 * relocations present.
+			 *
+			 * The user needs to audit the code to make sure
+			 * symbols which should have been section relative
+			 * have not become absolute because of some linker
+			 * optimization or wrong programming usage.
+			 *
+			 * Before warning, check whether this absolute symbol
+			 * relocation is harmless.
+			 */
+			if (is_safe_abs_reloc(name))
+				continue;
+
+			if (!printed) {
+				printf("WARNING: Absolute relocations"
+					" present\n");
+				printf("Offset     Info     Type     Sym.Value "
+					"Sym.Name\n");
+				printed = 1;
+			}
+
+			printf("%08x %08x %10s %08x  %s\n",
+				rel->r_offset,
+				rel->r_info,
+				rel_type(ELF32_R_TYPE(rel->r_info)),
+				sym->st_value,
+				name);
+		}
+	}
+
+	if (printed)
+		printf("\n");
+}
+
+static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
+{
+	int i;
+	/* Walk through the relocations */
+	for(i = 0; i < ehdr.e_shnum; i++) {
+		char *sym_strtab;
+		Elf32_Sym *sh_symtab;
+		unsigned sec_applies, sec_symtab;
+		int j;
+		if (shdr[i].sh_type != SHT_REL) {
+			continue;
+		}
+		sec_symtab  = shdr[i].sh_link;
+		sec_applies = shdr[i].sh_info;
+		if (!(shdr[sec_applies].sh_flags & SHF_ALLOC)) {
+			continue;
+		}
+		sh_symtab = symtab[sec_symtab];
+		sym_strtab = strtab[shdr[sec_symtab].sh_link];
+		for(j = 0; j < shdr[i].sh_size/sizeof(reltab[0][0]); j++) {
+			Elf32_Rel *rel;
+			Elf32_Sym *sym;
+			unsigned r_type;
+			rel = &reltab[i][j];
+			sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
+			r_type = ELF32_R_TYPE(rel->r_info);
+			/* Don't visit relocations to absolute symbols */
+			if (sym->st_shndx == SHN_ABS) {
+				continue;
+			}
+			if (r_type == R_386_PC32) {
+				/* PC relative relocations don't need to be adjusted */
+			}
+			else if (r_type == R_386_32) {
+				/* Visit relocations that need to be adjusted */
+				visit(rel, sym);
+			}
+			else {
+				die("Unsupported relocation type: %d\n", r_type);
+			}
+		}
+	}
+}
+
+static void count_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
+{
+	reloc_count += 1;
+}
+
+static void collect_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
+{
+	/* Remember the address that needs to be adjusted. */
+	relocs[reloc_idx++] = rel->r_offset;
+}
+
+static int cmp_relocs(const void *va, const void *vb)
+{
+	const unsigned long *a, *b;
+	a = va; b = vb;
+	return (*a == *b)? 0 : (*a > *b)? 1 : -1;
+}
+
+static void emit_relocs(int as_text)
+{
+	int i;
+	/* Count how many relocations I have and allocate space for them. */
+	reloc_count = 0;
+	walk_relocs(count_reloc);
+	relocs = malloc(reloc_count * sizeof(relocs[0]));
+	if (!relocs) {
+		die("malloc of %d entries for relocs failed\n",
+			reloc_count);
+	}
+	/* Collect up the relocations */
+	reloc_idx = 0;
+	walk_relocs(collect_reloc);
+
+	/* Order the relocations for more efficient processing */
+	qsort(relocs, reloc_count, sizeof(relocs[0]), cmp_relocs);
+
+	/* Print the relocations */
+	if (as_text) {
+		/* Print the relocations in a form that
+		 * gas will accept.
+		 */
+		printf(".section \".data.reloc\",\"a\"\n");
+		printf(".balign 4\n");
+		for(i = 0; i < reloc_count; i++) {
+			printf("\t .long 0x%08lx\n", relocs[i]);
+		}
+		printf("\n");
+	}
+	else {
+		unsigned char buf[4];
+		buf[0] = buf[1] = buf[2] = buf[3] = 0;
+		/* Print a stop marker (four zero bytes) */
+		printf("%c%c%c%c", buf[0], buf[1], buf[2], buf[3]);
+		/* Now print each relocation */
+		for(i = 0; i < reloc_count; i++) {
+			buf[0] = (relocs[i] >>  0) & 0xff;
+			buf[1] = (relocs[i] >>  8) & 0xff;
+			buf[2] = (relocs[i] >> 16) & 0xff;
+			buf[3] = (relocs[i] >> 24) & 0xff;
+			printf("%c%c%c%c", buf[0], buf[1], buf[2], buf[3]);
+		}
+	}
+}
+
+static void usage(void)
+{
+	die("relocs [--abs-syms |--abs-relocs | --text] vmlinux\n");
+}
+
+int main(int argc, char **argv)
+{
+	int show_absolute_syms, show_absolute_relocs;
+	int as_text;
+	const char *fname;
+	FILE *fp;
+	int i;
+
+	show_absolute_syms = 0;
+	show_absolute_relocs = 0;
+	as_text = 0;
+	fname = NULL;
+	for(i = 1; i < argc; i++) {
+		char *arg = argv[i];
+		if (*arg == '-') {
+			if (strcmp(argv[1], "--abs-syms") == 0) {
+				show_absolute_syms = 1;
+				continue;
+			}
+
+			if (strcmp(argv[1], "--abs-relocs") == 0) {
+				show_absolute_relocs = 1;
+				continue;
+			}
+			else if (strcmp(arg, "--text") == 0) {
+				as_text = 1;
+				continue;
+			}
+		}
+		else if (!fname) {
+			fname = arg;
+			continue;
+		}
+		usage();
+	}
+	if (!fname) {
+		usage();
+	}
+	fp = fopen(fname, "r");
+	if (!fp) {
+		die("Cannot open %s: %s\n",
+			fname, strerror(errno));
+	}
+	read_ehdr(fp);
+	read_shdrs(fp);
+	read_strtabs(fp);
+	read_symtabs(fp);
+	read_relocs(fp);
+	if (show_absolute_syms) {
+		print_absolute_symbols();
+		return 0;
+	}
+	if (show_absolute_relocs) {
+		print_absolute_relocs();
+		return 0;
+	}
+	emit_relocs(as_text);
+	return 0;
+}
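
The blob emitted by emit_relocs() above is a four-byte zero stop word followed by little-endian 32-bit offsets; it is appended to vmlinux.bin, so after decompression it sits just past the kernel image, where head.S walks it backwards. A hedged C sketch of that consumer (the names and the PAGE_OFFSET value are assumptions, not part of the patch):

	#define PAGE_OFFSET 0xC0000000UL	/* assumed i386 default */

	/* table_end points just past the decompressed image plus reloc table;
	 * delta is actual_load_addr - LOAD_PHYSICAL_ADDR; the recorded offsets
	 * are kernel virtual addresses from the vmlinux link.
	 */
	static void apply_relocs_sketch(unsigned int *table_end, unsigned long delta)
	{
		unsigned int *p = table_end;

		while (*--p) {			/* the zero stop word ends the walk */
			unsigned int vaddr = *p;
			unsigned int *site = (unsigned int *)(vaddr - PAGE_OFFSET + delta);

			*site += delta;		/* R_386_32: add the load delta */
		}
	}
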
diff --git a/arch/i386/boot/compressed/vmlinux.lds b/arch/i386/boot/compressed/vmlinux.lds
new file mode 100644
index 0000000..cc4854f
--- /dev/null
+++ b/arch/i386/boot/compressed/vmlinux.lds
@@ -0,0 +1,43 @@
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(startup_32)
+SECTIONS
+{
+        /* Be careful: parts of head.S assume startup_32 is at
+         * address 0.
+         */
+	. = 0;
+	.text.head : {
+		_head = . ;
+		*(.text.head)
+		_ehead = . ;
+	}
+	.data.compressed : {
+		*(.data.compressed)
+	}
+	.text :	{
+		_text = .; 	/* Text */
+		*(.text)
+		*(.text.*)
+		_etext = . ;
+	}
+	.rodata : {
+		_rodata = . ;
+		*(.rodata)	 /* read-only data */
+		*(.rodata.*)
+		_erodata = . ;
+	}
+	.data :	{
+		_data = . ;
+		*(.data)
+		*(.data.*)
+		_edata = . ;
+	}
+	.bss : {
+		_bss = . ;
+		*(.bss)
+		*(.bss.*)
+		*(COMMON)
+		_end = . ;
+	}
+}
diff --git a/arch/i386/boot/compressed/vmlinux.scr b/arch/i386/boot/compressed/vmlinux.scr
index 1ed9d79..707a88f 100644
--- a/arch/i386/boot/compressed/vmlinux.scr
+++ b/arch/i386/boot/compressed/vmlinux.scr
@@ -1,9 +1,10 @@
 SECTIONS
 {
-  .data : { 
+  .data.compressed : {
 	input_len = .;
 	LONG(input_data_end - input_data) input_data = .; 
 	*(.data) 
+	output_len = . - 4;
 	input_data_end = .; 
 	}
 }
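
The output_len symbol defined above sits 4 bytes before input_data_end, i.e. on the orig_len[4] field of the gzip trailer described in misc.c, so head.S can read the uncompressed size straight out of the compressed payload. A userspace sketch of the same trick (illustrative only):

	#include <stdio.h>
	#include <stdint.h>

	int main(int argc, char **argv)
	{
		FILE *f;
		uint8_t b[4];

		if (argc < 2 || !(f = fopen(argv[1], "rb")) ||
		    fseek(f, -4, SEEK_END) || fread(b, 1, 4, f) != 4)
			return 1;
		/* gzip stores the uncompressed length (mod 2^32) little-endian */
		printf("uncompressed size: %u\n",
		       b[0] | b[1] << 8 | (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24);
		return 0;
	}
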
diff --git a/arch/i386/boot/setup.S b/arch/i386/boot/setup.S
index 3aec4538..06edf1c 100644
--- a/arch/i386/boot/setup.S
+++ b/arch/i386/boot/setup.S
@@ -81,7 +81,7 @@
 # This is the setup header, and it must start at %cs:2 (old 0x9020:2)
 
 		.ascii	"HdrS"		# header signature
-		.word	0x0204		# header version number (>= 0x0105)
+		.word	0x0205		# header version number (>= 0x0105)
 					# or else old loadlin-1.5 will fail)
 realmode_swtch:	.word	0, 0		# default_switch, SETUPSEG
 start_sys_seg:	.word	SYSSEG
@@ -160,6 +160,17 @@
 					# The highest safe address for
 					# the contents of an initrd
 
+kernel_alignment:  .long CONFIG_PHYSICAL_ALIGN 	#physical addr alignment
+						#required for protected mode
+						#kernel
+#ifdef CONFIG_RELOCATABLE
+relocatable_kernel:    .byte 1
+#else
+relocatable_kernel:    .byte 0
+#endif
+pad2:			.byte 0
+pad3:			.word 0
+
 trampoline:	call	start_of_setup
 		.align 16
 					# The offset at this point is 0x240
@@ -588,11 +599,6 @@
 	call	default_switch
 
 rmodeswtch_end:
-# we get the code32 start address and modify the below 'jmpi'
-# (loader may have changed it)
-	movl	%cs:code32_start, %eax
-	movl	%eax, %cs:code32
-
 # Now we move the system to its rightful place ... but we check if we have a
 # big-kernel. In that case we *must* not move it ...
 	testb	$LOADED_HIGH, %cs:loadflags
@@ -788,11 +794,12 @@
 a20_done:
 
 #endif /* CONFIG_X86_VOYAGER */
-# set up gdt and idt
+# set up gdt and idt and 32bit start address
 	lidt	idt_48				# load idt with 0,0
 	xorl	%eax, %eax			# Compute gdt_base
 	movw	%ds, %ax			# (Convert %ds:gdt to a linear ptr)
 	shll	$4, %eax
+	addl	%eax, code32
 	addl	$gdt, %eax
 	movl	%eax, (gdt_48+2)
 	lgdt	gdt_48				# load gdt with whatever is
@@ -851,9 +858,26 @@
 #	Manual, Mixing 16-bit and 32-bit code, page 16-6)
 
 	.byte 0x66, 0xea			# prefix + jmpi-opcode
-code32:	.long	0x1000				# will be set to 0x100000
-						# for big kernels
+code32:	.long	startup_32			# will be set to %cs+startup_32
 	.word	__BOOT_CS
+.code32
+startup_32:
+	movl $(__BOOT_DS), %eax
+	movl %eax, %ds
+	movl %eax, %es
+	movl %eax, %fs
+	movl %eax, %gs
+	movl %eax, %ss
+
+	xorl %eax, %eax
+1:	incl %eax				# check that A20 really IS enabled
+	movl %eax, 0x00000000			# loop forever if it isn't
+	cmpl %eax, 0x00100000
+	je 1b
+
+	# Jump to the 32bit entry point
+	jmpl *(code32_start - start + (DELTA_INITSEG << 4))(%esi)
+.code16
 
 # Here's a bunch of information about your current kernel..
 kernel_version:	.ascii	UTS_RELEASE
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index 97aacd6..65891f1 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.19-rc2-git4
-# Sat Oct 21 03:38:56 2006
+# Linux kernel version: 2.6.19-git7
+# Wed Dec  6 23:50:49 2006
 #
 CONFIG_X86_32=y
 CONFIG_GENERIC_TIME=y
@@ -40,13 +40,14 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 # CONFIG_CPUSETS is not set
+CONFIG_SYSFS_DEPRECATED=y
 # CONFIG_RELAY is not set
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
 # CONFIG_EMBEDDED is not set
 CONFIG_UID16=y
-# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -110,6 +111,7 @@
 # CONFIG_X86_VISWS is not set
 CONFIG_X86_GENERICARCH=y
 # CONFIG_X86_ES7000 is not set
+# CONFIG_PARAVIRT is not set
 CONFIG_X86_CYCLONE_TIMER=y
 # CONFIG_M386 is not set
 # CONFIG_M486 is not set
@@ -120,6 +122,7 @@
 # CONFIG_MPENTIUMII is not set
 CONFIG_MPENTIUMIII=y
 # CONFIG_MPENTIUMM is not set
+# CONFIG_MCORE2 is not set
 # CONFIG_MPENTIUM4 is not set
 # CONFIG_MK6 is not set
 # CONFIG_MK7 is not set
@@ -197,7 +200,6 @@
 CONFIG_MTRR=y
 # CONFIG_EFI is not set
 # CONFIG_IRQBALANCE is not set
-CONFIG_REGPARM=y
 CONFIG_SECCOMP=y
 # CONFIG_HZ_100 is not set
 CONFIG_HZ_250=y
@@ -205,7 +207,8 @@
 CONFIG_HZ=250
 # CONFIG_KEXEC is not set
 # CONFIG_CRASH_DUMP is not set
-CONFIG_PHYSICAL_START=0x100000
+# CONFIG_RELOCATABLE is not set
+CONFIG_PHYSICAL_ALIGN=0x100000
 # CONFIG_HOTPLUG_CPU is not set
 CONFIG_COMPAT_VDSO=y
 CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
@@ -367,6 +370,7 @@
 # CONFIG_TCP_CONG_ADVANCED is not set
 CONFIG_TCP_CONG_CUBIC=y
 CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
 CONFIG_IPV6=y
 # CONFIG_IPV6_PRIVACY is not set
 # CONFIG_IPV6_ROUTER_PREF is not set
@@ -677,6 +681,7 @@
 # CONFIG_PATA_IT821X is not set
 # CONFIG_PATA_JMICRON is not set
 # CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
 # CONFIG_PATA_MPIIX is not set
 # CONFIG_PATA_OLDPIIX is not set
 # CONFIG_PATA_NETCELL is not set
@@ -850,6 +855,7 @@
 # CONFIG_IXGB is not set
 # CONFIG_S2IO is not set
 # CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
 
 #
 # Token Ring devices
@@ -984,10 +990,6 @@
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
 # CONFIG_SONYPI is not set
-
-#
-# Ftape, the floppy tape device driver
-#
 CONFIG_AGP=y
 # CONFIG_AGP_ALI is not set
 # CONFIG_AGP_ATI is not set
@@ -1108,6 +1110,7 @@
 # CONFIG_USB_BANDWIDTH is not set
 # CONFIG_USB_DYNAMIC_MINORS is not set
 # CONFIG_USB_SUSPEND is not set
+# CONFIG_USB_MULTITHREAD_PROBE is not set
 # CONFIG_USB_OTG is not set
 
 #
@@ -1185,6 +1188,7 @@
 # CONFIG_USB_KAWETH is not set
 # CONFIG_USB_PEGASUS is not set
 # CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET_MII is not set
 # CONFIG_USB_USBNET is not set
 CONFIG_USB_MON=y
 
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 1a884b6..1e8988e 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -6,7 +6,7 @@
 
 obj-y	:= process.o signal.o entry.o traps.o irq.o \
 		ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
-		pci-dma.o i386_ksyms.o i387.o bootflag.o \
+		pci-dma.o i386_ksyms.o i387.o bootflag.o e820.o \
 		quirks.o i8237.o topology.o alternative.o i8253.o tsc.o
 
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
@@ -40,6 +40,9 @@
 obj-$(CONFIG_HPET_TIMER) 	+= hpet.o
 obj-$(CONFIG_K8_NB)		+= k8.o
 
+# Make sure this is linked after any other paravirt_ops structs: see head.S
+obj-$(CONFIG_PARAVIRT)		+= paravirt.o
+
 EXTRA_AFLAGS   := -traditional
 
 obj-$(CONFIG_SCx200)		+= scx200.o
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index d12fb97..c8f96cf 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -333,7 +333,7 @@
 /*
  * Parse Interrupt Source Override for the ACPI SCI
  */
-static void acpi_sci_ioapic_setup(u32 bus_irq, u32 gsi, u16 polarity, u16 trigger)
+static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
 {
 	if (trigger == 0)	/* compatible SCI trigger is level */
 		trigger = 3;
@@ -353,13 +353,13 @@
 	 * If GSI is < 16, this will update its flags,
 	 * else it will create a new mp_irqs[] entry.
 	 */
-	mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+	mp_override_legacy_irq(gsi, polarity, trigger, gsi);
 
 	/*
 	 * stash over-ride to indicate we've been here
 	 * and for later update of acpi_fadt
 	 */
-	acpi_sci_override_gsi = bus_irq;
+	acpi_sci_override_gsi = gsi;
 	return;
 }
 
@@ -377,7 +377,7 @@
 	acpi_table_print_madt_entry(header);
 
 	if (intsrc->bus_irq == acpi_fadt.sci_int) {
-		acpi_sci_ioapic_setup(intsrc->bus_irq, intsrc->global_irq,
+		acpi_sci_ioapic_setup(intsrc->global_irq,
 				      intsrc->flags.polarity,
 				      intsrc->flags.trigger);
 		return 0;
@@ -880,7 +880,7 @@
 	 * pretend we got one so we can set the SCI flags.
 	 */
 	if (!acpi_sci_override_gsi)
-		acpi_sci_ioapic_setup(acpi_fadt.sci_int, acpi_fadt.sci_int, 0, 0);
+		acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
 
 	/* Fill in identity legacy mapings where no override */
 	mp_config_acpi_legacy_irqs();
diff --git a/arch/i386/kernel/acpi/cstate.c b/arch/i386/kernel/acpi/cstate.c
index 20563e5..12e937c 100644
--- a/arch/i386/kernel/acpi/cstate.c
+++ b/arch/i386/kernel/acpi/cstate.c
@@ -11,6 +11,7 @@
 #include <linux/init.h>
 #include <linux/acpi.h>
 #include <linux/cpu.h>
+#include <linux/sched.h>
 
 #include <acpi/processor.h>
 #include <asm/acpi.h>
@@ -155,10 +156,8 @@
 
 static void __exit ffh_cstate_exit(void)
 {
-	if (cpu_cstate_entry) {
-		free_percpu(cpu_cstate_entry);
-		cpu_cstate_entry = NULL;
-	}
+	free_percpu(cpu_cstate_entry);
+	cpu_cstate_entry = NULL;
 }
 
 arch_initcall(ffh_cstate_init);
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index c984169..4b60af7 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -10,6 +10,7 @@
 #include <asm/pci-direct.h>
 #include <asm/acpi.h>
 #include <asm/apic.h>
+#include <asm/irq.h>
 
 #ifdef CONFIG_ACPI
 
@@ -49,6 +50,24 @@
 	return 0;
 }
 
+static void check_intel(void)
+{
+	u16 vendor, device;
+
+	vendor = read_pci_config_16(0, 0, 0, PCI_VENDOR_ID);
+
+	if (vendor != PCI_VENDOR_ID_INTEL)
+		return;
+
+	device = read_pci_config_16(0, 0, 0, PCI_DEVICE_ID);
+#ifdef CONFIG_SMP
+	if (device == PCI_DEVICE_ID_INTEL_E7320_MCH ||
+	    device == PCI_DEVICE_ID_INTEL_E7520_MCH ||
+	    device == PCI_DEVICE_ID_INTEL_E7525_MCH)
+		quirk_intel_irqbalance();
+#endif
+}
+
 void __init check_acpi_pci(void)
 {
 	int num, slot, func;
@@ -60,6 +79,8 @@
 	if (!early_pci_allowed())
 		return;
 
+	check_intel();
+
 	/* Poor man's PCI discovery */
 	for (num = 0; num < 32; num++) {
 		for (slot = 0; slot < 32; slot++) {
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 583c238..9eca21b 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -1,4 +1,5 @@
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include <asm/alternative.h>
@@ -123,6 +124,20 @@
 
 #endif /* CONFIG_X86_64 */
 
+static void nop_out(void *insns, unsigned int len)
+{
+	unsigned char **noptable = find_nop_table();
+
+	while (len > 0) {
+		unsigned int noplen = len;
+		if (noplen > ASM_NOP_MAX)
+			noplen = ASM_NOP_MAX;
+		memcpy(insns, noptable[noplen], noplen);
+		insns += noplen;
+		len -= noplen;
+	}
+}
+
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
 extern u8 *__smp_locks[], *__smp_locks_end[];
@@ -137,10 +152,9 @@
 
 void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 {
-	unsigned char **noptable = find_nop_table();
 	struct alt_instr *a;
 	u8 *instr;
-	int diff, i, k;
+	int diff;
 
 	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
 	for (a = start; a < end; a++) {
@@ -158,13 +172,7 @@
 #endif
 		memcpy(instr, a->replacement, a->replacementlen);
 		diff = a->instrlen - a->replacementlen;
-		/* Pad the rest with nops */
-		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
-			k = diff;
-			if (k > ASM_NOP_MAX)
-				k = ASM_NOP_MAX;
-			memcpy(a->instr + i, noptable[k], k);
-		}
+		nop_out(instr + a->replacementlen, diff);
 	}
 }
 
@@ -208,7 +216,6 @@
 
 static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 {
-	unsigned char **noptable = find_nop_table();
 	u8 **ptr;
 
 	for (ptr = start; ptr < end; ptr++) {
@@ -216,7 +223,7 @@
 			continue;
 		if (*ptr > text_end)
 			continue;
-		**ptr = noptable[1][0];
+		nop_out(*ptr, 1);
 	};
 }
 
@@ -342,6 +349,40 @@
 
 #endif
 
+#ifdef CONFIG_PARAVIRT
+void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
+{
+	struct paravirt_patch *p;
+
+	for (p = start; p < end; p++) {
+		unsigned int used;
+
+		used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
+					  p->len);
+#ifdef CONFIG_DEBUG_PARAVIRT
+		{
+		int i;
+		/* Deliberately clobber regs using "not %reg" to find bugs. */
+		for (i = 0; i < 3; i++) {
+			if (p->len - used >= 2 && (p->clobbers & (1 << i))) {
+				memcpy(p->instr + used, "\xf7\xd0", 2);
+				p->instr[used+1] |= i;
+				used += 2;
+			}
+		}
+		}
+#endif
+		/* Pad the rest with nops */
+		nop_out(p->instr + used, p->len - used);
+	}
+
+	/* Sync to be conservative, in case we patched following instructions */
+	sync_core();
+}
+extern struct paravirt_patch __start_parainstructions[],
+	__stop_parainstructions[];
+#endif	/* CONFIG_PARAVIRT */
+
 void __init alternative_instructions(void)
 {
 	unsigned long flags;
@@ -389,5 +430,6 @@
 		alternatives_smp_switch(0);
 	}
 #endif
+ 	apply_paravirt(__start_parainstructions, __stop_parainstructions);
 	local_irq_restore(flags);
 }
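
The alternative.c hunks above factor the open-coded NOP padding into nop_out().  A minimal userspace sketch of the same chunking loop follows; the file name, the ASM_NOP_MAX value and the single-byte 0x90 fill are stand-ins for the kernel's per-CPU NOP tables, so this is illustrative rather than the kernel implementation:

    /* nop_pad.c - pad a byte range with NOPs in chunks of at most
     * ASM_NOP_MAX, the way nop_out() does; the kernel copies optimal
     * multi-byte NOPs from a per-CPU table, here we just use 0x90. */
    #include <stdio.h>
    #include <string.h>

    #define ASM_NOP_MAX 8                      /* assumed chunk limit */

    static void nop_out(unsigned char *insns, unsigned int len)
    {
        while (len > 0) {
            unsigned int noplen = len;
            if (noplen > ASM_NOP_MAX)
                noplen = ASM_NOP_MAX;
            memset(insns, 0x90, noplen);       /* kernel: memcpy from noptable[noplen] */
            insns += noplen;
            len -= noplen;
        }
    }

    int main(void)
    {
        unsigned char buf[19];
        nop_out(buf, sizeof(buf));             /* padded as 8 + 8 + 3 bytes */
        printf("first byte: 0x%02x\n", buf[0]);
        return 0;
    }

Splitting at ASM_NOP_MAX matters because the longest single NOP encoding in the tables is bounded; anything longer has to be emitted as several NOPs.
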
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 2fd4b7d..776d9be 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -647,23 +647,30 @@
 static int lapic_suspend(struct sys_device *dev, pm_message_t state)
 {
 	unsigned long flags;
+	int maxlvt;
 
 	if (!apic_pm_state.active)
 		return 0;
 
+	maxlvt = get_maxlvt();
+
 	apic_pm_state.apic_id = apic_read(APIC_ID);
 	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
 	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
 	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
 	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
 	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
-	apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
+	if (maxlvt >= 4)
+		apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
 	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
 	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
 	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
 	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
 	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
-	apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
+#ifdef CONFIG_X86_MCE_P4THERMAL
+	if (maxlvt >= 5)
+		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
+#endif
 	
 	local_irq_save(flags);
 	disable_local_APIC();
@@ -675,10 +682,13 @@
 {
 	unsigned int l, h;
 	unsigned long flags;
+	int maxlvt;
 
 	if (!apic_pm_state.active)
 		return 0;
 
+	maxlvt = get_maxlvt();
+
 	local_irq_save(flags);
 
 	/*
@@ -700,8 +710,12 @@
 	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
 	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
 	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
-	apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
-	apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
+#ifdef CONFIG_X86_MCE_P4THERMAL
+	if (maxlvt >= 5)
+		apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
+#endif
+	if (maxlvt >= 4)
+		apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
 	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
 	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
 	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
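
The lapic_suspend()/lapic_resume() change touches LVTPC and LVTTHMR only when the APIC actually has that many local vector table entries.  The count comes from the APIC version register; below is a tiny sketch of that extraction, with the bit layout taken from the IA-32 manuals (bits 23:16 hold the highest LVT index) and the sample value invented for illustration:

    /* maxlvt.c - derive the number of LVT entries from a local APIC
     * version register value (illustrative; get_maxlvt() in the kernel
     * reads the real register). */
    #include <stdio.h>

    static unsigned int get_maxlvt(unsigned int apic_lvr)
    {
        return (apic_lvr >> 16) & 0xff;        /* highest LVT index */
    }

    int main(void)
    {
        unsigned int lvr = 0x00050014;         /* hypothetical: version 0x14 */
        printf("maxlvt = %u\n", get_maxlvt(lvr));   /* prints 5 */
        return 0;
    }

With maxlvt below 4 there is no performance-counter LVT to save, and below 5 no thermal LVT, which is why those reads and writes are now gated.
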
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index a60358f..a97847d 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -231,6 +231,7 @@
 #include <asm/uaccess.h>
 #include <asm/desc.h>
 #include <asm/i8253.h>
+#include <asm/paravirt.h>
 
 #include "io_ports.h"
 
@@ -2235,7 +2236,7 @@
 
 	dmi_check_system(apm_dmi_table);
 
-	if (apm_info.bios.version == 0) {
+	if (apm_info.bios.version == 0 || paravirt_enabled()) {
 		printk(KERN_INFO "apm: BIOS not found.\n");
 		return -ENODEV;
 	}
diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c
index c80271f..1b2f3cd 100644
--- a/arch/i386/kernel/asm-offsets.c
+++ b/arch/i386/kernel/asm-offsets.c
@@ -15,6 +15,7 @@
 #include <asm/processor.h>
 #include <asm/thread_info.h>
 #include <asm/elf.h>
+#include <asm/pda.h>
 
 #define DEFINE(sym, val) \
         asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -51,13 +52,35 @@
 	OFFSET(TI_exec_domain, thread_info, exec_domain);
 	OFFSET(TI_flags, thread_info, flags);
 	OFFSET(TI_status, thread_info, status);
-	OFFSET(TI_cpu, thread_info, cpu);
 	OFFSET(TI_preempt_count, thread_info, preempt_count);
 	OFFSET(TI_addr_limit, thread_info, addr_limit);
 	OFFSET(TI_restart_block, thread_info, restart_block);
 	OFFSET(TI_sysenter_return, thread_info, sysenter_return);
 	BLANK();
 
+	OFFSET(GDS_size, Xgt_desc_struct, size);
+	OFFSET(GDS_address, Xgt_desc_struct, address);
+	OFFSET(GDS_pad, Xgt_desc_struct, pad);
+	BLANK();
+
+	OFFSET(PT_EBX, pt_regs, ebx);
+	OFFSET(PT_ECX, pt_regs, ecx);
+	OFFSET(PT_EDX, pt_regs, edx);
+	OFFSET(PT_ESI, pt_regs, esi);
+	OFFSET(PT_EDI, pt_regs, edi);
+	OFFSET(PT_EBP, pt_regs, ebp);
+	OFFSET(PT_EAX, pt_regs, eax);
+	OFFSET(PT_DS,  pt_regs, xds);
+	OFFSET(PT_ES,  pt_regs, xes);
+	OFFSET(PT_GS,  pt_regs, xgs);
+	OFFSET(PT_ORIG_EAX, pt_regs, orig_eax);
+	OFFSET(PT_EIP, pt_regs, eip);
+	OFFSET(PT_CS,  pt_regs, xcs);
+	OFFSET(PT_EFLAGS, pt_regs, eflags);
+	OFFSET(PT_OLDESP, pt_regs, esp);
+	OFFSET(PT_OLDSS,  pt_regs, xss);
+	BLANK();
+
 	OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
 	OFFSET(RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
 	BLANK();
@@ -74,4 +97,18 @@
 	DEFINE(VDSO_PRELINK, VDSO_PRELINK);
 
 	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+
+	BLANK();
+ 	OFFSET(PDA_cpu, i386_pda, cpu_number);
+	OFFSET(PDA_pcurrent, i386_pda, pcurrent);
+
+#ifdef CONFIG_PARAVIRT
+	BLANK();
+	OFFSET(PARAVIRT_enabled, paravirt_ops, paravirt_enabled);
+	OFFSET(PARAVIRT_irq_disable, paravirt_ops, irq_disable);
+	OFFSET(PARAVIRT_irq_enable, paravirt_ops, irq_enable);
+	OFFSET(PARAVIRT_irq_enable_sysexit, paravirt_ops, irq_enable_sysexit);
+	OFFSET(PARAVIRT_iret, paravirt_ops, iret);
+	OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0);
+#endif
 }
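
The new PT_* and PDA_* entries above feed assembly code that cannot use offsetof() directly.  asm-offsets.c works by emitting marker lines into compiler-generated assembly which the build then turns into #defines; here is a standalone sketch of the trick (the structure and symbol names are invented, and the file is only ever compiled to assembly, never assembled):

    /* offsets.c - emit "->NAME value" markers into the .s output; build
     * with "gcc -S offsets.c" and scrape offsets.s for lines starting
     * with "->".  The markers are not valid instructions, so this file
     * is never assembled or linked into anything. */
    #include <stddef.h>

    struct pt_regs_demo { long ebx, ecx, edx; };

    #define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
    #define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

    void foo(void)
    {
        OFFSET(PT_EDX_DEMO, pt_regs_demo, edx);    /* emits the byte offset of edx */
    }
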
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index e475809..41cfea5 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -104,10 +104,7 @@
 					f_vide();
 				rdtscl(d2);
 				d = d2-d;
-				
-				/* Knock these two lines out if it debugs out ok */
-				printk(KERN_INFO "AMD K6 stepping B detected - ");
-				/* -- cut here -- */
+
 				if (d > 20*K6_BUG_LOOP) 
 					printk("system stability may be impaired when more than 32 MB are used.\n");
 				else 
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index d9f3e3c..1b34c56 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -18,14 +18,15 @@
 #include <asm/apic.h>
 #include <mach_apic.h>
 #endif
+#include <asm/pda.h>
 
 #include "cpu.h"
 
 DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
 EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
 
-DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
-EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
+struct i386_pda *_cpu_pda[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(_cpu_pda);
 
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_fxsr __cpuinitdata;
@@ -235,29 +236,14 @@
 	return flag_is_changeable_p(X86_EFLAGS_ID);
 }
 
-/* Do minimum CPU detection early.
-   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
-   The others are not touched to avoid unwanted side effects.
-
-   WARNING: this function is only called on the BP.  Don't add code here
-   that is supposed to run on all CPUs. */
-static void __init early_cpu_detect(void)
+void __init cpu_detect(struct cpuinfo_x86 *c)
 {
-	struct cpuinfo_x86 *c = &boot_cpu_data;
-
-	c->x86_cache_alignment = 32;
-
-	if (!have_cpuid_p())
-		return;
-
 	/* Get vendor name */
 	cpuid(0x00000000, &c->cpuid_level,
 	      (int *)&c->x86_vendor_id[0],
 	      (int *)&c->x86_vendor_id[8],
 	      (int *)&c->x86_vendor_id[4]);
 
-	get_cpu_vendor(c, 1);
-
 	c->x86 = 4;
 	if (c->cpuid_level >= 0x00000001) {
 		u32 junk, tfms, cap0, misc;
@@ -274,6 +260,26 @@
 	}
 }
 
+/* Do minimum CPU detection early.
+   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
+   The others are not touched to avoid unwanted side effects.
+
+   WARNING: this function is only called on the BP.  Don't add code here
+   that is supposed to run on all CPUs. */
+static void __init early_cpu_detect(void)
+{
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+
+	c->x86_cache_alignment = 32;
+
+	if (!have_cpuid_p())
+		return;
+
+	cpu_detect(c);
+
+	get_cpu_vendor(c, 1);
+}
+
 static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 {
 	u32 tfms, xlvl;
@@ -308,6 +314,8 @@
 #else
 			c->apicid = (ebx >> 24) & 0xFF;
 #endif
+			if (c->x86_capability[0] & (1<<19))
+				c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
 		} else {
 			/* Have CPUID level 0 only - unheard of */
 			c->x86 = 4;
@@ -372,6 +380,7 @@
 	c->x86_vendor_id[0] = '\0'; /* Unset */
 	c->x86_model_id[0] = '\0';  /* Unset */
 	c->x86_max_cores = 1;
+	c->x86_clflush_size = 32;
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
 	if (!have_cpuid_p()) {
@@ -591,25 +600,134 @@
 	disable_pse = 1;
 #endif
 }
-/*
- * cpu_init() initializes state that is per-CPU. Some data is already
- * initialized (naturally) in the bootstrap process, such as the GDT
- * and IDT. We reload them nevertheless, this function acts as a
- * 'CPU state barrier', nothing should get across.
- */
-void __cpuinit cpu_init(void)
+
+/* Make sure %gs is initialized properly in idle threads */
+struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
 {
-	int cpu = smp_processor_id();
-	struct tss_struct * t = &per_cpu(init_tss, cpu);
-	struct thread_struct *thread = &current->thread;
-	struct desc_struct *gdt;
-	__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
+	memset(regs, 0, sizeof(struct pt_regs));
+	regs->xgs = __KERNEL_PDA;
+	return regs;
+}
+
+static __cpuinit int alloc_gdt(int cpu)
+{
 	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+	struct desc_struct *gdt;
+	struct i386_pda *pda;
+
+	gdt = (struct desc_struct *)cpu_gdt_descr->address;
+	pda = cpu_pda(cpu);
+
+	/*
+	 * This is a horrible hack to allocate the GDT.  The problem
+	 * is that cpu_init() is called really early for the boot CPU
+	 * (and hence needs bootmem) but much later for the secondary
+	 * CPUs, when bootmem will have gone away
+	 */
+	if (NODE_DATA(0)->bdata->node_bootmem_map) {
+		BUG_ON(gdt != NULL || pda != NULL);
+
+		gdt = alloc_bootmem_pages(PAGE_SIZE);
+		pda = alloc_bootmem(sizeof(*pda));
+		/* alloc_bootmem(_pages) panics on failure, so no check */
+
+		memset(gdt, 0, PAGE_SIZE);
+		memset(pda, 0, sizeof(*pda));
+	} else {
+		/* GDT and PDA might already have been allocated if
+		   this is a CPU hotplug re-insertion. */
+		if (gdt == NULL)
+			gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
+
+		if (pda == NULL)
+			pda = kmalloc_node(sizeof(*pda), GFP_KERNEL, cpu_to_node(cpu));
+
+		if (unlikely(!gdt || !pda)) {
+			free_pages((unsigned long)gdt, 0);
+			kfree(pda);
+			return 0;
+		}
+	}
+
+ 	cpu_gdt_descr->address = (unsigned long)gdt;
+	cpu_pda(cpu) = pda;
+
+	return 1;
+}
+
+/* Initial PDA used by boot CPU */
+struct i386_pda boot_pda = {
+	._pda = &boot_pda,
+	.cpu_number = 0,
+	.pcurrent = &init_task,
+};
+
+static inline void set_kernel_gs(void)
+{
+	/* Set %gs for this CPU's PDA.  Memory clobber is to create a
+	   barrier with respect to any PDA operations, so the compiler
+	   doesn't move any before here. */
+	asm volatile ("mov %0, %%gs" : : "r" (__KERNEL_PDA) : "memory");
+}
+
+/* Initialize the CPU's GDT and PDA.  The boot CPU does this for
+   itself, but secondaries find this done for them. */
+__cpuinit int init_gdt(int cpu, struct task_struct *idle)
+{
+	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+	struct desc_struct *gdt;
+	struct i386_pda *pda;
+
+	/* For non-boot CPUs, the GDT and PDA should already have been
+	   allocated. */
+	if (!alloc_gdt(cpu)) {
+		printk(KERN_CRIT "CPU%d failed to allocate GDT or PDA\n", cpu);
+		return 0;
+	}
+
+	gdt = (struct desc_struct *)cpu_gdt_descr->address;
+	pda = cpu_pda(cpu);
+
+	BUG_ON(gdt == NULL || pda == NULL);
+
+	/*
+	 * Initialize the per-CPU GDT with the boot GDT,
+	 * and set up the GDT descriptor:
+	 */
+ 	memcpy(gdt, cpu_gdt_table, GDT_SIZE);
+	cpu_gdt_descr->size = GDT_SIZE - 1;
+
+	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a,
+			(u32 *)&gdt[GDT_ENTRY_PDA].b,
+			(unsigned long)pda, sizeof(*pda) - 1,
+			0x80 | DESCTYPE_S | 0x2, 0); /* present read-write data segment */
+
+	memset(pda, 0, sizeof(*pda));
+	pda->_pda = pda;
+	pda->cpu_number = cpu;
+	pda->pcurrent = idle;
+
+	return 1;
+}
+
+/* Common CPU init for both boot and secondary CPUs */
+static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
+{
+	struct tss_struct * t = &per_cpu(init_tss, cpu);
+	struct thread_struct *thread = &curr->thread;
+	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+
+	/* Reinit these anyway, even if they've already been done (on
+	   the boot CPU, this will transition from the boot gdt+pda to
+	   the real ones). */
+	load_gdt(cpu_gdt_descr);
+	set_kernel_gs();
 
 	if (cpu_test_and_set(cpu, cpu_initialized)) {
 		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
 		for (;;) local_irq_enable();
 	}
+
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
 
 	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
@@ -621,56 +739,16 @@
 		set_in_cr4(X86_CR4_TSD);
 	}
 
-	/* The CPU hotplug case */
-	if (cpu_gdt_descr->address) {
-		gdt = (struct desc_struct *)cpu_gdt_descr->address;
-		memset(gdt, 0, PAGE_SIZE);
-		goto old_gdt;
-	}
-	/*
-	 * This is a horrible hack to allocate the GDT.  The problem
-	 * is that cpu_init() is called really early for the boot CPU
-	 * (and hence needs bootmem) but much later for the secondary
-	 * CPUs, when bootmem will have gone away
-	 */
-	if (NODE_DATA(0)->bdata->node_bootmem_map) {
-		gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
-		/* alloc_bootmem_pages panics on failure, so no check */
-		memset(gdt, 0, PAGE_SIZE);
-	} else {
-		gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
-		if (unlikely(!gdt)) {
-			printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
-			for (;;)
-				local_irq_enable();
-		}
-	}
-old_gdt:
-	/*
-	 * Initialize the per-CPU GDT with the boot GDT,
-	 * and set up the GDT descriptor:
-	 */
- 	memcpy(gdt, cpu_gdt_table, GDT_SIZE);
-
-	/* Set up GDT entry for 16bit stack */
- 	*(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
-		((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
-		((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
-		(CPU_16BIT_STACK_SIZE - 1);
-
-	cpu_gdt_descr->size = GDT_SIZE - 1;
- 	cpu_gdt_descr->address = (unsigned long)gdt;
-
-	load_gdt(cpu_gdt_descr);
 	load_idt(&idt_descr);
 
 	/*
 	 * Set up and load the per-CPU TSS and LDT
 	 */
 	atomic_inc(&init_mm.mm_count);
-	current->active_mm = &init_mm;
-	BUG_ON(current->mm);
-	enter_lazy_tlb(&init_mm, current);
+	curr->active_mm = &init_mm;
+	if (curr->mm)
+		BUG();
+	enter_lazy_tlb(&init_mm, curr);
 
 	load_esp0(t, thread);
 	set_tss_desc(cpu,t);
@@ -682,8 +760,8 @@
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-	/* Clear %fs and %gs. */
-	asm volatile ("movl %0, %%fs; movl %0, %%gs" : : "r" (0));
+	/* Clear %fs. */
+	asm volatile ("mov %0, %%fs" : : "r" (0));
 
 	/* Clear all 6 debug registers: */
 	set_debugreg(0, 0);
@@ -701,6 +779,37 @@
 	mxcsr_feature_mask_init();
 }
 
+/* Entrypoint to initialize secondary CPU */
+void __cpuinit secondary_cpu_init(void)
+{
+	int cpu = smp_processor_id();
+	struct task_struct *curr = current;
+
+	_cpu_init(cpu, curr);
+}
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ */
+void __cpuinit cpu_init(void)
+{
+	int cpu = smp_processor_id();
+	struct task_struct *curr = current;
+
+	/* Set up the real GDT and PDA, so we can transition from the
+	   boot versions. */
+	if (!init_gdt(cpu, curr)) {
+		/* failed to allocate something; not much we can do... */
+		for (;;)
+			local_irq_enable();
+	}
+
+	_cpu_init(cpu, curr);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 void __cpuinit cpu_uninit(void)
 {
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 94a95aa..56fe265 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -107,7 +107,7 @@
 	 * Note that the workaround only should be initialized once...
 	 */
 	c->f00f_bug = 0;
-	if ( c->x86 == 5 ) {
+	if (!paravirt_enabled() && c->x86 == 5) {
 		static int f00f_workaround_enabled = 0;
 
 		c->f00f_bug = 1;
@@ -195,8 +195,16 @@
 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
 		(c->x86 == 0x6 && c->x86_model >= 0x0e))
 		set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
-}
 
+	if (cpu_has_ds) {
+		unsigned int l1;
+		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
+		if (!(l1 & (1<<11)))
+			set_bit(X86_FEATURE_BTS, c->x86_capability);
+		if (!(l1 & (1<<12)))
+			set_bit(X86_FEATURE_PEBS, c->x86_capability);
+	}
+}
 
 static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
 {
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 5c43be4..80b4c5d 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -480,12 +480,10 @@
 	if (num_cache_leaves == 0)
 		return -ENOENT;
 
-	cpuid4_info[cpu] = kmalloc(
+	cpuid4_info[cpu] = kzalloc(
 	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
 	if (unlikely(cpuid4_info[cpu] == NULL))
 		return -ENOMEM;
-	memset(cpuid4_info[cpu], 0,
-	    sizeof(struct _cpuid4_info) * num_cache_leaves);
 
 	oldmask = current->cpus_allowed;
 	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
@@ -658,17 +656,14 @@
 		return -ENOENT;
 
 	/* Allocate all required memory */
-	cache_kobject[cpu] = kmalloc(sizeof(struct kobject), GFP_KERNEL);
+	cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
 	if (unlikely(cache_kobject[cpu] == NULL))
 		goto err_out;
-	memset(cache_kobject[cpu], 0, sizeof(struct kobject));
 
-	index_kobject[cpu] = kmalloc(
+	index_kobject[cpu] = kzalloc(
 	    sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
 	if (unlikely(index_kobject[cpu] == NULL))
 		goto err_out;
-	memset(index_kobject[cpu], 0,
-	    sizeof(struct _index_kobject) * num_cache_leaves);
 
 	return 0;
 
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 1f9153a..6b5d351 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -51,10 +51,10 @@
 	}
 }
 
-static void mce_work_fn(void *data);
-static DECLARE_WORK(mce_work, mce_work_fn, NULL);
+static void mce_work_fn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
 
-static void mce_work_fn(void *data)
+static void mce_work_fn(struct work_struct *work)
 { 
 	on_each_cpu(mce_checkregs, NULL, 1, 1);
 	schedule_delayed_work(&mce_work, MCE_RATE);
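
The non-fatal.c change follows the 2.6.20 workqueue API: the handler now receives the work_struct itself instead of a void *data cookie, and recovers its context with container_of() when it needs one.  A userspace illustration of that pattern (the structures and names here are invented for the example):

    /* workctx.c - recover the enclosing object from a pointer to an
     * embedded member, the idiom the new work handler signature relies on. */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };           /* stand-in */

    struct my_device {
        int id;
        struct work_struct work;                   /* embedded, like delayed_work */
    };

    static void work_fn(struct work_struct *work)
    {
        struct my_device *dev = container_of(work, struct my_device, work);
        printf("work ran for device %d\n", dev->id);
    }

    int main(void)
    {
        struct my_device dev = { .id = 42 };
        work_fn(&dev.work);                        /* prints "work ran for device 42" */
        return 0;
    }

mce_work_fn() itself needs no context, so its conversion is only the signature change and the switch to DECLARE_DELAYED_WORK.
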
diff --git a/arch/i386/kernel/cpu/mcheck/therm_throt.c b/arch/i386/kernel/cpu/mcheck/therm_throt.c
index 2d8703b..065005c 100644
--- a/arch/i386/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/i386/kernel/cpu/mcheck/therm_throt.c
@@ -20,6 +20,7 @@
 #include <linux/cpu.h>
 #include <asm/cpu.h>
 #include <linux/notifier.h>
+#include <linux/jiffies.h>
 #include <asm/therm_throt.h>
 
 /* How long to wait between reporting thermal events */
@@ -115,7 +116,6 @@
 	return sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
 {
 	return sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
@@ -152,7 +152,6 @@
 {
 	.notifier_call = thermal_throttle_cpu_callback,
 };
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static __init int thermal_throttle_init_device(void)
 {
diff --git a/arch/i386/kernel/cpu/mtrr/Makefile b/arch/i386/kernel/cpu/mtrr/Makefile
index a25b701..191fc05 100644
--- a/arch/i386/kernel/cpu/mtrr/Makefile
+++ b/arch/i386/kernel/cpu/mtrr/Makefile
@@ -1,5 +1,3 @@
 obj-y		:= main.o if.o generic.o state.o
-obj-y		+= amd.o
-obj-y		+= cyrix.o
-obj-y		+= centaur.o
+obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o
 
diff --git a/arch/i386/kernel/cpu/mtrr/amd.c b/arch/i386/kernel/cpu/mtrr/amd.c
index 1a1e04b..0949cdb 100644
--- a/arch/i386/kernel/cpu/mtrr/amd.c
+++ b/arch/i386/kernel/cpu/mtrr/amd.c
@@ -7,7 +7,7 @@
 
 static void
 amd_get_mtrr(unsigned int reg, unsigned long *base,
-	     unsigned int *size, mtrr_type * type)
+	     unsigned long *size, mtrr_type * type)
 {
 	unsigned long low, high;
 
diff --git a/arch/i386/kernel/cpu/mtrr/centaur.c b/arch/i386/kernel/cpu/mtrr/centaur.c
index 33f00ac..cb9aa3a 100644
--- a/arch/i386/kernel/cpu/mtrr/centaur.c
+++ b/arch/i386/kernel/cpu/mtrr/centaur.c
@@ -17,7 +17,7 @@
  */
 
 static int
-centaur_get_free_region(unsigned long base, unsigned long size)
+centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg)
 /*  [SUMMARY] Get a free MTRR.
     <base> The starting (base) address of the region.
     <size> The size (in bytes) of the region.
@@ -26,10 +26,11 @@
 {
 	int i, max;
 	mtrr_type ltype;
-	unsigned long lbase;
-	unsigned int lsize;
+	unsigned long lbase, lsize;
 
 	max = num_var_ranges;
+	if (replace_reg >= 0 && replace_reg < max)
+		return replace_reg;
 	for (i = 0; i < max; ++i) {
 		if (centaur_mcr_reserved & (1 << i))
 			continue;
@@ -49,7 +50,7 @@
 
 static void
 centaur_get_mcr(unsigned int reg, unsigned long *base,
-		unsigned int *size, mtrr_type * type)
+		unsigned long *size, mtrr_type * type)
 {
 	*base = centaur_mcr[reg].high >> PAGE_SHIFT;
 	*size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
diff --git a/arch/i386/kernel/cpu/mtrr/cyrix.c b/arch/i386/kernel/cpu/mtrr/cyrix.c
index 9027a98..0737a59 100644
--- a/arch/i386/kernel/cpu/mtrr/cyrix.c
+++ b/arch/i386/kernel/cpu/mtrr/cyrix.c
@@ -9,7 +9,7 @@
 
 static void
 cyrix_get_arr(unsigned int reg, unsigned long *base,
-	      unsigned int *size, mtrr_type * type)
+	      unsigned long *size, mtrr_type * type)
 {
 	unsigned long flags;
 	unsigned char arr, ccr3, rcr, shift;
@@ -77,7 +77,7 @@
 }
 
 static int
-cyrix_get_free_region(unsigned long base, unsigned long size)
+cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
 /*  [SUMMARY] Get a free ARR.
     <base> The starting (base) address of the region.
     <size> The size (in bytes) of the region.
@@ -86,9 +86,24 @@
 {
 	int i;
 	mtrr_type ltype;
-	unsigned long lbase;
-	unsigned int  lsize;
+	unsigned long lbase, lsize;
 
+	switch (replace_reg) {
+	case 7:
+		if (size < 0x40)
+			break;
+	case 6:
+	case 5:
+	case 4:
+		return replace_reg;
+	case 3:
+		if (arr3_protected)
+			break;
+	case 2:
+	case 1:
+	case 0:
+		return replace_reg;
+	}
 	/* If we are to set up a region >32M then look at ARR7 immediately */
 	if (size > 0x2000) {
 		cyrix_get_arr(7, &lbase, &lsize, &ltype);
@@ -214,7 +229,7 @@
 
 typedef struct {
 	unsigned long base;
-	unsigned int size;
+	unsigned long size;
 	mtrr_type type;
 } arr_state_t;
 
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
index 0b61eed..f77fc53 100644
--- a/arch/i386/kernel/cpu/mtrr/generic.c
+++ b/arch/i386/kernel/cpu/mtrr/generic.c
@@ -3,6 +3,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <asm/io.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
@@ -15,12 +16,19 @@
 	struct mtrr_var_range *var_ranges;
 	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
 	unsigned char enabled;
+	unsigned char have_fixed;
 	mtrr_type def_type;
 };
 
 static unsigned long smp_changes_mask;
 static struct mtrr_state mtrr_state = {};
 
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "mtrr."
+
+static __initdata int mtrr_show;
+module_param_named(show, mtrr_show, bool, 0);
+
 /*  Get the MSR pair relating to a var range  */
 static void __init
 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
@@ -43,6 +51,14 @@
 		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
 }
 
+static void __init print_fixed(unsigned base, unsigned step, const mtrr_type*types)
+{
+	unsigned i;
+
+	for (i = 0; i < 8; ++i, ++types, base += step)
+		printk(KERN_INFO "MTRR %05X-%05X %s\n", base, base + step - 1, mtrr_attrib_to_str(*types));
+}
+
 /*  Grab all of the MTRR state for this CPU into *state  */
 void __init get_mtrr_state(void)
 {
@@ -58,13 +74,49 @@
 	} 
 	vrs = mtrr_state.var_ranges;
 
+	rdmsr(MTRRcap_MSR, lo, dummy);
+	mtrr_state.have_fixed = (lo >> 8) & 1;
+
 	for (i = 0; i < num_var_ranges; i++)
 		get_mtrr_var_range(i, &vrs[i]);
-	get_fixed_ranges(mtrr_state.fixed_ranges);
+	if (mtrr_state.have_fixed)
+		get_fixed_ranges(mtrr_state.fixed_ranges);
 
 	rdmsr(MTRRdefType_MSR, lo, dummy);
 	mtrr_state.def_type = (lo & 0xff);
 	mtrr_state.enabled = (lo & 0xc00) >> 10;
+
+	if (mtrr_show) {
+		int high_width;
+
+		printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
+		if (mtrr_state.have_fixed) {
+			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
+			       mtrr_state.enabled & 1 ? "en" : "dis");
+			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
+			for (i = 0; i < 2; ++i)
+				print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
+			for (i = 0; i < 8; ++i)
+				print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
+		}
+		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
+		       mtrr_state.enabled & 2 ? "en" : "dis");
+		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
+		for (i = 0; i < num_var_ranges; ++i) {
+			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
+				printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
+				       i,
+				       high_width,
+				       mtrr_state.var_ranges[i].base_hi,
+				       mtrr_state.var_ranges[i].base_lo >> 12,
+				       high_width,
+				       mtrr_state.var_ranges[i].mask_hi,
+				       mtrr_state.var_ranges[i].mask_lo >> 12,
+				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
+			else
+				printk(KERN_INFO "MTRR %u disabled\n", i);
+		}
+	}
 }
 
 /*  Some BIOS's are fucked and don't set all MTRRs the same!  */
@@ -95,7 +147,7 @@
 			smp_processor_id(), msr, a, b);
 }
 
-int generic_get_free_region(unsigned long base, unsigned long size)
+int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
 /*  [SUMMARY] Get a free MTRR.
     <base> The starting (base) address of the region.
     <size> The size (in bytes) of the region.
@@ -104,10 +156,11 @@
 {
 	int i, max;
 	mtrr_type ltype;
-	unsigned long lbase;
-	unsigned lsize;
+	unsigned long lbase, lsize;
 
 	max = num_var_ranges;
+	if (replace_reg >= 0 && replace_reg < max)
+		return replace_reg;
 	for (i = 0; i < max; ++i) {
 		mtrr_if->get(i, &lbase, &lsize, &ltype);
 		if (lsize == 0)
@@ -117,7 +170,7 @@
 }
 
 static void generic_get_mtrr(unsigned int reg, unsigned long *base,
-			     unsigned int *size, mtrr_type * type)
+			     unsigned long *size, mtrr_type *type)
 {
 	unsigned int mask_lo, mask_hi, base_lo, base_hi;
 
@@ -202,7 +255,9 @@
 	return changed;
 }
 
-static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
+static u32 deftype_lo, deftype_hi;
+
+static unsigned long set_mtrr_state(void)
 /*  [SUMMARY] Set the MTRR state for this CPU.
     <state> The MTRR state information to read.
     <ctxt> Some relevant CPU context.
@@ -217,14 +272,14 @@
 		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
 			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
 
-	if (set_fixed_ranges(mtrr_state.fixed_ranges))
+	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
 		change_mask |= MTRR_CHANGE_MASK_FIXED;
 
 	/*  Set_mtrr_restore restores the old value of MTRRdefType,
 	   so to set it we fiddle with the saved value  */
 	if ((deftype_lo & 0xff) != mtrr_state.def_type
 	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
-		deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
+		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
 		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
 	}
 
@@ -233,7 +288,6 @@
 
 
 static unsigned long cr4 = 0;
-static u32 deftype_lo, deftype_hi;
 static DEFINE_SPINLOCK(set_atomicity_lock);
 
 /*
@@ -271,7 +325,7 @@
 	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
 
 	/*  Disable MTRRs, and set the default type to uncached  */
-	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
+	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
 }
 
 static void post_set(void) __releases(set_atomicity_lock)
@@ -300,7 +354,7 @@
 	prepare_set();
 
 	/* Actually set the state */
-	mask = set_mtrr_state(deftype_lo,deftype_hi);
+	mask = set_mtrr_state();
 
 	post_set();
 	local_irq_restore(flags);
@@ -366,7 +420,7 @@
 			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
 			return -EINVAL;
 		}
-		if (!(base + size < 0x70000000 || base > 0x7003FFFF) &&
+		if (!(base + size < 0x70000 || base > 0x7003F) &&
 		    (type == MTRR_TYPE_WRCOMB
 		     || type == MTRR_TYPE_WRBACK)) {
 			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
diff --git a/arch/i386/kernel/cpu/mtrr/if.c b/arch/i386/kernel/cpu/mtrr/if.c
index 5ac051b..5ae1705 100644
--- a/arch/i386/kernel/cpu/mtrr/if.c
+++ b/arch/i386/kernel/cpu/mtrr/if.c
@@ -17,7 +17,7 @@
 
 #define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private)
 
-static char *mtrr_strings[MTRR_NUM_TYPES] =
+static const char *const mtrr_strings[MTRR_NUM_TYPES] =
 {
     "uncachable",               /* 0 */
     "write-combining",          /* 1 */
@@ -28,7 +28,7 @@
     "write-back",               /* 6 */
 };
 
-char *mtrr_attrib_to_str(int x)
+const char *mtrr_attrib_to_str(int x)
 {
 	return (x <= 6) ? mtrr_strings[x] : "?";
 }
@@ -44,10 +44,9 @@
 
 	max = num_var_ranges;
 	if (fcount == NULL) {
-		fcount = kmalloc(max * sizeof *fcount, GFP_KERNEL);
+		fcount = kzalloc(max * sizeof *fcount, GFP_KERNEL);
 		if (!fcount)
 			return -ENOMEM;
-		memset(fcount, 0, max * sizeof *fcount);
 		FILE_FCOUNT(file) = fcount;
 	}
 	if (!page) {
@@ -155,6 +154,7 @@
 {
 	int err = 0;
 	mtrr_type type;
+	unsigned long size;
 	struct mtrr_sentry sentry;
 	struct mtrr_gentry gentry;
 	void __user *arg = (void __user *) __arg;
@@ -235,15 +235,15 @@
 	case MTRRIOC_GET_ENTRY:
 		if (gentry.regnum >= num_var_ranges)
 			return -EINVAL;
-		mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type);
+		mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
 
 		/* Hide entries that go above 4GB */
-		if (gentry.base + gentry.size > 0x100000
-		    || gentry.size == 0x100000)
+		if (gentry.base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
+		    || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)))
 			gentry.base = gentry.size = gentry.type = 0;
 		else {
 			gentry.base <<= PAGE_SHIFT;
-			gentry.size <<= PAGE_SHIFT;
+			gentry.size = size << PAGE_SHIFT;
 			gentry.type = type;
 		}
 
@@ -273,8 +273,14 @@
 	case MTRRIOC_GET_PAGE_ENTRY:
 		if (gentry.regnum >= num_var_ranges)
 			return -EINVAL;
-		mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type);
-		gentry.type = type;
+		mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
+		/* Hide entries that would overflow */
+		if (size != (__typeof__(gentry.size))size)
+			gentry.base = gentry.size = gentry.type = 0;
+		else {
+			gentry.size = size;
+			gentry.type = type;
+		}
 		break;
 	}
 
@@ -353,8 +359,7 @@
 	char factor;
 	int i, max, len;
 	mtrr_type type;
-	unsigned long base;
-	unsigned int size;
+	unsigned long base, size;
 
 	len = 0;
 	max = num_var_ranges;
@@ -373,7 +378,7 @@
 			}
 			/* RED-PEN: base can be > 32bit */ 
 			len += seq_printf(seq, 
-				   "reg%02i: base=0x%05lx000 (%4liMB), size=%4i%cB: %s, count=%d\n",
+				   "reg%02i: base=0x%05lx000 (%4luMB), size=%4lu%cB: %s, count=%d\n",
 			     i, base, base >> (20 - PAGE_SHIFT), size, factor,
 			     mtrr_attrib_to_str(type), usage_table[i]);
 		}
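
The MTRRIOC_GET_ENTRY path above now fetches the size into an unsigned long and hides any region that cannot be expressed in the 32-bit, byte-based gentry.size field.  The cut-off works out to 2^20 pages, i.e. 4 GB; a quick check of that arithmetic (the constant names mirror the kernel's, the program is illustrative):

    /* gentry_limit.c - the largest page count representable once the
     * size is shifted back into bytes for a 32-bit field. */
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        /* gentry.size is 32-bit on i386 */
        unsigned long limit = 1UL << (8 * sizeof(unsigned int) - PAGE_SHIFT);

        printf("limit = %lu pages = %lu MB\n",
               limit, limit >> (20 - PAGE_SHIFT));      /* 1048576 pages, 4096 MB */
        return 0;
    }
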
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index fff90bd..16bb7ea 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -59,7 +59,11 @@
 static void set_mtrr(unsigned int reg, unsigned long base,
 		     unsigned long size, mtrr_type type);
 
+#ifndef CONFIG_X86_64
 extern int arr3_protected;
+#else
+#define arr3_protected 0
+#endif
 
 void set_mtrr_ops(struct mtrr_ops * ops)
 {
@@ -168,6 +172,13 @@
 
 #endif
 
+static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
+	return type1 == MTRR_TYPE_UNCACHABLE ||
+	       type2 == MTRR_TYPE_UNCACHABLE ||
+	       (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
+	       (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
+}
+
 /**
  * set_mtrr - update mtrrs on all processors
  * @reg:	mtrr in question
@@ -263,8 +274,8 @@
 
 /**
  *	mtrr_add_page - Add a memory type region
- *	@base: Physical base address of region in pages (4 KB)
- *	@size: Physical size of region in pages (4 KB)
+ *	@base: Physical base address of region in pages (in units of 4 kB!)
+ *	@size: Physical size of region in pages (4 kB)
  *	@type: Type of MTRR desired
  *	@increment: If this is true do usage counting on the region
  *
@@ -300,11 +311,9 @@
 int mtrr_add_page(unsigned long base, unsigned long size, 
 		  unsigned int type, char increment)
 {
-	int i;
+	int i, replace, error;
 	mtrr_type ltype;
-	unsigned long lbase;
-	unsigned int lsize;
-	int error;
+	unsigned long lbase, lsize;
 
 	if (!mtrr_if)
 		return -ENXIO;
@@ -324,12 +333,18 @@
 		return -ENOSYS;
 	}
 
+	if (!size) {
+		printk(KERN_WARNING "mtrr: zero sized request\n");
+		return -EINVAL;
+	}
+
 	if (base & size_or_mask || size & size_or_mask) {
 		printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
 		return -EINVAL;
 	}
 
 	error = -EINVAL;
+	replace = -1;
 
 	/* No CPU hotplug when we change MTRR entries */
 	lock_cpu_hotplug();
@@ -337,21 +352,28 @@
 	mutex_lock(&mtrr_mutex);
 	for (i = 0; i < num_var_ranges; ++i) {
 		mtrr_if->get(i, &lbase, &lsize, &ltype);
-		if (base >= lbase + lsize)
-			continue;
-		if ((base < lbase) && (base + size <= lbase))
+		if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
 			continue;
 		/*  At this point we know there is some kind of overlap/enclosure  */
-		if ((base < lbase) || (base + size > lbase + lsize)) {
+		if (base < lbase || base + size - 1 > lbase + lsize - 1) {
+			if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
+				/*  New region encloses an existing region  */
+				if (type == ltype) {
+					replace = replace == -1 ? i : -2;
+					continue;
+				}
+				else if (types_compatible(type, ltype))
+					continue;
+			}
 			printk(KERN_WARNING
 			       "mtrr: 0x%lx000,0x%lx000 overlaps existing"
-			       " 0x%lx000,0x%x000\n", base, size, lbase,
+			       " 0x%lx000,0x%lx000\n", base, size, lbase,
 			       lsize);
 			goto out;
 		}
 		/*  New region is enclosed by an existing region  */
 		if (ltype != type) {
-			if (type == MTRR_TYPE_UNCACHABLE)
+			if (types_compatible(type, ltype))
 				continue;
 			printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
 			     base, size, mtrr_attrib_to_str(ltype),
@@ -364,10 +386,18 @@
 		goto out;
 	}
 	/*  Search for an empty MTRR  */
-	i = mtrr_if->get_free_region(base, size);
+	i = mtrr_if->get_free_region(base, size, replace);
 	if (i >= 0) {
 		set_mtrr(i, base, size, type);
-		usage_table[i] = 1;
+		if (likely(replace < 0))
+			usage_table[i] = 1;
+		else {
+			usage_table[i] = usage_table[replace] + !!increment;
+			if (unlikely(replace != i)) {
+				set_mtrr(replace, 0, 0, 0);
+				usage_table[replace] = 0;
+			}
+		}
 	} else
 		printk(KERN_INFO "mtrr: no more MTRRs available\n");
 	error = i;
@@ -455,8 +485,7 @@
 {
 	int i, max;
 	mtrr_type ltype;
-	unsigned long lbase;
-	unsigned int lsize;
+	unsigned long lbase, lsize;
 	int error = -EINVAL;
 
 	if (!mtrr_if)
@@ -544,9 +573,11 @@
 
 static void __init init_ifs(void)
 {
+#ifndef CONFIG_X86_64
 	amd_init_mtrr();
 	cyrix_init_mtrr();
 	centaur_init_mtrr();
+#endif
 }
 
 /* The suspend/resume methods are only for CPU without MTRR. CPU using generic
@@ -555,7 +586,7 @@
 struct mtrr_value {
 	mtrr_type	ltype;
 	unsigned long	lbase;
-	unsigned int	lsize;
+	unsigned long	lsize;
 };
 
 static struct mtrr_value * mtrr_state;
@@ -565,10 +596,8 @@
 	int i;
 	int size = num_var_ranges * sizeof(struct mtrr_value);
 
-	mtrr_state = kmalloc(size,GFP_ATOMIC);
-	if (mtrr_state)
-		memset(mtrr_state,0,size);
-	else
+	mtrr_state = kzalloc(size,GFP_ATOMIC);
+	if (!mtrr_state)
 		return -ENOMEM;
 
 	for (i = 0; i < num_var_ranges; i++) {
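
mtrr_add_page() above switches its overlap test to inclusive end addresses: a new region and an existing one are disjoint only if one ends before the other starts.  One motivation for the inclusive form is that base + size can wrap to zero for a region ending exactly at the top of the address space, which an exclusive-end comparison mishandles.  A self-contained sketch of the test, with invented page numbers:

    /* overlap.c - inclusive-end range overlap test, as in mtrr_add_page(). */
    #include <stdio.h>

    static int ranges_overlap(unsigned long base1, unsigned long size1,
                              unsigned long base2, unsigned long size2)
    {
        if (!size1 || !size2)
            return 0;
        return !(base1 > base2 + size2 - 1 || base1 + size1 - 1 < base2);
    }

    int main(void)
    {
        /* page numbers; the second pair grows by one page and touches the first */
        printf("%d\n", ranges_overlap(0xfff00, 0x100, 0xffe00, 0x100));  /* 0 */
        printf("%d\n", ranges_overlap(0xfff00, 0x100, 0xffe00, 0x101));  /* 1 */
        return 0;
    }
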
diff --git a/arch/i386/kernel/cpu/mtrr/mtrr.h b/arch/i386/kernel/cpu/mtrr/mtrr.h
index 99c9f26..d61ea9d 100644
--- a/arch/i386/kernel/cpu/mtrr/mtrr.h
+++ b/arch/i386/kernel/cpu/mtrr/mtrr.h
@@ -43,15 +43,16 @@
 	void	(*set_all)(void);
 
 	void	(*get)(unsigned int reg, unsigned long *base,
-		       unsigned int *size, mtrr_type * type);
-	int	(*get_free_region) (unsigned long base, unsigned long size);
-
+		       unsigned long *size, mtrr_type * type);
+	int	(*get_free_region)(unsigned long base, unsigned long size,
+				   int replace_reg);
 	int	(*validate_add_page)(unsigned long base, unsigned long size,
 				     unsigned int type);
 	int	(*have_wrcomb)(void);
 };
 
-extern int generic_get_free_region(unsigned long base, unsigned long size);
+extern int generic_get_free_region(unsigned long base, unsigned long size,
+				   int replace_reg);
 extern int generic_validate_add_page(unsigned long base, unsigned long size,
 				     unsigned int type);
 
@@ -62,17 +63,17 @@
 /* library functions for processor-specific routines */
 struct set_mtrr_context {
 	unsigned long flags;
-	unsigned long deftype_lo;
-	unsigned long deftype_hi;
 	unsigned long cr4val;
-	unsigned long ccr3;
+	u32 deftype_lo;
+	u32 deftype_hi;
+	u32 ccr3;
 };
 
 struct mtrr_var_range {
-	unsigned long base_lo;
-	unsigned long base_hi;
-	unsigned long mask_lo;
-	unsigned long mask_hi;
+	u32 base_lo;
+	u32 base_hi;
+	u32 mask_lo;
+	u32 mask_hi;
 };
 
 void set_mtrr_done(struct set_mtrr_context *ctxt);
@@ -92,6 +93,6 @@
 extern unsigned int num_var_ranges;
 
 void mtrr_state_warn(void);
-char *mtrr_attrib_to_str(int x);
+const char *mtrr_attrib_to_str(int x);
 void mtrr_wrmsr(unsigned, unsigned, unsigned);
 
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 76aac08..6624d85 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -152,9 +152,10 @@
 				seq_printf(m, " [%d]", i);
 		}
 
-	seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n",
+	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
 		     c->loops_per_jiffy/(500000/HZ),
 		     (c->loops_per_jiffy/(5000/HZ)) % 100);
+	seq_printf(m, "clflush size\t: %u\n\n", c->x86_clflush_size);
 
 	return 0;
 }
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index fde8bea..db6dd20 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -34,7 +34,6 @@
 #include <linux/major.h>
 #include <linux/fs.h>
 #include <linux/smp_lock.h>
-#include <linux/fs.h>
 #include <linux/device.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
@@ -156,28 +155,27 @@
 	.open = cpuid_open,
 };
 
-static int cpuid_class_device_create(int i)
+static int cpuid_device_create(int i)
 {
 	int err = 0;
-	struct class_device *class_err;
+	struct device *dev;
 
-	class_err = class_device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, i), NULL, "cpu%d",i);
-	if (IS_ERR(class_err))
-		err = PTR_ERR(class_err);
+	dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, i), "cpu%d",i);
+	if (IS_ERR(dev))
+		err = PTR_ERR(dev);
 	return err;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 
 	switch (action) {
 	case CPU_ONLINE:
-		cpuid_class_device_create(cpu);
+		cpuid_device_create(cpu);
 		break;
 	case CPU_DEAD:
-		class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+		device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
 		break;
 	}
 	return NOTIFY_OK;
@@ -187,7 +185,6 @@
 {
 	.notifier_call = cpuid_class_cpu_callback,
 };
-#endif /* !CONFIG_HOTPLUG_CPU */
 
 static int __init cpuid_init(void)
 {
@@ -206,7 +203,7 @@
 		goto out_chrdev;
 	}
 	for_each_online_cpu(i) {
-		err = cpuid_class_device_create(i);
+		err = cpuid_device_create(i);
 		if (err != 0) 
 			goto out_class;
 	}
@@ -218,7 +215,7 @@
 out_class:
 	i = 0;
 	for_each_online_cpu(i) {
-		class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, i));
+		device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, i));
 	}
 	class_destroy(cpuid_class);
 out_chrdev:
@@ -232,7 +229,7 @@
 	int cpu = 0;
 
 	for_each_online_cpu(cpu)
-		class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+		device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
 	class_destroy(cpuid_class);
 	unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
 	unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 144b432..a5e0e99 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -31,68 +31,6 @@
 /* This keeps a track of which one is crashing cpu. */
 static int crashing_cpu;
 
-static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
-							       size_t data_len)
-{
-	struct elf_note note;
-
-	note.n_namesz = strlen(name) + 1;
-	note.n_descsz = data_len;
-	note.n_type   = type;
-	memcpy(buf, &note, sizeof(note));
-	buf += (sizeof(note) +3)/4;
-	memcpy(buf, name, note.n_namesz);
-	buf += (note.n_namesz + 3)/4;
-	memcpy(buf, data, note.n_descsz);
-	buf += (note.n_descsz + 3)/4;
-
-	return buf;
-}
-
-static void final_note(u32 *buf)
-{
-	struct elf_note note;
-
-	note.n_namesz = 0;
-	note.n_descsz = 0;
-	note.n_type   = 0;
-	memcpy(buf, &note, sizeof(note));
-}
-
-static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
-{
-	struct elf_prstatus prstatus;
-	u32 *buf;
-
-	if ((cpu < 0) || (cpu >= NR_CPUS))
-		return;
-
-	/* Using ELF notes here is opportunistic.
-	 * I need a well defined structure format
-	 * for the data I pass, and I need tags
-	 * on the data to indicate what information I have
-	 * squirrelled away.  ELF notes happen to provide
-	 * all of that, so there is no need to invent something new.
-	 */
-	buf = (u32*)per_cpu_ptr(crash_notes, cpu);
-	if (!buf)
-		return;
-	memset(&prstatus, 0, sizeof(prstatus));
-	prstatus.pr_pid = current->pid;
-	elf_core_copy_regs(&prstatus.pr_reg, regs);
-	buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
-				sizeof(prstatus));
-	final_note(buf);
-}
-
-static void crash_save_self(struct pt_regs *regs)
-{
-	int cpu;
-
-	cpu = safe_smp_processor_id();
-	crash_save_this_cpu(regs, cpu);
-}
-
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 static atomic_t waiting_for_crash_ipi;
 
@@ -121,7 +59,7 @@
 		crash_fixup_ss_esp(&fixed_regs, regs);
 		regs = &fixed_regs;
 	}
-	crash_save_this_cpu(regs, cpu);
+	crash_save_cpu(regs, cpu);
 	disable_local_APIC();
 	atomic_dec(&waiting_for_crash_ipi);
 	/* Assume hlt works */
@@ -195,5 +133,5 @@
 #if defined(CONFIG_X86_IO_APIC)
 	disable_IO_APIC();
 #endif
-	crash_save_self(regs);
+	crash_save_cpu(regs, safe_smp_processor_id());
 }
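
The helpers deleted from crash.c above (the crash path now calls the generic crash_save_cpu()) show how a crash register dump is packed as an ELF note: a header with name and descriptor lengths, then the name and the data, each rounded up to a 4-byte boundary.  A userspace sketch of that packing, mirroring the removed append_elf_note(), with the payload reduced to a stub:

    /* elfnote.c - pack one ELF note record into a word buffer. */
    #include <elf.h>
    #include <stdio.h>
    #include <string.h>

    static unsigned int *append_note(unsigned int *buf, const char *name,
                                     unsigned int type, const void *data,
                                     size_t data_len)
    {
        Elf32_Nhdr note = {
            .n_namesz = strlen(name) + 1,
            .n_descsz = data_len,
            .n_type   = type,
        };

        memcpy(buf, &note, sizeof(note));
        buf += (sizeof(note) + 3) / 4;
        memcpy(buf, name, note.n_namesz);
        buf += (note.n_namesz + 3) / 4;        /* name padded to 4 bytes */
        memcpy(buf, data, note.n_descsz);
        buf += (note.n_descsz + 3) / 4;        /* data padded to 4 bytes */
        return buf;
    }

    int main(void)
    {
        unsigned int buf[64];
        int regs_stub = 1234;                  /* stands in for elf_prstatus */
        unsigned int *end = append_note(buf, "CORE", NT_PRSTATUS,
                                        &regs_stub, sizeof(regs_stub));
        printf("note uses %zu 32-bit words\n", (size_t)(end - buf));    /* 6 */
        return 0;
    }
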
diff --git a/arch/i386/kernel/e820.c b/arch/i386/kernel/e820.c
new file mode 100644
index 0000000..2f7d0a9
--- /dev/null
+++ b/arch/i386/kernel/e820.c
@@ -0,0 +1,894 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/kexec.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/efi.h>
+#include <linux/pfn.h>
+#include <linux/uaccess.h>
+
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/e820.h>
+
+#ifdef CONFIG_EFI
+int efi_enabled = 0;
+EXPORT_SYMBOL(efi_enabled);
+#endif
+
+struct e820map e820;
+struct change_member {
+	struct e820entry *pbios; /* pointer to original bios entry */
+	unsigned long long addr; /* address for this change point */
+};
+static struct change_member change_point_list[2*E820MAX] __initdata;
+static struct change_member *change_point[2*E820MAX] __initdata;
+static struct e820entry *overlap_list[E820MAX] __initdata;
+static struct e820entry new_bios[E820MAX] __initdata;
+/* For PCI or other memory-mapped resources */
+unsigned long pci_mem_start = 0x10000000;
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pci_mem_start);
+#endif
+extern int user_defined_memmap;
+struct resource data_resource = {
+	.name	= "Kernel data",
+	.start	= 0,
+	.end	= 0,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+struct resource code_resource = {
+	.name	= "Kernel code",
+	.start	= 0,
+	.end	= 0,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource system_rom_resource = {
+	.name	= "System ROM",
+	.start	= 0xf0000,
+	.end	= 0xfffff,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+
+static struct resource extension_rom_resource = {
+	.name	= "Extension ROM",
+	.start	= 0xe0000,
+	.end	= 0xeffff,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+
+static struct resource adapter_rom_resources[] = { {
+	.name 	= "Adapter ROM",
+	.start	= 0xc8000,
+	.end	= 0,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+	.name 	= "Adapter ROM",
+	.start	= 0,
+	.end	= 0,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+	.name 	= "Adapter ROM",
+	.start	= 0,
+	.end	= 0,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+	.name 	= "Adapter ROM",
+	.start	= 0,
+	.end	= 0,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+	.name 	= "Adapter ROM",
+	.start	= 0,
+	.end	= 0,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+	.name 	= "Adapter ROM",
+	.start	= 0,
+	.end	= 0,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+} };
+
+static struct resource video_rom_resource = {
+	.name 	= "Video ROM",
+	.start	= 0xc0000,
+	.end	= 0xc7fff,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+
+static struct resource video_ram_resource = {
+	.name	= "Video RAM area",
+	.start	= 0xa0000,
+	.end	= 0xbffff,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource standard_io_resources[] = { {
+	.name	= "dma1",
+	.start	= 0x0000,
+	.end	= 0x001f,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+	.name	= "pic1",
+	.start	= 0x0020,
+	.end	= 0x0021,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+	.name   = "timer0",
+	.start	= 0x0040,
+	.end    = 0x0043,
+	.flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+	.name   = "timer1",
+	.start  = 0x0050,
+	.end    = 0x0053,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+	.name	= "keyboard",
+	.start	= 0x0060,
+	.end	= 0x006f,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+	.name	= "dma page reg",
+	.start	= 0x0080,
+	.end	= 0x008f,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+	.name	= "pic2",
+	.start	= 0x00a0,
+	.end	= 0x00a1,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+	.name	= "dma2",
+	.start	= 0x00c0,
+	.end	= 0x00df,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+	.name	= "fpu",
+	.start	= 0x00f0,
+	.end	= 0x00ff,
+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
+} };
+
+static int romsignature(const unsigned char *x)
+{
+	unsigned short sig;
+	int ret = 0;
+	if (probe_kernel_address((const unsigned short *)x, sig) == 0)
+		ret = (sig == 0xaa55);
+	return ret;
+}
+
+static int __init romchecksum(unsigned char *rom, unsigned long length)
+{
+	unsigned char *p, sum = 0;
+
+	for (p = rom; p < rom + length; p++)
+		sum += *p;
+	return sum == 0;
+}
+
+static void __init probe_roms(void)
+{
+	unsigned long start, length, upper;
+	unsigned char *rom;
+	int	      i;
+
+	/* video rom */
+	upper = adapter_rom_resources[0].start;
+	for (start = video_rom_resource.start; start < upper; start += 2048) {
+		rom = isa_bus_to_virt(start);
+		if (!romsignature(rom))
+			continue;
+
+		video_rom_resource.start = start;
+
+		/* 0 < length <= 0x7f * 512, historically */
+		length = rom[2] * 512;
+
+		/* if checksum okay, trust length byte */
+		if (length && romchecksum(rom, length))
+			video_rom_resource.end = start + length - 1;
+
+		request_resource(&iomem_resource, &video_rom_resource);
+		break;
+	}
+
+	start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
+	if (start < upper)
+		start = upper;
+
+	/* system rom */
+	request_resource(&iomem_resource, &system_rom_resource);
+	upper = system_rom_resource.start;
+
+	/* check for extension rom (ignore length byte!) */
+	rom = isa_bus_to_virt(extension_rom_resource.start);
+	if (romsignature(rom)) {
+		length = extension_rom_resource.end - extension_rom_resource.start + 1;
+		if (romchecksum(rom, length)) {
+			request_resource(&iomem_resource, &extension_rom_resource);
+			upper = extension_rom_resource.start;
+		}
+	}
+
+	/* check for adapter roms on 2k boundaries */
+	for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; start += 2048) {
+		rom = isa_bus_to_virt(start);
+		if (!romsignature(rom))
+			continue;
+
+		/* 0 < length <= 0x7f * 512, historically */
+		length = rom[2] * 512;
+
+		/* but accept any length that fits if checksum okay */
+		if (!length || start + length > upper || !romchecksum(rom, length))
+			continue;
+
+		adapter_rom_resources[i].start = start;
+		adapter_rom_resources[i].end = start + length - 1;
+		request_resource(&iomem_resource, &adapter_rom_resources[i]);
+
+		start = adapter_rom_resources[i++].end & ~2047UL;
+	}
+}
+
+/*
+ * Request address space for all standard RAM and ROM resources
+ * and also for regions reported as reserved by the e820.
+ */
+static void __init
+legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
+{
+	int i;
+
+	probe_roms();
+	for (i = 0; i < e820.nr_map; i++) {
+		struct resource *res;
+#ifndef CONFIG_RESOURCES_64BIT
+		if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
+			continue;
+#endif
+		res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
+		switch (e820.map[i].type) {
+		case E820_RAM:	res->name = "System RAM"; break;
+		case E820_ACPI:	res->name = "ACPI Tables"; break;
+		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
+		default:	res->name = "reserved";
+		}
+		res->start = e820.map[i].addr;
+		res->end = res->start + e820.map[i].size - 1;
+		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+		if (request_resource(&iomem_resource, res)) {
+			kfree(res);
+			continue;
+		}
+		if (e820.map[i].type == E820_RAM) {
+			/*
+			 *  We don't know which RAM region contains kernel data,
+			 *  so we try it repeatedly and let the resource manager
+			 *  test it.
+			 */
+			request_resource(res, code_resource);
+			request_resource(res, data_resource);
+#ifdef CONFIG_KEXEC
+			request_resource(res, &crashk_res);
+#endif
+		}
+	}
+}
+
+/*
+ * Request address space for all standard resources
+ *
+ * This is called just before pcibios_init(), which is also a
+ * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
+ */
+static int __init request_standard_resources(void)
+{
+	int i;
+
+	printk("Setting up standard PCI resources\n");
+	if (efi_enabled)
+		efi_initialize_iomem_resources(&code_resource, &data_resource);
+	else
+		legacy_init_iomem_resources(&code_resource, &data_resource);
+
+	/* EFI systems may still have VGA */
+	request_resource(&iomem_resource, &video_ram_resource);
+
+	/* request I/O space for devices used on all i[345]86 PCs */
+	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
+		request_resource(&ioport_resource, &standard_io_resources[i]);
+	return 0;
+}
+
+subsys_initcall(request_standard_resources);
+
+void __init add_memory_region(unsigned long long start,
+			      unsigned long long size, int type)
+{
+	int x;
+
+	if (!efi_enabled) {
+       		x = e820.nr_map;
+
+		if (x == E820MAX) {
+		    printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+		    return;
+		}
+
+		e820.map[x].addr = start;
+		e820.map[x].size = size;
+		e820.map[x].type = type;
+		e820.nr_map++;
+	}
+} /* add_memory_region */
+
+/*
+ * Sanitize the BIOS e820 map.
+ *
+ * Some e820 responses include overlapping entries.  The following
+ * replaces the original e820 map with a new one, removing overlaps.
+ *
+ */
+int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
+{
+	struct change_member *change_tmp;
+	unsigned long current_type, last_type;
+	unsigned long long last_addr;
+	int chgidx, still_changing;
+	int overlap_entries;
+	int new_bios_entry;
+	int old_nr, new_nr, chg_nr;
+	int i;
+
+	/*
+		Visually we're performing the following (1,2,3,4 = memory types)...
+
+		Sample memory map (w/overlaps):
+		   ____22__________________
+		   ______________________4_
+		   ____1111________________
+		   _44_____________________
+		   11111111________________
+		   ____________________33__
+		   ___________44___________
+		   __________33333_________
+		   ______________22________
+		   ___________________2222_
+		   _________111111111______
+		   _____________________11_
+		   _________________4______
+
+		Sanitized equivalent (no overlap):
+		   1_______________________
+		   _44_____________________
+		   ___1____________________
+		   ____22__________________
+		   ______11________________
+		   _________1______________
+		   __________3_____________
+		   ___________44___________
+		   _____________33_________
+		   _______________2________
+		   ________________1_______
+		   _________________4______
+		   ___________________2____
+		   ____________________33__
+		   ______________________4_
+	*/
+	printk("sanitize start\n");
+	/* if there's only one memory region, don't bother */
+	if (*pnr_map < 2) {
+		printk("sanitize bail 0\n");
+		return -1;
+	}
+
+	old_nr = *pnr_map;
+
+	/* bail out if we find any unreasonable addresses in bios map */
+	for (i=0; i<old_nr; i++)
+		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr) {
+			printk("sanitize bail 1\n");
+			return -1;
+		}
+
+	/* create pointers for initial change-point information (for sorting) */
+	for (i=0; i < 2*old_nr; i++)
+		change_point[i] = &change_point_list[i];
+
+	/* record all known change-points (starting and ending addresses),
+	   omitting those that are for empty memory regions */
+	chgidx = 0;
+	for (i=0; i < old_nr; i++)	{
+		if (biosmap[i].size != 0) {
+			change_point[chgidx]->addr = biosmap[i].addr;
+			change_point[chgidx++]->pbios = &biosmap[i];
+			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
+			change_point[chgidx++]->pbios = &biosmap[i];
+		}
+	}
+	chg_nr = chgidx;    	/* true number of change-points */
+
+	/* sort change-point list by memory addresses (low -> high) */
+	still_changing = 1;
+	while (still_changing)	{
+		still_changing = 0;
+		for (i=1; i < chg_nr; i++)  {
+			/* if <current_addr> > <last_addr>, swap */
+			/* or, if current=<start_addr> & last=<end_addr>, swap */
+			if ((change_point[i]->addr < change_point[i-1]->addr) ||
+				((change_point[i]->addr == change_point[i-1]->addr) &&
+				 (change_point[i]->addr == change_point[i]->pbios->addr) &&
+				 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
+			   )
+			{
+				change_tmp = change_point[i];
+				change_point[i] = change_point[i-1];
+				change_point[i-1] = change_tmp;
+				still_changing=1;
+			}
+		}
+	}
+
+	/* create a new bios memory map, removing overlaps */
+	overlap_entries=0;	 /* number of entries in the overlap table */
+	new_bios_entry=0;	 /* index for creating new bios map entries */
+	last_type = 0;		 /* start with undefined memory type */
+	last_addr = 0;		 /* start with 0 as last starting address */
+	/* loop through change-points, determining effect on the new bios map */
+	for (chgidx=0; chgidx < chg_nr; chgidx++)
+	{
+		/* keep track of all overlapping bios entries */
+		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
+		{
+			/* add map entry to overlap list (> 1 entry implies an overlap) */
+			overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
+		}
+		else
+		{
+			/* remove entry from list (order independent, so swap with last) */
+			for (i=0; i<overlap_entries; i++)
+			{
+				if (overlap_list[i] == change_point[chgidx]->pbios)
+					overlap_list[i] = overlap_list[overlap_entries-1];
+			}
+			overlap_entries--;
+		}
+		/* if there are overlapping entries, decide which "type" to use */
+		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
+		current_type = 0;
+		for (i=0; i<overlap_entries; i++)
+			if (overlap_list[i]->type > current_type)
+				current_type = overlap_list[i]->type;
+		/* continue building up new bios map based on this information */
+		if (current_type != last_type)	{
+			if (last_type != 0)	 {
+				new_bios[new_bios_entry].size =
+					change_point[chgidx]->addr - last_addr;
+				/* move forward only if the new size was non-zero */
+				if (new_bios[new_bios_entry].size != 0)
+					if (++new_bios_entry >= E820MAX)
+						break; 	/* no more space left for new bios entries */
+			}
+			if (current_type != 0)	{
+				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
+				new_bios[new_bios_entry].type = current_type;
+				last_addr=change_point[chgidx]->addr;
+			}
+			last_type = current_type;
+		}
+	}
+	new_nr = new_bios_entry;   /* retain count for new bios entries */
+
+	/* copy new bios mapping into original location */
+	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
+	*pnr_map = new_nr;
+
+	printk("sanitize end\n");
+	return 0;
+}
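
A small user-space sketch of the precedence rule used above (the largest type value among currently overlapping entries wins); the ranges and type values are illustrative only, not taken from a real BIOS map.

#include <stdio.h>

struct range { unsigned long long start, end; int type; };

static int type_at(unsigned long long addr, struct range *r, int n)
{
	int t = 0, i;
	for (i = 0; i < n; i++)
		if (addr >= r[i].start && addr < r[i].end && r[i].type > t)
			t = r[i].type;	/* larger type value takes precedence */
	return t;
}

int main(void)
{
	struct range r[] = {
		{ 0x00000, 0xa0000, 1 },	/* usable */
		{ 0x9f000, 0xa0000, 2 },	/* reserved, overlaps the last page */
	};
	printf("0x9e000 -> type %d\n", type_at(0x9e000, r, 2));	/* 1 */
	printf("0x9f800 -> type %d\n", type_at(0x9f800, r, 2));	/* 2 */
	return 0;
}
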
+
+/*
+ * Copy the BIOS e820 map into a safe place.
+ *
+ * Sanity-check it while we're at it..
+ *
+ * If we're lucky and live on a modern system, the setup code
+ * will have given us a memory map that we can use to properly
+ * set up memory.  If we aren't, we'll fake a memory map.
+ *
+ * We check to see that the memory map contains at least 2 elements
+ * before we'll use it, because the detection code in setup.S may
+ * not be perfect and almost every PC known to man has two memory
+ * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
+ * thinkpad 560x, for example, does not cooperate with the memory
+ * detection code.)
+ */
+int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+{
+	/* Only one memory region (or negative)? Ignore it */
+	if (nr_map < 2)
+		return -1;
+
+	do {
+		unsigned long long start = biosmap->addr;
+		unsigned long long size = biosmap->size;
+		unsigned long long end = start + size;
+		unsigned long type = biosmap->type;
+		printk("copy_e820_map() start: %016Lx size: %016Lx end: %016Lx type: %ld\n", start, size, end, type);
+
+		/* Overflow in 64 bits? Ignore the memory map. */
+		if (start > end)
+			return -1;
+
+		/*
+		 * Some BIOSes claim RAM in the 640k - 1M region.
+		 * Not right. Fix it up.
+		 */
+		if (type == E820_RAM) {
+			printk("copy_e820_map() type is E820_RAM\n");
+			if (start < 0x100000ULL && end > 0xA0000ULL) {
+				printk("copy_e820_map() lies in range...\n");
+				if (start < 0xA0000ULL) {
+					printk("copy_e820_map() start < 0xA0000ULL\n");
+					add_memory_region(start, 0xA0000ULL-start, type);
+				}
+				if (end <= 0x100000ULL) {
+					printk("copy_e820_map() end <= 0x100000ULL\n");
+					continue;
+				}
+				start = 0x100000ULL;
+				size = end - start;
+			}
+		}
+		add_memory_region(start, size, type);
+	} while (biosmap++,--nr_map);
+	return 0;
+}
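
A minimal user-space sketch of the 640k-1M clipping performed above, assuming a single E820_RAM entry that straddles the hole; the addresses are illustrative only.

#include <stdio.h>

int main(void)
{
	unsigned long long start = 0x90000ULL, end = 0x110000ULL;

	if (start < 0x100000ULL && end > 0xA0000ULL) {
		if (start < 0xA0000ULL)
			printf("low part:  %#llx - %#llx\n", start, 0xA0000ULL);
		if (end > 0x100000ULL)
			printf("high part: %#llx - %#llx\n", 0x100000ULL, end);
	}
	return 0;
}
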
+
+/*
+ * Callback for efi_memory_walk.
+ */
+static int __init
+efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
+{
+	unsigned long *max_pfn = arg, pfn;
+
+	if (start < end) {
+		pfn = PFN_UP(end -1);
+		if (pfn > *max_pfn)
+			*max_pfn = pfn;
+	}
+	return 0;
+}
+
+static int __init
+efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
+{
+	memory_present(0, PFN_UP(start), PFN_DOWN(end));
+	return 0;
+}
+
+/*
+ * Find the highest page frame number we have available
+ */
+void __init find_max_pfn(void)
+{
+	int i;
+
+	max_pfn = 0;
+	if (efi_enabled) {
+		efi_memmap_walk(efi_find_max_pfn, &max_pfn);
+		efi_memmap_walk(efi_memory_present_wrapper, NULL);
+		return;
+	}
+
+	for (i = 0; i < e820.nr_map; i++) {
+		unsigned long start, end;
+		/* RAM? */
+		if (e820.map[i].type != E820_RAM)
+			continue;
+		start = PFN_UP(e820.map[i].addr);
+		end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+		if (start >= end)
+			continue;
+		if (end > max_pfn)
+			max_pfn = end;
+		memory_present(0, start, end);
+	}
+}
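
The PFN_UP()/PFN_DOWN() rounding used above can be illustrated with a standalone sketch, assuming 4 KiB pages; partial pages at either end of a region are excluded.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long addr = 0x9fc00, size = 0x60400;

	/* start rounds up, end rounds down, so partial pages are excluded */
	printf("start pfn %lu, end pfn %lu\n",
	       PFN_UP(addr), PFN_DOWN(addr + size));
	return 0;
}
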
+
+/*
+ * Free all available memory for boot time allocation.  Used
+ * as a callback function by efi_memory_walk()
+ */
+
+static int __init
+free_available_memory(unsigned long start, unsigned long end, void *arg)
+{
+	/* check max_low_pfn */
+	if (start >= (max_low_pfn << PAGE_SHIFT))
+		return 0;
+	if (end >= (max_low_pfn << PAGE_SHIFT))
+		end = max_low_pfn << PAGE_SHIFT;
+	if (start < end)
+		free_bootmem(start, end - start);
+
+	return 0;
+}
+/*
+ * Register fully available low RAM pages with the bootmem allocator.
+ */
+void __init register_bootmem_low_pages(unsigned long max_low_pfn)
+{
+	int i;
+
+	if (efi_enabled) {
+		efi_memmap_walk(free_available_memory, NULL);
+		return;
+	}
+	for (i = 0; i < e820.nr_map; i++) {
+		unsigned long curr_pfn, last_pfn, size;
+		/*
+		 * Reserve usable low memory
+		 */
+		if (e820.map[i].type != E820_RAM)
+			continue;
+		/*
+		 * We are rounding up the start address of usable memory:
+		 */
+		curr_pfn = PFN_UP(e820.map[i].addr);
+		if (curr_pfn >= max_low_pfn)
+			continue;
+		/*
+		 * ... and at the end of the usable range downwards:
+		 */
+		last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+
+		if (last_pfn > max_low_pfn)
+			last_pfn = max_low_pfn;
+
+		/*
+		 * .. finally, did all the rounding and playing
+		 * around just make the area go away?
+		 */
+		if (last_pfn <= curr_pfn)
+			continue;
+
+		size = last_pfn - curr_pfn;
+		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
+	}
+}
+
+void __init register_memory(void)
+{
+	unsigned long gapstart, gapsize, round;
+	unsigned long long last;
+	int i;
+
+	/*
+	 * Search for the biggest gap in the low 32 bits of the e820
+	 * memory space.
+	 */
+	last = 0x100000000ull;
+	gapstart = 0x10000000;
+	gapsize = 0x400000;
+	i = e820.nr_map;
+	while (--i >= 0) {
+		unsigned long long start = e820.map[i].addr;
+		unsigned long long end = start + e820.map[i].size;
+
+		/*
+		 * Since "last" is at most 4GB, we know we'll
+		 * fit in 32 bits if this condition is true
+		 */
+		if (last > end) {
+			unsigned long gap = last - end;
+
+			if (gap > gapsize) {
+				gapsize = gap;
+				gapstart = end;
+			}
+		}
+		if (start < last)
+			last = start;
+	}
+
+	/*
+	 * See how much we want to round up: start off with
+	 * rounding to the next 1MB area.
+	 */
+	round = 0x100000;
+	while ((gapsize >> 4) > round)
+		round += round;
+	/* Fun with two's complement */
+	pci_mem_start = (gapstart + round) & -round;
+
+	printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
+		pci_mem_start, gapstart, gapsize);
+}
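
A user-space sketch of the rounding applied to pci_mem_start above; the gap values are illustrative. (gapstart + round) & -round adds one rounding unit and then masks down, so the result is a multiple of round that lies strictly above gapstart.

#include <stdio.h>

int main(void)
{
	unsigned long gapstart = 0x3f6e0000UL, gapsize = 0xc0000000UL;
	unsigned long round = 0x100000UL;

	/* grow the rounding unit until it exceeds gapsize/16 */
	while ((gapsize >> 4) > round)
		round += round;
	/* add one rounding unit, then mask down to a multiple of round */
	printf("pci_mem_start = %#lx (round %#lx)\n",
	       (gapstart + round) & -round, round);
	return 0;
}
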
+
+void __init print_memory_map(char *who)
+{
+	int i;
+
+	for (i = 0; i < e820.nr_map; i++) {
+		printk(" %s: %016Lx - %016Lx ", who,
+			e820.map[i].addr,
+			e820.map[i].addr + e820.map[i].size);
+		switch (e820.map[i].type) {
+		case E820_RAM:	printk("(usable)\n");
+				break;
+		case E820_RESERVED:
+				printk("(reserved)\n");
+				break;
+		case E820_ACPI:
+				printk("(ACPI data)\n");
+				break;
+		case E820_NVS:
+				printk("(ACPI NVS)\n");
+				break;
+		default:	printk("type %lu\n", e820.map[i].type);
+				break;
+		}
+	}
+}
+
+static __init __always_inline void efi_limit_regions(unsigned long long size)
+{
+	unsigned long long current_addr = 0;
+	efi_memory_desc_t *md, *next_md;
+	void *p, *p1;
+	int i, j;
+
+	j = 0;
+	p1 = memmap.map;
+	for (p = p1, i = 0; p < memmap.map_end; p += memmap.desc_size, i++) {
+		md = p;
+		next_md = p1;
+		current_addr = md->phys_addr +
+			PFN_PHYS(md->num_pages);
+		if (is_available_memory(md)) {
+			if (md->phys_addr >= size) continue;
+			memcpy(next_md, md, memmap.desc_size);
+			if (current_addr >= size) {
+				next_md->num_pages -=
+					PFN_UP(current_addr-size);
+			}
+			p1 += memmap.desc_size;
+			next_md = p1;
+			j++;
+		} else if ((md->attribute & EFI_MEMORY_RUNTIME) ==
+			   EFI_MEMORY_RUNTIME) {
+			/* In order to make runtime services
+			 * available we have to include runtime
+			 * memory regions in the memory map */
+			memcpy(next_md, md, memmap.desc_size);
+			p1 += memmap.desc_size;
+			next_md = p1;
+			j++;
+		}
+	}
+	memmap.nr_map = j;
+	memmap.map_end = memmap.map +
+		(memmap.nr_map * memmap.desc_size);
+}
+
+void __init limit_regions(unsigned long long size)
+{
+	unsigned long long current_addr;
+	int i;
+
+	print_memory_map("limit_regions start");
+	if (efi_enabled) {
+		efi_limit_regions(size);
+		return;
+	}
+	for (i = 0; i < e820.nr_map; i++) {
+		current_addr = e820.map[i].addr + e820.map[i].size;
+		if (current_addr < size)
+			continue;
+
+		if (e820.map[i].type != E820_RAM)
+			continue;
+
+		if (e820.map[i].addr >= size) {
+			/*
+			 * This region starts past the end of the
+			 * requested size, skip it completely.
+			 */
+			e820.nr_map = i;
+		} else {
+			e820.nr_map = i + 1;
+			e820.map[i].size -= current_addr - size;
+		}
+		print_memory_map("limit_regions endfor");
+		return;
+	}
+	print_memory_map("limit_regions endfunc");
+}
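
The size adjustment above can be checked in isolation: if a RAM entry starts below the requested limit but ends past it, its size is shrunk so the entry ends exactly at the limit. A hedged sketch with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned long long addr = 0x100000ULL, size = 0x3ff00000ULL;
	unsigned long long limit = 0x20000000ULL;	/* e.g. mem=512M */
	unsigned long long end = addr + size;

	if (addr < limit && end > limit)
		size -= end - limit;	/* entry now ends exactly at the limit */
	printf("clipped size %#llx\n", size);	/* 0x1ff00000 */
	return 0;
}
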
+
+ /*
+  * This function checks if the entire range <start,end> is mapped with type.
+  *
+  * Note: this function only works correctly if the e820 table is sorted and
+  * non-overlapping, which is the case
+  */
+int __init
+e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
+{
+	u64 start = s;
+	u64 end = e;
+	int i;
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+		if (type && ei->type != type)
+			continue;
+		/* does this region overlap the <start,end> range at all? */
+		if (ei->addr >= end || ei->addr + ei->size <= start)
+			continue;
+		/* if the region covers the beginning of <start,end>, advance
+		 * start to the end of the region, since that part is covered
+		 */
+		if (ei->addr <= start)
+			start = ei->addr + ei->size;
+		/* if start is now at or beyond end, we're done, full
+		 * coverage */
+		if (start >= end)
+			return 1; /* we're done */
+	}
+	return 0;
+}
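
A user-space sketch of the coverage walk above, run against a tiny sorted, non-overlapping map; the two queries show a fully covered range and one that crosses the hole between 640k and 1M. Values are illustrative only.

#include <stdio.h>

struct ent { unsigned long long addr, size; };

static int all_mapped(unsigned long long start, unsigned long long end,
		      struct ent *map, int n)
{
	int i;
	for (i = 0; i < n; i++) {
		if (map[i].addr >= end || map[i].addr + map[i].size <= start)
			continue;
		if (map[i].addr <= start)
			start = map[i].addr + map[i].size;
		if (start >= end)
			return 1;
	}
	return 0;
}

int main(void)
{
	struct ent map[] = { { 0x0, 0xa0000 }, { 0x100000, 0x100000 } };

	printf("%d\n", all_mapped(0x1000, 0x9f000, map, 2));	/* 1: covered */
	printf("%d\n", all_mapped(0x9f000, 0x101000, map, 2));	/* 0: hole   */
	return 0;
}
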
+
+static int __init parse_memmap(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	if (strcmp(arg, "exactmap") == 0) {
+#ifdef CONFIG_CRASH_DUMP
+		/* If we are doing a crash dump, we
+		 * still need to know the real mem
+		 * size before original memory map is
+		 * reset.
+		 */
+		find_max_pfn();
+		saved_max_pfn = max_pfn;
+#endif
+		e820.nr_map = 0;
+		user_defined_memmap = 1;
+	} else {
+		/* If the user specifies memory size, we
+		 * limit the BIOS-provided memory map to
+		 * that size. exactmap can be used to specify
+		 * the exact map. mem=number can be used to
+		 * trim the existing memory map.
+		 */
+		unsigned long long start_at, mem_size;
+
+		mem_size = memparse(arg, &arg);
+		if (*arg == '@') {
+			start_at = memparse(arg+1, &arg);
+			add_memory_region(start_at, mem_size, E820_RAM);
+		} else if (*arg == '#') {
+			start_at = memparse(arg+1, &arg);
+			add_memory_region(start_at, mem_size, E820_ACPI);
+		} else if (*arg == '$') {
+			start_at = memparse(arg+1, &arg);
+			add_memory_region(start_at, mem_size, E820_RESERVED);
+		} else {
+			limit_regions(mem_size);
+			user_defined_memmap = 1;
+		}
+	}
+	return 0;
+}
+early_param("memmap", parse_memmap);
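
The size@start syntax handled above relies on memparse() for the numeric part; a rough user-space approximation of that suffix handling, using strtoull(), is sketched below (parse_size() is a hypothetical stand-in, not the kernel helper).

#include <stdio.h>
#include <stdlib.h>

/* minimal memparse()-style helper: number with optional K/M/G suffix */
static unsigned long long parse_size(const char *s, char **end)
{
	unsigned long long v = strtoull(s, end, 0);

	switch (**end) {
	case 'G': case 'g': v <<= 10;	/* fall through */
	case 'M': case 'm': v <<= 10;	/* fall through */
	case 'K': case 'k': v <<= 10; (*end)++;
	}
	return v;
}

int main(void)
{
	const char *arg = "64M@16M";
	char *p;
	unsigned long long size = parse_size(arg, &p);

	if (*p == '@') {
		unsigned long long start = parse_size(p + 1, &p);
		printf("E820_RAM region: start %#llx size %#llx\n", start, size);
	}
	return 0;
}
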
diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c
index 8b40648..b92c7f0 100644
--- a/arch/i386/kernel/efi.c
+++ b/arch/i386/kernel/efi.c
@@ -194,17 +194,24 @@
 	return 0;
 }
 /*
- * This should only be used during kernel init and before runtime
- * services have been remapped, therefore, we'll need to call in physical
- * mode.  Note, this call isn't used later, so mark it __init.
+ * This is used during kernel init before runtime
+ * services have been remapped, and also during suspend, so we
+ * may need to call it in either physical or virtual mode.
  */
-inline unsigned long __init efi_get_time(void)
+inline unsigned long efi_get_time(void)
 {
 	efi_status_t status;
 	efi_time_t eft;
 	efi_time_cap_t cap;
 
-	status = phys_efi_get_time(&eft, &cap);
+	if (efi.get_time) {
+		/* if we are in virtual mode use remapped function */
+ 		status = efi.get_time(&eft, &cap);
+	} else {
+		/* we are in physical mode */
+		status = phys_efi_get_time(&eft, &cap);
+	}
+
 	if (status != EFI_SUCCESS)
 		printk("Oops: efitime: can't read time status: 0x%lx\n",status);
 
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 5a63d6f..de34b7fe 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -30,12 +30,13 @@
  *	18(%esp) - %eax
  *	1C(%esp) - %ds
  *	20(%esp) - %es
- *	24(%esp) - orig_eax
- *	28(%esp) - %eip
- *	2C(%esp) - %cs
- *	30(%esp) - %eflags
- *	34(%esp) - %oldesp
- *	38(%esp) - %oldss
+ *	24(%esp) - %gs
+ *	28(%esp) - orig_eax
+ *	2C(%esp) - %eip
+ *	30(%esp) - %cs
+ *	34(%esp) - %eflags
+ *	38(%esp) - %oldesp
+ *	3C(%esp) - %oldss
  *
  * "current" is in register %ebx during any slow entries.
  */
@@ -48,26 +49,24 @@
 #include <asm/smp.h>
 #include <asm/page.h>
 #include <asm/desc.h>
+#include <asm/percpu.h>
 #include <asm/dwarf2.h>
 #include "irq_vectors.h"
 
-#define nr_syscalls ((syscall_table_size)/4)
+/*
+ * We use macros for low-level operations which need to be overridden
+ * for paravirtualization.  The following will never clobber any registers:
+ *   INTERRUPT_RETURN (aka. "iret")
+ *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
+ *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
+ *
+ * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
+ * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
+ * Allowing a register to be clobbered can shrink the paravirt replacement
+ * enough to patch inline, increasing performance.
+ */
 
-EBX		= 0x00
-ECX		= 0x04
-EDX		= 0x08
-ESI		= 0x0C
-EDI		= 0x10
-EBP		= 0x14
-EAX		= 0x18
-DS		= 0x1C
-ES		= 0x20
-ORIG_EAX	= 0x24
-EIP		= 0x28
-CS		= 0x2C
-EFLAGS		= 0x30
-OLDESP		= 0x34
-OLDSS		= 0x38
+#define nr_syscalls ((syscall_table_size)/4)
 
 CF_MASK		= 0x00000001
 TF_MASK		= 0x00000100
@@ -76,23 +75,16 @@
 NT_MASK		= 0x00004000
 VM_MASK		= 0x00020000
 
-/* These are replaces for paravirtualization */
-#define DISABLE_INTERRUPTS		cli
-#define ENABLE_INTERRUPTS		sti
-#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
-#define INTERRUPT_RETURN		iret
-#define GET_CR0_INTO_EAX		movl %cr0, %eax
-
 #ifdef CONFIG_PREEMPT
-#define preempt_stop		DISABLE_INTERRUPTS; TRACE_IRQS_OFF
+#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 #else
-#define preempt_stop
+#define preempt_stop(clobbers)
 #define resume_kernel		restore_nocheck
 #endif
 
 .macro TRACE_IRQS_IRET
 #ifdef CONFIG_TRACE_IRQFLAGS
-	testl $IF_MASK,EFLAGS(%esp)     # interrupts off?
+	testl $IF_MASK,PT_EFLAGS(%esp)     # interrupts off?
 	jz 1f
 	TRACE_IRQS_ON
 1:
@@ -107,6 +99,9 @@
 
 #define SAVE_ALL \
 	cld; \
+	pushl %gs; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	/*CFI_REL_OFFSET gs, 0;*/\
 	pushl %es; \
 	CFI_ADJUST_CFA_OFFSET 4;\
 	/*CFI_REL_OFFSET es, 0;*/\
@@ -136,7 +131,9 @@
 	CFI_REL_OFFSET ebx, 0;\
 	movl $(__USER_DS), %edx; \
 	movl %edx, %ds; \
-	movl %edx, %es;
+	movl %edx, %es; \
+	movl $(__KERNEL_PDA), %edx; \
+	movl %edx, %gs
 
 #define RESTORE_INT_REGS \
 	popl %ebx;	\
@@ -169,17 +166,22 @@
 2:	popl %es;	\
 	CFI_ADJUST_CFA_OFFSET -4;\
 	/*CFI_RESTORE es;*/\
-.section .fixup,"ax";	\
-3:	movl $0,(%esp);	\
-	jmp 1b;		\
+3:	popl %gs;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	/*CFI_RESTORE gs;*/\
+.pushsection .fixup,"ax";	\
 4:	movl $0,(%esp);	\
+	jmp 1b;		\
+5:	movl $0,(%esp);	\
 	jmp 2b;		\
-.previous;		\
+6:	movl $0,(%esp);	\
+	jmp 3b;		\
 .section __ex_table,"a";\
 	.align 4;	\
-	.long 1b,3b;	\
-	.long 2b,4b;	\
-.previous
+	.long 1b,4b;	\
+	.long 2b,5b;	\
+	.long 3b,6b;	\
+.popsection
 
 #define RING0_INT_FRAME \
 	CFI_STARTPROC simple;\
@@ -198,18 +200,18 @@
 #define RING0_PTREGS_FRAME \
 	CFI_STARTPROC simple;\
 	CFI_SIGNAL_FRAME;\
-	CFI_DEF_CFA esp, OLDESP-EBX;\
-	/*CFI_OFFSET cs, CS-OLDESP;*/\
-	CFI_OFFSET eip, EIP-OLDESP;\
-	/*CFI_OFFSET es, ES-OLDESP;*/\
-	/*CFI_OFFSET ds, DS-OLDESP;*/\
-	CFI_OFFSET eax, EAX-OLDESP;\
-	CFI_OFFSET ebp, EBP-OLDESP;\
-	CFI_OFFSET edi, EDI-OLDESP;\
-	CFI_OFFSET esi, ESI-OLDESP;\
-	CFI_OFFSET edx, EDX-OLDESP;\
-	CFI_OFFSET ecx, ECX-OLDESP;\
-	CFI_OFFSET ebx, EBX-OLDESP
+	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
+	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
+	CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
+	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
+	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
+	CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
+	CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
+	CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
+	CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
+	CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
+	CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
+	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
 
 ENTRY(ret_from_fork)
 	CFI_STARTPROC
@@ -237,17 +239,18 @@
 	ALIGN
 	RING0_PTREGS_FRAME
 ret_from_exception:
-	preempt_stop
+	preempt_stop(CLBR_ANY)
 ret_from_intr:
 	GET_THREAD_INFO(%ebp)
 check_userspace:
-	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
-	movb CS(%esp), %al
+	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
+	movb PT_CS(%esp), %al
 	andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
 	cmpl $USER_RPL, %eax
 	jb resume_kernel		# not returning to v8086 or userspace
+
 ENTRY(resume_userspace)
- 	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
+ 	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	movl TI_flags(%ebp), %ecx
@@ -258,14 +261,14 @@
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
-	DISABLE_INTERRUPTS
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
 	jnz restore_nocheck
 need_resched:
 	movl TI_flags(%ebp), %ecx	# need_resched set ?
 	testb $_TIF_NEED_RESCHED, %cl
 	jz restore_all
-	testl $IF_MASK,EFLAGS(%esp)     # interrupts off (exception path) ?
+	testl $IF_MASK,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
 	jz restore_all
 	call preempt_schedule_irq
 	jmp need_resched
@@ -287,7 +290,7 @@
 	 * No need to follow this irqs on/off section: the syscall
 	 * disabled irqs and here we enable it straight after entry:
 	 */
-	ENABLE_INTERRUPTS
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushl $(__USER_DS)
 	CFI_ADJUST_CFA_OFFSET 4
 	/*CFI_REL_OFFSET ss, 0*/
@@ -331,20 +334,27 @@
 	cmpl $(nr_syscalls), %eax
 	jae syscall_badsys
 	call *sys_call_table(,%eax,4)
-	movl %eax,EAX(%esp)
-	DISABLE_INTERRUPTS
+	movl %eax,PT_EAX(%esp)
+	DISABLE_INTERRUPTS(CLBR_ECX|CLBR_EDX)
 	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx
 	jne syscall_exit_work
 /* if something modifies registers it must also disable sysexit */
-	movl EIP(%esp), %edx
-	movl OLDESP(%esp), %ecx
+	movl PT_EIP(%esp), %edx
+	movl PT_OLDESP(%esp), %ecx
 	xorl %ebp,%ebp
 	TRACE_IRQS_ON
+1:	mov  PT_GS(%esp), %gs
 	ENABLE_INTERRUPTS_SYSEXIT
 	CFI_ENDPROC
-
+.pushsection .fixup,"ax"
+2:	movl $0,PT_GS(%esp)
+	jmp 1b
+.section __ex_table,"a"
+	.align 4
+	.long 1b,2b
+.popsection
 
 	# system call handler stub
 ENTRY(system_call)
@@ -353,7 +363,7 @@
 	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
-	testl $TF_MASK,EFLAGS(%esp)
+	testl $TF_MASK,PT_EFLAGS(%esp)
 	jz no_singlestep
 	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
 no_singlestep:
@@ -365,9 +375,9 @@
 	jae syscall_badsys
 syscall_call:
 	call *sys_call_table(,%eax,4)
-	movl %eax,EAX(%esp)		# store the return value
+	movl %eax,PT_EAX(%esp)		# store the return value
 syscall_exit:
-	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	TRACE_IRQS_OFF
@@ -376,12 +386,12 @@
 	jne syscall_exit_work
 
 restore_all:
-	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
-	# Warning: OLDSS(%esp) contains the wrong/random values if we
+	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
+	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
 	# are returning to the kernel.
 	# See comments in process.c:copy_thread() for details.
-	movb OLDSS(%esp), %ah
-	movb CS(%esp), %al
+	movb PT_OLDSS(%esp), %ah
+	movb PT_CS(%esp), %al
 	andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
 	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
 	CFI_REMEMBER_STATE
@@ -390,13 +400,13 @@
 	TRACE_IRQS_IRET
 restore_nocheck_notrace:
 	RESTORE_REGS
-	addl $4, %esp
+	addl $4, %esp			# skip orig_eax/error_code
 	CFI_ADJUST_CFA_OFFSET -4
 1:	INTERRUPT_RETURN
 .section .fixup,"ax"
 iret_exc:
 	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushl $0			# no error code
 	pushl $do_iret_error
 	jmp error_code
@@ -408,33 +418,42 @@
 
 	CFI_RESTORE_STATE
 ldt_ss:
-	larl OLDSS(%esp), %eax
+	larl PT_OLDSS(%esp), %eax
 	jnz restore_nocheck
 	testl $0x00400000, %eax		# returning to 32bit stack?
 	jnz restore_nocheck		# allright, normal return
+
+#ifdef CONFIG_PARAVIRT
+	/*
+	 * The kernel can't run on a non-flat stack if paravirt mode
+	 * is active.  Rather than try to fixup the high bits of
+	 * ESP, bypass this code entirely.  This may break DOSemu
+	 * and/or Wine support in a paravirt VM, although the option
+	 * is still available to implement the setting of the high
+	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
+	 */
+	cmpl $0, paravirt_ops+PARAVIRT_enabled
+	jne restore_nocheck
+#endif
+
 	/* If returning to userspace with 16bit stack,
 	 * try to fix the higher word of ESP, as the CPU
 	 * won't restore it.
 	 * This is an "official" bug of all the x86-compatible
 	 * CPUs, which we can try to work around to make
 	 * dosemu and wine happy. */
-	subl $8, %esp		# reserve space for switch16 pointer
-	CFI_ADJUST_CFA_OFFSET 8
-	DISABLE_INTERRUPTS
+	movl PT_OLDESP(%esp), %eax
+	movl %esp, %edx
+	call patch_espfix_desc
+	pushl $__ESPFIX_SS
+	CFI_ADJUST_CFA_OFFSET 4
+	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
+	DISABLE_INTERRUPTS(CLBR_EAX)
 	TRACE_IRQS_OFF
-	movl %esp, %eax
-	/* Set up the 16bit stack frame with switch32 pointer on top,
-	 * and a switch16 pointer on top of the current frame. */
-	call setup_x86_bogus_stack
-	CFI_ADJUST_CFA_OFFSET -8	# frame has moved
-	TRACE_IRQS_IRET
-	RESTORE_REGS
-	lss 20+4(%esp), %esp	# switch to 16bit stack
-1:	INTERRUPT_RETURN
-.section __ex_table,"a"
-	.align 4
-	.long 1b,iret_exc
-.previous
+	lss (%esp), %esp
+	CFI_ADJUST_CFA_OFFSET -8
+	jmp restore_nocheck
 	CFI_ENDPROC
 
 	# perform work that needs to be done immediately before resumption
@@ -445,7 +464,7 @@
 	jz work_notifysig
 work_resched:
 	call schedule
-	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	TRACE_IRQS_OFF
@@ -458,7 +477,8 @@
 
 work_notifysig:				# deal with pending signals and
 					# notify-resume requests
-	testl $VM_MASK, EFLAGS(%esp)
+#ifdef CONFIG_VM86
+	testl $VM_MASK, PT_EFLAGS(%esp)
 	movl %esp, %eax
 	jne work_notifysig_v86		# returning to kernel-space or
 					# vm86-space
@@ -468,29 +488,30 @@
 
 	ALIGN
 work_notifysig_v86:
-#ifdef CONFIG_VM86
 	pushl %ecx			# save ti_flags for do_notify_resume
 	CFI_ADJUST_CFA_OFFSET 4
 	call save_v86_state		# %eax contains pt_regs pointer
 	popl %ecx
 	CFI_ADJUST_CFA_OFFSET -4
 	movl %eax, %esp
+#else
+	movl %esp, %eax
+#endif
 	xorl %edx, %edx
 	call do_notify_resume
 	jmp resume_userspace_sig
-#endif
 
 	# perform syscall exit tracing
 	ALIGN
 syscall_trace_entry:
-	movl $-ENOSYS,EAX(%esp)
+	movl $-ENOSYS,PT_EAX(%esp)
 	movl %esp, %eax
 	xorl %edx,%edx
 	call do_syscall_trace
 	cmpl $0, %eax
 	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
 					# so must skip actual syscall
-	movl ORIG_EAX(%esp), %eax
+	movl PT_ORIG_EAX(%esp), %eax
 	cmpl $(nr_syscalls), %eax
 	jnae syscall_call
 	jmp syscall_exit
@@ -501,7 +522,7 @@
 	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
 	jz work_pending
 	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
+	ENABLE_INTERRUPTS(CLBR_ANY)	# could let do_syscall_trace() call
 					# schedule() instead
 	movl %esp, %eax
 	movl $1, %edx
@@ -515,39 +536,38 @@
 	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
-	movl $-EFAULT,EAX(%esp)
+	movl $-EFAULT,PT_EAX(%esp)
 	jmp resume_userspace
 
 syscall_badsys:
-	movl $-ENOSYS,EAX(%esp)
+	movl $-ENOSYS,PT_EAX(%esp)
 	jmp resume_userspace
 	CFI_ENDPROC
 
 #define FIXUP_ESPFIX_STACK \
-	movl %esp, %eax; \
-	/* switch to 32bit stack using the pointer on top of 16bit stack */ \
-	lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
-	/* copy data from 16bit stack to 32bit stack */ \
-	call fixup_x86_bogus_stack; \
-	/* put ESP to the proper location */ \
-	movl %eax, %esp;
-#define UNWIND_ESPFIX_STACK \
+	/* since we are on the wrong stack, we can't do this in C :( */ \
+	movl %gs:PDA_cpu, %ebx; \
+	PER_CPU(cpu_gdt_descr, %ebx); \
+	movl GDS_address(%ebx), %ebx; \
+	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
+	addl %esp, %eax; \
+	pushl $__KERNEL_DS; \
+	CFI_ADJUST_CFA_OFFSET 4; \
 	pushl %eax; \
 	CFI_ADJUST_CFA_OFFSET 4; \
+	lss (%esp), %esp; \
+	CFI_ADJUST_CFA_OFFSET -8;
+#define UNWIND_ESPFIX_STACK \
 	movl %ss, %eax; \
-	/* see if on 16bit stack */ \
+	/* see if on espfix stack */ \
 	cmpw $__ESPFIX_SS, %ax; \
-	je 28f; \
-27:	popl %eax; \
-	CFI_ADJUST_CFA_OFFSET -4; \
-.section .fixup,"ax"; \
-28:	movl $__KERNEL_DS, %eax; \
+	jne 27f; \
+	movl $__KERNEL_DS, %eax; \
 	movl %eax, %ds; \
 	movl %eax, %es; \
-	/* switch to 32bit stack */ \
+	/* switch to normal stack */ \
 	FIXUP_ESPFIX_STACK; \
-	jmp 27b; \
-.previous
+27:;
 
 /*
  * Build the entry stubs and pointer table with
@@ -608,13 +628,16 @@
 	CFI_ADJUST_CFA_OFFSET 4
 	ALIGN
 error_code:
+	/* the function address is in %gs's slot on the stack */
+	pushl %es
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET es, 0*/
 	pushl %ds
 	CFI_ADJUST_CFA_OFFSET 4
 	/*CFI_REL_OFFSET ds, 0*/
 	pushl %eax
 	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET eax, 0
-	xorl %eax, %eax
 	pushl %ebp
 	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET ebp, 0
@@ -627,7 +650,6 @@
 	pushl %edx
 	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET edx, 0
-	decl %eax			# eax = -1
 	pushl %ecx
 	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET ecx, 0
@@ -635,18 +657,20 @@
 	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET ebx, 0
 	cld
-	pushl %es
+	pushl %gs
 	CFI_ADJUST_CFA_OFFSET 4
-	/*CFI_REL_OFFSET es, 0*/
+	/*CFI_REL_OFFSET gs, 0*/
+	movl $(__KERNEL_PDA), %ecx
+	movl %ecx, %gs
 	UNWIND_ESPFIX_STACK
 	popl %ecx
 	CFI_ADJUST_CFA_OFFSET -4
 	/*CFI_REGISTER es, ecx*/
-	movl ES(%esp), %edi		# get the function address
-	movl ORIG_EAX(%esp), %edx	# get the error code
-	movl %eax, ORIG_EAX(%esp)
-	movl %ecx, ES(%esp)
-	/*CFI_REL_OFFSET es, ES*/
+	movl PT_GS(%esp), %edi		# get the function address
+	movl PT_ORIG_EAX(%esp), %edx	# get the error code
+	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
+	mov  %ecx, PT_GS(%esp)
+	/*CFI_REL_OFFSET gs, ES*/
 	movl $(__USER_DS), %ecx
 	movl %ecx, %ds
 	movl %ecx, %es
@@ -682,7 +706,7 @@
 	GET_CR0_INTO_EAX
 	testl $0x4, %eax		# EM (math emulation bit)
 	jne device_not_available_emulate
-	preempt_stop
+	preempt_stop(CLBR_ANY)
 	call math_state_restore
 	jmp ret_from_exception
 device_not_available_emulate:
@@ -754,7 +778,7 @@
 	cmpw $__ESPFIX_SS, %ax
 	popl %eax
 	CFI_ADJUST_CFA_OFFSET -4
-	je nmi_16bit_stack
+	je nmi_espfix_stack
 	cmpl $sysenter_entry,(%esp)
 	je nmi_stack_fixup
 	pushl %eax
@@ -797,7 +821,7 @@
 	FIX_STACK(24,nmi_stack_correct, 1)
 	jmp nmi_stack_correct
 
-nmi_16bit_stack:
+nmi_espfix_stack:
 	/* We have a RING0_INT_FRAME here.
 	 *
 	 * create the pointer to lss back
@@ -806,7 +830,6 @@
 	CFI_ADJUST_CFA_OFFSET 4
 	pushl %esp
 	CFI_ADJUST_CFA_OFFSET 4
-	movzwl %sp, %esp
 	addw $4, (%esp)
 	/* copy the iret frame of 12 bytes */
 	.rept 3
@@ -817,11 +840,11 @@
 	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	FIXUP_ESPFIX_STACK		# %eax == %esp
-	CFI_ADJUST_CFA_OFFSET -20	# the frame has now moved
 	xorl %edx,%edx			# zero error code
 	call do_nmi
 	RESTORE_REGS
-	lss 12+4(%esp), %esp		# back to 16bit stack
+	lss 12+4(%esp), %esp		# back to espfix stack
+	CFI_ADJUST_CFA_OFFSET -24
 1:	INTERRUPT_RETURN
 	CFI_ENDPROC
 .section __ex_table,"a"
@@ -830,6 +853,19 @@
 .previous
 KPROBE_END(nmi)
 
+#ifdef CONFIG_PARAVIRT
+ENTRY(native_iret)
+1:	iret
+.section __ex_table,"a"
+	.align 4
+	.long 1b,iret_exc
+.previous
+
+ENTRY(native_irq_enable_sysexit)
+	sti
+	sysexit
+#endif
+
 KPROBE_ENTRY(int3)
 	RING0_INT_FRAME
 	pushl $-1			# mark this as an int
@@ -949,26 +985,27 @@
 	movl	4(%esp), %edx
 	movl	(%esp), %ecx
 	leal	4(%esp), %eax
-	movl	%ebx, EBX(%edx)
+	movl	%ebx, PT_EBX(%edx)
 	xorl	%ebx, %ebx
-	movl	%ebx, ECX(%edx)
-	movl	%ebx, EDX(%edx)
-	movl	%esi, ESI(%edx)
-	movl	%edi, EDI(%edx)
-	movl	%ebp, EBP(%edx)
-	movl	%ebx, EAX(%edx)
-	movl	$__USER_DS, DS(%edx)
-	movl	$__USER_DS, ES(%edx)
-	movl	%ebx, ORIG_EAX(%edx)
-	movl	%ecx, EIP(%edx)
+	movl	%ebx, PT_ECX(%edx)
+	movl	%ebx, PT_EDX(%edx)
+	movl	%esi, PT_ESI(%edx)
+	movl	%edi, PT_EDI(%edx)
+	movl	%ebp, PT_EBP(%edx)
+	movl	%ebx, PT_EAX(%edx)
+	movl	$__USER_DS, PT_DS(%edx)
+	movl	$__USER_DS, PT_ES(%edx)
+	movl	$0, PT_GS(%edx)
+	movl	%ebx, PT_ORIG_EAX(%edx)
+	movl	%ecx, PT_EIP(%edx)
 	movl	12(%esp), %ecx
-	movl	$__KERNEL_CS, CS(%edx)
-	movl	%ebx, EFLAGS(%edx)
-	movl	%eax, OLDESP(%edx)
+	movl	$__KERNEL_CS, PT_CS(%edx)
+	movl	%ebx, PT_EFLAGS(%edx)
+	movl	%eax, PT_OLDESP(%edx)
 	movl	8(%esp), %eax
 	movl	%ecx, 8(%esp)
-	movl	EBX(%edx), %ebx
-	movl	$__KERNEL_DS, OLDSS(%edx)
+	movl	PT_EBX(%edx), %ebx
+	movl	$__KERNEL_DS, PT_OLDSS(%edx)
 	jmpl	*%eax
 	CFI_ENDPROC
 ENDPROC(arch_unwind_init_running)
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index ca31f18..edef508 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -55,6 +55,12 @@
  */
 ENTRY(startup_32)
 
+#ifdef CONFIG_PARAVIRT
+        movl %cs, %eax
+        testl $0x3, %eax
+        jnz startup_paravirt
+#endif
+
 /*
  * Set segments to known values.
  */
@@ -302,6 +308,7 @@
 	movl %eax,%cr0
 
 	call check_x87
+	call setup_pda
 	lgdt cpu_gdt_descr
 	lidt idt_descr
 	ljmp $(__KERNEL_CS),$1f
@@ -312,10 +319,13 @@
 	movl %eax,%ds
 	movl %eax,%es
 
-	xorl %eax,%eax			# Clear FS/GS and LDT
+	xorl %eax,%eax			# Clear FS and LDT
 	movl %eax,%fs
-	movl %eax,%gs
 	lldt %ax
+
+	movl $(__KERNEL_PDA),%eax
+	mov  %eax,%gs
+
 	cld			# gcc2 wants the direction flag cleared at all times
 	pushl $0		# fake return address for unwinder
 #ifdef CONFIG_SMP
@@ -346,6 +356,23 @@
 	ret
 
 /*
+ * Point the GDT at this CPU's PDA.  On boot this will be
+ * cpu_gdt_table and boot_pda; for secondary CPUs, these will be
+ * that CPU's GDT and PDA.
+ */
+setup_pda:
+	/* get the PDA pointer */
+	movl start_pda, %eax
+
+	/* slot the PDA address into the GDT */
+	mov cpu_gdt_descr+2, %ecx
+	mov %ax, (__KERNEL_PDA+0+2)(%ecx)		/* base & 0x0000ffff */
+	shr $16, %eax
+	mov %al, (__KERNEL_PDA+4+0)(%ecx)		/* base & 0x00ff0000 */
+	mov %ah, (__KERNEL_PDA+4+3)(%ecx)		/* base & 0xff000000 */
+	ret
+
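The byte offsets used in setup_pda follow the standard GDT descriptor layout: base bits 15:0 live at bytes 2-3, bits 23:16 at byte 4 and bits 31:24 at byte 7. A small C sketch of the same encoding, purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* write a 32-bit segment base into an 8-byte GDT descriptor */
static void set_desc_base(uint8_t *desc, uint32_t base)
{
	desc[2] = base & 0xff;		/* base 7:0   */
	desc[3] = (base >> 8) & 0xff;	/* base 15:8  */
	desc[4] = (base >> 16) & 0xff;	/* base 23:16 */
	desc[7] = (base >> 24) & 0xff;	/* base 31:24 */
}

int main(void)
{
	uint8_t desc[8] = { 0 };

	set_desc_base(desc, 0xc1234567);
	printf("%02x %02x %02x %02x\n", desc[2], desc[3], desc[4], desc[7]);
	return 0;
}
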
+/*
  *  setup_idt
  *
  *  sets up a idt with 256 entries pointing to
@@ -465,6 +492,33 @@
 #endif
 	iret
 
+#ifdef CONFIG_PARAVIRT
+startup_paravirt:
+	cld
+ 	movl $(init_thread_union+THREAD_SIZE),%esp
+
+	/* We take pains to preserve all the regs. */
+	pushl	%edx
+	pushl	%ecx
+	pushl	%eax
+
+	/* paravirt.o is last in link, and that probe fn never returns */
+	pushl	$__start_paravirtprobe
+1:
+	movl	0(%esp), %eax
+	pushl	(%eax)
+	movl	8(%esp), %eax
+	call	*(%esp)
+	popl	%eax
+
+	movl	4(%esp), %eax
+	movl	8(%esp), %ecx
+	movl	12(%esp), %edx
+
+	addl	$4, (%esp)
+	jmp	1b
+#endif
+
 /*
  * Real beginning of normal "text" segment
  */
@@ -484,6 +538,8 @@
  * This starts the data section.
  */
 .data
+ENTRY(start_pda)
+	.long boot_pda
 
 ENTRY(stack_start)
 	.long init_thread_union+THREAD_SIZE
@@ -525,7 +581,7 @@
 
 # boot GDT descriptor (later on used by CPU#0):
 	.word 0				# 32 bit align gdt_desc.address
-cpu_gdt_descr:
+ENTRY(cpu_gdt_descr)
 	.word GDT_ENTRIES*8-1
 	.long cpu_gdt_table
 
@@ -584,8 +640,8 @@
 	.quad 0x00009a000000ffff	/* 0xc0 APM CS 16 code (16 bit) */
 	.quad 0x004092000000ffff	/* 0xc8 APM DS    data */
 
-	.quad 0x0000920000000000	/* 0xd0 - ESPFIX 16-bit SS */
-	.quad 0x0000000000000000	/* 0xd8 - unused */
+	.quad 0x00c0920000000000	/* 0xd0 - ESPFIX SS */
+	.quad 0x00cf92000000ffff	/* 0xd8 - PDA */
 	.quad 0x0000000000000000	/* 0xe0 - unused */
 	.quad 0x0000000000000000	/* 0xe8 - unused */
 	.quad 0x0000000000000000	/* 0xf0 - unused */
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
index 17647a5..45a8685 100644
--- a/arch/i386/kernel/hpet.c
+++ b/arch/i386/kernel/hpet.c
@@ -34,6 +34,7 @@
 	unsigned long hpet_period;
 	void __iomem* hpet_base;
 	u64 tmp;
+	int err;
 
 	if (!is_hpet_enabled())
 		return -ENODEV;
@@ -61,7 +62,11 @@
 	do_div(tmp, FSEC_PER_NSEC);
 	clocksource_hpet.mult = (u32)tmp;
 
-	return clocksource_register(&clocksource_hpet);
+	err = clocksource_register(&clocksource_hpet);
+	if (err)
+		iounmap(hpet_base);
+
+	return err;
 }
 
 module_init(init_hpet_clocksource);
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index 62996cd..c8d4582 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -381,7 +381,10 @@
 	}
 }
 
-void __init init_IRQ(void)
+/* Overridden in paravirt.c */
+void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
+
+void __init native_init_IRQ(void)
 {
 	int i;
 
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 3b7a63e..e21dcde 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -34,6 +34,7 @@
 #include <linux/pci.h>
 #include <linux/msi.h>
 #include <linux/htirq.h>
+#include <linux/freezer.h>
 
 #include <asm/io.h>
 #include <asm/smp.h>
@@ -153,14 +154,20 @@
  * the interrupt, and we need to make sure the entry is fully populated
  * before that happens.
  */
+static void
+__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+{
+	union entry_union eu;
+	eu.entry = e;
+	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
+	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
+}
+
 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
 	unsigned long flags;
-	union entry_union eu;
-	eu.entry = e;
 	spin_lock_irqsave(&ioapic_lock, flags);
-	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
-	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
+	__ioapic_write_entry(apic, pin, e);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
@@ -836,8 +843,7 @@
 
 		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
 		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-		     mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
-		     mp_bus_id_to_type[lbus] == MP_BUS_NEC98
+		     mp_bus_id_to_type[lbus] == MP_BUS_MCA
 		    ) &&
 		    (mp_irqs[i].mpc_irqtype == type) &&
 		    (mp_irqs[i].mpc_srcbusirq == irq))
@@ -856,8 +862,7 @@
 
 		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
 		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-		     mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
-		     mp_bus_id_to_type[lbus] == MP_BUS_NEC98
+		     mp_bus_id_to_type[lbus] == MP_BUS_MCA
 		    ) &&
 		    (mp_irqs[i].mpc_irqtype == type) &&
 		    (mp_irqs[i].mpc_srcbusirq == irq))
@@ -987,12 +992,6 @@
 #define default_MCA_trigger(idx)	(1)
 #define default_MCA_polarity(idx)	(0)
 
-/* NEC98 interrupts are always polarity zero edge triggered,
- * when listed as conforming in the MP table. */
-
-#define default_NEC98_trigger(idx)     (0)
-#define default_NEC98_polarity(idx)    (0)
-
 static int __init MPBIOS_polarity(int idx)
 {
 	int bus = mp_irqs[idx].mpc_srcbus;
@@ -1027,11 +1026,6 @@
 					polarity = default_MCA_polarity(idx);
 					break;
 				}
-				case MP_BUS_NEC98: /* NEC 98 pin */
-				{
-					polarity = default_NEC98_polarity(idx);
-					break;
-				}
 				default:
 				{
 					printk(KERN_WARNING "broken BIOS!!\n");
@@ -1101,11 +1095,6 @@
 					trigger = default_MCA_trigger(idx);
 					break;
 				}
-				case MP_BUS_NEC98: /* NEC 98 pin */
-				{
-					trigger = default_NEC98_trigger(idx);
-					break;
-				}
 				default:
 				{
 					printk(KERN_WARNING "broken BIOS!!\n");
@@ -1167,7 +1156,6 @@
 		case MP_BUS_ISA: /* ISA pin */
 		case MP_BUS_EISA:
 		case MP_BUS_MCA:
-		case MP_BUS_NEC98:
 		{
 			irq = mp_irqs[idx].mpc_srcbusirq;
 			break;
@@ -1235,7 +1223,7 @@
 }
 
 /* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
+static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
 
 static int __assign_irq_vector(int irq)
 {
@@ -1360,8 +1348,8 @@
 			if (!apic && (irq < 16))
 				disable_8259A_irq(irq);
 		}
-		ioapic_write_entry(apic, pin, entry);
 		spin_lock_irqsave(&ioapic_lock, flags);
+		__ioapic_write_entry(apic, pin, entry);
 		set_native_irq_info(irq, TARGET_CPUS);
 		spin_unlock_irqrestore(&ioapic_lock, flags);
 	}
@@ -1926,6 +1914,15 @@
 static void __init setup_ioapic_ids_from_mpc(void) { }
 #endif
 
+static int no_timer_check __initdata;
+
+static int __init notimercheck(char *s)
+{
+	no_timer_check = 1;
+	return 1;
+}
+__setup("no_timer_check", notimercheck);
+
 /*
  * There is a nasty bug in some older SMP boards, their mptable lies
  * about the timer IRQ. We do the following to work around the situation:
@@ -1934,10 +1931,13 @@
  *	- if this function detects that timer IRQs are defunct, then we fall
  *	  back to ISA timer IRQs
  */
-static int __init timer_irq_works(void)
+int __init timer_irq_works(void)
 {
 	unsigned long t1 = jiffies;
 
+	if (no_timer_check)
+		return 1;
+
 	local_irq_enable();
 	/* Let ten ticks pass... */
 	mdelay((10 * 1000) / HZ);
@@ -2161,9 +2161,15 @@
 	unsigned char save_control, save_freq_select;
 
 	pin  = find_isa_irq_pin(8, mp_INT);
-	apic = find_isa_irq_apic(8, mp_INT);
-	if (pin == -1)
+	if (pin == -1) {
+		WARN_ON_ONCE(1);
 		return;
+	}
+	apic = find_isa_irq_apic(8, mp_INT);
+	if (apic == -1) {
+		WARN_ON_ONCE(1);
+		return;
+	}
 
 	entry0 = ioapic_read_entry(apic, pin);
 	clear_IO_APIC_pin(apic, pin);
@@ -2208,7 +2214,7 @@
  * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
  * fanatically on his truly buggy board.
  */
-static inline void check_timer(void)
+static inline void __init check_timer(void)
 {
 	int apic1, pin1, apic2, pin2;
 	int vector;
@@ -2856,8 +2862,8 @@
 	if (!ioapic && (irq < 16))
 		disable_8259A_irq(irq);
 
-	ioapic_write_entry(ioapic, pin, entry);
 	spin_lock_irqsave(&ioapic_lock, flags);
+	__ioapic_write_entry(ioapic, pin, entry);
 	set_native_irq_info(irq, TARGET_CPUS);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index fc79e1e..af1d533 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -184,7 +184,7 @@
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
+	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
 	mutex_unlock(&kprobe_mutex);
 }
 
@@ -333,7 +333,7 @@
 		return 1;
 
 ss_probe:
-#ifndef CONFIG_PREEMPT
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
 	if (p->ainsn.boostable == 1 && !p->post_handler){
 		/* Boost up -- we can execute copied instructions directly */
 		reset_current_kprobe();
diff --git a/arch/i386/kernel/ldt.c b/arch/i386/kernel/ldt.c
index 445211e..b410e5f 100644
--- a/arch/i386/kernel/ldt.c
+++ b/arch/i386/kernel/ldt.c
@@ -160,16 +160,14 @@
 {
 	int err;
 	unsigned long size;
-	void *address;
 
 	err = 0;
-	address = &default_ldt[0];
 	size = 5*sizeof(struct desc_struct);
 	if (size > bytecount)
 		size = bytecount;
 
 	err = size;
-	if (copy_to_user(ptr, address, size))
+	if (clear_user(ptr, size))
 		err = -EFAULT;
 
 	return err;
diff --git a/arch/i386/kernel/mca.c b/arch/i386/kernel/mca.c
index eb57a85..b83672b 100644
--- a/arch/i386/kernel/mca.c
+++ b/arch/i386/kernel/mca.c
@@ -283,10 +283,9 @@
 	bus->f.mca_transform_memory = mca_dummy_transform_memory;
 
 	/* get the motherboard device */
-	mca_dev = kmalloc(sizeof(struct mca_device), GFP_KERNEL);
+	mca_dev = kzalloc(sizeof(struct mca_device), GFP_KERNEL);
 	if(unlikely(!mca_dev))
 		goto out_nomem;
-	memset(mca_dev, 0, sizeof(struct mca_device));
 
 	/*
 	 * We do not expect many MCA interrupts during initialization,
@@ -310,11 +309,9 @@
 	mca_dev->slot = MCA_MOTHERBOARD;
 	mca_register_device(MCA_PRIMARY_BUS, mca_dev);
 
-	mca_dev = kmalloc(sizeof(struct mca_device), GFP_ATOMIC);
+	mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
 	if(unlikely(!mca_dev))
 		goto out_unlock_nomem;
-	memset(mca_dev, 0, sizeof(struct mca_device));
-
 
 	/* Put motherboard into video setup mode, read integrated video
 	 * POS registers, and turn motherboard setup off.
@@ -349,10 +346,9 @@
 	}
 	if(which_scsi) {
 		/* found a scsi card */
-		mca_dev = kmalloc(sizeof(struct mca_device), GFP_ATOMIC);
+		mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
 		if(unlikely(!mca_dev))
 			goto out_unlock_nomem;
-		memset(mca_dev, 0, sizeof(struct mca_device));
 
 		for(j = 0; j < 8; j++)
 			mca_dev->pos[j] = pos[j];
@@ -378,10 +374,9 @@
 		if(!mca_read_and_store_pos(pos))
 			continue;
 
-		mca_dev = kmalloc(sizeof(struct mca_device), GFP_ATOMIC);
+		mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
 		if(unlikely(!mca_dev))
 			goto out_unlock_nomem;
-		memset(mca_dev, 0, sizeof(struct mca_device));
 
 		for(j=0; j<8; j++)
 			mca_dev->pos[j]=pos[j];
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 23f5984..9723466 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -703,7 +703,6 @@
 	.resume = mc_sysdev_resume,
 };
 
-#ifdef CONFIG_HOTPLUG_CPU
 static __cpuinit int
 mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
@@ -726,7 +725,6 @@
 static struct notifier_block mc_cpu_notifier = {
 	.notifier_call = mc_cpu_callback,
 };
-#endif
 
 static int __init microcode_init (void)
 {
diff --git a/arch/i386/kernel/module.c b/arch/i386/kernel/module.c
index 470cf97..d7d9c8b 100644
--- a/arch/i386/kernel/module.c
+++ b/arch/i386/kernel/module.c
@@ -108,7 +108,8 @@
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
-	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL;
+	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
+		*para = NULL;
 	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
 	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { 
@@ -118,6 +119,8 @@
 			alt = s;
 		if (!strcmp(".smp_locks", secstrings + s->sh_name))
 			locks= s;
+		if (!strcmp(".parainstructions", secstrings + s->sh_name))
+			para = s;
 	}
 
 	if (alt) {
@@ -132,6 +135,12 @@
 					    lseg, lseg + locks->sh_size,
 					    tseg, tseg + text->sh_size);
 	}
+
+	if (para) {
+		void *pseg = (void *)para->sh_addr;
+		apply_paravirt(pseg, pseg + para->sh_size);
+	}
+
 	return 0;
 }
 
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 442aaf8..2ce6722 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -249,8 +249,6 @@
 		mp_current_pci_id++;
 	} else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
 		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
-	} else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
-		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
 	} else {
 		printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
 	}
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index d535cdb..1d1a56c 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -195,7 +195,6 @@
 {
 	const u32 __user *tmp = (const u32 __user *)buf;
 	u32 data[2];
-	size_t rv;
 	u32 reg = *ppos;
 	int cpu = iminor(file->f_dentry->d_inode);
 	int err;
@@ -203,7 +202,7 @@
 	if (count % 8)
 		return -EINVAL;	/* Invalid chunk size */
 
-	for (rv = 0; count; count -= 8) {
+	for (; count; count -= 8) {
 		if (copy_from_user(&data, tmp, 8))
 			return -EFAULT;
 		err = do_wrmsr(cpu, reg, data[0], data[1]);
@@ -239,18 +238,17 @@
 	.open = msr_open,
 };
 
-static int msr_class_device_create(int i)
+static int msr_device_create(int i)
 {
 	int err = 0;
-	struct class_device *class_err;
+	struct device *dev;
 
-	class_err = class_device_create(msr_class, NULL, MKDEV(MSR_MAJOR, i), NULL, "msr%d",i);
-	if (IS_ERR(class_err)) 
-		err = PTR_ERR(class_err);
+	dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, i), "msr%d",i);
+	if (IS_ERR(dev))
+		err = PTR_ERR(dev);
 	return err;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int msr_class_cpu_callback(struct notifier_block *nfb,
 				unsigned long action, void *hcpu)
 {
@@ -258,10 +256,10 @@
 
 	switch (action) {
 	case CPU_ONLINE:
-		msr_class_device_create(cpu);
+		msr_device_create(cpu);
 		break;
 	case CPU_DEAD:
-		class_device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
+		device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
 		break;
 	}
 	return NOTIFY_OK;
@@ -271,7 +269,6 @@
 {
 	.notifier_call = msr_class_cpu_callback,
 };
-#endif
 
 static int __init msr_init(void)
 {
@@ -290,7 +287,7 @@
 		goto out_chrdev;
 	}
 	for_each_online_cpu(i) {
-		err = msr_class_device_create(i);
+		err = msr_device_create(i);
 		if (err != 0)
 			goto out_class;
 	}
@@ -302,7 +299,7 @@
 out_class:
 	i = 0;
 	for_each_online_cpu(i)
-		class_device_destroy(msr_class, MKDEV(MSR_MAJOR, i));
+		device_destroy(msr_class, MKDEV(MSR_MAJOR, i));
 	class_destroy(msr_class);
 out_chrdev:
 	unregister_chrdev(MSR_MAJOR, "cpu/msr");
@@ -314,7 +311,7 @@
 {
 	int cpu = 0;
 	for_each_online_cpu(cpu)
-		class_device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
+		device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
 	class_destroy(msr_class);
 	unregister_chrdev(MSR_MAJOR, "cpu/msr");
 	unregister_hotcpu_notifier(&msr_class_cpu_notifier);
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index eaafe23..f5bc7e1 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -22,6 +22,7 @@
 #include <linux/percpu.h>
 #include <linux/dmi.h>
 #include <linux/kprobes.h>
+#include <linux/cpumask.h>
 
 #include <asm/smp.h>
 #include <asm/nmi.h>
@@ -42,6 +43,8 @@
 static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner);
 static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]);
 
+static cpumask_t backtrace_mask = CPU_MASK_NONE;
+
 /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
  * offset from MSR_P4_BSU_ESCR0.  It will be the max for all platforms (for now)
  */
@@ -867,14 +870,16 @@
 
 void touch_nmi_watchdog (void)
 {
-	int i;
+	if (nmi_watchdog > 0) {
+		unsigned cpu;
 
-	/*
-	 * Just reset the alert counters, (other CPUs might be
-	 * spinning on locks we hold):
-	 */
-	for_each_possible_cpu(i)
-		alert_counter[i] = 0;
+		/*
+		 * Just reset the alert counters, (other CPUs might be
+		 * spinning on locks we hold):
+		 */
+		for_each_present_cpu (cpu)
+			alert_counter[cpu] = 0;
+	}
 
 	/*
 	 * Tickle the softlockup detector too:
@@ -907,6 +912,16 @@
 		touched = 1;
 	}
 
+	if (cpu_isset(cpu, backtrace_mask)) {
+		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
+
+		spin_lock(&lock);
+		printk("NMI backtrace for cpu %d\n", cpu);
+		dump_stack();
+		spin_unlock(&lock);
+		cpu_clear(cpu, backtrace_mask);
+	}
+
 	sum = per_cpu(irq_stat, cpu).apic_timer_irqs;
 
 	/* if the apic timer isn't firing, this cpu isn't doing much */
@@ -1033,6 +1048,19 @@
 
 #endif
 
+void __trigger_all_cpu_backtrace(void)
+{
+	int i;
+
+	backtrace_mask = cpu_online_map;
+	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
+	for (i = 0; i < 10 * 1000; i++) {
+		if (cpus_empty(backtrace_mask))
+			break;
+		mdelay(1);
+	}
+}
+
 EXPORT_SYMBOL(nmi_active);
 EXPORT_SYMBOL(nmi_watchdog);
 EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
new file mode 100644
index 0000000..3dceab5
--- /dev/null
+++ b/arch/i386/kernel/paravirt.c
@@ -0,0 +1,569 @@
+/*  Paravirtualization interfaces
+    Copyright (C) 2006 Rusty Russell IBM Corporation
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+*/
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/efi.h>
+#include <linux/bcd.h>
+#include <linux/start_kernel.h>
+
+#include <asm/bug.h>
+#include <asm/paravirt.h>
+#include <asm/desc.h>
+#include <asm/setup.h>
+#include <asm/arch_hooks.h>
+#include <asm/time.h>
+#include <asm/irq.h>
+#include <asm/delay.h>
+#include <asm/fixmap.h>
+#include <asm/apic.h>
+#include <asm/tlbflush.h>
+
+/* nop stub */
+static void native_nop(void)
+{
+}
+
+static void __init default_banner(void)
+{
+	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
+	       paravirt_ops.name);
+}
+
+char *memory_setup(void)
+{
+	return paravirt_ops.memory_setup();
+}
+
+/* Simple instruction patching code. */
+#define DEF_NATIVE(name, code)					\
+	extern const char start_##name[], end_##name[];		\
+	asm("start_" #name ": " code "; end_" #name ":")
+DEF_NATIVE(cli, "cli");
+DEF_NATIVE(sti, "sti");
+DEF_NATIVE(popf, "push %eax; popf");
+DEF_NATIVE(pushf, "pushf; pop %eax");
+DEF_NATIVE(pushf_cli, "pushf; pop %eax; cli");
+DEF_NATIVE(iret, "iret");
+DEF_NATIVE(sti_sysexit, "sti; sysexit");
+
+static const struct native_insns
+{
+	const char *start, *end;
+} native_insns[] = {
+	[PARAVIRT_IRQ_DISABLE] = { start_cli, end_cli },
+	[PARAVIRT_IRQ_ENABLE] = { start_sti, end_sti },
+	[PARAVIRT_RESTORE_FLAGS] = { start_popf, end_popf },
+	[PARAVIRT_SAVE_FLAGS] = { start_pushf, end_pushf },
+	[PARAVIRT_SAVE_FLAGS_IRQ_DISABLE] = { start_pushf_cli, end_pushf_cli },
+	[PARAVIRT_INTERRUPT_RETURN] = { start_iret, end_iret },
+	[PARAVIRT_STI_SYSEXIT] = { start_sti_sysexit, end_sti_sysexit },
+};
+
+static unsigned native_patch(u8 type, u16 clobbers, void *insns, unsigned len)
+{
+	unsigned int insn_len;
+
+	/* Don't touch it if we don't have a replacement */
+	if (type >= ARRAY_SIZE(native_insns) || !native_insns[type].start)
+		return len;
+
+	insn_len = native_insns[type].end - native_insns[type].start;
+
+	/* Similarly if we can't fit replacement. */
+	if (len < insn_len)
+		return len;
+
+	memcpy(insns, native_insns[type].start, insn_len);
+	return insn_len;
+}
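
native_patch() only copies a replacement when it fits at the call site, otherwise the original indirect call is left alone. A user-space sketch of that length check (the site contents and opcode are illustrative):

#include <stdio.h>
#include <string.h>

/* copy a native replacement into a patch site only if it fits */
static unsigned patch_site(unsigned char *site, unsigned len,
			   const unsigned char *repl, unsigned repl_len)
{
	if (repl_len > len)
		return len;		/* leave the indirect call in place */
	memcpy(site, repl, repl_len);
	return repl_len;
}

int main(void)
{
	unsigned char site[8] = { 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90 };
	const unsigned char cli = 0xfa;	/* opcode for "cli" */

	printf("patched %u byte(s)\n", patch_site(site, sizeof(site), &cli, 1));
	return 0;
}
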
+
+static fastcall unsigned long native_get_debugreg(int regno)
+{
+	unsigned long val = 0; 	/* Damn you, gcc! */
+
+	switch (regno) {
+	case 0:
+		asm("movl %%db0, %0" :"=r" (val)); break;
+	case 1:
+		asm("movl %%db1, %0" :"=r" (val)); break;
+	case 2:
+		asm("movl %%db2, %0" :"=r" (val)); break;
+	case 3:
+		asm("movl %%db3, %0" :"=r" (val)); break;
+	case 6:
+		asm("movl %%db6, %0" :"=r" (val)); break;
+	case 7:
+		asm("movl %%db7, %0" :"=r" (val)); break;
+	default:
+		BUG();
+	}
+	return val;
+}
+
+static fastcall void native_set_debugreg(int regno, unsigned long value)
+{
+	switch (regno) {
+	case 0:
+		asm("movl %0,%%db0"	: /* no output */ :"r" (value));
+		break;
+	case 1:
+		asm("movl %0,%%db1"	: /* no output */ :"r" (value));
+		break;
+	case 2:
+		asm("movl %0,%%db2"	: /* no output */ :"r" (value));
+		break;
+	case 3:
+		asm("movl %0,%%db3"	: /* no output */ :"r" (value));
+		break;
+	case 6:
+		asm("movl %0,%%db6"	: /* no output */ :"r" (value));
+		break;
+	case 7:
+		asm("movl %0,%%db7"	: /* no output */ :"r" (value));
+		break;
+	default:
+		BUG();
+	}
+}
+
+void init_IRQ(void)
+{
+	paravirt_ops.init_IRQ();
+}
+
+static fastcall void native_clts(void)
+{
+	asm volatile ("clts");
+}
+
+static fastcall unsigned long native_read_cr0(void)
+{
+	unsigned long val;
+	asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static fastcall void native_write_cr0(unsigned long val)
+{
+	asm volatile("movl %0,%%cr0": :"r" (val));
+}
+
+static fastcall unsigned long native_read_cr2(void)
+{
+	unsigned long val;
+	asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static fastcall void native_write_cr2(unsigned long val)
+{
+	asm volatile("movl %0,%%cr2": :"r" (val));
+}
+
+static fastcall unsigned long native_read_cr3(void)
+{
+	unsigned long val;
+	asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static fastcall void native_write_cr3(unsigned long val)
+{
+	asm volatile("movl %0,%%cr3": :"r" (val));
+}
+
+static fastcall unsigned long native_read_cr4(void)
+{
+	unsigned long val;
+	asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static fastcall unsigned long native_read_cr4_safe(void)
+{
+	unsigned long val;
+	/* This could fault if %cr4 does not exist */
+	asm("1: movl %%cr4, %0		\n"
+		"2:				\n"
+		".section __ex_table,\"a\"	\n"
+		".long 1b,2b			\n"
+		".previous			\n"
+		: "=r" (val): "0" (0));
+	return val;
+}
+
+static fastcall void native_write_cr4(unsigned long val)
+{
+	asm volatile("movl %0,%%cr4": :"r" (val));
+}
+
+static fastcall unsigned long native_save_fl(void)
+{
+	unsigned long f;
+	asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
+	return f;
+}
+
+static fastcall void native_restore_fl(unsigned long f)
+{
+	asm volatile("pushl %0 ; popfl": /* no output */
+			     :"g" (f)
+			     :"memory", "cc");
+}
+
+static fastcall void native_irq_disable(void)
+{
+	asm volatile("cli": : :"memory");
+}
+
+static fastcall void native_irq_enable(void)
+{
+	asm volatile("sti": : :"memory");
+}
+
+static fastcall void native_safe_halt(void)
+{
+	asm volatile("sti; hlt": : :"memory");
+}
+
+static fastcall void native_halt(void)
+{
+	asm volatile("hlt": : :"memory");
+}
+
+static fastcall void native_wbinvd(void)
+{
+	asm volatile("wbinvd": : :"memory");
+}
+
+static fastcall unsigned long long native_read_msr(unsigned int msr, int *err)
+{
+	unsigned long long val;
+
+	asm volatile("2: rdmsr ; xorl %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3:  movl %3,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+ 		     ".section __ex_table,\"a\"\n"
+		     "   .align 4\n\t"
+		     "   .long 	2b,3b\n\t"
+		     ".previous"
+		     : "=r" (*err), "=A" (val)
+		     : "c" (msr), "i" (-EFAULT));
+
+	return val;
+}
+
+static fastcall int native_write_msr(unsigned int msr, unsigned long long val)
+{
+	int err;
+	asm volatile("2: wrmsr ; xorl %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3:  movl %4,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+ 		     ".section __ex_table,\"a\"\n"
+		     "   .align 4\n\t"
+		     "   .long 	2b,3b\n\t"
+		     ".previous"
+		     : "=a" (err)
+		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
+		       "i" (-EFAULT));
+	return err;
+}
+
+static fastcall unsigned long long native_read_tsc(void)
+{
+	unsigned long long val;
+	asm volatile("rdtsc" : "=A" (val));
+	return val;
+}
+
+static fastcall unsigned long long native_read_pmc(void)
+{
+	unsigned long long val;
+	asm volatile("rdpmc" : "=A" (val));
+	return val;
+}
+
+static fastcall void native_load_tr_desc(void)
+{
+	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+}
+
+static fastcall void native_load_gdt(const struct Xgt_desc_struct *dtr)
+{
+	asm volatile("lgdt %0"::"m" (*dtr));
+}
+
+static fastcall void native_load_idt(const struct Xgt_desc_struct *dtr)
+{
+	asm volatile("lidt %0"::"m" (*dtr));
+}
+
+static fastcall void native_store_gdt(struct Xgt_desc_struct *dtr)
+{
+	asm ("sgdt %0":"=m" (*dtr));
+}
+
+static fastcall void native_store_idt(struct Xgt_desc_struct *dtr)
+{
+	asm ("sidt %0":"=m" (*dtr));
+}
+
+static fastcall unsigned long native_store_tr(void)
+{
+	unsigned long tr;
+	asm ("str %0":"=r" (tr));
+	return tr;
+}
+
+static fastcall void native_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
+	C(0); C(1); C(2);
+#undef C
+}
+
+static inline void native_write_dt_entry(void *dt, int entry, u32 entry_low, u32 entry_high)
+{
+	u32 *lp = (u32 *)((char *)dt + entry*8);
+	lp[0] = entry_low;
+	lp[1] = entry_high;
+}
+
+static fastcall void native_write_ldt_entry(void *dt, int entrynum, u32 low, u32 high)
+{
+	native_write_dt_entry(dt, entrynum, low, high);
+}
+
+static fastcall void native_write_gdt_entry(void *dt, int entrynum, u32 low, u32 high)
+{
+	native_write_dt_entry(dt, entrynum, low, high);
+}
+
+static fastcall void native_write_idt_entry(void *dt, int entrynum, u32 low, u32 high)
+{
+	native_write_dt_entry(dt, entrynum, low, high);
+}
+
+static fastcall void native_load_esp0(struct tss_struct *tss,
+				      struct thread_struct *thread)
+{
+	tss->esp0 = thread->esp0;
+
+	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
+	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+		tss->ss1 = thread->sysenter_cs;
+		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+	}
+}
+
+static fastcall void native_io_delay(void)
+{
+	asm volatile("outb %al,$0x80");
+}
+
+static fastcall void native_flush_tlb(void)
+{
+	__native_flush_tlb();
+}
+
+/*
+ * Global pages have to be flushed a bit differently. Not a real
+ * performance problem because this does not happen often.
+ */
+static fastcall void native_flush_tlb_global(void)
+{
+	__native_flush_tlb_global();
+}
+
+static fastcall void native_flush_tlb_single(u32 addr)
+{
+	__native_flush_tlb_single(addr);
+}
+
+#ifndef CONFIG_X86_PAE
+static fastcall void native_set_pte(pte_t *ptep, pte_t pteval)
+{
+	*ptep = pteval;
+}
+
+static fastcall void native_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval)
+{
+	*ptep = pteval;
+}
+
+static fastcall void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+	*pmdp = pmdval;
+}
+
+#else /* CONFIG_X86_PAE */
+
+static fastcall void native_set_pte(pte_t *ptep, pte_t pte)
+{
+	ptep->pte_high = pte.pte_high;
+	smp_wmb();
+	ptep->pte_low = pte.pte_low;
+}
+
+static fastcall void native_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pte)
+{
+	ptep->pte_high = pte.pte_high;
+	smp_wmb();
+	ptep->pte_low = pte.pte_low;
+}
+
+static fastcall void native_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+	ptep->pte_low = 0;
+	smp_wmb();
+	ptep->pte_high = pte.pte_high;
+	smp_wmb();
+	ptep->pte_low = pte.pte_low;
+}
+
+static fastcall void native_set_pte_atomic(pte_t *ptep, pte_t pteval)
+{
+	set_64bit((unsigned long long *)ptep,pte_val(pteval));
+}
+
+static fastcall void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+	set_64bit((unsigned long long *)pmdp,pmd_val(pmdval));
+}
+
+static fastcall void native_set_pud(pud_t *pudp, pud_t pudval)
+{
+	*pudp = pudval;
+}
+
+static fastcall void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	ptep->pte_low = 0;
+	smp_wmb();
+	ptep->pte_high = 0;
+}
+
+static fastcall void native_pmd_clear(pmd_t *pmd)
+{
+	u32 *tmp = (u32 *)pmd;
+	*tmp = 0;
+	smp_wmb();
+	*(tmp + 1) = 0;
+}
+#endif /* CONFIG_X86_PAE */
+
+/* These are in entry.S */
+extern fastcall void native_iret(void);
+extern fastcall void native_irq_enable_sysexit(void);
+
+static int __init print_banner(void)
+{
+	paravirt_ops.banner();
+	return 0;
+}
+core_initcall(print_banner);
+
+/* We simply declare start_kernel to be the paravirt probe of last resort. */
+paravirt_probe(start_kernel);
+
+struct paravirt_ops paravirt_ops = {
+	.name = "bare hardware",
+	.paravirt_enabled = 0,
+	.kernel_rpl = 0,
+
+ 	.patch = native_patch,
+	.banner = default_banner,
+	.arch_setup = native_nop,
+	.memory_setup = machine_specific_memory_setup,
+	.get_wallclock = native_get_wallclock,
+	.set_wallclock = native_set_wallclock,
+	.time_init = time_init_hook,
+	.init_IRQ = native_init_IRQ,
+
+	.cpuid = native_cpuid,
+	.get_debugreg = native_get_debugreg,
+	.set_debugreg = native_set_debugreg,
+	.clts = native_clts,
+	.read_cr0 = native_read_cr0,
+	.write_cr0 = native_write_cr0,
+	.read_cr2 = native_read_cr2,
+	.write_cr2 = native_write_cr2,
+	.read_cr3 = native_read_cr3,
+	.write_cr3 = native_write_cr3,
+	.read_cr4 = native_read_cr4,
+	.read_cr4_safe = native_read_cr4_safe,
+	.write_cr4 = native_write_cr4,
+	.save_fl = native_save_fl,
+	.restore_fl = native_restore_fl,
+	.irq_disable = native_irq_disable,
+	.irq_enable = native_irq_enable,
+	.safe_halt = native_safe_halt,
+	.halt = native_halt,
+	.wbinvd = native_wbinvd,
+	.read_msr = native_read_msr,
+	.write_msr = native_write_msr,
+	.read_tsc = native_read_tsc,
+	.read_pmc = native_read_pmc,
+	.load_tr_desc = native_load_tr_desc,
+	.set_ldt = native_set_ldt,
+	.load_gdt = native_load_gdt,
+	.load_idt = native_load_idt,
+	.store_gdt = native_store_gdt,
+	.store_idt = native_store_idt,
+	.store_tr = native_store_tr,
+	.load_tls = native_load_tls,
+	.write_ldt_entry = native_write_ldt_entry,
+	.write_gdt_entry = native_write_gdt_entry,
+	.write_idt_entry = native_write_idt_entry,
+	.load_esp0 = native_load_esp0,
+
+	.set_iopl_mask = native_set_iopl_mask,
+	.io_delay = native_io_delay,
+	.const_udelay = __const_udelay,
+
+#ifdef CONFIG_X86_LOCAL_APIC
+	.apic_write = native_apic_write,
+	.apic_write_atomic = native_apic_write_atomic,
+	.apic_read = native_apic_read,
+#endif
+
+	.flush_tlb_user = native_flush_tlb,
+	.flush_tlb_kernel = native_flush_tlb_global,
+	.flush_tlb_single = native_flush_tlb_single,
+
+	.set_pte = native_set_pte,
+	.set_pte_at = native_set_pte_at,
+	.set_pmd = native_set_pmd,
+	.pte_update = (void *)native_nop,
+	.pte_update_defer = (void *)native_nop,
+#ifdef CONFIG_X86_PAE
+	.set_pte_atomic = native_set_pte_atomic,
+	.set_pte_present = native_set_pte_present,
+	.set_pud = native_set_pud,
+	.pte_clear = native_pte_clear,
+	.pmd_clear = native_pmd_clear,
+#endif
+
+	.irq_enable_sysexit = native_irq_enable_sysexit,
+	.iret = native_iret,
+};
+EXPORT_SYMBOL(paravirt_ops);
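
The paravirt_ops table above wires every hook to the native implementation; a hypervisor backend is expected to overwrite individual entries before they are used. A rough, hypothetical sketch of such a backend follows — the example_* names and the hypercall are invented for illustration, and only the paravirt_ops fields come from the patch above:

#include <asm/paravirt.h>	/* header name assumed from this patch series */

/* Illustrative only: a guest backend replaces selected native hooks
 * with hypercall-based versions before any of them are invoked.
 */
static fastcall void example_irq_disable(void)
{
	example_hypercall_disable_interrupts();	/* invented hypercall API */
}

void __init example_paravirt_init(void)
{
	paravirt_ops.name = "example hypervisor";
	paravirt_ops.paravirt_enabled = 1;
	paravirt_ops.irq_disable = example_irq_disable;
	/* ...and so on for any other hook the backend wants to own */
}
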
diff --git a/arch/i386/kernel/pci-dma.c b/arch/i386/kernel/pci-dma.c
index 25fe668..41af692 100644
--- a/arch/i386/kernel/pci-dma.c
+++ b/arch/i386/kernel/pci-dma.c
@@ -75,7 +75,7 @@
 int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
 				dma_addr_t device_addr, size_t size, int flags)
 {
-	void __iomem *mem_base;
+	void __iomem *mem_base = NULL;
 	int pages = size >> PAGE_SHIFT;
 	int bitmap_size = (pages + 31)/32;
 
@@ -92,14 +92,12 @@
 	if (!mem_base)
 		goto out;
 
-	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
 	if (!dev->dma_mem)
 		goto out;
-	memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
-	dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
+	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 	if (!dev->dma_mem->bitmap)
 		goto free1_out;
-	memset(dev->dma_mem->bitmap, 0, bitmap_size);
 
 	dev->dma_mem->virt_base = mem_base;
 	dev->dma_mem->device_base = device_addr;
@@ -114,6 +112,8 @@
  free1_out:
 	kfree(dev->dma_mem->bitmap);
  out:
+	if (mem_base)
+		iounmap(mem_base);
 	return 0;
 }
 EXPORT_SYMBOL(dma_declare_coherent_memory);
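
The hunk above folds the kmalloc()/memset() pairs into kzalloc(), which returns already-zeroed memory, and unmaps mem_base on the error path. The allocation idiom in isolation, as a generic sketch not tied to this driver ("foo" is a placeholder type):

#include <linux/slab.h>
#include <linux/string.h>

	struct foo *p;

	/* before: allocate, then zero by hand */
	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	memset(p, 0, sizeof(*p));

	/* after: one call, memory comes back zero-filled */
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
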
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index dd53c58..9930851 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -56,6 +56,7 @@
 
 #include <asm/tlbflush.h>
 #include <asm/cpu.h>
+#include <asm/pda.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -99,22 +100,18 @@
  */
 void default_idle(void)
 {
-	local_irq_enable();
-
 	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
 		current_thread_info()->status &= ~TS_POLLING;
 		smp_mb__after_clear_bit();
-		while (!need_resched()) {
-			local_irq_disable();
-			if (!need_resched())
-				safe_halt();
-			else
-				local_irq_enable();
-		}
+		local_irq_disable();
+		if (!need_resched())
+			safe_halt();	/* enables interrupts racelessly */
+		else
+			local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
 	} else {
-		while (!need_resched())
-			cpu_relax();
+		/* loop is done by the caller */
+		cpu_relax();
 	}
 }
 #ifdef CONFIG_APM_MODULE
@@ -128,14 +125,7 @@
  */
 static void poll_idle (void)
 {
-	local_irq_enable();
-
-	asm volatile(
-		"2:"
-		"testl %0, %1;"
-		"rep; nop;"
-		"je 2b;"
-		: : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
+	cpu_relax();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -256,8 +246,7 @@
 static void mwait_idle(void)
 {
 	local_irq_enable();
-	while (!need_resched())
-		mwait_idle_with_hints(0, 0);
+	mwait_idle_with_hints(0, 0);
 }
 
 void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
@@ -314,8 +303,8 @@
 		regs->eax,regs->ebx,regs->ecx,regs->edx);
 	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
 		regs->esi, regs->edi, regs->ebp);
-	printk(" DS: %04x ES: %04x\n",
-		0xffff & regs->xds,0xffff & regs->xes);
+	printk(" DS: %04x ES: %04x GS: %04x\n",
+	       0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xgs);
 
 	cr0 = read_cr0();
 	cr2 = read_cr2();
@@ -346,6 +335,7 @@
 
 	regs.xds = __USER_DS;
 	regs.xes = __USER_DS;
+	regs.xgs = __KERNEL_PDA;
 	regs.orig_eax = -1;
 	regs.eip = (unsigned long) kernel_thread_helper;
 	regs.xcs = __KERNEL_CS | get_kernel_rpl();
@@ -431,7 +421,6 @@
 	p->thread.eip = (unsigned long) ret_from_fork;
 
 	savesegment(fs,p->thread.fs);
-	savesegment(gs,p->thread.gs);
 
 	tsk = current;
 	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
@@ -508,7 +497,7 @@
 	dump->regs.ds = regs->xds;
 	dump->regs.es = regs->xes;
 	savesegment(fs,dump->regs.fs);
-	savesegment(gs,dump->regs.gs);
+	dump->regs.gs = regs->xgs;
 	dump->regs.orig_eax = regs->orig_eax;
 	dump->regs.eip = regs->eip;
 	dump->regs.cs = regs->xcs;
@@ -648,22 +637,27 @@
 
 	__unlazy_fpu(prev_p);
 
+
+	/* we're going to use this soon, after a few expensive things */
+	if (next_p->fpu_counter > 5)
+		prefetch(&next->i387.fxsave);
+
 	/*
 	 * Reload esp0.
 	 */
 	load_esp0(tss, next);
 
 	/*
-	 * Save away %fs and %gs. No need to save %es and %ds, as
-	 * those are always kernel segments while inside the kernel.
-	 * Doing this before setting the new TLS descriptors avoids
-	 * the situation where we temporarily have non-reloadable
-	 * segments in %fs and %gs.  This could be an issue if the
-	 * NMI handler ever used %fs or %gs (it does not today), or
-	 * if the kernel is running inside of a hypervisor layer.
+	 * Save away %fs. No need to save %gs, as it was saved on the
+	 * stack on entry.  No need to save %es and %ds, as those are
+	 * always kernel segments while inside the kernel.  Doing this
+	 * before setting the new TLS descriptors avoids the situation
+	 * where we temporarily have non-reloadable segments in %fs
+	 * and %gs.  This could be an issue if the NMI handler ever
+	 * used %fs or %gs (it does not today), or if the kernel is
+	 * running inside of a hypervisor layer.
 	 */
 	savesegment(fs, prev->fs);
-	savesegment(gs, prev->gs);
 
 	/*
 	 * Load the per-thread Thread-Local Storage descriptor.
@@ -671,22 +665,14 @@
 	load_TLS(next, cpu);
 
 	/*
-	 * Restore %fs and %gs if needed.
+	 * Restore %fs if needed.
 	 *
-	 * Glibc normally makes %fs be zero, and %gs is one of
-	 * the TLS segments.
+	 * Glibc normally makes %fs be zero.
 	 */
 	if (unlikely(prev->fs | next->fs))
 		loadsegment(fs, next->fs);
 
-	if (prev->gs | next->gs)
-		loadsegment(gs, next->gs);
-
-	/*
-	 * Restore IOPL if needed.
-	 */
-	if (unlikely(prev->iopl != next->iopl))
-		set_iopl_mask(next->iopl);
+	write_pda(pcurrent, next_p);
 
 	/*
 	 * Now maybe handle debug registers and/or IO bitmaps
@@ -697,6 +683,13 @@
 
 	disable_tsc(prev_p, next_p);
 
+	/* If the task has used the FPU during the last 5 timeslices, do a
+	 * full restore of the math state immediately to avoid the trap;
+	 * the chances of needing the FPU again soon are obviously high.
+	 */
+	if (next_p->fpu_counter > 5)
+		math_state_restore();
+
 	return prev_p;
 }
 
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 775f50e..f3f94ac 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -94,13 +94,9 @@
 				return -EIO;
 			child->thread.fs = value;
 			return 0;
-		case GS:
-			if (value && (value & 3) != 3)
-				return -EIO;
-			child->thread.gs = value;
-			return 0;
 		case DS:
 		case ES:
+		case GS:
 			if (value && (value & 3) != 3)
 				return -EIO;
 			value &= 0xffff;
@@ -116,8 +112,8 @@
 			value |= get_stack_long(child, EFL_OFFSET) & ~FLAG_MASK;
 			break;
 	}
-	if (regno > GS*4)
-		regno -= 2*4;
+	if (regno > ES*4)
+		regno -= 1*4;
 	put_stack_long(child, regno - sizeof(struct pt_regs), value);
 	return 0;
 }
@@ -131,18 +127,16 @@
 		case FS:
 			retval = child->thread.fs;
 			break;
-		case GS:
-			retval = child->thread.gs;
-			break;
 		case DS:
 		case ES:
+		case GS:
 		case SS:
 		case CS:
 			retval = 0xffff;
 			/* fall through */
 		default:
-			if (regno > GS*4)
-				regno -= 2*4;
+			if (regno > ES*4)
+				regno -= 1*4;
 			regno = regno - sizeof(struct pt_regs);
 			retval &= get_stack_long(child, regno);
 	}
diff --git a/arch/i386/kernel/quirks.c b/arch/i386/kernel/quirks.c
index 9f6ab17..a01320a 100644
--- a/arch/i386/kernel/quirks.c
+++ b/arch/i386/kernel/quirks.c
@@ -3,10 +3,23 @@
  */
 #include <linux/pci.h>
 #include <linux/irq.h>
+#include <asm/pci-direct.h>
+#include <asm/genapic.h>
+#include <asm/cpu.h>
 
 #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
+static void __devinit verify_quirk_intel_irqbalance(struct pci_dev *dev)
+{
+#ifdef CONFIG_X86_64
+	if (genapic !=  &apic_flat)
+		panic("APIC mode must be flat on this system\n");
+#elif defined(CONFIG_X86_GENERICARCH)
+	if (genapic != &apic_default)
+		panic("APIC mode must be default(flat) on this system. Use apic=default\n");
+#endif
+}
 
-static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
+void __init quirk_intel_irqbalance(void)
 {
 	u8 config, rev;
 	u32 word;
@@ -16,18 +29,18 @@
 	 * based platforms.
 	 * Disable SW irqbalance/affinity on those platforms.
 	 */
-	pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
+	rev = read_pci_config_byte(0, 0, 0, PCI_CLASS_REVISION);
 	if (rev > 0x9)
 		return;
 
 	printk(KERN_INFO "Intel E7520/7320/7525 detected.");
 
-	/* enable access to config space*/
-	pci_read_config_byte(dev, 0xf4, &config);
-	pci_write_config_byte(dev, 0xf4, config|0x2);
+	/* enable access to config space */
+	config = read_pci_config_byte(0, 0, 0, 0xf4);
+	write_pci_config_byte(0, 0, 0, 0xf4, config|0x2);
 
 	/* read xTPR register */
-	raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
+	word = read_pci_config_16(0, 0, 0x40, 0x4c);
 
 	if (!(word & (1 << 13))) {
 		printk(KERN_INFO "Disabling irq balancing and affinity\n");
@@ -38,13 +51,24 @@
 #ifdef CONFIG_PROC_FS
 		no_irq_affinity = 1;
 #endif
+#ifdef CONFIG_HOTPLUG_CPU
+		printk(KERN_INFO "Disabling cpu hotplug control\n");
+		enable_cpu_hotplug = 0;
+#endif
+#ifdef CONFIG_X86_64
+		/* force the genapic selection to flat mode so that
+		 * interrupts can be redirected to more than one CPU.
+		 */
+		genapic_force = &apic_flat;
+#endif
 	}
 
-	/* put back the original value for config space*/
+	/* put back the original value for config space */
 	if (!(config & 0x2))
-		pci_write_config_byte(dev, 0xf4, config);
+		write_pci_config_byte(0, 0, 0, 0xf4, config);
 }
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7320_MCH,	quirk_intel_irqbalance);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7525_MCH,	quirk_intel_irqbalance);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7520_MCH,	quirk_intel_irqbalance);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_E7320_MCH,  verify_quirk_intel_irqbalance);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_E7525_MCH,  verify_quirk_intel_irqbalance);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_E7520_MCH,  verify_quirk_intel_irqbalance);
+
 #endif
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c
index 84278e0..3514b41 100644
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -12,6 +12,7 @@
 #include <linux/dmi.h>
 #include <linux/ctype.h>
 #include <linux/pm.h>
+#include <linux/reboot.h>
 #include <asm/uaccess.h>
 #include <asm/apic.h>
 #include <asm/desc.h>
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 141041d..79df6e6 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -63,9 +63,6 @@
 #include <setup_arch.h>
 #include <bios_ebda.h>
 
-/* Forward Declaration. */
-void __init find_max_pfn(void);
-
 /* This value is set up by the early boot code to point to the value
    immediately after the boot time page tables.  It contains a *physical*
    address, and must not be in the .bss segment! */
@@ -76,11 +73,8 @@
 /*
  * Machine setup..
  */
-
-#ifdef CONFIG_EFI
-int efi_enabled = 0;
-EXPORT_SYMBOL(efi_enabled);
-#endif
+extern struct resource code_resource;
+extern struct resource data_resource;
 
 /* cpu data as detected by the assembly code in head.S */
 struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
@@ -99,12 +93,6 @@
 unsigned int BIOS_revision;
 unsigned int mca_pentium_flag;
 
-/* For PCI or other memory-mapped resources */
-unsigned long pci_mem_start = 0x10000000;
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pci_mem_start);
-#endif
-
 /* Boot loader ID as an integer, for the benefit of proc_dointvec */
 int bootloader_type;
 
@@ -134,7 +122,6 @@
 	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
 EXPORT_SYMBOL(ist_info);
 #endif
-struct e820map e820;
 
 extern void early_cpu_init(void);
 extern int root_mountflags;
@@ -149,516 +136,6 @@
 
 unsigned char __initdata boot_params[PARAM_SIZE];
 
-static struct resource data_resource = {
-	.name	= "Kernel data",
-	.start	= 0,
-	.end	= 0,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-static struct resource code_resource = {
-	.name	= "Kernel code",
-	.start	= 0,
-	.end	= 0,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-static struct resource system_rom_resource = {
-	.name	= "System ROM",
-	.start	= 0xf0000,
-	.end	= 0xfffff,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-};
-
-static struct resource extension_rom_resource = {
-	.name	= "Extension ROM",
-	.start	= 0xe0000,
-	.end	= 0xeffff,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-};
-
-static struct resource adapter_rom_resources[] = { {
-	.name 	= "Adapter ROM",
-	.start	= 0xc8000,
-	.end	= 0,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
-	.name 	= "Adapter ROM",
-	.start	= 0,
-	.end	= 0,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
-	.name 	= "Adapter ROM",
-	.start	= 0,
-	.end	= 0,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
-	.name 	= "Adapter ROM",
-	.start	= 0,
-	.end	= 0,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
-	.name 	= "Adapter ROM",
-	.start	= 0,
-	.end	= 0,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
-	.name 	= "Adapter ROM",
-	.start	= 0,
-	.end	= 0,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-} };
-
-static struct resource video_rom_resource = {
-	.name 	= "Video ROM",
-	.start	= 0xc0000,
-	.end	= 0xc7fff,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-};
-
-static struct resource video_ram_resource = {
-	.name	= "Video RAM area",
-	.start	= 0xa0000,
-	.end	= 0xbffff,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-static struct resource standard_io_resources[] = { {
-	.name	= "dma1",
-	.start	= 0x0000,
-	.end	= 0x001f,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-	.name	= "pic1",
-	.start	= 0x0020,
-	.end	= 0x0021,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-	.name   = "timer0",
-	.start	= 0x0040,
-	.end    = 0x0043,
-	.flags  = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-	.name   = "timer1",
-	.start  = 0x0050,
-	.end    = 0x0053,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-	.name	= "keyboard",
-	.start	= 0x0060,
-	.end	= 0x006f,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-	.name	= "dma page reg",
-	.start	= 0x0080,
-	.end	= 0x008f,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-	.name	= "pic2",
-	.start	= 0x00a0,
-	.end	= 0x00a1,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-	.name	= "dma2",
-	.start	= 0x00c0,
-	.end	= 0x00df,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-	.name	= "fpu",
-	.start	= 0x00f0,
-	.end	= 0x00ff,
-	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-} };
-
-#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
-
-static int __init romchecksum(unsigned char *rom, unsigned long length)
-{
-	unsigned char *p, sum = 0;
-
-	for (p = rom; p < rom + length; p++)
-		sum += *p;
-	return sum == 0;
-}
-
-static void __init probe_roms(void)
-{
-	unsigned long start, length, upper;
-	unsigned char *rom;
-	int	      i;
-
-	/* video rom */
-	upper = adapter_rom_resources[0].start;
-	for (start = video_rom_resource.start; start < upper; start += 2048) {
-		rom = isa_bus_to_virt(start);
-		if (!romsignature(rom))
-			continue;
-
-		video_rom_resource.start = start;
-
-		/* 0 < length <= 0x7f * 512, historically */
-		length = rom[2] * 512;
-
-		/* if checksum okay, trust length byte */
-		if (length && romchecksum(rom, length))
-			video_rom_resource.end = start + length - 1;
-
-		request_resource(&iomem_resource, &video_rom_resource);
-		break;
-	}
-
-	start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
-	if (start < upper)
-		start = upper;
-
-	/* system rom */
-	request_resource(&iomem_resource, &system_rom_resource);
-	upper = system_rom_resource.start;
-
-	/* check for extension rom (ignore length byte!) */
-	rom = isa_bus_to_virt(extension_rom_resource.start);
-	if (romsignature(rom)) {
-		length = extension_rom_resource.end - extension_rom_resource.start + 1;
-		if (romchecksum(rom, length)) {
-			request_resource(&iomem_resource, &extension_rom_resource);
-			upper = extension_rom_resource.start;
-		}
-	}
-
-	/* check for adapter roms on 2k boundaries */
-	for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; start += 2048) {
-		rom = isa_bus_to_virt(start);
-		if (!romsignature(rom))
-			continue;
-
-		/* 0 < length <= 0x7f * 512, historically */
-		length = rom[2] * 512;
-
-		/* but accept any length that fits if checksum okay */
-		if (!length || start + length > upper || !romchecksum(rom, length))
-			continue;
-
-		adapter_rom_resources[i].start = start;
-		adapter_rom_resources[i].end = start + length - 1;
-		request_resource(&iomem_resource, &adapter_rom_resources[i]);
-
-		start = adapter_rom_resources[i++].end & ~2047UL;
-	}
-}
-
-static void __init limit_regions(unsigned long long size)
-{
-	unsigned long long current_addr = 0;
-	int i;
-
-	if (efi_enabled) {
-		efi_memory_desc_t *md;
-		void *p;
-
-		for (p = memmap.map, i = 0; p < memmap.map_end;
-			p += memmap.desc_size, i++) {
-			md = p;
-			current_addr = md->phys_addr + (md->num_pages << 12);
-			if (md->type == EFI_CONVENTIONAL_MEMORY) {
-				if (current_addr >= size) {
-					md->num_pages -=
-						(((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
-					memmap.nr_map = i + 1;
-					return;
-				}
-			}
-		}
-	}
-	for (i = 0; i < e820.nr_map; i++) {
-		current_addr = e820.map[i].addr + e820.map[i].size;
-		if (current_addr < size)
-			continue;
-
-		if (e820.map[i].type != E820_RAM)
-			continue;
-
-		if (e820.map[i].addr >= size) {
-			/*
-			 * This region starts past the end of the
-			 * requested size, skip it completely.
-			 */
-			e820.nr_map = i;
-		} else {
-			e820.nr_map = i + 1;
-			e820.map[i].size -= current_addr - size;
-		}
-		return;
-	}
-}
-
-void __init add_memory_region(unsigned long long start,
-			      unsigned long long size, int type)
-{
-	int x;
-
-	if (!efi_enabled) {
-       		x = e820.nr_map;
-
-		if (x == E820MAX) {
-		    printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
-		    return;
-		}
-
-		e820.map[x].addr = start;
-		e820.map[x].size = size;
-		e820.map[x].type = type;
-		e820.nr_map++;
-	}
-} /* add_memory_region */
-
-#define E820_DEBUG	1
-
-static void __init print_memory_map(char *who)
-{
-	int i;
-
-	for (i = 0; i < e820.nr_map; i++) {
-		printk(" %s: %016Lx - %016Lx ", who,
-			e820.map[i].addr,
-			e820.map[i].addr + e820.map[i].size);
-		switch (e820.map[i].type) {
-		case E820_RAM:	printk("(usable)\n");
-				break;
-		case E820_RESERVED:
-				printk("(reserved)\n");
-				break;
-		case E820_ACPI:
-				printk("(ACPI data)\n");
-				break;
-		case E820_NVS:
-				printk("(ACPI NVS)\n");
-				break;
-		default:	printk("type %lu\n", e820.map[i].type);
-				break;
-		}
-	}
-}
-
-/*
- * Sanitize the BIOS e820 map.
- *
- * Some e820 responses include overlapping entries.  The following 
- * replaces the original e820 map with a new one, removing overlaps.
- *
- */
-struct change_member {
-	struct e820entry *pbios; /* pointer to original bios entry */
-	unsigned long long addr; /* address for this change point */
-};
-static struct change_member change_point_list[2*E820MAX] __initdata;
-static struct change_member *change_point[2*E820MAX] __initdata;
-static struct e820entry *overlap_list[E820MAX] __initdata;
-static struct e820entry new_bios[E820MAX] __initdata;
-
-int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
-{
-	struct change_member *change_tmp;
-	unsigned long current_type, last_type;
-	unsigned long long last_addr;
-	int chgidx, still_changing;
-	int overlap_entries;
-	int new_bios_entry;
-	int old_nr, new_nr, chg_nr;
-	int i;
-
-	/*
-		Visually we're performing the following (1,2,3,4 = memory types)...
-
-		Sample memory map (w/overlaps):
-		   ____22__________________
-		   ______________________4_
-		   ____1111________________
-		   _44_____________________
-		   11111111________________
-		   ____________________33__
-		   ___________44___________
-		   __________33333_________
-		   ______________22________
-		   ___________________2222_
-		   _________111111111______
-		   _____________________11_
-		   _________________4______
-
-		Sanitized equivalent (no overlap):
-		   1_______________________
-		   _44_____________________
-		   ___1____________________
-		   ____22__________________
-		   ______11________________
-		   _________1______________
-		   __________3_____________
-		   ___________44___________
-		   _____________33_________
-		   _______________2________
-		   ________________1_______
-		   _________________4______
-		   ___________________2____
-		   ____________________33__
-		   ______________________4_
-	*/
-
-	/* if there's only one memory region, don't bother */
-	if (*pnr_map < 2)
-		return -1;
-
-	old_nr = *pnr_map;
-
-	/* bail out if we find any unreasonable addresses in bios map */
-	for (i=0; i<old_nr; i++)
-		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
-			return -1;
-
-	/* create pointers for initial change-point information (for sorting) */
-	for (i=0; i < 2*old_nr; i++)
-		change_point[i] = &change_point_list[i];
-
-	/* record all known change-points (starting and ending addresses),
-	   omitting those that are for empty memory regions */
-	chgidx = 0;
-	for (i=0; i < old_nr; i++)	{
-		if (biosmap[i].size != 0) {
-			change_point[chgidx]->addr = biosmap[i].addr;
-			change_point[chgidx++]->pbios = &biosmap[i];
-			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
-			change_point[chgidx++]->pbios = &biosmap[i];
-		}
-	}
-	chg_nr = chgidx;    	/* true number of change-points */
-
-	/* sort change-point list by memory addresses (low -> high) */
-	still_changing = 1;
-	while (still_changing)	{
-		still_changing = 0;
-		for (i=1; i < chg_nr; i++)  {
-			/* if <current_addr> > <last_addr>, swap */
-			/* or, if current=<start_addr> & last=<end_addr>, swap */
-			if ((change_point[i]->addr < change_point[i-1]->addr) ||
-				((change_point[i]->addr == change_point[i-1]->addr) &&
-				 (change_point[i]->addr == change_point[i]->pbios->addr) &&
-				 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
-			   )
-			{
-				change_tmp = change_point[i];
-				change_point[i] = change_point[i-1];
-				change_point[i-1] = change_tmp;
-				still_changing=1;
-			}
-		}
-	}
-
-	/* create a new bios memory map, removing overlaps */
-	overlap_entries=0;	 /* number of entries in the overlap table */
-	new_bios_entry=0;	 /* index for creating new bios map entries */
-	last_type = 0;		 /* start with undefined memory type */
-	last_addr = 0;		 /* start with 0 as last starting address */
-	/* loop through change-points, determining affect on the new bios map */
-	for (chgidx=0; chgidx < chg_nr; chgidx++)
-	{
-		/* keep track of all overlapping bios entries */
-		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
-		{
-			/* add map entry to overlap list (> 1 entry implies an overlap) */
-			overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
-		}
-		else
-		{
-			/* remove entry from list (order independent, so swap with last) */
-			for (i=0; i<overlap_entries; i++)
-			{
-				if (overlap_list[i] == change_point[chgidx]->pbios)
-					overlap_list[i] = overlap_list[overlap_entries-1];
-			}
-			overlap_entries--;
-		}
-		/* if there are overlapping entries, decide which "type" to use */
-		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
-		current_type = 0;
-		for (i=0; i<overlap_entries; i++)
-			if (overlap_list[i]->type > current_type)
-				current_type = overlap_list[i]->type;
-		/* continue building up new bios map based on this information */
-		if (current_type != last_type)	{
-			if (last_type != 0)	 {
-				new_bios[new_bios_entry].size =
-					change_point[chgidx]->addr - last_addr;
-				/* move forward only if the new size was non-zero */
-				if (new_bios[new_bios_entry].size != 0)
-					if (++new_bios_entry >= E820MAX)
-						break; 	/* no more space left for new bios entries */
-			}
-			if (current_type != 0)	{
-				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
-				new_bios[new_bios_entry].type = current_type;
-				last_addr=change_point[chgidx]->addr;
-			}
-			last_type = current_type;
-		}
-	}
-	new_nr = new_bios_entry;   /* retain count for new bios entries */
-
-	/* copy new bios mapping into original location */
-	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
-	*pnr_map = new_nr;
-
-	return 0;
-}
-
-/*
- * Copy the BIOS e820 map into a safe place.
- *
- * Sanity-check it while we're at it..
- *
- * If we're lucky and live on a modern system, the setup code
- * will have given us a memory map that we can use to properly
- * set up memory.  If we aren't, we'll fake a memory map.
- *
- * We check to see that the memory map contains at least 2 elements
- * before we'll use it, because the detection code in setup.S may
- * not be perfect and most every PC known to man has two memory
- * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
- * thinkpad 560x, for example, does not cooperate with the memory
- * detection code.)
- */
-int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
-{
-	/* Only one memory region (or negative)? Ignore it */
-	if (nr_map < 2)
-		return -1;
-
-	do {
-		unsigned long long start = biosmap->addr;
-		unsigned long long size = biosmap->size;
-		unsigned long long end = start + size;
-		unsigned long type = biosmap->type;
-
-		/* Overflow in 64 bits? Ignore the memory map. */
-		if (start > end)
-			return -1;
-
-		/*
-		 * Some BIOSes claim RAM in the 640k - 1M region.
-		 * Not right. Fix it up.
-		 */
-		if (type == E820_RAM) {
-			if (start < 0x100000ULL && end > 0xA0000ULL) {
-				if (start < 0xA0000ULL)
-					add_memory_region(start, 0xA0000ULL-start, type);
-				if (end <= 0x100000ULL)
-					continue;
-				start = 0x100000ULL;
-				size = end - start;
-			}
-		}
-		add_memory_region(start, size, type);
-	} while (biosmap++,--nr_map);
-	return 0;
-}
-
 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
 struct edd edd;
 #ifdef CONFIG_EDD_MODULE
@@ -682,7 +159,7 @@
 }
 #endif
 
-static int __initdata user_defined_memmap = 0;
+int __initdata user_defined_memmap = 0;
 
 /*
  * "mem=nopentium" disables the 4MB page tables.
@@ -719,51 +196,6 @@
 }
 early_param("mem", parse_mem);
 
-static int __init parse_memmap(char *arg)
-{
-	if (!arg)
-		return -EINVAL;
-
-	if (strcmp(arg, "exactmap") == 0) {
-#ifdef CONFIG_CRASH_DUMP
-		/* If we are doing a crash dump, we
-		 * still need to know the real mem
-		 * size before original memory map is
-		 * reset.
-		 */
-		find_max_pfn();
-		saved_max_pfn = max_pfn;
-#endif
-		e820.nr_map = 0;
-		user_defined_memmap = 1;
-	} else {
-		/* If the user specifies memory size, we
-		 * limit the BIOS-provided memory map to
-		 * that size. exactmap can be used to specify
-		 * the exact map. mem=number can be used to
-		 * trim the existing memory map.
-		 */
-		unsigned long long start_at, mem_size;
-
-		mem_size = memparse(arg, &arg);
-		if (*arg == '@') {
-			start_at = memparse(arg+1, &arg);
-			add_memory_region(start_at, mem_size, E820_RAM);
-		} else if (*arg == '#') {
-			start_at = memparse(arg+1, &arg);
-			add_memory_region(start_at, mem_size, E820_ACPI);
-		} else if (*arg == '$') {
-			start_at = memparse(arg+1, &arg);
-			add_memory_region(start_at, mem_size, E820_RESERVED);
-		} else {
-			limit_regions(mem_size);
-			user_defined_memmap = 1;
-		}
-	}
-	return 0;
-}
-early_param("memmap", parse_memmap);
-
 #ifdef CONFIG_PROC_VMCORE
 /* elfcorehdr= specifies the location of elf core header
  * stored by the crashed kernel.
@@ -828,90 +260,6 @@
 early_param("reservetop", parse_reservetop);
 
 /*
- * Callback for efi_memory_walk.
- */
-static int __init
-efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
-{
-	unsigned long *max_pfn = arg, pfn;
-
-	if (start < end) {
-		pfn = PFN_UP(end -1);
-		if (pfn > *max_pfn)
-			*max_pfn = pfn;
-	}
-	return 0;
-}
-
-static int __init
-efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
-{
-	memory_present(0, PFN_UP(start), PFN_DOWN(end));
-	return 0;
-}
-
- /*
-  * This function checks if the entire range <start,end> is mapped with type.
-  *
-  * Note: this function only works correct if the e820 table is sorted and
-  * not-overlapping, which is the case
-  */
-int __init
-e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
-{
-	u64 start = s;
-	u64 end = e;
-	int i;
-	for (i = 0; i < e820.nr_map; i++) {
-		struct e820entry *ei = &e820.map[i];
-		if (type && ei->type != type)
-			continue;
-		/* is the region (part) in overlap with the current region ?*/
-		if (ei->addr >= end || ei->addr + ei->size <= start)
-			continue;
-		/* if the region is at the beginning of <start,end> we move
-		 * start to the end of the region since it's ok until there
-		 */
-		if (ei->addr <= start)
-			start = ei->addr + ei->size;
-		/* if start is now at or beyond end, we're done, full
-		 * coverage */
-		if (start >= end)
-			return 1; /* we're done */
-	}
-	return 0;
-}
-
-/*
- * Find the highest page frame number we have available
- */
-void __init find_max_pfn(void)
-{
-	int i;
-
-	max_pfn = 0;
-	if (efi_enabled) {
-		efi_memmap_walk(efi_find_max_pfn, &max_pfn);
-		efi_memmap_walk(efi_memory_present_wrapper, NULL);
-		return;
-	}
-
-	for (i = 0; i < e820.nr_map; i++) {
-		unsigned long start, end;
-		/* RAM? */
-		if (e820.map[i].type != E820_RAM)
-			continue;
-		start = PFN_UP(e820.map[i].addr);
-		end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
-		if (start >= end)
-			continue;
-		if (end > max_pfn)
-			max_pfn = end;
-		memory_present(0, start, end);
-	}
-}
-
-/*
  * Determine low and high memory ranges:
  */
 unsigned long __init find_max_low_pfn(void)
@@ -971,68 +319,6 @@
 }
 
 /*
- * Free all available memory for boot time allocation.  Used
- * as a callback function by efi_memory_walk()
- */
-
-static int __init
-free_available_memory(unsigned long start, unsigned long end, void *arg)
-{
-	/* check max_low_pfn */
-	if (start >= (max_low_pfn << PAGE_SHIFT))
-		return 0;
-	if (end >= (max_low_pfn << PAGE_SHIFT))
-		end = max_low_pfn << PAGE_SHIFT;
-	if (start < end)
-		free_bootmem(start, end - start);
-
-	return 0;
-}
-/*
- * Register fully available low RAM pages with the bootmem allocator.
- */
-static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
-{
-	int i;
-
-	if (efi_enabled) {
-		efi_memmap_walk(free_available_memory, NULL);
-		return;
-	}
-	for (i = 0; i < e820.nr_map; i++) {
-		unsigned long curr_pfn, last_pfn, size;
-		/*
-		 * Reserve usable low memory
-		 */
-		if (e820.map[i].type != E820_RAM)
-			continue;
-		/*
-		 * We are rounding up the start address of usable memory:
-		 */
-		curr_pfn = PFN_UP(e820.map[i].addr);
-		if (curr_pfn >= max_low_pfn)
-			continue;
-		/*
-		 * ... and at the end of the usable range downwards:
-		 */
-		last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
-
-		if (last_pfn > max_low_pfn)
-			last_pfn = max_low_pfn;
-
-		/*
-		 * .. finally, did all the rounding and playing
-		 * around just make the area go away?
-		 */
-		if (last_pfn <= curr_pfn)
-			continue;
-
-		size = last_pfn - curr_pfn;
-		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
-	}
-}
-
-/*
  * workaround for Dell systems that neglect to reserve EBDA
  */
 static void __init reserve_ebda_region(void)
@@ -1118,8 +404,8 @@
 	 * the (very unlikely) case of us accidentally initializing the
 	 * bootmem allocator with an invalid RAM area.
 	 */
-	reserve_bootmem(__PHYSICAL_START, (PFN_PHYS(min_low_pfn) +
-			 bootmap_size + PAGE_SIZE-1) - (__PHYSICAL_START));
+	reserve_bootmem(__pa_symbol(_text), (PFN_PHYS(min_low_pfn) +
+			 bootmap_size + PAGE_SIZE-1) - __pa_symbol(_text));
 
 	/*
 	 * reserve physical page 0 - it's a special BIOS page on many boxes,
@@ -1162,8 +448,7 @@
 	if (LOADER_TYPE && INITRD_START) {
 		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
 			reserve_bootmem(INITRD_START, INITRD_SIZE);
-			initrd_start =
-				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
+			initrd_start = INITRD_START + PAGE_OFFSET;
 			initrd_end = initrd_start+INITRD_SIZE;
 		}
 		else {
@@ -1200,126 +485,6 @@
 	}
 }
 
-/*
- * Request address space for all standard RAM and ROM resources
- * and also for regions reported as reserved by the e820.
- */
-static void __init
-legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
-{
-	int i;
-
-	probe_roms();
-	for (i = 0; i < e820.nr_map; i++) {
-		struct resource *res;
-#ifndef CONFIG_RESOURCES_64BIT
-		if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
-			continue;
-#endif
-		res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
-		switch (e820.map[i].type) {
-		case E820_RAM:	res->name = "System RAM"; break;
-		case E820_ACPI:	res->name = "ACPI Tables"; break;
-		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
-		default:	res->name = "reserved";
-		}
-		res->start = e820.map[i].addr;
-		res->end = res->start + e820.map[i].size - 1;
-		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-		if (request_resource(&iomem_resource, res)) {
-			kfree(res);
-			continue;
-		}
-		if (e820.map[i].type == E820_RAM) {
-			/*
-			 *  We don't know which RAM region contains kernel data,
-			 *  so we try it repeatedly and let the resource manager
-			 *  test it.
-			 */
-			request_resource(res, code_resource);
-			request_resource(res, data_resource);
-#ifdef CONFIG_KEXEC
-			request_resource(res, &crashk_res);
-#endif
-		}
-	}
-}
-
-/*
- * Request address space for all standard resources
- *
- * This is called just before pcibios_init(), which is also a
- * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
- */
-static int __init request_standard_resources(void)
-{
-	int i;
-
-	printk("Setting up standard PCI resources\n");
-	if (efi_enabled)
-		efi_initialize_iomem_resources(&code_resource, &data_resource);
-	else
-		legacy_init_iomem_resources(&code_resource, &data_resource);
-
-	/* EFI systems may still have VGA */
-	request_resource(&iomem_resource, &video_ram_resource);
-
-	/* request I/O space for devices used on all i[345]86 PCs */
-	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
-		request_resource(&ioport_resource, &standard_io_resources[i]);
-	return 0;
-}
-
-subsys_initcall(request_standard_resources);
-
-static void __init register_memory(void)
-{
-	unsigned long gapstart, gapsize, round;
-	unsigned long long last;
-	int i;
-
-	/*
-	 * Search for the bigest gap in the low 32 bits of the e820
-	 * memory space.
-	 */
-	last = 0x100000000ull;
-	gapstart = 0x10000000;
-	gapsize = 0x400000;
-	i = e820.nr_map;
-	while (--i >= 0) {
-		unsigned long long start = e820.map[i].addr;
-		unsigned long long end = start + e820.map[i].size;
-
-		/*
-		 * Since "last" is at most 4GB, we know we'll
-		 * fit in 32 bits if this condition is true
-		 */
-		if (last > end) {
-			unsigned long gap = last - end;
-
-			if (gap > gapsize) {
-				gapsize = gap;
-				gapstart = end;
-			}
-		}
-		if (start < last)
-			last = start;
-	}
-
-	/*
-	 * See how much we want to round up: start off with
-	 * rounding to the next 1MB area.
-	 */
-	round = 0x100000;
-	while ((gapsize >> 4) > round)
-		round += round;
-	/* Fun with two's complement */
-	pci_mem_start = (gapstart + round) & -round;
-
-	printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
-		pci_mem_start, gapstart, gapsize);
-}
-
 #ifdef CONFIG_MCA
 static void set_mca_bus(int x)
 {
@@ -1329,6 +494,12 @@
 static void set_mca_bus(int x) { }
 #endif
 
+/* Overridden in paravirt.c if CONFIG_PARAVIRT */
+char * __attribute__((weak)) memory_setup(void)
+{
+	return machine_specific_memory_setup();
+}
+
 /*
  * Determine if we were loaded by an EFI loader.  If so, then we have also been
  * passed the efi memmap, systab, etc., so we should use these data structures
@@ -1381,7 +552,7 @@
 		efi_init();
 	else {
 		printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-		print_memory_map(machine_specific_memory_setup());
+		print_memory_map(memory_setup());
 	}
 
 	copy_edd();
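
The memory_setup() introduced above is defined with __attribute__((weak)), so this generic version is used only when nothing else provides the symbol; per its comment, paravirt.c supplies a strong definition when CONFIG_PARAVIRT is set. A sketch of the weak/strong override pattern — the paravirt body shown is inferred from that comment, not quoted from the patch:

/* default, used when no strong definition exists at link time */
char * __attribute__((weak)) memory_setup(void)
{
	return machine_specific_memory_setup();
}

/* elsewhere (e.g. paravirt.c): a strong definition replaces the weak one */
char *memory_setup(void)
{
	return paravirt_ops.memory_setup();
}
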
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index 43002cf..65d7620 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -128,7 +128,7 @@
 			 X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
 			 X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)
 
-	GET_SEG(gs);
+	COPY_SEG(gs);
 	GET_SEG(fs);
 	COPY_SEG(es);
 	COPY_SEG(ds);
@@ -244,9 +244,7 @@
 {
 	int tmp, err = 0;
 
-	tmp = 0;
-	savesegment(gs, tmp);
-	err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
+	err |= __put_user(regs->xgs, (unsigned int __user *)&sc->gs);
 	savesegment(fs, tmp);
 	err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
 
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 31e5c65..5285aff 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -321,7 +321,6 @@
 
 fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
 	unsigned long cpu;
 
 	cpu = get_cpu();
@@ -352,7 +351,6 @@
 	smp_mb__after_clear_bit();
 out:
 	put_cpu_no_resched();
-	set_irq_regs(old_regs);
 }
 
 static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
@@ -607,14 +605,11 @@
  */
 fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
 	ack_APIC_irq();
-	set_irq_regs(old_regs);
 }
 
 fastcall void smp_call_function_interrupt(struct pt_regs *regs)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
 	void (*func) (void *info) = call_data->func;
 	void *info = call_data->info;
 	int wait = call_data->wait;
@@ -637,7 +632,6 @@
 		mb();
 		atomic_inc(&call_data->finished);
 	}
-	set_irq_regs(old_regs);
 }
 
 /*
@@ -699,6 +693,10 @@
 		put_cpu();
 		return -EBUSY;
 	}
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
 	spin_lock_bh(&call_lock);
 	__smp_call_function_single(cpu, func, info, nonatomic, wait);
 	spin_unlock_bh(&call_lock);
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 4bb8b77..4bf0e3c 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -33,6 +33,11 @@
  *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process. */
 
+
+/* SMP boot always wants to use real time delay to allow sufficient time for
+ * the APs to come online */
+#define USE_REAL_TIME_DELAY
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -52,6 +57,8 @@
 #include <asm/desc.h>
 #include <asm/arch_hooks.h>
 #include <asm/nmi.h>
+#include <asm/pda.h>
+#include <asm/genapic.h>
 
 #include <mach_apic.h>
 #include <mach_wakecpu.h>
@@ -536,11 +543,11 @@
 static void __devinit start_secondary(void *unused)
 {
 	/*
-	 * Dont put anything before smp_callin(), SMP
+	 * Don't put *anything* before secondary_cpu_init(); SMP
 	 * booting is so fragile that we want to limit the
 	 * things done here to the most necessary things.
 	 */
-	cpu_init();
+	secondary_cpu_init();
 	preempt_disable();
 	smp_callin();
 	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
@@ -599,13 +606,16 @@
 		"movl %0,%%esp\n\t"
 		"jmp *%1"
 		:
-		:"r" (current->thread.esp),"r" (current->thread.eip));
+		:"m" (current->thread.esp),"m" (current->thread.eip));
 }
 
+/* Static state in head.S used to set up a CPU */
 extern struct {
 	void * esp;
 	unsigned short ss;
 } stack_start;
+extern struct i386_pda *start_pda;
+extern struct Xgt_desc_struct cpu_gdt_descr;
 
 #ifdef CONFIG_NUMA
 
@@ -936,9 +946,6 @@
 	unsigned long start_eip;
 	unsigned short nmi_high = 0, nmi_low = 0;
 
-	++cpucount;
-	alternatives_smp_switch(1);
-
 	/*
 	 * We can't use kernel_thread since we must avoid to
 	 * reschedule the child.
@@ -946,15 +953,30 @@
 	idle = alloc_idle_task(cpu);
 	if (IS_ERR(idle))
 		panic("failed fork for CPU %d", cpu);
+
+	/* Pre-allocate and initialize the CPU's GDT and PDA so it
+	   doesn't have to do any memory allocation during the
+	   delicate CPU-bringup phase. */
+	if (!init_gdt(cpu, idle)) {
+		printk(KERN_INFO "Couldn't allocate GDT/PDA for CPU %d\n", cpu);
+		return -1;	/* ? */
+	}
+
 	idle->thread.eip = (unsigned long) start_secondary;
 	/* start_eip had better be page-aligned! */
 	start_eip = setup_trampoline();
 
+	++cpucount;
+	alternatives_smp_switch(1);
+
 	/* So we see what's up   */
 	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
 	/* Stack for startup_32 can be just as for start_secondary onwards */
 	stack_start.esp = (void *) idle->thread.esp;
 
+	start_pda = cpu_pda(cpu);
+	cpu_gdt_descr = per_cpu(cpu_gdt_descr, cpu);
+
 	irq_ctx_init(cpu);
 
 	x86_cpu_to_apicid[cpu] = apicid;
@@ -1049,13 +1071,15 @@
 
 struct warm_boot_cpu_info {
 	struct completion *complete;
+	struct work_struct task;
 	int apicid;
 	int cpu;
 };
 
-static void __cpuinit do_warm_boot_cpu(void *p)
+static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
 {
-	struct warm_boot_cpu_info *info = p;
+	struct warm_boot_cpu_info *info =
+		container_of(work, struct warm_boot_cpu_info, task);
 	do_boot_cpu(info->apicid, info->cpu);
 	complete(info->complete);
 }
@@ -1064,7 +1088,6 @@
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct warm_boot_cpu_info info;
-	struct work_struct task;
 	int	apicid, ret;
 	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
 
@@ -1089,7 +1112,7 @@
 	info.complete = &done;
 	info.apicid = apicid;
 	info.cpu = cpu;
-	INIT_WORK(&task, do_warm_boot_cpu, &info);
+	INIT_WORK(&info.task, do_warm_boot_cpu);
 
 	tsc_sync_disabled = 1;
 
@@ -1097,7 +1120,7 @@
 	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
 			KERNEL_PGD_PTRS);
 	flush_tlb_all();
-	schedule_work(&task);
+	schedule_work(&info.task);
 	wait_for_completion(&done);
 
 	tsc_sync_disabled = 0;
@@ -1108,34 +1131,15 @@
 }
 #endif
 
-static void smp_tune_scheduling (void)
+static void smp_tune_scheduling(void)
 {
 	unsigned long cachesize;       /* kB   */
-	unsigned long bandwidth = 350; /* MB/s */
-	/*
-	 * Rough estimation for SMP scheduling, this is the number of
-	 * cycles it takes for a fully memory-limited process to flush
-	 * the SMP-local cache.
-	 *
-	 * (For a P5 this pretty much means we will choose another idle
-	 *  CPU almost always at wakeup time (this is due to the small
-	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
-	 *  the cache size)
-	 */
 
-	if (!cpu_khz) {
-		/*
-		 * this basically disables processor-affinity
-		 * scheduling on SMP without a TSC.
-		 */
-		return;
-	} else {
+	if (cpu_khz) {
 		cachesize = boot_cpu_data.x86_cache_size;
-		if (cachesize == -1) {
-			cachesize = 16; /* Pentiums, 2x8kB cache */
-			bandwidth = 100;
-		}
-		max_cache_size = cachesize * 1024;
+
+		if (cachesize > 0)
+			max_cache_size = cachesize * 1024;
 	}
 }
 
@@ -1461,6 +1465,12 @@
 	cpu_set(cpu, smp_commenced_mask);
 	while (!cpu_isset(cpu, cpu_online_map))
 		cpu_relax();
+
+#ifdef CONFIG_X86_GENERICARCH
+	if (num_online_cpus() > 8 && genapic == &apic_default)
+		panic("Default flat APIC routing can't be used with > 8 cpus\n");
+#endif
+
 	return 0;
 }
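
The warm-boot conversion above follows the newer workqueue convention: the work_struct is embedded in the caller's info structure, the handler receives a struct work_struct *, and container_of() recovers the enclosing structure. A stand-alone sketch of that pattern (my_info, my_handler, do_something and kick_off are invented names):

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct my_info {
	struct work_struct task;	/* embedded work item */
	int payload;
};

static void my_handler(struct work_struct *work)
{
	/* recover the containing structure from the work pointer */
	struct my_info *info = container_of(work, struct my_info, task);

	do_something(info->payload);	/* invented helper */
}

static void kick_off(struct my_info *info)
{
	info->payload = 42;
	INIT_WORK(&info->task, my_handler);
	schedule_work(&info->task);
}
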
 
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 713ba39..7de9117 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -27,7 +27,11 @@
  * Should the kernel map a VDSO page into processes and pass its
  * address down to glibc upon exec()?
  */
+#ifdef CONFIG_PARAVIRT
+unsigned int __read_mostly vdso_enabled = 0;
+#else
 unsigned int __read_mostly vdso_enabled = 1;
+#endif
 
 EXPORT_SYMBOL_GPL(vdso_enabled);
 
@@ -132,7 +136,7 @@
 		goto up_fail;
 	}
 
-	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		ret = -ENOMEM;
 		goto up_fail;
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 78af572..c505b16 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -56,6 +56,7 @@
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <asm/timer.h>
+#include <asm/time.h>
 
 #include "mach_time.h"
 
@@ -116,10 +117,7 @@
 	/* gets recalled with irq locally disabled */
 	/* XXX - does irqsave resolve this? -johnstul */
 	spin_lock_irqsave(&rtc_lock, flags);
-	if (efi_enabled)
-		retval = efi_set_rtc_mmss(nowtime);
-	else
-		retval = mach_set_rtc_mmss(nowtime);
+	retval = set_wallclock(nowtime);
 	spin_unlock_irqrestore(&rtc_lock, flags);
 
 	return retval;
@@ -223,10 +221,7 @@
 
 	spin_lock_irqsave(&rtc_lock, flags);
 
-	if (efi_enabled)
-		retval = efi_get_time();
-	else
-		retval = mach_get_cmos_time();
+	retval = get_wallclock();
 
 	spin_unlock_irqrestore(&rtc_lock, flags);
 
@@ -370,7 +365,7 @@
 		printk("Using HPET for base-timer\n");
 	}
 
-	time_init_hook();
+	do_time_init();
 }
 #endif
 
@@ -392,5 +387,5 @@
 
 	do_settimeofday(&ts);
 
-	time_init_hook();
+	do_time_init();
 }
diff --git a/arch/i386/kernel/time_hpet.c b/arch/i386/kernel/time_hpet.c
index 1a2a979..1e4702d 100644
--- a/arch/i386/kernel/time_hpet.c
+++ b/arch/i386/kernel/time_hpet.c
@@ -132,14 +132,20 @@
 	 * the single HPET timer for system time.
 	 */
 #ifdef CONFIG_HPET_EMULATE_RTC
-	if (!(id & HPET_ID_NUMBER))
+	if (!(id & HPET_ID_NUMBER)) {
+		iounmap(hpet_virt_address);
+		hpet_virt_address = NULL;
 		return -1;
+	}
 #endif
 
 
 	hpet_period = hpet_readl(HPET_PERIOD);
-	if ((hpet_period < HPET_MIN_PERIOD) || (hpet_period > HPET_MAX_PERIOD))
+	if ((hpet_period < HPET_MIN_PERIOD) || (hpet_period > HPET_MAX_PERIOD)) {
+		iounmap(hpet_virt_address);
+		hpet_virt_address = NULL;
 		return -1;
+	}
 
 	/*
 	 * 64 bit math
@@ -156,8 +162,11 @@
 
 	hpet_use_timer = id & HPET_ID_LEGSUP;
 
-	if (hpet_timer_stop_set_go(hpet_tick))
+	if (hpet_timer_stop_set_go(hpet_tick)) {
+		iounmap(hpet_virt_address);
+		hpet_virt_address = NULL;
 		return -1;
+	}
 
 	use_hpet = 1;
 
diff --git a/arch/i386/kernel/topology.c b/arch/i386/kernel/topology.c
index 07d6da3..79cf608 100644
--- a/arch/i386/kernel/topology.c
+++ b/arch/i386/kernel/topology.c
@@ -40,14 +40,18 @@
 	 * restrictions and assumptions in the kernel. This basically
 	 * doesn't add a control file, so one cannot attempt to offline
 	 * the BSP.
+	 *
+	 * Also, certain PCI quirks require that hotplug control not be
+	 * enabled for all CPUs.
 	 */
-	if (!num)
-		cpu_devices[num].cpu.no_control = 1;
+	if (num && enable_cpu_hotplug)
+		cpu_devices[num].cpu.hotpluggable = 1;
 
 	return register_cpu(&cpu_devices[num].cpu, num);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+int enable_cpu_hotplug = 1;
 
 void arch_unregister_cpu(int num) {
 	return unregister_cpu(&cpu_devices[num].cpu);
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index fe9c5e8..68de48e 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -29,6 +29,7 @@
 #include <linux/kexec.h>
 #include <linux/unwind.h>
 #include <linux/uaccess.h>
+#include <linux/nmi.h>
 
 #ifdef CONFIG_EISA
 #include <linux/ioport.h>
@@ -61,9 +62,6 @@
 
 asmlinkage int system_call(void);
 
-struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
-		{ 0, 0 }, { 0, 0 } };
-
 /* Do we ignore FPU interrupts ? */
 char ignore_fpu_irq = 0;
 
@@ -94,7 +92,7 @@
 asmlinkage void spurious_interrupt_bug(void);
 asmlinkage void machine_check(void);
 
-static int kstack_depth_to_print = 24;
+int kstack_depth_to_print = 24;
 #ifdef CONFIG_STACK_UNWIND
 static int call_trace = 1;
 #else
@@ -163,16 +161,25 @@
 {
 	struct ops_and_data *oad = (struct ops_and_data *)data;
 	int n = 0;
+	unsigned long sp = UNW_SP(info);
 
+	if (arch_unw_user_mode(info))
+		return -1;
 	while (unwind(info) == 0 && UNW_PC(info)) {
 		n++;
 		oad->ops->address(oad->data, UNW_PC(info));
 		if (arch_unw_user_mode(info))
 			break;
+		if ((sp & ~(PAGE_SIZE - 1)) == (UNW_SP(info) & ~(PAGE_SIZE - 1))
+		    && sp > UNW_SP(info))
+			break;
+		sp = UNW_SP(info);
 	}
 	return n;
 }
 
+#define MSG(msg) ops->warning(data, msg)
+
 void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	        unsigned long *stack,
 		struct stacktrace_ops *ops, void *data)
@@ -191,29 +198,31 @@
 			if (unwind_init_frame_info(&info, task, regs) == 0)
 				unw_ret = dump_trace_unwind(&info, &oad);
 		} else if (task == current)
-			unw_ret = unwind_init_running(&info, dump_trace_unwind, &oad);
+			unw_ret = unwind_init_running(&info, dump_trace_unwind,
+						      &oad);
 		else {
 			if (unwind_init_blocked(&info, task) == 0)
 				unw_ret = dump_trace_unwind(&info, &oad);
 		}
 		if (unw_ret > 0) {
 			if (call_trace == 1 && !arch_unw_user_mode(&info)) {
-				ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
+				ops->warning_symbol(data,
+					     "DWARF2 unwinder stuck at %s",
 					     UNW_PC(&info));
 				if (UNW_SP(&info) >= PAGE_OFFSET) {
-					ops->warning(data, "Leftover inexact backtrace:\n");
+					MSG("Leftover inexact backtrace:");
 					stack = (void *)UNW_SP(&info);
 					if (!stack)
 						return;
 					ebp = UNW_FP(&info);
 				} else
-					ops->warning(data, "Full inexact backtrace again:\n");
+					MSG("Full inexact backtrace again:");
 			} else if (call_trace >= 1)
 				return;
 			else
-				ops->warning(data, "Full inexact backtrace again:\n");
+				MSG("Full inexact backtrace again:");
 		} else
-			ops->warning(data, "Inexact backtrace:\n");
+			MSG("Inexact backtrace:");
 	}
 	if (!stack) {
 		unsigned long dummy;
@@ -247,6 +256,7 @@
 		stack = (unsigned long*)context->previous_esp;
 		if (!stack)
 			break;
+		touch_nmi_watchdog();
 	}
 }
 EXPORT_SYMBOL(dump_trace);
@@ -379,7 +389,7 @@
 	 * time of the fault..
 	 */
 	if (in_kernel) {
-		u8 __user *eip;
+		u8 *eip;
 		int code_bytes = 64;
 		unsigned char c;
 
@@ -388,18 +398,20 @@
 
 		printk(KERN_EMERG "Code: ");
 
-		eip = (u8 __user *)regs->eip - 43;
-		if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
+		eip = (u8 *)regs->eip - 43;
+		if (eip < (u8 *)PAGE_OFFSET ||
+			probe_kernel_address(eip, c)) {
 			/* try starting at EIP */
-			eip = (u8 __user *)regs->eip;
+			eip = (u8 *)regs->eip;
 			code_bytes = 32;
 		}
 		for (i = 0; i < code_bytes; i++, eip++) {
-			if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
+			if (eip < (u8 *)PAGE_OFFSET ||
+				probe_kernel_address(eip, c)) {
 				printk(" Bad EIP value.");
 				break;
 			}
-			if (eip == (u8 __user *)regs->eip)
+			if (eip == (u8 *)regs->eip)
 				printk("<%02x> ", c);
 			else
 				printk("%02x ", c);
@@ -415,7 +427,7 @@
 
 	if (eip < PAGE_OFFSET)
 		return;
-	if (probe_kernel_address((unsigned short __user *)eip, ud2))
+	if (probe_kernel_address((unsigned short *)eip, ud2))
 		return;
 	if (ud2 != 0x0b0f)
 		return;
@@ -428,11 +440,11 @@
 		char *file;
 		char c;
 
-		if (probe_kernel_address((unsigned short __user *)(eip + 2),
-					line))
+		if (probe_kernel_address((unsigned short *)(eip + 2), line))
 			break;
-		if (__get_user(file, (char * __user *)(eip + 4)) ||
-		    (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
+		if (probe_kernel_address((char **)(eip + 4), file) ||
+		    (unsigned long)file < PAGE_OFFSET ||
+			probe_kernel_address(file, c))
 			file = "<bad filename>";
 
 		printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
@@ -452,7 +464,7 @@
 		u32 lock_owner;
 		int lock_owner_depth;
 	} die = {
-		.lock =			SPIN_LOCK_UNLOCKED,
+		.lock =			__SPIN_LOCK_UNLOCKED(die.lock),
 		.lock_owner =		-1,
 		.lock_owner_depth =	0
 	};
@@ -707,8 +719,7 @@
 {
 	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
 		"CPU %d.\n", reason, smp_processor_id());
-	printk(KERN_EMERG "You probably have a hardware problem with your RAM "
-			"chips\n");
+	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
 	if (panic_on_unrecovered_nmi)
                 panic("NMI: Not continuing");
 
@@ -773,7 +784,6 @@
 	printk(" on CPU%d, eip %08lx, registers:\n",
 		smp_processor_id(), regs->eip);
 	show_registers(regs);
-	printk(KERN_EMERG "console shuts up ...\n");
 	console_silent();
 	spin_unlock(&nmi_print_lock);
 	bust_spinlocks(0);
@@ -1088,49 +1098,24 @@
 #endif
 }
 
-fastcall void setup_x86_bogus_stack(unsigned char * stk)
+fastcall unsigned long patch_espfix_desc(unsigned long uesp,
+					  unsigned long kesp)
 {
-	unsigned long *switch16_ptr, *switch32_ptr;
-	struct pt_regs *regs;
-	unsigned long stack_top, stack_bot;
-	unsigned short iret_frame16_off;
 	int cpu = smp_processor_id();
-	/* reserve the space on 32bit stack for the magic switch16 pointer */
-	memmove(stk, stk + 8, sizeof(struct pt_regs));
-	switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
-	regs = (struct pt_regs *)stk;
-	/* now the switch32 on 16bit stack */
-	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
-	stack_top = stack_bot +	CPU_16BIT_STACK_SIZE;
-	switch32_ptr = (unsigned long *)(stack_top - 8);
-	iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
-	/* copy iret frame on 16bit stack */
-	memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
-	/* fill in the switch pointers */
-	switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
-	switch16_ptr[1] = __ESPFIX_SS;
-	switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
-		8 - CPU_16BIT_STACK_SIZE;
-	switch32_ptr[1] = __KERNEL_DS;
-}
-
-fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
-{
-	unsigned long *switch32_ptr;
-	unsigned char *stack16, *stack32;
-	unsigned long stack_top, stack_bot;
-	int len;
-	int cpu = smp_processor_id();
-	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
-	stack_top = stack_bot +	CPU_16BIT_STACK_SIZE;
-	switch32_ptr = (unsigned long *)(stack_top - 8);
-	/* copy the data from 16bit stack to 32bit stack */
-	len = CPU_16BIT_STACK_SIZE - 8 - sp;
-	stack16 = (unsigned char *)(stack_bot + sp);
-	stack32 = (unsigned char *)
-		(switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
-	memcpy(stack32, stack16, len);
-	return stack32;
+	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+	struct desc_struct *gdt = (struct desc_struct *)cpu_gdt_descr->address;
+	unsigned long base = (kesp - uesp) & -THREAD_SIZE;
+	unsigned long new_kesp = kesp - base;
+	unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
+	__u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
+	/* Set up base for espfix segment */
+ 	desc &= 0x00f0ff0000000000ULL;
+ 	desc |=	((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
+		((((__u64)base) << 32) & 0xff00000000000000ULL) |
+		((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
+		(lim_pages & 0xffff);
+	*(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
+	return new_kesp;
 }
 
 /*
@@ -1143,7 +1128,7 @@
  * Must be called with kernel preemption disabled (in this case,
  * local interrupts are disabled at the call-site in entry.S).
  */
-asmlinkage void math_state_restore(struct pt_regs regs)
+asmlinkage void math_state_restore(void)
 {
 	struct thread_info *thread = current_thread_info();
 	struct task_struct *tsk = thread->task;
@@ -1153,6 +1138,7 @@
 		init_fpu(tsk);
 	restore_fpu(tsk);
 	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
+	tsk->fpu_counter++;
 }
 
 #ifndef CONFIG_MATH_EMULATION
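Note on the espfix rework in the traps.c hunk above: patch_espfix_desc() rebuilds the ESPFIX GDT entry in place by packing a 32-bit base and a page-granular limit into the 8-byte descriptor. As an illustration of that bit layout only (user-space sketch, example values, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Pack base[31:0] and a page-granularity limit[19:0] into an x86 segment
 * descriptor, preserving the access/flag bits already present in 'desc'
 * (the same masks patch_espfix_desc() uses). */
static uint64_t pack_base_limit(uint64_t desc, uint32_t base, uint32_t lim_pages)
{
	desc &= 0x00f0ff0000000000ULL;	/* keep type, DPL, G and D/B bits */
	desc |= (((uint64_t)base << 16) & 0x000000ffffff0000ULL) |	/* base[23:0]   -> bits 16..39 */
		(((uint64_t)base << 32) & 0xff00000000000000ULL) |	/* base[31:24]  -> bits 56..63 */
		(((uint64_t)lim_pages << 32) & 0x000f000000000000ULL) |	/* limit[19:16] -> bits 48..51 */
		(lim_pages & 0xffff);					/* limit[15:0]  -> bits  0..15 */
	return desc;
}

int main(void)
{
	/* made-up values: an aligned base and a limit of one page */
	printf("%016llx\n",
	       (unsigned long long)pack_base_limit(0x00cf930000000000ULL,
						    0xc1000000, 1));
	return 0;
}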
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index fbc9582..1bbe45d 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -13,7 +13,6 @@
 
 #include <asm/delay.h>
 #include <asm/tsc.h>
-#include <asm/delay.h>
 #include <asm/io.h>
 
 #include "mach_timer.h"
@@ -217,7 +216,7 @@
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
 
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *work)
 {
 	unsigned int cpu;
 
@@ -306,7 +305,7 @@
 {
 	int ret;
 
-	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
 	ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
 					CPUFREQ_TRANSITION_NOTIFIER);
 	if (!ret)
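The tsc.c hunk above follows the workqueue API change in this merge window: handlers now receive the struct work_struct itself rather than an opaque void *. When a handler does need per-instance data, the idiom is to embed the work item and recover its container. A minimal user-space sketch of that container_of pattern (names are illustrative, not from the patch):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };		/* stand-in for the kernel type */

struct my_dev {
	int irq;
	struct work_struct work;		/* embedded work item */
};

/* new-style handler: gets the work_struct, derives the owning object */
static void my_handler(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work);

	printf("handling irq %d\n", dev->irq);
}

int main(void)
{
	struct my_dev dev = { .irq = 11 };

	my_handler(&dev.work);			/* what the workqueue would do */
	return 0;
}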
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index cbcd61d..be2f96e 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -43,6 +43,7 @@
 #include <linux/highmem.h>
 #include <linux/ptrace.h>
 #include <linux/audit.h>
+#include <linux/stddef.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -72,10 +73,10 @@
 /*
  * 8- and 16-bit register defines..
  */
-#define AL(regs)	(((unsigned char *)&((regs)->eax))[0])
-#define AH(regs)	(((unsigned char *)&((regs)->eax))[1])
-#define IP(regs)	(*(unsigned short *)&((regs)->eip))
-#define SP(regs)	(*(unsigned short *)&((regs)->esp))
+#define AL(regs)	(((unsigned char *)&((regs)->pt.eax))[0])
+#define AH(regs)	(((unsigned char *)&((regs)->pt.eax))[1])
+#define IP(regs)	(*(unsigned short *)&((regs)->pt.eip))
+#define SP(regs)	(*(unsigned short *)&((regs)->pt.esp))
 
 /*
  * virtual flags (16 and 32-bit versions)
@@ -89,10 +90,37 @@
 #define SAFE_MASK	(0xDD5)
 #define RETURN_MASK	(0xDFF)
 
-#define VM86_REGS_PART2 orig_eax
-#define VM86_REGS_SIZE1 \
-        ( (unsigned)( & (((struct kernel_vm86_regs *)0)->VM86_REGS_PART2) ) )
-#define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1)
+/* convert kernel_vm86_regs to vm86_regs */
+static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
+				  const struct kernel_vm86_regs *regs)
+{
+	int ret = 0;
+
+	/* kernel_vm86_regs is missing xfs, so copy everything up to
+	   (but not including) xgs, and then the rest from xgs onward. */
+	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.xgs));
+	ret += copy_to_user(&user->__null_gs, &regs->pt.xgs,
+			    sizeof(struct kernel_vm86_regs) -
+			    offsetof(struct kernel_vm86_regs, pt.xgs));
+
+	return ret;
+}
+
+/* convert vm86_regs to kernel_vm86_regs */
+static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
+				    const struct vm86_regs __user *user,
+				    unsigned extra)
+{
+	int ret = 0;
+
+	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.xgs));
+	ret += copy_from_user(&regs->pt.xgs, &user->__null_gs,
+			      sizeof(struct kernel_vm86_regs) -
+			      offsetof(struct kernel_vm86_regs, pt.xgs) +
+			      extra);
+
+	return ret;
+}
 
 struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
 struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
@@ -112,10 +140,8 @@
 		printk("no vm86_info: BAD\n");
 		do_exit(SIGSEGV);
 	}
-	set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
-	tmp = copy_to_user(&current->thread.vm86_info->regs,regs, VM86_REGS_SIZE1);
-	tmp += copy_to_user(&current->thread.vm86_info->regs.VM86_REGS_PART2,
-		&regs->VM86_REGS_PART2, VM86_REGS_SIZE2);
+	set_flags(regs->pt.eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
+	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs);
 	tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap);
 	if (tmp) {
 		printk("vm86: could not access userspace vm86_info\n");
@@ -129,9 +155,11 @@
 	current->thread.saved_esp0 = 0;
 	put_cpu();
 
-	loadsegment(fs, current->thread.saved_fs);
-	loadsegment(gs, current->thread.saved_gs);
 	ret = KVM86->regs32;
+
+	loadsegment(fs, current->thread.saved_fs);
+	ret->xgs = current->thread.saved_gs;
+
 	return ret;
 }
 
@@ -183,9 +211,9 @@
 	tsk = current;
 	if (tsk->thread.saved_esp0)
 		goto out;
-	tmp  = copy_from_user(&info, v86, VM86_REGS_SIZE1);
-	tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
-		(long)&info.vm86plus - (long)&info.regs.VM86_REGS_PART2);
+	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
+				       offsetof(struct kernel_vm86_struct, vm86plus) -
+				       sizeof(info.regs));
 	ret = -EFAULT;
 	if (tmp)
 		goto out;
@@ -233,9 +261,9 @@
 	if (tsk->thread.saved_esp0)
 		goto out;
 	v86 = (struct vm86plus_struct __user *)regs.ecx;
-	tmp  = copy_from_user(&info, v86, VM86_REGS_SIZE1);
-	tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
-		(long)&info.regs32 - (long)&info.regs.VM86_REGS_PART2);
+	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
+				       offsetof(struct kernel_vm86_struct, regs32) -
+				       sizeof(info.regs));
 	ret = -EFAULT;
 	if (tmp)
 		goto out;
@@ -252,15 +280,15 @@
 static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
 {
 	struct tss_struct *tss;
-	long eax;
 /*
  * make sure the vm86() system call doesn't try to do anything silly
  */
-	info->regs.__null_ds = 0;
-	info->regs.__null_es = 0;
+	info->regs.pt.xds = 0;
+	info->regs.pt.xes = 0;
+	info->regs.pt.xgs = 0;
 
-/* we are clearing fs,gs later just before "jmp resume_userspace",
- * because starting with Linux 2.1.x they aren't no longer saved/restored
+/* we are clearing fs later just before "jmp resume_userspace",
+ * because it is not saved/restored.
  */
 
 /*
@@ -268,10 +296,10 @@
  * has set it up safely, so this makes sure interrupt etc flags are
  * inherited from protected mode.
  */
- 	VEFLAGS = info->regs.eflags;
-	info->regs.eflags &= SAFE_MASK;
-	info->regs.eflags |= info->regs32->eflags & ~SAFE_MASK;
-	info->regs.eflags |= VM_MASK;
+ 	VEFLAGS = info->regs.pt.eflags;
+	info->regs.pt.eflags &= SAFE_MASK;
+	info->regs.pt.eflags |= info->regs32->eflags & ~SAFE_MASK;
+	info->regs.pt.eflags |= VM_MASK;
 
 	switch (info->cpu_type) {
 		case CPU_286:
@@ -294,7 +322,7 @@
 	info->regs32->eax = 0;
 	tsk->thread.saved_esp0 = tsk->thread.esp0;
 	savesegment(fs, tsk->thread.saved_fs);
-	savesegment(gs, tsk->thread.saved_gs);
+	tsk->thread.saved_gs = info->regs32->xgs;
 
 	tss = &per_cpu(init_tss, get_cpu());
 	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
@@ -306,19 +334,18 @@
 	tsk->thread.screen_bitmap = info->screen_bitmap;
 	if (info->flags & VM86_SCREEN_BITMAP)
 		mark_screen_rdonly(tsk->mm);
-	__asm__ __volatile__("xorl %eax,%eax; movl %eax,%fs; movl %eax,%gs\n\t");
-	__asm__ __volatile__("movl %%eax, %0\n" :"=r"(eax));
 
 	/*call audit_syscall_exit since we do not exit via the normal paths */
 	if (unlikely(current->audit_context))
-		audit_syscall_exit(AUDITSC_RESULT(eax), eax);
+		audit_syscall_exit(AUDITSC_RESULT(0), 0);
 
 	__asm__ __volatile__(
 		"movl %0,%%esp\n\t"
 		"movl %1,%%ebp\n\t"
+		"mov  %2, %%fs\n\t"
 		"jmp resume_userspace"
 		: /* no outputs */
-		:"r" (&info->regs), "r" (task_thread_info(tsk)));
+		:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
 	/* we never return here */
 }
 
@@ -348,12 +375,12 @@
 
 static inline void clear_TF(struct kernel_vm86_regs * regs)
 {
-	regs->eflags &= ~TF_MASK;
+	regs->pt.eflags &= ~TF_MASK;
 }
 
 static inline void clear_AC(struct kernel_vm86_regs * regs)
 {
-	regs->eflags &= ~AC_MASK;
+	regs->pt.eflags &= ~AC_MASK;
 }
 
 /* It is correct to call set_IF(regs) from the set_vflags_*
@@ -370,7 +397,7 @@
 static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
 {
 	set_flags(VEFLAGS, eflags, current->thread.v86mask);
-	set_flags(regs->eflags, eflags, SAFE_MASK);
+	set_flags(regs->pt.eflags, eflags, SAFE_MASK);
 	if (eflags & IF_MASK)
 		set_IF(regs);
 	else
@@ -380,7 +407,7 @@
 static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
 {
 	set_flags(VFLAGS, flags, current->thread.v86mask);
-	set_flags(regs->eflags, flags, SAFE_MASK);
+	set_flags(regs->pt.eflags, flags, SAFE_MASK);
 	if (flags & IF_MASK)
 		set_IF(regs);
 	else
@@ -389,7 +416,7 @@
 
 static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
 {
-	unsigned long flags = regs->eflags & RETURN_MASK;
+	unsigned long flags = regs->pt.eflags & RETURN_MASK;
 
 	if (VEFLAGS & VIF_MASK)
 		flags |= IF_MASK;
@@ -493,7 +520,7 @@
 	unsigned long __user *intr_ptr;
 	unsigned long segoffs;
 
-	if (regs->cs == BIOSSEG)
+	if (regs->pt.xcs == BIOSSEG)
 		goto cannot_handle;
 	if (is_revectored(i, &KVM86->int_revectored))
 		goto cannot_handle;
@@ -505,9 +532,9 @@
 	if ((segoffs >> 16) == BIOSSEG)
 		goto cannot_handle;
 	pushw(ssp, sp, get_vflags(regs), cannot_handle);
-	pushw(ssp, sp, regs->cs, cannot_handle);
+	pushw(ssp, sp, regs->pt.xcs, cannot_handle);
 	pushw(ssp, sp, IP(regs), cannot_handle);
-	regs->cs = segoffs >> 16;
+	regs->pt.xcs = segoffs >> 16;
 	SP(regs) -= 6;
 	IP(regs) = segoffs & 0xffff;
 	clear_TF(regs);
@@ -524,7 +551,7 @@
 	if (VMPI.is_vm86pus) {
 		if ( (trapno==3) || (trapno==1) )
 			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
-		do_int(regs, trapno, (unsigned char __user *) (regs->ss << 4), SP(regs));
+		do_int(regs, trapno, (unsigned char __user *) (regs->pt.xss << 4), SP(regs));
 		return 0;
 	}
 	if (trapno !=1)
@@ -560,10 +587,10 @@
 		handle_vm86_trap(regs, 0, 1); \
 	return; } while (0)
 
-	orig_flags = *(unsigned short *)&regs->eflags;
+	orig_flags = *(unsigned short *)&regs->pt.eflags;
 
-	csp = (unsigned char __user *) (regs->cs << 4);
-	ssp = (unsigned char __user *) (regs->ss << 4);
+	csp = (unsigned char __user *) (regs->pt.xcs << 4);
+	ssp = (unsigned char __user *) (regs->pt.xss << 4);
 	sp = SP(regs);
 	ip = IP(regs);
 
@@ -650,7 +677,7 @@
 			SP(regs) += 6;
 		}
 		IP(regs) = newip;
-		regs->cs = newcs;
+		regs->pt.xcs = newcs;
 		CHECK_IF_IN_TRAP;
 		if (data32) {
 			set_vflags_long(newflags, regs);
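The copy_vm86_regs_to_user()/..._from_user() helpers added above replace the old VM86_REGS_SIZE1/2 pointer arithmetic with offsetof(), copying the structure in two pieces around the field whose layout differs between the kernel and user views. A small user-space sketch of the same split-copy idea (plain memcpy stands in for copy_to_user, field names are illustrative):

#include <stddef.h>
#include <string.h>
#include <stdio.h>

struct regs { long ax, bx, skip_me, cx, dx; };

/* Copy 'src' into 'dst' in two pieces, leaving 'skip_me' untouched,
 * using the same offsetof() trick the new vm86 copy helpers use to
 * step over the field that only exists on one side. */
static void copy_around(struct regs *dst, const struct regs *src)
{
	memcpy(dst, src, offsetof(struct regs, skip_me));
	memcpy(&dst->cx, &src->cx, sizeof(*src) - offsetof(struct regs, cx));
}

int main(void)
{
	struct regs src = { 1, 2, 3, 4, 5 }, dst = { 0, 0, 99, 0, 0 };

	copy_around(&dst, &src);
	printf("%ld %ld %ld %ld %ld\n", dst.ax, dst.bx, dst.skip_me, dst.cx, dst.dx);
	/* prints: 1 2 99 4 5 */
	return 0;
}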
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index c6f84a0..56e6ad5c 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -1,13 +1,26 @@
 /* ld script to make i386 Linux kernel
  * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
+ *
+ * Don't define absolute symbols until and unless you know that the symbol
+ * value should remain constant even if the kernel image is relocated at
+ * run time. Absolute symbols are not relocated. If a symbol's value should
+ * change when the kernel is relocated, make the symbol section-relative
+ * and put it inside the section definition.
  */
 
 #define LOAD_OFFSET __PAGE_OFFSET
 
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
 #include <asm/cache.h>
+#include <asm/boot.h>
 
 OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
 OUTPUT_ARCH(i386)
@@ -21,34 +34,35 @@
 }
 SECTIONS
 {
-  . = __KERNEL_START;
+  . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
   phys_startup_32 = startup_32 - LOAD_OFFSET;
   /* read-only */
-  _text = .;			/* Text and read-only data */
   .text : AT(ADDR(.text) - LOAD_OFFSET) {
+  	_text = .;			/* Text and read-only data */
 	*(.text)
 	SCHED_TEXT
 	LOCK_TEXT
 	KPROBES_TEXT
 	*(.fixup)
 	*(.gnu.warning)
-	} :text = 0x9090
-
-  _etext = .;			/* End of text section */
+  	_etext = .;			/* End of text section */
+  } :text = 0x9090
 
   . = ALIGN(16);		/* Exception table */
-  __start___ex_table = .;
-  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
-  __stop___ex_table = .;
+  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
+  	__start___ex_table = .;
+	 *(__ex_table)
+  	__stop___ex_table = .;
+  }
 
   RODATA
 
   . = ALIGN(4);
-  __tracedata_start = .;
   .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {
+  	__tracedata_start = .;
 	*(.tracedata)
+  	__tracedata_end = .;
   }
-  __tracedata_end = .;
 
   /* writeable */
   . = ALIGN(4096);
@@ -57,11 +71,19 @@
 	CONSTRUCTORS
 	} :data
 
+  .paravirtprobe : AT(ADDR(.paravirtprobe) - LOAD_OFFSET) {
+  	__start_paravirtprobe = .;
+	*(.paravirtprobe)
+  	__stop_paravirtprobe = .;
+  }
+
   . = ALIGN(4096);
-  __nosave_begin = .;
-  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
-  . = ALIGN(4096);
-  __nosave_end = .;
+  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+  	__nosave_begin = .;
+	*(.data.nosave)
+  	. = ALIGN(4096);
+  	__nosave_end = .;
+  }
 
   . = ALIGN(4096);
   .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
@@ -75,17 +97,10 @@
 
   /* rarely changed data like cpu maps */
   . = ALIGN(32);
-  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { *(.data.read_mostly) }
-  _edata = .;			/* End of data section */
-
-#ifdef CONFIG_STACK_UNWIND
-  . = ALIGN(4);
-  .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
-	__start_unwind = .;
-  	*(.eh_frame)
-	__end_unwind = .;
+  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
+	*(.data.read_mostly)
+	_edata = .;		/* End of data section */
   }
-#endif
 
   . = ALIGN(THREAD_SIZE);	/* init_task */
   .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
@@ -94,88 +109,102 @@
 
   /* might get freed after init */
   . = ALIGN(4096);
-  __smp_alt_begin = .;
-  __smp_alt_instructions = .;
   .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
+	__smp_alt_begin = .;
+	__smp_alt_instructions = .;
 	*(.smp_altinstructions)
+	__smp_alt_instructions_end = .;
   }
-  __smp_alt_instructions_end = .;
   . = ALIGN(4);
-  __smp_locks = .;
   .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+  	__smp_locks = .;
 	*(.smp_locks)
+	__smp_locks_end = .;
   }
-  __smp_locks_end = .;
   .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
 	*(.smp_altinstr_replacement)
+  	__smp_alt_end = .;
   }
+  /* will be freed after init
+   * Following ALIGN() is required to make sure no other data falls on the
+   * same page where __smp_alt_end is pointing as that page might be freed
+   * after boot. Always make sure that ALIGN() directive is present after
+   * the section which contains __smp_alt_end.
+   */
   . = ALIGN(4096);
-  __smp_alt_end = .;
 
   /* will be freed after init */
   . = ALIGN(4096);		/* Init code and data */
-  __init_begin = .;
   .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
+  	__init_begin = .;
 	_sinittext = .;
 	*(.init.text)
 	_einittext = .;
   }
   .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) }
   . = ALIGN(16);
-  __setup_start = .;
-  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) }
-  __setup_end = .;
-  __initcall_start = .;
+  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
+  	__setup_start = .;
+	*(.init.setup)
+  	__setup_end = .;
+   }
   .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
+  	__initcall_start = .;
 	INITCALLS
+  	__initcall_end = .;
   }
-  __initcall_end = .;
-  __con_initcall_start = .;
   .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
+  	__con_initcall_start = .;
 	*(.con_initcall.init)
+  	__con_initcall_end = .;
   }
-  __con_initcall_end = .;
   SECURITY_INIT
   . = ALIGN(4);
-  __alt_instructions = .;
   .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
+  	__alt_instructions = .;
 	*(.altinstructions)
+	__alt_instructions_end = .;
   }
-  __alt_instructions_end = .; 
   .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
 	*(.altinstr_replacement)
   }
+  . = ALIGN(4);
+  .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
+  	__start_parainstructions = .;
+	*(.parainstructions)
+  	__stop_parainstructions = .;
+  }
   /* .exit.text is discard at runtime, not link time, to deal with references
      from .altinstructions and .eh_frame */
   .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
   .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
   . = ALIGN(4096);
-  __initramfs_start = .;
-  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
-  __initramfs_end = .;
+  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
+	__initramfs_start = .;
+	*(.init.ramfs)
+	__initramfs_end = .;
+  }
   . = ALIGN(L1_CACHE_BYTES);
-  __per_cpu_start = .;
-  .data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
-  __per_cpu_end = .;
+  .data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
+	__per_cpu_start = .;
+	*(.data.percpu)
+	__per_cpu_end = .;
+  }
   . = ALIGN(4096);
-  __init_end = .;
   /* freed after init ends here */
 	
-  __bss_start = .;		/* BSS */
-  .bss.page_aligned : AT(ADDR(.bss.page_aligned) - LOAD_OFFSET) {
-	*(.bss.page_aligned)
-  }
   .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+	__init_end = .;
+	__bss_start = .;		/* BSS */
+	*(.bss.page_aligned)
 	*(.bss)
+	. = ALIGN(4);
+	__bss_stop = .;
+  	_end = . ;
+	/* This is where the kernel creates the early boot page tables */
+	. = ALIGN(4096);
+	pg0 = . ;
   }
-  . = ALIGN(4);
-  __bss_stop = .; 
-
-  _end = . ;
-
-  /* This is where the kernel creates the early boot page tables */
-  . = ALIGN(4096);
-  pg0 = .;
 
   /* Sections to be discarded */
   /DISCARD/ : {
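Most of the vmlinux.lds.S churn above moves symbol assignments such as __per_cpu_start inside their section definitions so the symbols become section-relative and are relocated with the image, per the new comment at the top of the script. From C the convention for such linker-provided markers is unchanged; a hedged sketch of how kernel code typically declares and uses them (illustrative, not taken from this patch):

/* Linker-provided section boundaries (defined in vmlinux.lds.S).  They are
 * declared as incomplete char arrays so only their addresses are used. */
extern char __per_cpu_start[], __per_cpu_end[];

static unsigned long percpu_area_size(void)
{
	return (unsigned long)(__per_cpu_end - __per_cpu_start);
}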
diff --git a/arch/i386/mach-generic/probe.c b/arch/i386/mach-generic/probe.c
index 94b1fd9..a7b3999 100644
--- a/arch/i386/mach-generic/probe.c
+++ b/arch/i386/mach-generic/probe.c
@@ -45,7 +45,9 @@
 			return 0;
 		}
 	}
-	return -ENOENT;
+
+	/* Parsed again by __setup for debug/verbose */
+	return 0;
 }
 early_param("apic", parse_apic);
 
diff --git a/arch/i386/mach-voyager/voyager_cat.c b/arch/i386/mach-voyager/voyager_cat.c
index f50c6c6..943a947 100644
--- a/arch/i386/mach-voyager/voyager_cat.c
+++ b/arch/i386/mach-voyager/voyager_cat.c
@@ -776,7 +776,7 @@
 		for(asic=0; asic < (*modpp)->num_asics; asic++) {
 			int j;
 			voyager_asic_t *asicp = *asicpp 
-				= kmalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count++];*/
+				= kzalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count++];*/
 			voyager_sp_table_t *sp_table;
 			voyager_at_t *asic_table;
 			voyager_jtt_t *jtag_table;
@@ -785,7 +785,6 @@
 				printk("**WARNING** kmalloc failure in cat_init\n");
 				continue;
 			}
-			memset(asicp, 0, sizeof(voyager_asic_t));
 			asicpp = &(asicp->next);
 			asicp->asic_location = asic;
 			sp_table = (voyager_sp_table_t *)(eprom_buf + sp_offset);
@@ -851,8 +850,7 @@
 #endif
 
 		{
-			struct resource *res = kmalloc(sizeof(struct resource),GFP_KERNEL);
-			memset(res, 0, sizeof(struct resource));
+			struct resource *res = kzalloc(sizeof(struct resource),GFP_KERNEL);
 			res->name = kmalloc(128, GFP_KERNEL);
 			sprintf((char *)res->name, "Voyager %s Quad CPI", cat_module_name(i));
 			res->start = qic_addr;
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index f3fea2a..55428e6 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -28,6 +28,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/arch_hooks.h>
+#include <asm/pda.h>
 
 /* TLB state -- visible externally, indexed physically */
 DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
@@ -422,6 +423,7 @@
 	     VOYAGER_SUS_IN_CONTROL_PORT);
 
 	current_thread_info()->cpu = boot_cpu_id;
+	write_pda(cpu_number, boot_cpu_id);
 }
 
 /*
@@ -458,7 +460,7 @@
 	/* external functions not defined in the headers */
 	extern void calibrate_delay(void);
 
-	cpu_init();
+	secondary_cpu_init();
 
 	/* OK, we're in the routine */
 	ack_CPI(VIC_CPU_BOOT_CPI);
@@ -578,6 +580,15 @@
 	/* init_tasks (in sched.c) is indexed logically */
 	stack_start.esp = (void *) idle->thread.esp;
 
+	/* Pre-allocate and initialize the CPU's GDT and PDA so it
+	   doesn't have to do any memory allocation during the
+	   delicate CPU-bringup phase. */
+	if (!init_gdt(cpu, idle)) {
+		printk(KERN_INFO "Couldn't allocate GDT/PDA for CPU %d\n", cpu);
+		cpucount--;
+		return;
+	}
+
 	irq_ctx_init(cpu);
 
 	/* Note: Don't modify initial ss override */
@@ -1963,4 +1974,5 @@
 smp_setup_processor_id(void)
 {
 	current_thread_info()->cpu = hard_smp_processor_id();
+	write_pda(cpu_number, hard_smp_processor_id());
 }
diff --git a/arch/i386/math-emu/fpu_emu.h b/arch/i386/math-emu/fpu_emu.h
index d62b20a..65120f5 100644
--- a/arch/i386/math-emu/fpu_emu.h
+++ b/arch/i386/math-emu/fpu_emu.h
@@ -57,6 +57,7 @@
 #define TAG_Special	Const(2)	/* De-normal, + or - infinity,
 					   or Not a Number */
 #define TAG_Empty	Const(3)	/* empty */
+#define TAG_Error	Const(0x80)	/* probably need to abort */
 
 #define LOADED_DATA	Const(10101)	/* Special st() number to identify
 					   loaded data (not on stack). */
diff --git a/arch/i386/math-emu/fpu_entry.c b/arch/i386/math-emu/fpu_entry.c
index d93f16e..ddf8fa3 100644
--- a/arch/i386/math-emu/fpu_entry.c
+++ b/arch/i386/math-emu/fpu_entry.c
@@ -742,7 +742,8 @@
   S387->fcs &= ~0xf8000000;
   S387->fos |= 0xffff0000;
 #endif /* PECULIAR_486 */
-  __copy_to_user(d, &S387->cwd, 7*4);
+  if (__copy_to_user(d, &S387->cwd, 7*4))
+    return -1;
   RE_ENTRANT_CHECK_ON;
 
   d += 7*4;
diff --git a/arch/i386/math-emu/fpu_system.h b/arch/i386/math-emu/fpu_system.h
index bf26341..a3ae28c 100644
--- a/arch/i386/math-emu/fpu_system.h
+++ b/arch/i386/math-emu/fpu_system.h
@@ -68,6 +68,7 @@
 
 #define FPU_access_ok(x,y,z)	if ( !access_ok(x,y,z) ) \
 				math_abort(FPU_info,SIGSEGV)
+#define FPU_abort		math_abort(FPU_info, SIGSEGV)
 
 #undef FPU_IGNORE_CODE_SEGV
 #ifdef FPU_IGNORE_CODE_SEGV
diff --git a/arch/i386/math-emu/load_store.c b/arch/i386/math-emu/load_store.c
index 85314be..eebd6fb 100644
--- a/arch/i386/math-emu/load_store.c
+++ b/arch/i386/math-emu/load_store.c
@@ -227,6 +227,8 @@
     case 027:      /* fild m64int */
       clear_C1();
       loaded_tag = FPU_load_int64((long long __user *)data_address);
+      if (loaded_tag == TAG_Error)
+	return 0;
       FPU_settag0(loaded_tag);
       break;
     case 030:     /* fstenv  m14/28byte */
diff --git a/arch/i386/math-emu/reg_ld_str.c b/arch/i386/math-emu/reg_ld_str.c
index f06ed41..e976cae 100644
--- a/arch/i386/math-emu/reg_ld_str.c
+++ b/arch/i386/math-emu/reg_ld_str.c
@@ -244,7 +244,8 @@
 
   RE_ENTRANT_CHECK_OFF;
   FPU_access_ok(VERIFY_READ, _s, 8);
-  copy_from_user(&s,_s,8);
+  if (copy_from_user(&s,_s,8))
+    FPU_abort;
   RE_ENTRANT_CHECK_ON;
 
   if (s == 0)
@@ -907,7 +908,8 @@
 
   RE_ENTRANT_CHECK_OFF;
   FPU_access_ok(VERIFY_WRITE,d,8);
-  copy_to_user(d, &tll, 8);
+  if (copy_to_user(d, &tll, 8))
+    FPU_abort;
   RE_ENTRANT_CHECK_ON;
 
   return 1;
@@ -1336,7 +1338,8 @@
       I387.soft.fcs &= ~0xf8000000;
       I387.soft.fos |= 0xffff0000;
 #endif /* PECULIAR_486 */
-      __copy_to_user(d, &control_word, 7*4);
+      if (__copy_to_user(d, &control_word, 7*4))
+	FPU_abort;
       RE_ENTRANT_CHECK_ON;
       d += 0x1c;
     }
@@ -1359,9 +1362,11 @@
   FPU_access_ok(VERIFY_WRITE,d,80);
 
   /* Copy all registers in stack order. */
-  __copy_to_user(d, register_base+offset, other);
+  if (__copy_to_user(d, register_base+offset, other))
+    FPU_abort;
   if ( offset )
-    __copy_to_user(d+other, register_base, offset);
+    if (__copy_to_user(d+other, register_base, offset))
+      FPU_abort;
   RE_ENTRANT_CHECK_ON;
 
   finit();
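The math-emu hunks above stop ignoring the return value of copy_to_user()/__copy_to_user(), which report the number of bytes that could not be copied (non-zero means part of the access faulted). A hedged kernel-style sketch of the usual checking pattern (context and names are illustrative, not from the patch):

#include <linux/uaccess.h>
#include <linux/errno.h>

/* returns 0 on success, -EFAULT if any part of the copy faulted */
static int save_state(void __user *dst, const void *src, size_t len)
{
	if (copy_to_user(dst, src, len))
		return -EFAULT;		/* some bytes were left uncopied */
	return 0;
}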
diff --git a/arch/i386/mm/boot_ioremap.c b/arch/i386/mm/boot_ioremap.c
index 4de11f5..4de95a1 100644
--- a/arch/i386/mm/boot_ioremap.c
+++ b/arch/i386/mm/boot_ioremap.c
@@ -16,6 +16,7 @@
  */
 
 #undef CONFIG_X86_PAE
+#undef CONFIG_PARAVIRT
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index ddbdb03..103b76e 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -168,7 +168,7 @@
 	if (nid && node_has_online_mem(nid))
 		NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
 	else {
-		NODE_DATA(nid) = (pg_data_t *)(__va(min_low_pfn << PAGE_SHIFT));
+		NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(min_low_pfn));
 		min_low_pfn += PFN_UP(sizeof(pg_data_t));
 	}
 }
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 2581575..aaaa4d2 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -22,9 +22,9 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/kprobes.h>
+#include <linux/uaccess.h>
 
 #include <asm/system.h>
-#include <asm/uaccess.h>
 #include <asm/desc.h>
 #include <asm/kdebug.h>
 #include <asm/segment.h>
@@ -167,7 +167,7 @@
 static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
 { 
 	unsigned long limit;
-	unsigned long instr = get_segment_eip (regs, &limit);
+	unsigned char *instr = (unsigned char *)get_segment_eip (regs, &limit);
 	int scan_more = 1;
 	int prefetch = 0; 
 	int i;
@@ -177,9 +177,9 @@
 		unsigned char instr_hi;
 		unsigned char instr_lo;
 
-		if (instr > limit)
+		if (instr > (unsigned char *)limit)
 			break;
-		if (__get_user(opcode, (unsigned char __user *) instr))
+		if (probe_kernel_address(instr, opcode))
 			break; 
 
 		instr_hi = opcode & 0xf0; 
@@ -204,9 +204,9 @@
 		case 0x00:
 			/* Prefetch instruction is 0x0F0D or 0x0F18 */
 			scan_more = 0;
-			if (instr > limit)
+			if (instr > (unsigned char *)limit)
 				break;
-			if (__get_user(opcode, (unsigned char __user *) instr))
+			if (probe_kernel_address(instr, opcode))
 				break;
 			prefetch = (instr_lo == 0xF) &&
 				(opcode == 0x0D || opcode == 0x18);
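The prefetch decoder above, like the Code: dumper in traps.c earlier and the BIOS32 scan in pcbios.c further down, switches from __get_user() on a kernel pointer to probe_kernel_address(), which reads a value that may not be mapped and returns -EFAULT instead of oopsing. A short sketch of the calling pattern (kernel context assumed, names are illustrative):

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Safely fetch one opcode byte from a possibly bogus kernel address. */
static int peek_opcode(const unsigned char *addr, unsigned char *out)
{
	unsigned char c;

	if (probe_kernel_address(addr, c))
		return -EFAULT;		/* address was not readable */
	*out = c;
	return 0;
}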
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index f9f647c..e0fa6cb 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -32,7 +32,7 @@
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	inc_preempt_count();
+	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
@@ -50,26 +50,22 @@
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-#ifdef CONFIG_DEBUG_HIGHMEM
-	if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
-		dec_preempt_count();
-		preempt_check_resched();
-		return;
-	}
-
-	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
-		BUG();
-#endif
 	/*
 	 * Force other mappings to Oops if they'll try to access this pte
 	 * without first remap it.  Keeping stale mappings around is a bad idea
 	 * also, in case the page changes cacheability attributes or becomes
 	 * a protected page in a hypervisor.
 	 */
-	kpte_clear_flush(kmap_pte-idx, vaddr);
+	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+		kpte_clear_flush(kmap_pte-idx, vaddr);
+	else {
+#ifdef CONFIG_DEBUG_HIGHMEM
+		BUG_ON(vaddr < PAGE_OFFSET);
+		BUG_ON(vaddr >= (unsigned long)high_memory);
+#endif
+	}
 
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_enable();
 }
 
 /* This is the same as kmap_atomic() but can map memory that doesn't
@@ -80,7 +76,7 @@
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
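kmap_atomic()/kunmap_atomic() above now use pagefault_disable()/pagefault_enable() instead of raw inc/dec_preempt_count(), making the "atomic mapping implies no page faults" rule explicit. Caller-side usage is unchanged; a hedged kernel-style sketch for this era of the API (illustrative only, the kernel already has helpers like clear_highpage() for this):

#include <linux/highmem.h>
#include <linux/string.h>

/* Zero one highmem page through a short-lived atomic mapping. */
static void clear_one_page(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);	/* page faults now disabled */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);			/* re-enables page faults */
}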
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index 1719a81..34728e4 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -17,6 +17,113 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
+static unsigned long page_table_shareable(struct vm_area_struct *svma,
+				struct vm_area_struct *vma,
+				unsigned long addr, pgoff_t idx)
+{
+	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
+				svma->vm_start;
+	unsigned long sbase = saddr & PUD_MASK;
+	unsigned long s_end = sbase + PUD_SIZE;
+
+	/*
+	 * match the virtual addresses, permission and the alignment of the
+	 * page table page.
+	 */
+	if (pmd_index(addr) != pmd_index(saddr) ||
+	    vma->vm_flags != svma->vm_flags ||
+	    sbase < svma->vm_start || svma->vm_end < s_end)
+		return 0;
+
+	return saddr;
+}
+
+static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+{
+	unsigned long base = addr & PUD_MASK;
+	unsigned long end = base + PUD_SIZE;
+
+	/*
+	 * check on proper vm_flags and page table alignment
+	 */
+	if (vma->vm_flags & VM_MAYSHARE &&
+	    vma->vm_start <= base && end <= vma->vm_end)
+		return 1;
+	return 0;
+}
+
+/*
+ * search for a shareable pmd page for hugetlb.
+ */
+static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+{
+	struct vm_area_struct *vma = find_vma(mm, addr);
+	struct address_space *mapping = vma->vm_file->f_mapping;
+	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
+			vma->vm_pgoff;
+	struct prio_tree_iter iter;
+	struct vm_area_struct *svma;
+	unsigned long saddr;
+	pte_t *spte = NULL;
+
+	if (!vma_shareable(vma, addr))
+		return;
+
+	spin_lock(&mapping->i_mmap_lock);
+	vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
+		if (svma == vma)
+			continue;
+
+		saddr = page_table_shareable(svma, vma, addr, idx);
+		if (saddr) {
+			spte = huge_pte_offset(svma->vm_mm, saddr);
+			if (spte) {
+				get_page(virt_to_page(spte));
+				break;
+			}
+		}
+	}
+
+	if (!spte)
+		goto out;
+
+	spin_lock(&mm->page_table_lock);
+	if (pud_none(*pud))
+		pud_populate(mm, pud, (unsigned long) spte & PAGE_MASK);
+	else
+		put_page(virt_to_page(spte));
+	spin_unlock(&mm->page_table_lock);
+out:
+	spin_unlock(&mapping->i_mmap_lock);
+}
+
+/*
+ * unmap huge page backed by shared pte.
+ *
+ * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
+ * shared (indicated by page_count > 1), unmap is achieved by clearing the pud
+ * and decrementing the refcount. If count == 1, the pte page is not shared.
+ *
+ * called with vma->vm_mm->page_table_lock held.
+ *
+ * returns: 1 successfully unmapped a shared pte page
+ *	    0 the underlying pte page is not shared, or it is the last user
+ */
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	pgd_t *pgd = pgd_offset(mm, *addr);
+	pud_t *pud = pud_offset(pgd, *addr);
+
+	BUG_ON(page_count(virt_to_page(ptep)) == 0);
+	if (page_count(virt_to_page(ptep)) == 1)
+		return 0;
+
+	pud_clear(pud);
+	put_page(virt_to_page(ptep));
+	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
+	return 1;
+}
+
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
@@ -25,8 +132,11 @@
 
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
-	if (pud)
+	if (pud) {
+		if (pud_none(*pud))
+			huge_pmd_share(mm, addr, pud);
 		pte = (pte_t *) pmd_alloc(mm, pud, addr);
+	}
 	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
 
 	return pte;
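huge_pmd_share() above locates another VMA of the same file and translates the faulting address into it via the file page index: idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff going in, and saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + svma->vm_start coming back. A tiny user-space check of that round trip (made-up numbers):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* two mappings of the same file, starting at different file offsets */
	unsigned long vma_start  = 0x40000000UL, vma_pgoff  = 0;	/* maps from file page 0 */
	unsigned long svma_start = 0x50000000UL, svma_pgoff = 0x100;	/* maps from file page 0x100 */
	unsigned long addr = 0x40200000UL;				/* address in the first vma */

	unsigned long idx   = ((addr - vma_start) >> PAGE_SHIFT) + vma_pgoff;
	unsigned long saddr = ((idx - svma_pgoff) << PAGE_SHIFT) + svma_start;

	printf("file page index 0x%lx maps to 0x%lx in the other vma\n", idx, saddr);
	/* prints: file page index 0x200 maps to 0x50100000 in the other vma */
	return 0;
}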
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 1674161..84697df 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -192,8 +192,6 @@
 	return 0;
 }
 
-extern int is_available_memory(efi_memory_desc_t *);
-
 int page_is_ram(unsigned long pagenr)
 {
 	int i;
@@ -699,8 +697,8 @@
 #endif
 #endif
 
-kmem_cache_t *pgd_cache;
-kmem_cache_t *pmd_cache;
+struct kmem_cache *pgd_cache;
+struct kmem_cache *pmd_cache;
 
 void __init pgtable_cache_init(void)
 {
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 8564b6a..ad91528 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -67,11 +67,17 @@
 	return base;
 } 
 
-static void flush_kernel_map(void *dummy) 
+static void flush_kernel_map(void *arg)
 { 
-	/* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
-	if (boot_cpu_data.x86_model >= 4) 
+	unsigned long adr = (unsigned long)arg;
+
+	if (adr && cpu_has_clflush) {
+		int i;
+		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+			asm volatile("clflush (%0)" :: "r" (adr + i));
+	} else if (boot_cpu_data.x86_model >= 4)
 		wbinvd();
+
 	/* Flush all to work around Errata in early athlons regarding 
 	 * large page flushing. 
 	 */
@@ -173,9 +179,9 @@
 	return 0;
 } 
 
-static inline void flush_map(void)
+static inline void flush_map(void *adr)
 {
-	on_each_cpu(flush_kernel_map, NULL, 1, 1);
+	on_each_cpu(flush_kernel_map, adr, 1, 1);
 }
 
 /*
@@ -217,9 +223,13 @@
 	spin_lock_irq(&cpa_lock);
 	list_replace_init(&df_list, &l);
 	spin_unlock_irq(&cpa_lock);
-	flush_map();
-	list_for_each_entry_safe(pg, next, &l, lru)
+	if (!cpu_has_clflush)
+		flush_map(0);
+	list_for_each_entry_safe(pg, next, &l, lru) {
+		if (cpu_has_clflush)
+			flush_map(page_address(pg));
 		__free_page(pg);
+	}
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
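flush_kernel_map() above flushes a single changed page with CLFLUSH, one cache line at a time (stride boot_cpu_data.x86_clflush_size), instead of falling back to a whole-cache wbinvd(). A minimal user-space sketch of that loop (assumes an x86 CPU with CLFLUSH and GCC inline asm; the 64-byte stride is hard-coded here):

#include <stdint.h>

#define PAGE_SIZE	4096
#define CLFLUSH_SIZE	64	/* x86_clflush_size on most current CPUs */

/* Write back and invalidate every cache line covering one page. */
static void flush_one_page(void *addr)
{
	uintptr_t p = (uintptr_t)addr;
	int i;

	for (i = 0; i < PAGE_SIZE; i += CLFLUSH_SIZE)
		asm volatile("clflush (%0)" :: "r" (p + i) : "memory");
}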
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 10126e3..f349eaf 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -95,8 +95,11 @@
 		return;
 	}
 	pte = pte_offset_kernel(pmd, vaddr);
-	/* <pfn,flags> stored as-is, to permit clearing entries */
-	set_pte(pte, pfn_pte(pfn, flags));
+	if (pgprot_val(flags))
+		/* <pfn,flags> stored as-is, to permit clearing entries */
+		set_pte(pte, pfn_pte(pfn, flags));
+	else
+		pte_clear(&init_mm, vaddr, pte);
 
 	/*
 	 * It's enough to flush this one mapping.
@@ -193,7 +196,7 @@
 	return pte;
 }
 
-void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
+void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
 {
 	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
 }
@@ -233,7 +236,7 @@
 		set_page_private(next, (unsigned long)pprev);
 }
 
-void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
 	unsigned long flags;
 
@@ -253,7 +256,7 @@
 }
 
 /* never called when PTRS_PER_PMD > 1 */
-void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
 	unsigned long flags; /* can be called from interrupt context */
 
diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c
index cdfcf97..53ca6e8 100644
--- a/arch/i386/pci/common.c
+++ b/arch/i386/pci/common.c
@@ -20,7 +20,7 @@
 unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
 				PCI_PROBE_MMCONF;
 
-int pci_bf_sort;
+static int pci_bf_sort;
 int pci_routeirq;
 int pcibios_last_bus = -1;
 unsigned long pirq_table_addr;
diff --git a/arch/i386/pci/early.c b/arch/i386/pci/early.c
index 713d6c8..42df4b6 100644
--- a/arch/i386/pci/early.c
+++ b/arch/i386/pci/early.c
@@ -45,6 +45,13 @@
 	outl(val, 0xcfc);
 }
 
+void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val)
+{
+	PDprintk("%x writing to %x: %x\n", slot, offset, val);
+	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
+	outb(val, 0xcfc);
+}
+
 int early_pci_allowed(void)
 {
 	return (pci_probe & (PCI_PROBE_CONF1|PCI_PROBE_NOEARLY)) ==
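write_pci_config_byte() above uses PCI configuration mechanism #1: a 32-bit address word written to port 0xCF8 selects bus/device/function/register, and the data then moves through 0xCFC. A small sketch of just the address encoding (no port I/O, example values only):

#include <stdint.h>
#include <stdio.h>

/* Build the CONFIG_ADDRESS word for mechanism #1: enable bit, bus[7:0],
 * device[4:0], function[2:0] and the register offset. */
static uint32_t pci_conf1_addr(uint8_t bus, uint8_t slot, uint8_t func, uint8_t offset)
{
	return 0x80000000u | (bus << 16) | (slot << 11) | (func << 8) | offset;
}

int main(void)
{
	/* e.g. bus 0, device 0x1f, function 3, register 0x40 */
	printf("0x%08x\n", pci_conf1_addr(0, 0x1f, 3, 0x40));	/* 0x8000fb40 */
	return 0;
}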
diff --git a/arch/i386/pci/fixup.c b/arch/i386/pci/fixup.c
index c1949ff..cde1170 100644
--- a/arch/i386/pci/fixup.c
+++ b/arch/i386/pci/fixup.c
@@ -74,52 +74,6 @@
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, pci_fixup_ncr53c810);
 
-static void __devinit pci_fixup_ide_bases(struct pci_dev *d)
-{
-	int i;
-
-	/*
-	 * PCI IDE controllers use non-standard I/O port decoding, respect it.
-	 */
-	if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE)
-		return;
-	DBG("PCI: IDE base address fixup for %s\n", pci_name(d));
-	for(i=0; i<4; i++) {
-		struct resource *r = &d->resource[i];
-		if ((r->start & ~0x80) == 0x374) {
-			r->start |= 2;
-			r->end = r->start;
-		}
-	}
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
-
-static void __devinit  pci_fixup_ide_trash(struct pci_dev *d)
-{
-	int i;
-
-	/*
-	 * Runs the fixup only for the first IDE controller
-	 * (Shai Fultheim - shai@ftcon.com)
-	 */
-	static int called = 0;
-	if (called)
-		return;
-	called = 1;
-
-	/*
-	 * There exist PCI IDE controllers which have utter garbage
-	 * in first four base registers. Ignore that.
-	 */
-	DBG("PCI: IDE base address trash cleared for %s\n", pci_name(d));
-	for(i=0; i<4; i++)
-		d->resource[i].start = d->resource[i].end = d->resource[i].flags = 0;
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5513, pci_fixup_ide_trash);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, pci_fixup_ide_trash);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_11, pci_fixup_ide_trash);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_9, pci_fixup_ide_trash);
-
 static void __devinit  pci_fixup_latency(struct pci_dev *d)
 {
 	/*
diff --git a/arch/i386/pci/i386.c b/arch/i386/pci/i386.c
index 9858029..43005f0 100644
--- a/arch/i386/pci/i386.c
+++ b/arch/i386/pci/i386.c
@@ -104,16 +104,24 @@
 	/* Depth-First Search on bus tree */
 	list_for_each_entry(bus, bus_list, node) {
 		if ((dev = bus->self)) {
-			for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
+			for (idx = PCI_BRIDGE_RESOURCES;
+			    idx < PCI_NUM_RESOURCES; idx++) {
 				r = &dev->resource[idx];
 				if (!r->flags)
 					continue;
 				pr = pci_find_parent_resource(dev, r);
-				if (!r->start || !pr || request_resource(pr, r) < 0) {
-					printk(KERN_ERR "PCI: Cannot allocate resource region %d of bridge %s\n", idx, pci_name(dev));
-					/* Something is wrong with the region.
-					   Invalidate the resource to prevent child
-					   resource allocations in this range. */
+				if (!r->start || !pr ||
+				    request_resource(pr, r) < 0) {
+					printk(KERN_ERR "PCI: Cannot allocate "
+						"resource region %d "
+						"of bridge %s\n",
+						idx, pci_name(dev));
+					/*
+					 * Something is wrong with the region.
+					 * Invalidate the resource to prevent
+					 * child resource allocations in this
+					 * range.
+					 */
 					r->flags = 0;
 				}
 			}
@@ -131,7 +139,7 @@
 
 	for_each_pci_dev(dev) {
 		pci_read_config_word(dev, PCI_COMMAND, &command);
-		for(idx = 0; idx < 6; idx++) {
+		for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
 			r = &dev->resource[idx];
 			if (r->parent)		/* Already allocated */
 				continue;
@@ -142,11 +150,15 @@
 			else
 				disabled = !(command & PCI_COMMAND_MEMORY);
 			if (pass == disabled) {
-				DBG("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n",
+				DBG("PCI: Resource %08lx-%08lx "
+				    "(f=%lx, d=%d, p=%d)\n",
 				    r->start, r->end, r->flags, disabled, pass);
 				pr = pci_find_parent_resource(dev, r);
 				if (!pr || request_resource(pr, r) < 0) {
-					printk(KERN_ERR "PCI: Cannot allocate resource region %d of device %s\n", idx, pci_name(dev));
+					printk(KERN_ERR "PCI: Cannot allocate "
+						"resource region %d "
+						"of device %s\n",
+						idx, pci_name(dev));
 					/* We'll assign a new address later */
 					r->end -= r->start;
 					r->start = 0;
@@ -156,12 +168,16 @@
 		if (!pass) {
 			r = &dev->resource[PCI_ROM_RESOURCE];
 			if (r->flags & IORESOURCE_ROM_ENABLE) {
-				/* Turn the ROM off, leave the resource region, but keep it unregistered. */
+				/* Turn the ROM off, leave the resource region,
+				 * but keep it unregistered. */
 				u32 reg;
-				DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
+				DBG("PCI: Switching off ROM of %s\n",
+					pci_name(dev));
 				r->flags &= ~IORESOURCE_ROM_ENABLE;
-				pci_read_config_dword(dev, dev->rom_base_reg, &reg);
-				pci_write_config_dword(dev, dev->rom_base_reg, reg & ~PCI_ROM_ADDRESS_ENABLE);
+				pci_read_config_dword(dev,
+						dev->rom_base_reg, &reg);
+				pci_write_config_dword(dev, dev->rom_base_reg,
+						reg & ~PCI_ROM_ADDRESS_ENABLE);
 			}
 		}
 	}
@@ -173,9 +189,11 @@
 	struct resource *r, *pr;
 
 	if (!(pci_probe & PCI_ASSIGN_ROMS)) {
-		/* Try to use BIOS settings for ROMs, otherwise let
-		   pci_assign_unassigned_resources() allocate the new
-		   addresses. */
+		/*
+		 * Try to use BIOS settings for ROMs, otherwise let
+		 * pci_assign_unassigned_resources() allocate the new
+		 * addresses.
+		 */
 		for_each_pci_dev(dev) {
 			r = &dev->resource[PCI_ROM_RESOURCE];
 			if (!r->flags || !r->start)
@@ -215,9 +233,9 @@
 
 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
 	old_cmd = cmd;
-	for(idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
+	for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
 		/* Only set up the requested stuff */
-		if (!(mask & (1<<idx)))
+		if (!(mask & (1 << idx)))
 			continue;
 
 		r = &dev->resource[idx];
@@ -227,7 +245,9 @@
 				(!(r->flags & IORESOURCE_ROM_ENABLE)))
 			continue;
 		if (!r->start && r->end) {
-			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
+			printk(KERN_ERR "PCI: Device %s not available "
+				"because of resource collisions\n",
+				pci_name(dev));
 			return -EINVAL;
 		}
 		if (r->flags & IORESOURCE_IO)
@@ -236,7 +256,8 @@
 			cmd |= PCI_COMMAND_MEMORY;
 	}
 	if (cmd != old_cmd) {
-		printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
+		printk("PCI: Enabling device %s (%04x -> %04x)\n",
+			pci_name(dev), old_cmd, cmd);
 		pci_write_config_word(dev, PCI_COMMAND, cmd);
 	}
 	return 0;
@@ -258,7 +279,8 @@
 		lat = pcibios_max_latency;
 	else
 		return;
-	printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat);
+	printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n",
+		pci_name(dev), lat);
 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
 }
 
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index 6916399..f2cb942 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -543,6 +543,12 @@
 		case PCI_DEVICE_ID_INTEL_ICH8_2:
 		case PCI_DEVICE_ID_INTEL_ICH8_3:
 		case PCI_DEVICE_ID_INTEL_ICH8_4:
+		case PCI_DEVICE_ID_INTEL_ICH9_0:
+		case PCI_DEVICE_ID_INTEL_ICH9_1:
+		case PCI_DEVICE_ID_INTEL_ICH9_2:
+		case PCI_DEVICE_ID_INTEL_ICH9_3:
+		case PCI_DEVICE_ID_INTEL_ICH9_4:
+		case PCI_DEVICE_ID_INTEL_ICH9_5:
 			r->name = "PIIX/ICH";
 			r->get = pirq_piix_get;
 			r->set = pirq_piix_set;
@@ -758,7 +764,7 @@
 	DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for %04x:%04x\n",
 	    rt->rtr_vendor, rt->rtr_device);
 
-	pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn);
+	pirq_router_dev = pci_get_bus_and_slot(rt->rtr_bus, rt->rtr_devfn);
 	if (!pirq_router_dev) {
 		DBG(KERN_DEBUG "PCI: Interrupt router not found at "
 			"%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
@@ -778,6 +784,8 @@
 		pirq_router_dev->vendor,
 		pirq_router_dev->device,
 		pci_name(pirq_router_dev));
+
+	/* The device remains referenced for the kernel lifetime */
 }
 
 static struct irq_info *pirq_get_info(struct pci_dev *dev)
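The hunk above replaces pci_find_slot() with pci_get_bus_and_slot(), which returns a referenced struct pci_dev; normally the caller drops that reference with pci_dev_put() when done (the router code deliberately keeps it for the kernel's lifetime, as the added comment notes). A hedged kernel-style sketch of the usual get/put pattern (illustrative only):

#include <linux/pci.h>
#include <linux/errno.h>

static int read_router_vendor(unsigned int bus, unsigned int devfn, u16 *vendor)
{
	struct pci_dev *dev = pci_get_bus_and_slot(bus, devfn);

	if (!dev)
		return -ENODEV;
	pci_read_config_word(dev, PCI_VENDOR_ID, vendor);
	pci_dev_put(dev);		/* drop the reference taken by the lookup */
	return 0;
}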
diff --git a/arch/i386/pci/pcbios.c b/arch/i386/pci/pcbios.c
index ed1512a..5f51934 100644
--- a/arch/i386/pci/pcbios.c
+++ b/arch/i386/pci/pcbios.c
@@ -5,6 +5,7 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 #include "pci.h"
 #include "pci-functions.h"
 
@@ -314,6 +315,10 @@
 	for (check = (union bios32 *) __va(0xe0000);
 	     check <= (union bios32 *) __va(0xffff0);
 	     ++check) {
+		long sig;
+		if (probe_kernel_address(&check->fields.signature, sig))
+			continue;
+
 		if (check->fields.signature != BIOS32_SIGNATURE)
 			continue;
 		length = check->fields.length * 16;
@@ -331,11 +336,13 @@
 		}
 		DBG("PCI: BIOS32 Service Directory structure at 0x%p\n", check);
 		if (check->fields.entry >= 0x100000) {
-			printk("PCI: BIOS32 entry (0x%p) in high memory, cannot use.\n", check);
+			printk("PCI: BIOS32 entry (0x%p) in high memory, "
+					"cannot use.\n", check);
 			return NULL;
 		} else {
 			unsigned long bios32_entry = check->fields.entry;
-			DBG("PCI: BIOS32 Service Directory entry at 0x%lx\n", bios32_entry);
+			DBG("PCI: BIOS32 Service Directory entry at 0x%lx\n",
+					bios32_entry);
 			bios32_indirect.address = bios32_entry + PAGE_OFFSET;
 			if (check_pcibios())
 				return &pci_bios_access;
diff --git a/arch/i386/power/Makefile b/arch/i386/power/Makefile
index 8cfa4e8..2de7bbf 100644
--- a/arch/i386/power/Makefile
+++ b/arch/i386/power/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_PM)		+= cpu.o
-obj-$(CONFIG_SOFTWARE_SUSPEND)	+= swsusp.o
+obj-$(CONFIG_SOFTWARE_SUSPEND)	+= swsusp.o suspend.o
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index 5a1abef..2c15500 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -26,8 +26,8 @@
 	/*
 	 * descriptor tables
 	 */
- 	store_gdt(&ctxt->gdt_limit);
- 	store_idt(&ctxt->idt_limit);
+ 	store_gdt(&ctxt->gdt);
+ 	store_idt(&ctxt->idt);
  	store_tr(ctxt->tr);
 
 	/*
@@ -99,8 +99,8 @@
 	 * now restore the descriptor tables to their proper values
 	 * ltr is done i fix_processor_context().
 	 */
- 	load_gdt(&ctxt->gdt_limit);
- 	load_idt(&ctxt->idt_limit);
+ 	load_gdt(&ctxt->gdt);
+ 	load_idt(&ctxt->idt);
 
 	/*
 	 * segment registers
diff --git a/arch/i386/power/suspend.c b/arch/i386/power/suspend.c
new file mode 100644
index 0000000..db5e98d
--- /dev/null
+++ b/arch/i386/power/suspend.c
@@ -0,0 +1,158 @@
+/*
+ * Suspend support specific for i386 - temporary page tables
+ *
+ * Distribute under GPLv2
+ *
+ * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ */
+
+#include <linux/suspend.h>
+#include <linux/bootmem.h>
+
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+/* Defined in arch/i386/power/swsusp.S */
+extern int restore_image(void);
+
+/* Pointer to the temporary resume page tables */
+pgd_t *resume_pg_dir;
+
+/* The following three functions are based on the analogous code in
+ * arch/i386/mm/init.c
+ */
+
+/*
+ * Create a middle page table on a resume-safe page and put a pointer to it in
+ * the given global directory entry.  This only returns the gd entry
+ * in non-PAE compilation mode, since the middle layer is folded.
+ */
+static pmd_t *resume_one_md_table_init(pgd_t *pgd)
+{
+	pud_t *pud;
+	pmd_t *pmd_table;
+
+#ifdef CONFIG_X86_PAE
+	pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
+	if (!pmd_table)
+		return NULL;
+
+	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+	pud = pud_offset(pgd, 0);
+
+	BUG_ON(pmd_table != pmd_offset(pud, 0));
+#else
+	pud = pud_offset(pgd, 0);
+	pmd_table = pmd_offset(pud, 0);
+#endif
+
+	return pmd_table;
+}
+
+/*
+ * Create a page table on a resume-safe page and place a pointer to it in
+ * a middle page directory entry.
+ */
+static pte_t *resume_one_page_table_init(pmd_t *pmd)
+{
+	if (pmd_none(*pmd)) {
+		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
+		if (!page_table)
+			return NULL;
+
+		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
+
+		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
+
+		return page_table;
+	}
+
+	return pte_offset_kernel(pmd, 0);
+}
+
+/*
+ * This maps the physical memory to kernel virtual address space, a total
+ * of max_low_pfn pages, by creating page tables starting from address
+ * PAGE_OFFSET.  The page tables are allocated out of resume-safe pages.
+ */
+static int resume_physical_mapping_init(pgd_t *pgd_base)
+{
+	unsigned long pfn;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	int pgd_idx, pmd_idx;
+
+	pgd_idx = pgd_index(PAGE_OFFSET);
+	pgd = pgd_base + pgd_idx;
+	pfn = 0;
+
+	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
+		pmd = resume_one_md_table_init(pgd);
+		if (!pmd)
+			return -ENOMEM;
+
+		if (pfn >= max_low_pfn)
+			continue;
+
+		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
+			if (pfn >= max_low_pfn)
+				break;
+
+			/* Map with big pages if possible, otherwise create
+			 * normal page tables.
+			 * NOTE: We can mark everything as executable here
+			 */
+			if (cpu_has_pse) {
+				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
+				pfn += PTRS_PER_PTE;
+			} else {
+				pte_t *max_pte;
+
+				pte = resume_one_page_table_init(pmd);
+				if (!pte)
+					return -ENOMEM;
+
+				max_pte = pte + PTRS_PER_PTE;
+				for (; pte < max_pte; pte++, pfn++) {
+					if (pfn >= max_low_pfn)
+						break;
+
+					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+				}
+			}
+		}
+	}
+	return 0;
+}
+
+static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
+{
+#ifdef CONFIG_X86_PAE
+	int i;
+
+	/* Init entries of the first-level page table to the zero page */
+	for (i = 0; i < PTRS_PER_PGD; i++)
+		set_pgd(pg_dir + i,
+			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
+#endif
+}
+
+int swsusp_arch_resume(void)
+{
+	int error;
+
+	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+	if (!resume_pg_dir)
+		return -ENOMEM;
+
+	resume_init_first_level_page_table(resume_pg_dir);
+	error = resume_physical_mapping_init(resume_pg_dir);
+	if (error)
+		return error;
+
+	/* We have got enough memory and from now on we cannot recover */
+	restore_image();
+	return 0;
+}
diff --git a/arch/i386/power/swsusp.S b/arch/i386/power/swsusp.S
index 8a2b50a..53662e0 100644
--- a/arch/i386/power/swsusp.S
+++ b/arch/i386/power/swsusp.S
@@ -28,8 +28,9 @@
 	call swsusp_save
 	ret
 
-ENTRY(swsusp_arch_resume)
-	movl	$swsusp_pg_dir-__PAGE_OFFSET, %ecx
+ENTRY(restore_image)
+	movl	resume_pg_dir, %ecx
+	subl	$__PAGE_OFFSET, %ecx
 	movl	%ecx, %cr3
 
 	movl	restore_pblist, %edx
@@ -51,6 +52,10 @@
 	.p2align 4,,7
 
 done:
+	/* go back to the original page tables */
+	movl	$swapper_pg_dir, %ecx
+	subl	$__PAGE_OFFSET, %ecx
+	movl	%ecx, %cr3
 	/* Flush TLB, including "global" things (vmalloc) */
 	movl	mmu_cr4_features, %eax
 	movl	%eax, %edx
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 683b12c..75d8397 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -434,6 +434,29 @@
 
 source "drivers/sn/Kconfig"
 
+config KEXEC
+	bool "kexec system call (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
+	help
+	  kexec is a system call that implements the ability to shut down your
+	  current kernel, and to start another kernel.  It is like a reboot
+	  but it is independent of the system firmware.  And like a reboot
+	  you can start any kernel with it, not just Linux.
+
+	  The name comes from the similarity to the exec system call.
+
+	  It is an ongoing process to be certain the hardware in a machine
+	  is properly shut down, so do not be surprised if this code does not
+	  initially work for you.  It may help to enable device hotplugging
+	  support.  As of this writing the exact hardware interface is
+	  strongly in flux, so no good recommendation can be made.
+
+config CRASH_DUMP
+	bool "kernel crash dumps (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && IA64_MCA_RECOVERY && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
+	help
+	  Generate crash dump after being started by kexec.
+
 source "drivers/firmware/Kconfig"
 
 source "fs/Kconfig.binfmt"
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index db8e1fc..ce49fe3 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -75,7 +75,7 @@
 ** If a device prefetches beyond the end of a valid pdir entry, it will cause
 ** a hard failure, ie. MCA.  Version 3.0 and later of the zx1 LBA should
 ** disconnect on 4k boundaries and prevent such issues.  If the device is
-** particularly agressive, this option will keep the entire pdir valid such
+** particularly aggressive, this option will keep the entire pdir valid such
 ** that prefetching will hit a valid address.  This could severely impact
 ** error containment, and is therefore off by default.  The page that is
 ** used for spill-over is poisoned, so that should help debugging somewhat.
@@ -258,10 +258,10 @@
 
 /*
 ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
-** (or rather not merge) DMA's into managable chunks.
+** (or rather not merge) DMAs into manageable chunks.
 ** On parisc, this is more of the software/tuning constraint
-** rather than the HW. I/O MMU allocation alogorithms can be
-** faster with smaller size is (to some degree).
+** rather than the HW. I/O MMU allocation algorithms can be
+** faster with smaller sizes (to some degree).
 */
 #define DMA_CHUNK_SIZE  (BITS_PER_LONG*iovp_size)
 
@@ -1672,15 +1672,13 @@
 	 * SAC (single address cycle) addressable, so allocate a
 	 * pseudo-device to enforce that.
 	 */
-	sac = kmalloc(sizeof(*sac), GFP_KERNEL);
+	sac = kzalloc(sizeof(*sac), GFP_KERNEL);
 	if (!sac)
 		panic(PFX "Couldn't allocate struct pci_dev");
-	memset(sac, 0, sizeof(*sac));
 
-	controller = kmalloc(sizeof(*controller), GFP_KERNEL);
+	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
 	if (!controller)
 		panic(PFX "Couldn't allocate struct pci_controller");
-	memset(controller, 0, sizeof(*controller));
 
 	controller->iommu = ioc;
 	sac->sysdata = controller;
@@ -1737,12 +1735,10 @@
 	struct ioc *ioc;
 	struct ioc_iommu *info;
 
-	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
+	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
 	if (!ioc)
 		return NULL;
 
-	memset(ioc, 0, sizeof(*ioc));
-
 	ioc->next = ioc_list;
 	ioc_list = ioc;
 
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index caab986..1f16ebb9 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -209,7 +209,7 @@
 }
 #endif
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *private_)
 {
 	printk(KERN_ERR "simserial: do_softint called\n");
 }
@@ -684,12 +684,11 @@
 		*ret_info = sstate->info;
 		return 0;
 	}
-	info = kmalloc(sizeof(struct async_struct), GFP_KERNEL);
+	info = kzalloc(sizeof(struct async_struct), GFP_KERNEL);
 	if (!info) {
 		sstate->count--;
 		return -ENOMEM;
 	}
-	memset(info, 0, sizeof(struct async_struct));
 	init_waitqueue_head(&info->open_wait);
 	init_waitqueue_head(&info->close_wait);
 	init_waitqueue_head(&info->delta_msr_wait);
@@ -698,7 +697,7 @@
 	info->flags = sstate->flags;
 	info->xmit_fifo_size = sstate->xmit_fifo_size;
 	info->line = line;
-	INIT_WORK(&info->work, do_softint, info);
+	INIT_WORK(&info->work, do_softint);
 	info->state = sstate;
 	if (sstate->info) {
 		kfree(info);
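
The simserial, mca and smpboot changes in this merge are instances of the 2.6.20 workqueue API conversion: a work handler now receives the struct work_struct itself rather than an opaque data pointer, and recovers its private state with container_of(). A minimal sketch of the new pattern, with hypothetical driver names, might look like this:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_port {
	struct work_struct softint_work;	/* embedded in the private struct */
	int line;
};

static void my_softint(struct work_struct *work)
{
	/* recover the containing object from the work_struct pointer */
	struct my_port *port = container_of(work, struct my_port, softint_work);

	pr_debug("softint on line %d\n", port->line);
}

static void my_port_init(struct my_port *port)
{
	/* old API was: INIT_WORK(&port->softint_work, my_softint, port); */
	INIT_WORK(&port->softint_work, my_softint);
	schedule_work(&port->softint_work);
}
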
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index daa6b91..578737e 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -91,7 +91,7 @@
 	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
 	 * tables.  IA-32 segmentation is used to protect against IA-32 accesses to them.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -117,7 +117,7 @@
 	 * code is locked in specific gate page, which is pointed by pretcode
 	 * when setup_frame_ia32
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -142,7 +142,7 @@
 	 * Install LDT as anonymous memory.  This gives us all-zero segment descriptors
 	 * until a task modifies them via modify_ldt().
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -214,7 +214,7 @@
 		bprm->loader += stack_base;
 	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt)
 		return -ENOMEM;
 
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index c187743..6af400a 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -249,7 +249,7 @@
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
 	{
-		extern kmem_cache_t *partial_page_cachep;
+		extern struct kmem_cache *partial_page_cachep;
 
 		partial_page_cachep = kmem_cache_create("partial_page_cache",
 							sizeof(struct partial_page), 0, 0,
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index 703a67c..cfa0bc0 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -330,8 +330,6 @@
 void ia64_elf32_init(struct pt_regs *regs);
 #define ELF_PLAT_INIT(_r, load_addr)	ia64_elf32_init(_r)
 
-#define elf_addr_t	u32
-
 /* This macro yields a bitmask that programs can use to figure out
    what instruction set this CPU supports.  */
 #define ELF_HWCAP	0
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 9d6a3f2..a4a6e14 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -254,7 +254,7 @@
 }
 
 /* SLAB cache for partial_page structures */
-kmem_cache_t *partial_page_cachep;
+struct kmem_cache *partial_page_cachep;
 
 /*
  * init partial_page_list.
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index cfa099b..8ae384e 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -28,6 +28,7 @@
 obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
 obj-$(CONFIG_IA64_MCA_RECOVERY)	+= mca_recovery.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o jprobes.o
+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o crash.o
 obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR)	+= uncached.o
 obj-$(CONFIG_AUDIT)		+= audit.o
 obj-$(CONFIG_PCI_MSI)		+= msi_ia64.o
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index 86faf22..088f130 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -68,7 +68,8 @@
 
 	dprintk("processor_get_pstate\n");
 
-	retval = ia64_pal_get_pstate(&pstate_index);
+	retval = ia64_pal_get_pstate(&pstate_index,
+	                             PAL_GET_PSTATE_TYPE_INSTANT);
 	*value = (u32) pstate_index;
 
 	if (retval)
@@ -91,7 +92,7 @@
 	dprintk("extract_clock\n");
 
 	for (i = 0; i < data->acpi_data.state_count; i++) {
-		if (value >= data->acpi_data.states[i].control)
+		if (value == data->acpi_data.states[i].status)
 			return data->acpi_data.states[i].core_frequency;
 	}
 	return data->acpi_data.states[i-1].core_frequency;
@@ -117,11 +118,7 @@
 		goto migrate_end;
 	}
 
-	/*
-	 * processor_get_pstate gets the average frequency since the
-	 * last get. So, do two PAL_get_freq()...
-	 */
-	ret = processor_get_pstate(&value);
+	/* processor_get_pstate gets the instantaneous frequency */
 	ret = processor_get_pstate(&value);
 
 	if (ret) {
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
new file mode 100644
index 0000000..0aabedf
--- /dev/null
+++ b/arch/ia64/kernel/crash.c
@@ -0,0 +1,245 @@
+/*
+ * arch/ia64/kernel/crash.c
+ *
+ * Architecture specific (ia64) functions for kexec based crash dumps.
+ *
+ * Created by: Khalid Aziz <khalid.aziz@hp.com>
+ * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2005 Intel Corp	Zou Nan hai <nanhai.zou@intel.com>
+ *
+ */
+#include <linux/smp.h>
+#include <linux/delay.h>
+#include <linux/crash_dump.h>
+#include <linux/bootmem.h>
+#include <linux/kexec.h>
+#include <linux/elfcore.h>
+#include <linux/sysctl.h>
+#include <linux/init.h>
+
+#include <asm/kdebug.h>
+#include <asm/mca.h>
+#include <asm/uaccess.h>
+
+int kdump_status[NR_CPUS];
+atomic_t kdump_cpu_freezed;
+atomic_t kdump_in_progress;
+int kdump_on_init = 1;
+ssize_t
+copy_oldmem_page(unsigned long pfn, char *buf,
+		size_t csize, unsigned long offset, int userbuf)
+{
+	void  *vaddr;
+
+	if (!csize)
+		return 0;
+	vaddr = __va(pfn<<PAGE_SHIFT);
+	if (userbuf) {
+		if (copy_to_user(buf, (vaddr + offset), csize)) {
+			return -EFAULT;
+		}
+	} else
+		memcpy(buf, (vaddr + offset), csize);
+	return csize;
+}
+
+static inline Elf64_Word
+*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
+		size_t data_len)
+{
+	struct elf_note *note = (struct elf_note *)buf;
+	note->n_namesz = strlen(name) + 1;
+	note->n_descsz = data_len;
+	note->n_type   = type;
+	buf += (sizeof(*note) + 3)/4;
+	memcpy(buf, name, note->n_namesz);
+	buf += (note->n_namesz + 3)/4;
+	memcpy(buf, data, data_len);
+	buf += (data_len + 3)/4;
+	return buf;
+}
+
+static void
+final_note(void *buf)
+{
+	memset(buf, 0, sizeof(struct elf_note));
+}
+
+extern void ia64_dump_cpu_regs(void *);
+
+static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);
+
+void
+crash_save_this_cpu(void)
+{
+	void *buf;
+	unsigned long cfm, sof, sol;
+
+	int cpu = smp_processor_id();
+	struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);
+
+	elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg);
+	memset(prstatus, 0, sizeof(*prstatus));
+	prstatus->pr_pid = current->pid;
+
+	ia64_dump_cpu_regs(dst);
+	cfm = dst[43];
+	sol = (cfm >> 7) & 0x7f;
+	sof = cfm & 0x7f;
+	dst[46] = (unsigned long)ia64_rse_skip_regs((unsigned long *)dst[46],
+			sof - sol);
+
+	buf = (u64 *) per_cpu_ptr(crash_notes, cpu);
+	if (!buf)
+		return;
+	buf = append_elf_note(buf, "CORE", NT_PRSTATUS, prstatus,
+			sizeof(*prstatus));
+	final_note(buf);
+}
+
+static int
+kdump_wait_cpu_freeze(void)
+{
+	int cpu_num = num_online_cpus() - 1;
+	int timeout = 1000;
+	while (timeout-- > 0) {
+		if (atomic_read(&kdump_cpu_freezed) == cpu_num)
+			return 0;
+		udelay(1000);
+	}
+	return 1;
+}
+
+void
+machine_crash_shutdown(struct pt_regs *pt)
+{
+	/* This function is only called after the system
+	 * has panicked or is otherwise in a critical state.
+	 * The minimum amount of code to allow a kexec'd kernel
+	 * to run successfully needs to happen here.
+	 *
+	 * In practice this means shooting down the other cpus in
+	 * an SMP system.
+	 */
+	kexec_disable_iosapic();
+#ifdef CONFIG_SMP
+	kdump_smp_send_stop();
+	if (kdump_wait_cpu_freeze() && kdump_on_init) {
+		// not all CPUs responded to the IPI; send INIT to freeze them
+		kdump_smp_send_init();
+	}
+#endif
+}
+
+static void
+machine_kdump_on_init(void)
+{
+	local_irq_disable();
+	kexec_disable_iosapic();
+	machine_kexec(ia64_kimage);
+}
+
+void
+kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
+{
+	int cpuid;
+	local_irq_disable();
+	cpuid = smp_processor_id();
+	crash_save_this_cpu();
+	current->thread.ksp = (__u64)info->sw - 16;
+	atomic_inc(&kdump_cpu_freezed);
+	kdump_status[cpuid] = 1;
+	mb();
+	if (cpuid == 0) {
+		for (;;)
+			cpu_relax();
+	} else
+		ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
+}
+
+static int
+kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
+{
+	struct ia64_mca_notify_die *nd;
+	struct die_args *args = data;
+
+	if (!kdump_on_init)
+		return NOTIFY_DONE;
+
+	if (val != DIE_INIT_MONARCH_ENTER &&
+	    val != DIE_INIT_SLAVE_ENTER &&
+	    val != DIE_MCA_RENDZVOUS_LEAVE &&
+	    val != DIE_MCA_MONARCH_LEAVE)
+		return NOTIFY_DONE;
+
+	nd = (struct ia64_mca_notify_die *)args->err;
+	/* Reason code 1 means machine check rendezvous */
+	if ((val == DIE_INIT_MONARCH_ENTER || val == DIE_INIT_SLAVE_ENTER) &&
+		 nd->sos->rv_rc == 1)
+		return NOTIFY_DONE;
+
+	switch (val) {
+		case DIE_INIT_MONARCH_ENTER:
+			machine_kdump_on_init();
+			break;
+		case DIE_INIT_SLAVE_ENTER:
+			unw_init_running(kdump_cpu_freeze, NULL);
+			break;
+		case DIE_MCA_RENDZVOUS_LEAVE:
+			if (atomic_read(&kdump_in_progress))
+				unw_init_running(kdump_cpu_freeze, NULL);
+			break;
+		case DIE_MCA_MONARCH_LEAVE:
+			/* die_register->signr indicates whether MCA is recoverable */
+			if (!args->signr)
+				machine_kdump_on_init();
+			break;
+	}
+	return NOTIFY_DONE;
+}
+
+#ifdef CONFIG_SYSCTL
+static ctl_table kdump_on_init_table[] = {
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "kdump_on_init",
+		.data = &kdump_on_init,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
+	{ .ctl_name = 0 }
+};
+
+static ctl_table sys_table[] = {
+	{
+	  .ctl_name = CTL_KERN,
+	  .procname = "kernel",
+	  .mode = 0555,
+	  .child = kdump_on_init_table,
+	},
+	{ .ctl_name = 0 }
+};
+#endif
+
+static int
+machine_crash_setup(void)
+{
+	char *from = strstr(saved_command_line, "elfcorehdr=");
+	static struct notifier_block kdump_init_notifier_nb = {
+		.notifier_call = kdump_init_notifier,
+	};
+	int ret;
+	if (from)
+		elfcorehdr_addr = memparse(from+11, &from);
+	saved_max_pfn = (unsigned long)-1;
+	if ((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
+		return ret;
+#ifdef CONFIG_SYSCTL
+	register_sysctl_table(sys_table, 0);
+#endif
+	return 0;
+}
+
+__initcall(machine_crash_setup);
+
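
crash.c above serializes each CPU's register state as an ELF note: a header followed by the name and the descriptor, each padded to a 4-byte boundary, which is what the repeated (x + 3)/4 arithmetic in append_elf_note() implements. A hedged userspace sketch of the same size calculation, useful when reasoning about how large the per-CPU crash_notes buffer must be, could be:

#include <elf.h>
#include <stdio.h>
#include <string.h>
#include <sys/procfs.h>		/* struct elf_prstatus */

/* Bytes one ELF note occupies in a notes buffer: header + name + desc,
 * each rounded up to a 4-byte boundary (mirrors append_elf_note() above). */
static size_t elf_note_size(const char *name, size_t desc_len)
{
	size_t namesz = strlen(name) + 1;

	return sizeof(Elf64_Nhdr)
		+ ((namesz + 3) & ~(size_t)3)
		+ ((desc_len + 3) & ~(size_t)3);
}

int main(void)
{
	/* the "CORE"/NT_PRSTATUS note written for each CPU */
	printf("%zu bytes per CPU\n",
	       elf_note_size("CORE", sizeof(struct elf_prstatus)));
	return 0;
}
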
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index bb8770a..0b25a7d 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -26,6 +26,7 @@
 #include <linux/types.h>
 #include <linux/time.h>
 #include <linux/efi.h>
+#include <linux/kexec.h>
 
 #include <asm/io.h>
 #include <asm/kregs.h>
@@ -41,7 +42,7 @@
 struct efi efi;
 EXPORT_SYMBOL(efi);
 static efi_runtime_services_t *runtime;
-static unsigned long mem_limit = ~0UL, max_addr = ~0UL;
+static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
 
 #define efi_call_virt(f, args...)	(*(f))(args)
 
@@ -224,7 +225,7 @@
 }
 
 static int
-is_available_memory (efi_memory_desc_t *md)
+is_memory_available (efi_memory_desc_t *md)
 {
 	if (!(md->attribute & EFI_MEMORY_WB))
 		return 0;
@@ -421,6 +422,8 @@
 			mem_limit = memparse(cp + 4, &cp);
 		} else if (memcmp(cp, "max_addr=", 9) == 0) {
 			max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
+		} else if (memcmp(cp, "min_addr=", 9) == 0) {
+			min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
 		} else {
 			while (*cp != ' ' && *cp)
 				++cp;
@@ -428,6 +431,8 @@
 				++cp;
 		}
 	}
+	if (min_addr != 0UL)
+		printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20);
 	if (max_addr != ~0UL)
 		printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
 
@@ -887,14 +892,15 @@
 			}
 			contig_high = GRANULEROUNDDOWN(contig_high);
 		}
-		if (!is_available_memory(md) || md->type == EFI_LOADER_DATA)
+		if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
 			continue;
 
 		/* Round ends inward to granule boundaries */
 		as = max(contig_low, md->phys_addr);
 		ae = min(contig_high, efi_md_end(md));
 
-		/* keep within max_addr= command line arg */
+		/* keep within max_addr= and min_addr= command line arg */
+		as = max(as, min_addr);
 		ae = min(ae, max_addr);
 		if (ae <= as)
 			continue;
@@ -962,7 +968,7 @@
 			}
 			contig_high = GRANULEROUNDDOWN(contig_high);
 		}
-		if (!is_available_memory(md))
+		if (!is_memory_available(md))
 			continue;
 
 		/*
@@ -1004,7 +1010,8 @@
 		} else
 			ae = efi_md_end(md);
 
-		/* keep within max_addr= command line arg */
+		/* keep within max_addr= and min_addr= command line arg */
+		as = max(as, min_addr);
 		ae = min(ae, max_addr);
 		if (ae <= as)
 			continue;
@@ -1116,6 +1123,58 @@
 			 */
 			insert_resource(res, code_resource);
 			insert_resource(res, data_resource);
+#ifdef CONFIG_KEXEC
+			insert_resource(res, &efi_memmap_res);
+			insert_resource(res, &boot_param_res);
+			if (crashk_res.end > crashk_res.start)
+				insert_resource(res, &crashk_res);
+#endif
 		}
 	}
 }
+
+#ifdef CONFIG_KEXEC
+/* Find a block of memory aligned to 64M, excluding the reserved regions.
+   rsvd_regions are sorted.
+ */
+unsigned long
+kdump_find_rsvd_region (unsigned long size,
+		struct rsvd_region *r, int n)
+{
+  int i;
+  u64 start, end;
+  u64 alignment = 1UL << _PAGE_SIZE_64M;
+  void *efi_map_start, *efi_map_end, *p;
+  efi_memory_desc_t *md;
+  u64 efi_desc_size;
+
+  efi_map_start = __va(ia64_boot_param->efi_memmap);
+  efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+  efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+  for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+	  md = p;
+	  if (!efi_wb(md))
+		  continue;
+	  start = ALIGN(md->phys_addr, alignment);
+	  end = efi_md_end(md);
+	  for (i = 0; i < n; i++) {
+		if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
+			if (__pa(r[i].start) > start + size)
+				return start;
+			start = ALIGN(__pa(r[i].end), alignment);
+			if (i < n-1 && __pa(r[i+1].start) < start + size)
+				continue;
+			else
+				break;
+		}
+	  }
+	  if (end > start + size)
+		return start;
+  }
+
+  printk(KERN_WARNING "Cannot reserve 0x%lx bytes of memory for crashdump\n",
+	size);
+  return ~0UL;
+}
+#endif
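
The new min_addr= boot option above is parsed with memparse(), which accepts the usual K/M/G suffixes, and the result is then rounded down to an ia64 granule by GRANULEROUNDDOWN(). A rough userspace equivalent of that parsing and rounding, assuming the common 16MB granule size, is sketched below:

#include <stdio.h>
#include <stdlib.h>

#define GRANULE_SIZE	(16UL << 20)	/* assumed IA64_GRANULE_SIZE */

/* memparse()-like helper: a number with an optional K/M/G suffix. */
static unsigned long parse_size(const char *s)
{
	char *end;
	unsigned long val = strtoul(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': val <<= 10; /* fall through */
	case 'M': case 'm': val <<= 10; /* fall through */
	case 'K': case 'k': val <<= 10; break;
	}
	return val;
}

int main(void)
{
	unsigned long min_addr = parse_size("64M");

	/* round down to a granule boundary, as GRANULEROUNDDOWN() does */
	min_addr &= ~(GRANULE_SIZE - 1);
	printf("min_addr=0x%lx\n", min_addr);
	return 0;
}
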
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 3390b7c..15234ed 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1575,7 +1575,7 @@
 	data8 sys_mq_timedreceive		// 1265
 	data8 sys_mq_notify
 	data8 sys_mq_getsetattr
-	data8 sys_ni_syscall			// reserved for kexec_load
+	data8 sys_kexec_load
 	data8 sys_ni_syscall			// reserved for vserver
 	data8 sys_waitid			// 1270
 	data8 sys_add_key
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 879c181..bd17190 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -14,6 +14,7 @@
 
 #include <asm/checksum.h>
 EXPORT_SYMBOL(ip_fast_csum);		/* hand-coded assembly */
+EXPORT_SYMBOL(csum_ipv6_magic);
 
 #include <asm/semaphore.h>
 EXPORT_SYMBOL(__down);
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 60d6495..0fc5fb7 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -288,6 +288,27 @@
 	/* do nothing... */
 }
 
+
+#ifdef CONFIG_KEXEC
+void
+kexec_disable_iosapic(void)
+{
+	struct iosapic_intr_info *info;
+	struct iosapic_rte_info *rte;
+	u8 vec = 0;
+	for (info = iosapic_intr_info; info <
+			iosapic_intr_info + IA64_NUM_VECTORS; ++info, ++vec) {
+		list_for_each_entry(rte, &info->rtes,
+				rte_list) {
+			iosapic_write(rte->addr,
+					IOSAPIC_RTE_LOW(rte->rte_index),
+					IOSAPIC_MASK|vec);
+			iosapic_eoi(rte->addr, vec);
+		}
+	}
+}
+#endif
+
 static void
 mask_irq (unsigned int irq)
 {
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 51217d6..76e7789 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -481,7 +481,7 @@
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
+	free_insn_slot(p->ainsn.insn, 0);
 	mutex_unlock(&kprobe_mutex);
 }
 /*
@@ -851,7 +851,7 @@
 			return;
 		}
 	} while (unw_unwind(info) >= 0);
-	lp->bsp = 0;
+	lp->bsp = NULL;
 	lp->cfm = 0;
 	return;
 }
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
new file mode 100644
index 0000000..468233f
--- /dev/null
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -0,0 +1,133 @@
+/*
+ * arch/ia64/kernel/machine_kexec.c
+ *
+ * Handle transition of Linux booting another kernel
+ * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
+ * Copyright (C) 2006 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2.  See the file COPYING for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/kexec.h>
+#include <linux/cpu.h>
+#include <linux/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/setup.h>
+#include <asm/delay.h>
+#include <asm/meminit.h>
+
+typedef void (*relocate_new_kernel_t)(unsigned long, unsigned long,
+		struct ia64_boot_param *, unsigned long);
+
+struct kimage *ia64_kimage;
+
+struct resource efi_memmap_res = {
+        .name  = "EFI Memory Map",
+        .start = 0,
+        .end   = 0,
+        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+struct resource boot_param_res = {
+        .name  = "Boot parameter",
+        .start = 0,
+        .end   = 0,
+        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+
+/*
+ * Do whatever setup is needed on the image and the
+ * reboot code buffer to allow us to avoid allocations
+ * later.
+ */
+int machine_kexec_prepare(struct kimage *image)
+{
+	void *control_code_buffer;
+	const unsigned long *func;
+
+	func = (unsigned long *)&relocate_new_kernel;
+	/* Pre-load control code buffer to minimize work in kexec path */
+	control_code_buffer = page_address(image->control_code_page);
+	memcpy((void *)control_code_buffer, (const void *)func[0],
+			relocate_new_kernel_size);
+	flush_icache_range((unsigned long)control_code_buffer,
+			(unsigned long)control_code_buffer + relocate_new_kernel_size);
+	ia64_kimage = image;
+
+	return 0;
+}
+
+void machine_kexec_cleanup(struct kimage *image)
+{
+}
+
+void machine_shutdown(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		if (cpu != smp_processor_id())
+			cpu_down(cpu);
+	}
+	kexec_disable_iosapic();
+}
+
+/*
+ * Do not allocate memory (or fail in any way) in machine_kexec().
+ * We are past the point of no return, committed to rebooting now.
+ */
+extern void *efi_get_pal_addr(void);
+static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
+{
+	struct kimage *image = arg;
+	relocate_new_kernel_t rnk;
+	void *pal_addr = efi_get_pal_addr();
+	unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
+	unsigned long vector;
+	int ii;
+
+	if (image->type == KEXEC_TYPE_CRASH) {
+		crash_save_this_cpu();
+		current->thread.ksp = (__u64)info->sw - 16;
+	}
+
+	/* Interrupts aren't acceptable while we reboot */
+	local_irq_disable();
+
+	/* Mask CMC and Performance Monitor interrupts */
+	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
+	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
+
+	/* Mask ITV and Local Redirect Registers */
+	ia64_set_itv(1 << 16);
+	ia64_set_lrr0(1 << 16);
+	ia64_set_lrr1(1 << 16);
+
+	/* terminate possible nested in-service interrupts */
+	for (ii = 0; ii < 16; ii++)
+		ia64_eoi();
+
+	/* unmask TPR and clear any pending interrupts */
+	ia64_setreg(_IA64_REG_CR_TPR, 0);
+	ia64_srlz_d();
+	vector = ia64_get_ivr();
+	while (vector != IA64_SPURIOUS_INT_VECTOR) {
+		ia64_eoi();
+		vector = ia64_get_ivr();
+	}
+	platform_kernel_launch_event();
+	rnk = (relocate_new_kernel_t)&code_addr;
+	(*rnk)(image->head, image->start, ia64_boot_param,
+		     GRANULEROUNDDOWN((unsigned long) pal_addr));
+	BUG();
+}
+
+void machine_kexec(struct kimage *image)
+{
+	unw_init_running(ia64_machine_kexec, image);
+	for(;;);
+}
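
One detail worth calling out in ia64_machine_kexec() above: on ia64 an indirect call goes through a function descriptor (entry address plus gp), so rnk = (relocate_new_kernel_t)&code_addr treats the local variable holding the physical address of the copied relocation code as the first slot of such a descriptor. The sketch below only illustrates that layout; the struct and field names are assumptions, not the kernel's actual type.

/* Illustration of the ia64 software-convention function descriptor.
 * Names are assumed for this sketch. */
struct ia64_fdesc_sketch {
	unsigned long ip;	/* entry point of the function */
	unsigned long gp;	/* global pointer the callee expects */
};

/* Casting &code_addr to a function pointer works because code_addr
 * occupies the 'ip' slot of a descriptor; relocate_new_kernel() is
 * position-independent assembly and never uses gp, so the second
 * (garbage) slot is harmless. */
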
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 7cfa63a..87c1c4f 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -82,6 +82,7 @@
 #include <asm/system.h>
 #include <asm/sal.h>
 #include <asm/mca.h>
+#include <asm/kexec.h>
 
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
@@ -678,7 +679,7 @@
  * disable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_disable_keventd(void *unused)
+ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
 	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
 }
@@ -690,7 +691,7 @@
  * enable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_enable_keventd(void *unused)
+ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
 	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
 }
@@ -1238,6 +1239,10 @@
 	} else {
 		/* Dump buffered message to console */
 		ia64_mlogbuf_finish(1);
+#ifdef CONFIG_CRASH_DUMP
+		atomic_set(&kdump_in_progress, 1);
+		monarch_cpu = -1;
+#endif
 	}
 	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
 			== NOTIFY_STOP)
@@ -1247,8 +1252,8 @@
 	monarch_cpu = -1;
 }
 
-static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
-static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
+static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
 
 /*
  * ia64_mca_cmc_int_handler
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 0b546e2..a71df9a 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -16,6 +16,7 @@
  * 02/05/2001   S.Eranian	fixed module support
  * 10/23/2001	S.Eranian	updated pal_perf_mon_info bug fixes
  * 03/24/2004	Ashok Raj	updated to work with CPU Hotplug
+ * 10/26/2006   Russ Anderson	updated processor features to rev 2.2 spec
  */
 #include <linux/types.h>
 #include <linux/errno.h>
@@ -314,13 +315,20 @@
 		     "Protection Key Registers(PKR)  : %d\n"
 		     "Implemented bits in PKR.key    : %d\n"
 		     "Hash Tag ID                    : 0x%x\n"
-		     "Size of RR.rid                 : %d\n",
+		     "Size of RR.rid                 : %d\n"
+		     "Max Purges                     : ",
 		     vm_info_1.pal_vm_info_1_s.phys_add_size,
 		     vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
 		     vm_info_1.pal_vm_info_1_s.max_pkr+1,
 		     vm_info_1.pal_vm_info_1_s.key_size,
 		     vm_info_1.pal_vm_info_1_s.hash_tag_id,
 		     vm_info_2.pal_vm_info_2_s.rid_size);
+		if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES)
+			p += sprintf(p, "unlimited\n");
+		else
+			p += sprintf(p, "%d\n",
+		     		vm_info_2.pal_vm_info_2_s.max_purges ?
+				vm_info_2.pal_vm_info_2_s.max_purges : 1);
 	}
 
 	if (ia64_pal_mem_attrib(&attrib) == 0) {
@@ -467,7 +475,11 @@
 	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
 	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
 	NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
-	NULL,NULL,NULL,NULL,NULL,
+	"Unimplemented instruction address fault",
+	"INIT, PMI, and LINT pins",
+	"Simple unimplemented instr addresses",
+	"Variable P-state performance",
+	"Virtual machine features implemented",
 	"XIP,XPSR,XFS implemented",
 	"XR1-XR3 implemented",
 	"Disable dynamic predicate prediction",
@@ -475,7 +487,11 @@
 	"Disable dynamic data cache prefetch",
 	"Disable dynamic inst cache prefetch",
 	"Disable dynamic branch prediction",
-	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	NULL, NULL, NULL, NULL,
+	"Disable P-states",
+	"Enable MCA on Data Poisoning",
+	"Enable vmsw instruction",
+	"Enable extern environmental notification",
 	"Disable BINIT on processor time-out",
 	"Disable dynamic power management (DPM)",
 	"Disable coherency",
@@ -952,7 +968,6 @@
 	}
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int palinfo_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
@@ -974,7 +989,6 @@
 	.notifier_call = palinfo_cpu_callback,
 	.priority = 0,
 };
-#endif
 
 static int __init
 palinfo_init(void)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 3aaede0..dbb2816 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -853,9 +853,8 @@
 	 * allocate context descriptor 
 	 * must be able to free with interrupts disabled
 	 */
-	ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL);
+	ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
 	if (ctx) {
-		memset(ctx, 0, sizeof(pfm_context_t));
 		DPRINT(("alloc ctx @%p\n", ctx));
 	}
 	return ctx;
@@ -2302,7 +2301,7 @@
 	DPRINT(("smpl_buf @%p\n", smpl_buf));
 
 	/* allocate vma */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		DPRINT(("Cannot allocate vma\n"));
 		goto error_kmem;
diff --git a/arch/ia64/kernel/perfmon_montecito.h b/arch/ia64/kernel/perfmon_montecito.h
index cd06ac6..7f8da4c 100644
--- a/arch/ia64/kernel/perfmon_montecito.h
+++ b/arch/ia64/kernel/perfmon_montecito.h
@@ -45,16 +45,16 @@
 /* pmc29 */ { PFM_REG_NOTIMPL, },
 /* pmc30 */ { PFM_REG_NOTIMPL, },
 /* pmc31 */ { PFM_REG_NOTIMPL, },
-/* pmc32 */ { PFM_REG_CONFIG,  0, 0x30f01ffffffffff, 0x30f01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc33 */ { PFM_REG_CONFIG,  0, 0x0,  0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc34 */ { PFM_REG_CONFIG,  0, 0xf01ffffffffff, 0xf01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc35 */ { PFM_REG_CONFIG,  0, 0x0,  0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc32 */ { PFM_REG_CONFIG,  0, 0x30f01ffffffffffUL, 0x30f01ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc33 */ { PFM_REG_CONFIG,  0, 0x0,  0x1ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc34 */ { PFM_REG_CONFIG,  0, 0xf01ffffffffffUL, 0xf01ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc35 */ { PFM_REG_CONFIG,  0, 0x0,  0x1ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
 /* pmc36 */ { PFM_REG_CONFIG,  0, 0xfffffff0, 0xf, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
 /* pmc37 */ { PFM_REG_MONITOR, 4, 0x0, 0x3fff, NULL, pfm_mont_pmc_check, {RDEP_MONT_IEAR, 0, 0, 0}, {0, 0, 0, 0}},
 /* pmc38 */ { PFM_REG_CONFIG,  0, 0xdb6, 0x2492, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
 /* pmc39 */ { PFM_REG_MONITOR, 6, 0x0, 0xffcf, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
 /* pmc40 */ { PFM_REG_MONITOR, 6, 0x2000000, 0xf01cf, NULL, pfm_mont_pmc_check, {RDEP_MONT_DEAR,0, 0, 0}, {0,0, 0, 0}},
-/* pmc41 */ { PFM_REG_CONFIG,  0, 0x00002078fefefefe, 0x1e00018181818, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc41 */ { PFM_REG_CONFIG,  0, 0x00002078fefefefeUL, 0x1e00018181818UL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
 /* pmc42 */ { PFM_REG_MONITOR, 6, 0x0, 0x7ff4f, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
 	    { PFM_REG_END    , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
 };
@@ -185,7 +185,7 @@
 	DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, tmpval, ctx->ctx_fl_using_dbreg, is_loaded));
 
 	if (cnum == 41 && is_loaded 
-	    && (tmpval & 0x1e00000000000) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
+	    && (tmpval & 0x1e00000000000UL) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
 
 		DPRINT(("pmc[%d]=0x%lx has active pmc41 settings, clearing dbr\n", cnum, tmpval));
 
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
new file mode 100644
index 0000000..ae473e3
--- /dev/null
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -0,0 +1,334 @@
+/*
+ * arch/ia64/kernel/relocate_kernel.S
+ *
+ * Relocate kexec'able kernel and start it
+ *
+ * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2005 Khalid Aziz  <khalid.aziz@hp.com>
+ * Copyright (C) 2005 Intel Corp,  Zou Nan hai <nanhai.zou@intel.com>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2.  See the file COPYING for more details.
+ */
+#include <asm/asmmacro.h>
+#include <asm/kregs.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mca_asm.h>
+
+       /* Must be relocatable PIC code callable as a C function
+        */
+GLOBAL_ENTRY(relocate_new_kernel)
+	.prologue
+	alloc r31=ar.pfs,4,0,0,0
+        .body
+.reloc_entry:
+{
+	rsm psr.i| psr.ic
+	mov r2=ip
+}
+	;;
+{
+        flushrs                         // must be first insn in group
+        srlz.i
+}
+	;;
+	dep r2=0,r2,61,3		//to physical address
+	;;
+	//first switch to physical mode
+	add r3=1f-.reloc_entry, r2
+	movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC
+	mov ar.rsc=0	          	// put RSE in enforced lazy mode
+	;;
+	add sp=(memory_stack_end - 16 - .reloc_entry),r2
+	add r8=(register_stack - .reloc_entry),r2
+	;;
+	mov r18=ar.rnat
+	mov ar.bspstore=r8
+	;;
+        mov cr.ipsr=r16
+        mov cr.iip=r3
+        mov cr.ifs=r0
+	srlz.i
+	;;
+	mov ar.rnat=r18
+	rfi
+	;;
+1:
+	//physical mode code begin
+	mov b6=in1
+	dep r28=0,in2,61,3	//to physical address
+
+	// purge all TC entries
+#define O(member)       IA64_CPUINFO_##member##_OFFSET
+        GET_THIS_PADDR(r2, cpu_info)    // load phys addr of cpu_info into r2
+        ;;
+        addl r17=O(PTCE_STRIDE),r2
+        addl r2=O(PTCE_BASE),r2
+        ;;
+        ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;    	// r18=ptce_base
+        ld4 r19=[r2],4                                  // r19=ptce_count[0]
+        ld4 r21=[r17],4                                 // r21=ptce_stride[0]
+        ;;
+        ld4 r20=[r2]                                    // r20=ptce_count[1]
+        ld4 r22=[r17]                                   // r22=ptce_stride[1]
+        mov r24=r0
+        ;;
+        adds r20=-1,r20
+        ;;
+#undef O
+2:
+        cmp.ltu p6,p7=r24,r19
+(p7)    br.cond.dpnt.few 4f
+        mov ar.lc=r20
+3:
+        ptc.e r18
+        ;;
+        add r18=r22,r18
+        br.cloop.sptk.few 3b
+        ;;
+        add r18=r21,r18
+        add r24=1,r24
+        ;;
+        br.sptk.few 2b
+4:
+        srlz.i
+        ;;
+	//purge TR entry for kernel text and data
+        movl r16=KERNEL_START
+        mov r18=KERNEL_TR_PAGE_SHIFT<<2
+        ;;
+        ptr.i r16, r18
+        ptr.d r16, r18
+        ;;
+        srlz.i
+        ;;
+
+	// purge TR entry for percpu data
+        movl r16=PERCPU_ADDR
+        mov r18=PERCPU_PAGE_SHIFT<<2
+        ;;
+        ptr.d r16,r18
+        ;;
+        srlz.d
+	;;
+
+        // purge TR entry for pal code
+        mov r16=in3
+        mov r18=IA64_GRANULE_SHIFT<<2
+        ;;
+        ptr.i r16,r18
+        ;;
+        srlz.i
+	;;
+
+        // purge TR entry for stack
+        mov r16=IA64_KR(CURRENT_STACK)
+        ;;
+        shl r16=r16,IA64_GRANULE_SHIFT
+        movl r19=PAGE_OFFSET
+        ;;
+        add r16=r19,r16
+        mov r18=IA64_GRANULE_SHIFT<<2
+        ;;
+        ptr.d r16,r18
+        ;;
+        srlz.i
+	;;
+
+	//copy segments
+	movl r16=PAGE_MASK
+        mov  r30=in0                    // in0 is page_list
+        br.sptk.few .dest_page
+	;;
+.loop:
+	ld8  r30=[in0], 8;;
+.dest_page:
+	tbit.z p0, p6=r30, 0;;    	// 0x1 dest page
+(p6)	and r17=r30, r16
+(p6)	br.cond.sptk.few .loop;;
+
+	tbit.z p0, p6=r30, 1;;		// 0x2 indirect page
+(p6)	and in0=r30, r16
+(p6)	br.cond.sptk.few .loop;;
+
+	tbit.z p0, p6=r30, 2;;		// 0x4 end flag
+(p6)	br.cond.sptk.few .end_loop;;
+
+	tbit.z p6, p0=r30, 3;;		// 0x8 source page
+(p6)	br.cond.sptk.few .loop
+
+	and r18=r30, r16
+
+	// simple copy page, may optimize later
+	movl r14=PAGE_SIZE/8 - 1;;
+	mov ar.lc=r14;;
+1:
+	ld8 r14=[r18], 8;;
+	st8 [r17]=r14;;
+	fc.i r17
+	add r17=8, r17
+	br.ctop.sptk.few 1b
+	br.sptk.few .loop
+	;;
+
+.end_loop:
+	sync.i			// for fc.i
+	;;
+	srlz.i
+	;;
+	srlz.d
+	;;
+	br.call.sptk.many b0=b6;;
+
+.align  32
+memory_stack:
+	.fill           8192, 1, 0
+memory_stack_end:
+register_stack:
+	.fill           8192, 1, 0
+register_stack_end:
+relocate_new_kernel_end:
+END(relocate_new_kernel)
+
+.global relocate_new_kernel_size
+relocate_new_kernel_size:
+	data8	relocate_new_kernel_end - relocate_new_kernel
+
+GLOBAL_ENTRY(ia64_dump_cpu_regs)
+        .prologue
+        alloc loc0=ar.pfs,1,2,0,0
+        .body
+        mov     ar.rsc=0                // put RSE in enforced lazy mode
+        add     loc1=4*8, in0           // save r4 and r5 first
+        ;;
+{
+        flushrs                         // flush dirty regs to backing store
+        srlz.i
+}
+        st8 [loc1]=r4, 8
+        ;;
+        st8 [loc1]=r5, 8
+        ;;
+        add loc1=32*8, in0
+        mov r4=ar.rnat
+        ;;
+        st8 [in0]=r0, 8			// r0
+        st8 [loc1]=r4, 8		// rnat
+        mov r5=pr
+        ;;
+        st8 [in0]=r1, 8			// r1
+        st8 [loc1]=r5, 8		// pr
+        mov r4=b0
+        ;;
+        st8 [in0]=r2, 8			// r2
+        st8 [loc1]=r4, 8		// b0
+        mov r5=b1;
+        ;;
+        st8 [in0]=r3, 24		// r3
+        st8 [loc1]=r5, 8		// b1
+        mov r4=b2
+        ;;
+        st8 [in0]=r6, 8			// r6
+        st8 [loc1]=r4, 8		// b2
+	mov r5=b3
+        ;;
+        st8 [in0]=r7, 8			// r7
+        st8 [loc1]=r5, 8		// b3
+        mov r4=b4
+        ;;
+        st8 [in0]=r8, 8			// r8
+        st8 [loc1]=r4, 8		// b4
+        mov r5=b5
+        ;;
+        st8 [in0]=r9, 8			// r9
+        st8 [loc1]=r5, 8		// b5
+        mov r4=b6
+        ;;
+        st8 [in0]=r10, 8		// r10
+        st8 [loc1]=r4, 8		// b6
+        mov r5=b7
+        ;;
+        st8 [in0]=r11, 8		// r11
+        st8 [loc1]=r5, 8		// b7
+        mov r4=b0
+        ;;
+        st8 [in0]=r12, 8		// r12
+        st8 [loc1]=r4, 8		// ip
+        mov r5=loc0
+	;;
+        st8 [in0]=r13, 8		// r13
+        extr.u r5=r5, 0, 38		// ar.pfs.pfm
+	mov r4=r0			// user mask
+        ;;
+        st8 [in0]=r14, 8		// r14
+        st8 [loc1]=r5, 8		// cfm
+        ;;
+        st8 [in0]=r15, 8		// r15
+        st8 [loc1]=r4, 8        	// user mask
+	mov r5=ar.rsc
+        ;;
+        st8 [in0]=r16, 8		// r16
+        st8 [loc1]=r5, 8        	// ar.rsc
+        mov r4=ar.bsp
+        ;;
+        st8 [in0]=r17, 8		// r17
+        st8 [loc1]=r4, 8        	// ar.bsp
+        mov r5=ar.bspstore
+        ;;
+        st8 [in0]=r18, 8		// r18
+        st8 [loc1]=r5, 8        	// ar.bspstore
+        mov r4=ar.rnat
+        ;;
+        st8 [in0]=r19, 8		// r19
+        st8 [loc1]=r4, 8        	// ar.rnat
+        mov r5=ar.ccv
+        ;;
+        st8 [in0]=r20, 8		// r20
+	st8 [loc1]=r5, 8        	// ar.ccv
+        mov r4=ar.unat
+        ;;
+        st8 [in0]=r21, 8		// r21
+        st8 [loc1]=r4, 8        	// ar.unat
+        mov r5 = ar.fpsr
+        ;;
+        st8 [in0]=r22, 8		// r22
+        st8 [loc1]=r5, 8        	// ar.fpsr
+        mov r4 = ar.unat
+        ;;
+        st8 [in0]=r23, 8		// r23
+        st8 [loc1]=r4, 8        	// unat
+        mov r5 = ar.fpsr
+        ;;
+        st8 [in0]=r24, 8		// r24
+        st8 [loc1]=r5, 8        	// fpsr
+        mov r4 = ar.pfs
+        ;;
+        st8 [in0]=r25, 8		// r25
+        st8 [loc1]=r4, 8        	// ar.pfs
+        mov r5 = ar.lc
+        ;;
+        st8 [in0]=r26, 8		// r26
+        st8 [loc1]=r5, 8        	// ar.lc
+        mov r4 = ar.ec
+        ;;
+        st8 [in0]=r27, 8		// r27
+        st8 [loc1]=r4, 8        	// ar.ec
+        mov r5 = ar.csd
+        ;;
+        st8 [in0]=r28, 8		// r28
+        st8 [loc1]=r5, 8        	// ar.csd
+        mov r4 = ar.ssd
+        ;;
+        st8 [in0]=r29, 8		// r29
+        st8 [loc1]=r4, 8        	// ar.ssd
+        ;;
+        st8 [in0]=r30, 8		// r30
+        ;;
+	st8 [in0]=r31, 8		// r31
+        mov ar.pfs=loc0
+        ;;
+        br.ret.sptk.many rp
+END(ia64_dump_cpu_regs)
+
+
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index e63b8ca..fd607ca 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -575,7 +575,6 @@
 	.write   = salinfo_log_write,
 };
 
-#ifdef	CONFIG_HOTPLUG_CPU
 static int __devinit
 salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
@@ -620,7 +619,6 @@
 	.notifier_call = salinfo_cpu_callback,
 	.priority = 0,
 };
-#endif	/* CONFIG_HOTPLUG_CPU */
 
 static int __init
 salinfo_init(void)
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index d10404a..14e1200 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -43,6 +43,8 @@
 #include <linux/initrd.h>
 #include <linux/pm.h>
 #include <linux/cpufreq.h>
+#include <linux/kexec.h>
+#include <linux/crash_dump.h>
 
 #include <asm/ia32.h>
 #include <asm/machvec.h>
@@ -252,6 +254,41 @@
 	efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
 	n++;
 
+#ifdef CONFIG_KEXEC
+	/* crashkernel=size@offset specifies the size to reserve for a crash
+	 * kernel. (The offset is ignored, to keep compatibility with other
+	 * architectures.)  By reserving this memory we guarantee that Linux
+	 * never sets it up as a DMA target.  Useful for holding code to do
+	 * something appropriate after a kernel panic.
+	 */
+	{
+		char *from = strstr(saved_command_line, "crashkernel=");
+		unsigned long base, size;
+		if (from) {
+			size = memparse(from + 12, &from);
+			if (size) {
+				sort_regions(rsvd_region, n);
+				base = kdump_find_rsvd_region(size,
+						rsvd_region, n);
+				if (base != ~0UL) {
+					rsvd_region[n].start =
+						(unsigned long)__va(base);
+					rsvd_region[n].end =
+						(unsigned long)__va(base + size);
+					n++;
+					crashk_res.start = base;
+					crashk_res.end = base + size - 1;
+				}
+			}
+		}
+		efi_memmap_res.start = ia64_boot_param->efi_memmap;
+		efi_memmap_res.end = efi_memmap_res.start +
+			ia64_boot_param->efi_memmap_size;
+		boot_param_res.start = __pa(ia64_boot_param);
+		boot_param_res.end = boot_param_res.start +
+			sizeof(*ia64_boot_param);
+	}
+#endif
 	/* end of memory marker */
 	rsvd_region[n].start = ~0UL;
 	rsvd_region[n].end   = ~0UL;
@@ -263,6 +300,7 @@
 	sort_regions(rsvd_region, num_rsvd_regions);
 }
 
+
 /**
  * find_initrd - get initrd parameters from the boot parameter structure
  *
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 6ab95ce..b1b9aa4 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -30,6 +30,7 @@
 #include <linux/delay.h>
 #include <linux/efi.h>
 #include <linux/bitops.h>
+#include <linux/kexec.h>
 
 #include <asm/atomic.h>
 #include <asm/current.h>
@@ -66,6 +67,7 @@
 
 #define IPI_CALL_FUNC		0
 #define IPI_CPU_STOP		1
+#define IPI_KDUMP_CPU_STOP	3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
 static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
@@ -155,7 +157,11 @@
 			      case IPI_CPU_STOP:
 				stop_this_cpu();
 				break;
-
+#ifdef CONFIG_CRASH_DUMP
+			      case IPI_KDUMP_CPU_STOP:
+				unw_init_running(kdump_cpu_freeze, NULL);
+				break;
+#endif
 			      default:
 				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
 				break;
@@ -213,6 +219,26 @@
 	send_IPI_single(smp_processor_id(), op);
 }
 
+#ifdef CONFIG_CRASH_DUMP
+void
+kdump_smp_send_stop()
+{
+ 	send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
+}
+
+void
+kdump_smp_send_init()
+{
+	unsigned int cpu, self_cpu;
+	self_cpu = smp_processor_id();
+	for_each_online_cpu(cpu) {
+		if (cpu != self_cpu) {
+			if (kdump_status[cpu] == 0)
+				platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
+		}
+	}
+}
+#endif
 /*
  * Called with preeemption disabled.
  */
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index f7d7f56..b21ddec 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -463,15 +463,17 @@
 }
 
 struct create_idle {
+	struct work_struct work;
 	struct task_struct *idle;
 	struct completion done;
 	int cpu;
 };
 
 void
-do_fork_idle(void *_c_idle)
+do_fork_idle(struct work_struct *work)
 {
-	struct create_idle *c_idle = _c_idle;
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
 
 	c_idle->idle = fork_idle(c_idle->cpu);
 	complete(&c_idle->done);
@@ -482,10 +484,10 @@
 {
 	int timeout;
 	struct create_idle c_idle = {
+		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
 		.cpu	= cpu,
 		.done	= COMPLETION_INITIALIZER(c_idle.done),
 	};
-	DECLARE_WORK(work, do_fork_idle, &c_idle);
 
  	c_idle.idle = get_idle_for_cpu(cpu);
  	if (c_idle.idle) {
@@ -497,9 +499,9 @@
 	 * We can't use kernel_thread since we must avoid to reschedule the child.
 	 */
 	if (!keventd_up() || current_is_keventd())
-		work.func(work.data);
+		c_idle.work.func(&c_idle.work);
 	else {
-		schedule_work(&work);
+		schedule_work(&c_idle.work);
 		wait_for_completion(&c_idle.done);
 	}
 
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 5629b45..687500d 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -31,11 +31,11 @@
 {
 #if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
 	/*
-	 * If CPEI cannot be re-targetted, and this is
-	 * CPEI target, then dont create the control file
+	 * If CPEI can be re-targeted or if this is not the
+	 * CPEI target, then it is hotpluggable
 	 */
-	if (!can_cpei_retarget() && is_cpu_cpei_target(num))
-		sysfs_cpus[num].cpu.no_control = 1;
+	if (can_cpei_retarget() || !is_cpu_cpei_target(num))
+		sysfs_cpus[num].cpu.hotpluggable = 1;
 	map_cpu_to_node(num, node_cpuid[num].nid);
 #endif
 
diff --git a/arch/ia64/lib/checksum.c b/arch/ia64/lib/checksum.c
index beb1172..4411d9b 100644
--- a/arch/ia64/lib/checksum.c
+++ b/arch/ia64/lib/checksum.c
@@ -33,32 +33,32 @@
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented.
  */
-unsigned short int
-csum_tcpudp_magic (unsigned long saddr, unsigned long daddr, unsigned short len,
-		   unsigned short proto, unsigned int sum)
+__sum16
+csum_tcpudp_magic (__be32 saddr, __be32 daddr, unsigned short len,
+		   unsigned short proto, __wsum sum)
 {
-	return ~from64to16(saddr + daddr + sum + ((unsigned long) ntohs(len) << 16) +
-			   ((unsigned long) proto << 8));
+	return (__force __sum16)~from64to16(
+		(__force u64)saddr + (__force u64)daddr +
+		(__force u64)sum + ((len + proto) << 8));
 }
 
 EXPORT_SYMBOL(csum_tcpudp_magic);
 
-unsigned int
-csum_tcpudp_nofold (unsigned long saddr, unsigned long daddr, unsigned short len,
-		    unsigned short proto, unsigned int sum)
+__wsum
+csum_tcpudp_nofold (__be32 saddr, __be32 daddr, unsigned short len,
+		    unsigned short proto, __wsum sum)
 {
 	unsigned long result;
 
-	result = (saddr + daddr + sum +
-		  ((unsigned long) ntohs(len) << 16) +
-		  ((unsigned long) proto << 8));
+	result = (__force u64)saddr + (__force u64)daddr +
+		 (__force u64)sum + ((len + proto) << 8);
 
 	/* Fold down to 32-bits so we don't lose in the typedef-less network stack.  */
 	/* 64 to 33 */
 	result = (result & 0xffffffff) + (result >> 32);
 	/* 33 to 32 */
 	result = (result & 0xffffffff) + (result >> 32);
-	return result;
+	return (__force __wsum)result;
 }
 
 extern unsigned long do_csum (const unsigned char *, long);
@@ -75,16 +75,15 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int
-csum_partial (const unsigned char * buff, int len, unsigned int sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
 {
-	unsigned long result = do_csum(buff, len);
+	u64 result = do_csum(buff, len);
 
 	/* add in old sum, and carry.. */
-	result += sum;
+	result += (__force u32)sum;
 	/* 32+c bits -> 32 bits */
 	result = (result & 0xffffffff) + (result >> 32);
-	return result;
+	return (__force __wsum)result;
 }
 
 EXPORT_SYMBOL(csum_partial);
@@ -93,10 +92,9 @@
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-unsigned short
-ip_compute_csum (unsigned char * buff, int len)
+__sum16 ip_compute_csum (const void *buff, int len)
 {
-	return ~do_csum(buff,len);
+	return (__force __sum16)~do_csum(buff,len);
 }
 
 EXPORT_SYMBOL(ip_compute_csum);
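
The checksum rewrites above keep the arithmetic unchanged while moving to the sparse-annotated __wsum/__sum16 types: a wide one's-complement accumulator is folded down step by step, re-adding the carry each time, before the final complement. A standalone sketch of that folding (mirroring the shape of from64to16(), not the kernel's actual helper) is shown below; the sample input is arbitrary demonstration data, not a real pseudo-header.

#include <stdint.h>
#include <stdio.h>

/* Fold a 64-bit one's-complement sum down to 16 bits, re-adding the
 * carries produced at each step. */
static uint16_t fold64(uint64_t sum)
{
	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* 64 -> 33 bits */
	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* 33 -> 32 bits */
	sum = (sum & 0xffffULL) + (sum >> 16);		/* 32 -> 17 bits */
	sum = (sum & 0xffffULL) + (sum >> 16);		/* 17 -> 16 bits */
	return (uint16_t)sum;
}

int main(void)
{
	uint64_t partial = 0x1234567890abcdefULL;	/* arbitrary accumulated sum */

	/* the complemented, folded value is what csum_*_magic() returns */
	printf("folded = 0x%04x\n", 0xffffu & ~(unsigned)fold64(partial));
	return 0;
}
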
diff --git a/arch/ia64/lib/csum_partial_copy.c b/arch/ia64/lib/csum_partial_copy.c
index 36866e8..503dfe6 100644
--- a/arch/ia64/lib/csum_partial_copy.c
+++ b/arch/ia64/lib/csum_partial_copy.c
@@ -104,9 +104,9 @@
  */
 extern unsigned long do_csum(const unsigned char *, long);
 
-static unsigned int
-do_csum_partial_copy_from_user (const unsigned char __user *src, unsigned char *dst,
-				int len, unsigned int psum, int *errp)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst,
+				int len, __wsum psum, int *errp)
 {
 	unsigned long result;
 
@@ -122,30 +122,17 @@
 	result = do_csum(dst, len);
 
 	/* add in old sum, and carry.. */
-	result += psum;
+	result += (__force u32)psum;
 	/* 32+c bits -> 32 bits */
 	result = (result & 0xffffffff) + (result >> 32);
-	return result;
+	return (__force __wsum)result;
 }
 
-unsigned int
-csum_partial_copy_from_user (const unsigned char __user *src, unsigned char *dst,
-			     int len, unsigned int sum, int *errp)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
-	if (!access_ok(VERIFY_READ, src, len)) {
-		*errp = -EFAULT;
-		memset(dst, 0, len);
-		return sum;
-	}
-
-	return do_csum_partial_copy_from_user(src, dst, len, sum, errp);
-}
-
-unsigned int
-csum_partial_copy_nocheck(const unsigned char __user *src, unsigned char *dst,
-			  int len, unsigned int sum)
-{
-	return do_csum_partial_copy_from_user(src, dst, len, sum, NULL);
+	return csum_partial_copy_from_user((__force const void __user *)src,
+					   dst, len, sum, NULL);
 }
 
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/ia64/lib/ip_fast_csum.S b/arch/ia64/lib/ip_fast_csum.S
index 19674ca..1f86aeb 100644
--- a/arch/ia64/lib/ip_fast_csum.S
+++ b/arch/ia64/lib/ip_fast_csum.S
@@ -8,8 +8,8 @@
  *      in0: address of buffer to checksum (char *)
  *      in1: length of the buffer (int)
  *
- * Copyright (C) 2002 Intel Corp.
- * Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com>
+ * Copyright (C) 2002, 2006 Intel Corp.
+ * Copyright (C) 2002, 2006 Ken Chen <kenneth.w.chen@intel.com>
  */
 
 #include <asm/asmmacro.h>
@@ -25,6 +25,9 @@
 
 #define in0	r32
 #define in1	r33
+#define in2	r34
+#define in3	r35
+#define in4	r36
 #define ret0	r8
 
 GLOBAL_ENTRY(ip_fast_csum)
@@ -65,8 +68,9 @@
 	zxt2	r20=r20
 	;;
 	add	r20=ret0,r20
+	mov	r9=0xffff
 	;;
-	andcm	ret0=-1,r20
+	andcm	ret0=r9,r20
 	.restore sp		// reset frame state
 	br.ret.sptk.many b0
 	;;
@@ -88,3 +92,51 @@
 	mov	b0=r34
 	br.ret.sptk.many b0
 END(ip_fast_csum)
+
+GLOBAL_ENTRY(csum_ipv6_magic)
+	ld4	r20=[in0],4
+	ld4	r21=[in1],4
+	dep	r15=in3,in2,32,16
+	;;
+	ld4	r22=[in0],4
+	ld4	r23=[in1],4
+	mux1	r15=r15,@rev
+	;;
+	ld4	r24=[in0],4
+	ld4	r25=[in1],4
+	shr.u	r15=r15,16
+	add	r16=r20,r21
+	add	r17=r22,r23
+	;;
+	ld4	r26=[in0],4
+	ld4	r27=[in1],4
+	add	r18=r24,r25
+	add	r8=r16,r17
+	;;
+	add	r19=r26,r27
+	add	r8=r8,r18
+	;;
+	add	r8=r8,r19
+	add	r15=r15,in4
+	;;
+	add	r8=r8,r15
+	;;
+	shr.u	r10=r8,32	// now fold sum into short
+	zxt4	r11=r8
+	;;
+	add	r8=r10,r11
+	;;
+	shr.u	r10=r8,16	// yeah, keep it rolling
+	zxt2	r11=r8
+	;;
+	add	r8=r10,r11
+	;;
+	shr.u	r10=r8,16	// three times lucky
+	zxt2	r11=r8
+	;;
+	add	r8=r10,r11
+	mov	r9=0xffff
+	;;
+	andcm	r8=r9,r8
+	br.ret.sptk.many b0
+END(csum_ipv6_magic)
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index f3a9585..0c7e94e 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -64,6 +64,11 @@
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 #define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
 
 /*
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ff87a5c..56dc2024 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -156,7 +156,7 @@
 	 * the problem.  When the process attempts to write to the register backing store
 	 * for the first time, it will get a SEGFAULT in this case.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -175,7 +175,7 @@
 
 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
 	if (!(current->personality & MMAP_PAGE_ZERO)) {
-		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (vma) {
 			memset(vma, 0, sizeof(*vma));
 			vma->vm_mm = current->mm;
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index b30be7c..474d179 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -125,11 +125,10 @@
 {
 	struct pci_controller *controller;
 
-	controller = kmalloc(sizeof(*controller), GFP_KERNEL);
+	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
 	if (!controller)
 		return NULL;
 
-	memset(controller, 0, sizeof(*controller));
 	controller->segment = seg;
 	controller->node = -1;
 	return controller;
@@ -469,10 +468,11 @@
 	}
 }
 
-static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
+void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
 {
 	pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
 }
+EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
 
 static void __devinit pcibios_fixup_bridge_resources(struct pci_dev *dev)
 {
@@ -493,6 +493,7 @@
 	}
 	list_for_each_entry(dev, &b->devices, bus_list)
 		pcibios_fixup_device_resources(dev);
+	platform_pci_fixup_bus(b);
 
 	return;
 }
@@ -562,8 +563,8 @@
 void
 pcibios_disable_device (struct pci_dev *dev)
 {
-	if (dev->is_enabled)
-		acpi_pci_irq_disable(dev);
+	BUG_ON(atomic_read(&dev->enable_cnt));
+	acpi_pci_irq_disable(dev);
 }
 
 void
@@ -738,75 +739,44 @@
 	return ret;
 }
 
+/* It's defined in drivers/pci/pci.c */
+extern u8 pci_cache_line_size;
+
 /**
- * pci_cacheline_size - determine cacheline size for PCI devices
- * @dev: void
+ * set_pci_cacheline_size - determine cacheline size for PCI devices
  *
  * We want to use the line-size of the outer-most cache.  We assume
  * that this line-size is the same for all CPUs.
  *
  * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
- *
- * RETURNS: An appropriate -ERRNO error value on eror, or zero for success.
  */
-static unsigned long
-pci_cacheline_size (void)
+static void __init set_pci_cacheline_size(void)
 {
 	u64 levels, unique_caches;
 	s64 status;
 	pal_cache_config_info_t cci;
-	static u8 cacheline_size;
-
-	if (cacheline_size)
-		return cacheline_size;
 
 	status = ia64_pal_cache_summary(&levels, &unique_caches);
 	if (status != 0) {
-		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
-		       __FUNCTION__, status);
-		return SMP_CACHE_BYTES;
+		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed "
+			"(status=%ld)\n", __FUNCTION__, status);
+		return;
 	}
 
-	status = ia64_pal_cache_config_info(levels - 1, /* cache_type (data_or_unified)= */ 2,
-					    &cci);
+	status = ia64_pal_cache_config_info(levels - 1,
+				/* cache_type (data_or_unified)= */ 2, &cci);
 	if (status != 0) {
-		printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed (status=%ld)\n",
-		       __FUNCTION__, status);
-		return SMP_CACHE_BYTES;
+		printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed "
+			"(status=%ld)\n", __FUNCTION__, status);
+		return;
 	}
-	cacheline_size = 1 << cci.pcci_line_size;
-	return cacheline_size;
+	pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
 }
 
-/**
- * pcibios_prep_mwi - helper function for drivers/pci/pci.c:pci_set_mwi()
- * @dev: the PCI device for which MWI is enabled
- *
- * For ia64, we can get the cacheline sizes from PAL.
- *
- * RETURNS: An appropriate -ERRNO error value on eror, or zero for success.
- */
-int
-pcibios_prep_mwi (struct pci_dev *dev)
+static int __init pcibios_init(void)
 {
-	unsigned long desired_linesize, current_linesize;
-	int rc = 0;
-	u8 pci_linesize;
-
-	desired_linesize = pci_cacheline_size();
-
-	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &pci_linesize);
-	current_linesize = 4 * pci_linesize;
-	if (desired_linesize != current_linesize) {
-		printk(KERN_WARNING "PCI: slot %s has incorrect PCI cache line size of %lu bytes,",
-		       pci_name(dev), current_linesize);
-		if (current_linesize > desired_linesize) {
-			printk(" expected %lu bytes instead\n", desired_linesize);
-			rc = -EINVAL;
-		} else {
-			printk(" correcting to %lu\n", desired_linesize);
-			pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, desired_linesize / 4);
-		}
-	}
-	return rc;
+	set_pci_cacheline_size();
+	return 0;
 }
+
+subsys_initcall(pcibios_init);
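
The pci.c rework above replaces the per-device pcibios_prep_mwi() hook with a one-time assignment to the generic pci_cache_line_size; the divide by 4 is there because the PCI_CACHE_LINE_SIZE config register counts 32-bit dwords, while PAL reports the line size as log2(bytes). A small hedged sketch of that unit conversion, outside the kernel:

#include <stdio.h>

/* PAL reports pcci_line_size as log2(line size in bytes); PCI config
 * space wants the cache line size in 32-bit dwords.  Mirrors the
 * (1 << cci.pcci_line_size) / 4 expression above. */
static unsigned int pal_line_size_to_pci_dwords(unsigned int pcci_line_size)
{
	return (1U << pcci_line_size) / 4;
}

int main(void)
{
	/* e.g. a 128-byte outermost cache line -> 32 dwords */
	printf("%u dwords\n", pal_line_size_to_pci_dwords(7));
	return 0;
}
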
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 2d78f34..0a59371 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -4,13 +4,14 @@
 # License.  See the file "COPYING" in the main directory of this archive
 # for more details.
 #
-# Copyright (C) 1999,2001-2005 Silicon Graphics, Inc.  All Rights Reserved.
+# Copyright (C) 1999,2001-2006 Silicon Graphics, Inc.  All Rights Reserved.
 #
 
 CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
 
 obj-y				+= setup.o bte.o bte_error.o irq.o mca.o idle.o \
-				   huberror.o io_init.o iomv.o klconflib.o pio_phys.o \
+				   huberror.o io_acpi_init.o io_common.o \
+				   io_init.o iomv.o klconflib.o pio_phys.o \
 				   sn2/
 obj-$(CONFIG_IA64_GENERIC)      += machvec.o
 obj-$(CONFIG_SGI_TIOCX)		+= tiocx.o
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
new file mode 100644
index 0000000..99d7f27
--- /dev/null
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -0,0 +1,231 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <asm/sn/types.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/sn_sal.h>
+#include "xtalk/hubdev.h"
+#include <linux/acpi.h>
+
+
+/*
+ * The code in this file will only be executed when running with
+ * a PROM that has ACPI IO support. (i.e., SN_ACPI_BASE_SUPPORT() == 1)
+ */
+
+
+/*
+ * This value must match the UUID the PROM uses
+ * (io/acpi/defblk.c) when building a vendor descriptor.
+ */
+struct acpi_vendor_uuid sn_uuid = {
+	.subtype = 0,
+	.data	= { 0x2c, 0xc6, 0xa6, 0xfe, 0x9c, 0x44, 0xda, 0x11,
+		    0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 },
+};
+
+/*
+ * Perform the early IO init in PROM.
+ */
+static s64
+sal_ioif_init(u64 *result)
+{
+	struct ia64_sal_retval isrv = {0,0,0,0};
+
+	SAL_CALL_NOLOCK(isrv,
+			SN_SAL_IOIF_INIT, 0, 0, 0, 0, 0, 0, 0);
+	*result = isrv.v0;
+	return isrv.status;
+}
+
+/*
+ * sn_hubdev_add - The 'add' function of the acpi_sn_hubdev_driver.
+ *		   Called for every "SGIHUB" or "SGITIO" device defined
+ *		   in the ACPI namespace.
+ */
+static int __init
+sn_hubdev_add(struct acpi_device *device)
+{
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	u64 addr;
+	struct hubdev_info *hubdev;
+	struct hubdev_info *hubdev_ptr;
+	int i;
+	u64 nasid;
+	struct acpi_resource *resource;
+	int ret = 0;
+	acpi_status status;
+	struct acpi_resource_vendor_typed *vendor;
+	extern void sn_common_hubdev_init(struct hubdev_info *);
+
+	status = acpi_get_vendor_resource(device->handle, METHOD_NAME__CRS,
+					  &sn_uuid, &buffer);
+	if (ACPI_FAILURE(status)) {
+		printk(KERN_ERR
+		       "sn_hubdev_add: acpi_get_vendor_resource() failed: %d\n",
+		        status);
+		return 1;
+	}
+
+	resource = buffer.pointer;
+	vendor = &resource->data.vendor_typed;
+	if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
+	    sizeof(struct hubdev_info *)) {
+		printk(KERN_ERR
+		       "sn_hubdev_add: Invalid vendor data length: %d\n",
+		        vendor->byte_length);
+		ret = 1;
+		goto exit;
+	}
+
+	memcpy(&addr, vendor->byte_data, sizeof(struct hubdev_info *));
+	hubdev_ptr = __va((struct hubdev_info *) addr);
+
+	nasid = hubdev_ptr->hdi_nasid;
+	i = nasid_to_cnodeid(nasid);
+	hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
+	*hubdev = *hubdev_ptr;
+	sn_common_hubdev_init(hubdev);
+
+exit:
+	kfree(buffer.pointer);
+	return ret;
+}
+
+/*
+ * sn_get_bussoft_ptr() - The pcibus_bussoft pointer is found in
+ *			  the ACPI Vendor resource for this bus.
+ */
+static struct pcibus_bussoft *
+sn_get_bussoft_ptr(struct pci_bus *bus)
+{
+	u64 addr;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	acpi_handle handle;
+	struct pcibus_bussoft *prom_bussoft_ptr;
+	struct acpi_resource *resource;
+	acpi_status status;
+	struct acpi_resource_vendor_typed *vendor;
+
+
+	handle = PCI_CONTROLLER(bus)->acpi_handle;
+	status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
+					  &sn_uuid, &buffer);
+	if (ACPI_FAILURE(status)) {
+		printk(KERN_ERR "get_acpi_pcibus_ptr: "
+		       "get_acpi_bussoft_info() failed: %d\n",
+		       status);
+		return NULL;
+	}
+	resource = buffer.pointer;
+	vendor = &resource->data.vendor_typed;
+
+	if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
+	     sizeof(struct pcibus_bussoft *)) {
+		printk(KERN_ERR
+		       "sn_get_bussoft_ptr: Invalid vendor data "
+		       "length %d\n", vendor->byte_length);
+		kfree(buffer.pointer);
+		return NULL;
+	}
+	memcpy(&addr, vendor->byte_data, sizeof(struct pcibus_bussoft *));
+	prom_bussoft_ptr = __va((struct pcibus_bussoft *) addr);
+	kfree(buffer.pointer);
+
+	return prom_bussoft_ptr;
+}
+
+/*
+ * sn_acpi_bus_fixup
+ */
+void
+sn_acpi_bus_fixup(struct pci_bus *bus)
+{
+	struct pci_dev *pci_dev = NULL;
+	struct pcibus_bussoft *prom_bussoft_ptr;
+	extern void sn_common_bus_fixup(struct pci_bus *,
+					struct pcibus_bussoft *);
+
+	if (!bus->parent) {	/* If root bus */
+		prom_bussoft_ptr = sn_get_bussoft_ptr(bus);
+		if (prom_bussoft_ptr == NULL) {
+			printk(KERN_ERR
+			       "sn_acpi_bus_fixup: 0x%04x:0x%02x Unable to "
+			       "obtain prom_bussoft_ptr\n",
+			       pci_domain_nr(bus), bus->number);
+			return;
+		}
+		sn_common_bus_fixup(bus, prom_bussoft_ptr);
+	}
+	list_for_each_entry(pci_dev, &bus->devices, bus_list) {
+		sn_pci_fixup_slot(pci_dev);
+	}
+}
+
+/*
+ * sn_acpi_slot_fixup - Perform any SN specific slot fixup.
+ *			At present there does not appear to be
+ *			any generic way to handle a ROM image
+ *			that has been shadowed by the PROM, so
+ *			we pass a pointer to it within the
+ *			pcidev_info structure.
+ */
+
+void
+sn_acpi_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
+{
+	void __iomem *addr;
+	size_t size;
+
+	if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) {
+		/*
+		 * A valid ROM image exists and has been shadowed by the
+		 * PROM. Setup the pci_dev ROM resource to point to
+		 * the shadowed copy.
+		 */
+		size = dev->resource[PCI_ROM_RESOURCE].end -
+				dev->resource[PCI_ROM_RESOURCE].start;
+		addr =
+		     ioremap(pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE],
+			     size);
+		dev->resource[PCI_ROM_RESOURCE].start = (unsigned long) addr;
+		dev->resource[PCI_ROM_RESOURCE].end =
+						(unsigned long) addr + size;
+		dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY;
+	}
+}
+
+static struct acpi_driver acpi_sn_hubdev_driver = {
+	.name = "SGI HUBDEV Driver",
+	.ids = "SGIHUB,SGITIO",
+	.ops = {
+		.add = sn_hubdev_add,
+		},
+};
+
+
+/*
+ * sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the
+ *		     nodes and root buses in the DSDT. As a result, bus scanning
+ *		     will be initiated by the Linux ACPI code.
+ */
+
+void __init
+sn_io_acpi_init(void)
+{
+	u64 result;
+	s64 status;
+
+	acpi_bus_register_driver(&acpi_sn_hubdev_driver);
+	status = sal_ioif_init(&result);
+	if (status || result)
+		panic("sal_ioif_init failed: [%lx] %s\n",
+		      status, ia64_sal_strerror(status));
+}
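
The routines in this new file all rely on the same convention: the PROM publishes a 64-bit physical pointer inside an ACPI vendor-typed _CRS resource tagged with sn_uuid, and the kernel fetches it back with acpi_get_vendor_resource(). A condensed sketch of that retrieval pattern follows; it is illustrative only, and sn_get_prom_ptr() is a made-up helper name, not something added by this patch.

/*
 * Illustrative sketch (not part of the patch): pull a 64-bit physical
 * pointer out of a vendor-typed _CRS resource and convert it to a
 * kernel virtual address, as sn_hubdev_add()/sn_get_bussoft_ptr() do.
 */
static void *sn_get_prom_ptr(acpi_handle handle, struct acpi_vendor_uuid *uuid)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_resource *resource;
	struct acpi_resource_vendor_typed *vendor;
	u64 addr;

	if (ACPI_FAILURE(acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
						  uuid, &buffer)))
		return NULL;

	resource = buffer.pointer;
	vendor = &resource->data.vendor_typed;
	if (vendor->byte_length - sizeof(struct acpi_vendor_uuid) !=
	    sizeof(addr)) {
		kfree(buffer.pointer);
		return NULL;
	}
	memcpy(&addr, vendor->byte_data, sizeof(addr));
	kfree(buffer.pointer);
	return __va(addr);	/* PROM hands back a physical address */
}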
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
new file mode 100644
index 0000000..d4dd8f4
--- /dev/null
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -0,0 +1,613 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/bootmem.h>
+#include <asm/sn/types.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/sn_feature_sets.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/io.h>
+#include <asm/sn/l1.h>
+#include <asm/sn/module.h>
+#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/tioca_provider.h>
+#include <asm/sn/tioce_provider.h>
+#include "xtalk/hubdev.h"
+#include "xtalk/xwidgetdev.h"
+#include <linux/acpi.h>
+#include <asm/sn/sn2/sn_hwperf.h>
+#include <asm/sn/acpi.h>
+
+extern void sn_init_cpei_timer(void);
+extern void register_sn_procfs(void);
+extern void sn_acpi_bus_fixup(struct pci_bus *);
+extern void sn_bus_fixup(struct pci_bus *);
+extern void sn_acpi_slot_fixup(struct pci_dev *, struct pcidev_info *);
+extern void sn_more_slot_fixup(struct pci_dev *, struct pcidev_info *);
+extern void sn_legacy_pci_window_fixup(struct pci_controller *, u64, u64);
+extern void sn_io_acpi_init(void);
+extern void sn_io_init(void);
+
+
+static struct list_head sn_sysdata_list;
+
+/* sysdata list struct */
+struct sysdata_el {
+	struct list_head entry;
+	void *sysdata;
+};
+
+int sn_ioif_inited;		/* SN I/O infrastructure initialized? */
+
+struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES];	/* indexed by asic type */
+
+/*
+ * Hooks and struct for unsupported pci providers
+ */
+
+static dma_addr_t
+sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
+{
+	return 0;
+}
+
+static void
+sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
+{
+	return;
+}
+
+static void *
+sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller)
+{
+	return NULL;
+}
+
+static struct sn_pcibus_provider sn_pci_default_provider = {
+	.dma_map = sn_default_pci_map,
+	.dma_map_consistent = sn_default_pci_map,
+	.dma_unmap = sn_default_pci_unmap,
+	.bus_fixup = sn_default_pci_bus_fixup,
+};
+
+/*
+ * Retrieve the DMA Flush List given nasid, widget, and device.
+ * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
+ */
+static inline u64
+sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
+			     u64 address)
+{
+	struct ia64_sal_retval ret_stuff;
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+
+	SAL_CALL_NOLOCK(ret_stuff,
+			(u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
+			(u64) nasid, (u64) widget_num,
+			(u64) device_num, (u64) address, 0, 0, 0);
+	return ret_stuff.status;
+}
+
+/*
+ * Retrieve the pci device information given the bus and device|function number.
+ */
+static inline u64
+sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
+		    u64 sn_irq_info)
+{
+	struct ia64_sal_retval ret_stuff;
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+
+	SAL_CALL_NOLOCK(ret_stuff,
+			(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
+			(u64) segment, (u64) bus_number, (u64) devfn,
+			(u64) pci_dev,
+			sn_irq_info, 0, 0);
+	return ret_stuff.v0;
+}
+
+/*
+ * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
+ *			  device.
+ */
+inline struct pcidev_info *
+sn_pcidev_info_get(struct pci_dev *dev)
+{
+	struct pcidev_info *pcidev;
+
+	list_for_each_entry(pcidev,
+			    &(SN_PLATFORM_DATA(dev)->pcidev_info), pdi_list) {
+		if (pcidev->pdi_linux_pcidev == dev)
+			return pcidev;
+	}
+	return NULL;
+}
+
+/* Older PROM flush WAR
+ *
+ * 01/16/06 -- This WAR will be in place until a new official PROM is released.
+ * Additionally note that the struct sn_flush_device_war also has to be
+ * removed from arch/ia64/sn/include/xtalk/hubdev.h
+ */
+static u8 war_implemented = 0;
+
+static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
+			       struct sn_flush_device_common *common)
+{
+	struct sn_flush_device_war *war_list;
+	struct sn_flush_device_war *dev_entry;
+	struct ia64_sal_retval isrv = {0,0,0,0};
+
+	if (!war_implemented) {
+		printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
+		       "PROM flush WAR\n");
+		war_implemented = 1;
+	}
+
+	war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
+	if (!war_list)
+		BUG();
+
+	SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
+			nasid, widget, __pa(war_list), 0, 0, 0 ,0);
+	if (isrv.status)
+		panic("sn_device_fixup_war failed: %s\n",
+		      ia64_sal_strerror(isrv.status));
+
+	dev_entry = war_list + device;
+	memcpy(common,dev_entry, sizeof(*common));
+	kfree(war_list);
+
+	return isrv.status;
+}
+
+/*
+ * sn_common_hubdev_init() - This routine is called to initialize the HUB data
+ *			     structure for each node in the system.
+ */
+void __init
+sn_common_hubdev_init(struct hubdev_info *hubdev)
+{
+
+	struct sn_flush_device_kernel *sn_flush_device_kernel;
+	struct sn_flush_device_kernel *dev_entry;
+	s64 status;
+	int widget, device, size;
+
+	/* Attach the error interrupt handlers */
+	if (hubdev->hdi_nasid & 1)	/* If TIO */
+		ice_error_init(hubdev);
+	else
+		hub_error_init(hubdev);
+
+	for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
+		hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
+
+	if (!hubdev->hdi_flush_nasid_list.widget_p)
+		return;
+
+	size = (HUB_WIDGET_ID_MAX + 1) *
+		sizeof(struct sn_flush_device_kernel *);
+	hubdev->hdi_flush_nasid_list.widget_p =
+		kzalloc(size, GFP_KERNEL);
+	if (!hubdev->hdi_flush_nasid_list.widget_p)
+		BUG();
+
+	for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
+		size = DEV_PER_WIDGET *
+			sizeof(struct sn_flush_device_kernel);
+		sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
+		if (!sn_flush_device_kernel)
+			BUG();
+
+		dev_entry = sn_flush_device_kernel;
+		for (device = 0; device < DEV_PER_WIDGET;
+		     device++, dev_entry++) {
+			size = sizeof(struct sn_flush_device_common);
+			dev_entry->common = kzalloc(size, GFP_KERNEL);
+			if (!dev_entry->common)
+				BUG();
+			if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST))
+				status = sal_get_device_dmaflush_list(
+					     hubdev->hdi_nasid, widget, device,
+					     (u64)(dev_entry->common));
+			else
+				status = sn_device_fixup_war(hubdev->hdi_nasid,
+							     widget, device,
+							     dev_entry->common);
+			if (status != SALRET_OK)
+				panic("SAL call failed: %s\n",
+				      ia64_sal_strerror(status));
+
+			spin_lock_init(&dev_entry->sfdl_flush_lock);
+		}
+
+		if (sn_flush_device_kernel)
+			hubdev->hdi_flush_nasid_list.widget_p[widget] =
+							 sn_flush_device_kernel;
+	}
+}
+
+void sn_pci_unfixup_slot(struct pci_dev *dev)
+{
+	struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev;
+
+	sn_irq_unfixup(dev);
+	pci_dev_put(host_pci_dev);
+	pci_dev_put(dev);
+}
+
+/*
+ * sn_pci_fixup_slot() - This routine sets up a slot's resources consistent
+ *			 with the Linux PCI abstraction layer. Resources
+ *			 acquired from our PCI provider include PIO maps
+ *			 to BAR space and interrupt objects.
+ */
+void sn_pci_fixup_slot(struct pci_dev *dev)
+{
+	int segment = pci_domain_nr(dev->bus);
+	int status = 0;
+	struct pcibus_bussoft *bs;
+	struct pci_bus *host_pci_bus;
+	struct pci_dev *host_pci_dev;
+	struct pcidev_info *pcidev_info;
+	struct sn_irq_info *sn_irq_info;
+	unsigned int bus_no, devfn;
+
+	pci_dev_get(dev); /* for the sysdata pointer */
+	pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
+	if (!pcidev_info)
+		BUG();		/* Cannot afford to run out of memory */
+
+	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
+	if (!sn_irq_info)
+		BUG();		/* Cannot afford to run out of memory */
+
+	/* Call to retrieve pci device information needed by kernel. */
+	status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
+				     dev->devfn,
+				     (u64) __pa(pcidev_info),
+				     (u64) __pa(sn_irq_info));
+	if (status)
+		BUG(); /* Cannot get platform pci device information */
+
+	/* Add pcidev_info to list in pci_controller.platform_data */
+	list_add_tail(&pcidev_info->pdi_list,
+		      &(SN_PLATFORM_DATA(dev->bus)->pcidev_info));
+
+	if (SN_ACPI_BASE_SUPPORT())
+		sn_acpi_slot_fixup(dev, pcidev_info);
+	else
+		sn_more_slot_fixup(dev, pcidev_info);
+	/*
+	 * Using the PROM's values for the PCI host bus, get the Linux
+	 * PCI host_pci_dev struct and set up host bus linkages.
+	 */
+
+	bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
+	devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff;
+	host_pci_bus = pci_find_bus(segment, bus_no);
+	host_pci_dev = pci_get_slot(host_pci_bus, devfn);
+
+	pcidev_info->host_pci_dev = host_pci_dev;
+	pcidev_info->pdi_linux_pcidev = dev;
+	pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev);
+	bs = SN_PCIBUS_BUSSOFT(dev->bus);
+	pcidev_info->pdi_pcibus_info = bs;
+
+	if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
+		SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
+	} else {
+		SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
+	}
+
+	/* Only set up IRQ stuff if this device has a host bus context */
+	if (bs && sn_irq_info->irq_irq) {
+		pcidev_info->pdi_sn_irq_info = sn_irq_info;
+		dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq;
+		sn_irq_fixup(dev, sn_irq_info);
+	} else {
+		pcidev_info->pdi_sn_irq_info = NULL;
+		kfree(sn_irq_info);
+	}
+}
+
+/*
+ * sn_common_bus_fixup - Perform platform specific bus fixup.
+ *			 Execute the ASIC specific fixup routine
+ *			 for this bus.
+ */
+void
+sn_common_bus_fixup(struct pci_bus *bus,
+		    struct pcibus_bussoft *prom_bussoft_ptr)
+{
+	int cnode;
+	struct pci_controller *controller;
+	struct hubdev_info *hubdev_info;
+	int nasid;
+	void *provider_soft;
+	struct sn_pcibus_provider *provider;
+	struct sn_platform_data *sn_platform_data;
+
+	controller = PCI_CONTROLLER(bus);
+	/*
+	 * Per-provider fixup.  Copies the bus soft structure from prom
+	 * to local area and links SN_PCIBUS_BUSSOFT().
+	 */
+
+	if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) {
+		printk(KERN_WARNING "sn_common_bus_fixup: Unsupported asic type, %d",
+		       prom_bussoft_ptr->bs_asic_type);
+		return;
+	}
+
+	if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
+		return;	/* no further fixup necessary */
+
+	provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
+	if (provider == NULL)
+		panic("sn_common_bus_fixup: No provider registered for this asic type, %d",
+		      prom_bussoft_ptr->bs_asic_type);
+
+	if (provider->bus_fixup)
+		provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr,
+				 controller);
+	else
+		provider_soft = NULL;
+
+	/*
+	 * Generic bus fixup goes here.  Don't reference prom_bussoft_ptr
+	 * after this point.
+	 */
+	controller->platform_data = kzalloc(sizeof(struct sn_platform_data),
+					    GFP_KERNEL);
+	if (controller->platform_data == NULL)
+		BUG();
+	sn_platform_data =
+			(struct sn_platform_data *) controller->platform_data;
+	sn_platform_data->provider_soft = provider_soft;
+	INIT_LIST_HEAD(&((struct sn_platform_data *)
+			 controller->platform_data)->pcidev_info);
+	nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
+	cnode = nasid_to_cnodeid(nasid);
+	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
+	SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
+	    &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
+
+	/*
+	 * If the node information we obtained during the fixup phase is
+	 * invalid then set controller->node to -1 (undetermined)
+	 */
+	if (controller->node >= num_online_nodes()) {
+		struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
+
+		printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u"
+		       "L_IO=%lx L_MEM=%lx BASE=%lx\n",
+		       b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
+		       b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
+		printk(KERN_WARNING "on node %d but only %d nodes online."
+		       "Association set to undetermined.\n",
+		       controller->node, num_online_nodes());
+		controller->node = -1;
+	}
+}
+
+void sn_bus_store_sysdata(struct pci_dev *dev)
+{
+	struct sysdata_el *element;
+
+	element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
+	if (!element) {
+		dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
+		return;
+	}
+	element->sysdata = SN_PCIDEV_INFO(dev);
+	list_add(&element->entry, &sn_sysdata_list);
+}
+
+void sn_bus_free_sysdata(void)
+{
+	struct sysdata_el *element;
+	struct list_head *list, *safe;
+
+	list_for_each_safe(list, safe, &sn_sysdata_list) {
+		element = list_entry(list, struct sysdata_el, entry);
+		list_del(&element->entry);
+		list_del(&(((struct pcidev_info *)
+			     (element->sysdata))->pdi_list));
+		kfree(element->sysdata);
+		kfree(element);
+	}
+	return;
+}
+
+/*
+ * hubdev_init_node() - Creates the HUB data structure and links it to its
+ *			own NODE-specific data area.
+ */
+void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
+{
+	struct hubdev_info *hubdev_info;
+	int size;
+	pg_data_t *pg;
+
+	size = sizeof(struct hubdev_info);
+
+	if (node >= num_online_nodes())	/* Headless/memless IO nodes */
+		pg = NODE_DATA(0);
+	else
+		pg = NODE_DATA(node);
+
+	hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
+
+	npda->pdinfo = (void *)hubdev_info;
+}
+
+geoid_t
+cnodeid_get_geoid(cnodeid_t cnode)
+{
+	struct hubdev_info *hubdev;
+
+	hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
+	return hubdev->hdi_geoid;
+}
+
+void sn_generate_path(struct pci_bus *pci_bus, char *address)
+{
+	nasid_t nasid;
+	cnodeid_t cnode;
+	geoid_t geoid;
+	moduleid_t moduleid;
+	u16 bricktype;
+
+	nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
+	cnode = nasid_to_cnodeid(nasid);
+	geoid = cnodeid_get_geoid(cnode);
+	moduleid = geo_module(geoid);
+
+	sprintf(address, "module_%c%c%c%c%.2d",
+		'0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)),
+		'0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)),
+		'0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)),
+		MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid));
+
+	/* Tollhouse requires slot id to be displayed */
+	bricktype = MODULE_GET_BTYPE(moduleid);
+	if ((bricktype == L1_BRICKTYPE_191010) ||
+	    (bricktype == L1_BRICKTYPE_1932))
+			sprintf(address, "%s^%d", address, geo_slot(geoid));
+}
+
+/*
+ * sn_pci_fixup_bus() - Perform SN specific setup of software structs
+ *			(pcibus_bussoft, pcidev_info) and hardware
+ *			registers, for the specified bus and devices under it.
+ */
+void __devinit
+sn_pci_fixup_bus(struct pci_bus *bus)
+{
+
+	if (SN_ACPI_BASE_SUPPORT())
+		sn_acpi_bus_fixup(bus);
+	else
+		sn_bus_fixup(bus);
+}
+
+/*
+ * sn_io_early_init - Perform early IO (and some non-IO) initialization.
+ *		      In particular, setup the sn_pci_provider[] array.
+ *		      This needs to be done prior to any bus scanning
+ *		      (acpi_scan_init()) in the ACPI case, as the SN
+ *		      bus fixup code will reference the array.
+ */
+static int __init
+sn_io_early_init(void)
+{
+	int i;
+
+	if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
+		return 0;
+
+	/*
+	 * prime sn_pci_provider[].  Individual provider init routines will
+	 * override their respective default entries.
+	 */
+
+	for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
+		sn_pci_provider[i] = &sn_pci_default_provider;
+
+	pcibr_init_provider();
+	tioca_init_provider();
+	tioce_init_provider();
+
+	/*
+	 * This is needed to avoid bounce limit checks in the blk layer
+	 */
+	ia64_max_iommu_merge_mask = ~PAGE_MASK;
+
+	sn_irq_lh_init();
+	INIT_LIST_HEAD(&sn_sysdata_list);
+	sn_init_cpei_timer();
+
+#ifdef CONFIG_PROC_FS
+	register_sn_procfs();
+#endif
+
+	printk(KERN_INFO "ACPI  DSDT OEM Rev 0x%x\n",
+	       acpi_gbl_DSDT->oem_revision);
+	if (SN_ACPI_BASE_SUPPORT())
+		sn_io_acpi_init();
+	else
+		sn_io_init();
+	return 0;
+}
+
+arch_initcall(sn_io_early_init);
+
+/*
+ * sn_io_late_init() - Perform any final platform specific IO initialization.
+ */
+
+int __init
+sn_io_late_init(void)
+{
+	struct pci_bus *bus;
+	struct pcibus_bussoft *bussoft;
+	cnodeid_t cnode;
+	nasid_t nasid;
+	cnodeid_t near_cnode;
+
+	if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
+		return 0;
+
+	/*
+	 * Setup closest node in pci_controller->node for
+	 * PIC, TIOCP, TIOCE (TIOCA does it during bus fixup using
+	 * info from the PROM).
+	 */
+	bus = NULL;
+	while ((bus = pci_find_next_bus(bus)) != NULL) {
+		bussoft = SN_PCIBUS_BUSSOFT(bus);
+		nasid = NASID_GET(bussoft->bs_base);
+		cnode = nasid_to_cnodeid(nasid);
+		if ((bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) ||
+		    (bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCE)) {
+			/* TIO PCI Bridge: find nearest node with CPUs */
+			int e = sn_hwperf_get_nearest_node(cnode, NULL,
+							   &near_cnode);
+			if (e < 0) {
+				near_cnode = (cnodeid_t)-1; /* use any node */
+				printk(KERN_WARNING "pcibr_bus_fixup: failed "
+				       "to find near node with CPUs to TIO "
+				       "node %d, err=%d\n", cnode, e);
+			}
+			PCI_CONTROLLER(bus)->node = near_cnode;
+		} else if (bussoft->bs_asic_type == PCIIO_ASIC_TYPE_PIC) {
+			PCI_CONTROLLER(bus)->node = cnode;
+		}
+	}
+
+	sn_ioif_inited = 1;	/* SN I/O infrastructure now initialized */
+
+	return 0;
+}
+
+fs_initcall(sn_io_late_init);
+
+EXPORT_SYMBOL(sn_pci_fixup_slot);
+EXPORT_SYMBOL(sn_pci_unfixup_slot);
+EXPORT_SYMBOL(sn_bus_store_sysdata);
+EXPORT_SYMBOL(sn_bus_free_sysdata);
+EXPORT_SYMBOL(sn_generate_path);
+
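
One detail worth spelling out from sn_pci_fixup_slot() in this new file: pdi_slot_host_handle packs the host bus number into bits 39:32 and the devfn into the low 32 bits, which is why the fixup shifts by 32 and masks with 0xff. A hypothetical encoder, shown only to make the assumed layout explicit (sn_pack_slot_host_handle() is not a real kernel function):

/*
 * Illustrative only (not part of the patch): the layout that
 * sn_pci_fixup_slot() decodes -- host bus number in bits 39:32,
 * devfn in bits 31:0 of pdi_slot_host_handle.
 */
static inline u64 sn_pack_slot_host_handle(unsigned int bus_no,
					   unsigned int devfn)
{
	return ((u64)(bus_no & 0xff) << 32) | (devfn & 0xffffffffULL);
}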
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index dc09a6a..9ad843e 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -3,103 +3,28 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
  */
 
-#include <linux/bootmem.h>
-#include <linux/nodemask.h>
 #include <asm/sn/types.h>
 #include <asm/sn/addrs.h>
-#include <asm/sn/sn_feature_sets.h>
-#include <asm/sn/geo.h>
 #include <asm/sn/io.h>
-#include <asm/sn/l1.h>
 #include <asm/sn/module.h>
-#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/intr.h>
 #include <asm/sn/pcibus_provider_defs.h>
 #include <asm/sn/pcidev.h>
-#include <asm/sn/simulator.h>
 #include <asm/sn/sn_sal.h>
-#include <asm/sn/tioca_provider.h>
-#include <asm/sn/tioce_provider.h>
 #include "xtalk/hubdev.h"
-#include "xtalk/xwidgetdev.h"
 
-
-extern void sn_init_cpei_timer(void);
-extern void register_sn_procfs(void);
-
-static struct list_head sn_sysdata_list;
-
-/* sysdata list struct */
-struct sysdata_el {
-	struct list_head entry;
-	void *sysdata;
-};
-
-struct slab_info {
-	struct hubdev_info hubdev;
-};
-
-struct brick {
-	moduleid_t id;		/* Module ID of this module        */
-	struct slab_info slab_info[MAX_SLABS + 1];
-};
-
-int sn_ioif_inited;		/* SN I/O infrastructure initialized? */
-
-struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES];	/* indexed by asic type */
+/*
+ * The code in this file will only be executed when running with
+ * a PROM that does _not_ have base ACPI IO support.
+ * (i.e., SN_ACPI_BASE_SUPPORT() == 0)
+ */
 
 static int max_segment_number;		 /* Default highest segment number */
 static int max_pcibus_number = 255;	/* Default highest pci bus number */
 
-/*
- * Hooks and struct for unsupported pci providers
- */
-
-static dma_addr_t
-sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
-{
-	return 0;
-}
-
-static void
-sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
-{
-	return;
-}
-
-static void *
-sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller)
-{
-	return NULL;
-}
-
-static struct sn_pcibus_provider sn_pci_default_provider = {
-	.dma_map = sn_default_pci_map,
-	.dma_map_consistent = sn_default_pci_map,
-	.dma_unmap = sn_default_pci_unmap,
-	.bus_fixup = sn_default_pci_bus_fixup,
-};
-
-/*
- * Retrieve the DMA Flush List given nasid, widget, and device.
- * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
- */
-static inline u64
-sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
-			     u64 address)
-{
-	struct ia64_sal_retval ret_stuff;
-	ret_stuff.status = 0;
-	ret_stuff.v0 = 0;
-
-	SAL_CALL_NOLOCK(ret_stuff,
-			(u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
-			(u64) nasid, (u64) widget_num,
-			(u64) device_num, (u64) address, 0, 0, 0);
-	return ret_stuff.status;
-}
 
 /*
  * Retrieve the hub device info structure for the given nasid.
@@ -131,93 +56,20 @@
 	return ret_stuff.v0;
 }
 
-/*
- * Retrieve the pci device information given the bus and device|function number.
- */
-static inline u64
-sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
-		    u64 sn_irq_info)
-{
-	struct ia64_sal_retval ret_stuff;
-	ret_stuff.status = 0;
-	ret_stuff.v0 = 0;
-
-	SAL_CALL_NOLOCK(ret_stuff,
-			(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
-			(u64) segment, (u64) bus_number, (u64) devfn,
-			(u64) pci_dev,
-			sn_irq_info, 0, 0);
-	return ret_stuff.v0;
-}
 
 /*
- * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
- *			  device.
- */
-inline struct pcidev_info *
-sn_pcidev_info_get(struct pci_dev *dev)
-{
-	struct pcidev_info *pcidev;
-
-	list_for_each_entry(pcidev,
-			    &(SN_PCI_CONTROLLER(dev)->pcidev_info), pdi_list) {
-		if (pcidev->pdi_linux_pcidev == dev) {
-			return pcidev;
-		}
-	}
-	return NULL;
-}
-
-/* Older PROM flush WAR
- *
- * 01/16/06 -- This war will be in place until a new official PROM is released.
- * Additionally note that the struct sn_flush_device_war also has to be
- * removed from arch/ia64/sn/include/xtalk/hubdev.h
- */
-static u8 war_implemented = 0;
-
-static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
-			       struct sn_flush_device_common *common)
-{
-	struct sn_flush_device_war *war_list;
-	struct sn_flush_device_war *dev_entry;
-	struct ia64_sal_retval isrv = {0,0,0,0};
-
-	if (!war_implemented) {
-		printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
-		       "PROM flush WAR\n");
-		war_implemented = 1;
-	}
-
-	war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
-	if (!war_list)
-		BUG();
-
-	SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
-			nasid, widget, __pa(war_list), 0, 0, 0 ,0);
-	if (isrv.status)
-		panic("sn_device_fixup_war failed: %s\n",
-		      ia64_sal_strerror(isrv.status));
-
-	dev_entry = war_list + device;
-	memcpy(common,dev_entry, sizeof(*common));
-	kfree(war_list);
-
-	return isrv.status;
-}
-
-/*
- * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for
- *	each node in the system.
+ * sn_fixup_ionodes() - This routine initializes the HUB data structure for
+ *			each node in the system. This function is only
+ *			executed when running with a non-ACPI capable PROM.
  */
 static void __init sn_fixup_ionodes(void)
 {
-	struct sn_flush_device_kernel *sn_flush_device_kernel;
-	struct sn_flush_device_kernel *dev_entry;
+
 	struct hubdev_info *hubdev;
 	u64 status;
 	u64 nasid;
-	int i, widget, device, size;
+	int i;
+	extern void sn_common_hubdev_init(struct hubdev_info *);
 
 	/*
 	 * Get SGI Specific HUB chipset information.
@@ -240,70 +92,47 @@
 			max_segment_number = hubdev->max_segment_number;
 			max_pcibus_number = hubdev->max_pcibus_number;
 		}
-
-		/* Attach the error interrupt handlers */
-		if (nasid & 1)
-			ice_error_init(hubdev);
-		else
-			hub_error_init(hubdev);
-
-		for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
-			hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
-
-		if (!hubdev->hdi_flush_nasid_list.widget_p)
-			continue;
-
-		size = (HUB_WIDGET_ID_MAX + 1) *
-			sizeof(struct sn_flush_device_kernel *);
-		hubdev->hdi_flush_nasid_list.widget_p =
-			kzalloc(size, GFP_KERNEL);
-		if (!hubdev->hdi_flush_nasid_list.widget_p)
-			BUG();
-
-		for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
-			size = DEV_PER_WIDGET *
-				sizeof(struct sn_flush_device_kernel);
-			sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
-			if (!sn_flush_device_kernel)
-				BUG();
-
-			dev_entry = sn_flush_device_kernel;
-			for (device = 0; device < DEV_PER_WIDGET;
-			     device++,dev_entry++) {
-				size = sizeof(struct sn_flush_device_common);
-				dev_entry->common = kzalloc(size, GFP_KERNEL);
-				if (!dev_entry->common)
-					BUG();
-
-				if (sn_prom_feature_available(
-						       PRF_DEVICE_FLUSH_LIST))
-					status = sal_get_device_dmaflush_list(
-						     nasid, widget, device,
-						     (u64)(dev_entry->common));
-				else
-					status = sn_device_fixup_war(nasid,
-						     widget, device,
-						     dev_entry->common);
-				if (status != SALRET_OK)
-					panic("SAL call failed: %s\n",
-					      ia64_sal_strerror(status));
-
-				spin_lock_init(&dev_entry->sfdl_flush_lock);
-			}
-
-			if (sn_flush_device_kernel)
-				hubdev->hdi_flush_nasid_list.widget_p[widget] =
-						       sn_flush_device_kernel;
-	        }
+		sn_common_hubdev_init(hubdev);
 	}
 }
 
 /*
+ * sn_legacy_pci_window_fixup - Create PCI controller windows for
+ *				legacy IO and MEM space. This needs to
+ *				be done here, as the PROM does not have
+ *				ACPI support defining the root buses
+ *				and their resources (_CRS).
+ */
+static void
+sn_legacy_pci_window_fixup(struct pci_controller *controller,
+			   u64 legacy_io, u64 legacy_mem)
+{
+	controller->window = kcalloc(2, sizeof(struct pci_window),
+				     GFP_KERNEL);
+	if (controller->window == NULL)
+		BUG();
+	controller->window[0].offset = legacy_io;
+	controller->window[0].resource.name = "legacy_io";
+	controller->window[0].resource.flags = IORESOURCE_IO;
+	controller->window[0].resource.start = legacy_io;
+	controller->window[0].resource.end =
+		controller->window[0].resource.start + 0xffff;
+	controller->window[0].resource.parent = &ioport_resource;
+	controller->window[1].offset = legacy_mem;
+	controller->window[1].resource.name = "legacy_mem";
+	controller->window[1].resource.flags = IORESOURCE_MEM;
+	controller->window[1].resource.start = legacy_mem;
+	controller->window[1].resource.end =
+		controller->window[1].resource.start + (1024 * 1024) - 1;
+	controller->window[1].resource.parent = &iomem_resource;
+	controller->windows = 2;
+}
+
+/*
  * sn_pci_window_fixup() - Create a pci_window for each device resource.
- *			   Until ACPI support is added, we need this code
- *			   to setup pci_windows for use by
- *			   pcibios_bus_to_resource(),
- *			   pcibios_resource_to_bus(), etc.
+ *			   It will setup pci_windows for use by
+ *			   pcibios_bus_to_resource(), pcibios_resource_to_bus(),
+ *			   etc.
  */
 static void
 sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
@@ -342,60 +171,22 @@
 	controller->window = new_window;
 }
 
-void sn_pci_unfixup_slot(struct pci_dev *dev)
-{
-	struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev;
-
-	sn_irq_unfixup(dev);
-	pci_dev_put(host_pci_dev);
-	pci_dev_put(dev);
-}
-
 /*
- * sn_pci_fixup_slot() - This routine sets up a slot's resources
- * consistent with the Linux PCI abstraction layer.  Resources acquired
- * from our PCI provider include PIO maps to BAR space and interrupt
- * objects.
+ * sn_more_slot_fixup() - We are not running with an ACPI capable PROM,
+ *			  and need to convert the pci_dev->resource
+ *			  'start' and 'end' addresses to mapped addresses,
+ *			  and setup the pci_controller->window array entries.
  */
-void sn_pci_fixup_slot(struct pci_dev *dev)
+void
+sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
 {
 	unsigned int count = 0;
 	int idx;
-	int segment = pci_domain_nr(dev->bus);
-	int status = 0;
-	struct pcibus_bussoft *bs;
- 	struct pci_bus *host_pci_bus;
- 	struct pci_dev *host_pci_dev;
-	struct pcidev_info *pcidev_info;
 	s64 pci_addrs[PCI_ROM_RESOURCE + 1];
- 	struct sn_irq_info *sn_irq_info;
- 	unsigned long size;
- 	unsigned int bus_no, devfn;
-
-	pci_dev_get(dev); /* for the sysdata pointer */
-	pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
-	if (!pcidev_info)
-		BUG();		/* Cannot afford to run out of memory */
-
-	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
-	if (!sn_irq_info)
-		BUG();		/* Cannot afford to run out of memory */
-
-	/* Call to retrieve pci device information needed by kernel. */
-	status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number, 
-				     dev->devfn,
-				     (u64) __pa(pcidev_info),
-				     (u64) __pa(sn_irq_info));
-	if (status)
-		BUG(); /* Cannot get platform pci device information */
-
-	/* Add pcidev_info to list in sn_pci_controller struct */
-	list_add_tail(&pcidev_info->pdi_list,
-		      &(SN_PCI_CONTROLLER(dev->bus)->pcidev_info));
+	unsigned long addr, end, size, start;
 
 	/* Copy over PIO Mapped Addresses */
 	for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
-		unsigned long start, end, addr;
 
 		if (!pcidev_info->pdi_pio_mapped_addr[idx]) {
 			pci_addrs[idx] = -1;
@@ -419,60 +210,28 @@
 			dev->resource[idx].parent = &ioport_resource;
 		else
 			dev->resource[idx].parent = &iomem_resource;
+		/* If ROM, mark as shadowed in PROM */
+		if (idx == PCI_ROM_RESOURCE)
+			dev->resource[idx].flags |= IORESOURCE_ROM_BIOS_COPY;
 	}
 	/* Create a pci_window in the pci_controller struct for
 	 * each device resource.
 	 */
 	if (count > 0)
 		sn_pci_window_fixup(dev, count, pci_addrs);
-
-	/*
-	 * Using the PROMs values for the PCI host bus, get the Linux
- 	 * PCI host_pci_dev struct and set up host bus linkages
- 	 */
-
-	bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
-	devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff;
- 	host_pci_bus = pci_find_bus(segment, bus_no);
- 	host_pci_dev = pci_get_slot(host_pci_bus, devfn);
-
-	pcidev_info->host_pci_dev = host_pci_dev;
-	pcidev_info->pdi_linux_pcidev = dev;
-	pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev);
-	bs = SN_PCIBUS_BUSSOFT(dev->bus);
-	pcidev_info->pdi_pcibus_info = bs;
-
-	if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
-		SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
-	} else {
-		SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
-	}
-
-	/* Only set up IRQ stuff if this device has a host bus context */
-	if (bs && sn_irq_info->irq_irq) {
-		pcidev_info->pdi_sn_irq_info = sn_irq_info;
-		dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq;
-		sn_irq_fixup(dev, sn_irq_info);
-	} else {
-		pcidev_info->pdi_sn_irq_info = NULL;
-		kfree(sn_irq_info);
-	}
 }
 
 /*
  * sn_pci_controller_fixup() - This routine sets up a bus's resources
- * consistent with the Linux PCI abstraction layer.
+ *			       consistent with the Linux PCI abstraction layer.
  */
-void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
+static void
+sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
 {
-	int status;
-	int nasid, cnode;
+	s64 status = 0;
 	struct pci_controller *controller;
-	struct sn_pci_controller *sn_controller;
 	struct pcibus_bussoft *prom_bussoft_ptr;
-	struct hubdev_info *hubdev_info;
-	void *provider_soft;
-	struct sn_pcibus_provider *provider;
+
 
  	status = sal_get_pcibus_info((u64) segment, (u64) busnum,
  				     (u64) ia64_tpa(&prom_bussoft_ptr));
@@ -480,261 +239,77 @@
 		return;		/*bus # does not exist */
 	prom_bussoft_ptr = __va(prom_bussoft_ptr);
 
-	/* Allocate a sn_pci_controller, which has a pci_controller struct
-	 * as the first member.
-	 */
-	sn_controller = kzalloc(sizeof(struct sn_pci_controller), GFP_KERNEL);
-	if (!sn_controller)
+	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
+	if (!controller)
 		BUG();
-	INIT_LIST_HEAD(&sn_controller->pcidev_info);
-	controller = &sn_controller->pci_controller;
 	controller->segment = segment;
 
-	if (bus == NULL) {
- 		bus = pci_scan_bus(busnum, &pci_root_ops, controller);
- 		if (bus == NULL)
- 			goto error_return; /* error, or bus already scanned */
- 		bus->sysdata = NULL;
-	}
-
-	if (bus->sysdata)
-		goto error_return; /* sysdata already alloc'd */
-
 	/*
-	 * Per-provider fixup.  Copies the contents from prom to local
-	 * area and links SN_PCIBUS_BUSSOFT().
+	 * Temporarily save the prom_bussoft_ptr for use by sn_bus_fixup().
+	 * (platform_data will be overwritten later in sn_common_bus_fixup())
 	 */
+	controller->platform_data = prom_bussoft_ptr;
 
-	if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES)
-		goto error_return; /* unsupported asic type */
-
-	if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
-		goto error_return; /* no further fixup necessary */
-
-	provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
-	if (provider == NULL)
-		goto error_return; /* no provider registerd for this asic */
+	bus = pci_scan_bus(busnum, &pci_root_ops, controller);
+	if (bus == NULL)
+		goto error_return; /* error, or bus already scanned */
 
 	bus->sysdata = controller;
-	if (provider->bus_fixup)
-		provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller);
-	else
-		provider_soft = NULL;
 
-	if (provider_soft == NULL) {
-		/* fixup failed or not applicable */
-		bus->sysdata = NULL;
-		goto error_return;
-	}
-
-	/*
-	 * Setup pci_windows for legacy IO and MEM space.
-	 * (Temporary until ACPI support is in place.)
-	 */
-	controller->window = kcalloc(2, sizeof(struct pci_window), GFP_KERNEL);
-	if (controller->window == NULL)
-		BUG();
-	controller->window[0].offset = prom_bussoft_ptr->bs_legacy_io;
-	controller->window[0].resource.name = "legacy_io";
-	controller->window[0].resource.flags = IORESOURCE_IO;
-	controller->window[0].resource.start = prom_bussoft_ptr->bs_legacy_io;
-	controller->window[0].resource.end =
-	    controller->window[0].resource.start + 0xffff;
-	controller->window[0].resource.parent = &ioport_resource;
-	controller->window[1].offset = prom_bussoft_ptr->bs_legacy_mem;
-	controller->window[1].resource.name = "legacy_mem";
-	controller->window[1].resource.flags = IORESOURCE_MEM;
-	controller->window[1].resource.start = prom_bussoft_ptr->bs_legacy_mem;
-	controller->window[1].resource.end =
-	    controller->window[1].resource.start + (1024 * 1024) - 1;
-	controller->window[1].resource.parent = &iomem_resource;
-	controller->windows = 2;
-
-	/*
-	 * Generic bus fixup goes here.  Don't reference prom_bussoft_ptr
-	 * after this point.
-	 */
-
-	PCI_CONTROLLER(bus)->platform_data = provider_soft;
-	nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
-	cnode = nasid_to_cnodeid(nasid);
-	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
-	SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
-	    &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
-
-	/*
-	 * If the node information we obtained during the fixup phase is invalid
-	 * then set controller->node to -1 (undetermined)
-	 */
-	if (controller->node >= num_online_nodes()) {
-		struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
-
-		printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u"
-				    "L_IO=%lx L_MEM=%lx BASE=%lx\n",
-			b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
-			b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
-		printk(KERN_WARNING "on node %d but only %d nodes online."
-			"Association set to undetermined.\n",
-			controller->node, num_online_nodes());
-		controller->node = -1;
-	}
 	return;
 
 error_return:
 
-	kfree(sn_controller);
-	return;
-}
-
-void sn_bus_store_sysdata(struct pci_dev *dev)
-{
-	struct sysdata_el *element;
-
-	element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
-	if (!element) {
-		dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
-		return;
-	}
-	element->sysdata = SN_PCIDEV_INFO(dev);
-	list_add(&element->entry, &sn_sysdata_list);
-}
-
-void sn_bus_free_sysdata(void)
-{
-	struct sysdata_el *element;
-	struct list_head *list, *safe;
-
-	list_for_each_safe(list, safe, &sn_sysdata_list) {
-		element = list_entry(list, struct sysdata_el, entry);
-		list_del(&element->entry);
-		list_del(&(((struct pcidev_info *)
-			     (element->sysdata))->pdi_list));
-		kfree(element->sysdata);
-		kfree(element);
-	}
+	kfree(controller);
 	return;
 }
 
 /*
- * Ugly hack to get PCI setup until we have a proper ACPI namespace.
+ * sn_bus_fixup
+ */
+void
+sn_bus_fixup(struct pci_bus *bus)
+{
+	struct pci_dev *pci_dev = NULL;
+	struct pcibus_bussoft *prom_bussoft_ptr;
+	extern void sn_common_bus_fixup(struct pci_bus *,
+					struct pcibus_bussoft *);
+
+
+	if (!bus->parent) {  /* If root bus */
+		prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data;
+		if (prom_bussoft_ptr == NULL) {
+			printk(KERN_ERR
+			       "sn_bus_fixup: 0x%04x:0x%02x Unable to "
+			       "obtain prom_bussoft_ptr\n",
+			       pci_domain_nr(bus), bus->number);
+			return;
+		}
+		sn_common_bus_fixup(bus, prom_bussoft_ptr);
+		sn_legacy_pci_window_fixup(PCI_CONTROLLER(bus),
+					   prom_bussoft_ptr->bs_legacy_io,
+					   prom_bussoft_ptr->bs_legacy_mem);
+	}
+	list_for_each_entry(pci_dev, &bus->devices, bus_list) {
+		sn_pci_fixup_slot(pci_dev);
+	}
+
+}
+
+/*
+ * sn_io_init - PROM does not have ACPI support to define nodes or root buses,
+ *		so we need to do things the hard way, including initiating the
+ *		bus scanning ourselves.
  */
 
-#define PCI_BUSES_TO_SCAN 256
-
-static int __init sn_pci_init(void)
+void __init sn_io_init(void)
 {
 	int i, j;
-	struct pci_dev *pci_dev = NULL;
 
-	if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
-		return 0;
-
-	/*
-	 * prime sn_pci_provider[].  Individial provider init routines will
-	 * override their respective default entries.
-	 */
-
-	for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
-		sn_pci_provider[i] = &sn_pci_default_provider;
-
-	pcibr_init_provider();
-	tioca_init_provider();
-	tioce_init_provider();
-
-	/*
-	 * This is needed to avoid bounce limit checks in the blk layer
-	 */
-	ia64_max_iommu_merge_mask = ~PAGE_MASK;
 	sn_fixup_ionodes();
-	sn_irq_lh_init();
-	INIT_LIST_HEAD(&sn_sysdata_list);
-	sn_init_cpei_timer();
-
-#ifdef CONFIG_PROC_FS
-	register_sn_procfs();
-#endif
 
 	/* busses are not known yet ... */
 	for (i = 0; i <= max_segment_number; i++)
 		for (j = 0; j <= max_pcibus_number; j++)
 			sn_pci_controller_fixup(i, j, NULL);
-
-	/*
-	 * Generic Linux PCI Layer has created the pci_bus and pci_dev 
-	 * structures - time for us to add our SN PLatform specific 
-	 * information.
-	 */
-
-	while ((pci_dev =
-		pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL)
-		sn_pci_fixup_slot(pci_dev);
-
-	sn_ioif_inited = 1;	/* sn I/O infrastructure now initialized */
-
-	return 0;
 }
-
-/*
- * hubdev_init_node() - Creates the HUB data structure and link them to it's 
- *	own NODE specific data area.
- */
-void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
-{
-	struct hubdev_info *hubdev_info;
-	int size;
-	pg_data_t *pg;
-
-	size = sizeof(struct hubdev_info);
-
-	if (node >= num_online_nodes())	/* Headless/memless IO nodes */
-		pg = NODE_DATA(0);
-	else
-		pg = NODE_DATA(node);
-
-	hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
-
-	npda->pdinfo = (void *)hubdev_info;
-}
-
-geoid_t
-cnodeid_get_geoid(cnodeid_t cnode)
-{
-	struct hubdev_info *hubdev;
-
-	hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
-	return hubdev->hdi_geoid;
-}
-
-void sn_generate_path(struct pci_bus *pci_bus, char *address)
-{
-	nasid_t nasid;
-	cnodeid_t cnode;
-	geoid_t geoid;
-	moduleid_t moduleid;
-	u16 bricktype;
-
-	nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
-	cnode = nasid_to_cnodeid(nasid);
-	geoid = cnodeid_get_geoid(cnode);
-	moduleid = geo_module(geoid);
-
-	sprintf(address, "module_%c%c%c%c%.2d",
-		'0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)),
-		'0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)),
-		'0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)),
-		MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid));
-
-	/* Tollhouse requires slot id to be displayed */
-	bricktype = MODULE_GET_BTYPE(moduleid);
-	if ((bricktype == L1_BRICKTYPE_191010) ||
-	    (bricktype == L1_BRICKTYPE_1932))
-			sprintf(address, "%s^%d", address, geo_slot(geoid));
-}
-
-subsys_initcall(sn_pci_init);
-EXPORT_SYMBOL(sn_pci_fixup_slot);
-EXPORT_SYMBOL(sn_pci_unfixup_slot);
-EXPORT_SYMBOL(sn_pci_controller_fixup);
-EXPORT_SYMBOL(sn_bus_store_sysdata);
-EXPORT_SYMBOL(sn_bus_free_sysdata);
-EXPORT_SYMBOL(sn_generate_path);
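
sn_legacy_pci_window_fixup() above registers two pci_window entries so the generic ia64 PCI code can translate bus-relative legacy I/O and memory addresses to system addresses. Conceptually the translation is a plain offset; a minimal sketch, under the assumption that window[0] is the "legacy_io" window created above (this is illustrative, not the actual pcibios helpers):

/*
 * Illustrative sketch: a bus-relative legacy port on this controller
 * lives at window->offset + port in the system address space.
 */
static u64 legacy_io_bus_to_sys(struct pci_controller *controller, u64 port)
{
	struct pci_window *window = &controller->window[0];

	if (port > window->resource.end - window->resource.start)
		return 0;	/* outside the 64K legacy I/O window */
	return window->offset + port;
}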
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
index 7ce3cda..4aa4f30 100644
--- a/arch/ia64/sn/kernel/iomv.c
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -3,10 +3,11 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2003, 2006 Silicon Graphics, Inc. All rights reserved.
  */
 
 #include <linux/module.h>
+#include <linux/acpi.h>
 #include <asm/io.h>
 #include <asm/delay.h>
 #include <asm/vga.h>
@@ -15,6 +16,7 @@
 #include <asm/sn/pda.h>
 #include <asm/sn/sn_cpuid.h>
 #include <asm/sn/shub_mmr.h>
+#include <asm/sn/acpi.h>
 
 #define IS_LEGACY_VGA_IOPORT(p) \
 	(((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df))
@@ -31,11 +33,14 @@
 {
 	if (!IS_RUNNING_ON_SIMULATOR()) {
 		if (IS_LEGACY_VGA_IOPORT(port))
-			port += vga_console_iobase;
+			return (__ia64_mk_io_addr(port));
 		/* On sn2, legacy I/O ports don't point at anything */
 		if (port < (64 * 1024))
 			return NULL;
-		return ((void *)(port | __IA64_UNCACHED_OFFSET));
+		if (SN_ACPI_BASE_SUPPORT())
+			return (__ia64_mk_io_addr(port));
+		else
+			return ((void *)(port | __IA64_UNCACHED_OFFSET));
 	} else {
 		/* but the simulator uses them... */
 		unsigned long addr;
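
Together with the io_space[0] setup added to setup.c later in this series, the __sn_mk_io_addr() change above routes legacy VGA ports (and, on ACPI-capable PROMs, all ports) through the generic ia64 io_space table instead of or-ing in the uncached offset. For the simple dense (non-sparse) case the lookup amounts to roughly the following; this is a simplified model of the mapping, not the real __ia64_mk_io_addr():

/*
 * Rough model only (assumption, not the real ia64 helper): for a dense
 * io_space entry such as the one setup.c points at the VGA bus, a
 * legacy port resolves to the space's MMIO base plus the port number.
 */
static void __iomem *dense_port_to_addr(struct io_space *space,
					unsigned long port)
{
	return (void __iomem *)(space->mmio_base | port);
}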
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 0b49459..8c5bee0 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -117,7 +117,10 @@
 				       nasid_t nasid, int slice)
 {
 	int vector;
+	int cpuid;
+#ifdef CONFIG_SMP
 	int cpuphys;
+#endif
 	int64_t bridge;
 	int local_widget, status;
 	nasid_t local_nasid;
@@ -146,7 +149,6 @@
 	vector = sn_irq_info->irq_irq;
 	/* Free the old PROM new_irq_info structure */
 	sn_intr_free(local_nasid, local_widget, new_irq_info);
-	/* Update kernels new_irq_info with new target info */
 	unregister_intr_pda(new_irq_info);
 
 	/* allocate a new PROM new_irq_info struct */
@@ -160,8 +162,10 @@
 		return NULL;
 	}
 
-	cpuphys = nasid_slice_to_cpuid(nasid, slice);
-	new_irq_info->irq_cpuid = cpuphys;
+	/* Update the kernel's new_irq_info with new target info */
+	cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid,
+				     new_irq_info->irq_slice);
+	new_irq_info->irq_cpuid = cpuid;
 	register_intr_pda(new_irq_info);
 
 	pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
@@ -180,6 +184,7 @@
 	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
 
 #ifdef CONFIG_SMP
+	cpuphys = cpu_physical_id(cpuid);
 	set_irq_affinity_info((vector & 0xff), cpuphys, 0);
 #endif
 
@@ -299,6 +304,9 @@
 	nasid_t nasid = sn_irq_info->irq_nasid;
 	int slice = sn_irq_info->irq_slice;
 	int cpu = nasid_slice_to_cpuid(nasid, slice);
+#ifdef CONFIG_SMP
+	int cpuphys;
+#endif
 
 	pci_dev_get(pci_dev);
 	sn_irq_info->irq_cpuid = cpu;
@@ -311,6 +319,10 @@
 	spin_unlock(&sn_irq_info_lock);
 
 	register_intr_pda(sn_irq_info);
+#ifdef CONFIG_SMP
+	cpuphys = cpu_physical_id(cpu);
+	set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
+#endif
 }
 
 void sn_irq_unfixup(struct pci_dev *pci_dev)
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index 6ffd1f8..b3a435f 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -136,10 +136,6 @@
 	 */
 	msg.data = 0x100 + irq;
 
-#ifdef CONFIG_SMP
-	set_irq_affinity_info(irq, sn_irq_info->irq_cpuid, 0);
-#endif
-
 	write_msi_msg(irq, &msg);
 	set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
 
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 7a2d824..a934ad0 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -388,6 +388,14 @@
 	ia64_sn_plat_set_error_handling_features();	// obsolete
 	ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
 	ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES);
+	/*
+	 * Note: The calls to notify the PROM of ACPI and PCI Segment
+	 *	 support must be done prior to acpi_load_tables(), as
+	 *	 an ACPI capable PROM will rebuild the DSDT as a result
+	 *	 of the call.
+	 */
+	ia64_sn_set_os_feature(OSF_PCISEGMENT_ENABLE);
+	ia64_sn_set_os_feature(OSF_ACPI_ENABLE);
 
 
 #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
@@ -413,6 +421,16 @@
 	if (! vga_console_membase)
 		sn_scan_pcdp();
 
+	/*
+	 *	Set up legacy IO space.
+	 *	vga_console_iobase maps to PCI IO Space address 0 on the
+	 *	bus containing the VGA console.
+	 */
+	if (vga_console_iobase) {
+		io_space[0].mmio_base = vga_console_iobase;
+		io_space[0].sparse = 0;
+	}
+
 	if (vga_console_membase) {
 		/* usable vga ... make tty0 the preferred default console */
 		if (!strstr(*cmdline_p, "console="))
@@ -751,5 +769,13 @@
 		return 0;
 	return test_bit(id, sn_prom_features);
 }
+
+void
+sn_kernel_launch_event(void)
+{
+	/* ignore status until we understand possible failure, if any */
+	if (ia64_sn_kernel_launch_event())
+		printk(KERN_ERR "KEXEC is not supported in this PROM; please update the PROM.\n");
+}
 EXPORT_SYMBOL(sn_prom_feature_available);
 
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index feaf1a6..493380b 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -552,7 +552,7 @@
 	bus_unregister(&tiocx_bus_type);
 }
 
-subsys_initcall(tiocx_init);
+fs_initcall(tiocx_init);
 module_exit(tiocx_exit);
 
 /************************************************************************
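
The subsys_initcall() to fs_initcall() switch here is an ordering change, presumably so that tiocx_init() runs after the bus scanning and SN fixups introduced earlier in this series. Assuming the standard initcall levels (arch = 3, subsys = 4, fs = 5), the intended sequence is sn_io_early_init() at arch level, the generic ACPI/PCI bus scan at subsys level, then sn_io_late_init() and tiocx_init() at fs level. A minimal sketch of that level ordering:

/*
 * Sketch of the assumed initcall ordering (lower level runs first):
 *   arch_initcall()   - level 3: sn_io_early_init() primes the providers
 *   subsys_initcall() - level 4: generic ACPI/PCI bus scanning
 *   fs_initcall()     - level 5: sn_io_late_init(), tiocx_init()
 */
static int __init example_before_bus_scan(void) { return 0; }
static int __init example_after_bus_scan(void)  { return 0; }
arch_initcall(example_before_bus_scan);
fs_initcall(example_after_bus_scan);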
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 27dd7df..6846dc9 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2004, 2006 Silicon Graphics, Inc. All rights reserved.
  */
 
 #include <linux/interrupt.h>
@@ -109,7 +109,6 @@
 pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
 {
 	int nasid, cnode, j;
-	cnodeid_t near_cnode;
 	struct hubdev_info *hubdev_info;
 	struct pcibus_info *soft;
 	struct sn_flush_device_kernel *sn_flush_device_kernel;
@@ -186,20 +185,6 @@
 		return NULL;
 	}
 
-	if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) {
-		/* TIO PCI Bridge: find nearest node with CPUs */
-		int e = sn_hwperf_get_nearest_node(cnode, NULL, &near_cnode);
-
-		if (e < 0) {
-			near_cnode = (cnodeid_t)-1; /* use any node */
-			printk(KERN_WARNING "pcibr_bus_fixup: failed to find "
-				"near node with CPUs to TIO node %d, err=%d\n",
-				cnode, e);
-		}
-		controller->node = near_cnode;
-	}
-	else
-		controller->node = cnode;
 	return soft;
 }
 
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 46e16dc..35f854f 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -15,7 +15,6 @@
 #include <asm/sn/pcidev.h>
 #include <asm/sn/pcibus_provider_defs.h>
 #include <asm/sn/tioce_provider.h>
-#include <asm/sn/sn2/sn_hwperf.h>
 
 /*
  * 1/26/2006
@@ -990,8 +989,6 @@
 static void *
 tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
 {
-	int my_nasid;
-	cnodeid_t my_cnode, mem_cnode;
 	struct tioce_common *tioce_common;
 	struct tioce_kernel *tioce_kern;
 	struct tioce __iomem *tioce_mmr;
@@ -1035,21 +1032,6 @@
 		       tioce_common->ce_pcibus.bs_persist_segment,
 		       tioce_common->ce_pcibus.bs_persist_busnum);
 
-	/*
-	 * identify closest nasid for memory allocations
-	 */
-
-	my_nasid = NASID_GET(tioce_common->ce_pcibus.bs_base);
-	my_cnode = nasid_to_cnodeid(my_nasid);
-
-	if (sn_hwperf_get_nearest_node(my_cnode, &mem_cnode, NULL) < 0) {
-		printk(KERN_WARNING "tioce_bus_fixup: failed to find "
-		       "closest node with MEM to TIO node %d\n", my_cnode);
-		mem_cnode = (cnodeid_t)-1; /* use any node */
-	}
-
-	controller->node = mem_cnode;
-
 	return tioce_common;
 }
 
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
index 0e7778b..936205f 100644
--- a/arch/m32r/kernel/setup.c
+++ b/arch/m32r/kernel/setup.c
@@ -196,9 +196,7 @@
 	if (LOADER_TYPE && INITRD_START) {
 		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
 			reserve_bootmem(INITRD_START, INITRD_SIZE);
-			initrd_start = INITRD_START ?
-				INITRD_START + PAGE_OFFSET : 0;
-
+			initrd_start = INITRD_START + PAGE_OFFSET;
 			initrd_end = initrd_start + INITRD_SIZE;
 			printk("initrd:start[%08lx],size[%08lx]\n",
 				initrd_start, INITRD_SIZE);
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index b60cea4..092ea86 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -21,7 +21,7 @@
 #include <linux/unistd.h>
 #include <linux/stddef.h>
 #include <linux/personality.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
diff --git a/arch/m32r/lib/csum_partial_copy.c b/arch/m32r/lib/csum_partial_copy.c
index 3d5f061..5596f3d 100644
--- a/arch/m32r/lib/csum_partial_copy.c
+++ b/arch/m32r/lib/csum_partial_copy.c
@@ -27,9 +27,8 @@
 /*
  * Copy while checksumming, otherwise like csum_partial
  */
-unsigned int
-csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst,
-                           int len, unsigned int sum)
+__wsum
+csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum)
 {
 	sum = csum_partial(src, len, sum);
 	memcpy(dst, src, len);
@@ -42,10 +41,9 @@
  * Copy from userspace and compute checksum.  If we catch an exception
  * then zero the rest of the buffer.
  */
-unsigned int
-csum_partial_copy_from_user (const unsigned char __user *src,
-			     unsigned char *dst,
-			     int len, unsigned int sum, int *err_ptr)
+__wsum
+csum_partial_copy_from_user (const void __user *src, void *dst,
+			     int len, __wsum sum, int *err_ptr)
 {
 	int missing;
 
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index abb34cc..c7efdb0 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -105,9 +105,7 @@
 		if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
 			reserve_bootmem_node(NODE_DATA(0), INITRD_START,
 				INITRD_SIZE);
-			initrd_start = INITRD_START ?
-				INITRD_START + PAGE_OFFSET : 0;
-
+			initrd_start = INITRD_START + PAGE_OFFSET;
 			initrd_end = initrd_start + INITRD_SIZE;
 			printk("initrd:start[%08lx],size[%08lx]\n",
 				initrd_start, INITRD_SIZE);
diff --git a/arch/m68k/amiga/chipram.c b/arch/m68k/amiga/chipram.c
index de1304c..fa015d8 100644
--- a/arch/m68k/amiga/chipram.c
+++ b/arch/m68k/amiga/chipram.c
@@ -52,10 +52,9 @@
 #ifdef DEBUG
     printk("amiga_chip_alloc: allocate %ld bytes\n", size);
 #endif
-    res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+    res = kzalloc(sizeof(struct resource), GFP_KERNEL);
     if (!res)
 	return NULL;
-    memset(res, 0, sizeof(struct resource));
     res->name = name;
 
     if (allocate_resource(&chipram_res, res, size, 0, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0) {
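
This hunk (and the hades-pci.c one below) is the routine kmalloc()-plus-memset() to kzalloc() cleanup; the two spellings are equivalent, kzalloc() simply returns zeroed memory. A minimal before/after sketch for reference:

/* Before: allocate, then clear by hand. */
res = kmalloc(sizeof(struct resource), GFP_KERNEL);
if (res)
	memset(res, 0, sizeof(struct resource));

/* After: kzalloc() hands back already-zeroed memory. */
res = kzalloc(sizeof(struct resource), GFP_KERNEL);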
diff --git a/arch/m68k/atari/hades-pci.c b/arch/m68k/atari/hades-pci.c
index 6ca57b6..bee2b14 100644
--- a/arch/m68k/atari/hades-pci.c
+++ b/arch/m68k/atari/hades-pci.c
@@ -375,10 +375,9 @@
 	 * Allocate memory for bus info structure.
 	 */
 
-	bus = kmalloc(sizeof(struct pci_bus_info), GFP_KERNEL);
+	bus = kzalloc(sizeof(struct pci_bus_info), GFP_KERNEL);
 	if (!bus)
 		return NULL;
-	memset(bus, 0, sizeof(struct pci_bus_info));
 
 	/*
 	 * Claim resources. The m68k has no separate I/O space, both
diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c
index cb13c6e..aed3be2 100644
--- a/arch/m68k/lib/checksum.c
+++ b/arch/m68k/lib/checksum.c
@@ -39,8 +39,7 @@
  * computes a partial checksum, e.g. for TCP/UDP fragments
  */
 
-unsigned int
-csum_partial (const unsigned char *buff, int len, unsigned int sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
 {
 	unsigned long tmp1, tmp2;
 	  /*
@@ -133,9 +132,9 @@
  * copy from user space while checksumming, with exception handling.
  */
 
-unsigned int
-csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
-			    int len, int sum, int *csum_err)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst,
+			    int len, __wsum sum, int *csum_err)
 {
 	/*
 	 * GCC doesn't like more than 10 operands for the asm
@@ -325,8 +324,8 @@
  * copy from kernel space while checksumming, otherwise like csum_partial
  */
 
-unsigned int
-csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len, int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
 	unsigned long tmp1, tmp2;
 	__asm__("movel %2,%4\n\t"
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 911f2ce..2adbeb1 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -99,7 +99,7 @@
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
 	down_read(&mm->mmap_sem);
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 6d920d4..aa70dde 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -173,7 +173,7 @@
 	  On many SoC style CPUs the master CPU clock is also used to drive 
 	  on-chip peripherals. The clock that is distributed to these
 	  peripherals is sometimes a fixed ratio of the master clock
-	  frequency. If so then set this to the divider ration of the
+	  frequency. If so then set this to the divider ratio of the
 	  master clock to the peripheral clock. If not sure then select 1.
 
 config OLDMASK
@@ -192,7 +192,7 @@
 	  Support for the Palm Pilot 1000/5000, Personal/Pro and PalmIII.
 
 config XCOPILOT_BUGS
-	bool "  (X)Copilot support"
+	bool "(X)Copilot support"
 	depends on PILOT3
 	help
 	  Support the bugs of Xcopilot.
@@ -216,20 +216,20 @@
 	  Support for the DragenEngine II board.
 
 config DIRECT_IO_ACCESS
-	bool "  Allow user to access IO directly"
+	bool "Allow user to access IO directly"
 	depends on (UCSIMM || UCDIMM || DRAGEN2)
 	help
 	  Disable the CPU internal registers protection in user mode,
           to allow a user application to read/write them.
 
 config INIT_LCD
-	bool "  Initialize LCD"
+	bool "Initialize LCD"
 	depends on (UCSIMM || UCDIMM || DRAGEN2)
 	help
 	  Initialize the LCD controller of the 68x328 processor.
 
 config MEMORY_RESERVE
-	int "  Memory reservation (MiB)"
+	int "Memory reservation (MiB)"
 	depends on (UCSIMM || UCDIMM)
 	help
 	  Reserve certain memory regions on 68x328 based boards.
@@ -409,7 +409,7 @@
 	  Support for the Netburner MOD-5272 board.
 
 config ROMFS_FROM_ROM
-	bool "  ROMFS image not RAM resident"
+	bool "ROMFS image not RAM resident"
 	depends on (NETtel || SNAPGEAR)
 	help
 	  The ROMfs filesystem will stay resident in the FLASH/ROM, not be
@@ -565,7 +565,7 @@
 	depends on ROM
 	help
 	  This is almost always the same as the base of the ROM. Since on all
-	  68000 type varients the vectors are at the base of the boot device
+	  68000 type variants the vectors are at the base of the boot device
 	  on system startup.
 
 config ROMVECSIZE
@@ -574,7 +574,7 @@
 	depends on ROM
 	help
 	  Define the size of the vector region in ROM. For most 68000
-	  varients this would be 0x400 bytes in size. Set to 0 if you do
+	  variants this would be 0x400 bytes in size. Set to 0 if you do
 	  not want a vector region at the start of the ROM.
 
 config ROMSTART
diff --git a/arch/m68knommu/kernel/m68k_ksyms.c b/arch/m68knommu/kernel/m68k_ksyms.c
index 1e62150..25327c9 100644
--- a/arch/m68knommu/kernel/m68k_ksyms.c
+++ b/arch/m68knommu/kernel/m68k_ksyms.c
@@ -38,7 +38,7 @@
 EXPORT_SYMBOL(kernel_thread);
 
 /* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
 
 /* The following are special because they're not called
    explicitly (the C compiler generates them).  Fortunately,
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c
index c18a833..941955d 100644
--- a/arch/m68knommu/kernel/process.c
+++ b/arch/m68knommu/kernel/process.c
@@ -290,7 +290,7 @@
 	unsigned char	*tp;
 	int		i;
 
-	printk(KERN_EMERG "\nCURRENT PROCESS:\n\n");
+	printk(KERN_EMERG "\n" KERN_EMERG "CURRENT PROCESS:\n" KERN_EMERG "\n");
 	printk(KERN_EMERG "COMM=%s PID=%d\n", current->comm, current->pid);
 
 	if (current->mm) {
@@ -301,7 +301,8 @@
 			(int) current->mm->end_data,
 			(int) current->mm->end_data,
 			(int) current->mm->brk);
-		printk(KERN_EMERG "USER-STACK=%08x  KERNEL-STACK=%08x\n\n",
+		printk(KERN_EMERG "USER-STACK=%08x KERNEL-STACK=%08x\n"
+			KERN_EMERG "\n",
 			(int) current->mm->start_stack,
 			(int)(((unsigned long) current) + THREAD_SIZE));
 	}
@@ -312,36 +313,35 @@
 		fp->d0, fp->d1, fp->d2, fp->d3);
 	printk(KERN_EMERG "d4: %08lx    d5: %08lx    a0: %08lx    a1: %08lx\n",
 		fp->d4, fp->d5, fp->a0, fp->a1);
-	printk(KERN_EMERG "\nUSP: %08x   TRAPFRAME: %08x\n", (unsigned int) rdusp(),
-		(unsigned int) fp);
+	printk(KERN_EMERG "\n" KERN_EMERG "USP: %08x   TRAPFRAME: %08x\n",
+		(unsigned int) rdusp(), (unsigned int) fp);
 
-	printk(KERN_EMERG "\nCODE:");
+	printk(KERN_EMERG "\n" KERN_EMERG "CODE:");
 	tp = ((unsigned char *) fp->pc) - 0x20;
 	for (sp = (unsigned long *) tp, i = 0; (i < 0x40);  i += 4) {
 		if ((i % 0x10) == 0)
-			printk(KERN_EMERG "\n%08x: ", (int) (tp + i));
-		printk(KERN_EMERG "%08x ", (int) *sp++);
+			printk("\n" KERN_EMERG "%08x: ", (int) (tp + i));
+		printk("%08x ", (int) *sp++);
 	}
-	printk(KERN_EMERG "\n");
+	printk("\n" KERN_EMERG "\n");
 
-	printk(KERN_EMERG "\nKERNEL STACK:");
+	printk(KERN_EMERG "KERNEL STACK:");
 	tp = ((unsigned char *) fp) - 0x40;
 	for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
 		if ((i % 0x10) == 0)
-			printk(KERN_EMERG "\n%08x: ", (int) (tp + i));
-		printk(KERN_EMERG "%08x ", (int) *sp++);
+			printk("\n" KERN_EMERG "%08x: ", (int) (tp + i));
+		printk("%08x ", (int) *sp++);
 	}
-	printk(KERN_EMERG "\n");
-	printk(KERN_EMERG "\n");
+	printk("\n" KERN_EMERG "\n");
 
-	printk(KERN_EMERG "\nUSER STACK:");
+	printk(KERN_EMERG "USER STACK:");
 	tp = (unsigned char *) (rdusp() - 0x10);
 	for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) {
 		if ((i % 0x10) == 0)
-			printk(KERN_EMERG "\n%08x: ", (int) (tp + i));
-		printk(KERN_EMERG "%08x ", (int) *sp++);
+			printk("\n" KERN_EMERG "%08x: ", (int) (tp + i));
+		printk("%08x ", (int) *sp++);
 	}
-	printk(KERN_EMERG "\n\n");
+	printk("\n" KERN_EMERG "\n");
 }
 
 /*
diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c
index 7b21959..9cf2e4d 100644
--- a/arch/m68knommu/kernel/setup.c
+++ b/arch/m68knommu/kernel/setup.c
@@ -36,10 +36,7 @@
 #include <asm/setup.h>
 #include <asm/irq.h>
 #include <asm/machdep.h>
-
-#ifdef CONFIG_BLK_DEV_INITRD
 #include <asm/pgtable.h>
-#endif
 
 unsigned long memory_start;
 unsigned long memory_end;
diff --git a/arch/m68knommu/kernel/sys_m68k.c b/arch/m68knommu/kernel/sys_m68k.c
index c3494b8..3265b2d 100644
--- a/arch/m68knommu/kernel/sys_m68k.c
+++ b/arch/m68knommu/kernel/sys_m68k.c
@@ -137,7 +137,7 @@
 asmlinkage int sys_ipc (uint call, int first, int second,
 			int third, void *ptr, long fifth)
 {
-	int version;
+	int version, ret;
 
 	version = call >> 16; /* hack for backward compatibility */
 	call &= 0xffff;
@@ -190,6 +190,27 @@
 		default:
 			return -EINVAL;
 		}
+	if (call <= SHMCTL)
+		switch (call) {
+		case SHMAT:
+			switch (version) {
+			default: {
+				ulong raddr;
+				ret = do_shmat (first, ptr, second, &raddr);
+				if (ret)
+					return ret;
+				return put_user (raddr, (ulong __user *) third);
+			}
+			}
+		case SHMDT:
+			return sys_shmdt (ptr);
+		case SHMGET:
+			return sys_shmget (first, second, third);
+		case SHMCTL:
+			return sys_shmctl (first, second, ptr);
+		default:
+			return -ENOSYS;
+		}
 
 	return -EINVAL;
 }
diff --git a/arch/m68knommu/kernel/traps.c b/arch/m68knommu/kernel/traps.c
index 17649d2..9129b3a 100644
--- a/arch/m68knommu/kernel/traps.c
+++ b/arch/m68knommu/kernel/traps.c
@@ -127,11 +127,12 @@
 		if (stack + 1 > endstack)
 			break;
 		if (i % 8 == 0)
-			printk(KERN_EMERG "\n       ");
-		printk(KERN_EMERG " %08lx", *stack++);
+			printk("\n" KERN_EMERG "       ");
+		printk(" %08lx", *stack++);
 	}
+	printk("\n");
 
-	printk(KERN_EMERG "\nCall Trace:");
+	printk(KERN_EMERG "Call Trace:");
 	i = 0;
 	while (stack + 1 <= endstack) {
 		addr = *stack++;
@@ -146,12 +147,12 @@
 		if (((addr >= (unsigned long) &_start) &&
 		     (addr <= (unsigned long) &_etext))) {
 			if (i % 4 == 0)
-				printk(KERN_EMERG "\n       ");
-			printk(KERN_EMERG " [<%08lx>]", addr);
+				printk("\n" KERN_EMERG "       ");
+			printk(" [<%08lx>]", addr);
 			i++;
 		}
 	}
-	printk(KERN_EMERG "\n");
+	printk("\n");
 }
 
 void bad_super_trap(struct frame *fp)
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index 58afa8b..2b2a10d 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -60,6 +60,7 @@
 #endif
 
 	.text : {
+		_text = .;
 		_stext = . ;
         	*(.text)
 		SCHED_TEXT
diff --git a/arch/m68knommu/lib/checksum.c b/arch/m68knommu/lib/checksum.c
index 7bec6fd..269d83b 100644
--- a/arch/m68knommu/lib/checksum.c
+++ b/arch/m68knommu/lib/checksum.c
@@ -96,9 +96,9 @@
  *	This is a version of ip_compute_csum() optimized for IP headers,
  *	which always checksum on 4 octet boundaries.
  */
-unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
-	return ~do_csum(iph,ihl*4);
+	return (__force __sum16)~do_csum(iph,ihl*4);
 }
 
 /*
@@ -113,15 +113,15 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
 {
 	unsigned int result = do_csum(buff, len);
 
 	/* add in old sum, and carry.. */
-	result += sum;
-	if (sum > result)
+	result += (__force u32)sum;
+	if ((__force u32)sum > result)
 		result += 1;
-	return result;
+	return (__force __wsum)result;
 }
 
 EXPORT_SYMBOL(csum_partial);
@@ -130,21 +130,21 @@
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-unsigned short ip_compute_csum(const unsigned char * buff, int len)
+__sum16 ip_compute_csum(const void *buff, int len)
 {
-	return ~do_csum(buff,len);
+	return (__force __sum16)~do_csum(buff,len);
 }
 
 /*
  * copy from fs while checksumming, otherwise like csum_partial
  */
 
-unsigned int
-csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst,
-			    int len, int sum, int *csum_err)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst,
+			    int len, __wsum sum, int *csum_err)
 {
 	if (csum_err) *csum_err = 0;
-	memcpy(dst, src, len);
+	memcpy(dst, (__force const void *)src, len);
 	return csum_partial(dst, len, sum);
 }
 
@@ -152,8 +152,8 @@
  * copy from ds while checksumming, otherwise like csum_partial
  */
 
-unsigned int
-csum_partial_copy(const unsigned char *src, unsigned char *dst, int len, int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
 	memcpy(dst, src, len);
 	return csum_partial(dst, len, sum);
diff --git a/arch/m68knommu/platform/5307/head.S b/arch/m68knommu/platform/5307/head.S
index f2edb64..b9aa0ca 100644
--- a/arch/m68knommu/platform/5307/head.S
+++ b/arch/m68knommu/platform/5307/head.S
@@ -64,6 +64,26 @@
 	negl	%d0			/* negate bits */
 .endm
 
+#elif defined(CONFIG_M520x)
+.macro GET_MEM_SIZE
+	clrl	%d0
+	movel	MCF_MBAR+MCFSIM_SDCS0, %d2 /* Get SDRAM chip select 0 config */
+	andl	#0x1f, %d2		/* Get only the chip select size */
+	beq	3f			/* Check if it is enabled */
+	addql	#1, %d2			/* Form exponent */
+	moveql	#1, %d0
+	lsll	%d2, %d0		/* 2 ^ exponent */
+3:
+	movel	MCF_MBAR+MCFSIM_SDCS1, %d2 /* Get SDRAM chip select 1 config */
+	andl	#0x1f, %d2		/* Get only the chip select size */
+	beq	4f			/* Check if it is enabled */
+	addql	#1, %d2			/* Form exponent */
+	moveql	#1, %d1
+	lsll	%d2, %d1		/* 2 ^ exponent */
+	addl	%d1, %d0		/* Total size of SDRAM in d0 */
+4:
+.endm
+
 #else
 #error "ERROR: I don't know how to probe your boards memory size?"
 #endif
diff --git a/arch/m68knommu/platform/5307/timers.c b/arch/m68knommu/platform/5307/timers.c
index 24781f0..e5668af 100644
--- a/arch/m68knommu/platform/5307/timers.c
+++ b/arch/m68knommu/platform/5307/timers.c
@@ -3,7 +3,7 @@
 /*
  *	timers.c -- generic ColdFire hardware timer support.
  *
- *	Copyright (C) 1999-2003, Greg Ungerer (gerg@snapgear.com)
+ *	Copyright (C) 1999-2006, Greg Ungerer (gerg@snapgear.com)
  */
 
 /***************************************************************************/
@@ -44,6 +44,14 @@
 extern void mcf_settimericr(int timer, int level);
 extern int mcf_timerirqpending(int timer);
 
+#if defined(CONFIG_M532x)
+#define	__raw_readtrr	__raw_readl
+#define	__raw_writetrr	__raw_writel
+#else
+#define	__raw_readtrr	__raw_readw
+#define	__raw_writetrr	__raw_writew
+#endif
+
 /***************************************************************************/
 
 void coldfire_tick(void)
@@ -57,7 +65,7 @@
 void coldfire_timer_init(irqreturn_t (*handler)(int, void *, struct pt_regs *))
 {
 	__raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR));
-	__raw_writew(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR));
+	__raw_writetrr(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR));
 	__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
 		MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR));
 
@@ -76,7 +84,7 @@
 	unsigned long trr, tcn, offset;
 
 	tcn = __raw_readw(TA(MCFTIMER_TCN));
-	trr = __raw_readw(TA(MCFTIMER_TRR));
+	trr = __raw_readtrr(TA(MCFTIMER_TRR));
 	offset = (tcn * (1000000 / HZ)) / trr;
 
 	/* Check if we just wrapped the counters and maybe missed a tick */
@@ -120,7 +128,7 @@
 	/* Set up TIMER 2 as high speed profile clock */
 	__raw_writew(MCFTIMER_TMR_DISABLE, PA(MCFTIMER_TMR));
 
-	__raw_writew(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
+	__raw_writetrr(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
 	__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
 		MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, PA(MCFTIMER_TMR));
 
diff --git a/arch/m68knommu/platform/68360/config.c b/arch/m68knommu/platform/68360/config.c
index c5482e3..1b36f62 100644
--- a/arch/m68knommu/platform/68360/config.c
+++ b/arch/m68knommu/platform/68360/config.c
@@ -114,7 +114,7 @@
 {
 }
 
-int BSP_hwclk(int op, struct hwclk_time *t)
+int BSP_hwclk(int op, struct rtc_time *t)
 {
   if (!op) {
     /* read */
diff --git a/arch/m68knommu/platform/68360/head-ram.S b/arch/m68knommu/platform/68360/head-ram.S
index 2ea5147..2ef0624 100644
--- a/arch/m68knommu/platform/68360/head-ram.S
+++ b/arch/m68knommu/platform/68360/head-ram.S
@@ -25,6 +25,7 @@
 .global _periph_base
 
 #define	RAMEND                      (CONFIG_RAMBASE + CONFIG_RAMSIZE)
+#define	ROMEND                      (CONFIG_ROMBASE + CONFIG_ROMSIZE)
 
 #define REGB                        0x1000
 #define PEPAR                       (_dprbase + REGB + 0x0016)
@@ -175,7 +176,7 @@
 	move.l	%d0, BR0
 
 configure_chip_select_1:
-	move.l	#__rom_end, %d0
+	move.l	#ROMEND, %d0
 	subi.l	#__rom_start, %d0
 	subq.l	#0x01, %d0
 	eori.l	#SIM_OR_MASK, %d0
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 1443024..d8af858f 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -16,6 +16,7 @@
 	bool "4G Systems MTX-1 board"
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
+	select RESOURCES_64BIT if PCI
 	select SOC_AU1500
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -32,6 +33,7 @@
 	select SOC_AU1000
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
+	select RESOURCES_64BIT if PCI
 	select SWAP_IO_SPACE
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -41,6 +43,7 @@
 	select SOC_AU1100
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
+	select RESOURCES_64BIT if PCI
 	select SWAP_IO_SPACE
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -50,6 +53,7 @@
 	select SOC_AU1500
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
+	select RESOURCES_64BIT if PCI
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 
@@ -59,6 +63,7 @@
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
 	select MIPS_DISABLE_OBSOLETE_IDE
+	select RESOURCES_64BIT if PCI
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 
@@ -67,6 +72,7 @@
 	select SOC_AU1200
 	select DMA_NONCOHERENT
 	select MIPS_DISABLE_OBSOLETE_IDE
+	select RESOURCES_64BIT if PCI
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 
@@ -75,6 +81,7 @@
 	select SOC_AU1000
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
+	select RESOURCES_64BIT if PCI
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 
@@ -91,6 +98,7 @@
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
 	select MIPS_DISABLE_OBSOLETE_IDE
+	select RESOURCES_64BIT if PCI
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -101,6 +109,7 @@
 	select HW_HAS_PCI
 	select DMA_NONCOHERENT
 	select MIPS_DISABLE_OBSOLETE_IDE
+	select RESOURCES_64BIT if PCI
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 
@@ -233,6 +242,7 @@
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config MIPS_ATLAS
 	bool "MIPS Atlas board"
@@ -256,6 +266,7 @@
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_MULTITHREADING if EXPERIMENTAL
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 	help
 	  This enables support for the MIPS Technologies Atlas evaluation
 	  board.
@@ -266,8 +277,8 @@
 	select BOOT_ELF32
 	select HAVE_STD_PC_SERIAL_PORT
 	select DMA_NONCOHERENT
-	select IRQ_CPU
 	select GENERIC_ISA_DMA
+	select IRQ_CPU
 	select HW_HAS_PCI
 	select I8259
 	select MIPS_BOARDS_GEN
@@ -410,6 +421,7 @@
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_64BIT_KERNEL
 	select SYS_SUPPORTS_BIG_ENDIAN
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 	help
 	  The Ocelot is a MIPS-based Single Board Computer (SBC) made by
 	  Momentum Computer <http://www.momenco.com/>.
@@ -534,7 +546,7 @@
 	select HW_HAS_EISA
 	select IP22_CPU_SCACHE
 	select IRQ_CPU
-	select NO_ISA if ISA
+	select GENERIC_ISA_DMA_SUPPORT_BROKEN
 	select SWAP_IO_SPACE
 	select SYS_HAS_CPU_R4X00
 	select SYS_HAS_CPU_R5000
@@ -560,6 +572,7 @@
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_NUMA
 	select SYS_SUPPORTS_SMP
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 	help
 	  This are the SGI Origin 200, Origin 2000 and Onyx 2 Graphics
 	  workstations.  To compile a Linux kernel that runs on these, say Y
@@ -766,6 +779,23 @@
 
 endchoice
 
+config KEXEC
+ 	bool "Kexec system call (EXPERIMENTAL)"
+ 	depends on EXPERIMENTAL
+ 	help
+ 	  kexec is a system call that implements the ability to shut down your
+ 	  current kernel, and to start another kernel.  It is like a reboot
+ 	  but it is independent of the system firmware.   And like a reboot
+ 	  you can start any kernel with it, not just Linux.
+
+ 	  The name comes from the similarity to the exec system call.
+
+ 	  It is an ongoing process to be certain the hardware in a machine
+ 	  is properly shut down, so do not be surprised if this code does not
+ 	  initially work for you.  It may help to enable device hotplugging
+ 	  support.  As of this writing the exact hardware interface is
+ 	  strongly in flux, so no good recommendation can be made.
+
 source "arch/mips/ddb5xxx/Kconfig"
 source "arch/mips/gt64120/ev64120/Kconfig"
 source "arch/mips/jazz/Kconfig"
@@ -809,6 +839,10 @@
 	bool
 	default y
 
+config GENERIC_HARDIRQS_NO__DO_IRQ
+	bool
+	default n
+
 #
 # Select some configuration options automatically based on user selections.
 #
@@ -864,8 +898,11 @@
 config MIPS_DISABLE_OBSOLETE_IDE
 	bool
 
+config GENERIC_ISA_DMA_SUPPORT_BROKEN
+	bool
+
 #
-# Endianess selection.  Suffiently obscure so many users don't know what to
+# Endianess selection.  Sufficiently obscure so many users don't know what to
 # answer,so we try hard to limit the available choices.  Also the use of a
 # choice statement should be more obvious to the user.
 #
@@ -874,7 +911,7 @@
 	help
 	  Some MIPS machines can be configured for either little or big endian
 	  byte order. These modes require different kernels and a different
-	  Linux distribution.  In general there is one prefered byteorder for a
+	  Linux distribution.  In general there is one preferred byteorder for a
 	  particular system but some systems are just as commonly used in the
 	  one or the other endianess.
 
@@ -967,6 +1004,7 @@
 	select HW_HAS_PCI
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_32BIT_KERNEL
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config SWAP_IO_SPACE
 	bool
@@ -1248,6 +1286,7 @@
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
+	select WEAK_ORDERING
 
 config CPU_SB1
 	bool "SB1"
@@ -1256,6 +1295,7 @@
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
+	select WEAK_ORDERING
 
 endchoice
 
@@ -1316,6 +1356,8 @@
 config SYS_HAS_CPU_SB1
 	bool
 
+config WEAK_ORDERING
+	bool
 endmenu
 
 #
@@ -1835,13 +1877,11 @@
 config ISA
 	bool
 
-config NO_ISA
-	bool
-
 config EISA
 	bool "EISA support"
 	depends on HW_HAS_EISA
 	select ISA
+	select GENERIC_ISA_DMA
 	---help---
 	  The Extended Industry Standard Architecture (EISA) bus was
 	  developed as an open alternative to the IBM MicroChannel bus.
@@ -1922,6 +1962,11 @@
 	depends on MIPS32_COMPAT
 	default y
 
+config SYSVIPC_COMPAT
+	bool
+	depends on COMPAT && SYSVIPC
+	default y
+
 config MIPS32_O32
 	bool "Kernel support for o32 binaries"
 	depends on MIPS32_COMPAT
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index d580d46..641aa30 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -63,9 +63,7 @@
 ifdef CONFIG_BUILD_ELF64
 cflags-y		+= $(call cc-option,-mno-explicit-relocs)
 else
-# -msym32 can not be used for modules since they are loaded into XKSEG
-CFLAGS_MODULE		+= $(call cc-option,-mno-explicit-relocs)
-CFLAGS_KERNEL		+= $(call cc-option,-msym32)
+cflags-y		+= $(call cc-option,-msym32)
 endif
 endif
 
diff --git a/arch/mips/au1000/common/irq.c b/arch/mips/au1000/common/irq.c
index 2abe132..9cf7b671 100644
--- a/arch/mips/au1000/common/irq.c
+++ b/arch/mips/au1000/common/irq.c
@@ -70,7 +70,6 @@
 extern void mips_timer_interrupt(void);
 
 static void setup_local_irq(unsigned int irq, int type, int int_req);
-static unsigned int startup_irq(unsigned int irq);
 static void end_irq(unsigned int irq_nr);
 static inline void mask_and_ack_level_irq(unsigned int irq_nr);
 static inline void mask_and_ack_rise_edge_irq(unsigned int irq_nr);
@@ -84,20 +83,6 @@
 static DEFINE_SPINLOCK(irq_lock);
 
 
-static unsigned int startup_irq(unsigned int irq_nr)
-{
-	local_enable_irq(irq_nr);
-	return 0;
-}
-
-
-static void shutdown_irq(unsigned int irq_nr)
-{
-	local_disable_irq(irq_nr);
-	return;
-}
-
-
 inline void local_enable_irq(unsigned int irq_nr)
 {
 	if (irq_nr > AU1000_LAST_INTC0_INT) {
@@ -249,41 +234,37 @@
 
 static struct irq_chip rise_edge_irq_type = {
 	.typename = "Au1000 Rise Edge",
-	.startup = startup_irq,
-	.shutdown = shutdown_irq,
-	.enable = local_enable_irq,
-	.disable = local_disable_irq,
 	.ack = mask_and_ack_rise_edge_irq,
+	.mask = local_disable_irq,
+	.mask_ack = mask_and_ack_rise_edge_irq,
+	.unmask = local_enable_irq,
 	.end = end_irq,
 };
 
 static struct irq_chip fall_edge_irq_type = {
 	.typename = "Au1000 Fall Edge",
-	.startup = startup_irq,
-	.shutdown = shutdown_irq,
-	.enable = local_enable_irq,
-	.disable = local_disable_irq,
 	.ack = mask_and_ack_fall_edge_irq,
+	.mask = local_disable_irq,
+	.mask_ack = mask_and_ack_fall_edge_irq,
+	.unmask = local_enable_irq,
 	.end = end_irq,
 };
 
 static struct irq_chip either_edge_irq_type = {
 	.typename = "Au1000 Rise or Fall Edge",
-	.startup = startup_irq,
-	.shutdown = shutdown_irq,
-	.enable = local_enable_irq,
-	.disable = local_disable_irq,
 	.ack = mask_and_ack_either_edge_irq,
+	.mask = local_disable_irq,
+	.mask_ack = mask_and_ack_either_edge_irq,
+	.unmask = local_enable_irq,
 	.end = end_irq,
 };
 
 static struct irq_chip level_irq_type = {
 	.typename = "Au1000 Level",
-	.startup = startup_irq,
-	.shutdown = shutdown_irq,
-	.enable = local_enable_irq,
-	.disable = local_disable_irq,
 	.ack = mask_and_ack_level_irq,
+	.mask = local_disable_irq,
+	.mask_ack = mask_and_ack_level_irq,
+	.unmask = local_enable_irq,
 	.end = end_irq,
 };
 
@@ -328,31 +309,31 @@
 				au_writel(1<<(irq_nr-32), IC1_CFG2CLR);
 				au_writel(1<<(irq_nr-32), IC1_CFG1CLR);
 				au_writel(1<<(irq_nr-32), IC1_CFG0SET);
-				irq_desc[irq_nr].chip = &rise_edge_irq_type;
+				set_irq_chip(irq_nr, &rise_edge_irq_type);
 				break;
 			case INTC_INT_FALL_EDGE: /* 0:1:0 */
 				au_writel(1<<(irq_nr-32), IC1_CFG2CLR);
 				au_writel(1<<(irq_nr-32), IC1_CFG1SET);
 				au_writel(1<<(irq_nr-32), IC1_CFG0CLR);
-				irq_desc[irq_nr].chip = &fall_edge_irq_type;
+				set_irq_chip(irq_nr, &fall_edge_irq_type);
 				break;
 			case INTC_INT_RISE_AND_FALL_EDGE: /* 0:1:1 */
 				au_writel(1<<(irq_nr-32), IC1_CFG2CLR);
 				au_writel(1<<(irq_nr-32), IC1_CFG1SET);
 				au_writel(1<<(irq_nr-32), IC1_CFG0SET);
-				irq_desc[irq_nr].chip = &either_edge_irq_type;
+				set_irq_chip(irq_nr, &either_edge_irq_type);
 				break;
 			case INTC_INT_HIGH_LEVEL: /* 1:0:1 */
 				au_writel(1<<(irq_nr-32), IC1_CFG2SET);
 				au_writel(1<<(irq_nr-32), IC1_CFG1CLR);
 				au_writel(1<<(irq_nr-32), IC1_CFG0SET);
-				irq_desc[irq_nr].chip = &level_irq_type;
+				set_irq_chip(irq_nr, &level_irq_type);
 				break;
 			case INTC_INT_LOW_LEVEL: /* 1:1:0 */
 				au_writel(1<<(irq_nr-32), IC1_CFG2SET);
 				au_writel(1<<(irq_nr-32), IC1_CFG1SET);
 				au_writel(1<<(irq_nr-32), IC1_CFG0CLR);
-				irq_desc[irq_nr].chip = &level_irq_type;
+				set_irq_chip(irq_nr, &level_irq_type);
 				break;
 			case INTC_INT_DISABLED: /* 0:0:0 */
 				au_writel(1<<(irq_nr-32), IC1_CFG0CLR);
@@ -380,31 +361,31 @@
 				au_writel(1<<irq_nr, IC0_CFG2CLR);
 				au_writel(1<<irq_nr, IC0_CFG1CLR);
 				au_writel(1<<irq_nr, IC0_CFG0SET);
-				irq_desc[irq_nr].chip = &rise_edge_irq_type;
+				set_irq_chip(irq_nr, &rise_edge_irq_type);
 				break;
 			case INTC_INT_FALL_EDGE: /* 0:1:0 */
 				au_writel(1<<irq_nr, IC0_CFG2CLR);
 				au_writel(1<<irq_nr, IC0_CFG1SET);
 				au_writel(1<<irq_nr, IC0_CFG0CLR);
-				irq_desc[irq_nr].chip = &fall_edge_irq_type;
+				set_irq_chip(irq_nr, &fall_edge_irq_type);
 				break;
 			case INTC_INT_RISE_AND_FALL_EDGE: /* 0:1:1 */
 				au_writel(1<<irq_nr, IC0_CFG2CLR);
 				au_writel(1<<irq_nr, IC0_CFG1SET);
 				au_writel(1<<irq_nr, IC0_CFG0SET);
-				irq_desc[irq_nr].chip = &either_edge_irq_type;
+				set_irq_chip(irq_nr, &either_edge_irq_type);
 				break;
 			case INTC_INT_HIGH_LEVEL: /* 1:0:1 */
 				au_writel(1<<irq_nr, IC0_CFG2SET);
 				au_writel(1<<irq_nr, IC0_CFG1CLR);
 				au_writel(1<<irq_nr, IC0_CFG0SET);
-				irq_desc[irq_nr].chip = &level_irq_type;
+				set_irq_chip(irq_nr, &level_irq_type);
 				break;
 			case INTC_INT_LOW_LEVEL: /* 1:1:0 */
 				au_writel(1<<irq_nr, IC0_CFG2SET);
 				au_writel(1<<irq_nr, IC0_CFG1SET);
 				au_writel(1<<irq_nr, IC0_CFG0CLR);
-				irq_desc[irq_nr].chip = &level_irq_type;
+				set_irq_chip(irq_nr, &level_irq_type);
 				break;
 			case INTC_INT_DISABLED: /* 0:0:0 */
 				au_writel(1<<irq_nr, IC0_CFG0CLR);
diff --git a/arch/mips/au1000/pb1200/board_setup.c b/arch/mips/au1000/pb1200/board_setup.c
index 8b953b9..043302b 100644
--- a/arch/mips/au1000/pb1200/board_setup.c
+++ b/arch/mips/au1000/pb1200/board_setup.c
@@ -55,7 +55,7 @@
 #endif
 
 extern void _board_init_irq(void);
-extern void	(*board_init_irq)(void);
+extern void (*board_init_irq)(void);
 
 void board_reset (void)
 {
@@ -151,11 +151,7 @@
 #endif
 
 	/* Setup Pb1200 External Interrupt Controller */
-	{
-		extern void (*board_init_irq)(void);
-		extern void _board_init_irq(void);
-		board_init_irq = _board_init_irq;
-	}
+	board_init_irq = _board_init_irq;
 }
 
 int
diff --git a/arch/mips/cobalt/irq.c b/arch/mips/cobalt/irq.c
index 82e569d..4c46f0e 100644
--- a/arch/mips/cobalt/irq.c
+++ b/arch/mips/cobalt/irq.c
@@ -45,25 +45,22 @@
 {
 	unsigned int mask, pending, devfn;
 
-	mask = GALILEO_INL(GT_INTRMASK_OFS);
-	pending = GALILEO_INL(GT_INTRCAUSE_OFS) & mask;
+	mask = GT_READ(GT_INTRMASK_OFS);
+	pending = GT_READ(GT_INTRCAUSE_OFS) & mask;
 
-	if (pending & GALILEO_INTR_T0EXP) {
-
-		GALILEO_OUTL(~GALILEO_INTR_T0EXP, GT_INTRCAUSE_OFS);
+	if (pending & GT_INTR_T0EXP_MSK) {
+		GT_WRITE(GT_INTRCAUSE_OFS, ~GT_INTR_T0EXP_MSK);
 		do_IRQ(COBALT_GALILEO_IRQ);
-
-	} else if (pending & GALILEO_INTR_RETRY_CTR) {
-
-		devfn = GALILEO_INL(GT_PCI0_CFGADDR_OFS) >> 8;
-		GALILEO_OUTL(~GALILEO_INTR_RETRY_CTR, GT_INTRCAUSE_OFS);
-		printk(KERN_WARNING "Galileo: PCI retry count exceeded (%02x.%u)\n",
-			PCI_SLOT(devfn), PCI_FUNC(devfn));
-
+	} else if (pending & GT_INTR_RETRYCTR0_MSK) {
+		devfn = GT_READ(GT_PCI0_CFGADDR_OFS) >> 8;
+		GT_WRITE(GT_INTRCAUSE_OFS, ~GT_INTR_RETRYCTR0_MSK);
+		printk(KERN_WARNING
+		       "Galileo: PCI retry count exceeded (%02x.%u)\n",
+		       PCI_SLOT(devfn), PCI_FUNC(devfn));
 	} else {
-
-		GALILEO_OUTL(mask & ~pending, GT_INTRMASK_OFS);
-		printk(KERN_WARNING "Galileo: masking unexpected interrupt %08x\n", pending);
+		GT_WRITE(GT_INTRMASK_OFS, mask & ~pending);
+		printk(KERN_WARNING
+		       "Galileo: masking unexpected interrupt %08x\n", pending);
 	}
 }
 
@@ -104,7 +101,7 @@
 	 * Mask all Galileo interrupts. The Galileo
 	 * handler is set in cobalt_timer_setup()
 	 */
-	GALILEO_OUTL(0, GT_INTRMASK_OFS);
+	GT_WRITE(GT_INTRMASK_OFS, 0);
 
 	init_i8259_irqs();				/*  0 ... 15 */
 	mips_cpu_irq_init(COBALT_CPU_IRQ);		/* 16 ... 23 */
diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c
index bf9dc72..e8f0f20 100644
--- a/arch/mips/cobalt/setup.c
+++ b/arch/mips/cobalt/setup.c
@@ -51,23 +51,23 @@
 void __init plat_timer_setup(struct irqaction *irq)
 {
 	/* Load timer value for HZ (TCLK is 50MHz) */
-	GALILEO_OUTL(50*1000*1000 / HZ, GT_TC0_OFS);
+	GT_WRITE(GT_TC0_OFS, 50*1000*1000 / HZ);
 
 	/* Enable timer */
-	GALILEO_OUTL(GALILEO_ENTC0 | GALILEO_SELTC0, GT_TC_CONTROL_OFS);
+	GT_WRITE(GT_TC_CONTROL_OFS, GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
 
 	/* Register interrupt */
 	setup_irq(COBALT_GALILEO_IRQ, irq);
 
 	/* Enable interrupt */
-	GALILEO_OUTL(GALILEO_INTR_T0EXP | GALILEO_INL(GT_INTRMASK_OFS), GT_INTRMASK_OFS);
+	GT_WRITE(GT_INTRMASK_OFS, GT_INTR_T0EXP_MSK | GT_READ(GT_INTRMASK_OFS));
 }
 
 extern struct pci_ops gt64111_pci_ops;
 
 static struct resource cobalt_mem_resource = {
-	.start	= GT64111_MEM_BASE,
-	.end	= GT64111_MEM_END,
+	.start	= GT_DEF_PCI0_MEM0_BASE,
+	.end	= GT_DEF_PCI0_MEM0_BASE + GT_DEF_PCI0_MEM0_SIZE - 1,
 	.name	= "PCI memory",
 	.flags	= IORESOURCE_MEM
 };
@@ -115,7 +115,7 @@
 	.mem_resource	= &cobalt_mem_resource,
 	.mem_offset	= 0,
 	.io_resource	= &cobalt_io_resource,
-	.io_offset	= 0 - GT64111_IO_BASE
+	.io_offset	= 0 - GT_DEF_PCI0_IO_BASE,
 };
 
 void __init plat_mem_setup(void)
@@ -128,7 +128,7 @@
 	_machine_halt = cobalt_machine_halt;
 	pm_power_off = cobalt_machine_power_off;
 
-        set_io_port_base(CKSEG1ADDR(GT64111_IO_BASE));
+	set_io_port_base(CKSEG1ADDR(GT_DEF_PCI0_IO_BASE));
 
 	/* I/O port resource must include UART and LCD/buttons */
 	ioport_resource.end = 0x0fffffff;
@@ -139,7 +139,7 @@
 
         /* Read the cobalt id register out of the PCI config space */
         PCI_CFG_SET(devfn, (VIA_COBALT_BRD_ID_REG & ~0x3));
-        cobalt_board_id = GALILEO_INL(GT_PCI0_CFGDATA_OFS);
+        cobalt_board_id = GT_READ(GT_PCI0_CFGDATA_OFS);
         cobalt_board_id >>= ((VIA_COBALT_BRD_ID_REG & 3) * 8);
         cobalt_board_id = VIA_COBALT_BRD_REG_to_ID(cobalt_board_id);
 
diff --git a/arch/mips/ddb5xxx/ddb5477/irq_5477.c b/arch/mips/ddb5xxx/ddb5477/irq_5477.c
index ba52705..96249aa 100644
--- a/arch/mips/ddb5xxx/ddb5477/irq_5477.c
+++ b/arch/mips/ddb5xxx/ddb5477/irq_5477.c
@@ -53,14 +53,6 @@
 	ll_vrc5477_irq_disable(irq - vrc5477_irq_base);
 }
 
-static unsigned int vrc5477_irq_startup(unsigned int irq)
-{
-	vrc5477_irq_enable(irq);
-	return 0;
-}
-
-#define	vrc5477_irq_shutdown	vrc5477_irq_disable
-
 static void
 vrc5477_irq_ack(unsigned int irq)
 {
@@ -91,11 +83,10 @@
 
 struct irq_chip vrc5477_irq_controller = {
 	.typename = "vrc5477_irq",
-	.startup = vrc5477_irq_startup,
-	.shutdown = vrc5477_irq_shutdown,
-	.enable = vrc5477_irq_enable,
-	.disable = vrc5477_irq_disable,
 	.ack = vrc5477_irq_ack,
+	.mask = vrc5477_irq_disable,
+	.mask_ack = vrc5477_irq_ack,
+	.unmask = vrc5477_irq_enable,
 	.end = vrc5477_irq_end
 };
 
@@ -103,12 +94,8 @@
 {
 	u32 i;
 
-	for (i= irq_base; i< irq_base+ NUM_5477_IRQ; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &vrc5477_irq_controller;
-	}
+	for (i= irq_base; i< irq_base+ NUM_5477_IRQ; i++)
+		set_irq_chip(i, &vrc5477_irq_controller);
 
 	vrc5477_irq_base = irq_base;
 }
diff --git a/arch/mips/dec/ecc-berr.c b/arch/mips/dec/ecc-berr.c
index 3e374d0..6d55e8a 100644
--- a/arch/mips/dec/ecc-berr.c
+++ b/arch/mips/dec/ecc-berr.c
@@ -18,7 +18,6 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/spinlock.h>
 #include <linux/types.h>
 
 #include <asm/addrspace.h>
@@ -26,6 +25,7 @@
 #include <asm/cpu.h>
 #include <asm/irq_regs.h>
 #include <asm/processor.h>
+#include <asm/ptrace.h>
 #include <asm/system.h>
 #include <asm/traps.h>
 
@@ -231,13 +231,10 @@
 static inline void dec_kn02_be_init(void)
 {
 	volatile u32 *csr = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR);
-	unsigned long flags;
 
 	kn0x_erraddr = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_ERRADDR);
 	kn0x_chksyn = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CHKSYN);
 
-	spin_lock_irqsave(&kn02_lock, flags);
-
 	/* Preset write-only bits of the Control Register cache. */
 	cached_kn02_csr = *csr | KN02_CSR_LEDS;
 
@@ -247,8 +244,6 @@
 	cached_kn02_csr |= KN02_CSR_CORRECT;
 	*csr = cached_kn02_csr;
 	iob();
-
-	spin_unlock_irqrestore(&kn02_lock, flags);
 }
 
 static inline void dec_kn03_be_init(void)
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 31dd47d..b251ef8 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -267,7 +267,7 @@
 		LONG_L	s0, TI_REGS($28)
 		LONG_S	sp, TI_REGS($28)
 		PTR_LA	ra, ret_from_irq
-		j	do_IRQ
+		j	dec_irq_dispatch
 		 nop
 
 #ifdef CONFIG_32BIT
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c
index 41cd2a9..4c7cb40 100644
--- a/arch/mips/dec/ioasic-irq.c
+++ b/arch/mips/dec/ioasic-irq.c
@@ -13,7 +13,6 @@
 
 #include <linux/init.h>
 #include <linux/irq.h>
-#include <linux/spinlock.h>
 #include <linux/types.h>
 
 #include <asm/dec/ioasic.h>
@@ -21,8 +20,6 @@
 #include <asm/dec/ioasic_ints.h>
 
 
-static DEFINE_SPINLOCK(ioasic_lock);
-
 static int ioasic_irq_base;
 
 
@@ -52,65 +49,30 @@
 	ioasic_write(IO_REG_SIR, sir);
 }
 
-static inline void enable_ioasic_irq(unsigned int irq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ioasic_lock, flags);
-	unmask_ioasic_irq(irq);
-	spin_unlock_irqrestore(&ioasic_lock, flags);
-}
-
-static inline void disable_ioasic_irq(unsigned int irq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ioasic_lock, flags);
-	mask_ioasic_irq(irq);
-	spin_unlock_irqrestore(&ioasic_lock, flags);
-}
-
-
-static inline unsigned int startup_ioasic_irq(unsigned int irq)
-{
-	enable_ioasic_irq(irq);
-	return 0;
-}
-
-#define shutdown_ioasic_irq disable_ioasic_irq
-
 static inline void ack_ioasic_irq(unsigned int irq)
 {
-	spin_lock(&ioasic_lock);
 	mask_ioasic_irq(irq);
-	spin_unlock(&ioasic_lock);
 	fast_iob();
 }
 
 static inline void end_ioasic_irq(unsigned int irq)
 {
 	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		enable_ioasic_irq(irq);
+		unmask_ioasic_irq(irq);
 }
 
 static struct irq_chip ioasic_irq_type = {
 	.typename = "IO-ASIC",
-	.startup = startup_ioasic_irq,
-	.shutdown = shutdown_ioasic_irq,
-	.enable = enable_ioasic_irq,
-	.disable = disable_ioasic_irq,
 	.ack = ack_ioasic_irq,
-	.end = end_ioasic_irq,
+	.mask = mask_ioasic_irq,
+	.mask_ack = ack_ioasic_irq,
+	.unmask = unmask_ioasic_irq,
 };
 
 
-#define startup_ioasic_dma_irq startup_ioasic_irq
+#define unmask_ioasic_dma_irq unmask_ioasic_irq
 
-#define shutdown_ioasic_dma_irq shutdown_ioasic_irq
-
-#define enable_ioasic_dma_irq enable_ioasic_irq
-
-#define disable_ioasic_dma_irq disable_ioasic_irq
+#define mask_ioasic_dma_irq mask_ioasic_irq
 
 #define ack_ioasic_dma_irq ack_ioasic_irq
 
@@ -123,11 +85,10 @@
 
 static struct irq_chip ioasic_dma_irq_type = {
 	.typename = "IO-ASIC-DMA",
-	.startup = startup_ioasic_dma_irq,
-	.shutdown = shutdown_ioasic_dma_irq,
-	.enable = enable_ioasic_dma_irq,
-	.disable = disable_ioasic_dma_irq,
 	.ack = ack_ioasic_dma_irq,
+	.mask = mask_ioasic_dma_irq,
+	.mask_ack = ack_ioasic_dma_irq,
+	.unmask = unmask_ioasic_dma_irq,
 	.end = end_ioasic_dma_irq,
 };
 
@@ -140,18 +101,11 @@
 	ioasic_write(IO_REG_SIMR, 0);
 	fast_iob();
 
-	for (i = base; i < base + IO_INR_DMA; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &ioasic_irq_type;
-	}
-	for (; i < base + IO_IRQ_LINES; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &ioasic_dma_irq_type;
-	}
+	for (i = base; i < base + IO_INR_DMA; i++)
+		set_irq_chip_and_handler(i, &ioasic_irq_type,
+					 handle_level_irq);
+	for (; i < base + IO_IRQ_LINES; i++)
+		set_irq_chip(i, &ioasic_dma_irq_type);
 
 	ioasic_irq_base = base;
 }
diff --git a/arch/mips/dec/kn01-berr.c b/arch/mips/dec/kn01-berr.c
index f19b461..d3b8002 100644
--- a/arch/mips/dec/kn01-berr.c
+++ b/arch/mips/dec/kn01-berr.c
@@ -20,8 +20,10 @@
 #include <linux/types.h>
 
 #include <asm/inst.h>
+#include <asm/irq_regs.h>
 #include <asm/mipsregs.h>
 #include <asm/page.h>
+#include <asm/ptrace.h>
 #include <asm/system.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
diff --git a/arch/mips/dec/kn02-irq.c b/arch/mips/dec/kn02-irq.c
index 04a367a..916e46b 100644
--- a/arch/mips/dec/kn02-irq.c
+++ b/arch/mips/dec/kn02-irq.c
@@ -14,7 +14,6 @@
 
 #include <linux/init.h>
 #include <linux/irq.h>
-#include <linux/spinlock.h>
 #include <linux/types.h>
 
 #include <asm/dec/kn02.h>
@@ -29,7 +28,6 @@
  * There is no default value -- it has to be initialized.
  */
 u32 cached_kn02_csr;
-DEFINE_SPINLOCK(kn02_lock);
 
 
 static int kn02_irq_base;
@@ -53,55 +51,18 @@
 	*csr = cached_kn02_csr;
 }
 
-static inline void enable_kn02_irq(unsigned int irq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&kn02_lock, flags);
-	unmask_kn02_irq(irq);
-	spin_unlock_irqrestore(&kn02_lock, flags);
-}
-
-static inline void disable_kn02_irq(unsigned int irq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&kn02_lock, flags);
-	mask_kn02_irq(irq);
-	spin_unlock_irqrestore(&kn02_lock, flags);
-}
-
-
-static unsigned int startup_kn02_irq(unsigned int irq)
-{
-	enable_kn02_irq(irq);
-	return 0;
-}
-
-#define shutdown_kn02_irq disable_kn02_irq
-
 static void ack_kn02_irq(unsigned int irq)
 {
-	spin_lock(&kn02_lock);
 	mask_kn02_irq(irq);
-	spin_unlock(&kn02_lock);
 	iob();
 }
 
-static void end_kn02_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		enable_kn02_irq(irq);
-}
-
 static struct irq_chip kn02_irq_type = {
 	.typename = "KN02-CSR",
-	.startup = startup_kn02_irq,
-	.shutdown = shutdown_kn02_irq,
-	.enable = enable_kn02_irq,
-	.disable = disable_kn02_irq,
 	.ack = ack_kn02_irq,
-	.end = end_kn02_irq,
+	.mask = mask_kn02_irq,
+	.mask_ack = ack_kn02_irq,
+	.unmask = unmask_kn02_irq,
 };
 
 
@@ -109,22 +70,15 @@
 {
 	volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE +
 						       KN02_CSR);
-	unsigned long flags;
 	int i;
 
 	/* Mask interrupts. */
-	spin_lock_irqsave(&kn02_lock, flags);
 	cached_kn02_csr &= ~KN02_CSR_IOINTEN;
 	*csr = cached_kn02_csr;
 	iob();
-	spin_unlock_irqrestore(&kn02_lock, flags);
 
-	for (i = base; i < base + KN02_IRQ_LINES; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &kn02_irq_type;
-	}
+	for (i = base; i < base + KN02_IRQ_LINES; i++)
+		set_irq_chip_and_handler(i, &kn02_irq_type, handle_level_irq);
 
 	kn02_irq_base = base;
 }
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index 6b7481e..d34032a 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -761,3 +761,9 @@
 	if (dec_interrupt[DEC_IRQ_HALT] >= 0)
 		setup_irq(dec_interrupt[DEC_IRQ_HALT], &haltirq);
 }
+
+asmlinkage unsigned int dec_irq_dispatch(unsigned int irq)
+{
+	do_IRQ(irq);
+	return 0;
+}
diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c
index 69e424e..8b7e0c1 100644
--- a/arch/mips/dec/time.c
+++ b/arch/mips/dec/time.c
@@ -151,7 +151,7 @@
 	CMOS_READ(RTC_REG_C);			/* Ack the RTC interrupt.  */
 }
 
-static unsigned int dec_ioasic_hpt_read(void)
+static cycle_t dec_ioasic_hpt_read(void)
 {
 	/*
 	 * The free-running counter is 32-bit which is good for about
@@ -171,7 +171,7 @@
 
 	if (!cpu_has_counter && IOASIC)
 		/* For pre-R4k systems we use the I/O ASIC's counter.  */
-		mips_hpt_read = dec_ioasic_hpt_read;
+		clocksource_mips.read = dec_ioasic_hpt_read;
 
 	/* Set up the rate of periodic DS1287 interrupts.  */
 	CMOS_WRITE(RTC_REF_CLCK_32KHZ | (16 - __ffs(HZ)), RTC_REG_A);
diff --git a/arch/mips/emma2rh/common/irq_emma2rh.c b/arch/mips/emma2rh/common/irq_emma2rh.c
index 197ed4c..8d880f0 100644
--- a/arch/mips/emma2rh/common/irq_emma2rh.c
+++ b/arch/mips/emma2rh/common/irq_emma2rh.c
@@ -56,49 +56,21 @@
 	ll_emma2rh_irq_disable(irq - emma2rh_irq_base);
 }
 
-static unsigned int emma2rh_irq_startup(unsigned int irq)
-{
-	emma2rh_irq_enable(irq);
-	return 0;
-}
-
-#define	emma2rh_irq_shutdown	emma2rh_irq_disable
-
-static void emma2rh_irq_ack(unsigned int irq)
-{
-	/* disable interrupt - some handler will re-enable the irq
-	 * and if the interrupt is leveled, we will have infinite loop
-	 */
-	ll_emma2rh_irq_disable(irq - emma2rh_irq_base);
-}
-
-static void emma2rh_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		ll_emma2rh_irq_enable(irq - emma2rh_irq_base);
-}
-
 struct irq_chip emma2rh_irq_controller = {
 	.typename = "emma2rh_irq",
-	.startup = emma2rh_irq_startup,
-	.shutdown = emma2rh_irq_shutdown,
-	.enable = emma2rh_irq_enable,
-	.disable = emma2rh_irq_disable,
-	.ack = emma2rh_irq_ack,
-	.end = emma2rh_irq_end,
-	.set_affinity = NULL	/* no affinity stuff for UP */
+	.ack = emma2rh_irq_disable,
+	.mask = emma2rh_irq_disable,
+	.mask_ack = emma2rh_irq_disable,
+	.unmask = emma2rh_irq_enable,
 };
 
 void emma2rh_irq_init(u32 irq_base)
 {
 	u32 i;
 
-	for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &emma2rh_irq_controller;
-	}
+	for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ; i++)
+		set_irq_chip_and_handler(i, &emma2rh_irq_controller,
+					 handle_level_irq);
 
 	emma2rh_irq_base = irq_base;
 }
diff --git a/arch/mips/emma2rh/markeins/irq_markeins.c b/arch/mips/emma2rh/markeins/irq_markeins.c
index 0b36eb0..2116d9b 100644
--- a/arch/mips/emma2rh/markeins/irq_markeins.c
+++ b/arch/mips/emma2rh/markeins/irq_markeins.c
@@ -48,46 +48,21 @@
 	ll_emma2rh_sw_irq_disable(irq - emma2rh_sw_irq_base);
 }
 
-static unsigned int emma2rh_sw_irq_startup(unsigned int irq)
-{
-	emma2rh_sw_irq_enable(irq);
-	return 0;
-}
-
-#define emma2rh_sw_irq_shutdown emma2rh_sw_irq_disable
-
-static void emma2rh_sw_irq_ack(unsigned int irq)
-{
-	ll_emma2rh_sw_irq_disable(irq - emma2rh_sw_irq_base);
-}
-
-static void emma2rh_sw_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		ll_emma2rh_sw_irq_enable(irq - emma2rh_sw_irq_base);
-}
-
 struct irq_chip emma2rh_sw_irq_controller = {
 	.typename = "emma2rh_sw_irq",
-	.startup = emma2rh_sw_irq_startup,
-	.shutdown = emma2rh_sw_irq_shutdown,
-	.enable = emma2rh_sw_irq_enable,
-	.disable = emma2rh_sw_irq_disable,
-	.ack = emma2rh_sw_irq_ack,
-	.end = emma2rh_sw_irq_end,
-	.set_affinity = NULL,
+	.ack = emma2rh_sw_irq_disable,
+	.mask = emma2rh_sw_irq_disable,
+	.mask_ack = emma2rh_sw_irq_disable,
+	.unmask = emma2rh_sw_irq_enable,
 };
 
 void emma2rh_sw_irq_init(u32 irq_base)
 {
 	u32 i;
 
-	for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ_SW; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 2;
-		irq_desc[i].chip = &emma2rh_sw_irq_controller;
-	}
+	for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ_SW; i++)
+		set_irq_chip_and_handler(i, &emma2rh_sw_irq_controller,
+					 handle_level_irq);
 
 	emma2rh_sw_irq_base = irq_base;
 }
@@ -126,14 +101,6 @@
 	ll_emma2rh_gpio_irq_disable(irq - emma2rh_gpio_irq_base);
 }
 
-static unsigned int emma2rh_gpio_irq_startup(unsigned int irq)
-{
-	emma2rh_gpio_irq_enable(irq);
-	return 0;
-}
-
-#define emma2rh_gpio_irq_shutdown emma2rh_gpio_irq_disable
-
 static void emma2rh_gpio_irq_ack(unsigned int irq)
 {
 	irq -= emma2rh_gpio_irq_base;
@@ -149,25 +116,19 @@
 
 struct irq_chip emma2rh_gpio_irq_controller = {
 	.typename = "emma2rh_gpio_irq",
-	.startup = emma2rh_gpio_irq_startup,
-	.shutdown = emma2rh_gpio_irq_shutdown,
-	.enable = emma2rh_gpio_irq_enable,
-	.disable = emma2rh_gpio_irq_disable,
 	.ack = emma2rh_gpio_irq_ack,
+	.mask = emma2rh_gpio_irq_disable,
+	.mask_ack = emma2rh_gpio_irq_ack,
+	.unmask = emma2rh_gpio_irq_enable,
 	.end = emma2rh_gpio_irq_end,
-	.set_affinity = NULL,
 };
 
 void emma2rh_gpio_irq_init(u32 irq_base)
 {
 	u32 i;
 
-	for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ_GPIO; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 2;
-		irq_desc[i].chip = &emma2rh_gpio_irq_controller;
-	}
+	for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ_GPIO; i++)
+		set_irq_chip(i, &emma2rh_gpio_irq_controller);
 
 	emma2rh_gpio_irq_base = irq_base;
 }
diff --git a/arch/mips/gt64120/ev64120/irq.c b/arch/mips/gt64120/ev64120/irq.c
index ed4d82b..b3e5796 100644
--- a/arch/mips/gt64120/ev64120/irq.c
+++ b/arch/mips/gt64120/ev64120/irq.c
@@ -66,38 +66,21 @@
 
 static void disable_ev64120_irq(unsigned int irq_nr)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	if (irq_nr >= 8) {	// All PCI interrupts are on line 5 or 2
 		clear_c0_status(9 << 10);
 	} else {
 		clear_c0_status(1 << (irq_nr + 8));
 	}
-	local_irq_restore(flags);
 }
 
 static void enable_ev64120_irq(unsigned int irq_nr)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	if (irq_nr >= 8)	// All PCI interrupts are on line 5 or 2
 		set_c0_status(9 << 10);
 	else
 		set_c0_status(1 << (irq_nr + 8));
-	local_irq_restore(flags);
 }
 
-static unsigned int startup_ev64120_irq(unsigned int irq)
-{
-	enable_ev64120_irq(irq);
-	return 0;		/* Never anything pending  */
-}
-
-#define shutdown_ev64120_irq     disable_ev64120_irq
-#define mask_and_ack_ev64120_irq disable_ev64120_irq
-
 static void end_ev64120_irq(unsigned int irq)
 {
 	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@@ -106,13 +89,11 @@
 
 static struct irq_chip ev64120_irq_type = {
 	.typename	= "EV64120",
-	.startup	= startup_ev64120_irq,
-	.shutdown	= shutdown_ev64120_irq,
-	.enable		= enable_ev64120_irq,
-	.disable	= disable_ev64120_irq,
-	.ack		= mask_and_ack_ev64120_irq,
+	.ack		= disable_ev64120_irq,
+	.mask		= disable_ev64120_irq,
+	.mask_ack	= disable_ev64120_irq,
+	.unmask		= enable_ev64120_irq,
 	.end		= end_ev64120_irq,
-	.set_affinity	= NULL
 };
 
 void gt64120_irq_setup(void)
@@ -122,8 +103,6 @@
 	 */
 	clear_c0_status(ST0_IM);
 
-	local_irq_disable();
-
 	/*
 	 * Enable timer.  Other interrupts will be enabled as they are
 	 * registered.
@@ -133,16 +112,5 @@
 
 void __init arch_init_irq(void)
 {
-	int i;
-
-	/*  Let's initialize our IRQ descriptors  */
-	for (i = 0; i < NR_IRQS; i++) {
-		irq_desc[i].status = 0;
-		irq_desc[i].chip = &no_irq_chip;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 0;
-		spin_lock_init(&irq_desc[i].lock);
-	}
-
 	gt64120_irq_setup();
 }
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index d5bd6b3..f8d417b 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -28,14 +28,6 @@
 	spin_unlock_irqrestore(&r4030_lock, flags);
 }
 
-static unsigned int startup_r4030_irq(unsigned int irq)
-{
-	enable_r4030_irq(irq);
-	return 0; /* never anything pending */
-}
-
-#define shutdown_r4030_irq	disable_r4030_irq
-
 void disable_r4030_irq(unsigned int irq)
 {
 	unsigned int mask = ~(1 << (irq - JAZZ_PARALLEL_IRQ));
@@ -47,34 +39,20 @@
 	spin_unlock_irqrestore(&r4030_lock, flags);
 }
 
-#define mask_and_ack_r4030_irq disable_r4030_irq
-
-static void end_r4030_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		enable_r4030_irq(irq);
-}
-
 static struct irq_chip r4030_irq_type = {
 	.typename = "R4030",
-	.startup = startup_r4030_irq,
-	.shutdown = shutdown_r4030_irq,
-	.enable = enable_r4030_irq,
-	.disable = disable_r4030_irq,
-	.ack = mask_and_ack_r4030_irq,
-	.end = end_r4030_irq,
+	.ack = disable_r4030_irq,
+	.mask = disable_r4030_irq,
+	.mask_ack = disable_r4030_irq,
+	.unmask = enable_r4030_irq,
 };
 
 void __init init_r4030_ints(void)
 {
 	int i;
 
-	for (i = JAZZ_PARALLEL_IRQ; i <= JAZZ_TIMER_IRQ; i++) {
-		irq_desc[i].status     = IRQ_DISABLED;
-		irq_desc[i].action     = 0;
-		irq_desc[i].depth      = 1;
-		irq_desc[i].chip    = &r4030_irq_type;
-	}
+	for (i = JAZZ_PARALLEL_IRQ; i <= JAZZ_TIMER_IRQ; i++)
+		set_irq_chip_and_handler(i, &r4030_irq_type, handle_level_irq);
 
 	r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, 0);
 	r4030_read_reg16(JAZZ_IO_IRQ_SOURCE);		/* clear pending IRQs */
diff --git a/arch/mips/jmr3927/rbhma3100/irq.c b/arch/mips/jmr3927/rbhma3100/irq.c
index de4a238..3da49c5 100644
--- a/arch/mips/jmr3927/rbhma3100/irq.c
+++ b/arch/mips/jmr3927/rbhma3100/irq.c
@@ -90,17 +90,6 @@
 static void jmr3927_irq_disable(unsigned int irq_nr);
 static void jmr3927_irq_enable(unsigned int irq_nr);
 
-static DEFINE_SPINLOCK(jmr3927_irq_lock);
-
-static unsigned int jmr3927_irq_startup(unsigned int irq)
-{
-	jmr3927_irq_enable(irq);
-
-	return 0;
-}
-
-#define	jmr3927_irq_shutdown	jmr3927_irq_disable
-
 static void jmr3927_irq_ack(unsigned int irq)
 {
 	if (irq == JMR3927_IRQ_IRC_TMR0)
@@ -118,9 +107,7 @@
 static void jmr3927_irq_disable(unsigned int irq_nr)
 {
 	struct tb_irq_space* sp;
-	unsigned long flags;
 
-	spin_lock_irqsave(&jmr3927_irq_lock, flags);
 	for (sp = tb_irq_spaces; sp; sp = sp->next) {
 		if (sp->start_irqno <= irq_nr &&
 		    irq_nr < sp->start_irqno + sp->nr_irqs) {
@@ -130,15 +117,12 @@
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&jmr3927_irq_lock, flags);
 }
 
 static void jmr3927_irq_enable(unsigned int irq_nr)
 {
 	struct tb_irq_space* sp;
-	unsigned long flags;
 
-	spin_lock_irqsave(&jmr3927_irq_lock, flags);
 	for (sp = tb_irq_spaces; sp; sp = sp->next) {
 		if (sp->start_irqno <= irq_nr &&
 		    irq_nr < sp->start_irqno + sp->nr_irqs) {
@@ -148,7 +132,6 @@
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&jmr3927_irq_lock, flags);
 }
 
 /*
@@ -457,11 +440,10 @@
 
 static struct irq_chip jmr3927_irq_controller = {
 	.typename = "jmr3927_irq",
-	.startup = jmr3927_irq_startup,
-	.shutdown = jmr3927_irq_shutdown,
-	.enable = jmr3927_irq_enable,
-	.disable = jmr3927_irq_disable,
 	.ack = jmr3927_irq_ack,
+	.mask = jmr3927_irq_disable,
+	.mask_ack = jmr3927_irq_ack,
+	.unmask = jmr3927_irq_enable,
 	.end = jmr3927_irq_end,
 };
 
@@ -469,12 +451,8 @@
 {
 	u32 i;
 
-	for (i= irq_base; i< irq_base + JMR3927_NR_IRQ_IRC + JMR3927_NR_IRQ_IOC; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &jmr3927_irq_controller;
-	}
+	for (i= irq_base; i< irq_base + JMR3927_NR_IRQ_IRC + JMR3927_NR_IRQ_IOC; i++)
+		set_irq_chip(i, &jmr3927_irq_controller);
 
 	jmr3927_irq_base = irq_base;
 }
diff --git a/arch/mips/jmr3927/rbhma3100/setup.c b/arch/mips/jmr3927/rbhma3100/setup.c
index 16e5dfe..138f25e 100644
--- a/arch/mips/jmr3927/rbhma3100/setup.c
+++ b/arch/mips/jmr3927/rbhma3100/setup.c
@@ -170,7 +170,7 @@
 	while (1);
 }
 
-static unsigned int jmr3927_hpt_read(void)
+static cycle_t jmr3927_hpt_read(void)
 {
 	/* We assume this function is called xtime_lock held. */
 	return jiffies * (JMR3927_TIMER_CLK / HZ) + jmr3927_tmrptr->trr;
@@ -182,7 +182,7 @@
 #endif
 static void __init jmr3927_time_init(void)
 {
-	mips_hpt_read = jmr3927_hpt_read;
+	clocksource_mips.read = jmr3927_hpt_read;
 	mips_hpt_frequency = JMR3927_TIMER_CLK;
 #ifdef USE_RTC_DS1742
 	if (jmr3927_have_nvram()) {
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 6bfbbed..bbbb8d7 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -45,7 +45,6 @@
 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
 obj-$(CONFIG_MIPS_VPE_APSP_API)	+= rtlx.o
 
-obj-$(CONFIG_NO_ISA)		+= dma-no-isa.o
 obj-$(CONFIG_I8259)		+= i8259.o
 obj-$(CONFIG_IRQ_CPU)		+= irq_cpu.o
 obj-$(CONFIG_IRQ_CPU_RM7K)	+= irq-rm7000.o
@@ -67,6 +66,8 @@
 
 obj-$(CONFIG_I8253)		+= i8253.o
 
+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+
 CFLAGS_cpu-bugs64.o	= $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
 
 EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 4a9f1ec..9b34238 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -90,7 +90,6 @@
 	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
 };
 
-#define elf_addr_t	u32
 #define elf_caddr_t	u32
 #define init_elf_binfmt init_elfn32_binfmt
 
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index e318137..993f7ec 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -92,7 +92,6 @@
 	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
 };
 
-#define elf_addr_t	u32
 #define elf_caddr_t	u32
 #define init_elf_binfmt init_elf32_binfmt
 
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 8485af3..442839e 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -110,9 +110,8 @@
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 
-	printk("Checking for 'wait' instruction... ");
 	if (nowait) {
-		printk (" disabled.\n");
+		printk("Wait instruction disabled.\n");
 		return;
 	}
 
@@ -120,11 +119,9 @@
 	case CPU_R3081:
 	case CPU_R3081E:
 		cpu_wait = r3081_wait;
-		printk(" available.\n");
 		break;
 	case CPU_TX3927:
 		cpu_wait = r39xx_wait;
-		printk(" available.\n");
 		break;
 	case CPU_R4200:
 /*	case CPU_R4300: */
@@ -146,33 +143,23 @@
 	case CPU_74K:
  	case CPU_PR4450:
 		cpu_wait = r4k_wait;
-		printk(" available.\n");
 		break;
 	case CPU_TX49XX:
 		cpu_wait = r4k_wait_irqoff;
-		printk(" available.\n");
 		break;
 	case CPU_AU1000:
 	case CPU_AU1100:
 	case CPU_AU1500:
 	case CPU_AU1550:
 	case CPU_AU1200:
-		if (allow_au1k_wait) {
+		if (allow_au1k_wait)
 			cpu_wait = au1k_wait;
-			printk(" available.\n");
-		} else
-			printk(" unavailable.\n");
 		break;
 	case CPU_RM9000:
-		if ((c->processor_id & 0x00ff) >= 0x40) {
+		if ((c->processor_id & 0x00ff) >= 0x40)
 			cpu_wait = r4k_wait;
-			printk(" available.\n");
-		} else {
-			printk(" unavailable.\n");
-		}
 		break;
 	default:
-		printk(" unavailable.\n");
 		break;
 	}
 }
diff --git a/arch/mips/kernel/dma-no-isa.c b/arch/mips/kernel/dma-no-isa.c
deleted file mode 100644
index 6df8b07..0000000
--- a/arch/mips/kernel/dma-no-isa.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004 by Ralf Baechle
- *
- * Dummy ISA DMA functions for systems that don't have ISA but share drivers
- * with ISA such as legacy free PCI.
- */
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-
-DEFINE_SPINLOCK(dma_spin_lock);
-
-int request_dma(unsigned int dmanr, const char * device_id)
-{
-	return -EINVAL;
-}
-
-void free_dma(unsigned int dmanr)
-{
-}
-
-EXPORT_SYMBOL(dma_spin_lock);
-EXPORT_SYMBOL(request_dma);
-EXPORT_SYMBOL(free_dma);
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 5baca16..aacd4a0 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -19,6 +19,7 @@
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
 #include <asm/war.h>
+#include <asm/page.h>
 
 #define PANIC_PIC(msg)					\
 		.set push;				\
@@ -378,6 +379,68 @@
 	BUILD_HANDLER dsp dsp sti silent		/* #26 */
 	BUILD_HANDLER reserved reserved sti verbose	/* others */
 
+	.align	5
+	LEAF(handle_ri_rdhwr_vivt)
+#ifdef CONFIG_MIPS_MT_SMTC
+	PANIC_PIC("handle_ri_rdhwr_vivt called")
+#else
+	.set	push
+	.set	noat
+	.set	noreorder
+	/* check if TLB contains an entry for EPC */
+	MFC0	k1, CP0_ENTRYHI
+	andi	k1, 0xff	/* ASID_MASK */
+	MFC0	k0, CP0_EPC
+	PTR_SRL	k0, PAGE_SHIFT + 1
+	PTR_SLL	k0, PAGE_SHIFT + 1
+	or	k1, k0
+	MTC0	k1, CP0_ENTRYHI
+	mtc0_tlbw_hazard
+	tlbp
+	tlb_probe_hazard
+	mfc0	k1, CP0_INDEX
+	.set	pop
+	bltz	k1, handle_ri	/* slow path */
+	/* fall thru */
+#endif
+	END(handle_ri_rdhwr_vivt)
+
+	LEAF(handle_ri_rdhwr)
+	.set	push
+	.set	noat
+	.set	noreorder
+	/* 0x7c03e83b: rdhwr v1,$29 */
+	MFC0	k1, CP0_EPC
+	lui	k0, 0x7c03
+	lw	k1, (k1)
+	ori	k0, 0xe83b
+	.set	reorder
+	bne	k0, k1, handle_ri	/* if not ours */
+	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
+	get_saved_sp	/* k1 := current_thread_info */
+	.set	noreorder
+	MFC0	k0, CP0_EPC
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+	ori	k1, _THREAD_MASK
+	xori	k1, _THREAD_MASK
+	LONG_L	v1, TI_TP_VALUE(k1)
+	LONG_ADDIU	k0, 4
+	jr	k0
+	 rfe
+#else
+	LONG_ADDIU	k0, 4		/* stall on $k0 */
+	MTC0	k0, CP0_EPC
+	/* I hope three instructions between MTC0 and ERET are enough... */
+	ori	k1, _THREAD_MASK
+	xori	k1, _THREAD_MASK
+	LONG_L	v1, TI_TP_VALUE(k1)
+	.set	mips3
+	eret
+	.set	mips0
+#endif
+	.set	pop
+	END(handle_ri_rdhwr)
+
 #ifdef CONFIG_64BIT
 /* A temporary overflow handler used by check_daddi(). */
 
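
Note: the new handle_ri_rdhwr fast path above emulates the single encoding 0x7c03e83b (rdhwr v1, $29, the user-local/TLS register) on CPUs that lack RDHWR, loading the thread pointer from thread_info and stepping past the faulting instruction. A rough C mirror for orientation only; the real code runs in assembly with just the k0/k1 scratch registers and no stack (emulate_rdhwr_tls is a hypothetical name):

	/* Illustrative C mirror of the assembly fast path above. */
	static int emulate_rdhwr_tls(struct pt_regs *regs)
	{
		if (*(unsigned int *)regs->cp0_epc != 0x7c03e83b)
			return 0;				 /* not ours: fall back to handle_ri */
		regs->regs[3] = current_thread_info()->tp_value; /* v1 <- TLS pointer */
		regs->cp0_epc += 4;				 /* skip the emulated rdhwr */
		return 1;
	}
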
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index ddc1b71..a2e095a 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -250,6 +250,9 @@
 	 */
 	page	swapper_pg_dir, _PGD_ORDER
 #ifdef CONFIG_64BIT
+#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64)
+	page	module_pg_dir, _PGD_ORDER
+#endif
 	page	invalid_pmd_table, _PMD_ORDER
 #endif
 	page	invalid_pte_table, _PTE_ORDER
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 48e3418..b59a676 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -19,9 +19,6 @@
 #include <asm/i8259.h>
 #include <asm/io.h>
 
-void enable_8259A_irq(unsigned int irq);
-void disable_8259A_irq(unsigned int irq);
-
 /*
  * This is the 'legacy' 8259A Programmable Interrupt Controller,
  * present in the majority of PC/AT boxes.
@@ -31,34 +28,16 @@
  * moves to arch independent land
  */
 
+static int i8259A_auto_eoi;
 DEFINE_SPINLOCK(i8259A_lock);
-
-static void end_8259A_irq (unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
-	    irq_desc[irq].action)
-		enable_8259A_irq(irq);
-}
-
-#define shutdown_8259A_irq	disable_8259A_irq
-
+/* some platforms call this... */
 void mask_and_ack_8259A(unsigned int);
 
-static unsigned int startup_8259A_irq(unsigned int irq)
-{
-	enable_8259A_irq(irq);
-
-	return 0; /* never anything pending */
-}
-
-static struct irq_chip i8259A_irq_type = {
-	.typename = "XT-PIC",
-	.startup = startup_8259A_irq,
-	.shutdown = shutdown_8259A_irq,
-	.enable = enable_8259A_irq,
-	.disable = disable_8259A_irq,
-	.ack = mask_and_ack_8259A,
-	.end = end_8259A_irq,
+static struct irq_chip i8259A_chip = {
+	.name		= "XT-PIC",
+	.mask		= disable_8259A_irq,
+	.unmask		= enable_8259A_irq,
+	.mask_ack	= mask_and_ack_8259A,
 };
 
 /*
@@ -70,8 +49,8 @@
  */
 static unsigned int cached_irq_mask = 0xffff;
 
-#define cached_21	(cached_irq_mask)
-#define cached_A1	(cached_irq_mask >> 8)
+#define cached_master_mask	(cached_irq_mask)
+#define cached_slave_mask	(cached_irq_mask >> 8)
 
 void disable_8259A_irq(unsigned int irq)
 {
@@ -81,9 +60,9 @@
 	spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask |= mask;
 	if (irq & 8)
-		outb(cached_A1,0xA1);
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
 	else
-		outb(cached_21,0x21);
+		outb(cached_master_mask, PIC_MASTER_IMR);
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
@@ -95,9 +74,9 @@
 	spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask &= mask;
 	if (irq & 8)
-		outb(cached_A1,0xA1);
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
 	else
-		outb(cached_21,0x21);
+		outb(cached_master_mask, PIC_MASTER_IMR);
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
@@ -109,9 +88,9 @@
 
 	spin_lock_irqsave(&i8259A_lock, flags);
 	if (irq < 8)
-		ret = inb(0x20) & mask;
+		ret = inb(PIC_MASTER_CMD) & mask;
 	else
-		ret = inb(0xA0) & (mask >> 8);
+		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 
 	return ret;
@@ -120,7 +99,7 @@
 void make_8259A_irq(unsigned int irq)
 {
 	disable_irq_nosync(irq);
-	irq_desc[irq].chip = &i8259A_irq_type;
+	set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
 	enable_irq(irq);
 }
 
@@ -136,14 +115,14 @@
 	int irqmask = 1 << irq;
 
 	if (irq < 8) {
-		outb(0x0B,0x20);		/* ISR register */
-		value = inb(0x20) & irqmask;
-		outb(0x0A,0x20);		/* back to the IRR register */
+		outb(0x0B,PIC_MASTER_CMD);	/* ISR register */
+		value = inb(PIC_MASTER_CMD) & irqmask;
+		outb(0x0A,PIC_MASTER_CMD);	/* back to the IRR register */
 		return value;
 	}
-	outb(0x0B,0xA0);		/* ISR register */
-	value = inb(0xA0) & (irqmask >> 8);
-	outb(0x0A,0xA0);		/* back to the IRR register */
+	outb(0x0B,PIC_SLAVE_CMD);	/* ISR register */
+	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
+	outb(0x0A,PIC_SLAVE_CMD);	/* back to the IRR register */
 	return value;
 }
 
@@ -160,17 +139,19 @@
 
 	spin_lock_irqsave(&i8259A_lock, flags);
 	/*
-	 * Lightweight spurious IRQ detection. We do not want to overdo
-	 * spurious IRQ handling - it's usually a sign of hardware problems, so
-	 * we only do the checks we can do without slowing down good hardware
-	 * nnecesserily.
+	 * Lightweight spurious IRQ detection. We do not want
+	 * to overdo spurious IRQ handling - it's usually a sign
+	 * of hardware problems, so we only do the checks we can
+	 * do without slowing down good hardware unnecessarily.
 	 *
-	 * Note that IRQ7 and IRQ15 (the two spurious IRQs usually resulting
-	 * rom the 8259A-1|2 PICs) occur even if the IRQ is masked in the 8259A.
-	 * Thus we can check spurious 8259A IRQs without doing the quite slow
-	 * i8259A_irq_real() call for every IRQ.  This does not cover 100% of
-	 * spurious interrupts, but should be enough to warn the user that
-	 * there is something bad going on ...
+	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
+	 * usually resulting from the 8259A-1|2 PICs) occur
+	 * even if the IRQ is masked in the 8259A. Thus we
+	 * can check spurious 8259A IRQs without doing the
+	 * quite slow i8259A_irq_real() call for every IRQ.
+	 * This does not cover 100% of spurious interrupts,
+	 * but should be enough to warn the user that there
+	 * is something bad going on ...
 	 */
 	if (cached_irq_mask & irqmask)
 		goto spurious_8259A_irq;
@@ -178,14 +159,14 @@
 
 handle_real_irq:
 	if (irq & 8) {
-		inb(0xA1);		/* DUMMY - (do we need this?) */
-		outb(cached_A1,0xA1);
-		outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
-		outb(0x62,0x20);	/* 'Specific EOI' to master-IRQ2 */
+		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
+		outb(0x60+(irq&7),PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
+		outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
 	} else {
-		inb(0x21);		/* DUMMY - (do we need this?) */
-		outb(cached_21,0x21);
-		outb(0x60+irq,0x20);	/* 'Specific EOI' to master */
+		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
+		outb(cached_master_mask, PIC_MASTER_IMR);
+		outb(0x60+irq,PIC_MASTER_CMD);	/* 'Specific EOI' to master */
 	}
 #ifdef CONFIG_MIPS_MT_SMTC
         if (irq_hwmask[irq] & ST0_IM)
@@ -206,7 +187,7 @@
 		goto handle_real_irq;
 
 	{
-		static int spurious_irq_mask = 0;
+		static int spurious_irq_mask;
 		/*
 		 * At this point we can be sure the IRQ is spurious,
 		 * lets ACK and report it. [once per IRQ]
@@ -227,13 +208,25 @@
 
 static int i8259A_resume(struct sys_device *dev)
 {
-	init_8259A(0);
+	init_8259A(i8259A_auto_eoi);
+	return 0;
+}
+
+static int i8259A_shutdown(struct sys_device *dev)
+{
+	/* Put the i8259A into a quiescent state that
+	 * the kernel initialization code can get it
+	 * out of.
+	 */
+	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
+	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
 	return 0;
 }
 
 static struct sysdev_class i8259_sysdev_class = {
 	set_kset_name("i8259"),
 	.resume = i8259A_resume,
+	.shutdown = i8259A_shutdown,
 };
 
 static struct sys_device device_i8259A = {
@@ -255,41 +248,41 @@
 {
 	unsigned long flags;
 
+	i8259A_auto_eoi = auto_eoi;
+
 	spin_lock_irqsave(&i8259A_lock, flags);
 
-	outb(0xff, 0x21);	/* mask all of 8259A-1 */
-	outb(0xff, 0xA1);	/* mask all of 8259A-2 */
+	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
+	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
 
 	/*
 	 * outb_p - this has to work on a wide range of PC hardware.
 	 */
-	outb_p(0x11, 0x20);	/* ICW1: select 8259A-1 init */
-	outb_p(0x00, 0x21);	/* ICW2: 8259A-1 IR0-7 mapped to 0x00-0x07 */
-	outb_p(0x04, 0x21);	/* 8259A-1 (the master) has a slave on IR2 */
-	if (auto_eoi)
-		outb_p(0x03, 0x21);	/* master does Auto EOI */
-	else
-		outb_p(0x01, 0x21);	/* master expects normal EOI */
+	outb_p(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
+	outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
+	outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
+	if (auto_eoi)	/* master does Auto EOI */
+		outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
+	else		/* master expects normal EOI */
+		outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
 
-	outb_p(0x11, 0xA0);	/* ICW1: select 8259A-2 init */
-	outb_p(0x08, 0xA1);	/* ICW2: 8259A-2 IR0-7 mapped to 0x08-0x0f */
-	outb_p(0x02, 0xA1);	/* 8259A-2 is a slave on master's IR2 */
-	outb_p(0x01, 0xA1);	/* (slave's support for AEOI in flat mode
-				    is to be investigated) */
-
+	outb_p(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
+	outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
+	outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
+	outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
 	if (auto_eoi)
 		/*
-		 * in AEOI mode we just have to mask the interrupt
+		 * In AEOI mode we just have to mask the interrupt
 		 * when acking.
 		 */
-		i8259A_irq_type.ack = disable_8259A_irq;
+		i8259A_chip.mask_ack = disable_8259A_irq;
 	else
-		i8259A_irq_type.ack = mask_and_ack_8259A;
+		i8259A_chip.mask_ack = mask_and_ack_8259A;
 
 	udelay(100);		/* wait for 8259A to initialize */
 
-	outb(cached_21, 0x21);	/* restore master IRQ mask */
-	outb(cached_A1, 0xA1);	/* restore slave IRQ mask */
+	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
+	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */
 
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 }
@@ -302,11 +295,17 @@
 };
 
 static struct resource pic1_io_resource = {
-	.name = "pic1", .start = 0x20, .end = 0x21, .flags = IORESOURCE_BUSY
+	.name = "pic1",
+	.start = PIC_MASTER_CMD,
+	.end = PIC_MASTER_IMR,
+	.flags = IORESOURCE_BUSY
 };
 
 static struct resource pic2_io_resource = {
-	.name = "pic2", .start = 0xa0, .end = 0xa1, .flags = IORESOURCE_BUSY
+	.name = "pic2",
+	.start = PIC_SLAVE_CMD,
+	.end = PIC_SLAVE_IMR,
+	.flags = IORESOURCE_BUSY
 };
 
 /*
@@ -323,12 +322,8 @@
 
 	init_8259A(0);
 
-	for (i = 0; i < 16; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &i8259A_irq_type;
-	}
+	for (i = 0; i < 16; i++)
+		set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
 
-	setup_irq(2, &irq2);
+	setup_irq(PIC_CASCADE_IR, &irq2);
 }
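
Note: the i8259A conversion above replaces the old startup/shutdown/enable/disable/ack/end callbacks with a plain irq_chip driven by handle_level_irq (which masks and acks on entry and unmasks on exit), and swaps raw port numbers for symbolic names. For reference, the names are assumed to carry the classic PC/AT values already visible in the removed lines:

	/* Assumed mapping of the symbolic names introduced above (matches
	 * the literal ports removed by this patch):
	 *   PIC_MASTER_CMD  0x20    PIC_MASTER_IMR  0x21
	 *   PIC_SLAVE_CMD   0xa0    PIC_SLAVE_IMR   0xa1
	 *   PIC_CASCADE_IR  2       (slave cascaded on master IR2)
	 */
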
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index ab12c8f..1bbefbf 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -52,10 +52,6 @@
 	irix_core_dump, PAGE_SIZE
 };
 
-#ifndef elf_addr_t
-#define elf_addr_t unsigned long
-#endif
-
 #ifdef DEBUG
 /* Debugging routines. */
 static char *get_elf_p_type(Elf32_Word p_type)
@@ -1013,7 +1009,7 @@
 	int sz;
 
 	sz = sizeof(struct elf_note);
-	sz += roundup(strlen(en->name), 4);
+	sz += roundup(strlen(en->name) + 1, 4);
 	sz += roundup(en->datasz, 4);
 
 	return sz;
@@ -1032,7 +1028,7 @@
 {
 	struct elf_note en;
 
-	en.n_namesz = strlen(men->name);
+	en.n_namesz = strlen(men->name) + 1;
 	en.n_descsz = men->datasz;
 	en.n_type = men->type;
 
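
Note: both irixelf.c hunks correct ELF note sizing: n_namesz must count the terminating NUL of the note name, and the name is then padded to a 4-byte boundary in the file. A quick worked example for notesize() above:

	/* With the conventional note name "CORE" and a 32-byte descriptor:
	 *   n_namesz = strlen("CORE") + 1 = 5   (NUL now counted)
	 *   name on disk = roundup(5, 4)  = 8 bytes
	 *   desc on disk = roundup(32, 4) = 32 bytes
	 *   total        = sizeof(struct elf_note) + 8 + 32 = 52 bytes
	 */
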
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 650a80c..bcaad66 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -45,31 +45,6 @@
 }
 
 /*
- * Enables the IRQ on SOC-it
- */
-static void enable_msc_irq(unsigned int irq)
-{
-	unmask_msc_irq(irq);
-}
-
-/*
- * Initialize the IRQ on SOC-it
- */
-static unsigned int startup_msc_irq(unsigned int irq)
-{
-	unmask_msc_irq(irq);
-	return 0;
-}
-
-/*
- * Disables the IRQ on SOC-it
- */
-static void disable_msc_irq(unsigned int irq)
-{
-	mask_msc_irq(irq);
-}
-
-/*
  * Masks and ACKs an IRQ
  */
 static void level_mask_and_ack_msc_irq(unsigned int irq)
@@ -136,25 +111,23 @@
 		    (irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
 }
 
-#define shutdown_msc_irq	disable_msc_irq
-
 struct irq_chip msc_levelirq_type = {
 	.typename = "SOC-it-Level",
-	.startup = startup_msc_irq,
-	.shutdown = shutdown_msc_irq,
-	.enable = enable_msc_irq,
-	.disable = disable_msc_irq,
 	.ack = level_mask_and_ack_msc_irq,
+	.mask = mask_msc_irq,
+	.mask_ack = level_mask_and_ack_msc_irq,
+	.unmask = unmask_msc_irq,
+	.eoi = unmask_msc_irq,
 	.end = end_msc_irq,
 };
 
 struct irq_chip msc_edgeirq_type = {
 	.typename = "SOC-it-Edge",
-	.startup =startup_msc_irq,
-	.shutdown = shutdown_msc_irq,
-	.enable = enable_msc_irq,
-	.disable = disable_msc_irq,
 	.ack = edge_mask_and_ack_msc_irq,
+	.mask = mask_msc_irq,
+	.mask_ack = edge_mask_and_ack_msc_irq,
+	.unmask = unmask_msc_irq,
+	.eoi = unmask_msc_irq,
 	.end = end_msc_irq,
 };
 
@@ -175,14 +148,14 @@
 
 		switch (imp->im_type) {
 		case MSC01_IRQ_EDGE:
-			irq_desc[base+n].chip = &msc_edgeirq_type;
+			set_irq_chip(base+n, &msc_edgeirq_type);
 			if (cpu_has_veic)
 				MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
 			else
 				MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
 			break;
 		case MSC01_IRQ_LEVEL:
-			irq_desc[base+n].chip = &msc_levelirq_type;
+			set_irq_chip(base+n, &msc_levelirq_type);
 			if (cpu_has_veic)
 				MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
 			else
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c
index 37d1062..efbd219 100644
--- a/arch/mips/kernel/irq-mv6434x.c
+++ b/arch/mips/kernel/irq-mv6434x.c
@@ -67,48 +67,6 @@
 }
 
 /*
- * Enables the IRQ on Marvell Chip
- */
-static void enable_mv64340_irq(unsigned int irq)
-{
-	unmask_mv64340_irq(irq);
-}
-
-/*
- * Initialize the IRQ on Marvell Chip
- */
-static unsigned int startup_mv64340_irq(unsigned int irq)
-{
-	unmask_mv64340_irq(irq);
-	return 0;
-}
-
-/*
- * Disables the IRQ on Marvell Chip
- */
-static void disable_mv64340_irq(unsigned int irq)
-{
-	mask_mv64340_irq(irq);
-}
-
-/*
- * Masks and ACKs an IRQ
- */
-static void mask_and_ack_mv64340_irq(unsigned int irq)
-{
-	mask_mv64340_irq(irq);
-}
-
-/*
- * End IRQ processing
- */
-static void end_mv64340_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		unmask_mv64340_irq(irq);
-}
-
-/*
  * Interrupt handler for interrupts coming from the Marvell chip.
  * It could be built in ethernet ports etc...
  */
@@ -133,29 +91,21 @@
 		do_IRQ(ls1bit32(irq_src_high) + irq_base + 32);
 }
 
-#define shutdown_mv64340_irq	disable_mv64340_irq
-
 struct irq_chip mv64340_irq_type = {
 	.typename = "MV-64340",
-	.startup = startup_mv64340_irq,
-	.shutdown = shutdown_mv64340_irq,
-	.enable = enable_mv64340_irq,
-	.disable = disable_mv64340_irq,
-	.ack = mask_and_ack_mv64340_irq,
-	.end = end_mv64340_irq,
+	.ack = mask_mv64340_irq,
+	.mask = mask_mv64340_irq,
+	.mask_ack = mask_mv64340_irq,
+	.unmask = unmask_mv64340_irq,
 };
 
 void __init mv64340_irq_init(unsigned int base)
 {
 	int i;
 
-	/* Reset irq handlers pointers to NULL */
-	for (i = base; i < base + 64; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 2;
-		irq_desc[i].chip = &mv64340_irq_type;
-	}
+	for (i = base; i < base + 64; i++)
+		set_irq_chip_and_handler(i, &mv64340_irq_type,
+					 handle_level_irq);
 
 	irq_base = base;
 }
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index 6b54c71..123324b 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -29,56 +29,12 @@
 	clear_c0_intcontrol(0x100 << (irq - irq_base));
 }
 
-static inline void rm7k_cpu_irq_enable(unsigned int irq)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	unmask_rm7k_irq(irq);
-	local_irq_restore(flags);
-}
-
-static void rm7k_cpu_irq_disable(unsigned int irq)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	mask_rm7k_irq(irq);
-	local_irq_restore(flags);
-}
-
-static unsigned int rm7k_cpu_irq_startup(unsigned int irq)
-{
-	rm7k_cpu_irq_enable(irq);
-
-	return 0;
-}
-
-#define	rm7k_cpu_irq_shutdown	rm7k_cpu_irq_disable
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues.  Same for rm7k_cpu_irq_end.
- */
-static void rm7k_cpu_irq_ack(unsigned int irq)
-{
-	mask_rm7k_irq(irq);
-}
-
-static void rm7k_cpu_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		unmask_rm7k_irq(irq);
-}
-
 static struct irq_chip rm7k_irq_controller = {
 	.typename = "RM7000",
-	.startup = rm7k_cpu_irq_startup,
-	.shutdown = rm7k_cpu_irq_shutdown,
-	.enable = rm7k_cpu_irq_enable,
-	.disable = rm7k_cpu_irq_disable,
-	.ack = rm7k_cpu_irq_ack,
-	.end = rm7k_cpu_irq_end,
+	.ack = mask_rm7k_irq,
+	.mask = mask_rm7k_irq,
+	.mask_ack = mask_rm7k_irq,
+	.unmask = unmask_rm7k_irq,
 };
 
 void __init rm7k_cpu_irq_init(int base)
@@ -87,12 +43,9 @@
 
 	clear_c0_intcontrol(0x00000f00);		/* Mask all */
 
-	for (i = base; i < base + 4; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &rm7k_irq_controller;
-	}
+	for (i = base; i < base + 4; i++)
+		set_irq_chip_and_handler(i, &rm7k_irq_controller,
+					 handle_level_irq);
 
 	irq_base = base;
 }
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index 62f011b..0e6f4c5 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -48,15 +48,6 @@
 	local_irq_restore(flags);
 }
 
-static unsigned int rm9k_cpu_irq_startup(unsigned int irq)
-{
-	rm9k_cpu_irq_enable(irq);
-
-	return 0;
-}
-
-#define	rm9k_cpu_irq_shutdown	rm9k_cpu_irq_disable
-
 /*
  * Performance counter interrupts are global on all processors.
  */
@@ -89,40 +80,22 @@
 	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
 }
 
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues.  Same for rm9k_cpu_irq_end.
- */
-static void rm9k_cpu_irq_ack(unsigned int irq)
-{
-	mask_rm9k_irq(irq);
-}
-
-static void rm9k_cpu_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		unmask_rm9k_irq(irq);
-}
-
 static struct irq_chip rm9k_irq_controller = {
 	.typename = "RM9000",
-	.startup = rm9k_cpu_irq_startup,
-	.shutdown = rm9k_cpu_irq_shutdown,
-	.enable = rm9k_cpu_irq_enable,
-	.disable = rm9k_cpu_irq_disable,
-	.ack = rm9k_cpu_irq_ack,
-	.end = rm9k_cpu_irq_end,
+	.ack = mask_rm9k_irq,
+	.mask = mask_rm9k_irq,
+	.mask_ack = mask_rm9k_irq,
+	.unmask = unmask_rm9k_irq,
 };
 
 static struct irq_chip rm9k_perfcounter_irq = {
 	.typename = "RM9000",
 	.startup = rm9k_perfcounter_irq_startup,
 	.shutdown = rm9k_perfcounter_irq_shutdown,
-	.enable = rm9k_cpu_irq_enable,
-	.disable = rm9k_cpu_irq_disable,
-	.ack = rm9k_cpu_irq_ack,
-	.end = rm9k_cpu_irq_end,
+	.ack = mask_rm9k_irq,
+	.mask = mask_rm9k_irq,
+	.mask_ack = mask_rm9k_irq,
+	.unmask = unmask_rm9k_irq,
 };
 
 unsigned int rm9000_perfcount_irq;
@@ -135,15 +108,13 @@
 
 	clear_c0_intcontrol(0x0000f000);		/* Mask all */
 
-	for (i = base; i < base + 4; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &rm9k_irq_controller;
-	}
+	for (i = base; i < base + 4; i++)
+		set_irq_chip_and_handler(i, &rm9k_irq_controller,
+					 handle_level_irq);
 
 	rm9000_perfcount_irq = base + 1;
-	irq_desc[rm9000_perfcount_irq].chip = &rm9k_perfcounter_irq;
+	set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
+				 handle_level_irq);
 
 	irq_base = base;
 }
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 9b0e49d..2fe4c86 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -88,25 +88,6 @@
 unsigned long irq_hwmask[NR_IRQS];
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-#undef do_IRQ
-
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-asmlinkage unsigned int do_IRQ(unsigned int irq)
-{
-	irq_enter();
-
-	__DO_IRQ_SMTC_HOOK();
-	__do_IRQ(irq);
-
-	irq_exit();
-
-	return 1;
-}
-
 /*
  * Generic, controller-independent functions:
  */
@@ -136,7 +117,7 @@
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->typename);
+		seq_printf(p, " %14s", irq_desc[i].chip->name);
 		seq_printf(p, "  %s", action->name);
 
 		for (action=action->next; action; action = action->next)
@@ -172,19 +153,6 @@
 
 void __init init_IRQ(void)
 {
-	int i;
-
-	for (i = 0; i < NR_IRQS; i++) {
-		irq_desc[i].status  = IRQ_DISABLED;
-		irq_desc[i].action  = NULL;
-		irq_desc[i].depth   = 1;
-		irq_desc[i].chip = &no_irq_chip;
-		spin_lock_init(&irq_desc[i].lock);
-#ifdef CONFIG_MIPS_MT_SMTC
-		irq_hwmask[i] = 0;
-#endif /* CONFIG_MIPS_MT_SMTC */
-	}
-
 	arch_init_irq();
 
 #ifdef CONFIG_KGDB
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 9bb21c7..fcc86b9 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -50,58 +50,13 @@
 	irq_disable_hazard();
 }
 
-static inline void mips_cpu_irq_enable(unsigned int irq)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	unmask_mips_irq(irq);
-	back_to_back_c0_hazard();
-	local_irq_restore(flags);
-}
-
-static void mips_cpu_irq_disable(unsigned int irq)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	mask_mips_irq(irq);
-	back_to_back_c0_hazard();
-	local_irq_restore(flags);
-}
-
-static unsigned int mips_cpu_irq_startup(unsigned int irq)
-{
-	mips_cpu_irq_enable(irq);
-
-	return 0;
-}
-
-#define	mips_cpu_irq_shutdown		mips_cpu_irq_disable
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues.  Same for mips_cpu_irq_end.
- */
-static void mips_cpu_irq_ack(unsigned int irq)
-{
-	mask_mips_irq(irq);
-}
-
-static void mips_cpu_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		unmask_mips_irq(irq);
-}
-
 static struct irq_chip mips_cpu_irq_controller = {
 	.typename	= "MIPS",
-	.startup	= mips_cpu_irq_startup,
-	.shutdown	= mips_cpu_irq_shutdown,
-	.enable		= mips_cpu_irq_enable,
-	.disable	= mips_cpu_irq_disable,
-	.ack		= mips_cpu_irq_ack,
-	.end		= mips_cpu_irq_end,
+	.ack		= mask_mips_irq,
+	.mask		= mask_mips_irq,
+	.mask_ack	= mask_mips_irq,
+	.unmask		= unmask_mips_irq,
+	.eoi		= unmask_mips_irq,
 };
 
 /*
@@ -110,8 +65,6 @@
 
 #define unmask_mips_mt_irq	unmask_mips_irq
 #define mask_mips_mt_irq	mask_mips_irq
-#define mips_mt_cpu_irq_enable	mips_cpu_irq_enable
-#define mips_mt_cpu_irq_disable	mips_cpu_irq_disable
 
 static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
 {
@@ -119,13 +72,11 @@
 
 	clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
 	evpe(vpflags);
-	mips_mt_cpu_irq_enable(irq);
+	unmask_mips_mt_irq(irq);
 
 	return 0;
 }
 
-#define	mips_mt_cpu_irq_shutdown	mips_mt_cpu_irq_disable
-
 /*
  * While we ack the interrupt interrupts are disabled and thus we don't need
  * to deal with concurrency issues.  Same for mips_cpu_irq_end.
@@ -138,16 +89,14 @@
 	mask_mips_mt_irq(irq);
 }
 
-#define mips_mt_cpu_irq_end mips_cpu_irq_end
-
 static struct irq_chip mips_mt_cpu_irq_controller = {
 	.typename	= "MIPS",
 	.startup	= mips_mt_cpu_irq_startup,
-	.shutdown	= mips_mt_cpu_irq_shutdown,
-	.enable		= mips_mt_cpu_irq_enable,
-	.disable	= mips_mt_cpu_irq_disable,
 	.ack		= mips_mt_cpu_irq_ack,
-	.end		= mips_mt_cpu_irq_end,
+	.mask		= mask_mips_mt_irq,
+	.mask_ack	= mips_mt_cpu_irq_ack,
+	.unmask		= unmask_mips_mt_irq,
+	.eoi		= unmask_mips_mt_irq,
 };
 
 void __init mips_cpu_irq_init(int irq_base)
@@ -163,19 +112,12 @@
 	 * leave them uninitialized for other processors.
 	 */
 	if (cpu_has_mipsmt)
-		for (i = irq_base; i < irq_base + 2; i++) {
-			irq_desc[i].status = IRQ_DISABLED;
-			irq_desc[i].action = NULL;
-			irq_desc[i].depth = 1;
-			irq_desc[i].chip = &mips_mt_cpu_irq_controller;
-		}
+		for (i = irq_base; i < irq_base + 2; i++)
+			set_irq_chip(i, &mips_mt_cpu_irq_controller);
 
-	for (i = irq_base + 2; i < irq_base + 8; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &mips_cpu_irq_controller;
-	}
+	for (i = irq_base + 2; i < irq_base + 8; i++)
+		set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
+					 handle_level_irq);
 
 	mips_cpu_irq_base = irq_base;
 }
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index f06a144..2c82412 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -319,7 +319,7 @@
 static int channel_open = 0;
 
 /* the work handler */
-static void sp_work(void *data)
+static void sp_work(struct work_struct *unused)
 {
 	if (!channel_open) {
 		if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
@@ -354,7 +354,7 @@
 			return;
 		}
 
-		INIT_WORK(&work, sp_work, NULL);
+		INIT_WORK(&work, sp_work);
 		queue_work(workqueue, &work);
 	} else
 		queue_work(workqueue, &work);
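
Note: the kspd change above follows the workqueue API update in which handlers receive the struct work_struct pointer itself and INIT_WORK() no longer takes a data argument; sp_work needs no context, so it simply ignores its parameter. A minimal sketch of the container_of() idiom used when per-work context is needed (my_ctx and my_handler are hypothetical names, not from this tree):

	struct my_ctx {
		struct work_struct work;
		int channel;
	};

	static void my_handler(struct work_struct *work)
	{
		/* recover the enclosing context from the embedded work item */
		struct my_ctx *ctx = container_of(work, struct my_ctx, work);

		printk(KERN_INFO "servicing channel %d\n", ctx->channel);
	}
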
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 7a3ebbe..b061c9a 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -382,531 +382,6 @@
 	return ret;
 }
 
-struct msgbuf32 { s32 mtype; char mtext[1]; };
-
-struct ipc_perm32
-{
-	key_t    	  key;
-        __compat_uid_t  uid;
-        __compat_gid_t  gid;
-        __compat_uid_t  cuid;
-        __compat_gid_t  cgid;
-        compat_mode_t	mode;
-        unsigned short  seq;
-};
-
-struct ipc64_perm32 {
-	key_t key;
-	__compat_uid_t uid;
-	__compat_gid_t gid;
-	__compat_uid_t cuid;
-	__compat_gid_t cgid;
-	compat_mode_t	mode;
-	unsigned short	seq;
-	unsigned short __pad1;
-	unsigned int __unused1;
-	unsigned int __unused2;
-};
-
-struct semid_ds32 {
-        struct ipc_perm32 sem_perm;               /* permissions .. see ipc.h */
-        compat_time_t   sem_otime;              /* last semop time */
-        compat_time_t   sem_ctime;              /* last change time */
-        u32 sem_base;              /* ptr to first semaphore in array */
-        u32 sem_pending;          /* pending operations to be processed */
-        u32 sem_pending_last;    /* last pending operation */
-        u32 undo;                  /* undo requests on this array */
-        unsigned short  sem_nsems;              /* no. of semaphores in array */
-};
-
-struct semid64_ds32 {
-	struct ipc64_perm32	sem_perm;
-	compat_time_t	sem_otime;
-	compat_time_t	sem_ctime;
-	unsigned int		sem_nsems;
-	unsigned int		__unused1;
-	unsigned int		__unused2;
-};
-
-struct msqid_ds32
-{
-        struct ipc_perm32 msg_perm;
-        u32 msg_first;
-        u32 msg_last;
-        compat_time_t   msg_stime;
-        compat_time_t   msg_rtime;
-        compat_time_t   msg_ctime;
-        u32 wwait;
-        u32 rwait;
-        unsigned short msg_cbytes;
-        unsigned short msg_qnum;
-        unsigned short msg_qbytes;
-        compat_ipc_pid_t msg_lspid;
-        compat_ipc_pid_t msg_lrpid;
-};
-
-struct msqid64_ds32 {
-	struct ipc64_perm32 msg_perm;
-	compat_time_t msg_stime;
-	unsigned int __unused1;
-	compat_time_t msg_rtime;
-	unsigned int __unused2;
-	compat_time_t msg_ctime;
-	unsigned int __unused3;
-	unsigned int msg_cbytes;
-	unsigned int msg_qnum;
-	unsigned int msg_qbytes;
-	compat_pid_t msg_lspid;
-	compat_pid_t msg_lrpid;
-	unsigned int __unused4;
-	unsigned int __unused5;
-};
-
-struct shmid_ds32 {
-        struct ipc_perm32       shm_perm;
-        int                     shm_segsz;
-        compat_time_t		shm_atime;
-        compat_time_t		shm_dtime;
-        compat_time_t		shm_ctime;
-        compat_ipc_pid_t    shm_cpid;
-        compat_ipc_pid_t    shm_lpid;
-        unsigned short          shm_nattch;
-};
-
-struct shmid64_ds32 {
-	struct ipc64_perm32	shm_perm;
-	compat_size_t		shm_segsz;
-	compat_time_t		shm_atime;
-	compat_time_t		shm_dtime;
-	compat_time_t shm_ctime;
-	compat_pid_t shm_cpid;
-	compat_pid_t shm_lpid;
-	unsigned int shm_nattch;
-	unsigned int __unused1;
-	unsigned int __unused2;
-};
-
-struct ipc_kludge32 {
-	u32 msgp;
-	s32 msgtyp;
-};
-
-static int
-do_sys32_semctl(int first, int second, int third, void __user *uptr)
-{
-	union semun fourth;
-	u32 pad;
-	int err, err2;
-	struct semid64_ds s;
-	mm_segment_t old_fs;
-
-	if (!uptr)
-		return -EINVAL;
-	err = -EFAULT;
-	if (get_user (pad, (u32 __user *)uptr))
-		return err;
-	if ((third & ~IPC_64) == SETVAL)
-		fourth.val = (int)pad;
-	else
-		fourth.__pad = (void __user *)A(pad);
-	switch (third & ~IPC_64) {
-	case IPC_INFO:
-	case IPC_RMID:
-	case IPC_SET:
-	case SEM_INFO:
-	case GETVAL:
-	case GETPID:
-	case GETNCNT:
-	case GETZCNT:
-	case GETALL:
-	case SETVAL:
-	case SETALL:
-		err = sys_semctl (first, second, third, fourth);
-		break;
-
-	case IPC_STAT:
-	case SEM_STAT:
-		fourth.__pad = (struct semid64_ds __user *)&s;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_semctl(first, second, third | IPC_64, fourth);
-		set_fs(old_fs);
-
-		if (third & IPC_64) {
-			struct semid64_ds32 __user *usp64 = (struct semid64_ds32 __user *) A(pad);
-
-			if (!access_ok(VERIFY_WRITE, usp64, sizeof(*usp64))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s.sem_perm.key, &usp64->sem_perm.key);
-			err2 |= __put_user(s.sem_perm.uid, &usp64->sem_perm.uid);
-			err2 |= __put_user(s.sem_perm.gid, &usp64->sem_perm.gid);
-			err2 |= __put_user(s.sem_perm.cuid, &usp64->sem_perm.cuid);
-			err2 |= __put_user(s.sem_perm.cgid, &usp64->sem_perm.cgid);
-			err2 |= __put_user(s.sem_perm.mode, &usp64->sem_perm.mode);
-			err2 |= __put_user(s.sem_perm.seq, &usp64->sem_perm.seq);
-			err2 |= __put_user(s.sem_otime, &usp64->sem_otime);
-			err2 |= __put_user(s.sem_ctime, &usp64->sem_ctime);
-			err2 |= __put_user(s.sem_nsems, &usp64->sem_nsems);
-		} else {
-			struct semid_ds32 __user *usp32 = (struct semid_ds32 __user *) A(pad);
-
-			if (!access_ok(VERIFY_WRITE, usp32, sizeof(*usp32))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s.sem_perm.key, &usp32->sem_perm.key);
-			err2 |= __put_user(s.sem_perm.uid, &usp32->sem_perm.uid);
-			err2 |= __put_user(s.sem_perm.gid, &usp32->sem_perm.gid);
-			err2 |= __put_user(s.sem_perm.cuid, &usp32->sem_perm.cuid);
-			err2 |= __put_user(s.sem_perm.cgid, &usp32->sem_perm.cgid);
-			err2 |= __put_user(s.sem_perm.mode, &usp32->sem_perm.mode);
-			err2 |= __put_user(s.sem_perm.seq, &usp32->sem_perm.seq);
-			err2 |= __put_user(s.sem_otime, &usp32->sem_otime);
-			err2 |= __put_user(s.sem_ctime, &usp32->sem_ctime);
-			err2 |= __put_user(s.sem_nsems, &usp32->sem_nsems);
-		}
-		if (err2)
-			err = -EFAULT;
-		break;
-
-	default:
-		err = - EINVAL;
-		break;
-	}
-
-	return err;
-}
-
-static int
-do_sys32_msgsnd (int first, int second, int third, void __user *uptr)
-{
-	struct msgbuf32 __user *up = (struct msgbuf32 __user *)uptr;
-	struct msgbuf *p;
-	mm_segment_t old_fs;
-	int err;
-
-	if (second < 0)
-		return -EINVAL;
-	p = kmalloc (second + sizeof (struct msgbuf)
-				    + 4, GFP_USER);
-	if (!p)
-		return -ENOMEM;
-	err = get_user (p->mtype, &up->mtype);
-	if (err)
-		goto out;
-	err |= __copy_from_user (p->mtext, &up->mtext, second);
-	if (err)
-		goto out;
-	old_fs = get_fs ();
-	set_fs (KERNEL_DS);
-	err = sys_msgsnd (first, (struct msgbuf __user *)p, second, third);
-	set_fs (old_fs);
-out:
-	kfree (p);
-
-	return err;
-}
-
-static int
-do_sys32_msgrcv (int first, int second, int msgtyp, int third,
-		 int version, void __user *uptr)
-{
-	struct msgbuf32 __user *up;
-	struct msgbuf *p;
-	mm_segment_t old_fs;
-	int err;
-
-	if (!version) {
-		struct ipc_kludge32 __user *uipck = (struct ipc_kludge32 __user *)uptr;
-		struct ipc_kludge32 ipck;
-
-		err = -EINVAL;
-		if (!uptr)
-			goto out;
-		err = -EFAULT;
-		if (copy_from_user (&ipck, uipck, sizeof (struct ipc_kludge32)))
-			goto out;
-		uptr = (void __user *)AA(ipck.msgp);
-		msgtyp = ipck.msgtyp;
-	}
-
-	if (second < 0)
-		return -EINVAL;
-	err = -ENOMEM;
-	p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER);
-	if (!p)
-		goto out;
-	old_fs = get_fs ();
-	set_fs (KERNEL_DS);
-	err = sys_msgrcv (first, (struct msgbuf __user *)p, second + 4, msgtyp, third);
-	set_fs (old_fs);
-	if (err < 0)
-		goto free_then_out;
-	up = (struct msgbuf32 __user *)uptr;
-	if (put_user (p->mtype, &up->mtype) ||
-	    __copy_to_user (&up->mtext, p->mtext, err))
-		err = -EFAULT;
-free_then_out:
-	kfree (p);
-out:
-	return err;
-}
-
-static int
-do_sys32_msgctl (int first, int second, void __user *uptr)
-{
-	int err = -EINVAL, err2;
-	struct msqid64_ds m;
-	struct msqid_ds32 __user *up32 = (struct msqid_ds32 __user *)uptr;
-	struct msqid64_ds32 __user *up64 = (struct msqid64_ds32 __user *)uptr;
-	mm_segment_t old_fs;
-
-	switch (second & ~IPC_64) {
-	case IPC_INFO:
-	case IPC_RMID:
-	case MSG_INFO:
-		err = sys_msgctl (first, second, (struct msqid_ds __user *)uptr);
-		break;
-
-	case IPC_SET:
-		if (second & IPC_64) {
-			if (!access_ok(VERIFY_READ, up64, sizeof(*up64))) {
-				err = -EFAULT;
-				break;
-			}
-			err = __get_user(m.msg_perm.uid, &up64->msg_perm.uid);
-			err |= __get_user(m.msg_perm.gid, &up64->msg_perm.gid);
-			err |= __get_user(m.msg_perm.mode, &up64->msg_perm.mode);
-			err |= __get_user(m.msg_qbytes, &up64->msg_qbytes);
-		} else {
-			if (!access_ok(VERIFY_READ, up32, sizeof(*up32))) {
-				err = -EFAULT;
-				break;
-			}
-			err = __get_user(m.msg_perm.uid, &up32->msg_perm.uid);
-			err |= __get_user(m.msg_perm.gid, &up32->msg_perm.gid);
-			err |= __get_user(m.msg_perm.mode, &up32->msg_perm.mode);
-			err |= __get_user(m.msg_qbytes, &up32->msg_qbytes);
-		}
-		if (err)
-			break;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_msgctl(first, second | IPC_64, (struct msqid_ds __user *)&m);
-		set_fs(old_fs);
-		break;
-
-	case IPC_STAT:
-	case MSG_STAT:
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_msgctl(first, second | IPC_64, (struct msqid_ds __user *)&m);
-		set_fs(old_fs);
-		if (second & IPC_64) {
-			if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(m.msg_perm.key, &up64->msg_perm.key);
-			err2 |= __put_user(m.msg_perm.uid, &up64->msg_perm.uid);
-			err2 |= __put_user(m.msg_perm.gid, &up64->msg_perm.gid);
-			err2 |= __put_user(m.msg_perm.cuid, &up64->msg_perm.cuid);
-			err2 |= __put_user(m.msg_perm.cgid, &up64->msg_perm.cgid);
-			err2 |= __put_user(m.msg_perm.mode, &up64->msg_perm.mode);
-			err2 |= __put_user(m.msg_perm.seq, &up64->msg_perm.seq);
-			err2 |= __put_user(m.msg_stime, &up64->msg_stime);
-			err2 |= __put_user(m.msg_rtime, &up64->msg_rtime);
-			err2 |= __put_user(m.msg_ctime, &up64->msg_ctime);
-			err2 |= __put_user(m.msg_cbytes, &up64->msg_cbytes);
-			err2 |= __put_user(m.msg_qnum, &up64->msg_qnum);
-			err2 |= __put_user(m.msg_qbytes, &up64->msg_qbytes);
-			err2 |= __put_user(m.msg_lspid, &up64->msg_lspid);
-			err2 |= __put_user(m.msg_lrpid, &up64->msg_lrpid);
-			if (err2)
-				err = -EFAULT;
-		} else {
-			if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(m.msg_perm.key, &up32->msg_perm.key);
-			err2 |= __put_user(m.msg_perm.uid, &up32->msg_perm.uid);
-			err2 |= __put_user(m.msg_perm.gid, &up32->msg_perm.gid);
-			err2 |= __put_user(m.msg_perm.cuid, &up32->msg_perm.cuid);
-			err2 |= __put_user(m.msg_perm.cgid, &up32->msg_perm.cgid);
-			err2 |= __put_user(m.msg_perm.mode, &up32->msg_perm.mode);
-			err2 |= __put_user(m.msg_perm.seq, &up32->msg_perm.seq);
-			err2 |= __put_user(m.msg_stime, &up32->msg_stime);
-			err2 |= __put_user(m.msg_rtime, &up32->msg_rtime);
-			err2 |= __put_user(m.msg_ctime, &up32->msg_ctime);
-			err2 |= __put_user(m.msg_cbytes, &up32->msg_cbytes);
-			err2 |= __put_user(m.msg_qnum, &up32->msg_qnum);
-			err2 |= __put_user(m.msg_qbytes, &up32->msg_qbytes);
-			err2 |= __put_user(m.msg_lspid, &up32->msg_lspid);
-			err2 |= __put_user(m.msg_lrpid, &up32->msg_lrpid);
-			if (err2)
-				err = -EFAULT;
-		}
-		break;
-	}
-
-	return err;
-}
-
-static int
-do_sys32_shmat (int first, int second, int third, int version, void __user *uptr)
-{
-	unsigned long raddr;
-	u32 __user *uaddr = (u32 __user *)A((u32)third);
-	int err = -EINVAL;
-
-	if (version == 1)
-		return err;
-	err = do_shmat (first, uptr, second, &raddr);
-	if (err)
-		return err;
-	err = put_user (raddr, uaddr);
-	return err;
-}
-
-struct shm_info32 {
-	int used_ids;
-	u32 shm_tot, shm_rss, shm_swp;
-	u32 swap_attempts, swap_successes;
-};
-
-static int
-do_sys32_shmctl (int first, int second, void __user *uptr)
-{
-	struct shmid64_ds32 __user *up64 = (struct shmid64_ds32 __user *)uptr;
-	struct shmid_ds32 __user *up32 = (struct shmid_ds32 __user *)uptr;
-	struct shm_info32 __user *uip = (struct shm_info32 __user *)uptr;
-	int err = -EFAULT, err2;
-	struct shmid64_ds s64;
-	mm_segment_t old_fs;
-	struct shm_info si;
-	struct shmid_ds s;
-
-	switch (second & ~IPC_64) {
-	case IPC_INFO:
-		second = IPC_INFO; /* So that we don't have to translate it */
-	case IPC_RMID:
-	case SHM_LOCK:
-	case SHM_UNLOCK:
-		err = sys_shmctl(first, second, (struct shmid_ds __user *)uptr);
-		break;
-	case IPC_SET:
-		if (second & IPC_64) {
-			err = get_user(s.shm_perm.uid, &up64->shm_perm.uid);
-			err |= get_user(s.shm_perm.gid, &up64->shm_perm.gid);
-			err |= get_user(s.shm_perm.mode, &up64->shm_perm.mode);
-		} else {
-			err = get_user(s.shm_perm.uid, &up32->shm_perm.uid);
-			err |= get_user(s.shm_perm.gid, &up32->shm_perm.gid);
-			err |= get_user(s.shm_perm.mode, &up32->shm_perm.mode);
-		}
-		if (err)
-			break;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_shmctl(first, second & ~IPC_64, (struct shmid_ds __user *)&s);
-		set_fs(old_fs);
-		break;
-
-	case IPC_STAT:
-	case SHM_STAT:
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_shmctl(first, second | IPC_64, (void __user *) &s64);
-		set_fs(old_fs);
-		if (err < 0)
-			break;
-		if (second & IPC_64) {
-			if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s64.shm_perm.key, &up64->shm_perm.key);
-			err2 |= __put_user(s64.shm_perm.uid, &up64->shm_perm.uid);
-			err2 |= __put_user(s64.shm_perm.gid, &up64->shm_perm.gid);
-			err2 |= __put_user(s64.shm_perm.cuid, &up64->shm_perm.cuid);
-			err2 |= __put_user(s64.shm_perm.cgid, &up64->shm_perm.cgid);
-			err2 |= __put_user(s64.shm_perm.mode, &up64->shm_perm.mode);
-			err2 |= __put_user(s64.shm_perm.seq, &up64->shm_perm.seq);
-			err2 |= __put_user(s64.shm_atime, &up64->shm_atime);
-			err2 |= __put_user(s64.shm_dtime, &up64->shm_dtime);
-			err2 |= __put_user(s64.shm_ctime, &up64->shm_ctime);
-			err2 |= __put_user(s64.shm_segsz, &up64->shm_segsz);
-			err2 |= __put_user(s64.shm_nattch, &up64->shm_nattch);
-			err2 |= __put_user(s64.shm_cpid, &up64->shm_cpid);
-			err2 |= __put_user(s64.shm_lpid, &up64->shm_lpid);
-		} else {
-			if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s64.shm_perm.key, &up32->shm_perm.key);
-			err2 |= __put_user(s64.shm_perm.uid, &up32->shm_perm.uid);
-			err2 |= __put_user(s64.shm_perm.gid, &up32->shm_perm.gid);
-			err2 |= __put_user(s64.shm_perm.cuid, &up32->shm_perm.cuid);
-			err2 |= __put_user(s64.shm_perm.cgid, &up32->shm_perm.cgid);
-			err2 |= __put_user(s64.shm_perm.mode, &up32->shm_perm.mode);
-			err2 |= __put_user(s64.shm_perm.seq, &up32->shm_perm.seq);
-			err2 |= __put_user(s64.shm_atime, &up32->shm_atime);
-			err2 |= __put_user(s64.shm_dtime, &up32->shm_dtime);
-			err2 |= __put_user(s64.shm_ctime, &up32->shm_ctime);
-			err2 |= __put_user(s64.shm_segsz, &up32->shm_segsz);
-			err2 |= __put_user(s64.shm_nattch, &up32->shm_nattch);
-			err2 |= __put_user(s64.shm_cpid, &up32->shm_cpid);
-			err2 |= __put_user(s64.shm_lpid, &up32->shm_lpid);
-		}
-		if (err2)
-			err = -EFAULT;
-		break;
-
-	case SHM_INFO:
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_shmctl(first, second, (void __user *)&si);
-		set_fs(old_fs);
-		if (err < 0)
-			break;
-		err2 = put_user(si.used_ids, &uip->used_ids);
-		err2 |= __put_user(si.shm_tot, &uip->shm_tot);
-		err2 |= __put_user(si.shm_rss, &uip->shm_rss);
-		err2 |= __put_user(si.shm_swp, &uip->shm_swp);
-		err2 |= __put_user(si.swap_attempts, &uip->swap_attempts);
-		err2 |= __put_user (si.swap_successes, &uip->swap_successes);
-		if (err2)
-			err = -EFAULT;
-		break;
-
-	default:
-		err = -EINVAL;
-		break;
-	}
-
-	return err;
-}
-
-static int sys32_semtimedop(int semid, struct sembuf __user *tsems, int nsems,
-                            const struct compat_timespec __user *timeout32)
-{
-	struct compat_timespec t32;
-	struct timespec __user *t64 = compat_alloc_user_space(sizeof(*t64));
-
-	if (copy_from_user(&t32, timeout32, sizeof(t32)))
-		return -EFAULT;
-
-	if (put_user(t32.tv_sec, &t64->tv_sec) ||
-	    put_user(t32.tv_nsec, &t64->tv_nsec))
-		return -EFAULT;
-
-	return sys_semtimedop(semid, tsems, nsems, t64);
-}
-
 asmlinkage long
 sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 {
@@ -918,48 +393,43 @@
 	switch (call) {
 	case SEMOP:
 		/* struct sembuf is the same on 32 and 64bit :)) */
-		err = sys_semtimedop (first, (struct sembuf __user *)AA(ptr), second,
-		                      NULL);
+		err = sys_semtimedop(first, compat_ptr(ptr), second, NULL);
 		break;
 	case SEMTIMEDOP:
-		err = sys32_semtimedop (first, (struct sembuf __user *)AA(ptr), second,
-		                      (const struct compat_timespec __user *)AA(fifth));
+		err = compat_sys_semtimedop(first, compat_ptr(ptr), second,
+					    compat_ptr(fifth));
 		break;
 	case SEMGET:
-		err = sys_semget (first, second, third);
+		err = sys_semget(first, second, third);
 		break;
 	case SEMCTL:
-		err = do_sys32_semctl (first, second, third,
-				       (void __user *)AA(ptr));
+		err = compat_sys_semctl(first, second, third, compat_ptr(ptr));
 		break;
-
 	case MSGSND:
-		err = do_sys32_msgsnd (first, second, third,
-				       (void __user *)AA(ptr));
+		err = compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
 		break;
 	case MSGRCV:
-		err = do_sys32_msgrcv (first, second, fifth, third,
-				       version, (void __user *)AA(ptr));
+		err = compat_sys_msgrcv(first, second, fifth, third,
+					version, compat_ptr(ptr));
 		break;
 	case MSGGET:
-		err = sys_msgget ((key_t) first, second);
+		err = sys_msgget((key_t) first, second);
 		break;
 	case MSGCTL:
-		err = do_sys32_msgctl (first, second, (void __user *)AA(ptr));
+		err = compat_sys_msgctl(first, second, compat_ptr(ptr));
 		break;
-
 	case SHMAT:
-		err = do_sys32_shmat (first, second, third,
-				      version, (void __user *)AA(ptr));
+		err = compat_sys_shmat(first, second, third, version,
+				       compat_ptr(ptr));
 		break;
 	case SHMDT:
-		err = sys_shmdt ((char __user *)A(ptr));
+		err = sys_shmdt(compat_ptr(ptr));
 		break;
 	case SHMGET:
-		err = sys_shmget (first, (unsigned)second, third);
+		err = sys_shmget(first, (unsigned)second, third);
 		break;
 	case SHMCTL:
-		err = do_sys32_shmctl (first, second, (void __user *)AA(ptr));
+		err = compat_sys_shmctl(first, second, compat_ptr(ptr));
 		break;
 	default:
 		err = -EINVAL;
@@ -969,18 +439,16 @@
 	return err;
 }
 
-asmlinkage long sys32_shmat(int shmid, char __user *shmaddr,
-			  int shmflg, int32_t __user *addr)
+#ifdef CONFIG_MIPS32_N32
+asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, union semun arg)
 {
-	unsigned long raddr;
-	int err;
-
-	err = do_shmat(shmid, shmaddr, shmflg, &raddr);
-	if (err)
-		return err;
-
-	return put_user(raddr, addr);
+	/* compat_sys_semctl expects a pointer to union semun */
+	u32 __user *uptr = compat_alloc_user_space(sizeof(u32));
+	if (put_user(ptr_to_compat(arg.__pad), uptr))
+		return -EFAULT;
+	return compat_sys_semctl(semid, semnum, cmd, uptr);
 }
+#endif
 
 struct sysctl_args32
 {
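
Note: the large deletion above retires the hand-rolled 32-bit IPC translation in favour of the generic compat_sys_* helpers, with compat_ptr() turning 32-bit user values back into __user pointers. Only n32 semctl keeps a local wrapper, because its fourth argument arrives by value while compat_sys_semctl() wants a user pointer. A minimal sketch of the compat_ptr() pattern (example_msgctl32 is a hypothetical name):

	static long example_msgctl32(int first, int second, u32 uptr)
	{
		/* compat_ptr() rebuilds a __user pointer from the 32-bit value */
		return compat_sys_msgctl(first, second, compat_ptr(uptr));
	}
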
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
new file mode 100644
index 0000000..e0ad754
--- /dev/null
+++ b/arch/mips/kernel/machine_kexec.c
@@ -0,0 +1,85 @@
+/*
+ * machine_kexec.c for kexec
+ * Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2.  See the file COPYING for more details.
+ */
+
+#include <linux/kexec.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+
+const extern unsigned char relocate_new_kernel[];
+const extern unsigned int relocate_new_kernel_size;
+
+extern unsigned long kexec_start_address;
+extern unsigned long kexec_indirection_page;
+
+int
+machine_kexec_prepare(struct kimage *kimage)
+{
+	return 0;
+}
+
+void
+machine_kexec_cleanup(struct kimage *kimage)
+{
+}
+
+void
+machine_shutdown(void)
+{
+}
+
+void
+machine_crash_shutdown(struct pt_regs *regs)
+{
+}
+
+void
+machine_kexec(struct kimage *image)
+{
+	unsigned long reboot_code_buffer;
+	unsigned long entry;
+	unsigned long *ptr;
+
+	reboot_code_buffer =
+	  (unsigned long)page_address(image->control_code_page);
+
+	kexec_start_address = image->start;
+	kexec_indirection_page = (unsigned long) phys_to_virt(image->head & PAGE_MASK);
+
+	memcpy((void*)reboot_code_buffer, relocate_new_kernel,
+	       relocate_new_kernel_size);
+
+	/*
+	 * The generic kexec code builds a page list with physical
+	 * addresses. they are directly accessible through KSEG0 (or
+	 * CKSEG0 or XPHYS if on 64bit system), hence the
+	 * phys_to_virt() call.
+	 */
+	for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE);
+	     ptr = (entry & IND_INDIRECTION) ?
+	       phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
+		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
+		    *ptr & IND_DESTINATION)
+			*ptr = (unsigned long) phys_to_virt(*ptr);
+	}
+
+	/*
+	 * we do not want to be bothered.
+	 */
+	local_irq_disable();
+
+	flush_icache_range(reboot_code_buffer,
+			   reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE);
+
+	printk("Will call new kernel at %08lx\n", image->start);
+	printk("Bye ...\n");
+	flush_cache_all();
+	((void (*)(void))reboot_code_buffer)();
+}
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index d7bf021..cb08014 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -29,6 +29,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
+#include <asm/pgtable.h>	/* MODULE_START */
 
 struct mips_hi16 {
 	struct mips_hi16 *next;
@@ -43,9 +44,23 @@
 
 void *module_alloc(unsigned long size)
 {
+#ifdef MODULE_START
+	struct vm_struct *area;
+
+	size = PAGE_ALIGN(size);
+	if (!size)
+		return NULL;
+
+	area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
+	if (!area)
+		return NULL;
+
+	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+#else
 	if (size == 0)
 		return NULL;
 	return vmalloc(size);
+#endif
 }
 
 /* Free memory returned from module_alloc */
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
new file mode 100644
index 0000000..a3f0d00
--- /dev/null
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -0,0 +1,80 @@
+/*
+ * relocate_kernel.S for kexec
+ * Created by <nschichan@corp.free.fr> on Thu Oct 12 17:49:57 2006
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2.  See the file COPYING for more details.
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/page.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/addrspace.h>
+
+	.globl relocate_new_kernel
+relocate_new_kernel:
+
+	PTR_L	s0, kexec_indirection_page
+	PTR_L	s1, kexec_start_address
+
+process_entry:
+	PTR_L	s2, (s0)
+	PTR_ADD	s0, s0, SZREG
+
+	/* destination page */
+	and	s3, s2, 0x1
+	beq	s3, zero, 1f
+	and	s4, s2, ~0x1	/* store destination addr in s4 */
+	move	a0, s4
+	b	process_entry
+
+1:
+	/* indirection page, update s0  */
+	and	s3, s2, 0x2
+	beq	s3, zero, 1f
+	and	s0, s2, ~0x2
+	b	process_entry
+
+1:
+	/* done page */
+	and	s3, s2, 0x4
+	beq	s3, zero, 1f
+	b	done
+1:
+	/* source page */
+	and	s3, s2, 0x8
+	beq	s3, zero, process_entry
+	and	s2, s2, ~0x8
+	li	s6, (1 << PAGE_SHIFT) / SZREG
+
+copy_word:
+	/* copy page word by word */
+	REG_L	s5, (s2)
+	REG_S	s5, (s4)
+	INT_ADD	s4, s4, SZREG
+	INT_ADD	s2, s2, SZREG
+	INT_SUB	s6, s6, 1
+	beq	s6, zero, process_entry
+	b	copy_word
+	b	process_entry
+
+done:
+	/* jump to kexec_start_address */
+	j	s1
+
+	.globl kexec_start_address
+kexec_start_address:
+	.long	0x0
+
+	.globl kexec_indirection_page
+kexec_indirection_page:
+	.long	0x0
+
+relocate_new_kernel_end:
+
+	.globl relocate_new_kernel_size
+relocate_new_kernel_size:
+	.long relocate_new_kernel_end - relocate_new_kernel
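
Note: relocate_new_kernel walks the kexec page list built by machine_kexec() above; each entry's low bits say whether it names the next destination page, points at another indirection page, ends the list, or is a source page to copy. A C mirror of the same walk, illustrative only, with the flag bits spelled out as used in the assembly (0x1 destination, 0x2 indirection, 0x4 done, 0x8 source, matching the generic IND_* values):

	static void process_page_list(unsigned long *ind)
	{
		unsigned char *dest = NULL;

		for (;;) {
			unsigned long entry = *ind++;

			if (entry & 0x1)		/* IND_DESTINATION */
				dest = (unsigned char *)(entry & ~0x1UL);
			else if (entry & 0x2)		/* IND_INDIRECTION */
				ind = (unsigned long *)(entry & ~0x2UL);
			else if (entry & 0x4)		/* IND_DONE */
				break;
			else if (entry & 0x8) {		/* IND_SOURCE: copy one page */
				memcpy(dest, (void *)(entry & ~0x8UL), PAGE_SIZE);
				dest += PAGE_SIZE;
			}
		}
	}
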
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index a95f37d..7c0b393 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -653,7 +653,7 @@
 	sys	sys_move_pages		6
 	sys	sys_set_robust_list	2
 	sys	sys_get_robust_list	3	/* 4310 */
-	sys	sys_ni_syscall		0
+	sys	sys_kexec_load		4
 	sys	sys_getcpu		3
 	sys	sys_epoll_pwait		6
 	.endm
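
Note: with the table slot above now pointing at sys_kexec_load (four arguments), o32 userland can reach the call. A hedged usage sketch via syscall(2), assuming the o32 number lands at 4311, i.e. the slot right after get_robust_list (4310) in the table above, and that segments points at an array of struct kexec_segment:

	#include <unistd.h>
	#include <sys/syscall.h>

	/* Assumption: __NR_kexec_load == 4311 on o32, per the table above. */
	long load_kexec_image(unsigned long entry, unsigned long nr_segments,
			      void *segments, unsigned long flags)
	{
		return syscall(4311, entry, nr_segments, segments, flags);
	}
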
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 8fb0f60..e569b84 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -468,6 +468,6 @@
 	PTR	sys_move_pages
 	PTR	sys_set_robust_list
 	PTR	sys_get_robust_list
-	PTR	sys_ni_syscall			/* 5270 */
+	PTR	sys_kexec_load			/* 5270 */
 	PTR	sys_getcpu
 	PTR	sys_epoll_pwait
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 0da5ca2..34567d8 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -149,8 +149,8 @@
 	PTR	sys_mincore
 	PTR	sys_madvise
 	PTR	sys_shmget
-	PTR	sys32_shmat
-	PTR	sys_shmctl			/* 6030 */
+	PTR	sys_shmat
+	PTR	compat_sys_shmctl			/* 6030 */
 	PTR	sys_dup
 	PTR	sys_dup2
 	PTR	sys_pause
@@ -184,12 +184,12 @@
 	PTR	sys32_newuname
 	PTR	sys_semget
 	PTR	sys_semop
-	PTR	sys_semctl
+	PTR	sysn32_semctl
 	PTR	sys_shmdt			/* 6065 */
 	PTR	sys_msgget
-	PTR	sys_msgsnd
-	PTR	sys_msgrcv
-	PTR	sys_msgctl
+	PTR	compat_sys_msgsnd
+	PTR	compat_sys_msgrcv
+	PTR	compat_sys_msgctl
 	PTR	compat_sys_fcntl		/* 6070 */
 	PTR	sys_flock
 	PTR	sys_fsync
@@ -335,7 +335,7 @@
 	PTR	compat_sys_fcntl64
 	PTR	sys_set_tid_address
 	PTR	sys_restart_syscall
-	PTR	sys_semtimedop			/* 6215 */
+	PTR	compat_sys_semtimedop			/* 6215 */
 	PTR	sys_fadvise64_64
 	PTR	compat_sys_statfs64
 	PTR	compat_sys_fstatfs64
@@ -394,6 +394,6 @@
 	PTR	sys_move_pages
 	PTR	compat_sys_set_robust_list
 	PTR	compat_sys_get_robust_list
-	PTR	sys_ni_syscall
+	PTR	compat_sys_kexec_load
 	PTR	sys_getcpu
 	PTR	sys_epoll_pwait
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b9d00ca..e91379c 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -516,7 +516,7 @@
 	PTR	compat_sys_move_pages
 	PTR	compat_sys_set_robust_list
 	PTR	compat_sys_get_robust_list	/* 4310 */
-	PTR	sys_ni_syscall
+	PTR	compat_sys_kexec_load
 	PTR	sys_getcpu
 	PTR	sys_epoll_pwait
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 8f6e896..89440a0 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -145,13 +145,12 @@
 	unsigned long start = memparse(p, &p);
 
 #ifdef CONFIG_64BIT
-	/* HACK: Guess if the sign extension was forgotten */
-	if (start > 0x0000000080000000 && start < 0x00000000ffffffff)
-		start |= 0xffffffff00000000UL;
+	/* Guess if the sign extension was forgotten by bootloader */
+	if (start < XKPHYS)
+		start = (int)start;
 #endif
 	initrd_start = start;
 	initrd_end += start;
-
 	return 0;
 }
 early_param("rd_start", rd_start_early);
@@ -159,41 +158,64 @@
 static int __init rd_size_early(char *p)
 {
 	initrd_end += memparse(p, &p);
-
 	return 0;
 }
 early_param("rd_size", rd_size_early);
 
+/* it returns the next free pfn after initrd */
 static unsigned long __init init_initrd(void)
 {
-	unsigned long tmp, end, size;
+	unsigned long end;
 	u32 *initrd_header;
 
-	ROOT_DEV = Root_RAM0;
-
 	/*
 	 * Board specific code or command line parser should have
 	 * already set up initrd_start and initrd_end. In these cases
 	 * perform sanity checks and use them if all looks good.
 	 */
-	size = initrd_end - initrd_start;
-	if (initrd_end == 0 || size == 0) {
-		initrd_start = 0;
-		initrd_end = 0;
-	} else
-		return initrd_end;
+	if (initrd_start && initrd_end > initrd_start)
+		goto sanitize;
 
-	end = (unsigned long)&_end;
-	tmp = PAGE_ALIGN(end) - sizeof(u32) * 2;
-	if (tmp < end)
-		tmp += PAGE_SIZE;
+	/*
+	 * See if initrd has been added to the kernel image by
+	 * arch/mips/boot/addinitrd.c. In that case a header is
+	 * prepended to initrd and is made up of 8 bytes. The first
+	 * word is a magic number and the second one is the size of
+	 * initrd.  Initrd start must be page aligned in any cases.
+	 */
+	initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
+	if (initrd_header[0] != 0x494E5244)
+		goto disable;
+	initrd_start = (unsigned long)(initrd_header + 2);
+	initrd_end = initrd_start + initrd_header[1];
 
-	initrd_header = (u32 *)tmp;
-	if (initrd_header[0] == 0x494E5244) {
-		initrd_start = (unsigned long)&initrd_header[2];
-		initrd_end = initrd_start + initrd_header[1];
+sanitize:
+	if (initrd_start & ~PAGE_MASK) {
+		printk(KERN_ERR "initrd start must be page aligned\n");
+		goto disable;
 	}
-	return initrd_end;
+	if (initrd_start < PAGE_OFFSET) {
+		printk(KERN_ERR "initrd start < PAGE_OFFSET\n");
+		goto disable;
+	}
+
+	/*
+	 * Sanitize initrd addresses. For example firmware
+	 * can't guess if they need to pass them through
+	 * 64-bits values if the kernel has been built in pure
+	 * 32-bit. We need also to switch from KSEG0 to XKPHYS
+	 * addresses now, so the code can now safely use __pa().
+	 */
+	end = __pa(initrd_end);
+	initrd_end = (unsigned long)__va(end);
+	initrd_start = (unsigned long)__va(__pa(initrd_start));
+
+	ROOT_DEV = Root_RAM0;
+	return PFN_UP(end);
+disable:
+	initrd_start = 0;
+	initrd_end = 0;
+	return 0;
 }
 
 static void __init finalize_initrd(void)
@@ -204,12 +226,12 @@
 		printk(KERN_INFO "Initrd not found or empty");
 		goto disable;
 	}
-	if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) {
+	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
 		printk("Initrd extends beyond end of memory");
 		goto disable;
 	}
 
-	reserve_bootmem(CPHYSADDR(initrd_start), size);
+	reserve_bootmem(__pa(initrd_start), size);
 	initrd_below_start_ok = 1;
 
 	printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
@@ -259,8 +281,7 @@
 	 * not selected. Once that done we can determine the low bound
 	 * of usable memory.
 	 */
-	reserved_end = init_initrd();
-	reserved_end = PFN_UP(CPHYSADDR(max(reserved_end, (unsigned long)&_end)));
+	reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end)));
 
 	/*
 	 * Find the highest page frame number we have available.
@@ -432,10 +453,10 @@
 	if (UNCAC_BASE != IO_BASE)
 		return;
 
-	code_resource.start = virt_to_phys(&_text);
-	code_resource.end = virt_to_phys(&_etext) - 1;
-	data_resource.start = virt_to_phys(&_etext);
-	data_resource.end = virt_to_phys(&_edata) - 1;
+	code_resource.start = __pa_symbol(&_text);
+	code_resource.end = __pa_symbol(&_etext) - 1;
+	data_resource.start = __pa_symbol(&_etext);
+	data_resource.end = __pa_symbol(&_edata) - 1;
 
 	/*
 	 * Request address space for all standard RAM.
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index 477c533..a67c185 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -17,7 +17,6 @@
  */
 #include <linux/cache.h>
 #include <linux/sched.h>
-#include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 2ac19a6..1ee689c 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -278,7 +278,9 @@
 
 	/* need to mark IPI's as IRQ_PER_CPU */
 	irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU;
+	set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
 	irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU;
+	set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
 }
 
 /*
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 49db516..f2a8701 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -172,7 +172,7 @@
 
 	spin_lock(&smp_call_lock);
 	call_data = &data;
-	mb();
+	smp_mb();
 
 	/* Send a message to all other CPUs and wait for them to respond */
 	for_each_online_cpu(i)
@@ -204,7 +204,7 @@
 	 * Notify initiating CPU that I've grabbed the data and am
 	 * about to execute the function.
 	 */
-	mb();
+	smp_mb();
 	atomic_inc(&call_data->started);
 
 	/*
@@ -215,7 +215,7 @@
 	irq_exit();
 
 	if (wait) {
-		mb();
+		smp_mb();
 		atomic_inc(&call_data->finished);
 	}
 }
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 3b78caf..802febe 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1009,6 +1009,7 @@
 	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
 
 	irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
+	set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
 }
 
 /*
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index e535f86..11aab6d 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -11,7 +11,6 @@
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
-#include <linux/clocksource.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -83,17 +82,11 @@
 /*
  * Null high precision timer functions for systems lacking one.
  */
-static unsigned int null_hpt_read(void)
+static cycle_t null_hpt_read(void)
 {
 	return 0;
 }
 
-static void __init null_hpt_init(void)
-{
-	/* nothing */
-}
-
-
 /*
  * Timer ack for an R4k-compatible timer of a known frequency.
  */
@@ -118,7 +111,7 @@
 /*
  * High precision timer functions for a R4k-compatible timer.
  */
-static unsigned int c0_hpt_read(void)
+static cycle_t c0_hpt_read(void)
 {
 	return read_c0_count();
 }
@@ -132,9 +125,6 @@
 
 int (*mips_timer_state)(void);
 void (*mips_timer_ack)(void);
-unsigned int (*mips_hpt_read)(void);
-void (*mips_hpt_init)(void) __initdata = null_hpt_init;
-unsigned int mips_hpt_mask = 0xffffffff;
 
 /* last time when xtime and rtc are sync'ed up */
 static long last_rtc_update;
@@ -276,8 +266,7 @@
 
 static unsigned int __init calibrate_hpt(void)
 {
-	u64 frequency;
-	u32 hpt_start, hpt_end, hpt_count, hz;
+	cycle_t frequency, hpt_start, hpt_end, hpt_count, hz;
 
 	const int loops = HZ / 10;
 	int log_2_loops = 0;
@@ -303,28 +292,23 @@
 	 * during the calculated number of periods between timer
 	 * interrupts.
 	 */
-	hpt_start = mips_hpt_read();
+	hpt_start = clocksource_mips.read();
 	do {
 		while (mips_timer_state());
 		while (!mips_timer_state());
 	} while (--i);
-	hpt_end = mips_hpt_read();
+	hpt_end = clocksource_mips.read();
 
-	hpt_count = (hpt_end - hpt_start) & mips_hpt_mask;
+	hpt_count = (hpt_end - hpt_start) & clocksource_mips.mask;
 	hz = HZ;
-	frequency = (u64)hpt_count * (u64)hz;
+	frequency = hpt_count * hz;
 
 	return frequency >> log_2_loops;
 }
 
-static cycle_t read_mips_hpt(void)
-{
-	return (cycle_t)mips_hpt_read();
-}
-
-static struct clocksource clocksource_mips = {
+struct clocksource clocksource_mips = {
 	.name		= "MIPS",
-	.read		= read_mips_hpt,
+	.mask		= 0xffffffff,
 	.is_continuous	= 1,
 };
 
@@ -333,7 +317,7 @@
 	u64 temp;
 	u32 shift;
 
-	if (!mips_hpt_frequency || mips_hpt_read == null_hpt_read)
+	if (!mips_hpt_frequency || clocksource_mips.read == null_hpt_read)
 		return;
 
 	/* Calculate a somewhat reasonable rating value */
@@ -347,7 +331,6 @@
 	}
 	clocksource_mips.shift = shift;
 	clocksource_mips.mult = (u32)temp;
-	clocksource_mips.mask = mips_hpt_mask;
 
 	clocksource_register(&clocksource_mips);
 }
@@ -367,32 +350,36 @@
 	                        -xtime.tv_sec, -xtime.tv_nsec);
 
 	/* Choose appropriate high precision timer routines.  */
-	if (!cpu_has_counter && !mips_hpt_read)
+	if (!cpu_has_counter && !clocksource_mips.read)
 		/* No high precision timer -- sorry.  */
-		mips_hpt_read = null_hpt_read;
+		clocksource_mips.read = null_hpt_read;
 	else if (!mips_hpt_frequency && !mips_timer_state) {
 		/* A high precision timer of unknown frequency.  */
-		if (!mips_hpt_read)
+		if (!clocksource_mips.read)
 			/* No external high precision timer -- use R4k.  */
-			mips_hpt_read = c0_hpt_read;
+			clocksource_mips.read = c0_hpt_read;
 	} else {
 		/* We know counter frequency.  Or we can get it.  */
-		if (!mips_hpt_read) {
+		if (!clocksource_mips.read) {
 			/* No external high precision timer -- use R4k.  */
-			mips_hpt_read = c0_hpt_read;
+			clocksource_mips.read = c0_hpt_read;
 
 			if (!mips_timer_state) {
 				/* No external timer interrupt -- use R4k.  */
-				mips_hpt_init = c0_hpt_timer_init;
 				mips_timer_ack = c0_timer_ack;
+				/* Calculate cache parameters.  */
+				cycles_per_jiffy =
+					(mips_hpt_frequency + HZ / 2) / HZ;
+				/*
+				 * This sets up the high precision
+				 * timer for the first interrupt.
+				 */
+				c0_hpt_timer_init();
 			}
 		}
 		if (!mips_hpt_frequency)
 			mips_hpt_frequency = calibrate_hpt();
 
-		/* Calculate cache parameters.  */
-		cycles_per_jiffy = (mips_hpt_frequency + HZ / 2) / HZ;
-
 		/* Report the high precision timer rate for a reference.  */
 		printk("Using %u.%03u MHz high precision timer.\n",
 		       ((mips_hpt_frequency + 500) / 1000) / 1000,
@@ -403,9 +390,6 @@
 		/* No timer interrupt ack (e.g. i8254).  */
 		mips_timer_ack = null_timer_ack;
 
-	/* This sets up the high precision timer for the first interrupt.  */
-	mips_hpt_init();
-
 	/*
 	 * Call board specific timer interrupt setup.
 	 *
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9fda1b8..2a932ca 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -54,6 +54,8 @@
 extern asmlinkage void handle_sys(void);
 extern asmlinkage void handle_bp(void);
 extern asmlinkage void handle_ri(void);
+extern asmlinkage void handle_ri_rdhwr_vivt(void);
+extern asmlinkage void handle_ri_rdhwr(void);
 extern asmlinkage void handle_cpu(void);
 extern asmlinkage void handle_ov(void);
 extern asmlinkage void handle_tr(void);
@@ -397,19 +399,6 @@
 	force_sig(SIGBUS, current);
 }
 
-static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
-{
-	unsigned int __user *epc;
-
-	epc = (unsigned int __user *) regs->cp0_epc +
-	      ((regs->cp0_cause & CAUSEF_BD) != 0);
-	if (!get_user(*opcode, epc))
-		return 0;
-
-	force_sig(SIGSEGV, current);
-	return 1;
-}
-
 /*
  * ll/sc emulation
  */
@@ -544,8 +533,8 @@
 {
 	unsigned int opcode;
 
-	if (unlikely(get_insn_opcode(regs, &opcode)))
-		return -EFAULT;
+	if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+		goto out_sigsegv;
 
 	if ((opcode & OPCODE) == LL) {
 		simulate_ll(regs, opcode);
@@ -557,6 +546,10 @@
 	}
 
 	return -EFAULT;			/* Strange things going on ... */
+
+out_sigsegv:
+	force_sig(SIGSEGV, current);
+	return -EFAULT;
 }
 
 /*
@@ -569,8 +562,8 @@
 	struct thread_info *ti = task_thread_info(current);
 	unsigned int opcode;
 
-	if (unlikely(get_insn_opcode(regs, &opcode)))
-		return -EFAULT;
+	if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+		goto out_sigsegv;
 
 	if (unlikely(compute_return_epc(regs)))
 		return -EFAULT;
@@ -589,6 +582,10 @@
 
 	/* Not ours.  */
 	return -EFAULT;
+
+out_sigsegv:
+	force_sig(SIGSEGV, current);
+	return -EFAULT;
 }
 
 asmlinkage void do_ov(struct pt_regs *regs)
@@ -672,10 +669,8 @@
 	unsigned int opcode, bcode;
 	siginfo_t info;
 
-	die_if_kernel("Break instruction in kernel code", regs);
-
-	if (get_insn_opcode(regs, &opcode))
-		return;
+	if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+		goto out_sigsegv;
 
 	/*
 	 * There is the ancient bug in the MIPS assemblers that the break
@@ -696,6 +691,7 @@
 	switch (bcode) {
 	case BRK_OVERFLOW << 10:
 	case BRK_DIVZERO << 10:
+		die_if_kernel("Break instruction in kernel code", regs);
 		if (bcode == (BRK_DIVZERO << 10))
 			info.si_code = FPE_INTDIV;
 		else
@@ -705,9 +701,16 @@
 		info.si_addr = (void __user *) regs->cp0_epc;
 		force_sig_info(SIGFPE, &info, current);
 		break;
+	case BRK_BUG:
+		die("Kernel bug detected", regs);
+		break;
 	default:
+		die_if_kernel("Break instruction in kernel code", regs);
 		force_sig(SIGTRAP, current);
 	}
+
+out_sigsegv:
+	force_sig(SIGSEGV, current);
 }
 
 asmlinkage void do_tr(struct pt_regs *regs)
@@ -715,10 +718,8 @@
 	unsigned int opcode, tcode = 0;
 	siginfo_t info;
 
-	die_if_kernel("Trap instruction in kernel code", regs);
-
-	if (get_insn_opcode(regs, &opcode))
-		return;
+	if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+		goto out_sigsegv;
 
 	/* Immediate versions don't provide a code.  */
 	if (!(opcode & OPCODE))
@@ -733,6 +734,7 @@
 	switch (tcode) {
 	case BRK_OVERFLOW:
 	case BRK_DIVZERO:
+		die_if_kernel("Trap instruction in kernel code", regs);
 		if (tcode == BRK_DIVZERO)
 			info.si_code = FPE_INTDIV;
 		else
@@ -742,9 +744,16 @@
 		info.si_addr = (void __user *) regs->cp0_epc;
 		force_sig_info(SIGFPE, &info, current);
 		break;
+	case BRK_BUG:
+		die("Kernel bug detected", regs);
+		break;
 	default:
+		die_if_kernel("Trap instruction in kernel code", regs);
 		force_sig(SIGTRAP, current);
 	}
+
+out_sigsegv:
+	force_sig(SIGSEGV, current);
 }
 
 asmlinkage void do_ri(struct pt_regs *regs)
@@ -1423,6 +1432,15 @@
 	memcpy((void *)(uncached_ebase + offset), addr, size);
 }
 
+static int __initdata rdhwr_noopt;
+static int __init set_rdhwr_noopt(char *str)
+{
+	rdhwr_noopt = 1;
+	return 1;
+}
+
+__setup("rdhwr_noopt", set_rdhwr_noopt);
+
 void __init trap_init(void)
 {
 	extern char except_vec3_generic, except_vec3_r4000;
@@ -1502,7 +1520,9 @@
 
 	set_except_vector(8, handle_sys);
 	set_except_vector(9, handle_bp);
-	set_except_vector(10, handle_ri);
+	set_except_vector(10, rdhwr_noopt ? handle_ri :
+			  (cpu_has_vtag_icache ?
+			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
 	set_except_vector(11, handle_cpu);
 	set_except_vector(12, handle_ov);
 	set_except_vector(13, handle_tr);
diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c
index a144a00..2affa5f 100644
--- a/arch/mips/lasat/interrupt.c
+++ b/arch/mips/lasat/interrupt.c
@@ -36,47 +36,20 @@
 
 void disable_lasat_irq(unsigned int irq_nr)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	*lasat_int_mask &= ~(1 << irq_nr) << lasat_int_mask_shift;
-	local_irq_restore(flags);
 }
 
 void enable_lasat_irq(unsigned int irq_nr)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	*lasat_int_mask |= (1 << irq_nr) << lasat_int_mask_shift;
-	local_irq_restore(flags);
-}
-
-static unsigned int startup_lasat_irq(unsigned int irq)
-{
-	enable_lasat_irq(irq);
-
-	return 0; /* never anything pending */
-}
-
-#define shutdown_lasat_irq	disable_lasat_irq
-
-#define mask_and_ack_lasat_irq disable_lasat_irq
-
-static void end_lasat_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		enable_lasat_irq(irq);
 }
 
 static struct irq_chip lasat_irq_type = {
 	.typename = "Lasat",
-	.startup = startup_lasat_irq,
-	.shutdown = shutdown_lasat_irq,
-	.enable = enable_lasat_irq,
-	.disable = disable_lasat_irq,
-	.ack = mask_and_ack_lasat_irq,
-	.end = end_lasat_irq,
+	.ack = disable_lasat_irq,
+	.mask = disable_lasat_irq,
+	.mask_ack = disable_lasat_irq,
+	.unmask = enable_lasat_irq,
 };
 
 static inline int ls1bit32(unsigned int x)
@@ -152,10 +125,6 @@
 		panic("arch_init_irq: mips_machtype incorrect");
 	}
 
-	for (i = 0; i <= LASATINT_END; i++) {
-		irq_desc[i].status	= IRQ_DISABLED;
-		irq_desc[i].action	= 0;
-		irq_desc[i].depth	= 1;
-		irq_desc[i].chip	= &lasat_irq_type;
-	}
+	for (i = 0; i <= LASATINT_END; i++)
+		set_irq_chip_and_handler(i, &lasat_irq_type, handle_level_irq);
 }
diff --git a/arch/mips/lib-32/Makefile b/arch/mips/lib-32/Makefile
index ad28578..dcd4d2e 100644
--- a/arch/mips/lib-32/Makefile
+++ b/arch/mips/lib-32/Makefile
@@ -2,7 +2,7 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y	+= csum_partial.o memset.o watch.o
+lib-y	+= memset.o watch.o
 
 obj-$(CONFIG_CPU_MIPS32)	+= dump_tlb.o
 obj-$(CONFIG_CPU_MIPS64)	+= dump_tlb.o
diff --git a/arch/mips/lib-32/csum_partial.S b/arch/mips/lib-32/csum_partial.S
deleted file mode 100644
index ea257db..0000000
--- a/arch/mips/lib-32/csum_partial.S
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998 Ralf Baechle
- */
-#include <asm/asm.h>
-#include <asm/regdef.h>
-
-#define ADDC(sum,reg)			\
-	addu	sum, reg;		\
-	sltu	v1, sum, reg;		\
-	addu	sum, v1
-
-#define CSUM_BIGCHUNK(src, offset, sum, t0, t1, t2, t3) \
-	lw	t0, (offset + 0x00)(src); \
-	lw	t1, (offset + 0x04)(src); \
-	lw	t2, (offset + 0x08)(src); \
-	lw	t3, (offset + 0x0c)(src); \
-	ADDC(sum, t0);                    \
-	ADDC(sum, t1);                    \
-	ADDC(sum, t2);                    \
-	ADDC(sum, t3);                    \
-	lw	t0, (offset + 0x10)(src); \
-	lw	t1, (offset + 0x14)(src); \
-	lw	t2, (offset + 0x18)(src); \
-	lw	t3, (offset + 0x1c)(src); \
-	ADDC(sum, t0);                    \
-	ADDC(sum, t1);                    \
-	ADDC(sum, t2);                    \
-	ADDC(sum, t3);                    \
-
-/*
- * a0: source address
- * a1: length of the area to checksum
- * a2: partial checksum
- */
-
-#define src a0
-#define dest a1
-#define sum v0
-
-	.text
-	.set	noreorder
-
-/* unknown src alignment and < 8 bytes to go  */
-small_csumcpy:
-	move	a1, t2
-
-	andi	t0, a1, 4
-	beqz	t0, 1f
-	 andi	t0, a1, 2
-
-	/* Still a full word to go  */
-	ulw	t1, (src)
-	addiu	src, 4
-	ADDC(sum, t1)
-
-1:	move	t1, zero
-	beqz	t0, 1f
-	 andi	t0, a1, 1
-
-	/* Still a halfword to go  */
-	ulhu	t1, (src)
-	addiu	src, 2
-
-1:	beqz	t0, 1f
-	 sll	t1, t1, 16
-
-	lbu	t2, (src)
-	 nop
-
-#ifdef __MIPSEB__
-	sll	t2, t2, 8
-#endif
-	or	t1, t2
-
-1:	ADDC(sum, t1)
-
-	/* fold checksum */
-	sll	v1, sum, 16
-	addu	sum, v1
-	sltu	v1, sum, v1
-	srl	sum, sum, 16
-	addu	sum, v1
-
-	/* odd buffer alignment? */
-	beqz	t7, 1f
-	 nop
-	sll	v1, sum, 8
-	srl	sum, sum, 8
-	or	sum, v1
-	andi	sum, 0xffff
-1:
-	.set	reorder
-	/* Add the passed partial csum.  */
-	ADDC(sum, a2)
-	jr	ra
-	.set	noreorder
-
-/* ------------------------------------------------------------------------- */
-
-	.align	5
-LEAF(csum_partial)
-	move	sum, zero
-	move	t7, zero
-
-	sltiu	t8, a1, 0x8
-	bnez	t8, small_csumcpy		/* < 8 bytes to copy */
-	 move	t2, a1
-
-	beqz	a1, out
-	 andi	t7, src, 0x1			/* odd buffer? */
-
-hword_align:
-	beqz	t7, word_align
-	 andi	t8, src, 0x2
-
-	lbu	t0, (src)
-	subu	a1, a1, 0x1
-#ifdef __MIPSEL__
-	sll	t0, t0, 8
-#endif
-	ADDC(sum, t0)
-	addu	src, src, 0x1
-	andi	t8, src, 0x2
-
-word_align:
-	beqz	t8, dword_align
-	 sltiu	t8, a1, 56
-
-	lhu	t0, (src)
-	subu	a1, a1, 0x2
-	ADDC(sum, t0)
-	sltiu	t8, a1, 56
-	addu	src, src, 0x2
-
-dword_align:
-	bnez	t8, do_end_words
-	 move	t8, a1
-
-	andi	t8, src, 0x4
-	beqz	t8, qword_align
-	 andi	t8, src, 0x8
-
-	lw	t0, 0x00(src)
-	subu	a1, a1, 0x4
-	ADDC(sum, t0)
-	addu	src, src, 0x4
-	andi	t8, src, 0x8
-
-qword_align:
-	beqz	t8, oword_align
-	 andi	t8, src, 0x10
-
-	lw	t0, 0x00(src)
-	lw	t1, 0x04(src)
-	subu	a1, a1, 0x8
-	ADDC(sum, t0)
-	ADDC(sum, t1)
-	addu	src, src, 0x8
-	andi	t8, src, 0x10
-
-oword_align:
-	beqz	t8, begin_movement
-	 srl	t8, a1, 0x7
-
-	lw	t3, 0x08(src)
-	lw	t4, 0x0c(src)
-	lw	t0, 0x00(src)
-	lw	t1, 0x04(src)
-	ADDC(sum, t3)
-	ADDC(sum, t4)
-	ADDC(sum, t0)
-	ADDC(sum, t1)
-	subu	a1, a1, 0x10
-	addu	src, src, 0x10
-	srl	t8, a1, 0x7
-
-begin_movement:
-	beqz	t8, 1f
-	 andi	t2, a1, 0x40
-
-move_128bytes:
-	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
-	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
-	CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
-	CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
-	subu	t8, t8, 0x01
-	bnez	t8, move_128bytes
-	 addu	src, src, 0x80
-
-1:
-	beqz	t2, 1f
-	 andi	t2, a1, 0x20
-
-move_64bytes:
-	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
-	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
-	addu	src, src, 0x40
-
-1:
-	beqz	t2, do_end_words
-	 andi	t8, a1, 0x1c
-
-move_32bytes:
-	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
-	andi	t8, a1, 0x1c
-	addu	src, src, 0x20
-
-do_end_words:
-	beqz	t8, maybe_end_cruft
-	 srl	t8, t8, 0x2
-
-end_words:
-	lw	t0, (src)
-	subu	t8, t8, 0x1
-	ADDC(sum, t0)
-	bnez	t8, end_words
-	 addu	src, src, 0x4
-
-maybe_end_cruft:
-	andi	t2, a1, 0x3
-
-small_memcpy:
- j small_csumcpy; move a1, t2
-	beqz	t2, out
-	 move	a1, t2
-
-end_bytes:
-	lb	t0, (src)
-	subu	a1, a1, 0x1
-	bnez	a2, end_bytes
-	 addu	src, src, 0x1
-
-out:
-	jr	ra
-	 move	v0, sum
-	END(csum_partial)
diff --git a/arch/mips/lib-64/Makefile b/arch/mips/lib-64/Makefile
index ad28578..dcd4d2e 100644
--- a/arch/mips/lib-64/Makefile
+++ b/arch/mips/lib-64/Makefile
@@ -2,7 +2,7 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y	+= csum_partial.o memset.o watch.o
+lib-y	+= memset.o watch.o
 
 obj-$(CONFIG_CPU_MIPS32)	+= dump_tlb.o
 obj-$(CONFIG_CPU_MIPS64)	+= dump_tlb.o
diff --git a/arch/mips/lib-64/csum_partial.S b/arch/mips/lib-64/csum_partial.S
deleted file mode 100644
index 25aba66..0000000
--- a/arch/mips/lib-64/csum_partial.S
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Quick'n'dirty IP checksum ...
- *
- * Copyright (C) 1998, 1999 Ralf Baechle
- * Copyright (C) 1999 Silicon Graphics, Inc.
- */
-#include <asm/asm.h>
-#include <asm/regdef.h>
-
-#define ADDC(sum,reg)						\
-	addu	sum, reg;					\
-	sltu	v1, sum, reg;					\
-	addu	sum, v1
-
-#define CSUM_BIGCHUNK(src, offset, sum, t0, t1, t2, t3)		\
-	lw	t0, (offset + 0x00)(src);			\
-	lw	t1, (offset + 0x04)(src);			\
-	lw	t2, (offset + 0x08)(src); 			\
-	lw	t3, (offset + 0x0c)(src); 			\
-	ADDC(sum, t0);						\
-	ADDC(sum, t1);						\
-	ADDC(sum, t2);						\
-	ADDC(sum, t3);						\
-	lw	t0, (offset + 0x10)(src);			\
-	lw	t1, (offset + 0x14)(src);			\
-	lw	t2, (offset + 0x18)(src);			\
-	lw	t3, (offset + 0x1c)(src);			\
-	ADDC(sum, t0);						\
-	ADDC(sum, t1);						\
-	ADDC(sum, t2);						\
-	ADDC(sum, t3);						\
-
-/*
- * a0: source address
- * a1: length of the area to checksum
- * a2: partial checksum
- */
-
-#define src a0
-#define sum v0
-
-	.text
-	.set	noreorder
-
-/* unknown src alignment and < 8 bytes to go  */
-small_csumcpy:
-	move	a1, ta2
-
-	andi	ta0, a1, 4
-	beqz	ta0, 1f
-	 andi	ta0, a1, 2
-
-	/* Still a full word to go  */
-	ulw	ta1, (src)
-	daddiu	src, 4
-	ADDC(sum, ta1)
-
-1:	move	ta1, zero
-	beqz	ta0, 1f
-	 andi	ta0, a1, 1
-
-	/* Still a halfword to go  */
-	ulhu	ta1, (src)
-	daddiu	src, 2
-
-1:	beqz	ta0, 1f
-	 sll	ta1, ta1, 16
-
-	lbu	ta2, (src)
-	 nop
-
-#ifdef __MIPSEB__
-	sll	ta2, ta2, 8
-#endif
-	or	ta1, ta2
-
-1:	ADDC(sum, ta1)
-
-	/* fold checksum */
-	sll	v1, sum, 16
-	addu	sum, v1
-	sltu	v1, sum, v1
-	srl	sum, sum, 16
-	addu	sum, v1
-
-	/* odd buffer alignment? */
-	beqz	t3, 1f
-	 nop
-	sll	v1, sum, 8
-	srl	sum, sum, 8
-	or	sum, v1
-	andi	sum, 0xffff
-1:
-	.set	reorder
-	/* Add the passed partial csum.  */
-	ADDC(sum, a2)
-	jr	ra
-	.set	noreorder
-
-/* ------------------------------------------------------------------------- */
-
-	.align	5
-LEAF(csum_partial)
-	move	sum, zero
-	move	t3, zero
-
-	sltiu	t8, a1, 0x8
-	bnez	t8, small_csumcpy		/* < 8 bytes to copy */
-	 move	ta2, a1
-
-	beqz	a1, out
-	 andi	t3, src, 0x1			/* odd buffer? */
-
-hword_align:
-	beqz	t3, word_align
-	 andi	t8, src, 0x2
-
-	lbu	ta0, (src)
-	dsubu	a1, a1, 0x1
-#ifdef __MIPSEL__
-	sll	ta0, ta0, 8
-#endif
-	ADDC(sum, ta0)
-	daddu	src, src, 0x1
-	andi	t8, src, 0x2
-
-word_align:
-	beqz	t8, dword_align
-	 sltiu	t8, a1, 56
-
-	lhu	ta0, (src)
-	dsubu	a1, a1, 0x2
-	ADDC(sum, ta0)
-	sltiu	t8, a1, 56
-	daddu	src, src, 0x2
-
-dword_align:
-	bnez	t8, do_end_words
-	 move	t8, a1
-
-	andi	t8, src, 0x4
-	beqz	t8, qword_align
-	 andi	t8, src, 0x8
-
-	lw	ta0, 0x00(src)
-	dsubu	a1, a1, 0x4
-	ADDC(sum, ta0)
-	daddu	src, src, 0x4
-	andi	t8, src, 0x8
-
-qword_align:
-	beqz	t8, oword_align
-	 andi	t8, src, 0x10
-
-	lw	ta0, 0x00(src)
-	lw	ta1, 0x04(src)
-	dsubu	a1, a1, 0x8
-	ADDC(sum, ta0)
-	ADDC(sum, ta1)
-	daddu	src, src, 0x8
-	andi	t8, src, 0x10
-
-oword_align:
-	beqz	t8, begin_movement
-	 dsrl	t8, a1, 0x7
-
-	lw	ta3, 0x08(src)
-	lw	t0, 0x0c(src)
-	lw	ta0, 0x00(src)
-	lw	ta1, 0x04(src)
-	ADDC(sum, ta3)
-	ADDC(sum, t0)
-	ADDC(sum, ta0)
-	ADDC(sum, ta1)
-	dsubu	a1, a1, 0x10
-	daddu	src, src, 0x10
-	dsrl	t8, a1, 0x7
-
-begin_movement:
-	beqz	t8, 1f
-	 andi	ta2, a1, 0x40
-
-move_128bytes:
-	CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
-	CSUM_BIGCHUNK(src, 0x20, sum, ta0, ta1, ta3, t0)
-	CSUM_BIGCHUNK(src, 0x40, sum, ta0, ta1, ta3, t0)
-	CSUM_BIGCHUNK(src, 0x60, sum, ta0, ta1, ta3, t0)
-	dsubu	t8, t8, 0x01
-	bnez	t8, move_128bytes
-	 daddu	src, src, 0x80
-
-1:
-	beqz	ta2, 1f
-	 andi	ta2, a1, 0x20
-
-move_64bytes:
-	CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
-	CSUM_BIGCHUNK(src, 0x20, sum, ta0, ta1, ta3, t0)
-	daddu	src, src, 0x40
-
-1:
-	beqz	ta2, do_end_words
-	 andi	t8, a1, 0x1c
-
-move_32bytes:
-	CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
-	andi	t8, a1, 0x1c
-	daddu	src, src, 0x20
-
-do_end_words:
-	beqz	t8, maybe_end_cruft
-	 dsrl	t8, t8, 0x2
-
-end_words:
-	lw	ta0, (src)
-	dsubu	t8, t8, 0x1
-	ADDC(sum, ta0)
-	bnez	t8, end_words
-	 daddu	src, src, 0x4
-
-maybe_end_cruft:
-	andi	ta2, a1, 0x3
-
-small_memcpy:
- j small_csumcpy; move a1, ta2		/* XXX ??? */
-	beqz	t2, out
-	 move	a1, ta2
-
-end_bytes:
-	lb	ta0, (src)
-	dsubu	a1, a1, 0x1
-	bnez	a2, end_bytes
-	 daddu	src, src, 0x1
-
-out:
-	jr	ra
-	 move	v0, sum
-	END(csum_partial)
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index b225543..888b61e 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -2,8 +2,8 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y	+= csum_partial_copy.o memcpy.o promlib.o strlen_user.o strncpy_user.o \
-	   strnlen_user.o uncached.o
+lib-y	+= csum_partial.o csum_partial_copy.o memcpy.o promlib.o \
+	   strlen_user.o strncpy_user.o strnlen_user.o uncached.o
 
 obj-y	+= iomap.o
 
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
new file mode 100644
index 0000000..15611d9
--- /dev/null
+++ b/arch/mips/lib/csum_partial.S
@@ -0,0 +1,258 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Quick'n'dirty IP checksum ...
+ *
+ * Copyright (C) 1998, 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+#ifdef CONFIG_64BIT
+#define T0	ta0
+#define T1	ta1
+#define T2	ta2
+#define T3	ta3
+#define T4	t0
+#define T7	t3
+#else
+#define T0	t0
+#define T1	t1
+#define T2	t2
+#define T3	t3
+#define T4	t4
+#define T7	t7
+#endif
+
+#define ADDC(sum,reg)						\
+	addu	sum, reg;					\
+	sltu	v1, sum, reg;					\
+	addu	sum, v1
+
+#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
+	lw	_t0, (offset + 0x00)(src);			\
+	lw	_t1, (offset + 0x04)(src);			\
+	lw	_t2, (offset + 0x08)(src); 			\
+	lw	_t3, (offset + 0x0c)(src); 			\
+	ADDC(sum, _t0);						\
+	ADDC(sum, _t1);						\
+	ADDC(sum, _t2);						\
+	ADDC(sum, _t3);						\
+	lw	_t0, (offset + 0x10)(src);			\
+	lw	_t1, (offset + 0x14)(src);			\
+	lw	_t2, (offset + 0x18)(src);			\
+	lw	_t3, (offset + 0x1c)(src);			\
+	ADDC(sum, _t0);						\
+	ADDC(sum, _t1);						\
+	ADDC(sum, _t2);						\
+	ADDC(sum, _t3);						\
+
+/*
+ * a0: source address
+ * a1: length of the area to checksum
+ * a2: partial checksum
+ */
+
+#define src a0
+#define sum v0
+
+	.text
+	.set	noreorder
+
+/* unknown src alignment and < 8 bytes to go  */
+small_csumcpy:
+	move	a1, T2
+
+	andi	T0, a1, 4
+	beqz	T0, 1f
+	 andi	T0, a1, 2
+
+	/* Still a full word to go  */
+	ulw	T1, (src)
+	PTR_ADDIU	src, 4
+	ADDC(sum, T1)
+
+1:	move	T1, zero
+	beqz	T0, 1f
+	 andi	T0, a1, 1
+
+	/* Still a halfword to go  */
+	ulhu	T1, (src)
+	PTR_ADDIU	src, 2
+
+1:	beqz	T0, 1f
+	 sll	T1, T1, 16
+
+	lbu	T2, (src)
+	 nop
+
+#ifdef __MIPSEB__
+	sll	T2, T2, 8
+#endif
+	or	T1, T2
+
+1:	ADDC(sum, T1)
+
+	/* fold checksum */
+	sll	v1, sum, 16
+	addu	sum, v1
+	sltu	v1, sum, v1
+	srl	sum, sum, 16
+	addu	sum, v1
+
+	/* odd buffer alignment? */
+	beqz	T7, 1f
+	 nop
+	sll	v1, sum, 8
+	srl	sum, sum, 8
+	or	sum, v1
+	andi	sum, 0xffff
+1:
+	.set	reorder
+	/* Add the passed partial csum.  */
+	ADDC(sum, a2)
+	jr	ra
+	.set	noreorder
+
+/* ------------------------------------------------------------------------- */
+
+	.align	5
+LEAF(csum_partial)
+	move	sum, zero
+	move	T7, zero
+
+	sltiu	t8, a1, 0x8
+	bnez	t8, small_csumcpy		/* < 8 bytes to copy */
+	 move	T2, a1
+
+	beqz	a1, out
+	 andi	T7, src, 0x1			/* odd buffer? */
+
+hword_align:
+	beqz	T7, word_align
+	 andi	t8, src, 0x2
+
+	lbu	T0, (src)
+	LONG_SUBU	a1, a1, 0x1
+#ifdef __MIPSEL__
+	sll	T0, T0, 8
+#endif
+	ADDC(sum, T0)
+	PTR_ADDU	src, src, 0x1
+	andi	t8, src, 0x2
+
+word_align:
+	beqz	t8, dword_align
+	 sltiu	t8, a1, 56
+
+	lhu	T0, (src)
+	LONG_SUBU	a1, a1, 0x2
+	ADDC(sum, T0)
+	sltiu	t8, a1, 56
+	PTR_ADDU	src, src, 0x2
+
+dword_align:
+	bnez	t8, do_end_words
+	 move	t8, a1
+
+	andi	t8, src, 0x4
+	beqz	t8, qword_align
+	 andi	t8, src, 0x8
+
+	lw	T0, 0x00(src)
+	LONG_SUBU	a1, a1, 0x4
+	ADDC(sum, T0)
+	PTR_ADDU	src, src, 0x4
+	andi	t8, src, 0x8
+
+qword_align:
+	beqz	t8, oword_align
+	 andi	t8, src, 0x10
+
+	lw	T0, 0x00(src)
+	lw	T1, 0x04(src)
+	LONG_SUBU	a1, a1, 0x8
+	ADDC(sum, T0)
+	ADDC(sum, T1)
+	PTR_ADDU	src, src, 0x8
+	andi	t8, src, 0x10
+
+oword_align:
+	beqz	t8, begin_movement
+	 LONG_SRL	t8, a1, 0x7
+
+	lw	T3, 0x08(src)
+	lw	T4, 0x0c(src)
+	lw	T0, 0x00(src)
+	lw	T1, 0x04(src)
+	ADDC(sum, T3)
+	ADDC(sum, T4)
+	ADDC(sum, T0)
+	ADDC(sum, T1)
+	LONG_SUBU	a1, a1, 0x10
+	PTR_ADDU	src, src, 0x10
+	LONG_SRL	t8, a1, 0x7
+
+begin_movement:
+	beqz	t8, 1f
+	 andi	T2, a1, 0x40
+
+move_128bytes:
+	CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
+	CSUM_BIGCHUNK(src, 0x20, sum, T0, T1, T3, T4)
+	CSUM_BIGCHUNK(src, 0x40, sum, T0, T1, T3, T4)
+	CSUM_BIGCHUNK(src, 0x60, sum, T0, T1, T3, T4)
+	LONG_SUBU	t8, t8, 0x01
+	bnez	t8, move_128bytes
+	 PTR_ADDU	src, src, 0x80
+
+1:
+	beqz	T2, 1f
+	 andi	T2, a1, 0x20
+
+move_64bytes:
+	CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
+	CSUM_BIGCHUNK(src, 0x20, sum, T0, T1, T3, T4)
+	PTR_ADDU	src, src, 0x40
+
+1:
+	beqz	T2, do_end_words
+	 andi	t8, a1, 0x1c
+
+move_32bytes:
+	CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
+	andi	t8, a1, 0x1c
+	PTR_ADDU	src, src, 0x20
+
+do_end_words:
+	beqz	t8, maybe_end_cruft
+	 LONG_SRL	t8, t8, 0x2
+
+end_words:
+	lw	T0, (src)
+	LONG_SUBU	t8, t8, 0x1
+	ADDC(sum, T0)
+	bnez	t8, end_words
+	 PTR_ADDU	src, src, 0x4
+
+maybe_end_cruft:
+	andi	T2, a1, 0x3
+
+small_memcpy:
+ j small_csumcpy; move a1, T2		/* XXX ??? */
+	beqz	t2, out
+	 move	a1, T2
+
+end_bytes:
+	lb	T0, (src)
+	LONG_SUBU	a1, a1, 0x1
+	bnez	a2, end_bytes
+	 PTR_ADDU	src, src, 0x1
+
+out:
+	jr	ra
+	 move	v0, sum
+	END(csum_partial)
diff --git a/arch/mips/lib/csum_partial_copy.c b/arch/mips/lib/csum_partial_copy.c
index 6e9f366..1720f2c 100644
--- a/arch/mips/lib/csum_partial_copy.c
+++ b/arch/mips/lib/csum_partial_copy.c
@@ -16,8 +16,8 @@
 /*
  * copy while checksumming, otherwise like csum_partial
  */
-unsigned int csum_partial_copy_nocheck(const unsigned char *src,
-	unsigned char *dst, int len, unsigned int sum)
+__wsum csum_partial_copy_nocheck(const void *src,
+	void *dst, int len, __wsum sum)
 {
 	/*
 	 * It's 2:30 am and I don't feel like doing it real ...
@@ -33,8 +33,8 @@
  * Copy from userspace and compute checksum.  If we catch an exception
  * then zero the rest of the buffer.
  */
-unsigned int csum_partial_copy_from_user (const unsigned char __user *src,
-	unsigned char *dst, int len, unsigned int sum, int *err_ptr)
+__wsum csum_partial_copy_from_user (const void __user *src,
+	void *dst, int len, __wsum sum, int *err_ptr)
 {
 	int missing;
 
diff --git a/arch/mips/mips-boards/atlas/atlas_int.c b/arch/mips/mips-boards/atlas/atlas_int.c
index be624b8..43dba6c 100644
--- a/arch/mips/mips-boards/atlas/atlas_int.c
+++ b/arch/mips/mips-boards/atlas/atlas_int.c
@@ -62,16 +62,6 @@
 	iob();
 }
 
-static unsigned int startup_atlas_irq(unsigned int irq)
-{
-	enable_atlas_irq(irq);
-	return 0; /* never anything pending */
-}
-
-#define shutdown_atlas_irq	disable_atlas_irq
-
-#define mask_and_ack_atlas_irq disable_atlas_irq
-
 static void end_atlas_irq(unsigned int irq)
 {
 	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@@ -80,11 +70,11 @@
 
 static struct irq_chip atlas_irq_type = {
 	.typename = "Atlas",
-	.startup = startup_atlas_irq,
-	.shutdown = shutdown_atlas_irq,
-	.enable = enable_atlas_irq,
-	.disable = disable_atlas_irq,
-	.ack = mask_and_ack_atlas_irq,
+	.ack = disable_atlas_irq,
+	.mask = disable_atlas_irq,
+	.mask_ack = disable_atlas_irq,
+	.unmask = enable_atlas_irq,
+	.eoi = enable_atlas_irq,
 	.end = end_atlas_irq,
 };
 
@@ -217,13 +207,8 @@
 	 */
 	atlas_hw0_icregs->intrsten = 0xffffffff;
 
-	for (i = ATLAS_INT_BASE; i <= ATLAS_INT_END; i++) {
-		irq_desc[i].status	= IRQ_DISABLED;
-		irq_desc[i].action	= 0;
-		irq_desc[i].depth	= 1;
-		irq_desc[i].chip	= &atlas_irq_type;
-		spin_lock_init(&irq_desc[i].lock);
-	}
+	for (i = ATLAS_INT_BASE; i <= ATLAS_INT_END; i++)
+		set_irq_chip_and_handler(i, &atlas_irq_type, handle_level_irq);
 }
 
 static struct irqaction atlasirq = {
diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c
index d817c60..e4604c7 100644
--- a/arch/mips/mips-boards/generic/time.c
+++ b/arch/mips/mips-boards/generic/time.c
@@ -288,6 +288,7 @@
 	   The effect is that the int remains disabled on the second cpu.
 	   Mark the interrupt with IRQ_PER_CPU to avoid any confusion */
 	irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU;
+	set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
 #endif
 
         /* to generate the first timer interrupt */
diff --git a/arch/mips/mips-boards/sim/sim_time.c b/arch/mips/mips-boards/sim/sim_time.c
index 24a4ed0..30711d0 100644
--- a/arch/mips/mips-boards/sim/sim_time.c
+++ b/arch/mips/mips-boards/sim/sim_time.c
@@ -3,6 +3,9 @@
 #include <linux/kernel_stat.h>
 #include <linux/sched.h>
 #include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/mc146818rtc.h>
+#include <linux/timex.h>
 
 #include <asm/mipsregs.h>
 #include <asm/ptrace.h>
@@ -10,24 +13,14 @@
 #include <asm/div64.h>
 #include <asm/cpu.h>
 #include <asm/time.h>
-
-#include <linux/interrupt.h>
-#include <linux/mc146818rtc.h>
-#include <linux/timex.h>
-#include <asm/mipsregs.h>
-#include <asm/hardirq.h>
 #include <asm/irq.h>
-#include <asm/div64.h>
-#include <asm/cpu.h>
-#include <asm/time.h>
 #include <asm/mc146818-time.h>
 #include <asm/msc01_ic.h>
+#include <asm/smp.h>
 
 #include <asm/mips-boards/generic.h>
 #include <asm/mips-boards/prom.h>
 #include <asm/mips-boards/simint.h>
-#include <asm/mc146818-time.h>
-#include <asm/smp.h>
 
 
 unsigned long cpu_khz;
@@ -203,7 +196,8 @@
 	   on separate cpu's the first one tries to handle the second interrupt.
 	   The effect is that the int remains disabled on the second cpu.
 	   Mark the interrupt with IRQ_PER_CPU to avoid any confusion */
-	irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU;
+	irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU;
+	set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
 #endif
 
 	/* to generate the first timer interrupt */
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index cc895da..df04a31 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -323,7 +323,6 @@
 static inline void local_r4k_flush_cache_all(void * args)
 {
 	r4k_blast_dcache();
-	r4k_blast_icache();
 }
 
 static void r4k_flush_cache_all(void)
@@ -359,21 +358,19 @@
 static inline void local_r4k_flush_cache_range(void * args)
 {
 	struct vm_area_struct *vma = args;
-	int exec;
 
 	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
 		return;
 
-	exec = vma->vm_flags & VM_EXEC;
-	if (cpu_has_dc_aliases || exec)
-		r4k_blast_dcache();
-	if (exec)
-		r4k_blast_icache();
+	r4k_blast_dcache();
 }
 
 static void r4k_flush_cache_range(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end)
 {
+	if (!cpu_has_dc_aliases)
+		return;
+
 	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
 }
 
@@ -384,18 +381,21 @@
 	if (!cpu_context(smp_processor_id(), mm))
 		return;
 
-	r4k_blast_dcache();
-	r4k_blast_icache();
-
 	/*
 	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
 	 * only flush the primary caches but R10000 and R12000 behave sane ...
+	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
+	 * caches, so we can bail out early.
 	 */
 	if (current_cpu_data.cputype == CPU_R4000SC ||
 	    current_cpu_data.cputype == CPU_R4000MC ||
 	    current_cpu_data.cputype == CPU_R4400SC ||
-	    current_cpu_data.cputype == CPU_R4400MC)
+	    current_cpu_data.cputype == CPU_R4400MC) {
 		r4k_blast_scache();
+		return;
+	}
+
+	r4k_blast_dcache();
 }
 
 static void r4k_flush_cache_mm(struct mm_struct *mm)
diff --git a/arch/mips/mm/dma-coherent.c b/arch/mips/mm/dma-coherent.c
index 7fa5fd1..5697c6e 100644
--- a/arch/mips/mm/dma-coherent.c
+++ b/arch/mips/mm/dma-coherent.c
@@ -190,14 +190,14 @@
 
 EXPORT_SYMBOL(dma_supported);
 
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 1;
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
diff --git a/arch/mips/mm/dma-ip27.c b/arch/mips/mm/dma-ip27.c
index 8da19fd..f088344 100644
--- a/arch/mips/mm/dma-ip27.c
+++ b/arch/mips/mm/dma-ip27.c
@@ -197,14 +197,14 @@
 
 EXPORT_SYMBOL(dma_supported);
 
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 1;
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
diff --git a/arch/mips/mm/dma-ip32.c b/arch/mips/mm/dma-ip32.c
index ec54ed0..b42b6f7 100644
--- a/arch/mips/mm/dma-ip32.c
+++ b/arch/mips/mm/dma-ip32.c
@@ -363,14 +363,15 @@
 
 EXPORT_SYMBOL(dma_supported);
 
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 1;
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction direction)
 {
 	if (direction == DMA_NONE)
 		return;
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 2eeffe5..8cecef0 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -299,14 +299,15 @@
 
 EXPORT_SYMBOL(dma_supported);
 
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 1;
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction direction)
 {
 	if (direction == DMA_NONE)
 		return;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 8423d85..6f90e7e 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -60,6 +60,10 @@
 	 */
 	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
 		goto vmalloc_fault;
+#ifdef MODULE_START
+	if (unlikely(address >= MODULE_START && address < MODULE_END))
+		goto vmalloc_fault;
+#endif
 
 	/*
 	 * If we're in an interrupt or have no user
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 99ebf3c..675502a 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -39,7 +39,7 @@
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	inc_preempt_count();
+	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
@@ -62,8 +62,7 @@
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
 	if (vaddr < FIXADDR_START) { // FIXME
-		dec_preempt_count();
-		preempt_check_resched();
+		pagefault_enable();
 		return;
 	}
 
@@ -78,8 +77,7 @@
 	local_flush_tlb_one(vaddr);
 #endif
 
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_enable();
 }
 
 #ifndef CONFIG_LIMITED_DMA
@@ -92,7 +90,7 @@
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 2de4d3c..9e29ba9 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -90,9 +90,9 @@
 	if (!empty_zero_page)
 		panic("Oh boy, that early out of memory?");
 
-	page = virt_to_page(empty_zero_page);
+	page = virt_to_page((void *)empty_zero_page);
 	split_page(page, order);
-	while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
+	while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
 		SetPageReserved(page);
 		page++;
 	}
@@ -443,15 +443,18 @@
 }
 #endif /* !CONFIG_NEED_MULTIPLE_NODES */
 
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
+static void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
-	unsigned long addr;
+	unsigned long pfn;
 
-	for (addr = begin; addr < end; addr += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(addr));
-		init_page_count(virt_to_page(addr));
-		memset((void *)addr, 0xcc, PAGE_SIZE);
-		free_page(addr);
+	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
+		struct page *page = pfn_to_page(pfn);
+		void *addr = phys_to_virt(PFN_PHYS(pfn));
+
+		ClearPageReserved(page);
+		init_page_count(page);
+		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
+		__free_page(page);
 		totalram_pages++;
 	}
 	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
@@ -460,12 +463,9 @@
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-#ifdef CONFIG_64BIT
-	/* Switch from KSEG0 to XKPHYS addresses */
-	start = (unsigned long)phys_to_virt(CPHYSADDR(start));
-	end = (unsigned long)phys_to_virt(CPHYSADDR(end));
-#endif
-	free_init_pages("initrd memory", start, end);
+	free_init_pages("initrd memory",
+			virt_to_phys((void *)start),
+			virt_to_phys((void *)end));
 }
 #endif
 
@@ -473,17 +473,13 @@
 
 void free_initmem(void)
 {
-	unsigned long start, end, freed;
+	unsigned long freed;
 
 	freed = prom_free_prom_memory();
 	if (freed)
 		printk(KERN_INFO "Freeing firmware memory: %ldk freed\n",freed);
 
-	start = (unsigned long)(&__init_begin);
-	end = (unsigned long)(&__init_end);
-#ifdef CONFIG_64BIT
-	start = PAGE_OFFSET | CPHYSADDR(start);
-	end = PAGE_OFFSET | CPHYSADDR(end);
-#endif
-	free_init_pages("unused kernel memory", start, end);
+	free_init_pages("unused kernel memory",
+			__pa_symbol(&__init_begin),
+			__pa_symbol(&__init_end));
 }
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 8d600d3..c46eb65 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -58,6 +58,9 @@
 
 	/* Initialize the entire pgd.  */
 	pgd_init((unsigned long)swapper_pg_dir);
+#ifdef MODULE_START
+	pgd_init((unsigned long)module_pg_dir);
+#endif
 	pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
 
 	pgd_base = swapper_pg_dir;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index fec318a..492c518 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -423,6 +423,9 @@
 	label_invalid,
 	label_second_part,
 	label_leave,
+#ifdef MODULE_START
+	label_module_alloc,
+#endif
 	label_vmalloc,
 	label_vmalloc_done,
 	label_tlbw_hazard,
@@ -455,6 +458,9 @@
 
 L_LA(_second_part)
 L_LA(_leave)
+#ifdef MODULE_START
+L_LA(_module_alloc)
+#endif
 L_LA(_vmalloc)
 L_LA(_vmalloc_done)
 L_LA(_tlbw_hazard)
@@ -686,6 +692,13 @@
 	i_bgezl(p, reg, 0);
 }
 
+static void __init __attribute__((unused))
+il_bgez(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
+{
+	r_mips_pc16(r, *p, l);
+	i_bgez(p, reg, 0);
+}
+
 /* The only general purpose registers allowed in TLB handlers. */
 #define K0		26
 #define K1		27
@@ -970,7 +983,11 @@
 	 * The vmalloc handling is not in the hotpath.
 	 */
 	i_dmfc0(p, tmp, C0_BADVADDR);
+#ifdef MODULE_START
+	il_bltz(p, r, tmp, label_module_alloc);
+#else
 	il_bltz(p, r, tmp, label_vmalloc);
+#endif
 	/* No i_nop needed here, since the next insn doesn't touch TMP. */
 
 #ifdef CONFIG_SMP
@@ -1023,8 +1040,46 @@
 {
 	long swpd = (long)swapper_pg_dir;
 
+#ifdef MODULE_START
+	long modd = (long)module_pg_dir;
+
+	l_module_alloc(l, *p);
+	/*
+	 * Assumption:
+	 * VMALLOC_START >= 0xc000000000000000UL
+	 * MODULE_START >= 0xe000000000000000UL
+	 */
+	i_SLL(p, ptr, bvaddr, 2);
+	il_bgez(p, r, ptr, label_vmalloc);
+
+	if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START)) {
+		i_lui(p, ptr, rel_hi(MODULE_START)); /* delay slot */
+	} else {
+		/* unlikely configuration */
+		i_nop(p); /* delay slot */
+		i_LA(p, ptr, MODULE_START);
+	}
+	i_dsubu(p, bvaddr, bvaddr, ptr);
+
+	if (in_compat_space_p(modd) && !rel_lo(modd)) {
+		il_b(p, r, label_vmalloc_done);
+		i_lui(p, ptr, rel_hi(modd));
+	} else {
+		i_LA_mostly(p, ptr, modd);
+		il_b(p, r, label_vmalloc_done);
+		i_daddiu(p, ptr, ptr, rel_lo(modd));
+	}
+
+	l_vmalloc(l, *p);
+	if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START) &&
+	    MODULE_START << 32 == VMALLOC_START)
+		i_dsll32(p, ptr, ptr, 0);	/* typical case */
+	else
+		i_LA(p, ptr, VMALLOC_START);
+#else
 	l_vmalloc(l, *p);
 	i_LA(p, ptr, VMALLOC_START);
+#endif
 	i_dsubu(p, bvaddr, bvaddr, ptr);
 
 	if (in_compat_space_p(swpd) && !rel_lo(swpd)) {
diff --git a/arch/mips/momentum/ocelot_c/cpci-irq.c b/arch/mips/momentum/ocelot_c/cpci-irq.c
index 47e3fa3..bb11fef 100644
--- a/arch/mips/momentum/ocelot_c/cpci-irq.c
+++ b/arch/mips/momentum/ocelot_c/cpci-irq.c
@@ -66,48 +66,6 @@
 }
 
 /*
- * Enables the IRQ in the FPGA
- */
-static void enable_cpci_irq(unsigned int irq)
-{
-	unmask_cpci_irq(irq);
-}
-
-/*
- * Initialize the IRQ in the FPGA
- */
-static unsigned int startup_cpci_irq(unsigned int irq)
-{
-	unmask_cpci_irq(irq);
-	return 0;
-}
-
-/*
- * Disables the IRQ in the FPGA
- */
-static void disable_cpci_irq(unsigned int irq)
-{
-	mask_cpci_irq(irq);
-}
-
-/*
- * Masks and ACKs an IRQ
- */
-static void mask_and_ack_cpci_irq(unsigned int irq)
-{
-	mask_cpci_irq(irq);
-}
-
-/*
- * End IRQ processing
- */
-static void end_cpci_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		unmask_cpci_irq(irq);
-}
-
-/*
  * Interrupt handler for interrupts coming from the FPGA chip.
  * It could be built in ethernet ports etc...
  */
@@ -125,27 +83,18 @@
 	do_IRQ(ls1bit8(irq_src) + CPCI_IRQ_BASE);
 }
 
-#define shutdown_cpci_irq	disable_cpci_irq
-
 struct irq_chip cpci_irq_type = {
 	.typename = "CPCI/FPGA",
-	.startup = startup_cpci_irq,
-	.shutdown = shutdown_cpci_irq,
-	.enable = enable_cpci_irq,
-	.disable = disable_cpci_irq,
-	.ack = mask_and_ack_cpci_irq,
-	.end = end_cpci_irq,
+	.ack = mask_cpci_irq,
+	.mask = mask_cpci_irq,
+	.mask_ack = mask_cpci_irq,
+	.unmask = unmask_cpci_irq,
 };
 
 void cpci_irq_init(void)
 {
 	int i;
 
-	/* Reset irq handlers pointers to NULL */
-	for (i = CPCI_IRQ_BASE; i < (CPCI_IRQ_BASE + 8); i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 2;
-		irq_desc[i].chip = &cpci_irq_type;
-	}
+	for (i = CPCI_IRQ_BASE; i < (CPCI_IRQ_BASE + 8); i++)
+		set_irq_chip_and_handler(i, &cpci_irq_type, handle_level_irq);
 }
diff --git a/arch/mips/momentum/ocelot_c/uart-irq.c b/arch/mips/momentum/ocelot_c/uart-irq.c
index 510257d..a7a80c0 100644
--- a/arch/mips/momentum/ocelot_c/uart-irq.c
+++ b/arch/mips/momentum/ocelot_c/uart-irq.c
@@ -60,48 +60,6 @@
 }
 
 /*
- * Enables the IRQ in the FPGA
- */
-static void enable_uart_irq(unsigned int irq)
-{
-	unmask_uart_irq(irq);
-}
-
-/*
- * Initialize the IRQ in the FPGA
- */
-static unsigned int startup_uart_irq(unsigned int irq)
-{
-	unmask_uart_irq(irq);
-	return 0;
-}
-
-/*
- * Disables the IRQ in the FPGA
- */
-static void disable_uart_irq(unsigned int irq)
-{
-	mask_uart_irq(irq);
-}
-
-/*
- * Masks and ACKs an IRQ
- */
-static void mask_and_ack_uart_irq(unsigned int irq)
-{
-	mask_uart_irq(irq);
-}
-
-/*
- * End IRQ processing
- */
-static void end_uart_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		unmask_uart_irq(irq);
-}
-
-/*
  * Interrupt handler for interrupts coming from the FPGA chip.
  */
 void ll_uart_irq(void)
@@ -118,28 +76,16 @@
 	do_IRQ(ls1bit8(irq_src) + 74);
 }
 
-#define shutdown_uart_irq	disable_uart_irq
-
 struct irq_chip uart_irq_type = {
 	.typename = "UART/FPGA",
-	.startup = startup_uart_irq,
-	.shutdown = shutdown_uart_irq,
-	.enable = enable_uart_irq,
-	.disable = disable_uart_irq,
-	.ack = mask_and_ack_uart_irq,
-	.end = end_uart_irq,
+	.ack = mask_uart_irq,
+	.mask = mask_uart_irq,
+	.mask_ack = mask_uart_irq,
+	.unmask = unmask_uart_irq,
 };
 
 void uart_irq_init(void)
 {
-	/* Reset irq handlers pointers to NULL */
-	irq_desc[80].status = IRQ_DISABLED;
-	irq_desc[80].action = 0;
-	irq_desc[80].depth = 2;
-	irq_desc[80].chip = &uart_irq_type;
-
-	irq_desc[81].status = IRQ_DISABLED;
-	irq_desc[81].action = 0;
-	irq_desc[81].depth = 2;
-	irq_desc[81].chip = &uart_irq_type;
+	set_irq_chip_and_handler(80, &uart_irq_type, handle_level_irq);
+	set_irq_chip_and_handler(81, &uart_irq_type, handle_level_irq);
 }
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
index 0a50aad..bf3be6f 100644
--- a/arch/mips/oprofile/Makefile
+++ b/arch/mips/oprofile/Makefile
@@ -12,5 +12,6 @@
 
 oprofile-$(CONFIG_CPU_MIPS32)		+= op_model_mipsxx.o
 oprofile-$(CONFIG_CPU_MIPS64)		+= op_model_mipsxx.o
+oprofile-$(CONFIG_CPU_R10000)		+= op_model_mipsxx.o
 oprofile-$(CONFIG_CPU_SB1)		+= op_model_mipsxx.o
 oprofile-$(CONFIG_CPU_RM9000)		+= op_model_rm9000.o
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index 65eb554..4e0a90b 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -83,6 +83,9 @@
 	case CPU_74K:
 	case CPU_SB1:
 	case CPU_SB1A:
+	case CPU_R10000:
+	case CPU_R12000:
+	case CPU_R14000:
 		lmodel = &op_model_mipsxx_ops;
 		break;
 
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 1fb240c..455d76a 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -18,7 +18,7 @@
 #define M_PERFCTL_SUPERVISOR		(1UL      <<  2)
 #define M_PERFCTL_USER			(1UL      <<  3)
 #define M_PERFCTL_INTERRUPT_ENABLE	(1UL      <<  4)
-#define M_PERFCTL_EVENT(event)		((event)  << 5)
+#define M_PERFCTL_EVENT(event)		(((event) & 0x3f)  << 5)
 #define M_PERFCTL_VPEID(vpe)		((vpe)    << 16)
 #define M_PERFCTL_MT_EN(filter)		((filter) << 20)
 #define    M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
@@ -218,13 +218,26 @@
 
 static inline int n_counters(void)
 {
-	int counters = __n_counters();
+	int counters;
+
+	switch (current_cpu_data.cputype) {
+	case CPU_R10000:
+		counters = 2;
+		break;
+
+	case CPU_R12000:
+	case CPU_R14000:
+		counters = 4;
+		break;
+
+	default:
+		counters = __n_counters();
+	}
 
 #ifdef CONFIG_MIPS_MT_SMP
 	if (current_cpu_data.cputype == CPU_34K)
 		return counters >> 1;
 #endif
-
 	return counters;
 }
 
@@ -284,6 +294,18 @@
 		op_model_mipsxx_ops.cpu_type = "mips/5K";
 		break;
 
+	case CPU_R10000:
+		if ((current_cpu_data.processor_id & 0xff) == 0x20)
+			op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
+		else
+			op_model_mipsxx_ops.cpu_type = "mips/r10000";
+		break;
+
+	case CPU_R12000:
+	case CPU_R14000:
+		op_model_mipsxx_ops.cpu_type = "mips/r12000";
+		break;
+
 	case CPU_SB1:
 	case CPU_SB1A:
 		op_model_mipsxx_ops.cpu_type = "mips/sb1";
diff --git a/arch/mips/pci/fixup-cobalt.c b/arch/mips/pci/fixup-cobalt.c
index 75a01e7..7d5f6bb 100644
--- a/arch/mips/pci/fixup-cobalt.c
+++ b/arch/mips/pci/fixup-cobalt.c
@@ -94,22 +94,21 @@
 #if 0
 	if (galileo_id >= 0x10) {
 		/* New Galileo, assumes PCI stop line to VIA is connected. */
-		GALILEO_OUTL(0x4020, GT_PCI0_TOR_OFS);
+		GT_WRITE(GT_PCI0_TOR_OFS, 0x4020);
 	} else if (galileo_id == 0x1 || galileo_id == 0x2)
 #endif
 	{
 		signed int timeo;
 		/* XXX WE MUST DO THIS ELSE GALILEO LOCKS UP! -DaveM */
-		timeo = GALILEO_INL(GT_PCI0_TOR_OFS);
+		timeo = GT_READ(GT_PCI0_TOR_OFS);
 		/* Old Galileo, assumes PCI STOP line to VIA is disconnected. */
-		GALILEO_OUTL(
+		GT_WRITE(GT_PCI0_TOR_OFS,
 			(0xff << 16) |		/* retry count */
 			(0xff << 8) |		/* timeout 1   */
-			0xff,			/* timeout 0   */
-			GT_PCI0_TOR_OFS);
+			0xff);			/* timeout 0   */
 
 		/* enable PCI retry exceeded interrupt */
-		GALILEO_OUTL(GALILEO_INTR_RETRY_CTR | GALILEO_INL(GT_INTRMASK_OFS), GT_INTRMASK_OFS);
+		GT_WRITE(GT_INTRMASK_OFS, GT_INTR_RETRYCTR0_MSK | GT_READ(GT_INTRMASK_OFS));
 	}
 }
 
diff --git a/arch/mips/pci/ops-gt64111.c b/arch/mips/pci/ops-gt64111.c
index 13de459..ecd3991 100644
--- a/arch/mips/pci/ops-gt64111.c
+++ b/arch/mips/pci/ops-gt64111.c
@@ -38,18 +38,18 @@
 	switch (size) {
 	case 4:
 		PCI_CFG_SET(devfn, where);
-		*val = GALILEO_INL(GT_PCI0_CFGDATA_OFS);
+		*val = GT_READ(GT_PCI0_CFGDATA_OFS);
 		return PCIBIOS_SUCCESSFUL;
 
 	case 2:
 		PCI_CFG_SET(devfn, (where & ~0x3));
-		*val = GALILEO_INL(GT_PCI0_CFGDATA_OFS)
+		*val = GT_READ(GT_PCI0_CFGDATA_OFS)
 		    >> ((where & 3) * 8);
 		return PCIBIOS_SUCCESSFUL;
 
 	case 1:
 		PCI_CFG_SET(devfn, (where & ~0x3));
-		*val = GALILEO_INL(GT_PCI0_CFGDATA_OFS)
+		*val = GT_READ(GT_PCI0_CFGDATA_OFS)
 		    >> ((where & 3) * 8);
 		return PCIBIOS_SUCCESSFUL;
 	}
@@ -68,25 +68,25 @@
 	switch (size) {
 	case 4:
 		PCI_CFG_SET(devfn, where);
-		GALILEO_OUTL(val, GT_PCI0_CFGDATA_OFS);
+		GT_WRITE(GT_PCI0_CFGDATA_OFS, val);
 
 		return PCIBIOS_SUCCESSFUL;
 
 	case 2:
 		PCI_CFG_SET(devfn, (where & ~0x3));
-		tmp = GALILEO_INL(GT_PCI0_CFGDATA_OFS);
+		tmp = GT_READ(GT_PCI0_CFGDATA_OFS);
 		tmp &= ~(0xffff << ((where & 0x3) * 8));
 		tmp |= (val << ((where & 0x3) * 8));
-		GALILEO_OUTL(tmp, GT_PCI0_CFGDATA_OFS);
+		GT_WRITE(GT_PCI0_CFGDATA_OFS, tmp);
 
 		return PCIBIOS_SUCCESSFUL;
 
 	case 1:
 		PCI_CFG_SET(devfn, (where & ~0x3));
-		tmp = GALILEO_INL(GT_PCI0_CFGDATA_OFS);
+		tmp = GT_READ(GT_PCI0_CFGDATA_OFS);
 		tmp &= ~(0xff << ((where & 0x3) * 8));
 		tmp |= (val << ((where & 0x3) * 8));
-		GALILEO_OUTL(tmp, GT_PCI0_CFGDATA_OFS);
+		GT_WRITE(GT_PCI0_CFGDATA_OFS, tmp);
 
 		return PCIBIOS_SUCCESSFUL;
 	}
diff --git a/arch/mips/philips/pnx8550/common/int.c b/arch/mips/philips/pnx8550/common/int.c
index 7106116..2c36c10 100644
--- a/arch/mips/philips/pnx8550/common/int.c
+++ b/arch/mips/philips/pnx8550/common/int.c
@@ -38,8 +38,6 @@
 #include <int.h>
 #include <uart.h>
 
-static DEFINE_SPINLOCK(irq_lock);
-
 /* default prio for interrupts */
 /* first one is a no-no so therefore always prio 0 (disabled) */
 static char gic_prio[PNX8550_INT_GIC_TOTINT] = {
@@ -149,38 +147,6 @@
 	}
 }
 
-#define pnx8550_disable pnx8550_ack
-static void pnx8550_ack(unsigned int irq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&irq_lock, flags);
-	mask_irq(irq);
-	spin_unlock_irqrestore(&irq_lock, flags);
-}
-
-#define pnx8550_enable pnx8550_unmask
-static void pnx8550_unmask(unsigned int irq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&irq_lock, flags);
-	unmask_irq(irq);
-	spin_unlock_irqrestore(&irq_lock, flags);
-}
-
-static unsigned int startup_irq(unsigned int irq_nr)
-{
-	pnx8550_unmask(irq_nr);
-	return 0;
-}
-
-static void shutdown_irq(unsigned int irq_nr)
-{
-	pnx8550_ack(irq_nr);
-	return;
-}
-
 int pnx8550_set_gic_priority(int irq, int priority)
 {
 	int gic_irq = irq-PNX8550_INT_GIC_MIN;
@@ -192,27 +158,12 @@
 	return prev_priority;
 }
 
-static inline void mask_and_ack_level_irq(unsigned int irq)
-{
-	pnx8550_disable(irq);
-	return;
-}
-
-static void end_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) {
-		pnx8550_enable(irq);
-	}
-}
-
 static struct irq_chip level_irq_type = {
 	.typename =	"PNX Level IRQ",
-	.startup =	startup_irq,
-	.shutdown =	shutdown_irq,
-	.enable =	pnx8550_enable,
-	.disable =	pnx8550_disable,
-	.ack =		mask_and_ack_level_irq,
-	.end =		end_irq,
+	.ack =		mask_irq,
+	.mask =		mask_irq,
+	.mask_ack =	mask_irq,
+	.unmask =	unmask_irq,
 };
 
 static struct irqaction gic_action = {
@@ -233,8 +184,8 @@
 	int configPR;
 
 	for (i = 0; i < PNX8550_INT_CP0_TOTINT; i++) {
-		irq_desc[i].chip = &level_irq_type;
-		pnx8550_ack(i);	/* mask the irq just in case  */
+		set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq);
+		mask_irq(i);	/* mask the irq just in case  */
 	}
 
 	/* init of GIC/IPC interrupts */
@@ -270,7 +221,7 @@
 		/* mask/priority is still 0 so we will not get any
 		 * interrupts until it is unmasked */
 
-		irq_desc[i].chip = &level_irq_type;
+		set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq);
 	}
 
 	/* Priority level 0 */
@@ -279,20 +230,21 @@
 	/* Set int vector table address */
 	PNX8550_GIC_VECTOR_0 = PNX8550_GIC_VECTOR_1 = 0;
 
-	irq_desc[MIPS_CPU_GIC_IRQ].chip = &level_irq_type;
+	set_irq_chip_and_handler(MIPS_CPU_GIC_IRQ, &level_irq_type,
+				 handle_level_irq);
 	setup_irq(MIPS_CPU_GIC_IRQ, &gic_action);
 
 	/* init of Timer interrupts */
-	for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++) {
-		irq_desc[i].chip = &level_irq_type;
-	}
+	for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++)
+		set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq);
 
 	/* Stop Timer 1-3 */
 	configPR = read_c0_config7();
 	configPR |= 0x00000038;
 	write_c0_config7(configPR);
 
-	irq_desc[MIPS_CPU_TIMER_IRQ].chip = &level_irq_type;
+	set_irq_chip_and_handler(MIPS_CPU_TIMER_IRQ, &level_irq_type,
+				 handle_level_irq);
 	setup_irq(MIPS_CPU_TIMER_IRQ, &timer_action);
 }
 
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 3cc0436..305491e 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -99,8 +99,6 @@
  */
 void prom_init_secondary(void)
 {
-	mips_hpt_init();
-
 	set_c0_status(ST0_CO | ST0_IE | ST0_IM);
 }
 
diff --git a/arch/mips/sgi-ip22/ip22-eisa.c b/arch/mips/sgi-ip22/ip22-eisa.c
index 0d18ed4..a1a9af6 100644
--- a/arch/mips/sgi-ip22/ip22-eisa.c
+++ b/arch/mips/sgi-ip22/ip22-eisa.c
@@ -95,16 +95,11 @@
 
 static void enable_eisa1_irq(unsigned int irq)
 {
-	unsigned long flags;
 	u8 mask;
 
-	local_irq_save(flags);
-
 	mask = inb(EISA_INT1_MASK);
 	mask &= ~((u8) (1 << irq));
 	outb(mask, EISA_INT1_MASK);
-
-	local_irq_restore(flags);
 }
 
 static unsigned int startup_eisa1_irq(unsigned int irq)
@@ -130,8 +125,6 @@
 	outb(mask, EISA_INT1_MASK);
 }
 
-#define shutdown_eisa1_irq	disable_eisa1_irq
-
 static void mask_and_ack_eisa1_irq(unsigned int irq)
 {
 	disable_eisa1_irq(irq);
@@ -148,25 +141,20 @@
 static struct irq_chip ip22_eisa1_irq_type = {
 	.typename	= "IP22 EISA",
 	.startup	= startup_eisa1_irq,
-	.shutdown	= shutdown_eisa1_irq,
-	.enable		= enable_eisa1_irq,
-	.disable	= disable_eisa1_irq,
 	.ack		= mask_and_ack_eisa1_irq,
+	.mask		= disable_eisa1_irq,
+	.mask_ack	= mask_and_ack_eisa1_irq,
+	.unmask		= enable_eisa1_irq,
 	.end		= end_eisa1_irq,
 };
 
 static void enable_eisa2_irq(unsigned int irq)
 {
-	unsigned long flags;
 	u8 mask;
 
-	local_irq_save(flags);
-
 	mask = inb(EISA_INT2_MASK);
 	mask &= ~((u8) (1 << (irq - 8)));
 	outb(mask, EISA_INT2_MASK);
-
-	local_irq_restore(flags);
 }
 
 static unsigned int startup_eisa2_irq(unsigned int irq)
@@ -192,8 +180,6 @@
 	outb(mask, EISA_INT2_MASK);
 }
 
-#define shutdown_eisa2_irq	disable_eisa2_irq
-
 static void mask_and_ack_eisa2_irq(unsigned int irq)
 {
 	disable_eisa2_irq(irq);
@@ -210,10 +196,10 @@
 static struct irq_chip ip22_eisa2_irq_type = {
 	.typename	= "IP22 EISA",
 	.startup	= startup_eisa2_irq,
-	.shutdown	= shutdown_eisa2_irq,
-	.enable		= enable_eisa2_irq,
-	.disable	= disable_eisa2_irq,
 	.ack		= mask_and_ack_eisa2_irq,
+	.mask		= disable_eisa2_irq,
+	.mask_ack	= mask_and_ack_eisa2_irq,
+	.unmask		= enable_eisa2_irq,
 	.end		= end_eisa2_irq,
 };
 
@@ -275,13 +261,10 @@
 	outb(0, EISA_DMA2_WRITE_SINGLE);
 
 	for (i = SGINT_EISA; i < (SGINT_EISA + EISA_MAX_IRQ); i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 1;
 		if (i < (SGINT_EISA + 8))
-			irq_desc[i].chip = &ip22_eisa1_irq_type;
+			set_irq_chip(i, &ip22_eisa1_irq_type);
 		else
-			irq_desc[i].chip = &ip22_eisa2_irq_type;
+			set_irq_chip(i, &ip22_eisa2_irq_type);
 	}
 
 	/* Cannot use request_irq because of kmalloc not being ready at such
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index af51889..c44f8be 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -40,186 +40,86 @@
 
 static void enable_local0_irq(unsigned int irq)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	/* don't allow mappable interrupt to be enabled from setup_irq,
 	 * we have our own way to do so */
 	if (irq != SGI_MAP_0_IRQ)
 		sgint->imask0 |= (1 << (irq - SGINT_LOCAL0));
-	local_irq_restore(flags);
-}
-
-static unsigned int startup_local0_irq(unsigned int irq)
-{
-	enable_local0_irq(irq);
-	return 0;		/* Never anything pending  */
 }
 
 static void disable_local0_irq(unsigned int irq)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	sgint->imask0 &= ~(1 << (irq - SGINT_LOCAL0));
-	local_irq_restore(flags);
-}
-
-#define shutdown_local0_irq	disable_local0_irq
-#define mask_and_ack_local0_irq	disable_local0_irq
-
-static void end_local0_irq (unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		enable_local0_irq(irq);
 }
 
 static struct irq_chip ip22_local0_irq_type = {
 	.typename	= "IP22 local 0",
-	.startup	= startup_local0_irq,
-	.shutdown	= shutdown_local0_irq,
-	.enable		= enable_local0_irq,
-	.disable	= disable_local0_irq,
-	.ack		= mask_and_ack_local0_irq,
-	.end		= end_local0_irq,
+	.ack		= disable_local0_irq,
+	.mask		= disable_local0_irq,
+	.mask_ack	= disable_local0_irq,
+	.unmask		= enable_local0_irq,
 };
 
 static void enable_local1_irq(unsigned int irq)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	/* don't allow mappable interrupt to be enabled from setup_irq,
 	 * we have our own way to do so */
 	if (irq != SGI_MAP_1_IRQ)
 		sgint->imask1 |= (1 << (irq - SGINT_LOCAL1));
-	local_irq_restore(flags);
-}
-
-static unsigned int startup_local1_irq(unsigned int irq)
-{
-	enable_local1_irq(irq);
-	return 0;		/* Never anything pending  */
 }
 
 void disable_local1_irq(unsigned int irq)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	sgint->imask1 &= ~(1 << (irq - SGINT_LOCAL1));
-	local_irq_restore(flags);
-}
-
-#define shutdown_local1_irq	disable_local1_irq
-#define mask_and_ack_local1_irq	disable_local1_irq
-
-static void end_local1_irq (unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		enable_local1_irq(irq);
 }
 
 static struct irq_chip ip22_local1_irq_type = {
 	.typename	= "IP22 local 1",
-	.startup	= startup_local1_irq,
-	.shutdown	= shutdown_local1_irq,
-	.enable		= enable_local1_irq,
-	.disable	= disable_local1_irq,
-	.ack		= mask_and_ack_local1_irq,
-	.end		= end_local1_irq,
+	.ack		= disable_local1_irq,
+	.mask		= disable_local1_irq,
+	.mask_ack	= disable_local1_irq,
+	.unmask		= enable_local1_irq,
 };
 
 static void enable_local2_irq(unsigned int irq)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	sgint->imask0 |= (1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0));
 	sgint->cmeimask0 |= (1 << (irq - SGINT_LOCAL2));
-	local_irq_restore(flags);
-}
-
-static unsigned int startup_local2_irq(unsigned int irq)
-{
-	enable_local2_irq(irq);
-	return 0;		/* Never anything pending  */
 }
 
 void disable_local2_irq(unsigned int irq)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	sgint->cmeimask0 &= ~(1 << (irq - SGINT_LOCAL2));
 	if (!sgint->cmeimask0)
 		sgint->imask0 &= ~(1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0));
-	local_irq_restore(flags);
-}
-
-#define shutdown_local2_irq disable_local2_irq
-#define mask_and_ack_local2_irq	disable_local2_irq
-
-static void end_local2_irq (unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		enable_local2_irq(irq);
 }
 
 static struct irq_chip ip22_local2_irq_type = {
 	.typename	= "IP22 local 2",
-	.startup	= startup_local2_irq,
-	.shutdown	= shutdown_local2_irq,
-	.enable		= enable_local2_irq,
-	.disable	= disable_local2_irq,
-	.ack		= mask_and_ack_local2_irq,
-	.end		= end_local2_irq,
+	.ack		= disable_local2_irq,
+	.mask		= disable_local2_irq,
+	.mask_ack	= disable_local2_irq,
+	.unmask		= enable_local2_irq,
 };
 
 static void enable_local3_irq(unsigned int irq)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	sgint->imask1 |= (1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1));
 	sgint->cmeimask1 |= (1 << (irq - SGINT_LOCAL3));
-	local_irq_restore(flags);
-}
-
-static unsigned int startup_local3_irq(unsigned int irq)
-{
-	enable_local3_irq(irq);
-	return 0;		/* Never anything pending  */
 }
 
 void disable_local3_irq(unsigned int irq)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	sgint->cmeimask1 &= ~(1 << (irq - SGINT_LOCAL3));
 	if (!sgint->cmeimask1)
 		sgint->imask1 &= ~(1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1));
-	local_irq_restore(flags);
-}
-
-#define shutdown_local3_irq disable_local3_irq
-#define mask_and_ack_local3_irq	disable_local3_irq
-
-static void end_local3_irq (unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		enable_local3_irq(irq);
 }
 
 static struct irq_chip ip22_local3_irq_type = {
 	.typename	= "IP22 local 3",
-	.startup	= startup_local3_irq,
-	.shutdown	= shutdown_local3_irq,
-	.enable		= enable_local3_irq,
-	.disable	= disable_local3_irq,
-	.ack		= mask_and_ack_local3_irq,
-	.end		= end_local3_irq,
+	.ack		= disable_local3_irq,
+	.mask		= disable_local3_irq,
+	.mask_ack	= disable_local3_irq,
+	.unmask		= enable_local3_irq,
 };
 
 static void indy_local0_irqdispatch(void)
@@ -430,10 +330,7 @@
 		else
 			handler		= &ip22_local3_irq_type;
 
-		irq_desc[i].status	= IRQ_DISABLED;
-		irq_desc[i].action	= 0;
-		irq_desc[i].depth	= 1;
-		irq_desc[i].chip	= handler;
+		set_irq_chip_and_handler(i, handler, handle_level_irq);
 	}
 
 	/* vector handler. this register the IRQ as non-sharable */
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 270ecd3..319f880 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -332,34 +332,19 @@
 	intr_disconnect_level(cpu, swlevel);
 }
 
-static void mask_and_ack_bridge_irq(unsigned int irq)
-{
-	disable_bridge_irq(irq);
-}
-
-static void end_bridge_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
-	    irq_desc[irq].action)
-		enable_bridge_irq(irq);
-}
-
 static struct irq_chip bridge_irq_type = {
 	.typename	= "bridge",
 	.startup	= startup_bridge_irq,
 	.shutdown	= shutdown_bridge_irq,
-	.enable		= enable_bridge_irq,
-	.disable	= disable_bridge_irq,
-	.ack		= mask_and_ack_bridge_irq,
-	.end		= end_bridge_irq,
+	.ack		= disable_bridge_irq,
+	.mask		= disable_bridge_irq,
+	.mask_ack	= disable_bridge_irq,
+	.unmask		= enable_bridge_irq,
 };
 
 void __devinit register_bridge_irq(unsigned int irq)
 {
-	irq_desc[irq].status	= IRQ_DISABLED;
-	irq_desc[irq].action	= 0;
-	irq_desc[irq].depth	= 1;
-	irq_desc[irq].chip	= &bridge_irq_type;
+	set_irq_chip_and_handler(irq, &bridge_irq_type, handle_level_irq);
 }
 
 int __devinit request_bridge_irq(struct bridge_controller *bc)
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 5e82a26..c20e989 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -172,15 +172,6 @@
         return mktime(year, month, date, hour, min, sec);
 }
 
-static unsigned int startup_rt_irq(unsigned int irq)
-{
-	return 0;
-}
-
-static void shutdown_rt_irq(unsigned int irq)
-{
-}
-
 static void enable_rt_irq(unsigned int irq)
 {
 }
@@ -189,22 +180,13 @@
 {
 }
 
-static void mask_and_ack_rt(unsigned int irq)
-{
-}
-
-static void end_rt_irq(unsigned int irq)
-{
-}
-
 static struct irq_chip rt_irq_type = {
 	.typename	= "SN HUB RT timer",
-	.startup	= startup_rt_irq,
-	.shutdown	= shutdown_rt_irq,
-	.enable		= enable_rt_irq,
-	.disable	= disable_rt_irq,
-	.ack		= mask_and_ack_rt,
-	.end		= end_rt_irq,
+	.ack		= disable_rt_irq,
+	.mask		= disable_rt_irq,
+	.mask_ack	= disable_rt_irq,
+	.unmask		= enable_rt_irq,
+	.eoi		= enable_rt_irq,
 };
 
 static struct irqaction rt_irqaction = {
@@ -221,10 +203,7 @@
 	if (irqno < 0)
 		panic("Can't allocate interrupt number for timer interrupt");
 
-	irq_desc[irqno].status	= IRQ_DISABLED;
-	irq_desc[irqno].action	= NULL;
-	irq_desc[irqno].depth	= 1;
-	irq_desc[irqno].chip	= &rt_irq_type;
+	set_irq_chip_and_handler(irqno, &rt_irq_type, handle_percpu_irq);
 
 	/* over-write the handler, we use our own way */
 	irq->handler = no_action;
@@ -239,14 +218,14 @@
 	setup_irq(irqno, &rt_irqaction);
 }
 
-static unsigned int ip27_hpt_read(void)
+static cycle_t ip27_hpt_read(void)
 {
 	return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
 }
 
 void __init ip27_time_init(void)
 {
-	mips_hpt_read = ip27_hpt_read;
+	clocksource_mips.read = ip27_hpt_read;
 	mips_hpt_frequency = CYCLES_PER_SEC;
 	xtime.tv_sec = get_m48t35_time();
 	xtime.tv_nsec = 0;
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c
index c9acadd..ae06386 100644
--- a/arch/mips/sgi-ip32/ip32-irq.c
+++ b/arch/mips/sgi-ip32/ip32-irq.c
@@ -113,12 +113,6 @@
  * is quite different anyway.
  */
 
-/*
- * IRQ spinlock - Ralf says not to disable CPU interrupts,
- * and I think he knows better.
- */
-static DEFINE_SPINLOCK(ip32_irq_lock);
-
 /* Some initial interrupts to set up */
 extern irqreturn_t crime_memerr_intr(int irq, void *dev_id);
 extern irqreturn_t crime_cpuerr_intr(int irq, void *dev_id);
@@ -138,12 +132,6 @@
 	set_c0_status(STATUSF_IP7);
 }
 
-static unsigned int startup_cpu_irq(unsigned int irq)
-{
-	enable_cpu_irq(irq);
-	return 0;
-}
-
 static void disable_cpu_irq(unsigned int irq)
 {
 	clear_c0_status(STATUSF_IP7);
@@ -155,16 +143,12 @@
 		enable_cpu_irq (irq);
 }
 
-#define shutdown_cpu_irq disable_cpu_irq
-#define mask_and_ack_cpu_irq disable_cpu_irq
-
 static struct irq_chip ip32_cpu_interrupt = {
 	.typename = "IP32 CPU",
-	.startup = startup_cpu_irq,
-	.shutdown = shutdown_cpu_irq,
-	.enable = enable_cpu_irq,
-	.disable = disable_cpu_irq,
-	.ack = mask_and_ack_cpu_irq,
+	.ack = disable_cpu_irq,
+	.mask = disable_cpu_irq,
+	.mask_ack = disable_cpu_irq,
+	.unmask = enable_cpu_irq,
 	.end = end_cpu_irq,
 };
 
@@ -177,45 +161,27 @@
 
 static void enable_crime_irq(unsigned int irq)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ip32_irq_lock, flags);
 	crime_mask |= 1 << (irq - 1);
 	crime->imask = crime_mask;
-	spin_unlock_irqrestore(&ip32_irq_lock, flags);
-}
-
-static unsigned int startup_crime_irq(unsigned int irq)
-{
-	enable_crime_irq(irq);
-	return 0; /* This is probably not right; we could have pending irqs */
 }
 
 static void disable_crime_irq(unsigned int irq)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ip32_irq_lock, flags);
 	crime_mask &= ~(1 << (irq - 1));
 	crime->imask = crime_mask;
 	flush_crime_bus();
-	spin_unlock_irqrestore(&ip32_irq_lock, flags);
 }
 
 static void mask_and_ack_crime_irq(unsigned int irq)
 {
-	unsigned long flags;
-
 	/* Edge triggered interrupts must be cleared. */
 	if ((irq >= CRIME_GBE0_IRQ && irq <= CRIME_GBE3_IRQ)
 	    || (irq >= CRIME_RE_EMPTY_E_IRQ && irq <= CRIME_RE_IDLE_E_IRQ)
 	    || (irq >= CRIME_SOFT0_IRQ && irq <= CRIME_SOFT2_IRQ)) {
 	        uint64_t crime_int;
-		spin_lock_irqsave(&ip32_irq_lock, flags);
 		crime_int = crime->hard_int;
 		crime_int &= ~(1 << (irq - 1));
 		crime->hard_int = crime_int;
-		spin_unlock_irqrestore(&ip32_irq_lock, flags);
 	}
 	disable_crime_irq(irq);
 }
@@ -226,15 +192,12 @@
 		enable_crime_irq(irq);
 }
 
-#define shutdown_crime_irq disable_crime_irq
-
 static struct irq_chip ip32_crime_interrupt = {
 	.typename = "IP32 CRIME",
-	.startup = startup_crime_irq,
-	.shutdown = shutdown_crime_irq,
-	.enable = enable_crime_irq,
-	.disable = disable_crime_irq,
 	.ack = mask_and_ack_crime_irq,
+	.mask = disable_crime_irq,
+	.mask_ack = mask_and_ack_crime_irq,
+	.unmask = enable_crime_irq,
 	.end = end_crime_irq,
 };
 
@@ -248,34 +211,20 @@
 
 static void enable_macepci_irq(unsigned int irq)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ip32_irq_lock, flags);
 	macepci_mask |= MACEPCI_CONTROL_INT(irq - 9);
 	mace->pci.control = macepci_mask;
 	crime_mask |= 1 << (irq - 1);
 	crime->imask = crime_mask;
-	spin_unlock_irqrestore(&ip32_irq_lock, flags);
-}
-
-static unsigned int startup_macepci_irq(unsigned int irq)
-{
-  	enable_macepci_irq (irq);
-	return 0;
 }
 
 static void disable_macepci_irq(unsigned int irq)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ip32_irq_lock, flags);
 	crime_mask &= ~(1 << (irq - 1));
 	crime->imask = crime_mask;
 	flush_crime_bus();
 	macepci_mask &= ~MACEPCI_CONTROL_INT(irq - 9);
 	mace->pci.control = macepci_mask;
 	flush_mace_bus();
-	spin_unlock_irqrestore(&ip32_irq_lock, flags);
 }
 
 static void end_macepci_irq(unsigned int irq)
@@ -284,16 +233,12 @@
 		enable_macepci_irq(irq);
 }
 
-#define shutdown_macepci_irq disable_macepci_irq
-#define mask_and_ack_macepci_irq disable_macepci_irq
-
 static struct irq_chip ip32_macepci_interrupt = {
 	.typename = "IP32 MACE PCI",
-	.startup = startup_macepci_irq,
-	.shutdown = shutdown_macepci_irq,
-	.enable = enable_macepci_irq,
-	.disable = disable_macepci_irq,
-	.ack = mask_and_ack_macepci_irq,
+	.ack = disable_macepci_irq,
+	.mask = disable_macepci_irq,
+	.mask_ack = disable_macepci_irq,
+	.unmask = enable_macepci_irq,
 	.end = end_macepci_irq,
 };
 
@@ -339,7 +284,6 @@
 static void enable_maceisa_irq (unsigned int irq)
 {
 	unsigned int crime_int = 0;
-	unsigned long flags;
 
 	DBG ("maceisa enable: %u\n", irq);
 
@@ -355,26 +299,16 @@
 		break;
 	}
 	DBG ("crime_int %08x enabled\n", crime_int);
-	spin_lock_irqsave(&ip32_irq_lock, flags);
 	crime_mask |= crime_int;
 	crime->imask = crime_mask;
 	maceisa_mask |= 1 << (irq - 33);
 	mace->perif.ctrl.imask = maceisa_mask;
-	spin_unlock_irqrestore(&ip32_irq_lock, flags);
-}
-
-static unsigned int startup_maceisa_irq(unsigned int irq)
-{
-	enable_maceisa_irq(irq);
-	return 0;
 }
 
 static void disable_maceisa_irq(unsigned int irq)
 {
 	unsigned int crime_int = 0;
-	unsigned long flags;
 
-	spin_lock_irqsave(&ip32_irq_lock, flags);
 	maceisa_mask &= ~(1 << (irq - 33));
         if(!(maceisa_mask & MACEISA_AUDIO_INT))
 		crime_int |= MACE_AUDIO_INT;
@@ -387,23 +321,20 @@
 	flush_crime_bus();
 	mace->perif.ctrl.imask = maceisa_mask;
 	flush_mace_bus();
-	spin_unlock_irqrestore(&ip32_irq_lock, flags);
 }
 
 static void mask_and_ack_maceisa_irq(unsigned int irq)
 {
-	unsigned long mace_int, flags;
+	unsigned long mace_int;
 
 	switch (irq) {
 	case MACEISA_PARALLEL_IRQ:
 	case MACEISA_SERIAL1_TDMAPR_IRQ:
 	case MACEISA_SERIAL2_TDMAPR_IRQ:
 		/* edge triggered */
-		spin_lock_irqsave(&ip32_irq_lock, flags);
 		mace_int = mace->perif.ctrl.istat;
 		mace_int &= ~(1 << (irq - 33));
 		mace->perif.ctrl.istat = mace_int;
-		spin_unlock_irqrestore(&ip32_irq_lock, flags);
 		break;
 	}
 	disable_maceisa_irq(irq);
@@ -415,15 +346,12 @@
 		enable_maceisa_irq(irq);
 }
 
-#define shutdown_maceisa_irq disable_maceisa_irq
-
 static struct irq_chip ip32_maceisa_interrupt = {
 	.typename = "IP32 MACE ISA",
-	.startup = startup_maceisa_irq,
-	.shutdown = shutdown_maceisa_irq,
-	.enable = enable_maceisa_irq,
-	.disable = disable_maceisa_irq,
 	.ack = mask_and_ack_maceisa_irq,
+	.mask = disable_maceisa_irq,
+	.mask_ack = mask_and_ack_maceisa_irq,
+	.unmask = enable_maceisa_irq,
 	.end = end_maceisa_irq,
 };
 
@@ -433,29 +361,15 @@
 
 static void enable_mace_irq(unsigned int irq)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ip32_irq_lock, flags);
 	crime_mask |= 1 << (irq - 1);
 	crime->imask = crime_mask;
-	spin_unlock_irqrestore(&ip32_irq_lock, flags);
-}
-
-static unsigned int startup_mace_irq(unsigned int irq)
-{
-	enable_mace_irq(irq);
-	return 0;
 }
 
 static void disable_mace_irq(unsigned int irq)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ip32_irq_lock, flags);
 	crime_mask &= ~(1 << (irq - 1));
 	crime->imask = crime_mask;
 	flush_crime_bus();
-	spin_unlock_irqrestore(&ip32_irq_lock, flags);
 }
 
 static void end_mace_irq(unsigned int irq)
@@ -464,16 +378,12 @@
 		enable_mace_irq(irq);
 }
 
-#define shutdown_mace_irq disable_mace_irq
-#define mask_and_ack_mace_irq disable_mace_irq
-
 static struct irq_chip ip32_mace_interrupt = {
 	.typename = "IP32 MACE",
-	.startup = startup_mace_irq,
-	.shutdown = shutdown_mace_irq,
-	.enable = enable_mace_irq,
-	.disable = disable_mace_irq,
-	.ack = mask_and_ack_mace_irq,
+	.ack = disable_mace_irq,
+	.mask = disable_mace_irq,
+	.mask_ack = disable_mace_irq,
+	.unmask = enable_mace_irq,
 	.end = end_mace_irq,
 };
 
@@ -586,10 +496,7 @@
 		else
 			controller = &ip32_maceisa_interrupt;
 
-		irq_desc[irq].status = IRQ_DISABLED;
-		irq_desc[irq].action = 0;
-		irq_desc[irq].depth = 0;
-		irq_desc[irq].chip = controller;
+		set_irq_chip(irq, controller);
 	}
 	setup_irq(CRIME_MEMERR_IRQ, &memerr_irq);
 	setup_irq(CRIME_CPUERR_IRQ, &cpuerr_irq);
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index 8b1f414..2e8f6b2 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -45,11 +45,9 @@
  */
 
 
-#define shutdown_bcm1480_irq	disable_bcm1480_irq
 static void end_bcm1480_irq(unsigned int irq);
 static void enable_bcm1480_irq(unsigned int irq);
 static void disable_bcm1480_irq(unsigned int irq);
-static unsigned int startup_bcm1480_irq(unsigned int irq);
 static void ack_bcm1480_irq(unsigned int irq);
 #ifdef CONFIG_SMP
 static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask);
@@ -85,11 +83,10 @@
 
 static struct irq_chip bcm1480_irq_type = {
 	.typename = "BCM1480-IMR",
-	.startup = startup_bcm1480_irq,
-	.shutdown = shutdown_bcm1480_irq,
-	.enable = enable_bcm1480_irq,
-	.disable = disable_bcm1480_irq,
 	.ack = ack_bcm1480_irq,
+	.mask = disable_bcm1480_irq,
+	.mask_ack = ack_bcm1480_irq,
+	.unmask = enable_bcm1480_irq,
 	.end = end_bcm1480_irq,
 #ifdef CONFIG_SMP
 	.set_affinity = bcm1480_set_affinity
@@ -188,14 +185,6 @@
 
 /*****************************************************************************/
 
-static unsigned int startup_bcm1480_irq(unsigned int irq)
-{
-	bcm1480_unmask_irq(bcm1480_irq_owner[irq], irq);
-
-	return 0;		/* never anything pending */
-}
-
-
 static void disable_bcm1480_irq(unsigned int irq)
 {
 	bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
@@ -270,16 +259,9 @@
 {
 	int i;
 
-	for (i = 0; i < NR_IRQS; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 1;
-		if (i < BCM1480_NR_IRQS) {
-			irq_desc[i].chip = &bcm1480_irq_type;
-			bcm1480_irq_owner[i] = 0;
-		} else {
-			irq_desc[i].chip = &no_irq_chip;
-		}
+	for (i = 0; i < BCM1480_NR_IRQS; i++) {
+		set_irq_chip(i, &bcm1480_irq_type);
+		bcm1480_irq_owner[i] = 0;
 	}
 }
 
diff --git a/arch/mips/sibyte/bcm1480/time.c b/arch/mips/sibyte/bcm1480/time.c
index e136bde..6f3f71b 100644
--- a/arch/mips/sibyte/bcm1480/time.c
+++ b/arch/mips/sibyte/bcm1480/time.c
@@ -94,8 +94,6 @@
 	 */
 }
 
-#include <asm/sibyte/sb1250.h>
-
 void bcm1480_timer_interrupt(void)
 {
 	int cpu = smp_processor_id();
@@ -119,7 +117,7 @@
 	}
 }
 
-static unsigned int bcm1480_hpt_read(void)
+static cycle_t bcm1480_hpt_read(void)
 {
 	/* We assume this function is called with xtime_lock held. */
 	unsigned long count =
@@ -129,6 +127,6 @@
 
 void __init bcm1480_hpt_setup(void)
 {
-	mips_hpt_read = bcm1480_hpt_read;
+	clocksource_mips.read = bcm1480_hpt_read;
 	mips_hpt_frequency = BCM1480_HPT_VALUE;
 }
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index d5d2677..82ce753 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -44,11 +44,9 @@
  */
 
 
-#define shutdown_sb1250_irq	disable_sb1250_irq
 static void end_sb1250_irq(unsigned int irq);
 static void enable_sb1250_irq(unsigned int irq);
 static void disable_sb1250_irq(unsigned int irq);
-static unsigned int startup_sb1250_irq(unsigned int irq);
 static void ack_sb1250_irq(unsigned int irq);
 #ifdef CONFIG_SMP
 static void sb1250_set_affinity(unsigned int irq, cpumask_t mask);
@@ -70,11 +68,10 @@
 
 static struct irq_chip sb1250_irq_type = {
 	.typename = "SB1250-IMR",
-	.startup = startup_sb1250_irq,
-	.shutdown = shutdown_sb1250_irq,
-	.enable = enable_sb1250_irq,
-	.disable = disable_sb1250_irq,
 	.ack = ack_sb1250_irq,
+	.mask = disable_sb1250_irq,
+	.mask_ack = ack_sb1250_irq,
+	.unmask = enable_sb1250_irq,
 	.end = end_sb1250_irq,
 #ifdef CONFIG_SMP
 	.set_affinity = sb1250_set_affinity
@@ -163,14 +160,6 @@
 
 /*****************************************************************************/
 
-static unsigned int startup_sb1250_irq(unsigned int irq)
-{
-	sb1250_unmask_irq(sb1250_irq_owner[irq], irq);
-
-	return 0;		/* never anything pending */
-}
-
-
 static void disable_sb1250_irq(unsigned int irq)
 {
 	sb1250_mask_irq(sb1250_irq_owner[irq], irq);
@@ -239,16 +228,9 @@
 {
 	int i;
 
-	for (i = 0; i < NR_IRQS; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 1;
-		if (i < SB1250_NR_IRQS) {
-			irq_desc[i].chip = &sb1250_irq_type;
-			sb1250_irq_owner[i] = 0;
-		} else {
-			irq_desc[i].chip = &no_irq_chip;
-		}
+	for (i = 0; i < SB1250_NR_IRQS; i++) {
+		set_irq_chip(i, &sb1250_irq_type);
+		sb1250_irq_owner[i] = 0;
 	}
 }
 
diff --git a/arch/mips/sibyte/sb1250/time.c b/arch/mips/sibyte/sb1250/time.c
index bcb74f2..2efffe1 100644
--- a/arch/mips/sibyte/sb1250/time.c
+++ b/arch/mips/sibyte/sb1250/time.c
@@ -51,7 +51,7 @@
 
 extern int sb1250_steal_irq(int irq);
 
-static unsigned int sb1250_hpt_read(void);
+static cycle_t sb1250_hpt_read(void);
 
 void __init sb1250_hpt_setup(void)
 {
@@ -66,8 +66,8 @@
 			     IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CFG)));
 
 		mips_hpt_frequency = V_SCD_TIMER_FREQ;
-		mips_hpt_read = sb1250_hpt_read;
-		mips_hpt_mask = M_SCD_TIMER_INIT;
+		clocksource_mips.read = sb1250_hpt_read;
+		clocksource_mips.mask = M_SCD_TIMER_INIT;
 	}
 }
 
@@ -143,7 +143,7 @@
  * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
  * again.
  */
-static unsigned int sb1250_hpt_read(void)
+static cycle_t sb1250_hpt_read(void)
 {
 	unsigned int count;
 
diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c
index ac342f5..defa1f1 100644
--- a/arch/mips/sibyte/swarm/setup.c
+++ b/arch/mips/sibyte/swarm/setup.c
@@ -43,7 +43,7 @@
 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
 #include <asm/sibyte/sb1250_regs.h>
 #else
-#error invalid SiByte board configuation
+#error invalid SiByte board configuration
 #endif
 #include <asm/sibyte/sb1250_genbus.h>
 #include <asm/sibyte/board.h>
@@ -53,7 +53,7 @@
 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
 extern void sb1250_setup(void);
 #else
-#error invalid SiByte board configuation
+#error invalid SiByte board configuration
 #endif
 
 extern int xicor_probe(void);
@@ -90,7 +90,7 @@
 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
 	sb1250_time_init();
 #else
-#error invalid SiByte board configuation
+#error invalid SiByte board configuration
 #endif
 }
 
@@ -111,7 +111,7 @@
 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
 	sb1250_setup();
 #else
-#error invalid SiByte board configuation
+#error invalid SiByte board configuration
 #endif
 
 	panic_timeout = 5;  /* For debug.  */
diff --git a/arch/mips/sni/irq.c b/arch/mips/sni/irq.c
index 48fb74a..8511bcc 100644
--- a/arch/mips/sni/irq.c
+++ b/arch/mips/sni/irq.c
@@ -11,44 +11,25 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
-#include <linux/spinlock.h>
 
 #include <asm/i8259.h>
 #include <asm/io.h>
 #include <asm/sni.h>
 
-DEFINE_SPINLOCK(pciasic_lock);
-
 static void enable_pciasic_irq(unsigned int irq)
 {
 	unsigned int mask = 1 << (irq - PCIMT_IRQ_INT2);
-	unsigned long flags;
 
-	spin_lock_irqsave(&pciasic_lock, flags);
 	*(volatile u8 *) PCIMT_IRQSEL |= mask;
-	spin_unlock_irqrestore(&pciasic_lock, flags);
 }
 
-static unsigned int startup_pciasic_irq(unsigned int irq)
-{
-	enable_pciasic_irq(irq);
-	return 0; /* never anything pending */
-}
-
-#define shutdown_pciasic_irq	disable_pciasic_irq
-
 void disable_pciasic_irq(unsigned int irq)
 {
 	unsigned int mask = ~(1 << (irq - PCIMT_IRQ_INT2));
-	unsigned long flags;
 
-	spin_lock_irqsave(&pciasic_lock, flags);
 	*(volatile u8 *) PCIMT_IRQSEL &= mask;
-	spin_unlock_irqrestore(&pciasic_lock, flags);
 }
 
-#define mask_and_ack_pciasic_irq disable_pciasic_irq
-
 static void end_pciasic_irq(unsigned int irq)
 {
 	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@@ -57,11 +38,10 @@
 
 static struct irq_chip pciasic_irq_type = {
 	.typename = "ASIC-PCI",
-	.startup = startup_pciasic_irq,
-	.shutdown = shutdown_pciasic_irq,
-	.enable = enable_pciasic_irq,
-	.disable = disable_pciasic_irq,
-	.ack = mask_and_ack_pciasic_irq,
+	.ack = disable_pciasic_irq,
+	.mask = disable_pciasic_irq,
+	.mask_ack = disable_pciasic_irq,
+	.unmask = enable_pciasic_irq,
 	.end = end_pciasic_irq,
 };
 
@@ -178,12 +158,8 @@
 
 void __init init_pciasic(void)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&pciasic_lock, flags);
 	* (volatile u8 *) PCIMT_IRQSEL =
 		IT_EISA | IT_INTA | IT_INTB | IT_INTC | IT_INTD;
-	spin_unlock_irqrestore(&pciasic_lock, flags);
 }
 
 /*
@@ -199,12 +175,8 @@
 	init_pciasic();
 
 	/* Actually we've got more interrupts to handle ...  */
-	for (i = PCIMT_IRQ_INT2; i <= PCIMT_IRQ_ETHERNET; i++) {
-		irq_desc[i].status     = IRQ_DISABLED;
-		irq_desc[i].action     = 0;
-		irq_desc[i].depth      = 1;
-		irq_desc[i].chip    = &pciasic_irq_type;
-	}
+	for (i = PCIMT_IRQ_INT2; i <= PCIMT_IRQ_ETHERNET; i++)
+		set_irq_chip(i, &pciasic_irq_type);
 
 	change_c0_status(ST0_IM, IE_IRQ1|IE_IRQ2|IE_IRQ3|IE_IRQ4);
 }
diff --git a/arch/mips/tx4927/common/tx4927_irq.c b/arch/mips/tx4927/common/tx4927_irq.c
index 8266a88..ed4a19a 100644
--- a/arch/mips/tx4927/common/tx4927_irq.c
+++ b/arch/mips/tx4927/common/tx4927_irq.c
@@ -64,20 +64,12 @@
 #define TX4927_IRQ_NEST4       ( 1 <<  9 )
 
 #define TX4927_IRQ_CP0_INIT     ( 1 << 10 )
-#define TX4927_IRQ_CP0_STARTUP  ( 1 << 11 )
-#define TX4927_IRQ_CP0_SHUTDOWN ( 1 << 12 )
 #define TX4927_IRQ_CP0_ENABLE   ( 1 << 13 )
 #define TX4927_IRQ_CP0_DISABLE  ( 1 << 14 )
-#define TX4927_IRQ_CP0_MASK     ( 1 << 15 )
-#define TX4927_IRQ_CP0_ENDIRQ   ( 1 << 16 )
 
 #define TX4927_IRQ_PIC_INIT     ( 1 << 20 )
-#define TX4927_IRQ_PIC_STARTUP  ( 1 << 21 )
-#define TX4927_IRQ_PIC_SHUTDOWN ( 1 << 22 )
 #define TX4927_IRQ_PIC_ENABLE   ( 1 << 23 )
 #define TX4927_IRQ_PIC_DISABLE  ( 1 << 24 )
-#define TX4927_IRQ_PIC_MASK     ( 1 << 25 )
-#define TX4927_IRQ_PIC_ENDIRQ   ( 1 << 26 )
 
 #define TX4927_IRQ_ALL         0xffffffff
 #endif
@@ -87,19 +79,11 @@
 					  | TX4927_IRQ_INFO
 					  | TX4927_IRQ_WARN | TX4927_IRQ_EROR
 //                                       | TX4927_IRQ_CP0_INIT
-//                                       | TX4927_IRQ_CP0_STARTUP
-//                                       | TX4927_IRQ_CP0_SHUTDOWN
 //                                       | TX4927_IRQ_CP0_ENABLE
-//                                       | TX4927_IRQ_CP0_DISABLE
-//                                       | TX4927_IRQ_CP0_MASK
 //                                       | TX4927_IRQ_CP0_ENDIRQ
 //                                       | TX4927_IRQ_PIC_INIT
-//                                       | TX4927_IRQ_PIC_STARTUP
-//                                       | TX4927_IRQ_PIC_SHUTDOWN
 //                                       | TX4927_IRQ_PIC_ENABLE
 //                                       | TX4927_IRQ_PIC_DISABLE
-//                                       | TX4927_IRQ_PIC_MASK
-//                                       | TX4927_IRQ_PIC_ENDIRQ
 //                                       | TX4927_IRQ_INIT
 //                                       | TX4927_IRQ_NEST1
 //                                       | TX4927_IRQ_NEST2
@@ -124,49 +108,32 @@
 * Forward definitions for all pic's
  */
 
-static unsigned int tx4927_irq_cp0_startup(unsigned int irq);
-static void tx4927_irq_cp0_shutdown(unsigned int irq);
 static void tx4927_irq_cp0_enable(unsigned int irq);
 static void tx4927_irq_cp0_disable(unsigned int irq);
-static void tx4927_irq_cp0_mask_and_ack(unsigned int irq);
-static void tx4927_irq_cp0_end(unsigned int irq);
 
-static unsigned int tx4927_irq_pic_startup(unsigned int irq);
-static void tx4927_irq_pic_shutdown(unsigned int irq);
 static void tx4927_irq_pic_enable(unsigned int irq);
 static void tx4927_irq_pic_disable(unsigned int irq);
-static void tx4927_irq_pic_mask_and_ack(unsigned int irq);
-static void tx4927_irq_pic_end(unsigned int irq);
 
 /*
  * Kernel structs for all pic's
  */
 
-static DEFINE_SPINLOCK(tx4927_cp0_lock);
-static DEFINE_SPINLOCK(tx4927_pic_lock);
-
 #define TX4927_CP0_NAME "TX4927-CP0"
 static struct irq_chip tx4927_irq_cp0_type = {
 	.typename	= TX4927_CP0_NAME,
-	.startup	= tx4927_irq_cp0_startup,
-	.shutdown	= tx4927_irq_cp0_shutdown,
-	.enable		= tx4927_irq_cp0_enable,
-	.disable	= tx4927_irq_cp0_disable,
-	.ack		= tx4927_irq_cp0_mask_and_ack,
-	.end		= tx4927_irq_cp0_end,
-	.set_affinity	= NULL
+	.ack		= tx4927_irq_cp0_disable,
+	.mask		= tx4927_irq_cp0_disable,
+	.mask_ack	= tx4927_irq_cp0_disable,
+	.unmask		= tx4927_irq_cp0_enable,
 };
 
 #define TX4927_PIC_NAME "TX4927-PIC"
 static struct irq_chip tx4927_irq_pic_type = {
 	.typename	= TX4927_PIC_NAME,
-	.startup	= tx4927_irq_pic_startup,
-	.shutdown	= tx4927_irq_pic_shutdown,
-	.enable		= tx4927_irq_pic_enable,
-	.disable	= tx4927_irq_pic_disable,
-	.ack		= tx4927_irq_pic_mask_and_ack,
-	.end		= tx4927_irq_pic_end,
-	.set_affinity	= NULL
+	.ack		= tx4927_irq_pic_disable,
+	.mask		= tx4927_irq_pic_disable,
+	.mask_ack	= tx4927_irq_pic_disable,
+	.unmask		= tx4927_irq_pic_enable,
 };
 
 #define TX4927_PIC_ACTION(s) { no_action, 0, CPU_MASK_NONE, s, NULL, NULL }
@@ -211,8 +178,6 @@
 			break;
 		}
 	}
-
-	return;
 }
 
 static void __init tx4927_irq_cp0_init(void)
@@ -222,82 +187,23 @@
 	TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_INIT, "beg=%d end=%d\n",
 			   TX4927_IRQ_CP0_BEG, TX4927_IRQ_CP0_END);
 
-	for (i = TX4927_IRQ_CP0_BEG; i <= TX4927_IRQ_CP0_END; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &tx4927_irq_cp0_type;
-	}
-
-	return;
-}
-
-static unsigned int tx4927_irq_cp0_startup(unsigned int irq)
-{
-	TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_STARTUP, "irq=%d \n", irq);
-
-	tx4927_irq_cp0_enable(irq);
-
-	return (0);
-}
-
-static void tx4927_irq_cp0_shutdown(unsigned int irq)
-{
-	TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_SHUTDOWN, "irq=%d \n", irq);
-
-	tx4927_irq_cp0_disable(irq);
-
-	return;
+	for (i = TX4927_IRQ_CP0_BEG; i <= TX4927_IRQ_CP0_END; i++)
+		set_irq_chip_and_handler(i, &tx4927_irq_cp0_type,
+					 handle_level_irq);
 }
 
 static void tx4927_irq_cp0_enable(unsigned int irq)
 {
-	unsigned long flags;
-
 	TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_ENABLE, "irq=%d \n", irq);
 
-	spin_lock_irqsave(&tx4927_cp0_lock, flags);
-
 	tx4927_irq_cp0_modify(CCP0_STATUS, 0, tx4927_irq_cp0_mask(irq));
-
-	spin_unlock_irqrestore(&tx4927_cp0_lock, flags);
-
-	return;
 }
 
 static void tx4927_irq_cp0_disable(unsigned int irq)
 {
-	unsigned long flags;
-
 	TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_DISABLE, "irq=%d \n", irq);
 
-	spin_lock_irqsave(&tx4927_cp0_lock, flags);
-
 	tx4927_irq_cp0_modify(CCP0_STATUS, tx4927_irq_cp0_mask(irq), 0);
-
-	spin_unlock_irqrestore(&tx4927_cp0_lock, flags);
-
-	return;
-}
-
-static void tx4927_irq_cp0_mask_and_ack(unsigned int irq)
-{
-	TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_MASK, "irq=%d \n", irq);
-
-	tx4927_irq_cp0_disable(irq);
-
-	return;
-}
-
-static void tx4927_irq_cp0_end(unsigned int irq)
-{
-	TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_ENDIRQ, "irq=%d \n", irq);
-
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-		tx4927_irq_cp0_enable(irq);
-	}
-
-	return;
 }
 
 /*
@@ -418,105 +324,39 @@
 	val &= (~clr_bits);
 	val |= (set_bits);
 	TX4927_WR(pic_reg, val);
-
-	return;
 }
 
 static void __init tx4927_irq_pic_init(void)
 {
-	unsigned long flags;
 	int i;
 
 	TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_INIT, "beg=%d end=%d\n",
 			   TX4927_IRQ_PIC_BEG, TX4927_IRQ_PIC_END);
 
-	for (i = TX4927_IRQ_PIC_BEG; i <= TX4927_IRQ_PIC_END; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 2;
-		irq_desc[i].chip = &tx4927_irq_pic_type;
-	}
+	for (i = TX4927_IRQ_PIC_BEG; i <= TX4927_IRQ_PIC_END; i++)
+		set_irq_chip_and_handler(i, &tx4927_irq_pic_type,
+					 handle_level_irq);
 
 	setup_irq(TX4927_IRQ_NEST_PIC_ON_CP0, &tx4927_irq_pic_action);
 
-	spin_lock_irqsave(&tx4927_pic_lock, flags);
-
 	TX4927_WR(0xff1ff640, 0x6);	/* irq level mask -- only accept highest */
 	TX4927_WR(0xff1ff600, TX4927_RD(0xff1ff600) | 0x1);	/* irq enable */
-
-	spin_unlock_irqrestore(&tx4927_pic_lock, flags);
-
-	return;
-}
-
-static unsigned int tx4927_irq_pic_startup(unsigned int irq)
-{
-	TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_STARTUP, "irq=%d\n", irq);
-
-	tx4927_irq_pic_enable(irq);
-
-	return (0);
-}
-
-static void tx4927_irq_pic_shutdown(unsigned int irq)
-{
-	TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_SHUTDOWN, "irq=%d\n", irq);
-
-	tx4927_irq_pic_disable(irq);
-
-	return;
 }
 
 static void tx4927_irq_pic_enable(unsigned int irq)
 {
-	unsigned long flags;
-
 	TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_ENABLE, "irq=%d\n", irq);
 
-	spin_lock_irqsave(&tx4927_pic_lock, flags);
-
 	tx4927_irq_pic_modify(tx4927_irq_pic_addr(irq), 0,
 			      tx4927_irq_pic_mask(irq));
-
-	spin_unlock_irqrestore(&tx4927_pic_lock, flags);
-
-	return;
 }
 
 static void tx4927_irq_pic_disable(unsigned int irq)
 {
-	unsigned long flags;
-
 	TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_DISABLE, "irq=%d\n", irq);
 
-	spin_lock_irqsave(&tx4927_pic_lock, flags);
-
 	tx4927_irq_pic_modify(tx4927_irq_pic_addr(irq),
 			      tx4927_irq_pic_mask(irq), 0);
-
-	spin_unlock_irqrestore(&tx4927_pic_lock, flags);
-
-	return;
-}
-
-static void tx4927_irq_pic_mask_and_ack(unsigned int irq)
-{
-	TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_MASK, "irq=%d\n", irq);
-
-	tx4927_irq_pic_disable(irq);
-
-	return;
-}
-
-static void tx4927_irq_pic_end(unsigned int irq)
-{
-	TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_ENDIRQ, "irq=%d\n", irq);
-
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-		tx4927_irq_pic_enable(irq);
-	}
-
-	return;
 }
 
 /*
@@ -533,8 +373,6 @@
 	tx4927_irq_pic_init();
 
 	TX4927_IRQ_DPRINTK(TX4927_IRQ_INIT, "+\n");
-
-	return;
 }
 
 static int tx4927_irq_nested(void)
diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
index 0c3c3f6..5a5ea6c 100644
--- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
+++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
@@ -151,16 +151,10 @@
 #define TOSHIBA_RBTX4927_IRQ_EROR          ( 1 <<  2 )
 
 #define TOSHIBA_RBTX4927_IRQ_IOC_INIT      ( 1 << 10 )
-#define TOSHIBA_RBTX4927_IRQ_IOC_STARTUP   ( 1 << 11 )
-#define TOSHIBA_RBTX4927_IRQ_IOC_SHUTDOWN  ( 1 << 12 )
 #define TOSHIBA_RBTX4927_IRQ_IOC_ENABLE    ( 1 << 13 )
 #define TOSHIBA_RBTX4927_IRQ_IOC_DISABLE   ( 1 << 14 )
-#define TOSHIBA_RBTX4927_IRQ_IOC_MASK      ( 1 << 15 )
-#define TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ    ( 1 << 16 )
 
 #define TOSHIBA_RBTX4927_IRQ_ISA_INIT      ( 1 << 20 )
-#define TOSHIBA_RBTX4927_IRQ_ISA_STARTUP   ( 1 << 21 )
-#define TOSHIBA_RBTX4927_IRQ_ISA_SHUTDOWN  ( 1 << 22 )
 #define TOSHIBA_RBTX4927_IRQ_ISA_ENABLE    ( 1 << 23 )
 #define TOSHIBA_RBTX4927_IRQ_ISA_DISABLE   ( 1 << 24 )
 #define TOSHIBA_RBTX4927_IRQ_ISA_MASK      ( 1 << 25 )
@@ -175,15 +169,9 @@
     (TOSHIBA_RBTX4927_IRQ_NONE | TOSHIBA_RBTX4927_IRQ_INFO |
      TOSHIBA_RBTX4927_IRQ_WARN | TOSHIBA_RBTX4927_IRQ_EROR
 //                                                 | TOSHIBA_RBTX4927_IRQ_IOC_INIT
-//                                                 | TOSHIBA_RBTX4927_IRQ_IOC_STARTUP
-//                                                 | TOSHIBA_RBTX4927_IRQ_IOC_SHUTDOWN
 //                                                 | TOSHIBA_RBTX4927_IRQ_IOC_ENABLE
 //                                                 | TOSHIBA_RBTX4927_IRQ_IOC_DISABLE
-//                                                 | TOSHIBA_RBTX4927_IRQ_IOC_MASK
-//                                                 | TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ
 //                                                 | TOSHIBA_RBTX4927_IRQ_ISA_INIT
-//                                                 | TOSHIBA_RBTX4927_IRQ_ISA_STARTUP
-//                                                 | TOSHIBA_RBTX4927_IRQ_ISA_SHUTDOWN
 //                                                 | TOSHIBA_RBTX4927_IRQ_ISA_ENABLE
 //                                                 | TOSHIBA_RBTX4927_IRQ_ISA_DISABLE
 //                                                 | TOSHIBA_RBTX4927_IRQ_ISA_MASK
@@ -231,35 +219,23 @@
 extern void mask_and_ack_8259A(unsigned int irq);
 #endif
 
-static unsigned int toshiba_rbtx4927_irq_ioc_startup(unsigned int irq);
-static void toshiba_rbtx4927_irq_ioc_shutdown(unsigned int irq);
 static void toshiba_rbtx4927_irq_ioc_enable(unsigned int irq);
 static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq);
-static void toshiba_rbtx4927_irq_ioc_mask_and_ack(unsigned int irq);
-static void toshiba_rbtx4927_irq_ioc_end(unsigned int irq);
 
 #ifdef CONFIG_TOSHIBA_FPCIB0
-static unsigned int toshiba_rbtx4927_irq_isa_startup(unsigned int irq);
-static void toshiba_rbtx4927_irq_isa_shutdown(unsigned int irq);
 static void toshiba_rbtx4927_irq_isa_enable(unsigned int irq);
 static void toshiba_rbtx4927_irq_isa_disable(unsigned int irq);
 static void toshiba_rbtx4927_irq_isa_mask_and_ack(unsigned int irq);
 static void toshiba_rbtx4927_irq_isa_end(unsigned int irq);
 #endif
 
-static DEFINE_SPINLOCK(toshiba_rbtx4927_ioc_lock);
-
-
 #define TOSHIBA_RBTX4927_IOC_NAME "RBTX4927-IOC"
 static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
 	.typename = TOSHIBA_RBTX4927_IOC_NAME,
-	.startup = toshiba_rbtx4927_irq_ioc_startup,
-	.shutdown = toshiba_rbtx4927_irq_ioc_shutdown,
-	.enable = toshiba_rbtx4927_irq_ioc_enable,
-	.disable = toshiba_rbtx4927_irq_ioc_disable,
-	.ack = toshiba_rbtx4927_irq_ioc_mask_and_ack,
-	.end = toshiba_rbtx4927_irq_ioc_end,
-	.set_affinity = NULL
+	.ack = toshiba_rbtx4927_irq_ioc_disable,
+	.mask = toshiba_rbtx4927_irq_ioc_disable,
+	.mask_ack = toshiba_rbtx4927_irq_ioc_disable,
+	.unmask = toshiba_rbtx4927_irq_ioc_enable,
 };
 #define TOSHIBA_RBTX4927_IOC_INTR_ENAB 0xbc002000
 #define TOSHIBA_RBTX4927_IOC_INTR_STAT 0xbc002006
@@ -269,13 +245,11 @@
 #define TOSHIBA_RBTX4927_ISA_NAME "RBTX4927-ISA"
 static struct irq_chip toshiba_rbtx4927_irq_isa_type = {
 	.typename = TOSHIBA_RBTX4927_ISA_NAME,
-	.startup = toshiba_rbtx4927_irq_isa_startup,
-	.shutdown = toshiba_rbtx4927_irq_isa_shutdown,
-	.enable = toshiba_rbtx4927_irq_isa_enable,
-	.disable = toshiba_rbtx4927_irq_isa_disable,
 	.ack = toshiba_rbtx4927_irq_isa_mask_and_ack,
+	.mask = toshiba_rbtx4927_irq_isa_disable,
+	.mask_ack = toshiba_rbtx4927_irq_isa_mask_and_ack,
+	.unmask = toshiba_rbtx4927_irq_isa_enable,
 	.end = toshiba_rbtx4927_irq_isa_end,
-	.set_affinity = NULL
 };
 #endif
 
@@ -363,58 +337,16 @@
 				     TOSHIBA_RBTX4927_IRQ_IOC_END);
 
 	for (i = TOSHIBA_RBTX4927_IRQ_IOC_BEG;
-	     i <= TOSHIBA_RBTX4927_IRQ_IOC_END; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 3;
-		irq_desc[i].chip = &toshiba_rbtx4927_irq_ioc_type;
-	}
+	     i <= TOSHIBA_RBTX4927_IRQ_IOC_END; i++)
+		set_irq_chip_and_handler(i, &toshiba_rbtx4927_irq_ioc_type,
+					 handle_level_irq);
 
 	setup_irq(TOSHIBA_RBTX4927_IRQ_NEST_IOC_ON_PIC,
 		  &toshiba_rbtx4927_irq_ioc_action);
-
-	return;
 }
 
-static unsigned int toshiba_rbtx4927_irq_ioc_startup(unsigned int irq)
-{
-	TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_STARTUP,
-				     "irq=%d\n", irq);
-
-	if (irq < TOSHIBA_RBTX4927_IRQ_IOC_BEG
-	    || irq > TOSHIBA_RBTX4927_IRQ_IOC_END) {
-		TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
-					     "bad irq=%d\n", irq);
-		panic("\n");
-	}
-
-	toshiba_rbtx4927_irq_ioc_enable(irq);
-
-	return (0);
-}
-
-
-static void toshiba_rbtx4927_irq_ioc_shutdown(unsigned int irq)
-{
-	TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_SHUTDOWN,
-				     "irq=%d\n", irq);
-
-	if (irq < TOSHIBA_RBTX4927_IRQ_IOC_BEG
-	    || irq > TOSHIBA_RBTX4927_IRQ_IOC_END) {
-		TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
-					     "bad irq=%d\n", irq);
-		panic("\n");
-	}
-
-	toshiba_rbtx4927_irq_ioc_disable(irq);
-
-	return;
-}
-
-
 static void toshiba_rbtx4927_irq_ioc_enable(unsigned int irq)
 {
-	unsigned long flags;
 	volatile unsigned char v;
 
 	TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_ENABLE,
@@ -427,21 +359,14 @@
 		panic("\n");
 	}
 
-	spin_lock_irqsave(&toshiba_rbtx4927_ioc_lock, flags);
-
 	v = TX4927_RD08(TOSHIBA_RBTX4927_IOC_INTR_ENAB);
 	v |= (1 << (irq - TOSHIBA_RBTX4927_IRQ_IOC_BEG));
 	TOSHIBA_RBTX4927_WR08(TOSHIBA_RBTX4927_IOC_INTR_ENAB, v);
-
-	spin_unlock_irqrestore(&toshiba_rbtx4927_ioc_lock, flags);
-
-	return;
 }
 
 
 static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq)
 {
-	unsigned long flags;
 	volatile unsigned char v;
 
 	TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_DISABLE,
@@ -454,53 +379,9 @@
 		panic("\n");
 	}
 
-	spin_lock_irqsave(&toshiba_rbtx4927_ioc_lock, flags);
-
 	v = TX4927_RD08(TOSHIBA_RBTX4927_IOC_INTR_ENAB);
 	v &= ~(1 << (irq - TOSHIBA_RBTX4927_IRQ_IOC_BEG));
 	TOSHIBA_RBTX4927_WR08(TOSHIBA_RBTX4927_IOC_INTR_ENAB, v);
-
-	spin_unlock_irqrestore(&toshiba_rbtx4927_ioc_lock, flags);
-
-	return;
-}
-
-
-static void toshiba_rbtx4927_irq_ioc_mask_and_ack(unsigned int irq)
-{
-	TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_MASK,
-				     "irq=%d\n", irq);
-
-	if (irq < TOSHIBA_RBTX4927_IRQ_IOC_BEG
-	    || irq > TOSHIBA_RBTX4927_IRQ_IOC_END) {
-		TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
-					     "bad irq=%d\n", irq);
-		panic("\n");
-	}
-
-	toshiba_rbtx4927_irq_ioc_disable(irq);
-
-	return;
-}
-
-
-static void toshiba_rbtx4927_irq_ioc_end(unsigned int irq)
-{
-	TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ,
-				     "irq=%d\n", irq);
-
-	if (irq < TOSHIBA_RBTX4927_IRQ_IOC_BEG
-	    || irq > TOSHIBA_RBTX4927_IRQ_IOC_END) {
-		TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
-					     "bad irq=%d\n", irq);
-		panic("\n");
-	}
-
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-		toshiba_rbtx4927_irq_ioc_enable(irq);
-	}
-
-	return;
 }
 
 
@@ -520,13 +401,8 @@
 				     TOSHIBA_RBTX4927_IRQ_ISA_END);
 
 	for (i = TOSHIBA_RBTX4927_IRQ_ISA_BEG;
-	     i <= TOSHIBA_RBTX4927_IRQ_ISA_END; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth =
-		    ((i < TOSHIBA_RBTX4927_IRQ_ISA_MID) ? (4) : (5));
-		irq_desc[i].chip = &toshiba_rbtx4927_irq_isa_type;
-	}
+	     i <= TOSHIBA_RBTX4927_IRQ_ISA_END; i++)
+		set_irq_chip(i, &toshiba_rbtx4927_irq_isa_type);
 
 	setup_irq(TOSHIBA_RBTX4927_IRQ_NEST_ISA_ON_IOC,
 		  &toshiba_rbtx4927_irq_isa_master);
@@ -536,48 +412,6 @@
 	/* make sure we are looking at IRR (not ISR) */
 	outb(0x0A, 0x20);
 	outb(0x0A, 0xA0);
-
-	return;
-}
-#endif
-
-
-#ifdef CONFIG_TOSHIBA_FPCIB0
-static unsigned int toshiba_rbtx4927_irq_isa_startup(unsigned int irq)
-{
-	TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_ISA_STARTUP,
-				     "irq=%d\n", irq);
-
-	if (irq < TOSHIBA_RBTX4927_IRQ_ISA_BEG
-	    || irq > TOSHIBA_RBTX4927_IRQ_ISA_END) {
-		TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
-					     "bad irq=%d\n", irq);
-		panic("\n");
-	}
-
-	toshiba_rbtx4927_irq_isa_enable(irq);
-
-	return (0);
-}
-#endif
-
-
-#ifdef CONFIG_TOSHIBA_FPCIB0
-static void toshiba_rbtx4927_irq_isa_shutdown(unsigned int irq)
-{
-	TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_ISA_SHUTDOWN,
-				     "irq=%d\n", irq);
-
-	if (irq < TOSHIBA_RBTX4927_IRQ_ISA_BEG
-	    || irq > TOSHIBA_RBTX4927_IRQ_ISA_END) {
-		TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
-					     "bad irq=%d\n", irq);
-		panic("\n");
-	}
-
-	toshiba_rbtx4927_irq_isa_disable(irq);
-
-	return;
 }
 #endif
 
@@ -596,8 +430,6 @@
 	}
 
 	enable_8259A_irq(irq);
-
-	return;
 }
 #endif
 
@@ -616,8 +448,6 @@
 	}
 
 	disable_8259A_irq(irq);
-
-	return;
 }
 #endif
 
@@ -636,8 +466,6 @@
 	}
 
 	mask_and_ack_8259A(irq);
-
-	return;
 }
 #endif
 
@@ -658,8 +486,6 @@
 	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
 		toshiba_rbtx4927_irq_isa_enable(irq);
 	}
-
-	return;
 }
 #endif
 
@@ -668,8 +494,6 @@
 {
 	extern void tx4927_irq_init(void);
 
-	local_irq_disable();
-
 	tx4927_irq_init();
 	toshiba_rbtx4927_irq_ioc_init();
 #ifdef CONFIG_TOSHIBA_FPCIB0
@@ -681,8 +505,6 @@
 #endif
 
 	wbflush();
-
-	return;
 }
 
 void toshiba_rbtx4927_irq_dump(char *key)
@@ -715,7 +537,6 @@
 		}
 	}
 #endif
-	return;
 }
 
 void toshiba_rbtx4927_irq_dump_pics(char *s)
@@ -780,6 +601,4 @@
 				     level5_s);
 	TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_INFO, "[%s]\n",
 				     s);
-
-	return;
 }
diff --git a/arch/mips/tx4938/common/irq.c b/arch/mips/tx4938/common/irq.c
index 77fe245..a347b42 100644
--- a/arch/mips/tx4938/common/irq.c
+++ b/arch/mips/tx4938/common/irq.c
@@ -37,48 +37,32 @@
 /* Forward definitions for all pic's                                              */
 /**********************************************************************************/
 
-static unsigned int tx4938_irq_cp0_startup(unsigned int irq);
-static void tx4938_irq_cp0_shutdown(unsigned int irq);
 static void tx4938_irq_cp0_enable(unsigned int irq);
 static void tx4938_irq_cp0_disable(unsigned int irq);
-static void tx4938_irq_cp0_mask_and_ack(unsigned int irq);
-static void tx4938_irq_cp0_end(unsigned int irq);
 
-static unsigned int tx4938_irq_pic_startup(unsigned int irq);
-static void tx4938_irq_pic_shutdown(unsigned int irq);
 static void tx4938_irq_pic_enable(unsigned int irq);
 static void tx4938_irq_pic_disable(unsigned int irq);
-static void tx4938_irq_pic_mask_and_ack(unsigned int irq);
-static void tx4938_irq_pic_end(unsigned int irq);
 
 /**********************************************************************************/
 /* Kernel structs for all pic's                                                   */
 /**********************************************************************************/
-DEFINE_SPINLOCK(tx4938_cp0_lock);
-DEFINE_SPINLOCK(tx4938_pic_lock);
 
 #define TX4938_CP0_NAME "TX4938-CP0"
 static struct irq_chip tx4938_irq_cp0_type = {
 	.typename = TX4938_CP0_NAME,
-	.startup = tx4938_irq_cp0_startup,
-	.shutdown = tx4938_irq_cp0_shutdown,
-	.enable = tx4938_irq_cp0_enable,
-	.disable = tx4938_irq_cp0_disable,
-	.ack = tx4938_irq_cp0_mask_and_ack,
-	.end = tx4938_irq_cp0_end,
-	.set_affinity = NULL
+	.ack = tx4938_irq_cp0_disable,
+	.mask = tx4938_irq_cp0_disable,
+	.mask_ack = tx4938_irq_cp0_disable,
+	.unmask = tx4938_irq_cp0_enable,
 };
 
 #define TX4938_PIC_NAME "TX4938-PIC"
 static struct irq_chip tx4938_irq_pic_type = {
 	.typename = TX4938_PIC_NAME,
-	.startup = tx4938_irq_pic_startup,
-	.shutdown = tx4938_irq_pic_shutdown,
-	.enable = tx4938_irq_pic_enable,
-	.disable = tx4938_irq_pic_disable,
-	.ack = tx4938_irq_pic_mask_and_ack,
-	.end = tx4938_irq_pic_end,
-	.set_affinity = NULL
+	.ack = tx4938_irq_pic_disable,
+	.mask = tx4938_irq_pic_disable,
+	.mask_ack = tx4938_irq_pic_disable,
+	.unmask = tx4938_irq_pic_enable,
 };
 
 static struct irqaction tx4938_irq_pic_action = {
@@ -99,64 +83,21 @@
 {
 	int i;
 
-	for (i = TX4938_IRQ_CP0_BEG; i <= TX4938_IRQ_CP0_END; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &tx4938_irq_cp0_type;
-	}
-}
-
-static unsigned int
-tx4938_irq_cp0_startup(unsigned int irq)
-{
-	tx4938_irq_cp0_enable(irq);
-
-	return 0;
-}
-
-static void
-tx4938_irq_cp0_shutdown(unsigned int irq)
-{
-	tx4938_irq_cp0_disable(irq);
+	for (i = TX4938_IRQ_CP0_BEG; i <= TX4938_IRQ_CP0_END; i++)
+		set_irq_chip_and_handler(i, &tx4938_irq_cp0_type,
+					 handle_level_irq);
 }
 
 static void
 tx4938_irq_cp0_enable(unsigned int irq)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&tx4938_cp0_lock, flags);
-
 	set_c0_status(tx4938_irq_cp0_mask(irq));
-
-	spin_unlock_irqrestore(&tx4938_cp0_lock, flags);
 }
 
 static void
 tx4938_irq_cp0_disable(unsigned int irq)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&tx4938_cp0_lock, flags);
-
 	clear_c0_status(tx4938_irq_cp0_mask(irq));
-
-	spin_unlock_irqrestore(&tx4938_cp0_lock, flags);
-}
-
-static void
-tx4938_irq_cp0_mask_and_ack(unsigned int irq)
-{
-	tx4938_irq_cp0_disable(irq);
-}
-
-static void
-tx4938_irq_cp0_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-		tx4938_irq_cp0_enable(irq);
-	}
 }
 
 /**********************************************************************************/
@@ -290,78 +231,30 @@
 static void __init
 tx4938_irq_pic_init(void)
 {
-	unsigned long flags;
 	int i;
 
-	for (i = TX4938_IRQ_PIC_BEG; i <= TX4938_IRQ_PIC_END; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 2;
-		irq_desc[i].chip = &tx4938_irq_pic_type;
-	}
+	for (i = TX4938_IRQ_PIC_BEG; i <= TX4938_IRQ_PIC_END; i++)
+		set_irq_chip_and_handler(i, &tx4938_irq_pic_type,
+					 handle_level_irq);
 
 	setup_irq(TX4938_IRQ_NEST_PIC_ON_CP0, &tx4938_irq_pic_action);
 
-	spin_lock_irqsave(&tx4938_pic_lock, flags);
-
 	TX4938_WR(0xff1ff640, 0x6);	/* irq level mask -- only accept highest */
 	TX4938_WR(0xff1ff600, TX4938_RD(0xff1ff600) | 0x1);	/* irq enable */
-
-	spin_unlock_irqrestore(&tx4938_pic_lock, flags);
-}
-
-static unsigned int
-tx4938_irq_pic_startup(unsigned int irq)
-{
-	tx4938_irq_pic_enable(irq);
-
-	return 0;
-}
-
-static void
-tx4938_irq_pic_shutdown(unsigned int irq)
-{
-	tx4938_irq_pic_disable(irq);
 }
 
 static void
 tx4938_irq_pic_enable(unsigned int irq)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&tx4938_pic_lock, flags);
-
 	tx4938_irq_pic_modify(tx4938_irq_pic_addr(irq), 0,
 			      tx4938_irq_pic_mask(irq));
-
-	spin_unlock_irqrestore(&tx4938_pic_lock, flags);
 }
 
 static void
 tx4938_irq_pic_disable(unsigned int irq)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&tx4938_pic_lock, flags);
-
 	tx4938_irq_pic_modify(tx4938_irq_pic_addr(irq),
 			      tx4938_irq_pic_mask(irq), 0);
-
-	spin_unlock_irqrestore(&tx4938_pic_lock, flags);
-}
-
-static void
-tx4938_irq_pic_mask_and_ack(unsigned int irq)
-{
-	tx4938_irq_pic_disable(irq);
-}
-
-static void
-tx4938_irq_pic_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-		tx4938_irq_pic_enable(irq);
-	}
 }
 
 /**********************************************************************************/
diff --git a/arch/mips/tx4938/common/setup.c b/arch/mips/tx4938/common/setup.c
index f415a1f..dc87d92 100644
--- a/arch/mips/tx4938/common/setup.c
+++ b/arch/mips/tx4938/common/setup.c
@@ -31,7 +31,6 @@
 #include <asm/mipsregs.h>
 #include <asm/system.h>
 #include <asm/time.h>
-#include <asm/time.h>
 #include <asm/tx4938/rbtx4938.h>
 
 extern void toshiba_rbtx4938_setup(void);
diff --git a/arch/mips/tx4938/toshiba_rbtx4938/irq.c b/arch/mips/tx4938/toshiba_rbtx4938/irq.c
index 102e473..b6f363d 100644
--- a/arch/mips/tx4938/toshiba_rbtx4938/irq.c
+++ b/arch/mips/tx4938/toshiba_rbtx4938/irq.c
@@ -87,25 +87,16 @@
 #include <linux/bootmem.h>
 #include <asm/tx4938/rbtx4938.h>
 
-static unsigned int toshiba_rbtx4938_irq_ioc_startup(unsigned int irq);
-static void toshiba_rbtx4938_irq_ioc_shutdown(unsigned int irq);
 static void toshiba_rbtx4938_irq_ioc_enable(unsigned int irq);
 static void toshiba_rbtx4938_irq_ioc_disable(unsigned int irq);
-static void toshiba_rbtx4938_irq_ioc_mask_and_ack(unsigned int irq);
-static void toshiba_rbtx4938_irq_ioc_end(unsigned int irq);
-
-DEFINE_SPINLOCK(toshiba_rbtx4938_ioc_lock);
 
 #define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC"
 static struct irq_chip toshiba_rbtx4938_irq_ioc_type = {
 	.typename = TOSHIBA_RBTX4938_IOC_NAME,
-	.startup = toshiba_rbtx4938_irq_ioc_startup,
-	.shutdown = toshiba_rbtx4938_irq_ioc_shutdown,
-	.enable = toshiba_rbtx4938_irq_ioc_enable,
-	.disable = toshiba_rbtx4938_irq_ioc_disable,
-	.ack = toshiba_rbtx4938_irq_ioc_mask_and_ack,
-	.end = toshiba_rbtx4938_irq_ioc_end,
-	.set_affinity = NULL
+	.ack = toshiba_rbtx4938_irq_ioc_disable,
+	.mask = toshiba_rbtx4938_irq_ioc_disable,
+	.mask_ack = toshiba_rbtx4938_irq_ioc_disable,
+	.unmask = toshiba_rbtx4938_irq_ioc_enable,
 };
 
 #define TOSHIBA_RBTX4938_IOC_INTR_ENAB 0xb7f02000
@@ -142,77 +133,36 @@
 	int i;
 
 	for (i = TOSHIBA_RBTX4938_IRQ_IOC_BEG;
-	     i <= TOSHIBA_RBTX4938_IRQ_IOC_END; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 3;
-		irq_desc[i].chip = &toshiba_rbtx4938_irq_ioc_type;
-	}
+	     i <= TOSHIBA_RBTX4938_IRQ_IOC_END; i++)
+		set_irq_chip_and_handler(i, &toshiba_rbtx4938_irq_ioc_type,
+					 handle_level_irq);
 
 	setup_irq(RBTX4938_IRQ_IOCINT,
 		  &toshiba_rbtx4938_irq_ioc_action);
 }
 
-static unsigned int
-toshiba_rbtx4938_irq_ioc_startup(unsigned int irq)
-{
-	toshiba_rbtx4938_irq_ioc_enable(irq);
-
-	return 0;
-}
-
-static void
-toshiba_rbtx4938_irq_ioc_shutdown(unsigned int irq)
-{
-	toshiba_rbtx4938_irq_ioc_disable(irq);
-}
-
 static void
 toshiba_rbtx4938_irq_ioc_enable(unsigned int irq)
 {
-	unsigned long flags;
 	volatile unsigned char v;
 
-	spin_lock_irqsave(&toshiba_rbtx4938_ioc_lock, flags);
-
 	v = TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
 	v |= (1 << (irq - TOSHIBA_RBTX4938_IRQ_IOC_BEG));
 	TX4938_WR08(TOSHIBA_RBTX4938_IOC_INTR_ENAB, v);
 	mmiowb();
 	TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
-
-	spin_unlock_irqrestore(&toshiba_rbtx4938_ioc_lock, flags);
 }
 
 static void
 toshiba_rbtx4938_irq_ioc_disable(unsigned int irq)
 {
-	unsigned long flags;
 	volatile unsigned char v;
 
-	spin_lock_irqsave(&toshiba_rbtx4938_ioc_lock, flags);
-
 	v = TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
 	v &= ~(1 << (irq - TOSHIBA_RBTX4938_IRQ_IOC_BEG));
 	TX4938_WR08(TOSHIBA_RBTX4938_IOC_INTR_ENAB, v);
 	mmiowb();
 	TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
-
-	spin_unlock_irqrestore(&toshiba_rbtx4938_ioc_lock, flags);
-}
-
-static void
-toshiba_rbtx4938_irq_ioc_mask_and_ack(unsigned int irq)
-{
-	toshiba_rbtx4938_irq_ioc_disable(irq);
-}
-
-static void
-toshiba_rbtx4938_irq_ioc_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-		toshiba_rbtx4938_irq_ioc_enable(irq);
-	}
 }
 
 extern void __init txx9_spi_irqinit(int irc_irq);
diff --git a/arch/mips/vr41xx/Kconfig b/arch/mips/vr41xx/Kconfig
index 92f41f6..c8dfd80 100644
--- a/arch/mips/vr41xx/Kconfig
+++ b/arch/mips/vr41xx/Kconfig
@@ -6,6 +6,7 @@
 	select ISA
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config IBM_WORKPAD
 	bool "Support for IBM WorkPad z50"
@@ -15,6 +16,7 @@
 	select ISA
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config NEC_CMBVR4133
 	bool "Support for NEC CMB-VR4133"
@@ -39,6 +41,7 @@
 	select IRQ_CPU
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 	help
 	  The TANBAC VR4131 multichip module(TB0225) and
 	  the TANBAC VR4131DIMM(TB0229) are MIPS-based platforms
@@ -71,6 +74,7 @@
 	select IRQ_CPU
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config ZAO_CAPCELLA
 	bool "Support for ZAO Networks Capcella"
@@ -80,6 +84,7 @@
 	select IRQ_CPU
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config PCI_VR41XX
 	bool "Add PCI control unit support of NEC VR4100 series"
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index c215c0d..c075261 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -417,14 +417,7 @@
 
 EXPORT_SYMBOL(vr41xx_disable_bcuint);
 
-static unsigned int startup_sysint1_irq(unsigned int irq)
-{
-	icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
-
-	return 0; /* never anything pending */
-}
-
-static void shutdown_sysint1_irq(unsigned int irq)
+static void disable_sysint1_irq(unsigned int irq)
 {
 	icu1_clear(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
 }
@@ -434,33 +427,15 @@
 	icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
 }
 
-#define disable_sysint1_irq	shutdown_sysint1_irq
-#define ack_sysint1_irq		shutdown_sysint1_irq
-
-static void end_sysint1_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
-}
-
 static struct irq_chip sysint1_irq_type = {
 	.typename	= "SYSINT1",
-	.startup	= startup_sysint1_irq,
-	.shutdown	= shutdown_sysint1_irq,
-	.enable		= enable_sysint1_irq,
-	.disable	= disable_sysint1_irq,
-	.ack		= ack_sysint1_irq,
-	.end		= end_sysint1_irq,
+	.ack		= disable_sysint1_irq,
+	.mask		= disable_sysint1_irq,
+	.mask_ack	= disable_sysint1_irq,
+	.unmask		= enable_sysint1_irq,
 };
 
-static unsigned int startup_sysint2_irq(unsigned int irq)
-{
-	icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
-
-	return 0; /* never anything pending */
-}
-
-static void shutdown_sysint2_irq(unsigned int irq)
+static void disable_sysint2_irq(unsigned int irq)
 {
 	icu2_clear(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
 }
@@ -470,23 +445,12 @@
 	icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
 }
 
-#define disable_sysint2_irq	shutdown_sysint2_irq
-#define ack_sysint2_irq		shutdown_sysint2_irq
-
-static void end_sysint2_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
-}
-
 static struct irq_chip sysint2_irq_type = {
 	.typename	= "SYSINT2",
-	.startup	= startup_sysint2_irq,
-	.shutdown	= shutdown_sysint2_irq,
-	.enable		= enable_sysint2_irq,
-	.disable	= disable_sysint2_irq,
-	.ack		= ack_sysint2_irq,
-	.end		= end_sysint2_irq,
+	.ack		= disable_sysint2_irq,
+	.mask		= disable_sysint2_irq,
+	.mask_ack	= disable_sysint2_irq,
+	.unmask		= enable_sysint2_irq,
 };
 
 static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
@@ -723,10 +687,12 @@
 	icu2_write(MGIUINTHREG, 0xffff);
 
 	for (i = SYSINT1_IRQ_BASE; i <= SYSINT1_IRQ_LAST; i++)
-		irq_desc[i].chip = &sysint1_irq_type;
+		set_irq_chip_and_handler(i, &sysint1_irq_type,
+					 handle_level_irq);
 
 	for (i = SYSINT2_IRQ_BASE; i <= SYSINT2_IRQ_LAST; i++)
-		irq_desc[i].chip = &sysint2_irq_type;
+		set_irq_chip_and_handler(i, &sysint2_irq_type,
+					 handle_level_irq);
 
 	cascade_irq(INT0_IRQ, icu_get_irq);
 	cascade_irq(INT1_IRQ, icu_get_irq);
diff --git a/arch/mips/vr41xx/nec-cmbvr4133/irq.c b/arch/mips/vr41xx/nec-cmbvr4133/irq.c
index 2483487..a039bb7 100644
--- a/arch/mips/vr41xx/nec-cmbvr4133/irq.c
+++ b/arch/mips/vr41xx/nec-cmbvr4133/irq.c
@@ -30,17 +30,6 @@
 
 extern int vr4133_rockhopper;
 
-static unsigned int startup_i8259_irq(unsigned int irq)
-{
-	enable_8259A_irq(irq - I8259_IRQ_BASE);
-	return 0;
-}
-
-static void shutdown_i8259_irq(unsigned int irq)
-{
-	disable_8259A_irq(irq - I8259_IRQ_BASE);
-}
-
 static void enable_i8259_irq(unsigned int irq)
 {
 	enable_8259A_irq(irq - I8259_IRQ_BASE);
@@ -64,11 +53,10 @@
 
 static struct irq_chip i8259_irq_type = {
 	.typename       = "XT-PIC",
-	.startup        = startup_i8259_irq,
-	.shutdown       = shutdown_i8259_irq,
-	.enable         = enable_i8259_irq,
-	.disable        = disable_i8259_irq,
 	.ack            = ack_i8259_irq,
+	.mask		= disable_i8259_irq,
+	.mask_ack	= ack_i8259_irq,
+	.unmask		= enable_i8259_irq,
 	.end            = end_i8259_irq,
 };
 
@@ -104,7 +92,7 @@
 	}
 
 	for (i = I8259_IRQ_BASE; i <= I8259_IRQ_LAST; i++)
-		irq_desc[i].chip = &i8259_irq_type;
+		set_irq_chip(i, &i8259_irq_type);
 
 	setup_irq(I8259_SLAVE_IRQ, &i8259_slave_cascade);
 
diff --git a/arch/parisc/kernel/binfmt_elf32.c b/arch/parisc/kernel/binfmt_elf32.c
index 1e64e7b..ecb10a4 100644
--- a/arch/parisc/kernel/binfmt_elf32.c
+++ b/arch/parisc/kernel/binfmt_elf32.c
@@ -75,7 +75,6 @@
 	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
 };
 
-#define elf_addr_t	unsigned int
 #define init_elf_binfmt init_elf32_binfmt
 
 #define ELF_PLATFORM  ("PARISC32\0")
diff --git a/arch/parisc/lib/checksum.c b/arch/parisc/lib/checksum.c
index 8a1e080..462696d 100644
--- a/arch/parisc/lib/checksum.c
+++ b/arch/parisc/lib/checksum.c
@@ -101,11 +101,14 @@
 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
  */
-unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
+/*
+ * why bother folding?
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum)
 {
 	unsigned int result = do_csum(buff, len);
 	addc(result, sum);
-	return from32to16(result);
+	return (__force __wsum)from32to16(result);
 }
 
 EXPORT_SYMBOL(csum_partial);
@@ -113,8 +116,8 @@
 /*
  * copy while checksumming, otherwise like csum_partial
  */
-unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst,
-				       int len, unsigned int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				       int len, __wsum sum)
 {
 	/*
 	 * It's 2:30 am and I don't feel like doing it real ...
@@ -131,9 +134,9 @@
  * Copy from userspace and compute checksum.  If we catch an exception
  * then zero the rest of the buffer.
  */
-unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
-					unsigned char *dst, int len,
-					unsigned int sum, int *err_ptr)
+__wsum csum_partial_copy_from_user(const void __user *src,
+					void *dst, int len,
+					__wsum sum, int *err_ptr)
 {
 	int missing;
 
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 64785e4..641f9c9 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -152,7 +152,7 @@
 	const struct exception_table_entry *fix;
 	unsigned long acc_type;
 
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
 	down_read(&mm->mmap_sem);
diff --git a/arch/powerpc/.gitignore b/arch/powerpc/.gitignore
new file mode 100644
index 0000000..a1a869c
--- /dev/null
+++ b/arch/powerpc/.gitignore
@@ -0,0 +1 @@
+include
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 0673dbe..291c95a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -112,7 +112,7 @@
 	default 6xx
 
 config CLASSIC32
-	bool "6xx/7xx/74xx"
+	bool "52xx/6xx/7xx/74xx"
 	select PPC_FPU
 	select 6xx
 	help
@@ -121,16 +121,18 @@
 	  versions (821, 823, 850, 855, 860, 52xx, 82xx, 83xx), the AMCC
 	  embedded versions (403 and 405) and the high end 64 bit Power
 	  processors (POWER 3, POWER4, and IBM PPC970 also known as G5).
+
+	  This option is the catch-all for 6xx types, including some of the
+	  embedded versions.  Unless you see an option for the specific
+	  chip family you are using, you want this option.
 	  
-	  Unless you are building a kernel for one of the embedded processor
-	  systems, 64 bit IBM RS/6000 or an Apple G5, choose 6xx.
+	  You do not want this if you are building a kernel for a 64-bit
+	  IBM RS/6000 or an Apple G5.
+	  
+	  If unsure, select this option.
+	  
 	  Note that the kernel runs in 32-bit mode even on 64-bit chips.
 
-config PPC_52xx
-	bool "Freescale 52xx"
-	select 6xx
-	select PPC_FPU
-	
 config PPC_82xx
 	bool "Freescale 82xx"
 	select 6xx
@@ -160,9 +162,11 @@
 
 config 40x
 	bool "AMCC 40x"
+	select PPC_DCR_NATIVE
 
 config 44x
 	bool "AMCC 44x"
+	select PPC_DCR_NATIVE
 
 config 8xx
 	bool "Freescale 8xx"
@@ -208,6 +212,24 @@
 	bool
 	default y if PPC64
 
+config PPC_DCR_NATIVE
+	bool
+	default n
+
+config PPC_DCR_MMIO
+	bool
+	default n
+
+config PPC_DCR
+	bool
+	depends on PPC_DCR_NATIVE || PPC_DCR_MMIO
+	default y
+
+config PPC_OF_PLATFORM_PCI
+	bool
+	depends on PPC64 # not supported on 32 bits yet
+	default n
+
 config BOOKE
 	bool
 	depends on E200 || E500
@@ -227,6 +249,7 @@
 config PHYS_64BIT
 	bool 'Large physical address support' if E500
 	depends on 44x || E500
+	select RESOURCES_64BIT
 	default y if 44x
 	---help---
 	  This option enables kernel support for larger than 32-bit physical
@@ -369,11 +392,13 @@
 	select PPC_RTAS
 	select RTAS_ERROR_LOGGING
 	select PPC_UDBG_16550
+	select PPC_NATIVE
 	default y
 
 config PPC_ISERIES
 	bool "IBM Legacy iSeries"
 	depends on PPC_MULTIPLATFORM && PPC64
+	select PPC_INDIRECT_IO
 
 config PPC_CHRP
 	bool "Common Hardware Reference Platform (CHRP) based machines"
@@ -384,14 +409,35 @@
 	select PPC_RTAS
 	select PPC_MPC106
 	select PPC_UDBG_16550
+	select PPC_NATIVE
 	default y
 
+config PPC_MPC52xx
+	bool
+	default n
+
+config PPC_EFIKA
+	bool "bPlan Efika 5k2. MPC5200B based computer"
+	depends on PPC_MULTIPLATFORM && PPC32
+	select PPC_RTAS
+	select RTAS_PROC
+	select PPC_MPC52xx
+	select PPC_NATIVE
+	default y
+
+config PPC_LITE5200
+	bool "Freescale Lite5200 Eval Board"
+	depends on PPC_MULTIPLATFORM && PPC32
+	select PPC_MPC52xx
+	default n
+
 config PPC_PMAC
 	bool "Apple PowerMac based machines"
 	depends on PPC_MULTIPLATFORM
 	select MPIC
 	select PPC_INDIRECT_PCI if PPC32
 	select PPC_MPC106 if PPC32
+	select PPC_NATIVE
 	default y
 
 config PPC_PMAC64
@@ -411,6 +457,7 @@
 	select PPC_I8259
 	select PPC_INDIRECT_PCI
 	select PPC_UDBG_16550
+	select PPC_NATIVE
 	default y
 
 config PPC_MAPLE
@@ -422,10 +469,11 @@
 	select GENERIC_TBSYNC
 	select PPC_UDBG_16550
 	select PPC_970_NAP
+	select PPC_NATIVE
 	default n
 	help
           This option enables support for the Maple 970FX Evaluation Board.
-	  For more informations, refer to <http://www.970eval.com>
+	  For more information, refer to <http://www.970eval.com>
 
 config PPC_PASEMI
 	depends on PPC_MULTIPLATFORM && PPC64
@@ -434,6 +482,7 @@
 	select MPIC
 	select PPC_UDBG_16550
 	select GENERIC_TBSYNC
+	select PPC_NATIVE
 	help
 	  This option enables support for PA Semi's PWRficient line
 	  of SoC processors, including PA6T-1682M
@@ -445,6 +494,11 @@
 config PPC_CELL_NATIVE
 	bool
 	select PPC_CELL
+	select PPC_DCR_MMIO
+	select PPC_OF_PLATFORM_PCI
+	select PPC_INDIRECT_IO
+	select PPC_NATIVE
+	select MPIC
 	default n
 
 config PPC_IBM_CELL_BLADE
@@ -456,6 +510,22 @@
 	select PPC_UDBG_16550
 	select UDBG_RTAS_CONSOLE
 
+config PPC_PS3
+	bool "Sony PS3"
+	depends on PPC_MULTIPLATFORM && PPC64
+	select PPC_CELL
+	help
+	  This option enables support for the Sony PS3 game console
+	  and other platforms using the PS3 hypervisor.
+
+config PPC_NATIVE
+	bool
+	depends on PPC_MULTIPLATFORM
+	help
+	  Support for running natively on the hardware, i.e. without
+	  a hypervisor. This option is not user-selectable but should
+	  be selected by all platforms that need it.
+
 config UDBG_RTAS_CONSOLE
 	bool "RTAS based debug console"
 	depends on PPC_RTAS
@@ -517,6 +587,15 @@
 	bool
 	default n
 
+config PPC_INDIRECT_IO
+	bool
+	select GENERIC_IOMAP
+	default n
+
+config GENERIC_IOMAP
+	bool
+	default n
+
 source "drivers/cpufreq/Kconfig"
 
 config CPU_FREQ_PMAC
@@ -594,12 +673,6 @@
 
 	  If in doubt, say N here.
 
-config PPC_TODC
-	depends on EMBEDDED6xx
-	bool "Generic Time-of-day Clock (TODC) support"
-	---help---
-	  This adds support for many TODC/RTC chips.
-
 endmenu
 
 source arch/powerpc/platforms/embedded6xx/Kconfig
@@ -610,6 +683,7 @@
 source arch/powerpc/platforms/86xx/Kconfig
 source arch/powerpc/platforms/8xx/Kconfig
 source arch/powerpc/platforms/cell/Kconfig
+source arch/powerpc/platforms/ps3/Kconfig
 
 menu "Kernel options"
 
@@ -790,7 +864,6 @@
 
 config CMDLINE_BOOL
 	bool "Default bootloader kernel arguments"
-	depends on !PPC_ISERIES
 
 config CMDLINE
 	string "Initial kernel command string"
@@ -880,7 +953,7 @@
 
 config PCI
 	bool "PCI support" if 40x || CPM2 || PPC_83xx || PPC_85xx || PPC_86xx \
-		|| PPC_MPC52xx || (EMBEDDED && PPC_ISERIES) || MPC7448HPC2
+		|| PPC_MPC52xx || (EMBEDDED && PPC_ISERIES) || MPC7448HPC2 || PPC_PS3
 	default y if !40x && !CPM2 && !8xx && !APUS && !PPC_83xx \
 		&& !PPC_85xx && !PPC_86xx
 	default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 5ad149b..f0e51ed 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -77,7 +77,7 @@
 
 config XMON
 	bool "Include xmon kernel debugger"
-	depends on DEBUGGER && !PPC_ISERIES
+	depends on DEBUGGER
 	help
 	  Include in-kernel hooks for the xmon kernel monitor/debugger.
 	  Unless you are intending to debug the kernel, say N here.
@@ -98,6 +98,15 @@
 	  xmon is normally disabled unless booted with 'xmon=on'.
 	  Use 'xmon=off' to disable xmon init during runtime.
 
+config XMON_DISASSEMBLY
+	bool "Include disassembly support in xmon"
+	depends on XMON
+	default y
+	help
+	  Include support for disassembling in xmon. You probably want
+	  to say Y here, unless you're building for a memory-constrained
+	  system.
+
 config IRQSTACKS
 	bool "Use separate kernel stacks when processing interrupts"
 	depends on PPC64
@@ -116,7 +125,7 @@
 
 config BOOTX_TEXT
 	bool "Support for early boot text console (BootX or OpenFirmware only)"
-	depends PPC_OF && !PPC_ISERIES
+	depends PPC_OF
 	help
 	  Say Y here to see progress messages from the boot firmware in text
 	  mode. Requires either BootX or Open Firmware.
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 45c9ad2..0734b2f 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -1,19 +1,32 @@
 addnote
+empty.c
+hack-coff
 infblock.c
 infblock.h
 infcodes.c
 infcodes.h
 inffast.c
 inffast.h
+inffixed.h
 inflate.c
+inflate.h
 inftrees.c
 inftrees.h
 infutil.c
 infutil.h
 kernel-vmlinux.strip.c
 kernel-vmlinux.strip.gz
+mktree
 uImage
 zImage
+zImage.chrp
+zImage.coff
+zImage.coff.lds
+zImage.lds
+zImage.miboot
+zImage.pmac
+zImage.pseries
+zImage.sandpoint
 zImage.vmode
 zconf.h
 zlib.h
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 4b2be61..343dbcf 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -40,7 +40,8 @@
 $(addprefix $(obj)/,$(zlib) main.o): $(addprefix $(obj)/,$(zliblinuxheader)) \
 		$(addprefix $(obj)/,$(zlibheader))
 
-src-wlib := string.S stdio.c main.c div64.S $(zlib)
+src-wlib := string.S stdio.c main.c flatdevtree.c flatdevtree_misc.c \
+		ns16550.c serial.c simple_alloc.c div64.S util.S $(zlib)
 src-plat := of.c
 src-boot := crt0.S $(src-wlib) $(src-plat) empty.c
 
@@ -74,7 +75,7 @@
 	@cp $< $@
 
 clean-files := $(zlib) $(zlibheader) $(zliblinuxheader) \
-		$(obj)/empty.c
+		empty.c zImage zImage.coff.lds zImage.lds zImage.sandpoint
 
 quiet_cmd_bootcc = BOOTCC  $@
       cmd_bootcc = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTCFLAGS) -c -o $@ $<
@@ -93,13 +94,13 @@
 $(obj)/wrapper.a: $(obj-wlib)
 	$(call cmd,bootar)
 
-hostprogs-y	:= addnote addRamDisk hack-coff
+hostprogs-y	:= addnote addRamDisk hack-coff mktree
 
 extra-y		:= $(obj)/crt0.o $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
 		   $(obj)/zImage.lds $(obj)/zImage.coff.lds
 
 wrapper		:=$(srctree)/$(src)/wrapper
-wrapperbits	:= $(extra-y) $(addprefix $(obj)/,addnote hack-coff)
+wrapperbits	:= $(extra-y) $(addprefix $(obj)/,addnote hack-coff mktree)
 
 #############
 # Bits for building various flavours of zImage
@@ -148,13 +149,18 @@
 $(obj)/zImage.initrd.miboot: vmlinux $(wrapperbits)
 	$(call cmd,wrap_initrd,miboot)
 
+$(obj)/zImage.ps3: vmlinux
+	$(STRIP) -s -R .comment $< -o $@
+
 $(obj)/uImage: vmlinux $(wrapperbits)
 	$(call cmd,wrap,uboot)
 
 image-$(CONFIG_PPC_PSERIES)		+= zImage.pseries
 image-$(CONFIG_PPC_MAPLE)		+= zImage.pseries
 image-$(CONFIG_PPC_IBM_CELL_BLADE)	+= zImage.pseries
+image-$(CONFIG_PPC_PS3)			+= zImage.ps3
 image-$(CONFIG_PPC_CHRP)		+= zImage.chrp
+image-$(CONFIG_PPC_EFIKA)		+= zImage.chrp
 image-$(CONFIG_PPC_PMAC)		+= zImage.pmac
 image-$(CONFIG_DEFAULT_UIMAGE)		+= uImage
 
@@ -176,3 +182,4 @@
 
 clean-files += $(addprefix $(objtree)/, $(obj-boot) vmlinux.strip.gz)
 clean-files += $(addprefix $(objtree)/, $(obj-boot) vmlinux.bin.gz)
+clean-files += $(image-)
diff --git a/arch/powerpc/boot/dts/kuroboxHG.dts b/arch/powerpc/boot/dts/kuroboxHG.dts
new file mode 100644
index 0000000..d06b0b0
--- /dev/null
+++ b/arch/powerpc/boot/dts/kuroboxHG.dts
@@ -0,0 +1,148 @@
+/*
+ * Device Tree Source for Buffalo KuroboxHG
+ *
+ * Choose CONFIG_LINKSTATION to build a kernel for KuroboxHG, or use
+ * the default configuration linkstation_defconfig.
+ *
+ * Based on sandpoint.dts
+ *
+ * 2006 (c) G. Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This file is licensed under
+ * the terms of the GNU General Public License version 2.  This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+
+XXXX add flash parts, rtc, ??
+
+build with: "dtc -f -I dts -O dtb -o kuroboxHG.dtb -V 16 kuroboxHG.dts"
+
+
+ */
+
+/ {
+	linux,phandle = <1000>;
+	model = "KuroboxHG";
+	compatible = "linkstation";
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	cpus {
+		linux,phandle = <2000>;
+		#cpus = <1>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		PowerPC,603e { /* Really 8241 */
+			linux,phandle = <2100>;
+			linux,boot-cpu;
+			device_type = "cpu";
+			reg = <0>;
+			clock-frequency = <fdad680>;	/* Fixed by bootwrapper */
+			timebase-frequency = <1F04000>; /* Fixed by bootwrapper */
+			bus-frequency = <0>;		/* From bootloader */
+			/* Following required by dtc but not used */
+			i-cache-line-size = <0>;
+			d-cache-line-size = <0>;
+			i-cache-size = <4000>;
+			d-cache-size = <4000>;
+		};
+	};
+
+	memory {
+		linux,phandle = <3000>;
+		device_type = "memory";
+		reg = <00000000 08000000>;
+	};
+
+	soc10x { /* AFAICT need to make soc for 8245's uarts to be defined */
+		linux,phandle = <4000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		#interrupt-cells = <2>;
+		device_type = "soc";
+		compatible = "mpc10x";
+		store-gathering = <0>; /* 0 == off, !0 == on */
+		reg = <80000000 00100000>;
+		ranges = <80000000 80000000 70000000	/* pci mem space */
+			  fc000000 fc000000 00100000	/* EUMB */
+			  fe000000 fe000000 00c00000	/* pci i/o space */
+			  fec00000 fec00000 00300000	/* pci cfg regs */
+			  fef00000 fef00000 00100000>;	/* pci iack */
+
+		i2c@80003000 {
+			linux,phandle = <4300>;
+			device_type = "i2c";
+			compatible = "fsl-i2c";
+			reg = <80003000 1000>;
+			interrupts = <5 2>;
+			interrupt-parent = <4400>;
+		};
+
+		serial@80004500 {
+			linux,phandle = <4511>;
+			device_type = "serial";
+			compatible = "ns16550";
+			reg = <80004500 8>;
+			clock-frequency = <7c044a8>;
+			current-speed = <2580>;
+			interrupts = <9 2>;
+			interrupt-parent = <4400>;
+		};
+
+		serial@80004600 {
+			linux,phandle = <4512>;
+			device_type = "serial";
+			compatible = "ns16550";
+			reg = <80004600 8>;
+			clock-frequency = <7c044a8>;
+			current-speed = <e100>;
+			interrupts = <a 0>;
+			interrupt-parent = <4400>;
+		};
+
+		pic@80040000 {
+			linux,phandle = <4400>;
+			#interrupt-cells = <2>;
+			#address-cells = <0>;
+			device_type = "open-pic";
+			compatible = "chrp,open-pic";
+			interrupt-controller;
+			reg = <80040000 40000>;
+			built-in;
+		};
+
+		pci@fec00000 {
+			linux,phandle = <4500>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+			#interrupt-cells = <1>;
+			device_type = "pci";
+			compatible = "mpc10x-pci";
+			reg = <fec00000 400000>;
+			ranges = <01000000 0        0 fe000000 0 00c00000
+				  02000000 0 80000000 80000000 0 70000000>;
+			bus-range = <0 ff>;
+			clock-frequency = <7f28155>;
+			interrupt-parent = <4400>;
+			interrupt-map-mask = <f800 0 0 7>;
+			interrupt-map = <
+				/* IDSEL 0x11 - IRQ0 ETH */
+				5800 0 0 1 4400 0 1
+				5800 0 0 2 4400 1 1
+				5800 0 0 3 4400 2 1
+				5800 0 0 4 4400 3 1
+				/* IDSEL 0x12 - IRQ1 IDE0 */
+				6000 0 0 1 4400 1 1
+				6000 0 0 2 4400 2 1
+				6000 0 0 3 4400 3 1
+				6000 0 0 4 4400 0 1
+				/* IDSEL 0x14 - IRQ3 USB2.0 */
+				7000 0 0 1 4400 3 1
+				7000 0 0 2 4400 3 1
+				7000 0 0 3 4400 3 1
+				7000 0 0 4 4400 3 1
+			>;
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/lite5200.dts b/arch/powerpc/boot/dts/lite5200.dts
new file mode 100644
index 0000000..8bc0d25
--- /dev/null
+++ b/arch/powerpc/boot/dts/lite5200.dts
@@ -0,0 +1,313 @@
+/*
+ * Lite5200 board Device Tree Source
+ *
+ * Copyright 2006 Secret Lab Technologies Ltd.
+ * Grant Likely <grant.likely@secretlab.ca>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+/ {
+	model = "Lite5200";
+	compatible = "lite5200\0lite52xx\0mpc5200\0mpc52xx";
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	cpus {
+		#cpus = <1>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		PowerPC,5200@0 {
+			device_type = "cpu";
+			reg = <0>;
+			d-cache-line-size = <20>;
+			i-cache-line-size = <20>;
+			d-cache-size = <4000>;		// L1, 16K
+			i-cache-size = <4000>;		// L1, 16K
+			timebase-frequency = <0>;	// from bootloader
+			bus-frequency = <0>;		// from bootloader
+			clock-frequency = <0>;		// from bootloader
+			32-bit;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <00000000 04000000>;	// 64MB
+	};
+
+	soc5200@f0000000 {
+		#interrupt-cells = <3>;
+		device_type = "soc";
+		ranges = <0 f0000000 f0010000>;
+		reg = <f0000000 00010000>;
+		bus-frequency = <0>;		// from bootloader
+
+		cdm@200 {
+			compatible = "mpc5200-cdm\0mpc52xx-cdm";
+			reg = <200 38>;
+		};
+
+		pic@500 {
+			// 5200 interrupts are encoded into two levels;
+			linux,phandle = <500>;
+			interrupt-controller;
+			#interrupt-cells = <3>;
+			device_type = "interrupt-controller";
+			compatible = "mpc5200-pic\0mpc52xx-pic";
+			reg = <500 80>;
+			built-in;
+		};
+
+		gpt@600 {	// General Purpose Timer
+			compatible = "mpc5200-gpt\0mpc52xx-gpt";
+			device_type = "gpt";
+			reg = <600 10>;
+			interrupts = <1 9 0>;
+			interrupt-parent = <500>;
+		};
+
+		gpt@610 {	// General Purpose Timer
+			compatible = "mpc5200-gpt\0mpc52xx-gpt";
+			device_type = "gpt";
+			reg = <610 10>;
+			interrupts = <1 a 0>;
+			interrupt-parent = <500>;
+		};
+
+		gpt@620 {	// General Purpose Timer
+			compatible = "mpc5200-gpt\0mpc52xx-gpt";
+			device_type = "gpt";
+			reg = <620 10>;
+			interrupts = <1 b 0>;
+			interrupt-parent = <500>;
+		};
+
+		gpt@630 {	// General Purpose Timer
+			compatible = "mpc5200-gpt\0mpc52xx-gpt";
+			device_type = "gpt";
+			reg = <630 10>;
+			interrupts = <1 c 0>;
+			interrupt-parent = <500>;
+		};
+
+		gpt@640 {	// General Purpose Timer
+			compatible = "mpc5200-gpt\0mpc52xx-gpt";
+			device_type = "gpt";
+			reg = <640 10>;
+			interrupts = <1 d 0>;
+			interrupt-parent = <500>;
+		};
+
+		gpt@650 {	// General Purpose Timer
+			compatible = "mpc5200-gpt\0mpc52xx-gpt";
+			device_type = "gpt";
+			reg = <650 10>;
+			interrupts = <1 e 0>;
+			interrupt-parent = <500>;
+		};
+
+		gpt@660 {	// General Purpose Timer
+			compatible = "mpc5200-gpt\0mpc52xx-gpt";
+			device_type = "gpt";
+			reg = <660 10>;
+			interrupts = <1 f 0>;
+			interrupt-parent = <500>;
+		};
+
+		gpt@670 {	// General Purpose Timer
+			compatible = "mpc5200-gpt\0mpc52xx-gpt";
+			device_type = "gpt";
+			reg = <670 10>;
+			interrupts = <1 10 0>;
+			interrupt-parent = <500>;
+		};
+
+		rtc@800 {	// Real time clock
+			compatible = "mpc5200-rtc\0mpc52xx-rtc";
+			device_type = "rtc";
+			reg = <800 100>;
+			interrupts = <1 5 0 1 6 0>;
+			interrupt-parent = <500>;
+		};
+
+		mscan@900 {
+			device_type = "mscan";
+			compatible = "mpc5200-mscan\0mpc52xx-mscan";
+			interrupts = <2 11 0>;
+			interrupt-parent = <500>;
+			reg = <900 80>;
+		};
+
+		mscan@980 {
+			device_type = "mscan";
+			compatible = "mpc5200-mscan\0mpc52xx-mscan";
+			interrupts = <1 12 0>;
+			interrupt-parent = <500>;
+			reg = <980 80>;
+		};
+
+		gpio@b00 {
+			compatible = "mpc5200-gpio\0mpc52xx-gpio";
+			reg = <b00 40>;
+			interrupts = <1 7 0>;
+			interrupt-parent = <500>;
+		};
+
+		gpio-wkup@b00 {
+			compatible = "mpc5200-gpio-wkup\0mpc52xx-gpio-wkup";
+			reg = <c00 40>;
+			interrupts = <1 8 0 0 3 0>;
+			interrupt-parent = <500>;
+		};
+
+		pci@0d00 {
+			#interrupt-cells = <1>;
+			#size-cells = <2>;
+			#address-cells = <3>;
+			device_type = "pci";
+			compatible = "mpc5200-pci\0mpc52xx-pci";
+			reg = <d00 100>;
+			interrupt-map-mask = <f800 0 0 7>;
+			interrupt-map = <c000 0 0 1 500 0 0 3
+			                 c000 0 0 2 500 0 0 3
+			                 c000 0 0 3 500 0 0 3
+			                 c000 0 0 4 500 0 0 3>;
+			clock-frequency = <0>; // From boot loader
+			interrupts = <2 8 0 2 9 0 2 a 0>;
+			interrupt-parent = <500>;
+			bus-range = <0 0>;
+			ranges = <42000000 0 80000000 80000000 0 20000000
+			          02000000 0 a0000000 a0000000 0 10000000
+			          01000000 0 00000000 b0000000 0 01000000>;
+		};
+
+		spi@f00 {
+			device_type = "spi";
+			compatible = "mpc5200-spi\0mpc52xx-spi";
+			reg = <f00 20>;
+			interrupts = <2 d 0 2 e 0>;
+			interrupt-parent = <500>;
+		};
+
+		usb@1000 {
+			device_type = "usb-ohci-be";
+			compatible = "mpc5200-ohci\0mpc52xx-ohci\0ohci-be";
+			reg = <1000 ff>;
+			interrupts = <2 6 0>;
+			interrupt-parent = <500>;
+		};
+
+		bestcomm@1200 {
+			device_type = "dma-controller";
+			compatible = "mpc5200-bestcomm\0mpc52xx-bestcomm";
+			reg = <1200 80>;
+			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
+			              3 4 0  3 5 0  3 6 0  3 7 0
+			              3 8 0  3 9 0  3 a 0  3 b 0
+			              3 c 0  3 d 0  3 e 0  3 f 0>;
+			interrupt-parent = <500>;
+		};
+
+		xlb@1f00 {
+			compatible = "mpc5200-xlb\0mpc52xx-xlb";
+			reg = <1f00 100>;
+		};
+
+		serial@2000 {		// PSC1
+			device_type = "serial";
+			compatible = "mpc5200-psc-uart\0mpc52xx-psc-uart";
+			port-number = <0>;  // Logical port assignment
+			reg = <2000 100>;
+			interrupts = <2 1 0>;
+			interrupt-parent = <500>;
+		};
+
+		// PSC2 in spi mode example
+		spi@2200 {		// PSC2
+			device_type = "spi";
+			compatible = "mpc5200-psc-spi\0mpc52xx-psc-spi";
+			reg = <2200 100>;
+			interrupts = <2 2 0>;
+			interrupt-parent = <500>;
+		};
+
+		// PSC3 in CODEC mode example
+		i2s@2400 {		// PSC3
+			device_type = "i2s";
+			compatible = "mpc5200-psc-i2s\0mpc52xx-psc-i2s";
+			reg = <2400 100>;
+			interrupts = <2 3 0>;
+			interrupt-parent = <500>;
+		};
+
+		// PSC4 unconfigured
+		//serial@2600 {		// PSC4
+		//	device_type = "serial";
+		//	compatible = "mpc5200-psc-uart\0mpc52xx-psc-uart";
+		//	reg = <2600 100>;
+		//	interrupts = <2 b 0>;
+		//	interrupt-parent = <500>;
+		//};
+
+		// PSC5 unconfigured
+		//serial@2800 {		// PSC5
+		//	device_type = "serial";
+		//	compatible = "mpc5200-psc-uart\0mpc52xx-psc-uart";
+		//	reg = <2800 100>;
+		//	interrupts = <2 c 0>;
+		//	interrupt-parent = <500>;
+		//};
+
+		// PSC6 in AC97 mode example
+		ac97@2c00 {		// PSC6
+			device_type = "ac97";
+			compatible = "mpc5200-psc-ac97\0mpc52xx-psc-ac97";
+			reg = <2c00 100>;
+			interrupts = <2 4 0>;
+			interrupt-parent = <500>;
+		};
+
+		ethernet@3000 {
+			device_type = "network";
+			compatible = "mpc5200-fec\0mpc52xx-fec";
+			reg = <3000 800>;
+			mac-address = [ 02 03 04 05 06 07 ]; // Bad!
+			interrupts = <2 5 0>;
+			interrupt-parent = <500>;
+		};
+
+		ata@3a00 {
+			device_type = "ata";
+			compatible = "mpc5200-ata\0mpc52xx-ata";
+			reg = <3a00 100>;
+			interrupts = <2 7 0>;
+			interrupt-parent = <500>;
+		};
+
+		i2c@3d00 {
+			device_type = "i2c";
+			compatible = "mpc5200-i2c\0mpc52xx-i2c";
+			reg = <3d00 40>;
+			interrupts = <2 f 0>;
+			interrupt-parent = <500>;
+		};
+
+		i2c@3d40 {
+			device_type = "i2c";
+			compatible = "mpc5200-i2c\0mpc52xx-i2c";
+			reg = <3d40 40>;
+			interrupts = <2 10 0>;
+			interrupt-parent = <500>;
+		};
+		sram@8000 {
+			device_type = "sram";
+			compatible = "mpc5200-sram\0mpc52xx-sram\0sram";
+			reg = <8000 4000>;
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts
new file mode 100644
index 0000000..81cb764
--- /dev/null
+++ b/arch/powerpc/boot/dts/lite5200b.dts
@@ -0,0 +1,318 @@
+/*
+ * Lite5200B board Device Tree Source
+ *
+ * Copyright 2006 Secret Lab Technologies Ltd.
+ * Grant Likely <grant.likely@secretlab.ca>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+/ {
+	model = "Lite5200b";
+	compatible = "lite5200b\0lite52xx\0mpc5200b\0mpc52xx";
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	cpus {
+		#cpus = <1>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		PowerPC,5200@0 {
+			device_type = "cpu";
+			reg = <0>;
+			d-cache-line-size = <20>;
+			i-cache-line-size = <20>;
+			d-cache-size = <4000>;		// L1, 16K
+			i-cache-size = <4000>;		// L1, 16K
+			timebase-frequency = <0>;	// from bootloader
+			bus-frequency = <0>;		// from bootloader
+			clock-frequency = <0>;		// from bootloader
+			32-bit;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <00000000 10000000>;	// 256MB
+	};
+
+	soc5200@f0000000 {
+		#interrupt-cells = <3>;
+		device_type = "soc";
+		ranges = <0 f0000000 f0010000>;
+		reg = <f0000000 00010000>;
+		bus-frequency = <0>;		// from bootloader
+
+		cdm@200 {
+			compatible = "mpc5200b-cdm\0mpc52xx-cdm";
+			reg = <200 38>;
+		};
+
+		pic@500 {
+			// 5200 interrupts are encoded into two levels;
+			linux,phandle = <500>;
+			interrupt-controller;
+			#interrupt-cells = <3>;
+			device_type = "interrupt-controller";
+			compatible = "mpc5200b-pic\0mpc52xx-pic";
+			reg = <500 80>;
+			built-in;
+		};
+
+		gpt@600 {	// General Purpose Timer
+			compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+			device_type = "gpt";
+			reg = <600 10>;
+			interrupts = <1 9 0>;
+			interrupt-parent = <500>;
+		};
+
+		gpt@610 {	// General Purpose Timer
+			compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+			device_type = "gpt";
+			reg = <610 10>;
+			interrupts = <1 a 0>;
+			interrupt-parent = <500>;
+		};
+
+		gpt@620 {	// General Purpose Timer
+			compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+			device_type = "gpt";
+			reg = <620 10>;
+			interrupts = <1 b 0>;
+			interrupt-parent = <500>;
+		};
+
+		gpt@630 {	// General Purpose Timer
+			compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+			device_type = "gpt";
+			reg = <630 10>;
+			interrupts = <1 c 0>;
+			interrupt-parent = <500>;
+		};
+
+		gpt@640 {	// General Purpose Timer
+			compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+			device_type = "gpt";
+			reg = <640 10>;
+			interrupts = <1 d 0>;
+			interrupt-parent = <500>;
+		};
+
+		gpt@650 {	// General Purpose Timer
+			compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+			device_type = "gpt";
+			reg = <650 10>;
+			interrupts = <1 e 0>;
+			interrupt-parent = <500>;
+		};
+
+		gpt@660 {	// General Purpose Timer
+			compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+			device_type = "gpt";
+			reg = <660 10>;
+			interrupts = <1 f 0>;
+			interrupt-parent = <500>;
+		};
+
+		gpt@670 {	// General Purpose Timer
+			compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+			device_type = "gpt";
+			reg = <670 10>;
+			interrupts = <1 10 0>;
+			interrupt-parent = <500>;
+		};
+
+		rtc@800 {	// Real time clock
+			compatible = "mpc5200b-rtc\0mpc52xx-rtc";
+			device_type = "rtc";
+			reg = <800 100>;
+			interrupts = <1 5 0 1 6 0>;
+			interrupt-parent = <500>;
+		};
+
+		mscan@900 {
+			device_type = "mscan";
+			compatible = "mpc5200b-mscan\0mpc52xx-mscan";
+			interrupts = <2 11 0>;
+			interrupt-parent = <500>;
+			reg = <900 80>;
+		};
+
+		mscan@980 {
+			device_type = "mscan";
+			compatible = "mpc5200b-mscan\0mpc52xx-mscan";
+			interrupts = <1 12 0>;
+			interrupt-parent = <500>;
+			reg = <980 80>;
+		};
+
+		gpio@b00 {
+			compatible = "mpc5200b-gpio\0mpc52xx-gpio";
+			reg = <b00 40>;
+			interrupts = <1 7 0>;
+			interrupt-parent = <500>;
+		};
+
+		gpio-wkup@b00 {
+			compatible = "mpc5200b-gpio-wkup\0mpc52xx-gpio-wkup";
+			reg = <c00 40>;
+			interrupts = <1 8 0 0 3 0>;
+			interrupt-parent = <500>;
+		};
+
+		pci@0d00 {
+			#interrupt-cells = <1>;
+			#size-cells = <2>;
+			#address-cells = <3>;
+			device_type = "pci";
+			compatible = "mpc5200b-pci\0mpc52xx-pci";
+			reg = <d00 100>;
+			interrupt-map-mask = <f800 0 0 7>;
+			interrupt-map = <c000 0 0 1 500 0 0 3 // 1st slot
+			                 c000 0 0 2 500 1 1 3
+			                 c000 0 0 3 500 1 2 3
+			                 c000 0 0 4 500 1 3 3
+
+			                 c800 0 0 1 500 1 1 3 // 2nd slot
+			                 c800 0 0 2 500 1 2 3
+			                 c800 0 0 3 500 1 3 3
+			                 c800 0 0 4 500 0 0 3>;
+			clock-frequency = <0>; // From boot loader
+			interrupts = <2 8 0 2 9 0 2 a 0>;
+			interrupt-parent = <500>;
+			bus-range = <0 0>;
+			ranges = <42000000 0 80000000 80000000 0 20000000
+			          02000000 0 a0000000 a0000000 0 10000000
+			          01000000 0 00000000 b0000000 0 01000000>;
+		};
+
+		spi@f00 {
+			device_type = "spi";
+			compatible = "mpc5200b-spi\0mpc52xx-spi";
+			reg = <f00 20>;
+			interrupts = <2 d 0 2 e 0>;
+			interrupt-parent = <500>;
+		};
+
+		usb@1000 {
+			device_type = "usb-ohci-be";
+			compatible = "mpc5200b-ohci\0mpc52xx-ohci\0ohci-be";
+			reg = <1000 ff>;
+			interrupts = <2 6 0>;
+			interrupt-parent = <500>;
+		};
+
+		bestcomm@1200 {
+			device_type = "dma-controller";
+			compatible = "mpc5200b-bestcomm\0mpc52xx-bestcomm";
+			reg = <1200 80>;
+			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
+			              3 4 0  3 5 0  3 6 0  3 7 0
+			              3 8 0  3 9 0  3 a 0  3 b 0
+			              3 c 0  3 d 0  3 e 0  3 f 0>;
+			interrupt-parent = <500>;
+		};
+
+		xlb@1f00 {
+			compatible = "mpc5200b-xlb\0mpc52xx-xlb";
+			reg = <1f00 100>;
+		};
+
+		serial@2000 {		// PSC1
+			device_type = "serial";
+			compatible = "mpc5200b-psc-uart\0mpc52xx-psc-uart";
+			port-number = <0>;  // Logical port assignment
+			reg = <2000 100>;
+			interrupts = <2 1 0>;
+			interrupt-parent = <500>;
+		};
+
+		// PSC2 in spi mode example
+		spi@2200 {		// PSC2
+			device_type = "spi";
+			compatible = "mpc5200b-psc-spi\0mpc52xx-psc-spi";
+			reg = <2200 100>;
+			interrupts = <2 2 0>;
+			interrupt-parent = <500>;
+		};
+
+		// PSC3 in CODEC mode example
+		i2s@2400 {		// PSC3
+			device_type = "i2s";
+			compatible = "mpc5200b-psc-i2s\0mpc52xx-psc-i2s";
+			reg = <2400 100>;
+			interrupts = <2 3 0>;
+			interrupt-parent = <500>;
+		};
+
+		// PSC4 unconfigured
+		//serial@2600 {		// PSC4
+		//	device_type = "serial";
+		//	compatible = "mpc5200b-psc-uart\0mpc52xx-psc-uart";
+		//	reg = <2600 100>;
+		//	interrupts = <2 b 0>;
+		//	interrupt-parent = <500>;
+		//};
+
+		// PSC5 unconfigured
+		//serial@2800 {		// PSC5
+		//	device_type = "serial";
+		//	compatible = "mpc5200b-psc-uart\0mpc52xx-psc-uart";
+		//	reg = <2800 100>;
+		//	interrupts = <2 c 0>;
+		//	interrupt-parent = <500>;
+		//};
+
+		// PSC6 in AC97 mode example
+		ac97@2c00 {		// PSC6
+			device_type = "ac97";
+			compatible = "mpc5200b-psc-ac97\0mpc52xx-psc-ac97";
+			reg = <2c00 100>;
+			interrupts = <2 4 0>;
+			interrupt-parent = <500>;
+		};
+
+		ethernet@3000 {
+			device_type = "network";
+			compatible = "mpc5200b-fec\0mpc52xx-fec";
+			reg = <3000 800>;
+			mac-address = [ 02 03 04 05 06 07 ]; // Bad!
+			interrupts = <2 5 0>;
+			interrupt-parent = <500>;
+		};
+
+		ata@3a00 {
+			device_type = "ata";
+			compatible = "mpc5200b-ata\0mpc52xx-ata";
+			reg = <3a00 100>;
+			interrupts = <2 7 0>;
+			interrupt-parent = <500>;
+		};
+
+		i2c@3d00 {
+			device_type = "i2c";
+			compatible = "mpc5200b-i2c\0mpc52xx-i2c";
+			reg = <3d00 40>;
+			interrupts = <2 f 0>;
+			interrupt-parent = <500>;
+		};
+
+		i2c@3d40 {
+			device_type = "i2c";
+			compatible = "mpc5200b-i2c\0mpc52xx-i2c";
+			reg = <3d40 40>;
+			interrupts = <2 10 0>;
+			interrupt-parent = <500>;
+		};
+		sram@8000 {
+			device_type = "sram";
+			compatible = "mpc5200b-sram\0mpc52xx-sram\0sram";
+			reg = <8000 4000>;
+		};
+	};
+};
diff --git a/arch/powerpc/boot/dts/mpc7448hpc2.dts b/arch/powerpc/boot/dts/mpc7448hpc2.dts
index d7b985e..c4d9562 100644
--- a/arch/powerpc/boot/dts/mpc7448hpc2.dts
+++ b/arch/powerpc/boot/dts/mpc7448hpc2.dts
@@ -161,29 +161,41 @@
 			interrupt-map = <
 
 				/* IDSEL 0x11 */
-				0800 0 0 1 7400 24 0
-				0800 0 0 2 7400 25 0
-				0800 0 0 3 7400 26 0
-				0800 0 0 4 7400 27 0
+				0800 0 0 1 1180 24 0
+				0800 0 0 2 1180 25 0
+				0800 0 0 3 1180 26 0
+				0800 0 0 4 1180 27 0
 
 				/* IDSEL 0x12 */
-				1000 0 0 1 7400 25 0
-				1000 0 0 2 7400 26 0
-				1000 0 0 3 7400 27 0
-				1000 0 0 4 7400 24 0
+				1000 0 0 1 1180 25 0
+				1000 0 0 2 1180 26 0
+				1000 0 0 3 1180 27 0
+				1000 0 0 4 1180 24 0
 
 				/* IDSEL 0x13 */
-				1800 0 0 1 7400 26 0
-				1800 0 0 2 7400 27 0
-				1800 0 0 3 7400 24 0
-				1800 0 0 4 7400 25 0
+				1800 0 0 1 1180 26 0
+				1800 0 0 2 1180 27 0
+				1800 0 0 3 1180 24 0
+				1800 0 0 4 1180 25 0
 
 				/* IDSEL 0x14 */
-				2000 0 0 1 7400 27 0
-				2000 0 0 2 7400 24 0
-				2000 0 0 3 7400 25 0
-				2000 0 0 4 7400 26 0
+				2000 0 0 1 1180 27 0
+				2000 0 0 2 1180 24 0
+				2000 0 0 3 1180 25 0
+				2000 0 0 4 1180 26 0
 				>;
+			router@1180 {
+				linux,phandle = <1180>;
+				clock-frequency = <0>;
+				interrupt-controller;
+				device_type = "pic-router";
+				#address-cells = <0>;
+				#interrupt-cells = <2>;
+				built-in;
+				big-endian;
+				interrupts = <17 2>;
+				interrupt-parent = <7400>;
+			};
 		};
 	};
 
diff --git a/arch/powerpc/boot/flatdevtree.c b/arch/powerpc/boot/flatdevtree.c
new file mode 100644
index 0000000..c76c194
--- /dev/null
+++ b/arch/powerpc/boot/flatdevtree.c
@@ -0,0 +1,880 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright Pantelis Antoniou 2006
+ * Copyright (C) IBM Corporation 2006
+ *
+ * Authors: Pantelis Antoniou <pantelis@embeddedalley.com>
+ *	    Hollis Blanchard <hollisb@us.ibm.com>
+ *	    Mark A. Greer <mgreer@mvista.com>
+ *	    Paul Mackerras <paulus@samba.org>
+ */
+
+#include <string.h>
+#include <stddef.h>
+#include "flatdevtree.h"
+#include "flatdevtree_env.h"
+
+#define _ALIGN(x, al)	(((x) + (al) - 1) & ~((al) - 1))
+
+/* Routines for keeping node ptrs returned by ft_find_device current */
+/* First entry not used b/c it would return 0 and be taken as NULL/error */
+static void *ft_node_add(struct ft_cxt *cxt, char *node)
+{
+	unsigned int i;
+
+	for (i = 1; i < cxt->nodes_used; i++)	/* already there? */
+		if (cxt->node_tbl[i] == node)
+			return (void *)i;
+
+	if (cxt->nodes_used < cxt->node_max) {
+		cxt->node_tbl[cxt->nodes_used] = node;
+		return (void *)cxt->nodes_used++;
+	}
+
+	return NULL;
+}
+
+static char *ft_node_ph2node(struct ft_cxt *cxt, const void *phandle)
+{
+	unsigned int i = (unsigned int)phandle;
+
+	if (i < cxt->nodes_used)
+		return cxt->node_tbl[i];
+	return NULL;
+}
+
+static void ft_node_update_before(struct ft_cxt *cxt, char *addr, int shift)
+{
+	unsigned int i;
+
+	if (shift == 0)
+		return;
+
+	for (i = 1; i < cxt->nodes_used; i++)
+		if (cxt->node_tbl[i] < addr)
+			cxt->node_tbl[i] += shift;
+}
+
+static void ft_node_update_after(struct ft_cxt *cxt, char *addr, int shift)
+{
+	unsigned int i;
+
+	if (shift == 0)
+		return;
+
+	for (i = 1; i < cxt->nodes_used; i++)
+		if (cxt->node_tbl[i] >= addr)
+			cxt->node_tbl[i] += shift;
+}
+
+/* Struct used to return info from ft_next() */
+struct ft_atom {
+	u32 tag;
+	const char *name;
+	void *data;
+	u32 size;
+};
+
+/* Set ptrs to current one's info; return addr of next one */
+static char *ft_next(struct ft_cxt *cxt, char *p, struct ft_atom *ret)
+{
+	u32 sz;
+
+	if (p >= cxt->rgn[FT_STRUCT].start + cxt->rgn[FT_STRUCT].size)
+		return NULL;
+
+	ret->tag = be32_to_cpu(*(u32 *) p);
+	p += 4;
+
+	switch (ret->tag) {	/* Tag */
+	case OF_DT_BEGIN_NODE:
+		ret->name = p;
+		ret->data = (void *)(p - 4);	/* start of node */
+		p += _ALIGN(strlen(p) + 1, 4);
+		break;
+	case OF_DT_PROP:
+		ret->size = sz = be32_to_cpu(*(u32 *) p);
+		ret->name = cxt->str_anchor + be32_to_cpu(*(u32 *) (p + 4));
+		ret->data = (void *)(p + 8);
+		p += 8 + _ALIGN(sz, 4);
+		break;
+	case OF_DT_END_NODE:
+	case OF_DT_NOP:
+		break;
+	case OF_DT_END:
+	default:
+		p = NULL;
+		break;
+	}
+
+	return p;
+}
+
+#define HDR_SIZE	_ALIGN(sizeof(struct boot_param_header), 8)
+#define EXPAND_INCR	1024	/* alloc this much extra when expanding */
+
+/* See if the regions are in the standard order and non-overlapping */
+static int ft_ordered(struct ft_cxt *cxt)
+{
+	char *p = (char *)cxt->bph + HDR_SIZE;
+	enum ft_rgn_id r;
+
+	for (r = FT_RSVMAP; r <= FT_STRINGS; ++r) {
+		if (p > cxt->rgn[r].start)
+			return 0;
+		p = cxt->rgn[r].start + cxt->rgn[r].size;
+	}
+	return p <= (char *)cxt->bph + cxt->max_size;
+}
+
+/* Copy the tree to a newly-allocated region and put things in order */
+static int ft_reorder(struct ft_cxt *cxt, int nextra)
+{
+	unsigned long tot;
+	enum ft_rgn_id r;
+	char *p, *pend;
+	int stroff;
+
+	tot = HDR_SIZE + EXPAND_INCR;
+	for (r = FT_RSVMAP; r <= FT_STRINGS; ++r)
+		tot += cxt->rgn[r].size;
+	if (nextra > 0)
+		tot += nextra;
+	tot = _ALIGN(tot, 8);
+
+	if (!cxt->realloc)
+		return 0;
+	p = cxt->realloc(NULL, tot);
+	if (!p)
+		return 0;
+
+	memcpy(p, cxt->bph, sizeof(struct boot_param_header));
+	/* offsets get fixed up later */
+
+	cxt->bph = (struct boot_param_header *)p;
+	cxt->max_size = tot;
+	pend = p + tot;
+	p += HDR_SIZE;
+
+	memcpy(p, cxt->rgn[FT_RSVMAP].start, cxt->rgn[FT_RSVMAP].size);
+	cxt->rgn[FT_RSVMAP].start = p;
+	p += cxt->rgn[FT_RSVMAP].size;
+
+	memcpy(p, cxt->rgn[FT_STRUCT].start, cxt->rgn[FT_STRUCT].size);
+	ft_node_update_after(cxt, cxt->rgn[FT_STRUCT].start,
+			p - cxt->rgn[FT_STRUCT].start);
+	cxt->p += p - cxt->rgn[FT_STRUCT].start;
+	cxt->rgn[FT_STRUCT].start = p;
+
+	p = pend - cxt->rgn[FT_STRINGS].size;
+	memcpy(p, cxt->rgn[FT_STRINGS].start, cxt->rgn[FT_STRINGS].size);
+	stroff = cxt->str_anchor - cxt->rgn[FT_STRINGS].start;
+	cxt->rgn[FT_STRINGS].start = p;
+	cxt->str_anchor = p + stroff;
+
+	cxt->isordered = 1;
+	return 1;
+}
+
+static inline char *prev_end(struct ft_cxt *cxt, enum ft_rgn_id r)
+{
+	if (r > FT_RSVMAP)
+		return cxt->rgn[r - 1].start + cxt->rgn[r - 1].size;
+	return (char *)cxt->bph + HDR_SIZE;
+}
+
+static inline char *next_start(struct ft_cxt *cxt, enum ft_rgn_id r)
+{
+	if (r < FT_STRINGS)
+		return cxt->rgn[r + 1].start;
+	return (char *)cxt->bph + cxt->max_size;
+}
+
+/*
+ * See if we can expand region rgn by nextra bytes by using up
+ * free space after or before the region.
+ */
+static int ft_shuffle(struct ft_cxt *cxt, char **pp, enum ft_rgn_id rgn,
+		int nextra)
+{
+	char *p = *pp;
+	char *rgn_start, *rgn_end;
+
+	rgn_start = cxt->rgn[rgn].start;
+	rgn_end = rgn_start + cxt->rgn[rgn].size;
+	if (nextra <= 0 || rgn_end + nextra <= next_start(cxt, rgn)) {
+		/* move following stuff */
+		if (p < rgn_end) {
+			if (nextra < 0)
+				memmove(p, p - nextra, rgn_end - p + nextra);
+			else
+				memmove(p + nextra, p, rgn_end - p);
+			if (rgn == FT_STRUCT)
+				ft_node_update_after(cxt, p, nextra);
+		}
+		cxt->rgn[rgn].size += nextra;
+		if (rgn == FT_STRINGS)
+			/* assumes strings only added at beginning */
+			cxt->str_anchor += nextra;
+		return 1;
+	}
+	if (prev_end(cxt, rgn) <= rgn_start - nextra) {
+		/* move preceding stuff */
+		if (p > rgn_start) {
+			memmove(rgn_start - nextra, rgn_start, p - rgn_start);
+			if (rgn == FT_STRUCT)
+				ft_node_update_before(cxt, p, -nextra);
+		}
+		*pp -= nextra;
+		cxt->rgn[rgn].start -= nextra;
+		cxt->rgn[rgn].size += nextra;
+		return 1;
+	}
+	return 0;
+}
+
+static int ft_make_space(struct ft_cxt *cxt, char **pp, enum ft_rgn_id rgn,
+			 int nextra)
+{
+	unsigned long size, ssize, tot;
+	char *str, *next;
+	enum ft_rgn_id r;
+
+	if (!cxt->isordered && !ft_reorder(cxt, nextra))
+		return 0;
+	if (ft_shuffle(cxt, pp, rgn, nextra))
+		return 1;
+
+	/* See if there is space after the strings section */
+	ssize = cxt->rgn[FT_STRINGS].size;
+	if (cxt->rgn[FT_STRINGS].start + ssize
+			< (char *)cxt->bph + cxt->max_size) {
+		/* move strings up as far as possible */
+		str = (char *)cxt->bph + cxt->max_size - ssize;
+		cxt->str_anchor += str - cxt->rgn[FT_STRINGS].start;
+		memmove(str, cxt->rgn[FT_STRINGS].start, ssize);
+		cxt->rgn[FT_STRINGS].start = str;
+		/* enough space now? */
+		if (rgn >= FT_STRUCT && ft_shuffle(cxt, pp, rgn, nextra))
+			return 1;
+	}
+
+	/* how much total free space is there following this region? */
+	tot = 0;
+	for (r = rgn; r < FT_STRINGS; ++r) {
+		char *r_end = cxt->rgn[r].start + cxt->rgn[r].size;
+		tot += next_start(cxt, rgn) - r_end;
+	}
+
+	/* cast is to shut gcc up; we know nextra >= 0 */
+	if (tot < (unsigned int)nextra) {
+		/* have to reallocate */
+		char *newp, *new_start;
+		int shift;
+
+		if (!cxt->realloc)
+			return 0;
+		size = _ALIGN(cxt->max_size + (nextra - tot) + EXPAND_INCR, 8);
+		newp = cxt->realloc(cxt->bph, size);
+		if (!newp)
+			return 0;
+		cxt->max_size = size;
+		shift = newp - (char *)cxt->bph;
+
+		if (shift) { /* realloc can return same addr */
+			cxt->bph = (struct boot_param_header *)newp;
+			ft_node_update_after(cxt, cxt->rgn[FT_STRUCT].start,
+					shift);
+			for (r = FT_RSVMAP; r <= FT_STRINGS; ++r) {
+				new_start = cxt->rgn[r].start + shift;
+				cxt->rgn[r].start = new_start;
+			}
+			*pp += shift;
+			cxt->str_anchor += shift;
+		}
+
+		/* move strings up to the end */
+		str = newp + size - ssize;
+		cxt->str_anchor += str - cxt->rgn[FT_STRINGS].start;
+		memmove(str, cxt->rgn[FT_STRINGS].start, ssize);
+		cxt->rgn[FT_STRINGS].start = str;
+
+		if (ft_shuffle(cxt, pp, rgn, nextra))
+			return 1;
+	}
+
+	/* must be FT_RSVMAP and we need to move FT_STRUCT up */
+	if (rgn == FT_RSVMAP) {
+		next = cxt->rgn[FT_RSVMAP].start + cxt->rgn[FT_RSVMAP].size
+			+ nextra;
+		ssize = cxt->rgn[FT_STRUCT].size;
+		if (next + ssize >= cxt->rgn[FT_STRINGS].start)
+			return 0;	/* "can't happen" */
+		memmove(next, cxt->rgn[FT_STRUCT].start, ssize);
+		ft_node_update_after(cxt, cxt->rgn[FT_STRUCT].start, nextra);
+		cxt->rgn[FT_STRUCT].start = next;
+
+		if (ft_shuffle(cxt, pp, rgn, nextra))
+			return 1;
+	}
+
+	return 0;		/* "can't happen" */
+}
+
+static void ft_put_word(struct ft_cxt *cxt, u32 v)
+{
+	*(u32 *) cxt->p = cpu_to_be32(v);
+	cxt->p += 4;
+}
+
+static void ft_put_bin(struct ft_cxt *cxt, const void *data, unsigned int sz)
+{
+	unsigned long sza = _ALIGN(sz, 4);
+
+	/* zero out the alignment gap if necessary */
+	if (sz < sza)
+		*(u32 *) (cxt->p + sza - 4) = 0;
+
+	/* copy in the data */
+	memcpy(cxt->p, data, sz);
+
+	cxt->p += sza;
+}
+
+int ft_begin_node(struct ft_cxt *cxt, const char *name)
+{
+	unsigned long nlen = strlen(name) + 1;
+	unsigned long len = 8 + _ALIGN(nlen, 4);
+
+	if (!ft_make_space(cxt, &cxt->p, FT_STRUCT, len))
+		return -1;
+	ft_put_word(cxt, OF_DT_BEGIN_NODE);
+	ft_put_bin(cxt, name, strlen(name) + 1);
+	return 0;
+}
+
+void ft_end_node(struct ft_cxt *cxt)
+{
+	ft_put_word(cxt, OF_DT_END_NODE);
+}
+
+void ft_nop(struct ft_cxt *cxt)
+{
+	if (ft_make_space(cxt, &cxt->p, FT_STRUCT, 4))
+		ft_put_word(cxt, OF_DT_NOP);
+}
+
+#define NO_STRING	0x7fffffff
+
+static int lookup_string(struct ft_cxt *cxt, const char *name)
+{
+	char *p, *end;
+
+	p = cxt->rgn[FT_STRINGS].start;
+	end = p + cxt->rgn[FT_STRINGS].size;
+	while (p < end) {
+		if (strcmp(p, (char *)name) == 0)
+			return p - cxt->str_anchor;
+		p += strlen(p) + 1;
+	}
+
+	return NO_STRING;
+}
+
+/* lookup string and insert if not found */
+static int map_string(struct ft_cxt *cxt, const char *name)
+{
+	int off;
+	char *p;
+
+	off = lookup_string(cxt, name);
+	if (off != NO_STRING)
+		return off;
+	p = cxt->rgn[FT_STRINGS].start;
+	if (!ft_make_space(cxt, &p, FT_STRINGS, strlen(name) + 1))
+		return NO_STRING;
+	strcpy(p, name);
+	return p - cxt->str_anchor;
+}
+
+int ft_prop(struct ft_cxt *cxt, const char *name, const void *data,
+		unsigned int sz)
+{
+	int off, len;
+
+	off = lookup_string(cxt, name);
+	if (off == NO_STRING)
+		return -1;
+
+	len = 12 + _ALIGN(sz, 4);
+	if (!ft_make_space(cxt, &cxt->p, FT_STRUCT, len))
+		return -1;
+
+	ft_put_word(cxt, OF_DT_PROP);
+	ft_put_word(cxt, sz);
+	ft_put_word(cxt, off);
+	ft_put_bin(cxt, data, sz);
+	return 0;
+}
+
+int ft_prop_str(struct ft_cxt *cxt, const char *name, const char *str)
+{
+	return ft_prop(cxt, name, str, strlen(str) + 1);
+}
+
+int ft_prop_int(struct ft_cxt *cxt, const char *name, unsigned int val)
+{
+	u32 v = cpu_to_be32((u32) val);
+
+	return ft_prop(cxt, name, &v, 4);
+}
+
+/* Calculate the size of the reserved map */
+static unsigned long rsvmap_size(struct ft_cxt *cxt)
+{
+	struct ft_reserve *res;
+
+	res = (struct ft_reserve *)cxt->rgn[FT_RSVMAP].start;
+	while (res->start || res->len)
+		++res;
+	return (char *)(res + 1) - cxt->rgn[FT_RSVMAP].start;
+}
+
+/* Calculate the size of the struct region by stepping through it */
+static unsigned long struct_size(struct ft_cxt *cxt)
+{
+	char *p = cxt->rgn[FT_STRUCT].start;
+	char *next;
+	struct ft_atom atom;
+
+	/* make check in ft_next happy */
+	if (cxt->rgn[FT_STRUCT].size == 0)
+		cxt->rgn[FT_STRUCT].size = 0xfffffffful - (unsigned long)p;
+
+	while ((next = ft_next(cxt, p, &atom)) != NULL)
+		p = next;
+	return p + 4 - cxt->rgn[FT_STRUCT].start;
+}
+
+/* add `adj' on to all string offset values in the struct area */
+static void adjust_string_offsets(struct ft_cxt *cxt, int adj)
+{
+	char *p = cxt->rgn[FT_STRUCT].start;
+	char *next;
+	struct ft_atom atom;
+	int off;
+
+	while ((next = ft_next(cxt, p, &atom)) != NULL) {
+		if (atom.tag == OF_DT_PROP) {
+			off = be32_to_cpu(*(u32 *) (p + 8));
+			*(u32 *) (p + 8) = cpu_to_be32(off + adj);
+		}
+		p = next;
+	}
+}
+
+/* start construction of the flat OF tree from scratch */
+void ft_begin(struct ft_cxt *cxt, void *blob, unsigned int max_size,
+		void *(*realloc_fn) (void *, unsigned long))
+{
+	struct boot_param_header *bph = blob;
+	char *p;
+	struct ft_reserve *pres;
+
+	/* clear the cxt */
+	memset(cxt, 0, sizeof(*cxt));
+
+	cxt->bph = bph;
+	cxt->max_size = max_size;
+	cxt->realloc = realloc_fn;
+	cxt->isordered = 1;
+
+	/* zero everything in the header area */
+	memset(bph, 0, sizeof(*bph));
+
+	bph->magic = cpu_to_be32(OF_DT_HEADER);
+	bph->version = cpu_to_be32(0x10);
+	bph->last_comp_version = cpu_to_be32(0x10);
+
+	/* start pointers */
+	cxt->rgn[FT_RSVMAP].start = p = blob + HDR_SIZE;
+	cxt->rgn[FT_RSVMAP].size = sizeof(struct ft_reserve);
+	pres = (struct ft_reserve *)p;
+	cxt->rgn[FT_STRUCT].start = p += sizeof(struct ft_reserve);
+	cxt->rgn[FT_STRUCT].size = 4;
+	cxt->rgn[FT_STRINGS].start = blob + max_size;
+	cxt->rgn[FT_STRINGS].size = 0;
+
+	/* init rsvmap and struct */
+	pres->start = 0;
+	pres->len = 0;
+	*(u32 *) p = cpu_to_be32(OF_DT_END);
+
+	cxt->str_anchor = blob;
+}
+
+/* open up an existing blob to be examined or modified */
+int ft_open(struct ft_cxt *cxt, void *blob, unsigned int max_size,
+		unsigned int max_find_device,
+		void *(*realloc_fn) (void *, unsigned long))
+{
+	struct boot_param_header *bph = blob;
+
+	/* can't cope with version < 16 */
+	if (be32_to_cpu(bph->version) < 16)
+		return -1;
+
+	/* clear the cxt */
+	memset(cxt, 0, sizeof(*cxt));
+
+	/* alloc node_tbl to track node ptrs returned by ft_find_device */
+	++max_find_device;
+	cxt->node_tbl = realloc_fn(NULL, max_find_device * sizeof(char *));
+	if (!cxt->node_tbl)
+		return -1;
+	memset(cxt->node_tbl, 0, max_find_device * sizeof(char *));
+	cxt->node_max = max_find_device;
+	cxt->nodes_used = 1;	/* don't use idx 0 b/c looks like NULL */
+
+	cxt->bph = bph;
+	cxt->max_size = max_size;
+	cxt->realloc = realloc_fn;
+
+	cxt->rgn[FT_RSVMAP].start = blob + be32_to_cpu(bph->off_mem_rsvmap);
+	cxt->rgn[FT_RSVMAP].size = rsvmap_size(cxt);
+	cxt->rgn[FT_STRUCT].start = blob + be32_to_cpu(bph->off_dt_struct);
+	cxt->rgn[FT_STRUCT].size = struct_size(cxt);
+	cxt->rgn[FT_STRINGS].start = blob + be32_to_cpu(bph->off_dt_strings);
+	cxt->rgn[FT_STRINGS].size = be32_to_cpu(bph->dt_strings_size);
+	/* Leave as '0' to force first ft_make_space call to do a ft_reorder
+	 * and move dt to an area allocated by realloc.
+	cxt->isordered = ft_ordered(cxt);
+	*/
+
+	cxt->p = cxt->rgn[FT_STRUCT].start;
+	cxt->str_anchor = cxt->rgn[FT_STRINGS].start;
+
+	return 0;
+}
+
+/* add a reserved physical area to the rsvmap */
+int ft_add_rsvmap(struct ft_cxt *cxt, u64 physaddr, u64 size)
+{
+	char *p;
+	struct ft_reserve *pres;
+
+	p = cxt->rgn[FT_RSVMAP].start + cxt->rgn[FT_RSVMAP].size
+		- sizeof(struct ft_reserve);
+	if (!ft_make_space(cxt, &p, FT_RSVMAP, sizeof(struct ft_reserve)))
+		return -1;
+
+	pres = (struct ft_reserve *)p;
+	pres->start = cpu_to_be64(physaddr);
+	pres->len = cpu_to_be64(size);
+
+	return 0;
+}
+
+void ft_begin_tree(struct ft_cxt *cxt)
+{
+	cxt->p = cxt->rgn[FT_STRUCT].start;
+}
+
+void ft_end_tree(struct ft_cxt *cxt)
+{
+	struct boot_param_header *bph = cxt->bph;
+	char *p, *oldstr, *str, *endp;
+	unsigned long ssize;
+	int adj;
+
+	if (!cxt->isordered)
+		return;		/* we haven't touched anything */
+
+	/* adjust string offsets */
+	oldstr = cxt->rgn[FT_STRINGS].start;
+	adj = cxt->str_anchor - oldstr;
+	if (adj)
+		adjust_string_offsets(cxt, adj);
+
+	/* make strings end on 8-byte boundary */
+	ssize = cxt->rgn[FT_STRINGS].size;
+	endp = (char *)_ALIGN((unsigned long)cxt->rgn[FT_STRUCT].start
+			+ cxt->rgn[FT_STRUCT].size + ssize, 8);
+	str = endp - ssize;
+
+	/* move strings down to end of structs */
+	memmove(str, oldstr, ssize);
+	cxt->str_anchor = str;
+	cxt->rgn[FT_STRINGS].start = str;
+
+	/* fill in header fields */
+	p = (char *)bph;
+	bph->totalsize = cpu_to_be32(endp - p);
+	bph->off_mem_rsvmap = cpu_to_be32(cxt->rgn[FT_RSVMAP].start - p);
+	bph->off_dt_struct = cpu_to_be32(cxt->rgn[FT_STRUCT].start - p);
+	bph->off_dt_strings = cpu_to_be32(cxt->rgn[FT_STRINGS].start - p);
+	bph->dt_strings_size = cpu_to_be32(ssize);
+}
+
+void *ft_find_device(struct ft_cxt *cxt, const char *srch_path)
+{
+	char *node;
+
+	/* require absolute path */
+	if (srch_path[0] != '/')
+		return NULL;
+	node = ft_find_descendent(cxt, cxt->rgn[FT_STRUCT].start, srch_path);
+	return ft_node_add(cxt, node);
+}
+
+void *ft_find_descendent(struct ft_cxt *cxt, void *top, const char *srch_path)
+{
+	struct ft_atom atom;
+	char *p;
+	const char *cp, *q;
+	int cl;
+	int depth = -1;
+	int dmatch = 0;
+	const char *path_comp[FT_MAX_DEPTH];
+
+	cp = srch_path;
+	cl = 0;
+	p = top;
+
+	while ((p = ft_next(cxt, p, &atom)) != NULL) {
+		switch (atom.tag) {
+		case OF_DT_BEGIN_NODE:
+			++depth;
+			if (depth != dmatch)
+				break;
+			cxt->genealogy[depth] = atom.data;
+			cxt->genealogy[depth + 1] = NULL;
+			if (depth && !(strncmp(atom.name, cp, cl) == 0
+					&& (atom.name[cl] == '/'
+						|| atom.name[cl] == '\0'
+						|| atom.name[cl] == '@')))
+				break;
+			path_comp[dmatch] = cp;
+			/* it matches so far, advance to next path component */
+			cp += cl;
+			/* skip slashes */
+			while (*cp == '/')
+				++cp;
+			/* we're done if this is the end of the string */
+			if (*cp == 0)
+				return atom.data;
+			/* look for end of this component */
+			q = strchr(cp, '/');
+			if (q)
+				cl = q - cp;
+			else
+				cl = strlen(cp);
+			++dmatch;
+			break;
+		case OF_DT_END_NODE:
+			if (depth == 0)
+				return NULL;
+			if (dmatch > depth) {
+				--dmatch;
+				cl = cp - path_comp[dmatch] - 1;
+				cp = path_comp[dmatch];
+				while (cl > 0 && cp[cl - 1] == '/')
+					--cl;
+			}
+			--depth;
+			break;
+		}
+	}
+	return NULL;
+}
+
+void *ft_get_parent(struct ft_cxt *cxt, const void *phandle)
+{
+	void *node;
+	int d;
+	struct ft_atom atom;
+	char *p;
+
+	node = ft_node_ph2node(cxt, phandle);
+	if (node == NULL)
+		return NULL;
+
+	for (d = 0; cxt->genealogy[d] != NULL; ++d)
+		if (cxt->genealogy[d] == node)
+			return cxt->genealogy[d > 0 ? d - 1 : 0];
+
+	/* have to do it the hard way... */
+	p = cxt->rgn[FT_STRUCT].start;
+	d = 0;
+	while ((p = ft_next(cxt, p, &atom)) != NULL) {
+		switch (atom.tag) {
+		case OF_DT_BEGIN_NODE:
+			cxt->genealogy[d] = atom.data;
+			if (node == atom.data) {
+				/* found it */
+				cxt->genealogy[d + 1] = NULL;
+				return d > 0 ? cxt->genealogy[d - 1] : node;
+			}
+			++d;
+			break;
+		case OF_DT_END_NODE:
+			--d;
+			break;
+		}
+	}
+	return NULL;
+}
+
+int ft_get_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
+		void *buf, const unsigned int buflen)
+{
+	struct ft_atom atom;
+	void *node;
+	char *p;
+	int depth;
+	unsigned int size;
+
+	node = ft_node_ph2node(cxt, phandle);
+	if (node == NULL)
+		return -1;
+
+	depth = 0;
+	p = (char *)node;
+
+	while ((p = ft_next(cxt, p, &atom)) != NULL) {
+		switch (atom.tag) {
+		case OF_DT_BEGIN_NODE:
+			++depth;
+			break;
+		case OF_DT_PROP:
+			if ((depth != 1) || strcmp(atom.name, propname))
+				break;
+			size = min(atom.size, buflen);
+			memcpy(buf, atom.data, size);
+			return atom.size;
+		case OF_DT_END_NODE:
+			if (--depth <= 0)
+				return -1;
+		}
+	}
+	return -1;
+}
+
+int ft_set_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
+		const void *buf, const unsigned int buflen)
+{
+	struct ft_atom atom;
+	void *node;
+	char *p, *next;
+	int nextra, depth;
+
+	node = ft_node_ph2node(cxt, phandle);
+	if (node == NULL)
+		return -1;
+
+	depth = 0;
+	p = node;
+
+	while ((next = ft_next(cxt, p, &atom)) != NULL) {
+		switch (atom.tag) {
+		case OF_DT_BEGIN_NODE:
+			++depth;
+			break;
+		case OF_DT_END_NODE:
+			if (--depth > 0)
+				break;
+			/* haven't found the property, insert here */
+			cxt->p = p;
+			return ft_prop(cxt, propname, buf, buflen);
+		case OF_DT_PROP:
+			if ((depth != 1) || strcmp(atom.name, propname))
+				break;
+			/* found an existing property, overwrite it */
+			nextra = _ALIGN(buflen, 4) - _ALIGN(atom.size, 4);
+			cxt->p = atom.data;
+			if (nextra && !ft_make_space(cxt, &cxt->p, FT_STRUCT,
+						nextra))
+				return -1;
+			*(u32 *) (cxt->p - 8) = cpu_to_be32(buflen);
+			ft_put_bin(cxt, buf, buflen);
+			return 0;
+		}
+		p = next;
+	}
+	return -1;
+}
+
+int ft_del_prop(struct ft_cxt *cxt, const void *phandle, const char *propname)
+{
+	struct ft_atom atom;
+	void *node;
+	char *p, *next;
+	int size;
+
+	node = ft_node_ph2node(cxt, phandle);
+	if (node == NULL)
+		return -1;
+
+	p = node;
+	while ((next = ft_next(cxt, p, &atom)) != NULL) {
+		switch (atom.tag) {
+		case OF_DT_BEGIN_NODE:
+		case OF_DT_END_NODE:
+			return -1;
+		case OF_DT_PROP:
+			if (strcmp(atom.name, propname))
+				break;
+			/* found the property, remove it */
+			size = 12 + _ALIGN(atom.size, 4);
+			cxt->p = p;
+			if (!ft_make_space(cxt, &cxt->p, FT_STRUCT, -size))
+				return -1;
+			return 0;
+		}
+		p = next;
+	}
+	return -1;
+}
+
+void *ft_create_node(struct ft_cxt *cxt, const void *parent, const char *path)
+{
+	struct ft_atom atom;
+	char *p, *next;
+	int depth = 0;
+
+	p = cxt->rgn[FT_STRUCT].start;
+	while ((next = ft_next(cxt, p, &atom)) != NULL) {
+		switch (atom.tag) {
+		case OF_DT_BEGIN_NODE:
+			++depth;
+			if (depth == 1 && strcmp(atom.name, path) == 0)
+				/* duplicate node path, return error */
+				return NULL;
+			break;
+		case OF_DT_END_NODE:
+			--depth;
+			if (depth > 0)
+				break;
+			/* end of node, insert here */
+			cxt->p = p;
+			ft_begin_node(cxt, path);
+			ft_end_node(cxt);
+			return p;
+		}
+		p = next;
+	}
+	return NULL;
+}
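Taken together, the ft_* routines above give the wrapper a small read/modify API over the flattened blob: ft_find_device() resolves an absolute path to a phandle, ft_get_prop()/ft_set_prop() read and rewrite a property (growing or shrinking the struct region through ft_make_space() as needed), and ft_create_node()/ft_del_prop() edit the topology. A minimal caller sketch, assuming an already-opened struct ft_cxt and a simple one-address/one-size "reg" layout; fixup_memory_size() is an illustrative name, not something added by this patch:

#include "flatdevtree.h"

/* Hypothetical fixup: rewrite the size cell of /memory's "reg" property.
 * Values in the blob are big-endian; on the big-endian bootwrapper the
 * cpu_to_be32() wrappers are no-ops, so plain u32 stores are fine. */
static int fixup_memory_size(struct ft_cxt *cxt, u32 mem_bytes)
{
	void *memnode;
	u32 reg[2];

	memnode = ft_find_device(cxt, "/memory");
	if (memnode == NULL)
		return -1;

	if (ft_get_prop(cxt, memnode, "reg", reg, sizeof(reg)) < 0)
		return -1;

	reg[1] = mem_bytes;	/* keep the base address, replace the size */
	return ft_set_prop(cxt, memnode, "reg", reg, sizeof(reg));
}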
diff --git a/arch/powerpc/boot/flatdevtree.h b/arch/powerpc/boot/flatdevtree.h
index 761c8dc..b9cd9f6 100644
--- a/arch/powerpc/boot/flatdevtree.h
+++ b/arch/powerpc/boot/flatdevtree.h
@@ -17,7 +17,7 @@
 #ifndef FLATDEVTREE_H
 #define FLATDEVTREE_H
 
-#include "types.h"
+#include "flatdevtree_env.h"
 
 /* Definitions used by the flattened device tree */
 #define OF_DT_HEADER            0xd00dfeed      /* marker */
@@ -43,4 +43,64 @@
 	u32 dt_strings_size;    /* size of the DT strings block */
 };
 
+struct ft_reserve {
+	u64 start;
+	u64 len;
+};
+
+struct ft_region {
+	char *start;
+	unsigned long size;
+};
+
+enum ft_rgn_id {
+	FT_RSVMAP,
+	FT_STRUCT,
+	FT_STRINGS,
+	FT_N_REGION
+};
+
+#define FT_MAX_DEPTH	50
+
+struct ft_cxt {
+	struct boot_param_header *bph;
+	int max_size;           /* maximum size of tree */
+	int isordered;		/* everything in standard order */
+	void *(*realloc)(void *, unsigned long);
+	char *str_anchor;
+	char *p;		/* current insertion point in structs */
+	struct ft_region rgn[FT_N_REGION];
+	void *genealogy[FT_MAX_DEPTH+1];
+	char **node_tbl;
+	unsigned int node_max;
+	unsigned int nodes_used;
+};
+
+int ft_begin_node(struct ft_cxt *cxt, const char *name);
+void ft_end_node(struct ft_cxt *cxt);
+
+void ft_begin_tree(struct ft_cxt *cxt);
+void ft_end_tree(struct ft_cxt *cxt);
+
+void ft_nop(struct ft_cxt *cxt);
+int ft_prop(struct ft_cxt *cxt, const char *name,
+	    const void *data, unsigned int sz);
+int ft_prop_str(struct ft_cxt *cxt, const char *name, const char *str);
+int ft_prop_int(struct ft_cxt *cxt, const char *name, unsigned int val);
+void ft_begin(struct ft_cxt *cxt, void *blob, unsigned int max_size,
+	      void *(*realloc_fn)(void *, unsigned long));
+int ft_open(struct ft_cxt *cxt, void *blob, unsigned int max_size,
+		unsigned int max_find_device,
+		void *(*realloc_fn)(void *, unsigned long));
+int ft_add_rsvmap(struct ft_cxt *cxt, u64 physaddr, u64 size);
+
+void ft_dump_blob(const void *bphp);
+void ft_merge_blob(struct ft_cxt *cxt, void *blob);
+void *ft_find_device(struct ft_cxt *cxt, const char *srch_path);
+void *ft_find_descendent(struct ft_cxt *cxt, void *top, const char *srch_path);
+int ft_get_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
+		void *buf, const unsigned int buflen);
+int ft_set_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
+		const void *buf, const unsigned int buflen);
+
 #endif /* FLATDEVTREE_H */
diff --git a/arch/powerpc/boot/flatdevtree_env.h b/arch/powerpc/boot/flatdevtree_env.h
new file mode 100644
index 0000000..83bc1c7
--- /dev/null
+++ b/arch/powerpc/boot/flatdevtree_env.h
@@ -0,0 +1,47 @@
+/*
+ * This file adds the header file glue so that the shared files
+ * flatdevicetree.[ch] can compile and work in the powerpc bootwrapper.
+ *
+ * strncmp & strchr copied from <file:lib/string.c>
+ * Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ * Maintained by: Mark A. Greer <mgreer@mvista.com>
+ */
+#ifndef _PPC_BOOT_FLATDEVTREE_ENV_H_
+#define _PPC_BOOT_FLATDEVTREE_ENV_H_
+
+#include <stdarg.h>
+#include <stddef.h>
+#include "types.h"
+#include "string.h"
+#include "stdio.h"
+#include "ops.h"
+
+#define be16_to_cpu(x)		(x)
+#define cpu_to_be16(x)		(x)
+#define be32_to_cpu(x)		(x)
+#define cpu_to_be32(x)		(x)
+#define be64_to_cpu(x)		(x)
+#define cpu_to_be64(x)		(x)
+
+static inline int strncmp(const char *cs, const char *ct, size_t count)
+{
+	signed char __res = 0;
+
+	while (count) {
+		if ((__res = *cs - *ct++) != 0 || !*cs++)
+			break;
+		count--;
+	}
+	return __res;
+}
+
+static inline char *strchr(const char *s, int c)
+{
+	for (; *s != (char)c; ++s)
+		if (*s == '\0')
+			return NULL;
+	return (char *)s;
+}
+
+#endif /* _PPC_BOOT_FLATDEVTREE_ENV_H_ */
diff --git a/arch/powerpc/boot/flatdevtree_misc.c b/arch/powerpc/boot/flatdevtree_misc.c
new file mode 100644
index 0000000..04da38f
--- /dev/null
+++ b/arch/powerpc/boot/flatdevtree_misc.c
@@ -0,0 +1,51 @@
+/*
+ * This file does the necessary interface mapping between the bootwrapper
+ * device tree operations and the interface provided by shared source
+ * files flatdevicetree.[ch].
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2006 (c) MontaVista Software, Inc.  This file is licensed under
+ * the terms of the GNU General Public License version 2.  This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <stddef.h>
+#include "flatdevtree.h"
+#include "ops.h"
+
+static struct ft_cxt cxt;
+
+static void *ft_finddevice(const char *name)
+{
+	return ft_find_device(&cxt, name);
+}
+
+static int ft_getprop(const void *phandle, const char *propname, void *buf,
+		const int buflen)
+{
+	return ft_get_prop(&cxt, phandle, propname, buf, buflen);
+}
+
+static int ft_setprop(const void *phandle, const char *propname,
+		const void *buf, const int buflen)
+{
+	return ft_set_prop(&cxt, phandle, propname, buf, buflen);
+}
+
+static unsigned long ft_finalize(void)
+{
+	ft_end_tree(&cxt);
+	return (unsigned long)cxt.bph;
+}
+
+int ft_init(void *dt_blob, unsigned int max_size, unsigned int max_find_device)
+{
+	dt_ops.finddevice = ft_finddevice;
+	dt_ops.getprop = ft_getprop;
+	dt_ops.setprop = ft_setprop;
+	dt_ops.finalize = ft_finalize;
+
+	return ft_open(&cxt, dt_blob, max_size, max_find_device,
+			platform_ops.realloc);
+}
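ft_init() is the hook a board port calls to adopt a device tree blob: it points dt_ops at the wrappers above and hands the blob to ft_open() together with the platform realloc hook so the tree can grow in place. A sketch of how a platform's platform_init() might use it with the dtb now linked into the zImage; the 4KB headroom and the find-device count are illustrative assumptions, not values taken from this patch:

#include "ops.h"

/* Hypothetical board code: adopt the linked-in dtb, then bring up the
 * serial console from the tree's linux,stdout-path. */
int platform_init(void *promptr, char *dt_blob_start, char *dt_blob_end)
{
	unsigned int blob_size = dt_blob_end - dt_blob_start;

	/* leave headroom so ft_set_prop() can grow the tree in place */
	if (ft_init(dt_blob_start, blob_size + 4 * 1024, 16))
		return -1;

	return serial_console_init();
}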
diff --git a/arch/powerpc/boot/io.h b/arch/powerpc/boot/io.h
new file mode 100644
index 0000000..32974ed
--- /dev/null
+++ b/arch/powerpc/boot/io.h
@@ -0,0 +1,53 @@
+#ifndef _IO_H
+#define _IO_H
+/*
+ * Low-level I/O routines.
+ *
+ * Copied from <file:include/asm-powerpc/io.h> (which has no copyright)
+ */
+static inline int in_8(const volatile unsigned char *addr)
+{
+	int ret;
+
+	__asm__ __volatile__("lbz%U1%X1 %0,%1; twi 0,%0,0; isync"
+			     : "=r" (ret) : "m" (*addr));
+	return ret;
+}
+
+static inline void out_8(volatile unsigned char *addr, int val)
+{
+	__asm__ __volatile__("stb%U0%X0 %1,%0; sync"
+			     : "=m" (*addr) : "r" (val));
+}
+
+static inline unsigned in_le32(const volatile unsigned *addr)
+{
+	unsigned ret;
+
+	__asm__ __volatile__("lwbrx %0,0,%1; twi 0,%0,0; isync"
+			     : "=r" (ret) : "r" (addr), "m" (*addr));
+	return ret;
+}
+
+static inline unsigned in_be32(const volatile unsigned *addr)
+{
+	unsigned ret;
+
+	__asm__ __volatile__("lwz%U1%X1 %0,%1; twi 0,%0,0; isync"
+			     : "=r" (ret) : "m" (*addr));
+	return ret;
+}
+
+static inline void out_le32(volatile unsigned *addr, int val)
+{
+	__asm__ __volatile__("stwbrx %1,0,%2; sync" : "=m" (*addr)
+			     : "r" (val), "r" (addr));
+}
+
+static inline void out_be32(volatile unsigned *addr, int val)
+{
+	__asm__ __volatile__("stw%U0%X0 %1,%0; sync"
+			     : "=m" (*addr) : "r" (val));
+}
+
+#endif /* _IO_H */
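Each accessor pairs the load or store with a twi/isync or sync so device accesses stay ordered and faults are taken precisely; the _le32 pair byte-swaps through lwbrx/stwbrx for little-endian devices, while the _be32 pair keeps native big-endian order. A small polling sketch using them; the register pair and the STATUS_READY bit are hypothetical:

#include "io.h"

#define STATUS_READY	0x00000001	/* hypothetical bit in a LE status register */

/* Spin until a (hypothetical) little-endian status register reports ready,
 * then issue a command word through the byte-swapping accessors. */
static void wait_and_start(volatile unsigned *status, volatile unsigned *cmd,
			   unsigned start_cmd)
{
	while ((in_le32(status) & STATUS_READY) == 0)
		;
	out_le32(cmd, start_cmd);
}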
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index d719bb9..6f6b50d 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -27,6 +27,8 @@
 extern char _vmlinux_end[];
 extern char _initrd_start[];
 extern char _initrd_end[];
+extern char _dtb_start[];
+extern char _dtb_end[];
 
 struct addr_range {
 	unsigned long addr;
@@ -167,7 +169,7 @@
 	return 1;
 }
 
-static void prep_kernel(unsigned long *a1, unsigned long *a2)
+static void prep_kernel(unsigned long a1, unsigned long a2)
 {
 	int len;
 
@@ -203,11 +205,14 @@
 	}
 
 	/*
-	 * Now we try to alloc memory for the initrd (and copy it there)
+	 * Now find the initrd
+	 *
+	 * First, see if we have an image attached to us.  If so,
+	 * allocate memory for it and copy it there.
 	 */
 	initrd.size = (unsigned long)(_initrd_end - _initrd_start);
 	initrd.memsize = initrd.size;
-	if ( initrd.size > 0 ) {
+	if (initrd.size > 0) {
 		printf("Allocating 0x%lx bytes for initrd ...\n\r",
 		       initrd.size);
 		initrd.addr = (unsigned long)malloc((u32)initrd.size);
@@ -216,8 +221,6 @@
 					"ramdisk !\n\r");
 			exit();
 		}
-		*a1 = initrd.addr;
-		*a2 = initrd.size;
 		printf("initial ramdisk moving 0x%lx <- 0x%lx "
 			"(0x%lx bytes)\n\r", initrd.addr,
 			(unsigned long)_initrd_start, initrd.size);
@@ -225,6 +228,12 @@
 			initrd.size);
 		printf("initrd head: 0x%lx\n\r",
 				*((unsigned long *)initrd.addr));
+	} else if (a2 != 0) {
+		/* Otherwise, see if yaboot or another loader gave us an initrd */
+		initrd.addr = a1;
+		initrd.memsize = initrd.size = a2;
+		printf("Using loader supplied initrd at 0x%lx (0x%lx bytes)\n\r",
+		       initrd.addr, initrd.size);
 	}
 
 	/* Eventually gunzip the kernel */
@@ -250,10 +259,6 @@
 	flush_cache((void *)vmlinux.addr, vmlinux.size);
 }
 
-void __attribute__ ((weak)) ft_init(void *dt_blob)
-{
-}
-
 /* A buffer that may be edited by tools operating on a zImage binary so as to
  * edit the command line passed to vmlinux (by setting /chosen/bootargs).
 * The buffer is put in its own section so that tools may locate it more easily.
@@ -285,36 +290,22 @@
 		setprop(devp, "bootargs", buf, strlen(buf) + 1);
 }
 
-/* Section where ft can be tacked on after zImage is built */
-union blobspace {
-	struct boot_param_header hdr;
-	char space[8*1024];
-} dt_blob __attribute__((__section__("__builtin_ft")));
-
 struct platform_ops platform_ops;
 struct dt_ops dt_ops;
 struct console_ops console_ops;
 
 void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
 {
-	int have_dt = 0;
 	kernel_entry_t kentry;
 	char cmdline[COMMAND_LINE_SIZE];
+	unsigned long ft_addr = 0;
 
 	memset(__bss_start, 0, _end - __bss_start);
 	memset(&platform_ops, 0, sizeof(platform_ops));
 	memset(&dt_ops, 0, sizeof(dt_ops));
 	memset(&console_ops, 0, sizeof(console_ops));
 
-	/* Override the dt_ops and device tree if there was an flat dev
-	 * tree attached to the zImage.
-	 */
-	if (dt_blob.hdr.magic == OF_DT_HEADER) {
-		have_dt = 1;
-		ft_init(&dt_blob);
-	}
-
-	if (platform_init(promptr))
+	if (platform_init(promptr, _dtb_start, _dtb_end))
 		exit();
 	if (console_ops.open && (console_ops.open() < 0))
 		exit();
@@ -324,7 +315,7 @@
 	printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r",
 	       _start, sp);
 
-	prep_kernel(&a1, &a2);
+	prep_kernel(a1, a2);
 
 	/* If cmdline came from zimage wrapper or if we can edit the one
 	 * in the dt, print it out and edit it, if possible.
@@ -338,15 +329,23 @@
 		set_cmdline(cmdline);
 	}
 
+	printf("Finalizing device tree...");
+	if (dt_ops.finalize)
+		ft_addr = dt_ops.finalize();
+	if (ft_addr)
+		printf(" flat tree at 0x%lx\n\r", ft_addr);
+	else
+		printf(" using OF tree (promptr=%p)\n\r", promptr);
+
 	if (console_ops.close)
 		console_ops.close();
 
 	kentry = (kernel_entry_t) vmlinux.addr;
-	if (have_dt)
-		kentry(dt_ops.ft_addr(), 0, NULL);
+	if (ft_addr)
+		kentry(ft_addr, 0, NULL);
 	else
 		/* XXX initrd addr/size should be passed in properties */
-		kentry(a1, a2, promptr);
+		kentry(initrd.addr, initrd.size, promptr);
 
 	/* console closed so printf below may not work */
 	printf("Error: Linux kernel returned to zImage boot wrapper!\n\r");
diff --git a/arch/powerpc/boot/mktree.c b/arch/powerpc/boot/mktree.c
new file mode 100644
index 0000000..4cb8929
--- /dev/null
+++ b/arch/powerpc/boot/mktree.c
@@ -0,0 +1,152 @@
+/*
+ * Makes a tree bootable image for IBM Evaluation boards.
+ * Basically, just take a zImage, skip the ELF header, and stuff
+ * a 32 byte header on the front.
+ *
+ * We use htonl, the network byte-order macro, so the header is written
+ * big-endian even when this tool runs on a little-endian host.  It's
+ * non-obvious, but it should work on anything BSD'ish.
+ */
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <netinet/in.h>
+#ifdef __sun__
+#include <inttypes.h>
+#else
+#include <stdint.h>
+#endif
+
+/* This gets tacked on the front of the image.  There are also a few
+ * bytes allocated after the _start label used by the boot rom (see
+ * head.S for details).
+ */
+typedef struct boot_block {
+	uint32_t bb_magic;		/* 0x0052504F */
+	uint32_t bb_dest;		/* Target address of the image */
+	uint32_t bb_num_512blocks;	/* Size, rounded-up, in 512 byte blks */
+	uint32_t bb_debug_flag;	/* Run debugger or image after load */
+	uint32_t bb_entry_point;	/* The image address to start */
+	uint32_t bb_checksum;	/* 32 bit checksum including header */
+	uint32_t reserved[2];
+} boot_block_t;
+
+#define IMGBLK	512
+char	tmpbuf[IMGBLK];
+
+int main(int argc, char *argv[])
+{
+	int	in_fd, out_fd;
+	int	nblks, i;
+	uint	cksum, *cp;
+	struct	stat	st;
+	boot_block_t	bt;
+
+	if (argc < 3) {
+		fprintf(stderr, "usage: %s <zImage-file> <boot-image> [entry-point]\n",argv[0]);
+		exit(1);
+	}
+
+	if (stat(argv[1], &st) < 0) {
+		perror("stat");
+		exit(2);
+	}
+
+	nblks = (st.st_size + IMGBLK) / IMGBLK;
+
+	bt.bb_magic = htonl(0x0052504F);
+
+	/* If we have the optional entry point parameter, use it */
+	if (argc == 4)
+		bt.bb_dest = bt.bb_entry_point = htonl(strtoul(argv[3], NULL, 0));
+	else
+		bt.bb_dest = bt.bb_entry_point = htonl(0x500000);
+
+	/* We know these from the linker command.
+	 * ...and then move it up into memory a little more so the
+	 * relocation can happen.
+	 */
+	bt.bb_num_512blocks = htonl(nblks);
+	bt.bb_debug_flag = 0;
+
+	bt.bb_checksum = 0;
+
+	/* To be neat and tidy :-).
+	*/
+	bt.reserved[0] = 0;
+	bt.reserved[1] = 0;
+
+	if ((in_fd = open(argv[1], O_RDONLY)) < 0) {
+		perror("zImage open");
+		exit(3);
+	}
+
+	if ((out_fd = open(argv[2], (O_RDWR | O_CREAT | O_TRUNC), 0666)) < 0) {
+		perror("bootfile open");
+		exit(3);
+	}
+
+	cksum = 0;
+	cp = (void *)&bt;
+	for (i=0; i<sizeof(bt)/sizeof(uint); i++)
+		cksum += *cp++;
+
+	/* Assume zImage is an ELF file, and skip the 64K header.
+	*/
+	if (read(in_fd, tmpbuf, IMGBLK) != IMGBLK) {
+		fprintf(stderr, "%s is too small to be an ELF image\n",
+				argv[1]);
+		exit(4);
+	}
+
+	if ((*(uint *)tmpbuf) != htonl(0x7f454c46)) {
+		fprintf(stderr, "%s is not an ELF image\n", argv[1]);
+		exit(4);
+	}
+
+	if (lseek(in_fd, (64 * 1024), SEEK_SET) < 0) {
+		fprintf(stderr, "%s failed to seek in ELF image\n", argv[1]);
+		exit(4);
+	}
+
+	nblks -= (64 * 1024) / IMGBLK;
+
+	/* And away we go......
+	*/
+	if (write(out_fd, &bt, sizeof(bt)) != sizeof(bt)) {
+		perror("boot-image write");
+		exit(5);
+	}
+
+	while (nblks-- > 0) {
+		if (read(in_fd, tmpbuf, IMGBLK) < 0) {
+			perror("zImage read");
+			exit(5);
+		}
+		cp = (uint *)tmpbuf;
+		for (i=0; i<sizeof(tmpbuf)/sizeof(uint); i++)
+			cksum += *cp++;
+		if (write(out_fd, tmpbuf, sizeof(tmpbuf)) != sizeof(tmpbuf)) {
+			perror("boot-image write");
+			exit(5);
+		}
+	}
+
+	/* rewrite the header with the computed checksum.
+	*/
+	bt.bb_checksum = htonl(cksum);
+	if (lseek(out_fd, 0, SEEK_SET) < 0) {
+		perror("rewrite seek");
+		exit(1);
+	}
+	if (write(out_fd, &bt, sizeof(bt)) != sizeof(bt)) {
+		perror("boot-image rewrite");
+		exit(1);
+	}
+
+	exit(0);
+}
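The checksum mktree stores is just the 32-bit word sum of the header (with bb_checksum zeroed) plus every word of the padded image, written big-endian; the boot ROM recomputes the same sum to validate what it loaded. A sketch of the verification arithmetic, for illustration only (not a tool added by this patch); it assumes the words have already been byte-swapped to host order on a little-endian host:

#include <stdint.h>
#include <stddef.h>

/* Recompute the treeboot word sum over the 32-byte header plus all
 * 512-byte data blocks.  The stored checksum word is itself part of
 * 'image', so subtract it to reproduce the value mktree computed. */
static uint32_t treeboot_cksum(const uint32_t *image, size_t nwords,
			       uint32_t stored_cksum)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < nwords; i++)
		sum += image[i];

	return sum - stored_cksum;
}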
diff --git a/arch/powerpc/boot/ns16550.c b/arch/powerpc/boot/ns16550.c
new file mode 100644
index 0000000..1ffe72e
--- /dev/null
+++ b/arch/powerpc/boot/ns16550.c
@@ -0,0 +1,74 @@
+/*
+ * 16550 serial console support.
+ *
+ * Original copied from <file:arch/ppc/boot/common/ns16550.c>
+ * (which had no copyright)
+ * Modifications: 2006 (c) MontaVista Software, Inc.
+ *
+ * Modified by: Mark A. Greer <mgreer@mvista.com>
+ */
+#include <stdarg.h>
+#include <stddef.h>
+#include "types.h"
+#include "string.h"
+#include "stdio.h"
+#include "io.h"
+#include "ops.h"
+
+#define UART_DLL	0	/* Out: Divisor Latch Low */
+#define UART_DLM	1	/* Out: Divisor Latch High */
+#define UART_FCR	2	/* Out: FIFO Control Register */
+#define UART_LCR	3	/* Out: Line Control Register */
+#define UART_MCR	4	/* Out: Modem Control Register */
+#define UART_LSR	5	/* In:  Line Status Register */
+#define UART_LSR_THRE	0x20	/* Transmit-hold-register empty */
+#define UART_LSR_DR	0x01	/* Receiver data ready */
+#define UART_MSR	6	/* In:  Modem Status Register */
+#define UART_SCR	7	/* I/O: Scratch Register */
+
+static unsigned char *reg_base;
+static u32 reg_shift;
+
+static int ns16550_open(void)
+{
+	out_8(reg_base + (UART_FCR << reg_shift), 0x06);
+	return 0;
+}
+
+static void ns16550_putc(unsigned char c)
+{
+	while ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_THRE) == 0);
+	out_8(reg_base, c);
+}
+
+static unsigned char ns16550_getc(void)
+{
+	while ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_DR) == 0);
+	return in_8(reg_base);
+}
+
+static u8 ns16550_tstc(void)
+{
+	return ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_DR) != 0);
+}
+
+int ns16550_console_init(void *devp, struct serial_console_data *scdp)
+{
+	int n;
+
+	n = getprop(devp, "virtual-reg", &reg_base, sizeof(reg_base));
+	if (n != sizeof(reg_base))
+		return -1;
+
+	n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
+	if (n != sizeof(reg_shift))
+		reg_shift = 0;
+
+	scdp->open = ns16550_open;
+	scdp->putc = ns16550_putc;
+	scdp->getc = ns16550_getc;
+	scdp->tstc = ns16550_tstc;
+	scdp->close = NULL;
+
+	return 0;
+}
diff --git a/arch/powerpc/boot/of.c b/arch/powerpc/boot/of.c
index 3a71845..0182f38 100644
--- a/arch/powerpc/boot/of.c
+++ b/arch/powerpc/boot/of.c
@@ -256,24 +256,18 @@
 	call_prom("write", 3, 1, of_stdout_handle, buf, len);
 }
 
-int platform_init(void *promptr)
+int platform_init(void *promptr, char *dt_blob_start, char *dt_blob_end)
 {
-	platform_ops.fixups = NULL;
 	platform_ops.image_hdr = of_image_hdr;
 	platform_ops.malloc = of_try_claim;
-	platform_ops.free = NULL;
 	platform_ops.exit = of_exit;
 
 	dt_ops.finddevice = of_finddevice;
 	dt_ops.getprop = of_getprop;
 	dt_ops.setprop = of_setprop;
-	dt_ops.translate_addr = NULL;
 
 	console_ops.open = of_console_open;
 	console_ops.write = of_console_write;
-	console_ops.edit_cmdline = NULL;
-	console_ops.close = NULL;
-	console_ops.data = NULL;
 
 	prom = (int (*)(void *))promptr;
 	return 0;
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index 135eb4b..8abb651 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -22,7 +22,8 @@
 	void	(*fixups)(void);
 	void	(*image_hdr)(const void *);
 	void *	(*malloc)(u32 size);
-	void	(*free)(void *ptr, u32 size);
+	void	(*free)(void *ptr);
+	void *	(*realloc)(void *ptr, unsigned long size);
 	void	(*exit)(void);
 };
 extern struct platform_ops platform_ops;
@@ -30,13 +31,11 @@
 /* Device Tree operations */
 struct dt_ops {
 	void *	(*finddevice)(const char *name);
-	int	(*getprop)(const void *node, const char *name, void *buf,
+	int	(*getprop)(const void *phandle, const char *name, void *buf,
 			const int buflen);
-	int	(*setprop)(const void *node, const char *name,
+	int	(*setprop)(const void *phandle, const char *name,
 			const void *buf, const int buflen);
-	u64	(*translate_addr)(const char *path, const u32 *in_addr,
-			const u32 addr_len);
-	unsigned long (*ft_addr)(void);
+	unsigned long (*finalize)(void);
 };
 extern struct dt_ops dt_ops;
 
@@ -59,10 +58,13 @@
 	void		(*close)(void);
 };
 
-extern int platform_init(void *promptr);
-extern void simple_alloc_init(void);
-extern void ft_init(void *dt_blob);
-extern int serial_console_init(void);
+int platform_init(void *promptr, char *dt_blob_start, char *dt_blob_end);
+int ft_init(void *dt_blob, unsigned int max_size, unsigned int max_find_device);
+int serial_console_init(void);
+int ns16550_console_init(void *devp, struct serial_console_data *scdp);
+void *simple_alloc_init(char *base, u32 heap_size, u32 granularity,
+		u32 max_allocs);
+
 
 static inline void *finddevice(const char *name)
 {
@@ -84,10 +86,10 @@
 	return (platform_ops.malloc) ? platform_ops.malloc(size) : NULL;
 }
 
-static inline void free(void *ptr, u32 size)
+static inline void free(void *ptr)
 {
 	if (platform_ops.free)
-		platform_ops.free(ptr, size);
+		platform_ops.free(ptr);
 }
 
 static inline void exit(void)
diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
new file mode 100644
index 0000000..e8de4cf
--- /dev/null
+++ b/arch/powerpc/boot/serial.c
@@ -0,0 +1,142 @@
+/*
+ * Generic serial console support
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * Code in serial_edit_cmdline() copied from <file:arch/ppc/boot/simple/misc.c>
+ * and was written by Matt Porter <mporter@kernel.crashing.org>.
+ *
+ * 2001,2006 (c) MontaVista Software, Inc.  This file is licensed under
+ * the terms of the GNU General Public License version 2.  This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <stdarg.h>
+#include <stddef.h>
+#include "types.h"
+#include "string.h"
+#include "stdio.h"
+#include "io.h"
+#include "ops.h"
+
+extern void udelay(long delay);
+
+static int serial_open(void)
+{
+	struct serial_console_data *scdp = console_ops.data;
+	return scdp->open();
+}
+
+static void serial_write(char *buf, int len)
+{
+	struct serial_console_data *scdp = console_ops.data;
+
+	while (*buf != '\0')
+		scdp->putc(*buf++);
+}
+
+static void serial_edit_cmdline(char *buf, int len)
+{
+	int timer = 0, count;
+	char ch, *cp;
+	struct serial_console_data *scdp = console_ops.data;
+
+	cp = buf;
+	count = strlen(buf);
+	cp = &buf[count];
+	count++;
+
+	while (timer++ < 5*1000) {
+		if (scdp->tstc()) {
+			while (((ch = scdp->getc()) != '\n') && (ch != '\r')) {
+				/* Test for backspace/delete */
+				if ((ch == '\b') || (ch == '\177')) {
+					if (cp != buf) {
+						cp--;
+						count--;
+						printf("\b \b");
+					}
+				/* Test for ^x/^u (and wipe the line) */
+				} else if ((ch == '\030') || (ch == '\025')) {
+					while (cp != buf) {
+						cp--;
+						count--;
+						printf("\b \b");
+					}
+				} else if (count < len) {
+						*cp++ = ch;
+						count++;
+						scdp->putc(ch);
+				}
+			}
+			break;  /* Exit 'timer' loop */
+		}
+		udelay(1000);  /* 1 msec */
+	}
+	*cp = 0;
+}
+
+static void serial_close(void)
+{
+	struct serial_console_data *scdp = console_ops.data;
+
+	if (scdp->close)
+		scdp->close();
+}
+
+static void *serial_get_stdout_devp(void)
+{
+	void *devp;
+	char devtype[MAX_PROP_LEN];
+	char path[MAX_PATH_LEN];
+
+	devp = finddevice("/chosen");
+	if (devp == NULL)
+		goto err_out;
+
+	if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) > 0) {
+		devp = finddevice(path);
+		if (devp == NULL)
+			goto err_out;
+
+		if ((getprop(devp, "device_type", devtype, sizeof(devtype)) > 0)
+				&& !strcmp(devtype, "serial"))
+			return devp;
+	}
+err_out:
+	return NULL;
+}
+
+static struct serial_console_data serial_cd;
+
+/* Node's "compatible" property determines which serial driver to use */
+int serial_console_init(void)
+{
+	void *devp;
+	int rc = -1;
+	char compat[MAX_PROP_LEN];
+
+	devp = serial_get_stdout_devp();
+	if (devp == NULL)
+		goto err_out;
+
+	if (getprop(devp, "compatible", compat, sizeof(compat)) < 0)
+		goto err_out;
+
+	if (!strcmp(compat, "ns16550"))
+		rc = ns16550_console_init(devp, &serial_cd);
+
+	/* Add other serial console driver calls here */
+
+	if (!rc) {
+		console_ops.open = serial_open;
+		console_ops.write = serial_write;
+		console_ops.edit_cmdline = serial_edit_cmdline;
+		console_ops.close = serial_close;
+		console_ops.data = &serial_cd;
+
+		return 0;
+	}
+err_out:
+	return -1;
+}
diff --git a/arch/powerpc/boot/simple_alloc.c b/arch/powerpc/boot/simple_alloc.c
new file mode 100644
index 0000000..cfe3a75
--- /dev/null
+++ b/arch/powerpc/boot/simple_alloc.c
@@ -0,0 +1,149 @@
+/*
+ * Implement primitive realloc(3) functionality.
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2006 (c) MontaVista Software, Inc.  This file is licensed under
+ * the terms of the GNU General Public License version 2.  This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <stddef.h>
+#include "types.h"
+#include "page.h"
+#include "string.h"
+#include "ops.h"
+
+#define	ENTRY_BEEN_USED	0x01
+#define	ENTRY_IN_USE	0x02
+
+static struct alloc_info {
+	u32	flags;
+	u32	base;
+	u32	size;
+} *alloc_tbl;
+
+static u32 tbl_entries;
+static u32 alloc_min;
+static u32 next_base;
+static u32 space_left;
+
+/*
+ * The first time an entry is used, its base and size are set.
+ * An entry can be freed and re-malloc'd, but its base & size don't change.
+ * This should be smart enough for the needs of the bootwrapper.
+ */
+static void *simple_malloc(u32 size)
+{
+	u32 i;
+	struct alloc_info *p = alloc_tbl;
+
+	if (size == 0)
+		goto err_out;
+
+	size = _ALIGN_UP(size, alloc_min);
+
+	for (i=0; i<tbl_entries; i++, p++)
+		if (!(p->flags & ENTRY_BEEN_USED)) { /* never been used */
+			if (size <= space_left) {
+				p->base = next_base;
+				p->size = size;
+				p->flags = ENTRY_BEEN_USED | ENTRY_IN_USE;
+				next_base += size;
+				space_left -= size;
+				return (void *)p->base;
+			}
+			goto err_out; /* not enough space left */
+		}
+		/* reuse an entry keeping same base & size */
+		else if (!(p->flags & ENTRY_IN_USE) && (size <= p->size)) {
+			p->flags |= ENTRY_IN_USE;
+			return (void *)p->base;
+		}
+err_out:
+	return NULL;
+}
+
+static struct alloc_info *simple_find_entry(void *ptr)
+{
+	u32 i;
+	struct alloc_info *p = alloc_tbl;
+
+	for (i=0; i<tbl_entries; i++,p++) {
+		if (!(p->flags & ENTRY_BEEN_USED))
+			break;
+		if ((p->flags & ENTRY_IN_USE) && (p->base == (u32)ptr))
+			return p;
+	}
+	return NULL;
+}
+
+static void simple_free(void *ptr)
+{
+	struct alloc_info *p = simple_find_entry(ptr);
+
+	if (p != NULL)
+		p->flags &= ~ENTRY_IN_USE;
+}
+
+/*
+ * Change size of area pointed to by 'ptr' to 'size'.
+ * If 'ptr' is NULL, then it's a malloc().  If 'size' is 0, then it's a free().
+ * 'ptr' must be NULL or a pointer to a non-freed area previously returned by
+ * simple_realloc() or simple_malloc().
+ */
+static void *simple_realloc(void *ptr, unsigned long size)
+{
+	struct alloc_info *p;
+	void *new;
+
+	if (size == 0) {
+		simple_free(ptr);
+		return NULL;
+	}
+
+	if (ptr == NULL)
+		return simple_malloc(size);
+
+	p = simple_find_entry(ptr);
+	if (p == NULL) /* ptr not from simple_malloc/simple_realloc */
+		return NULL;
+	if (size <= p->size) /* fits in current block */
+		return ptr;
+
+	new = simple_malloc(size);
+	if (new == NULL)	/* not enough heap left for the larger block */
+		return NULL;
+	memcpy(new, ptr, p->size);
+	simple_free(ptr);
+	return new;
+}
+
+/*
+ * Returns the address of the first byte after the heap so the caller can
+ * see if it took too much space.  If so, change the args & try again.
+ */
+void *simple_alloc_init(char *base, u32 heap_size, u32 granularity,
+		u32 max_allocs)
+{
+	u32 heap_base, tbl_size;
+
+	heap_size = _ALIGN_UP(heap_size, granularity);
+	alloc_min = granularity;
+	tbl_entries = max_allocs;
+
+	tbl_size = tbl_entries * sizeof(struct alloc_info);
+
+	alloc_tbl = (struct alloc_info *)_ALIGN_UP((unsigned long)base, 8);
+	memset(alloc_tbl, 0, tbl_size);
+
+	heap_base = _ALIGN_UP((u32)alloc_tbl + tbl_size, alloc_min);
+
+	next_base = heap_base;
+	space_left = heap_size;
+
+	platform_ops.malloc = simple_malloc;
+	platform_ops.free = simple_free;
+	platform_ops.realloc = simple_realloc;
+
+	return (void *)(heap_base + heap_size);
+}
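simple_alloc_init() carves the allocation table and the heap out of 'base', installs simple_malloc/simple_free/simple_realloc into platform_ops, and returns the first byte past the heap so the caller can check the footprint. A sketch of a board wiring it up during early init; placing the heap after _end and the 64KB/32-entry sizing are illustrative assumptions, not values from this patch:

#include "ops.h"

extern char _end[];	/* end of the wrapper image, from the linker script */

/* Hypothetical early-boot setup: 64KB heap, 32-byte granularity,
 * at most 32 live allocations, placed right after the wrapper. */
static void *setup_heap(void)
{
	void *heap_end;

	heap_end = simple_alloc_init(_end, 64 * 1024, 32, 32);

	/* compare heap_end against memory reserved for the kernel and
	 * initrd to make sure the table + heap did not overlap them */
	return heap_end;
}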
diff --git a/arch/powerpc/boot/stdio.c b/arch/powerpc/boot/stdio.c
index 6d5f638..0a9feeb 100644
--- a/arch/powerpc/boot/stdio.c
+++ b/arch/powerpc/boot/stdio.c
@@ -320,6 +320,7 @@
 	va_start(args, fmt);
 	n = vsprintf(sprint_buf, fmt, args);
 	va_end(args);
-	console_ops.write(sprint_buf, n);
+	if (console_ops.write)
+		console_ops.write(sprint_buf, n);
 	return n;
 }
diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S
new file mode 100644
index 0000000..427ddfc
--- /dev/null
+++ b/arch/powerpc/boot/util.S
@@ -0,0 +1,88 @@
+/*
+ * Copied from <file:arch/powerpc/kernel/misc_32.S>
+ *
+ * This file contains miscellaneous low-level functions.
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * and Paul Mackerras.
+ *
+ * kexec bits:
+ * Copyright (C) 2002-2003 Eric Biederman  <ebiederm@xmission.com>
+ * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include "ppc_asm.h"
+
+#define SPRN_PVR        0x11F   /* Processor Version Register */
+
+	.text
+
+/* udelay (on non-601 processors) needs to know the period of the
+ * timebase in nanoseconds.  This used to be hardcoded to be 60ns
+ * (period of 66MHz/4).  Now a variable is used that is initialized to
+ * 60 for backward compatibility, but it can be overridden as necessary
+ * with code something like this:
+ *    extern unsigned long timebase_period_ns;
+ *    timebase_period_ns = 1000000000 / bd->bi_tbfreq;
+ */
+	.data
+	.globl timebase_period_ns
+timebase_period_ns:
+	.long	60
+
+	.text
+/*
+ * Delay for a number of microseconds
+ */
+	.globl	udelay
+udelay:
+	mfspr	r4,SPRN_PVR
+	srwi	r4,r4,16
+	cmpwi	0,r4,1		/* 601 ? */
+	bne	.udelay_not_601
+00:	li	r0,86	/* Instructions / microsecond? */
+	mtctr	r0
+10:	addi	r0,r0,0 /* NOP */
+	bdnz	10b
+	subic.	r3,r3,1
+	bne	00b
+	blr
+
+.udelay_not_601:
+	mulli	r4,r3,1000	/* nanoseconds */
+	/*  Change r4 to be the number of ticks using:
+	 *	(nanoseconds + (timebase_period_ns - 1 )) / timebase_period_ns
+	 *  timebase_period_ns defaults to 60 (16.6MHz) */
+	mflr	r5
+	bl	0f
+0:	mflr	r6
+	mtlr	r5
+	lis	r5,0b@ha
+	addi	r5,r5,0b@l
+	subf	r5,r5,r6	/* In case we're relocated */
+	addis	r5,r5,timebase_period_ns@ha
+	lwz	r5,timebase_period_ns@l(r5)
+	add	r4,r4,r5
+	addi	r4,r4,-1
+	divw	r4,r4,r5	/* BUS ticks */
+1:	mftbu	r5
+	mftb	r6
+	mftbu	r7
+	cmpw	0,r5,r7
+	bne	1b		/* Get [synced] base time */
+	addc	r9,r6,r4	/* Compute end time */
+	addze	r8,r5
+2:	mftbu	r5
+	cmpw	0,r5,r8
+	blt	2b
+	bgt	3f
+	mftb	r6
+	cmpw	0,r6,r9
+	blt	2b
+3:	blr
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index b5fb1fe..024e4d4 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -184,6 +184,9 @@
 
 if [ -n "$dtb" ]; then
     addsec $tmp "$dtb" .kernel:dtb
+    if [ -n "$dts" ]; then
+	rm $dtb
+    fi
 fi
 
 if [ "$platform" != "miboot" ]; then
diff --git a/arch/powerpc/boot/zImage.coff.lds.S b/arch/powerpc/boot/zImage.coff.lds.S
index 05f3238..a360905 100644
--- a/arch/powerpc/boot/zImage.coff.lds.S
+++ b/arch/powerpc/boot/zImage.coff.lds.S
@@ -21,6 +21,10 @@
     *(.got2)
     __got2_end = .;
 
+    _dtb_start = .;
+    *(.kernel:dtb)
+    _dtb_end = .;
+
     _vmlinux_start =  .;
     *(.kernel:vmlinux.strip)
     _vmlinux_end =  .;
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 0aba06d..a98c982 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.18
-# Wed Oct  4 15:30:50 2006
+# Linux kernel version: 2.6.19-rc6
+# Wed Nov 22 15:33:04 2006
 #
 CONFIG_PPC64=y
 CONFIG_64BIT=y
@@ -32,6 +32,10 @@
 CONFIG_POWER3=y
 CONFIG_POWER4=y
 CONFIG_PPC_FPU=y
+# CONFIG_PPC_DCR_NATIVE is not set
+CONFIG_PPC_DCR_MMIO=y
+CONFIG_PPC_DCR=y
+CONFIG_PPC_OF_PLATFORM_PCI=y
 CONFIG_ALTIVEC=y
 CONFIG_PPC_STD_MMU=y
 CONFIG_VIRT_CPU_ACCOUNTING=y
@@ -67,7 +71,7 @@
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
 # CONFIG_EMBEDDED is not set
-# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -131,6 +135,7 @@
 CONFIG_PPC_CELL_NATIVE=y
 CONFIG_PPC_IBM_CELL_BLADE=y
 CONFIG_UDBG_RTAS_CONSOLE=y
+CONFIG_PPC_PS3=y
 # CONFIG_U3_DART is not set
 CONFIG_PPC_RTAS=y
 # CONFIG_RTAS_ERROR_LOGGING is not set
@@ -139,9 +144,23 @@
 CONFIG_MMIO_NVRAM=y
 # CONFIG_PPC_MPC106 is not set
 # CONFIG_PPC_970_NAP is not set
-# CONFIG_CPU_FREQ is not set
+CONFIG_PPC_INDIRECT_IO=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_DEBUG=y
+CONFIG_CPU_FREQ_STAT=y
+# CONFIG_CPU_FREQ_STAT_DETAILS is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+# CONFIG_CPU_FREQ_PMAC64 is not set
 # CONFIG_WANT_EARLY_SERIAL is not set
-# CONFIG_MPIC is not set
+CONFIG_MPIC=y
 
 #
 # Cell Broadband Engine options
@@ -149,6 +168,15 @@
 CONFIG_SPU_FS=m
 CONFIG_SPU_BASE=y
 CONFIG_CBE_RAS=y
+CONFIG_CBE_THERM=m
+CONFIG_CBE_CPUFREQ=m
+
+#
+# PS3 Platform Options
+#
+CONFIG_PS3_HTAB_SIZE=20
+# CONFIG_PS3_DYNAMIC_DMA is not set
+CONFIG_PS3_USE_LPAR_ADDR=y
 
 #
 # Kernel options
@@ -166,13 +194,14 @@
 CONFIG_FORCE_MAX_ZONEORDER=9
 # CONFIG_IOMMU_VMERGE is not set
 CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
-CONFIG_KEXEC=y
+# CONFIG_KEXEC is not set
 # CONFIG_CRASH_DUMP is not set
 CONFIG_IRQ_ALL_CPUS=y
 CONFIG_NUMA=y
 CONFIG_NODES_SHIFT=4
 CONFIG_ARCH_SELECT_MEMORY_MODEL=y
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
 CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_SELECT_MEMORY_MODEL=y
 # CONFIG_FLATMEM_MANUAL is not set
@@ -189,6 +218,7 @@
 CONFIG_MIGRATION=y
 CONFIG_RESOURCES_64BIT=y
 CONFIG_ARCH_MEMORY_PROBE=y
+CONFIG_NODES_SPAN_OTHER_NODES=y
 CONFIG_PPC_64K_PAGES=y
 CONFIG_SCHED_SMT=y
 CONFIG_PROC_DEVICETREE=y
@@ -207,7 +237,6 @@
 CONFIG_PCI=y
 CONFIG_PCI_DOMAINS=y
 CONFIG_PCIEPORTBUS=y
-# CONFIG_PCI_MULTITHREAD_PROBE is not set
 # CONFIG_PCI_DEBUG is not set
 
 #
@@ -280,7 +309,6 @@
 # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
 # CONFIG_IPV6_SIT is not set
 CONFIG_IPV6_TUNNEL=m
-# CONFIG_IPV6_SUBTREES is not set
 # CONFIG_IPV6_MULTIPLE_TABLES is not set
 # CONFIG_NETWORK_SECMARK is not set
 CONFIG_NETFILTER=y
@@ -1107,7 +1135,8 @@
 #
 # Instrumentation Support
 #
-# CONFIG_PROFILING is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
 # CONFIG_KPROBES is not set
 
 #
@@ -1142,6 +1171,7 @@
 CONFIG_DEBUGGER=y
 CONFIG_XMON=y
 CONFIG_XMON_DEFAULT=y
+CONFIG_XMON_DISASSEMBLY=y
 CONFIG_IRQSTACKS=y
 # CONFIG_BOOTX_TEXT is not set
 # CONFIG_PPC_EARLY_DEBUG is not set
@@ -1159,7 +1189,7 @@
 CONFIG_CRYPTO_ALGAPI=y
 CONFIG_CRYPTO_BLKCIPHER=m
 CONFIG_CRYPTO_HASH=y
-# CONFIG_CRYPTO_MANAGER is not set
+CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_HMAC=y
 # CONFIG_CRYPTO_NULL is not set
 # CONFIG_CRYPTO_MD4 is not set
diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig
new file mode 100644
index 0000000..23fd210
--- /dev/null
+++ b/arch/powerpc/configs/linkstation_defconfig
@@ -0,0 +1,1583 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.19-rc4
+# Wed Nov 15 20:36:30 2006
+#
+# CONFIG_PPC64 is not set
+CONFIG_PPC32=y
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_IRQ_PER_CPU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_PPC_UDBG_16550=y
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+# CONFIG_DEFAULT_UIMAGE is not set
+
+#
+# Processor support
+#
+CONFIG_CLASSIC32=y
+# CONFIG_PPC_52xx is not set
+# CONFIG_PPC_82xx is not set
+# CONFIG_PPC_83xx is not set
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_86xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+CONFIG_6xx=y
+CONFIG_PPC_FPU=y
+# CONFIG_PPC_DCR_NATIVE is not set
+# CONFIG_PPC_DCR_MMIO is not set
+# CONFIG_ALTIVEC is not set
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+# CONFIG_SMP is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION="-kuroboxHG"
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_IPC_NS is not set
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+# CONFIG_EMBEDDED is not set
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Block layer
+#
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# Platform support
+#
+# CONFIG_PPC_MULTIPLATFORM is not set
+CONFIG_EMBEDDED6xx=y
+# CONFIG_APUS is not set
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_TAU is not set
+# CONFIG_KATANA is not set
+# CONFIG_WILLOW is not set
+# CONFIG_CPCI690 is not set
+# CONFIG_POWERPMC250 is not set
+# CONFIG_CHESTNUT is not set
+# CONFIG_SPRUCE is not set
+# CONFIG_HDPU is not set
+# CONFIG_EV64260 is not set
+# CONFIG_LOPEC is not set
+# CONFIG_MVME5100 is not set
+# CONFIG_PPLUS is not set
+# CONFIG_PRPMC750 is not set
+# CONFIG_PRPMC800 is not set
+# CONFIG_SANDPOINT is not set
+CONFIG_LINKSTATION=y
+# CONFIG_MPC7448HPC2 is not set
+# CONFIG_RADSTONE_PPC7D is not set
+# CONFIG_PAL4 is not set
+# CONFIG_GEMINI is not set
+# CONFIG_EST8260 is not set
+# CONFIG_SBC82xx is not set
+# CONFIG_SBS8260 is not set
+# CONFIG_RPX8260 is not set
+# CONFIG_TQM8260 is not set
+# CONFIG_ADS8272 is not set
+# CONFIG_PQ2FADS is not set
+# CONFIG_LITE5200 is not set
+# CONFIG_EV64360 is not set
+CONFIG_PPC_GEN550=y
+CONFIG_MPC10X_BRIDGE=y
+CONFIG_MPC10X_OPENPIC=y
+# CONFIG_MPC10X_STORE_GATHERING is not set
+# CONFIG_WANT_EARLY_SERIAL is not set
+CONFIG_MPIC=y
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_PM is not set
+# CONFIG_SECCOMP is not set
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+CONFIG_PPC_INDIRECT_PCI=y
+CONFIG_FSL_SOC=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCIEPORTBUS is not set
+# CONFIG_PCI_MULTITHREAD_PROBE is not set
+# CONFIG_PCI_DEBUG is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_BOOT_LOAD=0x00800000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK is not set
+CONFIG_NETFILTER_XTABLES=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+# CONFIG_NETFILTER_XT_MATCH_HELPER is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+# CONFIG_IP_NF_CT_ACCT is not set
+# CONFIG_IP_NF_CONNTRACK_MARK is not set
+# CONFIG_IP_NF_CONNTRACK_EVENTS is not set
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+# CONFIG_IP_NF_NETBIOS_NS is not set
+CONFIG_IP_NF_TFTP=m
+# CONFIG_IP_NF_AMANDA is not set
+# CONFIG_IP_NF_PPTP is not set
+# CONFIG_IP_NF_H323 is not set
+# CONFIG_IP_NF_SIP is not set
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+# CONFIG_IP_NF_MATCH_TOS is not set
+# CONFIG_IP_NF_MATCH_RECENT is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+# CONFIG_IP_NF_MATCH_OWNER is not set
+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
+# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+# CONFIG_IP_NF_TARGET_LOG is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+# CONFIG_IP_NF_TARGET_TCPMSS is not set
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_IEEE80211=m
+CONFIG_IEEE80211_DEBUG=y
+CONFIG_IEEE80211_CRYPT_WEP=m
+CONFIG_IEEE80211_CRYPT_CCMP=m
+CONFIG_IEEE80211_CRYPT_TKIP=m
+CONFIG_IEEE80211_SOFTMAC=m
+CONFIG_IEEE80211_SOFTMAC_DEBUG=y
+CONFIG_WIRELESS_EXT=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=m
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_SYS_HYPERVISOR is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_NOSWAP=y
+# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
+# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+# CONFIG_MTD_MAP_BANK_WIDTH_2 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+# CONFIG_MTD_CFI_I2 is not set
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_OTP is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0xffc00000
+CONFIG_MTD_PHYSMAP_LEN=0x400000
+CONFIG_MTD_PHYSMAP_BANKWIDTH=1
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# Misc devices
+#
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+CONFIG_ATA=y
+# CONFIG_SATA_AHCI is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_ATA_PIIX is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIL24 is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_ATA_GENERIC is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_PDC2027X is not set
+CONFIG_PATA_SIL680=y
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+# CONFIG_FUSION_SPI is not set
+# CONFIG_FUSION_FC is not set
+# CONFIG_FUSION_SAS is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# PHY device support
+#
+
+#
+# Ethernet (10 or 100Mbit)
+#
+# CONFIG_NET_ETHERNET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+CONFIG_R8169=y
+# CONFIG_R8169_NAPI is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_QLA3XXX is not set
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+# CONFIG_MYRI10GE is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+CONFIG_NET_RADIO=y
+# CONFIG_NET_WIRELESS_RTNETLINK is not set
+
+#
+# Obsolete Wireless cards support (pre-802.11)
+#
+# CONFIG_STRIP is not set
+
+#
+# Wireless 802.11b ISA/PCI cards support
+#
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_AIRO is not set
+# CONFIG_HERMES is not set
+# CONFIG_ATMEL is not set
+
+#
+# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
+#
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_BCM43XX is not set
+# CONFIG_ZD1211RW is not set
+CONFIG_NET_WIRELESS=y
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+CONFIG_NETCONSOLE=y
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_PCIPS2 is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+CONFIG_I2C_MPC=y
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+CONFIG_SENSORS_EEPROM=m
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_M41T00 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+# CONFIG_USB_DABUSB is not set
+
+#
+# Graphics support
+#
+CONFIG_FIRMWARE_EDID=y
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+
+#
+# USB Host Controller Drivers
+#
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_SPLIT_ISO is not set
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Input Devices
+#
+# CONFIG_USB_HID is not set
+
+#
+# USB HID Boot Protocol drivers
+#
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+# CONFIG_USB_AIPTEK is not set
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_ACECAD is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_POWERMATE is not set
+# CONFIG_USB_TOUCHSCREEN is not set
+# CONFIG_USB_YEALINK is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_ATI_REMOTE2 is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
+# CONFIG_USB_APPLETOUCH is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_USB_MON=y
+
+#
+# USB port drivers
+#
+
+#
+# USB Serial Converter support
+#
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_AIRPRIME is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP2101 is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+CONFIG_USB_SERIAL_FTDI_SIO=y
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_TEST is not set
+
+#
+# USB DSL modem support
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
+#
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+
+#
+# RTC drivers
+#
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+CONFIG_RTC_DRV_RS5C372=y
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_TEST is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=m
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_NTFS_FS=m
+# CONFIG_NTFS_DEBUG is not set
+# CONFIG_NTFS_RW is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+# CONFIG_NFSD_V4 is not set
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=m
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=m
+CONFIG_ZLIB_DEFLATE=m
+CONFIG_PLIST=y
+
+#
+# Instrumentation Support
+#
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_LIST is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_DEBUGGER is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_SERIAL_TEXT_DEBUG is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_MANAGER=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=m
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
diff --git a/arch/powerpc/configs/lite5200_defconfig b/arch/powerpc/configs/lite5200_defconfig
new file mode 100644
index 0000000..ee76557
--- /dev/null
+++ b/arch/powerpc/configs/lite5200_defconfig
@@ -0,0 +1,931 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.19-rc6
+# Mon Nov 27 11:08:20 2006
+#
+# CONFIG_PPC64 is not set
+CONFIG_PPC32=y
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_IRQ_PER_CPU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+# CONFIG_PPC_UDBG_16550 is not set
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+# CONFIG_DEFAULT_UIMAGE is not set
+
+#
+# Processor support
+#
+CONFIG_CLASSIC32=y
+# CONFIG_PPC_52xx is not set
+# CONFIG_PPC_82xx is not set
+# CONFIG_PPC_83xx is not set
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_86xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+CONFIG_6xx=y
+CONFIG_PPC_FPU=y
+# CONFIG_PPC_DCR_NATIVE is not set
+# CONFIG_PPC_DCR_MMIO is not set
+# CONFIG_ALTIVEC is not set
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+# CONFIG_SMP is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_IPC_NS is not set
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_KALLSYMS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+# CONFIG_EPOLL is not set
+CONFIG_SHMEM=y
+CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_KMOD is not set
+
+#
+# Block layer
+#
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# Platform support
+#
+CONFIG_PPC_MULTIPLATFORM=y
+# CONFIG_EMBEDDED6xx is not set
+# CONFIG_APUS is not set
+# CONFIG_PPC_CHRP is not set
+CONFIG_PPC_MPC52xx=y
+# CONFIG_PPC_EFIKA is not set
+CONFIG_PPC_LITE5200=y
+# CONFIG_PPC_PMAC is not set
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_TAU is not set
+# CONFIG_WANT_EARLY_SERIAL is not set
+# CONFIG_MPIC is not set
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+# CONFIG_KEXEC is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_PM=y
+# CONFIG_PM_LEGACY is not set
+# CONFIG_PM_DEBUG is not set
+# CONFIG_PM_SYSFS_DEPRECATED is not set
+# CONFIG_SOFTWARE_SUSPEND is not set
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_INDIRECT_PCI is not set
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCIEPORTBUS is not set
+# CONFIG_PCI_DEBUG is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_BOOT_LOAD=0x00800000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_SYS_HYPERVISOR is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# Misc devices
+#
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+# CONFIG_BLK_DEV_SD is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+CONFIG_ATA=y
+# CONFIG_SATA_AHCI is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_ATA_PIIX is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIL24 is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_ATA_GENERIC is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_TRIFLEX is not set
+CONFIG_PATA_MPC52xx=y
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+# CONFIG_FUSION_SPI is not set
+# CONFIG_FUSION_FC is not set
+# CONFIG_FUSION_SAS is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# PHY device support
+#
+
+#
+# Ethernet (10 or 100Mbit)
+#
+# CONFIG_NET_ETHERNET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_MV643XX_ETH is not set
+# CONFIG_QLA3XXX is not set
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+# CONFIG_MYRI10GE is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_MPC52xx=y
+CONFIG_SERIAL_MPC52xx_CONSOLE=y
+CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=9600
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+# CONFIG_HWMON is not set
+# CONFIG_HWMON_VID is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+# CONFIG_USB is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC32 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
+
+#
+# Instrumentation Support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_LIST is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_DEBUGGER is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_SERIAL_TEXT_DEBUG is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index be11df7..1c00965 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -1386,8 +1386,8 @@
 CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
 CONFIG_DNOTIFY=y
-CONFIG_AUTOFS_FS=y
-# CONFIG_AUTOFS4_FS is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
 # CONFIG_FUSE_FS is not set
 
 #
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
new file mode 100644
index 0000000..f2d888e
--- /dev/null
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -0,0 +1,837 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.19-rc6
+# Tue Nov 21 19:38:53 2006
+#
+CONFIG_PPC64=y
+CONFIG_64BIT=y
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_IRQ_PER_CPU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_COMPAT=y
+CONFIG_SYSVIPC_COMPAT=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+# CONFIG_PPC_UDBG_16550 is not set
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+# CONFIG_DEFAULT_UIMAGE is not set
+
+#
+# Processor support
+#
+# CONFIG_POWER4_ONLY is not set
+CONFIG_POWER3=y
+CONFIG_POWER4=y
+CONFIG_PPC_FPU=y
+# CONFIG_PPC_DCR_NATIVE is not set
+# CONFIG_PPC_DCR_MMIO is not set
+# CONFIG_PPC_OF_PLATFORM_PCI is not set
+CONFIG_ALTIVEC=y
+CONFIG_PPC_STD_MMU=y
+CONFIG_VIRT_CPU_ACCOUNTING=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_IPC_NS is not set
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_CPUSETS is not set
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_STOP_MACHINE=y
+
+#
+# Block layer
+#
+CONFIG_BLOCK=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# Platform support
+#
+CONFIG_PPC_MULTIPLATFORM=y
+# CONFIG_EMBEDDED6xx is not set
+# CONFIG_APUS is not set
+# CONFIG_PPC_PSERIES is not set
+# CONFIG_PPC_ISERIES is not set
+# CONFIG_PPC_PMAC is not set
+# CONFIG_PPC_MAPLE is not set
+# CONFIG_PPC_PASEMI is not set
+CONFIG_PPC_CELL=y
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PPC_IBM_CELL_BLADE is not set
+CONFIG_PPC_PS3=y
+# CONFIG_U3_DART is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_WANT_EARLY_SERIAL is not set
+# CONFIG_MPIC is not set
+
+#
+# Cell Broadband Engine options
+#
+CONFIG_SPU_FS=y
+CONFIG_SPU_BASE=y
+# CONFIG_CBE_RAS is not set
+
+#
+# PS3 Platform Options
+#
+CONFIG_PS3_HTAB_SIZE=20
+CONFIG_PS3_DYNAMIC_DMA=y
+CONFIG_PS3_USE_LPAR_ADDR=y
+
+#
+# Kernel options
+#
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+# CONFIG_PREEMPT_BKL is not set
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_MISC=y
+CONFIG_FORCE_MAX_ZONEORDER=9
+# CONFIG_IOMMU_VMERGE is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+# CONFIG_KEXEC is not set
+# CONFIG_CRASH_DUMP is not set
+# CONFIG_IRQ_ALL_CPUS is not set
+# CONFIG_NUMA is not set
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+# CONFIG_FLATMEM_MANUAL is not set
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_SPARSE=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_RESOURCES_64BIT=y
+CONFIG_ARCH_MEMORY_PROBE=y
+CONFIG_PPC_64K_PAGES=y
+# CONFIG_SCHED_SMT is not set
+CONFIG_PROC_DEVICETREE=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="root=/dev/nfs rw ip=dhcp"
+# CONFIG_PM is not set
+# CONFIG_SECCOMP is not set
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PCI is not set
+# CONFIG_PCI_DOMAINS is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PCI Hotplug Support
+#
+CONFIG_KERNEL_START=0xc000000000000000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+# CONFIG_PACKET is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_SYS_HYPERVISOR is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# Misc devices
+#
+# CONFIG_TIFM_CORE is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+# CONFIG_BLK_DEV_SD is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+# CONFIG_ATA is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# PHY device support
+#
+
+#
+# Ethernet (10 or 100Mbit)
+#
+# CONFIG_NET_ETHERNET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_GEN_RTC=y
+# CONFIG_GEN_RTC_X is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+# CONFIG_HWMON is not set
+# CONFIG_HWMON_VID is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# InfiniBand support
+#
+
+#
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC32 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
+
+#
+# Instrumentation Support
+#
+# CONFIG_PROFILING is not set
+# CONFIG_KPROBES is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOG_BUF_SHIFT=17
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_RWSEMS is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_DEBUG_LIST=y
+CONFIG_FORCED_INLINING=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUGGER is not set
+CONFIG_IRQSTACKS=y
+# CONFIG_BOOTX_TEXT is not set
+CONFIG_PPC_EARLY_DEBUG=y
+# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
+# CONFIG_PPC_EARLY_DEBUG_G5 is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE is not set
+# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
+# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 7af23c4..4fe53d0 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -17,11 +17,11 @@
 obj-$(CONFIG_PPC64)		+= setup_64.o binfmt_elf32.o sys_ppc32.o \
 				   signal_64.o ptrace32.o \
 				   paca.o cpu_setup_ppc970.o \
-				   firmware.o sysfs.o
+				   firmware.o sysfs.o nvram_64.o
 obj-$(CONFIG_PPC64)		+= vdso64/
 obj-$(CONFIG_ALTIVEC)		+= vecemu.o vector.o
 obj-$(CONFIG_PPC_970_NAP)	+= idle_power4.o
-obj-$(CONFIG_PPC_OF)		+= of_device.o prom_parse.o
+obj-$(CONFIG_PPC_OF)		+= of_device.o of_platform.o prom_parse.o
 procfs-$(CONFIG_PPC64)		:= proc_ppc64.o
 obj-$(CONFIG_PROC_FS)		+= $(procfs-y)
 rtaspci-$(CONFIG_PPC64)		:= rtas_pci.o
@@ -32,7 +32,6 @@
 obj-$(CONFIG_IBMVIO)		+= vio.o
 obj-$(CONFIG_IBMEBUS)           += ibmebus.o
 obj-$(CONFIG_GENERIC_TBSYNC)	+= smp-tbsync.o
-obj64-$(CONFIG_PPC_MULTIPLATFORM) += nvram_64.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 obj-$(CONFIG_6xx)		+= idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
 obj-$(CONFIG_TAU)		+= tau_6xx.o
@@ -59,11 +58,11 @@
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_PPC_UDBG_16550)	+= legacy_serial.o udbg_16550.o
+
 module-$(CONFIG_PPC64)		+= module_64.o
 obj-$(CONFIG_MODULES)		+= $(module-y)
 
-pci64-$(CONFIG_PPC64)		+= pci_64.o pci_dn.o pci_iommu.o \
-				   pci_direct_iommu.o iomap.o
+pci64-$(CONFIG_PPC64)		+= pci_64.o pci_dn.o
 pci32-$(CONFIG_PPC32)		:= pci_32.o
 obj-$(CONFIG_PCI)		+= $(pci64-y) $(pci32-y)
 kexec-$(CONFIG_PPC64)		:= machine_kexec_64.o
@@ -72,8 +71,12 @@
 obj-$(CONFIG_AUDIT)		+= audit.o
 obj64-$(CONFIG_AUDIT)		+= compat_audit.o
 
+ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
+obj-y				+= iomap.o
+endif
+
 ifeq ($(CONFIG_PPC_ISERIES),y)
-$(obj)/head_64.o: $(obj)/lparmap.s
+extra-y += lparmap.s
 AFLAGS_head_64.o += -I$(obj)
 endif
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d06f378..e965215 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -118,7 +118,8 @@
 	DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
 	DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
 	DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
-	DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
+	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
+	DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
 	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
 	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
diff --git a/arch/powerpc/kernel/cpu_setup_ppc970.S b/arch/powerpc/kernel/cpu_setup_ppc970.S
index 6525948..bf118c3 100644
--- a/arch/powerpc/kernel/cpu_setup_ppc970.S
+++ b/arch/powerpc/kernel/cpu_setup_ppc970.S
@@ -83,6 +83,22 @@
 	rldimi	r0,r11,52,8		/* set NAP and DPM */
 	li	r11,0
 	rldimi	r0,r11,32,31		/* clear EN_ATTN */
+	b	load_hids		/* Jump to shared code */
+
+
+_GLOBAL(__setup_cpu_ppc970MP)
+	/* Do nothing if not running in HV mode */
+	mfmsr	r0
+	rldicl.	r0,r0,4,63
+	beqlr
+
+	mfspr	r0,SPRN_HID0
+	li	r11,0x15		/* clear DOZE and SLEEP */
+	rldimi	r0,r11,52,6		/* set DEEPNAP, NAP and DPM */
+	li	r11,0
+	rldimi	r0,r11,32,31		/* clear EN_ATTN */
+
+load_hids:
 	mtspr	SPRN_HID0,r0
 	mfspr	r0,SPRN_HID0
 	mfspr	r0,SPRN_HID0
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index bfd499e..9d1614c 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -42,6 +42,7 @@
 #endif /* CONFIG_PPC32 */
 #ifdef CONFIG_PPC64
 extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec);
 extern void __restore_cpu_ppc970(void);
 #endif /* CONFIG_PPC64 */
 
@@ -222,9 +223,9 @@
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.num_pmcs		= 8,
-		.cpu_setup		= __setup_cpu_ppc970,
+		.cpu_setup		= __setup_cpu_ppc970MP,
 		.cpu_restore		= __restore_cpu_ppc970,
-		.oprofile_cpu_type	= "ppc64/970",
+		.oprofile_cpu_type	= "ppc64/970MP",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.platform		= "ppc970",
 	},
@@ -276,10 +277,45 @@
 		.oprofile_mmcra_sipr	= MMCRA_SIPR,
 		.platform		= "power5+",
 	},
+	{	/* POWER6 in P5+ mode; 2.04-compliant processor */
+		.pvr_mask		= 0xffffffff,
+		.pvr_value		= 0x0f000001,
+		.cpu_name		= "POWER5+",
+		.cpu_features		= CPU_FTRS_POWER5,
+		.cpu_user_features	= COMMON_USER_POWER5_PLUS,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 6,
+		.oprofile_cpu_type	= "ppc64/power6",
+		.oprofile_type		= PPC_OPROFILE_POWER4,
+		.oprofile_mmcra_sihv	= POWER6_MMCRA_SIHV,
+		.oprofile_mmcra_sipr	= POWER6_MMCRA_SIPR,
+		.oprofile_mmcra_clear	= POWER6_MMCRA_THRM |
+			POWER6_MMCRA_OTHER,
+		.platform		= "power5+",
+	},
 	{	/* Power6 */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x003e0000,
-		.cpu_name		= "POWER6",
+		.cpu_name		= "POWER6 (raw)",
+		.cpu_features		= CPU_FTRS_POWER6,
+		.cpu_user_features	= COMMON_USER_POWER6 |
+			PPC_FEATURE_POWER6_EXT,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 6,
+		.oprofile_cpu_type	= "ppc64/power6",
+		.oprofile_type		= PPC_OPROFILE_POWER4,
+		.oprofile_mmcra_sihv	= POWER6_MMCRA_SIHV,
+		.oprofile_mmcra_sipr	= POWER6_MMCRA_SIPR,
+		.oprofile_mmcra_clear	= POWER6_MMCRA_THRM |
+			POWER6_MMCRA_OTHER,
+		.platform		= "power6x",
+	},
+	{	/* 2.05-compliant processor, i.e. Power6 "architected" mode */
+		.pvr_mask		= 0xffffffff,
+		.pvr_value		= 0x0f000002,
+		.cpu_name		= "POWER6 (architected)",
 		.cpu_features		= CPU_FTRS_POWER6,
 		.cpu_user_features	= COMMON_USER_POWER6,
 		.icache_bsize		= 128,
@@ -303,6 +339,9 @@
 			PPC_FEATURE_SMT,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
+		.num_pmcs		= 4,
+		.oprofile_cpu_type	= "ppc64/cell-be",
+		.oprofile_type		= PPC_OPROFILE_CELL,
 		.platform		= "ppc-cell-be",
 	},
 	{	/* PA Semi PA6T */
@@ -801,6 +840,17 @@
 		.cpu_setup		= __setup_cpu_603,
 		.platform		= "ppc603",
 	},
+	{	/* e300c3 on 83xx  */
+		.pvr_mask		= 0x7fff0000,
+		.pvr_value		= 0x00850000,
+		.cpu_name		= "e300c3",
+		.cpu_features		= CPU_FTRS_E300,
+		.cpu_user_features	= COMMON_USER,
+		.icache_bsize		= 32,
+		.dcache_bsize		= 32,
+		.cpu_setup		= __setup_cpu_603,
+		.platform		= "ppc603",
+	},
 	{	/* default match, we assume split I/D cache & TB (non-601)... */
 		.pvr_mask		= 0x00000000,
 		.pvr_value		= 0x00000000,
@@ -1169,19 +1219,15 @@
 #endif /* CONFIG_PPC32 */
 };
 
-struct cpu_spec *identify_cpu(unsigned long offset)
+struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr)
 {
 	struct cpu_spec *s = cpu_specs;
 	struct cpu_spec **cur = &cur_cpu_spec;
-	unsigned int pvr = mfspr(SPRN_PVR);
 	int i;
 
 	s = PTRRELOC(s);
 	cur = PTRRELOC(cur);
 
-	if (*cur != NULL)
-		return PTRRELOC(*cur);
-
 	for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++)
 		if ((pvr & s->pvr_mask) == s->pvr_value) {
 			*cur = cpu_specs + i;
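
With this change identify_cpu() no longer reads the PVR itself or returns a previously cached result; the caller passes the PVR in, and the first table entry whose pvr_mask/pvr_value pair matches wins, the all-zero entry at the end acting as a catch-all. A minimal userspace sketch of that mask/value lookup (table contents and names are illustrative, not the kernel's):

#include <stdio.h>

struct spec {
	unsigned int pvr_mask;		/* PVR bits that must match */
	unsigned int pvr_value;		/* expected value under the mask */
	const char *name;
};

/* most specific entries first, catch-all last, as in cpu_specs[] */
static const struct spec specs[] = {
	{ 0xffffffff, 0x0f000002, "POWER6 (architected)" },
	{ 0xffff0000, 0x003e0000, "POWER6 (raw)" },
	{ 0x00000000, 0x00000000, "default" },
};

static const struct spec *identify(unsigned int pvr)
{
	unsigned int i;

	for (i = 0; i < sizeof(specs) / sizeof(specs[0]); i++)
		if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
			return &specs[i];
	return NULL;	/* unreachable while the catch-all entry exists */
}

int main(void)
{
	printf("%s\n", identify(0x003e0201)->name);	/* "POWER6 (raw)" */
	return 0;
}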
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 1af41f7..d3f2080 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -46,61 +46,6 @@
 static cpumask_t cpus_in_crash = CPU_MASK_NONE;
 cpumask_t cpus_in_sr = CPU_MASK_NONE;
 
-static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
-							       size_t data_len)
-{
-	struct elf_note note;
-
-	note.n_namesz = strlen(name) + 1;
-	note.n_descsz = data_len;
-	note.n_type   = type;
-	memcpy(buf, &note, sizeof(note));
-	buf += (sizeof(note) +3)/4;
-	memcpy(buf, name, note.n_namesz);
-	buf += (note.n_namesz + 3)/4;
-	memcpy(buf, data, note.n_descsz);
-	buf += (note.n_descsz + 3)/4;
-
-	return buf;
-}
-
-static void final_note(u32 *buf)
-{
-	struct elf_note note;
-
-	note.n_namesz = 0;
-	note.n_descsz = 0;
-	note.n_type   = 0;
-	memcpy(buf, &note, sizeof(note));
-}
-
-static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
-{
-	struct elf_prstatus prstatus;
-	u32 *buf;
-
-	if ((cpu < 0) || (cpu >= NR_CPUS))
-		return;
-
-	/* Using ELF notes here is opportunistic.
-	 * I need a well defined structure format
-	 * for the data I pass, and I need tags
-	 * on the data to indicate what information I have
-	 * squirrelled away.  ELF notes happen to provide
-	 * all of that that no need to invent something new.
-	 */
-	buf = (u32*)per_cpu_ptr(crash_notes, cpu);
-	if (!buf) 
-		return;
-
-	memset(&prstatus, 0, sizeof(prstatus));
-	prstatus.pr_pid = current->pid;
-	elf_core_copy_regs(&prstatus.pr_reg, regs);
-	buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
-			sizeof(prstatus));
-	final_note(buf);
-}
-
 #ifdef CONFIG_SMP
 static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
 
@@ -111,9 +56,9 @@
 	if (!cpu_online(cpu))
 		return;
 
-	local_irq_disable();
+	hard_irq_disable();
 	if (!cpu_isset(cpu, cpus_in_crash))
-		crash_save_this_cpu(regs, cpu);
+		crash_save_cpu(regs, cpu);
 	cpu_set(cpu, cpus_in_crash);
 
 	/*
@@ -289,7 +234,7 @@
 	 * an SMP system.
 	 * The kernel is broken so disable interrupts.
 	 */
-	local_irq_disable();
+	hard_irq_disable();
 
 	for_each_irq(irq) {
 		struct irq_desc *desc = irq_desc + irq;
@@ -306,7 +251,7 @@
 	 * such that another IPI will not be sent.
 	 */
 	crashing_cpu = smp_processor_id();
-	crash_save_this_cpu(regs, crashing_cpu);
+	crash_save_cpu(regs, crashing_cpu);
 	crash_kexec_prepare_cpus(crashing_cpu);
 	cpu_set(crashing_cpu, cpus_in_crash);
 	if (ppc_md.kexec_cpu_down)
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 6c168f6..7b0e754 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -1,151 +1,194 @@
 /*
- * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
  *
- * Implements the generic device dma API for ppc64. Handles
- * the pci and vio busses
+ * Provide default implementations of the DMA mapping callbacks for
+ * directly mapped busses and busses using the iommu infrastructure
  */
 
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
-/* Include the busses we support */
-#include <linux/pci.h>
-#include <asm/vio.h>
-#include <asm/ibmebus.h>
-#include <asm/scatterlist.h>
 #include <asm/bug.h>
+#include <asm/iommu.h>
+#include <asm/abs_addr.h>
 
-static struct dma_mapping_ops *get_dma_ops(struct device *dev)
+/*
+ * Generic iommu implementation
+ */
+
+static inline unsigned long device_to_mask(struct device *dev)
 {
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return &pci_dma_ops;
-#endif
-#ifdef CONFIG_IBMVIO
-	if (dev->bus == &vio_bus_type)
-		return &vio_dma_ops;
-#endif
-#ifdef CONFIG_IBMEBUS
-	if (dev->bus == &ibmebus_bus_type)
-		return &ibmebus_dma_ops;
-#endif
-	return NULL;
+	if (dev->dma_mask && *dev->dma_mask)
+		return *dev->dma_mask;
+	/* Assume devices without mask can take 32 bit addresses */
+	return 0xfffffffful;
 }
 
-int dma_supported(struct device *dev, u64 mask)
+
+/* Allocates a contiguous real buffer and creates mappings over it.
+ * Returns the virtual address of the buffer and sets dma_handle
+ * to the dma address (mapping) of the first page.
+ */
+static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
+				      dma_addr_t *dma_handle, gfp_t flag)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	return dma_ops->dma_supported(dev, mask);
+	return iommu_alloc_coherent(dev->archdata.dma_data, size, dma_handle,
+				    device_to_mask(dev), flag,
+				    dev->archdata.numa_node);
 }
-EXPORT_SYMBOL(dma_supported);
 
-int dma_set_mask(struct device *dev, u64 dma_mask)
+static void dma_iommu_free_coherent(struct device *dev, size_t size,
+				    void *vaddr, dma_addr_t dma_handle)
 {
-#ifdef CONFIG_PCI
-	if (dev->bus == &pci_bus_type)
-		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
-#endif
-#ifdef CONFIG_IBMVIO
-	if (dev->bus == &vio_bus_type)
-		return -EIO;
-#endif /* CONFIG_IBMVIO */
-#ifdef CONFIG_IBMEBUS
-	if (dev->bus == &ibmebus_bus_type)
-		return -EIO;
-#endif
-	BUG();
-	return 0;
+	iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
 }
-EXPORT_SYMBOL(dma_set_mask);
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag)
+/* Creates TCEs for a user provided buffer.  The user buffer must be
+ * contiguous real kernel storage (not vmalloc).  The address of the buffer
+ * passed here is the kernel (virtual) address of the buffer.  The buffer
+ * need not be page aligned, the dma_addr_t returned will point to the same
+ * byte within the page as vaddr.
+ */
+static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
+				       size_t size,
+				       enum dma_data_direction direction)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+	return iommu_map_single(dev->archdata.dma_data, vaddr, size,
+			        device_to_mask(dev), direction);
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t dma_handle)
+
+static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
+				   size_t size,
+				   enum dma_data_direction direction)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+	iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction);
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
-dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-		enum dma_data_direction direction)
+
+static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
+			    int nelems, enum dma_data_direction direction)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	return dma_ops->map_single(dev, cpu_addr, size, direction);
+	return iommu_map_sg(dev->archdata.dma_data, sglist, nelems,
+			    device_to_mask(dev), direction);
 }
-EXPORT_SYMBOL(dma_map_single);
 
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction direction)
+static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
+		int nelems, enum dma_data_direction direction)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	dma_ops->unmap_single(dev, dma_addr, size, direction);
+	iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction);
 }
-EXPORT_SYMBOL(dma_unmap_single);
 
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction)
+/* We support DMA to/from any memory page via the iommu */
+static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+	struct iommu_table *tbl = dev->archdata.dma_data;
 
-	BUG_ON(!dma_ops);
-
-	return dma_ops->map_single(dev, page_address(page) + offset, size,
-			direction);
+	if (!tbl || tbl->it_offset > mask) {
+		printk(KERN_INFO
+		       "Warning: IOMMU offset too big for device mask\n");
+		if (tbl)
+			printk(KERN_INFO
+			       "mask: 0x%08lx, table offset: 0x%08lx\n",
+				mask, tbl->it_offset);
+		else
+			printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
+				mask);
+		return 0;
+	} else
+		return 1;
 }
-EXPORT_SYMBOL(dma_map_page);
 
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-		enum dma_data_direction direction)
+struct dma_mapping_ops dma_iommu_ops = {
+	.alloc_coherent	= dma_iommu_alloc_coherent,
+	.free_coherent	= dma_iommu_free_coherent,
+	.map_single	= dma_iommu_map_single,
+	.unmap_single	= dma_iommu_unmap_single,
+	.map_sg		= dma_iommu_map_sg,
+	.unmap_sg	= dma_iommu_unmap_sg,
+	.dma_supported	= dma_iommu_dma_supported,
+};
+EXPORT_SYMBOL(dma_iommu_ops);
+
+/*
+ * Generic direct DMA implementation
+ *
+ * This implementation supports a global offset that can be applied if
+ * the address at which memory is visible to devices is not 0.
+ */
+unsigned long dma_direct_offset;
+
+static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+	struct page *page;
+	void *ret;
+	int node = dev->archdata.numa_node;
 
-	BUG_ON(!dma_ops);
+	/* TODO: Maybe use the numa node here too ? */
+	page = alloc_pages_node(node, flag, get_order(size));
+	if (page == NULL)
+		return NULL;
+	ret = page_address(page);
+	memset(ret, 0, size);
+	*dma_handle = virt_to_abs(ret) | dma_direct_offset;
 
-	dma_ops->unmap_single(dev, dma_address, size, direction);
+	return ret;
 }
-EXPORT_SYMBOL(dma_unmap_page);
 
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction direction)
+static void dma_direct_free_coherent(struct device *dev, size_t size,
+				     void *vaddr, dma_addr_t dma_handle)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	return dma_ops->map_sg(dev, sg, nents, direction);
+	free_pages((unsigned long)vaddr, get_order(size));
 }
-EXPORT_SYMBOL(dma_map_sg);
 
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-		enum dma_data_direction direction)
+static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
+					size_t size,
+					enum dma_data_direction direction)
 {
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	dma_ops->unmap_sg(dev, sg, nhwentries, direction);
+	return virt_to_abs(ptr) | dma_direct_offset;
 }
-EXPORT_SYMBOL(dma_unmap_sg);
+
+static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
+				    size_t size,
+				    enum dma_data_direction direction)
+{
+}
+
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg,
+			     int nents, enum dma_data_direction direction)
+{
+	int i;
+
+	for (i = 0; i < nents; i++, sg++) {
+		sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
+			dma_direct_offset;
+		sg->dma_length = sg->length;
+	}
+
+	return nents;
+}
+
+static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction)
+{
+}
+
+static int dma_direct_dma_supported(struct device *dev, u64 mask)
+{
+	/* Could be improved to check for memory though it better be
+	 * done via some global so platforms can set the limit in case
+	 * they have limited DMA windows
+	 */
+	return mask >= DMA_32BIT_MASK;
+}
+
+struct dma_mapping_ops dma_direct_ops = {
+	.alloc_coherent	= dma_direct_alloc_coherent,
+	.free_coherent	= dma_direct_free_coherent,
+	.map_single	= dma_direct_map_single,
+	.unmap_single	= dma_direct_unmap_single,
+	.map_sg		= dma_direct_map_sg,
+	.unmap_sg	= dma_direct_unmap_sg,
+	.dma_supported	= dma_direct_dma_supported,
+};
+EXPORT_SYMBOL(dma_direct_ops);
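
The rewritten dma_64.c no longer switches on the bus type; each device is expected to carry its DMA ops (and, for the iommu case, its iommu_table) in dev->archdata, as the ibmebus change further down in this series does. A hedged sketch of how a bus's device-creation path might wire that up, assuming only the archdata fields used above; my_bus_setup_dma and tbl are illustrative names, not part of this patch:

#include <linux/device.h>
#include <asm/iommu.h>

extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;

/* Sketch: wire a freshly created device for DMA.  A real bus would do
 * this where it creates its devices (compare the ibmebus change below). */
static void my_bus_setup_dma(struct device *dev, struct iommu_table *tbl)
{
	if (tbl) {
		/* translated DMA through the generic iommu ops */
		dev->archdata.dma_data = tbl;
		dev->archdata.dma_ops = &dma_iommu_ops;
	} else {
		/* bus addresses are 1:1 with memory, use the direct ops */
		dev->archdata.dma_ops = &dma_direct_ops;
	}
}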
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 748e74f..1a3d4de 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -87,15 +87,19 @@
 	addi	r9,r1,STACK_FRAME_OVERHEAD
 	ld	r11,exception_marker@toc(r2)
 	std	r11,-16(r9)		/* "regshere" marker */
+	li	r10,1
+	stb	r10,PACASOFTIRQEN(r13)
+	stb	r10,PACAHARDIRQEN(r13)
+	std	r10,SOFTE(r1)
 #ifdef CONFIG_PPC_ISERIES
 BEGIN_FW_FTR_SECTION
 	/* Hack for handling interrupts when soft-enabling on iSeries */
 	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
 	andi.	r10,r12,MSR_PR		/* from kernel */
 	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
-	beq	hardware_interrupt_entry
-	lbz	r10,PACAPROCENABLED(r13)
-	std	r10,SOFTE(r1)
+	bne	2f
+	b	hardware_interrupt_entry
+2:
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
 	mfmsr	r11
@@ -460,9 +464,9 @@
 #endif
 
 restore:
+	ld	r5,SOFTE(r1)
 #ifdef CONFIG_PPC_ISERIES
 BEGIN_FW_FTR_SECTION
-	ld	r5,SOFTE(r1)
 	cmpdi	0,r5,0
 	beq	4f
 	/* Check for pending interrupts (iSeries) */
@@ -472,21 +476,25 @@
 	beq+	4f			/* skip do_IRQ if no interrupts */
 
 	li	r3,0
-	stb	r3,PACAPROCENABLED(r13)	/* ensure we are soft-disabled */
+	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
 	ori	r10,r10,MSR_EE
 	mtmsrd	r10			/* hard-enable again */
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.do_IRQ
 	b	.ret_from_except_lite		/* loop back and handle more */
-
-4:	stb	r5,PACAPROCENABLED(r13)
+4:
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
+	stb	r5,PACASOFTIRQEN(r13)
 
 	ld	r3,_MSR(r1)
 	andi.	r0,r3,MSR_RI
 	beq-	unrecov_restore
 
+	/* extract EE bit and use it to restore paca->hard_enabled */
+	rldicl	r4,r3,49,63		/* r0 = (r3 >> 15) & 1 */
+	stb	r4,PACAHARDIRQEN(r13)
+
 	andi.	r0,r3,MSR_PR
 
 	/*
@@ -538,25 +546,15 @@
 	/* Check that preempt_count() == 0 and interrupts are enabled */
 	lwz	r8,TI_PREEMPT(r9)
 	cmpwi	cr1,r8,0
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
 	ld	r0,SOFTE(r1)
 	cmpdi	r0,0
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
-BEGIN_FW_FTR_SECTION
-	andi.	r0,r3,MSR_EE
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
 	crandc	eq,cr1*4+eq,eq
 	bne	restore
 	/* here we are preempting the current task */
 1:
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
 	li	r0,1
-	stb	r0,PACAPROCENABLED(r13)
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
+	stb	r0,PACASOFTIRQEN(r13)
+	stb	r0,PACAHARDIRQEN(r13)
 	ori	r10,r10,MSR_EE
 	mtmsrd	r10,1		/* reenable interrupts */
 	bl	.preempt_schedule
@@ -639,8 +637,7 @@
 	/* There is no way it is acceptable to get here with interrupts enabled,
 	 * check it with the asm equivalent of WARN_ON
 	 */
-	mfmsr	r6
-	andi.	r0,r6,MSR_EE
+	lbz	r0,PACASOFTIRQEN(r13)
 1:	tdnei	r0,0
 .section __bug_table,"a"
 	.llong	1b,__LINE__ + 0x1000000, 1f, 2f
@@ -649,7 +646,13 @@
 1:	.asciz	__FILE__
 2:	.asciz "enter_rtas"
 .previous
-	
+
+	/* Hard-disable interrupts */
+	mfmsr	r6
+	rldicl	r7,r6,48,1
+	rotldi	r7,r7,16
+	mtmsrd	r7,1
+
 	/* Unfortunately, the stack pointer and the MSR are also clobbered,
 	 * so they are saved in the PACA which allows us to restore
 	 * our original state after RTAS returns.
@@ -735,8 +738,6 @@
 
 #endif /* CONFIG_PPC_RTAS */
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
-
 _GLOBAL(enter_prom)
 	mflr	r0
 	std	r0,16(r1)
@@ -821,5 +822,3 @@
 	ld	r0,16(r1)
 	mtlr    r0
         blr
-	
-#endif	/* CONFIG_PPC_MULTIPLATFORM */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index e720729..71b1fe5 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -35,9 +35,7 @@
 #include <asm/thread_info.h>
 #include <asm/firmware.h>
 
-#ifdef CONFIG_PPC_ISERIES
 #define DO_SOFT_DISABLE
-#endif
 
 /*
  * We layout physical memory as follows:
@@ -74,13 +72,11 @@
 	.text
 	.globl  _stext
 _stext:
-#ifdef CONFIG_PPC_MULTIPLATFORM
 _GLOBAL(__start)
 	/* NOP this out unconditionally */
 BEGIN_FTR_SECTION
 	b	.__start_initialization_multiplatform
 END_FTR_SECTION(0, 1)
-#endif /* CONFIG_PPC_MULTIPLATFORM */
 
 	/* Catch branch to 0 in real mode */
 	trap
@@ -308,7 +304,9 @@
 	std	r9,_LINK(r1);						   \
 	mfctr	r10;			/* save CTR in stackframe	*/ \
 	std	r10,_CTR(r1);						   \
+	lbz	r10,PACASOFTIRQEN(r13);				   \
 	mfspr	r11,SPRN_XER;		/* save XER in stackframe	*/ \
+	std	r10,SOFTE(r1);						   \
 	std	r11,_XER(r1);						   \
 	li	r9,(n)+1;						   \
 	std	r9,_TRAP(r1);		/* set trap number		*/ \
@@ -343,6 +341,34 @@
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
 
 
+#define MASKABLE_EXCEPTION_PSERIES(n, label)				\
+	. = n;								\
+	.globl label##_pSeries;						\
+label##_pSeries:							\
+	HMT_MEDIUM;							\
+	mtspr	SPRN_SPRG1,r13;		/* save r13 */			\
+	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
+	std	r9,PACA_EXGEN+EX_R9(r13);	/* save r9, r10 */	\
+	std	r10,PACA_EXGEN+EX_R10(r13);				\
+	lbz	r10,PACASOFTIRQEN(r13);					\
+	mfcr	r9;							\
+	cmpwi	r10,0;							\
+	beq	masked_interrupt;					\
+	mfspr	r10,SPRN_SPRG1;						\
+	std	r10,PACA_EXGEN+EX_R13(r13);				\
+	std	r11,PACA_EXGEN+EX_R11(r13);				\
+	std	r12,PACA_EXGEN+EX_R12(r13);				\
+	clrrdi	r12,r13,32;		/* get high part of &label */	\
+	mfmsr	r10;							\
+	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
+	LOAD_HANDLER(r12,label##_common)				\
+	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
+	mtspr	SPRN_SRR0,r12;						\
+	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
+	mtspr	SPRN_SRR1,r10;						\
+	rfid;								\
+	b	.	/* prevent speculative execution */
+
 #define STD_EXCEPTION_ISERIES(n, label, area)		\
 	.globl label##_iSeries;				\
 label##_iSeries:					\
@@ -358,40 +384,32 @@
 	HMT_MEDIUM;							\
 	mtspr	SPRN_SPRG1,r13;		/* save r13 */			\
 	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);				\
-	lbz	r10,PACAPROCENABLED(r13);				\
+	lbz	r10,PACASOFTIRQEN(r13);					\
 	cmpwi	0,r10,0;						\
 	beq-	label##_iSeries_masked;					\
 	EXCEPTION_PROLOG_ISERIES_2;					\
 	b	label##_common;						\
 
-#ifdef DO_SOFT_DISABLE
+#ifdef CONFIG_PPC_ISERIES
 #define DISABLE_INTS				\
-BEGIN_FW_FTR_SECTION;				\
-	lbz	r10,PACAPROCENABLED(r13);	\
 	li	r11,0;				\
-	std	r10,SOFTE(r1);			\
+	stb	r11,PACASOFTIRQEN(r13);		\
+BEGIN_FW_FTR_SECTION;				\
+	stb	r11,PACAHARDIRQEN(r13);		\
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES);	\
+BEGIN_FW_FTR_SECTION;				\
 	mfmsr	r10;				\
-	stb	r11,PACAPROCENABLED(r13);	\
 	ori	r10,r10,MSR_EE;			\
 	mtmsrd	r10,1;				\
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 
-#define ENABLE_INTS				\
-BEGIN_FW_FTR_SECTION;				\
-	lbz	r10,PACAPROCENABLED(r13);	\
-	mfmsr	r11;				\
-	std	r10,SOFTE(r1);			\
-	ori	r11,r11,MSR_EE;			\
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES);	\
-BEGIN_FW_FTR_SECTION;				\
-	ld	r12,_MSR(r1);			\
-	mfmsr	r11;				\
-	rlwimi	r11,r12,0,MSR_EE;		\
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES);	\
-	mtmsrd	r11,1
+#else
+#define DISABLE_INTS				\
+	li	r11,0;				\
+	stb	r11,PACASOFTIRQEN(r13);		\
+	stb	r11,PACAHARDIRQEN(r13)
 
-#else	/* hard enable/disable interrupts */
-#define DISABLE_INTS
+#endif /* CONFIG_PPC_ISERIES */
 
 #define ENABLE_INTS				\
 	ld	r12,_MSR(r1);			\
@@ -399,8 +417,6 @@
 	rlwimi	r11,r12,0,MSR_EE;		\
 	mtmsrd	r11,1
 
-#endif
-
 #define STD_EXCEPTION_COMMON(trap, label, hdlr)		\
 	.align	7;					\
 	.globl label##_common;				\
@@ -541,11 +557,11 @@
 	mfspr	r12,SPRN_SRR1		/* and SRR1 */
 	b	.slb_miss_realmode	/* Rel. branch works in real mode */
 
-	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
+	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
 	STD_EXCEPTION_PSERIES(0x600, alignment)
 	STD_EXCEPTION_PSERIES(0x700, program_check)
 	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
-	STD_EXCEPTION_PSERIES(0x900, decrementer)
+	MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
 	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
 	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
 
@@ -597,7 +613,24 @@
 /*** pSeries interrupt support ***/
 
 	/* moved from 0xf00 */
-	STD_EXCEPTION_PSERIES(., performance_monitor)
+	MASKABLE_EXCEPTION_PSERIES(., performance_monitor)
+
+/*
+ * An interrupt came in while soft-disabled; clear EE in SRR1,
+ * clear paca->hard_enabled and return.
+ */
+masked_interrupt:
+	stb	r10,PACAHARDIRQEN(r13)
+	mtcrf	0x80,r9
+	ld	r9,PACA_EXGEN+EX_R9(r13)
+	mfspr	r10,SPRN_SRR1
+	rldicl	r10,r10,48,1		/* clear MSR_EE */
+	rotldi	r10,r10,16
+	mtspr	SPRN_SRR1,r10
+	ld	r10,PACA_EXGEN+EX_R10(r13)
+	mfspr	r13,SPRN_SPRG1
+	rfid
+	b	.
 
 	.align	7
 do_stab_bolted_pSeries:
@@ -792,7 +825,7 @@
 
 	cmpwi	0,r23,0
 	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
-	bne	.__secondary_start		/* Loop until told to go */
+	bne	__secondary_start		/* Loop until told to go */
 iSeries_secondary_smp_loop:
 	/* Let the Hypervisor know we are alive */
 	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
@@ -813,7 +846,6 @@
 	b	1b			/* If SMP not configured, secondaries
 					 * loop forever */
 
-	.globl decrementer_iSeries_masked
 decrementer_iSeries_masked:
 	/* We may not have a valid TOC pointer in here. */
 	li	r11,1
@@ -824,7 +856,6 @@
 	mtspr	SPRN_DEC,r12
 	/* fall through */
 
-	.globl hardware_interrupt_iSeries_masked
 hardware_interrupt_iSeries_masked:
 	mtcrf	0x80,r9		/* Restore regs */
 	ld	r12,PACALPPACAPTR(r13)
@@ -926,10 +957,18 @@
  * any task or sent any task a signal, you should use
  * ret_from_except or ret_from_except_lite instead of this.
  */
+fast_exc_return_irq:			/* restores irq state too */
+	ld	r3,SOFTE(r1)
+	ld	r12,_MSR(r1)
+	stb	r3,PACASOFTIRQEN(r13)	/* restore paca->soft_enabled */
+	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
+	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
+	b	1f
+
 	.globl	fast_exception_return
 fast_exception_return:
 	ld	r12,_MSR(r1)
-	ld	r11,_NIP(r1)
+1:	ld	r11,_NIP(r1)
 	andi.	r3,r12,MSR_RI		/* check if RI is set */
 	beq-	unrecov_fer
 
@@ -952,7 +991,8 @@
 	REST_8GPRS(2, r1)
 
 	mfmsr	r10
-	clrrdi	r10,r10,2		/* clear RI (LE is 0 already) */
+	rldicl	r10,r10,48,1		/* clear EE */
+	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
 	mtmsrd	r10,1
 
 	mtspr	SPRN_SRR1,r12
@@ -1326,6 +1366,16 @@
 	 * interrupts if necessary.
 	 */
 	beq	13f
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+#endif
+BEGIN_FW_FTR_SECTION
+	/*
+	 * Here we have interrupts hard-disabled, so it is sufficient
+	 * to restore paca->{soft,hard}_enable and get out.
+	 */
+	beq	fast_exc_return_irq	/* Return from exception on success */
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
+
 	/* For a hash failure, we don't bother re-enabling interrupts */
 	ble-	12f
 
@@ -1337,14 +1387,6 @@
 	ld	r3,SOFTE(r1)
 	bl	.local_irq_restore
 	b	11f
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
-BEGIN_FW_FTR_SECTION
-	beq	fast_exception_return   /* Return from exception on success */
-	ble-	12f			/* Failure return from hash_page */
-
-	/* fall through */
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
 
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
@@ -1362,6 +1404,8 @@
 	bl	.bad_page_fault
 	b	.ret_from_except
 
+13:	b	.ret_from_except_lite
+
 /* We have a page fault that hash_page could handle but HV refused
  * the PTE insertion
  */
@@ -1371,8 +1415,6 @@
 	bl	.low_hash_fault
 	b	.ret_from_except
 
-13:	b	.ret_from_except_lite
-
 	/* here we have a segment miss */
 do_ste_alloc:
 	bl	.ste_allocate		/* try to insert stab entry */
@@ -1560,7 +1602,7 @@
 	ld	r1,PACAEMERGSP(r13)
 	subi	r1,r1,STACK_FRAME_OVERHEAD
 
-	b	.__secondary_start
+	b	__secondary_start
 #endif
 
 #ifdef CONFIG_PPC_ISERIES
@@ -1595,7 +1637,6 @@
 	b	.start_here_common
 #endif /* CONFIG_PPC_ISERIES */
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
 
 _STATIC(__mmu_off)
 	mfmsr	r3
@@ -1621,13 +1662,11 @@
  *
  */
 _GLOBAL(__start_initialization_multiplatform)
-#ifdef CONFIG_PPC_MULTIPLATFORM
 	/*
 	 * Are we booted from a PROM Of-type client-interface ?
 	 */
 	cmpldi	cr0,r5,0
 	bne	.__boot_from_prom		/* yes -> prom */
-#endif
 
 	/* Save parameters */
 	mr	r31,r3
@@ -1656,7 +1695,6 @@
 	bl	.__mmu_off
 	b	.__after_prom_start
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
 _STATIC(__boot_from_prom)
 	/* Save parameters */
 	mr	r31,r3
@@ -1696,7 +1734,6 @@
 	bl	.prom_init
 	/* We never return */
 	trap
-#endif
 
 /*
  * At this point, r3 contains the physical address we are running at,
@@ -1752,8 +1789,6 @@
 	bl	.copy_and_flush		/* copy the rest */
 	b	.start_here_multiplatform
 
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
 /*
  * Copy routine used to copy the kernel to start at physical address 0
  * and flush and invalidate the caches as needed.
@@ -1836,7 +1871,7 @@
 	ld	r1,PACAEMERGSP(r13)
 	subi	r1,r1,STACK_FRAME_OVERHEAD
 
-	b	.__secondary_start
+	b	__secondary_start
 
 #endif /* CONFIG_PPC_PMAC */
 
@@ -1853,7 +1888,7 @@
  *   r13   = paca virtual address
  *   SPRG3 = paca virtual address
  */
-_GLOBAL(__secondary_start)
+__secondary_start:
 	/* Set thread priority to MEDIUM */
 	HMT_MEDIUM
 
@@ -1877,11 +1912,16 @@
 	/* enable MMU and jump to start_secondary */
 	LOAD_REG_ADDR(r3, .start_secondary_prolog)
 	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
-#ifdef DO_SOFT_DISABLE
+#ifdef CONFIG_PPC_ISERIES
 BEGIN_FW_FTR_SECTION
 	ori	r4,r4,MSR_EE
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
+BEGIN_FW_FTR_SECTION
+	stb	r7,PACASOFTIRQEN(r13)
+	stb	r7,PACAHARDIRQEN(r13)
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
+
 	mtspr	SPRN_SRR0,r3
 	mtspr	SPRN_SRR1,r4
 	rfid
@@ -1913,7 +1953,6 @@
 	isync
 	blr
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
 /*
  * This is where the main kernel code starts.
  */
@@ -1977,7 +2016,6 @@
 	mtspr	SPRN_SRR1,r4
 	rfid
 	b	.	/* prevent speculative execution */
-#endif /* CONFIG_PPC_MULTIPLATFORM */
 	
 	/* This is where all platforms converge execution */
 _STATIC(start_here_common)
@@ -2005,15 +2043,18 @@
 
 	/* Load up the kernel context */
 5:
-#ifdef DO_SOFT_DISABLE
-BEGIN_FW_FTR_SECTION
 	li	r5,0
-	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
+	stb	r5,PACASOFTIRQEN(r13)	/* Soft Disabled */
+#ifdef CONFIG_PPC_ISERIES
+BEGIN_FW_FTR_SECTION
 	mfmsr	r5
 	ori	r5,r5,MSR_EE		/* Hard Enabled */
 	mtmsrd	r5
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
+BEGIN_FW_FTR_SECTION
+	stb	r5,PACAHARDIRQEN(r13)
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
 
 	bl .start_kernel
 
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 39db7a3..82bd2f1 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -112,7 +112,7 @@
 	return 1;
 }
 
-struct dma_mapping_ops ibmebus_dma_ops = {
+static struct dma_mapping_ops ibmebus_dma_ops = {
 	.alloc_coherent = ibmebus_alloc_coherent,
 	.free_coherent  = ibmebus_free_coherent,
 	.map_single     = ibmebus_map_single,
@@ -176,6 +176,10 @@
 	dev->ofdev.dev.bus     = &ibmebus_bus_type;
 	dev->ofdev.dev.release = ibmebus_dev_release;
 
+	dev->ofdev.dev.archdata.of_node = dev->ofdev.node;
+	dev->ofdev.dev.archdata.dma_ops = &ibmebus_dma_ops;
+	dev->ofdev.dev.archdata.numa_node = of_node_to_nid(dev->ofdev.node);
+
 	/* An ibmebusdev is based on a of_device. We have to change the
 	 * bus type to use our own DMA mapping operations. 
 	 */       
@@ -210,11 +214,10 @@
 		return NULL;
 	}
 
-	dev = kmalloc(sizeof(struct ibmebus_dev), GFP_KERNEL);
+	dev = kzalloc(sizeof(struct ibmebus_dev), GFP_KERNEL);
 	if (!dev) {
 		return NULL;
 	}
-	memset(dev, 0, sizeof(struct ibmebus_dev));
 
 	dev->ofdev.node = of_node_get(dn);
        
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 4180c39..8994af3 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -39,6 +39,13 @@
 #define cpu_should_die()	0
 #endif
 
+static int __init powersave_off(char *arg)
+{
+	ppc_md.power_save = NULL;
+	return 0;
+}
+__setup("powersave=off", powersave_off);
+
 /*
  * The body of the idle task.
  */
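
The new powersave_off() handler simply clears ppc_md.power_save, so booting with "powersave=off" makes the idle loop spin instead of entering the platform's nap routine. The same __setup() pattern, sketched for a hypothetical option that is not part of this patch:

#include <linux/init.h>

/* Sketch: early boot option handler in the style of powersave_off() above. */
static int myfeature_enabled = 1;

static int __init myfeature_off(char *arg)
{
	myfeature_enabled = 0;
	return 0;		/* return 0, matching powersave_off() above */
}
__setup("myfeature=off", myfeature_off);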
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 30de81d..ba31954 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -30,6 +30,13 @@
 	beqlr
 
 	/* Go to NAP now */
+	mfmsr	r7
+	rldicl	r0,r7,48,1
+	rotldi	r0,r0,16
+	mtmsrd	r0,1			/* hard-disable interrupts */
+	li	r0,1
+	stb	r0,PACASOFTIRQEN(r13)	/* we'll hard-enable shortly */
+	stb	r0,PACAHARDIRQEN(r13)
 BEGIN_FTR_SECTION
 	DSSALL
 	sync
@@ -38,7 +45,6 @@
 	ld	r8,TI_LOCAL_FLAGS(r9)	/* set napping bit */
 	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
 	std	r8,TI_LOCAL_FLAGS(r9)	/* it will return to our caller */
-	mfmsr	r7
 	ori	r7,r7,MSR_EE
 	oris	r7,r7,MSR_POW@h
 1:	sync
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index e981806..34ae114 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -25,13 +25,11 @@
 #include <asm/firmware.h>
 #include <asm/bug.h>
 
-void _insb(volatile u8 __iomem *port, void *buf, long count)
+void _insb(const volatile u8 __iomem *port, void *buf, long count)
 {
 	u8 *tbuf = buf;
 	u8 tmp;
 
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
 	if (unlikely(count <= 0))
 		return;
 	asm volatile("sync");
@@ -48,8 +46,6 @@
 {
 	const u8 *tbuf = buf;
 
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
 	if (unlikely(count <= 0))
 		return;
 	asm volatile("sync");
@@ -60,13 +56,11 @@
 }
 EXPORT_SYMBOL(_outsb);
 
-void _insw_ns(volatile u16 __iomem *port, void *buf, long count)
+void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
 {
 	u16 *tbuf = buf;
 	u16 tmp;
 
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
 	if (unlikely(count <= 0))
 		return;
 	asm volatile("sync");
@@ -83,8 +77,6 @@
 {
 	const u16 *tbuf = buf;
 
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
 	if (unlikely(count <= 0))
 		return;
 	asm volatile("sync");
@@ -95,13 +87,11 @@
 }
 EXPORT_SYMBOL(_outsw_ns);
 
-void _insl_ns(volatile u32 __iomem *port, void *buf, long count)
+void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
 {
 	u32 *tbuf = buf;
 	u32 tmp;
 
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
 	if (unlikely(count <= 0))
 		return;
 	asm volatile("sync");
@@ -118,8 +108,6 @@
 {
 	const u32 *tbuf = buf;
 
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
 	if (unlikely(count <= 0))
 		return;
 	asm volatile("sync");
@@ -129,3 +117,90 @@
 	asm volatile("sync");
 }
 EXPORT_SYMBOL(_outsl_ns);
+
+#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
+
+void _memset_io(volatile void __iomem *addr, int c, unsigned long n)
+{
+	void *p = (void __force *)addr;
+	u32 lc = c;
+	lc |= lc << 8;
+	lc |= lc << 16;
+
+	__asm__ __volatile__ ("sync" : : : "memory");
+	while(n && !IO_CHECK_ALIGN(p, 4)) {
+		*((volatile u8 *)p) = c;
+		p++;
+		n--;
+	}
+	while(n >= 4) {
+		*((volatile u32 *)p) = lc;
+		p += 4;
+		n -= 4;
+	}
+	while(n) {
+		*((volatile u8 *)p) = c;
+		p++;
+		n--;
+	}
+	__asm__ __volatile__ ("sync" : : : "memory");
+}
+EXPORT_SYMBOL(_memset_io);
+
+void _memcpy_fromio(void *dest, const volatile void __iomem *src,
+		    unsigned long n)
+{
+	void *vsrc = (void __force *) src;
+
+	__asm__ __volatile__ ("sync" : : : "memory");
+	while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
+		*((u8 *)dest) = *((volatile u8 *)vsrc);
+		__asm__ __volatile__ ("eieio" : : : "memory");
+		vsrc++;
+		dest++;
+		n--;
+	}
+	while(n > 4) {
+		*((u32 *)dest) = *((volatile u32 *)vsrc);
+		__asm__ __volatile__ ("eieio" : : : "memory");
+		vsrc += 4;
+		dest += 4;
+		n -= 4;
+	}
+	while(n) {
+		*((u8 *)dest) = *((volatile u8 *)vsrc);
+		__asm__ __volatile__ ("eieio" : : : "memory");
+		vsrc++;
+		dest++;
+		n--;
+	}
+	__asm__ __volatile__ ("sync" : : : "memory");
+}
+EXPORT_SYMBOL(_memcpy_fromio);
+
+void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
+{
+	void *vdest = (void __force *) dest;
+
+	__asm__ __volatile__ ("sync" : : : "memory");
+	while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) {
+		*((volatile u8 *)vdest) = *((u8 *)src);
+		src++;
+		vdest++;
+		n--;
+	}
+	while(n > 4) {
+		*((volatile u32 *)vdest) = *((volatile u32 *)src);
+		src += 4;
+		vdest += 4;
+		n-=4;
+	}
+	while(n) {
+		*((volatile u8 *)vdest) = *((u8 *)src);
+		src++;
+		vdest++;
+		n--;
+	}
+	__asm__ __volatile__ ("sync" : : : "memory");
+}
+EXPORT_SYMBOL(_memcpy_toio);
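
The new _memset_io/_memcpy_fromio/_memcpy_toio helpers copy byte-wise until the pointers are 4-byte aligned, switch to 32-bit accesses, and bracket the whole transfer with sync (plus per-access eieio on the read side). A driver reaches them through the usual memcpy_toio()-style wrappers on an ioremapped region; a rough usage sketch, where MY_BAR_PHYS and the sizes are made up for illustration:

#include <linux/errno.h>
#include <asm/io.h>

#define MY_BAR_PHYS	0xf0000000ul	/* made-up example address */

/* Sketch: typical driver use of the accessors above on an ioremapped window. */
static int copy_to_device(const void *buf, unsigned long len)
{
	void __iomem *regs = ioremap(MY_BAR_PHYS, 0x1000);

	if (!regs)
		return -ENOMEM;
	memset_io(regs, 0, 0x1000);	/* clear the window */
	memcpy_toio(regs, buf, len);	/* push the buffer out via MMIO */
	iounmap(regs);
	return 0;
}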
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index a13a93d..c681133 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -106,7 +106,7 @@
 
 void __iomem *ioport_map(unsigned long port, unsigned int len)
 {
-	return (void __iomem *) (port+pci_io_base);
+	return (void __iomem *) (port + _IO_BASE);
 }
 
 void ioport_unmap(void __iomem *addr)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ba6b725..95edad4 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -258,9 +258,9 @@
 	spin_unlock_irqrestore(&(tbl->it_lock), flags);
 }
 
-int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
-		struct scatterlist *sglist, int nelems,
-		unsigned long mask, enum dma_data_direction direction)
+int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+		 int nelems, unsigned long mask,
+		 enum dma_data_direction direction)
 {
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned long flags;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5e37bf1..0bd8c76 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -64,8 +64,9 @@
 #include <asm/ptrace.h>
 #include <asm/machdep.h>
 #include <asm/udbg.h>
-#ifdef CONFIG_PPC_ISERIES
+#ifdef CONFIG_PPC64
 #include <asm/paca.h>
+#include <asm/firmware.h>
 #endif
 
 int __irq_offset_value;
@@ -95,6 +96,74 @@
 EXPORT_SYMBOL(irq_desc);
 
 int distribute_irqs = 1;
+
+static inline unsigned long get_hard_enabled(void)
+{
+	unsigned long enabled;
+
+	__asm__ __volatile__("lbz %0,%1(13)"
+	: "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));
+
+	return enabled;
+}
+
+static inline void set_soft_enabled(unsigned long enable)
+{
+	__asm__ __volatile__("stb %0,%1(13)"
+	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
+}
+
+void local_irq_restore(unsigned long en)
+{
+	/*
+	 * get_paca()->soft_enabled = en;
+	 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
+	 * That was allowed before, and in such a case we do need to take care
+	 * that gcc will set soft_enabled directly via r13, not choose to use
+	 * an intermediate register, lest we're preempted to a different cpu.
+	 */
+	set_soft_enabled(en);
+	if (!en)
+		return;
+
+	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+		/*
+		 * Do we need to disable preemption here?  Not really: in the
+		 * unlikely event that we're preempted to a different cpu in
+		 * between getting r13, loading its lppaca_ptr, and loading
+		 * its any_int, we might call iseries_handle_interrupts without
+		 * an interrupt pending on the new cpu, but that's no disaster,
+		 * is it?  And the business of preempting us off the old cpu
+		 * would itself involve a local_irq_restore which handles the
+		 * interrupt to that cpu.
+		 *
+		 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
+		 * to avoid any preemption checking added into get_paca().
+		 */
+		if (local_paca->lppaca_ptr->int_dword.any_int)
+			iseries_handle_interrupts();
+		return;
+	}
+
+	/*
+	 * if (get_paca()->hard_enabled) return;
+	 * But again we need to take care that gcc gets hard_enabled directly
+	 * via r13, not choose to use an intermediate register, lest we're
+	 * preempted to a different cpu in between the two instructions.
+	 */
+	if (get_hard_enabled())
+		return;
+
+	/*
+	 * Need to hard-enable interrupts here.  Since currently disabled,
+	 * no need to take further asm precautions against preemption; but
+	 * use local_paca instead of get_paca() to avoid preemption checking.
+	 */
+	local_paca->hard_enabled = en;
+	if ((int)mfspr(SPRN_DEC) < 0)
+		mtspr(SPRN_DEC, 1);
+	hard_irq_enable();
+}
 #endif /* CONFIG_PPC64 */
 
 int show_interrupts(struct seq_file *p, void *v)
@@ -246,7 +315,8 @@
 	set_irq_regs(old_regs);
 
 #ifdef CONFIG_PPC_ISERIES
-	if (get_lppaca()->int_dword.fields.decr_int) {
+	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
+			get_lppaca()->int_dword.fields.decr_int) {
 		get_lppaca()->int_dword.fields.decr_int = 0;
 		/* Signal a fake decrementer interrupt */
 		timer_interrupt(regs);
@@ -626,10 +696,14 @@
 
 void irq_dispose_mapping(unsigned int virq)
 {
-	struct irq_host *host = irq_map[virq].host;
+	struct irq_host *host;
 	irq_hw_number_t hwirq;
 	unsigned long flags;
 
+	if (virq == NO_IRQ)
+		return;
+
+	host = irq_map[virq].host;
 	WARN_ON (host == NULL);
 	if (host == NULL)
 		return;
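
Taken together with the entry_64.S and head_64.S changes above, this implements lazy interrupt disabling on 64-bit: local_irq_disable() only clears paca->soft_enabled, masked_interrupt catches any hardware interrupt that arrives meanwhile (clearing paca->hard_enabled and returning with EE off), and local_irq_restore(1) then re-enables for real and lets the pending interrupt be replayed via the decrementer. A conceptual userspace model of that state machine, not kernel code, with prints standing in for the real replay:

#include <stdio.h>

/* soft/hard mirror paca->soft_enabled and paca->hard_enabled */
static int soft = 1, hard = 1, pending;

static void my_irq_disable(void)
{
	soft = 0;			/* cheap: no mtmsrd needed */
}

static void hardware_interrupt(void)
{
	if (!soft) {			/* masked_interrupt path */
		hard = 0;		/* EE stays off on return */
		pending = 1;		/* remember to replay later */
		return;
	}
	printf("handled immediately\n");
}

static void my_irq_restore(int en)
{
	soft = en;
	if (!en || hard)
		return;			/* nothing was masked meanwhile */
	hard = 1;			/* really re-enable */
	if (pending) {
		pending = 0;
		printf("replaying masked interrupt\n");
	}
}

int main(void)
{
	my_irq_disable();
	hardware_interrupt();		/* arrives while soft-disabled */
	my_irq_restore(1);		/* prints "replaying masked interrupt" */
	return 0;
}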
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 7b8d12b..4657563 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -85,7 +85,7 @@
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
+	free_insn_slot(p->ainsn.insn, 0);
 	mutex_unlock(&kprobe_mutex);
 }
 
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index 397c83e..8a06724 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -9,6 +9,34 @@
 #include <asm/of_device.h>
 
 /**
+ * of_match_node - Tell if a device_node has a matching of_match structure
+ * @ids: array of of device match structures to search in
+ * @node: the of device structure to match against
+ *
+ * Low level utility function used by device matching.
+ */
+const struct of_device_id *of_match_node(const struct of_device_id *matches,
+					 const struct device_node *node)
+{
+	while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
+		int match = 1;
+		if (matches->name[0])
+			match &= node->name
+				&& !strcmp(matches->name, node->name);
+		if (matches->type[0])
+			match &= node->type
+				&& !strcmp(matches->type, node->type);
+		if (matches->compatible[0])
+			match &= device_is_compatible(node,
+						      matches->compatible);
+		if (match)
+			return matches;
+		matches++;
+	}
+	return NULL;
+}
+
+/**
  * of_match_device - Tell if an of_device structure has a matching
  * of_match structure
  * @ids: array of of device match structures to search in
@@ -22,34 +50,7 @@
 {
 	if (!dev->node)
 		return NULL;
-	while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
-		int match = 1;
-		if (matches->name[0])
-			match &= dev->node->name
-				&& !strcmp(matches->name, dev->node->name);
-		if (matches->type[0])
-			match &= dev->node->type
-				&& !strcmp(matches->type, dev->node->type);
-		if (matches->compatible[0])
-			match &= device_is_compatible(dev->node,
-				matches->compatible);
-		if (match)
-			return matches;
-		matches++;
-	}
-	return NULL;
-}
-
-static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
-{
-	struct of_device * of_dev = to_of_device(dev);
-	struct of_platform_driver * of_drv = to_of_platform_driver(drv);
-	const struct of_device_id * matches = of_drv->match_table;
-
-	if (!matches)
-		return 0;
-
-	return of_match_device(matches, of_dev) != NULL;
+	return of_match_node(matches, dev->node);
 }
 
 struct of_device *of_dev_get(struct of_device *dev)
@@ -71,96 +72,8 @@
 		put_device(&dev->dev);
 }
 
-
-static int of_device_probe(struct device *dev)
-{
-	int error = -ENODEV;
-	struct of_platform_driver *drv;
-	struct of_device *of_dev;
-	const struct of_device_id *match;
-
-	drv = to_of_platform_driver(dev->driver);
-	of_dev = to_of_device(dev);
-
-	if (!drv->probe)
-		return error;
-
-	of_dev_get(of_dev);
-
-	match = of_match_device(drv->match_table, of_dev);
-	if (match)
-		error = drv->probe(of_dev, match);
-	if (error)
-		of_dev_put(of_dev);
-
-	return error;
-}
-
-static int of_device_remove(struct device *dev)
-{
-	struct of_device * of_dev = to_of_device(dev);
-	struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
-
-	if (dev->driver && drv->remove)
-		drv->remove(of_dev);
-	return 0;
-}
-
-static int of_device_suspend(struct device *dev, pm_message_t state)
-{
-	struct of_device * of_dev = to_of_device(dev);
-	struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
-	int error = 0;
-
-	if (dev->driver && drv->suspend)
-		error = drv->suspend(of_dev, state);
-	return error;
-}
-
-static int of_device_resume(struct device * dev)
-{
-	struct of_device * of_dev = to_of_device(dev);
-	struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
-	int error = 0;
-
-	if (dev->driver && drv->resume)
-		error = drv->resume(of_dev);
-	return error;
-}
-
-struct bus_type of_platform_bus_type = {
-       .name	= "of_platform",
-       .match	= of_platform_bus_match,
-       .probe	= of_device_probe,
-       .remove	= of_device_remove,
-       .suspend	= of_device_suspend,
-       .resume	= of_device_resume,
-};
-
-static int __init of_bus_driver_init(void)
-{
-	return bus_register(&of_platform_bus_type);
-}
-
-postcore_initcall(of_bus_driver_init);
-
-int of_register_driver(struct of_platform_driver *drv)
-{
-	/* initialize common driver fields */
-	drv->driver.name = drv->name;
-	drv->driver.bus = &of_platform_bus_type;
-
-	/* register with core */
-	return driver_register(&drv->driver);
-}
-
-void of_unregister_driver(struct of_platform_driver *drv)
-{
-	driver_unregister(&drv->driver);
-}
-
-
-static ssize_t dev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t dev_show_devspec(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	struct of_device *ofdev;
 
@@ -208,41 +121,11 @@
 	device_unregister(&ofdev->dev);
 }
 
-struct of_device* of_platform_device_create(struct device_node *np,
-					    const char *bus_id,
-					    struct device *parent)
-{
-	struct of_device *dev;
 
-	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
-	if (!dev)
-		return NULL;
-	memset(dev, 0, sizeof(*dev));
-
-	dev->node = of_node_get(np);
-	dev->dma_mask = 0xffffffffUL;
-	dev->dev.dma_mask = &dev->dma_mask;
-	dev->dev.parent = parent;
-	dev->dev.bus = &of_platform_bus_type;
-	dev->dev.release = of_release_dev;
-
-	strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
-
-	if (of_device_register(dev) != 0) {
-		kfree(dev);
-		return NULL;
-	}
-
-	return dev;
-}
-
+EXPORT_SYMBOL(of_match_node);
 EXPORT_SYMBOL(of_match_device);
-EXPORT_SYMBOL(of_platform_bus_type);
-EXPORT_SYMBOL(of_register_driver);
-EXPORT_SYMBOL(of_unregister_driver);
 EXPORT_SYMBOL(of_device_register);
 EXPORT_SYMBOL(of_device_unregister);
 EXPORT_SYMBOL(of_dev_get);
 EXPORT_SYMBOL(of_dev_put);
-EXPORT_SYMBOL(of_platform_device_create);
 EXPORT_SYMBOL(of_release_dev);
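
of_match_node() is now a first-class, exported helper, so callers can match a bare device_node against a table without wrapping it in an of_device first; an entry matches when each of its non-empty name/type/compatible fields matches the node. A small usage sketch; the prototype is repeated locally because the header change is not part of this hunk, and the table contents are illustrative:

#include <linux/mod_devicetable.h>
#include <asm/prom.h>

/* Local declaration matching the definition above. */
extern const struct of_device_id *of_match_node(
	const struct of_device_id *matches,
	const struct device_node *node);

/* Illustrative table: "soc" typed nodes or a hypothetical compatible. */
static const struct of_device_id my_ids[] = {
	{ .type = "soc", },
	{ .compatible = "my,vendor-block", },
	{},
};

static int my_node_is_interesting(struct device_node *np)
{
	return of_match_node(my_ids, np) != NULL;
}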
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
new file mode 100644
index 0000000..b3189d0
--- /dev/null
+++ b/arch/powerpc/kernel/of_platform.c
@@ -0,0 +1,489 @@
+/*
+ *    Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
+ *			 <benh@kernel.crashing.org>
+ *    and		 Arnd Bergmann, IBM Corp.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+
+#include <asm/errno.h>
+#include <asm/dcr.h>
+#include <asm/of_device.h>
+#include <asm/of_platform.h>
+#include <asm/topology.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
+#include <asm/atomic.h>
+
+
+/*
+ * The list of OF IDs below is used for matching bus types in the
+ * system whose devices are to be exposed as of_platform_devices.
+ *
+ * This is the default list valid for most platforms. This file provides
+ * functions that can take an explicit list if necessary, though.
+ *
+ * The search is always performed recursively, looking for children of
+ * the provided device_node, and recursing further if such a child matches
+ * a bus type in the list.
+ */
+
+static struct of_device_id of_default_bus_ids[] = {
+	{ .type = "soc", },
+	{ .compatible = "soc", },
+	{ .type = "spider", },
+	{ .type = "axon", },
+	{ .type = "plb5", },
+	{ .type = "plb4", },
+	{ .type = "opb", },
+	{},
+};
+
+static atomic_t bus_no_reg_magic;
+
+/*
+ *
+ * OF platform device type definition & base infrastructure
+ *
+ */
+
+static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct of_device * of_dev = to_of_device(dev);
+	struct of_platform_driver * of_drv = to_of_platform_driver(drv);
+	const struct of_device_id * matches = of_drv->match_table;
+
+	if (!matches)
+		return 0;
+
+	return of_match_device(matches, of_dev) != NULL;
+}
+
+static int of_platform_device_probe(struct device *dev)
+{
+	int error = -ENODEV;
+	struct of_platform_driver *drv;
+	struct of_device *of_dev;
+	const struct of_device_id *match;
+
+	drv = to_of_platform_driver(dev->driver);
+	of_dev = to_of_device(dev);
+
+	if (!drv->probe)
+		return error;
+
+	of_dev_get(of_dev);
+
+	match = of_match_device(drv->match_table, of_dev);
+	if (match)
+		error = drv->probe(of_dev, match);
+	if (error)
+		of_dev_put(of_dev);
+
+	return error;
+}
+
+static int of_platform_device_remove(struct device *dev)
+{
+	struct of_device * of_dev = to_of_device(dev);
+	struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
+
+	if (dev->driver && drv->remove)
+		drv->remove(of_dev);
+	return 0;
+}
+
+static int of_platform_device_suspend(struct device *dev, pm_message_t state)
+{
+	struct of_device * of_dev = to_of_device(dev);
+	struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
+	int error = 0;
+
+	if (dev->driver && drv->suspend)
+		error = drv->suspend(of_dev, state);
+	return error;
+}
+
+static int of_platform_device_resume(struct device * dev)
+{
+	struct of_device * of_dev = to_of_device(dev);
+	struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
+	int error = 0;
+
+	if (dev->driver && drv->resume)
+		error = drv->resume(of_dev);
+	return error;
+}
+
+struct bus_type of_platform_bus_type = {
+       .name	= "of_platform",
+       .match	= of_platform_bus_match,
+       .probe	= of_platform_device_probe,
+       .remove	= of_platform_device_remove,
+       .suspend	= of_platform_device_suspend,
+       .resume	= of_platform_device_resume,
+};
+EXPORT_SYMBOL(of_platform_bus_type);
+
+static int __init of_bus_driver_init(void)
+{
+	return bus_register(&of_platform_bus_type);
+}
+
+postcore_initcall(of_bus_driver_init);
+
+int of_register_platform_driver(struct of_platform_driver *drv)
+{
+	/* initialize common driver fields */
+	drv->driver.name = drv->name;
+	drv->driver.bus = &of_platform_bus_type;
+
+	/* register with core */
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(of_register_platform_driver);
+
+void of_unregister_platform_driver(struct of_platform_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(of_unregister_platform_driver);
+
+static void of_platform_make_bus_id(struct of_device *dev)
+{
+	struct device_node *node = dev->node;
+	char *name = dev->dev.bus_id;
+	const u32 *reg;
+	u64 addr;
+	long magic;
+
+	/*
+	 * If it's a DCR based device, use 'd' for native DCRs
+	 * and 'D' for MMIO DCRs.
+	 */
+#ifdef CONFIG_PPC_DCR
+	reg = get_property(node, "dcr-reg", NULL);
+	if (reg) {
+#ifdef CONFIG_PPC_DCR_NATIVE
+		snprintf(name, BUS_ID_SIZE, "d%x.%s",
+			 *reg, node->name);
+#else /* CONFIG_PPC_DCR_NATIVE */
+		addr = of_translate_dcr_address(node, *reg, NULL);
+		if (addr != OF_BAD_ADDR) {
+			snprintf(name, BUS_ID_SIZE,
+				 "D%llx.%s", (unsigned long long)addr,
+				 node->name);
+			return;
+		}
+#endif /* !CONFIG_PPC_DCR_NATIVE */
+	}
+#endif /* CONFIG_PPC_DCR */
+
+	/*
+	 * For MMIO, get the physical address
+	 */
+	reg = get_property(node, "reg", NULL);
+	if (reg) {
+		addr = of_translate_address(node, reg);
+		if (addr != OF_BAD_ADDR) {
+			snprintf(name, BUS_ID_SIZE,
+				 "%llx.%s", (unsigned long long)addr,
+				 node->name);
+			return;
+		}
+	}
+
+	/*
+	 * No BusID, use the node name and add a globally incremented
+	 * counter (and pray...)
+	 */
+	magic = atomic_add_return(1, &bus_no_reg_magic);
+	snprintf(name, BUS_ID_SIZE, "%s.%d", node->name, magic - 1);
+}
+
+struct of_device* of_platform_device_create(struct device_node *np,
+					    const char *bus_id,
+					    struct device *parent)
+{
+	struct of_device *dev;
+
+	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return NULL;
+	memset(dev, 0, sizeof(*dev));
+
+	dev->node = of_node_get(np);
+	dev->dma_mask = 0xffffffffUL;
+	dev->dev.dma_mask = &dev->dma_mask;
+	dev->dev.parent = parent;
+	dev->dev.bus = &of_platform_bus_type;
+	dev->dev.release = of_release_dev;
+	dev->dev.archdata.of_node = np;
+	dev->dev.archdata.numa_node = of_node_to_nid(np);
+
+	/* We do not fill the DMA ops for platform devices by default.
+	 * This is currently the responsibility of the platform code
+	 * to do so, possibly using a device notifier
+	 */
+
+	if (bus_id)
+		strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
+	else
+		of_platform_make_bus_id(dev);
+
+	if (of_device_register(dev) != 0) {
+		kfree(dev);
+		return NULL;
+	}
+
+	return dev;
+}
+EXPORT_SYMBOL(of_platform_device_create);
+
+
+
+/**
+ * of_platform_bus_create - Create an OF device for a bus node and all its
+ * children. Optionally recursively instanciate matching busses.
+ * @bus: device node of the bus to instanciate
+ * @matches: match table, NULL to use the default, OF_NO_DEEP_PROBE to
+ * disallow recursive creation of child busses
+ */
+static int of_platform_bus_create(struct device_node *bus,
+				  struct of_device_id *matches,
+				  struct device *parent)
+{
+	struct device_node *child;
+	struct of_device *dev;
+	int rc = 0;
+
+	for (child = NULL; (child = of_get_next_child(bus, child)); ) {
+		pr_debug("   create child: %s\n", child->full_name);
+		dev = of_platform_device_create(child, NULL, parent);
+		if (dev == NULL)
+			rc = -ENOMEM;
+		else if (!of_match_node(matches, child))
+			continue;
+		if (rc == 0) {
+			pr_debug("   and sub busses\n");
+			rc = of_platform_bus_create(child, matches, &dev->dev);
+		} if (rc) {
+			of_node_put(child);
+			break;
+		}
+	}
+	return rc;
+}
+
+/**
+ * of_platform_bus_probe - Probe the device-tree for platform busses
+ * @root: parent of the first level to probe or NULL for the root of the tree
+ * @matches: match table, NULL to use the default
+ * @parent: parent to hook devices from, NULL for toplevel
+ *
+ * Note that children of the provided root are not instantiated as devices
+ * unless the specified root itself matches the bus list and is not NULL.
+ */
+
+int of_platform_bus_probe(struct device_node *root,
+			  struct of_device_id *matches,
+			  struct device *parent)
+{
+	struct device_node *child;
+	struct of_device *dev;
+	int rc = 0;
+
+	if (matches == NULL)
+		matches = of_default_bus_ids;
+	if (matches == OF_NO_DEEP_PROBE)
+		return -EINVAL;
+	if (root == NULL)
+		root = of_find_node_by_path("/");
+	else
+		of_node_get(root);
+
+	pr_debug("of_platform_bus_probe()\n");
+	pr_debug(" starting at: %s\n", root->full_name);
+
+	/* Do a self check of bus type, if there's a match, create
+	 * children
+	 */
+	if (of_match_node(matches, root)) {
+		pr_debug(" root match, create all sub devices\n");
+		dev = of_platform_device_create(root, NULL, parent);
+		if (dev == NULL) {
+			rc = -ENOMEM;
+			goto bail;
+		}
+		pr_debug(" create all sub busses\n");
+		rc = of_platform_bus_create(root, matches, &dev->dev);
+		goto bail;
+	}
+	for (child = NULL; (child = of_get_next_child(root, child)); ) {
+		if (!of_match_node(matches, child))
+			continue;
+
+		pr_debug("  match: %s\n", child->full_name);
+		dev = of_platform_device_create(child, NULL, parent);
+		if (dev == NULL)
+			rc = -ENOMEM;
+		else
+			rc = of_platform_bus_create(child, matches, &dev->dev);
+		if (rc) {
+			of_node_put(child);
+			break;
+		}
+	}
+ bail:
+	of_node_put(root);
+	return rc;
+}
+EXPORT_SYMBOL(of_platform_bus_probe);
+
+static int of_dev_node_match(struct device *dev, void *data)
+{
+	return to_of_device(dev)->node == data;
+}
+
+struct of_device *of_find_device_by_node(struct device_node *np)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&of_platform_bus_type,
+			      NULL, np, of_dev_node_match);
+	if (dev)
+		return to_of_device(dev);
+	return NULL;
+}
+EXPORT_SYMBOL(of_find_device_by_node);
+
+static int of_dev_phandle_match(struct device *dev, void *data)
+{
+	phandle *ph = data;
+	return to_of_device(dev)->node->linux_phandle == *ph;
+}
+
+struct of_device *of_find_device_by_phandle(phandle ph)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&of_platform_bus_type,
+			      NULL, &ph, of_dev_phandle_match);
+	if (dev)
+		return to_of_device(dev);
+	return NULL;
+}
+EXPORT_SYMBOL(of_find_device_by_phandle);
+
+
+#ifdef CONFIG_PPC_OF_PLATFORM_PCI
+
+/* The probing of PCI controllers from of_platform is currently
+ * 64-bit only, mostly due to gratuitous differences between
+ * the 32-bit and 64-bit PCI code on PowerPC, and the 32-bit code
+ * lacking some bits needed here.
+ */
+
+static int __devinit of_pci_phb_probe(struct of_device *dev,
+				      const struct of_device_id *match)
+{
+	struct pci_controller *phb;
+
+	/* Check if we can do that ... */
+	if (ppc_md.pci_setup_phb == NULL)
+		return -ENODEV;
+
+	printk(KERN_INFO "Setting up PCI bus %s\n", dev->node->full_name);
+
+	/* Alloc and setup PHB data structure */
+	phb = pcibios_alloc_controller(dev->node);
+	if (!phb)
+		return -ENODEV;
+
+	/* Setup parent in sysfs */
+	phb->parent = &dev->dev;
+
+	/* Setup the PHB using arch provided callback */
+	if (ppc_md.pci_setup_phb(phb)) {
+		pcibios_free_controller(phb);
+		return -ENODEV;
+	}
+
+	/* Process "ranges" property */
+	pci_process_bridge_OF_ranges(phb, dev->node, 0);
+
+	/* Set up the IO space.
+	 * This will not work properly for ISA IOs; something needs to be done
+	 * about it if we ever generalize this way of probing PCI bridges.
+	 */
+	pci_setup_phb_io_dynamic(phb, 0);
+
+	/* Init pci_dn data structures */
+	pci_devs_phb_init_dynamic(phb);
+
+	/* Register devices with EEH */
+#ifdef CONFIG_EEH
+	if (dev->node->child)
+		eeh_add_device_tree_early(dev->node);
+#endif /* CONFIG_EEH */
+
+	/* Scan the bus */
+	scan_phb(phb);
+
+	/* Claim resources. This might need some rework as well, depending
+	 * on whether we are doing probe-only or not, e.g. assigning
+	 * unassigned resources, etc.
+	 */
+	pcibios_claim_one_bus(phb->bus);
+
+	/* Finish EEH setup */
+#ifdef CONFIG_EEH
+	eeh_add_device_tree_late(phb->bus);
+#endif
+
+	/* Add probed PCI devices to the device model */
+	pci_bus_add_devices(phb->bus);
+
+	return 0;
+}
+
+static struct of_device_id of_pci_phb_ids[] = {
+	{ .type = "pci", },
+	{ .type = "pcix", },
+	{ .type = "pcie", },
+	{ .type = "pciex", },
+	{ .type = "ht", },
+	{}
+};
+
+static struct of_platform_driver of_pci_phb_driver = {
+       .name = "of-pci",
+       .match_table = of_pci_phb_ids,
+       .probe = of_pci_phb_probe,
+       .driver = {
+	       .multithread_probe = 1,
+       },
+};
+
+static __init int of_pci_phb_init(void)
+{
+	return of_register_platform_driver(&of_pci_phb_driver);
+}
+
+device_initcall(of_pci_phb_init);
+
+#endif /* CONFIG_PPC_OF_PLATFORM_PCI */
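
Usage note: platform setup code typically drives the probing added above by
registering a bus match table and calling of_platform_bus_probe() from an
initcall. The sketch below only illustrates that calling convention; the
table entries and function names are hypothetical and not part of this patch.

static struct of_device_id my_bus_ids[] = {
	{ .type = "soc", },		/* hypothetical example entries */
	{ .compatible = "simple-bus", },
	{},
};

static int __init my_declare_of_platform_devices(void)
{
	/* Create of_platform devices for every child of a matching node,
	 * recursing into matching child buses.
	 */
	return of_platform_bus_probe(NULL, my_bus_ids, NULL);
}
device_initcall(my_declare_of_platform_devices);
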
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 0d9ff72..2f54cd8 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -12,6 +12,7 @@
 #include <linux/errno.h>
 #include <linux/bootmem.h>
 #include <linux/irq.h>
+#include <linux/list.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>
@@ -99,7 +100,7 @@
 			continue;
 		if (res->end == 0xffffffff) {
 			DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
-			    pci_name(dev), i, res->start, res->end);
+			    pci_name(dev), i, (u64)res->start, (u64)res->end);
 			res->end -= res->start;
 			res->start = 0;
 			res->flags |= IORESOURCE_UNSET;
@@ -115,11 +116,9 @@
 		if (offset != 0) {
 			res->start += offset;
 			res->end += offset;
-#ifdef DEBUG
-			printk("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
-			       i, res->flags, pci_name(dev),
-			       res->start - offset, res->start);
-#endif
+			DBG("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
+			    i, res->flags, pci_name(dev),
+			    (u64)res->start - offset, (u64)res->start);
 		}
 	}
 
@@ -255,7 +254,7 @@
 			}
 
 			DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
-				res->start, res->end, res->flags, pr);
+			    (u64)res->start, (u64)res->end, res->flags, pr);
 			if (pr) {
 				if (request_resource(pr, res) == 0)
 					continue;
@@ -306,7 +305,7 @@
 	for (p = res->child; p != NULL; p = p->sibling) {
 		p->parent = res;
 		DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
-		    p->name, p->start, p->end, res->name);
+		    p->name, (u64)p->start, (u64)p->end, res->name);
 	}
 	return 0;
 }
@@ -362,7 +361,7 @@
 	}
 	if (request_resource(pr, res)) {
 		DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
-		    res->start, res->end);
+		    (u64)res->start, (u64)res->end);
 		return -1;		/* "can't happen" */
 	}
 	update_bridge_base(bus, i);
@@ -480,14 +479,14 @@
 	struct resource *pr, *r = &dev->resource[idx];
 
 	DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
-	    pci_name(dev), idx, r->start, r->end, r->flags);
+	    pci_name(dev), idx, (u64)r->start, (u64)r->end, r->flags);
 	pr = pci_find_parent_resource(dev, r);
 	if (!pr || request_resource(pr, r) < 0) {
 		printk(KERN_ERR "PCI: Cannot allocate resource region %d"
 		       " of device %s\n", idx, pci_name(dev));
 		if (pr)
 			DBG("PCI:  parent is %p: %016llx-%016llx (f=%lx)\n",
-			    pr, pr->start, pr->end, pr->flags);
+			    pr, (u64)pr->start, (u64)pr->end, pr->flags);
 		/* We'll assign a new address later */
 		r->flags |= IORESOURCE_UNSET;
 		r->end -= r->start;
@@ -960,7 +959,7 @@
 			res->flags = IORESOURCE_IO;
 			res->start = ranges[2];
 			DBG("PCI: IO 0x%llx -> 0x%llx\n",
-				    res->start, res->start + size - 1);
+			    (u64)res->start, (u64)res->start + size - 1);
 			break;
 		case 2:		/* memory space */
 			memno = 0;
@@ -982,7 +981,7 @@
 					res->flags |= IORESOURCE_PREFETCH;
 				res->start = ranges[na+2];
 				DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno,
-					    res->start, res->start + size - 1);
+				    (u64)res->start, (u64)res->start + size - 1);
 			}
 			break;
 		}
@@ -1268,7 +1267,10 @@
 		if (pci_assign_all_buses)
 			hose->first_busno = next_busno;
 		hose->last_busno = 0xff;
-		bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
+		bus = pci_scan_bus_parented(hose->parent, hose->first_busno,
+					    hose->ops, hose);
+		if (bus)
+			pci_bus_add_devices(bus);
 		hose->last_busno = bus->subordinate;
 		if (pci_assign_all_buses || next_busno <= hose->last_busno)
 			next_busno = hose->last_busno + pcibios_assign_bus_offset;
@@ -1282,10 +1284,6 @@
 	if (pci_assign_all_buses && have_of)
 		pcibios_make_OF_bus_map();
 
-	/* Do machine dependent PCI interrupt routing */
-	if (ppc_md.pci_swizzle && ppc_md.pci_map_irq)
-		pci_fixup_irqs(ppc_md.pci_swizzle, ppc_md.pci_map_irq);
-
 	/* Call machine dependent fixup */
 	if (ppc_md.pcibios_fixup)
 		ppc_md.pcibios_fixup();
@@ -1308,25 +1306,6 @@
 
 subsys_initcall(pcibios_init);
 
-unsigned char __init
-common_swizzle(struct pci_dev *dev, unsigned char *pinp)
-{
-	struct pci_controller *hose = dev->sysdata;
-
-	if (dev->bus->number != hose->first_busno) {
-		u8 pin = *pinp;
-		do {
-			pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
-			/* Move up the chain of bridges. */
-			dev = dev->bus->self;
-		} while (dev->bus->self);
-		*pinp = pin;
-
-		/* The slot is the idsel of the last bridge. */
-	}
-	return PCI_SLOT(dev->devfn);
-}
-
 unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
 			     unsigned long start, unsigned long size)
 {
@@ -1338,6 +1317,7 @@
 	struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
 	unsigned long io_offset;
 	struct resource *res;
+	struct pci_dev *dev;
 	int i;
 
 	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
@@ -1390,8 +1370,16 @@
 		}
 	}
 
+	/* Platform specific bus fixups */
 	if (ppc_md.pcibios_fixup_bus)
 		ppc_md.pcibios_fixup_bus(bus);
+
+	/* Read default IRQs and fixup if necessary */
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		pci_read_irq_line(dev);
+		if (ppc_md.pci_irq_fixup)
+			ppc_md.pci_irq_fixup(dev);
+	}
 }
 
 char __init *pcibios_setup(char *str)
@@ -1571,7 +1559,7 @@
 		*offset += hose->pci_mem_offset;
 		res_bit = IORESOURCE_MEM;
 	} else {
-		io_offset = hose->io_base_virt - ___IO_BASE;
+		io_offset = hose->io_base_virt - (void __iomem *)_IO_BASE;
 		*offset += io_offset;
 		res_bit = IORESOURCE_IO;
 	}
@@ -1826,7 +1814,8 @@
 		return;
 
 	if (rsrc->flags & IORESOURCE_IO)
-		offset = ___IO_BASE - hose->io_base_virt + hose->io_base_phys;
+		offset = (void __iomem *)_IO_BASE - hose->io_base_virt
+			+ hose->io_base_phys;
 
 	*start = rsrc->start + offset;
 	*end = rsrc->end + offset;
@@ -1845,35 +1834,6 @@
 	res->child = NULL;
 }
 
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
-{
-	unsigned long start = pci_resource_start(dev, bar);
-	unsigned long len = pci_resource_len(dev, bar);
-	unsigned long flags = pci_resource_flags(dev, bar);
-
-	if (!len)
-		return NULL;
-	if (max && len > max)
-		len = max;
-	if (flags & IORESOURCE_IO)
-		return ioport_map(start, len);
-	if (flags & IORESOURCE_MEM)
-		/* Not checking IORESOURCE_CACHEABLE because PPC does
-		 * not currently distinguish between ioremap and
-		 * ioremap_nocache.
-		 */
-		return ioremap(start, len);
-	/* What? */
-	return NULL;
-}
-
-void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{
-	/* Nothing to do */
-}
-EXPORT_SYMBOL(pci_iomap);
-EXPORT_SYMBOL(pci_iounmap);
-
 unsigned long pci_address_to_pio(phys_addr_t address)
 {
 	struct pci_controller* hose = hose_head;
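
The per-device loop added to pcibios_fixup_bus() above first reads the
interrupt from the device tree via pci_read_irq_line() and then lets the
board override it through ppc_md.pci_irq_fixup. A hedged sketch of such a
hook follows; the slot and IRQ numbers are made up for illustration.

/* Hypothetical board hook, installed via ppc_md.pci_irq_fixup. */
static void my_board_pci_irq_fixup(struct pci_dev *dev)
{
	/* Example: an on-board device at bus 0, slot 12 is hard-wired to
	 * IRQ 9 on this imaginary board and cannot be discovered from the
	 * device tree, so patch it up here.
	 */
	if (dev->bus->number == 0 && PCI_SLOT(dev->devfn) == 12)
		dev->irq = 9;
}
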
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 9bae8a5..6fa9a0a 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -42,11 +42,9 @@
 unsigned long pci_probe_only = 1;
 int pci_assign_all_buses = 0;
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
 static void fixup_resource(struct resource *res, struct pci_dev *dev);
 static void do_bus_setup(struct pci_bus *bus);
 static void phbs_remap_io(void);
-#endif
 
 /* pci_io_base -- the base address from which io bars are offsets.
  * This is the lowest I/O base address (so bar values are always positive),
@@ -63,7 +61,7 @@
 
 LIST_HEAD(hose_list);
 
-struct dma_mapping_ops pci_dma_ops;
+struct dma_mapping_ops *pci_dma_ops;
 EXPORT_SYMBOL(pci_dma_ops);
 
 int global_phb_number;		/* Global phb counter */
@@ -212,6 +210,10 @@
 
 void pcibios_free_controller(struct pci_controller *phb)
 {
+	spin_lock(&hose_spinlock);
+	list_del(&phb->list_node);
+	spin_unlock(&hose_spinlock);
+
 	if (phb->is_dynamic)
 		kfree(phb);
 }
@@ -251,7 +253,6 @@
 		pcibios_claim_one_bus(b);
 }
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
 static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
 {
 	const u32 *prop;
@@ -329,7 +330,7 @@
 	struct pci_dev *dev;
 	const char *type;
 
-	dev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL);
+	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
 	if (!dev)
 		return NULL;
 	type = get_property(node, "device_type", NULL);
@@ -338,7 +339,6 @@
 
 	DBG("    create device, devfn: %x, type: %s\n", devfn, type);
 
-	memset(dev, 0, sizeof(struct pci_dev));
 	dev->bus = bus;
 	dev->sysdata = node;
 	dev->dev.parent = bus->bridge;
@@ -506,7 +506,6 @@
 		pci_scan_child_bus(bus);
 }
 EXPORT_SYMBOL(of_scan_pci_bridge);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
 
 void __devinit scan_phb(struct pci_controller *hose)
 {
@@ -517,7 +516,7 @@
 
 	DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
 
-	bus = pci_create_bus(NULL, hose->first_busno, hose->ops, node);
+	bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
 	if (bus == NULL) {
 		printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
 		       hose->global_number);
@@ -540,7 +539,7 @@
 	}
 
 	mode = PCI_PROBE_NORMAL;
-#ifdef CONFIG_PPC_MULTIPLATFORM
+
 	if (node && ppc_md.pci_probe_mode)
 		mode = ppc_md.pci_probe_mode(bus);
 	DBG("    probe mode: %d\n", mode);
@@ -548,7 +547,7 @@
 		bus->subordinate = hose->last_busno;
 		of_scan_bus(node, bus);
 	}
-#endif /* CONFIG_PPC_MULTIPLATFORM */
+
 	if (mode == PCI_PROBE_NORMAL)
 		hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
 }
@@ -592,11 +591,9 @@
 	if (ppc64_isabridge_dev != NULL)
 		printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
 	if (!firmware_has_feature(FW_FEATURE_ISERIES))
 		/* map in PCI I/O space */
 		phbs_remap_io();
-#endif
 
 	printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
 
@@ -873,8 +870,6 @@
 	device_create_file(&pdev->dev, &dev_attr_devspec);
 }
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
-
 #define ISA_SPACE_MASK 0x1
 #define ISA_SPACE_IO 0x1
 
@@ -975,11 +970,7 @@
 		res = NULL;
 		pci_space = ranges[0];
 		pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
-
-		cpu_phys_addr = ranges[3];
-		if (na >= 2)
-			cpu_phys_addr = (cpu_phys_addr << 32) | ranges[4];
-
+		cpu_phys_addr = of_translate_address(dev, &ranges[3]);
 		size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
 		ranges += np;
 		if (size == 0)
@@ -1145,7 +1136,7 @@
 	
 	if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
 		return 1;
-	if (iounmap_explicit((void __iomem *) start_virt, size))
+	if (__iounmap_explicit((void __iomem *) start_virt, size))
 		return 1;
 
 	return 0;
@@ -1213,23 +1204,52 @@
 }
 EXPORT_SYMBOL(pcibios_fixup_device_resources);
 
+void __devinit pcibios_setup_new_device(struct pci_dev *dev)
+{
+	struct dev_archdata *sd = &dev->dev.archdata;
+
+	sd->of_node = pci_device_to_OF_node(dev);
+
+	DBG("PCI device %s OF node: %s\n", pci_name(dev),
+	    sd->of_node ? sd->of_node->full_name : "<none>");
+
+	sd->dma_ops = pci_dma_ops;
+#ifdef CONFIG_NUMA
+	sd->numa_node = pcibus_to_node(dev->bus);
+#else
+	sd->numa_node = -1;
+#endif
+	if (ppc_md.pci_dma_dev_setup)
+		ppc_md.pci_dma_dev_setup(dev);
+}
+EXPORT_SYMBOL(pcibios_setup_new_device);
 
 static void __devinit do_bus_setup(struct pci_bus *bus)
 {
 	struct pci_dev *dev;
 
-	ppc_md.iommu_bus_setup(bus);
+	if (ppc_md.pci_dma_bus_setup)
+		ppc_md.pci_dma_bus_setup(bus);
 
 	list_for_each_entry(dev, &bus->devices, bus_list)
-		ppc_md.iommu_dev_setup(dev);
+		pcibios_setup_new_device(dev);
 
-	if (ppc_md.irq_bus_setup)
-		ppc_md.irq_bus_setup(bus);
+	/* Read default IRQs and fixup if necessary */
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		pci_read_irq_line(dev);
+		if (ppc_md.pci_irq_fixup)
+			ppc_md.pci_irq_fixup(dev);
+	}
 }
 
 void __devinit pcibios_fixup_bus(struct pci_bus *bus)
 {
 	struct pci_dev *dev = bus->self;
+	struct device_node *np;
+
+	np = pci_bus_to_OF_node(bus);
+
+	DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>");
 
 	if (dev && pci_probe_only &&
 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
@@ -1343,8 +1363,6 @@
 	return NULL;
 }
 
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
 unsigned long pci_address_to_pio(phys_addr_t address)
 {
 	struct pci_controller *hose, *tmp;
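
pcibios_setup_new_device() above fills dev.archdata with the generic
pci_dma_ops, the device's OF node and its NUMA node, then gives the platform
a chance to attach per-device DMA data via ppc_md.pci_dma_dev_setup. A
minimal sketch of such a hook, where my_find_iommu_table() is a hypothetical
platform helper:

static void my_platform_pci_dma_dev_setup(struct pci_dev *pdev)
{
	/* The generic code already set archdata.dma_ops and of_node;
	 * the platform only supplies the per-device IOMMU table.
	 */
	pdev->dev.archdata.dma_data = my_find_iommu_table(pdev);
}
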
diff --git a/arch/powerpc/kernel/pci_direct_iommu.c b/arch/powerpc/kernel/pci_direct_iommu.c
deleted file mode 100644
index 72ce082..0000000
--- a/arch/powerpc/kernel/pci_direct_iommu.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Support for DMA from PCI devices to main memory on
- * machines without an iommu or with directly addressable
- * RAM (typically a pmac with 2Gb of RAM or less)
- *
- * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/sections.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/abs_addr.h>
-#include <asm/ppc-pci.h>
-
-static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
-				   dma_addr_t *dma_handle, gfp_t flag)
-{
-	void *ret;
-
-	ret = (void *)__get_free_pages(flag, get_order(size));
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		*dma_handle = virt_to_abs(ret);
-	}
-	return ret;
-}
-
-static void pci_direct_free_coherent(struct device *hwdev, size_t size,
-				 void *vaddr, dma_addr_t dma_handle)
-{
-	free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t pci_direct_map_single(struct device *hwdev, void *ptr,
-		size_t size, enum dma_data_direction direction)
-{
-	return virt_to_abs(ptr);
-}
-
-static void pci_direct_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
-		size_t size, enum dma_data_direction direction)
-{
-}
-
-static int pci_direct_map_sg(struct device *hwdev, struct scatterlist *sg,
-		int nents, enum dma_data_direction direction)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		sg->dma_address = page_to_phys(sg->page) + sg->offset;
-		sg->dma_length = sg->length;
-	}
-
-	return nents;
-}
-
-static void pci_direct_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-		int nents, enum dma_data_direction direction)
-{
-}
-
-static int pci_direct_dma_supported(struct device *dev, u64 mask)
-{
-	return mask < 0x100000000ull;
-}
-
-static struct dma_mapping_ops pci_direct_ops = {
-	.alloc_coherent = pci_direct_alloc_coherent,
-	.free_coherent = pci_direct_free_coherent,
-	.map_single = pci_direct_map_single,
-	.unmap_single = pci_direct_unmap_single,
-	.map_sg = pci_direct_map_sg,
-	.unmap_sg = pci_direct_unmap_sg,
-	.dma_supported = pci_direct_dma_supported,
-};
-
-void __init pci_direct_iommu_init(void)
-{
-	pci_dma_ops = pci_direct_ops;
-}
diff --git a/arch/powerpc/kernel/pci_iommu.c b/arch/powerpc/kernel/pci_iommu.c
deleted file mode 100644
index 0688b25..0000000
--- a/arch/powerpc/kernel/pci_iommu.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
- *
- * Rewrite, cleanup, new allocation schemes:
- * Copyright (C) 2004 Olof Johansson, IBM Corporation
- *
- * Dynamic DMA mapping support, platform-independent parts.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/iommu.h>
-#include <asm/pci-bridge.h>
-#include <asm/machdep.h>
-#include <asm/ppc-pci.h>
-
-/*
- * We can use ->sysdata directly and avoid the extra work in
- * pci_device_to_OF_node since ->sysdata will have been initialised
- * in the iommu init code for all devices.
- */
-#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
-
-static inline struct iommu_table *device_to_table(struct device *hwdev)
-{
-	struct pci_dev *pdev;
-
-	if (!hwdev) {
-		pdev = ppc64_isabridge_dev;
-		if (!pdev)
-			return NULL;
-	} else
-		pdev = to_pci_dev(hwdev);
-
-	return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
-}
-
-
-static inline unsigned long device_to_mask(struct device *hwdev)
-{
-	struct pci_dev *pdev;
-
-	if (!hwdev) {
-		pdev = ppc64_isabridge_dev;
-		if (!pdev) /* This is the best guess we can do */
-			return 0xfffffffful;
-	} else
-		pdev = to_pci_dev(hwdev);
-
-	if (pdev->dma_mask)
-		return pdev->dma_mask;
-
-	/* Assume devices without mask can take 32 bit addresses */
-	return 0xfffffffful;
-}
-
-
-/* Allocates a contiguous real buffer and creates mappings over it.
- * Returns the virtual address of the buffer and sets dma_handle
- * to the dma address (mapping) of the first page.
- */
-static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flag)
-{
-	return iommu_alloc_coherent(device_to_table(hwdev), size, dma_handle,
-			device_to_mask(hwdev), flag,
-			pcibus_to_node(to_pci_dev(hwdev)->bus));
-}
-
-static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
-			 void *vaddr, dma_addr_t dma_handle)
-{
-	iommu_free_coherent(device_to_table(hwdev), size, vaddr, dma_handle);
-}
-
-/* Creates TCEs for a user provided buffer.  The user buffer must be 
- * contiguous real kernel storage (not vmalloc).  The address of the buffer
- * passed here is the kernel (virtual) address of the buffer.  The buffer
- * need not be page aligned, the dma_addr_t returned will point to the same
- * byte within the page as vaddr.
- */
-static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
-		size_t size, enum dma_data_direction direction)
-{
-	return iommu_map_single(device_to_table(hwdev), vaddr, size,
-			        device_to_mask(hwdev), direction);
-}
-
-
-static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle,
-		size_t size, enum dma_data_direction direction)
-{
-	iommu_unmap_single(device_to_table(hwdev), dma_handle, size, direction);
-}
-
-
-static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
-		int nelems, enum dma_data_direction direction)
-{
-	return iommu_map_sg(pdev, device_to_table(pdev), sglist,
-			nelems, device_to_mask(pdev), direction);
-}
-
-static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
-		int nelems, enum dma_data_direction direction)
-{
-	iommu_unmap_sg(device_to_table(pdev), sglist, nelems, direction);
-}
-
-/* We support DMA to/from any memory page via the iommu */
-static int pci_iommu_dma_supported(struct device *dev, u64 mask)
-{
-	struct iommu_table *tbl = device_to_table(dev);
-
-	if (!tbl || tbl->it_offset > mask) {
-		printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n");
-		if (tbl)
-			printk(KERN_INFO "mask: 0x%08lx, table offset: 0x%08lx\n",
-				mask, tbl->it_offset);
-		else
-			printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
-				mask);
-		return 0;
-	} else
-		return 1;
-}
-
-struct dma_mapping_ops pci_iommu_ops = {
-	.alloc_coherent = pci_iommu_alloc_coherent,
-	.free_coherent = pci_iommu_free_coherent,
-	.map_single = pci_iommu_map_single,
-	.unmap_single = pci_iommu_unmap_single,
-	.map_sg = pci_iommu_map_sg,
-	.unmap_sg = pci_iommu_unmap_sg,
-	.dma_supported = pci_iommu_dma_supported,
-};
-
-void pci_iommu_init(void)
-{
-	pci_dma_ops = pci_iommu_ops;
-}
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 807193a..9179f07 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -49,6 +49,10 @@
 #include <asm/commproc.h>
 #endif
 
+#ifdef CONFIG_PPC64
+EXPORT_SYMBOL(local_irq_restore);
+#endif
+
 #ifdef CONFIG_PPC32
 extern void transfer_to_handler(void);
 extern void do_IRQ(struct pt_regs *regs);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index bdb412d..c18dbe7 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -538,35 +538,31 @@
 	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
 };
 
-static void __init check_cpu_pa_features(unsigned long node)
+static void __init scan_features(unsigned long node, unsigned char *ftrs,
+				 unsigned long tablelen,
+				 struct ibm_pa_feature *fp,
+				 unsigned long ft_size)
 {
-	unsigned char *pa_ftrs;
-	unsigned long len, tablelen, i, bit;
-
-	pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
-	if (pa_ftrs == NULL)
-		return;
+	unsigned long i, len, bit;
 
 	/* find descriptor with type == 0 */
 	for (;;) {
 		if (tablelen < 3)
 			return;
-		len = 2 + pa_ftrs[0];
+		len = 2 + ftrs[0];
 		if (tablelen < len)
 			return;		/* descriptor 0 not found */
-		if (pa_ftrs[1] == 0)
+		if (ftrs[1] == 0)
 			break;
 		tablelen -= len;
-		pa_ftrs += len;
+		ftrs += len;
 	}
 
 	/* loop over bits we know about */
-	for (i = 0; i < ARRAY_SIZE(ibm_pa_features); ++i) {
-		struct ibm_pa_feature *fp = &ibm_pa_features[i];
-
-		if (fp->pabyte >= pa_ftrs[0])
+	for (i = 0; i < ft_size; ++i, ++fp) {
+		if (fp->pabyte >= ftrs[0])
 			continue;
-		bit = (pa_ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
+		bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
 		if (bit ^ fp->invert) {
 			cur_cpu_spec->cpu_features |= fp->cpu_features;
 			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
@@ -577,16 +573,59 @@
 	}
 }
 
+static void __init check_cpu_pa_features(unsigned long node)
+{
+	unsigned char *pa_ftrs;
+	unsigned long tablelen;
+
+	pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
+	if (pa_ftrs == NULL)
+		return;
+
+	scan_features(node, pa_ftrs, tablelen,
+		      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
+}
+
+static struct feature_property {
+	const char *name;
+	u32 min_value;
+	unsigned long cpu_feature;
+	unsigned long cpu_user_ftr;
+} feature_properties[] __initdata = {
+#ifdef CONFIG_ALTIVEC
+	{"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
+	{"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_PPC64
+	{"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
+	{"ibm,purr", 1, CPU_FTR_PURR, 0},
+	{"ibm,spurr", 1, CPU_FTR_SPURR, 0},
+#endif /* CONFIG_PPC64 */
+};
+
+static void __init check_cpu_feature_properties(unsigned long node)
+{
+	unsigned long i;
+	struct feature_property *fp = feature_properties;
+	const u32 *prop;
+
+	for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) {
+		prop = of_get_flat_dt_prop(node, fp->name, NULL);
+		if (prop && *prop >= fp->min_value) {
+			cur_cpu_spec->cpu_features |= fp->cpu_feature;
+			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
+		}
+	}
+}
+
 static int __init early_init_dt_scan_cpus(unsigned long node,
 					  const char *uname, int depth,
 					  void *data)
 {
 	static int logical_cpuid = 0;
 	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-#ifdef CONFIG_ALTIVEC
-	u32 *prop;
-#endif
-	u32 *intserv;
+	const u32 *prop;
+	const u32 *intserv;
 	int i, nthreads;
 	unsigned long len;
 	int found = 0;
@@ -643,24 +682,27 @@
 			intserv[i]);
 		boot_cpuid = logical_cpuid;
 		set_hard_smp_processor_id(boot_cpuid, intserv[i]);
+
+		/*
+		 * PAPR defines "logical" PVR values for cpus that
+		 * meet various levels of the architecture:
+		 * 0x0f000001	Architecture version 2.04
+		 * 0x0f000002	Architecture version 2.05
+		 * If the cpu-version property in the cpu node contains
+		 * such a value, we call identify_cpu again with the
+		 * logical PVR value in order to use the cpu feature
+		 * bits appropriate for the architecture level.
+		 *
+		 * A POWER6 partition in "POWER6 architected" mode
+		 * uses the 0x0f000002 PVR value; in POWER5+ mode
+		 * it uses 0x0f000001.
+		 */
+		prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
+		if (prop && (*prop & 0xff000000) == 0x0f000000)
+			identify_cpu(0, *prop);
 	}
 
-#ifdef CONFIG_ALTIVEC
-	/* Check if we have a VMX and eventually update CPU features */
-	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
-	if (prop && (*prop) > 0) {
-		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
-		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
-	}
-
-	/* Same goes for Apple's "altivec" property */
-	prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
-	if (prop) {
-		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
-		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
-	}
-#endif /* CONFIG_ALTIVEC */
-
+	check_cpu_feature_properties(node);
 	check_cpu_pa_features(node);
 
 #ifdef CONFIG_PPC_PSERIES
@@ -1674,6 +1716,7 @@
 	}
 	return NULL;
 }
+EXPORT_SYMBOL(of_get_cpu_node);
 
 #ifdef DEBUG
 static struct debugfs_blob_wrapper flat_dt_blob;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index b9176163..46cf326 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -173,8 +173,8 @@
 static unsigned long __initdata prom_initrd_start, prom_initrd_end;
 
 #ifdef CONFIG_PPC64
-static int __initdata iommu_force_on;
-static int __initdata ppc64_iommu_off;
+static int __initdata prom_iommu_force_on;
+static int __initdata prom_iommu_off;
 static unsigned long __initdata prom_tce_alloc_start;
 static unsigned long __initdata prom_tce_alloc_end;
 #endif
@@ -582,9 +582,9 @@
 		while (*opt && *opt == ' ')
 			opt++;
 		if (!strncmp(opt, RELOC("off"), 3))
-			RELOC(ppc64_iommu_off) = 1;
+			RELOC(prom_iommu_off) = 1;
 		else if (!strncmp(opt, RELOC("force"), 5))
-			RELOC(iommu_force_on) = 1;
+			RELOC(prom_iommu_force_on) = 1;
 	}
 #endif
 }
@@ -627,6 +627,7 @@
 /* Option vector 3: processor options supported */
 #define OV3_FP			0x80	/* floating point */
 #define OV3_VMX			0x40	/* VMX/Altivec */
+#define OV3_DFP			0x20	/* decimal FP */
 
 /* Option vector 5: PAPR/OF options supported */
 #define OV5_LPAR		0x80	/* logical partitioning supported */
@@ -642,6 +643,7 @@
 static unsigned char ibm_architecture_vec[] = {
 	W(0xfffe0000), W(0x003a0000),	/* POWER5/POWER5+ */
 	W(0xffff0000), W(0x003e0000),	/* POWER6 */
+	W(0xffffffff), W(0x0f000002),	/* all 2.05-compliant */
 	W(0xfffffffe), W(0x0f000001),	/* all 2.04-compliant and earlier */
 	5 - 1,				/* 5 option vectors */
 
@@ -668,7 +670,7 @@
 	/* option vector 3: processor options supported */
 	3 - 2,				/* length */
 	0,				/* don't ignore, don't halt */
-	OV3_FP | OV3_VMX,
+	OV3_FP | OV3_VMX | OV3_DFP,
 
 	/* option vector 4: IBM PAPR implementation */
 	2 - 2,				/* length */
@@ -1167,7 +1169,7 @@
 	u64 local_alloc_top, local_alloc_bottom;
 	u64 i;
 
-	if (RELOC(ppc64_iommu_off))
+	if (RELOC(prom_iommu_off))
 		return;
 
 	prom_debug("starting prom_initialize_tce_table\n");
@@ -2283,11 +2285,11 @@
 	 * Fill in some infos for use by the kernel later on
 	 */
 #ifdef CONFIG_PPC64
-	if (RELOC(ppc64_iommu_off))
+	if (RELOC(prom_iommu_off))
 		prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
 			     NULL, 0);
 
-	if (RELOC(iommu_force_on))
+	if (RELOC(prom_iommu_force_on))
 		prom_setprop(_prom->chosen, "/chosen", "linux,iommu-force-on",
 			     NULL, 0);
 
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 603dff3..0dfbe1c 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -25,6 +25,12 @@
 #define OF_CHECK_COUNTS(na, ns)	((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \
 			(ns) > 0)
 
+static struct of_bus *of_match_bus(struct device_node *np);
+static int __of_address_to_resource(struct device_node *dev,
+		const u32 *addrp, u64 size, unsigned int flags,
+		struct resource *r);
+
+
 /* Debug utility */
 #ifdef DEBUG
 static void of_dump_addr(const char *s, const u32 *addr, int na)
@@ -101,6 +107,7 @@
 }
 
 
+#ifdef CONFIG_PCI
 /*
  * PCI bus specific translator
  */
@@ -153,15 +160,156 @@
 	switch((w >> 24) & 0x03) {
 	case 0x01:
 		flags |= IORESOURCE_IO;
+		break;
 	case 0x02: /* 32 bits */
 	case 0x03: /* 64 bits */
 		flags |= IORESOURCE_MEM;
+		break;
 	}
 	if (w & 0x40000000)
 		flags |= IORESOURCE_PREFETCH;
 	return flags;
 }
 
+const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
+			unsigned int *flags)
+{
+	const u32 *prop;
+	unsigned int psize;
+	struct device_node *parent;
+	struct of_bus *bus;
+	int onesize, i, na, ns;
+
+	/* Get parent & match bus type */
+	parent = of_get_parent(dev);
+	if (parent == NULL)
+		return NULL;
+	bus = of_match_bus(parent);
+	if (strcmp(bus->name, "pci")) {
+		of_node_put(parent);
+		return NULL;
+	}
+	bus->count_cells(dev, &na, &ns);
+	of_node_put(parent);
+	if (!OF_CHECK_COUNTS(na, ns))
+		return NULL;
+
+	/* Get "reg" or "assigned-addresses" property */
+	prop = get_property(dev, bus->addresses, &psize);
+	if (prop == NULL)
+		return NULL;
+	psize /= 4;
+
+	onesize = na + ns;
+	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
+		if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
+			if (size)
+				*size = of_read_number(prop + na, ns);
+			if (flags)
+				*flags = bus->get_flags(prop);
+			return prop;
+		}
+	return NULL;
+}
+EXPORT_SYMBOL(of_get_pci_address);
+
+int of_pci_address_to_resource(struct device_node *dev, int bar,
+			       struct resource *r)
+{
+	const u32	*addrp;
+	u64		size;
+	unsigned int	flags;
+
+	addrp = of_get_pci_address(dev, bar, &size, &flags);
+	if (addrp == NULL)
+		return -EINVAL;
+	return __of_address_to_resource(dev, addrp, size, flags, r);
+}
+EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
+
+static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
+{
+	return (((pin - 1) + slot) % 4) + 1;
+}
+
+int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+{
+	struct device_node *dn, *ppnode;
+	struct pci_dev *ppdev;
+	u32 lspec;
+	u32 laddr[3];
+	u8 pin;
+	int rc;
+
+	/* Check if we have a device node; if yes, fall back to standard OF
+	 * parsing.
+	 */
+	dn = pci_device_to_OF_node(pdev);
+	if (dn)
+		return of_irq_map_one(dn, 0, out_irq);
+
+	/* OK, we don't, time to have fun. Let's start by building up an
+	 * interrupt spec.  We assume #interrupt-cells is 1, which is standard
+	 * for PCI. If yours differs, don't use this routine.
+	 */
+	rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
+	if (rc != 0)
+		return rc;
+	/* No pin, exit */
+	if (pin == 0)
+		return -ENODEV;
+
+	/* Now we walk up the PCI tree */
+	lspec = pin;
+	for (;;) {
+		/* Get the pci_dev of our parent */
+		ppdev = pdev->bus->self;
+
+		/* Ouch, it's a host bridge... */
+		if (ppdev == NULL) {
+#ifdef CONFIG_PPC64
+			ppnode = pci_bus_to_OF_node(pdev->bus);
+#else
+			struct pci_controller *host;
+			host = pci_bus_to_host(pdev->bus);
+			ppnode = host ? host->arch_data : NULL;
+#endif
+			/* No node for the host bridge? Give up. */
+			if (ppnode == NULL)
+				return -EINVAL;
+		} else
+			/* We found a P2P bridge, check if it has a node */
+			ppnode = pci_device_to_OF_node(ppdev);
+
+		/* OK, we have found a parent with a device node; hand over to
+		 * the OF parsing code.
+		 * We build a unit address from the Linux device to be used for
+		 * resolution. Note that we use the Linux bus number, which may
+		 * not match your firmware bus numbering.
+		 * Fortunately, in most cases, interrupt-map-mask doesn't include
+		 * the bus number as part of the matching.
+		 * You should still be careful about that if you intend to rely
+		 * on this function (i.e. you ship firmware that doesn't create
+		 * device nodes for all PCI devices).
+		 */
+		if (ppnode)
+			break;
+
+		/* We can only get here if we hit a P2P bridge with no node,
+		 * let's do standard swizzling and try again
+		 */
+		lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
+		pdev = ppdev;
+	}
+
+	laddr[0] = (pdev->bus->number << 16)
+		| (pdev->devfn << 8);
+	laddr[1]  = laddr[2] = 0;
+	return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
+}
+EXPORT_SYMBOL_GPL(of_irq_map_pci);
+#endif /* CONFIG_PCI */
+
 /*
  * ISA bus specific translator
  */
@@ -223,6 +371,7 @@
  */
 
 static struct of_bus of_busses[] = {
+#ifdef CONFIG_PCI
 	/* PCI */
 	{
 		.name = "pci",
@@ -233,6 +382,7 @@
 		.translate = of_bus_pci_translate,
 		.get_flags = of_bus_pci_get_flags,
 	},
+#endif /* CONFIG_PCI */
 	/* ISA */
 	{
 		.name = "isa",
@@ -445,48 +595,6 @@
 }
 EXPORT_SYMBOL(of_get_address);
 
-const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
-			unsigned int *flags)
-{
-	const u32 *prop;
-	unsigned int psize;
-	struct device_node *parent;
-	struct of_bus *bus;
-	int onesize, i, na, ns;
-
-	/* Get parent & match bus type */
-	parent = of_get_parent(dev);
-	if (parent == NULL)
-		return NULL;
-	bus = of_match_bus(parent);
-	if (strcmp(bus->name, "pci")) {
-		of_node_put(parent);
-		return NULL;
-	}
-	bus->count_cells(dev, &na, &ns);
-	of_node_put(parent);
-	if (!OF_CHECK_COUNTS(na, ns))
-		return NULL;
-
-	/* Get "reg" or "assigned-addresses" property */
-	prop = get_property(dev, bus->addresses, &psize);
-	if (prop == NULL)
-		return NULL;
-	psize /= 4;
-
-	onesize = na + ns;
-	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
-		if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
-			if (size)
-				*size = of_read_number(prop + na, ns);
-			if (flags)
-				*flags = bus->get_flags(prop);
-			return prop;
-		}
-	return NULL;
-}
-EXPORT_SYMBOL(of_get_pci_address);
-
 static int __of_address_to_resource(struct device_node *dev, const u32 *addrp,
 				    u64 size, unsigned int flags,
 				    struct resource *r)
@@ -529,20 +637,6 @@
 }
 EXPORT_SYMBOL_GPL(of_address_to_resource);
 
-int of_pci_address_to_resource(struct device_node *dev, int bar,
-			       struct resource *r)
-{
-	const u32	*addrp;
-	u64		size;
-	unsigned int	flags;
-
-	addrp = of_get_pci_address(dev, bar, &size, &flags);
-	if (addrp == NULL)
-		return -EINVAL;
-	return __of_address_to_resource(dev, addrp, size, flags, r);
-}
-EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
-
 void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
 		unsigned long *busno, unsigned long *phys, unsigned long *size)
 {
@@ -898,87 +992,3 @@
 	return res;
 }
 EXPORT_SYMBOL_GPL(of_irq_map_one);
-
-#ifdef CONFIG_PCI
-static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
-{
-	return (((pin - 1) + slot) % 4) + 1;
-}
-
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
-{
-	struct device_node *dn, *ppnode;
-	struct pci_dev *ppdev;
-	u32 lspec;
-	u32 laddr[3];
-	u8 pin;
-	int rc;
-
-	/* Check if we have a device node, if yes, fallback to standard OF
-	 * parsing
-	 */
-	dn = pci_device_to_OF_node(pdev);
-	if (dn)
-		return of_irq_map_one(dn, 0, out_irq);
-
-	/* Ok, we don't, time to have fun. Let's start by building up an
-	 * interrupt spec.  we assume #interrupt-cells is 1, which is standard
-	 * for PCI. If you do different, then don't use that routine.
-	 */
-	rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
-	if (rc != 0)
-		return rc;
-	/* No pin, exit */
-	if (pin == 0)
-		return -ENODEV;
-
-	/* Now we walk up the PCI tree */
-	lspec = pin;
-	for (;;) {
-		/* Get the pci_dev of our parent */
-		ppdev = pdev->bus->self;
-
-		/* Ouch, it's a host bridge... */
-		if (ppdev == NULL) {
-#ifdef CONFIG_PPC64
-			ppnode = pci_bus_to_OF_node(pdev->bus);
-#else
-			struct pci_controller *host;
-			host = pci_bus_to_host(pdev->bus);
-			ppnode = host ? host->arch_data : NULL;
-#endif
-			/* No node for host bridge ? give up */
-			if (ppnode == NULL)
-				return -EINVAL;
-		} else
-			/* We found a P2P bridge, check if it has a node */
-			ppnode = pci_device_to_OF_node(ppdev);
-
-		/* Ok, we have found a parent with a device-node, hand over to
-		 * the OF parsing code.
-		 * We build a unit address from the linux device to be used for
-		 * resolution. Note that we use the linux bus number which may
-		 * not match your firmware bus numbering.
-		 * Fortunately, in most cases, interrupt-map-mask doesn't include
-		 * the bus number as part of the matching.
-		 * You should still be careful about that though if you intend
-		 * to rely on this function (you ship  a firmware that doesn't
-		 * create device nodes for all PCI devices).
-		 */
-		if (ppnode)
-			break;
-
-		/* We can only get here if we hit a P2P bridge with no node,
-		 * let's do standard swizzling and try again
-		 */
-		lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
-		pdev = ppdev;
-	}
-
-	laddr[0] = (pdev->bus->number << 16)
-		| (pdev->devfn << 8);
-	laddr[1]  = laddr[2] = 0;
-	return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
-}
-EXPORT_SYMBOL_GPL(of_irq_map_pci);
-#endif /* CONFIG_PCI */
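
For reference, the swizzle used by of_irq_map_pci() above when it crosses a
P2P bridge that has no device node is the conventional rotation of the
interrupt pin by the slot number. The helper below merely restates the
formula for illustration and is not part of the patch:

/* Illustration only: pin is 1..4 (INTA..INTD), slot is the device number. */
static inline unsigned int swizzle_example(unsigned int slot, unsigned int pin)
{
	return (((pin - 1) + slot) % 4) + 1;
}
/* swizzle_example(3, 1) == 4: INTA of a device in slot 3 shows up as INTD
 * one bridge up; swizzling that result again across slot 2 gives INTB (2).
 */
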
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 6ef80d4..387ed0d 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -810,9 +810,9 @@
 	return 0;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 /* This version can't take the spinlock, because it never returns */
-
-struct rtas_args rtas_stop_self_args = {
+static struct rtas_args rtas_stop_self_args = {
 	/* The token is initialized for real in setup_system() */
 	.token = RTAS_UNKNOWN_SERVICE,
 	.nargs = 0,
@@ -834,6 +834,7 @@
 
 	panic("Alas, I survived.\n");
 }
+#endif
 
 /*
  * Call early during boot, before mem init or bootmem, to retrieve the RTAS
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 6f6fc97..7d0f13f 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -101,7 +101,7 @@
 static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
 
 /* Use slab cache to guarantee 4k alignment */
-static kmem_cache_t *flash_block_cache = NULL;
+static struct kmem_cache *flash_block_cache = NULL;
 
 #define FLASH_BLOCK_LIST_VERSION (1UL)
 
@@ -286,7 +286,7 @@
 }
 
 /* constructor for flash_block_cache */
-void rtas_block_ctor(void *ptr, kmem_cache_t *cache, unsigned long flags)
+void rtas_block_ctor(void *ptr, struct kmem_cache *cache, unsigned long flags)
 {
 	memset(ptr, 0, RTAS_BLK_SIZE);
 }
@@ -681,14 +681,12 @@
 	int *status;
 	int token;
 
-	dp->data = kmalloc(buf_size, GFP_KERNEL);
+	dp->data = kzalloc(buf_size, GFP_KERNEL);
 	if (dp->data == NULL) {
 		remove_flash_pde(dp);
 		return -ENOMEM;
 	}
 
-	memset(dp->data, 0, buf_size);
-
 	/*
 	 * This code assumes that the status int is the first member of the
 	 * struct 
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index b4a0de7..ace9f4c 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -38,6 +38,7 @@
 #include <asm/rtas.h>
 #include <asm/mpic.h>
 #include <asm/ppc-pci.h>
+#include <asm/eeh.h>
 
 /* RTAS tokens */
 static int read_pci_config;
@@ -231,32 +232,13 @@
 
 unsigned long __devinit get_phb_buid (struct device_node *phb)
 {
-	int addr_cells;
-	const unsigned int *buid_vals;
-	unsigned int len;
-	unsigned long buid;
+	struct resource r;
 
-	if (ibm_read_pci_config == -1) return 0;
-
-	/* PHB's will always be children of the root node,
-	 * or so it is promised by the current firmware. */
-	if (phb->parent == NULL)
+	if (ibm_read_pci_config == -1)
 		return 0;
-	if (phb->parent->parent)
+	if (of_address_to_resource(phb, 0, &r))
 		return 0;
-
-	buid_vals = get_property(phb, "reg", &len);
-	if (buid_vals == NULL)
-		return 0;
-
-	addr_cells = prom_n_addr_cells(phb);
-	if (addr_cells == 1) {
-		buid = (unsigned long) buid_vals[0];
-	} else {
-		buid = (((unsigned long)buid_vals[0]) << 32UL) |
-			(((unsigned long)buid_vals[1]) & 0xffffffff);
-	}
-	return buid;
+	return r.start;
 }
 
 static int phb_set_bus_ranges(struct device_node *dev,
@@ -276,8 +258,10 @@
 	return 0;
 }
 
-int __devinit setup_phb(struct device_node *dev, struct pci_controller *phb)
+int __devinit rtas_setup_phb(struct pci_controller *phb)
 {
+	struct device_node *dev = phb->arch_data;
+
 	if (is_python(dev))
 		python_countermeasures(dev);
 
@@ -309,7 +293,7 @@
 		phb = pcibios_alloc_controller(node);
 		if (!phb)
 			continue;
-		setup_phb(node, phb);
+		rtas_setup_phb(phb);
 		pci_process_bridge_OF_ranges(phb, node, 0);
 		pci_setup_phb_io(phb, index == 0);
 		index++;
@@ -381,7 +365,6 @@
 		}
 	}
 
-	list_del(&phb->list_node);
 	pcibios_free_controller(phb);
 
 	return 0;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index a4c2964..61c65d1 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -63,10 +63,6 @@
 
 int have_of = 1;
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
-dev_t boot_dev;
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
 #ifdef CONFIG_VGA_CONSOLE
 unsigned long vgacon_remap_base;
 #endif
@@ -101,7 +97,7 @@
 	 * Identify the CPU type and fix up code sections
 	 * that depend on which cpu we have.
 	 */
-	spec = identify_cpu(offset);
+	spec = identify_cpu(offset, mfspr(SPRN_PVR));
 
 	do_feature_fixups(spec->cpu_features,
 			  PTRRELOC(&__start___ftr_fixup),
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 1627896..3733de3 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -33,6 +33,7 @@
 #include <linux/serial.h>
 #include <linux/serial_8250.h>
 #include <linux/bootmem.h>
+#include <linux/pci.h>
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -71,7 +72,6 @@
 
 int have_of = 1;
 int boot_cpuid = 0;
-dev_t boot_dev;
 u64 ppc64_pft_size;
 
 /* Pick defaults since we might want to patch instructions
@@ -171,7 +171,7 @@
 void __init early_setup(unsigned long dt_ptr)
 {
 	/* Identify CPU type */
-	identify_cpu(0);
+	identify_cpu(0, mfspr(SPRN_PVR));
 
 	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
 	setup_paca(0);
@@ -226,8 +226,8 @@
 {
 	struct paca_struct *lpaca = get_paca();
 
-	/* Mark enabled in PACA */
-	lpaca->proc_enabled = 0;
+	/* Mark interrupts disabled in PACA */
+	lpaca->soft_enabled = 0;
 
 	/* Initialize hash table for that CPU */
 	htab_initialize_secondary();
@@ -392,7 +392,8 @@
 	 * setting up the hash table pointers. It also sets up some interrupt-mapping
 	 * related options that will be used by finish_device_tree()
 	 */
-	ppc_md.init_early();
+	if (ppc_md.init_early)
+		ppc_md.init_early();
 
  	/*
 	 * We can discover serial ports now since the above did setup the
@@ -598,3 +599,10 @@
 	}
 }
 #endif
+
+
+#ifdef CONFIG_PPC_INDIRECT_IO
+struct ppc_pci_io ppc_pci_io;
+EXPORT_SYMBOL(ppc_pci_io);
+#endif /* CONFIG_PPC_INDIRECT_IO */
+
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 320353f..e4ebe1a 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -36,7 +36,7 @@
 #include <linux/stddef.h>
 #include <linux/tty.h>
 #include <linux/binfmts.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #endif
 
 #include <asm/uaccess.h>
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index de59c6c..bc892e69 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -78,7 +78,7 @@
 {
 	int i, score=0;
 	u64 tb;
-	long mark;
+	u64 mark;
 
 	tbsync->cmd = cmd;
 
@@ -116,8 +116,7 @@
 	printk("Synchronizing timebase\n");
 
 	/* if this fails then this kernel won't work anyway... */
-	tbsync = kmalloc( sizeof(*tbsync), GFP_KERNEL );
-	memset( tbsync, 0, sizeof(*tbsync) );
+	tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL );
 	mb();
 	running = 1;
 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 35c6309..9b28c23 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -65,6 +65,7 @@
 
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
+EXPORT_SYMBOL(cpu_sibling_map);
 
 /* SMP operations for this machine */
 struct smp_ops_t *smp_ops;
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index d15c33e..03a2a2f 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -51,6 +51,7 @@
 #include <asm/time.h>
 #include <asm/mmu_context.h>
 #include <asm/ppc-pci.h>
+#include <asm/syscalls.h>
 
 /* readdir & getdents */
 #define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index d45a168..63ed265 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -200,10 +200,9 @@
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
 	struct sys_device *s = &c->sysdev;
 
-#ifndef CONFIG_PPC_ISERIES
-	if (cpu_has_feature(CPU_FTR_SMT))
+	if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
+			cpu_has_feature(CPU_FTR_SMT))
 		sysdev_create_file(s, &attr_smt_snooze_delay);
-#endif
 
 	/* PMC stuff */
 
@@ -240,12 +239,11 @@
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
 	struct sys_device *s = &c->sysdev;
 
-	BUG_ON(c->no_control);
+	BUG_ON(!c->hotpluggable);
 
-#ifndef CONFIG_PPC_ISERIES
-	if (cpu_has_feature(CPU_FTR_SMT))
+	if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
+			cpu_has_feature(CPU_FTR_SMT))
 		sysdev_remove_file(s, &attr_smt_snooze_delay);
-#endif
 
 	/* PMC stuff */
 
@@ -299,6 +297,72 @@
 	.notifier_call	= sysfs_cpu_notify,
 };
 
+static DEFINE_MUTEX(cpu_mutex);
+
+int cpu_add_sysdev_attr(struct sysdev_attribute *attr)
+{
+	int cpu;
+
+	mutex_lock(&cpu_mutex);
+
+	for_each_possible_cpu(cpu) {
+		sysdev_create_file(get_cpu_sysdev(cpu), attr);
+	}
+
+	mutex_unlock(&cpu_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr);
+
+int cpu_add_sysdev_attr_group(struct attribute_group *attrs)
+{
+	int cpu;
+	struct sys_device *sysdev;
+
+	mutex_lock(&cpu_mutex);
+
+	for_each_possible_cpu(cpu) {
+		sysdev = get_cpu_sysdev(cpu);
+		sysfs_create_group(&sysdev->kobj, attrs);
+	}
+
+	mutex_unlock(&cpu_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr_group);
+
+
+void cpu_remove_sysdev_attr(struct sysdev_attribute *attr)
+{
+	int cpu;
+
+	mutex_lock(&cpu_mutex);
+
+	for_each_possible_cpu(cpu) {
+		sysdev_remove_file(get_cpu_sysdev(cpu), attr);
+	}
+
+	mutex_unlock(&cpu_mutex);
+}
+EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr);
+
+void cpu_remove_sysdev_attr_group(struct attribute_group *attrs)
+{
+	int cpu;
+	struct sys_device *sysdev;
+
+	mutex_lock(&cpu_mutex);
+
+	for_each_possible_cpu(cpu) {
+		sysdev = get_cpu_sysdev(cpu);
+		sysfs_remove_group(&sysdev->kobj, attrs);
+	}
+
+	mutex_unlock(&cpu_mutex);
+}
+EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr_group);
+
+
 /* NUMA stuff */
 
 #ifdef CONFIG_NUMA
@@ -360,10 +424,10 @@
 		 * CPU.  For instance, the boot cpu might never be valid
 		 * for hotplugging.
 		 */
-		if (!ppc_md.cpu_die)
-			c->no_control = 1;
+		if (ppc_md.cpu_die)
+			c->hotpluggable = 1;
 
-		if (cpu_online(cpu) || (c->no_control == 0)) {
+		if (cpu_online(cpu) || c->hotpluggable) {
 			register_cpu(c, cpu);
 
 			sysdev_create_file(&c->sysdev, &attr_physical_id);
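
The new cpu_add_sysdev_attr()/cpu_remove_sysdev_attr() helpers above let a
subsystem attach the same sysfs attribute to every possible CPU. A minimal
sketch, assuming a hypothetical attribute name and show routine:

static ssize_t show_example(struct sys_device *dev, char *buf)
{
	/* Report the CPU's sysdev id, just as a placeholder payload. */
	return sprintf(buf, "%u\n", dev->id);
}
static SYSDEV_ATTR(example, 0444, show_example, NULL);

static int __init example_cpu_attr_init(void)
{
	/* Creates /sys/devices/system/cpu/cpuN/example for every CPU. */
	return cpu_add_sysdev_attr(&attr_example);
}
device_initcall(example_cpu_attr_init);
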
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 46a24de..f6f0c6b 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -631,7 +631,8 @@
 	calculate_steal_time();
 
 #ifdef CONFIG_PPC_ISERIES
-	get_lppaca()->int_dword.fields.decr_int = 0;
+	if (firmware_has_feature(FW_FEATURE_ISERIES))
+		get_lppaca()->int_dword.fields.decr_int = 0;
 #endif
 
 	while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
@@ -674,7 +675,7 @@
 	set_dec(next_dec);
 
 #ifdef CONFIG_PPC_ISERIES
-	if (hvlpevent_is_pending())
+	if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
 		process_hvlpevents();
 #endif
 
@@ -774,7 +775,7 @@
 	 * settimeofday to perform this operation.
 	 */
 #ifdef CONFIG_PPC_ISERIES
-	if (first_settimeofday) {
+	if (firmware_has_feature(FW_FEATURE_ISERIES) && first_settimeofday) {
 		iSeries_tb_recal();
 		first_settimeofday = 0;
 	}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index c66b477..0d4e203 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -53,10 +53,6 @@
 #endif
 #include <asm/kexec.h>
 
-#ifdef CONFIG_PPC64	/* XXX */
-#define _IO_BASE	pci_io_base
-#endif
-
 #ifdef CONFIG_DEBUGGER
 int (*__debugger)(struct pt_regs *regs);
 int (*__debugger_ipi)(struct pt_regs *regs);
@@ -241,7 +237,7 @@
  */
 static inline int check_io_access(struct pt_regs *regs)
 {
-#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
+#ifdef CONFIG_PPC32
 	unsigned long msr = regs->msr;
 	const struct exception_table_entry *entry;
 	unsigned int *nip = (unsigned int *)regs->nip;
@@ -274,7 +270,7 @@
 			return 1;
 		}
 	}
-#endif /* CONFIG_PPC_PMAC && CONFIG_PPC32 */
+#endif /* CONFIG_PPC32 */
 	return 0;
 }
 
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index c913ad5..a4b28c7 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -264,7 +264,7 @@
 
 
 	/* Allocate a VMA structure and fill it up */
-	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma == NULL) {
 		rc = -ENOMEM;
 		goto fail_mmapsem;
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index ed00787..a80f8f1 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -81,15 +81,15 @@
 		struct iommu_table *tbl;
 		unsigned long offset, size;
 
-		dma_window = get_property(dev->dev.platform_data,
-				"ibm,my-dma-window", NULL);
+		dma_window = get_property(dev->dev.archdata.of_node,
+					  "ibm,my-dma-window", NULL);
 		if (!dma_window)
 			return NULL;
 
 		tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
 
-		of_parse_dma_window(dev->dev.platform_data, dma_window,
-				&tbl->it_index, &offset, &size);
+		of_parse_dma_window(dev->dev.archdata.of_node, dma_window,
+				    &tbl->it_index, &offset, &size);
 
 		/* TCE table size - measured in tce entries */
 		tbl->it_size = size >> IOMMU_PAGE_SHIFT;
@@ -117,7 +117,8 @@
 {
 	while (ids->type[0] != '\0') {
 		if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
-		    device_is_compatible(dev->dev.platform_data, ids->compat))
+		    device_is_compatible(dev->dev.archdata.of_node,
+					 ids->compat))
 			return ids;
 		ids++;
 	}
@@ -198,9 +199,9 @@
 /* vio_dev refcount hit 0 */
 static void __devinit vio_dev_release(struct device *dev)
 {
-	if (dev->platform_data) {
-		/* XXX free TCE table */
-		of_node_put(dev->platform_data);
+	if (dev->archdata.of_node) {
+		/* XXX should free TCE table */
+		of_node_put(dev->archdata.of_node);
 	}
 	kfree(to_vio_dev(dev));
 }
@@ -210,7 +211,7 @@
  * @of_node:	The OF node for this device.
  *
  * Creates and initializes a vio_dev structure from the data in
- * of_node (dev.platform_data) and adds it to the list of virtual devices.
+ * of_node and adds it to the list of virtual devices.
  * Returns a pointer to the created vio_dev or NULL if node has
  * NULL device_type or compatible fields.
  */
@@ -240,8 +241,6 @@
 	if (viodev == NULL)
 		return NULL;
 
-	viodev->dev.platform_data = of_node_get(of_node);
-
 	viodev->irq = irq_of_parse_and_map(of_node, 0);
 
 	snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
@@ -254,7 +253,10 @@
 		if (unit_address != NULL)
 			viodev->unit_address = *unit_address;
 	}
-	viodev->iommu_table = vio_build_iommu_table(viodev);
+	viodev->dev.archdata.of_node = of_node_get(of_node);
+	viodev->dev.archdata.dma_ops = &dma_iommu_ops;
+	viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
+	viodev->dev.archdata.numa_node = of_node_to_nid(of_node);
 
 	/* init generic 'struct device' fields: */
 	viodev->dev.parent = &vio_bus_device.dev;
@@ -285,10 +287,11 @@
 #ifdef CONFIG_PPC_ISERIES
 	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
 		iommu_vio_init();
-		vio_bus_device.iommu_table = &vio_iommu_table;
+		vio_bus_device.dev.archdata.dma_ops = &dma_iommu_ops;
+		vio_bus_device.dev.archdata.dma_data = &vio_iommu_table;
 		iSeries_vio_dev = &vio_bus_device.dev;
 	}
-#endif
+#endif /* CONFIG_PPC_ISERIES */
 
 	err = bus_register(&vio_bus_type);
 	if (err) {
@@ -336,7 +339,7 @@
 static ssize_t devspec_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct device_node *of_node = dev->platform_data;
+	struct device_node *of_node = dev->archdata.of_node;
 
 	return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
 }
@@ -353,62 +356,6 @@
 }
 EXPORT_SYMBOL(vio_unregister_device);
 
-static dma_addr_t vio_map_single(struct device *dev, void *vaddr,
-			  size_t size, enum dma_data_direction direction)
-{
-	return iommu_map_single(to_vio_dev(dev)->iommu_table, vaddr, size,
-			~0ul, direction);
-}
-
-static void vio_unmap_single(struct device *dev, dma_addr_t dma_handle,
-		      size_t size, enum dma_data_direction direction)
-{
-	iommu_unmap_single(to_vio_dev(dev)->iommu_table, dma_handle, size,
-			direction);
-}
-
-static int vio_map_sg(struct device *dev, struct scatterlist *sglist,
-		int nelems, enum dma_data_direction direction)
-{
-	return iommu_map_sg(dev, to_vio_dev(dev)->iommu_table, sglist,
-			nelems, ~0ul, direction);
-}
-
-static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist,
-		int nelems, enum dma_data_direction direction)
-{
-	iommu_unmap_sg(to_vio_dev(dev)->iommu_table, sglist, nelems, direction);
-}
-
-static void *vio_alloc_coherent(struct device *dev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flag)
-{
-	return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
-			dma_handle, ~0ul, flag, -1);
-}
-
-static void vio_free_coherent(struct device *dev, size_t size,
-			 void *vaddr, dma_addr_t dma_handle)
-{
-	iommu_free_coherent(to_vio_dev(dev)->iommu_table, size, vaddr,
-			dma_handle);
-}
-
-static int vio_dma_supported(struct device *dev, u64 mask)
-{
-	return 1;
-}
-
-struct dma_mapping_ops vio_dma_ops = {
-	.alloc_coherent = vio_alloc_coherent,
-	.free_coherent = vio_free_coherent,
-	.map_single = vio_map_single,
-	.unmap_single = vio_unmap_single,
-	.map_sg = vio_map_sg,
-	.unmap_sg = vio_unmap_sg,
-	.dma_supported = vio_dma_supported,
-};
-
 static int vio_bus_match(struct device *dev, struct device_driver *drv)
 {
 	const struct vio_dev *vio_dev = to_vio_dev(dev);
@@ -422,13 +369,14 @@
 			char *buffer, int buffer_size)
 {
 	const struct vio_dev *vio_dev = to_vio_dev(dev);
-	struct device_node *dn = dev->platform_data;
+	struct device_node *dn;
 	const char *cp;
 	int length;
 
 	if (!num_envp)
 		return -ENOMEM;
 
+	dn = dev->archdata.of_node;
 	if (!dn)
 		return -ENODEV;
 	cp = get_property(dn, "compatible", &length);
@@ -465,7 +413,7 @@
 */
 const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
 {
-	return get_property(vdev->dev.platform_data, which, length);
+	return get_property(vdev->dev.archdata.of_node, which, length);
 }
 EXPORT_SYMBOL(vio_get_attribute);
 
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index e8342d8..04b9867 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -33,6 +33,7 @@
 
 	/* Text and gots */
 	.text : {
+		_text = .;
 		*(.text .text.*)
 		SCHED_TEXT
 		LOCK_TEXT
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 93441e7..38a8196 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -8,7 +8,7 @@
 
 obj-y				:= fault.o mem.o lmb.o
 obj-$(CONFIG_PPC32)		+= init_32.o pgtable_32.o mmu_context_32.o
-hash-$(CONFIG_PPC_MULTIPLATFORM) := hash_native_64.o
+hash-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
 obj-$(CONFIG_PPC64)		+= init_64.o pgtable_64.o mmu_context_64.o \
 				   hash_utils_64.o hash_low_64.o tlb_64.o \
 				   slb_low.o slb.o stab.o mmap.o imalloc.o \
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index e8fa506..03aeb3a 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -426,18 +426,21 @@
 
 	/* kernel has accessed a bad area */
 
-	printk(KERN_ALERT "Unable to handle kernel paging request for ");
 	switch (regs->trap) {
-		case 0x300:
-		case 0x380:
-			printk("data at address 0x%08lx\n", regs->dar);
-			break;
-		case 0x400:
-		case 0x480:
-			printk("instruction fetch\n");
-			break;
-		default:
-			printk("unknown fault\n");
+	case 0x300:
+	case 0x380:
+		printk(KERN_ALERT "Unable to handle kernel paging request for "
+			"data at address 0x%08lx\n", regs->dar);
+		break;
+	case 0x400:
+	case 0x480:
+		printk(KERN_ALERT "Unable to handle kernel paging request for "
+			"instruction fetch\n");
+		break;
+	default:
+		printk(KERN_ALERT "Unable to handle kernel paging request for "
+			"unknown fault\n");
+		break;
 	}
 	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
 		regs->nip);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index c90f124..6f1016a 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -123,7 +123,7 @@
 	clear_bit(HPTE_LOCK_BIT, word);
 }
 
-long native_hpte_insert(unsigned long hpte_group, unsigned long va,
+static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 			unsigned long pa, unsigned long rflags,
 			unsigned long vflags, int psize)
 {
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1915661..c0d2a69 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -277,7 +277,7 @@
 	 * Not in the device-tree, let's fallback on known size
 	 * list for 16M capable GP & GR
 	 */
-	if (cpu_has_feature(CPU_FTR_16M_PAGE) && !machine_is(iseries))
+	if (cpu_has_feature(CPU_FTR_16M_PAGE))
 		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
 		       sizeof(mmu_psize_defaults_gp));
  found:
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 506d897..89c836d 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -146,6 +146,11 @@
 	return hugepte_offset(hpdp, addr);
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
 {
 	pte_t *hugepte = hugepd_page(*hpdp);
@@ -1042,7 +1047,7 @@
 	return err;
 }
 
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
 	memset(addr, 0, kmem_cache_size(cache));
 }
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 3ff3746..d12a87e 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -130,7 +130,7 @@
 		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
 		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
 		if (!kcore_mem)
-			panic("mem_init: kmalloc failed\n");
+			panic("%s: kmalloc failed\n", __FUNCTION__);
 
 		kclist_add(kcore_mem, __va(base), size);
 	}
@@ -141,7 +141,7 @@
 }
 module_init(setup_kcore);
 
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
 	memset(addr, 0, kmem_cache_size(cache));
 }
@@ -166,9 +166,9 @@
 /* Hugepages need one extra cache, initialized in hugetlbpage.c.  We
  * can't put into the tables above, because HPAGE_SHIFT is not compile
  * time constant. */
-kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
+struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
 #else
-kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
+struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
 #endif
 
 void pgtable_cache_init(void)
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 8fcacb0..1891dbe 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -141,31 +141,21 @@
 	__free_page(ptepage);
 }
 
-#ifndef CONFIG_PHYS_64BIT
 void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
 {
 	return __ioremap(addr, size, _PAGE_NO_CACHE);
 }
-#else /* CONFIG_PHYS_64BIT */
-void __iomem *
-ioremap64(unsigned long long addr, unsigned long size)
-{
-	return __ioremap(addr, size, _PAGE_NO_CACHE);
-}
-EXPORT_SYMBOL(ioremap64);
-
-void __iomem *
-ioremap(phys_addr_t addr, unsigned long size)
-{
-	phys_addr_t addr64 = fixup_bigphys_addr(addr, size);
-
-	return ioremap64(addr64, size);
-}
-#endif /* CONFIG_PHYS_64BIT */
 EXPORT_SYMBOL(ioremap);
 
 void __iomem *
+ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
+{
+	return __ioremap(addr, size, flags);
+}
+EXPORT_SYMBOL(ioremap_flags);
+
+void __iomem *
 __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
 	unsigned long v, i;
@@ -264,20 +254,7 @@
 }
 EXPORT_SYMBOL(iounmap);
 
-void __iomem *ioport_map(unsigned long port, unsigned int len)
-{
-	return (void __iomem *) (port + _IO_BASE);
-}
-
-void ioport_unmap(void __iomem *addr)
-{
-	/* Nothing to do */
-}
-EXPORT_SYMBOL(ioport_map);
-EXPORT_SYMBOL(ioport_unmap);
-
-int
-map_page(unsigned long va, phys_addr_t pa, int flags)
+int map_page(unsigned long va, phys_addr_t pa, int flags)
 {
 	pmd_t *pd;
 	pte_t *pg;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ac64f4a..16e4ee1 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -113,7 +113,7 @@
 }
 
 
-static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
+static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
 			    unsigned long ea, unsigned long size,
 			    unsigned long flags)
 {
@@ -129,22 +129,12 @@
 	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
 }
 
-
-void __iomem *
-ioremap(unsigned long addr, unsigned long size)
-{
-	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
-}
-
-void __iomem * __ioremap(unsigned long addr, unsigned long size,
+void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
 			 unsigned long flags)
 {
 	unsigned long pa, ea;
 	void __iomem *ret;
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return (void __iomem *)addr;
-
 	/*
 	 * Choose an address to map it to.
 	 * Once the imalloc system is running, we use it.
@@ -178,9 +168,28 @@
 	return ret;
 }
 
+
+void __iomem * ioremap(phys_addr_t addr, unsigned long size)
+{
+	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
+
+	if (ppc_md.ioremap)
+		return ppc_md.ioremap(addr, size, flags);
+	return __ioremap(addr, size, flags);
+}
+
+void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
+			     unsigned long flags)
+{
+	if (ppc_md.ioremap)
+		return ppc_md.ioremap(addr, size, flags);
+	return __ioremap(addr, size, flags);
+}
+
+
 #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
 
-int __ioremap_explicit(unsigned long pa, unsigned long ea,
+int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
 		       unsigned long size, unsigned long flags)
 {
 	struct vm_struct *area;
@@ -235,13 +244,10 @@
  *
  * XXX	what about calls before mem_init_done (ie python_countermeasures())
  */
-void iounmap(volatile void __iomem *token)
+void __iounmap(volatile void __iomem *token)
 {
 	void *addr;
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return;
-
 	if (!mem_init_done)
 		return;
 	
@@ -250,6 +256,14 @@
 	im_free(addr);
 }
 
+void iounmap(volatile void __iomem *token)
+{
+	if (ppc_md.iounmap)
+		ppc_md.iounmap(token);
+	else
+		__iounmap(token);
+}
+
 static int iounmap_subset_regions(unsigned long addr, unsigned long size)
 {
 	struct vm_struct *area;
@@ -268,7 +282,7 @@
 	return 0;
 }
 
-int iounmap_explicit(volatile void __iomem *start, unsigned long size)
+int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
 {
 	struct vm_struct *area;
 	unsigned long addr;
@@ -303,8 +317,10 @@
 }
 
 EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_flags);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(__iounmap);
 
 void __iomem * reserve_phb_iospace(unsigned long size)
 {
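
Note on the ppc_md.ioremap/ppc_md.iounmap hooks introduced above: a platform that cannot use the generic __ioremap()/__iounmap() path (the way legacy iSeries simply returned the address, see the code removed above) can now override the mapping at the machdep level. The following is a minimal illustrative sketch only, not part of the patch; the example_* names are hypothetical.

/* Illustrative sketch only (not part of this patch); example_* names are
 * hypothetical.  A platform hooks the new machdep-level ioremap/iounmap
 * much as legacy iSeries used to short-circuit __ioremap(). */
#include <asm/machdep.h>
#include <asm/io.h>

static void __iomem *example_ioremap(phys_addr_t addr, unsigned long size,
				     unsigned long flags)
{
	/* Return an already-mapped address instead of creating a new
	 * mapping with __ioremap(). */
	return (void __iomem *)(unsigned long)addr;
}

static void example_iounmap(volatile void __iomem *token)
{
	/* Nothing to tear down for the identity mapping above. */
}

static void __init example_setup_arch(void)
{
	ppc_md.ioremap = example_ioremap;
	ppc_md.iounmap = example_iounmap;
}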
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index d373391..224e960 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -23,6 +23,7 @@
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
 #include <asm/smp.h>
+#include <asm/firmware.h>
 #include <linux/compiler.h>
 
 #ifdef DEBUG
@@ -193,6 +194,7 @@
 void slb_initialize(void)
 {
 	unsigned long linear_llp, vmalloc_llp, io_llp;
+	unsigned long lflags, vflags;
 	static int slb_encoding_inited;
 	extern unsigned int *slb_miss_kernel_load_linear;
 	extern unsigned int *slb_miss_kernel_load_io;
@@ -225,11 +227,12 @@
 #endif
 	}
 
+	get_paca()->stab_rr = SLB_NUM_BOLTED;
+
 	/* On iSeries the bolted entries have already been set up by
 	 * the hypervisor from the lparMap data in head.S */
-#ifndef CONFIG_PPC_ISERIES
- {
-	unsigned long lflags, vflags;
+	if (firmware_has_feature(FW_FEATURE_ISERIES))
+		return;
 
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
@@ -247,8 +250,4 @@
 	 * elsewhere, we'll call _switch() which will bolt in the new
 	 * one. */
 	asm volatile("isync":::"memory");
- }
-#endif /* CONFIG_PPC_ISERIES */
-
-	get_paca()->stab_rr = SLB_NUM_BOLTED;
 }
diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
index 0b5df9c..4ccef2d 100644
--- a/arch/powerpc/oprofile/Makefile
+++ b/arch/powerpc/oprofile/Makefile
@@ -11,6 +11,7 @@
 		timer_int.o )
 
 oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
+oprofile-$(CONFIG_PPC_CELL_NATIVE) += op_model_cell.o
 oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o
 oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o
 oprofile-$(CONFIG_6xx) += op_model_7450.o
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 63bbef3..b6d8239 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -69,7 +69,10 @@
 
 static int op_powerpc_start(void)
 {
-	on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1);
+	if (model->global_start)
+		model->global_start(ctr);
+	if (model->start)
+		on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1);
 	return 0;
 }
 
@@ -80,7 +83,10 @@
 
 static void op_powerpc_stop(void)
 {
-	on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1);
+	if (model->stop)
+		on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1);
+	if (model->global_stop)
+		model->global_stop();
 }
 
 static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
@@ -141,6 +147,11 @@
 
 	switch (cur_cpu_spec->oprofile_type) {
 #ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_CELL_NATIVE
+		case PPC_OPROFILE_CELL:
+			model = &op_model_cell;
+			break;
+#endif
 		case PPC_OPROFILE_RS64:
 			model = &op_model_rs64;
 			break;
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
new file mode 100644
index 0000000..2eb15f3
--- /dev/null
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -0,0 +1,724 @@
+/*
+ * Cell Broadband Engine OProfile Support
+ *
+ * (C) Copyright IBM Corporation 2006
+ *
+ * Author: David Erb (djerb@us.ibm.com)
+ * Modifications:
+ *         Carl Love <carll@us.ibm.com>
+ *         Maynard Johnson <maynardj@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kthread.h>
+#include <linux/oprofile.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <asm/cell-pmu.h>
+#include <asm/cputable.h>
+#include <asm/firmware.h>
+#include <asm/io.h>
+#include <asm/oprofile_impl.h>
+#include <asm/processor.h>
+#include <asm/prom.h>
+#include <asm/ptrace.h>
+#include <asm/reg.h>
+#include <asm/rtas.h>
+#include <asm/system.h>
+
+#include "../platforms/cell/interrupt.h"
+
+#define PPU_CYCLES_EVENT_NUM 1	/*  event number for CYCLES */
+#define CBE_COUNT_ALL_CYCLES 0x42800000	/* PPU cycle event specifier */
+
+#define NUM_THREADS 2
+#define VIRT_CNTR_SW_TIME_NS 100000000	/* 0.1 seconds */
+
+struct pmc_cntrl_data {
+	unsigned long vcntr;
+	unsigned long evnts;
+	unsigned long masks;
+	unsigned long enabled;
+};
+
+/*
+ * ibm,cbe-perftools rtas parameters
+ */
+
+struct pm_signal {
+	u16 cpu;		/* Processor to modify */
+	u16 sub_unit;		/* hw subunit this applies to (if applicable) */
+	u16 signal_group;	/* Signal Group to Enable/Disable */
+	u8 bus_word;		/* Enable/Disable on this Trace/Trigger/Event
+				 * Bus Word(s) (bitmask)
+				 */
+	u8 bit;			/* Trigger/Event bit (if applicable) */
+};
+
+/*
+ * rtas call arguments
+ */
+enum {
+	SUBFUNC_RESET = 1,
+	SUBFUNC_ACTIVATE = 2,
+	SUBFUNC_DEACTIVATE = 3,
+
+	PASSTHRU_IGNORE = 0,
+	PASSTHRU_ENABLE = 1,
+	PASSTHRU_DISABLE = 2,
+};
+
+struct pm_cntrl {
+	u16 enable;
+	u16 stop_at_max;
+	u16 trace_mode;
+	u16 freeze;
+	u16 count_mode;
+};
+
+static struct {
+	u32 group_control;
+	u32 debug_bus_control;
+	struct pm_cntrl pm_cntrl;
+	u32 pm07_cntrl[NR_PHYS_CTRS];
+} pm_regs;
+
+
+#define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
+#define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
+#define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
+#define GET_POLARITY(x) ((x & 0x00000002) >> 1)
+#define GET_COUNT_CYCLES(x) (x & 0x00000001)
+#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
+
+
+static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
+
+static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
+
+/* Interpretation of hdw_thread:
+ * 0 - even virtual cpus 0, 2, 4,...
+ * 1 - odd virtual cpus 1, 3, 5, ...
+ */
+static u32 hdw_thread;
+
+static u32 virt_cntr_inter_mask;
+static struct timer_list timer_virt_cntr;
+
+/* pm_signal needs to be global since it is initialized in
+ * cell_reg_setup at the time when the necessary information
+ * is available.
+ */
+static struct pm_signal pm_signal[NR_PHYS_CTRS];
+static int pm_rtas_token;
+
+static u32 reset_value[NR_PHYS_CTRS];
+static int num_counters;
+static int oprofile_running;
+static spinlock_t virt_cntr_lock = SPIN_LOCK_UNLOCKED;
+
+static u32 ctr_enabled;
+
+static unsigned char trace_bus[4];
+static unsigned char input_bus[2];
+
+/*
+ * Firmware interface functions
+ */
+static int
+rtas_ibm_cbe_perftools(int subfunc, int passthru,
+		       void *address, unsigned long length)
+{
+	u64 paddr = __pa(address);
+
+	return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc, passthru,
+			 paddr >> 32, paddr & 0xffffffff, length);
+}
+
+static void pm_rtas_reset_signals(u32 node)
+{
+	int ret;
+	struct pm_signal pm_signal_local;
+
+	/*  The debug bus is being set to the passthru disable state.
+	 *  However, the FW still expects atleast one legal signal routing
+	 *  However, the FW still expects at least one legal signal routing
+	 *  supply a valid entry, we must ignore all return values.  Ignoring
+	 *  all return values means we might miss an error we should be
+	 *  concerned about.
+	 */
+
+	/*  fw expects physical cpu #. */
+	pm_signal_local.cpu = node;
+	pm_signal_local.signal_group = 21;
+	pm_signal_local.bus_word = 1;
+	pm_signal_local.sub_unit = 0;
+	pm_signal_local.bit = 0;
+
+	ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
+				     &pm_signal_local,
+				     sizeof(struct pm_signal));
+
+	if (ret)
+		printk(KERN_WARNING "%s: rtas returned: %d\n",
+		       __FUNCTION__, ret);
+}
+
+static void pm_rtas_activate_signals(u32 node, u32 count)
+{
+	int ret;
+	int j;
+	struct pm_signal pm_signal_local[NR_PHYS_CTRS];
+
+	for (j = 0; j < count; j++) {
+		/* fw expects physical cpu # */
+		pm_signal_local[j].cpu = node;
+		pm_signal_local[j].signal_group = pm_signal[j].signal_group;
+		pm_signal_local[j].bus_word = pm_signal[j].bus_word;
+		pm_signal_local[j].sub_unit = pm_signal[j].sub_unit;
+		pm_signal_local[j].bit = pm_signal[j].bit;
+	}
+
+	ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
+				     pm_signal_local,
+				     count * sizeof(struct pm_signal));
+
+	if (ret)
+		printk(KERN_WARNING "%s: rtas returned: %d\n",
+		       __FUNCTION__, ret);
+}
+
+/*
+ * PM Signal functions
+ */
+static void set_pm_event(u32 ctr, int event, u32 unit_mask)
+{
+	struct pm_signal *p;
+	u32 signal_bit;
+	u32 bus_word, bus_type, count_cycles, polarity, input_control;
+	int j, i;
+
+	if (event == PPU_CYCLES_EVENT_NUM) {
+		/* Special Event: Count all cpu cycles */
+		pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
+		p = &(pm_signal[ctr]);
+		p->signal_group = 21;
+		p->bus_word = 1;
+		p->sub_unit = 0;
+		p->bit = 0;
+		goto out;
+	} else {
+		pm_regs.pm07_cntrl[ctr] = 0;
+	}
+
+	bus_word = GET_BUS_WORD(unit_mask);
+	bus_type = GET_BUS_TYPE(unit_mask);
+	count_cycles = GET_COUNT_CYCLES(unit_mask);
+	polarity = GET_POLARITY(unit_mask);
+	input_control = GET_INPUT_CONTROL(unit_mask);
+	signal_bit = (event % 100);
+
+	p = &(pm_signal[ctr]);
+
+	p->signal_group = event / 100;
+	p->bus_word = bus_word;
+	p->sub_unit = unit_mask & 0x0000f000;
+
+	pm_regs.pm07_cntrl[ctr] = 0;
+	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
+	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
+	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);
+
+	if (input_control == 0) {
+		if (signal_bit > 31) {
+			signal_bit -= 32;
+			if (bus_word == 0x3)
+				bus_word = 0x2;
+			else if (bus_word == 0xc)
+				bus_word = 0x8;
+		}
+
+		if ((bus_type == 0) && p->signal_group >= 60)
+			bus_type = 2;
+		if ((bus_type == 1) && p->signal_group >= 50)
+			bus_type = 0;
+
+		pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
+	} else {
+		pm_regs.pm07_cntrl[ctr] = 0;
+		p->bit = signal_bit;
+	}
+
+	for (i = 0; i < 4; i++) {
+		if (bus_word & (1 << i)) {
+			pm_regs.debug_bus_control |=
+			    (bus_type << (31 - (2 * i) + 1));
+
+			for (j = 0; j < 2; j++) {
+				if (input_bus[j] == 0xff) {
+					input_bus[j] = i;
+					pm_regs.group_control |=
+					    (i << (31 - i));
+					break;
+				}
+			}
+		}
+	}
+out:
+	;
+}
+
+static void write_pm_cntrl(int cpu, struct pm_cntrl *pm_cntrl)
+{
+	/* Oprofile will use 32 bit counters, set bits 7:10 to 0 */
+	u32 val = 0;
+	if (pm_cntrl->enable == 1)
+		val |= CBE_PM_ENABLE_PERF_MON;
+
+	if (pm_cntrl->stop_at_max == 1)
+		val |= CBE_PM_STOP_AT_MAX;
+
+	if (pm_cntrl->trace_mode == 1)
+		val |= CBE_PM_TRACE_MODE_SET(pm_cntrl->trace_mode);
+
+	if (pm_cntrl->freeze == 1)
+		val |= CBE_PM_FREEZE_ALL_CTRS;
+
+	/* Routine set_count_mode must be called previously to set
+	 * the count mode based on the user selection of user and kernel.
+	 */
+	val |= CBE_PM_COUNT_MODE_SET(pm_cntrl->count_mode);
+	cbe_write_pm(cpu, pm_control, val);
+}
+
+static inline void
+set_count_mode(u32 kernel, u32 user, struct pm_cntrl *pm_cntrl)
+{
+	/* The user must specify user and kernel if they want them. If
+	 *  neither is specified, OProfile will count in hypervisor mode
+	 */
+	if (kernel) {
+		if (user)
+			pm_cntrl->count_mode = CBE_COUNT_ALL_MODES;
+		else
+			pm_cntrl->count_mode = CBE_COUNT_SUPERVISOR_MODE;
+	} else {
+		if (user)
+			pm_cntrl->count_mode = CBE_COUNT_PROBLEM_MODE;
+		else
+			pm_cntrl->count_mode = CBE_COUNT_HYPERVISOR_MODE;
+	}
+}
+
+static inline void enable_ctr(u32 cpu, u32 ctr, u32 * pm07_cntrl)
+{
+
+	pm07_cntrl[ctr] |= PM07_CTR_ENABLE(1);
+	cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
+}
+
+/*
+ * Oprofile is expected to collect data on all CPUs simultaneously.
+ * However, there is one set of performance counters per node.  There are
+ * two hardware threads or virtual CPUs on each node.  Hence, OProfile must
+ * multiplex in time the performance counter collection on the two virtual
+ * CPUs.  The multiplexing of the performance counters is done by this
+ * virtual counter routine.
+ *
+ * The pmc_values used below is defined as 'per-cpu' but its use is
+ * more akin to 'per-node'.  We need to store two sets of counter
+ * values per node -- one for the previous run and one for the next.
+ * The per-cpu[NR_PHYS_CTRS] gives us the storage we need.  Each odd/even
+ * pair of per-cpu arrays is used for storing the previous and next
+ * pmc values for a given node.
+ * NOTE: We use the per-cpu variable to improve cache performance.
+ */
+static void cell_virtual_cntr(unsigned long data)
+{
+	/* This routine will alternate loading the virtual counters for
+	 * virtual CPUs
+	 */
+	int i, prev_hdw_thread, next_hdw_thread;
+	u32 cpu;
+	unsigned long flags;
+
+	/* Make sure that the interrupt handler and
+	 * the virt counter are not both playing with
+	 * the counters on the same node.
+	 */
+
+	spin_lock_irqsave(&virt_cntr_lock, flags);
+
+	prev_hdw_thread = hdw_thread;
+
+	/* switch the cpu handling the interrupts */
+	hdw_thread = 1 ^ hdw_thread;
+	next_hdw_thread = hdw_thread;
+
+	/* The following is done only once per each node, but
+	 * we need cpu #, not node #, to pass to the cbe_xxx functions.
+	 */
+	for_each_online_cpu(cpu) {
+		if (cbe_get_hw_thread_id(cpu))
+			continue;
+
+		/* stop counters, save counter values, restore counts
+		 * for previous thread
+		 */
+		cbe_disable_pm(cpu);
+		cbe_disable_pm_interrupts(cpu);
+		for (i = 0; i < num_counters; i++) {
+			per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
+			    = cbe_read_ctr(cpu, i);
+
+			if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
+			    == 0xFFFFFFFF)
+				/* If the cntr value is 0xffffffff, we must
+				 * reset that to 0xfffffff0 when the current
+				 * thread is restarted.  This will generate a new
+				 * interrupt and make sure that we never restore
+				 * the counters to the max value.  If the counters
+				 * were restored to the max value, they do not
+				 * increment and no interrupts are generated.  Hence
+				 * no more samples will be collected on that cpu.
+				 */
+				cbe_write_ctr(cpu, i, 0xFFFFFFF0);
+			else
+				cbe_write_ctr(cpu, i,
+					      per_cpu(pmc_values,
+						      cpu +
+						      next_hdw_thread)[i]);
+		}
+
+		/* Switch to the other thread. Change the interrupt
+		 * and control regs to be scheduled on the CPU
+		 * corresponding to the thread to execute.
+		 */
+		for (i = 0; i < num_counters; i++) {
+			if (pmc_cntrl[next_hdw_thread][i].enabled) {
+				/* There are some per thread events.
+				 * Must do the set event, enable_cntr
+				 * for each cpu.
+				 */
+				set_pm_event(i,
+				     pmc_cntrl[next_hdw_thread][i].evnts,
+				     pmc_cntrl[next_hdw_thread][i].masks);
+				enable_ctr(cpu, i,
+					   pm_regs.pm07_cntrl);
+			} else {
+				cbe_write_pm07_control(cpu, i, 0);
+			}
+		}
+
+		/* Enable interrupts on the CPU thread that is starting */
+		cbe_enable_pm_interrupts(cpu, next_hdw_thread,
+					 virt_cntr_inter_mask);
+		cbe_enable_pm(cpu);
+	}
+
+	spin_unlock_irqrestore(&virt_cntr_lock, flags);
+
+	mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
+}
+
+static void start_virt_cntrs(void)
+{
+	init_timer(&timer_virt_cntr);
+	timer_virt_cntr.function = cell_virtual_cntr;
+	timer_virt_cntr.data = 0UL;
+	timer_virt_cntr.expires = jiffies + HZ / 10;
+	add_timer(&timer_virt_cntr);
+}
+
+/* This function is called once for all cpus combined */
+static void
+cell_reg_setup(struct op_counter_config *ctr,
+	       struct op_system_config *sys, int num_ctrs)
+{
+	int i, j, cpu;
+
+	pm_rtas_token = rtas_token("ibm,cbe-perftools");
+	if (pm_rtas_token == RTAS_UNKNOWN_SERVICE) {
+		printk(KERN_WARNING "%s: RTAS_UNKNOWN_SERVICE\n",
+		       __FUNCTION__);
+		goto out;
+	}
+
+	num_counters = num_ctrs;
+
+	pm_regs.group_control = 0;
+	pm_regs.debug_bus_control = 0;
+
+	/* setup the pm_control register */
+	memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl));
+	pm_regs.pm_cntrl.stop_at_max = 1;
+	pm_regs.pm_cntrl.trace_mode = 0;
+	pm_regs.pm_cntrl.freeze = 1;
+
+	set_count_mode(sys->enable_kernel, sys->enable_user,
+		       &pm_regs.pm_cntrl);
+
+	/* Setup the thread 0 events */
+	for (i = 0; i < num_ctrs; ++i) {
+
+		pmc_cntrl[0][i].evnts = ctr[i].event;
+		pmc_cntrl[0][i].masks = ctr[i].unit_mask;
+		pmc_cntrl[0][i].enabled = ctr[i].enabled;
+		pmc_cntrl[0][i].vcntr = i;
+
+		for_each_possible_cpu(j)
+			per_cpu(pmc_values, j)[i] = 0;
+	}
+
+	/* Setup the thread 1 events, map the thread 0 event to the
+	 * equivalent thread 1 event.
+	 */
+	for (i = 0; i < num_ctrs; ++i) {
+		if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
+			pmc_cntrl[1][i].evnts = ctr[i].event + 19;
+		else if (ctr[i].event == 2203)
+			pmc_cntrl[1][i].evnts = ctr[i].event;
+		else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
+			pmc_cntrl[1][i].evnts = ctr[i].event + 16;
+		else
+			pmc_cntrl[1][i].evnts = ctr[i].event;
+
+		pmc_cntrl[1][i].masks = ctr[i].unit_mask;
+		pmc_cntrl[1][i].enabled = ctr[i].enabled;
+		pmc_cntrl[1][i].vcntr = i;
+	}
+
+	for (i = 0; i < 4; i++)
+		trace_bus[i] = 0xff;
+
+	for (i = 0; i < 2; i++)
+		input_bus[i] = 0xff;
+
+	/* Our counters count up, and "count" refers to
+	 * how much before the next interrupt, and we interrupt
+	 * on overflow.  So we calculate the starting value
+	 * which will give us "count" until overflow.
+	 * Then we set the events on the enabled counters.
+	 */
+	for (i = 0; i < num_counters; ++i) {
+		/* start with virtual counter set 0 */
+		if (pmc_cntrl[0][i].enabled) {
+			/* Using 32bit counters, reset max - count */
+			reset_value[i] = 0xFFFFFFFF - ctr[i].count;
+			set_pm_event(i,
+				     pmc_cntrl[0][i].evnts,
+				     pmc_cntrl[0][i].masks);
+
+			/* global, used by cell_cpu_setup */
+			ctr_enabled |= (1 << i);
+		}
+	}
+
+	/* initialize the previous counts for the virtual cntrs */
+	for_each_online_cpu(cpu)
+		for (i = 0; i < num_counters; ++i) {
+			per_cpu(pmc_values, cpu)[i] = reset_value[i];
+		}
+out:
+	;
+}
+
+/* This function is called once for each cpu */
+static void cell_cpu_setup(struct op_counter_config *cntr)
+{
+	u32 cpu = smp_processor_id();
+	u32 num_enabled = 0;
+	int i;
+
+	/* There is one performance monitor per processor chip (i.e. node),
+	 * so we only need to perform this function once per node.
+	 */
+	if (cbe_get_hw_thread_id(cpu))
+		goto out;
+
+	if (pm_rtas_token == RTAS_UNKNOWN_SERVICE) {
+		printk(KERN_WARNING "%s: RTAS_UNKNOWN_SERVICE\n",
+		       __FUNCTION__);
+		goto out;
+	}
+
+	/* Stop all counters */
+	cbe_disable_pm(cpu);
+	cbe_disable_pm_interrupts(cpu);
+
+	cbe_write_pm(cpu, pm_interval, 0);
+	cbe_write_pm(cpu, pm_start_stop, 0);
+	cbe_write_pm(cpu, group_control, pm_regs.group_control);
+	cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
+	write_pm_cntrl(cpu, &pm_regs.pm_cntrl);
+
+	for (i = 0; i < num_counters; ++i) {
+		if (ctr_enabled & (1 << i)) {
+			pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
+			num_enabled++;
+		}
+	}
+
+	pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled);
+out:
+	;
+}
+
+static void cell_global_start(struct op_counter_config *ctr)
+{
+	u32 cpu;
+	u32 interrupt_mask = 0;
+	u32 i;
+
+	/* This routine gets called once for the system.
+	 * There is one performance monitor per node, so we
+	 * only need to perform this function once per node.
+	 */
+	for_each_online_cpu(cpu) {
+		if (cbe_get_hw_thread_id(cpu))
+			continue;
+
+		interrupt_mask = 0;
+
+		for (i = 0; i < num_counters; ++i) {
+			if (ctr_enabled & (1 << i)) {
+				cbe_write_ctr(cpu, i, reset_value[i]);
+				enable_ctr(cpu, i, pm_regs.pm07_cntrl);
+				interrupt_mask |=
+				    CBE_PM_CTR_OVERFLOW_INTR(i);
+			} else {
+				/* Disable counter */
+				cbe_write_pm07_control(cpu, i, 0);
+			}
+		}
+
+		cbe_clear_pm_interrupts(cpu);
+		cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
+		cbe_enable_pm(cpu);
+	}
+
+	virt_cntr_inter_mask = interrupt_mask;
+	oprofile_running = 1;
+	smp_wmb();
+
+	/* NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
+	 * executed which manipulates the PMU.  We start the "virtual counter"
+	 * here so that we do not need to synchronize access to the PMU in
+	 * the above for-loop.
+	 */
+	start_virt_cntrs();
+}
+
+static void cell_global_stop(void)
+{
+	int cpu;
+
+	/* This routine will be called once for the system.
+	 * There is one performance monitor per node, so we
+	 * only need to perform this function once per node.
+	 */
+	del_timer_sync(&timer_virt_cntr);
+	oprofile_running = 0;
+	smp_wmb();
+
+	for_each_online_cpu(cpu) {
+		if (cbe_get_hw_thread_id(cpu))
+			continue;
+
+		cbe_sync_irq(cbe_cpu_to_node(cpu));
+		/* Stop the counters */
+		cbe_disable_pm(cpu);
+
+		/* Deactivate the signals */
+		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+		/* Deactivate interrupts */
+		cbe_disable_pm_interrupts(cpu);
+	}
+}
+
+static void
+cell_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr)
+{
+	u32 cpu;
+	u64 pc;
+	int is_kernel;
+	unsigned long flags = 0;
+	u32 interrupt_mask;
+	int i;
+
+	cpu = smp_processor_id();
+
+	/* Need to make sure the interrupt handler and the virt counter
+	 * routine are not running at the same time. See the
+	 * cell_virtual_cntr() routine for additional comments.
+	 */
+	spin_lock_irqsave(&virt_cntr_lock, flags);
+
+	/* Need to disable and reenable the performance counters
+	 * to get the desired behavior from the hardware.  This
+	 * is hardware specific.
+	 */
+
+	cbe_disable_pm(cpu);
+
+	interrupt_mask = cbe_clear_pm_interrupts(cpu);
+
+	/* If the interrupt mask has been cleared, then the virt cntr
+	 * has cleared the interrupt.  When the thread that generated
+	 * the interrupt is restored, the data count will be restored to
+	 * 0xfffffff0 to cause the interrupt to be regenerated.
+	 */
+
+	if ((oprofile_running == 1) && (interrupt_mask != 0)) {
+		pc = regs->nip;
+		is_kernel = is_kernel_addr(pc);
+
+		for (i = 0; i < num_counters; ++i) {
+			if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
+			    && ctr[i].enabled) {
+				oprofile_add_pc(pc, is_kernel, i);
+				cbe_write_ctr(cpu, i, reset_value[i]);
+			}
+		}
+
+		/* The counters were frozen by the interrupt.
+		 * Reenable the interrupt and restart the counters.
+		 * If there was a race between the interrupt handler and
+		 * the virtual counter routine, the virtual counter routine
+		 * may have cleared the interrupts.  Hence we must use
+		 * virt_cntr_inter_mask to re-enable the interrupts.
+		 */
+		cbe_enable_pm_interrupts(cpu, hdw_thread,
+					 virt_cntr_inter_mask);
+
+		/* The writes to the various performance counters only write
+		 * to a latch.  The new values (interrupt setting bits, reset
+		 * counter value etc.) are not copied to the actual registers
+		 * until the performance monitor is enabled.  In order to get
+		 * this to work as desired, the performance monitor needs to
+		 * be disabled while writing to the latches.  This is a
+		 * HW design issue.
+		 */
+		cbe_enable_pm(cpu);
+	}
+	spin_unlock_irqrestore(&virt_cntr_lock, flags);
+}
+
+struct op_powerpc_model op_model_cell = {
+	.reg_setup = cell_reg_setup,
+	.cpu_setup = cell_cpu_setup,
+	.global_start = cell_global_start,
+	.global_stop = cell_global_stop,
+	.handle_interrupt = cell_handle_interrupt,
+};
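
For context on the model selection and start/stop changes in oprofile/common.c above: a model such as op_model_cell supplies global_start/global_stop, which the common layer now invokes once for the whole system instead of the per-cpu start/stop hooks. A minimal sketch of that interface follows; it is illustrative only, not part of the patch, and the example_* names are hypothetical.

/* Illustrative sketch only (not part of this patch); example_* names are
 * hypothetical.  Shape of a per-node model as driven by the updated
 * op_powerpc_start()/op_powerpc_stop() in common.c. */
#include <linux/oprofile.h>
#include <asm/oprofile_impl.h>

static void example_reg_setup(struct op_counter_config *ctr,
			      struct op_system_config *sys, int num_ctrs)
{
	/* Record the requested events and counts, once for the system. */
}

static void example_cpu_setup(struct op_counter_config *ctr)
{
	/* Called on every cpu; a per-node model acts once per node only. */
}

static void example_global_start(struct op_counter_config *ctr)
{
	/* Enable counting on every node; replaces the per-cpu .start hook. */
}

static void example_global_stop(void)
{
	/* Stop counting on every node; replaces the per-cpu .stop hook. */
}

static void example_handle_interrupt(struct pt_regs *regs,
				     struct op_counter_config *ctr)
{
	/* Record samples with oprofile_add_pc() and rearm the counters. */
}

static struct op_powerpc_model example_model = {
	.reg_setup	  = example_reg_setup,
	.cpu_setup	  = example_cpu_setup,
	.global_start	  = example_global_start,
	.global_stop	  = example_global_stop,
	.handle_interrupt = example_handle_interrupt,
};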
diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
new file mode 100644
index 0000000..a46184a
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for 52xx based boards
+#
+ifeq ($(CONFIG_PPC_MERGE),y)
+obj-y				+= mpc52xx_pic.o mpc52xx_common.o
+endif
+
+obj-$(CONFIG_PPC_EFIKA)		+= efika-setup.o efika-pci.o
+obj-$(CONFIG_PPC_LITE5200)	+= lite5200.o
diff --git a/arch/powerpc/platforms/52xx/efika-pci.c b/arch/powerpc/platforms/52xx/efika-pci.c
new file mode 100644
index 0000000..62e05b2
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/efika-pci.c
@@ -0,0 +1,119 @@
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/sections.h>
+#include <asm/pci-bridge.h>
+#include <asm/rtas.h>
+
+#include "efika.h"
+
+#ifdef CONFIG_PCI
+/*
+ * Access functions for PCI config space using RTAS calls.
+ */
+static int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
+			    int len, u32 * val)
+{
+	struct pci_controller *hose = bus->sysdata;
+	unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
+	    | (((bus->number - hose->first_busno) & 0xff) << 16)
+	    | (hose->index << 24);
+	int ret = -1;
+	int rval;
+
+	rval = rtas_call(rtas_token("read-pci-config"), 2, 2, &ret, addr, len);
+	*val = ret;
+	return rval ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+}
+
+static int rtas_write_config(struct pci_bus *bus, unsigned int devfn,
+			     int offset, int len, u32 val)
+{
+	struct pci_controller *hose = bus->sysdata;
+	unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
+	    | (((bus->number - hose->first_busno) & 0xff) << 16)
+	    | (hose->index << 24);
+	int rval;
+
+	rval = rtas_call(rtas_token("write-pci-config"), 3, 1, NULL,
+			 addr, len, val);
+	return rval ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops rtas_pci_ops = {
+	rtas_read_config,
+	rtas_write_config
+};
+
+void __init efika_pcisetup(void)
+{
+	const int *bus_range;
+	int len;
+	struct pci_controller *hose;
+	struct device_node *root;
+	struct device_node *pcictrl;
+
+	root = of_find_node_by_path("/");
+	if (root == NULL) {
+		printk(KERN_WARNING EFIKA_PLATFORM_NAME
+		       ": Unable to find the root node\n");
+		return;
+	}
+
+	for (pcictrl = NULL;;) {
+		pcictrl = of_get_next_child(root, pcictrl);
+		if ((pcictrl == NULL) || (strcmp(pcictrl->name, "pci") == 0))
+			break;
+	}
+
+	of_node_put(root);
+
+	if (pcictrl == NULL) {
+		printk(KERN_WARNING EFIKA_PLATFORM_NAME
+		       ": Unable to find the PCI bridge node\n");
+		return;
+	}
+
+	bus_range = get_property(pcictrl, "bus-range", &len);
+	if (bus_range == NULL || len < 2 * sizeof(int)) {
+		printk(KERN_WARNING EFIKA_PLATFORM_NAME
+		       ": Can't get bus-range for %s\n", pcictrl->full_name);
+		return;
+	}
+
+	if (bus_range[1] == bus_range[0])
+		printk(KERN_INFO EFIKA_PLATFORM_NAME ": PCI bus %d",
+		       bus_range[0]);
+	else
+		printk(KERN_INFO EFIKA_PLATFORM_NAME ": PCI buses %d..%d",
+		       bus_range[0], bus_range[1]);
+	printk(" controlled by %s\n", pcictrl->full_name);
+	printk("\n");
+
+	hose = pcibios_alloc_controller();
+	if (!hose) {
+		printk(KERN_WARNING EFIKA_PLATFORM_NAME
+		       ": Can't allocate PCI controller structure for %s\n",
+		       pcictrl->full_name);
+		return;
+	}
+
+	hose->arch_data = of_node_get(pcictrl);
+	hose->first_busno = bus_range[0];
+	hose->last_busno = bus_range[1];
+	hose->ops = &rtas_pci_ops;
+
+	pci_process_bridge_OF_ranges(hose, pcictrl, 0);
+}
+
+#else
+void __init efika_pcisetup(void)
+{}
+#endif
diff --git a/arch/powerpc/platforms/52xx/efika-setup.c b/arch/powerpc/platforms/52xx/efika-setup.c
new file mode 100644
index 0000000..110c980
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/efika-setup.c
@@ -0,0 +1,150 @@
+/*
+ *
+ * Efika 5K2 platform setup
+ * Some code heavily inspired by the lite5200b platform.
+ * 
+ * Copyright (C) 2006 bplan GmbH
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/utsrelease.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/initrd.h>
+#include <linux/timer.h>
+#include <linux/pci.h>
+
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+#include <asm/machdep.h>
+#include <asm/rtas.h>
+#include <asm/of_device.h>
+#include <asm/of_platform.h>
+#include <asm/mpc52xx.h>
+
+#include "efika.h"
+
+static void efika_show_cpuinfo(struct seq_file *m)
+{
+	struct device_node *root;
+	const char *revision = NULL;
+	const char *codegendescription = NULL;
+	const char *codegenvendor = NULL;
+
+	root = of_find_node_by_path("/");
+	if (root) {
+		revision = get_property(root, "revision", NULL);
+		codegendescription =
+		    get_property(root, "CODEGEN,description", NULL);
+		codegenvendor = get_property(root, "CODEGEN,vendor", NULL);
+
+		of_node_put(root);
+	}
+
+	if (codegendescription)
+		seq_printf(m, "machine\t\t: %s\n", codegendescription);
+	else
+		seq_printf(m, "machine\t\t: Efika\n");
+
+	if (revision)
+		seq_printf(m, "revision\t: %s\n", revision);
+
+	if (codegenvendor)
+		seq_printf(m, "vendor\t\t: %s\n", codegenvendor);
+
+	of_node_put(root);
+}
+
+static void __init efika_setup_arch(void)
+{
+	rtas_initialize();
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	initrd_below_start_ok = 1;
+
+	if (initrd_start)
+		ROOT_DEV = Root_RAM0;
+	else
+#endif
+		ROOT_DEV = Root_SDA2;	/* sda2 (sda1 is for the kernel) */
+
+	efika_pcisetup();
+
+	if (ppc_md.progress)
+		ppc_md.progress("Linux/PPC " UTS_RELEASE " running on Efika ;-)\n", 0x0);
+}
+
+static void __init efika_init(void)
+{
+	struct device_node *np;
+	struct device_node *cnp = NULL;
+	const u32 *base;
+
+	/* Find every child of the SOC node and add it to of_platform */
+	np = of_find_node_by_name(NULL, "builtin");
+	if (np) {
+		char name[BUS_ID_SIZE];
+		while ((cnp = of_get_next_child(np, cnp))) {
+			strcpy(name, cnp->name);
+
+			base = get_property(cnp, "reg", NULL);
+			if (base == NULL)
+				continue;
+
+			snprintf(name+strlen(name), BUS_ID_SIZE, "@%x", *base);
+			of_platform_device_create(cnp, name, NULL);
+
+			printk(KERN_INFO EFIKA_PLATFORM_NAME" : Added %s (type '%s' at '%s') to the known devices\n", name, cnp->type, cnp->full_name);
+		}
+	}
+
+	if (ppc_md.progress)
+		ppc_md.progress("  Have fun with your Efika!    ", 0x7777);
+}
+
+static int __init efika_probe(void)
+{
+	char *model = of_get_flat_dt_prop(of_get_flat_dt_root(),
+					  "model", NULL);
+
+	if (model == NULL)
+		return 0;
+	if (strcmp(model, "EFIKA5K2"))
+		return 0;
+
+	ISA_DMA_THRESHOLD = ~0L;
+	DMA_MODE_READ = 0x44;
+	DMA_MODE_WRITE = 0x48;
+
+	return 1;
+}
+
+define_machine(efika)
+{
+	.name = EFIKA_PLATFORM_NAME,
+	.probe = efika_probe,
+	.setup_arch = efika_setup_arch,
+	.init = efika_init,
+	.show_cpuinfo = efika_show_cpuinfo,
+	.init_IRQ = mpc52xx_init_irq,
+	.get_irq = mpc52xx_get_irq,
+	.restart = rtas_restart,
+	.power_off = rtas_power_off,
+	.halt = rtas_halt,
+	.set_rtc_time = rtas_set_rtc_time,
+	.get_rtc_time = rtas_get_rtc_time,
+	.progress = rtas_progress,
+	.get_boot_time = rtas_get_boot_time,
+	.calibrate_decr = generic_calibrate_decr,
+	.phys_mem_access_prot = pci_phys_mem_access_prot,
+};
diff --git a/arch/powerpc/platforms/52xx/efika.h b/arch/powerpc/platforms/52xx/efika.h
new file mode 100644
index 0000000..2f060fd
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/efika.h
@@ -0,0 +1,19 @@
+/*
+ * Efika 5K2 platform setup - Header file
+ *
+ * Copyright (C) 2006 bplan GmbH
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#ifndef __ARCH_POWERPC_EFIKA__
+#define __ARCH_POWERPC_EFIKA__
+
+#define EFIKA_PLATFORM_NAME "Efika"
+
+extern void __init efika_pcisetup(void);
+
+#endif
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
new file mode 100644
index 0000000..a375c15
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -0,0 +1,162 @@
+/*
+ * Freescale Lite5200 board support
+ *
+ * Written by: Grant Likely <grant.likely@secretlab.ca>
+ *
+ * Copyright (C) Secret Lab Technologies Ltd. 2006. All rights reserved.
+ * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
+ *
+ * Description:
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#undef DEBUG
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/initrd.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/time.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/ipic.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <sysdev/fsl_soc.h>
+#include <asm/qe.h>
+#include <asm/qe_ic.h>
+#include <asm/of_platform.h>
+
+#include <asm/mpc52xx.h>
+
+/* ************************************************************************
+ *
+ * Setup the architecture
+ *
+ */
+
+static void __init
+lite52xx_setup_cpu(void)
+{
+	struct mpc52xx_gpio __iomem *gpio;
+	u32 port_config;
+
+	/* Map zones */
+	gpio = mpc52xx_find_and_map("mpc52xx-gpio");
+	if (!gpio) {
+		printk(KERN_ERR __FILE__ ": "
+			"Error while mapping GPIO register for port config. "
+			"Expect some abnormal behavior\n");
+		goto error;
+	}
+
+	/* Set port config */
+	port_config = in_be32(&gpio->port_config);
+
+	port_config &= ~0x00800000;	/* 48Mhz internal, pin is GPIO	*/
+
+	port_config &= ~0x00007000;	/* USB port : Differential mode	*/
+	port_config |=  0x00001000;	/*            USB 1 only	*/
+
+	port_config &= ~0x03000000;	/* ATA CS is on csb_4/5		*/
+	port_config |=  0x01000000;
+
+	pr_debug("port_config: old:%x new:%x\n",
+	         in_be32(&gpio->port_config), port_config);
+	out_be32(&gpio->port_config, port_config);
+
+	/* Unmap zone */
+error:
+	iounmap(gpio);
+}
+
+static void __init lite52xx_setup_arch(void)
+{
+	struct device_node *np;
+
+	if (ppc_md.progress)
+		ppc_md.progress("lite52xx_setup_arch()", 0);
+
+	np = of_find_node_by_type(NULL, "cpu");
+	if (np) {
+		unsigned int *fp =
+		    (int *)get_property(np, "clock-frequency", NULL);
+		if (fp != 0)
+			loops_per_jiffy = *fp / HZ;
+		else
+			loops_per_jiffy = 50000000 / HZ;
+		of_node_put(np);
+	}
+
+	/* CPU & Port mux setup */
+	mpc52xx_setup_cpu();	/* Generic */
+	lite52xx_setup_cpu();	/* Platform specific */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (initrd_start)
+		ROOT_DEV = Root_RAM0;
+	else
+#endif
+#ifdef  CONFIG_ROOT_NFS
+		ROOT_DEV = Root_NFS;
+#else
+		ROOT_DEV = Root_HDA1;
+#endif
+
+}
+
+void lite52xx_show_cpuinfo(struct seq_file *m)
+{
+	struct device_node* np = of_find_all_nodes(NULL);
+	const char *model = NULL;
+
+	if (np)
+		model = get_property(np, "model", NULL);
+
+	seq_printf(m, "vendor\t\t:	Freescale Semiconductor\n");
+	seq_printf(m, "machine\t\t:	%s\n", model ? model : "unknown");
+
+	of_node_put(np);
+}
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init lite52xx_probe(void)
+{
+	unsigned long node = of_get_flat_dt_root();
+	const char *model = of_get_flat_dt_prop(node, "model", NULL);
+
+	if (!of_flat_dt_is_compatible(node, "lite52xx"))
+		return 0;
+	pr_debug("%s board w/ mpc52xx found\n", model ? model : "unknown");
+
+	return 1;
+}
+
+define_machine(lite52xx) {
+	.name 		= "lite52xx",
+	.probe 		= lite52xx_probe,
+	.setup_arch 	= lite52xx_setup_arch,
+	.init_IRQ 	= mpc52xx_init_irq,
+	.get_irq 	= mpc52xx_get_irq,
+	.show_cpuinfo	= lite52xx_show_cpuinfo,
+	.calibrate_decr	= generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
new file mode 100644
index 0000000..8331ff4
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -0,0 +1,126 @@
+/*
+ *
+ * Utility functions for the Freescale MPC52xx.
+ *
+ * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/of_platform.h>
+#include <asm/mpc52xx.h>
+
+
+void __iomem *
+mpc52xx_find_and_map(const char *compatible)
+{
+	struct device_node *ofn;
+	const u32 *regaddr_p;
+	u64 regaddr64, size64;
+
+	ofn = of_find_compatible_node(NULL, NULL, compatible);
+	if (!ofn)
+		return NULL;
+
+	regaddr_p = of_get_address(ofn, 0, &size64, NULL);
+	if (!regaddr_p) {
+		of_node_put(ofn);
+		return NULL;
+	}
+
+	regaddr64 = of_translate_address(ofn, regaddr_p);
+
+	of_node_put(ofn);
+
+	return ioremap((u32)regaddr64, (u32)size64);
+}
+EXPORT_SYMBOL(mpc52xx_find_and_map);
+
+
+/**
+ * 	mpc52xx_find_ipb_freq - Find the IPB bus frequency for a device
+ * 	@node:	device node
+ *
+ * 	Returns IPB bus frequency, or 0 if the bus frequency cannot be found.
+ */
+unsigned int
+mpc52xx_find_ipb_freq(struct device_node *node)
+{
+	struct device_node *np;
+	const unsigned int *p_ipb_freq = NULL;
+
+	of_node_get(node);
+	while (node) {
+		p_ipb_freq = get_property(node, "bus-frequency", NULL);
+		if (p_ipb_freq)
+			break;
+
+		np = of_get_parent(node);
+		of_node_put(node);
+		node = np;
+	}
+	if (node)
+		of_node_put(node);
+
+	return p_ipb_freq ? *p_ipb_freq : 0;
+}
+EXPORT_SYMBOL(mpc52xx_find_ipb_freq);
+
+
+void __init
+mpc52xx_setup_cpu(void)
+{
+	struct mpc52xx_cdm  __iomem *cdm;
+	struct mpc52xx_xlb  __iomem *xlb;
+
+	/* Map zones */
+	cdm = mpc52xx_find_and_map("mpc52xx-cdm");
+	xlb = mpc52xx_find_and_map("mpc52xx-xlb");
+
+	if (!cdm || !xlb) {
+		printk(KERN_ERR __FILE__ ": "
+			"Error while mapping CDM/XLB during mpc52xx_setup_cpu. "
+			"Expect some abnormal behavior\n");
+		goto unmap_regs;
+	}
+
+	/* Use internal 48 Mhz */
+	out_8(&cdm->ext_48mhz_en, 0x00);
+	out_8(&cdm->fd_enable, 0x01);
+	if (in_be32(&cdm->rstcfg) & 0x40)	/* Assumes 33Mhz clock */
+		out_be16(&cdm->fd_counters, 0x0001);
+	else
+		out_be16(&cdm->fd_counters, 0x5555);
+
+	/* Configure the XLB Arbiter priorities */
+	out_be32(&xlb->master_pri_enable, 0xff);
+	out_be32(&xlb->master_priority, 0x11111111);
+
+	/* Disable XLB pipelining */
+	/* (cf. errata 292. We could do this only just before ATA PIO
+	    transaction and re-enable it afterwards ...) */
+	out_be32(&xlb->config, in_be32(&xlb->config) | MPC52xx_XLB_CFG_PLDIS);
+
+	/* Unmap zones */
+unmap_regs:
+	if (cdm) iounmap(cdm);
+	if (xlb) iounmap(xlb);
+}
+
+static int __init
+mpc52xx_declare_of_platform_devices(void)
+{
+	/* Find every child of the SOC node and add it to of_platform */
+	return of_platform_bus_probe(NULL, NULL, NULL);
+}
+
+device_initcall(mpc52xx_declare_of_platform_devices);
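
As a usage note for the two helpers added above (illustrative only, not part of the patch; the example_* names are hypothetical): mpc52xx_find_and_map() combines the device-tree lookup, address translation and ioremap(), and mpc52xx_find_ipb_freq() walks up the tree to the nearest "bus-frequency" property.

#include <linux/init.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/mpc52xx.h>

static int __init example_probe_gpio(void)
{
	struct mpc52xx_gpio __iomem *gpio;
	struct device_node *np;
	unsigned int ipb_freq;

	/* Look up, translate and ioremap the first "mpc52xx-gpio" node. */
	gpio = mpc52xx_find_and_map("mpc52xx-gpio");
	if (!gpio)
		return -ENODEV;	/* node missing or translation failed */

	/* IPB frequency comes from the closest parent with "bus-frequency". */
	np = of_find_compatible_node(NULL, NULL, "mpc52xx-gpio");
	ipb_freq = mpc52xx_find_ipb_freq(np);
	of_node_put(np);

	/* ... program the block with in_be32()/out_be32() ... */

	iounmap(gpio);
	return ipb_freq ? 0 : -ENODEV;
}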
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
new file mode 100644
index 0000000..cd91a6c
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -0,0 +1,473 @@
+/*
+ *
+ * Programmable Interrupt Controller functions for the Freescale MPC52xx.
+ *
+ * Copyright (C) 2006 bplan GmbH
+ *
+ * Based on the code from the 2.4 kernel by
+ * Dale Farnsworth <dfarnsworth@mvista.com> and Kent Borg.
+ *
+ * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 Montavista Software, Inc
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/stddef.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/hardirq.h>
+
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/mpc52xx.h>
+#include "mpc52xx_pic.h"
+
+/*
+ *
+*/
+
+static struct mpc52xx_intr __iomem *intr;
+static struct mpc52xx_sdma __iomem *sdma;
+static struct irq_host *mpc52xx_irqhost = NULL;
+
+static unsigned char mpc52xx_map_senses[4] = {
+	IRQ_TYPE_LEVEL_HIGH,
+	IRQ_TYPE_EDGE_RISING,
+	IRQ_TYPE_EDGE_FALLING,
+	IRQ_TYPE_LEVEL_LOW,
+};
+
+/*
+ *
+*/
+
+static inline void io_be_setbit(u32 __iomem *addr, int bitno)
+{
+	out_be32(addr, in_be32(addr) | (1 << bitno));
+}
+
+static inline void io_be_clrbit(u32 __iomem *addr, int bitno)
+{
+	out_be32(addr, in_be32(addr) & ~(1 << bitno));
+}
+
+/*
+ * IRQ[0-3] interrupt irq_chip
+*/
+
+static void mpc52xx_extirq_mask(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_clrbit(&intr->ctrl, 11 - l2irq);
+}
+
+static void mpc52xx_extirq_unmask(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_setbit(&intr->ctrl, 11 - l2irq);
+}
+
+static void mpc52xx_extirq_ack(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_setbit(&intr->ctrl, 27-l2irq);
+}
+
+static struct irq_chip mpc52xx_extirq_irqchip = {
+	.typename = " MPC52xx IRQ[0-3] ",
+	.mask = mpc52xx_extirq_mask,
+	.unmask = mpc52xx_extirq_unmask,
+	.ack = mpc52xx_extirq_ack,
+};
+
+/*
+ * Main interrupt irq_chip
+*/
+
+static void mpc52xx_main_mask(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_setbit(&intr->main_mask, 15 - l2irq);
+}
+
+static void mpc52xx_main_unmask(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_clrbit(&intr->main_mask, 15 - l2irq);
+}
+
+static struct irq_chip mpc52xx_main_irqchip = {
+	.typename = "MPC52xx Main",
+	.mask = mpc52xx_main_mask,
+	.mask_ack = mpc52xx_main_mask,
+	.unmask = mpc52xx_main_unmask,
+};
+
+/*
+ * Peripherals interrupt irq_chip
+*/
+
+static void mpc52xx_periph_mask(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_setbit(&intr->per_mask, 31 - l2irq);
+}
+
+static void mpc52xx_periph_unmask(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_clrbit(&intr->per_mask, 31 - l2irq);
+}
+
+static struct irq_chip mpc52xx_periph_irqchip = {
+	.typename = "MPC52xx Peripherals",
+	.mask = mpc52xx_periph_mask,
+	.mask_ack = mpc52xx_periph_mask,
+	.unmask = mpc52xx_periph_unmask,
+};
+
+/*
+ * SDMA interrupt irq_chip
+*/
+
+static void mpc52xx_sdma_mask(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_setbit(&sdma->IntMask, l2irq);
+}
+
+static void mpc52xx_sdma_unmask(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	io_be_clrbit(&sdma->IntMask, l2irq);
+}
+
+static void mpc52xx_sdma_ack(unsigned int virq)
+{
+	int irq;
+	int l2irq;
+
+	irq = irq_map[virq].hwirq;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+	out_be32(&sdma->IntPend, 1 << l2irq);
+}
+
+static struct irq_chip mpc52xx_sdma_irqchip = {
+	.typename = "MPC52xx SDMA",
+	.mask = mpc52xx_sdma_mask,
+	.unmask = mpc52xx_sdma_unmask,
+	.ack = mpc52xx_sdma_ack,
+};
+
+/*
+ * irq_host
+*/
+
+static int mpc52xx_irqhost_match(struct irq_host *h, struct device_node *node)
+{
+	pr_debug("%s: node=%p\n", __func__, node);
+	return mpc52xx_irqhost->host_data == node;
+}
+
+static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
+				 u32 * intspec, unsigned int intsize,
+				 irq_hw_number_t * out_hwirq,
+				 unsigned int *out_flags)
+{
+	int intrvect_l1;
+	int intrvect_l2;
+	int intrvect_type;
+	int intrvect_linux;
+
+	if (intsize != 3)
+		return -1;
+
+	intrvect_l1 = (int)intspec[0];
+	intrvect_l2 = (int)intspec[1];
+	intrvect_type = (int)intspec[2];
+
+	intrvect_linux =
+	    (intrvect_l1 << MPC52xx_IRQ_L1_OFFSET) & MPC52xx_IRQ_L1_MASK;
+	intrvect_linux |=
+	    (intrvect_l2 << MPC52xx_IRQ_L2_OFFSET) & MPC52xx_IRQ_L2_MASK;
+
+	pr_debug("return %x, l1=%d, l2=%d\n", intrvect_linux, intrvect_l1,
+		 intrvect_l2);
+
+	*out_hwirq = intrvect_linux;
+	*out_flags = mpc52xx_map_senses[intrvect_type];
+
+	return 0;
+}
+
+/*
+ * This function retrieves the correct IRQ type out
+ * of the MPC registers.
+ * Only external IRQs need this.
+*/
+static int mpc52xx_irqx_gettype(int irq)
+{
+	int type;
+	u32 ctrl_reg;
+
+	ctrl_reg = in_be32(&intr->ctrl);
+	type = (ctrl_reg >> (22 - irq * 2)) & 0x3;
+
+	return mpc52xx_map_senses[type];
+}
+
+static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
+			       irq_hw_number_t irq)
+{
+	int l1irq;
+	int l2irq;
+	struct irq_chip *good_irqchip;
+	void *good_handle;
+	int type;
+
+	l1irq = (irq & MPC52xx_IRQ_L1_MASK) >> MPC52xx_IRQ_L1_OFFSET;
+	l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+	/*
+	 * Most of our IRQs will be level low.
+	 * Only external IRQs on some platforms may differ.
+	 */
+	type = IRQ_TYPE_LEVEL_LOW;
+
+	switch (l1irq) {
+	case MPC52xx_IRQ_L1_CRIT:
+		pr_debug("%s: Critical. l2=%x\n", __func__, l2irq);
+
+		BUG_ON(l2irq != 0);
+
+		type = mpc52xx_irqx_gettype(l2irq);
+		good_irqchip = &mpc52xx_extirq_irqchip;
+		break;
+
+	case MPC52xx_IRQ_L1_MAIN:
+		pr_debug("%s: Main IRQ[1-3] l2=%x\n", __func__, l2irq);
+
+		if ((l2irq >= 1) && (l2irq <= 3)) {
+			type = mpc52xx_irqx_gettype(l2irq);
+			good_irqchip = &mpc52xx_extirq_irqchip;
+		} else {
+			good_irqchip = &mpc52xx_main_irqchip;
+		}
+		break;
+
+	case MPC52xx_IRQ_L1_PERP:
+		pr_debug("%s: Peripherals. l2=%x\n", __func__, l2irq);
+		good_irqchip = &mpc52xx_periph_irqchip;
+		break;
+
+	case MPC52xx_IRQ_L1_SDMA:
+		pr_debug("%s: SDMA. l2=%x\n", __func__, l2irq);
+		good_irqchip = &mpc52xx_sdma_irqchip;
+		break;
+
+	default:
+		pr_debug("%s: Error, unknown L1 IRQ (0x%x)\n", __func__, l1irq);
+		printk(KERN_ERR "Unknown IRQ!\n");
+		return -EINVAL;
+	}
+
+	switch (type) {
+	case IRQ_TYPE_EDGE_FALLING:
+	case IRQ_TYPE_EDGE_RISING:
+		good_handle = handle_edge_irq;
+		break;
+	default:
+		good_handle = handle_level_irq;
+	}
+
+	set_irq_chip_and_handler(virq, good_irqchip, good_handle);
+
+	pr_debug("%s: virq=%x, hw=%x. type=%x\n", __func__, virq,
+		 (int)irq, type);
+
+	return 0;
+}
+
+static struct irq_host_ops mpc52xx_irqhost_ops = {
+	.match = mpc52xx_irqhost_match,
+	.xlate = mpc52xx_irqhost_xlate,
+	.map = mpc52xx_irqhost_map,
+};
+
+/*
+ * init (public)
+*/
+
+void __init mpc52xx_init_irq(void)
+{
+	u32 intr_ctrl;
+	struct device_node *picnode;
+
+	/* Remap the necessary zones */
+	picnode = of_find_compatible_node(NULL, NULL, "mpc52xx-pic");
+
+	intr = mpc52xx_find_and_map("mpc52xx-pic");
+	if (!intr)
+		panic(__FILE__	": find_and_map failed on 'mpc52xx-pic'. "
+				"Check node!");
+
+	sdma = mpc52xx_find_and_map("mpc52xx-bestcomm");
+	if (!sdma)
+		panic(__FILE__	": find_and_map failed on 'mpc52xx-bestcomm'. "
+				"Check node!");
+
+	/* Disable all interrupt sources. */
+	out_be32(&sdma->IntPend, 0xffffffff);	/* 1 means clear pending */
+	out_be32(&sdma->IntMask, 0xffffffff);	/* 1 means disabled */
+	out_be32(&intr->per_mask, 0x7ffffc00);	/* 1 means disabled */
+	out_be32(&intr->main_mask, 0x00010fff);	/* 1 means disabled */
+	intr_ctrl = in_be32(&intr->ctrl);
+	intr_ctrl &= 0x00ff0000;	/* Keeps IRQ[0-3] config */
+	intr_ctrl |=	0x0f000000 |	/* clear IRQ 0-3 */
+			0x00001000 |	/* MEE master external enable */
+			0x00000000 |	/* 0 means disable IRQ 0-3 */
+			0x00000001;	/* CEb route critical normally */
+	out_be32(&intr->ctrl, intr_ctrl);
+
+	/* Zero a bunch of the priority settings. */
+	out_be32(&intr->per_pri1, 0);
+	out_be32(&intr->per_pri2, 0);
+	out_be32(&intr->per_pri3, 0);
+	out_be32(&intr->main_pri1, 0);
+	out_be32(&intr->main_pri2, 0);
+
+	/*
+	 * As last step, add an irq host to translate the real
+	 * hw irq information provided by the ofw to linux virq
+	 */
+
+	mpc52xx_irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR,
+	                                 MPC52xx_IRQ_HIGHTESTHWIRQ,
+	                                 &mpc52xx_irqhost_ops, -1);
+
+	if (!mpc52xx_irqhost)
+		panic(__FILE__ ": Cannot allocate the IRQ host\n");
+
+	mpc52xx_irqhost->host_data = picnode;
+	printk(KERN_INFO "MPC52xx PIC is up and running!\n");
+}
+
+/*
+ * get_irq (public)
+*/
+unsigned int mpc52xx_get_irq(void)
+{
+	u32 status;
+	int irq = NO_IRQ_IGNORE;
+
+	status = in_be32(&intr->enc_status);
+	if (status & 0x00000400) {	/* critical */
+		irq = (status >> 8) & 0x3;
+		if (irq == 2)	/* high priority peripheral */
+			goto peripheral;
+		irq |=	(MPC52xx_IRQ_L1_CRIT << MPC52xx_IRQ_L1_OFFSET) &
+			MPC52xx_IRQ_L1_MASK;
+	} else if (status & 0x00200000) {	/* main */
+		irq = (status >> 16) & 0x1f;
+		if (irq == 4)	/* low priority peripheral */
+			goto peripheral;
+		irq |=	(MPC52xx_IRQ_L1_MAIN << MPC52xx_IRQ_L1_OFFSET) &
+			MPC52xx_IRQ_L1_MASK;
+	} else if (status & 0x20000000) {	/* peripheral */
+	      peripheral:
+		irq = (status >> 24) & 0x1f;
+		if (irq == 0) {	/* bestcomm */
+			status = in_be32(&sdma->IntPend);
+			irq = ffs(status) - 1;
+			irq |=	(MPC52xx_IRQ_L1_SDMA << MPC52xx_IRQ_L1_OFFSET) &
+				MPC52xx_IRQ_L1_MASK;
+		} else {
+			irq |=	(MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET) &
+				MPC52xx_IRQ_L1_MASK;
+		}
+	}
+
+	pr_debug("%s: irq=%x. virq=%d\n", __func__, irq,
+		 irq_linear_revmap(mpc52xx_irqhost, irq));
+
+	return irq_linear_revmap(mpc52xx_irqhost, irq);
+}
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.h b/arch/powerpc/platforms/52xx/mpc52xx_pic.h
new file mode 100644
index 0000000..1a26bcd
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.h
@@ -0,0 +1,53 @@
+/*
+ * Header file for Freescale MPC52xx Interrupt controller
+ *
+ * Copyright (C) 2004-2005 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __POWERPC_SYSDEV_MPC52xx_PIC_H__
+#define __POWERPC_SYSDEV_MPC52xx_PIC_H__
+
+#include <asm/types.h>
+
+
+/* HW IRQ mapping */
+#define MPC52xx_IRQ_L1_CRIT	(0)
+#define MPC52xx_IRQ_L1_MAIN	(1)
+#define MPC52xx_IRQ_L1_PERP	(2)
+#define MPC52xx_IRQ_L1_SDMA	(3)
+
+#define MPC52xx_IRQ_L1_OFFSET   (6)
+#define MPC52xx_IRQ_L1_MASK     (0x00c0)
+
+#define MPC52xx_IRQ_L2_OFFSET   (0)
+#define MPC52xx_IRQ_L2_MASK     (0x003f)
+
+#define MPC52xx_IRQ_HIGHTESTHWIRQ (0xd0)
+
+
+/* Interrupt controller Register set */
+struct mpc52xx_intr {
+	u32 per_mask;		/* INTR + 0x00 */
+	u32 per_pri1;		/* INTR + 0x04 */
+	u32 per_pri2;		/* INTR + 0x08 */
+	u32 per_pri3;		/* INTR + 0x0c */
+	u32 ctrl;		/* INTR + 0x10 */
+	u32 main_mask;		/* INTR + 0x14 */
+	u32 main_pri1;		/* INTR + 0x18 */
+	u32 main_pri2;		/* INTR + 0x1c */
+	u32 reserved1;		/* INTR + 0x20 */
+	u32 enc_status;		/* INTR + 0x24 */
+	u32 crit_status;	/* INTR + 0x28 */
+	u32 main_status;	/* INTR + 0x2c */
+	u32 per_status;		/* INTR + 0x30 */
+	u32 reserved2;		/* INTR + 0x34 */
+	u32 per_error;		/* INTR + 0x38 */
+};
+
+#endif /* __POWERPC_SYSDEV_MPC52xx_PIC_H__ */
+
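
For reference, the hwirq numbers used throughout mpc52xx_pic.c pack the level-1 group and the level-2 source into one value using the masks and offsets defined above. A minimal, standalone C sketch of that packing and unpacking; the sample inputs and the main() driver are purely illustrative and not part of the patch:

/*
 * Standalone illustration of the MPC52xx hwirq encoding.  The mask and
 * offset values mirror mpc52xx_pic.h; everything else is invented.
 */
#include <stdio.h>

#define MPC52xx_IRQ_L1_OFFSET   6
#define MPC52xx_IRQ_L1_MASK     0x00c0
#define MPC52xx_IRQ_L2_OFFSET   0
#define MPC52xx_IRQ_L2_MASK     0x003f

static unsigned int mpc52xx_pack_hwirq(unsigned int l1, unsigned int l2)
{
	return ((l1 << MPC52xx_IRQ_L1_OFFSET) & MPC52xx_IRQ_L1_MASK) |
	       ((l2 << MPC52xx_IRQ_L2_OFFSET) & MPC52xx_IRQ_L2_MASK);
}

int main(void)
{
	unsigned int hw = mpc52xx_pack_hwirq(3 /* SDMA group */, 17);
	unsigned int l1 = (hw & MPC52xx_IRQ_L1_MASK) >> MPC52xx_IRQ_L1_OFFSET;
	unsigned int l2 = (hw & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;

	printf("hwirq=0x%02x -> l1=%u l2=%u\n", hw, l1, l2);
	return 0;
}
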
diff --git a/arch/powerpc/platforms/82xx/mpc82xx_ads.c b/arch/powerpc/platforms/82xx/mpc82xx_ads.c
index bb9acbb..ea880f1 100644
--- a/arch/powerpc/platforms/82xx/mpc82xx_ads.c
+++ b/arch/powerpc/platforms/82xx/mpc82xx_ads.c
@@ -515,16 +515,6 @@
 		return PCIBIOS_SUCCESSFUL;
 }
 
-static void
-__init mpc82xx_pcibios_fixup(void)
-{
-	struct pci_dev *dev = NULL;
-
-	for_each_pci_dev(dev) {
-		pci_read_irq_line(dev);
-	}
-}
-
 void __init add_bridge(struct device_node *np)
 {
 	int len;
@@ -597,9 +587,6 @@
 		add_bridge(np);
 
 	of_node_put(np);
-	ppc_md.pci_map_irq = NULL;
-	ppc_md.pcibios_fixup = mpc82xx_pcibios_fixup;
-	ppc_md.pcibios_fixup_bus = NULL;
 #endif
 
 #ifdef  CONFIG_ROOT_NFS
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
index 7edb6b4..edcd5b8 100644
--- a/arch/powerpc/platforms/83xx/Kconfig
+++ b/arch/powerpc/platforms/83xx/Kconfig
@@ -21,7 +21,7 @@
 	  Be aware that PCI buses can only function when SYS board is plugged
 	  into the PIB (Platform IO Board) board from Freescale which provide
 	  3 PCI slots.  The PIBs PCI initialization is the bootloader's
-	  responsiblilty.
+	  responsibility.
 
 config MPC834x_ITX
 	bool "Freescale MPC834x ITX"
@@ -30,7 +30,7 @@
 	  This option enables support for the MPC 834x ITX evaluation board.
 
 	  Be aware that PCI initialization is the bootloader's
-	  responsiblilty.
+	  responsibility.
 
 config MPC8360E_PB
 	bool "Freescale MPC8360E PB"
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index a43ac71..f58c978 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -97,8 +97,6 @@
 #ifdef CONFIG_PCI
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
 		add_bridge(np);
-
-	ppc_md.pci_swizzle = common_swizzle;
 	ppc_md.pci_exclude_device = mpc83xx_exclude_device;
 #endif
 
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index e2bcaaf..314c42a 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -118,7 +118,4 @@
 	.time_init		= mpc83xx_time_init,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
-#ifdef CONFIG_PCI
-	.pcibios_fixup		= mpc83xx_pcibios_fixup,
-#endif
 };
diff --git a/arch/powerpc/platforms/83xx/mpc834x_sys.c b/arch/powerpc/platforms/83xx/mpc834x_sys.c
index 6771961..80b735a 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_sys.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_sys.c
@@ -137,7 +137,4 @@
 	.time_init		= mpc83xx_time_init,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
-#ifdef CONFIG_PCI
-	.pcibios_fixup		= mpc83xx_pcibios_fixup,
-#endif
 };
diff --git a/arch/powerpc/platforms/83xx/mpc8360e_pb.c b/arch/powerpc/platforms/83xx/mpc8360e_pb.c
index 1a523c8..7bfd47a 100644
--- a/arch/powerpc/platforms/83xx/mpc8360e_pb.c
+++ b/arch/powerpc/platforms/83xx/mpc8360e_pb.c
@@ -102,8 +102,6 @@
 #ifdef CONFIG_PCI
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
 		add_bridge(np);
-
-	ppc_md.pci_swizzle = common_swizzle;
 	ppc_md.pci_exclude_device = mpc83xx_exclude_device;
 #endif
 
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 2c82bca..01cae10 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -11,7 +11,6 @@
 
 extern int add_bridge(struct device_node *dev);
 extern int mpc83xx_exclude_device(u_char bus, u_char devfn);
-extern void mpc83xx_pcibios_fixup(void);
 extern void mpc83xx_restart(char *cmd);
 extern long mpc83xx_time_init(void);
 
diff --git a/arch/powerpc/platforms/83xx/pci.c b/arch/powerpc/platforms/83xx/pci.c
index 4557ac5..9c36505 100644
--- a/arch/powerpc/platforms/83xx/pci.c
+++ b/arch/powerpc/platforms/83xx/pci.c
@@ -45,15 +45,6 @@
 	return PCIBIOS_SUCCESSFUL;
 }
 
-void __init mpc83xx_pcibios_fixup(void)
-{
-	struct pci_dev *dev = NULL;
-
-	/* map all the PCI irqs */
-	for_each_pci_dev(dev)
-		pci_read_irq_line(dev);
-}
-
 int __init add_bridge(struct device_node *dev)
 {
 	int len;
diff --git a/arch/powerpc/platforms/85xx/misc.c b/arch/powerpc/platforms/85xx/misc.c
index 26c5e822..3e62fcb 100644
--- a/arch/powerpc/platforms/85xx/misc.c
+++ b/arch/powerpc/platforms/85xx/misc.c
@@ -21,11 +21,3 @@
 	local_irq_disable();
 	abort();
 }
-
-/* For now this is a pass through */
-phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size)
-{
-	return addr;
-};
-
-EXPORT_SYMBOL(fixup_bigphys_addr);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index d3e669d..bda2e55 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -53,15 +53,6 @@
 	else
 		return PCIBIOS_SUCCESSFUL;
 }
-
-void __init
-mpc85xx_pcibios_fixup(void)
-{
-	struct pci_dev *dev = NULL;
-
-	for_each_pci_dev(dev)
-		pci_read_irq_line(dev);
-}
 #endif /* CONFIG_PCI */
 
 #ifdef CONFIG_CPM2
@@ -253,8 +244,6 @@
 #ifdef CONFIG_PCI
 	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
 		add_bridge(np);
-
-	ppc_md.pcibios_fixup = mpc85xx_pcibios_fixup;
 	ppc_md.pci_exclude_device = mpc85xx_exclude_device;
 #endif
 
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index 1a1c226..f4dd5f2 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -398,15 +398,6 @@
 }
 
 
-void __init mpc86xx_hpcn_pcibios_fixup(void)
-{
-	struct pci_dev *dev = NULL;
-
-	for_each_pci_dev(dev)
-		pci_read_irq_line(dev);
-}
-
-
 /*
  * Called very early, device-tree isn't unflattened
  */
@@ -461,7 +452,6 @@
 	.setup_arch		= mpc86xx_hpcn_setup_arch,
 	.init_IRQ		= mpc86xx_hpcn_init_irq,
 	.show_cpuinfo		= mpc86xx_hpcn_show_cpuinfo,
-	.pcibios_fixup		= mpc86xx_hpcn_pcibios_fixup,
 	.get_irq		= mpic_get_irq,
 	.restart		= mpc86xx_restart,
 	.time_init		= mpc86xx_time_init,
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index e58fa95..44d95ea 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -7,12 +7,14 @@
 endif
 obj-$(CONFIG_PPC_CHRP)		+= chrp/
 obj-$(CONFIG_4xx)		+= 4xx/
+obj-$(CONFIG_PPC_MPC52xx)	+= 52xx/
 obj-$(CONFIG_PPC_83xx)		+= 83xx/
 obj-$(CONFIG_PPC_85xx)		+= 85xx/
 obj-$(CONFIG_PPC_86xx)		+= 86xx/
 obj-$(CONFIG_PPC_PSERIES)	+= pseries/
 obj-$(CONFIG_PPC_ISERIES)	+= iseries/
 obj-$(CONFIG_PPC_MAPLE)		+= maple/
-obj-$(CONFIG_PPC_PASEMI)		+= pasemi/
+obj-$(CONFIG_PPC_PASEMI)	+= pasemi/
 obj-$(CONFIG_PPC_CELL)		+= cell/
+obj-$(CONFIG_PPC_PS3)		+= ps3/
 obj-$(CONFIG_EMBEDDED6xx)	+= embedded6xx/
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 3e430b4..06a85b7 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -20,4 +20,18 @@
 	bool "RAS features for bare metal Cell BE"
 	default y
 
+config CBE_THERM
+	tristate "CBE thermal support"
+	default m
+	depends on CBE_RAS
+
+config CBE_CPUFREQ
+	tristate "CBE frequency scaling"
+	depends on CBE_RAS && CPU_FREQ
+	default m
+	help
+	  This adds the cpufreq driver for Cell BE processors.
+	  For details, take a look at <file:Documentation/cpu-freq/>.
+	  If you don't have such a processor, say N.
+
 endmenu
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index c89cdd6..f90e833 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,7 +1,11 @@
 obj-$(CONFIG_PPC_CELL_NATIVE)		+= interrupt.o iommu.o setup.o \
-					   cbe_regs.o spider-pic.o pervasive.o
+					   cbe_regs.o spider-pic.o \
+					   pervasive.o pmu.o io-workarounds.o
 obj-$(CONFIG_CBE_RAS)			+= ras.o
 
+obj-$(CONFIG_CBE_THERM)			+= cbe_thermal.o
+obj-$(CONFIG_CBE_CPUFREQ)		+= cbe_cpufreq.o
+
 ifeq ($(CONFIG_SMP),y)
 obj-$(CONFIG_PPC_CELL_NATIVE)		+= smp.o
 endif
@@ -11,5 +15,6 @@
 spu-priv1-$(CONFIG_PPC_CELL_NATIVE)	+= spu_priv1_mmio.o
 
 obj-$(CONFIG_SPU_BASE)			+= spu_callbacks.o spu_base.o \
+					   spu_coredump.o \
 					   $(spufs-modular-m) \
 					   $(spu-priv1-y) spufs/
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
new file mode 100644
index 0000000..a3850fd
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -0,0 +1,248 @@
+/*
+ * cpufreq driver for the cell processor
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/timer.h>
+
+#include <asm/hw_irq.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+
+#include "cbe_regs.h"
+
+static DEFINE_MUTEX(cbe_switch_mutex);
+
+
+/* the CBE supports an 8 step frequency scaling */
+static struct cpufreq_frequency_table cbe_freqs[] = {
+	{1,	0},
+	{2,	0},
+	{3,	0},
+	{4,	0},
+	{5,	0},
+	{6,	0},
+	{8,	0},
+	{10,	0},
+	{0,	CPUFREQ_TABLE_END},
+};
+
+/* to write to MIC register */
+static u64 MIC_Slow_Fast_Timer_table[] = {
+	[0 ... 7] = 0x007fc00000000000ull,
+};
+
+/* more values for the MIC */
+static u64 MIC_Slow_Next_Timer_table[] = {
+	0x0000240000000000ull,
+	0x0000268000000000ull,
+	0x000029C000000000ull,
+	0x00002D0000000000ull,
+	0x0000300000000000ull,
+	0x0000334000000000ull,
+	0x000039C000000000ull,
+	0x00003FC000000000ull,
+};
+
+/*
+ * hardware specific functions
+ */
+
+static int get_pmode(int cpu)
+{
+	int ret;
+	struct cbe_pmd_regs __iomem *pmd_regs;
+
+	pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+	ret = in_be64(&pmd_regs->pmsr) & 0x07;
+
+	return ret;
+}
+
+static int set_pmode(int cpu, unsigned int pmode)
+{
+	struct cbe_pmd_regs __iomem *pmd_regs;
+	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
+	u64 flags;
+	u64 value;
+
+	local_irq_save(flags);
+
+	mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
+	pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+
+	pr_debug("pm register is mapped at %p\n", &pmd_regs->pmcr);
+	pr_debug("mic register is mapped at %p\n", &mic_tm_regs->slow_fast_timer_0);
+
+	out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
+	out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
+
+	out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
+	out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
+
+	value = in_be64(&pmd_regs->pmcr);
+	/* clear the pmode bits */
+	value &= 0xFFFFFFFFFFFFFFF8ull;
+	/* set bits to next pmode */
+	value |= pmode;
+
+	out_be64(&pmd_regs->pmcr, value);
+
+	/* wait until new pmode appears in status register */
+	value = in_be64(&pmd_regs->pmsr) & 0x07;
+	while(value != pmode) {
+		cpu_relax();
+		value = in_be64(&pmd_regs->pmsr) & 0x07;
+	}
+
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+/*
+ * cpufreq functions
+ */
+
+static int cbe_cpufreq_cpu_init (struct cpufreq_policy *policy)
+{
+	u32 *max_freq;
+	int i, cur_pmode;
+	struct device_node *cpu;
+
+	cpu = of_get_cpu_node(policy->cpu, NULL);
+
+	if(!cpu)
+		return -ENODEV;
+
+	pr_debug("init cpufreq on CPU %d\n", policy->cpu);
+
+	max_freq = (u32*) get_property(cpu, "clock-frequency", NULL);
+
+	if(!max_freq)
+		return -EINVAL;
+
+	/* we need the freq in kHz */
+	*max_freq /= 1000;
+
+	pr_debug("max clock-frequency is at %u kHz\n", *max_freq);
+	pr_debug("initializing frequency table\n");
+
+	/* initialize frequency table */
+	for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
+		cbe_freqs[i].frequency = *max_freq / cbe_freqs[i].index;
+		pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
+	}
+
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+	/* if DEBUG is enabled set_pmode() measures the correct latency of a transition */
+	policy->cpuinfo.transition_latency = 25000;
+
+	cur_pmode = get_pmode(policy->cpu);
+	pr_debug("current pmode is at %d\n",cur_pmode);
+
+	policy->cur = cbe_freqs[cur_pmode].frequency;
+
+#ifdef CONFIG_SMP
+	policy->cpus = cpu_sibling_map[policy->cpu];
+#endif
+
+	cpufreq_frequency_table_get_attr (cbe_freqs, policy->cpu);
+
+	/* this ensures that policy->cpuinfo_min and policy->cpuinfo_max are set correctly */
+	return cpufreq_frequency_table_cpuinfo (policy, cbe_freqs);
+}
+
+static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+	cpufreq_frequency_table_put_attr(policy->cpu);
+	return 0;
+}
+
+static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
+{
+	return cpufreq_frequency_table_verify(policy, cbe_freqs);
+}
+
+
+static int cbe_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq,
+			    unsigned int relation)
+{
+	int rc;
+	struct cpufreq_freqs freqs;
+	int cbe_pmode_new;
+
+	cpufreq_frequency_table_target(policy,
+				       cbe_freqs,
+				       target_freq,
+				       relation,
+				       &cbe_pmode_new);
+
+	freqs.old = policy->cur;
+	freqs.new = cbe_freqs[cbe_pmode_new].frequency;
+	freqs.cpu = policy->cpu;
+
+	mutex_lock (&cbe_switch_mutex);
+	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+	pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
+		 policy->cpu,
+		 cbe_freqs[cbe_pmode_new].frequency,
+		 cbe_freqs[cbe_pmode_new].index);
+
+	rc = set_pmode(policy->cpu, cbe_pmode_new);
+	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	mutex_unlock(&cbe_switch_mutex);
+
+	return rc;
+}
+
+static struct cpufreq_driver cbe_cpufreq_driver = {
+	.verify		= cbe_cpufreq_verify,
+	.target		= cbe_cpufreq_target,
+	.init		= cbe_cpufreq_cpu_init,
+	.exit		= cbe_cpufreq_cpu_exit,
+	.name		= "cbe-cpufreq",
+	.owner		= THIS_MODULE,
+	.flags		= CPUFREQ_CONST_LOOPS,
+};
+
+/*
+ * module init and destroy
+ */
+
+static int __init cbe_cpufreq_init(void)
+{
+	return cpufreq_register_driver(&cbe_cpufreq_driver);
+}
+
+static void __exit cbe_cpufreq_exit(void)
+{
+	cpufreq_unregister_driver(&cbe_cpufreq_driver);
+}
+
+module_init(cbe_cpufreq_init);
+module_exit(cbe_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
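
To make the frequency-table setup above concrete: cbe_cpufreq_cpu_init() takes the "clock-frequency" property (converted to kHz) and divides it by each table entry's .index, giving the eight pmode steps. A small standalone sketch of that calculation; the 3.2 GHz figure is an invented example, not taken from the patch:

/* Standalone illustration of the CBE cpufreq table derivation. */
#include <stdio.h>

int main(void)
{
	const unsigned int dividers[] = { 1, 2, 3, 4, 5, 6, 8, 10 };
	unsigned int max_khz = 3200000;	/* e.g. a 3.2 GHz part, in kHz */
	unsigned int i;

	for (i = 0; i < sizeof(dividers) / sizeof(dividers[0]); i++)
		printf("pmode %u: %u kHz (1/%u of max)\n",
		       i, max_khz / dividers[i], dividers[i]);
	return 0;
}
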
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index 2f194ba..9a0ee62 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -8,6 +8,7 @@
 
 #include <linux/percpu.h>
 #include <linux/types.h>
+#include <linux/module.h>
 
 #include <asm/io.h>
 #include <asm/pgtable.h>
@@ -16,8 +17,6 @@
 
 #include "cbe_regs.h"
 
-#define MAX_CBE		2
-
 /*
  * Current implementation uses "cpu" nodes. We build our own mapping
  * array of cpu numbers to cpu nodes locally for now to allow interrupt
@@ -30,6 +29,8 @@
 	struct device_node *cpu_node;
 	struct cbe_pmd_regs __iomem *pmd_regs;
 	struct cbe_iic_regs __iomem *iic_regs;
+	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
+	struct cbe_pmd_shadow_regs pmd_shadow_regs;
 } cbe_regs_maps[MAX_CBE];
 static int cbe_regs_map_count;
 
@@ -42,6 +43,19 @@
 static struct cbe_regs_map *cbe_find_map(struct device_node *np)
 {
 	int i;
+	struct device_node *tmp_np;
+
+	if (strcasecmp(np->type, "spe") == 0) {
+		if (np->data == NULL) {
+			/* walk up path until cpu node was found */
+			tmp_np = np->parent;
+			while (tmp_np != NULL && strcasecmp(tmp_np->type, "cpu") != 0)
+				tmp_np = tmp_np->parent;
+
+			np->data = cbe_find_map(tmp_np);
+		}
+		return np->data;
+	}
 
 	for (i = 0; i < cbe_regs_map_count; i++)
 		if (cbe_regs_maps[i].cpu_node == np)
@@ -56,6 +70,7 @@
 		return NULL;
 	return map->pmd_regs;
 }
+EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);
 
 struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
 {
@@ -64,7 +79,23 @@
 		return NULL;
 	return map->pmd_regs;
 }
+EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);
 
+struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
+{
+	struct cbe_regs_map *map = cbe_find_map(np);
+	if (map == NULL)
+		return NULL;
+	return &map->pmd_shadow_regs;
+}
+
+struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
+{
+	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
+	if (map == NULL)
+		return NULL;
+	return &map->pmd_shadow_regs;
+}
 
 struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
 {
@@ -73,6 +104,7 @@
 		return NULL;
 	return map->iic_regs;
 }
+
 struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
 {
 	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
@@ -81,6 +113,36 @@
 	return map->iic_regs;
 }
 
+struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
+{
+	struct cbe_regs_map *map = cbe_find_map(np);
+	if (map == NULL)
+		return NULL;
+	return map->mic_tm_regs;
+}
+
+struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
+{
+	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
+	if (map == NULL)
+		return NULL;
+	return map->mic_tm_regs;
+}
+EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
+
+/* FIXME
+ * This is little more than a stub at the moment.  It should be
+ * fleshed out so that it works for both SMT and non-SMT, no
+ * matter if the passed cpu is odd or even.
+ * For SMT enabled, returns 0 for even-numbered cpu; otherwise 1.
+ * For SMT disabled, returns 0 for all cpus.
+ */
+u32 cbe_get_hw_thread_id(int cpu)
+{
+	return (cpu & 1);
+}
+EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);
+
 void __init cbe_regs_init(void)
 {
 	int i;
@@ -119,6 +181,11 @@
 		prop = get_property(cpu, "iic", NULL);
 		if (prop != NULL)
 			map->iic_regs = ioremap(prop->address, prop->len);
+
+		prop = (struct address_prop *)get_property(cpu, "mic-tm",
+							   NULL);
+		if (prop != NULL)
+			map->mic_tm_regs = ioremap(prop->address, prop->len);
 	}
 }
 
diff --git a/arch/powerpc/platforms/cell/cbe_regs.h b/arch/powerpc/platforms/cell/cbe_regs.h
index e76e4a6..440a7ec 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.h
+++ b/arch/powerpc/platforms/cell/cbe_regs.h
@@ -4,12 +4,19 @@
  * This file is intended to hold the various register definitions for CBE
  * on-chip system devices (memory controller, IO controller, etc...)
  *
+ * (C) Copyright IBM Corporation 2001,2006
+ *
+ * Authors: Maximino Aguilar (maguilar@us.ibm.com)
+ *          David J. Erb (djerb@us.ibm.com)
+ *
  * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
  */
 
 #ifndef CBE_REGS_H
 #define CBE_REGS_H
 
+#include <asm/cell-pmu.h>
+
 /*
  *
  * Some HID register definitions
@@ -22,6 +29,7 @@
 #define HID0_CBE_THERM_INT_EN	0x0000000400000000ul
 #define HID0_CBE_SYSERR_INT_EN	0x0000000200000000ul
 
+#define MAX_CBE		2
 
 /*
  *
@@ -29,51 +37,124 @@
  *
  */
 
+union spe_reg {
+	u64 val;
+	u8 spe[8];
+};
+
+union ppe_spe_reg {
+	u64 val;
+	struct {
+		u32 ppe;
+		u32 spe;
+	};
+};
+
+
 struct cbe_pmd_regs {
-	u8 pad_0x0000_0x0800[0x0800 - 0x0000];			/* 0x0000 */
+	/* Debug Bus Control */
+	u64	pad_0x0000;					/* 0x0000 */
+
+	u64	group_control;					/* 0x0008 */
+
+	u8	pad_0x0010_0x00a8 [0x00a8 - 0x0010];		/* 0x0010 */
+
+	u64	debug_bus_control;				/* 0x00a8 */
+
+	u8	pad_0x00b0_0x0100 [0x0100 - 0x00b0];		/* 0x00b0 */
+
+	u64	trace_aux_data;					/* 0x0100 */
+	u64	trace_buffer_0_63;				/* 0x0108 */
+	u64	trace_buffer_64_127;				/* 0x0110 */
+	u64	trace_address;					/* 0x0118 */
+	u64	ext_tr_timer;					/* 0x0120 */
+
+	u8	pad_0x0128_0x0400 [0x0400 - 0x0128];		/* 0x0128 */
+
+	/* Performance Monitor */
+	u64	pm_status;					/* 0x0400 */
+	u64	pm_control;					/* 0x0408 */
+	u64	pm_interval;					/* 0x0410 */
+	u64	pm_ctr[4];					/* 0x0418 */
+	u64	pm_start_stop;					/* 0x0438 */
+	u64	pm07_control[8];				/* 0x0440 */
+
+	u8	pad_0x0480_0x0800 [0x0800 - 0x0480];		/* 0x0480 */
 
 	/* Thermal Sensor Registers */
-	u64  ts_ctsr1;						/* 0x0800 */
-	u64  ts_ctsr2;						/* 0x0808 */
-	u64  ts_mtsr1;						/* 0x0810 */
-	u64  ts_mtsr2;						/* 0x0818 */
-	u64  ts_itr1;						/* 0x0820 */
-	u64  ts_itr2;						/* 0x0828 */
-	u64  ts_gitr;						/* 0x0830 */
-	u64  ts_isr;						/* 0x0838 */
-	u64  ts_imr;						/* 0x0840 */
-	u64  tm_cr1;						/* 0x0848 */
-	u64  tm_cr2;						/* 0x0850 */
-	u64  tm_simr;						/* 0x0858 */
-	u64  tm_tpr;						/* 0x0860 */
-	u64  tm_str1;						/* 0x0868 */
-	u64  tm_str2;						/* 0x0870 */
-	u64  tm_tsr;						/* 0x0878 */
+	union	spe_reg	ts_ctsr1;				/* 0x0800 */
+	u64	ts_ctsr2;					/* 0x0808 */
+	union	spe_reg	ts_mtsr1;				/* 0x0810 */
+	u64	ts_mtsr2;					/* 0x0818 */
+	union	spe_reg	ts_itr1;				/* 0x0820 */
+	u64	ts_itr2;					/* 0x0828 */
+	u64	ts_gitr;					/* 0x0830 */
+	u64	ts_isr;						/* 0x0838 */
+	u64	ts_imr;						/* 0x0840 */
+	union	spe_reg	tm_cr1;					/* 0x0848 */
+	u64	tm_cr2;						/* 0x0850 */
+	u64	tm_simr;					/* 0x0858 */
+	union	ppe_spe_reg tm_tpr;				/* 0x0860 */
+	union	spe_reg	tm_str1;				/* 0x0868 */
+	u64	tm_str2;					/* 0x0870 */
+	union	ppe_spe_reg tm_tsr;				/* 0x0878 */
 
 	/* Power Management */
-	u64  pm_control;					/* 0x0880 */
-#define CBE_PMD_PAUSE_ZERO_CONTROL		0x10000
-	u64  pm_status;						/* 0x0888 */
+	u64	pmcr;						/* 0x0880 */
+#define CBE_PMD_PAUSE_ZERO_CONTROL	0x10000
+	u64	pmsr;						/* 0x0888 */
 
 	/* Time Base Register */
-	u64  tbr;						/* 0x0890 */
+	u64	tbr;						/* 0x0890 */
 
-	u8   pad_0x0898_0x0c00 [0x0c00 - 0x0898];		/* 0x0898 */
+	u8	pad_0x0898_0x0c00 [0x0c00 - 0x0898];		/* 0x0898 */
 
 	/* Fault Isolation Registers */
-	u64  checkstop_fir;					/* 0x0c00 */
-	u64  recoverable_fir;
-	u64  spec_att_mchk_fir;
-	u64  fir_mode_reg;
-	u64  fir_enable_mask;
+	u64	checkstop_fir;					/* 0x0c00 */
+	u64	recoverable_fir;				/* 0x0c08 */
+	u64	spec_att_mchk_fir;				/* 0x0c10 */
+	u64	fir_mode_reg;					/* 0x0c18 */
+	u64	fir_enable_mask;				/* 0x0c20 */
 
-	u8   pad_0x0c28_0x1000 [0x1000 - 0x0c28];		/* 0x0c28 */
+	u8	pad_0x0c28_0x1000 [0x1000 - 0x0c28];		/* 0x0c28 */
 };
 
 extern struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np);
 extern struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu);
 
 /*
+ * PMU shadow registers
+ *
+ * Many of the registers in the performance monitoring unit are write-only,
+ * so we need to save a copy of what we write to those registers.
+ *
+ * The actual data counters are read/write. However, writing to the counters
+ * only takes effect if the PMU is enabled. Otherwise the value is stored in
+ * a hardware latch until the next time the PMU is enabled. So we save a copy
+ * of the counter values if we need to read them back while the PMU is
+ * disabled. The counter_value_in_latch field is a bitmap indicating which
+ * counters currently have a value waiting to be written.
+ */
+
+struct cbe_pmd_shadow_regs {
+	u32 group_control;
+	u32 debug_bus_control;
+	u32 trace_address;
+	u32 ext_tr_timer;
+	u32 pm_status;
+	u32 pm_control;
+	u32 pm_interval;
+	u32 pm_start_stop;
+	u32 pm07_control[NR_CTRS];
+
+	u32 pm_ctr[NR_PHYS_CTRS];
+	u32 counter_value_in_latch;
+};
+
+extern struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np);
+extern struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu);
+
+/*
  *
  * IIC unit register definitions
  *
@@ -102,18 +183,28 @@
 
 	/* IIC interrupt registers */
 	struct	cbe_iic_thread_regs thread[2];			/* 0x0400 */
-	u64     iic_ir;						/* 0x0440 */
-	u64     iic_is;						/* 0x0448 */
+
+	u64	iic_ir;						/* 0x0440 */
+#define CBE_IIC_IR_PRIO(x)      (((x) & 0xf) << 12)
+#define CBE_IIC_IR_DEST_NODE(x) (((x) & 0xf) << 4)
+#define CBE_IIC_IR_DEST_UNIT(x) ((x) & 0xf)
+#define CBE_IIC_IR_IOC_0        0x0
+#define CBE_IIC_IR_IOC_1S       0xb
+#define CBE_IIC_IR_PT_0         0xe
+#define CBE_IIC_IR_PT_1         0xf
+
+	u64	iic_is;						/* 0x0448 */
+#define CBE_IIC_IS_PMI		0x2
 
 	u8	pad_0x0450_0x0500[0x0500 - 0x0450];		/* 0x0450 */
 
 	/* IOC FIR */
 	u64	ioc_fir_reset;					/* 0x0500 */
-	u64	ioc_fir_set;
-	u64	ioc_checkstop_enable;
-	u64	ioc_fir_error_mask;
-	u64	ioc_syserr_enable;
-	u64	ioc_fir;
+	u64	ioc_fir_set;					/* 0x0508 */
+	u64	ioc_checkstop_enable;				/* 0x0510 */
+	u64	ioc_fir_error_mask;				/* 0x0518 */
+	u64	ioc_syserr_enable;				/* 0x0520 */
+	u64	ioc_fir;					/* 0x0528 */
 
 	u8	pad_0x0530_0x1000[0x1000 - 0x0530];		/* 0x0530 */
 };
@@ -122,6 +213,48 @@
 extern struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu);
 
 
+struct cbe_mic_tm_regs {
+	u8	pad_0x0000_0x0040[0x0040 - 0x0000];		/* 0x0000 */
+
+	u64	mic_ctl_cnfg2;					/* 0x0040 */
+#define CBE_MIC_ENABLE_AUX_TRC		0x8000000000000000LL
+#define CBE_MIC_DISABLE_PWR_SAV_2	0x0200000000000000LL
+#define CBE_MIC_DISABLE_AUX_TRC_WRAP	0x0100000000000000LL
+#define CBE_MIC_ENABLE_AUX_TRC_INT	0x0080000000000000LL
+
+	u64	pad_0x0048;					/* 0x0048 */
+
+	u64	mic_aux_trc_base;				/* 0x0050 */
+	u64	mic_aux_trc_max_addr;				/* 0x0058 */
+	u64	mic_aux_trc_cur_addr;				/* 0x0060 */
+	u64	mic_aux_trc_grf_addr;				/* 0x0068 */
+	u64	mic_aux_trc_grf_data;				/* 0x0070 */
+
+	u64	pad_0x0078;					/* 0x0078 */
+
+	u64	mic_ctl_cnfg_0;					/* 0x0080 */
+#define CBE_MIC_DISABLE_PWR_SAV_0	0x8000000000000000LL
+
+	u64	pad_0x0088;					/* 0x0088 */
+
+	u64	slow_fast_timer_0;				/* 0x0090 */
+	u64	slow_next_timer_0;				/* 0x0098 */
+
+	u8	pad_0x00a0_0x01c0[0x01c0 - 0x0a0];		/* 0x00a0 */
+
+	u64	mic_ctl_cnfg_1;					/* 0x01c0 */
+#define CBE_MIC_DISABLE_PWR_SAV_1	0x8000000000000000LL
+	u64	pad_0x01c8;					/* 0x01c8 */
+
+	u64	slow_fast_timer_1;				/* 0x01d0 */
+	u64	slow_next_timer_1;				/* 0x01d8 */
+
+	u8	pad_0x01e0_0x1000[0x1000 - 0x01e0];		/* 0x01e0 */
+};
+
+extern struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np);
+extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu);
+
 /* Init this module early */
 extern void cbe_regs_init(void);
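
The "PMU shadow registers" comment above motivates keeping a software copy of values written to write-only registers, plus a bitmap of counters whose value still sits in the hardware latch while the PMU is disabled. A rough, hypothetical sketch of that bookkeeping; write_pm_ctr() and read_pm_ctr() are invented helpers, not functions from this patch:

#include <stdint.h>
#include <stdbool.h>

struct pmu_shadow {
	uint32_t pm_ctr[4];		/* last value written per counter */
	uint32_t value_in_latch;	/* bit n set: ctr n still latched */
};

static void write_pm_ctr(struct pmu_shadow *s, int n, uint32_t val,
			 bool pmu_enabled)
{
	s->pm_ctr[n] = val;		/* keep a readable copy */
	if (pmu_enabled)
		s->value_in_latch &= ~(1u << n);	/* took effect now */
	else
		s->value_in_latch |= 1u << n;		/* waiting in latch */
}

static uint32_t read_pm_ctr(const struct pmu_shadow *s, int n,
			    bool pmu_enabled)
{
	/* While disabled, or still latched, the shadow copy is authoritative. */
	if (!pmu_enabled || (s->value_in_latch & (1u << n)))
		return s->pm_ctr[n];
	return 0;	/* placeholder: a real driver would read the MMIO register */
}
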
 
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
new file mode 100644
index 0000000..616a0a3
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -0,0 +1,226 @@
+/*
+ * thermal support for the cell processor
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <asm/spu.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+
+#include "cbe_regs.h"
+#include "spu_priv1_mmio.h"
+
+static struct cbe_pmd_regs __iomem *get_pmd_regs(struct sys_device *sysdev)
+{
+	struct spu *spu;
+
+	spu = container_of(sysdev, struct spu, sysdev);
+
+	return cbe_get_pmd_regs(spu_devnode(spu));
+}
+
+/* returns the value for a given spu in a given register */
+static u8 spu_read_register_value(struct sys_device *sysdev, union spe_reg __iomem *reg)
+{
+	unsigned int *id;
+	union spe_reg value;
+	struct spu *spu;
+
+	/* Getting the id from the reg attribute will not work on future device-tree
+	 * layouts; in future we should store the id in the spu struct and use it here. */
+	spu = container_of(sysdev, struct spu, sysdev);
+	id = (unsigned int *)get_property(spu_devnode(spu), "reg", NULL);
+	value.val = in_be64(&reg->val);
+
+	return value.spe[*id];
+}
+
+static ssize_t spu_show_temp(struct sys_device *sysdev, char *buf)
+{
+	int value;
+	struct cbe_pmd_regs __iomem *pmd_regs;
+
+	pmd_regs = get_pmd_regs(sysdev);
+
+	value = spu_read_register_value(sysdev, &pmd_regs->ts_ctsr1);
+	/* clear all other bits */
+	value &= 0x3F;
+	/* temp is stored in steps of 2 degrees */
+	value *= 2;
+	/* base temp is 65 degrees */
+	value += 65;
+
+	return sprintf(buf, "%d\n", (int) value);
+}
+
+static ssize_t ppe_show_temp(struct sys_device *sysdev, char *buf, int pos)
+{
+	struct cbe_pmd_regs __iomem *pmd_regs;
+	u64 value;
+
+	pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
+	value = in_be64(&pmd_regs->ts_ctsr2);
+
+	/* access the corresponding byte */
+	value >>= pos;
+	/* clear all other bits */
+	value &= 0x3F;
+	/* temp is stored in steps of 2 degrees */
+	value *= 2;
+	/* base temp is 65 degrees */
+	value += 65;
+
+	return sprintf(buf, "%d\n", (int) value);
+}
+
+
+/* shows the temperature of the DTS on the PPE,
+ * located near the linear thermal sensor */
+static ssize_t ppe_show_temp0(struct sys_device *sysdev, char *buf)
+{
+	return ppe_show_temp(sysdev, buf, 32);
+}
+
+/* shows the temperature of the second DTS on the PPE */
+static ssize_t ppe_show_temp1(struct sys_device *sysdev, char *buf)
+{
+	return ppe_show_temp(sysdev, buf, 0);
+}
+
+static struct sysdev_attribute attr_spu_temperature = {
+	.attr = {.name = "temperature", .mode = 0400 },
+	.show = spu_show_temp,
+};
+
+static struct attribute *spu_attributes[] = {
+	&attr_spu_temperature.attr,
+};
+
+static struct attribute_group spu_attribute_group = {
+	.name	= "thermal",
+	.attrs	= spu_attributes,
+};
+
+static struct sysdev_attribute attr_ppe_temperature0 = {
+	.attr = {.name = "temperature0", .mode = 0400 },
+	.show = ppe_show_temp0,
+};
+
+static struct sysdev_attribute attr_ppe_temperature1 = {
+	.attr = {.name = "temperature1", .mode = 0400 },
+	.show = ppe_show_temp1,
+};
+
+static struct attribute *ppe_attributes[] = {
+	&attr_ppe_temperature0.attr,
+	&attr_ppe_temperature1.attr,
+};
+
+static struct attribute_group ppe_attribute_group = {
+	.name	= "thermal",
+	.attrs	= ppe_attributes,
+};
+
+/*
+ * initialize throttling with default values
+ */
+static void __init init_default_values(void)
+{
+	int cpu;
+	struct cbe_pmd_regs __iomem *pmd_regs;
+	struct sys_device *sysdev;
+	union ppe_spe_reg tpr;
+	union spe_reg str1;
+	u64 str2;
+	union spe_reg cr1;
+	u64 cr2;
+
+	/* TPR defaults */
+	/* ppe
+	 *	1F - no full stop
+	 *	08 - dynamic throttling starts if over 80 degrees
+	 *	03 - dynamic throttling ceases if below 70 degrees */
+	tpr.ppe = 0x1F0803;
+	/* spe
+	 *	10 - full stopped when over 96 degrees
+	 *	08 - dynamic throttling starts if over 80 degrees
+	 *	03 - dynamic throttling ceases if below 70 degrees
+	 */
+	tpr.spe = 0x100803;
+
+	/* STR defaults */
+	/* str1
+	 *	10 - stop 16 of 32 cycles
+	 */
+	str1.val = 0x1010101010101010ull;
+	/* str2
+	 *	10 - stop 16 of 32 cycles
+	 */
+	str2 = 0x10;
+
+	/* CR defaults */
+	/* cr1
+	 *	4 - normal operation
+	 */
+	cr1.val = 0x0404040404040404ull;
+	/* cr2
+	 *	4 - normal operation
+	 */
+	cr2 = 0x04;
+
+	for_each_possible_cpu (cpu) {
+		pr_debug("processing cpu %d\n", cpu);
+		sysdev = get_cpu_sysdev(cpu);
+		pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
+
+		out_be64(&pmd_regs->tm_str2, str2);
+		out_be64(&pmd_regs->tm_str1.val, str1.val);
+		out_be64(&pmd_regs->tm_tpr.val, tpr.val);
+		out_be64(&pmd_regs->tm_cr1.val, cr1.val);
+		out_be64(&pmd_regs->tm_cr2, cr2);
+	}
+}
+
+
+static int __init thermal_init(void)
+{
+	init_default_values();
+
+	spu_add_sysdev_attr_group(&spu_attribute_group);
+	cpu_add_sysdev_attr_group(&ppe_attribute_group);
+
+	return 0;
+}
+module_init(thermal_init);
+
+static void __exit thermal_exit(void)
+{
+	spu_remove_sysdev_attr_group(&spu_attribute_group);
+	cpu_remove_sysdev_attr_group(&ppe_attribute_group);
+}
+module_exit(thermal_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
+
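
The temperature decoding used by spu_show_temp() and ppe_show_temp() above boils down to a 6-bit sensor field counted in 2-degree steps on a 65-degree base. A standalone sketch of that conversion; the register value below is invented for illustration:

#include <stdio.h>

static int cbe_decode_temp(unsigned long long reg, unsigned int shift)
{
	unsigned int raw = (reg >> shift) & 0x3F;	/* 6-bit sensor field */

	return raw * 2 + 65;	/* degrees Celsius */
}

int main(void)
{
	unsigned long long ts_ctsr2 = 0x0a00000014ULL;	/* made-up register value */

	printf("DTS 0: %d C\n", cbe_decode_temp(ts_ctsr2, 32));
	printf("DTS 1: %d C\n", cbe_decode_temp(ts_ctsr2, 0));
	return 0;
}
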
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index a914c12..6666d03 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -396,3 +396,19 @@
 	/* Enable on current CPU */
 	iic_setup_cpu();
 }
+
+void iic_set_interrupt_routing(int cpu, int thread, int priority)
+{
+	struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
+	u64 iic_ir = 0;
+	int node = cpu >> 1;
+
+	/* Set which node and thread will handle the next interrupt */
+	iic_ir |= CBE_IIC_IR_PRIO(priority) |
+		  CBE_IIC_IR_DEST_NODE(node);
+	if (thread == 0)
+		iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
+	else
+		iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
+	out_be64(&iic_regs->iic_ir, iic_ir);
+}
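
The IIC_IR value written by iic_set_interrupt_routing() above is composed from the CBE_IIC_IR_* macros added to cbe_regs.h earlier in this series. A standalone sketch of that composition, with arbitrary example inputs:

#include <stdio.h>

#define CBE_IIC_IR_PRIO(x)      (((x) & 0xf) << 12)
#define CBE_IIC_IR_DEST_NODE(x) (((x) & 0xf) << 4)
#define CBE_IIC_IR_DEST_UNIT(x) ((x) & 0xf)
#define CBE_IIC_IR_PT_0         0xe
#define CBE_IIC_IR_PT_1         0xf

int main(void)
{
	int cpu = 3, thread = 1, priority = 5;
	int node = cpu >> 1;	/* two hardware threads per node */
	unsigned long long ir;

	ir = CBE_IIC_IR_PRIO(priority) | CBE_IIC_IR_DEST_NODE(node);
	ir |= CBE_IIC_IR_DEST_UNIT(thread ? CBE_IIC_IR_PT_1 : CBE_IIC_IR_PT_0);

	printf("IIC_IR = 0x%llx\n", ir);
	return 0;
}
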
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h
index 9ba1d3c..942dc39 100644
--- a/arch/powerpc/platforms/cell/interrupt.h
+++ b/arch/powerpc/platforms/cell/interrupt.h
@@ -83,5 +83,7 @@
 
 extern void spider_init_IRQ(void);
 
+extern void iic_set_interrupt_routing(int cpu, int thread, int priority);
+
 #endif
 #endif /* ASM_CELL_PIC_H */
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c
new file mode 100644
index 0000000..580d425
--- /dev/null
+++ b/arch/powerpc/platforms/cell/io-workarounds.c
@@ -0,0 +1,346 @@
+/*
+ *  Copyright (C) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ *		       IBM, Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
+
+
+#define SPIDER_PCI_REG_BASE		0xd000
+#define SPIDER_PCI_VCI_CNTL_STAT	0x0110
+#define SPIDER_PCI_DUMMY_READ		0x0810
+#define SPIDER_PCI_DUMMY_READ_BASE	0x0814
+
+/* Undefine that to re-enable bogus prefetch
+ *
+ * Without that workaround, the chip will do bogus prefetch past
+ * page boundary from system memory. This setting will disable that,
+ * though the documentation is unclear as to the consequences of doing
+ * so, whether purely performance-related or possible misbehaviour... It's
+ * not clear whether the chip can handle unaligned accesses at all without
+ * prefetching enabled.
+ *
+ * For now, things appear to be behaving properly with that prefetching
+ * disabled and IDE, possibly because IDE isn't doing any unaligned
+ * access.
+ */
+#define SPIDER_DISABLE_PREFETCH
+
+#define MAX_SPIDERS	2
+
+static struct spider_pci_bus {
+	void __iomem	*regs;
+	unsigned long	mmio_start;
+	unsigned long	mmio_end;
+	unsigned long	pio_vstart;
+	unsigned long	pio_vend;
+} spider_pci_busses[MAX_SPIDERS];
+static int spider_pci_count;
+
+static struct spider_pci_bus *spider_pci_find(unsigned long vaddr,
+					      unsigned long paddr)
+{
+	int i;
+
+	for (i = 0; i < spider_pci_count; i++) {
+		struct spider_pci_bus *bus = &spider_pci_busses[i];
+		if (paddr && paddr >= bus->mmio_start && paddr < bus->mmio_end)
+			return bus;
+		if (vaddr && vaddr >= bus->pio_vstart && vaddr < bus->pio_vend)
+			return bus;
+	}
+	return NULL;
+}
+
+static void spider_io_flush(const volatile void __iomem *addr)
+{
+	struct spider_pci_bus *bus;
+	int token;
+
+	/* Get platform token (set by ioremap) from address */
+	token = PCI_GET_ADDR_TOKEN(addr);
+
+	/* Fast path if we have a non-0 token, it indicates which bus we
+	 * are on.
+	 *
+	 * If the token is 0, that means either the ioremap was done
+	 * before we initialized this layer, or it's a PIO operation. We
+	 * fallback to a low path in this case. Hopefully, internal devices
+	 * which are ioremap'ed early should use in_XX/out_XX functions
+	 * instead of the PCI ones and thus not suffer from the slowdown.
+	 *
+	 * Also note that currently, the workaround will not work for areas
+	 * that are not mapped with PTEs (bolted in the hash table). This
+	 * is the case for ioremaps done very early at boot (before
+	 * mem_init_done) and includes the mapping of the ISA IO space.
+	 *
+	 * Fortunately, none of the affected devices is expected to do DMA
+	 * and thus there should be no problem in practice.
+	 *
+	 * In order to improve performance, we only do the PTE search for
+	 * addresses falling in the PHB IO space area. That means it will
+	 * not work for hotplug'ed PHBs but those don't exist with Spider.
+	 */
+	if (token && token <= spider_pci_count)
+		bus = &spider_pci_busses[token - 1];
+	else {
+		unsigned long vaddr, paddr;
+		pte_t *ptep;
+
+		/* Fixup physical address */
+		vaddr = (unsigned long)PCI_FIX_ADDR(addr);
+
+		/* Check if it's in the allowed range for PIO */
+		if (vaddr < PHBS_IO_BASE || vaddr >= IMALLOC_BASE)
+			return;
+
+		/* Try to find a PTE. If not, clear the paddr, we'll do
+		 * a vaddr only lookup (PIO only)
+		 */
+		ptep = find_linux_pte(init_mm.pgd, vaddr);
+		if (ptep == NULL)
+			paddr = 0;
+		else
+			paddr = pte_pfn(*ptep) << PAGE_SHIFT;
+
+		bus = spider_pci_find(vaddr, paddr);
+		if (bus == NULL)
+			return;
+	}
+
+	/* Now do the workaround
+	 */
+	(void)in_be32(bus->regs + SPIDER_PCI_DUMMY_READ);
+}
+
+static u8 spider_readb(const volatile void __iomem *addr)
+{
+	u8 val = __do_readb(addr);
+	spider_io_flush(addr);
+	return val;
+}
+
+static u16 spider_readw(const volatile void __iomem *addr)
+{
+	u16 val = __do_readw(addr);
+	spider_io_flush(addr);
+	return val;
+}
+
+static u32 spider_readl(const volatile void __iomem *addr)
+{
+	u32 val = __do_readl(addr);
+	spider_io_flush(addr);
+	return val;
+}
+
+static u64 spider_readq(const volatile void __iomem *addr)
+{
+	u64 val = __do_readq(addr);
+	spider_io_flush(addr);
+	return val;
+}
+
+static u16 spider_readw_be(const volatile void __iomem *addr)
+{
+	u16 val = __do_readw_be(addr);
+	spider_io_flush(addr);
+	return val;
+}
+
+static u32 spider_readl_be(const volatile void __iomem *addr)
+{
+	u32 val = __do_readl_be(addr);
+	spider_io_flush(addr);
+	return val;
+}
+
+static u64 spider_readq_be(const volatile void __iomem *addr)
+{
+	u64 val = __do_readq_be(addr);
+	spider_io_flush(addr);
+	return val;
+}
+
+static void spider_readsb(const volatile void __iomem *addr, void *buf,
+			  unsigned long count)
+{
+	__do_readsb(addr, buf, count);
+	spider_io_flush(addr);
+}
+
+static void spider_readsw(const volatile void __iomem *addr, void *buf,
+			  unsigned long count)
+{
+	__do_readsw(addr, buf, count);
+	spider_io_flush(addr);
+}
+
+static void spider_readsl(const volatile void __iomem *addr, void *buf,
+			  unsigned long count)
+{
+	__do_readsl(addr, buf, count);
+	spider_io_flush(addr);
+}
+
+static void spider_memcpy_fromio(void *dest, const volatile void __iomem *src,
+				 unsigned long n)
+{
+	__do_memcpy_fromio(dest, src, n);
+	spider_io_flush(src);
+}
+
+
+static void __iomem * spider_ioremap(unsigned long addr, unsigned long size,
+				     unsigned long flags)
+{
+	struct spider_pci_bus *bus;
+	void __iomem *res = __ioremap(addr, size, flags);
+	int busno;
+
+	pr_debug("spider_ioremap(0x%lx, 0x%lx, 0x%lx) -> 0x%p\n",
+		 addr, size, flags, res);
+
+	bus = spider_pci_find(0, addr);
+	if (bus != NULL) {
+		busno = bus - spider_pci_busses;
+		pr_debug(" found bus %d, setting token\n", busno);
+		PCI_SET_ADDR_TOKEN(res, busno + 1);
+	}
+	pr_debug(" result=0x%p\n", res);
+
+	return res;
+}
+
+static void __init spider_pci_setup_chip(struct spider_pci_bus *bus)
+{
+#ifdef SPIDER_DISABLE_PREFETCH
+	u32 val = in_be32(bus->regs + SPIDER_PCI_VCI_CNTL_STAT);
+	pr_debug(" PVCI_Control_Status was 0x%08x\n", val);
+	out_be32(bus->regs + SPIDER_PCI_VCI_CNTL_STAT, val | 0x8);
+#endif
+
+	/* Configure the dummy address for the workaround */
+	out_be32(bus->regs + SPIDER_PCI_DUMMY_READ_BASE, 0x80000000);
+}
+
+static void __init spider_pci_add_one(struct pci_controller *phb)
+{
+	struct spider_pci_bus *bus = &spider_pci_busses[spider_pci_count];
+	struct device_node *np = phb->arch_data;
+	struct resource rsrc;
+	void __iomem *regs;
+
+	if (spider_pci_count >= MAX_SPIDERS) {
+		printk(KERN_ERR "Too many spider bridges, workarounds"
+		       " disabled for %s\n", np->full_name);
+		return;
+	}
+
+	/* Get the registers for the beast */
+	if (of_address_to_resource(np, 0, &rsrc)) {
+		printk(KERN_ERR "Failed to get registers for spider %s"
+		       " workarounds disabled\n", np->full_name);
+		return;
+	}
+
+	/* Mask out some useless bits in there to get to the base of the
+	 * spider chip
+	 */
+	rsrc.start &= ~0xfffffffful;
+
+	/* Map them */
+	regs = ioremap(rsrc.start + SPIDER_PCI_REG_BASE, 0x1000);
+	if (regs == NULL) {
+		printk(KERN_ERR "Failed to map registers for spider %s"
+		       " workarounds disabled\n", np->full_name);
+		return;
+	}
+
+	spider_pci_count++;
+
+	/* We assume spiders only have one MMIO resource */
+	bus->mmio_start = phb->mem_resources[0].start;
+	bus->mmio_end = phb->mem_resources[0].end + 1;
+
+	bus->pio_vstart = (unsigned long)phb->io_base_virt;
+	bus->pio_vend = bus->pio_vstart + phb->pci_io_size;
+
+	bus->regs = regs;
+
+	printk(KERN_INFO "PCI: Spider MMIO workaround for %s\n",np->full_name);
+
+	pr_debug(" mmio (P) = 0x%016lx..0x%016lx\n",
+		 bus->mmio_start, bus->mmio_end);
+	pr_debug("  pio (V) = 0x%016lx..0x%016lx\n",
+		 bus->pio_vstart, bus->pio_vend);
+	pr_debug(" regs (P) = 0x%016lx (V) = 0x%p\n",
+		 rsrc.start + SPIDER_PCI_REG_BASE, bus->regs);
+
+	spider_pci_setup_chip(bus);
+}
+
+static struct ppc_pci_io __initdata spider_pci_io = {
+	.readb = spider_readb,
+	.readw = spider_readw,
+	.readl = spider_readl,
+	.readq = spider_readq,
+	.readw_be = spider_readw_be,
+	.readl_be = spider_readl_be,
+	.readq_be = spider_readq_be,
+	.readsb = spider_readsb,
+	.readsw = spider_readsw,
+	.readsl = spider_readsl,
+	.memcpy_fromio = spider_memcpy_fromio,
+};
+
+static int __init spider_pci_workaround_init(void)
+{
+	struct pci_controller *phb;
+
+	if (!machine_is(cell))
+		return 0;
+
+	/* Find spider bridges. We assume they have all been probed
+	 * in setup_arch(). If that were to change, we would need to
+	 * update this code to cope with dynamically added busses
+	 */
+	list_for_each_entry(phb, &hose_list, list_node) {
+		struct device_node *np = phb->arch_data;
+		const char *model = get_property(np, "model", NULL);
+
+		/* If no model property or name isn't exactly "pci", skip */
+		if (model == NULL || strcmp(np->name, "pci"))
+			continue;
+		/* If model is not "Spider", skip */
+		if (strcmp(model, "Spider"))
+			continue;
+		spider_pci_add_one(phb);
+	}
+
+	/* No Spider PCI found, exit */
+	if (spider_pci_count == 0)
+		return 0;
+
+	/* Set up IO callbacks. We only set up MMIO reads. PIO reads will
+	 * fall back to MMIO reads (though without a token, thus slower)
+	 */
+	ppc_pci_io = spider_pci_io;
+
+	/* Setup ioremap callback */
+	ppc_md.ioremap = spider_ioremap;
+
+	return 0;
+}
+arch_initcall(spider_pci_workaround_init);
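
The essence of the Spider workaround above is that every MMIO read is followed by a dummy read from the bridge, which forces outstanding traffic to complete. A purely conceptual sketch of that wrapper pattern; the accessors below are stand-ins, not the kernel's read routines:

#include <stdint.h>

static volatile uint32_t *dummy_read_reg;	/* would point at SPIDER_PCI_DUMMY_READ */

static uint32_t do_mmio_read32(const volatile uint32_t *addr)
{
	return *addr;	/* stand-in for the architecture's readl() */
}

static uint32_t spider_style_readl(const volatile uint32_t *addr)
{
	uint32_t val = do_mmio_read32(addr);

	/* Dummy read: makes the bridge finish pending operations. */
	(void)do_mmio_read32(dummy_read_reg);

	return val;
}
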
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index aca4c3d..b43466b 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -1,514 +1,747 @@
 /*
  * IOMMU implementation for Cell Broadband Processor Architecture
- * We just establish a linear mapping at boot by setting all the
- * IOPT cache entries in the CPU.
- * The mapping functions should be identical to pci_direct_iommu, 
- * except for the handling of the high order bit that is required
- * by the Spider bridge. These should be split into a separate
- * file at the point where we get a different bridge chip.
  *
- * Copyright (C) 2005 IBM Deutschland Entwicklung GmbH,
- *			 Arnd Bergmann <arndb@de.ibm.com>
+ * (C) Copyright IBM Corporation 2006
  *
- * Based on linear mapping
- * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ * Author: Jeremy Kerr <jk@ozlabs.org>
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #undef DEBUG
 
 #include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/string.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
-#include <linux/compiler.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
 
-#include <asm/sections.h>
-#include <asm/iommu.h>
-#include <asm/io.h>
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
+#include <asm/iommu.h>
 #include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/abs_addr.h>
-#include <asm/system.h>
-#include <asm/ppc-pci.h>
+#include <asm/pci-bridge.h>
 #include <asm/udbg.h>
+#include <asm/of_platform.h>
+#include <asm/lmb.h>
 
-#include "iommu.h"
+#include "cbe_regs.h"
+#include "interrupt.h"
 
-static inline unsigned long 
-get_iopt_entry(unsigned long real_address, unsigned long ioid,
-			 unsigned long prot)
-{
-	return (prot & IOPT_PROT_MASK)
-	     | (IOPT_COHERENT)
-	     | (IOPT_ORDER_VC)
-	     | (real_address & IOPT_RPN_MASK)
-	     | (ioid & IOPT_IOID_MASK);
-}
+/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages
+ * instead of leaving them mapped to some dummy page. This can be
+ * enabled once the appropriate workarounds for spider bugs have
+ * been enabled
+ */
+#define CELL_IOMMU_REAL_UNMAP
 
-typedef struct {
-	unsigned long val;
-} ioste;
+/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
+ * IO PTEs based on the transfer direction. That can be enabled
+ * once spider-net has been fixed to pass the correct direction
+ * to the DMA mapping functions
+ */
+#define CELL_IOMMU_STRICT_PROTECTION
 
-static inline ioste
-mk_ioste(unsigned long val)
-{
-	ioste ioste = { .val = val, };
-	return ioste;
-}
 
-static inline ioste
-get_iost_entry(unsigned long iopt_base, unsigned long io_address, unsigned page_size)
-{
-	unsigned long ps;
-	unsigned long iostep;
-	unsigned long nnpt;
-	unsigned long shift;
+#define NR_IOMMUS			2
 
-	switch (page_size) {
-	case 0x1000000:
-		ps = IOST_PS_16M;
-		nnpt = 0;  /* one page per segment */
-		shift = 5; /* segment has 16 iopt entries */
-		break;
+/* IOC mmap registers */
+#define IOC_Reg_Size			0x2000
 
-	case 0x100000:
-		ps = IOST_PS_1M;
-		nnpt = 0;  /* one page per segment */
-		shift = 1; /* segment has 256 iopt entries */
-		break;
+#define IOC_IOPT_CacheInvd		0x908
+#define IOC_IOPT_CacheInvd_NE_Mask	0xffe0000000000000ul
+#define IOC_IOPT_CacheInvd_IOPTE_Mask	0x000003fffffffff8ul
+#define IOC_IOPT_CacheInvd_Busy		0x0000000000000001ul
 
-	case 0x10000:
-		ps = IOST_PS_64K;
-		nnpt = 0x07; /* 8 pages per io page table */
-		shift = 0;   /* all entries are used */
-		break;
+#define IOC_IOST_Origin			0x918
+#define IOC_IOST_Origin_E		0x8000000000000000ul
+#define IOC_IOST_Origin_HW		0x0000000000000800ul
+#define IOC_IOST_Origin_HL		0x0000000000000400ul
 
-	case 0x1000:
-		ps = IOST_PS_4K;
-		nnpt = 0x7f; /* 128 pages per io page table */
-		shift = 0;   /* all entries are used */
-		break;
+#define IOC_IO_ExcpStat			0x920
+#define IOC_IO_ExcpStat_V		0x8000000000000000ul
+#define IOC_IO_ExcpStat_SPF_Mask	0x6000000000000000ul
+#define IOC_IO_ExcpStat_SPF_S		0x6000000000000000ul
+#define IOC_IO_ExcpStat_SPF_P		0x4000000000000000ul
+#define IOC_IO_ExcpStat_ADDR_Mask	0x00000007fffff000ul
+#define IOC_IO_ExcpStat_RW_Mask		0x0000000000000800ul
+#define IOC_IO_ExcpStat_IOID_Mask	0x00000000000007fful
 
-	default: /* not a known compile time constant */
-		{
-			/* BUILD_BUG_ON() is not usable here */
-			extern void __get_iost_entry_bad_page_size(void);
-			__get_iost_entry_bad_page_size();
-		}
-		break;
-	}
+#define IOC_IO_ExcpMask			0x928
+#define IOC_IO_ExcpMask_SFE		0x4000000000000000ul
+#define IOC_IO_ExcpMask_PFE		0x2000000000000000ul
 
-	iostep = iopt_base +
-			 /* need 8 bytes per iopte */
-			(((io_address / page_size * 8)
-			 /* align io page tables on 4k page boundaries */
-				 << shift) 
-			 /* nnpt+1 pages go into each iopt */
-				 & ~(nnpt << 12));
+#define IOC_IOCmd_Offset		0x1000
 
-	nnpt++; /* this seems to work, but the documentation is not clear
-		   about wether we put nnpt or nnpt-1 into the ioste bits.
-		   In theory, this can't work for 4k pages. */
-	return mk_ioste(IOST_VALID_MASK
-			| (iostep & IOST_PT_BASE_MASK)
-			| ((nnpt << 5) & IOST_NNPT_MASK)
-			| (ps & IOST_PS_MASK));
-}
+#define IOC_IOCmd_Cfg			0xc00
+#define IOC_IOCmd_Cfg_TE		0x0000800000000000ul
 
-/* compute the address of an io pte */
-static inline unsigned long
-get_ioptep(ioste iost_entry, unsigned long io_address)
-{
-	unsigned long iopt_base;
-	unsigned long page_size;
-	unsigned long page_number;
-	unsigned long iopt_offset;
 
-	iopt_base = iost_entry.val & IOST_PT_BASE_MASK;
-	page_size = iost_entry.val & IOST_PS_MASK;
+/* Segment table entries */
+#define IOSTE_V			0x8000000000000000ul /* valid */
+#define IOSTE_H			0x4000000000000000ul /* cache hint */
+#define IOSTE_PT_Base_RPN_Mask  0x3ffffffffffff000ul /* base RPN of IOPT */
+#define IOSTE_NPPT_Mask		0x0000000000000fe0ul /* no. pages in IOPT */
+#define IOSTE_PS_Mask		0x0000000000000007ul /* page size */
+#define IOSTE_PS_4K		0x0000000000000001ul /*   - 4kB  */
+#define IOSTE_PS_64K		0x0000000000000003ul /*   - 64kB */
+#define IOSTE_PS_1M		0x0000000000000005ul /*   - 1MB  */
+#define IOSTE_PS_16M		0x0000000000000007ul /*   - 16MB */
 
-	/* decode page size to compute page number */
-	page_number = (io_address & 0x0fffffff) >> (10 + 2 * page_size);
-	/* page number is an offset into the io page table */
-	iopt_offset = (page_number << 3) & 0x7fff8ul;
-	return iopt_base + iopt_offset;
-}
+/* Page table entries */
+#define IOPTE_PP_W		0x8000000000000000ul /* protection: write */
+#define IOPTE_PP_R		0x4000000000000000ul /* protection: read */
+#define IOPTE_M			0x2000000000000000ul /* coherency required */
+#define IOPTE_SO_R		0x1000000000000000ul /* ordering: writes */
+#define IOPTE_SO_RW             0x1800000000000000ul /* ordering: r & w */
+#define IOPTE_RPN_Mask		0x07fffffffffff000ul /* RPN */
+#define IOPTE_H			0x0000000000000800ul /* cache hint */
+#define IOPTE_IOID_Mask		0x00000000000007fful /* ioid */
 
-/* compute the tag field of the iopt cache entry */
-static inline unsigned long
-get_ioc_tag(ioste iost_entry, unsigned long io_address)
-{
-	unsigned long iopte = get_ioptep(iost_entry, io_address);
 
-	return IOPT_VALID_MASK
-	     | ((iopte & 0x00000000000000ff8ul) >> 3)
-	     | ((iopte & 0x0000003fffffc0000ul) >> 9);
-}
+/* IOMMU sizing */
+#define IO_SEGMENT_SHIFT	28
+#define IO_PAGENO_BITS		(IO_SEGMENT_SHIFT - IOMMU_PAGE_SHIFT)
 
-/* compute the hashed 6 bit index for the 4-way associative pte cache */
-static inline unsigned long
-get_ioc_hash(ioste iost_entry, unsigned long io_address)
-{
-	unsigned long iopte = get_ioptep(iost_entry, io_address);
+/* The high bit needs to be set on every DMA address */
+#define SPIDER_DMA_OFFSET	0x80000000ul
 
-	return ((iopte & 0x000000000000001f8ul) >> 3)
-	     ^ ((iopte & 0x00000000000020000ul) >> 17)
-	     ^ ((iopte & 0x00000000000010000ul) >> 15)
-	     ^ ((iopte & 0x00000000000008000ul) >> 13)
-	     ^ ((iopte & 0x00000000000004000ul) >> 11)
-	     ^ ((iopte & 0x00000000000002000ul) >> 9)
-	     ^ ((iopte & 0x00000000000001000ul) >> 7);
-}
-
-/* same as above, but pretend that we have a simpler 1-way associative
-   pte cache with an 8 bit index */
-static inline unsigned long
-get_ioc_hash_1way(ioste iost_entry, unsigned long io_address)
-{
-	unsigned long iopte = get_ioptep(iost_entry, io_address);
-
-	return ((iopte & 0x000000000000001f8ul) >> 3)
-	     ^ ((iopte & 0x00000000000020000ul) >> 17)
-	     ^ ((iopte & 0x00000000000010000ul) >> 15)
-	     ^ ((iopte & 0x00000000000008000ul) >> 13)
-	     ^ ((iopte & 0x00000000000004000ul) >> 11)
-	     ^ ((iopte & 0x00000000000002000ul) >> 9)
-	     ^ ((iopte & 0x00000000000001000ul) >> 7)
-	     ^ ((iopte & 0x0000000000000c000ul) >> 8);
-}
-
-static inline ioste
-get_iost_cache(void __iomem *base, unsigned long index)
-{
-	unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR);
-	return mk_ioste(in_be64(&p[index]));
-}
-
-static inline void
-set_iost_cache(void __iomem *base, unsigned long index, ioste ste)
-{
-	unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR);
-	pr_debug("ioste %02lx was %016lx, store %016lx", index,
-			get_iost_cache(base, index).val, ste.val);
-	out_be64(&p[index], ste.val);
-	pr_debug(" now %016lx\n", get_iost_cache(base, index).val);
-}
-
-static inline unsigned long
-get_iopt_cache(void __iomem *base, unsigned long index, unsigned long *tag)
-{
-	unsigned long __iomem *tags = (void *)(base + IOC_PT_CACHE_DIR);
-	unsigned long __iomem *p = (void *)(base + IOC_PT_CACHE_REG);	
-
-	*tag = tags[index];
-	rmb();
-	return *p;
-}
-
-static inline void
-set_iopt_cache(void __iomem *base, unsigned long index,
-		 unsigned long tag, unsigned long val)
-{
-	unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR;
-	unsigned long __iomem *p = base + IOC_PT_CACHE_REG;
-
-	out_be64(p, val);
-	out_be64(&tags[index], tag);
-}
-
-static inline void
-set_iost_origin(void __iomem *base)
-{
-	unsigned long __iomem *p = base + IOC_ST_ORIGIN;
-	unsigned long origin = IOSTO_ENABLE | IOSTO_SW;
-
-	pr_debug("iost_origin %016lx, now %016lx\n", in_be64(p), origin);
-	out_be64(p, origin);
-}
-
-static inline void
-set_iocmd_config(void __iomem *base)
-{
-	unsigned long __iomem *p = base + 0xc00;
-	unsigned long conf;
-
-	conf = in_be64(p);
-	pr_debug("iost_conf %016lx, now %016lx\n", conf, conf | IOCMD_CONF_TE);
-	out_be64(p, conf | IOCMD_CONF_TE);
-}
-
-static void enable_mapping(void __iomem *base, void __iomem *mmio_base)
-{
-	set_iocmd_config(base);
-	set_iost_origin(mmio_base);
-}
-
-static void iommu_dev_setup_null(struct pci_dev *d) { }
-static void iommu_bus_setup_null(struct pci_bus *b) { }
-
-struct cell_iommu {
-	unsigned long base;
-	unsigned long mmio_base;
-	void __iomem *mapped_base;
-	void __iomem *mapped_mmio_base;
+struct iommu_window {
+	struct list_head list;
+	struct cbe_iommu *iommu;
+	unsigned long offset;
+	unsigned long size;
+	unsigned long pte_offset;
+	unsigned int ioid;
+	struct iommu_table table;
 };
 
-static struct cell_iommu cell_iommus[NR_CPUS];
+#define NAMESIZE 8
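+/* One IOC per Cell node: xlate_regs map the translation and exception
+ * registers, cmd_regs the command configuration block, stab and ptab point
+ * to the in-memory segment and page tables, and pad_page backs otherwise
+ * unmapped entries (spider read-after-free workaround).
+ */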
+struct cbe_iommu {
+	int nid;
+	char name[NAMESIZE];
+	void __iomem *xlate_regs;
+	void __iomem *cmd_regs;
+	unsigned long *stab;
+	unsigned long *ptab;
+	void *pad_page;
+	struct list_head windows;
+};
 
-/* initialize the iommu to support a simple linear mapping
- * for each DMA window used by any device. For now, we
- * happen to know that there is only one DMA window in use,
- * starting at iopt_phys_offset. */
-static void cell_do_map_iommu(struct cell_iommu *iommu,
-			      unsigned int ioid,
-			      unsigned long map_start,
-			      unsigned long map_size)
+/* Static array of iommus, one per node
+ *   each contains a list of windows, keyed from dma_window property
+ *   - on bus setup, look for a matching window, or create one
+ *   - on dev setup, assign iommu_table ptr
+ */
+static struct cbe_iommu iommus[NR_IOMMUS];
+static int cbe_nr_iommus;
+
+static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
+		long n_ptes)
 {
-	unsigned long io_address, real_address;
-	void __iomem *ioc_base, *ioc_mmio_base;
-	ioste ioste;
-	unsigned long index;
+	unsigned long *reg, val;
+	long n;
 
-	/* we pretend the io page table was at a very high address */
-	const unsigned long fake_iopt = 0x10000000000ul;
-	const unsigned long io_page_size = 0x1000000; /* use 16M pages */
-	const unsigned long io_segment_size = 0x10000000; /* 256M */
+	reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
 
-	ioc_base = iommu->mapped_base;
-	ioc_mmio_base = iommu->mapped_mmio_base;
+	while (n_ptes > 0) {
+		/* we can invalidate up to 1 << 11 PTEs at once */
+		n = min(n_ptes, 1l << 11);
+		val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask)
+			| (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
+		        | IOC_IOPT_CacheInvd_Busy;
 
-	for (real_address = 0, io_address = map_start;
-	     io_address <= map_start + map_size;
-	     real_address += io_page_size, io_address += io_page_size) {
-		ioste = get_iost_entry(fake_iopt, io_address, io_page_size);
-		if ((real_address % io_segment_size) == 0) /* segment start */
-			set_iost_cache(ioc_mmio_base,
-				       io_address >> 28, ioste);
-		index = get_ioc_hash_1way(ioste, io_address);
-		pr_debug("addr %08lx, index %02lx, ioste %016lx\n",
-					 io_address, index, ioste.val);
-		set_iopt_cache(ioc_mmio_base,
-			get_ioc_hash_1way(ioste, io_address),
-			get_ioc_tag(ioste, io_address),
-			get_iopt_entry(real_address, ioid, IOPT_PROT_RW));
+		out_be64(reg, val);
+		while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
+			;
+
+		n_ptes -= n;
+		pte += n;
 	}
 }
 
-static void iommu_devnode_setup(struct device_node *d)
+static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
+		unsigned long uaddr, enum dma_data_direction direction)
 {
+	int i;
+	unsigned long *io_pte, base_pte;
+	struct iommu_window *window =
+		container_of(tbl, struct iommu_window, table);
+
+	/* implementing proper protection causes problems with the spidernet
+	 * driver - check mapping directions later, but allow read & write by
+	 * default for now. */
+#ifdef CELL_IOMMU_STRICT_PROTECTION
+	/* to avoid referencing a global, we use a trick here to set up the
+	 * protection bits. "prot" is set up as 3 fields of 4 bits appended
+	 * together, one for each of the 3 supported direction values. It is
+	 * then shifted left so that the field matching the desired direction
+	 * lands on the appropriate bits, and other bits are masked out.
+	 */
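+	/* For illustration: 0xc48 is 1100 0100 1000 in binary, so after the
+	 * shift the two protection bits (IOPTE_PP_W, IOPTE_PP_R) come out as
+	 * 11 for DMA_BIDIRECTIONAL (0), 01 (read only) for DMA_TO_DEVICE (1)
+	 * and 10 (write only) for DMA_FROM_DEVICE (2).
+	 */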
+	const unsigned long prot = 0xc48;
+	base_pte =
+		((prot << (52 + 4 * direction)) & (IOPTE_PP_W | IOPTE_PP_R))
+		| IOPTE_M | IOPTE_SO_RW | (window->ioid & IOPTE_IOID_Mask);
+#else
+	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW |
+		(window->ioid & IOPTE_IOID_Mask);
+#endif
+
+	io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
+
+	for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
+		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
+
+	mb();
+
+	invalidate_tce_cache(window->iommu, io_pte, npages);
+
+	pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
+		 index, npages, direction, base_pte);
+}
+
+static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
+{
+
+	int i;
+	unsigned long *io_pte, pte;
+	struct iommu_window *window =
+		container_of(tbl, struct iommu_window, table);
+
+	pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);
+
+#ifdef CELL_IOMMU_REAL_UNMAP
+	pte = 0;
+#else
+	/* spider bridge does PCI reads after freeing - insert a mapping
+	 * to a scratch page instead of an invalid entry */
+	pte = IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW | __pa(window->iommu->pad_page)
+		| (window->ioid & IOPTE_IOID_Mask);
+#endif
+
+	io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
+
+	for (i = 0; i < npages; i++)
+		io_pte[i] = pte;
+
+	mb();
+
+	invalidate_tce_cache(window->iommu, io_pte, npages);
+}
+
+static irqreturn_t ioc_interrupt(int irq, void *data)
+{
+	unsigned long stat;
+	struct cbe_iommu *iommu = data;
+
+	stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
+
+	/* Might want to rate limit it */
+	printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
+	printk(KERN_ERR "  V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
+	       !!(stat & IOC_IO_ExcpStat_V),
+	       (stat & IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
+	       (stat & IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
+	       (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
+	       (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
+	printk(KERN_ERR "  page=0x%016lx\n",
+	       stat & IOC_IO_ExcpStat_ADDR_Mask);
+
+	/* clear interrupt */
+	stat &= ~IOC_IO_ExcpStat_V;
+	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);
+
+	return IRQ_HANDLED;
+}
+
+static int cell_iommu_find_ioc(int nid, unsigned long *base)
+{
+	struct device_node *np;
+	struct resource r;
+
+	*base = 0;
+
+	/* First look for new style /be nodes */
+	for_each_node_by_name(np, "ioc") {
+		if (of_node_to_nid(np) != nid)
+			continue;
+		if (of_address_to_resource(np, 0, &r)) {
+			printk(KERN_ERR "iommu: can't get address for %s\n",
+			       np->full_name);
+			continue;
+		}
+		*base = r.start;
+		of_node_put(np);
+		return 0;
+	}
+
+	/* Ok, let's try the old way */
+	for_each_node_by_type(np, "cpu") {
+		const unsigned int *nidp;
+		const unsigned long *tmp;
+
+		nidp = get_property(np, "node-id", NULL);
+		if (nidp && *nidp == nid) {
+			tmp = get_property(np, "ioc-translation", NULL);
+			if (tmp) {
+				*base = *tmp;
+				of_node_put(np);
+				return 0;
+			}
+		}
+	}
+
+	return -ENODEV;
+}
+
+static void cell_iommu_setup_hardware(struct cbe_iommu *iommu, unsigned long size)
+{
+	struct page *page;
+	int ret, i;
+	unsigned long reg, segments, pages_per_segment, ptab_size, n_pte_pages;
+	unsigned long xlate_base;
+	unsigned int virq;
+
+	if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
+		panic("%s: missing IOC register mappings for node %d\n",
+		      __FUNCTION__, iommu->nid);
+
+	iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
+	iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;
+
+	segments = size >> IO_SEGMENT_SHIFT;
+	pages_per_segment = 1ull << IO_PAGENO_BITS;
+
+	pr_debug("%s: iommu[%d]: segments: %lu, pages per segment: %lu\n",
+			__FUNCTION__, iommu->nid, segments, pages_per_segment);
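+	/* Worked example (illustrative numbers): a 512MB window with 4kB
+	 * IOMMU pages gives 2 segments of 65536 pages each, i.e. a 1MB
+	 * contiguous page table and 128 pages of PTEs per segment.
+	 */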
+
+	/* set up the segment table */
+	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
+	BUG_ON(!page);
+	iommu->stab = page_address(page);
+	clear_page(iommu->stab);
+
+	/* ... and the page tables. Since these are contiguous, we can treat
+	 * the page tables as one array of ptes, like pSeries does.
+	 */
+	ptab_size = segments * pages_per_segment * sizeof(unsigned long);
+	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
+			iommu->nid, ptab_size, get_order(ptab_size));
+	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
+	BUG_ON(!page);
+
+	iommu->ptab = page_address(page);
+	memset(iommu->ptab, 0, ptab_size);
+
+	/* allocate a bogus page for the end of each mapping */
+	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
+	BUG_ON(!page);
+	iommu->pad_page = page_address(page);
+	clear_page(iommu->pad_page);
+
+	/* number of pages needed for a page table */
+	n_pte_pages = (pages_per_segment *
+		       sizeof(unsigned long)) >> IOMMU_PAGE_SHIFT;
+
+	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
+			__FUNCTION__, iommu->nid, iommu->stab, iommu->ptab,
+			n_pte_pages);
+
+	/* initialise the STEs */
+	reg = IOSTE_V | ((n_pte_pages - 1) << 5);
+
+	if (IOMMU_PAGE_SIZE == 0x1000)
+		reg |= IOSTE_PS_4K;
+	else if (IOMMU_PAGE_SIZE == 0x10000)
+		reg |= IOSTE_PS_64K;
+	else {
+		extern void __unknown_page_size_error(void);
+		__unknown_page_size_error();
+	}
+
+	pr_debug("Setting up IOMMU stab:\n");
+	for (i = 0; i * (1ul << IO_SEGMENT_SHIFT) < size; i++) {
+		iommu->stab[i] = reg |
+			(__pa(iommu->ptab) + n_pte_pages * IOMMU_PAGE_SIZE * i);
+		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
+	}
+
+	/* ensure that the STEs have updated */
+	mb();
+
+	/* setup interrupts for the iommu. */
+	reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
+	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
+			reg & ~IOC_IO_ExcpStat_V);
+	out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
+			IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);
+
+	virq = irq_create_mapping(NULL,
+			IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
+	BUG_ON(virq == NO_IRQ);
+
+	ret = request_irq(virq, ioc_interrupt, IRQF_DISABLED,
+			iommu->name, iommu);
+	BUG_ON(ret);
+
+	/* set the IOC segment table origin register (and turn on the iommu) */
+	reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
+	out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
+	in_be64(iommu->xlate_regs + IOC_IOST_Origin);
+
+	/* turn on IO translation */
+	reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
+	out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
+}
+
+#if 0/* Unused for now */
+static struct iommu_window *find_window(struct cbe_iommu *iommu,
+		unsigned long offset, unsigned long size)
+{
+	struct iommu_window *window;
+
+	/* todo: check for overlapping (but not equal) windows */
+
+	list_for_each_entry(window, &(iommu->windows), list) {
+		if (window->offset == offset && window->size == size)
+			return window;
+	}
+
+	return NULL;
+}
+#endif
+
+static struct iommu_window * __init
+cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
+			unsigned long offset, unsigned long size,
+			unsigned long pte_offset)
+{
+	struct iommu_window *window;
 	const unsigned int *ioid;
-	unsigned long map_start, map_size, token;
-	const unsigned long *dma_window;
-	struct cell_iommu *iommu;
 
-	ioid = get_property(d, "ioid", NULL);
-	if (!ioid)
-		pr_debug("No ioid entry found !\n");
+	ioid = get_property(np, "ioid", NULL);
+	if (ioid == NULL)
+		printk(KERN_WARNING "iommu: missing ioid for %s using 0\n",
+		       np->full_name);
 
-	dma_window = get_property(d, "ibm,dma-window", NULL);
-	if (!dma_window)
-		pr_debug("No ibm,dma-window entry found !\n");
+	window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
+	BUG_ON(window == NULL);
 
-	map_start = dma_window[1];
-	map_size = dma_window[2];
-	token = dma_window[0] >> 32;
+	window->offset = offset;
+	window->size = size;
+	window->ioid = ioid ? *ioid : 0;
+	window->iommu = iommu;
+	window->pte_offset = pte_offset;
 
-	iommu = &cell_iommus[token];
+	window->table.it_blocksize = 16;
+	window->table.it_base = (unsigned long)iommu->ptab;
+	window->table.it_index = iommu->nid;
+	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) +
+		window->pte_offset;
+	window->table.it_size = size >> IOMMU_PAGE_SHIFT;
 
-	cell_do_map_iommu(iommu, *ioid, map_start, map_size);
+	iommu_init_table(&window->table, iommu->nid);
+
+	pr_debug("\tioid      %d\n", window->ioid);
+	pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
+	pr_debug("\tbase      0x%016lx\n", window->table.it_base);
+	pr_debug("\toffset    0x%lx\n", window->table.it_offset);
+	pr_debug("\tsize      %ld\n", window->table.it_size);
+
+	list_add(&window->list, &iommu->windows);
+
+	if (offset != 0)
+		return window;
+
+	/* We need to map and reserve the first IOMMU page since it's used
+	 * by the spider workaround. In theory, we only need to do that when
+	 * running on spider but it doesn't really matter.
+	 *
+	 * This code also assumes that we have a window that starts at 0,
+	 * which is the case on all spider based blades.
+	 */
+	__set_bit(0, window->table.it_map);
+	tce_build_cell(&window->table, window->table.it_offset, 1,
+		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE);
+	window->table.it_hint = window->table.it_blocksize;
+
+	return window;
 }
 
-static void iommu_bus_setup(struct pci_bus *b)
+static struct cbe_iommu *cell_iommu_for_node(int nid)
 {
-	struct device_node *d = (struct device_node *)b->sysdata;
-	iommu_devnode_setup(d);
+	int i;
+
+	for (i = 0; i < cbe_nr_iommus; i++)
+		if (iommus[i].nid == nid)
+			return &iommus[i];
+	return NULL;
 }
 
-
-static int cell_map_iommu_hardcoded(int num_nodes)
+static void cell_dma_dev_setup(struct device *dev)
 {
-	struct cell_iommu *iommu = NULL;
+	struct iommu_window *window;
+	struct cbe_iommu *iommu;
+	struct dev_archdata *archdata = &dev->archdata;
 
-	pr_debug("%s(%d): Using hardcoded defaults\n", __FUNCTION__, __LINE__);
+	/* If we run without iommu, no need to do anything */
+	if (pci_dma_ops == &dma_direct_ops)
+		return;
 
-	/* node 0 */
-	iommu = &cell_iommus[0];
-	iommu->mapped_base = ioremap(0x20000511000ul, 0x1000);
-	iommu->mapped_mmio_base = ioremap(0x20000510000ul, 0x1000);
+	/* The current implementation uses the first window available in that
+	 * node's iommu. We -might- do something smarter later, though it may
+	 * never be necessary.
+	 */
+	iommu = cell_iommu_for_node(archdata->numa_node);
+	if (iommu == NULL || list_empty(&iommu->windows)) {
+		printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
+		       archdata->of_node ? archdata->of_node->full_name : "?",
+		       archdata->numa_node);
+		return;
+	}
+	window = list_entry(iommu->windows.next, struct iommu_window, list);
 
-	enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
+	archdata->dma_data = &window->table;
+}
 
-	cell_do_map_iommu(iommu, 0x048a,
-			  0x20000000ul,0x20000000ul);
+static void cell_pci_dma_dev_setup(struct pci_dev *dev)
+{
+	cell_dma_dev_setup(&dev->dev);
+}
 
-	if (num_nodes < 2)
+static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
+			      void *data)
+{
+	struct device *dev = data;
+
+	/* We are only interested in device addition */
+	if (action != BUS_NOTIFY_ADD_DEVICE)
 		return 0;
 
-	/* node 1 */
-	iommu = &cell_iommus[1];
-	iommu->mapped_base = ioremap(0x30000511000ul, 0x1000);
-	iommu->mapped_mmio_base = ioremap(0x30000510000ul, 0x1000);
+	/* We use the PCI DMA ops */
+	dev->archdata.dma_ops = pci_dma_ops;
 
-	enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
-
-	cell_do_map_iommu(iommu, 0x048a,
-			  0x20000000,0x20000000ul);
+	cell_dma_dev_setup(dev);
 
 	return 0;
 }
 
-
-static int cell_map_iommu(void)
-{
-	unsigned int num_nodes = 0;
-	const unsigned int *node_id;
-	const unsigned long *base, *mmio_base;
-	struct device_node *dn;
-	struct cell_iommu *iommu = NULL;
-
-	/* determine number of nodes (=iommus) */
-	pr_debug("%s(%d): determining number of nodes...", __FUNCTION__, __LINE__);
-	for(dn = of_find_node_by_type(NULL, "cpu");
-	    dn;
-	    dn = of_find_node_by_type(dn, "cpu")) {
-		node_id = get_property(dn, "node-id", NULL);
-
-		if (num_nodes < *node_id)
-			num_nodes = *node_id;
-		}
-
-	num_nodes++;
-	pr_debug("%i found.\n", num_nodes);
-
-	/* map the iommu registers for each node */
-	pr_debug("%s(%d): Looping through nodes\n", __FUNCTION__, __LINE__);
-	for(dn = of_find_node_by_type(NULL, "cpu");
-	    dn;
-	    dn = of_find_node_by_type(dn, "cpu")) {
-
-		node_id = get_property(dn, "node-id", NULL);
-		base = get_property(dn, "ioc-cache", NULL);
-		mmio_base = get_property(dn, "ioc-translation", NULL);
-
-		if (!base || !mmio_base || !node_id)
-			return cell_map_iommu_hardcoded(num_nodes);
-
-		iommu = &cell_iommus[*node_id];
-		iommu->base = *base;
-		iommu->mmio_base = *mmio_base;
-
-		iommu->mapped_base = ioremap(*base, 0x1000);
-		iommu->mapped_mmio_base = ioremap(*mmio_base, 0x1000);
-
-		enable_mapping(iommu->mapped_base,
-			       iommu->mapped_mmio_base);
-
-		/* everything else will be done in iommu_bus_setup */
-	}
-
-	return 1;
-}
-
-static void *cell_alloc_coherent(struct device *hwdev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flag)
-{
-	void *ret;
-
-	ret = (void *)__get_free_pages(flag, get_order(size));
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		*dma_handle = virt_to_abs(ret) | CELL_DMA_VALID;
-	}
-	return ret;
-}
-
-static void cell_free_coherent(struct device *hwdev, size_t size,
-				 void *vaddr, dma_addr_t dma_handle)
-{
-	free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t cell_map_single(struct device *hwdev, void *ptr,
-		size_t size, enum dma_data_direction direction)
-{
-	return virt_to_abs(ptr) | CELL_DMA_VALID;
-}
-
-static void cell_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
-		size_t size, enum dma_data_direction direction)
-{
-}
-
-static int cell_map_sg(struct device *hwdev, struct scatterlist *sg,
-		int nents, enum dma_data_direction direction)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		sg->dma_address = (page_to_phys(sg->page) + sg->offset)
-					| CELL_DMA_VALID;
-		sg->dma_length = sg->length;
-	}
-
-	return nents;
-}
-
-static void cell_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-		int nents, enum dma_data_direction direction)
-{
-}
-
-static int cell_dma_supported(struct device *dev, u64 mask)
-{
-	return mask < 0x100000000ull;
-}
-
-static struct dma_mapping_ops cell_iommu_ops = {
-	.alloc_coherent = cell_alloc_coherent,
-	.free_coherent = cell_free_coherent,
-	.map_single = cell_map_single,
-	.unmap_single = cell_unmap_single,
-	.map_sg = cell_map_sg,
-	.unmap_sg = cell_unmap_sg,
-	.dma_supported = cell_dma_supported,
+static struct notifier_block cell_of_bus_notifier = {
+	.notifier_call = cell_of_bus_notify
 };
 
-void cell_init_iommu(void)
+static int __init cell_iommu_get_window(struct device_node *np,
+					 unsigned long *base,
+					 unsigned long *size)
 {
-	int setup_bus = 0;
+	const void *dma_window;
+	unsigned long index;
 
-	if (of_find_node_by_path("/mambo")) {
-		pr_info("Not using iommu on systemsim\n");
-	} else {
-
-		if (!(of_chosen &&
-		      get_property(of_chosen, "linux,iommu-off", NULL)))
-			setup_bus = cell_map_iommu();
-
-		if (setup_bus) {
-			pr_debug("%s: IOMMU mapping activated\n", __FUNCTION__);
-			ppc_md.iommu_dev_setup = iommu_dev_setup_null;
-			ppc_md.iommu_bus_setup = iommu_bus_setup;
-		} else {
-			pr_debug("%s: IOMMU mapping activated, "
-				 "no device action necessary\n", __FUNCTION__);
-			/* Direct I/O, IOMMU off */
-			ppc_md.iommu_dev_setup = iommu_dev_setup_null;
-			ppc_md.iommu_bus_setup = iommu_bus_setup_null;
-		}
+	/* Use ibm,dma-window if available, else hard-code the defaults */
+	dma_window = get_property(np, "ibm,dma-window", NULL);
+	if (dma_window == NULL) {
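+		/* No ibm,dma-window property (older firmware): fall back to a
+		 * 2GB window at offset 0 and let the caller know it was missing.
+		 */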
+		*base = 0;
+		*size = 0x80000000u;
+		return -ENODEV;
 	}
 
-	pci_dma_ops = cell_iommu_ops;
+	of_parse_dma_window(np, dma_window, &index, base, size);
+	return 0;
 }
+
+static void __init cell_iommu_init_one(struct device_node *np, unsigned long offset)
+{
+	struct cbe_iommu *iommu;
+	unsigned long base, size;
+	int nid, i;
+
+	/* Get node ID */
+	nid = of_node_to_nid(np);
+	if (nid < 0) {
+		printk(KERN_ERR "iommu: failed to get node for %s\n",
+		       np->full_name);
+		return;
+	}
+	pr_debug("iommu: setting up iommu for node %d (%s)\n",
+		 nid, np->full_name);
+
+	/* XXX todo: If we can have multiple windows on the same IOMMU, which
+	 * isn't the case today, we probably want to check here whether the
+	 * iommu for that node is already set up.
+	 * However, there might be an issue with getting the size right, so
+	 * let's ignore that for now. We might want to get rid of multiple
+	 * window support entirely since the cell iommu supports per-page ioids
+	 */
+
+	if (cbe_nr_iommus >= NR_IOMMUS) {
+		printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n",
+		       np->full_name);
+		return;
+	}
+
+	/* Init base fields */
+	i = cbe_nr_iommus++;
+	iommu = &iommus[i];
+	iommu->stab = 0;
+	iommu->nid = nid;
+	snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
+	INIT_LIST_HEAD(&iommu->windows);
+
+	/* Obtain a window for it */
+	cell_iommu_get_window(np, &base, &size);
+
+	pr_debug("\ttranslating window 0x%lx...0x%lx\n",
+		 base, base + size - 1);
+
+	/* Initialize the hardware */
+	cell_iommu_setup_hardware(iommu, size);
+
+	/* Setup the iommu_table */
+	cell_iommu_setup_window(iommu, np, base, size,
+				offset >> IOMMU_PAGE_SHIFT);
+}
+
+static void __init cell_disable_iommus(void)
+{
+	int node;
+	unsigned long base, val;
+	void __iomem *xregs, *cregs;
+
+	/* Make sure IOC translation is disabled on all nodes */
+	for_each_online_node(node) {
+		if (cell_iommu_find_ioc(node, &base))
+			continue;
+		xregs = ioremap(base, IOC_Reg_Size);
+		if (xregs == NULL)
+			continue;
+		cregs = xregs + IOC_IOCmd_Offset;
+
+		pr_debug("iommu: cleaning up iommu on node %d\n", node);
+
+		out_be64(xregs + IOC_IOST_Origin, 0);
+		(void)in_be64(xregs + IOC_IOST_Origin);
+		val = in_be64(cregs + IOC_IOCmd_Cfg);
+		val &= ~IOC_IOCmd_Cfg_TE;
+		out_be64(cregs + IOC_IOCmd_Cfg, val);
+		(void)in_be64(cregs + IOC_IOCmd_Cfg);
+
+		iounmap(xregs);
+	}
+}
+
+static int __init cell_iommu_init_disabled(void)
+{
+	struct device_node *np = NULL;
+	unsigned long base = 0, size;
+
+	/* When no iommu is present, we use direct DMA ops */
+	pci_dma_ops = &dma_direct_ops;
+
+	/* First make sure all IOC translation is turned off */
+	cell_disable_iommus();
+
+	/* If we have no Axon, we set up the spider DMA magic offset */
+	if (of_find_node_by_name(NULL, "axon") == NULL)
+		dma_direct_offset = SPIDER_DMA_OFFSET;
+
+	/* Now we need to check where the memory is mapped in PCI
+	 * space. We assume that all buses use the same dma window,
+	 * which has always been the case so far on Cell, so we pick
+	 * up the first axon or pci-internal node we can find and
+	 * check the DMA window from there.
+	 */
+	for_each_node_by_name(np, "axon") {
+		if (np->parent == NULL || np->parent->parent != NULL)
+			continue;
+		if (cell_iommu_get_window(np, &base, &size) == 0)
+			break;
+	}
+	if (np == NULL) {
+		for_each_node_by_name(np, "pci-internal") {
+			if (np->parent == NULL || np->parent->parent != NULL)
+				continue;
+			if (cell_iommu_get_window(np, &base, &size) == 0)
+				break;
+		}
+	}
+	of_node_put(np);
+
+	/* If we found a DMA window, we check if it's big enough to enclose
+	 * all of physical memory. If not, we force-enable the IOMMU.
+	 */
+	if (np && size < lmb_end_of_DRAM()) {
+		printk(KERN_WARNING "iommu: force-enabled, dma window"
+		       " (%ldMB) smaller than total memory (%ldMB)\n",
+		       size >> 20, lmb_end_of_DRAM() >> 20);
+		return -ENODEV;
+	}
+
+	dma_direct_offset += base;
+
+	printk("iommu: disabled, direct DMA offset is 0x%lx\n",
+	       dma_direct_offset);
+
+	return 0;
+}
+
+static int __init cell_iommu_init(void)
+{
+	struct device_node *np;
+
+	if (!machine_is(cell))
+		return -ENODEV;
+
+	/* If the IOMMU is disabled or we have little enough RAM not to need
+	 * it, we set up a direct mapping.
+	 *
+	 * Note: should we make sure the IOMMU is actually disabled?
+	 */
+	if (iommu_is_off ||
+	    (!iommu_force_on && lmb_end_of_DRAM() <= 0x80000000ull))
+		if (cell_iommu_init_disabled() == 0)
+			goto bail;
+
+	/* Setup various ppc_md. callbacks */
+	ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
+	ppc_md.tce_build = tce_build_cell;
+	ppc_md.tce_free = tce_free_cell;
+
+	/* Create an iommu for each /axon node.  */
+	for_each_node_by_name(np, "axon") {
+		if (np->parent == NULL || np->parent->parent != NULL)
+			continue;
+		cell_iommu_init_one(np, 0);
+	}
+
+	/* Create an iommu for each toplevel /pci-internal node for
+	 * old hardware/firmware
+	 */
+	for_each_node_by_name(np, "pci-internal") {
+		if (np->parent == NULL || np->parent->parent != NULL)
+			continue;
+		cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
+	}
+
+	/* Setup default PCI iommu ops */
+	pci_dma_ops = &dma_iommu_ops;
+
+ bail:
+	/* Register callbacks on OF platform device addition/removal
+	 * to handle linking them to the right DMA operations
+	 */
+	bus_register_notifier(&of_platform_bus_type, &cell_of_bus_notifier);
+
+	return 0;
+}
+arch_initcall(cell_iommu_init);
+
diff --git a/arch/powerpc/platforms/cell/iommu.h b/arch/powerpc/platforms/cell/iommu.h
deleted file mode 100644
index 490d77a..0000000
--- a/arch/powerpc/platforms/cell/iommu.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef CELL_IOMMU_H
-#define CELL_IOMMU_H
-
-/* some constants */
-enum {
-	/* segment table entries */
-	IOST_VALID_MASK	  = 0x8000000000000000ul,
-	IOST_TAG_MASK     = 0x3000000000000000ul,
-	IOST_PT_BASE_MASK = 0x000003fffffff000ul,
-	IOST_NNPT_MASK	  = 0x0000000000000fe0ul,
-	IOST_PS_MASK	  = 0x000000000000000ful,
-
-	IOST_PS_4K	  = 0x1,
-	IOST_PS_64K	  = 0x3,
-	IOST_PS_1M	  = 0x5,
-	IOST_PS_16M	  = 0x7,
-
-	/* iopt tag register */
-	IOPT_VALID_MASK   = 0x0000000200000000ul,
-	IOPT_TAG_MASK	  = 0x00000001fffffffful,
-
-	/* iopt cache register */
-	IOPT_PROT_MASK	  = 0xc000000000000000ul,
-	IOPT_PROT_NONE	  = 0x0000000000000000ul,
-	IOPT_PROT_READ	  = 0x4000000000000000ul,
-	IOPT_PROT_WRITE	  = 0x8000000000000000ul,
-	IOPT_PROT_RW	  = 0xc000000000000000ul,
-	IOPT_COHERENT	  = 0x2000000000000000ul,
-	
-	IOPT_ORDER_MASK	  = 0x1800000000000000ul,
-	/* order access to same IOID/VC on same address */
-	IOPT_ORDER_ADDR	  = 0x0800000000000000ul,
-	/* similar, but only after a write access */
-	IOPT_ORDER_WRITES = 0x1000000000000000ul,
-	/* Order all accesses to same IOID/VC */
-	IOPT_ORDER_VC	  = 0x1800000000000000ul,
-	
-	IOPT_RPN_MASK	  = 0x000003fffffff000ul,
-	IOPT_HINT_MASK	  = 0x0000000000000800ul,
-	IOPT_IOID_MASK	  = 0x00000000000007fful,
-
-	IOSTO_ENABLE	  = 0x8000000000000000ul,
-	IOSTO_ORIGIN	  = 0x000003fffffff000ul,
-	IOSTO_HW	  = 0x0000000000000800ul,
-	IOSTO_SW	  = 0x0000000000000400ul,
-
-	IOCMD_CONF_TE	  = 0x0000800000000000ul,
-
-	/* memory mapped registers */
-	IOC_PT_CACHE_DIR  = 0x000,
-	IOC_ST_CACHE_DIR  = 0x800,
-	IOC_PT_CACHE_REG  = 0x910,
-	IOC_ST_ORIGIN     = 0x918,
-	IOC_CONF	  = 0x930,
-
-	/* The high bit needs to be set on every DMA address,
-	   only 2GB are addressable */
-	CELL_DMA_VALID	  = 0x80000000,
-	CELL_DMA_MASK	  = 0x7fffffff,
-};
-
-
-void cell_init_iommu(void);
-
-#endif
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 9f2e4ed..8c20f0f 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -38,32 +38,25 @@
 #include "pervasive.h"
 #include "cbe_regs.h"
 
-static DEFINE_SPINLOCK(cbe_pervasive_lock);
-
-static void __init cbe_enable_pause_zero(void)
+static void cbe_power_save(void)
 {
-	unsigned long thread_switch_control;
-	unsigned long temp_register;
-	struct cbe_pmd_regs __iomem *pregs;
+	unsigned long ctrl, thread_switch_control;
 
-	spin_lock_irq(&cbe_pervasive_lock);
-	pregs = cbe_get_cpu_pmd_regs(smp_processor_id());
-	if (pregs == NULL)
-		goto out;
+	/*
+	 * We need to hard disable interrupts, but we also need to mark them
+	 * hard disabled in the PACA so that the local_irq_enable() done by
+	 * our caller upon return properly hard enables.
+	 */
+	hard_irq_disable();
+	get_paca()->hard_enabled = 0;
 
-	pr_debug("Power Management: CPU %d\n", smp_processor_id());
-
-	 /* Enable Pause(0) control bit */
-	temp_register = in_be64(&pregs->pm_control);
-
-	out_be64(&pregs->pm_control,
-		 temp_register | CBE_PMD_PAUSE_ZERO_CONTROL);
+	ctrl = mfspr(SPRN_CTRLF);
 
 	/* Enable DEC and EE interrupt request */
 	thread_switch_control  = mfspr(SPRN_TSC_CELL);
 	thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;
 
-	switch ((mfspr(SPRN_CTRLF) & CTRL_CT)) {
+	switch (ctrl & CTRL_CT) {
 	case CTRL_CT0:
 		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
 		break;
@@ -75,58 +68,21 @@
 			__FUNCTION__);
 		break;
 	}
-
 	mtspr(SPRN_TSC_CELL, thread_switch_control);
 
-out:
-	spin_unlock_irq(&cbe_pervasive_lock);
-}
-
-static void cbe_idle(void)
-{
-	unsigned long ctrl;
-
-	/* Why do we do that on every idle ? Couldn't that be done once for
-	 * all or do we lose the state some way ? Also, the pm_control
-	 * register setting, that can't be set once at boot ? We really want
-	 * to move that away in order to implement a simple powersave
+	/*
+	 * go into low thread priority; medium priority will be
+	 * restored for us after wake-up.
 	 */
-	cbe_enable_pause_zero();
+	HMT_low();
 
-	while (1) {
-		if (!need_resched()) {
-			local_irq_disable();
-			while (!need_resched()) {
-				/* go into low thread priority */
-				HMT_low();
-
-				/*
-				 * atomically disable thread execution
-				 * and runlatch.
-				 * External and Decrementer exceptions
-				 * are still handled when the thread
-				 * is disabled but now enter in
-				 * cbe_system_reset_exception()
-				 */
-				ctrl = mfspr(SPRN_CTRLF);
-				ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
-				mtspr(SPRN_CTRLT, ctrl);
-			}
-			/* restore thread prio */
-			HMT_medium();
-			local_irq_enable();
-		}
-
-		/*
-		 * turn runlatch on again before scheduling the
-		 * process we just woke up
-		 */
-		ppc64_runlatch_on();
-
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
-	}
+	/*
+	 * atomically disable thread execution and runlatch.
+	 * External and Decrementer exceptions are still handled when the
+	 * thread is disabled but now come in via cbe_system_reset_exception()
+	 */
+	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
+	mtspr(SPRN_CTRLT, ctrl);
 }
 
 static int cbe_system_reset_exception(struct pt_regs *regs)
@@ -158,9 +114,20 @@
 
 void __init cbe_pervasive_init(void)
 {
+	int cpu;
 	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
 		return;
 
-	ppc_md.idle_loop = cbe_idle;
+	for_each_possible_cpu(cpu) {
+		struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
+		if (!regs)
+			continue;
+
+		 /* Enable Pause(0) control bit */
+		out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
+					    CBE_PMD_PAUSE_ZERO_CONTROL);
+	}
+
+	ppc_md.power_save = cbe_power_save;
 	ppc_md.system_reset_exception = cbe_system_reset_exception;
 }
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
new file mode 100644
index 0000000..99c6120
--- /dev/null
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -0,0 +1,429 @@
+/*
+ * Cell Broadband Engine Performance Monitor
+ *
+ * (C) Copyright IBM Corporation 2001,2006
+ *
+ * Author:
+ *    David Erb (djerb@us.ibm.com)
+ *    Kevin Corry (kevcorry@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/irq_regs.h>
+#include <asm/machdep.h>
+#include <asm/pmc.h>
+#include <asm/reg.h>
+#include <asm/spu.h>
+
+#include "cbe_regs.h"
+#include "interrupt.h"
+
+/*
+ * When writing to write-only mmio addresses, save a shadow copy. All of the
+ * registers are 32-bit, but stored in the upper half of a 64-bit field in
+ * pmd_regs.
+ */
+
+#define WRITE_WO_MMIO(reg, x)					\
+	do {							\
+		u32 _x = (x);					\
+		struct cbe_pmd_regs __iomem *pmd_regs;		\
+		struct cbe_pmd_shadow_regs *shadow_regs;	\
+		pmd_regs = cbe_get_cpu_pmd_regs(cpu);		\
+		shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);	\
+		out_be64(&(pmd_regs->reg), (((u64)_x) << 32));	\
+		shadow_regs->reg = _x;				\
+	} while (0)
+
+#define READ_SHADOW_REG(val, reg)				\
+	do {							\
+		struct cbe_pmd_shadow_regs *shadow_regs;	\
+		shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);	\
+		(val) = shadow_regs->reg;			\
+	} while (0)
+
+#define READ_MMIO_UPPER32(val, reg)				\
+	do {							\
+		struct cbe_pmd_regs __iomem *pmd_regs;		\
+		pmd_regs = cbe_get_cpu_pmd_regs(cpu);		\
+		(val) = (u32)(in_be64(&pmd_regs->reg) >> 32);	\
+	} while (0)
+
+/*
+ * Physical counter registers.
+ * Each physical counter can act as one 32-bit counter or two 16-bit counters.
+ */
+
+u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr)
+{
+	u32 val_in_latch, val = 0;
+
+	if (phys_ctr < NR_PHYS_CTRS) {
+		READ_SHADOW_REG(val_in_latch, counter_value_in_latch);
+
+		/* Read the latch or the actual counter, whichever is newer. */
+		if (val_in_latch & (1 << phys_ctr)) {
+			READ_SHADOW_REG(val, pm_ctr[phys_ctr]);
+		} else {
+			READ_MMIO_UPPER32(val, pm_ctr[phys_ctr]);
+		}
+	}
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(cbe_read_phys_ctr);
+
+void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
+{
+	struct cbe_pmd_shadow_regs *shadow_regs;
+	u32 pm_ctrl;
+
+	if (phys_ctr < NR_PHYS_CTRS) {
+		/* Writing to a counter only writes to a hardware latch.
+		 * The new value is not propagated to the actual counter
+		 * until the performance monitor is enabled.
+		 */
+		WRITE_WO_MMIO(pm_ctr[phys_ctr], val);
+
+		pm_ctrl = cbe_read_pm(cpu, pm_control);
+		if (pm_ctrl & CBE_PM_ENABLE_PERF_MON) {
+			/* The counters are already active, so we need to
+			 * rewrite the pm_control register to "re-enable"
+			 * the PMU.
+			 */
+			cbe_write_pm(cpu, pm_control, pm_ctrl);
+		} else {
+			shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
+			shadow_regs->counter_value_in_latch |= (1 << phys_ctr);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(cbe_write_phys_ctr);
+
+/*
+ * "Logical" counter registers.
+ * These will read/write 16 bits or 32 bits depending on the
+ * current size of the counter. Counters 4-7 are always 16-bit.
+ */
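+/*
+ * For example, assuming NR_PHYS_CTRS is 4 (as the "counters 4-7" note
+ * above implies), logical counter 5 shares physical counter 1 and uses
+ * its low 16 bits when that counter is split into two 16-bit halves.
+ */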
+
+u32 cbe_read_ctr(u32 cpu, u32 ctr)
+{
+	u32 val;
+	u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1);
+
+	val = cbe_read_phys_ctr(cpu, phys_ctr);
+
+	if (cbe_get_ctr_size(cpu, phys_ctr) == 16)
+		val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff);
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(cbe_read_ctr);
+
+void cbe_write_ctr(u32 cpu, u32 ctr, u32 val)
+{
+	u32 phys_ctr;
+	u32 phys_val;
+
+	phys_ctr = ctr & (NR_PHYS_CTRS - 1);
+
+	if (cbe_get_ctr_size(cpu, phys_ctr) == 16) {
+		phys_val = cbe_read_phys_ctr(cpu, phys_ctr);
+
+		if (ctr < NR_PHYS_CTRS)
+			val = (val << 16) | (phys_val & 0xffff);
+		else
+			val = (val & 0xffff) | (phys_val & 0xffff0000);
+	}
+
+	cbe_write_phys_ctr(cpu, phys_ctr, val);
+}
+EXPORT_SYMBOL_GPL(cbe_write_ctr);
+
+/*
+ * Counter-control registers.
+ * Each "logical" counter has a corresponding control register.
+ */
+
+u32 cbe_read_pm07_control(u32 cpu, u32 ctr)
+{
+	u32 pm07_control = 0;
+
+	if (ctr < NR_CTRS)
+		READ_SHADOW_REG(pm07_control, pm07_control[ctr]);
+
+	return pm07_control;
+}
+EXPORT_SYMBOL_GPL(cbe_read_pm07_control);
+
+void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val)
+{
+	if (ctr < NR_CTRS)
+		WRITE_WO_MMIO(pm07_control[ctr], val);
+}
+EXPORT_SYMBOL_GPL(cbe_write_pm07_control);
+
+/*
+ * Other PMU control registers. Most of these are write-only.
+ */
+
+u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg)
+{
+	u32 val = 0;
+
+	switch (reg) {
+	case group_control:
+		READ_SHADOW_REG(val, group_control);
+		break;
+
+	case debug_bus_control:
+		READ_SHADOW_REG(val, debug_bus_control);
+		break;
+
+	case trace_address:
+		READ_MMIO_UPPER32(val, trace_address);
+		break;
+
+	case ext_tr_timer:
+		READ_SHADOW_REG(val, ext_tr_timer);
+		break;
+
+	case pm_status:
+		READ_MMIO_UPPER32(val, pm_status);
+		break;
+
+	case pm_control:
+		READ_SHADOW_REG(val, pm_control);
+		break;
+
+	case pm_interval:
+		READ_SHADOW_REG(val, pm_interval);
+		break;
+
+	case pm_start_stop:
+		READ_SHADOW_REG(val, pm_start_stop);
+		break;
+	}
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(cbe_read_pm);
+
+void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
+{
+	switch (reg) {
+	case group_control:
+		WRITE_WO_MMIO(group_control, val);
+		break;
+
+	case debug_bus_control:
+		WRITE_WO_MMIO(debug_bus_control, val);
+		break;
+
+	case trace_address:
+		WRITE_WO_MMIO(trace_address, val);
+		break;
+
+	case ext_tr_timer:
+		WRITE_WO_MMIO(ext_tr_timer, val);
+		break;
+
+	case pm_status:
+		WRITE_WO_MMIO(pm_status, val);
+		break;
+
+	case pm_control:
+		WRITE_WO_MMIO(pm_control, val);
+		break;
+
+	case pm_interval:
+		WRITE_WO_MMIO(pm_interval, val);
+		break;
+
+	case pm_start_stop:
+		WRITE_WO_MMIO(pm_start_stop, val);
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(cbe_write_pm);
+
+/*
+ * Get/set the size of a physical counter to either 16 or 32 bits.
+ */
+
+u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr)
+{
+	u32 pm_ctrl, size = 0;
+
+	if (phys_ctr < NR_PHYS_CTRS) {
+		pm_ctrl = cbe_read_pm(cpu, pm_control);
+		size = (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32;
+	}
+
+	return size;
+}
+EXPORT_SYMBOL_GPL(cbe_get_ctr_size);
+
+void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
+{
+	u32 pm_ctrl;
+
+	if (phys_ctr < NR_PHYS_CTRS) {
+		pm_ctrl = cbe_read_pm(cpu, pm_control);
+		switch (ctr_size) {
+		case 16:
+			pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr);
+			break;
+
+		case 32:
+			pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr);
+			break;
+		}
+		cbe_write_pm(cpu, pm_control, pm_ctrl);
+	}
+}
+EXPORT_SYMBOL_GPL(cbe_set_ctr_size);
+
+/*
+ * Enable/disable the entire performance monitoring unit.
+ * When we enable the PMU, all pending writes to counters get committed.
+ */
+
+void cbe_enable_pm(u32 cpu)
+{
+	struct cbe_pmd_shadow_regs *shadow_regs;
+	u32 pm_ctrl;
+
+	shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
+	shadow_regs->counter_value_in_latch = 0;
+
+	pm_ctrl = cbe_read_pm(cpu, pm_control) | CBE_PM_ENABLE_PERF_MON;
+	cbe_write_pm(cpu, pm_control, pm_ctrl);
+}
+EXPORT_SYMBOL_GPL(cbe_enable_pm);
+
+void cbe_disable_pm(u32 cpu)
+{
+	u32 pm_ctrl;
+	pm_ctrl = cbe_read_pm(cpu, pm_control) & ~CBE_PM_ENABLE_PERF_MON;
+	cbe_write_pm(cpu, pm_control, pm_ctrl);
+}
+EXPORT_SYMBOL_GPL(cbe_disable_pm);
+
+/*
+ * Reading from the trace_buffer.
+ * The trace buffer is two 64-bit registers. Reading from
+ * the second half automatically increments the trace_address.
+ */
+
+void cbe_read_trace_buffer(u32 cpu, u64 *buf)
+{
+	struct cbe_pmd_regs __iomem *pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+
+	*buf++ = in_be64(&pmd_regs->trace_buffer_0_63);
+	*buf++ = in_be64(&pmd_regs->trace_buffer_64_127);
+}
+EXPORT_SYMBOL_GPL(cbe_read_trace_buffer);
+
+/*
+ * Enabling/disabling interrupts for the entire performance monitoring unit.
+ */
+
+u32 cbe_query_pm_interrupts(u32 cpu)
+{
+	return cbe_read_pm(cpu, pm_status);
+}
+EXPORT_SYMBOL_GPL(cbe_query_pm_interrupts);
+
+u32 cbe_clear_pm_interrupts(u32 cpu)
+{
+	/* Reading pm_status clears the interrupt bits. */
+	return cbe_query_pm_interrupts(cpu);
+}
+EXPORT_SYMBOL_GPL(cbe_clear_pm_interrupts);
+
+void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
+{
+	/* Set which node and thread will handle the next interrupt. */
+	iic_set_interrupt_routing(cpu, thread, 0);
+
+	/* Enable the interrupt bits in the pm_status register. */
+	if (mask)
+		cbe_write_pm(cpu, pm_status, mask);
+}
+EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts);
+
+void cbe_disable_pm_interrupts(u32 cpu)
+{
+	cbe_clear_pm_interrupts(cpu);
+	cbe_write_pm(cpu, pm_status, 0);
+}
+EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts);
+
+static irqreturn_t cbe_pm_irq(int irq, void *dev_id)
+{
+	perf_irq(get_irq_regs());
+	return IRQ_HANDLED;
+}
+
+int __init cbe_init_pm_irq(void)
+{
+	unsigned int irq;
+	int rc, node;
+
+	for_each_node(node) {
+		irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
+					       (node << IIC_IRQ_NODE_SHIFT));
+		if (irq == NO_IRQ) {
+			printk("ERROR: Unable to allocate irq for node %d\n",
+			       node);
+			return -EINVAL;
+		}
+
+		rc = request_irq(irq, cbe_pm_irq,
+				 IRQF_DISABLED, "cbe-pmu-0", NULL);
+		if (rc) {
+			printk("ERROR: Request for irq on node %d failed\n",
+			       node);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+arch_initcall(cbe_init_pm_irq);
+
+void cbe_sync_irq(int node)
+{
+	unsigned int irq;
+
+	irq = irq_find_mapping(NULL,
+			       IIC_IRQ_IOEX_PMI
+			       | (node << IIC_IRQ_NODE_SHIFT));
+
+	if (irq == NO_IRQ) {
+		printk(KERN_WARNING "ERROR, unable to get existing irq %d " \
+		"for node %d\n", irq, node);
+		return;
+	}
+
+	synchronize_irq(irq);
+}
+EXPORT_SYMBOL_GPL(cbe_sync_irq);
+
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 22c228a..36989c2 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -50,9 +50,10 @@
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
 #include <asm/udbg.h>
+#include <asm/mpic.h>
+#include <asm/of_platform.h>
 
 #include "interrupt.h"
-#include "iommu.h"
 #include "cbe_regs.h"
 #include "pervasive.h"
 #include "ras.h"
@@ -80,24 +81,72 @@
 	printk("*** %04x : %s\n", hex, s ? s : "");
 }
 
-static void __init cell_pcibios_fixup(void)
+static int __init cell_publish_devices(void)
 {
-	struct pci_dev *dev = NULL;
+	if (!machine_is(cell))
+		return 0;
 
-	for_each_pci_dev(dev)
-		pci_read_irq_line(dev);
+	/* Publish OF platform devices for southbridge IOs */
+	of_platform_bus_probe(NULL, NULL, NULL);
+
+	return 0;
 }
+device_initcall(cell_publish_devices);
+
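+/* Cascade handler for an MPIC hanging off the IIC: fetch the pending
+ * source from the MPIC, hand it to the generic IRQ layer, then EOI the
+ * cascade interrupt itself.
+ */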
+static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc)
+{
+	struct mpic *mpic = desc->handler_data;
+	unsigned int virq;
+
+	virq = mpic_get_one_irq(mpic);
+	if (virq != NO_IRQ)
+		generic_handle_irq(virq);
+	desc->chip->eoi(irq);
+}
+
+static void __init mpic_init_IRQ(void)
+{
+	struct device_node *dn;
+	struct mpic *mpic;
+	unsigned int virq;
+
+	for (dn = NULL;
+	     (dn = of_find_node_by_name(dn, "interrupt-controller"));) {
+		if (!device_is_compatible(dn, "CBEA,platform-open-pic"))
+			continue;
+
+		/* The MPIC driver will get everything it needs from the
+		 * device-tree; just pass 0 for all arguments.
+		 */
+		mpic = mpic_alloc(dn, 0, 0, 0, 0, " MPIC     ");
+		if (mpic == NULL)
+			continue;
+		mpic_init(mpic);
+
+		virq = irq_of_parse_and_map(dn, 0);
+		if (virq == NO_IRQ)
+			continue;
+
+		printk(KERN_INFO "%s : hooking up to IRQ %d\n",
+		       dn->full_name, virq);
+		set_irq_data(virq, mpic);
+		set_irq_chained_handler(virq, cell_mpic_cascade);
+	}
+}
+
 
 static void __init cell_init_irq(void)
 {
 	iic_init_IRQ();
 	spider_init_IRQ();
+	mpic_init_IRQ();
 }
 
 static void __init cell_setup_arch(void)
 {
 #ifdef CONFIG_SPU_BASE
-	spu_priv1_ops         = &spu_priv1_mmio_ops;
+	spu_priv1_ops = &spu_priv1_mmio_ops;
+	spu_management_ops = &spu_management_of_ops;
 #endif
 
 	cbe_regs_init();
@@ -109,7 +158,6 @@
 #ifdef CONFIG_SMP
 	smp_init_cell();
 #endif
-
 	/* init to some ~sane value until calibrate_delay() runs */
 	loops_per_jiffy = 50000000;
 
@@ -129,19 +177,6 @@
 	mmio_nvram_init();
 }
 
-/*
- * Early initialization.  Relocation is on but do not reference unbolted pages
- */
-static void __init cell_init_early(void)
-{
-	DBG(" -> cell_init_early()\n");
-
-	cell_init_iommu();
-
-	DBG(" <- cell_init_early()\n");
-}
-
-
 static int __init cell_probe(void)
 {
 	unsigned long root = of_get_flat_dt_root();
@@ -168,7 +203,6 @@
 	.name			= "Cell",
 	.probe			= cell_probe,
 	.setup_arch		= cell_setup_arch,
-	.init_early		= cell_init_early,
 	.show_cpuinfo		= cell_show_cpuinfo,
 	.restart		= rtas_restart,
 	.power_off		= rtas_power_off,
@@ -180,7 +214,7 @@
 	.check_legacy_ioport	= cell_check_legacy_ioport,
 	.progress		= cell_progress,
 	.init_IRQ       	= cell_init_irq,
-	.pcibios_fixup		= cell_pcibios_fixup,
+	.pci_setup_phb		= rtas_setup_phb,
 #ifdef CONFIG_KEXEC
 	.machine_kexec		= default_machine_kexec,
 	.machine_kexec_prepare	= default_machine_kexec_prepare,
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 7aa809d..bd7bffc 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -25,22 +25,17 @@
 #include <linux/interrupt.h>
 #include <linux/list.h>
 #include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
-
-#include <asm/firmware.h>
-#include <asm/io.h>
-#include <asm/prom.h>
+#include <linux/mm.h>
+#include <linux/io.h>
 #include <linux/mutex.h>
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
-#include <asm/mmu_context.h>
+#include <asm/xmon.h>
 
-#include "interrupt.h"
-
+const struct spu_management_ops *spu_management_ops;
 const struct spu_priv1_ops *spu_priv1_ops;
 
 EXPORT_SYMBOL_GPL(spu_priv1_ops);
@@ -89,7 +84,30 @@
 		printk("%s: invalid access during switch!\n", __func__);
 		return 1;
 	}
-	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
+	esid = (ea & ESID_MASK) | SLB_ESID_V;
+
+	switch(REGION_ID(ea)) {
+	case USER_REGION_ID:
+#ifdef CONFIG_HUGETLB_PAGE
+		if (in_hugepage_area(mm->context, ea))
+			llp = mmu_psize_defs[mmu_huge_psize].sllp;
+		else
+#endif
+			llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
+				SLB_VSID_USER | llp;
+		break;
+	case VMALLOC_REGION_ID:
+		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
+			SLB_VSID_KERNEL | llp;
+		break;
+	case KERNEL_REGION_ID:
+		llp = mmu_psize_defs[mmu_linear_psize].sllp;
+		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
+			SLB_VSID_KERNEL | llp;
+		break;
+	default:
 		/* Future: support kernel segments so that drivers
 		 * can use SPUs.
 		 */
@@ -97,16 +115,6 @@
 		return 1;
 	}
 
-	esid = (ea & ESID_MASK) | SLB_ESID_V;
-#ifdef CONFIG_HUGETLB_PAGE
-	if (in_hugepage_area(mm->context, ea))
-		llp = mmu_psize_defs[mmu_huge_psize].sllp;
-	else
-#endif
-		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
-	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
-			SLB_VSID_USER | llp;
-
 	out_be64(&priv2->slb_index_W, spu->slb_replace);
 	out_be64(&priv2->slb_vsid_RW, vsid);
 	out_be64(&priv2->slb_esid_RW, esid);
@@ -320,6 +328,7 @@
 }
 
 static struct list_head spu_list[MAX_NUMNODES];
+static LIST_HEAD(spu_full_list);
 static DEFINE_MUTEX(spu_mutex);
 
 static void spu_init_channels(struct spu *spu)
@@ -364,8 +373,7 @@
 	if (!list_empty(&spu_list[node])) {
 		spu = list_entry(spu_list[node].next, struct spu, list);
 		list_del_init(&spu->list);
-		pr_debug("Got SPU %x %d %d\n",
-			 spu->isrc, spu->number, spu->node);
+		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
 		spu_init_channels(spu);
 	}
 	mutex_unlock(&spu_mutex);
@@ -493,280 +501,65 @@
 	if (!error) {
 		spu_restart_dma(spu);
 	} else {
-		__spu_trap_invalid_dma(spu);
+		spu->dma_callback(spu, SPE_EVENT_SPE_DATA_STORAGE);
 	}
 	return ret;
 }
 
-static int __init find_spu_node_id(struct device_node *spe)
-{
-	const unsigned int *id;
-	struct device_node *cpu;
-	cpu = spe->parent->parent;
-	id = get_property(cpu, "node-id", NULL);
-	return id ? *id : 0;
-}
-
-static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
-		const char *prop)
-{
-	static DEFINE_MUTEX(add_spumem_mutex);
-
-	const struct address_prop {
-		unsigned long address;
-		unsigned int len;
-	} __attribute__((packed)) *p;
-	int proplen;
-
-	unsigned long start_pfn, nr_pages;
-	struct pglist_data *pgdata;
-	struct zone *zone;
-	int ret;
-
-	p = get_property(spe, prop, &proplen);
-	WARN_ON(proplen != sizeof (*p));
-
-	start_pfn = p->address >> PAGE_SHIFT;
-	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
-	pgdata = NODE_DATA(spu->nid);
-	zone = pgdata->node_zones;
-
-	/* XXX rethink locking here */
-	mutex_lock(&add_spumem_mutex);
-	ret = __add_pages(zone, start_pfn, nr_pages);
-	mutex_unlock(&add_spumem_mutex);
-
-	return ret;
-}
-
-static void __iomem * __init map_spe_prop(struct spu *spu,
-		struct device_node *n, const char *name)
-{
-	const struct address_prop {
-		unsigned long address;
-		unsigned int len;
-	} __attribute__((packed)) *prop;
-
-	const void *p;
-	int proplen;
-	void __iomem *ret = NULL;
-	int err = 0;
-
-	p = get_property(n, name, &proplen);
-	if (proplen != sizeof (struct address_prop))
-		return NULL;
-
-	prop = p;
-
-	err = cell_spuprop_present(spu, n, name);
-	if (err && (err != -EEXIST))
-		goto out;
-
-	ret = ioremap(prop->address, prop->len);
-
- out:
-	return ret;
-}
-
-static void spu_unmap(struct spu *spu)
-{
-	iounmap(spu->priv2);
-	iounmap(spu->priv1);
-	iounmap(spu->problem);
-	iounmap((__force u8 __iomem *)spu->local_store);
-}
-
-/* This function shall be abstracted for HV platforms */
-static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
-{
-	unsigned int isrc;
-	const u32 *tmp;
-
-	/* Get the interrupt source unit from the device-tree */
-	tmp = get_property(np, "isrc", NULL);
-	if (!tmp)
-		return -ENODEV;
-	isrc = tmp[0];
-
-	/* Add the node number */
-	isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
-	spu->isrc = isrc;
-
-	/* Now map interrupts of all 3 classes */
-	spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
-	spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
-	spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
-
-	/* Right now, we only fail if class 2 failed */
-	return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
-}
-
-static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
-{
-	const char *prop;
-	int ret;
-
-	ret = -ENODEV;
-	spu->name = get_property(node, "name", NULL);
-	if (!spu->name)
-		goto out;
-
-	prop = get_property(node, "local-store", NULL);
-	if (!prop)
-		goto out;
-	spu->local_store_phys = *(unsigned long *)prop;
-
-	/* we use local store as ram, not io memory */
-	spu->local_store = (void __force *)
-		map_spe_prop(spu, node, "local-store");
-	if (!spu->local_store)
-		goto out;
-
-	prop = get_property(node, "problem", NULL);
-	if (!prop)
-		goto out_unmap;
-	spu->problem_phys = *(unsigned long *)prop;
-
-	spu->problem= map_spe_prop(spu, node, "problem");
-	if (!spu->problem)
-		goto out_unmap;
-
-	spu->priv1= map_spe_prop(spu, node, "priv1");
-	/* priv1 is not available on a hypervisor */
-
-	spu->priv2= map_spe_prop(spu, node, "priv2");
-	if (!spu->priv2)
-		goto out_unmap;
-	ret = 0;
-	goto out;
-
-out_unmap:
-	spu_unmap(spu);
-out:
-	return ret;
-}
-
-static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
-{
-	struct of_irq oirq;
-	int ret;
-	int i;
-
-	for (i=0; i < 3; i++) {
-		ret = of_irq_map_one(np, i, &oirq);
-		if (ret) {
-			pr_debug("spu_new: failed to get irq %d\n", i);
-			goto err;
-		}
-		ret = -EINVAL;
-		pr_debug("  irq %d no 0x%x on %s\n", i, oirq.specifier[0],
-			 oirq.controller->full_name);
-		spu->irqs[i] = irq_create_of_mapping(oirq.controller,
-					oirq.specifier, oirq.size);
-		if (spu->irqs[i] == NO_IRQ) {
-			pr_debug("spu_new: failed to map it !\n");
-			goto err;
-		}
-	}
-	return 0;
-
-err:
-	pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
-	for (; i >= 0; i--) {
-		if (spu->irqs[i] != NO_IRQ)
-			irq_dispose_mapping(spu->irqs[i]);
-	}
-	return ret;
-}
-
-static int spu_map_resource(struct device_node *node, int nr,
-		void __iomem** virt, unsigned long *phys)
-{
-	struct resource resource = { };
-	int ret;
-
-	ret = of_address_to_resource(node, nr, &resource);
-	if (ret)
-		goto out;
-
-	if (phys)
-		*phys = resource.start;
-	*virt = ioremap(resource.start, resource.end - resource.start);
-	if (!*virt)
-		ret = -EINVAL;
-
-out:
-	return ret;
-}
-
-static int __init spu_map_device(struct spu *spu, struct device_node *node)
-{
-	int ret = -ENODEV;
-	spu->name = get_property(node, "name", NULL);
-	if (!spu->name)
-		goto out;
-
-	ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
-					&spu->local_store_phys);
-	if (ret) {
-		pr_debug("spu_new: failed to map %s resource 0\n",
-			 node->full_name);
-		goto out;
-	}
-	ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
-					&spu->problem_phys);
-	if (ret) {
-		pr_debug("spu_new: failed to map %s resource 1\n",
-			 node->full_name);
-		goto out_unmap;
-	}
-	ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
-					NULL);
-	if (ret) {
-		pr_debug("spu_new: failed to map %s resource 2\n",
-			 node->full_name);
-		goto out_unmap;
-	}
-
-	if (!firmware_has_feature(FW_FEATURE_LPAR))
-		ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
-					NULL);
-	if (ret) {
-		pr_debug("spu_new: failed to map %s resource 3\n",
-			 node->full_name);
-		goto out_unmap;
-	}
-	pr_debug("spu_new: %s maps:\n", node->full_name);
-	pr_debug("  local store   : 0x%016lx -> 0x%p\n",
-		 spu->local_store_phys, spu->local_store);
-	pr_debug("  problem state : 0x%016lx -> 0x%p\n",
-		 spu->problem_phys, spu->problem);
-	pr_debug("  priv2         :                       0x%p\n", spu->priv2);
-	pr_debug("  priv1         :                       0x%p\n", spu->priv1);
-
-	return 0;
-
-out_unmap:
-	spu_unmap(spu);
-out:
-	pr_debug("failed to map spe %s: %d\n", spu->name, ret);
-	return ret;
-}
-
 struct sysdev_class spu_sysdev_class = {
 	set_kset_name("spu")
 };
 
-static ssize_t spu_show_isrc(struct sys_device *sysdev, char *buf)
+int spu_add_sysdev_attr(struct sysdev_attribute *attr)
 {
-	struct spu *spu = container_of(sysdev, struct spu, sysdev);
-	return sprintf(buf, "%d\n", spu->isrc);
+	struct spu *spu;
+	mutex_lock(&spu_mutex);
 
+	list_for_each_entry(spu, &spu_full_list, full_list)
+		sysdev_create_file(&spu->sysdev, attr);
+
+	mutex_unlock(&spu_mutex);
+	return 0;
 }
-static SYSDEV_ATTR(isrc, 0400, spu_show_isrc, NULL);
+EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
 
-extern int attach_sysdev_to_node(struct sys_device *dev, int nid);
+int spu_add_sysdev_attr_group(struct attribute_group *attrs)
+{
+	struct spu *spu;
+	mutex_lock(&spu_mutex);
+
+	list_for_each_entry(spu, &spu_full_list, full_list)
+		sysfs_create_group(&spu->sysdev.kobj, attrs);
+
+	mutex_unlock(&spu_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
+
+
+void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
+{
+	struct spu *spu;
+	mutex_lock(&spu_mutex);
+
+	list_for_each_entry(spu, &spu_full_list, full_list)
+		sysdev_remove_file(&spu->sysdev, attr);
+
+	mutex_unlock(&spu_mutex);
+}
+EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);
+
+void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
+{
+	struct spu *spu;
+	mutex_lock(&spu_mutex);
+
+	list_for_each_entry(spu, &spu_full_list, full_list)
+		sysfs_remove_group(&spu->sysdev.kobj, attrs);
+
+	mutex_unlock(&spu_mutex);
+}
+EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
 
 static int spu_create_sysdev(struct spu *spu)
 {
@@ -781,21 +574,18 @@
 		return ret;
 	}
 
-	if (spu->isrc != 0)
-		sysdev_create_file(&spu->sysdev, &attr_isrc);
-	sysfs_add_device_to_node(&spu->sysdev, spu->nid);
+	sysfs_add_device_to_node(&spu->sysdev, spu->node);
 
 	return 0;
 }
 
 static void spu_destroy_sysdev(struct spu *spu)
 {
-	sysdev_remove_file(&spu->sysdev, &attr_isrc);
-	sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
+	sysfs_remove_device_from_node(&spu->sysdev, spu->node);
 	sysdev_unregister(&spu->sysdev);
 }
 
-static int __init create_spu(struct device_node *spe)
+static int __init create_spu(void *data)
 {
 	struct spu *spu;
 	int ret;
@@ -806,57 +596,37 @@
 	if (!spu)
 		goto out;
 
-	spu->node = find_spu_node_id(spe);
-	if (spu->node >= MAX_NUMNODES) {
-		printk(KERN_WARNING "SPE %s on node %d ignored,"
-		       " node number too big\n", spe->full_name, spu->node);
-		printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
-		return -ENODEV;
-	}
-	spu->nid = of_node_to_nid(spe);
-	if (spu->nid == -1)
-		spu->nid = 0;
+	spin_lock_init(&spu->register_lock);
+	mutex_lock(&spu_mutex);
+	spu->number = number++;
+	mutex_unlock(&spu_mutex);
 
-	ret = spu_map_device(spu, spe);
-	/* try old method */
-	if (ret)
-		ret = spu_map_device_old(spu, spe);
+	ret = spu_create_spu(spu, data);
+
 	if (ret)
 		goto out_free;
 
-	ret = spu_map_interrupts(spu, spe);
-	if (ret)
-		ret = spu_map_interrupts_old(spu, spe);
-	if (ret)
-		goto out_unmap;
-	spin_lock_init(&spu->register_lock);
-	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
+	spu_mfc_sdr_setup(spu);
 	spu_mfc_sr1_set(spu, 0x33);
-	mutex_lock(&spu_mutex);
-
-	spu->number = number++;
 	ret = spu_request_irqs(spu);
 	if (ret)
-		goto out_unlock;
+		goto out_destroy;
 
 	ret = spu_create_sysdev(spu);
 	if (ret)
 		goto out_free_irqs;
 
+	mutex_lock(&spu_mutex);
 	list_add(&spu->list, &spu_list[spu->node]);
+	list_add(&spu->full_list, &spu_full_list);
 	mutex_unlock(&spu_mutex);
 
-	pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n",
-		spu->name, spu->isrc, spu->local_store,
-		spu->problem, spu->priv1, spu->priv2, spu->number);
 	goto out;
 
 out_free_irqs:
 	spu_free_irqs(spu);
-out_unlock:
-	mutex_unlock(&spu_mutex);
-out_unmap:
-	spu_unmap(spu);
+out_destroy:
+	spu_destroy_spu(spu);
 out_free:
 	kfree(spu);
 out:
@@ -866,10 +636,11 @@
 static void destroy_spu(struct spu *spu)
 {
 	list_del_init(&spu->list);
+	list_del_init(&spu->full_list);
 
 	spu_destroy_sysdev(spu);
 	spu_free_irqs(spu);
-	spu_unmap(spu);
+	spu_destroy_spu(spu);
 	kfree(spu);
 }
 
@@ -890,9 +661,11 @@
 
 static int __init init_spu_base(void)
 {
-	struct device_node *node;
 	int i, ret;
 
+	if (!spu_management_ops)
+		return 0;
+
 	/* create sysdev class for spus */
 	ret = sysdev_class_register(&spu_sysdev_class);
 	if (ret)
@@ -901,17 +674,17 @@
 	for (i = 0; i < MAX_NUMNODES; i++)
 		INIT_LIST_HEAD(&spu_list[i]);
 
-	ret = -ENODEV;
-	for (node = of_find_node_by_type(NULL, "spe");
-			node; node = of_find_node_by_type(node, "spe")) {
-		ret = create_spu(node);
-		if (ret) {
-			printk(KERN_WARNING "%s: Error initializing %s\n",
-				__FUNCTION__, node->name);
-			cleanup_spu_base();
-			break;
-		}
+	ret = spu_enumerate_spus(create_spu);
+
+	if (ret) {
+		printk(KERN_WARNING "%s: Error initializing spus\n",
+			__FUNCTION__);
+		cleanup_spu_base();
+		return ret;
 	}
+
+	xmon_register_spus(&spu_full_list);
+
 	return ret;
 }
 module_init(init_spu_base);
diff --git a/arch/powerpc/platforms/cell/spu_coredump.c b/arch/powerpc/platforms/cell/spu_coredump.c
new file mode 100644
index 0000000..6915b41
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_coredump.c
@@ -0,0 +1,81 @@
+/*
+ * SPU core dump code
+ *
+ * (C) Copyright 2006 IBM Corp.
+ *
+ * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/syscalls.h>
+
+#include <asm/spu.h>
+
+static struct spu_coredump_calls spu_coredump_calls;
+static DEFINE_MUTEX(spu_coredump_mutex);
+
+int arch_notes_size(void)
+{
+	long ret;
+	struct module *owner = spu_coredump_calls.owner;
+
+	ret = -ENOSYS;
+	mutex_lock(&spu_coredump_mutex);
+	if (owner && try_module_get(owner)) {
+		ret = spu_coredump_calls.arch_notes_size();
+		module_put(owner);
+	}
+	mutex_unlock(&spu_coredump_mutex);
+	return ret;
+}
+
+void arch_write_notes(struct file *file)
+{
+	struct module *owner = spu_coredump_calls.owner;
+
+	mutex_lock(&spu_coredump_mutex);
+	if (owner && try_module_get(owner)) {
+		spu_coredump_calls.arch_write_notes(file);
+		module_put(owner);
+	}
+	mutex_unlock(&spu_coredump_mutex);
+}
+
+int register_arch_coredump_calls(struct spu_coredump_calls *calls)
+{
+	if (spu_coredump_calls.owner)
+		return -EBUSY;
+
+	mutex_lock(&spu_coredump_mutex);
+	spu_coredump_calls.arch_notes_size = calls->arch_notes_size;
+	spu_coredump_calls.arch_write_notes = calls->arch_write_notes;
+	spu_coredump_calls.owner = calls->owner;
+	mutex_unlock(&spu_coredump_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(register_arch_coredump_calls);
+
+void unregister_arch_coredump_calls(struct spu_coredump_calls *calls)
+{
+	BUG_ON(spu_coredump_calls.owner != calls->owner);
+
+	mutex_lock(&spu_coredump_mutex);
+	spu_coredump_calls.owner = NULL;
+	mutex_unlock(&spu_coredump_mutex);
+}
+EXPORT_SYMBOL_GPL(unregister_arch_coredump_calls);
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index 71b69f0..a5de043 100644
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -18,120 +18,498 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <linux/interrupt.h>
+#include <linux/list.h>
 #include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
 
-#include <asm/io.h>
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
+#include <asm/firmware.h>
+#include <asm/prom.h>
 
 #include "interrupt.h"
+#include "spu_priv1_mmio.h"
+
+struct spu_pdata {
+	int nid;
+	struct device_node *devnode;
+	struct spu_priv1 __iomem *priv1;
+};
+
+static struct spu_pdata *spu_get_pdata(struct spu *spu)
+{
+	BUG_ON(!spu->pdata);
+	return spu->pdata;
+}
+
+struct device_node *spu_devnode(struct spu *spu)
+{
+	return spu_get_pdata(spu)->devnode;
+}
+
+EXPORT_SYMBOL_GPL(spu_devnode);
+
+static int __init find_spu_node_id(struct device_node *spe)
+{
+	const unsigned int *id;
+	struct device_node *cpu;
+	cpu = spe->parent->parent;
+	id = get_property(cpu, "node-id", NULL);
+	return id ? *id : 0;
+}
+
+static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
+		const char *prop)
+{
+	static DEFINE_MUTEX(add_spumem_mutex);
+
+	const struct address_prop {
+		unsigned long address;
+		unsigned int len;
+	} __attribute__((packed)) *p;
+	int proplen;
+
+	unsigned long start_pfn, nr_pages;
+	struct pglist_data *pgdata;
+	struct zone *zone;
+	int ret;
+
+	p = get_property(spe, prop, &proplen);
+	WARN_ON(proplen != sizeof (*p));
+
+	start_pfn = p->address >> PAGE_SHIFT;
+	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	pgdata = NODE_DATA(spu_get_pdata(spu)->nid);
+	zone = pgdata->node_zones;
+
+	/* XXX rethink locking here */
+	mutex_lock(&add_spumem_mutex);
+	ret = __add_pages(zone, start_pfn, nr_pages);
+	mutex_unlock(&add_spumem_mutex);
+
+	return ret;
+}
+
+static void __iomem * __init map_spe_prop(struct spu *spu,
+		struct device_node *n, const char *name)
+{
+	const struct address_prop {
+		unsigned long address;
+		unsigned int len;
+	} __attribute__((packed)) *prop;
+
+	const void *p;
+	int proplen;
+	void __iomem *ret = NULL;
+	int err = 0;
+
+	p = get_property(n, name, &proplen);
+	if (proplen != sizeof (struct address_prop))
+		return NULL;
+
+	prop = p;
+
+	err = cell_spuprop_present(spu, n, name);
+	if (err && (err != -EEXIST))
+		goto out;
+
+	ret = ioremap(prop->address, prop->len);
+
+ out:
+	return ret;
+}
+
+static void spu_unmap(struct spu *spu)
+{
+	iounmap(spu->priv2);
+	iounmap(spu_get_pdata(spu)->priv1);
+	iounmap(spu->problem);
+	iounmap((__force u8 __iomem *)spu->local_store);
+}
+
+static int __init spu_map_interrupts_old(struct spu *spu,
+	struct device_node *np)
+{
+	unsigned int isrc;
+	const u32 *tmp;
+
+	/* Get the interrupt source unit from the device-tree */
+	tmp = get_property(np, "isrc", NULL);
+	if (!tmp)
+		return -ENODEV;
+	isrc = tmp[0];
+
+	/* Add the node number */
+	isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
+
+	/* Now map interrupts of all 3 classes */
+	spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
+	spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
+	spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
+
+	/* Right now, we only fail if class 2 failed */
+	return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
+}
+
+static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
+{
+	const char *prop;
+	int ret;
+
+	ret = -ENODEV;
+	spu->name = get_property(node, "name", NULL);
+	if (!spu->name)
+		goto out;
+
+	prop = get_property(node, "local-store", NULL);
+	if (!prop)
+		goto out;
+	spu->local_store_phys = *(unsigned long *)prop;
+
+	/* we use local store as ram, not io memory */
+	spu->local_store = (void __force *)
+		map_spe_prop(spu, node, "local-store");
+	if (!spu->local_store)
+		goto out;
+
+	prop = get_property(node, "problem", NULL);
+	if (!prop)
+		goto out_unmap;
+	spu->problem_phys = *(unsigned long *)prop;
+
+	spu->problem = map_spe_prop(spu, node, "problem");
+	if (!spu->problem)
+		goto out_unmap;
+
+	spu_get_pdata(spu)->priv1 = map_spe_prop(spu, node, "priv1");
+
+	spu->priv2 = map_spe_prop(spu, node, "priv2");
+	if (!spu->priv2)
+		goto out_unmap;
+	ret = 0;
+	goto out;
+
+out_unmap:
+	spu_unmap(spu);
+out:
+	return ret;
+}
+
+static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
+{
+	struct of_irq oirq;
+	int ret;
+	int i;
+
+	for (i = 0; i < 3; i++) {
+		ret = of_irq_map_one(np, i, &oirq);
+		if (ret) {
+			pr_debug("spu_new: failed to get irq %d\n", i);
+			goto err;
+		}
+		ret = -EINVAL;
+		pr_debug("  irq %d no 0x%x on %s\n", i, oirq.specifier[0],
+			 oirq.controller->full_name);
+		spu->irqs[i] = irq_create_of_mapping(oirq.controller,
+					oirq.specifier, oirq.size);
+		if (spu->irqs[i] == NO_IRQ) {
+			pr_debug("spu_new: failed to map it!\n");
+			goto err;
+		}
+	}
+	return 0;
+
+err:
+	pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier,
+		spu->name);
+	for (; i >= 0; i--) {
+		if (spu->irqs[i] != NO_IRQ)
+			irq_dispose_mapping(spu->irqs[i]);
+	}
+	return ret;
+}
+
+static int spu_map_resource(struct device_node *node, int nr,
+		void __iomem** virt, unsigned long *phys)
+{
+	struct resource resource = { };
+	int ret;
+
+	ret = of_address_to_resource(node, nr, &resource);
+	if (ret)
+		goto out;
+
+	if (phys)
+		*phys = resource.start;
+	*virt = ioremap(resource.start, resource.end - resource.start);
+	if (!*virt)
+		ret = -EINVAL;
+
+out:
+	return ret;
+}
+
+static int __init spu_map_device(struct spu *spu, struct device_node *node)
+{
+	int ret = -ENODEV;
+	spu->name = get_property(node, "name", NULL);
+	if (!spu->name)
+		goto out;
+
+	ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
+					&spu->local_store_phys);
+	if (ret) {
+		pr_debug("spu_new: failed to map %s resource 0\n",
+			 node->full_name);
+		goto out;
+	}
+	ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
+					&spu->problem_phys);
+	if (ret) {
+		pr_debug("spu_new: failed to map %s resource 1\n",
+			 node->full_name);
+		goto out_unmap;
+	}
+	ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
+					NULL);
+	if (ret) {
+		pr_debug("spu_new: failed to map %s resource 2\n",
+			 node->full_name);
+		goto out_unmap;
+	}
+	if (!firmware_has_feature(FW_FEATURE_LPAR))
+		ret = spu_map_resource(node, 3,
+			(void __iomem**)&spu_get_pdata(spu)->priv1, NULL);
+	if (ret) {
+		pr_debug("spu_new: failed to map %s resource 3\n",
+			 node->full_name);
+		goto out_unmap;
+	}
+	pr_debug("spu_new: %s maps:\n", node->full_name);
+	pr_debug("  local store   : 0x%016lx -> 0x%p\n",
+		 spu->local_store_phys, spu->local_store);
+	pr_debug("  problem state : 0x%016lx -> 0x%p\n",
+		 spu->problem_phys, spu->problem);
+	pr_debug("  priv2         :                       0x%p\n", spu->priv2);
+	pr_debug("  priv1         :                       0x%p\n",
+						spu_get_pdata(spu)->priv1);
+
+	return 0;
+
+out_unmap:
+	spu_unmap(spu);
+out:
+	pr_debug("failed to map spe %s: %d\n", spu->name, ret);
+	return ret;
+}
+
+static int __init of_enumerate_spus(int (*fn)(void *data))
+{
+	int ret;
+	struct device_node *node;
+
+	ret = -ENODEV;
+	for (node = of_find_node_by_type(NULL, "spe");
+			node; node = of_find_node_by_type(node, "spe")) {
+		ret = fn(node);
+		if (ret) {
+			printk(KERN_WARNING "%s: Error initializing %s\n",
+				__FUNCTION__, node->name);
+			break;
+		}
+	}
+	return ret;
+}
+
+static int __init of_create_spu(struct spu *spu, void *data)
+{
+	int ret;
+	struct device_node *spe = (struct device_node *)data;
+
+	spu->pdata = kzalloc(sizeof(struct spu_pdata),
+		GFP_KERNEL);
+	if (!spu->pdata) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	spu->node = find_spu_node_id(spe);
+	if (spu->node >= MAX_NUMNODES) {
+		printk(KERN_WARNING "SPE %s on node %d ignored,"
+		       " node number too big\n", spe->full_name, spu->node);
+		printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
+		ret = -ENODEV;
+		goto out_free;
+	}
+
+	spu_get_pdata(spu)->nid = of_node_to_nid(spe);
+	if (spu_get_pdata(spu)->nid == -1)
+		spu_get_pdata(spu)->nid = 0;
+
+	ret = spu_map_device(spu, spe);
+	/* try old method */
+	if (ret)
+		ret = spu_map_device_old(spu, spe);
+	if (ret)
+		goto out_free;
+
+	ret = spu_map_interrupts(spu, spe);
+	if (ret)
+		ret = spu_map_interrupts_old(spu, spe);
+	if (ret)
+		goto out_unmap;
+
+	spu_get_pdata(spu)->devnode = of_node_get(spe);
+
+	pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name,
+		spu->local_store, spu->problem, spu_get_pdata(spu)->priv1,
+		spu->priv2, spu->number);
+	goto out;
+
+out_unmap:
+	spu_unmap(spu);
+out_free:
+	kfree(spu->pdata);
+	spu->pdata = NULL;
+out:
+	return ret;
+}
+
+static int of_destroy_spu(struct spu *spu)
+{
+	spu_unmap(spu);
+	of_node_put(spu_get_pdata(spu)->devnode);
+	kfree(spu->pdata);
+	spu->pdata = NULL;
+	return 0;
+}
+
+const struct spu_management_ops spu_management_of_ops = {
+	.enumerate_spus = of_enumerate_spus,
+	.create_spu = of_create_spu,
+	.destroy_spu = of_destroy_spu,
+};
 
 static void int_mask_and(struct spu *spu, int class, u64 mask)
 {
 	u64 old_mask;
 
-	old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
-	out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask);
+	old_mask = in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
+	out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class],
+		old_mask & mask);
 }
 
 static void int_mask_or(struct spu *spu, int class, u64 mask)
 {
 	u64 old_mask;
 
-	old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
-	out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask);
+	old_mask = in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
+	out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class],
+		old_mask | mask);
 }
 
 static void int_mask_set(struct spu *spu, int class, u64 mask)
 {
-	out_be64(&spu->priv1->int_mask_RW[class], mask);
+	out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class], mask);
 }
 
 static u64 int_mask_get(struct spu *spu, int class)
 {
-	return in_be64(&spu->priv1->int_mask_RW[class]);
+	return in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
 }
 
 static void int_stat_clear(struct spu *spu, int class, u64 stat)
 {
-	out_be64(&spu->priv1->int_stat_RW[class], stat);
+	out_be64(&spu_get_pdata(spu)->priv1->int_stat_RW[class], stat);
 }
 
 static u64 int_stat_get(struct spu *spu, int class)
 {
-	return in_be64(&spu->priv1->int_stat_RW[class]);
+	return in_be64(&spu_get_pdata(spu)->priv1->int_stat_RW[class]);
 }
 
 static void cpu_affinity_set(struct spu *spu, int cpu)
 {
 	u64 target = iic_get_target_id(cpu);
 	u64 route = target << 48 | target << 32 | target << 16;
-	out_be64(&spu->priv1->int_route_RW, route);
+	out_be64(&spu_get_pdata(spu)->priv1->int_route_RW, route);
 }
 
 static u64 mfc_dar_get(struct spu *spu)
 {
-	return in_be64(&spu->priv1->mfc_dar_RW);
+	return in_be64(&spu_get_pdata(spu)->priv1->mfc_dar_RW);
 }
 
 static u64 mfc_dsisr_get(struct spu *spu)
 {
-	return in_be64(&spu->priv1->mfc_dsisr_RW);
+	return in_be64(&spu_get_pdata(spu)->priv1->mfc_dsisr_RW);
 }
 
 static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
 {
-	out_be64(&spu->priv1->mfc_dsisr_RW, dsisr);
+	out_be64(&spu_get_pdata(spu)->priv1->mfc_dsisr_RW, dsisr);
 }
 
-static void mfc_sdr_set(struct spu *spu, u64 sdr)
+static void mfc_sdr_setup(struct spu *spu)
 {
-	out_be64(&spu->priv1->mfc_sdr_RW, sdr);
+	out_be64(&spu_get_pdata(spu)->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
 }
 
 static void mfc_sr1_set(struct spu *spu, u64 sr1)
 {
-	out_be64(&spu->priv1->mfc_sr1_RW, sr1);
+	out_be64(&spu_get_pdata(spu)->priv1->mfc_sr1_RW, sr1);
 }
 
 static u64 mfc_sr1_get(struct spu *spu)
 {
-	return in_be64(&spu->priv1->mfc_sr1_RW);
+	return in_be64(&spu_get_pdata(spu)->priv1->mfc_sr1_RW);
 }
 
 static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
 {
-	out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id);
+	out_be64(&spu_get_pdata(spu)->priv1->mfc_tclass_id_RW, tclass_id);
 }
 
 static u64 mfc_tclass_id_get(struct spu *spu)
 {
-	return in_be64(&spu->priv1->mfc_tclass_id_RW);
+	return in_be64(&spu_get_pdata(spu)->priv1->mfc_tclass_id_RW);
 }
 
 static void tlb_invalidate(struct spu *spu)
 {
-	out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul);
+	out_be64(&spu_get_pdata(spu)->priv1->tlb_invalidate_entry_W, 0ul);
 }
 
 static void resource_allocation_groupID_set(struct spu *spu, u64 id)
 {
-	out_be64(&spu->priv1->resource_allocation_groupID_RW, id);
+	out_be64(&spu_get_pdata(spu)->priv1->resource_allocation_groupID_RW,
+		id);
 }
 
 static u64 resource_allocation_groupID_get(struct spu *spu)
 {
-	return in_be64(&spu->priv1->resource_allocation_groupID_RW);
+	return in_be64(
+		&spu_get_pdata(spu)->priv1->resource_allocation_groupID_RW);
 }
 
 static void resource_allocation_enable_set(struct spu *spu, u64 enable)
 {
-	out_be64(&spu->priv1->resource_allocation_enable_RW, enable);
+	out_be64(&spu_get_pdata(spu)->priv1->resource_allocation_enable_RW,
+		enable);
 }
 
 static u64 resource_allocation_enable_get(struct spu *spu)
 {
-	return in_be64(&spu->priv1->resource_allocation_enable_RW);
+	return in_be64(
+		&spu_get_pdata(spu)->priv1->resource_allocation_enable_RW);
 }
 
 const struct spu_priv1_ops spu_priv1_mmio_ops =
@@ -146,7 +524,7 @@
 	.mfc_dar_get = mfc_dar_get,
 	.mfc_dsisr_get = mfc_dsisr_get,
 	.mfc_dsisr_set = mfc_dsisr_set,
-	.mfc_sdr_set = mfc_sdr_set,
+	.mfc_sdr_setup = mfc_sdr_setup,
 	.mfc_sr1_set = mfc_sr1_set,
 	.mfc_sr1_get = mfc_sr1_get,
 	.mfc_tclass_id_set = mfc_tclass_id_set,
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.h b/arch/powerpc/platforms/cell/spu_priv1_mmio.h
new file mode 100644
index 0000000..7b62bd1
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.h
@@ -0,0 +1,26 @@
+/*
+ * spu hypervisor abstraction for direct hardware access.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef SPU_PRIV1_MMIO_H
+#define SPU_PRIV1_MMIO_H
+
+struct device_node *spu_devnode(struct spu *spu);
+
+#endif /* SPU_PRIV1_MMIO_H */
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index ecdfbb3..472217d 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,7 +1,7 @@
 obj-y += switch.o
 
 obj-$(CONFIG_SPU_FS) += spufs.o
-spufs-y += inode.o file.o context.o syscalls.o
+spufs-y += inode.o file.o context.o syscalls.o coredump.o
 spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
 
 # Rules to build switch.o with the help of SPU tool chain
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index 2d22cd5..1898f0d 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -36,6 +36,7 @@
 #include <asm/io.h>
 #include <asm/spu.h>
 #include <asm/spu_csa.h>
+#include <asm/spu_info.h>
 #include <asm/mmu_context.h>
 #include "spufs.h"
 
@@ -267,6 +268,11 @@
 	return ctx->csa.lscsa->ls;
 }
 
+static u32 spu_backing_runcntl_read(struct spu_context *ctx)
+{
+	return ctx->csa.prob.spu_runcntl_RW;
+}
+
 static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
 {
 	spin_lock(&ctx->csa.register_lock);
@@ -279,9 +285,26 @@
 	spin_unlock(&ctx->csa.register_lock);
 }
 
-static void spu_backing_runcntl_stop(struct spu_context *ctx)
+static void spu_backing_master_start(struct spu_context *ctx)
 {
-	spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
+	struct spu_state *csa = &ctx->csa;
+	u64 sr1;
+
+	spin_lock(&csa->register_lock);
+	sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+	csa->priv1.mfc_sr1_RW = sr1;
+	spin_unlock(&csa->register_lock);
+}
+
+static void spu_backing_master_stop(struct spu_context *ctx)
+{
+	struct spu_state *csa = &ctx->csa;
+	u64 sr1;
+
+	spin_lock(&csa->register_lock);
+	sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+	csa->priv1.mfc_sr1_RW = sr1;
+	spin_unlock(&csa->register_lock);
 }
 
 static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask,
@@ -345,8 +368,10 @@
 	.npc_write = spu_backing_npc_write,
 	.status_read = spu_backing_status_read,
 	.get_ls = spu_backing_get_ls,
+	.runcntl_read = spu_backing_runcntl_read,
 	.runcntl_write = spu_backing_runcntl_write,
-	.runcntl_stop = spu_backing_runcntl_stop,
+	.master_start = spu_backing_master_start,
+	.master_stop = spu_backing_master_stop,
 	.set_mfc_query = spu_backing_set_mfc_query,
 	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
 	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 034cf6a..0870009 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -120,6 +120,33 @@
 		unmap_mapping_range(ctx->signal2, 0, 0x4000, 1);
 }
 
+int spu_acquire_exclusive(struct spu_context *ctx)
+{
+	int ret = 0;
+
+	down_write(&ctx->state_sema);
+	/* ctx is about to be freed, can't acquire any more */
+	if (!ctx->owner) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (ctx->state == SPU_STATE_SAVED) {
+		ret = spu_activate(ctx, 0);
+		if (ret)
+			goto out;
+		ctx->state = SPU_STATE_RUNNABLE;
+	} else {
+		/* We need to exclude userspace access to the context. */
+		spu_unmap_mappings(ctx);
+	}
+
+out:
+	if (ret)
+		up_write(&ctx->state_sema);
+	return ret;
+}
+
 int spu_acquire_runnable(struct spu_context *ctx)
 {
 	int ret = 0;
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
new file mode 100644
index 0000000..26945c4
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -0,0 +1,238 @@
+/*
+ * SPU core dump code
+ *
+ * (C) Copyright 2006 IBM Corp.
+ *
+ * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/elf.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/syscalls.h>
+
+#include <asm/uaccess.h>
+
+#include "spufs.h"
+
+struct spufs_ctx_info {
+	struct list_head list;
+	int dfd;
+	int memsize; /* in bytes */
+	struct spu_context *ctx;
+};
+
+static LIST_HEAD(ctx_info_list);
+
+static ssize_t do_coredump_read(int num, struct spu_context *ctx, void __user *buffer,
+				size_t size, loff_t *off)
+{
+	u64 data;
+	int ret;
+
+	if (spufs_coredump_read[num].read)
+		return spufs_coredump_read[num].read(ctx, buffer, size, off);
+
+	data = spufs_coredump_read[num].get(ctx);
+	ret = copy_to_user(buffer, &data, 8);
+	return ret ? -EFAULT : 8;
+}
+
+/*
+ * These are the only things you should do on a core-file: use only these
+ * functions to write out all the necessary info.
+ */
+static int spufs_dump_write(struct file *file, const void *addr, int nr)
+{
+	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
+}
+
+static int spufs_dump_seek(struct file *file, loff_t off)
+{
+	if (file->f_op->llseek) {
+		if (file->f_op->llseek(file, off, 0) != off)
+			return 0;
+	} else
+		file->f_pos = off;
+	return 1;
+}
+
+static void spufs_fill_memsize(struct spufs_ctx_info *ctx_info)
+{
+	struct spu_context *ctx;
+	unsigned long long lslr;
+
+	ctx = ctx_info->ctx;
+	lslr = ctx->csa.priv2.spu_lslr_RW;
+	ctx_info->memsize = lslr + 1;
+}
+
+static int spufs_ctx_note_size(struct spufs_ctx_info *ctx_info)
+{
+	int dfd, memsize, i, sz, total = 0;
+	char *name;
+	char fullname[80];
+
+	dfd = ctx_info->dfd;
+	memsize = ctx_info->memsize;
+
+	for (i = 0; spufs_coredump_read[i].name; i++) {
+		name = spufs_coredump_read[i].name;
+		sz = spufs_coredump_read[i].size;
+
+		sprintf(fullname, "SPU/%d/%s", dfd, name);
+
+		total += sizeof(struct elf_note);
+		total += roundup(strlen(fullname) + 1, 4);
+		if (!strcmp(name, "mem"))
+			total += roundup(memsize, 4);
+		else
+			total += roundup(sz, 4);
+	}
+
+	return total;
+}
+
+static int spufs_add_one_context(struct file *file, int dfd)
+{
+	struct spu_context *ctx;
+	struct spufs_ctx_info *ctx_info;
+	int size;
+
+	ctx = SPUFS_I(file->f_dentry->d_inode)->i_ctx;
+	if (ctx->flags & SPU_CREATE_NOSCHED)
+		return 0;
+
+	ctx_info = kzalloc(sizeof(*ctx_info), GFP_KERNEL);
+	if (unlikely(!ctx_info))
+		return -ENOMEM;
+
+	ctx_info->dfd = dfd;
+	ctx_info->ctx = ctx;
+
+	spufs_fill_memsize(ctx_info);
+
+	size = spufs_ctx_note_size(ctx_info);
+	list_add(&ctx_info->list, &ctx_info_list);
+	return size;
+}
+
+/*
+ * The additional architecture-specific notes for Cell are various
+ * context files in the spu context.
+ *
+ * This function iterates over all open file descriptors and sees
+ * if they are a directory in spufs.  In that case we use spufs
+ * internal functionality to dump them without needing to actually
+ * open the files.
+ */
+static int spufs_arch_notes_size(void)
+{
+	struct fdtable *fdt = files_fdtable(current->files);
+	int size = 0, fd;
+
+	for (fd = 0; fd < fdt->max_fdset && fd < fdt->max_fds; fd++) {
+		if (FD_ISSET(fd, fdt->open_fds)) {
+			struct file *file = fcheck(fd);
+
+			if (file && file->f_op == &spufs_context_fops) {
+				int rval = spufs_add_one_context(file, fd);
+				if (rval < 0)
+					break;
+				size += rval;
+			}
+		}
+	}
+
+	return size;
+}
+
+static void spufs_arch_write_note(struct spufs_ctx_info *ctx_info, int i,
+				struct file *file)
+{
+	struct spu_context *ctx;
+	loff_t pos = 0;
+	int sz, dfd, rc, total = 0;
+	const int bufsz = 4096;
+	char *name;
+	char fullname[80], *buf;
+	struct elf_note en;
+
+	buf = kmalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	dfd = ctx_info->dfd;
+	name = spufs_coredump_read[i].name;
+
+	if (!strcmp(name, "mem"))
+		sz = ctx_info->memsize;
+	else
+		sz = spufs_coredump_read[i].size;
+
+	ctx = ctx_info->ctx;
+	if (!ctx) {
+		return;
+	}
+
+	sprintf(fullname, "SPU/%d/%s", dfd, name);
+	en.n_namesz = strlen(fullname) + 1;
+	en.n_descsz = sz;
+	en.n_type = NT_SPU;
+
+	if (!spufs_dump_write(file, &en, sizeof(en)))
+		return;
+	if (!spufs_dump_write(file, fullname, en.n_namesz))
+		return;
+	if (!spufs_dump_seek(file, roundup((unsigned long)file->f_pos, 4)))
+		return;
+
+	do {
+		rc = do_coredump_read(i, ctx, buf, bufsz, &pos);
+		if (rc > 0) {
+			if (!spufs_dump_write(file, buf, rc))
+				return;
+			total += rc;
+		}
+	} while (rc == bufsz && total < sz);
+
+	spufs_dump_seek(file, roundup((unsigned long)file->f_pos
+						- total + sz, 4));
+}
+
+static void spufs_arch_write_notes(struct file *file)
+{
+	int j;
+	struct spufs_ctx_info *ctx_info, *next;
+
+	list_for_each_entry_safe(ctx_info, next, &ctx_info_list, list) {
+		spu_acquire_saved(ctx_info->ctx);
+		for (j = 0; j < spufs_coredump_num_notes; j++)
+			spufs_arch_write_note(ctx_info, j, file);
+		spu_release(ctx_info->ctx);
+		list_del(&ctx_info->list);
+		kfree(ctx_info);
+	}
+}
+
+struct spu_coredump_calls spufs_coredump_calls = {
+	.arch_notes_size = spufs_arch_notes_size,
+	.arch_write_notes = spufs_arch_write_notes,
+	.owner = THIS_MODULE,
+};
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 533e272..347eff5 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -32,13 +32,13 @@
 #include <asm/io.h>
 #include <asm/semaphore.h>
 #include <asm/spu.h>
+#include <asm/spu_info.h>
 #include <asm/uaccess.h>
 
 #include "spufs.h"
 
 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
 
-
 static int
 spufs_mem_open(struct inode *inode, struct file *file)
 {
@@ -51,18 +51,23 @@
 }
 
 static ssize_t
+__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
+			size_t size, loff_t *pos)
+{
+	char *local_store = ctx->ops->get_ls(ctx);
+	return simple_read_from_buffer(buffer, size, pos, local_store,
+					LS_SIZE);
+}
+
+static ssize_t
 spufs_mem_read(struct file *file, char __user *buffer,
 				size_t size, loff_t *pos)
 {
-	struct spu_context *ctx = file->private_data;
-	char *local_store;
 	int ret;
+	struct spu_context *ctx = file->private_data;
 
 	spu_acquire(ctx);
-
-	local_store = ctx->ops->get_ls(ctx);
-	ret = simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE);
-
+	ret = __spufs_mem_read(ctx, buffer, size, pos);
 	spu_release(ctx);
 	return ret;
 }
@@ -104,11 +109,11 @@
 
 	if (ctx->state == SPU_STATE_SAVED) {
 		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-					& ~(_PAGE_NO_CACHE | _PAGE_GUARDED));
+							& ~_PAGE_NO_CACHE);
 		page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
 	} else {
 		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-					| _PAGE_NO_CACHE | _PAGE_GUARDED);
+							| _PAGE_NO_CACHE);
 		page = pfn_to_page((ctx->spu->local_store_phys + offset)
 				   >> PAGE_SHIFT);
 	}
@@ -131,7 +136,7 @@
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
-	/* FIXME: */
+	vma->vm_flags |= VM_IO;
 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
 				     | _PAGE_NO_CACHE);
 
@@ -200,7 +205,7 @@
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
-	vma->vm_flags |= VM_RESERVED;
+	vma->vm_flags |= VM_IO;
 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
@@ -261,18 +266,23 @@
 }
 
 static ssize_t
+__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
+			size_t size, loff_t *pos)
+{
+	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	return simple_read_from_buffer(buffer, size, pos,
+				      lscsa->gprs, sizeof lscsa->gprs);
+}
+
+static ssize_t
 spufs_regs_read(struct file *file, char __user *buffer,
 		size_t size, loff_t *pos)
 {
-	struct spu_context *ctx = file->private_data;
-	struct spu_lscsa *lscsa = ctx->csa.lscsa;
 	int ret;
+	struct spu_context *ctx = file->private_data;
 
 	spu_acquire_saved(ctx);
-
-	ret = simple_read_from_buffer(buffer, size, pos,
-				      lscsa->gprs, sizeof lscsa->gprs);
-
+	ret = __spufs_regs_read(ctx, buffer, size, pos);
 	spu_release(ctx);
 	return ret;
 }
@@ -307,18 +317,23 @@
 };
 
 static ssize_t
+__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
+			size_t size, loff_t * pos)
+{
+	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	return simple_read_from_buffer(buffer, size, pos,
+				      &lscsa->fpcr, sizeof(lscsa->fpcr));
+}
+
+static ssize_t
 spufs_fpcr_read(struct file *file, char __user * buffer,
 		size_t size, loff_t * pos)
 {
-	struct spu_context *ctx = file->private_data;
-	struct spu_lscsa *lscsa = ctx->csa.lscsa;
 	int ret;
+	struct spu_context *ctx = file->private_data;
 
 	spu_acquire_saved(ctx);
-
-	ret = simple_read_from_buffer(buffer, size, pos,
-				      &lscsa->fpcr, sizeof(lscsa->fpcr));
-
+	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
 	spu_release(ctx);
 	return ret;
 }
@@ -718,23 +733,41 @@
 	return nonseekable_open(inode, file);
 }
 
-static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
+static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
 			size_t len, loff_t *pos)
 {
-	struct spu_context *ctx = file->private_data;
+	int ret = 0;
 	u32 data;
 
 	if (len < 4)
 		return -EINVAL;
 
-	spu_acquire(ctx);
-	data = ctx->ops->signal1_read(ctx);
-	spu_release(ctx);
+	if (ctx->csa.spu_chnlcnt_RW[3]) {
+		data = ctx->csa.spu_chnldata_RW[3];
+		ret = 4;
+	}
+
+	if (!ret)
+		goto out;
 
 	if (copy_to_user(buf, &data, 4))
 		return -EFAULT;
 
-	return 4;
+out:
+	return ret;
+}
+
+static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
+			size_t len, loff_t *pos)
+{
+	int ret;
+	struct spu_context *ctx = file->private_data;
+
+	spu_acquire_saved(ctx);
+	ret = __spufs_signal1_read(ctx, buf, len, pos);
+	spu_release(ctx);
+
+	return ret;
 }
 
 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
@@ -782,7 +815,7 @@
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
-	vma->vm_flags |= VM_RESERVED;
+	vma->vm_flags |= VM_IO;
 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
@@ -807,25 +840,41 @@
 	return nonseekable_open(inode, file);
 }
 
-static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
+static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
 			size_t len, loff_t *pos)
 {
-	struct spu_context *ctx;
+	int ret = 0;
 	u32 data;
 
-	ctx = file->private_data;
-
 	if (len < 4)
 		return -EINVAL;
 
-	spu_acquire(ctx);
-	data = ctx->ops->signal2_read(ctx);
-	spu_release(ctx);
+	if (ctx->csa.spu_chnlcnt_RW[4]) {
+		data = ctx->csa.spu_chnldata_RW[4];
+		ret = 4;
+	}
+
+	if (!ret)
+		goto out;
 
 	if (copy_to_user(buf, &data, 4))
 		return -EFAULT;
 
-	return 4;
+out:
+	return ret;
+}
+
+static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
+			size_t len, loff_t *pos)
+{
+	struct spu_context *ctx = file->private_data;
+	int ret;
+
+	spu_acquire_saved(ctx);
+	ret = __spufs_signal2_read(ctx, buf, len, pos);
+	spu_release(ctx);
+
+	return ret;
 }
 
 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
@@ -874,8 +923,7 @@
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
-	/* FIXME: */
-	vma->vm_flags |= VM_RESERVED;
+	vma->vm_flags |= VM_IO;
 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
@@ -902,13 +950,19 @@
 	spu_release(ctx);
 }
 
+static u64 __spufs_signal1_type_get(void *data)
+{
+	struct spu_context *ctx = data;
+	return ctx->ops->signal1_type_get(ctx);
+}
+
 static u64 spufs_signal1_type_get(void *data)
 {
 	struct spu_context *ctx = data;
 	u64 ret;
 
 	spu_acquire(ctx);
-	ret = ctx->ops->signal1_type_get(ctx);
+	ret = __spufs_signal1_type_get(data);
 	spu_release(ctx);
 
 	return ret;
@@ -925,13 +979,19 @@
 	spu_release(ctx);
 }
 
+static u64 __spufs_signal2_type_get(void *data)
+{
+	struct spu_context *ctx = data;
+	return ctx->ops->signal2_type_get(ctx);
+}
+
 static u64 spufs_signal2_type_get(void *data)
 {
 	struct spu_context *ctx = data;
 	u64 ret;
 
 	spu_acquire(ctx);
-	ret = ctx->ops->signal2_type_get(ctx);
+	ret = __spufs_signal2_type_get(data);
 	spu_release(ctx);
 
 	return ret;
@@ -958,7 +1018,7 @@
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
-	vma->vm_flags |= VM_RESERVED;
+	vma->vm_flags |= VM_IO;
 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
@@ -1000,7 +1060,7 @@
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
-	vma->vm_flags |= VM_RESERVED;
+	vma->vm_flags |= VM_IO;
 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
@@ -1041,7 +1101,7 @@
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
-	vma->vm_flags |= VM_RESERVED;
+	vma->vm_flags |= VM_IO;
 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
@@ -1265,6 +1325,7 @@
 		goto out;
 
 	ctx->tagwait |= 1 << cmd.tag;
+	ret = size;
 
 out:
 	return ret;
@@ -1360,7 +1421,8 @@
 	spu_release(ctx);
 	return ret;
 }
-DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n")
+DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
+			"0x%llx\n")
 
 static void spufs_decr_set(void *data, u64 val)
 {
@@ -1371,18 +1433,24 @@
 	spu_release(ctx);
 }
 
-static u64 spufs_decr_get(void *data)
+static u64 __spufs_decr_get(void *data)
 {
 	struct spu_context *ctx = data;
 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	return lscsa->decr.slot[0];
+}
+
+static u64 spufs_decr_get(void *data)
+{
+	struct spu_context *ctx = data;
 	u64 ret;
 	spu_acquire_saved(ctx);
-	ret = lscsa->decr.slot[0];
+	ret = __spufs_decr_get(data);
 	spu_release(ctx);
 	return ret;
 }
 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
-			"%llx\n")
+			"0x%llx\n")
 
 static void spufs_decr_status_set(void *data, u64 val)
 {
@@ -1393,40 +1461,24 @@
 	spu_release(ctx);
 }
 
-static u64 spufs_decr_status_get(void *data)
+static u64 __spufs_decr_status_get(void *data)
 {
 	struct spu_context *ctx = data;
 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	return lscsa->decr_status.slot[0];
+}
+
+static u64 spufs_decr_status_get(void *data)
+{
+	struct spu_context *ctx = data;
 	u64 ret;
 	spu_acquire_saved(ctx);
-	ret = lscsa->decr_status.slot[0];
+	ret = __spufs_decr_status_get(data);
 	spu_release(ctx);
 	return ret;
 }
 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
-			spufs_decr_status_set, "%llx\n")
-
-static void spufs_spu_tag_mask_set(void *data, u64 val)
-{
-	struct spu_context *ctx = data;
-	struct spu_lscsa *lscsa = ctx->csa.lscsa;
-	spu_acquire_saved(ctx);
-	lscsa->tag_mask.slot[0] = (u32) val;
-	spu_release(ctx);
-}
-
-static u64 spufs_spu_tag_mask_get(void *data)
-{
-	struct spu_context *ctx = data;
-	struct spu_lscsa *lscsa = ctx->csa.lscsa;
-	u64 ret;
-	spu_acquire_saved(ctx);
-	ret = lscsa->tag_mask.slot[0];
-	spu_release(ctx);
-	return ret;
-}
-DEFINE_SIMPLE_ATTRIBUTE(spufs_spu_tag_mask_ops, spufs_spu_tag_mask_get,
-			spufs_spu_tag_mask_set, "%llx\n")
+			spufs_decr_status_set, "0x%llx\n")
 
 static void spufs_event_mask_set(void *data, u64 val)
 {
@@ -1437,18 +1489,48 @@
 	spu_release(ctx);
 }
 
-static u64 spufs_event_mask_get(void *data)
+static u64 __spufs_event_mask_get(void *data)
 {
 	struct spu_context *ctx = data;
 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
+	return lscsa->event_mask.slot[0];
+}
+
+static u64 spufs_event_mask_get(void *data)
+{
+	struct spu_context *ctx = data;
 	u64 ret;
 	spu_acquire_saved(ctx);
-	ret = lscsa->event_mask.slot[0];
+	ret = __spufs_event_mask_get(data);
 	spu_release(ctx);
 	return ret;
 }
 DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
-			spufs_event_mask_set, "%llx\n")
+			spufs_event_mask_set, "0x%llx\n")
+
+static u64 __spufs_event_status_get(void *data)
+{
+	struct spu_context *ctx = data;
+	struct spu_state *state = &ctx->csa;
+	u64 stat;
+	stat = state->spu_chnlcnt_RW[0];
+	if (stat)
+		return state->spu_chnldata_RW[0];
+	return 0;
+}
+
+static u64 spufs_event_status_get(void *data)
+{
+	struct spu_context *ctx = data;
+	u64 ret = 0;
+
+	spu_acquire_saved(ctx);
+	ret = __spufs_event_status_get(data);
+	spu_release(ctx);
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
+			NULL, "0x%llx\n")
 
 static void spufs_srr0_set(void *data, u64 val)
 {
@@ -1470,7 +1552,7 @@
 	return ret;
 }
 DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
-			"%llx\n")
+			"0x%llx\n")
 
 static u64 spufs_id_get(void *data)
 {
@@ -1488,12 +1570,18 @@
 }
 DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
 
-static u64 spufs_object_id_get(void *data)
+static u64 __spufs_object_id_get(void *data)
 {
 	struct spu_context *ctx = data;
 	return ctx->object_id;
 }
 
+static u64 spufs_object_id_get(void *data)
+{
+	/* FIXME: Should there really be no locking here? */
+	return __spufs_object_id_get(data);
+}
+
 static void spufs_object_id_set(void *data, u64 id)
 {
 	struct spu_context *ctx = data;
@@ -1503,6 +1591,250 @@
 DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
 		spufs_object_id_set, "0x%llx\n");
 
+static u64 __spufs_lslr_get(void *data)
+{
+	struct spu_context *ctx = data;
+	return ctx->csa.priv2.spu_lslr_RW;
+}
+
+static u64 spufs_lslr_get(void *data)
+{
+	struct spu_context *ctx = data;
+	u64 ret;
+
+	spu_acquire_saved(ctx);
+	ret = __spufs_lslr_get(data);
+	spu_release(ctx);
+
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")
+
+static int spufs_info_open(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+	file->private_data = ctx;
+	return 0;
+}
+
+static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
+			char __user *buf, size_t len, loff_t *pos)
+{
+	u32 mbox_stat;
+	u32 data;
+
+	mbox_stat = ctx->csa.prob.mb_stat_R;
+	if (mbox_stat & 0x0000ff) {
+		data = ctx->csa.prob.pu_mb_R;
+	}
+
+	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
+}
+
+static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
+				   size_t len, loff_t *pos)
+{
+	int ret;
+	struct spu_context *ctx = file->private_data;
+
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
+
+	spu_acquire_saved(ctx);
+	spin_lock(&ctx->csa.register_lock);
+	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
+	spin_unlock(&ctx->csa.register_lock);
+	spu_release(ctx);
+
+	return ret;
+}
+
+static struct file_operations spufs_mbox_info_fops = {
+	.open = spufs_info_open,
+	.read = spufs_mbox_info_read,
+	.llseek  = generic_file_llseek,
+};
+
+static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
+				char __user *buf, size_t len, loff_t *pos)
+{
+	u32 ibox_stat;
+	u32 data;
+
+	ibox_stat = ctx->csa.prob.mb_stat_R;
+	if (ibox_stat & 0xff0000) {
+		data = ctx->csa.priv2.puint_mb_R;
+	}
+
+	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
+}
+
+static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
+				   size_t len, loff_t *pos)
+{
+	struct spu_context *ctx = file->private_data;
+	int ret;
+
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
+
+	spu_acquire_saved(ctx);
+	spin_lock(&ctx->csa.register_lock);
+	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
+	spin_unlock(&ctx->csa.register_lock);
+	spu_release(ctx);
+
+	return ret;
+}
+
+static struct file_operations spufs_ibox_info_fops = {
+	.open = spufs_info_open,
+	.read = spufs_ibox_info_read,
+	.llseek  = generic_file_llseek,
+};
+
+static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
+			char __user *buf, size_t len, loff_t *pos)
+{
+	int i, cnt;
+	u32 data[4];
+	u32 wbox_stat;
+
+	wbox_stat = ctx->csa.prob.mb_stat_R;
+	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
+	for (i = 0; i < cnt; i++) {
+		data[i] = ctx->csa.spu_mailbox_data[i];
+	}
+
+	return simple_read_from_buffer(buf, len, pos, &data,
+				cnt * sizeof(u32));
+}
+
+static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
+				   size_t len, loff_t *pos)
+{
+	struct spu_context *ctx = file->private_data;
+	int ret;
+
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
+
+	spu_acquire_saved(ctx);
+	spin_lock(&ctx->csa.register_lock);
+	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
+	spin_unlock(&ctx->csa.register_lock);
+	spu_release(ctx);
+
+	return ret;
+}
+
+static struct file_operations spufs_wbox_info_fops = {
+	.open = spufs_info_open,
+	.read = spufs_wbox_info_read,
+	.llseek  = generic_file_llseek,
+};
+
+static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
+			char __user *buf, size_t len, loff_t *pos)
+{
+	struct spu_dma_info info;
+	struct mfc_cq_sr *qp, *spuqp;
+	int i;
+
+	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
+	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
+	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
+	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
+	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
+	for (i = 0; i < 16; i++) {
+		qp = &info.dma_info_command_data[i];
+		spuqp = &ctx->csa.priv2.spuq[i];
+
+		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
+		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
+		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
+		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
+	}
+
+	return simple_read_from_buffer(buf, len, pos, &info,
+				sizeof info);
+}
+
+static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
+			      size_t len, loff_t *pos)
+{
+	struct spu_context *ctx = file->private_data;
+	int ret;
+
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
+
+	spu_acquire_saved(ctx);
+	spin_lock(&ctx->csa.register_lock);
+	ret = __spufs_dma_info_read(ctx, buf, len, pos);
+	spin_unlock(&ctx->csa.register_lock);
+	spu_release(ctx);
+
+	return ret;
+}
+
+static struct file_operations spufs_dma_info_fops = {
+	.open = spufs_info_open,
+	.read = spufs_dma_info_read,
+};
+
+static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
+			char __user *buf, size_t len, loff_t *pos)
+{
+	struct spu_proxydma_info info;
+	struct mfc_cq_sr *qp, *puqp;
+	int ret = sizeof info;
+	int i;
+
+	if (len < ret)
+		return -EINVAL;
+
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
+
+	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
+	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
+	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
+	for (i = 0; i < 8; i++) {
+		qp = &info.proxydma_info_command_data[i];
+		puqp = &ctx->csa.priv2.puq[i];
+
+		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
+		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
+		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
+		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
+	}
+
+	return simple_read_from_buffer(buf, len, pos, &info,
+				sizeof info);
+}
+
+static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
+				   size_t len, loff_t *pos)
+{
+	struct spu_context *ctx = file->private_data;
+	int ret;
+
+	spu_acquire_saved(ctx);
+	spin_lock(&ctx->csa.register_lock);
+	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
+	spin_unlock(&ctx->csa.register_lock);
+	spu_release(ctx);
+
+	return ret;
+}
+
+static struct file_operations spufs_proxydma_info_fops = {
+	.open = spufs_info_open,
+	.read = spufs_proxydma_info_read,
+};
+
 struct tree_descr spufs_dir_contents[] = {
 	{ "mem",  &spufs_mem_fops,  0666, },
 	{ "regs", &spufs_regs_fops,  0666, },
@@ -1516,18 +1848,70 @@
 	{ "signal2", &spufs_signal2_fops, 0666, },
 	{ "signal1_type", &spufs_signal1_type, 0666, },
 	{ "signal2_type", &spufs_signal2_type, 0666, },
+	{ "cntl", &spufs_cntl_fops,  0666, },
+	{ "fpcr", &spufs_fpcr_fops, 0666, },
+	{ "lslr", &spufs_lslr_ops, 0444, },
+	{ "mfc", &spufs_mfc_fops, 0666, },
+	{ "mss", &spufs_mss_fops, 0666, },
+	{ "npc", &spufs_npc_ops, 0666, },
+	{ "srr0", &spufs_srr0_ops, 0666, },
+	{ "decr", &spufs_decr_ops, 0666, },
+	{ "decr_status", &spufs_decr_status_ops, 0666, },
+	{ "event_mask", &spufs_event_mask_ops, 0666, },
+	{ "event_status", &spufs_event_status_ops, 0444, },
+	{ "psmap", &spufs_psmap_fops, 0666, },
+	{ "phys-id", &spufs_id_ops, 0666, },
+	{ "object-id", &spufs_object_id_ops, 0666, },
+	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
+	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
+	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
+	{ "dma_info", &spufs_dma_info_fops, 0444, },
+	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
+	{},
+};
+
+struct tree_descr spufs_dir_nosched_contents[] = {
+	{ "mem",  &spufs_mem_fops,  0666, },
+	{ "mbox", &spufs_mbox_fops, 0444, },
+	{ "ibox", &spufs_ibox_fops, 0444, },
+	{ "wbox", &spufs_wbox_fops, 0222, },
+	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
+	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
+	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
+	{ "signal1", &spufs_signal1_fops, 0666, },
+	{ "signal2", &spufs_signal2_fops, 0666, },
+	{ "signal1_type", &spufs_signal1_type, 0666, },
+	{ "signal2_type", &spufs_signal2_type, 0666, },
 	{ "mss", &spufs_mss_fops, 0666, },
 	{ "mfc", &spufs_mfc_fops, 0666, },
 	{ "cntl", &spufs_cntl_fops,  0666, },
 	{ "npc", &spufs_npc_ops, 0666, },
-	{ "fpcr", &spufs_fpcr_fops, 0666, },
-	{ "decr", &spufs_decr_ops, 0666, },
-	{ "decr_status", &spufs_decr_status_ops, 0666, },
-	{ "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
-	{ "event_mask", &spufs_event_mask_ops, 0666, },
-	{ "srr0", &spufs_srr0_ops, 0666, },
 	{ "psmap", &spufs_psmap_fops, 0666, },
 	{ "phys-id", &spufs_id_ops, 0666, },
 	{ "object-id", &spufs_object_id_ops, 0666, },
 	{},
 };
+
+struct spufs_coredump_reader spufs_coredump_read[] = {
+	{ "regs", __spufs_regs_read, NULL, 128 * 16 },
+	{ "fpcr", __spufs_fpcr_read, NULL, 16 },
+	{ "lslr", NULL, __spufs_lslr_get, 11 },
+	{ "decr", NULL, __spufs_decr_get, 11 },
+	{ "decr_status", NULL, __spufs_decr_status_get, 11 },
+	{ "mem", __spufs_mem_read, NULL, 256 * 1024, },
+	{ "signal1", __spufs_signal1_read, NULL, 4 },
+	{ "signal1_type", NULL, __spufs_signal1_type_get, 2 },
+	{ "signal2", __spufs_signal2_read, NULL, 4 },
+	{ "signal2_type", NULL, __spufs_signal2_type_get, 2 },
+	{ "event_mask", NULL, __spufs_event_mask_get, 8 },
+	{ "event_status", NULL, __spufs_event_status_get, 8 },
+	{ "mbox_info", __spufs_mbox_info_read, NULL, 4 },
+	{ "ibox_info", __spufs_ibox_info_read, NULL, 4 },
+	{ "wbox_info", __spufs_wbox_info_read, NULL, 16 },
+	{ "dma_info", __spufs_dma_info_read, NULL, 69 * 8 },
+	{ "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 },
+	{ "object-id", NULL, __spufs_object_id_get, 19 },
+	{ },
+};
+int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;
+
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index d805ffe..ae42e03 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -135,21 +135,11 @@
 	return ret;
 }
 
-static u32 spu_hw_signal1_read(struct spu_context *ctx)
-{
-	return in_be32(&ctx->spu->problem->signal_notify1);
-}
-
 static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
 {
 	out_be32(&ctx->spu->problem->signal_notify1, data);
 }
 
-static u32 spu_hw_signal2_read(struct spu_context *ctx)
-{
-	return in_be32(&ctx->spu->problem->signal_notify2);
-}
-
 static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
 {
 	out_be32(&ctx->spu->problem->signal_notify2, data);
@@ -217,21 +207,42 @@
 	return ctx->spu->local_store;
 }
 
-static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
+static u32 spu_hw_runcntl_read(struct spu_context *ctx)
 {
-	eieio();
-	out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
+	return in_be32(&ctx->spu->problem->spu_runcntl_RW);
 }
 
-static void spu_hw_runcntl_stop(struct spu_context *ctx)
+static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
 {
 	spin_lock_irq(&ctx->spu->register_lock);
-	out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
-	while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
-		cpu_relax();
+	if (val & SPU_RUNCNTL_ISOLATE)
+		out_be64(&ctx->spu->priv2->spu_privcntl_RW, 4LL);
+	out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
 	spin_unlock_irq(&ctx->spu->register_lock);
 }
 
+static void spu_hw_master_start(struct spu_context *ctx)
+{
+	struct spu *spu = ctx->spu;
+	u64 sr1;
+
+	spin_lock_irq(&spu->register_lock);
+	sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+	spu_mfc_sr1_set(spu, sr1);
+	spin_unlock_irq(&spu->register_lock);
+}
+
+static void spu_hw_master_stop(struct spu_context *ctx)
+{
+	struct spu *spu = ctx->spu;
+	u64 sr1;
+
+	spin_lock_irq(&spu->register_lock);
+	sr1 = spu_mfc_sr1_get(spu) & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+	spu_mfc_sr1_set(spu, sr1);
+	spin_unlock_irq(&spu->register_lock);
+}
+
 static int spu_hw_set_mfc_query(struct spu_context * ctx, u32 mask, u32 mode)
 {
 	struct spu_problem __iomem *prob = ctx->spu->problem;
@@ -291,9 +302,7 @@
 	.mbox_stat_poll = spu_hw_mbox_stat_poll,
 	.ibox_read = spu_hw_ibox_read,
 	.wbox_write = spu_hw_wbox_write,
-	.signal1_read = spu_hw_signal1_read,
 	.signal1_write = spu_hw_signal1_write,
-	.signal2_read = spu_hw_signal2_read,
 	.signal2_write = spu_hw_signal2_write,
 	.signal1_type_set = spu_hw_signal1_type_set,
 	.signal1_type_get = spu_hw_signal1_type_get,
@@ -303,8 +312,10 @@
 	.npc_write = spu_hw_npc_write,
 	.status_read = spu_hw_status_read,
 	.get_ls = spu_hw_get_ls,
+	.runcntl_read = spu_hw_runcntl_read,
 	.runcntl_write = spu_hw_runcntl_write,
-	.runcntl_stop = spu_hw_runcntl_stop,
+	.master_start = spu_hw_master_start,
+	.master_stop = spu_hw_master_stop,
 	.set_mfc_query = spu_hw_set_mfc_query,
 	.read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
 	.get_mfc_free_elements = spu_hw_get_mfc_free_elements,
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 427d00a..e3af911 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -33,21 +33,22 @@
 #include <linux/slab.h>
 #include <linux/parser.h>
 
-#include <asm/io.h>
+#include <asm/prom.h>
 #include <asm/semaphore.h>
 #include <asm/spu.h>
 #include <asm/uaccess.h>
 
 #include "spufs.h"
 
-static kmem_cache_t *spufs_inode_cache;
+static struct kmem_cache *spufs_inode_cache;
+char *isolated_loader;
 
 static struct inode *
 spufs_alloc_inode(struct super_block *sb)
 {
 	struct spufs_inode_info *ei;
 
-	ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL);
+	ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 
@@ -64,7 +65,7 @@
 }
 
 static void
-spufs_init_once(void *p, kmem_cache_t * cachep, unsigned long flags)
+spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct spufs_inode_info *ei = p;
 
@@ -231,6 +232,7 @@
 	.readdir	= dcache_readdir,
 	.fsync		= simple_sync_file,
 };
+EXPORT_SYMBOL_GPL(spufs_context_fops);
 
 static int
 spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
@@ -255,10 +257,14 @@
 		goto out_iput;
 
 	ctx->flags = flags;
-
 	inode->i_op = &spufs_dir_inode_operations;
 	inode->i_fop = &simple_dir_operations;
-	ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
+	if (flags & SPU_CREATE_NOSCHED)
+		ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
+					 mode, ctx);
+	else
+		ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
+
 	if (ret)
 		goto out_free_ctx;
 
@@ -307,6 +313,20 @@
 {
 	int ret;
 
+	ret = -EPERM;
+	if ((flags & SPU_CREATE_NOSCHED) &&
+	    !capable(CAP_SYS_NICE))
+		goto out_unlock;
+
+	ret = -EINVAL;
+	if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
+	    == SPU_CREATE_ISOLATE)
+		goto out_unlock;
+
+	ret = -ENODEV;
+	if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
+		goto out_unlock;
+
 	ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
 	if (ret)
 		goto out_unlock;
@@ -540,6 +560,30 @@
 	return 1;
 }
 
+static void
+spufs_init_isolated_loader(void)
+{
+	struct device_node *dn;
+	const char *loader;
+	int size;
+
+	dn = of_find_node_by_path("/spu-isolation");
+	if (!dn)
+		return;
+
+	loader = get_property(dn, "loader", &size);
+	if (!loader)
+		return;
+
+	/* kmalloc should align on a 16 byte boundary... */
+	isolated_loader = kmalloc(size, GFP_KERNEL);
+	if (!isolated_loader)
+		return;
+
+	memcpy(isolated_loader, loader, size);
+	printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
+}
+
 static int
 spufs_create_root(struct super_block *sb, void *data)
 {
@@ -608,6 +652,7 @@
 static int __init spufs_init(void)
 {
 	int ret;
+
 	ret = -ENOMEM;
 	spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
 			sizeof(struct spufs_inode_info), 0,
@@ -625,6 +670,12 @@
 	ret = register_spu_syscalls(&spufs_calls);
 	if (ret)
 		goto out_fs;
+	ret = register_arch_coredump_calls(&spufs_coredump_calls);
+	if (ret)
+		goto out_fs;
+
+	spufs_init_isolated_loader();
+
 	return 0;
 out_fs:
 	unregister_filesystem(&spufs_type);
@@ -638,6 +689,7 @@
 static void __exit spufs_exit(void)
 {
 	spu_sched_exit();
+	unregister_arch_coredump_calls(&spufs_coredump_calls);
 	unregister_spu_syscalls(&spufs_calls);
 	unregister_filesystem(&spufs_type);
 	kmem_cache_destroy(spufs_inode_cache);
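The flag checks added to spufs_create() above boil down to three rules for callers: SPU_CREATE_NOSCHED needs CAP_SYS_NICE, SPU_CREATE_ISOLATE is only valid together with NOSCHED, and isolation additionally requires a platform-provided loader. A hedged userspace sketch (the header locations and mount point are assumptions; real applications normally go through libspe rather than the raw syscall):

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/spu.h>		/* SPU_CREATE_* -- header location assumed */

	/* Illustrative only: request an isolated, non-scheduled SPE context.
	 * Expect -EPERM without CAP_SYS_NICE, -EINVAL for ISOLATE without
	 * NOSCHED, and -ENODEV when no isolated loader was found at boot.
	 */
	static int example_create_isolated_context(void)
	{
		return syscall(__NR_spu_create, "/spu/isolated-ctx",
			       SPU_CREATE_ISOLATE | SPU_CREATE_NOSCHED, 0700);
	}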
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 63df8cf..1acc2ff 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -1,7 +1,11 @@
+#define DEBUG
+
 #include <linux/wait.h>
 #include <linux/ptrace.h>
 
 #include <asm/spu.h>
+#include <asm/spu_priv1.h>
+#include <asm/io.h>
 #include <asm/unistd.h>
 
 #include "spufs.h"
@@ -24,6 +28,7 @@
 	} else {
 		switch (type) {
 		case SPE_EVENT_DMA_ALIGNMENT:
+		case SPE_EVENT_SPE_DATA_STORAGE:
 		case SPE_EVENT_INVALID_DMA:
 			force_sig(SIGBUS, /* info, */ current);
 			break;
@@ -48,15 +53,122 @@
 	return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
 }
 
+static int spu_setup_isolated(struct spu_context *ctx)
+{
+	int ret;
+	u64 __iomem *mfc_cntl;
+	u64 sr1;
+	u32 status;
+	unsigned long timeout;
+	const u32 status_loading = SPU_STATUS_RUNNING
+		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;
+
+	if (!isolated_loader)
+		return -ENODEV;
+
+	ret = spu_acquire_exclusive(ctx);
+	if (ret)
+		goto out;
+
+	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
+
+	/* purge the MFC DMA queue to ensure no spurious accesses before we
+	 * enter kernel mode */
+	timeout = jiffies + HZ;
+	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
+	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
+			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
+		if (time_after(jiffies, timeout)) {
+			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
+					__FUNCTION__);
+			ret = -EIO;
+			goto out_unlock;
+		}
+		cond_resched();
+	}
+
+	/* put the SPE in kernel mode to allow access to the loader */
+	sr1 = spu_mfc_sr1_get(ctx->spu);
+	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
+	spu_mfc_sr1_set(ctx->spu, sr1);
+
+	/* start the loader */
+	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
+	ctx->ops->signal2_write(ctx,
+			(unsigned long)isolated_loader & 0xffffffff);
+
+	ctx->ops->runcntl_write(ctx,
+			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
+
+	ret = 0;
+	timeout = jiffies + HZ;
+	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
+				status_loading) {
+		if (time_after(jiffies, timeout)) {
+			printk(KERN_ERR "%s: timeout waiting for loader\n",
+					__FUNCTION__);
+			ret = -EIO;
+			goto out_drop_priv;
+		}
+		cond_resched();
+	}
+
+	if (!(status & SPU_STATUS_RUNNING)) {
+		/* If isolated LOAD has failed: run SPU, we will get a
+		 * stop-and-signal later. */
+		pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
+		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
+		ret = -EACCES;
+
+	} else if (!(status & SPU_STATUS_ISOLATED_STATE)) {
+		/* This isn't allowed by the CBEA, but check anyway */
+		pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
+		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
+		ret = -EINVAL;
+	}
+
+out_drop_priv:
+	/* Finished accessing the loader. Drop kernel mode */
+	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
+	spu_mfc_sr1_set(ctx->spu, sr1);
+
+out_unlock:
+	spu_release_exclusive(ctx);
+out:
+	return ret;
+}
+
 static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 {
 	int ret;
+	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
 
-	if ((ret = spu_acquire_runnable(ctx)) != 0)
+	ret = spu_acquire_runnable(ctx);
+	if (ret)
 		return ret;
-	ctx->ops->npc_write(ctx, *npc);
-	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
-	return 0;
+
+	if (ctx->flags & SPU_CREATE_ISOLATE) {
+		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
+			/* Need to release ctx, because spu_setup_isolated will
+			 * acquire it exclusively.
+			 */
+			spu_release(ctx);
+			ret = spu_setup_isolated(ctx);
+			if (!ret)
+				ret = spu_acquire_runnable(ctx);
+		}
+
+		/* if userspace has set the runcntl register (e.g. to issue an
+		 * isolated exit), we need to re-set it here */
+		runcntl = ctx->ops->runcntl_read(ctx) &
+			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
+		if (runcntl == 0)
+			runcntl = SPU_RUNCNTL_RUNNABLE;
+	} else
+		ctx->ops->npc_write(ctx, *npc);
+
+	ctx->ops->runcntl_write(ctx, runcntl);
+	return ret;
 }
 
 static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
@@ -70,13 +182,7 @@
 
 	if (signal_pending(current))
 		ret = -ERESTARTSYS;
-	if (unlikely(current->ptrace & PT_PTRACED)) {
-		if ((*status & SPU_STATUS_STOPPED_BY_STOP)
-		    && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
-			force_sig(SIGTRAP, current);
-			ret = -ERESTARTSYS;
-		}
-	}
+
 	return ret;
 }
 
@@ -204,6 +310,7 @@
 	if (down_interruptible(&ctx->run_sema))
 		return -ERESTARTSYS;
 
+	ctx->ops->master_start(ctx);
 	ctx->event_return = 0;
 	ret = spu_run_init(ctx, npc);
 	if (ret)
@@ -223,7 +330,7 @@
 		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
 			ret = spu_reacquire_runnable(ctx, npc, &status);
 			if (ret)
-				goto out;
+				goto out2;
 			continue;
 		}
 		ret = spu_process_events(ctx);
@@ -231,12 +338,24 @@
 	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
 				      SPU_STATUS_STOPPED_BY_HALT)));
 
-	ctx->ops->runcntl_stop(ctx);
+	ctx->ops->master_stop(ctx);
 	ret = spu_run_fini(ctx, npc, &status);
-	if (!ret)
-		ret = status;
 	spu_yield(ctx);
 
+out2:
+	if ((ret == 0) ||
+	    ((ret == -ERESTARTSYS) &&
+	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
+	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
+	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
+		ret = status;
+
+	if ((status & SPU_STATUS_STOPPED_BY_STOP)
+	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
+		force_sig(SIGTRAP, current);
+		ret = -ERESTARTSYS;
+	}
+
 out:
 	*event = ctx->event_return;
 	up(&ctx->run_sema);
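On the exit path above, the stop condition flags live in the low bits of the status word and the stop-and-signal code sits above SPU_STOP_STATUS_SHIFT; code 0x3fff is the one spufs_run_spu() now turns into SIGTRAP unconditionally (previously only for ptraced tasks). A small sketch of the same decode (illustrative helper, not part of the patch):

	#include "spufs.h"	/* SPU_STATUS_* and SPU_STOP_STATUS_SHIFT */

	/* Illustrative only: mirrors the breakpoint test at the end of
	 * spufs_run_spu() -- stopped by a stop-and-signal with code 0x3fff.
	 */
	static int example_is_breakpoint_stop(u32 status)
	{
		return (status & SPU_STATUS_STOPPED_BY_STOP) &&
			(status >> SPU_STOP_STATUS_SHIFT) == 0x3fff;
	}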
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index a0f55ca..70fb133 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -29,6 +29,7 @@
 
 #include <asm/spu.h>
 #include <asm/spu_csa.h>
+#include <asm/spu_info.h>
 
 /* The magic number for our file system */
 enum {
@@ -114,13 +115,19 @@
 	void (*npc_write) (struct spu_context * ctx, u32 data);
 	 u32(*status_read) (struct spu_context * ctx);
 	char*(*get_ls) (struct spu_context * ctx);
+	 u32 (*runcntl_read) (struct spu_context * ctx);
 	void (*runcntl_write) (struct spu_context * ctx, u32 data);
-	void (*runcntl_stop) (struct spu_context * ctx);
+	void (*master_start) (struct spu_context * ctx);
+	void (*master_stop) (struct spu_context * ctx);
 	int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode);
 	u32 (*read_mfc_tagstatus)(struct spu_context * ctx);
 	u32 (*get_mfc_free_elements)(struct spu_context *ctx);
-	int (*send_mfc_command)(struct spu_context *ctx,
-					struct mfc_dma_command *cmd);
+	int (*send_mfc_command)(struct spu_context * ctx,
+				struct mfc_dma_command * cmd);
+	void (*dma_info_read) (struct spu_context * ctx,
+			       struct spu_dma_info * info);
+	void (*proxydma_info_read) (struct spu_context * ctx,
+				    struct spu_proxydma_info * info);
 };
 
 extern struct spu_context_ops spu_hw_ops;
@@ -135,6 +142,7 @@
 	container_of(inode, struct spufs_inode_info, vfs_inode)
 
 extern struct tree_descr spufs_dir_contents[];
+extern struct tree_descr spufs_dir_nosched_contents[];
 
 /* system call implementation */
 long spufs_run_spu(struct file *file,
@@ -162,6 +170,12 @@
 void spu_release(struct spu_context *ctx);
 int spu_acquire_runnable(struct spu_context *ctx);
 void spu_acquire_saved(struct spu_context *ctx);
+int spu_acquire_exclusive(struct spu_context *ctx);
+
+static inline void spu_release_exclusive(struct spu_context *ctx)
+{
+	up_write(&ctx->state_sema);
+}
 
 int spu_activate(struct spu_context *ctx, u64 flags);
 void spu_deactivate(struct spu_context *ctx);
@@ -169,6 +183,8 @@
 int __init spu_sched_init(void);
 void __exit spu_sched_exit(void);
 
+extern char *isolated_loader;
+
 /*
  * spufs_wait
  * 	Same as wait_event_interruptible(), except that here
@@ -207,4 +223,15 @@
 void spufs_mfc_callback(struct spu *spu);
 void spufs_dma_callback(struct spu *spu, int type);
 
+extern struct spu_coredump_calls spufs_coredump_calls;
+struct spufs_coredump_reader {
+	char *name;
+	ssize_t (*read)(struct spu_context *ctx,
+			char __user *buffer, size_t size, loff_t *pos);
+	u64 (*get)(void *data);
+	size_t size;
+};
+extern struct spufs_coredump_reader spufs_coredump_read[];
+extern int spufs_coredump_num_notes;
+
 #endif
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 0f782ca..c08981f 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -102,7 +102,7 @@
 	 *     saved at this time.
 	 */
 	isolate_state = SPU_STATUS_ISOLATED_STATE |
-	    SPU_STATUS_ISOLATED_LOAD_STAUTUS | SPU_STATUS_ISOLATED_EXIT_STAUTUS;
+	    SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
 	return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
 }
 
@@ -1046,12 +1046,12 @@
 	 */
 	if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
 		if (in_be32(&prob->spu_status_R) &
-		    SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
+		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
 			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
 					SPU_STATUS_RUNNING);
 		}
 		if ((in_be32(&prob->spu_status_R) &
-		     SPU_STATUS_ISOLATED_LOAD_STAUTUS)
+		     SPU_STATUS_ISOLATED_LOAD_STATUS)
 		    || (in_be32(&prob->spu_status_R) &
 			SPU_STATUS_ISOLATED_STATE)) {
 			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
@@ -1085,7 +1085,7 @@
 	 */
 	if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
 		if (in_be32(&prob->spu_status_R) &
-		    SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
+		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
 			spu_mfc_sr1_set(spu,
 					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
 			eieio();
@@ -1095,7 +1095,7 @@
 					SPU_STATUS_RUNNING);
 		}
 		if ((in_be32(&prob->spu_status_R) &
-		     SPU_STATUS_ISOLATED_LOAD_STAUTUS)
+		     SPU_STATUS_ISOLATED_LOAD_STATUS)
 		    || (in_be32(&prob->spu_status_R) &
 			SPU_STATUS_ISOLATED_STATE)) {
 			spu_mfc_sr1_set(spu,
@@ -1916,6 +1916,51 @@
 	wait_spu_stopped(prev, spu);	/* Step 57. */
 }
 
+static void force_spu_isolate_exit(struct spu *spu)
+{
+	struct spu_problem __iomem *prob = spu->problem;
+	struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+	/* Stop SPE execution and wait for completion. */
+	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
+	iobarrier_rw();
+	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
+
+	/* Restart SPE master runcntl. */
+	spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
+	iobarrier_w();
+
+	/* Initiate isolate exit request and wait for completion. */
+	out_be64(&priv2->spu_privcntl_RW, 4LL);
+	iobarrier_w();
+	out_be32(&prob->spu_runcntl_RW, 2);
+	iobarrier_rw();
+	POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
+				& SPU_STATUS_STOPPED_BY_STOP));
+
+	/* Reset load request to normal. */
+	out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
+	iobarrier_w();
+}
+
+/**
+ * stop_spu_isolate
+ *	Check SPU run-control state and force the isolated
+ *	exit sequence as necessary.
+ */
+static void stop_spu_isolate(struct spu *spu)
+{
+	struct spu_problem __iomem *prob = spu->problem;
+
+	if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
+		/* The SPU is in isolated state; the only way
+		 * to get it out is to perform an isolated
+		 * exit (clean) operation.
+		 */
+		force_spu_isolate_exit(spu);
+	}
+}
+
 static void harvest(struct spu_state *prev, struct spu *spu)
 {
 	/*
@@ -1928,6 +1973,7 @@
 	inhibit_user_access(prev, spu);	        /* Step 3.  */
 	terminate_spu_app(prev, spu);	        /* Step 4.  */
 	set_switch_pending(prev, spu);	        /* Step 5.  */
+	stop_spu_isolate(spu);			/* NEW.     */
 	remove_other_spu_access(prev, spu);	/* Step 6.  */
 	suspend_mfc(prev, spu);	                /* Step 7.  */
 	wait_suspend_mfc_complete(prev, spu);	/* Step 8.  */
@@ -2096,11 +2142,11 @@
 	acquire_spu_lock(spu);	        /* Step 1.     */
 	rc = __do_spu_save(prev, spu);	/* Steps 2-53. */
 	release_spu_lock(spu);
-	if (rc) {
+	if (rc != 0 && rc != 2 && rc != 6) {
 		panic("%s failed on SPU[%d], rc=%d.\n",
 		      __func__, spu->number, rc);
 	}
-	return rc;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(spu_save);
 
@@ -2165,9 +2211,6 @@
 	    MFC_STATE1_PROBLEM_STATE_MASK |
 	    MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;
 
-	/* Set storage description.  */
-	csa->priv1.mfc_sdr_RW = mfspr(SPRN_SDR1);
-
 	/* Enable OS-specific set of interrupts. */
 	csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
 	    CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
diff --git a/arch/powerpc/platforms/chrp/chrp.h b/arch/powerpc/platforms/chrp/chrp.h
index 996c287..63f0aee 100644
--- a/arch/powerpc/platforms/chrp/chrp.h
+++ b/arch/powerpc/platforms/chrp/chrp.h
@@ -9,4 +9,3 @@
 
 extern void chrp_find_bridges(void);
 extern void chrp_event_scan(unsigned long);
-extern void chrp_pcibios_fixup(void);
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index 0f43405..ddb4a11 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -156,15 +156,6 @@
 	return 1;
 }
 
-void __init
-chrp_pcibios_fixup(void)
-{
-	struct pci_dev *dev = NULL;
-
-	for_each_pci_dev(dev)
-		pci_read_irq_line(dev);
-}
-
 #define PRG_CL_RESET_VALID 0x00010000
 
 static void __init
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 49b8dab..e1f51d4 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -588,7 +588,6 @@
 	ISA_DMA_THRESHOLD = ~0L;
 	DMA_MODE_READ = 0x44;
 	DMA_MODE_WRITE = 0x48;
-	isa_io_base = CHRP_ISA_IO_BASE;		/* default value */
 
 	return 1;
 }
@@ -600,7 +599,6 @@
 	.init			= chrp_init2,
 	.show_cpuinfo		= chrp_show_cpuinfo,
 	.init_IRQ		= chrp_init_IRQ,
-	.pcibios_fixup		= chrp_pcibios_fixup,
 	.restart		= rtas_restart,
 	.power_off		= rtas_power_off,
 	.halt			= rtas_halt,
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index 234a861..ddbe398 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -74,6 +74,18 @@
 	  Select SANDPOINT if configuring for a Motorola Sandpoint X3
 	  (any flavor).
 
+config LINKSTATION
+	bool "Linkstation / Kurobox(HG) from Buffalo"
+	select MPIC
+	select FSL_SOC
+	select PPC_UDBG_16550 if SERIAL_8250
+	help
+	  Select LINKSTATION if configuring for one of the PPC-based
+	  (MPC8241) NAS systems from Buffalo Technology. So far only the
+	  KuroboxHG has been tested. In the future, the classical Kurobox,
+	  the Linkstation-I HD-HLAN and HD-HGLAN versions, and the
+	  PPC-based Terastation systems should be supported too.
+
 config MPC7448HPC2
 	bool "Freescale MPC7448HPC2(Taiga)"
 	select TSI108_BRIDGE
@@ -146,15 +158,6 @@
 	  Select PQ2FADS if you wish to configure for a Freescale
 	  PQ2FADS board (-VR or -ZU).
 
-config LITE5200
-	bool "Freescale LITE5200 / (IceCube)"
-	select PPC_MPC52xx
-	help
-	  Support for the LITE5200 dev board for the MPC5200 from Freescale.
-	  This is for the LITE5200 version 2.0 board. Don't know if it changes
-	  much but it's only been tested on this board version. I think this
-	  board is also known as IceCube.
-
 config EV64360
 	bool "Marvell-EV64360BP"
 	help
@@ -172,9 +175,6 @@
 	depends on 8xx && (TQM823L || TQM850L || FPS850L || TQM855L || TQM860L)
 	default y
 
-config PPC_MPC52xx
-	bool
-
 config 8260
 	bool "CPM2 Support" if WILLOW
 	depends on 6xx
@@ -208,7 +208,7 @@
 	depends on SANDPOINT || SPRUCE || PPLUS || \
 		PRPMC750 || PRPMC800 || LOPEC || \
 		(EV64260 && !SERIAL_MPSC) || CHESTNUT || RADSTONE_PPC7D || \
-		83xx
+		83xx || LINKSTATION
 	default y
 
 config FORCE
@@ -282,13 +282,13 @@
 
 config MPC10X_BRIDGE
 	bool
-	depends on POWERPMC250 || LOPEC || SANDPOINT
+	depends on POWERPMC250 || LOPEC || SANDPOINT || LINKSTATION
 	select PPC_INDIRECT_PCI
 	default y
 
 config MPC10X_OPENPIC
 	bool
-	depends on POWERPMC250 || LOPEC || SANDPOINT
+	depends on POWERPMC250 || LOPEC || SANDPOINT || LINKSTATION
 	default y
 
 config MPC10X_STORE_GATHERING
diff --git a/arch/powerpc/platforms/embedded6xx/Makefile b/arch/powerpc/platforms/embedded6xx/Makefile
index fa499fe..d3d11a3 100644
--- a/arch/powerpc/platforms/embedded6xx/Makefile
+++ b/arch/powerpc/platforms/embedded6xx/Makefile
@@ -2,3 +2,4 @@
 # Makefile for the 6xx/7xx/7xxxx linux kernel.
 #
 obj-$(CONFIG_MPC7448HPC2)	+= mpc7448_hpc2.o
+obj-$(CONFIG_LINKSTATION)	+= linkstation.o ls_uart.o
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
new file mode 100644
index 0000000..61599d9
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -0,0 +1,211 @@
+/*
+ * Board setup routines for the Buffalo Linkstation / Kurobox Platform.
+ *
+ * Copyright (C) 2006 G. Liakhovetski (g.liakhovetski@gmx.de)
+ *
+ * Based on sandpoint.c by Mark A. Greer
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of
+ * any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/initrd.h>
+#include <linux/root_dev.h>
+#include <linux/mtd/physmap.h>
+
+#include <asm/time.h>
+#include <asm/prom.h>
+#include <asm/mpic.h>
+#include <asm/mpc10x.h>
+#include <asm/pci-bridge.h>
+
+static struct mtd_partition linkstation_physmap_partitions[] = {
+	{
+		.name   = "mtd_firmimg",
+		.offset = 0x000000,
+		.size   = 0x300000,
+	},
+	{
+		.name   = "mtd_bootcode",
+		.offset = 0x300000,
+		.size   = 0x070000,
+	},
+	{
+		.name   = "mtd_status",
+		.offset = 0x370000,
+		.size   = 0x010000,
+	},
+	{
+		.name   = "mtd_conf",
+		.offset = 0x380000,
+		.size   = 0x080000,
+	},
+	{
+		.name   = "mtd_allflash",
+		.offset = 0x000000,
+		.size   = 0x400000,
+	},
+	{
+		.name   = "mtd_data",
+		.offset = 0x310000,
+		.size   = 0x0f0000,
+	},
+};
+
+static int __init add_bridge(struct device_node *dev)
+{
+	int len;
+	struct pci_controller *hose;
+	int *bus_range;
+
+	printk("Adding PCI host bridge %s\n", dev->full_name);
+
+	bus_range = (int *) get_property(dev, "bus-range", &len);
+	if (bus_range == NULL || len < 2 * sizeof(int))
+		printk(KERN_WARNING "Can't get bus-range for %s, assume"
+				" bus 0\n", dev->full_name);
+
+	hose = pcibios_alloc_controller();
+	if (hose == NULL)
+		return -ENOMEM;
+	hose->first_busno = bus_range ? bus_range[0] : 0;
+	hose->last_busno = bus_range ? bus_range[1] : 0xff;
+	hose->arch_data = dev;
+	setup_indirect_pci(hose, 0xfec00000, 0xfee00000);
+
+	/* Interpret the "ranges" property */
+	/* This also maps the I/O region and sets isa_io/mem_base */
+	pci_process_bridge_OF_ranges(hose, dev, 1);
+
+	return 0;
+}
+
+static void __init linkstation_setup_arch(void)
+{
+	struct device_node *np;
+#ifdef CONFIG_MTD_PHYSMAP
+	physmap_set_partitions(linkstation_physmap_partitions,
+			       ARRAY_SIZE(linkstation_physmap_partitions));
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (initrd_start)
+		ROOT_DEV = Root_RAM0;
+	else
+#endif
+#ifdef	CONFIG_ROOT_NFS
+		ROOT_DEV = Root_NFS;
+#else
+		ROOT_DEV = Root_HDA1;
+#endif
+
+	/* Lookup PCI host bridges */
+	for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
+		add_bridge(np);
+
+	printk(KERN_INFO "BUFFALO Network Attached Storage Series\n");
+	printk(KERN_INFO "(C) 2002-2005 BUFFALO INC.\n");
+}
+
+/*
+ * Interrupt setup and service.  Interrupts on the linkstation come
+ * from the four PCI slots plus onboard 8241 devices: I2C, DUART.
+ */
+static void __init linkstation_init_IRQ(void)
+{
+	struct mpic *mpic;
+	struct device_node *dnp;
+	void *prop;
+	int size;
+	phys_addr_t paddr;
+
+	dnp = of_find_node_by_type(NULL, "open-pic");
+	if (dnp == NULL)
+		return;
+
+	prop = (struct device_node *)get_property(dnp, "reg", &size);
+	paddr = (phys_addr_t)of_translate_address(dnp, prop);
+
+	mpic = mpic_alloc(dnp, paddr, MPIC_PRIMARY | MPIC_WANTS_RESET, 4, 32, " EPIC     ");
+	BUG_ON(mpic == NULL);
+
+	/* PCI IRQs */
+	mpic_assign_isu(mpic, 0, paddr + 0x10200);
+
+	/* I2C */
+	mpic_assign_isu(mpic, 1, paddr + 0x11000);
+
+	/* ttyS0, ttyS1 */
+	mpic_assign_isu(mpic, 2, paddr + 0x11100);
+
+	mpic_init(mpic);
+}
+
+extern void avr_uart_configure(void);
+extern void avr_uart_send(const char);
+
+static void linkstation_restart(char *cmd)
+{
+	local_irq_disable();
+
+	/* Reset system via AVR */
+	avr_uart_configure();
+	/* Send reboot command */
+	avr_uart_send('C');
+
+	for (;;)  /* Spin until reset happens */
+		avr_uart_send('G');	/* "kick" */
+}
+
+static void linkstation_power_off(void)
+{
+	local_irq_disable();
+
+	/* Power down system via AVR */
+	avr_uart_configure();
+	/* send shutdown command */
+	avr_uart_send('E');
+
+	for (;;)  /* Spin until power-off happens */
+		avr_uart_send('G');	/* "kick" */
+	/* NOTREACHED */
+}
+
+static void linkstation_halt(void)
+{
+	linkstation_power_off();
+	/* NOTREACHED */
+}
+
+static void linkstation_show_cpuinfo(struct seq_file *m)
+{
+	seq_printf(m, "vendor\t\t: Buffalo Technology\n");
+	seq_printf(m, "machine\t\t: Linkstation I/Kurobox(HG)\n");
+}
+
+static int __init linkstation_probe(void)
+{
+	unsigned long root;
+
+	root = of_get_flat_dt_root();
+
+	if (!of_flat_dt_is_compatible(root, "linkstation"))
+		return 0;
+	return 1;
+}
+
+define_machine(linkstation){
+	.name 			= "Buffalo Linkstation",
+	.probe 			= linkstation_probe,
+	.setup_arch 		= linkstation_setup_arch,
+	.init_IRQ 		= linkstation_init_IRQ,
+	.show_cpuinfo 		= linkstation_show_cpuinfo,
+	.get_irq 		= mpic_get_irq,
+	.restart 		= linkstation_restart,
+	.power_off 		= linkstation_power_off,
+	.halt	 		= linkstation_halt,
+	.calibrate_decr 	= generic_calibrate_decr,
+};
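For quick reference, the single-character commands this file sends to the board-control AVR (derived from the routines above and from ls_uart.c below; the AVR firmware itself is not documented here):

	/*
	 * AVR serial commands used by linkstation.c / ls_uart.c:
	 *   'C'  reboot the system            (linkstation_restart)
	 *   'E'  power the system off         (linkstation_power_off)
	 *   'G'  keep-alive "kick" sent while waiting for the AVR to act
	 * In addition, wd_stop() in ls_uart.c streams the fixed string
	 * "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK" at boot to disarm the
	 * AVR watchdog.
	 */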
diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c
new file mode 100644
index 0000000..0e83776
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/ls_uart.c
@@ -0,0 +1,131 @@
+#include <linux/workqueue.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
+#include <asm/io.h>
+#include <asm/mpc10x.h>
+#include <asm/ppc_sys.h>
+#include <asm/prom.h>
+#include <asm/termbits.h>
+
+static void __iomem *avr_addr;
+static unsigned long avr_clock;
+
+static struct work_struct wd_work;
+
+static void wd_stop(struct work_struct *unused)
+{
+	const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK";
+	int i = 0, rescue = 8;
+	int len = strlen(string);
+
+	while (rescue--) {
+		int j;
+		char lsr = in_8(avr_addr + UART_LSR);
+
+		if (lsr & (UART_LSR_THRE | UART_LSR_TEMT)) {
+			for (j = 0; j < 16 && i < len; j++, i++)
+				out_8(avr_addr + UART_TX, string[i]);
+			if (i == len) {
+				/* Read "OK" back: 4ms for the last "KKKK"
+				   plus a couple bytes back */
+				msleep(7);
+				printk("linkstation: disarming the AVR watchdog: ");
+				while (in_8(avr_addr + UART_LSR) & UART_LSR_DR)
+					printk("%c", in_8(avr_addr + UART_RX));
+				break;
+			}
+		}
+		msleep(17);
+	}
+	printk("\n");
+}
+
+#define AVR_QUOT(clock) (((clock) + 8 * 9600) / (16 * 9600))
+
+void avr_uart_configure(void)
+{
+	unsigned char cval = UART_LCR_WLEN8;
+	unsigned int quot = AVR_QUOT(avr_clock);
+
+	if (!avr_addr || !avr_clock)
+		return;
+
+	out_8(avr_addr + UART_LCR, cval);			/* initialise UART */
+	out_8(avr_addr + UART_MCR, 0);
+	out_8(avr_addr + UART_IER, 0);
+
+	cval |= UART_LCR_STOP | UART_LCR_PARITY | UART_LCR_EPAR;
+
+	out_8(avr_addr + UART_LCR, cval);			/* Set character format */
+
+	out_8(avr_addr + UART_LCR, cval | UART_LCR_DLAB);	/* set DLAB */
+	out_8(avr_addr + UART_DLL, quot & 0xff);		/* LS of divisor */
+	out_8(avr_addr + UART_DLM, quot >> 8);			/* MS of divisor */
+	out_8(avr_addr + UART_LCR, cval);			/* reset DLAB */
+	out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO);	/* enable FIFO */
+}
+
+void avr_uart_send(const char c)
+{
+	if (!avr_addr || !avr_clock)
+		return;
+
+	out_8(avr_addr + UART_TX, c);
+	out_8(avr_addr + UART_TX, c);
+	out_8(avr_addr + UART_TX, c);
+	out_8(avr_addr + UART_TX, c);
+}
+
+static void __init ls_uart_init(void)
+{
+	local_irq_disable();
+
+#ifndef CONFIG_SERIAL_8250
+	out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO);	/* enable FIFO */
+	out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO |
+	      UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);	/* clear FIFOs */
+	out_8(avr_addr + UART_FCR, 0);
+	out_8(avr_addr + UART_IER, 0);
+
+	/* Clear up interrupts */
+	(void) in_8(avr_addr + UART_LSR);
+	(void) in_8(avr_addr + UART_RX);
+	(void) in_8(avr_addr + UART_IIR);
+	(void) in_8(avr_addr + UART_MSR);
+#endif
+	avr_uart_configure();
+
+	local_irq_enable();
+}
+
+static int __init ls_uarts_init(void)
+{
+	struct device_node *avr;
+	phys_addr_t phys_addr = 0;
+	const u32 *prop;
+	int len;
+
+	avr = of_find_node_by_path("/soc10x/serial@80004500");
+	if (!avr)
+		return -EINVAL;
+
+	/* Both properties are mandatory; bail out instead of dereferencing
+	 * a missing one. */
+	prop = get_property(avr, "clock-frequency", &len);
+	if (prop)
+		avr_clock = *prop;
+	prop = get_property(avr, "reg", &len);
+	if (prop)
+		phys_addr = prop[0];
+
+	if (!avr_clock || !phys_addr)
+		return -EINVAL;
+
+	avr_addr = ioremap(phys_addr, 32);
+	if (!avr_addr)
+		return -EFAULT;
+
+	ls_uart_init();
+
+	INIT_WORK(&wd_work, wd_stop);
+	schedule_work(&wd_work);
+
+	return 0;
+}
+
+late_initcall(ls_uarts_init);
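A short worked example of the AVR_QUOT() divisor rounding used by avr_uart_configure() above; the 33 MHz clock value is hypothetical, the real one comes from the clock-frequency property read in ls_uarts_init():

	/* AVR_QUOT rounds clock / (16 * 9600) to the nearest integer by
	 * adding half a divisor step (8 * 9600) before dividing:
	 *
	 *   plain truncation:   33000000 / 153600          = 214  (214.84...)
	 *   AVR_QUOT:          (33000000 + 76800) / 153600 = 215  (nearest)
	 */
	unsigned int quot = AVR_QUOT(33000000);	/* == 215 */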
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index bdb475c..3fcc85f 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -60,7 +60,7 @@
 
 extern int tsi108_setup_pci(struct device_node *dev);
 extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
-extern void tsi108_pci_int_init(void);
+extern void tsi108_pci_int_init(struct device_node *node);
 extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc);
 
 int mpc7448_hpc2_exclude_device(u_char bus, u_char devfn)
@@ -71,65 +71,6 @@
 		return PCIBIOS_SUCCESSFUL;
 }
 
-/*
- * find pci slot by devfn in interrupt map of OF tree
- */
-u8 find_slot_by_devfn(unsigned int *interrupt_map, unsigned int devfn)
-{
-	int i;
-	unsigned int tmp;
-	for (i = 0; i < 4; i++){
-		tmp = interrupt_map[i*4*7];
-		if ((tmp >> 11) == (devfn >> 3))
-			return i;
-	}
-	return i;
-}
-
-/*
- * Scans the interrupt map for pci device
- */
-void mpc7448_hpc2_fixup_irq(struct pci_dev *dev)
-{
-	struct pci_controller *hose;
-	struct device_node *node;
-	const unsigned int *interrupt;
-	int busnr;
-	int len;
-	u8 slot;
-	u8 pin;
-
-	/* Lookup the hose */
-	busnr = dev->bus->number;
-	hose = pci_bus_to_hose(busnr);
-	if (!hose)
-		printk(KERN_ERR "No pci hose found\n");
-
-	/* Check it has an OF node associated */
-	node = (struct device_node *) hose->arch_data;
-	if (!node)
-		printk(KERN_ERR "No pci node found\n");
-
-	interrupt = get_property(node, "interrupt-map", &len);
-	slot = find_slot_by_devfn(interrupt, dev->devfn);
-	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
-	if (pin == 0 || pin > 4)
-		pin = 1;
-	pin--;
-	dev->irq  = interrupt[slot*4*7 + pin*7 + 5];
-	DBG("TSI_PCI: dev->irq = 0x%x\n", dev->irq);
-}
-/* temporary pci irq map fixup*/
-
-void __init mpc7448_hpc2_pcibios_fixup(void)
-{
-	struct pci_dev *dev = NULL;
-	for_each_pci_dev(dev) {
-		mpc7448_hpc2_fixup_irq(dev);
-		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
-	}
-}
-
 static void __init mpc7448_hpc2_setup_arch(void)
 {
 	struct device_node *cpu;
@@ -192,9 +133,12 @@
 {
 	struct mpic *mpic;
 	phys_addr_t mpic_paddr = 0;
+	struct device_node *tsi_pic;
+#ifdef CONFIG_PCI
 	unsigned int cascade_pci_irq;
 	struct device_node *tsi_pci;
-	struct device_node *tsi_pic;
+	struct device_node *cascade_node = NULL;
+#endif
 
 	tsi_pic = of_find_node_by_type(NULL, "open-pic");
 	if (tsi_pic) {
@@ -208,31 +152,41 @@
 		return;
 	}
 
-	DBG("%s: tsi108pic phys_addr = 0x%x\n", __FUNCTION__,
+	DBG("%s: tsi108 pic phys_addr = 0x%x\n", __FUNCTION__,
 	    (u32) mpic_paddr);
 
 	mpic = mpic_alloc(tsi_pic, mpic_paddr,
 			MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
 			MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
-			0, /* num_sources used */
-			0, /* num_sources used */
+			24,
+			NR_IRQS-4, /* num_sources used */
 			"Tsi108_PIC");
 
-	BUG_ON(mpic == NULL); /* XXXX */
+	BUG_ON(mpic == NULL);
+
+	mpic_assign_isu(mpic, 0, mpic_paddr + 0x100);
+
 	mpic_init(mpic);
 
+#ifdef CONFIG_PCI
 	tsi_pci = of_find_node_by_type(NULL, "pci");
-	if (tsi_pci == 0) {
+	if (tsi_pci == NULL) {
 		printk("%s: No tsi108 pci node found !\n", __FUNCTION__);
 		return;
 	}
+	cascade_node = of_find_node_by_type(NULL, "pic-router");
+	if (cascade_node == NULL) {
+		printk("%s: No tsi108 pci cascade node found !\n", __FUNCTION__);
+		return;
+	}
 
 	cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0);
+	DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __FUNCTION__,
+	    (u32) cascade_pci_irq);
+	tsi108_pci_int_init(cascade_node);
 	set_irq_data(cascade_pci_irq, mpic);
 	set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
-
-	tsi108_pci_int_init();
-
+#endif
 	/* Configure MPIC outputs to CPU0 */
 	tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
 	of_node_put(tsi_pic);
@@ -290,7 +244,6 @@
 		return 1;
 	}
 	return 0;
-
 }
 
 define_machine(mpc7448_hpc2){
@@ -300,7 +253,6 @@
 	.init_IRQ 		= mpc7448_hpc2_init_IRQ,
 	.show_cpuinfo 		= mpc7448_hpc2_show_cpuinfo,
 	.get_irq 		= mpic_get_irq,
-	.pcibios_fixup 		= mpc7448_hpc2_pcibios_fixup,
 	.restart 		= mpc7448_hpc2_restart,
 	.calibrate_decr 	= generic_calibrate_decr,
 	.machine_check_exception= mpc7448_machine_check_exception,
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
index dee4eb4..13ac301 100644
--- a/arch/powerpc/platforms/iseries/Makefile
+++ b/arch/powerpc/platforms/iseries/Makefile
@@ -1,5 +1,7 @@
 EXTRA_CFLAGS	+= -mno-minimal-toc
 
+extra-y += dt.o
+
 obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt_mod.o mf.o lpevents.o \
 	hvcall.o proc.o htab.o iommu.o misc.o irq.o
 obj-$(CONFIG_PCI) += pci.o vpdinfo.o
@@ -7,5 +9,9 @@
 obj-$(CONFIG_VIOPATH) += viopath.o
 obj-$(CONFIG_MODULES) += ksyms.o
 
+quiet_cmd_dt_strings = DT_STR  $@
+      cmd_dt_strings = $(OBJCOPY) --rename-section .rodata.str1.8=.dt_strings \
+				$< $@
+
 $(obj)/dt_mod.o:	$(obj)/dt.o
-	@$(OBJCOPY) --rename-section .rodata.str1.8=.dt_strings $(obj)/dt.o $(obj)/dt_mod.o
+	$(call if_changed,dt_strings)
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
index e305dee..9e8a334 100644
--- a/arch/powerpc/platforms/iseries/dt.c
+++ b/arch/powerpc/platforms/iseries/dt.c
@@ -41,6 +41,7 @@
 #include "call_pci.h"
 #include "pci.h"
 #include "it_exp_vpd_panel.h"
+#include "naca.h"
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -205,13 +206,11 @@
 	dt_prop(dt, name, &data, sizeof(u32));
 }
 
-#ifdef notyet
 static void __init dt_prop_u64(struct iseries_flat_dt *dt, const char *name,
 		u64 data)
 {
 	dt_prop(dt, name, &data, sizeof(u64));
 }
-#endif
 
 static void __init dt_prop_u64_list(struct iseries_flat_dt *dt,
 		const char *name, u64 *data, int n)
@@ -306,6 +305,17 @@
 	dt_prop_u32(dt, "ibm,partition-no", HvLpConfig_getLpIndex());
 }
 
+static void __init dt_initrd(struct iseries_flat_dt *dt)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (naca.xRamDisk) {
+		dt_prop_u64(dt, "linux,initrd-start", (u64)naca.xRamDisk);
+		dt_prop_u64(dt, "linux,initrd-end",
+			(u64)naca.xRamDisk + naca.xRamDiskSize * HW_PAGE_SIZE);
+	}
+#endif
+}
+
 static void __init dt_do_vdevice(struct iseries_flat_dt *dt,
 		const char *name, u32 reg, int unit,
 		const char *type, const char *compat, int end)
@@ -641,6 +651,7 @@
 	/* /chosen */
 	dt_start_node(iseries_dt, "chosen");
 	dt_prop_str(iseries_dt, "bootargs", cmd_line);
+	dt_initrd(iseries_dt);
 	dt_end_node(iseries_dt);
 
 	dt_cpus(iseries_dt);
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index 218817d..d7a756d 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -27,6 +27,7 @@
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
 #include <linux/list.h>
+#include <linux/pci.h>
 
 #include <asm/iommu.h>
 #include <asm/tce.h>
@@ -114,12 +115,10 @@
 {
 	struct iommu_table_cb *parms;
 
-	parms = kmalloc(sizeof(*parms), GFP_KERNEL);
+	parms = kzalloc(sizeof(*parms), GFP_KERNEL);
 	if (parms == NULL)
 		panic("PCI_DMA: TCE Table Allocation failed.");
 
-	memset(parms, 0, sizeof(*parms));
-
 	parms->itc_busno = busno;
 	parms->itc_slotno = slotno;
 	parms->itc_virtbus = virtbus;
@@ -168,7 +167,7 @@
 }
 
 
-void iommu_devnode_init_iSeries(struct device_node *dn)
+void iommu_devnode_init_iSeries(struct pci_dev *pdev, struct device_node *dn)
 {
 	struct iommu_table *tbl;
 	struct pci_dn *pdn = PCI_DN(dn);
@@ -186,19 +185,14 @@
 		pdn->iommu_table = iommu_init_table(tbl, -1);
 	else
 		kfree(tbl);
+	pdev->dev.archdata.dma_data = pdn->iommu_table;
 }
 #endif
 
-static void iommu_dev_setup_iSeries(struct pci_dev *dev) { }
-static void iommu_bus_setup_iSeries(struct pci_bus *bus) { }
-
 void iommu_init_early_iSeries(void)
 {
 	ppc_md.tce_build = tce_build_iSeries;
 	ppc_md.tce_free  = tce_free_iSeries;
 
-	ppc_md.iommu_dev_setup = iommu_dev_setup_iSeries;
-	ppc_md.iommu_bus_setup = iommu_bus_setup_iSeries;
-
-	pci_iommu_init();
+	pci_dma_ops = &dma_iommu_ops;
 }
diff --git a/arch/powerpc/platforms/iseries/ksyms.c b/arch/powerpc/platforms/iseries/ksyms.c
index a220084..2430848 100644
--- a/arch/powerpc/platforms/iseries/ksyms.c
+++ b/arch/powerpc/platforms/iseries/ksyms.c
@@ -19,9 +19,3 @@
 EXPORT_SYMBOL(HvCall5);
 EXPORT_SYMBOL(HvCall6);
 EXPORT_SYMBOL(HvCall7);
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(local_get_flags);
-EXPORT_SYMBOL(local_irq_disable);
-EXPORT_SYMBOL(local_irq_restore);
-#endif
diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S
index 7641fc7..2c6ff0f 100644
--- a/arch/powerpc/platforms/iseries/misc.S
+++ b/arch/powerpc/platforms/iseries/misc.S
@@ -19,39 +19,8 @@
 
 	.text
 
-/* unsigned long local_save_flags(void) */
-_GLOBAL(local_get_flags)
-	lbz	r3,PACAPROCENABLED(r13)
-	blr
-
-/* unsigned long local_irq_disable(void) */
-_GLOBAL(local_irq_disable)
-	lbz	r3,PACAPROCENABLED(r13)
-	li	r4,0
-	stb	r4,PACAPROCENABLED(r13)
-	blr			/* Done */
-
-/* void local_irq_restore(unsigned long flags) */
-_GLOBAL(local_irq_restore)
-	lbz	r5,PACAPROCENABLED(r13)
-	 /* Check if things are setup the way we want _already_. */
-	cmpw	0,r3,r5
-	beqlr
-	/* are we enabling interrupts? */
-	cmpdi	0,r3,0
-	stb	r3,PACAPROCENABLED(r13)
-	beqlr
-	/* Check pending interrupts */
-	/*   A decrementer, IPI or PMC interrupt may have occurred
-	 *   while we were in the hypervisor (which enables) */
-	ld	r4,PACALPPACAPTR(r13)
-	ld	r4,LPPACAANYINT(r4)
-	cmpdi	r4,0
-	beqlr
-
-	/*
-	 * Handle pending interrupts in interrupt context
-	 */
+/* Handle pending interrupts in interrupt context */
+_GLOBAL(iseries_handle_interrupts)
 	li	r0,0x5555
 	sc
 	blr
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
index 4aa165e..4a06d9c 100644
--- a/arch/powerpc/platforms/iseries/pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -156,53 +156,6 @@
 }
 
 /*
- * iSeries_pcibios_init
- *
- * Description:
- *   This function checks for all possible system PCI host bridges that connect
- *   PCI buses.  The system hypervisor is queried as to the guest partition
- *   ownership status.  A pci_controller is built for any bus which is partially
- *   owned or fully owned by this guest partition.
- */
-void iSeries_pcibios_init(void)
-{
-	struct pci_controller *phb;
-	struct device_node *root = of_find_node_by_path("/");
-	struct device_node *node = NULL;
-
-	if (root == NULL) {
-		printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
-				"of device tree\n");
-		return;
-	}
-	while ((node = of_get_next_child(root, node)) != NULL) {
-		HvBusNumber bus;
-		const u32 *busp;
-
-		if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
-			continue;
-
-		busp = get_property(node, "bus-range", NULL);
-		if (busp == NULL)
-			continue;
-		bus = *busp;
-		printk("bus %d appears to exist\n", bus);
-		phb = pcibios_alloc_controller(node);
-		if (phb == NULL)
-			continue;
-
-		phb->pci_mem_offset = phb->local_number = bus;
-		phb->first_busno = bus;
-		phb->last_busno = bus;
-		phb->ops = &iSeries_pci_ops;
-	}
-
-	of_node_put(root);
-
-	pci_devs_phb_init();
-}
-
-/*
  * iSeries_pci_final_fixup(void)
  */
 void __init iSeries_pci_final_fixup(void)
@@ -253,7 +206,7 @@
 			PCI_DN(node)->pcidev = pdev;
 			allocate_device_bars(pdev);
 			iSeries_Device_Information(pdev, DeviceCount);
-			iommu_devnode_init_iSeries(node);
+			iommu_devnode_init_iSeries(pdev, node);
 		} else
 			printk("PCI: Device Tree not found for 0x%016lX\n",
 					(unsigned long)pdev);
@@ -438,11 +391,7 @@
 /*
  * Read MM I/O Instructions for the iSeries
  * On MM I/O error, all ones are returned and iSeries_pci_IoError is cal
- * else, data is returned in big Endian format.
- *
- * iSeries_Read_Byte = Read Byte  ( 8 bit)
- * iSeries_Read_Word = Read Word  (16 bit)
- * iSeries_Read_Long = Read Long  (32 bit)
+ * else, data is returned in Big Endian format.
  */
 static u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
 {
@@ -462,14 +411,15 @@
 			num_printed = 0;
 		}
 		if (num_printed++ < 10)
-			printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n", IoAddress);
+			printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n",
+			       IoAddress);
 		return 0xff;
 	}
 	do {
 		HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0);
 	} while (CheckReturnCode("RDB", DevNode, &retry, ret.rc) != 0);
 
-	return (u8)ret.value;
+	return ret.value;
 }
 
 static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
@@ -490,7 +440,8 @@
 			num_printed = 0;
 		}
 		if (num_printed++ < 10)
-			printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n", IoAddress);
+			printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n",
+			       IoAddress);
 		return 0xffff;
 	}
 	do {
@@ -498,7 +449,7 @@
 				BarOffset, 0);
 	} while (CheckReturnCode("RDW", DevNode, &retry, ret.rc) != 0);
 
-	return swab16((u16)ret.value);
+	return ret.value;
 }
 
 static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
@@ -519,7 +470,8 @@
 			num_printed = 0;
 		}
 		if (num_printed++ < 10)
-			printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n", IoAddress);
+			printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n",
+			       IoAddress);
 		return 0xffffffff;
 	}
 	do {
@@ -527,15 +479,12 @@
 				BarOffset, 0);
 	} while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0);
 
-	return swab32((u32)ret.value);
+	return ret.value;
 }
 
 /*
  * Write MM I/O Instructions for the iSeries
  *
- * iSeries_Write_Byte = Write Byte (8 bit)
- * iSeries_Write_Word = Write Word(16 bit)
- * iSeries_Write_Long = Write Long(32 bit)
  */
 static void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
 {
@@ -581,11 +530,12 @@
 			num_printed = 0;
 		}
 		if (num_printed++ < 10)
-			printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n", IoAddress);
+			printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n",
+			       IoAddress);
 		return;
 	}
 	do {
-		rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0);
+		rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, data, 0);
 	} while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0);
 }
 
@@ -607,231 +557,221 @@
 			num_printed = 0;
 		}
 		if (num_printed++ < 10)
-			printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n", IoAddress);
+			printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n",
+			       IoAddress);
 		return;
 	}
 	do {
-		rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0);
+		rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, data, 0);
 	} while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0);
 }
 
-extern unsigned char __raw_readb(const volatile void __iomem *addr)
+static u8 iseries_readb(const volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return *(volatile unsigned char __force *)addr;
+	return iSeries_Read_Byte(addr);
 }
-EXPORT_SYMBOL(__raw_readb);
 
-extern unsigned short __raw_readw(const volatile void __iomem *addr)
+static u16 iseries_readw(const volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return *(volatile unsigned short __force *)addr;
+	return le16_to_cpu(iSeries_Read_Word(addr));
 }
-EXPORT_SYMBOL(__raw_readw);
 
-extern unsigned int __raw_readl(const volatile void __iomem *addr)
+static u32 iseries_readl(const volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return *(volatile unsigned int __force *)addr;
+	return le32_to_cpu(iSeries_Read_Long(addr));
 }
-EXPORT_SYMBOL(__raw_readl);
 
-extern unsigned long __raw_readq(const volatile void __iomem *addr)
+static u16 iseries_readw_be(const volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return *(volatile unsigned long __force *)addr;
+	return iSeries_Read_Word(addr);
 }
-EXPORT_SYMBOL(__raw_readq);
 
-extern void __raw_writeb(unsigned char v, volatile void __iomem *addr)
+static u32 iseries_readl_be(const volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	*(volatile unsigned char __force *)addr = v;
+	return iSeries_Read_Long(addr);
 }
-EXPORT_SYMBOL(__raw_writeb);
 
-extern void __raw_writew(unsigned short v, volatile void __iomem *addr)
+static void iseries_writeb(u8 data, volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	*(volatile unsigned short __force *)addr = v;
+	iSeries_Write_Byte(data, addr);
 }
-EXPORT_SYMBOL(__raw_writew);
 
-extern void __raw_writel(unsigned int v, volatile void __iomem *addr)
+static void iseries_writew(u16 data, volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	*(volatile unsigned int __force *)addr = v;
+	iSeries_Write_Word(cpu_to_le16(data), addr);
 }
-EXPORT_SYMBOL(__raw_writel);
 
-extern void __raw_writeq(unsigned long v, volatile void __iomem *addr)
+static void iseries_writel(u32 data, volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	*(volatile unsigned long __force *)addr = v;
+	iSeries_Write_Long(cpu_to_le32(data), addr);
 }
-EXPORT_SYMBOL(__raw_writeq);
 
-int in_8(const volatile unsigned char __iomem *addr)
+static void iseries_writew_be(u16 data, volatile void __iomem *addr)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return iSeries_Read_Byte(addr);
-	return __in_8(addr);
+	iSeries_Write_Word(data, addr);
 }
-EXPORT_SYMBOL(in_8);
 
-void out_8(volatile unsigned char __iomem *addr, int val)
+static void iseries_writel_be(u32 data, volatile void __iomem *addr)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		iSeries_Write_Byte(val, addr);
-	else
-		__out_8(addr, val);
+	iSeries_Write_Long(data, addr);
 }
-EXPORT_SYMBOL(out_8);
 
-int in_le16(const volatile unsigned short __iomem *addr)
+static void iseries_readsb(const volatile void __iomem *addr, void *buf,
+			   unsigned long count)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return iSeries_Read_Word(addr);
-	return __in_le16(addr);
+	u8 *dst = buf;
+	while (count-- > 0)
+		*(dst++) = iSeries_Read_Byte(addr);
 }
-EXPORT_SYMBOL(in_le16);
 
-int in_be16(const volatile unsigned short __iomem *addr)
+static void iseries_readsw(const volatile void __iomem *addr, void *buf,
+			   unsigned long count)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return __in_be16(addr);
+	u16 *dst = buf;
+	while (count-- > 0)
+		*(dst++) = iSeries_Read_Word(addr);
 }
-EXPORT_SYMBOL(in_be16);
 
-void out_le16(volatile unsigned short __iomem *addr, int val)
+static void iseries_readsl(const volatile void __iomem *addr, void *buf,
+			   unsigned long count)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		iSeries_Write_Word(val, addr);
-	else
-		__out_le16(addr, val);
+	u32 *dst = buf;
+	while (count-- > 0)
+		*(dst++) = iSeries_Read_Long(addr);
 }
-EXPORT_SYMBOL(out_le16);
 
-void out_be16(volatile unsigned short __iomem *addr, int val)
+static void iseries_writesb(volatile void __iomem *addr, const void *buf,
+			    unsigned long count)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	__out_be16(addr, val);
+	const u8 *src = buf;
+	while (count-- > 0)
+		iSeries_Write_Byte(*(src++), addr);
 }
-EXPORT_SYMBOL(out_be16);
 
-unsigned in_le32(const volatile unsigned __iomem *addr)
+static void iseries_writesw(volatile void __iomem *addr, const void *buf,
+			    unsigned long count)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return iSeries_Read_Long(addr);
-	return __in_le32(addr);
+	const u16 *src = buf;
+	while (count-- > 0)
+		iSeries_Write_Word(*(src++), addr);
 }
-EXPORT_SYMBOL(in_le32);
 
-unsigned in_be32(const volatile unsigned __iomem *addr)
+static void iseries_writesl(volatile void __iomem *addr, const void *buf,
+			    unsigned long count)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return __in_be32(addr);
+	const u32 *src = buf;
+	while (count-- > 0)
+		iSeries_Write_Long(*(src++), addr);
 }
-EXPORT_SYMBOL(in_be32);
 
-void out_le32(volatile unsigned __iomem *addr, int val)
+static void iseries_memset_io(volatile void __iomem *addr, int c,
+			      unsigned long n)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		iSeries_Write_Long(val, addr);
-	else
-		__out_le32(addr, val);
-}
-EXPORT_SYMBOL(out_le32);
+	volatile char __iomem *d = addr;
 
-void out_be32(volatile unsigned __iomem *addr, int val)
+	while (n-- > 0)
+		iSeries_Write_Byte(c, d++);
+}
+
+static void iseries_memcpy_fromio(void *dest, const volatile void __iomem *src,
+				  unsigned long n)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
+	char *d = dest;
+	const volatile char __iomem *s = src;
 
-	__out_be32(addr, val);
+	while (n-- > 0)
+		*d++ = iSeries_Read_Byte(s++);
 }
-EXPORT_SYMBOL(out_be32);
 
-unsigned long in_le64(const volatile unsigned long __iomem *addr)
+static void iseries_memcpy_toio(volatile void __iomem *dest, const void *src,
+				unsigned long n)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
+	const char *s = src;
+	volatile char __iomem *d = dest;
 
-	return __in_le64(addr);
+	while (n-- > 0)
+		iSeries_Write_Byte(*s++, d++);
 }
-EXPORT_SYMBOL(in_le64);
 
-unsigned long in_be64(const volatile unsigned long __iomem *addr)
+/* We only set MMIO ops.  The default PIO ops will default to the
+ * MMIO ops + pci_io_base, which is 0 on iSeries as expected, so
+ * both should work.
+ *
+ * Note that we don't implement the readq/writeq versions as
+ * I don't know of an HV call for doing so.  Thus, the default
+ * operation will be used instead, which will fault as the value
+ * returned by iSeries for MMIO addresses always hits a non-mapped
+ * area.  This is as good as the BUG() we used to have there.
+ */
+static struct ppc_pci_io __initdata iseries_pci_io = {
+	.readb = iseries_readb,
+	.readw = iseries_readw,
+	.readl = iseries_readl,
+	.readw_be = iseries_readw_be,
+	.readl_be = iseries_readl_be,
+	.writeb = iseries_writeb,
+	.writew = iseries_writew,
+	.writel = iseries_writel,
+	.writew_be = iseries_writew_be,
+	.writel_be = iseries_writel_be,
+	.readsb = iseries_readsb,
+	.readsw = iseries_readsw,
+	.readsl = iseries_readsl,
+	.writesb = iseries_writesb,
+	.writesw = iseries_writesw,
+	.writesl = iseries_writesl,
+	.memset_io = iseries_memset_io,
+	.memcpy_fromio = iseries_memcpy_fromio,
+	.memcpy_toio = iseries_memcpy_toio,
+};
+
+/*
+ * iSeries_pcibios_init
+ *
+ * Description:
+ *   This function checks for all possible system PCI host bridges that connect
+ *   PCI buses.  The system hypervisor is queried as to the guest partition
+ *   ownership status.  A pci_controller is built for any bus which is partially
+ *   owned or fully owned by this guest partition.
+ */
+void __init iSeries_pcibios_init(void)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
+	struct pci_controller *phb;
+	struct device_node *root = of_find_node_by_path("/");
+	struct device_node *node = NULL;
 
-	return __in_be64(addr);
+	/* Install IO hooks */
+	ppc_pci_io = iseries_pci_io;
+
+	if (root == NULL) {
+		printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
+				"of device tree\n");
+		return;
+	}
+	while ((node = of_get_next_child(root, node)) != NULL) {
+		HvBusNumber bus;
+		const u32 *busp;
+
+		if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
+			continue;
+
+		busp = get_property(node, "bus-range", NULL);
+		if (busp == NULL)
+			continue;
+		bus = *busp;
+		printk("bus %d appears to exist\n", bus);
+		phb = pcibios_alloc_controller(node);
+		if (phb == NULL)
+			continue;
+
+		phb->pci_mem_offset = phb->local_number = bus;
+		phb->first_busno = bus;
+		phb->last_busno = bus;
+		phb->ops = &iSeries_pci_ops;
+	}
+
+	of_node_put(root);
+
+	pci_devs_phb_init();
 }
-EXPORT_SYMBOL(in_be64);
 
-void out_le64(volatile unsigned long __iomem *addr, unsigned long val)
-{
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	__out_le64(addr, val);
-}
-EXPORT_SYMBOL(out_le64);
-
-void out_be64(volatile unsigned long __iomem *addr, unsigned long val)
-{
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	__out_be64(addr, val);
-}
-EXPORT_SYMBOL(out_be64);
-
-void memset_io(volatile void __iomem *addr, int c, unsigned long n)
-{
-	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
-		volatile char __iomem *d = addr;
-
-		while (n-- > 0) {
-			iSeries_Write_Byte(c, d++);
-		}
-	} else
-		eeh_memset_io(addr, c, n);
-}
-EXPORT_SYMBOL(memset_io);
-
-void memcpy_fromio(void *dest, const volatile void __iomem *src,
-                                 unsigned long n)
-{
-	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
-		char *d = dest;
-		const volatile char __iomem *s = src;
-
-		while (n-- > 0) {
-			*d++ = iSeries_Read_Byte(s++);
-		}
-	} else
-		eeh_memcpy_fromio(dest, src, n);
-}
-EXPORT_SYMBOL(memcpy_fromio);
-
-void memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
-{
-	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
-		const char *s = src;
-		volatile char __iomem *d = dest;
-
-		while (n-- > 0) {
-			iSeries_Write_Byte(*s++, d++);
-		}
-	} else
-		eeh_memcpy_toio(dest, src, n);
-}
-EXPORT_SYMBOL(memcpy_toio);
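Putting the hooks above together, the read path the comment block describes looks roughly like this (conceptual sketch; the actual dispatch lives in the generic powerpc I/O accessors, assuming the indirect-I/O routing is compiled in):

	/*
	 * readl(addr)                                   generic accessor
	 *   -> ppc_pci_io.readl(addr)                   table installed in iSeries_pcibios_init()
	 *     -> iseries_readl(addr)
	 *       -> le32_to_cpu(iSeries_Read_Long(addr))  HV PCI BAR-load call underneath
	 */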
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 6f73469..bdf2afb 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -21,7 +21,6 @@
 #include <linux/smp.h>
 #include <linux/param.h>
 #include <linux/string.h>
-#include <linux/initrd.h>
 #include <linux/seq_file.h>
 #include <linux/kdev_t.h>
 #include <linux/major.h>
@@ -80,8 +79,6 @@
 static void iSeries_pci_final_fixup(void) { }
 #endif
 
-extern int rd_size;		/* Defined in drivers/block/rd.c */
-
 extern unsigned long iSeries_recal_tb;
 extern unsigned long iSeries_recal_titan;
 
@@ -295,24 +292,6 @@
 {
 	DBG(" -> iSeries_init_early()\n");
 
-#if defined(CONFIG_BLK_DEV_INITRD)
-	/*
-	 * If the init RAM disk has been configured and there is
-	 * a non-zero starting address for it, set it up
-	 */
-	if (naca.xRamDisk) {
-		initrd_start = (unsigned long)__va(naca.xRamDisk);
-		initrd_end = initrd_start + naca.xRamDiskSize * HW_PAGE_SIZE;
-		initrd_below_start_ok = 1;	// ramdisk in kernel space
-		ROOT_DEV = Root_RAM0;
-		if (((rd_size * 1024) / HW_PAGE_SIZE) < naca.xRamDiskSize)
-			rd_size = (naca.xRamDiskSize * HW_PAGE_SIZE) / 1024;
-	} else
-#endif /* CONFIG_BLK_DEV_INITRD */
-	{
-	    /* ROOT_DEV = MKDEV(VIODASD_MAJOR, 1); */
-	}
-
 	iSeries_recal_tb = get_tb();
 	iSeries_recal_titan = HvCallXm_loadTod();
 
@@ -331,17 +310,6 @@
 
 	mf_init();
 
-	/* If we were passed an initrd, set the ROOT_DEV properly if the values
-	 * look sensible. If not, clear initrd reference.
-	 */
-#ifdef CONFIG_BLK_DEV_INITRD
-	if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
-	    initrd_end > initrd_start)
-		ROOT_DEV = Root_RAM0;
-	else
-		initrd_start = initrd_end = 0;
-#endif /* CONFIG_BLK_DEV_INITRD */
-
 	DBG(" <- iSeries_init_early()\n");
 }
 
@@ -649,6 +617,16 @@
 void __init iSeries_init_IRQ(void) { }
 #endif
 
+static void __iomem *iseries_ioremap(phys_addr_t address, unsigned long size,
+				     unsigned long flags)
+{
+	return (void __iomem *)address;
+}
+
+static void iseries_iounmap(volatile void __iomem *token)
+{
+}
+
 /*
  * iSeries has no legacy IO, anything calling this function has to
  * fail or bad things will happen
@@ -665,6 +643,8 @@
 		return 0;
 
 	hpte_init_iSeries();
+	/* iSeries does not support 16M pages */
+	cur_cpu_spec->cpu_features &= ~CPU_FTR_16M_PAGE;
 
 	return 1;
 }
@@ -687,6 +667,8 @@
 	.progress	= iSeries_progress,
 	.probe		= iseries_probe,
 	.check_legacy_ioport	= iseries_check_legacy_ioport,
+	.ioremap	= iseries_ioremap,
+	.iounmap	= iseries_iounmap,
 	/* XXX Implement enable_pmcs for iSeries */
 };
 
@@ -697,7 +679,7 @@
 	/* Identify CPU type. This is done again by the common code later
 	 * on but calling this function multiple times is fine.
 	 */
-	identify_cpu(0);
+	identify_cpu(0, mfspr(SPRN_PVR));
 
 	powerpc_firmware_features |= FW_FEATURE_ISERIES;
 	powerpc_firmware_features |= FW_FEATURE_LPAR;
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
index 04e07e5..84e7ee2 100644
--- a/arch/powerpc/platforms/iseries/viopath.c
+++ b/arch/powerpc/platforms/iseries/viopath.c
@@ -119,10 +119,9 @@
 	struct device_node *node;
 	const char *sysid;
 
-	buf = kmalloc(HW_PAGE_SIZE, GFP_KERNEL);
+	buf = kzalloc(HW_PAGE_SIZE, GFP_KERNEL);
 	if (!buf)
 		return 0;
-	memset(buf, 0, HW_PAGE_SIZE);
 
 	handle = dma_map_single(iSeries_vio_dev, buf, HW_PAGE_SIZE,
 				DMA_FROM_DEVICE);
diff --git a/arch/powerpc/platforms/maple/maple.h b/arch/powerpc/platforms/maple/maple.h
index 0657c57..c6911dd 100644
--- a/arch/powerpc/platforms/maple/maple.h
+++ b/arch/powerpc/platforms/maple/maple.h
@@ -8,5 +8,5 @@
 extern unsigned long maple_get_boot_time(void);
 extern void maple_calibrate_decr(void);
 extern void maple_pci_init(void);
-extern void maple_pcibios_fixup(void);
+extern void maple_pci_irq_fixup(struct pci_dev *dev);
 extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 63b4d1b..3a32ded 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -502,38 +502,29 @@
 }
 
 
-void __init maple_pcibios_fixup(void)
+void __devinit maple_pci_irq_fixup(struct pci_dev *dev)
 {
-	struct pci_dev *dev = NULL;
+	DBG(" -> maple_pci_irq_fixup\n");
 
-	DBG(" -> maple_pcibios_fixup\n");
-
-	for_each_pci_dev(dev) {
-		/* Fixup IRQ for PCIe host */
-		if (u4_pcie != NULL && dev->bus->number == 0 &&
-		    pci_bus_to_host(dev->bus) == u4_pcie) {
-			printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n");
-			dev->irq = irq_create_mapping(NULL, 1);
-			if (dev->irq != NO_IRQ)
-				set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
-			continue;
-		}
-
-		/* Hide AMD8111 IDE interrupt when in legacy mode so
-		 * the driver calls pci_get_legacy_ide_irq()
-		 */
-		if (dev->vendor == PCI_VENDOR_ID_AMD &&
-		    dev->device == PCI_DEVICE_ID_AMD_8111_IDE &&
-		    (dev->class & 5) != 5) {
-			dev->irq = NO_IRQ;
-			continue;
-		}
-
-		/* For all others, map the interrupt from the device-tree */
-		pci_read_irq_line(dev);
+	/* Fixup IRQ for PCIe host */
+	if (u4_pcie != NULL && dev->bus->number == 0 &&
+	    pci_bus_to_host(dev->bus) == u4_pcie) {
+		printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n");
+		dev->irq = irq_create_mapping(NULL, 1);
+		if (dev->irq != NO_IRQ)
+			set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
 	}
 
-	DBG(" <- maple_pcibios_fixup\n");
+	/* Hide AMD8111 IDE interrupt when in legacy mode so
+	 * the driver calls pci_get_legacy_ide_irq()
+	 */
+	if (dev->vendor == PCI_VENDOR_ID_AMD &&
+	    dev->device == PCI_DEVICE_ID_AMD_8111_IDE &&
+	    (dev->class & 5) != 5) {
+		dev->irq = NO_IRQ;
+	}
+
+	DBG(" <- maple_pci_irq_fixup\n");
 }
 
 static void __init maple_fixup_phb_resources(void)
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index fe6b9bf..094989d 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -312,7 +312,7 @@
 	.setup_arch		= maple_setup_arch,
 	.init_early		= maple_init_early,
 	.init_IRQ		= maple_init_IRQ,
-	.pcibios_fixup		= maple_pcibios_fixup,
+	.pci_irq_fixup		= maple_pci_irq_fixup,
 	.pci_get_legacy_ide_irq	= maple_pci_get_legacy_ide_irq,
 	.restart		= maple_restart,
 	.power_off		= maple_power_off,
diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h
index fd71d72..51c2a23 100644
--- a/arch/powerpc/platforms/pasemi/pasemi.h
+++ b/arch/powerpc/platforms/pasemi/pasemi.h
@@ -3,6 +3,5 @@
 
 extern unsigned long pas_get_boot_time(void);
 extern void pas_pci_init(void);
-extern void pas_pcibios_fixup(void);
 
 #endif /* _PASEMI_PASEMI_H */
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index 39020c1..faa618e 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -148,14 +148,6 @@
 }
 
 
-void __init pas_pcibios_fixup(void)
-{
-	struct pci_dev *dev = NULL;
-
-	for_each_pci_dev(dev)
-		pci_read_irq_line(dev);
-}
-
 static void __init pas_fixup_phb_resources(void)
 {
 	struct pci_controller *hose, *tmp;
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 106896c..89d6e29 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -26,6 +26,7 @@
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/console.h>
+#include <linux/pci.h>
 
 #include <asm/prom.h>
 #include <asm/system.h>
@@ -71,6 +72,9 @@
 	/* Setup SMP callback */
 	smp_ops = &pas_smp_ops;
 #endif
+	/* no iommu yet */
+	pci_dma_ops = &dma_direct_ops;
+
 	/* Lookup PCI hosts */
 	pas_pci_init();
 
@@ -81,17 +85,6 @@
 	printk(KERN_DEBUG "Using default idle loop\n");
 }
 
-static void iommu_dev_setup_null(struct pci_dev *dev) { }
-static void iommu_bus_setup_null(struct pci_bus *bus) { }
-
-static void __init pas_init_early(void)
-{
-	/* No iommu code yet */
-	ppc_md.iommu_dev_setup = iommu_dev_setup_null;
-	ppc_md.iommu_bus_setup = iommu_bus_setup_null;
-	pci_direct_iommu_init();
-}
-
 /* No legacy IO on our parts */
 static int pas_check_legacy_ioport(unsigned int baseport)
 {
@@ -173,10 +166,8 @@
 	.name			= "PA Semi PA6T-1682M",
 	.probe			= pas_probe,
 	.setup_arch		= pas_setup_arch,
-	.init_early		= pas_init_early,
 	.init_IRQ		= pas_init_IRQ,
 	.get_irq		= mpic_get_irq,
-	.pcibios_fixup		= pas_pcibios_fixup,
 	.restart		= pas_restart,
 	.power_off		= pas_power_off,
 	.halt			= pas_halt,
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index afa593a..c3a8941 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -18,11 +18,11 @@
 
 #define OLD_BACKLIGHT_MAX 15
 
-static void pmac_backlight_key_worker(void *data);
-static void pmac_backlight_set_legacy_worker(void *data);
+static void pmac_backlight_key_worker(struct work_struct *work);
+static void pmac_backlight_set_legacy_worker(struct work_struct *work);
 
-static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL);
-static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL);
+static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker);
+static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker);
 
 /* Although these variables are used in interrupt context, it makes no sense to
  * protect them. No user is able to produce enough key events per second and
@@ -94,7 +94,7 @@
 	return level;
 }
 
-static void pmac_backlight_key_worker(void *data)
+static void pmac_backlight_key_worker(struct work_struct *work)
 {
 	if (atomic_read(&kernel_backlight_disabled))
 		return;
@@ -166,7 +166,7 @@
 	return error;
 }
 
-static void pmac_backlight_set_legacy_worker(void *data)
+static void pmac_backlight_set_legacy_worker(struct work_struct *work)
 {
 	if (atomic_read(&kernel_backlight_disabled))
 		return;
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index e49621b..c29a6a0 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -486,10 +486,6 @@
 
 static u32 save_fcr[6];
 static u32 save_mbcr;
-static u32 save_gpio_levels[2];
-static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
-static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
-static u32 save_unin_clock_ctl;
 static struct dbdma_regs save_dbdma[13];
 static struct dbdma_regs save_alt_dbdma[13];
 
@@ -1548,6 +1544,10 @@
 
 
 #ifdef CONFIG_PM
+static u32 save_gpio_levels[2];
+static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
+static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
+static u32 save_unin_clock_ctl;
 
 static void keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
 {
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 9923adc..f42475b 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -48,7 +48,6 @@
 static int has_second_ohare;
 #endif /* CONFIG_PPC64 */
 
-extern u8 pci_cache_line_size;
 extern int pcibios_assign_bus_offset;
 
 struct device_node *k2_skiplist[2];
@@ -985,30 +984,23 @@
 	return 0;
 }
 
-void __init pmac_pcibios_fixup(void)
+void __devinit pmac_pci_irq_fixup(struct pci_dev *dev)
 {
-	struct pci_dev* dev = NULL;
-
-	for_each_pci_dev(dev) {
-		/* Read interrupt from the device-tree */
-		pci_read_irq_line(dev);
-
 #ifdef CONFIG_PPC32
-		/* Fixup interrupt for the modem/ethernet combo controller.
-		 * on machines with a second ohare chip.
-		 * The number in the device tree (27) is bogus (correct for
-		 * the ethernet-only board but not the combo ethernet/modem
-		 * board). The real interrupt is 28 on the second controller
-		 * -> 28+32 = 60.
-		 */
-		if (has_second_ohare &&
-		    dev->vendor == PCI_VENDOR_ID_DEC &&
-		    dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) {
-			dev->irq = irq_create_mapping(NULL, 60);
-			set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
-		}
-#endif /* CONFIG_PPC32 */
+	/* Fixup interrupt for the modem/ethernet combo controller
+	 * on machines with a second ohare chip.
+	 * The number in the device tree (27) is bogus (correct for
+	 * the ethernet-only board but not the combo ethernet/modem
+	 * board). The real interrupt is 28 on the second controller
+	 * -> 28+32 = 60.
+	 */
+	if (has_second_ohare &&
+	    dev->vendor == PCI_VENDOR_ID_DEC &&
+	    dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) {
+		dev->irq = irq_create_mapping(NULL, 60);
+		set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
 	}
+#endif /* CONFIG_PPC32 */
 }
 
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 94e7b24..6e090a7 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -20,7 +20,7 @@
 extern int pmac_set_rtc_time(struct rtc_time *);
 extern void pmac_read_rtc_time(void);
 extern void pmac_calibrate_decr(void);
-extern void pmac_pcibios_fixup(void);
+extern void pmac_pci_irq_fixup(struct pci_dev *);
 extern void pmac_pci_init(void);
 extern unsigned long pmac_ide_get_base(int index);
 extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 824a618..d949e9df 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -70,6 +70,7 @@
 #include <asm/pmac_feature.h>
 #include <asm/time.h>
 #include <asm/of_device.h>
+#include <asm/of_platform.h>
 #include <asm/mmu_context.h>
 #include <asm/iommu.h>
 #include <asm/smu.h>
@@ -361,7 +362,7 @@
 void *boot_host;
 int boot_target;
 int boot_part;
-extern dev_t boot_dev;
+static dev_t boot_dev;
 
 #ifdef CONFIG_SCSI
 void __init note_scsi_host(struct device_node *node, void *host)
@@ -676,8 +677,6 @@
 
 #ifdef CONFIG_PPC32
 	/* isa_io_base gets set in pmac_pci_init */
-	isa_mem_base = PMAC_ISA_MEM_BASE;
-	pci_dram_offset = PMAC_PCI_DRAM_OFFSET;
 	ISA_DMA_THRESHOLD = ~0L;
 	DMA_MODE_READ = 1;
 	DMA_MODE_WRITE = 2;
@@ -727,7 +726,7 @@
 	.show_cpuinfo		= pmac_show_cpuinfo,
 	.init_IRQ		= pmac_pic_init,
 	.get_irq		= NULL,	/* changed later */
-	.pcibios_fixup		= pmac_pcibios_fixup,
+	.pci_irq_fixup		= pmac_pci_irq_fixup,
 	.restart		= pmac_restart,
 	.power_off		= pmac_power_off,
 	.halt			= pmac_halt,
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
new file mode 100644
index 0000000..451bfcd
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -0,0 +1,43 @@
+menu "PS3 Platform Options"
+	depends on PPC_PS3
+
+config PS3_HTAB_SIZE
+	depends on PPC_PS3
+	int "PS3 Platform pagetable size"
+	range 18 20
+	default 20
+	help
+	  This option is only for experts who want to fine-tune the page
+	  table size on their system.  The value here is
+	  expressed as the log2 of the page table size.  Valid values are
+	  18, 19, and 20, corresponding to 256KB, 512KB and 1MB respectively.
+
+	  If unsure, choose the default (20) with the confidence that your
+	  system will have optimal runtime performance.
+
+config PS3_DYNAMIC_DMA
+	depends on PPC_PS3 && EXPERIMENTAL
+	bool "PS3 Platform dynamic DMA page table management"
+	default n
+	help
+	  This option will enable kernel support to take advantage of the
+	  per device dynamic DMA page table management provided by the Cell
+	  processor's IO Controller.  This support incurs some runtime
+	  overhead and also slightly increases kernel memory usage.  The
+	  current implementation should be considered experimental.
+
+	  This support is mainly for Linux kernel development.  If unsure,
+	  say N.
+
+config PS3_USE_LPAR_ADDR
+	depends on PPC_PS3 && EXPERIMENTAL
+	bool "PS3 use lpar address space"
+	default y
+	help
+	  This option is solely for experimentation by experts.  It disables
+	  translation of lpar addresses.  SPE support currently won't work
+	  unless this is set to y.
+
+	  If you have any doubt, choose the default y.
+
+endmenu
diff --git a/arch/powerpc/platforms/ps3/Makefile b/arch/powerpc/platforms/ps3/Makefile
new file mode 100644
index 0000000..3757cfa
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/Makefile
@@ -0,0 +1,4 @@
+obj-y += setup.o mm.o smp.o time.o hvcall.o htab.o repository.o
+obj-y += interrupt.o exports.o os-area.o
+
+obj-$(CONFIG_SPU_BASE) += spu.o
diff --git a/arch/powerpc/platforms/ps3/exports.c b/arch/powerpc/platforms/ps3/exports.c
new file mode 100644
index 0000000..a7e8ffd
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/exports.c
@@ -0,0 +1,27 @@
+/*
+ *  PS3 hvcall exports for modules.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/module.h>
+
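+/*
+ * Defining LV1_CALL before including asm/lv1call.h expands each listed
+ * hypercall into an extern declaration of its low level _lv1_* entry
+ * point plus an EXPORT_SYMBOL for it, making the lv1 calls usable from
+ * modules.
+ */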
+#define LV1_CALL(name, in, out, num)                          \
+  extern s64 _lv1_##name(LV1_##in##_IN_##out##_OUT_ARG_DECL); \
+  EXPORT_SYMBOL(_lv1_##name);
+
+#include <asm/lv1call.h>
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
new file mode 100644
index 0000000..8fe1769
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -0,0 +1,277 @@
+/*
+ *  PS3 pagetable management routines.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/machdep.h>
+#include <asm/lmb.h>
+#include <asm/udbg.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+#include "platform.h"
+
+#if defined(DEBUG)
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...) do { if (0) printk(fmt); } while (0)
+#endif
+
+static hpte_t *htab;
+static unsigned long htab_addr;
+static unsigned char *bolttab;
+static unsigned char *inusetab;
+
+static spinlock_t ps3_bolttab_lock = SPIN_LOCK_UNLOCKED;
+
+#define debug_dump_hpte(_a, _b, _c, _d, _e, _f, _g) \
+	_debug_dump_hpte(_a, _b, _c, _d, _e, _f, _g, __func__, __LINE__)
+static void _debug_dump_hpte(unsigned long pa, unsigned long va,
+	unsigned long group, unsigned long bitmap, hpte_t lhpte, int psize,
+	unsigned long slot, const char* func, int line)
+{
+	DBG("%s:%d: pa     = %lxh\n", func, line, pa);
+	DBG("%s:%d: lpar   = %lxh\n", func, line,
+		ps3_mm_phys_to_lpar(pa));
+	DBG("%s:%d: va     = %lxh\n", func, line, va);
+	DBG("%s:%d: group  = %lxh\n", func, line, group);
+	DBG("%s:%d: bitmap = %lxh\n", func, line, bitmap);
+	DBG("%s:%d: hpte.v = %lxh\n", func, line, lhpte.v);
+	DBG("%s:%d: hpte.r = %lxh\n", func, line, lhpte.r);
+	DBG("%s:%d: psize  = %xh\n", func, line, psize);
+	DBG("%s:%d: slot   = %lxh\n", func, line, slot);
+}
+
+static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
+	unsigned long pa, unsigned long rflags, unsigned long vflags, int psize)
+{
+	unsigned long slot;
+	hpte_t lhpte;
+	int secondary = 0;
+	unsigned long result;
+	unsigned long bitmap;
+	unsigned long flags;
+	unsigned long p_pteg, s_pteg, b_index, b_mask, cb, ci;
+
+	vflags &= ~HPTE_V_SECONDARY; /* this bit is ignored */
+
+	lhpte.v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
+	lhpte.r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
+
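+	/* The secondary PTEG index is the (masked) complement of the
+	 * primary index. */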
+	p_pteg = hpte_group / HPTES_PER_GROUP;
+	s_pteg = ~p_pteg & htab_hash_mask;
+
+	spin_lock_irqsave(&ps3_bolttab_lock, flags);
+
+	BUG_ON(bolttab[p_pteg] == 0xff && bolttab[s_pteg] == 0xff);
+
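+	/* Build a 16 bit occupancy map: primary group slots in the high
+	 * byte, secondary group slots in the low byte. */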
+	bitmap = (inusetab[p_pteg] << 8) | inusetab[s_pteg];
+
+	if (bitmap == 0xffff) {
+		/*
+		 * PTEG is full. Search for victim.
+		 */
+		bitmap &= ~((bolttab[p_pteg] << 8) | bolttab[s_pteg]);
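+		/* Choose a pseudo-random, non-bolted victim slot using the
+		 * low bits of the timebase. */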
+		do {
+			ci = mftb() & 15;
+			cb = 0x8000UL >> ci;
+		} while ((cb & bitmap) == 0);
+	} else {
+		/*
+		 * search free slot in hardware order
+		 *	[primary]	0, 2, 4, 6, 1, 3, 5, 7
+		 *	[secondary]	0, 2, 4, 6, 1, 3, 5, 7
+		 */
+		for (ci = 0; ci < HPTES_PER_GROUP; ci += 2) {
+			cb = 0x8000UL >> ci;
+			if ((cb & bitmap) == 0)
+				goto found;
+		}
+		for (ci = 1; ci < HPTES_PER_GROUP; ci += 2) {
+			cb = 0x8000UL >> ci;
+			if ((cb & bitmap) == 0)
+				goto found;
+		}
+		for (ci = HPTES_PER_GROUP; ci < HPTES_PER_GROUP*2; ci += 2) {
+			cb = 0x8000UL >> ci;
+			if ((cb & bitmap) == 0)
+				goto found;
+		}
+		for (ci = HPTES_PER_GROUP+1; ci < HPTES_PER_GROUP*2; ci += 2) {
+			cb = 0x8000UL >> ci;
+			if ((cb & bitmap) == 0)
+				goto found;
+		}
+	}
+
+found:
+	if (ci < HPTES_PER_GROUP) {
+		slot = p_pteg * HPTES_PER_GROUP + ci;
+	} else {
+		slot = s_pteg * HPTES_PER_GROUP + (ci & 7);
+		/* lhpte.dw0.dw0.h = 1; */
+		vflags |= HPTE_V_SECONDARY;
+		lhpte.v |= HPTE_V_SECONDARY;
+	}
+
+	result = lv1_write_htab_entry(0, slot, lhpte.v, lhpte.r);
+
+	if (result) {
+		debug_dump_hpte(pa, va, hpte_group, bitmap, lhpte, psize, slot);
+		BUG();
+	}
+
+	/*
+	 * If the slot used is not in the primary HPTE group,
+	 * it must be in the secondary HPTE group.
+	 */
+
+	if ((hpte_group ^ slot) & ~(HPTES_PER_GROUP - 1)) {
+		secondary = 1;
+		b_index = s_pteg;
+	} else {
+		secondary = 0;
+		b_index = p_pteg;
+	}
+
+	b_mask = (lhpte.v & HPTE_V_BOLTED) ? 1 << 7 : 0 << 7;
+	bolttab[b_index] |= b_mask >> (slot & 7);
+	b_mask = 1 << 7;
+	inusetab[b_index] |= b_mask >> (slot & 7);
+	spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
+
+	return (slot & 7) | (secondary << 3);
+}
+
+static long ps3_hpte_remove(unsigned long hpte_group)
+{
+	panic("ps3_hpte_remove() not implemented");
+	return 0;
+}
+
+static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
+	unsigned long va, int psize, int local)
+{
+	unsigned long flags;
+	unsigned long result;
+	unsigned long pteg, bit;
+	unsigned long hpte_v, want_v;
+
+	want_v = hpte_encode_v(va, psize);
+
+	spin_lock_irqsave(&ps3_bolttab_lock, flags);
+
+	hpte_v = htab[slot].v;
+	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
+		spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
+
+		/* ps3_hpte_insert() will be used to update PTE */
+		return -1;
+	}
+
+	result = lv1_write_htab_entry(0, slot, 0, 0);
+
+	if (result) {
+		DBG("%s: va=%lx slot=%lx psize=%d result = %ld (0x%lx)\n",
+		       __func__, va, slot, psize, result, result);
+		BUG();
+	}
+
+	pteg = slot / HPTES_PER_GROUP;
+	bit = slot % HPTES_PER_GROUP;
+	inusetab[pteg] &= ~(0x80 >> bit);
+
+	spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
+
+	/* ps3_hpte_insert() will be used to update PTE */
+	return -1;
+}
+
+static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
+	int psize)
+{
+	panic("ps3_hpte_updateboltedpp() not implemented");
+}
+
+static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
+	int psize, int local)
+{
+	unsigned long flags;
+	unsigned long result;
+	unsigned long pteg, bit;
+
+	spin_lock_irqsave(&ps3_bolttab_lock, flags);
+	result = lv1_write_htab_entry(0, slot, 0, 0);
+
+	if (result) {
+		DBG("%s: va=%lx slot=%lx psize=%d result = %ld (0x%lx)\n",
+		       __func__, va, slot, psize, result, result);
+		BUG();
+	}
+
+	pteg = slot / HPTES_PER_GROUP;
+	bit = slot % HPTES_PER_GROUP;
+	inusetab[pteg] &= ~(0x80 >> bit);
+	spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
+}
+
+static void ps3_hpte_clear(void)
+{
+	lv1_unmap_htab(htab_addr);
+}
+
+void __init ps3_hpte_init(unsigned long htab_size)
+{
+	long bitmap_size;
+
+	DBG(" -> %s:%d\n", __func__, __LINE__);
+
+	ppc_md.hpte_invalidate = ps3_hpte_invalidate;
+	ppc_md.hpte_updatepp = ps3_hpte_updatepp;
+	ppc_md.hpte_updateboltedpp = ps3_hpte_updateboltedpp;
+	ppc_md.hpte_insert = ps3_hpte_insert;
+	ppc_md.hpte_remove = ps3_hpte_remove;
+	ppc_md.hpte_clear_all = ps3_hpte_clear;
+
+	ppc64_pft_size = __ilog2(htab_size);
+
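+	/* One bit per HPTE in each of the bolted and in-use bitmaps. */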
+	bitmap_size = htab_size / sizeof(hpte_t) / 8;
+
+	bolttab = __va(lmb_alloc(bitmap_size, 1));
+	inusetab = __va(lmb_alloc(bitmap_size, 1));
+
+	memset(bolttab, 0, bitmap_size);
+	memset(inusetab, 0, bitmap_size);
+
+	DBG(" <- %s:%d\n", __func__, __LINE__);
+}
+
+void __init ps3_map_htab(void)
+{
+	long result;
+	unsigned long htab_size = (1UL << ppc64_pft_size);
+
+	result = lv1_map_htab(0, &htab_addr);
+
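+	/* The htab is mapped read-only; all updates go through
+	 * lv1_write_htab_entry(). */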
+	htab = (hpte_t *)__ioremap(htab_addr, htab_size, PAGE_READONLY_X);
+
+	DBG("%s:%d: lpar %016lxh, virt %016lxh\n", __func__, __LINE__,
+		htab_addr, (unsigned long)htab);
+}
diff --git a/arch/powerpc/platforms/ps3/hvcall.S b/arch/powerpc/platforms/ps3/hvcall.S
new file mode 100644
index 0000000..54be652
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/hvcall.S
@@ -0,0 +1,804 @@
+/*
+ *  PS3 hvcall interface.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *  Copyright 2003, 2004 (c) MontaVista Software, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+
+#define lv1call .long 0x44000022; extsw r3, r3
+
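+/*
+ * Each LV1_<in>_IN_<out>_OUT macro below saves the caller's output
+ * pointer arguments on the stack, issues the hypercall (results come
+ * back in r4 and up), then stores each result through the saved
+ * pointers.  The hypercall status is returned in r3.
+ */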
+#define LV1_N_IN_0_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_0_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_1_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_2_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_3_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_4_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_5_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_6_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_7_IN_0_OUT LV1_N_IN_0_OUT
+
+#define LV1_0_IN_1_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	stdu    r3, -8(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 8;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_0_IN_2_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r3, -8(r1);			\
+	stdu	r4, -16(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 16;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_0_IN_3_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r3, -8(r1);			\
+	std	r4, -16(r1);			\
+	stdu	r5, -24(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 24;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+	ld	r11, -24(r1);			\
+	std	r6, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_0_IN_7_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r3, -8(r1);			\
+	std	r4, -16(r1);			\
+	std	r5, -24(r1);			\
+	std	r6, -32(r1);			\
+	std	r7, -40(r1);			\
+	std	r8, -48(r1);			\
+	stdu	r9, -56(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 56;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+	ld	r11, -24(r1);			\
+	std	r6, 0(r11);			\
+	ld	r11, -32(r1);			\
+	std	r7, 0(r11);			\
+	ld	r11, -40(r1);			\
+	std	r8, 0(r11);			\
+	ld	r11, -48(r1);			\
+	std	r9, 0(r11);			\
+	ld	r11, -56(r1);			\
+	std	r10, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_1_IN_1_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	stdu    r4, -8(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 8;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_1_IN_2_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r4, -8(r1);			\
+	stdu	r5, -16(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 16;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_1_IN_3_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r4, -8(r1);			\
+	std	r5, -16(r1);			\
+	stdu	r6, -24(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 24;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+	ld	r11, -24(r1);			\
+	std	r6, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_1_IN_4_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r4, -8(r1);			\
+	std	r5, -16(r1);			\
+	std	r6, -24(r1);			\
+	stdu	r7, -32(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 32;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+	ld	r11, -24(r1);			\
+	std	r6, 0(r11);			\
+	ld	r11, -32(r1);			\
+	std	r7, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_1_IN_5_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r4, -8(r1);			\
+	std	r5, -16(r1);			\
+	std	r6, -24(r1);			\
+	std	r7, -32(r1);			\
+	stdu	r8, -40(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 40;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+	ld	r11, -24(r1);			\
+	std	r6, 0(r11);			\
+	ld	r11, -32(r1);			\
+	std	r7, 0(r11);			\
+	ld	r11, -40(r1);			\
+	std	r8, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_1_IN_6_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r4, -8(r1);			\
+	std	r5, -16(r1);			\
+	std	r6, -24(r1);			\
+	std	r7, -32(r1);			\
+	std	r8, -40(r1);			\
+	stdu	r9, -48(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 48;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+	ld	r11, -24(r1);			\
+	std	r6, 0(r11);			\
+	ld	r11, -32(r1);			\
+	std	r7, 0(r11);			\
+	ld	r11, -40(r1);			\
+	std	r8, 0(r11);			\
+	ld	r11, -48(r1);			\
+	std	r9, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_1_IN_7_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r4, -8(r1);			\
+	std	r5, -16(r1);			\
+	std	r6, -24(r1);			\
+	std	r7, -32(r1);			\
+	std	r8, -40(r1);			\
+	std	r9, -48(r1);			\
+	stdu	r10, -56(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 56;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+	ld	r11, -24(r1);			\
+	std	r6, 0(r11);			\
+	ld	r11, -32(r1);			\
+	std	r7, 0(r11);			\
+	ld	r11, -40(r1);			\
+	std	r8, 0(r11);			\
+	ld	r11, -48(r1);			\
+	std	r9, 0(r11);			\
+	ld	r11, -56(r1);			\
+	std	r10, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_2_IN_1_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	stdu	r5, -8(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 8;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_2_IN_2_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r5, -8(r1);			\
+	stdu	r6, -16(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 16;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_2_IN_3_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r5, -8(r1);			\
+	std	r6, -16(r1);			\
+	stdu	r7, -24(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 24;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+	ld	r11, -24(r1);			\
+	std	r6, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_2_IN_4_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r5, -8(r1);			\
+	std	r6, -16(r1);			\
+	std	r7, -24(r1);			\
+	stdu	r8, -32(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 32;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+	ld	r11, -24(r1);			\
+	std	r6, 0(r11);			\
+	ld	r11, -32(r1);			\
+	std	r7, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_2_IN_5_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r5, -8(r1);			\
+	std	r6, -16(r1);			\
+	std	r7, -24(r1);			\
+	std	r8, -32(r1);			\
+	stdu	r9, -40(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 40;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+	ld	r11, -24(r1);			\
+	std	r6, 0(r11);			\
+	ld	r11, -32(r1);			\
+	std	r7, 0(r11);			\
+	ld	r11, -40(r1);			\
+	std	r8, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_3_IN_1_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	stdu	r6, -8(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 8;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_3_IN_2_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r6, -8(r1);			\
+	stdu	r7, -16(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 16;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_3_IN_3_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r6, -8(r1);			\
+	std	r7, -16(r1);			\
+	stdu	r8, -24(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 24;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+	ld	r11, -24(r1);			\
+	std	r6, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_4_IN_1_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	stdu    r7, -8(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 8;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_4_IN_2_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r7, -8(r1);			\
+	stdu	r8, -16(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 16;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_4_IN_3_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r7, -8(r1);			\
+	std	r8, -16(r1);			\
+	stdu	r9, -24(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 24;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+	ld	r11, -24(r1);			\
+	std	r6, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_5_IN_1_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	stdu    r8, -8(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 8;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_5_IN_2_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r8, -8(r1);			\
+	stdu	r9, -16(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 16;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_5_IN_3_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r8, -8(r1);			\
+	std	r9, -16(r1);			\
+	stdu	r10, -24(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 24;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+	ld	r11, -24(r1);			\
+	std	r6, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_6_IN_1_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	stdu    r9, -8(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 8;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_6_IN_2_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r9, -8(r1);			\
+	stdu    r10, -16(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 16;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_6_IN_3_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std     r9, -8(r1);			\
+	stdu    r10, -16(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 16;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+	ld	r11, -16(r1);			\
+	std	r5, 0(r11);			\
+	ld	r11, 48+8*8(r1);		\
+	std	r6, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_7_IN_1_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	stdu    r10, -8(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	addi	r1, r1, 8;			\
+	ld	r11, -8(r1);			\
+	std	r4, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_7_IN_6_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	std	r10, 48+8*7(r1);		\
+						\
+	li	r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	ld	r11, 48+8*7(r1);		\
+	std	r4, 0(r11);			\
+	ld	r11, 48+8*8(r1);		\
+	std	r5, 0(r11);			\
+	ld	r11, 48+8*9(r1);		\
+	std	r6, 0(r11);			\
+	ld	r11, 48+8*10(r1);		\
+	std	r7, 0(r11);			\
+	ld	r11, 48+8*11(r1);		\
+	std	r8, 0(r11);			\
+	ld	r11, 48+8*12(r1);		\
+	std	r9, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+#define LV1_8_IN_1_OUT(API_NAME, API_NUMBER)	\
+_GLOBAL(_##API_NAME)				\
+						\
+	mflr	r0;				\
+	std	r0, 16(r1);			\
+						\
+	li      r11, API_NUMBER;		\
+	lv1call;				\
+						\
+	ld	r11, 48+8*8(r1);		\
+	std	r4, 0(r11);			\
+						\
+	ld	r0, 16(r1);			\
+	mtlr	r0;				\
+	blr
+
+	.text
+
+/* the lv1 underscored call definitions expand here */
+
+#define LV1_CALL(name, in, out, num) LV1_##in##_IN_##out##_OUT(lv1_##name, num)
+#include <asm/lv1call.h>
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
new file mode 100644
index 0000000..056c1e4
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -0,0 +1,575 @@
+/*
+ *  PS3 interrupt routines.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+
+#include <asm/machdep.h>
+#include <asm/udbg.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+#include "platform.h"
+
+#if defined(DEBUG)
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...) do { if (0) printk(fmt); } while (0)
+#endif
+
+/**
+ * ps3_alloc_io_irq - Assign a virq to a system bus device.
+ * @interrupt_id: The device interrupt id read from the system repository.
+ * @virq: The assigned Linux virq.
+ *
+ * An io irq represents a non-virtualized device interrupt.  interrupt_id
+ * corresponds to the interrupt number of the interrupt controller.
+ */
+
+int ps3_alloc_io_irq(unsigned int interrupt_id, unsigned int *virq)
+{
+	int result;
+	unsigned long outlet;
+
+	result = lv1_construct_io_irq_outlet(interrupt_id, &outlet);
+
+	if (result) {
+		pr_debug("%s:%d: lv1_construct_io_irq_outlet failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+		return result;
+	}
+
+	*virq = irq_create_mapping(NULL, outlet);
+
+	pr_debug("%s:%d: interrupt_id %u => outlet %lu, virq %u\n",
+		__func__, __LINE__, interrupt_id, outlet, *virq);
+
+	return 0;
+}
+
+int ps3_free_io_irq(unsigned int virq)
+{
+	int result;
+
+	result = lv1_destruct_io_irq_outlet(virq_to_hw(virq));
+
+	if (result)
+		pr_debug("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+
+	irq_dispose_mapping(virq);
+
+	return result;
+}
+
+/**
+ * ps3_alloc_event_irq - Allocate a virq for use with a system event.
+ * @virq: The assigned Linux virq.
+ *
+ * The virq can be used with lv1_connect_interrupt_event_receive_port() to
+ * arrange to receive events, or with ps3_send_event_locally() to signal
+ * events.
+ */
+
+int ps3_alloc_event_irq(unsigned int *virq)
+{
+	int result;
+	unsigned long outlet;
+
+	result = lv1_construct_event_receive_port(&outlet);
+
+	if (result) {
+		pr_debug("%s:%d: lv1_construct_event_receive_port failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+		*virq = NO_IRQ;
+		return result;
+	}
+
+	*virq = irq_create_mapping(NULL, outlet);
+
+	pr_debug("%s:%d: outlet %lu, virq %u\n", __func__, __LINE__, outlet,
+		*virq);
+
+	return 0;
+}
+
+int ps3_free_event_irq(unsigned int virq)
+{
+	int result;
+
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+	result = lv1_destruct_event_receive_port(virq_to_hw(virq));
+
+	if (result)
+		pr_debug("%s:%d: lv1_destruct_event_receive_port failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+
+	irq_dispose_mapping(virq);
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+}
+
+int ps3_send_event_locally(unsigned int virq)
+{
+	return lv1_send_event_locally(virq_to_hw(virq));
+}
+
+/**
+ * ps3_connect_event_irq - Assign a virq to a system bus device.
+ * @did: The HV device identifier read from the system repository.
+ * @interrupt_id: The device interrupt id read from the system repository.
+ * @virq: The assigned Linux virq.
+ *
+ * An event irq represents a virtual device interrupt.  The interrupt_id
+ * corresponds to the software interrupt number.
+ */
+
+int ps3_connect_event_irq(const struct ps3_device_id *did,
+	unsigned int interrupt_id, unsigned int *virq)
+{
+	int result;
+
+	result = ps3_alloc_event_irq(virq);
+
+	if (result)
+		return result;
+
+	result = lv1_connect_interrupt_event_receive_port(did->bus_id,
+		did->dev_id, virq_to_hw(*virq), interrupt_id);
+
+	if (result) {
+		pr_debug("%s:%d: lv1_connect_interrupt_event_receive_port"
+			" failed: %s\n", __func__, __LINE__,
+			ps3_result(result));
+		ps3_free_event_irq(*virq);
+		*virq = NO_IRQ;
+		return result;
+	}
+
+	pr_debug("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
+		interrupt_id, *virq);
+
+	return 0;
+}
+
+int ps3_disconnect_event_irq(const struct ps3_device_id *did,
+	unsigned int interrupt_id, unsigned int virq)
+{
+	int result;
+
+	pr_debug(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
+		interrupt_id, virq);
+
+	result = lv1_disconnect_interrupt_event_receive_port(did->bus_id,
+		did->dev_id, virq_to_hw(virq), interrupt_id);
+
+	if (result)
+		pr_debug("%s:%d: lv1_disconnect_interrupt_event_receive_port"
+			" failed: %s\n", __func__, __LINE__,
+			ps3_result(result));
+
+	ps3_free_event_irq(virq);
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+}
+
+/**
+ * ps3_alloc_vuart_irq - Configure the system virtual uart virq.
+ * @virt_addr_bmp: The caller supplied virtual uart interrupt bitmap.
+ * @virq: The assigned Linux virq.
+ *
+ * The system supports only a single virtual uart, so multiple calls without
+ * freeing the interrupt will return a wrong state error.
+ */
+
+int ps3_alloc_vuart_irq(void* virt_addr_bmp, unsigned int *virq)
+{
+	int result;
+	unsigned long outlet;
+	unsigned long lpar_addr;
+
+	BUG_ON(!is_kernel_addr((unsigned long)virt_addr_bmp));
+
+	lpar_addr = ps3_mm_phys_to_lpar(__pa(virt_addr_bmp));
+
+	result = lv1_configure_virtual_uart_irq(lpar_addr, &outlet);
+
+	if (result) {
+		pr_debug("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+		return result;
+	}
+
+	*virq = irq_create_mapping(NULL, outlet);
+
+	pr_debug("%s:%d: outlet %lu, virq %u\n", __func__, __LINE__,
+		outlet, *virq);
+
+	return 0;
+}
+
+int ps3_free_vuart_irq(unsigned int virq)
+{
+	int result;
+
+	result = lv1_deconfigure_virtual_uart_irq();
+
+	if (result) {
+		pr_debug("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+		return result;
+	}
+
+	irq_dispose_mapping(virq);
+
+	return result;
+}
+
+/**
+ * ps3_alloc_spe_irq - Configure an spe virq.
+ * @spe_id: The spe_id returned from lv1_construct_logical_spe().
+ * @class: The spe interrupt class {0,1,2}.
+ * @virq: The assigned Linux virq.
+ *
+ */
+
+int ps3_alloc_spe_irq(unsigned long spe_id, unsigned int class,
+	unsigned int *virq)
+{
+	int result;
+	unsigned long outlet;
+
+	BUG_ON(class > 2);
+
+	result = lv1_get_spe_irq_outlet(spe_id, class, &outlet);
+
+	if (result) {
+		pr_debug("%s:%d: lv1_get_spe_irq_outlet failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+		return result;
+	}
+
+	*virq = irq_create_mapping(NULL, outlet);
+
+	pr_debug("%s:%d: spe_id %lu, class %u, outlet %lu, virq %u\n",
+		__func__, __LINE__, spe_id, class, outlet, *virq);
+
+	return 0;
+}
+
+int ps3_free_spe_irq(unsigned int virq)
+{
+	irq_dispose_mapping(virq);
+	return 0;
+}
+
+#define PS3_INVALID_OUTLET ((irq_hw_number_t)-1)
+#define PS3_PLUG_MAX 63
+
+/**
+ * struct bmp - a per cpu irq status and mask bitmap structure
+ * @status: 256 bit status bitmap indexed by plug
+ * @unused_1: unused high words of the 256 bit status bitmap
+ * @mask: 256 bit mask bitmap indexed by plug
+ * @unused_2: unused high words of the 256 bit mask bitmap
+ * @lock: protects accesses to @status and @mask
+ * @ipi_debug_brk_mask: single bit mask for the ipi debug break plug
+ *
+ * The HV maintains per SMT thread mappings of HV outlet to HV plug on
+ * behalf of the guest.  These mappings are implemented as 256 bit guest
+ * supplied bitmaps indexed by plug number.  The addresses of the bitmaps
+ * are registered with the HV through lv1_configure_irq_state_bitmap().
+ *
+ * The HV supports 256 plugs per thread, assigned as {0..255}, for a total
+ * of 512 plugs supported on a processor.  To simplify the logic this
+ * implementation equates HV plug value to linux virq value, constrains each
+ * interrupt to have a system wide unique plug number, and limits the range
+ * of the plug values to map into the first dword of the bitmaps.  This
+ * gives a usable range of plug values of {NUM_ISA_INTERRUPTS..63}.  Note
+ * that there is no constraint on how many in this set an individual thread
+ * can acquire.
+ */
+
+struct bmp {
+	struct {
+		unsigned long status;
+		unsigned long unused_1[3];
+		unsigned long mask;
+		unsigned long unused_2[3];
+	} __attribute__ ((packed));
+	spinlock_t lock;
+	unsigned long ipi_debug_brk_mask;
+};
+
+/**
+ * struct private - a per cpu data structure
+ * @node: HV node id
+ * @cpu: HV thread id
+ * @bmp: an HV bmp structure
+ */
+
+struct private {
+	unsigned long node;
+	unsigned int cpu;
+	struct bmp bmp;
+};
+
+#if defined(DEBUG)
+static void _dump_64_bmp(const char *header, const unsigned long *p, unsigned cpu,
+	const char* func, int line)
+{
+	pr_debug("%s:%d: %s %u {%04lx_%04lx_%04lx_%04lx}\n",
+		func, line, header, cpu,
+		*p >> 48, (*p >> 32) & 0xffff, (*p >> 16) & 0xffff,
+		*p & 0xffff);
+}
+
+static void __attribute__ ((unused)) _dump_256_bmp(const char *header,
+	const unsigned long *p, unsigned cpu, const char* func, int line)
+{
+	pr_debug("%s:%d: %s %u {%016lx:%016lx:%016lx:%016lx}\n",
+		func, line, header, cpu, p[0], p[1], p[2], p[3]);
+}
+
+#define dump_bmp(_x) _dump_bmp(_x, __func__, __LINE__)
+static void _dump_bmp(struct private* pd, const char* func, int line)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pd->bmp.lock, flags);
+	_dump_64_bmp("stat", &pd->bmp.status, pd->cpu, func, line);
+	_dump_64_bmp("mask", &pd->bmp.mask, pd->cpu, func, line);
+	spin_unlock_irqrestore(&pd->bmp.lock, flags);
+}
+
+#define dump_mask(_x) _dump_mask(_x, __func__, __LINE__)
+static void __attribute__ ((unused)) _dump_mask(struct private* pd,
+	const char* func, int line)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pd->bmp.lock, flags);
+	_dump_64_bmp("mask", &pd->bmp.mask, pd->cpu, func, line);
+	spin_unlock_irqrestore(&pd->bmp.lock, flags);
+}
+#else
+static void dump_bmp(struct private* pd) {}
+#endif /* defined(DEBUG) */
+
+static void chip_mask(unsigned int virq)
+{
+	unsigned long flags;
+	struct private *pd = get_irq_chip_data(virq);
+
+	pr_debug("%s:%d: cpu %u, virq %d\n", __func__, __LINE__, pd->cpu, virq);
+
+	BUG_ON(virq < NUM_ISA_INTERRUPTS);
+	BUG_ON(virq > PS3_PLUG_MAX);
+
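+	/* Plug/virq n is bit n counted from the MSB of the first 64 bit
+	 * word of the bitmap. */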
+	spin_lock_irqsave(&pd->bmp.lock, flags);
+	pd->bmp.mask &= ~(0x8000000000000000UL >> virq);
+	spin_unlock_irqrestore(&pd->bmp.lock, flags);
+
+	lv1_did_update_interrupt_mask(pd->node, pd->cpu);
+}
+
+static void chip_unmask(unsigned int virq)
+{
+	unsigned long flags;
+	struct private *pd = get_irq_chip_data(virq);
+
+	pr_debug("%s:%d: cpu %u, virq %d\n", __func__, __LINE__, pd->cpu, virq);
+
+	BUG_ON(virq < NUM_ISA_INTERRUPTS);
+	BUG_ON(virq > PS3_PLUG_MAX);
+
+	spin_lock_irqsave(&pd->bmp.lock, flags);
+	pd->bmp.mask |= (0x8000000000000000UL >> virq);
+	spin_unlock_irqrestore(&pd->bmp.lock, flags);
+
+	lv1_did_update_interrupt_mask(pd->node, pd->cpu);
+}
+
+static void chip_eoi(unsigned int virq)
+{
+	lv1_end_of_interrupt(virq);
+}
+
+static struct irq_chip irq_chip = {
+	.typename = "ps3",
+	.mask = chip_mask,
+	.unmask = chip_unmask,
+	.eoi = chip_eoi,
+};
+
+static void host_unmap(struct irq_host *h, unsigned int virq)
+{
+	int result;
+
+	pr_debug("%s:%d: virq %d\n", __func__, __LINE__, virq);
+
+	lv1_disconnect_irq_plug(virq);
+
+	result = set_irq_chip_data(virq, NULL);
+	BUG_ON(result);
+}
+
+static DEFINE_PER_CPU(struct private, private);
+
+static int host_map(struct irq_host *h, unsigned int virq,
+	irq_hw_number_t hwirq)
+{
+	int result;
+	unsigned int cpu;
+
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+	pr_debug("%s:%d: hwirq %lu => virq %u\n", __func__, __LINE__, hwirq,
+		virq);
+
+	/* bind this virq to a cpu */
+
+	preempt_disable();
+	cpu = smp_processor_id();
+	result = lv1_connect_irq_plug(virq, hwirq);
+	preempt_enable();
+
+	if (result) {
+		pr_info("%s:%d: lv1_connect_irq_plug failed:"
+			" %s\n", __func__, __LINE__, ps3_result(result));
+		return -EPERM;
+	}
+
+	result = set_irq_chip_data(virq, &per_cpu(private, cpu));
+	BUG_ON(result);
+
+	set_irq_chip_and_handler(virq, &irq_chip, handle_fasteoi_irq);
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+}
+
+static struct irq_host_ops host_ops = {
+	.map = host_map,
+	.unmap = host_unmap,
+};
+
+void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
+{
+	struct private *pd = &per_cpu(private, cpu);
+
+	pd->bmp.ipi_debug_brk_mask = 0x8000000000000000UL >> virq;
+
+	pr_debug("%s:%d: cpu %u, virq %u, mask %lxh\n", __func__, __LINE__,
+		cpu, virq, pd->bmp.ipi_debug_brk_mask);
+}
+
+static int bmp_get_and_clear_status_bit(struct bmp *m)
+{
+	unsigned long flags;
+	unsigned int bit;
+	unsigned long x;
+
+	spin_lock_irqsave(&m->lock, flags);
+
+	/* check for ipi break first to stop this cpu ASAP */
+
+	if (m->status & m->ipi_debug_brk_mask) {
+		m->status &= ~m->ipi_debug_brk_mask;
+		spin_unlock_irqrestore(&m->lock, flags);
+		return __ilog2(m->ipi_debug_brk_mask);
+	}
+
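+	/* Scan pending, unmasked plugs from the lowest usable plug
+	 * (NUM_ISA_INTERRUPTS) upward. */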
+	x = (m->status & m->mask);
+
+	for (bit = NUM_ISA_INTERRUPTS, x <<= bit; x; bit++, x <<= 1)
+		if (x & 0x8000000000000000UL) {
+			m->status &= ~(0x8000000000000000UL >> bit);
+			spin_unlock_irqrestore(&m->lock, flags);
+			return bit;
+		}
+
+	spin_unlock_irqrestore(&m->lock, flags);
+
+	pr_debug("%s:%d: not found\n", __func__, __LINE__);
+	return -1;
+}
+
+unsigned int ps3_get_irq(void)
+{
+	int plug;
+
+	struct private *pd = &__get_cpu_var(private);
+
+	plug = bmp_get_and_clear_status_bit(&pd->bmp);
+
+	if (plug < 1) {
+		pr_debug("%s:%d: no plug found: cpu %u\n", __func__, __LINE__,
+			pd->cpu);
+		dump_bmp(&per_cpu(private, 0));
+		dump_bmp(&per_cpu(private, 1));
+		return NO_IRQ;
+	}
+
+#if defined(DEBUG)
+	if (plug < NUM_ISA_INTERRUPTS || plug > PS3_PLUG_MAX) {
+		dump_bmp(&per_cpu(private, 0));
+		dump_bmp(&per_cpu(private, 1));
+		BUG();
+	}
+#endif
+	return plug;
+}
+
+void __init ps3_init_IRQ(void)
+{
+	int result;
+	unsigned long node;
+	unsigned cpu;
+	struct irq_host *host;
+
+	lv1_get_logical_ppe_id(&node);
+
+	host = irq_alloc_host(IRQ_HOST_MAP_NOMAP, 0, &host_ops,
+		PS3_INVALID_OUTLET);
+	irq_set_default_host(host);
+	irq_set_virq_count(PS3_PLUG_MAX + 1);
+
+	for_each_possible_cpu(cpu) {
+		struct private *pd = &per_cpu(private, cpu);
+
+		pd->node = node;
+		pd->cpu = cpu;
+		spin_lock_init(&pd->bmp.lock);
+
+		result = lv1_configure_irq_state_bitmap(node, cpu,
+			ps3_mm_phys_to_lpar(__pa(&pd->bmp.status)));
+
+		if (result)
+			pr_debug("%s:%d: lv1_configure_irq_state_bitmap failed:"
+				" %s\n", __func__, __LINE__,
+				ps3_result(result));
+	}
+
+	ppc_md.get_irq = ps3_get_irq;
+}
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
new file mode 100644
index 0000000..49c0d01
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -0,0 +1,831 @@
+/*
+ *  PS3 address space management.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/memory_hotplug.h>
+
+#include <asm/firmware.h>
+#include <asm/lmb.h>
+#include <asm/udbg.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+#include "platform.h"
+
+#if defined(DEBUG)
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...) do { if (0) printk(fmt); } while (0)
+#endif
+
+enum {
+#if defined(CONFIG_PS3_USE_LPAR_ADDR)
+	USE_LPAR_ADDR = 1,
+#else
+	USE_LPAR_ADDR = 0,
+#endif
+#if defined(CONFIG_PS3_DYNAMIC_DMA)
+	USE_DYNAMIC_DMA = 1,
+#else
+	USE_DYNAMIC_DMA = 0,
+#endif
+};
+
+enum {
+	PAGE_SHIFT_4K = 12U,
+	PAGE_SHIFT_64K = 16U,
+	PAGE_SHIFT_16M = 24U,
+};
+
+static unsigned long make_page_sizes(unsigned long a, unsigned long b)
+{
+	return (a << 56) | (b << 48);
+}
+
+enum {
+	ALLOCATE_MEMORY_TRY_ALT_UNIT = 0X04,
+	ALLOCATE_MEMORY_ADDR_ZERO = 0X08,
+};
+
+/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */
+
+enum {
+	HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
+	HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
+};
+
+/*============================================================================*/
+/* virtual address space routines                                             */
+/*============================================================================*/
+
+/**
+ * struct mem_region - memory region structure
+ * @base: base address
+ * @size: size in bytes
+ * @offset: difference between base and rm.size
+ */
+
+struct mem_region {
+	unsigned long base;
+	unsigned long size;
+	unsigned long offset;
+};
+
+/**
+ * struct map - address space state variables holder
+ * @total: total memory available as reported by HV
+ * @vas_id: HV virtual address space id
+ * @htab_size: htab size in bytes
+ *
+ * The HV virtual address space (vas) allows for hotplug memory regions.
+ * Memory regions can be created and destroyed in the vas at runtime.
+ * @rm: real mode (bootmem) region
+ * @r1: hotplug memory region(s)
+ *
+ * ps3 addresses
+ * virt_addr: a cpu 'translated' effective address
+ * phys_addr: an address in what Linux thinks is the physical address space
+ * lpar_addr: an address in the HV virtual address space
+ * bus_addr: an io controller 'translated' address on a device bus
+ */
+
+struct map {
+	unsigned long total;
+	unsigned long vas_id;
+	unsigned long htab_size;
+	struct mem_region rm;
+	struct mem_region r1;
+};
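+
+/*
+ * Sketch of the layout this structure describes (example values only): with a
+ * hypothetical 128MB boot region, map.rm.size == 0x8000000.  Linux physical
+ * addresses from 0 to map.rm.size - 1 map 1:1 to lpar addresses, while
+ * physical addresses from map.rm.size to map.total - 1 fall in the hotplug
+ * region r1 and are shifted up by map.r1.offset to reach their lpar addresses
+ * (see ps3_mm_phys_to_lpar() below).
+ */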
+
+#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
+static void _debug_dump_map(const struct map* m, const char* func, int line)
+{
+	DBG("%s:%d: map.total     = %lxh\n", func, line, m->total);
+	DBG("%s:%d: map.rm.size   = %lxh\n", func, line, m->rm.size);
+	DBG("%s:%d: map.vas_id    = %lu\n", func, line, m->vas_id);
+	DBG("%s:%d: map.htab_size = %lxh\n", func, line, m->htab_size);
+	DBG("%s:%d: map.r1.base   = %lxh\n", func, line, m->r1.base);
+	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
+	DBG("%s:%d: map.r1.size   = %lxh\n", func, line, m->r1.size);
+}
+
+static struct map map;
+
+/**
+ * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
+ * @phys_addr: linux physical address
+ */
+
+unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
+{
+	BUG_ON(is_kernel_addr(phys_addr));
+	if (USE_LPAR_ADDR)
+		return phys_addr;
+	else
+		return (phys_addr < map.rm.size || phys_addr >= map.total)
+			? phys_addr : phys_addr + map.r1.offset;
+}
+
+EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
+
+/**
+ * ps3_mm_vas_create - create the virtual address space
+ */
+
+void __init ps3_mm_vas_create(unsigned long* htab_size)
+{
+	int result;
+	unsigned long start_address;
+	unsigned long size;
+	unsigned long access_right;
+	unsigned long max_page_size;
+	unsigned long flags;
+
+	result = lv1_query_logical_partition_address_region_info(0,
+		&start_address, &size, &access_right, &max_page_size,
+		&flags);
+
+	if (result) {
+		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
+			"failed: %s\n", __func__, __LINE__,
+			ps3_result(result));
+		goto fail;
+	}
+
+	if (max_page_size < PAGE_SHIFT_16M) {
+		DBG("%s:%d: bad max_page_size %lxh\n", __func__, __LINE__,
+			max_page_size);
+		goto fail;
+	}
+
+	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
+	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);
+
+	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
+			2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
+			&map.vas_id, &map.htab_size);
+
+	if (result) {
+		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+		goto fail;
+	}
+
+	result = lv1_select_virtual_address_space(map.vas_id);
+
+	if (result) {
+		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+		goto fail;
+	}
+
+	*htab_size = map.htab_size;
+
+	debug_dump_map(&map);
+
+	return;
+
+fail:
+	panic("ps3_mm_vas_create failed");
+}
+
+/**
+ * ps3_mm_vas_destroy - destroy the virtual address space
+ */
+
+void ps3_mm_vas_destroy(void)
+{
+	if (map.vas_id) {
+		lv1_select_virtual_address_space(0);
+		lv1_destruct_virtual_address_space(map.vas_id);
+		map.vas_id = 0;
+	}
+}
+
+/*============================================================================*/
+/* memory hotplug routines                                                    */
+/*============================================================================*/
+
+/**
+ * ps3_mm_region_create - create a memory region in the vas
+ * @r: pointer to a struct mem_region to accept initialized values
+ * @size: requested region size
+ *
+ * This implementation creates the region with the vas large page size.
+ * @size is rounded down to a multiple of the vas large page size.
+ */
+
+int ps3_mm_region_create(struct mem_region *r, unsigned long size)
+{
+	int result;
+	unsigned long muid;
+
+	r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);
+
+	DBG("%s:%d requested  %lxh\n", __func__, __LINE__, size);
+	DBG("%s:%d actual     %lxh\n", __func__, __LINE__, r->size);
+	DBG("%s:%d difference %lxh (%luMB)\n", __func__, __LINE__,
+		(unsigned long)(size - r->size),
+		(size - r->size) / 1024 / 1024);
+
+	if (r->size == 0) {
+		DBG("%s:%d: size == 0\n", __func__, __LINE__);
+		result = -1;
+		goto zero_region;
+	}
+
+	result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
+		ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);
+
+	if (result || r->base < map.rm.size) {
+		DBG("%s:%d: lv1_allocate_memory failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+		goto zero_region;
+	}
+
+	r->offset = r->base - map.rm.size;
+	return result;
+
+zero_region:
+	r->size = r->base = r->offset = 0;
+	return result;
+}
+
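+/*
+ * For illustration: a hypothetical request of size == 0x1234000 is rounded
+ * down to the 16M vas large page size,
+ *
+ *   _ALIGN_DOWN(0x1234000, 1 << PAGE_SHIFT_16M) == 0x1000000
+ *
+ * leaving 0x234000 bytes (about 2MB) of the request unused.
+ */
+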
+/**
+ * ps3_mm_region_destroy - destroy a memory region
+ * @r: pointer to struct mem_region
+ */
+
+void ps3_mm_region_destroy(struct mem_region *r)
+{
+	if (r->base) {
+		lv1_release_memory(r->base);
+		r->size = r->base = r->offset = 0;
+		map.total = map.rm.size;
+	}
+}
+
+/**
+ * ps3_mm_add_memory - hot add memory
+ */
+
+static int __init ps3_mm_add_memory(void)
+{
+	int result;
+	unsigned long start_addr;
+	unsigned long start_pfn;
+	unsigned long nr_pages;
+
+	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+		return 0;
+
+	BUG_ON(!mem_init_done);
+
+	start_addr = USE_LPAR_ADDR ? map.r1.base : map.rm.size;
+	start_pfn = start_addr >> PAGE_SHIFT;
+	nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
+		__func__, __LINE__, start_addr, start_pfn, nr_pages);
+
+	result = add_memory(0, start_addr, map.r1.size);
+
+	if (result) {
+		DBG("%s:%d: add_memory failed: (%d)\n",
+			__func__, __LINE__, result);
+		return result;
+	}
+
+	result = online_pages(start_pfn, nr_pages);
+
+	if (result)
+		DBG("%s:%d: online_pages failed: (%d)\n",
+			__func__, __LINE__, result);
+
+	return result;
+}
+
+core_initcall(ps3_mm_add_memory);
+
+/*============================================================================*/
+/* dma routines                                                               */
+/*============================================================================*/
+
+/**
+ * dma_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
+ * @r: pointer to dma region structure
+ * @lpar_addr: HV lpar address
+ */
+
+static unsigned long dma_lpar_to_bus(struct ps3_dma_region *r,
+	unsigned long lpar_addr)
+{
+	BUG_ON(lpar_addr >= map.r1.base + map.r1.size);
+	return r->bus_addr + (lpar_addr <= map.rm.size ? lpar_addr
+		: lpar_addr - map.r1.offset);
+}
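+
+/*
+ * Example with hypothetical values: with map.rm.size == 0x8000000 and
+ * map.r1.offset == 0x8000000, an lpar address of 0x4000000 (inside the boot
+ * region) maps to r->bus_addr + 0x4000000, while an lpar address of
+ * 0x10000000 (the start of r1) maps to r->bus_addr + 0x8000000, keeping the
+ * bus mapping contiguous.
+ */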
+
+#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
+static void _dma_dump_region(const struct ps3_dma_region *r, const char* func,
+	int line)
+{
+	DBG("%s:%d: dev        %u:%u\n", func, line, r->did.bus_id,
+		r->did.dev_id);
+	DBG("%s:%d: page_size  %u\n", func, line, r->page_size);
+	DBG("%s:%d: bus_addr   %lxh\n", func, line, r->bus_addr);
+	DBG("%s:%d: len        %lxh\n", func, line, r->len);
+}
+
+/**
+ * dma_chunk - A chunk of dma pages mapped by the io controller.
+ * @region: The dma region that owns this chunk.
+ * @lpar_addr: Starting lpar address of the area to map.
+ * @bus_addr: Starting ioc bus address of the area to map.
+ * @len: Length in bytes of the area to map.
+ * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
+ * list of all chunks owned by the region.
+ *
+ * This implementation uses a very simple dma page manager
+ * based on the dma_chunk structure.  This scheme assumes
+ * that all drivers use very well behaved dma ops.
+ */
+
+struct dma_chunk {
+	struct ps3_dma_region *region;
+	unsigned long lpar_addr;
+	unsigned long bus_addr;
+	unsigned long len;
+	struct list_head link;
+	unsigned int usage_count;
+};
+
+#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
+static void _dma_dump_chunk (const struct dma_chunk* c, const char* func,
+	int line)
+{
+	DBG("%s:%d: r.dev        %u:%u\n", func, line,
+		c->region->did.bus_id, c->region->did.dev_id);
+	DBG("%s:%d: r.bus_addr   %lxh\n", func, line, c->region->bus_addr);
+	DBG("%s:%d: r.page_size  %u\n", func, line, c->region->page_size);
+	DBG("%s:%d: r.len        %lxh\n", func, line, c->region->len);
+	DBG("%s:%d: c.lpar_addr  %lxh\n", func, line, c->lpar_addr);
+	DBG("%s:%d: c.bus_addr   %lxh\n", func, line, c->bus_addr);
+	DBG("%s:%d: c.len        %lxh\n", func, line, c->len);
+}
+
+static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r,
+	unsigned long bus_addr, unsigned long len)
+{
+	struct dma_chunk *c;
+	unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
+	unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);
+
+	list_for_each_entry(c, &r->chunk_list.head, link) {
+		/* intersection */
+		if (aligned_bus >= c->bus_addr
+			&& aligned_bus < c->bus_addr + c->len
+			&& aligned_bus + aligned_len <= c->bus_addr + c->len) {
+			return c;
+		}
+		/* below */
+		if (aligned_bus + aligned_len <= c->bus_addr) {
+			continue;
+		}
+		/* above */
+		if (aligned_bus >= c->bus_addr + c->len) {
+			continue;
+		}
+
+		/* we don't handle the multi-chunk case for now */
+
+		dma_dump_chunk(c);
+		BUG();
+	}
+	return NULL;
+}
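+
+/*
+ * Worked example of the checks above (hypothetical numbers): for a chunk at
+ * bus_addr 0x1000000 with len 0x1000000, a lookup of [0x1400000, 0x1500000)
+ * is fully contained and returns the chunk; [0x3000000, 0x3100000) lies
+ * entirely above it and is skipped; a request that only partially overlaps
+ * the chunk trips the BUG(), since multi-chunk requests are not handled.
+ */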
+
+static int dma_free_chunk(struct dma_chunk *c)
+{
+	int result = 0;
+
+	if (c->bus_addr) {
+		result = lv1_unmap_device_dma_region(c->region->did.bus_id,
+			c->region->did.dev_id, c->bus_addr, c->len);
+		BUG_ON(result);
+	}
+
+	kfree(c);
+	return result;
+}
+
+/**
+ * dma_map_pages - Maps dma pages into the io controller bus address space.
+ * @r: Pointer to a struct ps3_dma_region.
+ * @phys_addr: Starting physical address of the area to map.
+ * @len: Length in bytes of the area to map.
+ * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
+ *
+ * This is the lowest level dma mapping routine, and is the one that will
+ * make the HV call to add the pages into the io controller address space.
+ */
+
+static int dma_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
+	unsigned long len, struct dma_chunk **c_out)
+{
+	int result;
+	struct dma_chunk *c;
+
+	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);
+
+	if (!c) {
+		result = -ENOMEM;
+		goto fail_alloc;
+	}
+
+	c->region = r;
+	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
+	c->bus_addr = dma_lpar_to_bus(r, c->lpar_addr);
+	c->len = len;
+
+	result = lv1_map_device_dma_region(c->region->did.bus_id,
+		c->region->did.dev_id, c->lpar_addr, c->bus_addr, c->len,
+		0xf800000000000000UL);
+
+	if (result) {
+		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+		goto fail_map;
+	}
+
+	list_add(&c->link, &r->chunk_list.head);
+
+	*c_out = c;
+	return 0;
+
+fail_map:
+	kfree(c);
+fail_alloc:
+	*c_out = NULL;
+	DBG(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+}
+
+/**
+ * dma_region_create - Create a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ *
+ * This is the lowest level dma region create routine, and is the one that
+ * will make the HV call to create the region.
+ */
+
+static int dma_region_create(struct ps3_dma_region* r)
+{
+	int result;
+
+	r->len = _ALIGN_UP(map.total, 1 << r->page_size);
+	INIT_LIST_HEAD(&r->chunk_list.head);
+	spin_lock_init(&r->chunk_list.lock);
+
+	result = lv1_allocate_device_dma_region(r->did.bus_id, r->did.dev_id,
+		r->len, r->page_size, r->region_type, &r->bus_addr);
+
+	dma_dump_region(r);
+
+	if (result) {
+		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+		r->len = r->bus_addr = 0;
+	}
+
+	return result;
+}
+
+/**
+ * dma_region_free - Free a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ *
+ * This is the lowest level dma region free routine, and is the one that
+ * will make the HV call to free the region.
+ */
+
+static int dma_region_free(struct ps3_dma_region* r)
+{
+	int result;
+	struct dma_chunk *c;
+	struct dma_chunk *tmp;
+
+	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
+		list_del(&c->link);
+		dma_free_chunk(c);
+	}
+
+	result = lv1_free_device_dma_region(r->did.bus_id, r->did.dev_id,
+		r->bus_addr);
+
+	if (result)
+		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+
+	r->len = r->bus_addr = 0;
+
+	return result;
+}
+
+/**
+ * dma_map_area - Map an area of memory into a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ * @virt_addr: Starting virtual address of the area to map.
+ * @len: Length in bytes of the area to map.
+ * @bus_addr: A pointer to return the starting ioc bus address of the area to
+ * map.
+ *
+ * This is the common dma mapping routine.
+ */
+
+static int dma_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
+	unsigned long len, unsigned long *bus_addr)
+{
+	int result;
+	unsigned long flags;
+	struct dma_chunk *c;
+	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
+		: virt_addr;
+
+	*bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
+
+	if (!USE_DYNAMIC_DMA) {
+		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
+		DBG(" -> %s:%d\n", __func__, __LINE__);
+		DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
+			virt_addr);
+		DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
+			phys_addr);
+		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
+			lpar_addr);
+		DBG("%s:%d len       %lxh\n", __func__, __LINE__, len);
+		DBG("%s:%d bus_addr  %lxh (%lxh)\n", __func__, __LINE__,
+		*bus_addr, len);
+	}
+
+	spin_lock_irqsave(&r->chunk_list.lock, flags);
+	c = dma_find_chunk(r, *bus_addr, len);
+
+	if (c) {
+		c->usage_count++;
+		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
+		return 0;
+	}
+
+	result = dma_map_pages(r, _ALIGN_DOWN(phys_addr, 1 << r->page_size),
+		_ALIGN_UP(len, 1 << r->page_size), &c);
+
+	if (result) {
+		*bus_addr = 0;
+		DBG("%s:%d: dma_map_pages failed (%d)\n",
+			__func__, __LINE__, result);
+		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
+		return result;
+	}
+
+	c->usage_count = 1;
+
+	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
+	return result;
+}
+
+/**
+ * dma_unmap_area - Unmap an area of memory from a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ * @bus_addr: The starting ioc bus address of the area to unmap.
+ * @len: Length in bytes of the area to unmap.
+ *
+ * This is the common dma unmap routine.
+ */
+
+int dma_unmap_area(struct ps3_dma_region *r, unsigned long bus_addr,
+	unsigned long len)
+{
+	unsigned long flags;
+	struct dma_chunk *c;
+
+	spin_lock_irqsave(&r->chunk_list.lock, flags);
+	c = dma_find_chunk(r, bus_addr, len);
+
+	if (!c) {
+		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
+			1 << r->page_size);
+		unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);
+		DBG("%s:%d: not found: bus_addr %lxh\n",
+			__func__, __LINE__, bus_addr);
+		DBG("%s:%d: not found: len %lxh\n",
+			__func__, __LINE__, len);
+		DBG("%s:%d: not found: aligned_bus %lxh\n",
+			__func__, __LINE__, aligned_bus);
+		DBG("%s:%d: not found: aligned_len %lxh\n",
+			__func__, __LINE__, aligned_len);
+		BUG();
+	}
+
+	c->usage_count--;
+
+	if (!c->usage_count) {
+		list_del(&c->link);
+		dma_free_chunk(c);
+	}
+
+	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
+	return 0;
+}
+
+/**
+ * dma_region_create_linear - Set up a linear dma mapping for a device.
+ * @r: Pointer to a struct ps3_dma_region.
+ *
+ * This routine creates an HV dma region for the device and maps all available
+ * ram into the io controller bus address space.
+ */
+
+static int dma_region_create_linear(struct ps3_dma_region *r)
+{
+	int result;
+	unsigned long tmp;
+
+	/* force 16M dma pages for linear mapping */
+
+	if (r->page_size != PS3_DMA_16M) {
+		pr_info("%s:%d: forcing 16M pages for linear map\n",
+			__func__, __LINE__);
+		r->page_size = PS3_DMA_16M;
+	}
+
+	result = dma_region_create(r);
+	BUG_ON(result);
+
+	result = dma_map_area(r, map.rm.base, map.rm.size, &tmp);
+	BUG_ON(result);
+
+	if (USE_LPAR_ADDR)
+		result = dma_map_area(r, map.r1.base, map.r1.size,
+			&tmp);
+	else
+		result = dma_map_area(r, map.rm.size, map.r1.size,
+			&tmp);
+
+	BUG_ON(result);
+
+	return result;
+}
+
+/**
+ * dma_region_free_linear - Free a linear dma mapping for a device.
+ * @r: Pointer to a struct ps3_dma_region.
+ *
+ * This routine will unmap all mapped areas and free the HV dma region.
+ */
+
+static int dma_region_free_linear(struct ps3_dma_region *r)
+{
+	int result;
+
+	result = dma_unmap_area(r, dma_lpar_to_bus(r, 0), map.rm.size);
+	BUG_ON(result);
+
+	result = dma_unmap_area(r, dma_lpar_to_bus(r, map.r1.base),
+		map.r1.size);
+	BUG_ON(result);
+
+	result = dma_region_free(r);
+	BUG_ON(result);
+
+	return result;
+}
+
+/**
+ * dma_map_area_linear - Map an area of memory into a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ * @virt_addr: Starting virtual address of the area to map.
+ * @len: Length in bytes of the area to map.
+ * @bus_addr: A pointer to return the starting ioc bus address of the area to
+ * map.
+ *
+ * This routine just returns the corresponding bus address.  Actual mapping
+ * occurs in dma_region_create_linear().
+ */
+
+static int dma_map_area_linear(struct ps3_dma_region *r,
+	unsigned long virt_addr, unsigned long len, unsigned long *bus_addr)
+{
+	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
+		: virt_addr;
+	*bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
+	return 0;
+}
+
+/**
+ * dma_unmap_area_linear - Unmap an area of memory from a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ * @bus_addr: The starting ioc bus address of the area to unmap.
+ * @len: Length in bytes of the area to unmap.
+ *
+ * This routine does nothing.  Unmapping occurs in dma_region_free_linear().
+ */
+
+static int dma_unmap_area_linear(struct ps3_dma_region *r,
+	unsigned long bus_addr, unsigned long len)
+{
+	return 0;
+}
+
+int ps3_dma_region_create(struct ps3_dma_region *r)
+{
+	return (USE_DYNAMIC_DMA)
+		? dma_region_create(r)
+		: dma_region_create_linear(r);
+}
+
+int ps3_dma_region_free(struct ps3_dma_region *r)
+{
+	return (USE_DYNAMIC_DMA)
+		? dma_region_free(r)
+		: dma_region_free_linear(r);
+}
+
+int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
+	unsigned long len, unsigned long *bus_addr)
+{
+	return (USE_DYNAMIC_DMA)
+		? dma_map_area(r, virt_addr, len, bus_addr)
+		: dma_map_area_linear(r, virt_addr, len, bus_addr);
+}
+
+int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
+	unsigned long len)
+{
+	return (USE_DYNAMIC_DMA) ? dma_unmap_area(r, bus_addr, len)
+		: dma_unmap_area_linear(r, bus_addr, len);
+}
+
+/*============================================================================*/
+/* system startup routines                                                    */
+/*============================================================================*/
+
+/**
+ * ps3_mm_init - initialize the address space state variables
+ */
+
+void __init ps3_mm_init(void)
+{
+	int result;
+
+	DBG(" -> %s:%d\n", __func__, __LINE__);
+
+	result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
+		&map.total);
+
+	if (result)
+		panic("ps3_repository_read_mm_info() failed");
+
+	map.rm.offset = map.rm.base;
+	map.vas_id = map.htab_size = 0;
+
+	/* this implementation assumes map.rm.base is zero */
+
+	BUG_ON(map.rm.base);
+	BUG_ON(!map.rm.size);
+
+	lmb_add(map.rm.base, map.rm.size);
+	lmb_analyze();
+
+	/* arrange to do this in ps3_mm_add_memory */
+	ps3_mm_region_create(&map.r1, map.total - map.rm.size);
+
+	DBG(" <- %s:%d\n", __func__, __LINE__);
+}
+
+/**
+ * ps3_mm_shutdown - final cleanup of address space
+ */
+
+void ps3_mm_shutdown(void)
+{
+	ps3_mm_region_destroy(&map.r1);
+	map.total = map.rm.size;
+}
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
new file mode 100644
index 0000000..5835830
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -0,0 +1,259 @@
+/*
+ *  PS3 'Other OS' area data.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+#include <asm/lmb.h>
+#include <asm/ps3.h>
+
+#include "platform.h"
+
+enum {
+	OS_AREA_SEGMENT_SIZE = 0X200,
+};
+
+enum {
+	HEADER_LDR_FORMAT_RAW = 0,
+	HEADER_LDR_FORMAT_GZIP = 1,
+};
+
+/**
+ * struct os_area_header - os area header segment.
+ * @magic_num: Always 'cell_ext_os_area'.
+ * @hdr_version: Header format version number.
+ * @os_area_offset: Starting segment number of os image area.
+ * @ldr_area_offset: Starting segment number of bootloader image area.
+ * @ldr_format: HEADER_LDR_FORMAT flag.
+ * @ldr_size: Size of bootloader image in bytes.
+ *
+ * Note that the docs refer to area offsets.  These are offsets in units of
+ * segments from the start of the os area (top of the header).  These are
+ * better thought of as segment numbers.  The os image area of the os area
+ * is reserved for the os image.
+ */
+
+struct os_area_header {
+	s8 magic_num[16];
+	u32 hdr_version;
+	u32 os_area_offset;
+	u32 ldr_area_offset;
+	u32 _reserved_1;
+	u32 ldr_format;
+	u32 ldr_size;
+	u32 _reserved_2[6];
+} __attribute__ ((packed));
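+
+/*
+ * For illustration: the *_offset fields above count OS_AREA_SEGMENT_SIZE
+ * (0x200 byte) segments from the start of the os area, so, for example, an
+ * os_area_offset of 2 places the os image 2 * 0x200 == 0x400 bytes past the
+ * top of the header.
+ */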
+
+enum {
+	PARAM_BOOT_FLAG_GAME_OS = 0,
+	PARAM_BOOT_FLAG_OTHER_OS = 1,
+};
+
+enum {
+	PARAM_AV_MULTI_OUT_NTSC = 0,
+	PARAM_AV_MULTI_OUT_PAL_RGB = 1,
+	PARAM_AV_MULTI_OUT_PAL_YCBCR = 2,
+	PARAM_AV_MULTI_OUT_SECAM = 3,
+};
+
+enum {
+	PARAM_CTRL_BUTTON_O_IS_YES = 0,
+	PARAM_CTRL_BUTTON_X_IS_YES = 1,
+};
+
+/**
+ * struct os_area_params - os area params segment.
+ * @boot_flag: User preference of operating system, PARAM_BOOT_FLAG flag.
+ * @num_params: Number of params in this (params) segment.
+ * @rtc_diff: Difference in seconds between 1970 and the ps3 rtc value.
+ * @av_multi_out: User preference of AV output, PARAM_AV_MULTI_OUT flag.
+ * @ctrl_button: User preference of controller button config, PARAM_CTRL_BUTTON
+ *	flag.
+ * @static_ip_addr: User preference of static IP address.
+ * @network_mask: User preference of static network mask.
+ * @default_gateway: User preference of static default gateway.
+ * @dns_primary: User preference of static primary dns server.
+ * @dns_secondary: User preference of static secondary dns server.
+ *
+ * User preference of zero for static_ip_addr means use dhcp.
+ */
+
+struct os_area_params {
+	u32 boot_flag;
+	u32 _reserved_1[3];
+	u32 num_params;
+	u32 _reserved_2[3];
+	/* param 0 */
+	s64 rtc_diff;
+	u8 av_multi_out;
+	u8 ctrl_button;
+	u8 _reserved_3[6];
+	/* param 1 */
+	u8 static_ip_addr[4];
+	u8 network_mask[4];
+	u8 default_gateway[4];
+	u8 _reserved_4[4];
+	/* param 2 */
+	u8 dns_primary[4];
+	u8 dns_secondary[4];
+	u8 _reserved_5[8];
+} __attribute__ ((packed));
+
+/**
+ * struct saved_params - Static working copies of data from the 'Other OS' area.
+ *
+ * For the convenience of the guest, the HV makes a copy of the 'Other OS' area
+ * in flash to a high address in the boot memory region and then puts that RAM
+ * address and the byte count into the repository for retrieval by the guest.
+ * We copy the data we want into a static variable and allow the memory setup
+ * by the HV to be claimed by the lmb manager.
+ */
+
+struct saved_params {
+	/* param 0 */
+	s64 rtc_diff;
+	unsigned int av_multi_out;
+	unsigned int ctrl_button;
+	/* param 1 */
+	u8 static_ip_addr[4];
+	u8 network_mask[4];
+	u8 default_gateway[4];
+	/* param 2 */
+	u8 dns_primary[4];
+	u8 dns_secondary[4];
+} static saved_params;
+
+#define dump_header(_a) _dump_header(_a, __func__, __LINE__)
+static void _dump_header(const struct os_area_header __iomem *h, const char* func,
+	int line)
+{
+	pr_debug("%s:%d: h.magic_num:         '%s'\n", func, line,
+		h->magic_num);
+	pr_debug("%s:%d: h.hdr_version:       %u\n", func, line,
+		h->hdr_version);
+	pr_debug("%s:%d: h.os_area_offset:    %u\n", func, line,
+		h->os_area_offset);
+	pr_debug("%s:%d: h.ldr_area_offset:   %u\n", func, line,
+		h->ldr_area_offset);
+	pr_debug("%s:%d: h.ldr_format:        %u\n", func, line,
+		h->ldr_format);
+	pr_debug("%s:%d: h.ldr_size:          %xh\n", func, line,
+		h->ldr_size);
+}
+
+#define dump_params(_a) _dump_params(_a, __func__, __LINE__)
+static void _dump_params(const struct os_area_params __iomem *p, const char* func,
+	int line)
+{
+	pr_debug("%s:%d: p.boot_flag:       %u\n", func, line, p->boot_flag);
+	pr_debug("%s:%d: p.num_params:      %u\n", func, line, p->num_params);
+	pr_debug("%s:%d: p.rtc_diff         %ld\n", func, line, p->rtc_diff);
+	pr_debug("%s:%d: p.av_multi_out     %u\n", func, line, p->av_multi_out);
+	pr_debug("%s:%d: p.ctrl_button:     %u\n", func, line, p->ctrl_button);
+	pr_debug("%s:%d: p.static_ip_addr:  %u.%u.%u.%u\n", func, line,
+		p->static_ip_addr[0], p->static_ip_addr[1],
+		p->static_ip_addr[2], p->static_ip_addr[3]);
+	pr_debug("%s:%d: p.network_mask:    %u.%u.%u.%u\n", func, line,
+		p->network_mask[0], p->network_mask[1],
+		p->network_mask[2], p->network_mask[3]);
+	pr_debug("%s:%d: p.default_gateway: %u.%u.%u.%u\n", func, line,
+		p->default_gateway[0], p->default_gateway[1],
+		p->default_gateway[2], p->default_gateway[3]);
+	pr_debug("%s:%d: p.dns_primary:     %u.%u.%u.%u\n", func, line,
+		p->dns_primary[0], p->dns_primary[1],
+		p->dns_primary[2], p->dns_primary[3]);
+	pr_debug("%s:%d: p.dns_secondary:   %u.%u.%u.%u\n", func, line,
+		p->dns_secondary[0], p->dns_secondary[1],
+		p->dns_secondary[2], p->dns_secondary[3]);
+}
+
+static int __init verify_header(const struct os_area_header *header)
+{
+	if (memcmp(header->magic_num, "cell_ext_os_area", 16)) {
+		pr_debug("%s:%d magic_num failed\n", __func__, __LINE__);
+		return -1;
+	}
+
+	if (header->hdr_version < 1) {
+		pr_debug("%s:%d hdr_version failed\n", __func__, __LINE__);
+		return -1;
+	}
+
+	if (header->os_area_offset > header->ldr_area_offset) {
+		pr_debug("%s:%d offsets failed\n", __func__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
+
+int __init ps3_os_area_init(void)
+{
+	int result;
+	u64 lpar_addr;
+	unsigned int size;
+	struct os_area_header *header;
+	struct os_area_params *params;
+
+	result = ps3_repository_read_boot_dat_info(&lpar_addr, &size);
+
+	if (result) {
+		pr_debug("%s:%d ps3_repository_read_boot_dat_info failed\n",
+			__func__, __LINE__);
+		return result;
+	}
+
+	header = (struct os_area_header *)__va(lpar_addr);
+	params = (struct os_area_params *)__va(lpar_addr + OS_AREA_SEGMENT_SIZE);
+
+	result = verify_header(header);
+
+	if (result) {
+		pr_debug("%s:%d verify_header failed\n", __func__, __LINE__);
+		dump_header(header);
+		return -EIO;
+	}
+
+	dump_header(header);
+	dump_params(params);
+
+	saved_params.rtc_diff = params->rtc_diff;
+	saved_params.av_multi_out = params->av_multi_out;
+	saved_params.ctrl_button = params->ctrl_button;
+	memcpy(saved_params.static_ip_addr, params->static_ip_addr, 4);
+	memcpy(saved_params.network_mask, params->network_mask, 4);
+	memcpy(saved_params.default_gateway, params->default_gateway, 4);
+	memcpy(saved_params.dns_primary, params->dns_primary, 4);
+	memcpy(saved_params.dns_secondary, params->dns_secondary, 4);
+
+	return result;
+}
+
+/**
+ * ps3_os_area_rtc_diff - Returns the ps3 rtc diff value.
+ *
+ * The ps3 rtc maintains a value that approximates seconds since
+ * 2000-01-01 00:00:00 UTC.  Returns the exact number of seconds from 1970 to
+ * 2000 when saved_params.rtc_diff has not been properly set up.
+ */
+
+u64 ps3_os_area_rtc_diff(void)
+{
+	return saved_params.rtc_diff ? saved_params.rtc_diff : 946684800UL;
+}
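+
+/*
+ * For reference, the fallback value above is the number of seconds between
+ * the two epochs: 30 years (1970..1999) of 365 days plus 7 leap days is
+ * 10957 days, and 10957 * 86400 == 946684800.
+ */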
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
new file mode 100644
index 0000000..23b111b
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/platform.h
@@ -0,0 +1,68 @@
+/*
+ *  PS3 platform declarations.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#if !defined(_PS3_PLATFORM_H)
+#define _PS3_PLATFORM_H
+
+#include <linux/rtc.h>
+
+/* htab */
+
+void __init ps3_hpte_init(unsigned long htab_size);
+void __init ps3_map_htab(void);
+
+/* mm */
+
+void __init ps3_mm_init(void);
+void __init ps3_mm_vas_create(unsigned long* htab_size);
+void ps3_mm_vas_destroy(void);
+void ps3_mm_shutdown(void);
+
+/* irq */
+
+void ps3_init_IRQ(void);
+void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq);
+
+/* smp */
+
+void smp_init_ps3(void);
+void ps3_smp_cleanup_cpu(int cpu);
+
+/* time */
+
+void __init ps3_calibrate_decr(void);
+unsigned long __init ps3_get_boot_time(void);
+void ps3_get_rtc_time(struct rtc_time *time);
+int ps3_set_rtc_time(struct rtc_time *time);
+
+/* os area */
+
+int __init ps3_os_area_init(void);
+u64 ps3_os_area_rtc_diff(void);
+
+/* spu */
+
+#if defined(CONFIG_SPU_BASE)
+void ps3_spu_set_platform (void);
+#else
+static inline void ps3_spu_set_platform (void) {}
+#endif
+
+#endif
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
new file mode 100644
index 0000000..273a0d6
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/repository.c
@@ -0,0 +1,840 @@
+/*
+ *  PS3 repository routines.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+enum ps3_vendor_id {
+	PS3_VENDOR_ID_NONE = 0,
+	PS3_VENDOR_ID_SONY = 0x8000000000000000UL,
+};
+
+enum ps3_lpar_id {
+	PS3_LPAR_ID_CURRENT = 0,
+	PS3_LPAR_ID_PME = 1,
+};
+
+#define dump_field(_a, _b) _dump_field(_a, _b, __func__, __LINE__)
+static void _dump_field(const char *hdr, u64 n, const char* func, int line)
+{
+#if defined(DEBUG)
+	char s[16];
+	const char *const in = (const char *)&n;
+	unsigned int i;
+
+	for (i = 0; i < 8; i++)
+		s[i] = (in[i] <= 126 && in[i] >= 32) ? in[i] : '.';
+	s[i] = 0;
+
+	pr_debug("%s:%d: %s%016lx : %s\n", func, line, hdr, n, s);
+#endif
+}
+
+#define dump_node_name(_a, _b, _c, _d, _e) \
+	_dump_node_name(_a, _b, _c, _d, _e, __func__, __LINE__)
+static void _dump_node_name (unsigned int lpar_id, u64 n1, u64 n2, u64 n3,
+	u64 n4, const char* func, int line)
+{
+	pr_debug("%s:%d: lpar: %u\n", func, line, lpar_id);
+	_dump_field("n1: ", n1, func, line);
+	_dump_field("n2: ", n2, func, line);
+	_dump_field("n3: ", n3, func, line);
+	_dump_field("n4: ", n4, func, line);
+}
+
+#define dump_node(_a, _b, _c, _d, _e, _f, _g) \
+	_dump_node(_a, _b, _c, _d, _e, _f, _g, __func__, __LINE__)
+static void _dump_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
+	u64 v1, u64 v2, const char* func, int line)
+{
+	pr_debug("%s:%d: lpar: %u\n", func, line, lpar_id);
+	_dump_field("n1: ", n1, func, line);
+	_dump_field("n2: ", n2, func, line);
+	_dump_field("n3: ", n3, func, line);
+	_dump_field("n4: ", n4, func, line);
+	pr_debug("%s:%d: v1: %016lx\n", func, line, v1);
+	pr_debug("%s:%d: v2: %016lx\n", func, line, v2);
+}
+
+/**
+ * make_first_field - Make the first field of a repository node name.
+ * @text: Text portion of the field.
+ * @index: Numeric index portion of the field.  Use zero for 'don't care'.
+ *
+ * This routine sets the vendor id to zero (non-vendor specific).
+ * Returns field value.
+ */
+
+static u64 make_first_field(const char *text, u64 index)
+{
+	u64 n;
+
+	strncpy((char *)&n, text, 8);
+	return PS3_VENDOR_ID_NONE + (n >> 32) + index;
+}
+
+/**
+ * make_field - Make subsequent fields of a repository node name.
+ * @text: Text portion of the field.  Use "" for 'don't care'.
+ * @index: Numeric index portion of the field.  Use zero for 'don't care'.
+ *
+ * Returns field value.
+ */
+
+static u64 make_field(const char *text, u64 index)
+{
+	u64 n;
+
+	strncpy((char *)&n, text, 8);
+	return n + index;
+}
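+
+/*
+ * For illustration (assuming the big-endian byte order of the PPE): packing
+ * the text "bus" into a u64 gives 0x6275730000000000, so
+ *
+ *   make_first_field("bus", 0) == 0x0000000062757300
+ *   make_field("id", 0)        == 0x6964000000000000
+ */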
+
+/**
+ * read_node - Read a repository node from raw fields.
+ * @lpar_id: An lpar id, or PS3_LPAR_ID_CURRENT for the current lpar.
+ * @n1: First field of node name.
+ * @n2: Second field of node name.  Use zero for 'don't care'.
+ * @n3: Third field of node name.  Use zero for 'don't care'.
+ * @n4: Fourth field of node name.  Use zero for 'don't care'.
+ * @_v1: Pointer to receive the first repository value (high word).
+ * @_v2: Pointer to receive the second repository value (low word).  Optional
+ *      parameter, use zero for 'don't care'.
+ */
+
+static int read_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
+	u64 *_v1, u64 *_v2)
+{
+	int result;
+	u64 v1;
+	u64 v2;
+
+	if (lpar_id == PS3_LPAR_ID_CURRENT) {
+		u64 id;
+		lv1_get_logical_partition_id(&id);
+		lpar_id = id;
+	}
+
+	result = lv1_get_repository_node_value(lpar_id, n1, n2, n3, n4, &v1,
+		&v2);
+
+	if (result) {
+		pr_debug("%s:%d: lv1_get_repository_node_value failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+		dump_node_name(lpar_id, n1, n2, n3, n4);
+		return result;
+	}
+
+	dump_node(lpar_id, n1, n2, n3, n4, v1, v2);
+
+	if (_v1)
+		*_v1 = v1;
+	if (_v2)
+		*_v2 = v2;
+
+	if (v1 && !_v1)
+		pr_debug("%s:%d: warning: discarding non-zero v1: %016lx\n",
+			__func__, __LINE__, v1);
+	if (v2 && !_v2)
+		pr_debug("%s:%d: warning: discarding non-zero v2: %016lx\n",
+			__func__, __LINE__, v2);
+
+	return result;
+}
+
+int ps3_repository_read_bus_str(unsigned int bus_index, const char *bus_str,
+	u64 *value)
+{
+	return read_node(PS3_LPAR_ID_PME,
+		make_first_field("bus", bus_index),
+		make_field(bus_str, 0),
+		0, 0,
+		value, 0);
+}
+
+int ps3_repository_read_bus_id(unsigned int bus_index, unsigned int *bus_id)
+{
+	int result;
+	u64 v1;
+	u64 v2; /* unused */
+
+	result = read_node(PS3_LPAR_ID_PME,
+		make_first_field("bus", bus_index),
+		make_field("id", 0),
+		0, 0,
+		&v1, &v2);
+	*bus_id = v1;
+	return result;
+}
+
+int ps3_repository_read_bus_type(unsigned int bus_index,
+	enum ps3_bus_type *bus_type)
+{
+	int result;
+	u64 v1;
+
+	result = read_node(PS3_LPAR_ID_PME,
+		make_first_field("bus", bus_index),
+		make_field("type", 0),
+		0, 0,
+		&v1, 0);
+	*bus_type = v1;
+	return result;
+}
+
+int ps3_repository_read_bus_num_dev(unsigned int bus_index,
+	unsigned int *num_dev)
+{
+	int result;
+	u64 v1;
+
+	result = read_node(PS3_LPAR_ID_PME,
+		make_first_field("bus", bus_index),
+		make_field("num_dev", 0),
+		0, 0,
+		&v1, 0);
+	*num_dev = v1;
+	return result;
+}
+
+int ps3_repository_read_dev_str(unsigned int bus_index,
+	unsigned int dev_index, const char *dev_str, u64 *value)
+{
+	return read_node(PS3_LPAR_ID_PME,
+		make_first_field("bus", bus_index),
+		make_field("dev", dev_index),
+		make_field(dev_str, 0),
+		0,
+		value, 0);
+}
+
+int ps3_repository_read_dev_id(unsigned int bus_index, unsigned int dev_index,
+	unsigned int *dev_id)
+{
+	int result;
+	u64 v1;
+
+	result = read_node(PS3_LPAR_ID_PME,
+		make_first_field("bus", bus_index),
+		make_field("dev", dev_index),
+		make_field("id", 0),
+		0,
+		&v1, 0);
+	*dev_id = v1;
+	return result;
+}
+
+int ps3_repository_read_dev_type(unsigned int bus_index,
+	unsigned int dev_index, enum ps3_dev_type *dev_type)
+{
+	int result;
+	u64 v1;
+
+	result = read_node(PS3_LPAR_ID_PME,
+		make_first_field("bus", bus_index),
+		make_field("dev", dev_index),
+		make_field("type", 0),
+		0,
+		&v1, 0);
+	*dev_type = v1;
+	return result;
+}
+
+int ps3_repository_read_dev_intr(unsigned int bus_index,
+	unsigned int dev_index, unsigned int intr_index,
+	unsigned int *intr_type, unsigned int* interrupt_id)
+{
+	int result;
+	u64 v1;
+	u64 v2;
+
+	result = read_node(PS3_LPAR_ID_PME,
+		make_first_field("bus", bus_index),
+		make_field("dev", dev_index),
+		make_field("intr", intr_index),
+		0,
+		&v1, &v2);
+	*intr_type = v1;
+	*interrupt_id = v2;
+	return result;
+}
+
+int ps3_repository_read_dev_reg_type(unsigned int bus_index,
+	unsigned int dev_index, unsigned int reg_index, unsigned int *reg_type)
+{
+	int result;
+	u64 v1;
+
+	result = read_node(PS3_LPAR_ID_PME,
+		make_first_field("bus", bus_index),
+		make_field("dev", dev_index),
+		make_field("reg", reg_index),
+		make_field("type", 0),
+		&v1, 0);
+	*reg_type = v1;
+	return result;
+}
+
+int ps3_repository_read_dev_reg_addr(unsigned int bus_index,
+	unsigned int dev_index, unsigned int reg_index, u64 *bus_addr, u64 *len)
+{
+	return read_node(PS3_LPAR_ID_PME,
+		make_first_field("bus", bus_index),
+		make_field("dev", dev_index),
+		make_field("reg", reg_index),
+		make_field("data", 0),
+		bus_addr, len);
+}
+
+int ps3_repository_read_dev_reg(unsigned int bus_index,
+	unsigned int dev_index, unsigned int reg_index, unsigned int *reg_type,
+	u64 *bus_addr, u64 *len)
+{
+	int result = ps3_repository_read_dev_reg_type(bus_index, dev_index,
+		reg_index, reg_type);
+	return result ? result
+		: ps3_repository_read_dev_reg_addr(bus_index, dev_index,
+		reg_index, bus_addr, len);
+}
+
+#if defined(DEBUG)
+int ps3_repository_dump_resource_info(unsigned int bus_index,
+	unsigned int dev_index)
+{
+	int result = 0;
+	unsigned int res_index;
+
+	pr_debug(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
+		bus_index, dev_index);
+
+	for (res_index = 0; res_index < 10; res_index++) {
+		enum ps3_interrupt_type intr_type;
+		unsigned int interrupt_id;
+
+		result = ps3_repository_read_dev_intr(bus_index, dev_index,
+			res_index, &intr_type, &interrupt_id);
+
+		if (result) {
+			if (result !=  LV1_NO_ENTRY)
+				pr_debug("%s:%d ps3_repository_read_dev_intr"
+					" (%u:%u) failed\n", __func__, __LINE__,
+					bus_index, dev_index);
+			break;
+		}
+
+		pr_debug("%s:%d (%u:%u) intr_type %u, interrupt_id %u\n",
+			__func__, __LINE__, bus_index, dev_index, intr_type,
+			interrupt_id);
+	}
+
+	for (res_index = 0; res_index < 10; res_index++) {
+		enum ps3_region_type reg_type;
+		u64 bus_addr;
+		u64 len;
+
+		result = ps3_repository_read_dev_reg(bus_index, dev_index,
+			res_index, &reg_type, &bus_addr, &len);
+
+		if (result) {
+			if (result !=  LV1_NO_ENTRY)
+				pr_debug("%s:%d ps3_repository_read_dev_reg"
+					" (%u:%u) failed\n", __func__, __LINE__,
+					bus_index, dev_index);
+			break;
+		}
+
+		pr_debug("%s:%d (%u:%u) reg_type %u, bus_addr %lxh, len %lxh\n",
+			__func__, __LINE__, bus_index, dev_index, reg_type,
+			bus_addr, len);
+	}
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+}
+
+static int dump_device_info(unsigned int bus_index, unsigned int num_dev)
+{
+	int result = 0;
+	unsigned int dev_index;
+
+	pr_debug(" -> %s:%d: bus_%u\n", __func__, __LINE__, bus_index);
+
+	for (dev_index = 0; dev_index < num_dev; dev_index++) {
+		enum ps3_dev_type dev_type;
+		unsigned int dev_id;
+
+		result = ps3_repository_read_dev_type(bus_index, dev_index,
+			&dev_type);
+
+		if (result) {
+			pr_debug("%s:%d ps3_repository_read_dev_type"
+				" (%u:%u) failed\n", __func__, __LINE__,
+				bus_index, dev_index);
+			break;
+		}
+
+		result = ps3_repository_read_dev_id(bus_index, dev_index,
+			&dev_id);
+
+		if (result) {
+			pr_debug("%s:%d ps3_repository_read_dev_id"
+				" (%u:%u) failed\n", __func__, __LINE__,
+				bus_index, dev_index);
+			continue;
+		}
+
+		pr_debug("%s:%d  (%u:%u): dev_type %u, dev_id %u\n", __func__,
+			__LINE__, bus_index, dev_index, dev_type, dev_id);
+
+		ps3_repository_dump_resource_info(bus_index, dev_index);
+	}
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+}
+
+int ps3_repository_dump_bus_info(void)
+{
+	int result = 0;
+	unsigned int bus_index;
+
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+	for (bus_index = 0; bus_index < 10; bus_index++) {
+		enum ps3_bus_type bus_type;
+		unsigned int bus_id;
+		unsigned int num_dev;
+
+		result = ps3_repository_read_bus_type(bus_index, &bus_type);
+
+		if (result) {
+			pr_debug("%s:%d read_bus_type(%u) failed\n",
+				__func__, __LINE__, bus_index);
+			break;
+		}
+
+		result = ps3_repository_read_bus_id(bus_index, &bus_id);
+
+		if (result) {
+			pr_debug("%s:%d read_bus_id(%u) failed\n",
+				__func__, __LINE__, bus_index);
+			continue;
+		}
+
+		if (bus_index != bus_id)
+			pr_debug("%s:%d bus_index != bus_id\n",
+				__func__, __LINE__);
+
+		result = ps3_repository_read_bus_num_dev(bus_index, &num_dev);
+
+		if (result) {
+			pr_debug("%s:%d read_bus_num_dev(%u) failed\n",
+				__func__, __LINE__, bus_index);
+			continue;
+		}
+
+		pr_debug("%s:%d bus_%u: bus_type %u, bus_id %u, num_dev %u\n",
+			__func__, __LINE__, bus_index, bus_type, bus_id,
+			num_dev);
+
+		dump_device_info(bus_index, num_dev);
+	}
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return result;
+}
+#endif /* defined(DEBUG) */
+
+static int find_device(unsigned int bus_index, unsigned int num_dev,
+	unsigned int start_dev_index, enum ps3_dev_type dev_type,
+	struct ps3_repository_device *dev)
+{
+	int result = 0;
+	unsigned int dev_index;
+
+	pr_debug("%s:%d: find dev_type %u\n", __func__, __LINE__, dev_type);
+
+	dev->dev_index = UINT_MAX;
+
+	for (dev_index = start_dev_index; dev_index < num_dev; dev_index++) {
+		enum ps3_dev_type x;
+
+		result = ps3_repository_read_dev_type(bus_index, dev_index,
+			&x);
+
+		if (result) {
+			pr_debug("%s:%d read_dev_type failed\n",
+				__func__, __LINE__);
+			return result;
+		}
+
+		if (x == dev_type)
+			break;
+	}
+
+	BUG_ON(dev_index == num_dev);
+
+	pr_debug("%s:%d: found dev_type %u at dev_index %u\n",
+		__func__, __LINE__, dev_type, dev_index);
+
+	result = ps3_repository_read_dev_id(bus_index, dev_index,
+		&dev->did.dev_id);
+
+	if (result) {
+		pr_debug("%s:%d read_dev_id failed\n",
+			__func__, __LINE__);
+		return result;
+	}
+
+	dev->dev_index = dev_index;
+
+	pr_debug("%s:%d found: dev_id %u\n", __func__, __LINE__,
+		dev->did.dev_id);
+
+	return result;
+}
+
+int ps3_repository_find_device (enum ps3_bus_type bus_type,
+	enum ps3_dev_type dev_type,
+	const struct ps3_repository_device *start_dev,
+	struct ps3_repository_device *dev)
+{
+	int result = 0;
+	unsigned int bus_index;
+	unsigned int num_dev;
+
+	pr_debug("%s:%d: find bus_type %u, dev_type %u\n", __func__, __LINE__,
+		bus_type, dev_type);
+
+	dev->bus_index = UINT_MAX;
+
+	for (bus_index = start_dev ? start_dev->bus_index : 0; bus_index < 10;
+		bus_index++) {
+		enum ps3_bus_type x;
+
+		result = ps3_repository_read_bus_type(bus_index, &x);
+
+		if (result) {
+			pr_debug("%s:%d read_bus_type failed\n",
+				__func__, __LINE__);
+			return result;
+		}
+		if (x == bus_type)
+			break;
+	}
+
+	BUG_ON(bus_index == 10);
+
+	pr_debug("%s:%d: found bus_type %u at bus_index %u\n",
+		__func__, __LINE__, bus_type, bus_index);
+
+	result = ps3_repository_read_bus_num_dev(bus_index, &num_dev);
+
+	if (result) {
+		pr_debug("%s:%d read_bus_num_dev failed\n",
+			__func__, __LINE__);
+		return result;
+	}
+
+	result = find_device(bus_index, num_dev, start_dev
+		? start_dev->dev_index + 1 : 0, dev_type, dev);
+
+	if (result) {
+		pr_debug("%s:%d get_did failed\n", __func__, __LINE__);
+		return result;
+	}
+
+	result = ps3_repository_read_bus_id(bus_index, &dev->did.bus_id);
+
+	if (result) {
+		pr_debug("%s:%d read_bus_id failed\n",
+			__func__, __LINE__);
+		return result;
+	}
+
+	dev->bus_index = bus_index;
+
+	pr_debug("%s:%d found: bus_id %u, dev_id %u\n",
+		__func__, __LINE__, dev->did.bus_id, dev->did.dev_id);
+
+	return result;
+}
+
+int ps3_repository_find_interrupt(const struct ps3_repository_device *dev,
+	enum ps3_interrupt_type intr_type, unsigned int *interrupt_id)
+{
+	int result = 0;
+	unsigned int res_index;
+
+	pr_debug("%s:%d: find intr_type %u\n", __func__, __LINE__, intr_type);
+
+	*interrupt_id = UINT_MAX;
+
+	for (res_index = 0; res_index < 10; res_index++) {
+		enum ps3_interrupt_type t;
+		unsigned int id;
+
+		result = ps3_repository_read_dev_intr(dev->bus_index,
+			dev->dev_index, res_index, &t, &id);
+
+		if (result) {
+			pr_debug("%s:%d read_dev_intr failed\n",
+				__func__, __LINE__);
+			return result;
+		}
+
+		if (t == intr_type) {
+			*interrupt_id = id;
+			break;
+		}
+	}
+
+	BUG_ON(res_index == 10);
+
+	pr_debug("%s:%d: found intr_type %u at res_index %u\n",
+		__func__, __LINE__, intr_type, res_index);
+
+	return result;
+}
+
+int ps3_repository_find_region(const struct ps3_repository_device *dev,
+	enum ps3_region_type reg_type, u64 *bus_addr, u64 *len)
+{
+	int result = 0;
+	unsigned int res_index;
+
+	pr_debug("%s:%d: find reg_type %u\n", __func__, __LINE__, reg_type);
+
+	*bus_addr = *len = 0;
+
+	for (res_index = 0; res_index < 10; res_index++) {
+		enum ps3_region_type t;
+		u64 a;
+		u64 l;
+
+		result = ps3_repository_read_dev_reg(dev->bus_index,
+			dev->dev_index, res_index, &t, &a, &l);
+
+		if (result) {
+			pr_debug("%s:%d read_dev_reg failed\n",
+				__func__, __LINE__);
+			return result;
+		}
+
+		if (t == reg_type) {
+			*bus_addr = a;
+			*len = l;
+			break;
+		}
+	}
+
+	BUG_ON(res_index == 10);
+
+	pr_debug("%s:%d: found reg_type %u at res_index %u\n",
+		__func__, __LINE__, reg_type, res_index);
+
+	return result;
+}
+
+int ps3_repository_read_rm_size(unsigned int ppe_id, u64 *rm_size)
+{
+	return read_node(PS3_LPAR_ID_CURRENT,
+		make_first_field("bi", 0),
+		make_field("pu", 0),
+		ppe_id,
+		make_field("rm_size", 0),
+		rm_size, 0);
+}
+
+int ps3_repository_read_region_total(u64 *region_total)
+{
+	return read_node(PS3_LPAR_ID_CURRENT,
+		make_first_field("bi", 0),
+		make_field("rgntotal", 0),
+		0, 0,
+		region_total, 0);
+}
+
+/**
+ * ps3_repository_read_mm_info - Read mm info for single pu system.
+ * @rm_base: Real mode memory base address.
+ * @rm_size: Real mode memory size.
+ * @region_total: Maximum memory region size.
+ */
+
+int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size, u64 *region_total)
+{
+	int result;
+	u64 ppe_id;
+
+	lv1_get_logical_ppe_id(&ppe_id);
+	*rm_base = 0;
+	result = ps3_repository_read_rm_size(ppe_id, rm_size);
+	return result ? result
+		: ps3_repository_read_region_total(region_total);
+}
+
+/**
+ * ps3_repository_read_num_spu_reserved - Number of physical spus reserved.
+ * @num_spu_reserved: Number of physical spus reserved.
+ */
+
+int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved)
+{
+	int result;
+	u64 v1;
+
+	result = read_node(PS3_LPAR_ID_CURRENT,
+		make_first_field("bi", 0),
+		make_field("spun", 0),
+		0, 0,
+		&v1, 0);
+	*num_spu_reserved = v1;
+	return result;
+}
+
+/**
+ * ps3_repository_read_num_spu_resource_id - Number of spu resource reservations.
+ * @num_resource_id: Number of spu resource ids.
+ */
+
+int ps3_repository_read_num_spu_resource_id(unsigned int *num_resource_id)
+{
+	int result;
+	u64 v1;
+
+	result = read_node(PS3_LPAR_ID_CURRENT,
+		make_first_field("bi", 0),
+		make_field("spursvn", 0),
+		0, 0,
+		&v1, 0);
+	*num_resource_id = v1;
+	return result;
+}
+
+/**
+ * ps3_repository_read_spu_resource_id - spu resource reservation id value.
+ * @res_index: Resource reservation index.
+ * @resource_type: Resource reservation type.
+ * @resource_id: Resource reservation id.
+ */
+
+int ps3_repository_read_spu_resource_id(unsigned int res_index,
+	enum ps3_spu_resource_type* resource_type, unsigned int *resource_id)
+{
+	int result;
+	u64 v1;
+	u64 v2;
+
+	result = read_node(PS3_LPAR_ID_CURRENT,
+		make_first_field("bi", 0),
+		make_field("spursv", 0),
+		res_index,
+		0,
+		&v1, &v2);
+	*resource_type = v1;
+	*resource_id = v2;
+	return result;
+}
+
+int ps3_repository_read_boot_dat_address(u64 *address)
+{
+	return read_node(PS3_LPAR_ID_CURRENT,
+		make_first_field("bi", 0),
+		make_field("boot_dat", 0),
+		make_field("address", 0),
+		0,
+		address, 0);
+}
+
+int ps3_repository_read_boot_dat_size(unsigned int *size)
+{
+	int result;
+	u64 v1;
+
+	result = read_node(PS3_LPAR_ID_CURRENT,
+		make_first_field("bi", 0),
+		make_field("boot_dat", 0),
+		make_field("size", 0),
+		0,
+		&v1, 0);
+	*size = v1;
+	return result;
+}
+
+/**
+ * ps3_repository_read_boot_dat_info - Get address and size of cell_ext_os_area.
+ * @lpar_addr: lpar address of cell_ext_os_area
+ * @size: size of cell_ext_os_area
+ */
+
+int ps3_repository_read_boot_dat_info(u64 *lpar_addr, unsigned int *size)
+{
+	int result;
+
+	*size = 0;
+	result = ps3_repository_read_boot_dat_address(lpar_addr);
+	return result ? result
+		: ps3_repository_read_boot_dat_size(size);
+}
+
+int ps3_repository_read_num_be(unsigned int *num_be)
+{
+	int result;
+	u64 v1;
+
+	result = read_node(PS3_LPAR_ID_PME,
+		make_first_field("ben", 0),
+		0,
+		0,
+		0,
+		&v1, 0);
+	*num_be = v1;
+	return result;
+}
+
+int ps3_repository_read_be_node_id(unsigned int be_index, u64 *node_id)
+{
+	return read_node(PS3_LPAR_ID_PME,
+		make_first_field("be", be_index),
+		0,
+		0,
+		0,
+		node_id, 0);
+}
+
+int ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq)
+{
+	return read_node(PS3_LPAR_ID_PME,
+		make_first_field("be", 0),
+		node_id,
+		make_field("clock", 0),
+		0,
+		tb_freq, 0);
+}
+
+int ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq)
+{
+	int result;
+	u64 node_id;
+
+	*tb_freq = 0;
+	result = ps3_repository_read_be_node_id(0, &node_id);
+	return result ? result
+		: ps3_repository_read_tb_freq(node_id, tb_freq);
+}
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
new file mode 100644
index 0000000..d8b5cad
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -0,0 +1,173 @@
+/*
+ *  PS3 platform setup routines.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/root_dev.h>
+#include <linux/console.h>
+#include <linux/kexec.h>
+
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+#include <asm/time.h>
+#include <asm/iommu.h>
+#include <asm/udbg.h>
+#include <asm/prom.h>
+#include <asm/lv1call.h>
+
+#include "platform.h"
+
+#if defined(DEBUG)
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
+#endif
+
+static void ps3_show_cpuinfo(struct seq_file *m)
+{
+	seq_printf(m, "machine\t\t: %s\n", ppc_md.name);
+}
+
+static void ps3_power_save(void)
+{
+	/*
+	 * lv1_pause() puts the PPE thread into inactive state until an
+	 * irq on an unmasked plug exists. MSR[EE] has no effect.
+	 * flags: 0 = wake on DEC interrupt, 1 = ignore DEC interrupt.
+	 */
+
+	lv1_pause(0);
+}
+
+static void ps3_panic(char *str)
+{
+	DBG("%s:%d %s\n", __func__, __LINE__, str);
+
+#ifdef CONFIG_SMP
+	smp_send_stop();
+#endif
+	printk("\n");
+	printk("   System does not reboot automatically.\n");
+	printk("   Please press POWER button.\n");
+	printk("\n");
+
+	for (;;) ;
+}
+
+static void __init ps3_setup_arch(void)
+{
+	DBG(" -> %s:%d\n", __func__, __LINE__);
+
+	ps3_spu_set_platform();
+	ps3_map_htab();
+
+#ifdef CONFIG_SMP
+	smp_init_ps3();
+#endif
+
+#ifdef CONFIG_DUMMY_CONSOLE
+	conswitchp = &dummy_con;
+#endif
+
+	ppc_md.power_save = ps3_power_save;
+
+	DBG(" <- %s:%d\n", __func__, __LINE__);
+}
+
+static void __init ps3_progress(char *s, unsigned short hex)
+{
+	printk("*** %04x : %s\n", hex, s ? s : "");
+}
+
+static int __init ps3_probe(void)
+{
+	unsigned long htab_size;
+	unsigned long dt_root;
+
+	DBG(" -> %s:%d\n", __func__, __LINE__);
+
+	dt_root = of_get_flat_dt_root();
+	if (!of_flat_dt_is_compatible(dt_root, "PS3"))
+		return 0;
+
+	powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
+
+	ps3_os_area_init();
+	ps3_mm_init();
+	ps3_mm_vas_create(&htab_size);
+	ps3_hpte_init(htab_size);
+
+	DBG(" <- %s:%d\n", __func__, __LINE__);
+	return 1;
+}
+
+#if defined(CONFIG_KEXEC)
+static void ps3_kexec_cpu_down(int crash_shutdown, int secondary)
+{
+	DBG(" -> %s:%d\n", __func__, __LINE__);
+
+	if (secondary) {
+		int cpu;
+		for_each_online_cpu(cpu)
+			if (cpu)
+				ps3_smp_cleanup_cpu(cpu);
+	} else
+		ps3_smp_cleanup_cpu(0);
+
+	DBG(" <- %s:%d\n", __func__, __LINE__);
+}
+
+static void ps3_machine_kexec(struct kimage *image)
+{
+	unsigned long ppe_id;
+
+	DBG(" -> %s:%d\n", __func__, __LINE__);
+
+	lv1_get_logical_ppe_id(&ppe_id);
+	lv1_configure_irq_state_bitmap(ppe_id, 0, 0);
+	ps3_mm_shutdown();
+	ps3_mm_vas_destroy();
+
+	default_machine_kexec(image);
+
+	DBG(" <- %s:%d\n", __func__, __LINE__);
+}
+#endif
+
+define_machine(ps3) {
+	.name				= "PS3",
+	.probe				= ps3_probe,
+	.setup_arch			= ps3_setup_arch,
+	.show_cpuinfo			= ps3_show_cpuinfo,
+	.init_IRQ			= ps3_init_IRQ,
+	.panic				= ps3_panic,
+	.get_boot_time			= ps3_get_boot_time,
+	.set_rtc_time			= ps3_set_rtc_time,
+	.get_rtc_time			= ps3_get_rtc_time,
+	.calibrate_decr			= ps3_calibrate_decr,
+	.progress			= ps3_progress,
+#if defined(CONFIG_KEXEC)
+	.kexec_cpu_down			= ps3_kexec_cpu_down,
+	.machine_kexec			= ps3_machine_kexec,
+	.machine_kexec_prepare		= default_machine_kexec_prepare,
+	.machine_crash_shutdown		= default_machine_crash_shutdown,
+#endif
+};
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
new file mode 100644
index 0000000..11d2080
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -0,0 +1,158 @@
+/*
+ *  PS3 SMP routines.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/smp.h>
+
+#include <asm/machdep.h>
+#include <asm/udbg.h>
+#include <asm/ps3.h>
+
+#include "platform.h"
+
+#if defined(DEBUG)
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
+#endif
+
+static irqreturn_t ipi_function_handler(int irq, void *msg)
+{
+	smp_message_recv((int)(long)msg);
+	return IRQ_HANDLED;
+}
+
+/**
+ * virqs - a per cpu array of virqs for ipi use
+ */
+
+#define MSG_COUNT 4
+static DEFINE_PER_CPU(unsigned int, virqs[MSG_COUNT]);
+
+static const char *names[MSG_COUNT] = {
+	"ipi call",
+	"ipi reschedule",
+	"ipi migrate",
+	"ipi debug brk"
+};
+
+static void do_message_pass(int target, int msg)
+{
+	int result;
+	unsigned int virq;
+
+	if (msg >= MSG_COUNT) {
+		DBG("%s:%d: bad msg: %d\n", __func__, __LINE__, msg);
+		return;
+	}
+
+	virq = per_cpu(virqs, target)[msg];
+	result = ps3_send_event_locally(virq);
+
+	if (result)
+		DBG("%s:%d: ps3_send_event_locally(%d, %d) failed"
+			" (%d)\n", __func__, __LINE__, target, msg, result);
+}
+
+static void ps3_smp_message_pass(int target, int msg)
+{
+	int cpu;
+
+	if (target < NR_CPUS)
+		do_message_pass(target, msg);
+	else if (target == MSG_ALL_BUT_SELF) {
+		for_each_online_cpu(cpu)
+			if (cpu != smp_processor_id())
+				do_message_pass(cpu, msg);
+	} else {
+		for_each_online_cpu(cpu)
+			do_message_pass(cpu, msg);
+	}
+}
+
+static int ps3_smp_probe(void)
+{
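+	/* The PS3's Cell processor makes two PPE hardware threads available. */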
+	return 2;
+}
+
+static void __init ps3_smp_setup_cpu(int cpu)
+{
+	int result;
+	unsigned int *virqs = per_cpu(virqs, cpu);
+	int i;
+
+	DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
+
+	/*
+	 * Check assumptions on virqs[] indexing. If this
+	 * check fails, then a different mapping of PPC_MSG_
+	 * to index needs to be set up.
+	 */
+
+	BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION  != 0);
+	BUILD_BUG_ON(PPC_MSG_RESCHEDULE     != 1);
+	BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
+
+	for (i = 0; i < MSG_COUNT; i++) {
+		result = ps3_alloc_event_irq(&virqs[i]);
+
+		if (result)
+			continue;
+
+		DBG("%s:%d: (%d, %d) => virq %u\n",
+			__func__, __LINE__, cpu, i, virqs[i]);
+
+
+		request_irq(virqs[i], ipi_function_handler, IRQF_DISABLED,
+			names[i], (void*)(long)i);
+	}
+
+	ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]);
+
+	DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
+}
+
+void ps3_smp_cleanup_cpu(int cpu)
+{
+	unsigned int *virqs = per_cpu(virqs, cpu);
+	int i;
+
+	DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
+	for (i = 0; i < MSG_COUNT; i++) {
+		ps3_free_event_irq(virqs[i]);
+		free_irq(virqs[i], (void*)(long)i);
+		virqs[i] = NO_IRQ;
+	}
+	DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
+}
+
+static struct smp_ops_t ps3_smp_ops = {
+	.probe		= ps3_smp_probe,
+	.message_pass	= ps3_smp_message_pass,
+	.kick_cpu	= smp_generic_kick_cpu,
+	.setup_cpu	= ps3_smp_setup_cpu,
+};
+
+void smp_init_ps3(void)
+{
+	DBG(" -> %s\n", __func__);
+	smp_ops = &ps3_smp_ops;
+	DBG(" <- %s\n", __func__);
+}
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
new file mode 100644
index 0000000..644532c
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -0,0 +1,613 @@
+/*
+ *  PS3 Platform spu routines.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mmzone.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+
+#include <asm/spu.h>
+#include <asm/spu_priv1.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+/* spu_management_ops */
+
+/**
+ * enum spe_type - Type of spe to create.
+ * @spe_type_logical: Standard logical spe.
+ *
+ * For use with lv1_construct_logical_spe().  The current HV does not support
+ * any types other than those listed.
+ */
+
+enum spe_type {
+	SPE_TYPE_LOGICAL = 0,
+};
+
+/**
+ * struct spe_shadow - logical spe shadow register area.
+ *
+ * Read-only shadow of spe registers.
+ */
+
+struct spe_shadow {
+	u8 padding_0000[0x0140];
+	u64 int_status_class0_RW;       /* 0x0140 */
+	u64 int_status_class1_RW;       /* 0x0148 */
+	u64 int_status_class2_RW;       /* 0x0150 */
+	u8 padding_0158[0x0610-0x0158];
+	u64 mfc_dsisr_RW;               /* 0x0610 */
+	u8 padding_0618[0x0620-0x0618];
+	u64 mfc_dar_RW;                 /* 0x0620 */
+	u8 padding_0628[0x0800-0x0628];
+	u64 mfc_dsipr_R;                /* 0x0800 */
+	u8 padding_0808[0x0810-0x0808];
+	u64 mfc_lscrr_R;                /* 0x0810 */
+	u8 padding_0818[0x0c00-0x0818];
+	u64 mfc_cer_R;                  /* 0x0c00 */
+	u8 padding_0c08[0x0f00-0x0c08];
+	u64 spe_execution_status;       /* 0x0f00 */
+	u8 padding_0f08[0x1000-0x0f08];
+} __attribute__ ((packed));
+
+
+/**
+ * enum spe_ex_state - Logical spe execution state.
+ * @spe_ex_state_unexecutable: Uninitialized.
+ * @spe_ex_state_executable: Enabled, not ready.
+ * @spe_ex_state_executed: Ready for use.
+ *
+ * The execution state (status) of the logical spe as reported in
+ * struct spe_shadow:spe_execution_status.
+ */
+
+enum spe_ex_state {
+	SPE_EX_STATE_UNEXECUTABLE = 0,
+	SPE_EX_STATE_EXECUTABLE = 2,
+	SPE_EX_STATE_EXECUTED = 3,
+};
+
+/**
+ * struct priv1_cache - Cached values of priv1 registers.
+ * @masks[]: Array of cached spe interrupt masks, indexed by class.
+ * @sr1: Cached mfc_sr1 register.
+ * @tclass_id: Cached mfc_tclass_id register.
+ */
+
+struct priv1_cache {
+	u64 masks[3];
+	u64 sr1;
+	u64 tclass_id;
+};
+
+/**
+ * struct spu_pdata - Platform state variables.
+ * @spe_id: HV spe id returned by lv1_construct_logical_spe().
+ * @resource_id: HV spe resource id returned by
+ * 	ps3_repository_read_spe_resource_id().
+ * @priv2_addr: lpar address of spe priv2 area returned by
+ * 	lv1_construct_logical_spe().
+ * @shadow_addr: lpar address of spe register shadow area returned by
+ * 	lv1_construct_logical_spe().
+ * @shadow: Virtual (ioremap) address of spe register shadow area.
+ * @cache: Cached values of priv1 registers.
+ */
+
+struct spu_pdata {
+	u64 spe_id;
+	u64 resource_id;
+	u64 priv2_addr;
+	u64 shadow_addr;
+	struct spe_shadow __iomem *shadow;
+	struct priv1_cache cache;
+};
+
+static struct spu_pdata *spu_pdata(struct spu *spu)
+{
+	return spu->pdata;
+}
+
+#define dump_areas(_a, _b, _c, _d, _e) \
+	_dump_areas(_a, _b, _c, _d, _e, __func__, __LINE__)
+static void _dump_areas(unsigned int spe_id, unsigned long priv2,
+	unsigned long problem, unsigned long ls, unsigned long shadow,
+	const char* func, int line)
+{
+	pr_debug("%s:%d: spe_id:  %xh (%u)\n", func, line, spe_id, spe_id);
+	pr_debug("%s:%d: priv2:   %lxh\n", func, line, priv2);
+	pr_debug("%s:%d: problem: %lxh\n", func, line, problem);
+	pr_debug("%s:%d: ls:      %lxh\n", func, line, ls);
+	pr_debug("%s:%d: shadow:  %lxh\n", func, line, shadow);
+}
+
+static unsigned long get_vas_id(void)
+{
+	unsigned long id;
+
+	lv1_get_logical_ppe_id(&id);
+	lv1_get_virtual_address_space_id_of_ppe(id, &id);
+
+	return id;
+}
+
+static int __init construct_spu(struct spu *spu)
+{
+	int result;
+	unsigned long unused;
+
+	result = lv1_construct_logical_spe(PAGE_SHIFT, PAGE_SHIFT, PAGE_SHIFT,
+		PAGE_SHIFT, PAGE_SHIFT, get_vas_id(), SPE_TYPE_LOGICAL,
+		&spu_pdata(spu)->priv2_addr, &spu->problem_phys,
+		&spu->local_store_phys, &unused,
+		&spu_pdata(spu)->shadow_addr,
+		&spu_pdata(spu)->spe_id);
+
+	if (result) {
+		pr_debug("%s:%d: lv1_construct_logical_spe failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+		return result;
+	}
+
+	return result;
+}
+
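+/*
+ * add_spu_pages - Hotplug-add an spe memory area (local store or problem
+ * state) to node 0 so that the region is backed by struct pages.
+ */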
+static int __init add_spu_pages(unsigned long start_addr, unsigned long size)
+{
+	int result;
+	unsigned long start_pfn;
+	unsigned long nr_pages;
+	struct pglist_data *pgdata;
+	struct zone *zone;
+
+	BUG_ON(!mem_init_done);
+
+	start_pfn = start_addr >> PAGE_SHIFT;
+	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	pgdata = NODE_DATA(0);
+	zone = pgdata->node_zones;
+
+	result = __add_pages(zone, start_pfn, nr_pages);
+
+	if (result)
+		pr_debug("%s:%d: __add_pages failed: (%d)\n",
+			__func__, __LINE__, result);
+
+	return result;
+}
+
+static void spu_unmap(struct spu *spu)
+{
+	iounmap(spu->priv2);
+	iounmap(spu->problem);
+	iounmap((__force u8 __iomem *)spu->local_store);
+	iounmap(spu_pdata(spu)->shadow);
+}
+
+static int __init setup_areas(struct spu *spu)
+{
+	struct table {char* name; unsigned long addr; unsigned long size;};
+	int result;
+
+	/* setup pages */
+
+	result = add_spu_pages(spu->local_store_phys, LS_SIZE);
+	if (result)
+		goto fail_add;
+
+	result = add_spu_pages(spu->problem_phys, sizeof(struct spu_problem));
+	if (result)
+		goto fail_add;
+
+	/* ioremap */
+
+	spu_pdata(spu)->shadow = __ioremap(
+		spu_pdata(spu)->shadow_addr, sizeof(struct spe_shadow),
+		PAGE_READONLY | _PAGE_NO_CACHE | _PAGE_GUARDED);
+	if (!spu_pdata(spu)->shadow) {
+		pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
+		goto fail_ioremap;
+	}
+
+	spu->local_store = ioremap(spu->local_store_phys, LS_SIZE);
+	if (!spu->local_store) {
+		pr_debug("%s:%d: ioremap local_store failed\n",
+			__func__, __LINE__);
+		goto fail_ioremap;
+	}
+
+	spu->problem = ioremap(spu->problem_phys,
+		sizeof(struct spu_problem));
+	if (!spu->problem) {
+		pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
+		goto fail_ioremap;
+	}
+
+	spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
+		sizeof(struct spu_priv2));
+	if (!spu->priv2) {
+		pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
+		goto fail_ioremap;
+	}
+
+	dump_areas(spu_pdata(spu)->spe_id, spu_pdata(spu)->priv2_addr,
+		spu->problem_phys, spu->local_store_phys,
+		spu_pdata(spu)->shadow_addr);
+	dump_areas(spu_pdata(spu)->spe_id, (unsigned long)spu->priv2,
+		(unsigned long)spu->problem, (unsigned long)spu->local_store,
+		(unsigned long)spu_pdata(spu)->shadow);
+
+	return 0;
+
+fail_ioremap:
+	spu_unmap(spu);
+fail_add:
+	return result;
+}
+
+static int __init setup_interrupts(struct spu *spu)
+{
+	int result;
+
+	result = ps3_alloc_spe_irq(spu_pdata(spu)->spe_id, 0,
+		&spu->irqs[0]);
+
+	if (result)
+		goto fail_alloc_0;
+
+	result = ps3_alloc_spe_irq(spu_pdata(spu)->spe_id, 1,
+		&spu->irqs[1]);
+
+	if (result)
+		goto fail_alloc_1;
+
+	result = ps3_alloc_spe_irq(spu_pdata(spu)->spe_id, 2,
+		&spu->irqs[2]);
+
+	if (result)
+		goto fail_alloc_2;
+
+	return result;
+
+fail_alloc_2:
+	ps3_free_spe_irq(spu->irqs[1]);
+fail_alloc_1:
+	ps3_free_spe_irq(spu->irqs[0]);
+fail_alloc_0:
+	spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
+	return result;
+}
+
+static int __init enable_spu(struct spu *spu)
+{
+	int result;
+
+	result = lv1_enable_logical_spe(spu_pdata(spu)->spe_id,
+		spu_pdata(spu)->resource_id);
+
+	if (result) {
+		pr_debug("%s:%d: lv1_enable_logical_spe failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+		goto fail_enable;
+	}
+
+	result = setup_areas(spu);
+
+	if (result)
+		goto fail_areas;
+
+	result = setup_interrupts(spu);
+
+	if (result)
+		goto fail_interrupts;
+
+	return 0;
+
+fail_interrupts:
+	spu_unmap(spu);
+fail_areas:
+	lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
+fail_enable:
+	return result;
+}
+
+static int ps3_destroy_spu(struct spu *spu)
+{
+	int result;
+
+	pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);
+
+	result = lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
+	BUG_ON(result);
+
+	ps3_free_spe_irq(spu->irqs[2]);
+	ps3_free_spe_irq(spu->irqs[1]);
+	ps3_free_spe_irq(spu->irqs[0]);
+
+	spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
+
+	spu_unmap(spu);
+
+	result = lv1_destruct_logical_spe(spu_pdata(spu)->spe_id);
+	BUG_ON(result);
+
+	kfree(spu->pdata);
+	spu->pdata = NULL;
+
+	return 0;
+}
+
+static int __init ps3_create_spu(struct spu *spu, void *data)
+{
+	int result;
+
+	pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);
+
+	spu->pdata = kzalloc(sizeof(struct spu_pdata),
+		GFP_KERNEL);
+
+	if (!spu->pdata) {
+		result = -ENOMEM;
+		goto fail_malloc;
+	}
+
+	spu_pdata(spu)->resource_id = (unsigned long)data;
+
+	/* Init cached reg values to HV defaults. */
+
+	spu_pdata(spu)->cache.sr1 = 0x33;
+
+	result = construct_spu(spu);
+
+	if (result)
+		goto fail_construct;
+
+	/* For now, just go ahead and enable it. */
+
+	result = enable_spu(spu);
+
+	if (result)
+		goto fail_enable;
+
+	/* Make sure the spu is in SPE_EX_STATE_EXECUTED. */
+
+	/* need something better here!!! */
+	while (in_be64(&spu_pdata(spu)->shadow->spe_execution_status)
+		!= SPE_EX_STATE_EXECUTED)
+		(void)0;
+
+	return result;
+
+fail_enable:
+fail_construct:
+	ps3_destroy_spu(spu);
+fail_malloc:
+	return result;
+}
+
+static int __init ps3_enumerate_spus(int (*fn)(void *data))
+{
+	int result;
+	unsigned int num_resource_id;
+	unsigned int i;
+
+	result = ps3_repository_read_num_spu_resource_id(&num_resource_id);
+
+	pr_debug("%s:%d: num_resource_id %u\n", __func__, __LINE__,
+		num_resource_id);
+
+	/*
+	 * For now, just create logical spus equal to the number
+	 * of physical spus reserved for the partition.
+	 */
+
+	for (i = 0; i < num_resource_id; i++) {
+		enum ps3_spu_resource_type resource_type;
+		unsigned int resource_id;
+
+		result = ps3_repository_read_spu_resource_id(i,
+			&resource_type, &resource_id);
+
+		if (result)
+			break;
+
+		if (resource_type == PS3_SPU_RESOURCE_TYPE_EXCLUSIVE) {
+			result = fn((void*)(unsigned long)resource_id);
+
+			if (result)
+				break;
+		}
+	}
+
+	if (result)
+		printk(KERN_WARNING "%s:%d: Error initializing spus\n",
+			__func__, __LINE__);
+
+	return result;
+}
+
+const struct spu_management_ops spu_management_ps3_ops = {
+	.enumerate_spus = ps3_enumerate_spus,
+	.create_spu = ps3_create_spu,
+	.destroy_spu = ps3_destroy_spu,
+};
+
+/* spu_priv1_ops */
+
+static void int_mask_and(struct spu *spu, int class, u64 mask)
+{
+	u64 old_mask;
+
+	/* are these serialized by caller??? */
+	old_mask = spu_int_mask_get(spu, class);
+	spu_int_mask_set(spu, class, old_mask & mask);
+}
+
+static void int_mask_or(struct spu *spu, int class, u64 mask)
+{
+	u64 old_mask;
+
+	old_mask = spu_int_mask_get(spu, class);
+	spu_int_mask_set(spu, class, old_mask | mask);
+}
+
+static void int_mask_set(struct spu *spu, int class, u64 mask)
+{
+	spu_pdata(spu)->cache.masks[class] = mask;
+	lv1_set_spe_interrupt_mask(spu_pdata(spu)->spe_id, class,
+		spu_pdata(spu)->cache.masks[class]);
+}
+
+static u64 int_mask_get(struct spu *spu, int class)
+{
+	return spu_pdata(spu)->cache.masks[class];
+}
+
+static void int_stat_clear(struct spu *spu, int class, u64 stat)
+{
+	/* Note that MFC_DSISR will be cleared when class1[MF] is set. */
+
+	lv1_clear_spe_interrupt_status(spu_pdata(spu)->spe_id, class,
+		stat, 0);
+}
+
+static u64 int_stat_get(struct spu *spu, int class)
+{
+	u64 stat;
+
+	lv1_get_spe_interrupt_status(spu_pdata(spu)->spe_id, class, &stat);
+	return stat;
+}
+
+static void cpu_affinity_set(struct spu *spu, int cpu)
+{
+	/* No support. */
+}
+
+static u64 mfc_dar_get(struct spu *spu)
+{
+	return in_be64(&spu_pdata(spu)->shadow->mfc_dar_RW);
+}
+
+static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
+{
+	/* Nothing to do, cleared in int_stat_clear(). */
+}
+
+static u64 mfc_dsisr_get(struct spu *spu)
+{
+	return in_be64(&spu_pdata(spu)->shadow->mfc_dsisr_RW);
+}
+
+static void mfc_sdr_setup(struct spu *spu)
+{
+	/* Nothing to do. */
+}
+
+static void mfc_sr1_set(struct spu *spu, u64 sr1)
+{
+	/* Check bits allowed by HV. */
+
+	static const u64 allowed = ~(MFC_STATE1_LOCAL_STORAGE_DECODE_MASK
+		| MFC_STATE1_PROBLEM_STATE_MASK);
+
+	BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed));
+
+	spu_pdata(spu)->cache.sr1 = sr1;
+	lv1_set_spe_privilege_state_area_1_register(
+		spu_pdata(spu)->spe_id,
+		offsetof(struct spu_priv1, mfc_sr1_RW),
+		spu_pdata(spu)->cache.sr1);
+}
+
+static u64 mfc_sr1_get(struct spu *spu)
+{
+	return spu_pdata(spu)->cache.sr1;
+}
+
+static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
+{
+	spu_pdata(spu)->cache.tclass_id = tclass_id;
+	lv1_set_spe_privilege_state_area_1_register(
+		spu_pdata(spu)->spe_id,
+		offsetof(struct spu_priv1, mfc_tclass_id_RW),
+		spu_pdata(spu)->cache.tclass_id);
+}
+
+static u64 mfc_tclass_id_get(struct spu *spu)
+{
+	return spu_pdata(spu)->cache.tclass_id;
+}
+
+static void tlb_invalidate(struct spu *spu)
+{
+	/* Nothing to do. */
+}
+
+static void resource_allocation_groupID_set(struct spu *spu, u64 id)
+{
+	/* No support. */
+}
+
+static u64 resource_allocation_groupID_get(struct spu *spu)
+{
+	return 0; /* No support. */
+}
+
+static void resource_allocation_enable_set(struct spu *spu, u64 enable)
+{
+	/* No support. */
+}
+
+static u64 resource_allocation_enable_get(struct spu *spu)
+{
+	return 0; /* No support. */
+}
+
+const struct spu_priv1_ops spu_priv1_ps3_ops = {
+	.int_mask_and = int_mask_and,
+	.int_mask_or = int_mask_or,
+	.int_mask_set = int_mask_set,
+	.int_mask_get = int_mask_get,
+	.int_stat_clear = int_stat_clear,
+	.int_stat_get = int_stat_get,
+	.cpu_affinity_set = cpu_affinity_set,
+	.mfc_dar_get = mfc_dar_get,
+	.mfc_dsisr_set = mfc_dsisr_set,
+	.mfc_dsisr_get = mfc_dsisr_get,
+	.mfc_sdr_setup = mfc_sdr_setup,
+	.mfc_sr1_set = mfc_sr1_set,
+	.mfc_sr1_get = mfc_sr1_get,
+	.mfc_tclass_id_set = mfc_tclass_id_set,
+	.mfc_tclass_id_get = mfc_tclass_id_get,
+	.tlb_invalidate = tlb_invalidate,
+	.resource_allocation_groupID_set = resource_allocation_groupID_set,
+	.resource_allocation_groupID_get = resource_allocation_groupID_get,
+	.resource_allocation_enable_set = resource_allocation_enable_set,
+	.resource_allocation_enable_get = resource_allocation_enable_get,
+};
+
+void ps3_spu_set_platform(void)
+{
+	spu_priv1_ops = &spu_priv1_ps3_ops;
+	spu_management_ops = &spu_management_ps3_ops;
+}
diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c
new file mode 100644
index 0000000..1bae8b1
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/time.c
@@ -0,0 +1,104 @@
+/*
+ *  PS3 time and rtc routines.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/rtc.h>
+#include <asm/lv1call.h>
+#include <asm/ps3.h>
+
+#include "platform.h"
+
+#define dump_tm(_a) _dump_tm(_a, __func__, __LINE__)
+static void _dump_tm(const struct rtc_time *tm, const char* func, int line)
+{
+	pr_debug("%s:%d tm_sec  %d\n", func, line, tm->tm_sec);
+	pr_debug("%s:%d tm_min  %d\n", func, line, tm->tm_min);
+	pr_debug("%s:%d tm_hour %d\n", func, line, tm->tm_hour);
+	pr_debug("%s:%d tm_mday %d\n", func, line, tm->tm_mday);
+	pr_debug("%s:%d tm_mon  %d\n", func, line, tm->tm_mon);
+	pr_debug("%s:%d tm_year %d\n", func, line, tm->tm_year);
+	pr_debug("%s:%d tm_wday %d\n", func, line, tm->tm_wday);
+}
+
+#define dump_time(_a) _dump_time(_a, __func__, __LINE__)
+static void __attribute__ ((unused)) _dump_time(int time, const char* func,
+	int line)
+{
+	struct rtc_time tm;
+
+	to_tm(time, &tm);
+
+	pr_debug("%s:%d time    %d\n", func, line, time);
+	_dump_tm(&tm, func, line);
+}
+
+/**
+ * rtc_shift - Difference in seconds between 1970 and the ps3 rtc value.
+ */
+
+static s64 rtc_shift;
+
+void __init ps3_calibrate_decr(void)
+{
+	int result;
+	u64 tmp;
+
+	result = ps3_repository_read_be_tb_freq(0, &tmp);
+	BUG_ON(result);
+
+	ppc_tb_freq = tmp;
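+	/* The processor clock runs at 40x the timebase frequency on the PS3. */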
+	ppc_proc_freq = ppc_tb_freq * 40;
+
+	rtc_shift = ps3_os_area_rtc_diff();
+}
+
+static u64 read_rtc(void)
+{
+	int result;
+	u64 rtc_val;
+	u64 tb_val;
+
+	result = lv1_get_rtc(&rtc_val, &tb_val);
+	BUG_ON(result);
+
+	return rtc_val;
+}
+
+int ps3_set_rtc_time(struct rtc_time *tm)
+{
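+	/*
+	 * The hypervisor rtc itself is not written; only the software offset
+	 * (rtc_shift) relative to the lv1 rtc value is updated here.
+	 */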
+	u64 now = mktime(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
+		tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+	rtc_shift = now - read_rtc();
+	return 0;
+}
+
+void ps3_get_rtc_time(struct rtc_time *tm)
+{
+	to_tm(read_rtc() + rtc_shift, tm);
+	tm->tm_year -= 1900;
+	tm->tm_mon -= 1;
+}
+
+unsigned long __init ps3_get_boot_time(void)
+{
+	return read_rtc() + rtc_shift;
+}
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 1370774..49037ed 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -37,8 +37,8 @@
 /* EEH event workqueue setup. */
 static DEFINE_SPINLOCK(eeh_eventlist_lock);
 LIST_HEAD(eeh_eventlist);
-static void eeh_thread_launcher(void *);
-DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
+static void eeh_thread_launcher(struct work_struct *);
+DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
 
 /* Serialize reset sequences for a given pci device */
 DEFINE_MUTEX(eeh_event_mutex);
@@ -103,7 +103,7 @@
  * eeh_thread_launcher
  * @dummy - unused
  */
-static void eeh_thread_launcher(void *dummy)
+static void eeh_thread_launcher(struct work_struct *dummy)
 {
 	if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
 		printk(KERN_ERR "Failed to start EEH daemon\n");
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 556c279..3c95392 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -309,7 +309,7 @@
 	tbl->it_size = size >> IOMMU_PAGE_SHIFT;
 }
 
-static void iommu_bus_setup_pSeries(struct pci_bus *bus)
+static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
 {
 	struct device_node *dn;
 	struct iommu_table *tbl;
@@ -318,10 +318,9 @@
 	struct pci_dn *pci;
 	int children;
 
-	DBG("iommu_bus_setup_pSeries, bus %p, bus->self %p\n", bus, bus->self);
-
 	dn = pci_bus_to_OF_node(bus);
-	pci = PCI_DN(dn);
+
+	DBG("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);
 
 	if (bus->self) {
 		/* This is not a root bus, any setup will be done for the
@@ -329,6 +328,7 @@
 		 */
 		return;
 	}
+	pci = PCI_DN(dn);
 
 	/* Check if the ISA bus on the system is under
 	 * this PHB.
@@ -390,17 +390,17 @@
 }
 
 
-static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
+static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
 {
 	struct iommu_table *tbl;
 	struct device_node *dn, *pdn;
 	struct pci_dn *ppci;
 	const void *dma_window = NULL;
 
-	DBG("iommu_bus_setup_pSeriesLP, bus %p, bus->self %p\n", bus, bus->self);
-
 	dn = pci_bus_to_OF_node(bus);
 
+	DBG("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n", dn->full_name);
+
 	/* Find nearest ibm,dma-window, walking up the device tree */
 	for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
 		dma_window = get_property(pdn, "ibm,dma-window", NULL);
@@ -409,11 +409,15 @@
 	}
 
 	if (dma_window == NULL) {
-		DBG("iommu_bus_setup_pSeriesLP: bus %s seems to have no ibm,dma-window property\n", dn->full_name);
+		DBG("  no ibm,dma-window property !\n");
 		return;
 	}
 
 	ppci = PCI_DN(pdn);
+
+	DBG("  parent is %s, iommu_table: 0x%p\n",
+	    pdn->full_name, ppci->iommu_table);
+
 	if (!ppci->iommu_table) {
 		/* Bussubno hasn't been copied yet.
 		 * Do it now because iommu_table_setparms_lpar needs it.
@@ -427,6 +431,7 @@
 		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
 
 		ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
+		DBG("  created table: %p\n", ppci->iommu_table);
 	}
 
 	if (pdn != dn)
@@ -434,27 +439,27 @@
 }
 
 
-static void iommu_dev_setup_pSeries(struct pci_dev *dev)
+static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
 {
-	struct device_node *dn, *mydn;
+	struct device_node *dn;
 	struct iommu_table *tbl;
 
-	DBG("iommu_dev_setup_pSeries, dev %p (%s)\n", dev, pci_name(dev));
+	DBG("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));
 
-	mydn = dn = pci_device_to_OF_node(dev);
+	dn = dev->dev.archdata.of_node;
 
 	/* If we're the direct child of a root bus, then we need to allocate
 	 * an iommu table ourselves. The bus setup code should have setup
 	 * the window sizes already.
 	 */
 	if (!dev->bus->self) {
+		struct pci_controller *phb = PCI_DN(dn)->phb;
+
 		DBG(" --> first child, no bridge. Allocating iommu table.\n");
 		tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
-				   PCI_DN(dn)->phb->node);
-		iommu_table_setparms(PCI_DN(dn)->phb, dn, tbl);
-		PCI_DN(dn)->iommu_table = iommu_init_table(tbl,
-						PCI_DN(dn)->phb->node);
-
+				   phb->node);
+		iommu_table_setparms(phb, dn, tbl);
+		dev->dev.archdata.dma_data = iommu_init_table(tbl, phb->node);
 		return;
 	}
 
@@ -465,11 +470,11 @@
 	while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)
 		dn = dn->parent;
 
-	if (dn && PCI_DN(dn)) {
-		PCI_DN(mydn)->iommu_table = PCI_DN(dn)->iommu_table;
-	} else {
-		DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, pci_name(dev));
-	}
+	if (dn && PCI_DN(dn))
+		dev->dev.archdata.dma_data = PCI_DN(dn)->iommu_table;
+	else
+		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
+		       pci_name(dev));
 }
 
 static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
@@ -495,13 +500,15 @@
 	.notifier_call = iommu_reconfig_notifier,
 };
 
-static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
+static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
 {
 	struct device_node *pdn, *dn;
 	struct iommu_table *tbl;
 	const void *dma_window = NULL;
 	struct pci_dn *pci;
 
+	DBG("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
+
 	/* dev setup for LPAR is a little tricky, since the device tree might
 	 * contain the dma-window properties per-device and not necessarily
 	 * for the bus. So we need to search upwards in the tree until we
@@ -509,9 +516,7 @@
 	 * already allocated.
 	 */
 	dn = pci_device_to_OF_node(dev);
-
-	DBG("iommu_dev_setup_pSeriesLP, dev %p (%s) %s\n",
-	     dev, pci_name(dev), dn->full_name);
+	DBG("  node is %s\n", dn->full_name);
 
 	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
 	     pdn = pdn->parent) {
@@ -520,16 +525,17 @@
 			break;
 	}
 
+	DBG("  parent is %s\n", pdn->full_name);
+
 	/* Check for parent == NULL so we don't try to setup the empty EADS
 	 * slots on POWER4 machines.
 	 */
 	if (dma_window == NULL || pdn->parent == NULL) {
-		DBG("No dma window for device, linking to parent\n");
-		PCI_DN(dn)->iommu_table = PCI_DN(pdn)->iommu_table;
+		DBG("  no dma window for device, linking to parent\n");
+		dev->dev.archdata.dma_data = PCI_DN(pdn)->iommu_table;
 		return;
-	} else {
-		DBG("Found DMA window, allocating table\n");
 	}
+	DBG("  found DMA window, table: %p\n", pci->iommu_table);
 
 	pci = PCI_DN(pdn);
 	if (!pci->iommu_table) {
@@ -542,24 +548,20 @@
 		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
 
 		pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
+		DBG("  created table: %p\n", pci->iommu_table);
 	}
 
-	if (pdn != dn)
-		PCI_DN(dn)->iommu_table = pci->iommu_table;
+	dev->dev.archdata.dma_data = pci->iommu_table;
 }
 
-static void iommu_bus_setup_null(struct pci_bus *b) { }
-static void iommu_dev_setup_null(struct pci_dev *d) { }
-
 /* These are called very early. */
 void iommu_init_early_pSeries(void)
 {
 	if (of_chosen && get_property(of_chosen, "linux,iommu-off", NULL)) {
 		/* Direct I/O, IOMMU off */
-		ppc_md.iommu_dev_setup = iommu_dev_setup_null;
-		ppc_md.iommu_bus_setup = iommu_bus_setup_null;
-		pci_direct_iommu_init();
-
+		ppc_md.pci_dma_dev_setup = NULL;
+		ppc_md.pci_dma_bus_setup = NULL;
+		pci_dma_ops = &dma_direct_ops;
 		return;
 	}
 
@@ -572,19 +574,19 @@
 			ppc_md.tce_free	 = tce_free_pSeriesLP;
 		}
 		ppc_md.tce_get   = tce_get_pSeriesLP;
-		ppc_md.iommu_bus_setup = iommu_bus_setup_pSeriesLP;
-		ppc_md.iommu_dev_setup = iommu_dev_setup_pSeriesLP;
+		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
+		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
 	} else {
 		ppc_md.tce_build = tce_build_pSeries;
 		ppc_md.tce_free  = tce_free_pSeries;
 		ppc_md.tce_get   = tce_get_pseries;
-		ppc_md.iommu_bus_setup = iommu_bus_setup_pSeries;
-		ppc_md.iommu_dev_setup = iommu_dev_setup_pSeries;
+		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries;
+		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries;
 	}
 
 
 	pSeries_reconfig_notifier_register(&iommu_reconfig_nb);
 
-	pci_iommu_init();
+	pci_dma_ops = &dma_iommu_ops;
 }
 
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 1820a0b..721436d 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -282,7 +282,7 @@
 	}
 }
 
-long pSeries_lpar_hpte_insert(unsigned long hpte_group,
+static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
  			      unsigned long va, unsigned long pa,
  			      unsigned long rflags, unsigned long vflags,
  			      int psize)
@@ -506,7 +506,7 @@
  * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
  * lock.
  */
-void pSeries_lpar_flush_hash_range(unsigned long number, int local)
+static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
 {
 	int i;
 	unsigned long flags = 0;
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 410a6bc..715db5c 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -29,8 +29,6 @@
 #include <asm/prom.h>
 #include <asm/ppc-pci.h>
 
-static int __devinitdata s7a_workaround = -1;
-
 #if 0
 void pcibios_name_device(struct pci_dev *dev)
 {
@@ -57,39 +55,6 @@
 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device);
 #endif
 
-static void __devinit check_s7a(void)
-{
-	struct device_node *root;
-	const char *model;
-
-	s7a_workaround = 0;
-	root = of_find_node_by_path("/");
-	if (root) {
-		model = get_property(root, "model", NULL);
-		if (model && !strcmp(model, "IBM,7013-S7A"))
-			s7a_workaround = 1;
-		of_node_put(root);
-	}
-}
-
-void __devinit pSeries_irq_bus_setup(struct pci_bus *bus)
-{
-	struct pci_dev *dev;
-
-	if (s7a_workaround < 0)
-		check_s7a();
-	list_for_each_entry(dev, &bus->devices, bus_list) {
-		pci_read_irq_line(dev);
-		if (s7a_workaround) {
-			if (dev->irq > 16) {
-				dev->irq -= 3;
-				pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
-					dev->irq);
-			}
-		}
-	}
-}
-
 static void __init pSeries_request_regions(void)
 {
 	if (!isa_io_base)
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 6bfacc2..ac56b86 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -93,8 +93,8 @@
 		if (list_empty(&dev->global_list)) {
 			int i;
 
-			/* Need to setup IOMMU tables */
-			ppc_md.iommu_dev_setup(dev);
+			/* Fill device archdata and setup iommu table */
+			pcibios_setup_new_device(dev);
 
 			if(fix_bus)
 				pcibios_fixup_device_resources(dev, bus);
@@ -195,7 +195,7 @@
 	phb = pcibios_alloc_controller(dn);
 	if (!phb)
 		return NULL;
-	setup_phb(dn, phb);
+	rtas_setup_phb(phb);
 	pci_process_bridge_OF_ranges(phb, dn, 0);
 
 	pci_setup_phb_io_dynamic(phb, primary);
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 1773103..4ad33e4 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -268,11 +268,10 @@
 static struct property *new_property(const char *name, const int length,
 				     const unsigned char *value, struct property *last)
 {
-	struct property *new = kmalloc(sizeof(*new), GFP_KERNEL);
+	struct property *new = kzalloc(sizeof(*new), GFP_KERNEL);
 
 	if (!new)
 		return NULL;
-	memset(new, 0, sizeof(*new));
 
 	if (!(new->name = kmalloc(strlen(name) + 1, GFP_KERNEL)))
 		goto cleanup;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 89a8119..0dc2548 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -347,6 +347,7 @@
 }
 arch_initcall(pSeries_init_panel);
 
+#ifdef CONFIG_HOTPLUG_CPU
 static void pSeries_mach_cpu_die(void)
 {
 	local_irq_disable();
@@ -357,6 +358,9 @@
 	BUG();
 	for(;;);
 }
+#else
+#define pSeries_mach_cpu_die NULL
+#endif
 
 static int pseries_set_dabr(unsigned long dabr)
 {
@@ -553,7 +557,6 @@
 	.log_error		= pSeries_log_error,
 	.pcibios_fixup		= pSeries_final_fixup,
 	.pci_probe_mode		= pSeries_pci_probe_mode,
-	.irq_bus_setup		= pSeries_irq_bus_setup,
 	.restart		= rtas_restart,
 	.power_off		= rtas_power_off,
 	.halt			= rtas_halt,
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index d071abe..b5b2b11 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -656,13 +656,38 @@
 	set_irq_chained_handler(cascade, pseries_8259_cascade);
 }
 
+static struct device_node *cpuid_to_of_node(int cpu)
+{
+	struct device_node *np;
+	u32 hcpuid = get_hard_smp_processor_id(cpu);
+
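+	/*
+	 * Scan the "cpu" nodes for the one whose "ibm,ppc-interrupt-server#s"
+	 * (or "reg") property contains the hard SMP processor id of this cpu.
+	 */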
+	for_each_node_by_type(np, "cpu") {
+		int i, len;
+		const u32 *intserv;
+
+		intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
+
+		if (!intserv)
+			intserv = get_property(np, "reg", &len);
+
+		i = len / sizeof(u32);
+
+		while (i--)
+			if (intserv[i] == hcpuid)
+				return np;
+	}
+
+	return NULL;
+}
+
 void __init xics_init_IRQ(void)
 {
-	int i;
+	int i, j;
 	struct device_node *np;
 	u32 ilen, indx = 0;
-	const u32 *ireg;
+	const u32 *ireg, *isize;
 	int found = 0;
+	u32 hcpuid;
 
 	ppc64_boot_msg(0x20, "XICS Init");
 
@@ -683,26 +708,31 @@
 	xics_init_host();
 
 	/* Find the server numbers for the boot cpu. */
-	for (np = of_find_node_by_type(NULL, "cpu");
-	     np;
-	     np = of_find_node_by_type(np, "cpu")) {
-		ireg = get_property(np, "reg", &ilen);
-		if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) {
-			ireg = get_property(np,
-					"ibm,ppc-interrupt-gserver#s", &ilen);
-			i = ilen / sizeof(int);
-			if (ireg && i > 0) {
-				default_server = ireg[0];
-				/* take last element */
-				default_distrib_server = ireg[i-1];
-			}
-			ireg = get_property(np,
+	np = cpuid_to_of_node(boot_cpuid);
+	BUG_ON(!np);
+	ireg = get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
+	if (!ireg)
+		goto skip_gserver_check;
+	i = ilen / sizeof(int);
+	hcpuid = get_hard_smp_processor_id(boot_cpuid);
+
+	/* The global interrupt distribution server is specified in the last
+	 * entry of the "ibm,ppc-interrupt-gserver#s" property. Get the last
+	 * entry from this property for the current boot cpu id and use it as
+	 * the default distribution server.
+	 */
+	for (j = 0; j < i; j += 2) {
+		if (ireg[j] == hcpuid) {
+			default_server = hcpuid;
+			default_distrib_server = ireg[j+1];
+
+			isize = get_property(np,
 					"ibm,interrupt-server#-size", NULL);
-			if (ireg)
-				interrupt_server_size = *ireg;
-			break;
+			if (isize)
+				interrupt_server_size = *isize;
 		}
 	}
+skip_gserver_check:
 	of_node_put(np);
 
 	if (firmware_has_feature(FW_FEATURE_LPAR))
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 91f052d..6cc3459 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -5,14 +5,13 @@
 obj-$(CONFIG_MPIC)		+= mpic.o
 obj-$(CONFIG_PPC_INDIRECT_PCI)	+= indirect_pci.o
 obj-$(CONFIG_PPC_MPC106)	+= grackle.o
-obj-$(CONFIG_BOOKE)		+= dcr.o
-obj-$(CONFIG_40x)		+= dcr.o
+obj-$(CONFIG_PPC_DCR)		+= dcr.o dcr-low.o
 obj-$(CONFIG_U3_DART)		+= dart_iommu.o
 obj-$(CONFIG_MMIO_NVRAM)	+= mmio_nvram.o
 obj-$(CONFIG_FSL_SOC)		+= fsl_soc.o
-obj-$(CONFIG_PPC_TODC)		+= todc.o
 obj-$(CONFIG_TSI108_BRIDGE)	+= tsi108_pci.o tsi108_dev.o
 obj-$(CONFIG_QUICC_ENGINE)	+= qe_lib/
+obj-$(CONFIG_MTD)		+= rom.o
 
 ifeq ($(CONFIG_PPC_MERGE),y)
 obj-$(CONFIG_PPC_I8259)		+= i8259.o
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 572b784..1488535 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -48,9 +48,6 @@
 
 #include "dart.h"
 
-extern int iommu_is_off;
-extern int iommu_force_on;
-
 /* Physical base address and size of the DART table */
 unsigned long dart_tablebase; /* exported to htab_initialize */
 static unsigned long dart_tablesize;
@@ -289,24 +286,15 @@
 	set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map);
 }
 
-static void iommu_dev_setup_dart(struct pci_dev *dev)
+static void pci_dma_dev_setup_dart(struct pci_dev *dev)
 {
-	struct device_node *dn;
-
 	/* We only have one iommu table on the mac for now, which makes
 	 * things simple. Setup all PCI devices to point to this table
-	 *
-	 * We must use pci_device_to_OF_node() to make sure that
-	 * we get the real "final" pointer to the device in the
-	 * pci_dev sysdata and not the temporary PHB one
 	 */
-	dn = pci_device_to_OF_node(dev);
-
-	if (dn)
-		PCI_DN(dn)->iommu_table = &iommu_table_dart;
+	dev->dev.archdata.dma_data = &iommu_table_dart;
 }
 
-static void iommu_bus_setup_dart(struct pci_bus *bus)
+static void pci_dma_bus_setup_dart(struct pci_bus *bus)
 {
 	struct device_node *dn;
 
@@ -321,9 +309,6 @@
 		PCI_DN(dn)->iommu_table = &iommu_table_dart;
 }
 
-static void iommu_dev_setup_null(struct pci_dev *dev) { }
-static void iommu_bus_setup_null(struct pci_bus *bus) { }
-
 void iommu_init_early_dart(void)
 {
 	struct device_node *dn;
@@ -344,22 +329,21 @@
 
 	/* Initialize the DART HW */
 	if (dart_init(dn) == 0) {
-		ppc_md.iommu_dev_setup = iommu_dev_setup_dart;
-		ppc_md.iommu_bus_setup = iommu_bus_setup_dart;
+		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_dart;
+		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_dart;
 
 		/* Setup pci_dma ops */
-		pci_iommu_init();
-
+		pci_dma_ops = &dma_iommu_ops;
 		return;
 	}
 
  bail:
 	/* If init failed, use direct iommu and null setup functions */
-	ppc_md.iommu_dev_setup = iommu_dev_setup_null;
-	ppc_md.iommu_bus_setup = iommu_bus_setup_null;
+	ppc_md.pci_dma_dev_setup = NULL;
+	ppc_md.pci_dma_bus_setup = NULL;
 
 	/* Setup pci_dma ops */
-	pci_direct_iommu_init();
+	pci_dma_ops = &dma_direct_ops;
 }
 
 
diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S
new file mode 100644
index 0000000..2078f39
--- /dev/null
+++ b/arch/powerpc/sysdev/dcr-low.S
@@ -0,0 +1,39 @@
+/*
+ * "Indirect" DCR access
+ *
+ * Copyright (c) 2004 Eugene Surovegin <ebs@ebshome.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of  the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/processor.h>
+
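+/*
+ * __mfdcr and __mtdcr take the DCR number in r3.  Since the mfdcr/mtdcr
+ * instructions only encode an immediate DCR number, the tables below hold
+ * an 8-byte "mfdcr r3,N; blr" stub followed by an 8-byte "mtdcr N,r4; blr"
+ * stub for every DCR number (a 16-byte slot each); DCR_ACCESS_PROLOG scales
+ * the DCR number by 16 (rlwinm) and branches to the matching stub.
+ */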
+#define DCR_ACCESS_PROLOG(table) \
+	rlwinm  r3,r3,4,18,27;   \
+	lis     r5,table@h;      \
+	ori     r5,r5,table@l;   \
+	add     r3,r3,r5;        \
+	mtctr   r3;              \
+	bctr
+
+_GLOBAL(__mfdcr)
+	DCR_ACCESS_PROLOG(__mfdcr_table)
+
+_GLOBAL(__mtdcr)
+	DCR_ACCESS_PROLOG(__mtdcr_table)
+
+__mfdcr_table:
+	mfdcr  r3,0; blr
+__mtdcr_table:
+	mtdcr  0,r4; blr
+
+dcr     = 1
+        .rept   1023
+	mfdcr   r3,dcr; blr
+	mtdcr   dcr,r4; blr
+	dcr     = dcr + 1
+	.endr
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
new file mode 100644
index 0000000..dffeeae
--- /dev/null
+++ b/arch/powerpc/sysdev/dcr.c
@@ -0,0 +1,137 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                    <benh@kernel.crashing.org>
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <asm/prom.h>
+#include <asm/dcr.h>
+
+unsigned int dcr_resource_start(struct device_node *np, unsigned int index)
+{
+	unsigned int ds;
+	const u32 *dr = get_property(np, "dcr-reg", &ds);
+
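+	/* "dcr-reg" holds (base, length) u32 pairs; ds is the size in bytes */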
+	if (dr == NULL || ds & 1 || index >= (ds / 8))
+		return 0;
+
+	return dr[index * 2];
+}
+
+unsigned int dcr_resource_len(struct device_node *np, unsigned int index)
+{
+	unsigned int ds;
+	const u32 *dr = get_property(np, "dcr-reg", &ds);
+
+	if (dr == NULL || ds & 1 || index >= (ds / 8))
+		return 0;
+
+	return dr[index * 2 + 1];
+}
+
+#ifndef CONFIG_PPC_DCR_NATIVE
+
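+/*
+ * Walk up the device tree, following "dcr-parent" phandles when present,
+ * until a node carrying a "dcr-controller" property is found.
+ */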
+static struct device_node * find_dcr_parent(struct device_node * node)
+{
+	struct device_node *par, *tmp;
+	const u32 *p;
+
+	for (par = of_node_get(node); par;) {
+		if (get_property(par, "dcr-controller", NULL))
+			break;
+		p = get_property(par, "dcr-parent", NULL);
+		tmp = par;
+		if (p == NULL)
+			par = of_get_parent(par);
+		else
+			par = of_find_node_by_phandle(*p);
+		of_node_put(tmp);
+	}
+	return par;
+}
+
+u64 of_translate_dcr_address(struct device_node *dev,
+			     unsigned int dcr_n,
+			     unsigned int *out_stride)
+{
+	struct device_node *dp;
+	const u32 *p;
+	unsigned int stride;
+	u64 ret;
+
+	dp = find_dcr_parent(dev);
+	if (dp == NULL)
+		return OF_BAD_ADDR;
+
+	/* Stride is not properly defined yet, default to 0x10 for Axon */
+	p = get_property(dp, "dcr-mmio-stride", NULL);
+	stride = (p == NULL) ? 0x10 : *p;
+
+	/* XXX FIXME: Which of the 2 following property names should be used? */
+	p = get_property(dp, "dcr-mmio-range", NULL);
+	if (p == NULL)
+		p = get_property(dp, "dcr-mmio-space", NULL);
+	if (p == NULL)
+		return OF_BAD_ADDR;
+
+	/* Maybe could do some better range checking here */
+	ret = of_translate_address(dp, p);
+	if (ret != OF_BAD_ADDR)
+		ret += (u64)(stride) * (u64)dcr_n;
+	if (out_stride)
+		*out_stride = stride;
+	return ret;
+}
+
+dcr_host_t dcr_map(struct device_node *dev, unsigned int dcr_n,
+		   unsigned int dcr_c)
+{
+	dcr_host_t ret = { .token = NULL, .stride = 0 };
+	u64 addr;
+
+	pr_debug("dcr_map(%s, 0x%x, 0x%x)\n",
+		 dev->full_name, dcr_n, dcr_c);
+
+	addr = of_translate_dcr_address(dev, dcr_n, &ret.stride);
+	pr_debug("translates to addr: 0x%lx, stride: 0x%x\n",
+		 addr, ret.stride);
+	if (addr == OF_BAD_ADDR)
+		return ret;
+	pr_debug("mapping 0x%x bytes\n", dcr_c * ret.stride);
+	ret.token = ioremap(addr, dcr_c * ret.stride);
+	if (ret.token == NULL)
+		return ret;
+	pr_debug("mapped at 0x%p -> base is 0x%p\n",
+		 ret.token, ret.token - dcr_n * ret.stride);
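+	/*
+	 * Bias the token by dcr_n * stride so callers can later index it by
+	 * absolute DCR number.
+	 */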
+	ret.token -= dcr_n * ret.stride;
+	return ret;
+}
+
+void dcr_unmap(dcr_host_t host, unsigned int dcr_n, unsigned int dcr_c)
+{
+	dcr_host_t h = host;
+
+	if (h.token == NULL)
+		return;
+	/* Undo the bias applied in dcr_map() before unmapping. */
+	h.token += dcr_n * h.stride;
+	iounmap(h.token);
+	h.token = NULL;
+}
+
+#endif /* !defined(CONFIG_PPC_DCR_NATIVE) */
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index dbe92ae..ad31e56 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
+#include <linux/phy.h>
 #include <linux/fsl_devices.h>
 #include <linux/fs_enet_pd.h>
 #include <linux/fs_uart_pd.h>
@@ -146,7 +147,7 @@
 		}
 
 		for (k = 0; k < 32; k++)
-			mdio_data.irq[k] = -1;
+			mdio_data.irq[k] = PHY_POLL;
 
 		while ((child = of_get_next_child(np, child)) != NULL) {
 			int irq = irq_of_parse_and_map(child, 0);
@@ -177,6 +178,7 @@
 static const char *gfar_rx_intr = "rx";
 static const char *gfar_err_intr = "error";
 
+
 static int __init gfar_of_init(void)
 {
 	struct device_node *np;
@@ -204,8 +206,7 @@
 		if (ret)
 			goto err;
 
-		r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
-		r[1].flags = IORESOURCE_IRQ;
+		of_irq_to_resource(np, 0, &r[1]);
 
 		model = get_property(np, "model", NULL);
 
@@ -214,12 +215,10 @@
 			r[1].name = gfar_tx_intr;
 
 			r[2].name = gfar_rx_intr;
-			r[2].start = r[2].end = irq_of_parse_and_map(np, 1);
-			r[2].flags = IORESOURCE_IRQ;
+			of_irq_to_resource(np, 1, &r[2]);
 
 			r[3].name = gfar_err_intr;
-			r[3].start = r[3].end = irq_of_parse_and_map(np, 2);
-			r[3].flags = IORESOURCE_IRQ;
+			of_irq_to_resource(np, 2, &r[3]);
 
 			n_res += 2;
 		}
@@ -323,8 +322,7 @@
 		if (ret)
 			goto err;
 
-		r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
-		r[1].flags = IORESOURCE_IRQ;
+		of_irq_to_resource(np, 0, &r[1]);
 
 		i2c_dev = platform_device_register_simple("fsl-i2c", i, r, 2);
 		if (IS_ERR(i2c_dev)) {
@@ -459,8 +457,7 @@
 		if (ret)
 			goto err;
 
-		r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
-		r[1].flags = IORESOURCE_IRQ;
+		of_irq_to_resource(np, 0, &r[1]);
 
 		usb_dev_mph =
 		    platform_device_register_simple("fsl-ehci", i, r, 2);
@@ -507,8 +504,7 @@
 		if (ret)
 			goto unreg_mph;
 
-		r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
-		r[1].flags = IORESOURCE_IRQ;
+		of_irq_to_resource(np, 0, &r[1]);
 
 		usb_dev_dr =
 		    platform_device_register_simple("fsl-ehci", i, r, 2);
@@ -591,8 +587,7 @@
 		r[2].name = fcc_regs_c;
 		fs_enet_data.fcc_regs_c = r[2].start;
 
-		r[3].start = r[3].end = irq_of_parse_and_map(np, 0);
-		r[3].flags = IORESOURCE_IRQ;
+		of_irq_to_resource(np, 0, &r[3]);
 
 		fs_enet_dev =
 		    platform_device_register_simple("fsl-cpm-fcc", i, &r[0], 4);
@@ -754,8 +749,7 @@
 			goto err;
 		r[1].name = scc_pram;
 
-		r[2].start = r[2].end = irq_of_parse_and_map(np, 0);
-		r[2].flags = IORESOURCE_IRQ;
+		of_irq_to_resource(np, 0, &r[2]);
 
 		cpm_uart_dev =
 		    platform_device_register_simple("fsl-cpm-scc:uart", i, &r[0], 3);
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index ba4833f..411480d 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -147,33 +147,51 @@
  */
 
 
-static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base,
-			    unsigned int reg)
+static inline u32 _mpic_read(enum mpic_reg_type type,
+			     struct mpic_reg_bank *rb,
+			     unsigned int reg)
 {
-	if (be)
-		return in_be32(base + (reg >> 2));
-	else
-		return in_le32(base + (reg >> 2));
+	switch(type) {
+#ifdef CONFIG_PPC_DCR
+	case mpic_access_dcr:
+		return dcr_read(rb->dhost,
+				rb->dbase + reg + rb->doff);
+#endif
+	case mpic_access_mmio_be:
+		return in_be32(rb->base + (reg >> 2));
+	case mpic_access_mmio_le:
+	default:
+		return in_le32(rb->base + (reg >> 2));
+	}
 }
 
-static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base,
-			      unsigned int reg, u32 value)
+static inline void _mpic_write(enum mpic_reg_type type,
+			       struct mpic_reg_bank *rb,
+ 			       unsigned int reg, u32 value)
 {
-	if (be)
-		out_be32(base + (reg >> 2), value);
-	else
-		out_le32(base + (reg >> 2), value);
+	switch(type) {
+#ifdef CONFIG_PPC_DCR
+	case mpic_access_dcr:
+		return dcr_write(rb->dhost,
+				 rb->dbase + reg + rb->doff, value);
+#endif
+	case mpic_access_mmio_be:
+		return out_be32(rb->base + (reg >> 2), value);
+	case mpic_access_mmio_le:
+	default:
+		return out_le32(rb->base + (reg >> 2), value);
+	}
 }
 
 static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
 {
-	unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0;
+	enum mpic_reg_type type = mpic->reg_type;
 	unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
 			      (ipi * MPIC_INFO(GREG_IPI_STRIDE));
 
-	if (mpic->flags & MPIC_BROKEN_IPI)
-		be = !be;
-	return _mpic_read(be, mpic->gregs, offset);
+	if ((mpic->flags & MPIC_BROKEN_IPI) && type == mpic_access_mmio_le)
+		type = mpic_access_mmio_be;
+	return _mpic_read(type, &mpic->gregs, offset);
 }
 
 static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
@@ -181,7 +199,7 @@
 	unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
 			      (ipi * MPIC_INFO(GREG_IPI_STRIDE));
 
-	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value);
+	_mpic_write(mpic->reg_type, &mpic->gregs, offset, value);
 }
 
 static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
@@ -190,8 +208,7 @@
 
 	if (mpic->flags & MPIC_PRIMARY)
 		cpu = hard_smp_processor_id();
-	return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,
-			  mpic->cpuregs[cpu], reg);
+	return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg);
 }
 
 static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
@@ -201,7 +218,7 @@
 	if (mpic->flags & MPIC_PRIMARY)
 		cpu = hard_smp_processor_id();
 
-	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value);
+	_mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value);
 }
 
 static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
@@ -209,7 +226,7 @@
 	unsigned int	isu = src_no >> mpic->isu_shift;
 	unsigned int	idx = src_no & mpic->isu_mask;
 
-	return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
+	return _mpic_read(mpic->reg_type, &mpic->isus[isu],
 			  reg + (idx * MPIC_INFO(IRQ_STRIDE)));
 }
 
@@ -219,12 +236,12 @@
 	unsigned int	isu = src_no >> mpic->isu_shift;
 	unsigned int	idx = src_no & mpic->isu_mask;
 
-	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
+	_mpic_write(mpic->reg_type, &mpic->isus[isu],
 		    reg + (idx * MPIC_INFO(IRQ_STRIDE)), value);
 }
 
-#define mpic_read(b,r)		_mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
-#define mpic_write(b,r,v)	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v))
+#define mpic_read(b,r)		_mpic_read(mpic->reg_type,&(b),(r))
+#define mpic_write(b,r,v)	_mpic_write(mpic->reg_type,&(b),(r),(v))
 #define mpic_ipi_read(i)	_mpic_ipi_read(mpic,(i))
 #define mpic_ipi_write(i,v)	_mpic_ipi_write(mpic,(i),(v))
 #define mpic_cpu_read(i)	_mpic_cpu_read(mpic,(i))
@@ -238,6 +255,38 @@
  */
 
 
+static void _mpic_map_mmio(struct mpic *mpic, unsigned long phys_addr,
+			   struct mpic_reg_bank *rb, unsigned int offset,
+			   unsigned int size)
+{
+	rb->base = ioremap(phys_addr + offset, size);
+	BUG_ON(rb->base == NULL);
+}
+
+#ifdef CONFIG_PPC_DCR
+static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
+			  unsigned int offset, unsigned int size)
+{
+	rb->dbase = mpic->dcr_base;
+	rb->doff = offset;
+	rb->dhost = dcr_map(mpic->of_node, rb->dbase + rb->doff, size);
+	BUG_ON(!DCR_MAP_OK(rb->dhost));
+}
+
+static inline void mpic_map(struct mpic *mpic, unsigned long phys_addr,
+			    struct mpic_reg_bank *rb, unsigned int offset,
+			    unsigned int size)
+{
+	if (mpic->flags & MPIC_USES_DCR)
+		_mpic_map_dcr(mpic, rb, offset, size);
+	else
+		_mpic_map_mmio(mpic, phys_addr, rb, offset, size);
+}
+#else /* CONFIG_PPC_DCR */
+#define mpic_map(m,p,b,o,s)	_mpic_map_mmio(m,p,b,o,s)
+#endif /* !CONFIG_PPC_DCR */
+
+
 
 /* Check if we have one of those nice broken MPICs with a flipped endian on
  * reads from IPI registers
@@ -845,7 +894,7 @@
  */
 
 struct mpic * __init mpic_alloc(struct device_node *node,
-				unsigned long phys_addr,
+				phys_addr_t phys_addr,
 				unsigned int flags,
 				unsigned int isu_size,
 				unsigned int irq_count,
@@ -855,6 +904,7 @@
 	u32		reg;
 	const char	*vers;
 	int		i;
+	u64		paddr = phys_addr;
 
 	mpic = alloc_bootmem(sizeof(struct mpic));
 	if (mpic == NULL)
@@ -883,6 +933,7 @@
 	if (flags & MPIC_PRIMARY)
 		mpic->hc_ht_irq.set_affinity = mpic_set_affinity;
 #endif /* CONFIG_MPIC_BROKEN_U3 */
+
 #ifdef CONFIG_SMP
 	mpic->hc_ipi = mpic_ipi_chip;
 	mpic->hc_ipi.typename = name;
@@ -893,15 +944,52 @@
 	mpic->irq_count = irq_count;
 	mpic->num_sources = 0; /* so far */
 
+	/* Check for "big-endian" in device-tree */
+	if (node && get_property(node, "big-endian", NULL) != NULL)
+		mpic->flags |= MPIC_BIG_ENDIAN;
+
+
 #ifdef CONFIG_MPIC_WEIRD
 	mpic->hw_set = mpic_infos[MPIC_GET_REGSET(flags)];
 #endif
 
+	/* default register type */
+	mpic->reg_type = (flags & MPIC_BIG_ENDIAN) ?
+		mpic_access_mmio_be : mpic_access_mmio_le;
+
+	/* If no physical address is passed in, a device-node is mandatory */
+	BUG_ON(paddr == 0 && node == NULL);
+
+	/* If no physical address passed in, check if it's dcr based */
+	if (paddr == 0 && get_property(node, "dcr-reg", NULL) != NULL)
+		mpic->flags |= MPIC_USES_DCR;
+
+#ifdef CONFIG_PPC_DCR
+	if (mpic->flags & MPIC_USES_DCR) {
+		const u32 *dbasep;
+		dbasep = get_property(node, "dcr-reg", NULL);
+		BUG_ON(dbasep == NULL);
+		mpic->dcr_base = *dbasep;
+		mpic->reg_type = mpic_access_dcr;
+	}
+#else
+	BUG_ON (mpic->flags & MPIC_USES_DCR);
+#endif /* CONFIG_PPC_DCR */
+
+	/* If the MPIC is not DCR based, and no physical address was passed
+	 * in, try to obtain one
+	 */
+	if (paddr == 0 && !(mpic->flags & MPIC_USES_DCR)) {
+		const u32 *reg;
+		reg = get_property(node, "reg", NULL);
+		BUG_ON(reg == NULL);
+		paddr = of_translate_address(node, reg);
+		BUG_ON(paddr == OF_BAD_ADDR);
+	}
+
 	/* Map the global registers */
-	mpic->gregs = ioremap(phys_addr + MPIC_INFO(GREG_BASE), 0x1000);
-	mpic->tmregs = mpic->gregs +
-		       ((MPIC_INFO(TIMER_BASE) - MPIC_INFO(GREG_BASE)) >> 2);
-	BUG_ON(mpic->gregs == NULL);
+	mpic_map(mpic, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
+	mpic_map(mpic, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
 
 	/* Reset */
 	if (flags & MPIC_WANTS_RESET) {
@@ -926,17 +1014,16 @@
 
 	/* Map the per-CPU registers */
 	for (i = 0; i < mpic->num_cpus; i++) {
-		mpic->cpuregs[i] = ioremap(phys_addr + MPIC_INFO(CPU_BASE) +
-					   i * MPIC_INFO(CPU_STRIDE), 0x1000);
-		BUG_ON(mpic->cpuregs[i] == NULL);
+		mpic_map(mpic, paddr, &mpic->cpuregs[i],
+			 MPIC_INFO(CPU_BASE) + i * MPIC_INFO(CPU_STRIDE),
+			 0x1000);
 	}
 
 	/* Initialize main ISU if none provided */
 	if (mpic->isu_size == 0) {
 		mpic->isu_size = mpic->num_sources;
-		mpic->isus[0] = ioremap(phys_addr + MPIC_INFO(IRQ_BASE),
-					MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
-		BUG_ON(mpic->isus[0] == NULL);
+		mpic_map(mpic, paddr, &mpic->isus[0],
+			 MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
 	}
 	mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
 	mpic->isu_mask = (1 << mpic->isu_shift) - 1;
@@ -956,10 +1043,11 @@
 		vers = "<unknown>";
 		break;
 	}
-	printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n",
-	       name, vers, phys_addr, mpic->num_cpus);
-	printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size,
-	       mpic->isu_shift, mpic->isu_mask);
+	printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx,"
+	       " max %d CPUs\n",
+	       name, vers, (unsigned long long)paddr, mpic->num_cpus);
+	printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n",
+	       mpic->isu_size, mpic->isu_shift, mpic->isu_mask);
 
 	mpic->next = mpics;
 	mpics = mpic;
@@ -973,14 +1061,14 @@
 }
 
 void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
-			    unsigned long phys_addr)
+			    phys_addr_t paddr)
 {
 	unsigned int isu_first = isu_num * mpic->isu_size;
 
 	BUG_ON(isu_num >= MPIC_MAX_ISU);
 
-	mpic->isus[isu_num] = ioremap(phys_addr,
-				      MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
+	mpic_map(mpic, paddr, &mpic->isus[isu_num], 0,
+		 MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
 	if ((isu_first + mpic->isu_size) > mpic->num_sources)
 		mpic->num_sources = isu_first + mpic->isu_size;
 }
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index e422322..e3d71e0 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -174,8 +174,7 @@
 	u32 divisor, tempval;
 	int div16 = 0;
 
-	bp = &qe_immr->brg.brgc1;
-	bp += brg;
+	bp = &qe_immr->brg.brgc[brg];
 
 	divisor = (get_brg_clk() / rate);
 	if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
index 75fa310..e657559 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_fast.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
@@ -216,14 +216,12 @@
 		return -EINVAL;
 	}
 
-	uccf = (struct ucc_fast_private *)
-		 kmalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
+	uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
 	if (!uccf) {
 		uccf_err
 		    ("ucc_fast_init: No memory for UCC slow data structure!");
 		return -ENOMEM;
 	}
-	memset(uccf, 0, sizeof(struct ucc_fast_private));
 
 	/* Fill fast UCC structure */
 	uccf->uf_info = uf_info;
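
The kzalloc() conversions in ucc_fast.c and ucc_slow.c follow the usual idiom:
the allocator returns zeroed memory, so the explicit memset() and the cast of
kmalloc()'s void * return value both go away.  The shape of the idiom, with a
placeholder type:

#include <linux/slab.h>

struct example_private { int state; };

static struct example_private *example_alloc(void)
{
	/* kzalloc() returns zeroed memory or NULL */
	struct example_private *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return NULL;
	return priv;
}
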
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
index a49da6b..47b5620 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
@@ -168,14 +168,12 @@
 		return -EINVAL;
 	}
 
-	uccs = (struct ucc_slow_private *)
-		kmalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
+	uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
 	if (!uccs) {
 		uccs_err
 		    ("ucc_slow_init: No memory for UCC slow data structure!");
 		return -ENOMEM;
 	}
-	memset(uccs, 0, sizeof(struct ucc_slow_private));
 
 	/* Fill slow UCC structure */
 	uccs->us_info = us_info;
diff --git a/arch/powerpc/sysdev/rom.c b/arch/powerpc/sysdev/rom.c
new file mode 100644
index 0000000..bf5b3f1
--- /dev/null
+++ b/arch/powerpc/sysdev/rom.c
@@ -0,0 +1,31 @@
+/*
+ * ROM device registration
+ *
+ * (C) 2006 MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <linux/kernel.h>
+#include <asm/of_device.h>
+
+static int __init powerpc_flash_init(void)
+{
+	struct device_node *node = NULL;
+
+	/*
+	 * Register all the devices whose type is "rom"
+	 */
+	while ((node = of_find_node_by_type(node, "rom")) != NULL) {
+		if (node->name == NULL) {
+			printk(KERN_WARNING "powerpc_flash_init: found 'rom' "
+				"device, but with no name, skipping...\n");
+			continue;
+		}
+		of_platform_device_create(node, node->name, NULL);
+	}
+	return 0;
+}
+
+arch_initcall(powerpc_flash_init);
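
powerpc_flash_init() only creates of_platform devices; an actual flash driver
still has to bind to them.  A sketch of what a matching driver could look like,
assuming one exists elsewhere (all "example_*" names are hypothetical):

static int example_rom_probe(struct of_device *op,
			     const struct of_device_id *match)
{
	/* map the node's "reg" resource and register it as flash, etc. */
	return 0;
}

static struct of_device_id example_rom_match[] = {
	{ .type = "rom", },
	{},
};

static struct of_platform_driver example_rom_driver = {
	.name		= "example-rom",
	.match_table	= example_rom_match,
	.probe		= example_rom_probe,
};
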
diff --git a/arch/powerpc/sysdev/todc.c b/arch/powerpc/sysdev/todc.c
deleted file mode 100644
index 0a65980..0000000
--- a/arch/powerpc/sysdev/todc.c
+++ /dev/null
@@ -1,392 +0,0 @@
-/*
- * Time of Day Clock support for the M48T35, M48T37, M48T59, and MC146818
- * Real Time Clocks/Timekeepers.
- *
- * Author: Mark A. Greer <mgreer@mvista.com>
- *
- * 2001-2004 (c) MontaVista, Software, Inc.  This file is licensed under
- * the terms of the GNU General Public License version 2.  This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/timex.h>
-#include <linux/bcd.h>
-#include <linux/mc146818rtc.h>
-
-#include <asm/machdep.h>
-#include <asm/io.h>
-#include <asm/time.h>
-#include <asm/todc.h>
-
-/*
- * Depending on the hardware on your board and your board design, the
- * RTC/NVRAM may be accessed either directly (like normal memory) or via
- * address/data registers.  If your board uses the direct method, set
- * 'nvram_data' to the base address of your nvram and leave 'nvram_as0' and
- * 'nvram_as1' NULL.  If your board uses address/data regs to access nvram,
- * set 'nvram_as0' to the address of the lower byte, set 'nvram_as1' to the
- * address of the upper byte (leave NULL if using mc146818), and set
- * 'nvram_data' to the address of the 8-bit data register.
- *
- * Note: Even though the documentation for the various RTC chips say that it
- * 	 take up to a second before it starts updating once the 'R' bit is
- * 	 cleared, they always seem to update even though we bang on it many
- * 	 times a second.  This is true, except for the Dallas Semi 1746/1747
- * 	 (possibly others).  Those chips seem to have a real problem whenever
- * 	 we set the 'R' bit before reading them, they basically stop counting.
- * 	 					--MAG
- */
-
-/*
- * 'todc_info' should be initialized in your *_setup.c file to
- * point to a fully initialized 'todc_info_t' structure.
- * This structure holds all the register offsets for your particular
- * TODC/RTC chip.
- * TODC_ALLOC()/TODC_INIT() will allocate and initialize this table for you.
- */
-
-#ifdef	RTC_FREQ_SELECT
-#undef	RTC_FREQ_SELECT
-#define	RTC_FREQ_SELECT		control_b	/* Register A */
-#endif
-
-#ifdef	RTC_CONTROL
-#undef	RTC_CONTROL
-#define	RTC_CONTROL		control_a	/* Register B */
-#endif
-
-#ifdef	RTC_INTR_FLAGS
-#undef	RTC_INTR_FLAGS
-#define	RTC_INTR_FLAGS		watchdog	/* Register C */
-#endif
-
-#ifdef	RTC_VALID
-#undef	RTC_VALID
-#define	RTC_VALID		interrupts	/* Register D */
-#endif
-
-/* Access routines when RTC accessed directly (like normal memory) */
-u_char
-todc_direct_read_val(int addr)
-{
-	return readb((void __iomem *)(todc_info->nvram_data + addr));
-}
-
-void
-todc_direct_write_val(int addr, unsigned char val)
-{
-	writeb(val, (void __iomem *)(todc_info->nvram_data + addr));
-	return;
-}
-
-/* Access routines for accessing m48txx type chips via addr/data regs */
-u_char
-todc_m48txx_read_val(int addr)
-{
-	outb(addr, todc_info->nvram_as0);
-	outb(addr>>todc_info->as0_bits, todc_info->nvram_as1);
-	return inb(todc_info->nvram_data);
-}
-
-void
-todc_m48txx_write_val(int addr, unsigned char val)
-{
-	outb(addr, todc_info->nvram_as0);
-	outb(addr>>todc_info->as0_bits, todc_info->nvram_as1);
-	outb(val, todc_info->nvram_data);
-	return;
-}
-
-/* Access routines for accessing mc146818 type chips via addr/data regs */
-u_char
-todc_mc146818_read_val(int addr)
-{
-	outb_p(addr, todc_info->nvram_as0);
-	return inb_p(todc_info->nvram_data);
-}
-
-void
-todc_mc146818_write_val(int addr, unsigned char val)
-{
-	outb_p(addr, todc_info->nvram_as0);
-	outb_p(val, todc_info->nvram_data);
-}
-
-
-/*
- * Routines to make RTC chips with NVRAM buried behind an addr/data pair
- * have the NVRAM and clock regs appear at the same level.
- * The NVRAM will appear to start at addr 0 and the clock regs will appear
- * to start immediately after the NVRAM (actually, start at offset
- * todc_info->nvram_size).
- */
-static inline u_char
-todc_read_val(int addr)
-{
-	u_char	val;
-
-	if (todc_info->sw_flags & TODC_FLAG_2_LEVEL_NVRAM) {
-		if (addr < todc_info->nvram_size) { /* NVRAM */
-			ppc_md.rtc_write_val(todc_info->nvram_addr_reg, addr);
-			val = ppc_md.rtc_read_val(todc_info->nvram_data_reg);
-		} else { /* Clock Reg */
-			addr -= todc_info->nvram_size;
-			val = ppc_md.rtc_read_val(addr);
-		}
-	} else
-		val = ppc_md.rtc_read_val(addr);
-
-	return val;
-}
-
-static inline void
-todc_write_val(int addr, u_char val)
-{
-	if (todc_info->sw_flags & TODC_FLAG_2_LEVEL_NVRAM) {
-		if (addr < todc_info->nvram_size) { /* NVRAM */
-			ppc_md.rtc_write_val(todc_info->nvram_addr_reg, addr);
-			ppc_md.rtc_write_val(todc_info->nvram_data_reg, val);
-		} else { /* Clock Reg */
-			addr -= todc_info->nvram_size;
-			ppc_md.rtc_write_val(addr, val);
-		}
-	} else
-		ppc_md.rtc_write_val(addr, val);
-}
-
-/*
- * TODC routines
- *
- * There is some ugly stuff in that there are assumptions for the mc146818.
- *
- * Assumptions:
- *	- todc_info->control_a has the offset as mc146818 Register B reg
- *	- todc_info->control_b has the offset as mc146818 Register A reg
- *	- m48txx control reg's write enable or 'W' bit is same as
- *	  mc146818 Register B 'SET' bit (i.e., 0x80)
- *
- * These assumptions were made to make the code simpler.
- */
-long __init
-todc_time_init(void)
-{
-	u_char	cntl_b;
-
-	if (!ppc_md.rtc_read_val)
-		ppc_md.rtc_read_val = ppc_md.nvram_read_val;
-	if (!ppc_md.rtc_write_val)
-		ppc_md.rtc_write_val = ppc_md.nvram_write_val;
-
-	cntl_b = todc_read_val(todc_info->control_b);
-
-	if (todc_info->rtc_type == TODC_TYPE_MC146818) {
-		if ((cntl_b & 0x70) != 0x20) {
-			printk(KERN_INFO "TODC real-time-clock was stopped."
-				"  Now starting...");
-			cntl_b &= ~0x70;
-			cntl_b |= 0x20;
-		}
-
-		todc_write_val(todc_info->control_b, cntl_b);
-	} else if (todc_info->rtc_type == TODC_TYPE_DS17285) {
-		u_char mode;
-
-		mode = todc_read_val(TODC_TYPE_DS17285_CNTL_A);
-		/* Make sure countdown clear is not set */
-		mode &= ~0x40;
-		/* Enable oscillator, extended register set */
-		mode |= 0x30;
-		todc_write_val(TODC_TYPE_DS17285_CNTL_A, mode);
-
-	} else if (todc_info->rtc_type == TODC_TYPE_DS1501) {
-		u_char	month;
-
-		todc_info->enable_read = TODC_DS1501_CNTL_B_TE;
-		todc_info->enable_write = TODC_DS1501_CNTL_B_TE;
-
-		month = todc_read_val(todc_info->month);
-
-		if ((month & 0x80) == 0x80) {
-			printk(KERN_INFO "TODC %s %s\n",
-				"real-time-clock was stopped.",
-				"Now starting...");
-			month &= ~0x80;
-			todc_write_val(todc_info->month, month);
-		}
-
-		cntl_b &= ~TODC_DS1501_CNTL_B_TE;
-		todc_write_val(todc_info->control_b, cntl_b);
-	} else { /* must be a m48txx type */
-		u_char	cntl_a;
-
-		todc_info->enable_read = TODC_MK48TXX_CNTL_A_R;
-		todc_info->enable_write = TODC_MK48TXX_CNTL_A_W;
-
-		cntl_a = todc_read_val(todc_info->control_a);
-
-		/* Check & clear STOP bit in control B register */
-		if (cntl_b & TODC_MK48TXX_DAY_CB) {
-			printk(KERN_INFO "TODC %s %s\n",
-				"real-time-clock was stopped.",
-				"Now starting...");
-
-			cntl_a |= todc_info->enable_write;
-			cntl_b &= ~TODC_MK48TXX_DAY_CB;/* Start Oscil */
-
-			todc_write_val(todc_info->control_a, cntl_a);
-			todc_write_val(todc_info->control_b, cntl_b);
-		}
-
-		/* Make sure READ & WRITE bits are cleared. */
-		cntl_a &= ~(todc_info->enable_write | todc_info->enable_read);
-		todc_write_val(todc_info->control_a, cntl_a);
-	}
-
-	return 0;
-}
-
-/*
- * There is some ugly stuff in that there are assumptions that for a mc146818,
- * the todc_info->control_a has the offset of the mc146818 Register B reg and
- * that the register'ss 'SET' bit is the same as the m48txx's write enable
- * bit in the control register of the m48txx (i.e., 0x80).
- *
- * It was done to make the code look simpler.
- */
-void
-todc_get_rtc_time(struct rtc_time *tm)
-{
-	uint	year = 0, mon = 0, mday = 0, hour = 0, min = 0, sec = 0;
-	uint	limit, i;
-	u_char	save_control, uip = 0;
-	extern void GregorianDay(struct rtc_time *);
-
-	spin_lock(&rtc_lock);
-	save_control = todc_read_val(todc_info->control_a);
-
-	if (todc_info->rtc_type != TODC_TYPE_MC146818) {
-		limit = 1;
-
-		switch (todc_info->rtc_type) {
-		case TODC_TYPE_DS1553:
-		case TODC_TYPE_DS1557:
-		case TODC_TYPE_DS1743:
-		case TODC_TYPE_DS1746:	/* XXXX BAD HACK -> FIX */
-		case TODC_TYPE_DS1747:
-		case TODC_TYPE_DS17285:
-			break;
-		default:
-			todc_write_val(todc_info->control_a,
-				(save_control | todc_info->enable_read));
-		}
-	} else
-		limit = 100000000;
-
-	for (i=0; i<limit; i++) {
-		if (todc_info->rtc_type == TODC_TYPE_MC146818)
-			uip = todc_read_val(todc_info->RTC_FREQ_SELECT);
-
-		sec = todc_read_val(todc_info->seconds) & 0x7f;
-		min = todc_read_val(todc_info->minutes) & 0x7f;
-		hour = todc_read_val(todc_info->hours) & 0x3f;
-		mday = todc_read_val(todc_info->day_of_month) & 0x3f;
-		mon = todc_read_val(todc_info->month) & 0x1f;
-		year = todc_read_val(todc_info->year) & 0xff;
-
-		if (todc_info->rtc_type == TODC_TYPE_MC146818) {
-			uip |= todc_read_val(todc_info->RTC_FREQ_SELECT);
-			if ((uip & RTC_UIP) == 0)
-				break;
-		}
-	}
-
-	if (todc_info->rtc_type != TODC_TYPE_MC146818) {
-		switch (todc_info->rtc_type) {
-		case TODC_TYPE_DS1553:
-		case TODC_TYPE_DS1557:
-		case TODC_TYPE_DS1743:
-		case TODC_TYPE_DS1746:	/* XXXX BAD HACK -> FIX */
-		case TODC_TYPE_DS1747:
-		case TODC_TYPE_DS17285:
-			break;
-		default:
-			save_control &= ~(todc_info->enable_read);
-			todc_write_val(todc_info->control_a, save_control);
-		}
-	}
-	spin_unlock(&rtc_lock);
-
-	if ((todc_info->rtc_type != TODC_TYPE_MC146818)
-			|| ((save_control & RTC_DM_BINARY) == 0)
-			|| RTC_ALWAYS_BCD) {
-		BCD_TO_BIN(sec);
-		BCD_TO_BIN(min);
-		BCD_TO_BIN(hour);
-		BCD_TO_BIN(mday);
-		BCD_TO_BIN(mon);
-		BCD_TO_BIN(year);
-	}
-
-	if ((year + 1900) < 1970) {
-		year += 100;
-	}
-
-	tm->tm_sec = sec;
-	tm->tm_min = min;
-	tm->tm_hour = hour;
-	tm->tm_mday = mday;
-	tm->tm_mon = mon;
-	tm->tm_year = year;
-
-	GregorianDay(tm);
-}
-
-int
-todc_set_rtc_time(struct rtc_time *tm)
-{
-	u_char save_control, save_freq_select = 0;
-
-	spin_lock(&rtc_lock);
-	save_control = todc_read_val(todc_info->control_a);
-
-	/* Assuming MK48T59_RTC_CA_WRITE & RTC_SET are equal */
-	todc_write_val(todc_info->control_a,
-		(save_control | todc_info->enable_write));
-	save_control &= ~(todc_info->enable_write); /* in case it was set */
-
-	if (todc_info->rtc_type == TODC_TYPE_MC146818) {
-		save_freq_select = todc_read_val(todc_info->RTC_FREQ_SELECT);
-		todc_write_val(todc_info->RTC_FREQ_SELECT,
-			save_freq_select | RTC_DIV_RESET2);
-	}
-
-	if ((todc_info->rtc_type != TODC_TYPE_MC146818)
-			|| ((save_control & RTC_DM_BINARY) == 0)
-			|| RTC_ALWAYS_BCD) {
-		BIN_TO_BCD(tm->tm_sec);
-		BIN_TO_BCD(tm->tm_min);
-		BIN_TO_BCD(tm->tm_hour);
-		BIN_TO_BCD(tm->tm_mon);
-		BIN_TO_BCD(tm->tm_mday);
-		BIN_TO_BCD(tm->tm_year);
-	}
-
-	todc_write_val(todc_info->seconds, tm->tm_sec);
-	todc_write_val(todc_info->minutes, tm->tm_min);
-	todc_write_val(todc_info->hours, tm->tm_hour);
-	todc_write_val(todc_info->month, tm->tm_mon);
-	todc_write_val(todc_info->day_of_month, tm->tm_mday);
-	todc_write_val(todc_info->year, tm->tm_year);
-
-	todc_write_val(todc_info->control_a, save_control);
-
-	if (todc_info->rtc_type == TODC_TYPE_MC146818)
-		todc_write_val(todc_info->RTC_FREQ_SELECT, save_freq_select);
-
-	spin_unlock(&rtc_lock);
-	return 0;
-}
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 322f86e..ae249c6 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -3,6 +3,8 @@
  *
  * 2004-2005 (c) Tundra Semiconductor Corp.
  * Author: Alex Bounine (alexandreb@tundra.com)
+ * Author: Roy Zang (tie-fei.zang@freescale.com)
+ * 	   Add pci interrupt router host
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -48,6 +50,8 @@
 
 u32 tsi108_pci_cfg_base;
 u32 tsi108_csr_vir_base;
+static struct device_node *pci_irq_node;
+static struct irq_host *pci_irq_host;
 
 extern u32 get_vir_csrbase(void);
 extern u32 tsi108_read_reg(u32 reg_offset);
@@ -378,6 +382,38 @@
 	.unmask = tsi108_pci_irq_enable,
 };
 
+static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct,
+			    u32 *intspec, unsigned int intsize,
+			    irq_hw_number_t *out_hwirq, unsigned int *out_flags)
+{
+	*out_hwirq = intspec[0];
+	*out_flags = IRQ_TYPE_LEVEL_HIGH;
+	return 0;
+}
+
+static int pci_irq_host_map(struct irq_host *h, unsigned int virq,
+			  irq_hw_number_t hw)
+{
+	unsigned int irq;
+
+	DBG("%s(%d, 0x%lx)\n", __FUNCTION__, virq, hw);
+	if ((virq >= 1) && (virq <= 4)) {
+		irq = virq + IRQ_PCI_INTAD_BASE - 1;
+		get_irq_desc(irq)->status |= IRQ_LEVEL;
+		set_irq_chip(irq, &tsi108_pci_irq);
+	}
+	return 0;
+}
+
+static int pci_irq_host_match(struct irq_host *h, struct device_node *node)
+{
+	return pci_irq_node == node;
+}
+
+static struct irq_host_ops pci_irq_host_ops = {
+	.match = pci_irq_host_match,
+	.map = pci_irq_host_map,
+	.xlate = pci_irq_host_xlate,
+};
+
 /*
  * Exported functions
  */
@@ -391,15 +427,15 @@
  * to the MPIC.
  */
 
-void __init tsi108_pci_int_init(void)
+void __init tsi108_pci_int_init(struct device_node *node)
 {
-	u_int i;
-
 	DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");
 
-	for (i = 0; i < NUM_PCI_IRQS; i++) {
-		irq_desc[i + IRQ_PCI_INTAD_BASE].chip = &tsi108_pci_irq;
-		irq_desc[i + IRQ_PCI_INTAD_BASE].status |= IRQ_LEVEL;
+	pci_irq_node = of_node_get(node);
+	pci_irq_host = irq_alloc_host(IRQ_HOST_MAP_LEGACY, 0, &pci_irq_host_ops, 0);
+	if (pci_irq_host == NULL) {
+		printk(KERN_ERR "pci_irq_host: failed to allocate irq host!\n");
+		return;
 	}
 
 	init_pci_source();
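
With the PCI interrupts behind an irq_host, boards no longer poke irq_desc[]
directly; consumers obtain their virq from the generic OF interrupt code.  A
minimal sketch of the consumer side (names hypothetical):

static unsigned int example_pci_virq(struct device_node *dev_node)
{
	/* resolves interrupt 0 of dev_node through the registered host */
	unsigned int virq = irq_of_parse_and_map(dev_node, 0);

	if (virq == NO_IRQ)
		printk(KERN_WARNING "example: no usable PCI interrupt\n");
	return virq;
}
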
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 109d874..51d9758 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -3,5 +3,10 @@
 ifdef CONFIG_PPC64
 EXTRA_CFLAGS += -mno-minimal-toc
 endif
-obj-y			+= xmon.o ppc-dis.o ppc-opc.o setjmp.o start.o \
-			   nonstdio.o
+
+obj-y			+= xmon.o setjmp.o start.o nonstdio.o
+
+ifdef CONFIG_XMON_DISASSEMBLY
+obj-y			+= ppc-dis.o ppc-opc.o
+obj-$(CONFIG_SPU_BASE)	+= spu-dis.o spu-opc.o
+endif
diff --git a/arch/powerpc/xmon/dis-asm.h b/arch/powerpc/xmon/dis-asm.h
new file mode 100644
index 0000000..be3533b
--- /dev/null
+++ b/arch/powerpc/xmon/dis-asm.h
@@ -0,0 +1,31 @@
+#ifndef _POWERPC_XMON_DIS_ASM_H
+#define _POWERPC_XMON_DIS_ASM_H
+/*
+ * Copyright (C) 2006 Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+extern void print_address (unsigned long memaddr);
+
+#ifdef CONFIG_XMON_DISASSEMBLY
+extern int print_insn_powerpc(unsigned long insn, unsigned long memaddr);
+extern int print_insn_spu(unsigned long insn, unsigned long memaddr);
+#else
+static inline int print_insn_powerpc(unsigned long insn, unsigned long memaddr)
+{
+	printf("%.8x", insn);
+	return 0;
+}
+
+static inline int print_insn_spu(unsigned long insn, unsigned long memaddr)
+{
+	printf("%.8x", insn);
+	return 0;
+}
+#endif
+
+#endif /* _POWERPC_XMON_DIS_ASM_H */
diff --git a/arch/powerpc/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c
index ac0a9d2..89098f3 100644
--- a/arch/powerpc/xmon/ppc-dis.c
+++ b/arch/powerpc/xmon/ppc-dis.c
@@ -1,5 +1,6 @@
 /* ppc-dis.c -- Disassemble PowerPC instructions
-   Copyright 1994 Free Software Foundation, Inc.
+   Copyright 1994, 1995, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+   Free Software Foundation, Inc.
    Written by Ian Lance Taylor, Cygnus Support
 
 This file is part of GDB, GAS, and the GNU binutils.
@@ -16,27 +17,36 @@
 
 You should have received a copy of the GNU General Public License
 along with this file; see the file COPYING.  If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
+Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
 
+#include <asm/cputable.h>
 #include "nonstdio.h"
 #include "ansidecl.h"
 #include "ppc.h"
-
-extern void print_address (unsigned long memaddr);
+#include "dis-asm.h"
 
 /* Print a PowerPC or POWER instruction.  */
 
 int
-print_insn_powerpc (unsigned long insn, unsigned long memaddr, int dialect)
+print_insn_powerpc (unsigned long insn, unsigned long memaddr)
 {
   const struct powerpc_opcode *opcode;
   const struct powerpc_opcode *opcode_end;
   unsigned long op;
+  int dialect;
 
-  if (dialect == 0)
-    dialect = PPC_OPCODE_PPC | PPC_OPCODE_CLASSIC | PPC_OPCODE_COMMON
+  dialect = PPC_OPCODE_PPC | PPC_OPCODE_CLASSIC | PPC_OPCODE_COMMON
 	      | PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_ALTIVEC;
 
+  if (cpu_has_feature(CPU_FTRS_POWER5))
+    dialect |= PPC_OPCODE_POWER5;
+
+  if (cpu_has_feature(CPU_FTRS_CELL))
+    dialect |= PPC_OPCODE_CELL | PPC_OPCODE_ALTIVEC;
+
+  if (cpu_has_feature(CPU_FTRS_POWER6))
+    dialect |= PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_ALTIVEC;
+
   /* Get the major opcode of the instruction.  */
   op = PPC_OP (insn);
 
@@ -121,7 +131,8 @@
 	    }
 
 	  /* Print the operand as directed by the flags.  */
-	  if ((operand->flags & PPC_OPERAND_GPR) != 0)
+	  if ((operand->flags & PPC_OPERAND_GPR) != 0
+	      || ((operand->flags & PPC_OPERAND_GPR_0) != 0 && value != 0))
 	    printf("r%ld", value);
 	  else if ((operand->flags & PPC_OPERAND_FPR) != 0)
 	    printf("f%ld", value);
@@ -137,7 +148,7 @@
 	  else
 	    {
 	      if (operand->bits == 3)
-		printf("cr%d", value);
+		printf("cr%ld", value);
 	      else
 		{
 		  static const char *cbnames[4] = { "lt", "gt", "eq", "so" };
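
print_insn_powerpc() now picks its dialect from the running CPU's features
instead of taking it as an argument, so callers only pass the instruction word
and its address.  A sketch of a caller, in the style of xmon's instruction
dump (names hypothetical):

static void example_disassemble_one(unsigned long insn, unsigned long addr)
{
	printf("%.8lx  ", addr);
	print_insn_powerpc(insn, addr);
	printf("\n");
}
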
diff --git a/arch/powerpc/xmon/ppc-opc.c b/arch/powerpc/xmon/ppc-opc.c
index 5ee8fc3..5d841f4 100644
--- a/arch/powerpc/xmon/ppc-opc.c
+++ b/arch/powerpc/xmon/ppc-opc.c
@@ -1,6 +1,6 @@
 /* ppc-opc.c -- PowerPC opcode list
-   Copyright 1994, 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003
-   Free Software Foundation, Inc.
+   Copyright 1994, 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
+   2005 Free Software Foundation, Inc.
    Written by Ian Lance Taylor, Cygnus Support
 
    This file is part of GDB, GAS, and the GNU binutils.
@@ -17,8 +17,8 @@
 
    You should have received a copy of the GNU General Public License
    along with this file; see the file COPYING.  If not, write to the Free
-   Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-   02111-1307, USA.  */
+   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
+   02110-1301, USA.  */
 
 #include <linux/stddef.h>
 #include "nonstdio.h"
@@ -86,6 +86,8 @@
 static long extract_sh6 (unsigned long, int, int *);
 static unsigned long insert_spr (unsigned long, long, int, const char **);
 static long extract_spr (unsigned long, int, int *);
+static unsigned long insert_sprg (unsigned long, long, int, const char **);
+static long extract_sprg (unsigned long, int, int *);
 static unsigned long insert_tbr (unsigned long, long, int, const char **);
 static long extract_tbr (unsigned long, int, int *);
 static unsigned long insert_ev2 (unsigned long, long, int, const char **);
@@ -196,8 +198,11 @@
 #define BOE BO + 1
   { 5, 21, insert_boe, extract_boe, 0 },
 
+#define BH BOE + 1
+  { 2, 11, NULL, NULL, PPC_OPERAND_OPTIONAL },
+
   /* The BT field in an X or XL form instruction.  */
-#define BT BOE + 1
+#define BT BH + 1
   { 5, 21, NULL, NULL, PPC_OPERAND_CR },
 
   /* The condition register number portion of the BI field in a B form
@@ -301,10 +306,14 @@
 #define L FXM4 + 1
   { 1, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
 
-  /* The LEV field in a POWER SC form instruction.  */
-#define LEV L + 1
+  /* The LEV field in a POWER SVC form instruction.  */
+#define SVC_LEV L + 1
   { 7, 5, NULL, NULL, 0 },
 
+  /* The LEV field in an SC form instruction.  */
+#define LEV SVC_LEV + 1
+  { 7, 5, NULL, NULL, PPC_OPERAND_OPTIONAL },
+
   /* The LI field in an I form instruction.  The lower two bits are
      forced to zero.  */
 #define LI LEV + 1
@@ -346,7 +355,7 @@
 
   /* The MO field in an mbar instruction.  */
 #define MO MB6 + 1
-  { 5, 21, NULL, NULL, 0 },
+  { 5, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
 
   /* The NB field in an X form instruction.  The value 32 is stored as
      0.  */
@@ -364,30 +373,38 @@
 #define RA_MASK (0x1f << 16)
   { 5, 16, NULL, NULL, PPC_OPERAND_GPR },
 
+  /* As above, but 0 in the RA field means zero, not r0.  */
+#define RA0 RA + 1
+  { 5, 16, NULL, NULL, PPC_OPERAND_GPR_0 },
+
   /* The RA field in the DQ form lq instruction, which has special
      value restrictions.  */
-#define RAQ RA + 1
-  { 5, 16, insert_raq, NULL, PPC_OPERAND_GPR },
+#define RAQ RA0 + 1
+  { 5, 16, insert_raq, NULL, PPC_OPERAND_GPR_0 },
 
   /* The RA field in a D or X form instruction which is an updating
      load, which means that the RA field may not be zero and may not
      equal the RT field.  */
 #define RAL RAQ + 1
-  { 5, 16, insert_ral, NULL, PPC_OPERAND_GPR },
+  { 5, 16, insert_ral, NULL, PPC_OPERAND_GPR_0 },
 
   /* The RA field in an lmw instruction, which has special value
      restrictions.  */
 #define RAM RAL + 1
-  { 5, 16, insert_ram, NULL, PPC_OPERAND_GPR },
+  { 5, 16, insert_ram, NULL, PPC_OPERAND_GPR_0 },
 
   /* The RA field in a D or X form instruction which is an updating
      store or an updating floating point load, which means that the RA
      field may not be zero.  */
 #define RAS RAM + 1
-  { 5, 16, insert_ras, NULL, PPC_OPERAND_GPR },
+  { 5, 16, insert_ras, NULL, PPC_OPERAND_GPR_0 },
+
+  /* The RA field of the tlbwe instruction, which is optional.  */
+#define RAOPT RAS + 1
+  { 5, 16, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL },
 
   /* The RB field in an X, XO, M, or MDS form instruction.  */
-#define RB RAS + 1
+#define RB RAOPT + 1
 #define RB_MASK (0x1f << 11)
   { 5, 11, NULL, NULL, PPC_OPERAND_GPR },
 
@@ -408,15 +425,20 @@
   /* The RS field of the DS form stq instruction, which has special
      value restrictions.  */
 #define RSQ RS + 1
-  { 5, 21, insert_rsq, NULL, PPC_OPERAND_GPR },
+  { 5, 21, insert_rsq, NULL, PPC_OPERAND_GPR_0 },
 
   /* The RT field of the DQ form lq instruction, which has special
      value restrictions.  */
 #define RTQ RSQ + 1
-  { 5, 21, insert_rtq, NULL, PPC_OPERAND_GPR },
+  { 5, 21, insert_rtq, NULL, PPC_OPERAND_GPR_0 },
+
+  /* The RS field of the tlbwe instruction, which is optional.  */
+#define RSO RTQ + 1
+#define RTO RSO
+  { 5, 21, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL },
 
   /* The SH field in an X or M form instruction.  */
-#define SH RTQ + 1
+#define SH RSO + 1
 #define SH_MASK (0x1f << 11)
   { 5, 11, NULL, NULL, 0 },
 
@@ -425,8 +447,12 @@
 #define SH6_MASK ((0x1f << 11) | (1 << 1))
   { 6, 1, insert_sh6, extract_sh6, 0 },
 
+  /* The SH field of the tlbwe instruction, which is optional.  */
+#define SHO SH6 + 1
+  { 5, 11, NULL, NULL, PPC_OPERAND_OPTIONAL },
+
   /* The SI field in a D form instruction.  */
-#define SI SH6 + 1
+#define SI SHO + 1
   { 16, 0, NULL, NULL, PPC_OPERAND_SIGNED },
 
   /* The SI field in a D form instruction when we accept a wide range
@@ -448,8 +474,7 @@
 
   /* The SPRG register number in an XFX form m[ft]sprg instruction.  */
 #define SPRG SPRBAT + 1
-#define SPRG_MASK (0x3 << 16)
-  { 2, 16, NULL, NULL, 0 },
+  { 5, 16, insert_sprg, extract_sprg, 0 },
 
   /* The SR field in an X form instruction.  */
 #define SR SPRG + 1
@@ -536,10 +561,45 @@
 #define WS_MASK (0x7 << 11)
   { 3, 11, NULL, NULL, 0 },
 
-  /* The L field in an mtmsrd instruction */
+  /* The L field in an mtmsrd or A form instruction.  */
 #define MTMSRD_L WS + 1
+#define A_L MTMSRD_L
   { 1, 16, NULL, NULL, PPC_OPERAND_OPTIONAL },
 
+  /* The DCM field in a Z form instruction.  */
+#define DCM MTMSRD_L + 1
+  { 6, 16, NULL, NULL, 0 },
+
+  /* Likewise, the DGM field in a Z form instruction.  */
+#define DGM DCM + 1
+  { 6, 16, NULL, NULL, 0 },
+
+#define TE DGM + 1
+  { 5, 11, NULL, NULL, 0 },
+
+#define RMC TE + 1
+  { 2, 21, NULL, NULL, 0 },
+
+#define R RMC + 1
+  { 1, 15, NULL, NULL, 0 },
+
+#define SP R + 1
+  { 2, 11, NULL, NULL, 0 },
+
+#define S SP + 1
+  { 1, 11, NULL, NULL, 0 },
+
+  /* SH field starting at bit position 16.  */
+#define SH16 S + 1
+  { 6, 10, NULL, NULL, 0 },
+
+  /* The L field in an X form with the RT field fixed instruction.  */
+#define XRT_L SH16 + 1
+  { 2, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
+
+  /* The EH field in larx instruction.  */
+#define EH XRT_L + 1
+  { 1, 0, NULL, NULL, PPC_OPERAND_OPTIONAL },
 };
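
Entries without insert/extract hooks, such as the new BH, SHO and EH fields
above, are handled by the generic operand code as a plain bit field: "bits"
wide, starting at "shift".  Roughly (a sketch, not the disassembler's exact
code):

static long example_extract_operand(unsigned long insn,
				    unsigned int bits, int shift)
{
	/* e.g. the EH entry { 1, 0, ... } is the low-order bit of the insn */
	return (insn >> shift) & ((1UL << bits) - 1);
}
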
 
 /* The functions used to insert and extract complicated operands.  */
@@ -550,7 +610,6 @@
    and the extraction function just checks that the fields are the
    same.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_bat (unsigned long insn,
 	    long value ATTRIBUTE_UNUSED,
@@ -576,7 +635,6 @@
    and the extraction function just checks that the fields are the
    same.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_bba (unsigned long insn,
 	    long value ATTRIBUTE_UNUSED,
@@ -599,7 +657,6 @@
 /* The BD field in a B form instruction.  The lower two bits are
    forced to zero.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_bd (unsigned long insn,
 	   long value,
@@ -609,7 +666,6 @@
   return insn | (value & 0xfffc);
 }
 
-/*ARGSUSED*/
 static long
 extract_bd (unsigned long insn,
 	    int dialect ATTRIBUTE_UNUSED,
@@ -631,7 +687,6 @@
    in BO field, the "a" bit is 00010 for branch on CR(BI) and 01000
    for branch on CTR.  We only handle the taken/not-taken hint here.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_bdm (unsigned long insn,
 	    long value,
@@ -677,7 +732,6 @@
    This is like BDM, above, except that the branch is expected to be
    taken.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_bdp (unsigned long insn,
 	    long value,
@@ -831,7 +885,6 @@
 /* The DQ field in a DQ form instruction.  This is like D, but the
    lower four bits are forced to zero. */
 
-/*ARGSUSED*/
 static unsigned long
 insert_dq (unsigned long insn,
 	   long value,
@@ -843,7 +896,6 @@
   return insn | (value & 0xfff0);
 }
 
-/*ARGSUSED*/
 static long
 extract_dq (unsigned long insn,
 	    int dialect ATTRIBUTE_UNUSED,
@@ -918,7 +970,6 @@
 /* The DS field in a DS form instruction.  This is like D, but the
    lower two bits are forced to zero.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_ds (unsigned long insn,
 	   long value,
@@ -930,7 +981,6 @@
   return insn | (value & 0xfffc);
 }
 
-/*ARGSUSED*/
 static long
 extract_ds (unsigned long insn,
 	    int dialect ATTRIBUTE_UNUSED,
@@ -941,7 +991,6 @@
 
 /* The DE field in a DE form instruction.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_de (unsigned long insn,
 	   long value,
@@ -953,7 +1002,6 @@
   return insn | ((value << 4) & 0xfff0);
 }
 
-/*ARGSUSED*/
 static long
 extract_de (unsigned long insn,
 	    int dialect ATTRIBUTE_UNUSED,
@@ -964,7 +1012,6 @@
 
 /* The DES field in a DES form instruction.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_des (unsigned long insn,
 	    long value,
@@ -978,7 +1025,6 @@
   return insn | ((value << 2) & 0xfff0);
 }
 
-/*ARGSUSED*/
 static long
 extract_des (unsigned long insn,
 	     int dialect ATTRIBUTE_UNUSED,
@@ -995,17 +1041,33 @@
 	    int dialect,
 	    const char **errmsg)
 {
+  /* If we're handling the mfocrf and mtocrf insns ensure that exactly
+     one bit of the mask field is set.  */
+  if ((insn & (1 << 20)) != 0)
+    {
+      if (value == 0 || (value & -value) != value)
+	{
+	  *errmsg = _("invalid mask field");
+	  value = 0;
+	}
+    }
+
   /* If the optional field on mfcr is missing that means we want to use
      the old form of the instruction that moves the whole cr.  In that
      case we'll have VALUE zero.  There doesn't seem to be a way to
      distinguish this from the case where someone writes mfcr %r3,0.  */
-  if (value == 0)
+  else if (value == 0)
     ;
 
   /* If only one bit of the FXM field is set, we can use the new form
      of the instruction, which is faster.  Unlike the Power4 branch hint
-     encoding, this is not backward compatible.  */
-  else if ((dialect & PPC_OPCODE_POWER4) != 0 && (value & -value) == value)
+     encoding, this is not backward compatible.  Do not generate the
+     new form unless -mpower4 has been given, or -many and the two
+     operand form of mfcr was used.  */
+  else if ((value & -value) == value
+	   && ((dialect & PPC_OPCODE_POWER4) != 0
+	       || ((dialect & PPC_OPCODE_ANY) != 0
+		   && (insn & (0x3ff << 1)) == 19 << 1)))
     insn |= 1 << 20;
 
   /* Any other value on mfcr is an error.  */
@@ -1020,7 +1082,7 @@
 
 static long
 extract_fxm (unsigned long insn,
-	     int dialect,
+	     int dialect ATTRIBUTE_UNUSED,
 	     int *invalid)
 {
   long mask = (insn >> 12) & 0xff;
@@ -1028,14 +1090,9 @@
   /* Is this a Power4 insn?  */
   if ((insn & (1 << 20)) != 0)
     {
-      if ((dialect & PPC_OPCODE_POWER4) == 0)
+      /* Exactly one bit of MASK should be set.  */
+      if (mask == 0 || (mask & -mask) != mask)
 	*invalid = 1;
-      else
-	{
-	  /* Exactly one bit of MASK should be set.  */
-	  if (mask == 0 || (mask & -mask) != mask)
-	    *invalid = 1;
-	}
     }
 
   /* Check that non-power4 form of mfcr has a zero MASK.  */
@@ -1051,7 +1108,6 @@
 /* The LI field in an I form instruction.  The lower two bits are
    forced to zero.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_li (unsigned long insn,
 	   long value,
@@ -1063,7 +1119,6 @@
   return insn | (value & 0x3fffffc);
 }
 
-/*ARGSUSED*/
 static long
 extract_li (unsigned long insn,
 	    int dialect ATTRIBUTE_UNUSED,
@@ -1163,7 +1218,6 @@
 /* The MB or ME field in an MD or MDS form instruction.  The high bit
    is wrapped to the low end.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_mb6 (unsigned long insn,
 	    long value,
@@ -1173,7 +1227,6 @@
   return insn | ((value & 0x1f) << 6) | (value & 0x20);
 }
 
-/*ARGSUSED*/
 static long
 extract_mb6 (unsigned long insn,
 	     int dialect ATTRIBUTE_UNUSED,
@@ -1198,7 +1251,6 @@
   return insn | ((value & 0x1f) << 11);
 }
 
-/*ARGSUSED*/
 static long
 extract_nb (unsigned long insn,
 	    int dialect ATTRIBUTE_UNUSED,
@@ -1217,7 +1269,6 @@
    invalid, since we never want to recognize an instruction which uses
    a field of this type.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_nsi (unsigned long insn,
 	    long value,
@@ -1269,7 +1320,6 @@
 /* The RA field in the DQ form lq instruction, which has special
    value restrictions.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_raq (unsigned long insn,
 	    long value,
@@ -1304,7 +1354,6 @@
    function just copies the BT field into the BA field, and the
    extraction function just checks that the fields are the same.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_rbs (unsigned long insn,
 	    long value ATTRIBUTE_UNUSED,
@@ -1327,7 +1376,6 @@
 /* The RT field of the DQ form lq instruction, which has special
    value restrictions.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_rtq (unsigned long insn,
 	    long value,
@@ -1342,7 +1390,6 @@
 /* The RS field of the DS form stq instruction, which has special
    value restrictions.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_rsq (unsigned long insn,
 	    long value ATTRIBUTE_UNUSED,
@@ -1356,7 +1403,6 @@
 
 /* The SH field in an MD form instruction.  This is split.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_sh6 (unsigned long insn,
 	    long value,
@@ -1366,7 +1412,6 @@
   return insn | ((value & 0x1f) << 11) | ((value & 0x20) >> 4);
 }
 
-/*ARGSUSED*/
 static long
 extract_sh6 (unsigned long insn,
 	     int dialect ATTRIBUTE_UNUSED,
@@ -1395,6 +1440,47 @@
   return ((insn >> 16) & 0x1f) | ((insn >> 6) & 0x3e0);
 }
 
+/* Some dialects have 8 SPRG registers instead of the standard 4.  */
+
+static unsigned long
+insert_sprg (unsigned long insn,
+	     long value,
+	     int dialect,
+	     const char **errmsg)
+{
+  /* This check uses PPC_OPCODE_403 because PPC405 is later defined
+     as a synonym.  If ever a 405 specific dialect is added this
+     check should use that instead.  */
+  if (value > 7
+      || (value > 3
+	  && (dialect & (PPC_OPCODE_BOOKE | PPC_OPCODE_403)) == 0))
+    *errmsg = _("invalid sprg number");
+
+  /* If this is mfsprg4..7 then use spr 260..263 which can be read in
+     user mode.  Anything else must use spr 272..279.  */
+  if (value <= 3 || (insn & 0x100) != 0)
+    value |= 0x10;
+
+  return insn | ((value & 0x17) << 16);
+}
+
+static long
+extract_sprg (unsigned long insn,
+	      int dialect,
+	      int *invalid)
+{
+  unsigned long val = (insn >> 16) & 0x1f;
+
+  /* mfsprg can use 260..263 and 272..279.  mtsprg only uses spr 272..279
+     If not BOOKE or 405, then both use only 272..275.  */
+  if (val <= 3
+      || (val < 0x10 && (insn & 0x100) != 0)
+      || (val - 0x10 > 3
+	  && (dialect & (PPC_OPCODE_BOOKE | PPC_OPCODE_403)) == 0))
+    *invalid = 1;
+  return val & 7;
+}
+
 /* The TBR field in an XFX instruction.  This is just like SPR, but it
    is optional.  When TBR is omitted, it must be inserted as 268 (the
    magic number of the TB register).  These functions treat 0
@@ -1460,6 +1546,9 @@
 /* An A_MASK with the FRA and FRC fields fixed.  */
 #define AFRAFRC_MASK (A_MASK | FRA_MASK | FRC_MASK)
 
+/* An AFRAFRC_MASK, but with L bit clear.  */
+#define AFRALFRC_MASK (AFRAFRC_MASK & ~((unsigned long) 1 << 16))
+
 /* A B form instruction.  */
 #define B(op, aa, lk) (OP (op) | ((((unsigned long)(aa)) & 1) << 1) | ((lk) & 1))
 #define B_MASK B (0x3f, 1, 1)
@@ -1494,11 +1583,11 @@
 
 /* An Context form instruction.  */
 #define CTX(op, xop)   (OP (op) | (((unsigned long)(xop)) & 0x7))
-#define CTX_MASK       CTX(0x3f, 0x7)
+#define CTX_MASK CTX(0x3f, 0x7)
 
 /* An User Context form instruction.  */
 #define UCTX(op, xop)  (OP (op) | (((unsigned long)(xop)) & 0x1f))
-#define UCTX_MASK      UCTX(0x3f, 0x1f)
+#define UCTX_MASK UCTX(0x3f, 0x1f)
 
 /* The main opcode mask with the RA field clear.  */
 #define DRA_MASK (OP_MASK | RA_MASK)
@@ -1570,12 +1659,21 @@
 /* An X form instruction.  */
 #define X(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1))
 
+/* A Z form instruction.  */
+#define Z(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 1))
+
 /* An X form instruction with the RC bit specified.  */
 #define XRC(op, xop, rc) (X ((op), (xop)) | ((rc) & 1))
 
+/* A Z form instruction with the RC bit specified.  */
+#define ZRC(op, xop, rc) (Z ((op), (xop)) | ((rc) & 1))
+
 /* The mask for an X form instruction.  */
 #define X_MASK XRC (0x3f, 0x3ff, 1)
 
+/* The mask for a Z form instruction.  */
+#define Z_MASK ZRC (0x3f, 0x1ff, 1)
+
 /* An X_MASK with the RA field fixed.  */
 #define XRA_MASK (X_MASK | RA_MASK)
 
@@ -1585,6 +1683,9 @@
 /* An X_MASK with the RT field fixed.  */
 #define XRT_MASK (X_MASK | RT_MASK)
 
+/* An XRT_MASK mask with the L bits clear.  */
+#define XLRT_MASK (XRT_MASK & ~((unsigned long) 0x3 << 21))
+
 /* An X_MASK with the RA and RB fields fixed.  */
 #define XRARB_MASK (X_MASK | RA_MASK | RB_MASK)
 
@@ -1597,8 +1698,8 @@
 /* An XRTRA_MASK, but with L bit clear.  */
 #define XRTLRA_MASK (XRTRA_MASK & ~((unsigned long) 1 << 21))
 
-/* An X form comparison instruction.  */
-#define XCMPL(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 1) << 21))
+/* An X form instruction with the L bit specified.  */
+#define XOPL(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 1) << 21))
 
 /* The mask for an X form comparison instruction.  */
 #define XCMP_MASK (X_MASK | (((unsigned long)1) << 22))
@@ -1621,6 +1722,9 @@
 /* An X form sync instruction with everything filled in except the LS field.  */
 #define XSYNC_MASK (0xff9fffff)
 
+/* An X_MASK, but with the EH bit clear.  */
+#define XEH_MASK (X_MASK & ~((unsigned long )1))
+
 /* An X form AltiVec dss instruction.  */
 #define XDSS(op, xop, a) (X ((op), (xop)) | ((((unsigned long)(a)) & 1) << 25))
 #define XDSS_MASK XDSS(0x3f, 0x3ff, 1)
@@ -1663,6 +1767,9 @@
 #define XLYBB_MASK (XLYLK_MASK | BB_MASK)
 #define XLBOCBBB_MASK (XLOCB_MASK | BB_MASK)
 
+/* A mask for branch instructions using the BH field.  */
+#define XLBH_MASK (XL_MASK | (0x1c << 11))
+
 /* An XL_MASK with the BO and BB fields fixed.  */
 #define XLBOBB_MASK (XL_MASK | BO_MASK | BB_MASK)
 
@@ -1682,11 +1789,12 @@
 #define XS_MASK XS (0x3f, 0x1ff, 1)
 
 /* A mask for the FXM version of an XFX form instruction.  */
-#define XFXFXM_MASK (X_MASK | (1 << 11))
+#define XFXFXM_MASK (X_MASK | (1 << 11) | (1 << 20))
 
 /* An XFX form instruction with the FXM field filled in.  */
-#define XFXM(op, xop, fxm) \
-  (X ((op), (xop)) | ((((unsigned long)(fxm)) & 0xff) << 12))
+#define XFXM(op, xop, fxm, p4) \
+  (X ((op), (xop)) | ((((unsigned long)(fxm)) & 0xff) << 12) \
+   | ((unsigned long)(p4) << 20))
 
 /* An XFX form instruction with the SPR field filled in.  */
 #define XSPR(op, xop, spr) \
@@ -1699,7 +1807,7 @@
 
 /* An XFX form instruction with the SPR field filled in except for the
    SPRG field.  */
-#define XSPRG_MASK (XSPR_MASK &~ SPRG_MASK)
+#define XSPRG_MASK (XSPR_MASK & ~(0x17 << 16))
 
 /* An X form instruction with everything filled in except the E field.  */
 #define XE_MASK (0xffff7fff)
@@ -1769,6 +1877,9 @@
 #define PPCCOM	PPC_OPCODE_PPC | PPC_OPCODE_COMMON
 #define NOPOWER4 PPC_OPCODE_NOPOWER4 | PPCCOM
 #define POWER4	PPC_OPCODE_POWER4
+#define POWER5	PPC_OPCODE_POWER5
+#define POWER6	PPC_OPCODE_POWER6
+#define CELL	PPC_OPCODE_CELL
 #define PPC32   PPC_OPCODE_32 | PPC_OPCODE_PPC
 #define PPC64   PPC_OPCODE_64 | PPC_OPCODE_PPC
 #define PPC403	PPC_OPCODE_403
@@ -1776,7 +1887,7 @@
 #define PPC440	PPC_OPCODE_440
 #define PPC750	PPC
 #define PPC860	PPC
-#define PPCVEC	PPC_OPCODE_ALTIVEC | PPC_OPCODE_PPC
+#define PPCVEC	PPC_OPCODE_ALTIVEC
 #define	POWER   PPC_OPCODE_POWER
 #define	POWER2	PPC_OPCODE_POWER | PPC_OPCODE_POWER2
 #define PPCPWR2	PPC_OPCODE_PPC | PPC_OPCODE_POWER | PPC_OPCODE_POWER2
@@ -1790,6 +1901,7 @@
 #define BOOKE	PPC_OPCODE_BOOKE
 #define BOOKE64	PPC_OPCODE_BOOKE64
 #define CLASSIC	PPC_OPCODE_CLASSIC
+#define PPCE300 PPC_OPCODE_E300
 #define PPCSPE	PPC_OPCODE_SPE
 #define PPCISEL	PPC_OPCODE_ISEL
 #define PPCEFS	PPC_OPCODE_EFS
@@ -1952,6 +2064,41 @@
 { "nmaclhwso.",	XO(4,494,1,1), XO_MASK,	PPC405|PPC440,	{ RT, RA, RB } },
 { "mfvscr",  VX(4, 1540), VX_MASK,	PPCVEC,		{ VD } },
 { "mtvscr",  VX(4, 1604), VX_MASK,	PPCVEC,		{ VB } },
+
+  /* Double-precision opcodes.  */
+  /* Some of these conflict with AltiVec, so move them before, since
+     PPCVEC includes the PPC_OPCODE_PPC set.  */
+{ "efscfd",   VX(4, 719), VX_MASK,	PPCEFS,		{ RS, RB } },
+{ "efdabs",   VX(4, 740), VX_MASK,	PPCEFS,		{ RS, RA } },
+{ "efdnabs",  VX(4, 741), VX_MASK,	PPCEFS,		{ RS, RA } },
+{ "efdneg",   VX(4, 742), VX_MASK,	PPCEFS,		{ RS, RA } },
+{ "efdadd",   VX(4, 736), VX_MASK,	PPCEFS,		{ RS, RA, RB } },
+{ "efdsub",   VX(4, 737), VX_MASK,	PPCEFS,		{ RS, RA, RB } },
+{ "efdmul",   VX(4, 744), VX_MASK,	PPCEFS,		{ RS, RA, RB } },
+{ "efddiv",   VX(4, 745), VX_MASK,	PPCEFS,		{ RS, RA, RB } },
+{ "efdcmpgt", VX(4, 748), VX_MASK,	PPCEFS,		{ CRFD, RA, RB } },
+{ "efdcmplt", VX(4, 749), VX_MASK,	PPCEFS,		{ CRFD, RA, RB } },
+{ "efdcmpeq", VX(4, 750), VX_MASK,	PPCEFS,		{ CRFD, RA, RB } },
+{ "efdtstgt", VX(4, 764), VX_MASK,	PPCEFS,		{ CRFD, RA, RB } },
+{ "efdtstlt", VX(4, 765), VX_MASK,	PPCEFS,		{ CRFD, RA, RB } },
+{ "efdtsteq", VX(4, 766), VX_MASK,	PPCEFS,		{ CRFD, RA, RB } },
+{ "efdcfsi",  VX(4, 753), VX_MASK,	PPCEFS,		{ RS, RB } },
+{ "efdcfsid", VX(4, 739), VX_MASK,	PPCEFS,		{ RS, RB } },
+{ "efdcfui",  VX(4, 752), VX_MASK,	PPCEFS,		{ RS, RB } },
+{ "efdcfuid", VX(4, 738), VX_MASK,	PPCEFS,		{ RS, RB } },
+{ "efdcfsf",  VX(4, 755), VX_MASK,	PPCEFS,		{ RS, RB } },
+{ "efdcfuf",  VX(4, 754), VX_MASK,	PPCEFS,		{ RS, RB } },
+{ "efdctsi",  VX(4, 757), VX_MASK,	PPCEFS,		{ RS, RB } },
+{ "efdctsidz",VX(4, 747), VX_MASK,	PPCEFS,		{ RS, RB } },
+{ "efdctsiz", VX(4, 762), VX_MASK,	PPCEFS,		{ RS, RB } },
+{ "efdctui",  VX(4, 756), VX_MASK,	PPCEFS,		{ RS, RB } },
+{ "efdctuidz",VX(4, 746), VX_MASK,	PPCEFS,		{ RS, RB } },
+{ "efdctuiz", VX(4, 760), VX_MASK,	PPCEFS,		{ RS, RB } },
+{ "efdctsf",  VX(4, 759), VX_MASK,	PPCEFS,		{ RS, RB } },
+{ "efdctuf",  VX(4, 758), VX_MASK,	PPCEFS,		{ RS, RB } },
+{ "efdcfs",   VX(4, 751), VX_MASK,	PPCEFS,		{ RS, RB } },
+  /* End of double-precision opcodes.  */
+
 { "vaddcuw", VX(4,  384), VX_MASK,	PPCVEC,		{ VD, VA, VB } },
 { "vaddfp",  VX(4,   10), VX_MASK, 	PPCVEC,		{ VD, VA, VB } },
 { "vaddsbs", VX(4,  768), VX_MASK,	PPCVEC,		{ VD, VA, VB } },
@@ -2389,16 +2536,16 @@
 
 { "li",	     OP(14),	DRA_MASK,	PPCCOM,		{ RT, SI } },
 { "lil",     OP(14),	DRA_MASK,	PWRCOM,		{ RT, SI } },
-{ "addi",    OP(14),	OP_MASK,	PPCCOM,		{ RT, RA, SI } },
-{ "cal",     OP(14),	OP_MASK,	PWRCOM,		{ RT, D, RA } },
-{ "subi",    OP(14),	OP_MASK,	PPCCOM,		{ RT, RA, NSI } },
-{ "la",	     OP(14),	OP_MASK,	PPCCOM,		{ RT, D, RA } },
+{ "addi",    OP(14),	OP_MASK,	PPCCOM,		{ RT, RA0, SI } },
+{ "cal",     OP(14),	OP_MASK,	PWRCOM,		{ RT, D, RA0 } },
+{ "subi",    OP(14),	OP_MASK,	PPCCOM,		{ RT, RA0, NSI } },
+{ "la",	     OP(14),	OP_MASK,	PPCCOM,		{ RT, D, RA0 } },
 
 { "lis",     OP(15),	DRA_MASK,	PPCCOM,		{ RT, SISIGNOPT } },
 { "liu",     OP(15),	DRA_MASK,	PWRCOM,		{ RT, SISIGNOPT } },
-{ "addis",   OP(15),	OP_MASK,	PPCCOM,		{ RT,RA,SISIGNOPT } },
-{ "cau",     OP(15),	OP_MASK,	PWRCOM,		{ RT,RA,SISIGNOPT } },
-{ "subis",   OP(15),	OP_MASK,	PPCCOM,		{ RT, RA, NSI } },
+{ "addis",   OP(15),	OP_MASK,	PPCCOM,		{ RT,RA0,SISIGNOPT } },
+{ "cau",     OP(15),	OP_MASK,	PWRCOM,		{ RT,RA0,SISIGNOPT } },
+{ "subis",   OP(15),	OP_MASK,	PPCCOM,		{ RT, RA0, NSI } },
 
 { "bdnz-",   BBO(16,BODNZ,0,0),      BBOATBI_MASK, PPCCOM,	{ BDM } },
 { "bdnz+",   BBO(16,BODNZ,0,0),      BBOATBI_MASK, PPCCOM,	{ BDP } },
@@ -2665,9 +2812,9 @@
 { "bcla+",   B(16,1,1),	B_MASK,		PPCCOM,		{ BOE, BI, BDPA } },
 { "bcla",    B(16,1,1),	B_MASK,		COM,		{ BO, BI, BDA } },
 
-{ "sc",      SC(17,1,0), 0xffffffff,	PPC,		{ 0 } },
-{ "svc",     SC(17,0,0), SC_MASK,	POWER,		{ LEV, FL1, FL2 } },
-{ "svcl",    SC(17,0,1), SC_MASK,	POWER,		{ LEV, FL1, FL2 } },
+{ "sc",      SC(17,1,0), SC_MASK,	PPC,		{ LEV } },
+{ "svc",     SC(17,0,0), SC_MASK,	POWER,		{ SVC_LEV, FL1, FL2 } },
+{ "svcl",    SC(17,0,1), SC_MASK,	POWER,		{ SVC_LEV, FL1, FL2 } },
 { "svca",    SC(17,1,0), SC_MASK,	PWRCOM,		{ SV } },
 { "svcla",   SC(17,1,1), SC_MASK,	POWER,		{ SV } },
 
@@ -2890,12 +3037,12 @@
 { "bdzflrl", XLO(19,BODZF,16,1), XLBOBB_MASK, PPCCOM,	{ BI } },
 { "bdzflrl-",XLO(19,BODZF,16,1), XLBOBB_MASK, NOPOWER4,	{ BI } },
 { "bdzflrl+",XLO(19,BODZFP,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
-{ "bclr",    XLLK(19,16,0), XLYBB_MASK,	PPCCOM,		{ BO, BI } },
-{ "bclrl",   XLLK(19,16,1), XLYBB_MASK,	PPCCOM,		{ BO, BI } },
 { "bclr+",   XLYLK(19,16,1,0), XLYBB_MASK, PPCCOM,	{ BOE, BI } },
 { "bclrl+",  XLYLK(19,16,1,1), XLYBB_MASK, PPCCOM,	{ BOE, BI } },
 { "bclr-",   XLYLK(19,16,0,0), XLYBB_MASK, PPCCOM,	{ BOE, BI } },
 { "bclrl-",  XLYLK(19,16,0,1), XLYBB_MASK, PPCCOM,	{ BOE, BI } },
+{ "bclr",    XLLK(19,16,0), XLBH_MASK,	PPCCOM,		{ BO, BI, BH } },
+{ "bclrl",   XLLK(19,16,1), XLBH_MASK,	PPCCOM,		{ BO, BI, BH } },
 { "bcr",     XLLK(19,16,0), XLBB_MASK,	PWRCOM,		{ BO, BI } },
 { "bcrl",    XLLK(19,16,1), XLBB_MASK,	PWRCOM,		{ BO, BI } },
 { "bclre",   XLLK(19,17,0), XLBB_MASK,	BOOKE64,	{ BO, BI } },
@@ -2924,14 +3071,23 @@
 
 { "crand",   XL(19,257), XL_MASK,	COM,		{ BT, BA, BB } },
 
+{ "hrfid",   XL(19,274), 0xffffffff,	POWER5 | CELL,	{ 0 } },
+
 { "crset",   XL(19,289), XL_MASK,	PPCCOM,		{ BT, BAT, BBA } },
 { "creqv",   XL(19,289), XL_MASK,	COM,		{ BT, BA, BB } },
 
+{ "doze",    XL(19,402), 0xffffffff,	POWER6,		{ 0 } },
+
 { "crorc",   XL(19,417), XL_MASK,	COM,		{ BT, BA, BB } },
 
+{ "nap",     XL(19,434), 0xffffffff,	POWER6,		{ 0 } },
+
 { "crmove",  XL(19,449), XL_MASK,	PPCCOM,		{ BT, BA, BBA } },
 { "cror",    XL(19,449), XL_MASK,	COM,		{ BT, BA, BB } },
 
+{ "sleep",   XL(19,466), 0xffffffff,	POWER6,		{ 0 } },
+{ "rvwinkle", XL(19,498), 0xffffffff,	POWER6,		{ 0 } },
+
 { "bctr",    XLO(19,BOU,528,0), XLBOBIBB_MASK, COM,	{ 0 } },
 { "bctrl",   XLO(19,BOU,528,1), XLBOBIBB_MASK, COM,	{ 0 } },
 { "bltctr",  XLOCB(19,BOT,CBLT,528,0),  XLBOCBBB_MASK, PPCCOM,	{ CR } },
@@ -3074,12 +3230,12 @@
 { "bfctrl-", XLO(19,BOFM4,528,1), XLBOBB_MASK, POWER4, { BI } },
 { "bfctrl+", XLO(19,BOFP,528,1), XLBOBB_MASK, NOPOWER4, { BI } },
 { "bfctrl+", XLO(19,BOFP4,528,1), XLBOBB_MASK, POWER4, { BI } },
-{ "bcctr",   XLLK(19,528,0),     XLYBB_MASK,  PPCCOM,	{ BO, BI } },
 { "bcctr-",  XLYLK(19,528,0,0),  XLYBB_MASK,  PPCCOM,	{ BOE, BI } },
 { "bcctr+",  XLYLK(19,528,1,0),  XLYBB_MASK,  PPCCOM,	{ BOE, BI } },
-{ "bcctrl",  XLLK(19,528,1),     XLYBB_MASK,  PPCCOM,	{ BO, BI } },
 { "bcctrl-", XLYLK(19,528,0,1),  XLYBB_MASK,  PPCCOM,	{ BOE, BI } },
 { "bcctrl+", XLYLK(19,528,1,1),  XLYBB_MASK,  PPCCOM,	{ BOE, BI } },
+{ "bcctr",   XLLK(19,528,0),     XLBH_MASK,   PPCCOM,	{ BO, BI, BH } },
+{ "bcctrl",  XLLK(19,528,1),     XLBH_MASK,   PPCCOM,	{ BO, BI, BH } },
 { "bcc",     XLLK(19,528,0),     XLBB_MASK,   PWRCOM,	{ BO, BI } },
 { "bccl",    XLLK(19,528,1),     XLBB_MASK,   PWRCOM,	{ BO, BI } },
 { "bcctre",  XLLK(19,529,0),     XLYBB_MASK,  BOOKE64,	{ BO, BI } },
@@ -3158,8 +3314,8 @@
 { "rldcr",   MDS(30,9,0), MDS_MASK,	PPC64,		{ RA, RS, RB, ME6 } },
 { "rldcr.",  MDS(30,9,1), MDS_MASK,	PPC64,		{ RA, RS, RB, ME6 } },
 
-{ "cmpw",    XCMPL(31,0,0), XCMPL_MASK, PPCCOM,		{ OBF, RA, RB } },
-{ "cmpd",    XCMPL(31,0,1), XCMPL_MASK, PPC64,		{ OBF, RA, RB } },
+{ "cmpw",    XOPL(31,0,0), XCMPL_MASK, PPCCOM,		{ OBF, RA, RB } },
+{ "cmpd",    XOPL(31,0,1), XCMPL_MASK, PPC64,		{ OBF, RA, RB } },
 { "cmp",     X(31,0),	XCMP_MASK,	PPC,		{ BF, L, RA, RB } },
 { "cmp",     X(31,0),	XCMPL_MASK,	PWRCOM,		{ BF, RA, RB } },
 
@@ -3228,17 +3384,18 @@
 { "iseleq",  X(31,79),      X_MASK,	PPCISEL,	{ RT, RA, RB } },
 { "isel",    XISEL(31,15),  XISEL_MASK,	PPCISEL,	{ RT, RA, RB, CRB } },
 
-{ "mfcr",    X(31,19),	XRARB_MASK,	NOPOWER4,	{ RT } },
+{ "mfocrf",  XFXM(31,19,0,1), XFXFXM_MASK, COM,		{ RT, FXM } },
+{ "mfcr",    X(31,19),	XRARB_MASK,	NOPOWER4 | COM,	{ RT } },
 { "mfcr",    X(31,19),	XFXFXM_MASK,	POWER4,		{ RT, FXM4 } },
 
-{ "lwarx",   X(31,20),	X_MASK,		PPC,		{ RT, RA, RB } },
+{ "lwarx",   X(31,20),	XEH_MASK,	PPC,		{ RT, RA0, RB, EH } },
 
-{ "ldx",     X(31,21),	X_MASK,		PPC64,		{ RT, RA, RB } },
+{ "ldx",     X(31,21),	X_MASK,		PPC64,		{ RT, RA0, RB } },
 
-{ "icbt",    X(31,22),	X_MASK,		BOOKE,		{ CT, RA, RB } },
+{ "icbt",    X(31,22),	X_MASK,		BOOKE|PPCE300,	{ CT, RA, RB } },
 { "icbt",    X(31,262),	XRT_MASK,	PPC403,		{ RA, RB } },
 
-{ "lwzx",    X(31,23),	X_MASK,		PPCCOM,		{ RT, RA, RB } },
+{ "lwzx",    X(31,23),	X_MASK,		PPCCOM,		{ RT, RA0, RB } },
 { "lx",      X(31,23),	X_MASK,		PWRCOM,		{ RT, RA, RB } },
 
 { "slw",     XRC(31,24,0), X_MASK,	PPCCOM,		{ RA, RS, RB } },
@@ -3262,10 +3419,10 @@
 
 { "icbte",   X(31,30),	X_MASK,		BOOKE64,	{ CT, RA, RB } },
 
-{ "lwzxe",   X(31,31),	X_MASK,		BOOKE64,	{ RT, RA, RB } },
+{ "lwzxe",   X(31,31),	X_MASK,		BOOKE64,	{ RT, RA0, RB } },
 
-{ "cmplw",   XCMPL(31,32,0), XCMPL_MASK, PPCCOM,	{ OBF, RA, RB } },
-{ "cmpld",   XCMPL(31,32,1), XCMPL_MASK, PPC64,		{ OBF, RA, RB } },
+{ "cmplw",   XOPL(31,32,0), XCMPL_MASK, PPCCOM,	{ OBF, RA, RB } },
+{ "cmpld",   XOPL(31,32,1), XCMPL_MASK, PPC64,		{ OBF, RA, RB } },
 { "cmpl",    X(31,32),	XCMP_MASK,	 PPC,		{ BF, L, RA, RB } },
 { "cmpl",    X(31,32),	XCMPL_MASK,	 PWRCOM,	{ BF, RA, RB } },
 
@@ -3324,15 +3481,16 @@
 
 { "mfmsr",   X(31,83),	XRARB_MASK,	COM,		{ RT } },
 
-{ "ldarx",   X(31,84),	X_MASK,		PPC64,		{ RT, RA, RB } },
+{ "ldarx",   X(31,84),	XEH_MASK,	PPC64,		{ RT, RA0, RB, EH } },
 
-{ "dcbf",    X(31,86),	XRT_MASK,	PPC,		{ RA, RB } },
+{ "dcbfl",   XOPL(31,86,1), XRT_MASK,	POWER5,		{ RA, RB } },
+{ "dcbf",    X(31,86),	XLRT_MASK,	PPC,		{ RA, RB, XRT_L } },
 
-{ "lbzx",    X(31,87),	X_MASK,		COM,		{ RT, RA, RB } },
+{ "lbzx",    X(31,87),	X_MASK,		COM,		{ RT, RA0, RB } },
 
 { "dcbfe",   X(31,94),	XRT_MASK,	BOOKE64,	{ RA, RB } },
 
-{ "lbzxe",   X(31,95),	X_MASK,		BOOKE64,	{ RT, RA, RB } },
+{ "lbzxe",   X(31,95),	X_MASK,		BOOKE64,	{ RT, RA0, RB } },
 
 { "neg",     XO(31,104,0,0), XORB_MASK,	COM,		{ RT, RA } },
 { "neg.",    XO(31,104,0,1), XORB_MASK,	COM,		{ RT, RA } },
@@ -3350,12 +3508,14 @@
 
 { "lbzux",   X(31,119),	X_MASK,		COM,		{ RT, RAL, RB } },
 
+{ "popcntb", X(31,122), XRB_MASK,	POWER5,		{ RA, RS } },
+
 { "not",     XRC(31,124,0), X_MASK,	COM,		{ RA, RS, RBS } },
 { "nor",     XRC(31,124,0), X_MASK,	COM,		{ RA, RS, RB } },
 { "not.",    XRC(31,124,1), X_MASK,	COM,		{ RA, RS, RBS } },
 { "nor.",    XRC(31,124,1), X_MASK,	COM,		{ RA, RS, RB } },
 
-{ "lwarxe",  X(31,126),	X_MASK,		BOOKE64,	{ RT, RA, RB } },
+{ "lwarxe",  X(31,126),	X_MASK,		BOOKE64,	{ RT, RA0, RB } },
 
 { "lbzuxe",  X(31,127),	X_MASK,		BOOKE64,	{ RT, RAL, RB } },
 
@@ -3383,21 +3543,22 @@
 
 { "dcbtstlse",X(31,142),X_MASK,		PPCCHLK64,	{ CT, RA, RB }},
 
-{ "mtcr",    XFXM(31,144,0xff), XRARB_MASK, COM,	{ RS }},
+{ "mtocrf",  XFXM(31,144,0,1), XFXFXM_MASK, COM,	{ FXM, RS } },
+{ "mtcr",    XFXM(31,144,0xff,0), XRARB_MASK, COM,	{ RS }},
 { "mtcrf",   X(31,144),	XFXFXM_MASK,	COM,		{ FXM, RS } },
 
 { "mtmsr",   X(31,146),	XRARB_MASK,	COM,		{ RS } },
 
-{ "stdx",    X(31,149), X_MASK,		PPC64,		{ RS, RA, RB } },
+{ "stdx",    X(31,149), X_MASK,		PPC64,		{ RS, RA0, RB } },
 
-{ "stwcx.",  XRC(31,150,1), X_MASK,	PPC,		{ RS, RA, RB } },
+{ "stwcx.",  XRC(31,150,1), X_MASK,	PPC,		{ RS, RA0, RB } },
 
-{ "stwx",    X(31,151), X_MASK,		PPCCOM,		{ RS, RA, RB } },
+{ "stwx",    X(31,151), X_MASK,		PPCCOM,		{ RS, RA0, RB } },
 { "stx",     X(31,151), X_MASK,		PWRCOM,		{ RS, RA, RB } },
 
-{ "stwcxe.", XRC(31,158,1), X_MASK,	BOOKE64,	{ RS, RA, RB } },
+{ "stwcxe.", XRC(31,158,1), X_MASK,	BOOKE64,	{ RS, RA0, RB } },
 
-{ "stwxe",   X(31,159), X_MASK,		BOOKE64,	{ RS, RA, RB } },
+{ "stwxe",   X(31,159), X_MASK,		BOOKE64,	{ RS, RA0, RB } },
 
 { "slq",     XRC(31,152,0), X_MASK,	M601,		{ RA, RS, RB } },
 { "slq.",    XRC(31,152,1), X_MASK,	M601,		{ RA, RS, RB } },
@@ -3405,6 +3566,8 @@
 { "sle",     XRC(31,153,0), X_MASK,	M601,		{ RA, RS, RB } },
 { "sle.",    XRC(31,153,1), X_MASK,	M601,		{ RA, RS, RB } },
 
+{ "prtyw",   X(31,154),	XRB_MASK,	POWER6,		{ RA, RS } },
+
 { "wrteei",  X(31,163),	XE_MASK,	PPC403 | BOOKE,	{ E } },
 
 { "dcbtls",  X(31,166),	X_MASK,		PPCCHLK,	{ CT, RA, RB }},
@@ -3415,11 +3578,13 @@
 { "stdux",   X(31,181),	X_MASK,		PPC64,		{ RS, RAS, RB } },
 
 { "stwux",   X(31,183),	X_MASK,		PPCCOM,		{ RS, RAS, RB } },
-{ "stux",    X(31,183),	X_MASK,		PWRCOM,		{ RS, RA, RB } },
+{ "stux",    X(31,183),	X_MASK,		PWRCOM,		{ RS, RA0, RB } },
 
 { "sliq",    XRC(31,184,0), X_MASK,	M601,		{ RA, RS, SH } },
 { "sliq.",   XRC(31,184,1), X_MASK,	M601,		{ RA, RS, SH } },
 
+{ "prtyd",   X(31,186),	XRB_MASK,	POWER6,		{ RA, RS } },
+
 { "stwuxe",  X(31,191),	X_MASK,		BOOKE64,	{ RS, RAS, RB } },
 
 { "subfze",  XO(31,200,0,0), XORB_MASK, PPCCOM,		{ RT, RA } },
@@ -3442,9 +3607,9 @@
 
 { "mtsr",    X(31,210),	XRB_MASK|(1<<20), COM32,	{ SR, RS } },
 
-{ "stdcx.",  XRC(31,214,1), X_MASK,	PPC64,		{ RS, RA, RB } },
+{ "stdcx.",  XRC(31,214,1), X_MASK,	PPC64,		{ RS, RA0, RB } },
 
-{ "stbx",    X(31,215),	X_MASK,		COM,		{ RS, RA, RB } },
+{ "stbx",    X(31,215),	X_MASK,		COM,		{ RS, RA0, RB } },
 
 { "sllq",    XRC(31,216,0), X_MASK,	M601,		{ RA, RS, RB } },
 { "sllq.",   XRC(31,216,1), X_MASK,	M601,		{ RA, RS, RB } },
@@ -3452,7 +3617,7 @@
 { "sleq",    XRC(31,217,0), X_MASK,	M601,		{ RA, RS, RB } },
 { "sleq.",   XRC(31,217,1), X_MASK,	M601,		{ RA, RS, RB } },
 
-{ "stbxe",   X(31,223),	X_MASK,		BOOKE64,	{ RS, RA, RB } },
+{ "stbxe",   X(31,223),	X_MASK,		BOOKE64,	{ RS, RA0, RB } },
 
 { "icblc",   X(31,230),	X_MASK,		PPCCHLK,	{ CT, RA, RB }},
 
@@ -3492,7 +3657,7 @@
 { "mtsrin",  X(31,242),	XRA_MASK,	PPC32,		{ RS, RB } },
 { "mtsri",   X(31,242),	XRA_MASK,	POWER32,	{ RS, RB } },
 
-{ "dcbtst",  X(31,246),	XRT_MASK,	PPC,		{ CT, RA, RB } },
+{ "dcbtst",  X(31,246),	X_MASK,	PPC,			{ CT, RA, RB } },
 
 { "stbux",   X(31,247),	X_MASK,		COM,		{ RS, RAS, RB } },
 
@@ -3519,26 +3684,26 @@
 { "addo.",   XO(31,266,1,1), XO_MASK,	PPCCOM,		{ RT, RA, RB } },
 { "caxo.",   XO(31,266,1,1), XO_MASK,	PWRCOM,		{ RT, RA, RB } },
 
-{ "tlbiel",  X(31,274), XRTRA_MASK,	POWER4,		{ RB } },
+{ "tlbiel",  X(31,274), XRTLRA_MASK,	POWER4,		{ RB, L } },
 
 { "mfapidi", X(31,275), X_MASK,		BOOKE,		{ RT, RA } },
 
 { "lscbx",   XRC(31,277,0), X_MASK,	M601,		{ RT, RA, RB } },
 { "lscbx.",  XRC(31,277,1), X_MASK,	M601,		{ RT, RA, RB } },
 
-{ "dcbt",    X(31,278),	XRT_MASK,	PPC,		{ CT, RA, RB } },
+{ "dcbt",    X(31,278),	X_MASK,		PPC,		{ CT, RA, RB } },
 
-{ "lhzx",    X(31,279),	X_MASK,		COM,		{ RT, RA, RB } },
+{ "lhzx",    X(31,279),	X_MASK,		COM,		{ RT, RA0, RB } },
 
 { "eqv",     XRC(31,284,0), X_MASK,	COM,		{ RA, RS, RB } },
 { "eqv.",    XRC(31,284,1), X_MASK,	COM,		{ RA, RS, RB } },
 
 { "dcbte",   X(31,286),	X_MASK,		BOOKE64,	{ CT, RA, RB } },
 
-{ "lhzxe",   X(31,287),	X_MASK,		BOOKE64,	{ RT, RA, RB } },
+{ "lhzxe",   X(31,287),	X_MASK,		BOOKE64,	{ RT, RA0, RB } },
 
 { "tlbie",   X(31,306),	XRTLRA_MASK,	PPC,		{ RB, L } },
-{ "tlbi",    X(31,306),	XRT_MASK,	POWER,		{ RA, RB } },
+{ "tlbi",    X(31,306),	XRT_MASK,	POWER,		{ RA0, RB } },
 
 { "eciwx",   X(31,310), X_MASK,		PPC,		{ RT, RA, RB } },
 
@@ -3607,6 +3772,7 @@
 { "mfsdr1",     XSPR(31,339,25),   XSPR_MASK, COM,	{ RT } },
 { "mfsrr0",     XSPR(31,339,26),   XSPR_MASK, COM,	{ RT } },
 { "mfsrr1",     XSPR(31,339,27),   XSPR_MASK, COM,	{ RT } },
+{ "mfcfar",     XSPR(31,339,28),   XSPR_MASK, POWER6,	{ RT } },
 { "mfpid",      XSPR(31,339,48),   XSPR_MASK, BOOKE,    { RT } },
 { "mfpid",      XSPR(31,339,945),  XSPR_MASK, PPC403,	{ RT } },
 { "mfcsrr0",    XSPR(31,339,58),   XSPR_MASK, BOOKE,    { RT } },
@@ -3634,21 +3800,21 @@
 { "mfbar",      XSPR(31,339,159),  XSPR_MASK, PPC860,	{ RT } },
 { "mfvrsave",   XSPR(31,339,256),  XSPR_MASK, PPCVEC,	{ RT } },
 { "mfusprg0",   XSPR(31,339,256),  XSPR_MASK, BOOKE,    { RT } },
-{ "mfsprg4",    XSPR(31,339,260),  XSPR_MASK, PPC405,	{ RT } },
-{ "mfsprg5",    XSPR(31,339,261),  XSPR_MASK, PPC405,	{ RT } },
-{ "mfsprg6",    XSPR(31,339,262),  XSPR_MASK, PPC405,	{ RT } },
-{ "mfsprg7",    XSPR(31,339,263),  XSPR_MASK, PPC405,	{ RT } },
 { "mftb",       X(31,371),	   X_MASK,    CLASSIC,	{ RT, TBR } },
 { "mftb",       XSPR(31,339,268),  XSPR_MASK, BOOKE,    { RT } },
 { "mftbl",      XSPR(31,371,268),  XSPR_MASK, CLASSIC,	{ RT } },
 { "mftbl",      XSPR(31,339,268),  XSPR_MASK, BOOKE,    { RT } },
 { "mftbu",      XSPR(31,371,269),  XSPR_MASK, CLASSIC,	{ RT } },
 { "mftbu",      XSPR(31,339,269),  XSPR_MASK, BOOKE,    { RT } },
-{ "mfsprg",     XSPR(31,339,272),  XSPRG_MASK, PPC,	{ RT, SPRG } },
+{ "mfsprg",     XSPR(31,339,256),  XSPRG_MASK, PPC,	{ RT, SPRG } },
 { "mfsprg0",    XSPR(31,339,272),  XSPR_MASK, PPC,	{ RT } },
 { "mfsprg1",    XSPR(31,339,273),  XSPR_MASK, PPC,	{ RT } },
 { "mfsprg2",    XSPR(31,339,274),  XSPR_MASK, PPC,	{ RT } },
 { "mfsprg3",    XSPR(31,339,275),  XSPR_MASK, PPC,	{ RT } },
+{ "mfsprg4",    XSPR(31,339,260),  XSPR_MASK, PPC405 | BOOKE,	{ RT } },
+{ "mfsprg5",    XSPR(31,339,261),  XSPR_MASK, PPC405 | BOOKE,	{ RT } },
+{ "mfsprg6",    XSPR(31,339,262),  XSPR_MASK, PPC405 | BOOKE,	{ RT } },
+{ "mfsprg7",    XSPR(31,339,263),  XSPR_MASK, PPC405 | BOOKE,	{ RT } },
 { "mfasr",      XSPR(31,339,280),  XSPR_MASK, PPC64,	{ RT } },
 { "mfear",      XSPR(31,339,282),  XSPR_MASK, PPC,	{ RT } },
 { "mfpir",      XSPR(31,339,286),  XSPR_MASK, BOOKE,    { RT } },
@@ -3699,6 +3865,10 @@
 { "mfspefscr",  XSPR(31,339,512),  XSPR_MASK, PPCSPE,	{ RT } },
 { "mfbbear",    XSPR(31,339,513),  XSPR_MASK, PPCBRLK,  { RT } },
 { "mfbbtar",    XSPR(31,339,514),  XSPR_MASK, PPCBRLK,  { RT } },
+{ "mfivor32",   XSPR(31,339,528),  XSPR_MASK, PPCSPE,	{ RT } },
+{ "mfivor33",   XSPR(31,339,529),  XSPR_MASK, PPCSPE,	{ RT } },
+{ "mfivor34",   XSPR(31,339,530),  XSPR_MASK, PPCSPE,	{ RT } },
+{ "mfivor35",   XSPR(31,339,531),  XSPR_MASK, PPCPMR,	{ RT } },
 { "mfibatu",    XSPR(31,339,528),  XSPRBAT_MASK, PPC,	{ RT, SPRBAT } },
 { "mfibatl",    XSPR(31,339,529),  XSPRBAT_MASK, PPC,	{ RT, SPRBAT } },
 { "mfdbatu",    XSPR(31,339,536),  XSPRBAT_MASK, PPC,	{ RT, SPRBAT } },
@@ -3708,10 +3878,11 @@
 { "mfic_dat",   XSPR(31,339,562),  XSPR_MASK, PPC860,	{ RT } },
 { "mfdc_cst",   XSPR(31,339,568),  XSPR_MASK, PPC860,	{ RT } },
 { "mfdc_adr",   XSPR(31,339,569),  XSPR_MASK, PPC860,	{ RT } },
-{ "mfdc_dat",   XSPR(31,339,570),  XSPR_MASK, PPC860,	{ RT } },
 { "mfmcsrr0",   XSPR(31,339,570),  XSPR_MASK, PPCRFMCI, { RT } },
+{ "mfdc_dat",   XSPR(31,339,570),  XSPR_MASK, PPC860,	{ RT } },
 { "mfmcsrr1",   XSPR(31,339,571),  XSPR_MASK, PPCRFMCI, { RT } },
 { "mfmcsr",     XSPR(31,339,572),  XSPR_MASK, PPCRFMCI, { RT } },
+{ "mfmcar",     XSPR(31,339,573),  XSPR_MASK, PPCRFMCI, { RT } },
 { "mfdpdr",     XSPR(31,339,630),  XSPR_MASK, PPC860,	{ RT } },
 { "mfdpir",     XSPR(31,339,631),  XSPR_MASK, PPC860,	{ RT } },
 { "mfimmr",     XSPR(31,339,638),  XSPR_MASK, PPC860,	{ RT } },
@@ -3775,14 +3946,14 @@
 { "mfpbu2",     XSPR(31,339,1023), XSPR_MASK, PPC403,	{ RT } },
 { "mfspr",      X(31,339),	   X_MASK,    COM,	{ RT, SPR } },
 
-{ "lwax",    X(31,341),	X_MASK,		PPC64,		{ RT, RA, RB } },
+{ "lwax",    X(31,341),	X_MASK,		PPC64,		{ RT, RA0, RB } },
 
 { "dst",     XDSS(31,342,0), XDSS_MASK,	PPCVEC,		{ RA, RB, STRM } },
 { "dstt",    XDSS(31,342,1), XDSS_MASK,	PPCVEC,		{ RA, RB, STRM } },
 
-{ "lhax",    X(31,343),	X_MASK,		COM,		{ RT, RA, RB } },
+{ "lhax",    X(31,343),	X_MASK,		COM,		{ RT, RA0, RB } },
 
-{ "lhaxe",   X(31,351),	X_MASK,		BOOKE64,	{ RT, RA, RB } },
+{ "lhaxe",   X(31,351),	X_MASK,		BOOKE64,	{ RT, RA0, RB } },
 
 { "dstst",   XDSS(31,374,0), XDSS_MASK,	PPCVEC,		{ RA, RB, STRM } },
 { "dststt",  XDSS(31,374,1), XDSS_MASK,	PPCVEC,		{ RA, RB, STRM } },
@@ -3821,14 +3992,20 @@
 
 { "slbmte",  X(31,402), XRA_MASK,	PPC64,		{ RS, RB } },
 
-{ "sthx",    X(31,407),	X_MASK,		COM,		{ RS, RA, RB } },
+{ "sthx",    X(31,407),	X_MASK,		COM,		{ RS, RA0, RB } },
+
+{ "cmpb",    X(31,508),	X_MASK,		POWER6,		{ RA, RS, RB } },
 
 { "lfqx",    X(31,791),	X_MASK,		POWER2,		{ FRT, RA, RB } },
 
+{ "lfdpx",   X(31,791),	X_MASK,		POWER6,		{ FRT, RA, RB } },
+
 { "lfqux",   X(31,823),	X_MASK,		POWER2,		{ FRT, RA, RB } },
 
 { "stfqx",   X(31,919),	X_MASK,		POWER2,		{ FRS, RA, RB } },
 
+{ "stfdpx",  X(31,919),	X_MASK,		POWER6,		{ FRS, RA, RB } },
+
 { "stfqux",  X(31,951),	X_MASK,		POWER2,		{ FRS, RA, RB } },
 
 { "orc",     XRC(31,412,0), X_MASK,	COM,		{ RA, RS, RB } },
@@ -3837,7 +4014,7 @@
 { "sradi",   XS(31,413,0), XS_MASK,	PPC64,		{ RA, RS, SH6 } },
 { "sradi.",  XS(31,413,1), XS_MASK,	PPC64,		{ RA, RS, SH6 } },
 
-{ "sthxe",   X(31,415),	X_MASK,		BOOKE64,	{ RS, RA, RB } },
+{ "sthxe",   X(31,415),	X_MASK,		BOOKE64,	{ RS, RA0, RB } },
 
 { "slbie",   X(31,434),	XRTRA_MASK,	PPC64,		{ RB } },
 
@@ -3918,6 +4095,7 @@
 { "mtsdr1",    XSPR(31,467,25),   XSPR_MASK, COM,	{ RS } },
 { "mtsrr0",    XSPR(31,467,26),   XSPR_MASK, COM,	{ RS } },
 { "mtsrr1",    XSPR(31,467,27),   XSPR_MASK, COM,	{ RS } },
+{ "mtcfar",    XSPR(31,467,28),   XSPR_MASK, POWER6,	{ RS } },
 { "mtpid",     XSPR(31,467,48),   XSPR_MASK, BOOKE,     { RS } },
 { "mtpid",     XSPR(31,467,945),  XSPR_MASK, PPC403,	{ RS } },
 { "mtdecar",   XSPR(31,467,54),   XSPR_MASK, BOOKE,     { RS } },
@@ -3946,7 +4124,7 @@
 { "mtbar",     XSPR(31,467,159),  XSPR_MASK, PPC860,	{ RS } },
 { "mtvrsave",  XSPR(31,467,256),  XSPR_MASK, PPCVEC,	{ RS } },
 { "mtusprg0",  XSPR(31,467,256),  XSPR_MASK, BOOKE,     { RS } },
-{ "mtsprg",    XSPR(31,467,272),  XSPRG_MASK,PPC,	{ SPRG, RS } },
+{ "mtsprg",    XSPR(31,467,256),  XSPRG_MASK,PPC,	{ SPRG, RS } },
 { "mtsprg0",   XSPR(31,467,272),  XSPR_MASK, PPC,	{ RS } },
 { "mtsprg1",   XSPR(31,467,273),  XSPR_MASK, PPC,	{ RS } },
 { "mtsprg2",   XSPR(31,467,274),  XSPR_MASK, PPC,	{ RS } },
@@ -4005,6 +4183,10 @@
 { "mtspefscr",  XSPR(31,467,512),  XSPR_MASK, PPCSPE,   { RS } },
 { "mtbbear",   XSPR(31,467,513),  XSPR_MASK, PPCBRLK,   { RS } },
 { "mtbbtar",   XSPR(31,467,514),  XSPR_MASK, PPCBRLK,  { RS } },
+{ "mtivor32",  XSPR(31,467,528),  XSPR_MASK, PPCSPE,	{ RS } },
+{ "mtivor33",  XSPR(31,467,529),  XSPR_MASK, PPCSPE,	{ RS } },
+{ "mtivor34",  XSPR(31,467,530),  XSPR_MASK, PPCSPE,	{ RS } },
+{ "mtivor35",  XSPR(31,467,531),  XSPR_MASK, PPCPMR,	{ RS } },
 { "mtibatu",   XSPR(31,467,528),  XSPRBAT_MASK, PPC,	{ SPRBAT, RS } },
 { "mtibatl",   XSPR(31,467,529),  XSPRBAT_MASK, PPC,	{ SPRBAT, RS } },
 { "mtdbatu",   XSPR(31,467,536),  XSPRBAT_MASK, PPC,	{ SPRBAT, RS } },
@@ -4101,13 +4283,15 @@
 
 { "clcs",    X(31,531), XRB_MASK,	M601,		{ RT, RA } },
 
-{ "lswx",    X(31,533),	X_MASK,		PPCCOM,		{ RT, RA, RB } },
+{ "ldbrx",   X(31,532),	X_MASK,		CELL,		{ RT, RA0, RB } },
+
+{ "lswx",    X(31,533),	X_MASK,		PPCCOM,		{ RT, RA0, RB } },
 { "lsx",     X(31,533),	X_MASK,		PWRCOM,		{ RT, RA, RB } },
 
-{ "lwbrx",   X(31,534),	X_MASK,		PPCCOM,		{ RT, RA, RB } },
+{ "lwbrx",   X(31,534),	X_MASK,		PPCCOM,		{ RT, RA0, RB } },
 { "lbrx",    X(31,534),	X_MASK,		PWRCOM,		{ RT, RA, RB } },
 
-{ "lfsx",    X(31,535),	X_MASK,		COM,		{ FRT, RA, RB } },
+{ "lfsx",    X(31,535),	X_MASK,		COM,		{ FRT, RA0, RB } },
 
 { "srw",     XRC(31,536,0), X_MASK,	PPCCOM,		{ RA, RS, RB } },
 { "sr",      XRC(31,536,0), X_MASK,	PWRCOM,		{ RA, RS, RB } },
@@ -4123,11 +4307,12 @@
 { "maskir",  XRC(31,541,0), X_MASK,	M601,		{ RA, RS, RB } },
 { "maskir.", XRC(31,541,1), X_MASK,	M601,		{ RA, RS, RB } },
 
-{ "lwbrxe",  X(31,542),	X_MASK,		BOOKE64,	{ RT, RA, RB } },
+{ "lwbrxe",  X(31,542),	X_MASK,		BOOKE64,	{ RT, RA0, RB } },
 
-{ "lfsxe",   X(31,543),	X_MASK,		BOOKE64,	{ FRT, RA, RB } },
+{ "lfsxe",   X(31,543),	X_MASK,		BOOKE64,	{ FRT, RA0, RB } },
 
 { "bbelr",   X(31,550),	X_MASK,		PPCBRLK,	{ 0 }},
+
 { "tlbsync", X(31,566),	0xffffffff,	PPC,		{ 0 } },
 
 { "lfsux",   X(31,567),	X_MASK,		COM,		{ FRT, RAS, RB } },
@@ -4136,8 +4321,8 @@
 
 { "mfsr",    X(31,595),	XRB_MASK|(1<<20), COM32,	{ RT, SR } },
 
-{ "lswi",    X(31,597),	X_MASK,		PPCCOM,		{ RT, RA, NB } },
-{ "lsi",     X(31,597),	X_MASK,		PWRCOM,		{ RT, RA, NB } },
+{ "lswi",    X(31,597),	X_MASK,		PPCCOM,		{ RT, RA0, NB } },
+{ "lsi",     X(31,597),	X_MASK,		PWRCOM,		{ RT, RA0, NB } },
 
 { "lwsync",  XSYNC(31,598,1), 0xffffffff, PPC,		{ 0 } },
 { "ptesync", XSYNC(31,598,2), 0xffffffff, PPC64,	{ 0 } },
@@ -4145,9 +4330,11 @@
 { "sync",    X(31,598), XSYNC_MASK,	PPCCOM,		{ LS } },
 { "dcs",     X(31,598), 0xffffffff,	PWRCOM,		{ 0 } },
 
-{ "lfdx",    X(31,599), X_MASK,		COM,		{ FRT, RA, RB } },
+{ "lfdx",    X(31,599), X_MASK,		COM,		{ FRT, RA0, RB } },
 
-{ "lfdxe",   X(31,607), X_MASK,		BOOKE64,	{ FRT, RA, RB } },
+{ "lfdxe",   X(31,607), X_MASK,		BOOKE64,	{ FRT, RA0, RB } },
+
+{ "mffgpr",  XRC(31,607,0), XRA_MASK,	POWER6,		{ FRT, RB } },
 
 { "mfsri",   X(31,627), X_MASK,		PWRCOM,		{ RT, RA, RB } },
 
@@ -4159,13 +4346,15 @@
 
 { "mfsrin",  X(31,659), XRA_MASK,	PPC32,		{ RT, RB } },
 
-{ "stswx",   X(31,661), X_MASK,		PPCCOM,		{ RS, RA, RB } },
-{ "stsx",    X(31,661), X_MASK,		PWRCOM,		{ RS, RA, RB } },
+{ "stdbrx",  X(31,660), X_MASK,		CELL,		{ RS, RA0, RB } },
 
-{ "stwbrx",  X(31,662), X_MASK,		PPCCOM,		{ RS, RA, RB } },
-{ "stbrx",   X(31,662), X_MASK,		PWRCOM,		{ RS, RA, RB } },
+{ "stswx",   X(31,661), X_MASK,		PPCCOM,		{ RS, RA0, RB } },
+{ "stsx",    X(31,661), X_MASK,		PWRCOM,		{ RS, RA0, RB } },
 
-{ "stfsx",   X(31,663), X_MASK,		COM,		{ FRS, RA, RB } },
+{ "stwbrx",  X(31,662), X_MASK,		PPCCOM,		{ RS, RA0, RB } },
+{ "stbrx",   X(31,662), X_MASK,		PWRCOM,		{ RS, RA0, RB } },
+
+{ "stfsx",   X(31,663), X_MASK,		COM,		{ FRS, RA0, RB } },
 
 { "srq",     XRC(31,664,0), X_MASK,	M601,		{ RA, RS, RB } },
 { "srq.",    XRC(31,664,1), X_MASK,	M601,		{ RA, RS, RB } },
@@ -4173,9 +4362,9 @@
 { "sre",     XRC(31,665,0), X_MASK,	M601,		{ RA, RS, RB } },
 { "sre.",    XRC(31,665,1), X_MASK,	M601,		{ RA, RS, RB } },
 
-{ "stwbrxe", X(31,670), X_MASK,		BOOKE64,	{ RS, RA, RB } },
+{ "stwbrxe", X(31,670), X_MASK,		BOOKE64,	{ RS, RA0, RB } },
 
-{ "stfsxe",  X(31,671), X_MASK,		BOOKE64,	{ FRS, RA, RB } },
+{ "stfsxe",  X(31,671), X_MASK,		BOOKE64,	{ FRS, RA0, RB } },
 
 { "stfsux",  X(31,695),	X_MASK,		COM,		{ FRS, RAS, RB } },
 
@@ -4184,10 +4373,10 @@
 
 { "stfsuxe", X(31,703),	X_MASK,		BOOKE64,	{ FRS, RAS, RB } },
 
-{ "stswi",   X(31,725),	X_MASK,		PPCCOM,		{ RS, RA, NB } },
-{ "stsi",    X(31,725),	X_MASK,		PWRCOM,		{ RS, RA, NB } },
+{ "stswi",   X(31,725),	X_MASK,		PPCCOM,		{ RS, RA0, NB } },
+{ "stsi",    X(31,725),	X_MASK,		PWRCOM,		{ RS, RA0, NB } },
 
-{ "stfdx",   X(31,727),	X_MASK,		COM,		{ FRS, RA, RB } },
+{ "stfdx",   X(31,727),	X_MASK,		COM,		{ FRS, RA0, RB } },
 
 { "srlq",    XRC(31,728,0), X_MASK,	M601,		{ RA, RS, RB } },
 { "srlq.",   XRC(31,728,1), X_MASK,	M601,		{ RA, RS, RB } },
@@ -4195,7 +4384,9 @@
 { "sreq",    XRC(31,729,0), X_MASK,	M601,		{ RA, RS, RB } },
 { "sreq.",   XRC(31,729,1), X_MASK,	M601,		{ RA, RS, RB } },
 
-{ "stfdxe",  X(31,735),	X_MASK,		BOOKE64,	{ FRS, RA, RB } },
+{ "stfdxe",  X(31,735),	X_MASK,		BOOKE64,	{ FRS, RA0, RB } },
+
+{ "mftgpr",  XRC(31,735,0), XRA_MASK,	POWER6,		{ RT, FRB } },
 
 { "dcba",    X(31,758),	XRT_MASK,	PPC405 | BOOKE,	{ RA, RB } },
 
@@ -4211,7 +4402,9 @@
 { "tlbivax", X(31,786),	XRT_MASK,	BOOKE,		{ RA, RB } },
 { "tlbivaxe",X(31,787),	XRT_MASK,	BOOKE64,	{ RA, RB } },
 
-{ "lhbrx",   X(31,790),	X_MASK,		COM,		{ RT, RA, RB } },
+{ "lwzcix",  X(31,789),	X_MASK,		POWER6,		{ RT, RA0, RB } },
+
+{ "lhbrx",   X(31,790),	X_MASK,		COM,		{ RT, RA0, RB } },
 
 { "sraw",    XRC(31,792,0), X_MASK,	PPCCOM,		{ RA, RS, RB } },
 { "sra",     XRC(31,792,0), X_MASK,	PWRCOM,		{ RA, RS, RB } },
@@ -4221,13 +4414,15 @@
 { "srad",    XRC(31,794,0), X_MASK,	PPC64,		{ RA, RS, RB } },
 { "srad.",   XRC(31,794,1), X_MASK,	PPC64,		{ RA, RS, RB } },
 
-{ "lhbrxe",  X(31,798),	X_MASK,		BOOKE64,	{ RT, RA, RB } },
+{ "lhbrxe",  X(31,798),	X_MASK,		BOOKE64,	{ RT, RA0, RB } },
 
-{ "ldxe",    X(31,799),	X_MASK,		BOOKE64,	{ RT, RA, RB } },
-{ "lduxe",   X(31,831),	X_MASK,		BOOKE64,	{ RT, RA, RB } },
+{ "ldxe",    X(31,799),	X_MASK,		BOOKE64,	{ RT, RA0, RB } },
+{ "lduxe",   X(31,831),	X_MASK,		BOOKE64,	{ RT, RA0, RB } },
 
 { "rac",     X(31,818),	X_MASK,		PWRCOM,		{ RT, RA, RB } },
 
+{ "lhzcix",  X(31,821),	X_MASK,		POWER6,		{ RT, RA0, RB } },
+
 { "dss",     XDSS(31,822,0), XDSS_MASK,	PPCVEC,		{ STRM } },
 { "dssall",  XDSS(31,822,1), XDSS_MASK,	PPCVEC,		{ 0 } },
 
@@ -4238,19 +4433,25 @@
 
 { "slbmfev", X(31,851), XRA_MASK,	PPC64,		{ RT, RB } },
 
+{ "lbzcix",  X(31,853),	X_MASK,		POWER6,		{ RT, RA0, RB } },
+
 { "mbar",    X(31,854),	X_MASK,		BOOKE,		{ MO } },
 { "eieio",   X(31,854),	0xffffffff,	PPC,		{ 0 } },
 
-{ "tlbsx",   XRC(31,914,0), X_MASK,	BOOKE,		{ RA, RB } },
-{ "tlbsx",   XRC(31,914,0), X_MASK, 	PPC403,		{ RT, RA, RB } },
-{ "tlbsx.",  XRC(31,914,1), X_MASK,	BOOKE,		{ RA, RB } },
-{ "tlbsx.",  XRC(31,914,1), X_MASK, 	PPC403,		{ RT, RA, RB } },
+{ "lfiwax",  X(31,855),	X_MASK,		POWER6,		{ FRT, RA0, RB } },
+
+{ "ldcix",   X(31,885),	X_MASK,		POWER6,		{ RT, RA0, RB } },
+
+{ "tlbsx",   XRC(31,914,0), X_MASK, 	PPC403|BOOKE,	{ RTO, RA, RB } },
+{ "tlbsx.",  XRC(31,914,1), X_MASK, 	PPC403|BOOKE,	{ RTO, RA, RB } },
 { "tlbsxe",  XRC(31,915,0), X_MASK,	BOOKE64,	{ RA, RB } },
 { "tlbsxe.", XRC(31,915,1), X_MASK,	BOOKE64,	{ RA, RB } },
 
 { "slbmfee", X(31,915), XRA_MASK,	PPC64,		{ RT, RB } },
 
-{ "sthbrx",  X(31,918),	X_MASK,		COM,		{ RS, RA, RB } },
+{ "stwcix",  X(31,917),	X_MASK,		POWER6,		{ RS, RA0, RB } },
+
+{ "sthbrx",  X(31,918),	X_MASK,		COM,		{ RS, RA0, RB } },
 
 { "sraq",    XRC(31,920,0), X_MASK,	M601,		{ RA, RS, RB } },
 { "sraq.",   XRC(31,920,1), X_MASK,	M601,		{ RA, RS, RB } },
@@ -4263,14 +4464,15 @@
 { "extsh.",  XRC(31,922,1), XRB_MASK,	PPCCOM,		{ RA, RS } },
 { "exts.",   XRC(31,922,1), XRB_MASK,	PWRCOM,		{ RA, RS } },
 
-{ "sthbrxe", X(31,926),	X_MASK,		BOOKE64,	{ RS, RA, RB } },
+{ "sthbrxe", X(31,926),	X_MASK,		BOOKE64,	{ RS, RA0, RB } },
 
-{ "stdxe",   X(31,927), X_MASK,		BOOKE64,	{ RS, RA, RB } },
+{ "stdxe",   X(31,927), X_MASK,		BOOKE64,	{ RS, RA0, RB } },
 
 { "tlbrehi", XTLB(31,946,0), XTLB_MASK,	PPC403,		{ RT, RA } },
 { "tlbrelo", XTLB(31,946,1), XTLB_MASK,	PPC403,		{ RT, RA } },
-{ "tlbre",   X(31,946),	X_MASK,		BOOKE,		{ 0 } },
-{ "tlbre",   X(31,946),	X_MASK,		PPC403,		{ RS, RA, SH } },
+{ "tlbre",   X(31,946),	X_MASK,		PPC403|BOOKE,	{ RSO, RAOPT, SHO } },
+
+{ "sthcix",  X(31,949),	X_MASK,		POWER6,		{ RS, RA0, RB } },
 
 { "sraiq",   XRC(31,952,0), X_MASK,	M601,		{ RA, RS, SH } },
 { "sraiq.",  XRC(31,952,1), X_MASK,	M601,		{ RA, RS, SH } },
@@ -4284,13 +4486,14 @@
 
 { "tlbwehi", XTLB(31,978,0), XTLB_MASK,	PPC403,		{ RT, RA } },
 { "tlbwelo", XTLB(31,978,1), XTLB_MASK,	PPC403,		{ RT, RA } },
-{ "tlbwe",   X(31,978),	X_MASK,		BOOKE,		{ 0 } },
-{ "tlbwe",   X(31,978),	X_MASK,		PPC403,		{ RS, RA, SH } },
+{ "tlbwe",   X(31,978),	X_MASK,		PPC403|BOOKE,	{ RSO, RAOPT, SHO } },
 { "tlbld",   X(31,978),	XRTRA_MASK,	PPC,		{ RB } },
 
+{ "stbcix",  X(31,981),	X_MASK,		POWER6,		{ RS, RA0, RB } },
+
 { "icbi",    X(31,982),	XRT_MASK,	PPC,		{ RA, RB } },
 
-{ "stfiwx",  X(31,983),	X_MASK,		PPC,		{ FRS, RA, RB } },
+{ "stfiwx",  X(31,983),	X_MASK,		PPC,		{ FRS, RA0, RB } },
 
 { "extsw",   XRC(31,986,0), XRB_MASK,	PPC64 | BOOKE64,{ RA, RS } },
 { "extsw.",  XRC(31,986,1), XRB_MASK,	PPC64,		{ RA, RS } },
@@ -4298,10 +4501,13 @@
 { "icread",  X(31,998),	XRT_MASK,	PPC403|PPC440,	{ RA, RB } },
 
 { "icbie",   X(31,990),	XRT_MASK,	BOOKE64,	{ RA, RB } },
-{ "stfiwxe", X(31,991),	X_MASK,		BOOKE64,	{ FRS, RA, RB } },
+{ "stfiwxe", X(31,991),	X_MASK,		BOOKE64,	{ FRS, RA0, RB } },
 
 { "tlbli",   X(31,1010), XRTRA_MASK,	PPC,		{ RB } },
 
+{ "stdcix",  X(31,1013), X_MASK,	POWER6,		{ RS, RA0, RB } },
+
+{ "dcbzl",   XOPL(31,1014,1), XRT_MASK,POWER4,            { RA, RB } },
 { "dcbz",    X(31,1014), XRT_MASK,	PPC,		{ RA, RB } },
 { "dclz",    X(31,1014), XRT_MASK,	PPC,		{ RA, RB } },
 
@@ -4320,86 +4526,104 @@
 { "stvx",    X(31, 231), X_MASK,	PPCVEC,		{ VS, RA, RB } },
 { "stvxl",   X(31, 487), X_MASK,	PPCVEC,		{ VS, RA, RB } },
 
-{ "lwz",     OP(32),	OP_MASK,	PPCCOM,		{ RT, D, RA } },
-{ "l",	     OP(32),	OP_MASK,	PWRCOM,		{ RT, D, RA } },
+/* New load/store left/right index vector instructions that are in the Cell only.  */
+{ "lvlx",    X(31, 519), X_MASK,	CELL,		{ VD, RA0, RB } },
+{ "lvlxl",   X(31, 775), X_MASK,	CELL,		{ VD, RA0, RB } },
+{ "lvrx",    X(31, 551), X_MASK,	CELL,		{ VD, RA0, RB } },
+{ "lvrxl",   X(31, 807), X_MASK,	CELL,		{ VD, RA0, RB } },
+{ "stvlx",   X(31, 647), X_MASK,	CELL,		{ VS, RA0, RB } },
+{ "stvlxl",  X(31, 903), X_MASK,	CELL,		{ VS, RA0, RB } },
+{ "stvrx",   X(31, 679), X_MASK,	CELL,		{ VS, RA0, RB } },
+{ "stvrxl",  X(31, 935), X_MASK,	CELL,		{ VS, RA0, RB } },
+
+{ "lwz",     OP(32),	OP_MASK,	PPCCOM,		{ RT, D, RA0 } },
+{ "l",	     OP(32),	OP_MASK,	PWRCOM,		{ RT, D, RA0 } },
 
 { "lwzu",    OP(33),	OP_MASK,	PPCCOM,		{ RT, D, RAL } },
-{ "lu",      OP(33),	OP_MASK,	PWRCOM,		{ RT, D, RA } },
+{ "lu",      OP(33),	OP_MASK,	PWRCOM,		{ RT, D, RA0 } },
 
-{ "lbz",     OP(34),	OP_MASK,	COM,		{ RT, D, RA } },
+{ "lbz",     OP(34),	OP_MASK,	COM,		{ RT, D, RA0 } },
 
 { "lbzu",    OP(35),	OP_MASK,	COM,		{ RT, D, RAL } },
 
-{ "stw",     OP(36),	OP_MASK,	PPCCOM,		{ RS, D, RA } },
-{ "st",      OP(36),	OP_MASK,	PWRCOM,		{ RS, D, RA } },
+{ "stw",     OP(36),	OP_MASK,	PPCCOM,		{ RS, D, RA0 } },
+{ "st",      OP(36),	OP_MASK,	PWRCOM,		{ RS, D, RA0 } },
 
 { "stwu",    OP(37),	OP_MASK,	PPCCOM,		{ RS, D, RAS } },
-{ "stu",     OP(37),	OP_MASK,	PWRCOM,		{ RS, D, RA } },
+{ "stu",     OP(37),	OP_MASK,	PWRCOM,		{ RS, D, RA0 } },
 
-{ "stb",     OP(38),	OP_MASK,	COM,		{ RS, D, RA } },
+{ "stb",     OP(38),	OP_MASK,	COM,		{ RS, D, RA0 } },
 
 { "stbu",    OP(39),	OP_MASK,	COM,		{ RS, D, RAS } },
 
-{ "lhz",     OP(40),	OP_MASK,	COM,		{ RT, D, RA } },
+{ "lhz",     OP(40),	OP_MASK,	COM,		{ RT, D, RA0 } },
 
 { "lhzu",    OP(41),	OP_MASK,	COM,		{ RT, D, RAL } },
 
-{ "lha",     OP(42),	OP_MASK,	COM,		{ RT, D, RA } },
+{ "lha",     OP(42),	OP_MASK,	COM,		{ RT, D, RA0 } },
 
 { "lhau",    OP(43),	OP_MASK,	COM,		{ RT, D, RAL } },
 
-{ "sth",     OP(44),	OP_MASK,	COM,		{ RS, D, RA } },
+{ "sth",     OP(44),	OP_MASK,	COM,		{ RS, D, RA0 } },
 
 { "sthu",    OP(45),	OP_MASK,	COM,		{ RS, D, RAS } },
 
 { "lmw",     OP(46),	OP_MASK,	PPCCOM,		{ RT, D, RAM } },
-{ "lm",      OP(46),	OP_MASK,	PWRCOM,		{ RT, D, RA } },
+{ "lm",      OP(46),	OP_MASK,	PWRCOM,		{ RT, D, RA0 } },
 
-{ "stmw",    OP(47),	OP_MASK,	PPCCOM,		{ RS, D, RA } },
-{ "stm",     OP(47),	OP_MASK,	PWRCOM,		{ RS, D, RA } },
+{ "stmw",    OP(47),	OP_MASK,	PPCCOM,		{ RS, D, RA0 } },
+{ "stm",     OP(47),	OP_MASK,	PWRCOM,		{ RS, D, RA0 } },
 
-{ "lfs",     OP(48),	OP_MASK,	COM,		{ FRT, D, RA } },
+{ "lfs",     OP(48),	OP_MASK,	COM,		{ FRT, D, RA0 } },
 
 { "lfsu",    OP(49),	OP_MASK,	COM,		{ FRT, D, RAS } },
 
-{ "lfd",     OP(50),	OP_MASK,	COM,		{ FRT, D, RA } },
+{ "lfd",     OP(50),	OP_MASK,	COM,		{ FRT, D, RA0 } },
 
 { "lfdu",    OP(51),	OP_MASK,	COM,		{ FRT, D, RAS } },
 
-{ "stfs",    OP(52),	OP_MASK,	COM,		{ FRS, D, RA } },
+{ "stfs",    OP(52),	OP_MASK,	COM,		{ FRS, D, RA0 } },
 
 { "stfsu",   OP(53),	OP_MASK,	COM,		{ FRS, D, RAS } },
 
-{ "stfd",    OP(54),	OP_MASK,	COM,		{ FRS, D, RA } },
+{ "stfd",    OP(54),	OP_MASK,	COM,		{ FRS, D, RA0 } },
 
 { "stfdu",   OP(55),	OP_MASK,	COM,		{ FRS, D, RAS } },
 
 { "lq",      OP(56),	OP_MASK,	POWER4,		{ RTQ, DQ, RAQ } },
 
-{ "lfq",     OP(56),	OP_MASK,	POWER2,		{ FRT, D, RA } },
+{ "lfq",     OP(56),	OP_MASK,	POWER2,		{ FRT, D, RA0 } },
 
-{ "lfqu",    OP(57),	OP_MASK,	POWER2,		{ FRT, D, RA } },
+{ "lfqu",    OP(57),	OP_MASK,	POWER2,		{ FRT, D, RA0 } },
 
-{ "lbze",    DEO(58,0), DE_MASK,	BOOKE64,	{ RT, DE, RA } },
+{ "lfdp",    OP(57),	OP_MASK,	POWER6,		{ FRT, D, RA0 } },
+
+{ "lbze",    DEO(58,0), DE_MASK,	BOOKE64,	{ RT, DE, RA0 } },
 { "lbzue",   DEO(58,1), DE_MASK,	BOOKE64,	{ RT, DE, RAL } },
-{ "lhze",    DEO(58,2), DE_MASK,	BOOKE64,	{ RT, DE, RA } },
+{ "lhze",    DEO(58,2), DE_MASK,	BOOKE64,	{ RT, DE, RA0 } },
 { "lhzue",   DEO(58,3), DE_MASK,	BOOKE64,	{ RT, DE, RAL } },
-{ "lhae",    DEO(58,4), DE_MASK,	BOOKE64,	{ RT, DE, RA } },
+{ "lhae",    DEO(58,4), DE_MASK,	BOOKE64,	{ RT, DE, RA0 } },
 { "lhaue",   DEO(58,5), DE_MASK,	BOOKE64,	{ RT, DE, RAL } },
-{ "lwze",    DEO(58,6), DE_MASK,	BOOKE64,	{ RT, DE, RA } },
+{ "lwze",    DEO(58,6), DE_MASK,	BOOKE64,	{ RT, DE, RA0 } },
 { "lwzue",   DEO(58,7), DE_MASK,	BOOKE64,	{ RT, DE, RAL } },
-{ "stbe",    DEO(58,8), DE_MASK,	BOOKE64,	{ RS, DE, RA } },
+{ "stbe",    DEO(58,8), DE_MASK,	BOOKE64,	{ RS, DE, RA0 } },
 { "stbue",   DEO(58,9), DE_MASK,	BOOKE64,	{ RS, DE, RAS } },
-{ "sthe",    DEO(58,10), DE_MASK,	BOOKE64,	{ RS, DE, RA } },
+{ "sthe",    DEO(58,10), DE_MASK,	BOOKE64,	{ RS, DE, RA0 } },
 { "sthue",   DEO(58,11), DE_MASK,	BOOKE64,	{ RS, DE, RAS } },
-{ "stwe",    DEO(58,14), DE_MASK,	BOOKE64,	{ RS, DE, RA } },
+{ "stwe",    DEO(58,14), DE_MASK,	BOOKE64,	{ RS, DE, RA0 } },
 { "stwue",   DEO(58,15), DE_MASK,	BOOKE64,	{ RS, DE, RAS } },
 
-{ "ld",      DSO(58,0),	DS_MASK,	PPC64,		{ RT, DS, RA } },
+{ "ld",      DSO(58,0),	DS_MASK,	PPC64,		{ RT, DS, RA0 } },
 
 { "ldu",     DSO(58,1), DS_MASK,	PPC64,		{ RT, DS, RAL } },
 
-{ "lwa",     DSO(58,2), DS_MASK,	PPC64,		{ RT, DS, RA } },
+{ "lwa",     DSO(58,2), DS_MASK,	PPC64,		{ RT, DS, RA0 } },
+
+{ "dadd",    XRC(59,2,0), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+{ "dadd.",   XRC(59,2,1), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+
+{ "dqua",    ZRC(59,3,0), Z_MASK,	POWER6,		{ FRT, FRA, FRB, RMC } },
+{ "dqua.",   ZRC(59,3,1), Z_MASK,	POWER6,		{ FRT, FRA, FRB, RMC } },
 
 { "fdivs",   A(59,18,0), AFRC_MASK,	PPC,		{ FRT, FRA, FRB } },
 { "fdivs.",  A(59,18,1), AFRC_MASK,	PPC,		{ FRT, FRA, FRB } },
@@ -4413,12 +4637,15 @@
 { "fsqrts",  A(59,22,0), AFRAFRC_MASK,	PPC,		{ FRT, FRB } },
 { "fsqrts.", A(59,22,1), AFRAFRC_MASK,	PPC,		{ FRT, FRB } },
 
-{ "fres",    A(59,24,0), AFRAFRC_MASK,	PPC,		{ FRT, FRB } },
-{ "fres.",   A(59,24,1), AFRAFRC_MASK,	PPC,		{ FRT, FRB } },
+{ "fres",    A(59,24,0), AFRALFRC_MASK,	PPC,		{ FRT, FRB, A_L } },
+{ "fres.",   A(59,24,1), AFRALFRC_MASK,	PPC,		{ FRT, FRB, A_L } },
 
 { "fmuls",   A(59,25,0), AFRB_MASK,	PPC,		{ FRT, FRA, FRC } },
 { "fmuls.",  A(59,25,1), AFRB_MASK,	PPC,		{ FRT, FRA, FRC } },
 
+{ "frsqrtes", A(59,26,0), AFRALFRC_MASK,POWER5,		{ FRT, FRB, A_L } },
+{ "frsqrtes.",A(59,26,1), AFRALFRC_MASK,POWER5,		{ FRT, FRB, A_L } },
+
 { "fmsubs",  A(59,28,0), A_MASK,	PPC,		{ FRT,FRA,FRC,FRB } },
 { "fmsubs.", A(59,28,1), A_MASK,	PPC,		{ FRT,FRA,FRC,FRB } },
 
@@ -4431,31 +4658,103 @@
 { "fnmadds", A(59,31,0), A_MASK,	PPC,		{ FRT,FRA,FRC,FRB } },
 { "fnmadds.",A(59,31,1), A_MASK,	PPC,		{ FRT,FRA,FRC,FRB } },
 
+{ "dmul",    XRC(59,34,0), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+{ "dmul.",   XRC(59,34,1), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+
+{ "drrnd",   ZRC(59,35,0), Z_MASK,	POWER6,		{ FRT, FRA, FRB, RMC } },
+{ "drrnd.",  ZRC(59,35,1), Z_MASK,	POWER6,		{ FRT, FRA, FRB, RMC } },
+
+{ "dscli",   ZRC(59,66,0), Z_MASK,	POWER6,		{ FRT, FRA, SH16 } },
+{ "dscli.",  ZRC(59,66,1), Z_MASK,	POWER6,		{ FRT, FRA, SH16 } },
+
+{ "dquai",   ZRC(59,67,0), Z_MASK,	POWER6,		{ TE,  FRT, FRB, RMC } },
+{ "dquai.",  ZRC(59,67,1), Z_MASK,	POWER6,		{ TE,  FRT, FRB, RMC } },
+
+{ "dscri",   ZRC(59,98,0), Z_MASK,	POWER6,		{ FRT, FRA, SH16 } },
+{ "dscri.",  ZRC(59,98,1), Z_MASK,	POWER6,		{ FRT, FRA, SH16 } },
+
+{ "drintx",  ZRC(59,99,0), Z_MASK,	POWER6,		{ R, FRT, FRB, RMC } },
+{ "drintx.", ZRC(59,99,1), Z_MASK,	POWER6,		{ R, FRT, FRB, RMC } },
+
+{ "dcmpo",   X(59,130),	   X_MASK,	POWER6,		{ BF,  FRA, FRB } },
+
+{ "dtstex",  X(59,162),	   X_MASK,	POWER6,		{ BF,  FRA, FRB } },
+{ "dtstdc",  Z(59,194),	   Z_MASK,	POWER6,		{ BF,  FRA, DCM } },
+{ "dtstdg",  Z(59,226),	   Z_MASK,	POWER6,		{ BF,  FRA, DGM } },
+
+{ "drintn",  ZRC(59,227,0), Z_MASK,	POWER6,		{ R, FRT, FRB, RMC } },
+{ "drintn.", ZRC(59,227,1), Z_MASK,	POWER6,		{ R, FRT, FRB, RMC } },
+
+{ "dctdp",   XRC(59,258,0), X_MASK,	POWER6,		{ FRT, FRB } },
+{ "dctdp.",  XRC(59,258,1), X_MASK,	POWER6,		{ FRT, FRB } },
+
+{ "dctfix",  XRC(59,290,0), X_MASK,	POWER6,		{ FRT, FRB } },
+{ "dctfix.", XRC(59,290,1), X_MASK,	POWER6,		{ FRT, FRB } },
+
+{ "ddedpd",  XRC(59,322,0), X_MASK,	POWER6,		{ SP, FRT, FRB } }, 
+{ "ddedpd.", XRC(59,322,1), X_MASK,	POWER6,		{ SP, FRT, FRB } }, 
+
+{ "dxex",    XRC(59,354,0), X_MASK,	POWER6,		{ FRT, FRB } },
+{ "dxex.",   XRC(59,354,1), X_MASK,	POWER6,		{ FRT, FRB } },
+
+{ "dsub",    XRC(59,514,0), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+{ "dsub.",   XRC(59,514,1), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+
+{ "ddiv",    XRC(59,546,0), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+{ "ddiv.",   XRC(59,546,1), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+
+{ "dcmpu",   X(59,642),	    X_MASK,	POWER6,		{ BF,  FRA, FRB } },
+
+{ "dtstsf",  X(59,674),	   X_MASK,	POWER6,		{ BF,  FRA, FRB } },
+
+{ "drsp",    XRC(59,770,0), X_MASK,	POWER6,		{ FRT, FRB } },
+{ "drsp.",   XRC(59,770,1), X_MASK,	POWER6,		{ FRT, FRB } },
+
+{ "dcffix",  XRC(59,802,0), X_MASK,	POWER6,		{ FRT, FRB } },
+{ "dcffix.", XRC(59,802,1), X_MASK,	POWER6,		{ FRT, FRB } },
+
+{ "denbcd",  XRC(59,834,0), X_MASK,	POWER6,		{ S, FRT, FRB } },
+{ "denbcd.", XRC(59,834,1), X_MASK,	POWER6,		{ S, FRT, FRB } },
+
+{ "diex",    XRC(59,866,0), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+{ "diex.",   XRC(59,866,1), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+
 { "stfq",    OP(60),	OP_MASK,	POWER2,		{ FRS, D, RA } },
 
 { "stfqu",   OP(61),	OP_MASK,	POWER2,		{ FRS, D, RA } },
 
-{ "lde",     DEO(62,0), DE_MASK,	BOOKE64,	{ RT, DES, RA } },
-{ "ldue",    DEO(62,1), DE_MASK,	BOOKE64,	{ RT, DES, RA } },
-{ "lfse",    DEO(62,4), DE_MASK,	BOOKE64,	{ FRT, DES, RA } },
+{ "stfdp",   OP(61),	OP_MASK,	POWER6,		{ FRT, D, RA0 } },
+
+{ "lde",     DEO(62,0), DE_MASK,	BOOKE64,	{ RT, DES, RA0 } },
+{ "ldue",    DEO(62,1), DE_MASK,	BOOKE64,	{ RT, DES, RA0 } },
+{ "lfse",    DEO(62,4), DE_MASK,	BOOKE64,	{ FRT, DES, RA0 } },
 { "lfsue",   DEO(62,5), DE_MASK,	BOOKE64,	{ FRT, DES, RAS } },
-{ "lfde",    DEO(62,6), DE_MASK,	BOOKE64,	{ FRT, DES, RA } },
+{ "lfde",    DEO(62,6), DE_MASK,	BOOKE64,	{ FRT, DES, RA0 } },
 { "lfdue",   DEO(62,7), DE_MASK,	BOOKE64,	{ FRT, DES, RAS } },
-{ "stde",    DEO(62,8), DE_MASK,	BOOKE64,	{ RS, DES, RA } },
+{ "stde",    DEO(62,8), DE_MASK,	BOOKE64,	{ RS, DES, RA0 } },
 { "stdue",   DEO(62,9), DE_MASK,	BOOKE64,	{ RS, DES, RAS } },
-{ "stfse",   DEO(62,12), DE_MASK,	BOOKE64,	{ FRS, DES, RA } },
+{ "stfse",   DEO(62,12), DE_MASK,	BOOKE64,	{ FRS, DES, RA0 } },
 { "stfsue",  DEO(62,13), DE_MASK,	BOOKE64,	{ FRS, DES, RAS } },
-{ "stfde",   DEO(62,14), DE_MASK,	BOOKE64,	{ FRS, DES, RA } },
+{ "stfde",   DEO(62,14), DE_MASK,	BOOKE64,	{ FRS, DES, RA0 } },
 { "stfdue",  DEO(62,15), DE_MASK,	BOOKE64,	{ FRS, DES, RAS } },
 
-{ "std",     DSO(62,0),	DS_MASK,	PPC64,		{ RS, DS, RA } },
+{ "std",     DSO(62,0),	DS_MASK,	PPC64,		{ RS, DS, RA0 } },
 
 { "stdu",    DSO(62,1),	DS_MASK,	PPC64,		{ RS, DS, RAS } },
 
-{ "stq",     DSO(62,2),	DS_MASK,	POWER4,		{ RSQ, DS, RA } },
+{ "stq",     DSO(62,2),	DS_MASK,	POWER4,		{ RSQ, DS, RA0 } },
 
 { "fcmpu",   X(63,0),	X_MASK|(3<<21),	COM,		{ BF, FRA, FRB } },
 
+{ "daddq",   XRC(63,2,0), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+{ "daddq.",  XRC(63,2,1), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+
+{ "dquaq",   ZRC(63,3,0), Z_MASK,	POWER6,		{ FRT, FRA, FRB, RMC } },
+{ "dquaq.",  ZRC(63,3,1), Z_MASK,	POWER6,		{ FRT, FRA, FRB, RMC } },
+
+{ "fcpsgn",  XRC(63,8,0), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+{ "fcpsgn.", XRC(63,8,1), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+
 { "frsp",    XRC(63,12,0), XRA_MASK,	COM,		{ FRT, FRB } },
 { "frsp.",   XRC(63,12,1), XRA_MASK,	COM,		{ FRT, FRB } },
 
@@ -4490,13 +4789,16 @@
 { "fsel",    A(63,23,0), A_MASK,	PPC,		{ FRT,FRA,FRC,FRB } },
 { "fsel.",   A(63,23,1), A_MASK,	PPC,		{ FRT,FRA,FRC,FRB } },
 
+{ "fre",     A(63,24,0), AFRALFRC_MASK,	POWER5,		{ FRT, FRB, A_L } },
+{ "fre.",    A(63,24,1), AFRALFRC_MASK,	POWER5,		{ FRT, FRB, A_L } },
+
 { "fmul",    A(63,25,0), AFRB_MASK,	PPCCOM,		{ FRT, FRA, FRC } },
 { "fm",      A(63,25,0), AFRB_MASK,	PWRCOM,		{ FRT, FRA, FRC } },
 { "fmul.",   A(63,25,1), AFRB_MASK,	PPCCOM,		{ FRT, FRA, FRC } },
 { "fm.",     A(63,25,1), AFRB_MASK,	PWRCOM,		{ FRT, FRA, FRC } },
 
-{ "frsqrte", A(63,26,0), AFRAFRC_MASK,	PPC,		{ FRT, FRB } },
-{ "frsqrte.",A(63,26,1), AFRAFRC_MASK,	PPC,		{ FRT, FRB } },
+{ "frsqrte", A(63,26,0), AFRALFRC_MASK,	PPC,		{ FRT, FRB, A_L } },
+{ "frsqrte.",A(63,26,1), AFRALFRC_MASK,	PPC,		{ FRT, FRB, A_L } },
 
 { "fmsub",   A(63,28,0), A_MASK,	PPCCOM,		{ FRT,FRA,FRC,FRB } },
 { "fms",     A(63,28,0), A_MASK,	PWRCOM,		{ FRT,FRA,FRC,FRB } },
@@ -4520,6 +4822,12 @@
 
 { "fcmpo",   X(63,32),	X_MASK|(3<<21),	COM,		{ BF, FRA, FRB } },
 
+{ "dmulq",   XRC(63,34,0), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+{ "dmulq.",  XRC(63,34,1), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+
+{ "drrndq",  ZRC(63,35,0), Z_MASK,	POWER6,		{ FRT, FRA, FRB, RMC } },
+{ "drrndq.", ZRC(63,35,1), Z_MASK,	POWER6,		{ FRT, FRA, FRB, RMC } },
+
 { "mtfsb1",  XRC(63,38,0), XRARB_MASK,	COM,		{ BT } },
 { "mtfsb1.", XRC(63,38,1), XRARB_MASK,	COM,		{ BT } },
 
@@ -4528,36 +4836,100 @@
 
 { "mcrfs",   X(63,64),	XRB_MASK|(3<<21)|(3<<16), COM,	{ BF, BFA } },
 
+{ "dscliq",  ZRC(63,66,0), Z_MASK,	POWER6,		{ FRT, FRA, SH16 } },
+{ "dscliq.", ZRC(63,66,1), Z_MASK,	POWER6,		{ FRT, FRA, SH16 } },
+
+{ "dquaiq",  ZRC(63,67,0), Z_MASK,	POWER6,		{ TE,  FRT, FRB, RMC } },
+{ "dquaiq.", ZRC(63,67,1), Z_MASK,	POWER6,		{ FRT, FRA, FRB, RMC } },
+
 { "mtfsb0",  XRC(63,70,0), XRARB_MASK,	COM,		{ BT } },
 { "mtfsb0.", XRC(63,70,1), XRARB_MASK,	COM,		{ BT } },
 
 { "fmr",     XRC(63,72,0), XRA_MASK,	COM,		{ FRT, FRB } },
 { "fmr.",    XRC(63,72,1), XRA_MASK,	COM,		{ FRT, FRB } },
 
+{ "dscriq",  ZRC(63,98,0), Z_MASK,	POWER6,		{ FRT, FRA, SH16 } },
+{ "dscriq.", ZRC(63,98,1), Z_MASK,	POWER6,		{ FRT, FRA, SH16 } },
+
+{ "drintxq", ZRC(63,99,0), Z_MASK,	POWER6,		{ R, FRT, FRB, RMC } },
+{ "drintxq.",ZRC(63,99,1), Z_MASK,	POWER6,		{ R, FRT, FRB, RMC } },
+
+{ "dcmpoq",  X(63,130),	   X_MASK,	POWER6,		{ BF,  FRA, FRB } },
+
 { "mtfsfi",  XRC(63,134,0), XRA_MASK|(3<<21)|(1<<11), COM, { BF, U } },
 { "mtfsfi.", XRC(63,134,1), XRA_MASK|(3<<21)|(1<<11), COM, { BF, U } },
 
 { "fnabs",   XRC(63,136,0), XRA_MASK,	COM,		{ FRT, FRB } },
 { "fnabs.",  XRC(63,136,1), XRA_MASK,	COM,		{ FRT, FRB } },
 
+{ "dtstexq", X(63,162),	    X_MASK,	POWER6,		{ BF,  FRA, FRB } },
+{ "dtstdcq", Z(63,194),	    Z_MASK,	POWER6,		{ BF,  FRA, DCM } },
+{ "dtstdgq", Z(63,226),	    Z_MASK,	POWER6,		{ BF,  FRA, DGM } },
+
+{ "drintnq", ZRC(63,227,0), Z_MASK,	POWER6,		{ R, FRT, FRB, RMC } },
+{ "drintnq.",ZRC(63,227,1), Z_MASK,	POWER6,		{ R, FRT, FRB, RMC } },
+
+{ "dctqpq",  XRC(63,258,0), X_MASK,	POWER6,		{ FRT, FRB } },
+{ "dctqpq.", XRC(63,258,1), X_MASK,	POWER6,		{ FRT, FRB } },
+
 { "fabs",    XRC(63,264,0), XRA_MASK,	COM,		{ FRT, FRB } },
 { "fabs.",   XRC(63,264,1), XRA_MASK,	COM,		{ FRT, FRB } },
 
+{ "dctfixq", XRC(63,290,0), X_MASK,	POWER6,		{ FRT, FRB } },
+{ "dctfixq.",XRC(63,290,1), X_MASK,	POWER6,		{ FRT, FRB } },
+
+{ "ddedpdq", XRC(63,322,0), X_MASK,	POWER6,		{ SP, FRT, FRB } },
+{ "ddedpdq.",XRC(63,322,1), X_MASK,	POWER6,		{ SP, FRT, FRB } },
+
+{ "dxexq",   XRC(63,354,0), X_MASK,	POWER6,		{ FRT, FRB } },
+{ "dxexq.",  XRC(63,354,1), X_MASK,	POWER6,		{ FRT, FRB } },
+
+{ "frin",    XRC(63,392,0), XRA_MASK,	POWER5,		{ FRT, FRB } },
+{ "frin.",   XRC(63,392,1), XRA_MASK,	POWER5,		{ FRT, FRB } },
+{ "friz",    XRC(63,424,0), XRA_MASK,	POWER5,		{ FRT, FRB } },
+{ "friz.",   XRC(63,424,1), XRA_MASK,	POWER5,		{ FRT, FRB } },
+{ "frip",    XRC(63,456,0), XRA_MASK,	POWER5,		{ FRT, FRB } },
+{ "frip.",   XRC(63,456,1), XRA_MASK,	POWER5,		{ FRT, FRB } },
+{ "frim",    XRC(63,488,0), XRA_MASK,	POWER5,		{ FRT, FRB } },
+{ "frim.",   XRC(63,488,1), XRA_MASK,	POWER5,		{ FRT, FRB } },
+
+{ "dsubq",   XRC(63,514,0), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+{ "dsubq.",  XRC(63,514,1), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+
+{ "ddivq",   XRC(63,546,0), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+{ "ddivq.",  XRC(63,546,1), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+
 { "mffs",    XRC(63,583,0), XRARB_MASK,	COM,		{ FRT } },
 { "mffs.",   XRC(63,583,1), XRARB_MASK,	COM,		{ FRT } },
 
+{ "dcmpuq",  X(63,642),	    X_MASK,	POWER6,		{ BF,  FRA, FRB } },
+
+{ "dtstsfq", X(63,674),	    X_MASK,	POWER6,		{ BF,  FRA, FRB } },
+
 { "mtfsf",   XFL(63,711,0), XFL_MASK,	COM,		{ FLM, FRB } },
 { "mtfsf.",  XFL(63,711,1), XFL_MASK,	COM,		{ FLM, FRB } },
 
+{ "drdpq",   XRC(63,770,0), X_MASK,	POWER6,		{ FRT, FRB } },
+{ "drdpq.",  XRC(63,770,1), X_MASK,	POWER6,		{ FRT, FRB } },
+
+{ "dcffixq", XRC(63,802,0), X_MASK,	POWER6,		{ FRT, FRB } },
+{ "dcffixq.",XRC(63,802,1), X_MASK,	POWER6,		{ FRT, FRB } },
+
 { "fctid",   XRC(63,814,0), XRA_MASK,	PPC64,		{ FRT, FRB } },
 { "fctid.",  XRC(63,814,1), XRA_MASK,	PPC64,		{ FRT, FRB } },
 
 { "fctidz",  XRC(63,815,0), XRA_MASK,	PPC64,		{ FRT, FRB } },
 { "fctidz.", XRC(63,815,1), XRA_MASK,	PPC64,		{ FRT, FRB } },
 
+{ "denbcdq", XRC(63,834,0), X_MASK,	POWER6,		{ S, FRT, FRB } },
+{ "denbcdq.",XRC(63,834,1), X_MASK,	POWER6,		{ S, FRT, FRB } },
+
 { "fcfid",   XRC(63,846,0), XRA_MASK,	PPC64,		{ FRT, FRB } },
 { "fcfid.",  XRC(63,846,1), XRA_MASK,	PPC64,		{ FRT, FRB } },
 
+{ "diexq",   XRC(63,866,0), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+{ "diexq.",  XRC(63,866,1), X_MASK,	POWER6,		{ FRT, FRA, FRB } },
+
 };
 
 const int powerpc_num_opcodes =
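
Most of the opcode-table churn above replaces RA with RA0 in load/store operand lists; RA0 pairs with the new PPC_OPERAND_GPR_0 flag (added to ppc.h just below), so the disassembler prints a literal 0 rather than r0 when the base-register field is zero, matching the architectural (RA|0) semantics. For orientation, the following is a minimal, hypothetical sketch of how such an opcode/mask table is consumed when disassembling; the struct and function names are illustrative only and are not the xmon code.

/* Illustrative sketch only -- not part of this patch.  The entry layout
 * mirrors the binutils-style table: fixed encoding bits plus a mask
 * saying which bits must match.  */
#include <stddef.h>

struct op_entry {
	const char *name;
	unsigned long opcode;	/* value of the fixed encoding bits */
	unsigned long mask;	/* which bits are fixed */
};

static const char *match_insn(const struct op_entry *tbl, int n,
			      unsigned long insn)
{
	int i;

	/* First match wins, which is why the preferred mnemonic for a
	 * given encoding comes first in the table.  */
	for (i = 0; i < n; i++)
		if ((insn & tbl[i].mask) == tbl[i].opcode)
			return tbl[i].name;
	return NULL;
}
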
diff --git a/arch/powerpc/xmon/ppc.h b/arch/powerpc/xmon/ppc.h
index 342237e..110df96 100644
--- a/arch/powerpc/xmon/ppc.h
+++ b/arch/powerpc/xmon/ppc.h
@@ -1,5 +1,5 @@
 /* ppc.h -- Header file for PowerPC opcode table
-   Copyright 1994, 1995, 1999, 2000, 2001, 2002, 2003
+   Copyright 1994, 1995, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
    Free Software Foundation, Inc.
    Written by Ian Lance Taylor, Cygnus Support
 
@@ -17,7 +17,7 @@
 
 You should have received a copy of the GNU General Public License
 along with this file; see the file COPYING.  If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
+Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
 
 #ifndef PPC_H
 #define PPC_H
@@ -134,6 +134,18 @@
 /* Opcode is supported by machine check APU.  */
 #define PPC_OPCODE_RFMCI	  0x800000
 
+/* Opcode is only supported by Power5 architecture.  */
+#define PPC_OPCODE_POWER5	 0x1000000
+
+/* Opcode is supported by PowerPC e300 family.  */
+#define PPC_OPCODE_E300          0x2000000
+
+/* Opcode is only supported by Power6 architecture.  */
+#define PPC_OPCODE_POWER6	 0x4000000
+
+/* Opcode is only supported by PowerPC Cell family.  */
+#define PPC_OPCODE_CELL		 0x8000000
+
 /* A macro to extract the major opcode from an instruction.  */
 #define PPC_OP(i) (((i) >> 26) & 0x3f)
 
@@ -233,25 +245,28 @@
    register names with a leading 'r'.  */
 #define PPC_OPERAND_GPR (040)
 
+/* Like PPC_OPERAND_GPR, but don't print a leading 'r' for r0.  */
+#define PPC_OPERAND_GPR_0 (0100)
+
 /* This operand names a floating point register.  The disassembler
    prints these with a leading 'f'.  */
-#define PPC_OPERAND_FPR (0100)
+#define PPC_OPERAND_FPR (0200)
 
 /* This operand is a relative branch displacement.  The disassembler
    prints these symbolically if possible.  */
-#define PPC_OPERAND_RELATIVE (0200)
+#define PPC_OPERAND_RELATIVE (0400)
 
 /* This operand is an absolute branch address.  The disassembler
    prints these symbolically if possible.  */
-#define PPC_OPERAND_ABSOLUTE (0400)
+#define PPC_OPERAND_ABSOLUTE (01000)
 
 /* This operand is optional, and is zero if omitted.  This is used for
-   the optional BF and L fields in the comparison instructions.  The
+   example, in the optional BF field in the comparison instructions.  The
    assembler must count the number of operands remaining on the line,
    and the number of operands remaining for the opcode, and decide
    whether this operand is present or not.  The disassembler should
    print this operand out only if it is not zero.  */
-#define PPC_OPERAND_OPTIONAL (01000)
+#define PPC_OPERAND_OPTIONAL (02000)
 
 /* This flag is only used with PPC_OPERAND_OPTIONAL.  If this operand
    is omitted, then for the next operand use this operand value plus
@@ -259,24 +274,24 @@
    hack is needed because the Power rotate instructions can take
    either 4 or 5 operands.  The disassembler should print this operand
    out regardless of the PPC_OPERAND_OPTIONAL field.  */
-#define PPC_OPERAND_NEXT (02000)
+#define PPC_OPERAND_NEXT (04000)
 
 /* This operand should be regarded as a negative number for the
    purposes of overflow checking (i.e., the normal most negative
    number is disallowed and one more than the normal most positive
    number is allowed).  This flag will only be set for a signed
    operand.  */
-#define PPC_OPERAND_NEGATIVE (04000)
+#define PPC_OPERAND_NEGATIVE (010000)
 
 /* This operand names a vector unit register.  The disassembler
    prints these with a leading 'v'.  */
-#define PPC_OPERAND_VR (010000)
+#define PPC_OPERAND_VR (020000)
 
 /* This operand is for the DS field in a DS form instruction.  */
-#define PPC_OPERAND_DS (020000)
+#define PPC_OPERAND_DS (040000)
 
 /* This operand is for the DQ field in a DQ form instruction.  */
-#define PPC_OPERAND_DQ (040000)
+#define PPC_OPERAND_DQ (0100000)
 
 /* The POWER and PowerPC assemblers use a few macros.  We keep them
    with the operands table for simplicity.  The macro table is an
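
Because inserting PPC_OPERAND_GPR_0 shifted every operand-flag value above, it may help to see how a flags word is interpreted when a register operand is printed. This is a hedged sketch only, with a hypothetical helper name; the real logic lives in xmon's PowerPC disassembler and is not part of this hunk.

#include <stdio.h>
#include "ppc.h"	/* PPC_OPERAND_* flag bits shown above */

/* Hypothetical helper, for illustration only.  'flags' is the operand's
 * flags word, 'value' the decoded register field.  */
static void show_gpr_operand(unsigned long flags, long value)
{
	if ((flags & PPC_OPERAND_GPR_0) && value == 0)
		printf("0");		/* (RA|0): register field 0 means literal zero */
	else if (flags & (PPC_OPERAND_GPR | PPC_OPERAND_GPR_0))
		printf("r%ld", value);
	else
		printf("%ld", value);
}
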
diff --git a/arch/powerpc/xmon/spu-dis.c b/arch/powerpc/xmon/spu-dis.c
new file mode 100644
index 0000000..ee929c6
--- /dev/null
+++ b/arch/powerpc/xmon/spu-dis.c
@@ -0,0 +1,248 @@
+/* Disassemble SPU instructions
+
+   Copyright 2006 Free Software Foundation, Inc.
+
+   This file is part of GDB, GAS, and the GNU binutils.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License along
+   with this program; if not, write to the Free Software Foundation, Inc.,
+   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
+
+#include <linux/string.h>
+#include "nonstdio.h"
+#include "ansidecl.h"
+#include "spu.h"
+#include "dis-asm.h"
+
+/* This file provides a disassembler function which uses
+   the disassembler interface defined in dis-asm.h.   */
+
+extern const struct spu_opcode spu_opcodes[];
+extern const int spu_num_opcodes;
+
+#define SPU_DISASM_TBL_SIZE (1 << 11)
+static const struct spu_opcode *spu_disassemble_table[SPU_DISASM_TBL_SIZE];
+
+static void
+init_spu_disassemble (void)
+{
+  int i;
+
+  /* If two instructions have the same opcode then we prefer the first
+   * one.  In most cases it is just an alternate mnemonic. */
+  for (i = 0; i < spu_num_opcodes; i++)
+    {
+      int o = spu_opcodes[i].opcode;
+      if (o >= SPU_DISASM_TBL_SIZE)
+	continue; /* abort (); */
+      if (spu_disassemble_table[o] == 0)
+	spu_disassemble_table[o] = &spu_opcodes[i];
+    }
+}
+
+/* Determine the instruction from its 11 most significant bits (the opcode field). */
+static const struct spu_opcode *
+get_index_for_opcode (unsigned int insn)
+{
+  const struct spu_opcode *index;
+  unsigned int opcode = insn >> (32-11);
+
+  /* Init the table.  This assumes that element 0/opcode 0 (currently
+   * NOP) is always used */
+  if (spu_disassemble_table[0] == 0)
+    init_spu_disassemble ();
+
+  if ((index = spu_disassemble_table[opcode & 0x780]) != 0
+      && index->insn_type == RRR)
+    return index;
+
+  if ((index = spu_disassemble_table[opcode & 0x7f0]) != 0
+      && (index->insn_type == RI18 || index->insn_type == LBT))
+    return index;
+
+  if ((index = spu_disassemble_table[opcode & 0x7f8]) != 0
+      && index->insn_type == RI10)
+    return index;
+
+  if ((index = spu_disassemble_table[opcode & 0x7fc]) != 0
+      && (index->insn_type == RI16))
+    return index;
+
+  if ((index = spu_disassemble_table[opcode & 0x7fe]) != 0
+      && (index->insn_type == RI8))
+    return index;
+
+  if ((index = spu_disassemble_table[opcode & 0x7ff]) != 0)
+    return index;
+
+  return 0;
+}
+
+/* Print a Spu instruction.  */
+
+int
+print_insn_spu (unsigned long insn, unsigned long memaddr)
+{
+  int value;
+  int hex_value;
+  const struct spu_opcode *index;
+  enum spu_insns tag;
+
+  index = get_index_for_opcode (insn);
+
+  if (index == 0)
+    {
+      printf(".long 0x%x", insn);
+    }
+  else
+    {
+      int i;
+      int paren = 0;
+      tag = (enum spu_insns)(index - spu_opcodes);
+      printf("%s", index->mnemonic);
+      if (tag == M_BI || tag == M_BISL || tag == M_IRET || tag == M_BISLED
+	  || tag == M_BIHNZ || tag == M_BIHZ || tag == M_BINZ || tag == M_BIZ
+          || tag == M_SYNC || tag == M_HBR)
+	{
+	  int fb = (insn >> (32-18)) & 0x7f;
+	  if (fb & 0x40)
+	    printf(tag == M_SYNC ? "c" : "p");
+	  if (fb & 0x20)
+	    printf("d");
+	  if (fb & 0x10)
+	    printf("e");
+	}
+      if (index->arg[0] != 0)
+	printf("\t");
+      hex_value = 0;
+      for (i = 1;  i <= index->arg[0]; i++)
+	{
+	  int arg = index->arg[i];
+	  if (arg != A_P && !paren && i > 1)
+	    printf(",");
+
+	  switch (arg)
+	    {
+	    case A_T:
+	      printf("$%d", DECODE_INSN_RT (insn));
+	      break;
+	    case A_A:
+	      printf("$%d", DECODE_INSN_RA (insn));
+	      break;
+	    case A_B:
+	      printf("$%d", DECODE_INSN_RB (insn));
+	      break;
+	    case A_C:
+	      printf("$%d", DECODE_INSN_RC (insn));
+	      break;
+	    case A_S:
+	      printf("$sp%d", DECODE_INSN_RA (insn));
+	      break;
+	    case A_H:
+	      printf("$ch%d", DECODE_INSN_RA (insn));
+	      break;
+	    case A_P:
+	      paren++;
+	      printf("(");
+	      break;
+	    case A_U7A:
+	      printf("%d", 173 - DECODE_INSN_U8 (insn));
+	      break;
+	    case A_U7B:
+	      printf("%d", 155 - DECODE_INSN_U8 (insn));
+	      break;
+	    case A_S3:
+	    case A_S6:
+	    case A_S7:
+	    case A_S7N:
+	    case A_U3:
+	    case A_U5:
+	    case A_U6:
+	    case A_U7:
+	      hex_value = DECODE_INSN_I7 (insn);
+	      printf("%d", hex_value);
+	      break;
+	    case A_S11:
+	      print_address(memaddr + DECODE_INSN_I9a (insn) * 4);
+	      break;
+	    case A_S11I:
+	      print_address(memaddr + DECODE_INSN_I9b (insn) * 4);
+	      break;
+	    case A_S10:
+	    case A_S10B:
+	      hex_value = DECODE_INSN_I10 (insn);
+	      printf("%d", hex_value);
+	      break;
+	    case A_S14:
+	      hex_value = DECODE_INSN_I10 (insn) * 16;
+	      printf("%d", hex_value);
+	      break;
+	    case A_S16:
+	      hex_value = DECODE_INSN_I16 (insn);
+	      printf("%d", hex_value);
+	      break;
+	    case A_X16:
+	      hex_value = DECODE_INSN_U16 (insn);
+	      printf("%u", hex_value);
+	      break;
+	    case A_R18:
+	      value = DECODE_INSN_I16 (insn) * 4;
+	      if (value == 0)
+		printf("%d", value);
+	      else
+		{
+		  hex_value = memaddr + value;
+		  print_address(hex_value & 0x3ffff);
+		}
+	      break;
+	    case A_S18:
+	      value = DECODE_INSN_U16 (insn) * 4;
+	      if (value == 0)
+		printf("%d", value);
+	      else
+		print_address(value);
+	      break;
+	    case A_U18:
+	      value = DECODE_INSN_U18 (insn);
+	      if (value == 0 || 1)	/* xmon has no symbol lookup; always print numerically */
+		{
+		  hex_value = value;
+		  printf("%u", value);
+		}
+	      else
+		print_address(value);
+	      break;
+	    case A_U14:
+	      hex_value = DECODE_INSN_U14 (insn);
+	      printf("%u", hex_value);
+	      break;
+	    }
+	  if (arg != A_P && paren)
+	    {
+	      printf(")");
+	      paren--;
+	    }
+	}
+      if (hex_value > 16)
+	printf("\t# %x", hex_value);
+    }
+  return 4;
+}
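
get_index_for_opcode() above tries progressively wider masks (0x780, 0x7f0, 0x7f8, 0x7fc, 0x7fe, 0x7ff) because SPU opcode fields span anywhere from 4 to 11 bits depending on the instruction format, and a hit is accepted only when the entry's insn_type matches the format that width implies. A caller feeds print_insn_spu() one raw 32-bit word and its address at a time; the loop below is a hedged usage sketch with a hypothetical function name, not xmon's actual call site.

/* Hypothetical caller, for illustration only; assumes the declarations
 * from the file above (printf from nonstdio.h, print_insn_spu).  */
static void dump_spu_insns(const unsigned int *buf, unsigned long addr, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		printf("%.8lx  %.8x  ", addr, buf[i]);
		print_insn_spu(buf[i], addr);	/* prints one insn, returns 4 */
		printf("\n");
		addr += 4;
	}
}
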
diff --git a/arch/powerpc/xmon/spu-insns.h b/arch/powerpc/xmon/spu-insns.h
new file mode 100644
index 0000000..99dc452
--- /dev/null
+++ b/arch/powerpc/xmon/spu-insns.h
@@ -0,0 +1,410 @@
+/* SPU ELF support for BFD.
+
+   Copyright 2006 Free Software Foundation, Inc.
+
+   This file is part of BFD, the Binary File Descriptor library.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software Foundation,
+   Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
+
+/* SPU Opcode Table
+
+-=-=-= FORMAT =-=-=-
+                                             		                                             
+       +----+-------+-------+-------+-------+  		       +------------+-------+-------+-------+
+RRR    | op |  RC   |  RB   |  RA   |  RT   |		RI7    | op         |  I7   |  RA   |  RT   |
+       +----+-------+-------+-------+-------+		       +------------+-------+-------+-------+
+        0  3       1       1       2       3		        0          1       1       2       3 
+                   0       7       4       1		                   0       7       4       1 
+
+       +-----------+--------+-------+-------+		       +---------+----------+-------+-------+
+RI8    | op        |   I8   |  RA   |  RT   |		RI10   | op      |   I10    |  RA   |  RT   |
+       +-----------+--------+-------+-------+		       +---------+----------+-------+-------+
+        0         9        1       2       3		        0       7          1       2       3 
+                           7       4       1		                           7       4       1 
+
+       +----------+-----------------+-------+		       +--------+-------------------+-------+
+RI16   | op       |       I16       |  RT   |		RI18   | op     |       I18         |  RT   |
+       +----------+-----------------+-------+		       +--------+-------------------+-------+
+        0        8                 2       3		        0      6                   2       3 
+                                   4       1		                                   4       1 
+
+       +------------+-------+-------+-------+		       +-------+--+-----------------+-------+
+RR     | op         |  RB   |  RA   |  RT   |		LBT    | op    |RO|       I16       |  RO   |
+       +------------+-------+-------+-------+		       +-------+--+-----------------+-------+
+        0          1       1       2       3		        0     6  8                 2       3 
+                   0       7       4       1		                                   4       1 
+
+							       +------------+----+--+-------+-------+
+							LBTI   | op         | // |RO|  RA   |  RO   |
+							       +------------+----+--+-------+-------+
+							        0          1    1  1       2       3
+							                   0    5  7       4       1
+
+-=-=-= OPCODE =-=-=-
+
+The OPCODE field specifies the most significant 11 bits of the instruction. Some formats have fewer than 11 opcode bits; in that
+case the bits other than op are defined as 0s. For example, the opcode of the fma instruction, which is RRR format, is defined as
+0x700: 0x700 -> 11'b11100000000, i.e. the opcode proper is 4'b1110 and the remaining 7 bits are defined as 7'b0000000.
+
+-=-=-= ASM_FORMAT =-=-=-
+
+RRR category						RI7 category                               
+	ASM_RRR		mnemonic RC, RA, RB, RT		        ASM_RI4         mnemonic RT, RA, I4
+							        ASM_RI7         mnemonic RT, RA, I7
+
+RI8 category						RI10 category                               
+	ASM_RUI8	mnemonic RT, RA, UI8		        ASM_AI10        mnemonic RA, I10    
+							        ASM_RI10        mnemonic RT, RA, R10
+							        ASM_RI10IDX     mnemonic RT, I10(RA)
+
+RI16 category						RI18 category                           
+	ASM_I16W	mnemonic I16W			        ASM_RI18        mnemonic RT, I18
+	ASM_RI16	mnemonic RT, I16
+	ASM_RI16W	mnemonic RT, I16W
+
+RR category						LBT category                                    
+	ASM_MFSPR	mnemonic RT, SA			        ASM_LBT         mnemonic brinst, brtarg 
+	ASM_MTSPR	mnemonic SA, RT			                                                
+	ASM_NOOP	mnemonic			LBTI category                                   
+	ASM_RA		mnemonic RA			        ASM_LBTI        mnemonic brinst, RA     
+	ASM_RAB		mnemonic RA, RB
+	ASM_RDCH	mnemonic RT, CA
+	ASM_RR		mnemonic RT, RA, RB
+	ASM_RT		mnemonic RT
+	ASM_RTA		mnemonic RT, RA
+	ASM_WRCH	mnemonic CA, RT
+
+Note that RRR instructions have the names for RC and RT reversed from
+what's in the ISA, in order to put RT in the same position it appears
+for other formats.
+
+-=-=-= DEPENDENCY =-=-=-
+
+The DEPENDENCY field consists of 5 digits. It encodes which registers are used as sources and which as targets.
+The first (most significant) digit is always 0. It is followed by the RC, RB, RA and RT digits.
+If the digit is 0, this means the corresponding register is not used in the instruction.
+If the digit is 1, this means the corresponding register is used as a source in the instruction.
+If the digit is 2, this means the corresponding register is used as a target in the instruction.
+If the digit is 3, this means the corresponding register is used as both source and target in the instruction.
+For example, the fms instruction has 00113 as its DEPENDENCY field. This means RC is not used in this operation, RB and RA are
+used as sources and RT is the target.
+
+-=-=-= PIPE =-=-=-
+
+This field shows which execution pipe is used for the instruction
+
+pipe0 execution pipelines:
+	FP6	SP floating pipeline
+	FP7	integer operations executed in SP floating pipeline
+	FPD	DP floating pipeline
+	FX2	FXU pipeline
+	FX3	Rotate/Shift pipeline
+	FXB	Byte pipeline
+	NOP	No pipeline
+
+pipe1 execution pipelines:
+	BR	Branch pipeline
+	LNOP	No pipeline
+	LS	Load/Store pipeline
+	SHUF	Shuffle pipeline
+	SPR	SPR/CH pipeline
+
+*/
+
+#define _A0() {0}
+#define _A1(a) {1,a}
+#define _A2(a,b) {2,a,b}
+#define _A3(a,b,c) {3,a,b,c}
+#define _A4(a,b,c,d) {4,a,b,c,d}
+
+/*    TAG		FORMAT	OPCODE	MNEMONIC	ASM_FORMAT	DEPENDENCY	PIPE	COMMENT				*/
+/*									0[RC][RB][RA][RT]					*/
+/*									1:src, 2:target						*/
+
+APUOP(M_BR,		RI16,	0x190,	"br",		_A1(A_R18),	00000,	BR)	/* BRel          IP<-IP+I16 */
+APUOP(M_BRSL,		RI16,	0x198,	"brsl",		_A2(A_T,A_R18),	00002,	BR)	/* BRelSetLink   RT,IP<-IP,IP+I16 */
+APUOP(M_BRA,		RI16,	0x180,	"bra",		_A1(A_S18),	00000,	BR)	/* BRAbs         IP<-I16 */
+APUOP(M_BRASL,		RI16,	0x188,	"brasl",	_A2(A_T,A_S18),	00002,	BR)	/* BRAbsSetLink  RT,IP<-IP,I16 */
+APUOP(M_FSMBI,		RI16,	0x194,	"fsmbi",	_A2(A_T,A_X16),	00002,	SHUF)	/* FormSelMask%I RT<-fsm(I16) */
+APUOP(M_LQA,		RI16,	0x184,	"lqa",		_A2(A_T,A_S18),	00002,	LS)	/* LoadQAbs      RT<-M[I16] */
+APUOP(M_LQR,		RI16,	0x19C,	"lqr",		_A2(A_T,A_R18),	00002,	LS)	/* LoadQRel      RT<-M[IP+I16] */
+APUOP(M_STOP,		RR,	0x000,	"stop",		_A0(),		00000,	BR)	/* STOP          stop */
+APUOP(M_STOP2,		RR,	0x000,	"stop",		_A1(A_U14),	00000,	BR)	/* STOP          stop */
+APUOP(M_STOPD,		RR,	0x140,	"stopd",	_A3(A_T,A_A,A_B),         00111,	BR)	/* STOPD         stop (with register dependencies) */
+APUOP(M_LNOP,		RR,	0x001,	"lnop",		_A0(),		00000,	LNOP)	/* LNOP          no_operation */
+APUOP(M_SYNC,		RR,	0x002,	"sync",		_A0(),		00000,	BR)	/* SYNC          flush_pipe */
+APUOP(M_DSYNC,		RR,	0x003,	"dsync",	_A0(),		00000,	BR)	/* DSYNC         flush_store_queue */
+APUOP(M_MFSPR,		RR,	0x00c,	"mfspr",	_A2(A_T,A_S),	00002,	SPR)	/* MFSPR         RT<-SA */
+APUOP(M_RDCH,		RR,	0x00d,	"rdch",		_A2(A_T,A_H),	00002,	SPR)	/* ReaDCHannel   RT<-CA:data */
+APUOP(M_RCHCNT,		RR,	0x00f,	"rchcnt",	_A2(A_T,A_H),	00002,	SPR)	/* ReaDCHanCouNT RT<-CA:count */
+APUOP(M_HBRA,		LBT,	0x080,	"hbra",		_A2(A_S11,A_S18),	00000,	LS)	/* HBRA          BTB[B9]<-M[I16] */
+APUOP(M_HBRR,		LBT,	0x090,	"hbrr",		_A2(A_S11,A_R18),	00000,	LS)	/* HBRR          BTB[B9]<-M[IP+I16] */
+APUOP(M_BRZ,		RI16,	0x100,	"brz",		_A2(A_T,A_R18),	00001,	BR)	/* BRZ           IP<-IP+I16_if(RT) */
+APUOP(M_BRNZ,		RI16,	0x108,	"brnz",		_A2(A_T,A_R18),	00001,	BR)	/* BRNZ          IP<-IP+I16_if(RT) */
+APUOP(M_BRHZ,		RI16,	0x110,	"brhz",		_A2(A_T,A_R18),	00001,	BR)	/* BRHZ          IP<-IP+I16_if(RT) */
+APUOP(M_BRHNZ,		RI16,	0x118,	"brhnz",	_A2(A_T,A_R18),	00001,	BR)	/* BRHNZ         IP<-IP+I16_if(RT) */
+APUOP(M_STQA,		RI16,	0x104,	"stqa",		_A2(A_T,A_S18),	00001,	LS)	/* SToreQAbs     M[I16]<-RT */
+APUOP(M_STQR,		RI16,	0x11C,	"stqr",		_A2(A_T,A_R18),	00001,	LS)	/* SToreQRel     M[IP+I16]<-RT */
+APUOP(M_MTSPR,		RR,	0x10c,	"mtspr",	_A2(A_S,A_T),	00001,	SPR)	/* MTSPR         SA<-RT */
+APUOP(M_WRCH,		RR,	0x10d,	"wrch",		_A2(A_H,A_T),	00001,	SPR)	/* ChanWRite     CA<-RT */
+APUOP(M_LQD,		RI10,	0x1a0,	"lqd",		_A4(A_T,A_S14,A_P,A_A),	00012,	LS)	/* LoadQDisp     RT<-M[Ra+I10] */
+APUOP(M_BI,		RR,	0x1a8,	"bi",		_A1(A_A),		00010,	BR)	/* BI            IP<-RA */
+APUOP(M_BISL,		RR,	0x1a9,	"bisl",		_A2(A_T,A_A),	00012,	BR)	/* BISL          RT,IP<-IP,RA */
+APUOP(M_IRET,  		RR,	0x1aa,	"iret",	        _A1(A_A), 	00010,	BR)	/* IRET          IP<-SRR0 */
+APUOP(M_IRET2, 		RR,	0x1aa,	"iret",	        _A0(),	 	00010,	BR)	/* IRET          IP<-SRR0 */
+APUOP(M_BISLED,		RR,	0x1ab,	"bisled",	_A2(A_T,A_A),	00012,	BR)	/* BISLED        RT,IP<-IP,RA_if(ext) */
+APUOP(M_HBR,		LBTI,	0x1ac,	"hbr",		_A2(A_S11I,A_A),	00010,	LS)	/* HBR           BTB[B9]<-M[Ra] */
+APUOP(M_FREST,		RR,	0x1b8,	"frest",	_A2(A_T,A_A),	00012,	SHUF)	/* FREST         RT<-recip(RA) */
+APUOP(M_FRSQEST,	RR,	0x1b9,	"frsqest",	_A2(A_T,A_A),	00012,	SHUF)	/* FRSQEST       RT<-rsqrt(RA) */
+APUOP(M_FSM,		RR,	0x1b4,	"fsm",		_A2(A_T,A_A),	00012,	SHUF)	/* FormSelMask%  RT<-expand(Ra) */
+APUOP(M_FSMH,		RR,	0x1b5,	"fsmh",		_A2(A_T,A_A),	00012,	SHUF)	/* FormSelMask%  RT<-expand(Ra) */
+APUOP(M_FSMB,		RR,	0x1b6,	"fsmb",		_A2(A_T,A_A),	00012,	SHUF)	/* FormSelMask%  RT<-expand(Ra) */
+APUOP(M_GB,		RR,	0x1b0,	"gb",		_A2(A_T,A_A),	00012,	SHUF)	/* GatherBits%   RT<-gather(RA) */
+APUOP(M_GBH,		RR,	0x1b1,	"gbh",		_A2(A_T,A_A),	00012,	SHUF)	/* GatherBits%   RT<-gather(RA) */
+APUOP(M_GBB,		RR,	0x1b2,	"gbb",		_A2(A_T,A_A),	00012,	SHUF)	/* GatherBits%   RT<-gather(RA) */
+APUOP(M_CBD,		RI7,	0x1f4,	"cbd",		_A4(A_T,A_U7,A_P,A_A),	00012,	SHUF)	/* genCtl%%insD  RT<-sta(Ra+I4,siz) */
+APUOP(M_CHD,		RI7,	0x1f5,	"chd",		_A4(A_T,A_U7,A_P,A_A),	00012,	SHUF)	/* genCtl%%insD  RT<-sta(Ra+I4,siz) */
+APUOP(M_CWD,		RI7,	0x1f6,	"cwd",		_A4(A_T,A_U7,A_P,A_A),	00012,	SHUF)	/* genCtl%%insD  RT<-sta(Ra+I4,siz) */
+APUOP(M_CDD,		RI7,	0x1f7,	"cdd",		_A4(A_T,A_U7,A_P,A_A),	00012,	SHUF)	/* genCtl%%insD  RT<-sta(Ra+I4,siz) */
+APUOP(M_ROTQBII,	RI7,	0x1f8,	"rotqbii",	_A3(A_T,A_A,A_U3),	00012,	SHUF)	/* ROTQBII       RT<-RA<<<I7 */
+APUOP(M_ROTQBYI,	RI7,	0x1fc,	"rotqbyi",	_A3(A_T,A_A,A_S7N),	00012,	SHUF)	/* ROTQBYI       RT<-RA<<<(I7*8) */
+APUOP(M_ROTQMBII,	RI7,	0x1f9,	"rotqmbii",	_A3(A_T,A_A,A_S3),	00012,	SHUF)	/* ROTQMBII      RT<-RA<<I7 */
+APUOP(M_ROTQMBYI,	RI7,	0x1fd,	"rotqmbyi",	_A3(A_T,A_A,A_S6),	00012,	SHUF)	/* ROTQMBYI      RT<-RA<<I7 */
+APUOP(M_SHLQBII,	RI7,	0x1fb,	"shlqbii",	_A3(A_T,A_A,A_U3),	00012,	SHUF)	/* SHLQBII       RT<-RA<<I7 */
+APUOP(M_SHLQBYI,	RI7,	0x1ff,	"shlqbyi",	_A3(A_T,A_A,A_U5),	00012,	SHUF)	/* SHLQBYI       RT<-RA<<I7 */
+APUOP(M_STQD,		RI10,	0x120,	"stqd",		_A4(A_T,A_S14,A_P,A_A),	00011,	LS)	/* SToreQDisp    M[Ra+I10]<-RT */
+APUOP(M_BIHNZ,		RR,	0x12b,	"bihnz",	_A2(A_T,A_A),	00011,	BR)	/* BIHNZ         IP<-RA_if(RT) */
+APUOP(M_BIHZ,		RR,	0x12a,	"bihz",		_A2(A_T,A_A),	00011,	BR)	/* BIHZ          IP<-RA_if(RT) */
+APUOP(M_BINZ,		RR,	0x129,	"binz",		_A2(A_T,A_A),	00011,	BR)	/* BINZ          IP<-RA_if(RT) */
+APUOP(M_BIZ,		RR,	0x128,	"biz",		_A2(A_T,A_A),	00011,	BR)	/* BIZ           IP<-RA_if(RT) */
+APUOP(M_CBX,		RR,	0x1d4,	"cbx",		_A3(A_T,A_A,A_B),		00112,	SHUF)	/* genCtl%%insX  RT<-sta(Ra+Rb,siz) */
+APUOP(M_CHX,		RR,	0x1d5,	"chx",		_A3(A_T,A_A,A_B),		00112,	SHUF)	/* genCtl%%insX  RT<-sta(Ra+Rb,siz) */
+APUOP(M_CWX,		RR,	0x1d6,	"cwx",		_A3(A_T,A_A,A_B),		00112,	SHUF)	/* genCtl%%insX  RT<-sta(Ra+Rb,siz) */
+APUOP(M_CDX,		RR,	0x1d7,	"cdx",		_A3(A_T,A_A,A_B),		00112,	SHUF)	/* genCtl%%insX  RT<-sta(Ra+Rb,siz) */
+APUOP(M_LQX,		RR,	0x1c4,	"lqx",		_A3(A_T,A_A,A_B),		00112,	LS)	/* LoadQindeX    RT<-M[Ra+Rb] */
+APUOP(M_ROTQBI,		RR,	0x1d8,	"rotqbi",	_A3(A_T,A_A,A_B),		00112,	SHUF)	/* ROTQBI        RT<-RA<<<Rb */
+APUOP(M_ROTQMBI,	RR,	0x1d9,	"rotqmbi",	_A3(A_T,A_A,A_B),		00112,	SHUF)	/* ROTQMBI       RT<-RA<<Rb */
+APUOP(M_SHLQBI,		RR,	0x1db,	"shlqbi",	_A3(A_T,A_A,A_B),		00112,	SHUF)	/* SHLQBI        RT<-RA<<Rb */
+APUOP(M_ROTQBY,		RR,	0x1dc,	"rotqby",	_A3(A_T,A_A,A_B),		00112,		SHUF)	/* ROTQBY        RT<-RA<<<(Rb*8) */
+APUOP(M_ROTQMBY,	RR,	0x1dd,	"rotqmby",	_A3(A_T,A_A,A_B),		00112,		SHUF)	/* ROTQMBY       RT<-RA<<Rb */
+APUOP(M_SHLQBY,		RR,	0x1df,	"shlqby",	_A3(A_T,A_A,A_B),		00112,	SHUF)	/* SHLQBY        RT<-RA<<Rb */
+APUOP(M_ROTQBYBI,	RR,	0x1cc,	"rotqbybi",	_A3(A_T,A_A,A_B),		00112,		SHUF)	/* ROTQBYBI      RT<-RA<<Rb */
+APUOP(M_ROTQMBYBI,	RR,	0x1cd,	"rotqmbybi",	_A3(A_T,A_A,A_B),		00112,		SHUF)	/* ROTQMBYBI     RT<-RA<<Rb */
+APUOP(M_SHLQBYBI,	RR,	0x1cf,	"shlqbybi",	_A3(A_T,A_A,A_B),		00112,	SHUF)	/* SHLQBYBI      RT<-RA<<Rb */
+APUOP(M_STQX,		RR,	0x144,	"stqx",		_A3(A_T,A_A,A_B),		00111,	LS)	/* SToreQindeX   M[Ra+Rb]<-RT */
+APUOP(M_SHUFB,		RRR,	0x580,	"shufb",	_A4(A_C,A_A,A_B,A_T),	02111,	SHUF)	/* SHUFfleBytes  RC<-f(RA,RB,RT) */
+APUOP(M_IL,		RI16,	0x204,	"il",		_A2(A_T,A_S16),	00002,	FX2)	/* ImmLoad       RT<-sxt(I16) */
+APUOP(M_ILH,		RI16,	0x20c,	"ilh",		_A2(A_T,A_X16),	00002,	FX2)	/* ImmLoadH      RT<-I16 */
+APUOP(M_ILHU,		RI16,	0x208,	"ilhu",		_A2(A_T,A_X16),	00002,	FX2)	/* ImmLoadHUpper RT<-I16<<16 */
+APUOP(M_ILA,		RI18,	0x210,	"ila",		_A2(A_T,A_U18),	00002,	FX2)	/* ImmLoadAddr   RT<-zxt(I18) */
+APUOP(M_NOP,		RR,	0x201,	"nop",		_A1(A_T),		00000,	NOP)	/* XNOP          no_operation */
+APUOP(M_NOP2,		RR,	0x201,	"nop",		_A0(),		00000,	NOP)	/* XNOP          no_operation */
+APUOP(M_IOHL,		RI16,	0x304,	"iohl",		_A2(A_T,A_X16),	00003,	FX2)	/* AddImmeXt     RT<-RT+sxt(I16) */
+APUOP(M_ANDBI,		RI10,	0x0b0,	"andbi",	_A3(A_T,A_A,A_S10B),	00012,	FX2)	/* AND%I         RT<-RA&I10 */
+APUOP(M_ANDHI,		RI10,	0x0a8,	"andhi",	_A3(A_T,A_A,A_S10),	00012,	FX2)	/* AND%I         RT<-RA&I10 */
+APUOP(M_ANDI,		RI10,	0x0a0,	"andi",		_A3(A_T,A_A,A_S10),	00012,	FX2)	/* AND%I         RT<-RA&I10 */
+APUOP(M_ORBI,		RI10,	0x030,	"orbi",		_A3(A_T,A_A,A_S10B),	00012,	FX2)	/* OR%I          RT<-RA|I10 */
+APUOP(M_ORHI,		RI10,	0x028,	"orhi",		_A3(A_T,A_A,A_S10),	00012,	FX2)	/* OR%I          RT<-RA|I10 */
+APUOP(M_ORI,		RI10,	0x020,	"ori",		_A3(A_T,A_A,A_S10),	00012,	FX2)	/* OR%I          RT<-RA|I10 */
+APUOP(M_ORX,		RR,	0x1f0,	"orx",		_A2(A_T,A_A),		00012,	BR)	/* ORX           RT<-RA.w0|RA.w1|RA.w2|RA.w3 */
+APUOP(M_XORBI,		RI10,	0x230,	"xorbi",	_A3(A_T,A_A,A_S10B),	00012,	FX2)	/* XOR%I         RT<-RA^I10 */
+APUOP(M_XORHI,		RI10,	0x228,	"xorhi",	_A3(A_T,A_A,A_S10),	00012,	FX2)	/* XOR%I         RT<-RA^I10 */
+APUOP(M_XORI,		RI10,	0x220,	"xori",		_A3(A_T,A_A,A_S10),	00012,	FX2)	/* XOR%I         RT<-RA^I10 */
+APUOP(M_AHI,		RI10,	0x0e8,	"ahi",		_A3(A_T,A_A,A_S10),	00012,	FX2)	/* Add%Immed     RT<-RA+I10 */
+APUOP(M_AI,		RI10,	0x0e0,	"ai",		_A3(A_T,A_A,A_S10),	00012,	FX2)	/* Add%Immed     RT<-RA+I10 */
+APUOP(M_SFHI,		RI10,	0x068,	"sfhi",		_A3(A_T,A_A,A_S10),	00012,	FX2)	/* SubFrom%Imm   RT<-I10-RA */
+APUOP(M_SFI,		RI10,	0x060,	"sfi",		_A3(A_T,A_A,A_S10),	00012,	FX2)	/* SubFrom%Imm   RT<-I10-RA */
+APUOP(M_CGTBI,		RI10,	0x270,	"cgtbi",	_A3(A_T,A_A,A_S10B),	00012,	FX2)	/* CGT%I         RT<-(RA>I10) */
+APUOP(M_CGTHI,		RI10,	0x268,	"cgthi",	_A3(A_T,A_A,A_S10),	00012,	FX2)	/* CGT%I         RT<-(RA>I10) */
+APUOP(M_CGTI,		RI10,	0x260,	"cgti",		_A3(A_T,A_A,A_S10),	00012,	FX2)	/* CGT%I         RT<-(RA>I10) */
+APUOP(M_CLGTBI,		RI10,	0x2f0,	"clgtbi",	_A3(A_T,A_A,A_S10B),	00012,	FX2)	/* CLGT%I        RT<-(RA>I10) */
+APUOP(M_CLGTHI,		RI10,	0x2e8,	"clgthi",	_A3(A_T,A_A,A_S10),	00012,	FX2)	/* CLGT%I        RT<-(RA>I10) */
+APUOP(M_CLGTI,		RI10,	0x2e0,	"clgti",	_A3(A_T,A_A,A_S10),	00012,	FX2)	/* CLGT%I        RT<-(RA>I10) */
+APUOP(M_CEQBI,		RI10,	0x3f0,	"ceqbi",	_A3(A_T,A_A,A_S10B),	00012,	FX2)	/* CEQ%I         RT<-(RA=I10) */
+APUOP(M_CEQHI,		RI10,	0x3e8,	"ceqhi",	_A3(A_T,A_A,A_S10),	00012,	FX2)	/* CEQ%I         RT<-(RA=I10) */
+APUOP(M_CEQI,		RI10,	0x3e0,	"ceqi",		_A3(A_T,A_A,A_S10),	00012,	FX2)	/* CEQ%I         RT<-(RA=I10) */
+APUOP(M_HGTI,		RI10,	0x278,	"hgti",		_A3(A_T,A_A,A_S10),	00010,	FX2)	/* HaltGTI       halt_if(RA>I10) */
+APUOP(M_HGTI2,		RI10,	0x278,	"hgti",		_A2(A_A,A_S10),	00010,	FX2)	/* HaltGTI       halt_if(RA>I10) */
+APUOP(M_HLGTI,		RI10,	0x2f8,	"hlgti",	_A3(A_T,A_A,A_S10),	00010,	FX2)	/* HaltLGTI      halt_if(RA>I10) */
+APUOP(M_HLGTI2,		RI10,	0x2f8,	"hlgti",	_A2(A_A,A_S10),	00010,	FX2)	/* HaltLGTI      halt_if(RA>I10) */
+APUOP(M_HEQI,		RI10,	0x3f8,	"heqi",		_A3(A_T,A_A,A_S10),	00010,	FX2)	/* HaltEQImm     halt_if(RA=I10) */
+APUOP(M_HEQI2,		RI10,	0x3f8,	"heqi",		_A2(A_A,A_S10),	00010,	FX2)	/* HaltEQImm     halt_if(RA=I10) */
+APUOP(M_MPYI,		RI10,	0x3a0,	"mpyi",		_A3(A_T,A_A,A_S10),	00012,	FP7)	/* MPYI          RT<-RA*I10 */
+APUOP(M_MPYUI,		RI10,	0x3a8,	"mpyui",	_A3(A_T,A_A,A_S10),	00012,	FP7)	/* MPYUI         RT<-RA*I10 */
+APUOP(M_CFLTS,		RI8,	0x3b0,	"cflts",	_A3(A_T,A_A,A_U7A),	00012,	FP7)	/* CFLTS         RT<-int(RA,I8) */
+APUOP(M_CFLTU,		RI8,	0x3b2,	"cfltu",	_A3(A_T,A_A,A_U7A),	00012,	FP7)	/* CFLTU         RT<-int(RA,I8) */
+APUOP(M_CSFLT,		RI8,	0x3b4,	"csflt",	_A3(A_T,A_A,A_U7B),	00012,	FP7)	/* CSFLT         RT<-flt(RA,I8) */
+APUOP(M_CUFLT,		RI8,	0x3b6,	"cuflt",	_A3(A_T,A_A,A_U7B),	00012,	FP7)	/* CUFLT         RT<-flt(RA,I8) */
+APUOP(M_FESD,		RR,	0x3b8,	"fesd",		_A2(A_T,A_A),	00012,	FPD)	/* FESD          RT<-double(RA) */
+APUOP(M_FRDS,		RR,	0x3b9,	"frds",		_A2(A_T,A_A),	00012,	FPD)	/* FRDS          RT<-single(RA) */
+APUOP(M_FSCRRD,		RR,	0x398,	"fscrrd",	_A1(A_T),		00002,	FPD)	/* FSCRRD        RT<-FP_status */
+APUOP(M_FSCRWR,		RR,	0x3ba,	"fscrwr",	_A2(A_T,A_A),	00010,	FP7)	/* FSCRWR        FP_status<-RA */
+APUOP(M_FSCRWR2,	RR,	0x3ba,	"fscrwr",	_A1(A_A),		00010,	FP7)	/* FSCRWR        FP_status<-RA */
+APUOP(M_CLZ,		RR,	0x2a5,	"clz",		_A2(A_T,A_A),	00012,	FX2)	/* CLZ           RT<-clz(RA) */
+APUOP(M_CNTB,		RR,	0x2b4,	"cntb",		_A2(A_T,A_A),	00012,	FXB)	/* CNT           RT<-pop(RA) */
+APUOP(M_XSBH,		RR,	0x2b6,	"xsbh",		_A2(A_T,A_A),	00012,	FX2)	/* eXtSignBtoH   RT<-sign_ext(RA) */
+APUOP(M_XSHW,		RR,	0x2ae,	"xshw",		_A2(A_T,A_A),	00012,	FX2)	/* eXtSignHtoW   RT<-sign_ext(RA) */
+APUOP(M_XSWD,		RR,	0x2a6,	"xswd",		_A2(A_T,A_A),	00012,	FX2)	/* eXtSignWtoD   RT<-sign_ext(RA) */
+APUOP(M_ROTI,		RI7,	0x078,	"roti",		_A3(A_T,A_A,A_S7N),	00012,	FX3)	/* ROT%I         RT<-RA<<<I7 */
+APUOP(M_ROTMI,		RI7,	0x079,	"rotmi",	_A3(A_T,A_A,A_S7),	00012,	FX3)	/* ROT%MI        RT<-RA<<I7 */
+APUOP(M_ROTMAI,		RI7,	0x07a,	"rotmai",	_A3(A_T,A_A,A_S7),	00012,	FX3)	/* ROTMA%I       RT<-RA<<I7 */
+APUOP(M_SHLI,		RI7,	0x07b,	"shli",		_A3(A_T,A_A,A_U6),	00012,	FX3)	/* SHL%I         RT<-RA<<I7 */
+APUOP(M_ROTHI,		RI7,	0x07c,	"rothi",	_A3(A_T,A_A,A_S7N),	00012,	FX3)	/* ROT%I         RT<-RA<<<I7 */
+APUOP(M_ROTHMI,		RI7,	0x07d,	"rothmi",	_A3(A_T,A_A,A_S6),	00012,	FX3)	/* ROT%MI        RT<-RA<<I7 */
+APUOP(M_ROTMAHI,	RI7,	0x07e,	"rotmahi",	_A3(A_T,A_A,A_S6),	00012,	FX3)	/* ROTMA%I       RT<-RA<<I7 */
+APUOP(M_SHLHI,		RI7,	0x07f,	"shlhi",	_A3(A_T,A_A,A_U5),	00012,	FX3)	/* SHL%I         RT<-RA<<I7 */
+APUOP(M_A,		RR,	0x0c0,	"a",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* Add%          RT<-RA+RB */
+APUOP(M_AH,		RR,	0x0c8,	"ah",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* Add%          RT<-RA+RB */
+APUOP(M_SF,		RR,	0x040,	"sf",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* SubFrom%      RT<-RB-RA */
+APUOP(M_SFH,		RR,	0x048,	"sfh",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* SubFrom%      RT<-RB-RA */
+APUOP(M_CGT,		RR,	0x240,	"cgt",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* CGT%          RT<-(RA>RB) */
+APUOP(M_CGTB,		RR,	0x250,	"cgtb",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* CGT%          RT<-(RA>RB) */
+APUOP(M_CGTH,		RR,	0x248,	"cgth",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* CGT%          RT<-(RA>RB) */
+APUOP(M_CLGT,		RR,	0x2c0,	"clgt",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* CLGT%         RT<-(RA>RB) */
+APUOP(M_CLGTB,		RR,	0x2d0,	"clgtb",	_A3(A_T,A_A,A_B),		00112,	FX2)	/* CLGT%         RT<-(RA>RB) */
+APUOP(M_CLGTH,		RR,	0x2c8,	"clgth",	_A3(A_T,A_A,A_B),		00112,	FX2)	/* CLGT%         RT<-(RA>RB) */
+APUOP(M_CEQ,		RR,	0x3c0,	"ceq",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* CEQ%          RT<-(RA=RB) */
+APUOP(M_CEQB,		RR,	0x3d0,	"ceqb",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* CEQ%          RT<-(RA=RB) */
+APUOP(M_CEQH,		RR,	0x3c8,	"ceqh",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* CEQ%          RT<-(RA=RB) */
+APUOP(M_HGT,		RR,	0x258,	"hgt",		_A3(A_T,A_A,A_B),		00110,	FX2)	/* HaltGT        halt_if(RA>RB) */
+APUOP(M_HGT2,		RR,	0x258,	"hgt",		_A2(A_A,A_B),	00110,	FX2)	/* HaltGT        halt_if(RA>RB) */
+APUOP(M_HLGT,		RR,	0x2d8,	"hlgt",		_A3(A_T,A_A,A_B),		00110,	FX2)	/* HaltLGT       halt_if(RA>RB) */
+APUOP(M_HLGT2,		RR,	0x2d8,	"hlgt",		_A2(A_A,A_B),	00110,	FX2)	/* HaltLGT       halt_if(RA>RB) */
+APUOP(M_HEQ,		RR,	0x3d8,	"heq",		_A3(A_T,A_A,A_B),		00110,	FX2)	/* HaltEQ        halt_if(RA=RB) */
+APUOP(M_HEQ2,		RR,	0x3d8,	"heq",		_A2(A_A,A_B),	00110,	FX2)	/* HaltEQ        halt_if(RA=RB) */
+APUOP(M_FCEQ,		RR,	0x3c2,	"fceq",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* FCEQ          RT<-(RA=RB) */
+APUOP(M_FCMEQ,		RR,	0x3ca,	"fcmeq",	_A3(A_T,A_A,A_B),		00112,	FX2)	/* FCMEQ         RT<-(|RA|=|RB|) */
+APUOP(M_FCGT,		RR,	0x2c2,	"fcgt",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* FCGT          RT<-(RA<RB) */
+APUOP(M_FCMGT,		RR,	0x2ca,	"fcmgt",	_A3(A_T,A_A,A_B),		00112,	FX2)	/* FCMGT         RT<-(|RA|<|RB|) */
+APUOP(M_AND,		RR,	0x0c1,	"and",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* AND           RT<-RA&RB */
+APUOP(M_NAND,		RR,	0x0c9,	"nand",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* NAND          RT<-!(RA&RB) */
+APUOP(M_OR,		RR,	0x041,	"or",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* OR            RT<-RA|RB */
+APUOP(M_NOR,		RR,	0x049,	"nor",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* NOR           RT<-!(RA&RB) */
+APUOP(M_XOR,		RR,	0x241,	"xor",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* XOR           RT<-RA^RB */
+APUOP(M_EQV,		RR,	0x249,	"eqv",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* EQuiValent    RT<-!(RA^RB) */
+APUOP(M_ANDC,		RR,	0x2c1,	"andc",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* ANDComplement RT<-RA&!RB */
+APUOP(M_ORC,		RR,	0x2c9,	"orc",		_A3(A_T,A_A,A_B),		00112,	FX2)	/* ORComplement  RT<-RA|!RB */
+APUOP(M_ABSDB,		RR,	0x053,	"absdb",	_A3(A_T,A_A,A_B),		00112,	FXB)	/* ABSoluteDiff  RT<-|RA-RB| */
+APUOP(M_AVGB,		RR,	0x0d3,	"avgb",		_A3(A_T,A_A,A_B),		00112,	FXB)	/* AVG%          RT<-(RA+RB+1)/2 */
+APUOP(M_SUMB,		RR,	0x253,	"sumb",		_A3(A_T,A_A,A_B),		00112,	FXB)	/* SUM%          RT<-f(RA,RB) */
+APUOP(M_DFA,		RR,	0x2cc,	"dfa",		_A3(A_T,A_A,A_B),		00112,	FPD)	/* DFAdd         RT<-RA+RB */
+APUOP(M_DFM,		RR,	0x2ce,	"dfm",		_A3(A_T,A_A,A_B),		00112,	FPD)	/* DFMul         RT<-RA*RB */
+APUOP(M_DFS,		RR,	0x2cd,	"dfs",		_A3(A_T,A_A,A_B),		00112,	FPD)	/* DFSub         RT<-RA-RB */
+APUOP(M_FA,		RR,	0x2c4,	"fa",		_A3(A_T,A_A,A_B),		00112,	FP6)	/* FAdd          RT<-RA+RB */
+APUOP(M_FM,		RR,	0x2c6,	"fm",		_A3(A_T,A_A,A_B),		00112,	FP6)	/* FMul          RT<-RA*RB */
+APUOP(M_FS,		RR,	0x2c5,	"fs",		_A3(A_T,A_A,A_B),		00112,	FP6)	/* FSub          RT<-RA-RB */
+APUOP(M_MPY,		RR,	0x3c4,	"mpy",		_A3(A_T,A_A,A_B),		00112,	FP7)	/* MPY           RT<-RA*RB */
+APUOP(M_MPYH,		RR,	0x3c5,	"mpyh",		_A3(A_T,A_A,A_B),		00112,	FP7)	/* MPYH          RT<-(RAh*RB)<<16 */
+APUOP(M_MPYHH,		RR,	0x3c6,	"mpyhh",	_A3(A_T,A_A,A_B),		00112,	FP7)	/* MPYHH         RT<-RAh*RBh */
+APUOP(M_MPYHHU,		RR,	0x3ce,	"mpyhhu",	_A3(A_T,A_A,A_B),		00112,	FP7)	/* MPYHHU        RT<-RAh*RBh */
+APUOP(M_MPYS,		RR,	0x3c7,	"mpys",		_A3(A_T,A_A,A_B),		00112,	FP7)	/* MPYS          RT<-(RA*RB)>>16 */
+APUOP(M_MPYU,		RR,	0x3cc,	"mpyu",		_A3(A_T,A_A,A_B),		00112,	FP7)	/* MPYU          RT<-RA*RB */
+APUOP(M_FI,		RR,	0x3d4,	"fi",		_A3(A_T,A_A,A_B),		00112,	FP7)	/* FInterpolate  RT<-f(RA,RB) */
+APUOP(M_ROT,		RR,	0x058,	"rot",		_A3(A_T,A_A,A_B),		00112,	FX3)	/* ROT%          RT<-RA<<<RB */
+APUOP(M_ROTM,		RR,	0x059,	"rotm",		_A3(A_T,A_A,A_B),		00112,	FX3)	/* ROT%M         RT<-RA<<Rb */
+APUOP(M_ROTMA,		RR,	0x05a,	"rotma",	_A3(A_T,A_A,A_B),		00112,	FX3)	/* ROTMA%        RT<-RA<<Rb */
+APUOP(M_SHL,		RR,	0x05b,	"shl",		_A3(A_T,A_A,A_B),		00112,	FX3)	/* SHL%          RT<-RA<<Rb */
+APUOP(M_ROTH,		RR,	0x05c,	"roth",		_A3(A_T,A_A,A_B),		00112,	FX3)	/* ROT%          RT<-RA<<<RB */
+APUOP(M_ROTHM,		RR,	0x05d,	"rothm",	_A3(A_T,A_A,A_B),		00112,	FX3)	/* ROT%M         RT<-RA<<Rb */
+APUOP(M_ROTMAH,		RR,	0x05e,	"rotmah",	_A3(A_T,A_A,A_B),		00112,	FX3)	/* ROTMA%        RT<-RA<<Rb */
+APUOP(M_SHLH,		RR,	0x05f,	"shlh",		_A3(A_T,A_A,A_B),		00112,	FX3)	/* SHL%          RT<-RA<<Rb */
+APUOP(M_MPYHHA,		RR,	0x346,	"mpyhha",	_A3(A_T,A_A,A_B),		00113,	FP7)	/* MPYHHA        RT<-RAh*RBh+RT */
+APUOP(M_MPYHHAU,	RR,	0x34e,	"mpyhhau",	_A3(A_T,A_A,A_B),		00113,	FP7)	/* MPYHHAU       RT<-RAh*RBh+RT */
+APUOP(M_DFMA,		RR,	0x35c,	"dfma",		_A3(A_T,A_A,A_B),		00113,	FPD)	/* DFMAdd        RT<-RT+RA*RB */
+APUOP(M_DFMS,		RR,	0x35d,	"dfms",		_A3(A_T,A_A,A_B),		00113,	FPD)	/* DFMSub        RT<-RA*RB-RT */
+APUOP(M_DFNMS,		RR,	0x35e,	"dfnms",	_A3(A_T,A_A,A_B),		00113,	FPD)	/* DFNMSub       RT<-RT-RA*RB */
+APUOP(M_DFNMA,		RR,	0x35f,	"dfnma",	_A3(A_T,A_A,A_B),		00113,	FPD)	/* DFNMAdd       RT<-(-RT)-RA*RB */
+APUOP(M_FMA,		RRR,	0x700,	"fma",		_A4(A_C,A_A,A_B,A_T),	02111,	FP6)	/* FMAdd         RC<-RT+RA*RB */
+APUOP(M_FMS,		RRR,	0x780,	"fms",		_A4(A_C,A_A,A_B,A_T),	02111,	FP6)	/* FMSub         RC<-RA*RB-RT */
+APUOP(M_FNMS,		RRR,	0x680,	"fnms",		_A4(A_C,A_A,A_B,A_T),	02111,	FP6)	/* FNMSub        RC<-RT-RA*RB */
+APUOP(M_MPYA,		RRR,	0x600,	"mpya",		_A4(A_C,A_A,A_B,A_T),	02111,	FP7)	/* MPYA          RC<-RA*RB+RT */
+APUOP(M_SELB,		RRR,	0x400,	"selb",		_A4(A_C,A_A,A_B,A_T),	02111,	FX2)	/* SELectBits    RC<-RA&RT|RB&!RT */
+/* for system function call, this uses op-code of mtspr */
+APUOP(M_SYSCALL,	RI7,    0x10c,	"syscall",      _A3(A_T,A_A,A_S7N),	00002,	SPR)        /* System Call */
+/*
+pseudo instruction:
+system call
+value of I9	operation
+0	halt
+1		rt[0] = open(MEM[ra[0]],	ra[1])
+2		rt[0] = close(ra[0])
+3		rt[0] = read(ra[0],	MEM[ra[1]],	ra[2])
+4		rt[0] = write(ra[0],	MEM[ra[1]],	ra[2])
+5		printf(MEM[ra[0]],	ra[1],	ra[2],	ra[3])
+42		rt[0] = clock()
+52		rt[0] = lseek(ra0,	ra1,	ra2)
+
+*/
+
+
+/* new multiprecision add/sub */
+APUOP(M_ADDX,		RR,	0x340,	"addx",		_A3(A_T,A_A,A_B),		00113,		FX2)	/* Add_eXtended  RT<-RA+RB+RT */
+APUOP(M_CG,		RR,	0x0c2,	"cg",		_A3(A_T,A_A,A_B),		00112,		FX2)	/* CarryGenerate RT<-cout(RA+RB) */
+APUOP(M_CGX,		RR,	0x342,	"cgx",		_A3(A_T,A_A,A_B),		00113,		FX2)	/* CarryGen_eXtd RT<-cout(RA+RB+RT) */
+APUOP(M_SFX,		RR,	0x341,	"sfx",		_A3(A_T,A_A,A_B),		00113,		FX2)	/* SubFrom_eXtd  RT<-RB-RA-!RT */
+APUOP(M_BG,		RR,	0x042,	"bg",		_A3(A_T,A_A,A_B),		00112,		FX2)	/* BorrowGen     RT<-bout(RB-RA) */
+APUOP(M_BGX,		RR,	0x343,	"bgx",		_A3(A_T,A_A,A_B),		00113,		FX2)	/* BorrowGen_eXtd RT<-bout(RB-RA-!RT) */
+
+/*
+
+The following ops are a subset of above except with feature bits set.
+Feature bits are bits 11-17 of the instruction:
+
+  11 - C & P feature bit
+  12 - disable interrupts
+  13 - enable interrupts
+
+*/
+APUOPFB(M_BID,		RR,	0x1a8,	0x20,	"bid",		_A1(A_A),		00010,	BR)	/* BI            IP<-RA */
+APUOPFB(M_BIE,		RR,	0x1a8,	0x10,	"bie",		_A1(A_A),		00010,	BR)	/* BI            IP<-RA */
+APUOPFB(M_BISLD,	RR,	0x1a9,	0x20,	"bisld",	_A2(A_T,A_A),	00012,	BR)	/* BISL          RT,IP<-IP,RA */
+APUOPFB(M_BISLE,	RR,	0x1a9,	0x10,	"bisle",	_A2(A_T,A_A),	00012,	BR)	/* BISL          RT,IP<-IP,RA */
+APUOPFB(M_IRETD,  	RR,	0x1aa,	0x20,	"iretd",	_A1(A_A), 	00010,	BR)	/* IRET          IP<-SRR0 */
+APUOPFB(M_IRETD2,  	RR,	0x1aa,	0x20,	"iretd",	_A0(),	 	00010,	BR)	/* IRET          IP<-SRR0 */
+APUOPFB(M_IRETE,  	RR,	0x1aa,	0x10,	"irete",	_A1(A_A), 	00010,	BR)	/* IRET          IP<-SRR0 */
+APUOPFB(M_IRETE2,  	RR,	0x1aa,	0x10,	"irete",	_A0(),	 	00010,	BR)	/* IRET          IP<-SRR0 */
+APUOPFB(M_BISLEDD,	RR,	0x1ab,	0x20,	"bisledd",	_A2(A_T,A_A),	00012,	BR)	/* BISLED        RT,IP<-IP,RA_if(ext) */
+APUOPFB(M_BISLEDE,	RR,	0x1ab,	0x10,	"bislede",	_A2(A_T,A_A),	00012,	BR)	/* BISLED        RT,IP<-IP,RA_if(ext) */
+APUOPFB(M_BIHNZD,	RR,	0x12b,	0x20,	"bihnzd",	_A2(A_T,A_A),	00011,	BR)	/* BIHNZ         IP<-RA_if(RT) */
+APUOPFB(M_BIHNZE,	RR,	0x12b,	0x10,	"bihnze",	_A2(A_T,A_A),	00011,	BR)	/* BIHNZ         IP<-RA_if(RT) */
+APUOPFB(M_BIHZD,	RR,	0x12a,	0x20,	"bihzd",	_A2(A_T,A_A),	00011,	BR)	/* BIHZ          IP<-RA_if(RT) */
+APUOPFB(M_BIHZE,	RR,	0x12a,	0x10,	"bihze",	_A2(A_T,A_A),	00011,	BR)	/* BIHZ          IP<-RA_if(RT) */
+APUOPFB(M_BINZD,	RR,	0x129,	0x20,	"binzd",	_A2(A_T,A_A),	00011,	BR)	/* BINZ          IP<-RA_if(RT) */
+APUOPFB(M_BINZE,	RR,	0x129,	0x10,	"binze",	_A2(A_T,A_A),	00011,	BR)	/* BINZ          IP<-RA_if(RT) */
+APUOPFB(M_BIZD,		RR,	0x128,	0x20,	"bizd",		_A2(A_T,A_A),	00011,	BR)	/* BIZ           IP<-RA_if(RT) */
+APUOPFB(M_BIZE,		RR,	0x128,	0x10,	"bize",		_A2(A_T,A_A),	00011,	BR)	/* BIZ           IP<-RA_if(RT) */
+APUOPFB(M_SYNCC,	RR,	0x002,	0x40,	"syncc",	_A0(),		00000,	BR)	/* SYNCC          flush_pipe */
+APUOPFB(M_HBRP,		LBTI,	0x1ac,	0x40,	"hbrp",		_A0(),	        00010,	LS)	/* HBR           BTB[B9]<-M[Ra] */
+
+/* Synonyms required by the AS manual. */
+APUOP(M_LR,		RI10,	0x020,	"lr",		_A2(A_T,A_A),	00012,	FX2)	/* OR%I          RT<-RA|I10 */
+APUOP(M_BIHT,		RR,	0x12b,	"biht", 	_A2(A_T,A_A),	00011,	BR)	/* BIHNZ         IP<-RA_if(RT) */
+APUOP(M_BIHF,		RR,	0x12a,	"bihf",		_A2(A_T,A_A),	00011,	BR)	/* BIHZ          IP<-RA_if(RT) */
+APUOP(M_BIT,		RR,	0x129,	"bit",		_A2(A_T,A_A),	00011,	BR)	/* BINZ          IP<-RA_if(RT) */
+APUOP(M_BIF,		RR,	0x128,	"bif",		_A2(A_T,A_A),	00011,	BR)	/* BIZ           IP<-RA_if(RT) */
+APUOPFB(M_BIHTD,	RR,	0x12b,	0x20,	"bihtd",	_A2(A_T,A_A),	00011,	BR)	/* BIHNF         IP<-RA_if(RT) */
+APUOPFB(M_BIHTE,	RR,	0x12b,	0x10,	"bihte",	_A2(A_T,A_A),	00011,	BR)	/* BIHNF         IP<-RA_if(RT) */
+APUOPFB(M_BIHFD,	RR,	0x12a,	0x20,	"bihfd",	_A2(A_T,A_A),	00011,	BR)	/* BIHZ          IP<-RA_if(RT) */
+APUOPFB(M_BIHFE,	RR,	0x12a,	0x10,	"bihfe",	_A2(A_T,A_A),	00011,	BR)	/* BIHZ          IP<-RA_if(RT) */
+APUOPFB(M_BITD, 	RR,	0x129,	0x20,	"bitd", 	_A2(A_T,A_A),	00011,	BR)	/* BINF          IP<-RA_if(RT) */
+APUOPFB(M_BITE, 	RR,	0x129,	0x10,	"bite", 	_A2(A_T,A_A),	00011,	BR)	/* BINF          IP<-RA_if(RT) */
+APUOPFB(M_BIFD,		RR,	0x128,	0x20,	"bifd",		_A2(A_T,A_A),	00011,	BR)	/* BIZ           IP<-RA_if(RT) */
+APUOPFB(M_BIFE,		RR,	0x128,	0x10,	"bife",		_A2(A_T,A_A),	00011,	BR)	/* BIZ           IP<-RA_if(RT) */
+
+#undef _A0
+#undef _A1
+#undef _A2
+#undef _A3
+#undef _A4
diff --git a/arch/powerpc/xmon/spu-opc.c b/arch/powerpc/xmon/spu-opc.c
new file mode 100644
index 0000000..efffde9
--- /dev/null
+++ b/arch/powerpc/xmon/spu-opc.c
@@ -0,0 +1,44 @@
+/* SPU opcode list
+
+   Copyright 2006 Free Software Foundation, Inc.
+
+   This file is part of GDB, GAS, and the GNU binutils.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License along
+   with this program; if not, write to the Free Software Foundation, Inc.,
+   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
+
+#include "spu.h"
+
+/* This file holds the Spu opcode table */
+
+
+/*
+   Example contents of spu-insn.h
+      id_tag	mode	mode	type	opcode	mnemonic	asmtype	    dependency		FPU	L/S?	branch?	instruction   
+                QUAD	WORD                                               (0,RC,RB,RA,RT)    latency  			              		
+   APUOP(M_LQD,	1,	0,	RI9,	0x1f8,	"lqd",		ASM_RI9IDX,	00012,		FXU,	1,	0)	Load Quadword d-form 
+ */
+
+const struct spu_opcode spu_opcodes[] = {
+#define APUOP(TAG,MACFORMAT,OPCODE,MNEMONIC,ASMFORMAT,DEP,PIPE) \
+	{ MACFORMAT, OPCODE, MNEMONIC, ASMFORMAT },
+#define APUOPFB(TAG,MACFORMAT,OPCODE,FB,MNEMONIC,ASMFORMAT,DEP,PIPE) \
+	{ MACFORMAT, OPCODE, MNEMONIC, ASMFORMAT },
+#include "spu-insns.h"
+#undef APUOP
+#undef APUOPFB
+};
+
+const int spu_num_opcodes =
+  sizeof (spu_opcodes) / sizeof (spu_opcodes[0]);
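The two APUOP/APUOPFB definitions above are the whole trick: spu-insns.h is an X-macro list, so including it once here yields the opcode table and including it again in spu.h (below) yields the matching enum of instruction tags. A disassembler then only needs to scan spu_opcodes[] for an entry whose opcode matches the leading bits of the instruction word. The helper below is a hedged sketch of such a lookup, not the routine this patch adds (the real decoder lives in spu-dis.c, which is outside this hunk), and the fixed 11-bit shift is an assumption that only covers the RR/RI7-style formats:

/* Hypothetical lookup: match the top bits of an instruction word against
 * the opcode field of each table entry.  Formats with fewer than 11
 * opcode bits would need a per-format shift instead of the constant 21. */
#include "spu.h"

extern const struct spu_opcode spu_opcodes[];
extern const int spu_num_opcodes;

static const struct spu_opcode *spu_lookup_rr(unsigned int insn)
{
	int i;

	for (i = 0; i < spu_num_opcodes; i++)
		if ((insn >> 21) == spu_opcodes[i].opcode)
			return &spu_opcodes[i];
	return NULL;
}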
diff --git a/arch/powerpc/xmon/spu.h b/arch/powerpc/xmon/spu.h
new file mode 100644
index 0000000..c761fc8
--- /dev/null
+++ b/arch/powerpc/xmon/spu.h
@@ -0,0 +1,126 @@
+/* SPU ELF support for BFD.
+
+   Copyright 2006 Free Software Foundation, Inc.
+
+   This file is part of GDB, GAS, and the GNU binutils.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software Foundation,
+   Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
+
+
+/* These two enums are from rel_apu/common/spu_asm_format.h */
+/* definition of instruction format */
+typedef enum {
+  RRR,
+  RI18,
+  RI16,
+  RI10,
+  RI8,
+  RI7,
+  RR,
+  LBT,
+  LBTI,
+  IDATA,
+  UNKNOWN_IFORMAT
+} spu_iformat;
+
+/* These values describe assembly instruction arguments.  They indicate
+ * how to encode them, what range check applies and which relocation to use. */
+typedef enum {
+  A_T,  /* register at pos 0 */
+  A_A,  /* register at pos 7 */
+  A_B,  /* register at pos 14 */
+  A_C,  /* register at pos 21 */
+  A_S,  /* special purpose register at pos 7 */
+  A_H,  /* channel register at pos 7 */
+  A_P,  /* parenthesis, this has to separate regs from immediates */
+  A_S3,
+  A_S6,
+  A_S7N,
+  A_S7,
+  A_U7A,
+  A_U7B,
+  A_S10B,
+  A_S10,
+  A_S11,
+  A_S11I,
+  A_S14,
+  A_S16,
+  A_S18,
+  A_R18,
+  A_U3,
+  A_U5,
+  A_U6,
+  A_U7,
+  A_U14,
+  A_X16,
+  A_U18,
+  A_MAX
+} spu_aformat;
+
+enum spu_insns {
+#define APUOP(TAG,MACFORMAT,OPCODE,MNEMONIC,ASMFORMAT,DEP,PIPE) \
+	TAG,
+#define APUOPFB(TAG,MACFORMAT,OPCODE,FB,MNEMONIC,ASMFORMAT,DEP,PIPE) \
+	TAG,
+#include "spu-insns.h"
+#undef APUOP
+#undef APUOPFB
+        M_SPU_MAX
+};
+
+struct spu_opcode
+{
+   spu_iformat insn_type;
+   unsigned int opcode;
+   char *mnemonic;
+   int arg[5];
+};
+
+#define SIGNED_EXTRACT(insn,size,pos) (((int)((insn) << (32-size-pos))) >> (32-size))
+#define UNSIGNED_EXTRACT(insn,size,pos) (((insn) >> pos) & ((1 << size)-1))
+
+#define DECODE_INSN_RT(insn) (insn & 0x7f)
+#define DECODE_INSN_RA(insn) ((insn >> 7) & 0x7f)
+#define DECODE_INSN_RB(insn) ((insn >> 14) & 0x7f)
+#define DECODE_INSN_RC(insn) ((insn >> 21) & 0x7f)
+
+#define DECODE_INSN_I10(insn) SIGNED_EXTRACT(insn,10,14)
+#define DECODE_INSN_U10(insn) UNSIGNED_EXTRACT(insn,10,14)
+
+/* For branching, immediate loads, hbr and  lqa/stqa. */
+#define DECODE_INSN_I16(insn) SIGNED_EXTRACT(insn,16,7)
+#define DECODE_INSN_U16(insn) UNSIGNED_EXTRACT(insn,16,7)
+
+/* for stop */
+#define DECODE_INSN_U14(insn) UNSIGNED_EXTRACT(insn,14,0)
+
+/* For ila */
+#define DECODE_INSN_I18(insn) SIGNED_EXTRACT(insn,18,7)
+#define DECODE_INSN_U18(insn) UNSIGNED_EXTRACT(insn,18,7)
+
+/* For rotate and shift and generate control mask */
+#define DECODE_INSN_I7(insn) SIGNED_EXTRACT(insn,7,14)
+#define DECODE_INSN_U7(insn) UNSIGNED_EXTRACT(insn,7,14)
+
+/* For float <-> int conversion */
+#define DECODE_INSN_I8(insn)  SIGNED_EXTRACT(insn,8,14)
+#define DECODE_INSN_U8(insn) UNSIGNED_EXTRACT(insn,8,14)
+
+/* For hbr  */
+#define DECODE_INSN_I9a(insn) ((SIGNED_EXTRACT(insn,2,23) << 7) | UNSIGNED_EXTRACT(insn,7,0))
+#define DECODE_INSN_I9b(insn) ((SIGNED_EXTRACT(insn,2,14) << 7) | UNSIGNED_EXTRACT(insn,7,0))
+#define DECODE_INSN_U9a(insn) ((UNSIGNED_EXTRACT(insn,2,23) << 7) | UNSIGNED_EXTRACT(insn,7,0))
+#define DECODE_INSN_U9b(insn) ((UNSIGNED_EXTRACT(insn,2,14) << 7) | UNSIGNED_EXTRACT(insn,7,0))
+
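The DECODE_INSN_* helpers above are enough to pick apart a raw instruction word by hand. The snippet below is a small stand-alone sketch of their use; the sample word is made up for illustration and is not taken from this patch:

#include <stdio.h>
#include "spu.h"

int main(void)
{
	unsigned int insn = 0x1c2040c3;	/* hypothetical RI10-form word */

	/* RT sits in the low 7 bits, RA in the next 7, and the signed
	 * 10-bit immediate above that, exactly as the macros extract. */
	printf("rt=%d ra=%d i10=%d\n",
	       DECODE_INSN_RT(insn), DECODE_INSN_RA(insn),
	       DECODE_INSN_I10(insn));
	return 0;
}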
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index f56ffef..a34ed49 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -37,13 +37,18 @@
 #include <asm/sstep.h>
 #include <asm/bug.h>
 #include <asm/irq_regs.h>
+#include <asm/spu.h>
+#include <asm/spu_priv1.h>
+#include <asm/firmware.h>
 
 #ifdef CONFIG_PPC64
 #include <asm/hvcall.h>
 #include <asm/paca.h>
+#include <asm/iseries/it_lp_reg_save.h>
 #endif
 
 #include "nonstdio.h"
+#include "dis-asm.h"
 
 #define scanhex	xmon_scanhex
 #define skipbl	xmon_skipbl
@@ -107,7 +112,6 @@
 static void dump(void);
 static void prdump(unsigned long, long);
 static int ppc_inst_dump(unsigned long, long, int);
-void print_address(unsigned long);
 static void backtrace(struct pt_regs *);
 static void excprint(struct pt_regs *);
 static void prregs(struct pt_regs *);
@@ -147,9 +151,9 @@
 			      const char *after);
 static const char *getvecname(unsigned long vec);
 
-int xmon_no_auto_backtrace;
+static int do_spu_cmd(void);
 
-extern int print_insn_powerpc(unsigned long, unsigned long, int);
+int xmon_no_auto_backtrace;
 
 extern void xmon_enter(void);
 extern void xmon_leave(void);
@@ -209,8 +213,15 @@
   mi	show information about memory allocation\n\
   p 	call a procedure\n\
   r	print registers\n\
-  s	single step\n\
-  S	print special registers\n\
+  s	single step\n"
+#ifdef CONFIG_SPU_BASE
+"  ss	stop execution on all spus\n\
+  sr	restore execution on stopped spus\n\
+  sf  #	dump spu fields for spu # (in hex)\n\
+  sd  #	dump spu local store for spu # (in hex)\n\
+  sdi #	disassemble spu local store for spu # (in hex)\n"
+#endif
+"  S	print special registers\n\
   t	print backtrace\n\
   x	exit monitor and recover\n\
   X	exit monitor and dont recover\n"
@@ -518,6 +529,7 @@
 		xmon_save_regs(&regs);
 		excp = &regs;
 	}
+
 	return xmon_core(excp, 0);
 }
 EXPORT_SYMBOL(xmon);
@@ -809,6 +821,8 @@
 			cacheflush();
 			break;
 		case 's':
+			if (do_spu_cmd() == 0)
+				break;
 			if (do_step(excp))
 				return cmd;
 			break;
@@ -1555,11 +1569,6 @@
 {
 	int cmd;
 	unsigned long val;
-#ifdef CONFIG_PPC_ISERIES
-	struct paca_struct *ptrPaca = NULL;
-	struct lppaca *ptrLpPaca = NULL;
-	struct ItLpRegSave *ptrLpRegSave = NULL;
-#endif
 
 	cmd = skipbl();
 	if (cmd == '\n') {
@@ -1576,26 +1585,32 @@
 		printf("sp   = "REG"  sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
 		printf("toc  = "REG"  dar  = "REG"\n", toc, mfspr(SPRN_DAR));
 #ifdef CONFIG_PPC_ISERIES
-		// Dump out relevant Paca data areas.
-		printf("Paca: \n");
-		ptrPaca = get_paca();
-    
-		printf("  Local Processor Control Area (LpPaca): \n");
-		ptrLpPaca = ptrPaca->lppaca_ptr;
-		printf("    Saved Srr0=%.16lx  Saved Srr1=%.16lx \n",
-		       ptrLpPaca->saved_srr0, ptrLpPaca->saved_srr1);
-		printf("    Saved Gpr3=%.16lx  Saved Gpr4=%.16lx \n",
-		       ptrLpPaca->saved_gpr3, ptrLpPaca->saved_gpr4);
-		printf("    Saved Gpr5=%.16lx \n", ptrLpPaca->saved_gpr5);
-    
-		printf("  Local Processor Register Save Area (LpRegSave): \n");
-		ptrLpRegSave = ptrPaca->reg_save_ptr;
-		printf("    Saved Sprg0=%.16lx  Saved Sprg1=%.16lx \n",
-		       ptrLpRegSave->xSPRG0, ptrLpRegSave->xSPRG0);
-		printf("    Saved Sprg2=%.16lx  Saved Sprg3=%.16lx \n",
-		       ptrLpRegSave->xSPRG2, ptrLpRegSave->xSPRG3);
-		printf("    Saved Msr  =%.16lx  Saved Nia  =%.16lx \n",
-		       ptrLpRegSave->xMSR, ptrLpRegSave->xNIA);
+		if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+			struct paca_struct *ptrPaca;
+			struct lppaca *ptrLpPaca;
+			struct ItLpRegSave *ptrLpRegSave;
+
+			/* Dump out relevant Paca data areas. */
+			printf("Paca: \n");
+			ptrPaca = get_paca();
+
+			printf("  Local Processor Control Area (LpPaca): \n");
+			ptrLpPaca = ptrPaca->lppaca_ptr;
+			printf("    Saved Srr0=%.16lx  Saved Srr1=%.16lx \n",
+			       ptrLpPaca->saved_srr0, ptrLpPaca->saved_srr1);
+			printf("    Saved Gpr3=%.16lx  Saved Gpr4=%.16lx \n",
+			       ptrLpPaca->saved_gpr3, ptrLpPaca->saved_gpr4);
+			printf("    Saved Gpr5=%.16lx \n", ptrLpPaca->saved_gpr5);
+
+			printf("  Local Processor Register Save Area (LpRegSave): \n");
+			ptrLpRegSave = ptrPaca->reg_save_ptr;
+			printf("    Saved Sprg0=%.16lx  Saved Sprg1=%.16lx \n",
+			       ptrLpRegSave->xSPRG0, ptrLpRegSave->xSPRG1);
+			printf("    Saved Sprg2=%.16lx  Saved Sprg3=%.16lx \n",
+			       ptrLpRegSave->xSPRG2, ptrLpRegSave->xSPRG3);
+			printf("    Saved Msr  =%.16lx  Saved Nia  =%.16lx \n",
+			       ptrLpRegSave->xMSR, ptrLpRegSave->xNIA);
+		}
 #endif
 
 		return;
@@ -2053,8 +2068,11 @@
 	}
 }
 
+typedef int (*instruction_dump_func)(unsigned long inst, unsigned long addr);
+
 int
-ppc_inst_dump(unsigned long adr, long count, int praddr)
+generic_inst_dump(unsigned long adr, long count, int praddr,
+			instruction_dump_func dump_func)
 {
 	int nr, dotted;
 	unsigned long first_adr;
@@ -2084,12 +2102,18 @@
 		if (praddr)
 			printf(REG"  %.8x", adr, inst);
 		printf("\t");
-		print_insn_powerpc(inst, adr, 0);	/* always returns 4 */
+		dump_func(inst, adr);
 		printf("\n");
 	}
 	return adr - first_adr;
 }
 
+int
+ppc_inst_dump(unsigned long adr, long count, int praddr)
+{
+	return generic_inst_dump(adr, count, praddr, print_insn_powerpc);
+}
+
 void
 print_address(unsigned long addr)
 {
@@ -2557,6 +2581,10 @@
 
 void xmon_init(int enable)
 {
+#ifdef CONFIG_PPC_ISERIES
+	if (firmware_has_feature(FW_FEATURE_ISERIES))
+		return;
+#endif
 	if (enable) {
 		__debugger = xmon;
 		__debugger_ipi = xmon_ipi;
@@ -2594,6 +2622,10 @@
 
 static int __init setup_xmon_sysrq(void)
 {
+#ifdef CONFIG_PPC_ISERIES
+	if (firmware_has_feature(FW_FEATURE_ISERIES))
+		return 0;
+#endif
 	register_sysrq_key('x', &sysrq_xmon_op);
 	return 0;
 }
@@ -2630,3 +2662,263 @@
 	if (xmon_early)
 		debugger(NULL);
 }
+
+#ifdef CONFIG_SPU_BASE
+
+struct spu_info {
+	struct spu *spu;
+	u64 saved_mfc_sr1_RW;
+	u32 saved_spu_runcntl_RW;
+	unsigned long dump_addr;
+	u8 stopped_ok;
+};
+
+#define XMON_NUM_SPUS	16	/* Enough for current hardware */
+
+static struct spu_info spu_info[XMON_NUM_SPUS];
+
+void xmon_register_spus(struct list_head *list)
+{
+	struct spu *spu;
+
+	list_for_each_entry(spu, list, full_list) {
+		if (spu->number >= XMON_NUM_SPUS) {
+			WARN_ON(1);
+			continue;
+		}
+
+		spu_info[spu->number].spu = spu;
+		spu_info[spu->number].stopped_ok = 0;
+		spu_info[spu->number].dump_addr = (unsigned long)
+				spu_info[spu->number].spu->local_store;
+	}
+}
+
+static void stop_spus(void)
+{
+	struct spu *spu;
+	int i;
+	u64 tmp;
+
+	for (i = 0; i < XMON_NUM_SPUS; i++) {
+		if (!spu_info[i].spu)
+			continue;
+
+		if (setjmp(bus_error_jmp) == 0) {
+			catch_memory_errors = 1;
+			sync();
+
+			spu = spu_info[i].spu;
+
+			spu_info[i].saved_spu_runcntl_RW =
+				in_be32(&spu->problem->spu_runcntl_RW);
+
+			tmp = spu_mfc_sr1_get(spu);
+			spu_info[i].saved_mfc_sr1_RW = tmp;
+
+			tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+			spu_mfc_sr1_set(spu, tmp);
+
+			sync();
+			__delay(200);
+
+			spu_info[i].stopped_ok = 1;
+
+			printf("Stopped spu %.2d (was %s)\n", i,
+					spu_info[i].saved_spu_runcntl_RW ?
+					"running" : "stopped");
+		} else {
+			catch_memory_errors = 0;
+			printf("*** Error stopping spu %.2d\n", i);
+		}
+		catch_memory_errors = 0;
+	}
+}
+
+static void restart_spus(void)
+{
+	struct spu *spu;
+	int i;
+
+	for (i = 0; i < XMON_NUM_SPUS; i++) {
+		if (!spu_info[i].spu)
+			continue;
+
+		if (!spu_info[i].stopped_ok) {
+			printf("*** Error, spu %d was not successfully stopped"
+					", not restarting\n", i);
+			continue;
+		}
+
+		if (setjmp(bus_error_jmp) == 0) {
+			catch_memory_errors = 1;
+			sync();
+
+			spu = spu_info[i].spu;
+			spu_mfc_sr1_set(spu, spu_info[i].saved_mfc_sr1_RW);
+			out_be32(&spu->problem->spu_runcntl_RW,
+					spu_info[i].saved_spu_runcntl_RW);
+
+			sync();
+			__delay(200);
+
+			printf("Restarted spu %.2d\n", i);
+		} else {
+			catch_memory_errors = 0;
+			printf("*** Error restarting spu %.2d\n", i);
+		}
+		catch_memory_errors = 0;
+	}
+}
+
+#define DUMP_WIDTH	23
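+/*
+ * Each field read below is wrapped in xmon's usual bus-error recovery:
+ * setjmp(bus_error_jmp) with catch_memory_errors set lets a faulting
+ * access long-jump back here instead of taking down the monitor.
+ */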
+#define DUMP_VALUE(format, field, value)				\
+do {									\
+	if (setjmp(bus_error_jmp) == 0) {				\
+		catch_memory_errors = 1;				\
+		sync();							\
+		printf("  %-*s = "format"\n", DUMP_WIDTH,		\
+				#field, value);				\
+		sync();							\
+		__delay(200);						\
+	} else {							\
+		catch_memory_errors = 0;				\
+		printf("  %-*s = *** Error reading field.\n",		\
+					DUMP_WIDTH, #field);		\
+	}								\
+	catch_memory_errors = 0;					\
+} while (0)
+
+#define DUMP_FIELD(obj, format, field)	\
+	DUMP_VALUE(format, field, obj->field)
+
+static void dump_spu_fields(struct spu *spu)
+{
+	printf("Dumping spu fields at address %p:\n", spu);
+
+	DUMP_FIELD(spu, "0x%x", number);
+	DUMP_FIELD(spu, "%s", name);
+	DUMP_FIELD(spu, "0x%lx", local_store_phys);
+	DUMP_FIELD(spu, "0x%p", local_store);
+	DUMP_FIELD(spu, "0x%lx", ls_size);
+	DUMP_FIELD(spu, "0x%x", node);
+	DUMP_FIELD(spu, "0x%lx", flags);
+	DUMP_FIELD(spu, "0x%lx", dar);
+	DUMP_FIELD(spu, "0x%lx", dsisr);
+	DUMP_FIELD(spu, "%d", class_0_pending);
+	DUMP_FIELD(spu, "0x%lx", irqs[0]);
+	DUMP_FIELD(spu, "0x%lx", irqs[1]);
+	DUMP_FIELD(spu, "0x%lx", irqs[2]);
+	DUMP_FIELD(spu, "0x%x", slb_replace);
+	DUMP_FIELD(spu, "%d", pid);
+	DUMP_FIELD(spu, "%d", prio);
+	DUMP_FIELD(spu, "0x%p", mm);
+	DUMP_FIELD(spu, "0x%p", ctx);
+	DUMP_FIELD(spu, "0x%p", rq);
+	DUMP_FIELD(spu, "0x%p", timestamp);
+	DUMP_FIELD(spu, "0x%lx", problem_phys);
+	DUMP_FIELD(spu, "0x%p", problem);
+	DUMP_VALUE("0x%x", problem->spu_runcntl_RW,
+			in_be32(&spu->problem->spu_runcntl_RW));
+	DUMP_VALUE("0x%x", problem->spu_status_R,
+			in_be32(&spu->problem->spu_status_R));
+	DUMP_VALUE("0x%x", problem->spu_npc_RW,
+			in_be32(&spu->problem->spu_npc_RW));
+	DUMP_FIELD(spu, "0x%p", priv2);
+	DUMP_FIELD(spu, "0x%p", pdata);
+}
+
+int
+spu_inst_dump(unsigned long adr, long count, int praddr)
+{
+	return generic_inst_dump(adr, count, praddr, print_insn_spu);
+}
+
+static void dump_spu_ls(unsigned long num, int subcmd)
+{
+	unsigned long offset, addr, ls_addr;
+
+	if (setjmp(bus_error_jmp) == 0) {
+		catch_memory_errors = 1;
+		sync();
+		ls_addr = (unsigned long)spu_info[num].spu->local_store;
+		sync();
+		__delay(200);
+	} else {
+		catch_memory_errors = 0;
+		printf("*** Error: accessing spu info for spu %ld\n", num);
+		return;
+	}
+	catch_memory_errors = 0;
+
+	if (scanhex(&offset))
+		addr = ls_addr + offset;
+	else
+		addr = spu_info[num].dump_addr;
+
+	if (addr >= ls_addr + LS_SIZE) {
+		printf("*** Error: address outside of local store\n");
+		return;
+	}
+
+	switch (subcmd) {
+	case 'i':
+		addr += spu_inst_dump(addr, 16, 1);
+		last_cmd = "sdi\n";
+		break;
+	default:
+		prdump(addr, 64);
+		addr += 64;
+		last_cmd = "sd\n";
+		break;
+	}
+
+	spu_info[num].dump_addr = addr;
+}
+
+static int do_spu_cmd(void)
+{
+	static unsigned long num = 0;
+	int cmd, subcmd = 0;
+
+	cmd = inchar();
+	switch (cmd) {
+	case 's':
+		stop_spus();
+		break;
+	case 'r':
+		restart_spus();
+		break;
+	case 'd':
+		subcmd = inchar();
+		if (isxdigit(subcmd) || subcmd == '\n')
+			termch = subcmd;
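+		/* fall through - 'd' shares the spu number parsing with 'f' */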
+	case 'f':
+		scanhex(&num);
+		if (num >= XMON_NUM_SPUS || !spu_info[num].spu) {
+			printf("*** Error: invalid spu number\n");
+			return 0;
+		}
+
+		switch (cmd) {
+		case 'f':
+			dump_spu_fields(spu_info[num].spu);
+			break;
+		default:
+			dump_spu_ls(num, subcmd);
+			break;
+		}
+
+		break;
+	default:
+		return -1;
+	}
+
+	return 0;
+}
+#else /* ! CONFIG_SPU_BASE */
+static int do_spu_cmd(void)
+{
+	return -1;
+}
+#endif
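Taken together, the xmon changes above add an 's' command family for Cell SPUs. A hypothetical session on a machine with SPU 0 registered might look like this (numbers are hex, as the help text says):

mon> ss           stop execution on all spus
mon> sf 0         dump the struct spu fields for SPU 0
mon> sd 0 1000    hex dump of SPU 0 local store starting at offset 0x1000
mon> sdi 0        disassemble SPU 0 local store at the saved dump address
mon> sr           restore execution on the stopped spus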
diff --git a/arch/ppc/.gitignore b/arch/ppc/.gitignore
new file mode 100644
index 0000000..a1a869c
--- /dev/null
+++ b/arch/ppc/.gitignore
@@ -0,0 +1 @@
+include
diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
index 2e1943e..709952c 100644
--- a/arch/ppc/8260_io/fcc_enet.c
+++ b/arch/ppc/8260_io/fcc_enet.c
@@ -385,6 +385,7 @@
 	phy_info_t	*phy;
 	struct work_struct phy_relink;
 	struct work_struct phy_display_config;
+	struct net_device *dev;
 
 	uint	sequence_done;
 
@@ -1391,10 +1392,11 @@
 	NULL
 };
 
-static void mii_display_status(void *data)
+static void mii_display_status(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	volatile struct fcc_enet_private *fep = dev->priv;
+	volatile struct fcc_enet_private *fep =
+		container_of(work, struct fcc_enet_private, phy_relink);
+	struct net_device *dev = fep->dev;
 	uint s = fep->phy_status;
 
 	if (!fep->link && !fep->old_link) {
@@ -1428,10 +1430,12 @@
 	printk(".\n");
 }
 
-static void mii_display_config(void *data)
+static void mii_display_config(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	volatile struct fcc_enet_private *fep = dev->priv;
+	volatile struct fcc_enet_private *fep =
+		container_of(work, struct fcc_enet_private,
+			     phy_display_config);
+	struct net_device *dev = fep->dev;
 	uint s = fep->phy_status;
 
 	printk("%s: config: auto-negotiation ", dev->name);
@@ -1758,8 +1762,9 @@
 		cep->phy_id_done = 0;
 		cep->phy_addr = fip->fc_phyaddr;
 		mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy);
-		INIT_WORK(&cep->phy_relink, mii_display_status, dev);
-		INIT_WORK(&cep->phy_display_config, mii_display_config, dev);
+		INIT_WORK(&cep->phy_relink, mii_display_status);
+		INIT_WORK(&cep->phy_display_config, mii_display_config);
+		cep->dev = dev;
 #endif	/* CONFIG_USE_MDIO */
 
 		fip++;
diff --git a/arch/ppc/8xx_io/fec.c b/arch/ppc/8xx_io/fec.c
index 2f9fa9e..e6c28fb 100644
--- a/arch/ppc/8xx_io/fec.c
+++ b/arch/ppc/8xx_io/fec.c
@@ -173,6 +173,7 @@
 	uint	phy_speed;
 	phy_info_t	*phy;
 	struct work_struct phy_task;
+	struct net_device *dev;
 
 	uint	sequence_done;
 
@@ -1263,10 +1264,11 @@
 	printk(".\n");
 }
 
-static void mii_display_config(void *priv)
+static void mii_display_config(struct work_struct *work)
 {
-	struct net_device *dev = (struct net_device *)priv;
-	struct fec_enet_private *fep = dev->priv;
+	struct fec_enet_private *fep =
+		container_of(work, struct fec_enet_private, phy_task);
+	struct net_device *dev = fep->dev;
 	volatile uint *s = &(fep->phy_status);
 
 	printk("%s: config: auto-negotiation ", dev->name);
@@ -1295,10 +1297,11 @@
 	fep->sequence_done = 1;
 }
 
-static void mii_relink(void *priv)
+static void mii_relink(struct work_struct *work)
 {
-	struct net_device *dev = (struct net_device *)priv;
-	struct fec_enet_private *fep = dev->priv;
+	struct fec_enet_private *fep =
+		container_of(work, struct fec_enet_private, phy_task);
+	struct net_device *dev = fep->dev;
 	int duplex;
 
 	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
@@ -1325,7 +1328,8 @@
 {
 	struct fec_enet_private *fep = dev->priv;
 
-	INIT_WORK(&fep->phy_task, mii_relink, (void *)dev);
+	fep->dev = dev;
+	INIT_WORK(&fep->phy_task, mii_relink);
 	schedule_work(&fep->phy_task);
 }
 
@@ -1333,7 +1337,8 @@
 {
 	struct fec_enet_private *fep = dev->priv;
 
-	INIT_WORK(&fep->phy_task, mii_display_config, (void *)dev);
+	fep->dev = dev;
+	INIT_WORK(&fep->phy_task, mii_display_config);
 	schedule_work(&fep->phy_task);
 }
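Both the fcc_enet and fec hunks above follow the same recipe for the workqueue API change: the handler now receives a struct work_struct *, recovers its private data with container_of(), and the net_device pointer is stashed in the private structure instead of being passed through INIT_WORK(). A condensed sketch of the pattern, with generic names that belong to neither driver:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct foo_private {
	struct work_struct phy_task;
	struct net_device *dev;		/* saved here; INIT_WORK no longer carries it */
};

static void foo_phy_task(struct work_struct *work)
{
	struct foo_private *priv =
		container_of(work, struct foo_private, phy_task);
	struct net_device *dev = priv->dev;

	printk(KERN_DEBUG "%s: phy task ran\n", dev->name);
}

static void foo_schedule_phy_task(struct net_device *dev,
				  struct foo_private *priv)
{
	priv->dev = dev;
	INIT_WORK(&priv->phy_task, foo_phy_task);
	schedule_work(&priv->phy_task);
}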
 
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 077711e..edf71a4 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -77,9 +77,11 @@
 
 config 40x
 	bool "40x"
+	select PPC_DCR_NATIVE
 
 config 44x
 	bool "44x"
+	select PPC_DCR_NATIVE
 
 config 8xx
 	bool "8xx"
@@ -95,6 +97,15 @@
 config PPC_FPU
 	bool
 
+config PPC_DCR_NATIVE
+	bool
+	default n
+
+config PPC_DCR
+	bool
+	depends on PPC_DCR_NATIVE
+	default y
+
 config BOOKE
 	bool
 	depends on E200 || E500
@@ -724,7 +735,7 @@
 	  Be aware that PCI buses can only function when SYS board is plugged
 	  into the PIB (Platform IO Board) board from Freescale which provide
 	  3 PCI slots.  The PIBs PCI initialization is the bootloader's
-	  responsiblilty.
+	  responsibility.
 
 config EV64360
 	bool "Marvell-EV64360BP"
diff --git a/arch/ppc/boot/images/.gitignore b/arch/ppc/boot/images/.gitignore
new file mode 100644
index 0000000..21c2dc5
--- /dev/null
+++ b/arch/ppc/boot/images/.gitignore
@@ -0,0 +1,6 @@
+sImage
+vmapus
+vmlinux*
+miboot*
+zImage*
+uImage
diff --git a/arch/ppc/boot/lib/.gitignore b/arch/ppc/boot/lib/.gitignore
new file mode 100644
index 0000000..1629a61
--- /dev/null
+++ b/arch/ppc/boot/lib/.gitignore
@@ -0,0 +1,3 @@
+inffast.c
+inflate.c
+inftrees.c
diff --git a/arch/ppc/boot/utils/.gitignore b/arch/ppc/boot/utils/.gitignore
new file mode 100644
index 0000000..bbdfb3b
--- /dev/null
+++ b/arch/ppc/boot/utils/.gitignore
@@ -0,0 +1,3 @@
+mkprep
+mkbugboot
+mktree
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 27faeca..3c506af 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -313,7 +313,7 @@
 	 * Identify the CPU type and fix up code sections
 	 * that depend on which cpu we have.
 	 */
-	spec = identify_cpu(offset);
+	spec = identify_cpu(offset, mfspr(SPRN_PVR));
 	do_feature_fixups(spec->cpu_features,
 			  PTRRELOC(&__start___ftr_fixup),
 			  PTRRELOC(&__stop___ftr_fixup));
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 9661a91..2f835b9 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -316,7 +316,7 @@
 	if (reason & MCSR_BUS_RBERR)
 		printk("Bus - Read Data Bus Error\n");
 	if (reason & MCSR_BUS_WBERR)
-		printk("Bus - Read Data Bus Error\n");
+		printk("Bus - Write Data Bus Error\n");
 	if (reason & MCSR_BUS_IPERR)
 		printk("Bus - Instruction Parity Error\n");
 	if (reason & MCSR_BUS_RPERR)
diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S
index 16e8661..6192126 100644
--- a/arch/ppc/kernel/vmlinux.lds.S
+++ b/arch/ppc/kernel/vmlinux.lds.S
@@ -31,6 +31,7 @@
   .plt : { *(.plt) }
   .text      :
   {
+    _text = .;
     *(.text)
     SCHED_TEXT
     LOCK_TEXT
diff --git a/arch/ppc/platforms/4xx/bubinga.c b/arch/ppc/platforms/4xx/bubinga.c
index 4009f49..75857b3 100644
--- a/arch/ppc/platforms/4xx/bubinga.c
+++ b/arch/ppc/platforms/4xx/bubinga.c
@@ -116,6 +116,7 @@
 void __init
 bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
 {
+#ifdef CONFIG_PCI
 
 	unsigned int bar_response, bar;
 	/*
@@ -212,6 +213,7 @@
 	printk(" ptm2la\t0x%x\n", in_le32(&(pcip->ptm2la)));
 
 #endif
+#endif
 }
 
 void __init
diff --git a/arch/ppc/platforms/4xx/cpci405.c b/arch/ppc/platforms/4xx/cpci405.c
index 3674309..8474b05 100644
--- a/arch/ppc/platforms/4xx/cpci405.c
+++ b/arch/ppc/platforms/4xx/cpci405.c
@@ -126,6 +126,7 @@
 void __init
 bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
 {
+#ifdef CONFIG_PCI
 	unsigned int bar_response, bar;
 
 	/* Disable region first */
@@ -167,6 +168,7 @@
 					PCI_FUNC(hose->first_busno), bar,
 					&bar_response);
 	}
+#endif
 }
 
 void __init
diff --git a/arch/ppc/platforms/4xx/ep405.c b/arch/ppc/platforms/4xx/ep405.c
index ae5c820..e5adf9b 100644
--- a/arch/ppc/platforms/4xx/ep405.c
+++ b/arch/ppc/platforms/4xx/ep405.c
@@ -68,6 +68,7 @@
 void __init
 bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
 {
+#ifdef CONFIG_PCI
 	unsigned int bar_response, bar;
 	/*
 	 * Expected PCI mapping:
@@ -130,6 +131,7 @@
 		    PCI_FUNC(hose->first_busno), bar, bar_response);
 	}
 	/* end work arround */
+#endif
 }
 
 void __init
diff --git a/arch/ppc/platforms/83xx/mpc834x_sys.c b/arch/ppc/platforms/83xx/mpc834x_sys.c
index 3397f0d..b84f8df 100644
--- a/arch/ppc/platforms/83xx/mpc834x_sys.c
+++ b/arch/ppc/platforms/83xx/mpc834x_sys.c
@@ -121,8 +121,8 @@
 
 	mdata->irq[0] = MPC83xx_IRQ_EXT1;
 	mdata->irq[1] = MPC83xx_IRQ_EXT2;
-	mdata->irq[2] = -1;
-	mdata->irq[31] = -1;
+	mdata->irq[2] = PHY_POLL;
+	mdata->irq[31] = PHY_POLL;
 
 	/* setup the board related information for the enet controllers */
 	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC83xx_TSEC1);
diff --git a/arch/ppc/platforms/85xx/mpc8540_ads.c b/arch/ppc/platforms/85xx/mpc8540_ads.c
index 4f839da..00a3ba5 100644
--- a/arch/ppc/platforms/85xx/mpc8540_ads.c
+++ b/arch/ppc/platforms/85xx/mpc8540_ads.c
@@ -92,9 +92,9 @@
 
 	mdata->irq[0] = MPC85xx_IRQ_EXT5;
 	mdata->irq[1] = MPC85xx_IRQ_EXT5;
-	mdata->irq[2] = -1;
+	mdata->irq[2] = PHY_POLL;
 	mdata->irq[3] = MPC85xx_IRQ_EXT5;
-	mdata->irq[31] = -1;
+	mdata->irq[31] = PHY_POLL;
 
 	/* setup the board related information for the enet controllers */
 	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
diff --git a/arch/ppc/platforms/85xx/mpc8560_ads.c b/arch/ppc/platforms/85xx/mpc8560_ads.c
index 14ecec7..3a06046 100644
--- a/arch/ppc/platforms/85xx/mpc8560_ads.c
+++ b/arch/ppc/platforms/85xx/mpc8560_ads.c
@@ -156,9 +156,9 @@
 
 	mdata->irq[0] = MPC85xx_IRQ_EXT5;
 	mdata->irq[1] = MPC85xx_IRQ_EXT5;
-	mdata->irq[2] = -1;
+	mdata->irq[2] = PHY_POLL;
 	mdata->irq[3] = MPC85xx_IRQ_EXT5;
-	mdata->irq[31] = -1;
+	mdata->irq[31] = PHY_POLL;
 
 	/* setup the board related information for the enet controllers */
 	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
diff --git a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
index 5ce0f69..2d59eb7 100644
--- a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
+++ b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
@@ -451,9 +451,9 @@
 
 	mdata->irq[0] = MPC85xx_IRQ_EXT5;
 	mdata->irq[1] = MPC85xx_IRQ_EXT5;
-	mdata->irq[2] = -1;
-	mdata->irq[3] = -1;
-	mdata->irq[31] = -1;
+	mdata->irq[2] = PHY_POLL;
+	mdata->irq[3] = PHY_POLL;
+	mdata->irq[31] = PHY_POLL;
 
 	/* setup the board related information for the enet controllers */
 	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
diff --git a/arch/ppc/platforms/85xx/sbc8560.c b/arch/ppc/platforms/85xx/sbc8560.c
index 764d580..1d10ab9 100644
--- a/arch/ppc/platforms/85xx/sbc8560.c
+++ b/arch/ppc/platforms/85xx/sbc8560.c
@@ -129,7 +129,7 @@
 
 	mdata->irq[25] = MPC85xx_IRQ_EXT6;
 	mdata->irq[26] = MPC85xx_IRQ_EXT7;
-	mdata->irq[31] = -1;
+	mdata->irq[31] = PHY_POLL;
 
 	/* setup the board related information for the enet controllers */
 	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
diff --git a/arch/ppc/platforms/85xx/stx_gp3.c b/arch/ppc/platforms/85xx/stx_gp3.c
index 4bb18ab..b1f5b73 100644
--- a/arch/ppc/platforms/85xx/stx_gp3.c
+++ b/arch/ppc/platforms/85xx/stx_gp3.c
@@ -123,7 +123,7 @@
 
 	mdata->irq[2] = MPC85xx_IRQ_EXT5;
 	mdata->irq[4] = MPC85xx_IRQ_EXT5;
-	mdata->irq[31] = -1;
+	mdata->irq[31] = PHY_POLL;
 
 	/* setup the board related information for the enet controllers */
 	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
diff --git a/arch/ppc/platforms/85xx/tqm85xx.c b/arch/ppc/platforms/85xx/tqm85xx.c
index dd45f2e..4ee2bd1 100644
--- a/arch/ppc/platforms/85xx/tqm85xx.c
+++ b/arch/ppc/platforms/85xx/tqm85xx.c
@@ -137,9 +137,9 @@
 
 	mdata->irq[0] = MPC85xx_IRQ_EXT8;
 	mdata->irq[1] = MPC85xx_IRQ_EXT8;
-	mdata->irq[2] = -1;
+	mdata->irq[2] = PHY_POLL;
 	mdata->irq[3] = MPC85xx_IRQ_EXT8;
-	mdata->irq[31] = -1;
+	mdata->irq[31] = PHY_POLL;
 
 	/* setup the board related information for the enet controllers */
 	pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
diff --git a/arch/ppc/platforms/mpc8272ads_setup.c b/arch/ppc/platforms/mpc8272ads_setup.c
index 1f9ea36..0bc0676 100644
--- a/arch/ppc/platforms/mpc8272ads_setup.c
+++ b/arch/ppc/platforms/mpc8272ads_setup.c
@@ -266,10 +266,10 @@
 					      int idx)
 {
 	m82xx_mii_bb_pdata.irq[0] = PHY_INTERRUPT;
-	m82xx_mii_bb_pdata.irq[1] = -1;
-	m82xx_mii_bb_pdata.irq[2] = -1;
+	m82xx_mii_bb_pdata.irq[1] = PHY_POLL;
+	m82xx_mii_bb_pdata.irq[2] = PHY_POLL;
 	m82xx_mii_bb_pdata.irq[3] = PHY_INTERRUPT;
-	m82xx_mii_bb_pdata.irq[31] = -1;
+	m82xx_mii_bb_pdata.irq[31] = PHY_POLL;
 
 
 	m82xx_mii_bb_pdata.mdio_dat.offset =
diff --git a/arch/ppc/platforms/mpc866ads_setup.c b/arch/ppc/platforms/mpc866ads_setup.c
index e95d2c1..8a0c07e 100644
--- a/arch/ppc/platforms/mpc866ads_setup.c
+++ b/arch/ppc/platforms/mpc866ads_setup.c
@@ -361,7 +361,7 @@
 
 	fmpi->mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2) & 0x3F) << 1;
 	/* No PHY interrupt line here */
-	fmpi->irq[0xf] = -1;
+	fmpi->irq[0xf] = PHY_POLL;
 
 /* Since either of the uarts could be used as console, they need to ready */
 #ifdef CONFIG_SERIAL_CPM_SMC1
@@ -380,7 +380,7 @@
 
 	fmpi->mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2) & 0x3F) << 1;
 	/* No PHY interrupt line here */
-	fmpi->irq[0xf] = -1;
+	fmpi->irq[0xf] = PHY_POLL;
 
 	return 0;
 }
diff --git a/arch/ppc/syslib/mpc8xx_devices.c b/arch/ppc/syslib/mpc8xx_devices.c
index cf5ab47..31fb565 100644
--- a/arch/ppc/syslib/mpc8xx_devices.c
+++ b/arch/ppc/syslib/mpc8xx_devices.c
@@ -78,7 +78,7 @@
 			{
 				.name 	= "pram",
 				.start 	= 0x3c00,
-				.end 	= 0x3c80,
+				.end 	= 0x3c7f,
 				.flags 	= IORESOURCE_MEM,
 			},
 			{
@@ -103,7 +103,7 @@
 			{
 				.name 	= "pram",
 				.start 	= 0x3d00,
-				.end 	= 0x3d80,
+				.end 	= 0x3d7f,
 				.flags 	= IORESOURCE_MEM,
 			},
 
@@ -129,7 +129,7 @@
 			{
 				.name 	= "pram",
 				.start 	= 0x3e00,
-				.end 	= 0x3e80,
+				.end 	= 0x3e7f,
 				.flags 	= IORESOURCE_MEM,
 			},
 
@@ -155,7 +155,7 @@
 			{
 				.name 	= "pram",
 				.start 	= 0x3f00,
-				.end 	= 0x3f80,
+				.end 	= 0x3f7f,
 				.flags 	= IORESOURCE_MEM,
 			},
 
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 245b81b..583d9ff 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -33,9 +33,6 @@
 config GENERIC_TIME
 	def_bool y
 
-config GENERIC_BUST_SPINLOCK
-	bool
-
 mainmenu "Linux Kernel Configuration"
 
 config S390
@@ -181,7 +178,7 @@
 
 config SMALL_STACK
 	bool "Use 4kb/8kb for kernel stack instead of 8kb/16kb"
-	depends on PACK_STACK
+	depends on PACK_STACK && !LOCKDEP
 	help
 	  If you say Y here and the compiler supports the -mkernel-backchain
 	  option the kernel will use a smaller kernel stack size. For 31 bit
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 5deb9f7..6598e52 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -35,6 +35,9 @@
 cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
 cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
 
+#KBUILD_IMAGE is necessary for make rpm
+KBUILD_IMAGE	:=arch/s390/boot/image
+
 #
 # Prevent tail-call optimizations, to get clearer backtraces:
 #
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index af1e8fc..b8c2372 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -92,8 +92,8 @@
  * Work queue
  */
 static struct workqueue_struct *appldata_wq;
-static void appldata_work_fn(void *data);
-static DECLARE_WORK(appldata_work, appldata_work_fn, NULL);
+static void appldata_work_fn(struct work_struct *work);
+static DECLARE_WORK(appldata_work, appldata_work_fn);
 
 
 /*
@@ -125,7 +125,7 @@
  *
  * call data gathering function for each (active) module
  */
-static void appldata_work_fn(void *data)
+static void appldata_work_fn(struct work_struct *work)
 {
 	struct list_head *lh;
 	struct appldata_ops *ops;
@@ -561,7 +561,6 @@
 	spin_unlock(&appldata_timer_lock);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int __cpuinit
 appldata_cpu_notify(struct notifier_block *self,
 		    unsigned long action, void *hcpu)
@@ -582,7 +581,6 @@
 static struct notifier_block appldata_nb = {
 	.notifier_call = appldata_cpu_notify,
 };
-#endif
 
 /*
  * appldata_init()
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index aa97897..a81881c 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -4,7 +4,7 @@
 
 EXTRA_AFLAGS	:= -traditional
 
-obj-y	:=  bitmap.o traps.o time.o process.o \
+obj-y	:=  bitmap.o traps.o time.o process.o reset.o \
             setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
 	    semaphore.o s390_ext.o debug.o profile.o irq.o ipl.o
 
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
index 9565a2d..5c46054 100644
--- a/arch/s390/kernel/binfmt_elf32.c
+++ b/arch/s390/kernel/binfmt_elf32.c
@@ -176,7 +176,6 @@
 
 #include <linux/highuid.h>
 
-#define elf_addr_t	u32
 /*
 #define init_elf_binfmt init_elf32_binfmt
 */
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index 1eae74e..a5972f1 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -21,14 +21,15 @@
 static char cpcmd_buf[241];
 
 /*
- * the caller of __cpcmd has to ensure that the response buffer is below 2 GB
+ * __cpcmd has some restrictions over cpcmd
+ *  - the response buffer must reside below 2GB (if any)
+ *  - __cpcmd is unlocked and therefore not SMP-safe
  */
 int  __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
 {
-	unsigned long flags, cmdlen;
+	unsigned cmdlen;
 	int return_code, return_len;
 
-	spin_lock_irqsave(&cpcmd_lock, flags);
 	cmdlen = strlen(cmd);
 	BUG_ON(cmdlen > 240);
 	memcpy(cpcmd_buf, cmd, cmdlen);
@@ -74,7 +75,6 @@
 			: "+d" (reg3) : "d" (reg2) : "cc");
 		return_code = (int) reg3;
         }
-	spin_unlock_irqrestore(&cpcmd_lock, flags);
 	if (response_code != NULL)
 		*response_code = return_code;
 	return return_len;
@@ -82,15 +82,18 @@
 
 EXPORT_SYMBOL(__cpcmd);
 
-#ifdef CONFIG_64BIT
 int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
 {
 	char *lowbuf;
 	int len;
+	unsigned long flags;
 
 	if ((rlen == 0) || (response == NULL)
-	    || !((unsigned long)response >> 31))
+	    || !((unsigned long)response >> 31)) {
+		spin_lock_irqsave(&cpcmd_lock, flags);
 		len = __cpcmd(cmd, response, rlen, response_code);
+		spin_unlock_irqrestore(&cpcmd_lock, flags);
+	}
 	else {
 		lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
 		if (!lowbuf) {
@@ -98,7 +101,9 @@
 				"cpcmd: could not allocate response buffer\n");
 			return -ENOMEM;
 		}
+		spin_lock_irqsave(&cpcmd_lock, flags);
 		len = __cpcmd(cmd, lowbuf, rlen, response_code);
+		spin_unlock_irqrestore(&cpcmd_lock, flags);
 		memcpy(response, lowbuf, rlen);
 		kfree(lowbuf);
 	}
@@ -106,4 +111,3 @@
 }
 
 EXPORT_SYMBOL(cpcmd);
-#endif		/* CONFIG_64BIT */
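With the locking moved up into cpcmd(), __cpcmd() is now the raw, caller-locked variant and cpcmd() is safe to call from anywhere, bouncing through a GFP_DMA buffer when the response would otherwise land above 2GB. A hypothetical caller, using the prototype shown in this hunk (the CP command string is only an example):

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/cpcmd.h>

static void show_vm_userid(void)
{
	char resp[128];
	int rc, len;

	memset(resp, 0, sizeof(resp));
	len = cpcmd("QUERY USERID", resp, sizeof(resp) - 1, &rc);
	printk(KERN_INFO "cp said (%d bytes, rc=%d): %s\n", len, rc, resp);
}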
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 0cf59bb..8f8c802 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -418,24 +418,6 @@
 .gotr:
 	l	%r10,.tbl		# EBCDIC to ASCII table
 	tr	0(240,%r8),0(%r10)
-	stidp	__LC_CPUID		# Are we running on VM maybe
-	cli	__LC_CPUID,0xff
-	bnz	.test
-	.long	0x83300060		# diag 3,0,x'0060' - storage size
-	b	.done
-.test:
-	mvc	0x68(8),.pgmnw		# set up pgm check handler
-	l	%r2,.fourmeg
-	lr	%r3,%r2
-	bctr	%r3,%r0			# 4M-1
-.loop:	iske	%r0,%r3
-	ar	%r3,%r2
-.pgmx:
-	sr	%r3,%r2
-	la	%r3,1(%r3)
-.done:
-	l	%r1,.memsize
-	st	%r3,ARCH_OFFSET(%r1)
 	slr	%r0,%r0
 	st	%r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
 	st	%r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
@@ -443,9 +425,6 @@
 .tbl:	.long	_ebcasc			# translate table
 .cmd:	.long	COMMAND_LINE		# address of command line buffer
 .parm:	.long	PARMAREA
-.memsize: .long memory_size
-.fourmeg: .long 0x00400000      	# 4M
-.pgmnw:	.long	0x00080000,.pgmx
 .lowcase:
 	.byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
 	.byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 0a2c929..4388b33 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -131,10 +131,11 @@
 	.long	init_thread_union
 .Lpmask:
 	.byte	0
-.align 8
+	.align	8
 .Lpcext:.long	0x00080000,0x80000000
 .Lcr:
 	.long	0x00			# place holder for cr0
+	.align	8
 .Lwaitsclp:
 	.long 0x010a0000,0x80000000 + .Lsclph
 .Lrcp:
@@ -156,7 +157,7 @@
 	slr	%r4,%r4			# set start of chunk to zero
 	slr	%r5,%r5			# set end of chunk to zero
 	slr	%r6,%r6			# set access code to zero
-	la	%r10, MEMORY_CHUNKS	# number of chunks
+	la	%r10,MEMORY_CHUNKS	# number of chunks
 .Lloop:
 	tprot	0(%r5),0		# test protection of first byte
 	ipm	%r7
@@ -176,8 +177,6 @@
 	st	%r0,4(%r3)		# store size of chunk
 	st	%r6,8(%r3)		# store type of chunk
 	la	%r3,12(%r3)
-	l	%r4,.Lmemsize-.LPG1(%r13)	 # address of variable memory_size
-	st	%r5,0(%r4)		# store last end to memory size
 	ahi	%r10,-1			# update chunk number
 .Lchkloop:
 	lr	%r6,%r7			# set access code to last cc
@@ -292,7 +291,6 @@
 .Lpcmvpg:.long	0x00080000,0x80000000 + .Lchkmvpg
 .Lpcidte:.long	0x00080000,0x80000000 + .Lchkidte
 .Lpcdiag9c:.long 0x00080000,0x80000000 + .Lchkdiag9c
-.Lmemsize:.long memory_size
 .Lmchunk:.long	memory_chunk
 .Lmflags:.long	machine_flags
 .Lbss_bgn:  .long __bss_start
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 42f54d4..c526279e1 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -70,7 +70,20 @@
 	sgr	%r5,%r5 		# set src,length and pad to zero
 	mvcle	%r2,%r4,0		# clear mem
 	jo	.-4			# branch back, if not finish
+					# set program check new psw mask
+	mvc	__LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
+	larl	%r1,.Lslowmemdetect	# set program check address
+	stg	%r1,__LC_PGM_NEW_PSW+8
+	lghi	%r1,0xc
+	diag	%r0,%r1,0x260		# get memory size of virtual machine
+	cgr	%r0,%r1			# different? -> old detection routine
+	jne	.Lslowmemdetect
+	aghi	%r1,1			# size is one more than end
+	larl	%r2,memory_chunk
+	stg	%r1,8(%r2)		# store size of chunk
+	j	.Ldonemem
 
+.Lslowmemdetect:
 	l	%r2,.Lrcp-.LPG1(%r13)	# Read SCP forced command word
 .Lservicecall:
 	stosm	.Lpmask-.LPG1(%r13),0x01	# authorize ext interrupts
@@ -139,8 +152,6 @@
 	.int	0x100000
 
 .Lfchunk:
-					# set program check new psw mask
-	mvc	__LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
 
 #
 # find memory chunks.
@@ -175,8 +186,6 @@
 	stg	%r0,8(%r3)		# store size of chunk
 	st	%r6,20(%r3)		# store type of chunk
 	la	%r3,24(%r3)
-	larl	%r8,memory_size
-	stg	%r5,0(%r8)		# store memory size
 	ahi	%r10,-1			# update chunk number
 .Lchkloop:
 	lr	%r6,%r7			# set access code to last cc
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 1f5e782..a36bea1 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -13,12 +13,21 @@
 #include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
+#include <linux/ctype.h>
 #include <asm/smp.h>
 #include <asm/setup.h>
 #include <asm/cpcmd.h>
 #include <asm/cio.h>
+#include <asm/ebcdic.h>
+#include <asm/reset.h>
 
 #define IPL_PARM_BLOCK_VERSION 0
+#define LOADPARM_LEN 8
+
+extern char s390_readinfo_sccb[];
+#define SCCB_VALID (*((__u16*)&s390_readinfo_sccb[6]) == 0x0010)
+#define SCCB_LOADPARM (&s390_readinfo_sccb[24])
+#define SCCB_FLAG (s390_readinfo_sccb[91])
 
 enum ipl_type {
 	IPL_TYPE_NONE	 = 1,
@@ -289,9 +298,25 @@
 
 /* CCW ipl device attributes */
 
+static ssize_t ipl_ccw_loadparm_show(struct subsystem *subsys, char *page)
+{
+	char loadparm[LOADPARM_LEN + 1] = {};
+
+	if (!SCCB_VALID)
+		return sprintf(page, "#unknown#\n");
+	memcpy(loadparm, SCCB_LOADPARM, LOADPARM_LEN);
+	EBCASC(loadparm, LOADPARM_LEN);
+	strstrip(loadparm);
+	return sprintf(page, "%s\n", loadparm);
+}
+
+static struct subsys_attribute sys_ipl_ccw_loadparm_attr =
+	__ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
+
 static struct attribute *ipl_ccw_attrs[] = {
 	&sys_ipl_type_attr.attr,
 	&sys_ipl_device_attr.attr,
+	&sys_ipl_ccw_loadparm_attr.attr,
 	NULL,
 };
 
@@ -348,8 +373,57 @@
 DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
 	reipl_block_ccw->ipl_info.ccw.devno);
 
+static void reipl_get_ascii_loadparm(char *loadparm)
+{
+	memcpy(loadparm, &reipl_block_ccw->ipl_info.ccw.load_param,
+	       LOADPARM_LEN);
+	EBCASC(loadparm, LOADPARM_LEN);
+	loadparm[LOADPARM_LEN] = 0;
+	strstrip(loadparm);
+}
+
+static ssize_t reipl_ccw_loadparm_show(struct subsystem *subsys, char *page)
+{
+	char buf[LOADPARM_LEN + 1];
+
+	reipl_get_ascii_loadparm(buf);
+	return sprintf(page, "%s\n", buf);
+}
+
+static ssize_t reipl_ccw_loadparm_store(struct subsystem *subsys,
+					const char *buf, size_t len)
+{
+	int i, lp_len;
+
+	/* ignore trailing newline */
+	lp_len = len;
+	if ((len > 0) && (buf[len - 1] == '\n'))
+		lp_len--;
+	/* loadparm can have max 8 characters and must not start with a blank */
+	if ((lp_len > LOADPARM_LEN) || ((lp_len > 0) && (buf[0] == ' ')))
+		return -EINVAL;
+	/* loadparm can only contain "a-z,A-Z,0-9,SP,." */
+	for (i = 0; i < lp_len; i++) {
+		if (isalpha(buf[i]) || isdigit(buf[i]) || (buf[i] == ' ') ||
+		    (buf[i] == '.'))
+			continue;
+		return -EINVAL;
+	}
+	/* initialize loadparm with blanks */
+	memset(&reipl_block_ccw->ipl_info.ccw.load_param, ' ', LOADPARM_LEN);
+	/* copy and convert to ebcdic */
+	memcpy(&reipl_block_ccw->ipl_info.ccw.load_param, buf, lp_len);
+	ASCEBC(reipl_block_ccw->ipl_info.ccw.load_param, LOADPARM_LEN);
+	return len;
+}
+
+static struct subsys_attribute sys_reipl_ccw_loadparm_attr =
+	__ATTR(loadparm, 0644, reipl_ccw_loadparm_show,
+	       reipl_ccw_loadparm_store);
+
 static struct attribute *reipl_ccw_attrs[] = {
 	&sys_reipl_ccw_device_attr.attr,
+	&sys_reipl_ccw_loadparm_attr.attr,
 	NULL,
 };
 
@@ -502,23 +576,6 @@
 
 static decl_subsys(dump, NULL, NULL);
 
-#ifdef CONFIG_SMP
-static void dump_smp_stop_all(void)
-{
-	int cpu;
-	preempt_disable();
-	for_each_online_cpu(cpu) {
-		if (cpu == smp_processor_id())
-			continue;
-		while (signal_processor(cpu, sigp_stop) == sigp_busy)
-			udelay(10);
-	}
-	preempt_enable();
-}
-#else
-#define dump_smp_stop_all() do { } while (0)
-#endif
-
 /*
  * Shutdown actions section
  */
@@ -571,11 +628,14 @@
 {
 	struct ccw_dev_id devid;
 	static char buf[100];
+	char loadparm[LOADPARM_LEN + 1];
 
 	switch (reipl_type) {
 	case IPL_TYPE_CCW:
+		reipl_get_ascii_loadparm(loadparm);
 		printk(KERN_EMERG "reboot on ccw device: 0.0.%04x\n",
 			reipl_block_ccw->ipl_info.ccw.devno);
+		printk(KERN_EMERG "loadparm = '%s'\n", loadparm);
 		break;
 	case IPL_TYPE_FCP:
 		printk(KERN_EMERG "reboot on fcp device:\n");
@@ -588,12 +648,19 @@
 	switch (reipl_method) {
 	case IPL_METHOD_CCW_CIO:
 		devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
+		if (ipl_get_type() == IPL_TYPE_CCW && devid.devno == ipl_devno)
+			diag308(DIAG308_IPL, NULL);
 		devid.ssid  = 0;
 		reipl_ccw_dev(&devid);
 		break;
 	case IPL_METHOD_CCW_VM:
-		sprintf(buf, "IPL %X", reipl_block_ccw->ipl_info.ccw.devno);
-		cpcmd(buf, NULL, 0, NULL);
+		if (strlen(loadparm) == 0)
+			sprintf(buf, "IPL %X",
+				reipl_block_ccw->ipl_info.ccw.devno);
+		else
+			sprintf(buf, "IPL %X LOADPARM '%s'",
+				reipl_block_ccw->ipl_info.ccw.devno, loadparm);
+		__cpcmd(buf, NULL, 0, NULL);
 		break;
 	case IPL_METHOD_CCW_DIAG:
 		diag308(DIAG308_SET, reipl_block_ccw);
@@ -607,16 +674,17 @@
 		diag308(DIAG308_IPL, NULL);
 		break;
 	case IPL_METHOD_FCP_RO_VM:
-		cpcmd("IPL", NULL, 0, NULL);
+		__cpcmd("IPL", NULL, 0, NULL);
 		break;
 	case IPL_METHOD_NONE:
 	default:
 		if (MACHINE_IS_VM)
-			cpcmd("IPL", NULL, 0, NULL);
+			__cpcmd("IPL", NULL, 0, NULL);
 		diag308(DIAG308_IPL, NULL);
 		break;
 	}
-	panic("reipl failed!\n");
+	printk(KERN_EMERG "reboot failed!\n");
+	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
 }
 
 static void do_dump(void)
@@ -639,17 +707,17 @@
 
 	switch (dump_method) {
 	case IPL_METHOD_CCW_CIO:
-		dump_smp_stop_all();
+		smp_send_stop();
 		devid.devno = dump_block_ccw->ipl_info.ccw.devno;
 		devid.ssid  = 0;
 		reipl_ccw_dev(&devid);
 		break;
 	case IPL_METHOD_CCW_VM:
-		dump_smp_stop_all();
+		smp_send_stop();
 		sprintf(buf, "STORE STATUS");
-		cpcmd(buf, NULL, 0, NULL);
+		__cpcmd(buf, NULL, 0, NULL);
 		sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
-		cpcmd(buf, NULL, 0, NULL);
+		__cpcmd(buf, NULL, 0, NULL);
 		break;
 	case IPL_METHOD_CCW_DIAG:
 		diag308(DIAG308_SET, dump_block_ccw);
@@ -746,6 +814,17 @@
 	reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
 	reipl_block_ccw->hdr.blk0_len = sizeof(reipl_block_ccw->ipl_info.ccw);
 	reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
+	/* check if read scp info worked and set loadparm */
+	if (SCCB_VALID)
+		memcpy(reipl_block_ccw->ipl_info.ccw.load_param,
+		       SCCB_LOADPARM, LOADPARM_LEN);
+	else
+		/* read scp info failed: set empty loadparm (EBCDIC blanks) */
+		memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40,
+		       LOADPARM_LEN);
+	/* FIXME: check for diag308_set_works when enabling diag ccw reipl */
+	if (!MACHINE_IS_VM)
+		sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
 	if (ipl_get_type() == IPL_TYPE_CCW)
 		reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
 	reipl_capabilities |= IPL_TYPE_CCW;
@@ -827,13 +906,11 @@
 	return 0;
 }
 
-extern char s390_readinfo_sccb[];
-
 static int __init dump_fcp_init(void)
 {
 	int rc;
 
-	if(!(s390_readinfo_sccb[91] & 0x2))
+	if(!(SCCB_FLAG & 0x2) || !SCCB_VALID)
 		return 0; /* LDIPL DUMP is not installed */
 	if (!diag308_set_works)
 		return 0;
@@ -931,3 +1008,53 @@
 }
 
 __initcall(s390_ipl_init);
+
+static LIST_HEAD(rcall);
+static DEFINE_MUTEX(rcall_mutex);
+
+void register_reset_call(struct reset_call *reset)
+{
+	mutex_lock(&rcall_mutex);
+	list_add(&reset->list, &rcall);
+	mutex_unlock(&rcall_mutex);
+}
+EXPORT_SYMBOL_GPL(register_reset_call);
+
+void unregister_reset_call(struct reset_call *reset)
+{
+	mutex_lock(&rcall_mutex);
+	list_del(&reset->list);
+	mutex_unlock(&rcall_mutex);
+}
+EXPORT_SYMBOL_GPL(unregister_reset_call);
+
+static void do_reset_calls(void)
+{
+	struct reset_call *reset;
+
+	list_for_each_entry(reset, &rcall, list)
+		reset->fn();
+}
+
+extern void reset_mcck_handler(void);
+
+void s390_reset_system(void)
+{
+	struct _lowcore *lc;
+
+	/* Stack for interrupt/machine check handler */
+	lc = (struct _lowcore *)(unsigned long) store_prefix();
+	lc->panic_stack = S390_lowcore.panic_stack;
+
+	/* Disable prefixing */
+	set_prefix(0);
+
+	/* Disable lowcore protection */
+	__ctl_clear_bit(0,28);
+
+	/* Set new machine check handler */
+	S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK;
+	S390_lowcore.mcck_new_psw.addr =
+		PSW_ADDR_AMODE | (unsigned long) &reset_mcck_handler;
+	do_reset_calls();
+}
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 67914fe..576368c 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -200,7 +200,7 @@
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
+	free_insn_slot(p->ainsn.insn, 0);
 	mutex_unlock(&kprobe_mutex);
 }
 
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 60b1ea9..f6d9bcc 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -1,15 +1,10 @@
 /*
  * arch/s390/kernel/machine_kexec.c
  *
- * (C) Copyright IBM Corp. 2005
+ * Copyright IBM Corp. 2005,2006
  *
- * Author(s): Rolf Adelsberger <adelsberger@de.ibm.com>
- *
- */
-
-/*
- * s390_machine_kexec.c - handle the transition of Linux booting another kernel
- * on the S390 architecture.
+ * Author(s): Rolf Adelsberger,
+ *	      Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
 #include <linux/device.h>
@@ -22,86 +17,49 @@
 #include <asm/pgalloc.h>
 #include <asm/system.h>
 #include <asm/smp.h>
+#include <asm/reset.h>
 
-static void kexec_halt_all_cpus(void *);
-
-typedef void (*relocate_kernel_t) (kimage_entry_t *, unsigned long);
+typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
 
 extern const unsigned char relocate_kernel[];
 extern const unsigned long long relocate_kernel_len;
 
-int
-machine_kexec_prepare(struct kimage *image)
+int machine_kexec_prepare(struct kimage *image)
 {
-	unsigned long reboot_code_buffer;
+	void *reboot_code_buffer;
 
 	/* We don't support anything but the default image type for now. */
 	if (image->type != KEXEC_TYPE_DEFAULT)
 		return -EINVAL;
 
 	/* Get the destination where the assembler code should be copied to.*/
-	reboot_code_buffer = page_to_pfn(image->control_code_page)<<PAGE_SHIFT;
+	reboot_code_buffer = (void *) page_to_phys(image->control_code_page);
 
 	/* Then copy it */
-	memcpy((void *) reboot_code_buffer, relocate_kernel,
-	       relocate_kernel_len);
+	memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
 	return 0;
 }
 
-void
-machine_kexec_cleanup(struct kimage *image)
+void machine_kexec_cleanup(struct kimage *image)
 {
 }
 
-void
-machine_shutdown(void)
+void machine_shutdown(void)
 {
 	printk(KERN_INFO "kexec: machine_shutdown called\n");
 }
 
-NORET_TYPE void
-machine_kexec(struct kimage *image)
+void machine_kexec(struct kimage *image)
 {
-	clear_all_subchannels();
-	cio_reset_channel_paths();
-
-	/* Disable lowcore protection */
-	ctl_clear_bit(0,28);
-
-	on_each_cpu(kexec_halt_all_cpus, image, 0, 0);
-	for (;;);
-}
-
-extern void pfault_fini(void);
-
-static void
-kexec_halt_all_cpus(void *kernel_image)
-{
-	static atomic_t cpuid = ATOMIC_INIT(-1);
-	int cpu;
-	struct kimage *image;
 	relocate_kernel_t data_mover;
 
-#ifdef CONFIG_PFAULT
-	if (MACHINE_IS_VM)
-		pfault_fini();
-#endif
+	smp_send_stop();
+	pfault_fini();
+	s390_reset_system();
 
-	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
-		signal_processor(smp_processor_id(), sigp_stop);
-
-	/* Wait for all other cpus to enter stopped state */
-	for_each_online_cpu(cpu) {
-		if (cpu == smp_processor_id())
-			continue;
-		while (!smp_cpu_not_running(cpu))
-			cpu_relax();
-	}
-
-	image = (struct kimage *) kernel_image;
-	data_mover = (relocate_kernel_t)
-		(page_to_pfn(image->control_code_page) << PAGE_SHIFT);
+	data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
 
 	/* Call the moving routine */
-	(*data_mover) (&image->head, image->start);
+	(*data_mover)(&image->head, image->start);
+	for (;;);
 }
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 0340477..f9434d4 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -11,19 +11,10 @@
 		.globl	do_reipl_asm
 do_reipl_asm:	basr	%r13,0
 .Lpg0:		lpsw	.Lnewpsw-.Lpg0(%r13)
-
-		# switch off lowcore protection
-
-.Lpg1:		stctl	%c0,%c0,.Lctlsave1-.Lpg0(%r13)
-		stctl	%c0,%c0,.Lctlsave2-.Lpg0(%r13)
-		ni	.Lctlsave1-.Lpg0(%r13),0xef
-		lctl	%c0,%c0,.Lctlsave1-.Lpg0(%r13)
-
-		# do store status of all registers
+.Lpg1:		# do store status of all registers
 
 		stm	%r0,%r15,__LC_GPREGS_SAVE_AREA
 		stctl	%c0,%c15,__LC_CREGS_SAVE_AREA
-		mvc	__LC_CREGS_SAVE_AREA(4),.Lctlsave2-.Lpg0(%r13)
 		stam	%a0,%a15,__LC_AREGS_SAVE_AREA
 		stpx	__LC_PREFIX_SAVE_AREA
 		stckc	.Lclkcmp-.Lpg0(%r13)
@@ -56,8 +47,7 @@
 .L002:		tm	.Liplirb+8-.Lpg0(%r13),0xf3
 		jz	.L003
 		bas	%r14,.Ldisab-.Lpg0(%r13)
-.L003:		spx	.Lnull-.Lpg0(%r13)
-		st	%r1,__LC_SUBCHANNEL_ID
+.L003:		st	%r1,__LC_SUBCHANNEL_ID
 		lpsw	0
 		sigp	0,0,0(6)
 .Ldisab:	st	%r14,.Ldispsw+4-.Lpg0(%r13)
@@ -65,9 +55,6 @@
 		.align	8
 .Lclkcmp:	.quad	0x0000000000000000
 .Lall:		.long	0xff000000
-.Lnull:		.long	0x00000000
-.Lctlsave1:	.long	0x00000000
-.Lctlsave2:	.long	0x00000000
 		.align	8
 .Lnewpsw:	.long	0x00080000,0x80000000+.Lpg1
 .Lpcnew:	.long	0x00080000,0x80000000+.Lecs
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index de74350..f18ef26 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -10,10 +10,10 @@
 #include <asm/lowcore.h>
 		.globl	do_reipl_asm
 do_reipl_asm:	basr	%r13,0
+.Lpg0:		lpswe	.Lnewpsw-.Lpg0(%r13)
+.Lpg1:		# do store status of all registers
 
-		# do store status of all registers
-
-.Lpg0:		stg	%r1,.Lregsave-.Lpg0(%r13)
+		stg	%r1,.Lregsave-.Lpg0(%r13)
 		lghi	%r1,0x1000
 		stmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-0x1000(%r1)
 		lg	%r0,.Lregsave-.Lpg0(%r13)
@@ -27,11 +27,7 @@
 		stpt	__LC_CPU_TIMER_SAVE_AREA-0x1000(%r1)
 		stg	%r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1)
 
-		lpswe	.Lnewpsw-.Lpg0(%r13)
-.Lpg1:		lctlg	%c6,%c6,.Lall-.Lpg0(%r13)
-		stctg	%c0,%c0,.Lregsave-.Lpg0(%r13)
-		ni	.Lregsave+4-.Lpg0(%r13),0xef
-		lctlg	%c0,%c0,.Lregsave-.Lpg0(%r13)
+		lctlg	%c6,%c6,.Lall-.Lpg0(%r13)
 		lgr	%r1,%r2
 		mvc	__LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
 		stsch	.Lschib-.Lpg0(%r13)
@@ -56,8 +52,7 @@
 .L002:		tm	.Liplirb+8-.Lpg0(%r13),0xf3
 		jz	.L003
 		bas	%r14,.Ldisab-.Lpg0(%r13)
-.L003:		spx	.Lnull-.Lpg0(%r13)
-		st	%r1,__LC_SUBCHANNEL_ID
+.L003:		st	%r1,__LC_SUBCHANNEL_ID
 		lhi	%r1,0		 # mode 0 = esa
 		slr	%r0,%r0 	 # set cpuid to zero
 		sigp	%r1,%r0,0x12	 # switch to esa mode
@@ -70,7 +65,6 @@
 .Lclkcmp:	.quad	0x0000000000000000
 .Lall:		.quad	0x00000000ff000000
 .Lregsave:	.quad	0x0000000000000000
-.Lnull:		.long	0x0000000000000000
 		.align	16
 /*
  * These addresses have to be 31 bit otherwise
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index f9899ff..3b456b8 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -26,8 +26,7 @@
 	relocate_kernel:
 		basr	%r13,0		# base address
 	.base:
-		stnsm	sys_msk-.base(%r13),0xf8	# disable DAT and IRQ (external)
-		spx	zero64-.base(%r13)	# absolute addressing mode
+		stnsm	sys_msk-.base(%r13),0xfb	# disable DAT
 		stctl	%c0,%c15,ctlregs-.base(%r13)
 		stm	%r0,%r15,gprregs-.base(%r13)
 		la	%r1,load_psw-.base(%r13)
@@ -97,8 +96,6 @@
 		lpsw	0		# hopefully start new kernel...
 
 		.align	8
-	zero64:
-		.quad	0
 	load_psw:
 		.long	0x00080000,0x80000000
 	sys_msk:
diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S
index 4fb4430..1f9ea20 100644
--- a/arch/s390/kernel/relocate_kernel64.S
+++ b/arch/s390/kernel/relocate_kernel64.S
@@ -27,8 +27,7 @@
 	relocate_kernel:
 		basr	%r13,0		# base address
 	.base:
-		stnsm	sys_msk-.base(%r13),0xf8	# disable DAT and IRQs
-		spx	zero64-.base(%r13)	# absolute addressing mode
+		stnsm	sys_msk-.base(%r13),0xfb	# disable DAT
 		stctg	%c0,%c15,ctlregs-.base(%r13)
 		stmg	%r0,%r15,gprregs-.base(%r13)
 		lghi	%r0,3
@@ -100,8 +99,6 @@
 		lpsw	0		# hopefully start new kernel...
 
 		.align	8
-	zero64:
-		.quad	0
 	load_psw:
 		.long	0x00080000,0x80000000
 	sys_msk:
diff --git a/arch/s390/kernel/reset.S b/arch/s390/kernel/reset.S
new file mode 100644
index 0000000..be8688c
--- /dev/null
+++ b/arch/s390/kernel/reset.S
@@ -0,0 +1,48 @@
+/*
+ *  arch/s390/kernel/reset.S
+ *
+ *    Copyright (C) IBM Corp. 2006
+ *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <asm/ptrace.h>
+#include <asm/lowcore.h>
+
+#ifdef CONFIG_64BIT
+
+	.globl	reset_mcck_handler
+reset_mcck_handler:
+	basr	%r13,0
+0:	lg	%r15,__LC_PANIC_STACK	# load panic stack
+	aghi	%r15,-STACK_FRAME_OVERHEAD
+	lg	%r1,s390_reset_mcck_handler-0b(%r13)
+	ltgr	%r1,%r1
+	jz	1f
+	basr	%r14,%r1
+1:	la	%r1,4095
+	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
+	lpswe	__LC_MCK_OLD_PSW
+
+	.globl	s390_reset_mcck_handler
+s390_reset_mcck_handler:
+	.quad	0
+
+#else /* CONFIG_64BIT */
+
+	.globl	reset_mcck_handler
+reset_mcck_handler:
+	basr	%r13,0
+0:	l	%r15,__LC_PANIC_STACK	# load panic stack
+	ahi	%r15,-STACK_FRAME_OVERHEAD
+	l	%r1,s390_reset_mcck_handler-0b(%r13)
+	ltr	%r1,%r1
+	jz	1f
+	basr	%r14,%r1
+1:	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA
+	lpsw	__LC_MCK_OLD_PSW
+
+	.globl	s390_reset_mcck_handler
+s390_reset_mcck_handler:
+	.long	0
+
+#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 2aa13e8..b928fec 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -62,13 +62,9 @@
 unsigned int console_mode = 0;
 unsigned int console_devno = -1;
 unsigned int console_irq = -1;
-unsigned long memory_size = 0;
 unsigned long machine_flags = 0;
-struct {
-	unsigned long addr, size, type;
-} memory_chunk[MEMORY_CHUNKS] = { { 0 } };
-#define CHUNK_READ_WRITE 0
-#define CHUNK_READ_ONLY 1
+
+struct mem_chunk memory_chunk[MEMORY_CHUNKS];
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
 unsigned long __initdata zholes_size[MAX_NR_ZONES];
 static unsigned long __initdata memory_end;
@@ -229,11 +225,11 @@
 	char *ptr;
 
         if (MACHINE_IS_VM) {
-		__cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
+		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
 		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
 		ptr = strstr(query_buffer, "SUBCHANNEL =");
 		console_irq = simple_strtoul(ptr + 13, NULL, 16);
-		__cpcmd("QUERY TERM", query_buffer, 1024, NULL);
+		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
 		ptr = strstr(query_buffer, "CONMODE");
 		/*
 		 * Set the conmode to 3215 so that the device recognition 
@@ -242,7 +238,7 @@
 		 * 3215 and the 3270 driver will try to access the console
 		 * device (3215 as console and 3270 as normal tty).
 		 */
-		__cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
+		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
 		if (ptr == NULL) {
 #if defined(CONFIG_SCLP_CONSOLE)
 			SET_CONSOLE_SCLP;
@@ -299,14 +295,14 @@
 static void do_machine_halt_nonsmp(void)
 {
         if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
-                cpcmd(vmhalt_cmd, NULL, 0, NULL);
+		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
         signal_processor(smp_processor_id(), sigp_stop_and_store_status);
 }
 
 static void do_machine_power_off_nonsmp(void)
 {
         if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
-                cpcmd(vmpoff_cmd, NULL, 0, NULL);
+		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
         signal_processor(smp_processor_id(), sigp_stop_and_store_status);
 }
 
@@ -489,6 +485,37 @@
 	}
 }
 
+static void __init setup_memory_end(void)
+{
+	unsigned long real_size, memory_size;
+	unsigned long max_mem, max_phys;
+	int i;
+
+	memory_size = real_size = 0;
+	max_phys = VMALLOC_END - VMALLOC_MIN_SIZE;
+	memory_end &= PAGE_MASK;
+
+	max_mem = memory_end ? min(max_phys, memory_end) : max_phys;
+
+	for (i = 0; i < MEMORY_CHUNKS; i++) {
+		struct mem_chunk *chunk = &memory_chunk[i];
+
+		real_size = max(real_size, chunk->addr + chunk->size);
+		if (chunk->addr >= max_mem) {
+			memset(chunk, 0, sizeof(*chunk));
+			continue;
+		}
+		if (chunk->addr + chunk->size > max_mem)
+			chunk->size = max_mem - chunk->addr;
+		memory_size = max(memory_size, chunk->addr + chunk->size);
+	}
+	if (!memory_end)
+		memory_end = memory_size;
+	if (real_size > memory_end)
+		printk("More memory detected than supported. Unused: %luk\n",
+		       (real_size - memory_end) >> 10);
+}
+
 static void __init
 setup_memory(void)
 {
@@ -645,8 +672,6 @@
 	init_mm.end_data = (unsigned long) &_edata;
 	init_mm.brk = (unsigned long) &_end;
 
-	memory_end = memory_size;
-
 	if (MACHINE_HAS_MVCOS)
 		memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
 	else
@@ -654,20 +679,7 @@
 
 	parse_early_param();
 
-#ifndef CONFIG_64BIT
-	memory_end &= ~0x400000UL;
-
-        /*
-         * We need some free virtual space to be able to do vmalloc.
-         * On a machine with 2GB memory we make sure that we have at
-         * least 128 MB free space for vmalloc.
-         */
-        if (memory_end > 1920*1024*1024)
-                memory_end = 1920*1024*1024;
-#else /* CONFIG_64BIT */
-	memory_end &= ~0x200000UL;
-#endif /* CONFIG_64BIT */
-
+	setup_memory_end();
 	setup_memory();
 	setup_resources();
 	setup_lowcore();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 6282224..19090f7 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -230,18 +230,37 @@
         }
 }
 
+static inline void do_wait_for_stop(void)
+{
+	int cpu;
+
+	/* Wait for all other cpus to enter stopped state */
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
+		while(!smp_cpu_not_running(cpu))
+			cpu_relax();
+	}
+}
+
 /*
  * this function sends a 'stop' sigp to all other CPUs in the system.
  * it goes straight through.
  */
 void smp_send_stop(void)
 {
+	/* Disable all interrupts/machine checks */
+	__load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK);
+
         /* write magic number to zero page (absolute 0) */
 	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
 
 	/* stop other processors. */
 	do_send_stop();
 
+	/* wait until other processors are stopped */
+	do_wait_for_stop();
+
 	/* store status of other processors. */
 	do_store_status();
 }
@@ -250,88 +269,28 @@
  * Reboot, halt and power_off routines for SMP.
  */
 
-static void do_machine_restart(void * __unused)
-{
-	int cpu;
-	static atomic_t cpuid = ATOMIC_INIT(-1);
-
-	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
-		signal_processor(smp_processor_id(), sigp_stop);
-
-	/* Wait for all other cpus to enter stopped state */
-	for_each_online_cpu(cpu) {
-		if (cpu == smp_processor_id())
-			continue;
-		while(!smp_cpu_not_running(cpu))
-			cpu_relax();
-	}
-
-	/* Store status of other cpus. */
-	do_store_status();
-
-	/*
-	 * Finally call reipl. Because we waited for all other
-	 * cpus to enter this function we know that they do
-	 * not hold any s390irq-locks (the cpus have been
-	 * interrupted by an external interrupt and s390irq
-	 * locks are always held disabled).
-	 */
-	do_reipl();
-}
-
 void machine_restart_smp(char * __unused) 
 {
-        on_each_cpu(do_machine_restart, NULL, 0, 0);
-}
-
-static void do_wait_for_stop(void)
-{
-	unsigned long cr[16];
-
-	__ctl_store(cr, 0, 15);
-	cr[0] &= ~0xffff;
-	cr[6] = 0;
-	__ctl_load(cr, 0, 15);
-	for (;;)
-		enabled_wait();
-}
-
-static void do_machine_halt(void * __unused)
-{
-	static atomic_t cpuid = ATOMIC_INIT(-1);
-
-	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
-		smp_send_stop();
-		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
-			cpcmd(vmhalt_cmd, NULL, 0, NULL);
-		signal_processor(smp_processor_id(),
-				 sigp_stop_and_store_status);
-	}
-	do_wait_for_stop();
+	smp_send_stop();
+	do_reipl();
 }
 
 void machine_halt_smp(void)
 {
-        on_each_cpu(do_machine_halt, NULL, 0, 0);
-}
-
-static void do_machine_power_off(void * __unused)
-{
-	static atomic_t cpuid = ATOMIC_INIT(-1);
-
-	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
-		smp_send_stop();
-		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
-			cpcmd(vmpoff_cmd, NULL, 0, NULL);
-		signal_processor(smp_processor_id(),
-				 sigp_stop_and_store_status);
-	}
-	do_wait_for_stop();
+	smp_send_stop();
+	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
+		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
+	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+	for (;;);
 }
 
 void machine_power_off_smp(void)
 {
-        on_each_cpu(do_machine_power_off, NULL, 0, 0);
+	smp_send_stop();
+	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
+		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
+	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+	for (;;);
 }
 
 /*
@@ -501,8 +460,6 @@
  */
 extern void init_cpu_timer(void);
 extern void init_cpu_vtimer(void);
-extern int pfault_init(void);
-extern void pfault_fini(void);
 
 int __devinit start_secondary(void *cpuvoid)
 {
@@ -514,11 +471,9 @@
 #ifdef CONFIG_VIRT_TIMER
         init_cpu_vtimer();
 #endif
-#ifdef CONFIG_PFAULT
 	/* Enable pfault pseudo page faults on this cpu. */
-	if (MACHINE_IS_VM)
-		pfault_init();
-#endif
+	pfault_init();
+
 	/* Mark this cpu as online */
 	cpu_set(smp_processor_id(), cpu_online_map);
 	/* Switch on interrupts */
@@ -708,11 +663,8 @@
 	}
 	cpu_clear(cpu, cpu_online_map);
 
-#ifdef CONFIG_PFAULT
 	/* Disable pfault pseudo page faults on this cpu. */
-	if (MACHINE_IS_VM)
-		pfault_fini();
-#endif
+	pfault_fini();
 
 	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
 	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));
@@ -860,4 +812,3 @@
 EXPORT_SYMBOL(smp_call_function);
 EXPORT_SYMBOL(smp_get_cpu);
 EXPORT_SYMBOL(smp_put_cpu);
-
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 92ecffb..3cbb0dc 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -58,12 +58,6 @@
 
 extern pgm_check_handler_t do_protection_exception;
 extern pgm_check_handler_t do_dat_exception;
-#ifdef CONFIG_PFAULT
-extern int pfault_init(void);
-extern void pfault_fini(void);
-extern void pfault_interrupt(__u16 error_code);
-static ext_int_info_t ext_int_pfault;
-#endif
 extern pgm_check_handler_t do_monitor_call;
 
 #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@@ -135,7 +129,7 @@
 	}
 }
 
-void show_trace(struct task_struct *task, unsigned long * stack)
+void show_trace(struct task_struct *task, unsigned long *stack)
 {
 	register unsigned long __r15 asm ("15");
 	unsigned long sp;
@@ -157,6 +151,9 @@
 		__show_trace(sp, S390_lowcore.thread_info,
 			     S390_lowcore.thread_info + THREAD_SIZE);
 	printk("\n");
+	if (!task)
+		task = current;
+	debug_show_held_locks(task);
 }
 
 void show_stack(struct task_struct *task, unsigned long *sp)
@@ -739,22 +736,5 @@
         pgm_check_table[0x1C] = &space_switch_exception;
         pgm_check_table[0x1D] = &hfp_sqrt_exception;
 	pgm_check_table[0x40] = &do_monitor_call;
-
-	if (MACHINE_IS_VM) {
-#ifdef CONFIG_PFAULT
-		/*
-		 * Try to get pfault pseudo page faults going.
-		 */
-		if (register_early_external_interrupt(0x2603, pfault_interrupt,
-						      &ext_int_pfault) != 0)
-			panic("Couldn't request external interrupt 0x2603");
-
-		if (pfault_init() == 0) 
-			return;
-		
-		/* Tough luck, no pfault. */
-		unregister_early_external_interrupt(0x2603, pfault_interrupt,
-						    &ext_int_pfault);
-#endif
-	}
+	pfault_irq_init();
 }
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index b0cfa6c..b5f94cf 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,7 +4,7 @@
 
 EXTRA_AFLAGS := -traditional
 
-lib-y += delay.o string.o uaccess_std.o
+lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
 lib-$(CONFIG_32BIT) += div64.o
 lib-$(CONFIG_64BIT) += uaccess_mvcos.o
 lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 121b293..f9a23d5 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -27,6 +27,9 @@
 #define SLR	"slgr"
 #endif
 
+extern size_t copy_from_user_std(size_t, const void __user *, void *);
+extern size_t copy_to_user_std(size_t, void __user *, const void *);
+
 size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
 {
 	register unsigned long reg0 asm("0") = 0x81UL;
@@ -66,6 +69,13 @@
 	return size;
 }
 
+size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
+{
+	if (size <= 256)
+		return copy_from_user_std(size, ptr, x);
+	return copy_from_user_mvcos(size, ptr, x);
+}
+
 size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
 {
 	register unsigned long reg0 asm("0") = 0x810000UL;
@@ -95,6 +105,13 @@
 	return size;
 }
 
+size_t copy_to_user_mvcos_check(size_t size, void __user *ptr, const void *x)
+{
+	if (size <= 256)
+		return copy_to_user_std(size, ptr, x);
+	return copy_to_user_mvcos(size, ptr, x);
+}
+
 size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from)
 {
 	register unsigned long reg0 asm("0") = 0x810081UL;
@@ -145,18 +162,16 @@
 	return size;
 }
 
-extern size_t copy_from_user_std_small(size_t, const void __user *, void *);
-extern size_t copy_to_user_std_small(size_t, void __user *, const void *);
 extern size_t strnlen_user_std(size_t, const char __user *);
 extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
 extern int futex_atomic_op(int, int __user *, int, int *);
 extern int futex_atomic_cmpxchg(int __user *, int, int);
 
 struct uaccess_ops uaccess_mvcos = {
-	.copy_from_user = copy_from_user_mvcos,
-	.copy_from_user_small = copy_from_user_std_small,
-	.copy_to_user = copy_to_user_mvcos,
-	.copy_to_user_small = copy_to_user_std_small,
+	.copy_from_user = copy_from_user_mvcos_check,
+	.copy_from_user_small = copy_from_user_std,
+	.copy_to_user = copy_to_user_mvcos_check,
+	.copy_to_user_small = copy_to_user_std,
 	.copy_in_user = copy_in_user_mvcos,
 	.clear_user = clear_user_mvcos,
 	.strnlen_user = strnlen_user_std,
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
new file mode 100644
index 0000000..8741bdc
--- /dev/null
+++ b/arch/s390/lib/uaccess_pt.c
@@ -0,0 +1,153 @@
+/*
+ *  arch/s390/lib/uaccess_pt.c
+ *
+ *  User access functions based on page table walks.
+ *
+ *    Copyright IBM Corp. 2006
+ *    Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <linux/mm.h>
+#include <asm/futex.h>
+
+static inline int __handle_fault(struct mm_struct *mm, unsigned long address,
+				 int write_access)
+{
+	struct vm_area_struct *vma;
+	int ret = -EFAULT;
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, address);
+	if (unlikely(!vma))
+		goto out;
+	if (unlikely(vma->vm_start > address)) {
+		if (!(vma->vm_flags & VM_GROWSDOWN))
+			goto out;
+		if (expand_stack(vma, address))
+			goto out;
+	}
+
+	if (!write_access) {
+		/* page not present, check vm flags */
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+			goto out;
+	} else {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto out;
+	}
+
+survive:
+	switch (handle_mm_fault(mm, vma, address, write_access)) {
+	case VM_FAULT_MINOR:
+		current->min_flt++;
+		break;
+	case VM_FAULT_MAJOR:
+		current->maj_flt++;
+		break;
+	case VM_FAULT_SIGBUS:
+		goto out_sigbus;
+	case VM_FAULT_OOM:
+		goto out_of_memory;
+	default:
+		BUG();
+	}
+	ret = 0;
+out:
+	up_read(&mm->mmap_sem);
+	return ret;
+
+out_of_memory:
+	up_read(&mm->mmap_sem);
+	if (current->pid == 1) {
+		yield();
+		goto survive;
+	}
+	printk("VM: killing process %s\n", current->comm);
+	return ret;
+
+out_sigbus:
+	up_read(&mm->mmap_sem);
+	current->thread.prot_addr = address;
+	current->thread.trap_no = 0x11;
+	force_sig(SIGBUS, current);
+	return ret;
+}
+
+static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
+				    size_t n, int write_user)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long offset, pfn, done, size;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	void *from, *to;
+
+	done = 0;
+retry:
+	spin_lock(&mm->page_table_lock);
+	do {
+		pgd = pgd_offset(mm, uaddr);
+		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+			goto fault;
+
+		pmd = pmd_offset(pgd, uaddr);
+		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+			goto fault;
+
+		pte = pte_offset_map(pmd, uaddr);
+		if (!pte || !pte_present(*pte) ||
+		    (write_user && !pte_write(*pte)))
+			goto fault;
+
+		pfn = pte_pfn(*pte);
+		if (!pfn_valid(pfn))
+			goto out;
+
+		offset = uaddr & (PAGE_SIZE - 1);
+		size = min(n - done, PAGE_SIZE - offset);
+		if (write_user) {
+			to = (void *)((pfn << PAGE_SHIFT) + offset);
+			from = kptr + done;
+		} else {
+			from = (void *)((pfn << PAGE_SHIFT) + offset);
+			to = kptr + done;
+		}
+		memcpy(to, from, size);
+		done += size;
+		uaddr += size;
+	} while (done < n);
+out:
+	spin_unlock(&mm->page_table_lock);
+	return n - done;
+fault:
+	spin_unlock(&mm->page_table_lock);
+	if (__handle_fault(mm, uaddr, write_user))
+		return n - done;
+	goto retry;
+}
+
+size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
+{
+	size_t rc;
+
+	if (segment_eq(get_fs(), KERNEL_DS)) {
+		memcpy(to, (void __kernel __force *) from, n);
+		return 0;
+	}
+	rc = __user_copy_pt((unsigned long) from, to, n, 0);
+	if (unlikely(rc))
+		memset(to + n - rc, 0, rc);
+	return rc;
+}
+
+size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
+{
+	if (segment_eq(get_fs(), KERNEL_DS)) {
+		memcpy((void __kernel __force *) to, from, n);
+		return 0;
+	}
+	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
+}
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index f44f007..bbaca66 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -11,7 +11,7 @@
 
 #include <linux/errno.h>
 #include <linux/mm.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/futex.h>
 
 #ifndef __s390x__
@@ -28,6 +28,9 @@
 #define SLR	"slgr"
 #endif
 
+extern size_t copy_from_user_pt(size_t n, const void __user *from, void *to);
+extern size_t copy_to_user_pt(size_t n, void __user *to, const void *from);
+
 size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
 {
 	unsigned long tmp1, tmp2;
@@ -69,34 +72,11 @@
 	return size;
 }
 
-size_t copy_from_user_std_small(size_t size, const void __user *ptr, void *x)
+size_t copy_from_user_std_check(size_t size, const void __user *ptr, void *x)
 {
-	unsigned long tmp1, tmp2;
-
-	tmp1 = 0UL;
-	asm volatile(
-		"0: mvcp  0(%0,%2),0(%1),%3\n"
-		"  "SLR"  %0,%0\n"
-		"   j     5f\n"
-		"1: la    %4,255(%1)\n" /* %4 = ptr + 255 */
-		"  "LHI"  %3,-4096\n"
-		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
-		"  "SLR"  %4,%1\n"
-		"  "CLR"  %0,%4\n"	/* copy crosses next page boundary? */
-		"   jnh   5f\n"
-		"2: mvcp  0(%4,%2),0(%1),%3\n"
-		"  "SLR"  %0,%4\n"
-		"  "ALR"  %2,%4\n"
-		"3:"LHI"  %4,-1\n"
-		"  "ALR"  %4,%0\n"	/* copy remaining size, subtract 1 */
-		"   bras  %3,4f\n"
-		"   xc    0(1,%2),0(%2)\n"
-		"4: ex    %4,0(%3)\n"
-		"5:\n"
-		EX_TABLE(0b,1b) EX_TABLE(2b,3b)
-		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
-		: : "cc", "memory");
-	return size;
+	if (size <= 1024)
+		return copy_from_user_std(size, ptr, x);
+	return copy_from_user_pt(size, ptr, x);
 }
 
 size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
@@ -130,28 +110,11 @@
 	return size;
 }
 
-size_t copy_to_user_std_small(size_t size, void __user *ptr, const void *x)
+size_t copy_to_user_std_check(size_t size, void __user *ptr, const void *x)
 {
-	unsigned long tmp1, tmp2;
-
-	tmp1 = 0UL;
-	asm volatile(
-		"0: mvcs  0(%0,%1),0(%2),%3\n"
-		"  "SLR"  %0,%0\n"
-		"   j     3f\n"
-		"1: la    %4,255(%1)\n" /* ptr + 255 */
-		"  "LHI"  %3,-4096\n"
-		"   nr    %4,%3\n"	/* (ptr + 255) & -4096UL */
-		"  "SLR"  %4,%1\n"
-		"  "CLR"  %0,%4\n"	/* copy crosses next page boundary? */
-		"   jnh   3f\n"
-		"2: mvcs  0(%4,%1),0(%2),%3\n"
-		"  "SLR"  %0,%4\n"
-		"3:\n"
-		EX_TABLE(0b,1b) EX_TABLE(2b,3b)
-		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
-		: : "cc", "memory");
-	return size;
+	if (size <= 1024)
+		return copy_to_user_std(size, ptr, x);
+	return copy_to_user_pt(size, ptr, x);
 }
 
 size_t copy_in_user_std(size_t size, void __user *to, const void __user *from)
@@ -295,7 +258,7 @@
 {
 	int oldval = 0, newval, ret;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -321,7 +284,7 @@
 	default:
 		ret = -ENOSYS;
 	}
-	dec_preempt_count();
+	pagefault_enable();
 	*old = oldval;
 	return ret;
 }
@@ -343,10 +306,10 @@
 }
 
 struct uaccess_ops uaccess_std = {
-	.copy_from_user = copy_from_user_std,
-	.copy_from_user_small = copy_from_user_std_small,
-	.copy_to_user = copy_to_user_std,
-	.copy_to_user_small = copy_to_user_std_small,
+	.copy_from_user = copy_from_user_std_check,
+	.copy_from_user_small = copy_from_user_std,
+	.copy_to_user = copy_to_user_std_check,
+	.copy_to_user_small = copy_to_user_std,
 	.copy_in_user = copy_in_user_std,
 	.clear_user = clear_user_std,
 	.strnlen_user = strnlen_user_std,
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 226275d..9e9bc48 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -14,12 +14,13 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/bootmem.h>
+#include <linux/ctype.h>
 #include <asm/page.h>
 #include <asm/ebcdic.h>
 #include <asm/errno.h>
 #include <asm/extmem.h>
 #include <asm/cpcmd.h>
-#include <linux/ctype.h>
+#include <asm/setup.h>
 
 #define DCSS_DEBUG	/* Debug messages on/off */
 
@@ -77,15 +78,11 @@
 	int segcnt;
 };
 
-static DEFINE_SPINLOCK(dcss_lock);
+static DEFINE_MUTEX(dcss_lock);
 static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list);
 static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
 					"EW/EN-MIXED" };
 
-extern struct {
-	unsigned long addr, size, type;
-} memory_chunk[MEMORY_CHUNKS];
-
 /*
  * Create the 8 bytes, ebcdic VM segment name from
  * an ascii name.
@@ -117,7 +114,7 @@
 	struct list_head *l;
 	struct dcss_segment *tmp, *retval = NULL;
 
-	assert_spin_locked(&dcss_lock);
+	BUG_ON(!mutex_is_locked(&dcss_lock));
 	dcss_mkname (name, dcss_name);
 	list_for_each (l, &dcss_list) {
 		tmp = list_entry (l, struct dcss_segment, list);
@@ -249,8 +246,8 @@
 {
 	int i;
 
-	for (i=0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		if (memory_chunk[i].type != 0)
+	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+		if (memory_chunk[i].type != CHUNK_READ_WRITE)
 			continue;
 		if ((memory_chunk[i].addr >> 20) > (seg->end >> 20))
 			continue;
@@ -272,7 +269,7 @@
 	struct list_head *l;
 	struct dcss_segment *tmp;
 
-	assert_spin_locked(&dcss_lock);
+	BUG_ON(!mutex_is_locked(&dcss_lock));
 	list_for_each(l, &dcss_list) {
 		tmp = list_entry(l, struct dcss_segment, list);
 		if ((tmp->start_addr >> 20) > (seg->end >> 20))
@@ -429,7 +426,7 @@
 	if (!MACHINE_IS_VM)
 		return -ENOSYS;
 
-	spin_lock (&dcss_lock);
+	mutex_lock(&dcss_lock);
 	seg = segment_by_name (name);
 	if (seg == NULL)
 		rc = __segment_load (name, do_nonshared, addr, end);
@@ -444,7 +441,7 @@
 			rc    = -EPERM;
 		}
 	}
-	spin_unlock (&dcss_lock);
+	mutex_unlock(&dcss_lock);
 	return rc;
 }
 
@@ -467,7 +464,7 @@
 	unsigned long dummy;
 	int dcss_command, rc, diag_cc;
 
-	spin_lock (&dcss_lock);
+	mutex_lock(&dcss_lock);
 	seg = segment_by_name (name);
 	if (seg == NULL) {
 		rc = -EINVAL;
@@ -508,7 +505,7 @@
 		  &dummy, &dummy);
 	kfree(seg);
  out_unlock:
-	spin_unlock(&dcss_lock);
+	mutex_unlock(&dcss_lock);
 	return rc;
 }
 
@@ -526,7 +523,7 @@
 	if (!MACHINE_IS_VM)
 		return;
 
-	spin_lock(&dcss_lock);
+	mutex_lock(&dcss_lock);
 	seg = segment_by_name (name);
 	if (seg == NULL) {
 		PRINT_ERR ("could not find segment %s in segment_unload, "
@@ -540,7 +537,7 @@
 		kfree(seg);
 	}
 out_unlock:
-	spin_unlock(&dcss_lock);
+	mutex_unlock(&dcss_lock);
 }
 
 /*
@@ -559,12 +556,13 @@
 	if (!MACHINE_IS_VM)
 		return;
 
-	spin_lock(&dcss_lock);
+	mutex_lock(&dcss_lock);
 	seg = segment_by_name (name);
 
 	if (seg == NULL) {
-		PRINT_ERR ("could not find segment %s in segment_save, please report to linux390@de.ibm.com\n",name);
-		return;
+		PRINT_ERR("could not find segment %s in segment_save, please "
+			  "report to linux390@de.ibm.com\n", name);
+		goto out;
 	}
 
 	startpfn = seg->start_addr >> PAGE_SHIFT;
@@ -591,7 +589,7 @@
 		goto out;
 	}
 out:
-	spin_unlock(&dcss_lock);
+	mutex_unlock(&dcss_lock);
 }
 
 EXPORT_SYMBOL(segment_load);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 1c323bb..cd85e34 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -31,6 +31,7 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/kdebug.h>
+#include <asm/s390_ext.h>
 
 #ifndef CONFIG_64BIT
 #define __FAIL_ADDR_MASK 0x7ffff000
@@ -394,6 +395,7 @@
 /*
  * 'pfault' pseudo page faults routines.
  */
+static ext_int_info_t ext_int_pfault;
 static int pfault_disable = 0;
 
 static int __init nopfault(char *str)
@@ -422,7 +424,7 @@
 		  __PF_RES_FIELD };
         int rc;
 
-	if (pfault_disable)
+	if (!MACHINE_IS_VM || pfault_disable)
 		return -1;
 	asm volatile(
 		"	diag	%1,%0,0x258\n"
@@ -440,7 +442,7 @@
 	pfault_refbk_t refbk =
 	{ 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL };
 
-	if (pfault_disable)
+	if (!MACHINE_IS_VM || pfault_disable)
 		return;
 	__ctl_clear_bit(0,9);
 	asm volatile(
@@ -500,5 +502,25 @@
 			set_tsk_need_resched(tsk);
 	}
 }
-#endif
 
+void __init pfault_irq_init(void)
+{
+	if (!MACHINE_IS_VM)
+		return;
+
+	/*
+	 * Try to get pfault pseudo page faults going.
+	 */
+	if (register_early_external_interrupt(0x2603, pfault_interrupt,
+					      &ext_int_pfault) != 0)
+		panic("Couldn't request external interrupt 0x2603");
+
+	if (pfault_init() == 0)
+		return;
+
+	/* Tough luck, no pfault. */
+	pfault_disable = 1;
+	unregister_early_external_interrupt(0x2603, pfault_interrupt,
+					    &ext_int_pfault);
+}
+#endif
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 6a461d4..d83d64a 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -51,6 +51,14 @@
 config ARCH_MAY_HAVE_PC_FDC
 	bool
 
+config STACKTRACE_SUPPORT
+	bool
+	default y
+
+config LOCKDEP_SUPPORT
+	bool
+	default y
+
 source "init/Kconfig"
 
 menu "System type"
@@ -217,7 +225,21 @@
 	bool "SHMIN"
 	select CPU_SUBTYPE_SH7706
 	help
-	  Select SHMIN if configureing for the SHMIN board
+	  Select SHMIN if configuring for the SHMIN board.
+
+config SH_7206_SOLUTION_ENGINE
+	bool "SolutionEngine7206"
+	select CPU_SUBTYPE_SH7206
+	help
+	  Select 7206 SolutionEngine if configuring for a Hitachi SH7206
+	  evaluation board.
+
+config SH_7619_SOLUTION_ENGINE
+	bool "SolutionEngine7619"
+	select CPU_SUBTYPE_SH7619
+	help
+	  Select 7619 SolutionEngine if configuring for a Hitachi SH7619
+	  evaluation board.
 
 config SH_UNKNOWN
 	bool "BareCPU"
@@ -280,12 +302,20 @@
 
 menu "Processor features"
 
-config CPU_LITTLE_ENDIAN
-	bool "Little Endian"
+choice
+	prompt "Endianess selection" 
+	default CPU_LITTLE_ENDIAN
 	help
 	  Some SuperH machines can be configured for either little or big
-	  endian byte order. These modes require different kernels. Say Y if
-	  your machine is little endian, N if it's a big endian machine.
+	  endian byte order. These modes require different kernels.
+
+config CPU_LITTLE_ENDIAN
+	bool "Little Endian"
+
+config CPU_BIG_ENDIAN
+	bool "Big Endian"
+
+endchoice
 
 config SH_FPU
 	bool "FPU support"
@@ -345,6 +375,9 @@
 config CPU_HAS_INTC2_IRQ
 	bool
 
+config CPU_HAS_IPR_IRQ
+	bool
+
 config CPU_HAS_SR_RB
 	bool "CPU has SR.RB"
 	depends on CPU_SH3 || CPU_SH4
@@ -357,6 +390,9 @@
 	  See <file:Documentation/sh/register-banks.txt> for further
 	  information on SR.RB and register banking in the kernel in general.
 
+config CPU_HAS_PTEA
+	bool
+
 endmenu
 
 menu "Timer support"
@@ -364,10 +400,25 @@
 
 config SH_TMU
 	bool "TMU timer support"
+	depends on CPU_SH3 || CPU_SH4
 	default y
 	help
 	  This enables the use of the TMU as the system timer.
 
+config SH_CMT
+	bool "CMT timer support"
+	depends on CPU_SH2
+	default y
+	help
+	  This enables the use of the CMT as the system timer.
+
+config SH_MTU2
+	bool "MTU2 timer support"
+	depends on CPU_SH2A
+	default n
+	help
+	  This enables the use of the MTU2 as the system timer.
+
 endmenu
 
 source "arch/sh/boards/renesas/hs7751rvoip/Kconfig"
@@ -376,19 +427,52 @@
 
 source "arch/sh/boards/renesas/r7780rp/Kconfig"
 
+config SH_TIMER_IRQ
+	int
+	default "28" if CPU_SUBTYPE_SH7780
+	default "86" if CPU_SUBTYPE_SH7619
+	default "140" if CPU_SUBTYPE_SH7206
+	default "16"
+
+config NO_IDLE_HZ
+	bool "Dynamic tick timer"
+	help
+	  Select this option if you want to disable continuous timer ticks
+	  and have them programmed to occur as required. This option saves
+	  power as the system can remain in idle state for longer.
+
+	  By default dynamic tick is disabled during the boot, and can be
+	  manually enabled with:
+
+	    echo 1 > /sys/devices/system/timer/timer0/dyn_tick
+
+	  Alternatively, if you want dynamic tick automatically enabled
+	  during boot, pass "dyntick=enable" via the kernel command string.
+
+	  Please note that dynamic tick may affect the accuracy of
+	  timekeeping on some platforms depending on the implementation.
+
 config SH_PCLK_FREQ
 	int "Peripheral clock frequency (in Hz)"
+	default "27000000" if CPU_SUBTYPE_SH73180 || CPU_SUBTYPE_SH7343
+	default "31250000" if CPU_SUBTYPE_SH7619
+	default "33333333" if CPU_SUBTYPE_SH7300 || CPU_SUBTYPE_SH7770 || \
+			      CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7705 || \
+			      CPU_SUBTYPE_SH7206
 	default "50000000" if CPU_SUBTYPE_SH7750 || CPU_SUBTYPE_SH7780
 	default "60000000" if CPU_SUBTYPE_SH7751
-	default "33333333" if CPU_SUBTYPE_SH7300 || CPU_SUBTYPE_SH7770 || \
-			      CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7705
-	default "27000000" if CPU_SUBTYPE_SH73180 || CPU_SUBTYPE_SH7343
 	default "66000000" if CPU_SUBTYPE_SH4_202
 	help
 	  This option is used to specify the peripheral clock frequency.
 	  This is necessary for determining the reference clock value on
 	  platforms lacking an RTC.
 
+config SH_CLK_MD
+	int "CPU Mode Pin Setting"
+	depends on CPU_SUBTYPE_SH7619 || CPU_SUBTYPE_SH7206
+	help
+	  MD2 - MD0 Setting.
+
 menu "CPU Frequency scaling"
 
 source "drivers/cpufreq/Kconfig"
@@ -421,6 +505,8 @@
 	  behavior is platform-dependent, but normally the flash frequency is
 	  a hyperbolic function of the 5-minute load average.
 
+source "arch/sh/drivers/Kconfig"
+
 endmenu
 
 config ISA_DMA_API
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index 48479e0..66a25ef 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -1,5 +1,9 @@
 menu "Kernel hacking"
 
+config TRACE_IRQFLAGS_SUPPORT
+	bool
+	default y
+
 source "lib/Kconfig.debug"
 
 config SH_STANDARD_BIOS
@@ -17,7 +21,18 @@
 
 config EARLY_SCIF_CONSOLE
 	bool "Use early SCIF console"
-	depends on CPU_SH4 || CPU_SH2A && !SH_STANDARD_BIOS
+	help
+	  This enables an early console using a fixed SCIF port. This can
+	  be used by platforms that are either not running the SH
+	  standard BIOS, or do not wish to use the BIOS callbacks for the
+	  serial I/O.
+
+config EARLY_SCIF_CONSOLE_PORT
+	hex "SCIF port for early console"
+	depends on EARLY_SCIF_CONSOLE
+	default "0xffe00000" if CPU_SUBTYPE_SH7780
+	default "0xfffe9800" if CPU_SUBTYPE_SH72060
+	default "0xffe80000" if CPU_SH4
 
 config EARLY_PRINTK
 	bool "Early printk support"
@@ -30,6 +45,11 @@
 	  when the kernel may crash or hang before the serial console is
 	  initialised. If unsure, say N.
 
+	  On devices that are running SH-IPL and want to keep the port
+	  initialization consistent while not using the BIOS callbacks,
+	  select both the EARLY_SCIF_CONSOLE and SH_STANDARD_BIOS, using
+	  the kernel command line option to toggle back and forth.
+
 config DEBUG_STACKOVERFLOW
 	bool "Check for stack overflows"
 	depends on DEBUG_KERNEL
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 26d62ff..d10bba5 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -13,10 +13,6 @@
 # for "archclean" and "archdep" for cleaning up and making dependencies for
 # this architecture
 #
-
-cflags-y				:= -mb
-cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	:= -ml
-
 isa-y					:= any
 isa-$(CONFIG_SH_DSP)			:= sh
 isa-$(CONFIG_CPU_SH2)			:= sh2
@@ -38,13 +34,16 @@
 endif
 endif
 
-cflags-y	+= $(call as-option,-Wa$(comma)-isa=$(isa-y),)
-
-cflags-$(CONFIG_CPU_SH2)		+= -m2
-cflags-$(CONFIG_CPU_SH3)		+= -m3
-cflags-$(CONFIG_CPU_SH4)		+= -m4 \
+cflags-$(CONFIG_CPU_SH2)		:= -m2
+cflags-$(CONFIG_CPU_SH3)		:= -m3
+cflags-$(CONFIG_CPU_SH4)		:= -m4 \
 	$(call cc-option,-mno-implicit-fp,-m4-nofpu)
-cflags-$(CONFIG_CPU_SH4A)		+= $(call cc-option,-m4a-nofpu,)
+cflags-$(CONFIG_CPU_SH4A)		:= -m4a $(call cc-option,-m4a-nofpu,)
+
+cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= -mb
+cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -ml
+
+cflags-y	+= $(call as-option,-Wa$(comma)-isa=$(isa-y),) -ffreestanding
 
 cflags-$(CONFIG_SH_DSP)			+= -Wa,-dsp
 cflags-$(CONFIG_SH_KGDB)		+= -g
@@ -59,7 +58,9 @@
 # never be used by anyone. Use a board-specific defconfig that has a
 # reasonable chance of being current instead.
 #
-KBUILD_DEFCONFIG := rts7751r2d_defconfig
+KBUILD_DEFCONFIG := r7780rp_defconfig
+
+KBUILD_IMAGE	:= arch/sh/boot/zImage
 
 #
 # Choosing incompatible machines during configuration will result in
@@ -109,6 +110,8 @@
 machdir-$(CONFIG_SH_LANDISK)			:= landisk
 machdir-$(CONFIG_SH_TITAN)			:= titan
 machdir-$(CONFIG_SH_SHMIN)			:= shmin
+machdir-$(CONFIG_SH_7206_SOLUTION_ENGINE)	:= se/7206
+machdir-$(CONFIG_SH_7619_SOLUTION_ENGINE)	:= se/7619
 machdir-$(CONFIG_SH_UNKNOWN)			:= unknown
 
 incdir-y			:= $(notdir $(machdir-y))
@@ -124,6 +127,7 @@
 core-$(CONFIG_VOYAGERGX)	+= arch/sh/cchips/voyagergx/
 
 cpuincdir-$(CONFIG_CPU_SH2)	:= cpu-sh2
+cpuincdir-$(CONFIG_CPU_SH2A)	:= cpu-sh2a
 cpuincdir-$(CONFIG_CPU_SH3)	:= cpu-sh3
 cpuincdir-$(CONFIG_CPU_SH4)	:= cpu-sh4
 
diff --git a/arch/sh/boards/renesas/r7780rp/Makefile b/arch/sh/boards/renesas/r7780rp/Makefile
index f1776d0..574b031 100644
--- a/arch/sh/boards/renesas/r7780rp/Makefile
+++ b/arch/sh/boards/renesas/r7780rp/Makefile
@@ -3,4 +3,6 @@
 #
 
 obj-y	 := setup.o io.o irq.o
-obj-$(CONFIG_HEARTBEAT)	+= led.o
+
+obj-$(CONFIG_HEARTBEAT)		+= led.o
+obj-$(CONFIG_PUSH_SWITCH)	+= psw.o
diff --git a/arch/sh/boards/renesas/r7780rp/irq.c b/arch/sh/boards/renesas/r7780rp/irq.c
index aa15ec5..cc381e1 100644
--- a/arch/sh/boards/renesas/r7780rp/irq.c
+++ b/arch/sh/boards/renesas/r7780rp/irq.c
@@ -10,6 +10,7 @@
  */
 #include <linux/init.h>
 #include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <asm/r7780rp.h>
 
diff --git a/arch/sh/boards/renesas/r7780rp/psw.c b/arch/sh/boards/renesas/r7780rp/psw.c
new file mode 100644
index 0000000..c844dfa
--- /dev/null
+++ b/arch/sh/boards/renesas/r7780rp/psw.c
@@ -0,0 +1,122 @@
+/*
+ * arch/sh/boards/renesas/r7780rp/psw.c
+ *
+ * push switch support for RDBRP-1/RDBREVRP-1 debug boards.
+ *
+ * Copyright (C) 2006  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <asm/mach/r7780rp.h>
+#include <asm/push-switch.h>
+
+static irqreturn_t psw_irq_handler(int irq, void *arg)
+{
+	struct platform_device *pdev = arg;
+	struct push_switch *psw = platform_get_drvdata(pdev);
+	struct push_switch_platform_info *psw_info = pdev->dev.platform_data;
+	unsigned int l, mask;
+	int ret = 0;
+
+	l = ctrl_inw(PA_DBSW);
+
+	/* Nothing to do if there's no state change */
+	if (psw->state) {
+		ret = 1;
+		goto out;
+	}
+
+	mask = l & 0x70;
+	/* Figure out who raised it */
+	if (mask & (1 << psw_info->bit)) {
+		psw->state = !!(mask & (1 << psw_info->bit));
+		if (psw->state)	/* debounce */
+			mod_timer(&psw->debounce, jiffies + 50);
+
+		ret = 1;
+	}
+
+out:
+	/* Clear the switch IRQs */
+	l |= (0x7 << 12);
+	ctrl_outw(l, PA_DBSW);
+
+	return IRQ_RETVAL(ret);
+}
+
+static struct resource psw_resources[] = {
+	[0] = {
+		.start	= IRQ_PSW,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct push_switch_platform_info s2_platform_data = {
+	.name		= "s2",
+	.bit		= 6,
+	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+			  IRQF_SHARED,
+	.irq_handler	= psw_irq_handler,
+};
+
+static struct platform_device s2_switch_device = {
+	.name		= "push-switch",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(psw_resources),
+	.resource	= psw_resources,
+	.dev		= {
+		.platform_data = &s2_platform_data,
+	},
+};
+
+static struct push_switch_platform_info s3_platform_data = {
+	.name		= "s3",
+	.bit		= 5,
+	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+			  IRQF_SHARED,
+	.irq_handler	= psw_irq_handler,
+};
+
+static struct platform_device s3_switch_device = {
+	.name		= "push-switch",
+	.id		= 1,
+	.num_resources	= ARRAY_SIZE(psw_resources),
+	.resource	= psw_resources,
+	.dev		= {
+		.platform_data = &s3_platform_data,
+	},
+};
+
+static struct push_switch_platform_info s4_platform_data = {
+	.name		= "s4",
+	.bit		= 4,
+	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+			  IRQF_SHARED,
+	.irq_handler	= psw_irq_handler,
+};
+
+static struct platform_device s4_switch_device = {
+	.name		= "push-switch",
+	.id		= 2,
+	.num_resources	= ARRAY_SIZE(psw_resources),
+	.resource	= psw_resources,
+	.dev		= {
+		.platform_data = &s4_platform_data,
+	},
+};
+
+static struct platform_device *psw_devices[] = {
+	&s2_switch_device, &s3_switch_device, &s4_switch_device,
+};
+
+static int __init psw_init(void)
+{
+	return platform_add_devices(psw_devices, ARRAY_SIZE(psw_devices));
+}
+module_init(psw_init);
diff --git a/arch/sh/boards/renesas/r7780rp/setup.c b/arch/sh/boards/renesas/r7780rp/setup.c
index c331cae..9f89c8d 100644
--- a/arch/sh/boards/renesas/r7780rp/setup.c
+++ b/arch/sh/boards/renesas/r7780rp/setup.c
@@ -44,8 +44,37 @@
 	.resource	= m66596_usb_host_resources,
 };
 
+static struct resource cf_ide_resources[] = {
+	[0] = {
+		.start	= 0x1f0,
+		.end	= 0x1f0 + 8,
+		.flags	= IORESOURCE_IO,
+	},
+	[1] = {
+		.start	= 0x1f0 + 0x206,
+		.end	= 0x1f0 + 8 + 0x206 + 8,
+		.flags	= IORESOURCE_IO,
+	},
+	[2] = {
+#ifdef CONFIG_SH_R7780MP
+		.start	= 1,
+#else
+		.start	= 4,
+#endif
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device cf_ide_device  = {
+	.name		= "pata_platform",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(cf_ide_resources),
+	.resource	= cf_ide_resources,
+};
+
 static struct platform_device *r7780rp_devices[] __initdata = {
 	&m66596_usb_host_device,
+	&cf_ide_device,
 };
 
 static int __init r7780rp_devices_setup(void)
diff --git a/arch/sh/boards/se/7206/Makefile b/arch/sh/boards/se/7206/Makefile
new file mode 100644
index 0000000..63950f4f
--- /dev/null
+++ b/arch/sh/boards/se/7206/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the 7206 SolutionEngine specific parts of the kernel
+#
+
+obj-y	 := setup.o io.o irq.o
+obj-$(CONFIG_HEARTBEAT) += led.o
+
diff --git a/arch/sh/boards/se/7206/io.c b/arch/sh/boards/se/7206/io.c
new file mode 100644
index 0000000..b557273
--- /dev/null
+++ b/arch/sh/boards/se/7206/io.c
@@ -0,0 +1,123 @@
+/* $Id: io.c,v 1.5 2004/02/22 23:08:43 kkojima Exp $
+ *
+ * linux/arch/sh/boards/se/7206/io.c
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * I/O routine for Hitachi 7206 SolutionEngine.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/se7206.h>
+
+
+static inline void delay(void)
+{
+	ctrl_inw(0x20000000);  /* P2 ROM Area */
+}
+
+/* MS7750 requires special versions of the in*/out* routines, since
+   PC-like I/O ports are located in the upper half byte of a 16-bit word
+   and can only be accessed with 16-bit wide operations.  */
+
+static inline volatile __u16 *
+port2adr(unsigned int port)
+{
+	if (port >= 0x2000)
+		return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
+	else if (port >= 0x300 || port < 0x310)
+		return (volatile __u16 *) (PA_SMSC + (port - 0x300));
+}
+
+unsigned char se7206_inb(unsigned long port)
+{
+	return (*port2adr(port))&0xff; 
+}
+
+unsigned char se7206_inb_p(unsigned long port)
+{
+	unsigned long v;
+
+	v = (*port2adr(port))&0xff; 
+	delay();
+	return v;
+}
+
+unsigned short se7206_inw(unsigned long port)
+{
+	return *port2adr(port);
+}
+
+unsigned int se7206_inl(unsigned long port)
+{
+	maybebadio(port);
+	return 0;
+}
+
+void se7206_outb(unsigned char value, unsigned long port)
+{
+	*(port2adr(port)) = value;
+}
+
+void se7206_outb_p(unsigned char value, unsigned long port)
+{
+	*(port2adr(port)) = value;
+	delay();
+}
+
+void se7206_outw(unsigned short value, unsigned long port)
+{
+	*port2adr(port) = value;
+}
+
+void se7206_outl(unsigned int value, unsigned long port)
+{
+	maybebadio(port);
+}
+
+void se7206_insb(unsigned long port, void *addr, unsigned long count)
+{
+	volatile __u16 *p = port2adr(port);
+	__u8 *ap = addr;
+
+	while (count--)
+		*ap++ = *p;
+}
+
+void se7206_insw(unsigned long port, void *addr, unsigned long count)
+{
+	volatile __u16 *p = port2adr(port);
+	__u16 *ap = addr;
+	while (count--)
+		*ap++ = *p;
+}
+
+void se7206_insl(unsigned long port, void *addr, unsigned long count)
+{
+	maybebadio(port);
+}
+
+void se7206_outsb(unsigned long port, const void *addr, unsigned long count)
+{
+	volatile __u16 *p = port2adr(port);
+	const __u8 *ap = addr;
+
+	while (count--)
+		*p = *ap++;
+}
+
+void se7206_outsw(unsigned long port, const void *addr, unsigned long count)
+{
+	volatile __u16 *p = port2adr(port);
+	const __u16 *ap = addr;
+	while (count--)
+		*p = *ap++;
+}
+
+void se7206_outsl(unsigned long port, const void *addr, unsigned long count)
+{
+	maybebadio(port);
+}
diff --git a/arch/sh/boards/se/7206/irq.c b/arch/sh/boards/se/7206/irq.c
new file mode 100644
index 0000000..3fb0c5f
--- /dev/null
+++ b/arch/sh/boards/se/7206/irq.c
@@ -0,0 +1,139 @@
+/*
+ * linux/arch/sh/boards/se/7206/irq.c
+ *
+ * Copyright (C) 2005,2006 Yoshinori Sato
+ *
+ * Hitachi SolutionEngine Support.
+ *
+ */
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <asm/se7206.h>
+
+#define INTSTS0 0x31800000
+#define INTSTS1 0x31800002
+#define INTMSK0 0x31800004
+#define INTMSK1 0x31800006
+#define INTSEL  0x31800008
+
+static void disable_se7206_irq(unsigned int irq)
+{
+	unsigned short val;
+	unsigned short mask = 0xffff ^ (0x0f << 4 * (3 - (IRQ0_IRQ - irq)));
+	unsigned short msk0,msk1;
+
+	/* Set the priority in IPR to 0 */
+	val = ctrl_inw(INTC_IPR01);
+	val &= mask;
+	ctrl_outw(val, INTC_IPR01);
+	/* FPGA mask set */
+	msk0 = ctrl_inw(INTMSK0);
+	msk1 = ctrl_inw(INTMSK1);
+
+	switch (irq) {
+	case IRQ0_IRQ:
+		msk0 |= 0x0010;
+		break;
+	case IRQ1_IRQ:
+		msk0 |= 0x000f;
+		break;
+	case IRQ2_IRQ:
+		msk0 |= 0x0f00;
+		msk1 |= 0x00ff;
+		break;
+	}
+	ctrl_outw(msk0, INTMSK0);
+	ctrl_outw(msk1, INTMSK1);
+}
+
+static void enable_se7206_irq(unsigned int irq)
+{
+	unsigned short val;
+	unsigned short value = (0x0001 << 4 * (3 - (IRQ0_IRQ - irq)));
+	unsigned short msk0,msk1;
+
+	/* Set priority in IPR back to original value */
+	val = ctrl_inw(INTC_IPR01);
+	val |= value;
+	ctrl_outw(val, INTC_IPR01);
+
+	/* FPGA mask reset */
+	msk0 = ctrl_inw(INTMSK0);
+	msk1 = ctrl_inw(INTMSK1);
+
+	switch (irq) {
+	case IRQ0_IRQ:
+		msk0 &= ~0x0010;
+		break;
+	case IRQ1_IRQ:
+		msk0 &= ~0x000f;
+		break;
+	case IRQ2_IRQ:
+		msk0 &= ~0x0f00;
+		msk1 &= ~0x00ff;
+		break;
+	}
+	ctrl_outw(msk0, INTMSK0);
+	ctrl_outw(msk1, INTMSK1);
+}
+
+static void eoi_se7206_irq(unsigned int irq)
+{
+	unsigned short sts0,sts1;
+
+	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+		enable_se7206_irq(irq);
+	/* FPGA isr clear */
+	sts0 = ctrl_inw(INTSTS0);
+	sts1 = ctrl_inw(INTSTS1);
+
+	switch (irq) {
+	case IRQ0_IRQ:
+		sts0 &= ~0x0010;
+		break;
+	case IRQ1_IRQ:
+		sts0 &= ~0x000f;
+		break;
+	case IRQ2_IRQ:
+		sts0 &= ~0x0f00;
+		sts1 &= ~0x00ff;
+		break;
+	}
+	ctrl_outw(sts0, INTSTS0);
+	ctrl_outw(sts1, INTSTS1);
+}
+
+static struct irq_chip se7206_irq_chip __read_mostly = {
+	.name		= "SE7206-FPGA-IRQ",
+	.mask		= disable_se7206_irq,
+	.unmask		= enable_se7206_irq,
+	.mask_ack	= disable_se7206_irq,
+	.eoi		= eoi_se7206_irq,
+};
+
+static void make_se7206_irq(unsigned int irq)
+{
+	disable_irq_nosync(irq);
+	set_irq_chip_and_handler_name(irq, &se7206_irq_chip,
+				      handle_level_irq, "level");
+	disable_se7206_irq(irq);
+}
+
+/*
+ * Initialize IRQ setting
+ */
+void __init init_se7206_IRQ(void)
+{
+	make_se7206_irq(IRQ0_IRQ); /* SMC91C111 */
+	make_se7206_irq(IRQ1_IRQ); /* ATA */
+	make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */
+	ctrl_outw(ctrl_inw(INTC_ICR1) | 0x000b, INTC_ICR1); /* ICR1 */
+
+	/* FPGA System register setup */
+	ctrl_outw(0x0000, INTSTS0); /* Clear INTSTS0 */
+	ctrl_outw(0x0000, INTSTS1); /* Clear INTSTS1 */
+	/* IRQ0=LAN, IRQ1=ATA, IRQ3=SLT,PCM */
+	ctrl_outw(0x0001, INTSEL);
+}
diff --git a/arch/sh/boards/se/7206/led.c b/arch/sh/boards/se/7206/led.c
new file mode 100644
index 0000000..ef79460
--- /dev/null
+++ b/arch/sh/boards/se/7206/led.c
@@ -0,0 +1,57 @@
+/*
+ * linux/arch/sh/boards/se/7206/led.c
+ *
+ * Copyright (C) 2000 Stuart Menefy <stuart.menefy@st.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License.  See linux/COPYING for more information.
+ *
+ * This file contains Solution Engine specific LED code.
+ */
+
+#include <linux/config.h>
+#include <asm/se7206.h>
+
+#ifdef CONFIG_HEARTBEAT
+
+#include <linux/sched.h>
+
+/* Cycle the LEDs in the classic Knight Rider/Sun pattern */
+void heartbeat_se(void)
+{
+	static unsigned int cnt = 0, period = 0;
+	volatile unsigned short* p = (volatile unsigned short*)PA_LED;
+	static unsigned bit = 0, up = 1;
+
+	cnt += 1;
+	if (cnt < period) {
+		return;
+	}
+
+	cnt = 0;
+
+	/* Go through the points (roughly!):
+	 * f(0)=10, f(1)=16, f(2)=20, f(5)=35,f(inf)->110
+	 */
+	period = 110 - ( (300<<FSHIFT)/
+			 ((avenrun[0]/5) + (3<<FSHIFT)) );
+
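+	/*
+	 * Worked out for the points above (avenrun[0] is fixed point,
+	 * scaled by 1 << FSHIFT, so avenrun[0]/5 is (load/5) << FSHIFT):
+	 *   load 0:    110 - 300/3.0 = 10
+	 *   load 1:    110 - 300/3.2 ~ 16
+	 *   load 5:    110 - 300/4.0 = 35
+	 *   load inf:  period -> 110
+	 */
+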
+	if (up) {
+		if (bit == 7) {
+			bit--;
+			up=0;
+		} else {
+			bit ++;
+		}
+	} else {
+		if (bit == 0) {
+			bit++;
+			up=1;
+		} else {
+			bit--;
+		}
+	}
+	*p = 1<<(bit+8);
+
+}
+#endif /* CONFIG_HEARTBEAT */
diff --git a/arch/sh/boards/se/7206/setup.c b/arch/sh/boards/se/7206/setup.c
new file mode 100644
index 0000000..0f42e91
--- /dev/null
+++ b/arch/sh/boards/se/7206/setup.c
@@ -0,0 +1,79 @@
+/*
+ *
+ * linux/arch/sh/boards/se/7206/setup.c
+ *
+ * Copyright (C) 2006  Yoshinori Sato
+ *
+ * Hitachi 7206 SolutionEngine Support.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <asm/se7206.h>
+#include <asm/io.h>
+#include <asm/machvec.h>
+
+static struct resource smc91x_resources[] = {
+	[0] = {
+		.start		= 0x300,
+		.end		= 0x300 + 0x020 - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start		= 64,
+		.end		= 64,
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device smc91x_device = {
+	.name		= "smc91x",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(smc91x_resources),
+	.resource	= smc91x_resources,
+};
+
+static int __init se7206_devices_setup(void)
+{
+	return platform_device_register(&smc91x_device);
+}
+
+__initcall(se7206_devices_setup);
+
+void heartbeat_se(void);
+
+/*
+ * The Machine Vector
+ */
+
+struct sh_machine_vector mv_se __initmv = {
+	.mv_name		= "SolutionEngine",
+	.mv_nr_irqs		= 256,
+	.mv_inb			= se7206_inb,
+	.mv_inw			= se7206_inw,
+	.mv_inl			= se7206_inl,
+	.mv_outb		= se7206_outb,
+	.mv_outw		= se7206_outw,
+	.mv_outl		= se7206_outl,
+
+	.mv_inb_p		= se7206_inb_p,
+	.mv_inw_p		= se7206_inw,
+	.mv_inl_p		= se7206_inl,
+	.mv_outb_p		= se7206_outb_p,
+	.mv_outw_p		= se7206_outw,
+	.mv_outl_p		= se7206_outl,
+
+	.mv_insb		= se7206_insb,
+	.mv_insw		= se7206_insw,
+	.mv_insl		= se7206_insl,
+	.mv_outsb		= se7206_outsb,
+	.mv_outsw		= se7206_outsw,
+	.mv_outsl		= se7206_outsl,
+
+	.mv_init_irq		= init_se7206_IRQ,
+#ifdef CONFIG_HEARTBEAT
+	.mv_heartbeat		= heartbeat_se,
+#endif
+};
+ALIAS_MV(se)
diff --git a/arch/sh/boards/se/7619/Makefile b/arch/sh/boards/se/7619/Makefile
new file mode 100644
index 0000000..3666eca
--- /dev/null
+++ b/arch/sh/boards/se/7619/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the 7619 SolutionEngine specific parts of the kernel
+#
+
+obj-y	 := setup.o io.o
diff --git a/arch/sh/boards/se/7619/io.c b/arch/sh/boards/se/7619/io.c
new file mode 100644
index 0000000..176f1f39c
--- /dev/null
+++ b/arch/sh/boards/se/7619/io.c
@@ -0,0 +1,102 @@
+/*
+ *
+ * linux/arch/sh/boards/se/7619/io.c
+ *
+ * Copyright (C) 2006  Yoshinori Sato
+ *
+ * I/O routine for Hitachi 7619 SolutionEngine.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/se7619.h>
+#include <asm/irq.h>
+
+/* FIXME: M3A-ZAB7 Compact Flash Slot support */
+
+static inline void delay(void)
+{
+	ctrl_inw(0xa0000000);	/* Uncached ROM area (P2) */
+}
+
+#define badio(name,port) \
+  printk("bad I/O operation (%s) for port 0x%lx at 0x%08x\n", \
+	 #name, (port), (__u32) __builtin_return_address(0))
+
+unsigned char se7619___inb(unsigned long port)
+{
+	badio(inb, port);
+	return 0;
+}
+
+unsigned char se7619___inb_p(unsigned long port)
+{
+	badio(inb_p, port);
+	delay();
+	return 0;
+}
+
+unsigned short se7619___inw(unsigned long port)
+{
+	badio(inw, port);
+	return 0;
+}
+
+unsigned int se7619___inl(unsigned long port)
+{
+	badio(inl, port);
+	return 0;
+}
+
+void se7619___outb(unsigned char value, unsigned long port)
+{
+	badio(outb, port);
+}
+
+void se7619___outb_p(unsigned char value, unsigned long port)
+{
+	badio(outb_p, port);
+	delay();
+}
+
+void se7619___outw(unsigned short value, unsigned long port)
+{
+	badio(outw, port);
+}
+
+void se7619___outl(unsigned int value, unsigned long port)
+{
+	badio(outl, port);
+}
+
+void se7619___insb(unsigned long port, void *addr, unsigned long count)
+{
+	badio(insb, port);
+}
+
+void se7619___insw(unsigned long port, void *addr, unsigned long count)
+{
+	badio(insw, port);
+}
+
+void se7619___insl(unsigned long port, void *addr, unsigned long count)
+{
+	badio(insl, port);
+}
+
+void se7619___outsb(unsigned long port, const void *addr, unsigned long count)
+{
+	badio(outsb, port);
+}
+
+void se7619___outsw(unsigned long port, const void *addr, unsigned long count)
+{
+	badio(outsw, port);
+}
+
+void se7619___outsl(unsigned long port, const void *addr, unsigned long count)
+{
+	badio(outsl, port);
+}
diff --git a/arch/sh/boards/se/7619/setup.c b/arch/sh/boards/se/7619/setup.c
new file mode 100644
index 0000000..e627b26
--- /dev/null
+++ b/arch/sh/boards/se/7619/setup.c
@@ -0,0 +1,43 @@
+/*
+ * arch/sh/boards/se/7619/setup.c
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * Hitachi SH7619 SolutionEngine Support.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <asm/io.h>
+#include <asm/se7619.h>
+#include <asm/machvec.h>
+
+/*
+ * The Machine Vector
+ */
+
+struct sh_machine_vector mv_se __initmv = {
+	.mv_name		= "SolutionEngine",
+	.mv_nr_irqs		= 108,
+	.mv_inb			= se7619___inb,
+	.mv_inw			= se7619___inw,
+	.mv_inl			= se7619___inl,
+	.mv_outb		= se7619___outb,
+	.mv_outw		= se7619___outw,
+	.mv_outl		= se7619___outl,
+
+	.mv_inb_p		= se7619___inb_p,
+	.mv_inw_p		= se7619___inw,
+	.mv_inl_p		= se7619___inl,
+	.mv_outb_p		= se7619___outb_p,
+	.mv_outw_p		= se7619___outw,
+	.mv_outl_p		= se7619___outl,
+
+	.mv_insb		= se7619___insb,
+	.mv_insw		= se7619___insw,
+	.mv_insl		= se7619___insl,
+	.mv_outsb		= se7619___outsb,
+	.mv_outsw		= se7619___outsw,
+	.mv_outsl		= se7619___outsl,
+};
+ALIAS_MV(se)
diff --git a/arch/sh/boards/titan/setup.c b/arch/sh/boards/titan/setup.c
index a6046d9..6bcd939 100644
--- a/arch/sh/boards/titan/setup.c
+++ b/arch/sh/boards/titan/setup.c
@@ -1,26 +1,30 @@
 /*
- *	Setup for Titan
+ * arch/sh/boards/titan/setup.c - Setup for Titan
+ *
+ *  Copyright (C) 2006  Jamie Lenehan
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
-
 #include <linux/init.h>
-#include <asm/irq.h>
+#include <linux/irq.h>
 #include <asm/titan.h>
 #include <asm/io.h>
 
-extern void __init pcibios_init_platform(void);
-
 static struct ipr_data titan_ipr_map[] = {
-	{ TITAN_IRQ_WAN,	IRL0_IPR_ADDR,	IRL0_IPR_POS,	IRL0_PRIORITY },
-	{ TITAN_IRQ_LAN,	IRL1_IPR_ADDR,	IRL1_IPR_POS,	IRL1_PRIORITY },
-	{ TITAN_IRQ_MPCIA,	IRL2_IPR_ADDR,	IRL2_IPR_POS,	IRL2_PRIORITY },
-	{ TITAN_IRQ_USB,	IRL3_IPR_ADDR,	IRL3_IPR_POS,	IRL3_PRIORITY },
+	/* IRQ, IPR idx, shift, prio */
+	{ TITAN_IRQ_WAN,   3, 12, 8 },	/* eth0 (WAN) */
+	{ TITAN_IRQ_LAN,   3,  8, 8 },	/* eth1 (LAN) */
+	{ TITAN_IRQ_MPCIA, 3,  4, 8 },	/* mPCI A (top) */
+	{ TITAN_IRQ_USB,   3,  0, 8 },	/* mPCI B (bottom), USB */
 };
 
 static void __init init_titan_irq(void)
 {
 	/* enable individual interrupt mode for externals */
-	ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
-
+	ipr_irq_enable_irlm();
+	/* register ipr irqs */
 	make_ipr_irq(titan_ipr_map, ARRAY_SIZE(titan_ipr_map));
 }
 
@@ -47,6 +51,5 @@
 	.mv_ioport_map = titan_ioport_map,
 
 	.mv_init_irq =	init_titan_irq,
-	.mv_init_pci =	pcibios_init_platform,
 };
 ALIAS_MV(titan)
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index f2fed5c..35452d8 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -12,6 +12,7 @@
  */
 
 #include <asm/uaccess.h>
+#include <asm/addrspace.h>
 #ifdef CONFIG_SH_STANDARD_BIOS
 #include <asm/sh_bios.h>
 #endif
@@ -228,7 +229,7 @@
 void decompress_kernel(void)
 {
 	output_data = 0;
-	output_ptr = (unsigned long)&_text+0x20001000;
+	output_ptr = P2SEGADDR((unsigned long)&_text+0x1000);
 	free_mem_ptr = (unsigned long)&_end;
 	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
 
diff --git a/arch/sh/configs/r7780rp_defconfig b/arch/sh/configs/r7780rp_defconfig
index 34e2046..2b75b48 100644
--- a/arch/sh/configs/r7780rp_defconfig
+++ b/arch/sh/configs/r7780rp_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.19-rc3
-# Tue Oct 31 12:32:06 2006
+# Linux kernel version: 2.6.19
+# Wed Dec  6 11:59:38 2006
 #
 CONFIG_SUPERH=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
@@ -11,6 +11,8 @@
 CONFIG_GENERIC_IRQ_PROBE=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
 # CONFIG_GENERIC_TIME is not set
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 
 #
@@ -37,6 +39,7 @@
 # CONFIG_AUDIT is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
+# CONFIG_SYSFS_DEPRECATED is not set
 # CONFIG_RELAY is not set
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -118,6 +121,8 @@
 # CONFIG_SH_LANDISK is not set
 # CONFIG_SH_TITAN is not set
 # CONFIG_SH_SHMIN is not set
+# CONFIG_SH_7206_SOLUTION_ENGINE is not set
+# CONFIG_SH_7619_SOLUTION_ENGINE is not set
 # CONFIG_SH_UNKNOWN is not set
 
 #
@@ -130,6 +135,12 @@
 # SH-2 Processor Support
 #
 # CONFIG_CPU_SUBTYPE_SH7604 is not set
+# CONFIG_CPU_SUBTYPE_SH7619 is not set
+
+#
+# SH-2A Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7206 is not set
 
 #
 # SH-3 Processor Support
@@ -165,6 +176,7 @@
 #
 # CONFIG_CPU_SUBTYPE_SH7770 is not set
 CONFIG_CPU_SUBTYPE_SH7780=y
+# CONFIG_CPU_SUBTYPE_SH7785 is not set
 
 #
 # SH4AL-DSP Processor Support
@@ -181,8 +193,14 @@
 CONFIG_MEMORY_SIZE=0x08000000
 # CONFIG_32BIT is not set
 CONFIG_VSYSCALL=y
+CONFIG_PAGE_SIZE_4KB=y
+# CONFIG_PAGE_SIZE_8KB is not set
+# CONFIG_PAGE_SIZE_64KB is not set
 CONFIG_HUGETLB_PAGE_SIZE_64K=y
+# CONFIG_HUGETLB_PAGE_SIZE_256K is not set
 # CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
 CONFIG_SELECT_MEMORY_MODEL=y
 CONFIG_FLATMEM_MANUAL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -204,12 +222,14 @@
 # Processor features
 #
 CONFIG_CPU_LITTLE_ENDIAN=y
+# CONFIG_CPU_BIG_ENDIAN is not set
 CONFIG_SH_FPU=y
 # CONFIG_SH_DSP is not set
 CONFIG_SH_STORE_QUEUES=y
 CONFIG_CPU_HAS_INTEVT=y
 CONFIG_CPU_HAS_INTC2_IRQ=y
 CONFIG_CPU_HAS_SR_RB=y
+CONFIG_CPU_HAS_PTEA=y
 
 #
 # Timer support
@@ -220,6 +240,8 @@
 # R7780RP options
 #
 CONFIG_SH_R7780MP=y
+CONFIG_SH_TIMER_IRQ=28
+CONFIG_NO_IDLE_HZ=y
 CONFIG_SH_PCLK_FREQ=32000000
 
 #
@@ -238,13 +260,18 @@
 # CONFIG_HD6446X_SERIES is not set
 
 #
+# Additional SuperH Device Drivers
+#
+CONFIG_PUSH_SWITCH=y
+
+#
 # Kernel features
 #
 # CONFIG_HZ_100 is not set
 CONFIG_HZ_250=y
 # CONFIG_HZ_1000 is not set
 CONFIG_HZ=250
-# CONFIG_KEXEC is not set
+CONFIG_KEXEC=y
 # CONFIG_SMP is not set
 # CONFIG_PREEMPT_NONE is not set
 # CONFIG_PREEMPT_VOLUNTARY is not set
@@ -278,10 +305,7 @@
 #
 # PCI Hotplug Support
 #
-CONFIG_HOTPLUG_PCI=y
-# CONFIG_HOTPLUG_PCI_FAKE is not set
-# CONFIG_HOTPLUG_PCI_CPCI is not set
-# CONFIG_HOTPLUG_PCI_SHPC is not set
+# CONFIG_HOTPLUG_PCI is not set
 
 #
 # Executable file formats
@@ -341,6 +365,7 @@
 # CONFIG_TCP_CONG_ADVANCED is not set
 CONFIG_TCP_CONG_CUBIC=y
 CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
 # CONFIG_IPV6 is not set
 # CONFIG_INET6_XFRM_TUNNEL is not set
 # CONFIG_INET6_TUNNEL is not set
@@ -556,6 +581,7 @@
 # CONFIG_PATA_IT821X is not set
 # CONFIG_PATA_JMICRON is not set
 # CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
 # CONFIG_PATA_MPIIX is not set
 # CONFIG_PATA_OLDPIIX is not set
 # CONFIG_PATA_NETCELL is not set
@@ -572,6 +598,7 @@
 # CONFIG_PATA_SIS is not set
 # CONFIG_PATA_VIA is not set
 # CONFIG_PATA_WINBOND is not set
+CONFIG_PATA_PLATFORM=y
 
 #
 # Multi-device support (RAID and LVM)
@@ -688,6 +715,7 @@
 # CONFIG_IXGB is not set
 # CONFIG_S2IO is not set
 # CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
 
 #
 # Token Ring devices
@@ -830,10 +858,6 @@
 # CONFIG_DTLK is not set
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
-#
 # CONFIG_DRM is not set
 # CONFIG_RAW_DRIVER is not set
 
@@ -1020,7 +1044,7 @@
 CONFIG_DNOTIFY=y
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
-# CONFIG_FUSE_FS is not set
+CONFIG_FUSE_FS=m
 
 #
 # CD-ROM/DVD Filesystems
@@ -1052,7 +1076,7 @@
 CONFIG_HUGETLBFS=y
 CONFIG_HUGETLB_PAGE=y
 CONFIG_RAMFS=y
-# CONFIG_CONFIGFS_FS is not set
+CONFIG_CONFIGFS_FS=m
 
 #
 # Miscellaneous filesystems
@@ -1153,28 +1177,33 @@
 #
 # Profiling support
 #
-# CONFIG_PROFILING is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
 
 #
 # Kernel hacking
 #
-# CONFIG_PRINTK_TIME is not set
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
 CONFIG_ENABLE_MUST_CHECK=y
-# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_KERNEL=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_DETECT_SOFTLOCKUP=y
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_DEBUG_SLAB is not set
-CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_MUTEXES is not set
 # CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
 # CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_DEBUG_INFO is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
 # CONFIG_DEBUG_VM is not set
 # CONFIG_DEBUG_LIST is not set
@@ -1184,7 +1213,7 @@
 # CONFIG_RCU_TORTURE_TEST is not set
 # CONFIG_SH_STANDARD_BIOS is not set
 # CONFIG_EARLY_SCIF_CONSOLE is not set
-# CONFIG_DEBUG_STACKOVERFLOW is not set
+CONFIG_DEBUG_STACKOVERFLOW=y
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_4KSTACKS is not set
 # CONFIG_KGDB is not set
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
new file mode 100644
index 0000000..36cec0b
--- /dev/null
+++ b/arch/sh/configs/se7206_defconfig
@@ -0,0 +1,826 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.19-rc4
+# Sun Nov  5 16:20:10 2006
+#
+CONFIG_SUPERH=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+# CONFIG_GENERIC_TIME is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SYSVIPC is not set
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+# CONFIG_HOTPLUG is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+# CONFIG_FUTEX is not set
+# CONFIG_EPOLL is not set
+CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_TINY_SHMEM=y
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+# CONFIG_MODULES is not set
+
+#
+# Block layer
+#
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_LSF is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+
+#
+# System type
+#
+# CONFIG_SH_SOLUTION_ENGINE is not set
+# CONFIG_SH_7751_SOLUTION_ENGINE is not set
+# CONFIG_SH_7300_SOLUTION_ENGINE is not set
+# CONFIG_SH_7343_SOLUTION_ENGINE is not set
+# CONFIG_SH_73180_SOLUTION_ENGINE is not set
+# CONFIG_SH_7751_SYSTEMH is not set
+# CONFIG_SH_HP6XX is not set
+# CONFIG_SH_EC3104 is not set
+# CONFIG_SH_SATURN is not set
+# CONFIG_SH_DREAMCAST is not set
+# CONFIG_SH_BIGSUR is not set
+# CONFIG_SH_MPC1211 is not set
+# CONFIG_SH_SH03 is not set
+# CONFIG_SH_SECUREEDGE5410 is not set
+# CONFIG_SH_HS7751RVOIP is not set
+# CONFIG_SH_7710VOIPGW is not set
+# CONFIG_SH_RTS7751R2D is not set
+# CONFIG_SH_R7780RP is not set
+# CONFIG_SH_EDOSK7705 is not set
+# CONFIG_SH_SH4202_MICRODEV is not set
+# CONFIG_SH_LANDISK is not set
+# CONFIG_SH_TITAN is not set
+# CONFIG_SH_SHMIN is not set
+CONFIG_SH_7206_SOLUTION_ENGINE=y
+# CONFIG_SH_7619_SOLUTION_ENGINE is not set
+# CONFIG_SH_UNKNOWN is not set
+
+#
+# Processor selection
+#
+CONFIG_CPU_SH2=y
+CONFIG_CPU_SH2A=y
+
+#
+# SH-2 Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7604 is not set
+# CONFIG_CPU_SUBTYPE_SH7619 is not set
+
+#
+# SH-2A Processor Support
+#
+CONFIG_CPU_SUBTYPE_SH7206=y
+
+#
+# SH-3 Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7300 is not set
+# CONFIG_CPU_SUBTYPE_SH7705 is not set
+# CONFIG_CPU_SUBTYPE_SH7706 is not set
+# CONFIG_CPU_SUBTYPE_SH7707 is not set
+# CONFIG_CPU_SUBTYPE_SH7708 is not set
+# CONFIG_CPU_SUBTYPE_SH7709 is not set
+# CONFIG_CPU_SUBTYPE_SH7710 is not set
+
+#
+# SH-4 Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7750 is not set
+# CONFIG_CPU_SUBTYPE_SH7091 is not set
+# CONFIG_CPU_SUBTYPE_SH7750R is not set
+# CONFIG_CPU_SUBTYPE_SH7750S is not set
+# CONFIG_CPU_SUBTYPE_SH7751 is not set
+# CONFIG_CPU_SUBTYPE_SH7751R is not set
+# CONFIG_CPU_SUBTYPE_SH7760 is not set
+# CONFIG_CPU_SUBTYPE_SH4_202 is not set
+
+#
+# ST40 Processor Support
+#
+# CONFIG_CPU_SUBTYPE_ST40STB1 is not set
+# CONFIG_CPU_SUBTYPE_ST40GX1 is not set
+
+#
+# SH-4A Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7770 is not set
+# CONFIG_CPU_SUBTYPE_SH7780 is not set
+
+#
+# SH4AL-DSP Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH73180 is not set
+# CONFIG_CPU_SUBTYPE_SH7343 is not set
+
+#
+# Memory management options
+#
+CONFIG_PAGE_OFFSET=0x00000000
+CONFIG_MEMORY_START=0x0c000000
+CONFIG_MEMORY_SIZE=0x02000000
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+
+#
+# Cache configuration
+#
+# CONFIG_SH_DIRECT_MAPPED is not set
+# CONFIG_SH_WRITETHROUGH is not set
+# CONFIG_SH_OCRAM is not set
+
+#
+# Processor features
+#
+# CONFIG_CPU_LITTLE_ENDIAN is not set
+# CONFIG_SH_FPU is not set
+# CONFIG_SH_FPU_EMU is not set
+# CONFIG_SH_DSP is not set
+
+#
+# Timer support
+#
+CONFIG_SH_CMT=y
+# CONFIG_SH_MTU2 is not set
+CONFIG_SH_PCLK_FREQ=33333333
+CONFIG_SH_CLK_MD=6
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# DMA support
+#
+# CONFIG_SH_DMA is not set
+
+#
+# Companion Chips
+#
+# CONFIG_HD6446X_SERIES is not set
+
+#
+# Kernel features
+#
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+# CONFIG_KEXEC is not set
+# CONFIG_SMP is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+
+#
+# Boot options
+#
+CONFIG_ZERO_PAGE_OFFSET=0x00001000
+CONFIG_BOOT_LINK_OFFSET=0x00800000
+# CONFIG_UBC_WAKEUP is not set
+# CONFIG_CMDLINE_BOOL is not set
+
+#
+# Bus options
+#
+# CONFIG_PCI is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+
+#
+# PCI Hotplug Support
+#
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_ZFLAT=y
+# CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options (EXPERIMENTAL)
+#
+# CONFIG_PM is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+# CONFIG_PACKET is not set
+# CONFIG_UNIX is not set
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_SYS_HYPERVISOR is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
+# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0x20000000
+CONFIG_MTD_PHYSMAP_LEN=0x1000000
+CONFIG_MTD_PHYSMAP_BANKWIDTH=4
+# CONFIG_MTD_SOLUTIONENGINE is not set
+# CONFIG_MTD_UCLINUX is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# Misc devices
+#
+# CONFIG_TIFM_CORE is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_NETLINK is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+# CONFIG_ATA is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Network device support
+#
+# CONFIG_NETDEVICES is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=4
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_UNIX98_PTYS is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FIRMWARE_EDID=y
+# CONFIG_FB is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# InfiniBand support
+#
+
+#
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+CONFIG_ROMFS_FS=y
+# CONFIG_INOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+# CONFIG_SYSFS is not set
+# CONFIG_TMPFS is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_UNWIND_INFO is not set
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_SH_STANDARD_BIOS is not set
+# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_KGDB is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=y
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
diff --git a/arch/sh/drivers/Kconfig b/arch/sh/drivers/Kconfig
new file mode 100644
index 0000000..c54c758
--- /dev/null
+++ b/arch/sh/drivers/Kconfig
@@ -0,0 +1,9 @@
+menu "Additional SuperH Device Drivers"
+
+config PUSH_SWITCH
+	tristate "Push switch support"
+	help
+	  This enables the push switch framework, which provides simple
+	  sysfs-driven switch status reporting.
+
+endmenu
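For a rough picture of the sysfs reporting mentioned in the help text above: each registered switch appears as a "push-switch" platform device with a read-only "switch" attribute carrying the switch name, while state changes are signalled as KOBJ_CHANGE uevents. A minimal userspace reader might look like the following; the device path is an assumption based on the platform device ids used by the boards in this series.

#include <stdio.h>

int main(void)
{
	/* Hypothetical path; the exact device name depends on the board setup. */
	FILE *f = fopen("/sys/devices/platform/push-switch.1/switch", "r");
	char name[32];

	if (!f)
		return 1;
	if (fgets(name, sizeof(name), f))
		printf("switch name: %s", name);
	fclose(f);
	return 0;
}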
diff --git a/arch/sh/drivers/Makefile b/arch/sh/drivers/Makefile
index 338c372..bf18dbf 100644
--- a/arch/sh/drivers/Makefile
+++ b/arch/sh/drivers/Makefile
@@ -5,4 +5,4 @@
 obj-$(CONFIG_PCI)		+= pci/
 obj-$(CONFIG_SH_DMA)		+= dma/
 obj-$(CONFIG_SUPERHYWAY)	+= superhyway/
-
+obj-$(CONFIG_PUSH_SWITCH)	+= push-switch.o
diff --git a/arch/sh/drivers/dma/Makefile b/arch/sh/drivers/dma/Makefile
index 065d4c9..db1295d 100644
--- a/arch/sh/drivers/dma/Makefile
+++ b/arch/sh/drivers/dma/Makefile
@@ -2,8 +2,8 @@
 # Makefile for the SuperH DMA specific kernel interface routines under Linux.
 #
 
-obj-y				+= dma-api.o dma-isa.o
+obj-y				+= dma-api.o
+obj-$(CONFIG_ISA_DMA_API)	+= dma-isa.o
 obj-$(CONFIG_SYSFS)		+= dma-sysfs.o
 obj-$(CONFIG_SH_DMA)		+= dma-sh.o
 obj-$(CONFIG_SH_DREAMCAST)	+= dma-pvr2.o dma-g2.o
-
diff --git a/arch/sh/drivers/dma/dma-api.c b/arch/sh/drivers/dma/dma-api.c
index 47c3e83..e062067 100644
--- a/arch/sh/drivers/dma/dma-api.c
+++ b/arch/sh/drivers/dma/dma-api.c
@@ -11,61 +11,27 @@
  */
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/proc_fs.h>
 #include <linux/list.h>
 #include <linux/platform_device.h>
+#include <linux/mm.h>
 #include <asm/dma.h>
 
 DEFINE_SPINLOCK(dma_spin_lock);
 static LIST_HEAD(registered_dmac_list);
 
-/*
- * A brief note about the reasons for this API as it stands.
- *
- * For starters, the old ISA DMA API didn't work for us for a number of
- * reasons, for one, the vast majority of channels on the SH DMAC are
- * dual-address mode only, and both the new and the old DMA APIs are after the
- * concept of managing a DMA buffer, which doesn't overly fit this model very
- * well. In addition to which, the new API is largely geared at IOMMUs and
- * GARTs, and doesn't even support the channel notion very well.
- *
- * The other thing that's a marginal issue, is the sheer number of random DMA
- * engines that are present (ie, in boards like the Dreamcast), some of which
- * cascade off of the SH DMAC, and others do not. As such, there was a real
- * need for a scalable subsystem that could deal with both single and
- * dual-address mode usage, in addition to interoperating with cascaded DMACs.
- *
- * There really isn't any reason why this needs to be SH specific, though I'm
- * not aware of too many other processors (with the exception of some MIPS)
- * that have the same concept of a dual address mode, or any real desire to
- * actually make use of the DMAC even if such a subsystem were exposed
- * elsewhere.
- *
- * The idea for this was derived from the ARM port, which acted as an excellent
- * reference when trying to address these issues.
- *
- * It should also be noted that the decision to add Yet Another DMA API(tm) to
- * the kernel wasn't made easily, and was only decided upon after conferring
- * with jejb with regards to the state of the old and new APIs as they applied
- * to these circumstances. Philip Blundell was also a great help in figuring
- * out some single-address mode DMA semantics that were otherwise rather
- * confusing.
- */
-
 struct dma_info *get_dma_info(unsigned int chan)
 {
 	struct dma_info *info;
-	unsigned int total = 0;
 
 	/*
 	 * Look for each DMAC's range to determine who the owner of
 	 * the channel is.
 	 */
 	list_for_each_entry(info, &registered_dmac_list, list) {
-		total += info->nr_channels;
-		if (chan > total)
+		if ((chan <  info->first_channel_nr) ||
+		    (chan >= info->first_channel_nr + info->nr_channels))
 			continue;
 
 		return info;
@@ -73,6 +39,22 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL(get_dma_info);
+
+struct dma_info *get_dma_info_by_name(const char *dmac_name)
+{
+	struct dma_info *info;
+
+	list_for_each_entry(info, &registered_dmac_list, list) {
+		if (dmac_name && (strcmp(dmac_name, info->name) != 0))
+			continue;
+		else
+			return info;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(get_dma_info_by_name);
 
 static unsigned int get_nr_channels(void)
 {
@@ -91,63 +73,161 @@
 struct dma_channel *get_dma_channel(unsigned int chan)
 {
 	struct dma_info *info = get_dma_info(chan);
+	struct dma_channel *channel;
+	int i;
 
-	if (!info)
+	if (unlikely(!info))
 		return ERR_PTR(-EINVAL);
 
-	return info->channels + chan;
+	for (i = 0; i < info->nr_channels; i++) {
+		channel = &info->channels[i];
+		if (channel->chan == chan)
+			return channel;
+	}
+
+	return NULL;
 }
+EXPORT_SYMBOL(get_dma_channel);
 
 int get_dma_residue(unsigned int chan)
 {
 	struct dma_info *info = get_dma_info(chan);
-	struct dma_channel *channel = &info->channels[chan];
+	struct dma_channel *channel = get_dma_channel(chan);
 
 	if (info->ops->get_residue)
 		return info->ops->get_residue(channel);
 
 	return 0;
 }
+EXPORT_SYMBOL(get_dma_residue);
 
-int request_dma(unsigned int chan, const char *dev_id)
+static int search_cap(const char **haystack, const char *needle)
 {
-	struct dma_info *info = get_dma_info(chan);
-	struct dma_channel *channel = &info->channels[chan];
+	const char **p;
 
-	down(&channel->sem);
-
-	if (!info->ops || chan >= MAX_DMA_CHANNELS) {
-		up(&channel->sem);
-		return -EINVAL;
-	}
-
-	atomic_set(&channel->busy, 1);
-
-	strlcpy(channel->dev_id, dev_id, sizeof(channel->dev_id));
-
-	up(&channel->sem);
-
-	if (info->ops->request)
-		return info->ops->request(channel);
+	for (p = haystack; *p; p++)
+		if (strcmp(*p, needle) == 0)
+			return 1;
 
 	return 0;
 }
 
+/**
+ * request_dma_bycap - Allocate a DMA channel based on its capabilities
+ * @dmac: List of DMA controllers to search
+ * @caps: List of capabilities
+ * @dev_id: Device ID string to register the channel under
+ *
+ * Search the channels of the given DMA controller for one which
+ * matches the requested capabilities. The result is the channel
+ * number if a match is found, or a negative error code if no
+ * suitable channel is found.
+ *
+ * Note that not all DMA controllers export capabilities, in which
+ * case they can never be allocated using this API, and so
+ * request_dma() must be used, specifying the channel number directly.
+ */
+int request_dma_bycap(const char **dmac, const char **caps, const char *dev_id)
+{
+	unsigned int found = 0;
+	struct dma_info *info;
+	const char **p;
+	int i;
+
+	BUG_ON(!dmac || !caps);
+
+	list_for_each_entry(info, &registered_dmac_list, list)
+		if (strcmp(*dmac, info->name) == 0) {
+			found = 1;
+			break;
+		}
+
+	if (!found)
+		return -ENODEV;
+
+	for (i = 0; i < info->nr_channels; i++) {
+		struct dma_channel *channel = &info->channels[i];
+
+		if (unlikely(!channel->caps))
+			continue;
+
+		for (p = caps; *p; p++) {
+			if (!search_cap(channel->caps, *p))
+				break;
+			if (request_dma(channel->chan, dev_id) == 0)
+				return channel->chan;
+		}
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(request_dma_bycap);
+
+int dmac_search_free_channel(const char *dev_id)
+{
+	struct dma_channel *channel = NULL;
+	struct dma_info *info = get_dma_info(0);
+	int i;
+
+	for (i = 0; i < info->nr_channels; i++) {
+		channel = &info->channels[i];
+		if (unlikely(!channel))
+			return -ENODEV;
+
+		if (atomic_read(&channel->busy) == 0)
+			break;
+	}
+
+	if (info->ops->request) {
+		int result = info->ops->request(channel);
+		if (result)
+			return result;
+
+		atomic_set(&channel->busy, 1);
+		return channel->chan;
+	}
+
+	return -ENOSYS;
+}
+
+int request_dma(unsigned int chan, const char *dev_id)
+{
+	struct dma_channel *channel;
+	struct dma_info *info = get_dma_info(chan);
+	int result;
+
+	channel = get_dma_channel(chan);
+	if (atomic_xchg(&channel->busy, 1))
+		return -EBUSY;
+
+	strlcpy(channel->dev_id, dev_id, sizeof(channel->dev_id));
+
+	if (info->ops->request) {
+		result = info->ops->request(channel);
+		if (result)
+			atomic_set(&channel->busy, 0);
+
+		return result;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(request_dma);
+
 void free_dma(unsigned int chan)
 {
 	struct dma_info *info = get_dma_info(chan);
-	struct dma_channel *channel = &info->channels[chan];
+	struct dma_channel *channel = get_dma_channel(chan);
 
 	if (info->ops->free)
 		info->ops->free(channel);
 
 	atomic_set(&channel->busy, 0);
 }
+EXPORT_SYMBOL(free_dma);
 
 void dma_wait_for_completion(unsigned int chan)
 {
 	struct dma_info *info = get_dma_info(chan);
-	struct dma_channel *channel = &info->channels[chan];
+	struct dma_channel *channel = get_dma_channel(chan);
 
 	if (channel->flags & DMA_TEI_CAPABLE) {
 		wait_event(channel->wait_queue,
@@ -158,21 +238,52 @@
 	while (info->ops->get_residue(channel))
 		cpu_relax();
 }
+EXPORT_SYMBOL(dma_wait_for_completion);
+
+int register_chan_caps(const char *dmac, struct dma_chan_caps *caps)
+{
+	struct dma_info *info;
+	unsigned int found = 0;
+	int i;
+
+	list_for_each_entry(info, &registered_dmac_list, list)
+		if (strcmp(dmac, info->name) == 0) {
+			found = 1;
+			break;
+		}
+
+	if (unlikely(!found))
+		return -ENODEV;
+
+	for (i = 0; i < info->nr_channels; i++, caps++) {
+		struct dma_channel *channel;
+
+		if ((info->first_channel_nr + i) != caps->ch_num)
+			return -EINVAL;
+
+		channel = &info->channels[i];
+		channel->caps = caps->caplist;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(register_chan_caps);
 
 void dma_configure_channel(unsigned int chan, unsigned long flags)
 {
 	struct dma_info *info = get_dma_info(chan);
-	struct dma_channel *channel = &info->channels[chan];
+	struct dma_channel *channel = get_dma_channel(chan);
 
 	if (info->ops->configure)
 		info->ops->configure(channel, flags);
 }
+EXPORT_SYMBOL(dma_configure_channel);
 
 int dma_xfer(unsigned int chan, unsigned long from,
 	     unsigned long to, size_t size, unsigned int mode)
 {
 	struct dma_info *info = get_dma_info(chan);
-	struct dma_channel *channel = &info->channels[chan];
+	struct dma_channel *channel = get_dma_channel(chan);
 
 	channel->sar	= from;
 	channel->dar	= to;
@@ -181,8 +292,20 @@
 
 	return info->ops->xfer(channel);
 }
+EXPORT_SYMBOL(dma_xfer);
 
-#ifdef CONFIG_PROC_FS
+int dma_extend(unsigned int chan, unsigned long op, void *param)
+{
+	struct dma_info *info = get_dma_info(chan);
+	struct dma_channel *channel = get_dma_channel(chan);
+
+	if (info->ops->extend)
+		return info->ops->extend(channel, op, param);
+
+	return -ENOSYS;
+}
+EXPORT_SYMBOL(dma_extend);
+
 static int dma_read_proc(char *buf, char **start, off_t off,
 			 int len, int *eof, void *data)
 {
@@ -214,8 +337,6 @@
 
 	return p - buf;
 }
-#endif
-
 
 int register_dmac(struct dma_info *info)
 {
@@ -224,8 +345,7 @@
 	INIT_LIST_HEAD(&info->list);
 
 	printk(KERN_INFO "DMA: Registering %s handler (%d channel%s).\n",
-	       info->name, info->nr_channels,
-	       info->nr_channels > 1 ? "s" : "");
+	       info->name, info->nr_channels, info->nr_channels > 1 ? "s" : "");
 
 	BUG_ON((info->flags & DMAC_CHANNELS_CONFIGURED) && !info->channels);
 
@@ -242,28 +362,26 @@
 
 		size = sizeof(struct dma_channel) * info->nr_channels;
 
-		info->channels = kmalloc(size, GFP_KERNEL);
+		info->channels = kzalloc(size, GFP_KERNEL);
 		if (!info->channels)
 			return -ENOMEM;
-
-		memset(info->channels, 0, size);
 	}
 
 	total_channels = get_nr_channels();
 	for (i = 0; i < info->nr_channels; i++) {
-		struct dma_channel *chan = info->channels + i;
+		struct dma_channel *chan = &info->channels[i];
 
-		chan->chan = i;
-		chan->vchan = i + total_channels;
+		atomic_set(&chan->busy, 0);
+
+		chan->chan  = info->first_channel_nr + i;
+		chan->vchan = info->first_channel_nr + i + total_channels;
 
 		memcpy(chan->dev_id, "Unused", 7);
 
 		if (info->flags & DMAC_CHANNELS_TEI_CAPABLE)
 			chan->flags |= DMA_TEI_CAPABLE;
 
-		init_MUTEX(&chan->sem);
 		init_waitqueue_head(&chan->wait_queue);
-
 		dma_create_sysfs_files(chan, info);
 	}
 
@@ -271,6 +389,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(register_dmac);
 
 void unregister_dmac(struct dma_info *info)
 {
@@ -285,31 +404,16 @@
 	list_del(&info->list);
 	platform_device_unregister(info->pdev);
 }
+EXPORT_SYMBOL(unregister_dmac);
 
 static int __init dma_api_init(void)
 {
-	printk("DMA: Registering DMA API.\n");
-
-#ifdef CONFIG_PROC_FS
+	printk(KERN_NOTICE "DMA: Registering DMA API.\n");
 	create_proc_read_entry("dma", 0, 0, dma_read_proc, 0);
-#endif
-
 	return 0;
 }
-
 subsys_initcall(dma_api_init);
 
 MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
 MODULE_DESCRIPTION("DMA API for SuperH");
 MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(request_dma);
-EXPORT_SYMBOL(free_dma);
-EXPORT_SYMBOL(register_dmac);
-EXPORT_SYMBOL(get_dma_residue);
-EXPORT_SYMBOL(get_dma_info);
-EXPORT_SYMBOL(get_dma_channel);
-EXPORT_SYMBOL(dma_xfer);
-EXPORT_SYMBOL(dma_wait_for_completion);
-EXPORT_SYMBOL(dma_configure_channel);
-
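As a usage sketch for the capability-based allocation added above (request_dma_bycap() together with register_chan_caps()): the DMAC and capability names below are placeholders, since they depend entirely on what a given controller registers.

static const char *dmac_list[] = { "SuperH DMAC", NULL };	/* example name */
static const char *cap_list[]  = { "example-cap", NULL };	/* placeholder */

static int example_acquire_channel(void)
{
	int chan = request_dma_bycap(dmac_list, cap_list, "example-driver");

	if (chan < 0)
		return chan;	/* no matching DMAC or free channel */

	dma_configure_channel(chan, 0);
	/* ... dma_xfer(chan, src, dst, len, mode), dma_wait_for_completion(chan) ... */
	free_dma(chan);

	return 0;
}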
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index 6607860..f63721e 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -94,20 +94,13 @@
 	if (unlikely(!(chan->flags & DMA_TEI_CAPABLE)))
 		return 0;
 
-	chan->name = kzalloc(32, GFP_KERNEL);
-	if (unlikely(chan->name == NULL))
-		return -ENOMEM;
-	snprintf(chan->name, 32, "DMAC Transfer End (Channel %d)",
-		 chan->chan);
-
 	return request_irq(get_dmte_irq(chan->chan), dma_tei,
-			   IRQF_DISABLED, chan->name, chan);
+			   IRQF_DISABLED, chan->dev_id, chan);
 }
 
 static void sh_dmac_free_dma(struct dma_channel *chan)
 {
 	free_irq(get_dmte_irq(chan->chan), chan);
-	kfree(chan->name);
 }
 
 static void
diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
index 29b8ef9..eebcd47 100644
--- a/arch/sh/drivers/dma/dma-sysfs.c
+++ b/arch/sh/drivers/dma/dma-sysfs.c
@@ -3,7 +3,7 @@
  *
  * sysfs interface for SH DMA API
  *
- * Copyright (C) 2004, 2005  Paul Mundt
+ * Copyright (C) 2004 - 2006  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -21,7 +21,6 @@
 static struct sysdev_class dma_sysclass = {
 	set_kset_name("dma"),
 };
-
 EXPORT_SYMBOL(dma_sysclass);
 
 static ssize_t dma_show_devices(struct sys_device *dev, char *buf)
@@ -31,7 +30,10 @@
 
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
 		struct dma_info *info = get_dma_info(i);
-		struct dma_channel *channel = &info->channels[i];
+		struct dma_channel *channel = get_dma_channel(i);
+
+		if (unlikely(!info) || !channel)
+			continue;
 
 		len += sprintf(buf + len, "%2d: %14s    %s\n",
 			       channel->chan, info->name,
@@ -125,11 +127,16 @@
 	if (ret)
 		return ret;
 
-	sysdev_create_file(dev, &attr_dev_id);
-	sysdev_create_file(dev, &attr_count);
-	sysdev_create_file(dev, &attr_mode);
-	sysdev_create_file(dev, &attr_flags);
-	sysdev_create_file(dev, &attr_config);
+	ret |= sysdev_create_file(dev, &attr_dev_id);
+	ret |= sysdev_create_file(dev, &attr_count);
+	ret |= sysdev_create_file(dev, &attr_mode);
+	ret |= sysdev_create_file(dev, &attr_flags);
+	ret |= sysdev_create_file(dev, &attr_config);
+
+	if (unlikely(ret)) {
+		dev_err(&info->pdev->dev, "Failed creating attrs\n");
+		return ret;
+	}
 
 	snprintf(name, sizeof(name), "dma%d", chan->chan);
 	return sysfs_create_link(&info->pdev->dev.kobj, &dev->kobj, name);
diff --git a/arch/sh/drivers/pci/ops-titan.c b/arch/sh/drivers/pci/ops-titan.c
index cd56d53..ac8ee23 100644
--- a/arch/sh/drivers/pci/ops-titan.c
+++ b/arch/sh/drivers/pci/ops-titan.c
@@ -15,25 +15,21 @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/pci.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/titan.h>
 #include "pci-sh4.h"
 
+static char titan_irq_tab[] __initdata = {
+	TITAN_IRQ_WAN,
+	TITAN_IRQ_LAN,
+	TITAN_IRQ_MPCIA,
+	TITAN_IRQ_MPCIB,
+	TITAN_IRQ_USB,
+};
+
 int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
 {
-	int irq = -1;
-
-	switch (slot) {
-	case 0: irq = TITAN_IRQ_WAN;   break;	/* eth0 (WAN) */
-	case 1: irq = TITAN_IRQ_LAN;   break;	/* eth1 (LAN) */
-	case 2: irq = TITAN_IRQ_MPCIA; break;	/* mPCI A */
-	case 3: irq = TITAN_IRQ_MPCIB; break;	/* mPCI B */
-	case 4: irq = TITAN_IRQ_USB;   break;	/* USB */
-	default:
-		printk(KERN_INFO "PCI: Bad IRQ mapping "
-				 "request for slot %d\n", slot);
-		return -1;
-	}
+	int irq = titan_irq_tab[slot];
 
 	printk("PCI: Mapping TITAN IRQ for slot %d, pin %c to irq %d\n",
 		slot, pin - 1 + 'A', irq);
diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c
index d6e6352..602b644 100644
--- a/arch/sh/drivers/pci/pci-sh7780.c
+++ b/arch/sh/drivers/pci/pci-sh7780.c
@@ -22,6 +22,20 @@
 #include <linux/delay.h>
 #include "pci-sh4.h"
 
+#define INTC_BASE	0xffd00000
+#define INTC_ICR0	(INTC_BASE+0x0)
+#define INTC_ICR1	(INTC_BASE+0x1c)
+#define INTC_INTPRI	(INTC_BASE+0x10)
+#define INTC_INTREQ	(INTC_BASE+0x24)
+#define INTC_INTMSK0	(INTC_BASE+0x44)
+#define INTC_INTMSK1	(INTC_BASE+0x48)
+#define INTC_INTMSK2	(INTC_BASE+0x40080)
+#define INTC_INTMSKCLR0	(INTC_BASE+0x64)
+#define INTC_INTMSKCLR1	(INTC_BASE+0x68)
+#define INTC_INTMSKCLR2	(INTC_BASE+0x40084)
+#define INTC_INT2MSKR	(INTC_BASE+0x40038)
+#define INTC_INT2MSKCR	(INTC_BASE+0x4003c)
+
 /*
  * Initialization. Try all known PCI access methods. Note that we support
  * using both PCI BIOS and direct access: in such cases, we use I/O ports
diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c
new file mode 100644
index 0000000..f2b9157
--- /dev/null
+++ b/arch/sh/drivers/push-switch.c
@@ -0,0 +1,138 @@
+/*
+ * Generic push-switch framework
+ *
+ * Copyright (C) 2006  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <asm/push-switch.h>
+
+#define DRV_NAME "push-switch"
+#define DRV_VERSION "0.1.0"
+
+static ssize_t switch_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	struct push_switch_platform_info *psw_info = dev->platform_data;
+	return sprintf(buf, "%s\n", psw_info->name);
+}
+static DEVICE_ATTR(switch, S_IRUGO, switch_show, NULL);
+
+static void switch_timer(unsigned long data)
+{
+	struct push_switch *psw = (struct push_switch *)data;
+
+	schedule_work(&psw->work);
+}
+
+static void switch_work_handler(void *data)
+{
+	struct platform_device *pdev = data;
+	struct push_switch *psw = platform_get_drvdata(pdev);
+
+	psw->state = 0;
+
+	kobject_uevent(&pdev->dev.kobj, KOBJ_CHANGE);
+}
+
+static int switch_drv_probe(struct platform_device *pdev)
+{
+	struct push_switch_platform_info *psw_info;
+	struct push_switch *psw;
+	int ret, irq;
+
+	psw = kzalloc(sizeof(struct push_switch), GFP_KERNEL);
+	if (unlikely(!psw))
+		return -ENOMEM;
+
+	irq = platform_get_irq(pdev, 0);
+	if (unlikely(irq < 0)) {
+		ret = -ENODEV;
+		goto err;
+	}
+
+	psw_info = pdev->dev.platform_data;
+	BUG_ON(!psw_info);
+
+	ret = request_irq(irq, psw_info->irq_handler,
+			  IRQF_DISABLED | psw_info->irq_flags,
+			  psw_info->name ? psw_info->name : DRV_NAME, pdev);
+	if (unlikely(ret < 0))
+		goto err;
+
+	if (psw_info->name) {
+		ret = device_create_file(&pdev->dev, &dev_attr_switch);
+		if (unlikely(ret)) {
+			dev_err(&pdev->dev, "Failed creating device attrs\n");
+			ret = -EINVAL;
+			goto err_irq;
+		}
+	}
+
+	INIT_WORK(&psw->work, switch_work_handler, pdev);
+	init_timer(&psw->debounce);
+
+	psw->debounce.function = switch_timer;
+	psw->debounce.data = (unsigned long)psw;
+
+	platform_set_drvdata(pdev, psw);
+
+	return 0;
+
+err_irq:
+	free_irq(irq, pdev);
+err:
+	kfree(psw);
+	return ret;
+}
+
+static int switch_drv_remove(struct platform_device *pdev)
+{
+	struct push_switch *psw = platform_get_drvdata(pdev);
+	struct push_switch_platform_info *psw_info = pdev->dev.platform_data;
+	int irq = platform_get_irq(pdev, 0);
+
+	if (psw_info->name)
+		device_remove_file(&pdev->dev, &dev_attr_switch);
+
+	platform_set_drvdata(pdev, NULL);
+	flush_scheduled_work();
+	del_timer_sync(&psw->debounce);
+	free_irq(irq, pdev);
+
+	kfree(psw);
+
+	return 0;
+}
+
+static struct platform_driver switch_driver = {
+	.probe		= switch_drv_probe,
+	.remove		= switch_drv_remove,
+	.driver		= {
+		.name	= DRV_NAME,
+	},
+};
+
+static int __init switch_init(void)
+{
+	printk(KERN_NOTICE DRV_NAME ": version %s loaded\n", DRV_VERSION);
+	return platform_driver_register(&switch_driver);
+}
+
+static void __exit switch_exit(void)
+{
+	platform_driver_unregister(&switch_driver);
+}
+module_init(switch_init);
+module_exit(switch_exit);
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("Paul Mundt");
+MODULE_LICENSE("GPLv2");
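The driver above relies on asm/push-switch.h, which is not included in this excerpt. Inferred from the fields the driver touches, the interface looks roughly like the following; this is a sketch, not the actual header.

struct push_switch {
	unsigned int		state;		/* set by the board IRQ handler, cleared by the work handler */
	struct timer_list	debounce;	/* armed by the board IRQ handler */
	struct work_struct	work;		/* emits the KOBJ_CHANGE uevent */
};

struct push_switch_platform_info {
	irqreturn_t		(*irq_handler)(int irq, void *data);
	unsigned int		irq_flags;	/* extra IRQF_* bits for request_irq() */
	unsigned int		bit;		/* board-specific switch bit */
	const char		*name;		/* sysfs name, may be NULL */
};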
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 5da88a4..99c7e52 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -4,7 +4,7 @@
 
 extra-y	:= head.o init_task.o vmlinux.lds
 
-obj-y	:= process.o signal.o entry.o traps.o irq.o \
+obj-y	:= process.o signal.o traps.o irq.o \
 	ptrace.o setup.o time.o sys_sh.o semaphore.o \
 	io.o io_generic.o sh_ksyms.o syscalls.o
 
@@ -21,3 +21,4 @@
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_APM)		+= apm.o
 obj-$(CONFIG_PM)		+= pm.o
+obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index fb5dac0..0582e67 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -2,11 +2,12 @@
 # Makefile for the Linux/SuperH CPU-specific backends.
 #
 
-obj-y	+= irq/ init.o clock.o
-
-obj-$(CONFIG_CPU_SH2)		+= sh2/
-obj-$(CONFIG_CPU_SH3)		+= sh3/
-obj-$(CONFIG_CPU_SH4)		+= sh4/
+obj-$(CONFIG_CPU_SH2)		= sh2/
+obj-$(CONFIG_CPU_SH2A)		= sh2a/
+obj-$(CONFIG_CPU_SH3)		= sh3/
+obj-$(CONFIG_CPU_SH4)		= sh4/
 
 obj-$(CONFIG_UBC_WAKEUP)	+= ubc.o
 obj-$(CONFIG_SH_ADC)		+= adc.o
+
+obj-y	+= irq/ init.o clock.o
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c
index 51ec64c..abb586b 100644
--- a/arch/sh/kernel/cpu/clock.c
+++ b/arch/sh/kernel/cpu/clock.c
@@ -5,9 +5,11 @@
  *
  * This clock framework is derived from the OMAP version by:
  *
- *	Copyright (C) 2004 Nokia Corporation
+ *	Copyright (C) 2004 - 2005 Nokia Corporation
  *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
  *
+ *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
+ *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -20,6 +22,7 @@
 #include <linux/kref.h>
 #include <linux/seq_file.h>
 #include <linux/err.h>
+#include <linux/platform_device.h>
 #include <asm/clock.h>
 #include <asm/timer.h>
 
@@ -195,17 +198,37 @@
 		propagate_rate(clk);
 }
 
-struct clk *clk_get(const char *id)
+/*
+ * Returns a clock. Note that we first try to match on both the device id
+ * on the bus and the clock name. If this fails, we fall back to matching
+ * on the clock name only.
+ */
+struct clk *clk_get(struct device *dev, const char *id)
 {
 	struct clk *p, *clk = ERR_PTR(-ENOENT);
+	int idno;
+
+	if (dev == NULL || dev->bus != &platform_bus_type)
+		idno = -1;
+	else
+		idno = to_platform_device(dev)->id;
 
 	mutex_lock(&clock_list_sem);
 	list_for_each_entry(p, &clock_list, node) {
+		if (p->id == idno &&
+		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
+			clk = p;
+			goto found;
+		}
+	}
+
+	list_for_each_entry(p, &clock_list, node) {
 		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
 			clk = p;
 			break;
 		}
 	}
+
+found:
 	mutex_unlock(&clock_list_sem);
 
 	return clk;
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index bfb90eb..4812176 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -68,12 +68,14 @@
 
 		waysize = cpu_data->dcache.sets;
 
+#ifdef CCR_CACHE_ORA
 		/*
 		 * If the OC is already in RAM mode, we only have
 		 * half of the entries to flush..
 		 */
 		if (ccr & CCR_CACHE_ORA)
 			waysize >>= 1;
+#endif
 
 		waysize <<= cpu_data->dcache.entry_shift;
 
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index 1c034c2..0049d21 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -1,8 +1,9 @@
 #
 # Makefile for the Linux/SuperH CPU-specific IRQ handlers.
 #
-obj-y	+= ipr.o imask.o
+obj-y	+= imask.o
 
+obj-$(CONFIG_CPU_HAS_IPR_IRQ)		+= ipr.o
 obj-$(CONFIG_CPU_HAS_PINT_IRQ)		+= pint.o
 obj-$(CONFIG_CPU_HAS_MASKREG_IRQ)	+= maskreg.o
 obj-$(CONFIG_CPU_HAS_INTC2_IRQ)		+= intc2.o
diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c
index a33ae3e..301b505 100644
--- a/arch/sh/kernel/cpu/irq/imask.c
+++ b/arch/sh/kernel/cpu/irq/imask.c
@@ -53,7 +53,10 @@
 {
 	unsigned long __dummy;
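+	/* CPUs without SR.RB have no r6_bank register, so only load it when it exists. */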
 
-	asm volatile("ldc	%2, r6_bank\n\t"
+	asm volatile(
+#ifdef CONFIG_CPU_HAS_SR_RB
+		     "ldc	%2, r6_bank\n\t"
+#endif
 		     "stc	sr, %0\n\t"
 		     "and	#0xf0, %0\n\t"
 		     "shlr2	%0\n\t"
diff --git a/arch/sh/kernel/cpu/irq/intc2.c b/arch/sh/kernel/cpu/irq/intc2.c
index 74ca576..74defe7 100644
--- a/arch/sh/kernel/cpu/irq/intc2.c
+++ b/arch/sh/kernel/cpu/irq/intc2.c
@@ -11,22 +11,29 @@
  * Hitachi 7751, the STM ST40 STB1, SH7760, and SH7780.
  */
 #include <linux/kernel.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
-#include <asm/system.h>
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7760)
+#define INTC2_BASE	0xfe080000
+#define INTC2_INTMSK	(INTC2_BASE + 0x40)
+#define INTC2_INTMSKCLR	(INTC2_BASE + 0x60)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
+#define INTC2_BASE	0xffd40000
+#define INTC2_INTMSK	(INTC2_BASE + 0x38)
+#define INTC2_INTMSKCLR	(INTC2_BASE + 0x3c)
+#endif
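+
+/*
+ * The mask (INTMSK) and mask-clear (INTMSKCLR) registers live at different
+ * addresses on SH7760 and SH7780, so resolve them at compile time.
+ */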
 
 static void disable_intc2_irq(unsigned int irq)
 {
 	struct intc2_data *p = get_irq_chip_data(irq);
-	ctrl_outl(1 << p->msk_shift,
-		  INTC2_BASE + INTC2_INTMSK_OFFSET + p->msk_offset);
+	ctrl_outl(1 << p->msk_shift, INTC2_INTMSK + p->msk_offset);
 }
 
 static void enable_intc2_irq(unsigned int irq)
 {
 	struct intc2_data *p = get_irq_chip_data(irq);
-	ctrl_outl(1 << p->msk_shift,
-		  INTC2_BASE + INTC2_INTMSKCLR_OFFSET + p->msk_offset);
+	ctrl_outl(1 << p->msk_shift, INTC2_INTMSKCLR + p->msk_offset);
 }
 
 static struct irq_chip intc2_irq_chip = {
@@ -61,12 +68,10 @@
 		/* Set the priority level */
 		local_irq_save(flags);
 
-		ipr = ctrl_inl(INTC2_BASE + INTC2_INTPRI_OFFSET +
-			       p->ipr_offset);
+		ipr = ctrl_inl(INTC2_BASE + p->ipr_offset);
 		ipr &= ~(0xf << p->ipr_shift);
 		ipr |= p->priority << p->ipr_shift;
-		ctrl_outl(ipr, INTC2_BASE + INTC2_INTPRI_OFFSET +
-			  p->ipr_offset);
+		ctrl_outl(ipr, INTC2_BASE + p->ipr_offset);
 
 		local_irq_restore(flags);
 
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index a008956..35eb575 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -19,25 +19,21 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/module.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/machvec.h>
-
+#include <linux/io.h>
+#include <linux/interrupt.h>
 
 static void disable_ipr_irq(unsigned int irq)
 {
 	struct ipr_data *p = get_irq_chip_data(irq);
-	int shift = p->shift*4;
 	/* Set the priority in IPR to 0 */
-	ctrl_outw(ctrl_inw(p->addr) & (0xffff ^ (0xf << shift)), p->addr);
+	ctrl_outw(ctrl_inw(p->addr) & (0xffff ^ (0xf << p->shift)), p->addr);
 }
 
 static void enable_ipr_irq(unsigned int irq)
 {
 	struct ipr_data *p = get_irq_chip_data(irq);
-	int shift = p->shift*4;
 	/* Set priority in IPR back to original value */
-	ctrl_outw(ctrl_inw(p->addr) | (p->priority << shift), p->addr);
+	ctrl_outw(ctrl_inw(p->addr) | (p->priority << p->shift), p->addr);
 }
 
 static struct irq_chip ipr_irq_chip = {
@@ -53,6 +49,10 @@
 
 	for (i = 0; i < nr_irqs; i++) {
 		unsigned int irq = table[i].irq;
+		table[i].addr = map_ipridx_to_addr(table[i].ipr_idx);
+		/* skip this entry if the IPR index could not be mapped */
+		if (table[i].addr == 0)
+			continue;
 		disable_irq_nosync(irq);
 		set_irq_chip_and_handler_name(irq, &ipr_irq_chip,
 				      handle_level_irq, "level");
@@ -62,83 +62,6 @@
 }
 EXPORT_SYMBOL(make_ipr_irq);
 
-static struct ipr_data sys_ipr_map[] = {
-#ifndef CONFIG_CPU_SUBTYPE_SH7780
-	{ TIMER_IRQ, TIMER_IPR_ADDR, TIMER_IPR_POS, TIMER_PRIORITY },
-	{ TIMER1_IRQ, TIMER1_IPR_ADDR, TIMER1_IPR_POS, TIMER1_PRIORITY },
-#ifdef RTC_IRQ
-	{ RTC_IRQ, RTC_IPR_ADDR, RTC_IPR_POS, RTC_PRIORITY },
-#endif
-#ifdef SCI_ERI_IRQ
-	{ SCI_ERI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY },
-	{ SCI_RXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY },
-	{ SCI_TXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY },
-#endif
-#ifdef SCIF1_ERI_IRQ
-	{ SCIF1_ERI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY },
-	{ SCIF1_RXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY },
-	{ SCIF1_BRI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY },
-	{ SCIF1_TXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY },
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7300)
-	{ SCIF0_IRQ, SCIF0_IPR_ADDR, SCIF0_IPR_POS, SCIF0_PRIORITY },
-	{ DMTE2_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY },
-	{ DMTE3_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY },
-	{ VIO_IRQ, VIO_IPR_ADDR, VIO_IPR_POS, VIO_PRIORITY },
-#endif
-#ifdef SCIF_ERI_IRQ
-	{ SCIF_ERI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY },
-	{ SCIF_RXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY },
-	{ SCIF_BRI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY },
-	{ SCIF_TXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY },
-#endif
-#ifdef IRDA_ERI_IRQ
-	{ IRDA_ERI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY },
-	{ IRDA_RXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY },
-	{ IRDA_BRI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY },
-	{ IRDA_TXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY },
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7706) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
-	/*
-	 * Initialize the Interrupt Controller (INTC)
-	 * registers to their power on values
-	 */
-
-	/*
-	 * Enable external irq (INTC IRQ mode).
-	 * You should set corresponding bits of PFC to "00"
-	 * to enable these interrupts.
-	 */
-	{ IRQ0_IRQ, IRQ0_IPR_ADDR, IRQ0_IPR_POS, IRQ0_PRIORITY },
-	{ IRQ1_IRQ, IRQ1_IPR_ADDR, IRQ1_IPR_POS, IRQ1_PRIORITY },
-	{ IRQ2_IRQ, IRQ2_IPR_ADDR, IRQ2_IPR_POS, IRQ2_PRIORITY },
-	{ IRQ3_IRQ, IRQ3_IPR_ADDR, IRQ3_IPR_POS, IRQ3_PRIORITY },
-	{ IRQ4_IRQ, IRQ4_IPR_ADDR, IRQ4_IPR_POS, IRQ4_PRIORITY },
-	{ IRQ5_IRQ, IRQ5_IPR_ADDR, IRQ5_IPR_POS, IRQ5_PRIORITY },
-#endif
-#endif
-};
-
-void __init init_IRQ(void)
-{
-	make_ipr_irq(sys_ipr_map, ARRAY_SIZE(sys_ipr_map));
-
-#ifdef CONFIG_CPU_HAS_PINT_IRQ
-	init_IRQ_pint();
-#endif
-
-#ifdef CONFIG_CPU_HAS_INTC2_IRQ
-	init_IRQ_intc2();
-#endif
-	/* Perform the machine specific initialisation */
-	if (sh_mv.mv_init_irq != NULL)
-		sh_mv.mv_init_irq();
-
-	irq_ctx_init(smp_processor_id());
-}
-
 #if !defined(CONFIG_CPU_HAS_PINT_IRQ)
 int ipr_irq_demux(int irq)
 {
diff --git a/arch/sh/kernel/cpu/sh2/Makefile b/arch/sh/kernel/cpu/sh2/Makefile
index 389353f..f0f059a 100644
--- a/arch/sh/kernel/cpu/sh2/Makefile
+++ b/arch/sh/kernel/cpu/sh2/Makefile
@@ -2,5 +2,6 @@
 # Makefile for the Linux/SuperH SH-2 backends.
 #
 
-obj-y	:= probe.o
+obj-y	:= ex.o probe.o entry.o
 
+obj-$(CONFIG_CPU_SUBTYPE_SH7619) += setup-sh7619.o clock-sh7619.o
diff --git a/arch/sh/kernel/cpu/sh2/clock-sh7619.c b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
new file mode 100644
index 0000000..d0440b2
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
@@ -0,0 +1,81 @@
+/*
+ * arch/sh/kernel/cpu/sh2/clock-sh7619.c
+ *
+ * SH7619 support for the clock framework
+ *
+ *  Copyright (C) 2006  Yoshinori Sato
+ *
+ * Based on clock-sh4.c
+ *  Copyright (C) 2005  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static const int pll1rate[] = { 1, 2 };
+static const int pfc_divisors[] = { 1, 2, 0, 4 };
+
+#if (CONFIG_SH_CLK_MD == 1) || (CONFIG_SH_CLK_MD == 2)
+#define PLL2 (4)
+#elif (CONFIG_SH_CLK_MD == 5) || (CONFIG_SH_CLK_MD == 6)
+#define PLL2 (2)
+#else
+#error "Illegal Clock Mode!"
+#endif
+
+static void master_clk_init(struct clk *clk)
+{
+	clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 7];
+}
+
+static struct clk_ops sh7619_master_clk_ops = {
+	.init		= master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	clk->rate = clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct clk_ops sh7619_module_clk_ops = {
+	.recalc		= module_clk_recalc,
+};
+
+static void bus_clk_recalc(struct clk *clk)
+{
+	clk->rate = clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 7];
+}
+
+static struct clk_ops sh7619_bus_clk_ops = {
+	.recalc		= bus_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+	clk->rate = clk->parent->rate;
+}
+
+static struct clk_ops sh7619_cpu_clk_ops = {
+	.recalc		= cpu_clk_recalc,
+};
+
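+/*
+ * Ordered to match the idx values passed to arch_init_clk_ops() below:
+ * master, module, bus and cpu clock ops, respectively.
+ */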
+static struct clk_ops *sh7619_clk_ops[] = {
+	&sh7619_master_clk_ops,
+	&sh7619_module_clk_ops,
+	&sh7619_bus_clk_ops,
+	&sh7619_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+	if (idx < ARRAY_SIZE(sh7619_clk_ops))
+		*ops = sh7619_clk_ops[idx];
+}
+
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
new file mode 100644
index 0000000..34d51b3
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/entry.S
@@ -0,0 +1,341 @@
+/*
+ * arch/sh/kernel/cpu/sh2/entry.S
+ *
+ * The SH-2 exception entry
+ *
+ * Copyright (C) 2005,2006 Yoshinori Sato
+ * Copyright (C) 2005  AXE,Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/cpu/mmu_context.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/page.h>
+	
+/* Offsets to the stack */
+OFF_R0  =  0		/* Return value. New ABI also arg4 */
+OFF_R1  =  4     	/* New ABI: arg5 */
+OFF_R2  =  8     	/* New ABI: arg6 */
+OFF_R3  =  12     	/* New ABI: syscall_nr */
+OFF_R4  =  16     	/* New ABI: arg0 */
+OFF_R5  =  20     	/* New ABI: arg1 */
+OFF_R6  =  24     	/* New ABI: arg2 */
+OFF_R7  =  28     	/* New ABI: arg3 */
+OFF_SP	=  (15*4)
+OFF_PC  =  (16*4)
+OFF_SR	=  (16*4+2*4)
+OFF_TRA	=  (16*4+6*4)
+
+#include <asm/entry-macros.S>
+
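+! SH-2 has no privileged mode or banked registers in hardware, so kernel vs.
+! user state is tracked in software via __cpu_mode (see the .data section at
+! the end of this file).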
+ENTRY(exception_handler)
+	! already saved r0/r1
+	mov.l	r2,@-sp
+	mov.l	r3,@-sp
+	mov	r0,r1
+	cli
+	mov.l	$cpu_mode,r2
+	mov.l	@r2,r0
+	mov.l	@(5*4,r15),r3	! previous SR
+	shll2	r3		! set "S" flag
+	rotl	r0		! T <- "S" flag
+	rotl	r0		! "S" flag is LSB
+	rotcr	r3		! T -> r3:b30
+	shlr	r3
+	shlr	r0
+	bt/s	1f
+	 mov.l	r3,@(5*4,r15)	! copy cpu mode to SR
+	! switch to kernel mode
+	mov	#1,r0
+	rotr	r0
+	rotr	r0
+	mov.l	r0,@r2		! enter kernel mode
+	mov.l	$current_thread_info,r2
+	mov.l	@r2,r2
+	mov	#0x20,r0
+	shll8	r0
+	add	r2,r0
+	mov	r15,r2		! r2 = user stack top
+	mov	r0,r15		! switch kernel stack
+	add	#-4,r15		! dummy
+	mov.l	r1,@-r15	! TRA
+	sts.l	macl, @-r15
+	sts.l	mach, @-r15
+	stc.l	gbr, @-r15
+	mov.l	@(4*4,r2),r0
+	mov.l	@(5*4,r2),r1
+	mov.l	r1,@-r15	! original SR
+	sts.l	pr,@-r15
+	mov.l	r0,@-r15	! original PC
+	mov	r2,r3
+	add	#(4+2)*4,r3	! rewind r0 - r3 + exception frame
+	mov.l	r3,@-r15	! original SP
+	mov.l	r14,@-r15
+	mov.l	r13,@-r15
+	mov.l	r12,@-r15
+	mov.l	r11,@-r15
+	mov.l	r10,@-r15
+	mov.l	r9,@-r15
+	mov.l	r8,@-r15
+	mov.l	r7,@-r15
+	mov.l	r6,@-r15
+	mov.l	r5,@-r15
+	mov.l	r4,@-r15
+	mov	r2,r8		! copy user -> kernel stack
+	mov.l	@r8+,r3
+	mov.l	r3,@-r15
+	mov.l	@r8+,r2
+	mov.l	r2,@-r15
+	mov.l	@r8+,r1
+	mov.l	r1,@-r15
+	mov.l	@r8+,r0
+	bra	2f
+	 mov.l	r0,@-r15
+1:
+	! in kernel exception
+	mov	#(22-4-4-1)*4+4,r0
+	mov	r15,r2
+	sub	r0,r15
+	mov.l	@r2+,r0		! old R3
+	mov.l	r0,@-r15	
+	mov.l	@r2+,r0		! old R2
+	mov.l	r0,@-r15	
+	mov.l	@r2+,r0		! old R1
+	mov.l	r0,@-r15	
+	mov.l	@r2+,r0		! old R0
+	mov.l	r0,@-r15	
+	mov.l	@r2+,r3		! old PC
+	mov.l	@r2+,r0		! old SR
+	add	#-4,r2		! exception frame stub (sr)
+	mov.l	r1,@-r2		! TRA
+	sts.l	macl, @-r2
+	sts.l	mach, @-r2
+	stc.l	gbr, @-r2
+	mov.l	r0,@-r2		! save old SR
+	sts.l	pr,@-r2
+	mov.l	r3,@-r2		! save old PC
+	mov	r2,r0
+	add	#8*4,r0
+	mov.l	r0,@-r2		! save old SP
+	mov.l	r14,@-r2
+	mov.l	r13,@-r2
+	mov.l	r12,@-r2
+	mov.l	r11,@-r2
+	mov.l	r10,@-r2
+	mov.l	r9,@-r2
+	mov.l	r8,@-r2
+	mov.l	r7,@-r2
+	mov.l	r6,@-r2
+	mov.l	r5,@-r2
+	mov.l	r4,@-r2
+	mov.l	@(OFF_R0,r15),r0
+	mov.l	@(OFF_R1,r15),r1
+	mov.l	@(OFF_R2,r15),r2
+	mov.l	@(OFF_R3,r15),r3
+2:
+	mov	#OFF_TRA,r8
+	add	r15,r8
+	mov.l	@r8,r9	
+	mov	#64,r8
+	cmp/hs	r8,r9
+	bt	interrupt_entry	! vec >= 64 is interrupt
+	mov	#32,r8
+	cmp/hs	r8,r9
+	bt	trap_entry	! 64 > vec >= 32  is trap
+	mov.l	4f,r8
+	mov	r9,r4
+	shll2	r9
+	add	r9,r8
+	mov.l	@r8,r8
+	mov	#0,r9
+	cmp/eq	r9,r8
+	bf	3f
+	mov.l	8f,r8		! unhandled exception
+3:
+	mov.l	5f,r10
+	jmp	@r8
+	 lds	r10,pr
+
+interrupt_entry:
+	mov	r9,r4
+	mov.l	6f,r9
+	mov.l	7f,r8
+	jmp	@r8
+	 lds	r9,pr
+
+	.align	2
+4:	.long	exception_handling_table
+5:	.long	ret_from_exception
+6:	.long	ret_from_irq
+7:	.long	do_IRQ
+8:	.long	do_exception_error
+	
+trap_entry:	
+	add	#-0x10,r9
+	shll2	r9			! TRA
+	mov	#OFF_TRA,r8
+	add	r15,r8
+	mov.l	r9,@r8
+	mov	r9,r8
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	5f, r9
+	jsr	@r9
+	 nop
+#endif
+	sti
+	bra	system_call
+	 nop
+	
+	.align	2
+1:	.long	syscall_exit
+2:	.long	break_point_trap_software
+3:	.long	NR_syscalls
+4:	.long	sys_call_table
+#ifdef CONFIG_TRACE_IRQFLAGS
+5:	.long	trace_hardirqs_on
+#endif
+
+#if defined(CONFIG_SH_STANDARD_BIOS)
+	/* Unwind the stack and jmp to the debug entry */
+debug_kernel_fw:
+	mov	r15,r0
+	add	#(22-4)*4-4,r0
+	ldc.l	@r0+,gbr
+	lds.l	@r0+,mach
+	lds.l	@r0+,macl
+	mov	r15,r0
+	mov.l	@(OFF_SP,r0),r1
+	mov	#OFF_SR,r2
+	mov.l	@(r0,r2),r3
+	mov.l	r3,@-r1
+	mov	#OFF_SP,r2
+	mov.l	@(r0,r2),r3
+	mov.l	r3,@-r1
+	mov	r15,r0
+	add	#(22-4)*4-8,r0
+	mov.l	1f,r2
+	mov.l	@r2,r2
+	stc	sr,r3
+	mov.l	r2,@r0
+	mov.l	r3,@r0
+	mov.l	r1,@(8,r0)	
+	mov.l	@r15+, r0
+	mov.l	@r15+, r1
+	mov.l	@r15+, r2
+	mov.l	@r15+, r3
+	mov.l	@r15+, r4
+	mov.l	@r15+, r5
+	mov.l	@r15+, r6
+	mov.l	@r15+, r7
+	mov.l	@r15+, r8
+	mov.l	@r15+, r9
+	mov.l	@r15+, r10
+	mov.l	@r15+, r11
+	mov.l	@r15+, r12
+	mov.l	@r15+, r13
+	mov.l	@r15+, r14
+	add	#8,r15
+	lds.l	@r15+, pr
+	rte
+	 mov.l	@r15+,r15
+	.align	2
+1:	.long	gdb_vbr_vector
+#endif /* CONFIG_SH_STANDARD_BIOS */
+
+ENTRY(address_error_handler)
+	mov	r15,r4				! regs
+	add	#4,r4
+	mov	#OFF_PC,r0
+	mov.l	@(r0,r15),r6			! pc
+	mov.l	1f,r0
+	jmp	@r0
+	 mov	#0,r5				! writeaccess is unknown
+	.align	2
+
+1:	.long	do_address_error
+
+restore_all:
+	cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	3f, r0
+	jsr	@r0
+	 nop
+#endif
+	mov	r15,r0
+	mov.l	$cpu_mode,r2
+	mov	#OFF_SR,r3
+	mov.l	@(r0,r3),r1
+	mov.l	r1,@r2
+	shll2	r1				! clear MD bit
+	shlr2	r1
+	mov.l	@(OFF_SP,r0),r2
+	add	#-8,r2
+	mov.l	r2,@(OFF_SP,r0)			! point exception frame top
+	mov.l	r1,@(4,r2)			! set sr
+	mov	#OFF_PC,r3
+	mov.l	@(r0,r3),r1
+	mov.l	r1,@r2				! set pc
+	add	#4*16+4,r0
+	lds.l	@r0+,pr
+	add	#4,r0				! skip sr
+	ldc.l	@r0+,gbr
+	lds.l	@r0+,mach
+	lds.l	@r0+,macl
+	get_current_thread_info r0, r1
+	mov.l	$current_thread_info,r1
+	mov.l	r0,@r1
+	mov.l	@r15+,r0
+	mov.l	@r15+,r1
+	mov.l	@r15+,r2
+	mov.l	@r15+,r3
+	mov.l	@r15+,r4
+	mov.l	@r15+,r5
+	mov.l	@r15+,r6
+	mov.l	@r15+,r7
+	mov.l	@r15+,r8
+	mov.l	@r15+,r9
+	mov.l	@r15+,r10
+	mov.l	@r15+,r11
+	mov.l	@r15+,r12
+	mov.l	@r15+,r13
+	mov.l	@r15+,r14
+	mov.l	@r15,r15
+	rte
+	 nop
+2:
+	mov.l	1f,r8
+	mov.l	2f,r9
+	jmp	@r9
+	 lds	r8,pr
+
+	.align	2
+$current_thread_info:
+	.long	__current_thread_info
+$cpu_mode:	
+	.long	__cpu_mode
+#ifdef CONFIG_TRACE_IRQFLAGS
+3:	.long	trace_hardirqs_off
+#endif
+		
+! common exception handler
+#include "../../entry-common.S"
+	
+	.data
+! cpu operation mode 
+! bit30 = MD (compatible SH3/4)
+__cpu_mode:
+	.long	0x40000000
+		
+	.section	.bss
+__current_thread_info:
+	.long	0
+
+ENTRY(exception_handling_table)
+	.space	4*32
diff --git a/arch/sh/kernel/cpu/sh2/ex.S b/arch/sh/kernel/cpu/sh2/ex.S
new file mode 100644
index 0000000..6d285af
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/ex.S
@@ -0,0 +1,46 @@
+/*
+ * arch/sh/kernel/cpu/sh2/ex.S
+ *
+ * The SH-2 exception vector table
+ *
+ * Copyright (C) 2005 Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/linkage.h>
+
+!
+! convert Exception Vector to Exception Number
+!
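+! Each stub below is four 16-bit instructions (8 bytes), which is why the
+! vector table at the bottom points at exception_entry + vector * 8.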
+exception_entry:	
+no	=	0
+	.rept	256
+	mov.l	r0,@-sp
+	mov	#no,r0
+	bra	exception_trampoline
+	and	#0xff,r0
+no	=	no + 1
+	.endr
+exception_trampoline:
+	mov.l	r1,@-sp
+	mov.l	$exception_handler,r1
+	jmp	@r1
+
+	.align	2
+$exception_entry:
+	.long	exception_entry
+$exception_handler:
+	.long	exception_handler
+!
+! Exception Vector Base
+!
+	.align	2
+ENTRY(vbr_base)
+vector	=	0
+	.rept	256
+	.long	exception_entry + vector * 8
+vector	=	vector + 1
+	.endr
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index f17a2a0..ba527d9 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -17,17 +17,23 @@
 
 int __init detect_cpu_and_cache_system(void)
 {
-	/*
-	 * For now, assume SH7604 .. fix this later.
-	 */
+#if defined(CONFIG_CPU_SUBTYPE_SH7604)
 	cpu_data->type			= CPU_SH7604;
 	cpu_data->dcache.ways		= 4;
-	cpu_data->dcache.way_shift	= 6;
+	cpu_data->dcache.way_incr	= (1<<10);
 	cpu_data->dcache.sets		= 64;
 	cpu_data->dcache.entry_shift	= 4;
 	cpu_data->dcache.linesz		= L1_CACHE_BYTES;
 	cpu_data->dcache.flags		= 0;
-
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+	cpu_data->type			= CPU_SH7619;
+	cpu_data->dcache.ways		= 4;
+	cpu_data->dcache.way_incr	= (1<<12);
+	cpu_data->dcache.sets		= 256;
+	cpu_data->dcache.entry_shift	= 4;
+	cpu_data->dcache.linesz		= L1_CACHE_BYTES;
+	cpu_data->dcache.flags		= 0;
+#endif
 	/*
 	 * SH-2 doesn't have separate caches
 	 */
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
new file mode 100644
index 0000000..82c2d90
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -0,0 +1,53 @@
+/*
+ * SH7619 Setup
+ *
+ *  Copyright (C) 2006  Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
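+/* On-chip SCIF ports; the final all-zero entry terminates the list. */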
+static struct plat_sci_port sci_platform_data[] = {
+	{
+		.mapbase	= 0xf8400000,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.type		= PORT_SCIF,
+		.irqs		=  { 88, 89, 91, 90},
+	}, {
+		.mapbase	= 0xf8410000,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.type		= PORT_SCIF,
+		.irqs		=  { 92, 93, 95, 94},
+	}, {
+		.mapbase	= 0xf8420000,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.type		= PORT_SCIF,
+		.irqs		=  { 96, 97, 99, 98},
+	}, {
+		.flags = 0,
+	}
+};
+
+static struct platform_device sci_device = {
+	.name		= "sh-sci",
+	.id		= -1,
+	.dev		= {
+		.platform_data	= sci_platform_data,
+	},
+};
+
+static struct platform_device *sh7619_devices[] __initdata = {
+	&sci_device,
+};
+
+static int __init sh7619_devices_setup(void)
+{
+	return platform_add_devices(sh7619_devices,
+				    ARRAY_SIZE(sh7619_devices));
+}
+__initcall(sh7619_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile
new file mode 100644
index 0000000..350972a
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Linux/SuperH SH-2A backends.
+#
+
+obj-y	:= common.o probe.o
+
+common-y	+= $(addprefix ../sh2/, ex.o)
+common-y	+= $(addprefix ../sh2/, entry.o)
+
+obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
new file mode 100644
index 0000000..a9ad309
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
@@ -0,0 +1,85 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/clock-sh7206.c
+ *
+ * SH7206 support for the clock framework
+ *
+ *  Copyright (C) 2006  Yoshinori Sato
+ *
+ * Based on clock-sh4.c
+ *  Copyright (C) 2005  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static const int pll1rate[] = { 1, 2, 3, 4, 6, 8 };
+static const int pfc_divisors[] = { 1, 2, 3, 4, 6, 8, 12 };
+#define ifc_divisors pfc_divisors
+
+#if (CONFIG_SH_CLK_MD == 2)
+#define PLL2 (4)
+#elif (CONFIG_SH_CLK_MD == 6)
+#define PLL2 (2)
+#elif (CONFIG_SH_CLK_MD == 7)
+#define PLL2 (1)
+#else
+#error "Illegal Clock Mode!"
+#endif
+
+static void master_clk_init(struct clk *clk)
+{
+	clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+}
+
+static struct clk_ops sh7206_master_clk_ops = {
+	.init		= master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	clk->rate = clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct clk_ops sh7206_module_clk_ops = {
+	.recalc		= module_clk_recalc,
+};
+
+static void bus_clk_recalc(struct clk *clk)
+{
+	clk->rate = clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+}
+
+static struct clk_ops sh7206_bus_clk_ops = {
+	.recalc		= bus_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+	int idx = (ctrl_inw(FREQCR) & 0x0007);
+	clk->rate = clk->parent->rate / ifc_divisors[idx];
+}
+
+static struct clk_ops sh7206_cpu_clk_ops = {
+	.recalc		= cpu_clk_recalc,
+};
+
+static struct clk_ops *sh7206_clk_ops[] = {
+	&sh7206_master_clk_ops,
+	&sh7206_module_clk_ops,
+	&sh7206_bus_clk_ops,
+	&sh7206_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+	if (idx < ARRAY_SIZE(sh7206_clk_ops))
+		*ops = sh7206_clk_ops[idx];
+}
+
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
new file mode 100644
index 0000000..87c6c05
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -0,0 +1,39 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/probe.c
+ *
+ * CPU Subtype Probing for SH-2A.
+ *
+ * Copyright (C) 2004, 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+
+int __init detect_cpu_and_cache_system(void)
+{
+	/* Just SH7206 for now .. */
+	cpu_data->type			= CPU_SH7206;
+
+	cpu_data->dcache.ways		= 4;
+	cpu_data->dcache.way_incr	= (1 << 11);
+	cpu_data->dcache.sets		= 128;
+	cpu_data->dcache.entry_shift	= 4;
+	cpu_data->dcache.linesz		= L1_CACHE_BYTES;
+	cpu_data->dcache.flags		= 0;
+
+	/*
+	 * The icache is the same as the dcache as far as this setup is
+	 * concerned. The only real difference in hardware is that the icache
+	 * lacks the U bit that the dcache has; none of this has any bearing
+	 * on the cache info.
+	 */
+	cpu_data->icache		= cpu_data->dcache;
+
+	return 0;
+}
+
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
new file mode 100644
index 0000000..cdfeef49
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -0,0 +1,58 @@
+/*
+ * SH7206 Setup
+ *
+ *  Copyright (C) 2006  Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+	{
+		.mapbase	= 0xfffe8000,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.type		= PORT_SCIF,
+		.irqs		=  { 240, 241, 242, 243},
+	}, {
+		.mapbase	= 0xfffe8800,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.type		= PORT_SCIF,
+		.irqs		=  { 244, 245, 246, 247},
+	}, {
+		.mapbase	= 0xfffe9000,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.type		= PORT_SCIF,
+		.irqs		=  { 248, 249, 250, 251},
+	}, {
+		.mapbase	= 0xfffe9800,
+		.flags		= UPF_BOOT_AUTOCONF,
+		.type		= PORT_SCIF,
+		.irqs		=  { 252, 253, 254, 255},
+	}, {
+		.flags = 0,
+	}
+};
+
+static struct platform_device sci_device = {
+	.name		= "sh-sci",
+	.id		= -1,
+	.dev		= {
+		.platform_data	= sci_platform_data,
+	},
+};
+
+static struct platform_device *sh7206_devices[] __initdata = {
+	&sci_device,
+};
+
+static int __init sh7206_devices_setup(void)
+{
+	return platform_add_devices(sh7206_devices,
+				    ARRAY_SIZE(sh7206_devices));
+}
+__initcall(sh7206_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile
index 58d3815..83905e4 100644
--- a/arch/sh/kernel/cpu/sh3/Makefile
+++ b/arch/sh/kernel/cpu/sh3/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the Linux/SuperH SH-3 backends.
 #
 
-obj-y	:= ex.o probe.o
+obj-y	:= ex.o probe.o entry.o
 
 # CPU subtype setup
 obj-$(CONFIG_CPU_SUBTYPE_SH7705)	+= setup-sh7705.o
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7709.c b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
index 10461a7..b791a29 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7709.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
@@ -24,7 +24,7 @@
 
 static void set_bus_parent(struct clk *clk)
 {
-	struct clk *bus_clk = clk_get("bus_clk");
+	struct clk *bus_clk = clk_get(NULL, "bus_clk");
 	clk->parent = bus_clk;
 	clk_put(bus_clk);
 }
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
new file mode 100644
index 0000000..8c0dc27
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -0,0 +1,693 @@
+/*
+ * arch/sh/kernel/entry.S
+ *
+ *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
+ *  Copyright (C) 2003 - 2006  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+#include <asm/cpu/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+
+! NOTE:
+! GNU as (as of 2.9.1) changes bf/s into bt/s and bra when the branch target
+! is too far away, but this causes an illegal slot exception.
+
+/*	
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * NOTE: This code uses a convention that instructions in the delay slot
+ * of a transfer-control instruction are indented by an extra space, thus:
+ *
+ *    jmp	@k0	    ! control-transfer instruction
+ *     ldc	k1, ssr     ! delay slot
+ *
+ * Stack layout in 'ret_from_syscall':
+ * 	ptrace needs to have all regs on the stack.
+ *	if the order here is changed, it needs to be
+ *	updated in ptrace.c and ptrace.h
+ *
+ *	r0
+ *      ...
+ *	r15 = stack pointer
+ *	spc
+ *	pr
+ *	ssr
+ *	gbr
+ *	mach
+ *	macl
+ *	syscall #
+ *
+ */
+#if defined(CONFIG_KGDB_NMI)
+NMI_VEC = 0x1c0			! Must catch early for debounce
+#endif
+
+/* Offsets to the stack */
+OFF_R0  =  0		/* Return value. New ABI also arg4 */
+OFF_R1  =  4     	/* New ABI: arg5 */
+OFF_R2  =  8     	/* New ABI: arg6 */
+OFF_R3  =  12     	/* New ABI: syscall_nr */
+OFF_R4  =  16     	/* New ABI: arg0 */
+OFF_R5  =  20     	/* New ABI: arg1 */
+OFF_R6  =  24     	/* New ABI: arg2 */
+OFF_R7  =  28     	/* New ABI: arg3 */
+OFF_SP	=  (15*4)
+OFF_PC  =  (16*4)
+OFF_SR	=  (16*4+8)
+OFF_TRA	=  (16*4+6*4)
+
+
+#define k0	r0
+#define k1	r1
+#define k2	r2
+#define k3	r3
+#define k4	r4
+
+#define g_imask		r6	/* r6_bank1 */
+#define k_g_imask	r6_bank	/* r6_bank1 */
+#define current		r7	/* r7_bank1 */
+
+#include <asm/entry-macros.S>
+	
+/*
+ * Kernel mode register usage:
+ *	k0	scratch
+ *	k1	scratch
+ *	k2	scratch (Exception code)
+ *	k3	scratch (Return address)
+ *	k4	scratch
+ *	k5	reserved
+ *	k6	Global Interrupt Mask (0--15 << 4)
+ *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
+ */
+
+!
+! TLB Miss / Initial Page write exception handling
+!			_and_
+! TLB hits, but the access violate the protection.
+! It can be valid access, such as stack grow and/or C-O-W.
+!
+!
+! Find the pmd/pte entry and loadtlb
+! If it's not found, cause address error (SEGV)
+!
+! Although this could be written in assembly language (and it'd be faster),
+! this first version depends *much* on C implementation.
+!
+
+#if defined(CONFIG_MMU)
+	.align	2
+ENTRY(tlb_miss_load)
+	bra	call_dpf
+	 mov	#0, r5
+
+	.align	2
+ENTRY(tlb_miss_store)
+	bra	call_dpf
+	 mov	#1, r5
+
+	.align	2
+ENTRY(initial_page_write)
+	bra	call_dpf
+	 mov	#1, r5
+
+	.align	2
+ENTRY(tlb_protection_violation_load)
+	bra	call_dpf
+	 mov	#0, r5
+
+	.align	2
+ENTRY(tlb_protection_violation_store)
+	bra	call_dpf
+	 mov	#1, r5
+
+call_dpf:
+	mov.l	1f, r0
+ 	mov.l	@r0, r6		! address
+	mov.l	3f, r0
+
+	jmp	@r0
+ 	 mov	r15, r4		! regs
+
+	.align 2
+1:	.long	MMU_TEA
+3:	.long	do_page_fault
+
+	.align	2
+ENTRY(address_error_load)
+	bra	call_dae
+	 mov	#0,r5		! writeaccess = 0
+
+	.align	2
+ENTRY(address_error_store)
+	bra	call_dae
+	 mov	#1,r5		! writeaccess = 1
+
+	.align	2
+call_dae:
+	mov.l	1f, r0
+	mov.l	@r0, r6		! address
+	mov.l	2f, r0
+	jmp	@r0
+	 mov	r15, r4		! regs
+
+	.align 2
+1:	.long	MMU_TEA
+2:	.long   do_address_error
+#endif /* CONFIG_MMU */
+
+#if defined(CONFIG_SH_STANDARD_BIOS)
+	/* Unwind the stack and jmp to the debug entry */
+debug_kernel_fw:
+	mov.l	@r15+, r0
+	mov.l	@r15+, r1
+	mov.l	@r15+, r2
+	mov.l	@r15+, r3
+	mov.l	@r15+, r4
+	mov.l	@r15+, r5
+	mov.l	@r15+, r6
+	mov.l	@r15+, r7
+	stc	sr, r8
+	mov.l	1f, r9			! BL =1, RB=1, IMASK=0x0F
+	or	r9, r8
+	ldc	r8, sr			! here, change the register bank
+	mov.l	@r15+, r8
+	mov.l	@r15+, r9
+	mov.l	@r15+, r10
+	mov.l	@r15+, r11
+	mov.l	@r15+, r12
+	mov.l	@r15+, r13
+	mov.l	@r15+, r14
+	mov.l	@r15+, k0
+	ldc.l	@r15+, spc
+	lds.l	@r15+, pr
+	mov.l	@r15+, k1
+	ldc.l	@r15+, gbr
+	lds.l	@r15+, mach
+	lds.l	@r15+, macl
+	mov	k0, r15
+	!
+	mov.l	2f, k0
+	mov.l	@k0, k0
+	jmp	@k0
+	 ldc	k1, ssr
+	.align	2
+1:	.long	0x300000f0
+2:	.long	gdb_vbr_vector
+#endif /* CONFIG_SH_STANDARD_BIOS */
+
+restore_all:
+	mov.l	@r15+, r0
+	mov.l	@r15+, r1
+	mov.l	@r15+, r2
+	mov.l	@r15+, r3
+	mov.l	@r15+, r4
+	mov.l	@r15+, r5
+	mov.l	@r15+, r6
+	mov.l	@r15+, r7
+	!
+	stc	sr, r8
+	mov.l	7f, r9
+	or	r9, r8			! BL =1, RB=1
+	ldc	r8, sr			! here, change the register bank
+	!
+	mov.l	@r15+, r8
+	mov.l	@r15+, r9
+	mov.l	@r15+, r10
+	mov.l	@r15+, r11
+	mov.l	@r15+, r12
+	mov.l	@r15+, r13
+	mov.l	@r15+, r14
+	mov.l	@r15+, k4		! original stack pointer
+	ldc.l	@r15+, spc
+	lds.l	@r15+, pr
+	mov.l	@r15+, k3		! original SR
+	ldc.l	@r15+, gbr
+	lds.l	@r15+, mach
+	lds.l	@r15+, macl
+	add	#4, r15			! Skip syscall number
+	!
+#ifdef CONFIG_SH_DSP
+	mov.l	@r15+, k0		! DSP mode marker
+	mov.l	5f, k1
+	cmp/eq	k0, k1			! Do we have a DSP stack frame?
+	bf	skip_restore
+
+	stc	sr, k0			! Enable CPU DSP mode
+	or	k1, k0			! (within kernel it may be disabled)
+	ldc	k0, sr
+	mov	r2, k0			! Backup r2
+
+	! Restore DSP registers from stack
+	mov	r15, r2
+	movs.l	@r2+, a1
+	movs.l	@r2+, a0g
+	movs.l	@r2+, a1g
+	movs.l	@r2+, m0
+	movs.l	@r2+, m1
+	mov	r2, r15
+
+	lds.l	@r15+, a0
+	lds.l	@r15+, x0
+	lds.l	@r15+, x1
+	lds.l	@r15+, y0
+	lds.l	@r15+, y1
+	lds.l	@r15+, dsr
+	ldc.l	@r15+, rs
+	ldc.l	@r15+, re
+	ldc.l	@r15+, mod
+
+	mov	k0, r2			! Restore r2
+skip_restore:
+#endif
+	!
+	! Calculate new SR value
+	mov	k3, k2			! original SR value
+	mov	#0xf0, k1
+	extu.b	k1, k1
+	not	k1, k1
+	and	k1, k2			! Mask original SR value
+	!
+	mov	k3, k0			! Calculate IMASK-bits
+	shlr2	k0
+	and	#0x3c, k0
+	cmp/eq	#0x3c, k0
+	bt/s	6f
+	 shll2	k0
+	mov	g_imask, k0
+	!
+6:	or	k0, k2			! Set the IMASK-bits
+	ldc	k2, ssr
+	!
+#if defined(CONFIG_KGDB_NMI)
+	! Clear in_nmi
+	mov.l	6f, k0
+	mov	#0, k1
+	mov.b	k1, @k0
+#endif
+	mov.l	@r15+, k2		! restore EXPEVT
+	mov	k4, r15
+	rte
+	 nop
+
+	.align	2
+5:	.long	0x00001000	! DSP
+7:	.long	0x30000000
+
+! common exception handler
+#include "../../entry-common.S"
+	
+! Exception Vector Base
+!
+!	Should be aligned page boundary.
+!
+	.balign 	4096,0,4096
+ENTRY(vbr_base)
+	.long	0
+!
+	.balign 	256,0,256
+general_exception:
+	mov.l	1f, k2
+	mov.l	2f, k3
+	bra	handle_exception
+	 mov.l	@k2, k2
+	.align	2
+1:	.long	EXPEVT
+2:	.long	ret_from_exception
+!
+!
+
+/* This code makes some assumptions to improve performance.
+ * Make sure they are still true. */
+#if PTRS_PER_PGD != PTRS_PER_PTE
+#error PGD and PTE sizes don't match
+#endif
+
+/* gas doesn't flag impossible values for mov #immediate as an error */
+#if (_PAGE_PRESENT >> 2) > 0x7f
+#error cannot load PAGE_PRESENT as an immediate
+#endif
+#if _PAGE_DIRTY > 0x7f
+#error cannot load PAGE_DIRTY as an immediate
+#endif
+#if (_PAGE_PRESENT << 2) != _PAGE_ACCESSED
+#error cannot derive PAGE_ACCESSED from PAGE_PRESENT
+#endif
+
+#if defined(CONFIG_CPU_SH4)
+#define ldmmupteh(r)	mov.l	8f, r
+#else
+#define ldmmupteh(r)	mov	#MMU_PTEH, r
+#endif
+
+	.balign 	1024,0,1024
+tlb_miss:
+#ifdef COUNT_EXCEPTIONS
+	! Increment the counts
+	mov.l	9f, k1
+	mov.l	@k1, k2
+	add	#1, k2
+	mov.l	k2, @k1
+#endif
+
+	! k0 scratch
+	! k1 pgd and pte pointers
+	! k2 faulting address
+	! k3 pgd and pte index masks
+	! k4 shift
+
+	! Load up the pgd entry (k1)
+
+	ldmmupteh(k0)			!  9 LS (latency=2)	MMU_PTEH
+
+	mov.w	4f, k3			!  8 LS (latency=2)	(PTRS_PER_PGD-1) << 2
+	mov	#-(PGDIR_SHIFT-2), k4	!  6 EX
+
+	mov.l	@(MMU_TEA-MMU_PTEH,k0), k2	! 18 LS (latency=2)
+
+	mov.l	@(MMU_TTB-MMU_PTEH,k0), k1	! 18 LS (latency=2)
+
+	mov	k2, k0			!   5 MT (latency=0)
+	shld	k4, k0			!  99 EX
+
+	and	k3, k0			!  78 EX
+
+	mov.l	@(k0, k1), k1		!  21 LS (latency=2)
+	mov	#-(PAGE_SHIFT-2), k4	!   6 EX
+
+	! Load up the pte entry (k2)
+
+	mov	k2, k0			!   5 MT (latency=0)
+	shld	k4, k0			!  99 EX
+
+	tst	k1, k1			!  86 MT
+
+	bt	20f			! 110 BR
+
+	and	k3, k0			!  78 EX
+	mov.w	5f, k4			!   8 LS (latency=2)	_PAGE_PRESENT
+
+	mov.l	@(k0, k1), k2		!  21 LS (latency=2)
+	add	k0, k1			!  49 EX
+
+#ifdef CONFIG_CPU_HAS_PTEA
+	! Test the entry for present and _PAGE_ACCESSED
+
+	mov	#-28, k3		!   6 EX
+	mov	k2, k0			!   5 MT (latency=0)
+
+	tst	k4, k2			!  68 MT
+	shld	k3, k0			!  99 EX
+
+	bt	20f			! 110 BR
+
+	! Set PTEA register
+	! MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1)
+	!
+	! k0=pte>>28, k1=pte*, k2=pte, k3=<unused>, k4=_PAGE_PRESENT
+
+	and	#0xe, k0		!  79 EX
+
+	mov	k0, k3			!   5 MT (latency=0)
+	mov	k2, k0			!   5 MT (latency=0)
+
+	and	#1, k0			!  79 EX
+
+	or	k0, k3			!  82 EX
+
+	ldmmupteh(k0)			!   9 LS (latency=2)
+	shll2	k4			! 101 EX		_PAGE_ACCESSED
+
+	tst	k4, k2			!  68 MT
+
+	mov.l	k3, @(MMU_PTEA-MMU_PTEH,k0)	! 27 LS
+
+	mov.l	7f, k3			!   9 LS (latency=2)	_PAGE_FLAGS_HARDWARE_MASK
+
+	! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
+#else
+
+	! Test the entry for present and _PAGE_ACCESSED
+
+	mov.l	7f, k3			!   9 LS (latency=2)	_PAGE_FLAGS_HARDWARE_MASK
+	tst	k4, k2			!  68 MT
+
+	shll2	k4			! 101 EX		_PAGE_ACCESSED
+	ldmmupteh(k0)			!   9 LS (latency=2)
+
+	bt	20f			! 110 BR
+	tst	k4, k2			!  68 MT
+
+	! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
+
+#endif
+
+	! Set up the entry
+
+	and	k2, k3			!  78 EX
+	bt/s	10f			! 108 BR
+
+	 mov.l	k3, @(MMU_PTEL-MMU_PTEH,k0)	! 27 LS
+
+	ldtlb				! 128 CO
+
+	! At least one instruction between ldtlb and rte
+	nop				! 119 NOP
+
+	rte				! 126 CO
+
+	 nop				! 119 NOP
+
+
+10:	or	k4, k2			!  82 EX
+
+	ldtlb				! 128 CO
+
+	! At least one instruction between ldtlb and rte
+	mov.l	k2, @k1			!  27 LS
+
+	rte				! 126 CO
+
+	! Note we cannot execute mov here, because it is executed after
+	! restoring SSR, so would be executed in user space.
+	 nop				! 119 NOP
+
+
+	.align 5
+	! One cache line if possible...
+1:	.long	swapper_pg_dir
+4:	.short	(PTRS_PER_PGD-1) << 2
+5:	.short	_PAGE_PRESENT
+7:	.long	_PAGE_FLAGS_HARDWARE_MASK
+8:	.long	MMU_PTEH
+#ifdef COUNT_EXCEPTIONS
+9:	.long	exception_count_miss
+#endif
+
+	! Either pgd or pte not present
+20:	mov.l	1f, k2
+	mov.l	4f, k3
+	bra	handle_exception
+	 mov.l	@k2, k2
+!
+	.balign 	512,0,512
+interrupt:
+	mov.l	2f, k2
+	mov.l	3f, k3
+#if defined(CONFIG_KGDB_NMI)
+	! Debounce (filter nested NMI)
+	mov.l	@k2, k0
+	mov.l	5f, k1
+	cmp/eq	k1, k0
+	bf	0f
+	mov.l	6f, k1
+	tas.b	@k1
+	bt	0f
+	rte
+	 nop
+	.align	2
+5:	.long	NMI_VEC
+6:	.long	in_nmi
+0:
+#endif /* defined(CONFIG_KGDB_NMI) */
+	bra	handle_exception
+	 mov	#-1, k2		! interrupt exception marker
+
+	.align	2
+1:	.long	EXPEVT
+2:	.long	INTEVT
+3:	.long	ret_from_irq
+4:	.long	ret_from_exception
+
+!
+!
+	.align	2
+ENTRY(handle_exception)
+	! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
+	! save all registers onto stack.
+	!
+	stc	ssr, k0		! Is it from kernel space?
+	shll	k0		! Check MD bit (bit30) by shifting it into...
+	shll	k0		!       ...the T bit
+	bt/s	1f		! It's a kernel to kernel transition.
+	 mov	r15, k0		! save original stack to k0
+	/* User space to kernel */
+	mov	#(THREAD_SIZE >> 10), k1
+	shll8	k1		! k1 := THREAD_SIZE
+	shll2	k1
+	add	current, k1
+	mov	k1, r15		! change to kernel stack
+	!
+1:	mov.l	2f, k1
+	!
+#ifdef CONFIG_SH_DSP
+	mov.l	r2, @-r15		! Save r2, we need another reg
+	stc	sr, k4
+	mov.l	1f, r2
+	tst	r2, k4			! Check if in DSP mode
+	mov.l	@r15+, r2		! Restore r2 now
+	bt/s	skip_save
+	 mov	#0, k4			! Set marker for no stack frame
+
+	mov	r2, k4			! Backup r2 (in k4) for later
+
+	! Save DSP registers on stack
+	stc.l	mod, @-r15
+	stc.l	re, @-r15
+	stc.l	rs, @-r15
+	sts.l	dsr, @-r15
+	sts.l	y1, @-r15
+	sts.l	y0, @-r15
+	sts.l	x1, @-r15
+	sts.l	x0, @-r15
+	sts.l	a0, @-r15
+
+	! GAS is broken, does not generate correct "movs.l Ds,@-As" instr.
+
+	! FIXME: Make sure that this is still the case with newer toolchains,
+	! as we're not at all interested in supporting ancient toolchains at
+	! this point. -- PFM.
+
+	mov	r15, r2
+	.word	0xf653			! movs.l	a1, @-r2
+	.word	0xf6f3			! movs.l	a0g, @-r2
+	.word	0xf6d3			! movs.l	a1g, @-r2
+	.word	0xf6c3			! movs.l	m0, @-r2
+	.word	0xf6e3			! movs.l	m1, @-r2
+	mov	r2, r15
+
+	mov	k4, r2			! Restore r2
+	mov.l	1f, k4			! Force DSP stack frame
+skip_save:
+	mov.l	k4, @-r15		! Push DSP mode marker onto stack
+#endif
+	! Save the user registers on the stack.
+	mov.l	k2, @-r15	! EXPEVT
+
+	mov	#-1, k4
+	mov.l	k4, @-r15	! set TRA (default: -1)
+	!
+	sts.l	macl, @-r15
+	sts.l	mach, @-r15
+	stc.l	gbr, @-r15
+	stc.l	ssr, @-r15
+	sts.l	pr, @-r15
+	stc.l	spc, @-r15
+	!
+	lds	k3, pr		! Set the return address to pr
+	!
+	mov.l	k0, @-r15	! save original stack
+	mov.l	r14, @-r15
+	mov.l	r13, @-r15
+	mov.l	r12, @-r15
+	mov.l	r11, @-r15
+	mov.l	r10, @-r15
+	mov.l	r9, @-r15
+	mov.l	r8, @-r15
+	!
+	stc	sr, r8		! Back to normal register bank, and
+	or	k1, r8		! Block all interrupts
+	mov.l	3f, k1
+	and	k1, r8		! ...
+	ldc	r8, sr		! ...changed here.
+	!
+	mov.l	r7, @-r15
+	mov.l	r6, @-r15
+	mov.l	r5, @-r15
+	mov.l	r4, @-r15
+	mov.l	r3, @-r15
+	mov.l	r2, @-r15
+	mov.l	r1, @-r15
+	mov.l	r0, @-r15
+
+	/*
+	 * This gets a bit tricky.. in the INTEVT case we don't want to use
+	 * the VBR offset as a destination in the jump call table, since all
+	 * of the destinations are the same. In this case, (interrupt) sets
+	 * a marker in r2 (now r2_bank since SR.RB changed), which we check
+	 * to determine the exception type. For all other exceptions, we
+	 * forcibly read EXPEVT from memory and fix up the jump address, in
+	 * the interrupt exception case we jump to do_IRQ() and defer the
+	 * INTEVT read until there. As a bonus, we can also clean up the SR.RB
+	 * checks that do_IRQ() was doing..
+	 */
+	stc	r2_bank, r8
+	cmp/pz	r8
+	bf	interrupt_exception
+	shlr2	r8
+	shlr	r8
+
+#ifdef COUNT_EXCEPTIONS
+	mov.l	5f, r9
+	add	r8, r9
+	mov.l	@r9, r10
+	add	#1, r10
+	mov.l	r10, @r9
+#endif
+
+	mov.l	4f, r9
+	add	r8, r9
+	mov.l	@r9, r9
+	jmp	@r9
+	 nop
+	rts
+	 nop
+
+	.align	2
+1:	.long	0x00001000	! DSP=1
+2:	.long	0x000080f0	! FD=1, IMASK=15
+3:	.long	0xcfffffff	! RB=0, BL=0
+4:	.long	exception_handling_table
+#ifdef COUNT_EXCEPTIONS
+5:	.long	exception_count_table
+#endif
+
+interrupt_exception:
+	mov.l	1f, r9
+	jmp	@r9
+	 nop
+	rts
+	 nop
+
+	.align 2
+1:	.long	do_IRQ
+
+	.align	2
+ENTRY(exception_none)
+	rts
+	 nop
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
index 8dbf389..6e415ba 100644
--- a/arch/sh/kernel/cpu/sh4/Makefile
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -2,7 +2,8 @@
 # Makefile for the Linux/SuperH SH-4 backends.
 #
 
-obj-y	:= ex.o probe.o
+obj-y	:= ex.o probe.o common.o
+common-y	+= $(addprefix ../sh3/, entry.o)
 
 obj-$(CONFIG_SH_FPU)                    += fpu.o
 obj-$(CONFIG_SH_STORE_QUEUES)		+= sq.o
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
index bfdf5fe..fa2019a 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
@@ -97,7 +97,7 @@
 
 static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate)
 {
-	struct clk *bclk = clk_get("bus_clk");
+	struct clk *bclk = clk_get(NULL, "bus_clk");
 	unsigned long bclk_rate = clk_get_rate(bclk);
 
 	clk_put(bclk);
@@ -151,7 +151,7 @@
 
 static int __init sh4202_clk_init(void)
 {
-	struct clk *clk = clk_get("master_clk");
+	struct clk *clk = clk_get(NULL, "master_clk");
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(sh4202_onchip_clocks); i++) {
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh7780.c b/arch/sh/kernel/cpu/sh4/clock-sh7780.c
index 93ad367..9e6a216 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh7780.c
@@ -98,7 +98,7 @@
 
 static int __init sh7780_clk_init(void)
 {
-	struct clk *clk = clk_get("master_clk");
+	struct clk *clk = clk_get(NULL, "master_clk");
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(sh7780_onchip_clocks); i++) {
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index f486c07..7624677 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -282,11 +282,8 @@
 			grab_fpu(regs);
 			restore_fpu(tsk);
 			set_tsk_thread_flag(tsk, TIF_USEDFPU);
-		} else {
-			tsk->thread.trap_no = 11;
-			tsk->thread.error_code = 0;
+		} else
 			force_sig(SIGFPE, tsk);
-		}
 
 		regs->pc = nextpc;
 		return 1;
@@ -296,29 +293,29 @@
 }
 
 asmlinkage void
-do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7,
-	     struct pt_regs regs)
+do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6,
+	     unsigned long r7, struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	struct task_struct *tsk = current;
 
-	if (ieee_fpe_handler (&regs))
+	if (ieee_fpe_handler(regs))
 		return;
 
-	regs.pc += 2;
-	save_fpu(tsk, &regs);
-	tsk->thread.trap_no = 11;
-	tsk->thread.error_code = 0;
+	regs->pc += 2;
+	save_fpu(tsk, regs);
 	force_sig(SIGFPE, tsk);
 }
 
 asmlinkage void
 do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
-		     unsigned long r7, struct pt_regs regs)
+		     unsigned long r7, struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	struct task_struct *tsk = current;
 
-	grab_fpu(&regs);
-	if (!user_mode(&regs)) {
+	grab_fpu(regs);
+	if (!user_mode(regs)) {
 		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
 		return;
 	}
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index c294de1..afe0f1b 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -79,16 +79,16 @@
 	case 0x205:
 		cpu_data->type = CPU_SH7750;
 		cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
-				   CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA;
+				   CPU_HAS_PERF_COUNTER;
 		break;
 	case 0x206:
 		cpu_data->type = CPU_SH7750S;
 		cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
-				   CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA;
+				   CPU_HAS_PERF_COUNTER;
 		break;
 	case 0x1100:
 		cpu_data->type = CPU_SH7751;
-		cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+		cpu_data->flags |= CPU_HAS_FPU;
 		break;
 	case 0x2000:
 		cpu_data->type = CPU_SH73180;
@@ -126,23 +126,22 @@
 		break;
 	case 0x8000:
 		cpu_data->type = CPU_ST40RA;
-		cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+		cpu_data->flags |= CPU_HAS_FPU;
 		break;
 	case 0x8100:
 		cpu_data->type = CPU_ST40GX1;
-		cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+		cpu_data->flags |= CPU_HAS_FPU;
 		break;
 	case 0x700:
 		cpu_data->type = CPU_SH4_501;
 		cpu_data->icache.ways = 2;
 		cpu_data->dcache.ways = 2;
-		cpu_data->flags |= CPU_HAS_PTEA;
 		break;
 	case 0x600:
 		cpu_data->type = CPU_SH4_202;
 		cpu_data->icache.ways = 2;
 		cpu_data->dcache.ways = 2;
-		cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+		cpu_data->flags |= CPU_HAS_FPU;
 		break;
 	case 0x500 ... 0x501:
 		switch (prr) {
@@ -160,7 +159,7 @@
 		cpu_data->icache.ways = 2;
 		cpu_data->dcache.ways = 2;
 
-		cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+		cpu_data->flags |= CPU_HAS_FPU;
 
 		break;
 	default:
@@ -173,6 +172,10 @@
 	cpu_data->dcache.ways = 1;
 #endif
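+	/*
+	 * PTEA availability is now selected through CONFIG_CPU_HAS_PTEA
+	 * rather than set per-subtype above.
+	 */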
 
+#ifdef CONFIG_CPU_HAS_PTEA
+	cpu_data->flags |= CPU_HAS_PTEA;
+#endif
+
 	/*
 	 * On anything that's not a direct-mapped cache, look to the CVR
 	 * for I/D-cache specifics.
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index 50812d5..bbcb06f 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -2,6 +2,7 @@
  * SH7750/SH7751 Setup
  *
  *  Copyright (C) 2006  Paul Mundt
+ *  Copyright (C) 2006  Jamie Lenehan
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -10,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/serial.h>
+#include <linux/io.h>
 #include <asm/sci.h>
 
 static struct plat_sci_port sci_platform_data[] = {
@@ -46,3 +48,71 @@
 				    ARRAY_SIZE(sh7750_devices));
 }
 __initcall(sh7750_devices_setup);
+
+static struct ipr_data sh7750_ipr_map[] = {
+	/* IRQ, IPR-idx, shift, priority */
+	{ 16, 0, 12, 2 }, /* TMU0 TUNI*/
+	{ 17, 0, 12, 2 }, /* TMU1 TUNI */
+	{ 18, 0,  4, 2 }, /* TMU2 TUNI */
+	{ 19, 0,  4, 2 }, /* TMU2 TIPCI */
+	{ 27, 1, 12, 2 }, /* WDT ITI */
+	{ 20, 0,  0, 2 }, /* RTC ATI (alarm) */
+	{ 21, 0,  0, 2 }, /* RTC PRI (period) */
+	{ 22, 0,  0, 2 }, /* RTC CUI (carry) */
+	{ 23, 1,  4, 3 }, /* SCI ERI */
+	{ 24, 1,  4, 3 }, /* SCI RXI */
+	{ 25, 1,  4, 3 }, /* SCI TXI */
+	{ 40, 2,  4, 3 }, /* SCIF ERI */
+	{ 41, 2,  4, 3 }, /* SCIF RXI */
+	{ 42, 2,  4, 3 }, /* SCIF BRI */
+	{ 43, 2,  4, 3 }, /* SCIF TXI */
+	{ 34, 2,  8, 7 }, /* DMAC DMTE0 */
+	{ 35, 2,  8, 7 }, /* DMAC DMTE1 */
+	{ 36, 2,  8, 7 }, /* DMAC DMTE2 */
+	{ 37, 2,  8, 7 }, /* DMAC DMTE3 */
+	{ 28, 2,  8, 7 }, /* DMAC DMAE */
+};
+
+static struct ipr_data sh7751_ipr_map[] = {
+	{ 44, 2,  8, 7 }, /* DMAC DMTE4 */
+	{ 45, 2,  8, 7 }, /* DMAC DMTE5 */
+	{ 46, 2,  8, 7 }, /* DMAC DMTE6 */
+	{ 47, 2,  8, 7 }, /* DMAC DMTE7 */
+	/* The following use INTPRI00 for masking, which is a 32-bit
+	   register, not a 16-bit register like the IPRx registers, so it
+	   would need special support */
+	/*{ 72, INTPRI00,  8, ? },*/ /* TMU3 TUNI */
+	/*{ 76, INTPRI00, 12, ? },*/ /* TMU4 TUNI */
+};
+
+static unsigned long ipr_offsets[] = {
+	0xffd00004UL,	/* 0: IPRA */
+	0xffd00008UL,	/* 1: IPRB */
+	0xffd0000cUL,	/* 2: IPRC */
+	0xffd00010UL,	/* 3: IPRD */
+};
+
+/* given the IPR index return the address of the IPR register */
+unsigned int map_ipridx_to_addr(int idx)
+{
+	if (idx >= ARRAY_SIZE(ipr_offsets))
+		return 0;
+	return ipr_offsets[idx];
+}
+
+#define INTC_ICR	0xffd00000UL
+#define INTC_ICR_IRLM   (1<<7)
+
+/* enable individual interrupt mode for external interrupts */
+void ipr_irq_enable_irlm(void)
+{
+	ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
+}
+
+void __init init_IRQ_ipr(void)
+{
+	make_ipr_irq(sh7750_ipr_map, ARRAY_SIZE(sh7750_ipr_map));
+#ifdef CONFIG_CPU_SUBTYPE_SH7751
+	make_ipr_irq(sh7751_ipr_map, ARRAY_SIZE(sh7751_ipr_map));
+#endif
+}
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7780.c b/arch/sh/kernel/cpu/sh4/setup-sh7780.c
index 814ddb2..9aeaa2d 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7780.c
@@ -79,25 +79,27 @@
 __initcall(sh7780_devices_setup);
 
 static struct intc2_data intc2_irq_table[] = {
-	{ TIMER_IRQ, 0, 24, 0, INTC_TMU0_MSK, 2 },
-	{ 21, 1, 0, 0, INTC_RTC_MSK, TIMER_PRIORITY },
-	{ 22, 1, 1, 0, INTC_RTC_MSK, TIMER_PRIORITY },
-	{ 23, 1, 2, 0, INTC_RTC_MSK, TIMER_PRIORITY },
-	{ SCIF0_ERI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
-	{ SCIF0_RXI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
-	{ SCIF0_BRI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
-	{ SCIF0_TXI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
+	{ 28, 0, 24, 0, 0, 2 },		/* TMU0 */
 
-	{ SCIF1_ERI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY },
-	{ SCIF1_RXI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY },
-	{ SCIF1_BRI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY },
-	{ SCIF1_TXI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY },
+	{ 21, 1,  0, 0, 2, 2 },
+	{ 22, 1,  1, 0, 2, 2 },
+	{ 23, 1,  2, 0, 2, 2 },
 
-	{ PCIC0_IRQ, 0x10,  8, 0, INTC_PCIC0_MSK, PCIC0_PRIORITY },
-	{ PCIC1_IRQ, 0x10,  0, 0, INTC_PCIC1_MSK, PCIC1_PRIORITY },
-	{ PCIC2_IRQ, 0x14, 24, 0, INTC_PCIC2_MSK, PCIC2_PRIORITY },
-	{ PCIC3_IRQ, 0x14, 16, 0, INTC_PCIC3_MSK, PCIC3_PRIORITY },
-	{ PCIC4_IRQ, 0x14,  8, 0, INTC_PCIC4_MSK, PCIC4_PRIORITY },
+	{ 40, 8, 24, 0, 3, 3 },		/* SCIF0 ERI */
+	{ 41, 8, 24, 0, 3, 3 },		/* SCIF0 RXI */
+	{ 42, 8, 24, 0, 3, 3 },		/* SCIF0 BRI */
+	{ 43, 8, 24, 0, 3, 3 },		/* SCIF0 TXI */
+
+	{ 76, 8, 16, 0, 4, 3 },		/* SCIF1 ERI */
+	{ 77, 8, 16, 0, 4, 3 },		/* SCIF1 RXI */
+	{ 78, 8, 16, 0, 4, 3 },		/* SCIF1 BRI */
+	{ 79, 8, 16, 0, 4, 3 },		/* SCIF1 TXI */
+
+	{ 64, 0x10,  8, 0, 14, 2 },	/* PCIC0 */
+	{ 65, 0x10,  0, 0, 15, 2 },	/* PCIC1 */
+	{ 66, 0x14, 24, 0, 16, 2 },	/* PCIC2 */
+	{ 67, 0x14, 16, 0, 17, 2 },	/* PCIC3 */
+	{ 68, 0x14,  8, 0, 18, 2 },	/* PCIC4 */
 };
 
 void __init init_IRQ_intc2(void)
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 7bcc73f..0c9ea38 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -19,7 +19,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu/sq.h>
@@ -38,7 +38,7 @@
 
 static struct sq_mapping *sq_mapping_list;
 static DEFINE_SPINLOCK(sq_mapping_lock);
-static kmem_cache_t *sq_cache;
+static struct kmem_cache *sq_cache;
 static unsigned long *sq_bitmap;
 
 #define store_queue_barrier()			\
@@ -67,6 +67,7 @@
 	/* Wait for completion */
 	store_queue_barrier();
 }
+EXPORT_SYMBOL(sq_flush_range);
 
 static inline void sq_mapping_list_add(struct sq_mapping *map)
 {
@@ -166,7 +167,7 @@
 	map->size = size;
 	map->name = name;
 
-	page = bitmap_find_free_region(sq_bitmap, 0x04000000,
+	page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
 				       get_order(map->size));
 	if (unlikely(page < 0)) {
 		ret = -ENOSPC;
@@ -193,6 +194,7 @@
 	kmem_cache_free(sq_cache, map);
 	return ret;
 }
+EXPORT_SYMBOL(sq_remap);
 
 /**
  * sq_unmap - Unmap a Store Queue allocation
@@ -234,6 +236,7 @@
 
 	kmem_cache_free(sq_cache, map);
 }
+EXPORT_SYMBOL(sq_unmap);
 
 /*
  * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
@@ -402,7 +405,3 @@
 MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
 MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
 MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(sq_remap);
-EXPORT_SYMBOL(sq_unmap);
-EXPORT_SYMBOL(sq_flush_range);
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
index a000227..6034082 100644
--- a/arch/sh/kernel/early_printk.c
+++ b/arch/sh/kernel/early_printk.c
@@ -12,7 +12,7 @@
 #include <linux/console.h>
 #include <linux/tty.h>
 #include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #ifdef CONFIG_SH_STANDARD_BIOS
 #include <asm/sh_bios.h>
@@ -62,17 +62,9 @@
 #include <linux/serial_core.h>
 #include "../../../drivers/serial/sh-sci.h"
 
-#ifdef CONFIG_CPU_SH4
-#define SCIF_REG	0xffe80000
-#elif defined(CONFIG_CPU_SUBTYPE_SH72060)
-#define SCIF_REG	0xfffe9800
-#else
-#error "Undefined SCIF for this subtype"
-#endif
-
 static struct uart_port scif_port = {
-	.mapbase	= SCIF_REG,
-	.membase	= (char __iomem *)SCIF_REG,
+	.mapbase	= CONFIG_EARLY_SCIF_CONSOLE_PORT,
+	.membase	= (char __iomem *)CONFIG_EARLY_SCIF_CONSOLE_PORT,
 };
 
 static void scif_sercon_putc(int c)
@@ -113,23 +105,29 @@
 	.index		= -1,
 };
 
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS)
+/*
+ * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4
+ * devices that aren't using sh-ipl+g.
+ */
 static void scif_sercon_init(int baud)
 {
-	ctrl_outw(0, SCIF_REG + 8);
-	ctrl_outw(0, SCIF_REG);
+	ctrl_outw(0, scif_port.mapbase + 8);
+	ctrl_outw(0, scif_port.mapbase);
 
 	/* Set baud rate */
 	ctrl_outb((CONFIG_SH_PCLK_FREQ + 16 * baud) /
-		  (32 * baud) - 1, SCIF_REG + 4);
+		  (32 * baud) - 1, scif_port.mapbase + 4);
 
-	ctrl_outw(12, SCIF_REG + 24);
-	ctrl_outw(8, SCIF_REG + 24);
-	ctrl_outw(0, SCIF_REG + 32);
-	ctrl_outw(0x60, SCIF_REG + 16);
-	ctrl_outw(0, SCIF_REG + 36);
-	ctrl_outw(0x30, SCIF_REG + 8);
+	ctrl_outw(12, scif_port.mapbase + 24);
+	ctrl_outw(8, scif_port.mapbase + 24);
+	ctrl_outw(0, scif_port.mapbase + 32);
+	ctrl_outw(0x60, scif_port.mapbase + 16);
+	ctrl_outw(0, scif_port.mapbase + 36);
+	ctrl_outw(0x30, scif_port.mapbase + 8);
 }
-#endif
+#endif /* CONFIG_CPU_SH4 && !CONFIG_SH_STANDARD_BIOS */
+#endif /* CONFIG_EARLY_SCIF_CONSOLE */
 
 /*
  * Setup a default console, if more than one is compiled in, rely on the
@@ -168,7 +166,7 @@
 	if (!strncmp(buf, "serial", 6)) {
 		early_console = &scif_console;
 
-#ifdef CONFIG_CPU_SH4
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS)
 		scif_sercon_init(115200);
 #endif
 	}
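For reference, the baud-rate write in scif_sercon_init() above is the usual SCIF bit-rate formula SCBRR = Pck / (32 * baud) - 1, with the extra 16 * baud term rounding to the nearest divisor. A small sketch; the 33.33 MHz peripheral clock used in the example is an assumed value:

	/* Compute the SCBRR value for a given peripheral clock and baud rate. */
	static unsigned int scif_scbrr(unsigned long pclk, unsigned long baud)
	{
		return (pclk + 16 * baud) / (32 * baud) - 1;
	}

	/* e.g. scif_scbrr(33333333, 115200) == 8 for a 33.33 MHz Pck at 115200 bps */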
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
new file mode 100644
index 0000000..29136a3
--- /dev/null
+++ b/arch/sh/kernel/entry-common.S
@@ -0,0 +1,433 @@
+/* $Id: entry.S,v 1.37 2004/06/11 13:02:46 doyu Exp $
+ *
+ *  linux/arch/sh/entry.S
+ *
+ *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
+ *  Copyright (C) 2003  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
+! NOTE:
+! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
+! to be jumped is too far, but it causes illegal slot exception.
+
+/*	
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * NOTE: This code uses a convention that instructions in the delay slot
+ * of a transfer-control instruction are indented by an extra space, thus:
+ *
+ *    jmp	@k0	    ! control-transfer instruction
+ *     ldc	k1, ssr     ! delay slot
+ *
+ * Stack layout in 'ret_from_syscall':
+ * 	ptrace needs to have all regs on the stack.
+ *	if the order here is changed, it needs to be
+ *	updated in ptrace.c and ptrace.h
+ *
+ *	r0
+ *      ...
+ *	r15 = stack pointer
+ *	spc
+ *	pr
+ *	ssr
+ *	gbr
+ *	mach
+ *	macl
+ *	syscall #
+ *
+ */
+
+#if defined(CONFIG_PREEMPT)
+#  define preempt_stop()	cli
+#else
+#  define preempt_stop()
+#  define resume_kernel		__restore_all
+#endif
+
+#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
+! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present.
+! If both are configured, handle the debug traps (breakpoints) in SW,
+! but still allow BIOS traps to FW.
+
+	.align	2
+debug_kernel:
+#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB)
+	/* Force BIOS call to FW (debug_trap put TRA in r8) */
+	mov	r8,r0
+	shlr2	r0
+	cmp/eq	#0x3f,r0
+	bt	debug_kernel_fw
+#endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */
+
+debug_enter:		
+#if defined(CONFIG_SH_KGDB)
+	/* Jump to kgdb, pass stacked regs as arg */
+debug_kernel_sw:
+	mov.l	3f, r0
+	jmp	@r0
+	 mov	r15, r4
+	.align	2
+3:	.long	kgdb_handle_exception
+#endif /* CONFIG_SH_KGDB */
+
+#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */
+
+
+	.align	2
+debug_trap:	
+#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
+	mov	#OFF_SR, r0
+	mov.l	@(r0,r15), r0		! get status register
+	shll	r0
+	shll	r0			! kernel space?
+	bt/s	debug_kernel
+#endif
+	 mov.l	@r15, r0		! Restore R0 value
+	mov.l	1f, r8
+	jmp	@r8
+	 nop
+
+	.align	2
+ENTRY(exception_error)
+	!
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	3f, r0
+	jsr	@r0
+	 nop
+#endif
+	sti
+	mov.l	2f, r0
+	jmp	@r0
+	 nop
+
+!
+	.align	2
+1:	.long	break_point_trap_software
+2:	.long	do_exception_error
+#ifdef CONFIG_TRACE_IRQFLAGS
+3:	.long	trace_hardirqs_on
+#endif
+
+	.align	2
+ret_from_exception:
+	preempt_stop()
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	4f, r0
+	jsr	@r0
+	 nop
+#endif
+ENTRY(ret_from_irq)
+	!
+	mov	#OFF_SR, r0
+	mov.l	@(r0,r15), r0	! get status register
+	shll	r0
+	shll	r0		! kernel space?
+	get_current_thread_info r8, r0
+	bt	resume_kernel	! Yes, it's from kernel, go back soon
+
+#ifdef CONFIG_PREEMPT
+	bra	resume_userspace
+	 nop
+ENTRY(resume_kernel)
+	mov.l	@(TI_PRE_COUNT,r8), r0	! current_thread_info->preempt_count
+	tst	r0, r0
+	bf	noresched
+need_resched:
+	mov.l	@(TI_FLAGS,r8), r0	! current_thread_info->flags
+	tst	#_TIF_NEED_RESCHED, r0	! need_resched set?
+	bt	noresched
+
+	mov	#OFF_SR, r0
+	mov.l	@(r0,r15), r0		! get status register
+	and	#0xf0, r0		! interrupts off (exception path)?
+	cmp/eq	#0xf0, r0
+	bt	noresched
+
+	mov.l	1f, r0
+	mov.l	r0, @(TI_PRE_COUNT,r8)
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	3f, r0
+	jsr	@r0
+	 nop
+#endif
+	sti
+	mov.l	2f, r0
+	jsr	@r0
+	 nop
+	mov	#0, r0
+	mov.l	r0, @(TI_PRE_COUNT,r8)
+	cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	4f, r0
+	jsr	@r0
+	 nop
+#endif
+
+	bra	need_resched
+	 nop
+
+noresched:
+	bra	__restore_all
+	 nop
+
+	.align 2
+1:	.long	PREEMPT_ACTIVE
+2:	.long	schedule
+#ifdef CONFIG_TRACE_IRQFLAGS
+3:	.long	trace_hardirqs_on
+4:	.long	trace_hardirqs_off
+#endif
+#endif
+
+ENTRY(resume_userspace)
+	! r8: current_thread_info
+	cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	5f, r0
+	jsr	@r0
+	 nop
+#endif
+	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
+	tst	#_TIF_WORK_MASK, r0
+	bt/s	__restore_all
+	 tst	#_TIF_NEED_RESCHED, r0
+
+	.align	2
+work_pending:
+	! r0: current_thread_info->flags
+	! r8: current_thread_info
+	! t:  result of "tst	#_TIF_NEED_RESCHED, r0"
+	bf/s	work_resched
+	 tst	#(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0
+work_notifysig:
+	bt/s	__restore_all
+	 mov	r15, r4
+	mov	r12, r5		! set arg1(save_r0)
+	mov	r0, r6
+	mov.l	2f, r1
+	mov.l	3f, r0
+	jmp	@r1
+	 lds	r0, pr
+work_resched:
+#ifndef CONFIG_PREEMPT
+	! gUSA handling
+	mov.l	@(OFF_SP,r15), r0	! get user space stack pointer
+	mov	r0, r1
+	shll	r0
+	bf/s	1f
+	 shll	r0
+	bf/s	1f
+	 mov	#OFF_PC, r0
+	! 				  SP >= 0xc0000000 : gUSA mark
+	mov.l	@(r0,r15), r2		! get user space PC (program counter)
+	mov.l	@(OFF_R0,r15), r3	! end point
+	cmp/hs	r3, r2			! r2 >= r3? 
+	bt	1f
+	add	r3, r1			! rewind point #2
+	mov.l	r1, @(r0,r15)		! reset PC to rewind point #2
+	!
+1:
+#endif
+	mov.l	1f, r1
+	jsr	@r1				! schedule
+	 nop
+	cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	5f, r0
+	jsr	@r0
+	 nop
+#endif
+	!
+	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
+	tst	#_TIF_WORK_MASK, r0
+	bt	__restore_all
+	bra	work_pending
+	 tst	#_TIF_NEED_RESCHED, r0
+
+	.align	2
+1:	.long	schedule
+2:	.long	do_notify_resume
+3:	.long	restore_all
+#ifdef CONFIG_TRACE_IRQFLAGS
+4:	.long	trace_hardirqs_on
+5:	.long	trace_hardirqs_off
+#endif
+
+	.align	2
+syscall_exit_work:
+	! r0: current_thread_info->flags
+	! r8: current_thread_info
+	tst	#_TIF_SYSCALL_TRACE, r0
+	bt/s	work_pending
+	 tst	#_TIF_NEED_RESCHED, r0
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	5f, r0
+	jsr	@r0
+	 nop
+#endif
+	sti
+	! XXX setup arguments...
+	mov.l	4f, r0			! do_syscall_trace
+	jsr	@r0
+	 nop
+	bra	resume_userspace
+	 nop
+
+	.align	2
+syscall_trace_entry:
+	!                     	Yes it is traced.
+	! XXX setup arguments...
+	mov.l	4f, r11		! Call do_syscall_trace which notifies
+	jsr	@r11	    	! superior (will chomp R[0-7])
+	 nop
+	!			Reload R0-R4 from kernel stack, where the
+	!   	    	    	parent may have modified them using
+	!   	    	    	ptrace(POKEUSR).  (Note that R0-R2 are
+	!   	    	    	used by the system call handler directly
+	!   	    	    	from the kernel stack anyway, so don't need
+	!   	    	    	to be reloaded here.)  This allows the parent
+	!   	    	    	to rewrite system calls and args on the fly.
+	mov.l	@(OFF_R4,r15), r4   ! arg0
+	mov.l	@(OFF_R5,r15), r5
+	mov.l	@(OFF_R6,r15), r6
+	mov.l	@(OFF_R7,r15), r7   ! arg3
+	mov.l	@(OFF_R3,r15), r3   ! syscall_nr
+	!
+	mov.l	2f, r10			! Number of syscalls
+	cmp/hs	r10, r3
+	bf	syscall_call
+	mov	#-ENOSYS, r0
+	bra	syscall_exit
+	 mov.l	r0, @(OFF_R0,r15)	! Return value
+
+__restore_all:
+	mov.l	1f, r0
+	jmp	@r0
+	 nop
+
+	.align	2
+1:	.long	restore_all
+
+	.align	2
+not_syscall_tra:	
+	bra	debug_trap
+	 nop
+
+	.align	2
+syscall_badsys:			! Bad syscall number
+	mov	#-ENOSYS, r0
+	bra	resume_userspace
+	 mov.l	r0, @(OFF_R0,r15)	! Return value
+	
+
+/*
+ * Syscall interface:
+ *
+ *	Syscall #: R3
+ *	Arguments #0 to #3: R4--R7
+ *	Arguments #4 to #6: R0, R1, R2
+ *	TRA: (number of arguments + 0x10) x 4
+ *
+ * This code also handles delegating other traps to the BIOS/gdb stub
+ * according to:
+ *
+ * Trap number
+ * (TRA>>2) 	    Purpose
+ * -------- 	    -------
+ * 0x0-0xf  	    old syscall ABI
+ * 0x10-0x1f  	    new syscall ABI
+ * 0x20-0xff  	    delegated through debug_trap to BIOS/gdb stub.
+ *
+ * Note: When we're first called, the TRA value must be shifted
+ * right 2 bits in order to get the value that was used as the "trapa"
+ * argument.
+ */
+
+	.align	2
+	.globl	ret_from_fork
+ret_from_fork:
+	mov.l	1f, r8
+	jsr	@r8
+	 mov	r0, r4
+	bra	syscall_exit
+	 nop
+	.align	2
+1:	.long	schedule_tail
+	!
+ENTRY(system_call)
+#if !defined(CONFIG_CPU_SH2)
+	mov.l	1f, r9
+	mov.l	@r9, r8		! Read from TRA (Trap Address) Register
+#endif
+	!
+	! Is the trap argument >= 0x20? (TRA will be >= 0x80)
+	mov	#0x7f, r9
+	cmp/hi	r9, r8
+	bt/s	not_syscall_tra
+	 mov	#OFF_TRA, r9
+	add	r15, r9
+	mov.l	r8, @r9			! set TRA value to tra
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	5f, r10
+	jsr	@r10
+	 nop
+#endif
+	sti
+
+	!
+	get_current_thread_info r8, r10
+	mov.l	@(TI_FLAGS,r8), r8
+	mov	#_TIF_SYSCALL_TRACE, r10
+	tst	r10, r8
+	bf	syscall_trace_entry
+	!
+	mov.l	2f, r8			! Number of syscalls
+	cmp/hs	r8, r3
+	bt	syscall_badsys
+	!
+syscall_call:
+	shll2	r3		! x4
+	mov.l	3f, r8		! Load the address of sys_call_table
+	add	r8, r3
+	mov.l	@r3, r8
+	jsr	@r8	    	! jump to specific syscall handler
+	 nop
+	mov.l	@(OFF_R0,r15), r12		! save r0
+	mov.l	r0, @(OFF_R0,r15)		! save the return value
+	!
+syscall_exit:
+	cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+	mov.l	6f, r0
+	jsr	@r0
+	 nop
+#endif
+	!
+	get_current_thread_info r8, r0
+	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
+	tst	#_TIF_ALLWORK_MASK, r0
+	bf	syscall_exit_work
+	bra	__restore_all
+	 nop
+	.align	2
+#if !defined(CONFIG_CPU_SH2)
+1:	.long	TRA
+#endif
+2:	.long	NR_syscalls
+3:	.long	sys_call_table
+4:	.long	do_syscall_trace
+#ifdef CONFIG_TRACE_IRQFLAGS
+5:	.long	trace_hardirqs_on
+6:	.long	trace_hardirqs_off
+#endif
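The trap interface documented in the comment block above (syscall number in r3, arguments 0-3 in r4-r7, trapa #(0x10 + number of arguments), result returned in r0) can be illustrated from the userspace side. This is a hedged sketch only; sh_syscall2() is a hypothetical name used just for illustration:

	static inline long sh_syscall2(long nr, long arg0, long arg1)
	{
		register long r3 __asm__("r3") = nr;	/* syscall number */
		register long r4 __asm__("r4") = arg0;	/* argument #0 */
		register long r5 __asm__("r5") = arg1;	/* argument #1 */
		register long r0 __asm__("r0");		/* return value */

		__asm__ __volatile__("trapa #0x12"	/* 0x10 + 2 arguments */
				     : "=r" (r0)
				     : "r" (r3), "r" (r4), "r" (r5)
				     : "memory", "t");
		return r0;
	}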
diff --git a/arch/sh/kernel/entry.S b/arch/sh/kernel/entry.S
deleted file mode 100644
index 39aaefb..0000000
--- a/arch/sh/kernel/entry.S
+++ /dev/null
@@ -1,843 +0,0 @@
-/*
- *  linux/arch/sh/entry.S
- *
- *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
- *  Copyright (C) 2003 - 2006  Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- */
-#include <linux/sys.h>
-#include <linux/errno.h>
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/cpu/mmu_context.h>
-#include <asm/unistd.h>
-
-! NOTE:
-! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
-! to be jumped is too far, but it causes illegal slot exception.
-
-/*	
- * entry.S contains the system-call and fault low-level handling routines.
- * This also contains the timer-interrupt handler, as well as all interrupts
- * and faults that can result in a task-switch.
- *
- * NOTE: This code handles signal-recognition, which happens every time
- * after a timer-interrupt and after each system call.
- *
- * NOTE: This code uses a convention that instructions in the delay slot
- * of a transfer-control instruction are indented by an extra space, thus:
- *
- *    jmp	@k0	    ! control-transfer instruction
- *     ldc	k1, ssr     ! delay slot
- *
- * Stack layout in 'ret_from_syscall':
- * 	ptrace needs to have all regs on the stack.
- *	if the order here is changed, it needs to be
- *	updated in ptrace.c and ptrace.h
- *
- *	r0
- *      ...
- *	r15 = stack pointer
- *	spc
- *	pr
- *	ssr
- *	gbr
- *	mach
- *	macl
- *	syscall #
- *
- */
-#if defined(CONFIG_KGDB_NMI)
-NMI_VEC = 0x1c0			! Must catch early for debounce
-#endif
-
-/* Offsets to the stack */
-OFF_R0  =  0		/* Return value. New ABI also arg4 */
-OFF_R1  =  4     	/* New ABI: arg5 */
-OFF_R2  =  8     	/* New ABI: arg6 */
-OFF_R3  =  12     	/* New ABI: syscall_nr */
-OFF_R4  =  16     	/* New ABI: arg0 */
-OFF_R5  =  20     	/* New ABI: arg1 */
-OFF_R6  =  24     	/* New ABI: arg2 */
-OFF_R7  =  28     	/* New ABI: arg3 */
-OFF_SP	=  (15*4)
-OFF_PC  =  (16*4)
-OFF_SR	=  (16*4+8)
-OFF_TRA	=  (16*4+6*4)
-
-
-#define k0	r0
-#define k1	r1
-#define k2	r2
-#define k3	r3
-#define k4	r4
-
-#define g_imask		r6	/* r6_bank1 */
-#define k_g_imask	r6_bank	/* r6_bank1 */
-#define current		r7	/* r7_bank1 */
-
-/*
- * Kernel mode register usage:
- *	k0	scratch
- *	k1	scratch
- *	k2	scratch (Exception code)
- *	k3	scratch (Return address)
- *	k4	scratch
- *	k5	reserved
- *	k6	Global Interrupt Mask (0--15 << 4)
- *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
- */
-
-!
-! TLB Miss / Initial Page write exception handling
-!			_and_
-! TLB hits, but the access violate the protection.
-! It can be valid access, such as stack grow and/or C-O-W.
-!
-!
-! Find the pmd/pte entry and loadtlb
-! If it's not found, cause address error (SEGV)
-!
-! Although this could be written in assembly language (and it'd be faster),
-! this first version depends *much* on C implementation.
-!
-
-#define CLI()				\
-	stc	sr, r0;			\
-	or	#0xf0, r0;		\
-	ldc	r0, sr
-
-#define STI()				\
-	mov.l	__INV_IMASK, r11;	\
-	stc	sr, r10;		\
-	and	r11, r10;		\
-	stc	k_g_imask, r11;		\
-	or	r11, r10;		\
-	ldc	r10, sr
-
-#if defined(CONFIG_PREEMPT)
-#  define preempt_stop()	CLI()
-#else
-#  define preempt_stop()
-#  define resume_kernel		restore_all
-#endif
-
-#if defined(CONFIG_MMU)
-	.align	2
-ENTRY(tlb_miss_load)
-	bra	call_dpf
-	 mov	#0, r5
-
-	.align	2
-ENTRY(tlb_miss_store)
-	bra	call_dpf
-	 mov	#1, r5
-
-	.align	2
-ENTRY(initial_page_write)
-	bra	call_dpf
-	 mov	#1, r5
-
-	.align	2
-ENTRY(tlb_protection_violation_load)
-	bra	call_dpf
-	 mov	#0, r5
-
-	.align	2
-ENTRY(tlb_protection_violation_store)
-	bra	call_dpf
-	 mov	#1, r5
-
-call_dpf:
-	mov.l	1f, r0
-	mov	r5, r8
-	mov.l	@r0, r6
-	mov	r6, r9
-	mov.l	2f, r0
-	sts	pr, r10
-	jsr	@r0
-	 mov	r15, r4
-	!
-	tst	r0, r0
-	bf/s	0f
-	 lds	r10, pr
-	rts
-	 nop
-0:	STI()
-	mov.l	3f, r0
-	mov	r9, r6
-	mov	r8, r5
-	jmp	@r0
-	 mov	r15, r4
-
-	.align 2
-1:	.long	MMU_TEA
-2:	.long	__do_page_fault
-3:	.long	do_page_fault
-
-	.align	2
-ENTRY(address_error_load)
-	bra	call_dae
-	 mov	#0,r5		! writeaccess = 0
-
-	.align	2
-ENTRY(address_error_store)
-	bra	call_dae
-	 mov	#1,r5		! writeaccess = 1
-
-	.align	2
-call_dae:
-	mov.l	1f, r0
-	mov.l	@r0, r6		! address
-	mov.l	2f, r0
-	jmp	@r0
-	 mov	r15, r4		! regs
-
-	.align 2
-1:	.long	MMU_TEA
-2:	.long   do_address_error
-#endif /* CONFIG_MMU */
-
-#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
-! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present.
-! If both are configured, handle the debug traps (breakpoints) in SW,
-! but still allow BIOS traps to FW.
-
-	.align	2
-debug_kernel:
-#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB)
-	/* Force BIOS call to FW (debug_trap put TRA in r8) */
-	mov	r8,r0
-	shlr2	r0
-	cmp/eq	#0x3f,r0
-	bt	debug_kernel_fw
-#endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */
-
-debug_enter:		
-#if defined(CONFIG_SH_KGDB)
-	/* Jump to kgdb, pass stacked regs as arg */
-debug_kernel_sw:
-	mov.l	3f, r0
-	jmp	@r0
-	 mov	r15, r4
-	.align	2
-3:	.long	kgdb_handle_exception
-#endif /* CONFIG_SH_KGDB */
-
-#if defined(CONFIG_SH_STANDARD_BIOS)
-	/* Unwind the stack and jmp to the debug entry */
-debug_kernel_fw:
-	mov.l	@r15+, r0
-	mov.l	@r15+, r1
-	mov.l	@r15+, r2
-	mov.l	@r15+, r3
-	mov.l	@r15+, r4
-	mov.l	@r15+, r5
-	mov.l	@r15+, r6
-	mov.l	@r15+, r7
-	stc	sr, r8
-	mov.l	1f, r9			! BL =1, RB=1, IMASK=0x0F
-	or	r9, r8
-	ldc	r8, sr			! here, change the register bank
-	mov.l	@r15+, r8
-	mov.l	@r15+, r9
-	mov.l	@r15+, r10
-	mov.l	@r15+, r11
-	mov.l	@r15+, r12
-	mov.l	@r15+, r13
-	mov.l	@r15+, r14
-	mov.l	@r15+, k0
-	ldc.l	@r15+, spc
-	lds.l	@r15+, pr
-	mov.l	@r15+, k1
-	ldc.l	@r15+, gbr
-	lds.l	@r15+, mach
-	lds.l	@r15+, macl
-	mov	k0, r15
-	!
-	mov.l	2f, k0
-	mov.l	@k0, k0
-	jmp	@k0
-	 ldc	k1, ssr
-	.align	2
-1:	.long	0x300000f0
-2:	.long	gdb_vbr_vector
-#endif /* CONFIG_SH_STANDARD_BIOS */
-
-#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */
-
-
-	.align	2
-debug_trap:	
-#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
-	mov	#OFF_SR, r0
-	mov.l	@(r0,r15), r0		! get status register
-	shll	r0
-	shll	r0			! kernel space?
-	bt/s	debug_kernel
-#endif
-	 mov.l	@r15, r0		! Restore R0 value
-	mov.l	1f, r8
-	jmp	@r8
-	 nop
-
-	.align	2
-ENTRY(exception_error)
-	!
-	STI()
-	mov.l	2f, r0
-	jmp	@r0
-	 nop
-
-!
-	.align	2
-1:	.long	break_point_trap_software
-2:	.long	do_exception_error
-
-	.align	2
-ret_from_exception:
-	preempt_stop()
-ENTRY(ret_from_irq)
-	!
-	mov	#OFF_SR, r0
-	mov.l	@(r0,r15), r0	! get status register
-	shll	r0
-	shll	r0		! kernel space?
-	bt/s	resume_kernel	! Yes, it's from kernel, go back soon
-	 GET_THREAD_INFO(r8)
-
-#ifdef CONFIG_PREEMPT
-	bra	resume_userspace
-	 nop
-ENTRY(resume_kernel)
-	mov.l	@(TI_PRE_COUNT,r8), r0	! current_thread_info->preempt_count
-	tst	r0, r0
-	bf	noresched
-need_resched:
-	mov.l	@(TI_FLAGS,r8), r0	! current_thread_info->flags
-	tst	#_TIF_NEED_RESCHED, r0	! need_resched set?
-	bt	noresched
-
-	mov	#OFF_SR, r0
-	mov.l	@(r0,r15), r0		! get status register
-	and	#0xf0, r0		! interrupts off (exception path)?
-	cmp/eq	#0xf0, r0
-	bt	noresched
-
-	mov.l	1f, r0
-	mov.l	r0, @(TI_PRE_COUNT,r8)
-
-	STI()
-	mov.l	2f, r0
-	jsr	@r0
-	 nop
-	mov	#0, r0
-	mov.l	r0, @(TI_PRE_COUNT,r8)
-	CLI()
-
-	bra	need_resched
-	 nop
-noresched:
-	bra	restore_all
-	 nop
-
-	.align 2
-1:	.long	PREEMPT_ACTIVE
-2:	.long	schedule
-#endif
-
-ENTRY(resume_userspace)
-	! r8: current_thread_info
-	CLI()
-	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
-	tst	#_TIF_WORK_MASK, r0
-	bt/s	restore_all
-	 tst	#_TIF_NEED_RESCHED, r0
-
-	.align	2
-work_pending:
-	! r0: current_thread_info->flags
-	! r8: current_thread_info
-	! t:  result of "tst	#_TIF_NEED_RESCHED, r0"
-	bf/s	work_resched
-	 tst	#(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0
-work_notifysig:
-	bt/s	restore_all
-	 mov	r15, r4
-	mov	r12, r5		! set arg1(save_r0)
-	mov	r0, r6
-	mov.l	2f, r1
-	mova	restore_all, r0
-	jmp	@r1
-	 lds	r0, pr
-work_resched:
-#ifndef CONFIG_PREEMPT
-	! gUSA handling
-	mov.l	@(OFF_SP,r15), r0	! get user space stack pointer
-	mov	r0, r1
-	shll	r0
-	bf/s	1f
-	 shll	r0
-	bf/s	1f
-	 mov	#OFF_PC, r0
-	! 				  SP >= 0xc0000000 : gUSA mark
-	mov.l	@(r0,r15), r2		! get user space PC (program counter)
-	mov.l	@(OFF_R0,r15), r3	! end point
-	cmp/hs	r3, r2			! r2 >= r3? 
-	bt	1f
-	add	r3, r1			! rewind point #2
-	mov.l	r1, @(r0,r15)		! reset PC to rewind point #2
-	!
-1:
-#endif
-	mov.l	1f, r1
-	jsr	@r1				! schedule
-	 nop
-	CLI()
-	!
-	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
-	tst	#_TIF_WORK_MASK, r0
-	bt	restore_all
-	bra	work_pending
-	 tst	#_TIF_NEED_RESCHED, r0
-
-	.align	2
-1:	.long	schedule
-2:	.long	do_notify_resume
-
-	.align	2
-syscall_exit_work:
-	! r0: current_thread_info->flags
-	! r8: current_thread_info
-	tst	#_TIF_SYSCALL_TRACE, r0
-	bt/s	work_pending
-	 tst	#_TIF_NEED_RESCHED, r0
-	STI()
-	! XXX setup arguments...
-	mov.l	4f, r0			! do_syscall_trace
-	jsr	@r0
-	 nop
-	bra	resume_userspace
-	 nop
-
-	.align	2
-syscall_trace_entry:
-	!                     	Yes it is traced.
-	! XXX setup arguments...
-	mov.l	4f, r11		! Call do_syscall_trace which notifies
-	jsr	@r11	    	! superior (will chomp R[0-7])
-	 nop
-	!			Reload R0-R4 from kernel stack, where the
-	!   	    	    	parent may have modified them using
-	!   	    	    	ptrace(POKEUSR).  (Note that R0-R2 are
-	!   	    	    	used by the system call handler directly
-	!   	    	    	from the kernel stack anyway, so don't need
-	!   	    	    	to be reloaded here.)  This allows the parent
-	!   	    	    	to rewrite system calls and args on the fly.
-	mov.l	@(OFF_R4,r15), r4   ! arg0
-	mov.l	@(OFF_R5,r15), r5
-	mov.l	@(OFF_R6,r15), r6
-	mov.l	@(OFF_R7,r15), r7   ! arg3
-	mov.l	@(OFF_R3,r15), r3   ! syscall_nr
-	!   	    	    Arrange for do_syscall_trace to be called
-	!   	    	    again as the system call returns.
-	mov.l	2f, r10			! Number of syscalls
-	cmp/hs	r10, r3
-	bf	syscall_call
-	mov	#-ENOSYS, r0
-	bra	syscall_exit
-	 mov.l	r0, @(OFF_R0,r15)	! Return value
-
-/*
- * Syscall interface:
- *
- *	Syscall #: R3
- *	Arguments #0 to #3: R4--R7
- *	Arguments #4 to #6: R0, R1, R2
- *	TRA: (number of arguments + 0x10) x 4
- *
- * This code also handles delegating other traps to the BIOS/gdb stub
- * according to:
- *
- * Trap number
- * (TRA>>2) 	    Purpose
- * -------- 	    -------
- * 0x0-0xf  	    old syscall ABI
- * 0x10-0x1f  	    new syscall ABI
- * 0x20-0xff  	    delegated through debug_trap to BIOS/gdb stub.
- *
- * Note: When we're first called, the TRA value must be shifted
- * right 2 bits in order to get the value that was used as the "trapa"
- * argument.
- */
-
-	.align	2
-	.globl	ret_from_fork
-ret_from_fork:
-	mov.l	1f, r8
-	jsr	@r8
-	 mov	r0, r4
-	bra	syscall_exit
-	 nop
-	.align	2
-1:	.long	schedule_tail
-	!
-ENTRY(system_call)
-	mov.l	1f, r9
-	mov.l	@r9, r8		! Read from TRA (Trap Address) Register
-	!
-	! Is the trap argument >= 0x20? (TRA will be >= 0x80)
-	mov	#0x7f, r9
-	cmp/hi	r9, r8
-	bt/s	0f
-	 mov	#OFF_TRA, r9
-	add	r15, r9
-	!
-	mov.l	r8, @r9			! set TRA value to tra
-	STI()
-	!   	    	    Call the system call handler through the table.
-	!   	    	    First check for bad syscall number
-	mov	r3, r9
-	mov.l	2f, r8			! Number of syscalls
-	cmp/hs	r8, r9
-	bf/s	good_system_call
-	 GET_THREAD_INFO(r8)
-syscall_badsys:			! Bad syscall number
-	mov	#-ENOSYS, r0
-	bra	resume_userspace
-	 mov.l	r0, @(OFF_R0,r15)	! Return value
-	!
-0:
-	bra	debug_trap
-	 nop
-	!
-good_system_call:		! Good syscall number
-	mov.l	@(TI_FLAGS,r8), r8
-	mov	#_TIF_SYSCALL_TRACE, r10
-	tst	r10, r8
-	bf	syscall_trace_entry
-	!
-syscall_call:
-	shll2	r9		! x4
-	mov.l	3f, r8		! Load the address of sys_call_table
-	add	r8, r9
-	mov.l	@r9, r8
-	jsr	@r8	    	! jump to specific syscall handler
-	 nop
-	mov.l	@(OFF_R0,r15), r12		! save r0
-	mov.l	r0, @(OFF_R0,r15)		! save the return value
-	!
-syscall_exit:
-	CLI()
-	!
-	GET_THREAD_INFO(r8)
-	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
-	tst	#_TIF_ALLWORK_MASK, r0
-	bf	syscall_exit_work
-restore_all:
-	mov.l	@r15+, r0
-	mov.l	@r15+, r1
-	mov.l	@r15+, r2
-	mov.l	@r15+, r3
-	mov.l	@r15+, r4
-	mov.l	@r15+, r5
-	mov.l	@r15+, r6
-	mov.l	@r15+, r7
-	!
-	stc	sr, r8
-	mov.l	7f, r9
-	or	r9, r8			! BL =1, RB=1
-	ldc	r8, sr			! here, change the register bank
-	!
-	mov.l	@r15+, r8
-	mov.l	@r15+, r9
-	mov.l	@r15+, r10
-	mov.l	@r15+, r11
-	mov.l	@r15+, r12
-	mov.l	@r15+, r13
-	mov.l	@r15+, r14
-	mov.l	@r15+, k4		! original stack pointer
-	ldc.l	@r15+, spc
-	lds.l	@r15+, pr
-	mov.l	@r15+, k3		! original SR
-	ldc.l	@r15+, gbr
-	lds.l	@r15+, mach
-	lds.l	@r15+, macl
-	add	#4, r15			! Skip syscall number
-	!
-#ifdef CONFIG_SH_DSP
-	mov.l	@r15+, k0		! DSP mode marker
-	mov.l	5f, k1
-	cmp/eq	k0, k1			! Do we have a DSP stack frame?
-	bf	skip_restore
-
-	stc	sr, k0			! Enable CPU DSP mode
-	or	k1, k0			! (within kernel it may be disabled)
-	ldc	k0, sr
-	mov	r2, k0			! Backup r2
-
-	! Restore DSP registers from stack
-	mov	r15, r2
-	movs.l	@r2+, a1
-	movs.l	@r2+, a0g
-	movs.l	@r2+, a1g
-	movs.l	@r2+, m0
-	movs.l	@r2+, m1
-	mov	r2, r15
-
-	lds.l	@r15+, a0
-	lds.l	@r15+, x0
-	lds.l	@r15+, x1
-	lds.l	@r15+, y0
-	lds.l	@r15+, y1
-	lds.l	@r15+, dsr
-	ldc.l	@r15+, rs
-	ldc.l	@r15+, re
-	ldc.l	@r15+, mod
-
-	mov	k0, r2			! Restore r2
-skip_restore:
-#endif
-	!
-	! Calculate new SR value
-	mov	k3, k2			! original SR value
-	mov.l	9f, k1
-	and	k1, k2			! Mask orignal SR value
-	!
-	mov	k3, k0			! Calculate IMASK-bits
-	shlr2	k0
-	and	#0x3c, k0
-	cmp/eq	#0x3c, k0
-	bt/s	6f
-	 shll2	k0
-	mov	g_imask, k0
-	!
-6:	or	k0, k2			! Set the IMASK-bits
-	ldc	k2, ssr
-	!
-#if defined(CONFIG_KGDB_NMI)
-	! Clear in_nmi
-	mov.l	6f, k0
-	mov	#0, k1
-	mov.b	k1, @k0
-#endif
-	mov.l	@r15+, k2		! restore EXPEVT
-	mov	k4, r15
-	rte
-	 nop
-
-	.align	2
-1:	.long	TRA
-2:	.long	NR_syscalls
-3:	.long	sys_call_table
-4:	.long	do_syscall_trace
-5:	.long	0x00001000	! DSP
-7:	.long	0x30000000
-9:
-__INV_IMASK:
-	.long	0xffffff0f	! ~(IMASK)
-
-! Exception Vector Base
-!
-!	Should be aligned page boundary.
-!
-	.balign 	4096,0,4096
-ENTRY(vbr_base)
-	.long	0
-!
-	.balign 	256,0,256
-general_exception:
-	mov.l	1f, k2
-	mov.l	2f, k3
-	bra	handle_exception
-	 mov.l	@k2, k2
-	.align	2
-1:	.long	EXPEVT
-2:	.long	ret_from_exception
-!
-!
-	.balign 	1024,0,1024
-tlb_miss:
-	mov.l	1f, k2
-	mov.l	4f, k3
-	bra	handle_exception
-	 mov.l	@k2, k2
-!
-	.balign 	512,0,512
-interrupt:
-	mov.l	2f, k2
-	mov.l	3f, k3
-#if defined(CONFIG_KGDB_NMI)
-	! Debounce (filter nested NMI)
-	mov.l	@k2, k0
-	mov.l	5f, k1
-	cmp/eq	k1, k0
-	bf	0f
-	mov.l	6f, k1
-	tas.b	@k1
-	bt	0f
-	rte
-	 nop
-	.align	2
-5:	.long	NMI_VEC
-6:	.long	in_nmi
-0:
-#endif /* defined(CONFIG_KGDB_NMI) */
-	bra	handle_exception
-	 mov	#-1, k2		! interrupt exception marker
-
-	.align	2
-1:	.long	EXPEVT
-2:	.long	INTEVT
-3:	.long	ret_from_irq
-4:	.long	ret_from_exception
-
-!
-!
-	.align	2
-ENTRY(handle_exception)
-	! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
-	! save all registers onto stack.
-	!
-	stc	ssr, k0		! Is it from kernel space?
-	shll	k0		! Check MD bit (bit30) by shifting it into...
-	shll	k0		!       ...the T bit
-	bt/s	1f		! It's a kernel to kernel transition.
-	 mov	r15, k0		! save original stack to k0
-	/* User space to kernel */
-	mov	#(THREAD_SIZE >> 8), k1
-	shll8	k1		! k1 := THREAD_SIZE
-	add	current, k1
-	mov	k1, r15		! change to kernel stack
-	!
-1:	mov.l	2f, k1
-	!
-#ifdef CONFIG_SH_DSP
-	mov.l	r2, @-r15		! Save r2, we need another reg
-	stc	sr, k4
-	mov.l	1f, r2
-	tst	r2, k4			! Check if in DSP mode
-	mov.l	@r15+, r2		! Restore r2 now
-	bt/s	skip_save
-	 mov	#0, k4			! Set marker for no stack frame
-
-	mov	r2, k4			! Backup r2 (in k4) for later
-
-	! Save DSP registers on stack
-	stc.l	mod, @-r15
-	stc.l	re, @-r15
-	stc.l	rs, @-r15
-	sts.l	dsr, @-r15
-	sts.l	y1, @-r15
-	sts.l	y0, @-r15
-	sts.l	x1, @-r15
-	sts.l	x0, @-r15
-	sts.l	a0, @-r15
-
-	! GAS is broken, does not generate correct "movs.l Ds,@-As" instr.
-
-	! FIXME: Make sure that this is still the case with newer toolchains,
-	! as we're not at all interested in supporting ancient toolchains at
-	! this point. -- PFM.
-
-	mov	r15, r2
-	.word	0xf653			! movs.l	a1, @-r2
-	.word	0xf6f3			! movs.l	a0g, @-r2
-	.word	0xf6d3			! movs.l	a1g, @-r2
-	.word	0xf6c3			! movs.l	m0, @-r2
-	.word	0xf6e3			! movs.l	m1, @-r2
-	mov	r2, r15
-
-	mov	k4, r2			! Restore r2
-	mov.l	1f, k4			! Force DSP stack frame
-skip_save:
-	mov.l	k4, @-r15		! Push DSP mode marker onto stack
-#endif
-	! Save the user registers on the stack.
-	mov.l	k2, @-r15	! EXPEVT
-
- 	mov	#-1, k4
-	mov.l	k4, @-r15	! set TRA (default: -1)
-	!
-	sts.l	macl, @-r15
-	sts.l	mach, @-r15
-	stc.l	gbr, @-r15
-	stc.l	ssr, @-r15
-	sts.l	pr, @-r15
-	stc.l	spc, @-r15
-	!
-	lds	k3, pr		! Set the return address to pr
-	!
-	mov.l	k0, @-r15	! save orignal stack
-	mov.l	r14, @-r15
-	mov.l	r13, @-r15
-	mov.l	r12, @-r15
-	mov.l	r11, @-r15
-	mov.l	r10, @-r15
-	mov.l	r9, @-r15
-	mov.l	r8, @-r15
-	!
-	stc	sr, r8		! Back to normal register bank, and
-	or	k1, r8		! Block all interrupts
-	mov.l	3f, k1
-	and	k1, r8		! ...
-	ldc	r8, sr		! ...changed here.
-	!
-	mov.l	r7, @-r15
-	mov.l	r6, @-r15
-	mov.l	r5, @-r15
-	mov.l	r4, @-r15
-	mov.l	r3, @-r15
-	mov.l	r2, @-r15
-	mov.l	r1, @-r15
-	mov.l	r0, @-r15
-
-	/*
-	 * This gets a bit tricky.. in the INTEVT case we don't want to use
-	 * the VBR offset as a destination in the jump call table, since all
-	 * of the destinations are the same. In this case, (interrupt) sets
-	 * a marker in r2 (now r2_bank since SR.RB changed), which we check
-	 * to determine the exception type. For all other exceptions, we
-	 * forcibly read EXPEVT from memory and fix up the jump address, in
-	 * the interrupt exception case we jump to do_IRQ() and defer the
-	 * INTEVT read until there. As a bonus, we can also clean up the SR.RB
-	 * checks that do_IRQ() was doing..
-	 */
-	stc	r2_bank, r8
-	cmp/pz	r8
-	bf	interrupt_exception
-	shlr2	r8
-	shlr	r8
-	mov.l	4f, r9
-	add	r8, r9
-	mov.l	@r9, r9
-	jmp	@r9
-	 nop
-	rts
-	 nop
-
-	.align	2
-1:	.long	0x00001000	! DSP=1
-2:	.long	0x000080f0	! FD=1, IMASK=15
-3:	.long	0xcfffffff	! RB=0, BL=0
-4:	.long	exception_handling_table
-
-interrupt_exception:
-	mov.l	1f, r9
-	jmp	@r9
-	 nop
-	rts
-	 nop
-
-	.align 2
-1:	.long	do_IRQ
-
-	.align	2
-ENTRY(exception_none)
-	rts
-	 nop
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
index f5f53d1..6aca4bc 100644
--- a/arch/sh/kernel/head.S
+++ b/arch/sh/kernel/head.S
@@ -33,7 +33,7 @@
 	.long	0x00360000	/* INITRD_START */
 	.long	0x000a0000	/* INITRD_SIZE */
 	.long	0
-	.balign 4096,0,4096
+	.balign PAGE_SIZE,0,PAGE_SIZE
 
 	.text	
 /*
@@ -53,8 +53,10 @@
 	ldc	r0, sr
 	!			Initialize global interrupt mask
 	mov	#0, r0
+#ifdef CONFIG_CPU_HAS_SR_RB
 	ldc	r0, r6_bank
-
+#endif
+	
 	/*
 	 * Prefetch if possible to reduce cache miss penalty.
 	 *
@@ -68,11 +70,14 @@
 	!
 	mov.l	2f, r0
 	mov	r0, r15		! Set initial r15 (stack pointer)
-	mov	#(THREAD_SIZE >> 8), r1
+	mov	#(THREAD_SIZE >> 10), r1
 	shll8	r1		! r1 = THREAD_SIZE
+	shll2	r1
 	sub	r1, r0		!
+#ifdef CONFIG_CPU_HAS_SR_RB
 	ldc	r0, r7_bank	! ... and initial thread_info
-
+#endif
+	
 	!			Clear BSS area
 	mov.l	3f, r1
 	add	#4, r1
@@ -95,7 +100,11 @@
 	 nop
 
 	.balign 4
+#if defined(CONFIG_CPU_SH2)
+1:	.long	0x000000F0		! IMASK=0xF
+#else
 1:	.long	0x400080F0		! MD=1, RB=0, BL=0, FD=1, IMASK=0xF
+#endif
 2:	.long	init_thread_union+THREAD_SIZE
 3:	.long	__bss_start
 4:	.long	_end
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 944128c..67be2b6 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -12,7 +12,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/io.h>
-#include <asm/irq.h>
+#include <linux/irq.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/thread_info.h>
@@ -78,15 +78,16 @@
 	u32			stack[THREAD_SIZE/sizeof(u32)];
 };
 
-static union irq_ctx *hardirq_ctx[NR_CPUS];
-static union irq_ctx *softirq_ctx[NR_CPUS];
+static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
+static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
 #endif
 
 asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
 		      unsigned long r6, unsigned long r7,
-		      struct pt_regs regs)
+		      struct pt_regs __regs)
 {
-	struct pt_regs *old_regs = set_irq_regs(&regs);
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+	struct pt_regs *old_regs = set_irq_regs(regs);
 	int irq;
 #ifdef CONFIG_4KSTACKS
 	union irq_ctx *curctx, *irqctx;
@@ -111,7 +112,7 @@
 #endif
 
 #ifdef CONFIG_CPU_HAS_INTEVT
-	irq = (ctrl_inl(INTEVT) >> 5) - 16;
+	irq = evt2irq(ctrl_inl(INTEVT));
 #else
 	irq = r4;
 #endif
@@ -135,17 +136,24 @@
 		irqctx->tinfo.task = curctx->tinfo.task;
 		irqctx->tinfo.previous_sp = current_stack_pointer;
 
+		/*
+		 * Copy the softirq bits in preempt_count so that the
+		 * softirq checks work in the hardirq context.
+		 */
+		irqctx->tinfo.preempt_count =
+			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+
 		__asm__ __volatile__ (
 			"mov	%0, r4		\n"
-			"mov	r15, r9		\n"
+			"mov	r15, r8		\n"
 			"jsr	@%1		\n"
 			/* swith to the irq stack */
 			/* switch to the irq stack */
 			" mov	%2, r15		\n"
 			/* restore the stack (ring zero) */
-			"mov	r9, r15		\n"
+			"mov	r8, r15		\n"
 			: /* no outputs */
 			: "r" (irq), "r" (generic_handle_irq), "r" (isp)
-			/* XXX: A somewhat excessive clobber list? -PFM */
 			: "memory", "r0", "r1", "r2", "r3", "r4",
 			  "r5", "r6", "r7", "r8", "t", "pr"
 		);
@@ -193,7 +201,7 @@
 	irqctx->tinfo.task		= NULL;
 	irqctx->tinfo.exec_domain	= NULL;
 	irqctx->tinfo.cpu		= cpu;
-	irqctx->tinfo.preempt_count	= SOFTIRQ_OFFSET;
+	irqctx->tinfo.preempt_count	= 0;
 	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);
 
 	softirq_ctx[cpu] = irqctx;
@@ -239,13 +247,38 @@
 			"mov	r9, r15		\n"
 			: /* no outputs */
 			: "r" (__do_softirq), "r" (isp)
-			/* XXX: A somewhat excessive clobber list? -PFM */
 			: "memory", "r0", "r1", "r2", "r3", "r4",
 			  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
 		);
+
+		/*
+		 * Shouldn't happen, we returned above if in_interrupt():
+		 */
+		WARN_ON_ONCE(softirq_count());
 	}
 
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(do_softirq);
 #endif
+
+void __init init_IRQ(void)
+{
+#ifdef CONFIG_CPU_HAS_PINT_IRQ
+	init_IRQ_pint();
+#endif
+
+#ifdef CONFIG_CPU_HAS_INTC2_IRQ
+	init_IRQ_intc2();
+#endif
+
+#ifdef CONFIG_CPU_HAS_IPR_IRQ
+	init_IRQ_ipr();
+#endif
+
+	/* Perform the machine specific initialisation */
+	if (sh_mv.mv_init_irq)
+		sh_mv.mv_init_irq();
+
+	irq_ctx_init(smp_processor_id());
+}
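The do_IRQ() hunk above replaces the open-coded INTEVT decode with evt2irq(); judging from the expression it replaces, the mapping is the plain (evt >> 5) - 16 translation. A worked example, with the macro shape assumed from that equivalence:

	#define evt2irq(evt)	(((evt) >> 5) - 16)

	/* e.g. an INTEVT code of 0x420 decodes to (0x420 >> 5) - 16 == IRQ 17 */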
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index a52b13a..f3e2631 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -385,10 +385,11 @@
 
 asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
 			unsigned long r6, unsigned long r7,
-			struct pt_regs regs)
+			struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 #ifdef CONFIG_MMU
-	return do_fork(SIGCHLD, regs.regs[15], &regs, 0, NULL, NULL);
+	return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL);
 #else
 	/* fork almost works, enough to trick you into looking elsewhere :-( */
 	return -EINVAL;
@@ -398,11 +399,12 @@
 asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
 			 unsigned long parent_tidptr,
 			 unsigned long child_tidptr,
-			 struct pt_regs regs)
+			 struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	if (!newsp)
-		newsp = regs.regs[15];
-	return do_fork(clone_flags, newsp, &regs, 0,
+		newsp = regs->regs[15];
+	return do_fork(clone_flags, newsp, regs, 0,
 			(int __user *)parent_tidptr, (int __user *)child_tidptr);
 }
 
@@ -418,9 +420,10 @@
  */
 asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
 			 unsigned long r6, unsigned long r7,
-			 struct pt_regs regs)
+			 struct pt_regs __regs)
 {
-	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs,
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs,
 		       0, NULL, NULL);
 }
 
@@ -429,8 +432,9 @@
  */
 asmlinkage int sys_execve(char *ufilename, char **uargv,
 			  char **uenvp, unsigned long r7,
-			  struct pt_regs regs)
+			  struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	int error;
 	char *filename;
 
@@ -442,7 +446,7 @@
 	error = do_execve(filename,
 			  (char __user * __user *)uargv,
 			  (char __user * __user *)uenvp,
-			  &regs);
+			  regs);
 	if (error == 0) {
 		task_lock(current);
 		current->ptrace &= ~PT_DTRACE;
@@ -472,9 +476,7 @@
 	return pc;
 }
 
-asmlinkage void break_point_trap(unsigned long r4, unsigned long r5,
-				 unsigned long r6, unsigned long r7,
-				 struct pt_regs regs)
+asmlinkage void break_point_trap(void)
 {
 	/* Clear tracing.  */
 #if defined(CONFIG_CPU_SH4A)
@@ -492,8 +494,10 @@
 
 asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5,
 					  unsigned long r6, unsigned long r7,
-					  struct pt_regs regs)
+					  struct pt_regs __regs)
 {
-	regs.pc -= 2;
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+
+	regs->pc -= 2;
 	force_sig(SIGTRAP, current);
 }
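The handlers above switch from taking struct pt_regs by value to recovering a pointer with RELOC_HIDE(&__regs, 0). For reference, a sketch of that idiom as defined in the generic compiler headers of this era (quoted from memory, so treat the exact form as an assumption): the empty asm hides the pointer's origin from GCC, so the by-value register block can be treated as the live on-stack pt_regs frame.

	#define RELOC_HIDE(ptr, off)					\
		({ unsigned long __ptr;					\
		   __asm__ ("" : "=r" (__ptr) : "0" (ptr));		\
		   (typeof(ptr)) (__ptr + (off)); })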
diff --git a/arch/sh/kernel/relocate_kernel.S b/arch/sh/kernel/relocate_kernel.S
index 8221b37..c66cb32 100644
--- a/arch/sh/kernel/relocate_kernel.S
+++ b/arch/sh/kernel/relocate_kernel.S
@@ -7,11 +7,9 @@
  * This source code is licensed under the GNU General Public License,
  * Version 2.  See the file COPYING for more details.
  */
-
 #include <linux/linkage.h>
-
-#define PAGE_SIZE      4096 /* must be same value as in <asm/page.h> */
-
+#include <asm/addrspace.h>
+#include <asm/page.h>
 
 		.globl relocate_new_kernel
 relocate_new_kernel:
@@ -20,8 +18,8 @@
 	/* r6 = start_address      */
 	/* r7 = vbr_reg            */
 
-	mov.l	10f,r8	  /* 4096 */
-	mov.l	11f,r9    /* 0xa0000000 */
+	mov.l	10f,r8	  /* PAGE_SIZE */
+	mov.l	11f,r9    /* P2SEG */
 
 	/*  stack setting */
 	add	r8,r5
@@ -32,7 +30,7 @@
 0:
 	mov.l	@r4+,r0	  /* cmd = *ind++ */
 
-1:	/* addr = (cmd | 0xa0000000) & 0xfffffff0 */
+1:	/* addr = (cmd | P2SEG) & 0xfffffff0 */
 	mov	r0,r2
 	or	r9,r2
 	mov	#-16,r1
@@ -92,7 +90,7 @@
 10:
 	.long	PAGE_SIZE
 11:
-	.long	0xa0000000
+	.long	P2SEG
 
 relocate_new_kernel_end:
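The relocate_new_kernel change above replaces the bare 0xa0000000 constant with P2SEG from <asm/addrspace.h>. For reference, the fixed SH address segments (values assumed to match the usual 29-bit SH-4 layout):

	#define P0SEG	0x00000000	/* user space, TLB mapped */
	#define P1SEG	0x80000000	/* kernel, identity mapped, cached */
	#define P2SEG	0xa0000000	/* kernel, identity mapped, uncached */
	#define P3SEG	0xc0000000	/* kernel, TLB mapped */
	#define P4SEG	0xe0000000	/* control registers, store queues */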
 
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 36d86f9..f8dd6b7 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -332,8 +332,7 @@
 	if (LOADER_TYPE && INITRD_START) {
 		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
 			reserve_bootmem_node(NODE_DATA(0), INITRD_START+__MEMORY_START, INITRD_SIZE);
-			initrd_start =
-				INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
+			initrd_start = INITRD_START + PAGE_OFFSET + __MEMORY_START;
 			initrd_end = initrd_start + INITRD_SIZE;
 		} else {
 			printk("initrd extends beyond end of memory "
@@ -392,6 +391,7 @@
 subsys_initcall(topology_init);
 
 static const char *cpu_name[] = {
+	[CPU_SH7206]	= "SH7206",	[CPU_SH7619]	= "SH7619",
 	[CPU_SH7604]	= "SH7604",	[CPU_SH7300]	= "SH7300",
 	[CPU_SH7705]	= "SH7705",	[CPU_SH7706]	= "SH7706",
 	[CPU_SH7707]	= "SH7707",	[CPU_SH7708]	= "SH7708",
@@ -404,6 +404,7 @@
 	[CPU_SH4_202]	= "SH4-202",	[CPU_SH4_501]	= "SH4-501",
 	[CPU_SH7770]	= "SH7770",	[CPU_SH7780]	= "SH7780",
 	[CPU_SH7781]	= "SH7781",	[CPU_SH7343]	= "SH7343",
+	[CPU_SH7785]	= "SH7785",
 	[CPU_SH_NONE]	= "Unknown"
 };
 
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
index 9daad70..ceee791 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -18,7 +18,6 @@
 #include <asm/delay.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
-#include <asm/checksum.h>
 
 extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
 extern struct hw_interrupt_type no_irq_type;
@@ -74,8 +73,6 @@
 DECLARE_EXPORT(__movstr);
 DECLARE_EXPORT(__movstrSI16);
 
-EXPORT_SYMBOL(strcpy);
-
 #ifdef CONFIG_CPU_SH4
 DECLARE_EXPORT(__movstr_i4_even);
 DECLARE_EXPORT(__movstr_i4_odd);
@@ -102,10 +99,6 @@
 EXPORT_SYMBOL(synchronize_irq);
 #endif
 
-#ifdef CONFIG_PM
-EXPORT_SYMBOL(pm_suspend);
-#endif
-
 EXPORT_SYMBOL(csum_partial);
 #ifdef CONFIG_IPV6
 EXPORT_SYMBOL(csum_ipv6_magic);
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
index 5213f5b..bb1c480 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal.c
@@ -23,6 +23,7 @@
 #include <linux/elf.h>
 #include <linux/personality.h>
 #include <linux/binfmts.h>
+#include <linux/freezer.h>
 
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
@@ -37,7 +38,7 @@
 asmlinkage int
 sys_sigsuspend(old_sigset_t mask,
 	       unsigned long r5, unsigned long r6, unsigned long r7,
-	       struct pt_regs regs)
+	       struct pt_regs __regs)
 {
 	mask &= _BLOCKABLE;
 	spin_lock_irq(&current->sighand->siglock);
@@ -52,7 +53,7 @@
 	return -ERESTARTNOHAND;
 }
 
-asmlinkage int 
+asmlinkage int
 sys_sigaction(int sig, const struct old_sigaction __user *act,
 	      struct old_sigaction __user *oact)
 {
@@ -87,9 +88,11 @@
 asmlinkage int
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
 		unsigned long r6, unsigned long r7,
-		struct pt_regs regs)
+		struct pt_regs __regs)
 {
-	return do_sigaltstack(uss, uoss, regs.regs[15]);
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+
+	return do_sigaltstack(uss, uoss, regs->regs[15]);
 }
 
 
@@ -98,7 +101,11 @@
  */
 
 #define MOVW(n)	 (0x9300|((n)-2))	/* Move mem word at PC+n to R3 */
-#define TRAP16	 0xc310			/* Syscall w/no args (NR in R3) */
+#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
+#define TRAP_NOARG 0xc320		/* Syscall w/no args (NR in R3) */
+#else
+#define TRAP_NOARG 0xc310		/* Syscall w/no args (NR in R3) */
+#endif
 #define OR_R0_R0 0x200b			/* or r0,r0 (insert to avoid hardware bug) */
 
 struct sigframe
@@ -194,9 +201,10 @@
 
 asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
 			     unsigned long r6, unsigned long r7,
-			     struct pt_regs regs)
+			     struct pt_regs __regs)
 {
-	struct sigframe __user *frame = (struct sigframe __user *)regs.regs[15];
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+	struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15];
 	sigset_t set;
 	int r0;
 
@@ -216,7 +224,7 @@
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(&regs, &frame->sc, &r0))
+	if (restore_sigcontext(regs, &frame->sc, &r0))
 		goto badframe;
 	return r0;
 
@@ -227,9 +235,10 @@
 
 asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
 				unsigned long r6, unsigned long r7,
-				struct pt_regs regs)
+				struct pt_regs __regs)
 {
-	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs.regs[15];
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15];
 	sigset_t set;
 	stack_t st;
 	int r0;
@@ -246,14 +255,14 @@
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &r0))
+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
 		goto badframe;
 
 	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
 		goto badframe;
 	/* It is more difficult to avoid calling this function than to
 	   call it and ignore errors.  */
-	do_sigaltstack(&st, NULL, regs.regs[15]);
+	do_sigaltstack(&st, NULL, regs->regs[15]);
 
 	return r0;
 
@@ -350,7 +359,7 @@
 	} else {
 		/* Generate return code (system call to sigreturn) */
 		err |= __put_user(MOVW(7), &frame->retcode[0]);
-		err |= __put_user(TRAP16, &frame->retcode[1]);
+		err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
 		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
 		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
 		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
@@ -430,7 +439,7 @@
 	} else {
 		/* Generate return code (system call to rt_sigreturn) */
 		err |= __put_user(MOVW(7), &frame->retcode[0]);
-		err |= __put_user(TRAP16, &frame->retcode[1]);
+		err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
 		err |= __put_user(OR_R0_R0, &frame->retcode[2]);
 		err |= __put_user(OR_R0_R0, &frame->retcode[3]);
 		err |= __put_user(OR_R0_R0, &frame->retcode[4]);
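The retcode[] writes above assemble a small return trampoline on the user stack; the opcode values follow directly from the macros shown (MOVW(7) = 0x9300 | (7 - 2) = 0x9305, TRAP_NOARG = 0xc310, or 0xc320 on SH-2/SH-2A, OR_R0_R0 = 0x200b). A sketch of the resulting instruction stream:

	static const unsigned short sigreturn_trampoline[] = {
		0x9305,		/* MOVW(7): mov.w @(disp,PC), r3 - load the sigreturn syscall # */
		0xc310,		/* TRAP_NOARG: trapa into the kernel (0xc320 on SH-2/SH-2A) */
		0x200b,		/* OR_R0_R0: padding to dodge a hardware bug */
		0x200b,
		0x200b,
		0x200b,
	};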
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
new file mode 100644
index 0000000..0d5268a
--- /dev/null
+++ b/arch/sh/kernel/stacktrace.c
@@ -0,0 +1,43 @@
+/*
+ * arch/sh/kernel/stacktrace.c
+ *
+ * Stack trace management functions
+ *
+ *  Copyright (C) 2006  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/thread_info.h>
+#include <asm/ptrace.h>
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
+{
+	unsigned long *sp;
+
+	if (!task)
+		task = current;
+	if (task == current)
+		sp = (unsigned long *)current_stack_pointer;
+	else
+		sp = (unsigned long *)task->thread.sp;
+
+	while (!kstack_end(sp)) {
+		unsigned long addr = *sp++;
+
+		if (__kernel_text_address(addr)) {
+			if (trace->skip > 0)
+				trace->skip--;
+			else
+				trace->entries[trace->nr_entries++] = addr;
+			if (trace->nr_entries >= trace->max_entries)
+				break;
+		}
+	}
+}
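A hedged usage sketch for the new helper, assuming the struct stack_trace layout of this era (caller-supplied entries[] buffer plus nr_entries/max_entries/skip fields); dump_current_stack() is a hypothetical example function:

	#include <linux/kernel.h>
	#include <linux/sched.h>
	#include <linux/stacktrace.h>

	static void dump_current_stack(void)
	{
		unsigned long entries[16];
		struct stack_trace trace = {
			.entries	= entries,
			.max_entries	= ARRAY_SIZE(entries),
			.skip		= 0,	/* keep every kernel text address found */
		};
		unsigned int i;

		save_stack_trace(&trace, current);
		for (i = 0; i < trace.nr_entries; i++)
			printk(KERN_DEBUG "  [<%08lx>]\n", trace.entries[i]);
	}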
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 8fde950..5083b6e 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -33,14 +33,15 @@
  */
 asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
 	unsigned long r6, unsigned long r7,
-	struct pt_regs regs)
+	struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	int fd[2];
 	int error;
 
 	error = do_pipe(fd);
 	if (!error) {
-		regs.regs[1] = fd[1];
+		regs->regs[1] = fd[1];
 		return fd[0];
 	}
 	return error;
@@ -50,6 +51,7 @@
 
 EXPORT_SYMBOL(shm_align_mask);
 
+#ifdef CONFIG_MMU
 /*
  * To avoid cache aliases, we map the shared page with same color.
  */
@@ -135,6 +137,7 @@
 			addr = COLOUR_ALIGN(addr, pgoff);
 	}
 }
+#endif /* CONFIG_MMU */
 
 static inline long
 do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, 
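The arch_get_unmapped_area() code above keys off COLOUR_ALIGN() and the exported shm_align_mask. One plausible shape of that helper, written purely as an assumption to show the intent (round the address up, then give it the same cache colour as the file offset):

	#define COLOUR_ALIGN(addr, pgoff)					\
		((((addr) + shm_align_mask) & ~shm_align_mask) +		\
		 (((pgoff) << PAGE_SHIFT) & shm_align_mask))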
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index 57e708d..c206c95 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -13,6 +13,8 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/profile.h>
+#include <linux/timex.h>
+#include <linux/sched.h>
 #include <asm/clock.h>
 #include <asm/rtc.h>
 #include <asm/timer.h>
@@ -50,15 +52,20 @@
 #ifndef CONFIG_GENERIC_TIME
 void do_gettimeofday(struct timeval *tv)
 {
+	unsigned long flags;
 	unsigned long seq;
 	unsigned long usec, sec;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		/*
+		 * Turn off IRQs when grabbing xtime_lock, so that
+		 * the sys_timer get_offset code doesn't have to handle it.
+		 */
+		seq = read_seqbegin_irqsave(&xtime_lock, flags);
 		usec = get_timer_offset();
 		sec = xtime.tv_sec;
-		usec += xtime.tv_nsec / 1000;
-	} while (read_seqretry(&xtime_lock, seq));
+		usec += xtime.tv_nsec / NSEC_PER_USEC;
+	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 
 	while (usec >= 1000000) {
 		usec -= 1000000;
@@ -85,7 +92,7 @@
 	 * wall time.  Discover what correction gettimeofday() would have
 	 * made, and then undo it!
 	 */
-	nsec -= 1000 * get_timer_offset();
+	nsec -= get_timer_offset() * NSEC_PER_USEC;
 
 	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
 	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
@@ -169,6 +176,108 @@
 	.resume	 = timer_resume,
 };
 
+#ifdef CONFIG_NO_IDLE_HZ
+static int timer_dyn_tick_enable(void)
+{
+	struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
+	unsigned long flags;
+	int ret = -ENODEV;
+
+	if (dyn_tick) {
+		spin_lock_irqsave(&dyn_tick->lock, flags);
+		ret = 0;
+		if (!(dyn_tick->state & DYN_TICK_ENABLED)) {
+			ret = dyn_tick->enable();
+
+			if (ret == 0)
+				dyn_tick->state |= DYN_TICK_ENABLED;
+		}
+		spin_unlock_irqrestore(&dyn_tick->lock, flags);
+	}
+
+	return ret;
+}
+
+static int timer_dyn_tick_disable(void)
+{
+	struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
+	unsigned long flags;
+	int ret = -ENODEV;
+
+	if (dyn_tick) {
+		spin_lock_irqsave(&dyn_tick->lock, flags);
+		ret = 0;
+		if (dyn_tick->state & DYN_TICK_ENABLED) {
+			ret = dyn_tick->disable();
+
+			if (ret == 0)
+				dyn_tick->state &= ~DYN_TICK_ENABLED;
+		}
+		spin_unlock_irqrestore(&dyn_tick->lock, flags);
+	}
+
+	return ret;
+}
+
+/*
+ * Reprogram the system timer for at least the calculated time interval.
+ * This function should be called from the idle thread with IRQs disabled,
+ * immediately before sleeping.
+ */
+void timer_dyn_reprogram(void)
+{
+	struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
+	unsigned long next, seq, flags;
+
+	if (!dyn_tick)
+		return;
+
+	spin_lock_irqsave(&dyn_tick->lock, flags);
+	if (dyn_tick->state & DYN_TICK_ENABLED) {
+		next = next_timer_interrupt();
+		do {
+			seq = read_seqbegin(&xtime_lock);
+			dyn_tick->reprogram(next - jiffies);
+		} while (read_seqretry(&xtime_lock, seq));
+	}
+	spin_unlock_irqrestore(&dyn_tick->lock, flags);
+}
+
+static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf)
+{
+	return sprintf(buf, "%i\n",
+		       (sys_timer->dyn_tick->state & DYN_TICK_ENABLED) >> 1);
+}
+
+static ssize_t timer_set_dyn_tick(struct sys_device *dev, const char *buf,
+				  size_t count)
+{
+	unsigned int enable = simple_strtoul(buf, NULL, 2);
+
+	if (enable)
+		timer_dyn_tick_enable();
+	else
+		timer_dyn_tick_disable();
+
+	return count;
+}
+static SYSDEV_ATTR(dyn_tick, 0644, timer_show_dyn_tick, timer_set_dyn_tick);
+
+/*
+ * dyntick=enable|disable
+ */
+static char dyntick_str[4] __initdata = "";
+
+static int __init dyntick_setup(char *str)
+{
+	if (str)
+		strlcpy(dyntick_str, str, sizeof(dyntick_str));
+	return 1;
+}
+
+__setup("dyntick=", dyntick_setup);
+#endif
+
 static int __init timer_init_sysfs(void)
 {
 	int ret = sysdev_class_register(&timer_sysclass);
@@ -176,7 +285,22 @@
 		return ret;
 
 	sys_timer->dev.cls = &timer_sysclass;
-	return sysdev_register(&sys_timer->dev);
+	ret = sysdev_register(&sys_timer->dev);
+
+#ifdef CONFIG_NO_IDLE_HZ
+	if (ret == 0 && sys_timer->dyn_tick) {
+		ret = sysdev_create_file(&sys_timer->dev, &attr_dyn_tick);
+
+		/*
+		 * Turn on dynamic tick after calibrate delay
+		 * for correct bogomips
+		 */
+		if (ret == 0 && dyntick_str[0] == 'e')
+			ret = timer_dyn_tick_enable();
+	}
+#endif
+
+	return ret;
 }
 device_initcall(timer_init_sysfs);
 
@@ -200,6 +324,11 @@
 	sys_timer = get_sys_timer();
 	printk(KERN_INFO "Using %s for system timer\n", sys_timer->name);
 
+#ifdef CONFIG_NO_IDLE_HZ
+	if (sys_timer->dyn_tick)
+		spin_lock_init(&sys_timer->dyn_tick->lock);
+#endif
+
 #if defined(CONFIG_SH_KGDB)
 	/*
 	 * Set up kgdb as requested. We do it here because the serial
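The dynamic tick plumbing above only calls through sys_timer->dyn_tick, so a timer driver opts in by supplying the hook table. A hedged sketch of such a registration; every my_* name is hypothetical and the exact struct dyn_tick_timer layout is assumed from the fields used above (lock, state, enable, disable, reprogram):

	static int my_tick_enable(void);		/* switch to one-shot compare matches */
	static int my_tick_disable(void);		/* fall back to periodic HZ ticks */
	static void my_tick_reprogram(unsigned long delta);	/* jiffies until next event */
	static struct sys_timer_ops my_timer_ops;	/* init/start/stop as usual */

	static struct dyn_tick_timer my_dyn_tick = {
		.enable		= my_tick_enable,
		.disable	= my_tick_disable,
		.reprogram	= my_tick_reprogram,
	};

	static struct sys_timer my_sys_timer = {
		.name		= "my-timer",
		.ops		= &my_timer_ops,
		.dyn_tick	= &my_dyn_tick,		/* picked up by timer_init_sysfs() */
	};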
diff --git a/arch/sh/kernel/timers/Makefile b/arch/sh/kernel/timers/Makefile
index 151a6a3..bcf244f 100644
--- a/arch/sh/kernel/timers/Makefile
+++ b/arch/sh/kernel/timers/Makefile
@@ -5,4 +5,6 @@
 obj-y	:= timer.o
 
 obj-$(CONFIG_SH_TMU)		+= timer-tmu.o
+obj-$(CONFIG_SH_MTU2)		+= timer-mtu2.o
+obj-$(CONFIG_SH_CMT)		+= timer-cmt.o
 
diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c
new file mode 100644
index 0000000..a574b93
--- /dev/null
+++ b/arch/sh/kernel/timers/timer-cmt.c
@@ -0,0 +1,196 @@
+/*
+ * arch/sh/kernel/timers/timer-cmt.c - CMT Timer Support
+ *
+ *  Copyright (C) 2005  Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/seqlock.h>
+#include <asm/timer.h>
+#include <asm/rtc.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/clock.h>
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define CMT_CMSTR	0xf84a0070
+#define CMT_CMCSR_0	0xf84a0072
+#define CMT_CMCNT_0	0xf84a0074
+#define CMT_CMCOR_0	0xf84a0076
+#define CMT_CMCSR_1	0xf84a0078
+#define CMT_CMCNT_1	0xf84a007a
+#define CMT_CMCOR_1	0xf84a007c
+
+#define STBCR3		0xf80a0000
+#define cmt_clock_enable() do {	ctrl_outb(ctrl_inb(STBCR3) & ~0x10, STBCR3); } while(0)
+#define CMT_CMCSR_INIT	0x0040
+#define CMT_CMCSR_CALIB	0x0000
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+#define CMT_CMSTR	0xfffec000
+#define CMT_CMCSR_0	0xfffec002
+#define CMT_CMCNT_0	0xfffec004
+#define CMT_CMCOR_0	0xfffec006
+
+#define STBCR4		0xfffe040c
+#define cmt_clock_enable() do {	ctrl_outb(ctrl_inb(STBCR4) & ~0x04, STBCR4); } while(0)
+#define CMT_CMCSR_INIT	0x0040
+#define CMT_CMCSR_CALIB	0x0000
+#else
+#error "Unknown CPU SUBTYPE"
+#endif
+
+static unsigned long cmt_timer_get_offset(void)
+{
+	int count;
+	static unsigned short count_p = 0xffff;    /* for the first call after boot */
+	static unsigned long jiffies_p = 0;
+
+	/*
+	 * cache volatile jiffies temporarily; we have IRQs turned off.
+	 */
+	unsigned long jiffies_t;
+
+	/* timer count may underflow right here */
+	count =  ctrl_inw(CMT_CMCOR_0);
+	count -= ctrl_inw(CMT_CMCNT_0);
+
+	jiffies_t = jiffies;
+
+	/*
+	 * avoiding timer inconsistencies (they are rare, but they happen)...
+	 * there is one kind of problem that must be avoided here:
+	 *  1. the timer counter underflows
+	 */
+
+	if (jiffies_t == jiffies_p) {
+		if (count > count_p) {
+			/* the nutcase */
+			if (ctrl_inw(CMT_CMCSR_0) & 0x80) { /* Check CMF bit */
+				count -= LATCH;
+			} else {
+				printk("%s (): hardware timer problem?\n",
+				       __FUNCTION__);
+			}
+		}
+	} else
+		jiffies_p = jiffies_t;
+
+	count_p = count;
+
+	count = ((LATCH-1) - count) * TICK_SIZE;
+	count = (count + LATCH/2) / LATCH;
+
+	return count;
+}
+
+static irqreturn_t cmt_timer_interrupt(int irq, void *dev_id)
+{
+	unsigned long timer_status;
+
+	/* Clear CMF bit */
+	timer_status = ctrl_inw(CMT_CMCSR_0);
+	timer_status &= ~0x80;
+	ctrl_outw(timer_status, CMT_CMCSR_0);
+
+	/*
+	 * Here we are in the timer irq handler. We just have irqs locally
+	 * disabled but we don't know if the timer_bh is running on the other
+	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
+	 * the irq version of write_lock because as just said we have irq
+	 * locally disabled. -arca
+	 */
+	write_seqlock(&xtime_lock);
+	handle_timer_tick();
+	write_sequnlock(&xtime_lock);
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction cmt_irq = {
+	.name		= "timer",
+	.handler	= cmt_timer_interrupt,
+	.flags		= IRQF_DISABLED | IRQF_TIMER,
+	.mask		= CPU_MASK_NONE,
+};
+
+static void cmt_clk_init(struct clk *clk)
+{
+	u8 divisor = CMT_CMCSR_INIT & 0x3;
+	ctrl_inw(CMT_CMCSR_0);
+	ctrl_outw(CMT_CMCSR_INIT, CMT_CMCSR_0);
+	clk->parent = clk_get(NULL, "module_clk");
+	clk->rate = clk->parent->rate / (8 << (divisor << 1));
+}
+
+static void cmt_clk_recalc(struct clk *clk)
+{
+	u8 divisor = ctrl_inw(CMT_CMCSR_0) & 0x3;
+	clk->rate = clk->parent->rate / (8 << (divisor << 1));
+}
+
+static struct clk_ops cmt_clk_ops = {
+	.init		= cmt_clk_init,
+	.recalc		= cmt_clk_recalc,
+};
+
+static struct clk cmt0_clk = {
+	.name		= "cmt0_clk",
+	.ops		= &cmt_clk_ops,
+};
+
+static int cmt_timer_start(void)
+{
+	ctrl_outw(ctrl_inw(CMT_CMSTR) | 0x01, CMT_CMSTR);
+	return 0;
+}
+
+static int cmt_timer_stop(void)
+{
+	ctrl_outw(ctrl_inw(CMT_CMSTR) & ~0x01, CMT_CMSTR);
+	return 0;
+}
+
+static int cmt_timer_init(void)
+{
+	unsigned long interval;
+
+	cmt_clock_enable();
+
+	setup_irq(CONFIG_SH_TIMER_IRQ, &cmt_irq);
+
+	cmt0_clk.parent = clk_get(NULL, "module_clk");
+
+	cmt_timer_stop();
+
+	interval = cmt0_clk.parent->rate / 8 / HZ;
+	printk(KERN_INFO "Interval = %ld\n", interval);
+
+	ctrl_outw(interval, CMT_CMCOR_0);
+
+	clk_register(&cmt0_clk);
+	clk_enable(&cmt0_clk);
+
+	cmt_timer_start();
+
+	return 0;
+}
+
+struct sys_timer_ops cmt_timer_ops = {
+	.init		= cmt_timer_init,
+	.start		= cmt_timer_start,
+	.stop		= cmt_timer_stop,
+#ifndef CONFIG_GENERIC_TIME
+	.get_offset	= cmt_timer_get_offset,
+#endif
+};
+
+struct sys_timer cmt_timer = {
+	.name	= "cmt",
+	.ops	= &cmt_timer_ops,
+};
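For reference, the tail of cmt_timer_get_offset() above is the usual remaining-count to microseconds scaling, assuming LATCH is the number of timer counts per jiffy and TICK_SIZE the jiffy length in microseconds:

	/* Convert a remaining CMT count into the offset (in usec) into the
	 * current jiffy, rounding to the nearest microsecond. */
	static unsigned long cmt_count_to_usecs(unsigned long count)
	{
		unsigned long usecs = ((LATCH - 1) - count) * TICK_SIZE;

		return (usecs + LATCH / 2) / LATCH;
	}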
diff --git a/arch/sh/kernel/timers/timer-mtu2.c b/arch/sh/kernel/timers/timer-mtu2.c
new file mode 100644
index 0000000..fffcd1c
--- /dev/null
+++ b/arch/sh/kernel/timers/timer-mtu2.c
@@ -0,0 +1,200 @@
+/*
+ * arch/sh/kernel/timers/timer-mtu2.c - MTU2 Timer Support
+ *
+ *  Copyright (C) 2005  Paul Mundt
+ *
+ * Based off of arch/sh/kernel/timers/timer-tmu.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/seqlock.h>
+#include <asm/timer.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/clock.h>
+
+/*
+ * We use channel 1 for our lowly system timer. Channel 2 would be the other
+ * likely candidate, but we leave it alone as it has higher divisors that
+ * would be of more use to other more interesting applications.
+ *
+ * TODO: Presently we only implement a 16-bit single-channel system timer.
+ * However, we can implement channel cascade if we go the overflow route and
+ * get away with using 2 MTU2 channels as a 32-bit timer.
+ */
+#define MTU2_TSTR	0xfffe4280
+#define MTU2_TCR_1	0xfffe4380
+#define MTU2_TMDR_1	0xfffe4381
+#define MTU2_TIOR_1	0xfffe4382
+#define MTU2_TIER_1	0xfffe4384
+#define MTU2_TSR_1	0xfffe4385
+#define MTU2_TCNT_1	0xfffe4386	/* 16-bit counter */
+#define MTU2_TGRA_1	0xfffe438a
+
+#define STBCR3		0xfffe0408
+
+#define MTU2_TSTR_CST1	(1 << 1)	/* Counter Start 1 */
+
+#define MTU2_TSR_TGFA	(1 << 0)	/* GRA compare match */
+
+#define MTU2_TIER_TGIEA	(1 << 0)	/* GRA compare match  interrupt enable */
+
+#define MTU2_TCR_INIT	0x22
+
+#define MTU2_TCR_CALIB  0x00
+
+static unsigned long mtu2_timer_get_offset(void)
+{
+	int count;
+	static int count_p = 0x7fff;	/* for the first call after boot */
+	static unsigned long jiffies_p = 0;
+
+	/*
+	 * cache volatile jiffies temporarily; we have IRQs turned off.
+	 */
+	unsigned long jiffies_t;
+
+	/* timer count may underflow right here */
+	count = ctrl_inw(MTU2_TCNT_1);	/* read the latched count */
+
+	jiffies_t = jiffies;
+
+	/*
+	 * avoiding timer inconsistencies (they are rare, but they happen)...
+	 * there is one kind of problem that must be avoided here:
+	 *  1. the timer counter underflows
+	 */
+
+	if (jiffies_t == jiffies_p) {
+		if (count > count_p) {
+			if (ctrl_inb(MTU2_TSR_1) & MTU2_TSR_TGFA) {
+				count -= LATCH;
+			} else {
+				printk("%s (): hardware timer problem?\n",
+				       __FUNCTION__);
+			}
+		}
+	} else
+		jiffies_p = jiffies_t;
+
+	count_p = count;
+
+	count = ((LATCH-1) - count) * TICK_SIZE;
+	count = (count + LATCH/2) / LATCH;
+
+	return count;
+}
+
+static irqreturn_t mtu2_timer_interrupt(int irq, void *dev_id)
+{
+	unsigned long timer_status;
+
+	/* Clear TGFA bit */
+	timer_status = ctrl_inb(MTU2_TSR_1);
+	timer_status &= ~MTU2_TSR_TGFA;
+	ctrl_outb(timer_status, MTU2_TSR_1);
+
+	/* Do timer tick */
+	write_seqlock(&xtime_lock);
+	handle_timer_tick();
+	write_sequnlock(&xtime_lock);
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction mtu2_irq = {
+	.name		= "timer",
+	.handler	= mtu2_timer_interrupt,
+	.flags		= IRQF_DISABLED | IRQF_TIMER,
+	.mask		= CPU_MASK_NONE,
+};
+
+static unsigned int divisors[] = { 1, 4, 16, 64, 1, 1, 256 };
+
+static void mtu2_clk_init(struct clk *clk)
+{
+	u8 idx = MTU2_TCR_INIT & 0x7;
+
+	clk->rate = clk->parent->rate / divisors[idx];
+	/* Start TCNT counting */
+	ctrl_outb(ctrl_inb(MTU2_TSTR) | MTU2_TSTR_CST1, MTU2_TSTR);
+
+}
+
+static void mtu2_clk_recalc(struct clk *clk)
+{
+	u8 idx = ctrl_inb(MTU2_TCR_1) & 0x7;
+	clk->rate = clk->parent->rate / divisors[idx];
+}
+
+static struct clk_ops mtu2_clk_ops = {
+	.init		= mtu2_clk_init,
+	.recalc		= mtu2_clk_recalc,
+};
+
+static struct clk mtu2_clk1 = {
+	.name		= "mtu2_clk1",
+	.ops		= &mtu2_clk_ops,
+};
+
+static int mtu2_timer_start(void)
+{
+	ctrl_outb(ctrl_inb(MTU2_TSTR) | MTU2_TSTR_CST1, MTU2_TSTR);
+	return 0;
+}
+
+static int mtu2_timer_stop(void)
+{
+	ctrl_outb(ctrl_inb(MTU2_TSTR) & ~MTU2_TSTR_CST1, MTU2_TSTR);
+	return 0;
+}
+
+static int mtu2_timer_init(void)
+{
+	u8 tmp;
+	unsigned long interval;
+
+	setup_irq(CONFIG_SH_TIMER_IRQ, &mtu2_irq);
+
+	mtu2_clk1.parent = clk_get(NULL, "module_clk");
+
+	ctrl_outb(ctrl_inb(STBCR3) & (~0x20), STBCR3);
+
+	/* Normal operation */
+	ctrl_outb(0, MTU2_TMDR_1);
+	ctrl_outb(MTU2_TCR_INIT, MTU2_TCR_1);
+	ctrl_outb(0x01, MTU2_TIOR_1);
+
+	/* Enable TGRA compare match interrupt */
+	ctrl_outb(ctrl_inb(MTU2_TIER_1) | MTU2_TIER_TGIEA, MTU2_TIER_1);
+
+	interval = CONFIG_SH_PCLK_FREQ / 16 / HZ;
+	printk(KERN_INFO "Interval = %ld\n", interval);
+
+	ctrl_outw(interval, MTU2_TGRA_1);
+	ctrl_outw(0, MTU2_TCNT_1);
+
+	clk_register(&mtu2_clk1);
+	clk_enable(&mtu2_clk1);
+
+	return 0;
+}
+
+struct sys_timer_ops mtu2_timer_ops = {
+	.init		= mtu2_timer_init,
+	.start		= mtu2_timer_start,
+	.stop		= mtu2_timer_stop,
+#ifndef CONFIG_GENERIC_TIME
+	.get_offset	= mtu2_timer_get_offset,
+#endif
+};
+
+struct sys_timer mtu2_timer = {
+	.name	= "mtu2",
+	.ops	= &mtu2_timer_ops,
+};
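
In the MTU2 variant, the three low TPSC bits of TCR index the divisors[] table, and the TGRA compare-match interval is then derived from the peripheral clock. A hedged, self-contained sketch of both calculations (the 33.33 MHz clock and HZ=100 are assumptions for the example, not values from the patch):

/* Illustration of the MTU2 prescaler decode and tick interval above.
 * The peripheral clock and HZ are assumed example values. */
#include <stdio.h>

static const unsigned int divisors[] = { 1, 4, 16, 64, 1, 1, 256 };

int main(void)
{
	unsigned long pclk = 33333333UL;	/* assumed CONFIG_SH_PCLK_FREQ */
	unsigned int hz = 100;			/* assumed HZ */
	unsigned char tcr = 0x22;		/* MTU2_TCR_INIT from the patch */
	unsigned int idx = tcr & 0x7;		/* TPSC field selects the divisor */

	printf("TPSC=%u -> clock/%u = %lu Hz\n",
	       idx, divisors[idx], pclk / divisors[idx]);
	printf("TGRA interval = %lu counts per tick\n", pclk / 16 / hz);
	return 0;
}
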
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index 2492701..e060e71 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -17,7 +17,6 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
-#include <linux/spinlock.h>
 #include <linux/seqlock.h>
 #include <asm/timer.h>
 #include <asm/rtc.h>
@@ -31,13 +30,9 @@
 
 #define TMU0_TCR_CALIB	0x0000
 
-static DEFINE_SPINLOCK(tmu0_lock);
-
 static unsigned long tmu_timer_get_offset(void)
 {
 	int count;
-	unsigned long flags;
-
 	static int count_p = 0x7fffffff;    /* for the first call after boot */
 	static unsigned long jiffies_p = 0;
 
@@ -46,7 +41,6 @@
 	 */
 	unsigned long jiffies_t;
 
-	spin_lock_irqsave(&tmu0_lock, flags);
 	/* timer count may underflow right here */
 	count = ctrl_inl(TMU0_TCNT);	/* read the latched count */
 
@@ -72,7 +66,6 @@
 		jiffies_p = jiffies_t;
 
 	count_p = count;
-	spin_unlock_irqrestore(&tmu0_lock, flags);
 
 	count = ((LATCH-1) - count) * TICK_SIZE;
 	count = (count + LATCH/2) / LATCH;
@@ -106,7 +99,7 @@
 static struct irqaction tmu_irq = {
 	.name		= "timer",
 	.handler	= tmu_timer_interrupt,
-	.flags		= IRQF_DISABLED,
+	.flags		= IRQF_DISABLED | IRQF_TIMER,
 	.mask		= CPU_MASK_NONE,
 };
 
@@ -149,9 +142,9 @@
 {
 	unsigned long interval;
 
-	setup_irq(TIMER_IRQ, &tmu_irq);
+	setup_irq(CONFIG_SH_TIMER_IRQ, &tmu_irq);
 
-	tmu0_clk.parent = clk_get("module_clk");
+	tmu0_clk.parent = clk_get(NULL, "module_clk");
 
 	/* Start TMU0 */
 	tmu_timer_stop();
diff --git a/arch/sh/kernel/timers/timer.c b/arch/sh/kernel/timers/timer.c
index dc1f631..a6bcc91 100644
--- a/arch/sh/kernel/timers/timer.c
+++ b/arch/sh/kernel/timers/timer.c
@@ -17,6 +17,12 @@
 #ifdef CONFIG_SH_TMU
 	&tmu_timer,
 #endif
+#ifdef CONFIG_SH_MTU2
+	&mtu2_timer,
+#endif
+#ifdef CONFIG_SH_CMT
+	&cmt_timer,
+#endif
 	NULL,
 };
 
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 53dfa55..3762d9d 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -18,13 +18,14 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/io.h>
+#include <linux/debug_locks.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
 #ifdef CONFIG_SH_KGDB
 #include <asm/kgdb.h>
-#define CHK_REMOTE_DEBUG(regs)                 	\
-{       					\
+#define CHK_REMOTE_DEBUG(regs)			\
+{						\
 	if (kgdb_debug_hook && !user_mode(regs))\
 		(*kgdb_debug_hook)(regs);       \
 }
@@ -33,8 +34,13 @@
 #endif
 
 #ifdef CONFIG_CPU_SH2
-#define TRAP_RESERVED_INST	4
-#define TRAP_ILLEGAL_SLOT_INST	6
+# define TRAP_RESERVED_INST	4
+# define TRAP_ILLEGAL_SLOT_INST	6
+# define TRAP_ADDRESS_ERROR	9
+# ifdef CONFIG_CPU_SH2A
+#  define TRAP_DIVZERO_ERROR	17
+#  define TRAP_DIVOVF_ERROR	18
+# endif
 #else
 #define TRAP_RESERVED_INST	12
 #define TRAP_ILLEGAL_SLOT_INST	13
@@ -88,7 +94,7 @@
 
 	if (!user_mode(regs) || in_interrupt())
 		dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
-		 	 (unsigned long)task_stack_page(current));
+			 (unsigned long)task_stack_page(current));
 
 	bust_spinlocks(0);
 	spin_unlock_irq(&die_lock);
@@ -102,8 +108,6 @@
 		die(str, regs, err);
 }
 
-static int handle_unaligned_notify_count = 10;
-
 /*
  * try and fix up kernelspace address errors
  * - userspace errors just cause EFAULT to be returned, resulting in SEGV
@@ -198,7 +202,7 @@
 		if (copy_to_user(dst,src,4))
 			goto fetch_fault;
 		ret = 0;
- 		break;
+		break;
 
 	case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
 		if (instruction & 4)
@@ -222,7 +226,7 @@
 		if (copy_from_user(dst,src,4))
 			goto fetch_fault;
 		ret = 0;
- 		break;
+		break;
 
 	case 6:	/* mov.[bwl] from memory, possibly with post-increment */
 		src = (unsigned char*) *rm;
@@ -230,7 +234,7 @@
 			*rm += count;
 		dst = (unsigned char*) rn;
 		*(unsigned long*)dst = 0;
-		
+
 #ifdef __LITTLE_ENDIAN__
 		if (copy_from_user(dst, src, count))
 			goto fetch_fault;
@@ -241,7 +245,7 @@
 		}
 #else
 		dst += 4-count;
-		
+
 		if (copy_from_user(dst, src, count))
 			goto fetch_fault;
 
@@ -320,7 +324,8 @@
 			return -EFAULT;
 
 		/* kernel */
-		die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0);
+		die("delay-slot-insn faulting in handle_unaligned_delayslot",
+		    regs, 0);
 	}
 
 	return handle_unaligned_ins(instruction,regs);
@@ -342,6 +347,13 @@
 #define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
 #define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
 
+/*
+ * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
+ * opcodes..
+ */
+#ifndef CONFIG_CPU_SH2A
+static int handle_unaligned_notify_count = 10;
+
 static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
 {
 	u_int rm;
@@ -354,7 +366,8 @@
 	if (user_mode(regs) && handle_unaligned_notify_count>0) {
 		handle_unaligned_notify_count--;
 
-		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+		printk(KERN_NOTICE "Fixing up unaligned userspace access "
+		       "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
 		       current->comm,current->pid,(u16*)regs->pc,instruction);
 	}
 
@@ -478,32 +491,58 @@
 		regs->pc += 2;
 	return ret;
 }
+#endif /* CONFIG_CPU_SH2A */
+
+#ifdef CONFIG_CPU_HAS_SR_RB
+#define lookup_exception_vector(x)	\
+	__asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
+#else
+#define lookup_exception_vector(x)	\
+	__asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
+#endif
 
 /*
- * Handle various address error exceptions
+ * Handle various address error exceptions:
+ *  - instruction address error:
+ *       misaligned PC
+ *       PC >= 0x80000000 in user mode
+ *  - data address error (read and write)
+ *       misaligned data access
+ *       access to >= 0x80000000 in user mode
+ * Unfortunately we can't distinguish between instruction address error
+ * and data address errors caused by read accesses.
  */
-asmlinkage void do_address_error(struct pt_regs *regs, 
+asmlinkage void do_address_error(struct pt_regs *regs,
 				 unsigned long writeaccess,
 				 unsigned long address)
 {
-	unsigned long error_code;
+	unsigned long error_code = 0;
 	mm_segment_t oldfs;
+	siginfo_t info;
+#ifndef CONFIG_CPU_SH2A
 	u16 instruction;
 	int tmp;
+#endif
 
-	asm volatile("stc       r2_bank,%0": "=r" (error_code));
+	/* Intentional ifdef */
+#ifdef CONFIG_CPU_HAS_SR_RB
+	lookup_exception_vector(error_code);
+#endif
 
 	oldfs = get_fs();
 
 	if (user_mode(regs)) {
+		int si_code = BUS_ADRERR;
+
 		local_irq_enable();
-		current->thread.error_code = error_code;
-		current->thread.trap_no = (writeaccess) ? 8 : 7;
 
 		/* bad PC is not something we can fix */
-		if (regs->pc & 1)
+		if (regs->pc & 1) {
+			si_code = BUS_ADRALN;
 			goto uspace_segv;
+		}
 
+#ifndef CONFIG_CPU_SH2A
 		set_fs(USER_DS);
 		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
 			/* Argh. Fault on the instruction itself.
@@ -518,14 +557,23 @@
 
 		if (tmp==0)
 			return; /* sorted */
+#endif
 
-	uspace_segv:
-		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
-		force_sig(SIGSEGV, current);
+uspace_segv:
+		printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
+		       "access (PC %lx PR %lx)\n", current->comm, regs->pc,
+		       regs->pr);
+
+		info.si_signo = SIGBUS;
+		info.si_errno = 0;
+		info.si_code = si_code;
+		info.si_addr = (void *) address;
+		force_sig_info(SIGBUS, &info, current);
 	} else {
 		if (regs->pc & 1)
 			die("unaligned program counter", regs, error_code);
 
+#ifndef CONFIG_CPU_SH2A
 		set_fs(KERNEL_DS);
 		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
 			/* Argh. Fault on the instruction itself.
@@ -537,6 +585,12 @@
 
 		handle_unaligned_access(instruction, regs);
 		set_fs(oldfs);
+#else
+		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
+		       "access\n", current->comm);
+
+		force_sig(SIGSEGV, current);
+#endif
 	}
 }
 
@@ -548,7 +602,7 @@
 {
 	unsigned short inst;
 
-	/* 
+	/*
 	 * Safe guard if DSP mode is already enabled or we're lacking
 	 * the DSP altogether.
 	 */
@@ -569,27 +623,49 @@
 #define is_dsp_inst(regs)	(0)
 #endif /* CONFIG_SH_DSP */
 
+#ifdef CONFIG_CPU_SH2A
+asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
+				unsigned long r6, unsigned long r7,
+				struct pt_regs __regs)
+{
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+	siginfo_t info;
+
+	switch (r4) {
+	case TRAP_DIVZERO_ERROR:
+		info.si_code = FPE_INTDIV;
+		break;
+	case TRAP_DIVOVF_ERROR:
+		info.si_code = FPE_INTOVF;
+		break;
+	}
+
+	force_sig_info(SIGFPE, &info, current);
+}
+#endif
+
 /* arch/sh/kernel/cpu/sh4/fpu.c */
 extern int do_fpu_inst(unsigned short, struct pt_regs *);
 extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5,
-		unsigned long r6, unsigned long r7, struct pt_regs regs);
+		unsigned long r6, unsigned long r7, struct pt_regs __regs);
 
 asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
 				unsigned long r6, unsigned long r7,
-				struct pt_regs regs)
+				struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	unsigned long error_code;
 	struct task_struct *tsk = current;
 
 #ifdef CONFIG_SH_FPU_EMU
-	unsigned short inst;
+	unsigned short inst = 0;
 	int err;
 
-	get_user(inst, (unsigned short*)regs.pc);
+	get_user(inst, (unsigned short*)regs->pc);
 
-	err = do_fpu_inst(inst, &regs);
+	err = do_fpu_inst(inst, regs);
 	if (!err) {
-		regs.pc += 2;
+		regs->pc += 2;
 		return;
 	}
 	/* not a FPU inst. */
@@ -597,20 +673,19 @@
 
 #ifdef CONFIG_SH_DSP
 	/* Check if it's a DSP instruction */
- 	if (is_dsp_inst(&regs)) {
+	if (is_dsp_inst(regs)) {
 		/* Enable DSP mode, and restart instruction. */
-		regs.sr |= SR_DSP;
+		regs->sr |= SR_DSP;
 		return;
 	}
 #endif
 
-	asm volatile("stc	r2_bank, %0": "=r" (error_code));
+	lookup_exception_vector(error_code);
+
 	local_irq_enable();
-	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = TRAP_RESERVED_INST;
-	CHK_REMOTE_DEBUG(&regs);
+	CHK_REMOTE_DEBUG(regs);
 	force_sig(SIGILL, tsk);
-	die_if_no_fixup("reserved instruction", &regs, error_code);
+	die_if_no_fixup("reserved instruction", regs, error_code);
 }
 
 #ifdef CONFIG_SH_FPU_EMU
@@ -658,39 +733,41 @@
 
 asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
 				unsigned long r6, unsigned long r7,
-				struct pt_regs regs)
+				struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	unsigned long error_code;
 	struct task_struct *tsk = current;
 #ifdef CONFIG_SH_FPU_EMU
-	unsigned short inst;
+	unsigned short inst = 0;
 
-	get_user(inst, (unsigned short *)regs.pc + 1);
-	if (!do_fpu_inst(inst, &regs)) {
-		get_user(inst, (unsigned short *)regs.pc);
-		if (!emulate_branch(inst, &regs))
+	get_user(inst, (unsigned short *)regs->pc + 1);
+	if (!do_fpu_inst(inst, regs)) {
+		get_user(inst, (unsigned short *)regs->pc);
+		if (!emulate_branch(inst, regs))
 			return;
 		/* fault in branch.*/
 	}
 	/* not a FPU inst. */
 #endif
 
-	asm volatile("stc	r2_bank, %0": "=r" (error_code));
+	lookup_exception_vector(error_code);
+
 	local_irq_enable();
-	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = TRAP_RESERVED_INST;
-	CHK_REMOTE_DEBUG(&regs);
+	CHK_REMOTE_DEBUG(regs);
 	force_sig(SIGILL, tsk);
-	die_if_no_fixup("illegal slot instruction", &regs, error_code);
+	die_if_no_fixup("illegal slot instruction", regs, error_code);
 }
 
 asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
 				   unsigned long r6, unsigned long r7,
-				   struct pt_regs regs)
+				   struct pt_regs __regs)
 {
+	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 	long ex;
-	asm volatile("stc	r2_bank, %0" : "=r" (ex));
-	die_if_kernel("exception", &regs, ex);
+
+	lookup_exception_vector(ex);
+	die_if_kernel("exception", regs, ex);
 }
 
 #if defined(CONFIG_SH_STANDARD_BIOS)
@@ -735,12 +812,16 @@
 {
 	extern void *exception_handling_table[];
 	void *old_handler;
-	
+
 	old_handler = exception_handling_table[vec];
 	exception_handling_table[vec] = handler;
 	return old_handler;
 }
 
+extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5,
+					     unsigned long r6, unsigned long r7,
+					     struct pt_regs __regs);
+
 void __init trap_init(void)
 {
 	set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
@@ -759,7 +840,15 @@
 	set_exception_table_evt(0x800, do_fpu_state_restore);
 	set_exception_table_evt(0x820, do_fpu_state_restore);
 #endif
-		
+
+#ifdef CONFIG_CPU_SH2
+	set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler);
+#endif
+#ifdef CONFIG_CPU_SH2A
+	set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
+	set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
+#endif
+
 	/* Setup VBR for boot cpu */
 	per_cpu_trap_init();
 }
@@ -784,6 +873,11 @@
 	}
 
 	printk("\n");
+
+	if (!tsk)
+		tsk = current;
+
+	debug_show_held_locks(tsk);
 }
 
 void show_stack(struct task_struct *tsk, unsigned long *sp)
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index 075d6cc..deb4694 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -97,7 +97,7 @@
 		goto up_fail;
 	}
 
-	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		ret = -ENOMEM;
 		goto up_fail;
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 9dd6064..4e0362f 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -4,8 +4,12 @@
 # Processor families
 #
 config CPU_SH2
+	select SH_WRITETHROUGH if !CPU_SH2A
 	bool
-	select SH_WRITETHROUGH
+
+config CPU_SH2A
+	bool
+	select CPU_SH2
 
 config CPU_SH3
 	bool
@@ -16,6 +20,7 @@
 	bool
 	select CPU_HAS_INTEVT
 	select CPU_HAS_SR_RB
+	select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40
 
 config CPU_SH4A
 	bool
@@ -40,6 +45,16 @@
 	bool "Support SH7604 processor"
 	select CPU_SH2
 
+config CPU_SUBTYPE_SH7619
+	bool "Support SH7619 processor"
+	select CPU_SH2
+
+comment "SH-2A Processor Support"
+
+config CPU_SUBTYPE_SH7206
+	bool "Support SH7206 processor"
+	select CPU_SH2A
+
 comment "SH-3 Processor Support"
 
 config CPU_SUBTYPE_SH7300
@@ -89,6 +104,7 @@
 config CPU_SUBTYPE_SH7750
 	bool "Support SH7750 processor"
 	select CPU_SH4
+	select CPU_HAS_IPR_IRQ
 	help
 	  Select SH7750 if you have a 200 Mhz SH-4 HD6417750 CPU.
 
@@ -104,15 +120,18 @@
 	bool "Support SH7750R processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7750
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7750S
 	bool "Support SH7750S processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7750
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7751
 	bool "Support SH7751 processor"
 	select CPU_SH4
+	select CPU_HAS_IPR_IRQ
 	help
 	  Select SH7751 if you have a 166 Mhz SH-4 HD6417751 CPU,
 	  or if you have a HD6417751R CPU.
@@ -121,6 +140,7 @@
 	bool "Support SH7751R processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7751
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7760
 	bool "Support SH7760 processor"
@@ -157,6 +177,11 @@
 	select CPU_SH4A
 	select CPU_HAS_INTC2_IRQ
 
+config CPU_SUBTYPE_SH7785
+	bool "Support SH7785 processor"
+	select CPU_SH4A
+	select CPU_HAS_INTC2_IRQ
+
 comment "SH4AL-DSP Processor Support"
 
 config CPU_SUBTYPE_SH73180
@@ -216,13 +241,22 @@
 
 config 32BIT
 	bool "Support 32-bit physical addressing through PMB"
-	depends on CPU_SH4A && MMU
+	depends on CPU_SH4A && MMU && (!X2TLB || BROKEN)
 	default y
 	help
 	  If you say Y here, physical addressing will be extended to
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
+config X2TLB
+	bool "Enable extended TLB mode"
+	depends on CPU_SUBTYPE_SH7785 && MMU && EXPERIMENTAL
+	help
+	  Selecting this option will enable the extended mode of the SH-X2
+	  TLB. For legacy SH-X behaviour and interoperability, say N. For
+	  all of the fun new features and a willingness to submit bug reports,
+	  say Y.
+
 config VSYSCALL
 	bool "Support vsyscall page"
 	depends on MMU
@@ -237,16 +271,52 @@
 	  (the default value) say Y.
 
 choice
+	prompt "Kernel page size"
+	default PAGE_SIZE_4KB
+
+config PAGE_SIZE_4KB
+	bool "4kB"
+	help
+	  This is the default page size used by all SuperH CPUs.
+
+config PAGE_SIZE_8KB
+	bool "8kB"
+	depends on EXPERIMENTAL && X2TLB
+	help
+	  This enables 8kB pages as supported by SH-X2 and later MMUs.
+
+config PAGE_SIZE_64KB
+	bool "64kB"
+	depends on EXPERIMENTAL && CPU_SH4
+	help
+	  This enables support for 64kB pages, possible on all SH-4
+	  CPUs and later. Highly experimental, not recommended.
+
+endchoice
+
+choice
 	prompt "HugeTLB page size"
 	depends on HUGETLB_PAGE && CPU_SH4 && MMU
 	default HUGETLB_PAGE_SIZE_64K
 
 config HUGETLB_PAGE_SIZE_64K
-	bool "64K"
+	bool "64kB"
+
+config HUGETLB_PAGE_SIZE_256K
+	bool "256kB"
+	depends on X2TLB
 
 config HUGETLB_PAGE_SIZE_1MB
 	bool "1MB"
 
+config HUGETLB_PAGE_SIZE_4MB
+	bool "4MB"
+	depends on X2TLB
+
+config HUGETLB_PAGE_SIZE_64MB
+	bool "64MB"
+	depends on X2TLB
+
 endchoice
 
 source "mm/Kconfig"
@@ -274,7 +344,6 @@
 
 config SH_WRITETHROUGH
 	bool "Use write-through caching"
-	default y if CPU_SH2
 	help
 	  Selecting this option will configure the caches in write-through
 	  mode, as opposed to the default write-back configuration.
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index 2689cb2..6614033 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -5,6 +5,7 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
+
 #include <linux/init.h>
 #include <linux/mm.h>
 
@@ -14,37 +15,43 @@
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
-/*
- * Calculate the OC address and set the way bit on the SH-2.
- *
- * We must have already jump_to_P2()'ed prior to calling this
- * function, since we rely on CCR manipulation to do the
- * Right Thing(tm).
- */
-unsigned long __get_oc_addr(unsigned long set, unsigned long way)
+void __flush_wback_region(void *start, int size)
 {
-	unsigned long ccr;
+	unsigned long v;
+	unsigned long begin, end;
 
-	/*
-	 * On SH-2 the way bit isn't tracked in the address field
-	 * if we're doing address array access .. instead, we need
-	 * to manually switch out the way in the CCR.
-	 */
-	ccr = ctrl_inl(CCR);
-	ccr &= ~0x00c0;
-	ccr |= way << cpu_data->dcache.way_shift;
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		/* FIXME cache purge */
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
+}
 
-	/*
-	 * Despite the number of sets being halved, we end up losing
-	 * the first 2 ways to OCRAM instead of the last 2 (if we're
-	 * 4-way). As a result, forcibly setting the W1 bit handily
-	 * bumps us up 2 ways.
-	 */
-	if (ccr & CCR_CACHE_ORA)
-		ccr |= 1 << (cpu_data->dcache.way_shift + 1);
+void __flush_purge_region(void *start, int size)
+{
+	unsigned long v;
+	unsigned long begin, end;
 
-	ctrl_outl(ccr, CCR);
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
+}
 
-	return CACHE_OC_ADDRESS_ARRAY | (set << cpu_data->dcache.entry_shift);
+void __flush_invalidate_region(void *start, int size)
+{
+	unsigned long v;
+	unsigned long begin, end;
+
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
 }
 
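
Each of the three SH-2 flush routines above first rounds the region down and up to L1 cache-line boundaries before walking it in line-sized steps. A standalone sketch of that alignment arithmetic (the 16-byte line size is an assumed example value):

/* Illustration of the cache-line rounding used by the SH-2 flush loops.
 * L1_CACHE_BYTES = 16 is an assumed example value. */
#include <stdio.h>

#define L1_CACHE_BYTES 16

int main(void)
{
	unsigned long start = 0x0c001234UL;	/* arbitrary example address */
	int size = 100;
	unsigned long begin = start & ~(L1_CACHE_BYTES - 1);
	unsigned long end = (start + size + L1_CACHE_BYTES - 1)
				& ~(L1_CACHE_BYTES - 1);

	printf("begin=0x%08lx end=0x%08lx lines=%lu\n",
	       begin, end, (end - begin) / L1_CACHE_BYTES);
	return 0;
}
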
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index e48cc22..ae531af 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -11,12 +11,8 @@
  */
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <asm/addrspace.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -83,9 +79,9 @@
  */
 
 /* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
-#define MAX_P3_SEMAPHORES 16
+#define MAX_P3_MUTEXES 16
 
-struct semaphore p3map_sem[MAX_P3_SEMAPHORES];
+struct mutex p3map_mutex[MAX_P3_MUTEXES];
 
 void __init p3_cache_init(void)
 {
@@ -115,7 +111,7 @@
 		panic("%s failed.", __FUNCTION__);
 
 	for (i = 0; i < cpu_data->dcache.n_aliases; i++)
-		sema_init(&p3map_sem[i], 1);
+		mutex_init(&p3map_mutex[i]);
 }
 
 /*
@@ -229,7 +225,7 @@
 	 */
 	if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) ||
 	    (start < CACHE_OC_ADDRESS_ARRAY))
-	    	exec_offset = 0x20000000;
+		exec_offset = 0x20000000;
 
 	local_irq_save(flags);
 	__flush_cache_4096(start | SH_CACHE_ASSOC,
@@ -250,7 +246,7 @@
 
 		/* Loop all the D-cache */
 		n = cpu_data->dcache.n_aliases;
-		for (i = 0; i < n; i++, addr += PAGE_SIZE)
+		for (i = 0; i < n; i++, addr += 4096)
 			flush_cache_4096(addr, phys);
 	}
 
diff --git a/arch/sh/mm/clear_page.S b/arch/sh/mm/clear_page.S
index 7b96425..8a70613 100644
--- a/arch/sh/mm/clear_page.S
+++ b/arch/sh/mm/clear_page.S
@@ -1,12 +1,12 @@
-/* $Id: clear_page.S,v 1.13 2003/08/25 17:03:10 lethal Exp $
- *
+/*
  * __clear_user_page, __clear_user, clear_page implementation of SuperH
  *
  * Copyright (C) 2001  Kaz Kojima
  * Copyright (C) 2001, 2002  Niibe Yutaka
- *
+ * Copyright (C) 2006  Paul Mundt
  */
 #include <linux/linkage.h>
+#include <asm/page.h>
 
 /*
  * clear_page_slow
@@ -18,11 +18,11 @@
 /*
  * r0 --- scratch
  * r4 --- to
- * r5 --- to + 4096
+ * r5 --- to + PAGE_SIZE
  */
 ENTRY(clear_page_slow)
 	mov	r4,r5
-	mov.w	.Llimit,r0
+	mov.l	.Llimit,r0
 	add	r0,r5
 	mov	#0,r0
 	!
@@ -50,7 +50,7 @@
 	!
 	rts
 	 nop
-.Llimit:	.word	(4096-28)
+.Llimit:	.long	(PAGE_SIZE-28)
 
 ENTRY(__clear_user)
 	!
@@ -164,10 +164,10 @@
  * r0 --- scratch 
  * r4 --- to
  * r5 --- orig_to
- * r6 --- to + 4096
+ * r6 --- to + PAGE_SIZE
  */
 ENTRY(__clear_user_page)
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
 	mov	r4,r6
 	add	r0,r6
 	mov	#0,r0
@@ -191,7 +191,7 @@
 	!
 	rts
 	 nop
-.L4096:	.word	4096
+.Lpsz:	.long	PAGE_SIZE
 
 #endif
 
diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S
index 1addffe..397c94c 100644
--- a/arch/sh/mm/copy_page.S
+++ b/arch/sh/mm/copy_page.S
@@ -1,12 +1,12 @@
-/* $Id: copy_page.S,v 1.8 2003/08/25 17:03:10 lethal Exp $
- *
+/*
  * copy_page, __copy_user_page, __copy_user implementation of SuperH
  *
  * Copyright (C) 2001  Niibe Yutaka & Kaz Kojima
  * Copyright (C) 2002  Toshinobu Sugioka
- *
+ * Copyright (C) 2006  Paul Mundt
  */
 #include <linux/linkage.h>
+#include <asm/page.h>
 
 /*
  * copy_page_slow
@@ -18,7 +18,7 @@
 
 /*
  * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch 
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
  * r9 --- not used
  * r10 --- to
  * r11 --- from
@@ -30,7 +30,7 @@
 	mov	r4,r10
 	mov	r5,r11
 	mov	r5,r8
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
 	add	r0,r8
 	!
 1:	mov.l	@r11+,r0
@@ -80,7 +80,7 @@
 
 /*
  * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch 
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
  * r9 --- orig_to
  * r10 --- to
  * r11 --- from
@@ -94,7 +94,7 @@
 	mov	r5,r11
 	mov	r6,r9
 	mov	r5,r8
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
 	add	r0,r8
 	!
 1:	ocbi	@r9
@@ -129,7 +129,7 @@
 	rts
 	 nop
 #endif
-.L4096:	.word	4096
+.Lpsz:	.long	PAGE_SIZE
 /*
  * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
  * Return the number of bytes NOT copied
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 68663b8..716ebf5 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -26,13 +26,19 @@
  * and the problem, and then passes it off to one of the appropriate
  * routines.
  */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
-			      unsigned long address)
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+					unsigned long writeaccess,
+					unsigned long address)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	unsigned long page;
+	int si_code;
+	siginfo_t info;
+
+	trace_hardirqs_on();
+	local_irq_enable();
 
 #ifdef CONFIG_SH_KGDB
 	if (kgdb_nofault && kgdb_bus_err_hook)
@@ -41,6 +47,46 @@
 
 	tsk = current;
 	mm = tsk->mm;
+	si_code = SEGV_MAPERR;
+
+	if (unlikely(address >= TASK_SIZE)) {
+		/*
+		 * Synchronize this task's top level page-table
+		 * with the 'reference' page table.
+		 *
+		 * Do _not_ use "tsk" here. We might be inside
+		 * an interrupt in the middle of a task switch..
+		 */
+		int offset = pgd_index(address);
+		pgd_t *pgd, *pgd_k;
+		pud_t *pud, *pud_k;
+		pmd_t *pmd, *pmd_k;
+
+		pgd = get_TTB() + offset;
+		pgd_k = swapper_pg_dir + offset;
+
+		/* This will never happen with the folded page table. */
+		if (!pgd_present(*pgd)) {
+			if (!pgd_present(*pgd_k))
+				goto bad_area_nosemaphore;
+			set_pgd(pgd, *pgd_k);
+			return;
+		}
+
+		pud = pud_offset(pgd, address);
+		pud_k = pud_offset(pgd_k, address);
+		if (pud_present(*pud) || !pud_present(*pud_k))
+			goto bad_area_nosemaphore;
+		set_pud(pud, *pud_k);
+
+		pmd = pmd_offset(pud, address);
+		pmd_k = pmd_offset(pud_k, address);
+		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+			goto bad_area_nosemaphore;
+		set_pmd(pmd, *pmd_k);
+
+		return;
+	}
 
 	/*
 	 * If we're in an interrupt or have no user
@@ -65,6 +111,7 @@
  * we can handle it..
  */
 good_area:
+	si_code = SEGV_ACCERR;
 	if (writeaccess) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
@@ -104,10 +151,13 @@
 bad_area:
 	up_read(&mm->mmap_sem);
 
+bad_area_nosemaphore:
 	if (user_mode(regs)) {
-		tsk->thread.address = address;
-		tsk->thread.error_code = writeaccess;
-		force_sig(SIGSEGV, tsk);
+		info.si_signo = SIGSEGV;
+		info.si_errno = 0;
+		info.si_code = si_code;
+		info.si_addr = (void *) address;
+		force_sig_info(SIGSEGV, &info, tsk);
 		return;
 	}
 
@@ -127,11 +177,9 @@
 		printk(KERN_ALERT "Unable to handle kernel paging request");
 	printk(" at virtual address %08lx\n", address);
 	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
-	asm volatile("mov.l	%1, %0"
-		     : "=r" (page)
-		     : "m" (__m(MMU_TTB)));
+	page = (unsigned long)get_TTB();
 	if (page) {
-		page = ((unsigned long *) page)[address >> 22];
+		page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
 		printk(KERN_ALERT "*pde = %08lx\n", page);
 		if (page & _PAGE_PRESENT) {
 			page &= PAGE_MASK;
@@ -166,98 +214,13 @@
 	 * Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
-	tsk->thread.address = address;
-	tsk->thread.error_code = writeaccess;
-	tsk->thread.trap_no = 14;
-	force_sig(SIGBUS, tsk);
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_ADRERR;
+	info.si_addr = (void *)address;
+	force_sig_info(SIGBUS, &info, tsk);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
 		goto no_context;
 }
-
-#ifdef CONFIG_SH_STORE_QUEUES
-/*
- * This is a special case for the SH-4 store queues, as pages for this
- * space still need to be faulted in before it's possible to flush the
- * store queue cache for writeout to the remapped region.
- */
-#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
-#else
-#define P3_ADDR_MAX		P4SEG
-#endif
-
-/*
- * Called with interrupts disabled.
- */
-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
-					 unsigned long writeaccess,
-					 unsigned long address)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-	struct mm_struct *mm = current->mm;
-	spinlock_t *ptl;
-	int ret = 1;
-
-#ifdef CONFIG_SH_KGDB
-	if (kgdb_nofault && kgdb_bus_err_hook)
-		kgdb_bus_err_hook();
-#endif
-
-	/*
-	 * We don't take page faults for P1, P2, and parts of P4, these
-	 * are always mapped, whether it be due to legacy behaviour in
-	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
-	 */
-	if (address >= P3SEG && address < P3_ADDR_MAX) {
-		pgd = pgd_offset_k(address);
-		mm = NULL;
-	} else {
-		if (unlikely(address >= TASK_SIZE || !mm))
-			return 1;
-
-		pgd = pgd_offset(mm, address);
-	}
-
-	pud = pud_offset(pgd, address);
-	if (pud_none_or_clear_bad(pud))
-		return 1;
-	pmd = pmd_offset(pud, address);
-	if (pmd_none_or_clear_bad(pmd))
-		return 1;
-
-	if (mm)
-		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-	else
-		pte = pte_offset_kernel(pmd, address);
-
-	entry = *pte;
-	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-		goto unlock;
-	if (unlikely(writeaccess && !pte_write(entry)))
-		goto unlock;
-
-	if (writeaccess)
-		entry = pte_mkdirty(entry);
-	entry = pte_mkyoung(entry);
-
-#ifdef CONFIG_CPU_SH4
-	/*
-	 * ITLB is not affected by "ldtlb" instruction.
-	 * So, we need to flush the entry by ourselves.
-	 */
-	__flush_tlb_page(get_asid(), address & PAGE_MASK);
-#endif
-
-	set_pte(pte, entry);
-	update_mmu_cache(NULL, address, entry);
-	ret = 0;
-unlock:
-	if (mm)
-		pte_unmap_unlock(pte, ptl);
-	return ret;
-}
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 329059d..cf2c2ee 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -63,6 +63,11 @@
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 struct page *follow_huge_addr(struct mm_struct *mm,
 			      unsigned long address, int write)
 {
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 7154d1c..59f4cc1 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -84,30 +84,22 @@
 	pmd_t *pmd;
 	pte_t *pte;
 
-	pgd = swapper_pg_dir + pgd_index(addr);
+	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
 		pgd_ERROR(*pgd);
 		return;
 	}
 
-	pud = pud_offset(pgd, addr);
-	if (pud_none(*pud)) {
-		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
-		if (pmd != pmd_offset(pud, 0)) {
-			pud_ERROR(*pud);
-			return;
-		}
+	pud = pud_alloc(NULL, pgd, addr);
+	if (unlikely(!pud)) {
+		pud_ERROR(*pud);
+		return;
 	}
 
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd)) {
-		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-		if (pte != pte_offset_kernel(pmd, 0)) {
-			pmd_ERROR(*pmd);
-			return;
-		}
+	pmd = pmd_alloc(NULL, pud, addr);
+	if (unlikely(!pmd)) {
+		pmd_ERROR(*pmd);
+		return;
 	}
 
 	pte = pte_offset_kernel(pmd, addr);
@@ -155,9 +147,6 @@
 
 /*
  * paging_init() sets up the page tables
- *
- * This routines also unmaps the page at virtual kernel address 0, so
- * that we can trap those pesky NULL-reference errors in the kernel.
  */
 void __init paging_init(void)
 {
@@ -180,14 +169,11 @@
 	 */
 	{
 		unsigned long max_dma, low, start_pfn;
-		pgd_t *pg_dir;
-		int i;
 
-		/* We don't need kernel mapping as hardware support that. */
-		pg_dir = swapper_pg_dir;
-
-		for (i = 0; i < PTRS_PER_PGD; i++)
-			pgd_val(pg_dir[i]) = 0;
+		/* We don't need to map the kernel through the TLB, as
+		 * it is permanently mapped using P1. So clear the
+		 * entire pgd. */
+		memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
 
 		/* Turn on the MMU */
 		enable_mmu();
@@ -206,6 +192,10 @@
 		}
 	}
 
+	/* Set an initial value for the MMU.TTB so we don't have to
+	 * check for a null value. */
+	set_TTB(swapper_pg_dir);
+
 #elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
 	/*
 	 * If we don't have CONFIG_MMU set and the processor in question
@@ -227,7 +217,6 @@
 
 void __init mem_init(void)
 {
-	extern unsigned long empty_zero_page[1024];
 	int codesize, reservedpages, datasize, initsize;
 	int tmp;
 	extern unsigned long memory_start;
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index a9fe80c..11d54c1 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -28,9 +28,7 @@
 {
 	unsigned long end;
 	unsigned long pfn;
-	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
-				   _PAGE_DIRTY | _PAGE_ACCESSED |
-				   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);
+	pgprot_t pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
 
 	address &= ~PMD_MASK;
 	end = address + size;
diff --git a/arch/sh/mm/pg-dma.c b/arch/sh/mm/pg-dma.c
index 1406d2e..bb23679 100644
--- a/arch/sh/mm/pg-dma.c
+++ b/arch/sh/mm/pg-dma.c
@@ -39,8 +39,6 @@
 
 static void clear_page_dma(void *to)
 {
-	extern unsigned long empty_zero_page[1024];
-
 	/*
 	 * We get invoked quite early on, if the DMAC hasn't been initialized
 	 * yet, fall back on the slow manual implementation.
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 07371ed..3f98d2a 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -6,22 +6,12 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
-#include <linux/init.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
-#include <linux/threads.h>
-#include <asm/addrspace.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
+#include <linux/mutex.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-extern struct semaphore p3map_sem[];
+extern struct mutex p3map_mutex[];
 
 #define CACHE_ALIAS (cpu_data->dcache.alias_mask)
 
@@ -37,10 +27,6 @@
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		clear_page(to);
 	else {
-		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
-					   _PAGE_RW | _PAGE_CACHABLE |
-					   _PAGE_DIRTY | _PAGE_ACCESSED |
-					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
 		unsigned long phys_addr = PHYSADDR(to);
 		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
 		pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -50,8 +36,8 @@
 		pte_t entry;
 		unsigned long flags;
 
-		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
-		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
+		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
 		__flush_tlb_page(get_asid(), p3_addr);
@@ -59,7 +45,7 @@
 		update_mmu_cache(NULL, p3_addr, entry);
 		__clear_user_page((void *)p3_addr, to);
 		pte_clear(&init_mm, p3_addr, pte);
-		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 	}
 }
 
@@ -77,10 +63,6 @@
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		copy_page(to, from);
 	else {
-		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
-					   _PAGE_RW | _PAGE_CACHABLE |
-					   _PAGE_DIRTY | _PAGE_ACCESSED |
-					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
 		unsigned long phys_addr = PHYSADDR(to);
 		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
 		pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -90,8 +72,8 @@
 		pte_t entry;
 		unsigned long flags;
 
-		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
-		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
+		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
 		__flush_tlb_page(get_asid(), p3_addr);
@@ -99,7 +81,7 @@
 		update_mmu_cache(NULL, p3_addr, entry);
 		__copy_user_page((void *)p3_addr, from, to);
 		pte_clear(&init_mm, p3_addr, pte);
-		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 	}
 }
 
@@ -122,4 +104,3 @@
 	}
 	return pte;
 }
-
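
The p3map conversion above serializes the temporary P3 mappings per cache-alias colour: the colour bits of the user address, masked by CACHE_ALIAS and shifted down by 12, pick which mutex in p3map_mutex[] to hold. A hedged sketch of that index calculation (the 0x3000 alias mask, i.e. four 4 kB colours, is an assumed example value):

/* Illustration of the per-alias index used for p3map_mutex[] above.
 * The alias mask is an assumed example value, not taken from the patch. */
#include <stdio.h>

#define CACHE_ALIAS 0x3000UL	/* assumed dcache alias_mask */

int main(void)
{
	unsigned long addrs[] = { 0x08000000UL, 0x08001000UL,
				  0x08002000UL, 0x08005000UL };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("addr=0x%08lx -> mutex index %lu\n",
		       addrs[i], (addrs[i] & CACHE_ALIAS) >> 12);
	return 0;
}
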
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 92e7453..b60ad83 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -30,7 +30,7 @@
 
 #define NR_PMB_ENTRIES	16
 
-static kmem_cache_t *pmb_cache;
+static struct kmem_cache *pmb_cache;
 static unsigned long pmb_map;
 
 static struct pmb_entry pmb_init_map[] = {
@@ -283,7 +283,7 @@
 	} while (pmbe);
 }
 
-static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
+static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct pmb_entry *pmbe = pmb;
 
@@ -297,7 +297,7 @@
 	spin_unlock_irq(&pmb_list_lock);
 }
 
-static void pmb_cache_dtor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
+static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
 {
 	spin_lock_irq(&pmb_list_lock);
 	pmb_list_del(pmb);
diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
index ac57638..0571755 100644
--- a/arch/sh/tools/mach-types
+++ b/arch/sh/tools/mach-types
@@ -30,3 +30,5 @@
 TITAN			SH_TITAN
 SHMIN			SH_SHMIN
 7710VOIPGW		SH_7710VOIPGW
+7206SE			SH_7206_SOLUTION_ENGINE
+7619SE			SH_7619_SOLUTION_ENGINE
diff --git a/arch/sh64/kernel/setup.c b/arch/sh64/kernel/setup.c
index ffb310e..b9e7d54 100644
--- a/arch/sh64/kernel/setup.c
+++ b/arch/sh64/kernel/setup.c
@@ -243,9 +243,7 @@
 		if (INITRD_START + INITRD_SIZE <= (PFN_PHYS(last_pfn))) {
 		        reserve_bootmem_node(NODE_DATA(0), INITRD_START + __MEMORY_START, INITRD_SIZE);
 
-			initrd_start =
-			  (long) INITRD_START ? INITRD_START + PAGE_OFFSET +  __MEMORY_START : 0;
-
+			initrd_start = (long) INITRD_START + PAGE_OFFSET + __MEMORY_START;
 			initrd_end = initrd_start + INITRD_SIZE;
 		} else {
 			printk("initrd extends beyond end of memory "
diff --git a/arch/sh64/kernel/sh_ksyms.c b/arch/sh64/kernel/sh_ksyms.c
index 4b2df72..7aa4b4f 100644
--- a/arch/sh64/kernel/sh_ksyms.c
+++ b/arch/sh64/kernel/sh_ksyms.c
@@ -38,7 +38,7 @@
 EXPORT_SYMBOL(kernel_thread);
 
 /* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
 
 EXPORT_SYMBOL(strstr);
 
diff --git a/arch/sh64/kernel/signal.c b/arch/sh64/kernel/signal.c
index 9e2ffc4..1666d3e 100644
--- a/arch/sh64/kernel/signal.c
+++ b/arch/sh64/kernel/signal.c
@@ -22,7 +22,7 @@
 #include <linux/errno.h>
 #include <linux/wait.h>
 #include <linux/personality.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
 #include <linux/stddef.h>
diff --git a/arch/sh64/lib/c-checksum.c b/arch/sh64/lib/c-checksum.c
index 0e8a742..4b26763 100644
--- a/arch/sh64/lib/c-checksum.c
+++ b/arch/sh64/lib/c-checksum.c
@@ -118,24 +118,24 @@
 
 /* computes the checksum of a memory block at buff, length len,
    and adds in "sum" (32-bit)  */
-unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
 {
 	unsigned long long result = do_csum(buff, len);
 
 	/* add in old sum, and carry.. */
-	result += sum;
+	result += (__force u32)sum;
 	/* 32+c bits -> 32 bits */
 	result = (result & 0xffffffff) + (result >> 32);
 
 	pr_debug("csum_partial, buff %p len %d sum 0x%x result=0x%016Lx\n",
 		buff, len, sum, result);
 
-	return result;
+	return (__force __wsum)result;
 }
 
 /* Copy while checksumming, otherwise like csum_partial.  */
-unsigned int
-csum_partial_copy(const unsigned char *src, unsigned char *dst, int len, unsigned int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
 	sum = csum_partial(src, len, sum);
 	memcpy(dst, src, len);
@@ -145,9 +145,9 @@
 
 /* Copy from userspace and compute checksum.  If we catch an exception
    then zero the rest of the buffer.  */
-unsigned int
-csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst, int len,
-			    unsigned int sum, int *err_ptr)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+			    __wsum sum, int *err_ptr)
 {
 	int missing;
 
@@ -166,9 +166,9 @@
 }
 
 /* Copy to userspace and compute checksum.  */
-unsigned int
+__wsum
 csum_partial_copy_to_user(const unsigned char *src, unsigned char *dst, int len,
-			  unsigned int sum, int *err_ptr)
+			  __wsum sum, int *err_ptr)
 {
 	sum = csum_partial(src, len, sum);
 
@@ -182,28 +182,24 @@
  *	This is a version of ip_compute_csum() optimized for IP headers,
  *	which always checksum on 4 octet boundaries.
  */
-unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	pr_debug("ip_fast_csum %p,%d\n", iph, ihl);
 
-	return ~do_csum(iph, ihl * 4);
+	return (__force __sum16)~do_csum(iph, ihl * 4);
 }
 
-unsigned int csum_tcpudp_nofold(unsigned long saddr,
-				unsigned long daddr,
+__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 				unsigned short len,
-				unsigned short proto, unsigned int sum)
+				unsigned short proto, __wsum sum)
 {
 	unsigned long long result;
 
 	pr_debug("ntohs(0x%x)=0x%x\n", 0xdead, ntohs(0xdead));
 	pr_debug("htons(0x%x)=0x%x\n", 0xdead, htons(0xdead));
 
-	result = ((unsigned long long) saddr +
-		  (unsigned long long) daddr +
-		  (unsigned long long) sum +
-		  ((unsigned long long) ntohs(len) << 16) +
-		  ((unsigned long long) proto << 8));
+	result = (__force u64) saddr + (__force u64) daddr +
+		 (__force u64) sum + ((len + proto) << 8);
 
 	/* Fold down to 32-bits so we don't loose in the typedef-less
 	   network stack.  */
@@ -215,16 +211,5 @@
 	pr_debug("%s saddr %x daddr %x len %x proto %x sum %x result %08Lx\n",
 		__FUNCTION__, saddr, daddr, len, proto, sum, result);
 
-	return result;
-}
-
-// Post SIM:
-unsigned int
-csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len, unsigned int sum)
-{
-	//  unsigned dummy;
-	pr_debug("csum_partial_copy_nocheck src %p dst %p len %d\n", src, dst,
-		len);
-
-	return csum_partial_copy(src, dst, len, sum);
+	return (__wsum)result;
 }
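
The checksum conversion above keeps the intermediate sum in 64 bits and then folds it back down with an end-around carry so nothing is lost in the typedef-less network stack. A tiny standalone illustration of that fold (the input value is arbitrary):

/* Standalone illustration of the 64-to-32-bit end-around-carry fold
 * used by csum_partial() and csum_tcpudp_nofold() above. */
#include <stdio.h>

int main(void)
{
	unsigned long long result = 0x1fffffffeULL;	/* arbitrary example sum */

	/* 32+c bits -> 32 bits; one pass may leave a new carry, so repeat */
	result = (result & 0xffffffff) + (result >> 32);
	result = (result & 0xffffffff) + (result >> 32);

	printf("folded = 0x%08llx\n", result);
	return 0;
}
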
diff --git a/arch/sh64/lib/dbg.c b/arch/sh64/lib/dbg.c
index 1326f45..4310fc8 100644
--- a/arch/sh64/lib/dbg.c
+++ b/arch/sh64/lib/dbg.c
@@ -383,7 +383,7 @@
 /* ======================================================================= */
 
 /*
-** Depending on <base> scan the MMU, Data or Instrction side
+** Depending on <base> scan the MMU, Data or Instruction side
 ** looking for a valid mapping matching Eaddr & asid.
 ** Return -1 if not found or the TLB id entry otherwise.
 ** Note: it works only for 4k pages!
diff --git a/arch/sh64/mm/fault.c b/arch/sh64/mm/fault.c
index 8e2f6c2..4f72ab3 100644
--- a/arch/sh64/mm/fault.c
+++ b/arch/sh64/mm/fault.c
@@ -154,7 +154,7 @@
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
 	/* TLB misses upon some cache flushes get done under cli() */
diff --git a/arch/sh64/mm/hugetlbpage.c b/arch/sh64/mm/hugetlbpage.c
index 187cf01..4b455f6 100644
--- a/arch/sh64/mm/hugetlbpage.c
+++ b/arch/sh64/mm/hugetlbpage.c
@@ -53,6 +53,11 @@
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 2f96610..92a7c8a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -212,8 +212,8 @@
 	tristate "Sun4m LED driver"
 	help
 	  This driver toggles the front-panel LED on sun4m systems
-	  in a user-specifyable manner.  It's state can be probed
-	  by reading /proc/led and it's blinking mode can be changed
+	  in a user-specifiable manner.  Its state can be probed
+	  by reading /proc/led and its blinking mode can be changed
 	  via writes to /proc/led
 
 source "fs/Kconfig.binfmt"
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 5cc5ff7..b73e6b9 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -11,6 +11,7 @@
   . = 0x10000 + SIZEOF_HEADERS;
   .text 0xf0004000 :
   {
+    _text = .;
     *(.text)
     SCHED_TEXT
     LOCK_TEXT
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 4d8ed9c..01fc6c2 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -35,7 +35,7 @@
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	inc_preempt_count();
+	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
@@ -70,8 +70,7 @@
 	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
 
 	if (vaddr < FIXADDR_START) { // FIXME
-		dec_preempt_count();
-		preempt_check_resched();
+		pagefault_enable();
 		return;
 	}
 
@@ -97,8 +96,7 @@
 #endif
 #endif
 
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_enable();
 }
 
 /* We may be fed a pagetable here by ptep_to_xxx and others. */
diff --git a/arch/sparc64/kernel/binfmt_elf32.c b/arch/sparc64/kernel/binfmt_elf32.c
index a98f3ae..9ad84ff 100644
--- a/arch/sparc64/kernel/binfmt_elf32.c
+++ b/arch/sparc64/kernel/binfmt_elf32.c
@@ -141,7 +141,6 @@
 	value->tv_sec = jiffies / HZ;
 }
 
-#define elf_addr_t	u32
 #undef start_thread
 #define start_thread start_thread32
 #define init_elf_binfmt init_elf32_binfmt
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index e02f01b..dfc41cd 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -646,13 +646,4 @@
 }
 EXPORT_SYMBOL(pci_domain_nr);
 
-int pcibios_prep_mwi(struct pci_dev *dev)
-{
-	/* We set correct PCI_CACHE_LINE_SIZE register values for every
-	 * device probed on this platform.  So there is nothing to check
-	 * and this always succeeds.
-	 */
-	return 0;
-}
-
 #endif /* !(CONFIG_PCI) */
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index bd9de8c..4a6063f 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -13,6 +13,7 @@
   . = 0x4000;
   .text 0x0000000000404000 :
   {
+    _text = .;
     *(.text)
     SCHED_TEXT
     LOCK_TEXT
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 53b9b1f..33fd0b2 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -235,6 +235,11 @@
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 09cb7fc..a8e8802 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -176,9 +176,9 @@
 
 int bigkernel = 0;
 
-kmem_cache_t *pgtable_cache __read_mostly;
+struct kmem_cache *pgtable_cache __read_mostly;
 
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
 	clear_page(addr);
 }
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index beaa028..236d02f 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -239,7 +239,7 @@
 	}
 }
 
-static kmem_cache_t *tsb_caches[8] __read_mostly;
+static struct kmem_cache *tsb_caches[8] __read_mostly;
 
 static const char *tsb_cache_names[8] = {
 	"tsb_8KB",
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 3576b3c..7d4190e 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -638,7 +638,7 @@
 	return -1;
 }
 
-void chan_interrupt(struct list_head *chans, struct work_struct *task,
+void chan_interrupt(struct list_head *chans, struct delayed_work *task,
 		    struct tty_struct *tty, int irq)
 {
 	struct list_head *ele, *next;
diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c
index 2f880cb..0cad354 100644
--- a/arch/um/drivers/chan_user.c
+++ b/arch/um/drivers/chan_user.c
@@ -120,7 +120,7 @@
 	/* These are synchronization calls between various UML threads on the
 	 * host - since they are not different kernel threads, we cannot use
 	 * kernel semaphores. We don't use SysV semaphores because they are
-	 * persistant. */
+	 * persistent. */
 	count = os_read_file(pipe_fd, &c, sizeof(c));
 	if(count != sizeof(c))
 		printk("winch_thread : failed to read synchronization byte, "
diff --git a/arch/um/drivers/daemon_kern.c b/arch/um/drivers/daemon_kern.c
index 8243869..9c2e7a7 100644
--- a/arch/um/drivers/daemon_kern.c
+++ b/arch/um/drivers/daemon_kern.c
@@ -98,4 +98,4 @@
 	return 0;
 }
 
-__initcall(register_daemon);
+late_initcall(register_daemon);
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 426633e..aa3090d 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -31,9 +31,9 @@
 	return IRQ_HANDLED;
 }
 
-static void line_timer_cb(void *arg)
+static void line_timer_cb(struct work_struct *work)
 {
-	struct line *line = arg;
+	struct line *line = container_of(work, struct line, task.work);
 
 	if(!line->throttled)
 		chan_interrupt(&line->chan_list, &line->task, line->tty,
@@ -443,7 +443,7 @@
 		 * is registered.
 		 */
 		enable_chan(line);
-		INIT_WORK(&line->task, line_timer_cb, line);
+		INIT_DELAYED_WORK(&line->task, line_timer_cb);
 
 		if(!line->sigio){
 			chan_enable_winch(&line->chan_list, tty);
diff --git a/arch/um/drivers/mcast_kern.c b/arch/um/drivers/mcast_kern.c
index c090fbd..52ccb7b 100644
--- a/arch/um/drivers/mcast_kern.c
+++ b/arch/um/drivers/mcast_kern.c
@@ -127,4 +127,4 @@
 	return 0;
 }
 
-__initcall(register_mcast);
+late_initcall(register_mcast);
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 7b17216..96f0189 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -56,7 +56,7 @@
 
 static LIST_HEAD(mc_requests);
 
-static void mc_work_proc(void *unused)
+static void mc_work_proc(struct work_struct *unused)
 {
 	struct mconsole_entry *req;
 	unsigned long flags;
@@ -72,7 +72,7 @@
 	}
 }
 
-static DECLARE_WORK(mconsole_work, mc_work_proc, NULL);
+static DECLARE_WORK(mconsole_work, mc_work_proc);
 
 static irqreturn_t mconsole_interrupt(int irq, void *dev_id)
 {
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index ec9eb8b..286bc0b 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -99,6 +99,7 @@
 		 * same device, since it tests for (dev->flags & IFF_UP). So
 		 * there's no harm in delaying the device shutdown. */
 		schedule_work(&close_work);
+#error this is not permitted - close_work will go out of scope
 		goto out;
 	}
 	reactivate_fd(lp->fd, UM_ETH_IRQ);
diff --git a/arch/um/drivers/pcap_kern.c b/arch/um/drivers/pcap_kern.c
index 6e1ef85..e67362a 100644
--- a/arch/um/drivers/pcap_kern.c
+++ b/arch/um/drivers/pcap_kern.c
@@ -109,4 +109,4 @@
 	return 0;
 }
 
-__initcall(register_pcap);
+late_initcall(register_pcap);
diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c
index ce9f373..6dfe632 100644
--- a/arch/um/drivers/port_kern.c
+++ b/arch/um/drivers/port_kern.c
@@ -132,7 +132,7 @@
 DECLARE_MUTEX(ports_sem);
 struct list_head ports = LIST_HEAD_INIT(ports);
 
-void port_work_proc(void *unused)
+void port_work_proc(struct work_struct *unused)
 {
 	struct port_list *port;
 	struct list_head *ele;
@@ -150,7 +150,7 @@
 	local_irq_restore(flags);
 }
 
-DECLARE_WORK(port_work, port_work_proc, NULL);
+DECLARE_WORK(port_work, port_work_proc);
 
 static irqreturn_t port_interrupt(int irq, void *data)
 {
diff --git a/arch/um/drivers/slip_kern.c b/arch/um/drivers/slip_kern.c
index 788da54..25634bd 100644
--- a/arch/um/drivers/slip_kern.c
+++ b/arch/um/drivers/slip_kern.c
@@ -95,4 +95,4 @@
 	return 0;
 }
 
-__initcall(register_slip);
+late_initcall(register_slip);
diff --git a/arch/um/drivers/slirp_kern.c b/arch/um/drivers/slirp_kern.c
index ae322e1..b3ed8fb 100644
--- a/arch/um/drivers/slirp_kern.c
+++ b/arch/um/drivers/slirp_kern.c
@@ -119,4 +119,4 @@
 	return 0;
 }
 
-__initcall(register_slirp);
+late_initcall(register_slirp);
diff --git a/arch/um/include/chan_kern.h b/arch/um/include/chan_kern.h
index 572d286..9003a34 100644
--- a/arch/um/include/chan_kern.h
+++ b/arch/um/include/chan_kern.h
@@ -27,7 +27,7 @@
 	void *data;
 };
 
-extern void chan_interrupt(struct list_head *chans, struct work_struct *task,
+extern void chan_interrupt(struct list_head *chans, struct delayed_work *task,
 			   struct tty_struct *tty, int irq);
 extern int parse_chan_pair(char *str, struct line *line, int device,
 			   const struct chan_opts *opts);
diff --git a/arch/um/include/line.h b/arch/um/include/line.h
index 7be2481..214ee76 100644
--- a/arch/um/include/line.h
+++ b/arch/um/include/line.h
@@ -51,7 +51,7 @@
 	char *tail;
 
 	int sigio;
-	struct work_struct task;
+	struct delayed_work task;
 	const struct line_driver *driver;
 	int have_irq;
 };
diff --git a/arch/um/include/sysdep-i386/checksum.h b/arch/um/include/sysdep-i386/checksum.h
index 052bb06..0cb4645 100644
--- a/arch/um/include/sysdep-i386/checksum.h
+++ b/arch/um/include/sysdep-i386/checksum.h
@@ -20,8 +20,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, 
-			  unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  *	Note: when you get a NULL pointer exception here this means someone
@@ -32,8 +31,8 @@
  */
 
 static __inline__
-unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst,
-				       int len, int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				       int len, __wsum sum)
 {
 	memcpy(dst, src, len);
 	return csum_partial(dst, len, sum);
@@ -48,36 +47,25 @@
  */
 
 static __inline__
-unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
-					 unsigned char *dst,
-					 int len, int sum, int *err_ptr)
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+					 int len, __wsum sum, int *err_ptr)
 {
-	if(copy_from_user(dst, src, len)){
+	if (copy_from_user(dst, src, len)) {
 		*err_ptr = -EFAULT;
-		return(-1);
+		return (__force __wsum)-1;
 	}
 
 	return csum_partial(dst, len, sum);
 }
 
 /*
- * These are the old (and unsafe) way of doing checksums, a warning message 
- * will be printed if they are used and an exception occurs.
- *
- * these functions should go away after some time.
- */
-
-#define csum_partial_copy_fromuser csum_partial_copy_from_user
-
-/*
  *	This is a version of ip_compute_csum() optimized for IP headers,
  *	which always checksum on 4 octet boundaries.
  *
  *	By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
  *	Arnt Gulbrandsen.
  */
-static inline unsigned short ip_fast_csum(unsigned char * iph,
-					  unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int sum;
 
@@ -105,29 +93,29 @@
 	: "=r" (sum), "=r" (iph), "=r" (ihl)
 	: "1" (iph), "2" (ihl)
 	: "memory");
-	return sum;
+	return (__force __sum16)sum;
 }
 
 /*
  *	Fold a partial checksum
  */
 
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	__asm__(
 		"addl %1, %0		;\n"
 		"adcl $0xffff, %0	;\n"
 		: "=r" (sum)
-		: "r" (sum << 16), "0" (sum & 0xffff0000)
+		: "r" ((__force u32)sum << 16),
+		  "0" ((__force u32)sum & 0xffff0000)
 	);
-	return (~sum) >> 16;
+	return (__force __sum16)(~(__force u32)sum >> 16);
 }
 
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
-						   unsigned long daddr,
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum)
+						   __wsum sum)
 {
     __asm__(
 	"addl %1, %0	;\n"
@@ -135,7 +123,7 @@
 	"adcl %3, %0	;\n"
 	"adcl $0, %0	;\n"
 	: "=r" (sum)
-	: "g" (daddr), "g"(saddr), "g"((ntohs(len)<<16)+proto*256), "0"(sum));
+	: "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum));
     return sum;
 }
 
@@ -143,11 +131,10 @@
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum)
+						   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -157,17 +144,16 @@
  * in icmp.c
  */
 
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
     return csum_fold (csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						     struct in6_addr *daddr,
-						     __u32 len,
-						     unsigned short proto,
-						     unsigned int sum)
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+					  const struct in6_addr *daddr,
+					  __u32 len, unsigned short proto,
+					  __wsum sum)
 {
 	__asm__(
 		"addl 0(%1), %0		;\n"
@@ -192,14 +178,14 @@
  *	Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-static __inline__ unsigned int csum_and_copy_to_user(const unsigned char *src,
-						     unsigned char __user *dst,
-						     int len, int sum, int *err_ptr)
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+						     void __user *dst,
+						     int len, __wsum sum, int *err_ptr)
 {
-	if (access_ok(VERIFY_WRITE, dst, len)){
-		if(copy_to_user(dst, src, len)){
+	if (access_ok(VERIFY_WRITE, dst, len)) {
+		if (copy_to_user(dst, src, len)) {
 			*err_ptr = -EFAULT;
-			return(-1);
+			return (__force __wsum)-1;
 		}
 
 		return csum_partial(src, len, sum);
@@ -208,7 +194,7 @@
 	if (len)
 		*err_ptr = -EFAULT;
 
-	return -1; /* invalid checksum */
+	return (__force __wsum)-1; /* invalid checksum */
 }
 
 #endif
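The checksum helpers above switch from plain integers to the __wsum and __sum16 annotations. Both are __bitwise types checked by sparse, so accidentally mixing an unfolded 32-bit partial sum with a folded 16-bit checksum now produces a warning, and every deliberate conversion is marked with a (__force ...) cast. For reference (not part of the patch), the asm csum_fold() above is equivalent to this portable C version:

	static inline __sum16 csum_fold_generic(__wsum csum)
	{
		u32 sum = (__force u32)csum;

		sum = (sum & 0xffff) + (sum >> 16);	/* fold the high half in */
		sum = (sum & 0xffff) + (sum >> 16);	/* absorb any new carry */
		return (__force __sum16)~sum;
	}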
diff --git a/arch/um/include/sysdep-i386/ptrace.h b/arch/um/include/sysdep-i386/ptrace.h
index 6670cc9..52b398b 100644
--- a/arch/um/include/sysdep-i386/ptrace.h
+++ b/arch/um/include/sysdep-i386/ptrace.h
@@ -75,7 +75,7 @@
 #endif
 #ifdef UML_CONFIG_MODE_SKAS
 	struct skas_regs {
-		unsigned long regs[HOST_FRAME_SIZE];
+		unsigned long regs[MAX_REG_NR];
 		unsigned long fp[HOST_FP_SIZE];
 		unsigned long xfp[HOST_XFP_SIZE];
                 struct faultinfo faultinfo;
diff --git a/arch/um/include/sysdep-i386/stub.h b/arch/um/include/sysdep-i386/stub.h
index b492b12..4fffae7 100644
--- a/arch/um/include/sysdep-i386/stub.h
+++ b/arch/um/include/sysdep-i386/stub.h
@@ -9,6 +9,7 @@
 #include <sys/mman.h>
 #include <asm/ptrace.h>
 #include <asm/unistd.h>
+#include <asm/page.h>
 #include "stub-data.h"
 #include "kern_constants.h"
 #include "uml-config.h"
diff --git a/arch/um/include/sysdep-x86_64/checksum.h b/arch/um/include/sysdep-x86_64/checksum.h
index ea97005..a5be903 100644
--- a/arch/um/include/sysdep-x86_64/checksum.h
+++ b/arch/um/include/sysdep-x86_64/checksum.h
@@ -9,8 +9,7 @@
 #include "linux/in6.h"
 #include "asm/uaccess.h"
 
-extern unsigned csum_partial(const unsigned char *buff, unsigned len,
-                             unsigned sum);
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  *	Note: when you get a NULL pointer exception here this means someone
@@ -21,21 +20,21 @@
  */
 
 static __inline__
-unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst,
-				       int len, int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				       int len, __wsum sum)
 {
 	memcpy(dst, src, len);
 	return(csum_partial(dst, len, sum));
 }
 
 static __inline__
-unsigned int csum_partial_copy_from_user(const unsigned char *src,
-                                         unsigned char *dst, int len, int sum,
+__wsum csum_partial_copy_from_user(const void __user *src,
+                                         void *dst, int len, __wsum sum,
                                          int *err_ptr)
 {
-        if(copy_from_user(dst, src, len)){
+        if (copy_from_user(dst, src, len)) {
                 *err_ptr = -EFAULT;
-                return(-1);
+                return (__force __wsum)-1;
         }
         return csum_partial(dst, len, sum);
 }
@@ -48,15 +47,16 @@
  * the last step before putting a checksum into a packet.
  * Make sure not to mix with 64bit checksums.
  */
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	__asm__(
 		"  addl %1,%0\n"
 		"  adcl $0xffff,%0"
 		: "=r" (sum)
-		: "r" (sum << 16), "0" (sum & 0xffff0000)
+		: "r" ((__force u32)sum << 16),
+		  "0" ((__force u32)sum & 0xffff0000)
 	);
-	return (~sum) >> 16;
+	return (__force __sum16)(~(__force u32)sum >> 16);
 }
 
 /**
@@ -70,28 +70,27 @@
  * Returns the pseudo header checksum the input data. Result is
  * 32bit unfolded.
  */
-static inline unsigned long
-csum_tcpudp_nofold(unsigned saddr, unsigned daddr, unsigned short len,
-		   unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		   unsigned short proto, __wsum sum)
 {
 	asm("  addl %1, %0\n"
 	    "  adcl %2, %0\n"
 	    "  adcl %3, %0\n"
 	    "  adcl $0, %0\n"
 		: "=r" (sum)
-	    : "g" (daddr), "g" (saddr), "g" ((ntohs(len)<<16)+proto*256), "0" (sum));
-    return sum;
+	    : "g" (daddr), "g" (saddr), "g" ((len + proto) << 8), "0" (sum));
+	return sum;
 }
 
 /*
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
-						   unsigned short len,
-						   unsigned short proto,
-						   unsigned int sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+					   unsigned short len,
+					   unsigned short proto,
+					   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -101,7 +100,7 @@
  * iph: ipv4 header
  * ihl: length of header / 4
  */
-static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int sum;
 
@@ -128,7 +127,7 @@
 	: "=r" (sum), "=r" (iph), "=r" (ihl)
 	: "1" (iph), "2" (ihl)
 	: "memory");
-	return(sum);
+	return (__force __sum16)sum;
 }
 
 static inline unsigned add32_with_carry(unsigned a, unsigned b)
@@ -140,6 +139,6 @@
         return a;
 }
 
-extern unsigned short ip_compute_csum(unsigned char * buff, int len);
+extern __sum16 ip_compute_csum(const void *buff, int len);
 
 #endif
diff --git a/arch/um/include/sysdep-x86_64/ptrace.h b/arch/um/include/sysdep-x86_64/ptrace.h
index 617bb9ef..66cb400 100644
--- a/arch/um/include/sysdep-x86_64/ptrace.h
+++ b/arch/um/include/sysdep-x86_64/ptrace.h
@@ -108,7 +108,7 @@
 		 * file size, while i386 uses FRAME_SIZE.  Therefore, we need
 		 * to use UM_FRAME_SIZE here instead of HOST_FRAME_SIZE.
 		 */
-		unsigned long regs[UM_FRAME_SIZE];
+		unsigned long regs[MAX_REG_NR];
 		unsigned long fp[HOST_FP_SIZE];
                 struct faultinfo faultinfo;
 		long syscall;
diff --git a/arch/um/os-Linux/drivers/ethertap_kern.c b/arch/um/os-Linux/drivers/ethertap_kern.c
index 16385e2..7054182 100644
--- a/arch/um/os-Linux/drivers/ethertap_kern.c
+++ b/arch/um/os-Linux/drivers/ethertap_kern.c
@@ -105,4 +105,4 @@
 	return 0;
 }
 
-__initcall(register_ethertap);
+late_initcall(register_ethertap);
diff --git a/arch/um/os-Linux/drivers/tuntap_kern.c b/arch/um/os-Linux/drivers/tuntap_kern.c
index 0edbac6..76570a2 100644
--- a/arch/um/os-Linux/drivers/tuntap_kern.c
+++ b/arch/um/os-Linux/drivers/tuntap_kern.c
@@ -90,4 +90,4 @@
 	return 0;
 }
 
-__initcall(register_tuntap);
+late_initcall(register_tuntap);
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c
index e299ee5..49057d8 100644
--- a/arch/um/sys-i386/ldt.c
+++ b/arch/um/sys-i386/ldt.c
@@ -3,7 +3,6 @@
  * Licensed under the GPL
  */
 
-#include "linux/stddef.h"
 #include "linux/sched.h"
 #include "linux/slab.h"
 #include "linux/types.h"
diff --git a/arch/um/sys-i386/ptrace_user.c b/arch/um/sys-i386/ptrace_user.c
index 5f3cc66..01212c8 100644
--- a/arch/um/sys-i386/ptrace_user.c
+++ b/arch/um/sys-i386/ptrace_user.c
@@ -4,9 +4,9 @@
  */
 
 #include <stdio.h>
+#include <stddef.h>
 #include <errno.h>
 #include <unistd.h>
-#include <linux/stddef.h>
 #include "ptrace_user.h"
 /* Grr, asm/user.h includes asm/ptrace.h, so has to follow ptrace_user.h */
 #include <asm/user.h>
diff --git a/arch/um/sys-i386/user-offsets.c b/arch/um/sys-i386/user-offsets.c
index 6f4ef2b..447306b 100644
--- a/arch/um/sys-i386/user-offsets.c
+++ b/arch/um/sys-i386/user-offsets.c
@@ -2,7 +2,7 @@
 #include <signal.h>
 #include <asm/ptrace.h>
 #include <asm/user.h>
-#include <linux/stddef.h>
+#include <stddef.h>
 #include <sys/poll.h>
 
 #define DEFINE(sym, val) \
diff --git a/arch/v850/kernel/v850_ksyms.c b/arch/v850/kernel/v850_ksyms.c
index 67bc48e..93575fd 100644
--- a/arch/v850/kernel/v850_ksyms.c
+++ b/arch/v850/kernel/v850_ksyms.c
@@ -24,7 +24,7 @@
 EXPORT_SYMBOL (__bug);
 
 /* Networking helper routines. */
-EXPORT_SYMBOL (csum_partial_copy);
+EXPORT_SYMBOL (csum_partial_copy_nocheck);
 EXPORT_SYMBOL (csum_partial_copy_from_user);
 EXPORT_SYMBOL (ip_compute_csum);
 EXPORT_SYMBOL (ip_fast_csum);
diff --git a/arch/v850/kernel/vmlinux.lds.S b/arch/v850/kernel/vmlinux.lds.S
index 88d087f..3a5fd07 100644
--- a/arch/v850/kernel/vmlinux.lds.S
+++ b/arch/v850/kernel/vmlinux.lds.S
@@ -90,6 +90,7 @@
 
 /* Kernel text segment, and some constant data areas.  */
 #define TEXT_CONTENTS							      \
+		_text = .;						      \
 		__stext = . ;						      \
         	*(.text)						      \
 		SCHED_TEXT						      \
diff --git a/arch/v850/lib/checksum.c b/arch/v850/lib/checksum.c
index fa58726..042158d 100644
--- a/arch/v850/lib/checksum.c
+++ b/arch/v850/lib/checksum.c
@@ -88,32 +88,32 @@
  *	This is a version of ip_compute_csum() optimized for IP headers,
  *	which always checksum on 4 octet boundaries.
  */
-unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
-	return ~do_csum(iph,ihl*4);
+	return (__force __sum16)~do_csum(iph,ihl*4);
 }
 
 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-unsigned short ip_compute_csum(const unsigned char * buff, int len)
+__sum16 ip_compute_csum(const void *buff, int len)
 {
-	return ~do_csum(buff,len);
+	return (__force __sum16)~do_csum(buff,len);
 }
 
 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
  */
-unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
 {
 	unsigned int result = do_csum(buff, len);
 
 	/* add in old sum, and carry.. */
-	result += sum;
-	if(sum > result)
+	result += (__force u32)sum;
+	if ((__force u32)sum > result)
 		result += 1;
-	return result;
+	return (__force __wsum)result;
 }
 
 EXPORT_SYMBOL(csum_partial);
@@ -121,8 +121,8 @@
 /*
  * copy while checksumming, otherwise like csum_partial
  */
-unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst,
-                               int len, unsigned int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+                               int len, __wsum sum)
 {
 	/*
 	 * It's 2:30 am and I don't feel like doing it real ...
@@ -138,9 +138,9 @@
  * Copy from userspace and compute checksum.  If we catch an exception
  * then zero the rest of the buffer.
  */
-unsigned int csum_partial_copy_from_user (const unsigned char *src,
-					  unsigned char *dst,
-                                          int len, unsigned int sum,
+__wsum csum_partial_copy_from_user (const void *src,
+					  void *dst,
+                                          int len, __wsum sum,
                                           int *err_ptr)
 {
 	int missing;
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 010d226..bfbb9bc 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -122,7 +122,7 @@
 
 choice
 	prompt "Processor family"
-	default MK8
+	default GENERIC_CPU
 
 config MK8
 	bool "AMD-Opteron/Athlon64"
@@ -130,16 +130,31 @@
 	  Optimize for AMD Opteron/Athlon64/Hammer/K8 CPUs.
 
 config MPSC
-       bool "Intel EM64T"
+       bool "Intel P4 / older Netburst based Xeon"
        help
-	  Optimize for Intel Pentium 4 and Xeon CPUs with Intel
-	  Extended Memory 64 Technology(EM64T). For details see
+	  Optimize for Intel Pentium 4 and older Nocona/Dempsey Xeon CPUs
+	  with Intel Extended Memory 64 Technology(EM64T). For details see
 	  <http://www.intel.com/technology/64bitextensions/>.
+	  Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the
+	  Netburst core and shouldn't use this option. You can distinguish them
+	  using the cpu family field in /proc/cpuinfo. Family 15 is an older
+	  Xeon, Family 6 a newer one (this rule only applies to systems that
+	  support EM64T).
+
+config MCORE2
+	bool "Intel Core2 / newer Xeon"
+	help
+	  Optimize for Intel Core2 and newer Xeons (51xx).
+	  You can distinguish the newer Xeons from the older ones using
+	  the cpu family field in /proc/cpuinfo. Family 15 is an older Xeon
+	  (use CONFIG_MPSC then), Family 6 is a newer one. This rule only
+	  applies to CPUs that support EM64T.
 
 config GENERIC_CPU
 	bool "Generic-x86-64"
 	help
 	  Generic x86-64 CPU.
+	  Run equally well on all x86-64 CPUs.
 
 endchoice
 
@@ -149,12 +164,12 @@
 config X86_L1_CACHE_BYTES
 	int
 	default "128" if GENERIC_CPU || MPSC
-	default "64" if MK8
+	default "64" if MK8 || MCORE2
 
 config X86_L1_CACHE_SHIFT
 	int
 	default "7" if GENERIC_CPU || MPSC
-	default "6" if MK8
+	default "6" if MK8 || MCORE2
 
 config X86_INTERNODE_CACHE_BYTES
 	int
@@ -344,11 +359,6 @@
        depends on NUMA
        default y
 
-
-config ARCH_DISCONTIGMEM_ENABLE
-	def_bool y
-	depends on NUMA
-
 config ARCH_DISCONTIGMEM_DEFAULT
 	def_bool y
 	depends on NUMA
@@ -455,6 +465,17 @@
 	  Normally the kernel will make the right choice by itself.
 	  If unsure, say Y.
 
+config CALGARY_IOMMU_ENABLED_BY_DEFAULT
+	bool "Should Calgary be enabled by default?"
+	default y
+	depends on CALGARY_IOMMU
+	help
+	  Should Calgary be enabled by default? If you choose 'y', Calgary
+	  will be used (if it exists). If you choose 'n', Calgary will not be
+	  used even if it exists. If you choose 'n' and would like to use
+	  Calgary anyway, pass 'iommu=calgary' on the kernel command line.
+	  If unsure, say Y.
+
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
 	bool
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index 6e38d4d..b471b85 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -30,6 +30,10 @@
 cflags-kernel-y	:=
 cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
 cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+# gcc doesn't support -march=core2 yet as of gcc 4.3, but I hope it
+# will eventually. Use -mtune=generic as a fallback.
+cflags-$(CONFIG_MCORE2) += \
+	$(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
 cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
 
 cflags-y += -m64
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 0f5d44e..96f226c 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.19-rc2-git4
-# Sat Oct 21 03:38:52 2006
+# Linux kernel version: 2.6.19-git7
+# Wed Dec  6 23:50:47 2006
 #
 CONFIG_X86_64=y
 CONFIG_64BIT=y
@@ -47,13 +47,14 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 # CONFIG_CPUSETS is not set
+CONFIG_SYSFS_DEPRECATED=y
 # CONFIG_RELAY is not set
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
 # CONFIG_EMBEDDED is not set
 CONFIG_UID16=y
-# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -87,9 +88,7 @@
 # Block layer
 #
 CONFIG_BLOCK=y
-CONFIG_LBD=y
 # CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
 
 #
 # IO Schedulers
@@ -111,10 +110,11 @@
 # CONFIG_X86_VSMP is not set
 # CONFIG_MK8 is not set
 # CONFIG_MPSC is not set
-CONFIG_GENERIC_CPU=y
-CONFIG_X86_L1_CACHE_BYTES=128
-CONFIG_X86_L1_CACHE_SHIFT=7
-CONFIG_X86_INTERNODE_CACHE_BYTES=128
+CONFIG_MCORE2=y
+# CONFIG_GENERIC_CPU is not set
+CONFIG_X86_L1_CACHE_BYTES=64
+CONFIG_X86_L1_CACHE_SHIFT=6
+CONFIG_X86_INTERNODE_CACHE_BYTES=64
 CONFIG_X86_TSC=y
 CONFIG_X86_GOOD_APIC=y
 # CONFIG_MICROCODE is not set
@@ -322,6 +322,7 @@
 # CONFIG_TCP_CONG_ADVANCED is not set
 CONFIG_TCP_CONG_CUBIC=y
 CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
 CONFIG_IPV6=y
 # CONFIG_IPV6_PRIVACY is not set
 # CONFIG_IPV6_ROUTER_PREF is not set
@@ -624,6 +625,7 @@
 # CONFIG_PATA_IT821X is not set
 # CONFIG_PATA_JMICRON is not set
 # CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
 # CONFIG_PATA_MPIIX is not set
 # CONFIG_PATA_OLDPIIX is not set
 # CONFIG_PATA_NETCELL is not set
@@ -795,6 +797,7 @@
 CONFIG_S2IO=m
 # CONFIG_S2IO_NAPI is not set
 # CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
 
 #
 # Token Ring devices
@@ -927,10 +930,6 @@
 # CONFIG_DTLK is not set
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
-#
 CONFIG_AGP=y
 CONFIG_AGP_AMD64=y
 CONFIG_AGP_INTEL=y
@@ -1135,6 +1134,7 @@
 # CONFIG_USB_BANDWIDTH is not set
 # CONFIG_USB_DYNAMIC_MINORS is not set
 # CONFIG_USB_SUSPEND is not set
+# CONFIG_USB_MULTITHREAD_PROBE is not set
 # CONFIG_USB_OTG is not set
 
 #
@@ -1212,6 +1212,7 @@
 # CONFIG_USB_KAWETH is not set
 # CONFIG_USB_PEGASUS is not set
 # CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET_MII is not set
 # CONFIG_USB_USBNET is not set
 CONFIG_USB_MON=y
 
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index 82ef182..543ef4f 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -305,8 +305,6 @@
 #undef MODULE_DESCRIPTION
 #undef MODULE_AUTHOR
 
-#define elf_addr_t __u32
-
 static void elf32_init(struct pt_regs *);
 
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
@@ -351,7 +349,7 @@
 		bprm->loader += stack_base;
 	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt) 
 		return -ENOMEM; 
 
diff --git a/arch/x86_64/ia32/ia32_signal.c b/arch/x86_64/ia32/ia32_signal.c
index 0e0a266..ff499ef 100644
--- a/arch/x86_64/ia32/ia32_signal.c
+++ b/arch/x86_64/ia32/ia32_signal.c
@@ -584,6 +584,11 @@
 	regs->rdx = (unsigned long) &frame->info;
 	regs->rcx = (unsigned long) &frame->uc;
 
+	/* Make -mregparm=3 work */
+	regs->rax = sig;
+	regs->rdx = (unsigned long) &frame->info;
+	regs->rcx = (unsigned long) &frame->uc;
+
 	asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); 
 	asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); 
 	
diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
index 3a01329..3e5ed20 100644
--- a/arch/x86_64/ia32/syscall32.c
+++ b/arch/x86_64/ia32/syscall32.c
@@ -49,7 +49,7 @@
 	struct mm_struct *mm = current->mm;
 	int ret;
 
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma)
 		return -ENOMEM;
 
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 4d9d5ed..124b2d2 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -25,6 +25,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/sysdev.h>
 #include <linux/module.h>
+#include <linux/ioport.h>
 
 #include <asm/atomic.h>
 #include <asm/smp.h>
@@ -45,6 +46,12 @@
 
 int disable_apic_timer __initdata;
 
+static struct resource *ioapic_resources;
+static struct resource lapic_resource = {
+	.name = "Local APIC",
+	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
+};
+
 /*
  * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
  * IPIs in place of local APIC timers
@@ -133,7 +140,6 @@
 		apic_write(APIC_LVTERR, APIC_LVT_MASKED);
 	if (maxlvt >= 4)
 		apic_write(APIC_LVTPC, APIC_LVT_MASKED);
-	v = GET_APIC_VERSION(apic_read(APIC_LVR));
 	apic_write(APIC_ESR, 0);
 	apic_read(APIC_ESR);
 }
@@ -452,23 +458,30 @@
 static int lapic_suspend(struct sys_device *dev, pm_message_t state)
 {
 	unsigned long flags;
+	int maxlvt;
 
 	if (!apic_pm_state.active)
 		return 0;
 
+	maxlvt = get_maxlvt();
+
 	apic_pm_state.apic_id = apic_read(APIC_ID);
 	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
 	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
 	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
 	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
 	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
-	apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
+	if (maxlvt >= 4)
+		apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
 	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
 	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
 	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
 	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
 	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
-	apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
+#ifdef CONFIG_X86_MCE_INTEL
+	if (maxlvt >= 5)
+		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
+#endif
 	local_irq_save(flags);
 	disable_local_APIC();
 	local_irq_restore(flags);
@@ -479,10 +492,13 @@
 {
 	unsigned int l, h;
 	unsigned long flags;
+	int maxlvt;
 
 	if (!apic_pm_state.active)
 		return 0;
 
+	maxlvt = get_maxlvt();
+
 	local_irq_save(flags);
 	rdmsr(MSR_IA32_APICBASE, l, h);
 	l &= ~MSR_IA32_APICBASE_BASE;
@@ -496,8 +512,12 @@
 	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
 	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
 	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
-	apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
-	apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
+#ifdef CONFIG_X86_MCE_INTEL
+	if (maxlvt >= 5)
+		apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
+#endif
+	if (maxlvt >= 4)
+		apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
 	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
 	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
 	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
@@ -585,6 +605,64 @@
 	return 0;
 }
 
+#ifdef CONFIG_X86_IO_APIC
+static struct resource * __init ioapic_setup_resources(void)
+{
+#define IOAPIC_RESOURCE_NAME_SIZE 11
+	unsigned long n;
+	struct resource *res;
+	char *mem;
+	int i;
+
+	if (nr_ioapics <= 0)
+		return NULL;
+
+	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
+	n *= nr_ioapics;
+
+	mem = alloc_bootmem(n);
+	res = (void *)mem;
+
+	if (mem != NULL) {
+		memset(mem, 0, n);
+		mem += sizeof(struct resource) * nr_ioapics;
+
+		for (i = 0; i < nr_ioapics; i++) {
+			res[i].name = mem;
+			res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+			sprintf(mem,  "IOAPIC %u", i);
+			mem += IOAPIC_RESOURCE_NAME_SIZE;
+		}
+	}
+
+	ioapic_resources = res;
+
+	return res;
+}
+
+static int __init ioapic_insert_resources(void)
+{
+	int i;
+	struct resource *r = ioapic_resources;
+
+	if (!r) {
+		printk("IO APIC resources could not be allocated.\n");
+		return -1;
+	}
+
+	for (i = 0; i < nr_ioapics; i++) {
+		insert_resource(&iomem_resource, r);
+		r++;
+	}
+
+	return 0;
+}
+
+/* Insert the IO APIC resources after PCI initialization has occurred to handle
+ * IO APICs that are mapped in on a BAR in PCI space. */
+late_initcall(ioapic_insert_resources);
+#endif
+
 void __init init_apic_mappings(void)
 {
 	unsigned long apic_phys;
@@ -604,6 +682,11 @@
 	apic_mapped = 1;
 	apic_printk(APIC_VERBOSE,"mapped APIC to %16lx (%16lx)\n", APIC_BASE, apic_phys);
 
+	/* Put local APIC into the resource map. */
+	lapic_resource.start = apic_phys;
+	lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
+	insert_resource(&iomem_resource, &lapic_resource);
+
 	/*
 	 * Fetch the APIC ID of the BSP in case we have a
 	 * default configuration (or the MP table is broken).
@@ -613,7 +696,9 @@
 	{
 		unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
 		int i;
+		struct resource *ioapic_res;
 
+		ioapic_res = ioapic_setup_resources();
 		for (i = 0; i < nr_ioapics; i++) {
 			if (smp_found_config) {
 				ioapic_phys = mp_ioapics[i].mpc_apicaddr;
@@ -625,6 +710,12 @@
 			apic_printk(APIC_VERBOSE,"mapped IOAPIC to %016lx (%016lx)\n",
 					__fix_to_virt(idx), ioapic_phys);
 			idx++;
+
+			if (ioapic_res != NULL) {
+				ioapic_res->start = ioapic_phys;
+				ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
+				ioapic_res++;
+			}
 		}
 	}
 }
@@ -644,10 +735,9 @@
 
 static void __setup_APIC_LVTT(unsigned int clocks)
 {
-	unsigned int lvtt_value, tmp_value, ver;
+	unsigned int lvtt_value, tmp_value;
 	int cpu = smp_processor_id();
 
-	ver = GET_APIC_VERSION(apic_read(APIC_LVR));
 	lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
 
 	if (cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask))
diff --git a/arch/x86_64/kernel/crash.c b/arch/x86_64/kernel/crash.c
index 3525f88..95a7a2c 100644
--- a/arch/x86_64/kernel/crash.c
+++ b/arch/x86_64/kernel/crash.c
@@ -28,71 +28,6 @@
 /* This keeps a track of which one is crashing cpu. */
 static int crashing_cpu;
 
-static u32 *append_elf_note(u32 *buf, char *name, unsigned type,
-						void *data, size_t data_len)
-{
-	struct elf_note note;
-
-	note.n_namesz = strlen(name) + 1;
-	note.n_descsz = data_len;
-	note.n_type   = type;
-	memcpy(buf, &note, sizeof(note));
-	buf += (sizeof(note) +3)/4;
-	memcpy(buf, name, note.n_namesz);
-	buf += (note.n_namesz + 3)/4;
-	memcpy(buf, data, note.n_descsz);
-	buf += (note.n_descsz + 3)/4;
-
-	return buf;
-}
-
-static void final_note(u32 *buf)
-{
-	struct elf_note note;
-
-	note.n_namesz = 0;
-	note.n_descsz = 0;
-	note.n_type   = 0;
-	memcpy(buf, &note, sizeof(note));
-}
-
-static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
-{
-	struct elf_prstatus prstatus;
-	u32 *buf;
-
-	if ((cpu < 0) || (cpu >= NR_CPUS))
-		return;
-
-	/* Using ELF notes here is opportunistic.
-	 * I need a well defined structure format
-	 * for the data I pass, and I need tags
-	 * on the data to indicate what information I have
-	 * squirrelled away.  ELF notes happen to provide
-	 * all of that, no need to invent something new.
-	 */
-
-	buf = (u32*)per_cpu_ptr(crash_notes, cpu);
-
-	if (!buf)
-		return;
-
-	memset(&prstatus, 0, sizeof(prstatus));
-	prstatus.pr_pid = current->pid;
-	elf_core_copy_regs(&prstatus.pr_reg, regs);
-	buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
-					sizeof(prstatus));
-	final_note(buf);
-}
-
-static void crash_save_self(struct pt_regs *regs)
-{
-	int cpu;
-
-	cpu = smp_processor_id();
-	crash_save_this_cpu(regs, cpu);
-}
-
 #ifdef CONFIG_SMP
 static atomic_t waiting_for_crash_ipi;
 
@@ -117,7 +52,7 @@
 		return NOTIFY_STOP;
 	local_irq_disable();
 
-	crash_save_this_cpu(regs, cpu);
+	crash_save_cpu(regs, cpu);
 	disable_local_APIC();
 	atomic_dec(&waiting_for_crash_ipi);
 	/* Assume hlt works */
@@ -196,5 +131,5 @@
 
 	disable_IO_APIC();
 
-	crash_save_self(regs);
+	crash_save_cpu(regs, smp_processor_id());
 }
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 68273bf..829698f 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -69,11 +69,18 @@
 
 static void ati_bugs(void)
 {
-	if (timer_over_8254 == 1) {
-		timer_over_8254 = 0;
-		printk(KERN_INFO
-	 	"ATI board detected. Disabling timer routing over 8254.\n");
-	}
+}
+
+static void intel_bugs(void)
+{
+	u16 device = read_pci_config_16(0, 0, 0, PCI_DEVICE_ID);
+
+#ifdef CONFIG_SMP
+	if (device == PCI_DEVICE_ID_INTEL_E7320_MCH ||
+	    device == PCI_DEVICE_ID_INTEL_E7520_MCH ||
+	    device == PCI_DEVICE_ID_INTEL_E7525_MCH)
+		quirk_intel_irqbalance();
+#endif
 }
 
 struct chipset {
@@ -85,6 +92,7 @@
 	{ PCI_VENDOR_ID_NVIDIA, nvidia_bugs },
 	{ PCI_VENDOR_ID_VIA, via_bugs },
 	{ PCI_VENDOR_ID_ATI, ati_bugs },
+	{ PCI_VENDOR_ID_INTEL, intel_bugs},
 	{}
 };
 
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 7d401b0..601d332 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -230,7 +230,6 @@
 	CFI_REL_OFFSET rip,RIP-ARGOFFSET
 	GET_THREAD_INFO(%rcx)
 	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
-	CFI_REMEMBER_STATE
 	jnz tracesys
 	cmpq $__NR_syscall_max,%rax
 	ja badsys
@@ -241,7 +240,6 @@
  * Syscall return path ending with SYSRET (fast path)
  * Has incomplete stack frame and undefined top of stack. 
  */		
-	.globl ret_from_sys_call
 ret_from_sys_call:
 	movl $_TIF_ALLWORK_MASK,%edi
 	/* edi:	flagmask */
@@ -251,8 +249,8 @@
 	TRACE_IRQS_OFF
 	movl threadinfo_flags(%rcx),%edx
 	andl %edi,%edx
-	CFI_REMEMBER_STATE
 	jnz  sysret_careful 
+	CFI_REMEMBER_STATE
 	/*
 	 * sysretq will re-enable interrupts:
 	 */
@@ -265,10 +263,10 @@
 	swapgs
 	sysretq
 
+	CFI_RESTORE_STATE
 	/* Handle reschedules */
 	/* edx:	work, edi: workmask */	
 sysret_careful:
-	CFI_RESTORE_STATE
 	bt $TIF_NEED_RESCHED,%edx
 	jnc sysret_signal
 	TRACE_IRQS_ON
@@ -306,7 +304,6 @@
 
 	/* Do syscall tracing */
 tracesys:			 
-	CFI_RESTORE_STATE
 	SAVE_REST
 	movq $-ENOSYS,RAX(%rsp)
 	FIXUP_TOP_OF_STACK %rdi
@@ -322,32 +319,13 @@
 	call *sys_call_table(,%rax,8)
 1:	movq %rax,RAX-ARGOFFSET(%rsp)
 	/* Use IRET because user could have changed frame */
-	jmp int_ret_from_sys_call
-	CFI_ENDPROC
-END(system_call)
 		
 /* 
  * Syscall return path ending with IRET.
  * Has correct top of stack, but partial stack frame.
- */ 	
-ENTRY(int_ret_from_sys_call)
-	CFI_STARTPROC	simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,SS+8-ARGOFFSET
-	/*CFI_REL_OFFSET	ss,SS-ARGOFFSET*/
-	CFI_REL_OFFSET	rsp,RSP-ARGOFFSET
-	/*CFI_REL_OFFSET	rflags,EFLAGS-ARGOFFSET*/
-	/*CFI_REL_OFFSET	cs,CS-ARGOFFSET*/
-	CFI_REL_OFFSET	rip,RIP-ARGOFFSET
-	CFI_REL_OFFSET	rdx,RDX-ARGOFFSET
-	CFI_REL_OFFSET	rcx,RCX-ARGOFFSET
-	CFI_REL_OFFSET	rax,RAX-ARGOFFSET
-	CFI_REL_OFFSET	rdi,RDI-ARGOFFSET
-	CFI_REL_OFFSET	rsi,RSI-ARGOFFSET
-	CFI_REL_OFFSET	r8,R8-ARGOFFSET
-	CFI_REL_OFFSET	r9,R9-ARGOFFSET
-	CFI_REL_OFFSET	r10,R10-ARGOFFSET
-	CFI_REL_OFFSET	r11,R11-ARGOFFSET
+ */
+	.globl int_ret_from_sys_call
+int_ret_from_sys_call:
 	cli
 	TRACE_IRQS_OFF
 	testl $3,CS-ARGOFFSET(%rsp)
@@ -394,8 +372,6 @@
 	popq %rdi
 	CFI_ADJUST_CFA_OFFSET -8
 	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
-	cli
-	TRACE_IRQS_OFF
 	jmp int_restore_rest
 	
 int_signal:
@@ -411,7 +387,7 @@
 	TRACE_IRQS_OFF
 	jmp int_with_check
 	CFI_ENDPROC
-END(int_ret_from_sys_call)
+END(system_call)
 		
 /* 
  * Certain special system calls that need to save a complete full stack frame.
diff --git a/arch/x86_64/kernel/genapic.c b/arch/x86_64/kernel/genapic.c
index 8e78a75..b007433 100644
--- a/arch/x86_64/kernel/genapic.c
+++ b/arch/x86_64/kernel/genapic.c
@@ -33,7 +33,7 @@
 extern struct genapic apic_physflat;
 
 struct genapic *genapic = &apic_flat;
-
+struct genapic *genapic_force;
 
 /*
  * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
@@ -46,6 +46,13 @@
 	u8 cluster_cnt[NUM_APIC_CLUSTERS];
 	int max_apic = 0;
 
+	/* genapic selection can be forced because of certain quirks.
+	 */
+	if (genapic_force) {
+		genapic = genapic_force;
+		goto print;
+	}
+
 #if defined(CONFIG_ACPI)
 	/*
 	 * Some x86_64 machines use physical APIC mode regardless of how many
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index 9561eb3..cc230b9 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -57,10 +57,12 @@
 {
 	int i;
 
-	for (i = 0; i < 256; i++)
+	/* clear bss before set_intr_gate with early_idt_handler */
+	clear_bss();
+
+	for (i = 0; i < IDT_ENTRIES; i++)
 		set_intr_gate(i, early_idt_handler);
 	asm volatile("lidt %0" :: "m" (idt_descr));
-	clear_bss();
 
 	early_printk("Kernel alive\n");
 
diff --git a/arch/x86_64/kernel/i387.c b/arch/x86_64/kernel/i387.c
index 3aa1e9b..1d58c13 100644
--- a/arch/x86_64/kernel/i387.c
+++ b/arch/x86_64/kernel/i387.c
@@ -82,11 +82,8 @@
 	struct task_struct *tsk = current;
 	int err = 0;
 
-	{ 
-		extern void bad_user_i387_struct(void); 
-		if (sizeof(struct user_i387_struct) != sizeof(tsk->thread.i387.fxsave))
-			bad_user_i387_struct();
-	} 
+	BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
+			sizeof(tsk->thread.i387.fxsave));
 
 	if ((unsigned long)buf % 16) 
 		printk("save_i387: bad fpstate %p\n",buf); 
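The open-coded check in i387.c, which relied on a call to an undefined extern function to fail the link when the two structures diverged, is replaced with BUILD_BUG_ON(), which fails at compile time instead. The kernel's definition in this era was essentially the negative-array-size trick; roughly:

	/* char[1 - 2*!!(cond)] becomes char[-1] when cond is true: build error */
	#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))

	/* usage, as in save_i387() above */
	BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
			sizeof(tsk->thread.i387.fxsave));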
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index c4ef801..d73c79e 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -76,7 +76,8 @@
 	IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
 	IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
 
-void (*interrupt[NR_IRQS])(void) = {
+/* for the irq vectors */
+static void (*interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = {
 					  IRQLIST_16(0x2), IRQLIST_16(0x3),
 	IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
 	IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index c80081a..2a1dcd5 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -55,10 +55,6 @@
 
 static int no_timer_check;
 
-static int disable_timer_pin_1 __initdata;
-
-int timer_over_8254 __initdata = 1;
-
 /* Where if anywhere is the i8259 connect in external int mode */
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
@@ -178,14 +174,20 @@
  * the interrupt, and we need to make sure the entry is fully populated
  * before that happens.
  */
+static void
+__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+{
+	union entry_union eu;
+	eu.entry = e;
+	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
+	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
+}
+
 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
 	unsigned long flags;
-	union entry_union eu;
-	eu.entry = e;
 	spin_lock_irqsave(&ioapic_lock, flags);
-	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
-	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
+	__ioapic_write_entry(apic, pin, e);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
@@ -348,29 +350,6 @@
 }
 early_param("noapic", disable_ioapic_setup);
 
-/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
-static int __init disable_timer_pin_setup(char *arg)
-{
-	disable_timer_pin_1 = 1;
-	return 1;
-}
-__setup("disable_timer_pin_1", disable_timer_pin_setup);
-
-static int __init setup_disable_8254_timer(char *s)
-{
-	timer_over_8254 = -1;
-	return 1;
-}
-static int __init setup_enable_8254_timer(char *s)
-{
-	timer_over_8254 = 2;
-	return 1;
-}
-
-__setup("disable_8254_timer", setup_disable_8254_timer);
-__setup("enable_8254_timer", setup_enable_8254_timer);
-
-
 /*
  * Find the IRQ entry number of a certain pin.
  */
@@ -750,6 +729,22 @@
 	return vector;
 }
 
+static void __clear_irq_vector(int irq)
+{
+	cpumask_t mask;
+	int cpu, vector;
+
+	BUG_ON(!irq_vector[irq]);
+
+	vector = irq_vector[irq];
+	cpus_and(mask, irq_domain[irq], cpu_online_map);
+	for_each_cpu_mask(cpu, mask)
+		per_cpu(vector_irq, cpu)[vector] = -1;
+
+	irq_vector[irq] = 0;
+	irq_domain[irq] = CPU_MASK_NONE;
+}
+
 void __setup_vector_irq(int cpu)
 {
 	/* Initialize vector_irq on a new cpu */
@@ -794,28 +789,66 @@
 					      handle_edge_irq, "edge");
 	}
 }
+static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq)
+{
+	struct IO_APIC_route_entry entry;
+	int vector;
+	unsigned long flags;
+
+
+	/*
+	 * add it to the IO-APIC irq-routing table:
+	 */
+	memset(&entry,0,sizeof(entry));
+
+	entry.delivery_mode = INT_DELIVERY_MODE;
+	entry.dest_mode = INT_DEST_MODE;
+	entry.mask = 0;				/* enable IRQ */
+	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
+
+	entry.trigger = irq_trigger(idx);
+	entry.polarity = irq_polarity(idx);
+
+	if (irq_trigger(idx)) {
+		entry.trigger = 1;
+		entry.mask = 1;
+		entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
+	}
+
+	if (!apic && !IO_APIC_IRQ(irq))
+		return;
+
+	if (IO_APIC_IRQ(irq)) {
+		cpumask_t mask;
+		vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
+		if (vector < 0)
+			return;
+
+		entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
+		entry.vector = vector;
+
+		ioapic_register_intr(irq, vector, IOAPIC_AUTO);
+		if (!apic && (irq < 16))
+			disable_8259A_irq(irq);
+	}
+
+	ioapic_write_entry(apic, pin, entry);
+
+	spin_lock_irqsave(&ioapic_lock, flags);
+	set_native_irq_info(irq, TARGET_CPUS);
+	spin_unlock_irqrestore(&ioapic_lock, flags);
+
+}
 
 static void __init setup_IO_APIC_irqs(void)
 {
-	struct IO_APIC_route_entry entry;
-	int apic, pin, idx, irq, first_notcon = 1, vector;
-	unsigned long flags;
+	int apic, pin, idx, irq, first_notcon = 1;
 
 	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
 
 	for (apic = 0; apic < nr_ioapics; apic++) {
 	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
 
-		/*
-		 * add it to the IO-APIC irq-routing table:
-		 */
-		memset(&entry,0,sizeof(entry));
-
-		entry.delivery_mode = INT_DELIVERY_MODE;
-		entry.dest_mode = INT_DEST_MODE;
-		entry.mask = 0;				/* enable IRQ */
-		entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-
 		idx = find_irq_entry(apic,pin,mp_INT);
 		if (idx == -1) {
 			if (first_notcon) {
@@ -826,39 +859,11 @@
 			continue;
 		}
 
-		entry.trigger = irq_trigger(idx);
-		entry.polarity = irq_polarity(idx);
-
-		if (irq_trigger(idx)) {
-			entry.trigger = 1;
-			entry.mask = 1;
-			entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-		}
-
 		irq = pin_2_irq(idx, apic, pin);
 		add_pin_to_irq(irq, apic, pin);
 
-		if (!apic && !IO_APIC_IRQ(irq))
-			continue;
+		setup_IO_APIC_irq(apic, pin, idx, irq);
 
-		if (IO_APIC_IRQ(irq)) {
-			cpumask_t mask;
-			vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
-			if (vector < 0)
-				continue;
-
-			entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
-			entry.vector = vector;
-
-			ioapic_register_intr(irq, vector, IOAPIC_AUTO);
-			if (!apic && (irq < 16))
-				disable_8259A_irq(irq);
-		}
-		ioapic_write_entry(apic, pin, entry);
-
-		spin_lock_irqsave(&ioapic_lock, flags);
-		set_native_irq_info(irq, TARGET_CPUS);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
 	}
 	}
 
@@ -1563,10 +1568,33 @@
  * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
  * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
  * fanatically on his truly buggy board.
- *
- * FIXME: really need to revamp this for modern platforms only.
  */
-static inline void check_timer(void)
+
+static int try_apic_pin(int apic, int pin, char *msg)
+{
+	apic_printk(APIC_VERBOSE, KERN_INFO
+		    "..TIMER: trying IO-APIC=%d PIN=%d %s",
+		    apic, pin, msg);
+
+	/*
+	 * Ok, does IRQ0 through the IOAPIC work?
+	 */
+	if (!no_timer_check && timer_irq_works()) {
+		nmi_watchdog_default();
+		if (nmi_watchdog == NMI_IO_APIC) {
+			disable_8259A_irq(0);
+			setup_nmi();
+			enable_8259A_irq(0);
+		}
+		return 1;
+	}
+	clear_IO_APIC_pin(apic, pin);
+	apic_printk(APIC_QUIET, KERN_ERR " .. failed\n");
+	return 0;
+}
+
+/* The function from hell */
+static void check_timer(void)
 {
 	int apic1, pin1, apic2, pin2;
 	int vector;
@@ -1587,61 +1615,43 @@
 	 */
 	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
 	init_8259A(1);
-	if (timer_over_8254 > 0)
-		enable_8259A_irq(0);
 
 	pin1  = find_isa_irq_pin(0, mp_INT);
 	apic1 = find_isa_irq_apic(0, mp_INT);
 	pin2  = ioapic_i8259.pin;
 	apic2 = ioapic_i8259.apic;
 
-	apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
-		vector, apic1, pin1, apic2, pin2);
+	/* Do this first, otherwise we get double interrupts on ATI boards */
+	if ((pin1 != -1) && try_apic_pin(apic1, pin1,"with 8259 IRQ0 disabled"))
+		return;
 
-	if (pin1 != -1) {
-		/*
-		 * Ok, does IRQ0 through the IOAPIC work?
-		 */
-		unmask_IO_APIC_irq(0);
-		if (!no_timer_check && timer_irq_works()) {
-			nmi_watchdog_default();
-			if (nmi_watchdog == NMI_IO_APIC) {
-				disable_8259A_irq(0);
-				setup_nmi();
-				enable_8259A_irq(0);
-			}
-			if (disable_timer_pin_1 > 0)
-				clear_IO_APIC_pin(0, pin1);
-			return;
-		}
-		clear_IO_APIC_pin(apic1, pin1);
-		apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
-				"connected to IO-APIC\n");
-	}
+	/* Now try again with IRQ0 8259A enabled.
+	   Assumes timer is on IO-APIC 0 ?!? */
+	enable_8259A_irq(0);
+	unmask_IO_APIC_irq(0);
+	if (try_apic_pin(apic1, pin1, "with 8259 IRQ0 enabled"))
+		return;
+	disable_8259A_irq(0);
 
-	apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
-				"through the 8259A ... ");
+	/* Always try pin0 and pin2 on APIC 0 to handle buggy timer overrides
+	   on Nvidia boards */
+	if (!(apic1 == 0 && pin1 == 0) &&
+	    try_apic_pin(0, 0, "fallback with 8259 IRQ0 disabled"))
+		return;
+	if (!(apic1 == 0 && pin1 == 2) &&
+	    try_apic_pin(0, 2, "fallback with 8259 IRQ0 disabled"))
+		return;
+
+	/* Then try pure 8259A routing on the 8259 as reported by BIOS*/
+	enable_8259A_irq(0);
 	if (pin2 != -1) {
-		apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
-			apic2, pin2);
-		/*
-		 * legacy devices should be connected to IO APIC #0
-		 */
 		setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
-		if (timer_irq_works()) {
-			apic_printk(APIC_VERBOSE," works.\n");
-			nmi_watchdog_default();
-			if (nmi_watchdog == NMI_IO_APIC) {
-				setup_nmi();
-			}
+		if (try_apic_pin(apic2,pin2,"8259A broadcast ExtINT from BIOS"))
 			return;
-		}
-		/*
-		 * Cleanup, just in case ...
-		 */
-		clear_IO_APIC_pin(apic2, pin2);
 	}
-	apic_printk(APIC_VERBOSE," failed.\n");
+
+	/* Tried all possibilities to go through the IO-APIC. Now come the
+	   really cheesy fallbacks. */
 
 	if (nmi_watchdog == NMI_IO_APIC) {
 		printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
@@ -1837,7 +1847,7 @@
 	dynamic_irq_cleanup(irq);
 
 	spin_lock_irqsave(&vector_lock, flags);
-	irq_vector[irq] = 0;
+	__clear_irq_vector(irq);
 	spin_unlock_irqrestore(&vector_lock, flags);
 }
 
@@ -2139,7 +2149,15 @@
 			if (irq_entry == -1)
 				continue;
 			irq = pin_2_irq(irq_entry, ioapic, pin);
-			set_ioapic_affinity_irq(irq, TARGET_CPUS);
+
+			/* setup_IO_APIC_irqs could fail to get a vector for some
+			 * devices when there are too many devices, because at that
+			 * time only the boot cpu is online.
+			 */
+			if(!irq_vector[irq])
+				setup_IO_APIC_irq(ioapic, pin, irq_entry, irq);
+			else
+				set_ioapic_affinity_irq(irq, TARGET_CPUS);
 		}
 
 	}
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index e46c558..0c06af6 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -120,7 +120,7 @@
 
 	if (likely(irq < NR_IRQS))
 		generic_handle_irq(irq);
-	else
+	else if (printk_ratelimit())
 		printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
 			__func__, smp_processor_id(), vector);
 
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index ac24156..209c8c0 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -224,7 +224,7 @@
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
+	free_insn_slot(p->ainsn.insn, 0);
 	mutex_unlock(&kprobe_mutex);
 }
 
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index bbea888..ac08503 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -306,8 +306,8 @@
  */
 
 static int check_interval = 5 * 60; /* 5 minutes */
-static void mcheck_timer(void *data);
-static DECLARE_WORK(mcheck_work, mcheck_timer, NULL);
+static void mcheck_timer(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
 
 static void mcheck_check_cpu(void *info)
 {
@@ -315,7 +315,7 @@
 		do_machine_check(NULL, 0);
 }
 
-static void mcheck_timer(void *data)
+static void mcheck_timer(struct work_struct *work)
 {
 	on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
 	schedule_delayed_work(&mcheck_work, check_interval * HZ);
@@ -641,7 +641,6 @@
 	return err;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static void mce_remove_device(unsigned int cpu)
 {
 	int i;
@@ -652,6 +651,7 @@
 	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_tolerant);
 	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_check_interval);
 	sysdev_unregister(&per_cpu(device_mce,cpu));
+	memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
 }
 
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
@@ -674,7 +674,6 @@
 static struct notifier_block mce_cpu_notifier = {
 	.notifier_call = mce_cpu_callback,
 };
-#endif
 
 static __init int mce_init_device(void)
 {
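The MCE poller is converted the same way as the other workqueue users in this merge: the handler now takes a struct work_struct *, and because it re-arms itself with schedule_delayed_work() the work item is declared with DECLARE_DELAYED_WORK() rather than DECLARE_WORK(). A minimal sketch of the periodic-poll pattern with illustrative names (not from the patch):

	#include <linux/workqueue.h>

	static void example_poll(struct work_struct *work);
	static DECLARE_DELAYED_WORK(example_poll_work, example_poll);

	static void example_poll(struct work_struct *work)
	{
		do_periodic_check();	/* hypothetical check, cf. mcheck_check_cpu() */
		schedule_delayed_work(&example_poll_work, 5 * 60 * HZ);	/* re-arm */
	}

	/* first armed from an init path:
	 *	schedule_delayed_work(&example_poll_work, 5 * 60 * HZ);
	 */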
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index 883fe74..fa09deb 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -551,7 +551,6 @@
 	return err;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 /*
  * let's be hotplug friendly.
  * in case of multiple core processors, the first core always takes ownership
@@ -594,12 +593,14 @@
 
 	sprintf(name, "threshold_bank%i", bank);
 
+#ifdef CONFIG_SMP
 	/* sibling symlink */
 	if (shared_bank[bank] && b->blocks->cpu != cpu) {
 		sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
 		per_cpu(threshold_banks, cpu)[bank] = NULL;
 		return;
 	}
+#endif
 
 	/* remove all sibling symlinks before unregistering */
 	for_each_cpu_mask(i, b->cpus) {
@@ -656,7 +657,6 @@
 static struct notifier_block threshold_cpu_notifier = {
 	.notifier_call = threshold_cpu_callback,
 };
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static __init int threshold_init_device(void)
 {
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index b147ab1..0807256 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -35,8 +35,6 @@
 int smp_found_config;
 unsigned int __initdata maxcpus = NR_CPUS;
 
-int acpi_found_madt;
-
 /*
  * Various Linux-internal data structures created from the
  * MP-table.
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 7af9cb3..27e95e7 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -12,14 +12,15 @@
  *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
  */
 
+#include <linux/nmi.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/sysdev.h>
-#include <linux/nmi.h>
 #include <linux/sysctl.h>
 #include <linux/kprobes.h>
+#include <linux/cpumask.h>
 
 #include <asm/smp.h>
 #include <asm/nmi.h>
@@ -41,6 +42,8 @@
 static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
 static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);
 
+static cpumask_t backtrace_mask = CPU_MASK_NONE;
+
 /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
  * offset from MSR_P4_BSU_ESCR0.  It will be the max for all platforms (for now)
  */
@@ -782,6 +785,7 @@
 {
 	int sum;
 	int touched = 0;
+	int cpu = smp_processor_id();
 	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 	u64 dummy;
 	int rc=0;
@@ -799,6 +803,16 @@
 		touched = 1;
 	}
 
+	if (cpu_isset(cpu, backtrace_mask)) {
+		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
+
+		spin_lock(&lock);
+		printk("NMI backtrace for cpu %d\n", cpu);
+		dump_stack();
+		spin_unlock(&lock);
+		cpu_clear(cpu, backtrace_mask);
+	}
+
 #ifdef CONFIG_X86_MCE
 	/* Could check oops_in_progress here too, but it's safer
 	   not too */
@@ -931,6 +945,19 @@
 
 #endif
 
+void __trigger_all_cpu_backtrace(void)
+{
+	int i;
+
+	backtrace_mask = cpu_online_map;
+	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
+	for (i = 0; i < 10 * 1000; i++) {
+		if (cpus_empty(backtrace_mask))
+			break;
+		mdelay(1);
+	}
+}
+
 EXPORT_SYMBOL(nmi_active);
 EXPORT_SYMBOL(nmi_watchdog);
 EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
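The backtrace support added to nmi.c is a small cpumask rendezvous: the requester publishes the set of online CPUs in backtrace_mask, each CPU notices its bit from the NMI watchdog tick, dumps its stack under a spinlock so the output is not interleaved, and clears its own bit; the requester polls for up to ten seconds until the mask drains. Stripped of the watchdog plumbing, the handshake reduces to (names as in the patch, control flow condensed):

	/* requester */
	backtrace_mask = cpu_online_map;
	for (i = 0; i < 10 * 1000; i++) {	/* ~10s in 1ms steps */
		if (cpus_empty(backtrace_mask))
			break;
		mdelay(1);
	}

	/* each CPU, from the NMI watchdog tick */
	if (cpu_isset(cpu, backtrace_mask)) {
		spin_lock(&lock);		/* serialise the dumps */
		printk("NMI backtrace for cpu %d\n", cpu);
		dump_stack();
		spin_unlock(&lock);
		cpu_clear(cpu, backtrace_mask);
	}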
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
index 37a7708..3215675 100644
--- a/arch/x86_64/kernel/pci-calgary.c
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -41,6 +41,13 @@
 #include <asm/pci-direct.h>
 #include <asm/system.h>
 #include <asm/dma.h>
+#include <asm/rio.h>
+
+#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
+int use_calgary __read_mostly = 1;
+#else
+int use_calgary __read_mostly = 0;
+#endif /* CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT */
 
 #define PCI_DEVICE_ID_IBM_CALGARY 0x02a1
 #define PCI_VENDOR_DEVICE_ID_CALGARY \
@@ -115,14 +122,35 @@
 	0xB000 /* PHB3 */
 };
 
+/* PHB debug registers */
+
+static const unsigned long phb_debug_offsets[] = {
+	0x4000	/* PHB 0 DEBUG */,
+	0x5000	/* PHB 1 DEBUG */,
+	0x6000	/* PHB 2 DEBUG */,
+	0x7000	/* PHB 3 DEBUG */
+};
+
+/*
+ * STUFF register for each debug PHB,
+ * byte 1 = start bus number, byte 2 = end bus number
+ */
+
+#define PHB_DEBUG_STUFF_OFFSET	0x0020
+
 unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
 static int translate_empty_slots __read_mostly = 0;
 static int calgary_detected __read_mostly = 0;
 
+static struct rio_table_hdr	*rio_table_hdr __initdata;
+static struct scal_detail	*scal_devs[MAX_NUMNODES] __initdata;
+static struct rio_detail	*rio_devs[MAX_NUMNODES * 4] __initdata;
+
 struct calgary_bus_info {
 	void *tce_space;
 	unsigned char translation_disabled;
 	signed char phbid;
+	void __iomem *bbar;
 };
 
 static struct calgary_bus_info bus_info[MAX_PHB_BUS_NUM] = { { NULL, 0, 0 }, };
@@ -475,6 +503,11 @@
 	.unmap_sg = calgary_unmap_sg,
 };
 
+static inline void __iomem * busno_to_bbar(unsigned char num)
+{
+	return bus_info[num].bbar;
+}
+
 static inline int busno_to_phbid(unsigned char num)
 {
 	return bus_info[num].phbid;
@@ -620,14 +653,9 @@
 static void __init calgary_reserve_regions(struct pci_dev *dev)
 {
 	unsigned int npages;
-	void __iomem *bbar;
-	unsigned char busnum;
 	u64 start;
 	struct iommu_table *tbl = dev->sysdata;
 
-	bbar = tbl->bbar;
-	busnum = dev->bus->number;
-
 	/* reserve bad_dma_address in case it's a legal address */
 	iommu_range_reserve(tbl, bad_dma_address, 1);
 
@@ -740,7 +768,7 @@
 {
 	u64 val64;
 	void __iomem *target;
-	unsigned long phb_shift = -1;
+	unsigned int phb_shift = ~0; /* silence gcc */
 	u64 mask;
 
 	switch (busno_to_phbid(busnum)) {
@@ -828,33 +856,6 @@
 	del_timer_sync(&tbl->watchdog_timer);
 }
 
-static inline unsigned int __init locate_register_space(struct pci_dev *dev)
-{
-	int rionodeid;
-	u32 address;
-
-	/*
-	 * Each Calgary has four busses. The first four busses (first Calgary)
-	 * have RIO node ID 2, then the next four (second Calgary) have RIO
-	 * node ID 3, the next four (third Calgary) have node ID 2 again, etc.
-	 * We use a gross hack - relying on the dev->bus->number ordering,
-	 * modulo 14 - to decide which Calgary a given bus is on. Busses 0, 1,
-	 * 2 and 4 are on the first Calgary (id 2), 6, 8, a and c are on the
-	 * second (id 3), and then it repeats modulo 14.
- 	 */
-	rionodeid = (dev->bus->number % 14 > 4) ? 3 : 2;
-	/*
-	 * register space address calculation as follows:
-	 * FE0MB-8MB*OneBasedChassisNumber+1MB*(RioNodeId-ChassisBase)
-	 * ChassisBase is always zero for x366/x260/x460
-	 * RioNodeId is 2 for first Calgary, 3 for second Calgary
-	 */
-	address = START_ADDRESS	-
-		(0x800000 * (ONE_BASED_CHASSIS_NUM + dev->bus->number / 14)) +
-		(0x100000) * (rionodeid - CHASSIS_BASE);
-	return address;
-}
-
 static void __init calgary_init_one_nontraslated(struct pci_dev *dev)
 {
 	pci_dev_get(dev);
@@ -864,23 +865,15 @@
 
 static int __init calgary_init_one(struct pci_dev *dev)
 {
-	u32 address;
 	void __iomem *bbar;
 	int ret;
 
 	BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM);
 
-	address = locate_register_space(dev);
-	/* map entire 1MB of Calgary config space */
-	bbar = ioremap_nocache(address, 1024 * 1024);
-	if (!bbar) {
-		ret = -ENODATA;
-		goto done;
-	}
-
+	bbar = busno_to_bbar(dev->bus->number);
 	ret = calgary_setup_tar(dev, bbar);
 	if (ret)
-		goto iounmap;
+		goto done;
 
 	pci_dev_get(dev);
 	dev->bus->self = dev;
@@ -888,17 +881,66 @@
 
 	return 0;
 
-iounmap:
-	iounmap(bbar);
 done:
 	return ret;
 }
 
+static int __init calgary_locate_bbars(void)
+{
+	int ret;
+	int rioidx, phb, bus;
+	void __iomem *bbar;
+	void __iomem *target;
+	unsigned long offset;
+	u8 start_bus, end_bus;
+	u32 val;
+
+	ret = -ENODATA;
+	for (rioidx = 0; rioidx < rio_table_hdr->num_rio_dev; rioidx++) {
+		struct rio_detail *rio = rio_devs[rioidx];
+
+		if ((rio->type != COMPAT_CALGARY) && (rio->type != ALT_CALGARY))
+			continue;
+
+		/* map entire 1MB of Calgary config space */
+		bbar = ioremap_nocache(rio->BBAR, 1024 * 1024);
+		if (!bbar)
+			goto error;
+
+		for (phb = 0; phb < PHBS_PER_CALGARY; phb++) {
+			offset = phb_debug_offsets[phb] | PHB_DEBUG_STUFF_OFFSET;
+			target = calgary_reg(bbar, offset);
+
+			val = be32_to_cpu(readl(target));
+			start_bus = (u8)((val & 0x00FF0000) >> 16);
+			end_bus = (u8)((val & 0x0000FF00) >> 8);
+			for (bus = start_bus; bus <= end_bus; bus++) {
+				bus_info[bus].bbar = bbar;
+				bus_info[bus].phbid = phb;
+			}
+		}
+	}
+
+	return 0;
+
+error:
+	/* scan bus_info and iounmap any bbars we previously ioremap'd */
+	for (bus = 0; bus < ARRAY_SIZE(bus_info); bus++)
+		if (bus_info[bus].bbar)
+			iounmap(bus_info[bus].bbar);
+
+	return ret;
+}
+
 static int __init calgary_init(void)
 {
-	int ret = -ENODEV;
+	int ret;
 	struct pci_dev *dev = NULL;
 
+	ret = calgary_locate_bbars();
+	if (ret)
+		return ret;
+
 	do {
 		dev = pci_get_device(PCI_VENDOR_ID_IBM,
 				     PCI_DEVICE_ID_IBM_CALGARY,
@@ -921,7 +963,7 @@
 
 error:
 	do {
-		dev = pci_find_device_reverse(PCI_VENDOR_ID_IBM,
+		dev = pci_get_device_reverse(PCI_VENDOR_ID_IBM,
 					      PCI_DEVICE_ID_IBM_CALGARY,
 					      dev);
 		if (!dev)
@@ -962,13 +1004,56 @@
 	return ret;
 }
 
+static int __init build_detail_arrays(void)
+{
+	unsigned long ptr;
+	int i, scal_detail_size, rio_detail_size;
+
+	if (rio_table_hdr->num_scal_dev > MAX_NUMNODES){
+		printk(KERN_WARNING
+			"Calgary: MAX_NUMNODES too low! Defined as %d, "
+			"but system has %d nodes.\n",
+			MAX_NUMNODES, rio_table_hdr->num_scal_dev);
+		return -ENODEV;
+	}
+
+	switch (rio_table_hdr->version){
+	case 2:
+		scal_detail_size = 11;
+		rio_detail_size = 13;
+		break;
+	case 3:
+		scal_detail_size = 12;
+		rio_detail_size = 15;
+		break;
+	default:
+		printk(KERN_WARNING
+		       "Calgary: Invalid Rio Grande Table Version: %d\n",
+		       rio_table_hdr->version);
+		return -EPROTO;
+	}
+
+	ptr = ((unsigned long)rio_table_hdr) + 3;
+	for (i = 0; i < rio_table_hdr->num_scal_dev;
+		    i++, ptr += scal_detail_size)
+		scal_devs[i] = (struct scal_detail *)ptr;
+
+	for (i = 0; i < rio_table_hdr->num_rio_dev;
+		    i++, ptr += rio_detail_size)
+		rio_devs[i] = (struct rio_detail *)ptr;
+
+	return 0;
+}
+
 void __init detect_calgary(void)
 {
 	u32 val;
 	int bus;
 	void *tbl;
 	int calgary_found = 0;
-	int phb = -1;
+	unsigned long ptr;
+	int offset;
+	int ret;
 
 	/*
 	 * if the user specified iommu=off or iommu=soft or we found
@@ -977,25 +1062,47 @@
 	if (swiotlb || no_iommu || iommu_detected)
 		return;
 
+	if (!use_calgary)
+		return;
+
 	if (!early_pci_allowed())
 		return;
 
+	ptr = (unsigned long)phys_to_virt(get_bios_ebda());
+
+	rio_table_hdr = NULL;
+	offset = 0x180;
+	while (offset) {
+		/* The block id is stored in the 2nd word */
+		if (*((unsigned short *)(ptr + offset + 2)) == 0x4752){
+			/* set the pointer past the offset & block id */
+			rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4);
+			break;
+		}
+		/* The next offset is stored in the 1st word. 0 means no more */
+		offset = *((unsigned short *)(ptr + offset));
+	}
+	if (!rio_table_hdr) {
+		printk(KERN_ERR "Calgary: Unable to locate "
+				"Rio Grande Table in EBDA - bailing!\n");
+		return;
+	}
+
+	ret = build_detail_arrays();
+	if (ret) {
+		printk(KERN_ERR "Calgary: build_detail_arrays ret %d\n", ret);
+		return;
+	}
+
 	specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE);
 
 	for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
 		int dev;
 		struct calgary_bus_info *info = &bus_info[bus];
-		info->phbid = -1;
 
 		if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY)
 			continue;
 
-		/*
-		 * There are 4 PHBs per Calgary chip.  Set phb to which phb (0-3)
-		 * it is connected to releative to the clagary chip.
-		 */
-		phb = (phb + 1) % PHBS_PER_CALGARY;
-
 		if (info->translation_disabled)
 			continue;
 
@@ -1010,7 +1117,6 @@
 				if (!tbl)
 					goto cleanup;
 				info->tce_space = tbl;
-				info->phbid = phb;
 				calgary_found = 1;
 				break;
 			}
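
The PHB_DEBUG_STUFF word read in calgary_locate_bbars() above packs the PHB's bus range into one 32-bit value (after be32_to_cpu()): bits 23:16 are the starting bus number and bits 15:8 the ending bus number. A minimal user-space sketch of just that decode, with a made-up register value for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Extract the start/end bus numbers the same way calgary_locate_bbars()
 * does after byte-swapping the PHB debug register: bits 23:16 and 15:8. */
static void decode_bus_range(uint32_t val, uint8_t *start, uint8_t *end)
{
	*start = (uint8_t)((val & 0x00FF0000) >> 16);
	*end   = (uint8_t)((val & 0x0000FF00) >> 8);
}

int main(void)
{
	uint32_t val = 0x00050700;	/* made-up value: buses 0x05..0x07 */
	uint8_t start, end;

	decode_bus_range(val, &start, &end);
	printf("buses %#x..%#x\n", start, end);
	return 0;
}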
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index f8d8574..683b7a5 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -296,6 +296,11 @@
 		gart_parse_options(p);
 #endif
 
+#ifdef CONFIG_CALGARY_IOMMU
+		if (!strncmp(p, "calgary", 7))
+			use_calgary = 1;
+#endif /* CONFIG_CALGARY_IOMMU */
+
 		p += strcspn(p, ",");
 		if (*p == ',')
 			++p;
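
The iommu= option scan above walks a comma-separated string with strcspn(): match a keyword at the current position, then hop past the next comma. A small stand-alone sketch of the same loop (the option names are just examples, not an exhaustive list of what the kernel accepts):

#include <stdio.h>
#include <string.h>

/* Walk a comma-separated option string the way the iommu= parser does. */
static void parse_opts(const char *opts)
{
	const char *p = opts;

	while (*p) {
		if (!strncmp(p, "calgary", 7))
			printf("calgary selected\n");
		else if (!strncmp(p, "off", 3))
			printf("iommu disabled\n");

		p += strcspn(p, ",");	/* skip to the next separator */
		if (*p == ',')
			++p;
	}
}

int main(void)
{
	parse_opts("off,calgary");	/* example command-line fragment */
	return 0;
}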
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 16261a8..fc1960f 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -601,10 +601,9 @@
 	    (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
 	    !iommu_aperture ||
 	    (no_agp && init_k8_gatt(&info) < 0)) {
-		printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
 		if (end_pfn > MAX_DMA32_PFN) {
 			printk(KERN_ERR "WARNING more than 4GB of memory "
-					"but IOMMU not available.\n"
+					"but GART IOMMU not available.\n"
 			       KERN_ERR "WARNING 32bit PCI may malfunction.\n");
 		}
 		return;
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 7451a4c..a418ee4 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -108,17 +108,15 @@
  */
 static void default_idle(void)
 {
-	local_irq_enable();
-
 	current_thread_info()->status &= ~TS_POLLING;
 	smp_mb__after_clear_bit();
-	while (!need_resched()) {
-		local_irq_disable();
-		if (!need_resched())
-			safe_halt();
-		else
-			local_irq_enable();
-	}
+	local_irq_disable();
+	if (!need_resched()) {
+		/* Enables interrupts one instruction before HLT.
+		   x86 special cases this so there is no race. */
+		safe_halt();
+	} else
+		local_irq_enable();
 	current_thread_info()->status |= TS_POLLING;
 }
 
@@ -130,15 +128,7 @@
 static void poll_idle (void)
 {
 	local_irq_enable();
-
-	asm volatile(
-		"2:"
-		"testl %0,%1;"
-		"rep; nop;"
-		"je 2b;"
-		: :
-		"i" (_TIF_NEED_RESCHED),
-		"m" (current_thread_info()->flags));
+	cpu_relax();
 }
 
 void cpu_idle_wait(void)
@@ -219,6 +209,12 @@
 				idle = default_idle;
 			if (cpu_is_offline(smp_processor_id()))
 				play_dead();
+			/*
+			 * Idle routines should keep interrupts disabled
+			 * from here on, until they go to idle.
+			 * Otherwise, idle callbacks can misfire.
+			 */
+			local_irq_disable();
 			enter_idle();
 			idle();
 			/* In many cases the interrupt that ended idle
@@ -256,9 +252,16 @@
 /* Default MONITOR/MWAIT with no hints, used for default C1 state */
 static void mwait_idle(void)
 {
-	local_irq_enable();
-	while (!need_resched())
-		mwait_idle_with_hints(0,0);
+	if (!need_resched()) {
+		__monitor((void *)&current_thread_info()->flags, 0, 0);
+		smp_mb();
+		if (!need_resched())
+			__sti_mwait(0, 0);
+		else
+			local_irq_enable();
+	} else {
+		local_irq_enable();
+	}
 }
 
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
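
The default_idle()/mwait_idle() rework above closes a lost-wakeup window: the need_resched() test has to be made with interrupts disabled, and the re-enable must happen atomically with going to sleep (sti one instruction before hlt, or mwait's implicit wakeup). The same pattern exists in user space with signals; a minimal sketch using sigprocmask()/sigsuspend() as the analogue of cli/test/safe_halt() (flag and signal choice are illustrative only):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t work_pending;

static void on_alarm(int sig)
{
	(void)sig;
	work_pending = 1;	/* the "interrupt" posts work */
}

int main(void)
{
	sigset_t block, old;
	struct sigaction sa;

	sa.sa_handler = on_alarm;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGALRM, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGALRM);
	alarm(1);			/* wakeup arrives about a second from now */

	/* Mask the wakeup source, then test the condition: this mirrors
	 * local_irq_disable(); if (!need_resched()) in default_idle(). */
	sigprocmask(SIG_BLOCK, &block, &old);
	if (!work_pending) {
		/* sigsuspend() unmasks and sleeps atomically, like the sti
		 * immediately before hlt in safe_halt(): no lost wakeup. */
		sigsuspend(&old);
	}
	sigprocmask(SIG_SETMASK, &old, NULL);

	printf("woke up, work_pending=%d\n", (int)work_pending);
	return 0;
}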
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index fc944b5..af425a8 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -471,8 +471,7 @@
 	if (LOADER_TYPE && INITRD_START) {
 		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
 			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
-			initrd_start =
-				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
+			initrd_start = INITRD_START + PAGE_OFFSET;
 			initrd_end = initrd_start+INITRD_SIZE;
 		}
 		else {
@@ -732,11 +731,8 @@
 	/* Fix cpuid4 emulation for more */
 	num_cache_leaves = 3;
 
-	/* When there is only one core no need to synchronize RDTSC */
-	if (num_possible_cpus() == 1)
-	        set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
-	else
-	        clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+	/* RDTSC can be speculated around */
+	clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
 }
 
 static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
@@ -835,6 +831,15 @@
 			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
 	}
 
+	if (cpu_has_ds) {
+		unsigned int l1, l2;
+		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
+		if (!(l1 & (1<<11)))
+			set_bit(X86_FEATURE_BTS, c->x86_capability);
+		if (!(l1 & (1<<12)))
+			set_bit(X86_FEATURE_PEBS, c->x86_capability);
+	}
+
 	n = c->extended_cpuid_level;
 	if (n >= 0x80000008) {
 		unsigned eax = cpuid_eax(0x80000008);
@@ -854,7 +859,10 @@
 		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
 	if (c->x86 == 6)
 		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
-	set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+	if (c->x86 == 15)
+		set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+	else
+		clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
  	c->x86_max_cores = intel_num_cpu_cores(c);
 
 	srat_detect_node();
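
The new cpu_has_ds block above reads IA32_MISC_ENABLE (MSR 0x1a0); bit 11 set means BTS is unavailable and bit 12 set means PEBS is unavailable, so the feature bits are set only when those bits are clear. The same check can be made from user space through the msr driver; a rough sketch assuming the msr module is loaded and root privileges (illustration only):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_MISC_ENABLE 0x1a0

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* The msr device reads a register by seeking to its number. */
	if (pread(fd, &val, sizeof(val), MSR_IA32_MISC_ENABLE) != sizeof(val)) {
		perror("pread");
		close(fd);
		return 1;
	}
	close(fd);

	/* Bits 11/12 are "unavailable" flags, so clear means supported. */
	printf("BTS  %savailable\n", (val & (1 << 11)) ? "un" : "");
	printf("PEBS %savailable\n", (val & (1 << 12)) ? "un" : "");
	return 0;
}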
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 9f74c88..af1ec4d 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -379,12 +379,17 @@
 		put_cpu();
 		return 0;
 	}
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
 	spin_lock_bh(&call_lock);
 	__smp_call_function_single(cpu, func, info, nonatomic, wait);
 	spin_unlock_bh(&call_lock);
 	put_cpu();
 	return 0;
 }
+EXPORT_SYMBOL(smp_call_function_single);
 
 /*
  * this function sends a 'generic call function' IPI to all other CPUs
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 62c2e74..daf1933 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -60,6 +60,7 @@
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
 #include <asm/numa.h>
+#include <asm/genapic.h>
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
@@ -753,14 +754,16 @@
 }
 
 struct create_idle {
+	struct work_struct work;
 	struct task_struct *idle;
 	struct completion done;
 	int cpu;
 };
 
-void do_fork_idle(void *_c_idle)
+void do_fork_idle(struct work_struct *work)
 {
-	struct create_idle *c_idle = _c_idle;
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
 
 	c_idle->idle = fork_idle(c_idle->cpu);
 	complete(&c_idle->done);
@@ -775,10 +778,10 @@
 	int timeout;
 	unsigned long start_rip;
 	struct create_idle c_idle = {
+		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
 		.cpu = cpu,
 		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 	};
-	DECLARE_WORK(work, do_fork_idle, &c_idle);
 
 	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
 	if (!cpu_gdt_descr[cpu].address &&
@@ -825,9 +828,9 @@
 	 * thread.
 	 */
 	if (!keventd_up() || current_is_keventd())
-		work.func(work.data);
+		c_idle.work.func(&c_idle.work);
 	else {
-		schedule_work(&work);
+		schedule_work(&c_idle.work);
 		wait_for_completion(&c_idle.done);
 	}
 
@@ -1167,6 +1170,13 @@
 
 	while (!cpu_isset(cpu, cpu_online_map))
 		cpu_relax();
+
+	if (num_online_cpus() > 8 && genapic == &apic_flat) {
+		printk(KERN_WARNING
+		       "flat APIC routing can't be used with > 8 cpus\n");
+		BUG();
+	}
+
 	err = 0;
 
 	return err;
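
The do_fork_idle() conversion above follows the new workqueue convention: the handler receives a struct work_struct * and recovers its enclosing object with container_of(), instead of being handed an opaque void * data pointer. The pattern itself is plain C; a stand-alone sketch with made-up struct and handler names mirroring c_idle in the patch (a simplified container_of(), not the kernel's type-checked one):

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(): given a pointer to a member, step back to
 * the start of the structure that embeds it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { void (*func)(struct work *); };

struct create_idle {
	struct work work;	/* embedded, like c_idle.work in the patch */
	int cpu;
	int done;
};

static void do_fork_idle(struct work *w)
{
	struct create_idle *c_idle = container_of(w, struct create_idle, work);

	printf("forking idle task for cpu %d\n", c_idle->cpu);
	c_idle->done = 1;
}

int main(void)
{
	struct create_idle c_idle = { .work = { .func = do_fork_idle }, .cpu = 3 };

	/* Mirrors the direct c_idle.work.func(&c_idle.work) call above. */
	c_idle.work.func(&c_idle.work);
	printf("done=%d\n", c_idle.done);
	return 0;
}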
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index e3ef544..9f05bc9 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -563,7 +563,7 @@
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
 
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *v)
 {
 	unsigned int cpu;
 	for_each_online_cpu(cpu) {
@@ -639,7 +639,7 @@
 
 static int __init cpufreq_tsc(void)
 {
-	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
 	if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
 				       CPUFREQ_TRANSITION_NOTIFIER))
 		cpufreq_init = 1;
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 0d65b22..a1641ff 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -30,9 +30,9 @@
 #include <linux/kprobes.h>
 #include <linux/kexec.h>
 #include <linux/unwind.h>
+#include <linux/uaccess.h>
 
 #include <asm/system.h>
-#include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/atomic.h>
 #include <asm/debugreg.h>
@@ -108,7 +108,7 @@
 	preempt_enable_no_resched();
 }
 
-static int kstack_depth_to_print = 12;
+int kstack_depth_to_print = 12;
 #ifdef CONFIG_STACK_UNWIND
 static int call_trace = 1;
 #else
@@ -225,16 +225,25 @@
 {
 	struct ops_and_data *oad = (struct ops_and_data *)context;
 	int n = 0;
+	unsigned long sp = UNW_SP(info);
 
+	if (arch_unw_user_mode(info))
+		return -1;
 	while (unwind(info) == 0 && UNW_PC(info)) {
 		n++;
 		oad->ops->address(oad->data, UNW_PC(info));
 		if (arch_unw_user_mode(info))
 			break;
+		if ((sp & ~(PAGE_SIZE - 1)) == (UNW_SP(info) & ~(PAGE_SIZE - 1))
+		    && sp > UNW_SP(info))
+			break;
+		sp = UNW_SP(info);
 	}
 	return n;
 }
 
+#define MSG(txt) ops->warning(data, txt)
+
 /*
  * x86-64 can have upto three kernel stacks: 
  * process stack
@@ -248,11 +257,12 @@
         return p > t && p < t + THREAD_SIZE - 3;
 }
 
-void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack,
+void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
+		unsigned long *stack,
 		struct stacktrace_ops *ops, void *data)
 {
-	const unsigned cpu = smp_processor_id();
-	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
+	const unsigned cpu = get_cpu();
+	unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr;
 	unsigned used = 0;
 	struct thread_info *tinfo;
 
@@ -268,28 +278,30 @@
 			if (unwind_init_frame_info(&info, tsk, regs) == 0)
 				unw_ret = dump_trace_unwind(&info, &oad);
 		} else if (tsk == current)
-			unw_ret = unwind_init_running(&info, dump_trace_unwind, &oad);
+			unw_ret = unwind_init_running(&info, dump_trace_unwind,
+						      &oad);
 		else {
 			if (unwind_init_blocked(&info, tsk) == 0)
 				unw_ret = dump_trace_unwind(&info, &oad);
 		}
 		if (unw_ret > 0) {
 			if (call_trace == 1 && !arch_unw_user_mode(&info)) {
-				ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
+				ops->warning_symbol(data,
+					     "DWARF2 unwinder stuck at %s",
 					     UNW_PC(&info));
 				if ((long)UNW_SP(&info) < 0) {
-					ops->warning(data, "Leftover inexact backtrace:\n");
+					MSG("Leftover inexact backtrace:");
 					stack = (unsigned long *)UNW_SP(&info);
 					if (!stack)
-						return;
+						goto out;
 				} else
-					ops->warning(data, "Full inexact backtrace again:\n");
+					MSG("Full inexact backtrace again:");
 			} else if (call_trace >= 1)
-				return;
+				goto out;
 			else
-				ops->warning(data, "Full inexact backtrace again:\n");
+				MSG("Full inexact backtrace again:");
 		} else
-			ops->warning(data, "Inexact backtrace:\n");
+			MSG("Inexact backtrace:");
 	}
 	if (!stack) {
 		unsigned long dummy;
@@ -297,12 +309,6 @@
 		if (tsk && tsk != current)
 			stack = (unsigned long *)tsk->thread.rsp;
 	}
-	/*
-	 * Align the stack pointer on word boundary, later loops
-	 * rely on that (and corruption / debug info bugs can cause
-	 * unaligned values here):
-	 */
-	stack = (unsigned long *)((unsigned long)stack & ~(sizeof(long)-1));
 
 	/*
 	 * Print function call entries within a stack. 'cond' is the
@@ -312,9 +318,9 @@
 #define HANDLE_STACK(cond) \
 	do while (cond) { \
 		unsigned long addr = *stack++; \
-		if (oops_in_progress ? 		\
-			__kernel_text_address(addr) : \
-			kernel_text_address(addr)) { \
+		/* Use unlocked access here because except for NMIs	\
+		   we should already be protected against module unloads */ \
+		if (__kernel_text_address(addr)) { \
 			/* \
 			 * If the address is either in the text segment of the \
 			 * kernel, or in the region which contains vmalloc'ed \
@@ -380,6 +386,8 @@
 	tinfo = current_thread_info();
 	HANDLE_STACK (valid_stack_ptr(tinfo, stack));
 #undef HANDLE_STACK
+out:
+	put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
 
@@ -786,8 +794,7 @@
 {
 	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
 		reason);
-	printk(KERN_EMERG "You probably have a hardware problem with your "
-		"RAM chips\n");
+	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
 
 	if (panic_on_unrecovered_nmi)
 		panic("NMI: Not continuing");
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index d9534e7..6a1f8f4 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -51,15 +51,6 @@
 
   RODATA
 
-#ifdef CONFIG_STACK_UNWIND
-  . = ALIGN(8);
-  .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
-	__start_unwind = .;
-  	*(.eh_frame)
-	__end_unwind = .;
-  }
-#endif
-
   . = ALIGN(PAGE_SIZE);        /* Align data segment to page size boundary */
 				/* Data */
   .data : AT(ADDR(.data) - LOAD_OFFSET) {
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index 92546c1..4a673f5 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -42,6 +42,7 @@
 #include <asm/topology.h>
 
 #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
+#define __syscall_clobber "r11","rcx","memory"
 
 int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
 seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
@@ -274,7 +275,6 @@
 	vsyscall_set_cpu(raw_smp_processor_id());
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int __cpuinit
 cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
 {
@@ -283,13 +283,13 @@
 		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
 	return NOTIFY_DONE;
 }
-#endif
 
 static void __init map_vsyscall(void)
 {
 	extern char __vsyscall_0;
 	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
 
+	/* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
 	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
 }
 
diff --git a/arch/x86_64/lib/csum-partial.c b/arch/x86_64/lib/csum-partial.c
index c493735..bc503f5 100644
--- a/arch/x86_64/lib/csum-partial.c
+++ b/arch/x86_64/lib/csum-partial.c
@@ -9,8 +9,6 @@
 #include <linux/module.h>
 #include <asm/checksum.h>
 
-#define __force_inline inline __attribute__((always_inline))
-
 static inline unsigned short from32to16(unsigned a) 
 {
 	unsigned short b = a >> 16; 
@@ -33,7 +31,7 @@
  * Unrolling to an 128 bytes inner loop.
  * Using interleaving with more registers to break the carry chains.
  */
-static __force_inline unsigned do_csum(const unsigned char *buff, unsigned len)
+static unsigned do_csum(const unsigned char *buff, unsigned len)
 {
 	unsigned odd, count;
 	unsigned long result = 0;
@@ -132,9 +130,10 @@
  *
  * it's best to have buff aligned on a 64-bit boundary
  */
-unsigned csum_partial(const unsigned char *buff, unsigned len, unsigned sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
 {
-	return add32_with_carry(do_csum(buff, len), sum); 
+	return (__force __wsum)add32_with_carry(do_csum(buff, len),
+						(__force u32)sum);
 }
 
 EXPORT_SYMBOL(csum_partial);
@@ -143,7 +142,7 @@
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-unsigned short ip_compute_csum(unsigned char * buff, int len)
+__sum16 ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff,len,0));
 }
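
csum_partial() accumulates a wide partial sum and then folds it down with end-around carries; from32to16() above is the final 32-to-16 step. A portable sketch of that folding arithmetic with a naive checksum driver (illustration only, nothing like the unrolled kernel fast path):

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit ones'-complement accumulator to 16 bits, adding the
 * carries back in - the same steps as from32to16() above. */
static uint16_t fold32to16(uint32_t a)
{
	a = (a & 0xffff) + (a >> 16);	/* add the two 16-bit halves */
	a = (a & 0xffff) + (a >> 16);	/* fold in the carry, if any */
	return (uint16_t)a;
}

/* Naive 16-bit ones'-complement checksum of an even-length buffer. */
static uint16_t csum16(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	return (uint16_t)~fold32to16(sum);
}

int main(void)
{
	uint8_t data[] = { 0x45, 0x00, 0x00, 0x1c, 0xab, 0xcd };  /* example bytes */

	printf("checksum = 0x%04x\n", csum16(data, sizeof(data)));
	return 0;
}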
diff --git a/arch/x86_64/lib/csum-wrappers.c b/arch/x86_64/lib/csum-wrappers.c
index b1320ec..fd42a4a 100644
--- a/arch/x86_64/lib/csum-wrappers.c
+++ b/arch/x86_64/lib/csum-wrappers.c
@@ -18,9 +18,9 @@
  * Returns an 32bit unfolded checksum of the buffer.
  * src and dst are best aligned to 64bits. 
  */ 
-unsigned int 
-csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
-			    int len, unsigned int isum, int *errp)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst,
+			    int len, __wsum isum, int *errp)
 { 
 	might_sleep();
 	*errp = 0;
@@ -34,17 +34,19 @@
 		if (unlikely((unsigned long)src & 6)) {			
 			while (((unsigned long)src & 6) && len >= 2) { 
 				__u16 val16;			
-				*errp = __get_user(val16, (__u16 __user *)src); 
+				*errp = __get_user(val16, (const __u16 __user *)src);
 				if (*errp)
 					return isum;
 				*(__u16 *)dst = val16;
-				isum = add32_with_carry(isum, val16); 
+				isum = (__force __wsum)add32_with_carry(
+						(__force unsigned)isum, val16);
 				src += 2; 
 				dst += 2; 
 				len -= 2;
 			}
 		}
-		isum = csum_partial_copy_generic((__force void *)src,dst,len,isum,errp,NULL);
+		isum = csum_partial_copy_generic((__force const void *)src,
+					dst, len, isum, errp, NULL);
 		if (likely(*errp == 0)) 
 			return isum;
 	} 
@@ -66,9 +68,9 @@
  * Returns an 32bit unfolded checksum of the buffer.
  * src and dst are best aligned to 64bits.
  */ 
-unsigned int 
-csum_partial_copy_to_user(unsigned const char *src, unsigned char __user *dst,
-			  int len, unsigned int isum, int *errp)
+__wsum
+csum_partial_copy_to_user(const void *src, void __user *dst,
+			  int len, __wsum isum, int *errp)
 { 
 	might_sleep();
 	if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
@@ -79,7 +81,8 @@
 	if (unlikely((unsigned long)dst & 6)) {
 		while (((unsigned long)dst & 6) && len >= 2) { 
 			__u16 val16 = *(__u16 *)src;
-			isum = add32_with_carry(isum, val16);
+			isum = (__force __wsum)add32_with_carry(
+					(__force unsigned)isum, val16);
 			*errp = __put_user(val16, (__u16 __user *)dst);
 			if (*errp)
 				return isum;
@@ -104,19 +107,21 @@
  * 
  * Returns an 32bit unfolded checksum of the buffer.
  */ 
-unsigned int 
-csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len, unsigned int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 { 
 	return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL);
 } 
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
 
-unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
-			       __u32 len, unsigned short proto, unsigned int sum) 
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+			const struct in6_addr *daddr,
+			__u32 len, unsigned short proto, __wsum sum)
 {
 	__u64 rest, sum64;
      
-	rest = (__u64)htonl(len) + (__u64)htons(proto) + (__u64)sum;
+	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
+		(__force __u64)sum;
 	asm("  addq (%[saddr]),%[sum]\n"
 	    "  adcq 8(%[saddr]),%[sum]\n"
 	    "  adcq (%[daddr]),%[sum]\n" 
@@ -124,7 +129,7 @@
 	    "  adcq $0,%[sum]\n"
 	    : [sum] "=r" (sum64) 
 	    : "[sum]" (rest),[saddr] "r" (saddr), [daddr] "r" (daddr));
-	return csum_fold(add32_with_carry(sum64 & 0xffffffff, sum64>>32));
+	return csum_fold((__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
 }
 
 EXPORT_SYMBOL(csum_ipv6_magic);
diff --git a/arch/x86_64/lib/delay.c b/arch/x86_64/lib/delay.c
index 50be909..2dbebd3 100644
--- a/arch/x86_64/lib/delay.c
+++ b/arch/x86_64/lib/delay.c
@@ -40,13 +40,13 @@
 
 inline void __const_udelay(unsigned long xloops)
 {
-	__delay((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32);
+	__delay(((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) + 1);
 }
 EXPORT_SYMBOL(__const_udelay);
 
 void __udelay(unsigned long usecs)
 {
-	__const_udelay(usecs * 0x000010c6);  /* 2**32 / 1000000 */
+	__const_udelay(usecs * 0x000010c7);  /* 2**32 / 1000000 (rounded up) */
 }
 EXPORT_SYMBOL(__udelay);
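
The constant change above exists because 0x10c6 is floor(2^32 / 10^6): truncating it makes every __udelay() come out slightly shorter than requested, while rounding up to 0x10c7 can only err on the long side. A quick user-space check of the two constants:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t div = 1000000;				/* microseconds per second */
	uint64_t fl = (1ULL << 32) / div;		/* 0x10c6: truncated */
	uint64_t ce = ((1ULL << 32) + div - 1) / div;	/* 0x10c7: rounded up */

	printf("floor = %#llx  ceil = %#llx\n",
	       (unsigned long long)fl, (unsigned long long)ce);

	/* The truncated constant undershoots 2^32/10^6, so delays scaled by
	 * it come out short; the rounded-up constant never undershoots. */
	printf("floor*10^6 = %llu (< 2^32 = %llu)\n",
	       (unsigned long long)(fl * div), 1ULL << 32);
	printf("ceil *10^6 = %llu (>= 2^32)\n",
	       (unsigned long long)(ce * div));
	return 0;
}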
 
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 3751b47..a65fc6f 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -23,9 +23,9 @@
 #include <linux/compiler.h>
 #include <linux/module.h>
 #include <linux/kprobes.h>
+#include <linux/uaccess.h>
 
 #include <asm/system.h>
-#include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 #include <asm/smp.h>
 #include <asm/tlbflush.h>
@@ -96,7 +96,7 @@
 static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
 				unsigned long error_code)
 { 
-	unsigned char __user *instr;
+	unsigned char *instr;
 	int scan_more = 1;
 	int prefetch = 0; 
 	unsigned char *max_instr;
@@ -116,7 +116,7 @@
 		unsigned char instr_hi;
 		unsigned char instr_lo;
 
-		if (__get_user(opcode, (char __user *)instr))
+		if (probe_kernel_address(instr, opcode))
 			break; 
 
 		instr_hi = opcode & 0xf0; 
@@ -154,7 +154,7 @@
 		case 0x00:
 			/* Prefetch instruction is 0x0F0D or 0x0F18 */
 			scan_more = 0;
-			if (__get_user(opcode, (char __user *)instr))
+			if (probe_kernel_address(instr, opcode))
 				break;
 			prefetch = (instr_lo == 0xF) &&
 				(opcode == 0x0D || opcode == 0x18);
@@ -170,7 +170,7 @@
 static int bad_address(void *p) 
 { 
 	unsigned long dummy;
-	return __get_user(dummy, (unsigned long __user *)p);
+	return probe_kernel_address((unsigned long *)p, dummy);
 } 
 
 void dump_pagetable(unsigned long address)
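
is_prefetch() now reads the faulting instruction bytes with probe_kernel_address() instead of __get_user(), but the decode itself is unchanged: a prefetch is the two-byte opcode 0x0F 0x0D (3DNow!) or 0x0F 0x18 (SSE), possibly behind prefixes. A stand-alone sketch of just the opcode test over an in-memory buffer (illustration only, no prefix or ModRM handling):

#include <stdint.h>
#include <stdio.h>

/* Return 1 if the bytes at the start of `code` encode a prefetch:
 * 0x0F 0x0D (3DNow! PREFETCH) or 0x0F 0x18 (SSE PREFETCHh). */
static int is_prefetch_opcode(const uint8_t *code, size_t len)
{
	if (len < 2 || code[0] != 0x0f)
		return 0;
	return code[1] == 0x0d || code[1] == 0x18;
}

int main(void)
{
	const uint8_t prefetchnta[] = { 0x0f, 0x18, 0x06 };	/* prefetchnta (%rsi) */
	const uint8_t mov[]         = { 0x48, 0x8b, 0x06 };	/* mov (%rsi),%rax */

	printf("%d %d\n",
	       is_prefetch_opcode(prefetchnta, sizeof(prefetchnta)),
	       is_prefetch_opcode(mov, sizeof(mov)));
	return 0;
}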
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 4c0c00e..2968b90 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -730,14 +730,15 @@
 __initcall(x8664_sysctl_init);
 #endif
 
-/* A pseudo VMAs to allow ptrace access for the vsyscall page.   This only
+/* A pseudo VMA to allow ptrace access for the vsyscall page.  This only
    covers the 64bit vsyscall page now. 32bit has a real VMA now and does
    not need special handling anymore. */
 
 static struct vm_area_struct gate_vma = {
 	.vm_start = VSYSCALL_START,
-	.vm_end = VSYSCALL_END,
-	.vm_page_prot = PAGE_READONLY
+	.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
+	.vm_page_prot = PAGE_READONLY_EXEC,
+	.vm_flags = VM_READ | VM_EXEC
 };
 
 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 3e231d76..ccb91dd 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -61,34 +61,40 @@
 	return base;
 } 
 
-
-static void flush_kernel_map(void *address) 
+static void cache_flush_page(void *adr)
 {
-	if (0 && address && cpu_has_clflush) {
-		/* is this worth it? */ 
-		int i;
-		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) 
-			asm volatile("clflush (%0)" :: "r" (address + i)); 
-	} else
-		asm volatile("wbinvd":::"memory"); 
-	if (address)
-		__flush_tlb_one(address);
-	else
-		__flush_tlb_all();
+	int i;
+	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+		asm volatile("clflush (%0)" :: "r" (adr + i));
 }
 
+static void flush_kernel_map(void *arg)
+{
+	struct list_head *l = (struct list_head *)arg;
+	struct page *pg;
 
-static inline void flush_map(unsigned long address)
+	/* When clflush is available always use it because it is
+	   much cheaper than WBINVD */
+	if (!cpu_has_clflush)
+		asm volatile("wbinvd" ::: "memory");
+	list_for_each_entry(pg, l, lru) {
+		void *adr = page_address(pg);
+		if (cpu_has_clflush)
+			cache_flush_page(adr);
+		__flush_tlb_one(adr);
+	}
+}
+
+static inline void flush_map(struct list_head *l)
 {	
-	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
+	on_each_cpu(flush_kernel_map, l, 1, 1);
 }
 
-static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
+static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
 
 static inline void save_page(struct page *fpage)
 {
-	fpage->lru.next = (struct list_head *)deferred_pages;
-	deferred_pages = fpage;
+	list_add(&fpage->lru, &deferred_pages);
 }
 
 /* 
@@ -207,18 +213,18 @@
 
 void global_flush_tlb(void)
 { 
-	struct page *dpage;
+	struct page *pg, *next;
+	struct list_head l;
 
 	down_read(&init_mm.mmap_sem);
-	dpage = xchg(&deferred_pages, NULL);
+	list_replace_init(&deferred_pages, &l);
 	up_read(&init_mm.mmap_sem);
 
-	flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
-	while (dpage) {
-		struct page *tmp = dpage;
-		dpage = (struct page *)dpage->lru.next;
-		ClearPagePrivate(tmp);
-		__free_page(tmp);
+	flush_map(&l);
+
+	list_for_each_entry_safe(pg, next, &l, lru) {
+		ClearPagePrivate(pg);
+		__free_page(pg);
 	} 
 } 
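
flush_kernel_map() now prefers clflush over a global wbinvd and walks each page one cache line at a time; cache_flush_page() strides by the CPU's reported clflush line size. A user-space sketch of the same stride loop, with the line size hard-coded to 64 bytes instead of taken from CPUID (x86-only inline asm, illustration only):

#include <stdio.h>
#include <stdlib.h>

#define CLFLUSH_SIZE	64	/* typical boot_cpu_data.x86_clflush_size */
#define PAGE_SIZE	4096

/* Flush one page from the caches a line at a time, like cache_flush_page(). */
static void cache_flush_page(void *adr)
{
	int i;

	for (i = 0; i < PAGE_SIZE; i += CLFLUSH_SIZE)
		asm volatile("clflush (%0)" :: "r" ((char *)adr + i) : "memory");
}

int main(void)
{
	void *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!page)
		return 1;
	((char *)page)[0] = 1;		/* touch it so something is cached */
	cache_flush_page(page);
	puts("page flushed");
	free(page);
	return 0;
}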
 
diff --git a/block/Kconfig b/block/Kconfig
index 83766a6..a50f481 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -19,11 +19,9 @@
 
 if BLOCK
 
-#XXX - it makes sense to enable this only for 32-bit subarch's, not for x86_64
-#for instance.
 config LBD
 	bool "Support for Large Block Devices"
-	depends on X86 || (MIPS && 32BIT) || PPC32 || (S390 && !64BIT) || SUPERH || UML
+	depends on !64BIT
 	help
 	  Say Y here if you want to attach large (bigger than 2TB) discs to
 	  your machine, or if you want to have a raid or loopback device
@@ -44,7 +42,7 @@
 
 config LSF
 	bool "Support for Large Single Files"
-	depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML
+	depends on !64BIT
 	help
 	  Say Y here if you want to be able to handle very large files (bigger
 	  than 2TB), otherwise say N.
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 50b95e4..5934c4b 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1274,9 +1274,10 @@
  *
  * FIXME! dispatch queue is not a queue at all!
  */
-static void as_work_handler(void *data)
+static void as_work_handler(struct work_struct *work)
 {
-	struct request_queue *q = data;
+	struct as_data *ad = container_of(work, struct as_data, antic_work);
+	struct request_queue *q = ad->q;
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
@@ -1317,7 +1318,7 @@
 /*
  * initialize elevator private data (as_data).
  */
-static void *as_init_queue(request_queue_t *q, elevator_t *e)
+static void *as_init_queue(request_queue_t *q)
 {
 	struct as_data *ad;
 
@@ -1332,7 +1333,7 @@
 	ad->antic_timer.function = as_antic_timeout;
 	ad->antic_timer.data = (unsigned long)q;
 	init_timer(&ad->antic_timer);
-	INIT_WORK(&ad->antic_work, as_work_handler, q);
+	INIT_WORK(&ad->antic_work, as_work_handler);
 
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
 	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
diff --git a/block/blktrace.c b/block/blktrace.c
index 135593c..d3679dd 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -22,30 +22,58 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/debugfs.h>
+#include <linux/time.h>
 #include <asm/uaccess.h>
 
 static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
 static unsigned int blktrace_seq __read_mostly = 1;
 
 /*
+ * Send out a notify message.
+ */
+static void trace_note(struct blk_trace *bt, pid_t pid, int action,
+		       const void *data, size_t len)
+{
+	struct blk_io_trace *t;
+
+	t = relay_reserve(bt->rchan, sizeof(*t) + len);
+	if (t) {
+		const int cpu = smp_processor_id();
+
+		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+		t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+		t->device = bt->dev;
+		t->action = action;
+		t->pid = pid;
+		t->cpu = cpu;
+		t->pdu_len = len;
+		memcpy((void *) t + sizeof(*t), data, len);
+	}
+}
+
+/*
  * Send out a notify for this process, if we haven't done so since a trace
  * started
  */
 static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
 {
-	struct blk_io_trace *t;
+	tsk->btrace_seq = blktrace_seq;
+	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
+}
 
-	t = relay_reserve(bt->rchan, sizeof(*t) + sizeof(tsk->comm));
-	if (t) {
-		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
-		t->device = bt->dev;
-		t->action = BLK_TC_ACT(BLK_TC_NOTIFY);
-		t->pid = tsk->pid;
-		t->cpu = smp_processor_id();
-		t->pdu_len = sizeof(tsk->comm);
-		memcpy((void *) t + sizeof(*t), tsk->comm, t->pdu_len);
-		tsk->btrace_seq = blktrace_seq;
-	}
+static void trace_note_time(struct blk_trace *bt)
+{
+	struct timespec now;
+	unsigned long flags;
+	u32 words[2];
+
+	getnstimeofday(&now);
+	words[0] = now.tv_sec;
+	words[1] = now.tv_nsec;
+
+	local_irq_save(flags);
+	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
+	local_irq_restore(flags);
 }
 
 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
@@ -366,8 +394,7 @@
 	if (bt) {
 		if (bt->dropped_file)
 			debugfs_remove(bt->dropped_file);
-		if (bt->sequence)
-			free_percpu(bt->sequence);
+		free_percpu(bt->sequence);
 		if (bt->rchan)
 			relay_close(bt->rchan);
 		kfree(bt);
@@ -394,6 +421,8 @@
 			blktrace_seq++;
 			smp_mb();
 			bt->trace_state = Blktrace_running;
+
+			trace_note_time(bt);
 			ret = 0;
 		}
 	} else {
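
trace_note_time() above stores the wall-clock time as two 32-bit words (seconds, nanoseconds) in a BLK_TN_TIMESTAMP note, so user space can correlate the sched_clock()-based trace timestamps with real time. The packing is simple; a small user-space sketch using clock_gettime() (illustration only, may need -lrt on older glibc):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec now;
	uint32_t words[2];

	clock_gettime(CLOCK_REALTIME, &now);
	words[0] = (uint32_t)now.tv_sec;	/* truncated to u32 like the kernel's */
	words[1] = (uint32_t)now.tv_nsec;

	/* These two words are what the kernel memcpy()s into the note payload. */
	printf("sec=%u nsec=%u\n", words[0], words[1]);
	return 0;
}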
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1d9c3c7..78c6b31 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -43,8 +43,8 @@
 #define RQ_CIC(rq)		((struct cfq_io_context*)(rq)->elevator_private)
 #define RQ_CFQQ(rq)		((rq)->elevator_private2)
 
-static kmem_cache_t *cfq_pool;
-static kmem_cache_t *cfq_ioc_pool;
+static struct kmem_cache *cfq_pool;
+static struct kmem_cache *cfq_ioc_pool;
 
 static DEFINE_PER_CPU(unsigned long, ioc_count);
 static struct completion *ioc_gone;
@@ -1464,8 +1464,7 @@
 }
 
 static void
-cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
-		       struct request *rq)
+cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
 {
 	sector_t sdist;
 	u64 total;
@@ -1617,7 +1616,7 @@
 	}
 
 	cfq_update_io_thinktime(cfqd, cic);
-	cfq_update_io_seektime(cfqd, cic, rq);
+	cfq_update_io_seektime(cic, rq);
 	cfq_update_idle_window(cfqd, cfqq, cic);
 
 	cic->last_queue = jiffies;
@@ -1770,7 +1769,7 @@
 /*
  * queue lock held here
  */
-static void cfq_put_request(request_queue_t *q, struct request *rq)
+static void cfq_put_request(struct request *rq)
 {
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
@@ -1841,9 +1840,11 @@
 	return 1;
 }
 
-static void cfq_kick_queue(void *data)
+static void cfq_kick_queue(struct work_struct *work)
 {
-	request_queue_t *q = data;
+	struct cfq_data *cfqd =
+		container_of(work, struct cfq_data, unplug_work);
+	request_queue_t *q = cfqd->queue;
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
@@ -1951,7 +1952,7 @@
 	kfree(cfqd);
 }
 
-static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q)
 {
 	struct cfq_data *cfqd;
 	int i;
@@ -1987,7 +1988,7 @@
 	cfqd->idle_class_timer.function = cfq_idle_class_timer;
 	cfqd->idle_class_timer.data = (unsigned long) cfqd;
 
-	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
+	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index b7c5b34..6d673e9 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -356,7 +356,7 @@
 /*
  * initialize elevator private data (deadline_data).
  */
-static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
+static void *deadline_init_queue(request_queue_t *q)
 {
 	struct deadline_data *dd;
 
diff --git a/block/elevator.c b/block/elevator.c
index 8ccd163..c0063f3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -129,7 +129,7 @@
 
 static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
 {
-	return eq->ops->elevator_init_fn(q, eq);
+	return eq->ops->elevator_init_fn(q);
 }
 
 static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
@@ -810,7 +810,7 @@
 	elevator_t *e = q->elevator;
 
 	if (e->ops->elevator_put_req_fn)
-		e->ops->elevator_put_req_fn(q, rq);
+		e->ops->elevator_put_req_fn(rq);
 }
 
 int elv_may_queue(request_queue_t *q, int rw)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 9eaee66..31512cd 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -34,7 +34,7 @@
  */
 #include <scsi/scsi_cmnd.h>
 
-static void blk_unplug_work(void *data);
+static void blk_unplug_work(struct work_struct *work);
 static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
@@ -44,17 +44,17 @@
 /*
  * For the allocated request tables
  */
-static kmem_cache_t *request_cachep;
+static struct kmem_cache *request_cachep;
 
 /*
  * For queue allocation
  */
-static kmem_cache_t *requestq_cachep;
+static struct kmem_cache *requestq_cachep;
 
 /*
  * For io context allocations
  */
-static kmem_cache_t *iocontext_cachep;
+static struct kmem_cache *iocontext_cachep;
 
 /*
  * Controlling structure to kblockd
@@ -227,7 +227,7 @@
 	if (q->unplug_delay == 0)
 		q->unplug_delay = 1;
 
-	INIT_WORK(&q->unplug_work, blk_unplug_work, q);
+	INIT_WORK(&q->unplug_work, blk_unplug_work);
 
 	q->unplug_timer.function = blk_unplug_timeout;
 	q->unplug_timer.data = (unsigned long)q;
@@ -1631,9 +1631,9 @@
 	}
 }
 
-static void blk_unplug_work(void *data)
+static void blk_unplug_work(struct work_struct *work)
 {
-	request_queue_t *q = data;
+	request_queue_t *q = container_of(work, request_queue_t, unplug_work);
 
 	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
 				q->rq.count[READ] + q->rq.count[WRITE]);
@@ -2322,6 +2322,84 @@
 
 EXPORT_SYMBOL(blk_insert_request);
 
+static int __blk_rq_unmap_user(struct bio *bio)
+{
+	int ret = 0;
+
+	if (bio) {
+		if (bio_flagged(bio, BIO_USER_MAPPED))
+			bio_unmap_user(bio);
+		else
+			ret = bio_uncopy_user(bio);
+	}
+
+	return ret;
+}
+
+static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+			     void __user *ubuf, unsigned int len)
+{
+	unsigned long uaddr;
+	struct bio *bio, *orig_bio;
+	int reading, ret;
+
+	reading = rq_data_dir(rq) == READ;
+
+	/*
+	 * if alignment requirement is satisfied, map in user pages for
+	 * direct dma. else, set up kernel bounce buffers
+	 */
+	uaddr = (unsigned long) ubuf;
+	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+		bio = bio_map_user(q, NULL, uaddr, len, reading);
+	else
+		bio = bio_copy_user(q, uaddr, len, reading);
+
+	if (IS_ERR(bio)) {
+		return PTR_ERR(bio);
+	}
+
+	orig_bio = bio;
+	blk_queue_bounce(q, &bio);
+	/*
+	 * We link the bounce buffer in and could have to traverse it
+	 * later so we have to get a ref to prevent it from being freed
+	 */
+	bio_get(bio);
+
+	/*
+	 * for most (all? don't know of any) queues we could
+	 * skip grabbing the queue lock here. only drivers with
+	 * funky private ->back_merge_fn() function could be
+	 * problematic.
+	 */
+	spin_lock_irq(q->queue_lock);
+	if (!rq->bio)
+		blk_rq_bio_prep(q, rq, bio);
+	else if (!q->back_merge_fn(q, rq, bio)) {
+		ret = -EINVAL;
+		spin_unlock_irq(q->queue_lock);
+		goto unmap_bio;
+	} else {
+		rq->biotail->bi_next = bio;
+		rq->biotail = bio;
+
+		rq->nr_sectors += bio_sectors(bio);
+		rq->hard_nr_sectors = rq->nr_sectors;
+		rq->data_len += bio->bi_size;
+	}
+	spin_unlock_irq(q->queue_lock);
+
+	return bio->bi_size;
+
+unmap_bio:
+	/* if it was bounced we must call the end io function */
+	bio_endio(bio, bio->bi_size, 0);
+	__blk_rq_unmap_user(orig_bio);
+	bio_put(bio);
+	return ret;
+}
+
 /**
  * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
  * @q:		request queue where request should be inserted
@@ -2343,42 +2421,44 @@
  *    unmapping.
  */
 int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
-		    unsigned int len)
+		    unsigned long len)
 {
-	unsigned long uaddr;
-	struct bio *bio;
-	int reading;
+	unsigned long bytes_read = 0;
+	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len || !ubuf)
 		return -EINVAL;
 
-	reading = rq_data_dir(rq) == READ;
+	while (bytes_read != len) {
+		unsigned long map_len, end, start;
 
-	/*
-	 * if alignment requirement is satisfied, map in user pages for
-	 * direct dma. else, set up kernel bounce buffers
-	 */
-	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-		bio = bio_map_user(q, NULL, uaddr, len, reading);
-	else
-		bio = bio_copy_user(q, uaddr, len, reading);
+		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
+		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
+								>> PAGE_SHIFT;
+		start = (unsigned long)ubuf >> PAGE_SHIFT;
 
-	if (!IS_ERR(bio)) {
-		rq->bio = rq->biotail = bio;
-		blk_rq_bio_prep(q, rq, bio);
+		/*
+		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
+		 * pages. If this happens we just lower the requested
+		 * mapping len by a page so that we can fit
+		 */
+		if (end - start > BIO_MAX_PAGES)
+			map_len -= PAGE_SIZE;
 
-		rq->buffer = rq->data = NULL;
-		rq->data_len = len;
-		return 0;
+		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+		if (ret < 0)
+			goto unmap_rq;
+		bytes_read += ret;
+		ubuf += ret;
 	}
 
-	/*
-	 * bio is the err-ptr
-	 */
-	return PTR_ERR(bio);
+	rq->buffer = rq->data = NULL;
+	return 0;
+unmap_rq:
+	blk_rq_unmap_user(rq);
+	return ret;
 }
 
 EXPORT_SYMBOL(blk_rq_map_user);
@@ -2404,7 +2484,7 @@
  *    unmapping.
  */
 int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
-			struct sg_iovec *iov, int iov_count)
+			struct sg_iovec *iov, int iov_count, unsigned int len)
 {
 	struct bio *bio;
 
@@ -2418,10 +2498,15 @@
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
-	rq->bio = rq->biotail = bio;
+	if (bio->bi_size != len) {
+		bio_endio(bio, bio->bi_size, 0);
+		bio_unmap_user(bio);
+		return -EINVAL;
+	}
+
+	bio_get(bio);
 	blk_rq_bio_prep(q, rq, bio);
 	rq->buffer = rq->data = NULL;
-	rq->data_len = bio->bi_size;
 	return 0;
 }
 
@@ -2429,23 +2514,26 @@
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @bio:	bio to be unmapped
- * @ulen:	length of user buffer
+ * @rq:		rq to be unmapped
  *
  * Description:
- *    Unmap a bio previously mapped by blk_rq_map_user().
+ *    Unmap a rq previously mapped by blk_rq_map_user().
+ *    rq->bio must be set to the original head of the request.
  */
-int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct request *rq)
 {
-	int ret = 0;
+	struct bio *bio, *mapped_bio;
 
-	if (bio) {
-		if (bio_flagged(bio, BIO_USER_MAPPED))
-			bio_unmap_user(bio);
+	while ((bio = rq->bio)) {
+		if (bio_flagged(bio, BIO_BOUNCED))
+			mapped_bio = bio->bi_private;
 		else
-			ret = bio_uncopy_user(bio);
-	}
+			mapped_bio = bio;
 
+		__blk_rq_unmap_user(mapped_bio);
+		rq->bio = bio->bi_next;
+		bio_put(bio);
+	}
 	return 0;
 }
 
@@ -2476,11 +2564,8 @@
 	if (rq_data_dir(rq) == WRITE)
 		bio->bi_rw |= (1 << BIO_RW);
 
-	rq->bio = rq->biotail = bio;
 	blk_rq_bio_prep(q, rq, bio);
-
 	rq->buffer = rq->data = NULL;
-	rq->data_len = len;
 	return 0;
 }
 
@@ -3374,8 +3459,6 @@
 	}
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
 static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
 			  void *hcpu)
 {
@@ -3401,8 +3484,6 @@
 	.notifier_call	= blk_cpu_notify,
 };
 
-#endif /* CONFIG_HOTPLUG_CPU */
-
 /**
  * blk_complete_request - end I/O on a request
  * @req:      the request being processed
@@ -3495,6 +3576,7 @@
 	rq->hard_cur_sectors = rq->current_nr_sectors;
 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
 	rq->buffer = bio_data(bio);
+	rq->data_len = bio->bi_size;
 
 	rq->bio = rq->biotail = bio;
 }
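
The new blk_rq_map_user() loop maps at most BIO_MAX_SIZE bytes per iteration and first counts how many pages the chunk touches; an unlucky offset can make a chunk span BIO_MAX_PAGES + 1 pages, in which case the chunk is shrunk by one page. A stand-alone sketch of that page-count check (the constants here are stand-ins, not the kernel's):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define MAX_PAGES	256		/* stand-in for BIO_MAX_PAGES */

/* Trim map_len so the chunk starting at ubuf never spans more than
 * MAX_PAGES pages, mirroring the check in blk_rq_map_user() above. */
static unsigned long trim_to_max_pages(unsigned long ubuf, unsigned long map_len)
{
	unsigned long end = (ubuf + map_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = ubuf >> PAGE_SHIFT;

	if (end - start > MAX_PAGES)
		map_len -= PAGE_SIZE;
	return map_len;
}

int main(void)
{
	unsigned long aligned = 0x100000;	   /* page aligned: exactly 256 pages */
	unsigned long offset  = 0x100000 + 0x200;  /* misaligned: would need 257 */

	printf("%lu\n", trim_to_max_pages(aligned, MAX_PAGES * PAGE_SIZE));
	printf("%lu\n", trim_to_max_pages(offset,  MAX_PAGES * PAGE_SIZE));
	return 0;
}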
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 79af431..1c3de2b 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -65,7 +65,7 @@
 	return list_entry(rq->queuelist.next, struct request, queuelist);
 }
 
-static void *noop_init_queue(request_queue_t *q, elevator_t *e)
+static void *noop_init_queue(request_queue_t *q)
 {
 	struct noop_data *nd;
 
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index e55a756..b3e2107 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -226,7 +226,6 @@
 	unsigned long start_time;
 	int writing = 0, ret = 0;
 	struct request *rq;
-	struct bio *bio;
 	char sense[SCSI_SENSE_BUFFERSIZE];
 	unsigned char cmd[BLK_MAX_CDB];
 
@@ -258,6 +257,32 @@
 	if (!rq)
 		return -ENOMEM;
 
+	/*
+	 * fill in request structure
+	 */
+	rq->cmd_len = hdr->cmd_len;
+	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+	memcpy(rq->cmd, cmd, hdr->cmd_len);
+
+	memset(sense, 0, sizeof(sense));
+	rq->sense = sense;
+	rq->sense_len = 0;
+
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
+	/*
+	 * bounce this after holding a reference to the original bio, it's
+	 * needed for proper unmapping
+	 */
+	if (rq->bio)
+		blk_queue_bounce(q, &rq->bio);
+
+	rq->timeout = msecs_to_jiffies(hdr->timeout);
+	if (!rq->timeout)
+		rq->timeout = q->sg_timeout;
+	if (!rq->timeout)
+		rq->timeout = BLK_DEFAULT_TIMEOUT;
+
 	if (hdr->iovec_count) {
 		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
 		struct sg_iovec *iov;
@@ -274,7 +299,8 @@
 			goto out;
 		}
 
-		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
+		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
+					  hdr->dxfer_len);
 		kfree(iov);
 	} else if (hdr->dxfer_len)
 		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
@@ -282,33 +308,6 @@
 	if (ret)
 		goto out;
 
-	/*
-	 * fill in request structure
-	 */
-	rq->cmd_len = hdr->cmd_len;
-	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
-	memcpy(rq->cmd, cmd, hdr->cmd_len);
-
-	memset(sense, 0, sizeof(sense));
-	rq->sense = sense;
-	rq->sense_len = 0;
-
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	bio = rq->bio;
-
-	/*
-	 * bounce this after holding a reference to the original bio, it's
-	 * needed for proper unmapping
-	 */
-	if (rq->bio)
-		blk_queue_bounce(q, &rq->bio);
-
-	rq->timeout = (hdr->timeout * HZ) / 1000;
-	if (!rq->timeout)
-		rq->timeout = q->sg_timeout;
-	if (!rq->timeout)
-		rq->timeout = BLK_DEFAULT_TIMEOUT;
-
 	rq->retries = 0;
 
 	start_time = jiffies;
@@ -339,7 +338,7 @@
 			hdr->sb_len_wr = len;
 	}
 
-	if (blk_rq_unmap_user(bio, hdr->dxfer_len))
+	if (blk_rq_unmap_user(rq))
 		ret = -EFAULT;
 
 	/* may not have succeeded, but output values written to control
diff --git a/crypto/Kconfig b/crypto/Kconfig
index cbae839..92ba249 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -39,6 +39,17 @@
 	  HMAC: Keyed-Hashing for Message Authentication (RFC2104).
 	  This is required for IPSec.
 
+config CRYPTO_XCBC
+	tristate "XCBC support"
+	depends on EXPERIMENTAL
+	select CRYPTO_HASH
+	select CRYPTO_MANAGER
+	help
+	  XCBC: Keyed-Hashing with encryption algorithm
+		http://www.ietf.org/rfc/rfc3566.txt
+		http://csrc.nist.gov/encryption/modes/proposedmodes/
+		 xcbc-mac/xcbc-mac-spec.pdf
+
 config CRYPTO_NULL
 	tristate "Null algorithms"
 	select CRYPTO_ALGAPI
@@ -128,6 +139,16 @@
 	  See also:
 	  <http://www.cs.technion.ac.il/~biham/Reports/Tiger/>.
 
+config CRYPTO_GF128MUL
+	tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  Efficient table driven implementation of multiplications in the
+	  field GF(2^128).  This is needed by some cipher modes. This
+	  option will be selected automatically if you select such a
+	  cipher mode.  Only select this option by hand if you expect to load
+	  an external module that requires these functions.
+
 config CRYPTO_ECB
 	tristate "ECB support"
 	select CRYPTO_BLKCIPHER
@@ -147,6 +168,19 @@
 	  CBC: Cipher Block Chaining mode
 	  This block cipher algorithm is required for IPSec.
 
+config CRYPTO_LRW
+	tristate "LRW support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_MANAGER
+	select CRYPTO_GF128MUL
+	help
+	  LRW: Liskov Rivest Wagner, a tweakable, non malleable, non movable
+	  narrow block cipher mode for dm-crypt.  Use it with cipher
+	  specification string aes-lrw-benbi, the key must be 256, 320 or 384 bits.
+	  The first 128, 192 or 256 bits in the key are used for AES and the
+	  rest is used to tie each cipher block to its logical position.
+
 config CRYPTO_DES
 	tristate "DES and Triple DES EDE cipher algorithms"
 	select CRYPTO_ALGAPI
diff --git a/crypto/Makefile b/crypto/Makefile
index 7236620..60e3d24 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -15,6 +15,7 @@
 
 obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
+obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
 obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
 obj-$(CONFIG_CRYPTO_MD4) += md4.o
 obj-$(CONFIG_CRYPTO_MD5) += md5.o
@@ -23,8 +24,10 @@
 obj-$(CONFIG_CRYPTO_SHA512) += sha512.o
 obj-$(CONFIG_CRYPTO_WP512) += wp512.o
 obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
+obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
 obj-$(CONFIG_CRYPTO_ECB) += ecb.o
 obj-$(CONFIG_CRYPTO_CBC) += cbc.o
+obj-$(CONFIG_CRYPTO_LRW) += lrw.o
 obj-$(CONFIG_CRYPTO_DES) += des.o
 obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
 obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o
diff --git a/crypto/api.c b/crypto/api.c
index 4fb7fa4..8c44687 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -466,23 +466,8 @@
 	kfree(tfm);
 }
 
-int crypto_alg_available(const char *name, u32 flags)
-{
-	int ret = 0;
-	struct crypto_alg *alg = crypto_alg_mod_lookup(name, 0,
-						       CRYPTO_ALG_ASYNC);
-	
-	if (!IS_ERR(alg)) {
-		crypto_mod_put(alg);
-		ret = 1;
-	}
-	
-	return ret;
-}
-
 EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
 EXPORT_SYMBOL_GPL(crypto_free_tfm);
-EXPORT_SYMBOL_GPL(crypto_alg_available);
 
 int crypto_has_alg(const char *name, u32 type, u32 mask)
 {
diff --git a/crypto/cryptomgr.c b/crypto/cryptomgr.c
index 9b5b156..2ebffb8 100644
--- a/crypto/cryptomgr.c
+++ b/crypto/cryptomgr.c
@@ -40,9 +40,10 @@
 	char template[CRYPTO_MAX_ALG_NAME];
 };
 
-static void cryptomgr_probe(void *data)
+static void cryptomgr_probe(struct work_struct *work)
 {
-	struct cryptomgr_param *param = data;
+	struct cryptomgr_param *param =
+		container_of(work, struct cryptomgr_param, work);
 	struct crypto_template *tmpl;
 	struct crypto_instance *inst;
 	int err;
@@ -112,7 +113,7 @@
 	param->larval.type = larval->alg.cra_flags;
 	param->larval.mask = larval->mask;
 
-	INIT_WORK(&param->work, cryptomgr_probe, param);
+	INIT_WORK(&param->work, cryptomgr_probe);
 	schedule_work(&param->work);
 
 	return NOTIFY_STOP;
diff --git a/crypto/digest.c b/crypto/digest.c
index 0155a94..8f45932 100644
--- a/crypto/digest.c
+++ b/crypto/digest.c
@@ -21,54 +21,6 @@
 #include "internal.h"
 #include "scatterwalk.h"
 
-void crypto_digest_init(struct crypto_tfm *tfm)
-{
-	struct crypto_hash *hash = crypto_hash_cast(tfm);
-	struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
-
-	crypto_hash_init(&desc);
-}
-EXPORT_SYMBOL_GPL(crypto_digest_init);
-
-void crypto_digest_update(struct crypto_tfm *tfm,
-			  struct scatterlist *sg, unsigned int nsg)
-{
-	struct crypto_hash *hash = crypto_hash_cast(tfm);
-	struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
-	unsigned int nbytes = 0;
-	unsigned int i;
-
-	for (i = 0; i < nsg; i++)
-		nbytes += sg[i].length;
-
-	crypto_hash_update(&desc, sg, nbytes);
-}
-EXPORT_SYMBOL_GPL(crypto_digest_update);
-
-void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
-{
-	struct crypto_hash *hash = crypto_hash_cast(tfm);
-	struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
-
-	crypto_hash_final(&desc, out);
-}
-EXPORT_SYMBOL_GPL(crypto_digest_final);
-
-void crypto_digest_digest(struct crypto_tfm *tfm,
-			  struct scatterlist *sg, unsigned int nsg, u8 *out)
-{
-	struct crypto_hash *hash = crypto_hash_cast(tfm);
-	struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
-	unsigned int nbytes = 0;
-	unsigned int i;
-
-	for (i = 0; i < nsg; i++)
-		nbytes += sg[i].length;
-
-	crypto_hash_digest(&desc, sg, nbytes, out);
-}
-EXPORT_SYMBOL_GPL(crypto_digest_digest);
-
 static int init(struct hash_desc *desc)
 {
 	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
new file mode 100644
index 0000000..0a2aadf
--- /dev/null
+++ b/crypto/gf128mul.c
@@ -0,0 +1,466 @@
+/* gf128mul.c - GF(2^128) multiplication functions
+ *
+ * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
+ * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on Dr Brian Gladman's (GPL'd) work published at
+ * http://fp.gladman.plus.com/cryptography_technology/index.htm
+ * See the original copyright notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.   All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+   1. distributions of this source code include the above copyright
+      notice, this list of conditions and the following disclaimer;
+
+   2. distributions in binary form include the above copyright
+      notice, this list of conditions and the following disclaimer
+      in the documentation and/or other associated materials;
+
+   3. the copyright holder's name is not used to endorse products
+      built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue 31/01/2006
+
+ This file provides fast multiplication in GF(128) as required by several
+ cryptographic authentication modes
+*/
+
+#include <crypto/gf128mul.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define gf128mul_dat(q) { \
+	q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
+	q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
+	q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
+	q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
+	q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
+	q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
+	q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
+	q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
+	q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
+	q(0x48), q(0x49), q(0x4a), q(0x4b), q(0x4c), q(0x4d), q(0x4e), q(0x4f),\
+	q(0x50), q(0x51), q(0x52), q(0x53), q(0x54), q(0x55), q(0x56), q(0x57),\
+	q(0x58), q(0x59), q(0x5a), q(0x5b), q(0x5c), q(0x5d), q(0x5e), q(0x5f),\
+	q(0x60), q(0x61), q(0x62), q(0x63), q(0x64), q(0x65), q(0x66), q(0x67),\
+	q(0x68), q(0x69), q(0x6a), q(0x6b), q(0x6c), q(0x6d), q(0x6e), q(0x6f),\
+	q(0x70), q(0x71), q(0x72), q(0x73), q(0x74), q(0x75), q(0x76), q(0x77),\
+	q(0x78), q(0x79), q(0x7a), q(0x7b), q(0x7c), q(0x7d), q(0x7e), q(0x7f),\
+	q(0x80), q(0x81), q(0x82), q(0x83), q(0x84), q(0x85), q(0x86), q(0x87),\
+	q(0x88), q(0x89), q(0x8a), q(0x8b), q(0x8c), q(0x8d), q(0x8e), q(0x8f),\
+	q(0x90), q(0x91), q(0x92), q(0x93), q(0x94), q(0x95), q(0x96), q(0x97),\
+	q(0x98), q(0x99), q(0x9a), q(0x9b), q(0x9c), q(0x9d), q(0x9e), q(0x9f),\
+	q(0xa0), q(0xa1), q(0xa2), q(0xa3), q(0xa4), q(0xa5), q(0xa6), q(0xa7),\
+	q(0xa8), q(0xa9), q(0xaa), q(0xab), q(0xac), q(0xad), q(0xae), q(0xaf),\
+	q(0xb0), q(0xb1), q(0xb2), q(0xb3), q(0xb4), q(0xb5), q(0xb6), q(0xb7),\
+	q(0xb8), q(0xb9), q(0xba), q(0xbb), q(0xbc), q(0xbd), q(0xbe), q(0xbf),\
+	q(0xc0), q(0xc1), q(0xc2), q(0xc3), q(0xc4), q(0xc5), q(0xc6), q(0xc7),\
+	q(0xc8), q(0xc9), q(0xca), q(0xcb), q(0xcc), q(0xcd), q(0xce), q(0xcf),\
+	q(0xd0), q(0xd1), q(0xd2), q(0xd3), q(0xd4), q(0xd5), q(0xd6), q(0xd7),\
+	q(0xd8), q(0xd9), q(0xda), q(0xdb), q(0xdc), q(0xdd), q(0xde), q(0xdf),\
+	q(0xe0), q(0xe1), q(0xe2), q(0xe3), q(0xe4), q(0xe5), q(0xe6), q(0xe7),\
+	q(0xe8), q(0xe9), q(0xea), q(0xeb), q(0xec), q(0xed), q(0xee), q(0xef),\
+	q(0xf0), q(0xf1), q(0xf2), q(0xf3), q(0xf4), q(0xf5), q(0xf6), q(0xf7),\
+	q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \
+}
+
+/*	Given the value i in 0..255 as the byte overflow when a field element
+    in GHASH is multiplied by x^8, this function will return the values that
+    are generated in the lo 16-bit word of the field value by applying the
+    modular polynomial. The values lo_byte and hi_byte are returned via the
+    macro xp_fun(lo_byte, hi_byte) so that the values can be assembled into
+    memory as required by a suitable definition of this macro operating on
+    the table above
+*/
+
+#define xx(p, q)	0x##p##q
+
+#define xda_bbe(i) ( \
+	(i & 0x80 ? xx(43, 80) : 0) ^ (i & 0x40 ? xx(21, c0) : 0) ^ \
+	(i & 0x20 ? xx(10, e0) : 0) ^ (i & 0x10 ? xx(08, 70) : 0) ^ \
+	(i & 0x08 ? xx(04, 38) : 0) ^ (i & 0x04 ? xx(02, 1c) : 0) ^ \
+	(i & 0x02 ? xx(01, 0e) : 0) ^ (i & 0x01 ? xx(00, 87) : 0) \
+)
+
+#define xda_lle(i) ( \
+	(i & 0x80 ? xx(e1, 00) : 0) ^ (i & 0x40 ? xx(70, 80) : 0) ^ \
+	(i & 0x20 ? xx(38, 40) : 0) ^ (i & 0x10 ? xx(1c, 20) : 0) ^ \
+	(i & 0x08 ? xx(0e, 10) : 0) ^ (i & 0x04 ? xx(07, 08) : 0) ^ \
+	(i & 0x02 ? xx(03, 84) : 0) ^ (i & 0x01 ? xx(01, c2) : 0) \
+)
+
+static const u16 gf128mul_table_lle[256] = gf128mul_dat(xda_lle);
+static const u16 gf128mul_table_bbe[256] = gf128mul_dat(xda_bbe);
+
+/* These functions multiply a field element by x and by x^8 in
+ * the polynomial field representation.  They use 64-bit word operations
+ * to gain speed but compensate for machine endianness and hence work
+ * correctly on both styles of machine.
+ */
+
+static void gf128mul_x_lle(be128 *r, const be128 *x)
+{
+	u64 a = be64_to_cpu(x->a);
+	u64 b = be64_to_cpu(x->b);
+	u64 _tt = gf128mul_table_lle[(b << 7) & 0xff];
+
+	r->b = cpu_to_be64((b >> 1) | (a << 63));
+	r->a = cpu_to_be64((a >> 1) ^ (_tt << 48));
+}
+
+static void gf128mul_x_bbe(be128 *r, const be128 *x)
+{
+	u64 a = be64_to_cpu(x->a);
+	u64 b = be64_to_cpu(x->b);
+	u64 _tt = gf128mul_table_bbe[a >> 63];
+
+	r->a = cpu_to_be64((a << 1) | (b >> 63));
+	r->b = cpu_to_be64((b << 1) ^ _tt);
+}
+
+static void gf128mul_x8_lle(be128 *x)
+{
+	u64 a = be64_to_cpu(x->a);
+	u64 b = be64_to_cpu(x->b);
+	u64 _tt = gf128mul_table_lle[b & 0xff];
+
+	x->b = cpu_to_be64((b >> 8) | (a << 56));
+	x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));
+}
+
+static void gf128mul_x8_bbe(be128 *x)
+{
+	u64 a = be64_to_cpu(x->a);
+	u64 b = be64_to_cpu(x->b);
+	u64 _tt = gf128mul_table_bbe[a >> 56];
+
+	x->a = cpu_to_be64((a << 8) | (b >> 56));
+	x->b = cpu_to_be64((b << 8) ^ _tt);
+}
+
+void gf128mul_lle(be128 *r, const be128 *b)
+{
+	be128 p[8];
+	int i;
+
+	p[0] = *r;
+	for (i = 0; i < 7; ++i)
+		gf128mul_x_lle(&p[i + 1], &p[i]);
+
+	memset(r, 0, sizeof(*r));
+	for (i = 0;;) {
+		u8 ch = ((u8 *)b)[15 - i];
+
+		if (ch & 0x80)
+			be128_xor(r, r, &p[0]);
+		if (ch & 0x40)
+			be128_xor(r, r, &p[1]);
+		if (ch & 0x20)
+			be128_xor(r, r, &p[2]);
+		if (ch & 0x10)
+			be128_xor(r, r, &p[3]);
+		if (ch & 0x08)
+			be128_xor(r, r, &p[4]);
+		if (ch & 0x04)
+			be128_xor(r, r, &p[5]);
+		if (ch & 0x02)
+			be128_xor(r, r, &p[6]);
+		if (ch & 0x01)
+			be128_xor(r, r, &p[7]);
+
+		if (++i >= 16)
+			break;
+
+		gf128mul_x8_lle(r);
+	}
+}
+EXPORT_SYMBOL(gf128mul_lle);
+
+void gf128mul_bbe(be128 *r, const be128 *b)
+{
+	be128 p[8];
+	int i;
+
+	p[0] = *r;
+	for (i = 0; i < 7; ++i)
+		gf128mul_x_bbe(&p[i + 1], &p[i]);
+
+	memset(r, 0, sizeof(*r));
+	for (i = 0;;) {
+		u8 ch = ((u8 *)b)[i];
+
+		if (ch & 0x80)
+			be128_xor(r, r, &p[7]);
+		if (ch & 0x40)
+			be128_xor(r, r, &p[6]);
+		if (ch & 0x20)
+			be128_xor(r, r, &p[5]);
+		if (ch & 0x10)
+			be128_xor(r, r, &p[4]);
+		if (ch & 0x08)
+			be128_xor(r, r, &p[3]);
+		if (ch & 0x04)
+			be128_xor(r, r, &p[2]);
+		if (ch & 0x02)
+			be128_xor(r, r, &p[1]);
+		if (ch & 0x01)
+			be128_xor(r, r, &p[0]);
+
+		if (++i >= 16)
+			break;
+
+		gf128mul_x8_bbe(r);
+	}
+}
+EXPORT_SYMBOL(gf128mul_bbe);
+
+/*      This version uses 64k bytes of table space.
+    A 16 byte buffer has to be multiplied by a 16 byte key
+    value in GF(128).  If we consider a GF(128) value in
+    the buffer's lowest byte, we can construct a table of
+    the 256 16 byte values that result from the 256 values
+    of this byte.  This requires 4096 bytes. But we also
+    need such a table for each of the 16 byte positions in
+    the buffer, which makes 64 kbytes in total.
+*/
+/* additional explanation:
+ * t[0][BYTE] contains g*BYTE
+ * t[1][BYTE] contains g*x^8*BYTE
+ *  ..
+ * t[15][BYTE] contains g*x^120*BYTE
+ */
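+/* With these tables a full product a = a*g reduces to xor-ing together 16
+ * table lookups, one per byte of a (see gf128mul_64k_lle() and
+ * gf128mul_64k_bbe() below); no shifting or reduction is needed at
+ * multiplication time.
+ */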
+struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g)
+{
+	struct gf128mul_64k *t;
+	int i, j, k;
+
+	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	if (!t)
+		goto out;
+
+	for (i = 0; i < 16; i++) {
+		t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
+		if (!t->t[i]) {
+			gf128mul_free_64k(t);
+			t = NULL;
+			goto out;
+		}
+	}
+
+	t->t[0]->t[128] = *g;
+	for (j = 64; j > 0; j >>= 1)
+		gf128mul_x_lle(&t->t[0]->t[j], &t->t[0]->t[j + j]);
+
+	for (i = 0;;) {
+		for (j = 2; j < 256; j += j)
+			for (k = 1; k < j; ++k)
+				be128_xor(&t->t[i]->t[j + k],
+					  &t->t[i]->t[j], &t->t[i]->t[k]);
+
+		if (++i >= 16)
+			break;
+
+		for (j = 128; j > 0; j >>= 1) {
+			t->t[i]->t[j] = t->t[i - 1]->t[j];
+			gf128mul_x8_lle(&t->t[i]->t[j]);
+		}
+	}
+
+out:
+	return t;
+}
+EXPORT_SYMBOL(gf128mul_init_64k_lle);
+
+struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g)
+{
+	struct gf128mul_64k *t;
+	int i, j, k;
+
+	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	if (!t)
+		goto out;
+
+	for (i = 0; i < 16; i++) {
+		t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
+		if (!t->t[i]) {
+			gf128mul_free_64k(t);
+			t = NULL;
+			goto out;
+		}
+	}
+
+	t->t[0]->t[1] = *g;
+	for (j = 1; j <= 64; j <<= 1)
+		gf128mul_x_bbe(&t->t[0]->t[j + j], &t->t[0]->t[j]);
+
+	for (i = 0;;) {
+		for (j = 2; j < 256; j += j)
+			for (k = 1; k < j; ++k)
+				be128_xor(&t->t[i]->t[j + k],
+					  &t->t[i]->t[j], &t->t[i]->t[k]);
+
+		if (++i >= 16)
+			break;
+
+		for (j = 128; j > 0; j >>= 1) {
+			t->t[i]->t[j] = t->t[i - 1]->t[j];
+			gf128mul_x8_bbe(&t->t[i]->t[j]);
+		}
+	}
+
+out:
+	return t;
+}
+EXPORT_SYMBOL(gf128mul_init_64k_bbe);
+
+void gf128mul_free_64k(struct gf128mul_64k *t)
+{
+	int i;
+
+	for (i = 0; i < 16; i++)
+		kfree(t->t[i]);
+	kfree(t);
+}
+EXPORT_SYMBOL(gf128mul_free_64k);
+
+void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t)
+{
+	u8 *ap = (u8 *)a;
+	be128 r[1];
+	int i;
+
+	*r = t->t[0]->t[ap[0]];
+	for (i = 1; i < 16; ++i)
+		be128_xor(r, r, &t->t[i]->t[ap[i]]);
+	*a = *r;
+}
+EXPORT_SYMBOL(gf128mul_64k_lle);
+
+void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t)
+{
+	u8 *ap = (u8 *)a;
+	be128 r[1];
+	int i;
+
+	*r = t->t[0]->t[ap[15]];
+	for (i = 1; i < 16; ++i)
+		be128_xor(r, r, &t->t[i]->t[ap[15 - i]]);
+	*a = *r;
+}
+EXPORT_SYMBOL(gf128mul_64k_bbe);
+
+/*      This version uses 4k bytes of table space.
+    A 16 byte buffer has to be multiplied by a 16 byte key
+    value in GF(128).  If we consider a GF(128) value in a
+    single byte, we can construct a table of the 256 16 byte
+    values that result from the 256 values of this byte.
+    This requires 4096 bytes. If we take the highest byte in
+    the buffer and use this table to get the result, we then
+    have to multiply by x^120 to get the final value. For the
+    next highest byte the result has to be multiplied by x^112
+    and so on. But we can do this by accumulating the result
+    in an accumulator starting with the result for the top
+    byte.  We repeatedly multiply the accumulator value by
+    x^8 and then add in (i.e. xor) the 16-byte table value
+    for the next lower byte in the buffer, stopping when we
+    reach the lowest byte.
+*/
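+/* Expressed as a recurrence this is a Horner evaluation: with
+ * tab[BYTE] = g*BYTE the lle variant computes
+ *
+ *	acc = tab[a[15]];
+ *	for (i = 14; i >= 0; i--)
+ *		acc = acc * x^8 ^ tab[a[i]];
+ *
+ * while the bbe variant walks the bytes in the opposite order; this is
+ * exactly what gf128mul_4k_lle() and gf128mul_4k_bbe() below do.
+ */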
+struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g)
+{
+	struct gf128mul_4k *t;
+	int j, k;
+
+	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	if (!t)
+		goto out;
+
+	t->t[128] = *g;
+	for (j = 64; j > 0; j >>= 1)
+		gf128mul_x_lle(&t->t[j], &t->t[j+j]);
+
+	for (j = 2; j < 256; j += j)
+		for (k = 1; k < j; ++k)
+			be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
+
+out:
+	return t;
+}
+EXPORT_SYMBOL(gf128mul_init_4k_lle);
+
+struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g)
+{
+	struct gf128mul_4k *t;
+	int j, k;
+
+	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	if (!t)
+		goto out;
+
+	t->t[1] = *g;
+	for (j = 1; j <= 64; j <<= 1)
+		gf128mul_x_bbe(&t->t[j + j], &t->t[j]);
+
+	for (j = 2; j < 256; j += j)
+		for (k = 1; k < j; ++k)
+			be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
+
+out:
+	return t;
+}
+EXPORT_SYMBOL(gf128mul_init_4k_bbe);
+
+void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t)
+{
+	u8 *ap = (u8 *)a;
+	be128 r[1];
+	int i = 15;
+
+	*r = t->t[ap[15]];
+	while (i--) {
+		gf128mul_x8_lle(r);
+		be128_xor(r, r, &t->t[ap[i]]);
+	}
+	*a = *r;
+}
+EXPORT_SYMBOL(gf128mul_4k_lle);
+
+void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t)
+{
+	u8 *ap = (u8 *)a;
+	be128 r[1];
+	int i = 0;
+
+	*r = t->t[ap[0]];
+	while (++i < 16) {
+		gf128mul_x8_bbe(r);
+		be128_xor(r, r, &t->t[ap[i]]);
+	}
+	*a = *r;
+}
+EXPORT_SYMBOL(gf128mul_4k_bbe);
+
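+/* Minimal usage sketch for the 64k variant (illustrative only; 'key2' and
+ * 'block' stand for arbitrary be128 values owned by the caller, and the
+ * call computes block = block * key2 in GF(2^128)):
+ *
+ *	struct gf128mul_64k *t = gf128mul_init_64k_bbe(&key2);
+ *	if (t) {
+ *		gf128mul_64k_bbe(&block, t);
+ *		gf128mul_free_64k(t);
+ *	}
+ */
+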
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)");
diff --git a/crypto/lrw.c b/crypto/lrw.c
new file mode 100644
index 0000000..5664258
--- /dev/null
+++ b/crypto/lrw.c
@@ -0,0 +1,301 @@
+/* LRW: as defined by Cyril Guyot in
+ *	http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
+ *
+ * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on ecb.c
+ * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+/* This implementation is checked against the test vectors in the above
+ * document and against a test vector provided by Ken Buchanan at
+ * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
+ *
+ * The test vectors are included in the testing module tcrypt.[ch] */
+#include <crypto/algapi.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include <crypto/b128ops.h>
+#include <crypto/gf128mul.h>
+
+struct priv {
+	struct crypto_cipher *child;
+	/* optimizes multiplying a random (non-incrementing, as at the
+	 * start of a new sector) value with key2; we could also have
+	 * used 4k optimization tables or no optimization at all. In the
+	 * latter case we would have to store key2 here. */
+	struct gf128mul_64k *table;
+	/* stores:
+	 *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
+	 *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
+	 *  key2*{ 0,0,...1,1,1,1,1 }, etc
+	 * needed for optimized multiplication of incrementing values
+	 * with key2 */
+	be128 mulinc[128];
+};
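+
+/* With mulinc[], stepping the tweak from block index n to n + 1 in crypt()
+ * below costs a single xor, T ^= mulinc[f(n)], where f(n) is the number of
+ * trailing one bits of n (computed by get_index128()), instead of a full
+ * gf128 multiplication per block.
+ */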
+
+static inline void setbit128_bbe(void *b, int bit)
+{
+	__set_bit(bit ^ 0x78, b);
+}
+
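+/* The key passed to the lrw template is the key for the underlying cipher
+ * (key1) followed by one cipher block (16 bytes) of tweak key material
+ * (key2); setkey() below splits it accordingly.
+ */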
+static int setkey(struct crypto_tfm *parent, const u8 *key,
+		  unsigned int keylen)
+{
+	struct priv *ctx = crypto_tfm_ctx(parent);
+	struct crypto_cipher *child = ctx->child;
+	int err, i;
+	be128 tmp = { 0 };
+	int bsize = crypto_cipher_blocksize(child);
+
+	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
+				       CRYPTO_TFM_REQ_MASK);
+	if ((err = crypto_cipher_setkey(child, key, keylen - bsize)))
+		return err;
+	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
+				     CRYPTO_TFM_RES_MASK);
+
+	if (ctx->table)
+		gf128mul_free_64k(ctx->table);
+
+	/* initialize multiplication table for Key2 */
+	ctx->table = gf128mul_init_64k_bbe((be128 *)(key + keylen - bsize));
+	if (!ctx->table)
+		return -ENOMEM;
+
+	/* initialize optimization table */
+	for (i = 0; i < 128; i++) {
+		setbit128_bbe(&tmp, i);
+		ctx->mulinc[i] = tmp;
+		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
+	}
+
+	return 0;
+}
+
+struct sinfo {
+	be128 t;
+	struct crypto_tfm *tfm;
+	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+};
+
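+/* 128-bit big-endian increment of the tweak block index */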
+static inline void inc(be128 *iv)
+{
+	if (!(iv->b = cpu_to_be64(be64_to_cpu(iv->b) + 1)))
+		iv->a = cpu_to_be64(be64_to_cpu(iv->a) + 1);
+}
+
+static inline void lrw_round(struct sinfo *s, void *dst, const void *src)
+{
+	be128_xor(dst, &s->t, src);		/* PP <- T xor P */
+	s->fn(s->tfm, dst, dst);		/* CC <- E(Key2,PP) */
+	be128_xor(dst, dst, &s->t);		/* C <- T xor CC */
+}
+
+/* this returns the number of consecutive 1 bits starting
+ * from the right, e.g. get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
+static inline int get_index128(be128 *block)
+{
+	int x;
+	__be32 *p = (__be32 *) block;
+
+	for (p += 3, x = 0; x < 128; p--, x += 32) {
+		u32 val = be32_to_cpup(p);
+
+		if (!~val)
+			continue;
+
+		return x + ffz(val);
+	}
+
+	return x;
+}
+
+static int crypt(struct blkcipher_desc *d,
+		 struct blkcipher_walk *w, struct priv *ctx,
+		 void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
+{
+	int err;
+	unsigned int avail;
+	const int bs = crypto_cipher_blocksize(ctx->child);
+	struct sinfo s = {
+		.tfm = crypto_cipher_tfm(ctx->child),
+		.fn = fn
+	};
+	be128 *iv;
+	u8 *wsrc;
+	u8 *wdst;
+
+	err = blkcipher_walk_virt(d, w);
+	if (!(avail = w->nbytes))
+		return err;
+
+	wsrc = w->src.virt.addr;
+	wdst = w->dst.virt.addr;
+
+	/* calculate first value of T */
+	iv = (be128 *)w->iv;
+	s.t = *iv;
+
+	/* T <- I*Key2 */
+	gf128mul_64k_bbe(&s.t, ctx->table);
+
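+	/* the tweak for the first block is already in s.t, so skip the
+	 * incremental tweak update on the first pass through the loop */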
+	goto first;
+
+	for (;;) {
+		do {
+			/* T <- I*Key2, using the optimization
+			 * discussed in the specification */
+			be128_xor(&s.t, &s.t, &ctx->mulinc[get_index128(iv)]);
+			inc(iv);
+
+first:
+			lrw_round(&s, wdst, wsrc);
+
+			wsrc += bs;
+			wdst += bs;
+		} while ((avail -= bs) >= bs);
+
+		err = blkcipher_walk_done(d, w, avail);
+		if (!(avail = w->nbytes))
+			break;
+
+		wsrc = w->src.virt.addr;
+		wdst = w->dst.virt.addr;
+	}
+
+	return err;
+}
+
+static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		   struct scatterlist *src, unsigned int nbytes)
+{
+	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk w;
+
+	blkcipher_walk_init(&w, dst, src, nbytes);
+	return crypt(desc, &w, ctx,
+		     crypto_cipher_alg(ctx->child)->cia_encrypt);
+}
+
+static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		   struct scatterlist *src, unsigned int nbytes)
+{
+	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk w;
+
+	blkcipher_walk_init(&w, dst, src, nbytes);
+	return crypt(desc, &w, ctx,
+		     crypto_cipher_alg(ctx->child)->cia_decrypt);
+}
+
+static int init_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_instance *inst = (void *)tfm->__crt_alg;
+	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+	struct priv *ctx = crypto_tfm_ctx(tfm);
+	u32 *flags = &tfm->crt_flags;
+
+	tfm = crypto_spawn_tfm(spawn);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	if (crypto_tfm_alg_blocksize(tfm) != 16) {
+		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+		return -EINVAL;
+	}
+
+	ctx->child = crypto_cipher_cast(tfm);
+	return 0;
+}
+
+static void exit_tfm(struct crypto_tfm *tfm)
+{
+	struct priv *ctx = crypto_tfm_ctx(tfm);
+	if (ctx->table)
+		gf128mul_free_64k(ctx->table);
+	crypto_free_cipher(ctx->child);
+}
+
+static struct crypto_instance *alloc(void *param, unsigned int len)
+{
+	struct crypto_instance *inst;
+	struct crypto_alg *alg;
+
+	alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
+				  CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+	if (IS_ERR(alg))
+		return ERR_PTR(PTR_ERR(alg));
+
+	inst = crypto_alloc_instance("lrw", alg);
+	if (IS_ERR(inst))
+		goto out_put_alg;
+
+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
+	inst->alg.cra_priority = alg->cra_priority;
+	inst->alg.cra_blocksize = alg->cra_blocksize;
+
+	if (alg->cra_alignmask < 7)
+		inst->alg.cra_alignmask = 7;
+	else
+		inst->alg.cra_alignmask = alg->cra_alignmask;
+	inst->alg.cra_type = &crypto_blkcipher_type;
+
+	if (!(alg->cra_blocksize % 4))
+		inst->alg.cra_alignmask |= 3;
+	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
+	inst->alg.cra_blkcipher.min_keysize =
+		alg->cra_cipher.cia_min_keysize + alg->cra_blocksize;
+	inst->alg.cra_blkcipher.max_keysize =
+		alg->cra_cipher.cia_max_keysize + alg->cra_blocksize;
+
+	inst->alg.cra_ctxsize = sizeof(struct priv);
+
+	inst->alg.cra_init = init_tfm;
+	inst->alg.cra_exit = exit_tfm;
+
+	inst->alg.cra_blkcipher.setkey = setkey;
+	inst->alg.cra_blkcipher.encrypt = encrypt;
+	inst->alg.cra_blkcipher.decrypt = decrypt;
+
+out_put_alg:
+	crypto_mod_put(alg);
+	return inst;
+}
+
+static void free(struct crypto_instance *inst)
+{
+	crypto_drop_spawn(crypto_instance_ctx(inst));
+	kfree(inst);
+}
+
+static struct crypto_template crypto_tmpl = {
+	.name = "lrw",
+	.alloc = alloc,
+	.free = free,
+	.module = THIS_MODULE,
+};
+
+static int __init crypto_module_init(void)
+{
+	return crypto_register_template(&crypto_tmpl);
+}
+
+static void __exit crypto_module_exit(void)
+{
+	crypto_unregister_template(&crypto_tmpl);
+}
+
+module_init(crypto_module_init);
+module_exit(crypto_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LRW block cipher mode");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 8330742..d671e89 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -906,6 +906,10 @@
 			    AES_CBC_ENC_TEST_VECTORS);
 		test_cipher("cbc(aes)", DECRYPT, aes_cbc_dec_tv_template,
 			    AES_CBC_DEC_TEST_VECTORS);
+		test_cipher("lrw(aes)", ENCRYPT, aes_lrw_enc_tv_template,
+			    AES_LRW_ENC_TEST_VECTORS);
+		test_cipher("lrw(aes)", DECRYPT, aes_lrw_dec_tv_template,
+			    AES_LRW_DEC_TEST_VECTORS);
 
 		//CAST5
 		test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template,
@@ -977,6 +981,9 @@
 		test_hash("hmac(sha256)", hmac_sha256_tv_template,
 			  HMAC_SHA256_TEST_VECTORS);
 
+		test_hash("xcbc(aes)", aes_xcbc128_tv_template,
+			  XCBC_AES_TEST_VECTORS);
+
 		test_hash("michael_mic", michael_mic_tv_template, MICHAEL_MIC_TEST_VECTORS);
 		break;
 
@@ -1052,6 +1059,10 @@
 			    AES_CBC_ENC_TEST_VECTORS);
 		test_cipher("cbc(aes)", DECRYPT, aes_cbc_dec_tv_template,
 			    AES_CBC_DEC_TEST_VECTORS);
+		test_cipher("lrw(aes)", ENCRYPT, aes_lrw_enc_tv_template,
+			    AES_LRW_ENC_TEST_VECTORS);
+		test_cipher("lrw(aes)", DECRYPT, aes_lrw_dec_tv_template,
+			    AES_LRW_DEC_TEST_VECTORS);
 		break;
 
 	case 11:
@@ -1191,6 +1202,10 @@
 				  aes_speed_template);
 		test_cipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
 				  aes_speed_template);
+		test_cipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
+				  aes_lrw_speed_template);
+		test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
+				  aes_lrw_speed_template);
 		break;
 
 	case 201:
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index a40c441..48a8136 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -39,15 +39,15 @@
 struct cipher_testvec {
 	char key[MAX_KEYLEN] __attribute__ ((__aligned__(4)));
 	char iv[MAX_IVLEN];
-	char input[48];
-	char result[48];
+	char input[512];
+	char result[512];
 	unsigned char tap[MAX_TAP];
 	int np;
 	unsigned char fail;
 	unsigned char wk; /* weak key flag */
 	unsigned char klen;
-	unsigned char ilen;
-	unsigned char rlen;
+	unsigned short ilen;
+	unsigned short rlen;
 };
 
 struct cipher_speed {
@@ -933,6 +933,74 @@
 	},
 };
 
+#define XCBC_AES_TEST_VECTORS 6
+
+static struct hash_testvec aes_xcbc128_tv_template[] = {
+	{
+		.key	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+		.plaintext = { [0 ... 15] = 0 },
+		.digest = { 0x75, 0xf0, 0x25, 0x1d, 0x52, 0x8a, 0xc0, 0x1c,
+			    0x45, 0x73, 0xdf, 0xd5, 0x84, 0xd7, 0x9f, 0x29 },
+		.psize	= 0,
+		.ksize	= 16,
+	}, {
+		.key	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+		.plaintext = { 0x00, 0x01, 0x02 },
+		.digest	= { 0x5b, 0x37, 0x65, 0x80, 0xae, 0x2f, 0x19, 0xaf,
+			    0xe7, 0x21, 0x9c, 0xee, 0xf1, 0x72, 0x75, 0x6f },
+		.psize	= 3,
+		.ksize	= 16,
+	}, {
+		.key	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+		.plaintext = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+		.digest = { 0xd2, 0xa2, 0x46, 0xfa, 0x34, 0x9b, 0x68, 0xa7,
+			    0x99, 0x98, 0xa4, 0x39, 0x4f, 0xf7, 0xa2, 0x63 },
+		.psize	= 16,
+		.ksize	= 16,
+	}, {
+		.key	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+		.plaintext = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+			       0x10, 0x11, 0x12, 0x13 },
+		.digest = { 0x47, 0xf5, 0x1b, 0x45, 0x64, 0x96, 0x62, 0x15,
+			    0xb8, 0x98, 0x5c, 0x63, 0x05, 0x5e, 0xd3, 0x08 },
+		.tap	= { 10, 10 },
+		.psize	= 20,
+		.np	= 2,
+		.ksize	= 16,
+	}, {
+		.key	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+		.plaintext = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+			       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+			       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
+		.digest = { 0xf5, 0x4f, 0x0e, 0xc8, 0xd2, 0xb9, 0xf3, 0xd3,
+			    0x68, 0x07, 0x73, 0x4b, 0xd5, 0x28, 0x3f, 0xd4 },
+		.psize	= 32,
+		.ksize	= 16,
+	}, {
+		.key	= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+		.plaintext = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+			       0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+			       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+			       0x20, 0x21 },
+		.digest = { 0xbe, 0xcb, 0xb3, 0xbc, 0xcd, 0xb5, 0x18, 0xa3,
+			    0x06, 0x77, 0xd5, 0x48, 0x1f, 0xb6, 0xb4, 0xd8 },
+		.tap	= { 17, 17 },
+		.psize	= 34,
+		.np	= 2,
+		.ksize	= 16,
+	}
+};
+
 /*
  * DES test vectors.
  */
@@ -1831,6 +1899,8 @@
 #define AES_DEC_TEST_VECTORS 3
 #define AES_CBC_ENC_TEST_VECTORS 2
 #define AES_CBC_DEC_TEST_VECTORS 2
+#define AES_LRW_ENC_TEST_VECTORS 8
+#define AES_LRW_DEC_TEST_VECTORS 8
 
 static struct cipher_testvec aes_enc_tv_template[] = {
 	{ /* From FIPS-197 */
@@ -1968,6 +2038,509 @@
 	},
 };
 
+static struct cipher_testvec aes_lrw_enc_tv_template[] = {
+	/* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
+	{ /* LRW-32-AES 1 */
+		.key    = { 0x45, 0x62, 0xac, 0x25, 0xf8, 0x28, 0x17, 0x6d,
+			    0x4c, 0x26, 0x84, 0x14, 0xb5, 0x68, 0x01, 0x85,
+			    0x25, 0x8e, 0x2a, 0x05, 0xe7, 0x3e, 0x9d, 0x03,
+			    0xee, 0x5a, 0x83, 0x0c, 0xcc, 0x09, 0x4c, 0x87 },
+		.klen   = 32,
+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+		.input  = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+			    0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+		.ilen   = 16,
+		.result = { 0xf1, 0xb2, 0x73, 0xcd, 0x65, 0xa3, 0xdf, 0x5f,
+			    0xe9, 0x5d, 0x48, 0x92, 0x54, 0x63, 0x4e, 0xb8 },
+		.rlen   = 16,
+	}, { /* LRW-32-AES 2 */
+		.key    = { 0x59, 0x70, 0x47, 0x14, 0xf5, 0x57, 0x47, 0x8c,
+		  	    0xd7, 0x79, 0xe8, 0x0f, 0x54, 0x88, 0x79, 0x44,
+			    0x0d, 0x48, 0xf0, 0xb7, 0xb1, 0x5a, 0x53, 0xea,
+			    0x1c, 0xaa, 0x6b, 0x29, 0xc2, 0xca, 0xfb, 0xaf
+		},
+		.klen   = 32,
+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 },
+		.input  = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+			    0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+		.ilen   = 16,
+		.result = { 0x00, 0xc8, 0x2b, 0xae, 0x95, 0xbb, 0xcd, 0xe5,
+			    0x27, 0x4f, 0x07, 0x69, 0xb2, 0x60, 0xe1, 0x36 },
+		.rlen   = 16,
+	}, { /* LRW-32-AES 3 */
+		.key    = { 0xd8, 0x2a, 0x91, 0x34, 0xb2, 0x6a, 0x56, 0x50,
+		  	    0x30, 0xfe, 0x69, 0xe2, 0x37, 0x7f, 0x98, 0x47,
+			    0xcd, 0xf9, 0x0b, 0x16, 0x0c, 0x64, 0x8f, 0xb6,
+			    0xb0, 0x0d, 0x0d, 0x1b, 0xae, 0x85, 0x87, 0x1f },
+		.klen   = 32,
+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00 },
+		.input  = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+			    0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+		.ilen   = 16,
+		.result = { 0x76, 0x32, 0x21, 0x83, 0xed, 0x8f, 0xf1, 0x82,
+			    0xf9, 0x59, 0x62, 0x03, 0x69, 0x0e, 0x5e, 0x01 },
+		.rlen   = 16,
+	}, { /* LRW-32-AES 4 */
+		.key    = { 0x0f, 0x6a, 0xef, 0xf8, 0xd3, 0xd2, 0xbb, 0x15,
+		  	    0x25, 0x83, 0xf7, 0x3c, 0x1f, 0x01, 0x28, 0x74,
+			    0xca, 0xc6, 0xbc, 0x35, 0x4d, 0x4a, 0x65, 0x54,
+			    0x90, 0xae, 0x61, 0xcf, 0x7b, 0xae, 0xbd, 0xcc,
+			    0xad, 0xe4, 0x94, 0xc5, 0x4a, 0x29, 0xae, 0x70 },
+		.klen   = 40,
+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+		.input  = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+			    0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+		.ilen   = 16,
+		.result = { 0x9c, 0x0f, 0x15, 0x2f, 0x55, 0xa2, 0xd8, 0xf0,
+			    0xd6, 0x7b, 0x8f, 0x9e, 0x28, 0x22, 0xbc, 0x41 },
+		.rlen   = 16,
+	}, { /* LRW-32-AES 5 */
+		.key    = { 0x8a, 0xd4, 0xee, 0x10, 0x2f, 0xbd, 0x81, 0xff,
+		  	    0xf8, 0x86, 0xce, 0xac, 0x93, 0xc5, 0xad, 0xc6,
+			    0xa0, 0x19, 0x07, 0xc0, 0x9d, 0xf7, 0xbb, 0xdd,
+			    0x52, 0x13, 0xb2, 0xb7, 0xf0, 0xff, 0x11, 0xd8,
+			    0xd6, 0x08, 0xd0, 0xcd, 0x2e, 0xb1, 0x17, 0x6f },
+		.klen   = 40,
+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00 },
+		.input  = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+			    0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+		.ilen   = 16,
+		.result = { 0xd4, 0x27, 0x6a, 0x7f, 0x14, 0x91, 0x3d, 0x65,
+			    0xc8, 0x60, 0x48, 0x02, 0x87, 0xe3, 0x34, 0x06 },
+		.rlen   = 16,
+	}, { /* LRW-32-AES 6 */
+		.key    = { 0xf8, 0xd4, 0x76, 0xff, 0xd6, 0x46, 0xee, 0x6c,
+		  	    0x23, 0x84, 0xcb, 0x1c, 0x77, 0xd6, 0x19, 0x5d,
+			    0xfe, 0xf1, 0xa9, 0xf3, 0x7b, 0xbc, 0x8d, 0x21,
+			    0xa7, 0x9c, 0x21, 0xf8, 0xcb, 0x90, 0x02, 0x89,
+			    0xa8, 0x45, 0x34, 0x8e, 0xc8, 0xc5, 0xb5, 0xf1,
+			    0x26, 0xf5, 0x0e, 0x76, 0xfe, 0xfd, 0x1b, 0x1e },
+		.klen   = 48,
+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+		.input  = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+			    0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+		.ilen   = 16,
+		.result = { 0xbd, 0x06, 0xb8, 0xe1, 0xdb, 0x98, 0x89, 0x9e,
+			    0xc4, 0x98, 0xe4, 0x91, 0xcf, 0x1c, 0x70, 0x2b },
+		.rlen   = 16,
+	}, { /* LRW-32-AES 7 */
+		.key    = { 0xfb, 0x76, 0x15, 0xb2, 0x3d, 0x80, 0x89, 0x1d,
+		  	    0xd4, 0x70, 0x98, 0x0b, 0xc7, 0x95, 0x84, 0xc8,
+			    0xb2, 0xfb, 0x64, 0xce, 0x60, 0x97, 0x87, 0x8d,
+			    0x17, 0xfc, 0xe4, 0x5a, 0x49, 0xe8, 0x30, 0xb7,
+			    0x6e, 0x78, 0x17, 0xe7, 0x2d, 0x5e, 0x12, 0xd4,
+			    0x60, 0x64, 0x04, 0x7a, 0xf1, 0x2f, 0x9e, 0x0c },
+		.klen   = 48,
+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00 },
+		.input  = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+			    0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+		.ilen   = 16,
+		.result = { 0x5b, 0x90, 0x8e, 0xc1, 0xab, 0xdd, 0x67, 0x5f,
+			    0x3d, 0x69, 0x8a, 0x95, 0x53, 0xc8, 0x9c, 0xe5 },
+		.rlen   = 16,
+	}, {
+/* http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html */
+		.key    = { 0xf8, 0xd4, 0x76, 0xff, 0xd6, 0x46, 0xee, 0x6c,
+			    0x23, 0x84, 0xcb, 0x1c, 0x77, 0xd6, 0x19, 0x5d,
+			    0xfe, 0xf1, 0xa9, 0xf3, 0x7b, 0xbc, 0x8d, 0x21,
+			    0xa7, 0x9c, 0x21, 0xf8, 0xcb, 0x90, 0x02, 0x89,
+			    0xa8, 0x45, 0x34, 0x8e, 0xc8, 0xc5, 0xb5, 0xf1,
+			    0x26, 0xf5, 0x0e, 0x76, 0xfe, 0xfd, 0x1b, 0x1e },
+		.klen   = 48,
+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+		.input  = { 0x05, 0x11, 0xb7, 0x18, 0xab, 0xc6, 0x2d, 0xac,
+			    0x70, 0x5d, 0xf6, 0x22, 0x94, 0xcd, 0xe5, 0x6c,
+			    0x17, 0x6b, 0xf6, 0x1c, 0xf0, 0xf3, 0x6e, 0xf8,
+			    0x50, 0x38, 0x1f, 0x71, 0x49, 0xb6, 0x57, 0xd6,
+			    0x8f, 0xcb, 0x8d, 0x6b, 0xe3, 0xa6, 0x29, 0x90,
+			    0xfe, 0x2a, 0x62, 0x82, 0xae, 0x6d, 0x8b, 0xf6,
+			    0xad, 0x1e, 0x9e, 0x20, 0x5f, 0x38, 0xbe, 0x04,
+			    0xda, 0x10, 0x8e, 0xed, 0xa2, 0xa4, 0x87, 0xab,
+			    0xda, 0x6b, 0xb4, 0x0c, 0x75, 0xba, 0xd3, 0x7c,
+			    0xc9, 0xac, 0x42, 0x31, 0x95, 0x7c, 0xc9, 0x04,
+			    0xeb, 0xd5, 0x6e, 0x32, 0x69, 0x8a, 0xdb, 0xa6,
+			    0x15, 0xd7, 0x3f, 0x4f, 0x2f, 0x66, 0x69, 0x03,
+			    0x9c, 0x1f, 0x54, 0x0f, 0xde, 0x1f, 0xf3, 0x65,
+			    0x4c, 0x96, 0x12, 0xed, 0x7c, 0x92, 0x03, 0x01,
+			    0x6f, 0xbc, 0x35, 0x93, 0xac, 0xf1, 0x27, 0xf1,
+			    0xb4, 0x96, 0x82, 0x5a, 0x5f, 0xb0, 0xa0, 0x50,
+			    0x89, 0xa4, 0x8e, 0x66, 0x44, 0x85, 0xcc, 0xfd,
+			    0x33, 0x14, 0x70, 0xe3, 0x96, 0xb2, 0xc3, 0xd3,
+			    0xbb, 0x54, 0x5a, 0x1a, 0xf9, 0x74, 0xa2, 0xc5,
+			    0x2d, 0x64, 0x75, 0xdd, 0xb4, 0x54, 0xe6, 0x74,
+			    0x8c, 0xd3, 0x9d, 0x9e, 0x86, 0xab, 0x51, 0x53,
+			    0xb7, 0x93, 0x3e, 0x6f, 0xd0, 0x4e, 0x2c, 0x40,
+			    0xf6, 0xa8, 0x2e, 0x3e, 0x9d, 0xf4, 0x66, 0xa5,
+			    0x76, 0x12, 0x73, 0x44, 0x1a, 0x56, 0xd7, 0x72,
+			    0x88, 0xcd, 0x21, 0x8c, 0x4c, 0x0f, 0xfe, 0xda,
+			    0x95, 0xe0, 0x3a, 0xa6, 0xa5, 0x84, 0x46, 0xcd,
+			    0xd5, 0x3e, 0x9d, 0x3a, 0xe2, 0x67, 0xe6, 0x60,
+			    0x1a, 0xe2, 0x70, 0x85, 0x58, 0xc2, 0x1b, 0x09,
+			    0xe1, 0xd7, 0x2c, 0xca, 0xad, 0xa8, 0x8f, 0xf9,
+			    0xac, 0xb3, 0x0e, 0xdb, 0xca, 0x2e, 0xe2, 0xb8,
+			    0x51, 0x71, 0xd9, 0x3c, 0x6c, 0xf1, 0x56, 0xf8,
+			    0xea, 0x9c, 0xf1, 0xfb, 0x0c, 0xe6, 0xb7, 0x10,
+			    0x1c, 0xf8, 0xa9, 0x7c, 0xe8, 0x53, 0x35, 0xc1,
+			    0x90, 0x3e, 0x76, 0x4a, 0x74, 0xa4, 0x21, 0x2c,
+			    0xf6, 0x2c, 0x4e, 0x0f, 0x94, 0x3a, 0x88, 0x2e,
+			    0x41, 0x09, 0x6a, 0x33, 0x7d, 0xf6, 0xdd, 0x3f,
+			    0x8d, 0x23, 0x31, 0x74, 0x84, 0xeb, 0x88, 0x6e,
+			    0xcc, 0xb9, 0xbc, 0x22, 0x83, 0x19, 0x07, 0x22,
+			    0xa5, 0x2d, 0xdf, 0xa5, 0xf3, 0x80, 0x85, 0x78,
+			    0x84, 0x39, 0x6a, 0x6d, 0x6a, 0x99, 0x4f, 0xa5,
+			    0x15, 0xfe, 0x46, 0xb0, 0xe4, 0x6c, 0xa5, 0x41,
+			    0x3c, 0xce, 0x8f, 0x42, 0x60, 0x71, 0xa7, 0x75,
+			    0x08, 0x40, 0x65, 0x8a, 0x82, 0xbf, 0xf5, 0x43,
+			    0x71, 0x96, 0xa9, 0x4d, 0x44, 0x8a, 0x20, 0xbe,
+			    0xfa, 0x4d, 0xbb, 0xc0, 0x7d, 0x31, 0x96, 0x65,
+			    0xe7, 0x75, 0xe5, 0x3e, 0xfd, 0x92, 0x3b, 0xc9,
+			    0x55, 0xbb, 0x16, 0x7e, 0xf7, 0xc2, 0x8c, 0xa4,
+			    0x40, 0x1d, 0xe5, 0xef, 0x0e, 0xdf, 0xe4, 0x9a,
+			    0x62, 0x73, 0x65, 0xfd, 0x46, 0x63, 0x25, 0x3d,
+			    0x2b, 0xaf, 0xe5, 0x64, 0xfe, 0xa5, 0x5c, 0xcf,
+			    0x24, 0xf3, 0xb4, 0xac, 0x64, 0xba, 0xdf, 0x4b,
+			    0xc6, 0x96, 0x7d, 0x81, 0x2d, 0x8d, 0x97, 0xf7,
+			    0xc5, 0x68, 0x77, 0x84, 0x32, 0x2b, 0xcc, 0x85,
+			    0x74, 0x96, 0xf0, 0x12, 0x77, 0x61, 0xb9, 0xeb,
+			    0x71, 0xaa, 0x82, 0xcb, 0x1c, 0xdb, 0x89, 0xc8,
+			    0xc6, 0xb5, 0xe3, 0x5c, 0x7d, 0x39, 0x07, 0x24,
+			    0xda, 0x39, 0x87, 0x45, 0xc0, 0x2b, 0xbb, 0x01,
+			    0xac, 0xbc, 0x2a, 0x5c, 0x7f, 0xfc, 0xe8, 0xce,
+			    0x6d, 0x9c, 0x6f, 0xed, 0xd3, 0xc1, 0xa1, 0xd6,
+			    0xc5, 0x55, 0xa9, 0x66, 0x2f, 0xe1, 0xc8, 0x32,
+			    0xa6, 0x5d, 0xa4, 0x3a, 0x98, 0x73, 0xe8, 0x45,
+			    0xa4, 0xc7, 0xa8, 0xb4, 0xf6, 0x13, 0x03, 0xf6,
+			    0xe9, 0x2e, 0xc4, 0x29, 0x0f, 0x84, 0xdb, 0xc4,
+			    0x21, 0xc4, 0xc2, 0x75, 0x67, 0x89, 0x37, 0x0a },
+		.ilen   = 512,
+		.result = { 0x1a, 0x1d, 0xa9, 0x30, 0xad, 0xf9, 0x2f, 0x9b,
+			    0xb6, 0x1d, 0xae, 0xef, 0xf0, 0x2f, 0xf8, 0x5a,
+			    0x39, 0x3c, 0xbf, 0x2a, 0xb2, 0x45, 0xb2, 0x23,
+			    0x1b, 0x63, 0x3c, 0xcf, 0xaa, 0xbe, 0xcf, 0x4e,
+			    0xfa, 0xe8, 0x29, 0xc2, 0x20, 0x68, 0x2b, 0x3c,
+			    0x2e, 0x8b, 0xf7, 0x6e, 0x25, 0xbd, 0xe3, 0x3d,
+			    0x66, 0x27, 0xd6, 0xaf, 0xd6, 0x64, 0x3e, 0xe3,
+			    0xe8, 0x58, 0x46, 0x97, 0x39, 0x51, 0x07, 0xde,
+			    0xcb, 0x37, 0xbc, 0xa9, 0xc0, 0x5f, 0x75, 0xc3,
+			    0x0e, 0x84, 0x23, 0x1d, 0x16, 0xd4, 0x1c, 0x59,
+			    0x9c, 0x1a, 0x02, 0x55, 0xab, 0x3a, 0x97, 0x1d,
+			    0xdf, 0xdd, 0xc7, 0x06, 0x51, 0xd7, 0x70, 0xae,
+			    0x23, 0xc6, 0x8c, 0xf5, 0x1e, 0xa0, 0xe5, 0x82,
+			    0xb8, 0xb2, 0xbf, 0x04, 0xa0, 0x32, 0x8e, 0x68,
+			    0xeb, 0xaf, 0x6e, 0x2d, 0x94, 0x22, 0x2f, 0xce,
+			    0x4c, 0xb5, 0x59, 0xe2, 0xa2, 0x2f, 0xa0, 0x98,
+			    0x1a, 0x97, 0xc6, 0xd4, 0xb5, 0x00, 0x59, 0xf2,
+			    0x84, 0x14, 0x72, 0xb1, 0x9a, 0x6e, 0xa3, 0x7f,
+			    0xea, 0x20, 0xe7, 0xcb, 0x65, 0x77, 0x3a, 0xdf,
+			    0xc8, 0x97, 0x67, 0x15, 0xc2, 0x2a, 0x27, 0xcc,
+			    0x18, 0x55, 0xa1, 0x24, 0x0b, 0x24, 0x24, 0xaf,
+			    0x5b, 0xec, 0x68, 0xb8, 0xc8, 0xf5, 0xba, 0x63,
+			    0xff, 0xed, 0x89, 0xce, 0xd5, 0x3d, 0x88, 0xf3,
+			    0x25, 0xef, 0x05, 0x7c, 0x3a, 0xef, 0xeb, 0xd8,
+			    0x7a, 0x32, 0x0d, 0xd1, 0x1e, 0x58, 0x59, 0x99,
+			    0x90, 0x25, 0xb5, 0x26, 0xb0, 0xe3, 0x2b, 0x6c,
+			    0x4c, 0xa9, 0x8b, 0x84, 0x4f, 0x5e, 0x01, 0x50,
+			    0x41, 0x30, 0x58, 0xc5, 0x62, 0x74, 0x52, 0x1d,
+			    0x45, 0x24, 0x6a, 0x42, 0x64, 0x4f, 0x97, 0x1c,
+			    0xa8, 0x66, 0xb5, 0x6d, 0x79, 0xd4, 0x0d, 0x48,
+			    0xc5, 0x5f, 0xf3, 0x90, 0x32, 0xdd, 0xdd, 0xe1,
+			    0xe4, 0xa9, 0x9f, 0xfc, 0xc3, 0x52, 0x5a, 0x46,
+			    0xe4, 0x81, 0x84, 0x95, 0x36, 0x59, 0x7a, 0x6b,
+			    0xaa, 0xb3, 0x60, 0xad, 0xce, 0x9f, 0x9f, 0x28,
+			    0xe0, 0x01, 0x75, 0x22, 0xc4, 0x4e, 0xa9, 0x62,
+			    0x5c, 0x62, 0x0d, 0x00, 0xcb, 0x13, 0xe8, 0x43,
+			    0x72, 0xd4, 0x2d, 0x53, 0x46, 0xb5, 0xd1, 0x16,
+			    0x22, 0x18, 0xdf, 0x34, 0x33, 0xf5, 0xd6, 0x1c,
+			    0xb8, 0x79, 0x78, 0x97, 0x94, 0xff, 0x72, 0x13,
+			    0x4c, 0x27, 0xfc, 0xcb, 0xbf, 0x01, 0x53, 0xa6,
+			    0xb4, 0x50, 0x6e, 0xde, 0xdf, 0xb5, 0x43, 0xa4,
+			    0x59, 0xdf, 0x52, 0xf9, 0x7c, 0xe0, 0x11, 0x6f,
+			    0x2d, 0x14, 0x8e, 0x24, 0x61, 0x2c, 0xe1, 0x17,
+			    0xcc, 0xce, 0x51, 0x0c, 0x19, 0x8a, 0x82, 0x30,
+			    0x94, 0xd5, 0x3d, 0x6a, 0x53, 0x06, 0x5e, 0xbd,
+			    0xb7, 0xeb, 0xfa, 0xfd, 0x27, 0x51, 0xde, 0x85,
+			    0x1e, 0x86, 0x53, 0x11, 0x53, 0x94, 0x00, 0xee,
+			    0x2b, 0x8c, 0x08, 0x2a, 0xbf, 0xdd, 0xae, 0x11,
+			    0xcb, 0x1e, 0xa2, 0x07, 0x9a, 0x80, 0xcf, 0x62,
+			    0x9b, 0x09, 0xdc, 0x95, 0x3c, 0x96, 0x8e, 0xb1,
+			    0x09, 0xbd, 0xe4, 0xeb, 0xdb, 0xca, 0x70, 0x7a,
+			    0x9e, 0xfa, 0x31, 0x18, 0x45, 0x3c, 0x21, 0x33,
+			    0xb0, 0xb3, 0x2b, 0xea, 0xf3, 0x71, 0x2d, 0xe1,
+			    0x03, 0xad, 0x1b, 0x48, 0xd4, 0x67, 0x27, 0xf0,
+			    0x62, 0xe4, 0x3d, 0xfb, 0x9b, 0x08, 0x76, 0xe7,
+			    0xdd, 0x2b, 0x01, 0x39, 0x04, 0x5a, 0x58, 0x7a,
+			    0xf7, 0x11, 0x90, 0xec, 0xbd, 0x51, 0x5c, 0x32,
+			    0x6b, 0xd7, 0x35, 0x39, 0x02, 0x6b, 0xf2, 0xa6,
+			    0xd0, 0x0d, 0x07, 0xe1, 0x06, 0xc4, 0x5b, 0x7d,
+			    0xe4, 0x6a, 0xd7, 0xee, 0x15, 0x1f, 0x83, 0xb4,
+			    0xa3, 0xa7, 0x5e, 0xc3, 0x90, 0xb7, 0xef, 0xd3,
+			    0xb7, 0x4f, 0xf8, 0x92, 0x4c, 0xb7, 0x3c, 0x29,
+			    0xcd, 0x7e, 0x2b, 0x5d, 0x43, 0xea, 0x42, 0xe7,
+			    0x74, 0x3f, 0x7d, 0x58, 0x88, 0x75, 0xde, 0x3e },
+		.rlen   = 512,
+	}
+};
+
+static struct cipher_testvec aes_lrw_dec_tv_template[] = {
+	/* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
+	/* same as enc vectors with input and result reversed */
+	{ /* LRW-32-AES 1 */
+		.key    = { 0x45, 0x62, 0xac, 0x25, 0xf8, 0x28, 0x17, 0x6d,
+			    0x4c, 0x26, 0x84, 0x14, 0xb5, 0x68, 0x01, 0x85,
+			    0x25, 0x8e, 0x2a, 0x05, 0xe7, 0x3e, 0x9d, 0x03,
+			    0xee, 0x5a, 0x83, 0x0c, 0xcc, 0x09, 0x4c, 0x87 },
+		.klen   = 32,
+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+		.input  = { 0xf1, 0xb2, 0x73, 0xcd, 0x65, 0xa3, 0xdf, 0x5f,
+			    0xe9, 0x5d, 0x48, 0x92, 0x54, 0x63, 0x4e, 0xb8 },
+		.ilen   = 16,
+		.result = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+			    0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+		.rlen   = 16,
+	}, { /* LRW-32-AES 2 */
+		.key    = { 0x59, 0x70, 0x47, 0x14, 0xf5, 0x57, 0x47, 0x8c,
+		  	    0xd7, 0x79, 0xe8, 0x0f, 0x54, 0x88, 0x79, 0x44,
+			    0x0d, 0x48, 0xf0, 0xb7, 0xb1, 0x5a, 0x53, 0xea,
+			    0x1c, 0xaa, 0x6b, 0x29, 0xc2, 0xca, 0xfb, 0xaf
+		},
+		.klen   = 32,
+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 },
+		.input  = { 0x00, 0xc8, 0x2b, 0xae, 0x95, 0xbb, 0xcd, 0xe5,
+			    0x27, 0x4f, 0x07, 0x69, 0xb2, 0x60, 0xe1, 0x36 },
+		.ilen   = 16,
+		.result  = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+			     0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+		.rlen   = 16,
+	}, { /* LRW-32-AES 3 */
+		.key    = { 0xd8, 0x2a, 0x91, 0x34, 0xb2, 0x6a, 0x56, 0x50,
+		  	    0x30, 0xfe, 0x69, 0xe2, 0x37, 0x7f, 0x98, 0x47,
+			    0xcd, 0xf9, 0x0b, 0x16, 0x0c, 0x64, 0x8f, 0xb6,
+			    0xb0, 0x0d, 0x0d, 0x1b, 0xae, 0x85, 0x87, 0x1f },
+		.klen   = 32,
+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00 },
+		.input  = { 0x76, 0x32, 0x21, 0x83, 0xed, 0x8f, 0xf1, 0x82,
+			    0xf9, 0x59, 0x62, 0x03, 0x69, 0x0e, 0x5e, 0x01 },
+		.ilen   = 16,
+		.result = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+			    0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+		.rlen   = 16,
+	}, { /* LRW-32-AES 4 */
+		.key    = { 0x0f, 0x6a, 0xef, 0xf8, 0xd3, 0xd2, 0xbb, 0x15,
+		  	    0x25, 0x83, 0xf7, 0x3c, 0x1f, 0x01, 0x28, 0x74,
+			    0xca, 0xc6, 0xbc, 0x35, 0x4d, 0x4a, 0x65, 0x54,
+			    0x90, 0xae, 0x61, 0xcf, 0x7b, 0xae, 0xbd, 0xcc,
+			    0xad, 0xe4, 0x94, 0xc5, 0x4a, 0x29, 0xae, 0x70 },
+		.klen   = 40,
+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+		.input  = { 0x9c, 0x0f, 0x15, 0x2f, 0x55, 0xa2, 0xd8, 0xf0,
+			    0xd6, 0x7b, 0x8f, 0x9e, 0x28, 0x22, 0xbc, 0x41 },
+		.ilen   = 16,
+		.result = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+			    0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+		.rlen   = 16,
+	}, { /* LRW-32-AES 5 */
+		.key    = { 0x8a, 0xd4, 0xee, 0x10, 0x2f, 0xbd, 0x81, 0xff,
+		  	    0xf8, 0x86, 0xce, 0xac, 0x93, 0xc5, 0xad, 0xc6,
+			    0xa0, 0x19, 0x07, 0xc0, 0x9d, 0xf7, 0xbb, 0xdd,
+			    0x52, 0x13, 0xb2, 0xb7, 0xf0, 0xff, 0x11, 0xd8,
+			    0xd6, 0x08, 0xd0, 0xcd, 0x2e, 0xb1, 0x17, 0x6f },
+		.klen   = 40,
+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00 },
+		.input  = { 0xd4, 0x27, 0x6a, 0x7f, 0x14, 0x91, 0x3d, 0x65,
+			    0xc8, 0x60, 0x48, 0x02, 0x87, 0xe3, 0x34, 0x06 },
+		.ilen   = 16,
+		.result = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+			    0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+		.rlen   = 16,
+	}, { /* LRW-32-AES 6 */
+		.key    = { 0xf8, 0xd4, 0x76, 0xff, 0xd6, 0x46, 0xee, 0x6c,
+		  	    0x23, 0x84, 0xcb, 0x1c, 0x77, 0xd6, 0x19, 0x5d,
+			    0xfe, 0xf1, 0xa9, 0xf3, 0x7b, 0xbc, 0x8d, 0x21,
+			    0xa7, 0x9c, 0x21, 0xf8, 0xcb, 0x90, 0x02, 0x89,
+			    0xa8, 0x45, 0x34, 0x8e, 0xc8, 0xc5, 0xb5, 0xf1,
+			    0x26, 0xf5, 0x0e, 0x76, 0xfe, 0xfd, 0x1b, 0x1e },
+		.klen   = 48,
+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+		.input  = { 0xbd, 0x06, 0xb8, 0xe1, 0xdb, 0x98, 0x89, 0x9e,
+			    0xc4, 0x98, 0xe4, 0x91, 0xcf, 0x1c, 0x70, 0x2b },
+		.ilen   = 16,
+		.result = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+			    0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+		.rlen   = 16,
+	}, { /* LRW-32-AES 7 */
+		.key    = { 0xfb, 0x76, 0x15, 0xb2, 0x3d, 0x80, 0x89, 0x1d,
+		  	    0xd4, 0x70, 0x98, 0x0b, 0xc7, 0x95, 0x84, 0xc8,
+			    0xb2, 0xfb, 0x64, 0xce, 0x60, 0x97, 0x87, 0x8d,
+			    0x17, 0xfc, 0xe4, 0x5a, 0x49, 0xe8, 0x30, 0xb7,
+			    0x6e, 0x78, 0x17, 0xe7, 0x2d, 0x5e, 0x12, 0xd4,
+			    0x60, 0x64, 0x04, 0x7a, 0xf1, 0x2f, 0x9e, 0x0c },
+		.klen   = 48,
+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00 },
+		.input  = { 0x5b, 0x90, 0x8e, 0xc1, 0xab, 0xdd, 0x67, 0x5f,
+			    0x3d, 0x69, 0x8a, 0x95, 0x53, 0xc8, 0x9c, 0xe5 },
+		.ilen   = 16,
+		.result = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+			    0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+		.rlen   = 16,
+	}, {
+/* http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html */
+		.key    = { 0xf8, 0xd4, 0x76, 0xff, 0xd6, 0x46, 0xee, 0x6c,
+			    0x23, 0x84, 0xcb, 0x1c, 0x77, 0xd6, 0x19, 0x5d,
+			    0xfe, 0xf1, 0xa9, 0xf3, 0x7b, 0xbc, 0x8d, 0x21,
+			    0xa7, 0x9c, 0x21, 0xf8, 0xcb, 0x90, 0x02, 0x89,
+			    0xa8, 0x45, 0x34, 0x8e, 0xc8, 0xc5, 0xb5, 0xf1,
+			    0x26, 0xf5, 0x0e, 0x76, 0xfe, 0xfd, 0x1b, 0x1e },
+		.klen   = 48,
+		.iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+		.input	= { 0x1a, 0x1d, 0xa9, 0x30, 0xad, 0xf9, 0x2f, 0x9b,
+			    0xb6, 0x1d, 0xae, 0xef, 0xf0, 0x2f, 0xf8, 0x5a,
+			    0x39, 0x3c, 0xbf, 0x2a, 0xb2, 0x45, 0xb2, 0x23,
+			    0x1b, 0x63, 0x3c, 0xcf, 0xaa, 0xbe, 0xcf, 0x4e,
+			    0xfa, 0xe8, 0x29, 0xc2, 0x20, 0x68, 0x2b, 0x3c,
+			    0x2e, 0x8b, 0xf7, 0x6e, 0x25, 0xbd, 0xe3, 0x3d,
+			    0x66, 0x27, 0xd6, 0xaf, 0xd6, 0x64, 0x3e, 0xe3,
+			    0xe8, 0x58, 0x46, 0x97, 0x39, 0x51, 0x07, 0xde,
+			    0xcb, 0x37, 0xbc, 0xa9, 0xc0, 0x5f, 0x75, 0xc3,
+			    0x0e, 0x84, 0x23, 0x1d, 0x16, 0xd4, 0x1c, 0x59,
+			    0x9c, 0x1a, 0x02, 0x55, 0xab, 0x3a, 0x97, 0x1d,
+			    0xdf, 0xdd, 0xc7, 0x06, 0x51, 0xd7, 0x70, 0xae,
+			    0x23, 0xc6, 0x8c, 0xf5, 0x1e, 0xa0, 0xe5, 0x82,
+			    0xb8, 0xb2, 0xbf, 0x04, 0xa0, 0x32, 0x8e, 0x68,
+			    0xeb, 0xaf, 0x6e, 0x2d, 0x94, 0x22, 0x2f, 0xce,
+			    0x4c, 0xb5, 0x59, 0xe2, 0xa2, 0x2f, 0xa0, 0x98,
+			    0x1a, 0x97, 0xc6, 0xd4, 0xb5, 0x00, 0x59, 0xf2,
+			    0x84, 0x14, 0x72, 0xb1, 0x9a, 0x6e, 0xa3, 0x7f,
+			    0xea, 0x20, 0xe7, 0xcb, 0x65, 0x77, 0x3a, 0xdf,
+			    0xc8, 0x97, 0x67, 0x15, 0xc2, 0x2a, 0x27, 0xcc,
+			    0x18, 0x55, 0xa1, 0x24, 0x0b, 0x24, 0x24, 0xaf,
+			    0x5b, 0xec, 0x68, 0xb8, 0xc8, 0xf5, 0xba, 0x63,
+			    0xff, 0xed, 0x89, 0xce, 0xd5, 0x3d, 0x88, 0xf3,
+			    0x25, 0xef, 0x05, 0x7c, 0x3a, 0xef, 0xeb, 0xd8,
+			    0x7a, 0x32, 0x0d, 0xd1, 0x1e, 0x58, 0x59, 0x99,
+			    0x90, 0x25, 0xb5, 0x26, 0xb0, 0xe3, 0x2b, 0x6c,
+			    0x4c, 0xa9, 0x8b, 0x84, 0x4f, 0x5e, 0x01, 0x50,
+			    0x41, 0x30, 0x58, 0xc5, 0x62, 0x74, 0x52, 0x1d,
+			    0x45, 0x24, 0x6a, 0x42, 0x64, 0x4f, 0x97, 0x1c,
+			    0xa8, 0x66, 0xb5, 0x6d, 0x79, 0xd4, 0x0d, 0x48,
+			    0xc5, 0x5f, 0xf3, 0x90, 0x32, 0xdd, 0xdd, 0xe1,
+			    0xe4, 0xa9, 0x9f, 0xfc, 0xc3, 0x52, 0x5a, 0x46,
+			    0xe4, 0x81, 0x84, 0x95, 0x36, 0x59, 0x7a, 0x6b,
+			    0xaa, 0xb3, 0x60, 0xad, 0xce, 0x9f, 0x9f, 0x28,
+			    0xe0, 0x01, 0x75, 0x22, 0xc4, 0x4e, 0xa9, 0x62,
+			    0x5c, 0x62, 0x0d, 0x00, 0xcb, 0x13, 0xe8, 0x43,
+			    0x72, 0xd4, 0x2d, 0x53, 0x46, 0xb5, 0xd1, 0x16,
+			    0x22, 0x18, 0xdf, 0x34, 0x33, 0xf5, 0xd6, 0x1c,
+			    0xb8, 0x79, 0x78, 0x97, 0x94, 0xff, 0x72, 0x13,
+			    0x4c, 0x27, 0xfc, 0xcb, 0xbf, 0x01, 0x53, 0xa6,
+			    0xb4, 0x50, 0x6e, 0xde, 0xdf, 0xb5, 0x43, 0xa4,
+			    0x59, 0xdf, 0x52, 0xf9, 0x7c, 0xe0, 0x11, 0x6f,
+			    0x2d, 0x14, 0x8e, 0x24, 0x61, 0x2c, 0xe1, 0x17,
+			    0xcc, 0xce, 0x51, 0x0c, 0x19, 0x8a, 0x82, 0x30,
+			    0x94, 0xd5, 0x3d, 0x6a, 0x53, 0x06, 0x5e, 0xbd,
+			    0xb7, 0xeb, 0xfa, 0xfd, 0x27, 0x51, 0xde, 0x85,
+			    0x1e, 0x86, 0x53, 0x11, 0x53, 0x94, 0x00, 0xee,
+			    0x2b, 0x8c, 0x08, 0x2a, 0xbf, 0xdd, 0xae, 0x11,
+			    0xcb, 0x1e, 0xa2, 0x07, 0x9a, 0x80, 0xcf, 0x62,
+			    0x9b, 0x09, 0xdc, 0x95, 0x3c, 0x96, 0x8e, 0xb1,
+			    0x09, 0xbd, 0xe4, 0xeb, 0xdb, 0xca, 0x70, 0x7a,
+			    0x9e, 0xfa, 0x31, 0x18, 0x45, 0x3c, 0x21, 0x33,
+			    0xb0, 0xb3, 0x2b, 0xea, 0xf3, 0x71, 0x2d, 0xe1,
+			    0x03, 0xad, 0x1b, 0x48, 0xd4, 0x67, 0x27, 0xf0,
+			    0x62, 0xe4, 0x3d, 0xfb, 0x9b, 0x08, 0x76, 0xe7,
+			    0xdd, 0x2b, 0x01, 0x39, 0x04, 0x5a, 0x58, 0x7a,
+			    0xf7, 0x11, 0x90, 0xec, 0xbd, 0x51, 0x5c, 0x32,
+			    0x6b, 0xd7, 0x35, 0x39, 0x02, 0x6b, 0xf2, 0xa6,
+			    0xd0, 0x0d, 0x07, 0xe1, 0x06, 0xc4, 0x5b, 0x7d,
+			    0xe4, 0x6a, 0xd7, 0xee, 0x15, 0x1f, 0x83, 0xb4,
+			    0xa3, 0xa7, 0x5e, 0xc3, 0x90, 0xb7, 0xef, 0xd3,
+			    0xb7, 0x4f, 0xf8, 0x92, 0x4c, 0xb7, 0x3c, 0x29,
+			    0xcd, 0x7e, 0x2b, 0x5d, 0x43, 0xea, 0x42, 0xe7,
+			    0x74, 0x3f, 0x7d, 0x58, 0x88, 0x75, 0xde, 0x3e },
+		.ilen   = 512,
+		.result	= { 0x05, 0x11, 0xb7, 0x18, 0xab, 0xc6, 0x2d, 0xac,
+			    0x70, 0x5d, 0xf6, 0x22, 0x94, 0xcd, 0xe5, 0x6c,
+			    0x17, 0x6b, 0xf6, 0x1c, 0xf0, 0xf3, 0x6e, 0xf8,
+			    0x50, 0x38, 0x1f, 0x71, 0x49, 0xb6, 0x57, 0xd6,
+			    0x8f, 0xcb, 0x8d, 0x6b, 0xe3, 0xa6, 0x29, 0x90,
+			    0xfe, 0x2a, 0x62, 0x82, 0xae, 0x6d, 0x8b, 0xf6,
+			    0xad, 0x1e, 0x9e, 0x20, 0x5f, 0x38, 0xbe, 0x04,
+			    0xda, 0x10, 0x8e, 0xed, 0xa2, 0xa4, 0x87, 0xab,
+			    0xda, 0x6b, 0xb4, 0x0c, 0x75, 0xba, 0xd3, 0x7c,
+			    0xc9, 0xac, 0x42, 0x31, 0x95, 0x7c, 0xc9, 0x04,
+			    0xeb, 0xd5, 0x6e, 0x32, 0x69, 0x8a, 0xdb, 0xa6,
+			    0x15, 0xd7, 0x3f, 0x4f, 0x2f, 0x66, 0x69, 0x03,
+			    0x9c, 0x1f, 0x54, 0x0f, 0xde, 0x1f, 0xf3, 0x65,
+			    0x4c, 0x96, 0x12, 0xed, 0x7c, 0x92, 0x03, 0x01,
+			    0x6f, 0xbc, 0x35, 0x93, 0xac, 0xf1, 0x27, 0xf1,
+			    0xb4, 0x96, 0x82, 0x5a, 0x5f, 0xb0, 0xa0, 0x50,
+			    0x89, 0xa4, 0x8e, 0x66, 0x44, 0x85, 0xcc, 0xfd,
+			    0x33, 0x14, 0x70, 0xe3, 0x96, 0xb2, 0xc3, 0xd3,
+			    0xbb, 0x54, 0x5a, 0x1a, 0xf9, 0x74, 0xa2, 0xc5,
+			    0x2d, 0x64, 0x75, 0xdd, 0xb4, 0x54, 0xe6, 0x74,
+			    0x8c, 0xd3, 0x9d, 0x9e, 0x86, 0xab, 0x51, 0x53,
+			    0xb7, 0x93, 0x3e, 0x6f, 0xd0, 0x4e, 0x2c, 0x40,
+			    0xf6, 0xa8, 0x2e, 0x3e, 0x9d, 0xf4, 0x66, 0xa5,
+			    0x76, 0x12, 0x73, 0x44, 0x1a, 0x56, 0xd7, 0x72,
+			    0x88, 0xcd, 0x21, 0x8c, 0x4c, 0x0f, 0xfe, 0xda,
+			    0x95, 0xe0, 0x3a, 0xa6, 0xa5, 0x84, 0x46, 0xcd,
+			    0xd5, 0x3e, 0x9d, 0x3a, 0xe2, 0x67, 0xe6, 0x60,
+			    0x1a, 0xe2, 0x70, 0x85, 0x58, 0xc2, 0x1b, 0x09,
+			    0xe1, 0xd7, 0x2c, 0xca, 0xad, 0xa8, 0x8f, 0xf9,
+			    0xac, 0xb3, 0x0e, 0xdb, 0xca, 0x2e, 0xe2, 0xb8,
+			    0x51, 0x71, 0xd9, 0x3c, 0x6c, 0xf1, 0x56, 0xf8,
+			    0xea, 0x9c, 0xf1, 0xfb, 0x0c, 0xe6, 0xb7, 0x10,
+			    0x1c, 0xf8, 0xa9, 0x7c, 0xe8, 0x53, 0x35, 0xc1,
+			    0x90, 0x3e, 0x76, 0x4a, 0x74, 0xa4, 0x21, 0x2c,
+			    0xf6, 0x2c, 0x4e, 0x0f, 0x94, 0x3a, 0x88, 0x2e,
+			    0x41, 0x09, 0x6a, 0x33, 0x7d, 0xf6, 0xdd, 0x3f,
+			    0x8d, 0x23, 0x31, 0x74, 0x84, 0xeb, 0x88, 0x6e,
+			    0xcc, 0xb9, 0xbc, 0x22, 0x83, 0x19, 0x07, 0x22,
+			    0xa5, 0x2d, 0xdf, 0xa5, 0xf3, 0x80, 0x85, 0x78,
+			    0x84, 0x39, 0x6a, 0x6d, 0x6a, 0x99, 0x4f, 0xa5,
+			    0x15, 0xfe, 0x46, 0xb0, 0xe4, 0x6c, 0xa5, 0x41,
+			    0x3c, 0xce, 0x8f, 0x42, 0x60, 0x71, 0xa7, 0x75,
+			    0x08, 0x40, 0x65, 0x8a, 0x82, 0xbf, 0xf5, 0x43,
+			    0x71, 0x96, 0xa9, 0x4d, 0x44, 0x8a, 0x20, 0xbe,
+			    0xfa, 0x4d, 0xbb, 0xc0, 0x7d, 0x31, 0x96, 0x65,
+			    0xe7, 0x75, 0xe5, 0x3e, 0xfd, 0x92, 0x3b, 0xc9,
+			    0x55, 0xbb, 0x16, 0x7e, 0xf7, 0xc2, 0x8c, 0xa4,
+			    0x40, 0x1d, 0xe5, 0xef, 0x0e, 0xdf, 0xe4, 0x9a,
+			    0x62, 0x73, 0x65, 0xfd, 0x46, 0x63, 0x25, 0x3d,
+			    0x2b, 0xaf, 0xe5, 0x64, 0xfe, 0xa5, 0x5c, 0xcf,
+			    0x24, 0xf3, 0xb4, 0xac, 0x64, 0xba, 0xdf, 0x4b,
+			    0xc6, 0x96, 0x7d, 0x81, 0x2d, 0x8d, 0x97, 0xf7,
+			    0xc5, 0x68, 0x77, 0x84, 0x32, 0x2b, 0xcc, 0x85,
+			    0x74, 0x96, 0xf0, 0x12, 0x77, 0x61, 0xb9, 0xeb,
+			    0x71, 0xaa, 0x82, 0xcb, 0x1c, 0xdb, 0x89, 0xc8,
+			    0xc6, 0xb5, 0xe3, 0x5c, 0x7d, 0x39, 0x07, 0x24,
+			    0xda, 0x39, 0x87, 0x45, 0xc0, 0x2b, 0xbb, 0x01,
+			    0xac, 0xbc, 0x2a, 0x5c, 0x7f, 0xfc, 0xe8, 0xce,
+			    0x6d, 0x9c, 0x6f, 0xed, 0xd3, 0xc1, 0xa1, 0xd6,
+			    0xc5, 0x55, 0xa9, 0x66, 0x2f, 0xe1, 0xc8, 0x32,
+			    0xa6, 0x5d, 0xa4, 0x3a, 0x98, 0x73, 0xe8, 0x45,
+			    0xa4, 0xc7, 0xa8, 0xb4, 0xf6, 0x13, 0x03, 0xf6,
+			    0xe9, 0x2e, 0xc4, 0x29, 0x0f, 0x84, 0xdb, 0xc4,
+			    0x21, 0xc4, 0xc2, 0x75, 0x67, 0x89, 0x37, 0x0a },
+		.rlen   = 512,
+	}
+};
+
 /* Cast5 test vectors from RFC 2144 */
 #define CAST5_ENC_TEST_VECTORS	3
 #define CAST5_DEC_TEST_VECTORS	3
@@ -3084,6 +3657,27 @@
 	{  .klen = 0, .blen = 0, }
 };
 
+static struct cipher_speed aes_lrw_speed_template[] = {
+	{ .klen = 32, .blen = 16, },
+	{ .klen = 32, .blen = 64, },
+	{ .klen = 32, .blen = 256, },
+	{ .klen = 32, .blen = 1024, },
+	{ .klen = 32, .blen = 8192, },
+	{ .klen = 40, .blen = 16, },
+	{ .klen = 40, .blen = 64, },
+	{ .klen = 40, .blen = 256, },
+	{ .klen = 40, .blen = 1024, },
+	{ .klen = 40, .blen = 8192, },
+	{ .klen = 48, .blen = 16, },
+	{ .klen = 48, .blen = 64, },
+	{ .klen = 48, .blen = 256, },
+	{ .klen = 48, .blen = 1024, },
+	{ .klen = 48, .blen = 8192, },
+
+	/* End marker */
+	{  .klen = 0, .blen = 0, }
+};
+
 static struct cipher_speed des3_ede_speed_template[] = {
 	{ .klen = 24, .blen = 16, },
 	{ .klen = 24, .blen = 64, },
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
new file mode 100644
index 0000000..9347eb6
--- /dev/null
+++ b/crypto/xcbc.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright (C)2006 USAGI/WIDE Project
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * Author:
+ * 	Kazunori Miyazawa <miyazawa@linux-ipv6.org>
+ */
+
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include "internal.h"
+
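+/* XCBC subkey-derivation constants (cf. RFC 3566): the three subkeys are
+ * K1 = E(K, 0x0101..01), K2 = E(K, 0x0202..02) and K3 = E(K, 0x0303..03);
+ * ctx->consts points at this array.
+ */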
+static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
+			   0x02020202, 0x02020202, 0x02020202, 0x02020202,
+			   0x03030303, 0x03030303, 0x03030303, 0x03030303};
+/*
+ * +------------------------
+ * | <parent tfm>
+ * +------------------------
+ * | crypto_xcbc_ctx
+ * +------------------------
+ * | odds (block size)
+ * +------------------------
+ * | prev (block size)
+ * +------------------------
+ * | key (block size)
+ * +------------------------
+ * | consts (block size * 3)
+ * +------------------------
+ */
+struct crypto_xcbc_ctx {
+	struct crypto_tfm *child;
+	u8 *odds;
+	u8 *prev;
+	u8 *key;
+	u8 *consts;
+	void (*xor)(u8 *a, const u8 *b, unsigned int bs);
+	unsigned int keylen;
+	unsigned int len;
+};
+
+static void xor_128(u8 *a, const u8 *b, unsigned int bs)
+{
+	((u32 *)a)[0] ^= ((u32 *)b)[0];
+	((u32 *)a)[1] ^= ((u32 *)b)[1];
+	((u32 *)a)[2] ^= ((u32 *)b)[2];
+	((u32 *)a)[3] ^= ((u32 *)b)[3];
+}
+
+static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent,
+				      struct crypto_xcbc_ctx *ctx)
+{
+	int bs = crypto_hash_blocksize(parent);
+	int err = 0;
+	u8 key1[bs];
+
+	if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen)))
+	    return err;
+
+	ctx->child->__crt_alg->cra_cipher.cia_encrypt(ctx->child, key1,
+			ctx->consts);
+
+	return crypto_cipher_setkey(ctx->child, key1, bs);
+}
+
+static int crypto_xcbc_digest_setkey(struct crypto_hash *parent,
+				     const u8 *inkey, unsigned int keylen)
+{
+	struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
+
+	if (keylen != crypto_tfm_alg_blocksize(ctx->child))
+		return -EINVAL;
+
+	ctx->keylen = keylen;
+	memcpy(ctx->key, inkey, keylen);
+	ctx->consts = (u8*)ks;
+
+	return _crypto_xcbc_digest_setkey(parent, ctx);
+}
+
+static int crypto_xcbc_digest_init(struct hash_desc *pdesc)
+{
+	struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(pdesc->tfm);
+	int bs = crypto_hash_blocksize(pdesc->tfm);
+
+	ctx->len = 0;
+	memset(ctx->odds, 0, bs);
+	memset(ctx->prev, 0, bs);
+
+	return 0;
+}
+
+static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
+				     struct scatterlist *sg,
+				     unsigned int nbytes)
+{
+	struct crypto_hash *parent = pdesc->tfm;
+	struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
+	struct crypto_tfm *tfm = ctx->child;
+	int bs = crypto_hash_blocksize(parent);
+	unsigned int i = 0;
+
+	do {
+		struct page *pg = sg[i].page;
+		unsigned int offset = sg[i].offset;
+		unsigned int slen = sg[i].length;
+
+		while (slen > 0) {
+			unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
+			char *p = crypto_kmap(pg, 0) + offset;
+
+			/* checking the data can fill the block */
+			if ((ctx->len + len) <= bs) {
+				memcpy(ctx->odds + ctx->len, p, len);
+				ctx->len += len;
+				slen -= len;
+
+				/* checking the rest of the page */
+				if (len + offset >= PAGE_SIZE) {
+					offset = 0;
+					pg++;
+				} else
+					offset += len;
+
+				crypto_kunmap(p, 0);
+				crypto_yield(tfm->crt_flags);
+				continue;
+			}
+
+			/* filling odds with new data and encrypting it */
+			memcpy(ctx->odds + ctx->len, p, bs - ctx->len);
+			len -= bs - ctx->len;
+			p += bs - ctx->len;
+
+			ctx->xor(ctx->prev, ctx->odds, bs);
+			tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, ctx->prev, ctx->prev);
+
+			/* clearing the length */
+			ctx->len = 0;
+
+			/* encrypting the rest of data */
+			while (len > bs) {
+				ctx->xor(ctx->prev, p, bs);
+				tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, ctx->prev, ctx->prev);
+				p += bs;
+				len -= bs;
+			}
+
+			/* keeping the surplus of blocksize */
+			if (len) {
+				memcpy(ctx->odds, p, len);
+				ctx->len = len;
+			}
+			crypto_kunmap(p, 0);
+			crypto_yield(tfm->crt_flags);
+			slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
+			offset = 0;
+			pg++;
+		}
+		nbytes -= sg[i].length;
+		i++;
+	} while (nbytes > 0);
+
+	return 0;
+}
+
+static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
+{
+	struct crypto_hash *parent = pdesc->tfm;
+	struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
+	struct crypto_tfm *tfm = ctx->child;
+	int bs = crypto_hash_blocksize(parent);
+	int err = 0;
+
+	if (ctx->len == bs) {
+		u8 key2[bs];
+
+		if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
+			return err;
+
+		tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, key2, (const u8*)(ctx->consts+bs));
+
+		ctx->xor(ctx->prev, ctx->odds, bs);
+		ctx->xor(ctx->prev, key2, bs);
+		_crypto_xcbc_digest_setkey(parent, ctx);
+
+		tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, out, ctx->prev);
+	} else {
+		u8 key3[bs];
+		unsigned int rlen;
+		u8 *p = ctx->odds + ctx->len;
+		*p = 0x80;
+		p++;
+
+		rlen = bs - ctx->len - 1;
+		if (rlen)
+			memset(p, 0, rlen);
+
+		if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
+			return err;
+
+		tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, key3, (const u8*)(ctx->consts+bs*2));
+
+		ctx->xor(ctx->prev, ctx->odds, bs);
+		ctx->xor(ctx->prev, key3, bs);
+
+		_crypto_xcbc_digest_setkey(parent, ctx);
+
+		tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, out, ctx->prev);
+	}
+
+	return 0;
+}
+
+static int crypto_xcbc_digest(struct hash_desc *pdesc,
+		  struct scatterlist *sg, unsigned int nbytes, u8 *out)
+{
+	crypto_xcbc_digest_init(pdesc);
+	crypto_xcbc_digest_update(pdesc, sg, nbytes);
+	return crypto_xcbc_digest_final(pdesc, out);
+}
+
+static int xcbc_init_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_instance *inst = (void *)tfm->__crt_alg;
+	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+	struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
+	int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm));
+
+	tfm = crypto_spawn_tfm(spawn);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	switch(bs) {
+	case 16:
+		ctx->xor = xor_128;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ctx->child = crypto_cipher_cast(tfm);
+	ctx->odds = (u8*)(ctx+1);
+	ctx->prev = ctx->odds + bs;
+	ctx->key = ctx->prev + bs;
+
+	return 0;
+}
+
+static void xcbc_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
+	crypto_free_cipher(ctx->child);
+}
+
+static struct crypto_instance *xcbc_alloc(void *param, unsigned int len)
+{
+	struct crypto_instance *inst;
+	struct crypto_alg *alg;
+	alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
+				  CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC);
+	if (IS_ERR(alg))
+		return ERR_PTR(PTR_ERR(alg));
+
+	switch (alg->cra_blocksize) {
+	case 16:
+		break;
+	default:
+		/* the xcbc construction needs a 16-byte block cipher */
+		inst = ERR_PTR(-EINVAL);
+		goto out_put_alg;
+	}
+
+	inst = crypto_alloc_instance("xcbc", alg);
+	if (IS_ERR(inst))
+		goto out_put_alg;
+
+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH;
+	inst->alg.cra_priority = alg->cra_priority;
+	inst->alg.cra_blocksize = alg->cra_blocksize;
+	inst->alg.cra_alignmask = alg->cra_alignmask;
+	inst->alg.cra_type = &crypto_hash_type;
+
+	inst->alg.cra_hash.digestsize =
+		(alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+		CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
+				       alg->cra_blocksize;
+	inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) +
+				ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *));
+	inst->alg.cra_init = xcbc_init_tfm;
+	inst->alg.cra_exit = xcbc_exit_tfm;
+
+	inst->alg.cra_hash.init = crypto_xcbc_digest_init;
+	inst->alg.cra_hash.update = crypto_xcbc_digest_update;
+	inst->alg.cra_hash.final = crypto_xcbc_digest_final;
+	inst->alg.cra_hash.digest = crypto_xcbc_digest;
+	inst->alg.cra_hash.setkey = crypto_xcbc_digest_setkey;
+
+out_put_alg:
+	crypto_mod_put(alg);
+	return inst;
+}
+
+static void xcbc_free(struct crypto_instance *inst)
+{
+	crypto_drop_spawn(crypto_instance_ctx(inst));
+	kfree(inst);
+}
+
+static struct crypto_template crypto_xcbc_tmpl = {
+	.name = "xcbc",
+	.alloc = xcbc_alloc,
+	.free = xcbc_free,
+	.module = THIS_MODULE,
+};
+
+static int __init crypto_xcbc_module_init(void)
+{
+	return crypto_register_template(&crypto_xcbc_tmpl);
+}
+
+static void __exit crypto_xcbc_module_exit(void)
+{
+	crypto_unregister_template(&crypto_xcbc_tmpl);
+}
+
+module_init(crypto_xcbc_module_init);
+module_exit(crypto_xcbc_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("XCBC keyed hash algorithm");
diff --git a/drivers/Makefile b/drivers/Makefile
index 4ac14da..6771177 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -77,3 +77,4 @@
 obj-$(CONFIG_SUPERH)		+= sh/
 obj-$(CONFIG_GENERIC_TIME)	+= clocksource/
 obj-$(CONFIG_DMA_ENGINE)	+= dma/
+obj-$(CONFIG_PPC_PS3)		+= ps3/
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 578b99b..bf5b79e 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -27,6 +27,7 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/notifier.h>
+#include <linux/jiffies.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 10f160d..a2f46d5 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -267,9 +267,9 @@
 {
 	acpi_status status;
 
-	if (dev->firmware_data) {
+	if (dev->archdata.acpi_handle) {
 		printk(KERN_WARNING PREFIX
-		       "Drivers changed 'firmware_data' for %s\n", dev->bus_id);
+		       "Drivers changed 'acpi_handle' for %s\n", dev->bus_id);
 		return -EINVAL;
 	}
 	get_device(dev);
@@ -278,25 +278,26 @@
 		put_device(dev);
 		return -EINVAL;
 	}
-	dev->firmware_data = handle;
+	dev->archdata.acpi_handle = handle;
 
 	return 0;
 }
 
 static int acpi_unbind_one(struct device *dev)
 {
-	if (!dev->firmware_data)
+	if (!dev->archdata.acpi_handle)
 		return 0;
-	if (dev == acpi_get_physical_device(dev->firmware_data)) {
+	if (dev == acpi_get_physical_device(dev->archdata.acpi_handle)) {
 		/* acpi_get_physical_device increase refcnt by one */
 		put_device(dev);
-		acpi_detach_data(dev->firmware_data, acpi_glue_data_handler);
-		dev->firmware_data = NULL;
+		acpi_detach_data(dev->archdata.acpi_handle,
+				 acpi_glue_data_handler);
+		dev->archdata.acpi_handle = NULL;
 		/* acpi_bind_one increase refcnt by one */
 		put_device(dev);
 	} else {
 		printk(KERN_ERR PREFIX
-		       "Oops, 'firmware_data' corrupt for %s\n", dev->bus_id);
+		       "Oops, 'acpi_handle' corrupt for %s\n", dev->bus_id);
 	}
 	return 0;
 }
@@ -328,7 +329,8 @@
 	if (!ret) {
 		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 
-		acpi_get_name(dev->firmware_data, ACPI_FULL_PATHNAME, &buffer);
+		acpi_get_name(dev->archdata.acpi_handle,
+			      ACPI_FULL_PATHNAME, &buffer);
 		DBG("Device %s -> %s\n", dev->bus_id, (char *)buffer.pointer);
 		kfree(buffer.pointer);
 	} else
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 068fe4f..02b30ae 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -50,6 +50,7 @@
 struct acpi_os_dpc {
 	acpi_osd_exec_callback function;
 	void *context;
+	struct work_struct work;
 };
 
 #ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -564,12 +565,9 @@
 	acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
 }
 
-static void acpi_os_execute_deferred(void *context)
+static void acpi_os_execute_deferred(struct work_struct *work)
 {
-	struct acpi_os_dpc *dpc = NULL;
-
-
-	dpc = (struct acpi_os_dpc *)context;
+	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
 	if (!dpc) {
 		printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
 		return;
@@ -602,7 +600,6 @@
 {
 	acpi_status status = AE_OK;
 	struct acpi_os_dpc *dpc;
-	struct work_struct *task;
 
 	ACPI_FUNCTION_TRACE("os_queue_for_execution");
 
@@ -615,28 +612,22 @@
 
 	/*
 	 * Allocate/initialize DPC structure.  Note that this memory will be
-	 * freed by the callee.  The kernel handles the tq_struct list  in a
+	 * freed by the callee.  The kernel handles the work_struct list  in a
 	 * way that allows us to also free its memory inside the callee.
 	 * Because we may want to schedule several tasks with different
 	 * parameters we can't use the approach some kernel code uses of
-	 * having a static tq_struct.
-	 * We can save time and code by allocating the DPC and tq_structs
-	 * from the same memory.
+	 * having a static work_struct.
 	 */
 
-	dpc =
-	    kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
-		    GFP_ATOMIC);
+	dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
 	if (!dpc)
 		return_ACPI_STATUS(AE_NO_MEMORY);
 
 	dpc->function = function;
 	dpc->context = context;
 
-	task = (void *)(dpc + 1);
-	INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
-
-	if (!queue_work(kacpid_wq, task)) {
+	INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+	if (!queue_work(kacpid_wq, &dpc->work)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
 				  "Call to queue_work() failed.\n"));
 		kfree(dpc);
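
The conversion above follows the pattern the updated workqueue API expects: the work_struct is embedded in the object it serves, the handler receives a struct work_struct pointer, and the owning object is recovered with container_of(). A minimal stand-alone sketch of the same pattern, with hypothetical names and queued on the generic schedule_work() queue rather than kacpid_wq:

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	struct my_dpc {
		void (*function)(void *context);
		void *context;
		struct work_struct work;		/* embedded, not tacked on the end */
	};

	static void my_deferred(struct work_struct *work)
	{
		/* recover the containing object from the work_struct pointer */
		struct my_dpc *dpc = container_of(work, struct my_dpc, work);

		dpc->function(dpc->context);
		kfree(dpc);				/* callee owns the allocation */
	}

	static int my_defer(void (*fn)(void *context), void *context)
	{
		struct my_dpc *dpc = kmalloc(sizeof(*dpc), GFP_ATOMIC);

		if (!dpc)
			return -ENOMEM;

		dpc->function = fn;
		dpc->context = context;
		INIT_WORK(&dpc->work, my_deferred);	/* two-argument INIT_WORK */
		schedule_work(&dpc->work);
		return 0;
	}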
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 03f6338..984ab28 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -328,6 +328,15 @@
 
 	  If unsure, say N.
 
+config PATA_MARVELL
+	tristate "Marvell PATA support via legacy mode"
+	depends on PCI
+	help
+	  This option enables limited support for the Marvell 88SE6145 ATA
+	  controller.
+
+	  If unsure, say N.
+
 config PATA_MPIIX
 	tristate "Intel PATA MPIIX support"
 	depends on PCI
@@ -483,6 +492,32 @@
 
 	  If unsure, say N.
 
+config PATA_WINBOND_VLB
+	tristate "Winbond W83759A VLB PATA support (Experimental)"
+	depends on ISA && EXPERIMENTAL
+	help
+	  Support for the Winbond W83759A controller on Vesa Local Bus
+	  systems.
+
+config PATA_PLATFORM
+	tristate "Generic platform device PATA support"
+	depends on EMBEDDED
+	help
+	  This option enables support for generic directly connected ATA
+	  devices commonly found on embedded systems.
+
+	  If unsure, say N.
+
+config PATA_IXP4XX_CF
+	tristate "IXP4XX Compact Flash support"
+	depends on ARCH_IXP4XX
+	help
+	  This option enables support for a Compact Flash connected to
+	  the ixp4xx expansion bus. This driver was written with
+	  Loft/Avila boards in mind but can work with others.
+
+	  If unsure, say N.
+
 endif
 endmenu
 
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 72243a6..bc3d81a 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -38,6 +38,7 @@
 obj-$(CONFIG_PATA_NS87410)	+= pata_ns87410.o
 obj-$(CONFIG_PATA_OPTI)		+= pata_opti.o
 obj-$(CONFIG_PATA_OPTIDMA)	+= pata_optidma.o
+obj-$(CONFIG_PATA_MARVELL)	+= pata_marvell.o
 obj-$(CONFIG_PATA_MPIIX)	+= pata_mpiix.o
 obj-$(CONFIG_PATA_OLDPIIX)	+= pata_oldpiix.o
 obj-$(CONFIG_PATA_PCMCIA)	+= pata_pcmcia.o
@@ -51,8 +52,11 @@
 obj-$(CONFIG_PATA_SIL680)	+= pata_sil680.o
 obj-$(CONFIG_PATA_VIA)		+= pata_via.o
 obj-$(CONFIG_PATA_WINBOND)	+= pata_sl82c105.o
+obj-$(CONFIG_PATA_WINBOND_VLB)	+= pata_winbond.o
 obj-$(CONFIG_PATA_SIS)		+= pata_sis.o
 obj-$(CONFIG_PATA_TRIFLEX)	+= pata_triflex.o
+obj-$(CONFIG_PATA_IXP4XX_CF)	+= pata_ixp4xx_cf.o
+obj-$(CONFIG_PATA_PLATFORM)	+= pata_platform.o
 # Should be last but one libata driver
 obj-$(CONFIG_ATA_GENERIC)	+= ata_generic.o
 # Should be last libata driver
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index f510e11..f36da48 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -53,6 +53,7 @@
 
 enum {
 	AHCI_PCI_BAR		= 5,
+	AHCI_MAX_PORTS		= 32,
 	AHCI_MAX_SG		= 168, /* hardware max is 64K */
 	AHCI_DMA_BOUNDARY	= 0xffffffff,
 	AHCI_USE_CLUSTERING	= 0,
@@ -77,7 +78,9 @@
 	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */
 
 	board_ahci		= 0,
-	board_ahci_vt8251	= 1,
+	board_ahci_pi		= 1,
+	board_ahci_vt8251	= 2,
+	board_ahci_ign_iferr	= 3,
 
 	/* global controller registers */
 	HOST_CAP		= 0x00, /* host capabilities */
@@ -166,8 +169,9 @@
 	AHCI_FLAG_MSI		= (1 << 0),
 
 	/* ap->flags bits */
-	AHCI_FLAG_RESET_NEEDS_CLO	= (1 << 24),
-	AHCI_FLAG_NO_NCQ		= (1 << 25),
+	AHCI_FLAG_NO_NCQ		= (1 << 24),
+	AHCI_FLAG_IGN_IRQ_IF_ERR	= (1 << 25), /* ignore IRQ_IF_ERR */
+	AHCI_FLAG_HONOR_PI		= (1 << 26), /* honor PORTS_IMPL */
 };
 
 struct ahci_cmd_hdr {
@@ -214,6 +218,7 @@
 static void ahci_freeze(struct ata_port *ap);
 static void ahci_thaw(struct ata_port *ap);
 static void ahci_error_handler(struct ata_port *ap);
+static void ahci_vt8251_error_handler(struct ata_port *ap);
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
 static int ahci_port_resume(struct ata_port *ap);
@@ -273,6 +278,37 @@
 	.port_stop		= ahci_port_stop,
 };
 
+static const struct ata_port_operations ahci_vt8251_ops = {
+	.port_disable		= ata_port_disable,
+
+	.check_status		= ahci_check_status,
+	.check_altstatus	= ahci_check_status,
+	.dev_select		= ata_noop_dev_select,
+
+	.tf_read		= ahci_tf_read,
+
+	.qc_prep		= ahci_qc_prep,
+	.qc_issue		= ahci_qc_issue,
+
+	.irq_handler		= ahci_interrupt,
+	.irq_clear		= ahci_irq_clear,
+
+	.scr_read		= ahci_scr_read,
+	.scr_write		= ahci_scr_write,
+
+	.freeze			= ahci_freeze,
+	.thaw			= ahci_thaw,
+
+	.error_handler		= ahci_vt8251_error_handler,
+	.post_internal_cmd	= ahci_post_internal_cmd,
+
+	.port_suspend		= ahci_port_suspend,
+	.port_resume		= ahci_port_resume,
+
+	.port_start		= ahci_port_start,
+	.port_stop		= ahci_port_stop,
+};
+
 static const struct ata_port_info ahci_port_info[] = {
 	/* board_ahci */
 	{
@@ -284,13 +320,34 @@
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
 		.port_ops	= &ahci_ops,
 	},
+	/* board_ahci_pi */
+	{
+		.sht		= &ahci_sht,
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+				  ATA_FLAG_SKIP_D2H_BSY | AHCI_FLAG_HONOR_PI,
+		.pio_mask	= 0x1f, /* pio0-4 */
+		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.port_ops	= &ahci_ops,
+	},
 	/* board_ahci_vt8251 */
 	{
 		.sht		= &ahci_sht,
 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
 				  ATA_FLAG_SKIP_D2H_BSY |
-				  AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
+				  ATA_FLAG_HRST_TO_RESUME | AHCI_FLAG_NO_NCQ,
+		.pio_mask	= 0x1f, /* pio0-4 */
+		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.port_ops	= &ahci_vt8251_ops,
+	},
+	/* board_ahci_ign_iferr */
+	{
+		.sht		= &ahci_sht,
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+				  ATA_FLAG_SKIP_D2H_BSY |
+				  AHCI_FLAG_IGN_IRQ_IF_ERR,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
 		.port_ops	= &ahci_ops,
@@ -309,29 +366,29 @@
 	{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
 	{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
 	{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
-	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
-	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
-	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
-	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
-	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
-	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
-	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
-	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
-	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
-	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci_pi }, /* ICH8 */
+	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci_pi }, /* ICH8 */
+	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci_pi }, /* ICH8 */
+	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci_pi }, /* ICH8M */
+	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci_pi }, /* ICH8M */
+	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci_pi }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci_pi }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci_pi }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci_pi }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci_pi }, /* ICH9M */
 
 	/* JMicron */
-	{ PCI_VDEVICE(JMICRON, 0x2360), board_ahci }, /* JMicron JMB360 */
-	{ PCI_VDEVICE(JMICRON, 0x2361), board_ahci }, /* JMicron JMB361 */
-	{ PCI_VDEVICE(JMICRON, 0x2363), board_ahci }, /* JMicron JMB363 */
-	{ PCI_VDEVICE(JMICRON, 0x2365), board_ahci }, /* JMicron JMB365 */
-	{ PCI_VDEVICE(JMICRON, 0x2366), board_ahci }, /* JMicron JMB366 */
+	{ PCI_VDEVICE(JMICRON, 0x2360), board_ahci_ign_iferr }, /* JMB360 */
+	{ PCI_VDEVICE(JMICRON, 0x2361), board_ahci_ign_iferr }, /* JMB361 */
+	{ PCI_VDEVICE(JMICRON, 0x2363), board_ahci_ign_iferr }, /* JMB363 */
+	{ PCI_VDEVICE(JMICRON, 0x2365), board_ahci_ign_iferr }, /* JMB365 */
+	{ PCI_VDEVICE(JMICRON, 0x2366), board_ahci_ign_iferr }, /* JMB366 */
 
 	/* ATI */
 	{ PCI_VDEVICE(ATI, 0x4380), board_ahci }, /* ATI SB600 non-raid */
@@ -359,6 +416,10 @@
 	{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
 	{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
 
+	/* Generic, PCI class code for AHCI */
+	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+	  0x010601, 0xffffff, board_ahci },
+
 	{ }	/* terminate list */
 };
 
@@ -373,6 +434,11 @@
 };
 
 
+static inline int ahci_nr_ports(u32 cap)
+{
+	return (cap & 0x1f) + 1;
+}
+
 static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
 {
 	return base + 0x100 + (port * 0x80);
@@ -546,9 +612,6 @@
 static void ahci_init_port(void __iomem *port_mmio, u32 cap,
 			   dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
 {
-	/* power up */
-	ahci_power_up(port_mmio, cap);
-
 	/* enable FIS reception */
 	ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma);
 
@@ -574,19 +637,17 @@
 		return rc;
 	}
 
-	/* put device into slumber mode */
-	ahci_power_down(port_mmio, cap);
-
 	return 0;
 }
 
 static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
 {
-	u32 cap_save, tmp;
+	u32 cap_save, impl_save, tmp;
 
 	cap_save = readl(mmio + HOST_CAP);
 	cap_save &= ( (1<<28) | (1<<17) );
 	cap_save |= (1 << 27);
+	impl_save = readl(mmio + HOST_PORTS_IMPL);
 
 	/* global controller reset */
 	tmp = readl(mmio + HOST_CTL);
@@ -607,10 +668,21 @@
 		return -EIO;
 	}
 
+	/* turn on AHCI mode */
 	writel(HOST_AHCI_EN, mmio + HOST_CTL);
 	(void) readl(mmio + HOST_CTL);	/* flush */
+
+	/* These write-once registers are normally cleared on reset.
+	 * Restore BIOS values... which we HOPE were present before
+	 * reset.
+	 */
+	if (!impl_save) {
+		impl_save = (1 << ahci_nr_ports(cap_save)) - 1;
+		dev_printk(KERN_WARNING, &pdev->dev,
+			   "PORTS_IMPL is zero, forcing 0x%x\n", impl_save);
+	}
 	writel(cap_save, mmio + HOST_CAP);
-	writel(0xf, mmio + HOST_PORTS_IMPL);
+	writel(impl_save, mmio + HOST_PORTS_IMPL);
 	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
 
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
@@ -626,7 +698,8 @@
 }
 
 static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
-				 int n_ports, u32 cap)
+				 int n_ports, unsigned int port_flags,
+				 struct ahci_host_priv *hpriv)
 {
 	int i, rc;
 	u32 tmp;
@@ -635,13 +708,12 @@
 		void __iomem *port_mmio = ahci_port_base(mmio, i);
 		const char *emsg = NULL;
 
-#if 0 /* BIOSen initialize this incorrectly */
-		if (!(hpriv->port_map & (1 << i)))
+		if ((port_flags & AHCI_FLAG_HONOR_PI) &&
+		    !(hpriv->port_map & (1 << i)))
 			continue;
-#endif
 
 		/* make sure port is not active */
-		rc = ahci_deinit_port(port_mmio, cap, &emsg);
+		rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
 		if (rc)
 			dev_printk(KERN_WARNING, &pdev->dev,
 				   "%s (%d)\n", emsg, rc);
@@ -716,17 +788,6 @@
 	return 0;
 }
 
-static int ahci_prereset(struct ata_port *ap)
-{
-	if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
-	    (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
-		/* ATA_BUSY hasn't cleared, so send a CLO */
-		ahci_clo(ap);
-	}
-
-	return ata_std_prereset(ap);
-}
-
 static int ahci_softreset(struct ata_port *ap, unsigned int *class)
 {
 	struct ahci_port_priv *pp = ap->private_data;
@@ -864,6 +925,31 @@
 	return rc;
 }
 
+static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class)
+{
+	void __iomem *mmio = ap->host->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	ahci_stop_engine(port_mmio);
+
+	rc = sata_port_hardreset(ap, sata_ehc_deb_timing(&ap->eh_context));
+
+	/* vt8251 needs SError cleared for the port to operate */
+	ahci_scr_write(ap, SCR_ERROR, ahci_scr_read(ap, SCR_ERROR));
+
+	ahci_start_engine(port_mmio);
+
+	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+
+	/* vt8251 doesn't clear BSY on signature FIS reception,
+	 * request follow-up softreset.
+	 */
+	return rc ?: -EAGAIN;
+}
+
 static void ahci_postreset(struct ata_port *ap, unsigned int *class)
 {
 	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
@@ -980,6 +1066,10 @@
 	/* analyze @irq_stat */
 	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 
+	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
+	if (ap->flags & AHCI_FLAG_IGN_IRQ_IF_ERR)
+		irq_stat &= ~PORT_IRQ_IF_ERR;
+
 	if (irq_stat & PORT_IRQ_TF_ERR)
 		err_mask |= AC_ERR_DEV;
 
@@ -1179,7 +1269,23 @@
 	}
 
 	/* perform recovery */
-	ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
+	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_hardreset,
+		  ahci_postreset);
+}
+
+static void ahci_vt8251_error_handler(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+
+	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
+		/* restart engine */
+		ahci_stop_engine(port_mmio);
+		ahci_start_engine(port_mmio);
+	}
+
+	/* perform recovery */
+	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_vt8251_hardreset,
 		  ahci_postreset);
 }
 
@@ -1209,7 +1315,9 @@
 	int rc;
 
 	rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
-	if (rc) {
+	if (rc == 0)
+		ahci_power_down(port_mmio, hpriv->cap);
+	else {
 		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
 		ahci_init_port(port_mmio, hpriv->cap,
 			       pp->cmd_slot_dma, pp->rx_fis_dma);
@@ -1225,6 +1333,7 @@
 	void __iomem *mmio = ap->host->mmio_base;
 	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 
+	ahci_power_up(port_mmio, hpriv->cap);
 	ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
 
 	return 0;
@@ -1264,7 +1373,8 @@
 		if (rc)
 			return rc;
 
-		ahci_init_controller(mmio, pdev, host->n_ports, hpriv->cap);
+		ahci_init_controller(mmio, pdev, host->n_ports,
+				     host->ports[0]->flags, hpriv);
 	}
 
 	ata_host_resume(host);
@@ -1330,6 +1440,9 @@
 
 	ap->private_data = pp;
 
+	/* power up port */
+	ahci_power_up(port_mmio, hpriv->cap);
+
 	/* initialize port */
 	ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
 
@@ -1376,7 +1489,7 @@
 	struct ahci_host_priv *hpriv = probe_ent->private_data;
 	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
 	void __iomem *mmio = probe_ent->mmio_base;
-	unsigned int i, using_dac;
+	unsigned int i, cap_n_ports, using_dac;
 	int rc;
 
 	rc = ahci_reset_controller(mmio, pdev);
@@ -1385,10 +1498,34 @@
 
 	hpriv->cap = readl(mmio + HOST_CAP);
 	hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
-	probe_ent->n_ports = (hpriv->cap & 0x1f) + 1;
+	cap_n_ports = ahci_nr_ports(hpriv->cap);
 
 	VPRINTK("cap 0x%x  port_map 0x%x  n_ports %d\n",
-		hpriv->cap, hpriv->port_map, probe_ent->n_ports);
+		hpriv->cap, hpriv->port_map, cap_n_ports);
+
+	if (probe_ent->port_flags & AHCI_FLAG_HONOR_PI) {
+		unsigned int n_ports = cap_n_ports;
+		u32 port_map = hpriv->port_map;
+		int max_port = 0;
+
+		for (i = 0; i < AHCI_MAX_PORTS && n_ports; i++) {
+			if (port_map & (1 << i)) {
+				n_ports--;
+				port_map &= ~(1 << i);
+				max_port = i;
+			} else
+				probe_ent->dummy_port_mask |= 1 << i;
+		}
+
+		if (n_ports || port_map)
+			dev_printk(KERN_WARNING, &pdev->dev,
+				   "nr_ports (%u) and implemented port map "
+				   "(0x%x) don't match\n",
+				   cap_n_ports, hpriv->port_map);
+
+		probe_ent->n_ports = max_port + 1;
+	} else
+		probe_ent->n_ports = cap_n_ports;
 
 	using_dac = hpriv->cap & HOST_CAP_64;
 	if (using_dac &&
@@ -1420,7 +1557,8 @@
 	for (i = 0; i < probe_ent->n_ports; i++)
 		ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i);
 
-	ahci_init_controller(mmio, pdev, probe_ent->n_ports, hpriv->cap);
+	ahci_init_controller(mmio, pdev, probe_ent->n_ports,
+			     probe_ent->port_flags, hpriv);
 
 	pci_set_master(pdev);
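
Two details in the hunks above are easy to miss. The last PCI table entry matches on class code 0x010601 (mass storage / SATA / AHCI programming interface), so any spec-compliant AHCI controller is picked up even without an explicit device ID. The port accounting relies on two registers: the low five bits of HOST_CAP hold the number of ports minus one (ahci_nr_ports()), while HOST_PORTS_IMPL is a bitmap of which ports are actually wired; boards flagged AHCI_FLAG_HONOR_PI trust the bitmap and register the gaps as dummy ports. The helper below is only an illustrative restatement of that relationship, not code from the patch:

	/* illustration: how HOST_CAP.NP and HOST_PORTS_IMPL relate */
	static unsigned int example_count_ports(u32 cap, u32 port_map)
	{
		unsigned int nr_ports = (cap & 0x1f) + 1;	/* NP field is ports - 1 */
		unsigned int i, implemented = 0;

		for (i = 0; i < 32; i++)
			if (port_map & (1u << i))
				implemented++;

		/* a sane BIOS leaves implemented <= nr_ports; if PORTS_IMPL reads
		 * back zero after reset, the driver falls back to assuming the
		 * first nr_ports ports, i.e. (1 << nr_ports) - 1
		 */
		return implemented ? implemented : nr_ports;
	}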
 
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 377425e..908751d 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -26,7 +26,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "ata_generic"
-#define DRV_VERSION "0.2.6"
+#define DRV_VERSION "0.2.10"
 
 /*
  *	A generic parallel ATA driver using libata
@@ -109,14 +109,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations generic_port_ops = {
@@ -225,12 +227,14 @@
 	.name 		= DRV_NAME,
 	.id_table	= ata_generic,
 	.probe 		= ata_generic_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
 };
 
 static int __init ata_generic_init(void)
 {
-	return pci_module_init(&ata_generic_pci_driver);
+	return pci_register_driver(&ata_generic_pci_driver);
 }
 
 
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 720174d..c7de0bb 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -40,7 +40,7 @@
  * Documentation
  *	Publically available from Intel web site. Errata documentation
  * is also publically available. As an aide to anyone hacking on this
- * driver the list of errata that are relevant is below.going back to
+ * driver the list of errata that are relevant is below, going back to
  * PIIX4. Older device documentation is now a bit tricky to find.
  *
  * The chipsets all follow very much the same design. The orginal Triton
@@ -93,7 +93,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"ata_piix"
-#define DRV_VERSION	"2.00ac6"
+#define DRV_VERSION	"2.00ac7"
 
 enum {
 	PIIX_IOCFG		= 0x54, /* IDE I/O configuration register */
@@ -101,11 +101,13 @@
 	ICH5_PCS		= 0x92,	/* port control and status */
 	PIIX_SCC		= 0x0A, /* sub-class code register */
 
-	PIIX_FLAG_IGNORE_PCS	= (1 << 25), /* ignore PCS present bits */
 	PIIX_FLAG_SCR		= (1 << 26), /* SCR available */
 	PIIX_FLAG_AHCI		= (1 << 27), /* AHCI possible */
 	PIIX_FLAG_CHECKINTR	= (1 << 28), /* make sure PCI INTx enabled */
 
+	PIIX_PATA_FLAGS		= ATA_FLAG_SLAVE_POSS,
+	PIIX_SATA_FLAGS		= ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
+
 	/* combined mode.  if set, PATA is channel 0.
 	 * if clear, PATA is channel 1.
 	 */
@@ -122,11 +124,10 @@
 	ich_pata_100		= 3,	/* ICH up to UDMA 100 */
 	ich_pata_133		= 4,	/* ICH up to UDMA 133 */
 	ich5_sata		= 5,
-	esb_sata		= 6,
-	ich6_sata		= 7,
-	ich6_sata_ahci		= 8,
-	ich6m_sata_ahci		= 9,
-	ich8_sata_ahci		= 10,
+	ich6_sata		= 6,
+	ich6_sata_ahci		= 7,
+	ich6m_sata_ahci		= 8,
+	ich8_sata_ahci		= 9,
 
 	/* constants for mapping table */
 	P0			= 0,  /* port 0 */
@@ -143,13 +144,11 @@
 struct piix_map_db {
 	const u32 mask;
 	const u16 port_enable;
-	const int present_shift;
 	const int map[][4];
 };
 
 struct piix_host_priv {
 	const int *map;
-	const struct piix_map_db *map_db;
 };
 
 static int piix_init_one (struct pci_dev *pdev,
@@ -214,9 +213,9 @@
 	/* 82801EB (ICH5) */
 	{ 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
 	/* 6300ESB (ICH5 variant with broken PCS present bits) */
-	{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
+	{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
 	/* 6300ESB pretending RAID */
-	{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
+	{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
 	/* 82801FB/FW (ICH6/ICH6W) */
 	{ 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
 	/* 82801FR/FRW (ICH6R/ICH6RW) */
@@ -367,7 +366,6 @@
 static const struct piix_map_db ich5_map_db = {
 	.mask = 0x7,
 	.port_enable = 0x3,
-	.present_shift = 4,
 	.map = {
 		/* PM   PS   SM   SS       MAP  */
 		{  P0,  NA,  P1,  NA }, /* 000b */
@@ -384,7 +382,6 @@
 static const struct piix_map_db ich6_map_db = {
 	.mask = 0x3,
 	.port_enable = 0xf,
-	.present_shift = 4,
 	.map = {
 		/* PM   PS   SM   SS       MAP */
 		{  P0,  P2,  P1,  P3 }, /* 00b */
@@ -397,7 +394,6 @@
 static const struct piix_map_db ich6m_map_db = {
 	.mask = 0x3,
 	.port_enable = 0x5,
-	.present_shift = 4,
 
 	/* Map 01b isn't specified in the doc but some notebooks use
 	 * it anyway.  MAP 01b have been spotted on both ICH6M and
@@ -415,7 +411,6 @@
 static const struct piix_map_db ich8_map_db = {
 	.mask = 0x3,
 	.port_enable = 0x3,
-	.present_shift = 8,
 	.map = {
 		/* PM   PS   SM   SS       MAP */
 		{  P0,  P2,  P1,  P3 }, /* 00b (hardwired when in AHCI) */
@@ -427,7 +422,6 @@
 
 static const struct piix_map_db *piix_map_db_table[] = {
 	[ich5_sata]		= &ich5_map_db,
-	[esb_sata]		= &ich5_map_db,
 	[ich6_sata]		= &ich6_map_db,
 	[ich6_sata_ahci]	= &ich6_map_db,
 	[ich6m_sata_ahci]	= &ich6m_map_db,
@@ -438,7 +432,7 @@
 	/* piix_pata_33: 0:  PIIX3 or 4 at 33MHz */
 	{
 		.sht		= &piix_sht,
-		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags		= PIIX_PATA_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
 		.udma_mask	= ATA_UDMA_MASK_40C,
@@ -448,7 +442,7 @@
 	/* ich_pata_33: 1 	ICH0 - ICH at 33Mhz*/
 	{
 		.sht		= &piix_sht,
-		.flags		= ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS,
+		.flags		= PIIX_PATA_FLAGS,
 		.pio_mask 	= 0x1f,	/* pio 0-4 */
 		.mwdma_mask	= 0x06, /* Check: maybe 0x07  */
 		.udma_mask	= ATA_UDMA2, /* UDMA33 */
@@ -457,7 +451,7 @@
 	/* ich_pata_66: 2 	ICH controllers up to 66MHz */
 	{
 		.sht		= &piix_sht,
-		.flags		= ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS,
+		.flags		= PIIX_PATA_FLAGS,
 		.pio_mask 	= 0x1f,	/* pio 0-4 */
 		.mwdma_mask	= 0x06, /* MWDMA0 is broken on chip */
 		.udma_mask	= ATA_UDMA4,
@@ -467,7 +461,7 @@
 	/* ich_pata_100: 3 */
 	{
 		.sht		= &piix_sht,
-		.flags		= ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
+		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x06, /* mwdma1-2 */
 		.udma_mask	= ATA_UDMA5, /* udma0-5 */
@@ -477,7 +471,7 @@
 	/* ich_pata_133: 4 	ICH with full UDMA6 */
 	{
 		.sht		= &piix_sht,
-		.flags		= ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
+		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
 		.pio_mask 	= 0x1f,	/* pio 0-4 */
 		.mwdma_mask	= 0x06, /* Check: maybe 0x07  */
 		.udma_mask	= ATA_UDMA6, /* UDMA133 */
@@ -487,41 +481,27 @@
 	/* ich5_sata: 5 */
 	{
 		.sht		= &piix_sht,
-		.flags		= ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR |
-				  PIIX_FLAG_IGNORE_PCS,
+		.flags		= PIIX_SATA_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &piix_sata_ops,
 	},
 
-	/* i6300esb_sata: 6 */
+	/* ich6_sata: 6 */
 	{
 		.sht		= &piix_sht,
-		.flags		= ATA_FLAG_SATA |
-				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &piix_sata_ops,
 	},
 
-	/* ich6_sata: 7 */
+	/* ich6_sata_ahci: 7 */
 	{
 		.sht		= &piix_sht,
-		.flags		= ATA_FLAG_SATA |
-				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
-		.pio_mask	= 0x1f,	/* pio0-4 */
-		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f,	/* udma0-6 */
-		.port_ops	= &piix_sata_ops,
-	},
-
-	/* ich6_sata_ahci: 8 */
-	{
-		.sht		= &piix_sht,
-		.flags		= ATA_FLAG_SATA |
-				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
 				  PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
@@ -529,11 +509,10 @@
 		.port_ops	= &piix_sata_ops,
 	},
 
-	/* ich6m_sata_ahci: 9 */
+	/* ich6m_sata_ahci: 8 */
 	{
 		.sht		= &piix_sht,
-		.flags		= ATA_FLAG_SATA |
-				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
 				  PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
@@ -541,11 +520,10 @@
 		.port_ops	= &piix_sata_ops,
 	},
 
-	/* ich8_sata_ahci: 10 */
+	/* ich8_sata_ahci: 9 */
 	{
 		.sht		= &piix_sht,
-		.flags		= ATA_FLAG_SATA |
-				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
 				  PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
@@ -566,10 +544,22 @@
 MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 
-static int force_pcs = 0;
-module_param(force_pcs, int, 0444);
-MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around "
-		 "device mis-detection (0=default, 1=ignore PCS, 2=honor PCS)");
+struct ich_laptop {
+	u16 device;
+	u16 subvendor;
+	u16 subdevice;
+};
+
+/*
+ *	List of laptops that use short cables rather than 80 wire
+ */
+
+static const struct ich_laptop ich_laptop[] = {
+	/* devid, subvendor, subdev */
+	{ 0x27DF, 0x0005, 0x0280 },	/* ICH7 on Acer 5602WLMi */
+	/* end marker */
+	{ 0, }
+};
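
The new ich_laptop[] table lets ich_pata_cbl_detect() force ATA_CBL_PATA40_SHORT on machines whose 40-pin cable is short enough to run the higher UDMA modes safely even though it reads as 40-wire. Adding another machine is a one-line change; the second entry below is a purely hypothetical example with made-up subsystem IDs, shown only to illustrate the format:

	static const struct ich_laptop ich_laptop[] = {
		/* devid, subvendor, subdev */
		{ 0x27DF, 0x0005, 0x0280 },	/* ICH7 on Acer 5602WLMi */
		{ 0x27DF, 0x1234, 0x5678 },	/* hypothetical: another ICH7 laptop */
		/* end marker */
		{ 0, }
	};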
 
 /**
  *	piix_pata_cbl_detect - Probe host controller cable detect info
@@ -585,12 +575,24 @@
 static void ich_pata_cbl_detect(struct ata_port *ap)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	const struct ich_laptop *lap = &ich_laptop[0];
 	u8 tmp, mask;
 
 	/* no 80c support in host controller? */
 	if ((ap->udma_mask & ~ATA_UDMA_MASK_40C) == 0)
 		goto cbl40;
 
+	/* Check for specials - Acer Aspire 5602WLMi */
+	while (lap->device) {
+		if (lap->device == pdev->device &&
+		    lap->subvendor == pdev->subsystem_vendor &&
+		    lap->subdevice == pdev->subsystem_device) {
+			ap->cbl = ATA_CBL_PATA40_SHORT;
+		    	return;
+		}
+		lap++;
+	}
+
 	/* check BIOS cable detect results */
 	mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
 	pci_read_config_byte(pdev, PIIX_IOCFG, &tmp);
@@ -659,84 +661,9 @@
 			   ata_std_postreset);
 }
 
-/**
- *	piix_sata_present_mask - determine present mask for SATA host controller
- *	@ap: Target port
- *
- *	Reads SATA PCI device's PCI config register Port Configuration
- *	and Status (PCS) to determine port and device availability.
- *
- *	LOCKING:
- *	None (inherited from caller).
- *
- *	RETURNS:
- *	determined present_mask
- */
-static unsigned int piix_sata_present_mask(struct ata_port *ap)
-{
-	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-	struct piix_host_priv *hpriv = ap->host->private_data;
-	const unsigned int *map = hpriv->map;
-	int base = 2 * ap->port_no;
-	unsigned int present_mask = 0;
-	int port, i;
-	u16 pcs;
-
-	pci_read_config_word(pdev, ICH5_PCS, &pcs);
-	DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);
-
-	for (i = 0; i < 2; i++) {
-		port = map[base + i];
-		if (port < 0)
-			continue;
-		if ((ap->flags & PIIX_FLAG_IGNORE_PCS) ||
-		    (pcs & 1 << (hpriv->map_db->present_shift + port)))
-			present_mask |= 1 << i;
-	}
-
-	DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
-		ap->id, pcs, present_mask);
-
-	return present_mask;
-}
-
-/**
- *	piix_sata_softreset - reset SATA host port via ATA SRST
- *	@ap: port to reset
- *	@classes: resulting classes of attached devices
- *
- *	Reset SATA host port via ATA SRST.  On controllers with
- *	reliable PCS present bits, the bits are used to determine
- *	device presence.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep)
- *
- *	RETURNS:
- *	0 on success, -errno otherwise.
- */
-static int piix_sata_softreset(struct ata_port *ap, unsigned int *classes)
-{
-	unsigned int present_mask;
-	int i, rc;
-
-	present_mask = piix_sata_present_mask(ap);
-
-	rc = ata_std_softreset(ap, classes);
-	if (rc)
-		return rc;
-
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		if (!(present_mask & (1 << i)))
-			classes[i] = ATA_DEV_NONE;
-	}
-
-	return 0;
-}
-
 static void piix_sata_error_handler(struct ata_port *ap)
 {
-	ata_bmdma_drive_eh(ap, ata_std_prereset, piix_sata_softreset, NULL,
+	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, NULL,
 			   ata_std_postreset);
 }
 
@@ -1051,18 +978,6 @@
 		pci_write_config_word(pdev, ICH5_PCS, new_pcs);
 		msleep(150);
 	}
-
-	if (force_pcs == 1) {
-		dev_printk(KERN_INFO, &pdev->dev,
-			   "force ignoring PCS (0x%x)\n", new_pcs);
-		pinfo[0].flags |= PIIX_FLAG_IGNORE_PCS;
-		pinfo[1].flags |= PIIX_FLAG_IGNORE_PCS;
-	} else if (force_pcs == 2) {
-		dev_printk(KERN_INFO, &pdev->dev,
-			   "force honoring PCS (0x%x)\n", new_pcs);
-		pinfo[0].flags &= ~PIIX_FLAG_IGNORE_PCS;
-		pinfo[1].flags &= ~PIIX_FLAG_IGNORE_PCS;
-	}
 }
 
 static void __devinit piix_init_sata_map(struct pci_dev *pdev,
@@ -1112,7 +1027,6 @@
 			   "invalid MAP value %u\n", map_value);
 
 	hpriv->map = map;
-	hpriv->map_db = map_db;
 }
 
 /**
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 915a55a..011c0a8 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -199,7 +199,8 @@
 
 /**
  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
- *	@qc: command to examine and configure
+ *	@tf: command to examine and configure
+ *	@dev: device tf belongs to
  *
  *	Examine the device configuration and tf->flags to calculate
  *	the proper read/write commands and protocol to use.
@@ -207,10 +208,8 @@
  *	LOCKING:
  *	caller.
  */
-int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
+static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
 {
-	struct ata_taskfile *tf = &qc->tf;
-	struct ata_device *dev = qc->dev;
 	u8 cmd;
 
 	int index, fua, lba48, write;
@@ -222,7 +221,7 @@
 	if (dev->flags & ATA_DFLAG_PIO) {
 		tf->protocol = ATA_PROT_PIO;
 		index = dev->multi_count ? 0 : 8;
-	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
+	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
 		/* Unable to use DMA due to host limitation */
 		tf->protocol = ATA_PROT_PIO;
 		index = dev->multi_count ? 0 : 8;
@@ -240,6 +239,174 @@
 }
 
 /**
+ *	ata_tf_read_block - Read block address from ATA taskfile
+ *	@tf: ATA taskfile of interest
+ *	@dev: ATA device @tf belongs to
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	Read block address from @tf.  This function can handle all
+ *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
+ *	flags select the address format to use.
+ *
+ *	RETURNS:
+ *	Block address read from @tf.
+ */
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
+{
+	u64 block = 0;
+
+	if (tf->flags & ATA_TFLAG_LBA) {
+		if (tf->flags & ATA_TFLAG_LBA48) {
+			block |= (u64)tf->hob_lbah << 40;
+			block |= (u64)tf->hob_lbam << 32;
+			block |= tf->hob_lbal << 24;
+		} else
+			block |= (tf->device & 0xf) << 24;
+
+		block |= tf->lbah << 16;
+		block |= tf->lbam << 8;
+		block |= tf->lbal;
+	} else {
+		u32 cyl, head, sect;
+
+		cyl = tf->lbam | (tf->lbah << 8);
+		head = tf->device & 0xf;
+		sect = tf->lbal;
+
+		block = (cyl * dev->heads + head) * dev->sectors + sect;
+	}
+
+	return block;
+}
+
+/**
+ *	ata_build_rw_tf - Build ATA taskfile for given read/write request
+ *	@tf: Target ATA taskfile
+ *	@dev: ATA device @tf belongs to
+ *	@block: Block address
+ *	@n_block: Number of blocks
+ *	@tf_flags: RW/FUA etc...
+ *	@tag: tag
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	Build ATA taskfile @tf for read/write request described by
+ *	@block, @n_block, @tf_flags and @tag on @dev.
+ *
+ *	RETURNS:
+ *
+ *	0 on success, -ERANGE if the request is too large for @dev,
+ *	-EINVAL if the request is invalid.
+ */
+int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
+		    u64 block, u32 n_block, unsigned int tf_flags,
+		    unsigned int tag)
+{
+	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf->flags |= tf_flags;
+
+	if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
+			   ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
+	    likely(tag != ATA_TAG_INTERNAL)) {
+		/* yay, NCQ */
+		if (!lba_48_ok(block, n_block))
+			return -ERANGE;
+
+		tf->protocol = ATA_PROT_NCQ;
+		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+
+		if (tf->flags & ATA_TFLAG_WRITE)
+			tf->command = ATA_CMD_FPDMA_WRITE;
+		else
+			tf->command = ATA_CMD_FPDMA_READ;
+
+		tf->nsect = tag << 3;
+		tf->hob_feature = (n_block >> 8) & 0xff;
+		tf->feature = n_block & 0xff;
+
+		tf->hob_lbah = (block >> 40) & 0xff;
+		tf->hob_lbam = (block >> 32) & 0xff;
+		tf->hob_lbal = (block >> 24) & 0xff;
+		tf->lbah = (block >> 16) & 0xff;
+		tf->lbam = (block >> 8) & 0xff;
+		tf->lbal = block & 0xff;
+
+		tf->device = 1 << 6;
+		if (tf->flags & ATA_TFLAG_FUA)
+			tf->device |= 1 << 7;
+	} else if (dev->flags & ATA_DFLAG_LBA) {
+		tf->flags |= ATA_TFLAG_LBA;
+
+		if (lba_28_ok(block, n_block)) {
+			/* use LBA28 */
+			tf->device |= (block >> 24) & 0xf;
+		} else if (lba_48_ok(block, n_block)) {
+			if (!(dev->flags & ATA_DFLAG_LBA48))
+				return -ERANGE;
+
+			/* use LBA48 */
+			tf->flags |= ATA_TFLAG_LBA48;
+
+			tf->hob_nsect = (n_block >> 8) & 0xff;
+
+			tf->hob_lbah = (block >> 40) & 0xff;
+			tf->hob_lbam = (block >> 32) & 0xff;
+			tf->hob_lbal = (block >> 24) & 0xff;
+		} else
+			/* request too large even for LBA48 */
+			return -ERANGE;
+
+		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
+			return -EINVAL;
+
+		tf->nsect = n_block & 0xff;
+
+		tf->lbah = (block >> 16) & 0xff;
+		tf->lbam = (block >> 8) & 0xff;
+		tf->lbal = block & 0xff;
+
+		tf->device |= ATA_LBA;
+	} else {
+		/* CHS */
+		u32 sect, head, cyl, track;
+
+		/* The request -may- be too large for CHS addressing. */
+		if (!lba_28_ok(block, n_block))
+			return -ERANGE;
+
+		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
+			return -EINVAL;
+
+		/* Convert LBA to CHS */
+		track = (u32)block / dev->sectors;
+		cyl   = track / dev->heads;
+		head  = track % dev->heads;
+		sect  = (u32)block % dev->sectors + 1;
+
+		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
+			(u32)block, track, cyl, head, sect);
+
+		/* Check whether the converted CHS can fit.
+		 * Cylinder: 0-65535
+		 * Head: 0-15
+		 * Sector: 1-255
+		 */
+		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
+			return -ERANGE;
+
+		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
+		tf->lbal = sect;
+		tf->lbam = cyl;
+		tf->lbah = cyl >> 8;
+		tf->device |= head;
+	}
+
+	return 0;
+}
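
The CHS branch above is the one place that still converts a linear block number back to cylinder/head/sector before the range check (cylinder 0-65535, head 0-15, sector 1-255). A worked example, assuming a hypothetical legacy geometry of 16 heads and 63 sectors per track, mirrors the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned int heads = 16, sectors = 63;	/* assumed drive geometry */
		unsigned int block = 10000;		/* example LBA */
		unsigned int track, cyl, head, sect;

		track = block / sectors;	/* 10000 / 63     = 158 */
		cyl   = track / heads;		/*   158 / 16     = 9   */
		head  = track % heads;		/*   158 % 16     = 14  */
		sect  = block % sectors + 1;	/* 10000 % 63 + 1 = 47 (sectors are 1-based) */

		printf("LBA %u -> C/H/S %u/%u/%u\n", block, cyl, head, sect);
		return 0;
	}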
+
+/**
  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
  *	@pio_mask: pio_mask
  *	@mwdma_mask: mwdma_mask
@@ -914,7 +1081,7 @@
  *	ata_port_queue_task - Queue port_task
  *	@ap: The ata_port to queue port_task for
  *	@fn: workqueue function to be scheduled
- *	@data: data value to pass to workqueue function
+ *	@data: data for @fn to use
  *	@delay: delay time for workqueue function
  *
  *	Schedule @fn(@data) for execution after @delay jiffies using
@@ -929,7 +1096,7 @@
  *	LOCKING:
  *	Inherited from caller.
  */
-void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
+void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
 			 unsigned long delay)
 {
 	int rc;
@@ -937,12 +1104,10 @@
 	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
 		return;
 
-	PREPARE_WORK(&ap->port_task, fn, data);
+	PREPARE_DELAYED_WORK(&ap->port_task, fn);
+	ap->port_task_data = data;
 
-	if (!delay)
-		rc = queue_work(ata_wq, &ap->port_task);
-	else
-		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
+	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
 
 	/* rc == 0 means that another user is using port task */
 	WARN_ON(rc == 0);
@@ -999,13 +1164,13 @@
 }
 
 /**
- *	ata_exec_internal - execute libata internal command
+ *	ata_exec_internal_sg - execute libata internal command
  *	@dev: Device to which the command is sent
  *	@tf: Taskfile registers for the command and the result
  *	@cdb: CDB for packet command
  *	@dma_dir: Data tranfer direction of the command
- *	@buf: Data buffer of the command
- *	@buflen: Length of data buffer
+ *	@sg: sg list for the data buffer of the command
+ *	@n_elem: Number of sg entries
  *
  *	Executes libata internal command with timeout.  @tf contains
  *	command on entry and result on return.  Timeout and error
@@ -1019,9 +1184,10 @@
  *	RETURNS:
  *	Zero on success, AC_ERR_* mask on failure
  */
-unsigned ata_exec_internal(struct ata_device *dev,
-			   struct ata_taskfile *tf, const u8 *cdb,
-			   int dma_dir, void *buf, unsigned int buflen)
+unsigned ata_exec_internal_sg(struct ata_device *dev,
+			      struct ata_taskfile *tf, const u8 *cdb,
+			      int dma_dir, struct scatterlist *sg,
+			      unsigned int n_elem)
 {
 	struct ata_port *ap = dev->ap;
 	u8 command = tf->command;
@@ -1077,7 +1243,12 @@
 	qc->flags |= ATA_QCFLAG_RESULT_TF;
 	qc->dma_dir = dma_dir;
 	if (dma_dir != DMA_NONE) {
-		ata_sg_init_one(qc, buf, buflen);
+		unsigned int i, buflen = 0;
+
+		for (i = 0; i < n_elem; i++)
+			buflen += sg[i].length;
+
+		ata_sg_init(qc, sg, n_elem);
 		qc->nsect = buflen / ATA_SECT_SIZE;
 	}
 
@@ -1161,6 +1332,35 @@
 }
 
 /**
+ *	ata_exec_internal - execute libata internal command
+ *	@dev: Device to which the command is sent
+ *	@tf: Taskfile registers for the command and the result
+ *	@cdb: CDB for packet command
+ *	@dma_dir: Data transfer direction of the command
+ *	@buf: Data buffer of the command
+ *	@buflen: Length of data buffer
+ *
+ *	Wrapper around ata_exec_internal_sg() which takes simple
+ *	buffer instead of sg list.
+ *
+ *	LOCKING:
+ *	None.  Should be called with kernel context, might sleep.
+ *
+ *	RETURNS:
+ *	Zero on success, AC_ERR_* mask on failure
+ */
+unsigned ata_exec_internal(struct ata_device *dev,
+			   struct ata_taskfile *tf, const u8 *cdb,
+			   int dma_dir, void *buf, unsigned int buflen)
+{
+	struct scatterlist sg;
+
+	sg_init_one(&sg, buf, buflen);
+
+	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, &sg, 1);
+}
+
+/**
  *	ata_do_simple_cmd - execute simple internal command
  *	@dev: Device to which the command is sent
  *	@cmd: Opcode to execute
@@ -1224,7 +1424,7 @@
  *	ata_dev_read_id - Read ID data from the specified device
  *	@dev: target device
  *	@p_class: pointer to class of the target device (may be changed)
- *	@post_reset: is this read ID post-reset?
+ *	@flags: ATA_READID_* flags
  *	@id: buffer to read IDENTIFY data into
  *
  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
@@ -1239,7 +1439,7 @@
  *	0 on success, -errno otherwise.
  */
 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
-		    int post_reset, u16 *id)
+		    unsigned int flags, u16 *id)
 {
 	struct ata_port *ap = dev->ap;
 	unsigned int class = *p_class;
@@ -1271,10 +1471,17 @@
 	}
 
 	tf.protocol = ATA_PROT_PIO;
+	tf.flags |= ATA_TFLAG_POLLING; /* for polling presence detection */
 
 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 				     id, sizeof(id[0]) * ATA_ID_WORDS);
 	if (err_mask) {
+		if (err_mask & AC_ERR_NODEV_HINT) {
+			DPRINTK("ata%u.%d: NODEV after polling detection\n",
+				ap->id, dev->devno);
+			return -ENOENT;
+		}
+
 		rc = -EIO;
 		reason = "I/O error";
 		goto err_out;
@@ -1294,7 +1501,7 @@
 			goto err_out;
 	}
 
-	if (post_reset && class == ATA_DEV_ATA) {
+	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
 		/*
 		 * The exact sequence expected by certain pre-ATA4 drives is:
 		 * SRST RESET
@@ -1314,7 +1521,7 @@
 			/* current CHS translation info (id[53-58]) might be
 			 * changed. reread the identify device info.
 			 */
-			post_reset = 0;
+			flags &= ~ATA_READID_POSTRESET;
 			goto retry;
 		}
 	}
@@ -1345,7 +1552,10 @@
 		desc[0] = '\0';
 		return;
 	}
-
+	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
+		snprintf(desc, desc_sz, "NCQ (not used)");
+		return;
+	}
 	if (ap->flags & ATA_FLAG_NCQ) {
 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
 		dev->flags |= ATA_DFLAG_NCQ;
@@ -1374,7 +1584,6 @@
 /**
  *	ata_dev_configure - Configure the specified ATA/ATAPI device
  *	@dev: Target device to configure
- *	@print_info: Enable device info printout
  *
  *	Configure @dev according to @dev->id.  Generic and low-level
  *	driver specific fixups are also applied.
@@ -1385,9 +1594,10 @@
  *	RETURNS:
  *	0 on success, -errno otherwise
  */
-int ata_dev_configure(struct ata_device *dev, int print_info)
+int ata_dev_configure(struct ata_device *dev)
 {
 	struct ata_port *ap = dev->ap;
+	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
 	const u16 *id = dev->id;
 	unsigned int xfer_mask;
 	char revbuf[7];		/* XYZ-99\0 */
@@ -1454,6 +1664,10 @@
 			if (ata_id_has_lba48(id)) {
 				dev->flags |= ATA_DFLAG_LBA48;
 				lba_desc = "LBA48";
+
+				if (dev->n_sectors >= (1UL << 28) &&
+				    ata_id_has_flush_ext(id))
+					dev->flags |= ATA_DFLAG_FLUSH_EXT;
 			}
 
 			/* config NCQ */
@@ -1530,6 +1744,11 @@
 				       cdb_intr_string);
 	}
 
+	/* determine max_sectors */
+	dev->max_sectors = ATA_MAX_SECTORS;
+	if (dev->flags & ATA_DFLAG_LBA48)
+		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
+
 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
 		/* Let the user know. We don't want to disallow opens for
 		   rescue purposes, or in case the vendor is just a blithering
@@ -1631,11 +1850,14 @@
 		if (!ata_dev_enabled(dev))
 			continue;
 
-		rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
+		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
+				     dev->id);
 		if (rc)
 			goto fail;
 
-		rc = ata_dev_configure(dev, 1);
+		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
+		rc = ata_dev_configure(dev);
+		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
 		if (rc)
 			goto fail;
 	}
@@ -2081,7 +2303,7 @@
 	 * DMA cycle timing is slower/equal than the fastest PIO timing.
 	 */
 
-	if (speed > XFER_PIO_4) {
+	if (speed > XFER_PIO_6) {
 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
 	}
@@ -2153,6 +2375,7 @@
 
 static int ata_dev_set_mode(struct ata_device *dev)
 {
+	struct ata_eh_context *ehc = &dev->ap->eh_context;
 	unsigned int err_mask;
 	int rc;
 
@@ -2167,7 +2390,9 @@
 		return -EIO;
 	}
 
+	ehc->i.flags |= ATA_EHI_POST_SETMODE;
 	rc = ata_dev_revalidate(dev, 0);
+	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
 	if (rc)
 		return rc;
 
@@ -2325,11 +2550,14 @@
  *	Sleep until ATA Status register bit BSY clears,
  *	or a timeout occurs.
  *
- *	LOCKING: None.
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
  */
-
-unsigned int ata_busy_sleep (struct ata_port *ap,
-			     unsigned long tmout_pat, unsigned long tmout)
+int ata_busy_sleep(struct ata_port *ap,
+		   unsigned long tmout_pat, unsigned long tmout)
 {
 	unsigned long timer_start, timeout;
 	u8 status;
@@ -2337,27 +2565,32 @@
 	status = ata_busy_wait(ap, ATA_BUSY, 300);
 	timer_start = jiffies;
 	timeout = timer_start + tmout_pat;
-	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
+	while (status != 0xff && (status & ATA_BUSY) &&
+	       time_before(jiffies, timeout)) {
 		msleep(50);
 		status = ata_busy_wait(ap, ATA_BUSY, 3);
 	}
 
-	if (status & ATA_BUSY)
+	if (status != 0xff && (status & ATA_BUSY))
 		ata_port_printk(ap, KERN_WARNING,
 				"port is slow to respond, please be patient "
 				"(Status 0x%x)\n", status);
 
 	timeout = timer_start + tmout;
-	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
+	while (status != 0xff && (status & ATA_BUSY) &&
+	       time_before(jiffies, timeout)) {
 		msleep(50);
 		status = ata_chk_status(ap);
 	}
 
+	if (status == 0xff)
+		return -ENODEV;
+
 	if (status & ATA_BUSY) {
 		ata_port_printk(ap, KERN_ERR, "port failed to respond "
 				"(%lu secs, Status 0x%x)\n",
 				tmout / HZ, status);
-		return 1;
+		return -EBUSY;
 	}
 
 	return 0;
@@ -2448,10 +2681,8 @@
 	 * the bus shows 0xFF because the odd clown forgets the D7
 	 * pulldown resistor.
 	 */
-	if (ata_check_status(ap) == 0xFF) {
-		ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
-		return AC_ERR_OTHER;
-	}
+	if (ata_check_status(ap) == 0xFF)
+		return 0;
 
 	ata_bus_post_reset(ap, devmask);
 
@@ -2777,9 +3008,9 @@
 }
 
 /**
- *	sata_std_hardreset - reset host port via SATA phy reset
+ *	sata_port_hardreset - reset port via SATA phy reset
  *	@ap: port to reset
- *	@class: resulting class of attached device
+ *	@timing: timing parameters { interval, duration, timeout } in msec
  *
  *	SATA phy-reset host port using DET bits of SControl register.
  *
@@ -2789,10 +3020,8 @@
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
+int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
 {
-	struct ata_eh_context *ehc = &ap->eh_context;
-	const unsigned long *timing = sata_ehc_deb_timing(ehc);
 	u32 scontrol;
 	int rc;
 
@@ -2805,24 +3034,24 @@
 		 * and Sil3124.
 		 */
 		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
-			return rc;
+			goto out;
 
 		scontrol = (scontrol & 0x0f0) | 0x304;
 
 		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
-			return rc;
+			goto out;
 
 		sata_set_spd(ap);
 	}
 
 	/* issue phy wake/reset */
 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
-		return rc;
+		goto out;
 
 	scontrol = (scontrol & 0x0f0) | 0x301;
 
 	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
-		return rc;
+		goto out;
 
 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
 	 * 10.4.2 says at least 1 ms.
@@ -2830,7 +3059,40 @@
 	msleep(1);
 
 	/* bring phy back */
-	sata_phy_resume(ap, timing);
+	rc = sata_phy_resume(ap, timing);
+ out:
+	DPRINTK("EXIT, rc=%d\n", rc);
+	return rc;
+}
+
+/**
+ *	sata_std_hardreset - reset host port via SATA phy reset
+ *	@ap: port to reset
+ *	@class: resulting class of attached device
+ *
+ *	SATA phy-reset host port using DET bits of SControl register,
+ *	wait for !BSY and classify the attached device.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
+{
+	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	/* do hardreset */
+	rc = sata_port_hardreset(ap, timing);
+	if (rc) {
+		ata_port_printk(ap, KERN_ERR,
+				"COMRESET failed (errno=%d)\n", rc);
+		return rc;
+	}
 
 	/* TODO: phy layer with polling, timeouts, etc. */
 	if (ata_port_offline(ap)) {
@@ -2969,7 +3231,7 @@
 /**
  *	ata_dev_revalidate - Revalidate ATA device
  *	@dev: device to revalidate
- *	@post_reset: is this revalidation after reset?
+ *	@readid_flags: read ID flags
  *
  *	Re-read IDENTIFY page and make sure @dev is still attached to
  *	the port.
@@ -2980,7 +3242,7 @@
  *	RETURNS:
  *	0 on success, negative errno otherwise
  */
-int ata_dev_revalidate(struct ata_device *dev, int post_reset)
+int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
 {
 	unsigned int class = dev->class;
 	u16 *id = (void *)dev->ap->sector_buf;
@@ -2992,7 +3254,7 @@
 	}
 
 	/* read ID data */
-	rc = ata_dev_read_id(dev, &class, post_reset, id);
+	rc = ata_dev_read_id(dev, &class, readid_flags, id);
 	if (rc)
 		goto fail;
 
@@ -3005,7 +3267,7 @@
 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
 
 	/* configure device according to the new ID */
-	rc = ata_dev_configure(dev, 0);
+	rc = ata_dev_configure(dev);
 	if (rc == 0)
 		return 0;
 
@@ -3014,37 +3276,55 @@
 	return rc;
 }
 
-static const char * const ata_dma_blacklist [] = {
-	"WDC AC11000H", NULL,
-	"WDC AC22100H", NULL,
-	"WDC AC32500H", NULL,
-	"WDC AC33100H", NULL,
-	"WDC AC31600H", NULL,
-	"WDC AC32100H", "24.09P07",
-	"WDC AC23200L", "21.10N21",
-	"Compaq CRD-8241B",  NULL,
-	"CRD-8400B", NULL,
-	"CRD-8480B", NULL,
-	"CRD-8482B", NULL,
- 	"CRD-84", NULL,
-	"SanDisk SDP3B", NULL,
-	"SanDisk SDP3B-64", NULL,
-	"SANYO CD-ROM CRD", NULL,
-	"HITACHI CDR-8", NULL,
-	"HITACHI CDR-8335", NULL,
-	"HITACHI CDR-8435", NULL,
-	"Toshiba CD-ROM XM-6202B", NULL,
-	"TOSHIBA CD-ROM XM-1702BC", NULL,
-	"CD-532E-A", NULL,
-	"E-IDE CD-ROM CR-840", NULL,
-	"CD-ROM Drive/F5A", NULL,
-	"WPI CDD-820", NULL,
-	"SAMSUNG CD-ROM SC-148C", NULL,
-	"SAMSUNG CD-ROM SC", NULL,
-	"SanDisk SDP3B-64", NULL,
-	"ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
-	"_NEC DV5800A", NULL,
-	"SAMSUNG CD-ROM SN-124", "N001"
+struct ata_blacklist_entry {
+	const char *model_num;
+	const char *model_rev;
+	unsigned long horkage;
+};
+
+static const struct ata_blacklist_entry ata_device_blacklist [] = {
+	/* Devices with DMA related problems under Linux */
+	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
+	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
+	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
+	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
+	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
+	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
+	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
+	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
+	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
+	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
+	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
+	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
+	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
+	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
+	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
+	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
+	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
+	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
+	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
+	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
+	{ "SanDisk SDP3B-64", 	NULL,		ATA_HORKAGE_NODMA },
+	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
+	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
+	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
+
+	/* Devices we expect to fail diagnostics */
+
+	/* Devices where NCQ should be avoided */
+	/* NCQ is slow */
+        { "WDC WD740ADFD-00",   NULL,		ATA_HORKAGE_NONCQ },
+
+	/* Devices with NCQ limits */
+
+	/* End Marker */
+	{ }
 };
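
ata_device_blacklisted() walks this table and returns the horkage mask of the first entry whose model string (and, when model_rev is non-NULL, firmware revision) matches, so per-device quirks are now a one-line addition in the appropriate section. The entries below are fabricated examples of the two forms, not real devices:

	{ "EXAMPLE DISK 1234",	NULL,		ATA_HORKAGE_NONCQ },	/* any firmware */
	{ "EXAMPLE DISK 9999",	"FW1.00",	ATA_HORKAGE_NODMA },	/* one firmware only */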
 
 static int ata_strim(char *s, size_t len)
@@ -3059,20 +3339,12 @@
 	return len;
 }
 
-static int ata_dma_blacklisted(const struct ata_device *dev)
+unsigned long ata_device_blacklisted(const struct ata_device *dev)
 {
 	unsigned char model_num[40];
 	unsigned char model_rev[16];
 	unsigned int nlen, rlen;
-	int i;
-
-	/* We don't support polling DMA.
-	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
-	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
-	 */
-	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
-	    (dev->flags & ATA_DFLAG_CDB_INTR))
-		return 1;
+	const struct ata_blacklist_entry *ad = ata_device_blacklist;
 
 	ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
 			  sizeof(model_num));
@@ -3081,17 +3353,30 @@
 	nlen = ata_strim(model_num, sizeof(model_num));
 	rlen = ata_strim(model_rev, sizeof(model_rev));
 
-	for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
-		if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
-			if (ata_dma_blacklist[i+1] == NULL)
-				return 1;
-			if (!strncmp(ata_dma_blacklist[i], model_rev, rlen))
-				return 1;
+	while (ad->model_num) {
+		if (!strncmp(ad->model_num, model_num, nlen)) {
+			if (ad->model_rev == NULL)
+				return ad->horkage;
+			if (!strncmp(ad->model_rev, model_rev, rlen))
+				return ad->horkage;
 		}
+		ad++;
 	}
 	return 0;
 }
 
+static int ata_dma_blacklisted(const struct ata_device *dev)
+{
+	/* We don't support polling DMA.
+	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
+	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
+	 */
+	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
+	    (dev->flags & ATA_DFLAG_CDB_INTR))
+		return 1;
+	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
+}
+
 /**
  *	ata_dev_xfermask - Compute supported xfermask of the given device
  *	@dev: Device to compute xfermask for
@@ -3119,6 +3404,13 @@
 	 */
 	if (ap->cbl == ATA_CBL_PATA40)
 		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
+	/* Apply drive side cable rule. Unknown or 80 pin cables reported
+	 * host side are checked drive side as well. Cases where we know a
+	 * 40wire cable is used safely for 80 are not checked here.
+	 */
+	if (ata_drive_40wire(dev->id) &&
+	    (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
+		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
+
 
 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
 				       dev->mwdma_mask, dev->udma_mask);
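
The 40-wire rule works by masking the UDMA portion of xfer_mask: each UDMA mode is one bit, so clearing 0xF8 (bits 3-7) removes UDMA3 and above and leaves UDMA0-2, roughly the 33 MB/s ceiling a 40-conductor cable can carry. A stand-alone illustration (ATA_SHIFT_UDMA is assumed to be 8 here, matching libata's xfer_mask layout):

	#include <stdio.h>

	#define ATA_SHIFT_UDMA	8	/* assumed value, for illustration only */

	int main(void)
	{
		unsigned int xfer_mask = 0x7f << ATA_SHIFT_UDMA;	/* UDMA0-6 supported */

		/* a 40-wire cable was detected: drop UDMA3 and above */
		xfer_mask &= ~(0xF8u << ATA_SHIFT_UDMA);

		/* prints 0x07, i.e. only UDMA0-2 remain */
		printf("remaining UDMA modes: 0x%02x\n",
		       (xfer_mask >> ATA_SHIFT_UDMA) & 0xff);
		return 0;
	}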
@@ -3236,8 +3528,7 @@
  *	LOCKING:
  *	spin_lock_irqsave(host lock)
  */
-
-static void ata_sg_clean(struct ata_queued_cmd *qc)
+void ata_sg_clean(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct scatterlist *sg = qc->__sg;
@@ -3395,19 +3686,15 @@
 
 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
 {
-	struct scatterlist *sg;
-
 	qc->flags |= ATA_QCFLAG_SINGLE;
 
-	memset(&qc->sgent, 0, sizeof(qc->sgent));
 	qc->__sg = &qc->sgent;
 	qc->n_elem = 1;
 	qc->orig_n_elem = 1;
 	qc->buf_virt = buf;
 	qc->nbytes = buflen;
 
-	sg = qc->__sg;
-	sg_init_one(sg, buf, buflen);
+	sg_init_one(&qc->sgent, buf, buflen);
 }
 
 /**
@@ -4200,8 +4487,12 @@
 					/* device stops HSM for abort/error */
 					qc->err_mask |= AC_ERR_DEV;
 				else
-					/* HSM violation. Let EH handle this */
-					qc->err_mask |= AC_ERR_HSM;
+					/* HSM violation. Let EH handle this.
+					 * Phantom devices also trigger this
+					 * condition.  Mark hint.
+					 */
+					qc->err_mask |= AC_ERR_HSM |
+							AC_ERR_NODEV_HINT;
 
 				ap->hsm_task_state = HSM_ST_ERR;
 				goto fsm_start;
@@ -4295,10 +4586,11 @@
 	return poll_next;
 }
 
-static void ata_pio_task(void *_data)
+static void ata_pio_task(struct work_struct *work)
 {
-	struct ata_queued_cmd *qc = _data;
-	struct ata_port *ap = qc->ap;
+	struct ata_port *ap =
+		container_of(work, struct ata_port, port_task.work);
+	struct ata_queued_cmd *qc = ap->port_task_data;
 	u8 status;
 	int poll_next;
 
@@ -4440,6 +4732,14 @@
 	qc->complete_fn(qc);
 }
 
+static void fill_result_tf(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	ap->ops->tf_read(ap, &qc->result_tf);
+	qc->result_tf.flags = qc->tf.flags;
+}
+
 /**
  *	ata_qc_complete - Complete an active ATA command
  *	@qc: Command to complete
@@ -4477,7 +4777,7 @@
 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
 			if (!ata_tag_internal(qc->tag)) {
 				/* always fill result TF for failed qc */
-				ap->ops->tf_read(ap, &qc->result_tf);
+				fill_result_tf(qc);
 				ata_qc_schedule_eh(qc);
 				return;
 			}
@@ -4485,7 +4785,7 @@
 
 		/* read result TF if requested */
 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
-			ap->ops->tf_read(ap, &qc->result_tf);
+			fill_result_tf(qc);
 
 		__ata_qc_complete(qc);
 	} else {
@@ -4494,7 +4794,7 @@
 
 		/* read result TF if failed or requested */
 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
-			ap->ops->tf_read(ap, &qc->result_tf);
+			fill_result_tf(qc);
 
 		__ata_qc_complete(qc);
 	}
@@ -4660,6 +4960,7 @@
 	if (ap->flags & ATA_FLAG_PIO_POLLING) {
 		switch (qc->tf.protocol) {
 		case ATA_PROT_PIO:
+		case ATA_PROT_NODATA:
 		case ATA_PROT_ATAPI:
 		case ATA_PROT_ATAPI_NODATA:
 			qc->tf.flags |= ATA_TFLAG_POLLING;
@@ -4674,6 +4975,14 @@
 		}
 	}
 
+	/* Some controllers show flaky interrupt behavior after
+	 * setting xfer mode.  Use polling instead.
+	 */
+	if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
+		     qc->tf.feature == SETFEATURES_XFER) &&
+	    (ap->flags & ATA_FLAG_SETXFER_POLLING))
+		qc->tf.flags |= ATA_TFLAG_POLLING;
+
 	/* select the device */
 	ata_dev_select(ap, qc->dev->devno, 1, 0);
 
@@ -4782,6 +5091,7 @@
 inline unsigned int ata_host_intr (struct ata_port *ap,
 				   struct ata_queued_cmd *qc)
 {
+	struct ata_eh_info *ehi = &ap->eh_info;
 	u8 status, host_stat = 0;
 
 	VPRINTK("ata%u: protocol %d task_state %d\n",
@@ -4842,6 +5152,11 @@
 	ap->ops->irq_clear(ap);
 
 	ata_hsm_move(ap, qc, status, 0);
+
+	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
+				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
+		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
+
 	return 1;	/* irq handled */
 
 idle_irq:
@@ -5048,7 +5363,7 @@
 	if (!ata_try_flush_cache(dev))
 		return 0;
 
-	if (ata_id_has_flush_ext(dev->id))
+	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
 		cmd = ATA_CMD_FLUSH_EXT;
 	else
 		cmd = ATA_CMD_FLUSH;
@@ -5320,9 +5635,9 @@
 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
 #endif
 
-	INIT_WORK(&ap->port_task, NULL, NULL);
-	INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
-	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
+	INIT_DELAYED_WORK(&ap->port_task, NULL);
+	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
+	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
 	INIT_LIST_HEAD(&ap->eh_done_q);
 	init_waitqueue_head(&ap->eh_wait_q);
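A sketch of the work_struct API conversion applied throughout this series: handlers now receive the work item itself and recover their object with container_of() on the delayed_work's inner work member. The struct and function names below are illustrative, not from the patch.

struct my_port {
	struct delayed_work hotplug_task;
};

static void my_hotplug_fn(struct work_struct *work)
{
	struct my_port *p =
		container_of(work, struct my_port, hotplug_task.work);
	/* operate on p */
}

static void my_port_start(struct my_port *p, struct workqueue_struct *wq)
{
	INIT_DELAYED_WORK(&p->hotplug_task, my_hotplug_fn);
	queue_delayed_work(wq, &p->hotplug_task, 0);	/* no delay */
}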
 
@@ -5520,9 +5835,8 @@
 				ap->ioaddr.bmdma_addr,
 				irq_line);
 
-		ata_chk_status(ap);
-		host->ops->irq_clear(ap);
-		ata_eh_freeze_port(ap);	/* freeze port before requesting IRQ */
+		/* freeze port before requesting IRQ */
+		ata_eh_freeze_port(ap);
 	}
 
 	/* obtain irq, that may be shared between channels */
@@ -6120,6 +6434,7 @@
 EXPORT_SYMBOL_GPL(ata_bus_reset);
 EXPORT_SYMBOL_GPL(ata_std_prereset);
 EXPORT_SYMBOL_GPL(ata_std_softreset);
+EXPORT_SYMBOL_GPL(sata_port_hardreset);
 EXPORT_SYMBOL_GPL(sata_std_hardreset);
 EXPORT_SYMBOL_GPL(ata_std_postreset);
 EXPORT_SYMBOL_GPL(ata_dev_classify);
@@ -6146,6 +6461,7 @@
 EXPORT_SYMBOL_GPL(ata_host_resume);
 EXPORT_SYMBOL_GPL(ata_id_string);
 EXPORT_SYMBOL_GPL(ata_id_c_string);
+EXPORT_SYMBOL_GPL(ata_device_blacklisted);
 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
 
 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 02b2b27..08ad44b 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -332,7 +332,7 @@
 	if (ap->pflags & ATA_PFLAG_LOADING)
 		ap->pflags &= ~ATA_PFLAG_LOADING;
 	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
-		queue_work(ata_aux_wq, &ap->hotplug_task);
+		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
 
 	if (ap->pflags & ATA_PFLAG_RECOVERED)
 		ata_port_printk(ap, KERN_INFO, "EH complete\n");
@@ -1136,19 +1136,21 @@
 		break;
 
 	case ATA_DEV_ATAPI:
-		tmp = atapi_eh_request_sense(qc->dev,
-					     qc->scsicmd->sense_buffer);
-		if (!tmp) {
-			/* ATA_QCFLAG_SENSE_VALID is used to tell
-			 * atapi_qc_complete() that sense data is
-			 * already valid.
-			 *
-			 * TODO: interpret sense data and set
-			 * appropriate err_mask.
-			 */
-			qc->flags |= ATA_QCFLAG_SENSE_VALID;
-		} else
-			qc->err_mask |= tmp;
+		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
+			tmp = atapi_eh_request_sense(qc->dev,
+						     qc->scsicmd->sense_buffer);
+			if (!tmp) {
+				/* ATA_QCFLAG_SENSE_VALID is used to
+				 * tell atapi_qc_complete() that sense
+				 * data is already valid.
+				 *
+				 * TODO: interpret sense data and set
+				 * appropriate err_mask.
+				 */
+				qc->flags |= ATA_QCFLAG_SENSE_VALID;
+			} else
+				qc->err_mask |= tmp;
+		}
 	}
 
 	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
@@ -1433,16 +1435,39 @@
 	}
 
 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
+		static const char *dma_str[] = {
+			[DMA_BIDIRECTIONAL]	= "bidi",
+			[DMA_TO_DEVICE]		= "out",
+			[DMA_FROM_DEVICE]	= "in",
+			[DMA_NONE]		= "",
+		};
 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
+		unsigned int nbytes;
 
 		if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
 			continue;
 
-		ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
-			       "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
-			       qc->tag, qc->tf.command, qc->err_mask,
-			       qc->result_tf.command, qc->result_tf.feature,
-			       ata_err_string(qc->err_mask));
+		nbytes = qc->nbytes;
+		if (!nbytes)
+			nbytes = qc->nsect << 9;
+
+		ata_dev_printk(qc->dev, KERN_ERR,
+			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
+			"tag %d cdb 0x%x data %u %s\n         "
+			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
+			"Emask 0x%x (%s)\n",
+			cmd->command, cmd->feature, cmd->nsect,
+			cmd->lbal, cmd->lbam, cmd->lbah,
+			cmd->hob_feature, cmd->hob_nsect,
+			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
+			cmd->device, qc->tag, qc->cdb[0], nbytes,
+			dma_str[qc->dma_dir],
+			res->command, res->feature, res->nsect,
+			res->lbal, res->lbam, res->lbah,
+			res->hob_feature, res->hob_nsect,
+			res->hob_lbal, res->hob_lbam, res->hob_lbah,
+			res->device, qc->err_mask, ata_err_string(qc->err_mask));
 	}
 }
 
@@ -1634,11 +1659,14 @@
 	DPRINTK("ENTER\n");
 
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		unsigned int action;
+		unsigned int action, readid_flags = 0;
 
 		dev = &ap->device[i];
 		action = ata_eh_dev_action(dev);
 
+		if (ehc->i.flags & ATA_EHI_DID_RESET)
+			readid_flags |= ATA_READID_POSTRESET;
+
 		if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) {
 			if (ata_port_offline(ap)) {
 				rc = -EIO;
@@ -1646,13 +1674,17 @@
 			}
 
 			ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
-			rc = ata_dev_revalidate(dev,
-					ehc->i.flags & ATA_EHI_DID_RESET);
+			rc = ata_dev_revalidate(dev, readid_flags);
 			if (rc)
 				break;
 
 			ata_eh_done(ap, dev, ATA_EH_REVALIDATE);
 
+			/* Configuration may have changed, reconfigure
+			 * transfer mode.
+			 */
+			ehc->i.flags |= ATA_EHI_SETMODE;
+
 			/* schedule the scsi_rescan_device() here */
 			queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
 		} else if (dev->class == ATA_DEV_UNKNOWN &&
@@ -1660,18 +1692,35 @@
 			   ata_class_enabled(ehc->classes[dev->devno])) {
 			dev->class = ehc->classes[dev->devno];
 
-			rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
-			if (rc == 0)
-				rc = ata_dev_configure(dev, 1);
+			rc = ata_dev_read_id(dev, &dev->class, readid_flags,
+					     dev->id);
+			if (rc == 0) {
+				ehc->i.flags |= ATA_EHI_PRINTINFO;
+				rc = ata_dev_configure(dev);
+				ehc->i.flags &= ~ATA_EHI_PRINTINFO;
+			} else if (rc == -ENOENT) {
+				/* IDENTIFY was issued to non-existent
+				 * device.  No need to reset.  Just
+				 * thaw and kill the device.
+				 */
+				ata_eh_thaw_port(ap);
+				dev->class = ATA_DEV_UNKNOWN;
+				rc = 0;
+			}
 
 			if (rc) {
 				dev->class = ATA_DEV_UNKNOWN;
 				break;
 			}
 
-			spin_lock_irqsave(ap->lock, flags);
-			ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
-			spin_unlock_irqrestore(ap->lock, flags);
+			if (ata_dev_enabled(dev)) {
+				spin_lock_irqsave(ap->lock, flags);
+				ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
+				spin_unlock_irqrestore(ap->lock, flags);
+
+				/* new device discovered, configure xfermode */
+				ehc->i.flags |= ATA_EHI_SETMODE;
+			}
 		}
 	}
 
@@ -1987,13 +2036,14 @@
 	if (rc)
 		goto dev_fail;
 
-	/* configure transfer mode if the port has been reset */
-	if (ehc->i.flags & ATA_EHI_DID_RESET) {
+	/* configure transfer mode if necessary */
+	if (ehc->i.flags & ATA_EHI_SETMODE) {
 		rc = ata_set_mode(ap, &dev);
 		if (rc) {
 			down_xfermask = 1;
 			goto dev_fail;
 		}
+		ehc->i.flags &= ~ATA_EHI_SETMODE;
 	}
 
 	/* suspend devices */
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 47ea111..664e137 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -671,7 +671,7 @@
 }
 
 /*
- *	ata_gen_ata_desc_sense - Generate check condition sense block.
+ *	ata_gen_passthru_sense - Generate check condition sense block.
  *	@qc: Command that completed.
  *
  *	This function is specific to the ATA descriptor format sense
@@ -681,9 +681,9 @@
  *	block. Clear sense key, ASC & ASCQ if there is no error.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host lock)
+ *	None.
  */
-void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
+static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
 {
 	struct scsi_cmnd *cmd = qc->scsicmd;
 	struct ata_taskfile *tf = &qc->result_tf;
@@ -713,12 +713,9 @@
 
 	desc[0] = 0x09;
 
-	/*
-	 * Set length of additional sense data.
-	 * Since we only populate descriptor 0, the total
-	 * length is the same (fixed) length as descriptor 0.
-	 */
-	desc[1] = sb[7] = 14;
+	/* set length of additional sense data */
+	sb[7] = 14;
+	desc[1] = 12;
 
 	/*
 	 * Copy registers into sense buffer.
@@ -746,56 +743,56 @@
 }
 
 /**
- *	ata_gen_fixed_sense - generate a SCSI fixed sense block
+ *	ata_gen_ata_sense - generate a SCSI fixed sense block
  *	@qc: Command that we are erroring out
  *
- *	Leverage ata_to_sense_error() to give us the codes.  Fit our
- *	LBA in here if there's room.
+ *	Generate sense block for a failed ATA command @qc.  Descriptor
+ *	format is used to accommodate the LBA48 block address.
  *
  *	LOCKING:
- *	inherited from caller
+ *	None.
  */
-void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
+static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
 {
+	struct ata_device *dev = qc->dev;
 	struct scsi_cmnd *cmd = qc->scsicmd;
 	struct ata_taskfile *tf = &qc->result_tf;
 	unsigned char *sb = cmd->sense_buffer;
+	unsigned char *desc = sb + 8;
 	int verbose = qc->ap->ops->error_handler == NULL;
+	u64 block;
 
 	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
 
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
-	/*
-	 * Use ata_to_sense_error() to map status register bits
+	/* sense data is current and format is descriptor */
+	sb[0] = 0x72;
+
+	/* Use ata_to_sense_error() to map status register bits
 	 * onto sense key, asc & ascq.
 	 */
 	if (qc->err_mask ||
 	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
 		ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
-				   &sb[2], &sb[12], &sb[13], verbose);
-		sb[2] &= 0x0f;
+				   &sb[1], &sb[2], &sb[3], verbose);
+		sb[1] &= 0x0f;
 	}
 
-	sb[0] = 0x70;
-	sb[7] = 0x0a;
+	block = ata_tf_read_block(&qc->result_tf, dev);
 
-	if (tf->flags & ATA_TFLAG_LBA48) {
-		/* TODO: find solution for LBA48 descriptors */
-	}
+	/* information sense data descriptor */
+	sb[7] = 12;
+	desc[0] = 0x00;
+	desc[1] = 10;
 
-	else if (tf->flags & ATA_TFLAG_LBA) {
-		/* A small (28b) LBA will fit in the 32b info field */
-		sb[0] |= 0x80;		/* set valid bit */
-		sb[3] = tf->device & 0x0f;
-		sb[4] = tf->lbah;
-		sb[5] = tf->lbam;
-		sb[6] = tf->lbal;
-	}
-
-	else {
-		/* TODO: C/H/S */
-	}
+	desc[2] |= 0x80;	/* valid */
+	desc[6] = block >> 40;
+	desc[7] = block >> 32;
+	desc[8] = block >> 24;
+	desc[9] = block >> 16;
+	desc[10] = block >> 8;
+	desc[11] = block;
 }
 
 static void ata_scsi_sdev_config(struct scsi_device *sdev)
@@ -807,23 +804,10 @@
 static void ata_scsi_dev_config(struct scsi_device *sdev,
 				struct ata_device *dev)
 {
-	unsigned int max_sectors;
+	/* configure max sectors */
+	blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
 
-	/* TODO: 2048 is an arbitrary number, not the
-	 * hardware maximum.  This should be increased to
-	 * 65534 when Jens Axboe's patch for dynamically
-	 * determining max_sectors is merged.
-	 */
-	max_sectors = ATA_MAX_SECTORS;
-	if (dev->flags & ATA_DFLAG_LBA48)
-		max_sectors = ATA_MAX_SECTORS_LBA48;
-	if (dev->max_sectors)
-		max_sectors = dev->max_sectors;
-
-	blk_queue_max_sectors(sdev->request_queue, max_sectors);
-
-	/*
-	 * SATA DMA transfers must be multiples of 4 byte, so
+	/* SATA DMA transfers must be multiples of 4 bytes, so
 	 * we need to pad ATAPI transfers using an extra sg.
 	 * Decrement max hw segments accordingly.
 	 */
@@ -1040,8 +1024,7 @@
 	tf->flags |= ATA_TFLAG_DEVICE;
 	tf->protocol = ATA_PROT_NODATA;
 
-	if ((qc->dev->flags & ATA_DFLAG_LBA48) &&
-	    (ata_id_has_flush_ext(qc->dev->id)))
+	if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
 		tf->command = ATA_CMD_FLUSH_EXT;
 	else
 		tf->command = ATA_CMD_FLUSH;
@@ -1282,17 +1265,14 @@
 
 static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
 {
-	struct ata_taskfile *tf = &qc->tf;
-	struct ata_device *dev = qc->dev;
+	unsigned int tf_flags = 0;
 	u64 block;
 	u32 n_block;
-
-	qc->flags |= ATA_QCFLAG_IO;
-	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	int rc;
 
 	if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
 	    scsicmd[0] == WRITE_16)
-		tf->flags |= ATA_TFLAG_WRITE;
+		tf_flags |= ATA_TFLAG_WRITE;
 
 	/* Calculate the SCSI LBA, transfer length and FUA. */
 	switch (scsicmd[0]) {
@@ -1300,7 +1280,7 @@
 	case WRITE_10:
 		scsi_10_lba_len(scsicmd, &block, &n_block);
 		if (unlikely(scsicmd[1] & (1 << 3)))
-			tf->flags |= ATA_TFLAG_FUA;
+			tf_flags |= ATA_TFLAG_FUA;
 		break;
 	case READ_6:
 	case WRITE_6:
@@ -1316,7 +1296,7 @@
 	case WRITE_16:
 		scsi_16_lba_len(scsicmd, &block, &n_block);
 		if (unlikely(scsicmd[1] & (1 << 3)))
-			tf->flags |= ATA_TFLAG_FUA;
+			tf_flags |= ATA_TFLAG_FUA;
 		break;
 	default:
 		DPRINTK("no-byte command\n");
@@ -1334,106 +1314,17 @@
 		 */
 		goto nothing_to_do;
 
-	if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
-			   ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) {
-		/* yay, NCQ */
-		if (!lba_48_ok(block, n_block))
-			goto out_of_range;
+	qc->flags |= ATA_QCFLAG_IO;
+	qc->nsect = n_block;
 
-		tf->protocol = ATA_PROT_NCQ;
-		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
+			     qc->tag);
+	if (likely(rc == 0))
+		return 0;
 
-		if (tf->flags & ATA_TFLAG_WRITE)
-			tf->command = ATA_CMD_FPDMA_WRITE;
-		else
-			tf->command = ATA_CMD_FPDMA_READ;
-
-		qc->nsect = n_block;
-
-		tf->nsect = qc->tag << 3;
-		tf->hob_feature = (n_block >> 8) & 0xff;
-		tf->feature = n_block & 0xff;
-
-		tf->hob_lbah = (block >> 40) & 0xff;
-		tf->hob_lbam = (block >> 32) & 0xff;
-		tf->hob_lbal = (block >> 24) & 0xff;
-		tf->lbah = (block >> 16) & 0xff;
-		tf->lbam = (block >> 8) & 0xff;
-		tf->lbal = block & 0xff;
-
-		tf->device = 1 << 6;
-		if (tf->flags & ATA_TFLAG_FUA)
-			tf->device |= 1 << 7;
-	} else if (dev->flags & ATA_DFLAG_LBA) {
-		tf->flags |= ATA_TFLAG_LBA;
-
-		if (lba_28_ok(block, n_block)) {
-			/* use LBA28 */
-			tf->device |= (block >> 24) & 0xf;
-		} else if (lba_48_ok(block, n_block)) {
-			if (!(dev->flags & ATA_DFLAG_LBA48))
-				goto out_of_range;
-
-			/* use LBA48 */
-			tf->flags |= ATA_TFLAG_LBA48;
-
-			tf->hob_nsect = (n_block >> 8) & 0xff;
-
-			tf->hob_lbah = (block >> 40) & 0xff;
-			tf->hob_lbam = (block >> 32) & 0xff;
-			tf->hob_lbal = (block >> 24) & 0xff;
-		} else
-			/* request too large even for LBA48 */
-			goto out_of_range;
-
-		if (unlikely(ata_rwcmd_protocol(qc) < 0))
-			goto invalid_fld;
-
-		qc->nsect = n_block;
-		tf->nsect = n_block & 0xff;
-
-		tf->lbah = (block >> 16) & 0xff;
-		tf->lbam = (block >> 8) & 0xff;
-		tf->lbal = block & 0xff;
-
-		tf->device |= ATA_LBA;
-	} else {
-		/* CHS */
-		u32 sect, head, cyl, track;
-
-		/* The request -may- be too large for CHS addressing. */
-		if (!lba_28_ok(block, n_block))
-			goto out_of_range;
-
-		if (unlikely(ata_rwcmd_protocol(qc) < 0))
-			goto invalid_fld;
-
-		/* Convert LBA to CHS */
-		track = (u32)block / dev->sectors;
-		cyl   = track / dev->heads;
-		head  = track % dev->heads;
-		sect  = (u32)block % dev->sectors + 1;
-
-		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
-			(u32)block, track, cyl, head, sect);
-
-		/* Check whether the converted CHS can fit.
-		   Cylinder: 0-65535
-		   Head: 0-15
-		   Sector: 1-255*/
-		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
-			goto out_of_range;
-
-		qc->nsect = n_block;
-		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
-		tf->lbal = sect;
-		tf->lbam = cyl;
-		tf->lbah = cyl >> 8;
-		tf->device |= head;
-	}
-
-	return 0;
-
+	if (rc == -ERANGE)
+		goto out_of_range;
+	/* treat all other errors as -EINVAL, fall through */
 invalid_fld:
 	ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
 	/* "Invalid field in cbd" */
@@ -1477,7 +1368,7 @@
 	 */
 	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
  	    ((cdb[2] & 0x20) || need_sense)) {
- 		ata_gen_ata_desc_sense(qc);
+		ata_gen_passthru_sense(qc);
 	} else {
 		if (!need_sense) {
 			cmd->result = SAM_STAT_GOOD;
@@ -1488,7 +1379,7 @@
 			 * good for smaller LBA (and maybe CHS?)
 			 * devices.
 			 */
-			ata_gen_fixed_sense(qc);
+			ata_gen_ata_sense(qc);
 		}
 	}
 
@@ -1715,6 +1606,22 @@
 }
 
 /**
+ *	ATA_SCSI_RBUF_SET - helper to set values in SCSI response buffer
+ *	@idx: byte index into SCSI response buffer
+ *	@val: value to set
+ *
+ *	To be used by SCSI command simulator functions.  This macro
+ *	expects two local variables, u8 *rbuf and unsigned int buflen,
+ *	to be in scope.
+ *
+ *	LOCKING:
+ *	None.
+ */
+#define ATA_SCSI_RBUF_SET(idx, val) do { \
+		if ((idx) < buflen) rbuf[(idx)] = (u8)(val); \
+	} while (0)
+
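A minimal sketch (not part of the patch) of a simulator function using the macro; rbuf and buflen are the locals it expects, and the function name is illustrative.

static unsigned int demo_scsiop(struct ata_scsi_args *args, u8 *rbuf,
				unsigned int buflen)
{
	ATA_SCSI_RBUF_SET(0, 0x00);	/* silently skipped if buflen == 0 */
	ATA_SCSI_RBUF_SET(4, 32);	/* additional length */
	return 0;
}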
+/**
  *	ata_scsiop_inq_std - Simulate INQUIRY command
  *	@args: device IDENTIFY data / SCSI command of interest.
  *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
@@ -2173,67 +2080,42 @@
  *	Simulate READ CAPACITY commands.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host lock)
+ *	None.
  */
-
 unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
 			        unsigned int buflen)
 {
-	u64 n_sectors;
-	u32 tmp;
+	u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */
 
 	VPRINTK("ENTER\n");
 
-	if (ata_id_has_lba(args->id)) {
-		if (ata_id_has_lba48(args->id))
-			n_sectors = ata_id_u64(args->id, 100);
-		else
-			n_sectors = ata_id_u32(args->id, 60);
-	} else {
-		/* CHS default translation */
-		n_sectors = args->id[1] * args->id[3] * args->id[6];
-
-		if (ata_id_current_chs_valid(args->id))
-			/* CHS current translation */
-			n_sectors = ata_id_u32(args->id, 57);
-	}
-
-	n_sectors--;		/* ATA TotalUserSectors - 1 */
-
 	if (args->cmd->cmnd[0] == READ_CAPACITY) {
-		if( n_sectors >= 0xffffffffULL )
-			tmp = 0xffffffff ;  /* Return max count on overflow */
-		else
-			tmp = n_sectors ;
+		if (last_lba >= 0xffffffffULL)
+			last_lba = 0xffffffff;
 
 		/* sector count, 32-bit */
-		rbuf[0] = tmp >> (8 * 3);
-		rbuf[1] = tmp >> (8 * 2);
-		rbuf[2] = tmp >> (8 * 1);
-		rbuf[3] = tmp;
+		ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 3));
+		ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 2));
+		ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 1));
+		ATA_SCSI_RBUF_SET(3, last_lba);
 
 		/* sector size */
-		tmp = ATA_SECT_SIZE;
-		rbuf[6] = tmp >> 8;
-		rbuf[7] = tmp;
-
+		ATA_SCSI_RBUF_SET(6, ATA_SECT_SIZE >> 8);
+		ATA_SCSI_RBUF_SET(7, ATA_SECT_SIZE);
 	} else {
 		/* sector count, 64-bit */
-		tmp = n_sectors >> (8 * 4);
-		rbuf[2] = tmp >> (8 * 3);
-		rbuf[3] = tmp >> (8 * 2);
-		rbuf[4] = tmp >> (8 * 1);
-		rbuf[5] = tmp;
-		tmp = n_sectors;
-		rbuf[6] = tmp >> (8 * 3);
-		rbuf[7] = tmp >> (8 * 2);
-		rbuf[8] = tmp >> (8 * 1);
-		rbuf[9] = tmp;
+		ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 7));
+		ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 6));
+		ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 5));
+		ATA_SCSI_RBUF_SET(3, last_lba >> (8 * 4));
+		ATA_SCSI_RBUF_SET(4, last_lba >> (8 * 3));
+		ATA_SCSI_RBUF_SET(5, last_lba >> (8 * 2));
+		ATA_SCSI_RBUF_SET(6, last_lba >> (8 * 1));
+		ATA_SCSI_RBUF_SET(7, last_lba);
 
 		/* sector size */
-		tmp = ATA_SECT_SIZE;
-		rbuf[12] = tmp >> 8;
-		rbuf[13] = tmp;
+		ATA_SCSI_RBUF_SET(10, ATA_SECT_SIZE >> 8);
+		ATA_SCSI_RBUF_SET(11, ATA_SECT_SIZE);
 	}
 
 	return 0;
@@ -2319,7 +2201,7 @@
 		 * a sense descriptors, since that's only
 		 * correct for ATA, not ATAPI
 		 */
-		ata_gen_ata_desc_sense(qc);
+		ata_gen_passthru_sense(qc);
 	}
 
 	qc->scsidone(qc->scsicmd);
@@ -2394,7 +2276,7 @@
 			 * sense descriptors, since that's only
 			 * correct for ATA, not ATAPI
 			 */
-			ata_gen_ata_desc_sense(qc);
+			ata_gen_passthru_sense(qc);
 		}
 
 		/* SCSI EH automatically locks door if sdev->locked is
@@ -2427,7 +2309,7 @@
 		 * a sense descriptors, since that's only
 		 * correct for ATA, not ATAPI
 		 */
-		ata_gen_ata_desc_sense(qc);
+		ata_gen_passthru_sense(qc);
 	} else {
 		u8 *scsicmd = cmd->cmnd;
 
@@ -3081,7 +2963,7 @@
 
 /**
  *	ata_scsi_hotplug - SCSI part of hotplug
- *	@data: Pointer to ATA port to perform SCSI hotplug on
+ *	@work: Pointer to the embedded hotplug work item of the ATA port
  *
  *	Perform SCSI part of hotplug.  It's executed from a separate
  *	workqueue after EH completes.  This is necessary because SCSI
@@ -3091,9 +2973,10 @@
  *	LOCKING:
  *	Kernel thread context (may sleep).
  */
-void ata_scsi_hotplug(void *data)
+void ata_scsi_hotplug(struct work_struct *work)
 {
-	struct ata_port *ap = data;
+	struct ata_port *ap =
+		container_of(work, struct ata_port, hotplug_task.work);
 	int i;
 
 	if (ap->pflags & ATA_PFLAG_UNLOADING) {
@@ -3182,17 +3065,19 @@
 			rc = -EINVAL;
 	}
 
-	if (rc == 0)
+	if (rc == 0) {
 		ata_port_schedule_eh(ap);
-
-	spin_unlock_irqrestore(ap->lock, flags);
+		spin_unlock_irqrestore(ap->lock, flags);
+		ata_port_wait_eh(ap);
+	} else
+		spin_unlock_irqrestore(ap->lock, flags);
 
 	return rc;
 }
 
 /**
  *	ata_scsi_dev_rescan - initiate scsi_rescan_device()
- *	@data: Pointer to ATA port to perform scsi_rescan_device()
+ *	@work: Pointer to the embedded rescan work item of the ATA port
  *
  *	After ATA pass thru (SAT) commands are executed successfully,
  *	libata need to propagate the changes to SCSI layer.  This
@@ -3202,18 +3087,31 @@
  *	LOCKING:
  *	Kernel thread context (may sleep).
  */
-void ata_scsi_dev_rescan(void *data)
+void ata_scsi_dev_rescan(struct work_struct *work)
 {
-	struct ata_port *ap = data;
-	struct ata_device *dev;
+	struct ata_port *ap =
+		container_of(work, struct ata_port, scsi_rescan_task);
+	unsigned long flags;
 	unsigned int i;
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		dev = &ap->device[i];
+	spin_lock_irqsave(ap->lock, flags);
 
-		if (ata_dev_enabled(dev) && dev->sdev)
-			scsi_rescan_device(&(dev->sdev->sdev_gendev));
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
+		struct scsi_device *sdev = dev->sdev;
+
+		if (!ata_dev_enabled(dev) || !sdev)
+			continue;
+		if (scsi_device_get(sdev))
+			continue;
+
+		spin_unlock_irqrestore(ap->lock, flags);
+		scsi_rescan_device(&(sdev->sdev_gendev));
+		scsi_device_put(sdev);
+		spin_lock_irqsave(ap->lock, flags);
 	}
+
+	spin_unlock_irqrestore(ap->lock, flags);
 }
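The rewrite above follows the usual take-a-reference, drop-the-lock, call, reacquire pattern around the sleeping scsi_rescan_device() call. A standalone sketch, with my_lock as a placeholder name:

static void rescan_one(spinlock_t *my_lock, struct scsi_device *sdev)
{
	unsigned long flags;

	spin_lock_irqsave(my_lock, flags);
	if (scsi_device_get(sdev) == 0) {
		spin_unlock_irqrestore(my_lock, flags);
		scsi_rescan_device(&sdev->sdev_gendev);	/* may sleep */
		scsi_device_put(sdev);
		spin_lock_irqsave(my_lock, flags);
	}
	spin_unlock_irqrestore(my_lock, flags);
}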
 
 /**
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 7645f2b..10ee22a 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -39,6 +39,35 @@
 #include "libata.h"
 
 /**
+ *	ata_irq_on - Enable interrupts on a port.
+ *	@ap: Port on which interrupts are enabled.
+ *
+ *	Enable interrupts on a legacy IDE device using MMIO or PIO,
+ *	wait for idle, clear any pending interrupts.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+u8 ata_irq_on(struct ata_port *ap)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	u8 tmp;
+
+	ap->ctl &= ~ATA_NIEN;
+	ap->last_ctl = ap->ctl;
+
+	if (ap->flags & ATA_FLAG_MMIO)
+		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
+	else
+		outb(ap->ctl, ioaddr->ctl_addr);
+	tmp = ata_wait_idle(ap);
+
+	ap->ops->irq_clear(ap);
+
+	return tmp;
+}
+
+/**
  *	ata_tf_load_pio - send taskfile registers to host controller
  *	@ap: Port to which output is sent
  *	@tf: ATA taskfile register set
@@ -671,6 +700,14 @@
 		writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
 	else
 		outb(ap->ctl, ioaddr->ctl_addr);
+
+	/* Under certain circumstances, some controllers raise IRQ on
+	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
+	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
+	 */
+	ata_chk_status(ap);
+
+	ap->ops->irq_clear(ap);
 }
 
 /**
@@ -714,7 +751,6 @@
 			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
 			ata_postreset_fn_t postreset)
 {
-	struct ata_eh_context *ehc = &ap->eh_context;
 	struct ata_queued_cmd *qc;
 	unsigned long flags;
 	int thaw = 0;
@@ -732,9 +768,7 @@
 		   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
 		u8 host_stat;
 
-		host_stat = ata_bmdma_status(ap);
-
-		ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
+		host_stat = ap->ops->bmdma_status(ap);
 
 		/* BMDMA controllers indicate host bus error by
 		 * setting DMA_ERR bit and timing out.  As it wasn't
@@ -877,6 +911,7 @@
 		return NULL;
 
 	probe_ent->n_ports = 2;
+	probe_ent->irq_flags = IRQF_SHARED;
 
 	if (port_mask & ATA_PORT_PRIMARY) {
 		probe_ent->irq = ATA_PRIMARY_IRQ;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 0ed263b..81ae41d 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -39,26 +39,39 @@
 };
 
 /* libata-core.c */
+enum {
+	/* flags for ata_dev_read_id() */
+	ATA_READID_POSTRESET	= (1 << 0), /* reading ID after reset */
+};
+
 extern struct workqueue_struct *ata_aux_wq;
 extern int atapi_enabled;
 extern int atapi_dmadir;
 extern int libata_fua;
 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
-extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
+extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
+			   u64 block, u32 n_block, unsigned int tf_flags,
+			   unsigned int tag);
+extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
 extern void ata_dev_disable(struct ata_device *dev);
 extern void ata_port_flush_task(struct ata_port *ap);
 extern unsigned ata_exec_internal(struct ata_device *dev,
 				  struct ata_taskfile *tf, const u8 *cdb,
 				  int dma_dir, void *buf, unsigned int buflen);
+extern unsigned ata_exec_internal_sg(struct ata_device *dev,
+				     struct ata_taskfile *tf, const u8 *cdb,
+				     int dma_dir, struct scatterlist *sg,
+				     unsigned int n_elem);
 extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
 extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
-			   int post_reset, u16 *id);
-extern int ata_dev_revalidate(struct ata_device *dev, int post_reset);
-extern int ata_dev_configure(struct ata_device *dev, int print_info);
+			   unsigned int flags, u16 *id);
+extern int ata_dev_revalidate(struct ata_device *dev, unsigned int flags);
+extern int ata_dev_configure(struct ata_device *dev);
 extern int sata_down_spd_limit(struct ata_port *ap);
 extern int sata_set_spd_needed(struct ata_port *ap);
 extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
 extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
+extern void ata_sg_clean(struct ata_queued_cmd *qc);
 extern void ata_qc_free(struct ata_queued_cmd *qc);
 extern void ata_qc_issue(struct ata_queued_cmd *qc);
 extern void __ata_qc_complete(struct ata_queued_cmd *qc);
@@ -81,7 +94,7 @@
 
 extern void ata_scsi_scan_host(struct ata_port *ap);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_hotplug(void *data);
+extern void ata_scsi_hotplug(struct work_struct *work);
 extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
 			       unsigned int buflen);
 
@@ -111,7 +124,7 @@
                         unsigned int (*actor) (struct ata_scsi_args *args,
                                            u8 *rbuf, unsigned int buflen));
 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
-extern void ata_scsi_dev_rescan(void *data);
+extern void ata_scsi_dev_rescan(struct work_struct *work);
 extern int ata_bus_probe(struct ata_port *ap);
 
 /* libata-eh.c */
@@ -120,4 +133,7 @@
 extern void ata_port_wait_eh(struct ata_port *ap);
 extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
 
+/* libata-sff.c */
+extern u8 ata_irq_on(struct ata_port *ap);
+
 #endif /* __LIBATA_H__ */
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 1d695df..c5d61d1 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -34,7 +34,7 @@
 #include <linux/dmi.h>
 
 #define DRV_NAME "pata_ali"
-#define DRV_VERSION "0.6.6"
+#define DRV_VERSION "0.7.2"
 
 /*
  *	Cable special cases
@@ -78,7 +78,7 @@
 	   implement the detect logic */
 
 	if (ali_cable_override(pdev))
-		return ATA_CBL_PATA80;
+		return ATA_CBL_PATA40_SHORT;
 
 	/* Host view cable detect 0x4A bit 0 primary bit 1 secondary
 	   Bit set for 40 pin */
@@ -337,16 +337,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	/* Keep LBA28 counts so large I/O's don't turn LBA48 and PIO
-	   with older controllers. Not locked so will grow on C5 or later */
-	.max_sectors		= 255,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 /*
@@ -496,6 +496,69 @@
 	.host_stop	= ata_host_stop
 };
 
+
+/**
+ *	ali_init_chipset	-	chip setup function
+ *	@pdev: PCI device of ATA controller
+ *
+ *	Perform the setup on the device that must be done both at boot
+ *	and at resume time.
+ */
+ 
+static void ali_init_chipset(struct pci_dev *pdev)
+{
+	u8 rev, tmp;
+	struct pci_dev *north, *isa_bridge;
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
+
+	/*
+	 * The chipset revision selects the driver operations and
+	 * mode data.
+	 */
+
+	if (rev >= 0x20 && rev < 0xC2) {
+		/* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
+		pci_read_config_byte(pdev, 0x4B, &tmp);
+		/* Clear CD-ROM DMA write bit */
+		tmp &= 0x7F;
+		pci_write_config_byte(pdev, 0x4B, tmp);
+	} else if (rev >= 0xC2) {
+		/* Enable cable detection logic */
+		pci_read_config_byte(pdev, 0x4B, &tmp);
+		pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
+	}
+	north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+	isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
+
+	if (north && north->vendor == PCI_VENDOR_ID_AL && isa_bridge) {
+		/* Configure the ALi bridge logic. For non ALi rely on BIOS.
+		   Set the south bridge enable bit */
+		pci_read_config_byte(isa_bridge, 0x79, &tmp);
+		if (rev == 0xC2)
+			pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
+		else if (rev > 0xC2 && rev < 0xC5)
+			pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
+	}
+	if (rev >= 0x20) {
+		/*
+		 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
+		 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
+		 * via 0x54/55.
+		 */
+		pci_read_config_byte(pdev, 0x53, &tmp);
+		if (rev <= 0x20)
+			tmp &= ~0x02;
+		if (rev >= 0xc7)
+			tmp |= 0x03;
+		else
+			tmp |= 0x01;	/* CD_ROM enable for DMA */
+		pci_write_config_byte(pdev, 0x53, tmp);
+	}
+	pci_dev_put(isa_bridge);
+	pci_dev_put(north);
+	ata_pci_clear_simplex(pdev);
+}
 /**
  *	ali_init_one		-	discovery callback
  *	@pdev: PCI device ID
@@ -569,7 +632,7 @@
 
 	static struct ata_port_info *port_info[2];
 	u8 rev, tmp;
-	struct pci_dev *north, *isa_bridge;
+	struct pci_dev *isa_bridge;
 
 	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
 
@@ -581,11 +644,6 @@
 	if (rev < 0x20) {
 		port_info[0] = port_info[1] = &info_early;
 	} else if (rev < 0xC2) {
-		/* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
-		pci_read_config_byte(pdev, 0x4B, &tmp);
-		/* Clear CD-ROM DMA write bit */
-		tmp &= 0x7F;
-		pci_write_config_byte(pdev, 0x4B, tmp);
         	port_info[0] = port_info[1] = &info_20;
 	} else if (rev == 0xC2) {
         	port_info[0] = port_info[1] = &info_c2;
@@ -596,54 +654,25 @@
 	} else
         	port_info[0] = port_info[1] = &info_c5;
 
-	if (rev >= 0xC2) {
-		/* Enable cable detection logic */
-		pci_read_config_byte(pdev, 0x4B, &tmp);
-		pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
-	}
-
-	north = pci_get_slot(pdev->bus, PCI_DEVFN(0,0));
+	ali_init_chipset(pdev);
+	
 	isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
-
-	if (north && north->vendor == PCI_VENDOR_ID_AL) {
-		/* Configure the ALi bridge logic. For non ALi rely on BIOS.
-		   Set the south bridge enable bit */
-		pci_read_config_byte(isa_bridge, 0x79, &tmp);
-		if (rev == 0xC2)
-			pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
-		else if (rev > 0xC2)
-			pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
+	if (isa_bridge && rev >= 0x20 && rev < 0xC2) {
+		/* Are we paired with a UDMA capable chip */
+		pci_read_config_byte(isa_bridge, 0x5E, &tmp);
+		if ((tmp & 0x1E) == 0x12)
+	        	port_info[0] = port_info[1] = &info_20_udma;
+		pci_dev_put(isa_bridge);
 	}
-
-	if (rev >= 0x20) {
-		if (rev < 0xC2) {
-			/* Are we paired with a UDMA capable chip */
-			pci_read_config_byte(isa_bridge, 0x5E, &tmp);
-			if ((tmp & 0x1E) == 0x12)
-		        	port_info[0] = port_info[1] = &info_20_udma;
-		}
-		/*
-		 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
-		 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
-		 * via 0x54/55.
-		 */
-		pci_read_config_byte(pdev, 0x53, &tmp);
-		if (rev <= 0x20)
-			tmp &= ~0x02;
-		if (rev >= 0xc7)
-			tmp |= 0x03;
-		else
-			tmp |= 0x01;	/* CD_ROM enable for DMA */
-		pci_write_config_byte(pdev, 0x53, tmp);
-	}
-
-	pci_dev_put(isa_bridge);
-	pci_dev_put(north);
-
-	ata_pci_clear_simplex(pdev);
 	return ata_pci_init_one(pdev, port_info, 2);
 }
 
+static int ali_reinit_one(struct pci_dev *pdev)
+{
+	ali_init_chipset(pdev);
+	return ata_pci_device_resume(pdev);
+}
+
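The same suspend/resume shape recurs across the PATA drivers touched below; a generic sketch with placeholder "foo" names (chipset setup is redone on resume, then control passes to the generic libata helper):

static void foo_init_chipset(struct pci_dev *pdev)
{
	/* reprogram timing/enable bits lost across suspend-to-RAM */
}

static int foo_reinit_one(struct pci_dev *pdev)
{
	foo_init_chipset(pdev);
	return ata_pci_device_resume(pdev);
}

static struct pci_driver foo_pci_driver = {
	.name		= "pata_foo",
	.suspend	= ata_pci_device_suspend,
	.resume		= foo_reinit_one,
	/* .id_table, .probe, .remove as usual */
};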
 static const struct pci_device_id ali[] = {
 	{ PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), },
 	{ PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), },
@@ -655,7 +684,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= ali,
 	.probe 		= ali_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ali_reinit_one,
 };
 
 static int __init ali_init(void)
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 5c47a9e..a6b33008 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_amd"
-#define DRV_VERSION "0.2.4"
+#define DRV_VERSION "0.2.7"
 
 /**
  *	timing_setup		-	shared timing computation and load
@@ -326,14 +326,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations amd33_port_ops = {
@@ -661,6 +663,23 @@
 	return ata_pci_init_one(pdev, port_info, 2);
 }
 
+static int amd_reinit_one(struct pci_dev *pdev)
+{
+	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
+		u8 fifo;
+		pci_read_config_byte(pdev, 0x41, &fifo);
+		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
+			/* FIFO is broken */
+			pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
+		else
+			pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
+		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
+		    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
+		    	ata_pci_clear_simplex(pdev);
+	}
+	return ata_pci_device_resume(pdev);
+}
+
 static const struct pci_device_id amd[] = {
 	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_COBRA_7401),		0 },
 	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_VIPER_7409),		1 },
@@ -688,7 +707,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= amd,
 	.probe 		= amd_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= amd_reinit_one,
 };
 
 static int __init amd_init(void)
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 96a0980..37bc132 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -307,13 +307,13 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
 };
 
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 1ce28d2..6f6672c 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -22,7 +22,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_atiixp"
-#define DRV_VERSION "0.4.3"
+#define DRV_VERSION "0.4.4"
 
 enum {
 	ATIIXP_IDE_PIO_TIMING	= 0x40,
@@ -209,14 +209,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations atiixp_port_ops = {
@@ -280,7 +282,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= atiixp,
 	.probe 		= atiixp_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.resume		= ata_pci_device_resume,
+	.suspend	= ata_pci_device_suspend,
 };
 
 static int __init atiixp_init(void)
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index b9bbd1d..15841a5 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -31,7 +31,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_cmd64x"
-#define DRV_VERSION "0.2.1"
+#define DRV_VERSION "0.2.2"
 
 /*
  * CMD64x specific registers definition.
@@ -268,14 +268,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations cmd64x_port_ops = {
@@ -468,6 +470,20 @@
 	return ata_pci_init_one(pdev, port_info, 2);
 }
 
+static int cmd64x_reinit_one(struct pci_dev *pdev)
+{
+	u8 mrdmode;
+	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
+	pci_read_config_byte(pdev, MRDMODE, &mrdmode);
+	mrdmode &= ~ 0x30;	/* IRQ set up */
+	mrdmode |= 0x02;	/* Memory read line enable */
+	pci_write_config_byte(pdev, MRDMODE, mrdmode);
+#ifdef CONFIG_PPC
+	pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
+#endif
+	return ata_pci_device_resume(pdev);
+}
+
 static const struct pci_device_id cmd64x[] = {
 	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 },
 	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_646), 1 },
@@ -481,7 +497,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= cmd64x,
 	.probe 		= cmd64x_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= cmd64x_reinit_one,
 };
 
 static int __init cmd64x_init(void)
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 2cd3c0f..9f165a8 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -41,7 +41,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_cs5520"
-#define DRV_VERSION	"0.6.2"
+#define DRV_VERSION	"0.6.3"
 
 struct pio_clocks
 {
@@ -159,14 +159,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations cs5520_port_ops = {
@@ -296,6 +298,22 @@
 	dev_set_drvdata(dev, NULL);
 }
 
+/**
+ *	cs5520_reinit_one	-	device resume
+ *	@pdev: PCI device
+ *
+ *	Do any reconfiguration work needed by a resume from RAM. We need
+ *	to restore DMA mode support on BIOSes which disabled it.
+ */
+ 
+static int cs5520_reinit_one(struct pci_dev *pdev)
+{
+	u8 pcicfg;
+	pci_read_config_byte(pdev, 0x60, &pcicfg);
+	if ((pcicfg & 0x40) == 0)
+		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
+	return ata_pci_device_resume(pdev);
+}
 /* For now keep DMA off. We can set it for all but A rev CS5510 once the
    core ATA code can handle it */
 
@@ -310,7 +328,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= pata_cs5520,
 	.probe 		= cs5520_init_one,
-	.remove		= cs5520_remove_one
+	.remove		= cs5520_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= cs5520_reinit_one,
 };
 
 static int __init cs5520_init(void)
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index a07cc81..1c62801 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -35,7 +35,7 @@
 #include <linux/dmi.h>
 
 #define DRV_NAME	"pata_cs5530"
-#define DRV_VERSION	"0.6"
+#define DRV_VERSION	"0.7.1"
 
 /**
  *	cs5530_set_piomode		-	PIO setup
@@ -173,14 +173,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations cs5530_port_ops = {
@@ -238,38 +240,18 @@
 	return 0;
 }
 
+
 /**
- *	cs5530_init_one		-	Initialise a CS5530
- *	@dev: PCI device
- *	@id: Entry in match table
+ *	cs5530_init_chip	-	Chipset init
  *
- *	Install a driver for the newly found CS5530 companion chip. Most of
- *	this is just housekeeping. We have to set the chip up correctly and
- *	turn off various bits of emulation magic.
+ *	Perform the chip initialisation work that is shared between the
+ *	setup and resume paths.
  */
-
-static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+ 
+static int cs5530_init_chip(void)
 {
-	int compiler_warning_pointless_fix;
-	struct pci_dev *master_0 = NULL, *cs5530_0 = NULL;
-	static struct ata_port_info info = {
-		.sht = &cs5530_sht,
-		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
-		.pio_mask = 0x1f,
-		.mwdma_mask = 0x07,
-		.udma_mask = 0x07,
-		.port_ops = &cs5530_port_ops
-	};
-	/* The docking connector doesn't do UDMA, and it seems not MWDMA */
-	static struct ata_port_info info_palmax_secondary = {
-		.sht = &cs5530_sht,
-		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
-		.pio_mask = 0x1f,
-		.port_ops = &cs5530_port_ops
-	};
-	static struct ata_port_info *port_info[2] = { &info, &info };
+	struct pci_dev *master_0 = NULL, *cs5530_0 = NULL, *dev = NULL;
 
-	dev = NULL;
 	while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) {
 		switch (dev->device) {
 			case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
@@ -290,7 +272,7 @@
 	}
 
 	pci_set_master(cs5530_0);
-	compiler_warning_pointless_fix = pci_set_mwi(cs5530_0);
+	pci_set_mwi(cs5530_0);
 
 	/*
 	 * Set PCI CacheLineSize to 16-bytes:
@@ -338,13 +320,7 @@
 
 	pci_dev_put(master_0);
 	pci_dev_put(cs5530_0);
-
-	if (cs5530_is_palmax())
-		port_info[1] = &info_palmax_secondary;
-
-	/* Now kick off ATA set up */
-	return ata_pci_init_one(dev, port_info, 2);
-
+	return 0;
 fail_put:
 	if (master_0)
 		pci_dev_put(master_0);
@@ -353,6 +329,53 @@
 	return -ENODEV;
 }
 
+/**
+ *	cs5530_init_one		-	Initialise a CS5530
+ *	@dev: PCI device
+ *	@id: Entry in match table
+ *
+ *	Install a driver for the newly found CS5530 companion chip. Most of
+ *	this is just housekeeping. We have to set the chip up correctly and
+ *	turn off various bits of emulation magic.
+ */
+
+static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static struct ata_port_info info = {
+		.sht = &cs5530_sht,
+		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x07,
+		.port_ops = &cs5530_port_ops
+	};
+	/* The docking connector doesn't do UDMA, and it seems not MWDMA */
+	static struct ata_port_info info_palmax_secondary = {
+		.sht = &cs5530_sht,
+		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.port_ops = &cs5530_port_ops
+	};
+	static struct ata_port_info *port_info[2] = { &info, &info };
+	
+	/* Chip initialisation */
+	if (cs5530_init_chip())
+		return -ENODEV;
+		
+	if (cs5530_is_palmax())
+		port_info[1] = &info_palmax_secondary;
+
+	/* Now kick off ATA set up */
+	return ata_pci_init_one(pdev, port_info, 2);
+}
+
+static int cs5530_reinit_one(struct pci_dev *pdev)
+{
+	/* If we fail on resume we are doomed */
+	BUG_ON(cs5530_init_chip());
+	return ata_pci_device_resume(pdev);
+}
+	
 static const struct pci_device_id cs5530[] = {
 	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), },
 
@@ -363,7 +386,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= cs5530,
 	.probe 		= cs5530_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= cs5530_reinit_one,
 };
 
 static int __init cs5530_init(void)
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index f8def3f..e3efec4 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -39,7 +39,7 @@
 #include <asm/msr.h>
 
 #define DRV_NAME	"cs5535"
-#define DRV_VERSION	"0.2.10"
+#define DRV_VERSION	"0.2.11"
 
 /*
  *	The Geode (Aka Athlon GX now) uses an internal MSR based
@@ -177,14 +177,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations cs5535_port_ops = {
@@ -267,7 +269,9 @@
 	.name		= DRV_NAME,
 	.id_table	= cs5535,
 	.probe 		= cs5535_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
 };
 
 static int __init cs5535_init(void)
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 247b436..e2a9569 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -18,7 +18,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_cypress"
-#define DRV_VERSION "0.1.2"
+#define DRV_VERSION "0.1.4"
 
 /* here are the offset definitions for the registers */
 
@@ -128,14 +128,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations cy82c693_port_ops = {
@@ -203,7 +205,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= cy82c693,
 	.probe 		= cy82c693_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
 };
 
 static int __init cy82c693_init(void)
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index ef18c60..edf8a63 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -22,7 +22,7 @@
 #include <linux/ata.h>
 
 #define DRV_NAME	"pata_efar"
-#define DRV_VERSION	"0.4.2"
+#define DRV_VERSION	"0.4.3"
 
 /**
  *	efar_pre_reset	-	check for 40/80 pin
@@ -226,14 +226,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static const struct ata_port_operations efar_ops = {
@@ -315,6 +317,8 @@
 	.id_table		= efar_pci_tbl,
 	.probe			= efar_init_one,
 	.remove			= ata_pci_remove_one,
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
 };
 
 static int __init efar_init(void)
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 6d3e4c0..2663599 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -27,7 +27,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt366"
-#define DRV_VERSION	"0.5"
+#define DRV_VERSION	"0.5.3"
 
 struct hpt_clock {
 	u8	xfer_speed;
@@ -222,9 +222,17 @@
 
 static int hpt36x_pre_reset(struct ata_port *ap)
 {
+	static const struct pci_bits hpt36x_enable_bits[] = {
+		{ 0x50, 1, 0x04, 0x04 },
+		{ 0x54, 1, 0x04, 0x04 }
+	};
+
 	u8 ata66;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
+	if (!pci_test_config_bits(pdev, &hpt36x_enable_bits[ap->port_no]))
+		return -ENOENT;
+		
 	pci_read_config_byte(pdev, 0x5A, &ata66);
 	if (ata66 & (1 << ap->port_no))
 		ap->cbl = ATA_CBL_PATA40;
@@ -322,14 +330,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 /*
@@ -372,6 +382,27 @@
 };
 
 /**
+ *	hpt36x_init_chipset	-	common chip setup
+ *	@dev: PCI device
+ *
+ *	Perform the chip setup work that must be done at both init and
+ *	resume time
+ */
+
+static void hpt36x_init_chipset(struct pci_dev *dev)
+{
+	u8 drive_fast;
+	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
+	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
+	pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
+	pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
+
+	pci_read_config_byte(dev, 0x51, &drive_fast);
+	if (drive_fast & 0x80)
+		pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
+}
+
+/**
  *	hpt36x_init_one		-	Initialise an HPT366/368
  *	@dev: PCI device
  *	@id: Entry in match table
@@ -406,7 +437,6 @@
 
 	u32 class_rev;
 	u32 reg1;
-	u8 drive_fast;
 
 	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
 	class_rev &= 0xFF;
@@ -416,14 +446,7 @@
 	if (class_rev > 2)
 			return -ENODEV;
 
-	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
-	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
-	pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
-	pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
-
-	pci_read_config_byte(dev, 0x51, &drive_fast);
-	if (drive_fast & 0x80)
-		pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
+	hpt36x_init_chipset(dev);
 
 	pci_read_config_dword(dev, 0x40,  &reg1);
 
@@ -444,9 +467,15 @@
 	return ata_pci_init_one(dev, port_info, 2);
 }
 
+static int hpt36x_reinit_one(struct pci_dev *dev)
+{
+	hpt36x_init_chipset(dev);
+	return ata_pci_device_resume(dev);
+}
+
+
 static const struct pci_device_id hpt36x[] = {
 	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
-
 	{ },
 };
 
@@ -454,7 +483,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= hpt36x,
 	.probe 		= hpt36x_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= hpt36x_reinit_one,
 };
 
 static int __init hpt36x_init(void)
@@ -462,13 +493,11 @@
 	return pci_register_driver(&hpt36x_pci_driver);
 }
 
-
 static void __exit hpt36x_exit(void)
 {
 	pci_unregister_driver(&hpt36x_pci_driver);
 }
 
-
 MODULE_AUTHOR("Alan Cox");
 MODULE_DESCRIPTION("low-level driver for the Highpoint HPT366/368");
 MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index fce3fcd..47082df 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -768,13 +768,13 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
 };
 
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 58cfb2b..f6817b4 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -334,13 +334,13 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
 };
 
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index 3334d72..5f1d385 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -23,7 +23,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt3x3"
-#define DRV_VERSION	"0.4.1"
+#define DRV_VERSION	"0.4.2"
 
 static int hpt3x3_probe_init(struct ata_port *ap)
 {
@@ -111,14 +111,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations hpt3x3_port_ops = {
@@ -157,6 +159,27 @@
 };
 
 /**
+ *	hpt3x3_init_chipset	-	chip setup
+ *	@dev: PCI device
+ *
+ *	Perform the setup required at boot and on resume.
+ */
+ 
+static void hpt3x3_init_chipset(struct pci_dev *dev)
+{
+	u16 cmd;
+	/* Initialize the board */
+	pci_write_config_word(dev, 0x80, 0x00);
+	/* Check if it is a 343 or a 363. 363 has COMMAND_MEMORY set */
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	if (cmd & PCI_COMMAND_MEMORY)
+		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
+	else
+		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
+}
+
+
+/**
  *	hpt3x3_init_one		-	Initialise an HPT343/363
  *	@dev: PCI device
  *	@id: Entry in match table
@@ -177,21 +200,18 @@
 		.port_ops = &hpt3x3_port_ops
 	};
 	static struct ata_port_info *port_info[2] = { &info, &info };
-	u16 cmd;
 
-	/* Initialize the board */
-	pci_write_config_word(dev, 0x80, 0x00);
-	/* Check if it is a 343 or a 363. 363 has COMMAND_MEMORY set */
-	pci_read_config_word(dev, PCI_COMMAND, &cmd);
-	if (cmd & PCI_COMMAND_MEMORY)
-		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
-	else
-		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
-
+	hpt3x3_init_chipset(dev);
 	/* Now kick off ATA set up */
 	return ata_pci_init_one(dev, port_info, 2);
 }
 
+static int hpt3x3_reinit_one(struct pci_dev *dev)
+{
+	hpt3x3_init_chipset(dev);
+	return ata_pci_device_resume(dev);
+}
+
 static const struct pci_device_id hpt3x3[] = {
 	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT343), },
 
@@ -202,7 +222,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= hpt3x3,
 	.probe 		= hpt3x3_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= hpt3x3_reinit_one,
 };
 
 static int __init hpt3x3_init(void)
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index 640b8b0..a97d55a 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -27,13 +27,13 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
 };
 
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 18ff3e5..0b56ff3 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -80,7 +80,7 @@
 
 
 #define DRV_NAME "pata_it821x"
-#define DRV_VERSION "0.3.2"
+#define DRV_VERSION "0.3.3"
 
 struct it821x_dev
 {
@@ -666,16 +666,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	/* 255 sectors to begin with. This is locked in smart mode but not
-	   in pass through */
-	.max_sectors		= 255,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations it821x_smart_port_ops = {
@@ -808,6 +808,14 @@
 	return ata_pci_init_one(pdev, port_info, 2);
 }
 
+static int it821x_reinit_one(struct pci_dev *pdev)
+{
+	/* Resume - turn RAID back off if need be */
+	if (it8212_noraid)
+		it821x_disable_raid(pdev);
+	return ata_pci_device_resume(pdev);
+}
+
 static const struct pci_device_id it821x[] = {
 	{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), },
 	{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), },
@@ -819,7 +827,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= it821x,
 	.probe 		= it821x_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= it821x_reinit_one,
 };
 
 static int __init it821x_init(void)
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
new file mode 100644
index 0000000..cb89241
--- /dev/null
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -0,0 +1,271 @@
+/*
+ * ixp4xx PATA/Compact Flash driver
+ * Copyright (c) 2006 Tower Technologies
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * An ATA driver to handle a Compact Flash connected
+ * to the ixp4xx expansion bus in TrueIDE mode. The CF
+ * must have its chip selects connected to two CS lines
+ * on the ixp4xx. The interrupt line is optional; if not
+ * specified, the driver will run in polling mode.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/libata.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <scsi/scsi_host.h>
+
+#define DRV_NAME	"pata_ixp4xx_cf"
+#define DRV_VERSION	"0.1.1"
+
+static void ixp4xx_set_mode(struct ata_port *ap)
+{
+	int i;
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
+		if (ata_dev_enabled(dev)) {
+			dev->pio_mode = XFER_PIO_0;
+			dev->xfer_mode = XFER_PIO_0;
+			dev->xfer_shift = ATA_SHIFT_PIO;
+			dev->flags |= ATA_DFLAG_PIO;
+		}
+	}
+}
+
+static void ixp4xx_phy_reset(struct ata_port *ap)
+{
+	ap->cbl = ATA_CBL_PATA40;
+	ata_port_probe(ap);
+	ata_bus_reset(ap);
+}
+
+static void ixp4xx_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
+				unsigned int buflen, int write_data)
+{
+	unsigned int i;
+	unsigned int words = buflen >> 1;
+	u16 *buf16 = (u16 *) buf;
+	struct ata_port *ap = adev->ap;
+	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
+	struct ixp4xx_pata_data *data = ap->host->dev->platform_data;
+
+	/* Set the expansion bus to 16-bit mode and restore
+	 * 8-bit mode after the transaction.
+	 */
+	*data->cs0_cfg &= ~(0x01);
+	udelay(100);
+
+	/* Transfer multiple of 2 bytes */
+	if (write_data) {
+		for (i = 0; i < words; i++)
+			writew(buf16[i], mmio);
+	} else {
+		for (i = 0; i < words; i++)
+			buf16[i] = readw(mmio);
+	}
+
+	/* Transfer trailing 1 byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		u16 align_buf[1] = { 0 };
+		unsigned char *trailing_buf = buf + buflen - 1;
+
+		if (write_data) {
+			memcpy(align_buf, trailing_buf, 1);
+			writew(align_buf[0], mmio);
+		} else {
+			align_buf[0] = readw(mmio);
+			memcpy(trailing_buf, align_buf, 1);
+		}
+	}
+
+	udelay(100);
+	*data->cs0_cfg |= 0x01;
+}
+
+static void ixp4xx_irq_clear(struct ata_port *ap)
+{
+}
+
+static void ixp4xx_host_stop (struct ata_host *host)
+{
+	struct ixp4xx_pata_data *data = host->dev->platform_data;
+
+	iounmap(data->cs0);
+	iounmap(data->cs1);
+}
+
+static struct scsi_host_template ixp4xx_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations ixp4xx_port_ops = {
+	.set_mode	= ixp4xx_set_mode,
+	.mode_filter	= ata_pci_default_filter,
+
+	.port_disable	= ata_port_disable,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+	.eng_timeout	= ata_eng_timeout,
+	.data_xfer	= ixp4xx_mmio_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ixp4xx_irq_clear,
+
+	.port_start	= ata_port_start,
+	.port_stop	= ata_port_stop,
+	.host_stop	= ixp4xx_host_stop,
+
+	.phy_reset	= ixp4xx_phy_reset,
+};
+
+static void ixp4xx_setup_port(struct ata_ioports *ioaddr,
+				struct ixp4xx_pata_data *data)
+{
+	ioaddr->cmd_addr	= (unsigned long) data->cs0;
+	ioaddr->altstatus_addr	= (unsigned long) data->cs1 + 0x06;
+	ioaddr->ctl_addr	= (unsigned long) data->cs1 + 0x06;
+
+	ata_std_ports(ioaddr);
+
+#ifndef __ARMEB__
+
+	/* adjust the addresses to handle the address swizzling of the
+	 * ixp4xx in little endian mode.
+	 */
+
+	ioaddr->data_addr	^= 0x02;
+	ioaddr->cmd_addr	^= 0x03;
+	ioaddr->altstatus_addr	^= 0x03;
+	ioaddr->ctl_addr	^= 0x03;
+	ioaddr->error_addr	^= 0x03;
+	ioaddr->feature_addr	^= 0x03;
+	ioaddr->nsect_addr	^= 0x03;
+	ioaddr->lbal_addr 	^= 0x03;
+	ioaddr->lbam_addr	^= 0x03;
+	ioaddr->lbah_addr	^= 0x03;
+	ioaddr->device_addr	^= 0x03;
+	ioaddr->status_addr	^= 0x03;
+	ioaddr->command_addr	^= 0x03;
+#endif
+}
+
+static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
+{
+	int ret;
+	unsigned int irq;
+	struct resource *cs0, *cs1;
+	struct ata_probe_ent ae;
+
+	struct ixp4xx_pata_data *data = pdev->dev.platform_data;
+
+	cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+	if (!cs0 || !cs1)
+		return -EINVAL;
+
+	pdev->dev.coherent_dma_mask = DMA_32BIT_MASK;
+
+	data->cs0 = ioremap(cs0->start, 0x1000);
+	data->cs1 = ioremap(cs1->start, 0x1000);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq)
+		set_irq_type(irq, IRQT_HIGH);
+
+	/* Setup expansion bus chip selects */
+	*data->cs0_cfg = data->cs0_bits;
+	*data->cs1_cfg = data->cs1_bits;
+
+	memset(&ae, 0, sizeof(struct ata_probe_ent));
+	INIT_LIST_HEAD(&ae.node);
+
+	ae.dev		= &pdev->dev;
+	ae.port_ops	= &ixp4xx_port_ops;
+	ae.sht		= &ixp4xx_sht;
+	ae.n_ports	= 1;
+	ae.pio_mask	= 0x1f; /* PIO4 */
+	ae.irq		= irq;
+	ae.irq_flags	= 0;
+	ae.port_flags	= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY
+			| ATA_FLAG_NO_ATAPI | ATA_FLAG_SRST;
+
+	/* run in polling mode if no irq has been assigned */
+	if (!irq)
+		ae.port_flags |= ATA_FLAG_PIO_POLLING;
+
+	ixp4xx_setup_port(&ae.port[0], data);
+
+	dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+
+	ret = ata_device_add(&ae);
+	if (ret == 0)
+		return -ENODEV;
+
+	return 0;
+}
+
+static __devexit int ixp4xx_pata_remove(struct platform_device *dev)
+{
+	struct ata_host *host = platform_get_drvdata(dev);
+
+	ata_host_remove(host);
+	platform_set_drvdata(dev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver ixp4xx_pata_platform_driver = {
+	.driver	 = {
+		.name   = DRV_NAME,
+		.owner  = THIS_MODULE,
+	},
+	.probe		= ixp4xx_pata_probe,
+	.remove		= __devexit_p(ixp4xx_pata_remove),
+};
+
+static int __init ixp4xx_pata_init(void)
+{
+	return platform_driver_register(&ixp4xx_pata_platform_driver);
+}
+
+static void __exit ixp4xx_pata_exit(void)
+{
+	platform_driver_unregister(&ixp4xx_pata_platform_driver);
+}
+
+MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
+MODULE_DESCRIPTION("low-level driver for ixp4xx Compact Flash PATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(ixp4xx_pata_init);
+module_exit(ixp4xx_pata_exit);
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 52a2bdf..2d661cb 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -19,7 +19,7 @@
 #include <linux/ata.h>
 
 #define DRV_NAME	"pata_jmicron"
-#define DRV_VERSION	"0.1.2"
+#define DRV_VERSION	"0.1.4"
 
 typedef enum {
 	PORT_PATA0 = 0,
@@ -128,14 +128,13 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	/* Special handling needed if you have sector or LBA48 limits */
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	/* Use standard CHS mapping rules */
 	.bios_param		= ata_std_bios_param,
 };
@@ -212,12 +211,11 @@
 
 		/* FIXME: We may want a way to override this in future */
 		pci_write_config_byte(pdev, 0x41, 0xa1);
+
+		/* PATA controller is fn 1, AHCI is fn 0 */
+		if (PCI_FUNC(pdev->devfn) != 1)
+			return -ENODEV;
 	}
-
-	/* PATA controller is fn 1, AHCI is fn 0 */
-	if (PCI_FUNC(pdev->devfn) != 1)
-		return -ENODEV;
-
 	if ( id->driver_data == 365 || id->driver_data == 366) {
 		/* The 365/66 have two PATA channels, redirect the second */
 		pci_read_config_dword(pdev, 0x80, &reg);
@@ -228,6 +226,27 @@
 	return ata_pci_init_one(pdev, port_info, 2);
 }
 
+static int jmicron_reinit_one(struct pci_dev *pdev)
+{
+	u32 reg;
+	
+	switch(pdev->device) {
+		case PCI_DEVICE_ID_JMICRON_JMB368:
+			break;
+		case PCI_DEVICE_ID_JMICRON_JMB365:
+		case PCI_DEVICE_ID_JMICRON_JMB366:
+			/* Restore the mapping, or the disks swap and boy does it get ugly */
+			pci_read_config_dword(pdev, 0x80, &reg);
+			reg |= (1 << 24);	/* IDE1 to PATA IDE secondary */
+			pci_write_config_dword(pdev, 0x80, reg);
+			/* Fall through */
+		default:
+			/* Make sure AHCI is turned back on */
+			pci_write_config_byte(pdev, 0x41, 0xa1);
+	}
+	return ata_pci_device_resume(pdev);
+}
+
 static const struct pci_device_id jmicron_pci_tbl[] = {
 	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB361), 361},
 	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB363), 363},
@@ -243,6 +262,8 @@
 	.id_table		= jmicron_pci_tbl,
 	.probe			= jmicron_init_one,
 	.remove			= ata_pci_remove_one,
+	.suspend		= ata_pci_device_suspend,
+	.resume			= jmicron_reinit_one,
 };
 
 static int __init jmicron_init(void)
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 10231ef..c7d1738 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -128,13 +128,13 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
 };
 
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
new file mode 100644
index 0000000..1c810ea
--- /dev/null
+++ b/drivers/ata/pata_marvell.c
@@ -0,0 +1,224 @@
+/*
+ *	Marvell PATA driver.
+ *
+ *	For the moment we drive the PATA port in legacy mode. That
+ *	doesn't make full use of the device's functionality, but it is
+ *	easy to get working.
+ *
+ *	(c) 2006 Red Hat  <alan@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_marvell"
+#define DRV_VERSION	"0.1.1"
+
+/**
+ *	marvell_pre_reset	-	check for 40/80 pin
+ *	@ap: Port
+ *
+ *	Perform the PATA port setup we need.
+ */
+
+static int marvell_pre_reset(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 devices;
+	void __iomem *barp;
+	int i;
+
+	/* Check if our port is enabled */
+
+	barp = pci_iomap(pdev, 5, 0x10);
+	if (barp == NULL)
+		return -ENOMEM;
+	printk("BAR5:");
+	for(i = 0; i <= 0x0F; i++)
+		printk("%02X:%02X ", i, readb(barp + i));
+	printk("\n");
+	
+	devices = readl(barp + 0x0C);
+	pci_iounmap(pdev, barp);
+	
+	if ((pdev->device == 0x6145) && (ap->port_no == 0) &&
+	    (!(devices & 0x10)))	/* PATA enable ? */
+		return -ENOENT;
+
+	/* Cable type */
+	switch(ap->port_no)
+	{
+	case 0:
+		if (inb(ap->ioaddr.bmdma_addr + 1) & 1)
+			ap->cbl = ATA_CBL_PATA40;
+		else
+			ap->cbl = ATA_CBL_PATA80;
+		break;
+
+	case 1: /* Legacy SATA port */
+		ap->cbl = ATA_CBL_SATA;
+		break;
+	}
+	return ata_std_prereset(ap);
+}
+
+/**
+ *	marvell_error_handler - Setup an error handler
+ *	@ap: Port to handle
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void marvell_error_handler(struct ata_port *ap)
+{
+	return ata_bmdma_drive_eh(ap, marvell_pre_reset, ata_std_softreset,
+				  NULL, ata_std_postreset);
+}
+
+/* No PIO or DMA methods needed for this device */
+
+static struct scsi_host_template marvell_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	/* Use standard CHS mapping rules */
+	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
+};
+
+static const struct ata_port_operations marvell_ops = {
+	.port_disable		= ata_port_disable,
+
+	/* Task file is PCI ATA format, use helpers */
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= marvell_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+
+	/* BMDMA handling is PCI ATA format, use helpers */
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_pio_data_xfer,
+
+	/* Timeout handling */
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+
+	/* Generic PATA PCI ATA helpers */
+	.port_start		= ata_port_start,
+	.port_stop		= ata_port_stop,
+	.host_stop		= ata_host_stop,
+};
+
+
+/**
+ *	marvell_init_one - Register Marvell ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@id: Entry in marvell_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static struct ata_port_info info = {
+		.sht		= &marvell_sht,
+		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+
+		.pio_mask	= 0x1f,
+		.mwdma_mask	= 0x07,
+		.udma_mask 	= 0x3f,
+
+		.port_ops	= &marvell_ops,
+	};
+	static struct ata_port_info info_sata = {
+		.sht		= &marvell_sht,
+		/* Slave possible as it's magically mapped, not real */
+		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+
+		.pio_mask	= 0x1f,
+		.mwdma_mask	= 0x07,
+		.udma_mask 	= 0x7f,
+
+		.port_ops	= &marvell_ops,
+	};
+	struct ata_port_info *port_info[2] = { &info, &info_sata };
+	int n_port = 2;
+
+	if (pdev->device == 0x6101)
+		n_port = 1;
+
+	return ata_pci_init_one(pdev, port_info, n_port);
+}
+
+static const struct pci_device_id marvell_pci_tbl[] = {
+	{ PCI_DEVICE(0x11AB, 0x6101), },
+	{ PCI_DEVICE(0x11AB, 0x6145), },
+	{ }	/* terminate list */
+};
+
+static struct pci_driver marvell_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= marvell_pci_tbl,
+	.probe			= marvell_init_one,
+	.remove			= ata_pci_remove_one,
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+};
+
+static int __init marvell_init(void)
+{
+	return pci_register_driver(&marvell_pci_driver);
+}
+
+static void __exit marvell_exit(void)
+{
+	pci_unregister_driver(&marvell_pci_driver);
+}
+
+module_init(marvell_init);
+module_exit(marvell_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for Marvell ATA in legacy mode");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, marvell_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 9dfe3e9..4ccca93 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -35,7 +35,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_mpiix"
-#define DRV_VERSION "0.7.2"
+#define DRV_VERSION "0.7.3"
 
 enum {
 	IDETIM = 0x6C,		/* IDE control register */
@@ -159,14 +159,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations mpiix_port_ops = {
@@ -284,7 +286,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= mpiix,
 	.probe 		= mpiix_init_one,
-	.remove		= mpiix_remove_one
+	.remove		= mpiix_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
 };
 
 static int __init mpiix_init(void)
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index f5672de..cf7fe03 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -16,7 +16,7 @@
 #include <linux/ata.h>
 
 #define DRV_NAME	"pata_netcell"
-#define DRV_VERSION	"0.1.5"
+#define DRV_VERSION	"0.1.6"
 
 /**
  *	netcell_probe_init	-	check for 40/80 pin
@@ -54,16 +54,17 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	/* Special handling needed if you have sector or LBA48 limits */
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	/* Use standard CHS mapping rules */
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static const struct ata_port_operations netcell_ops = {
@@ -152,6 +153,8 @@
 	.id_table		= netcell_pci_tbl,
 	.probe			= netcell_init_one,
 	.remove			= ata_pci_remove_one,
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
 };
 
 static int __init netcell_init(void)
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 2a3dbee..c3032eb 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -28,7 +28,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_ns87410"
-#define DRV_VERSION "0.4.2"
+#define DRV_VERSION "0.4.3"
 
 /**
  *	ns87410_pre_reset		-	probe begin
@@ -149,14 +149,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations ns87410_port_ops = {
@@ -209,7 +211,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= ns87410,
 	.probe 		= ns87410_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
 };
 
 static int __init ns87410_init(void)
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index fc947df..10ac3cc 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -224,14 +224,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static const struct ata_port_operations oldpiix_pata_ops = {
@@ -313,6 +315,8 @@
 	.id_table		= oldpiix_pci_tbl,
 	.probe			= oldpiix_init_one,
 	.remove			= ata_pci_remove_one,
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
 };
 
 static int __init oldpiix_init(void)
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index a7320ba..c2988b0 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -34,7 +34,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_opti"
-#define DRV_VERSION "0.2.5"
+#define DRV_VERSION "0.2.7"
 
 enum {
 	READ_REG	= 0,	/* index of Read cycle timing register */
@@ -109,30 +109,6 @@
 	outb(0x83, regio + 2);
 }
 
-#if 0
-/**
- *	opti_read_reg		-	control register read
- *	@ap: ATA port
- *	@reg: control register number
- *
- *	The Opti uses magic 'trapdoor' register accesses to do configuration
- *	rather than using PCI space as other controllers do. The double inw
- *	on the error register activates configuration mode. We can then read
- *	the control register
- */
-
-static u8 opti_read_reg(struct ata_port *ap, int reg)
-{
-	unsigned long regio = ap->ioaddr.cmd_addr;
-	u8 ret;
-	inw(regio + 1);
-	inw(regio + 1);
-	outb(3, regio + 2);
-	ret = inb(regio + reg);
-	outb(0x83, regio + 2);
-}
-#endif
-
 /**
  *	opti_set_piomode	-	set initial PIO mode data
  *	@ap: ATA interface
@@ -195,20 +171,21 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations opti_port_ops = {
 	.port_disable	= ata_port_disable,
 	.set_piomode	= opti_set_piomode,
-/*	.set_dmamode	= opti_set_dmamode, */
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
 	.check_status 	= ata_check_status,
@@ -266,7 +243,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= opti,
 	.probe 		= opti_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
 };
 
 static int __init opti_init(void)
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index c6906b4..80d111c 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -33,7 +33,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_optidma"
-#define DRV_VERSION "0.2.2"
+#define DRV_VERSION "0.2.3"
 
 enum {
 	READ_REG	= 0,	/* index of Read cycle timing register */
@@ -352,14 +352,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations optidma_port_ops = {
@@ -521,7 +523,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= optidma,
 	.probe 		= optidma_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
 };
 
 static int __init optidma_init(void)
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index e93ea27..9ed7f58 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -62,13 +62,13 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
 };
 
@@ -154,19 +154,12 @@
 	tuple.TupleOffset = 0;
 	tuple.TupleDataMax = 255;
 	tuple.Attributes = 0;
-	tuple.DesiredTuple = CISTPL_CONFIG;
-
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(pdev, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(pdev, &tuple, &stk->parse));
-	pdev->conf.ConfigBase = stk->parse.config.base;
-	pdev->conf.Present = stk->parse.config.rmask[0];
 
 	/* See if we have a manufacturer identifier. Use it to set is_kme for
 	   vendor quirks */
-	tuple.DesiredTuple = CISTPL_MANFID;
-	if (!pcmcia_get_first_tuple(pdev, &tuple) && !pcmcia_get_tuple_data(pdev, &tuple) && !pcmcia_parse_tuple(pdev, &tuple, &stk->parse))
-			is_kme = ((stk->parse.manfid.manf == MANFID_KME) && ((stk->parse.manfid.card == PRODID_KME_KXLC005_A) || (stk->parse.manfid.card == PRODID_KME_KXLC005_B)));
+	is_kme = ((pdev->manf_id == MANFID_KME) &&
+		  ((pdev->card_id == PRODID_KME_KXLC005_A) ||
+		   (pdev->card_id == PRODID_KME_KXLC005_B)));
 
 	/* Not sure if this is right... look up the current Vcc */
 	CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(pdev, &stk->conf));
@@ -356,8 +349,10 @@
 	PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
 	PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
 	PCMCIA_DEVICE_PROD_ID1("TRANSCEND    512M   ", 0xd0909443),
+	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
 	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
 	PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
+	PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
 	PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
 	PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
 	PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index d894d99..76dd1c9 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -134,13 +134,13 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
 };
 
@@ -853,7 +853,7 @@
  */
 static int __init pdc2027x_init(void)
 {
-	return pci_module_init(&pdc2027x_pci_driver);
+	return pci_register_driver(&pdc2027x_pci_driver);
 }
 
 /**
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index 5ba9eb2..ad691b9 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -21,7 +21,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_pdc202xx_old"
-#define DRV_VERSION "0.2.1"
+#define DRV_VERSION "0.2.3"
 
 /**
  *	pdc2024x_pre_reset		-	probe begin
@@ -63,7 +63,7 @@
 }
 
 /**
- *	pdc_configure_piomode	-	set chip PIO timing
+ *	pdc202xx_configure_piomode	-	set chip PIO timing
  *	@ap: ATA interface
  *	@adev: ATA device
  *	@pio: PIO mode
@@ -73,7 +73,7 @@
  *	versa
  */
 
-static void pdc_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
+static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
@@ -98,7 +98,7 @@
 }
 
 /**
- *	pdc_set_piomode	-	set initial PIO mode data
+ *	pdc202xx_set_piomode	-	set initial PIO mode data
  *	@ap: ATA interface
  *	@adev: ATA device
  *
@@ -106,13 +106,13 @@
  *	but we want to set the PIO timing by default.
  */
 
-static void pdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
+static void pdc202xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
 {
-	pdc_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
+	pdc202xx_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
 }
 
 /**
- *	pdc_configure_dmamode	-	set DMA mode in chip
+ *	pdc202xx_configure_dmamode	-	set DMA mode in chip
  *	@ap: ATA interface
  *	@adev: ATA device
  *
@@ -120,7 +120,7 @@
  *	to occur.
  */
 
-static void pdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
@@ -184,7 +184,7 @@
 
 	/* The DMA clocks may have been trashed by a reset. FIXME: make conditional
 	   and move to qc_issue ? */
-	pdc_set_dmamode(ap, qc->dev);
+	pdc202xx_set_dmamode(ap, qc->dev);
 
 	/* Cases the state machine will not complete correctly without help */
 	if ((tf->flags & ATA_TFLAG_LBA48) ||  tf->protocol == ATA_PROT_ATAPI_DMA)
@@ -254,7 +254,7 @@
 	adev->max_sectors = 256;
 }
 
-static struct scsi_host_template pdc_sht = {
+static struct scsi_host_template pdc202xx_sht = {
 	.module			= THIS_MODULE,
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
@@ -262,20 +262,22 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations pdc2024x_port_ops = {
 	.port_disable	= ata_port_disable,
-	.set_piomode	= pdc_set_piomode,
-	.set_dmamode	= pdc_set_dmamode,
+	.set_piomode	= pdc202xx_set_piomode,
+	.set_dmamode	= pdc202xx_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
@@ -307,8 +309,8 @@
 
 static struct ata_port_operations pdc2026x_port_ops = {
 	.port_disable	= ata_port_disable,
-	.set_piomode	= pdc_set_piomode,
-	.set_dmamode	= pdc_set_dmamode,
+	.set_piomode	= pdc202xx_set_piomode,
+	.set_dmamode	= pdc202xx_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
@@ -339,11 +341,11 @@
 	.host_stop	= ata_host_stop
 };
 
-static int pdc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static struct ata_port_info info[3] = {
 		{
-			.sht = &pdc_sht,
+			.sht = &pdc202xx_sht,
 			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -351,7 +353,7 @@
 			.port_ops = &pdc2024x_port_ops
 		},
 		{
-			.sht = &pdc_sht,
+			.sht = &pdc202xx_sht,
 			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -359,7 +361,7 @@
 			.port_ops = &pdc2026x_port_ops
 		},
 		{
-			.sht = &pdc_sht,
+			.sht = &pdc202xx_sht,
 			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
@@ -385,7 +387,7 @@
 	return ata_pci_init_one(dev, port_info, 2);
 }
 
-static const struct pci_device_id pdc[] = {
+static const struct pci_device_id pdc202xx[] = {
 	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
 	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
 	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
@@ -395,28 +397,30 @@
 	{ },
 };
 
-static struct pci_driver pdc_pci_driver = {
+static struct pci_driver pdc202xx_pci_driver = {
 	.name 		= DRV_NAME,
-	.id_table	= pdc,
-	.probe 		= pdc_init_one,
-	.remove		= ata_pci_remove_one
+	.id_table	= pdc202xx,
+	.probe 		= pdc202xx_init_one,
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
 };
 
-static int __init pdc_init(void)
+static int __init pdc202xx_init(void)
 {
-	return pci_register_driver(&pdc_pci_driver);
+	return pci_register_driver(&pdc202xx_pci_driver);
 }
 
-static void __exit pdc_exit(void)
+static void __exit pdc202xx_exit(void)
 {
-	pci_unregister_driver(&pdc_pci_driver);
+	pci_unregister_driver(&pdc202xx_pci_driver);
 }
 
 MODULE_AUTHOR("Alan Cox");
 MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
 MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, pdc);
+MODULE_DEVICE_TABLE(pci, pdc202xx);
 MODULE_VERSION(DRV_VERSION);
 
-module_init(pdc_init);
-module_exit(pdc_exit);
+module_init(pdc202xx_init);
+module_exit(pdc202xx_exit);
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
new file mode 100644
index 0000000..443b1d8
--- /dev/null
+++ b/drivers/ata/pata_platform.c
@@ -0,0 +1,295 @@
+/*
+ * Generic platform device PATA driver
+ *
+ * Copyright (C) 2006  Paul Mundt
+ *
+ * Based on pata_pcmcia:
+ *
+ *   Copyright 2005-2006 Red Hat Inc <alan@redhat.com>, all rights reserved.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/pata_platform.h>
+
+#define DRV_NAME "pata_platform"
+#define DRV_VERSION "0.1.2"
+
+static int pio_mask = 1;
+
+/*
+ * Provide our own set_mode() as we don't want to change anything that has
+ * already been configured..
+ */
+static void pata_platform_set_mode(struct ata_port *ap)
+{
+	int i;
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
+
+		if (ata_dev_enabled(dev)) {
+			/* We don't really care */
+			dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
+			dev->xfer_shift = ATA_SHIFT_PIO;
+			dev->flags |= ATA_DFLAG_PIO;
+		}
+	}
+}
+
+static void pata_platform_host_stop(struct ata_host *host)
+{
+	int i;
+
+	/*
+	 * Unmap the bases for MMIO
+	 */
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		if (ap->flags & ATA_FLAG_MMIO) {
+			iounmap((void __iomem *)ap->ioaddr.ctl_addr);
+			iounmap((void __iomem *)ap->ioaddr.cmd_addr);
+		}
+	}
+}
+
+static struct scsi_host_template pata_platform_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations pata_platform_port_ops = {
+	.set_mode		= pata_platform_set_mode,
+
+	.port_disable		= ata_port_disable,
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+
+	.data_xfer		= ata_pio_data_xfer_noirq,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+
+	.port_start		= ata_port_start,
+	.port_stop		= ata_port_stop,
+	.host_stop		= pata_platform_host_stop
+};
+
+static void pata_platform_setup_port(struct ata_ioports *ioaddr,
+				     struct pata_platform_info *info)
+{
+	unsigned int shift = 0;
+
+	/* Fixup the port shift for platforms that need it */
+	if (info && info->ioport_shift)
+		shift = info->ioport_shift;
+
+	ioaddr->data_addr	= ioaddr->cmd_addr + (ATA_REG_DATA    << shift);
+	ioaddr->error_addr	= ioaddr->cmd_addr + (ATA_REG_ERR     << shift);
+	ioaddr->feature_addr	= ioaddr->cmd_addr + (ATA_REG_FEATURE << shift);
+	ioaddr->nsect_addr	= ioaddr->cmd_addr + (ATA_REG_NSECT   << shift);
+	ioaddr->lbal_addr	= ioaddr->cmd_addr + (ATA_REG_LBAL    << shift);
+	ioaddr->lbam_addr	= ioaddr->cmd_addr + (ATA_REG_LBAM    << shift);
+	ioaddr->lbah_addr	= ioaddr->cmd_addr + (ATA_REG_LBAH    << shift);
+	ioaddr->device_addr	= ioaddr->cmd_addr + (ATA_REG_DEVICE  << shift);
+	ioaddr->status_addr	= ioaddr->cmd_addr + (ATA_REG_STATUS  << shift);
+	ioaddr->command_addr	= ioaddr->cmd_addr + (ATA_REG_CMD     << shift);
+}
+
+/**
+ *	pata_platform_probe		-	attach a platform interface
+ *	@pdev: platform device
+ *
+ *	Register a platform bus IDE interface. Such interfaces are PIO and
+ *	we assume they do not support IRQ sharing.
+ *
+ *	Platform devices are expected to contain 3 resources per port:
+ *
+ *		- I/O Base (IORESOURCE_IO or IORESOURCE_MEM)
+ *		- CTL Base (IORESOURCE_IO or IORESOURCE_MEM)
+ *		- IRQ	   (IORESOURCE_IRQ)
+ *
+ *	If the base resources are both mem types, the ioremap() is handled
+ *	here. For IORESOURCE_IO, it's assumed that there's no remapping
+ *	necessary.
+ */
+static int __devinit pata_platform_probe(struct platform_device *pdev)
+{
+	struct resource *io_res, *ctl_res;
+	struct ata_probe_ent ae;
+	unsigned int mmio;
+	int ret;
+
+	/*
+	 * Simple resource validation ..
+	 */
+	if (unlikely(pdev->num_resources != 3)) {
+		dev_err(&pdev->dev, "invalid number of resources\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Get the I/O base first
+	 */
+	io_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (io_res == NULL) {
+		io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (unlikely(io_res == NULL))
+			return -EINVAL;
+	}
+
+	/*
+	 * Then the CTL base
+	 */
+	ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+	if (ctl_res == NULL) {
+		ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		if (unlikely(ctl_res == NULL))
+			return -EINVAL;
+	}
+
+	/*
+	 * Check for MMIO
+	 */
+	mmio = (( io_res->flags == IORESOURCE_MEM) &&
+		(ctl_res->flags == IORESOURCE_MEM));
+
+	/*
+	 * Now that that's out of the way, wire up the port..
+	 */
+	memset(&ae, 0, sizeof(struct ata_probe_ent));
+	INIT_LIST_HEAD(&ae.node);
+	ae.dev = &pdev->dev;
+	ae.port_ops = &pata_platform_port_ops;
+	ae.sht = &pata_platform_sht;
+	ae.n_ports = 1;
+	ae.pio_mask = pio_mask;
+	ae.irq = platform_get_irq(pdev, 0);
+	ae.irq_flags = 0;
+	ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
+
+	/*
+	 * Handle the MMIO case
+	 */
+	if (mmio) {
+		ae.port_flags |= ATA_FLAG_MMIO;
+
+		ae.port[0].cmd_addr = (unsigned long)ioremap(io_res->start,
+				io_res->end - io_res->start + 1);
+		if (unlikely(!ae.port[0].cmd_addr)) {
+			dev_err(&pdev->dev, "failed to remap IO base\n");
+			return -ENXIO;
+		}
+
+		ae.port[0].ctl_addr = (unsigned long)ioremap(ctl_res->start,
+				ctl_res->end - ctl_res->start + 1);
+		if (unlikely(!ae.port[0].ctl_addr)) {
+			dev_err(&pdev->dev, "failed to remap CTL base\n");
+			ret = -ENXIO;
+			goto bad_remap;
+		}
+	} else {
+		ae.port[0].cmd_addr = io_res->start;
+		ae.port[0].ctl_addr = ctl_res->start;
+	}
+
+	ae.port[0].altstatus_addr = ae.port[0].ctl_addr;
+
+	pata_platform_setup_port(&ae.port[0], pdev->dev.platform_data);
+
+	if (unlikely(ata_device_add(&ae) == 0)) {
+		ret = -ENODEV;
+		goto add_failed;
+	}
+
+	return 0;
+
+add_failed:
+	if (ae.port[0].ctl_addr && mmio)
+		iounmap((void __iomem *)ae.port[0].ctl_addr);
+bad_remap:
+	if (ae.port[0].cmd_addr && mmio)
+		iounmap((void __iomem *)ae.port[0].cmd_addr);
+
+	return ret;
+}
+
+/**
+ *	pata_platform_remove	-	unplug a platform interface
+ *	@pdev: platform device
+ *
+ *	A platform bus ATA device has been unplugged. Perform the needed
+ *	cleanup. Also called on module unload for any active devices.
+ */
+static int __devexit pata_platform_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ata_host *host = dev_get_drvdata(dev);
+
+	ata_host_remove(host);
+	dev_set_drvdata(dev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver pata_platform_driver = {
+	.probe		= pata_platform_probe,
+	.remove		= __devexit_p(pata_platform_remove),
+	.driver = {
+		.name		= DRV_NAME,
+		.owner		= THIS_MODULE,
+	},
+};
+
+static int __init pata_platform_init(void)
+{
+	return platform_driver_register(&pata_platform_driver);
+}
+
+static void __exit pata_platform_exit(void)
+{
+	platform_driver_unregister(&pata_platform_driver);
+}
+module_init(pata_platform_init);
+module_exit(pata_platform_exit);
+
+module_param(pio_mask, int, 0);
+
+MODULE_AUTHOR("Paul Mundt");
+MODULE_DESCRIPTION("low-level driver for platform device ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index 2c3cc0c..36f621a 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -157,13 +157,13 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
 };
 
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 1af83d7..065541d 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -220,14 +220,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static const struct ata_port_operations radisys_pata_ops = {
@@ -310,6 +312,8 @@
 	.id_table		= radisys_pci_tbl,
 	.probe			= radisys_init_one,
 	.remove			= ata_pci_remove_one,
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
 };
 
 static int __init radisys_init(void)
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index 4533b63..3677c64 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -21,7 +21,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_rz1000"
-#define DRV_VERSION	"0.2.2"
+#define DRV_VERSION	"0.2.3"
 
 
 /**
@@ -83,14 +83,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations rz1000_port_ops = {
@@ -128,6 +130,19 @@
 	.host_stop	= ata_host_stop
 };
 
+static int rz1000_fifo_disable(struct pci_dev *pdev)
+{
+	u16 reg;
+	/* Be exceptionally paranoid as we must be sure to apply the fix */
+	if (pci_read_config_word(pdev, 0x40, &reg) != 0)
+		return -1;
+	reg &= 0xDFFF;
+	if (pci_write_config_word(pdev, 0x40, reg) != 0)
+		return -1;
+	printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n");
+	return 0;
+}
+
 /**
  *	rz1000_init_one - Register RZ1000 ATA PCI device with kernel services
  *	@pdev: PCI device to register
@@ -142,7 +157,6 @@
 {
 	static int printed_version;
 	struct ata_port_info *port_info[2];
-	u16 reg;
 	static struct ata_port_info info = {
 		.sht = &rz1000_sht,
 		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
@@ -153,23 +167,25 @@
 	if (!printed_version++)
 		printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
 
-	/* Be exceptionally paranoid as we must be sure to apply the fix */
-	if (pci_read_config_word(pdev, 0x40, &reg) != 0)
-		goto fail;
-	reg &= 0xDFFF;
-	if (pci_write_config_word(pdev, 0x40, reg) != 0)
-		goto fail;
-	printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n");
-
-	port_info[0] = &info;
-	port_info[1] = &info;
-	return ata_pci_init_one(pdev, port_info, 2);
-fail:
+	if (rz1000_fifo_disable(pdev) == 0) {
+		port_info[0] = &info;
+		port_info[1] = &info;
+		return ata_pci_init_one(pdev, port_info, 2);
+	}
 	printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset..\n");
 	/* Not safe to use so skip */
 	return -ENODEV;
 }
 
+static int rz1000_reinit_one(struct pci_dev *pdev)
+{
+	/* If this fails on resume (which is a "can't happen" case), we
+	   must stop, as any progress risks data loss */
+	if (rz1000_fifo_disable(pdev))
+		panic("rz1000 fifo");
+	return ata_pci_device_resume(pdev);
+}
+
 static const struct pci_device_id pata_rz1000[] = {
 	{ PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), },
 	{ PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), },
@@ -181,7 +197,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= pata_rz1000,
 	.probe 		= rz1000_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= rz1000_reinit_one,
 };
 
 static int __init rz1000_init(void)
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index 067d9d2..a3b35bc 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -40,7 +40,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sc1200"
-#define DRV_VERSION	"0.2.3"
+#define DRV_VERSION	"0.2.4"
 
 #define SC1200_REV_A	0x00
 #define SC1200_REV_B1	0x01
@@ -186,14 +186,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations sc1200_port_ops = {
@@ -263,7 +265,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= sc1200,
 	.probe 		= sc1200_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
 };
 
 static int __init sc1200_init(void)
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 5bbf76e..f02b6a3 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -41,7 +41,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_serverworks"
-#define DRV_VERSION "0.3.7"
+#define DRV_VERSION "0.3.9"
 
 #define SVWKS_CSB5_REVISION_NEW	0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
 #define SVWKS_CSB6_REVISION	0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
@@ -318,14 +318,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations serverworks_osb4_port_ops = {
@@ -553,6 +555,30 @@
 	return ata_pci_init_one(pdev, port_info, ports);
 }
 
+static int serverworks_reinit_one(struct pci_dev *pdev)
+{
+	/* Force master latency timer to 64 PCI clocks */
+	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
+	
+	switch (pdev->device)
+	{
+		case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE:
+			serverworks_fixup_osb4(pdev);
+			break;
+		case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
+			ata_pci_clear_simplex(pdev);
+			/* fall through */
+		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
+		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
+			serverworks_fixup_csb(pdev);
+			break;
+		case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
+			serverworks_fixup_ht1000(pdev);
+			break;
+	}
+	return ata_pci_device_resume(pdev);
+}
+
 static const struct pci_device_id serverworks[] = {
 	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
 	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2},
@@ -567,7 +593,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= serverworks,
 	.probe 		= serverworks_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= serverworks_reinit_one,
 };
 
 static int __init serverworks_init(void)
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 4a2b72b..32cf0bf 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -33,7 +33,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_sil680"
-#define DRV_VERSION "0.3.2"
+#define DRV_VERSION "0.4.1"
 
 /**
  *	sil680_selreg		-	return register base
@@ -218,13 +218,13 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
 };
 
@@ -262,32 +262,20 @@
 	.host_stop	= ata_host_stop
 };
 
-static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+/**
+ *	sil680_init_chip		-	chip setup
+ *	@pdev: PCI device
+ *
+ *	Perform all the chip setup which must be done both when the device
+ *	is powered up on boot and when we resume from suspend to RAM.
+ *	Returns the final clock settings.
+ */
+ 
+static u8 sil680_init_chip(struct pci_dev *pdev)
 {
-	static struct ata_port_info info = {
-		.sht = &sil680_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
-		.pio_mask = 0x1f,
-		.mwdma_mask = 0x07,
-		.udma_mask = 0x7f,
-		.port_ops = &sil680_port_ops
-	};
-	static struct ata_port_info info_slow = {
-		.sht = &sil680_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
-		.pio_mask = 0x1f,
-		.mwdma_mask = 0x07,
-		.udma_mask = 0x3f,
-		.port_ops = &sil680_port_ops
-	};
-	static struct ata_port_info *port_info[2] = {&info, &info};
-	static int printed_version;
 	u32 class_rev	= 0;
 	u8 tmpbyte	= 0;
 
-	if (!printed_version++)
-		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
-
         pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
         class_rev &= 0xff;
         /* FIXME: double check */
@@ -322,8 +310,6 @@
 	pci_read_config_byte(pdev,   0x8A, &tmpbyte);
 	printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n",
 			tmpbyte & 1, tmpbyte & 0x30);
-	if ((tmpbyte & 0x30) == 0)
-		port_info[0] = port_info[1] = &info_slow;
 
 	pci_write_config_byte(pdev,  0xA1, 0x72);
 	pci_write_config_word(pdev,  0xA2, 0x328A);
@@ -342,11 +328,51 @@
 		case 0x20: printk(KERN_INFO "sil680: Using PCI clock.\n");break;
 		/* This last case is _NOT_ ok */
 		case 0x30: printk(KERN_ERR "sil680: Clock disabled ?\n");
-			return -EIO;
+	}
+	return tmpbyte & 0x30;
+}
+
+static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static struct ata_port_info info = {
+		.sht = &sil680_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x7f,
+		.port_ops = &sil680_port_ops
+	};
+	static struct ata_port_info info_slow = {
+		.sht = &sil680_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x3f,
+		.port_ops = &sil680_port_ops
+	};
+	static struct ata_port_info *port_info[2] = {&info, &info};
+	static int printed_version;
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+	switch(sil680_init_chip(pdev))
+	{
+		case 0:
+			port_info[0] = port_info[1] = &info_slow;
+			break;
+		case 0x30:
+			return -ENODEV;
 	}
 	return ata_pci_init_one(pdev, port_info, 2);
 }
 
+static int sil680_reinit_one(struct pci_dev *pdev)
+{
+	sil680_init_chip(pdev);
+	return ata_pci_device_resume(pdev);
+}
+
 static const struct pci_device_id sil680[] = {
 	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },
 
@@ -357,7 +383,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= sil680,
 	.probe 		= sil680_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= sil680_reinit_one,
 };
 
 static int __init sil680_init(void)
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index b9ffafb..916cedb 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -34,7 +34,7 @@
 #include <linux/ata.h>
 
 #define DRV_NAME	"pata_sis"
-#define DRV_VERSION	"0.4.4"
+#define DRV_VERSION	"0.4.5"
 
 struct sis_chipset {
 	u16 device;			/* PCI host ID */
@@ -538,14 +538,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static const struct ata_port_operations sis_133_ops = {
@@ -999,6 +1001,8 @@
 	.id_table		= sis_pci_tbl,
 	.probe			= sis_init_one,
 	.remove			= ata_pci_remove_one,
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
 };
 
 static int __init sis_init(void)
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 08a6dc8..e94f515 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -230,13 +230,13 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
 };
 
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 9640f80..a142971 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -43,7 +43,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_triflex"
-#define DRV_VERSION "0.2.5"
+#define DRV_VERSION "0.2.7"
 
 /**
  *	triflex_prereset		-	probe begin
@@ -185,14 +185,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations triflex_port_ops = {
@@ -257,7 +259,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= triflex,
 	.probe 		= triflex_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
 };
 
 static int __init triflex_init(void)
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 1e7be9e..cc09d47 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -23,6 +23,7 @@
  *	VIA VT8233c	-	UDMA100
  *	VIA VT8235	-	UDMA133
  *	VIA VT8237	-	UDMA133
+ *	VIA VT8251	-	UDMA133
  *
  *	Most registers remain compatible across chips. Others start reserved
  *	and acquire sensible semantics if set to 1 (eg cable detect). A few
@@ -60,7 +61,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_via"
-#define DRV_VERSION "0.1.14"
+#define DRV_VERSION "0.2.0"
 
 /*
  *	The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx
@@ -94,6 +95,7 @@
 	u8 rev_max;
 	u16 flags;
 } via_isa_bridges[] = {
+	{ "vt8251",	PCI_DEVICE_ID_VIA_8251,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
 	{ "cx700",	PCI_DEVICE_ID_VIA_CX700,    0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
 	{ "vt6410",	PCI_DEVICE_ID_VIA_6410,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES},
 	{ "vt8237a",	PCI_DEVICE_ID_VIA_8237A,    0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
@@ -288,14 +290,16 @@
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
-	.max_sectors		= ATA_MAX_SECTORS,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
 };
 
 static struct ata_port_operations via_port_ops = {
@@ -369,8 +373,42 @@
 };
 
 /**
+ *	via_config_fifo		-	set up the FIFO
+ *	@pdev: PCI device
+ *	@flags: configuration flags
+ *
+ *	Set the FIFO properties for this device if necessary. Used both
+ *	during set up and on the resume path.
+ */
+
+static void via_config_fifo(struct pci_dev *pdev, unsigned int flags)
+{
+	u8 enable;
+	
+	/* 0x40 low bits indicate enabled channels */
+	pci_read_config_byte(pdev, 0x40 , &enable);
+	enable &= 3;
+	
+	if (flags & VIA_SET_FIFO) {
+		u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20};
+		u8 fifo;
+
+		pci_read_config_byte(pdev, 0x43, &fifo);
+
+		/* Clear PREQ# until DDACK# for errata */
+		if (flags & VIA_BAD_PREQ)
+			fifo &= 0x7F;
+		else
+			fifo &= 0x9f;
+		/* Turn on FIFO for enabled channels */
+		fifo |= fifo_setting[enable];
+		pci_write_config_byte(pdev, 0x43, fifo);
+	}
+}
+
+/**
  *	via_init_one		-	discovery callback
- *	@pdev: PCI device ID
+ *	@pdev: PCI device
  *	@id: PCI table info
  *
  *	A VIA IDE interface has been discovered. Figure out what revision
@@ -382,7 +420,7 @@
 	/* Early VIA without UDMA support */
 	static struct ata_port_info via_mwdma_info = {
 		.sht = &via_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.port_ops = &via_port_ops
@@ -390,7 +428,7 @@
 	/* Ditto with IRQ masking required */
 	static struct ata_port_info via_mwdma_info_borked = {
 		.sht = &via_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.port_ops = &via_port_ops_noirq,
@@ -398,7 +436,7 @@
 	/* VIA UDMA 33 devices (and borked 66) */
 	static struct ata_port_info via_udma33_info = {
 		.sht = &via_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.udma_mask = 0x7,
@@ -407,7 +445,7 @@
 	/* VIA UDMA 66 devices */
 	static struct ata_port_info via_udma66_info = {
 		.sht = &via_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.udma_mask = 0x1f,
@@ -416,7 +454,7 @@
 	/* VIA UDMA 100 devices */
 	static struct ata_port_info via_udma100_info = {
 		.sht = &via_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.udma_mask = 0x3f,
@@ -425,7 +463,7 @@
 	/* UDMA133 with bad AST (All current 133) */
 	static struct ata_port_info via_udma133_info = {
 		.sht = &via_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.udma_mask = 0x7f,	/* FIXME: should check north bridge */
@@ -470,21 +508,8 @@
 	}
 
 	/* Initialise the FIFO for the enabled channels. */
-	if (config->flags & VIA_SET_FIFO) {
-		u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20};
-		u8 fifo;
-
-		pci_read_config_byte(pdev, 0x43, &fifo);
-
-		/* Clear PREQ# until DDACK# for errata */
-		if (config->flags & VIA_BAD_PREQ)
-			fifo &= 0x7F;
-		else
-			fifo &= 0x9f;
-		/* Turn on FIFO for enabled channels */
-		fifo |= fifo_setting[enable];
-		pci_write_config_byte(pdev, 0x43, fifo);
-	}
+	via_config_fifo(pdev, config->flags);
+	
 	/* Clock set up */
 	switch(config->flags & VIA_UDMA) {
 		case VIA_UDMA_NONE:
@@ -528,6 +553,39 @@
 	return ata_pci_init_one(pdev, port_info, 2);
 }
 
+/**
+ *	via_reinit_one		-	reinit after resume
+ *	@pdev: PCI device
+ *
+ *	Called when the VIA PATA device is resumed. We must then
+ *	reconfigure the FIFO and any other setup we may have altered. In
+ *	addition the kernel needs resume support for the relevant PCI
+ *	quirks.
+ */
+
+static int via_reinit_one(struct pci_dev *pdev)
+{
+	u32 timing;
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	const struct via_isa_bridge *config = host->private_data;
+	
+	via_config_fifo(pdev, config->flags);
+
+	if ((config->flags & VIA_UDMA) == VIA_UDMA_66) {
+		/* The 66 MHz devices require we enable the clock */
+		pci_read_config_dword(pdev, 0x50, &timing);
+		timing |= 0x80008;
+		pci_write_config_dword(pdev, 0x50, timing);
+	}
+	if (config->flags & VIA_BAD_CLK66) {
+		/* Disable the 66MHz clock on problem devices */
+		pci_read_config_dword(pdev, 0x50, &timing);
+		timing &= ~0x80008;
+		pci_write_config_dword(pdev, 0x50, timing);
+	}
+	return ata_pci_device_resume(pdev);	
+}
+
 static const struct pci_device_id via[] = {
 	{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), },
 	{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), },
@@ -541,7 +599,9 @@
 	.name 		= DRV_NAME,
 	.id_table	= via,
 	.probe 		= via_init_one,
-	.remove		= ata_pci_remove_one
+	.remove		= ata_pci_remove_one,
+	.suspend	= ata_pci_device_suspend,
+	.resume		= via_reinit_one,
 };
 
 static int __init via_init(void)
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
new file mode 100644
index 0000000..3ea345c
--- /dev/null
+++ b/drivers/ata/pata_winbond.c
@@ -0,0 +1,306 @@
+/*
+ *    pata_winbond.c - Winbond VLB ATA controllers
+ *	(C) 2006 Red Hat <alan@redhat.com>
+ *
+ *    Support for the Winbond 83759A when operating in advanced mode.
+ *    Multichip mode is not currently supported.
+ */
+ 
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "pata_winbond"
+#define DRV_VERSION "0.0.1"
+
+#define NR_HOST 4	/* Two winbond controllers, two channels each */
+
+struct winbond_data {
+	unsigned long config;
+	struct platform_device *platform_dev;
+};
+
+static struct ata_host *winbond_host[NR_HOST];
+static struct winbond_data winbond_data[NR_HOST];
+static int nr_winbond_host;
+
+#ifdef MODULE
+static int probe_winbond = 1;
+#else
+static int probe_winbond;
+#endif
+
+static spinlock_t winbond_lock = SPIN_LOCK_UNLOCKED;
+
+static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&winbond_lock, flags);
+	outb(reg, port + 0x01);
+	outb(val, port + 0x02);
+	spin_unlock_irqrestore(&winbond_lock, flags);
+}
+
+static u8 winbond_readcfg(unsigned long port, u8 reg)
+{
+	u8 val;
+
+	unsigned long flags;
+	spin_lock_irqsave(&winbond_lock, flags);
+	outb(reg, port + 0x01);
+	val = inb(port + 0x02);
+	spin_unlock_irqrestore(&winbond_lock, flags);
+
+	return val;
+}
+
+static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct ata_timing t;
+	struct winbond_data *winbond = ap->host->private_data;
+	int active, recovery;
+	u8 reg;
+	int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
+
+	reg = winbond_readcfg(winbond->config, 0x81);
+	
+	/* Get the timing data in cycles */
+	if (reg & 0x40)		/* Fast VLB bus, assume 50MHz */
+		ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
+	else
+		ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
+
+	active = (FIT(t.active, 3, 17) - 1) & 0x0F;
+	recovery = (FIT(t.recover, 1, 15) + 1) & 0x0F;
+	reg = (active << 4) | recovery;
+	winbond_writecfg(winbond->config, timing, reg);
+	
+	/* Load the setup timing */
+	
+	reg = 0x35;
+	if (adev->class != ATA_DEV_ATA)
+		reg |= 0x08;	/* FIFO off */
+	if (!ata_pio_need_iordy(adev))
+		reg |= 0x02;	/* IORDY off */
+	reg |= (FIT(t.setup, 0, 3) << 6);
+	winbond_writecfg(winbond->config, timing + 1, reg);
+}
+
+
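+/*
+ *	32-bit PIO transfer helper: drives advertising DWORD I/O are read and
+ *	written with 32-bit string I/O, with any 1-3 trailing bytes handled
+ *	through a padded final transfer; other drives fall back to the
+ *	standard 16-bit transfer routine.
+ */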
+static void winbond_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
+{
+	struct ata_port *ap = adev->ap;
+	int slop = buflen & 3;
+
+	if (ata_id_has_dword_io(adev->id)) {
+		if (write_data)
+			outsl(ap->ioaddr.data_addr, buf, buflen >> 2);
+		else
+			insl(ap->ioaddr.data_addr, buf, buflen >> 2);
+
+		if (unlikely(slop)) {
+			u32 pad;
+			if (write_data) {
+				memcpy(&pad, buf + buflen - slop, slop);
+				outl(le32_to_cpu(pad), ap->ioaddr.data_addr);
+			} else {
+				pad = cpu_to_le32(inl(ap->ioaddr.data_addr));
+				memcpy(buf + buflen - slop, &pad, slop);
+			}
+		}
+	} else
+		ata_pio_data_xfer(adev, buf, buflen, write_data);
+}
+
+static struct scsi_host_template winbond_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations winbond_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= winbond_set_piomode,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= winbond_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+
+	.port_start	= ata_port_start,
+	.port_stop	= ata_port_stop,
+	.host_stop	= ata_host_stop
+};
+
+/**
+ *	winbond_init_one		-	attach a winbond interface
+ *	@port: config register I/O base
+ *
+ *	Register a VLB bus IDE interface. Such interfaces are PIO only and
+ *	we assume they do not support IRQ sharing.
+ */
+
+static __init int winbond_init_one(unsigned long port)
+{
+	struct ata_probe_ent ae;
+	struct platform_device *pdev;
+	int ret;
+	u8 reg;
+	int i;
+
+	reg = winbond_readcfg(port, 0x81);
+	reg |= 0x80;	/* jumpered mode off */
+	winbond_writecfg(port, 0x81, reg);
+	reg = winbond_readcfg(port, 0x83);
+	reg |= 0xF0;	/* local control */
+	winbond_writecfg(port, 0x83, reg);
+	reg = winbond_readcfg(port, 0x85);
+	reg |= 0xF0;	/* programmable timing */
+	winbond_writecfg(port, 0x85, reg);
+
+	reg = winbond_readcfg(port, 0x81);
+	
+	if (!(reg & 0x03))		/* Disabled */
+		return 0;
+
+	for (i = 0; i < 2 ; i ++) {
+
+		if (reg & (1 << i)) {		
+			/*
+			 *	Fill in a probe structure first of all
+			 */
+
+			pdev = platform_device_register_simple(DRV_NAME, nr_winbond_host, NULL, 0);
+			if (pdev == NULL)
+				return -ENOMEM;
+
+			memset(&ae, 0, sizeof(struct ata_probe_ent));
+			INIT_LIST_HEAD(&ae.node);
+			ae.dev = &pdev->dev;
+
+			ae.port_ops = &winbond_port_ops;
+			ae.pio_mask = 0x1F;
+
+			ae.sht = &winbond_sht;
+	
+			ae.n_ports = 1;
+			ae.irq = 14 + i;
+			ae.irq_flags = 0;
+			ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
+			ae.port[0].cmd_addr = 0x1F0 - (0x80 * i);
+			ae.port[0].altstatus_addr = ae.port[0].cmd_addr + 0x0206;
+			ae.port[0].ctl_addr = ae.port[0].altstatus_addr;
+			ata_std_ports(&ae.port[0]);
+			/*
+			 *	Hook in a private data structure per channel
+			 */
+			ae.private_data = &winbond_data[nr_winbond_host];
+			winbond_data[nr_winbond_host].config = port;
+			winbond_data[nr_winbond_host].platform_dev = pdev;
+
+			ret = ata_device_add(&ae);
+			if (ret == 0) {
+				platform_device_unregister(pdev);
+				return -ENODEV;
+			}
+			winbond_host[nr_winbond_host++] = dev_get_drvdata(&pdev->dev);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *	winbond_init		-	attach winbond interfaces
+ *
+ *	Attach winbond IDE interfaces by scanning the ports it may occupy.
+ */
+
+static __init int winbond_init(void)
+{
+	static const unsigned long config[2] = { 0x130, 0x1B0 };
+
+	int ct = 0;
+	int i;
+	
+	if (probe_winbond == 0)
+		return -ENODEV;
+
+	/*
+ 	 *	Check both base addresses
+	 */
+
+	for (i = 0; i < 2; i++) {
+		if (probe_winbond & (1<<i)) {
+			int ret = 0;
+			unsigned long port = config[i];
+
+			if (request_region(port, 2, "pata_winbond")) {
+				ret = winbond_init_one(port);
+				if (ret <= 0)
+					release_region(port, 2);
+				else
+					ct += ret;
+			}
+		}
+	}
+	if (ct != 0)
+		return 0;
+	return -ENODEV;
+}
+
+static __exit void winbond_exit(void)
+{
+	int i;
+
+	for (i = 0; i < nr_winbond_host; i++) {
+		ata_host_remove(winbond_host[i]);
+		release_region(winbond_data[i].config, 2);
+		platform_device_unregister(winbond_data[i].platform_dev);
+	}
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Winbond VL ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(winbond_init);
+module_exit(winbond_exit);
+
+module_param(probe_winbond, int, 0);
+
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index d65ebfd..0d316eb 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -29,6 +29,11 @@
  *  NV-specific details such as register offsets, SATA phy location,
  *  hotplug info, etc.
  *
+ *  CK804/MCP04 controllers support an alternate programming interface
+ *  similar to the ADMA specification (with some modifications).
+ *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
+ *  sent through the legacy interface.
+ *
  */
 
 #include <linux/kernel.h>
@@ -40,10 +45,13 @@
 #include <linux/interrupt.h>
 #include <linux/device.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
 #include <linux/libata.h>
 
 #define DRV_NAME			"sata_nv"
-#define DRV_VERSION			"2.0"
+#define DRV_VERSION			"3.2"
+
+#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
 
 enum {
 	NV_PORTS			= 2,
@@ -78,8 +86,138 @@
 	// For PCI config register 20
 	NV_MCP_SATA_CFG_20		= 0x50,
 	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
+	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
+	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
+	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
+	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
+
+	NV_ADMA_MAX_CPBS		= 32,
+	NV_ADMA_CPB_SZ			= 128,
+	NV_ADMA_APRD_SZ			= 16,
+	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
+					   NV_ADMA_APRD_SZ,
+	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
+	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
+	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
+					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
+
+	/* BAR5 offset to ADMA general registers */
+	NV_ADMA_GEN			= 0x400,
+	NV_ADMA_GEN_CTL			= 0x00,
+	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
+
+	/* BAR5 offset to ADMA ports */
+	NV_ADMA_PORT			= 0x480,
+
+	/* size of ADMA port register space  */
+	NV_ADMA_PORT_SIZE		= 0x100,
+
+	/* ADMA port registers */
+	NV_ADMA_CTL			= 0x40,
+	NV_ADMA_CPB_COUNT		= 0x42,
+	NV_ADMA_NEXT_CPB_IDX		= 0x43,
+	NV_ADMA_STAT			= 0x44,
+	NV_ADMA_CPB_BASE_LOW		= 0x48,
+	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
+	NV_ADMA_APPEND			= 0x50,
+	NV_ADMA_NOTIFIER		= 0x68,
+	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
+
+	/* NV_ADMA_CTL register bits */
+	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
+	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
+	NV_ADMA_CTL_GO			= (1 << 7),
+	NV_ADMA_CTL_AIEN		= (1 << 8),
+	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
+	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
+
+	/* CPB response flag bits */
+	NV_CPB_RESP_DONE		= (1 << 0),
+	NV_CPB_RESP_ATA_ERR		= (1 << 3),
+	NV_CPB_RESP_CMD_ERR		= (1 << 4),
+	NV_CPB_RESP_CPB_ERR		= (1 << 7),
+
+	/* CPB control flag bits */
+	NV_CPB_CTL_CPB_VALID		= (1 << 0),
+	NV_CPB_CTL_QUEUE		= (1 << 1),
+	NV_CPB_CTL_APRD_VALID		= (1 << 2),
+	NV_CPB_CTL_IEN			= (1 << 3),
+	NV_CPB_CTL_FPDMA		= (1 << 4),
+
+	/* APRD flags */
+	NV_APRD_WRITE			= (1 << 1),
+	NV_APRD_END			= (1 << 2),
+	NV_APRD_CONT			= (1 << 3),
+
+	/* NV_ADMA_STAT flags */
+	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
+	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
+	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
+	NV_ADMA_STAT_CPBERR		= (1 << 4),
+	NV_ADMA_STAT_SERROR		= (1 << 5),
+	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
+	NV_ADMA_STAT_IDLE		= (1 << 8),
+	NV_ADMA_STAT_LEGACY		= (1 << 9),
+	NV_ADMA_STAT_STOPPED		= (1 << 10),
+	NV_ADMA_STAT_DONE		= (1 << 12),
+	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
+	 				  NV_ADMA_STAT_TIMEOUT,
+
+	/* port flags */
+	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
+	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
+
 };
 
+/* ADMA Physical Region Descriptor - one SG segment */
+struct nv_adma_prd {
+	__le64			addr;
+	__le32			len;
+	u8			flags;
+	u8			packet_len;
+	__le16			reserved;
+};
+
+enum nv_adma_regbits {
+	CMDEND	= (1 << 15),		/* end of command list */
+	WNB	= (1 << 14),		/* wait-not-BSY */
+	IGN	= (1 << 13),		/* ignore this entry */
+	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
+	DA2	= (1 << (2 + 8)),
+	DA1	= (1 << (1 + 8)),
+	DA0	= (1 << (0 + 8)),
+};
+
+/* ADMA Command Parameter Block
+   The first 5 SG segments are stored inside the Command Parameter Block itself.
+   If there are more than 5 segments the remainder are stored in a separate
+   memory area indicated by next_aprd. */
+struct nv_adma_cpb {
+	u8			resp_flags;    /* 0 */
+	u8			reserved1;     /* 1 */
+	u8			ctl_flags;     /* 2 */
+	/* len is length of taskfile in 64 bit words */
+ 	u8			len;           /* 3  */
+	u8			tag;           /* 4 */
+	u8			next_cpb_idx;  /* 5 */
+	__le16			reserved2;     /* 6-7 */
+	__le16			tf[12];        /* 8-31 */
+	struct nv_adma_prd	aprd[5];       /* 32-111 */
+	__le64			next_aprd;     /* 112-119 */
+	__le64			reserved3;     /* 120-127 */
+};
+
+
+struct nv_adma_port_priv {
+	struct nv_adma_cpb	*cpb;
+	dma_addr_t		cpb_dma;
+	struct nv_adma_prd	*aprd;
+	dma_addr_t		aprd_dma;
+	u8			flags;
+};
+
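+/* Interrupt pending bit for a port in the ADMA general control/status
+   register: bit 19 for port 0, bit 31 (19 + 12) for port 1. */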
+#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
+
 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
 static void nv_ck804_host_stop(struct ata_host *host);
 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
@@ -93,13 +231,28 @@
 static void nv_ck804_freeze(struct ata_port *ap);
 static void nv_ck804_thaw(struct ata_port *ap);
 static void nv_error_handler(struct ata_port *ap);
+static int nv_adma_slave_config(struct scsi_device *sdev);
+static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
+static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
+static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
+static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
+static void nv_adma_irq_clear(struct ata_port *ap);
+static int nv_adma_port_start(struct ata_port *ap);
+static void nv_adma_port_stop(struct ata_port *ap);
+static void nv_adma_error_handler(struct ata_port *ap);
+static void nv_adma_host_stop(struct ata_host *host);
+static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
+static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
+static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
+static u8 nv_adma_bmdma_status(struct ata_port *ap);
 
 enum nv_host_type
 {
 	GENERIC,
 	NFORCE2,
 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
-	CK804
+	CK804,
+	ADMA
 };
 
 static const struct pci_device_id nv_pci_tbl[] = {
@@ -160,6 +313,24 @@
 	.bios_param		= ata_std_bios_param,
 };
 
+static struct scsi_host_template nv_adma_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= NV_ADMA_MAX_CPBS,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
+	.slave_configure	= nv_adma_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
 static const struct ata_port_operations nv_generic_ops = {
 	.port_disable		= ata_port_disable,
 	.tf_load		= ata_tf_load,
@@ -241,11 +412,40 @@
 	.host_stop		= nv_ck804_host_stop,
 };
 
+static const struct ata_port_operations nv_adma_ops = {
+	.port_disable		= ata_port_disable,
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_atapi_dma	= nv_adma_check_atapi_dma,
+	.exec_command		= ata_exec_command,
+	.check_status		= ata_check_status,
+	.dev_select		= ata_std_dev_select,
+	.bmdma_setup		= nv_adma_bmdma_setup,
+	.bmdma_start		= nv_adma_bmdma_start,
+	.bmdma_stop		= nv_adma_bmdma_stop,
+	.bmdma_status		= nv_adma_bmdma_status,
+	.qc_prep		= nv_adma_qc_prep,
+	.qc_issue		= nv_adma_qc_issue,
+	.freeze			= nv_ck804_freeze,
+	.thaw			= nv_ck804_thaw,
+	.error_handler		= nv_adma_error_handler,
+	.post_internal_cmd	= nv_adma_bmdma_stop,
+	.data_xfer		= ata_mmio_data_xfer,
+	.irq_handler		= nv_adma_interrupt,
+	.irq_clear		= nv_adma_irq_clear,
+	.scr_read		= nv_scr_read,
+	.scr_write		= nv_scr_write,
+	.port_start		= nv_adma_port_start,
+	.port_stop		= nv_adma_port_stop,
+	.host_stop		= nv_adma_host_stop,
+};
+
 static struct ata_port_info nv_port_info[] = {
 	/* generic */
 	{
 		.sht		= &nv_sht,
-		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_HRST_TO_RESUME,
 		.pio_mask	= NV_PIO_MASK,
 		.mwdma_mask	= NV_MWDMA_MASK,
 		.udma_mask	= NV_UDMA_MASK,
@@ -254,7 +454,8 @@
 	/* nforce2/3 */
 	{
 		.sht		= &nv_sht,
-		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_HRST_TO_RESUME,
 		.pio_mask	= NV_PIO_MASK,
 		.mwdma_mask	= NV_MWDMA_MASK,
 		.udma_mask	= NV_UDMA_MASK,
@@ -263,12 +464,23 @@
 	/* ck804 */
 	{
 		.sht		= &nv_sht,
-		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_HRST_TO_RESUME,
 		.pio_mask	= NV_PIO_MASK,
 		.mwdma_mask	= NV_MWDMA_MASK,
 		.udma_mask	= NV_UDMA_MASK,
 		.port_ops	= &nv_ck804_ops,
 	},
+	/* ADMA */
+	{
+		.sht		= &nv_adma_sht,
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
+		.pio_mask	= NV_PIO_MASK,
+		.mwdma_mask	= NV_MWDMA_MASK,
+		.udma_mask	= NV_UDMA_MASK,
+		.port_ops	= &nv_adma_ops,
+	},
 };
 
 MODULE_AUTHOR("NVIDIA");
@@ -277,6 +489,700 @@
 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 
+static int adma_enabled = 1;
+
+static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
+					        unsigned int port_no)
+{
+	mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
+	return mmio;
+}
+
+static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
+{
+	return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
+}
+
+static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
+{
+	return (ap->host->mmio_base + NV_ADMA_GEN);
+}
+
+static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
+{
+	return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
+}
+
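+/* Each port runs in one of two modes: legacy register mode (GO bit clear),
+   used for taskfile and ATAPI commands, and ADMA mode (GO bit set), used
+   for CPB-based commands. These helpers set or clear the GO bit and track
+   the current mode in the port's private flags. */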
+static void nv_adma_register_mode(struct ata_port *ap)
+{
+	void __iomem *mmio = nv_adma_ctl_block(ap);
+	struct nv_adma_port_priv *pp = ap->private_data;
+	u16 tmp;
+
+	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
+		return;
+
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
+
+	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
+}
+
+static void nv_adma_mode(struct ata_port *ap)
+{
+	void __iomem *mmio = nv_adma_ctl_block(ap);
+	struct nv_adma_port_priv *pp = ap->private_data;
+	u16 tmp;
+
+	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
+		return;
+		
+	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
+
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
+
+	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
+}
+
+static int nv_adma_slave_config(struct scsi_device *sdev)
+{
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct nv_adma_port_priv *pp = ap->private_data;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u64 bounce_limit;
+	unsigned long segment_boundary;
+	unsigned short sg_tablesize;
+	int rc;
+	int adma_enable;
+	u32 current_reg, new_reg, config_mask;
+
+	rc = ata_scsi_slave_config(sdev);
+
+	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
+		/* Not a proper libata device, ignore */
+		return rc;
+
+	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
+		/*
+		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
+		 * Therefore ATAPI commands are sent through the legacy interface.
+		 * However, the legacy interface only supports 32-bit DMA.
+		 * Restrict DMA parameters as required by the legacy interface
+		 * when an ATAPI device is connected.
+		 */
+		bounce_limit = ATA_DMA_MASK;
+		segment_boundary = ATA_DMA_BOUNDARY;
+		/* Subtract 1 since an extra entry may be needed for padding, see
+		   libata-scsi.c */
+		sg_tablesize = LIBATA_MAX_PRD - 1;
+		
+		/* Since the legacy DMA engine is in use, we need to disable ADMA
+		   on the port. */
+		adma_enable = 0;
+		nv_adma_register_mode(ap);
+	}
+	else {
+		bounce_limit = *ap->dev->dma_mask;
+		segment_boundary = NV_ADMA_DMA_BOUNDARY;
+		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
+		adma_enable = 1;
+	}
+	
+	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
+
+	if(ap->port_no == 1)
+		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
+			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
+	else
+		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
+			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
+	
+	if(adma_enable) {
+		new_reg = current_reg | config_mask;
+		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
+	}
+	else {
+		new_reg = current_reg & ~config_mask;
+		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
+	}
+	
+	if(current_reg != new_reg)
+		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
+	
+	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
+	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
+	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
+	ata_port_printk(ap, KERN_INFO,
+		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
+		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
+	return rc;
+}
+
+static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
+}
+
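+/* Build the CPB taskfile area: each __le16 entry carries an ATA shadow
+   register index in the high bits and its value in the low byte, with the
+   nv_adma_regbits control flags (WNB, IGN, CMDEND) OR-ed in as needed.
+   Non-LBA48 commands pad the five HOB slots with IGN entries. */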
+static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
+{
+	unsigned int idx = 0;
+
+	cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);
+
+	if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
+		cpb[idx++] = cpu_to_le16(IGN);
+		cpb[idx++] = cpu_to_le16(IGN);
+		cpb[idx++] = cpu_to_le16(IGN);
+		cpb[idx++] = cpu_to_le16(IGN);
+		cpb[idx++] = cpu_to_le16(IGN);
+	}
+	else {
+		cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature);
+		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
+		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
+		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
+		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
+	}
+	cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
+	cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
+	cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
+	cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
+	cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
+
+	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
+
+	return idx;
+}
+
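+/* Inspect one CPB's response flags and, if it has completed or failed,
+   complete the matching queued command. The shadow ATA status is only
+   folded in for non-NCQ commands, since for NCQ it may not correspond to
+   the command that just completed. */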
+static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	int complete = 0, have_err = 0;
+	u8 flags = pp->cpb[cpb_num].resp_flags;
+
+	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
+
+	if (flags & NV_CPB_RESP_DONE) {
+		VPRINTK("CPB flags done, flags=0x%x\n", flags);
+		complete = 1;
+	}
+	if (flags & NV_CPB_RESP_ATA_ERR) {
+		ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
+		have_err = 1;
+		complete = 1;
+	}
+	if (flags & NV_CPB_RESP_CMD_ERR) {
+		ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
+		have_err = 1;
+		complete = 1;
+	}
+	if (flags & NV_CPB_RESP_CPB_ERR) {
+		ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
+		have_err = 1;
+		complete = 1;
+	}
+	if(complete || force_err)
+	{
+		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
+		if(likely(qc)) {
+			u8 ata_status = 0;
+			/* Only use the ATA port status for non-NCQ commands.
+			   For NCQ commands the current status may have nothing to do with
+			   the command just completed. */
+			if(qc->tf.protocol != ATA_PROT_NCQ)
+				ata_status = readb(nv_adma_ctl_block(ap) + (ATA_REG_STATUS * 4));
+
+			if(have_err || force_err)
+				ata_status |= ATA_ERR;
+
+			qc->err_mask |= ac_err_mask(ata_status);
+			DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
+				qc->err_mask);
+			ata_qc_complete(qc);
+		}
+	}
+}
+
+static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
+{
+	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+	int handled;
+
+	/* freeze if hotplugged */
+	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
+		ata_port_freeze(ap);
+		return 1;
+	}
+
+	/* bail out if not our interrupt */
+	if (!(irq_stat & NV_INT_DEV))
+		return 0;
+
+	/* DEV interrupt w/ no active qc? */
+	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
+		ata_check_status(ap);
+		return 1;
+	}
+
+	/* handle interrupt */
+	handled = ata_host_intr(ap, qc);
+	if (unlikely(!handled)) {
+		/* spurious, clear it */
+		ata_check_status(ap);
+	}
+
+	return 1;
+}
+
+static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	int i, handled = 0;
+	u32 notifier_clears[2];
+
+	spin_lock(&host->lock);
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		notifier_clears[i] = 0;
+
+		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
+			struct nv_adma_port_priv *pp = ap->private_data;
+			void __iomem *mmio = nv_adma_ctl_block(ap);
+			u16 status;
+			u32 gen_ctl;
+			int have_global_err = 0;
+			u32 notifier, notifier_error;
+
+			/* if in ATA register mode, use standard ata interrupt handler */
+			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
+				u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804)
+					>> (NV_INT_PORT_SHIFT * i);
+				handled += nv_host_intr(ap, irq_stat);
+				continue;
+			}
+
+			notifier = readl(mmio + NV_ADMA_NOTIFIER);
+			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
+			notifier_clears[i] = notifier | notifier_error;
+
+			gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
+
+			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
+			    !notifier_error)
+				/* Nothing to do */
+				continue;
+
+			status = readw(mmio + NV_ADMA_STAT);
+
+			/* Clear status. Ensure the controller sees the clearing before we start
+			   looking at any of the CPB statuses, so that any CPB completions after
+			   this point in the handler will raise another interrupt. */
+			writew(status, mmio + NV_ADMA_STAT);
+			readw(mmio + NV_ADMA_STAT); /* flush posted write */
+			rmb();
+
+			/* freeze if hotplugged */
+			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
+				ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
+				ata_port_freeze(ap);
+				handled++;
+				continue;
+			}
+
+			if (status & NV_ADMA_STAT_TIMEOUT) {
+				ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
+				have_global_err = 1;
+			}
+			if (status & NV_ADMA_STAT_CPBERR) {
+				ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
+				have_global_err = 1;
+			}
+			if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
+				/** Check CPBs for completed commands */
+
+				if(ata_tag_valid(ap->active_tag))
+					/* Non-NCQ command */
+					nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
+						(notifier_error & (1 << ap->active_tag)));
+				else {
+					int pos;
+					u32 active = ap->sactive;
+					while( (pos = ffs(active)) ) {
+						pos--;
+						nv_adma_check_cpb(ap, pos, have_global_err ||
+							(notifier_error & (1 << pos)) );
+						active &= ~(1 << pos );
+					}
+				}
+			}
+
+			handled++; /* irq handled if we got here */
+		}
+	}
+	
+	if(notifier_clears[0] || notifier_clears[1]) {
+		/* Note: Both notifier clear registers must be written
+		   if either is set, even if one is zero, according to NVIDIA. */
+		writel(notifier_clears[0], 
+			nv_adma_notifier_clear_block(host->ports[0]));
+		writel(notifier_clears[1], 
+			nv_adma_notifier_clear_block(host->ports[1]));
+	}
+
+	spin_unlock(&host->lock);
+
+	return IRQ_RETVAL(handled);
+}
+
+static void nv_adma_irq_clear(struct ata_port *ap)
+{
+	void __iomem *mmio = nv_adma_ctl_block(ap);
+	u16 status = readw(mmio + NV_ADMA_STAT);
+	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
+	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
+	unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
+
+	/* clear ADMA status */
+	writew(status, mmio + NV_ADMA_STAT);
+	writel(notifier | notifier_error,
+	       nv_adma_notifier_clear_block(ap));
+
+	/** clear legacy status */
+	outb(inb(dma_stat_addr), dma_stat_addr);
+}
+
+static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	struct nv_adma_port_priv *pp = ap->private_data;
+	u8 dmactl;
+
+	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
+		WARN_ON(1);
+		return;
+	}
+
+	/* load PRD table addr. */
+	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+
+	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	/* issue r/w command */
+	ata_exec_command(ap, &qc->tf);
+}
+
+static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct nv_adma_port_priv *pp = ap->private_data;
+	u8 dmactl;
+
+	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
+		WARN_ON(1);
+		return;
+	}
+
+	/* start host DMA transaction */
+	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	outb(dmactl | ATA_DMA_START,
+	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+}
+
+static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct nv_adma_port_priv *pp = ap->private_data;
+
+	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
+		return;
+
+	/* clear start/stop bit */
+	outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
+		ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+	ata_altstatus(ap);        /* dummy read */
+}
+
+static u8 nv_adma_bmdma_status(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+
+	WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));
+
+	return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+}
+
+static int nv_adma_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct nv_adma_port_priv *pp;
+	int rc;
+	void *mem;
+	dma_addr_t mem_dma;
+	void __iomem *mmio = nv_adma_ctl_block(ap);
+	u16 tmp;
+
+	VPRINTK("ENTER\n");
+
+	rc = ata_port_start(ap);
+	if (rc)
+		return rc;
+
+	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
+	if (!pp) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	mem = dma_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
+				 &mem_dma, GFP_KERNEL);
+
+	if (!mem) {
+		rc = -ENOMEM;
+		goto err_out_kfree;
+	}
+	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
+
+	/*
+	 * First item in chunk of DMA memory:
+	 * 128-byte command parameter block (CPB)
+	 * one for each command tag
+	 */
+	pp->cpb     = mem;
+	pp->cpb_dma = mem_dma;
+
+	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
+	writel((mem_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
+
+	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
+	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
+
+	/*
+	 * Second item: block of ADMA_SGTBL_LEN s/g entries
+	 */
+	pp->aprd = mem;
+	pp->aprd_dma = mem_dma;
+
+	ap->private_data = pp;
+
+	/* clear any outstanding interrupt conditions */
+	writew(0xffff, mmio + NV_ADMA_STAT);
+
+	/* initialize port variables */
+	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
+
+	/* clear CPB fetch count */
+	writew(0, mmio + NV_ADMA_CPB_COUNT);
+
+	/* clear GO for register mode */
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
+
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
+	udelay(1);
+	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
+
+	return 0;
+
+err_out_kfree:
+	kfree(pp);
+err_out:
+	ata_port_stop(ap);
+	return rc;
+}
+
+static void nv_adma_port_stop(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = nv_adma_ctl_block(ap);
+
+	VPRINTK("ENTER\n");
+
+	writew(0, mmio + NV_ADMA_CTL);
+
+	ap->private_data = NULL;
+	dma_free_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, pp->cpb, pp->cpb_dma);
+	kfree(pp);
+	ata_port_stop(ap);
+}
+
+
+static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
+{
+	void __iomem *mmio = probe_ent->mmio_base;
+	struct ata_ioports *ioport = &probe_ent->port[port];
+
+	VPRINTK("ENTER\n");
+
+	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;
+
+	ioport->cmd_addr	= (unsigned long) mmio;
+	ioport->data_addr	= (unsigned long) mmio + (ATA_REG_DATA * 4);
+	ioport->error_addr	=
+	ioport->feature_addr	= (unsigned long) mmio + (ATA_REG_ERR * 4);
+	ioport->nsect_addr	= (unsigned long) mmio + (ATA_REG_NSECT * 4);
+	ioport->lbal_addr	= (unsigned long) mmio + (ATA_REG_LBAL * 4);
+	ioport->lbam_addr	= (unsigned long) mmio + (ATA_REG_LBAM * 4);
+	ioport->lbah_addr	= (unsigned long) mmio + (ATA_REG_LBAH * 4);
+	ioport->device_addr	= (unsigned long) mmio + (ATA_REG_DEVICE * 4);
+	ioport->status_addr	=
+	ioport->command_addr	= (unsigned long) mmio + (ATA_REG_STATUS * 4);
+	ioport->altstatus_addr	=
+	ioport->ctl_addr	= (unsigned long) mmio + 0x20;
+}
+
+static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
+{
+	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
+	unsigned int i;
+	u32 tmp32;
+
+	VPRINTK("ENTER\n");
+
+	/* enable ADMA on the ports */
+	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
+	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
+		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
+		 NV_MCP_SATA_CFG_20_PORT1_EN |
+		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
+
+	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
+
+	for (i = 0; i < probe_ent->n_ports; i++)
+		nv_adma_setup_port(probe_ent, i);
+
+	for (i = 0; i < probe_ent->n_ports; i++) {
+		void __iomem *mmio = __nv_adma_ctl_block(probe_ent->mmio_base, i);
+		u16 tmp;
+
+		/* enable interrupt, clear reset if not already clear */
+		tmp = readw(mmio + NV_ADMA_CTL);
+		writew(tmp | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
+	}
+
+	return 0;
+}
+
+static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
+			      struct scatterlist *sg,
+			      int idx,
+			      struct nv_adma_prd *aprd)
+{
+	u8 flags;
+
+	memset(aprd, 0, sizeof(struct nv_adma_prd));
+
+	flags = 0;
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		flags |= NV_APRD_WRITE;
+	if (idx == qc->n_elem - 1)
+		flags |= NV_APRD_END;
+	else if (idx != 4)
+		flags |= NV_APRD_CONT;
+
+	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
+	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
+	aprd->flags = flags;
+}
+
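+/* Fill the scatter/gather list for a command: the first five APRDs live
+   inside the CPB itself; any further entries go into this tag's slice of
+   the separately allocated APRD table, linked via next_aprd. */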
+static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+	unsigned int idx;
+	struct nv_adma_prd *aprd;
+	struct scatterlist *sg;
+
+	VPRINTK("ENTER\n");
+
+	idx = 0;
+
+	ata_for_each_sg(sg, qc) {
+		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
+		nv_adma_fill_aprd(qc, sg, idx, aprd);
+		idx++;
+	}
+	if (idx > 5)
+		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
+}
+
+static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
+	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
+		       NV_CPB_CTL_APRD_VALID |
+		       NV_CPB_CTL_IEN;
+
+	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
+	     (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
+		nv_adma_register_mode(qc->ap);
+		ata_qc_prep(qc);
+		return;
+	}
+
+	memset(cpb, 0, sizeof(struct nv_adma_cpb));
+
+	cpb->len		= 3;
+	cpb->tag		= qc->tag;
+	cpb->next_cpb_idx	= 0;
+
+	/* turn on NCQ flags for NCQ commands */
+	if (qc->tf.protocol == ATA_PROT_NCQ)
+		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
+
+	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
+
+	nv_adma_fill_sg(qc, cpb);
+
+	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
+	   finished filling in all of the contents */
+	wmb();
+	cpb->ctl_flags = ctl_flags;
+}
+
+static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+	void __iomem *mmio = nv_adma_ctl_block(qc->ap);
+
+	VPRINTK("ENTER\n");
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
+	     (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
+		/* use ATA register mode */
+		VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
+		nv_adma_register_mode(qc->ap);
+		return ata_qc_issue_prot(qc);
+	} else
+		nv_adma_mode(qc->ap);
+
+	/* write append register, command tag in lower 8 bits
+	   and (number of cpbs to append -1) in top 8 bits */
+	wmb();
+	writew(qc->tag, mmio + NV_ADMA_APPEND);
+
+	DPRINTK("Issued tag %u\n",qc->tag);
+
+	return 0;
+}
+
 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
 {
 	struct ata_host *host = dev_instance;
@@ -310,37 +1216,6 @@
 	return IRQ_RETVAL(handled);
 }
 
-static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
-{
-	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
-	int handled;
-
-	/* freeze if hotplugged */
-	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
-		ata_port_freeze(ap);
-		return 1;
-	}
-
-	/* bail out if not our interrupt */
-	if (!(irq_stat & NV_INT_DEV))
-		return 0;
-
-	/* DEV interrupt w/ no active qc? */
-	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
-		ata_check_status(ap);
-		return 1;
-	}
-
-	/* handle interrupt */
-	handled = ata_host_intr(ap, qc);
-	if (unlikely(!handled)) {
-		/* spurious, clear it */
-		ata_check_status(ap);
-	}
-
-	return 1;
-}
-
 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
 {
 	int i, handled = 0;
@@ -466,6 +1341,56 @@
 			   nv_hardreset, ata_std_postreset);
 }
 
+static void nv_adma_error_handler(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
+		void __iomem *mmio = nv_adma_ctl_block(ap);
+		int i;
+		u16 tmp;
+
+		u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
+		u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
+		u32 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
+		u32 status = readw(mmio + NV_ADMA_STAT);
+
+		ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
+			"notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
+			notifier, notifier_error, gen_ctl, status);
+
+		for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
+			struct nv_adma_cpb *cpb = &pp->cpb[i];
+			if( cpb->ctl_flags || cpb->resp_flags )
+				ata_port_printk(ap, KERN_ERR,
+					"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
+					i, cpb->ctl_flags, cpb->resp_flags);
+		}
+
+		/* Push us back into port register mode for error handling. */
+		nv_adma_register_mode(ap);
+
+		ata_port_printk(ap, KERN_ERR, "Resetting port\n");
+
+		/* Mark all of the CPBs as invalid to prevent them from being executed */
+		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
+			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
+
+		/* clear CPB fetch count */
+		writew(0, mmio + NV_ADMA_CPB_COUNT);
+
+		/* Reset channel */
+		tmp = readw(mmio + NV_ADMA_CTL);
+		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
+		udelay(1);
+		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
+	}
+
+	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
+			   nv_hardreset, ata_std_postreset);
+}
+
 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version = 0;
@@ -475,6 +1400,8 @@
 	int rc;
 	u32 bar;
 	unsigned long base;
+	unsigned long type = ent->driver_data;
+	int mask_set = 0;
 
         // Make sure this is a SATA controller by counting the number of bars
         // (NVIDIA SATA controllers will always have six bars).  Otherwise,
@@ -483,7 +1410,7 @@
 		if (pci_resource_start(pdev, bar) == 0)
 			return -ENODEV;
 
-	if (!printed_version++)
+	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
 	rc = pci_enable_device(pdev);
@@ -496,16 +1423,26 @@
 		goto err_out_disable;
 	}
 
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out_regions;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out_regions;
+	if(type >= CK804 && adma_enabled) {
+		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
+		type = ADMA;
+		if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
+		   !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
+			mask_set = 1;
+	}
+
+	if(!mask_set) {
+		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+		if (rc)
+			goto err_out_regions;
+		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+		if (rc)
+			goto err_out_regions;
+	}
 
 	rc = -ENOMEM;
 
-	ppi[0] = ppi[1] = &nv_port_info[ent->driver_data];
+	ppi[0] = ppi[1] = &nv_port_info[type];
 	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
 	if (!probe_ent)
 		goto err_out_regions;
@@ -522,7 +1459,7 @@
 	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
 
 	/* enable SATA space for CK804 */
-	if (ent->driver_data == CK804) {
+	if (type >= CK804) {
 		u8 regval;
 
 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
@@ -532,6 +1469,12 @@
 
 	pci_set_master(pdev);
 
+	if (type == ADMA) {
+		rc = nv_adma_host_init(probe_ent);
+		if (rc)
+			goto err_out_iounmap;
+	}
+
 	rc = ata_device_add(probe_ent);
 	if (rc != NV_PORTS)
 		goto err_out_iounmap;
@@ -566,6 +1509,33 @@
 	ata_pci_host_stop(host);
 }
 
+static void nv_adma_host_stop(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	int i;
+	u32 tmp32;
+
+	for (i = 0; i < host->n_ports; i++) {
+		void __iomem *mmio = __nv_adma_ctl_block(host->mmio_base, i);
+		u16 tmp;
+
+		/* disable interrupt */
+		tmp = readw(mmio + NV_ADMA_CTL);
+		writew(tmp & ~NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
+	}
+
+	/* disable ADMA on the ports */
+	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
+	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
+		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
+		   NV_MCP_SATA_CFG_20_PORT1_EN |
+		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
+
+	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
+
+	nv_ck804_host_stop(host);
+}
+
 static int __init nv_init(void)
 {
 	return pci_register_driver(&nv_pci_driver);
@@ -578,3 +1548,5 @@
 
 module_init(nv_init);
 module_exit(nv_exit);
+module_param_named(adma, adma_enabled, bool, 0444);
+MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 72eda51..f055874 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -46,20 +46,19 @@
 #include "sata_promise.h"
 
 #define DRV_NAME	"sata_promise"
-#define DRV_VERSION	"1.04"
+#define DRV_VERSION	"1.05"
 
 
 enum {
 	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
 	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
-	PDC_TBG_MODE		= 0x41,	/* TBG mode */
 	PDC_FLASH_CTL		= 0x44, /* Flash control register */
-	PDC_PCI_CTL		= 0x48, /* PCI control and status register */
 	PDC_GLOBAL_CTL		= 0x48, /* Global control/status (per port) */
 	PDC_CTLSTAT		= 0x60,	/* IDE control and status (per port) */
 	PDC_SATA_PLUG_CSR	= 0x6C, /* SATA Plug control/status reg */
 	PDC2_SATA_PLUG_CSR	= 0x60, /* SATAII Plug control/status reg */
-	PDC_SLEW_CTL		= 0x470, /* slew rate control reg */
+	PDC_TBG_MODE		= 0x41C, /* TBG mode (not SATAII) */
+	PDC_SLEW_CTL		= 0x470, /* slew rate control reg (not SATAII) */
 
 	PDC_ERR_MASK		= (1<<19) | (1<<20) | (1<<21) | (1<<22) |
 				  (1<<8) | (1<<9) | (1<<10),
@@ -67,17 +66,22 @@
 	board_2037x		= 0,	/* FastTrak S150 TX2plus */
 	board_20319		= 1,	/* FastTrak S150 TX4 */
 	board_20619		= 2,	/* FastTrak TX4000 */
-	board_20771		= 3,	/* FastTrak TX2300 */
-	board_2057x		= 4,	/* SATAII150 Tx2plus */
-	board_40518		= 5,	/* SATAII150 Tx4 */
+	board_2057x		= 3,	/* SATAII150 Tx2plus */
+	board_40518		= 4,	/* SATAII150 Tx4 */
 
 	PDC_HAS_PATA		= (1 << 1), /* PDC20375/20575 has PATA */
 
+	/* PDC_CTLSTAT bit definitions */
+	PDC_DMA_ENABLE		= (1 << 7),
+	PDC_IRQ_DISABLE		= (1 << 10),
 	PDC_RESET		= (1 << 11), /* HDMA reset */
 
-	PDC_COMMON_FLAGS	= ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
+	PDC_COMMON_FLAGS	= ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
 				  ATA_FLAG_PIO_POLLING,
+
+	/* hp->flags bits */
+	PDC_FLAG_GEN_II		= (1 << 0),
 };
 
 
@@ -87,7 +91,7 @@
 };
 
 struct pdc_host_priv {
-	int			hotplug_offset;
+	unsigned long		flags;
 };
 
 static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
@@ -98,13 +102,16 @@
 static int pdc_port_start(struct ata_port *ap);
 static void pdc_port_stop(struct ata_port *ap);
 static void pdc_pata_phy_reset(struct ata_port *ap);
-static void pdc_sata_phy_reset(struct ata_port *ap);
 static void pdc_qc_prep(struct ata_queued_cmd *qc);
 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static void pdc_irq_clear(struct ata_port *ap);
 static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
 static void pdc_host_stop(struct ata_host *host);
+static void pdc_freeze(struct ata_port *ap);
+static void pdc_thaw(struct ata_port *ap);
+static void pdc_error_handler(struct ata_port *ap);
+static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
 
 
 static struct scsi_host_template pdc_ata_sht = {
@@ -133,11 +140,12 @@
 	.exec_command		= pdc_exec_command_mmio,
 	.dev_select		= ata_std_dev_select,
 
-	.phy_reset		= pdc_sata_phy_reset,
-
 	.qc_prep		= pdc_qc_prep,
 	.qc_issue		= pdc_qc_issue_prot,
-	.eng_timeout		= pdc_eng_timeout,
+	.freeze			= pdc_freeze,
+	.thaw			= pdc_thaw,
+	.error_handler		= pdc_error_handler,
+	.post_internal_cmd	= pdc_post_internal_cmd,
 	.data_xfer		= ata_mmio_data_xfer,
 	.irq_handler		= pdc_interrupt,
 	.irq_clear		= pdc_irq_clear,
@@ -195,23 +203,13 @@
 	/* board_20619 */
 	{
 		.sht		= &pdc_ata_sht,
-		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
+		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
 		.port_ops	= &pdc_pata_ops,
 	},
 
-	/* board_20771 */
-	{
-		.sht		= &pdc_ata_sht,
-		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA,
-		.pio_mask	= 0x1f, /* pio0-4 */
-		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
-		.port_ops	= &pdc_sata_ops,
-	},
-
 	/* board_2057x */
 	{
 		.sht		= &pdc_ata_sht,
@@ -235,33 +233,25 @@
 
 static const struct pci_device_id pdc_ata_pci_tbl[] = {
 	{ PCI_VDEVICE(PROMISE, 0x3371), board_2037x },
-	{ PCI_VDEVICE(PROMISE, 0x3570), board_2037x },
-	{ PCI_VDEVICE(PROMISE, 0x3571), board_2037x },
 	{ PCI_VDEVICE(PROMISE, 0x3373), board_2037x },
 	{ PCI_VDEVICE(PROMISE, 0x3375), board_2037x },
 	{ PCI_VDEVICE(PROMISE, 0x3376), board_2037x },
+	{ PCI_VDEVICE(PROMISE, 0x3570), board_2057x },
+	{ PCI_VDEVICE(PROMISE, 0x3571), board_2057x },
 	{ PCI_VDEVICE(PROMISE, 0x3574), board_2057x },
+	{ PCI_VDEVICE(PROMISE, 0x3577), board_2057x },
+	{ PCI_VDEVICE(PROMISE, 0x3d73), board_2057x },
 	{ PCI_VDEVICE(PROMISE, 0x3d75), board_2057x },
-	{ PCI_VDEVICE(PROMISE, 0x3d73), board_2037x },
 
 	{ PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
 	{ PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
 	{ PCI_VDEVICE(PROMISE, 0x3515), board_20319 },
 	{ PCI_VDEVICE(PROMISE, 0x3519), board_20319 },
-	{ PCI_VDEVICE(PROMISE, 0x3d17), board_20319 },
+	{ PCI_VDEVICE(PROMISE, 0x3d17), board_40518 },
 	{ PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },
 
 	{ PCI_VDEVICE(PROMISE, 0x6629), board_20619 },
 
-/* TODO: remove all associated board_20771 code, as it completely
- * duplicates board_2037x code, unless reason for separation can be
- * divined.
- */
-#if 0
-	{ PCI_VDEVICE(PROMISE, 0x3570), board_20771 },
-#endif
-	{ PCI_VDEVICE(PROMISE, 0x3577), board_20771 },
-
 	{ }	/* terminate list */
 };
 
@@ -277,6 +267,7 @@
 static int pdc_port_start(struct ata_port *ap)
 {
 	struct device *dev = ap->host->dev;
+	struct pdc_host_priv *hp = ap->host->private_data;
 	struct pdc_port_priv *pp;
 	int rc;
 
@@ -298,6 +289,16 @@
 
 	ap->private_data = pp;
 
+	/* fix up PHYMODE4 align timing */
+	if ((hp->flags & PDC_FLAG_GEN_II) && sata_scr_valid(ap)) {
+		void __iomem *mmio = (void __iomem *) ap->ioaddr.scr_addr;
+		unsigned int tmp;
+
+		tmp = readl(mmio + 0x014);
+		tmp = (tmp & ~3) | 1;	/* set bits 1:0 = 0:1 */
+		writel(tmp, mmio + 0x014);
+	}
+
 	return 0;
 
 err_out_kfree:
@@ -352,12 +353,6 @@
 	readl(mmio);	/* flush */
 }
 
-static void pdc_sata_phy_reset(struct ata_port *ap)
-{
-	pdc_reset_port(ap);
-	sata_phy_reset(ap);
-}
-
 static void pdc_pata_cbl_detect(struct ata_port *ap)
 {
 	u8 tmp;
@@ -425,6 +420,61 @@
 	}
 }
 
+static void pdc_freeze(struct ata_port *ap)
+{
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	u32 tmp;
+
+	tmp = readl(mmio + PDC_CTLSTAT);
+	tmp |= PDC_IRQ_DISABLE;
+	tmp &= ~PDC_DMA_ENABLE;
+	writel(tmp, mmio + PDC_CTLSTAT);
+	readl(mmio + PDC_CTLSTAT); /* flush */
+}
+
+static void pdc_thaw(struct ata_port *ap)
+{
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	u32 tmp;
+
+	/* clear IRQ */
+	readl(mmio + PDC_INT_SEQMASK);
+
+	/* turn IRQ back on */
+	tmp = readl(mmio + PDC_CTLSTAT);
+	tmp &= ~PDC_IRQ_DISABLE;
+	writel(tmp, mmio + PDC_CTLSTAT);
+	readl(mmio + PDC_CTLSTAT); /* flush */
+}
+
+static void pdc_error_handler(struct ata_port *ap)
+{
+	ata_reset_fn_t hardreset;
+
+	if (!(ap->pflags & ATA_PFLAG_FROZEN))
+		pdc_reset_port(ap);
+
+	hardreset = NULL;
+	if (sata_scr_valid(ap))
+		hardreset = sata_std_hardreset;
+
+	/* perform recovery */
+	ata_do_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
+		  ata_std_postreset);
+}
+
+static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		qc->err_mask |= AC_ERR_OTHER;
+
+	/* make DMA engine forget about the failed command */
+	if (qc->err_mask)
+		pdc_reset_port(ap);
+}
+
 static void pdc_eng_timeout(struct ata_port *ap)
 {
 	struct ata_host *host = ap->host;
@@ -631,18 +681,25 @@
 {
 	void __iomem *mmio = pe->mmio_base;
 	struct pdc_host_priv *hp = pe->private_data;
-	int hotplug_offset = hp->hotplug_offset;
+	int hotplug_offset;
 	u32 tmp;
 
+	if (hp->flags & PDC_FLAG_GEN_II)
+		hotplug_offset = PDC2_SATA_PLUG_CSR;
+	else
+		hotplug_offset = PDC_SATA_PLUG_CSR;
+
 	/*
 	 * Except for the hotplug stuff, this is voodoo from the
 	 * Promise driver.  Label this entire section
 	 * "TODO: figure out why we do this"
 	 */
 
-	/* change FIFO_SHD to 8 dwords, enable BMR_BURST */
+	/* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */
 	tmp = readl(mmio + PDC_FLASH_CTL);
-	tmp |= 0x12000;	/* bit 16 (fifo 8 dw) and 13 (bmr burst?) */
+	tmp |= 0x02000;	/* bit 13 (enable bmr burst) */
+	if (!(hp->flags & PDC_FLAG_GEN_II))
+		tmp |= 0x10000;	/* bit 16 (fifo threshold at 8 dw) */
 	writel(tmp, mmio + PDC_FLASH_CTL);
 
 	/* clear plug/unplug flags for all ports */
@@ -653,6 +710,10 @@
 	tmp = readl(mmio + hotplug_offset);
 	writel(tmp | 0xff0000, mmio + hotplug_offset);
 
+	/* don't initialise TBG or SLEW on 2nd generation chips */
+	if (hp->flags & PDC_FLAG_GEN_II)
+		return;
+
 	/* reduce TBG clock to 133 MHz. */
 	tmp = readl(mmio + PDC_TBG_MODE);
 	tmp &= ~0x30000; /* clear bit 17, 16*/
@@ -722,8 +783,6 @@
 		goto err_out_free_ent;
 	}
 
-	/* Set default hotplug offset */
-	hp->hotplug_offset = PDC_SATA_PLUG_CSR;
 	probe_ent->private_data = hp;
 
 	probe_ent->sht		= pdc_port_info[board_idx].sht;
@@ -746,8 +805,7 @@
 	/* notice 4-port boards */
 	switch (board_idx) {
 	case board_40518:
-		/* Override hotplug offset for SATAII150 */
-		hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
+		hp->flags |= PDC_FLAG_GEN_II;
 		/* Fall through */
 	case board_20319:
        		probe_ent->n_ports = 4;
@@ -759,15 +817,11 @@
 		probe_ent->port[3].scr_addr = base + 0x700;
 		break;
 	case board_2057x:
-		/* Override hotplug offset for SATAII150 */
-		hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
+		hp->flags |= PDC_FLAG_GEN_II;
 		/* Fall through */
 	case board_2037x:
 		probe_ent->n_ports = 2;
 		break;
-	case board_20771:
-		probe_ent->n_ports = 2;
-		break;
 	case board_20619:
 		probe_ent->n_ports = 4;
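The sata_promise changes above drop the old .phy_reset/.eng_timeout hooks in favour of the new-style EH entry points (.freeze, .thaw, .error_handler, .post_internal_cmd). A minimal sketch of an error handler built the same way, assuming a hypothetical foo_reset_port() quiesce helper:

	#include <linux/libata.h>

	static void foo_error_handler(struct ata_port *ap)
	{
		/* quiesce the controller unless EH has already frozen the port */
		if (!(ap->pflags & ATA_PFLAG_FROZEN))
			foo_reset_port(ap);	/* hypothetical helper */

		/* let the core drive prereset/softreset/hardreset/postreset */
		ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
			  sata_std_hardreset, ata_std_postreset);
	}

In this scheme .freeze typically masks the port's interrupt and .thaw acks pending status and unmasks it again, which is what pdc_freeze()/pdc_thaw() above do through PDC_CTLSTAT.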
 
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index ca8d993..7808d03 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -356,6 +356,7 @@
 
 static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
 {
+	struct ata_eh_info *ehi = &ap->eh_info;
 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
 	u8 status;
 
@@ -428,6 +429,10 @@
 	/* kick HSM in the ass */
 	ata_hsm_move(ap, qc, status, 0);
 
+	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
+				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
+		ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);
+
 	return;
 
  err_hsm:
@@ -534,6 +539,7 @@
  */
 static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
 {
+	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
 	unsigned int n, quirks = 0;
 	unsigned char model_num[41];
 
@@ -549,16 +555,18 @@
 	if (slow_down ||
 	    ((ap->flags & SIL_FLAG_MOD15WRITE) &&
 	     (quirks & SIL_QUIRK_MOD15WRITE))) {
-		ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix "
-			       "(mod15write workaround)\n");
+		if (print_info)
+			ata_dev_printk(dev, KERN_INFO, "applying Seagate "
+				       "errata fix (mod15write workaround)\n");
 		dev->max_sectors = 15;
 		return;
 	}
 
 	/* limit to udma5 */
 	if (quirks & SIL_QUIRK_UDMA5MAX) {
-		ata_dev_printk(dev, KERN_INFO,
-			       "applying Maxtor errata fix %s\n", model_num);
+		if (print_info)
+			ata_dev_printk(dev, KERN_INFO, "applying Maxtor "
+				       "errata fix %s\n", model_num);
 		dev->udma_mask &= ATA_UDMA5;
 		return;
 	}
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 169e200..5aa288d 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -100,10 +100,14 @@
 	 */
 	PORT_REGS_SIZE		= 0x2000,
 
-	PORT_LRAM		= 0x0000, /* 31 LRAM slots and PM regs */
+	PORT_LRAM		= 0x0000, /* 31 LRAM slots and PMP regs */
 	PORT_LRAM_SLOT_SZ	= 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */
 
-	PORT_PM			= 0x0f80, /* 8 bytes PM * 16 (128 bytes) */
+	PORT_PMP		= 0x0f80, /* 8 bytes PMP * 16 (128 bytes) */
+	PORT_PMP_STATUS		= 0x0000, /* port device status offset */
+	PORT_PMP_QACTIVE	= 0x0004, /* port device QActive offset */
+	PORT_PMP_SIZE		= 0x0008, /* 8 bytes per PMP */
+
 		/* 32 bit regs */
 	PORT_CTRL_STAT		= 0x1000, /* write: ctrl-set, read: stat */
 	PORT_CTRL_CLR		= 0x1004, /* write: ctrl-clear */
@@ -126,6 +130,7 @@
 	PORT_PHY_CFG		= 0x1050,
 	PORT_SLOT_STAT		= 0x1800,
 	PORT_CMD_ACTIVATE	= 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
+	PORT_CONTEXT		= 0x1e04,
 	PORT_EXEC_DIAG		= 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
 	PORT_PSD_DIAG		= 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
 	PORT_SCONTROL		= 0x1f00,
@@ -139,9 +144,9 @@
 	PORT_CS_INIT		= (1 << 2), /* port initialize */
 	PORT_CS_IRQ_WOC		= (1 << 3), /* interrupt write one to clear */
 	PORT_CS_CDB16		= (1 << 5), /* 0=12b cdb, 1=16b cdb */
-	PORT_CS_RESUME		= (1 << 6), /* port resume */
+	PORT_CS_PMP_RESUME	= (1 << 6), /* PMP resume */
 	PORT_CS_32BIT_ACTV	= (1 << 10), /* 32-bit activation */
-	PORT_CS_PM_EN		= (1 << 13), /* port multiplier enable */
+	PORT_CS_PMP_EN		= (1 << 13), /* port multiplier enable */
 	PORT_CS_RDY		= (1 << 31), /* port ready to accept commands */
 
 	/* PORT_IRQ_STAT/ENABLE_SET/CLR */
@@ -562,7 +567,7 @@
 
 	/* do SRST */
 	prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
-	prb->fis[1] = 0; /* no PM yet */
+	prb->fis[1] = 0; /* no PMP yet */
 
 	writel((u32)paddr, port + PORT_CMD_ACTIVATE);
 	writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
@@ -1050,7 +1055,8 @@
 		writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
 
 		/* Clear port multiplier enable and resume bits */
-		writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
+		writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME,
+		       port + PORT_CTRL_CLR);
 	}
 
 	/* Turn on interrupts */
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 9d1235b..9c25a1e 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -173,7 +173,7 @@
 	if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
 		pci_read_config_dword(pdev, cfg_addr+0x10, &val2);
 
-	return val|val2;
+	return (val|val2) & 0xfffffffb; /* avoid problems with powered-down ports */
 }
 
 static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
@@ -212,7 +212,7 @@
 	if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
 		val2 = inl(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
 
-	return val | val2;
+	return (val | val2) & 0xfffffffb;
 }
 
 static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
@@ -239,7 +239,7 @@
 	static int printed_version;
 	struct ata_probe_ent *probe_ent = NULL;
 	int rc;
-	u32 genctl;
+	u32 genctl, val;
 	struct ata_port_info pi = sis_port_info, *ppi[2] = { &pi, &pi };
 	int pci_dev_busy = 0;
 	u8 pmr;
@@ -285,17 +285,24 @@
 	if (ent->device != 0x182) {
 		if ((pmr & SIS_PMR_COMBINED) == 0) {
 			dev_printk(KERN_INFO, &pdev->dev,
-				   "Detected SiS 180/181 chipset in SATA mode\n");
+				   "Detected SiS 180/181/964 chipset in SATA mode\n");
 			port2_start = 64;
 		}
 		else {
 			dev_printk(KERN_INFO, &pdev->dev,
 				   "Detected SiS 180/181 chipset in combined mode\n");
 			port2_start=0;
+			pi.flags |= ATA_FLAG_SLAVE_POSS;
 		}
 	}
 	else {
-		dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182 chipset\n");
+		pci_read_config_dword(pdev, 0x6C, &val);
+		if (val & (1L << 31)) {
+			dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965 chipset\n");
+			pi.flags |= ATA_FLAG_SLAVE_POSS;
+		}
+		else
+			dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965L chipset\n");
 		port2_start = 0x20;
 	}
 
diff --git a/drivers/atm/Makefile b/drivers/atm/Makefile
index b5077ce..1b16f81 100644
--- a/drivers/atm/Makefile
+++ b/drivers/atm/Makefile
@@ -41,7 +41,7 @@
   # guess the target endianness to choose the right PCA-200E firmware image
   ifeq ($(CONFIG_ATM_FORE200E_PCA_DEFAULT_FW),y)
     byteorder.h			:= include$(if $(patsubst $(srctree),,$(objtree)),2)/asm/byteorder.h
-    CONFIG_ATM_FORE200E_PCA_FW	:= $(obj)/pca200e$(if $(shell $(CC) -E -dM $(byteorder.h) | grep ' __LITTLE_ENDIAN '),.bin,_ecd.bin2)
+    CONFIG_ATM_FORE200E_PCA_FW	:= $(obj)/pca200e$(if $(shell $(CC) $(CPPFLAGS) -E -dM $(byteorder.h) | grep ' __LITTLE_ENDIAN '),.bin,_ecd.bin2)
   endif
 endif
 
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 9fffa7a..afa7d75 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -972,7 +972,7 @@
       }
       case round_up: {
 	// check all bits that we are discarding
-	if (man & (-1>>9)) {
+	if (man & (~0U>>9)) {
 	  man = (man>>(32-9)) + 1;
 	  if (man == (1<<9)) {
 	    // no need to check for round up outside of range
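The (-1>>9) to (~0U>>9) change here (and in firestream.c below) matters because right-shifting a negative signed value is implementation-defined in C, whereas shifting the unsigned all-ones value is well defined. A quick worked fragment, assuming 32-bit ints:

	u32 man = 0x00400000;			/* example mantissa with a discarded bit set */
	unsigned int low_bits = ~0U >> 9;	/* 0x007fffff: the 23 low bits being dropped */

	if (man & low_bits)			/* any dropped bit set -> round the 9-bit result up */
		man = (man >> (32 - 9)) + 1;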
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 697ad82..9c67df5 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -512,7 +512,7 @@
 		}
 		case ROUND_UP: {
 			/* check all bits that we are discarding */
-			if (man & (-1>>9)) {
+			if (man & (~0U>>9)) {
 				man = (man>>(32-9)) + 1;
 				if (man == (1<<9)) {
 					/* no need to check for round up outside of range */
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index c7314a7..7d9b4e5 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -820,7 +820,7 @@
 		void *cpuaddr;
 
 #ifdef USE_RBPS_POOL 
-		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
+		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
 		if (cpuaddr == NULL)
 			return -ENOMEM;
 #else
@@ -884,7 +884,7 @@
 		void *cpuaddr;
 
 #ifdef USE_RBPL_POOL
-		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
+		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
 		if (cpuaddr == NULL)
 			return -ENOMEM;
 #else
@@ -1724,7 +1724,7 @@
 	struct he_tpd *tpd;
 	dma_addr_t dma_handle; 
 
-	tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);              
+	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
 	if (tpd == NULL)
 		return NULL;
 			
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 87b17c3..f407861 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -135,7 +135,7 @@
 			       int flags);
 static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos,
 			      char *page);
-static void idt77252_softint(void *dev_id);
+static void idt77252_softint(struct work_struct *work);
 
 
 static struct atmdev_ops idt77252_ops =
@@ -2866,9 +2866,10 @@
 }
 
 static void
-idt77252_softint(void *dev_id)
+idt77252_softint(struct work_struct *work)
 {
-	struct idt77252_dev *card = dev_id;
+	struct idt77252_dev *card =
+		container_of(work, struct idt77252_dev, tqueue);
 	u32 stat;
 	int done;
 
@@ -3697,7 +3698,7 @@
 	card->pcidev = pcidev;
 	sprintf(card->name, "idt77252-%d", card->index);
 
-	INIT_WORK(&card->tqueue, idt77252_softint, (void *)card);
+	INIT_WORK(&card->tqueue, idt77252_softint);
 
 	membase = pci_resource_start(pcidev, 1);
 	srambase = pci_resource_start(pcidev, 2);
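The idt77252 change above follows the 2.6.20 workqueue conversion: INIT_WORK() no longer takes a data pointer, and the handler recovers its containing structure with container_of(). A minimal sketch of the pattern, with illustrative names:

	#include <linux/workqueue.h>

	struct foo_dev {
		struct work_struct tqueue;
		/* ... per-device state ... */
	};

	static void foo_softint(struct work_struct *work)
	{
		/* derive the device from the embedded work item */
		struct foo_dev *card = container_of(work, struct foo_dev, tqueue);

		(void)card;	/* service the deferred work here */
	}

	static void foo_setup(struct foo_dev *card)
	{
		INIT_WORK(&card->tqueue, foo_softint);	/* handler only, no data pointer */
	}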
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 9ed1c60..bb7ef57 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -305,7 +305,7 @@
 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
 **  +----+----+------------------+-------------------------------+
 ** 
-**    R = reserverd (written as 0)
+**    R = reserved (written as 0)
 **    NZ = 0 if 0 cells/sec; 1 otherwise
 **
 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 7d8a7ce..472810f 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -355,6 +355,21 @@
 	}
 }
 
+#ifdef CONFIG_SYSFS_DEPRECATED
+static int make_deprecated_bus_links(struct device *dev)
+{
+	return sysfs_create_link(&dev->kobj,
+				 &dev->bus->subsys.kset.kobj, "bus");
+}
+
+static void remove_deprecated_bus_links(struct device *dev)
+{
+	sysfs_remove_link(&dev->kobj, "bus");
+}
+#else
+static inline int make_deprecated_bus_links(struct device *dev) { return 0; }
+static inline void remove_deprecated_bus_links(struct device *dev) { }
+#endif
 
 /**
  *	bus_add_device - add device to bus
@@ -381,8 +396,7 @@
 				&dev->bus->subsys.kset.kobj, "subsystem");
 		if (error)
 			goto out_subsys;
-		error = sysfs_create_link(&dev->kobj,
-				&dev->bus->subsys.kset.kobj, "bus");
+		error = make_deprecated_bus_links(dev);
 		if (error)
 			goto out_deprecated;
 	}
@@ -436,7 +450,7 @@
 {
 	if (dev->bus) {
 		sysfs_remove_link(&dev->kobj, "subsystem");
-		sysfs_remove_link(&dev->kobj, "bus");
+		remove_deprecated_bus_links(dev);
 		sysfs_remove_link(&dev->bus->devices.kobj, dev->bus_id);
 		device_remove_attrs(dev->bus, dev);
 		if (dev->is_registered) {
@@ -724,6 +738,8 @@
 {
 	int retval;
 
+	BLOCKING_INIT_NOTIFIER_HEAD(&bus->bus_notifier);
+
 	retval = kobject_set_name(&bus->subsys.kset.kobj, "%s", bus->name);
 	if (retval)
 		goto out;
@@ -782,6 +798,18 @@
 	subsystem_unregister(&bus->subsys);
 }
 
+int bus_register_notifier(struct bus_type *bus, struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&bus->bus_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bus_register_notifier);
+
+int bus_unregister_notifier(struct bus_type *bus, struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&bus->bus_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bus_unregister_notifier);
+
 int __init buses_init(void)
 {
 	return subsystem_register(&bus_subsys);
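bus_register_notifier()/bus_unregister_notifier() above give other code a hook into device add/remove and driver bind/unbind events on a bus (the BUS_NOTIFY_* actions raised from core.c and dd.c later in this patch). A minimal sketch of a client, with illustrative names; the choice of platform_bus_type is only an example:

	#include <linux/device.h>
	#include <linux/notifier.h>

	static int foo_bus_notify(struct notifier_block *nb,
				  unsigned long action, void *data)
	{
		struct device *dev = data;

		switch (action) {
		case BUS_NOTIFY_ADD_DEVICE:	/* device_add() completed */
		case BUS_NOTIFY_DEL_DEVICE:	/* device about to go away */
		case BUS_NOTIFY_BOUND_DRIVER:	/* driver just bound to dev */
		case BUS_NOTIFY_UNBIND_DRIVER:	/* driver about to unbind */
			dev_dbg(dev, "bus event %lu\n", action);
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block foo_nb = {
		.notifier_call = foo_bus_notify,
	};

	/* e.g. at module init / exit:
	 *	bus_register_notifier(&platform_bus_type, &foo_nb);
	 *	bus_unregister_notifier(&platform_bus_type, &foo_nb);
	 */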
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 0ff267a..f098881 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -352,6 +352,92 @@
 	return class_dev->class->name;
 }
 
+#ifdef CONFIG_SYSFS_DEPRECATED
+char *make_class_name(const char *name, struct kobject *kobj)
+{
+	char *class_name;
+	int size;
+
+	size = strlen(name) + strlen(kobject_name(kobj)) + 2;
+
+	class_name = kmalloc(size, GFP_KERNEL);
+	if (!class_name)
+		return ERR_PTR(-ENOMEM);
+
+	strcpy(class_name, name);
+	strcat(class_name, ":");
+	strcat(class_name, kobject_name(kobj));
+	return class_name;
+}
+
+static int deprecated_class_uevent(char **envp, int num_envp, int *cur_index,
+				   char *buffer, int buffer_size,
+				   int *cur_len,
+				   struct class_device *class_dev)
+{
+	struct device *dev = class_dev->dev;
+	char *path;
+
+	if (!dev)
+		return 0;
+
+	/* add device, backing this class device (deprecated) */
+	path = kobject_get_path(&dev->kobj, GFP_KERNEL);
+
+	add_uevent_var(envp, num_envp, cur_index, buffer, buffer_size,
+		       cur_len, "PHYSDEVPATH=%s", path);
+	kfree(path);
+
+	if (dev->bus)
+		add_uevent_var(envp, num_envp, cur_index,
+			       buffer, buffer_size, cur_len,
+			       "PHYSDEVBUS=%s", dev->bus->name);
+
+	if (dev->driver)
+		add_uevent_var(envp, num_envp, cur_index,
+			       buffer, buffer_size, cur_len,
+			       "PHYSDEVDRIVER=%s", dev->driver->name);
+	return 0;
+}
+
+static int make_deprecated_class_device_links(struct class_device *class_dev)
+{
+	char *class_name;
+	int error;
+
+	if (!class_dev->dev)
+		return 0;
+
+	class_name = make_class_name(class_dev->class->name, &class_dev->kobj);
+	error = sysfs_create_link(&class_dev->dev->kobj, &class_dev->kobj,
+				  class_name);
+	kfree(class_name);
+	return error;
+}
+
+static void remove_deprecated_class_device_links(struct class_device *class_dev)
+{
+	char *class_name;
+
+	if (!class_dev->dev)
+		return;
+
+	class_name = make_class_name(class_dev->class->name, &class_dev->kobj);
+	sysfs_remove_link(&class_dev->dev->kobj, class_name);
+	kfree(class_name);
+}
+#else
+static inline int deprecated_class_uevent(char **envp, int num_envp,
+					  int *cur_index, char *buffer,
+					  int buffer_size, int *cur_len,
+					  struct class_device *class_dev)
+{ return 0; }
+static inline int make_deprecated_class_device_links(struct class_device *cd)
+{ return 0; }
+static void remove_deprecated_class_device_links(struct class_device *cd)
+{ }
+#endif
+
 static int class_uevent(struct kset *kset, struct kobject *kobj, char **envp,
 			 int num_envp, char *buffer, int buffer_size)
 {
@@ -362,25 +448,8 @@
 
 	pr_debug("%s - name = %s\n", __FUNCTION__, class_dev->class_id);
 
-	if (class_dev->dev) {
-		/* add device, backing this class device (deprecated) */
-		struct device *dev = class_dev->dev;
-		char *path = kobject_get_path(&dev->kobj, GFP_KERNEL);
-
-		add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
-			       &length, "PHYSDEVPATH=%s", path);
-		kfree(path);
-
-		if (dev->bus)
-			add_uevent_var(envp, num_envp, &i,
-				       buffer, buffer_size, &length,
-				       "PHYSDEVBUS=%s", dev->bus->name);
-
-		if (dev->driver)
-			add_uevent_var(envp, num_envp, &i,
-				       buffer, buffer_size, &length,
-				       "PHYSDEVDRIVER=%s", dev->driver->name);
-	}
+	deprecated_class_uevent(envp, num_envp, &i, buffer, buffer_size,
+				&length, class_dev);
 
 	if (MAJOR(class_dev->devt)) {
 		add_uevent_var(envp, num_envp, &i,
@@ -506,29 +575,11 @@
 	INIT_LIST_HEAD(&class_dev->node);
 }
 
-char *make_class_name(const char *name, struct kobject *kobj)
-{
-	char *class_name;
-	int size;
-
-	size = strlen(name) + strlen(kobject_name(kobj)) + 2;
-
-	class_name = kmalloc(size, GFP_KERNEL);
-	if (!class_name)
-		return ERR_PTR(-ENOMEM);
-
-	strcpy(class_name, name);
-	strcat(class_name, ":");
-	strcat(class_name, kobject_name(kobj));
-	return class_name;
-}
-
 int class_device_add(struct class_device *class_dev)
 {
 	struct class *parent_class = NULL;
 	struct class_device *parent_class_dev = NULL;
 	struct class_interface *class_intf;
-	char *class_name = NULL;
 	int error = -EINVAL;
 
 	class_dev = class_device_get(class_dev);
@@ -599,20 +650,18 @@
 		goto out5;
 
 	if (class_dev->dev) {
-		class_name = make_class_name(class_dev->class->name,
-					     &class_dev->kobj);
 		error = sysfs_create_link(&class_dev->kobj,
 					  &class_dev->dev->kobj, "device");
 		if (error)
 			goto out6;
-		error = sysfs_create_link(&class_dev->dev->kobj, &class_dev->kobj,
-					  class_name);
-		if (error)
-			goto out7;
 	}
 
 	error = class_device_add_groups(class_dev);
 	if (error)
+		goto out7;
+
+	error = make_deprecated_class_device_links(class_dev);
+	if (error)
 		goto out8;
 
 	kobject_uevent(&class_dev->kobj, KOBJ_ADD);
@@ -629,8 +678,7 @@
 	goto out1;
 
  out8:
-	if (class_dev->dev)
-		sysfs_remove_link(&class_dev->kobj, class_name);
+	class_device_remove_groups(class_dev);
  out7:
 	if (class_dev->dev)
 		sysfs_remove_link(&class_dev->kobj, "device");
@@ -649,7 +697,6 @@
 	class_put(parent_class);
  out1:
 	class_device_put(class_dev);
-	kfree(class_name);
 	return error;
 }
 
@@ -726,7 +773,6 @@
 	struct class *parent_class = class_dev->class;
 	struct class_device *parent_device = class_dev->parent;
 	struct class_interface *class_intf;
-	char *class_name = NULL;
 
 	if (parent_class) {
 		down(&parent_class->sem);
@@ -738,10 +784,8 @@
 	}
 
 	if (class_dev->dev) {
-		class_name = make_class_name(class_dev->class->name,
-					     &class_dev->kobj);
+		remove_deprecated_class_device_links(class_dev);
 		sysfs_remove_link(&class_dev->kobj, "device");
-		sysfs_remove_link(&class_dev->dev->kobj, class_name);
 	}
 	sysfs_remove_link(&class_dev->kobj, "subsystem");
 	class_device_remove_file(class_dev, &class_dev->uevent_attr);
@@ -755,7 +799,6 @@
 
 	class_device_put(parent_device);
 	class_put(parent_class);
-	kfree(class_name);
 }
 
 void class_device_unregister(struct class_device *class_dev)
@@ -804,14 +847,17 @@
 	pr_debug("CLASS: renaming '%s' to '%s'\n", class_dev->class_id,
 		 new_name);
 
+#ifdef CONFIG_SYSFS_DEPRECATED
 	if (class_dev->dev)
 		old_class_name = make_class_name(class_dev->class->name,
 						 &class_dev->kobj);
+#endif
 
 	strlcpy(class_dev->class_id, new_name, KOBJ_NAME_LEN);
 
 	error = kobject_rename(&class_dev->kobj, new_name);
 
+#ifdef CONFIG_SYSFS_DEPRECATED
 	if (class_dev->dev) {
 		new_class_name = make_class_name(class_dev->class->name,
 						 &class_dev->kobj);
@@ -819,6 +865,7 @@
 				  new_class_name);
 		sysfs_remove_link(&class_dev->dev->kobj, old_class_name);
 	}
+#endif
 	class_device_put(class_dev);
 
 	kfree(old_class_name);
@@ -893,23 +940,6 @@
 	class_put(parent);
 }
 
-int virtual_device_parent(struct device *dev)
-{
-	if (!dev->class)
-		return -ENODEV;
-
-	if (!dev->class->virtual_dir) {
-		static struct kobject *virtual_dir = NULL;
-
-		if (!virtual_dir)
-			virtual_dir = kobject_add_dir(&devices_subsys.kset.kobj, "virtual");
-		dev->class->virtual_dir = kobject_add_dir(virtual_dir, dev->class->name);
-	}
-
-	dev->kobj.parent = dev->class->virtual_dir;
-	return 0;
-}
-
 int __init classes_init(void)
 {
 	int retval;
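The class.c rework above keeps all of the deprecated-sysfs handling behind CONFIG_SYSFS_DEPRECATED by pairing real helpers with no-op static inline stubs, so callers need no #ifdefs of their own. The shape of that pattern, with illustrative names:

	#ifdef CONFIG_SYSFS_DEPRECATED
	static int make_foo_link(struct device *dev)
	{
		return sysfs_create_link(&dev->kobj, &dev->parent->kobj, "foo");
	}

	static void remove_foo_link(struct device *dev)
	{
		sysfs_remove_link(&dev->kobj, "foo");
	}
	#else
	static inline int make_foo_link(struct device *dev) { return 0; }
	static inline void remove_foo_link(struct device *dev) { }
	#endif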
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 002fde4..67b79a7 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/kdev_t.h>
+#include <linux/notifier.h>
 
 #include <asm/semaphore.h>
 
@@ -153,20 +154,24 @@
 			       "MINOR=%u", MINOR(dev->devt));
 	}
 
+#ifdef CONFIG_SYSFS_DEPRECATED
 	/* add bus name (same as SUBSYSTEM, deprecated) */
 	if (dev->bus)
 		add_uevent_var(envp, num_envp, &i,
 			       buffer, buffer_size, &length,
 			       "PHYSDEVBUS=%s", dev->bus->name);
+#endif
 
 	/* add driver name (PHYSDEV* values are deprecated)*/
 	if (dev->driver) {
 		add_uevent_var(envp, num_envp, &i,
 			       buffer, buffer_size, &length,
 			       "DRIVER=%s", dev->driver->name);
+#ifdef CONFIG_SYSFS_DEPRECATED
 		add_uevent_var(envp, num_envp, &i,
 			       buffer, buffer_size, &length,
 			       "PHYSDEVDRIVER=%s", dev->driver->name);
+#endif
 	}
 
 	/* terminate, set to next free slot, shrink available space */
@@ -381,8 +386,55 @@
 	INIT_LIST_HEAD(&dev->node);
 	init_MUTEX(&dev->sem);
 	device_init_wakeup(dev, 0);
+	set_dev_node(dev, -1);
 }
 
+#ifdef CONFIG_SYSFS_DEPRECATED
+static int setup_parent(struct device *dev, struct device *parent)
+{
+	/* Set the parent to the class, not the parent device */
+	/* this keeps sysfs from having a symlink to make old udevs happy */
+	if (dev->class)
+		dev->kobj.parent = &dev->class->subsys.kset.kobj;
+	else if (parent)
+		dev->kobj.parent = &parent->kobj;
+
+	return 0;
+}
+#else
+static int virtual_device_parent(struct device *dev)
+{
+	if (!dev->class)
+		return -ENODEV;
+
+	if (!dev->class->virtual_dir) {
+		static struct kobject *virtual_dir = NULL;
+
+		if (!virtual_dir)
+			virtual_dir = kobject_add_dir(&devices_subsys.kset.kobj, "virtual");
+		dev->class->virtual_dir = kobject_add_dir(virtual_dir, dev->class->name);
+	}
+
+	dev->kobj.parent = dev->class->virtual_dir;
+	return 0;
+}
+
+static int setup_parent(struct device *dev, struct device *parent)
+{
+	int error;
+
+	/* if this is a class device, and has no parent, create one */
+	if ((dev->class) && (parent == NULL)) {
+		error = virtual_device_parent(dev);
+		if (error)
+			return error;
+	} else if (parent)
+		dev->kobj.parent = &parent->kobj;
+
+	return 0;
+}
+#endif
+
 /**
  *	device_add - add device to device hierarchy.
  *	@dev:	device.
@@ -405,29 +457,29 @@
 	if (!dev || !strlen(dev->bus_id))
 		goto Error;
 
-	/* if this is a class device, and has no parent, create one */
-	if ((dev->class) && (dev->parent == NULL)) {
-		error = virtual_device_parent(dev);
-		if (error)
-			goto Error;
-	}
+	pr_debug("DEV: registering device: ID = '%s'\n", dev->bus_id);
 
 	parent = get_device(dev->parent);
 
-	pr_debug("DEV: registering device: ID = '%s'\n", dev->bus_id);
+	error = setup_parent(dev, parent);
+	if (error)
+		goto Error;
 
 	/* first, register with generic layer. */
 	kobject_set_name(&dev->kobj, "%s", dev->bus_id);
-	if (parent)
-		dev->kobj.parent = &parent->kobj;
-
-	if ((error = kobject_add(&dev->kobj)))
+	error = kobject_add(&dev->kobj);
+	if (error)
 		goto Error;
 
 	/* notify platform of device entry */
 	if (platform_notify)
 		platform_notify(dev);
 
+	/* notify clients of device entry (new way) */
+	if (dev->bus)
+		blocking_notifier_call_chain(&dev->bus->bus_notifier,
+					     BUS_NOTIFY_ADD_DEVICE, dev);
+
 	dev->uevent_attr.attr.name = "uevent";
 	dev->uevent_attr.attr.mode = S_IWUSR;
 	if (dev->driver)
@@ -461,13 +513,18 @@
 	if (dev->class) {
 		sysfs_create_link(&dev->kobj, &dev->class->subsys.kset.kobj,
 				  "subsystem");
-		sysfs_create_link(&dev->class->subsys.kset.kobj, &dev->kobj,
-				  dev->bus_id);
+		/* If this is not a "fake" compatible device, then create the
+		 * symlink from the class to the device. */
+		if (dev->kobj.parent != &dev->class->subsys.kset.kobj)
+			sysfs_create_link(&dev->class->subsys.kset.kobj,
+					  &dev->kobj, dev->bus_id);
+#ifdef CONFIG_SYSFS_DEPRECATED
 		if (parent) {
 			sysfs_create_link(&dev->kobj, &dev->parent->kobj, "device");
 			class_name = make_class_name(dev->class->name, &dev->kobj);
 			sysfs_create_link(&dev->parent->kobj, &dev->kobj, class_name);
 		}
+#endif
 	}
 
 	if ((error = device_add_attrs(dev)))
@@ -504,6 +561,9 @@
  BusError:
 	device_pm_remove(dev);
  PMError:
+	if (dev->bus)
+		blocking_notifier_call_chain(&dev->bus->bus_notifier,
+					     BUS_NOTIFY_DEL_DEVICE, dev);
 	device_remove_groups(dev);
  GroupError:
  	device_remove_attrs(dev);
@@ -586,7 +646,6 @@
 void device_del(struct device * dev)
 {
 	struct device * parent = dev->parent;
-	char *class_name = NULL;
 	struct class_interface *class_intf;
 
 	if (parent)
@@ -597,13 +656,21 @@
 	}
 	if (dev->class) {
 		sysfs_remove_link(&dev->kobj, "subsystem");
-		sysfs_remove_link(&dev->class->subsys.kset.kobj, dev->bus_id);
-		class_name = make_class_name(dev->class->name, &dev->kobj);
+		/* If this is not a "fake" compatible device, remove the
+		 * symlink from the class to the device. */
+		if (dev->kobj.parent != &dev->class->subsys.kset.kobj)
+			sysfs_remove_link(&dev->class->subsys.kset.kobj,
+					  dev->bus_id);
+#ifdef CONFIG_SYSFS_DEPRECATED
 		if (parent) {
-			sysfs_remove_link(&dev->kobj, "device");
+			char *class_name = make_class_name(dev->class->name,
+							   &dev->kobj);
 			sysfs_remove_link(&dev->parent->kobj, class_name);
+			kfree(class_name);
+			sysfs_remove_link(&dev->kobj, "device");
 		}
-		kfree(class_name);
+#endif
+
 		down(&dev->class->sem);
 		/* notify any interfaces that the device is now gone */
 		list_for_each_entry(class_intf, &dev->class->interfaces, node)
@@ -616,13 +683,16 @@
 	device_remove_file(dev, &dev->uevent_attr);
 	device_remove_groups(dev);
 	device_remove_attrs(dev);
+	bus_remove_device(dev);
 
 	/* Notify the platform of the removal, in case they
 	 * need to do anything...
 	 */
 	if (platform_notify_remove)
 		platform_notify_remove(dev);
-	bus_remove_device(dev);
+	if (dev->bus)
+		blocking_notifier_call_chain(&dev->bus->bus_notifier,
+					     BUS_NOTIFY_DEL_DEVICE, dev);
 	device_pm_remove(dev);
 	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
 	kobject_del(&dev->kobj);
@@ -681,12 +751,45 @@
 	return error;
 }
 
+/**
+ * device_find_child - device iterator for locating a particular device.
+ * @parent: parent struct device
+ * @data: Data to pass to match function
+ * @match: Callback function to check device
+ *
+ * This is similar to the device_for_each_child() function above, but it
+ * returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does.  If the callback returns non-zero and a reference to the
+ * current device can be obtained, this function will return to the caller
+ * and not iterate over any more devices.
+ */
+struct device * device_find_child(struct device *parent, void *data,
+				  int (*match)(struct device *, void *))
+{
+	struct klist_iter i;
+	struct device *child;
+
+	if (!parent)
+		return NULL;
+
+	klist_iter_init(&parent->klist_children, &i);
+	while ((child = next_device(&i)))
+		if (match(child, data) && get_device(child))
+			break;
+	klist_iter_exit(&i);
+	return child;
+}
+
 int __init devices_init(void)
 {
 	return subsystem_register(&devices_subsys);
 }
 
 EXPORT_SYMBOL_GPL(device_for_each_child);
+EXPORT_SYMBOL_GPL(device_find_child);
 
 EXPORT_SYMBOL_GPL(device_initialize);
 EXPORT_SYMBOL_GPL(device_add);
@@ -809,8 +912,10 @@
 
 	pr_debug("DEVICE: renaming '%s' to '%s'\n", dev->bus_id, new_name);
 
+#ifdef CONFIG_SYSFS_DEPRECATED
 	if ((dev->class) && (dev->parent))
 		old_class_name = make_class_name(dev->class->name, &dev->kobj);
+#endif
 
 	if (dev->class) {
 		old_symlink_name = kmalloc(BUS_ID_SIZE, GFP_KERNEL);
@@ -825,6 +930,7 @@
 
 	error = kobject_rename(&dev->kobj, new_name);
 
+#ifdef CONFIG_SYSFS_DEPRECATED
 	if (old_class_name) {
 		new_class_name = make_class_name(dev->class->name, &dev->kobj);
 		if (new_class_name) {
@@ -833,6 +939,8 @@
 			sysfs_remove_link(&dev->parent->kobj, old_class_name);
 		}
 	}
+#endif
+
 	if (dev->class) {
 		sysfs_remove_link(&dev->class->subsys.kset.kobj,
 				  old_symlink_name);
@@ -848,3 +956,95 @@
 
 	return error;
 }
+
+
+static int device_move_class_links(struct device *dev,
+				   struct device *old_parent,
+				   struct device *new_parent)
+{
+#ifdef CONFIG_SYSFS_DEPRECATED
+	int error;
+	char *class_name;
+
+	class_name = make_class_name(dev->class->name, &dev->kobj);
+	if (!class_name) {
+		error = PTR_ERR(class_name);
+		class_name = NULL;
+		goto out;
+	}
+	if (old_parent) {
+		sysfs_remove_link(&dev->kobj, "device");
+		sysfs_remove_link(&old_parent->kobj, class_name);
+	}
+	error = sysfs_create_link(&dev->kobj, &new_parent->kobj, "device");
+	if (error)
+		goto out;
+	error = sysfs_create_link(&new_parent->kobj, &dev->kobj, class_name);
+	if (error)
+		sysfs_remove_link(&dev->kobj, "device");
+out:
+	kfree(class_name);
+	return error;
+#else
+	return 0;
+#endif
+}
+
+/**
+ * device_move - moves a device to a new parent
+ * @dev: the pointer to the struct device to be moved
+ * @new_parent: the new parent of the device
+ */
+int device_move(struct device *dev, struct device *new_parent)
+{
+	int error;
+	struct device *old_parent;
+
+	dev = get_device(dev);
+	if (!dev)
+		return -EINVAL;
+
+	if (!device_is_registered(dev)) {
+		error = -EINVAL;
+		goto out;
+	}
+	new_parent = get_device(new_parent);
+	if (!new_parent) {
+		error = -EINVAL;
+		goto out;
+	}
+	pr_debug("DEVICE: moving '%s' to '%s'\n", dev->bus_id,
+		new_parent->bus_id);
+	error = kobject_move(&dev->kobj, &new_parent->kobj);
+	if (error) {
+		put_device(new_parent);
+		goto out;
+	}
+	old_parent = dev->parent;
+	dev->parent = new_parent;
+	if (old_parent)
+		klist_remove(&dev->knode_parent);
+	klist_add_tail(&dev->knode_parent, &new_parent->klist_children);
+	if (!dev->class)
+		goto out_put;
+	error = device_move_class_links(dev, old_parent, new_parent);
+	if (error) {
+		/* We ignore errors on cleanup since we're hosed anyway... */
+		device_move_class_links(dev, new_parent, old_parent);
+		if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
+			klist_remove(&dev->knode_parent);
+			if (old_parent)
+				klist_add_tail(&dev->knode_parent,
+					       &old_parent->klist_children);
+		}
+		put_device(new_parent);
+		goto out;
+	}
+out_put:
+	put_device(old_parent);
+out:
+	put_device(dev);
+	return error;
+}
+
+EXPORT_SYMBOL_GPL(device_move);
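device_find_child() added above returns a referenced child device picked out by a caller-supplied match function; the reference must be dropped with put_device() when the caller is done. A minimal sketch of a caller, assuming a lookup by bus_id (names are illustrative):

	#include <linux/device.h>
	#include <linux/string.h>

	static int match_by_busid(struct device *dev, void *data)
	{
		return strcmp(dev->bus_id, (char *)data) == 0;
	}

	static void foo_poke_child(struct device *parent, char *busid)
	{
		struct device *child;

		child = device_find_child(parent, busid, match_by_busid);
		if (!child)
			return;

		/* ... use the child ... */

		put_device(child);	/* drop the reference taken for us */
	}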
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4bef76a..7fd095e 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -5,6 +5,7 @@
 #include <linux/sysdev.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/cpu.h>
 #include <linux/topology.h>
 #include <linux/device.h>
@@ -103,8 +104,8 @@
 
 /*
  * register_cpu - Setup a driverfs device for a CPU.
- * @cpu - Callers can set the cpu->no_control field to 1, to indicate not to
- *		  generate a control file in sysfs for this CPU.
+ * @cpu - if cpu->hotpluggable is set to 1, a control file will be generated
+ *	  in sysfs for this CPU.
  * @num - CPU number to use when creating the device.
  *
  * Initialize and register the CPU device.
@@ -118,7 +119,7 @@
 
 	error = sysdev_register(&cpu->sysdev);
 
-	if (!error && !cpu->no_control)
+	if (!error && cpu->hotpluggable)
 		register_cpu_control(cpu);
 	if (!error)
 		cpu_sys_devices[num] = &cpu->sysdev;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index c5d6bb4..510e788 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -26,6 +26,50 @@
 #define to_drv(node) container_of(node, struct device_driver, kobj.entry)
 
 
+static void driver_bound(struct device *dev)
+{
+	if (klist_node_attached(&dev->knode_driver)) {
+		printk(KERN_WARNING "%s: device %s already bound\n",
+			__FUNCTION__, kobject_name(&dev->kobj));
+		return;
+	}
+
+	pr_debug("bound device '%s' to driver '%s'\n",
+		 dev->bus_id, dev->driver->name);
+
+	if (dev->bus)
+		blocking_notifier_call_chain(&dev->bus->bus_notifier,
+					     BUS_NOTIFY_BOUND_DRIVER, dev);
+
+	klist_add_tail(&dev->knode_driver, &dev->driver->klist_devices);
+}
+
+static int driver_sysfs_add(struct device *dev)
+{
+	int ret;
+
+	ret = sysfs_create_link(&dev->driver->kobj, &dev->kobj,
+			  kobject_name(&dev->kobj));
+	if (ret == 0) {
+		ret = sysfs_create_link(&dev->kobj, &dev->driver->kobj,
+					"driver");
+		if (ret)
+			sysfs_remove_link(&dev->driver->kobj,
+					kobject_name(&dev->kobj));
+	}
+	return ret;
+}
+
+static void driver_sysfs_remove(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+
+	if (drv) {
+		sysfs_remove_link(&drv->kobj, kobject_name(&dev->kobj));
+		sysfs_remove_link(&dev->kobj, "driver");
+	}
+}
+
 /**
  *	device_bind_driver - bind a driver to one device.
  *	@dev:	device.
@@ -42,27 +86,8 @@
  */
 int device_bind_driver(struct device *dev)
 {
-	int ret;
-
-	if (klist_node_attached(&dev->knode_driver)) {
-		printk(KERN_WARNING "%s: device %s already bound\n",
-			__FUNCTION__, kobject_name(&dev->kobj));
-		return 0;
-	}
-
-	pr_debug("bound device '%s' to driver '%s'\n",
-		 dev->bus_id, dev->driver->name);
-	klist_add_tail(&dev->knode_driver, &dev->driver->klist_devices);
-	ret = sysfs_create_link(&dev->driver->kobj, &dev->kobj,
-			  kobject_name(&dev->kobj));
-	if (ret == 0) {
-		ret = sysfs_create_link(&dev->kobj, &dev->driver->kobj,
-					"driver");
-		if (ret)
-			sysfs_remove_link(&dev->driver->kobj,
-					kobject_name(&dev->kobj));
-	}
-	return ret;
+	driver_bound(dev);
+	return driver_sysfs_add(dev);
 }
 
 struct stupid_thread_structure {
@@ -85,30 +110,32 @@
 		 drv->bus->name, drv->name, dev->bus_id);
 
 	dev->driver = drv;
+	if (driver_sysfs_add(dev)) {
+		printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
+			__FUNCTION__, dev->bus_id);
+		goto probe_failed;
+	}
+
 	if (dev->bus->probe) {
 		ret = dev->bus->probe(dev);
-		if (ret) {
-			dev->driver = NULL;
+		if (ret)
 			goto probe_failed;
-		}
 	} else if (drv->probe) {
 		ret = drv->probe(dev);
-		if (ret) {
-			dev->driver = NULL;
+		if (ret)
 			goto probe_failed;
-		}
 	}
-	if (device_bind_driver(dev)) {
-		printk(KERN_ERR "%s: device_bind_driver(%s) failed\n",
-			__FUNCTION__, dev->bus_id);
-		/* How does undo a ->probe?  We're screwed. */
-	}
+
+	driver_bound(dev);
 	ret = 1;
 	pr_debug("%s: Bound Device %s to Driver %s\n",
 		 drv->bus->name, dev->bus_id, drv->name);
 	goto done;
 
 probe_failed:
+	driver_sysfs_remove(dev);
+	dev->driver = NULL;
+
 	if (ret == -ENODEV || ret == -ENXIO) {
 		/* Driver matched, but didn't support device
 		 * or device not found.
@@ -284,10 +311,15 @@
 	drv = dev->driver;
 	if (drv) {
 		get_driver(drv);
-		sysfs_remove_link(&drv->kobj, kobject_name(&dev->kobj));
+		driver_sysfs_remove(dev);
 		sysfs_remove_link(&dev->kobj, "driver");
 		klist_remove(&dev->knode_driver);
 
+		if (dev->bus)
+			blocking_notifier_call_chain(&dev->bus->bus_notifier,
+						     BUS_NOTIFY_UNBIND_DRIVER,
+						     dev);
+
 		if (dev->bus && dev->bus->remove)
 			dev->bus->remove(dev);
 		else if (drv->remove)
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c
index b2efbd4..dbe0735 100644
--- a/drivers/base/dmapool.c
+++ b/drivers/base/dmapool.c
@@ -126,7 +126,7 @@
 	} else if (allocation < size)
 		return NULL;
 
-	if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL)))
+	if (!(retval = kmalloc (sizeof *retval, GFP_KERNEL)))
 		return retval;
 
 	strlcpy (retval->name, name, sizeof retval->name);
@@ -297,7 +297,7 @@
 			}
 		}
 	}
-	if (!(page = pool_alloc_page (pool, SLAB_ATOMIC))) {
+	if (!(page = pool_alloc_page (pool, GFP_ATOMIC))) {
 		if (mem_flags & __GFP_WAIT) {
 			DECLARE_WAITQUEUE (wait, current);
 
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 1461569..4bad287 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -21,6 +21,8 @@
 #include <linux/firmware.h>
 #include "base.h"
 
+#define to_dev(obj) container_of(obj, struct device, kobj)
+
 MODULE_AUTHOR("Manuel Estrada Sainz <ranty@debian.org>");
 MODULE_DESCRIPTION("Multi purpose firmware loading support");
 MODULE_LICENSE("GPL");
@@ -86,12 +88,12 @@
 
 static CLASS_ATTR(timeout, 0644, firmware_timeout_show, firmware_timeout_store);
 
-static void  fw_class_dev_release(struct class_device *class_dev);
+static void fw_dev_release(struct device *dev);
 
-static int firmware_class_uevent(struct class_device *class_dev, char **envp,
-				 int num_envp, char *buffer, int buffer_size)
+static int firmware_uevent(struct device *dev, char **envp, int num_envp,
+			   char *buffer, int buffer_size)
 {
-	struct firmware_priv *fw_priv = class_get_devdata(class_dev);
+	struct firmware_priv *fw_priv = dev_get_drvdata(dev);
 	int i = 0, len = 0;
 
 	if (!test_bit(FW_STATUS_READY, &fw_priv->status))
@@ -110,21 +112,21 @@
 
 static struct class firmware_class = {
 	.name		= "firmware",
-	.uevent		= firmware_class_uevent,
-	.release	= fw_class_dev_release,
+	.dev_uevent	= firmware_uevent,
+	.dev_release	= fw_dev_release,
 };
 
-static ssize_t
-firmware_loading_show(struct class_device *class_dev, char *buf)
+static ssize_t firmware_loading_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
 {
-	struct firmware_priv *fw_priv = class_get_devdata(class_dev);
+	struct firmware_priv *fw_priv = dev_get_drvdata(dev);
 	int loading = test_bit(FW_STATUS_LOADING, &fw_priv->status);
 	return sprintf(buf, "%d\n", loading);
 }
 
 /**
  * firmware_loading_store - set value in the 'loading' control file
- * @class_dev: class_device pointer
+ * @dev: device pointer
  * @buf: buffer to scan for loading control value
  * @count: number of bytes in @buf
  *
@@ -134,11 +136,11 @@
  *	 0: Conclude the load and hand the data to the driver code.
  *	-1: Conclude the load with an error and discard any written data.
  **/
-static ssize_t
-firmware_loading_store(struct class_device *class_dev,
-		       const char *buf, size_t count)
+static ssize_t firmware_loading_store(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
 {
-	struct firmware_priv *fw_priv = class_get_devdata(class_dev);
+	struct firmware_priv *fw_priv = dev_get_drvdata(dev);
 	int loading = simple_strtol(buf, NULL, 10);
 
 	switch (loading) {
@@ -174,15 +176,14 @@
 	return count;
 }
 
-static CLASS_DEVICE_ATTR(loading, 0644,
-			firmware_loading_show, firmware_loading_store);
+static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
 
 static ssize_t
 firmware_data_read(struct kobject *kobj,
 		   char *buffer, loff_t offset, size_t count)
 {
-	struct class_device *class_dev = to_class_dev(kobj);
-	struct firmware_priv *fw_priv = class_get_devdata(class_dev);
+	struct device *dev = to_dev(kobj);
+	struct firmware_priv *fw_priv = dev_get_drvdata(dev);
 	struct firmware *fw;
 	ssize_t ret_count = count;
 
@@ -234,7 +235,7 @@
 
 /**
  * firmware_data_write - write method for firmware
- * @kobj: kobject for the class_device
+ * @kobj: kobject for the device
  * @buffer: buffer being written
  * @offset: buffer offset for write in total data store area
  * @count: buffer size
@@ -246,8 +247,8 @@
 firmware_data_write(struct kobject *kobj,
 		    char *buffer, loff_t offset, size_t count)
 {
-	struct class_device *class_dev = to_class_dev(kobj);
-	struct firmware_priv *fw_priv = class_get_devdata(class_dev);
+	struct device *dev = to_dev(kobj);
+	struct firmware_priv *fw_priv = dev_get_drvdata(dev);
 	struct firmware *fw;
 	ssize_t retval;
 
@@ -280,13 +281,12 @@
 	.write = firmware_data_write,
 };
 
-static void
-fw_class_dev_release(struct class_device *class_dev)
+static void fw_dev_release(struct device *dev)
 {
-	struct firmware_priv *fw_priv = class_get_devdata(class_dev);
+	struct firmware_priv *fw_priv = dev_get_drvdata(dev);
 
 	kfree(fw_priv);
-	kfree(class_dev);
+	kfree(dev);
 
 	module_put(THIS_MODULE);
 }
@@ -298,26 +298,23 @@
 	fw_load_abort(fw_priv);
 }
 
-static inline void
-fw_setup_class_device_id(struct class_device *class_dev, struct device *dev)
+static inline void fw_setup_device_id(struct device *f_dev, struct device *dev)
 {
 	/* XXX warning we should watch out for name collisions */
-	strlcpy(class_dev->class_id, dev->bus_id, BUS_ID_SIZE);
+	strlcpy(f_dev->bus_id, dev->bus_id, BUS_ID_SIZE);
 }
 
-static int
-fw_register_class_device(struct class_device **class_dev_p,
-			 const char *fw_name, struct device *device)
+static int fw_register_device(struct device **dev_p, const char *fw_name,
+			      struct device *device)
 {
 	int retval;
 	struct firmware_priv *fw_priv = kzalloc(sizeof(*fw_priv),
 						GFP_KERNEL);
-	struct class_device *class_dev = kzalloc(sizeof(*class_dev),
-						 GFP_KERNEL);
+	struct device *f_dev = kzalloc(sizeof(*f_dev), GFP_KERNEL);
 
-	*class_dev_p = NULL;
+	*dev_p = NULL;
 
-	if (!fw_priv || !class_dev) {
+	if (!fw_priv || !f_dev) {
 		printk(KERN_ERR "%s: kmalloc failed\n", __FUNCTION__);
 		retval = -ENOMEM;
 		goto error_kfree;
@@ -331,55 +328,54 @@
 	fw_priv->timeout.data = (u_long) fw_priv;
 	init_timer(&fw_priv->timeout);
 
-	fw_setup_class_device_id(class_dev, device);
-	class_dev->dev = device;
-	class_dev->class = &firmware_class;
-	class_set_devdata(class_dev, fw_priv);
-	retval = class_device_register(class_dev);
+	fw_setup_device_id(f_dev, device);
+	f_dev->parent = device;
+	f_dev->class = &firmware_class;
+	dev_set_drvdata(f_dev, fw_priv);
+	retval = device_register(f_dev);
 	if (retval) {
-		printk(KERN_ERR "%s: class_device_register failed\n",
+		printk(KERN_ERR "%s: device_register failed\n",
 		       __FUNCTION__);
 		goto error_kfree;
 	}
-	*class_dev_p = class_dev;
+	*dev_p = f_dev;
 	return 0;
 
 error_kfree:
 	kfree(fw_priv);
-	kfree(class_dev);
+	kfree(f_dev);
 	return retval;
 }
 
-static int
-fw_setup_class_device(struct firmware *fw, struct class_device **class_dev_p,
-		      const char *fw_name, struct device *device, int uevent)
+static int fw_setup_device(struct firmware *fw, struct device **dev_p,
+			   const char *fw_name, struct device *device,
+			   int uevent)
 {
-	struct class_device *class_dev;
+	struct device *f_dev;
 	struct firmware_priv *fw_priv;
 	int retval;
 
-	*class_dev_p = NULL;
-	retval = fw_register_class_device(&class_dev, fw_name, device);
+	*dev_p = NULL;
+	retval = fw_register_device(&f_dev, fw_name, device);
 	if (retval)
 		goto out;
 
 	/* Need to pin this module until class device is destroyed */
 	__module_get(THIS_MODULE);
 
-	fw_priv = class_get_devdata(class_dev);
+	fw_priv = dev_get_drvdata(f_dev);
 
 	fw_priv->fw = fw;
-	retval = sysfs_create_bin_file(&class_dev->kobj, &fw_priv->attr_data);
+	retval = sysfs_create_bin_file(&f_dev->kobj, &fw_priv->attr_data);
 	if (retval) {
 		printk(KERN_ERR "%s: sysfs_create_bin_file failed\n",
 		       __FUNCTION__);
 		goto error_unreg;
 	}
 
-	retval = class_device_create_file(class_dev,
-					  &class_device_attr_loading);
+	retval = device_create_file(f_dev, &dev_attr_loading);
 	if (retval) {
-		printk(KERN_ERR "%s: class_device_create_file failed\n",
+		printk(KERN_ERR "%s: device_create_file failed\n",
 		       __FUNCTION__);
 		goto error_unreg;
 	}
@@ -388,11 +384,11 @@
                 set_bit(FW_STATUS_READY, &fw_priv->status);
         else
                 set_bit(FW_STATUS_READY_NOHOTPLUG, &fw_priv->status);
-	*class_dev_p = class_dev;
+	*dev_p = f_dev;
 	goto out;
 
 error_unreg:
-	class_device_unregister(class_dev);
+	device_unregister(f_dev);
 out:
 	return retval;
 }
@@ -401,7 +397,7 @@
 _request_firmware(const struct firmware **firmware_p, const char *name,
 		 struct device *device, int uevent)
 {
-	struct class_device *class_dev;
+	struct device *f_dev;
 	struct firmware_priv *fw_priv;
 	struct firmware *firmware;
 	int retval;
@@ -417,12 +413,11 @@
 		goto out;
 	}
 
-	retval = fw_setup_class_device(firmware, &class_dev, name, device,
-				       uevent);
+	retval = fw_setup_device(firmware, &f_dev, name, device, uevent);
 	if (retval)
 		goto error_kfree_fw;
 
-	fw_priv = class_get_devdata(class_dev);
+	fw_priv = dev_get_drvdata(f_dev);
 
 	if (uevent) {
 		if (loading_timeout > 0) {
@@ -430,7 +425,7 @@
 			add_timer(&fw_priv->timeout);
 		}
 
-		kobject_uevent(&class_dev->kobj, KOBJ_ADD);
+		kobject_uevent(&f_dev->kobj, KOBJ_ADD);
 		wait_for_completion(&fw_priv->completion);
 		set_bit(FW_STATUS_DONE, &fw_priv->status);
 		del_timer_sync(&fw_priv->timeout);
@@ -445,7 +440,7 @@
 	}
 	fw_priv->fw = NULL;
 	mutex_unlock(&fw_lock);
-	class_device_unregister(class_dev);
+	device_unregister(f_dev);
 	goto out;
 
 error_kfree_fw:
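The firmware_class conversion above only changes the sysfs plumbing from class_device to plain struct device; the consumer-side API, request_firmware()/release_firmware(), is unaffected. A minimal sketch of a driver using it (the image name is illustrative):

	#include <linux/firmware.h>

	static int foo_load_firmware(struct device *dev)
	{
		const struct firmware *fw;
		int err;

		err = request_firmware(&fw, "foo/firmware.bin", dev);
		if (err)
			return err;

		/* fw->data / fw->size carry the image fed in through sysfs;
		 * hand them to the hardware here. */
		dev_info(dev, "loaded %zu bytes of firmware\n", fw->size);

		release_firmware(fw);
		return 0;
	}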
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index c6b7d9c..74b9679 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -290,9 +290,8 @@
 
 static int block_size_init(void)
 {
-	sysfs_create_file(&memory_sysdev_class.kset.kobj,
-		&class_attr_block_size_bytes.attr);
-	return 0;
+	return sysfs_create_file(&memory_sysdev_class.kset.kobj,
+				&class_attr_block_size_bytes.attr);
 }
 
 /*
@@ -323,12 +322,14 @@
 
 static int memory_probe_init(void)
 {
-	sysfs_create_file(&memory_sysdev_class.kset.kobj,
-		&class_attr_probe.attr);
-	return 0;
+	return sysfs_create_file(&memory_sysdev_class.kset.kobj,
+				&class_attr_probe.attr);
 }
 #else
-#define memory_probe_init(...)	do {} while (0)
+static inline int memory_probe_init(void)
+{
+	return 0;
+}
 #endif
 
 /*
@@ -431,9 +432,12 @@
 {
 	unsigned int i;
 	int ret;
+	int err;
 
 	memory_sysdev_class.kset.uevent_ops = &memory_uevent_ops;
 	ret = sysdev_class_register(&memory_sysdev_class);
+	if (ret)
+		goto out;
 
 	/*
 	 * Create entries for memory sections that were found
@@ -442,11 +446,19 @@
 	for (i = 0; i < NR_MEM_SECTIONS; i++) {
 		if (!valid_section_nr(i))
 			continue;
-		add_memory_block(0, __nr_to_section(i), MEM_ONLINE, 0);
+		err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE, 0);
+		if (!ret)
+			ret = err;
 	}
 
-	memory_probe_init();
-	block_size_init();
-
+	err = memory_probe_init();
+	if (!ret)
+		ret = err;
+	err = block_size_init();
+	if (!ret)
+		ret = err;
+out:
+	if (ret)
+		printk(KERN_ERR "%s() failed: %d\n", __FUNCTION__, ret);
 	return ret;
 }
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 940ce41..d1df4a0 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -388,6 +388,11 @@
 	return drv->probe(dev);
 }
 
+static int platform_drv_probe_fail(struct device *_dev)
+{
+	return -ENXIO;
+}
+
 static int platform_drv_remove(struct device *_dev)
 {
 	struct platform_driver *drv = to_platform_driver(_dev->driver);
@@ -451,6 +456,49 @@
 }
 EXPORT_SYMBOL_GPL(platform_driver_unregister);
 
+/**
+ * platform_driver_probe - register driver for non-hotpluggable device
+ * @drv: platform driver structure
+ * @probe: the driver probe routine, probably from an __init section
+ *
+ * Use this instead of platform_driver_register() when you know the device
+ * is not hotpluggable and has already been registered, and you want to
+ * remove its run-once probe() infrastructure from memory after the driver
+ * has bound to the device.
+ *
+ * One typical use for this would be with drivers for controllers integrated
+ * into system-on-chip processors, where the controller devices have been
+ * configured as part of board setup.
+ *
+ * Returns zero if the driver registered and bound to a device, else returns
+ * a negative error code and with the driver not registered.
+ */
+int platform_driver_probe(struct platform_driver *drv,
+		int (*probe)(struct platform_device *))
+{
+	int retval, code;
+
+	/* temporary section violation during probe() */
+	drv->probe = probe;
+	retval = code = platform_driver_register(drv);
+
+	/* Fixup that section violation, being paranoid about code scanning
+	 * the list of drivers in order to probe new devices.  Check to see
+	 * if the probe was successful, and make sure any forced probes of
+	 * new devices fail.
+	 */
+	spin_lock(&platform_bus_type.klist_drivers.k_lock);
+	drv->probe = NULL;
+	if (code == 0 && list_empty(&drv->driver.klist_devices.k_list))
+		retval = -ENODEV;
+	drv->driver.probe = platform_drv_probe_fail;
+	spin_unlock(&platform_bus_type.klist_drivers.k_lock);
+
+	if (code != retval)
+		platform_driver_unregister(drv);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(platform_driver_probe);
 
 /* modalias support enables more hands-off userspace setup:
  * (a) environment variable lets new-style hotplug events work once system is
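platform_driver_probe() above covers the case described in its kernel-doc: the device is already registered and not hotpluggable, so the probe routine can live in __init memory and be discarded after the one-shot bind. A minimal sketch of a caller, with illustrative names:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int __init foo_probe(struct platform_device *pdev)
	{
		/* claim resources, map registers, set up the device ... */
		return 0;
	}

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
		},
	};

	static int __init foo_init(void)
	{
		/* binds now or fails; no probe() is kept around for hotplug */
		return platform_driver_probe(&foo_driver, foo_probe);
	}
	module_init(foo_init);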
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 28dccb7..067a9e8 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -94,54 +94,61 @@
 	.name = "topology"
 };
 
+static cpumask_t topology_dev_map = CPU_MASK_NONE;
+
 /* Add/Remove cpu_topology interface for CPU device */
-static int __cpuinit topology_add_dev(struct sys_device * sys_dev)
+static int __cpuinit topology_add_dev(unsigned int cpu)
 {
-	return sysfs_create_group(&sys_dev->kobj, &topology_attr_group);
+	int rc;
+	struct sys_device *sys_dev = get_cpu_sysdev(cpu);
+
+	rc = sysfs_create_group(&sys_dev->kobj, &topology_attr_group);
+	if (!rc)
+		cpu_set(cpu, topology_dev_map);
+	return rc;
 }
 
-static int __cpuinit topology_remove_dev(struct sys_device * sys_dev)
+static void __cpuinit topology_remove_dev(unsigned int cpu)
 {
+	struct sys_device *sys_dev = get_cpu_sysdev(cpu);
+
+	if (!cpu_isset(cpu, topology_dev_map))
+		return;
+	cpu_clear(cpu, topology_dev_map);
 	sysfs_remove_group(&sys_dev->kobj, &topology_attr_group);
-	return 0;
 }
 
 static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
-		unsigned long action, void *hcpu)
+					   unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct sys_device *sys_dev;
+	int rc = 0;
 
-	sys_dev = get_cpu_sysdev(cpu);
 	switch (action) {
-	case CPU_ONLINE:
-		topology_add_dev(sys_dev);
+	case CPU_UP_PREPARE:
+		rc = topology_add_dev(cpu);
 		break;
+	case CPU_UP_CANCELED:
 	case CPU_DEAD:
-		topology_remove_dev(sys_dev);
+		topology_remove_dev(cpu);
 		break;
 	}
-	return NOTIFY_OK;
+	return rc ? NOTIFY_BAD : NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata topology_cpu_notifier =
-{
-	.notifier_call = topology_cpu_callback,
-};
-
 static int __cpuinit topology_sysfs_init(void)
 {
-	int i;
+	int cpu;
+	int rc;
 
-	for_each_online_cpu(i) {
-		topology_cpu_callback(&topology_cpu_notifier, CPU_ONLINE,
-				(void *)(long)i);
+	for_each_online_cpu(cpu) {
+		rc = topology_add_dev(cpu);
+		if (rc)
+			return rc;
 	}
-
-	register_hotcpu_notifier(&topology_cpu_notifier);
+	hotcpu_notifier(topology_cpu_callback, 0);
 
 	return 0;
 }
 
 device_initcall(topology_sysfs_init);
-
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 742d074..8d81a3a 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -324,13 +324,13 @@
       Command->Next = Controller->FreeCommands;
       Controller->FreeCommands = Command;
       Controller->Commands[CommandIdentifier-1] = Command;
-      ScatterGatherCPU = pci_pool_alloc(ScatterGatherPool, SLAB_ATOMIC,
+      ScatterGatherCPU = pci_pool_alloc(ScatterGatherPool, GFP_ATOMIC,
 							&ScatterGatherDMA);
       if (ScatterGatherCPU == NULL)
 	  return DAC960_Failure(Controller, "AUXILIARY STRUCTURE CREATION");
 
       if (RequestSensePool != NULL) {
-  	  RequestSenseCPU = pci_pool_alloc(RequestSensePool, SLAB_ATOMIC,
+  	  RequestSenseCPU = pci_pool_alloc(RequestSensePool, GFP_ATOMIC,
 						&RequestSenseDMA);
   	  if (RequestSenseCPU == NULL) {
                 pci_pool_free(ScatterGatherPool, ScatterGatherCPU,
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 17dc222..8507244 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -168,7 +168,8 @@
 
 config CISS_SCSI_TAPE
 	bool "SCSI tape drive support for Smart Array 5xxx"
-	depends on BLK_CPQ_CISS_DA && SCSI && PROC_FS
+	depends on BLK_CPQ_CISS_DA && PROC_FS
+	depends on SCSI=y || SCSI=BLK_CPQ_CISS_DA
 	help
 	  When enabled (Y), this option allows SCSI tape drives and SCSI medium
 	  changers (tape robots) to be accessed via a Compaq 5xxx array 
@@ -305,6 +306,7 @@
 config BLK_DEV_CRYPTOLOOP
 	tristate "Cryptoloop Support"
 	select CRYPTO
+	select CRYPTO_CBC
 	depends on BLK_DEV_LOOP
 	---help---
 	  Say Y here if you want to be able to use the ciphers that are 
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 6d11122..2308e83 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -159,7 +159,7 @@
 void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor);
 void aoecmd_ata_rsp(struct sk_buff *);
 void aoecmd_cfg_rsp(struct sk_buff *);
-void aoecmd_sleepwork(void *vp);
+void aoecmd_sleepwork(struct work_struct *);
 struct sk_buff *new_skb(ulong);
 
 int aoedev_init(void);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index aa25f8b..478489c 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -12,7 +12,7 @@
 #include <linux/netdevice.h>
 #include "aoe.h"
 
-static kmem_cache_t *buf_pool_cache;
+static struct kmem_cache *buf_pool_cache;
 
 static ssize_t aoedisk_show_state(struct gendisk * disk, char *page)
 {
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8a13b1a..97f7f53 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -408,9 +408,9 @@
 /* this function performs work that has been deferred until sleeping is OK
  */
 void
-aoecmd_sleepwork(void *vp)
+aoecmd_sleepwork(struct work_struct *work)
 {
-	struct aoedev *d = (struct aoedev *) vp;
+	struct aoedev *d = container_of(work, struct aoedev, work);
 
 	if (d->flags & DEVFL_GDALLOC)
 		aoeblk_gdalloc(d);
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 6125921..05a9719 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -88,7 +88,7 @@
  			kfree(d);
 		return NULL;
 	}
-	INIT_WORK(&d->work, aoecmd_sleepwork, d);
+	INIT_WORK(&d->work, aoecmd_sleepwork);
 	spin_lock_init(&d->lock);
 	init_timer(&d->timer);
 	d->timer.data = (ulong) d;
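
The aoe hunks above follow the tree-wide workqueue conversion: handlers now take the struct work_struct itself and recover their container with container_of() instead of receiving a void * argument. A minimal sketch of the new pattern, with hypothetical foo_* names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo_dev {
	struct work_struct work;
	/* ... driver state ... */
};

/* old style was: INIT_WORK(&dev->work, foo_work_fn, dev) with a void *arg */
static void foo_work_fn(struct work_struct *work)
{
	struct foo_dev *dev = container_of(work, struct foo_dev, work);

	/* use dev here; no opaque data pointer is passed any more */
	(void)dev;
}

static void foo_setup(struct foo_dev *dev)
{
	INIT_WORK(&dev->work, foo_work_fn);
}
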
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 4105c3b..892e092 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -47,14 +47,15 @@
 #include <linux/completion.h>
 
 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-#define DRIVER_NAME "HP CISS Driver (v 3.6.10)"
-#define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,10)
+#define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
+#define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
 
 /* Embedded module documentation macros - see modules.h */
 MODULE_AUTHOR("Hewlett-Packard Company");
-MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.10");
+MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
 			" SA6i P600 P800 P400 P400i E200 E200i E500");
+MODULE_VERSION("3.6.14");
 MODULE_LICENSE("GPL");
 
 #include "cciss_cmd.h"
@@ -81,7 +82,9 @@
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3213},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3214},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3215},
-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3233},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3237},
+	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
+		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 	{0,}
 };
 
@@ -90,27 +93,29 @@
 /*  board_id = Subsystem Device ID & Vendor ID
  *  product = Marketing Name for the board
  *  access = Address of the struct of function pointers
+ *  nr_cmds = Number of commands supported by controller
  */
 static struct board_type products[] = {
-	{0x40700E11, "Smart Array 5300", &SA5_access},
-	{0x40800E11, "Smart Array 5i", &SA5B_access},
-	{0x40820E11, "Smart Array 532", &SA5B_access},
-	{0x40830E11, "Smart Array 5312", &SA5B_access},
-	{0x409A0E11, "Smart Array 641", &SA5_access},
-	{0x409B0E11, "Smart Array 642", &SA5_access},
-	{0x409C0E11, "Smart Array 6400", &SA5_access},
-	{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
-	{0x40910E11, "Smart Array 6i", &SA5_access},
-	{0x3225103C, "Smart Array P600", &SA5_access},
-	{0x3223103C, "Smart Array P800", &SA5_access},
-	{0x3234103C, "Smart Array P400", &SA5_access},
-	{0x3235103C, "Smart Array P400i", &SA5_access},
-	{0x3211103C, "Smart Array E200i", &SA5_access},
-	{0x3212103C, "Smart Array E200", &SA5_access},
-	{0x3213103C, "Smart Array E200i", &SA5_access},
-	{0x3214103C, "Smart Array E200i", &SA5_access},
-	{0x3215103C, "Smart Array E200i", &SA5_access},
-	{0x3233103C, "Smart Array E500", &SA5_access},
+	{0x40700E11, "Smart Array 5300", &SA5_access, 512},
+	{0x40800E11, "Smart Array 5i", &SA5B_access, 512},
+	{0x40820E11, "Smart Array 532", &SA5B_access, 512},
+	{0x40830E11, "Smart Array 5312", &SA5B_access, 512},
+	{0x409A0E11, "Smart Array 641", &SA5_access, 512},
+	{0x409B0E11, "Smart Array 642", &SA5_access, 512},
+	{0x409C0E11, "Smart Array 6400", &SA5_access, 512},
+	{0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
+	{0x40910E11, "Smart Array 6i", &SA5_access, 512},
+	{0x3225103C, "Smart Array P600", &SA5_access, 512},
+	{0x3223103C, "Smart Array P800", &SA5_access, 512},
+	{0x3234103C, "Smart Array P400", &SA5_access, 512},
+	{0x3235103C, "Smart Array P400i", &SA5_access, 512},
+	{0x3211103C, "Smart Array E200i", &SA5_access, 120},
+	{0x3212103C, "Smart Array E200", &SA5_access, 120},
+	{0x3213103C, "Smart Array E200i", &SA5_access, 120},
+	{0x3214103C, "Smart Array E200i", &SA5_access, 120},
+	{0x3215103C, "Smart Array E200i", &SA5_access, 120},
+	{0x3237103C, "Smart Array E500", &SA5_access, 512},
+	{0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
 };
 
 /* How long to wait (in milliseconds) for board to go into simple mode */
@@ -121,7 +126,6 @@
 #define MAX_CMD_RETRIES 3
 
 #define READ_AHEAD 	 1024
-#define NR_CMDS		 384	/* #commands that can be outstanding */
 #define MAX_CTLR	32
 
 /* Originally cciss driver only supports 8 major numbers */
@@ -137,7 +141,6 @@
 		       unsigned int cmd, unsigned long arg);
 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 
-static int revalidate_allvol(ctlr_info_t *host);
 static int cciss_revalidate(struct gendisk *disk);
 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
@@ -265,6 +268,7 @@
 		       "Firmware Version: %c%c%c%c\n"
 		       "IRQ: %d\n"
 		       "Logical drives: %d\n"
+		       "Max sectors: %d\n"
 		       "Current Q depth: %d\n"
 		       "Current # commands on controller: %d\n"
 		       "Max Q depth since init: %d\n"
@@ -275,7 +279,9 @@
 		       (unsigned long)h->board_id,
 		       h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
 		       h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
-		       h->num_luns, h->Qdepth, h->commands_outstanding,
+		       h->num_luns,
+		       h->cciss_max_sectors,
+		       h->Qdepth, h->commands_outstanding,
 		       h->maxQsinceinit, h->max_outstanding, h->maxSG);
 
 	pos += size;
@@ -400,8 +406,8 @@
 	} else {		/* get it out of the controllers pool */
 
 		do {
-			i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
-			if (i == NR_CMDS)
+			i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
+			if (i == h->nr_cmds)
 				return NULL;
 		} while (test_and_set_bit
 			 (i & (BITS_PER_LONG - 1),
@@ -487,7 +493,7 @@
 	 * but I'm already using way to many device nodes to claim another one
 	 * for "raw controller".
 	 */
-	if (drv->nr_blocks == 0) {
+	if (drv->heads == 0) {
 		if (iminor(inode) != 0) {	/* not node 0? */
 			/* if not node 0 make sure it is a partition = 0 */
 			if (iminor(inode) & 0x0f) {
@@ -850,9 +856,7 @@
 		}
 
 	case CCISS_REVALIDVOLS:
-		if (bdev != bdev->bd_contains || drv != host->drv)
-			return -ENXIO;
-		return revalidate_allvol(host);
+		return rebuild_lun_table(host, NULL);
 
 	case CCISS_GETLUNINFO:{
 			LogvolInfo_struct luninfo;
@@ -1152,75 +1156,6 @@
 	}
 }
 
-/*
- * revalidate_allvol is for online array config utilities.  After a
- * utility reconfigures the drives in the array, it can use this function
- * (through an ioctl) to make the driver zap any previous disk structs for
- * that controller and get new ones.
- *
- * Right now I'm using the getgeometry() function to do this, but this
- * function should probably be finer grained and allow you to revalidate one
- * particular logical volume (instead of all of them on a particular
- * controller).
- */
-static int revalidate_allvol(ctlr_info_t *host)
-{
-	int ctlr = host->ctlr, i;
-	unsigned long flags;
-
-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-	if (host->usage_count > 1) {
-		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-		printk(KERN_WARNING "cciss: Device busy for volume"
-		       " revalidation (usage=%d)\n", host->usage_count);
-		return -EBUSY;
-	}
-	host->usage_count++;
-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-
-	for (i = 0; i < NWD; i++) {
-		struct gendisk *disk = host->gendisk[i];
-		if (disk) {
-			request_queue_t *q = disk->queue;
-
-			if (disk->flags & GENHD_FL_UP)
-				del_gendisk(disk);
-			if (q)
-				blk_cleanup_queue(q);
-		}
-	}
-
-	/*
-	 * Set the partition and block size structures for all volumes
-	 * on this controller to zero.  We will reread all of this data
-	 */
-	memset(host->drv, 0, sizeof(drive_info_struct)
-	       * CISS_MAX_LUN);
-	/*
-	 * Tell the array controller not to give us any interrupts while
-	 * we check the new geometry.  Then turn interrupts back on when
-	 * we're done.
-	 */
-	host->access.set_intr_mask(host, CCISS_INTR_OFF);
-	cciss_getgeometry(ctlr);
-	host->access.set_intr_mask(host, CCISS_INTR_ON);
-
-	/* Loop through each real device */
-	for (i = 0; i < NWD; i++) {
-		struct gendisk *disk = host->gendisk[i];
-		drive_info_struct *drv = &(host->drv[i]);
-		/* we must register the controller even if no disks exist */
-		/* this is for the online array utilities */
-		if (!drv->heads && i)
-			continue;
-		blk_queue_hardsect_size(drv->queue, drv->block_size);
-		set_capacity(disk, drv->nr_blocks);
-		add_disk(disk);
-	}
-	host->usage_count--;
-	return 0;
-}
-
 static inline void complete_buffers(struct bio *bio, int status)
 {
 	while (bio) {
@@ -1243,7 +1178,7 @@
 	 * in case the interrupt we serviced was from an ioctl and did not
 	 * free any new commands.
 	 */
-	if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
+	if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
 		return;
 
 	/* We have room on the queue for more commands.  Now we need to queue
@@ -1262,7 +1197,7 @@
 		/* check to see if we have maxed out the number of commands
 		 * that can be placed on the queue.
 		 */
-		if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
+		if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
 			if (curr_queue == start_queue) {
 				h->next_to_run =
 				    (start_queue + 1) % (h->highest_lun + 1);
@@ -1380,6 +1315,11 @@
 	/* if it's the controller it's already added */
 	if (drv_index) {
 		disk->queue = blk_init_queue(do_cciss_request, &h->lock);
+		sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
+		disk->major = h->major;
+		disk->first_minor = drv_index << NWD_SHIFT;
+		disk->fops = &cciss_fops;
+		disk->private_data = &h->drv[drv_index];
 
 		/* Set up queue information */
 		disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
@@ -1391,7 +1331,7 @@
 		/* This is a limit in the driver and could be eliminated. */
 		blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
 
-		blk_queue_max_sectors(disk->queue, 512);
+		blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
 
 		blk_queue_softirq_done(disk->queue, cciss_softirq_done);
 
@@ -1458,11 +1398,6 @@
 
 	/* Set busy_configuring flag for this operation */
 	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
-	if (h->num_luns >= CISS_MAX_LUN) {
-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-		return -EINVAL;
-	}
-
 	if (h->busy_configuring) {
 		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 		return -EBUSY;
@@ -1495,17 +1430,8 @@
 					      0, 0, TYPE_CMD);
 
 		if (return_code == IO_OK) {
-			listlength |=
-			    (0xff & (unsigned int)(ld_buff->LUNListLength[0]))
-			    << 24;
-			listlength |=
-			    (0xff & (unsigned int)(ld_buff->LUNListLength[1]))
-			    << 16;
-			listlength |=
-			    (0xff & (unsigned int)(ld_buff->LUNListLength[2]))
-			    << 8;
-			listlength |=
-			    0xff & (unsigned int)(ld_buff->LUNListLength[3]);
+			listlength =
+				be32_to_cpu(*(__u32 *) ld_buff->LUNListLength);
 		} else {	/* reading number of logical volumes failed */
 			printk(KERN_WARNING "cciss: report logical volume"
 			       " command failed\n");
@@ -1556,6 +1482,14 @@
 				if (drv_index == -1)
 					goto freeret;
 
+				/*Check if the gendisk needs to be allocated */
+				if (!h->gendisk[drv_index]){
+					h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
+					if (!h->gendisk[drv_index]){
+						printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
+						goto mem_msg;
+					}
+				}
 			}
 			h->drv[drv_index].LunID = lunid;
 			cciss_update_drive_info(ctlr, drv_index);
@@ -1593,6 +1527,7 @@
 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
 			   int clear_all)
 {
+	int i;
 	ctlr_info_t *h = get_host(disk);
 
 	if (!capable(CAP_SYS_RAWIO))
@@ -1616,9 +1551,35 @@
 				del_gendisk(disk);
 			if (q) {
 				blk_cleanup_queue(q);
+				/* Set drv->queue to NULL so that we do not try
+				 * to call blk_start_queue on this queue in the
+				 * interrupt handler
+				 */
 				drv->queue = NULL;
 			}
+			/* If clear_all is set then we are deleting the logical
+			 * drive, not just refreshing its info.  For drives
+			 * other than disk 0 we will call put_disk.  We do not
+			 * do this for disk 0 as we need it to be able to
+			 * configure the controller.
+			*/
+			if (clear_all){
+				/* This isn't pretty, but we need to find the
+			 * disk in our array and NULL out the pointer.
+				 * This is so that we will call alloc_disk if
+				 * this index is used again later.
+				*/
+				for (i=0; i < CISS_MAX_LUN; i++){
+					if(h->gendisk[i] == disk){
+						h->gendisk[i] = NULL;
+						break;
+					}
+				}
+				put_disk(disk);
+			}
 		}
+	} else {
+		set_capacity(disk, 0);
 	}
 
 	--h->num_luns;
@@ -2136,7 +2097,7 @@
 
 	/* We've sent down an abort or reset, but something else
 	   has completed */
-	if (srl->ncompletions >= (NR_CMDS + 2)) {
+	if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
 		/* Uh oh.  No room to save it for later... */
 		printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
 		       "reject list overflow, command lost!\n", ctlr);
@@ -2673,7 +2634,7 @@
 			a1 = a;
 			if ((a & 0x04)) {
 				a2 = (a >> 3);
-				if (a2 >= NR_CMDS) {
+				if (a2 >= h->nr_cmds) {
 					printk(KERN_WARNING
 					       "cciss: controller cciss%d failed, stopping.\n",
 					       h->ctlr);
@@ -2827,23 +2788,21 @@
 		if (err > 0) {
 			printk(KERN_WARNING "cciss: only %d MSI-X vectors "
 			       "available\n", err);
+			goto default_int_mode;
 		} else {
 			printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
 			       err);
+			goto default_int_mode;
 		}
 	}
 	if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
 		if (!pci_enable_msi(pdev)) {
-			c->intr[SIMPLE_MODE_INT] = pdev->irq;
 			c->msi_vector = 1;
-			return;
 		} else {
 			printk(KERN_WARNING "cciss: MSI init failed\n");
-			c->intr[SIMPLE_MODE_INT] = pdev->irq;
-			return;
 		}
 	}
-      default_int_mode:
+default_int_mode:
 #endif				/* CONFIG_PCI_MSI */
 	/* if we get here we're going to use the default interrupt mode */
 	c->intr[SIMPLE_MODE_INT] = pdev->irq;
@@ -2956,16 +2915,10 @@
 		if (board_id == products[i].board_id) {
 			c->product_name = products[i].product_name;
 			c->access = *(products[i].access);
+			c->nr_cmds = products[i].nr_cmds;
 			break;
 		}
 	}
-	if (i == ARRAY_SIZE(products)) {
-		printk(KERN_WARNING "cciss: Sorry, I don't know how"
-		       " to access the Smart Array controller %08lx\n",
-		       (unsigned long)board_id);
-		err = -ENODEV;
-		goto err_out_free_res;
-	}
 	if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
 	    (readb(&c->cfgtable->Signature[1]) != 'I') ||
 	    (readb(&c->cfgtable->Signature[2]) != 'S') ||
@@ -2974,6 +2927,27 @@
 		err = -ENODEV;
 		goto err_out_free_res;
 	}
+	/* We didn't find the controller in our list. We know the
+	 * signature is valid. If it's an HP device let's try to
+	 * bind to the device and fire it up. Otherwise we bail.
+	 */
+	if (i == ARRAY_SIZE(products)) {
+		if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
+			c->product_name = products[i-1].product_name;
+			c->access = *(products[i-1].access);
+			c->nr_cmds = products[i-1].nr_cmds;
+			printk(KERN_WARNING "cciss: This is an unknown "
+				"Smart Array controller.\n"
+				"cciss: Please update to the latest driver "
+				"available from www.hp.com.\n");
+		} else {
+			printk(KERN_WARNING "cciss: Sorry, I don't know how"
+				" to access the Smart Array controller %08lx\n"
+					, (unsigned long)board_id);
+			err = -ENODEV;
+			goto err_out_free_res;
+		}
+	}
 #ifdef CONFIG_X86
 	{
 		/* Need to enable prefetch in the SCSI core for 6400 in x86 */
@@ -2984,6 +2958,17 @@
 	}
 #endif
 
+	/* Disabling DMA prefetch for the P600
+	 * An ASIC bug may result in a prefetch beyond
+	 * physical memory.
+	 */
+	if(board_id == 0x3225103C) {
+		__u32 dma_prefetch;
+		dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
+		dma_prefetch |= 0x8000;
+		writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
+	}
+
 #ifdef CCISS_DEBUG
 	printk("Trying to put board into Simple mode\n");
 #endif				/* CCISS_DEBUG */
@@ -3158,13 +3143,7 @@
 /* Returns -1 if no free entries are left.  */
 static int alloc_cciss_hba(void)
 {
-	struct gendisk *disk[NWD];
-	int i, n;
-	for (n = 0; n < NWD; n++) {
-		disk[n] = alloc_disk(1 << NWD_SHIFT);
-		if (!disk[n])
-			goto out;
-	}
+	int i;
 
 	for (i = 0; i < MAX_CTLR; i++) {
 		if (!hba[i]) {
@@ -3172,20 +3151,18 @@
 			p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
 			if (!p)
 				goto Enomem;
-			for (n = 0; n < NWD; n++)
-				p->gendisk[n] = disk[n];
+			p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
+			if (!p->gendisk[0])
+				goto Enomem;
 			hba[i] = p;
 			return i;
 		}
 	}
 	printk(KERN_WARNING "cciss: This driver supports a maximum"
 	       " of %d controllers.\n", MAX_CTLR);
-	goto out;
-      Enomem:
+	return -1;
+Enomem:
 	printk(KERN_ERR "cciss: out of memory.\n");
-      out:
-	while (n--)
-		put_disk(disk[n]);
 	return -1;
 }
 
@@ -3195,7 +3172,7 @@
 	int n;
 
 	hba[i] = NULL;
-	for (n = 0; n < NWD; n++)
+	for (n = 0; n < CISS_MAX_LUN; n++)
 		put_disk(p->gendisk[n]);
 	kfree(p);
 }
@@ -3208,9 +3185,8 @@
 static int __devinit cciss_init_one(struct pci_dev *pdev,
 				    const struct pci_device_id *ent)
 {
-	request_queue_t *q;
 	int i;
-	int j;
+	int j = 0;
 	int rc;
 	int dac;
 
@@ -3269,15 +3245,15 @@
 	       hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
 
 	hba[i]->cmd_pool_bits =
-	    kmalloc(((NR_CMDS + BITS_PER_LONG -
+	    kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
 		      1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
 	hba[i]->cmd_pool = (CommandList_struct *)
 	    pci_alloc_consistent(hba[i]->pdev,
-		    NR_CMDS * sizeof(CommandList_struct),
+		    hba[i]->nr_cmds * sizeof(CommandList_struct),
 		    &(hba[i]->cmd_pool_dhandle));
 	hba[i]->errinfo_pool = (ErrorInfo_struct *)
 	    pci_alloc_consistent(hba[i]->pdev,
-		    NR_CMDS * sizeof(ErrorInfo_struct),
+		    hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
 		    &(hba[i]->errinfo_pool_dhandle));
 	if ((hba[i]->cmd_pool_bits == NULL)
 	    || (hba[i]->cmd_pool == NULL)
@@ -3288,7 +3264,7 @@
 #ifdef CONFIG_CISS_SCSI_TAPE
 	hba[i]->scsi_rejects.complete =
 	    kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
-		    (NR_CMDS + 5), GFP_KERNEL);
+		    (hba[i]->nr_cmds + 5), GFP_KERNEL);
 	if (hba[i]->scsi_rejects.complete == NULL) {
 		printk(KERN_ERR "cciss: out of memory");
 		goto clean4;
@@ -3302,7 +3278,7 @@
 	/* command and error info recs zeroed out before
 	   they are used */
 	memset(hba[i]->cmd_pool_bits, 0,
-	       ((NR_CMDS + BITS_PER_LONG -
+	       ((hba[i]->nr_cmds + BITS_PER_LONG -
 		 1) / BITS_PER_LONG) * sizeof(unsigned long));
 
 #ifdef CCISS_DEBUG
@@ -3317,18 +3293,34 @@
 	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
 
 	cciss_procinit(i);
+
+	hba[i]->cciss_max_sectors = 2048;
+
 	hba[i]->busy_initializing = 0;
 
-	for (j = 0; j < NWD; j++) {	/* mfm */
+	do {
 		drive_info_struct *drv = &(hba[i]->drv[j]);
 		struct gendisk *disk = hba[i]->gendisk[j];
+		request_queue_t *q;
+
+		/* Check if the disk was allocated already */
+		if (!disk){
+			hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
+			disk = hba[i]->gendisk[j];
+		}
+
+		/* Check that the disk was successfully allocated */
+		if (!disk) {
+			printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
+			goto clean4;
+		}
 
 		q = blk_init_queue(do_cciss_request, &hba[i]->lock);
 		if (!q) {
 			printk(KERN_ERR
 			       "cciss:  unable to allocate queue for disk %d\n",
 			       j);
-			break;
+			goto clean4;
 		}
 		drv->queue = q;
 
@@ -3341,7 +3333,7 @@
 		/* This is a limit in the driver and could be eliminated. */
 		blk_queue_max_phys_segments(q, MAXSGENTRIES);
 
-		blk_queue_max_sectors(q, 512);
+		blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);
 
 		blk_queue_softirq_done(q, cciss_softirq_done);
 
@@ -3360,7 +3352,8 @@
 		blk_queue_hardsect_size(q, drv->block_size);
 		set_capacity(disk, drv->nr_blocks);
 		add_disk(disk);
-	}
+		j++;
+	} while (j <= hba[i]->highest_lun);
 
 	return 1;
 
@@ -3371,11 +3364,11 @@
 	kfree(hba[i]->cmd_pool_bits);
 	if (hba[i]->cmd_pool)
 		pci_free_consistent(hba[i]->pdev,
-				    NR_CMDS * sizeof(CommandList_struct),
+				    hba[i]->nr_cmds * sizeof(CommandList_struct),
 				    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
 	if (hba[i]->errinfo_pool)
 		pci_free_consistent(hba[i]->pdev,
-				    NR_CMDS * sizeof(ErrorInfo_struct),
+				    hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
 				    hba[i]->errinfo_pool,
 				    hba[i]->errinfo_pool_dhandle);
 	free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
@@ -3383,6 +3376,15 @@
 	unregister_blkdev(hba[i]->major, hba[i]->devname);
       clean1:
 	hba[i]->busy_initializing = 0;
+	/* cleanup any queues that may have been initialized */
+	for (j=0; j <= hba[i]->highest_lun; j++){
+		drive_info_struct *drv = &(hba[i]->drv[j]);
+		if (drv->queue)
+			blk_cleanup_queue(drv->queue);
+	}
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
 	free_hba(i);
 	return -1;
 }
@@ -3430,7 +3432,7 @@
 	remove_proc_entry(hba[i]->devname, proc_cciss);
 
 	/* remove it from the disk list */
-	for (j = 0; j < NWD; j++) {
+	for (j = 0; j < CISS_MAX_LUN; j++) {
 		struct gendisk *disk = hba[i]->gendisk[j];
 		if (disk) {
 			request_queue_t *q = disk->queue;
@@ -3442,9 +3444,9 @@
 		}
 	}
 
-	pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
+	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
 			    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
-	pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(ErrorInfo_struct),
+	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
 			    hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
 	kfree(hba[i]->cmd_pool_bits);
 #ifdef CONFIG_CISS_SCSI_TAPE
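
The cciss changes above move from preallocating all NWD gendisks per controller to allocating and releasing them on demand, so an index can be reused after a logical drive is deleted. The gendisk lifecycle being relied on is roughly the sketch below; foo_major, foo_fops, foo_request_fn and foo_lock are assumed helpers, not part of this patch:

#include <linux/blkdev.h>
#include <linux/genhd.h>

static struct gendisk *foo_add_disk(sector_t nr_sectors)
{
	struct gendisk *disk = alloc_disk(16);	/* whole disk + 15 partitions */

	if (!disk)
		return NULL;
	disk->queue = blk_init_queue(foo_request_fn, &foo_lock);
	if (!disk->queue) {
		put_disk(disk);
		return NULL;
	}
	disk->major = foo_major;
	disk->first_minor = 0;
	disk->fops = &foo_fops;
	sprintf(disk->disk_name, "foo0");
	set_capacity(disk, nr_sectors);
	add_disk(disk);			/* the disk becomes visible here */
	return disk;
}

static void foo_del_disk(struct gendisk *disk)
{
	del_gendisk(disk);		/* unregister from the block layer */
	blk_cleanup_queue(disk->queue);
	put_disk(disk);			/* drop the reference; slot can be reused */
}
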
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 562235c..b70988d 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -6,7 +6,6 @@
 #include "cciss_cmd.h"
 
 
-#define NWD		16
 #define NWD_SHIFT	4
 #define MAX_PART	(1 << NWD_SHIFT)
 
@@ -60,6 +59,7 @@
 	__u32	board_id;
 	void __iomem *vaddr;
 	unsigned long paddr;
+	int 	nr_cmds; /* Number of commands allowed on this controller */
 	CfgTable_struct __iomem *cfgtable;
 	int	interrupts_enabled;
 	int	major;
@@ -76,6 +76,7 @@
 	unsigned int intr[4];
 	unsigned int msix_vector;
 	unsigned int msi_vector;
+	int 	cciss_max_sectors;
 	BYTE	cciss_read;
 	BYTE	cciss_write;
 	BYTE	cciss_read_capacity;
@@ -110,7 +111,7 @@
 	int			next_to_run;
 
 	// Disk structures we need to pass back
-	struct gendisk   *gendisk[NWD];
+	struct gendisk   *gendisk[CISS_MAX_LUN];
 #ifdef CONFIG_CISS_SCSI_TAPE
 	void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
 	/* list of block side commands the scsi error handling sucked up */
@@ -282,6 +283,7 @@
 	__u32	board_id;
 	char	*product_name;
 	struct access_method *access;
+	int nr_cmds; /* Max cmds this kind of ctlr can handle. */
 };
 
 #define CCISS_LOCK(i)	(&hba[i]->lock)
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index 4af7c4c..43bf559 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -55,6 +55,7 @@
 #define I2O_INT_MASK            0x34
 #define I2O_IBPOST_Q            0x40
 #define I2O_OBPOST_Q            0x44
+#define I2O_DMA1_CFG		0x214
 
 //Configuration Table
 #define CFGTBL_ChangeReq        0x00000001l
@@ -88,7 +89,7 @@
 //###########################################################################
 //STRUCTURES
 //###########################################################################
-#define CISS_MAX_LUN	16	
+#define CISS_MAX_LUN	1024
 #define CISS_MAX_PHYS_LUN	1024
 // SCSI-3 Cmmands 
 
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9e6d3a8..3f1b382 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -992,11 +992,11 @@
 {
 }
 
-static DECLARE_WORK(floppy_work, NULL, NULL);
+static DECLARE_WORK(floppy_work, NULL);
 
 static void schedule_bh(void (*handler) (void))
 {
-	PREPARE_WORK(&floppy_work, (void (*)(void *))handler, NULL);
+	PREPARE_WORK(&floppy_work, (work_func_t)handler);
 	schedule_work(&floppy_work);
 }
 
@@ -1008,7 +1008,7 @@
 
 	spin_lock_irqsave(&floppy_lock, flags);
 	do_floppy = NULL;
-	PREPARE_WORK(&floppy_work, (void *)empty, NULL);
+	PREPARE_WORK(&floppy_work, (work_func_t)empty);
 	del_timer(&fd_timer);
 	spin_unlock_irqrestore(&floppy_lock, flags);
 }
@@ -1868,7 +1868,7 @@
 	printk("fdc_busy=%lu\n", fdc_busy);
 	if (do_floppy)
 		printk("do_floppy=%p\n", do_floppy);
-	if (floppy_work.pending)
+	if (work_pending(&floppy_work))
 		printk("floppy_work.func=%p\n", floppy_work.func);
 	if (timer_pending(&fd_timer))
 		printk("fd_timer.function=%p\n", fd_timer.function);
@@ -4498,7 +4498,7 @@
 		printk("floppy timer still active:%s\n", timeout_message);
 	if (timer_pending(&fd_timer))
 		printk("auxiliary floppy timer still active\n");
-	if (floppy_work.pending)
+	if (work_pending(&floppy_work))
 		printk("work still pending\n");
 #endif
 	old_fdc = fdc;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9d1035e..7bf2cfb 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -355,14 +355,30 @@
 	return NULL;
 }
 
+static ssize_t pid_show(struct gendisk *disk, char *page)
+{
+	return sprintf(page, "%ld\n",
+		(long) ((struct nbd_device *)disk->private_data)->pid);
+}
+
+static struct disk_attribute pid_attr = {
+	.attr = { .name = "pid", .mode = S_IRUGO },
+	.show = pid_show,
+};
+
 static void nbd_do_it(struct nbd_device *lo)
 {
 	struct request *req;
 
 	BUG_ON(lo->magic != LO_MAGIC);
 
+	lo->pid = current->pid;
+	sysfs_create_file(&lo->disk->kobj, &pid_attr.attr);
+
 	while ((req = nbd_read_stat(lo)) != NULL)
 		nbd_end_request(req);
+
+	sysfs_remove_file(&lo->disk->kobj, &pid_attr.attr);
 	return;
 }
 
diff --git a/drivers/block/paride/aten.c b/drivers/block/paride/aten.c
index c4d696d..2695465 100644
--- a/drivers/block/paride/aten.c
+++ b/drivers/block/paride/aten.c
@@ -149,12 +149,12 @@
 
 static int __init aten_init(void)
 {
-	return pi_register(&aten)-1;
+	return paride_register(&aten);
 }
 
 static void __exit aten_exit(void)
 {
-	pi_unregister( &aten );
+	paride_unregister( &aten );
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/bpck.c b/drivers/block/paride/bpck.c
index d462ff6..4f27e73 100644
--- a/drivers/block/paride/bpck.c
+++ b/drivers/block/paride/bpck.c
@@ -464,12 +464,12 @@
 
 static int __init bpck_init(void)
 {
-	return pi_register(&bpck)-1;
+	return paride_register(&bpck);
 }
 
 static void __exit bpck_exit(void)
 {
-	pi_unregister(&bpck);
+	paride_unregister(&bpck);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/bpck6.c b/drivers/block/paride/bpck6.c
index 41a237c..ad12452 100644
--- a/drivers/block/paride/bpck6.c
+++ b/drivers/block/paride/bpck6.c
@@ -31,10 +31,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <asm/io.h>
-
-#if defined(CONFIG_PARPORT_MODULE)||defined(CONFIG_PARPORT)
 #include <linux/parport.h>
-#endif
 
 #include "ppc6lnx.c"
 #include "paride.h"
@@ -139,11 +136,6 @@
 	PPCSTRUCT(pi)->ppc_id=pi->unit;
 	PPCSTRUCT(pi)->lpt_addr=pi->port;
 
-#ifdef CONFIG_PARPORT_PC_MODULE
-#define CONFIG_PARPORT_PC
-#endif
-
-#ifdef CONFIG_PARPORT_PC
 	/* look at the parport device to see if what modes we can use */
 	if(((struct pardevice *)(pi->pardev))->port->modes & 
 		(PARPORT_MODE_EPP)
@@ -161,11 +153,6 @@
 	{
 		return 1;
 	}
-#else
-	/* there is no way of knowing what kind of port we have
-	   default to the highest mode possible */
-	return 5;
-#endif
 }
 
 static int bpck6_probe_unit ( PIA *pi )
@@ -265,12 +252,12 @@
 	printk(KERN_INFO "bpck6: Copyright 2001 by Micro Solutions, Inc., DeKalb IL. USA\n");
 	if(verbose)
 		printk(KERN_DEBUG "bpck6: verbose debug enabled.\n");
-	return pi_register(&bpck6) - 1;  
+	return paride_register(&bpck6);
 }
 
 static void __exit bpck6_exit(void)
 {
-	pi_unregister(&bpck6);
+	paride_unregister(&bpck6);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/comm.c b/drivers/block/paride/comm.c
index 43d6135..9bcd354 100644
--- a/drivers/block/paride/comm.c
+++ b/drivers/block/paride/comm.c
@@ -205,12 +205,12 @@
 
 static int __init comm_init(void)
 {
-	return pi_register(&comm)-1;
+	return paride_register(&comm);
 }
 
 static void __exit comm_exit(void)
 {
-	pi_unregister(&comm);
+	paride_unregister(&comm);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/dstr.c b/drivers/block/paride/dstr.c
index 04d53bf..accc5c7 100644
--- a/drivers/block/paride/dstr.c
+++ b/drivers/block/paride/dstr.c
@@ -220,12 +220,12 @@
 
 static int __init dstr_init(void)
 {
-	return pi_register(&dstr)-1;
+	return paride_register(&dstr);
 }
 
 static void __exit dstr_exit(void)
 {
-	pi_unregister(&dstr);
+	paride_unregister(&dstr);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/epat.c b/drivers/block/paride/epat.c
index 55d1c0a..1bcdff7 100644
--- a/drivers/block/paride/epat.c
+++ b/drivers/block/paride/epat.c
@@ -327,12 +327,12 @@
 #ifdef CONFIG_PARIDE_EPATC8
 	epatc8 = 1;
 #endif
-	return pi_register(&epat)-1;
+	return paride_register(&epat);
 }
 
 static void __exit epat_exit(void)
 {
-	pi_unregister(&epat);
+	paride_unregister(&epat);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/epia.c b/drivers/block/paride/epia.c
index 0f2e0c2..fb0e782 100644
--- a/drivers/block/paride/epia.c
+++ b/drivers/block/paride/epia.c
@@ -303,12 +303,12 @@
 
 static int __init epia_init(void)
 {
-	return pi_register(&epia)-1;
+	return paride_register(&epia);
 }
 
 static void __exit epia_exit(void)
 {
-	pi_unregister(&epia);
+	paride_unregister(&epia);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/fit2.c b/drivers/block/paride/fit2.c
index e0f0691..3812837 100644
--- a/drivers/block/paride/fit2.c
+++ b/drivers/block/paride/fit2.c
@@ -138,12 +138,12 @@
 
 static int __init fit2_init(void)
 {
-	return pi_register(&fit2)-1;
+	return paride_register(&fit2);
 }
 
 static void __exit fit2_exit(void)
 {
-	pi_unregister(&fit2);
+	paride_unregister(&fit2);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/fit3.c b/drivers/block/paride/fit3.c
index 15400e7..275d269 100644
--- a/drivers/block/paride/fit3.c
+++ b/drivers/block/paride/fit3.c
@@ -198,12 +198,12 @@
 
 static int __init fit3_init(void)
 {
-	return pi_register(&fit3)-1;
+	return paride_register(&fit3);
 }
 
 static void __exit fit3_exit(void)
 {
-	pi_unregister(&fit3);
+	paride_unregister(&fit3);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/friq.c b/drivers/block/paride/friq.c
index 5ea2904..4f2ba24 100644
--- a/drivers/block/paride/friq.c
+++ b/drivers/block/paride/friq.c
@@ -263,12 +263,12 @@
 
 static int __init friq_init(void)
 {
-	return pi_register(&friq)-1;
+	return paride_register(&friq);
 }
 
 static void __exit friq_exit(void)
 {
-	pi_unregister(&friq);
+	paride_unregister(&friq);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/frpw.c b/drivers/block/paride/frpw.c
index 56b3824..c3cde36 100644
--- a/drivers/block/paride/frpw.c
+++ b/drivers/block/paride/frpw.c
@@ -300,12 +300,12 @@
 
 static int __init frpw_init(void)
 {
-	return pi_register(&frpw)-1;
+	return paride_register(&frpw);
 }
 
 static void __exit frpw_exit(void)
 {
-	pi_unregister(&frpw);
+	paride_unregister(&frpw);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/jumbo b/drivers/block/paride/jumbo
deleted file mode 100644
index e793b9c..0000000
--- a/drivers/block/paride/jumbo
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/sh
-#
-# This script can be used to build "jumbo" modules that contain the
-# base PARIDE support, one protocol module and one high-level driver.
-#
-echo -n "High level driver [pcd] : "
-read X
-HLD=${X:-pcd}
-#
-echo -n "Protocol module [bpck] : "
-read X
-PROTO=${X:-bpck}
-#
-echo -n "Use MODVERSIONS [y] ? "
-read X
-UMODV=${X:-y}
-#
-echo -n "For SMP kernel [n] ? "
-read X
-USMP=${X:-n}
-#
-echo -n "Support PARPORT [n] ? "
-read X
-UPARP=${X:-n}
-#
-echo
-#
-case $USMP in
-	y* | Y* ) FSMP="-DCONFIG_SMP"
-		  ;;
-	*)	  FSMP=""
-		  ;;
-esac
-#
-MODI="-include ../../../include/linux/modversions.h"
-#
-case $UMODV in
-	y* | Y* ) FMODV="-DMODVERSIONS $MODI"
-		  ;;
-	*)	  FMODV=""
-		  ;;
-esac
-#
-case $UPARP in
-	y* | Y* ) FPARP="-DCONFIG_PARPORT"
-		  ;;
-	*)	  FPARP=""
-		  ;;
-esac
-#
-TARG=$HLD-$PROTO.o
-FPROTO=-DCONFIG_PARIDE_`echo "$PROTO" | tr [a-z] [A-Z]`
-FK="-D__KERNEL__ -I ../../../include"
-FLCH=-D_LINUX_CONFIG_H
-#
-echo cc $FK $FSMP $FLCH $FPARP $FPROTO $FMODV -Wall -O2 -o Jb.o -c paride.c
-cc $FK $FSMP $FLCH $FPARP $FPROTO $FMODV -Wall -O2 -o Jb.o -c paride.c
-#
-echo cc $FK $FSMP $FMODV -Wall -O2 -o Jp.o -c $PROTO.c
-cc $FK $FSMP $FMODV -Wall -O2 -o Jp.o -c $PROTO.c
-#
-echo cc $FK $FSMP $FMODV -DMODULE -DPARIDE_JUMBO -Wall -O2 -o Jd.o -c $HLD.c
-cc $FK $FSMP $FMODV -DMODULE -DPARIDE_JUMBO -Wall -O2 -o Jd.o -c $HLD.c
-#
-echo ld -r -o $TARG Jp.o Jb.o Jd.o
-ld -r -o $TARG Jp.o Jb.o Jd.o
-#
-#
-rm Jp.o Jb.o Jd.o
-#
diff --git a/drivers/block/paride/kbic.c b/drivers/block/paride/kbic.c
index d983bce..35999c4 100644
--- a/drivers/block/paride/kbic.c
+++ b/drivers/block/paride/kbic.c
@@ -283,13 +283,21 @@
 
 static int __init kbic_init(void)
 {
-	return (pi_register(&k951)||pi_register(&k971))-1;
+	int rv;
+
+	rv = paride_register(&k951);
+	if (rv < 0)
+		return rv;
+	rv = paride_register(&k971);
+	if (rv < 0)
+		paride_unregister(&k951);
+	return rv;
 }
 
 static void __exit kbic_exit(void)
 {
-	pi_unregister(&k951);
-	pi_unregister(&k971);
+	paride_unregister(&k951);
+	paride_unregister(&k971);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/ktti.c b/drivers/block/paride/ktti.c
index 6c7edbf..117ab0e 100644
--- a/drivers/block/paride/ktti.c
+++ b/drivers/block/paride/ktti.c
@@ -115,12 +115,12 @@
 
 static int __init ktti_init(void)
 {
-	return pi_register(&ktti)-1;
+	return paride_register(&ktti);
 }
 
 static void __exit ktti_exit(void)
 {
-	pi_unregister(&ktti);
+	paride_unregister(&ktti);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/on20.c b/drivers/block/paride/on20.c
index 9f8e010..0173697 100644
--- a/drivers/block/paride/on20.c
+++ b/drivers/block/paride/on20.c
@@ -140,12 +140,12 @@
 
 static int __init on20_init(void)
 {
-	return pi_register(&on20)-1;
+	return paride_register(&on20);
 }
 
 static void __exit on20_exit(void)
 {
-	pi_unregister(&on20);
+	paride_unregister(&on20);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/on26.c b/drivers/block/paride/on26.c
index 0f833ca..95ba256 100644
--- a/drivers/block/paride/on26.c
+++ b/drivers/block/paride/on26.c
@@ -306,12 +306,12 @@
 
 static int __init on26_init(void)
 {
-	return pi_register(&on26)-1;
+	return paride_register(&on26);
 }
 
 static void __exit on26_exit(void)
 {
-	pi_unregister(&on26);
+	paride_unregister(&on26);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/paride.c b/drivers/block/paride/paride.c
index 4b258f7..48c50f1 100644
--- a/drivers/block/paride/paride.c
+++ b/drivers/block/paride/paride.c
@@ -29,14 +29,7 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/sched.h>	/* TASK_* */
-
-#ifdef CONFIG_PARPORT_MODULE
-#define CONFIG_PARPORT
-#endif
-
-#ifdef CONFIG_PARPORT
 #include <linux/parport.h>
-#endif
 
 #include "paride.h"
 
@@ -76,8 +69,6 @@
 
 EXPORT_SYMBOL(pi_read_block);
 
-#ifdef CONFIG_PARPORT
-
 static void pi_wake_up(void *p)
 {
 	PIA *pi = (PIA *) p;
@@ -100,11 +91,8 @@
 		cont();
 }
 
-#endif
-
 int pi_schedule_claimed(PIA * pi, void (*cont) (void))
 {
-#ifdef CONFIG_PARPORT
 	unsigned long flags;
 
 	spin_lock_irqsave(&pi_spinlock, flags);
@@ -115,7 +103,6 @@
 	}
 	pi->claimed = 1;
 	spin_unlock_irqrestore(&pi_spinlock, flags);
-#endif
 	return 1;
 }
 EXPORT_SYMBOL(pi_schedule_claimed);
@@ -133,20 +120,16 @@
 	if (pi->claimed)
 		return;
 	pi->claimed = 1;
-#ifdef CONFIG_PARPORT
 	if (pi->pardev)
 		wait_event(pi->parq,
 			   !parport_claim((struct pardevice *) pi->pardev));
-#endif
 }
 
 static void pi_unclaim(PIA * pi)
 {
 	pi->claimed = 0;
-#ifdef CONFIG_PARPORT
 	if (pi->pardev)
 		parport_release((struct pardevice *) (pi->pardev));
-#endif
 }
 
 void pi_connect(PIA * pi)
@@ -167,21 +150,15 @@
 
 static void pi_unregister_parport(PIA * pi)
 {
-#ifdef CONFIG_PARPORT
 	if (pi->pardev) {
 		parport_unregister_device((struct pardevice *) (pi->pardev));
 		pi->pardev = NULL;
 	}
-#endif
 }
 
 void pi_release(PIA * pi)
 {
 	pi_unregister_parport(pi);
-#ifndef CONFIG_PARPORT
-	if (pi->reserved)
-		release_region(pi->port, pi->reserved);
-#endif				/* !CONFIG_PARPORT */
 	if (pi->proto->release_proto)
 		pi->proto->release_proto(pi);
 	module_put(pi->proto->owner);
@@ -229,7 +206,7 @@
 	return res;
 }
 
-int pi_register(PIP * pr)
+int paride_register(PIP * pr)
 {
 	int k;
 
@@ -237,24 +214,24 @@
 		if (protocols[k] && !strcmp(pr->name, protocols[k]->name)) {
 			printk("paride: %s protocol already registered\n",
 			       pr->name);
-			return 0;
+			return -1;
 		}
 	k = 0;
 	while ((k < MAX_PROTOS) && (protocols[k]))
 		k++;
 	if (k == MAX_PROTOS) {
 		printk("paride: protocol table full\n");
-		return 0;
+		return -1;
 	}
 	protocols[k] = pr;
 	pr->index = k;
 	printk("paride: %s registered as protocol %d\n", pr->name, k);
-	return 1;
+	return 0;
 }
 
-EXPORT_SYMBOL(pi_register);
+EXPORT_SYMBOL(paride_register);
 
-void pi_unregister(PIP * pr)
+void paride_unregister(PIP * pr)
 {
 	if (!pr)
 		return;
@@ -265,12 +242,10 @@
 	protocols[pr->index] = NULL;
 }
 
-EXPORT_SYMBOL(pi_unregister);
+EXPORT_SYMBOL(paride_unregister);
 
 static int pi_register_parport(PIA * pi, int verbose)
 {
-#ifdef CONFIG_PARPORT
-
 	struct parport *port;
 
 	port = parport_find_base(pi->port);
@@ -290,7 +265,6 @@
 		printk("%s: 0x%x is %s\n", pi->device, pi->port, port->name);
 
 	pi->parname = (char *) port->name;
-#endif
 
 	return 1;
 }
@@ -447,13 +421,6 @@
 			printk("%s: Adapter not found\n", device);
 		return 0;
 	}
-#ifndef CONFIG_PARPORT
-	if (!request_region(pi->port, pi->reserved, pi->device)) {
-		printk(KERN_WARNING "paride: Unable to request region 0x%x\n",
-		       pi->port);
-		return 0;
-	}
-#endif				/* !CONFIG_PARPORT */
 
 	if (pi->parname)
 		printk("%s: Sharing %s at 0x%x\n", pi->device,
diff --git a/drivers/block/paride/paride.h b/drivers/block/paride/paride.h
index c6d98ef..2bddbf4 100644
--- a/drivers/block/paride/paride.h
+++ b/drivers/block/paride/paride.h
@@ -163,8 +163,8 @@
 
 typedef struct pi_protocol PIP;
 
-extern int pi_register( PIP * );
-extern void pi_unregister ( PIP * );
+extern int paride_register( PIP * );
+extern void paride_unregister ( PIP * );
 
 #endif /* __DRIVERS_PARIDE_H__ */
 /* end of paride.h */
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index ac5ba46..c852eed 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -912,12 +912,12 @@
 	int unit;
 
 	if (disable)
-		return -1;
+		return -EINVAL;
 
 	pcd_init_units();
 
 	if (pcd_detect())
-		return -1;
+		return -ENODEV;
 
 	/* get the atapi capabilities page */
 	pcd_probe_capabilities();
@@ -925,7 +925,7 @@
 	if (register_blkdev(major, name)) {
 		for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
 			put_disk(cd->disk);
-		return -1;
+		return -EBUSY;
 	}
 
 	pcd_queue = blk_init_queue(do_pcd_request, &pcd_lock);
@@ -933,7 +933,7 @@
 		unregister_blkdev(major, name);
 		for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
 			put_disk(cd->disk);
-		return -1;
+		return -ENOMEM;
 	}
 
 	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 40a11e5..9d9bff2 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -352,19 +352,19 @@
 
 static void run_fsm(void);
 
-static void ps_tq_int( void *data);
+static void ps_tq_int(struct work_struct *work);
 
-static DECLARE_WORK(fsm_tq, ps_tq_int, NULL);
+static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int);
 
 static void schedule_fsm(void)
 {
 	if (!nice)
-		schedule_work(&fsm_tq);
+		schedule_delayed_work(&fsm_tq, 0);
 	else
 		schedule_delayed_work(&fsm_tq, nice-1);
 }
 
-static void ps_tq_int(void *data)
+static void ps_tq_int(struct work_struct *work)
 {
 	run_fsm();
 }
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 1a9dee1..7cdaa19 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -933,25 +933,25 @@
 	int unit;
 
 	if (disable)
-		return -1;
+		return -EINVAL;
 
 	pf_init_units();
 
 	if (pf_detect())
-		return -1;
+		return -ENODEV;
 	pf_busy = 0;
 
 	if (register_blkdev(major, name)) {
 		for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
 			put_disk(pf->disk);
-		return -1;
+		return -EBUSY;
 	}
 	pf_queue = blk_init_queue(do_pf_request, &pf_spin_lock);
 	if (!pf_queue) {
 		unregister_blkdev(major, name);
 		for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
 			put_disk(pf->disk);
-		return -1;
+		return -ENOMEM;
 	}
 
 	blk_queue_max_phys_segments(pf_queue, cluster);
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 13f998a..9970aed 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -646,14 +646,14 @@
 	int err;
 
 	if (disable){
-		err = -1;
+		err = -EINVAL;
 		goto out;
 	}
 
 	pg_init_units();
 
 	if (pg_detect()) {
-		err = -1;
+		err = -ENODEV;
 		goto out;
 	}
 
diff --git a/drivers/block/paride/pseudo.h b/drivers/block/paride/pseudo.h
index 932342d..bc37032 100644
--- a/drivers/block/paride/pseudo.h
+++ b/drivers/block/paride/pseudo.h
@@ -35,7 +35,7 @@
 #include <linux/sched.h>
 #include <linux/workqueue.h>
 
-static void ps_tq_int( void *data);
+static void ps_tq_int(struct work_struct *work);
 
 static void (* ps_continuation)(void);
 static int (* ps_ready)(void);
@@ -45,7 +45,7 @@
 
 static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused)));
 
-static DECLARE_WORK(ps_tq, ps_tq_int, NULL);
+static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int);
 
 static void ps_set_intr(void (*continuation)(void), 
 			int (*ready)(void),
@@ -63,14 +63,14 @@
 	if (!ps_tq_active) {
 		ps_tq_active = 1;
 		if (!ps_nice)
-			schedule_work(&ps_tq);
+			schedule_delayed_work(&ps_tq, 0);
 		else
 			schedule_delayed_work(&ps_tq, ps_nice-1);
 	}
 	spin_unlock_irqrestore(&ps_spinlock,flags);
 }
 
-static void ps_tq_int(void *data)
+static void ps_tq_int(struct work_struct *work)
 {
 	void (*con)(void);
 	unsigned long flags;
@@ -92,7 +92,7 @@
 	}
 	ps_tq_active = 1;
 	if (!ps_nice)
-		schedule_work(&ps_tq);
+		schedule_delayed_work(&ps_tq, 0);
 	else
 		schedule_delayed_work(&ps_tq, ps_nice-1);
 	spin_unlock_irqrestore(&ps_spinlock,flags);
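
The pseudo.h and pd.c conversions show the delayed-work side of the same API change: a statically declared item becomes a delayed_work, and the no-delay case is expressed as schedule_delayed_work(..., 0). A small sketch with hypothetical names:

#include <linux/workqueue.h>

static void foo_tq_int(struct work_struct *work);

static DECLARE_DELAYED_WORK(foo_tq, foo_tq_int);

static void foo_tq_int(struct work_struct *work)
{
	/* the handler no longer receives a separate data pointer */
}

static void foo_kick(int nice)
{
	if (!nice)
		schedule_delayed_work(&foo_tq, 0);	/* run as soon as possible */
	else
		schedule_delayed_work(&foo_tq, nice - 1);
}
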
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 35fb266..c902b25 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -946,12 +946,12 @@
 	int err;
 
 	if (disable) {
-		err = -1;
+		err = -EINVAL;
 		goto out;
 	}
 
 	if (pt_detect()) {
-		err = -1;
+		err = -ENODEV;
 		goto out;
 	}
 
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f2904f6..e45eaa2 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -54,7 +54,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/miscdevice.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/mutex.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_ioctl.h>
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 47d6975..54509eb 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1244,9 +1244,10 @@
 	return IRQ_RETVAL(handled);
 }
 
-static void carm_fsm_task (void *_data)
+static void carm_fsm_task (struct work_struct *work)
 {
-	struct carm_host *host = _data;
+	struct carm_host *host =
+		container_of(work, struct carm_host, fsm_task);
 	unsigned long flags;
 	unsigned int state;
 	int rc, i, next_dev;
@@ -1619,7 +1620,7 @@
 	host->pdev = pdev;
 	host->flags = pci_dac ? FL_DAC : 0;
 	spin_lock_init(&host->lock);
-	INIT_WORK(&host->fsm_task, carm_fsm_task, host);
+	INIT_WORK(&host->fsm_task, carm_fsm_task);
 	init_completion(&host->probe_comp);
 
 	for (i = 0; i < ARRAY_SIZE(host->req); i++)
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 0d5c73f..2098eff 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -376,7 +376,7 @@
     int stalled_pipe);
 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
 static void ub_reset_enter(struct ub_dev *sc, int try);
-static void ub_reset_task(void *arg);
+static void ub_reset_task(struct work_struct *work);
 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_capacity *ret);
@@ -1558,9 +1558,9 @@
 	schedule_work(&sc->reset_work);
 }
 
-static void ub_reset_task(void *arg)
+static void ub_reset_task(struct work_struct *work)
 {
-	struct ub_dev *sc = arg;
+	struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
 	unsigned long flags;
 	struct list_head *p;
 	struct ub_lun *lun;
@@ -2179,7 +2179,7 @@
 	usb_init_urb(&sc->work_urb);
 	tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
 	atomic_set(&sc->poison, 0);
-	INIT_WORK(&sc->reset_work, ub_reset_task, sc);
+	INIT_WORK(&sc->reset_work, ub_reset_task);
 	init_waitqueue_head(&sc->reset_wait);
 
 	init_timer(&sc->work_timer);
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index ec5a1b9..e19ba4e 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -759,6 +759,8 @@
 	}
 };
 
+static int need_delete_probe;
+
 /*
  * Initialize the whole device driver.  Handle module and non-module
  * versions
@@ -773,46 +775,67 @@
 
 	if (viopath_hostLp == HvLpIndexInvalid) {
 		printk(VIOD_KERN_WARNING "invalid hosting partition\n");
-		return -EIO;
+		rc = -EIO;
+		goto early_fail;
 	}
 
 	printk(VIOD_KERN_INFO "vers " VIOD_VERS ", hosting partition %d\n",
 			viopath_hostLp);
 
         /* register the block device */
-	if (register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME)) {
+	rc =  register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
+	if (rc) {
 		printk(VIOD_KERN_WARNING
 				"Unable to get major number %d for %s\n",
 				VIODASD_MAJOR, VIOD_GENHD_NAME);
-		return -EIO;
+		goto early_fail;
 	}
 	/* Actually open the path to the hosting partition */
-	if (viopath_open(viopath_hostLp, viomajorsubtype_blockio,
-				VIOMAXREQ + 2)) {
+	rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio,
+				VIOMAXREQ + 2);
+	if (rc) {
 		printk(VIOD_KERN_WARNING
 		       "error opening path to host partition %d\n",
 		       viopath_hostLp);
-		unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
-		return -EIO;
+		goto unregister_blk;
 	}
 
 	/* Initialize our request handler */
 	vio_setHandler(viomajorsubtype_blockio, handle_block_event);
 
 	rc = vio_register_driver(&viodasd_driver);
-	if (rc == 0)
-		driver_create_file(&viodasd_driver.driver, &driver_attr_probe);
+	if (rc) {
+		printk(VIOD_KERN_WARNING "vio_register_driver failed\n");
+		goto unset_handler;
+	}
+
+	/*
+	 * If this call fails, it just means that we cannot dynamically
+	 * add virtual disks, but the driver will still work fine for
+	 * all existing disks, so ignore the failure.
+	 */
+	if (!driver_create_file(&viodasd_driver.driver, &driver_attr_probe))
+		need_delete_probe = 1;
+
+	return 0;
+
+unset_handler:
+	vio_clearHandler(viomajorsubtype_blockio);
+	viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
+unregister_blk:
+	unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
+early_fail:
 	return rc;
 }
 module_init(viodasd_init);
 
-void viodasd_exit(void)
+void __exit viodasd_exit(void)
 {
-	driver_remove_file(&viodasd_driver.driver, &driver_attr_probe);
+	if (need_delete_probe)
+		driver_remove_file(&viodasd_driver.driver, &driver_attr_probe);
 	vio_unregister_driver(&viodasd_driver);
 	vio_clearHandler(viomajorsubtype_blockio);
-	unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
 	viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
+	unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
 }
-
 module_exit(viodasd_exit);
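
The reworked viodasd_init() above uses the usual kernel goto-unwind shape, where each failure label releases only what was set up before it and the exit path tears things down in reverse order. A generic sketch of that shape (all foo_* steps hypothetical):

static int __init foo_init(void)
{
	int rc;

	rc = foo_setup_a();
	if (rc)
		goto early_fail;
	rc = foo_setup_b();
	if (rc)
		goto undo_a;
	rc = foo_setup_c();
	if (rc)
		goto undo_b;
	return 0;

undo_b:
	foo_teardown_b();
undo_a:
	foo_teardown_a();
early_fail:
	return rc;
}
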
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 5167517..9256985 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -157,9 +157,10 @@
 	}
 }
 
-static void bcm203x_work(void *user_data)
+static void bcm203x_work(struct work_struct *work)
 {
-	struct bcm203x_data *data = user_data;
+	struct bcm203x_data *data =
+		container_of(work, struct bcm203x_data, work);
 
 	if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0)
 		BT_ERR("Can't submit URB");
@@ -246,7 +247,7 @@
 
 	release_firmware(firmware);
 
-	INIT_WORK(&data->work, bcm203x_work, (void *) data);
+	INIT_WORK(&data->work, bcm203x_work);
 
 	usb_set_intfdata(intf, data);
 
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index cbc0725..acfb6a4 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -892,43 +892,10 @@
 }
 
 
-static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
-{
-	int i;
-
-	i = pcmcia_get_first_tuple(handle, tuple);
-	if (i != CS_SUCCESS)
-		return CS_NO_MORE_ITEMS;
-
-	i = pcmcia_get_tuple_data(handle, tuple);
-	if (i != CS_SUCCESS)
-		return i;
-
-	return pcmcia_parse_tuple(handle, tuple, parse);
-}
-
 static int bluecard_config(struct pcmcia_device *link)
 {
 	bluecard_info_t *info = link->priv;
-	tuple_t tuple;
-	u_short buf[256];
-	cisparse_t parse;
-	int i, n, last_ret, last_fn;
-
-	tuple.TupleData = (cisdata_t *)buf;
-	tuple.TupleOffset = 0;
-	tuple.TupleDataMax = 255;
-	tuple.Attributes = 0;
-
-	/* Get configuration register information */
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	last_ret = first_tuple(link, &tuple, &parse);
-	if (last_ret != CS_SUCCESS) {
-		last_fn = ParseTuple;
-		goto cs_failed;
-	}
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present = parse.config.rmask[0];
+	int i, n;
 
 	link->conf.ConfigIndex = 0x20;
 	link->io.NumPorts1 = 64;
@@ -966,9 +933,6 @@
 
 	return 0;
 
-cs_failed:
-	cs_error(link, last_fn, last_ret);
-
 failed:
 	bluecard_release(link);
 	return -ENODEV;
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 3a96a0b..aae3aba 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -713,22 +713,7 @@
 	u_short buf[256];
 	cisparse_t parse;
 	cistpl_cftable_entry_t *cf = &parse.cftable_entry;
-	int i, j, try, last_ret, last_fn;
-
-	tuple.TupleData = (cisdata_t *)buf;
-	tuple.TupleOffset = 0;
-	tuple.TupleDataMax = 255;
-	tuple.Attributes = 0;
-
-	/* Get configuration register information */
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	last_ret = first_tuple(link, &tuple, &parse);
-	if (last_ret != CS_SUCCESS) {
-		last_fn = ParseTuple;
-		goto cs_failed;
-	}
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present = parse.config.rmask[0];
+	int i, j, try;
 
 	/* First pass: look for a config entry that looks normal. */
 	tuple.TupleData = (cisdata_t *)buf;
@@ -802,9 +787,6 @@
 
 	return 0;
 
-cs_failed:
-	cs_error(link, last_fn, last_ret);
-
 failed:
 	bt3c_release(link);
 	return -ENODEV;
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 3b29086..92648ef 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -644,22 +644,7 @@
 	u_short buf[256];
 	cisparse_t parse;
 	cistpl_cftable_entry_t *cf = &parse.cftable_entry;
-	int i, j, try, last_ret, last_fn;
-
-	tuple.TupleData = (cisdata_t *)buf;
-	tuple.TupleOffset = 0;
-	tuple.TupleDataMax = 255;
-	tuple.Attributes = 0;
-
-	/* Get configuration register information */
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	last_ret = first_tuple(link, &tuple, &parse);
-	if (last_ret != CS_SUCCESS) {
-		last_fn = ParseTuple;
-		goto cs_failed;
-	}
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present = parse.config.rmask[0];
+	int i, j, try;
 
 	/* First pass: look for a config entry that looks normal. */
 	tuple.TupleData = (cisdata_t *) buf;
@@ -734,9 +719,6 @@
 
 	return 0;
 
-cs_failed:
-	cs_error(link, last_fn, last_ret);
-
 failed:
 	btuart_release(link);
 	return -ENODEV;
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 07eafbc..77b99ee 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -626,22 +626,7 @@
 	u_short buf[256];
 	cisparse_t parse;
 	cistpl_cftable_entry_t *cf = &parse.cftable_entry;
-	int i, last_ret, last_fn;
-
-	tuple.TupleData = (cisdata_t *)buf;
-	tuple.TupleOffset = 0;
-	tuple.TupleDataMax = 255;
-	tuple.Attributes = 0;
-
-	/* Get configuration register information */
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	last_ret = first_tuple(link, &tuple, &parse);
-	if (last_ret != CS_SUCCESS) {
-		last_fn = ParseTuple;
-		goto cs_failed;
-	}
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present = parse.config.rmask[0];
+	int i;
 
 	tuple.TupleData = (cisdata_t *)buf;
 	tuple.TupleOffset = 0;
@@ -690,9 +675,6 @@
 
 	return 0;
 
-cs_failed:
-	cs_error(link, last_fn, last_ret);
-
 failed:
 	dtl1_release(link);
 	return -ENODEV;
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index d0cface..5e2c318 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -330,7 +330,7 @@
 	   reliable packet if the number of packets sent but not yet ack'ed
 	   is < than the winsize */
 
-	spin_lock_irqsave(&bcsp->unack.lock, flags);
+	spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING);
 
 	if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) {
 		struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
@@ -696,7 +696,7 @@
 
 	BT_DBG("hu %p retransmitting %u pkts", hu, bcsp->unack.qlen);
 
-	spin_lock_irqsave(&bcsp->unack.lock, flags);
+	spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING);
 
 	while ((skb = __skb_dequeue_tail(&bcsp->unack)) != NULL) {
 		bcsp->msgq_txseq = (bcsp->msgq_txseq - 1) & 0x07;
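
The two hci_bcsp hunks replace plain spin_lock_irqsave() on the unack queue with spin_lock_irqsave_nested() and SINGLE_DEPTH_NESTING. This is a lockdep annotation, not a behavioural change: all sk_buff_head locks share one lock class, and the unack queue's lock is taken in a context where another queue lock of that same class may already be held, which lockdep would otherwise report as recursive locking. A minimal sketch of the idiom with hypothetical queue names; it only illustrates the annotation, not the bcsp code paths:

	#include <linux/skbuff.h>

	static void move_one(struct sk_buff_head *outer, struct sk_buff_head *inner)
	{
		unsigned long flags, flags2;
		struct sk_buff *skb;

		spin_lock_irqsave(&outer->lock, flags);
		/* Same lock class, different instance: tell lockdep this is an
		 * intentional one-level nesting, not a self-deadlock. */
		spin_lock_irqsave_nested(&inner->lock, flags2, SINGLE_DEPTH_NESTING);

		skb = __skb_dequeue(outer);
		if (skb)
			__skb_queue_tail(inner, skb);

		spin_unlock_irqrestore(&inner->lock, flags2);
		spin_unlock_irqrestore(&outer->lock, flags);
	}
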
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 7ea0f48..2df5cf4 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2133,16 +2133,14 @@
 		rq->timeout = 60 * HZ;
 		bio = rq->bio;
 
-		if (rq->bio)
-			blk_queue_bounce(q, &rq->bio);
-
 		if (blk_execute_rq(q, cdi->disk, rq, 0)) {
 			struct request_sense *s = rq->sense;
 			ret = -EIO;
 			cdi->last_sense = s->sense_key;
 		}
 
-		if (blk_rq_unmap_user(bio, len))
+		rq->bio = bio;
+		if (blk_rq_unmap_user(rq))
 			ret = -EFAULT;
 
 		if (ret)
diff --git a/drivers/cdrom/optcd.c b/drivers/cdrom/optcd.c
index 25032d7..3541690 100644
--- a/drivers/cdrom/optcd.c
+++ b/drivers/cdrom/optcd.c
@@ -101,7 +101,7 @@
 		return;
 
 	va_start(args, fmt);
-	vsprintf(s, fmt, args);
+	vsnprintf(s, sizeof(s), fmt, args);
 	printk(KERN_DEBUG "optcd: %s\n", s);
 	va_end(args);
 }
diff --git a/drivers/cdrom/sbpcd.c b/drivers/cdrom/sbpcd.c
index ba50e5a..a1283b1 100644
--- a/drivers/cdrom/sbpcd.c
+++ b/drivers/cdrom/sbpcd.c
@@ -770,11 +770,10 @@
 	
 	msgnum++;
 	if (msgnum>99) msgnum=0;
-	sprintf(buf, MSG_LEVEL "%s-%d [%02d]:  ", major_name, current_drive - D_S, msgnum);
 	va_start(args, fmt);
-	vsprintf(&buf[18], fmt, args);
+	vsnprintf(buf, sizeof(buf), fmt, args);
 	va_end(args);
-	printk(buf);
+	printk(MSG_LEVEL "%s-%d [%02d]:  %s", major_name, current_drive - D_S, msgnum, buf);
 #if KLOGD_PAUSE
 	sbp_sleep(KLOGD_PAUSE); /* else messages get lost */
 #endif /* KLOGD_PAUSE */ 
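
Both optcd and sbpcd formatted their log messages with vsprintf() into a fixed on-stack buffer, which can overrun the buffer on an unexpectedly long message; sbpcd then handed the assembled string to printk() as the format itself, so a stray '%' in the message would be re-interpreted. The fix is the usual pairing: bound the formatting with vsnprintf(buf, sizeof(buf), ...) and emit the result through a constant format string. A minimal, self-contained sketch of that pattern (the msg() helper and "driver:" prefix are illustrative):

	#include <linux/kernel.h>
	#include <stdarg.h>

	static void msg(const char *fmt, ...)
	{
		char buf[128];
		va_list args;

		va_start(args, fmt);
		/* Writes at most sizeof(buf) bytes and always NUL-terminates. */
		vsnprintf(buf, sizeof(buf), fmt, args);
		va_end(args);

		/* Constant format string: '%' inside buf cannot be misparsed. */
		printk(KERN_DEBUG "driver: %s\n", buf);
	}
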
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 2af12fc..24f922f 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -855,39 +855,6 @@
 	depends TANBAC_TB022X
 	select GPIO_VR41XX
 
-menu "Ftape, the floppy tape device driver"
-
-config FTAPE
-	tristate "Ftape (QIC-80/Travan) support"
-	depends on BROKEN_ON_SMP && (ALPHA || X86)
-	---help---
-	  If you have a tape drive that is connected to your floppy
-	  controller, say Y here.
-
-	  Some tape drives (like the Seagate "Tape Store 3200" or the Iomega
-	  "Ditto 3200" or the Exabyte "Eagle TR-3") come with a "high speed"
-	  controller of their own. These drives (and their companion
-	  controllers) are also supported if you say Y here.
-
-	  If you have a special controller (such as the CMS FC-10, FC-20,
-	  Mountain Mach-II, or any controller that is based on the Intel 82078
-	  FDC like the high speed controllers by Seagate and Exabyte and
-	  Iomega's "Ditto Dash") you must configure it by selecting the
-	  appropriate entries from the "Floppy tape controllers" sub-menu
-	  below and possibly modify the default values for the IRQ and DMA
-	  channel and the IO base in ftape's configuration menu.
-
-	  If you want to use your floppy tape drive on a PCI-bus based system,
-	  please read the file <file:drivers/char/ftape/README.PCI>.
-
-	  The ftape kernel driver is also available as a runtime loadable
-	  module. To compile this driver as a module, choose M here: the
-	  module will be called ftape.
-
-source "drivers/char/ftape/Kconfig"
-
-endmenu
-
 source "drivers/char/agp/Kconfig"
 
 source "drivers/char/drm/Kconfig"
@@ -994,7 +961,7 @@
 	help
 	  If you say Y here, you will have a miscdevice named "/dev/hpet/".  Each
 	  open selects one of the timers supported by the HPET.  The timers are
-	  non-periodioc and/or periodic.
+	  non-periodic and/or periodic.
 
 config HPET_RTC_IRQ
 	bool "HPET Control RTC IRQ" if !HPET_EMULATE_RTC
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 777cad0..b1fcdab 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -78,7 +78,6 @@
 obj-$(CONFIG_I8K)		+= i8k.o
 obj-$(CONFIG_DS1620)		+= ds1620.o
 obj-$(CONFIG_HW_RANDOM)		+= hw_random/
-obj-$(CONFIG_FTAPE)		+= ftape/
 obj-$(CONFIG_COBALT_LCD)	+= lcd.o
 obj-$(CONFIG_PPDEV)		+= ppdev.o
 obj-$(CONFIG_NWBUTTON)		+= nwbutton.o
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 00b17ae..2f2c4ef 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -459,7 +459,7 @@
 
 /* Handle shadow device of the Nvidia NForce3 */
 /* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. */
-static int __devinit nforce3_agp_init(struct pci_dev *pdev)
+static int nforce3_agp_init(struct pci_dev *pdev)
 {
 	u32 tmp, apbase, apbar, aplimit;
 	struct pci_dev *dev1;
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index e608dad..acb2de5 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -926,9 +926,10 @@
  * had to poll every port to see if that port needed servicing.
  */
 static void
-do_softint(void *private_)
+do_softint(struct work_struct *work)
 {
-  struct cyclades_port *info = (struct cyclades_port *) private_;
+	struct cyclades_port *info =
+		container_of(work, struct cyclades_port, tqueue);
   struct tty_struct    *tty;
 
     tty = info->tty;
@@ -5328,7 +5329,7 @@
                     info->blocked_open = 0;
                     info->default_threshold = 0;
                     info->default_timeout = 0;
-		    INIT_WORK(&info->tqueue, do_softint, info);
+		    INIT_WORK(&info->tqueue, do_softint);
 		    init_waitqueue_head(&info->open_wait);
 		    init_waitqueue_head(&info->close_wait);
 		    init_waitqueue_head(&info->shutdown_wait);
@@ -5403,7 +5404,7 @@
                     info->blocked_open = 0;
                     info->default_threshold = 0;
                     info->default_timeout = 0;
-		    INIT_WORK(&info->tqueue, do_softint, info);
+		    INIT_WORK(&info->tqueue, do_softint);
 		    init_waitqueue_head(&info->open_wait);
 		    init_waitqueue_head(&info->close_wait);
 		    init_waitqueue_head(&info->shutdown_wait);
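
The cyclades conversion (and the matching ones in via_dmablit, epca and esp further down) is the 2.6.20-era workqueue API change: a work function now receives the struct work_struct pointer itself rather than an opaque void *, INIT_WORK() loses its data argument, and the handler recovers its containing object with container_of(). A minimal sketch of the pattern on a hypothetical structure:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct my_port {
		int			index;
		struct work_struct	tqueue;
	};

	/* New-style handler: the work item itself is the only argument. */
	static void my_softint(struct work_struct *work)
	{
		struct my_port *port = container_of(work, struct my_port, tqueue);

		printk(KERN_DEBUG "servicing port %d\n", port->index);
	}

	static void my_port_setup(struct my_port *port)
	{
		/* Two arguments only: the data pointer is implied by embedding
		 * the work_struct inside struct my_port. */
		INIT_WORK(&port->tqueue, my_softint);
	}
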
diff --git a/drivers/char/decserial.c b/drivers/char/decserial.c
index 85f404e..8ea2bea 100644
--- a/drivers/char/decserial.c
+++ b/drivers/char/decserial.c
@@ -23,20 +23,12 @@
 extern int zs_init(void);
 #endif
 
-#ifdef CONFIG_DZ
-extern int dz_init(void);
-#endif
-
 #ifdef CONFIG_SERIAL_CONSOLE
 
 #ifdef CONFIG_ZS
 extern void zs_serial_console_init(void);
 #endif
 
-#ifdef CONFIG_DZ
-extern void dz_serial_console_init(void);
-#endif
-
 #endif
 
 /* rs_init - starts up the serial interface -
@@ -46,23 +38,11 @@
 
 int __init rs_init(void)
 {
-
-#if defined(CONFIG_ZS) && defined(CONFIG_DZ)
+#ifdef CONFIG_ZS
     if (IOASIC)
 	return zs_init();
-    else
-	return dz_init();
-#else
-
-#ifdef CONFIG_ZS
-    return zs_init();
 #endif
-
-#ifdef CONFIG_DZ
-    return dz_init();
-#endif
-
-#endif
+    return -ENXIO;
 }
 
 __initcall(rs_init);
@@ -76,21 +56,9 @@
  */
 static int __init decserial_console_init(void)
 {
-#if defined(CONFIG_ZS) && defined(CONFIG_DZ)
+#ifdef CONFIG_ZS
     if (IOASIC)
 	zs_serial_console_init();
-    else
-	dz_serial_console_init();
-#else
-
-#ifdef CONFIG_ZS
-    zs_serial_console_init();
-#endif
-
-#ifdef CONFIG_DZ
-    dz_serial_console_init();
-#endif
-
 #endif
     return 0;
 }
diff --git a/drivers/char/drm/drm_sman.c b/drivers/char/drm/drm_sman.c
index 425c823..19c81d2 100644
--- a/drivers/char/drm/drm_sman.c
+++ b/drivers/char/drm/drm_sman.c
@@ -162,6 +162,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_sman_set_manager);
 
 static drm_owner_item_t *drm_sman_get_owner_item(drm_sman_t * sman,
 						 unsigned long owner)
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index b40ae43..ae26919 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -147,14 +147,14 @@
 	if (address > vma->vm_end)
 		return NOPAGE_SIGBUS;	/* Disallow mremap */
 	if (!map)
-		return NOPAGE_OOM;	/* Nothing allocated */
+		return NOPAGE_SIGBUS;	/* Nothing allocated */
 
 	offset = address - vma->vm_start;
 	i = (unsigned long)map->handle + offset;
 	page = (map->type == _DRM_CONSISTENT) ?
 		virt_to_page((void *)i) : vmalloc_to_page((void *)i);
 	if (!page)
-		return NOPAGE_OOM;
+		return NOPAGE_SIGBUS;
 	get_page(page);
 
 	DRM_DEBUG("shm_nopage 0x%lx\n", address);
@@ -272,7 +272,7 @@
 	if (address > vma->vm_end)
 		return NOPAGE_SIGBUS;	/* Disallow mremap */
 	if (!dma->pagelist)
-		return NOPAGE_OOM;	/* Nothing allocated */
+		return NOPAGE_SIGBUS;	/* Nothing allocated */
 
 	offset = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
 	page_nr = offset >> PAGE_SHIFT;
@@ -310,7 +310,7 @@
 	if (address > vma->vm_end)
 		return NOPAGE_SIGBUS;	/* Disallow mremap */
 	if (!entry->pagelist)
-		return NOPAGE_OOM;	/* Nothing allocated */
+		return NOPAGE_SIGBUS;	/* Nothing allocated */
 
 	offset = address - vma->vm_start;
 	map_offset = map->offset - (unsigned long)dev->sg->virtual;
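
The drm_vm hunks switch the "nothing is mapped here" returns from NOPAGE_OOM to NOPAGE_SIGBUS: an unmapped or never-allocated address is not an out-of-memory condition, so the fault should surface to the process as SIGBUS instead of invoking the OOM machinery. A minimal sketch of a ->nopage handler using that convention; the signature follows the vm_operations callback of this kernel generation, and struct my_mapping / my_lookup_page() are hypothetical:

	static struct page *my_vm_nopage(struct vm_area_struct *vma,
					 unsigned long address, int *type)
	{
		struct my_mapping *map = vma->vm_private_data;	/* hypothetical */
		struct page *page;

		if (address > vma->vm_end)
			return NOPAGE_SIGBUS;	/* disallow mremap */
		if (!map)
			return NOPAGE_SIGBUS;	/* nothing allocated: SIGBUS, not OOM */

		page = my_lookup_page(map, address - vma->vm_start);
		if (!page)
			return NOPAGE_SIGBUS;

		get_page(page);
		if (type)
			*type = VM_FAULT_MINOR;
		return page;
	}
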
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index 60c1695..806f9ce 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -500,9 +500,9 @@
 
 
 static void 
-via_dmablit_workqueue(void *data)
+via_dmablit_workqueue(struct work_struct *work)
 {
-	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
 	drm_device_t *dev = blitq->dev;
 	unsigned long irqsave;
 	drm_via_sg_info_t *cur_sg;
@@ -571,7 +571,7 @@
 			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
 		}
 		DRM_INIT_WAITQUEUE(&blitq->busy_queue);
-		INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
+		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
 		init_timer(&blitq->poll_timer);
 		blitq->poll_timer.function = &via_dmablit_timer;
 		blitq->poll_timer.data = (unsigned long) blitq;
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 706733c..7c71eb7 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -200,7 +200,7 @@
 static int info_ioctl(struct tty_struct *, struct file *,
                     unsigned int, unsigned long);
 static void pc_set_termios(struct tty_struct *, struct termios *);
-static void do_softint(void *);
+static void do_softint(struct work_struct *work);
 static void pc_stop(struct tty_struct *);
 static void pc_start(struct tty_struct *);
 static void pc_throttle(struct tty_struct * tty);
@@ -1505,7 +1505,7 @@
 
 		ch->brdchan        = bc;
 		ch->mailbox        = gd; 
-		INIT_WORK(&ch->tqueue, do_softint, ch);
+		INIT_WORK(&ch->tqueue, do_softint);
 		ch->board          = &boards[crd];
 
 		spin_lock_irqsave(&epca_lock, flags);
@@ -2566,9 +2566,9 @@
 
 /* --------------------- Begin do_softint  ----------------------- */
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
 { /* Begin do_softint */
-	struct channel *ch = (struct channel *) private_;
+	struct channel *ch = container_of(work, struct channel, tqueue);
 	/* Called in response to a modem change event */
 	if (ch && ch->magic == EPCA_MAGIC)  { /* Begin EPCA_MAGIC */
 		struct tty_struct *tty = ch->tty;
diff --git a/drivers/char/esp.c b/drivers/char/esp.c
index 15a4ea8..93b5519 100644
--- a/drivers/char/esp.c
+++ b/drivers/char/esp.c
@@ -723,9 +723,10 @@
  * -------------------------------------------------------------------
  */
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
 {
-	struct esp_struct	*info = (struct esp_struct *) private_;
+	struct esp_struct	*info =
+		container_of(work, struct esp_struct, tqueue);
 	struct tty_struct	*tty;
 	
 	tty = info->tty;
@@ -746,9 +747,10 @@
  * 	do_serial_hangup() -> tty->hangup() -> esp_hangup()
  * 
  */
-static void do_serial_hangup(void *private_)
+static void do_serial_hangup(struct work_struct *work)
 {
-	struct esp_struct	*info = (struct esp_struct *) private_;
+	struct esp_struct	*info =
+		container_of(work, struct esp_struct, tqueue_hangup);
 	struct tty_struct	*tty;
 	
 	tty = info->tty;
@@ -2501,8 +2503,8 @@
 		info->magic = ESP_MAGIC;
 		info->close_delay = 5*HZ/10;
 		info->closing_wait = 30*HZ;
-		INIT_WORK(&info->tqueue, do_softint, info);
-		INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info);
+		INIT_WORK(&info->tqueue, do_softint);
+		INIT_WORK(&info->tqueue_hangup, do_serial_hangup);
 		info->config.rx_timeout = rx_timeout;
 		info->config.flow_on = flow_on;
 		info->config.flow_off = flow_off;
diff --git a/drivers/char/ftape/Kconfig b/drivers/char/ftape/Kconfig
deleted file mode 100644
index 0d65189..0000000
--- a/drivers/char/ftape/Kconfig
+++ /dev/null
@@ -1,330 +0,0 @@
-#
-# Ftape configuration
-#
-config ZFTAPE
-	tristate "Zftape, the VFS interface"
-	depends on FTAPE
-	---help---
-	  Normally, you want to say Y or M. DON'T say N here or you
-	  WON'T BE ABLE TO USE YOUR FLOPPY TAPE DRIVE.
-
-	  The ftape module itself no longer contains the routines necessary
-	  to interface with the kernel VFS layer (i.e. to actually write data
-	  to and read data from the tape drive).  Instead the file system
-	  interface (i.e. the hardware independent part of the driver) has
-	  been moved to a separate module.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called zftape.
-
-	  Regardless of whether you say Y or M here, an additional runtime
-	  loadable module called `zft-compressor' which contains code to
-	  support user transparent on-the-fly compression based on Ross
-	  William's lzrw3 algorithm will be produced.  If you have enabled the
-	  kernel module loader (i.e. have said Y to "Kernel module loader
-	  support", above) then `zft-compressor' will be loaded
-	  automatically by zftape when needed.
-
-	  Despite its name, zftape does NOT use compression by default.
-
-config ZFT_DFLT_BLK_SZ
-	int "Default block size"
-	depends on ZFTAPE
-	default "10240"
-	---help---
-	  If unsure leave this at its default value, i.e. 10240. Note that
-	  you specify only the default block size here. The block size can be
-	  changed at run time using the MTSETBLK tape operation with the
-	  MTIOCTOP ioctl (i.e. with "mt -f /dev/qft0 setblk #BLKSZ" from the
-	  shell command line).
-
-	  The probably most striking difference between zftape and previous
-	  versions of ftape is the fact that all data must be written or read
-	  in multiples of a fixed block size. The block size defaults to
-	  10240 which is what GNU tar uses. The values for the block size
-	  should be either 1 or multiples of 1024 up to a maximum value of
-	  63488 (i.e. 62 K). If you specify `1' then zftape's builtin
-	  compression will be disabled.
-
-	  Reasonable values are `10240' (GNU tar's default block size),
-	  `5120' (afio's default block size), `32768' (default block size some
-	  backup programs assume for SCSI tape drives) or `1' (no restriction
-	  on block size, but disables builtin compression).
-
-comment "The compressor will be built as a module only!"
-	depends on FTAPE && ZFTAPE
-
-config ZFT_COMPRESSOR
-	tristate
-	depends on FTAPE!=n && ZFTAPE!=n
-	default m
-
-config FT_NR_BUFFERS
-	int "Number of ftape buffers (EXPERIMENTAL)"
-	depends on FTAPE && EXPERIMENTAL
-	default "3"
-	help
-	  Please leave this at `3' unless you REALLY know what you are doing.
-	  It is not necessary to change this value. Values below 3 make the
-	  proper use of ftape impossible, values greater than 3 are a waste of
-	  memory. You can change the amount of DMA memory used by ftape at
-	  runtime with "mt -f /dev/qft0 setdrvbuffer #NUMBUFFERS". Each buffer
-	  wastes 32 KB of memory. Please note that this memory cannot be
-	  swapped out.
-
-config FT_PROC_FS
-	bool "Enable procfs status report (+2kb)"
-	depends on FTAPE && PROC_FS
-	---help---
-	  Optional. Saying Y will result in creation of a directory
-	  `/proc/ftape' under the /proc file system. The files can be viewed
-	  with your favorite pager (i.e. use "more /proc/ftape/history" or
-	  "less /proc/ftape/history" or simply "cat /proc/ftape/history"). The
-	  file will contain some status information about the inserted
-	  cartridge, the kernel driver, your tape drive, the floppy disk
-	  controller and the error history for the most recent use of the
-	  kernel driver. Saying Y will enlarge the size of the ftape driver
-	  by approximately 2 KB.
-
-	  WARNING: When compiling ftape as a module (i.e. saying M to "Floppy
-	  tape drive") it is dangerous to use ftape's /proc file system
-	  interface. Accessing `/proc/ftape' while the module is unloaded will
-	  result in a kernel Oops. This cannot be fixed from inside ftape.
-
-choice
-	prompt "Debugging output"
-	depends on FTAPE
-	default FT_NORMAL_DEBUG
-
-config FT_NORMAL_DEBUG
-	bool "Normal"
-	---help---
-	  This option controls the amount of debugging output the ftape driver
-	  is ABLE to produce; it does not increase or diminish the debugging
-	  level itself. If unsure, leave this at its default setting,
-	  i.e. choose "Normal".
-
-	  Ftape can print lots of debugging messages to the system console
-	  resp. kernel log files. Reducing the amount of possible debugging
-	  output reduces the size of the kernel module by some KB, so it might
-	  be a good idea to use "None" for emergency boot floppies.
-
-	  If you want to save memory then the following strategy is
-	  recommended: leave this option at its default setting "Normal" until
-	  you know that the driver works as expected, afterwards reconfigure
-	  the kernel, this time specifying "Reduced" or "None" and recompile
-	  and install the kernel as usual. Note that choosing "Excessive"
-	  debugging output does not increase the amount of debugging output
-	  printed to the console but only makes it possible to produce
-	  "Excessive" debugging output.
-
-	  Please read <file:Documentation/ftape.txt> for a short description
-	  how to control the amount of debugging output.
-
-config FT_FULL_DEBUG
-	bool "Excessive"
-	help
-	  Extremely verbose output for driver debugging purposes.
-
-config FT_NO_TRACE
-	bool "Reduced"
-	help
-	  Reduced tape driver debugging output.
-
-config FT_NO_TRACE_AT_ALL
-	bool "None"
-	help
-	  Suppress all debugging output from the tape drive.
-
-endchoice
-
-comment "Hardware configuration"
-	depends on FTAPE
-
-choice
-	prompt "Floppy tape controllers"
-	depends on FTAPE
-	default FT_STD_FDC
-
-config FT_STD_FDC
-	bool "Standard"
-	---help---
-	  Only change this setting if you have a special controller. If you
-	  didn't plug any add-on card into your computer system but just
-	  plugged the floppy tape cable into the already existing floppy drive
-	  controller then you don't want to change the default setting,
-	  i.e. choose "Standard".
-
-	  Choose "MACH-2" if you have a Mountain Mach-2 controller.
-	  Choose "FC-10/FC-20" if you have a Colorado FC-10 or FC-20
-	  controller.
-	  Choose "Alt/82078" if you have another controller that is located at
-	  an IO base address different from the standard floppy drive
-	  controller's base address of `0x3f0', or uses an IRQ (interrupt)
-	  channel different from `6', or a DMA channel different from
-	  `2'. This is necessary for any controller card that is based on
-	  Intel's 82078 FDC such as Seagate's, Exabyte's and Iomega's "high
-	  speed" controllers.
-
-	  If you choose something other than "Standard" then please make
-	  sure that the settings for the IO base address and the IRQ and DMA
-	  channel in the configuration menus below are correct. Use the manual
-	  of your tape drive to determine the correct settings!
-
-	  If you are already successfully using your tape drive with another
-	  operating system then you definitely should use the same settings
-	  for the IO base, the IRQ and DMA channel that have proven to work
-	  with that other OS.
-
-	  Note that this menu lets you specify only the default setting for
-	  the hardware setup. The hardware configuration can be changed at
-	  boot time (when ftape is compiled into the kernel, i.e. if you
-	  have said Y to "Floppy tape drive") or module load time (i.e. if you
-	  have said M to "Floppy tape drive").
-
-	  Please read also the file <file:Documentation/ftape.txt> which
-	  contains a short description of the parameters that can be set at
-	  boot or load time. If you want to use your floppy tape drive on a
-	  PCI-bus based system, please read the file
-	  <file:drivers/char/ftape/README.PCI>.
-
-config FT_MACH2
-	bool "MACH-2"
-
-config FT_PROBE_FC10
-	bool "FC-10/FC-20"
-
-config FT_ALT_FDC
-	bool "Alt/82078"
-
-endchoice
-
-comment "Consult the manuals of your tape drive for the correct settings!"
-	depends on FTAPE && !FT_STD_FDC
-
-config FT_FDC_BASE
-	hex "IO base of the floppy disk controller"
-	depends on FTAPE && !FT_STD_FDC
-	default "0"
-	---help---
-	  You don't need to specify a value if the following default
-	  settings for the base IO address are correct:
-	  <<< MACH-2     : 0x1E0 >>>
-	  <<< FC-10/FC-20: 0x180 >>>
-	  <<< Secondary  : 0x370 >>>
-	  Secondary refers to a secondary FDC controller like the "high speed"
-	  controllers delivered by Seagate or Exabyte or Iomega's Ditto Dash.
-	  Please make sure that the setting for the IO base address
-	  specified here is correct. USE THE MANUAL OF YOUR TAPE DRIVE OR
-	  CONTROLLER CARD TO DETERMINE THE CORRECT SETTING. If you are already
-	  successfully using the tape drive with another operating system then
-	  you definitely should use the same settings for the IO base that has
-	  proven to work with that other OS.
-
-	  Note that this menu lets you specify only the default setting for
-	  the IO base. The hardware configuration can be changed at boot time
-	  (when ftape is compiled into the kernel, i.e. if you specified Y to
-	  "Floppy tape drive") or module load time (i.e. if you have said M to
-	  "Floppy tape drive").
-
-	  Please read also the file <file:Documentation/ftape.txt> which
-	  contains a short description of the parameters that can be set at
-	  boot or load time.
-
-config FT_FDC_IRQ
-	int "IRQ channel of the floppy disk controller"
-	depends on FTAPE && !FT_STD_FDC
-	default "0"
-	---help---
-	  You don't need to specify a value if the following default
-	  settings for the interrupt channel are correct:
-	  <<< MACH-2     : 6 >>>
-	  <<< FC-10/FC-20: 9 >>>
-	  <<< Secondary  : 6 >>>
-	  Secondary refers to secondary a FDC controller like the "high speed"
-	  controllers delivered by Seagate or Exabyte or Iomega's Ditto Dash.
-	  Please make sure that the setting for the IO base address
-	  specified here is correct. USE THE MANUAL OF YOUR TAPE DRIVE OR
-	  CONTROLLER CARD TO DETERMINE THE CORRECT SETTING. If you are already
-	  successfully using the tape drive with another operating system then
-	  you definitely should use the same settings for the IO base that has
-	  proven to work with that other OS.
-
-	  Note that this menu lets you specify only the default setting for
-	  the IRQ channel. The hardware configuration can be changed at boot
-	  time (when ftape is compiled into the kernel, i.e. if you said Y to
-	  "Floppy tape drive") or module load time (i.e. if you said M to
-	  "Floppy tape drive").
-
-	  Please read also the file <file:Documentation/ftape.txt> which
-	  contains a short description of the parameters that can be set at
-	  boot or load time.
-
-config FT_FDC_DMA
-	int "DMA channel of the floppy disk controller"
-	depends on FTAPE && !FT_STD_FDC
-	default "0"
-	---help---
-	  You don't need to specify a value if the following default
-	  settings for the DMA channel are correct:
-	  <<< MACH-2     : 2 >>>
-	  <<< FC-10/FC-20: 3 >>>
-	  <<< Secondary  : 2 >>>
-	  Secondary refers to a secondary FDC controller like the "high speed"
-	  controllers delivered by Seagate or Exabyte or Iomega's Ditto Dash.
-	  Please make sure that the setting for the IO base address
-	  specified here is correct. USE THE MANUAL OF YOUR TAPE DRIVE OR
-	  CONTROLLER CARD TO DETERMINE THE CORRECT SETTING. If you are already
-	  successfully using the tape drive with another operating system then
-	  you definitely should use the same settings for the IO base that has
-	  proven to work with that other OS.
-
-	  Note that this menu lets you specify only the default setting for
-	  the DMA channel. The hardware configuration can be changed at boot
-	  time (when ftape is compiled into the kernel, i.e. if you said Y to
-	  "Floppy tape drive") or module load time (i.e. if you said M to
-	  "Floppy tape drive").
-
-	  Please read also the file <file:Documentation/ftape.txt> which
-	  contains a short description of the parameters that can be set at
-	  boot or load time.
-
-config FT_FDC_THR
-	int "Default FIFO threshold (EXPERIMENTAL)"
-	depends on FTAPE && EXPERIMENTAL
-	default "8"
-	help
-	  Set the FIFO threshold of the FDC. If this is higher the DMA
-	  controller may serve the FDC after a higher latency time. If this is
-	  lower, fewer DMA transfers occur leading to less bus contention.
-	  You may try to tune this if ftape annoys you with "reduced data
-	  rate because of excessive overrun errors" messages. However, this
-	  doesn't seem to have too much effect.
-
-	  If unsure, don't touch the initial value, i.e. leave it at "8".
-
-config FT_FDC_MAX_RATE
-	int "Maximal data rate to use (EXPERIMENTAL)"
-	depends on FTAPE && EXPERIMENTAL
-	default "2000"
-	---help---
-	  With some motherboard/FDC combinations ftape will not be able to
-	  run your FDC/tape drive combination at the highest available
-	  speed. If this is the case you'll encounter "reduced data rate
-	  because of excessive overrun errors" messages and lots of retries
-	  before ftape finally decides to reduce the data rate.
-
-	  In this case it might be desirable to tell ftape beforehand that
-	  it need not try to run the tape drive at the highest available
-	  speed. If unsure, leave this disabled, i.e. leave it at 2000
-	  bits/sec.
-
-config FT_ALPHA_CLOCK
-	int "CPU clock frequency of your DEC Alpha" if ALPHA
-	depends on FTAPE
-	default "0"
-	help
-	  On some DEC Alpha machines the CPU clock frequency cannot be
-	  determined automatically, so you need to specify it here ONLY if
-	  running a DEC Alpha, otherwise this setting has no effect.
-
diff --git a/drivers/char/ftape/Makefile b/drivers/char/ftape/Makefile
deleted file mode 100644
index 0e67d2f..0000000
--- a/drivers/char/ftape/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#       Copyright (C) 1997 Claus Heine.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-# 
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-# 
-# You should have received a copy of the GNU General Public License
-# along with this program; see the file COPYING.  If not, write to
-# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-#
-# $Source: /homes/cvs/ftape-stacked/ftape/Makefile,v $
-# $Revision: 1.4 $
-# $Date: 1997/10/05 19:17:56 $
-#
-#      Makefile for the QIC-40/80/3010/3020 floppy-tape driver for
-#      Linux.
-#
-
-obj-$(CONFIG_FTAPE)		+= lowlevel/
-obj-$(CONFIG_ZFTAPE)		+= zftape/
-obj-$(CONFIG_ZFT_COMPRESSOR)	+= compressor/
diff --git a/drivers/char/ftape/README.PCI b/drivers/char/ftape/README.PCI
deleted file mode 100644
index 18de159..0000000
--- a/drivers/char/ftape/README.PCI
+++ /dev/null
@@ -1,81 +0,0 @@
-Some notes for ftape users with PCI motherboards:
-=================================================
-
-The problem:
-------------
-
-There have been some problem reports from people using PCI-bus based
-systems getting overrun errors.
-I wasn't able to reproduce these until I ran ftape on a Intel Plato
-(Premiere PCI II) motherboard with bios version 1.00.08AX1.
-It turned out that if GAT (Guaranteed Access Timing) is enabled (?)
-ftape gets a lot of overrun errors.
-The problem disappears when disabling GAT in the bios.
-Note that Intel removed this setting (permanently disabled) from the
-1.00.10AX1 bios !
-
-It looks like that if GAT is enabled there are often large periods
-(greater than 120 us !??) on the ISA bus that the DMA controller cannot
-service the floppy disk controller.
-I cannot imagine this being acceptable in a decent PCI implementation.
-Maybe this is a `feature' of the chipset. I can only speculate why
-Intel choose to remove the option from the latest Bios...
-
-The lesson of this all is that there may be other motherboard
-implementations having the same of similar problems.
-If you experience a lot of overrun errors during a backup to tape,
-see if there is some setting in the Bios that may influence the
-bus timing.
-
-I judge this a hardware problem and not a limitation of ftape ;-)
-My DOS backup software seems to be suffering from the same problems
-and even refuses to run at 1 Mbps !
-Ftape will reduce the data-rate from 1 Mbps to 500 Kbps if the number
-of overrun errors on a track exceeds a threshold.
-
-
-Possible solutions:
--------------------
-
-Some of the problems were solved by upgrading the (flash) bios.
-Other suggest that it has to do with the FDC being on the PCI
-bus, but that is not the case with the Intel Premiere II boards.
-[If upgrading the bios doesn't solve the problem you could try
-a floppy disk controller on the isa-bus].
-
-Here is a list of systems and recommended BIOS settings:
-
-
-        Intel Premiere PCI (Revenge):
-
-Bios version 1.00.09.AF2 is reported to work.
-
-
-
-        Intel Premiere PCI II (Plato):
-
-Bios version 1.00.10.AX1 and version 11 beta are ok.
-If using version 1.00.08.AX1, GAT must be disabled !
-
-
-
-        ASUS PCI/I-SP3G:
-
-Preferred settings:     ISA-GAT-mode : disabled
-                        DMA-linebuffer-mode : standard
-                        ISA-masterbuffer-mode : standard
-
-
-        DELL Dimension XPS P90
-
-Bios version A2 is reported to be broken, while bios version A5 works.
-You can get a flash bios upgrade from http://www.dell.com
-
-
-To see if you're having the GAT problem, try making a backup
-under DOS. If it's very slow and often repositions you're
-probably having this problem.
-
-                        --//--
- LocalWords:  ftape PCI bios GAT ISA DMA chipset Mbps Kbps FDC isa AF ok ASUS
- LocalWords:  SP linebuffer masterbuffer XPS http www com
diff --git a/drivers/char/ftape/RELEASE-NOTES b/drivers/char/ftape/RELEASE-NOTES
deleted file mode 100644
index 03799db..0000000
--- a/drivers/char/ftape/RELEASE-NOTES
+++ /dev/null
@@ -1,966 +0,0 @@
-Hey, Emacs, we're -*-Text-*- mode!
-
-===== Release notes for ftape-3.04d 25/11/97 =====
-- The correct pre-processor statement for "else if" is "#elif" not
-  "elsif".
-- Need to call zft_reset_position() when overwriting cartridges
-  previously written with ftape-2.x, sftape, or ancient
-  (pre-ftape-3.x) versions of zftape.
-
-===== Release notes for ftape-3.04c 16/11/97 =====
-- fdc_probe() was calling DUMPREGS with a result length of "1" which
-  was just fine. Undo previous change.
-
-===== Release notes for ftape-3.04b 14/11/97 =====
-
-- patches/2.x.x/floppy.c.diff was somewhat broken, releasing i/o
-  regions it never had allocated.
-- fdc_probe() was calling DUMPREGS with a result length of "1" instead
-  of "10"
-- Writing deleted data marks if the first segents on track zero are
-  should work now.
-- ftformat should now be able to handle those cases where the tape
-  drive sets the read only status bit (QIC-40/80 cartridges with
-  QIC-3010/3020 tape drives) because the header segment is damaged.
-- the MTIOCFTCMD ioctl may now be issued by the superuser ONLY.
-
-===== Release notes for ftape-3.04a 12/11/97 =====
-- Fix an "infinite loop can't be killed by signal" bug in
-  ftape_get_drive_status(). Only relevant when trying to access
-  buggy/misconfigured hardware
-- Try to compensate a bug in the HP Colorado T3000's firmware: it
-  doesn't set the write protect bit for QIC80/QIC40 cartridges.
-
-===== Release notes for ftape-3.04 06/11/97 =====
-- If positioning with fast seeking fails fall back to a slow seek
-  before giving up.
-- (nearly) no retries on "no data errors" when verifying after
-  formatting. Improved tuning of the bad sector map after formatting.
-- the directory layout has changed again to allow for easier kernel
-  integration
-- Module parameter "ftape_tracing" now is called "ft_tracing" because
-  the "ftape_tracing" variable has the version checksum attached to it.
-- `/proc/ftape' interface for 2.0.* kernels. `/proc/ftape' no longer
-  is a directory but a file that contains all the information formerly
-  provided in separate files under the `/proc/ftape/' directory.
-- Most of the configuration options have been prefixed by "CONFIG_FT_"
-  in preparation of the kernel inclusion. The Makefiles under
-  "./ftape/" should be directly usable by the kernel.
-- The MODVERSIONS stuff is now auto-detected.
-- Broke backslashed multi line options in MCONFIG into separate lines
-  using GNU-make's "+=" feature.
-- The html and dvi version of the manual is now installed under
-  '/usr/doc/ftape` with 'make install`
-- New SMP define in MCONFIG. ftape works with SMP if this is defined.
-- attempt to cope with "excessive overrun errors" by gradually
-  increasing FDC FIFO threshold. But this doesn't seem to have too
-  much an effect.
-- New load time configuration parameter "ft_fdc_rate_limit". If you
-  encounter too many overrun errors with a 2Mb controller then you
-  might want to set this to 1000.
-- overrun errors on the last sector in a segment sometimes result in
-  a zero DMA residue. Dunno why, but compensate for it.
-- there were still fdc_read() timeout errors. I think I have fixed it
-  now, please FIXME.
-- Sometimes ftape_write() failed to re-start the tape drive when a
-  segment without a good sector was reached ("wait for empty segment
-  failed"). This is fixed. Especially important for > QIC-3010.
-- sftape (aka ftape-2.x) has vanished. I didn't work on it for
-  ages. It is probably still possible to use the old code with
-  ftape-3.04, if one really needs it (BUT RECOMPILE IT)
-- zftape no longer alters the contents of already existing volume
-  table entries, which makes it possible to fill in missing fields,
-  like time stamps using some user space program.
-- ./contrib/vtblc/ contains such a program.
-- new perl script ./contrib/scripts/listtape that list the contents of a
-  floppy tape cartridge parsing the output of "mt volinfo" + "mt fsf"
-- the MTWEOF implementation has changed a little bit (after I had a
-  look at amanda). Calling MTWEOF while the tape is still held open
-  after writing something to the tape now will terminate the current
-  volume, and start a new one at the current position.
-- the volume table maintained by zftape now is a doubly linked list
-  that grows dynamically as needed.
-
-  formatting floppy tape cartridges
-  ---------------------------------
-  * there is a new user space formatting program that does most of the
-    dirty work in user space (auto-detect, computing the sector
-    coordinates, adjusting time stamps and statistics). It has a
-    simple command line interface.
-  * ftape-format.o has vanished, it has been folded into the low level
-    ftape.o module, and the ioctl interface into zftape.o. Most of the
-    complicated stuff has been moved to user space, so there was no
-    need for a separate module anymore.
-  * there is a new ioctl MTIOCFTCMD that sends a bare QIC-117 command
-    to the tape drive.
-  * there is a new mmap() feature to map the dma buffers into user
-    space to be used by the user level formatting program.
-  * Formatting of yet unformatted or totally degaussed cartridges
-    should be possible now. FIXME.
-
-===== Release notes for ftape-3.03b, <forgot the exact date> ====
-
-ftape-3.03b was released as a beta release only. Its main new feature
-was support of the DITTO-2GB drive. This was made possible by reverse
-engineering done by <fill in his name> after Iomega failed to support
-ftape. Although they had promised to do so (this makes me feel a bit
-sad and uncomfortable about Iomega).
-
-===== Release notes for ftape-3.03a, 22/05/97 ====
-
-- Finally fixed auto-un-loading of modules for kernels > 2.1.18
-- Add an "uninstall" target to the Makefile
-- removed the kdtime hack
-- texi2www didn't properly set the back-reference from a footnote back
-  to the regular text.
-
-  zftape specific
-  ---------------
-  * hide the old compression map volume. Taper doesn't accept the
-    presence of non-Taper volumes and Taper-written volume on the same
-    tape.
-  * EOD (End Of Data) handling was still broken: the expected behavior
-    is to return a zero byte count at the first attempt to read past
-    EOD, return a zero byte count at the second attempt to read past
-    EOD and THEN return -EIO.
-  
-  ftape-format specific
-  ---------------------
-  * Detection of QIC-40 cartridges in select_tape_format() was broken
-    and made it impossible to format QIC-3010/3020 cartridges.
-  * There are strange "TR-1 Extra" cartridges out there which weren't
-    detected properly because the don't strictly conform to the
-    QIC-80, Rev. N, spec.
-
-===== Release notes for ftape-3.03, 30/04/97 =====
-
-- Removed kernel integration code from the package. I plan to provide
-  a package that can be integrated into the stock kernel separately
-  (hopefully soon).
-  As a result, a simple `make' command now will build everything.
-- ALL compile time configuration options have been moved to the file
-  `MCONFIG'.
-- Quite a few `low level' changes to allow formatting of cartridges.
-- formatting is implemented as a separate module `ftape-format.o'. The
-  modified `mt' program contains sample code that shows how to use it.
-- The VFS interface has been moved from the `ftape.o' module to the
-  high level modules `zftape.o' resp. `sftape.o'. `ftape.o' contains
-  the hardware support only.
-- A bit of /proc support for kernels > 2.1.28
-- Moved documentation to Doc subdir. INSTALL now contains some real
-  installation notes.
-- `install' target in Makefile.
-
-zftape specific:
-----------------
-
-- zftape works for large cartridges now ( > 2^31 bytes)
-- MTIOCVOLINFO and MTIOCGETSIZE now return the size in KILOBYTES,
-  NO LONGER in bytes.
-
-- permissions for write access to a cartridge have changed:
-  * zftape now also takes the file access mode into account
-  * zftape no longer allows writing in the middle of the recorded
-    media. The tape has to be positioned at BOT or EOD for write
-    access.
-
-- MTBSF has changed. It used to position at the beginning of the
-  previous file when called with count 1. This was different from the
-  expected behavior for other Un*x tape drivers (i.e. SCSI). MTBSF
-  with count 1 should merely position at the beginning of the current
-  volume. Fixed. As a result, `tar --verify' now produces the desired
-  result: it verifies the last written volume, not the pre-last
-  written volume.
-
-- The compression map has vanished --> no need for `mt erase' any
-  more. Fast seeking in a compressed volume is still be possible, but
-  takes slightly longer. As a side effect, you may experience an
-  additional volume showing up in front of all others for old
-  cartridges. This is the tape volume that holds the compression map.
-
-- The compression support for zftape has been moved to a separate
-  module `zft-compressor'. DON'T forget to load it before trying to
-  read back compressed volumes. The stock `zftape.o' module probes for
-  the module `zft-compressor' using the kerneld message channel; you
-  have to install `zft-compressor.o' in a place where modprobe can
-  find it if you want to use this.
-
-- New experimental feature that tries to get the broken down GMT time
-  from user space via a kernel daemon message channel. You need to
-  compile and start the `kdtime' daemon contained in the contrib
-  directory to use it. Needed (?) for time stamps in the header
-  segments and the volume table.
-
-- variable block size mode via MTSETBLK 0
-
-- keep modules locked in memory after the block size has been changed
-
-sftape specific:
-----------------
-
-- end of tape handling should be fixed, i.e. multi volume archives
-  written with `afio' can be read back now.
-
-
-===== Release notes for ftape-3.02a, 09/01/97 =====
-
-No big news:
-- call zft_init() resp. sft_init() when compiling the entire stuff
-  into the kernel image.
-- fix bug in ftape-setup.c when NO_TRACE_AT_ALL was defined.
-- fix bug in sftape-eof.c/zftape-eof.c for old kernels (1.2.*)
-- add support for new module interface for recent kernels
-
-===== Release notes for ftape-3.02, 16/12/96 =====
-- Fixed the `FDC unlock command failed' bug in fdc-io.c. When the FIFO
-  was already locked when ftape was loaded, ftape failed to unlock it.
-- Fixed compilation of `contrib/gnumt'. It now finds `mtio.h' even if
-  ftape is NOT included into the kernel source tree.
-- fc-10.c: include <asm/io.h> for inb() and outb().
-- ftape/sftape/zftape: all global variable now have either a `ftape_',
-  a `ft_', `sft_', `zft_' or `qic_' prefix to prevent name clashes
-  with other parts of the kernel when including ftape into the kernel
-  source tree.
-- Kerneld support has changed. `ftape' now searches for a module
-  `ftape-frontend' when none of the frontend (`sftape' or `zftape') is
-  loaded. Please refer to the `Installation/Loading ftape' section of
-  the TeXinfo manual.
-- Add load resp. boot-time configuration of ftape. There are now
-  variables ft_fdc_base, ft_fdc_dma and ft_fdc_irq corresponding to
-  the former FDC_BASE etc. compile time definitions. One can also use
-  the kernel command line parameters to configure the driver if it is
-  compiled into the kernel. Also, the FC-10/FC-20 support is load-time
-  configurable now as well as the MACH-II hack (ft_probe_fc10,
-  resp. ft_mach2). Please refer to the section `Installation/Configure
-  ftape' of the TeXinfo manual.
-- I removed the MODVERSIONS option from `Makefile.module'. Let me alone
-  with ftape and MODVERSIONS unless you include the ftape sources into
-  the kernel source tree.
-- new vendors in `vendors.h':
-  * HP Colorado T3000 
-  * ComByte DoublePlay (including a bug fix for their broken
-    formatting software, thanks to whraven@njackn.com)
-  * Iomega DITTO 2GIG. NOTE: this drive cannot work with ftape because
-    the logical data layout of the cartridges used by this drive does
-    NOT conform to the QIC standards, it is a special Iomega specific
-    format. I've sent mail to Iomega but didn't receive an answer
-    yet. If you want this drive to be supported by ftape, ask Iomega
-    to give me information about it.
-- zftape:
-  * re-introduced the MTIOC_ZFTAPE_GETBLKSZ ioctl for compatibility
-    with zftape 1.06a and earlier. Please don't use it when writing
-    new software, use the MTIOCVOLINFO ioctl instead.
-  * Major overhaul of the code that updates the header segments. Never
-    change the tape label unless erasing the tape. Thus we almost
-    never need to write the header segments, unless we would modify
-    the bad sector map which isn't done yet. Updating of volume table
-    and compression map more secure now although it takes a bit
-    longer.
-  * Fixed bug when aborting a write operation with a signal: zftape
-    now finishes the current volume (i.e. writes an eof marker) at the
-    current position. It didn't before which led to somehow *strange*
-    behavior in this cases.
-  * Keep module locked in memory when using it with  the non-rewinding
-    devices and the tape is not logical at BOT. Needed for kerneld
-    support.
-- sftape:
-  * Keep module locked in memory when using it with  the non-rewinding
-    devices and the tape is not logical at BOT. Needed for kerneld
-    support.
-
-===== Release notes for ftape-3.01, 14/11/96 =====
-
-- Fixed silly bugs in ftape-3.00:
-  * MAKEDEV.ftape: major device number must be 27, not 23 
-  * sftape/sftape-read.c: sftape_read_header_segments() called 
-    itself recursively instead of calling ftape_read_header_segment()
-  * zftape/qic-vtbl.h: conversion of ftape's file marks to zftape's
-    internal volume table was broken.
-  * patches/2.x.x/linux-2.0.21.dif: my RCS (resp. CVS) system replaced
-    the `$Revison:' etc. macros in the `ftape.h' concerning part of the
-    patch :-( Fixed.
-  * info/ftape.info: Fixed misspellings (`cp' <-> `cp -r' etc.)
-  * when ftape/sftape or ftape/zftape was compiled into the kernel the
-    variable ftape_status was declared twice. Fixed.
-  * removed reference to undeclared variable kernel_version when not
-    compiling as module
-  * fixed a bug introduced by the use of bit-fields for some flags
-    (i.e. write_protected, no_cartridge, formatted)
-  * flag `header_read' is now reset correctly to zero when tape is
-    removed.
-- fixed a bug in sftape/sftape-eof.c that was already in the original
-  ftape code. MTFSF/BSF was not handled correctly when positioned
-  right before the file mark (think of tar)
-- Changed TRACE macros (following a suggestion of Marcin Dalecki) to use
-  the predefined __FUNCTION__ macro of GCC. Spares about 4k of code.
-- added new vendor id for Iomega DITTO 2GIG
-- fixed a bug already present in zftape-1.06 when aborting a write
-  with a signal: we now finish the current volume at that
-  position. Header segments remain NOT up to date until an explicit call
-  to MTREW or MTOFFL is done.  
-
-===== Release notes for ftape-3.00, 14/10/96 =====
-
-- Merged ftape with zftape. There are three modules now:
-  ftape for the hardware support, sftape for the implementation of the
-  original ftape eof mark stuff and zftape that implements zftape's way
-  of handling things (compression, volume table, tape blocks of
-  constant length)
-- Documentation in TeXinfo format in the `info' subdirectory.
-- New ioctls for zftape. See zftape/zftape.h
-- Dummy formatting ioctl for ftape. See ftape.h
-- Kernel patch files for the 2.*.* series to include ftape-3.00 in the
-  kernel source tree. These includes a kernel compatible Config.in
-  script and fairly large online information for the kernel configure
-  script.
-- Support for compiling with Linux-1.2.13. 
-- Modified GNU mt from their cpio package that can handle the new
-  ioctls.
-- ftape/sftape/zftape is kerneld save now!
-
-Notes on sftape:
-- sftape implements the eof handling code of the original ftape. If
-  you like to stick with the original ftape stuff, you have to use
-  this module, not zftape.
-- sftape is kerneld save, unlike the original ftape.
-- we keep the entire header segment now in memory, so no need to read
-  it before updating the header segments. Additional memory
-  consumption: 256 bytes. 
-
-Notes for zftape:
-- zftape has support for tapes with format code 6 now, which use a
-  slightly different volume table format compared with other floppy
-  tapes.
-- new ioctls for zftape. Have a look at zftape/zftape.h
-- The internal volume table representation has changed for zftape. Old
-  cartridges are converted automatically.
-- zftape no longer uses compression map segments, which have vanished
-  from the QIC specs, but creates volume table entry that reserves
-  enough space for the compression map. 
-- zftape is kerneld save now.
-- we keep the entire header segment now in memory, so no need to read
-  it before updating the header segments. Additional memory
-  consumption: 256 bytes. 
-
-Notes for contrib/gnumt:
-- modified mt from the GNU cpio package that supports all the new
-  ioctls of zftape.
-Notes for contrib/swapout:
-- This contains the swapout.c program that was written by Kai
-  Harrekilde-Pederson. I simply added a Makefile.
-
-===== Release notes for ftape-2.10, 14/10/96 =====
-
-The ftape maintainer has changed. 
-Kai Harrekilde-Petersen <khp@dolphinics.no>
-has resigned from maintaining ftape, and I,
-Claus-Justus Heine <claus@momo.math.rwth-aachen.de>,
-have taken over.
-
-- Added support for tapes with `format code 6', i.e. QIC-3020 tapes
-  with more than 2^16 segments.
-- merged changes made by Bas Laarhoven with ftape-2.09. Refer
-  to his release notes below. I've included them into this
-  file unchanged for your reference.
-- disabled call stack back trace for now. This new feature
-  introduced by the interim release 2.0.x still seems to
-  be buggy.
-- Tried to minimize differences between the ftape version
-  to be included into the kernel source tree and the standalone
-  module version.
-- Reintroduced support for Linux-1.2.13. Please refer to the
-  Install-guide. 
-
-===== Release notes for ftape-2.09, 16/06/96 =====
-
-There aren't any really big news in this release, mostly just that I
-(the maintainer) have changed my email address (due to a new job).  My
-new address is <khp@dolphinics.no>
-
-- The CLK_48MHZ and FDC_82078SL options has gone (all 2Mbps cards seem
-  to use a 48MHz oscillator anyway and I haven't heard of an 'SL
-  chip out there).
-- The S82078B has been `downgraded' to i82077AA compability.
-- TESTING option revived.  Right now, it'll enable the (seriously broken)
-  2Mbps code.  If you enable it, you'll experience a tape drive that's
-  *really* out to lunch!
-- Some (bold) changes in the init code.  Please notify me if they
-  break things for you.
-
-===== Release notes for ftape-2.08, 14/03/96 =====
-
-If you correct a problem with ftape, please send your patch to
-khp@dolphinics.no too.
-
-- Updated to reflect that NR_MEM_LISTS is gone in 1.3.74
-- Teac 700 added to list of known drives.
-- The registered device name is now "ft" rather than "ftape".
-
-===== Release notes for ftape-2.07a, 14/03/96 =====
-
-Bugfixes by Marcin Dalecki <dalecki@namu03.gwdg.de>:
-- In the last release it just compiled against 1.3.70;
-  now the params to request_irq() and free_irq are() are fixed, so it also 
-  works in 1.3.73 :-)
-- Support for modules is now correct for newer kernels.
-
-===== Release notes for ftape-2.07, 04/03/96 =====
-
-
-- ftape updated to compile against 1.3.70.
-- Iomega 700 and Wangtek 3200 recognised.
-
-
-===== Release notes for ftape-2.06b, 13/02/96 =====
-
-Another simple bugfix version.
-
-- Jumbo 700 recognised.
-- Typo in vendors.h fixed.
-
-
-===== Release notes for ftape-2.06a, 10/02/96 =====
-
-This release is a simple bugfix version.
-
-- Linux/SMP: ftape *should* work.
-- FC-10/20: Only accepts IRQs 3-7, or 9.  If IRQ 9, properly tell the card
-  to use IRQ 2.  Thanks to Greg Crider (gcrider@iclnet.org) for finding and
-  locating this bug and testing the patch.
-- Insight drive recognised correctly again.
-- Motor-on wakeup version of the Iomega 250 drive added
-
-
-===== Release notes for ftape-2.06, 28/01/96 =====
-
-Special thanks go to Neal Friedman and Steven Sorbom for their
-help in producing and testing this release.
-
-I have continued to clean up the code, with an eye towards inclusion
-of ftape in Linus' official kernel (In fact, as I type this, I am
-running on a kernel with ftape support statically linked).  I have
-test-compiled ftape against my 1.2.13 tree without problems.
-Hopefully, everything should be OK for the v1.2.x people.
-
-WARNING! Alan Cox has mailed me that ftape does *NOT* work with
-Linux/SMP.  If you try to run ftape under Linux/SMP, it will cause a
-kernel deadlock (which is worse than a panic).
-
-- QIC-3020/TR-3: 1Mbps support works.  Neal is capable of reading and
-  writing data to a tape.  ftape will automatically detect the type of
-  tape (e.g. TR-3 vs QIC-80) and move the fdc in and out of
-  "perpendicular mode" as necessary.
-- 2Mbps support is disabled by default, since it is not fully
-  debugged.  If you are adventurous, remove -DFDC_82078SL in the
-  Makefile and see what happens :-)
-- fdc detection: silly bugs removed (Only 2Mbps fdcs were affected)
-  and added detection of the National Semiconductors PC8744 fdc chip
-  (used in the PC873xx "super-IO" chips).
-- Removed warning about incompatible types when compiling with Linux
-  1.2.x.
-- README.PCI updated with info about the DELL Dimension XPS P90.
-- Connor TST3200R added to detected drives.
-- `swapout' utility added to distribution.  It will dirty 5Meg of
-  memory, trying to swap out other programs.  Just say `make swapout'
-  to build it.  ftape will do this automatically Real Soon Now (ie:
-  when I have found out which kernel memory alloc function to call).
-
-
-===== Release notes for ftape-2.05, 08/01/96 =====
-
-- For v1.2.x Kernels, you must apply the patch linux-1.2/ksyms.patch to
-  the kernel and rebuild it (it adds the __get_dma_pages symbol to
-  ksyms.c).
-- Included new asm-i386/io.h file from v1.3.x kernel series, to enable
-  gcc v.2.7.[12] to compile v1.2.x kernels (linux-1.2/io.h).
-- Module versions: If you wish to compile ftape as a versioned module,
-  you must first compile your kernel with CONFIG_MODVERSIONS=y.
-  Otherwise, you will get complaints that <linux/modversions.h> does not
-  exist (if that happens, a `touch modversions.h' will help you out).
-- CLK_48MHZ: new define in the Makefile (default: non-zero).  If you have
-  a tape controller card that uses the i82078(-1) chip, but cannot get
-  it to work with ftape, try set it to 0 (and please report this).
-- QIC-3010/3020: Complete support is still missing, but will hopefully
-  come soon.  Steven Sorbom has kindly provided me with hints about
-  this.  Writing of QIC-3020 tapes definitely does NOT work (do not try
-  it! - the drive will not be in "perpendicular mode" and this will ruin
-  the formatting info on the tape).
-- ftape_num_buffers is out of fashion: use NR_BUFFERS instead (and
-  recompile if you want to change it :-).
-
-
-===== Release notes for ftape-2.04, 01/01/96 =====
-
-This version by Kai Harrekilde-Petersen <khp@dolphinics.no>
-
-- ALERT! Support for Kernels earlier then v1.1.85 is about to go away.
-  I intend to clean up some of the code (getting rid of an annoyingly
-  large numbers of #ifdef mostly), which means that support for
-  pre-1.1.85 kernels must go as well.
-- NR_FTAPE_BUFFERS is gone; You can instead select the number of dma
-  buffers by saying `insmod ftape.o ftape_num_buffer=<n>' instead.
-- Configure script gone.  ftape will now automagically determine your
-  kernel version by /usr/include/linux/version.h instead.
-- CONFIG_MODVERSIONS now work.  All combinations of versioned /
-  unversioned kernel and ftape module works (at least with my 1.3.52
-  kernel).
-- If you have problems with inserting ftape into an old (1.2.x)
-  kernel (e.g. insmod says "1.2.8 does not match 1.2.8), recompile
-  your modules utilities with your new compiler.
-- Reveal TB1400 drive added to vendors.h
-- Support for the i82078-1 (2Mbps) chip is coming along.  The
-  biggest problem is that I don't have such a card, which makes
-  testing / debugging somewhat problematic.  The second biggest
-  problem is that I do not have the QIC-3010/3020 standards either.
-  Status right now is that the chip is detected, and it should be
-  possible to put it into 2Mbps mode.  However, I do not know what
-  "extras" are needed to complete the support.  Although putting the
-  i82078 into 1Mbps mode ought to work out of the box, it doesn't
-  (right now, ftape complains about id am errors).
-
-
-===== Release notes for ftape-2.04beta5, 29/12/95 =====
-
-Bas offline linux-tape
-----------------------
-For reasons only known to the majordomo mail list processor, Bas was
-kicked off the linux-tape list sometime during the summer.  Being
-overworked at his for-pay job, he didn't notice it much.  Instead I
-(Kai, khp@dolphinics.no) has worked on ftape to produce the 2.04(beta)
-version.
-
-zftape
-------
-Note that there exists a much improved version of ftape, written by
-Claus-Justus Heine <claus@willi.math.rwth-aachen.de> which is named
-zftape, which conforms to the QIC-80 specs on how to mark backups, and
-is capable of doing automatic compression.  However, zftape makes
-substantial changes to ftape, and I (Kai) have therefore declined to
-integrate zftape into ftape.  Hopefully, this will happen soon.
-
-CONFIG_QIC117 removed from the kernel
--------------------------------------
-The biggest change of all is that ftape now will allocate its dma
-buffers when it is inserted.  The means that the CONFIG_QIC117 option
-has disappeared from the Linux kernel as of v1.3.34.  If you have an
-earlier kernel, simply answer 'no' to the question will do the trick
-(if you get complains about __get_free_pages() missing, contact the
-linux-tape mailing list).
-
-Note that ftape-2.04beta will work equally well on kernels with and
-without `ftape support'.  The only catch is, that you will waste
-around 96-128Kb of precious DMA'able memory on a box that has ftape
-support compiled in.
-
-Now for the real changes:
-
-- FC-20 can now use DMA channels 1, 2, and 3. Thanks to Daniel
-  Cohen, catman@wpi.edu.
-- ftape no longer requires a (gigantic) 96Kb buffer to be statically
-  allocated by the kernel.
-- Added new Iomega drive (8882) to vendors.h
-- -fno-strength-reduce added to Makefile, since GCC is broken.
-- i82078-1 (2Mbps) FDC support started.
-
-
-===== Release notes for ftape-2.03b, 27/05/95 =====
-
-- Prevented verify_area from returning an error if called with zero length.
-- Fixed a bug in flush_buffers that caused too much padding to be
-  written when a final segment had bad sectors.
-- Increased maximum fast-seek overshoot value from 5 to 10 segments.
-- Break out of the loop after 5 retries when positioning fails.
-- Fixed wrong calculation of tape length for QIC-3010 and QIC-3020
-  tapes (densities were swapped).
-- Fixed wrong calculation of overshoot on seek_forward: Wrong sign
-  of error.
-- Suppress (false) error message due to new tape loaded.
-- Added two new CMS drives (11c3 and 11c5) to vendors.h.
-
-
-===== Release notes for ftape-2.03a, 09/05/95 =====
-
-- Fixed display of old error (even if already cleared) in ftape_open.
-- Improved tape length detection, ioctls would fail for 425 ft tapes.
-  Until the tape length is calculated with data from the header
-  segment, we'll use worst-case values.
-- Clear eof_mark after rewinding ioctls.
-- Fixed wrong version message (2.03 had 2.02g id).
-- Fixed bug that caused the fdc to be reset very frequently.
-  This shouldn't affect normal operation but the timing of the
-  report routines has changed again and that may cause problems.
-  We'll just have to find out....
-- Implemented correct write precompensation setting for QIC-3010/3020.
-- Cleaned up fdc_interrupt_wait routine. Hope it still works :-)
-- Finally removed (already disabled) special eof mark handling for
-  gnu tar.
-- Changed order of get_dma_residue and disable_dma in fdc-isr.c
-  because the current order would fail on at least one system.
-  We're back to the original order again, hope (and expect) this
-  doesn't break any other system.
-
-
-===== Release notes for ftape-2.03, 07/05/95 =====
-
-(Changes refer to the first ftape-2.02 release)
-
-Support for wide and extended length tapes
-------------------------------------------
-The Conner TSM 420 and 850 drives are reported to be working.
-I haven't received any reports about other brands; the TSM 420
-and 850 seem to be the most widely used wide drives.
-Extended length tapes (425 ft) with normal QIC-80 drives
-are operating too (At least I've had no reports stating otherwise).
-_Not_ yet completely supported (although they may work) are
-QIC-3020 drives, and 2 Mbps floppy disk controllers won't work at
-the highest speed.
-If someone is kind enough to send me one of these, I'll include
-support for it too ;-)
-
-Easier configuration
---------------------
-Problems due to wrong settings in the Makefile are prevented
-by using a configuration script that sets the necessary (kernel
-version dependent) compile time options.
-The kernel version is now determined from the sources found
-at /usr/src/linux, or, if not found, the old way using
-/proc/version.
-Versioned modules will be used automatically when supported
-by, and configured in, the kernel.
-Note that the current modules code (1.1.87) is still broken
-and _needs_ the fix included in the insmod directory.
-Please don't send me any more Oops reports caused by insmod :-(
-
-Reduced module size
--------------------
-The standard module size is much reduced and some compile time
-options can even reduce it further. (I don't recommend this
-for normal use but it can be handy for rescue diskettes)
-
-Option:           Approx. module size:
-
-<standard>             150 Kb
-NO_TRACE               125 Kb
-NO_TRACE_AT_ALL         67 Kb
-
-
-Much improved driver interruption
----------------------------------
-Most possible loops have been broken and signal detection
-has been improved.
-In most cases the driver can be aborted by ^C (SIGINT), and
-SIGKILL (kill -9) will be a sure kill.
-(Note that aborting a tape operation may damage the last
-data written to tape)
-
-Improved error recovery
------------------------
-Ftape now returns an error (ENODATA) to the application if
-a segment proves to be unrecoverable and then skips the
-bad segment.
-This causes most applications (tar and afio) to continue to
-work, losing only a small amount (up to 29 Kb) of data.
-Retried read operations will now be done slightly off-track
-to improve the chance of success. Serious head off-track
-errors will be detected.
-
-FC-10 and FC-20 controllers
----------------------------
-Ftape now supports both the old CMS FC-10 and the newer FC-20
-controllers.
-Because the operation of these cards is still undocumented,
-thus far they will only work with the default settings (see
-Makefile). Any feedback on how to use them with other settings
-will be welcome!
-Compilation will fail if one changes the settings to illegal
-values.
-
-Kernels and compilers
----------------------
-Ftape is currently being developed using the 2.5.8 compiler.
-The older 2.4.5 probably works too (Set option in Makefile!).
-I have no experience with any later compilers or with ELF support.
-Any information on this is welcome.
-The latest kernel I have tested ftape with is 1.2.6.
-
-Compression
------------
-An impressive collection of changes for ftape including
-on-the-fly compression is still lying on my desk.
-If 2.03 proves to be reliable I might start integrating these
-but as usual, I'm short on time :-(
-
-Formatting
-----------
-There is still no way to format tapes under Linux. As far as
-I know all attempts to write such a program have died now.
-Since formatted tapes are rather common now, I think all we
-need is a utility that writes a worst-case pattern and verifies
-that with the drive put in verify mode, reducing margins.
-Any takers?
-
-Furthermore
------------
-Cleaned up messages.
-Prepared to support multiple tape drives on one fdc.
-Thanks to all the people who sent bug reports and helped me
-improve the driver. Without trying to be complete I'll mention
-Gary Anderson (without his accurate reports and unreliable
-hardware there wouldn't be a 2.03), Stefan Kneifel (FC-20),
-Robert Broughton (FC-20, you were almost there ;-), Bjorn
-Ekwall (for the versioned modules and buggy insmod ;-), Peter
-Fox, Christopher Oliver, Ralph Whittaker and not the least
-Linus Torvalds (for Linux and keeping me busy because of
-changes to the kernel ;-)
-Thanks to anyone I forgot, for the bug reports, the ftape
-bashing and the mental support...
-
-
-That's it for now. Have Fun,
-
-Bas.
-
-
-===== Release notes for ftape-2.02g, 06/05/95 =====
-
-- Added extra test to break read-id loop with signal.
-- Changed rewind code to handle negative overshoot for drives
-  that take very long to start or stop.
-- Let use of get/set i/o-regions depend on kernel version.
-- Changed code to use a more general test for conditional
-  compilations depending on kernel version.
-- Improved micro-step functionality to go off-track only
-  while reading (id & data).
-- Added failure on tape-not-referenced bit in ftape_command.
-- Added FOREVER option to read-wait routine.
-- Changed read-id to use shorter timeout causing smaller
-  rewinds on timeout.
-- Made kernel-interface functions static.
-
-
-===== Release notes for ftape-2.02f, 03/05/95 =====
-
-- Added support for dual tape drives on my system, extended Configure
-  script to detect host 'dodo'.
-- Log media defect in history if ecc failed and no data was returned.
-- Fixed Configure script that was failing for kernel versions with
-  double digit version or revision numbers.
-
-
-===== Release notes for ftape-2.02e, 01/05/95 =====
-
-- Fixed reposition loop at logical eot (failing read_id).
-- Fixed 34 segment offset when rewinding.
-- Added fast seek capability for more than 255 segments.
-- Fixed wrong busy result from ftape_command causing reverse
-  seek to fail.
-- Added breakout from infinite rewind loop (if something fails).
-
-
-===== Release notes for ftape-2.02d, 30/04/95 =====
-
-- Improved abort handling on signals: Interrupt will make a graceful
-  exit, Kill will be less nice and should be used if everything
-  else fails.
-- Included check for tape-head off track.
-- Implemented exit from tape-start loop.
-- Added kernel io-port registration.
-- Implemented skip of failing segment (ENODATA) on ecc failure.
-  This allows afio and tar to continue when the tape is damaged.
-- Made distinction between drive names with different codes.
-
-
-===== Release notes for ftape-2.02c, 22/04/95 =====
-
-- Fixed too tight command queueing after tape stop/pause command
-  issued from within the interrupt service routine (showed up as timeout
-  on Acknowledge errors during retries on some systems)
-- Tried to fix timeouts when using 425 ft tape because the extended
-  length doesn't seem to be detected by the hardware.
-  We now use the format code from the header segment and adjust the
-  timing after reading the header segment.
-- Fixed some messages stating 'unexpected something...' for conditions
-  that are not unexpected anymore.
-- Started preparations for merge of dynamic buffer allocation and
-  compression code.
-- Changed some debug messages to include relevant segment information
-  at level 4.
-- Included early bail-out when drive offline, preventing a lot of
-  false messages.
-- Moved ftape_parameter_xxx() offsets into function instead of in calls.
-- Removed 'weird, drive busy but no data' error when caused by
-  an error during a read-id.
-- Improved 'timeout on acknowledge' diagnostics.
-- Moved MODULE option into Configure.
-- Reduced code size when no tracing at all was set (Claus Heine).
-- No longer log error code 0 (no error) as an error.
-
-
-===== Release notes for ftape-2.02b, 09/04/95 =====
-
-- Relaxed timing for status operation and displaying
-  abnormal results. Hopefully this shows what's going
-  wrong with the Conner TSM850R drives.
-- Created script for configuration, using version number
-  of kernel source if available, otherwise /proc/version.
-- Fixed conditionals in kernel-interface.c.
-- Removed unavoidable TRACE output.
-
-
-===== Release notes for ftape-2.02a, 01/04/95 =====
-
-- Implemented `new-style' (versioned) modules support for new
-  kernels.
-- Reduced size of module by moving static data to bss.
-- Now using version number of kernel source instead of running
-  kernel for kernel versions >= 1.1.82
-- Added feedback on drive speeds to vendor information.
-- Included fixed insmod sources to distribution (Let's hope
-  the modules distribution gets fixed soon :-/).
-
-Note that I haven't yet implemented any of the code extensions I
-received. I hope to find some time to do this soon.
-
-
-===== Release notes for ftape-2.02, 15/01/95 =====
-
-
-- Fixed failing repositioning when overshoot was incremented.
-- Fixed rate selection: Because of a deficiency in the QIC-117
-  specification one cannot distinguish between a not-implemented
-  and a failing command. Therefore we now try to find out if the
-  drive supports this command before using it.
-- Fixed error retry using wrong offset in fdc-isr.
-- Improved retry code to retry only once on a single no-data
-  error in a segment.
-- Validate sector number extracted from eof mark because an
-  invalid file mark (due to ???) could cause a kernel panic.
-- Split ftape-io.c into ftape-io.c and ftape-ctl.c files.
-- Corrected too high media error count after writing to
-  a bad tape.
-- Added #include <asm/segment.h> again because old kernel versions
-  need it.
-- Fixed fdc not being disabled when open failed because no tape
-  drive was found.
-- Fixed problem with soft error in sector 32 (shift operator with
-  shiftcount 32 is not defined).
-
-
-===== Release notes for ftape-2.01, 08/01/95 =====
-
-
-- Removed TESTING setting from distributed Makefile.
-- Fixed `mt asf' failure: Rewind was deferred to close which
-  overruled the fsf ioctl.
-- Prevented non-interruptible commands being interrupted.
-- Added missing timeout.pause setting.
-- Maximum tape speed read from drive type information table.
-  If the information is not in the table (0) the drive will
-  determine the speed itself and put a message in the logfile.
-  This information should then be added to the table in the
-  vendors.h file (and reported to me).
-- Added call to ftape_init_drive after soft reset for those
-  (antique) drives that don't do an implicit seek_load_point
-  after a reset or power up.
-- Don't try to set data rate if reset failed.
-- Prevent update of seek variables when starting from the
-  beginning or the end of the tape.
-- Fixed wrong adjustment of overshoot in seek_forward().
-- Added sync to Makefile (again).
-- Added code to diagnose timer problems (calibr.c).
-- Replaced time differences by timediff calls.
-- Removed reference to do_floppy from object for recent kernels.
-- Fixed wrong display of 'failing dma controller' message.
-- Removed various no longer used #include statements.
-- Added max. tape speed value to vendor-struct.
-- Changed ftape-command to check pre-conditions and wait
-  if needed.
-- Further updated qic117.h to rev G.
-- Combined command name table and restrictions table to one.
-  Extended this table with some new fields.
-- Increased timeout on Ack timer value and included code to
-  report out of spec behaviour.
-- Increased rewind timeout margin to calculated + 20%.
-- Improved data rate selection so it won't fail on some
-  older (pre standard) drives.
-- Changed initialisation code so drive will be rewound if the
-  driver is reloaded and the tape is not at bot.
-- Moved some of the flush operations from close to the ioctls.
-- Added exit code value to failing verify area message.
-- Loop until tape halted in smart-stop.
-- Fast seek handled specially if located at bot or eot.
-- Being more conservative on overshoot value.
-
-
-===== Release notes for ftape-2.00, 31/12/94 =====
-
-  The Install-guide has been completely rewritten and now also includes
-some information on how to use the driver. If you're either new
-to ftape or new to Unix tape devices, make sure to read it!
-
-  If you own a PCI system and experience problems with the
-ftape driver, make sure to read the README.PCI file. It contains
-some hints on how to fix your hardware.
-
-  For anybody who hasn't noticed: The version number of the
-driver has been incremented (the latest released version was
-version 1.14d).
-  This has been done for two major reasons:
-
-  o  A new (better) error recovery scheme is implemented.
-  o  Support for new drive types has been added.
-
-  All these improvements/changes will probably include a couple
-of new (and old?) bugs. If you encounter any problems that you think
-I'm not yet aware of, feel free to send a report to <bas@vimec.nl>.
-  I recommend keeping a version of ftape-1.14d available, just
-in case ;-)
-
-  This version should work with all kernel versions from 1.0.9 up
-to 1.1.72 (and probably earlier and later versions too).
-
-
-Major new features:
-
-- Better handling of tapes with defects: When a sector repeatedly
-  (SOFT_RETRIES in ftape.h) cannot be written to or read from, it is
-  marked as a hard error and gets skipped.
-  The error correction code can handle up to three of these hard
-  errors provided there are no other errors in that segment (32 Kb).
-  
-- Allows writing to tapes with defects (although the risk of losing
-  data increases!)
-  Look for the media-defects entry printed with the statistics when
-  the tape is closed. A non-zero value here shows a bad tape.
-  [the actual count is wrong (too high), this is a known bug].
-
-- Use of backup header segment if first one is failing.
-
-- Support for extended length tapes with QIC-80: both 425 and 1100 ft.
-  0.25 inch tapes are now recognized and handled.
-
-- Support for new QIC-80 drives with 8 mm `wide' tapes (e.g. Conner
-  TSM 420).
-
-- Support for new QIC-3010 and QIC-3020 drives (experimental) with
-  both 0.25 inch and 8 mm tapes.
-
-Some minor features were added, a couple of small bugs were fixed and
-probably some new ones introduced ;-).
-
-[lseek() didn't make it into this version]
-
-Have fun,
-
-Bas.
-----
- LocalWords:  ftape MCONFIG mt VFS zftape resp sftape proc subdir MTIOCVOLINFO
- LocalWords:  MTIOCGETSIZE BOT EOD MTBSF zft kerneld modprobe kdtime contrib TR
- LocalWords:  MTSETBLK afio uninstall texi www EIO QIC init sft eof aka dma GB
- LocalWords:  SIGKILL MTIOCFTCMD mmap Iomega FDC fdc io gnumt mtio fc asm inb
- LocalWords:  outb ft qic frontend TeXinfo irq mach MODVERSIONS CONFIG html dvi
- LocalWords:  usr doc SMP Mb Dunno FIXME vtblc perl listtape volinfo fsf MTWEOF
- LocalWords:  amanda degaussed ComByte DoublePlay whraven njackn com MTIOC vtbl
- LocalWords:  GETBLKSZ MAKEDEV zftape's linux dif CVS Revison cp MTREW MTOFFL
- LocalWords:  MTFSF BSF Marcin Dalecki GCC Config cpio swapout Kai Harrekilde
- LocalWords:  Pederson khp dolphinics Justus claus momo rwth aachen Laarhoven
diff --git a/drivers/char/ftape/compressor/Makefile b/drivers/char/ftape/compressor/Makefile
deleted file mode 100644
index 1fbd6c4..0000000
--- a/drivers/char/ftape/compressor/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-#       Copyright (C) 1997 Claus-Justus Heine.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-# 
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-# 
-# You should have received a copy of the GNU General Public License
-# along with this program; see the file COPYING.  If not, write to
-# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-#
-# $Source: /homes/cvs/ftape-stacked/ftape/compressor/Makefile,v $
-# $Revision: 1.1 $
-# $Date: 1997/10/05 19:12:28 $
-#
-#      Makefile for the optional compressor for the zftape VFS
-#      interface to the QIC-40/80/3010/3020 floppy-tape driver for
-#      Linux.
-#
-
-obj-$(CONFIG_ZFT_COMPRESSOR) += zft-compressor.o
-
-zft-compressor-objs := zftape-compress.o lzrw3.o
-
-CFLAGS_lzrw3.o	:= -O6 -funroll-all-loops
diff --git a/drivers/char/ftape/compressor/lzrw3.c b/drivers/char/ftape/compressor/lzrw3.c
deleted file mode 100644
index a032a0e..0000000
--- a/drivers/char/ftape/compressor/lzrw3.c
+++ /dev/null
@@ -1,743 +0,0 @@
-/*
- * $Source: /homes/cvs/ftape-stacked/ftape/compressor/lzrw3.c,v $
- * $Revision: 1.1 $
- * $Date: 1997/10/05 19:12:29 $
- *
- * Implementation of Ross Williams' lzrw3 algorithm. Adaptation for zftape.
- *
- */
-
-#include "../compressor/lzrw3.h"       /* Defines single exported function "compress".   */
-
-/******************************************************************************/
-/*                                                                            */
-/*                                    LZRW3.C                                 */
-/*                                                                            */
-/******************************************************************************/
-/*                                                                            */
-/* Author  : Ross Williams.                                                   */
-/* Date    : 30-Jun-1991.                                                     */
-/* Release : 1.                                                               */
-/*                                                                            */
-/******************************************************************************/
-/*                                                                            */
-/* This file contains an implementation of the LZRW3 data compression         */
-/* algorithm in C.                                                            */
-/*                                                                            */
-/* The algorithm is a general purpose compression algorithm that runs fast    */
-/* and gives reasonable compression. The algorithm is a member of the Lempel  */
-/* Ziv family of algorithms and bases its compression on the presence in the  */
-/* data of repeated substrings.                                               */
-/*                                                                            */
-/* This algorithm is unpatented and the code is public domain. As the         */
-/* algorithm is based on the LZ77 class of algorithms, it is unlikely to be   */
-/* the subject of a patent challenge.                                         */
-/*                                                                            */
-/* Unlike the LZRW1 and LZRW1-A algorithms, the LZRW3 algorithm is            */
-/* deterministic and is guaranteed to yield the same compressed               */
-/* representation for a given file each time it is run.                       */
-/*                                                                            */
-/* The LZRW3 algorithm was originally designed and implemented                */
-/* by Ross Williams on 31-Dec-1990.                                           */
-/*                                                                            */
-/* Here are the results of applying this code, compiled under THINK C 4.0     */
-/* and running on a Mac-SE (8MHz 68000), to the standard calgary corpus.      */
-/*                                                                            */
-/*    +----------------------------------------------------------------+      */
-/*    | DATA COMPRESSION TEST                                          |      */
-/*    | =====================                                          |      */
-/*    | Time of run     : Sun 30-Jun-1991 09:31PM                      |      */
-/*    | Timing accuracy : One part in 100                              |      */
-/*    | Context length  : 262144 bytes (= 256.0000K)                   |      */
-/*    | Test suite      : Calgary Corpus Suite                         |      */
-/*    | Files in suite  : 14                                           |      */
-/*    | Algorithm       : LZRW3                                        |      */
-/*    | Note: All averages are calculated from the un-rounded values.  |      */
-/*    +----------------------------------------------------------------+      */
-/*    | File Name   Length  CxB  ComLen  %Remn  Bits  Com K/s  Dec K/s |      */
-/*    | ----------  ------  ---  ------  -----  ----  -------  ------- |      */
-/*    | rpus:Bib.D  111261    1   55033   49.5  3.96    19.46    32.27 |      */
-/*    | us:Book1.D  768771    3  467962   60.9  4.87    17.03    31.07 |      */
-/*    | us:Book2.D  610856    3  317102   51.9  4.15    19.39    34.15 |      */
-/*    | rpus:Geo.D  102400    1   82424   80.5  6.44    11.65    18.18 |      */
-/*    | pus:News.D  377109    2  205670   54.5  4.36    17.14    27.47 |      */
-/*    | pus:Obj1.D   21504    1   13027   60.6  4.85    13.40    18.95 |      */
-/*    | pus:Obj2.D  246814    1  116286   47.1  3.77    19.31    30.10 |      */
-/*    | s:Paper1.D   53161    1   27522   51.8  4.14    18.60    31.15 |      */
-/*    | s:Paper2.D   82199    1   45160   54.9  4.40    18.45    32.84 |      */
-/*    | rpus:Pic.D  513216    2  122388   23.8  1.91    35.29    51.05 |      */
-/*    | us:Progc.D   39611    1   19669   49.7  3.97    18.87    30.64 |      */
-/*    | us:Progl.D   71646    1   28247   39.4  3.15    24.34    40.66 |      */
-/*    | us:Progp.D   49379    1   19377   39.2  3.14    23.91    39.23 |      */
-/*    | us:Trans.D   93695    1   33481   35.7  2.86    25.48    40.37 |      */
-/*    +----------------------------------------------------------------+      */
-/*    | Average     224401    1  110953   50.0  4.00    20.17    32.72 |      */
-/*    +----------------------------------------------------------------+      */
-/*                                                                            */
-/******************************************************************************/
-
-/******************************************************************************/
-
-/* The following structure is returned by the "compress" function below when  */
-/* the user asks the function to return identifying information.              */
-/* The most important field in the record is the working memory field which   */
-/* tells the calling program how much working memory should be passed to      */
-/* "compress" when it is called to perform a compression or decompression.    */
-/* LZRW3 uses the same amount of memory during compression and decompression. */
-/* For more information on this structure see "compress.h".                   */
-  
-#define U(X)            ((ULONG) X)
-#define SIZE_P_BYTE     (U(sizeof(UBYTE *)))
-#define SIZE_WORD       (U(sizeof(UWORD  )))
-#define ALIGNMENT_FUDGE (U(16))
-#define MEM_REQ ( U(4096)*(SIZE_P_BYTE) + ALIGNMENT_FUDGE )
-
-static struct compress_identity identity =
-{
- U(0x032DDEA8),                           /* Algorithm identification number. */
- MEM_REQ,                                 /* Working memory (bytes) required. */
- "LZRW3",                                 /* Name of algorithm.               */
- "1.0",                                   /* Version number of algorithm.     */
- "31-Dec-1990",                           /* Date of algorithm.               */
- "Public Domain",                         /* Copyright notice.                */
- "Ross N. Williams",                      /* Author of algorithm.             */
- "Renaissance Software",                  /* Affiliation of author.           */
- "Public Domain"                          /* Vendor of algorithm.             */
-};
- 
-LOCAL void compress_compress  (UBYTE *,UBYTE *,ULONG,UBYTE *, LONG *);
-LOCAL void compress_decompress(UBYTE *,UBYTE *,LONG, UBYTE *, ULONG *);
-
-/******************************************************************************/
-
-/* This function is the only function exported by this module.                */
-/* Depending on its first parameter, the function can be requested to         */
-/* compress a block of memory, decompress a block of memory, or to identify   */
-/* itself. For more information, see the specification file "compress.h".     */
-
-EXPORT void lzrw3_compress(
-	UWORD     action,      /* Action to be performed.		*/
-	UBYTE   *wrk_mem,	/* Address of working memory we can use.*/
-	UBYTE   *src_adr,	/* Address of input data.		*/
-	LONG     src_len,	/* Length  of input data.		*/
-	UBYTE   *dst_adr,	/* Address to put output data.		*/
-	void  *p_dst_len	/* Address of longword for length of output data.*/
-)
-{
- switch (action)
-   {
-    case COMPRESS_ACTION_IDENTITY:
-       *((struct compress_identity **)p_dst_len)= &identity;
-       break;
-    case COMPRESS_ACTION_COMPRESS:
-       compress_compress(wrk_mem,src_adr,src_len,dst_adr,(LONG *)p_dst_len);
-       break;
-    case COMPRESS_ACTION_DECOMPRESS:
-       compress_decompress(wrk_mem,src_adr,src_len,dst_adr,(LONG *)p_dst_len);
-       break;
-   }
-}
-
-/******************************************************************************/
-/*                                                                            */
-/* BRIEF DESCRIPTION OF THE LZRW3 ALGORITHM                                   */
-/* ========================================                                   */
-/* The LZRW3 algorithm is identical to the LZRW1-A algorithm except that      */
-/* instead of transmitting history offsets, it transmits hash table indexes.  */
-/* In order to decode the indexes, the decompressor must maintain an          */
-/* identical hash table. Copy items are straightforward: when the decompressor*/
-/* receives a copy item, it simply looks up the hash table to translate the   */
-/* index into a pointer into the data already decompressed. To update the     */
-/* hash table, it replaces the same table entry with a pointer to the start   */
-/* of the newly decoded phrase. The tricky part is with literal items, for at */
-/* the time that the decompressor receives a literal item the decompressor    */
-/* does not have the three bytes in the Ziv (that the compressor has) to      */
-/* perform the three-byte hash. To solve this problem, in LZRW3, both the     */
-/* compressor and decompressor are wired up so that they "buffer" these       */
-/* literals and update their hash tables only when three bytes are available. */
-/* This makes the maximum buffering 2 bytes.                                  */
-/*                                                                            */
-/* Replacement of offsets by hash table indexes yields a few percent extra    */
-/* compression at the cost of some speed. LZRW3 is slower than LZRW1, LZRW1-A */
-/* and LZRW2, but yields better compression.                                  */
-/*                                                                            */
-/* Extra compression could be obtained by using a hash table of depth two.    */
-/* However, increasing the depth above one incurs a significant decrease in   */
-/* compression speed which was not considered worthwhile. Another reason for  */
-/* keeping the depth down to one was to allow easy comparison with the        */
-/* LZRW1-A and LZRW2 algorithms so as to demonstrate the exact effect of the  */
-/* use of direct hash indexes.                                                */
-/*                                                                            */
-/*                                  +---+                                     */
-/*                                  |___|4095                                 */
-/*                                  |___|                                     */
-/*              +---------------------*_|<---+   /----+---\                   */
-/*              |                   |___|    +---|Hash    |                   */
-/*              |                   |___|        |Function|                   */
-/*              |                   |___|        \--------/                   */
-/*              |                   |___|0            ^                       */
-/*              |                   +---+             |                       */
-/*              |                   Hash        +-----+                       */
-/*              |                   Table       |                             */
-/*              |                              ---                            */
-/*              v                              ^^^                            */
-/*      +-------------------------------------|----------------+              */
-/*      ||||||||||||||||||||||||||||||||||||||||||||||||||||||||              */
-/*      +-------------------------------------|----------------+              */
-/*      |                                     |1......18|      |              */
-/*      |<------- Lempel=History ------------>|<--Ziv-->|      |              */
-/*      |     (=bytes already processed)      |<-Still to go-->|              */
-/*      |<-------------------- INPUT BLOCK ------------------->|              */
-/*                                                                            */
-/* The diagram above for LZRW3 looks almost identical to the diagram for      */
-/* LZRW1. The difference is that in LZRW3, the compressor transmits hash      */
-/* table indices instead of Lempel offsets. For this to work, the             */
-/* decompressor must maintain a hash table as well as the compressor and both */
-/* compressor and decompressor must "buffer" literals, as the decompressor    */
-/* cannot hash phrases commencing with a literal until another two bytes have */
-/* arrived.                                                                   */
-/*                                                                            */
-/*  LZRW3 Algorithm Execution Summary                                         */
-/*  ---------------------------------                                         */
-/*  1. Hash the first three bytes of the Ziv to yield a hash table index h.   */
-/*  2. Look up the hash table yielding history pointer p.                     */
-/*  3. Match where p points with the Ziv. If there is a match of three or     */
-/*     more bytes, code those bytes (in the Ziv) as a copy item, otherwise    */
-/*     code the next byte in the Ziv as a literal item.                       */
-/*  4. Update the hash table as possible subject to the constraint that only  */
-/*     phrases commencing three bytes back from the Ziv can be hashed and     */
-/*     entered into the hash table. (This enables the decompressor to keep    */
-/*     pace). See the description and code for more details.                  */
-/*                                                                            */
-/******************************************************************************/
-/*                                                                            */
-/*                     DEFINITION OF COMPRESSED FILE FORMAT                   */
-/*                     ====================================                   */
-/*  * A compressed file consists of a COPY FLAG followed by a REMAINDER.      */
-/*  * The copy flag CF uses up four bytes with the first byte being the       */
-/*    least significant.                                                      */
-/*  * If CF=1, then the compressed file represents the remainder of the file  */
-/*    exactly. Otherwise CF=0 and the remainder of the file consists of zero  */
-/*    or more GROUPS, each of which represents one or more bytes.             */
-/*  * Each group consists of two bytes of CONTROL information followed by     */
-/*    sixteen ITEMs except for the last group which can contain from one      */
-/*    to sixteen items.                                                       */
-/*  * An item can be either a LITERAL item or a COPY item.                    */
-/*  * Each item corresponds to a bit in the control bytes.                    */
-/*  * The first control byte corresponds to the first 8 items in the group    */
-/*    with bit 0 corresponding to the first item in the group and bit 7 to    */
-/*    the eighth item in the group.                                           */
-/*  * The second control byte corresponds to the second 8 items in the group  */
-/*    with bit 0 corresponding to the ninth item in the group and bit 7 to    */
-/*    the sixteenth item in the group.                                        */
-/*  * A zero bit in a control word means that the corresponding item is a     */
-/*    literal item. A one bit corresponds to a copy item.                     */
-/*  * A literal item consists of a single byte which represents itself.       */
-/*  * A copy item consists of two bytes that represent from 3 to 18 bytes.    */
-/*  * The first  byte in a copy item will be denoted C1.                      */
-/*  * The second byte in a copy item will be denoted C2.                      */
-/*  * Bits will be selected using square brackets.                            */
-/*    For example: C1[0..3] is the low nibble of the first byte               */
-/*    of copy item C1.                                                        */
-/*  * The LENGTH of a copy item is defined to be C1[0..3]+3 which is a number */
-/*    in the range [3,18].                                                    */
-/*  * The INDEX of a copy item is defined to be C1[4..7]*256+C2[0..7] which   */
-/*    is a number in the range [0,4095].                                      */
-/*  * A copy item represents the sequence of bytes                            */
-/*       text[POS-OFFSET..POS-OFFSET+LENGTH-1] where                          */
-/*          text   is the entire text of the uncompressed string.             */
-/*          POS    is the index in the text of the character following the    */
-/*                   string represented by all the items preceding the item   */
-/*                   being defined.                                           */
-/*          OFFSET is obtained from INDEX by looking up the hash table.       */
-/*                                                                            */
-/******************************************************************************/
-
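
/*
 * [Editorial sketch, not part of the original lzrw3.c.  The copy-item
 *  encoding spelled out above can be checked with a small stand-alone
 *  program.  Plain C types are used instead of the driver's UBYTE/UWORD
 *  typedefs, and the example bytes are arbitrary.]
 */
#include <stdio.h>

/* Decode one copy item as defined above:
 *   LENGTH = C1[0..3] + 3            (range 3..18)
 *   INDEX  = C1[4..7]*256 + C2[0..7] (range 0..4095, a hash table index)
 */
static void decode_copy_item(unsigned char c1, unsigned char c2,
                             unsigned int *length, unsigned int *index)
{
        *length = (c1 & 0x0F) + 3;
        *index  = ((c1 & 0xF0) << 4) | c2;
}

int main(void)
{
        unsigned int length, index;

        decode_copy_item(0xA7, 0x3C, &length, &index);
        printf("length=%u index=%u\n", length, index);  /* length=10 index=2620 */
        return 0;
}
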
-/* The following #define defines the length of the copy flag that appears at  */
-/* the start of the compressed file. The value of four bytes was chosen       */
-/* because the fast_copy routine on my Macintosh runs faster if the source    */
-/* and destination blocks are relatively longword aligned.                    */
-/* The actual flag data appears in the first byte. The rest are zeroed so as  */
-/* to normalize the compressed representation (i.e. not non-deterministic).   */
-#define FLAG_BYTES 4
-
-/* The following #defines define the meaning of the values of the copy        */
-/* flag at the start of the compressed file.                                  */
-#define FLAG_COMPRESS 0     /* Signals that output was result of compression. */
-#define FLAG_COPY     1     /* Signals that output was simply copied over.    */
-
-/* The 68000 microprocessor (on which this algorithm was originally developed)*/
-/* is fussy about non-aligned arrays of words. To avoid these problems the    */
-/* following macro can be used to "waste" from 0 to 3 bytes so as to align    */
-/* the argument pointer.                                                      */
-#define ULONG_ALIGN_UP(X) ((((ULONG)X)+sizeof(ULONG)-1)&~(sizeof(ULONG)-1))
-
-
-/* The following constant defines the maximum length of an uncompressed item. */
-/* This definition must not be changed; its value is hardwired into the code. */
-/* The longest number of bytes that can be spanned by a single item is 18     */
-/* for the longest copy item.                                                 */
-#define MAX_RAW_ITEM (18)
-
-/* The following constant defines the maximum length of an uncompressed group.*/
-/* This definition must not be changed; its value is hardwired into the code. */
-/* A group contains at most 16 items which explains this definition.          */
-#define MAX_RAW_GROUP (16*MAX_RAW_ITEM)
-
-/* The following constant defines the maximum length of a compressed group.   */
-/* This definition must not be changed; its value is hardwired into the code. */
-/* A compressed group consists of two control bytes followed by up to 16      */
-/* compressed items each of which can have a maximum length of two bytes.     */
-#define MAX_CMP_GROUP (2+16*2)
-
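
/*
 * [Editorial sketch, not part of the original lzrw3.c.  Per the "output
 *  zone" comments on compress_compress() below, the compressor may write
 *  up to MAX_CMP_GROUP bytes beyond the input length before it notices an
 *  overrun, so a caller has to size the destination buffer accordingly.
 *  The names below are illustrative only.]
 */
#include <stdlib.h>

#define SKETCH_MAX_CMP_GROUP (2+16*2)  /* two control bytes + 16 two-byte items */

/* Allocate an output zone large enough for an input of src_len bytes. */
static unsigned char *alloc_output_zone(size_t src_len)
{
        return malloc(src_len + SKETCH_MAX_CMP_GROUP);
}

int main(void)
{
        unsigned char *oz = alloc_output_zone(32768);  /* one 32 Kb segment */
        int ok = (oz != NULL);

        free(oz);
        return ok ? 0 : 1;
}
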
-/* The following constant defines the number of entries in the hash table.    */
-/* This definition must not be changed; its value is hardwired into the code. */
-#define HASH_TABLE_LENGTH (4096)
-
-/* LZRW3, unlike LZRW1(-A), must initialize its hash table so as to enable    */
-/* the compressor and decompressor to stay in step maintaining identical hash */
-/* tables. In an early version of the algorithm, the tables were simply       */
-/* initialized to zero and a check for zero was included just before the      */
-/* matching code. However, this test costs time. A better solution is to      */
-/* initialize all the entries in the hash table to point to a constant        */
-/* string. The decompressor does the same. This solution requires no extra    */
-/* test. The contents of the string do not matter so long as the string is    */
-/* the same for the compressor and decompressor and contains at least         */
-/* MAX_RAW_ITEM bytes. I chose consecutive decimal digits because they do not */
-/* have white space problems (e.g. there is no chance that the compiler will  */
-/* replace more than one space by a TAB) and because they make the length of  */
-/* the string obvious by inspection.                                          */
-#define START_STRING_18 ((UBYTE *) "123456789012345678")
-
-/* In this algorithm, hash values have to be calculated at more than one      */
-/* point. The following macro neatens the code up for this.                   */
-#define HASH(PTR) \
-   (((40543*(((*(PTR))<<8)^((*((PTR)+1))<<4)^(*((PTR)+2))))>>4) & 0xFFF)
-
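
/*
 * [Editorial sketch, not part of the original lzrw3.c.  The compressor and
 *  decompressor must compute exactly the same hash or they lose sync.  The
 *  stand-alone function below mirrors the HASH(PTR) macro above, using
 *  unsigned arithmetic and standard C types; the sample phrase is arbitrary.]
 */
#include <assert.h>
#include <stdio.h>

/* Hash three consecutive bytes into a table index in 0..4095. */
static unsigned int hash3(const unsigned char *p)
{
        return ((40543u * (((unsigned)p[0] << 8) ^
                           ((unsigned)p[1] << 4) ^
                            (unsigned)p[2])) >> 4) & 0xFFF;
}

int main(void)
{
        const unsigned char ziv[] = "abc";
        unsigned int h = hash3(ziv);

        assert(h < 4096);                /* always a valid hash table index */
        printf("hash of \"abc\" = %u\n", h);
        return 0;
}
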
-/******************************************************************************/
-
-/* Input  : Hand over the required amount of working memory in p_wrk_mem.     */
-/* Input  : Specify input block using p_src_first and src_len.                */
-/* Input  : Point p_dst_first to the start of the output zone (OZ).           */
-/* Input  : Point p_dst_len to a ULONG to receive the output length.          */
-/* Input  : Input block and output zone must not overlap.                     */
-/* Output : Length of output block written to *p_dst_len.                     */
-/* Output : Output block in Mem[p_dst_first..p_dst_first+*p_dst_len-1]. May   */
-/* Output : write in OZ=Mem[p_dst_first..p_dst_first+src_len+MAX_CMP_GROUP-1].*/
-/* Output : Upon completion guaranteed *p_dst_len<=src_len+FLAG_BYTES.        */
-LOCAL void compress_compress(UBYTE *p_wrk_mem,
-			     UBYTE *p_src_first, ULONG  src_len,
-			     UBYTE *p_dst_first, LONG  *p_dst_len)
-{
- /* p_src and p_dst step through the source and destination blocks.           */
- register UBYTE *p_src = p_src_first;
- register UBYTE *p_dst = p_dst_first;
- 
- /* The following variables are never modified and are used in the            */
- /* calculations that determine when the main loop terminates.                */
- UBYTE *p_src_post  = p_src_first+src_len;
- UBYTE *p_dst_post  = p_dst_first+src_len;
- UBYTE *p_src_max1  = p_src_first+src_len-MAX_RAW_ITEM;
- UBYTE *p_src_max16 = p_src_first+src_len-MAX_RAW_ITEM*16;
- 
- /* The variables 'p_control' and 'control' are used to buffer control bits.  */
- /* Before each group is processed, the next two bytes of the output block    */
- /* are set aside for the control word for the group about to be processed.   */
- /* 'p_control' is set to point to the first byte of that word. Meanwhile,    */
- /* 'control' buffers the control bits being generated during the processing  */
- /* of the group. Instead of having a counter to keep track of how many items */
- /* have been processed (=the number of bits in the control word), at the     */
- /* start of each group, the top word of 'control' is filled with 1 bits.     */
- /* As 'control' is shifted for each item, the 1 bits in the top word are     */
- /* absorbed or destroyed. When they all run out (i.e. when the top word is   */
- /* all zero bits), we know that we are at the end of a group.                */
-# define TOPWORD 0xFFFF0000
- UBYTE *p_control;
- register ULONG control=TOPWORD;
- 
- /* The variable 'hash' always points to the first element of the hash table. */
- UBYTE **hash= (UBYTE **)  ULONG_ALIGN_UP(p_wrk_mem);
- 
- /* The following two variables represent the literal buffer. p_h1 points to  */
- /* the hash table entry corresponding to the youngest literal. p_h2 points   */
- /* to the hash table entry corresponding to the second youngest literal.     */
- /* Note: p_h1=0=>p_h2=0 because zero values denote absence of a pending      */
- /* literal. The variables are initialized to zero meaning an empty "buffer". */
- UBYTE **p_h1=NULL;
- UBYTE **p_h2=NULL;
-  
- /* To start, we write the flag bytes. Being optimistic, we set the flag to   */
- /* FLAG_COMPRESS. The remaining flag bytes are zeroed so as to keep the      */
- /* algorithm deterministic.                                                  */
- *p_dst++=FLAG_COMPRESS;
- {UWORD i; for (i=2;i<=FLAG_BYTES;i++) *p_dst++=0;}
-
- /* Reserve the first word of output as the control word for the first group. */
- /* Note: This is undone at the end if the input block is empty.              */
- p_control=p_dst; p_dst+=2;
- 
- /* Initialize all elements of the hash table to point to a constant string.  */
- /* Use of an unrolled loop speeds this up considerably.                      */
- {UWORD i; UBYTE **p_h=hash;
-#  define ZH *p_h++=START_STRING_18
-  for (i=0;i<256;i++)     /* 256=HASH_TABLE_LENGTH/16. */
-    {ZH;ZH;ZH;ZH;
-     ZH;ZH;ZH;ZH;
-     ZH;ZH;ZH;ZH;
-     ZH;ZH;ZH;ZH;}
- }
-
- /* The main loop processes either 1 or 16 items per iteration. As its        */
- /* termination logic is complicated, I have opted for an infinite loop       */
- /* structure containing 'break' and 'goto' statements.                       */
- while (TRUE)
-   {/* Begin main processing loop. */
-   
-    /* Note: All the variables here except unroll should be defined within    */
-    /*       the inner loop. Unfortunately the loop hasn't got a block.       */
-     register UBYTE *p;         /* Scans through targ phrase during matching. */
-     register UBYTE *p_ziv= NULL ;     /* Points to first byte of current Ziv.       */
-     register UWORD unroll;     /* Loop counter for unrolled inner loop.      */
-     register UWORD index;      /* Index of current hash table entry.         */
-     register UBYTE **p_h0 = NULL ;     /* Pointer to current hash table entry.       */
-     
-    /* Test for overrun and jump to overrun code if necessary.                */
-    if (p_dst>p_dst_post)
-       goto overrun;
-       
-    /* The following cascade of if statements efficiently catches and deals   */
-    /* with varying degrees of closeness to the end of the input block.       */
-    /* When we get very close to the end, we stop updating the table and      */
-    /* code the remaining bytes as literals. This makes the code simpler.     */
-    unroll=16;
-    if (p_src>p_src_max16)
-      {
-       unroll=1;
-       if (p_src>p_src_max1)
-         {
-          if (p_src==p_src_post)
-             break;
-          else
-             goto literal;
-         }
-      }
-         
-    /* This inner unrolled loop processes 'unroll' (whose value is either 1   */
-    /* or 16) items. I have chosen to implement this loop with labels and     */
-    /* gotos to heighten the ease with which the loop may be implemented with */
-    /* a single decrement and branch instruction in assembly language and     */
-    /* also because the labels act as highly readable place markers.          */
-    /* (Also because we jump into the loop for endgame literals (see above)). */
-    
-    begin_unrolled_loop:
-    
-       /* To process the next phrase, we hash the next three bytes and use    */
-       /* the resultant hash table index to look up the hash table. A pointer */
-       /* to the entry is stored in p_h0 so as to avoid an array lookup. The  */
-       /* hash table entry *p_h0 is looked up yielding a pointer p to a       */
-       /* potential match of the Ziv in the history.                          */
-       index=HASH(p_src);
-       p_h0=&hash[index];
-       p=*p_h0;
-       
-       /* Having looked up the candidate position, we are in a position to    */
-       /* attempt a match. The match loop has been unrolled using the PS      */
-       /* macro so that failure within the first three bytes automatically    */
-       /* results in the literal branch being taken. The coding is simple.    */
-       /* p_ziv saves p_src so we can let p_src wander.                       */
-#       define PS *p++!=*p_src++
-       p_ziv=p_src;
-       if (PS || PS || PS)
-         {
-          /* Literal. */
-          
-          /* Code the literal byte as itself and a zero control bit.          */
-          p_src=p_ziv; literal: *p_dst++=*p_src++; control&=0xFFFEFFFF;
-          
-          /* We have just coded a literal. If we had two pending ones, that   */
-          /* makes three and we can update the hash table.                    */
-          if (p_h2!=0)
-             {*p_h2=p_ziv-2;}
-             
-          /* In any case, rotate the hash table pointers for next time. */
-          p_h2=p_h1; p_h1=p_h0;
-          
-         }
-       else
-         {
-          /* Copy */
-          
-          /* Match up to 15 remaining bytes using an unrolled loop and code. */
-#if 0
-          PS || PS || PS || PS || PS || PS || PS || PS ||
-          PS || PS || PS || PS || PS || PS || PS || p_src++;
-#else     
-          if (
-               !( PS || PS || PS || PS || PS || PS || PS || PS ||
-                  PS || PS || PS || PS || PS || PS || PS ) 
-             ) p_src++;
-#endif
-          *p_dst++=((index&0xF00)>>4)|(--p_src-p_ziv-3);
-          *p_dst++=index&0xFF;
-          
-          /* As we have just coded three bytes, we are now in a position to   */
-          /* update the hash table with the literal bytes that were pending   */
-          /* upon the arrival of extra context bytes.                         */
-          if (p_h1!=0)
-            {
-             if (p_h2)
-               {*p_h2=p_ziv-2; p_h2=NULL;}
-             *p_h1=p_ziv-1; p_h1=NULL;
-            }
-            
-          /* In any case, we can update the hash table based on the current   */
-          /* position as we just coded at least three bytes in a copy item.   */
-          *p_h0=p_ziv;
-          
-         }
-       control>>=1;
-                
-       /* This loop is all set up for a decrement and jump instruction! */
-#ifndef linux
-`    end_unrolled_loop: if (--unroll) goto begin_unrolled_loop;
-#else
-    /* end_unrolled_loop: */ if (--unroll) goto begin_unrolled_loop;
-#endif
-
-    /* At this point it will nearly always be the end of a group in which     */
-    /* case, we have to do some control-word processing. However, near the    */
-    /* end of the input block, the inner unrolled loop is only executed once. */
-    /* This necessitates the 'if' test.                                       */
-    if ((control&TOPWORD)==0)
-      {
-       /* Write the control word to the place we saved for it in the output. */
-       *p_control++=  control     &0xFF;
-       *p_control  = (control>>8) &0xFF;
-
-       /* Reserve the next word in the output block for the control word */
-       /* for the group about to be processed.                           */
-       p_control=p_dst; p_dst+=2;
-       
-       /* Reset the control bits buffer. */
-       control=TOPWORD;
-      }
-          
-   } /* End main processing loop. */
-   
- /* After the main processing loop has executed, all the input bytes have     */
- /* been processed. However, the control word has still to be written to the  */
- /* word reserved for it in the output at the start of the most recent group. */
- /* Before writing, the control word has to be shifted so that all the bits   */
- /* are in the right place. The "empty" bit positions are filled with 1s      */
- /* which partially fill the top word.                                        */
- while(control&TOPWORD) control>>=1;
- *p_control++= control     &0xFF;
- *p_control++=(control>>8) &0xFF;
- 
- /* If the last group contained no items, delete the control word too.        */
- if (p_control==p_dst) p_dst-=2;
- 
- /* Write the length of the output block to the dst_len parameter and return. */
- *p_dst_len=p_dst-p_dst_first;                           
- return;
- 
- /* Jump here as soon as an overrun is detected. An overrun is defined to     */
- /* have occurred if p_dst>p_dst_first+src_len. That is, the moment the       */
- /* length of the output written so far exceeds the length of the input block.*/
- /* The algorithm checks for overruns at least at the end of each group       */
- /* which means that the maximum overrun is MAX_CMP_GROUP bytes.              */
- /* Once an overrun occurs, the only thing to do is to set the copy flag and  */
- /* copy the input over.                                                      */
- overrun:
-#if 0
- *p_dst_first=FLAG_COPY;
- fast_copy(p_src_first,p_dst_first+FLAG_BYTES,src_len);
- *p_dst_len=src_len+FLAG_BYTES;
-#else
- fast_copy(p_src_first,p_dst_first,src_len);
- *p_dst_len= -src_len; /* return a negative number to indicate uncompressed data */
-#endif
-}
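
/*
 * [Editorial sketch, not part of the original lzrw3.c.  In this zftape
 *  adaptation the FLAG_COPY path is compiled out: incompressible input is
 *  copied verbatim and signalled by a negative *p_dst_len, which
 *  compress_decompress() below checks before doing anything else.  A caller
 *  could branch on the same convention as sketched here; the function name
 *  is illustrative.]
 */
#include <stdio.h>

/* Interpret the length written by the compressor above: negative means the
 * destination holds the input copied verbatim, positive means it holds that
 * many bytes of genuinely compressed data.
 */
static void report_result(long dst_len)
{
        if (dst_len < 0)
                printf("stored uncompressed: %ld bytes\n", -dst_len);
        else
                printf("compressed to %ld bytes\n", dst_len);
}

int main(void)
{
        report_result(-32768);  /* compressor fell back to a plain copy */
        report_result(20480);   /* compressor produced 20 Kb of output  */
        return 0;
}
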
-
-/******************************************************************************/
-
-/* Input  : Hand over the required amount of working memory in p_wrk_mem.     */
-/* Input  : Specify input block using p_src_first and src_len.                */
-/* Input  : Point p_dst_first to the start of the output zone.                */
-/* Input  : Point p_dst_len to a ULONG to receive the output length.          */
-/* Input  : Input block and output zone must not overlap. User knows          */
-/* Input  : upperbound on output block length from earlier compression.       */
-/* Input  : In any case, maximum expansion possible is nine times.            */
-/* Output : Length of output block written to *p_dst_len.                     */
-/* Output : Output block in Mem[p_dst_first..p_dst_first+*p_dst_len-1].       */
-/* Output : Writes only  in Mem[p_dst_first..p_dst_first+*p_dst_len-1].       */
-LOCAL void compress_decompress( UBYTE *p_wrk_mem,
-				UBYTE *p_src_first, LONG   src_len,
-				UBYTE *p_dst_first, ULONG *p_dst_len)
-{
- /* Byte pointers p_src and p_dst scan through the input and output blocks.   */
- register UBYTE *p_src = p_src_first+FLAG_BYTES;
- register UBYTE *p_dst = p_dst_first;
- /* we need to avoid a SEGV when trying to uncompress corrupt data */
- register UBYTE *p_dst_post = p_dst_first + *p_dst_len;
-
- /* The following two variables are never modified and are used to control    */
- /* the main loop.                                                            */
- UBYTE *p_src_post  = p_src_first+src_len;
- UBYTE *p_src_max16 = p_src_first+src_len-(MAX_CMP_GROUP-2);
- 
- /* The hash table is the only resident of the working memory. The hash table */
- /* contains HASH_TABLE_LENGTH=4096 pointers to positions in the history. To  */
- /* keep Macintoshes happy, it is longword aligned.                           */
- UBYTE **hash = (UBYTE **) ULONG_ALIGN_UP(p_wrk_mem);
-
- /* The variable 'control' is used to buffer the control bits which appear in */
- /* groups of 16 bits (control words) at the start of each compressed group.  */
- /* When each group is read, bit 16 of the register is set to one. Whenever   */
- /* a new bit is needed, the register is shifted right. When the value of the */
- /* register becomes 1, we know that we have reached the end of a group.      */
- /* Initializing the register to 1 thus instructs the code to follow that it  */
- /* should read a new control word immediately.                               */
- register ULONG control=1;
- 
- /* The value of 'literals' is always in the range 0..3. It is the number of  */
- /* consecutive literal items just seen. We have to record this number so as  */
- /* to know when to update the hash table. When literals gets to 3, there     */
- /* have been three consecutive literals and we can update at the position of */
- /* the oldest of the three.                                                  */
- register UWORD literals=0;
- 
- /* Check the leading copy flag to see if the compressor chose to use a copy  */
- /* operation instead of a compression operation. If a copy operation was     */
- /* used, then all we need to do is copy the data over, set the output length */
- /* and return.                                                               */
-#if 0
- if (*p_src_first==FLAG_COPY)
-   {
-    fast_copy(p_src_first+FLAG_BYTES,p_dst_first,src_len-FLAG_BYTES);
-    *p_dst_len=src_len-FLAG_BYTES;
-    return;
-   }
-#else
-  if ( src_len < 0 )
-  {                                            
-   fast_copy(p_src_first,p_dst_first,-src_len );
-   *p_dst_len = (ULONG)-src_len;
-   return;
-  }
-#endif
-   
- /* Initialize all elements of the hash table to point to a constant string.  */
- /* Use of an unrolled loop speeds this up considerably.                      */
- {UWORD i; UBYTE **p_h=hash;
-#  define ZJ *p_h++=START_STRING_18
-  for (i=0;i<256;i++)     /* 256=HASH_TABLE_LENGTH/16. */
-    {ZJ;ZJ;ZJ;ZJ;
-     ZJ;ZJ;ZJ;ZJ;
-     ZJ;ZJ;ZJ;ZJ;
-     ZJ;ZJ;ZJ;ZJ;}
- }
-
- /* The outer loop processes either 1 or 16 items per iteration depending on  */
- /* how close p_src is to the end of the input block.                         */
- while (p_src!=p_src_post)
-   {/* Start of outer loop */
-   
-    register UWORD unroll;   /* Counts unrolled loop executions.              */
-    
-    /* When 'control' has the value 1, it means that the 16 buffered control  */
-    /* bits that were read in at the start of the current group have all been */
-    /* shifted out and that all that is left is the 1 bit that was injected   */
-    /* into bit 16 at the start of the current group. When we reach the end   */
-    /* of a group, we have to load a new control word and inject a new 1 bit. */
-    if (control==1)
-      {
-       control=0x10000|*p_src++;
-       control|=(*p_src++)<<8;
-      }
-
-    /* If it is possible that we are within 16 groups from the end of the     */
-    /* input, execute the unrolled loop only once, else process a whole group */
-    /* of 16 items by looping 16 times.                                       */
-    unroll= p_src<=p_src_max16 ? 16 : 1;
-
-    /* This inner loop processes one phrase (item) per iteration. */
-    while (unroll--)
-      { /* Begin unrolled inner loop. */
-      
-       /* Process a literal or copy item depending on the next control bit. */
-       if (control&1)
-         {
-          /* Copy item. */
-          
-          register UBYTE *p;           /* Points to place from which to copy. */
-          register UWORD lenmt;        /* Length of copy item minus three.    */
-          register UBYTE **p_hte;      /* Pointer to current hash table entry.*/
-          register UBYTE *p_ziv=p_dst; /* Pointer to start of current Ziv.    */
-          
-          /* Read and dismantle the copy word. Work out from where to copy.   */
-          lenmt=*p_src++;
-          p_hte=&hash[((lenmt&0xF0)<<4)|*p_src++];
-          p=*p_hte;
-          lenmt&=0xF;
-          
-          /* Now perform the copy using a half unrolled loop. */
-          *p_dst++=*p++;
-          *p_dst++=*p++;
-          *p_dst++=*p++;
-          while (lenmt--)
-             *p_dst++=*p++;
-                 
-          /* Because we have just received 3 or more bytes in a copy item     */
-          /* (whose bytes we have just installed in the output), we are now   */
-          /* in a position to flush all the pending literal hashings that had */
-          /* been postponed for lack of bytes.                                */
-          if (literals>0)
-            {
-             register UBYTE *r=p_ziv-literals;
-             hash[HASH(r)]=r;
-             if (literals==2)
-                {r++; hash[HASH(r)]=r;}
-             literals=0;
-            }
-            
-          /* In any case, we can immediately update the hash table with the   */
-          /* current position. We don't need to do a HASH(...) to work out    */
-          /* where to put the pointer, as the compressor just told us!!!      */
-          *p_hte=p_ziv;
-          
-         }
-       else
-         {
-          /* Literal item. */
-          
-          /* Copy over the literal byte. */
-          *p_dst++=*p_src++;
-          
-          /* If we now have three literals waiting to be hashed into the hash */
-          /* table, we can do one of them now (because there are three).      */
-          if (++literals == 3)
-             {register UBYTE *p=p_dst-3; hash[HASH(p)]=p; literals=2;}
-         }
-          
-       /* Shift the control buffer so the next control bit is in bit 0. */
-       control>>=1;
-#if 1
-       if (p_dst > p_dst_post) 
-       {
-	       /* Shit: we tried to decompress corrupt data */
-	       *p_dst_len = 0;
-	       return;
-       }
-#endif
-      } /* End unrolled inner loop. */
-               
-   } /* End of outer loop */
-   
- /* Write the length of the decompressed data before returning. */
-  *p_dst_len=p_dst-p_dst_first;
-}
-
-/******************************************************************************/
-/*                               End of LZRW3.C                               */
-/******************************************************************************/
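As an editorial aid (not part of the deleted file), the sketch below distils the item layout that compress_decompress() above walks through: a 16-bit control word guards each group of 16 items, a clear control bit means one literal byte, and a set bit means a two-byte copy item whose high nibble and second byte form a 12-bit hash-table index while the low nibble holds the copy length minus three. The function name and the simplified counting interface are hypothetical.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical helper: count the literal and copy items in one full
 * 16-item group and return a pointer to the start of the next group. */
static const uint8_t *lzrw3_walk_group(const uint8_t *src,
				       size_t *n_literals, size_t *n_copies)
{
	/* Control word plus a guard bit in bit 16, as in the decompressor. */
	uint32_t control = 0x10000 | src[0] | ((uint32_t)src[1] << 8);
	src += 2;

	while (control != 1) {	/* guard bit reaches bit 0 after 16 items */
		if (control & 1) {
			uint8_t lenmt = src[0];
			unsigned int len  = (lenmt & 0x0F) + 3;             /* copy length       */
			unsigned int slot = ((lenmt & 0xF0) << 4) | src[1]; /* 12-bit hash index */

			(void)len; (void)slot;	/* a real decoder copies len bytes from hash[slot] */
			src += 2;
			(*n_copies)++;
		} else {
			src += 1;		/* literal: one raw output byte */
			(*n_literals)++;
		}
		control >>= 1;
	}
	return src;			/* next group's control word */
}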
diff --git a/drivers/char/ftape/compressor/lzrw3.h b/drivers/char/ftape/compressor/lzrw3.h
deleted file mode 100644
index 533feba..0000000
--- a/drivers/char/ftape/compressor/lzrw3.h
+++ /dev/null
@@ -1,253 +0,0 @@
-#ifndef _LZRW3_H
-#define _LZRW3_H
-/*
- * $Source: /homes/cvs/ftape-stacked/ftape/compressor/lzrw3.h,v $
- * $Revision: 1.1 $
- * $Date: 1997/10/05 19:12:30 $
- *
- *  include files for lzrw3. Only slightly modified from the original
- *  version. Assembles the three include files compress.h, port.h and
- *  fastcopy.h from the original lzrw3 package.
- *
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-
-/******************************************************************************/
-/*                                                                            */
-/*                                 COMPRESS.H                                 */
-/*                                                                            */
-/******************************************************************************/
-/*                                                                            */
-/* Author : Ross Williams.                                                    */
-/* Date   : December 1989.                                                    */
-/*                                                                            */
-/* This header file defines the interface to a set of functions called        */
-/* 'compress', each member of which implements a particular data compression  */
-/* algorithm.                                                                 */
-/*                                                                            */
-/* Normally in C programming, for each .H file, there is a corresponding .C   */
-/* file that implements the functions promised in the .H file.                */
-/* Here, there are many .C files corresponding to this header file.           */
-/* Each conforming implementation file contains a single function             */
-/* called 'compress' that implements a single data compression                */
-/* algorithm that conforms with the interface specified in this header file.  */
-/* Only one algorithm can be linked in at a time in this organization.        */
-/*                                                                            */
-/******************************************************************************/
-/*                                                                            */
-/*                    DEFINITION OF FUNCTION COMPRESS                         */
-/*                    ===============================                         */
-/*                                                                            */
-/* Summary of Function Compress                                               */
-/* ----------------------------                                               */
-/* The action that 'compress' takes depends on its first argument called      */
-/* 'action'.  The function provides three actions:                            */
-/*                                                                            */
-/*    - Return information about the algorithm.                               */
-/*    - Compress   a block of memory.                                         */
-/*    - Decompress a block of memory.                                         */
-/*                                                                            */
-/* Parameters                                                                 */
-/* ----------                                                                 */
-/* See the formal C definition later for a description of the parameters.     */
-/*                                                                            */
-/* Constants                                                                  */
-/* ---------                                                                  */
-/* COMPRESS_OVERRUN: The constant COMPRESS_OVERRUN defines by how many bytes  */
-/* an algorithm is allowed to expand a block during a compression operation.  */
-/*                                                                            */
-/* Although compression algorithms usually compress data, there will always   */
-/* be data that a given compressor will expand (this can be proven).          */
-/* Fortunately, the degree of expansion can be limited to a single bit, by    */
-/* copying over the input data if the data gets bigger during compression.    */
-/* To allow for this possibility, the first bit of a compressed               */
-/* representation can be used as a flag indicating whether the                */
-/* input data was copied over, or truly compressed. In practice, the first    */
-/* byte would be used to store this bit so as to maintain byte alignment.     */
-/*                                                                            */
-/* Unfortunately, in general, the only way to tell if an algorithm will       */
-/* expand a particular block of data is to run the algorithm on the data.     */
-/* If the algorithm does not continuously monitor how many output bytes it    */
-/* has written, it might write an output block far larger than the input      */
-/* block before realizing that it has done so.                                */
-/* On the other hand, continuous checks on output length are inefficient.     */
-/*                                                                            */
-/* To cater for all these problems, this interface definition:                */
-/* > Allows a compression algorithm to return an output block that is up to   */
-/*   COMPRESS_OVERRUN bytes longer than the input block.                      */
-/* > Allows a compression algorithm to write up to COMPRESS_OVERRUN bytes     */
-/*   more than the length of the input block to the memory of the output      */
-/*   block regardless of the length of the output block eventually returned.  */
-/*   This allows an algorithm to overrun the length of the input block in the */
-/*   output block by up to COMPRESS_OVERRUN bytes between expansion checks.   */
-/*                                                                            */
-/* The problem does not arise for decompression.                              */
-/*                                                                            */
-/* Identity Action                                                            */
-/* ---------------                                                            */
-/* > action must be COMPRESS_ACTION_IDENTITY.                                 */
-/* > p_dst_len must point to a longword to receive a longword address.        */
-/* > The value of the other parameters does not matter.                       */
-/* > After execution, the longword that p_dst_len points to will be a pointer */
-/*   to a structure of type compress_identity.                                */
-/*   Thus, for example, after the call, (*p_dst_len)->memory will return the  */
-/*   number of bytes of working memory that the algorithm requires to run.    */
-/* > The values of the identity structure returned are fixed constant         */
-/*   attributes of the algorithm and must not vary from call to call.         */
-/*                                                                            */
-/* Common Requirements for Compression and Decompression Actions              */
-/* -------------------------------------------------------------              */
-/* > wrk_mem must point to an unused block of memory of a length specified in */
-/*   the algorithm's identity block. The identity block can be obtained by    */
-/*   making a separate call to compress, specifying the identity action.      */
-/* > The INPUT BLOCK is defined to be Memory[src_addr,src_addr+src_len-1].    */
-/* > dst_len will be used to denote *p_dst_len.                               */
-/* > dst_len is not read by compress, only written.                           */
-/* > The value of dst_len is defined only upon termination.                   */
-/* > The OUTPUT BLOCK is defined to be Memory[dst_addr,dst_addr+dst_len-1].   */
-/*                                                                            */
-/* Compression Action                                                         */
-/* ------------------                                                         */
-/* > action must be COMPRESS_ACTION_COMPRESS.                                 */
-/* > src_len must be in the range [0,COMPRESS_MAX_ORG].                       */
-/* > The OUTPUT ZONE is defined to be                                         */
-/*      Memory[dst_addr,dst_addr+src_len-1+COMPRESS_OVERRUN].                 */
-/* > The function can modify any part of the output zone regardless of the    */
-/*   final length of the output block.                                        */
-/* > The input block and the output zone must not overlap.                    */
-/* > dst_len will be in the range [0,src_len+COMPRESS_OVERRUN].               */
-/* > dst_len will be in the range [0,COMPRESS_MAX_COM] (from prev fact).      */
-/* > The output block will consist of a representation of the input block.    */
-/*                                                                            */
-/* Decompression Action                                                       */
-/* --------------------                                                       */
-/* > action must be COMPRESS_ACTION_DECOMPRESS.                               */
-/* > The input block must be the result of an earlier compression operation.  */
-/* > If the previous fact is true, the following facts must also be true:     */
-/*   > src_len will be in the range [0,COMPRESS_MAX_COM].                     */
-/*   > dst_len will be in the range [0,COMPRESS_MAX_ORG].                     */
-/* > The input and output blocks must not overlap.                            */
-/* > Only the output block is modified.                                       */
-/* > Upon termination, the output block will consist of the bytes contained   */
-/*   in the input block passed to the earlier compression operation.          */
-/*                                                                            */
-/******************************************************************************/
-
-/******************************************************************************/
-/*                                                                            */
-/*                                    PORT.H                                  */
-/*                                                                            */
-/******************************************************************************/
-/*                                                                            */
-/* This module contains macro definitions and types that are likely to        */
-/* change between computers.                                                  */
-/*                                                                            */
-/******************************************************************************/
-
-#ifndef DONE_PORT       /* Only do this if not previously done.               */
-
-   #ifdef THINK_C
-      #define UBYTE unsigned char      /* Unsigned byte                       */
-      #define UWORD unsigned int       /* Unsigned word (2 bytes)             */
-      #define ULONG unsigned long      /* Unsigned word (4 bytes)             */
-      #define BOOL  unsigned char      /* Boolean                             */
-      #define FOPEN_BINARY_READ  "rb"  /* Mode string for binary reading.     */
-      #define FOPEN_BINARY_WRITE "wb"  /* Mode string for binary writing.     */
-      #define FOPEN_TEXT_APPEND  "a"   /* Mode string for text appending.     */
-      #define REAL double              /* Used for floating point stuff.      */
-   #endif
-   #if defined(LINUX) || defined(linux)
-      #define UBYTE __u8               /* Unsigned byte                       */
-      #define UWORD __u16              /* Unsigned word (2 bytes)             */
-      #define ULONG __u32              /* Unsigned word (4 bytes)             */
-      #define LONG  __s32              /* Signed   word (4 bytes)             */
-      #define BOOL  is not used here   /* Boolean                             */
-      #define FOPEN_BINARY_READ  not used  /* Mode string for binary reading. */
-      #define FOPEN_BINARY_WRITE not used  /* Mode string for binary writing. */
-      #define FOPEN_TEXT_APPEND  not used  /* Mode string for text appending. */
-      #define REAL not used                /* Used for floating point stuff.  */
-      #ifndef TRUE
-      #define TRUE 1
-      #endif
-   #endif
-
-   #define DONE_PORT                   /* Don't do all this again.            */
-   #define MALLOC_FAIL NULL            /* Failure status from malloc()        */
-   #define LOCAL static                /* For non-exported routines.          */
-   #define EXPORT                      /* Signals exported function.          */
-   #define then                        /* Useful for aligning ifs.            */
-
-#endif
-
-/******************************************************************************/
-/*                              End of PORT.H                                 */
-/******************************************************************************/
-
-#define COMPRESS_ACTION_IDENTITY   0
-#define COMPRESS_ACTION_COMPRESS   1
-#define COMPRESS_ACTION_DECOMPRESS 2
-
-#define COMPRESS_OVERRUN 1024
-#define COMPRESS_MAX_COM 0x70000000
-#define COMPRESS_MAX_ORG (COMPRESS_MAX_COM-COMPRESS_OVERRUN)
-
-#define COMPRESS_MAX_STRLEN 255
-
-/* The following structure provides information about the algorithm.         */
-/* > The top bit of id must be zero. The remaining bits must be chosen by    */
-/*   the author of the algorithm by tossing a coin 31 times.                 */
-/* > The amount of memory requested by the algorithm is specified in bytes   */
-/*   and must be in the range [0,0x70000000].                                */
-/* > All strings s must be such that strlen(s)<=COMPRESS_MAX_STRLEN.         */
-struct compress_identity
-  {
-   ULONG id;           /* Identifying number of algorithm.            */
-   ULONG memory;       /* Number of bytes of working memory required. */
-
-   char  *name;        /* Name of algorithm.                          */
-   char  *version;     /* Version number.                             */
-   char  *date;        /* Date of release of this version.            */
-   char  *copyright;   /* Copyright message.                          */
-
-   char  *author;      /* Author of algorithm.                        */
-   char  *affiliation; /* Affiliation of author.                      */
-   char  *vendor;      /* Where the algorithm can be obtained.        */
-  };
-
-void  lzrw3_compress(        /* Single function interface to compression algorithm. */
-UWORD     action,      /* Action to be performed.                             */
-UBYTE   *wrk_mem,      /* Working memory temporarily given to routine to use. */
-UBYTE   *src_adr,      /* Address of input  data.                             */
-LONG     src_len,      /* Length  of input  data.                             */
-UBYTE   *dst_adr,      /* Address of output data.                             */
-void  *p_dst_len       /* Pointer to a longword where routine will write:     */
-                       /*    If action=..IDENTITY   => Adr of id structure.   */
-                       /*    If action=..COMPRESS   => Length of output data. */
-                       /*    If action=..DECOMPRESS => Length of output data. */
-);
-
-/******************************************************************************/
-/*                             End of COMPRESS.H                              */
-/******************************************************************************/
-
-
-/******************************************************************************/
-/*                                  fast_copy.h                               */
-/******************************************************************************/
-
-/* This function copies a block of memory very quickly.                       */
-/* The exact speed depends on the relative alignment of the blocks of memory. */
-/* PRE  : 0<=src_len<=(2^32)-1 .                                              */
-/* PRE  : Source and destination blocks must not overlap.                     */
-/* POST : MEM[dst_adr,dst_adr+src_len-1]=MEM[src_adr,src_adr+src_len-1].      */
-/* POST : MEM[dst_adr,dst_adr+src_len-1] is the only memory changed.          */
-
-#define fast_copy(src,dst,len) memcpy(dst,src,len)
-
-/******************************************************************************/
-/*                               End of fast_copy.h                           */
-/******************************************************************************/
-
-#endif
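A minimal sketch (editorial, not from the deleted header) of how the single-function 'compress' interface declared above is meant to be driven: one identity call to learn the working-memory requirement, then compress and decompress calls that hand that memory back in. Buffer allocation is left to the caller, and the function name example_lzrw3_roundtrip is hypothetical; in the kernel driver the corresponding setup is done by zft_allocate_cmpr_mem() in zftape-compress.c below.

/* Hypothetical caller of the lzrw3_compress() entry point declared above. */
static int example_lzrw3_roundtrip(UBYTE *wrk_mem,   /* >= id->memory bytes        */
				   UBYTE *src, LONG src_len,
				   UBYTE *cmp_buf,    /* src_len + COMPRESS_OVERRUN */
				   UBYTE *out_buf)    /* >= src_len bytes           */
{
	struct compress_identity *id;
	ULONG cmp_len, out_len;

	/* Identity action: *p_dst_len receives the address of the identity
	 * structure; wrk_mem must then provide at least id->memory bytes. */
	lzrw3_compress(COMPRESS_ACTION_IDENTITY, NULL, NULL, 0, NULL, &id);
	if (wrk_mem == NULL || id == NULL)
		return -1;

	/* Compress: the output zone may be scribbled on up to
	 * src_len + COMPRESS_OVERRUN bytes regardless of the final length. */
	lzrw3_compress(COMPRESS_ACTION_COMPRESS, wrk_mem,
		       src, src_len, cmp_buf, &cmp_len);

	/* Decompress what was just produced and check the round trip. */
	lzrw3_compress(COMPRESS_ACTION_DECOMPRESS, wrk_mem,
		       cmp_buf, (LONG)cmp_len, out_buf, &out_len);

	return out_len == (ULONG)src_len ? 0 : -1;
}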
diff --git a/drivers/char/ftape/compressor/zftape-compress.c b/drivers/char/ftape/compressor/zftape-compress.c
deleted file mode 100644
index 65ffc0b..0000000
--- a/drivers/char/ftape/compressor/zftape-compress.c
+++ /dev/null
@@ -1,1203 +0,0 @@
-/*
- *      Copyright (C) 1994-1997 Claus-Justus Heine
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
- 
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- General Public License for more details.
- 
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- USA.
- 
- *
- *     This file implements a "generic" interface between the
- *     zftape driver and a compression algorithm. The
- *     compression algorithm currently used is an LZ77 variant; I use the
- *     implementation lzrw3 by Ross N. Williams (Renaissance
- *     Software). The compression program itself is in the files
- *     lzrw3.c and lzrw3.h.  To adopt another compression algorithm
- *     the functions zft_compress() and zft_uncompress() must be
- *     changed appropriately. See below.
- */
-
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-
-#include <linux/zftape.h>
-
-#include <asm/uaccess.h>
-
-#include "../zftape/zftape-init.h"
-#include "../zftape/zftape-eof.h"
-#include "../zftape/zftape-ctl.h"
-#include "../zftape/zftape-write.h"
-#include "../zftape/zftape-read.h"
-#include "../zftape/zftape-rw.h"
-#include "../compressor/zftape-compress.h"
-#include "../zftape/zftape-vtbl.h"
-#include "../compressor/lzrw3.h"
-
-/*
- *   global variables
- */
-
-/* I handle the allocation of this buffer as a special case, because
- * its size varies depending on the tape length inserted.
- */
-
-/* local variables 
- */
-static void *zftc_wrk_mem = NULL;
-static __u8 *zftc_buf     = NULL;
-static void *zftc_scratch_buf  = NULL;
-
-/* compression statistics 
- */
-static unsigned int zftc_wr_uncompressed = 0;
-static unsigned int zftc_wr_compressed   = 0;
-static unsigned int zftc_rd_uncompressed = 0;
-static unsigned int zftc_rd_compressed   = 0;
-
-/* forward */
-static int  zftc_write(int *write_cnt,
-		       __u8 *dst_buf, const int seg_sz,
-		       const __u8 __user *src_buf, const int req_len,
-		       const zft_position *pos, const zft_volinfo *volume);
-static int  zftc_read(int *read_cnt,
-		      __u8  __user *dst_buf, const int to_do,
-		      const __u8 *src_buf, const int seg_sz,
-		      const zft_position *pos, const zft_volinfo *volume);
-static int  zftc_seek(unsigned int new_block_pos, 
-		      zft_position *pos, const zft_volinfo *volume,
-		      __u8 *buffer);
-static void zftc_lock   (void);
-static void zftc_reset  (void);
-static void zftc_cleanup(void);
-static void zftc_stats      (void);
-
-/* compressed segment. This conforms to QIC-80-MC, Revision K.
- * 
- * Rev. K applies to tapes with `fixed length format' which is
- * indicated by format codes 2, 3 and 5. See below for format codes 4 and 6
- *
- * 2 bytes: offset of compression segment structure
- *          29k > offset >= 29k-18: data from previous segment ends in this
- *                                  segment and no compressed block starts
- *                                  in this segment
- *                     offset == 0: data from previous segment occupies entire
- *                                  segment and continues in next segment
- * n bytes: remainder from previous segment
- * 
- * Rev. K:  
- * 4 bytes: file set byte offset
- * Post Rev. K and QIC-3010/3020:
- * 8 bytes: file set byte offset
- * 2 bytes: byte count N (amount of data following)
- *          bit 15 is set if data is compressed, bit 15 is not
- *          set if data is uncompressed
- * N bytes: data (as much as specified in the byte count)
- * 2 bytes: byte count N_1 of next cluster
- * N_1 bytes: data of next cluster
- * 2 bytes: byte count N_2 of next cluster
- * N_2 bytes: ...  
- *
- * Note that the `N' byte count accounts only for the bytes that are in the
- * current segment if the cluster spans to the next segment.
- */
-
-typedef struct
-{
-	int cmpr_pos;             /* actual position in compression buffer */
-	int cmpr_sz;              /* what is left in the compression buffer
-				   * when copying the compressed data to the
-				   * deblock buffer
-				   */
-	unsigned int first_block; /* location of header information in
-				   * this segment
-				   */
-	unsigned int count;       /* amount of data of current block
-				   * contained in current segment 
-				   */
-	unsigned int offset;      /* offset in current segment */
-	unsigned int spans:1;     /* might continue in next segment */
-	unsigned int uncmpr;      /* 0x8000 if this block contains
-				   * uncompressed data 
-				   */
-	__s64 foffs;              /* file set byte offset, same as in 
-				   * compression map segment
-				   */
-} cmpr_info;
-
-static cmpr_info cseg; /* static data. Must be kept up to date and shared by 
-			* read, write and seek functions
-			*/
-
-#define DUMP_CMPR_INFO(level, msg, info)				\
-	TRACE(level, msg "\n"						\
-	      KERN_INFO "cmpr_pos   : %d\n"				\
-	      KERN_INFO "cmpr_sz    : %d\n"				\
-	      KERN_INFO "first_block: %d\n"				\
-	      KERN_INFO "count      : %d\n"				\
-	      KERN_INFO "offset     : %d\n"				\
-	      KERN_INFO "spans      : %d\n"				\
-	      KERN_INFO "uncmpr     : 0x%04x\n"				\
-	      KERN_INFO "foffs      : " LL_X,				\
-	      (info)->cmpr_pos, (info)->cmpr_sz, (info)->first_block,	\
-	      (info)->count, (info)->offset, (info)->spans == 1,	\
-	      (info)->uncmpr, LL((info)->foffs))
-
-/*   dispatch compression segment info, return error code
- *  
- *   afterwards, cseg->offset points to start of data of the NEXT
- *   compressed block, and cseg->count contains the amount of data
- *   left in the actual compressed block. cseg->spans is set to 1 if
- *   the block is continued in the following segment. Otherwise it is
- *   set to 0. 
- */
-static int get_cseg (cmpr_info *cinfo, const __u8 *buff, 
-		     const unsigned int seg_sz,
-		     const zft_volinfo *volume)
-{
-	TRACE_FUN(ft_t_flow);
-
- 	cinfo->first_block = GET2(buff, 0);
-	if (cinfo->first_block == 0) { /* data spans to next segment */
-		cinfo->count  = seg_sz - sizeof(__u16);
-		cinfo->offset = seg_sz;
-		cinfo->spans = 1;
-	} else { /* cluster definitely ends in this segment */
-		if (cinfo->first_block > seg_sz) {
-			/* data corrupted */
-			TRACE_ABORT(-EIO, ft_t_err, "corrupted data:\n"
-				    KERN_INFO "segment size: %d\n"
-				    KERN_INFO "first block : %d",
-				    seg_sz, cinfo->first_block);
-		}
-	        cinfo->count  = cinfo->first_block - sizeof(__u16);
-		cinfo->offset = cinfo->first_block;
-		cinfo->spans = 0;
-	}
-	/* now get the offset the first block should have in the
-	 * uncompressed data stream.
-	 *
-	 * For this magic `18' refer to CRF-3 standard or QIC-80MC,
-	 * Rev. K.  
-	 */
-	if ((seg_sz - cinfo->offset) > 18) {
-		if (volume->qic113) { /* > revision K */
-			TRACE(ft_t_data_flow, "New QIC-113 compliance");
-			cinfo->foffs = GET8(buff, cinfo->offset);
-			cinfo->offset += sizeof(__s64); 
-		} else {
-			TRACE(/* ft_t_data_flow */ ft_t_noise, "pre QIC-113 version");
-			cinfo->foffs   = (__s64)GET4(buff, cinfo->offset);
-			cinfo->offset += sizeof(__u32); 
-		}
-	}
-	if (cinfo->foffs > volume->size) {
-		TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n"
-			    KERN_INFO "offset in current volume: %d\n"
-			    KERN_INFO "size of current volume  : %d",
-			    (int)(cinfo->foffs>>10), (int)(volume->size>>10));
-	}
-	if (cinfo->cmpr_pos + cinfo->count > volume->blk_sz) {
-		TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n"
-			    KERN_INFO "block size : %d\n"
-			    KERN_INFO "data record: %d",
-			    volume->blk_sz, cinfo->cmpr_pos + cinfo->count);
-	}
-	DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", cinfo);
-	TRACE_EXIT 0;
-}
-
-/*  This one is called when a new cluster starts in the same segment.
- *  
- *  Note: if this is the first cluster in the current segment, we must
- *  not check whether there are more than 18 bytes available because
- *  this have already been done in get_cseg() and there may be less
- *  than 18 bytes available due to header information.
- * 
- */
-static void get_next_cluster(cmpr_info *cluster, const __u8 *buff, 
-			     const int seg_sz, const int finish)
-{
-	TRACE_FUN(ft_t_flow);
-
-	if (seg_sz - cluster->offset > 18 || cluster->foffs != 0) {
-		cluster->count   = GET2(buff, cluster->offset);
-		cluster->uncmpr  = cluster->count & 0x8000;
-		cluster->count  -= cluster->uncmpr;
-		cluster->offset += sizeof(__u16);
-		cluster->foffs   = 0;
-		if ((cluster->offset + cluster->count) < seg_sz) {
-			cluster->spans = 0;
-		} else if (cluster->offset + cluster->count == seg_sz) {
-			cluster->spans = !finish;
-		} else {
-			/* either an error or a volume written by an 
-			 * old version. If this is a data error, then we'll
-			 * catch it later.
-			 */
-			TRACE(ft_t_data_flow, "Either error or old volume");
-			cluster->spans = 1;
-			cluster->count = seg_sz - cluster->offset;
-		}
-	} else {
-		cluster->count = 0;
-		cluster->spans = 0;
-		cluster->foffs = 0;
-	}
-	DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */ , "", cluster);
-	TRACE_EXIT;
-}
-
-static void zftc_lock(void)
-{
-}
-
-/*  this function is needed for zftape_reset_position in zftape-io.c 
- */
-static void zftc_reset(void)
-{
-	TRACE_FUN(ft_t_flow);
-
-	memset((void *)&cseg, '\0', sizeof(cseg));
-	zftc_stats();
-	TRACE_EXIT;
-}
-
-static int cmpr_mem_initialized = 0;
-static unsigned int alloc_blksz = 0;
-
-static int zft_allocate_cmpr_mem(unsigned int blksz)
-{
-	TRACE_FUN(ft_t_flow);
-
-	if (cmpr_mem_initialized && blksz == alloc_blksz) {
-		TRACE_EXIT 0;
-	}
-	TRACE_CATCH(zft_vmalloc_once(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE),
-		    zftc_cleanup());
-	TRACE_CATCH(zft_vmalloc_always(&zftc_buf, blksz + CMPR_OVERRUN),
-		    zftc_cleanup());
-	alloc_blksz = blksz;
-	TRACE_CATCH(zft_vmalloc_always(&zftc_scratch_buf, blksz+CMPR_OVERRUN),
-		    zftc_cleanup());
-	cmpr_mem_initialized = 1;
-	TRACE_EXIT 0;
-}
-
-static void zftc_cleanup(void)
-{
-	TRACE_FUN(ft_t_flow);
-
-	zft_vfree(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE);
-	zft_vfree(&zftc_buf, alloc_blksz + CMPR_OVERRUN);
-	zft_vfree(&zftc_scratch_buf, alloc_blksz + CMPR_OVERRUN);
-	cmpr_mem_initialized = alloc_blksz = 0;
-	TRACE_EXIT;
-}
-
-/*****************************************************************************
- *                                                                           *
- *  The following two functions "ftape_compress()" and                       *
- *  "ftape_uncompress()" are the interface to the actual compression         *
- *  algorithm (i.e. they are calling the "compress()" function from          *
- *  the lzrw3 package for now). These routines could quite easily be         *
- *  changed to adopt another compression algorithm instead of lzrw3,         *
- *  which currently is used.                                                 *
- *                                                                           *
- *****************************************************************************/
-
-/* called by zft_compress_write() to perform the compression. Must
- * return the size of the compressed data.
- *
- * NOTE: The size of the compressed data should not exceed the size of
- *       the uncompressed data. Most compression algorithms have means
- *       to store data unchanged if the "compressed" data amount would
- *       exceed the original one. Mostly this is done by storing some
- *       flag-bytes in front of the compressed data to indicate if it
- *       is compressed or not. Thus the worst compression result
- *       length is the original length plus those flag-bytes.
- *
- *       We don't want that, as the QIC-80 standard provides a means
- *       of marking uncompressed blocks by simply setting bit 15 of
- *       the compressed block's length. Thus a compressed block can
- *       have at most a length of 2^15-1 bytes. The QIC-80 standard
- *       restricts the block-length even further, allowing only 29k -
- *       6 bytes.
- *
- *       Currently, the maximum blocksize used by zftape is 28k.
- *
- *       In short: don't exceed the length of the input-package, set
- *       bit 15 of the compressed size to 1 if you have copied data
- *       instead of compressing it.
- */
-static int zft_compress(__u8 *in_buffer, unsigned int in_sz, __u8 *out_buffer)
-{ 
-	__s32 compressed_sz;
-	TRACE_FUN(ft_t_flow);
-	
-
-	lzrw3_compress(COMPRESS_ACTION_COMPRESS, zftc_wrk_mem,
-		       in_buffer, in_sz, out_buffer, &compressed_sz);
-	if (TRACE_LEVEL >= ft_t_info) {
-		/*  the compiler will optimize this away when
-		 *  compiled with NO_TRACE_AT_ALL option
-		 */
-		TRACE(ft_t_data_flow, "\n"
-		      KERN_INFO "before compression: %d bytes\n"
-		      KERN_INFO "after compression : %d bytes", 
-		      in_sz, 
-		      (int)(compressed_sz < 0 
-		      ? -compressed_sz : compressed_sz));
-		/*  for statistical purposes
-		 */
-		zftc_wr_compressed   += (compressed_sz < 0 
-					   ? -compressed_sz : compressed_sz);
-		zftc_wr_uncompressed += in_sz;
-	}
-	TRACE_EXIT (int)compressed_sz;
-}
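To make the sign convention above concrete, here is a small editorial sketch (not part of the deleted file): a negative size returned by zft_compress() marks a block that was copied rather than compressed, and bit 15 of the on-tape byte count is set for it, exactly as zftc_write() below does before its PUT2(). The helper name is hypothetical.

/* Hypothetical helper mirroring the bit-15 handling in zftc_write(). */
static __u16 cluster_count_word(__s32 compressed_sz, __u16 *count)
{
	__u16 uncmpr = 0;

	if (compressed_sz < 0) {	/* stored uncompressed ("copied") */
		uncmpr = 0x8000;
		compressed_sz = -compressed_sz;
	}
	*count = (__u16)compressed_sz;	/* at most 29k - 6, so it fits    */
	return uncmpr | *count;		/* the value written with PUT2()  */
}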
-
-/* called by zft_compress_read() to decompress the data. Must
- * return the size of the decompressed data for sanity checks
- * (compared with zft_blk_sz)
- *
- * NOTE: Read the note for zft_compress() above!  If bit 15 of the
- *       parameter in_sz is set, then the data in in_buffer isn't
- *       compressed, which must be handled by the un-compression
- *       algorithm. (I changed lzrw3 to handle this.)
- *
- *  The parameter max_out_sz is needed to prevent buffer overruns when 
- *  uncompressing corrupt data.
- */
-static unsigned int zft_uncompress(__u8 *in_buffer, 
-				   int in_sz, 
-				   __u8 *out_buffer,
-				   unsigned int max_out_sz)
-{ 
-	TRACE_FUN(ft_t_flow);
-	
-	lzrw3_compress(COMPRESS_ACTION_DECOMPRESS, zftc_wrk_mem,
-		       in_buffer, (__s32)in_sz,
-		       out_buffer, (__u32 *)&max_out_sz);
-	
-	if (TRACE_LEVEL >= ft_t_info) {
-		TRACE(ft_t_data_flow, "\n"
-		      KERN_INFO "before decompression: %d bytes\n"
-		      KERN_INFO "after decompression : %d bytes", 
-		      in_sz < 0 ? -in_sz : in_sz,(int)max_out_sz);
-		/*  for statistical purposes
-		 */
-		zftc_rd_compressed   += in_sz < 0 ? -in_sz : in_sz;
-		zftc_rd_uncompressed += max_out_sz;
-	}
-	TRACE_EXIT (unsigned int)max_out_sz;
-}
-
-/* print some statistics about the efficiency of the compression to
- * the kernel log 
- */
-static void zftc_stats(void)
-{
-	TRACE_FUN(ft_t_flow);
-
-	if (TRACE_LEVEL < ft_t_info) {
-		TRACE_EXIT;
-	}
-	if (zftc_wr_uncompressed != 0) {
-		if (zftc_wr_compressed > (1<<14)) {
-			TRACE(ft_t_info, "compression statistics (writing):\n"
-			      KERN_INFO " compr./uncmpr.   : %3d %%",
-			      (((zftc_wr_compressed>>10) * 100)
-			       / (zftc_wr_uncompressed>>10)));
-		} else {
-			TRACE(ft_t_info, "compression statistics (writing):\n"
-			      KERN_INFO " compr./uncmpr.   : %3d %%",
-			      ((zftc_wr_compressed * 100)
-			       / zftc_wr_uncompressed));
-		}
-	}
-	if (zftc_rd_uncompressed != 0) {
-		if (zftc_rd_compressed > (1<<14)) {
-			TRACE(ft_t_info, "compression statistics (reading):\n"
-			      KERN_INFO " compr./uncmpr.   : %3d %%",
-			      (((zftc_rd_compressed>>10) * 100)
-			       / (zftc_rd_uncompressed>>10)));
-		} else {
-			TRACE(ft_t_info, "compression statistics (reading):\n"
-			      KERN_INFO " compr./uncmpr.   : %3d %%",
-			      ((zftc_rd_compressed * 100)
-			       / zftc_rd_uncompressed));
-		}
-	}
-	/* only print it once: */
-	zftc_wr_uncompressed = 
-		zftc_wr_compressed  =
-		zftc_rd_uncompressed =
-		zftc_rd_compressed   = 0;
-	TRACE_EXIT;
-}
-
-/* start new compressed block 
- */
-static int start_new_cseg(cmpr_info *cluster, 
-			  char *dst_buf, 
-			  const zft_position *pos,
-			  const unsigned int blk_sz,
-			  const char *src_buf,
-			  const int this_segs_sz,
-			  const int qic113)
-{
-	int size_left;
-	int cp_cnt;
-	int buf_pos;
-	TRACE_FUN(ft_t_flow);
-
-	size_left = this_segs_sz - sizeof(__u16) - cluster->cmpr_sz;
-	TRACE(ft_t_data_flow,"\n" 
-	      KERN_INFO "segment size   : %d\n"
-	      KERN_INFO "compressed_sz: %d\n"
-	      KERN_INFO "size_left      : %d",
-	      this_segs_sz, cluster->cmpr_sz, size_left);
-	if (size_left > 18) { /* start a new cluster */
-		cp_cnt = cluster->cmpr_sz;
-		cluster->cmpr_sz = 0;
-		buf_pos = cp_cnt + sizeof(__u16);
-		PUT2(dst_buf, 0, buf_pos);
-
-		if (qic113) {
-			__s64 foffs = pos->volume_pos;
-			if (cp_cnt) foffs += (__s64)blk_sz;
-
-			TRACE(ft_t_data_flow, "new style QIC-113 header");
-			PUT8(dst_buf, buf_pos, foffs);
-			buf_pos += sizeof(__s64);
-		} else {
-			__u32 foffs = (__u32)pos->volume_pos;
-			if (cp_cnt) foffs += (__u32)blk_sz;
-			
-			TRACE(ft_t_data_flow, "old style QIC-80MC header");
-			PUT4(dst_buf, buf_pos, foffs);
-			buf_pos += sizeof(__u32);
-		}
-	} else if (size_left >= 0) {
-		cp_cnt = cluster->cmpr_sz;
-		cluster->cmpr_sz = 0;
-		buf_pos = cp_cnt + sizeof(__u16);
-		PUT2(dst_buf, 0, buf_pos);  
-		/* zero unused part of segment. */
-		memset(dst_buf + buf_pos, '\0', size_left);
-		buf_pos = this_segs_sz;
-	} else { /* need entire segment and more space */
-		PUT2(dst_buf, 0, 0); 
-		cp_cnt = this_segs_sz - sizeof(__u16);
-		cluster->cmpr_sz  -= cp_cnt;
-		buf_pos = this_segs_sz;
-	}
-	memcpy(dst_buf + sizeof(__u16), src_buf + cluster->cmpr_pos, cp_cnt);
-	cluster->cmpr_pos += cp_cnt;
-	TRACE_EXIT buf_pos;
-}
-
-/* return-value: the number of bytes removed from the user-buffer
- *               `src_buf' or error code
- *
- *  int *write_cnt           : how much actually has been moved to the
- *                             dst_buf. Need not be initialized when
- *                             function returns with an error code
- *                             (negative return value) 
- *  __u8 *dst_buf            : kernel space buffer where the data has to be
- *                             copied to. The contents of this buffer
- *                             go to a specific segment.
- *  const int seg_sz         : the size of the segment dst_buf will be
- *                             copied to.
- *  const zft_position *pos  : struct containing the coordinates in
- *                             the current volume (byte position,
- *                             segment id of current segment etc)
- *  const zft_volinfo *volume: information about the current volume,
- *                             size etc.
- *  const __u8 *src_buf      : user space buffer that contains the
- *                             data the user wants to be written to
- *                             tape.
- *  const int req_len        : the amount of data the user wants to be
- *                             written to tape.
- */
-static int zftc_write(int *write_cnt,
-		      __u8 *dst_buf, const int seg_sz,
-		      const __u8 __user *src_buf, const int req_len,
-		      const zft_position *pos, const zft_volinfo *volume)
-{
-	int req_len_left = req_len;
-	int result;
-	int len_left;
-	int buf_pos_write = pos->seg_byte_pos;
-	TRACE_FUN(ft_t_flow);
-	
-	/* Note: we do not unlock the module because
-	 * there are some values cached in that `cseg' variable.  We
-	 * don't want to lose this information when being
-	 * unloaded by kerneld, even when the tape is full or when we
-	 * cannot allocate enough memory.
-	 */
-	if (pos->tape_pos > (volume->size-volume->blk_sz-ZFT_CMPR_OVERHEAD)) {
-		TRACE_EXIT -ENOSPC;
-	}    
-	if (zft_allocate_cmpr_mem(volume->blk_sz) < 0) {
-		/* should we unlock the module? But it shouldn't 
-		 * be locked anyway ...
-		 */
-		TRACE_EXIT -ENOMEM;
-	}
-	if (buf_pos_write == 0) { /* fill a new segment */
-		*write_cnt = buf_pos_write = start_new_cseg(&cseg,
-							    dst_buf,
-							    pos,
-							    volume->blk_sz,
-							    zftc_buf, 
-							    seg_sz,
-							    volume->qic113);
-		if (cseg.cmpr_sz == 0 && cseg.cmpr_pos != 0) {
-			req_len_left -= result = volume->blk_sz;
-			cseg.cmpr_pos  = 0;
-		} else {
-			result = 0;
-		}
-	} else {
-		*write_cnt = result = 0;
-	}
-	
-	len_left = seg_sz - buf_pos_write;
-	while ((req_len_left > 0) && (len_left > 18)) {
-		/* now we have some size left for a new compressed
-		 * block.  We know, that the compression buffer is
-		 * empty (else there wouldn't be any space left).  
-		 */
-		if (copy_from_user(zftc_scratch_buf, src_buf + result, 
-				   volume->blk_sz) != 0) {
-			TRACE_EXIT -EFAULT;
-		}
-		req_len_left -= volume->blk_sz;
-		cseg.cmpr_sz = zft_compress(zftc_scratch_buf, volume->blk_sz, 
-					    zftc_buf);
-		if (cseg.cmpr_sz < 0) {
-			cseg.uncmpr = 0x8000;
-			cseg.cmpr_sz = -cseg.cmpr_sz;
-		} else {
-			cseg.uncmpr = 0;
-		}
-		/* increment "result" iff we copied the entire
-		 * compressed block to the zft_deblock_buf 
-		 */
-		len_left -= sizeof(__u16);
-		if (len_left >= cseg.cmpr_sz) {
-			len_left -= cseg.count = cseg.cmpr_sz;
-			cseg.cmpr_pos = cseg.cmpr_sz = 0;
-			result += volume->blk_sz;
-		} else {
-			cseg.cmpr_sz       -= 
-				cseg.cmpr_pos =
-				cseg.count    = len_left;
-			len_left = 0;
-		}
-		PUT2(dst_buf, buf_pos_write, cseg.uncmpr | cseg.count);
-		buf_pos_write += sizeof(__u16);
-		memcpy(dst_buf + buf_pos_write, zftc_buf, cseg.count);
-		buf_pos_write += cseg.count;
-		*write_cnt    += cseg.count + sizeof(__u16);
-		FT_SIGNAL_EXIT(_DONT_BLOCK);
-	}
-	/* erase the remainder of the segment if less than 18 bytes
-	 * left (18 bytes is due to the QIC-80 standard) 
-	 */
-	if (len_left <= 18) {
-		memset(dst_buf + buf_pos_write, '\0', len_left);
-		(*write_cnt) += len_left;
-	}
-	TRACE(ft_t_data_flow, "returning %d", result);
-	TRACE_EXIT result;
-}   
-
-/* out:
- *
- * int *read_cnt: the number of bytes we removed from the zft_deblock_buf
- *                (result)
- * int *to_do   : the remaining size of the read-request.
- *
- * in:
- *
- * char *buff          : buff is the address of the upper part of the user
- *                       buffer that hasn't been filled with data yet.
-
- * int buf_pos_read    : copy of the read position from _ftape_read()
- * int buf_len_read    : copy of buf_len_rd from _ftape_read()
- * char *zft_deblock_buf: zft_deblock_buf
- * unsigned short blk_sz: the block size valid for this volume, may differ
- *                            from zft_blk_sz.
- * int finish: if != 0 means that this is the last segment belonging
- *  to this volume
- * returns the amount of data actually copied to the user-buffer
- *
- * to_do MUST NOT SHRINK except to indicate an EOF. In this case *to_do has to
- * be set to 0 
- */
-static int zftc_read (int *read_cnt, 
-		      __u8  __user *dst_buf, const int to_do, 
-		      const __u8 *src_buf, const int seg_sz, 
-		      const zft_position *pos, const zft_volinfo *volume)
-{          
-	int uncompressed_sz;         
-	int result = 0;
-	int remaining = to_do;
-	TRACE_FUN(ft_t_flow);
-
-	TRACE_CATCH(zft_allocate_cmpr_mem(volume->blk_sz),);
-	if (pos->seg_byte_pos == 0) {
-		/* new segment just read
-		 */
-		TRACE_CATCH(get_cseg(&cseg, src_buf, seg_sz, volume),
-			    *read_cnt = 0);
-		memcpy(zftc_buf + cseg.cmpr_pos, src_buf + sizeof(__u16), 
-		       cseg.count);
-		cseg.cmpr_pos += cseg.count;
-		*read_cnt      = cseg.offset;
-		DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", &cseg);
-	} else {
-		*read_cnt = 0;
-	}
-	/* loop and uncompress until user buffer full or
-	 * deblock-buffer empty 
-	 */
-	TRACE(ft_t_data_flow, "compressed_sz: %d, compos : %d, *read_cnt: %d",
-	      cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt);
-	while ((cseg.spans == 0) && (remaining > 0)) {
-		if (cseg.cmpr_pos  != 0) { /* cmpr buf is not empty */
-			uncompressed_sz = 
-				zft_uncompress(zftc_buf,
-					       cseg.uncmpr == 0x8000 ?
-					       -cseg.cmpr_pos : cseg.cmpr_pos,
-					       zftc_scratch_buf,
-					       volume->blk_sz);
-			if (uncompressed_sz != volume->blk_sz) {
-				*read_cnt = 0;
-				TRACE_ABORT(-EIO, ft_t_warn,
-				      "Uncompressed blk (%d) != blk size (%d)",
-				      uncompressed_sz, volume->blk_sz);
-			}       
-			if (copy_to_user(dst_buf + result, 
-					 zftc_scratch_buf, 
-					 uncompressed_sz) != 0 ) {
-				TRACE_EXIT -EFAULT;
-			}
-			remaining      -= uncompressed_sz;
-			result     += uncompressed_sz;
-			cseg.cmpr_pos  = 0;
-		}                                              
-		if (remaining > 0) {
-			get_next_cluster(&cseg, src_buf, seg_sz, 
-					 volume->end_seg == pos->seg_pos);
-			if (cseg.count != 0) {
-				memcpy(zftc_buf, src_buf + cseg.offset,
-				       cseg.count);
-				cseg.cmpr_pos = cseg.count;
-				cseg.offset  += cseg.count;
-				*read_cnt += cseg.count + sizeof(__u16);
-			} else {
-				remaining = 0;
-			}
-		}
-		TRACE(ft_t_data_flow, "\n" 
-		      KERN_INFO "compressed_sz: %d\n"
-		      KERN_INFO "compos       : %d\n"
-		      KERN_INFO "*read_cnt    : %d",
-		      cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt);
-	}
-	if (seg_sz - cseg.offset <= 18) {
-		*read_cnt += seg_sz - cseg.offset;
-		TRACE(ft_t_data_flow, "expanding read cnt to: %d", *read_cnt);
-	}
-	TRACE(ft_t_data_flow, "\n"
-	      KERN_INFO "segment size   : %d\n"
-	      KERN_INFO "read count     : %d\n"
-	      KERN_INFO "buf_pos_read   : %d\n"
-	      KERN_INFO "remaining      : %d",
-		seg_sz, *read_cnt, pos->seg_byte_pos, 
-		seg_sz - *read_cnt - pos->seg_byte_pos);
-	TRACE(ft_t_data_flow, "returning: %d", result);
-	TRACE_EXIT result;
-}                
-
-/* Seeks to the new data position; sometimes reads in a segment.
- *  
- * start_seg and end_seg give the boundaries of the current volume
- * blk_sz is the blk_sz of the current volume as stored in the
- * volume label
- *
- * We don't allow blocksizes less than 1024 bytes, therefore we don't need
- * a 64 bit argument for new_block_pos.
- */
-
-static int seek_in_segment(const unsigned int to_do, cmpr_info  *c_info,
-			   const char *src_buf, const int seg_sz, 
-			   const int seg_pos, const zft_volinfo *volume);
-static int slow_seek_forward_until_error(const unsigned int distance,
-					 cmpr_info *c_info, zft_position *pos, 
-					 const zft_volinfo *volume, __u8 *buf);
-static int search_valid_segment(unsigned int segment,
-				const unsigned int end_seg,
-				const unsigned int max_foffs,
-				zft_position *pos, cmpr_info *c_info,
-				const zft_volinfo *volume, __u8 *buf);
-static int slow_seek_forward(unsigned int dest, cmpr_info *c_info,
-			     zft_position *pos, const zft_volinfo *volume,
-			     __u8 *buf);
-static int compute_seg_pos(unsigned int dest, zft_position *pos,
-			   const zft_volinfo *volume);
-
-#define ZFT_SLOW_SEEK_THRESHOLD  10 /* segments */
-#define ZFT_FAST_SEEK_MAX_TRIALS 10 /* times */
-#define ZFT_FAST_SEEK_BACKUP     10 /* segments */
-
-static int zftc_seek(unsigned int new_block_pos,
-		     zft_position *pos, const zft_volinfo *volume, __u8 *buf)
-{
-	unsigned int dest;
-	int limit;
-	int distance;
-	int result = 0;
-	int seg_dist;
-	int new_seg;
-	int old_seg = 0;
-	int fast_seek_trials = 0;
-	TRACE_FUN(ft_t_flow);
-
-	if (new_block_pos == 0) {
-		pos->seg_pos      = volume->start_seg;
-		pos->seg_byte_pos = 0;
-		pos->volume_pos   = 0;
-		zftc_reset();
-		TRACE_EXIT 0;
-	}
-	dest = new_block_pos * (volume->blk_sz >> 10);
-	distance = dest - (pos->volume_pos >> 10);
-	while (distance != 0) {
-		seg_dist = compute_seg_pos(dest, pos, volume);
-		TRACE(ft_t_noise, "\n"
-		      KERN_INFO "seg_dist: %d\n"
-		      KERN_INFO "distance: %d\n"
-		      KERN_INFO "dest    : %d\n"
-		      KERN_INFO "vpos    : %d\n"
-		      KERN_INFO "seg_pos : %d\n"
-		      KERN_INFO "trials  : %d",
-		      seg_dist, distance, dest,
-		      (unsigned int)(pos->volume_pos>>10), pos->seg_pos,
-		      fast_seek_trials);
-		if (distance > 0) {
-			if (seg_dist < 0) {
-				TRACE(ft_t_bug, "BUG: distance %d > 0, "
-				      "segment difference %d < 0",
-				      distance, seg_dist);
-				result = -EIO;
-				break;
-			}
-			new_seg = pos->seg_pos + seg_dist;
-			if (new_seg > volume->end_seg) {
-				new_seg = volume->end_seg;
-			}
-			if (old_seg == new_seg || /* loop */
-			    seg_dist <= ZFT_SLOW_SEEK_THRESHOLD ||
-			    fast_seek_trials >= ZFT_FAST_SEEK_MAX_TRIALS) {
-				TRACE(ft_t_noise, "starting slow seek:\n"
-				   KERN_INFO "fast seek failed too often: %s\n"
-				   KERN_INFO "near target position      : %s\n"
-				   KERN_INFO "looping between two segs  : %s",
-				      (fast_seek_trials >= 
-				       ZFT_FAST_SEEK_MAX_TRIALS)
-				      ? "yes" : "no",
-				      (seg_dist <= ZFT_SLOW_SEEK_THRESHOLD) 
-				      ? "yes" : "no",
-				      (old_seg == new_seg)
-				      ? "yes" : "no");
-				result = slow_seek_forward(dest, &cseg, 
-							   pos, volume, buf);
-				break;
-			}
-			old_seg = new_seg;
-			limit = volume->end_seg;
-			fast_seek_trials ++;
-			for (;;) {
-				result = search_valid_segment(new_seg, limit,
-							      volume->size,
-							      pos, &cseg,
-							      volume, buf);
-				if (result == 0 || result == -EINTR) {
-					break;
-				}
-				if (new_seg == volume->start_seg) {
-					result = -EIO; /* set error
-							* condition
-							*/
-					break;
-				}
-				limit    = new_seg;
-				new_seg -= ZFT_FAST_SEEK_BACKUP;
-				if (new_seg < volume->start_seg) {
-					new_seg = volume->start_seg;
-				}
-			}
-			if (result < 0) {
-				TRACE(ft_t_warn,
-				      "Couldn't find a readable segment");
-				break;
-			}
-		} else /* if (distance < 0) */ {
-			if (seg_dist > 0) {
-				TRACE(ft_t_bug, "BUG: distance %d < 0, "
-				      "segment difference %d >0",
-				      distance, seg_dist);
-				result = -EIO;
-				break;
-			}
-			new_seg = pos->seg_pos + seg_dist;
-			if (fast_seek_trials > 0 && seg_dist == 0) {
-				/* this avoids sticking to the same
-				 * segment all the time. On the other hand:
-				 * if we got here for the first time, and the
-				 * deblock_buffer still contains a valid
-				 * segment, then there is no need to skip to 
-				 * the previous segment if the desired position
-				 * is inside this segment.
-				 */
-				new_seg --;
-			}
-			if (new_seg < volume->start_seg) {
-				new_seg = volume->start_seg;
-			}
-			limit   = pos->seg_pos;
-			fast_seek_trials ++;
-			for (;;) {
-				result = search_valid_segment(new_seg, limit,
-							      pos->volume_pos,
-							      pos, &cseg,
-							      volume, buf);
-				if (result == 0 || result == -EINTR) {
-					break;
-				}
-				if (new_seg == volume->start_seg) {
-					result = -EIO; /* set error
-							* condition
-							*/
-					break;
-				}
-				limit    = new_seg;
-				new_seg -= ZFT_FAST_SEEK_BACKUP;
-				if (new_seg < volume->start_seg) {
-					new_seg = volume->start_seg;
-				}
-			}
-			if (result < 0) {
-				TRACE(ft_t_warn,
-				      "Couldn't find a readable segment");
-				break;
-			}
-		}
-		distance = dest - (pos->volume_pos >> 10);
-	}
-	TRACE_EXIT result;
-}
-
-
-/*  advance inside the given segment by at most to_do kilobytes and return
- *  the number of kilobytes actually moved
- */
-
-static int seek_in_segment(const unsigned int to_do,
-			   cmpr_info  *c_info,
-			   const char *src_buf, 
-			   const int seg_sz, 
-			   const int seg_pos,
-			   const zft_volinfo *volume)
-{
-	int result = 0;
-	int blk_sz = volume->blk_sz >> 10;
-	int remaining = to_do;
-	TRACE_FUN(ft_t_flow);
-
-	if (c_info->offset == 0) {
-		/* new segment just read
-		 */
-		TRACE_CATCH(get_cseg(c_info, src_buf, seg_sz, volume),);
-		c_info->cmpr_pos += c_info->count;
-		DUMP_CMPR_INFO(ft_t_noise, "", c_info);
-	}
-	/* loop and uncompress until user buffer full or
-	 * deblock-buffer empty 
-	 */
-	TRACE(ft_t_noise, "compressed_sz: %d, compos : %d",
-	      c_info->cmpr_sz, c_info->cmpr_pos);
-	while (c_info->spans == 0 && remaining > 0) {
-		if (c_info->cmpr_pos  != 0) { /* cmpr buf is not empty */
-			result       += blk_sz;
-			remaining    -= blk_sz;
-			c_info->cmpr_pos = 0;
-		}
-		if (remaining > 0) {
-			get_next_cluster(c_info, src_buf, seg_sz, 
-					 volume->end_seg == seg_pos);
-			if (c_info->count != 0) {
-				c_info->cmpr_pos = c_info->count;
-				c_info->offset  += c_info->count;
-			} else {
-				break;
-			}
-		}
-		/*  Allow escape from this loop on signal!
-		 */
-		FT_SIGNAL_EXIT(_DONT_BLOCK);
-		DUMP_CMPR_INFO(ft_t_noise, "", c_info);
-		TRACE(ft_t_noise, "to_do: %d", remaining);
-	}
-	if (seg_sz - c_info->offset <= 18) {
-		c_info->offset = seg_sz;
-	}
-	TRACE(ft_t_noise, "\n"
-	      KERN_INFO "segment size   : %d\n"
-	      KERN_INFO "buf_pos_read   : %d\n"
-	      KERN_INFO "remaining      : %d",
-	      seg_sz, c_info->offset,
-	      seg_sz - c_info->offset);
-	TRACE_EXIT result;
-}                
-
-static int slow_seek_forward_until_error(const unsigned int distance,
-					 cmpr_info *c_info,
-					 zft_position *pos, 
-					 const zft_volinfo *volume,
-					 __u8 *buf)
-{
-	unsigned int remaining = distance;
-	int seg_sz;
-	int seg_pos;
-	int result;
-	TRACE_FUN(ft_t_flow);
-	
-	seg_pos = pos->seg_pos;
-	do {
-		TRACE_CATCH(seg_sz = zft_fetch_segment(seg_pos, buf, 
-						       FT_RD_AHEAD),);
-		/* now we have the contents of the actual segment in
-		 * the deblock buffer
-		 */
-		TRACE_CATCH(result = seek_in_segment(remaining, c_info, buf,
-						     seg_sz, seg_pos,volume),);
-		remaining        -= result;
-		pos->volume_pos  += result<<10;
-		pos->seg_pos      = seg_pos;
-		pos->seg_byte_pos = c_info->offset;
-		seg_pos ++;
-		if (seg_pos <= volume->end_seg && c_info->offset == seg_sz) {
-			pos->seg_pos ++;
-			pos->seg_byte_pos = 0;
-			c_info->offset = 0;
-		}
-		/*  Allow escape from this loop on signal!
-		 */
-		FT_SIGNAL_EXIT(_DONT_BLOCK);
-		TRACE(ft_t_noise, "\n"
-		      KERN_INFO "remaining:  %d\n"
-		      KERN_INFO "seg_pos:    %d\n"
-		      KERN_INFO "end_seg:    %d\n"
-		      KERN_INFO "result:     %d",
-		      remaining, seg_pos, volume->end_seg, result);  
-	} while (remaining > 0 && seg_pos <= volume->end_seg);
-	TRACE_EXIT 0;
-}
-
-/* return segment id of next segment containing valid data, -EIO otherwise
- */
-static int search_valid_segment(unsigned int segment,
-				const unsigned int end_seg,
-				const unsigned int max_foffs,
-				zft_position *pos,
-				cmpr_info *c_info,
-				const zft_volinfo *volume,
-				__u8 *buf)
-{
-	cmpr_info tmp_info;
-	int seg_sz;
-	TRACE_FUN(ft_t_flow);
-	
-	memset(&tmp_info, 0, sizeof(cmpr_info));
-	while (segment <= end_seg) {
-		FT_SIGNAL_EXIT(_DONT_BLOCK);
-		TRACE(ft_t_noise,
-		      "Searching readable segment between %d and %d",
-		      segment, end_seg);
-		seg_sz = zft_fetch_segment(segment, buf, FT_RD_AHEAD);
-		if ((seg_sz > 0) &&
-		    (get_cseg (&tmp_info, buf, seg_sz, volume) >= 0) &&
-		    (tmp_info.foffs != 0 || segment == volume->start_seg)) {
-			if ((tmp_info.foffs>>10) > max_foffs) {
-				TRACE_ABORT(-EIO, ft_t_noise, "\n"
-					    KERN_INFO "cseg.foff: %d\n"
-					    KERN_INFO "dest     : %d",
-					    (int)(tmp_info.foffs >> 10),
-					    max_foffs);
-			}
-			DUMP_CMPR_INFO(ft_t_noise, "", &tmp_info);
-			*c_info           = tmp_info;
-			pos->seg_pos      = segment;
-			pos->volume_pos   = c_info->foffs;
-			pos->seg_byte_pos = c_info->offset;
-			TRACE(ft_t_noise, "found segment at %d", segment);
-			TRACE_EXIT 0;
-		}
-		segment++;
-	}
-	TRACE_EXIT -EIO;
-}
-
-static int slow_seek_forward(unsigned int dest,
-			     cmpr_info *c_info,
-			     zft_position *pos,
-			     const zft_volinfo *volume,
-			     __u8 *buf)
-{
-	unsigned int distance;
-	int result = 0;
-	TRACE_FUN(ft_t_flow);
-		
-	distance = dest - (pos->volume_pos >> 10);
-	while ((distance > 0) &&
-	       (result = slow_seek_forward_until_error(distance,
-						       c_info,
-						       pos,
-						       volume,
-						       buf)) < 0) {
-		if (result == -EINTR) {
-			break;
-		}
-		TRACE(ft_t_noise, "seg_pos: %d", pos->seg_pos);
-		/* the failing segment is either pos->seg_pos or
-		 * pos->seg_pos + 1. There is no need to further try
-		 * that segment, because ftape_read_segment() already
-		 * has tried very much to read it. So we start with
-		 * following segment, which is pos->seg_pos + 1
-		 */
-		if(search_valid_segment(pos->seg_pos+1, volume->end_seg, dest,
-					pos, c_info,
-					volume, buf) < 0) {
-			TRACE(ft_t_noise, "search_valid_segment() failed");
-			result = -EIO;
-			break;
-		}
-		distance = dest - (pos->volume_pos >> 10);
-		result = 0;
-		TRACE(ft_t_noise, "segment: %d", pos->seg_pos);
-		/* found valid segment, retry the seek */
-	}
-	TRACE_EXIT result;
-}
-
-static int compute_seg_pos(const unsigned int dest,
-			   zft_position *pos,
-			   const zft_volinfo *volume)
-{
-	int segment;
-	int distance = dest - (pos->volume_pos >> 10);
-	unsigned int raw_size;
-	unsigned int virt_size;
-	unsigned int factor;
-	TRACE_FUN(ft_t_flow);
-
-	if (distance >= 0) {
-		raw_size  = volume->end_seg - pos->seg_pos + 1;
-		virt_size = ((unsigned int)(volume->size>>10) 
-			     - (unsigned int)(pos->volume_pos>>10)
-			     + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1);
-		virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS;
-		if (virt_size == 0 || raw_size == 0) {
-			TRACE_EXIT 0;
-		}
-		if (raw_size >= (1<<25)) {
-			factor = raw_size/(virt_size>>7);
-		} else {
-			factor = (raw_size<<7)/virt_size;
-		}
-		segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS);
-		segment = (segment * factor)>>7;
-	} else {
-		raw_size  = pos->seg_pos - volume->start_seg + 1;
-		virt_size = ((unsigned int)(pos->volume_pos>>10)
-			     + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1);
-		virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS;
-		if (virt_size == 0 || raw_size == 0) {
-			TRACE_EXIT 0;
-		}
-		if (raw_size >= (1<<25)) {
-			factor = raw_size/(virt_size>>7);
-		} else {
-			factor = (raw_size<<7)/virt_size;
-		}
-		segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS);
-	}
-	TRACE(ft_t_noise, "factor: %d/%d", factor, 1<<7);
-	TRACE_EXIT segment;
-}
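As an illustration of the scaling used in compute_seg_pos() above: the logical seek distance (in kilobytes of uncompressed data) is converted to an estimated number of raw tape segments via a 7-bit fixed-point ratio of raw to virtual volume size. A minimal standalone sketch, assuming the usual 29 data kilobytes per segment (32 sectors minus 3 ECC sectors) and purely hypothetical sizes:

#include <stdio.h>

#define DATA_KB_PER_SEGMENT 29	/* assumed: FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS */

static int estimate_segments(unsigned int distance_kb,
			     unsigned int raw_segments,
			     unsigned int virt_kb)
{
	unsigned int virt_segments =
		(virt_kb + DATA_KB_PER_SEGMENT - 1) / DATA_KB_PER_SEGMENT;
	unsigned int factor;
	int segment;

	if (virt_segments == 0 || raw_segments == 0)
		return 0;
	/* ratio of raw to virtual size, scaled by 2^7 as in compute_seg_pos() */
	if (raw_segments >= (1 << 25))
		factor = raw_segments / (virt_segments >> 7);
	else
		factor = (raw_segments << 7) / virt_segments;
	segment = distance_kb / DATA_KB_PER_SEGMENT;
	return (segment * factor) >> 7;
}

int main(void)
{
	/* hypothetical: 2000 KB still to cover, 120 raw segments holding 5000 KB */
	printf("%d\n", estimate_segments(2000, 120, 5000));
	return 0;
}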
-
-static struct zft_cmpr_ops cmpr_ops = {
-	zftc_write,
-	zftc_read,
-	zftc_seek,
-	zftc_lock,
-	zftc_reset,
-	zftc_cleanup
-};
-
-int zft_compressor_init(void)
-{
-	TRACE_FUN(ft_t_flow);
-	
-#ifdef MODULE
-	printk(KERN_INFO "zftape compressor v1.00a 970514 for " FTAPE_VERSION "\n");
-        if (TRACE_LEVEL >= ft_t_info) {
-		printk(
-KERN_INFO "(c) 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de)\n"
-KERN_INFO "Compressor for zftape (lzrw3 algorithm)\n");
-        }
-#else /* !MODULE */
-	/* print a short no-nonsense boot message */
-	printk(KERN_INFO "zftape compressor v1.00a 970514\n");
-	printk(KERN_INFO "For use with " FTAPE_VERSION "\n");
-#endif /* MODULE */
-	TRACE(ft_t_info, "zft_compressor_init @ 0x%p", zft_compressor_init);
-	TRACE(ft_t_info, "installing compressor for zftape ...");
-	TRACE_CATCH(zft_cmpr_register(&cmpr_ops),);
-	TRACE_EXIT 0;
-}
-
-#ifdef MODULE
-
-MODULE_AUTHOR(
-	"(c) 1996, 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de");
-MODULE_DESCRIPTION(
-"Compression routines for zftape. Uses the lzrw3 algorithm by Ross Williams");
-MODULE_LICENSE("GPL");
-
-/* Called by modules package when installing the driver
- */
-int init_module(void)
-{
-	return zft_compressor_init();
-}
-
-#endif /* MODULE */
diff --git a/drivers/char/ftape/compressor/zftape-compress.h b/drivers/char/ftape/compressor/zftape-compress.h
deleted file mode 100644
index f200741..0000000
--- a/drivers/char/ftape/compressor/zftape-compress.h
+++ /dev/null
@@ -1,83 +0,0 @@
-#ifndef _ZFTAPE_COMPRESS_H
-#define _ZFTAPE_COMPRESS_H
-/*
- *      Copyright (c) 1994-1997 Claus-Justus Heine
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
- 
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- General Public License for more details.
- 
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/compressor/zftape-compress.h,v $
- * $Revision: 1.1 $
- * $Date: 1997/10/05 19:12:32 $
- *
- * This file contains macros and definitions for zftape's
- * builtin compression code.
- *
- */
-
-#include "../zftape/zftape-buffers.h"
-#include "../zftape/zftape-vtbl.h"
-#include "../compressor/lzrw3.h"
-
-/* CMPR_WRK_MEM_SIZE gives the size of the compression wrk_mem */
-/* I got these out of lzrw3.c */
-#define U(X)            ((__u32) X)
-#define SIZE_P_BYTE     (U(sizeof(__u8 *)))
-#define ALIGNMENT_FUDGE (U(16))
-
-#define CMPR_WRK_MEM_SIZE (U(4096)*(SIZE_P_BYTE) + ALIGNMENT_FUDGE)
-
-/* the maximum number of bytes by which the size of the "compressed" data
- * can exceed the uncompressed data. As it is quite useless to compress
- * data twice, it is sometimes more efficient to simply copy a block of
- * data rather than to feed it to the "compression" algorithm. In that
- * case some flag bytes or the like precede the "compressed" data.
- * THAT MUST NOT BE THE CASE for the
- * algorithm we use for this driver. Instead, the high bit 15 of
- * compressed_size:
- *
- * compressed_size = ftape_compress()
- *
- * must be set in such a case.
- *
- * Nevertheless, it might also be as for lzrw3 that there is an
- * "intermediate" overrun that exceeds the amount of the compressed
- * data that is actually produced. During the algorithm we need in the
- * worst case MAX_CMP_GROUP bytes more than the input-size.
- */
-#define MAX_CMP_GROUP (2+16*2) /* from lzrw3.c */
-
-#define CMPR_OVERRUN      MAX_CMP_GROUP /* during compression */
-
-/****************************************************/
-
-#define     CMPR_BUFFER_SIZE (MAX_BLOCK_SIZE + CMPR_OVERRUN)
-
-/* the compression map stores the byte offset of compressed blocks within
- * the current volume for cartridges with format codes 2, 3 and 5
- * (and old versions of zftape) and the offset measured in kilobytes for
- * format code 4 and 6. This gives us a possible max. size of a 
- * compressed volume of 1024*4GIG which should be enough.
- */
-typedef __u32 CmprMap;
-
-/* globals 
- */
-
-/* exported functions
- */
-
-#endif /* _ZFTAPE_COMPRESS_H */
diff --git a/drivers/char/ftape/lowlevel/Makefile b/drivers/char/ftape/lowlevel/Makefile
deleted file mode 100644
index febab07..0000000
--- a/drivers/char/ftape/lowlevel/Makefile
+++ /dev/null
@@ -1,43 +0,0 @@
-#
-#       Copyright (C) 1996, 1997 Claus-Justus Heine.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-# 
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-# 
-# You should have received a copy of the GNU General Public License
-# along with this program; see the file COPYING.  If not, write to
-# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-#
-# $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/Makefile,v $
-# $Revision: 1.4 $
-# $Date: 1997/10/07 09:26:02 $
-#
-#      Makefile for the lowlevel part QIC-40/80/3010/3020 floppy-tape
-#      driver for Linux.
-#
-
-obj-$(CONFIG_FTAPE) += ftape.o
-
-ftape-objs := ftape-init.o fdc-io.o fdc-isr.o \
-	      ftape-bsm.o ftape-ctl.o ftape-read.o ftape-rw.o \
-	      ftape-write.o ftape-io.o ftape-calibr.o ftape-ecc.o fc-10.o \
-	      ftape-buffer.o ftape-format.o ftape_syms.o
-
-ifeq ($(CONFIG_FTAPE),y)
-ftape-objs += ftape-setup.o
-endif
-
-ifndef CONFIG_FT_NO_TRACE_AT_ALL
-ftape-objs += ftape-tracing.o
-endif
-
-ifeq ($(CONFIG_FT_PROC_FS),y)
-ftape-objs += ftape-proc.o
-endif
diff --git a/drivers/char/ftape/lowlevel/fc-10.c b/drivers/char/ftape/lowlevel/fc-10.c
deleted file mode 100644
index 9bc1cdd..0000000
--- a/drivers/char/ftape/lowlevel/fc-10.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- *
-
-   Copyright (C) 1993,1994 Jon Tombs.
-
-   This program is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-   GNU General Public License for more details.
-
-   The entire guts of this program was written by dosemu, modified to
-   record reads and writes to the ports in the 0x180-0x188 address space,
-   while running the CMS program TAPE.EXE V2.0.5 supplied with the drive.
-
-   Modified to use an array of addresses and generally cleaned up (made
-   much shorter) 4 June 94, dosemu isn't that good at writing short code it
-   would seem :-). Made independent of 0x180, but I doubt it will work
-   at any other address.
-
-   Modified for distribution with ftape source. 21 June 94, SJL.
-
-   Modifications on 20 October 95, by Daniel Cohen (catman@wpi.edu):
-   Modified to support different DMA, IRQ, and IO Ports.  Borland's
-   Turbo Debugger in virtual 8086 mode (TD386.EXE with hardware breakpoints
-   provided by the TDH386.SYS Device Driver) was used on the CMS program
-   TAPE V4.0.5.  I set breakpoints on I/O to ports 0x180-0x187.  Note that
-   CMS's program will not successfully configure the tape drive if you set
-   breakpoints on IO Reads, but you can set them on IO Writes without problems.
-   Known problems:
-   - You can not use DMA Channels 5 or 7.
-
-   Modification on 29 January 96, by Daniel Cohen (catman@wpi.edu):
-   Modified to only accept IRQs 3 - 7, or 9.  Since we can only send a 3 bit
-   number representing the IRQ to the card, special handling is required when
-   IRQ 9 is selected.  IRQ 2 and 9 are the same, and we should request IRQ 9
-   from the kernel while telling the card to use IRQ 2.  Thanks to Greg
-   Crider (gcrider@iclnet.org) for finding and locating this bug, as well as
-   testing the patch.
-
-   Modification on 11 December 96, by Claus Heine (claus@momo.math.rwth-aachen.de):
-   Modified a little to use the variables ft_fdc_base, ft_fdc_irq, ft_fdc_dma
-   instead of preprocessor symbols. Thus we can compile this into the module
-   or kernel and let the user specify the options as command line arguments.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/fc-10.c,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:04 $
- *
- *      This file contains code for the CMS FC-10/FC-20 card.
- */
-
-#include <asm/io.h>
-#include <linux/ftape.h>
-#include "../lowlevel/ftape-tracing.h"
-#include "../lowlevel/fdc-io.h"
-#include "../lowlevel/fc-10.h"
-
-static __u16 inbs_magic[] = {
-	0x3, 0x3, 0x0, 0x4, 0x7, 0x2, 0x5, 0x3, 0x1, 0x4,
-	0x3, 0x5, 0x2, 0x0, 0x3, 0x7, 0x4, 0x2,
-	0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
-};
-
-static __u16 fc10_ports[] = {
-	0x180, 0x210, 0x2A0, 0x300, 0x330, 0x340, 0x370
-};
-
-int fc10_enable(void)
-{
-	int i;
-	__u8 cardConfig = 0x00;
-	__u8 x;
-	TRACE_FUN(ft_t_flow);
-
-/*  This code will only work if the FC-10 (or FC-20) is set to
- *  use DMA channels 1, 2, or 3.  DMA channels 5 and 7 seem to be 
- *  initialized by the same command as channels 1 and 3, respectively.
- */
-	if (ft_fdc_dma > 3) {
-		TRACE_ABORT(0, ft_t_err,
-"Error: The FC-10/20 must be set to use DMA channels 1, 2, or 3!");
-	}
-/*  Only allow the FC-10/20 to use IRQ 3-7, or 9.  Note that CMS's program
- *  only accepts IRQ's 2-7, but in linux, IRQ 2 is the same as IRQ 9.
- */
-	if (ft_fdc_irq < 3 || ft_fdc_irq == 8 || ft_fdc_irq > 9) {
-		TRACE_ABORT(0, ft_t_err, 
-"Error: The FC-10/20 must be set to use IRQ levels 3 - 7, or 9!\n"
-KERN_INFO "Note: IRQ 9 is the same as IRQ 2");
-	}
-	/*  Clear state machine ???
-	 */
-	for (i = 0; i < NR_ITEMS(inbs_magic); i++) {
-		inb(ft_fdc_base + inbs_magic[i]);
-	}
-	outb(0x0, ft_fdc_base);
-
-	x = inb(ft_fdc_base);
-	if (x == 0x13 || x == 0x93) {
-		for (i = 1; i < 8; i++) {
-			if (inb(ft_fdc_base + i) != x) {
-				TRACE_EXIT 0;
-			}
-		}
-	} else {
-		TRACE_EXIT 0;
-	}
-
-	outb(0x8, ft_fdc_base);
-
-	for (i = 0; i < 8; i++) {
-		if (inb(ft_fdc_base + i) != 0x0) {
-			TRACE_EXIT 0;
-		}
-	}
-	outb(0x10, ft_fdc_base);
-
-	for (i = 0; i < 8; i++) {
-		if (inb(ft_fdc_base + i) != 0xff) {
-			TRACE_EXIT 0;
-		}
-	}
-
-	/*  Okay, we found a FC-10 card ! ???
-	 */
-	outb(0x0, fdc.ccr);
-
-	/*  Clear state machine again ???
-	 */
-	for (i = 0; i < NR_ITEMS(inbs_magic); i++) {
-		inb(ft_fdc_base + inbs_magic[i]);
-	}
-	/* Send io port */
-	for (i = 0; i < NR_ITEMS(fc10_ports); i++)
-		if (ft_fdc_base == fc10_ports[i])
-			cardConfig = i + 1;
-	if (cardConfig == 0) {
-		TRACE_EXIT 0;	/* Invalid I/O Port */
-	}
-	/* and IRQ - If using IRQ 9, tell the FC card it is actually IRQ 2 */
-	if (ft_fdc_irq != 9)
-		cardConfig |= ft_fdc_irq << 3;
-	else
-		cardConfig |= 2 << 3;
-
-	/* and finally DMA Channel */
-	cardConfig |= ft_fdc_dma << 6;
-	outb(cardConfig, ft_fdc_base);	/* DMA [2 bits]/IRQ [3 bits]/BASE [3 bits] */
-
-	/*  Enable FC-10 ???
-	 */
-	outb(0, fdc.ccr);
-	outb(0, fdc.dor2);
-	outb(FDC_DMA_MODE /* 8 */, fdc.dor);
-	outb(FDC_DMA_MODE /* 8 */, fdc.dor);
-	outb(1, fdc.dor2);
-
-	/*************************************
-	 *
-	 * cH: why the hell should this be necessary? This is done 
-	 *     by fdc_reset()!!!
-	 *
-	 *************************************/
-	/*  Initialize fdc, select drive B:
-	 */
-	outb(FDC_DMA_MODE, fdc.dor);	/* assert reset, dma & irq enabled */
-	/*       0x08    */
-	outb(FDC_DMA_MODE|FDC_RESET_NOT, fdc.dor);	/* release reset */
-	/*       0x08    |   0x04   = 0x0c */
-	outb(FDC_DMA_MODE|FDC_RESET_NOT|FDC_MOTOR_1|FTAPE_SEL_B, fdc.dor);
-	/*       0x08    |   0x04      |  0x20     |  0x01  = 0x2d */    
-	/* select drive 1 */ /* why not drive 0 ???? */
-	TRACE_EXIT (x == 0x93) ? 2 : 1;
-}
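For illustration, the configuration byte written to the card at the end of fc10_enable() packs the base-address index (3 bits), the IRQ (3 bits, with IRQ 9 reported to the card as IRQ 2) and the DMA channel (2 bits). A hypothetical standalone helper showing just that packing, not taken from the driver:

static unsigned char fc10_config_byte(unsigned int base_index,	/* 1..7, index into fc10_ports[] plus one */
				      unsigned int irq,		/* 3..7 or 9 */
				      unsigned int dma)		/* 1..3 */
{
	unsigned char cfg = base_index & 0x07;

	cfg |= ((irq == 9 ? 2 : irq) & 0x07) << 3;	/* IRQ 9 is wired as IRQ 2 */
	cfg |= (dma & 0x03) << 6;
	return cfg;
}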
diff --git a/drivers/char/ftape/lowlevel/fc-10.h b/drivers/char/ftape/lowlevel/fc-10.h
deleted file mode 100644
index da7b88b..0000000
--- a/drivers/char/ftape/lowlevel/fc-10.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef _FC_10_H
-#define _FC_10_H
-
-/*
- * Copyright (C) 1994-1996 Bas Laarhoven.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/fc-10.h,v $
- * $Revision: 1.1 $
- * $Date: 1997/09/19 09:05:22 $
- *
- *      This file contains definitions for the FC-10 code
- *      of the QIC-40/80 floppy-tape driver for Linux.
- */
-
-/*
- *      fc-10.c defined global vars.
- */
-
-/*
- *      fc-10.c defined global functions.
- */
-extern int fc10_enable(void);
-
-#endif
diff --git a/drivers/char/ftape/lowlevel/fdc-io.c b/drivers/char/ftape/lowlevel/fdc-io.c
deleted file mode 100644
index bbcf918..0000000
--- a/drivers/char/ftape/lowlevel/fdc-io.c
+++ /dev/null
@@ -1,1349 +0,0 @@
-/*
- * Copyright (C) 1993-1996 Bas Laarhoven,
- *           (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/fdc-io.c,v $
- * $Revision: 1.7.4.2 $
- * $Date: 1997/11/16 14:48:17 $
- *
- *      This file contains the low-level floppy disk interface code
- *      for the QIC-40/80/3010/3020 floppy-tape driver "ftape" for
- *      Linux.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/irq.h>
-
-#include <linux/ftape.h>
-#include <linux/qic117.h>
-#include "../lowlevel/ftape-tracing.h"
-#include "../lowlevel/fdc-io.h"
-#include "../lowlevel/fdc-isr.h"
-#include "../lowlevel/ftape-io.h"
-#include "../lowlevel/ftape-rw.h"
-#include "../lowlevel/ftape-ctl.h"
-#include "../lowlevel/ftape-calibr.h"
-#include "../lowlevel/fc-10.h"
-
-/*      Global vars.
- */
-static int ftape_motor;
-volatile int ftape_current_cylinder = -1;
-volatile fdc_mode_enum fdc_mode = fdc_idle;
-fdc_config_info fdc;
-DECLARE_WAIT_QUEUE_HEAD(ftape_wait_intr);
-
-unsigned int ft_fdc_base       = CONFIG_FT_FDC_BASE;
-unsigned int ft_fdc_irq        = CONFIG_FT_FDC_IRQ;
-unsigned int ft_fdc_dma        = CONFIG_FT_FDC_DMA;
-unsigned int ft_fdc_threshold  = CONFIG_FT_FDC_THR;  /* bytes */
-unsigned int ft_fdc_rate_limit = CONFIG_FT_FDC_MAX_RATE; /* bits/sec */
-int ft_probe_fc10        = CONFIG_FT_PROBE_FC10;
-int ft_mach2             = CONFIG_FT_MACH2;
-
-/*      Local vars.
- */
-static spinlock_t fdc_io_lock; 
-static unsigned int fdc_calibr_count;
-static unsigned int fdc_calibr_time;
-static int fdc_status;
-volatile __u8 fdc_head;		/* FDC head from sector id */
-volatile __u8 fdc_cyl;		/* FDC track from sector id */
-volatile __u8 fdc_sect;		/* FDC sector from sector id */
-static int fdc_data_rate = 500;	/* data rate (Kbps) */
-static int fdc_rate_code;	/* data rate code (0 == 500 Kbps) */
-static int fdc_seek_rate = 2;	/* step rate (msec) */
-static void (*do_ftape) (void);
-static int fdc_fifo_state;	/* original fifo setting - fifo enabled */
-static int fdc_fifo_thr;	/* original fifo setting - threshold */
-static int fdc_lock_state;	/* original lock setting - locked */
-static int fdc_fifo_locked;	/* has fifo && lock set ? */
-static __u8 fdc_precomp;	/* default precomp. value (nsec) */
-static __u8 fdc_prec_code;	/* fdc precomp. select code */
-
-static char ftape_id[] = "ftape";  /* used by request irq and free irq */
-
-static int fdc_set_seek_rate(int seek_rate);
-
-void fdc_catch_stray_interrupts(int count)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&fdc_io_lock, flags);
-	if (count == 0) {
-		ft_expected_stray_interrupts = 0;
-	} else {
-		ft_expected_stray_interrupts += count;
-	}
-	spin_unlock_irqrestore(&fdc_io_lock, flags);
-}
-
-/*  Wait during a timeout period for a given FDC status.
- *  If usecs == 0 then just test status, else wait at least for usecs.
- *  Returns -ETIME on timeout. Function must be calibrated first !
- */
-static int fdc_wait(unsigned int usecs, __u8 mask, __u8 state)
-{
-	int count_1 = (fdc_calibr_count * usecs +
-                       fdc_calibr_count - 1) / fdc_calibr_time;
-
-	do {
-		fdc_status = inb_p(fdc.msr);
-		if ((fdc_status & mask) == state) {
-			return 0;
-		}
-	} while (count_1-- >= 0);
-	return -ETIME;
-}
-
-int fdc_ready_wait(unsigned int usecs)
-{
-	return fdc_wait(usecs, FDC_DATA_READY | FDC_BUSY, FDC_DATA_READY);
-}
-
-/* Why can't we just use udelay()?
- */
-static void fdc_usec_wait(unsigned int usecs)
-{
-	fdc_wait(usecs, 0, 1);	/* will always timeout ! */
-}
-
-static int fdc_ready_out_wait(unsigned int usecs)
-{
-	fdc_usec_wait(FT_RQM_DELAY);	/* wait for valid RQM status */
-	return fdc_wait(usecs, FDC_DATA_OUT_READY, FDC_DATA_OUT_READY);
-}
-
-void fdc_wait_calibrate(void)
-{
-	ftape_calibrate("fdc_wait",
-			fdc_usec_wait, &fdc_calibr_count, &fdc_calibr_time); 
-}
-
-/*  Wait for a (short) while for the FDC to become ready
- *  and transfer the next command byte.
- *  Return -ETIME on timeout on getting ready (depends on hardware!).
- */
-static int fdc_write(const __u8 data)
-{
-	fdc_usec_wait(FT_RQM_DELAY);	/* wait for valid RQM status */
-	if (fdc_wait(150, FDC_DATA_READY_MASK, FDC_DATA_IN_READY) < 0) {
-		return -ETIME;
-	} else {
-		outb(data, fdc.fifo);
-		return 0;
-	}
-}
-
-/*  Wait for a (short) while for the FDC to become ready
- *  and transfer the next result byte.
- *  Return -ETIME if timeout on getting ready (depends on hardware!).
- */
-static int fdc_read(__u8 * data)
-{
-	fdc_usec_wait(FT_RQM_DELAY);	/* wait for valid RQM status */
-	if (fdc_wait(150, FDC_DATA_READY_MASK, FDC_DATA_OUT_READY) < 0) {
-		return -ETIME;
-	} else {
-		*data = inb(fdc.fifo);
-		return 0;
-	}
-}
-
-/*  Output a cmd_len long command string to the FDC.
- *  The FDC should be ready to receive a new command or
- *  an error (EBUSY or ETIME) will occur.
- */
-int fdc_command(const __u8 * cmd_data, int cmd_len)
-{
-	int result = 0;
-	unsigned long flags;
-	int count = cmd_len;
-	int retry = 0;
-#ifdef TESTING
-	static unsigned int last_time;
-	unsigned int time;
-#endif
-	TRACE_FUN(ft_t_any);
-
-	fdc_usec_wait(FT_RQM_DELAY);	/* wait for valid RQM status */
-	spin_lock_irqsave(&fdc_io_lock, flags);
-	if (!in_interrupt())
-		/* Yes, I know, too many comments inside this function
-		 * ...
-		 * 
-		 * Yet another bug in the original driver. All that
-		 * havoc is caused by the fact that the isr() sends
-		 * itself a command to the floppy tape driver (pause,
-		 * micro step pause).  Now, the problem is that
-		 * commands are transmitted via the fdc_seek
-		 * command. But: the fdc performs seeks in the
-		 * background i.e. it doesn't signal busy while
-		 * sending the step pulses to the drive. Therefore the
-		 * non-interrupt level driver has no chance to tell
-		 * whether the isr() just has issued a seek. Therefore
-		 * we HAVE TO have a look at the ft_hide_interrupt
-		 * flag: it signals the non-interrupt level part of
-		 * the driver that it has to wait for the fdc until it
-		 * has completed seeking.
-		 *
-		 * THIS WAS PRESUMABLY THE REASON FOR ALL THOSE
-		 * "fdc_read timeout" errors, I HOPE :-)
-		 */
-		if (ft_hide_interrupt) {
-			spin_unlock_irqrestore(&fdc_io_lock, flags);
-			TRACE(ft_t_info,
-			      "Waiting for the isr() completing fdc_seek()");
-			if (fdc_interrupt_wait(2 * FT_SECOND) < 0) {
-				TRACE(ft_t_warn,
-		      "Warning: timeout waiting for isr() seek to complete");
-			}
-			if (ft_hide_interrupt || !ft_seek_completed) {
-				/* There cannot be another
-				 * interrupt. The isr() only stops
-				 * the tape and the next interrupt
-				 * won't come until we have sent our
-				 * command to the drive.
-				 */
-				TRACE_ABORT(-EIO, ft_t_bug,
-					    "BUG? isr() is still seeking?\n"
-					    KERN_INFO "hide: %d\n"
-					    KERN_INFO "seek: %d",
-					    ft_hide_interrupt,
-					    ft_seek_completed);
-
-			}
-			fdc_usec_wait(FT_RQM_DELAY);	/* wait for valid RQM status */
-			spin_lock_irqsave(&fdc_io_lock, flags);
-		}
-	fdc_status = inb(fdc.msr);
-	if ((fdc_status & FDC_DATA_READY_MASK) != FDC_DATA_IN_READY) {
-		spin_unlock_irqrestore(&fdc_io_lock, flags);
-		TRACE_ABORT(-EBUSY, ft_t_err, "fdc not ready");
-	} 
-	fdc_mode = *cmd_data;	/* used by isr */
-#ifdef TESTING
-	if (fdc_mode == FDC_SEEK) {
-		time = ftape_timediff(last_time, ftape_timestamp());
-		if (time < 6000) {
-	TRACE(ft_t_bug,"Warning: short timeout between seek commands: %d",
-	      time);
-		}
-	}
-#endif
-	if (!in_interrupt()) {
-		/* shouldn't be cleared if called from isr
-		 */
-		ft_interrupt_seen = 0;
-	}
-	while (count) {
-		result = fdc_write(*cmd_data);
-		if (result < 0) {
-			TRACE(ft_t_fdc_dma,
-			      "fdc_mode = %02x, status = %02x at index %d",
-			      (int) fdc_mode, (int) fdc_status,
-			      cmd_len - count);
-			if (++retry <= 3) {
-				TRACE(ft_t_warn, "fdc_write timeout, retry");
-			} else {
-				TRACE(ft_t_err, "fdc_write timeout, fatal");
-				/* recover ??? */
-				break;
-			}
-		} else {
-			--count;
-			++cmd_data;
-		}
-        }
-#ifdef TESTING
-	if (fdc_mode == FDC_SEEK) {
-		last_time = ftape_timestamp();
-	}
-#endif
-	spin_unlock_irqrestore(&fdc_io_lock, flags);
-	TRACE_EXIT result;
-}
-
-/*  Input a res_len long result string from the FDC.
- *  The FDC should be ready to send the result or an error
- *  (EBUSY or ETIME) will occur.
- */
-int fdc_result(__u8 * res_data, int res_len)
-{
-	int result = 0;
-	unsigned long flags;
-	int count = res_len;
-	int retry = 0;
-	TRACE_FUN(ft_t_any);
-
-	spin_lock_irqsave(&fdc_io_lock, flags);
-	fdc_status = inb(fdc.msr);
-	if ((fdc_status & FDC_DATA_READY_MASK) != FDC_DATA_OUT_READY) {
-		TRACE(ft_t_err, "fdc not ready");
-		result = -EBUSY;
-	} else while (count) {
-		if (!(fdc_status & FDC_BUSY)) {
-			spin_unlock_irqrestore(&fdc_io_lock, flags);
-			TRACE_ABORT(-EIO, ft_t_err, "premature end of result phase");
-		}
-		result = fdc_read(res_data);
-		if (result < 0) {
-			TRACE(ft_t_fdc_dma,
-			      "fdc_mode = %02x, status = %02x at index %d",
-			      (int) fdc_mode,
-			      (int) fdc_status,
-			      res_len - count);
-			if (++retry <= 3) {
-				TRACE(ft_t_warn, "fdc_read timeout, retry");
-			} else {
-				TRACE(ft_t_err, "fdc_read timeout, fatal");
-				/* recover ??? */
-				break;
-			}
-		} else {
-			--count;
-			++res_data;
-		}
-	}
-	spin_unlock_irqrestore(&fdc_io_lock, flags);
-	fdc_usec_wait(FT_RQM_DELAY);	/* allow FDC to negate BSY */
-	TRACE_EXIT result;
-}
-
-/*      Handle command and result phases for
- *      commands without data phase.
- */
-static int fdc_issue_command(const __u8 * out_data, int out_count,
-		      __u8 * in_data, int in_count)
-{
-	TRACE_FUN(ft_t_any);
-
-	if (out_count > 0) {
-		TRACE_CATCH(fdc_command(out_data, out_count),);
-	}
-	/* will take 24 - 30 usec for fdc_sense_drive_status and
-	 * fdc_sense_interrupt_status commands.
-	 *    35 fails sometimes (5/9/93 SJL)
-	 * On a loaded system it occasionally takes longer than
-	 * this for the fdc to get ready ! ?????? WHY ??????
-	 * So until we know what's going on use a very long timeout.
-	 */
-	TRACE_CATCH(fdc_ready_out_wait(500 /* usec */),);
-	if (in_count > 0) {
-		TRACE_CATCH(fdc_result(in_data, in_count),
-			    TRACE(ft_t_err, "result phase aborted"));
-	}
-	TRACE_EXIT 0;
-}
-
-/*      Wait for FDC interrupt with timeout (in milliseconds).
- *      Signals are blocked so the wait will not be aborted.
- *      Note: interrupts must be enabled ! (23/05/93 SJL)
- */
-int fdc_interrupt_wait(unsigned int time)
-{
-	DECLARE_WAITQUEUE(wait,current);
-	sigset_t old_sigmask;	
-	static int resetting;
-	long timeout;
-
-	TRACE_FUN(ft_t_fdc_dma);
-
- 	if (waitqueue_active(&ftape_wait_intr)) {
-		TRACE_ABORT(-EIO, ft_t_err, "error: nested call");
-	}
-	/* timeout time will be up to USPT microseconds too long ! */
-	timeout = (1000 * time + FT_USPT - 1) / FT_USPT;
-
-	spin_lock_irq(&current->sighand->siglock);
-	old_sigmask = current->blocked;
-	sigfillset(&current->blocked);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
-	set_current_state(TASK_INTERRUPTIBLE);
-	add_wait_queue(&ftape_wait_intr, &wait);
-	while (!ft_interrupt_seen && timeout)
-		timeout = schedule_timeout_interruptible(timeout);
-
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = old_sigmask;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-	
-	remove_wait_queue(&ftape_wait_intr, &wait);
-	/*  the following IS necessary. True: both
-	 *  wake_up_interruptible() and schedule() set TASK_RUNNING
-	 *  when they wake up a task, BUT: it may very well be that
-	 *  ft_interrupt_seen is already set to 1 when we enter here,
-	 *  in which case schedule() never gets called and
-	 *  TASK_RUNNING is never set. This has the funny effect that we
-	 *  execute all the code until we leave kernel space, but then
-	 *  the task is stopped (a task CANNOT be preempted while in
-	 *  kernel mode). Sending a pair of SIGSTOP/SIGCONT to the
-	 *  task wakes it up again. Funny! :-)
-	 */
-	current->state = TASK_RUNNING; 
-	if (ft_interrupt_seen) { /* woken up by interrupt */
-		ft_interrupt_seen = 0;
-		TRACE_EXIT 0;
-	}
-	/*  Original comment:
-	 *  In first instance, next statement seems unnecessary since
-	 *  it will be cleared in fdc_command. However, a small part of
-	 *  the software seems to rely on this being cleared here
-	 *  (ftape_close might fail) so stick to it until things get fixed !
-	 */
-	/*  My deeply sought knowledge:
-	 *  Behold NO! It is obvious. fdc_reset() doesn't call fdc_command()
-	 *  but nevertheless uses fdc_interrupt_wait(). OF COURSE this needs to
-	 *  be reset here.
-	 */
-	ft_interrupt_seen = 0;	/* clear for next call */
-	if (!resetting) {
-		resetting = 1;	/* break infinite recursion if reset fails */
-		TRACE(ft_t_any, "cleanup reset");
-		fdc_reset();
-		resetting = 0;
-	}
-	TRACE_EXIT (signal_pending(current)) ? -EINTR : -ETIME;
-}
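fdc_interrupt_wait() converts its millisecond argument into scheduler ticks with a rounding-up division, which is why the comment above notes that the wait may overshoot by up to FT_USPT microseconds (one tick). A small sketch of that conversion, assuming FT_USPT is simply the tick period in microseconds:

static long ms_to_ticks(unsigned int ms, unsigned int usecs_per_tick)
{
	/* round up so the wait is never shorter than requested */
	return (1000UL * ms + usecs_per_tick - 1) / usecs_per_tick;
}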
-
-/*      Start/stop drive motor. Enable DMA mode.
- */
-void fdc_motor(int motor)
-{
-	int unit = ft_drive_sel;
-	int data = unit | FDC_RESET_NOT | FDC_DMA_MODE;
-	TRACE_FUN(ft_t_any);
-
-	ftape_motor = motor;
-	if (ftape_motor) {
-		data |= FDC_MOTOR_0 << unit;
-		TRACE(ft_t_noise, "turning motor %d on", unit);
-	} else {
-		TRACE(ft_t_noise, "turning motor %d off", unit);
-	}
-	if (ft_mach2) {
-		outb_p(data, fdc.dor2);
-	} else {
-		outb_p(data, fdc.dor);
-	}
-	ftape_sleep(10 * FT_MILLISECOND);
-	TRACE_EXIT;
-}
-
-static void fdc_update_dsr(void)
-{
-	TRACE_FUN(ft_t_any);
-
-	TRACE(ft_t_flow, "rate = %d Kbps, precomp = %d ns",
-	      fdc_data_rate, fdc_precomp);
-	if (fdc.type >= i82077) {
-		outb_p((fdc_rate_code & 0x03) | fdc_prec_code, fdc.dsr);
-	} else {
-		outb_p(fdc_rate_code & 0x03, fdc.ccr);
-	}
-	TRACE_EXIT;
-}
-
-void fdc_set_write_precomp(int precomp)
-{
-	TRACE_FUN(ft_t_any);
-
-	TRACE(ft_t_noise, "New precomp: %d nsec", precomp);
-	fdc_precomp = precomp;
-	/*  write precompensation can be set in multiples of 41.67 nsec.
-	 *  round the parameter to the nearest multiple and convert it
-	 *  into an fdc setting. Note that a code of 0 means "use the
-	 *  fdc default", so 7 is used instead.
-	 */
-	fdc_prec_code = ((fdc_precomp + 21) / 42) << 2;
-	if (fdc_prec_code == 0 || fdc_prec_code > (6 << 2)) {
-		fdc_prec_code = 7 << 2;
-	}
-	fdc_update_dsr();
-	TRACE_EXIT;
-}
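The conversion above rounds the requested precompensation to the nearest multiple of roughly 41.67 ns and substitutes code 7 when the result would be 0, since 0 would select the FDC's default. A sketch of just that arithmetic (illustrative only, mirroring the lines above); for example, a request of 125 ns rounds to step 3, i.e. code 0x0c:

static unsigned char precomp_to_code(int precomp_ns)
{
	/* one precompensation step is ~41.67 ns; round to the nearest step */
	unsigned char code = ((precomp_ns + 21) / 42) << 2;

	if (code == 0 || code > (6 << 2))
		code = 7 << 2;	/* 0 would mean "FDC default" */
	return code;
}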
-
-/*  Reprogram the 82078 registers to use Data Rate Table 1 on all drives.
- */
-static void fdc_set_drive_specs(void)
-{
-	__u8 cmd[] = { FDC_DRIVE_SPEC, 0x00, 0x00, 0x00, 0x00, 0xc0};
-	int result;
-	TRACE_FUN(ft_t_any);
-
-	TRACE(ft_t_flow, "Setting of drive specs called");
-	if (fdc.type >= i82078_1) {
-		cmd[1] = (0 << 5) | (2 << 2);
-		cmd[2] = (1 << 5) | (2 << 2);
-		cmd[3] = (2 << 5) | (2 << 2);
-		cmd[4] = (3 << 5) | (2 << 2);
-		result = fdc_command(cmd, NR_ITEMS(cmd));
-		if (result < 0) {
-			TRACE(ft_t_err, "Setting of drive specs failed");
-		}
-	}
-	TRACE_EXIT;
-}
-
-/* Select clock for fdc, must correspond with tape drive setting !
- * This also influences the fdc timing so we must adjust some values.
- */
-int fdc_set_data_rate(int rate)
-{
-	int bad_rate = 0;
-	TRACE_FUN(ft_t_any);
-
-	/* Select clock for fdc, must correspond with tape drive setting !
-	 * This also influences the fdc timing so we must adjust some values.
-	 */
-	TRACE(ft_t_fdc_dma, "new rate = %d", rate);
-	switch (rate) {
-	case 250:
-		fdc_rate_code = fdc_data_rate_250;
-		break;
-	case 500:
-		fdc_rate_code = fdc_data_rate_500;
-		break;
-	case 1000:
-		if (fdc.type < i82077) {
-			bad_rate = 1;
-                } else {
-			fdc_rate_code = fdc_data_rate_1000;
-		}
-		break;
-	case 2000:
-		if (fdc.type < i82078_1) {
-			bad_rate = 1;
-                } else {
-			fdc_rate_code = fdc_data_rate_2000;
-		}
-		break;
-	default:
-		bad_rate = 1;
-        }
-	if (bad_rate) {
-		TRACE_ABORT(-EIO,
-			    ft_t_fdc_dma, "%d is not a valid data rate", rate);
-	}
-	fdc_data_rate = rate;
-	fdc_update_dsr();
-	fdc_set_seek_rate(fdc_seek_rate);  /* clock changed! */
-	ftape_udelay(1000);
-	TRACE_EXIT 0;
-}
-
-/*  keep the unit select if keep_select is != 0,
- */
-static void fdc_dor_reset(int keep_select)
-{
-	__u8 fdc_ctl = ft_drive_sel;
-
-	if (keep_select != 0) {
-		fdc_ctl |= FDC_DMA_MODE;
-		if (ftape_motor) {
-			fdc_ctl |= FDC_MOTOR_0 << ft_drive_sel;
-		}
-	}
-	ftape_udelay(10); /* ??? but seems to be necessary */
-	if (ft_mach2) {
-		outb_p(fdc_ctl & 0x0f, fdc.dor);
-		outb_p(fdc_ctl, fdc.dor2);
-	} else {
-		outb_p(fdc_ctl, fdc.dor);
-	}
-	fdc_usec_wait(10); /* delay >= 14 fdc clocks */
-	if (keep_select == 0) {
-		fdc_ctl = 0;
-	}
-	fdc_ctl |= FDC_RESET_NOT;
-	if (ft_mach2) {
-		outb_p(fdc_ctl & 0x0f, fdc.dor);
-		outb_p(fdc_ctl, fdc.dor2);
-	} else {
-		outb_p(fdc_ctl, fdc.dor);
-	}
-}
-
-/*      Reset the floppy disk controller. Leave the ftape_unit selected.
- */
-void fdc_reset(void)
-{
-	int st0;
-	int i;
-	int dummy;
-	unsigned long flags;
-	TRACE_FUN(ft_t_any);
-
-	spin_lock_irqsave(&fdc_io_lock, flags);
-
-	fdc_dor_reset(1); /* keep unit selected */
-
-	fdc_mode = fdc_idle;
-
-	/*  maybe the spin_lock_irq* pair is not necessary, BUT:
-	 *  the following line MUST be here. Otherwise fdc_interrupt_wait()
-	 *  won't wait. Note that fdc_reset() is called from 
-	 *  ftape_dumb_stop() when the fdc is busy transferring data. In this
-	 *  case fdc_isr() MOST PROBABLY sets ft_interrupt_seen, and tries
-	 *  to get the result bytes from the fdc etc. CLASH.
-	 */
-	ft_interrupt_seen = 0;
-	
-	/*  Program data rate
-	 */
-	fdc_update_dsr();               /* restore data rate and precomp */
-
-	spin_unlock_irqrestore(&fdc_io_lock, flags);
-
-        /*
-         *	Wait for first polling cycle to complete
-	 */
-	if (fdc_interrupt_wait(1 * FT_SECOND) < 0) {
-		TRACE(ft_t_err, "no drive polling interrupt!");
-	} else {	/* clear all disk-changed statuses */
-		for (i = 0; i < 4; ++i) {
-			if(fdc_sense_interrupt_status(&st0, &dummy) != 0) {
-				TRACE(ft_t_err, "sense failed for %d", i);
-			}
-			if (i == ft_drive_sel) {
-				ftape_current_cylinder = dummy;
-			}
-		}
-		TRACE(ft_t_noise, "drive polling completed");
-	}
-	/*
-         *	SPECIFY COMMAND
-	 */
-	fdc_set_seek_rate(fdc_seek_rate);
-	/*
-	 *	DRIVE SPECIFICATION COMMAND (if fdc type known)
-	 */
-	if (fdc.type >= i82078_1) {
-		fdc_set_drive_specs();
-	}
-	TRACE_EXIT;
-}
-
-#if !defined(CLK_48MHZ)
-# define CLK_48MHZ 1
-#endif
-
-/*  When we're done, put the fdc into reset mode so that the regular
- *  floppy disk driver will figure out that something is wrong and
- *  initialize the controller the way it wants.
- */
-void fdc_disable(void)
-{
-	__u8 cmd1[] = {FDC_CONFIGURE, 0x00, 0x00, 0x00};
-	__u8 cmd2[] = {FDC_LOCK};
-	__u8 cmd3[] = {FDC_UNLOCK};
-	__u8 stat[1];
-	TRACE_FUN(ft_t_flow);
-
-	if (!fdc_fifo_locked) {
-		fdc_reset();
-		TRACE_EXIT;
-	}
-	if (fdc_issue_command(cmd3, 1, stat, 1) < 0 || stat[0] != 0x00) {
-		fdc_dor_reset(0);
-		TRACE_ABORT(/**/, ft_t_bug, 
-		"couldn't unlock fifo, configuration remains changed");
-	}
-	fdc_fifo_locked = 0;
-	if (CLK_48MHZ && fdc.type >= i82078) {
-		cmd1[0] |= FDC_CLK48_BIT;
-	}
-	cmd1[2] = ((fdc_fifo_state) ? 0 : 0x20) + (fdc_fifo_thr - 1);
-	if (fdc_command(cmd1, NR_ITEMS(cmd1)) < 0) {
-		fdc_dor_reset(0);
-		TRACE_ABORT(/**/, ft_t_bug,
-		"couldn't reconfigure fifo to old state");
-	}
-	if (fdc_lock_state &&
-	    fdc_issue_command(cmd2, 1, stat, 1) < 0) {
-		fdc_dor_reset(0);
-		TRACE_ABORT(/**/, ft_t_bug, "couldn't lock old state again");
-	}
-	TRACE(ft_t_noise, "fifo restored: %sabled, thr. %d, %slocked",
-	      fdc_fifo_state ? "en" : "dis",
-	      fdc_fifo_thr, (fdc_lock_state) ? "" : "not ");
-	fdc_dor_reset(0);
-	TRACE_EXIT;
-}
-
-/*      Specify FDC seek-rate (milliseconds)
- */
-static int fdc_set_seek_rate(int seek_rate)
-{
-	/* set step rate, dma mode, and minimal head load and unload times
-	 */
-	__u8 in[3] = { FDC_SPECIFY, 1, (1 << 1)};
- 
-	fdc_seek_rate = seek_rate;
-	in[1] |= (16 - (fdc_data_rate * fdc_seek_rate) / 500) << 4;
-
-	return fdc_command(in, 3);
-}
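fdc_set_seek_rate() above builds the SPECIFY command's step-rate nibble; the step-rate unit scales with the data rate (1 ms per unit at 500 Kbps), which is where the (data_rate * seek_rate) / 500 term comes from. A minimal sketch under that assumption; at the default 500 Kbps and 2 ms it yields 0xe1, the same value the driver sends:

static unsigned char specify_srt_byte(int data_rate_kbps, int seek_rate_ms)
{
	unsigned char byte = 1;	/* low nibble: head unload time, as in the driver */

	/* SRT is encoded as 16 minus the step rate in data-rate-dependent units */
	byte |= (16 - (data_rate_kbps * seek_rate_ms) / 500) << 4;
	return byte;
}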
-
-/*      Sense drive status: get unit's drive status (ST3)
- */
-int fdc_sense_drive_status(int *st3)
-{
-	__u8 out[2];
-	__u8 in[1];
-	TRACE_FUN(ft_t_any);
-
-	out[0] = FDC_SENSED;
-	out[1] = ft_drive_sel;
-	TRACE_CATCH(fdc_issue_command(out, 2, in, 1),);
-	*st3 = in[0];
-	TRACE_EXIT 0;
-}
-
-/*      Sense Interrupt Status command:
- *      should be issued at the end of each seek.
- *      get ST0 and current cylinder.
- */
-int fdc_sense_interrupt_status(int *st0, int *current_cylinder)
-{
-	__u8 out[1];
-	__u8 in[2];
-	TRACE_FUN(ft_t_any);
-
-	out[0] = FDC_SENSEI;
-	TRACE_CATCH(fdc_issue_command(out, 1, in, 2),);
-	*st0 = in[0];
-	*current_cylinder = in[1];
-	TRACE_EXIT 0;
-}
-
-/*      step to track
- */
-int fdc_seek(int track)
-{
-	__u8 out[3];
-	int st0, pcn;
-#ifdef TESTING
-	unsigned int time;
-#endif
-	TRACE_FUN(ft_t_any);
-
-	out[0] = FDC_SEEK;
-	out[1] = ft_drive_sel;
-	out[2] = track;
-#ifdef TESTING
-	time = ftape_timestamp();
-#endif
-	/*  We really need this command to work !
-	 */
-	ft_seek_completed = 0;
-	TRACE_CATCH(fdc_command(out, 3),
-		    fdc_reset();
-		    TRACE(ft_t_noise, "destination was: %d, resetting FDC...",
-			  track));
-	/*    Handle interrupts until ft_seek_completed or timeout.
-	 */
-	for (;;) {
-		TRACE_CATCH(fdc_interrupt_wait(2 * FT_SECOND),);
-		if (ft_seek_completed) {
-			TRACE_CATCH(fdc_sense_interrupt_status(&st0, &pcn),);
-			if ((st0 & ST0_SEEK_END) == 0) {
-				TRACE_ABORT(-EIO, ft_t_err,
-				      "no seek-end after seek completion !??");
-			}
-			break;
-		}
-	}
-#ifdef TESTING
-	time = ftape_timediff(time, ftape_timestamp()) / abs(track - ftape_current_cylinder);
-	if ((time < 900 || time > 3100) && abs(track - ftape_current_cylinder) > 5) {
-		TRACE(ft_t_warn, "Wrong FDC STEP interval: %d usecs (%d)",
-                         time, track - ftape_current_cylinder);
-	}
-#endif
-	/*  Verify that we sought to the proper track.
-	 */
-	if (pcn != track) {
-		TRACE_ABORT(-EIO, ft_t_err, "bad seek..");
-	}
-	ftape_current_cylinder = track;
-	TRACE_EXIT 0;
-}
-
-static int perpend_mode; /* set if fdc is in perpendicular mode */
-
-static int perpend_off(void)
-{
- 	__u8 perpend[] = {FDC_PERPEND, 0x00};
-	TRACE_FUN(ft_t_any);
-	
-	if (perpend_mode) {
-		/* Turn off perpendicular mode */
-		perpend[1] = 0x80;
-		TRACE_CATCH(fdc_command(perpend, 2),
-			    TRACE(ft_t_err,"Perpendicular mode exit failed!"));
-		perpend_mode = 0;
-	}
-	TRACE_EXIT 0;
-}
-
-static int handle_perpend(int segment_id)
-{
- 	__u8 perpend[] = {FDC_PERPEND, 0x00};
-	TRACE_FUN(ft_t_any);
-
-	/* When writing QIC-3020 tapes, turn on perpendicular mode
-	 * if tape is moving in forward direction (even tracks).
-	 */
-	if (ft_qic_std == QIC_TAPE_QIC3020 &&
-	    ((segment_id / ft_segments_per_track) & 1) == 0) {
-/*  FIXME: some i82077 seem to support perpendicular mode as
- *  well. 
- */
-#if 0
-		if (fdc.type < i82077AA) {}
-#else
-		if (fdc.type < i82077 && ft_data_rate < 1000) {
-#endif
-			/*  fdc does not support perpendicular mode: complain 
-			 */
-			TRACE_ABORT(-EIO, ft_t_err,
-				    "Your FDC does not support QIC-3020.");
-		}
-		perpend[1] = 0x03 /* 0x83 + (0x4 << ft_drive_sel) */ ;
-		TRACE_CATCH(fdc_command(perpend, 2),
-			   TRACE(ft_t_err,"Perpendicular mode entry failed!"));
-		TRACE(ft_t_flow, "Perpendicular mode set");
-		perpend_mode = 1;
-		TRACE_EXIT 0;
-	}
-	TRACE_EXIT perpend_off();
-}
-
-static inline void fdc_setup_dma(char mode,
-				 volatile void *addr, unsigned int count)
-{
-	/* Program the DMA controller.
-	 */
-	disable_dma(fdc.dma);
-	clear_dma_ff(fdc.dma);
-	set_dma_mode(fdc.dma, mode);
-	set_dma_addr(fdc.dma, virt_to_bus((void*)addr));
-	set_dma_count(fdc.dma, count);
-	enable_dma(fdc.dma);
-}
-
-/*  Setup fdc and dma for formatting the next segment
- */
-int fdc_setup_formatting(buffer_struct * buff)
-{
-	unsigned long flags;
-	__u8 out[6] = {
-		FDC_FORMAT, 0x00, 3, 4 * FT_SECTORS_PER_SEGMENT, 0x00, 0x6b
-	};
-	TRACE_FUN(ft_t_any);
-	
-	TRACE_CATCH(handle_perpend(buff->segment_id),);
-	/* Program the DMA controller.
-	 */
-        TRACE(ft_t_fdc_dma,
-	      "phys. addr. = %lx", virt_to_bus((void*) buff->ptr));
-	spin_lock_irqsave(&fdc_io_lock, flags);
-	fdc_setup_dma(DMA_MODE_WRITE, buff->ptr, FT_SECTORS_PER_SEGMENT * 4);
-	/* Issue FDC command to start reading/writing.
-	 */
-	out[1] = ft_drive_sel;
-	out[4] = buff->gap3;
-	TRACE_CATCH(fdc_setup_error = fdc_command(out, sizeof(out)),
-		    spin_unlock_irqrestore(&fdc_io_lock, flags); fdc_mode = fdc_idle);
-	spin_unlock_irqrestore(&fdc_io_lock, flags);
-	TRACE_EXIT 0;
-}
-
-
-/*      Setup Floppy Disk Controller and DMA to read or write the next cluster
- *      of good sectors from or to the current segment.
- */
-int fdc_setup_read_write(buffer_struct * buff, __u8 operation)
-{
-	unsigned long flags;
-	__u8 out[9];
-	int dma_mode;
-	TRACE_FUN(ft_t_any);
-
-	switch(operation) {
-	case FDC_VERIFY:
-		if (fdc.type < i82077) {
-			operation = FDC_READ;
-		}
-	case FDC_READ:
-	case FDC_READ_DELETED:
-		dma_mode = DMA_MODE_READ;
-		TRACE(ft_t_fdc_dma, "xfer %d sectors to 0x%p",
-		      buff->sector_count, buff->ptr);
-		TRACE_CATCH(perpend_off(),);
-		break;
-	case FDC_WRITE_DELETED:
-		TRACE(ft_t_noise, "deleting segment %d", buff->segment_id);
-	case FDC_WRITE:
-		dma_mode = DMA_MODE_WRITE;
-		/* When writing QIC-3020 tapes, turn on perpendicular mode
-		 * if tape is moving in forward direction (even tracks).
-		 */
-		TRACE_CATCH(handle_perpend(buff->segment_id),);
-		TRACE(ft_t_fdc_dma, "xfer %d sectors from 0x%p",
-		      buff->sector_count, buff->ptr);
-		break;
-	default:
-		TRACE_ABORT(-EIO,
-			    ft_t_bug, "bug: invalid operation parameter");
-	}
-	TRACE(ft_t_fdc_dma, "phys. addr. = %lx",virt_to_bus((void*)buff->ptr));
-	spin_lock_irqsave(&fdc_io_lock, flags);
-	if (operation != FDC_VERIFY) {
-		fdc_setup_dma(dma_mode, buff->ptr,
-			      FT_SECTOR_SIZE * buff->sector_count);
-	}
-	/* Issue FDC command to start reading/writing.
-	 */
-	out[0] = operation;
-	out[1] = ft_drive_sel;
-	out[2] = buff->cyl;
-	out[3] = buff->head;
-	out[4] = buff->sect + buff->sector_offset;
-	out[5] = 3;		/* Sector size of 1K. */
-	out[6] = out[4] + buff->sector_count - 1;	/* last sector */
-	out[7] = 109;		/* Gap length. */
-	out[8] = 0xff;		/* No limit to transfer size. */
-	TRACE(ft_t_fdc_dma, "C: 0x%02x, H: 0x%02x, R: 0x%02x, cnt: 0x%02x",
-		out[2], out[3], out[4], out[6] - out[4] + 1);
-	spin_unlock_irqrestore(&fdc_io_lock, flags);
-	TRACE_CATCH(fdc_setup_error = fdc_command(out, 9),fdc_mode = fdc_idle);
-	TRACE_EXIT 0;
-}
-
-int fdc_fifo_threshold(__u8 threshold,
-		       int *fifo_state, int *lock_state, int *fifo_thr)
-{
-	const __u8 cmd0[] = {FDC_DUMPREGS};
-	__u8 cmd1[] = {FDC_CONFIGURE, 0, (0x0f & (threshold - 1)), 0};
-	const __u8 cmd2[] = {FDC_LOCK};
-	const __u8 cmd3[] = {FDC_UNLOCK};
-	__u8 reg[10];
-	__u8 stat;
-	int i;
-	int result;
-	TRACE_FUN(ft_t_any);
-
-	if (CLK_48MHZ && fdc.type >= i82078) {
-		cmd1[0] |= FDC_CLK48_BIT;
-	}
-	/*  Dump fdc internal registers for examination
-	 */
-	TRACE_CATCH(fdc_command(cmd0, NR_ITEMS(cmd0)),
-		    TRACE(ft_t_warn, "dumpreg cmd failed, fifo unchanged"));
-	/*  Now read fdc internal registers from fifo
-	 */
-	for (i = 0; i < (int)NR_ITEMS(reg); ++i) {
-		fdc_read(&reg[i]);
-		TRACE(ft_t_fdc_dma, "Register %d = 0x%02x", i, reg[i]);
-	}
-	if (fifo_state && lock_state && fifo_thr) {
-		*fifo_state = (reg[8] & 0x20) == 0;
-		*lock_state = reg[7] & 0x80;
-		*fifo_thr = 1 + (reg[8] & 0x0f);
-	}
-	TRACE(ft_t_noise,
-	      "original fifo state: %sabled, threshold %d, %slocked",
-	      ((reg[8] & 0x20) == 0) ? "en" : "dis",
-	      1 + (reg[8] & 0x0f), (reg[7] & 0x80) ? "" : "not ");
-	/*  If fdc is already locked, unlock it first ! */
-	if (reg[7] & 0x80) {
-		fdc_ready_wait(100);
-		TRACE_CATCH(fdc_issue_command(cmd3, NR_ITEMS(cmd3), &stat, 1),
-			    TRACE(ft_t_bug, "FDC unlock command failed, "
-				  "configuration unchanged"));
-	}
-	fdc_fifo_locked = 0;
-	/*  Enable fifo and set threshold at xx bytes to allow a
-	 *  reasonably large latency and reduce number of dma bursts.
-	 */
-	fdc_ready_wait(100);
-	if ((result = fdc_command(cmd1, NR_ITEMS(cmd1))) < 0) {
-		TRACE(ft_t_bug, "configure cmd failed, fifo unchanged");
-	}
-	/*  Now lock configuration so reset will not change it
-	 */
-        if(fdc_issue_command(cmd2, NR_ITEMS(cmd2), &stat, 1) < 0 ||
-	   stat != 0x10) {
-		TRACE_ABORT(-EIO, ft_t_bug,
-			    "FDC lock command failed, stat = 0x%02x", stat);
-	}
-	fdc_fifo_locked = 1;
-	TRACE_EXIT result;
-}
-
-static int fdc_fifo_enable(void)
-{
-	TRACE_FUN(ft_t_any);
-
-	if (fdc_fifo_locked) {
-		TRACE_ABORT(0, ft_t_warn, "Fifo not enabled because locked");
-	}
-	TRACE_CATCH(fdc_fifo_threshold(ft_fdc_threshold /* bytes */,
-				       &fdc_fifo_state,
-				       &fdc_lock_state,
-				       &fdc_fifo_thr),);
-	TRACE_CATCH(fdc_fifo_threshold(ft_fdc_threshold /* bytes */,
-				       NULL, NULL, NULL),);
-	TRACE_EXIT 0;
-}
-
-/*   Determine fd controller type 
- */
-static __u8 fdc_save_state[2];
-
-static int fdc_probe(void)
-{
-	__u8 cmd[1];
-	__u8 stat[16]; /* must be able to hold dumpregs & save results */
-	int i;
-	TRACE_FUN(ft_t_any);
-
-	/*  Try to find out what kind of fd controller we have to deal with
-	 *  Scheme borrowed from floppy driver:
-	 *  first try if FDC_DUMPREGS command works
-	 *  (this indicates that we have a 82072 or better)
-	 *  then try the FDC_VERSION command (82072 doesn't support this)
-	 *  then try the FDC_UNLOCK command (some older 82077's don't support this)
-	 *  then try the FDC_PARTID command (82078's support this)
-	 */
-	cmd[0] = FDC_DUMPREGS;
-	if (fdc_issue_command(cmd, 1, stat, 1) != 0) {
-		TRACE_ABORT(no_fdc, ft_t_bug, "No FDC found");
-	}
-	if (stat[0] == 0x80) {
-		/* invalid command: must be pre 82072 */
-		TRACE_ABORT(i8272,
-			    ft_t_warn, "Type 8272A/765A compatible FDC found");
-	}
-	fdc_result(&stat[1], 9);
-	fdc_save_state[0] = stat[7];
-	fdc_save_state[1] = stat[8];
-	cmd[0] = FDC_VERSION;
-	if (fdc_issue_command(cmd, 1, stat, 1) < 0 || stat[0] == 0x80) {
-		TRACE_ABORT(i8272, ft_t_warn, "Type 82072 FDC found");
-	}
-	if (*stat != 0x90) {
-		TRACE_ABORT(i8272, ft_t_warn, "Unknown FDC found");
-	}
-	cmd[0] = FDC_UNLOCK;
-	if(fdc_issue_command(cmd, 1, stat, 1) < 0 || stat[0] != 0x00) {
-		TRACE_ABORT(i8272, ft_t_warn,
-			    "Type pre-1991 82077 FDC found, "
-			    "treating it like a 82072");
-	}
-	if (fdc_save_state[0] & 0x80) { /* was locked */
-		cmd[0] = FDC_LOCK; /* restore lock */
-		(void)fdc_issue_command(cmd, 1, stat, 1);
-		TRACE(ft_t_warn, "FDC is already locked");
-	}
-	/* Test for a i82078 FDC */
-	cmd[0] = FDC_PARTID;
-	if (fdc_issue_command(cmd, 1, stat, 1) < 0 || stat[0] == 0x80) {
-		/* invalid command: not a i82078xx type FDC */
-		for (i = 0; i < 4; ++i) {
-			outb_p(i, fdc.tdr);
-			if ((inb_p(fdc.tdr) & 0x03) != i) {
-				TRACE_ABORT(i82077,
-					    ft_t_warn, "Type 82077 FDC found");
-			}
-		}
-		TRACE_ABORT(i82077AA, ft_t_warn, "Type 82077AA FDC found");
-	}
-	/* FDC_PARTID cmd succeeded */
-	switch (stat[0] >> 5) {
-	case 0x0:
-		/* i82078SL or i82078-1.  The SL part cannot run at
-		 * 2Mbps (the SL and -1 dies are identical; they are
-		 * speed graded after production, according to Intel).
-		 * Some SLs can be detected by doing a SAVE cmd and
-		 * looking at bit 7 of the first byte (the SEL3V# bit).
-		 * If it is 0, the part runs off 3Volts, and hence it
-		 * is a SL.
-		 */
-		cmd[0] = FDC_SAVE;
-		if(fdc_issue_command(cmd, 1, stat, 16) < 0) {
-			TRACE(ft_t_err, "FDC_SAVE failed. Dunno why");
-			/* guess we better claim the fdc to be a i82078 */
-			TRACE_ABORT(i82078,
-				    ft_t_warn,
-				    "Type i82078 FDC (i suppose) found");
-		}
-		if ((stat[0] & FDC_SEL3V_BIT)) {
-			/* fdc running off 5Volts; Pray that it's a i82078-1
-			 */
-			TRACE_ABORT(i82078_1, ft_t_warn,
-				  "Type i82078-1 or 5Volt i82078SL FDC found");
-		}
-		TRACE_ABORT(i82078, ft_t_warn,
-			    "Type 3Volt i82078SL FDC (1Mbps) found");
-	case 0x1:
-	case 0x2: /* S82078B  */
-		/* The '78B  isn't '78 compatible.  Detect it as a '77AA */
-		TRACE_ABORT(i82077AA, ft_t_warn, "Type i82077AA FDC found");
-	case 0x3: /* NSC PC8744 core; used in several super-IO chips */
-		TRACE_ABORT(i82077AA,
-			    ft_t_warn, "Type 82077AA compatible FDC found");
-	default:
-		TRACE(ft_t_warn, "A previously undetected FDC found");
-		TRACE_ABORT(i82077AA, ft_t_warn,
-			  "Treating it as a 82077AA. Please report partid= %d",
-			    stat[0]);
-	} /* switch(stat[ 0] >> 5) */
-	TRACE_EXIT no_fdc;
-}
-
-static int fdc_request_regions(void)
-{
-	TRACE_FUN(ft_t_flow);
-
-	if (ft_mach2 || ft_probe_fc10) {
-		if (!request_region(fdc.sra, 8, "fdc (ft)")) {
-#ifndef BROKEN_FLOPPY_DRIVER
-			TRACE_EXIT -EBUSY;
-#else
-			TRACE(ft_t_warn,
-"address 0x%03x occupied (by floppy driver?), using it anyway", fdc.sra);
-#endif
-		}
-	} else {
-		if (!request_region(fdc.sra, 6, "fdc (ft)")) {
-#ifndef BROKEN_FLOPPY_DRIVER
-			TRACE_EXIT -EBUSY;
-#else
-			TRACE(ft_t_warn,
-"address 0x%03x occupied (by floppy driver?), using it anyway", fdc.sra);
-#endif
-		}
-		if (!request_region(fdc.sra + 7, 1, "fdc (ft)")) {
-#ifndef BROKEN_FLOPPY_DRIVER
-			release_region(fdc.sra, 6);
-			TRACE_EXIT -EBUSY;
-#else
-			TRACE(ft_t_warn,
-"address 0x%03x occupied (by floppy driver?), using it anyway", fdc.sra + 7);
-#endif
-		}
-	}
-	TRACE_EXIT 0;
-}
-
-void fdc_release_regions(void)
-{
-	TRACE_FUN(ft_t_flow);
-
-	if (fdc.sra != 0) {
-		if (fdc.dor2 != 0) {
-			release_region(fdc.sra, 8);
-		} else {
-			release_region(fdc.sra, 6);
-			release_region(fdc.dir, 1);
-		}
-	}
-	TRACE_EXIT;
-}
-
-static int fdc_config_regs(unsigned int fdc_base, 
-			   unsigned int fdc_irq, 
-			   unsigned int fdc_dma)
-{
-	TRACE_FUN(ft_t_flow);
-
-	fdc.irq = fdc_irq;
-	fdc.dma = fdc_dma;
-	fdc.sra = fdc_base;
-	fdc.srb = fdc_base + 1;
-	fdc.dor = fdc_base + 2;
-	fdc.tdr = fdc_base + 3;
-	fdc.msr = fdc.dsr = fdc_base + 4;
-	fdc.fifo = fdc_base + 5;
-	fdc.dir = fdc.ccr = fdc_base + 7;
-	fdc.dor2 = (ft_mach2 || ft_probe_fc10) ? fdc_base + 6 : 0;
-	TRACE_CATCH(fdc_request_regions(), fdc.sra = 0);
-	TRACE_EXIT 0;
-}
-
-static int fdc_config(void)
-{
-	static int already_done;
-	TRACE_FUN(ft_t_any);
-
-	if (already_done) {
-		TRACE_CATCH(fdc_request_regions(),);
-		*(fdc.hook) = fdc_isr;	/* hook our handler in */
-		TRACE_EXIT 0;
-	}
-	if (ft_probe_fc10) {
-		int fc_type;
-		
-		TRACE_CATCH(fdc_config_regs(ft_fdc_base,
-					    ft_fdc_irq, ft_fdc_dma),);
-		fc_type = fc10_enable();
-		if (fc_type != 0) {
-			TRACE(ft_t_warn, "FC-%c0 controller found", '0' + fc_type);
-			fdc.type = fc10;
-			fdc.hook = &do_ftape;
-			*(fdc.hook) = fdc_isr;	/* hook our handler in */
-			already_done = 1;
-			TRACE_EXIT 0;
-		} else {
-			TRACE(ft_t_warn, "FC-10/20 controller not found");
-			fdc_release_regions();
-			fdc.type = no_fdc;
-			ft_probe_fc10 = 0;
-			ft_fdc_base   = 0x3f0;
-			ft_fdc_irq    = 6;
-			ft_fdc_dma    = 2;
-		}
-	}
-	TRACE(ft_t_warn, "fdc base: 0x%x, irq: %d, dma: %d", 
-	      ft_fdc_base, ft_fdc_irq, ft_fdc_dma);
-	TRACE_CATCH(fdc_config_regs(ft_fdc_base, ft_fdc_irq, ft_fdc_dma),);
-	fdc.hook = &do_ftape;
-	*(fdc.hook) = fdc_isr;	/* hook our handler in */
-	already_done = 1;
-	TRACE_EXIT 0;
-}
-
-static irqreturn_t ftape_interrupt(int irq, void *dev_id)
-{
-	void (*handler) (void) = *fdc.hook;
-	int handled = 0;
-	TRACE_FUN(ft_t_any);
-
-	*fdc.hook = NULL;
-	if (handler) {
-		handled = 1;
-		handler();
-	} else {
-		TRACE(ft_t_bug, "Unexpected ftape interrupt");
-	}
-	TRACE_EXIT IRQ_RETVAL(handled);
-}
-
-static int fdc_grab_irq_and_dma(void)
-{
-	TRACE_FUN(ft_t_any);
-
-	if (fdc.hook == &do_ftape) {
-		/*  Get fast interrupt handler.
-		 */
-		if (request_irq(fdc.irq, ftape_interrupt,
-				IRQF_DISABLED, "ft", ftape_id)) {
-			TRACE_ABORT(-EIO, ft_t_bug,
-				    "Unable to grab IRQ%d for ftape driver",
-				    fdc.irq);
-		}
-		if (request_dma(fdc.dma, ftape_id)) {
-			free_irq(fdc.irq, ftape_id);
-			TRACE_ABORT(-EIO, ft_t_bug,
-			      "Unable to grab DMA%d for ftape driver",
-			      fdc.dma);
-		}
-	}
-	if (ft_fdc_base != 0x3f0 && (ft_fdc_dma == 2 || ft_fdc_irq == 6)) {
-		/* Using same dma channel or irq as standard fdc, need
-		 * to disable the dma-gate on the std fdc. This
-		 * couldn't be done in the floppy driver as some
-		 * laptops are using the dma-gate to enter a low power
-		 * or even suspended state :-(
-		 */
-		outb_p(FDC_RESET_NOT, 0x3f2);
-		TRACE(ft_t_noise, "DMA-gate on standard fdc disabled");
-	}
-	TRACE_EXIT 0;
-}
-
-int fdc_release_irq_and_dma(void)
-{
-	TRACE_FUN(ft_t_any);
-
-	if (fdc.hook == &do_ftape) {
-		disable_dma(fdc.dma);	/* just in case... */
-		free_dma(fdc.dma);
-		free_irq(fdc.irq, ftape_id);
-	}
-	if (ft_fdc_base != 0x3f0 && (ft_fdc_dma == 2 || ft_fdc_irq == 6)) {
-		/* Using same dma channel as standard fdc, need to
-		 * disable the dma-gate on the std fdc. This couldn't
-		 * be done in the floppy driver as some laptops are
-		 * using the dma-gate to enter a low power or even
-		 * suspended state :-(
-		 */
-		outb_p(FDC_RESET_NOT | FDC_DMA_MODE, 0x3f2);
-		TRACE(ft_t_noise, "DMA-gate on standard fdc enabled again");
-	}
-	TRACE_EXIT 0;
-}
-
-int fdc_init(void)
-{
-	TRACE_FUN(ft_t_any);
-
-	/* find a FDC to use */
-	TRACE_CATCH(fdc_config(),);
-	TRACE_CATCH(fdc_grab_irq_and_dma(), fdc_release_regions());
-	ftape_motor = 0;
-	fdc_catch_stray_interrupts(0);	/* clear number of awaited
-					 * stray interrupts
-					 */
-	fdc_catch_stray_interrupts(1);	/* one always comes (?) */
-	TRACE(ft_t_flow, "resetting fdc");
-	fdc_set_seek_rate(2);		/* use nominal QIC step rate */
-	fdc_reset();			/* init fdc & clear track counters */
-	if (fdc.type == no_fdc) {	/* no FC-10 or FC-20 found */
-		fdc.type = fdc_probe();
-		fdc_reset();		/* update with new knowledge */
-	}
-	if (fdc.type == no_fdc) {
-		fdc_release_irq_and_dma();
-		fdc_release_regions();
-		TRACE_EXIT -ENXIO;
-	}
-	if (fdc.type >= i82077) {
-		if (fdc_fifo_enable() < 0) {
-			TRACE(ft_t_warn, "couldn't enable fdc fifo !");
-		} else {
-			TRACE(ft_t_flow, "fdc fifo enabled and locked");
-		}
-	}
-	TRACE_EXIT 0;
-}
diff --git a/drivers/char/ftape/lowlevel/fdc-io.h b/drivers/char/ftape/lowlevel/fdc-io.h
deleted file mode 100644
index 7ec3c72..0000000
--- a/drivers/char/ftape/lowlevel/fdc-io.h
+++ /dev/null
@@ -1,252 +0,0 @@
-#ifndef _FDC_IO_H
-#define _FDC_IO_H
-
-/*
- *    Copyright (C) 1993-1996 Bas Laarhoven,
- *              (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/fdc-io.h,v $
- * $Revision: 1.3 $
- * $Date: 1997/10/05 19:18:06 $
- *
- *      This file contains the declarations for the low level
- *      functions that communicate with the floppy disk controller,
- *      for the QIC-40/80/3010/3020 floppy-tape driver "ftape" for
- *      Linux.
- */
-
-#include <linux/fdreg.h>
-
-#include "../lowlevel/ftape-bsm.h"
-
-#define FDC_SK_BIT      (0x20)
-#define FDC_MT_BIT      (0x80)
-
-#define FDC_READ        (FD_READ & ~(FDC_SK_BIT | FDC_MT_BIT))
-#define FDC_WRITE       (FD_WRITE & ~FDC_MT_BIT)
-#define FDC_READ_DELETED  (0x4c)
-#define FDC_WRITE_DELETED (0x49)
-#define FDC_VERIFY        (0x56)
-#define FDC_READID      (0x4a)
-#define FDC_SENSED      (0x04)
-#define FDC_SENSEI      (FD_SENSEI)
-#define FDC_FORMAT      (FD_FORMAT)
-#define FDC_RECAL       (FD_RECALIBRATE)
-#define FDC_SEEK        (FD_SEEK)
-#define FDC_SPECIFY     (FD_SPECIFY)
-#define FDC_RECALIBR    (FD_RECALIBRATE)
-#define FDC_VERSION     (FD_VERSION)
-#define FDC_PERPEND     (FD_PERPENDICULAR)
-#define FDC_DUMPREGS    (FD_DUMPREGS)
-#define FDC_LOCK        (FD_LOCK)
-#define FDC_UNLOCK      (FD_UNLOCK)
-#define FDC_CONFIGURE   (FD_CONFIGURE)
-#define FDC_DRIVE_SPEC  (0x8e)	/* i82078 has this (any others?) */
-#define FDC_PARTID      (0x18)	/* i82078 has this */
-#define FDC_SAVE        (0x2e)	/* i82078 has this (any others?) */
-#define FDC_RESTORE     (0x4e)	/* i82078 has this (any others?) */
-
-#define FDC_STATUS_MASK (STATUS_BUSY | STATUS_DMA | STATUS_DIR | STATUS_READY)
-#define FDC_DATA_READY  (STATUS_READY)
-#define FDC_DATA_OUTPUT (STATUS_DIR)
-#define FDC_DATA_READY_MASK (STATUS_READY | STATUS_DIR)
-#define FDC_DATA_OUT_READY  (STATUS_READY | STATUS_DIR)
-#define FDC_DATA_IN_READY   (STATUS_READY)
-#define FDC_BUSY        (STATUS_BUSY)
-#define FDC_CLK48_BIT   (0x80)
-#define FDC_SEL3V_BIT   (0x40)
-
-#define ST0_INT_MASK    (ST0_INTR)
-#define FDC_INT_NORMAL  (ST0_INTR & 0x00)
-#define FDC_INT_ABNORMAL (ST0_INTR & 0x40)
-#define FDC_INT_INVALID (ST0_INTR & 0x80)
-#define FDC_INT_READYCH (ST0_INTR & 0xC0)
-#define ST0_SEEK_END    (ST0_SE)
-#define ST3_TRACK_0     (ST3_TZ)
-
-#define FDC_RESET_NOT   (0x04)
-#define FDC_DMA_MODE    (0x08)
-#define FDC_MOTOR_0     (0x10)
-#define FDC_MOTOR_1     (0x20)
-
-typedef struct {
-	void (**hook) (void);	/* our wedge into the isr */
-	enum {
-		no_fdc, i8272, i82077, i82077AA, fc10,
-		i82078, i82078_1
-	} type;			/* FDC type */
-	unsigned int irq; /* FDC irq nr */
-	unsigned int dma; /* FDC dma channel nr */
-	__u16 sra;	  /* Status register A (PS/2 only) */
-	__u16 srb;	  /* Status register B (PS/2 only) */
-	__u16 dor;	  /* Digital output register */
-	__u16 tdr;	  /* Tape Drive Register (82077SL-1 &
-			     82078 only) */
-	__u16 msr;	  /* Main Status Register */
-	__u16 dsr;	  /* Datarate Select Register (8207x only) */
-	__u16 fifo;	  /* Data register / Fifo on 8207x */
-	__u16 dir;	  /* Digital Input Register */
-	__u16 ccr;	  /* Configuration Control Register */
-	__u16 dor2;	  /* Alternate dor on MACH-2 controller,
-			     also used with FC-10, meaning unknown */
-} fdc_config_info;
-
-typedef enum {
-	fdc_data_rate_250  = 2,
-	fdc_data_rate_300  = 1,	/* any fdc in default configuration */
-	fdc_data_rate_500  = 0,
-	fdc_data_rate_1000 = 3,
-	fdc_data_rate_2000 = 1,	/* i82078-1: when using Data Rate Table #2 */
-} fdc_data_rate_type;
-
-typedef enum {
-	fdc_idle          = 0,
-	fdc_reading_data  = FDC_READ,
-	fdc_seeking       = FDC_SEEK,
-	fdc_writing_data  = FDC_WRITE,
-	fdc_deleting      = FDC_WRITE_DELETED,
-	fdc_reading_id    = FDC_READID,
-	fdc_recalibrating = FDC_RECAL,
-	fdc_formatting    = FDC_FORMAT,
-	fdc_verifying     = FDC_VERIFY
-} fdc_mode_enum;
-
-typedef enum {
-	waiting = 0,
-	reading,
-	writing,
-	formatting,
-	verifying,
-	deleting,
-	done,
-	error,
-	mmapped,
-} buffer_state_enum;
-
-typedef struct {
-	__u8 *address;
-	volatile buffer_state_enum status;
-	volatile __u8 *ptr;
-	volatile unsigned int bytes;
-	volatile unsigned int segment_id;
-
-	/* bitmap for remainder of segment not yet handled.
-	 * one bit set for each bad sector that must be skipped.
-	 */
-	volatile SectorMap bad_sector_map;
-
-	/* bitmap with bad data blocks in data buffer.
-	 * the errors in this map may be retried.
-	 */
-	volatile SectorMap soft_error_map;
-
-	/* bitmap with bad data blocks in data buffer
-	 * the errors in this map may not be retried.
-	 */
-	volatile SectorMap hard_error_map;
-
-	/* retry counter for soft errors.
-	 */
-	volatile int retry;
-
-	/* sectors to skip on retry ???
-	 */
-	volatile unsigned int skip;
-
-	/* nr of data blocks in data buffer
-	 */
-	volatile unsigned int data_offset;
-
-	/* offset in segment for first sector to be handled.
-	 */
-	volatile unsigned int sector_offset;
-
-	/* size of cluster of good sectors to be handled.
-	 */
-	volatile unsigned int sector_count;
-
-	/* size of remaining part of segment to be handled.
-	 */
-	volatile unsigned int remaining;
-
-	/* points to next segment (contiguous) to be handled,
-	 * or is zero if no read-ahead is allowed.
-	 */
-	volatile unsigned int next_segment;
-
-	/* flag being set if deleted data was read.
-	 */
-	volatile int deleted;
-
-	/* floppy coordinates of first sector in segment */
-	volatile __u8 head;
-	volatile __u8 cyl;
-	volatile __u8 sect;
-
-	/* gap to use when formatting */
-	__u8 gap3;
-	/* flag set when buffer is mmaped */
-	int mmapped;
-} buffer_struct;
-
-/*
- *      fdc-io.c defined public variables
- */
-extern volatile fdc_mode_enum fdc_mode;
-extern int fdc_setup_error;	/* outdated ??? */
-extern wait_queue_head_t ftape_wait_intr;
-extern volatile int ftape_current_cylinder; /* track nr FDC thinks we're on */
-extern volatile __u8 fdc_head;	/* FDC head */
-extern volatile __u8 fdc_cyl;	/* FDC track */
-extern volatile __u8 fdc_sect;	/* FDC sector */
-extern fdc_config_info fdc;	/* FDC hardware configuration */
-
-extern unsigned int ft_fdc_base;
-extern unsigned int ft_fdc_irq;
-extern unsigned int ft_fdc_dma;
-extern unsigned int ft_fdc_threshold;
-extern unsigned int ft_fdc_rate_limit;
-extern int ft_probe_fc10;
-extern int ft_mach2;
-/*
- *      fdc-io.c defined public functions
- */
-extern void fdc_catch_stray_interrupts(int count);
-extern int fdc_ready_wait(unsigned int timeout);
-extern int fdc_command(const __u8 * cmd_data, int cmd_len);
-extern int fdc_result(__u8 * res_data, int res_len);
-extern int fdc_interrupt_wait(unsigned int time);
-extern int fdc_seek(int track);
-extern int fdc_sense_drive_status(int *st3);
-extern void fdc_motor(int motor);
-extern void fdc_reset(void);
-extern void fdc_disable(void);
-extern int fdc_fifo_threshold(__u8 threshold,
-			      int *fifo_state, int *lock_state, int *fifo_thr);
-extern void fdc_wait_calibrate(void);
-extern int fdc_sense_interrupt_status(int *st0, int *current_cylinder);
-extern void fdc_save_drive_specs(void);
-extern void fdc_restore_drive_specs(void);
-extern int fdc_set_data_rate(int rate);
-extern void fdc_set_write_precomp(int precomp);
-extern int fdc_release_irq_and_dma(void);
-extern void fdc_release_regions(void);
-extern int fdc_init(void);
-extern int fdc_setup_read_write(buffer_struct * buff, __u8 operation);
-extern int fdc_setup_formatting(buffer_struct * buff);
-#endif
diff --git a/drivers/char/ftape/lowlevel/fdc-isr.c b/drivers/char/ftape/lowlevel/fdc-isr.c
deleted file mode 100644
index ad2bc73..0000000
--- a/drivers/char/ftape/lowlevel/fdc-isr.c
+++ /dev/null
@@ -1,1170 +0,0 @@
-/*
- *      Copyright (C) 1994-1996 Bas Laarhoven,
- *                (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/fdc-isr.c,v $
- * $Revision: 1.9 $
- * $Date: 1997/10/17 23:01:53 $
- *
- *      This file contains the interrupt service routine and
- *      associated code for the QIC-40/80/3010/3020 floppy-tape driver
- *      "ftape" for Linux.
- */
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#define volatile		/* */
-
-#include <linux/ftape.h>
-#include <linux/qic117.h>
-#include "../lowlevel/ftape-tracing.h"
-#include "../lowlevel/fdc-isr.h"
-#include "../lowlevel/fdc-io.h"
-#include "../lowlevel/ftape-ctl.h"
-#include "../lowlevel/ftape-rw.h"
-#include "../lowlevel/ftape-io.h"
-#include "../lowlevel/ftape-calibr.h"
-#include "../lowlevel/ftape-bsm.h"
-
-/*      Global vars.
- */
-volatile int ft_expected_stray_interrupts;
-volatile int ft_interrupt_seen;
-volatile int ft_seek_completed;
-volatile int ft_hide_interrupt;
-/*      Local vars.
- */
-typedef enum {
-	no_error = 0, id_am_error = 0x01, id_crc_error = 0x02,
-	data_am_error = 0x04, data_crc_error = 0x08,
-	no_data_error = 0x10, overrun_error = 0x20,
-} error_cause;
-static int stop_read_ahead;
-
-
-static void print_error_cause(int cause)
-{
-	TRACE_FUN(ft_t_any);
-
-	switch (cause) {
-	case no_data_error:
-		TRACE(ft_t_noise, "no data error");
-		break;
-	case id_am_error:
-		TRACE(ft_t_noise, "id am error");
-		break;
-	case id_crc_error:
-		TRACE(ft_t_noise, "id crc error");
-		break;
-	case data_am_error:
-		TRACE(ft_t_noise, "data am error");
-		break;
-	case data_crc_error:
-		TRACE(ft_t_noise, "data crc error");
-		break;
-	case overrun_error:
-		TRACE(ft_t_noise, "overrun error");
-		break;
-	default:;
-	}
-	TRACE_EXIT;
-}
-
-static char *fdc_mode_txt(fdc_mode_enum mode)
-{
-	switch (mode) {
-	case fdc_idle:
-		return "fdc_idle";
-	case fdc_reading_data:
-		return "fdc_reading_data";
-	case fdc_seeking:
-		return "fdc_seeking";
-	case fdc_writing_data:
-		return "fdc_writing_data";
-	case fdc_reading_id:
-		return "fdc_reading_id";
-	case fdc_recalibrating:
-		return "fdc_recalibrating";
-	case fdc_formatting:
-		return "fdc_formatting";
-	case fdc_verifying:
-		return "fdc_verifying";
-	default:
-		return "unknown";
-	}
-}
-
-static inline error_cause decode_irq_cause(fdc_mode_enum mode, __u8 st[])
-{
-	error_cause cause = no_error;
-	TRACE_FUN(ft_t_any);
-
-	/*  Valid st[], decode cause of interrupt.
-	 */
-	switch (st[0] & ST0_INT_MASK) {
-	case FDC_INT_NORMAL:
-		TRACE(ft_t_fdc_dma,"normal completion: %s",fdc_mode_txt(mode));
-		break;
-	case FDC_INT_ABNORMAL:
-		TRACE(ft_t_flow, "abnormal completion %s", fdc_mode_txt(mode));
-		TRACE(ft_t_fdc_dma, "ST0: 0x%02x, ST1: 0x%02x, ST2: 0x%02x",
-		      st[0], st[1], st[2]);
-		TRACE(ft_t_fdc_dma,
-		      "C: 0x%02x, H: 0x%02x, R: 0x%02x, N: 0x%02x",
-		      st[3], st[4], st[5], st[6]);
-		if (st[1] & 0x01) {
-			if (st[2] & 0x01) {
-				cause = data_am_error;
-			} else {
-				cause = id_am_error;
-			}
-		} else if (st[1] & 0x20) {
-			if (st[2] & 0x20) {
-				cause = data_crc_error;
-			} else {
-				cause = id_crc_error;
-			}
-		} else if (st[1] & 0x04) {
-			cause = no_data_error;
-		} else if (st[1] & 0x10) {
-			cause = overrun_error;
-		}
-		print_error_cause(cause);
-		break;
-	case FDC_INT_INVALID:
-		TRACE(ft_t_flow, "invalid completion %s", fdc_mode_txt(mode));
-		break;
-	case FDC_INT_READYCH:
-		if (st[0] & ST0_SEEK_END) {
-			TRACE(ft_t_flow, "drive poll completed");
-		} else {
-			TRACE(ft_t_flow, "ready change %s",fdc_mode_txt(mode));
-		}
-		break;
-	default:
-		break;
-	}
-	TRACE_EXIT cause;
-}
-
-static void update_history(error_cause cause)
-{
-	switch (cause) {
-	case id_am_error:
-		ft_history.id_am_errors++;
-		break;
-	case id_crc_error:
-		ft_history.id_crc_errors++;
-		break;
-	case data_am_error:
-		ft_history.data_am_errors++;
-		break;
-	case data_crc_error:
-		ft_history.data_crc_errors++;
-		break;
-	case overrun_error:
-		ft_history.overrun_errors++;
-		break;
-	case no_data_error:
-		ft_history.no_data_errors++;
-		break;
-	default:;
-	}
-}
-
-static void skip_bad_sector(buffer_struct * buff)
-{
-	TRACE_FUN(ft_t_any);
-
-	/*  Mark sector as soft error and skip it
-	 */
-	if (buff->remaining > 0) {
-		++buff->sector_offset;
-		++buff->data_offset;
-		--buff->remaining;
-		buff->ptr += FT_SECTOR_SIZE;
-		buff->bad_sector_map >>= 1;
-	} else {
-		/*  Beware, C limitation: shifting this 32-bit map by more
-		 *  than 31 bits results in no shift at all, which is wrong.
-		 */
-		++buff->sector_offset;	/* hack for error maps */
-		TRACE(ft_t_warn, "skipping last sector in segment");
-	}
-	TRACE_EXIT;
-}
-
-static void update_error_maps(buffer_struct * buff, unsigned int error_offset)
-{
-	int hard = 0;
-	TRACE_FUN(ft_t_any);
-
-	if (buff->retry < FT_SOFT_RETRIES) {
-		buff->soft_error_map |= (1 << error_offset);
-	} else {
-		buff->hard_error_map |= (1 << error_offset);
-		buff->soft_error_map &= ~buff->hard_error_map;
-		buff->retry = -1;  /* will be set to 0 in setup_segment */
-		hard = 1;
-	}
-	TRACE(ft_t_noise, "sector %d : %s error\n"
-	      KERN_INFO "hard map: 0x%08lx\n"
-	      KERN_INFO "soft map: 0x%08lx",
-	      FT_SECTOR(error_offset), hard ? "hard" : "soft",
-	      (long) buff->hard_error_map, (long) buff->soft_error_map);
-	TRACE_EXIT;
-}
-
-static void print_progress(buffer_struct *buff, error_cause cause)
-{
-	TRACE_FUN(ft_t_any);
-
-	switch (cause) {
-	case no_error: 
-		TRACE(ft_t_flow,"%d Sector(s) transferred", buff->sector_count);
-		break;
-	case no_data_error:
-		TRACE(ft_t_flow, "Sector %d not found",
-		      FT_SECTOR(buff->sector_offset));
-		break;
-	case overrun_error:
-		/*  got an overrun error on the first byte, must be a
-		 *  hardware problem
-		 */
-		TRACE(ft_t_bug,
-		      "Unexpected error: failing DMA or FDC controller ?");
-		break;
-	case data_crc_error:
-		TRACE(ft_t_flow, "Error in sector %d",
-		      FT_SECTOR(buff->sector_offset - 1));
-		break;
-	case id_crc_error:
-	case id_am_error:
-	case data_am_error:
-		TRACE(ft_t_flow, "Error in sector %d",
-		      FT_SECTOR(buff->sector_offset));
-		break;
-	default:
-		TRACE(ft_t_flow, "Unexpected error at sector %d",
-		      FT_SECTOR(buff->sector_offset));
-		break;
-	}
-	TRACE_EXIT;
-}
-
-/*
- *  Error cause:   Amount xferred:  Action:
- *
- *  id_am_error         0           mark bad and skip
- *  id_crc_error        0           mark bad and skip
- *  data_am_error       0           mark bad and skip
- *  data_crc_error    % 1024        mark bad and skip
- *  no_data_error       0           retry on write
- *                                  mark bad and skip on read
- *  overrun_error  [ 0..all-1 ]     mark bad and skip
- *  no_error           all          continue
- */
-
-/*  the arg `sector' is returned by the fdc and tells us at which sector we
- *  are positioned at (relative to starting sector of segment)
- */
-static void determine_verify_progress(buffer_struct *buff,
-				      error_cause cause,
-				      __u8 sector)
-{
-	TRACE_FUN(ft_t_any);
-
-	if (cause == no_error && sector == 1) {
-		buff->sector_offset = FT_SECTORS_PER_SEGMENT;
-		buff->remaining     = 0;
-		if (TRACE_LEVEL >= ft_t_flow) {
-			print_progress(buff, cause);
-		}
-	} else {
-		buff->sector_offset = sector - buff->sect;
-		buff->remaining = FT_SECTORS_PER_SEGMENT - buff->sector_offset;
-		TRACE(ft_t_noise, "%ssector offset: 0x%04x", 
-		      (cause == no_error) ? "unexpected " : "",
-		      buff->sector_offset);
-		switch (cause) {
-		case overrun_error:
-			break;
-#if 0
-		case no_data_error:
-			buff->retry = FT_SOFT_RETRIES;
-			if (buff->hard_error_map    &&
-			    buff->sector_offset > 1 &&
-			    (buff->hard_error_map & 
-			     (1 << (buff->sector_offset-2)))) {
-				buff->retry --;
-			}
-			break;
-#endif
-		default:
-			buff->retry = FT_SOFT_RETRIES;
-			break;
-		}
-		if (TRACE_LEVEL >= ft_t_flow) {
-			print_progress(buff, cause);
-		}
-		/*  Sector_offset points to the problem area. Now adjust
-		 *  sector_offset so it always points one past the failing
-		 *  sector. I.e. skip the bad sector.
-		 */
-		++buff->sector_offset;
-		--buff->remaining;
-		update_error_maps(buff, buff->sector_offset - 1);
-	}
-	TRACE_EXIT;
-}
-
-static void determine_progress(buffer_struct *buff,
-			       error_cause cause,
-			       __u8 sector)
-{
-	unsigned int dma_residue;
-	TRACE_FUN(ft_t_any);
-
-	/*  Using less preferred order of disable_dma and
-	 *  get_dma_residue because this seems to fail on at least one
-	 *  system if reversed!
-	 */
-	dma_residue = get_dma_residue(fdc.dma);
-	disable_dma(fdc.dma);
-	if (cause != no_error || dma_residue != 0) {
-		TRACE(ft_t_noise, "%sDMA residue: 0x%04x", 
-		      (cause == no_error) ? "unexpected " : "",
-		      dma_residue);
-		/* adjust to actual value: */
-		if (dma_residue == 0) {
-			/* this happens sometimes with overrun errors.
-			 * I don't know whether we could ignore the
-			 * overrun error. Play it safe.
-			 */
-			buff->sector_count --;
-		} else {
-			buff->sector_count -= ((dma_residue + 
-						(FT_SECTOR_SIZE - 1)) /
-					       FT_SECTOR_SIZE);
-		}
-	}
-	/*  Update var's influenced by the DMA operation.
-	 */
-	if (buff->sector_count > 0) {
-		buff->sector_offset   += buff->sector_count;
-		buff->data_offset     += buff->sector_count;
-		buff->ptr             += (buff->sector_count *
-					  FT_SECTOR_SIZE);
-		buff->remaining       -= buff->sector_count;
-		buff->bad_sector_map >>= buff->sector_count;
-	}
-	if (TRACE_LEVEL >= ft_t_flow) {
-		print_progress(buff, cause);
-	}
-	if (cause != no_error) {
-		if (buff->remaining == 0) {
-			TRACE(ft_t_warn, "foo?\n"
-			      KERN_INFO "count : %d\n"
-			      KERN_INFO "offset: %d\n"
-			      KERN_INFO "soft  : %08x\n"
-			      KERN_INFO "hard  : %08x",
-			      buff->sector_count,
-			      buff->sector_offset,
-			      buff->soft_error_map,
-			      buff->hard_error_map);
-		}
-		/*  Sector_offset points to the problem area, except if we got
-		 *  a data_crc_error. In that case it points one past the
-		 *  failing sector.
-		 *
-		 *  Now adjust sector_offset so it always points one past the
-		 *  failing sector. I.e. skip the bad sector.  
-		 */
-		if (cause != data_crc_error) {
-			skip_bad_sector(buff);
-		}
-		update_error_maps(buff, buff->sector_offset - 1);
-	}
-	TRACE_EXIT;
-}
-
-static int calc_steps(int cmd)
-{
-	if (ftape_current_cylinder > cmd) {
-		return ftape_current_cylinder - cmd;
-	} else {
-		return ftape_current_cylinder + cmd;
-	}
-}
-
-static void pause_tape(int retry, int mode)
-{
-	int result;
-	__u8 out[3] = {FDC_SEEK, ft_drive_sel, 0};
-	TRACE_FUN(ft_t_any);
-
-	/*  We'll use a raw seek command to get the tape to rewind and
-	 *  stop for a retry.
-	 */
-	++ft_history.rewinds;
-	if (qic117_cmds[ftape_current_command].non_intr) {
-		TRACE(ft_t_warn, "motion command may be issued too soon");
-	}
-	if (retry && (mode == fdc_reading_data ||
-		      mode == fdc_reading_id   ||
-		      mode == fdc_verifying)) {
-		ftape_current_command = QIC_MICRO_STEP_PAUSE;
-		ftape_might_be_off_track = 1;
-	} else {
-		ftape_current_command = QIC_PAUSE;
-	}
-	out[2] = calc_steps(ftape_current_command);
-	result = fdc_command(out, 3); /* issue QIC_117 command */
-	ftape_current_cylinder = out[ 2];
-	if (result < 0) {
-		TRACE(ft_t_noise, "qic-pause failed, status = %d", result);
-	} else {
-		ft_location.known  = 0;
-		ft_runner_status   = idle;
-		ft_hide_interrupt     = 1;
-		ftape_tape_running = 0;
-	}
-	TRACE_EXIT;
-}
-
-static void continue_xfer(buffer_struct *buff,
-			  fdc_mode_enum mode, 
-			  unsigned int skip)
-{
-	int write = 0;
- 	TRACE_FUN(ft_t_any);
-
-	if (mode == fdc_writing_data || mode == fdc_deleting) {
-		write = 1;
-	}
-	/*  This part can be removed if it never happens
-	 */
-	if (skip > 0 &&
-	    (ft_runner_status != running ||
-	     (write && (buff->status != writing)) ||
-	     (!write && (buff->status != reading && 
-			 buff->status != verifying)))) {
-		TRACE(ft_t_err, "unexpected runner/buffer state %d/%d",
-		      ft_runner_status, buff->status);
-		buff->status = error;
-		/* finish this buffer: */
-		(void)ftape_next_buffer(ft_queue_head);
-		ft_runner_status = aborting;
-		fdc_mode         = fdc_idle;
-	} else if (buff->remaining > 0 && ftape_calc_next_cluster(buff) > 0) {
-		/*  still sectors left in current segment, continue
-		 *  with this segment
-		 */
-		if (fdc_setup_read_write(buff, mode) < 0) {
-			/* failed, abort operation
-			 */
-			buff->bytes = buff->ptr - buff->address;
-			buff->status = error;
-			/* finish this buffer: */
-			(void)ftape_next_buffer(ft_queue_head);
-			ft_runner_status = aborting;
-			fdc_mode         = fdc_idle;
-		}
-	} else {
-		/* current segment completed
-		 */
-		unsigned int last_segment = buff->segment_id;
-		int eot = ((last_segment + 1) % ft_segments_per_track) == 0;
-		unsigned int next = buff->next_segment;	/* 0 means stop ! */
-
-		buff->bytes = buff->ptr - buff->address;
-		buff->status = done;
-		buff = ftape_next_buffer(ft_queue_head);
-		if (eot) {
-			/*  finished last segment on current track,
-			 *  can't continue
-			 */
-			ft_runner_status = logical_eot;
-			fdc_mode         = fdc_idle;
-			TRACE_EXIT;
-		}
-		if (next <= 0) {
-			/*  don't continue with next segment
-			 */
-			TRACE(ft_t_noise, "no %s allowed, stopping tape",
-			      (write) ? "write next" : "read ahead");
-			pause_tape(0, mode);
-			ft_runner_status = idle;  /*  not quite true until
-						   *  next irq 
-						   */
-			TRACE_EXIT;
-		}
-		/*  continue with next segment
-		 */
-		if (buff->status != waiting) {
-			TRACE(ft_t_noise, "all input buffers %s, pausing tape",
-			      (write) ? "empty" : "full");
-			pause_tape(0, mode);
-			ft_runner_status = idle;  /*  not quite true until
-						   *  next irq 
-						   */
-			TRACE_EXIT;
-		}
-		if (write && next != buff->segment_id) {
-			TRACE(ft_t_noise, 
-			      "segments out of order, aborting write");
-			ft_runner_status = do_abort;
-			fdc_mode         = fdc_idle;
-			TRACE_EXIT;
-		}
-		ftape_setup_new_segment(buff, next, 0);
-		if (stop_read_ahead) {
-			buff->next_segment = 0;
-			stop_read_ahead = 0;
-		}
-		if (ftape_calc_next_cluster(buff) == 0 ||
-		    fdc_setup_read_write(buff, mode) != 0) {
-			TRACE(ft_t_err, "couldn't start %s-ahead",
-			      write ? "write" : "read");
-			ft_runner_status = do_abort;
-			fdc_mode         = fdc_idle;
-		} else {
-			/* keep on going */
-			switch (ft_driver_state) {
-			case   reading: buff->status = reading;   break;
-			case verifying: buff->status = verifying; break;
-			case   writing: buff->status = writing;   break;
-			case  deleting: buff->status = deleting;  break;
-			default:
-				TRACE(ft_t_err, 
-		      "BUG: ft_driver_state %d should be one out of "
-		      "{reading, writing, verifying, deleting}",
-				      ft_driver_state);
-				buff->status = write ? writing : reading;
-				break;
-			}
-		}
-	}
-	TRACE_EXIT;
-}
-
-static void retry_sector(buffer_struct *buff, 
-			 int mode,
-			 unsigned int skip)
-{
-	TRACE_FUN(ft_t_any);
-
-	TRACE(ft_t_noise, "%s error, will retry",
-	      (mode == fdc_writing_data || mode == fdc_deleting) ? "write" : "read");
-	pause_tape(1, mode);
-	ft_runner_status = aborting;
-	buff->status     = error;
-	buff->skip       = skip;
-	TRACE_EXIT;
-}
-
-static unsigned int find_resume_point(buffer_struct *buff)
-{
-	int i = 0;
-	SectorMap mask;
-	SectorMap map;
-	TRACE_FUN(ft_t_any);
-
-	/*  This function is to be called after all variables have been
-	 *  updated to point past the failing sector.
-	 *  If there are any soft errors before the failing sector,
-	 *  find the first soft error and return the sector offset.
-	 *  Otherwise find the last hard error.
-	 *  Note: there should always be at least one hard or soft error !
-	 */
-	if (buff->sector_offset < 1 || buff->sector_offset > 32) {
-		TRACE(ft_t_bug, "BUG: sector_offset = %d",
-		      buff->sector_offset);
-		TRACE_EXIT 0;
-	}
-	if (buff->sector_offset >= 32) { /* C-limitation on shift ! */
-		mask = 0xffffffff;
-	} else {
-		mask = (1 << buff->sector_offset) - 1;
-	}
-	map = buff->soft_error_map & mask;
-	if (map) {
-		while ((map & (1 << i)) == 0) {
-			++i;
-		}
-		TRACE(ft_t_noise, "at sector %d", FT_SECTOR(i));
-	} else {
-		map = buff->hard_error_map & mask;
-		i = buff->sector_offset - 1;
-		if (map) {
-			while ((map & (1 << i)) == 0) {
-				--i;
-			}
-			TRACE(ft_t_noise, "after sector %d", FT_SECTOR(i));
-			++i; /* first sector after last hard error */
-		} else {
-			TRACE(ft_t_bug, "BUG: no soft or hard errors");
-		}
-	}
-	TRACE_EXIT i;
-}
-
-/*  check possible dma residue when formatting, update position record in
- *  buffer struct. This is, of course, modelled after determine_progress(), but
- *  we don't need to set up for retries because the format process cannot be
- *  interrupted (except at the end of the tape track).
- */
-static int determine_fmt_progress(buffer_struct *buff, error_cause cause)
-{
-	unsigned int dma_residue;
-	TRACE_FUN(ft_t_any);
-
-	/*  Using less preferred order of disable_dma and
-	 *  get_dma_residue because this seems to fail on at least one
-	 *  system if reversed!
-	 */
-	dma_residue = get_dma_residue(fdc.dma);
-	disable_dma(fdc.dma);
-	if (cause != no_error || dma_residue != 0) {
-		TRACE(ft_t_info, "DMA residue = 0x%04x", dma_residue);
-		fdc_mode = fdc_idle;
-		switch(cause) {
-		case no_error:
-			ft_runner_status = aborting;
-			buff->status = idle;
-			break;
-		case overrun_error:
-			/*  got an overrun error on the first byte, must be a
-			 *  hardware problem 
-			 */
-			TRACE(ft_t_bug, 
-			      "Unexpected error: failing DMA controller ?");
-			ft_runner_status = do_abort;
-			buff->status = error;
-			break;
-		default:
-			TRACE(ft_t_noise, "Unexpected error at segment %d",
-			      buff->segment_id);
-			ft_runner_status = do_abort;
-			buff->status = error;
-			break;
-		}
-		TRACE_EXIT -EIO; /* can only retry entire track in format mode
-				  */
-	}
-	/*  Update var's influenced by the DMA operation.
-	 */
-	buff->ptr             += FT_SECTORS_PER_SEGMENT * 4;
-	buff->bytes           -= FT_SECTORS_PER_SEGMENT * 4;
-	buff->remaining       -= FT_SECTORS_PER_SEGMENT;
-	buff->segment_id ++; /* done with segment */
-	TRACE_EXIT 0;
-}
-
-/*
- *  Continue formatting, switch buffers if there is no data left in
- *  current buffer. This is, of course, modelled after
- *  continue_xfer(), but we don't need to set up for retries because
- *  the format process cannot be interrupted (except at the end of the
- *  tape track).
- */
-static void continue_formatting(buffer_struct *buff)
-{
-	TRACE_FUN(ft_t_any);
-
-	if (buff->remaining <= 0) { /*  no space left in dma buffer */
-		unsigned int next = buff->next_segment; 
-
-		if (next == 0) { /* end of tape track */
-			buff->status     = done;
-			ft_runner_status = logical_eot;
-			fdc_mode         = fdc_idle;
-			TRACE(ft_t_noise, "Done formatting track %d",
-			      ft_location.track);
-			TRACE_EXIT;
-		}
-		/*
-		 *  switch to next buffer!
-		 */
-		buff->status   = done;
-		buff = ftape_next_buffer(ft_queue_head);
-
-		if (buff->status != waiting  || next != buff->segment_id) {
-			goto format_setup_error;
-		}
-	}
-	if (fdc_setup_formatting(buff) < 0) {
-		goto format_setup_error;
-	}
-	buff->status = formatting;
-	TRACE(ft_t_fdc_dma, "Formatting segment %d on track %d",
-	      buff->segment_id, ft_location.track);
-	TRACE_EXIT;
- format_setup_error:
-	ft_runner_status = do_abort;
-	fdc_mode         = fdc_idle;
-	buff->status     = error;
-	TRACE(ft_t_err, "Error setting up for segment %d on track %d",
-	      buff->segment_id, ft_location.track);
-	TRACE_EXIT;
-
-}
-
-/*  this handles writing, read id, reading and formatting
- */
-static void handle_fdc_busy(buffer_struct *buff)
-{
-	static int no_data_error_count;
-	int retry = 0;
-	error_cause cause;
-	__u8 in[7];
-	int skip;
-	fdc_mode_enum fmode = fdc_mode;
-	TRACE_FUN(ft_t_any);
-
-	if (fdc_result(in, 7) < 0) { /* better get it fast ! */
-		TRACE(ft_t_err, 
-		      "Probably fatal error during FDC Result Phase\n"
-		      KERN_INFO
-		      "drive may hang until (power on) reset :-(");
-		/*  what to do next ????
-		 */
-		TRACE_EXIT;
-	}
-	cause = decode_irq_cause(fdc_mode, in);
-#ifdef TESTING
-	{ int i;
-	for (i = 0; i < (int)ft_nr_buffers; ++i)
-		TRACE(ft_t_any, "buffer[%d] status: %d, segment_id: %d",
-		      i, ft_buffer[i]->status, ft_buffer[i]->segment_id);
-	}
-#endif
-	if (fmode == fdc_reading_data && ft_driver_state == verifying) {
-		fmode = fdc_verifying;
-	}
-	switch (fmode) {
-	case fdc_verifying:
-		if (ft_runner_status == aborting ||
-		    ft_runner_status == do_abort) {
-			TRACE(ft_t_noise,"aborting %s",fdc_mode_txt(fdc_mode));
-			break;
-		}
-		if (buff->retry > 0) {
-			TRACE(ft_t_flow, "this is retry nr %d", buff->retry);
-		}
-		switch (cause) {
-		case no_error:
-			no_data_error_count = 0;
-			determine_verify_progress(buff, cause, in[5]);
-			if (in[2] & 0x40) {
-				/*  This should not happen when verifying
-				 */
-				TRACE(ft_t_warn,
-				      "deleted data in segment %d/%d",
-				      buff->segment_id,
-				      FT_SECTOR(buff->sector_offset - 1));
-				buff->remaining = 0; /* abort transfer */
-				buff->hard_error_map = EMPTY_SEGMENT;
-				skip = 1;
-			} else {
-				skip = 0;
-			}
-			continue_xfer(buff, fdc_mode, skip);
-			break;
-		case no_data_error:
-			no_data_error_count ++;
-		case overrun_error:
-			retry ++;
-		case id_am_error:
-		case id_crc_error:
-		case data_am_error:
-		case data_crc_error:
-			determine_verify_progress(buff, cause, in[5]); 
-			if (cause == no_data_error) {
-				if (no_data_error_count >= 2) {
-					TRACE(ft_t_warn,
-					      "retrying because of successive "
-					      "no data errors");
-					no_data_error_count = 0;
-				} else {
-					retry --;
-				}
-			} else {
-				no_data_error_count = 0;
-			}
-			if (retry) {
-				skip = find_resume_point(buff);
-			} else {
-				skip = buff->sector_offset;
-			}
-			if (retry && skip < 32) {
-				retry_sector(buff, fdc_mode, skip);
-			} else {
-				continue_xfer(buff, fdc_mode, skip);
-			}
-			update_history(cause);
-			break;
-		default:
-			/*  Don't know why this could happen 
-			 *  but find out.
-			 */
-			determine_verify_progress(buff, cause, in[5]);
-			retry_sector(buff, fdc_mode, 0);
-			TRACE(ft_t_err, "Error: unexpected error");
-			break;
-		}
-		break;
-	case fdc_reading_data:
-#ifdef TESTING
-		/* I'm sorry, but: NOBODY ever used this trace
-		 * messages for ages. I guess that Bas was the last person
-		 * that ever really used this (thank you, between the lines)
-		 */
-		if (cause == no_error) {
-			TRACE(ft_t_flow,"reading segment %d",buff->segment_id);
-		} else {
-			TRACE(ft_t_noise, "error reading segment %d",
-			      buff->segment_id);
-			TRACE(ft_t_noise, "\n"
-			      KERN_INFO
-			     "IRQ:C: 0x%02x, H: 0x%02x, R: 0x%02x, N: 0x%02x\n"
-			      KERN_INFO
-			      "BUF:C: 0x%02x, H: 0x%02x, R: 0x%02x",
-			      in[3], in[4], in[5], in[6],
-			      buff->cyl, buff->head, buff->sect);
-		}
-#endif
-		if (ft_runner_status == aborting ||
-		    ft_runner_status == do_abort) {
-			TRACE(ft_t_noise,"aborting %s",fdc_mode_txt(fdc_mode));
-			break;
-		}
-		if (buff->bad_sector_map == FAKE_SEGMENT) {
-			/* This condition occurs when reading a `fake'
-			 * sector that's not accessible. Doesn't
-			 * really matter as we would have ignored it
-			 * anyway !
-			 *
-			 * Chance is that we're past the next segment
-			 * now, so the next operation may fail and
-			 * result in a retry.  
-			 */
-			buff->remaining = 0;	/* skip failing sector */
-			/* buff->ptr       = buff->address; */
-			/* fake success: */
-			continue_xfer(buff, fdc_mode, 1);
-			/*  trace calls are expensive: place them AFTER
-			 *  the real stuff has been done.
-			 *  
-			 */
-			TRACE(ft_t_noise, "skipping empty segment %d (read), size? %d",
-			      buff->segment_id, buff->ptr - buff->address);
-			TRACE_EXIT;
-		}
-		if (buff->retry > 0) {
-			TRACE(ft_t_flow, "this is retry nr %d", buff->retry);
-		}
-		switch (cause) {
-		case no_error:
-			determine_progress(buff, cause, in[5]);
-			if (in[2] & 0x40) {
-				/*  Handle deleted data in header segments.
-				 *  Skip segment and force read-ahead.
-				 */
-				TRACE(ft_t_warn,
-				      "deleted data in segment %d/%d",
-				      buff->segment_id,
-				      FT_SECTOR(buff->sector_offset - 1));
-				buff->deleted = 1;
-				buff->remaining = 0;/*abort transfer */
-				buff->soft_error_map |=
-						(-1L << buff->sector_offset);
-				if (buff->segment_id == 0) {
-					/* stop on next segment */
-					stop_read_ahead = 1;
-				}
-				/* force read-ahead: */
-				buff->next_segment = 
-					buff->segment_id + 1;
-				skip = (FT_SECTORS_PER_SEGMENT - 
-					buff->sector_offset);
-			} else {
-				skip = 0;
-			}
-			continue_xfer(buff, fdc_mode, skip);
-			break;
-		case no_data_error:
-			/* Tape started too far ahead of or behind the
-			 * right sector.  This may also happen in the
-			 * middle of a segment !
-			 *
-			 * Handle no-data as soft error. If next
-			 * sector fails too, a retry (with needed
-			 * reposition) will follow.
-			 */
-			retry ++;
-		case id_am_error:
-		case id_crc_error:
-		case data_am_error:
-		case data_crc_error:
-		case overrun_error:
-			retry += (buff->soft_error_map != 0 ||
-				  buff->hard_error_map != 0);
-			determine_progress(buff, cause, in[5]); 
-#if 1 || defined(TESTING)
-			if (cause == overrun_error) retry ++;
-#endif
-			if (retry) {
-				skip = find_resume_point(buff);
-			} else {
-				skip = buff->sector_offset;
-			}
-			/*  Try to resume with next sector on single
-			 *  errors (let ecc correct it), but retry on
-			 *  no_data (we'll be past the target when we
-			 *  get here so we cannot retry) or on
-			 *  multiple errors (reduce chance on ecc
-			 *  failure).
-			 */
-			/*  cH: 23/02/97: if the last sector in the 
-			 *  segment was a hard error, then there is 
-			 *  no sense in a retry. This occasion seldom
-			 *  occurs but ... @:³²¸`@%&§$
-			 */
-			if (retry && skip < 32) {
-				retry_sector(buff, fdc_mode, skip);
-			} else {
-				continue_xfer(buff, fdc_mode, skip);
-			}
-			update_history(cause);
-			break;
-		default:
-			/*  Don't know why this could happen 
-			 *  but find out.
-			 */
-			determine_progress(buff, cause, in[5]);
-			retry_sector(buff, fdc_mode, 0);
-			TRACE(ft_t_err, "Error: unexpected error");
-			break;
-		}
-		break;
-	case fdc_reading_id:
-		if (cause == no_error) {
-			fdc_cyl = in[3];
-			fdc_head = in[4];
-			fdc_sect = in[5];
-			TRACE(ft_t_fdc_dma,
-			      "id read: C: 0x%02x, H: 0x%02x, R: 0x%02x",
-			      fdc_cyl, fdc_head, fdc_sect);
-		} else {	/* no valid information, use invalid sector */
-			fdc_cyl = fdc_head = fdc_sect = 0;
-			TRACE(ft_t_flow, "Didn't find valid sector Id");
-		}
-		fdc_mode = fdc_idle;
-		break;
-	case fdc_deleting:
-	case fdc_writing_data:
-#ifdef TESTING
-		if (cause == no_error) {
-			TRACE(ft_t_flow, "writing segment %d", buff->segment_id);
-		} else {
-			TRACE(ft_t_noise, "error writing segment %d",
-			      buff->segment_id);
-		}
-#endif
-		if (ft_runner_status == aborting ||
-		    ft_runner_status == do_abort) {
-			TRACE(ft_t_flow, "aborting %s",fdc_mode_txt(fdc_mode));
-			break;
-		}
-		if (buff->retry > 0) {
-			TRACE(ft_t_flow, "this is retry nr %d", buff->retry);
-		}
-		if (buff->bad_sector_map == FAKE_SEGMENT) {
-			/* This condition occurs when trying to write to a
-			 * `fake' sector that's not accessible. Doesn't really
-			 * matter as it isn't used anyway ! Might be located
-			 * at wrong segment, then we'll fail on the next
-			 * segment.
-			 */
-			TRACE(ft_t_noise, "skipping empty segment (write)");
-			buff->remaining = 0;	/* skip failing sector */
-			/* fake success: */
-			continue_xfer(buff, fdc_mode, 1);
-			break;
-		}
-		switch (cause) {
-		case no_error:
-			determine_progress(buff, cause, in[5]);
-			continue_xfer(buff, fdc_mode, 0);
-			break;
-		case no_data_error:
-		case id_am_error:
-		case id_crc_error:
-		case data_am_error:
-		case overrun_error:
-			update_history(cause);
-			determine_progress(buff, cause, in[5]);
-			skip = find_resume_point(buff);
-			retry_sector(buff, fdc_mode, skip);
-			break;
-		default:
-			if (in[1] & 0x02) {
-				TRACE(ft_t_err, "media not writable");
-			} else {
-				TRACE(ft_t_bug, "unforeseen write error");
-			}
-			fdc_mode = fdc_idle;
-			break;
-		}
-		break; /* fdc_deleting || fdc_writing_data */
-	case fdc_formatting:
-		/*  The interrupt comes after formatting a segment. We then
-		 *  have to set up QUICKLY for the next segment. But
-		 *  afterwards, there is plenty of time.
-		 */
-		switch (cause) {
-		case no_error:
-			/*  would like to keep most of the formatting stuff
-			 *  outside the isr code, but timing is too critical
-			 */
-			if (determine_fmt_progress(buff, cause) >= 0) {
-				continue_formatting(buff);
-			}
-			break;
-		case no_data_error:
-		case id_am_error:
-		case id_crc_error:
-		case data_am_error:
-		case overrun_error:
-		default:
-			determine_fmt_progress(buff, cause);
-			update_history(cause);
-			if (in[1] & 0x02) {
-				TRACE(ft_t_err, "media not writable");
-			} else {
-				TRACE(ft_t_bug, "unforeseen write error");
-			}
-			break;
-		} /* cause */
-		break;
-	default:
-		TRACE(ft_t_warn, "Warning: unexpected irq during: %s",
-		      fdc_mode_txt(fdc_mode));
-		fdc_mode = fdc_idle;
-		break;
-	}
-	TRACE_EXIT;
-}
-
-/*      FDC interrupt service routine.
- */
-void fdc_isr(void)
-{
-	static int isr_active;
-#ifdef TESTING
-	unsigned int t0 = ftape_timestamp();
-#endif
-	TRACE_FUN(ft_t_any);
-
- 	if (isr_active++) {
-		--isr_active;
-		TRACE(ft_t_bug, "BUG: nested interrupt, not good !");
-		*fdc.hook = fdc_isr; /*  hook our handler into the fdc
-				      *  code again 
-				      */
-		TRACE_EXIT;
-	}
-	sti();
-	if (inb_p(fdc.msr) & FDC_BUSY) {	/*  Entering Result Phase */
-		ft_hide_interrupt = 0;
-		handle_fdc_busy(ftape_get_buffer(ft_queue_head));
-		if (ft_runner_status == do_abort) {
-			/*      cease operation, remember tape position
-			 */
-			TRACE(ft_t_flow, "runner aborting");
-			ft_runner_status = aborting;
-			++ft_expected_stray_interrupts;
-		}
-	} else { /* !FDC_BUSY */
-		/*  clear interrupt, cause should be gotten by issuing
-		 *  a Sense Interrupt Status command.
-		 */
-		if (fdc_mode == fdc_recalibrating || fdc_mode == fdc_seeking) {
-			if (ft_hide_interrupt) {
-				int st0;
-				int pcn;
-
-				if (fdc_sense_interrupt_status(&st0, &pcn) < 0)
-					TRACE(ft_t_err,
-					      "sense interrupt status failed");
-				ftape_current_cylinder = pcn;
-				TRACE(ft_t_flow, "handled hidden interrupt");
-			}
-			ft_seek_completed = 1;
-			fdc_mode = fdc_idle;
-		} else if (!waitqueue_active(&ftape_wait_intr)) {
-			if (ft_expected_stray_interrupts == 0) {
-				TRACE(ft_t_warn, "unexpected stray interrupt");
-			} else {
-				TRACE(ft_t_flow, "expected stray interrupt");
-				--ft_expected_stray_interrupts;
-			}
-		} else {
-			if (fdc_mode == fdc_reading_data ||
-			    fdc_mode == fdc_verifying    ||
-			    fdc_mode == fdc_writing_data ||
-			    fdc_mode == fdc_deleting     ||
-			    fdc_mode == fdc_formatting   ||
-			    fdc_mode == fdc_reading_id) {
-				if (inb_p(fdc.msr) & FDC_BUSY) {
-					TRACE(ft_t_bug,
-					"***** FDC failure, busy too late");
-				} else {
-					TRACE(ft_t_bug,
-					      "***** FDC failure, no busy");
-				}
-			} else {
-				TRACE(ft_t_fdc_dma, "awaited stray interrupt");
-			}
-		}
-		ft_hide_interrupt = 0;
-	}
-	/*    Handle sleep code.
-	 */
-	if (!ft_hide_interrupt) {
-		ft_interrupt_seen ++;
-		if (waitqueue_active(&ftape_wait_intr)) {
-			wake_up_interruptible(&ftape_wait_intr);
-		}
-	} else {
-		TRACE(ft_t_flow, "hiding interrupt while %s", 
-		      waitqueue_active(&ftape_wait_intr) ? "waiting":"active");
-	}
-#ifdef TESTING
-	t0 = ftape_timediff(t0, ftape_timestamp());
-	if (t0 >= 1000) {
-		/* only tell us about long calls */
-		TRACE(ft_t_noise, "isr() duration: %5d usec", t0);
-	}
-#endif
-	*fdc.hook = fdc_isr;	/* hook our handler into the fdc code again */
-	--isr_active;
-	TRACE_EXIT;
-}
diff --git a/drivers/char/ftape/lowlevel/fdc-isr.h b/drivers/char/ftape/lowlevel/fdc-isr.h
deleted file mode 100644
index 065aa97..0000000
--- a/drivers/char/ftape/lowlevel/fdc-isr.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef _FDC_ISR_H
-#define _FDC_ISR_H
-
-/*
- * Copyright (C) 1993-1996 Bas Laarhoven,
- *           (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/fdc-isr.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:07 $
- *
- *      This file declares the global variables necessary to
- *      synchronize the interrupt service routine (isr) with the
- *      remainder of the QIC-40/80/3010/3020 floppy-tape driver
- *      "ftape" for Linux.
- */
-
-/*
- *      fdc-isr.c defined public variables
- */
-extern volatile int ft_expected_stray_interrupts; /* masks stray interrupts */
-extern volatile int ft_seek_completed;	          /* flag set by isr */
-extern volatile int ft_interrupt_seen;	          /* flag set by isr */
-extern volatile int ft_hide_interrupt;            /* flag set by isr */
-
-/*
- *      fdc-io.c defined public functions
- */
-extern void fdc_isr(void);
-
-/*
- *      A kernel hook that steals one interrupt from the floppy
- *      driver (Should be fixed when the new fdc driver gets ready)
- *      See the linux kernel source files:
- *          drivers/block/floppy.c & drivers/block/blk.h
- *      for the details.
- */
-extern void (*do_floppy) (void);
-
-#endif
diff --git a/drivers/char/ftape/lowlevel/ftape-bsm.c b/drivers/char/ftape/lowlevel/ftape-bsm.c
deleted file mode 100644
index d1a301c..0000000
--- a/drivers/char/ftape/lowlevel/ftape-bsm.c
+++ /dev/null
@@ -1,491 +0,0 @@
-/*
- *      Copyright (C) 1994-1996 Bas Laarhoven,
- *                (C) 1996-1997 Claus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-bsm.c,v $
- * $Revision: 1.3 $
- * $Date: 1997/10/05 19:15:15 $
- *
- *      This file contains the bad-sector map handling code for
- *      the QIC-117 floppy tape driver for Linux.
- *      QIC-40, QIC-80, QIC-3010 and QIC-3020 maps are implemented.
- */
-
-#include <linux/string.h>
-
-#include <linux/ftape.h>
-#include "../lowlevel/ftape-tracing.h"
-#include "../lowlevel/ftape-bsm.h"
-#include "../lowlevel/ftape-ctl.h"
-#include "../lowlevel/ftape-rw.h"
-
-/*      Global vars.
- */
-
-/*      Local vars.
- */
-static __u8 *bad_sector_map;
-static SectorCount *bsm_hash_ptr; 
-
-typedef enum {
-	forward, backward
-} mode_type;
-
-#if 0
-static void ftape_put_bad_sector_entry(int segment_id, SectorMap new_map);
-#endif
-
-#if 0
-/*  fix_tape converts a normal QIC-80 tape into a 'wide' tape.
- *  For testing purposes only !
- */
-void fix_tape(__u8 * buffer, ft_format_type new_code)
-{
-	static __u8 list[BAD_SECTOR_MAP_SIZE];
-	SectorMap *src_ptr = (SectorMap *) list;
-	__u8 *dst_ptr = bad_sector_map;
-	SectorMap map;
-	unsigned int sector = 1;
-	int i;
-
-	if (format_code != fmt_var && format_code != fmt_big) {
-		memcpy(list, bad_sector_map, sizeof(list));
-		memset(bad_sector_map, 0, sizeof(bad_sector_map));
-		while ((__u8 *) src_ptr - list < sizeof(list)) {
-			map = *src_ptr++;
-			if (map == EMPTY_SEGMENT) {
-				*(SectorMap *) dst_ptr = 0x800000 + sector;
-				dst_ptr += 3;
-				sector += SECTORS_PER_SEGMENT;
-			} else {
-				for (i = 0; i < SECTORS_PER_SEGMENT; ++i) {
-					if (map & 1) {
-						*(SectorMap *) dst_ptr = sector;
-						dst_ptr += 3;
-					}
-					map >>= 1;
-					++sector;
-				}
-			}
-		}
-	}
-	bad_sector_map_changed = 1;
-	*(buffer + 4) = new_code;	/* put new format code */
-	if (format_code != fmt_var && new_code == fmt_big) {
-		PUT4(buffer, FT_6_HSEG_1,   (__u32)GET2(buffer, 6));
-		PUT4(buffer, FT_6_HSEG_2,   (__u32)GET2(buffer, 8));
-		PUT4(buffer, FT_6_FRST_SEG, (__u32)GET2(buffer, 10));
-		PUT4(buffer, FT_6_LAST_SEG, (__u32)GET2(buffer, 12));
-		memset(buffer+6, '\0', 8);
-	}
-	format_code = new_code;
-}
-
-#endif
-
-/*   given a buffer that contains a header segment, find the end
- *   of the bsm list
- */
-__u8 * ftape_find_end_of_bsm_list(__u8 * address)
-{
-	__u8 *ptr   = address + FT_HEADER_END; /* start of bsm list */
-	__u8 *limit = address + FT_SEGMENT_SIZE;
-	while (ptr + 2 < limit) {
-		if (ptr[0] || ptr[1] || ptr[2]) {
-			ptr += 3;
-		} else {
-			return ptr;
-		}
-	}
-	return NULL;
-}
-
-static inline void put_sector(SectorCount *ptr, unsigned int sector)
-{
-	ptr->bytes[0] = sector & 0xff;
-	sector >>= 8;
-	ptr->bytes[1] = sector & 0xff;
-	sector >>= 8;
-	ptr->bytes[2] = sector & 0xff;
-}
-
-static inline unsigned int get_sector(SectorCount *ptr)
-{
-#if 1
-	unsigned int sector;
-
-	sector  = ptr->bytes[0];
-	sector += ptr->bytes[1] <<  8;
-	sector += ptr->bytes[2] << 16;
-
-	return sector;
-#else
-	/*  GET4 gets the next four bytes in Intel little endian order
-	 *  and converts them to host byte order and handles unaligned
-	 *  access.
-	 */
-	return (GET4(ptr, 0) & 0x00ffffff); /* back to host byte order */
-#endif
-}
-
-static void bsm_debug_fake(void)
-{
-	/* for testing of bad sector handling at end of tape
-	 */
-#if 0
-	ftape_put_bad_sector_entry(segments_per_track * tracks_per_tape - 3,
-				   0x000003e0);
-	ftape_put_bad_sector_entry(segments_per_track * tracks_per_tape - 2,
-				   0xff3fffff);
-	ftape_put_bad_sector_entry(segments_per_track * tracks_per_tape - 1,
-				   0xffffe000);
-#endif
-	/*  Enable to test bad sector handling
-	 */
-#if 0
-	ftape_put_bad_sector_entry(30, 0xfffffffe);
-	ftape_put_bad_sector_entry(32, 0x7fffffff);
-	ftape_put_bad_sector_entry(34, 0xfffeffff);
-	ftape_put_bad_sector_entry(36, 0x55555555);
-	ftape_put_bad_sector_entry(38, 0xffffffff);
-	ftape_put_bad_sector_entry(50, 0xffff0000);
-	ftape_put_bad_sector_entry(51, 0xffffffff);
-	ftape_put_bad_sector_entry(52, 0xffffffff);
-	ftape_put_bad_sector_entry(53, 0x0000ffff);
-#endif
-	/*  Enable when testing multiple volume tar dumps.
-	 */
-#if 0
-	{
-		int i;
-
-		for (i = ft_first_data_segment;
-		     i <= ft_last_data_segment - 7; ++i) {
-			ftape_put_bad_sector_entry(i, EMPTY_SEGMENT);
-		}
-	}
-#endif
-	/*  Enable when testing bit positions in *_error_map
-	 */
-#if 0
-	{
-		int i;
-		
-		for (i = first_data_segment; i <= last_data_segment; ++i) {
-			ftape_put_bad_sector_entry(i,
-					   ftape_get_bad_sector_entry(i) 
-					   | 0x00ff00ff);
-		}
-	}
-#endif
-}
-
-static void print_bad_sector_map(void)
-{
-	unsigned int good_sectors;
-	unsigned int total_bad = 0;
-	int i;
-	TRACE_FUN(ft_t_flow);
-
-	if (ft_format_code == fmt_big || 
-	    ft_format_code == fmt_var || 
-	    ft_format_code == fmt_1100ft) {
-		SectorCount *ptr = (SectorCount *)bad_sector_map;
-		unsigned int sector;
-		__u16 *ptr16;
-
-		while((sector = get_sector(ptr++)) != 0) {
-			if ((ft_format_code == fmt_big || 
-			     ft_format_code == fmt_var) &&
-			    sector & 0x800000) {
-				total_bad += FT_SECTORS_PER_SEGMENT - 3;
-				TRACE(ft_t_noise, "bad segment at sector: %6d",
-				      sector & 0x7fffff);
-			} else {
-				++total_bad;
-				TRACE(ft_t_noise, "bad sector: %6d", sector);
-			}
-		}
-		/*  Display old ftape's end-of-file marks
-		 */
-		ptr16 = (__u16*)ptr;
-		while ((sector = get_unaligned(ptr16++)) != 0) {
-			TRACE(ft_t_noise, "Old ftape eof mark: %4d/%2d",
-			      sector, get_unaligned(ptr16++));
-		}
-	} else { /* fixed size format */
-		for (i = ft_first_data_segment;
-		     i < (int)(ft_segments_per_track * ft_tracks_per_tape); ++i) {
-			SectorMap map = ((SectorMap *) bad_sector_map)[i];
-
-			if (map) {
-				TRACE(ft_t_noise,
-				      "bsm for segment %4d: 0x%08x", i, (unsigned int)map);
-				total_bad += ((map == EMPTY_SEGMENT)
-					       ? FT_SECTORS_PER_SEGMENT - 3
-					       : count_ones(map));
-			}
-		}
-	}
-	good_sectors =
-		((ft_segments_per_track * ft_tracks_per_tape - ft_first_data_segment)
-		 * (FT_SECTORS_PER_SEGMENT - 3)) - total_bad;
-	TRACE(ft_t_info, "%d Kb usable on this tape", good_sectors);
-	if (total_bad == 0) {
-		TRACE(ft_t_info,
-		      "WARNING: this tape has no bad blocks registered !");
-	} else {
-		TRACE(ft_t_info, "%d bad sectors", total_bad);
-	}
-	TRACE_EXIT;
-}
-
-
-void ftape_extract_bad_sector_map(__u8 * buffer)
-{
-	TRACE_FUN(ft_t_any);
-
-	/*  Fill the bad sector map with the contents of buffer.
-	 */
-	if (ft_format_code == fmt_var || ft_format_code == fmt_big) {
-		/* QIC-3010/3020 and wide QIC-80 tapes no longer have a failed
-		 * sector log but use this area to extend the bad sector map.
-		 */
-		bad_sector_map = &buffer[FT_HEADER_END];
-	} else {
-		/* non-wide QIC-80 tapes have a failed sector log area that
-		 * mustn't be included in the bad sector map.
-		 */
-		bad_sector_map = &buffer[FT_FSL + FT_FSL_SIZE];
-	}
-	if (ft_format_code == fmt_1100ft || 
-	    ft_format_code == fmt_var    ||
-	    ft_format_code == fmt_big) {
-		bsm_hash_ptr = (SectorCount *)bad_sector_map;
-	} else {
-		bsm_hash_ptr = NULL;
-	}
-	bsm_debug_fake();
-	if (TRACE_LEVEL >= ft_t_info) {
-		print_bad_sector_map();
-	}
-	TRACE_EXIT;
-}
-
-static inline SectorMap cvt2map(unsigned int sector)
-{
-	return 1 << (((sector & 0x7fffff) - 1) % FT_SECTORS_PER_SEGMENT);
-}
-
-static inline int cvt2segment(unsigned int sector)
-{
-	return ((sector & 0x7fffff) - 1) / FT_SECTORS_PER_SEGMENT;
-}
-
-static int forward_seek_entry(int segment_id, 
-			      SectorCount **ptr, 
-			      SectorMap *map)
-{
-	unsigned int sector;
-	int segment;
-
-	do {
-		sector = get_sector((*ptr)++);
-		segment = cvt2segment(sector);
-	} while (sector != 0 && segment < segment_id);
-	(*ptr) --; /* point to first sector >= segment_id */
-	/*  Get all sectors in segment_id
-	 */
-	if (sector == 0 || segment != segment_id) {
-		*map = 0;
-		return 0;
-	} else if ((sector & 0x800000) &&
-		   (ft_format_code == fmt_var || ft_format_code == fmt_big)) {
-		*map = EMPTY_SEGMENT;
-		return FT_SECTORS_PER_SEGMENT;
-	} else {
-		int count = 1;
-		SectorCount *tmp_ptr = (*ptr) + 1;
-		
-		*map = cvt2map(sector);
-		while ((sector = get_sector(tmp_ptr++)) != 0 &&
-		       (segment = cvt2segment(sector)) == segment_id) {
-			*map |= cvt2map(sector);
-			++count;
-		}
-		return count;
-	}
-}
-
-static int backwards_seek_entry(int segment_id,
-				SectorCount **ptr,
-				SectorMap *map)
-{
-	unsigned int sector;
-	int segment; /* max unsigned int */
-
-	if (*ptr <= (SectorCount *)bad_sector_map) {
-		*map = 0;
-		return 0;
-	}
-	do {
-		sector  = get_sector(--(*ptr));
-		segment = cvt2segment(sector);
-	} while (*ptr > (SectorCount *)bad_sector_map && segment > segment_id);
-	if (segment > segment_id) { /*  at start of list, no entry found */
-		*map = 0;
-		return 0;
-	} else if (segment < segment_id) {
-		/*  before smaller entry, adjust for overshoot */
-		(*ptr) ++;
-		*map = 0;
-		return 0;
-	} else if ((sector & 0x800000) &&
-		   (ft_format_code == fmt_big || ft_format_code == fmt_var)) {
-		*map = EMPTY_SEGMENT;
-		return FT_SECTORS_PER_SEGMENT;
-	} else { /*  get all sectors in segment_id */
-		int count = 1;
-
-		*map = cvt2map(sector);
-		while(*ptr > (SectorCount *)bad_sector_map) {
-			sector = get_sector(--(*ptr));
-			segment = cvt2segment(sector);
-			if (segment != segment_id) {
-				break;
-			}
-			*map |= cvt2map(sector);
-			++count;
-		}
-		if (segment < segment_id) {
-			(*ptr) ++;
-		}
-		return count;
-	}
-}
-
-#if 0
-static void ftape_put_bad_sector_entry(int segment_id, SectorMap new_map)
-{
-	SectorCount *ptr = (SectorCount *)bad_sector_map;
-	int count;
-	int new_count;
-	SectorMap map;
-	TRACE_FUN(ft_t_any);
-
-	if (ft_format_code == fmt_1100ft || 
-	    ft_format_code == fmt_var || 
-	    ft_format_code == fmt_big) {
-		count = forward_seek_entry(segment_id, &ptr, &map);
-		new_count = count_ones(new_map);
-		/* If format code == 4 put empty segment instead of 32
-		 * bad sectors.
-		 */
-		if (ft_format_code == fmt_var || ft_format_code == fmt_big) {
-			if (new_count == FT_SECTORS_PER_SEGMENT) {
-				new_count = 1;
-			}
-			if (count == FT_SECTORS_PER_SEGMENT) {
-				count = 1;
-			}
-		}
-		if (count != new_count) {
-			/* insert (or delete if < 0) new_count - count
-			 * entries.  Move trailing part of list
-			 * including terminating 0.
-			 */
-			SectorCount *hi_ptr = ptr;
-
-			do {
-			} while (get_sector(hi_ptr++) != 0);
-			/*  Note: ptr is of type SectorCount *, and each bad
-			 *  sector entry consumes sizeof(SectorCount) == 3 bytes.
-			 */
-			memmove(ptr + new_count, ptr + count,
-				(size_t)(hi_ptr - (ptr + count))*sizeof(SectorCount));
-		}
-		TRACE(ft_t_noise, "putting map 0x%08x at %p, segment %d",
-		      (unsigned int)new_map, ptr, segment_id);
-		if (new_count == 1 && new_map == EMPTY_SEGMENT) {
-			put_sector(ptr++, (0x800001 + 
-					  segment_id * 
-					  FT_SECTORS_PER_SEGMENT));
-		} else {
-			int i = 0;
-
-			while (new_map) {
-				if (new_map & 1) {
-					put_sector(ptr++, 
-						   1 + segment_id * 
-						   FT_SECTORS_PER_SEGMENT + i);
-				}
-				++i;
-				new_map >>= 1;
-			}
-		}
-	} else {
-		((SectorMap *) bad_sector_map)[segment_id] = new_map;
-	}
-	TRACE_EXIT;
-}
-#endif  /*  0  */
-
-SectorMap ftape_get_bad_sector_entry(int segment_id)
-{
-	if (ft_used_header_segment == -1) {
-		/*  When reading header segment we'll need a blank map.
-		 */
-		return 0;
-	} else if (bsm_hash_ptr != NULL) {
-		/*  Invariants:
-		 *    map - mask value returned on last call.
-		 *    bsm_hash_ptr - points to first sector greater or equal to
-		 *          first sector in last_referenced segment.
-		 *    last_referenced - segment id used in the last call,
-		 *                      sector and map belong to this id.
-		 *  This code is designed for sequential access and retries.
-		 *  For true random access it may have to be redesigned.
-		 */
-		static int last_reference = -1;
-		static SectorMap map;
-
-		if (segment_id > last_reference) {
-			/*  Skip all sectors before segment_id
-			 */
-			forward_seek_entry(segment_id, &bsm_hash_ptr, &map);
-		} else if (segment_id < last_reference) {
-			/* Skip backwards until begin of buffer or
-			 * first sector in segment_id 
-			 */
-			backwards_seek_entry(segment_id, &bsm_hash_ptr, &map);
-		}		/* segment_id == last_reference : keep map */
-		last_reference = segment_id;
-		return map;
-	} else {
-		return ((SectorMap *) bad_sector_map)[segment_id];
-	}
-}
-
-/*  This is simply here to prevent us from overwriting other kernel
- *  data. Writes will result in NULL Pointer dereference.
- */
-void ftape_init_bsm(void)
-{
-	bad_sector_map = NULL;
-	bsm_hash_ptr   = NULL;
-}
diff --git a/drivers/char/ftape/lowlevel/ftape-bsm.h b/drivers/char/ftape/lowlevel/ftape-bsm.h
deleted file mode 100644
index ed45465..0000000
--- a/drivers/char/ftape/lowlevel/ftape-bsm.h
+++ /dev/null
@@ -1,66 +0,0 @@
-#ifndef _FTAPE_BSM_H
-#define _FTAPE_BSM_H
-
-/*
- * Copyright (C) 1994-1996 Bas Laarhoven,
- *           (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-bsm.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:07 $
- *
- *      This file contains definitions for the bad sector map handling
- *      routines for the QIC-117 floppy-tape driver for Linux.
- */
-
-#include <linux/ftape.h>
-#include <linux/ftape-header-segment.h>
-
-#define EMPTY_SEGMENT           (0xffffffff)
-#define FAKE_SEGMENT            (0xfffffffe)
-
-/*  maximum (format code 4) bad sector map size (bytes).
- */
-#define BAD_SECTOR_MAP_SIZE     (29 * SECTOR_SIZE - 256)
-
-/*  format code 4 bad sector entry, ftape uses this
- *  internally for all format codes
- */
-typedef __u32 SectorMap;
-/*  bad sector map entry for variable-length and 1100 ft tapes. These three
- *  bytes represent a single sector address measured from BOT.
- */
-typedef struct NewSectorMap {          
-	__u8 bytes[3];
-} SectorCount;
-
-
-/*
- *      ftape-bsm.c defined global vars.
- */
-
-/*
- *      ftape-bsm.c defined global functions.
- */
-extern void update_bad_sector_map(__u8 * buffer);
-extern void ftape_extract_bad_sector_map(__u8 * buffer);
-extern SectorMap ftape_get_bad_sector_entry(int segment_id);
-extern __u8 *ftape_find_end_of_bsm_list(__u8 * address);
-extern void ftape_init_bsm(void);
-
-#endif
diff --git a/drivers/char/ftape/lowlevel/ftape-buffer.c b/drivers/char/ftape/lowlevel/ftape-buffer.c
deleted file mode 100644
index c706ff1..0000000
--- a/drivers/char/ftape/lowlevel/ftape-buffer.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- *      Copyright (C) 1997 Claus-Justus Heine
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-buffer.c,v $
- * $Revision: 1.3 $
- * $Date: 1997/10/16 23:33:11 $
- *
- *  This file contains the allocator/deallocator for ftape's dynamic dma
- *  buffer.
- */
-
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <asm/dma.h>
-
-#include <linux/ftape.h>
-#include "../lowlevel/ftape-rw.h"
-#include "../lowlevel/ftape-read.h"
-#include "../lowlevel/ftape-tracing.h"
-#include "../lowlevel/ftape-buffer.h"
-
-/*  DMA'able memory allocation stuff.
- */
-
-static inline void *dmaalloc(size_t size)
-{
-	unsigned long addr;
-
-	if (size == 0) {
-		return NULL;
-	}
-	addr = __get_dma_pages(GFP_KERNEL, get_order(size));
-	if (addr) {
-		struct page *page;
-
-		for (page = virt_to_page(addr); page < virt_to_page(addr+size); page++)
-			SetPageReserved(page);
-	}
-	return (void *)addr;
-}
-
-static inline void dmafree(void *addr, size_t size)
-{
-	if (size > 0) {
-		struct page *page;
-
-		for (page = virt_to_page((unsigned long)addr);
-		     page < virt_to_page((unsigned long)addr+size); page++)
-			ClearPageReserved(page);
-		free_pages((unsigned long) addr, get_order(size));
-	}
-}
-
-static int add_one_buffer(void)
-{
-	TRACE_FUN(ft_t_flow);
-	
-	if (ft_nr_buffers >= FT_MAX_NR_BUFFERS) {
-		TRACE_EXIT -ENOMEM;
-	}
-	ft_buffer[ft_nr_buffers] = kmalloc(sizeof(buffer_struct), GFP_KERNEL);
-	if (ft_buffer[ft_nr_buffers] == NULL) {
-		TRACE_EXIT -ENOMEM;
-	}
-	memset(ft_buffer[ft_nr_buffers], 0, sizeof(buffer_struct));
-	ft_buffer[ft_nr_buffers]->address = dmaalloc(FT_BUFF_SIZE);
-	if (ft_buffer[ft_nr_buffers]->address == NULL) {
-		kfree(ft_buffer[ft_nr_buffers]);
-		ft_buffer[ft_nr_buffers] = NULL;
-		TRACE_EXIT -ENOMEM;
-	}
-	ft_nr_buffers ++;
-	TRACE(ft_t_info, "buffer nr #%d @ %p, dma area @ %p",
-	      ft_nr_buffers,
-	      ft_buffer[ft_nr_buffers-1],
-	      ft_buffer[ft_nr_buffers-1]->address);
-	TRACE_EXIT 0;
-}
-
-static void del_one_buffer(void)
-{
-	TRACE_FUN(ft_t_flow);
-	if (ft_nr_buffers > 0) {
-		TRACE(ft_t_info, "releasing buffer nr #%d @ %p, dma area @ %p",
-		      ft_nr_buffers,
-		      ft_buffer[ft_nr_buffers-1],
-		      ft_buffer[ft_nr_buffers-1]->address);
-		ft_nr_buffers --;
-		dmafree(ft_buffer[ft_nr_buffers]->address, FT_BUFF_SIZE);
-		kfree(ft_buffer[ft_nr_buffers]);
-		ft_buffer[ft_nr_buffers] = NULL;
-	}
-	TRACE_EXIT;
-}
-
-int ftape_set_nr_buffers(int cnt)
-{
-	int delta = cnt - ft_nr_buffers;
-	TRACE_FUN(ft_t_flow);
-
-	if (delta > 0) {
-		while (delta--) {
-			if (add_one_buffer() < 0) {
-				TRACE_EXIT -ENOMEM;
-			}
-		}
-	} else if (delta < 0) {
-		while (delta++) {
-			del_one_buffer();
-		}
-	}
-	ftape_zap_read_buffers();
-	TRACE_EXIT 0;
-}
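Each buffer above holds one tape segment (FT_BUFF_SIZE bytes) of ISA-DMA-capable memory, so the allocation can fail on fragmented systems.  A sketch, not taken from the driver, of how a caller might size the pool while coping with -ENOMEM:

static int example_size_buffer_pool(int wanted)
{
	/* try the requested number of segment buffers first ... */
	if (ftape_set_nr_buffers(wanted) >= 0)
		return 0;
	/* ... and fall back to a single buffer rather than failing hard */
	return ftape_set_nr_buffers(1);
}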
diff --git a/drivers/char/ftape/lowlevel/ftape-buffer.h b/drivers/char/ftape/lowlevel/ftape-buffer.h
deleted file mode 100644
index eec99ce..0000000
--- a/drivers/char/ftape/lowlevel/ftape-buffer.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef _FTAPE_BUFFER_H
-#define _FTAPE_BUFFER_H
-
-/*
- *      Copyright (C) 1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-buffer.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:08 $
- *
- *  This file contains the allocator/deallocator for ftape's dynamic dma
- *  buffer.
- */
-
-extern int  ftape_set_nr_buffers(int cnt);
-
-#endif
diff --git a/drivers/char/ftape/lowlevel/ftape-calibr.c b/drivers/char/ftape/lowlevel/ftape-calibr.c
deleted file mode 100644
index 8e50bfd..0000000
--- a/drivers/char/ftape/lowlevel/ftape-calibr.c
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- *      Copyright (C) 1993-1996 Bas Laarhoven.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-calibr.c,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:08 $
- *
- *      GP calibration routine for processor speed dependent
- *      functions.
- */
-
-#include <linux/errno.h>
-#include <linux/jiffies.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#if defined(__alpha__)
-# include <asm/hwrpb.h>
-#elif defined(__x86_64__)
-# include <asm/msr.h>
-# include <asm/timex.h>
-#elif defined(__i386__)
-# include <linux/timex.h>
-#endif
-#include <linux/ftape.h>
-#include "../lowlevel/ftape-tracing.h"
-#include "../lowlevel/ftape-calibr.h"
-#include "../lowlevel/fdc-io.h"
-
-#undef DEBUG
-
-#if !defined(__alpha__) && !defined(__i386__) && !defined(__x86_64__)
-# error Ftape is not implemented for this architecture!
-#endif
-
-#if defined(__alpha__) || defined(__x86_64__)
-static unsigned long ps_per_cycle = 0;
-#endif
-
-static spinlock_t calibr_lock;
-
-/*
- * Note: On Intel PCs, the clock ticks at 100 Hz (HZ==100) which is
- * too slow for certain timeouts (and that clock doesn't even tick
- * when interrupts are disabled).  For that reason, the 8254 timer is
- * used directly to implement fine-grained timeouts.  However, on
- * Alpha PCs, the 8254 is *not* used to implement the clock tick
- * (which is 1024 Hz, normally) and the 8254 timer runs at some
- * "random" frequency (it seems to run at 18Hz, but it's not safe to
- * rely on this value).  Instead, we use the Alpha's "rpcc"
- * instruction to read cycle counts.  As this is a 32 bit counter,
- * it will overflow only once per 30 seconds (on a 200MHz machine),
- * which is plenty.
- */
-
-unsigned int ftape_timestamp(void)
-{
-#if defined(__alpha__)
-	unsigned long r;
-
-	asm volatile ("rpcc %0" : "=r" (r));
-	return r;
-#elif defined(__x86_64__)
-	unsigned long r;
-	rdtscl(r);
-	return r;
-#elif defined(__i386__)
-
-/*
- * Note that there is some time between counter underflowing and jiffies
- * increasing, so the code below won't always give correct output.
- * -Vojtech
- */
-
-	unsigned long flags;
-	__u16 lo;
-	__u16 hi;
-
-	spin_lock_irqsave(&calibr_lock, flags);
-	outb_p(0x00, 0x43);	/* latch the count ASAP */
-	lo = inb_p(0x40);	/* read the latched count */
-	lo |= inb(0x40) << 8;
-	hi = jiffies;
-	spin_unlock_irqrestore(&calibr_lock, flags);
-	return ((hi + 1) * (unsigned int) LATCH) - lo;  /* downcounter ! */
-#endif
-}
-
-static unsigned int short_ftape_timestamp(void)
-{
-#if defined(__alpha__) || defined(__x86_64__)
-	return ftape_timestamp();
-#elif defined(__i386__)
-	unsigned int count;
- 	unsigned long flags;
- 
-	spin_lock_irqsave(&calibr_lock, flags);
- 	outb_p(0x00, 0x43);	/* latch the count ASAP */
-	count = inb_p(0x40);	/* read the latched count */
-	count |= inb(0x40) << 8;
-	spin_unlock_irqrestore(&calibr_lock, flags);
-	return (LATCH - count);	/* normal: downcounter */
-#endif
-}
-
-static unsigned int diff(unsigned int t0, unsigned int t1)
-{
-#if defined(__alpha__) || defined(__x86_64__)
-	return (t1 - t0);
-#elif defined(__i386__)
-	/*
-	 * This is tricky: to work for both short and full ftape_timestamps
-	 * we'll have to discriminate between these.
-	 * If it _looks_ like short stamps with wrapping around we'll
-	 * asume it are. This will generate a small error if it really
-	 * was a (very large) delta from full ftape_timestamps.
-	 */
-	return (t1 <= t0 && t0 <= LATCH) ? t1 + LATCH - t0 : t1 - t0;
-#endif
-}
-
-static unsigned int usecs(unsigned int count)
-{
-#if defined(__alpha__) || defined(__x86_64__)
-	return (ps_per_cycle * count) / 1000000UL;
-#elif defined(__i386__)
-	return (10000 * count) / ((CLOCK_TICK_RATE + 50) / 100);
-#endif
-}
-
-unsigned int ftape_timediff(unsigned int t0, unsigned int t1)
-{
-	/*
-	 *  Calculate difference in usec for ftape_timestamp results t0 & t1.
-	 *  Note that on the i386 platform with short time-stamps, the
-	 *  maximum allowed timespan is 1/HZ or we'll lose ticks!
-	 */
-	return usecs(diff(t0, t1));
-}
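As a worked example of the i386 conversion above, assuming the usual PC values CLOCK_TICK_RATE = 1193182 Hz and HZ = 100: one 8254 count lasts 1/1.193182 MHz, about 0.838 usec, and usecs() computes 10000 * count / 11932, i.e. the same roughly 0.838 usec per count; with LATCH at about 11932 counts, a full short-timestamp period is about 10 ms, which is why spans longer than 1/HZ lose ticks.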
-
-/*      To get an indication of the I/O performance,
- *      measure the duration of the inb() function.
- */
-static void time_inb(void)
-{
-	int i;
-	int t0, t1;
-	unsigned long flags;
-	int status;
-	TRACE_FUN(ft_t_any);
-
-	spin_lock_irqsave(&calibr_lock, flags);
-	t0 = short_ftape_timestamp();
-	for (i = 0; i < 1000; ++i) {
-		status = inb(fdc.msr);
-	}
-	t1 = short_ftape_timestamp();
-	spin_unlock_irqrestore(&calibr_lock, flags);
-	TRACE(ft_t_info, "inb() duration: %d nsec", ftape_timediff(t0, t1));
-	TRACE_EXIT;
-}
-
-static void init_clock(void)
-{
-	TRACE_FUN(ft_t_any);
-
-#if defined(__x86_64__)
-	ps_per_cycle = 1000000000UL / cpu_khz;
-#elif defined(__alpha__)
-	extern struct hwrpb_struct *hwrpb;
-	ps_per_cycle = (1000*1000*1000*1000UL) / hwrpb->cycle_freq;
-#endif
-	TRACE_EXIT;
-}
-
-/*
- *      Input:  function taking int count as parameter.
- *              pointers to calculated calibration variables.
- */
-void ftape_calibrate(char *name,
-		    void (*fun) (unsigned int), 
-		    unsigned int *calibr_count, 
-		    unsigned int *calibr_time)
-{
-	static int first_time = 1;
-	int i;
-	unsigned int tc = 0;
-	unsigned int count;
-	unsigned int time;
-#if defined(__i386__)
-	unsigned int old_tc = 0;
-	unsigned int old_count = 1;
-	unsigned int old_time = 1;
-#endif
-	TRACE_FUN(ft_t_flow);
-
-	if (first_time) {             /* get idea of I/O performance */
-		init_clock();
-		time_inb();
-		first_time = 0;
-	}
-	/*    value of timeout must be set so that on very slow systems
-	 *    it will give a time less than one jiffy, and on
-	 *    very fast systems it'll give reasonable precision.
-	 */
-
-	count = 40;
-	for (i = 0; i < 15; ++i) {
-		unsigned int t0;
-		unsigned int t1;
-		unsigned int once;
-		unsigned int multiple;
-		unsigned long flags;
-
-		*calibr_count =
-		*calibr_time = count;	/* set TC to 1 */
-		spin_lock_irqsave(&calibr_lock, flags);
-		fun(0);		/* dummy, get code into cache */
-		t0 = short_ftape_timestamp();
-		fun(0);		/* overhead + one test */
-		t1 = short_ftape_timestamp();
-		once = diff(t0, t1);
-		t0 = short_ftape_timestamp();
-		fun(count);		/* overhead + count tests */
-		t1 = short_ftape_timestamp();
-		multiple = diff(t0, t1);
-		spin_unlock_irqrestore(&calibr_lock, flags);
-		time = ftape_timediff(0, multiple - once);
-		tc = (1000 * time) / (count - 1);
-		TRACE(ft_t_any, "once:%3d us,%6d times:%6d us, TC:%5d ns",
-			usecs(once), count - 1, usecs(multiple), tc);
-#if defined(__alpha__) || defined(__x86_64__)
-		/*
-		 * Increase the calibration count exponentially until the
-		 * calibration time exceeds 100 ms.
-		 */
-		if (time >= 100*1000) {
-			break;
-		}
-#elif defined(__i386__)
-		/*
-		 * increase the count until the resulting time nears 2/HZ,
-		 * then the tc will drop sharply because we lose LATCH counts.
-		 */
-		if (tc <= old_tc / 2) {
-			time = old_time;
-			count = old_count;
-			break;
-		}
-		old_tc = tc;
-		old_count = count;
-		old_time = time;
-#endif
-		count *= 2;
-	}
-	*calibr_count = count - 1;
-	*calibr_time  = time;
-	TRACE(ft_t_info, "TC for `%s()' = %d nsec (at %d counts)",
-	     name, (1000 * *calibr_time) / *calibr_count, *calibr_count);
-	TRACE_EXIT;
-}
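ftape_calibrate() keeps doubling the iteration count until the measured span is long enough to be meaningful and then reports the cost per iteration.  A sketch, with made-up names and loop body, of how a caller could turn a busy-wait loop into a calibrated delay:

static void example_busy_loop(unsigned int count)
{
	volatile unsigned int sink = 0;

	while (count-- > 0)
		sink++;		/* burn a roughly constant time per pass */
}

static unsigned int loops_per_period;	/* filled in by ftape_calibrate() */
static unsigned int period_usecs;

static void example_calibrate_delay(void)
{
	ftape_calibrate("example_busy_loop", example_busy_loop,
			&loops_per_period, &period_usecs);
	/* afterwards, loops_per_period iterations take about period_usecs usec */
}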
diff --git a/drivers/char/ftape/lowlevel/ftape-calibr.h b/drivers/char/ftape/lowlevel/ftape-calibr.h
deleted file mode 100644
index 0c7e752..0000000
--- a/drivers/char/ftape/lowlevel/ftape-calibr.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef _FTAPE_CALIBR_H
-#define _FTAPE_CALIBR_H
-
-/*
- *      Copyright (C) 1993-1996 Bas Laarhoven.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-calibr.h,v $
- * $Revision: 1.1 $
- * $Date: 1997/09/19 09:05:26 $
- *
- *      This file contains a gp calibration routine for
- *      hardware dependent timeout functions.
- */
-
-extern void ftape_calibrate(char *name,
-			    void (*fun) (unsigned int),
-			    unsigned int *calibr_count,
-			    unsigned int *calibr_time);
-extern unsigned int ftape_timestamp(void);
-extern unsigned int ftape_timediff(unsigned int t0, unsigned int t1);
-
-#endif /* _FTAPE_CALIBR_H */
diff --git a/drivers/char/ftape/lowlevel/ftape-ctl.c b/drivers/char/ftape/lowlevel/ftape-ctl.c
deleted file mode 100644
index 5d7c1ce9..0000000
--- a/drivers/char/ftape/lowlevel/ftape-ctl.c
+++ /dev/null
@@ -1,896 +0,0 @@
-/*
- *      Copyright (C) 1993-1996 Bas Laarhoven,
- *                    1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-ctl.c,v $
- * $Revision: 1.4 $
- * $Date: 1997/11/11 14:37:44 $
- *
- *      This file contains the non-read/write ftape functions for the
- *      QIC-40/80/3010/3020 floppy-tape driver "ftape" for Linux.
- */
-
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-
-#include <linux/ftape.h>
-#include <linux/qic117.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
-/* ease porting between pre-2.4.x and later kernels */
-#define vma_get_pgoff(v)      ((v)->vm_pgoff)
-
-#include "../lowlevel/ftape-tracing.h"
-#include "../lowlevel/ftape-io.h"
-#include "../lowlevel/ftape-ctl.h"
-#include "../lowlevel/ftape-write.h"
-#include "../lowlevel/ftape-read.h"
-#include "../lowlevel/ftape-rw.h"
-#include "../lowlevel/ftape-bsm.h"
-
-/*      Global vars.
- */
-ftape_info ftape_status = {
-/*  vendor information */
-	{ 0, },     /* drive type */
-/*  data rates */
-	500,        /* used data rate */
-	500,        /* drive max rate */
-	500,        /* fdc max rate   */
-/*  drive selection, either FTAPE_SEL_A/B/C/D */
-	-1,     /* drive selection */
-/*  flags set after decoding the drive and tape status   */
-	0,          /* formatted */
-	1,          /* no tape */
-	1,          /* write protected */
-	1,          /* new tape */
-/*  values of last queried drive/tape status and error */
-	{{0,}},     /* last error code */
-	{{0,}},     /* drive status, configuration, tape status */
-/*  cartridge geometry */
-        20,         /* tracks_per_tape */
-        102,        /* segments_per_track */
-/*  location of header segments, etc. */
-	-1,     /* used_header_segment */
-	-1,     /* header_segment_1 */
-	-1,     /* header_segment_2 */
-	-1,     /* first_data_segment */
-        -1,     /* last_data_segment */
-/*  the format code as stored in the header segment  */
-	fmt_normal, /* format code */
-/*  the default for the qic std: unknown */
-	-1,
-/*  is tape running? */
-	idle,       /* runner_state */
-/*  is tape reading/writing/verifying/formatting/deleting */
-	idle,       /* driver state */
-/*  flags fatal hardware error */
-	1,          /* failure */
-/*  history record */
-	{ 0, }      /* history record */
-};
-	
-int ftape_segments_per_head     = 1020;
-int ftape_segments_per_cylinder = 4;
-int ftape_init_drive_needed = 1; /* need to be global for ftape_reset_drive()
-				  * in ftape-io.c
-				  */
-
-/*      Local vars.
- */
-static const vendor_struct vendors[] = QIC117_VENDORS;
-static const wakeup_method methods[] = WAKEUP_METHODS;
-
-const ftape_info *ftape_get_status(void)
-{
-#if defined(STATUS_PARANOYA)
-	static ftape_info get_status;
-
-	get_status = ftape_status;
-	return &get_status;
-#else
-	return &ftape_status; /*  maybe return only a copy of it to assure 
-			       *  read only access
-			       */
-#endif
-}
-
-static int ftape_not_operational(int status)
-{
-	/* return true if status indicates tape can not be used.
-	 */
-	return ((status ^ QIC_STATUS_CARTRIDGE_PRESENT) &
-		(QIC_STATUS_ERROR |
-		 QIC_STATUS_CARTRIDGE_PRESENT |
-		 QIC_STATUS_NEW_CARTRIDGE));
-}
-
-int ftape_seek_to_eot(void)
-{
-	int status;
-	TRACE_FUN(ft_t_any);
-
-	TRACE_CATCH(ftape_ready_wait(ftape_timeout.pause, &status),);
-	while ((status & QIC_STATUS_AT_EOT) == 0) {
-		if (ftape_not_operational(status)) {
-			TRACE_EXIT -EIO;
-		}
-		TRACE_CATCH(ftape_command_wait(QIC_PHYSICAL_FORWARD,
-					       ftape_timeout.rewind,&status),);
-	}
-	TRACE_EXIT 0;
-}
-
-int ftape_seek_to_bot(void)
-{
-	int status;
-	TRACE_FUN(ft_t_any);
-
-	TRACE_CATCH(ftape_ready_wait(ftape_timeout.pause, &status),);
-	while ((status & QIC_STATUS_AT_BOT) == 0) {
-		if (ftape_not_operational(status)) {
-			TRACE_EXIT -EIO;
-		}
-		TRACE_CATCH(ftape_command_wait(QIC_PHYSICAL_REVERSE,
-					       ftape_timeout.rewind,&status),);
-	}
-	TRACE_EXIT 0;
-}
-
-static int ftape_new_cartridge(void)
-{
-	ft_location.track = -1; /* force seek on first access */
-	ftape_zap_read_buffers();
-	ftape_zap_write_buffers();
-	return 0;
-}
-
-int ftape_abort_operation(void)
-{
-	int result = 0;
-	int status;
-	TRACE_FUN(ft_t_flow);
-
-	if (ft_runner_status == running) {
-		TRACE(ft_t_noise, "aborting runner, waiting");
-		
-		ft_runner_status = do_abort;
-		/* set timeout so that the tape will run to logical EOT
-		 * if we missed the last sector and there are no queue pulses.
-		 */
-		result = ftape_dumb_stop();
-	}
-	if (ft_runner_status != idle) {
-		if (ft_runner_status == do_abort) {
-			TRACE(ft_t_noise, "forcing runner abort");
-		}
-		TRACE(ft_t_noise, "stopping tape");
-		result = ftape_stop_tape(&status);
-		ft_location.known = 0;
-		ft_runner_status  = idle;
-	}
-	ftape_reset_buffer();
-	ftape_zap_read_buffers();
-	ftape_set_state(idle);
-	TRACE_EXIT result;
-}
-
-static int lookup_vendor_id(unsigned int vendor_id)
-{
-	int i = 0;
-
-	while (vendors[i].vendor_id != vendor_id) {
-		if (++i >= NR_ITEMS(vendors)) {
-			return -1;
-		}
-	}
-	return i;
-}
-
-static void ftape_detach_drive(void)
-{
-	TRACE_FUN(ft_t_any);
-
-	TRACE(ft_t_flow, "disabling tape drive and fdc");
-	ftape_put_drive_to_sleep(ft_drive_type.wake_up);
-	fdc_catch_stray_interrupts(1);	/* one always comes */
-	fdc_disable();
-	fdc_release_irq_and_dma();
-	fdc_release_regions();
-	TRACE_EXIT;
-}
-
-static void clear_history(void)
-{
-	ft_history.used = 0;
-	ft_history.id_am_errors =
-		ft_history.id_crc_errors =
-		ft_history.data_am_errors =
-		ft_history.data_crc_errors =
-		ft_history.overrun_errors =
-		ft_history.no_data_errors =
-		ft_history.retries =
-		ft_history.crc_errors =
-		ft_history.crc_failures =
-		ft_history.ecc_failures =
-		ft_history.corrected =
-		ft_history.defects =
-		ft_history.rewinds = 0;
-}
-
-static int ftape_activate_drive(vendor_struct * drive_type)
-{
-	int result = 0;
-	TRACE_FUN(ft_t_flow);
-
-	/* If we already know the drive type, wake it up.
-	 * Else try to find out what kind of drive is attached.
-	 */
-	if (drive_type->wake_up != unknown_wake_up) {
-		TRACE(ft_t_flow, "enabling tape drive and fdc");
-		result = ftape_wakeup_drive(drive_type->wake_up);
-		if (result < 0) {
-			TRACE(ft_t_err, "known wakeup method failed");
-		}
-	} else {
-		wake_up_types method;
-		const ft_trace_t old_tracing = TRACE_LEVEL;
-		if (TRACE_LEVEL < ft_t_flow) {
-			SET_TRACE_LEVEL(ft_t_bug);
-		}
-
-		/*  Try to awaken the drive using all known methods.
-		 *  Lower tracing for a while.
-		 */
-		for (method=no_wake_up; method < NR_ITEMS(methods); ++method) {
-			drive_type->wake_up = method;
-#ifdef CONFIG_FT_TWO_DRIVES
-			/*  Test setup for dual drive configuration.
-			 *  /dev/rft2 uses mountain wakeup
-			 *  /dev/rft3 uses colorado wakeup
-			 *  Other systems will use the normal scheme.
-			 */
-			if ((ft_drive_sel < 2)                            ||
-			    (ft_drive_sel == 2 && method == FT_WAKE_UP_1) ||
-			    (ft_drive_sel == 3 && method == FT_WAKE_UP_2)) {
-				result=ftape_wakeup_drive(drive_type->wake_up);
-			} else {
-				result = -EIO;
-			}
-#else
-			result = ftape_wakeup_drive(drive_type->wake_up);
-#endif
-			if (result >= 0) {
-				TRACE(ft_t_warn, "drive wakeup method: %s",
-				      methods[drive_type->wake_up].name);
-				break;
-			}
-		}
-		SET_TRACE_LEVEL(old_tracing);
-
-		if (method >= NR_ITEMS(methods)) {
-			/* no response at all, cannot open this drive */
-			drive_type->wake_up = unknown_wake_up;
-			TRACE(ft_t_err, "no tape drive found !");
-			result = -ENODEV;
-		}
-	}
-	TRACE_EXIT result;
-}
-
-static int ftape_get_drive_status(void)
-{
-	int result;
-	int status;
-	TRACE_FUN(ft_t_flow);
-
-	ft_no_tape = ft_write_protected = 0;
-	/*    Tape drive is activated now.
-	 *    First clear error status if present.
-	 */
-	do {
-		result = ftape_ready_wait(ftape_timeout.reset, &status);
-		if (result < 0) {
-			if (result == -ETIME) {
-				TRACE(ft_t_err, "ftape_ready_wait timeout");
-			} else if (result == -EINTR) {
-				TRACE(ft_t_err, "ftape_ready_wait aborted");
-			} else {
-				TRACE(ft_t_err, "ftape_ready_wait failed");
-			}
-			TRACE_EXIT -EIO;
-		}
-		/*  Clear error condition (drive is ready !)
-		 */
-		if (status & QIC_STATUS_ERROR) {
-			unsigned int error;
-			qic117_cmd_t command;
-
-			TRACE(ft_t_err, "error status set");
-			result = ftape_report_error(&error, &command, 1);
-			if (result < 0) {
-				TRACE(ft_t_err,
-				      "report_error_code failed: %d", result);
-				/* hope it's working next time */
-				ftape_reset_drive();
-				TRACE_EXIT -EIO;
-			} else if (error != 0) {
-				TRACE(ft_t_noise, "error code   : %d", error);
-				TRACE(ft_t_noise, "error command: %d", command);
-			}
-		}
-		if (status & QIC_STATUS_NEW_CARTRIDGE) {
-			unsigned int error;
-			qic117_cmd_t command;
-			const ft_trace_t old_tracing = TRACE_LEVEL;
-			SET_TRACE_LEVEL(ft_t_bug);
-
-			/*  Undocumented feature: Must clear (not present!)
-			 *  error here or we'll fail later.
-			 */
-			ftape_report_error(&error, &command, 1);
-
-			SET_TRACE_LEVEL(old_tracing);
-			TRACE(ft_t_info, "status: new cartridge");
-			ft_new_tape = 1;
-		} else {
-			ft_new_tape = 0;
-		}
-		FT_SIGNAL_EXIT(_DONT_BLOCK);
-	} while (status & QIC_STATUS_ERROR);
-	
-	ft_no_tape = !(status & QIC_STATUS_CARTRIDGE_PRESENT);
-	ft_write_protected = (status & QIC_STATUS_WRITE_PROTECT) != 0;
-	if (ft_no_tape) {
-		TRACE(ft_t_warn, "no cartridge present");
-	} else {
-		if (ft_write_protected) {
-			TRACE(ft_t_noise, "Write protected cartridge");
-		}
-	}
-	TRACE_EXIT 0;
-}
-
-static void ftape_log_vendor_id(void)
-{
-	int vendor_index;
-	TRACE_FUN(ft_t_flow);
-
-	ftape_report_vendor_id(&ft_drive_type.vendor_id);
-	vendor_index = lookup_vendor_id(ft_drive_type.vendor_id);
-	if (ft_drive_type.vendor_id == UNKNOWN_VENDOR &&
-	    ft_drive_type.wake_up == wake_up_colorado) {
-		vendor_index = 0;
-		/* hack to get rid of all this mail */
-		ft_drive_type.vendor_id = 0;
-	}
-	if (vendor_index < 0) {
-		/* Unknown vendor id, first time opening device.  The
-		 * drive_type remains set to type found at wakeup
-		 * time, this will probably keep the driver operating
-		 * for this new vendor.  
-		 */
-		TRACE(ft_t_warn, "\n"
-		      KERN_INFO "============ unknown vendor id ===========\n"
-		      KERN_INFO "A new, yet unsupported tape drive is found\n"
-		      KERN_INFO "Please report the following values:\n"
-		      KERN_INFO "   Vendor id     : 0x%04x\n"
-		      KERN_INFO "   Wakeup method : %s\n"
-		      KERN_INFO "And a description of your tape drive\n"
-		      KERN_INFO "to "THE_FTAPE_MAINTAINER"\n"
-		      KERN_INFO "==========================================",
-		      ft_drive_type.vendor_id,
-		      methods[ft_drive_type.wake_up].name);
-		ft_drive_type.speed = 0;		/* unknown */
-	} else {
-		ft_drive_type.name  = vendors[vendor_index].name;
-		ft_drive_type.speed = vendors[vendor_index].speed;
-		TRACE(ft_t_info, "tape drive type: %s", ft_drive_type.name);
-		/* scan all methods for this vendor_id in table */
-		while(ft_drive_type.wake_up != vendors[vendor_index].wake_up) {
-			if (vendor_index < NR_ITEMS(vendors) - 1 &&
-			    vendors[vendor_index + 1].vendor_id 
-			    == 
-			    ft_drive_type.vendor_id) {
-				++vendor_index;
-			} else {
-				break;
-			}
-		}
-		if (ft_drive_type.wake_up != vendors[vendor_index].wake_up) {
-			TRACE(ft_t_warn, "\n"
-		     KERN_INFO "==========================================\n"
-		     KERN_INFO "wakeup type mismatch:\n"
-		     KERN_INFO "found: %s, expected: %s\n"
-		     KERN_INFO "please report this to "THE_FTAPE_MAINTAINER"\n"
-		     KERN_INFO "==========================================",
-			      methods[ft_drive_type.wake_up].name,
-			      methods[vendors[vendor_index].wake_up].name);
-		}
-	}
-	TRACE_EXIT;
-}
-
-void ftape_calc_timeouts(unsigned int qic_std,
-			 unsigned int data_rate,
-			 unsigned int tape_len)
-{
-	int speed;		/* deci-ips ! */
-	int ff_speed;
-	int length;
-	TRACE_FUN(ft_t_any);
-
-	/*                           tape transport speed
-	 *  data rate:        QIC-40   QIC-80   QIC-3010 QIC-3020
-	 *
-	 *    250 Kbps        25 ips     n/a      n/a      n/a
-	 *    500 Kbps        50 ips   34 ips   22.6 ips   n/a
-	 *      1 Mbps          n/a    68 ips   45.2 ips 22.6 ips
-	 *      2 Mbps          n/a      n/a      n/a    45.2 ips
-	 *
-	 *  fast tape transport speed is at least 68 ips.
-	 */
-	switch (qic_std) {
-	case QIC_TAPE_QIC40:
-		speed = (data_rate == 250) ? 250 : 500;
-		break;
-	case QIC_TAPE_QIC80:
-		speed = (data_rate == 500) ? 340 : 680;
-		break;
-	case QIC_TAPE_QIC3010:
-		speed = (data_rate == 500) ? 226 : 452;
-		break;
-	case QIC_TAPE_QIC3020:
-		speed = (data_rate == 1000) ? 226 : 452;
-		break;
-	default:
-		TRACE(ft_t_bug, "Unknown qic_std (bug) ?");
-		speed = 500;
-		break;
-	}
-	if (ft_drive_type.speed == 0) {
-		unsigned long t0;
-		static int dt = 0;     /* keep gcc from complaining */
-		static int first_time = 1;
-
-		/*  Measure the time it takes to wind to EOT and back to BOT.
-		 *  If the tape length is known, calculate the rewind speed.
-		 *  Else keep the time value for calculation of the rewind
-		 *  speed later on, when the length _is_ known.
-		 *  Ask for a report only when length and speed are both known.
-		 */
-		if (first_time) {
-			ftape_seek_to_bot();
-			t0 = jiffies;
-			ftape_seek_to_eot();
-			ftape_seek_to_bot();
-			dt = (int) (((jiffies - t0) * FT_USPT) / 1000);
-			if (dt < 1) {
-				dt = 1;	/* prevent div by zero on failures */
-			}
-			first_time = 0;
-			TRACE(ft_t_info,
-			      "trying to determine seek timeout, got %d msec",
-			      dt);
-		}
-		if (tape_len != 0) {
-			ft_drive_type.speed = 
-				(2 * 12 * tape_len * 1000) / dt;
-			TRACE(ft_t_warn, "\n"
-		     KERN_INFO "==========================================\n"
-		     KERN_INFO "drive type: %s\n"
-		     KERN_INFO "delta time = %d ms, length = %d ft\n"
-		     KERN_INFO "has a maximum tape speed of %d ips\n"
-		     KERN_INFO "please report this to "THE_FTAPE_MAINTAINER"\n"
-		     KERN_INFO "==========================================",
-			      ft_drive_type.name, dt, tape_len, 
-			      ft_drive_type.speed);
-		}
-	}
-	/*  Handle unknown length tapes as very long ones. We'll
-	 *  determine the actual length from a header segment later.
-	 *  This is normal for all modern (Wide,TR1/2/3) formats.
-	 */
-	if (tape_len <= 0) {
-		TRACE(ft_t_noise,
-		      "Unknown tape length, using maximal timeouts");
-		length = QIC_TOP_TAPE_LEN;	/* use worst case values */
-	} else {
-		length = tape_len;		/* use actual values */
-	}
-	if (ft_drive_type.speed == 0) {
-		ff_speed = speed; 
-	} else {
-		ff_speed = ft_drive_type.speed;
-	}
-	/*  time to go from bot to eot at normal speed (data rate):
-	 *  time = (1+delta) * length (ft) * 12 (inch/ft) / speed (ips)
-	 *  delta = 10 % for seek speed, 20 % for rewind speed.
-	 */
-	ftape_timeout.seek = (length * 132 * FT_SECOND) / speed;
-	ftape_timeout.rewind = (length * 144 * FT_SECOND) / (10 * ff_speed);
-	ftape_timeout.reset = 20 * FT_SECOND + ftape_timeout.rewind;
-	TRACE(ft_t_noise, "timeouts for speed = %d, length = %d\n"
-	      KERN_INFO "seek timeout  : %d sec\n"
-	      KERN_INFO "rewind timeout: %d sec\n"
-	      KERN_INFO "reset timeout : %d sec",
-	      speed, length,
-	      (ftape_timeout.seek + 500) / 1000,
-	      (ftape_timeout.rewind + 500) / 1000,
-	      (ftape_timeout.reset + 500) / 1000);
-	TRACE_EXIT;
-}
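To make the timeout formula concrete, a worked example with illustrative figures: a 205 ft QIC-80 cartridge at 500 Kbps gives speed = 340 (deci-ips), so the seek timeout is 205 * 132 / 340, roughly 80 seconds; with a known fast-wind speed of 68 ips the rewind timeout is 205 * 144 / (10 * 68), roughly 43 seconds, and the reset timeout adds another 20 seconds on top of the rewind value.  All of this assumes FT_SECOND counts milliseconds, which is what the /1000 in the trace output above suggests.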
-
-/* This function calibrates the datarate (i.e. determines the maximal
- * usable data rate) and sets the global variable ft_qic_std to qic_std
- *
- */
-int ftape_calibrate_data_rate(unsigned int qic_std)
-{
-	int rate = ft_fdc_rate_limit;
-	int result;
-	TRACE_FUN(ft_t_flow);
-
-	ft_qic_std = qic_std;
-
-	if (ft_qic_std == -1) {
-		TRACE_ABORT(-EIO, ft_t_err,
-		"Unable to determine data rate if QIC standard is unknown");
-	}
-
-	/*  Select highest rate supported by both fdc and drive.
-	 *  Start with highest rate supported by the fdc.
-	 */
-	while (fdc_set_data_rate(rate) < 0 && rate > 250) {
-		rate /= 2;
-	}
-	TRACE(ft_t_info,
-	      "Highest FDC supported data rate: %d Kbps", rate);
-	ft_fdc_max_rate = rate;
-	do {
-		result = ftape_set_data_rate(rate, ft_qic_std);
-	} while (result == -EINVAL && (rate /= 2) > 250);
-	if (result < 0) {
-		TRACE_ABORT(-EIO, ft_t_err, "set datarate failed");
-	}
-	ft_data_rate = rate;
-	TRACE_EXIT 0;
-}
-
-static int ftape_init_drive(void)
-{
-	int status;
-	qic_model model;
-	unsigned int qic_std;
-	unsigned int data_rate;
-	TRACE_FUN(ft_t_flow);
-
-	ftape_init_drive_needed = 0; /* don't retry if this fails ? */
-	TRACE_CATCH(ftape_report_raw_drive_status(&status),);
-	if (status & QIC_STATUS_CARTRIDGE_PRESENT) {
-		if (!(status & QIC_STATUS_AT_BOT)) {
-			/*  Antique drives will get here after a soft reset,
-			 *  modern ones only if the driver is loaded when the
-			 *  tape wasn't rewound properly.
-			 */
-			/* Tape should be at bot if new cartridge ! */
-			ftape_seek_to_bot();
-		}
-		if (!(status & QIC_STATUS_REFERENCED)) {
-			TRACE(ft_t_flow, "starting seek_load_point");
-			TRACE_CATCH(ftape_command_wait(QIC_SEEK_LOAD_POINT,
-						       ftape_timeout.reset,
-						       &status),);
-		}
-	}
-	ft_formatted = (status & QIC_STATUS_REFERENCED) != 0;
-	if (!ft_formatted) {
-		TRACE(ft_t_warn, "Warning: tape is not formatted !");
-	}
-
-	/*  report configuration aborts when ftape_tape_len == -1;
-	 *  an unknown qic_std is okay if the tape is not formatted.
-	 */
-	TRACE_CATCH(ftape_report_configuration(&model,
-					       &data_rate,
-					       &qic_std,
-					       &ftape_tape_len),);
-
-	/*  Maybe add the following to the /proc entry
-	 */
-	TRACE(ft_t_info, "%s drive @ %d Kbps",
-	      (model == prehistoric) ? "prehistoric" :
-	      ((model == pre_qic117c) ? "pre QIC-117C" :
-	       ((model == post_qic117b) ? "post QIC-117B" :
-		"post QIC-117D")), data_rate);
-
-	if (ft_formatted) {
-		/*  initialize ft_used_data_rate to maximum value 
-		 *  and set ft_qic_std
-		 */
-		TRACE_CATCH(ftape_calibrate_data_rate(qic_std),);
-		if (ftape_tape_len == 0) {
-			TRACE(ft_t_info, "unknown length QIC-%s tape",
-			      (ft_qic_std == QIC_TAPE_QIC40) ? "40" :
-			      ((ft_qic_std == QIC_TAPE_QIC80) ? "80" :
-			       ((ft_qic_std == QIC_TAPE_QIC3010) 
-				? "3010" : "3020")));
-		} else {
-			TRACE(ft_t_info, "%d ft. QIC-%s tape", ftape_tape_len,
-			      (ft_qic_std == QIC_TAPE_QIC40) ? "40" :
-			      ((ft_qic_std == QIC_TAPE_QIC80) ? "80" :
-			       ((ft_qic_std == QIC_TAPE_QIC3010)
-				? "3010" : "3020")));
-		}
-		ftape_calc_timeouts(ft_qic_std, ft_data_rate, ftape_tape_len);
-		/* soft write-protect QIC-40/QIC-80 cartridges used with a
-		 * Colorado T3000 drive. Buggy hardware!
-		 */
-		if ((ft_drive_type.vendor_id == 0x011c6) &&
-		    ((ft_qic_std == QIC_TAPE_QIC40 ||
-		      ft_qic_std == QIC_TAPE_QIC80) &&
-		     !ft_write_protected)) {
-			TRACE(ft_t_warn, "\n"
-	KERN_INFO "The famous Colorado T3000 bug:\n"
-	KERN_INFO "%s drives can't write QIC40 and QIC80\n"
-	KERN_INFO "cartridges but don't set the write-protect flag!",
-			      ft_drive_type.name);
-			ft_write_protected = 1;
-		}
-	} else {
-		/*  Doesn't make too much sense to set the data rate
-		 *  because we don't know what to use for the write
-		 *  precompensation.
-		 *  Need to do this again when formatting the cartridge.
-		 */
-		ft_data_rate = data_rate;
-		ftape_calc_timeouts(QIC_TAPE_QIC40,
-				    data_rate,
-				    ftape_tape_len);
-	}
-	ftape_new_cartridge();
-	TRACE_EXIT 0;
-}
-
-static void ftape_munmap(void)
-{
-	int i;
-	TRACE_FUN(ft_t_flow);
-	
-	for (i = 0; i < ft_nr_buffers; i++) {
-		ft_buffer[i]->mmapped = 0;
-	}
-	TRACE_EXIT;
-}
-
-/*   Map the dma buffers into the virtual address range given by vma.
- *   We only check the caller doesn't map non-existent buffers. We
- *   don't check for multiple mappings.
- */
-int ftape_mmap(struct vm_area_struct *vma)
-{
-	int num_buffers;
-	int i;
-	TRACE_FUN(ft_t_flow);
-	
-	if (ft_failure) {
-		TRACE_EXIT -ENODEV;
-	}
-	if (!(vma->vm_flags & (VM_READ|VM_WRITE))) {
-		TRACE_ABORT(-EINVAL, ft_t_err, "Undefined mmap() access");
-	}
-	if (vma_get_pgoff(vma) != 0) {
-		TRACE_ABORT(-EINVAL, ft_t_err, "page offset must be 0");
-	}
-	if ((vma->vm_end - vma->vm_start) % FT_BUFF_SIZE != 0) {
-		TRACE_ABORT(-EINVAL, ft_t_err,
-			    "size = %ld, should be a multiple of %d",
-			    vma->vm_end - vma->vm_start,
-			    FT_BUFF_SIZE);
-	}
-	num_buffers = (vma->vm_end - vma->vm_start) / FT_BUFF_SIZE;
-	if (num_buffers > ft_nr_buffers) {
-		TRACE_ABORT(-EINVAL,
-			    ft_t_err, "size = %ld, should be less than %d",
-			    vma->vm_end - vma->vm_start,
-			    ft_nr_buffers * FT_BUFF_SIZE);
-	}
-	if (ft_driver_state != idle) {
-		/* this also clears the buffer states 
-		 */
-		ftape_abort_operation();
-	} else {
-		ftape_reset_buffer();
-	}
-	for (i = 0; i < num_buffers; i++) {
-		unsigned long pfn;
-
-		pfn = virt_to_phys(ft_buffer[i]->address) >> PAGE_SHIFT;
-		TRACE_CATCH(remap_pfn_range(vma, vma->vm_start +
-					     i * FT_BUFF_SIZE,
-					     pfn,
-					     FT_BUFF_SIZE,
-					     vma->vm_page_prot),
-			    _res = -EAGAIN);
-		TRACE(ft_t_noise, "remapped dma buffer @ %p to location @ %p",
-		      ft_buffer[i]->address,
-		      (void *)(vma->vm_start + i * FT_BUFF_SIZE));
-	}
-	for (i = 0; i < num_buffers; i++) {
-		memset(ft_buffer[i]->address, 0xAA, FT_BUFF_SIZE);
-		ft_buffer[i]->mmapped++;
-	}	
-	TRACE_EXIT 0;
-}
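The user-space side of this mapping is not part of the driver; as a sketch only (the device node name and buffer count are assumptions), a program would map the buffers with page offset 0 and a length that is a multiple of FT_BUFF_SIZE:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#define EXAMPLE_BUFF_SIZE 0x8000	/* FT_BUFF_SIZE, assumed 32 KiB */

static void *example_map_ftape_buffers(const char *dev, int nr_buffers)
{
	size_t len = (size_t)nr_buffers * EXAMPLE_BUFF_SIZE;
	int fd = open(dev, O_RDWR);	/* e.g. a raw ftape device node */
	void *p;

	if (fd < 0)
		return NULL;
	/* offset must be 0, len a multiple of the buffer size (see above) */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	return p == MAP_FAILED ? NULL : p;
}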
-
-static void ftape_init_driver(void); /* forward declaration */
-
-/*      OPEN routine called by kernel-interface code
- */
-int ftape_enable(int drive_selection)
-{
-	TRACE_FUN(ft_t_any);
-
-	if (ft_drive_sel == -1 || ft_drive_sel != drive_selection) {
-		/* Other selection than last time
-		 */
-		ftape_init_driver();
-	}
-	ft_drive_sel = FTAPE_SEL(drive_selection);
-	ft_failure = 0;
-	TRACE_CATCH(fdc_init(),); /* init & detect fdc */
-	TRACE_CATCH(ftape_activate_drive(&ft_drive_type),
-		    fdc_disable();
-		    fdc_release_irq_and_dma();
-		    fdc_release_regions());
-	TRACE_CATCH(ftape_get_drive_status(), ftape_detach_drive());
-	if (ft_drive_type.vendor_id == UNKNOWN_VENDOR) {
-		ftape_log_vendor_id();
-	}
-	if (ft_new_tape) {
-		ftape_init_drive_needed = 1;
-	}
-	if (!ft_no_tape && ftape_init_drive_needed) {
-		TRACE_CATCH(ftape_init_drive(), ftape_detach_drive());
-	}
-	ftape_munmap(); /* clear the mmap flag */
-	clear_history();
-	TRACE_EXIT 0;
-}
-
-/*   release routine called by the high level interface modules
- *   zftape or sftape.
- */
-void ftape_disable(void)
-{
-	int i;
-	TRACE_FUN(ft_t_any);
-
-	for (i = 0; i < ft_nr_buffers; i++) {
-		if (ft_buffer[i]->mmapped) {
-			TRACE(ft_t_noise, "first byte of buffer %d: 0x%02x",
-			      i, *ft_buffer[i]->address);
-		}
-	}
-	if (sigtestsetmask(&current->pending.signal, _DONT_BLOCK) && 
-	    !(sigtestsetmask(&current->pending.signal, _NEVER_BLOCK)) &&
-	    ftape_tape_running) {
-		TRACE(ft_t_warn,
-		      "Interrupted by fatal signal and tape still running");
-		ftape_dumb_stop();
-		ftape_abort_operation(); /* it's annoying */
-	} else {
-		ftape_set_state(idle);
-	}
-	ftape_detach_drive();
-	if (ft_history.used) {
-		TRACE(ft_t_info, "== Non-fatal errors this run: ==");
-		TRACE(ft_t_info, "fdc isr statistics:\n"
-		      KERN_INFO " id_am_errors     : %3d\n"
-		      KERN_INFO " id_crc_errors    : %3d\n"
-		      KERN_INFO " data_am_errors   : %3d\n"
-		      KERN_INFO " data_crc_errors  : %3d\n"
-		      KERN_INFO " overrun_errors   : %3d\n"
-		      KERN_INFO " no_data_errors   : %3d\n"
-		      KERN_INFO " retries          : %3d",
-		      ft_history.id_am_errors,   ft_history.id_crc_errors,
-		      ft_history.data_am_errors, ft_history.data_crc_errors,
-		      ft_history.overrun_errors, ft_history.no_data_errors,
-		      ft_history.retries);
-		if (ft_history.used & 1) {
-			TRACE(ft_t_info, "ecc statistics:\n"
-			      KERN_INFO " crc_errors       : %3d\n"
-			      KERN_INFO " crc_failures     : %3d\n"
-			      KERN_INFO " ecc_failures     : %3d\n"
-			      KERN_INFO " sectors corrected: %3d",
-			      ft_history.crc_errors,   ft_history.crc_failures,
-			      ft_history.ecc_failures, ft_history.corrected);
-		}
-		if (ft_history.defects > 0) {
-			TRACE(ft_t_warn, "Warning: %d media defects!",
-			      ft_history.defects);
-		}
-		if (ft_history.rewinds > 0) {
-			TRACE(ft_t_info, "tape motion statistics:\n"
-			      KERN_INFO "repositions       : %3d",
-			      ft_history.rewinds);
-		}
-	}
-	ft_failure = 1;
-	TRACE_EXIT;
-}
-
-static void ftape_init_driver(void)
-{
-	TRACE_FUN(ft_t_flow);
-
-	ft_drive_type.vendor_id = UNKNOWN_VENDOR;
-	ft_drive_type.speed     = 0;
-	ft_drive_type.wake_up   = unknown_wake_up;
-	ft_drive_type.name      = "Unknown";
-
-	ftape_timeout.seek      = 650 * FT_SECOND;
-	ftape_timeout.reset     = 670 * FT_SECOND;
-	ftape_timeout.rewind    = 650 * FT_SECOND;
-	ftape_timeout.head_seek =  15 * FT_SECOND;
-	ftape_timeout.stop      =   5 * FT_SECOND;
-	ftape_timeout.pause     =  16 * FT_SECOND;
-
-	ft_qic_std             = -1;
-	ftape_tape_len         = 0;  /* unknown */
-	ftape_current_command  = 0;
-	ftape_current_cylinder = -1;
-
-	ft_segments_per_track       = 102;
-	ftape_segments_per_head     = 1020;
-	ftape_segments_per_cylinder = 4;
-	ft_tracks_per_tape          = 20;
-
-	ft_failure = 1;
-
-	ft_formatted       = 0;
-	ft_no_tape         = 1;
-	ft_write_protected = 1;
-	ft_new_tape        = 1;
-
-	ft_driver_state = idle;
-
-	ft_data_rate = 
-		ft_fdc_max_rate   = 500;
-	ft_drive_max_rate = 0; /* triggers set_rate_test() */
-
-	ftape_init_drive_needed = 1;
-
-	ft_header_segment_1    = -1;
-	ft_header_segment_2    = -1;
-	ft_used_header_segment = -1;
-	ft_first_data_segment  = -1;
-	ft_last_data_segment   = -1;
-
-	ft_location.track = -1;
-	ft_location.known = 0;
-
-	ftape_tape_running = 0;
-	ftape_might_be_off_track = 1;
-
-	ftape_new_cartridge();	/* init some tape related variables */
-	ftape_init_bsm();
-	TRACE_EXIT;
-}
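ftape_enable() and ftape_disable() bracket every session: the former wakes and probes the drive, the latter logs the error history and puts it back to sleep.  A minimal sketch of the open/release pairing a front end such as zftape or sftape would perform (the minor-number decoding is an assumption made for the example):

static int example_open(unsigned int minor)
{
	/* low two bits select drive A..D in this sketch */
	return ftape_enable(minor & 3);
}

static void example_release(void)
{
	ftape_disable();	/* report statistics, detach the drive */
}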
diff --git a/drivers/char/ftape/lowlevel/ftape-ctl.h b/drivers/char/ftape/lowlevel/ftape-ctl.h
deleted file mode 100644
index 5f5e30b..0000000
--- a/drivers/char/ftape/lowlevel/ftape-ctl.h
+++ /dev/null
@@ -1,162 +0,0 @@
-#ifndef _FTAPE_CTL_H
-#define _FTAPE_CTL_H
-
-/*
- * Copyright (C) 1993-1996 Bas Laarhoven,
- *           (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-ctl.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:09 $
- *
- *      This file contains the non-standard IOCTL related definitions
- *      for the QIC-40/80/3010/3020 floppy-tape driver "ftape" for
- *      Linux.
- */
-
-#include <linux/ioctl.h>
-#include <linux/mtio.h>
-#include <linux/ftape-vendors.h>
-
-#include "../lowlevel/ftape-rw.h"
-#include <linux/ftape-header-segment.h>
-
-typedef struct {
-	int used;		/* any reading or writing done */
-	/* isr statistics */
-	unsigned int id_am_errors;	/* id address mark not found */
-	unsigned int id_crc_errors;	/* crc error in id address mark */
-	unsigned int data_am_errors;	/* data address mark not found */
-	unsigned int data_crc_errors;	/* crc error in data field */
-	unsigned int overrun_errors;	/* fdc access timing problem */
-	unsigned int no_data_errors;	/* sector not found */
-	unsigned int retries;	/* number of tape retries */
-	/* ecc statistics */
-	unsigned int crc_errors;	/* crc error in data */
-	unsigned int crc_failures;	/* bad data without crc error */
-	unsigned int ecc_failures;	/* failed to correct */
-	unsigned int corrected;	/* total sectors corrected */
-	/* general statistics */
-	unsigned int rewinds;	/* number of tape rewinds */
-	unsigned int defects;	/* bad sectors due to media defects */
-} history_record;
-
-/* this structure contains * ALL * information that we want
- * our child modules to know about, but don't want them to
- * modify. 
- */
-typedef struct {
-	/*  vendor information */
-	vendor_struct fti_drive_type;
-	/*  data rates */
-	unsigned int fti_used_data_rate;
-	unsigned int fti_drive_max_rate;
-	unsigned int fti_fdc_max_rate;
-	/*  drive selection, either FTAPE_SEL_A/B/C/D */
-	int fti_drive_sel;      
-	/*  flags set after decoding the drive and tape status   */
-	unsigned int fti_formatted      :1;
-	unsigned int fti_no_tape        :1;
-	unsigned int fti_write_protected:1;
-	unsigned int fti_new_tape       :1;
-	/*  values of last queried drive/tape status and error */
-	ft_drive_error  fti_last_error;
-	ft_drive_status fti_last_status;
-	/*  cartridge geometry */
-	unsigned int fti_tracks_per_tape;
-	unsigned int fti_segments_per_track;
-	/*  location of header segments, etc. */
-	int fti_used_header_segment;
-	int fti_header_segment_1;
-	int fti_header_segment_2;
-	int fti_first_data_segment;
-	int fti_last_data_segment;
-	/*  the format code as stored in the header segment  */
-	ft_format_type  fti_format_code;
-	/*  the following is the sole reason for the ftape_set_status() call */
-	unsigned int fti_qic_std;
-	/*  is tape running? */
-	volatile enum runner_status_enum fti_runner_status;
-	/*  is tape reading/writing/verifying/formatting/deleting */
-	buffer_state_enum fti_state;
-	/*  flags fatal hardware error */
-	unsigned int fti_failure:1;
-	/*  history record */
-	history_record fti_history;
-} ftape_info;
-
-/* vendor information */
-#define ft_drive_type          ftape_status.fti_drive_type
-/*  data rates */
-#define ft_data_rate           ftape_status.fti_used_data_rate
-#define ft_drive_max_rate      ftape_status.fti_drive_max_rate
-#define ft_fdc_max_rate        ftape_status.fti_fdc_max_rate
-/*  drive selection, either FTAPE_SEL_A/B/C/D */
-#define ft_drive_sel           ftape_status.fti_drive_sel
-/*  flags set after decoding the drive and tape status   */
-#define ft_formatted           ftape_status.fti_formatted
-#define ft_no_tape             ftape_status.fti_no_tape
-#define ft_write_protected     ftape_status.fti_write_protected
-#define ft_new_tape            ftape_status.fti_new_tape
-/*  values of last queried drive/tape status and error */
-#define ft_last_error          ftape_status.fti_last_error
-#define ft_last_status         ftape_status.fti_last_status
-/*  cartridge geometry */
-#define ft_tracks_per_tape     ftape_status.fti_tracks_per_tape
-#define ft_segments_per_track  ftape_status.fti_segments_per_track
-/*  the format code as stored in the header segment  */
-#define ft_format_code         ftape_status.fti_format_code
-/*  the qic status as returned by report drive configuration */
-#define ft_qic_std             ftape_status.fti_qic_std
-#define ft_used_header_segment ftape_status.fti_used_header_segment
-#define ft_header_segment_1    ftape_status.fti_header_segment_1
-#define ft_header_segment_2    ftape_status.fti_header_segment_2
-#define ft_first_data_segment  ftape_status.fti_first_data_segment
-#define ft_last_data_segment   ftape_status.fti_last_data_segment
-/*  is tape running? */
-#define ft_runner_status       ftape_status.fti_runner_status
-/*  is tape reading/writing/verifying/formatting/deleting */
-#define ft_driver_state        ftape_status.fti_state
-/*  flags fatal hardware error */
-#define ft_failure             ftape_status.fti_failure
-/*  history record */
-#define ft_history             ftape_status.fti_history
-
-/*
- *      ftape-ctl.c defined global vars.
- */
-extern ftape_info ftape_status;
-extern int ftape_segments_per_head;
-extern int ftape_segments_per_cylinder;
-extern int ftape_init_drive_needed;
-
-/*
- *      ftape-ctl.c defined global functions.
- */
-extern int  ftape_mmap(struct vm_area_struct *vma);
-extern int  ftape_enable(int drive_selection);
-extern void ftape_disable(void);
-extern int  ftape_seek_to_bot(void);
-extern int  ftape_seek_to_eot(void);
-extern int  ftape_abort_operation(void);
-extern void ftape_calc_timeouts(unsigned int qic_std,
-				 unsigned int data_rate,
-				 unsigned int tape_len);
-extern int  ftape_calibrate_data_rate(unsigned int qic_std);
-extern const ftape_info *ftape_get_status(void);
-#endif
diff --git a/drivers/char/ftape/lowlevel/ftape-ecc.c b/drivers/char/ftape/lowlevel/ftape-ecc.c
deleted file mode 100644
index e5632f6..0000000
--- a/drivers/char/ftape/lowlevel/ftape-ecc.c
+++ /dev/null
@@ -1,853 +0,0 @@
-/*
- *
- *      Copyright (c) 1993 Ning and David Mosberger.
- 
- This is based on code originally written by Bas Laarhoven (bas@vimec.nl)
- and David L. Brown, Jr., and incorporates improvements suggested by
- Kai Harrekilde-Petersen.
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
- 
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- General Public License for more details.
- 
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-ecc.c,v $
- * $Revision: 1.3 $
- * $Date: 1997/10/05 19:18:10 $
- *
- *      This file contains the Reed-Solomon error correction code 
- *      for the QIC-40/80 floppy-tape driver for Linux.
- */
-
-#include <linux/ftape.h>
-
-#include "../lowlevel/ftape-tracing.h"
-#include "../lowlevel/ftape-ecc.h"
-
-/* Machines that are big-endian should define macro BIG_ENDIAN.
- * Unfortunately, there doesn't appear to be a standard include file
- * that works for all OSs.
- */
-
-#if defined(__sparc__) || defined(__hppa)
-#define BIG_ENDIAN
-#endif				/* __sparc__ || __hppa */
-
-#if defined(__mips__)
-#error Find a smart way to determine the Endianness of the MIPS CPU
-#endif
-
-/* Notice: to minimize the potential for confusion, we use r to
- *         denote the independent variable of the polynomials in the
- *         Galois Field GF(2^8).  We reserve x for polynomials
- *         that have coefficients in GF(2^8).
- *         
- * The Galois Field in which coefficient arithmetic is performed consists
- * of the polynomials over Z_2 (i.e., 0 and 1) modulo the irreducible
- * polynomial f(r), where f(r)=r^8 + r^7 + r^2 + r + 1.  A polynomial
- * is represented as a byte with the MSB as the coefficient of r^7 and
- * the LSB as the coefficient of r^0.  For example, the binary
- * representation of f(r) is 0x187 (of course, this doesn't fit into 8
- * bits).  In this field, the polynomial r is a primitive element.
- * That is, r^i with i in 0,...,255 enumerates all elements in the
- * field.
- *
- * The generator polynomial for the QIC-80 ECC is
- *
- *      g(x) = x^3 + r^105*x^2 + r^105*x + 1
- *
- * which can be factored into:
- *
- *      g(x) = (x-r^-1)(x-r^0)(x-r^1)
- *
- * the byte representation of the coefficients are:
- *
- *      r^105 = 0xc0
- *      r^-1  = 0xc3
- *      r^0   = 0x01
- *      r^1   = 0x02
- *
- * Notice that r^-1 = r^254 as exponent arithmetic is performed
- * modulo 2^8-1 = 255.
- *
- * For more information on Galois Fields and Reed-Solomon codes, refer
- * to any good book.  I found _An Introduction to Error Correcting
- * Codes with Applications_ by S. A. Vanstone and P. C. van Oorschot
- * to be a good introduction into the former.  _CODING THEORY: The
- * Essentials_ I found very useful for its concise description of
- * Reed-Solomon encoding/decoding.
- *
- */
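The gfpow[]/gflog[] tables that follow encode this arithmetic as exponent and log lookups.  As a cross-check of what they represent — not part of the original driver — a slow reference multiplication in GF(2^8), i.e. a carry-less polynomial product over Z_2 reduced modulo f(r) = 0x187, can be sketched as:

static __u8 example_gfmul_reference(__u8 a, __u8 b)
{
	unsigned int product = 0;
	int i;

	for (i = 0; i < 8; i++)		/* carry-less multiply over Z_2 */
		if (b & (1 << i))
			product ^= (unsigned int)a << i;
	for (i = 14; i >= 8; i--)	/* reduce modulo r^8+r^7+r^2+r+1 */
		if (product & (1 << i))
			product ^= 0x187 << (i - 8);
	return product;
}

For nonzero a and b this agrees with gfpow[mod255(gflog[a] + gflog[b])], which is what gfmul() below computes; for example a = 0x80 (r^7) times b = 0x02 (r) gives r^8, which reduces to r^7 + r^2 + r + 1 = 0x87, matching gfpow[8] in the table.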
-
-typedef __u8 Matrix[3][3];
-
-/*
- * gfpow[] is defined such that gfpow[i] returns r^i if
- * i is in the range [0..255].
- */
-static const __u8 gfpow[] =
-{
-	0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
-	0x87, 0x89, 0x95, 0xad, 0xdd, 0x3d, 0x7a, 0xf4,
-	0x6f, 0xde, 0x3b, 0x76, 0xec, 0x5f, 0xbe, 0xfb,
-	0x71, 0xe2, 0x43, 0x86, 0x8b, 0x91, 0xa5, 0xcd,
-	0x1d, 0x3a, 0x74, 0xe8, 0x57, 0xae, 0xdb, 0x31,
-	0x62, 0xc4, 0x0f, 0x1e, 0x3c, 0x78, 0xf0, 0x67,
-	0xce, 0x1b, 0x36, 0x6c, 0xd8, 0x37, 0x6e, 0xdc,
-	0x3f, 0x7e, 0xfc, 0x7f, 0xfe, 0x7b, 0xf6, 0x6b,
-	0xd6, 0x2b, 0x56, 0xac, 0xdf, 0x39, 0x72, 0xe4,
-	0x4f, 0x9e, 0xbb, 0xf1, 0x65, 0xca, 0x13, 0x26,
-	0x4c, 0x98, 0xb7, 0xe9, 0x55, 0xaa, 0xd3, 0x21,
-	0x42, 0x84, 0x8f, 0x99, 0xb5, 0xed, 0x5d, 0xba,
-	0xf3, 0x61, 0xc2, 0x03, 0x06, 0x0c, 0x18, 0x30,
-	0x60, 0xc0, 0x07, 0x0e, 0x1c, 0x38, 0x70, 0xe0,
-	0x47, 0x8e, 0x9b, 0xb1, 0xe5, 0x4d, 0x9a, 0xb3,
-	0xe1, 0x45, 0x8a, 0x93, 0xa1, 0xc5, 0x0d, 0x1a,
-	0x34, 0x68, 0xd0, 0x27, 0x4e, 0x9c, 0xbf, 0xf9,
-	0x75, 0xea, 0x53, 0xa6, 0xcb, 0x11, 0x22, 0x44,
-	0x88, 0x97, 0xa9, 0xd5, 0x2d, 0x5a, 0xb4, 0xef,
-	0x59, 0xb2, 0xe3, 0x41, 0x82, 0x83, 0x81, 0x85,
-	0x8d, 0x9d, 0xbd, 0xfd, 0x7d, 0xfa, 0x73, 0xe6,
-	0x4b, 0x96, 0xab, 0xd1, 0x25, 0x4a, 0x94, 0xaf,
-	0xd9, 0x35, 0x6a, 0xd4, 0x2f, 0x5e, 0xbc, 0xff,
-	0x79, 0xf2, 0x63, 0xc6, 0x0b, 0x16, 0x2c, 0x58,
-	0xb0, 0xe7, 0x49, 0x92, 0xa3, 0xc1, 0x05, 0x0a,
-	0x14, 0x28, 0x50, 0xa0, 0xc7, 0x09, 0x12, 0x24,
-	0x48, 0x90, 0xa7, 0xc9, 0x15, 0x2a, 0x54, 0xa8,
-	0xd7, 0x29, 0x52, 0xa4, 0xcf, 0x19, 0x32, 0x64,
-	0xc8, 0x17, 0x2e, 0x5c, 0xb8, 0xf7, 0x69, 0xd2,
-	0x23, 0x46, 0x8c, 0x9f, 0xb9, 0xf5, 0x6d, 0xda,
-	0x33, 0x66, 0xcc, 0x1f, 0x3e, 0x7c, 0xf8, 0x77,
-	0xee, 0x5b, 0xb6, 0xeb, 0x51, 0xa2, 0xc3, 0x01
-};
-
-/*
- * This is a log table.  That is, gflog[r^i] returns i (modulo f(r)).
- * gflog[0] is undefined and the first element is therefore not valid.
- */
-static const __u8 gflog[256] =
-{
-	0xff, 0x00, 0x01, 0x63, 0x02, 0xc6, 0x64, 0x6a,
-	0x03, 0xcd, 0xc7, 0xbc, 0x65, 0x7e, 0x6b, 0x2a,
-	0x04, 0x8d, 0xce, 0x4e, 0xc8, 0xd4, 0xbd, 0xe1,
-	0x66, 0xdd, 0x7f, 0x31, 0x6c, 0x20, 0x2b, 0xf3,
-	0x05, 0x57, 0x8e, 0xe8, 0xcf, 0xac, 0x4f, 0x83,
-	0xc9, 0xd9, 0xd5, 0x41, 0xbe, 0x94, 0xe2, 0xb4,
-	0x67, 0x27, 0xde, 0xf0, 0x80, 0xb1, 0x32, 0x35,
-	0x6d, 0x45, 0x21, 0x12, 0x2c, 0x0d, 0xf4, 0x38,
-	0x06, 0x9b, 0x58, 0x1a, 0x8f, 0x79, 0xe9, 0x70,
-	0xd0, 0xc2, 0xad, 0xa8, 0x50, 0x75, 0x84, 0x48,
-	0xca, 0xfc, 0xda, 0x8a, 0xd6, 0x54, 0x42, 0x24,
-	0xbf, 0x98, 0x95, 0xf9, 0xe3, 0x5e, 0xb5, 0x15,
-	0x68, 0x61, 0x28, 0xba, 0xdf, 0x4c, 0xf1, 0x2f,
-	0x81, 0xe6, 0xb2, 0x3f, 0x33, 0xee, 0x36, 0x10,
-	0x6e, 0x18, 0x46, 0xa6, 0x22, 0x88, 0x13, 0xf7,
-	0x2d, 0xb8, 0x0e, 0x3d, 0xf5, 0xa4, 0x39, 0x3b,
-	0x07, 0x9e, 0x9c, 0x9d, 0x59, 0x9f, 0x1b, 0x08,
-	0x90, 0x09, 0x7a, 0x1c, 0xea, 0xa0, 0x71, 0x5a,
-	0xd1, 0x1d, 0xc3, 0x7b, 0xae, 0x0a, 0xa9, 0x91,
-	0x51, 0x5b, 0x76, 0x72, 0x85, 0xa1, 0x49, 0xeb,
-	0xcb, 0x7c, 0xfd, 0xc4, 0xdb, 0x1e, 0x8b, 0xd2,
-	0xd7, 0x92, 0x55, 0xaa, 0x43, 0x0b, 0x25, 0xaf,
-	0xc0, 0x73, 0x99, 0x77, 0x96, 0x5c, 0xfa, 0x52,
-	0xe4, 0xec, 0x5f, 0x4a, 0xb6, 0xa2, 0x16, 0x86,
-	0x69, 0xc5, 0x62, 0xfe, 0x29, 0x7d, 0xbb, 0xcc,
-	0xe0, 0xd3, 0x4d, 0x8c, 0xf2, 0x1f, 0x30, 0xdc,
-	0x82, 0xab, 0xe7, 0x56, 0xb3, 0x93, 0x40, 0xd8,
-	0x34, 0xb0, 0xef, 0x26, 0x37, 0x0c, 0x11, 0x44,
-	0x6f, 0x78, 0x19, 0x9a, 0x47, 0x74, 0xa7, 0xc1,
-	0x23, 0x53, 0x89, 0xfb, 0x14, 0x5d, 0xf8, 0x97,
-	0x2e, 0x4b, 0xb9, 0x60, 0x0f, 0xed, 0x3e, 0xe5,
-	0xf6, 0x87, 0xa5, 0x17, 0x3a, 0xa3, 0x3c, 0xb7
-};
-
-/* This is a multiplication table for the factor 0xc0 (i.e., r^105 (mod f(r)).
- * gfmul_c0[f] returns r^105 * f(r) (modulo f(r)).
- */
-static const __u8 gfmul_c0[256] =
-{
-	0x00, 0xc0, 0x07, 0xc7, 0x0e, 0xce, 0x09, 0xc9,
-	0x1c, 0xdc, 0x1b, 0xdb, 0x12, 0xd2, 0x15, 0xd5,
-	0x38, 0xf8, 0x3f, 0xff, 0x36, 0xf6, 0x31, 0xf1,
-	0x24, 0xe4, 0x23, 0xe3, 0x2a, 0xea, 0x2d, 0xed,
-	0x70, 0xb0, 0x77, 0xb7, 0x7e, 0xbe, 0x79, 0xb9,
-	0x6c, 0xac, 0x6b, 0xab, 0x62, 0xa2, 0x65, 0xa5,
-	0x48, 0x88, 0x4f, 0x8f, 0x46, 0x86, 0x41, 0x81,
-	0x54, 0x94, 0x53, 0x93, 0x5a, 0x9a, 0x5d, 0x9d,
-	0xe0, 0x20, 0xe7, 0x27, 0xee, 0x2e, 0xe9, 0x29,
-	0xfc, 0x3c, 0xfb, 0x3b, 0xf2, 0x32, 0xf5, 0x35,
-	0xd8, 0x18, 0xdf, 0x1f, 0xd6, 0x16, 0xd1, 0x11,
-	0xc4, 0x04, 0xc3, 0x03, 0xca, 0x0a, 0xcd, 0x0d,
-	0x90, 0x50, 0x97, 0x57, 0x9e, 0x5e, 0x99, 0x59,
-	0x8c, 0x4c, 0x8b, 0x4b, 0x82, 0x42, 0x85, 0x45,
-	0xa8, 0x68, 0xaf, 0x6f, 0xa6, 0x66, 0xa1, 0x61,
-	0xb4, 0x74, 0xb3, 0x73, 0xba, 0x7a, 0xbd, 0x7d,
-	0x47, 0x87, 0x40, 0x80, 0x49, 0x89, 0x4e, 0x8e,
-	0x5b, 0x9b, 0x5c, 0x9c, 0x55, 0x95, 0x52, 0x92,
-	0x7f, 0xbf, 0x78, 0xb8, 0x71, 0xb1, 0x76, 0xb6,
-	0x63, 0xa3, 0x64, 0xa4, 0x6d, 0xad, 0x6a, 0xaa,
-	0x37, 0xf7, 0x30, 0xf0, 0x39, 0xf9, 0x3e, 0xfe,
-	0x2b, 0xeb, 0x2c, 0xec, 0x25, 0xe5, 0x22, 0xe2,
-	0x0f, 0xcf, 0x08, 0xc8, 0x01, 0xc1, 0x06, 0xc6,
-	0x13, 0xd3, 0x14, 0xd4, 0x1d, 0xdd, 0x1a, 0xda,
-	0xa7, 0x67, 0xa0, 0x60, 0xa9, 0x69, 0xae, 0x6e,
-	0xbb, 0x7b, 0xbc, 0x7c, 0xb5, 0x75, 0xb2, 0x72,
-	0x9f, 0x5f, 0x98, 0x58, 0x91, 0x51, 0x96, 0x56,
-	0x83, 0x43, 0x84, 0x44, 0x8d, 0x4d, 0x8a, 0x4a,
-	0xd7, 0x17, 0xd0, 0x10, 0xd9, 0x19, 0xde, 0x1e,
-	0xcb, 0x0b, 0xcc, 0x0c, 0xc5, 0x05, 0xc2, 0x02,
-	0xef, 0x2f, 0xe8, 0x28, 0xe1, 0x21, 0xe6, 0x26,
-	0xf3, 0x33, 0xf4, 0x34, 0xfd, 0x3d, 0xfa, 0x3a
-};
-
-
-/* Returns V modulo 255 provided V is in the range -255,-254,...,509.
- */
-static inline __u8 mod255(int v)
-{
-	if (v > 0) {
-		if (v < 255) {
-			return v;
-		} else {
-			return v - 255;
-		}
-	} else {
-		return v + 255;
-	}
-}
-
-
-/* Add two numbers in the field.  Addition in this field is equivalent
- * to a bit-wise exclusive OR operation---subtraction is therefore
- * identical to addition.
- */
-static inline __u8 gfadd(__u8 a, __u8 b)
-{
-	return a ^ b;
-}
-
-
-/* Add two vectors of numbers in the field.  Each byte in A and B gets
- * added individually.
- */
-static inline unsigned long gfadd_long(unsigned long a, unsigned long b)
-{
-	return a ^ b;
-}
-
-
-/* Multiply two numbers in the field:
- */
-static inline __u8 gfmul(__u8 a, __u8 b)
-{
-	if (a && b) {
-		return gfpow[mod255(gflog[a] + gflog[b])];
-	} else {
-		return 0;
-	}
-}
-
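A minimal standalone sketch of the arithmetic these tables encode (illustrative
only, not driver code): gfpow/gflog are the power and discrete-log tables of
GF(2^8) with generator r = 0x02, and the tables are consistent with the
reduction polynomial x^8 + x^7 + x^2 + x + 1 (0x187); for instance
gfmul_c0[0x02] == 0x07 because 0x180 XOR 0x187 == 0x07.  The sketch below
regenerates the tables and reproduces the log/antilog multiply used by gfmul().

#include <stdio.h>

static unsigned char pow_tab[256], log_tab[256];

static void build_gf256_tables(void)
{
	unsigned int x = 1;
	int i;

	for (i = 0; i < 255; i++) {
		pow_tab[i] = x;
		log_tab[x] = i;
		x <<= 1;		/* multiply by r = 0x02 */
		if (x & 0x100)
			x ^= 0x187;	/* reduce modulo f(r) */
	}
	pow_tab[255] = pow_tab[0];	/* r^255 == r^0 == 1 */
	log_tab[0] = 0xff;		/* log of 0 is undefined */
}

static unsigned char gf256_mul(unsigned char a, unsigned char b)
{
	if (!a || !b)
		return 0;
	return pow_tab[(log_tab[a] + log_tab[b]) % 255];
}

int main(void)
{
	build_gf256_tables();
	/* 0xc0 is r^105, so 0xc0 * 0x02 is r^106 == 0x07, matching
	 * gfmul_c0[0x02] in the table above.
	 */
	printf("log(0xc0) = %d, 0xc0 * 0x02 = 0x%02x\n",
	       log_tab[0xc0], gf256_mul(0xc0, 0x02));
	return 0;
}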
-
-/* Just like gfmul, except we have already looked up the log of the
- * second number.
- */
-static inline __u8 gfmul_exp(__u8 a, int b)
-{
-	if (a) {
-		return gfpow[mod255(gflog[a] + b)];
-	} else {
-		return 0;
-	}
-}
-
-
-/* Just like gfmul_exp, except that A is a vector of numbers.  That
- * is, each byte in A gets multiplied by gfpow[mod255(B)].
- */
-static inline unsigned long gfmul_exp_long(unsigned long a, int b)
-{
-	__u8 t;
-
-	if (sizeof(long) == 4) {
-		return (
-		((t = (__u32)a >> 24 & 0xff) ?
-		 (((__u32) gfpow[mod255(gflog[t] + b)]) << 24) : 0) |
-		((t = (__u32)a >> 16 & 0xff) ?
-		 (((__u32) gfpow[mod255(gflog[t] + b)]) << 16) : 0) |
-		((t = (__u32)a >> 8 & 0xff) ?
-		 (((__u32) gfpow[mod255(gflog[t] + b)]) << 8) : 0) |
-		((t = (__u32)a >> 0 & 0xff) ?
-		 (((__u32) gfpow[mod255(gflog[t] + b)]) << 0) : 0));
-	} else if (sizeof(long) == 8) {
-		return (
-		((t = (__u64)a >> 56 & 0xff) ?
-		 (((__u64) gfpow[mod255(gflog[t] + b)]) << 56) : 0) |
-		((t = (__u64)a >> 48 & 0xff) ?
-		 (((__u64) gfpow[mod255(gflog[t] + b)]) << 48) : 0) |
-		((t = (__u64)a >> 40 & 0xff) ?
-		 (((__u64) gfpow[mod255(gflog[t] + b)]) << 40) : 0) |
-		((t = (__u64)a >> 32 & 0xff) ?
-		 (((__u64) gfpow[mod255(gflog[t] + b)]) << 32) : 0) |
-		((t = (__u64)a >> 24 & 0xff) ?
-		 (((__u64) gfpow[mod255(gflog[t] + b)]) << 24) : 0) |
-		((t = (__u64)a >> 16 & 0xff) ?
-		 (((__u64) gfpow[mod255(gflog[t] + b)]) << 16) : 0) |
-		((t = (__u64)a >> 8 & 0xff) ?
-		 (((__u64) gfpow[mod255(gflog[t] + b)]) << 8) : 0) |
-		((t = (__u64)a >> 0 & 0xff) ?
-		 (((__u64) gfpow[mod255(gflog[t] + b)]) << 0) : 0));
-	} else {
-		TRACE_FUN(ft_t_any);
-		TRACE_ABORT(-1, ft_t_err, "Error: size of long is %d bytes",
-			    (int)sizeof(long));
-	}
-}
-
-
-/* Divide two numbers in the field.  Returns a/b (modulo f(x)).
- */
-static inline __u8 gfdiv(__u8 a, __u8 b)
-{
-	if (!b) {
-		TRACE_FUN(ft_t_any);
-		TRACE_ABORT(0xff, ft_t_bug, "Error: division by zero");
-	} else if (a == 0) {
-		return 0;
-	} else {
-		return gfpow[mod255(gflog[a] - gflog[b])];
-	}
-}
-
-
-/* The following functions return the inverse of the matrix of the
- * linear system that needs to be solved to determine the error
- * magnitudes.  The first deals with matrices of rank 3, while the
- * second deals with matrices of rank 2.  The error indices are passed
- * in arguments L0,..,L2 (0=first sector, 31=last sector).  The error
- * indices must be sorted in ascending order, i.e., L0<L1<L2.
- *
- * The linear system that needs to be solved for the error magnitudes
- * is A * b = s, where s is the known vector of syndromes, b is the
- * vector of error magnitudes and A in the ORDER=3 case:
- *
- *    A_3 = {{1/r^L[0], 1/r^L[1], 1/r^L[2]},
- *           {        1,        1,        1},
- *           {  r^L[0],   r^L[1],   r^L[2]}}
- */
-static inline int gfinv3(__u8 l0,
-			 __u8 l1, 
-			 __u8 l2, 
-			 Matrix Ainv)
-{
-	__u8 det;
-	__u8 t20, t10, t21, t12, t01, t02;
-	int log_det;
-
-	/* compute some intermediate results: */
-	t20 = gfpow[l2 - l0];	        /* t20 = r^l2/r^l0 */
-	t10 = gfpow[l1 - l0];	        /* t10 = r^l1/r^l0 */
-	t21 = gfpow[l2 - l1];	        /* t21 = r^l2/r^l1 */
-	t12 = gfpow[l1 - l2 + 255];	/* t12 = r^l1/r^l2 */
-	t01 = gfpow[l0 - l1 + 255];	/* t01 = r^l0/r^l1 */
-	t02 = gfpow[l0 - l2 + 255];	/* t02 = r^l0/r^l2 */
-	/* Calculate the determinant of matrix A_3^-1 (sometimes
-	 * called the Vandermonde determinant):
-	 */
-	det = gfadd(t20, gfadd(t10, gfadd(t21, gfadd(t12, gfadd(t01, t02)))));
-	if (!det) {
-		TRACE_FUN(ft_t_any);
-		TRACE_ABORT(0, ft_t_err,
-			   "Inversion failed (3 CRC errors, >0 CRC failures)");
-	}
-	log_det = 255 - gflog[det];
-
-	/* Now, calculate all of the coefficients:
-	 */
-	Ainv[0][0]= gfmul_exp(gfadd(gfpow[l1], gfpow[l2]), log_det);
-	Ainv[0][1]= gfmul_exp(gfadd(t21, t12), log_det);
-	Ainv[0][2]= gfmul_exp(gfadd(gfpow[255 - l1], gfpow[255 - l2]),log_det);
-
-	Ainv[1][0]= gfmul_exp(gfadd(gfpow[l0], gfpow[l2]), log_det);
-	Ainv[1][1]= gfmul_exp(gfadd(t20, t02), log_det);
-	Ainv[1][2]= gfmul_exp(gfadd(gfpow[255 - l0], gfpow[255 - l2]),log_det);
-
-	Ainv[2][0]= gfmul_exp(gfadd(gfpow[l0], gfpow[l1]), log_det);
-	Ainv[2][1]= gfmul_exp(gfadd(t10, t01), log_det);
-	Ainv[2][2]= gfmul_exp(gfadd(gfpow[255 - l0], gfpow[255 - l1]),log_det);
-
-	return 1;
-}
-
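For reference (same notation as the comment above), the determinant assembled
here is the Vandermonde-type determinant of A_3; expanding along the middle
row and dropping signs, which vanish in characteristic 2:

    \det A_3 = r^{L_2-L_0} + r^{L_1-L_0} + r^{L_2-L_1}
             + r^{L_1-L_2} + r^{L_0-L_1} + r^{L_0-L_2}
             = t20 + t10 + t21 + t12 + t01 + t02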
-
-static inline int gfinv2(__u8 l0, __u8 l1, Matrix Ainv)
-{
-	__u8 det;
-	__u8 t1, t2;
-	int log_det;
-
-	t1 = gfpow[255 - l0];
-	t2 = gfpow[255 - l1];
-	det = gfadd(t1, t2);
-	if (!det) {
-		TRACE_FUN(ft_t_any);
-		TRACE_ABORT(0, ft_t_err,
-			   "Inversion failed (2 CRC errors, >0 CRC failures)");
-	}
-	log_det = 255 - gflog[det];
-
-	/* Now, calculate all of the coefficients:
-	 */
-	Ainv[0][0] = Ainv[1][0] = gfpow[log_det];
-
-	Ainv[0][1] = gfmul_exp(t2, log_det);
-	Ainv[1][1] = gfmul_exp(t1, log_det);
-
-	return 1;
-}
-
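Spelled out for the rank-2 case (an aside, using the same L[i] notation),
gfinv2() inverts

    A_2 = \begin{pmatrix} r^{-L_0} & r^{-L_1} \\ 1 & 1 \end{pmatrix},
    \qquad
    A_2^{-1} = \frac{1}{r^{-L_0} + r^{-L_1}}
               \begin{pmatrix} 1 & r^{-L_1} \\ 1 & r^{-L_0} \end{pmatrix}

where subtraction equals addition in GF(2^8); the denominator is the
determinant t1 + t2 computed above, and the four entries correspond to the
Ainv[][] assignments.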
-
-/* Multiply matrix A by vector S and return result in vector B.  A is
- * assumed to be of order NxN, S and B of order Nx1.
- */
-static inline void gfmat_mul(int n, Matrix A, 
-			     __u8 *s, __u8 *b)
-{
-	int i, j;
-	__u8 dot_prod;
-
-	for (i = 0; i < n; ++i) {
-		dot_prod = 0;
-		for (j = 0; j < n; ++j) {
-			dot_prod = gfadd(dot_prod, gfmul(A[i][j], s[j]));
-		}
-		b[i] = dot_prod;
-	}
-}
-
-
-
-/* The Reed Solomon ECC codes are computed column-wise: for each byte
- * position N (0 <= N < SECTOR_SIZE) the N-th byte of every block forms
- * one codeword.  There are up to 29 blocks of data, and
- * 3 blocks of ECC.  The blocks are stored contiguously in memory.  A
- * segment, consequently, is assumed to have at least 4 blocks: one or
- * more data blocks plus three ECC blocks.
- *
- * Notice: In QIC-80 speak, a CRC error is a sector with an incorrect
- *         CRC.  A CRC failure is a sector with incorrect data, but
- *         a valid CRC.  In the error control literature, the former
- *         is usually called "erasure", the latter "error."
- */
-/* Compute the parity bytes for C columns of data, where C is the
- * number of bytes that fit into a long integer.  We use a linear
- * feed-back register to do this.  The parity bytes P[0], P[STRIDE],
- * P[2*STRIDE] are computed such that:
- *
- *              x^k * p(x) + m(x) = 0 (modulo g(x))
- *
- * where k = NBLOCKS,
- *       p(x) = P[0] + P[STRIDE]*x + P[2*STRIDE]*x^2, and
- *       m(x) = sum_{i=0}^{k-1} m_i*x^i.
- *       m_i = DATA[i*SECTOR_SIZE]
- */
-static inline void set_parity(unsigned long *data,
-			      int nblocks, 
-			      unsigned long *p, 
-			      int stride)
-{
-	unsigned long p0, p1, p2, t1, t2, *end;
-
-	end = data + nblocks * (FT_SECTOR_SIZE / sizeof(long));
-	p0 = p1 = p2 = 0;
-	while (data < end) {
-		/* The new parity bytes p0_i, p1_i, p2_i are computed
-		 * from the old values p0_{i-1}, p1_{i-1}, p2_{i-1}
-		 * recursively as:
-		 *
-		 *        p0_i = p1_{i-1} + r^105 * (m_{i-1} - p0_{i-1})
-		 *        p1_i = p2_{i-1} + r^105 * (m_{i-1} - p0_{i-1})
-		 *        p2_i =                    (m_{i-1} - p0_{i-1})
-		 *
-		 * With the initial condition: p0_0 = p1_0 = p2_0 = 0.
-		 */
-		t1 = gfadd_long(*data, p0);
-		/*
-		 * Multiply each byte in t1 by 0xc0:
-		 */
-		if (sizeof(long) == 4) {
-			t2= (((__u32) gfmul_c0[(__u32)t1 >> 24 & 0xff]) << 24 |
-			     ((__u32) gfmul_c0[(__u32)t1 >> 16 & 0xff]) << 16 |
-			     ((__u32) gfmul_c0[(__u32)t1 >>  8 & 0xff]) <<  8 |
-			     ((__u32) gfmul_c0[(__u32)t1 >>  0 & 0xff]) <<  0);
-		} else if (sizeof(long) == 8) {
-			t2= (((__u64) gfmul_c0[(__u64)t1 >> 56 & 0xff]) << 56 |
-			     ((__u64) gfmul_c0[(__u64)t1 >> 48 & 0xff]) << 48 |
-			     ((__u64) gfmul_c0[(__u64)t1 >> 40 & 0xff]) << 40 |
-			     ((__u64) gfmul_c0[(__u64)t1 >> 32 & 0xff]) << 32 |
-			     ((__u64) gfmul_c0[(__u64)t1 >> 24 & 0xff]) << 24 |
-			     ((__u64) gfmul_c0[(__u64)t1 >> 16 & 0xff]) << 16 |
-			     ((__u64) gfmul_c0[(__u64)t1 >>  8 & 0xff]) <<  8 |
-			     ((__u64) gfmul_c0[(__u64)t1 >>  0 & 0xff]) <<  0);
-		} else {
-			TRACE_FUN(ft_t_any);
-			TRACE(ft_t_err, "Error: long is of size %d",
-			      (int) sizeof(long));
-			TRACE_EXIT;
-		}
-		p0 = gfadd_long(t2, p1);
-		p1 = gfadd_long(t2, p2);
-		p2 = t1;
-		data += FT_SECTOR_SIZE / sizeof(long);
-	}
-	*p = p0;
-	p += stride;
-	*p = p1;
-	p += stride;
-	*p = p2;
-	return;
-}
-
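For illustration only (not driver code): the same recursion written for a
single column, one byte per block.  set_parity() above merely runs
sizeof(long) such columns in parallel per long word; stride would be
FT_SECTOR_SIZE, and gfmul_c0[] and __u8 are as defined earlier in this file.

/* Byte-wise form of the parity recursion documented above:
 *	t   = m_i + p0          (+ is XOR in GF(2^8))
 *	p0' = p1 + r^105 * t
 *	p1' = p2 + r^105 * t
 *	p2' = t
 */
static void set_parity_one_column(const __u8 *col, int nblocks, int stride,
				  __u8 p[3])
{
	__u8 p0 = 0, p1 = 0, p2 = 0, t;
	int i;

	for (i = 0; i < nblocks; i++) {
		t  = col[i * stride] ^ p0;
		p0 = gfmul_c0[t] ^ p1;
		p1 = gfmul_c0[t] ^ p2;
		p2 = t;
	}
	p[0] = p0;
	p[1] = p1;
	p[2] = p2;
}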
-
-/* Compute the 3 syndrome values.  DATA should point to the first byte
- * of the column for which the syndromes are desired.  The syndromes
- * are computed over the first NBLOCKS of rows.  The three bytes will
- * be placed in S[0], S[1], and S[2].
- *
- * S[i] is the value of the "message" polynomial m(x) evaluated at the
- * i-th root of the generator polynomial g(x).
- *
- * As g(x)=(x-r^-1)(x-1)(x-r^1) we evaluate the message polynomial at
- * x=r^-1 to get S[0], at x=r^0=1 to get S[1], and at x=r to get S[2].
- * This could be done directly and efficiently via the Horner scheme.
- * However, it would require multiplication tables for the factors
- * r^-1 (0xc3) and r (0x02).  The following scheme does not require
- * any multiplication tables beyond what's needed for set_parity()
- * anyway and is slightly faster if there are no errors and slightly
- * slower if there are errors.  The latter is hopefully the infrequent
- * case.
- *
- * To understand the alternative algorithm, notice that set_parity(m,
- * k, p) computes parity bytes such that:
- *
- *      x^k * p(x) = m(x) (modulo g(x)).
- *
- * That is, to evaluate m(r^m), where r^m is a root of g(x), we can
- * simply evaluate (r^m)^k*p(r^m).  Also, notice that p is 0 if and
- * only if s is zero.  That is, if all parity bytes are 0, we know
- * there is no error in the data and consequently there is no need to
- * compute s(x) at all!  In all other cases, we compute s(x) from p(x)
- * by evaluating (r^m)^k*p(r^m) for m=-1, m=0, and m=1.  The p(x)
- * polynomial is evaluated via the Horner scheme.
- */
-static int compute_syndromes(unsigned long *data, int nblocks, unsigned long *s)
-{
-	unsigned long p[3];
-
-	set_parity(data, nblocks, p, 1);
-	if (p[0] | p[1] | p[2]) {
-		/* Some of the checked columns do not have a zero
-		 * syndrome.  For simplicity, we compute the syndromes
-		 * for all columns that we have computed the
-		 * remainders for.
-		 */
-		s[0] = gfmul_exp_long(
-			gfadd_long(p[0], 
-				   gfmul_exp_long(
-					   gfadd_long(p[1], 
-						      gfmul_exp_long(p[2], -1)),
-					   -1)), 
-			-nblocks);
-		s[1] = gfadd_long(gfadd_long(p[2], p[1]), p[0]);
-		s[2] = gfmul_exp_long(
-			gfadd_long(p[0], 
-				   gfmul_exp_long(
-					   gfadd_long(p[1],
-						      gfmul_exp_long(p[2], 1)),
-					   1)),
-			nblocks);
-		return 0;
-	} else {
-		return 1;
-	}
-}
-
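In formula form (an aside; indices as stored in s[]): with p(x) = P_0 + P_1 x
+ P_2 x^2 and k = nblocks, the expressions above are the Horner evaluations of
x^k p(x) at the three roots r^{-1}, 1 and r of g(x):

    s_0 = r^{-k} \bigl(P_0 + r^{-1}(P_1 + r^{-1} P_2)\bigr), \qquad
    s_1 = P_0 + P_1 + P_2, \qquad
    s_2 = r^{k} \bigl(P_0 + r(P_1 + r P_2)\bigr)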
-
-/* Correct the block in the column pointed to by DATA.  There are NBAD
- * CRC errors and their indices are in BAD_LOC[0], up to
- * BAD_LOC[NBAD-1].  If NBAD>1, Ainv holds the inverse of the matrix
- * of the linear system that needs to be solved to determine the error
- * magnitudes.  S[0], S[1], and S[2] are the syndrome values.  If row
- * j gets corrected, then bit j will be set in CORRECTION_MAP.
- */
-static inline int correct_block(__u8 *data, int nblocks,
-				int nbad, int *bad_loc, Matrix Ainv,
-				__u8 *s,
-				SectorMap * correction_map)
-{
-	int ncorrected = 0;
-	int i;
-	__u8 t1, t2;
-	__u8 c0, c1, c2;	/* check bytes */
-	__u8 error_mag[3], log_error_mag;
-	__u8 *dp, l, e;
-	TRACE_FUN(ft_t_any);
-
-	switch (nbad) {
-	case 0:
-		/* might have a CRC failure: */
-		if (s[0] == 0) {
-			/* more than one error */
-			TRACE_ABORT(-1, ft_t_err,
-				 "ECC failed (0 CRC errors, >1 CRC failures)");
-		}
-		t1 = gfdiv(s[1], s[0]);
-		if ((bad_loc[nbad++] = gflog[t1]) >= nblocks) {
-			TRACE(ft_t_err,
-			      "ECC failed (0 CRC errors, >1 CRC failures)");
-			TRACE_ABORT(-1, ft_t_err,
-				  "attempt to correct data at %d", bad_loc[0]);
-		}
-		error_mag[0] = s[1];
-		break;
-	case 1:
-		t1 = gfadd(gfmul_exp(s[1], bad_loc[0]), s[2]);
-		t2 = gfadd(gfmul_exp(s[0], bad_loc[0]), s[1]);
-		if (t1 == 0 && t2 == 0) {
-			/* one erasure, no error: */
-			Ainv[0][0] = gfpow[bad_loc[0]];
-		} else if (t1 == 0 || t2 == 0) {
-			/* one erasure and more than one error: */
-			TRACE_ABORT(-1, ft_t_err,
-				    "ECC failed (1 erasure, >1 error)");
-		} else {
-			/* one erasure, one error: */
-			if ((bad_loc[nbad++] = gflog[gfdiv(t1, t2)]) 
-			    >= nblocks) {
-				TRACE(ft_t_err, "ECC failed "
-				      "(1 CRC errors, >1 CRC failures)");
-				TRACE_ABORT(-1, ft_t_err,
-					    "attempt to correct data at %d",
-					    bad_loc[1]);
-			}
-			if (!gfinv2(bad_loc[0], bad_loc[1], Ainv)) {
-				/* inversion failed---must have more
-                                 *  than one error 
-				 */
-				TRACE_EXIT -1;
-			}
-		}
-		/* FALL THROUGH TO ERROR MAGNITUDE COMPUTATION:
-		 */
-	case 2:
-	case 3:
-		/* compute error magnitudes: */
-		gfmat_mul(nbad, Ainv, s, error_mag);
-		break;
-
-	default:
-		TRACE_ABORT(-1, ft_t_err,
-			    "Internal Error: number of CRC errors > 3");
-	}
-
-	/* Perform correction by adding ERROR_MAG[i] to the byte at
-	 * offset BAD_LOC[i].  Also add the value of the computed
-	 * error polynomial to the syndrome values.  If the correction
-	 * was successful, the resulting check bytes should be zero
-	 * (i.e., the corrected data is a valid code word).
-	 */
-	c0 = s[0];
-	c1 = s[1];
-	c2 = s[2];
-	for (i = 0; i < nbad; ++i) {
-		e = error_mag[i];
-		if (e) {
-			/* correct the byte at offset L by magnitude E: */
-			l = bad_loc[i];
-			dp = &data[l * FT_SECTOR_SIZE];
-			*dp = gfadd(*dp, e);
-			*correction_map |= 1 << l;
-			++ncorrected;
-
-			log_error_mag = gflog[e];
-			c0 = gfadd(c0, gfpow[mod255(log_error_mag - l)]);
-			c1 = gfadd(c1, e);
-			c2 = gfadd(c2, gfpow[mod255(log_error_mag + l)]);
-		}
-	}
-	if (c0 || c1 || c2) {
-		TRACE_ABORT(-1, ft_t_err,
-			    "ECC self-check failed, too many errors");
-	}
-	TRACE_EXIT ncorrected;
-}
-
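As a brief aside, the self-check at the end of correct_block() uses the fact
that an error of magnitude e in block l shifts the three syndromes by

    \Delta s_0 = e\,r^{-l}, \qquad \Delta s_1 = e, \qquad \Delta s_2 = e\,r^{l}

so after adding the computed magnitudes at the computed locations, the check
bytes c0, c1 and c2 must all come out zero for a valid code word.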
-
-#if defined(ECC_SANITY_CHECK) || defined(ECC_PARANOID)
-
-/* Perform a sanity check on the computed parity bytes:
- */
-static int sanity_check(unsigned long *data, int nblocks)
-{
-	TRACE_FUN(ft_t_any);
-	unsigned long s[3];
-
-	if (!compute_syndromes(data, nblocks, s)) {
-		TRACE_ABORT(0, ft_t_bug,
-			    "Internal Error: syndrome self-check failed");
-	}
-	TRACE_EXIT 1;
-}
-
-#endif /* defined(ECC_SANITY_CHECK) || defined(ECC_PARANOID) */
-
-/* Compute the parity for an entire segment of data.
- */
-int ftape_ecc_set_segment_parity(struct memory_segment *mseg)
-{
-	int i;
-	__u8 *parity_bytes;
-
-	parity_bytes = &mseg->data[(mseg->blocks - 3) * FT_SECTOR_SIZE];
-	for (i = 0; i < FT_SECTOR_SIZE; i += sizeof(long)) {
-		set_parity((unsigned long *) &mseg->data[i], mseg->blocks - 3,
-			   (unsigned long *) &parity_bytes[i],
-			   FT_SECTOR_SIZE / sizeof(long));
-#ifdef ECC_PARANOID
-		if (!sanity_check((unsigned long *) &mseg->data[i],
-				   mseg->blocks)) {
-			return -1;
-		}
-#endif				/* ECC_PARANOID */
-	}
-	return 0;
-}
-
-
-/* Checks and corrects (if possible) the segment MSEG.  Returns one of
- * ECC_OK, ECC_CORRECTED, and ECC_FAILED.
- */
-int ftape_ecc_correct_data(struct memory_segment *mseg)
-{
-	int col, i, result;
-	int ncorrected = 0;
-	int nerasures = 0;	/* # of erasures (CRC errors) */
-	int erasure_loc[3];	/* erasure locations */
-	unsigned long ss[3];
-	__u8 s[3];
-	Matrix Ainv;
-	TRACE_FUN(ft_t_flow);
-
-	mseg->corrected = 0;
-
-	/* find first column that has non-zero syndromes: */
-	for (col = 0; col < FT_SECTOR_SIZE; col += sizeof(long)) {
-		if (!compute_syndromes((unsigned long *) &mseg->data[col],
-				       mseg->blocks, ss)) {
-			/* something is wrong---have to fix things */
-			break;
-		}
-	}
-	if (col >= FT_SECTOR_SIZE) {
-		/* all syndromes are ok, therefore nothing to correct */
-		TRACE_EXIT ECC_OK;
-	}
-	/* count the number of CRC errors if there were any: */
-	if (mseg->read_bad) {
-		for (i = 0; i < mseg->blocks; i++) {
-			if (BAD_CHECK(mseg->read_bad, i)) {
-				if (nerasures >= 3) {
-					/* this is too much for ECC */
-					TRACE_ABORT(ECC_FAILED, ft_t_err,
-						"ECC failed (>3 CRC errors)");
-				}	/* if */
-				erasure_loc[nerasures++] = i;
-			}
-		}
-	}
-	/*
-	 * If there are at least 2 CRC errors, determine inverse of matrix
-	 * of linear system to be solved:
-	 */
-	switch (nerasures) {
-	case 2:
-		if (!gfinv2(erasure_loc[0], erasure_loc[1], Ainv)) {
-			TRACE_EXIT ECC_FAILED;
-		}
-		break;
-	case 3:
-		if (!gfinv3(erasure_loc[0], erasure_loc[1],
-			    erasure_loc[2], Ainv)) {
-			TRACE_EXIT ECC_FAILED;
-		}
-		break;
-	default:
-		/* this is not an error condition... */
-		break;
-	}
-
-	do {
-		for (i = 0; i < sizeof(long); ++i) {
-			s[0] = ss[0];
-			s[1] = ss[1];
-			s[2] = ss[2];
-			if (s[0] | s[1] | s[2]) {
-#ifdef BIG_ENDIAN
-				result = correct_block(
-					&mseg->data[col + sizeof(long) - 1 - i],
-					mseg->blocks,
-					nerasures,
-					erasure_loc,
-					Ainv,
-					s,
-					&mseg->corrected);
-#else
-				result = correct_block(&mseg->data[col + i],
-						       mseg->blocks,
-						       nerasures,
-						       erasure_loc,
-						       Ainv,
-						       s,
-						       &mseg->corrected);
-#endif
-				if (result < 0) {
-					TRACE_EXIT ECC_FAILED;
-				}
-				ncorrected += result;
-			}
-			ss[0] >>= 8;
-			ss[1] >>= 8;
-			ss[2] >>= 8;
-		}
-
-#ifdef ECC_SANITY_CHECK
-		if (!sanity_check((unsigned long *) &mseg->data[col],
-				  mseg->blocks)) {
-			TRACE_EXIT ECC_FAILED;
-		}
-#endif				/* ECC_SANITY_CHECK */
-
-		/* find next column with non-zero syndromes: */
-		while ((col += sizeof(long)) < FT_SECTOR_SIZE) {
-			if (!compute_syndromes((unsigned long *)
-				    &mseg->data[col], mseg->blocks, ss)) {
-				/* something is wrong---have to fix things */
-				break;
-			}
-		}
-	} while (col < FT_SECTOR_SIZE);
-	if (ncorrected && nerasures == 0) {
-		TRACE(ft_t_warn, "block contained error not caught by CRC");
-	}
-	TRACE((ncorrected > 0) ? ft_t_noise : ft_t_any, "number of corrections: %d", ncorrected);
-	TRACE_EXIT ncorrected ? ECC_CORRECTED : ECC_OK;
-}
diff --git a/drivers/char/ftape/lowlevel/ftape-ecc.h b/drivers/char/ftape/lowlevel/ftape-ecc.h
deleted file mode 100644
index 4829146..0000000
--- a/drivers/char/ftape/lowlevel/ftape-ecc.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef _FTAPE_ECC_H_
-#define _FTAPE_ECC_H_
-
-/*
- *      Copyright (C) 1993 Ning and David Mosberger.
- *      Original:
- *      Copyright (C) 1993 Bas Laarhoven.
- *      Copyright (C) 1992 David L. Brown, Jr.
- 
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
- 
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- General Public License for more details.
- 
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- USA.
- 
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-ecc.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:11 $
- *
- *      This file contains the definitions for the
- *      Reed-Solomon error correction code 
- *      for the QIC-40/80 tape streamer device driver.
- */
-
-#include "../lowlevel/ftape-bsm.h"
-
-#define BAD_CLEAR(entry) ((entry)=0)
-#define BAD_SET(entry,sector) ((entry)|=(1<<(sector)))
-#define BAD_CHECK(entry,sector) ((entry)&(1<<(sector)))
-
-/*
- * Return values for ecc_correct_data:
- */
-enum {
-	ECC_OK,			/* Data was correct. */
-	ECC_CORRECTED,		/* Correctable error in data. */
-	ECC_FAILED,		/* Could not correct data. */
-};
-
-/*
- * Representation of an in memory segment.  MARKED_BAD lists the
- * sectors that were marked bad during formatting.  If the N-th sector
- * in a segment is marked bad, bit 1<<N will be set in MARKED_BAD.
- * The sectors should be read in from the disk and packed, as if the
- * bad sectors were not there, and the segment just contained fewer
- * sectors.  READ_SECTORS is a bitmap of errors encountered while
- * reading the data.  These offsets are relative to the packed data.
- * BLOCKS is a count of the sectors not marked bad.  This is just to
- * prevent having to count the zero bits in MARKED_BAD each time this
- * is needed.  DATA is the actual sector packed data from (or to) the
- * tape.
- */
- struct memory_segment {
-	SectorMap marked_bad;
-	SectorMap read_bad;
- 	int blocks;
- 	__u8 *data;
-	SectorMap corrected;
- };
-
-/*
- * ecc.c defined global variables:
- */
-#ifdef TEST
-extern int ftape_ecc_tracing;
-#endif
-
-/*
- * ecc.c defined global functions:
- */
-extern int ftape_ecc_correct_data(struct memory_segment *data);
-extern int ftape_ecc_set_segment_parity(struct memory_segment *data);
-
-#endif	/* _FTAPE_ECC_H_ */
diff --git a/drivers/char/ftape/lowlevel/ftape-format.c b/drivers/char/ftape/lowlevel/ftape-format.c
deleted file mode 100644
index 5dd4c59..0000000
--- a/drivers/char/ftape/lowlevel/ftape-format.c
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * Copyright (C) 1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-format.c,v $
- * $Revision: 1.2.4.1 $
- * $Date: 1997/11/14 16:05:39 $
- *
- *      This file contains the code to support formatting of floppy
- *      tape cartridges with the QIC-40/80/3010/3020 floppy-tape
- *      driver "ftape" for Linux.
- */
- 
-#include <linux/string.h>
-#include <linux/errno.h>
-
-#include <linux/ftape.h>
-#include <linux/qic117.h>
-#include "../lowlevel/ftape-tracing.h"
-#include "../lowlevel/ftape-io.h"
-#include "../lowlevel/ftape-ctl.h"
-#include "../lowlevel/ftape-rw.h"
-#include "../lowlevel/ftape-ecc.h"
-#include "../lowlevel/ftape-bsm.h"
-#include "../lowlevel/ftape-format.h"
-
-#if defined(TESTING)
-#define FT_FMT_SEGS_PER_BUF 50
-#else
-#define FT_FMT_SEGS_PER_BUF (FT_BUFF_SIZE/(4*FT_SECTORS_PER_SEGMENT))
-#endif
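Worked out with the usual ftape constants (an assumption here:
FT_SECTORS_PER_SEGMENT = 32 and FT_BUFF_SIZE = 32768, i.e. a 32 KB DMA
buffer): each sector needs 4 bytes of formatting coordinates, so
FT_FMT_SEGS_PER_BUF = 32768 / (4 * 32) = 256 segments per buffer, which is the
"at most 256 segments" figure mentioned below.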
-
-static spinlock_t ftape_format_lock;
-
-/*
- *  first segment of the new buffer
- */
-static int switch_segment;
-
-/*
- *  at most 256 segments fit into one 32 KB buffer.  Even TR-1 cartridges have
- *  more than this many segments per track, so better be careful.
- *
- *  buffer_struct *buff: buffer to store the formatting coordinates in
- *  int  start: starting segment for this buffer.
- *  int    spt: segments per track
- *
- *  Note: segment ids are relative to the start of the track here.
- */
-static void setup_format_buffer(buffer_struct *buff, int start, int spt,
-				__u8 gap3)
-{
-	int to_do = spt - start;
-	TRACE_FUN(ft_t_flow);
-
-	if (to_do > FT_FMT_SEGS_PER_BUF) {
-		to_do = FT_FMT_SEGS_PER_BUF;
-	}
-	buff->ptr          = buff->address;
-	buff->remaining    = to_do * FT_SECTORS_PER_SEGMENT; /* # sectors */
-	buff->bytes        = buff->remaining * 4; /* need 4 bytes per sector */
-	buff->gap3         = gap3;
-	buff->segment_id   = start;
-	buff->next_segment = start + to_do;
-	if (buff->next_segment >= spt) {
-		buff->next_segment = 0; /* 0 means: stop runner */
-	}
-	buff->status       = waiting; /* tells the isr that it can use
-				       * this buffer
-				       */
-	TRACE_EXIT;
-}
-
-
-/*
- *  start formatting a new track.
- */
-int ftape_format_track(const unsigned int track, const __u8 gap3)
-{
-	unsigned long flags;
-	buffer_struct *tail, *head;
-	int status;
-	TRACE_FUN(ft_t_flow);
-
-	TRACE_CATCH(ftape_ready_wait(ftape_timeout.pause, &status),);
-	if (track & 1) {
-		if (!(status & QIC_STATUS_AT_EOT)) {
-			TRACE_CATCH(ftape_seek_to_eot(),);
-		}
-	} else {
-		if (!(status & QIC_STATUS_AT_BOT)) {
-			TRACE_CATCH(ftape_seek_to_bot(),);
-		}
-	}
-	ftape_abort_operation(); /* this sets ft_head = ft_tail = 0 */
-	ftape_set_state(formatting);
-
-	TRACE(ft_t_noise,
-	      "Formatting track %d, logical: from segment %d to %d",
-	      track, track * ft_segments_per_track, 
-	      (track + 1) * ft_segments_per_track - 1);
-	
-	/*
-	 *  initialize the buffer switching protocol for this track
-	 */
-	head = ftape_get_buffer(ft_queue_head); /* tape isn't running yet */
-	tail = ftape_get_buffer(ft_queue_tail); /* tape isn't running yet */
-	switch_segment = 0;
-	do {
-		FT_SIGNAL_EXIT(_DONT_BLOCK);
-		setup_format_buffer(tail, switch_segment,
-				    ft_segments_per_track, gap3);
-		switch_segment = tail->next_segment;
-	} while ((switch_segment != 0) &&
-		 ((tail = ftape_next_buffer(ft_queue_tail)) != head));
-	/* go */
-	head->status = formatting;
-	TRACE_CATCH(ftape_seek_head_to_track(track),);
-	TRACE_CATCH(ftape_command(QIC_LOGICAL_FORWARD),);
-	spin_lock_irqsave(&ftape_format_lock, flags);
-	TRACE_CATCH(fdc_setup_formatting(head), restore_flags(flags));
-	spin_unlock_irqrestore(&ftape_format_lock, flags);
-	TRACE_EXIT 0;
-}
-
-/*   return segment id of segment currently being formatted and do the
- *   buffer switching stuff.
- */
-int ftape_format_status(unsigned int *segment_id)
-{
-	buffer_struct *tail = ftape_get_buffer(ft_queue_tail);
-	int result;
-	TRACE_FUN(ft_t_flow);
-
-	while (switch_segment != 0 &&
-	       ftape_get_buffer(ft_queue_head) != tail) {
-		FT_SIGNAL_EXIT(_DONT_BLOCK);
-		/*  need more buffers, first wait for empty buffer
-		 */
-		TRACE_CATCH(ftape_wait_segment(formatting),);
-		/*  don't worry about gap3. If we ever hit this piece of code,
-		 *  then all buffers already have the correct gap3 set!
-		 */
-		setup_format_buffer(tail, switch_segment,
-				    ft_segments_per_track, tail->gap3);
-		switch_segment = tail->next_segment;
-		if (switch_segment != 0) {
-			tail = ftape_next_buffer(ft_queue_tail);
-		}
-	}
-	/*    should runner stop ?
-	 */
-	if (ft_runner_status == aborting || ft_runner_status == do_abort) {
-		buffer_struct *head = ftape_get_buffer(ft_queue_head);
-		TRACE(ft_t_warn, "Error formatting segment %d",
-		      ftape_get_buffer(ft_queue_head)->segment_id);
-		(void)ftape_abort_operation();
-		TRACE_EXIT (head->status != error) ? -EAGAIN : -EIO;
-	}
-	/*
-	 *  don't care if the timer expires, this is just kind of a
-	 *  "select" operation that lets the calling process sleep
-	 *  until something has happened
-	 */
-	if (fdc_interrupt_wait(5 * FT_SECOND) < 0) {
-		TRACE(ft_t_noise, "End of track %d at segment %d",
-		      ft_location.track,
-		      ftape_get_buffer(ft_queue_head)->segment_id);
-		result = 1;  /* end of track, unlock module */
-	} else {
-		result = 0;
-	}
-	/*
-	 *  the calling process should use the seg id to determine
-	 *  which parts of the dma buffers can be safely overwritten
-	 *  with new data.
-	 */
-	*segment_id = ftape_get_buffer(ft_queue_head)->segment_id;
-	/*
-	 *  Internally we start counting segment ids from the start of
-	 *  each track when formatting, but externally we keep them
-	 *  relative to the start of the tape:
-	 */
-	*segment_id += ft_location.track * ft_segments_per_track;
-	TRACE_EXIT result;
-}
-
-/*
- *  The segment id is relative to the start of the tape
- */
-int ftape_verify_segment(const unsigned int segment_id, SectorMap *bsm)
-{
-	int result;
-	int verify_done = 0;
-	TRACE_FUN(ft_t_flow);
-
-	TRACE(ft_t_noise, "Verifying segment %d", segment_id);
-
-	if (ft_driver_state != verifying) {
-		TRACE(ft_t_noise, "calling ftape_abort_operation");
-		if (ftape_abort_operation() < 0) {
-			TRACE(ft_t_err, "ftape_abort_operation failed");
-			TRACE_EXIT -EIO;
-		}
-	}
-	*bsm = 0x00000000;
-	ftape_set_state(verifying);
-	for (;;) {
-		buffer_struct *tail;
-		/*
-		 *  Allow escape from this loop on signal
-		 */
-		FT_SIGNAL_EXIT(_DONT_BLOCK);
-		/*
-		 *  Search all full buffers for the first matching the
-		 *  wanted segment.  Clear other buffers on the fly.
-		 */
-		tail = ftape_get_buffer(ft_queue_tail);
-		while (!verify_done && tail->status == done) {
-			/*
-			 *  Allow escape from this loop on signal !
-			 */
-			FT_SIGNAL_EXIT(_DONT_BLOCK);
-			if (tail->segment_id == segment_id) {
-				/*  If our buffer is already full,
-				 *  return its contents.  
-				 */
-				TRACE(ft_t_flow, "found segment in cache: %d",
-				      segment_id);
-				if ((tail->soft_error_map |
-				     tail->hard_error_map) != 0) {
-					TRACE(ft_t_info,"bsm[%d] = 0x%08lx",
-					      segment_id,
-					      (unsigned long)
-					      (tail->soft_error_map |
-					      tail->hard_error_map));
-					*bsm = (tail->soft_error_map |
-						tail->hard_error_map);
-				}
-				verify_done = 1;
-			} else {
-				TRACE(ft_t_flow,"zapping segment in cache: %d",
-				      tail->segment_id);
-			}
-			tail->status = waiting;
-			tail = ftape_next_buffer(ft_queue_tail);
-		}
-		if (!verify_done && tail->status == verifying) {
-			if (tail->segment_id == segment_id) {
-				switch(ftape_wait_segment(verifying)) {
-				case 0:
-					break;
-				case -EINTR:
-					TRACE_ABORT(-EINTR, ft_t_warn,
-						    "interrupted by "
-						    "non-blockable signal");
-					break;
-				default:
-					ftape_abort_operation();
-					ftape_set_state(verifying);
-					/* be picky */
-					TRACE_ABORT(-EIO, ft_t_warn,
-						    "wait_segment failed");
-				}
-			} else {
-				/*  We're reading the wrong segment,
-				 *  stop runner.
-				 */
-				TRACE(ft_t_noise, "verifying wrong segment");
-				ftape_abort_operation();
-				ftape_set_state(verifying);
-			}
-		}
-		/*    should runner stop ?
-		 */
-		if (ft_runner_status == aborting) {
-			buffer_struct *head = ftape_get_buffer(ft_queue_head);
-			if (head->status == error ||
-			    head->status == verifying) {
-				/* no data or overrun error */
-				head->status = waiting;
-			}
-			TRACE_CATCH(ftape_dumb_stop(),);
-		} else {
-			/*  If just passed last segment on tape: wait
-			 *  for BOT or EOT mark. Sets ft_runner_status to
-			 *  idle if at lEOT and successful 
-			 */
-			TRACE_CATCH(ftape_handle_logical_eot(),);
-		}
-		if (verify_done) {
-			TRACE_EXIT 0;
-		}
-		/*    Now at least one buffer is idle!
-		 *    Restart runner & tape if needed.
-		 */
-		/*  We could optimize the following a little bit. We know that 
-		 *  the bad sector map is empty.
-		 */
-		tail = ftape_get_buffer(ft_queue_tail);
-		if (tail->status == waiting) {
-			buffer_struct *head = ftape_get_buffer(ft_queue_head);
-
-			ftape_setup_new_segment(head, segment_id, -1);
-			ftape_calc_next_cluster(head);
-			if (ft_runner_status == idle) {
-				result = ftape_start_tape(segment_id,
-							  head->sector_offset);
-				switch(result) {
-				case 0:
-					break;
-				case -ETIME:
-				case -EINTR:
-					TRACE_ABORT(result, ft_t_err, "Error: "
-						    "segment %d unreachable",
-						    segment_id);
-					break;
-				default:
-					*bsm = EMPTY_SEGMENT;
-					TRACE_EXIT 0;
-					break;
-				}
-			}
-			head->status = verifying;
-			fdc_setup_read_write(head, FDC_VERIFY);
-		}
-	}
-	/* not reached */
-	TRACE_EXIT -EIO;
-}
diff --git a/drivers/char/ftape/lowlevel/ftape-format.h b/drivers/char/ftape/lowlevel/ftape-format.h
deleted file mode 100644
index f151615..0000000
--- a/drivers/char/ftape/lowlevel/ftape-format.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef _FTAPE_FORMAT_H
-#define _FTAPE_FORMAT_H
-
-/*
- * Copyright (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-format.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:13 $
- *
- *      This file contains the low level definitions for the
- *      formatting support for the QIC-40/80/3010/3020 floppy-tape
- *      driver "ftape" for Linux.
- */
-
-#ifdef __KERNEL__
-extern int ftape_format_track(const unsigned int track, const __u8 gap3);
-extern int ftape_format_status(unsigned int *segment_id);
-extern int ftape_verify_segment(const unsigned int segment_id, SectorMap *bsm);
-#endif /* __KERNEL__ */
-
-#endif
diff --git a/drivers/char/ftape/lowlevel/ftape-init.c b/drivers/char/ftape/lowlevel/ftape-init.c
deleted file mode 100644
index 4998132..0000000
--- a/drivers/char/ftape/lowlevel/ftape-init.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- *      Copyright (C) 1993-1996 Bas Laarhoven,
- *                (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- *      This file contains the code that interfaces the kernel
- *      for the QIC-40/80/3010/3020 floppy-tape driver for Linux.
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/major.h>
-
-#include <linux/ftape.h>
-#include <linux/init.h>
-#include <linux/qic117.h>
-#ifdef CONFIG_ZFTAPE
-#include <linux/zftape.h>
-#endif
-
-#include "../lowlevel/ftape-init.h"
-#include "../lowlevel/ftape-io.h"
-#include "../lowlevel/ftape-read.h"
-#include "../lowlevel/ftape-write.h"
-#include "../lowlevel/ftape-ctl.h"
-#include "../lowlevel/ftape-rw.h"
-#include "../lowlevel/fdc-io.h"
-#include "../lowlevel/ftape-buffer.h"
-#include "../lowlevel/ftape-proc.h"
-#include "../lowlevel/ftape-tracing.h"
-
-
-#if defined(MODULE) && !defined(CONFIG_FT_NO_TRACE_AT_ALL)
-static int ft_tracing = -1;
-#endif
-
-
-/*  Called by modules package when installing the driver
- *  or by kernel during the initialization phase
- */
-static int __init ftape_init(void)
-{
-	TRACE_FUN(ft_t_flow);
-
-#ifdef MODULE
-#ifndef CONFIG_FT_NO_TRACE_AT_ALL
-	if (ft_tracing != -1) {
-		ftape_tracing = ft_tracing;
-	}
-#endif
-	printk(KERN_INFO FTAPE_VERSION "\n");
-        if (TRACE_LEVEL >= ft_t_info) {
-		printk(
-KERN_INFO "(c) 1993-1996 Bas Laarhoven (bas@vimec.nl)\n"
-KERN_INFO "(c) 1995-1996 Kai Harrekilde-Petersen (khp@dolphinics.no)\n"
-KERN_INFO "(c) 1996-1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de)\n"
-KERN_INFO "QIC-117 driver for QIC-40/80/3010/3020 floppy tape drives\n");
-        }
-#else /* !MODULE */
-	/* print a short no-nonsense boot message */
-	printk(KERN_INFO FTAPE_VERSION "\n");
-#endif /* MODULE */
-	TRACE(ft_t_info, "installing QIC-117 floppy tape hardware drive ... ");
-	TRACE(ft_t_info, "ftape_init @ 0x%p", ftape_init);
-	/*  Allocate the DMA buffers. They are deallocated at cleanup() time.
-	 */
-#ifdef TESTING
-#ifdef MODULE
-	while (ftape_set_nr_buffers(CONFIG_FT_NR_BUFFERS) < 0) {
-		ftape_sleep(FT_SECOND/20);
-		if (signal_pending(current)) {
-			(void)ftape_set_nr_buffers(0);
-			TRACE(ft_t_bug,
-			      "Killed by signal while allocating buffers.");
-			TRACE_ABORT(-EINTR, 
-				    ft_t_bug, "Free up memory and retry");
-		}
-	}
-#else
-	TRACE_CATCH(ftape_set_nr_buffers(CONFIG_FT_NR_BUFFERS),
-		    (void)ftape_set_nr_buffers(0));
-#endif
-#else
-	TRACE_CATCH(ftape_set_nr_buffers(CONFIG_FT_NR_BUFFERS),
-		    (void)ftape_set_nr_buffers(0));
-#endif
-	ft_drive_sel = -1;
-	ft_failure   = 1;         /* inhibit any operation but open */
-	ftape_udelay_calibrate(); /* must be before fdc_wait_calibrate ! */
-	fdc_wait_calibrate();
-#if defined(CONFIG_PROC_FS) && defined(CONFIG_FT_PROC_FS)
-	(void)ftape_proc_init();
-#endif
-#ifdef CONFIG_ZFTAPE
-	(void)zft_init();
-#endif
-	TRACE_EXIT 0;
-}
-
-module_param(ft_fdc_base,       uint, 0);
-MODULE_PARM_DESC(ft_fdc_base,  "Base address of FDC controller.");
-module_param(ft_fdc_irq,        uint, 0);
-MODULE_PARM_DESC(ft_fdc_irq,   "IRQ (interrupt channel) to use.");
-module_param(ft_fdc_dma,        uint, 0);
-MODULE_PARM_DESC(ft_fdc_dma,   "DMA channel to use.");
-module_param(ft_fdc_threshold,  uint, 0);
-MODULE_PARM_DESC(ft_fdc_threshold,  "Threshold of the FDC Fifo.");
-module_param(ft_fdc_rate_limit, uint, 0);
-MODULE_PARM_DESC(ft_fdc_rate_limit, "Maximal data rate for FDC.");
-module_param(ft_probe_fc10,     bool, 0);
-MODULE_PARM_DESC(ft_probe_fc10,
-	    "If non-zero, probe for a Colorado FC-10/FC-20 controller.");
-module_param(ft_mach2,          bool, 0);
-MODULE_PARM_DESC(ft_mach2,
-	    "If non-zero, probe for a Mountain MACH-2 controller.");
-#if defined(MODULE) && !defined(CONFIG_FT_NO_TRACE_AT_ALL)
-module_param(ft_tracing,        int, 0644);
-MODULE_PARM_DESC(ft_tracing,
-	    "Amount of debugging output, 0 <= tracing <= 8, default 3.");
-#endif
-
-MODULE_AUTHOR(
-	"(c) 1993-1996 Bas Laarhoven (bas@vimec.nl), "
-	"(c) 1995-1996 Kai Harrekilde-Petersen (khp@dolphinics.no), "
-	"(c) 1996, 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de)");
-MODULE_DESCRIPTION(
-	"QIC-117 driver for QIC-40/80/3010/3020 floppy tape drives.");
-MODULE_LICENSE("GPL");
-
-static void __exit ftape_exit(void)
-{
-	TRACE_FUN(ft_t_flow);
-
-#if defined(CONFIG_PROC_FS) && defined(CONFIG_FT_PROC_FS)
-	ftape_proc_destroy();
-#endif
-	(void)ftape_set_nr_buffers(0);
-        printk(KERN_INFO "ftape: unloaded.\n");
-	TRACE_EXIT;
-}
-
-module_init(ftape_init);
-module_exit(ftape_exit);
diff --git a/drivers/char/ftape/lowlevel/ftape-init.h b/drivers/char/ftape/lowlevel/ftape-init.h
deleted file mode 100644
index 99a7b8a..0000000
--- a/drivers/char/ftape/lowlevel/ftape-init.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef _FTAPE_INIT_H
-#define _FTAPE_INIT_H
-
-/*
- * Copyright (C) 1993-1996 Bas Laarhoven,
- *           (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-init.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:16 $
- *
- * This file contains the definitions for the interface to 
- * the Linux kernel for floppy tape driver ftape.
- *
- */
-
-#include <linux/linkage.h>
-#include <linux/signal.h>
-
-#define _NEVER_BLOCK    (sigmask(SIGKILL) | sigmask(SIGSTOP))
-#define _DONT_BLOCK     (_NEVER_BLOCK | sigmask(SIGINT))
-#define _DO_BLOCK       (sigmask(SIGPIPE))
-
-#ifndef QIC117_TAPE_MAJOR
-#define QIC117_TAPE_MAJOR 27
-#endif
-
-#endif
diff --git a/drivers/char/ftape/lowlevel/ftape-io.c b/drivers/char/ftape/lowlevel/ftape-io.c
deleted file mode 100644
index 259015a..0000000
--- a/drivers/char/ftape/lowlevel/ftape-io.c
+++ /dev/null
@@ -1,992 +0,0 @@
-/*
- *      Copyright (C) 1993-1996 Bas Laarhoven,
- *                (C) 1996      Kai Harrekilde-Petersen,
- *                (C) 1997      Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-io.c,v $
- * $Revision: 1.4 $
- * $Date: 1997/11/11 14:02:36 $
- *
- *      This file contains the general control functions for the
- *      QIC-40/80/3010/3020 floppy-tape driver "ftape" for Linux.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <asm/system.h>
-#include <linux/ioctl.h>
-#include <linux/mtio.h>
-#include <linux/delay.h>
-
-#include <linux/ftape.h>
-#include <linux/qic117.h>
-#include "../lowlevel/ftape-tracing.h"
-#include "../lowlevel/fdc-io.h"
-#include "../lowlevel/ftape-io.h"
-#include "../lowlevel/ftape-ctl.h"
-#include "../lowlevel/ftape-rw.h"
-#include "../lowlevel/ftape-write.h"
-#include "../lowlevel/ftape-read.h"
-#include "../lowlevel/ftape-init.h"
-#include "../lowlevel/ftape-calibr.h"
-
-/*      Global vars.
- */
-/* NOTE: sectors start numbering at 1, all others at 0 ! */
-ft_timeout_table ftape_timeout;
-unsigned int ftape_tape_len;
-volatile qic117_cmd_t ftape_current_command;
-const struct qic117_command_table qic117_cmds[] = QIC117_COMMANDS;
-int ftape_might_be_off_track;
-
-/*      Local vars.
- */
-static int diagnostic_mode;
-static unsigned int ftape_udelay_count;
-static unsigned int ftape_udelay_time;
-
-void ftape_udelay(unsigned int usecs)
-{
-	volatile int count = (ftape_udelay_count * usecs +
-                              ftape_udelay_count - 1) / ftape_udelay_time;
-	volatile int i;
-
-	while (count-- > 0) {
-		for (i = 0; i < 20; ++i);
-	}
-}
-
-void ftape_udelay_calibrate(void)
-{
-	ftape_calibrate("ftape_udelay",
-			ftape_udelay, &ftape_udelay_count, &ftape_udelay_time);
-}
-
-/*      Delay (msec) routine.
- */
-void ftape_sleep(unsigned int time)
-{
-	TRACE_FUN(ft_t_any);
-
-	time *= 1000;   /* msecs -> usecs */
-	if (time < FT_USPT) {
-		/*  Time too small for scheduler, do a busy wait ! */
-		ftape_udelay(time);
-	} else {
-		long timeout;
-		unsigned long flags;
-		unsigned int ticks = (time + FT_USPT - 1) / FT_USPT;
-
-		TRACE(ft_t_any, "%d msec, %d ticks", time/1000, ticks);
-		timeout = ticks;
-		save_flags(flags);
-		sti();
-		msleep_interruptible(jiffies_to_msecs(timeout));
-		/*  Mmm. Isn't current->blocked == 0xffffffff ?
-		 */
-		if (signal_pending(current)) {
-			TRACE(ft_t_err, "awoken by non-blocked signal :-(");
-		}
-		restore_flags(flags);
-	}
-	TRACE_EXIT;
-}
-
-/*  send a command or parameter to the drive
- *  Generates # of step pulses.
- */
-static inline int ft_send_to_drive(int arg)
-{
-	/*  Always wait for a command_timeout period to separate
-	 *  individual commands and/or parameters.
-	 */
-	ftape_sleep(3 * FT_MILLISECOND);
-	/*  Keep cylinder nr within range, step towards home if possible.
-	 */
-	if (ftape_current_cylinder >= arg) {
-		return fdc_seek(ftape_current_cylinder - arg);
-	} else {
-		return fdc_seek(ftape_current_cylinder + arg);
-	}
-}
-
-/* forward */ int ftape_report_raw_drive_status(int *status);
-
-static int ft_check_cmd_restrictions(qic117_cmd_t command)
-{
-	int status = -1;
-	TRACE_FUN(ft_t_any);
-	
-	TRACE(ft_t_flow, "%s", qic117_cmds[command].name);
-	/* A new motion command during an uninterruptible (motion)
-	 *  command requires a ready status before the new command can
-	 *  be issued. Otherwise a new motion command needs to be
-	 *  checked against required status.
-	 */
-	if (qic117_cmds[command].cmd_type == motion &&
-	    qic117_cmds[ftape_current_command].non_intr) {
-		ftape_report_raw_drive_status(&status);
-		if ((status & QIC_STATUS_READY) == 0) {
-			TRACE(ft_t_noise,
-			      "motion cmd (%d) during non-intr cmd (%d)",
-			      command, ftape_current_command);
-			TRACE(ft_t_noise, "waiting until drive gets ready");
-			ftape_ready_wait(ftape_timeout.seek,
-					 &status);
-		}
-	}
-	if (qic117_cmds[command].mask != 0) {
-		__u8 difference;
-		/*  Some commands do require a certain status:
-		 */
-		if (status == -1) {	/* not yet set */
-			ftape_report_raw_drive_status(&status);
-		}
-		difference = ((status ^ qic117_cmds[command].state) &
-			      qic117_cmds[command].mask);
-		/*  Wait until the drive gets
-		 *  ready. This may last forever if
-		 *  the drive never gets ready... 
-		 */
-		while ((difference & QIC_STATUS_READY) != 0) {
-			TRACE(ft_t_noise, "command %d issued while not ready",
-			      command);
-			TRACE(ft_t_noise, "waiting until drive gets ready");
-			if (ftape_ready_wait(ftape_timeout.seek,
-					     &status) == -EINTR) {
-				/*  Bail out on signal !
-				 */
-				TRACE_ABORT(-EINTR, ft_t_warn,
-				      "interrupted by non-blockable signal");
-			}
-			difference = ((status ^ qic117_cmds[command].state) &
-				      qic117_cmds[command].mask);
-		}
-		while ((difference & QIC_STATUS_ERROR) != 0) {
-			int err;
-			qic117_cmd_t cmd;
-
-			TRACE(ft_t_noise,
-			      "command %d issued while error pending",
-			      command);
-			TRACE(ft_t_noise, "clearing error status");
-			ftape_report_error(&err, &cmd, 1);
-			ftape_report_raw_drive_status(&status);
-			difference = ((status ^ qic117_cmds[command].state) &
-				      qic117_cmds[command].mask);
-			if ((difference & QIC_STATUS_ERROR) != 0) {
-				/*  Bail out on fatal signal !
-				 */
-				FT_SIGNAL_EXIT(_NEVER_BLOCK);
-			}
-		}
-		if (difference) {
-			/*  Any remaining difference can't be solved
-			 *  here.  
-			 */
-			if (difference & (QIC_STATUS_CARTRIDGE_PRESENT |
-					  QIC_STATUS_NEW_CARTRIDGE |
-					  QIC_STATUS_REFERENCED)) {
-				TRACE(ft_t_warn,
-				      "Fatal: tape removed or reinserted !");
-				ft_failure = 1;
-			} else {
-				TRACE(ft_t_err, "wrong state: 0x%02x should be: 0x%02x",
-				      status & qic117_cmds[command].mask,
-				      qic117_cmds[command].state);
-			}
-			TRACE_EXIT -EIO;
-		}
-		if (~status & QIC_STATUS_READY & qic117_cmds[command].mask) {
-			TRACE_ABORT(-EBUSY, ft_t_err, "Bad: still busy!");
-		}
-	}
-	TRACE_EXIT 0;
-}
-
-/*      Issue a tape command:
- */
-int ftape_command(qic117_cmd_t command)
-{
-	int result = 0;
-	static int level;
-	TRACE_FUN(ft_t_any);
-
-	if ((unsigned int)command > NR_ITEMS(qic117_cmds)) {
-		/*  This is a bug we'll want to know about too.
-		 */
-		TRACE_ABORT(-EIO, ft_t_bug, "bug - bad command: %d", command);
-	}
-	if (++level > 5) { /*  This is a bug we'll want to know about. */
-		--level;
-		TRACE_ABORT(-EIO, ft_t_bug, "bug - recursion for command: %d",
-			    command);
-	}
-	/*  disable logging and restriction check for some commands,
-	 *  check all other commands that have a prescribed starting
-	 *  status.
-	 */
-	if (diagnostic_mode) {
-		TRACE(ft_t_flow, "diagnostic command %d", command);
-	} else if (command == QIC_REPORT_DRIVE_STATUS ||
-		   command == QIC_REPORT_NEXT_BIT) {
-		TRACE(ft_t_any, "%s", qic117_cmds[command].name);
-	} else {
-		TRACE_CATCH(ft_check_cmd_restrictions(command), --level);
-	}
-	/*  Now all conditions are met or result was < 0.
-	 */
-	result = ft_send_to_drive((unsigned int)command);
-	if (qic117_cmds[command].cmd_type == motion &&
-	    command != QIC_LOGICAL_FORWARD && command != QIC_STOP_TAPE) {
-		ft_location.known = 0;
-	}
-	ftape_current_command = command;
-	--level;
-	TRACE_EXIT result;
-}
-
-/*      Send a tape command parameter:
- *      Generates command # of step pulses.
- *      Skips tape-status call !
- */
-int ftape_parameter(unsigned int parameter)
-{
-	TRACE_FUN(ft_t_any);
-
-	TRACE(ft_t_flow, "called with parameter = %d", parameter);
-	TRACE_EXIT ft_send_to_drive(parameter + 2);
-}
-
-/*      Wait for the drive to get ready.
- *      timeout time in milli-seconds
- *      Returned status is valid if result != -EIO
- *
- *      Should we allow to be killed by SIGINT?  (^C)
- *      Would be nice at least for large timeouts.
- */
-int ftape_ready_wait(unsigned int timeout, int *status)
-{
-	unsigned long t0;
-	unsigned int poll_delay;
-	int signal_retries;
-	TRACE_FUN(ft_t_any);
-
-	/*  the following ** REALLY ** reduces the system load when
-	 *  e.g. one simply rewinds or retensions. The tape is slow 
-	 *  anyway. It is really not necessary to detect error 
-	 *  conditions with 1/10 seconds granularity
-	 *
-	 *  On my AMD 133 MHz 486: 100 ms: 23% system load
-	 *                        1  sec:  5%
-	 *                        5  sec:  0.6%, yeah
-	 */
-	if (timeout <= FT_SECOND) {
-		poll_delay = 100 * FT_MILLISECOND;
-		signal_retries = 20; /* two seconds */
-	} else if (timeout < 20 * FT_SECOND) {
-		TRACE(ft_t_flow, "setting poll delay to 1 second");
-		poll_delay = FT_SECOND;
-		signal_retries = 2; /* two seconds */
-	} else {
-		TRACE(ft_t_flow, "setting poll delay to 5 seconds");
-		poll_delay = 5 * FT_SECOND;
-		signal_retries = 1; /* five seconds */
-	}
-	for (;;) {
-		t0 = jiffies;
-		TRACE_CATCH(ftape_report_raw_drive_status(status),);
-		if (*status & QIC_STATUS_READY) {
-			TRACE_EXIT 0;
-		}
-		if (!signal_retries--) {
-			FT_SIGNAL_EXIT(_NEVER_BLOCK);
-		}
-		if ((int)timeout >= 0) {
-			/* this will fail when jiffies wraps around about
-			 * once every year :-)
-			 */
-			timeout -= ((jiffies - t0) * FT_SECOND) / HZ;
-			if (timeout <= 0) {
-				TRACE_ABORT(-ETIME, ft_t_err, "timeout");
-			}
-			ftape_sleep(poll_delay);
-			timeout -= poll_delay;
-		} else {
-			ftape_sleep(poll_delay);
-		}
-	}
-	TRACE_EXIT -ETIME;
-}
-
-/*      Issue command and wait up to timeout milli seconds for drive ready
- */
-int ftape_command_wait(qic117_cmd_t command, unsigned int timeout, int *status)
-{
-	int result;
-
-	/* Drive should be ready, issue command
-	 */
-	result = ftape_command(command);
-	if (result >= 0) {
-		result = ftape_ready_wait(timeout, status);
-	}
-	return result;
-}
-
-static int ftape_parameter_wait(unsigned int parm, unsigned int timeout, int *status)
-{
-	int result;
-
-	/* Drive should be ready, issue command
-	 */
-	result = ftape_parameter(parm);
-	if (result >= 0) {
-		result = ftape_ready_wait(timeout, status);
-	}
-	return result;
-}
-
-/*--------------------------------------------------------------------------
- *      Report operations
- */
-
-/* Query the drive about its status.  The command is sent and
-   result_length bits of status are returned (2 extra bits are read
-   for start and stop). */
-
-int ftape_report_operation(int *status,
-			   qic117_cmd_t command,
-			   int result_length)
-{
-	int i, st3;
-	unsigned int t0;
-	unsigned int dt;
-	TRACE_FUN(ft_t_any);
-
-	TRACE_CATCH(ftape_command(command),);
-	t0 = ftape_timestamp();
-	i = 0;
-	do {
-		++i;
-		ftape_sleep(3 * FT_MILLISECOND);	/* see remark below */
-		TRACE_CATCH(fdc_sense_drive_status(&st3),);
-		dt = ftape_timediff(t0, ftape_timestamp());
-		 *  Ack should be asserted within Ttimeout + Tack = 6 msec.
-		 *  Looks like some drives fail to do this so extend this
-		 *  period to 300 msec.
-		 */
-	} while (!(st3 & ST3_TRACK_0) && dt < 300000);
-	if (!(st3 & ST3_TRACK_0)) {
-		TRACE(ft_t_err,
-		      "No acknowledge after %u msec. (%i iter)", dt / 1000, i);
-		TRACE_ABORT(-EIO, ft_t_err, "timeout on Acknowledge");
-	}
-	/*  dt may be larger than expected because of other tasks
-	 *  scheduled while we were sleeping.
-	 */
-	if (i > 1 && dt > 6000) {
-		TRACE(ft_t_err, "Acknowledge after %u msec. (%i iter)",
-		      dt / 1000, i);
-	}
-	*status = 0;
-	for (i = 0; i < result_length + 1; i++) {
-		TRACE_CATCH(ftape_command(QIC_REPORT_NEXT_BIT),);
-		TRACE_CATCH(fdc_sense_drive_status(&st3),);
-		if (i < result_length) {
-			*status |= ((st3 & ST3_TRACK_0) ? 1 : 0) << i;
-		} else if ((st3 & ST3_TRACK_0) == 0) {
-			TRACE_ABORT(-EIO, ft_t_err, "missing status stop bit");
-		}
-	}
-	/* this command will put track zero and index back into normal state */
-	(void)ftape_command(QIC_REPORT_NEXT_BIT);
-	TRACE_EXIT 0;
-}
-
-/* Report the current drive status. */
-
-int ftape_report_raw_drive_status(int *status)
-{
-	int result;
-	int count = 0;
-	TRACE_FUN(ft_t_any);
-
-	do {
-		result = ftape_report_operation(status,
-						QIC_REPORT_DRIVE_STATUS, 8);
-	} while (result < 0 && ++count <= 3);
-	if (result < 0) {
-		TRACE_ABORT(-EIO, ft_t_err,
-			    "report_operation failed after %d trials", count);
-	}
-	if ((*status & 0xff) == 0xff) {
-		TRACE_ABORT(-EIO, ft_t_err,
-			    "impossible drive status 0xff");
-	}
-	if (*status & QIC_STATUS_READY) {
-		ftape_current_command = QIC_NO_COMMAND; /* completed */
-	}
-	ft_last_status.status.drive_status = (__u8)(*status & 0xff);
-	TRACE_EXIT 0;
-}
-
-int ftape_report_drive_status(int *status)
-{
-	TRACE_FUN(ft_t_any);
-
-	TRACE_CATCH(ftape_report_raw_drive_status(status),);
-	if (*status & QIC_STATUS_NEW_CARTRIDGE ||
-	    !(*status & QIC_STATUS_CARTRIDGE_PRESENT)) {
-		ft_failure = 1;	/* will inhibit further operations */
-		TRACE_EXIT -EIO;
-	}
-	if (*status & QIC_STATUS_READY && *status & QIC_STATUS_ERROR) {
-		/*  Let caller handle all errors */
-		TRACE_ABORT(1, ft_t_warn, "warning: error status set!");
-	}
-	TRACE_EXIT 0;
-}
-
-int ftape_report_error(unsigned int *error,
-		       qic117_cmd_t *command, int report)
-{
-	static const ftape_error ftape_errors[] = QIC117_ERRORS;
-	int code;
-	TRACE_FUN(ft_t_any);
-
-	TRACE_CATCH(ftape_report_operation(&code, QIC_REPORT_ERROR_CODE, 16),);
-	*error   = (unsigned int)(code & 0xff);
-	*command = (qic117_cmd_t)((code>>8)&0xff);
-	/*  remember hardware status, maybe useful for status ioctls
-	 */
-	ft_last_error.error.command = (__u8)*command;
-	ft_last_error.error.error   = (__u8)*error;
-	if (!report) {
-		TRACE_EXIT 0;
-	}
-	if (*error == 0) {
-		TRACE_ABORT(0, ft_t_info, "No error");
-	}
-	TRACE(ft_t_info, "errorcode: %d", *error);
-	if (*error < NR_ITEMS(ftape_errors)) {
-		TRACE(ft_t_noise, "%sFatal ERROR:",
-		      (ftape_errors[*error].fatal ? "" : "Non-"));
-		TRACE(ft_t_noise, "%s ...", ftape_errors[*error].message);
-	} else {
-		TRACE(ft_t_noise, "Unknown ERROR !");
-	}
-	if ((unsigned int)*command < NR_ITEMS(qic117_cmds) &&
-	    qic117_cmds[*command].name != NULL) {
-		TRACE(ft_t_noise, "... caused by command \'%s\'",
-		      qic117_cmds[*command].name);
-	} else {
-		TRACE(ft_t_noise, "... caused by unknown command %d",
-		      *command);
-	}
-	TRACE_EXIT 0;
-}
-
-int ftape_report_configuration(qic_model *model,
-			       unsigned int *rate,
-			       int *qic_std,
-			       int *tape_len)
-{
-	int result;
-	int config;
-	int status;
-	static const unsigned int qic_rates[ 4] = { 250, 2000, 500, 1000 };
-	TRACE_FUN(ft_t_any);
-
-	result = ftape_report_operation(&config,
-					QIC_REPORT_DRIVE_CONFIGURATION, 8);
-	if (result < 0) {
-		ft_last_status.status.drive_config = (__u8)0x00;
-		*model = prehistoric;
-		*rate = 500;
-		*qic_std = QIC_TAPE_QIC40;
-		*tape_len = 205;
-		TRACE_EXIT 0;
-	} else {
-		ft_last_status.status.drive_config = (__u8)(config & 0xff);
-	}
-	*rate = qic_rates[(config & QIC_CONFIG_RATE_MASK) >> QIC_CONFIG_RATE_SHIFT];
-	result = ftape_report_operation(&status, QIC_REPORT_TAPE_STATUS, 8);
-	if (result < 0) {
-		ft_last_status.status.tape_status = (__u8)0x00;
-		/* pre- QIC117 rev C spec. drive, QIC_CONFIG_80 bit is valid.
-		 */
-		*qic_std = (config & QIC_CONFIG_80) ?
-			QIC_TAPE_QIC80 : QIC_TAPE_QIC40;
-		/* ?? how's about 425ft tapes? */
-		*tape_len = (config & QIC_CONFIG_LONG) ? 307 : 0;
-		*model = pre_qic117c;
-		result = 0;
-	} else {
-		ft_last_status.status.tape_status = (__u8)(status & 0xff);
-		*model = post_qic117b;
-		TRACE(ft_t_any, "report tape status result = %02x", status);
-		/* post- QIC117 rev C spec. drive, QIC_CONFIG_80 bit is
-		 * invalid. 
-		 */
-		switch (status & QIC_TAPE_STD_MASK) {
-		case QIC_TAPE_QIC40:
-		case QIC_TAPE_QIC80:
-		case QIC_TAPE_QIC3020:
-		case QIC_TAPE_QIC3010:
-			*qic_std = status & QIC_TAPE_STD_MASK;
-			break;
-		default:
-			*qic_std = -1;
-			break;
-		}
-		switch (status & QIC_TAPE_LEN_MASK) {
-		case QIC_TAPE_205FT:
-			/* 205 or 425+ ft 550 Oe tape */
-			*tape_len = 0;
-			break;
-		case QIC_TAPE_307FT:
-			/* 307.5 ft 550 Oe Extended Length (XL) tape */
-			*tape_len = 307;
-			break;
-		case QIC_TAPE_VARIABLE:
-			/* Variable length 550 Oe tape */
-			*tape_len = 0;
-			break;
-		case QIC_TAPE_1100FT:
-			/* 1100 ft 550 Oe tape */
-			*tape_len = 1100;
-			break;
-		case QIC_TAPE_FLEX:
-			/* Variable length 900 Oe tape */
-			*tape_len = 0;
-			break;
-		default:
-			*tape_len = -1;
-			break;
-		}
-		if (*qic_std == -1 || *tape_len == -1) {
-			TRACE(ft_t_any,
-			      "post qic-117b spec drive with unknown tape");
-		}
-		result = *tape_len == -1 ? -EIO : 0;
-		if (status & QIC_TAPE_WIDE) {
-			switch (*qic_std) {
-			case QIC_TAPE_QIC80:
-				TRACE(ft_t_info, "TR-1 tape detected");
-				break;
-			case QIC_TAPE_QIC3010:
-				TRACE(ft_t_info, "TR-2 tape detected");
-				break;
-			case QIC_TAPE_QIC3020:
-				TRACE(ft_t_info, "TR-3 tape detected");
-				break;
-			default:
-				TRACE(ft_t_warn,
-				      "Unknown Travan tape type detected");
-				break;
-			}
-		}
-	}
-	TRACE_EXIT (result < 0) ? -EIO : 0;
-}
-
-static int ftape_report_rom_version(int *version)
-{
-
-	if (ftape_report_operation(version, QIC_REPORT_ROM_VERSION, 8) < 0) {
-		return -EIO;
-	} else {
-		return 0;
-	}
-}
-
-void ftape_report_vendor_id(unsigned int *id)
-{
-	int result;
-	TRACE_FUN(ft_t_any);
-
-	/* We'll try to get a vendor id from the drive.  First
-	 * according to the QIC-117 spec, a 16-bit id is requested.
-	 * If that fails we'll try an 8-bit version, and if that also
-	 * fails we'll fall back to an undocumented query.
-	 */
-	result = ftape_report_operation((int *) id, QIC_REPORT_VENDOR_ID, 16);
-	if (result < 0) {
-		result = ftape_report_operation((int *) id,
-						QIC_REPORT_VENDOR_ID, 8);
-		if (result < 0) {
-			/* The following is an undocumented call found
-			 * in the CMS code.
-			 */
-			result = ftape_report_operation((int *) id, 24, 8);
-			if (result < 0) {
-				*id = UNKNOWN_VENDOR;
-			} else {
-				TRACE(ft_t_noise, "got old 8 bit id: %04x",
-				      *id);
-				*id |= 0x20000;
-			}
-		} else {
-			TRACE(ft_t_noise, "got 8 bit id: %04x", *id);
-			*id |= 0x10000;
-		}
-	} else {
-		TRACE(ft_t_noise, "got 16 bit id: %04x", *id);
-	}
-	if (*id == 0x0047) {
-		int version;
-		int sign;
-
-		if (ftape_report_rom_version(&version) < 0) {
-			TRACE(ft_t_bug, "report rom version failed");
-			TRACE_EXIT;
-		}
-		TRACE(ft_t_noise, "CMS rom version: %d", version);
-		ftape_command(QIC_ENTER_DIAGNOSTIC_1);
-		ftape_command(QIC_ENTER_DIAGNOSTIC_1);
-		diagnostic_mode = 1;
-		if (ftape_report_operation(&sign, 9, 8) < 0) {
-			unsigned int error;
-			qic117_cmd_t command;
-
-			ftape_report_error(&error, &command, 1);
-			ftape_command(QIC_ENTER_PRIMARY_MODE);
-			diagnostic_mode = 0;
-			TRACE_EXIT;	/* failure ! */
-		} else {
-			TRACE(ft_t_noise, "CMS signature: %02x", sign);
-		}
-		if (sign == 0xa5) {
-			result = ftape_report_operation(&sign, 37, 8);
-			if (result < 0) {
-				if (version >= 63) {
-					*id = 0x8880;
-					TRACE(ft_t_noise,
-					      "This is an Iomega drive !");
-				} else {
-					*id = 0x0047;
-					TRACE(ft_t_noise,
-					      "This is a real CMS drive !");
-				}
-			} else {
-				*id = 0x0047;
-				TRACE(ft_t_noise, "CMS status: %d", sign);
-			}
-		} else {
-			*id = UNKNOWN_VENDOR;
-		}
-		ftape_command(QIC_ENTER_PRIMARY_MODE);
-		diagnostic_mode = 0;
-	}
-	TRACE_EXIT;
-}
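
/*
 * Editorial sketch (not from the driver): the probing order above is a
 * 16-bit vendor id, then the older 8-bit form, then an undocumented CMS
 * query, with the fallback results tagged in bits 16/17 so later lookups
 * can tell the formats apart.  probe() and vendor_id_cmd are hypothetical
 * stand-ins for ftape_report_operation() and QIC_REPORT_VENDOR_ID; only the
 * undocumented opcode 24 is taken from the code above.
 */
static int probe_vendor_id(int (*probe)(int *id, int cmd, int bits),
			   int vendor_id_cmd)
{
	int id;

	if (probe(&id, vendor_id_cmd, 16) >= 0)
		return id;		/* native 16-bit id */
	if (probe(&id, vendor_id_cmd, 8) >= 0)
		return id | 0x10000;	/* old 8-bit id, tagged */
	if (probe(&id, 24, 8) >= 0)
		return id | 0x20000;	/* undocumented CMS query, tagged */
	return -1;			/* unknown vendor */
}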
-
-static int qic_rate_code(unsigned int rate)
-{
-	switch (rate) {
-	case 250:
-		return QIC_CONFIG_RATE_250;
-	case 500:
-		return QIC_CONFIG_RATE_500;
-	case 1000:
-		return QIC_CONFIG_RATE_1000;
-	case 2000:
-		return QIC_CONFIG_RATE_2000;
-	default:
-		return QIC_CONFIG_RATE_500;
-	}
-}
-
-static int ftape_set_rate_test(unsigned int *max_rate)
-{
-	unsigned int error;
-	qic117_cmd_t command;
-	int status;
-	int supported = 0;
-	TRACE_FUN(ft_t_any);
-
-	/*  Check if the drive does support the select rate command
-	 *  by testing all different settings. If any one is accepted
-	 *  we assume the command is supported, else not.
-	 */
-	for (*max_rate = 2000; *max_rate >= 250; *max_rate /= 2) {
-		if (ftape_command(QIC_SELECT_RATE) < 0) {
-			continue;
-		}		
-		if (ftape_parameter_wait(qic_rate_code(*max_rate),
-					 1 * FT_SECOND, &status) < 0) {
-			continue;
-		}
-		if (status & QIC_STATUS_ERROR) {
-			ftape_report_error(&error, &command, 0);
-			continue;
-		}
-		supported = 1; /* did accept a request */
-		break;
-	}
-	TRACE(ft_t_noise, "Select Rate command is%s supported", 
-	      supported ? "" : " not");
-	TRACE_EXIT supported;
-}
-
-int ftape_set_data_rate(unsigned int new_rate /* Kbps */, unsigned int qic_std)
-{
-	int status;
-	int result = 0;
-	unsigned int data_rate = new_rate;
-	static int supported;
-	int rate_changed = 0;
-	qic_model dummy_model;
-	unsigned int dummy_qic_std, dummy_tape_len;
-	TRACE_FUN(ft_t_any);
-
-	if (ft_drive_max_rate == 0) { /* first time */
-		supported = ftape_set_rate_test(&ft_drive_max_rate);
-	}
-	if (supported) {
-		ftape_command(QIC_SELECT_RATE);
-		result = ftape_parameter_wait(qic_rate_code(new_rate),
-					      1 * FT_SECOND, &status);
-		if (result >= 0 && !(status & QIC_STATUS_ERROR)) {
-			rate_changed = 1;
-		}
-	}
-	TRACE_CATCH(result = ftape_report_configuration(&dummy_model,
-							&data_rate, 
-							&dummy_qic_std,
-							&dummy_tape_len),);
-	if (data_rate != new_rate) {
-		if (!supported) {
-			TRACE(ft_t_warn, "Rate change not supported!");
-		} else if (rate_changed) {
-			TRACE(ft_t_warn, "Requested: %d, got %d",
-			      new_rate, data_rate);
-		} else {
-			TRACE(ft_t_warn, "Rate change failed!");
-		}
-		result = -EINVAL;
-	}
-	/*
-	 *  Set data rate and write precompensation as specified:
-	 *
-	 *            |  QIC-40/80  | QIC-3010/3020
-	 *   rate     |   precomp   |    precomp
-	 *  ----------+-------------+--------------
-	 *  250 Kbps. |   250 ns.   |     0 ns.
-	 *  500 Kbps. |   125 ns.   |     0 ns.
-	 *    1 Mbps. |    42 ns.   |     0 ns.
-	 *    2 Mbps  |      N/A    |     0 ns.
-	 */
-	if ((qic_std == QIC_TAPE_QIC40 && data_rate > 500) || 
-	    (qic_std == QIC_TAPE_QIC80 && data_rate > 1000)) {
-		TRACE_ABORT(-EINVAL,
-			    ft_t_warn, "Datarate too high for QIC-mode");
-	}
-	TRACE_CATCH(fdc_set_data_rate(data_rate),_res = -EINVAL);
-	ft_data_rate = data_rate;
-	if (qic_std == QIC_TAPE_QIC40 || qic_std == QIC_TAPE_QIC80) {
-		switch (data_rate) {
-		case 250:
-			fdc_set_write_precomp(250);
-			break;
-		default:
-		case 500:
-			fdc_set_write_precomp(125);
-			break;
-		case 1000:
-			fdc_set_write_precomp(42);
-			break;
-		}
-	} else {
-		fdc_set_write_precomp(0);
-	}
-	TRACE_EXIT result;
-}
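
/*
 * Editorial sketch: the write-precompensation table from the comment above,
 * expressed as a lookup.  QIC-40/80 recordings get rate-dependent
 * precompensation, QIC-3010/3020 always run with 0 ns; the return value is
 * in nanoseconds and 500 Kbps doubles as the fallback, mirroring the switch
 * above.
 */
static unsigned int precomp_ns(int is_qic40_or_80, unsigned int rate_kbps)
{
	if (!is_qic40_or_80)
		return 0;		/* QIC-3010/3020: no precompensation */
	switch (rate_kbps) {
	case 250:
		return 250;
	case 1000:
		return 42;
	case 500:
	default:
		return 125;
	}
}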
-
-/*  The next two functions are used to cope with excessive overrun errors
- */
-int ftape_increase_threshold(void)
-{
-	TRACE_FUN(ft_t_flow);
-
-	if (fdc.type < i82077 || ft_fdc_threshold >= 12) {
-		TRACE_ABORT(-EIO, ft_t_err, "cannot increase fifo threshold");
-	}
-	if (fdc_fifo_threshold(++ft_fdc_threshold, NULL, NULL, NULL) < 0) {
-		TRACE(ft_t_err, "cannot increase fifo threshold");
-		ft_fdc_threshold --;
-		fdc_reset();
-	}
-	TRACE(ft_t_info, "New FIFO threshold: %d", ft_fdc_threshold);
-	TRACE_EXIT 0;
-}
-
-int ftape_half_data_rate(void)
-{
-	if (ft_data_rate < 500) {
-		return -1;
-	}
-	if (ftape_set_data_rate(ft_data_rate / 2, ft_qic_std) < 0) {
-		return -EIO;
-	}
-	ftape_calc_timeouts(ft_qic_std, ft_data_rate, ftape_tape_len);
-	return 0;
-}
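
/*
 * Editorial sketch: one plausible way to combine the two overrun helpers
 * above -- raising the FDC FIFO threshold is the cheap first response, and
 * halving the data rate is the fallback once the threshold can no longer be
 * increased.  The callbacks are hypothetical stand-ins for
 * ftape_increase_threshold() and ftape_half_data_rate(); the real policy
 * lives in the error-recovery path elsewhere in the driver.
 */
static int handle_excessive_overruns(int (*raise_threshold)(void),
				     int (*halve_rate)(void))
{
	if (raise_threshold() == 0)
		return 0;		/* more FIFO headroom was enough */
	return halve_rate();		/* trade speed for reliability */
}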
-
-/*      Seek the head to the specified track.
- */
-int ftape_seek_head_to_track(unsigned int track)
-{
-	int status;
-	TRACE_FUN(ft_t_any);
-
-	ft_location.track = -1; /* remains set in case of error */
-	if (track >= ft_tracks_per_tape) {
-		TRACE_ABORT(-EINVAL, ft_t_bug, "track out of bounds");
-	}
-	TRACE(ft_t_flow, "seeking track %d", track);
-	TRACE_CATCH(ftape_command(QIC_SEEK_HEAD_TO_TRACK),);
-	TRACE_CATCH(ftape_parameter_wait(track, ftape_timeout.head_seek,
-					 &status),);
-	ft_location.track = track;
-	ftape_might_be_off_track = 0;
-	TRACE_EXIT 0;
-}
-
-int ftape_wakeup_drive(wake_up_types method)
-{
-	int status;
-	int motor_on = 0;
-	TRACE_FUN(ft_t_any);
-
-	switch (method) {
-	case wake_up_colorado:
-		TRACE_CATCH(ftape_command(QIC_PHANTOM_SELECT),);
-		TRACE_CATCH(ftape_parameter(0 /* ft_drive_sel ?? */),);
-		break;
-	case wake_up_mountain:
-		TRACE_CATCH(ftape_command(QIC_SOFT_SELECT),);
-		ftape_sleep(FT_MILLISECOND);	/* NEEDED */
-		TRACE_CATCH(ftape_parameter(18),);
-		break;
-	case wake_up_insight:
-		ftape_sleep(100 * FT_MILLISECOND);
-		motor_on = 1;
-		fdc_motor(motor_on);	/* enable is done by motor-on */
-	case no_wake_up:
-		break;
-	default:
-		TRACE_EXIT -ENODEV;	/* unknown wakeup method */
-		break;
-	}
-	/*  If wakeup succeeded we shouldn't get an error here.
-	 */
-	TRACE_CATCH(ftape_report_raw_drive_status(&status),
-		    if (motor_on) {
-			    fdc_motor(0);
-		    });
-	TRACE_EXIT 0;
-}
-
-int ftape_put_drive_to_sleep(wake_up_types method)
-{
-	TRACE_FUN(ft_t_any);
-
-	switch (method) {
-	case wake_up_colorado:
-		TRACE_CATCH(ftape_command(QIC_PHANTOM_DESELECT),);
-		break;
-	case wake_up_mountain:
-		TRACE_CATCH(ftape_command(QIC_SOFT_DESELECT),);
-		break;
-	case wake_up_insight:
-		fdc_motor(0);	/* enable is done by motor-on */
-	case no_wake_up:	/* no wakeup / no sleep ! */
-		break;
-	default:
-		TRACE_EXIT -ENODEV;	/* unknown wakeup method */
-	}
-	TRACE_EXIT 0;
-}
-
-int ftape_reset_drive(void)
-{
-	int result = 0;
-	int status;
-	unsigned int err_code;
-	qic117_cmd_t err_command;
-	int i;
-	TRACE_FUN(ft_t_any);
-
-	/*  We want to re-establish contact with our drive.  Fire a
-	 *  number of reset commands (single step pulses) and pray for
-	 *  success.
-	 */
-	for (i = 0; i < 2; ++i) {
-		TRACE(ft_t_flow, "Resetting fdc");
-		fdc_reset();
-		ftape_sleep(10 * FT_MILLISECOND);
-		TRACE(ft_t_flow, "Reset command to drive");
-		result = ftape_command(QIC_RESET);
-		if (result == 0) {
-			ftape_sleep(1 * FT_SECOND); /*  drive not
-						     *  accessible
-						     *  during 1 second
-						     */
-			TRACE(ft_t_flow, "Re-selecting drive");
-
-			/* Strange, the QIC-117 specs don't mention
-			 * this but the drive gets deselected after a
-			 * soft reset !  So we need to enable it
-			 * again.
-			 */
-			if (ftape_wakeup_drive(ft_drive_type.wake_up) < 0) {
-				TRACE(ft_t_err, "Wakeup failed !");
-			}
-			TRACE(ft_t_flow, "Waiting until drive gets ready");
-			result= ftape_ready_wait(ftape_timeout.reset, &status);
-			if (result == 0 && (status & QIC_STATUS_ERROR)) {
-				result = ftape_report_error(&err_code,
-							    &err_command, 1);
-				if (result == 0 && err_code == 27) {
-					/*  Okay, drive saw reset
-					 *  command and responded as it
-					 *  should
-					 */
-					break;
-				} else {
-					result = -EIO;
-				}
-			} else {
-				result = -EIO;
-			}
-		}
-		FT_SIGNAL_EXIT(_DONT_BLOCK);
-	}
-	if (result != 0) {
-		TRACE(ft_t_err, "General failure to reset tape drive");
-	} else {
-		/*  Restore correct settings: keep original rate 
-		 */
-		ftape_set_data_rate(ft_data_rate, ft_qic_std);
-	}
-	ftape_init_drive_needed = 1;
-	TRACE_EXIT result;
-}
diff --git a/drivers/char/ftape/lowlevel/ftape-io.h b/drivers/char/ftape/lowlevel/ftape-io.h
deleted file mode 100644
index 26a7baa..0000000
--- a/drivers/char/ftape/lowlevel/ftape-io.h
+++ /dev/null
@@ -1,90 +0,0 @@
-#ifndef _FTAPE_IO_H
-#define _FTAPE_IO_H
-
-/*
- * Copyright (C) 1993-1996 Bas Laarhoven,
- *           (C) 1997      Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-io.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:18 $
- *
- *      This file contains definitions for the glue part of the
- *      QIC-40/80/3010/3020 floppy-tape driver "ftape" for Linux.
- */
-
-#include <linux/qic117.h>
-#include <linux/ftape-vendors.h>
-
-typedef struct {
-	unsigned int seek;
-	unsigned int reset;
-	unsigned int rewind;
-	unsigned int head_seek;
-	unsigned int stop;
-	unsigned int pause;
-} ft_timeout_table;
-
-typedef enum {
-	prehistoric, pre_qic117c, post_qic117b, post_qic117d 
-} qic_model;
-
-/*
- *      ftape-io.c defined global vars.
- */
-extern ft_timeout_table ftape_timeout;
-extern unsigned int ftape_tape_len;
-extern volatile qic117_cmd_t ftape_current_command;
-extern const struct qic117_command_table qic117_cmds[];
-extern int ftape_might_be_off_track;
-
-/*
- *      ftape-io.c defined global functions.
- */
-extern void ftape_udelay(unsigned int usecs);
-extern void  ftape_udelay_calibrate(void);
-extern void ftape_sleep(unsigned int time);
-extern void ftape_report_vendor_id(unsigned int *id);
-extern int  ftape_command(qic117_cmd_t command);
-extern int  ftape_command_wait(qic117_cmd_t command,
-			       unsigned int timeout,
-			       int *status);
-extern int  ftape_parameter(unsigned int parameter);
-extern int ftape_report_operation(int *status,
-				  qic117_cmd_t  command,
-				  int result_length);
-extern int ftape_report_configuration(qic_model *model,
-				      unsigned int *rate,
-				      int *qic_std,
-				      int *tape_len);
-extern int ftape_report_drive_status(int *status);
-extern int ftape_report_raw_drive_status(int *status);
-extern int ftape_report_status(int *status);
-extern int ftape_ready_wait(unsigned int timeout, int *status);
-extern int ftape_seek_head_to_track(unsigned int track);
-extern int ftape_set_data_rate(unsigned int new_rate, unsigned int qic_std);
-extern int ftape_report_error(unsigned int *error,
-			      qic117_cmd_t *command,
-			      int report);
-extern int ftape_reset_drive(void);
-extern int ftape_put_drive_to_sleep(wake_up_types method);
-extern int ftape_wakeup_drive(wake_up_types method);
-extern int ftape_increase_threshold(void);
-extern int ftape_half_data_rate(void);
-
-#endif
diff --git a/drivers/char/ftape/lowlevel/ftape-proc.c b/drivers/char/ftape/lowlevel/ftape-proc.c
deleted file mode 100644
index e805b15..0000000
--- a/drivers/char/ftape/lowlevel/ftape-proc.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- *      Copyright (C) 1997 Claus-Justus Heine
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-proc.c,v $
- * $Revision: 1.11 $
- * $Date: 1997/10/24 14:47:37 $
- *
- *      This file contains the procfs interface for the
- *      QIC-40/80/3010/3020 floppy-tape driver "ftape" for Linux.
-
- *	Old code removed, switched to dynamic proc entry.
- */
-
-
-#if defined(CONFIG_PROC_FS) && defined(CONFIG_FT_PROC_FS)
-
-#include <linux/proc_fs.h>
-
-#include <linux/ftape.h>
-#include <linux/init.h>
-#include <linux/qic117.h>
-
-#include "../lowlevel/ftape-io.h"
-#include "../lowlevel/ftape-ctl.h"
-#include "../lowlevel/ftape-proc.h"
-#include "../lowlevel/ftape-tracing.h"
-
-static size_t get_driver_info(char *buf)
-{
-	const char *debug_level[] = { "bugs"  ,
-				      "errors",
-				      "warnings",
-				      "informational",
-				      "noisy",
-				      "program flow",
-				      "fdc and dma",
-				      "data flow",
-				      "anything" };
-
-	return sprintf(buf,
-		       "version       : %s\n"
-		       "used data rate: %d kbit/sec\n"
-		       "dma memory    : %d kb\n"
-		       "debug messages: %s\n",
-		       FTAPE_VERSION,
-		       ft_data_rate,
-		       FT_BUFF_SIZE * ft_nr_buffers >> 10,
-		       debug_level[TRACE_LEVEL]);
-}
-
-static size_t get_tapedrive_info(char *buf)
-{ 
-	return sprintf(buf,
-		       "vendor id : 0x%04x\n"
-		       "drive name: %s\n"
-		       "wind speed: %d ips\n"
-		       "wakeup    : %s\n"
-		       "max. rate : %d kbit/sec\n",
-		       ft_drive_type.vendor_id,
-		       ft_drive_type.name,
-		       ft_drive_type.speed,
-		       ((ft_drive_type.wake_up == no_wake_up)
-			? "No wakeup needed" :
-			((ft_drive_type.wake_up == wake_up_colorado)
-			 ? "Colorado" :
-			 ((ft_drive_type.wake_up == wake_up_mountain)
-			  ? "Mountain" :
-			  ((ft_drive_type.wake_up == wake_up_insight)
-			   ? "Motor on" :
-			   "Unknown")))),
-		       ft_drive_max_rate);
-}
-
-static size_t get_cartridge_info(char *buf)
-{
-	if (ftape_init_drive_needed) {
-		return sprintf(buf, "uninitialized\n");
-	}
-	if (ft_no_tape) {
-		return sprintf(buf, "no cartridge inserted\n");
-	}
-	return sprintf(buf,
-		       "segments  : %5d\n"
-		       "tracks    : %5d\n"
-		       "length    : %5dft\n"
-		       "formatted : %3s\n"
-		       "writable  : %3s\n"
-		       "QIC spec. : QIC-%s\n"
-		       "fmt-code  : %1d\n",
-		       ft_segments_per_track,
-		       ft_tracks_per_tape,
-		       ftape_tape_len,
-		       (ft_formatted == 1) ? "yes" : "no",
-		       (ft_write_protected == 1) ? "no" : "yes",
-		       ((ft_qic_std == QIC_TAPE_QIC40) ? "40" :
-			((ft_qic_std == QIC_TAPE_QIC80) ? "80" :
-			 ((ft_qic_std == QIC_TAPE_QIC3010) ? "3010" :
-			  ((ft_qic_std == QIC_TAPE_QIC3020) ? "3020" :
-			   "???")))),
-		       ft_format_code);
-}
-
-static size_t get_controller_info(char *buf)
-{
-	const char  *fdc_name[] = { "no fdc",
-				    "i8272",
-				    "i82077",
-				    "i82077AA",
-				    "Colorado FC-10 or FC-20",
-				    "i82078",
-				    "i82078_1" };
-
-	return sprintf(buf,
-		       "FDC type  : %s\n"
-		       "FDC base  : 0x%03x\n"
-		       "FDC irq   : %d\n"
-		       "FDC dma   : %d\n"
-		       "FDC thr.  : %d\n"
-		       "max. rate : %d kbit/sec\n",
-		       ft_mach2 ? "Mountain MACH-2" : fdc_name[fdc.type],
-		       fdc.sra, fdc.irq, fdc.dma,
-		       ft_fdc_threshold, ft_fdc_max_rate);
-}
-
-static size_t get_history_info(char *buf)
-{
-        size_t len;
-
-	len  = sprintf(buf,
-		       "\nFDC isr statistics\n"
-		       " id_am_errors     : %3d\n"
-		       " id_crc_errors    : %3d\n"
-		       " data_am_errors   : %3d\n"
-		       " data_crc_errors  : %3d\n"
-		       " overrun_errors   : %3d\n"
-		       " no_data_errors   : %3d\n"
-		       " retries          : %3d\n",
-		       ft_history.id_am_errors,   ft_history.id_crc_errors,
-		       ft_history.data_am_errors, ft_history.data_crc_errors,
-		       ft_history.overrun_errors, ft_history.no_data_errors,
-		       ft_history.retries);
-	len += sprintf(buf + len,
-		       "\nECC statistics\n"
-		       " crc_errors       : %3d\n"
-		       " crc_failures     : %3d\n"
-		       " ecc_failures     : %3d\n"
-		       " sectors corrected: %3d\n",
-		       ft_history.crc_errors,   ft_history.crc_failures,
-		       ft_history.ecc_failures, ft_history.corrected);
-	len += sprintf(buf + len,
-		       "\ntape quality statistics\n"
-		       " media defects    : %3d\n",
-		       ft_history.defects);
-	len += sprintf(buf + len,
-		       "\ntape motion statistics\n"
-		       " repositions      : %3d\n",
-		       ft_history.rewinds);
-	return len;
-}
-
-static int ftape_read_proc(char *page, char **start, off_t off,
-			   int count, int *eof, void *data)
-{
-	char *ptr = page;
-	size_t len;
-	
-	ptr += sprintf(ptr, "Kernel Driver\n\n");
-	ptr += get_driver_info(ptr);
-	ptr += sprintf(ptr, "\nTape Drive\n\n");
-	ptr += get_tapedrive_info(ptr);
-	ptr += sprintf(ptr, "\nFDC Controller\n\n");
-	ptr += get_controller_info(ptr);
-	ptr += sprintf(ptr, "\nTape Cartridge\n\n");
-	ptr += get_cartridge_info(ptr);
-	ptr += sprintf(ptr, "\nHistory Record\n\n");
-	ptr += get_history_info(ptr);
-
-	len = strlen(page);
-	*start = NULL;
-	if (off+count >= len) {
-		*eof = 1;
-	} else {
-		*eof = 0;
-	}
-	return len;
-}
-
-int __init ftape_proc_init(void)
-{
-	return create_proc_read_entry("ftape", 0, &proc_root,
-		ftape_read_proc, NULL) != NULL;
-}
-
-void ftape_proc_destroy(void)
-{
-	remove_proc_entry("ftape", &proc_root);
-}
-
-#endif /* defined(CONFIG_PROC_FS) && defined(CONFIG_FT_PROC_FS) */
diff --git a/drivers/char/ftape/lowlevel/ftape-proc.h b/drivers/char/ftape/lowlevel/ftape-proc.h
deleted file mode 100644
index 264dfcc..0000000
--- a/drivers/char/ftape/lowlevel/ftape-proc.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef _FTAPE_PROC_H
-#define _FTAPE_PROC_H
-
-/*
- *      Copyright (C) 1997 Claus-Justus Heine
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-proc.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:20 $
- *
- *      This file contains definitions for the procfs interface of the
- *      QIC-40/80/3010/3020 floppy-tape driver "ftape" for Linux.
- */
-
-#include <linux/proc_fs.h>
-
-extern int  ftape_proc_init(void);
-extern void ftape_proc_destroy(void);
-
-#endif
diff --git a/drivers/char/ftape/lowlevel/ftape-read.c b/drivers/char/ftape/lowlevel/ftape-read.c
deleted file mode 100644
index d967d8c..0000000
--- a/drivers/char/ftape/lowlevel/ftape-read.c
+++ /dev/null
@@ -1,621 +0,0 @@
-/*
- *      Copyright (C) 1993-1996 Bas Laarhoven,
- *                (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-read.c,v $
- * $Revision: 1.6 $
- * $Date: 1997/10/21 14:39:22 $
- *
- *      This file contains the reading code
- *      for the QIC-117 floppy-tape driver for Linux.
- *
- */
-
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-
-#include <linux/ftape.h>
-#include <linux/qic117.h>
-#include "../lowlevel/ftape-tracing.h"
-#include "../lowlevel/ftape-read.h"
-#include "../lowlevel/ftape-io.h"
-#include "../lowlevel/ftape-ctl.h"
-#include "../lowlevel/ftape-rw.h"
-#include "../lowlevel/ftape-write.h"
-#include "../lowlevel/ftape-ecc.h"
-#include "../lowlevel/ftape-bsm.h"
-
-/*      Global vars.
- */
-
-/*      Local vars.
- */
-
-void ftape_zap_read_buffers(void)
-{
-	int i;
-
-	for (i = 0; i < ft_nr_buffers; ++i) {
-/*  changed to "fit" with dynamic allocation of tape_buffer. --khp  */
-		ft_buffer[i]->status = waiting;
-		ft_buffer[i]->bytes = 0;
-		ft_buffer[i]->skip = 0;
-		ft_buffer[i]->retry = 0;
-	}
-/*	ftape_reset_buffer(); */
-}
-
-static SectorMap convert_sector_map(buffer_struct * buff)
-{
-	int i = 0;
-	SectorMap bad_map = ftape_get_bad_sector_entry(buff->segment_id);
-	SectorMap src_map = buff->soft_error_map | buff->hard_error_map;
-	SectorMap dst_map = 0;
-	TRACE_FUN(ft_t_any);
-
-	if (bad_map || src_map) {
-		TRACE(ft_t_flow, "bad_map = 0x%08lx", (long) bad_map);
-		TRACE(ft_t_flow, "src_map = 0x%08lx", (long) src_map);
-	}
-	while (bad_map) {
-		while ((bad_map & 1) == 0) {
-			if (src_map & 1) {
-				dst_map |= (1 << i);
-			}
-			src_map >>= 1;
-			bad_map >>= 1;
-			++i;
-		}
-		/* (bad_map & 1) == 1 */
-		src_map >>= 1;
-		bad_map >>= 1;
-	}
-	if (src_map) {
-		dst_map |= (src_map << i);
-	}
-	if (dst_map) {
-		TRACE(ft_t_flow, "dst_map = 0x%08lx", (long) dst_map);
-	}
-	TRACE_EXIT dst_map;
-}
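
/*
 * Editorial sketch: the conversion above squeezes the bad-sector positions
 * out of a per-sector error map so the remaining error bits line up with
 * the data blocks that actually reached the buffer (bad sectors are never
 * transferred).  Standalone model assuming 32-bit maps, i.e. 32 sectors per
 * segment.
 */
#include <stdint.h>

static uint32_t squeeze_bad_positions(uint32_t error_map, uint32_t bad_map)
{
	uint32_t out = 0;
	int src, dst = 0;

	for (src = 0; src < 32; src++) {
		if (bad_map & (1u << src))
			continue;		/* bad sector: no data block, drop the bit */
		if (error_map & (1u << src))
			out |= 1u << dst;	/* error bit moves to its data-block slot */
		dst++;
	}
	return out;
}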
-
-static int correct_and_copy_fraction(buffer_struct *buff, __u8 * destination,
-				     int start, int size)
-{
-	struct memory_segment mseg;
-	int result;
-	SectorMap read_bad;
-	TRACE_FUN(ft_t_any);
-
-	mseg.read_bad = convert_sector_map(buff);
-	mseg.marked_bad = 0;	/* not used... */
-	mseg.blocks = buff->bytes / FT_SECTOR_SIZE;
-	mseg.data = buff->address;
-	/*    If there are no data sectors we can skip this segment.
-	 */
-	if (mseg.blocks <= 3) {
-		TRACE_ABORT(0, ft_t_noise, "empty segment");
-	}
-	read_bad = mseg.read_bad;
-	ft_history.crc_errors += count_ones(read_bad);
-	result = ftape_ecc_correct_data(&mseg);
-	if (read_bad != 0 || mseg.corrected != 0) {
-		TRACE(ft_t_noise, "crc error map: 0x%08lx", (unsigned long)read_bad);
-		TRACE(ft_t_noise, "corrected map: 0x%08lx", (unsigned long)mseg.corrected);
-		ft_history.corrected += count_ones(mseg.corrected);
-	}
-	if (result == ECC_CORRECTED || result == ECC_OK) {
-		if (result == ECC_CORRECTED) {
-			TRACE(ft_t_info, "ecc corrected segment: %d", buff->segment_id);
-		}
-		if(start < 0) {
-			start= 0;
-		}
-		if((start+size) > ((mseg.blocks - 3) * FT_SECTOR_SIZE)) {
-			size = (mseg.blocks - 3) * FT_SECTOR_SIZE  - start;
-		} 
-		if (size < 0) {
-			size= 0;
-		}
-		if(size > 0) {
-			memcpy(destination + start, mseg.data + start, size);
-		}
-		if ((read_bad ^ mseg.corrected) & mseg.corrected) {
-			/* sectors corrected without crc errors set */
-			ft_history.crc_failures++;
-		}
-		TRACE_EXIT size; /* (mseg.blocks - 3) * FT_SECTOR_SIZE; */
-	} else {
-		ft_history.ecc_failures++;
-		TRACE_ABORT(-EAGAIN,
-			    ft_t_err, "ecc failure on segment %d",
-			    buff->segment_id);
-	}
-	TRACE_EXIT 0;
-}
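
/*
 * Editorial sketch: the copy above clamps the caller's [start, start+size)
 * window to the segment's usable payload, i.e. the transferred sectors
 * minus the three that hold the ECC.  Standalone model of that clamping;
 * sector_size stands in for FT_SECTOR_SIZE.
 */
static int clamp_fraction(int *start, int size, int data_blocks, int sector_size)
{
	int payload = (data_blocks - 3) * sector_size;	/* last 3 sectors are ECC */

	if (*start < 0)
		*start = 0;
	if (*start + size > payload)
		size = payload - *start;
	return size > 0 ? size : 0;	/* bytes that may be copied from *start */
}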
-
-/*      Read given segment into buffer at address.
- */
-int ftape_read_segment_fraction(const int segment_id,
-				void  *address, 
-				const ft_read_mode_t read_mode,
-				const int start,
-				const int size)
-{
-	int result = 0;
-	int retry  = 0;
-	int bytes_read = 0;
-	int read_done  = 0;
-	TRACE_FUN(ft_t_flow);
-
-	ft_history.used |= 1;
-	TRACE(ft_t_data_flow, "segment_id = %d", segment_id);
-	if (ft_driver_state != reading) {
-		TRACE(ft_t_noise, "calling ftape_abort_operation");
-		TRACE_CATCH(ftape_abort_operation(),);
-		ftape_set_state(reading);
-	}
-	for(;;) {
-		buffer_struct *tail;
-		/*  Allow escape from this loop on signal !
-		 */
-		FT_SIGNAL_EXIT(_DONT_BLOCK);
-		/*  Search all full buffers for the first matching the
-		 *  wanted segment.  Clear other buffers on the fly.
-		 */
-		tail = ftape_get_buffer(ft_queue_tail);
-		while (!read_done && tail->status == done) {
-			/*  Allow escape from this loop on signal !
-			 */
-			FT_SIGNAL_EXIT(_DONT_BLOCK);
-			if (tail->segment_id == segment_id) {
-				/*  If our buffer is already full,
-				 *  return its contents.  
-				 */
-				TRACE(ft_t_flow, "found segment in cache: %d",
-				      segment_id);
-				if (tail->deleted) {
-					/*  Return a value that
-					 *  read_header_segment
-					 *  understands.  As this
-					 *  should only occur when
-					 *  searching for the header
-					 *  segments it shouldn't be
-					 *  misinterpreted elsewhere.
-					 */
-					TRACE_EXIT 0;
-				}
-				result = correct_and_copy_fraction(
-					tail,
-					address,
-					start,
-					size);
-				TRACE(ft_t_flow, "segment contains (bytes): %d",
-				      result);
-				if (result < 0) {
-					if (result != -EAGAIN) {
-						TRACE_EXIT result;
-					}
-					/* keep read_done == 0, will
-					 * trigger
-					 * ftape_abort_operation
-					 * because reading wrong
-					 * segment.
-					 */
-					TRACE(ft_t_err, "ecc failed, retry");
-					++retry;
-				} else {
-					read_done = 1;
-					bytes_read = result;
-				}
-			} else {
-				TRACE(ft_t_flow,"zapping segment in cache: %d",
-				      tail->segment_id);
-			}
-			tail->status = waiting;
-			tail = ftape_next_buffer(ft_queue_tail);
-		}
-		if (!read_done && tail->status == reading) {
-			if (tail->segment_id == segment_id) {
-				switch(ftape_wait_segment(reading)) {
-				case 0:
-					break;
-				case -EINTR:
-					TRACE_ABORT(-EINTR, ft_t_warn,
-						    "interrupted by "
-						    "non-blockable signal");
-					break;
-				default:
-					TRACE(ft_t_noise,
-					      "wait_segment failed");
-					ftape_abort_operation();
-					ftape_set_state(reading);
-					break;
-				}
-			} else {
-				/*  We're reading the wrong segment,
-				 *  stop runner.
-				 */
-				TRACE(ft_t_noise, "reading wrong segment");
-				ftape_abort_operation();
-				ftape_set_state(reading);
-			}
-		}
-		/*    should runner stop ?
-		 */
-		if (ft_runner_status == aborting) {
-			buffer_struct *head = ftape_get_buffer(ft_queue_head);
-			switch(head->status) {
-			case error:
-				ft_history.defects += 
-					count_ones(head->hard_error_map);
-			case reading:
-				head->status = waiting;
-				break;
-			default:
-				break;
-			}
-			TRACE_CATCH(ftape_dumb_stop(),);
-		} else {
-			/*  If just passed last segment on tape: wait
-			 *  for BOT or EOT mark. Sets ft_runner_status to
-			 *  idle if at lEOT and successful 
-			 */
-			TRACE_CATCH(ftape_handle_logical_eot(),);
-		}
-		/*    If we got a segment: quit, or else retry up to limit.
-		 *
-		 *    If segment to read is empty, do not start runner for it,
-		 *    but wait for next read call.
-		 */
-		if (read_done ||
-		    ftape_get_bad_sector_entry(segment_id) == EMPTY_SEGMENT ) {
-			/* bytes_read = 0;  should still be zero */
-			TRACE_EXIT bytes_read;
-
-		}
-		if (retry > FT_RETRIES_ON_ECC_ERROR) {
-			ft_history.defects++;
-			TRACE_ABORT(-ENODATA, ft_t_err,
-				    "too many retries on ecc failure");
-		}
-		/*    Now at least one buffer is empty !
-		 *    Restart runner & tape if needed.
-		 */
-		TRACE(ft_t_any, "head: %d, tail: %d, ft_runner_status: %d",
-		      ftape_buffer_id(ft_queue_head),
-		      ftape_buffer_id(ft_queue_tail),
-		      ft_runner_status);
-		TRACE(ft_t_any, "buffer[].status, [head]: %d, [tail]: %d",
-		      ftape_get_buffer(ft_queue_head)->status,
-		      ftape_get_buffer(ft_queue_tail)->status);
-		tail = ftape_get_buffer(ft_queue_tail);
-		if (tail->status == waiting) {
-			buffer_struct *head = ftape_get_buffer(ft_queue_head);
-
-			ftape_setup_new_segment(head, segment_id, -1);
-			if (read_mode == FT_RD_SINGLE) {
-				/* disable read-ahead */
-				head->next_segment = 0;
-			}
-			ftape_calc_next_cluster(head);
-			if (ft_runner_status == idle) {
-				result = ftape_start_tape(segment_id,
-							  head->sector_offset);
-				if (result < 0) {
-					TRACE_ABORT(result, ft_t_err, "Error: "
-						    "segment %d unreachable",
-						    segment_id);
-				}
-			}
-			head->status = reading;
-			fdc_setup_read_write(head, FDC_READ);
-		}
-	}
-	/* not reached */
-	TRACE_EXIT -EIO;
-}
-
-int ftape_read_header_segment(__u8 *address)
-{
-	int result;
-	int header_segment;
-	int first_failed = 0;
-	int status;
-	TRACE_FUN(ft_t_flow);
-
-	ft_used_header_segment = -1;
-	TRACE_CATCH(ftape_report_drive_status(&status),);
-	TRACE(ft_t_flow, "reading...");
-	/*  We're looking for the first header segment.
-	 *  A header segment cannot contain bad sectors, therefore at the
-	 *  tape start, segments with bad sectors are (according to QIC-40/80)
-	 *  written with deleted data marks and must be skipped.
-	 */
-	memset(address, '\0', (FT_SECTORS_PER_SEGMENT - 3) * FT_SECTOR_SIZE); 
-	result = 0;
-#define HEADER_SEGMENT_BOUNDARY 68  /* why not 42? */
-	for (header_segment = 0;
-	     header_segment < HEADER_SEGMENT_BOUNDARY && result == 0;
-	     ++header_segment) {
-		/*  Set no read-ahead, the isr will force read-ahead whenever
-		 *  it encounters deleted data !
-		 */
-		result = ftape_read_segment(header_segment,
-					    address,
-					    FT_RD_SINGLE);
-		if (result < 0 && !first_failed) {
-			TRACE(ft_t_err, "header segment damaged, trying backup");
-			first_failed = 1;
-			result = 0;	/* force read of next (backup) segment */
-		}
-	}
-	if (result < 0 || header_segment >= HEADER_SEGMENT_BOUNDARY) {
-		TRACE_ABORT(-EIO, ft_t_err,
-			    "no readable header segment found");
-	}
-	TRACE_CATCH(ftape_abort_operation(),);
-	ft_used_header_segment = header_segment;
-	result = ftape_decode_header_segment(address);
- 	TRACE_EXIT result;
-}
-
-int ftape_decode_header_segment(__u8 *address)
-{
-	unsigned int max_floppy_side;
-	unsigned int max_floppy_track;
-	unsigned int max_floppy_sector;
-	unsigned int new_tape_len;
-	TRACE_FUN(ft_t_flow);
-
-	if (GET4(address, FT_SIGNATURE) == FT_D2G_MAGIC) {
-		/* Ditto 2GB header segment. They encrypt the bad sector map.
-		 * We decrypt it and store them in normal format.
-		 * I hope this is correct.
-		 */
-		int i;
-		TRACE(ft_t_warn,
-		      "Found Ditto 2GB tape, "
-		      "trying to decrypt bad sector map");
-		for (i=256; i < 29 * FT_SECTOR_SIZE; i++) {
-			address[i] = ~(address[i] - (i&0xff));
-		}
-		PUT4(address, 0,FT_HSEG_MAGIC);
-	} else if (GET4(address, FT_SIGNATURE) != FT_HSEG_MAGIC) {
-		TRACE_ABORT(-EIO, ft_t_err,
-			    "wrong signature in header segment");
-	}
-	ft_format_code = (ft_format_type) address[FT_FMT_CODE];
-	if (ft_format_code != fmt_big) {
-		ft_header_segment_1   = GET2(address, FT_HSEG_1);
-		ft_header_segment_2   = GET2(address, FT_HSEG_2);
-		ft_first_data_segment = GET2(address, FT_FRST_SEG);
-		ft_last_data_segment  = GET2(address, FT_LAST_SEG);
-	} else {
-		ft_header_segment_1   = GET4(address, FT_6_HSEG_1);
-		ft_header_segment_2   = GET4(address, FT_6_HSEG_2);
-		ft_first_data_segment = GET4(address, FT_6_FRST_SEG);
-		ft_last_data_segment  = GET4(address, FT_6_LAST_SEG);
-	}
-	TRACE(ft_t_noise, "first data segment: %d", ft_first_data_segment);
-	TRACE(ft_t_noise, "last  data segment: %d", ft_last_data_segment);
-	TRACE(ft_t_noise, "header segments are %d and %d",
-	      ft_header_segment_1, ft_header_segment_2);
-
-	/*    Verify tape parameters...
-	 *    QIC-40/80 spec:                 tape_parameters:
-	 *
-	 *    segments-per-track              segments_per_track
-	 *    tracks-per-cartridge            tracks_per_tape
-	 *    max-floppy-side                 (segments_per_track *
-	 *                                    tracks_per_tape - 1) /
-	 *                                    ftape_segments_per_head
-	 *    max-floppy-track                ftape_segments_per_head /
-	 *                                    ftape_segments_per_cylinder - 1
-	 *    max-floppy-sector               ftape_segments_per_cylinder *
-	 *                                    FT_SECTORS_PER_SEGMENT
-	 */
-	ft_segments_per_track = GET2(address, FT_SPT);
-	ft_tracks_per_tape    = address[FT_TPC];
-	max_floppy_side       = address[FT_FHM];
-	max_floppy_track      = address[FT_FTM];
-	max_floppy_sector     = address[FT_FSM];
-	TRACE(ft_t_noise, "(fmt/spt/tpc/fhm/ftm/fsm) = %d/%d/%d/%d/%d/%d",
-	      ft_format_code, ft_segments_per_track, ft_tracks_per_tape,
-	      max_floppy_side, max_floppy_track, max_floppy_sector);
-	new_tape_len = ftape_tape_len;
-	switch (ft_format_code) {
-	case fmt_425ft:
-		new_tape_len = 425;
-		break;
-	case fmt_normal:
-		if (ftape_tape_len == 0) {	/* otherwise 307 ft */
-			new_tape_len = 205;
-		}
-		break;
-	case fmt_1100ft:
-		new_tape_len = 1100;
-		break;
-	case fmt_var:{
-			int segments_per_1000_inch = 1;		/* non-zero default for switch */
-			switch (ft_qic_std) {
-			case QIC_TAPE_QIC40:
-				segments_per_1000_inch = 332;
-				break;
-			case QIC_TAPE_QIC80:
-				segments_per_1000_inch = 488;
-				break;
-			case QIC_TAPE_QIC3010:
-				segments_per_1000_inch = 730;
-				break;
-			case QIC_TAPE_QIC3020:
-				segments_per_1000_inch = 1430;
-				break;
-			}
-			new_tape_len = (1000 * ft_segments_per_track +
-					(segments_per_1000_inch - 1)) / segments_per_1000_inch;
-			break;
-		}
-	case fmt_big:{
-			int segments_per_1000_inch = 1;		/* non-zero default for switch */
-			switch (ft_qic_std) {
-			case QIC_TAPE_QIC40:
-				segments_per_1000_inch = 332;
-				break;
-			case QIC_TAPE_QIC80:
-				segments_per_1000_inch = 488;
-				break;
-			case QIC_TAPE_QIC3010:
-				segments_per_1000_inch = 730;
-				break;
-			case QIC_TAPE_QIC3020:
-				segments_per_1000_inch = 1430;
-				break;
-			default:
-				TRACE_ABORT(-EIO, ft_t_bug,
-			"%x QIC-standard with fmt-code %d, please report",
-					    ft_qic_std, ft_format_code);
-			}
-			new_tape_len = ((1000 * ft_segments_per_track +
-					 (segments_per_1000_inch - 1)) / 
-					segments_per_1000_inch);
-			break;
-		}
-	default:
-		TRACE_ABORT(-EIO, ft_t_err,
-			    "unknown tape format, please report !");
-	}
-	if (new_tape_len != ftape_tape_len) {
-		ftape_tape_len = new_tape_len;
-		TRACE(ft_t_info, "calculated tape length is %d ft",
-		      ftape_tape_len);
-		ftape_calc_timeouts(ft_qic_std, ft_data_rate, ftape_tape_len);
-	}
-	if (ft_segments_per_track == 0 && ft_tracks_per_tape == 0 &&
-	    max_floppy_side == 0 && max_floppy_track == 0 &&
-	    max_floppy_sector == 0) {
-		/*  QIC-40 Rev E and earlier has no values in the header.
-		 */
-		ft_segments_per_track = 68;
-		ft_tracks_per_tape = 20;
-		max_floppy_side = 1;
-		max_floppy_track = 169;
-		max_floppy_sector = 128;
-	}
-	/*  This test will compensate for the wrong parameter on tapes
-	 *  formatted by Conner software.
-	 */
-	if (ft_segments_per_track == 150 &&
-	    ft_tracks_per_tape == 28 &&
-	    max_floppy_side == 7 &&
-	    max_floppy_track == 149 &&
-	    max_floppy_sector == 128) {
-TRACE(ft_t_info, "the famous CONNER bug: max_floppy_side off by one !");
-		max_floppy_side = 6;
-	}
-	/*  These tests will compensate for the wrong parameter on tapes
-	 *  formatted by ComByte Windows software.
-	 *
-	 *  First, for 205 foot tapes
-	 */
-	if (ft_segments_per_track == 100 &&
-	    ft_tracks_per_tape == 28 &&
-	    max_floppy_side == 9 &&
-	    max_floppy_track == 149 &&
-	    max_floppy_sector == 128) {
-TRACE(ft_t_info, "the ComByte bug: max_floppy_side incorrect!");
-		max_floppy_side = 4;
-	}
-	/* Next, for 307 foot tapes. */
-	if (ft_segments_per_track == 150 &&
-	    ft_tracks_per_tape == 28 &&
-	    max_floppy_side == 9 &&
-	    max_floppy_track == 149 &&
-	    max_floppy_sector == 128) {
-TRACE(ft_t_info, "the ComByte bug: max_floppy_side incorrect!");
-		max_floppy_side = 6;
-	}
-	/*  This test will compensate for the wrong parameter on tapes
-	 *  formatted by Colorado Windows software.
-	 */
-	if (ft_segments_per_track == 150 &&
-	    ft_tracks_per_tape == 28 &&
-	    max_floppy_side == 6 &&
-	    max_floppy_track == 150 &&
-	    max_floppy_sector == 128) {
-TRACE(ft_t_info, "the famous Colorado bug: max_floppy_track off by one !");
-		max_floppy_track = 149;
-	}
-	ftape_segments_per_head = ((max_floppy_sector/FT_SECTORS_PER_SEGMENT) *
-				   (max_floppy_track + 1));
-	/*  This test will compensate for some bug reported by Dima
-	 *  Brodsky.  It seems to be a Colorado bug, too (a freebie
-	 *  Imation tape shipped together with a Colorado T3000).
-	 */
-	if ((ft_format_code == fmt_var || ft_format_code == fmt_big) &&
-	    ft_tracks_per_tape == 50 &&
-	    max_floppy_side == 54 &&
-	    max_floppy_track == 255 &&
-	    max_floppy_sector == 128) {
-TRACE(ft_t_info, "the famous ??? bug: max_floppy_track off by one !");
-		max_floppy_track = 254;
-	}
-	/*
-	 *    Verify drive_configuration with tape parameters
-	 */
-	if (ftape_segments_per_head == 0 || ftape_segments_per_cylinder == 0 ||
-	  ((ft_segments_per_track * ft_tracks_per_tape - 1) / ftape_segments_per_head
-	   != max_floppy_side) ||
-	    (ftape_segments_per_head / ftape_segments_per_cylinder - 1 != max_floppy_track) ||
-	(ftape_segments_per_cylinder * FT_SECTORS_PER_SEGMENT != max_floppy_sector)
-#ifdef TESTING
-	    || ((ft_format_code == fmt_var || ft_format_code == fmt_big) && 
-		(max_floppy_track != 254 || max_floppy_sector != 128))
-#endif
-	   ) {
-		char segperheadz = ftape_segments_per_head ? ' ' : '?';
-		char segpercylz  = ftape_segments_per_cylinder ? ' ' : '?';
-		TRACE(ft_t_err,"Tape parameters inconsistency, please report");
-		TRACE(ft_t_err, "reported = %d/%d/%d/%d/%d/%d",
-		      ft_format_code,
-		      ft_segments_per_track,
-		      ft_tracks_per_tape,
-		      max_floppy_side,
-		      max_floppy_track,
-		      max_floppy_sector);
-		TRACE(ft_t_err, "required = %d/%d/%d/%d%c/%d%c/%d",
-		      ft_format_code,
-		      ft_segments_per_track,
-		      ft_tracks_per_tape,
-		      ftape_segments_per_head ?
-		      ((ft_segments_per_track * ft_tracks_per_tape -1) / 
-		       ftape_segments_per_head ) :
-			(ft_segments_per_track * ft_tracks_per_tape -1),
-			segperheadz,
-		      ftape_segments_per_cylinder ?
-		      (ftape_segments_per_head / 
-		       ftape_segments_per_cylinder - 1 ) :
-			ftape_segments_per_head - 1,
-			segpercylz,
-		      (ftape_segments_per_cylinder * FT_SECTORS_PER_SEGMENT));
-		TRACE_EXIT -EIO;
-	}
-	ftape_extract_bad_sector_map(address);
- 	TRACE_EXIT 0;
-}
diff --git a/drivers/char/ftape/lowlevel/ftape-read.h b/drivers/char/ftape/lowlevel/ftape-read.h
deleted file mode 100644
index 069f99f..0000000
--- a/drivers/char/ftape/lowlevel/ftape-read.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef _FTAPE_READ_H
-#define _FTAPE_READ_H
-
-/*
- * Copyright (C) 1994-1996 Bas Laarhoven,
- *           (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-read.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:22 $
- *
- *      This file contains the definitions for the read functions
- *      for the QIC-117 floppy-tape driver for Linux.
- *
- */
-
-/*      ftape-read.c defined global functions.
- */
-typedef enum {
-	FT_RD_SINGLE = 0,
-	FT_RD_AHEAD  = 1,
-} ft_read_mode_t;
-
-extern int ftape_read_header_segment(__u8 *address);
-extern int ftape_decode_header_segment(__u8 *address);
-extern int ftape_read_segment_fraction(const int segment,
-				       void  *address, 
-				       const ft_read_mode_t read_mode,
-				       const int start,
-				       const int size);
-#define ftape_read_segment(segment, address, read_mode)			\
-	ftape_read_segment_fraction(segment, address, read_mode,	\
-				    0, FT_SEGMENT_SIZE)
-extern void ftape_zap_read_buffers(void);
-
-#endif				/* _FTAPE_READ_H */
diff --git a/drivers/char/ftape/lowlevel/ftape-rw.c b/drivers/char/ftape/lowlevel/ftape-rw.c
deleted file mode 100644
index c0d6dc2..0000000
--- a/drivers/char/ftape/lowlevel/ftape-rw.c
+++ /dev/null
@@ -1,1092 +0,0 @@
-/*
- *      Copyright (C) 1993-1996 Bas Laarhoven,
- *                (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-rw.c,v $
- * $Revision: 1.7 $
- * $Date: 1997/10/28 14:26:49 $
- *
- *      This file contains some common code for the segment read and
- *      segment write routines for the QIC-117 floppy-tape driver for
- *      Linux.
- */
-
-#include <linux/string.h>
-#include <linux/errno.h>
-
-#include <linux/ftape.h>
-#include <linux/qic117.h>
-#include "../lowlevel/ftape-tracing.h"
-#include "../lowlevel/ftape-rw.h"
-#include "../lowlevel/fdc-io.h"
-#include "../lowlevel/ftape-init.h"
-#include "../lowlevel/ftape-io.h"
-#include "../lowlevel/ftape-ctl.h"
-#include "../lowlevel/ftape-read.h"
-#include "../lowlevel/ftape-ecc.h"
-#include "../lowlevel/ftape-bsm.h"
-
-/*      Global vars.
- */
-int ft_nr_buffers;
-buffer_struct *ft_buffer[FT_MAX_NR_BUFFERS];
-static volatile int ft_head;
-static volatile int ft_tail;	/* not volatile but need same type as head */
-int fdc_setup_error;
-location_record ft_location = {-1, 0};
-volatile int ftape_tape_running;
-
-/*      Local vars.
- */
-static int overrun_count_offset;
-static int inhibit_correction;
-
-/*  maximal allowed overshoot when fast seeking
- */
-#define OVERSHOOT_LIMIT 10
-
-/*      Increment cyclic buffer nr.
- */
-buffer_struct *ftape_next_buffer(ft_buffer_queue_t pos)
-{
-	switch (pos) {
-	case ft_queue_head:
-		if (++ft_head >= ft_nr_buffers) {
-			ft_head = 0;
-		}
-		return ft_buffer[ft_head];
-	case ft_queue_tail:
-		if (++ft_tail >= ft_nr_buffers) {
-			ft_tail = 0;
-		}
-		return ft_buffer[ft_tail];
-	default:
-		return NULL;
-	}
-}
-int ftape_buffer_id(ft_buffer_queue_t pos)
-{
-	switch(pos) {
-	case ft_queue_head: return ft_head;
-	case ft_queue_tail: return ft_tail;
-	default: return -1;
-	}
-}
-buffer_struct *ftape_get_buffer(ft_buffer_queue_t pos)
-{
-	switch(pos) {
-	case ft_queue_head: return ft_buffer[ft_head];
-	case ft_queue_tail: return ft_buffer[ft_tail];
-	default: return NULL;
-	}
-}
-void ftape_reset_buffer(void)
-{
-	ft_head = ft_tail = 0;
-}
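
/*
 * Editorial sketch: ft_head/ft_tail above form a circular queue over the
 * ft_buffer[] array -- one index tracks the buffer currently handed to the
 * FDC, the other the buffer being consumed by the read/write path, and both
 * wrap at ft_nr_buffers.  Minimal model of the wrap-around step used by
 * ftape_next_buffer().
 */
static int ring_advance(int index, int nr_buffers)
{
	return (index + 1 >= nr_buffers) ? 0 : index + 1;
}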
-
-buffer_state_enum ftape_set_state(buffer_state_enum new_state)
-{
-	buffer_state_enum old_state = ft_driver_state;
-
-	ft_driver_state = new_state;
-	return old_state;
-}
-/*      Calculate Floppy Disk Controller and DMA parameters for a segment.
- *      head:   selects buffer struct in array.
- *      offset: number of physical sectors to skip (including bad ones).
- *      count:  number of physical sectors to handle (including bad ones).
- */
-static int setup_segment(buffer_struct * buff, 
-			 int segment_id,
-			 unsigned int sector_offset, 
-			 unsigned int sector_count, 
-			 int retry)
-{
-	SectorMap offset_mask;
-	SectorMap mask;
-	TRACE_FUN(ft_t_any);
-
-	buff->segment_id = segment_id;
-	buff->sector_offset = sector_offset;
-	buff->remaining = sector_count;
-	buff->head = segment_id / ftape_segments_per_head;
-	buff->cyl = (segment_id % ftape_segments_per_head) / ftape_segments_per_cylinder;
-	buff->sect = (segment_id % ftape_segments_per_cylinder) * FT_SECTORS_PER_SEGMENT + 1;
-	buff->deleted = 0;
-	offset_mask = (1 << buff->sector_offset) - 1;
-	mask = ftape_get_bad_sector_entry(segment_id) & offset_mask;
-	while (mask) {
-		if (mask & 1) {
-			offset_mask >>= 1;	/* don't count bad sector */
-		}
-		mask >>= 1;
-	}
-	buff->data_offset = count_ones(offset_mask);	/* good sectors to skip */
-	buff->ptr = buff->address + buff->data_offset * FT_SECTOR_SIZE;
-	TRACE(ft_t_flow, "data offset = %d sectors", buff->data_offset);
-	if (retry) {
-		buff->soft_error_map &= offset_mask;	/* keep skipped part */
-	} else {
-		buff->hard_error_map = buff->soft_error_map = 0;
-	}
-	buff->bad_sector_map = ftape_get_bad_sector_entry(buff->segment_id);
-	if (buff->bad_sector_map != 0) {
-		TRACE(ft_t_noise, "segment: %d, bad sector map: %08lx",
-			buff->segment_id, (long)buff->bad_sector_map);
-	} else {
-		TRACE(ft_t_flow, "segment: %d", buff->segment_id);
-	}
-	if (buff->sector_offset > 0) {
-		buff->bad_sector_map >>= buff->sector_offset;
-	}
-	if (buff->sector_offset != 0 || buff->remaining != FT_SECTORS_PER_SEGMENT) {
-		TRACE(ft_t_flow, "sector offset = %d, count = %d",
-			buff->sector_offset, buff->remaining);
-	}
-	/*    Segments with 3 or less sectors are not written with valid
-	 *    data because there is no space left for the ecc.  The
-	 *    data written is whatever happens to be in the buffer.
-	 *    Reading such a segment will return a zero byte-count.
-	 *    To allow us to read/write segments with all bad sectors
-	 *    we fake one readable sector in the segment. This
-	 *    prevents having to handle these segments in a very
-	 *    special way.  It is not important if the reading of this
-	 *    bad sector fails or not (the data is ignored). It is
-	 *    only read to keep the driver running.
-	 *
-	 *    The QIC-40/80 spec. has no information on how to handle
-	 *    this case, so this is my interpretation.  
-	 */
-	if (buff->bad_sector_map == EMPTY_SEGMENT) {
-		TRACE(ft_t_flow, "empty segment %d, fake first sector good",
-		      buff->segment_id);
-		if (buff->ptr != buff->address) {
-			TRACE(ft_t_bug, "This is a bug: %p/%p",
-			      buff->ptr, buff->address);
-		}
-		buff->bad_sector_map = FAKE_SEGMENT;
-	}
-	fdc_setup_error = 0;
-	buff->next_segment = segment_id + 1;
-	TRACE_EXIT 0;
-}
-
-/*      Calculate Floppy Disk Controller and DMA parameters for a new segment.
- */
-int ftape_setup_new_segment(buffer_struct * buff, int segment_id, int skip)
-{
-	int result = 0;
-	static int old_segment_id = -1;
-	static buffer_state_enum old_ft_driver_state = idle;
-	int retry = 0;
-	unsigned offset = 0;
-	int count = FT_SECTORS_PER_SEGMENT;
-	TRACE_FUN(ft_t_flow);
-
-	TRACE(ft_t_flow, "%s segment %d (old = %d)",
-	      (ft_driver_state == reading || ft_driver_state == verifying) 
-	      ? "reading" : "writing",
-	      segment_id, old_segment_id);
-	if (ft_driver_state != old_ft_driver_state) {	/* when verifying */
-		old_segment_id = -1;
-		old_ft_driver_state = ft_driver_state;
-	}
-	if (segment_id == old_segment_id) {
-		++buff->retry;
-		++ft_history.retries;
-		TRACE(ft_t_flow, "setting up for retry nr %d", buff->retry);
-		retry = 1;
-		if (skip && buff->skip > 0) {	/* allow skip on retry */
-			offset = buff->skip;
-			count -= offset;
-			TRACE(ft_t_flow, "skipping %d sectors", offset);
-		}
-	} else {
-		buff->retry = 0;
-		buff->skip = 0;
-		old_segment_id = segment_id;
-	}
-	result = setup_segment(buff, segment_id, offset, count, retry);
-	TRACE_EXIT result;
-}
-
-/*      Determine size of next cluster of good sectors.
- */
-int ftape_calc_next_cluster(buffer_struct * buff)
-{
-	/* Skip bad sectors.
-	 */
-	while (buff->remaining > 0 && (buff->bad_sector_map & 1) != 0) {
-		buff->bad_sector_map >>= 1;
-		++buff->sector_offset;
-		--buff->remaining;
-	}
-	/* Find next cluster of good sectors
-	 */
-	if (buff->bad_sector_map == 0) {	/* speed up */
-		buff->sector_count = buff->remaining;
-	} else {
-		SectorMap map = buff->bad_sector_map;
-
-		buff->sector_count = 0;
-		while (buff->sector_count < buff->remaining && (map & 1) == 0) {
-			++buff->sector_count;
-			map >>= 1;
-		}
-	}
-	return buff->sector_count;
-}
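
/*
 * Editorial sketch: the cluster calculation above first discards leading
 * bad sectors, then counts the run of consecutive good sectors that can be
 * transferred in one FDC operation.  Standalone model over a 32-bit
 * bad-sector map (32 sectors per segment).
 */
#include <stdint.h>

static int next_good_cluster(uint32_t *bad_map, int *offset, int *remaining)
{
	int count = 0;

	while (*remaining > 0 && (*bad_map & 1)) {
		*bad_map >>= 1;			/* skip a leading bad sector */
		(*offset)++;
		(*remaining)--;
	}
	while (count < *remaining && !((*bad_map >> count) & 1))
		count++;			/* length of the good run */
	return count;
}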
-
-/*  if just passed the last segment on a track, wait for BOT
- *  or EOT mark.
- */
-int ftape_handle_logical_eot(void)
-{
-	TRACE_FUN(ft_t_flow);
-
-	if (ft_runner_status == logical_eot) {
-		int status;
-
-		TRACE(ft_t_noise, "tape at logical EOT");
-		TRACE_CATCH(ftape_ready_wait(ftape_timeout.seek, &status),);
-		if ((status & (QIC_STATUS_AT_BOT | QIC_STATUS_AT_EOT)) == 0) {
-			TRACE_ABORT(-EIO, ft_t_err, "eot/bot not reached");
-		}
-		ft_runner_status = end_of_tape;
-	}
-	if (ft_runner_status == end_of_tape) {
-		TRACE(ft_t_noise, "runner stopped because of logical EOT");
-		ft_runner_status = idle;
-	}
-	TRACE_EXIT 0;
-}
-
-static int check_bot_eot(int status)
-{
-	TRACE_FUN(ft_t_flow);
-
-	if (status & (QIC_STATUS_AT_BOT | QIC_STATUS_AT_EOT)) {
-		ft_location.bot = ((ft_location.track & 1) == 0 ?
-				(status & QIC_STATUS_AT_BOT) != 0:
-				(status & QIC_STATUS_AT_EOT) != 0);
-		ft_location.eot = !ft_location.bot;
-		ft_location.segment = (ft_location.track +
-			(ft_location.bot ? 0 : 1)) * ft_segments_per_track - 1;
-		ft_location.sector = -1;
-		ft_location.known  = 1;
-		TRACE(ft_t_flow, "tape at logical %s",
-		      ft_location.bot ? "bot" : "eot");
-		TRACE(ft_t_flow, "segment = %d", ft_location.segment);
-	} else {
-		ft_location.known = 0;
-	}
-	TRACE_EXIT ft_location.known;
-}
-
-/*      Read Id of first sector passing tape head.
- */
-static int ftape_read_id(void)
-{
-	int status;
-	__u8 out[2];
-	TRACE_FUN(ft_t_any);
-
-	/* Assume tape is running on entry, be able to handle
-	 * situation where it stopped or is stopping.
-	 */
-	ft_location.known = 0;	/* default is location not known */
-	out[0] = FDC_READID;
-	out[1] = ft_drive_sel;
-	TRACE_CATCH(fdc_command(out, 2),);
-	switch (fdc_interrupt_wait(20 * FT_SECOND)) {
-	case 0:
-		if (fdc_sect == 0) {
-			if (ftape_report_drive_status(&status) >= 0 &&
-			    (status & QIC_STATUS_READY)) {
-				ftape_tape_running = 0;
-				TRACE(ft_t_flow, "tape has stopped");
-				check_bot_eot(status);
-			}
-		} else {
-			ft_location.known = 1;
-			ft_location.segment = (ftape_segments_per_head
-					       * fdc_head
-					       + ftape_segments_per_cylinder
-					       * fdc_cyl
-					       + (fdc_sect - 1)
-					       / FT_SECTORS_PER_SEGMENT);
-			ft_location.sector = ((fdc_sect - 1)
-					      % FT_SECTORS_PER_SEGMENT);
-			ft_location.eot = ft_location.bot = 0;
-		}
-		break;
-	case -ETIME:
-		/*  Didn't find id on tape, must be near end: Wait
-		 *  until stopped.
-		 */
-		if (ftape_ready_wait(FT_FOREVER, &status) >= 0) {
-			ftape_tape_running = 0;
-			TRACE(ft_t_flow, "tape has stopped");
-			check_bot_eot(status);
-		}
-		break;
-	default:
-		/*  Interrupted or otherwise failing
-		 *  fdc_interrupt_wait() 
-		 */
-		TRACE(ft_t_err, "fdc_interrupt_wait failed");
-		break;
-	}
-	if (!ft_location.known) {
-		TRACE_ABORT(-EIO, ft_t_flow, "no id found");
-	}
-	if (ft_location.sector == 0) {
-		TRACE(ft_t_flow, "passing segment %d/%d",
-		      ft_location.segment, ft_location.sector);
-	} else {
-		TRACE(ft_t_fdc_dma, "passing segment %d/%d",
-		      ft_location.segment, ft_location.sector);
-	}
-	TRACE_EXIT 0;
-}
-
-static int logical_forward(void)
-{
-	ftape_tape_running = 1;
-	return ftape_command(QIC_LOGICAL_FORWARD);
-}
-
-int ftape_stop_tape(int *pstatus)
-{
-	int retry = 0;
-	int result;
-	TRACE_FUN(ft_t_flow);
-
-	do {
-		result = ftape_command_wait(QIC_STOP_TAPE,
-					    ftape_timeout.stop, pstatus);
-		if (result == 0) {
-			if ((*pstatus & QIC_STATUS_READY) == 0) {
-				result = -EIO;
-			} else {
-				ftape_tape_running = 0;
-			}
-		}
-	} while (result < 0 && ++retry <= 3);
-	if (result < 0) {
-		TRACE(ft_t_err, "failed ! (fatal)");
-	}
-	TRACE_EXIT result;
-}
-
-int ftape_dumb_stop(void)
-{
-	int result;
-	int status;
-	TRACE_FUN(ft_t_flow);
-
-	/*  Abort current fdc operation if it's busy (probably read
-	 *  or write operation pending) with a reset.
-	 */
-	if (fdc_ready_wait(100 /* usec */) < 0) {
-		TRACE(ft_t_noise, "aborting fdc operation");
-		fdc_reset();
-	}
-	/*  Reading id's after the last segment on a track may fail
-	 *  but eventually the drive will become ready (logical eot).
-	 */
-	result = ftape_report_drive_status(&status);
-	ft_location.known = 0;
-	do {
-		if (result == 0 && status & QIC_STATUS_READY) {
-			/* Tape is not running any more.
-			 */
-			TRACE(ft_t_noise, "tape already halted");
-			check_bot_eot(status);
-			ftape_tape_running = 0;
-		} else if (ftape_tape_running) {
-			/*  Tape is (was) still moving.
-			 */
-#ifdef TESTING
-			ftape_read_id();
-#endif
-			result = ftape_stop_tape(&status);
-		} else {
-			/*  Tape not yet ready but stopped.
-			 */
-			result = ftape_ready_wait(ftape_timeout.pause,&status);
-		}
-	} while (ftape_tape_running
-		 && !(sigtestsetmask(&current->pending.signal, _NEVER_BLOCK)));
-#ifndef TESTING
-	ft_location.known = 0;
-#endif
-	if (ft_runner_status == aborting || ft_runner_status == do_abort) {
-		ft_runner_status = idle;
-	}
-	TRACE_EXIT result;
-}
-
-/*      Wait until runner has finished tail buffer.
- *
- */
-int ftape_wait_segment(buffer_state_enum state)
-{
-	int status;
-	int result = 0;
-	TRACE_FUN(ft_t_flow);
-
-	while (ft_buffer[ft_tail]->status == state) {
-		TRACE(ft_t_flow, "state: %d", ft_buffer[ft_tail]->status);
-		/*  First buffer still being worked on, wait up to timeout.
-		 *
-		 *  Note: we check twice for being killed. 50
-		 *  seconds is quite long. Note that
-		 *  fdc_interrupt_wait() is not killable by any
-		 *  means. ftape_read_segment() wants us to return
-		 *  -EINTR in case of a signal.  
-		 */
-		FT_SIGNAL_EXIT(_DONT_BLOCK);
-		result = fdc_interrupt_wait(50 * FT_SECOND);
-		FT_SIGNAL_EXIT(_DONT_BLOCK);
-		if (result < 0) {
-			TRACE_ABORT(result,
-				    ft_t_err, "fdc_interrupt_wait failed");
-		}
-		if (fdc_setup_error) {
-			/* recover... FIXME */
-			TRACE_ABORT(-EIO, ft_t_err, "setup error");
-		}
-	}
-	if (ft_buffer[ft_tail]->status != error) {
-		TRACE_EXIT 0;
-	}
-	TRACE_CATCH(ftape_report_drive_status(&status),);
-	TRACE(ft_t_noise, "ftape_report_drive_status: 0x%02x", status);
-	if ((status & QIC_STATUS_READY) && 
-	    (status & QIC_STATUS_ERROR)) {
-		unsigned int error;
-		qic117_cmd_t command;
-		
-		/*  Report and clear error state.
-		 *  In case the drive can't operate at the selected
-		 *  rate, select the next lower data rate.
-		 */
-		ftape_report_error(&error, &command, 1);
-		if (error == 31 && command == QIC_LOGICAL_FORWARD) {
-			/* drive does not accept this data rate */
-			if (ft_data_rate > 250) {
-				TRACE(ft_t_info,
-				      "Probable data rate conflict");
-				TRACE(ft_t_info,
-				      "Lowering data rate to %d Kbps",
-				      ft_data_rate / 2);
-				ftape_half_data_rate();
-				if (ft_buffer[ft_tail]->retry > 0) {
-					/* give it a chance */
-					--ft_buffer[ft_tail]->retry;
-				}
-			} else {
-				/* no rate is accepted... */
-				TRACE(ft_t_err, "We're dead :(");
-			}
-		} else {
-			TRACE(ft_t_err, "Unknown error");
-		}
-		TRACE_EXIT -EIO;   /* g.p. error */
-	}
-	TRACE_EXIT 0;
-}
-
-/* forward */ static int seek_forward(int segment_id, int fast);
-
-static int fast_seek(int count, int reverse)
-{
-	int result = 0;
-	int status;
-	TRACE_FUN(ft_t_flow);
-
-	if (count > 0) {
-		/*  If positioned at begin or end of tape, fast seeking needs
-		 *  special treatment.
-		 *  Starting from logical bot needs a (slow) seek to the first
-		 *  segment before the high speed seek. Most drives do this
-		 *  automatically but some older ones don't, so we treat them
-		 *  all the same.
-		 *  Starting from logical eot is even more difficult because
-		 *  we cannot (slow) reverse seek to the last segment.
-		 *  TO BE IMPLEMENTED.
-		 */
-		inhibit_correction = 0;
-		if (ft_location.known &&
-		    ((ft_location.bot && !reverse) ||
-		     (ft_location.eot && reverse))) {
-			if (!reverse) {
-				/*  (slow) skip to first segment on a track
-				 */
-				seek_forward(ft_location.track * ft_segments_per_track, 0);
-				--count;
-			} else {
-				/*  When seeking backwards from
-				 *  end-of-tape the number of erased
-				 *  gaps found seems to be higher than
-				 *  expected.  Therefore the drive must
-				 *  skip some more segments than
-				 *  calculated, but we don't know how
-				 *  many.  Thus we will prevent the
-				 *  re-calculation of offset and
-				 *  overshoot when seeking backwards.
-				 */
-				inhibit_correction = 1;
-				count += 3;	/* best guess */
-			}
-		}
-	} else {
-		TRACE(ft_t_flow, "warning: zero or negative count: %d", count);
-	}
-	if (count > 0) {
-		int i;
-		int nibbles = count > 255 ? 3 : 2;
-
-		if (count > 4095) {
-			TRACE(ft_t_noise, "skipping clipped at 4095 segment");
-			count = 4095;
-		}
-		/* Issue this tape command first. */
-		if (!reverse) {
-			TRACE(ft_t_noise, "skipping %d segment(s)", count);
-			result = ftape_command(nibbles == 3 ?
-			   QIC_SKIP_EXTENDED_FORWARD : QIC_SKIP_FORWARD);
-		} else {
-			TRACE(ft_t_noise, "backing up %d segment(s)", count);
-			result = ftape_command(nibbles == 3 ?
-			   QIC_SKIP_EXTENDED_REVERSE : QIC_SKIP_REVERSE);
-		}
-		if (result < 0) {
-			TRACE(ft_t_noise, "Skip command failed");
-		} else {
-			--count;	/* 0 means one gap etc. */
-			for (i = 0; i < nibbles; ++i) {
-				if (result >= 0) {
-					result = ftape_parameter(count & 15);
-					count /= 16;
-				}
-			}
-			result = ftape_ready_wait(ftape_timeout.rewind, &status);
-			if (result >= 0) {
-				ftape_tape_running = 0;
-			}
-		}
-	}
-	TRACE_EXIT result;
-}
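
As the loop above shows, the skip count is handed to the drive as QIC-117 parameter nibbles, two for a normal skip and three for an extended skip, low nibble first, after the count has been decremented because "0 means one gap". A small illustration of just the arithmetic, with a hypothetical count (this is not driver code):

	#include <stdio.h>

	int main(void)
	{
		int count = 300;			/* hypothetical number of segments to skip */
		int nibbles = count > 255 ? 3 : 2;	/* extended skip needs three nibbles */
		int i;

		--count;				/* "0 means one gap etc." */
		for (i = 0; i < nibbles; i++) {
			printf("nibble %d: %d\n", i, count & 15);	/* low nibble first */
			count /= 16;
		}
		return 0;				/* prints 11, 2, 1: 0x12B == 299 */
	}
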
-
-static int validate(int id)
-{
-	/* Check to see if position found is off-track as reported
-	 *  once.  Because all tracks in one direction lie next to
-	 *  each other, if off-track the error will be approximately
-	 *  2 * ft_segments_per_track.
-	 */
-	if (ft_location.track == -1) {
-		return 1; /* unforeseen situation, don't generate error */
-	} else {
-		/* Use margin of ft_segments_per_track on both sides
-		 * because ftape needs some margin and the error we're
-		 * looking for is much larger !
-		 */
-		int lo = (ft_location.track - 1) * ft_segments_per_track;
-		int hi = (ft_location.track + 2) * ft_segments_per_track;
-
-		return (id >= lo && id < hi);
-	}
-}
-
-static int seek_forward(int segment_id, int fast)
-{
-	int failures = 0;
-	int count;
-	static int margin = 1;	/* fixed: stop this before target */
-	static int overshoot = 1;
-	static int min_count = 8;
-	int expected = -1;
-	int target = segment_id - margin;
-	int fast_seeking;
-	int prev_segment = ft_location.segment;
-	TRACE_FUN(ft_t_flow);
-
-	if (!ft_location.known) {
-		TRACE_ABORT(-EIO, ft_t_err,
-			    "fatal: cannot seek from unknown location");
-	}
-	if (!validate(segment_id)) {
-		ftape_sleep(1 * FT_SECOND);
-		ft_failure = 1;
-		TRACE_ABORT(-EIO, ft_t_err,
-			    "fatal: head off track (bad hardware?)");
-	}
-	TRACE(ft_t_noise, "from %d/%d to %d/0 - %d",
-	      ft_location.segment, ft_location.sector,segment_id,margin);
-	count = target - ft_location.segment - overshoot;
-	fast_seeking = (fast &&
-			count > (min_count + (ft_location.bot ? 1 : 0)));
-	if (fast_seeking) {
-		TRACE(ft_t_noise, "fast skipping %d segments", count);
-		expected = segment_id - margin;
-		fast_seek(count, 0);
-	}
-	if (!ftape_tape_running) {
-		logical_forward();
-	}
-	while (ft_location.segment < segment_id) {
-		/*  This requires at least one sector in a (bad) segment to
-		 *  have a valid and readable sector id !
-		 *  It looks like this is not guaranteed, so we must try
-		 *  to find a way to skip an EMPTY_SEGMENT. !!! FIXME !!!
-		 */
-		if (ftape_read_id() < 0 || !ft_location.known ||
-		    sigtestsetmask(&current->pending.signal, _DONT_BLOCK)) {
-			ft_location.known = 0;
-			if (!ftape_tape_running ||
-			    ++failures > FT_SECTORS_PER_SEGMENT) {
-				TRACE_ABORT(-EIO, ft_t_err,
-					    "read_id failed completely");
-			}
-			FT_SIGNAL_EXIT(_DONT_BLOCK);
-			TRACE(ft_t_flow, "read_id failed, retry (%d)",
-			      failures);
-			continue;
-		}
-		if (fast_seeking) {
-			TRACE(ft_t_noise, "ended at %d/%d (%d,%d)",
-			      ft_location.segment, ft_location.sector,
-			      overshoot, inhibit_correction);
-			if (!inhibit_correction &&
-			    (ft_location.segment < expected ||
-			     ft_location.segment > expected + margin)) {
-				int error = ft_location.segment - expected;
-				TRACE(ft_t_noise,
-				      "adjusting overshoot from %d to %d",
-				      overshoot, overshoot + error);
-				overshoot += error;
-				/*  All overshoots have the same
-				 *  direction, so it should never
-				 *  become negative, but who knows.
-				 */
-				if (overshoot < -5 ||
-				    overshoot > OVERSHOOT_LIMIT) {
-					if (overshoot < 0) {
-						/* keep sane value */
-						overshoot = -5;
-					} else {
-						/* keep sane value */
-						overshoot = OVERSHOOT_LIMIT;
-					}
-					TRACE(ft_t_noise,
-					      "clipped overshoot to %d",
-					      overshoot);
-				}
-			}
-			fast_seeking = 0;
-		}
-		if (ft_location.known) {
-			if (ft_location.segment > prev_segment + 1) {
-				TRACE(ft_t_noise,
-				      "missed segment %d while skipping",
-				      prev_segment + 1);
-			}
-			prev_segment = ft_location.segment;
-		}
-	}
-	if (ft_location.segment > segment_id) {
-		TRACE_ABORT(-EIO,
-			    ft_t_noise, "failed: skip ended at segment %d/%d",
-			    ft_location.segment, ft_location.sector);
-	}
-	TRACE_EXIT 0;
-}
-
-static int skip_reverse(int segment_id, int *pstatus)
-{
-	int failures = 0;
-	static int overshoot = 1;
-	static int min_rewind = 2;	/* 1 + overshoot */
-	static const int margin = 1;	/* stop this before target */
-	int expected = 0;
-	int count = 1;
-	int short_seek;
-	int target = segment_id - margin;
-	TRACE_FUN(ft_t_flow);
-
-	if (ft_location.known && !validate(segment_id)) {
-		ftape_sleep(1 * FT_SECOND);
-		ft_failure = 1;
-		TRACE_ABORT(-EIO, ft_t_err,
-			    "fatal: head off track (bad hardware?)");
-	}
-	do {
-		if (!ft_location.known) {
-			TRACE(ft_t_warn, "warning: location not known");
-		}
-		TRACE(ft_t_noise, "from %d/%d to %d/0 - %d",
-		      ft_location.segment, ft_location.sector,
-		      segment_id, margin);
-		/*  min_rewind == 1 + overshoot_when_doing_minimum_rewind
-		 *  overshoot  == overshoot_when_doing_larger_rewind
-		 *  Initially min_rewind == 1 + overshoot, optimization
-		 *  of both values will be done separately.
-		 *  overshoot and min_rewind can be negative as both are
-		 *  sums of three components:
-		 *  any_overshoot == rewind_overshoot - 
-		 *                   stop_overshoot   -
-		 *                   start_overshoot
-		 */
-		if (ft_location.segment - target - (min_rewind - 1) < 1) {
-			short_seek = 1;
-		} else {
-			count = ft_location.segment - target - overshoot;
-			short_seek = (count < 1);
-		}
-		if (short_seek) {
-			count = 1;	/* do shortest rewind */
-			expected = ft_location.segment - min_rewind;
-			if (expected/ft_segments_per_track != ft_location.track) {
-				expected = (ft_location.track * 
-					    ft_segments_per_track);
-			}
-		} else {
-			expected = target;
-		}
-		fast_seek(count, 1);
-		logical_forward();
-		if (ftape_read_id() < 0 || !ft_location.known ||
-		    (sigtestsetmask(&current->pending.signal, _DONT_BLOCK))) {
-			if ((!ftape_tape_running && !ft_location.known) ||
-			    ++failures > FT_SECTORS_PER_SEGMENT) {
-				TRACE_ABORT(-EIO, ft_t_err,
-					    "read_id failed completely");
-			}
-			FT_SIGNAL_EXIT(_DONT_BLOCK);
-			TRACE_CATCH(ftape_report_drive_status(pstatus),);
-			TRACE(ft_t_noise, "ftape_read_id failed, retry (%d)",
-			      failures);
-			continue;
-		}
-		TRACE(ft_t_noise, "ended at %d/%d (%d,%d,%d)", 
-		      ft_location.segment, ft_location.sector,
-		      min_rewind, overshoot, inhibit_correction);
-		if (!inhibit_correction &&
-		    (ft_location.segment < expected ||
-		     ft_location.segment > expected + margin)) {
-			int error = expected - ft_location.segment;
-			if (short_seek) {
-				TRACE(ft_t_noise,
-				      "adjusting min_rewind from %d to %d",
-				      min_rewind, min_rewind + error);
-				min_rewind += error;
-				if (min_rewind < -5) {
-					/* is this right ? FIXME ! */
-					/* keep sane value */
-					min_rewind = -5;
-					TRACE(ft_t_noise, 
-					      "clipped min_rewind to %d",
-					      min_rewind);
-				}
-			} else {
-				TRACE(ft_t_noise,
-				      "adjusting overshoot from %d to %d",
-				      overshoot, overshoot + error);
-				overshoot += error;
-				if (overshoot < -5 ||
-				    overshoot > OVERSHOOT_LIMIT) {
-					if (overshoot < 0) {
-						/* keep sane value */
-						overshoot = -5;
-					} else {
-						/* keep sane value */
-						overshoot = OVERSHOOT_LIMIT;
-					}
-					TRACE(ft_t_noise,
-					      "clipped overshoot to %d",
-					      overshoot);
-				}
-			}
-		}
-	} while (ft_location.segment > segment_id);
-	if (ft_location.known) {
-		TRACE(ft_t_noise, "current location: %d/%d",
-		      ft_location.segment, ft_location.sector);
-	}
-	TRACE_EXIT 0;
-}
-
-static int determine_position(void)
-{
-	int retry = 0;
-	int status;
-	int result;
-	TRACE_FUN(ft_t_flow);
-
-	if (!ftape_tape_running) {
-		/*  This should only happen if tape is stopped by isr.
-		 */
-		TRACE(ft_t_flow, "waiting for tape stop");
-		if (ftape_ready_wait(ftape_timeout.pause, &status) < 0) {
-			TRACE(ft_t_flow, "drive still running (fatal)");
-			ftape_tape_running = 1;	/* ? */
-		}
-	} else {
-		ftape_report_drive_status(&status);
-	}
-	if (status & QIC_STATUS_READY) {
-		/*  Drive must be ready to check error state !
-		 */
-		TRACE(ft_t_flow, "drive is ready");
-		if (status & QIC_STATUS_ERROR) {
-			unsigned int error;
-			qic117_cmd_t command;
-
-			/*  Report and clear error state, try to continue.
-			 */
-			TRACE(ft_t_flow, "error status set");
-			ftape_report_error(&error, &command, 1);
-			ftape_ready_wait(ftape_timeout.reset, &status);
-			ftape_tape_running = 0;	/* ? */
-		}
-		if (check_bot_eot(status)) {
-			if (ft_location.bot) {
-				if ((status & QIC_STATUS_READY) == 0) {
-					/* tape moving away from
-					 * bot/eot, let's see if we
-					 * can catch up with the first
-					 * segment on this track.
-					 */
-				} else {
-					TRACE(ft_t_flow,
-					      "start tape from logical bot");
-					logical_forward();	/* start moving */
-				}
-			} else {
-				if ((status & QIC_STATUS_READY) == 0) {
-					TRACE(ft_t_noise, "waiting for logical end of track");
-					result = ftape_ready_wait(ftape_timeout.reset, &status);
-					/* error handling needed ? */
-				} else {
-					TRACE(ft_t_noise,
-					      "tape at logical end of track");
-				}
-			}
-		} else {
-			TRACE(ft_t_flow, "start tape");
-			logical_forward();	/* start moving */
-			ft_location.known = 0;	/* not cleared by logical forward ! */
-		}
-	}
-	/* tape should be moving now, start reading id's
-	 */
-	while (!ft_location.known &&
-	       retry++ < FT_SECTORS_PER_SEGMENT &&
-	       (result = ftape_read_id()) < 0) {
-
-		TRACE(ft_t_flow, "location unknown");
-
-		/* exit on signal
-		 */
-		FT_SIGNAL_EXIT(_DONT_BLOCK);
-
-		/*  read-id somehow failed, tape may
-		 *  have reached end or some other
-		 *  error happened.
-		 */
-		TRACE(ft_t_flow, "read-id failed");
-		TRACE_CATCH(ftape_report_drive_status(&status),);
-		TRACE(ft_t_err, "ftape_report_drive_status: 0x%02x", status);
-		if (status & QIC_STATUS_READY) {
-			ftape_tape_running = 0;
-			TRACE(ft_t_noise, "tape stopped for unknown reason! "
-			      "status = 0x%02x", status);
-			if (status & QIC_STATUS_ERROR ||
-			    !check_bot_eot(status)) {
-				/* oops, tape stopped but not at end!
-				 */
-				TRACE_EXIT -EIO;
-			}
-		}
-	}
-	TRACE(ft_t_flow,
-	      "tape is positioned at segment %d", ft_location.segment);
-	TRACE_EXIT ft_location.known ? 0 : -EIO;
-}
-
-/*      Get the tape running and position it just before the
- *      requested segment.
- *      Seek tape-track and reposition as needed.
- */
-int ftape_start_tape(int segment_id, int sector_offset)
-{
-	int track = segment_id / ft_segments_per_track;
-	int result = -EIO;
-	int status;
-	static int last_segment = -1;
-	static int bad_bus_timing = 0;
-	/* number of segments passing the head between starting the tape
-	 * and being able to access the first sector.
-	 */
-	static int start_offset = 1;
-	int retry;
-	TRACE_FUN(ft_t_flow);
-
-	/* If sector_offset > 0, seek into wanted segment instead of
-	 * into previous.
-	 * This allows error recovery if a part of the segment is bad
-	 * (erased) causing the tape drive to generate an index pulse
-	 * thus causing a no-data error before the requested sector
-	 * is reached.
-	 */
-	ftape_tape_running = 0;
-	TRACE(ft_t_noise, "target segment: %d/%d%s", segment_id, sector_offset,
-		ft_buffer[ft_head]->retry > 0 ? " retry" : "");
-	if (ft_buffer[ft_head]->retry > 0) {	/* this is a retry */
-		int dist = segment_id - last_segment;
-
-		if ((int)ft_history.overrun_errors < overrun_count_offset) {
-			overrun_count_offset = ft_history.overrun_errors;
-		} else if (dist < 0 || dist > 50) {
-			overrun_count_offset = ft_history.overrun_errors;
-		} else if ((ft_history.overrun_errors -
-			    overrun_count_offset) >= 8) {
-			if (ftape_increase_threshold() >= 0) {
-				--ft_buffer[ft_head]->retry;
-				overrun_count_offset =
-					ft_history.overrun_errors;
-				TRACE(ft_t_warn, "increased threshold because "
-				      "of excessive overrun errors");
-			} else if (!bad_bus_timing && ft_data_rate >= 1000) {
-				ftape_half_data_rate();
-				--ft_buffer[ft_head]->retry;
-				bad_bus_timing = 1;
-				overrun_count_offset =
-					ft_history.overrun_errors;
-				TRACE(ft_t_warn, "reduced datarate because "
-				      "of excessive overrun errors");
-			}
-		}
-	}
-	last_segment = segment_id;
-	if (ft_location.track != track ||
-	    (ftape_might_be_off_track && ft_buffer[ft_head]->retry== 0)) {
-		/* current track unknown or not equal to destination
-		 */
-		ftape_ready_wait(ftape_timeout.seek, &status);
-		ftape_seek_head_to_track(track);
-		/* overrun_count_offset = ft_history.overrun_errors; */
-	}
-	result = -EIO;
-	retry = 0;
-	while (result < 0     &&
-	       retry++ <= 5   &&
-	       !ft_failure &&
-	       !(sigtestsetmask(&current->pending.signal, _DONT_BLOCK))) {
-		
-		if (retry && start_offset < 5) {
-			start_offset ++;
-		}
-		/*  Check if we are able to catch the requested
-		 *  segment in time.
-		 */
-		if ((ft_location.known || (determine_position() == 0)) &&
-		    ft_location.segment >=
-		    (segment_id -
-		     ((ftape_tape_running || ft_location.bot)
-		      ? 0 : start_offset))) {
-			/*  Too far ahead (in or past target segment).
-			 */
-			if (ftape_tape_running) {
-				if ((result = ftape_stop_tape(&status)) < 0) {
-					TRACE(ft_t_err,
-					      "stop tape failed with code %d",
-					      result);
-					break;
-				}
-				TRACE(ft_t_noise, "tape stopped");
-				ftape_tape_running = 0;
-			}
-			TRACE(ft_t_noise, "repositioning");
-			++ft_history.rewinds;
-			if (segment_id % ft_segments_per_track < start_offset){
-				TRACE(ft_t_noise, "end of track condition\n"
-				      KERN_INFO "segment_id        : %d\n"
-				      KERN_INFO "ft_segments_per_track: %d\n"
-				      KERN_INFO "start_offset      : %d",
-				      segment_id, ft_segments_per_track, 
-				      start_offset);
-				      
-				/*  If seeking to first segments on
-				 *  track better do a complete rewind
-				 *  to logical begin of track to get a
-				 *  more steady tape motion.  
-				 */
-				result = ftape_command_wait(
-					(ft_location.track & 1)
-					? QIC_PHYSICAL_FORWARD
-					: QIC_PHYSICAL_REVERSE,
-					ftape_timeout.rewind, &status);
-				check_bot_eot(status);	/* update location */
-			} else {
-				result= skip_reverse(segment_id - start_offset,
-						     &status);
-			}
-		}
-		if (!ft_location.known) {
-			TRACE(ft_t_bug, "panic: location not known");
-			result = -EIO;
-			continue; /* while() will check for failure */
-		}
-		TRACE(ft_t_noise, "current segment: %d/%d",
-		      ft_location.segment, ft_location.sector);
-		/*  We're on the right track somewhere before the
-		 *  wanted segment.  Start tape movement if needed and
-		 *  skip to just before or inside the requested
-		 *  segment. Keep tape running.  
-		 */
-		result = 0;
-		if (ft_location.segment < 
-		    (segment_id - ((ftape_tape_running || ft_location.bot)
-				   ? 0 : start_offset))) {
-			if (sector_offset > 0) {
-				result = seek_forward(segment_id,
-						      retry <= 3);
-			} else {
-				result = seek_forward(segment_id - 1,
-						      retry <= 3);
-			}
-		}
-		if (result == 0 &&
-		    ft_location.segment !=
-		    (segment_id - (sector_offset > 0 ? 0 : 1))) {
-			result = -EIO;
-		}
-	}
-	if (result < 0) {
-		TRACE(ft_t_err, "failed to reposition");
-	} else {
-		ft_runner_status = running;
-	}
-	TRACE_EXIT result;
-}
diff --git a/drivers/char/ftape/lowlevel/ftape-rw.h b/drivers/char/ftape/lowlevel/ftape-rw.h
deleted file mode 100644
index 32f4fee..0000000
--- a/drivers/char/ftape/lowlevel/ftape-rw.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#ifndef _FTAPE_RW_H
-#define _FTAPE_RW_H
-
-/*
- * Copyright (C) 1993-1996 Bas Laarhoven,
- *           (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-rw.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:25 $
- *
- *      This file contains the definitions for the read and write
- *      functions for the QIC-117 floppy-tape driver for Linux.
- *
- * Claus-Justus Heine (1996/09/20): Add definition of format code 6
- * Claus-Justus Heine (1996/10/04): Changed GET/PUT macros to cast to (__u8 *)
- *
- */
-
-#include "../lowlevel/fdc-io.h"
-#include "../lowlevel/ftape-init.h"
-#include "../lowlevel/ftape-bsm.h"
-
-#include <asm/unaligned.h>
-
-#define GET2(address, offset) get_unaligned((__u16*)((__u8 *)address + offset))
-#define GET4(address, offset) get_unaligned((__u32*)((__u8 *)address + offset))
-#define GET8(address, offset) get_unaligned((__u64*)((__u8 *)address + offset))
-#define PUT2(address, offset , value) put_unaligned((value), (__u16*)((__u8 *)address + offset))
-#define PUT4(address, offset , value) put_unaligned((value), (__u32*)((__u8 *)address + offset))
-#define PUT8(address, offset , value) put_unaligned((value), (__u64*)((__u8 *)address + offset))
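
These helpers wrap get_unaligned()/put_unaligned() so that multi-byte fields can be read from or written to a byte buffer at an arbitrary offset. A hypothetical fragment (the offset and field are invented purely for illustration):

	__u8 buf[16];
	__u16 field;

	field = GET2(buf, 4);		/* unaligned 16-bit load at byte offset 4 */
	PUT2(buf, 4, field + 1);	/* unaligned 16-bit store back */
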
-
-enum runner_status_enum {
-	idle = 0,
-	running,
-	do_abort,
-	aborting,
-	logical_eot,
-	end_of_tape,
-};
-
-typedef enum ft_buffer_queue {
-	ft_queue_head = 0,
-	ft_queue_tail = 1
-} ft_buffer_queue_t;
-
-
-typedef struct {
-	int track;		/* tape head position */
-	volatile int segment;	/* current segment */
-	volatile int sector;	/* sector offset within current segment */
-	volatile unsigned int bot;	/* logical begin of track */
-	volatile unsigned int eot;	/* logical end of track */
-	volatile unsigned int known;	/* validates bot, segment, sector */
-} location_record;
-
-/*      Count nr of 1's in pattern.
- */
-static inline int count_ones(unsigned long mask)
-{
-	int bits;
-
-	for (bits = 0; mask != 0; mask >>= 1) {
-		if (mask & 1) {
-			++bits;
-		}
-	}
-	return bits;
-}
-
-#define FT_MAX_NR_BUFFERS 16 /* arbitrary value */
-/*      ftape-rw.c defined global vars.
- */
-extern buffer_struct *ft_buffer[FT_MAX_NR_BUFFERS];
-extern int ft_nr_buffers;
-extern location_record ft_location;
-extern volatile int ftape_tape_running;
-
-/*      ftape-rw.c defined global functions.
- */
-extern int  ftape_setup_new_segment(buffer_struct * buff,
-				    int segment_id,
-				    int offset);
-extern int  ftape_calc_next_cluster(buffer_struct * buff);
-extern buffer_struct *ftape_next_buffer (ft_buffer_queue_t pos);
-extern buffer_struct *ftape_get_buffer  (ft_buffer_queue_t pos);
-extern int            ftape_buffer_id   (ft_buffer_queue_t pos);
-extern void           ftape_reset_buffer(void);
-extern void ftape_tape_parameters(__u8 drive_configuration);
-extern int  ftape_wait_segment(buffer_state_enum state);
-extern int  ftape_dumb_stop(void);
-extern int  ftape_start_tape(int segment_id, int offset);
-extern int  ftape_stop_tape(int *pstatus);
-extern int  ftape_handle_logical_eot(void);
-extern buffer_state_enum ftape_set_state(buffer_state_enum new_state);
-#endif				/* _FTAPE_RW_H */
diff --git a/drivers/char/ftape/lowlevel/ftape-setup.c b/drivers/char/ftape/lowlevel/ftape-setup.c
deleted file mode 100644
index 678340a..0000000
--- a/drivers/char/ftape/lowlevel/ftape-setup.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/* 
- * Copyright (C) 1996, 1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-setup.c,v $
- * $Revision: 1.7 $
- * $Date: 1997/10/10 09:57:06 $
- *
- *      This file contains the code for processing the kernel command
- *      line options for the QIC-40/80/3010/3020 floppy-tape driver
- *      "ftape" for Linux.
- */
-
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-
-#include <linux/ftape.h>
-#include <linux/init.h>
-#include "../lowlevel/ftape-tracing.h"
-#include "../lowlevel/fdc-io.h"
-
-static struct param_table {
-	const char *name;
-	int *var;
-	int def_param;
-	int min;
-	int max;
-} config_params[] __initdata = {
-#ifndef CONFIG_FT_NO_TRACE_AT_ALL
-	{ "tracing",   &ftape_tracing,     3,              ft_t_bug, ft_t_any},
-#endif
-	{ "ioport",    &ft_fdc_base,       CONFIG_FT_FDC_BASE,     0x0, 0xfff},
-	{ "irq",       &ft_fdc_irq,        CONFIG_FT_FDC_IRQ,        2,    15},
-	{ "dma",       &ft_fdc_dma,        CONFIG_FT_FDC_DMA,        0,     3},
-	{ "threshold", &ft_fdc_threshold,  CONFIG_FT_FDC_THR,         1,    16},
-	{ "datarate",  &ft_fdc_rate_limit, CONFIG_FT_FDC_MAX_RATE, 500,  2000},
-	{ "fc10",      &ft_probe_fc10,     CONFIG_FT_PROBE_FC10,     0,     1},
-	{ "mach2",     &ft_mach2,          CONFIG_FT_MACH2,          0,     1}
-};
-
-static int __init ftape_setup(char *str)
-{
-	int i;
-	int param;
-	int ints[2];
-
-	TRACE_FUN(ft_t_flow);
-
-	str = get_options(str, ARRAY_SIZE(ints), ints);
-	if (str) {
-		for (i=0; i < NR_ITEMS(config_params); i++) {
-			if (strcmp(str,config_params[i].name) == 0){
-				if (ints[0]) {
-					param = ints[1];
-				} else {
-					param = config_params[i].def_param;
-				}
-				if (param < config_params[i].min ||
-				    param > config_params[i].max) {
-					TRACE(ft_t_err,
-					"parameter %s out of range %d ... %d",
-					      config_params[i].name,
-					      config_params[i].min,
-					      config_params[i].max);
-					goto out;
-				}
-				if(config_params[i].var) {
-					TRACE(ft_t_info, "%s=%d", str, param);
-					*config_params[i].var = param;
-				}
-				goto out;
-			}
-		}
-	}
-	if (str) {
-		TRACE(ft_t_err, "unknown ftape option [%s]", str);
-		
-		TRACE(ft_t_err, "allowed options are:");
-		for (i=0; i < NR_ITEMS(config_params); i++) {
-			TRACE(ft_t_err, " %s",config_params[i].name);
-		}
-	} else {
-		TRACE(ft_t_err, "botched ftape option");
-	}
- out:
-	TRACE_EXIT 1;
-}
-
-__setup("ftape=", ftape_setup);
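
With the hook above, ftape options were passed on the kernel command line as "ftape=<value>,<name>", one "ftape=" instance per option; get_options() strips the leading integer and the remainder is matched against the parameter table, and leaving the value out selects the table default. A hypothetical command-line fragment (880 decimal is 0x370):

	ftape=3,tracing ftape=880,ioport ftape=6,irq ftape=2,dma
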
diff --git a/drivers/char/ftape/lowlevel/ftape-tracing.c b/drivers/char/ftape/lowlevel/ftape-tracing.c
deleted file mode 100644
index 7fdc656..0000000
--- a/drivers/char/ftape/lowlevel/ftape-tracing.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- *      Copyright (C) 1993-1996 Bas Laarhoven,
- *                (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-tracing.c,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:27 $
- *
- *      This file contains the tracing code
- *      for the QIC-117 floppy-tape driver for Linux.
- */
-
-#include <linux/ftape.h>
-#include "../lowlevel/ftape-tracing.h"
-
-/*      Global vars.
- */
-/*      tracing
- *      set it to:     to log :
- *       0              bugs
- *       1              + errors
- *       2              + warnings
- *       3              + information
- *       4              + more information
- *       5              + program flow
- *       6              + fdc/dma info
- *       7              + data flow
- *       8              + everything else
- */
-ft_trace_t ftape_tracing = ft_t_info; /* Default level: information and up */
-int  ftape_function_nest_level;
-
-/*      Local vars.
- */
-static __u8 trace_id;
-static char spacing[] = "*                              ";
-
-void ftape_trace_call(const char *file, const char *name)
-{
-	char *indent;
-
-	/*    Since printk seems not to work with "%*s" format
-	 *    we'll use this work-around.
-	 */
-	if (ftape_function_nest_level < 0) {
-		printk(KERN_INFO "function nest level (%d) < 0\n",
-		       ftape_function_nest_level);
-		ftape_function_nest_level = 0;
-	}
-	if (ftape_function_nest_level < sizeof(spacing)) {
-		indent = (spacing +
-			  sizeof(spacing) - 1 -
-			  ftape_function_nest_level);
-	} else {
-		indent = spacing;
-	}
-	printk(KERN_INFO "[%03d]%s+%s (%s)\n",
-	       (int) trace_id++, indent, file, name);
-}
-
-void ftape_trace_exit(const char *file, const char *name)
-{
-	char *indent;
-
-	/*    Since printk seems not to work with "%*s" format
-	 *    we'll use this work-around.
-	 */
-	if (ftape_function_nest_level < 0) {
-		printk(KERN_INFO "function nest level (%d) < 0\n", ftape_function_nest_level);
-		ftape_function_nest_level = 0;
-	}
-	if (ftape_function_nest_level < sizeof(spacing)) {
-		indent = (spacing +
-			  sizeof(spacing) - 1 -
-			  ftape_function_nest_level);
-	} else {
-		indent = spacing;
-	}
-	printk(KERN_INFO "[%03d]%s-%s (%s)\n",
-	       (int) trace_id++, indent, file, name);
-}
-
-void ftape_trace_log(const char *file, const char *function)
-{
-	char *indent;
-
-	/*    Since printk seems not to work with "%*s" format
-	 *    we'll use this work-around.
-	 */
-	if (ftape_function_nest_level < 0) {
-		printk(KERN_INFO "function nest level (%d) < 0\n", ftape_function_nest_level);
-		ftape_function_nest_level = 0;
-	}
-	if (ftape_function_nest_level < sizeof(spacing)) {
-		indent = (spacing + 
-			  sizeof(spacing) - 1 - 
-			  ftape_function_nest_level);
-	} else {
-		indent = spacing;
-	}
-	printk(KERN_INFO "[%03d]%s%s (%s) - ", 
-	       (int) trace_id++, indent, file, function);
-}
diff --git a/drivers/char/ftape/lowlevel/ftape-tracing.h b/drivers/char/ftape/lowlevel/ftape-tracing.h
deleted file mode 100644
index 2950810..0000000
--- a/drivers/char/ftape/lowlevel/ftape-tracing.h
+++ /dev/null
@@ -1,179 +0,0 @@
-#ifndef _FTAPE_TRACING_H
-#define _FTAPE_TRACING_H
-
-/*
- * Copyright (C) 1994-1996 Bas Laarhoven,
- *           (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-tracing.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:28 $
- *
- *      This file contains definitions that ease the debugging of the
- *      QIC-40/80/3010/3020 floppy-tape driver "ftape" for Linux.
- */
-
-#include <linux/kernel.h>
-
-/*
- *  Be very careful with TRACE_EXIT and TRACE_ABORT.
- *
- *  if (something) TRACE_EXIT error;
- *
- *  will NOT work. Use
- *
- *  if (something) {
- *    TRACE_EXIT error;
- *  }
- *
- *  instead. Maybe a bit dangerous, but it saves lots of lines of code.
- */
-
-#define LL_X "%d/%d KB"
-#define LL(x) (unsigned int)((__u64)(x)>>10), (unsigned int)((x)&1023)
-
-typedef enum {
-	ft_t_nil = -1,
-	ft_t_bug,
-	ft_t_err,
-	ft_t_warn,
-	ft_t_info,
-	ft_t_noise,
-	ft_t_flow,
-	ft_t_fdc_dma,
-	ft_t_data_flow,
-	ft_t_any
-} ft_trace_t;
-
-#ifdef  CONFIG_FT_NO_TRACE_AT_ALL
-/*  the compiler will optimize away most TRACE() macros
- */
-#define FT_TRACE_TOP_LEVEL	ft_t_bug
-#define TRACE_FUN(level)	do {} while(0)
-#define TRACE_EXIT		return
-#define TRACE(l, m, i...)						\
-{									\
-	if ((ft_trace_t)(l) == FT_TRACE_TOP_LEVEL) {			\
-		printk(KERN_INFO"ftape%s(%s):\n"	                \
-		       KERN_INFO m".\n" ,__FILE__, __FUNCTION__ , ##i);	\
-	}								\
-}
-#define SET_TRACE_LEVEL(l)      if ((l) == (l)) do {} while(0)
-#define TRACE_LEVEL		FT_TRACE_TOP_LEVEL
-
-#else
-
-#ifdef CONFIG_FT_NO_TRACE
-/*  the compiler will optimize away many TRACE() macros
- *  the ftape_simple_trace_call() function simply increments 
- *  the function nest level.
- */ 
-#define FT_TRACE_TOP_LEVEL	ft_t_warn
-#define TRACE_FUN(level)	ftape_function_nest_level++
-#define TRACE_EXIT		ftape_function_nest_level--; return
-
-#else
-#ifdef CONFIG_FT_FULL_DEBUG
-#define FT_TRACE_TOP_LEVEL ft_t_any
-#else
-#define FT_TRACE_TOP_LEVEL ft_t_flow
-#endif
-#define TRACE_FUN(level)					\
-	const ft_trace_t _tracing = level;			\
-	if (ftape_tracing >= (ft_trace_t)(level) &&		\
-	    (ft_trace_t)(level) <= FT_TRACE_TOP_LEVEL)		\
-		ftape_trace_call(__FILE__, __FUNCTION__);	\
-	ftape_function_nest_level ++;
-
-#define TRACE_EXIT						\
-	--ftape_function_nest_level;				\
-	if (ftape_tracing >= (ft_trace_t)(_tracing) &&		\
-	    (ft_trace_t)(_tracing) <= FT_TRACE_TOP_LEVEL)	\
-		ftape_trace_exit(__FILE__, __FUNCTION__);	\
-	return
-
-#endif
-
-#define TRACE(l, m, i...)					\
-{								\
-	if (ftape_tracing >= (ft_trace_t)(l) &&			\
-	    (ft_trace_t)(l) <= FT_TRACE_TOP_LEVEL) {		\
-		ftape_trace_log(__FILE__, __FUNCTION__);	\
-		printk(m".\n" ,##i);				\
-	}							\
-}
-
-#define SET_TRACE_LEVEL(l) 				\
-{							\
-	if ((ft_trace_t)(l) <= FT_TRACE_TOP_LEVEL) {	\
-		ftape_tracing = (ft_trace_t)(l);	\
-	} else {					\
-		ftape_tracing = FT_TRACE_TOP_LEVEL;	\
-	}						\
-}
-#define TRACE_LEVEL    							     \
-((ftape_tracing <= FT_TRACE_TOP_LEVEL) ? ftape_tracing : FT_TRACE_TOP_LEVEL)
-
-
-/*      Global variables declared in tracing.c
- */
-extern ft_trace_t ftape_tracing;  /* sets default level */
-extern int ftape_function_nest_level;
-
-/*      Global functions declared in tracing.c
- */
-extern void ftape_trace_call(const char *file, const char *name);
-extern void ftape_trace_exit(const char *file, const char *name);
-extern void ftape_trace_log (const char *file, const char *name);
-
-#endif /* !defined(CONFIG_FT_NO_TRACE_AT_ALL) */
-
-/*
- *   Abort with a message.
- */
-#define TRACE_ABORT(res, i...)			\
-{						\
- 	TRACE(i);				\
-	TRACE_EXIT res;				\
-}
-
-/*   The following transforms the common "if(result < 0) ... " into a
- *   one-liner.
- */
-#define _TRACE_CATCH(level, fun, action)				\
-{									\
-	int _res = (fun);						\
-	if (_res < 0) {							\
-		do { action /* */ ; } while(0);				\
-		TRACE_ABORT(_res, level, "%s failed: %d", #fun,	_res);	\
-	}								\
-}
-
-#define TRACE_CATCH(fun, fail) _TRACE_CATCH(ft_t_err, fun, fail)
-
-/*  Abort the current function when signalled. This doesn't belong here,
- *  but rather into ftape-rw.h (maybe)
- */
-#define FT_SIGNAL_EXIT(sig_mask)					\
-	if (sigtestsetmask(&current->pending.signal, sig_mask)) {	\
-		TRACE_ABORT(-EINTR,					\
-			    ft_t_warn,					\
-			    "interrupted by non-blockable signal");	\
-	}
-
-#endif /* _FTAPE_TRACING_H */
diff --git a/drivers/char/ftape/lowlevel/ftape-write.c b/drivers/char/ftape/lowlevel/ftape-write.c
deleted file mode 100644
index 45601ec..0000000
--- a/drivers/char/ftape/lowlevel/ftape-write.c
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- *      Copyright (C) 1993-1995 Bas Laarhoven,
- *                (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-write.c,v $
- * $Revision: 1.3.4.1 $
- * $Date: 1997/11/14 18:07:04 $
- *
- *      This file contains the writing code
- *      for the QIC-117 floppy-tape driver for Linux.
- */
-
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-
-#include <linux/ftape.h>
-#include <linux/qic117.h>
-#include "../lowlevel/ftape-tracing.h"
-#include "../lowlevel/ftape-write.h"
-#include "../lowlevel/ftape-read.h"
-#include "../lowlevel/ftape-io.h"
-#include "../lowlevel/ftape-ctl.h"
-#include "../lowlevel/ftape-rw.h"
-#include "../lowlevel/ftape-ecc.h"
-#include "../lowlevel/ftape-bsm.h"
-#include "../lowlevel/fdc-isr.h"
-
-/*      Global vars.
- */
-
-/*      Local vars.
- */
-static int last_write_failed;
-
-void ftape_zap_write_buffers(void)
-{
-	int i;
-
-	for (i = 0; i < ft_nr_buffers; ++i) {
-		ft_buffer[i]->status = done;
-	}
-	ftape_reset_buffer();
-}
-
-static int copy_and_gen_ecc(void *destination, 
-			    const void *source,
-			    const SectorMap bad_sector_map)
-{
-	int result;
-	struct memory_segment mseg;
-	int bads = count_ones(bad_sector_map);
-	TRACE_FUN(ft_t_any);
-
-	if (bads > 0) {
-		TRACE(ft_t_noise, "bad sectors in map: %d", bads);
-	}
-	if (bads + 3 >= FT_SECTORS_PER_SEGMENT) {
-		TRACE(ft_t_noise, "empty segment");
-		mseg.blocks = 0; /* skip entire segment */
-		result = 0;      /* nothing written */
-	} else {
-		mseg.blocks = FT_SECTORS_PER_SEGMENT - bads;
-		mseg.data = destination;
-		memcpy(mseg.data, source, (mseg.blocks - 3) * FT_SECTOR_SIZE);
-		result = ftape_ecc_set_segment_parity(&mseg);
-		if (result < 0) {
-			TRACE(ft_t_err, "ecc_set_segment_parity failed");
-		} else {
-			result = (mseg.blocks - 3) * FT_SECTOR_SIZE;
-		}
-	}
-	TRACE_EXIT result;
-}
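
In other words, a segment with bads bad sectors carries (FT_SECTORS_PER_SEGMENT - bads - 3) * FT_SECTOR_SIZE bytes of user data, with the last three good sectors reserved for ECC parity, and once three or fewer good sectors remain the segment is written as empty. Assuming the usual QIC geometry of 32 sectors of 1 KB per segment, a clean segment holds (32 - 0 - 3) * 1 KB = 29 KB, which matches the "range 0..29 Kb" noted in write_segment() below, while a segment with two bad sectors drops to 27 KB.
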
-
-
-int ftape_start_writing(const ft_write_mode_t mode)
-{
-	buffer_struct *head = ftape_get_buffer(ft_queue_head);
-	int segment_id = head->segment_id;
-	int result;
-	buffer_state_enum wanted_state = (mode == FT_WR_DELETE
-					  ? deleting
-					  : writing);
-	TRACE_FUN(ft_t_flow);
-
-	if ((ft_driver_state != wanted_state) || head->status != waiting) {
-		TRACE_EXIT 0;
-	}
-	ftape_setup_new_segment(head, segment_id, 1);
-	if (mode == FT_WR_SINGLE) {
-		/* stop tape instead of pause */
-		head->next_segment = 0;
-	}
-	ftape_calc_next_cluster(head); /* prepare */
-	head->status = ft_driver_state; /* either writing or deleting */
-	if (ft_runner_status == idle) {
-		TRACE(ft_t_noise,
-		      "starting runner for segment %d", segment_id);
-		TRACE_CATCH(ftape_start_tape(segment_id,head->sector_offset),);
-	} else {
-		TRACE(ft_t_noise, "runner not idle, not starting tape");
-	}
-	/* go */
-	result = fdc_setup_read_write(head, (mode == FT_WR_DELETE
-					     ? FDC_WRITE_DELETED : FDC_WRITE));
-	ftape_set_state(wanted_state); /* should not be necessary */
-	TRACE_EXIT result;
-}
-
-/*  Wait until all data is actually written to tape.
- *  
- *  There is a problem: when the tape runs into logical EOT, then this
- *  fails. We need to restart the runner in this case.
- */
-int ftape_loop_until_writes_done(void)
-{
-	buffer_struct *head;
-	TRACE_FUN(ft_t_flow);
-
-	while ((ft_driver_state == writing || ft_driver_state == deleting) && 
-	       ftape_get_buffer(ft_queue_head)->status != done) {
-		/* set the runner status to idle if at lEOT */
-		TRACE_CATCH(ftape_handle_logical_eot(),	last_write_failed = 1);
-		/* restart the tape if necessary */
-		if (ft_runner_status == idle) {
-			TRACE(ft_t_noise, "runner is idle, restarting");
-			if (ft_driver_state == deleting) {
-				TRACE_CATCH(ftape_start_writing(FT_WR_DELETE),
-					    last_write_failed = 1);
-			} else {
-				TRACE_CATCH(ftape_start_writing(FT_WR_MULTI),
-					    last_write_failed = 1);
-			}
-		}
-		TRACE(ft_t_noise, "tail: %d, head: %d", 
-		      ftape_buffer_id(ft_queue_tail),
-		      ftape_buffer_id(ft_queue_head));
-		TRACE_CATCH(fdc_interrupt_wait(5 * FT_SECOND),
-			    last_write_failed = 1);
-		head = ftape_get_buffer(ft_queue_head);
-		if (head->status == error) {
-			/* Allow escape from loop when signaled !
-			 */
-			FT_SIGNAL_EXIT(_DONT_BLOCK);
-			if (head->hard_error_map != 0) {
-				/*  Implement hard write error recovery here
-				 */
-			}
-			/* retry this one */
-			head->status = waiting;
-			if (ft_runner_status == aborting) {
-				ftape_dumb_stop();
-			}
-			if (ft_runner_status != idle) {
-				TRACE_ABORT(-EIO, ft_t_err,
-					    "unexpected state: "
-					    "ft_runner_status != idle");
-			}
-			ftape_start_writing(ft_driver_state == deleting
-					    ? FT_WR_MULTI : FT_WR_DELETE);
-		}
-		TRACE(ft_t_noise, "looping until writes done");
-	}
-	ftape_set_state(idle);
-	TRACE_EXIT 0;
-}
-
-/*      Write given segment from buffer at address to tape.
- */
-static int write_segment(const int segment_id,
-			 const void *address, 
-			 const ft_write_mode_t write_mode)
-{
-	int bytes_written = 0;
-	buffer_struct *tail;
-	buffer_state_enum wanted_state = (write_mode == FT_WR_DELETE
-					  ? deleting : writing);
-	TRACE_FUN(ft_t_flow);
-
-	TRACE(ft_t_noise, "segment_id = %d", segment_id);
-	if (ft_driver_state != wanted_state) {
-		if (ft_driver_state == deleting ||
-		    wanted_state == deleting) {
-			TRACE_CATCH(ftape_loop_until_writes_done(),);
-		}
-		TRACE(ft_t_noise, "calling ftape_abort_operation");
-		TRACE_CATCH(ftape_abort_operation(),);
-		ftape_zap_write_buffers();
-		ftape_set_state(wanted_state);
-	}
-	/*    if all buffers full we'll have to wait...
-	 */
-	ftape_wait_segment(wanted_state);
-	tail = ftape_get_buffer(ft_queue_tail);
-	switch(tail->status) {
-	case done:
-		ft_history.defects += count_ones(tail->hard_error_map);
-		break;
-	case waiting:
-		/* this could happen with multiple EMPTY_SEGMENTs, but
-		 * shouldn't happen any more as we re-start the runner even
-		 * with an empty segment.
-		 */
-		bytes_written = -EAGAIN;
-		break;
-	case error:
-		/*  setup for a retry
-		 */
-		tail->status = waiting;
-		bytes_written = -EAGAIN; /* force retry */
-		if (tail->hard_error_map != 0) {
-			TRACE(ft_t_warn, 
-			      "warning: %d hard error(s) in written segment",
-			      count_ones(tail->hard_error_map));
-			TRACE(ft_t_noise, "hard_error_map = 0x%08lx", 
-			      (long)tail->hard_error_map);
-			/*  Implement hard write error recovery here
-			 */
-		}
-		break;
-	default:
-		TRACE_ABORT(-EIO, ft_t_err,
-			    "wait for empty segment failed, tail status: %d",
-			    tail->status);
-	}
-	/*    should runner stop ?
-	 */
-	if (ft_runner_status == aborting) {
-		buffer_struct *head = ftape_get_buffer(ft_queue_head);
-		if (head->status == wanted_state) {
-			head->status = done; /* ???? */
-		}
-		/*  don't call abort_operation(), we don't want to zap
-		 *  the dma buffers
-		 */
-		TRACE_CATCH(ftape_dumb_stop(),);
-	} else {
-		/*  If just passed last segment on tape: wait for BOT
-		 *  or EOT mark. Sets ft_runner_status to idle if at lEOT
-		 *  and successful 
-		 */
-		TRACE_CATCH(ftape_handle_logical_eot(),);
-	}
-	if (tail->status == done) {
-		/* now at least one buffer is empty, fill it with our
-		 * data.  skip bad sectors and generate ecc.
-		 * copy_and_gen_ecc return nr of bytes written, range
-		 * copy_and_gen_ecc returns the number of bytes written, range
-		 *
-		 * Empty segments are handled inside copy_and_gen_ecc()
-		 */
-		if (write_mode != FT_WR_DELETE) {
-			TRACE_CATCH(bytes_written = copy_and_gen_ecc(
-				tail->address, address,
-				ftape_get_bad_sector_entry(segment_id)),);
-		}
-		tail->segment_id = segment_id;
-		tail->status = waiting;
-		tail = ftape_next_buffer(ft_queue_tail);
-	}
-	/*  Start tape only if all buffers full or flush mode.
-	 *  This will give higher probability of streaming.
-	 */
-	if (ft_runner_status != running && 
-	    ((tail->status == waiting &&
-	      ftape_get_buffer(ft_queue_head) == tail) ||
-	     write_mode != FT_WR_ASYNC)) {
-		TRACE_CATCH(ftape_start_writing(write_mode),);
-	}
-	TRACE_EXIT bytes_written;
-}
-
-/*  Write as much as fits from buffer to the given segment on tape
- *  and handle retries.
- *  Return the number of bytes written (>= 0), or:
- *      -EIO          write failed
- *      -EINTR        interrupted by signal
- *      -ENOSPC       device full
- */
-int ftape_write_segment(const int segment_id,
-			const void *buffer, 
-			const ft_write_mode_t flush)
-{
-	int retry = 0;
-	int result;
-	TRACE_FUN(ft_t_flow);
-
-	ft_history.used |= 2;
-	if (segment_id >= ft_tracks_per_tape*ft_segments_per_track) {
-		/* tape full */
-		TRACE_ABORT(-ENOSPC, ft_t_err,
-			    "invalid segment id: %d (max %d)", 
-			    segment_id, 
-			    ft_tracks_per_tape * ft_segments_per_track -1);
-	}
-	for (;;) {
-		if ((result = write_segment(segment_id, buffer, flush)) >= 0) {
-			if (result == 0) { /* empty segment */
-				TRACE(ft_t_noise,
-				      "empty segment, nothing written");
-			}
-			TRACE_EXIT result;
-		}
-		if (result == -EAGAIN) {
-			if (++retry > 100) { /* give up */
-				TRACE_ABORT(-EIO, ft_t_err,
-				      "write failed, >100 retries in segment");
-			}
-			TRACE(ft_t_warn, "write error, retry %d (%d)",
-			      retry,
-			      ftape_get_buffer(ft_queue_tail)->segment_id);
-		} else {
-			TRACE_ABORT(result, ft_t_err,
-				    "write_segment failed, error: %d", result);
-		}
-		/* Allow escape from loop when signaled !
-		 */
-		FT_SIGNAL_EXIT(_DONT_BLOCK);
-	}
-}
diff --git a/drivers/char/ftape/lowlevel/ftape-write.h b/drivers/char/ftape/lowlevel/ftape-write.h
deleted file mode 100644
index 0e7f898..0000000
--- a/drivers/char/ftape/lowlevel/ftape-write.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef _FTAPE_WRITE_H
-#define _FTAPE_WRITE_H
-
-/*
- * Copyright (C) 1994-1995 Bas Laarhoven,
- *           (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-write.h,v $
- $Author: claus $
- *
- $Revision: 1.2 $
- $Date: 1997/10/05 19:18:30 $
- $State: Exp $
- *
- *      This file contains the definitions for the write functions
- *      for the QIC-117 floppy-tape driver for Linux.
- *
- */
-
-
-/*      ftape-write.c defined global functions.
- */
-typedef enum {
-	FT_WR_ASYNC  = 0, /* start tape only when all buffers are full */
-	FT_WR_MULTI  = 1, /* start tape, but don't necessarily stop */
-	FT_WR_SINGLE = 2, /* write a single segment and stop afterwards */
-	FT_WR_DELETE = 3  /* write deleted data marks */
-} ft_write_mode_t;
-
-extern int  ftape_start_writing(const ft_write_mode_t mode);
-extern int  ftape_write_segment(const int segment,
-				const void *address, 
-				const ft_write_mode_t flushing);
-extern void ftape_zap_write_buffers(void);
-extern int  ftape_loop_until_writes_done(void);
-
-#endif				/* _FTAPE_WRITE_H */
-
diff --git a/drivers/char/ftape/lowlevel/ftape_syms.c b/drivers/char/ftape/lowlevel/ftape_syms.c
deleted file mode 100644
index 8e0dc4a..0000000
--- a/drivers/char/ftape/lowlevel/ftape_syms.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- *      Copyright (C) 1996-1997 Claus-Justus Heine
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape_syms.c,v $
- * $Revision: 1.4 $
- * $Date: 1997/10/17 00:03:51 $
- *
- *      This file contains the symbols that the ftape low level
- *      part of the QIC-40/80/3010/3020 floppy-tape driver "ftape"
- *      exports to its high level clients
- */
-
-#include <linux/module.h>
-
-#include <linux/ftape.h>
-#include "../lowlevel/ftape-tracing.h"
-#include "../lowlevel/ftape-init.h"
-#include "../lowlevel/fdc-io.h"
-#include "../lowlevel/ftape-read.h"
-#include "../lowlevel/ftape-write.h"
-#include "../lowlevel/ftape-io.h"
-#include "../lowlevel/ftape-ctl.h"
-#include "../lowlevel/ftape-rw.h"
-#include "../lowlevel/ftape-bsm.h"
-#include "../lowlevel/ftape-buffer.h"
-#include "../lowlevel/ftape-format.h"
-
-/* bad sector handling from ftape-bsm.c */
-EXPORT_SYMBOL(ftape_get_bad_sector_entry);
-EXPORT_SYMBOL(ftape_find_end_of_bsm_list);
-/* from ftape-rw.c */
-EXPORT_SYMBOL(ftape_set_state);
-/* from ftape-ctl.c */
-EXPORT_SYMBOL(ftape_seek_to_bot);
-EXPORT_SYMBOL(ftape_seek_to_eot);
-EXPORT_SYMBOL(ftape_abort_operation);
-EXPORT_SYMBOL(ftape_get_status);
-EXPORT_SYMBOL(ftape_enable);
-EXPORT_SYMBOL(ftape_disable);
-EXPORT_SYMBOL(ftape_mmap);
-EXPORT_SYMBOL(ftape_calibrate_data_rate);
-/* from ftape-io.c */
-EXPORT_SYMBOL(ftape_reset_drive);
-EXPORT_SYMBOL(ftape_command);
-EXPORT_SYMBOL(ftape_parameter);
-EXPORT_SYMBOL(ftape_ready_wait);
-EXPORT_SYMBOL(ftape_report_operation);
-EXPORT_SYMBOL(ftape_report_error);
-/* from ftape-read.c */
-EXPORT_SYMBOL(ftape_read_segment_fraction);
-EXPORT_SYMBOL(ftape_zap_read_buffers);
-EXPORT_SYMBOL(ftape_read_header_segment);
-EXPORT_SYMBOL(ftape_decode_header_segment);
-/* from ftape-write.c */
-EXPORT_SYMBOL(ftape_write_segment);
-EXPORT_SYMBOL(ftape_start_writing);
-EXPORT_SYMBOL(ftape_loop_until_writes_done);
-/* from ftape-buffer.h */
-EXPORT_SYMBOL(ftape_set_nr_buffers);
-/* from ftape-format.h */
-EXPORT_SYMBOL(ftape_format_track);
-EXPORT_SYMBOL(ftape_format_status);
-EXPORT_SYMBOL(ftape_verify_segment);
-/* from tracing.c */
-#ifndef CONFIG_FT_NO_TRACE_AT_ALL
-EXPORT_SYMBOL(ftape_tracing);
-EXPORT_SYMBOL(ftape_function_nest_level);
-EXPORT_SYMBOL(ftape_trace_call);
-EXPORT_SYMBOL(ftape_trace_exit);
-EXPORT_SYMBOL(ftape_trace_log);
-#endif
-
diff --git a/drivers/char/ftape/zftape/Makefile b/drivers/char/ftape/zftape/Makefile
deleted file mode 100644
index 6d91c1f..0000000
--- a/drivers/char/ftape/zftape/Makefile
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-#       Copyright (C) 1996, 1997 Claus-Justus Heine.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-# 
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-# 
-# You should have received a copy of the GNU General Public License
-# along with this program; see the file COPYING.  If not, write to
-# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-#
-# $Source: /homes/cvs/ftape-stacked/ftape/zftape/Makefile,v $
-# $Revision: 1.4 $
-# $Date: 1997/10/05 19:18:58 $
-#
-#      Makefile for the QIC-40/80/3010/3020 zftape interface VFS to
-#      ftape
-#
-
-
-# ZFT_OBSOLETE - enable the MTIOC_ZFTAPE_GETBLKSZ ioctl. You should
-#                leave this enabled for compatibility with taper.
-
-obj-$(CONFIG_ZFTAPE) += zftape.o
-
-zftape-objs := zftape-rw.o zftape-ctl.o zftape-read.o \
-	       zftape-write.o zftape-vtbl.o zftape-eof.o \
-	       zftape-init.o zftape-buffers.o zftape_syms.o
-
-EXTRA_CFLAGS := -DZFT_OBSOLETE
diff --git a/drivers/char/ftape/zftape/zftape-buffers.c b/drivers/char/ftape/zftape/zftape-buffers.c
deleted file mode 100644
index 7ebce2e..0000000
--- a/drivers/char/ftape/zftape/zftape-buffers.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- *      Copyright (C) 1995-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/zftape/zftape-buffers.c,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:59 $
- *
- *      This file contains the dynamic buffer allocation routines 
- *      of zftape
- */
-
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-
-#include <linux/zftape.h>
-
-#include <linux/vmalloc.h>
-
-#include "../zftape/zftape-init.h"
-#include "../zftape/zftape-eof.h"
-#include "../zftape/zftape-ctl.h"
-#include "../zftape/zftape-write.h"
-#include "../zftape/zftape-read.h"
-#include "../zftape/zftape-rw.h"
-#include "../zftape/zftape-vtbl.h"
-
-/*  global variables
- */
-
-/*  local variables
- */
-static unsigned int used_memory;
-static unsigned int peak_memory;
-
-void zft_memory_stats(void)
-{
-	TRACE_FUN(ft_t_flow);
-
-	TRACE(ft_t_noise, "Memory usage (vmalloc allocations):\n"
-	      KERN_INFO "total allocated: %d\n"
-	      KERN_INFO "peak allocation: %d",
-	      used_memory, peak_memory);
-	peak_memory = used_memory;
-	TRACE_EXIT;
-}
-
-int zft_vcalloc_once(void *new, size_t size)
-{
-	TRACE_FUN(ft_t_flow);
-	if (zft_vmalloc_once(new, size) < 0) {
-		TRACE_EXIT -ENOMEM;
-	}
-	memset(*(void **)new, '\0', size);
-	TRACE_EXIT 0;
-}
-int zft_vmalloc_once(void *new, size_t size)
-{
-	TRACE_FUN(ft_t_flow);
-
-	if (*(void **)new != NULL || size == 0) {
-		TRACE_EXIT 0;
-	}
-	if ((*(void **)new = vmalloc(size)) == NULL) {
-		TRACE_EXIT -ENOMEM;
-	}
-	used_memory += size;
-	if (peak_memory < used_memory) {
-		peak_memory = used_memory;
-	}
-	TRACE_ABORT(0, ft_t_noise,
-		    "allocated buffer @ %p, %zd bytes", *(void **)new, size);
-}
-int zft_vmalloc_always(void *new, size_t size)
-{
-	TRACE_FUN(ft_t_flow);
-
-	zft_vfree(new, size);
-	TRACE_EXIT zft_vmalloc_once(new, size);
-}
-void zft_vfree(void *old, size_t size)
-{
-	TRACE_FUN(ft_t_flow);
-
-	if (*(void **)old) {
-		vfree(*(void **)old);
-		used_memory -= size;
-		TRACE(ft_t_noise, "released buffer @ %p, %zd bytes",
-		      *(void **)old, size);
-		*(void **)old = NULL;
-	}
-	TRACE_EXIT;
-}
-
-void *zft_kmalloc(size_t size)
-{
-	void *new;
-
-	while ((new = kmalloc(size, GFP_KERNEL)) == NULL) {
-		msleep_interruptible(100);
-	}
-	memset(new, 0, size);
-	used_memory += size;
-	if (peak_memory < used_memory) {
-		peak_memory = used_memory;
-	}
-	return new;
-}
-
-void zft_kfree(void *old, size_t size)
-{
-	kfree(old);
-	used_memory -= size;
-}
-
-/* there are some more buffers that are allocated on demand.
- * cleanup_module() calls this function to be sure to have released
- * them 
- */
-void zft_uninit_mem(void)
-{
-	TRACE_FUN(ft_t_flow);
-
-	zft_vfree(&zft_hseg_buf, FT_SEGMENT_SIZE);
-	zft_vfree(&zft_deblock_buf, FT_SEGMENT_SIZE); zft_deblock_segment = -1;
-	zft_free_vtbl();
-	if (zft_cmpr_lock(0 /* don't load */) == 0) {
-		(*zft_cmpr_ops->cleanup)();
-		(*zft_cmpr_ops->reset)(); /* unlock it again */
-	}
-	zft_memory_stats();
-	TRACE_EXIT;
-}
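
The allocate-once helpers above take the address of a buffer pointer as a plain void *, which is easy to misread at first sight. Below is a minimal user-space sketch of the same idiom (all names invented for illustration, with malloc() standing in for vmalloc()); it is not part of the removed driver.

#include <stdlib.h>
#include <string.h>

/* Allocate a buffer exactly once through a handle that points at the
 * buffer pointer; repeated calls become no-ops, as in zft_vmalloc_once().
 */
static int alloc_once(void *handle, size_t size)
{
	void **slot = handle;

	if (*slot != NULL || size == 0)
		return 0;			/* already set up, nothing to do */
	if ((*slot = malloc(size)) == NULL)
		return -1;			/* out of memory */
	memset(*slot, 0, size);			/* zero-fill, like zft_vcalloc_once() */
	return 0;
}

/* Usage: the NULL-initialized pointer is passed by address. */
static char *deblock_buf;
/* ...  alloc_once(&deblock_buf, 32 * 1024);  ... */
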
diff --git a/drivers/char/ftape/zftape/zftape-buffers.h b/drivers/char/ftape/zftape/zftape-buffers.h
deleted file mode 100644
index 798e312..0000000
--- a/drivers/char/ftape/zftape/zftape-buffers.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef _FTAPE_DYNMEM_H
-#define _FTAPE_DYNMEM_H
-
-/*
- *      Copyright (C) 1995-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/zftape/zftape-buffers.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:18:59 $
- *
- *   memory allocation routines.
- *
- */
-
-/* we do not allocate all of the really large buffer memory before
- * someone tries to open the drive. ftape_open() may fail with
- * -ENOMEM, but that's better than having 200k of vmalloced memory which
- * cannot be swapped out.
- */
-
-extern void  zft_memory_stats(void);
-extern int   zft_vmalloc_once(void *new, size_t size);
-extern int   zft_vcalloc_once(void *new, size_t size);
-extern int   zft_vmalloc_always(void *new, size_t size);
-extern void  zft_vfree(void *old, size_t size);
-extern void *zft_kmalloc(size_t size);
-extern void  zft_kfree(void *old, size_t size);
-
-/* called by cleanup_module() 
- */
-extern void zft_uninit_mem(void);
-
-#endif
-
-
-
-
-
-
-
diff --git a/drivers/char/ftape/zftape/zftape-ctl.c b/drivers/char/ftape/zftape/zftape-ctl.c
deleted file mode 100644
index 22ba0f5..0000000
--- a/drivers/char/ftape/zftape/zftape-ctl.c
+++ /dev/null
@@ -1,1417 +0,0 @@
-/* 
- *      Copyright (C) 1996, 1997 Claus-Justus Heine
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/zftape/zftape-ctl.c,v $
- * $Revision: 1.2.6.2 $
- * $Date: 1997/11/14 18:07:33 $
- *
- *      This file contains the non-read/write zftape functions
- *      for the QIC-40/80/3010/3020 floppy-tape driver for Linux.
- */
-
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/fcntl.h>
-
-#include <linux/zftape.h>
-
-#include <asm/uaccess.h>
-
-#include "../zftape/zftape-init.h"
-#include "../zftape/zftape-eof.h"
-#include "../zftape/zftape-ctl.h"
-#include "../zftape/zftape-write.h"
-#include "../zftape/zftape-read.h"
-#include "../zftape/zftape-rw.h"
-#include "../zftape/zftape-vtbl.h"
-
-/*      Global vars.
- */
-int zft_write_protected; /* this is when cartridge rdonly or O_RDONLY */
-int zft_header_read;
-int zft_offline;
-unsigned int zft_unit;
-int zft_resid;
-int zft_mt_compression;
-
-/*      Local vars.
- */
-static int going_offline;
-
-typedef int (mt_fun)(int *argptr);
-typedef int (*mt_funp)(int *argptr);
-typedef struct
-{
-	mt_funp function;
-	unsigned offline         : 1; /* op permitted if offline or no_tape */
-	unsigned write_protected : 1; /* op permitted if write-protected    */
-	unsigned not_formatted   : 1; /* op permitted if tape not formatted */
-	unsigned raw_mode        : 1; /* op permitted if zft_mode == 0    */
-	unsigned need_idle_state : 1; /* need to call def_idle_state        */
-	char     *name;
-} fun_entry;
-
-static mt_fun mt_dummy, mt_reset, mt_fsr, mt_bsr, mt_rew, mt_offl, mt_nop,
-	mt_weof, mt_erase, mt_ras2, mt_setblk, mt_setdensity,
-	mt_seek, mt_tell, mt_reten, mt_eom, mt_fsf, mt_bsf,
-	mt_fsfm, mt_bsfm, mt_setdrvbuffer, mt_compression;
-
-static fun_entry mt_funs[]=
-{ 
-	{mt_reset       , 1, 1, 1, 1, 0, "MT_RESET" }, /*  0 */
-	{mt_fsf         , 0, 1, 0, 0, 1, "MT_FSF"   },
-	{mt_bsf         , 0, 1, 0, 0, 1, "MT_BSF"   },
-	{mt_fsr         , 0, 1, 0, 1, 1, "MT_FSR"   },
-	{mt_bsr         , 0, 1, 0, 1, 1, "MT_BSR"   },
-	{mt_weof        , 0, 0, 0, 0, 0, "MT_WEOF"  }, /*  5 */
-	{mt_rew         , 0, 1, 1, 1, 0, "MT_REW"   },
-	{mt_offl        , 0, 1, 1, 1, 0, "MT_OFFL"  },
-	{mt_nop         , 1, 1, 1, 1, 0, "MT_NOP"   },
-	{mt_reten       , 0, 1, 1, 1, 0, "MT_RETEN" },
-	{mt_bsfm        , 0, 1, 0, 0, 1, "MT_BSFM"  }, /* 10 */
-	{mt_fsfm        , 0, 1, 0, 0, 1, "MT_FSFM"  },
-	{mt_eom         , 0, 1, 0, 0, 1, "MT_EOM"   },
-	{mt_erase       , 0, 0, 0, 1, 0, "MT_ERASE" },
-	{mt_dummy       , 1, 1, 1, 1, 0, "MT_RAS1"  },
-	{mt_ras2        , 0, 0, 0, 1, 0, "MT_RAS2"  },
-	{mt_dummy       , 1, 1, 1, 1, 0, "MT_RAS3"  },
-	{mt_dummy       , 1, 1, 1, 1, 0, "UNKNOWN"  },
-	{mt_dummy       , 1, 1, 1, 1, 0, "UNKNOWN"  },
-	{mt_dummy       , 1, 1, 1, 1, 0, "UNKNOWN"  },
-	{mt_setblk      , 1, 1, 1, 1, 1, "MT_SETBLK"}, /* 20 */
-	{mt_setdensity  , 1, 1, 1, 1, 0, "MT_SETDENSITY"},
-	{mt_seek        , 0, 1, 0, 1, 1, "MT_SEEK"  },
-	{mt_dummy       , 0, 1, 0, 1, 1, "MT_TELL"  }, /* wr-only ?! */
-	{mt_setdrvbuffer, 1, 1, 1, 1, 0, "MT_SETDRVBUFFER" },
-	{mt_dummy       , 1, 1, 1, 1, 0, "MT_FSS"   }, /* 25 */
-	{mt_dummy       , 1, 1, 1, 1, 0, "MT_BSS"   },
-	{mt_dummy       , 1, 1, 1, 1, 0, "MT_WSM"   },
-	{mt_dummy       , 1, 1, 1, 1, 0, "MT_LOCK"  },
-	{mt_dummy       , 1, 1, 1, 1, 0, "MT_UNLOCK"},
-	{mt_dummy       , 1, 1, 1, 1, 0, "MT_LOAD"  }, /* 30 */
-	{mt_dummy       , 1, 1, 1, 1, 0, "MT_UNLOAD"},
-	{mt_compression , 1, 1, 1, 0, 1, "MT_COMPRESSION"},
-	{mt_dummy       , 1, 1, 1, 1, 0, "MT_SETPART"},
-	{mt_dummy       , 1, 1, 1, 1, 0, "MT_MKPART"}
-};  
-
-#define NR_MT_CMDS NR_ITEMS(mt_funs)
-
-void zft_reset_position(zft_position *pos)
-{
-	TRACE_FUN(ft_t_flow);
-
-	pos->seg_byte_pos =
-		pos->volume_pos = 0;
-	if (zft_header_read) {
-		/* need to keep track of the volume table and
-		 * compression map. We therefore simply
-		 * position at the beginning of the first
-		 * volume. This covers old ftape archives as
-		 * well as various flavours of the
-		 * compression map segments. The worst case is
-		 * that the compression map shows up as an
-		 * additional volume in front of all others.
-		 */
-		pos->seg_pos  = zft_find_volume(0)->start_seg;
-		pos->tape_pos = zft_calc_tape_pos(pos->seg_pos);
-	} else {
-		pos->tape_pos =  0;
-		pos->seg_pos  = -1;
-	}
-	zft_just_before_eof =  0;
-	zft_deblock_segment = -1;
-	zft_io_state        = zft_idle;
-	zft_zap_read_buffers();
-	zft_prevent_flush();
-	/*  unlock the compression module if it is loaded.
-	 *  The zero arg means not to try to load the module.
-	 */
-	if (zft_cmpr_lock(0) == 0) {
-		(*zft_cmpr_ops->reset)(); /* unlock */
-	}
-	TRACE_EXIT;
-}
-
-static void zft_init_driver(void)
-{
-	TRACE_FUN(ft_t_flow);
-
-	zft_resid =
-		zft_header_read          =
-		zft_old_ftape            =
-		zft_offline              =
-		zft_write_protected      =
-		going_offline            =
-		zft_mt_compression       =
-		zft_header_changed       =
-		zft_volume_table_changed =
-		zft_written_segments     = 0;
-	zft_blk_sz = CONFIG_ZFT_DFLT_BLK_SZ;
-	zft_reset_position(&zft_pos); /* does most of the stuff */
-	ftape_zap_read_buffers();
-	ftape_set_state(idle);
-	TRACE_EXIT;
-}
-
-int zft_def_idle_state(void)
-{ 
-	int result = 0;
-	TRACE_FUN(ft_t_flow);
-	
-	if (!zft_header_read) {
-		result = zft_read_header_segments();
-	} else if ((result = zft_flush_buffers()) >= 0 && zft_qic_mode) {
-		/*  don't move past eof
-		 */
-		(void)zft_close_volume(&zft_pos);
-	}
-	if (ftape_abort_operation() < 0) {
-		TRACE(ft_t_warn, "ftape_abort_operation() failed");
-		result = -EIO;
-	}
-	/* clear remaining read buffers */
-	zft_zap_read_buffers();
-	zft_io_state = zft_idle;
-	TRACE_EXIT result;
-}
-
-/*****************************************************************************
- *                                                                           *
- *  functions for the MTIOCTOP commands                                      *
- *                                                                           *
- *****************************************************************************/
-
-static int mt_dummy(int *dummy)
-{
-	TRACE_FUN(ft_t_flow);
-	
-	TRACE_EXIT -ENOSYS;
-}
-
-static int mt_reset(int *dummy)
-{        
-	TRACE_FUN(ft_t_flow);
-	
-	(void)ftape_seek_to_bot();
-	TRACE_CATCH(ftape_reset_drive(),
-		    zft_init_driver(); zft_uninit_mem(); zft_offline = 1);
-	/*  fake a re-open of the device. This will set all flags and
-	 *  allocate buffers as appropriate. The new tape condition will
-	 *  force the open routine to do everything we need.
-	 */
-	TRACE_CATCH(_zft_open(-1 /* fake reopen */, 0 /* dummy */),);
-	TRACE_EXIT 0;
-}
-
-static int mt_fsf(int *arg)
-{
-	int result;
-	TRACE_FUN(ft_t_flow);
-
-	result = zft_skip_volumes(*arg, &zft_pos);
-	zft_just_before_eof = 0;
-	TRACE_EXIT result;
-}
-
-static int mt_bsf(int *arg)
-{
-	int result = 0;
-	TRACE_FUN(ft_t_flow);
-	
-	if (*arg != 0) {
-		result = zft_skip_volumes(-*arg + 1, &zft_pos);
-	}
-	TRACE_EXIT result;
-}
-
-static int seek_block(__s64 data_offset,
-		      __s64 block_increment,
-		      zft_position *pos)
-{ 
-	int result      = 0;
-	__s64 new_block_pos;
-	__s64 vol_block_count;
-	const zft_volinfo *volume;
-	int exceed;
-	TRACE_FUN(ft_t_flow);
-	
-	volume = zft_find_volume(pos->seg_pos);
-	if (volume->start_seg == 0 || volume->end_seg == 0) {
-		TRACE_EXIT -EIO;
-	}
-	new_block_pos   = (zft_div_blksz(data_offset, volume->blk_sz)
-			   + block_increment);
-	vol_block_count = zft_div_blksz(volume->size, volume->blk_sz);
-	if (new_block_pos < 0) {
-		TRACE(ft_t_noise,
-		      "new_block_pos " LL_X " < 0", LL(new_block_pos));
-		zft_resid     = (int)new_block_pos;
-		new_block_pos = 0;
-		exceed = 1;
-	} else if (new_block_pos > vol_block_count) {
-		TRACE(ft_t_noise,
-		      "new_block_pos " LL_X " exceeds size of volume " LL_X,
-		      LL(new_block_pos), LL(vol_block_count));
-		zft_resid     = (int)(vol_block_count - new_block_pos);
-		new_block_pos = vol_block_count;
-		exceed = 1;
-	} else {
-		exceed = 0;
-	}
-	if (zft_use_compression && volume->use_compression) {
-		TRACE_CATCH(zft_cmpr_lock(1 /* try to load */),);
-		result = (*zft_cmpr_ops->seek)(new_block_pos, pos, volume,
-					       zft_deblock_buf);
-		pos->tape_pos  = zft_calc_tape_pos(pos->seg_pos);
-		pos->tape_pos += pos->seg_byte_pos;
-	} else {
-		pos->volume_pos = zft_mul_blksz(new_block_pos, volume->blk_sz);
-		pos->tape_pos   = zft_calc_tape_pos(volume->start_seg);
-		pos->tape_pos  += pos->volume_pos;
-		pos->seg_pos    = zft_calc_seg_byte_coord(&pos->seg_byte_pos,
-							  pos->tape_pos);
-	}
-	zft_just_before_eof = volume->size == pos->volume_pos;
-	if (zft_just_before_eof) {
-		/* why this? because zft_file_no checks against start
-		 * and end segment of a volume. We do not want to
-		 * advance to the next volume with this function.
-		 */
-		TRACE(ft_t_noise, "set zft_just_before_eof");
-		zft_position_before_eof(pos, volume);
-	}
-	TRACE(ft_t_noise, "\n"
-	      KERN_INFO "new_seg_pos : %d\n"
-	      KERN_INFO "new_tape_pos: " LL_X "\n"
-	      KERN_INFO "vol_size    : " LL_X "\n"
-	      KERN_INFO "seg_byte_pos: %d\n"
-	      KERN_INFO "blk_sz  : %d", 
-	      pos->seg_pos, LL(pos->tape_pos),
-	      LL(volume->size), pos->seg_byte_pos,
-	      volume->blk_sz);
-	if (!exceed) {
-		zft_resid = new_block_pos - zft_div_blksz(pos->volume_pos,
-							  volume->blk_sz);
-	}
-	if (zft_resid < 0) {
-		zft_resid = -zft_resid;
-	}
-	TRACE_EXIT ((exceed || zft_resid != 0) && result >= 0) ? -EINVAL : result;
-}     
-
-static int mt_fsr(int *arg)
-{ 
-	int result;
-	TRACE_FUN(ft_t_flow);
-	
-	result = seek_block(zft_pos.volume_pos,  *arg, &zft_pos);
-	TRACE_EXIT result;
-}
-
-static int mt_bsr(int *arg)
-{   
-	int result;
-	TRACE_FUN(ft_t_flow);
-	
-	result = seek_block(zft_pos.volume_pos, -*arg, &zft_pos);
-	TRACE_EXIT result;
-}
-
-static int mt_weof(int *arg)
-{
-	int result;
-	TRACE_FUN(ft_t_flow);
-	
-	TRACE_CATCH(zft_flush_buffers(),);
-	result = zft_weof(*arg, &zft_pos);
-	TRACE_EXIT result;
-}
-
-static int mt_rew(int *dummy)
-{          
-	int result;
-	TRACE_FUN(ft_t_flow);
-	
-	if(zft_header_read) {
-		(void)zft_def_idle_state();
-	}
-	result = ftape_seek_to_bot();
-	zft_reset_position(&zft_pos);
-	TRACE_EXIT result;
-}
-
-static int mt_offl(int *dummy)
-{
-	int result;
-	TRACE_FUN(ft_t_flow);
-	
-	going_offline= 1;
-	result = mt_rew(NULL);
-	TRACE_EXIT result;
-}
-
-static int mt_nop(int *dummy)
-{
-	TRACE_FUN(ft_t_flow);
-	/*  should we set tape status?
-	 */
-	if (!zft_offline) { /* offline includes no_tape */
-		(void)zft_def_idle_state();
-	}
-	TRACE_EXIT 0; 
-}
-
-static int mt_reten(int *dummy)
-{  
-	int result;
-	TRACE_FUN(ft_t_flow);
-	
-	if(zft_header_read) {
-		(void)zft_def_idle_state();
-	}
-	result = ftape_seek_to_eot();
-	if (result >= 0) {
-		result = ftape_seek_to_bot();
-	}
-	TRACE_EXIT(result);
-}
-
-static int fsfbsfm(int arg, zft_position *pos)
-{ 
-	const zft_volinfo *vtbl;
-	__s64 block_pos;
-	TRACE_FUN(ft_t_flow);
-	
-	/* What to do? This should seek to the next file-mark and
-	 * position BEFORE. That is, a next write would just extend
-	 * the current file.  Well. Let's just seek to the end of the
-	 * current file, if count == 1.  If count > 1, then do a
-	 * "mt_fsf(count - 1)", and then seek to the end of that file.
-	 * If count == 0, do nothing
-	 */
-	if (arg == 0) {
-		TRACE_EXIT 0;
-	}
-	zft_just_before_eof = 0;
-	TRACE_CATCH(zft_skip_volumes(arg < 0 ? arg : arg-1, pos),
-		    if (arg > 0) {
-			    zft_resid ++; 
-		    });
-	vtbl      = zft_find_volume(pos->seg_pos);
-	block_pos = zft_div_blksz(vtbl->size, vtbl->blk_sz);
-	(void)seek_block(0, block_pos, pos);
-	if (pos->volume_pos != vtbl->size) {
-		zft_just_before_eof = 0;
-		zft_resid = 1;
-		/* we didn't manage to get there */
-		TRACE_ABORT(-EIO, ft_t_err, 
-			    "wanted file position " LL_X ", arrived at " LL_X, 
-			    LL(vtbl->size), LL(pos->volume_pos));
-	}
-	zft_just_before_eof = 1;
-	TRACE_EXIT 0; 
-}
-
-static int mt_bsfm(int *arg)
-{
-	int result;
-	TRACE_FUN(ft_t_flow);
-	
-	result = fsfbsfm(-*arg, &zft_pos);
-	TRACE_EXIT result;
-}
-
-static int mt_fsfm(int *arg)
-{
-	int result;
-	TRACE_FUN(ft_t_flow);
-	
-	result = fsfbsfm(*arg, &zft_pos);
-	TRACE_EXIT result;
-}
-
-static int mt_eom(int *dummy)
-{              
-	TRACE_FUN(ft_t_flow);
-	
-	zft_skip_to_eom(&zft_pos);
-	TRACE_EXIT 0;
-}
-
-static int mt_erase(int *dummy)
-{
-	int result;
-	TRACE_FUN(ft_t_flow);
-	
-	result = zft_erase();
-	TRACE_EXIT result;
-}
-
-static int mt_ras2(int *dummy)
-{
-	int result;
-	TRACE_FUN(ft_t_flow);
-	
-	result = -ENOSYS;
-	TRACE_EXIT result;
-} 
-
-/*  Sets the new blocksize in BYTES
- *
- */
-static int mt_setblk(int *new_size)
-{
-	TRACE_FUN(ft_t_flow);
-	
-	if((unsigned int)(*new_size) > ZFT_MAX_BLK_SZ) {
-		TRACE_ABORT(-EINVAL, ft_t_info,
-			    "desired blk_sz (%d) should be <= %d bytes",
-			    *new_size, ZFT_MAX_BLK_SZ);
-	}
-	if ((*new_size & (FT_SECTOR_SIZE-1)) != 0) {
-		TRACE_ABORT(-EINVAL, ft_t_info,
-			"desired blk_sz (%d) must be a multiple of %d bytes",
-			    *new_size, FT_SECTOR_SIZE);
-	}
-	if (*new_size == 0) {
-		if (zft_use_compression) {
-			TRACE_ABORT(-EINVAL, ft_t_info,
-				    "Variable block size not yet "
-				    "supported with compression");
-		}
-		*new_size = 1;
-	}
-	zft_blk_sz = *new_size;
-	TRACE_EXIT 0;
-} 
-
-static int mt_setdensity(int *arg)
-{
-	TRACE_FUN(ft_t_flow);
-	
-	SET_TRACE_LEVEL(*arg);
-	TRACE(TRACE_LEVEL, "tracing set to %d", TRACE_LEVEL);
-	if ((int)TRACE_LEVEL != *arg) {
-		TRACE_EXIT -EINVAL;
-	}
-	TRACE_EXIT 0;
-}          
-
-static int mt_seek(int *new_block_pos)
-{ 
-	int result= 0;        
-	TRACE_FUN(ft_t_any);
-	
-	result = seek_block(0, (__s64)*new_block_pos, &zft_pos);
-	TRACE_EXIT result;
-}
-
-/*  OK, this is totally different from SCSI, but the worst thing that can 
- *  happen is that there is not enough defragmented memory that can be
- *  allocated. Also, there is a hardwired limit of 16 dma buffers in the 
- *  stock ftape module. This shouldn't bring the system down.
- *
- * NOTE: the argument specifies the total number of dma buffers to use.
- *       The driver needs at least 3 buffers to function at all.
- * 
- */
-static int mt_setdrvbuffer(int *cnt)
-{
-	TRACE_FUN(ft_t_flow);
-
-	if (*cnt < 3) {
-		TRACE_EXIT -EINVAL;
-	}
-	TRACE_CATCH(ftape_set_nr_buffers(*cnt),);
-	TRACE_EXIT 0;
-}
-/* return the block position from start of volume 
- */
-static int mt_tell(int *arg)
-{
-	TRACE_FUN(ft_t_flow);
-	
-	*arg   = zft_div_blksz(zft_pos.volume_pos,
-			       zft_find_volume(zft_pos.seg_pos)->blk_sz);
-	TRACE_EXIT 0;
-}
-
-static int mt_compression(int *arg)
-{
-	TRACE_FUN(ft_t_flow);
-	
-	/*  Ok. We could also check whether compression is available at
-	 *  all by trying to load the compression module.  We could
-	 *  also check for a block size of 1 byte which is illegal
-	 *  with compression.  Instead of doing it here we rely on
-	 *  zftape_write() to do the proper checks.
-	 */
-	if ((unsigned int)*arg > 1) {
-		TRACE_EXIT -EINVAL;
-	}
-	if (*arg != 0 && zft_blk_sz == 1) { /* variable block size */
-		TRACE_ABORT(-EINVAL, ft_t_info,
-			    "Compression not yet supported "
-			    "with variable block size");
-	}
-	zft_mt_compression  = *arg;
-	if ((zft_unit & ZFT_ZIP_MODE) == 0) {
-		zft_use_compression = zft_mt_compression;
-	}
-	TRACE_EXIT 0;
-}
-
-/*  check whether write access is allowed. Write access is denied when
- *  + zft_write_protected == 1 -- this accounts for either hard write 
- *                                protection of the cartridge or for 
- *                                O_RDONLY access mode of the tape device
- *  + zft_offline == 1         -- this means that there is either no tape 
- *                                or that the MTOFFLINE ioctl has been 
- *                                previously issued (`soft eject')
- *  + ft_formatted == 0        -- this means that the cartridge is not
- *                                formatted
- *  Then we distinguish two cases. When zft_qic_mode is TRUE, then we try
- *  to emulate a `traditional' (aka SCSI like) UN*X tape device. Therefore we
- *  deny writes when
- *  + zft_qic_mode ==1 && 
- *       (!zft_tape_at_lbot() &&   -- tape not at logical BOT
- *        !(zft_tape_at_eom() ||   -- tape not at logical EOM (or EOD)
- *          (zft_tape_at_eom() &&
- *           zft_old_ftape())))    -- we can't add a new volume to tapes 
- *                                    written by old ftape because ftape
- *                                    doesn't use the volume table
- *
- *  when the drive is in true raw mode (aka /dev/rawft0) then we don't 
- *  care about LBOT and EOM conditions. This device is intended for a 
- *  user level program that wants to truly implement the QIC-80 compliance
- *  at the logical data layout level of the cartridge, i.e. implement all
- *  that volume table and volume directory stuff etc.
- */
-int zft_check_write_access(zft_position *pos)
-{
-	TRACE_FUN(ft_t_flow);
-
-	if (zft_offline) { /* offline includes no_tape */
-		TRACE_ABORT(-ENXIO,
-			    ft_t_info, "tape is offline or no cartridge");
-	}
-	if (!ft_formatted) {
-		TRACE_ABORT(-EACCES, ft_t_info, "tape is not formatted");
-	} 
-	if (zft_write_protected) {
-		TRACE_ABORT(-EACCES, ft_t_info, "cartridge write protected");
-	} 
-	if (zft_qic_mode) {
-		/*  check BOT condition */
-		if (!zft_tape_at_lbot(pos)) {
-			/*  protect cartridges written by old ftape if
-			 *  not at BOT because they use the vtbl
-			 *  segment for storing data
-			 */
-			if (zft_old_ftape) {
-				TRACE_ABORT(-EACCES, ft_t_warn, 
-      "Cannot write to cartridges written by old ftape when not at BOT");
-			}
-			/*  not at BOT, but allow writes at EOD, of course
-			 */
-			if (!zft_tape_at_eod(pos)) {
-				TRACE_ABORT(-EACCES, ft_t_info,
-					    "tape not at BOT and not at EOD");
-			}
-		}
-		/*  fine. Now the tape is either at BOT or at EOD. */
-	}
-	/* or in raw mode in which case we don't care about BOT and EOD */
-	TRACE_EXIT 0;
-}
-
-/*      OPEN routine called by kernel-interface code
- *
- *      NOTE: this is also called by mt_reset() with dev_minor == -1
- *            to fake a reopen after a reset.
- */
-int _zft_open(unsigned int dev_minor, unsigned int access_mode)
-{
-	static unsigned int tape_unit;
-	static unsigned int file_access_mode;
-	int result;
-	TRACE_FUN(ft_t_flow);
-
-	if ((int)dev_minor == -1) {
-		/* fake reopen */
-		zft_unit    = tape_unit;
-		access_mode = file_access_mode;
-		zft_init_driver(); /* reset all static data to defaults */
-	} else {
-		tape_unit        = dev_minor;
-		file_access_mode = access_mode;
-		if ((result = ftape_enable(FTAPE_SEL(dev_minor))) < 0) {
-			TRACE_ABORT(-ENXIO, ft_t_err,
-				    "ftape_enable failed: %d", result);
-		}
-		if (ft_new_tape || ft_no_tape || !ft_formatted ||
-		    (FTAPE_SEL(zft_unit) != FTAPE_SEL(dev_minor)) ||
-		    (zft_unit & ZFT_RAW_MODE) != (dev_minor & ZFT_RAW_MODE)) {
-			/* reset all static data to defaults,
-			 */
-			zft_init_driver(); 
-		}
-		zft_unit = dev_minor;
-	}
-	zft_set_flags(zft_unit); /* decode the minor bits */
-	if (zft_blk_sz == 1 && zft_use_compression) {
-		ftape_disable(); /* resets ft_no_tape */
-		TRACE_ABORT(-ENODEV, ft_t_warn, "Variable block size not yet "
-			    "supported with compression");
-	}
-	/*  no need for most of the buffers when no tape or not
-	 *  formatted.  For the read/write operations it does not matter
-	 *  whether there is no tape, a not-formatted tape, or whether
-	 *  the driver is soft offline.
-	 *  Nevertheless we allow some ioctls with non-formatted tapes, 
-	 *  like rewind and reset.
-	 */
-	if (ft_no_tape || !ft_formatted) {
-		zft_uninit_mem();
-	}
-	if (ft_no_tape) {
-		zft_offline = 1; /* so we need not test two variables */
-	}
-	if ((access_mode == O_WRONLY || access_mode == O_RDWR) &&
-	    (ft_write_protected || ft_no_tape)) {
-		ftape_disable(); /* resets ft_no_tape */
-		TRACE_ABORT(ft_no_tape ? -ENXIO : -EROFS,
-			    ft_t_warn, "wrong access mode %s cartridge",
-			    ft_no_tape ? "without a" : "with write protected");
-	}
-	zft_write_protected = (access_mode == O_RDONLY || 
-			       ft_write_protected != 0);
-	if (zft_write_protected) {
-		TRACE(ft_t_noise,
-		      "read only access mode: %d, "
-		      "drive write protected: %d", 
-		      access_mode == O_RDONLY,
-		      ft_write_protected != 0);
-	}
-	if (!zft_offline) {
-		TRACE_CATCH(zft_vmalloc_once(&zft_deblock_buf,FT_SEGMENT_SIZE),
-			    ftape_disable());
-	}
-	/* zft_seg_pos should be greater than the vtbl segpos but not
-	 * if in compatibility mode and only after we read in the
-	 * header segments
-	 *
-	 * might also be a problem if the user makes a backup with a
-	 * *qft* device and rewinds it with a raw device.
-	 */
-	if (zft_qic_mode         &&
-	    !zft_old_ftape       &&
-	    zft_pos.seg_pos >= 0 &&
-	    zft_header_read      && 
-	    zft_pos.seg_pos <= ft_first_data_segment) {
-		TRACE(ft_t_noise, "you probably mixed up the zftape devices!");
-		zft_reset_position(&zft_pos); 
-	}
-	TRACE_EXIT 0;
-}
-
-/*      RELEASE routine called by kernel-interface code
- */
-int _zft_close(void)
-{
-	int result = 0;
-	TRACE_FUN(ft_t_flow);
-	
-	if (zft_offline) {
-		/* call the hardware release routine. Puts the drive offline */
-		ftape_disable();
-		TRACE_EXIT 0;
-	}
-	if (!(ft_write_protected || zft_old_ftape)) {
-		result = zft_flush_buffers();
-		TRACE(ft_t_noise, "writing file mark at current position");
-		if (zft_qic_mode && zft_close_volume(&zft_pos) == 0) {
-			zft_move_past_eof(&zft_pos);
-		}
-		if ((zft_tape_at_lbot(&zft_pos) ||
-		     !(zft_unit & FTAPE_NO_REWIND))) {
-			if (result >= 0) {
-				result = zft_update_header_segments();
-			} else {
-				TRACE(ft_t_err,
-				"Error: unable to update header segments");
-			}
-		}
-	}
-	ftape_abort_operation();
-	if (!(zft_unit & FTAPE_NO_REWIND)) {
-		TRACE(ft_t_noise, "rewinding tape");
-		if (ftape_seek_to_bot() < 0 && result >= 0) {
-			result = -EIO; /* keep old value */
-		}
-		zft_reset_position(&zft_pos);
-	} 
-	zft_zap_read_buffers();
-	/*  now free up memory as much as possible. We don't destroy
-	 *  the deblock buffer if it contains a valid segment.
-	 */
-	if (zft_deblock_segment == -1) {
-		zft_vfree(&zft_deblock_buf, FT_SEGMENT_SIZE); 
-	}
-	/* high level driver status, forces creation of a new volume
-	 * when calling ftape_write again and not zft_just_before_eof
-	 */
-	zft_io_state = zft_idle;  
-	if (going_offline) {
-		zft_init_driver();
-		zft_uninit_mem();
-		going_offline = 0;
-		zft_offline   = 1;
-	} else if (zft_cmpr_lock(0 /* don't load */) == 0) {
-		(*zft_cmpr_ops->reset)(); /* unlock it again */
-	}
-	zft_memory_stats();
-	/* call the hardware release routine. Puts the drive offline */
-	ftape_disable();
-	TRACE_EXIT result;
-}
-
-/*
- *  the wrapper function around the wrapper MTIOCTOP ioctl
- */
-static int mtioctop(struct mtop *mtop, int arg_size)
-{
-	int result = 0;
-	fun_entry *mt_fun_entry;
-	TRACE_FUN(ft_t_flow);
-	
-	if (arg_size != sizeof(struct mtop) || mtop->mt_op >= NR_MT_CMDS) {
-		TRACE_EXIT -EINVAL;
-	}
-	TRACE(ft_t_noise, "calling MTIOCTOP command: %s",
-	      mt_funs[mtop->mt_op].name);
-	mt_fun_entry= &mt_funs[mtop->mt_op];
-	zft_resid = mtop->mt_count;
-	if (!mt_fun_entry->offline && zft_offline) {
-		if (ft_no_tape) {
-			TRACE_ABORT(-ENXIO, ft_t_info, "no tape present");
-		} else {
-			TRACE_ABORT(-ENXIO, ft_t_info, "drive is offline");
-		}
-	}
-	if (!mt_fun_entry->not_formatted && !ft_formatted) {
-		TRACE_ABORT(-EACCES, ft_t_info, "tape is not formatted");
-	}
-	if (!mt_fun_entry->write_protected) {
-		TRACE_CATCH(zft_check_write_access(&zft_pos),);
-	}
-	if (mt_fun_entry->need_idle_state && !(zft_offline || !ft_formatted)) {
-		TRACE_CATCH(zft_def_idle_state(),);
-	}
-	if (!zft_qic_mode && !mt_fun_entry->raw_mode) {
-		TRACE_ABORT(-EACCES, ft_t_info, 
-"Drive needs to be in QIC-80 compatibility mode for this command");
-	}
-	result = (mt_fun_entry->function)(&mtop->mt_count);
-	if (zft_tape_at_lbot(&zft_pos)) {
-		TRACE_CATCH(zft_update_header_segments(),);
-	}
-	if (result >= 0) {
-		zft_resid = 0;
-	}
-	TRACE_EXIT result;
-}
-
-/*
- *  standard MTIOCGET ioctl
- */
-static int mtiocget(struct mtget *mtget, int arg_size)
-{
-	const zft_volinfo *volume;
-	__s64 max_tape_pos;
-	TRACE_FUN(ft_t_flow);
-	
-	if (arg_size != sizeof(struct mtget)) {
-		TRACE_ABORT(-EINVAL, ft_t_info, "bad argument size: %d",
-			    arg_size);
-	}
-	mtget->mt_type  = ft_drive_type.vendor_id + 0x800000;
-	mtget->mt_dsreg = ft_last_status.space;
-	mtget->mt_erreg = ft_last_error.space; /* error register */
-	mtget->mt_resid = zft_resid; /* residuum of writes, reads and
-				      * MTIOCTOP commands 
-				      */
-	if (!zft_offline) { /* neither no_tape nor soft offline */
-		mtget->mt_gstat = GMT_ONLINE(~0UL);
-		/* should rather return the status of the cartridge
-		 * than the access mode of the file, therefore use
-		 * ft_write_protected, not zft_write_protected 
-		 */
-		if (ft_write_protected) {
-			mtget->mt_gstat |= GMT_WR_PROT(~0UL);
-		}
-		if(zft_header_read) { /* this catches non-formatted */
-			volume = zft_find_volume(zft_pos.seg_pos);
-			mtget->mt_fileno = volume->count;
-			max_tape_pos = zft_capacity - zft_blk_sz;
-			if (zft_use_compression) {
-				max_tape_pos -= ZFT_CMPR_OVERHEAD;
-			}
-			if (zft_tape_at_eod(&zft_pos)) {
-				mtget->mt_gstat |= GMT_EOD(~0UL);
-			}
-			if (zft_pos.tape_pos > max_tape_pos) {
-				mtget->mt_gstat |= GMT_EOT(~0UL);
-			}
-			mtget->mt_blkno = zft_div_blksz(zft_pos.volume_pos,
-							volume->blk_sz);
-			if (zft_just_before_eof) {
-				mtget->mt_gstat |= GMT_EOF(~0UL);
-			}
-			if (zft_tape_at_lbot(&zft_pos)) {
-				mtget->mt_gstat |= GMT_BOT(~0UL);
-			}
-		} else {
-			mtget->mt_fileno = mtget->mt_blkno = -1;
-			if (mtget->mt_dsreg & QIC_STATUS_AT_BOT) {
-				mtget->mt_gstat |= GMT_BOT(~0UL);
-			}
-		}
-	} else {
-		if (ft_no_tape) {
-			mtget->mt_gstat = GMT_DR_OPEN(~0UL);
-		} else {
-			mtget->mt_gstat = 0UL;
-		}
- 		mtget->mt_fileno = mtget->mt_blkno = -1;
-	}
-	TRACE_EXIT 0;
-}
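
Both handlers above implement the standard Linux mt interface, so an ordinary user-space program drives them through <sys/mtio.h>. A minimal sketch (the /dev/qft0 device name is an assumption; error handling is trimmed):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mtio.h>

int main(void)
{
	struct mtop  op = { .mt_op = MTREW, .mt_count = 1 };	/* handled by mt_rew() */
	struct mtget status;
	int fd = open("/dev/qft0", O_RDONLY);			/* device name assumed */

	if (fd < 0)
		return 1;
	if (ioctl(fd, MTIOCTOP, &op) < 0)			/* dispatched by mtioctop() */
		perror("MTIOCTOP/MTREW");
	if (ioctl(fd, MTIOCGET, &status) == 0) {		/* filled in by mtiocget() */
		if (GMT_BOT(status.mt_gstat))
			printf("tape at BOT, file %d\n", (int)status.mt_fileno);
		if (GMT_WR_PROT(status.mt_gstat))
			printf("cartridge is write protected\n");
	}
	close(fd);
	return 0;
}
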
-
-#ifdef MTIOCRDFTSEG
-/*
- *  Read a floppy tape segment. This is useful for manipulating the
- *  volume table, and read the old header segment before re-formatting
- *  the cartridge.
- */
-static int mtiocrdftseg(struct mtftseg * mtftseg, int arg_size)
-{
-	TRACE_FUN(ft_t_flow);
-	
-	TRACE(ft_t_noise, "Mag tape ioctl command: MTIOCRDFTSEG");
-	if (zft_qic_mode) {
-		TRACE_ABORT(-EACCES, ft_t_info,
-			    "driver needs to be in raw mode for this ioctl");
-	} 
-	if (arg_size != sizeof(struct mtftseg)) {
-		TRACE_ABORT(-EINVAL, ft_t_info, "bad argument size: %d",
-			    arg_size);
-	}
-	if (zft_offline) {
-		TRACE_EXIT -ENXIO;
-	}
-	if (mtftseg->mt_mode != FT_RD_SINGLE &&
-	    mtftseg->mt_mode != FT_RD_AHEAD) {
-		TRACE_ABORT(-EINVAL, ft_t_info, "invalid read mode");
-	}
-	if (!ft_formatted) {
-		TRACE_EXIT -EACCES; /* -ENXIO ? */
-
-	}
-	if (!zft_header_read) {
-		TRACE_CATCH(zft_def_idle_state(),);
-	}
-	if (mtftseg->mt_segno > ft_last_data_segment) {
-		TRACE_ABORT(-EINVAL, ft_t_info, "segment number is too large");
-	}
-	mtftseg->mt_result = ftape_read_segment(mtftseg->mt_segno,
-						zft_deblock_buf,
-						mtftseg->mt_mode);
-	if (mtftseg->mt_result < 0) {
-		/*  a negative result is not an ioctl error. if
-		 *  the user wants to read damaged tapes,
-		 *  it's up to her/him
-		 */
-		TRACE_EXIT 0;
-	}
-	if (copy_to_user(mtftseg->mt_data,
-			 zft_deblock_buf,
-			 mtftseg->mt_result) != 0) {
-		TRACE_EXIT -EFAULT;
-	}
-	TRACE_EXIT 0;
-}
-#endif
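
For completeness, reading one raw segment through this ioctl looked roughly like the sketch below. The field names are taken from the handler above; the <linux/mtio.h> location of struct mtftseg is the historical one and, like the device name, is an assumption rather than a supported interface (the driver must be opened through its raw-mode node, e.g. /dev/rawft0):

#include <sys/ioctl.h>
#include <linux/mtio.h>		/* struct mtftseg, MTIOCRDFTSEG, FT_RD_SINGLE (historical) */

/* buf must be large enough to hold a full segment (FT_SEGMENT_SIZE bytes). */
static int read_raw_segment(int fd, unsigned int segno, void *buf)
{
	struct mtftseg seg;

	seg.mt_segno = segno;		/* checked against ft_last_data_segment */
	seg.mt_mode  = FT_RD_SINGLE;	/* no read-ahead */
	seg.mt_data  = buf;		/* user buffer the driver copies into */
	if (ioctl(fd, MTIOCRDFTSEG, &seg) < 0)
		return -1;		/* ioctl-level failure (EACCES, ENXIO, ...) */
	return seg.mt_result;		/* bytes read, or a negative drive-level error */
}
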
-
-#ifdef MTIOCWRFTSEG
-/*
- *  write a floppy tape segment. This version features writing of
- *  deleted address marks, and gracefully ignores the (software)
- *  ft_formatted flag to support writing of header segments after
- *  formatting.
- */
-static int mtiocwrftseg(struct mtftseg * mtftseg, int arg_size)
-{
-	int result;
-	TRACE_FUN(ft_t_flow);
-	
-	TRACE(ft_t_noise, "Mag tape ioctl command: MTIOCWRFTSEG");
-	if (zft_write_protected || zft_qic_mode) {
-		TRACE_EXIT -EACCES;
-	} 
-	if (arg_size != sizeof(struct mtftseg)) {
-		TRACE_ABORT(-EINVAL, ft_t_info, "bad argument size: %d",
-			    arg_size);
-	}
-	if (zft_offline) {
-		TRACE_EXIT -ENXIO;
-	}
-	if (mtftseg->mt_mode != FT_WR_ASYNC   && 
-	    mtftseg->mt_mode != FT_WR_MULTI   &&
-	    mtftseg->mt_mode != FT_WR_SINGLE  &&
-	    mtftseg->mt_mode != FT_WR_DELETE) {
-		TRACE_ABORT(-EINVAL, ft_t_info, "invalid write mode");
-	}
-	/*
-	 *  We don't check for ft_formatted, because this gives
-	 *  only the software status of the driver.
-	 *
-	 *  We assume that the user knows what they are
-	 *  doing. And rely on the low level stuff to fail
-	 *  when the tape isn't formatted. We only make sure
-	 *  that the header segment buffer is allocated,
-	 *  because it holds the bad sector map.
-	 */
-	if (zft_hseg_buf == NULL) {
-		TRACE_EXIT -ENXIO;
-	}
-	if (mtftseg->mt_mode != FT_WR_DELETE) {
-		if (copy_from_user(zft_deblock_buf, 
-				   mtftseg->mt_data,
-				   FT_SEGMENT_SIZE) != 0) {
-			TRACE_EXIT -EFAULT;
-		}
-	}
-	mtftseg->mt_result = ftape_write_segment(mtftseg->mt_segno, 
-						 zft_deblock_buf,
-						 mtftseg->mt_mode);
-	if (mtftseg->mt_result >= 0 && mtftseg->mt_mode == FT_WR_SINGLE) {
-		/*  
-		 *  a negative result is not an ioctl error. if
-		 *  the user wants to write damaged tapes,
-		 *  it's up to her/him
-		 */
-		if ((result = ftape_loop_until_writes_done()) < 0) {
-			mtftseg->mt_result = result;
-		}
-	}
-	TRACE_EXIT 0;
-}
-#endif
-  
-#ifdef MTIOCVOLINFO
-/*
- *  get information about volume positioned at.
- */
-static int mtiocvolinfo(struct mtvolinfo *volinfo, int arg_size)
-{
-	const zft_volinfo *volume;
-	TRACE_FUN(ft_t_flow);
-	
-	TRACE(ft_t_noise, "Mag tape ioctl command: MTIOCVOLINFO");
-	if (arg_size != sizeof(struct mtvolinfo)) {
-		TRACE_ABORT(-EINVAL,
-			    ft_t_info, "bad argument size: %d", arg_size);
-	}
-	if (zft_offline) {
-		TRACE_EXIT -ENXIO;
-	}
-	if (!ft_formatted) {
-		TRACE_EXIT -EACCES;
-	}
-	TRACE_CATCH(zft_def_idle_state(),);
-	volume = zft_find_volume(zft_pos.seg_pos);
-	volinfo->mt_volno   = volume->count;
-	volinfo->mt_blksz   = volume->blk_sz == 1 ? 0 : volume->blk_sz;
-	volinfo->mt_size    = volume->size >> 10;
-	volinfo->mt_rawsize = ((zft_calc_tape_pos(volume->end_seg + 1) >> 10) -
-			       (zft_calc_tape_pos(volume->start_seg) >> 10));
-	volinfo->mt_cmpr    = volume->use_compression;
-	TRACE_EXIT 0;
-}
-#endif
-
-#ifdef ZFT_OBSOLETE  
-static int mtioc_zftape_getblksz(struct mtblksz *blksz, int arg_size)
-{
-	TRACE_FUN(ft_t_flow);
-	
-	TRACE(ft_t_noise, "\n"
-	      KERN_INFO "Mag tape ioctl command: MTIOC_ZTAPE_GETBLKSZ\n"
-	      KERN_INFO "This ioctl is here merely for compatibility.\n"
-	      KERN_INFO "Please use MTIOCVOLINFO instead");
-	if (arg_size != sizeof(struct mtblksz)) {
-		TRACE_ABORT(-EINVAL,
-			    ft_t_info, "bad argument size: %d", arg_size);
-	}
-	if (zft_offline) {
-		TRACE_EXIT -ENXIO;
-	}
-	if (!ft_formatted) {
-		TRACE_EXIT -EACCES;
-	}
-	TRACE_CATCH(zft_def_idle_state(),);
-	blksz->mt_blksz = zft_find_volume(zft_pos.seg_pos)->blk_sz;
-	TRACE_EXIT 0;
-}
-#endif
-
-#ifdef MTIOCGETSIZE
-/*
- *  get the capacity of the tape cartridge.
- */
-static int mtiocgetsize(struct mttapesize *size, int arg_size)
-{
-	TRACE_FUN(ft_t_flow);
-	
-	TRACE(ft_t_noise, "Mag tape ioctl command: MTIOC_ZFTAPE_GETSIZE");
-	if (arg_size != sizeof(struct mttapesize)) {
-		TRACE_ABORT(-EINVAL,
-			    ft_t_info, "bad argument size: %d", arg_size);
-	}
-	if (zft_offline) {
-		TRACE_EXIT -ENXIO;
-	}
-	if (!ft_formatted) {
-		TRACE_EXIT -EACCES;
-	}
-	TRACE_CATCH(zft_def_idle_state(),);
-	size->mt_capacity = (unsigned int)(zft_capacity>>10);
-	size->mt_used     = (unsigned int)(zft_get_eom_pos()>>10);
-	TRACE_EXIT 0;
-}
-#endif
-
-static int mtiocpos(struct mtpos *mtpos, int arg_size)
-{
-	int result;
-	TRACE_FUN(ft_t_flow);
-	
-	TRACE(ft_t_noise, "Mag tape ioctl command: MTIOCPOS");
-	if (arg_size != sizeof(struct mtpos)) {
-		TRACE_ABORT(-EINVAL,
-			    ft_t_info, "bad argument size: %d", arg_size);
-	}
-	result = mt_tell((int *)&mtpos->mt_blkno);
-	TRACE_EXIT result;
-}
-
-#ifdef MTIOCFTFORMAT
-/*
- * formatting of floppy tape cartridges. This is intended to be used
- * together with the MTIOCFTCMD ioctl and the new mmap feature 
- */
-
-/* 
- *  This function uses ftape_decode_header_segment() to inform the low
- *  level ftape module about the new parameters.
- *
- *  It erases the hseg_buf. The calling process must specify all
- *  parameters to assure proper operation.
- *
- *  return values: -EINVAL - wrong argument size
- *                 -EINVAL - if ftape_decode_header_segment() failed.
- */
-static int set_format_parms(struct ftfmtparms *p, __u8 *hseg_buf)
-{
-	ft_trace_t old_level = TRACE_LEVEL;
-	TRACE_FUN(ft_t_flow);
-
-	TRACE(ft_t_noise, "MTIOCFTFORMAT operation FTFMT_SETPARMS");
-	memset(hseg_buf, 0, FT_SEGMENT_SIZE);
-	PUT4(hseg_buf, FT_SIGNATURE, FT_HSEG_MAGIC);
-
-	/*  fill in user specified parameters
-	 */
-	hseg_buf[FT_FMT_CODE] = (__u8)p->ft_fmtcode;
-	PUT2(hseg_buf, FT_SPT, p->ft_spt);
-	hseg_buf[FT_TPC]      = (__u8)p->ft_tpc;
-	hseg_buf[FT_FHM]      = (__u8)p->ft_fhm;
-	hseg_buf[FT_FTM]      = (__u8)p->ft_ftm;
-
-	/*  fill in sane defaults to make ftape happy.
-	 */ 
-	hseg_buf[FT_FSM] = (__u8)128; /* 128 is hard wired all over ftape */
-	if (p->ft_fmtcode == fmt_big) {
-		PUT4(hseg_buf, FT_6_HSEG_1,   0);
-		PUT4(hseg_buf, FT_6_HSEG_2,   1);
-		PUT4(hseg_buf, FT_6_FRST_SEG, 2);
-		PUT4(hseg_buf, FT_6_LAST_SEG, p->ft_spt * p->ft_tpc - 1);
-	} else {
-		PUT2(hseg_buf, FT_HSEG_1,    0);
-		PUT2(hseg_buf, FT_HSEG_2,    1);
-		PUT2(hseg_buf, FT_FRST_SEG,  2);
-		PUT2(hseg_buf, FT_LAST_SEG, p->ft_spt * p->ft_tpc - 1);
-	}
-
-	/*  Synchronize with the low level module. This is particularly
-	 *  needed for unformatted cartridges as the QIC std was previously 
-	 *  unknown BUT is needed to set data rate and to calculate timeouts.
-	 */
-	TRACE_CATCH(ftape_calibrate_data_rate(p->ft_qicstd&QIC_TAPE_STD_MASK),
-		    _res = -EINVAL);
-
-	/*  The following will also recalculate the timeouts for the tape
-	 *  length and QIC std we want to format to.
-	 *  abort with -EINVAL rather than -EIO
-	 */
-	SET_TRACE_LEVEL(ft_t_warn);
-	TRACE_CATCH(ftape_decode_header_segment(hseg_buf),
-		    SET_TRACE_LEVEL(old_level); _res = -EINVAL);
-	SET_TRACE_LEVEL(old_level);
-	TRACE_EXIT 0;
-}
-
-/*
- *  Return the internal SOFTWARE status of the kernel driver. This does
- *  NOT query the tape drive about its status.
- */
-static int get_format_parms(struct ftfmtparms *p, __u8 *hseg_buffer)
-{
-	TRACE_FUN(ft_t_flow);
-
-	TRACE(ft_t_noise, "MTIOCFTFORMAT operation FTFMT_GETPARMS");
-	p->ft_qicstd  = ft_qic_std;
-	p->ft_fmtcode = ft_format_code;
-	p->ft_fhm     = hseg_buffer[FT_FHM];
-	p->ft_ftm     = hseg_buffer[FT_FTM];
-	p->ft_spt     = ft_segments_per_track;
-	p->ft_tpc     = ft_tracks_per_tape;
-	TRACE_EXIT 0;
-}
-
-static int mtiocftformat(struct mtftformat *mtftformat, int arg_size)
-{
-	int result;
-	union fmt_arg *arg = &mtftformat->fmt_arg;
-	TRACE_FUN(ft_t_flow);
-
-	TRACE(ft_t_noise, "Mag tape ioctl command: MTIOCFTFORMAT");
-	if (zft_offline) {
-		if (ft_no_tape) {
-			TRACE_ABORT(-ENXIO, ft_t_info, "no tape present");
-		} else {
-			TRACE_ABORT(-ENXIO, ft_t_info, "drive is offline");
-		}
-	}
-	if (zft_qic_mode) {
-		TRACE_ABORT(-EACCES, ft_t_info,
-			    "driver needs to be in raw mode for this ioctl");
-	} 
-	if (zft_hseg_buf == NULL) {
-		TRACE_CATCH(zft_vcalloc_once(&zft_hseg_buf, FT_SEGMENT_SIZE),);
-	}
-	zft_header_read = 0;
-	switch(mtftformat->fmt_op) {
-	case FTFMT_SET_PARMS:
-		TRACE_CATCH(set_format_parms(&arg->fmt_parms, zft_hseg_buf),);
-		TRACE_EXIT 0;
-	case FTFMT_GET_PARMS:
-		TRACE_CATCH(get_format_parms(&arg->fmt_parms, zft_hseg_buf),);
-		TRACE_EXIT 0;
-	case FTFMT_FORMAT_TRACK:
-		if ((ft_formatted && zft_check_write_access(&zft_pos) < 0) ||
-		    (!ft_formatted && zft_write_protected)) {
-			TRACE_ABORT(-EACCES, ft_t_info, "Write access denied");
-		}
-		TRACE_CATCH(ftape_format_track(arg->fmt_track.ft_track,
-					       arg->fmt_track.ft_gap3),);
-		TRACE_EXIT 0;
-	case FTFMT_STATUS:
-		TRACE_CATCH(ftape_format_status(&arg->fmt_status.ft_segment),);
-		TRACE_EXIT 0;
-	case FTFMT_VERIFY:
-		TRACE_CATCH(ftape_verify_segment(arg->fmt_verify.ft_segment,
-				(SectorMap *)&arg->fmt_verify.ft_bsm),);
-		TRACE_EXIT 0;
-	default:
-		TRACE_ABORT(-EINVAL, ft_t_err, "Invalid format operation");
-	}
-	TRACE_EXIT result;
-}
-#endif
-
-#ifdef MTIOCFTCMD
-/*
- *  send a QIC-117 command to the drive, with optional timeouts,
- *  parameter and result bits. This is intended to be used together
- *  with the formatting ioctl.
- */
-static int mtiocftcmd(struct mtftcmd *ftcmd, int arg_size)
-{
-	int i;
-	TRACE_FUN(ft_t_flow);
-
-	TRACE(ft_t_noise, "Mag tape ioctl command: MTIOCFTCMD");
-	if (!capable(CAP_SYS_ADMIN)) {
-		TRACE_ABORT(-EPERM, ft_t_info,
-			    "need CAP_SYS_ADMIN capability to send raw qic-117 commands");
-	}
-	if (zft_qic_mode) {
-		TRACE_ABORT(-EACCES, ft_t_info,
-			    "driver needs to be in raw mode for this ioctl");
-	} 
-	if (arg_size != sizeof(struct mtftcmd)) {
-		TRACE_ABORT(-EINVAL,
-			    ft_t_info, "bad argument size: %d", arg_size);
-	}
-	if (ftcmd->ft_wait_before) {
-		TRACE_CATCH(ftape_ready_wait(ftcmd->ft_wait_before,
-					     &ftcmd->ft_status),);
-	}
-	if (ftcmd->ft_status & QIC_STATUS_ERROR)
-		goto ftmtcmd_error;
-	if (ftcmd->ft_result_bits != 0) {
-		TRACE_CATCH(ftape_report_operation(&ftcmd->ft_result,
-						   ftcmd->ft_cmd,
-						   ftcmd->ft_result_bits),);
-	} else {
-		TRACE_CATCH(ftape_command(ftcmd->ft_cmd),);
-		if (ftcmd->ft_status & QIC_STATUS_ERROR)
-			goto ftmtcmd_error;
-		for (i = 0; i < ftcmd->ft_parm_cnt; i++) {
-			TRACE_CATCH(ftape_parameter(ftcmd->ft_parms[i]&0x0f),);
-			if (ftcmd->ft_status & QIC_STATUS_ERROR)
-				goto ftmtcmd_error;
-		}
-	}
-	if (ftcmd->ft_wait_after != 0) {
-		TRACE_CATCH(ftape_ready_wait(ftcmd->ft_wait_after,
-					     &ftcmd->ft_status),);
-	}
-ftmtcmd_error:	       
-	if (ftcmd->ft_status & QIC_STATUS_ERROR) {
-		TRACE(ft_t_noise, "error status set");
-		TRACE_CATCH(ftape_report_error(&ftcmd->ft_error,
-					       &ftcmd->ft_cmd, 1),);
-	}
-	TRACE_EXIT 0; /* this is not an i/o error */
-}
-#endif
-
-/*  IOCTL routine called by kernel-interface code
- */
-int _zft_ioctl(unsigned int command, void __user * arg)
-{
-	int result;
-	union { struct mtop       mtop;
-		struct mtget      mtget;
-		struct mtpos      mtpos;
-#ifdef MTIOCRDFTSEG
-		struct mtftseg    mtftseg;
-#endif
-#ifdef MTIOCVOLINFO
-		struct mtvolinfo  mtvolinfo;
-#endif
-#ifdef MTIOCGETSIZE
-		struct mttapesize mttapesize;
-#endif
-#ifdef MTIOCFTFORMAT
-		struct mtftformat mtftformat;
-#endif
-#ifdef ZFT_OBSOLETE
-		struct mtblksz mtblksz;
-#endif
-#ifdef MTIOCFTCMD
-		struct mtftcmd mtftcmd;
-#endif
-	} krnl_arg;
-	int arg_size = _IOC_SIZE(command);
-	int dir = _IOC_DIR(command);
-	TRACE_FUN(ft_t_flow);
-
-	/* This check will only catch arguments that are too large !
-	 */
-	if (dir & (_IOC_READ | _IOC_WRITE) && arg_size > sizeof(krnl_arg)) {
-		TRACE_ABORT(-EINVAL,
-			    ft_t_info, "bad argument size: %d", arg_size);
-	}
-	if (dir & _IOC_WRITE) {
-		if (copy_from_user(&krnl_arg, arg, arg_size) != 0) {
-			TRACE_EXIT -EFAULT;
-		}
-	}
-	TRACE(ft_t_flow, "called with ioctl command: 0x%08x", command);
-	switch (command) {
-	case MTIOCTOP:
-		result = mtioctop(&krnl_arg.mtop, arg_size);
-		break;
-	case MTIOCGET:
-		result = mtiocget(&krnl_arg.mtget, arg_size);
-		break;
-	case MTIOCPOS:
-		result = mtiocpos(&krnl_arg.mtpos, arg_size);
-		break;
-#ifdef MTIOCVOLINFO
-	case MTIOCVOLINFO:
-		result = mtiocvolinfo(&krnl_arg.mtvolinfo, arg_size);
-		break;
-#endif
-#ifdef ZFT_OBSOLETE
-	case MTIOC_ZFTAPE_GETBLKSZ:
-		result = mtioc_zftape_getblksz(&krnl_arg.mtblksz, arg_size);
-		break;
-#endif
-#ifdef MTIOCRDFTSEG
-	case MTIOCRDFTSEG: /* read a segment via ioctl */
-		result = mtiocrdftseg(&krnl_arg.mtftseg, arg_size);
-		break;
-#endif
-#ifdef MTIOCWRFTSEG
-	case MTIOCWRFTSEG: /* write a segment via ioctl */
-		result = mtiocwrftseg(&krnl_arg.mtftseg, arg_size);
-		break;
-#endif
-#ifdef MTIOCGETSIZE
-	case MTIOCGETSIZE:
-		result = mtiocgetsize(&krnl_arg.mttapesize, arg_size);
-		break;
-#endif
-#ifdef MTIOCFTFORMAT
-	case MTIOCFTFORMAT:
-		result = mtiocftformat(&krnl_arg.mtftformat, arg_size);
-		break;
-#endif
-#ifdef MTIOCFTCMD
-	case MTIOCFTCMD:
-		result = mtiocftcmd(&krnl_arg.mtftcmd, arg_size);
-		break;
-#endif
-	default:
-		result = -EINVAL;
-		break;
-	}
-	if ((result >= 0) && (dir & _IOC_READ)) {
-		if (copy_to_user(arg, &krnl_arg, arg_size) != 0) {
-			TRACE_EXIT -EFAULT;
-		}
-	}
-	TRACE_EXIT result;
-}
diff --git a/drivers/char/ftape/zftape/zftape-ctl.h b/drivers/char/ftape/zftape/zftape-ctl.h
deleted file mode 100644
index 8e6f2d7..0000000
--- a/drivers/char/ftape/zftape/zftape-ctl.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef _ZFTAPE_CTL_H
-#define _ZFTAPE_CTL_H
-
-/*
- * Copyright (C) 1996, 1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version. 
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/zftape/zftape-ctl.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:19:02 $
- *
- *      This file contains the non-standard IOCTL related definitions
- *      for the QIC-40/80 floppy-tape driver for Linux.
- */
-
-#include <linux/ioctl.h>
-#include <linux/mtio.h>
-
-#include "../zftape/zftape-rw.h"
-
-#ifdef CONFIG_ZFTAPE_MODULE
-#define ftape_status (*zft_status)
-#endif
-
-extern int zft_offline;
-extern int zft_mt_compression;
-extern int zft_write_protected;
-extern int zft_header_read;
-extern unsigned int zft_unit;
-extern int zft_resid;
-
-extern void zft_reset_position(zft_position *pos);
-extern int  zft_check_write_access(zft_position *pos);
-extern int  zft_def_idle_state(void);
-
-/*  hooks for the VFS interface 
- */
-extern int  _zft_open(unsigned int dev_minor, unsigned int access_mode);
-extern int  _zft_close(void);
-extern int  _zft_ioctl(unsigned int command, void __user *arg);
-#endif
-
-
-
diff --git a/drivers/char/ftape/zftape/zftape-eof.c b/drivers/char/ftape/zftape/zftape-eof.c
deleted file mode 100644
index dcadcae..0000000
--- a/drivers/char/ftape/zftape/zftape-eof.c
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- *   I use these routines just to decide when I have to fake a 
- *   volume-table to preserve compatibility with the original ftape.
- */
-/*
- *      Copyright (C) 1994-1995 Bas Laarhoven.
- *      
- *      Modified for zftape 1996, 1997 Claus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- * $Source: /homes/cvs/ftape-stacked/ftape/zftape/zftape-eof.c,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:19:02 $
- *
- *      This file contains the eof mark handling code
- *      for the QIC-40/80 floppy-tape driver for Linux.
- */
-
-#include <linux/string.h>
-#include <linux/errno.h>
-
-#include <linux/zftape.h>
-
-#include "../zftape/zftape-init.h"
-#include "../zftape/zftape-rw.h"
-#include "../zftape/zftape-eof.h"
-
-/*      Global vars.
- */
-
-/* a copy of the failed sector log from the header segment.
- */
-eof_mark_union *zft_eof_map;
-
-/* number of eof marks (entries in bad sector log) on tape.
- */
-int zft_nr_eof_marks = -1;
-
-
-/*      Local vars.
- */
-
-static char linux_tape_label[] = "Linux raw format V";
-enum { 
-	min_fmt_version = 1, max_fmt_version = 2 
-};
-static unsigned ftape_fmt_version = 0;
-
-
-/* Ftape (mis)uses the bad sector log to record end-of-file marks.
- * Initially (when the tape is erased) all entries in the bad sector
- * log are added to the tape's bad sector map. The bad sector log then
- * is cleared.
- *
- * The bad sector log normally contains entries of the form: 
- * even 16-bit word: segment number of bad sector 
- * odd 16-bit word: encoded date
- * There can be a total of 448 entries (1792 bytes).
- *
- * My guess is that no program is using this bad sector log (the
- * format seems useless as there is no indication of the bad sector
- * itself, only the segment). However, if any program does use the bad
- * sector log, the format used by ftape will let the program think
- * there are some bad sectors and no harm is done.
- *  
- * The eof mark entries that ftape stores in the bad sector log:
- * even 16-bit word: segment number of eof mark
- * odd 16-bit word: sector number of eof mark [1..32]
- *  
- * The zft_eof_map as maintained is a sorted list of eof mark entries.
- *
- *
- * The tape name field in the header segments is used to store a linux
- * tape identification string and a version number.  This way the tape
- * can be recognized as a Linux raw format tape when using tools under
- * other OS's.
- *
- * 'Wide' QIC tapes (format code 4) don't have a failed sector list
- * anymore. That space is used for the (longer) bad sector map that
- * now is a variable length list too.  We now store our end-of-file
- * marker list after the bad-sector-map on tape. The list is delimited
- * by a (__u32) 0 entry.
- */
-
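
Decoding that layout from a raw dump of the header segment needs nothing more than little-endian 16-bit reads (the user-space counterpart of ftape's GET2 macro, assuming the little-endian fields of the original x86 targets). A small sketch; the FT_FSL and FT_FSL_CNT offsets live in ftape's headers, so they are taken as parameters here instead of being hard-coded:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Little-endian 16-bit read, user-space equivalent of GET2(). */
static unsigned get2(const uint8_t *buf, size_t off)
{
	return buf[off] | ((unsigned)buf[off + 1] << 8);
}

/* Print the eof marks stored in the failed sector log of a pre-format-
 * code-4 header segment: each entry is an even word (segment) followed
 * by an odd word (sector, 1..32).
 */
static void dump_eof_marks(const uint8_t *hseg, size_t fsl_off, size_t fsl_cnt_off)
{
	unsigned n = get2(hseg, fsl_cnt_off);
	unsigned i;

	for (i = 0; i < n; i++) {
		unsigned segment = get2(hseg, fsl_off + 4 * i);
		unsigned sector  = get2(hseg, fsl_off + 4 * i + 2);
		printf("eof mark: %5u/%2u\n", segment, sector);
	}
}
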
-int zft_ftape_validate_label(char *label)
-{
-	static char tmp_label[45];
-	int result = 0;
-	TRACE_FUN(ft_t_any);
-	
-	memcpy(tmp_label, label, FT_LABEL_SZ);
-	tmp_label[FT_LABEL_SZ] = '\0';
-	TRACE(ft_t_noise, "tape  label = `%s'", tmp_label);
-	ftape_fmt_version = 0;
-	if (memcmp(label, linux_tape_label, strlen(linux_tape_label)) == 0) {
-		int pos = strlen(linux_tape_label);
-		while (label[pos] >= '0' && label[pos] <= '9') {
-			ftape_fmt_version *= 10;
-			ftape_fmt_version += label[pos++] - '0';
-		}
-		result = (ftape_fmt_version >= min_fmt_version &&
-			  ftape_fmt_version <= max_fmt_version);
-	}
-	TRACE(ft_t_noise, "format version = %d", ftape_fmt_version);
-	TRACE_EXIT result;
-}
-
-static __u8 * find_end_of_eof_list(__u8 * ptr, __u8 * limit)
-{
-	while (ptr + 3 < limit) {
-
-		if (get_unaligned((__u32*)ptr)) {
-			ptr += sizeof(__u32);
-		} else {
-			return ptr;
-		}
-	}
-	return NULL;
-}
-
-void zft_ftape_extract_file_marks(__u8* address)
-{
-	int i;
-	TRACE_FUN(ft_t_any);
-	
-	zft_eof_map = NULL;
-	if (ft_format_code == fmt_var || ft_format_code == fmt_big) {
-		__u8* end;
-		__u8* start = ftape_find_end_of_bsm_list(address);
-
-		zft_nr_eof_marks = 0;
-		if (start) {
-			start += 3; /* skip end of list mark */
-			end = find_end_of_eof_list(start, 
-						   address + FT_SEGMENT_SIZE);
-			if (end && end - start <= FT_FSL_SIZE) {
-				zft_nr_eof_marks = ((end - start) / 
-						    sizeof(eof_mark_union));
-				zft_eof_map = (eof_mark_union *)start;
-			} else {
-				TRACE(ft_t_err,
-				      "EOF Mark List is too long or damaged!");
-			}
-		} else {
-			TRACE(ft_t_err, 
-			      "Bad Sector List is too long or damaged !");
-		}
-	} else {
-		zft_eof_map = (eof_mark_union *)&address[FT_FSL];
-		zft_nr_eof_marks = GET2(address, FT_FSL_CNT);
-	}
-	TRACE(ft_t_noise, "number of file marks: %d", zft_nr_eof_marks);
-	if (ftape_fmt_version == 1) {
-		TRACE(ft_t_info, "swapping version 1 fields");
-		/* version 1 format uses swapped sector and segment
-		 * fields, correct that !  
-		 */
-		for (i = 0; i < zft_nr_eof_marks; ++i) {
-			__u16 tmp = GET2(&zft_eof_map[i].mark.segment,0);
-			PUT2(&zft_eof_map[i].mark.segment, 0, 
-			     GET2(&zft_eof_map[i].mark.date,0));
-			PUT2(&zft_eof_map[i].mark.date, 0, tmp);
-		}
-	}
-	for (i = 0; i < zft_nr_eof_marks; ++i) {
-		TRACE(ft_t_noise, "eof mark: %5d/%2d",
-			GET2(&zft_eof_map[i].mark.segment, 0), 
-			GET2(&zft_eof_map[i].mark.date,0));
-	}
-	TRACE_EXIT;
-}
-
-void zft_clear_ftape_file_marks(void)
-{
-	TRACE_FUN(ft_t_flow);
-	/*  Clear failed sector log: remove all tape marks. We
-	 *  don't use old ftape-style EOF-marks.
-	 */
-	TRACE(ft_t_info, "Clearing old ftape's eof map");
-	memset(zft_eof_map, 0, zft_nr_eof_marks * sizeof(__u32));
-	zft_nr_eof_marks = 0;
-	PUT2(zft_hseg_buf, FT_FSL_CNT, 0); /* nr of eof-marks */
-	zft_header_changed = 1;
-	zft_update_label(zft_hseg_buf);
-	TRACE_EXIT;
-}
diff --git a/drivers/char/ftape/zftape/zftape-eof.h b/drivers/char/ftape/zftape/zftape-eof.h
deleted file mode 100644
index 26568c2..0000000
--- a/drivers/char/ftape/zftape/zftape-eof.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef _ZFTAPE_EOF_H
-#define _ZFTAPE_EOF_H
-
-/*
- * Copyright (C) 1994-1995 Bas Laarhoven.
- * adapted for zftape 1996, 1997 by Claus Heine
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/zftape/zftape-eof.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:19:03 $
- *
- *      Definitions and declarations for the end of file markers
- *      for the QIC-40/80 floppy-tape driver for Linux.
- */
-
-#include <linux/ftape-header-segment.h>
-#include "../zftape/zftape-buffers.h"
-/*  failed sector log size (only used if format code != 4).
- */
-
-typedef union {
-	ft_fsl_entry mark;
-	__u32 entry;
-} eof_mark_union;
- 
-/*      ftape-eof.c defined global vars.
- */
-extern int zft_nr_eof_marks;
-extern eof_mark_union *zft_eof_map;
-
-/*      ftape-eof.c defined global functions.
- */
-extern void zft_ftape_extract_file_marks(__u8* address);
-extern int  zft_ftape_validate_label(char* label);
-extern void zft_clear_ftape_file_marks(void);
-
-#endif
diff --git a/drivers/char/ftape/zftape/zftape-init.c b/drivers/char/ftape/zftape/zftape-init.c
deleted file mode 100644
index 164a1aa..0000000
--- a/drivers/char/ftape/zftape/zftape-init.c
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- *      Copyright (C) 1996, 1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- *      This file contains the code that registers the zftape frontend 
- *      to the ftape floppy tape driver for Linux
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/major.h>
-#include <linux/slab.h>
-#ifdef CONFIG_KMOD
-#include <linux/kmod.h>
-#endif
-#include <linux/fcntl.h>
-#include <linux/smp_lock.h>
-
-#include <linux/zftape.h>
-#include <linux/init.h>
-#include <linux/device.h>
-
-#include "../zftape/zftape-init.h"
-#include "../zftape/zftape-read.h"
-#include "../zftape/zftape-write.h"
-#include "../zftape/zftape-ctl.h"
-#include "../zftape/zftape-buffers.h"
-
-MODULE_AUTHOR("(c) 1996, 1997 Claus-Justus Heine "
-	      "(claus@momo.math.rwth-aachen.de)");
-MODULE_DESCRIPTION(ZFTAPE_VERSION " - "
-		   "VFS interface for the Linux floppy tape driver. "
-		   "Support for QIC-113 compatible volume table "
-		   "and builtin compression (lzrw3 algorithm)");
-MODULE_SUPPORTED_DEVICE("char-major-27");
-MODULE_LICENSE("GPL");
-
-/*      Global vars.
- */
-struct zft_cmpr_ops *zft_cmpr_ops = NULL;
-const ftape_info *zft_status;
-
-/*      Local vars.
- */
-static unsigned long busy_flag;
-
-static sigset_t orig_sigmask;
-
-/*  the interface to the kernel vfs layer
- */
-
-/* Note about llseek():
- *
- * st.c and tpqic.c update fp->f_pos but don't implment llseek() and
- * initialize the llseek component of the file_ops struct with NULL.
- * This means that the user will get the default seek, but the tape
- * device will not respect the new position, but happily read from the
- * old position. Think a zftape specific llseek() function would be
- * better, returning -ESPIPE. TODO.
- */
-
-static int  zft_open (struct inode *ino, struct file *filep);
-static int zft_close(struct inode *ino, struct file *filep);
-static int  zft_ioctl(struct inode *ino, struct file *filep,
-		      unsigned int command, unsigned long arg);
-static int  zft_mmap(struct file *filep, struct vm_area_struct *vma);
-static ssize_t zft_read (struct file *fp, char __user *buff,
-			 size_t req_len, loff_t *ppos);
-static ssize_t zft_write(struct file *fp, const char __user *buff,
-			 size_t req_len, loff_t *ppos);
-
-static const struct file_operations zft_cdev =
-{
-	.owner		= THIS_MODULE,
-	.read		= zft_read,
-	.write		= zft_write,
-	.ioctl		= zft_ioctl,
-	.mmap		= zft_mmap,
-	.open		= zft_open,
-	.release	= zft_close,
-};
-
-static struct class *zft_class;
-
-/*      Open floppy tape device
- */
-static int zft_open(struct inode *ino, struct file *filep)
-{
-	int result;
-	TRACE_FUN(ft_t_flow);
-
-	nonseekable_open(ino, filep);
-	TRACE(ft_t_flow, "called for minor %d", iminor(ino));
-	if ( test_and_set_bit(0,&busy_flag) ) {
-		TRACE_ABORT(-EBUSY, ft_t_warn, "failed: already busy");
-	}
-	if ((iminor(ino) & ~(ZFT_MINOR_OP_MASK | FTAPE_NO_REWIND))
-	     > 
-	    FTAPE_SEL_D) {
-		clear_bit(0,&busy_flag);
-		TRACE_ABORT(-ENXIO, ft_t_err, "failed: invalid unit nr");
-	}
-	orig_sigmask = current->blocked;
-	sigfillset(&current->blocked);
-	result = _zft_open(iminor(ino), filep->f_flags & O_ACCMODE);
-	if (result < 0) {
-		current->blocked = orig_sigmask; /* restore mask */
-		clear_bit(0,&busy_flag);
-		TRACE_ABORT(result, ft_t_err, "_ftape_open failed");
-	} else {
-		/* Mask signals that will disturb proper operation of the
-		 * program that is calling.
-		 */
-		current->blocked = orig_sigmask;
-		sigaddsetmask (&current->blocked, _DO_BLOCK);
-		TRACE_EXIT 0;
-	}
-}
-
-/*      Close floppy tape device
- */
-static int zft_close(struct inode *ino, struct file *filep)
-{
-	int result;
-	TRACE_FUN(ft_t_flow);
-
-	if ( !test_bit(0,&busy_flag) || iminor(ino) != zft_unit) {
-		TRACE(ft_t_err, "failed: not busy or wrong unit");
-		TRACE_EXIT 0;
-	}
-	sigfillset(&current->blocked);
-	result = _zft_close();
-	if (result < 0) {
-		TRACE(ft_t_err, "_zft_close failed");
-	}
-	current->blocked = orig_sigmask; /* restore before open state */
-	clear_bit(0,&busy_flag);
-	TRACE_EXIT 0;
-}
-
-/*      Ioctl for floppy tape device
- */
-static int zft_ioctl(struct inode *ino, struct file *filep,
-		     unsigned int command, unsigned long arg)
-{
-	int result = -EIO;
-	sigset_t old_sigmask;
-	TRACE_FUN(ft_t_flow);
-
-	if ( !test_bit(0,&busy_flag) || iminor(ino) != zft_unit || ft_failure) {
-		TRACE_ABORT(-EIO, ft_t_err,
-			    "failed: not busy, failure or wrong unit");
-	}
-	old_sigmask = current->blocked; /* save mask */
-	sigfillset(&current->blocked);
-	/* This will work as long as sizeof(void *) == sizeof(long) */
-	result = _zft_ioctl(command, (void __user *) arg);
-	current->blocked = old_sigmask; /* restore mask */
-	TRACE_EXIT result;
-}
-
-/*      Ioctl for floppy tape device
- */
-static int  zft_mmap(struct file *filep, struct vm_area_struct *vma)
-{
-	int result = -EIO;
-	sigset_t old_sigmask;
-	TRACE_FUN(ft_t_flow);
-
-	if ( !test_bit(0,&busy_flag) || 
-	    iminor(filep->f_dentry->d_inode) != zft_unit || 
-	    ft_failure)
-	{
-		TRACE_ABORT(-EIO, ft_t_err,
-			    "failed: not busy, failure or wrong unit");
-	}
-	old_sigmask = current->blocked; /* save mask */
-	sigfillset(&current->blocked);
-	if ((result = ftape_mmap(vma)) >= 0) {
-#ifndef MSYNC_BUG_WAS_FIXED
-		static struct vm_operations_struct dummy = { NULL, };
-		vma->vm_ops = &dummy;
-#endif
-	}
-	current->blocked = old_sigmask; /* restore mask */
-	TRACE_EXIT result;
-}
-
-/*      Read from floppy tape device
- */
-static ssize_t zft_read(struct file *fp, char __user *buff,
-			size_t req_len, loff_t *ppos)
-{
-	int result = -EIO;
-	sigset_t old_sigmask;
-	struct inode *ino = fp->f_dentry->d_inode;
-	TRACE_FUN(ft_t_flow);
-
-	TRACE(ft_t_data_flow, "called with count: %ld", (unsigned long)req_len);
-	if (!test_bit(0,&busy_flag)  || iminor(ino) != zft_unit || ft_failure) {
-		TRACE_ABORT(-EIO, ft_t_err,
-			    "failed: not busy, failure or wrong unit");
-	}
-	old_sigmask = current->blocked; /* save mask */
-	sigfillset(&current->blocked);
-	result = _zft_read(buff, req_len);
-	current->blocked = old_sigmask; /* restore mask */
-	TRACE(ft_t_data_flow, "return with count: %d", result);
-	TRACE_EXIT result;
-}
-
-/*      Write to tape device
- */
-static ssize_t zft_write(struct file *fp, const char __user *buff,
-			 size_t req_len, loff_t *ppos)
-{
-	int result = -EIO;
-	sigset_t old_sigmask;
-	struct inode *ino = fp->f_dentry->d_inode;
-	TRACE_FUN(ft_t_flow);
-
-	TRACE(ft_t_flow, "called with count: %ld", (unsigned long)req_len);
-	if (!test_bit(0,&busy_flag) || iminor(ino) != zft_unit || ft_failure) {
-		TRACE_ABORT(-EIO, ft_t_err,
-			    "failed: not busy, failure or wrong unit");
-	}
-	old_sigmask = current->blocked; /* save mask */
-	sigfillset(&current->blocked);
-	result = _zft_write(buff, req_len);
-	current->blocked = old_sigmask; /* restore mask */
-	TRACE(ft_t_data_flow, "return with count: %d", result);
-	TRACE_EXIT result;
-}
-
-/*                    END OF VFS INTERFACE 
- *          
- *****************************************************************************/
-
-/*  driver/module initialization
- */
-
-/*  the compression module has to call this function to hook into the zftape 
- *  code
- */
-int zft_cmpr_register(struct zft_cmpr_ops *new_ops)
-{
-	TRACE_FUN(ft_t_flow);
-	
-	if (zft_cmpr_ops != NULL) {
-		TRACE_EXIT -EBUSY;
-	} else {
-		zft_cmpr_ops = new_ops;
-		TRACE_EXIT 0;
-	}
-}
-
-/*  lock the zft-compressor() module.
- */
-int zft_cmpr_lock(int try_to_load)
-{
-	if (zft_cmpr_ops == NULL) {
-#ifdef CONFIG_KMOD
-		if (try_to_load) {
-			request_module("zft-compressor");
-			if (zft_cmpr_ops == NULL) {
-				return -ENOSYS;
-			}
-		} else {
-			return -ENOSYS;
-		}
-#else
-		return -ENOSYS;
-#endif
-	}
-	(*zft_cmpr_ops->lock)();
-	return 0;
-}
-
-#ifdef CONFIG_ZFT_COMPRESSOR
-extern int zft_compressor_init(void);
-#endif
-
-/*  Called by modules package when installing the driver or by kernel
- *  during the initialization phase
- */
-int __init zft_init(void)
-{
-	int i;
-	TRACE_FUN(ft_t_flow);
-
-#ifdef MODULE
-	printk(KERN_INFO ZFTAPE_VERSION "\n");
-        if (TRACE_LEVEL >= ft_t_info) {
-		printk(
-KERN_INFO
-"(c) 1996, 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de)\n"
-KERN_INFO
-"vfs interface for ftape floppy tape driver.\n"
-KERN_INFO
-"Support for QIC-113 compatible volume table, dynamic memory allocation\n"
-KERN_INFO
-"and builtin compression (lzrw3 algorithm).\n");
-        }
-#else /* !MODULE */
-	/* print a short no-nonsense boot message */
-	printk(KERN_INFO ZFTAPE_VERSION "\n");
-#endif /* MODULE */
-	TRACE(ft_t_info, "zft_init @ 0x%p", zft_init);
-	TRACE(ft_t_info,
-	      "installing zftape VFS interface for ftape driver ...");
-	TRACE_CATCH(register_chrdev(QIC117_TAPE_MAJOR, "zft", &zft_cdev),);
-
-	zft_class = class_create(THIS_MODULE, "zft");
-	for (i = 0; i < 4; i++) {
-		class_device_create(zft_class, NULL, MKDEV(QIC117_TAPE_MAJOR, i), NULL, "qft%i", i);
-		class_device_create(zft_class, NULL, MKDEV(QIC117_TAPE_MAJOR, i + 4), NULL, "nqft%i", i);
-		class_device_create(zft_class, NULL, MKDEV(QIC117_TAPE_MAJOR, i + 16), NULL, "zqft%i", i);
-		class_device_create(zft_class, NULL, MKDEV(QIC117_TAPE_MAJOR, i + 20), NULL, "nzqft%i", i);
-		class_device_create(zft_class, NULL, MKDEV(QIC117_TAPE_MAJOR, i + 32), NULL, "rawqft%i", i);
-		class_device_create(zft_class, NULL, MKDEV(QIC117_TAPE_MAJOR, i + 36), NULL, "nrawrawqft%i", i);
-	}
-
-#ifdef CONFIG_ZFT_COMPRESSOR
-	(void)zft_compressor_init();
-#endif
-	zft_status = ftape_get_status(); /*  fetch global data of ftape 
-					  *  hardware driver 
-					  */
-	TRACE_EXIT 0;
-}
-
-
-/* Called by modules package when removing the driver 
- */
-static void zft_exit(void)
-{
-	int i;
-	TRACE_FUN(ft_t_flow);
-
-	if (unregister_chrdev(QIC117_TAPE_MAJOR, "zft") != 0) {
-		TRACE(ft_t_warn, "failed");
-	} else {
-		TRACE(ft_t_info, "successful");
-	}
-        for (i = 0; i < 4; i++) {
-		class_device_destroy(zft_class, MKDEV(QIC117_TAPE_MAJOR, i));
-		class_device_destroy(zft_class, MKDEV(QIC117_TAPE_MAJOR, i + 4));
-		class_device_destroy(zft_class, MKDEV(QIC117_TAPE_MAJOR, i + 16));
-		class_device_destroy(zft_class, MKDEV(QIC117_TAPE_MAJOR, i + 20));
-		class_device_destroy(zft_class, MKDEV(QIC117_TAPE_MAJOR, i + 32));
-		class_device_destroy(zft_class, MKDEV(QIC117_TAPE_MAJOR, i + 36));
-	}
-	class_destroy(zft_class);
-	zft_uninit_mem(); /* release remaining memory, if any */
-        printk(KERN_INFO "zftape successfully unloaded.\n");
-	TRACE_EXIT;
-}
-
-module_init(zft_init);
-module_exit(zft_exit);
diff --git a/drivers/char/ftape/zftape/zftape-init.h b/drivers/char/ftape/zftape/zftape-init.h
deleted file mode 100644
index 937e5d4..0000000
--- a/drivers/char/ftape/zftape/zftape-init.h
+++ /dev/null
@@ -1,77 +0,0 @@
-#ifndef _ZFTAPE_INIT_H
-#define _ZFTAPE_INIT_H
-
-/*
- * Copyright (C) 1996, 1997 Claus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/zftape/zftape-init.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:19:05 $
- *
- * This file contains definitions and macro for the vfs 
- * interface defined by zftape
- *
- */
-
-#include <linux/ftape-header-segment.h>
-
-#include "../lowlevel/ftape-tracing.h"
-#include "../lowlevel/ftape-ctl.h"
-#include "../lowlevel/ftape-read.h"
-#include "../lowlevel/ftape-write.h"
-#include "../lowlevel/ftape-bsm.h"
-#include "../lowlevel/ftape-io.h"
-#include "../lowlevel/ftape-buffer.h"
-#include "../lowlevel/ftape-format.h"
-
-#include "../zftape/zftape-rw.h"
-
-#ifdef MODULE
-#define ftape_status (*zft_status)
-#endif
-
-extern const  ftape_info *zft_status; /* needed for zftape-vtbl.h */
-
-#include "../zftape/zftape-vtbl.h"
-
-struct zft_cmpr_ops {
-	int (*write)(int *write_cnt,
-		     __u8 *dst_buf, const int seg_sz,
-		     const __u8 __user *src_buf, const int req_len, 
-		     const zft_position *pos, const zft_volinfo *volume);
-	int (*read)(int *read_cnt,
-		    __u8 __user *dst_buf, const int req_len,
-		    const __u8 *src_buf, const int seg_sz,
-		    const zft_position *pos, const zft_volinfo *volume);
-	int (*seek)(unsigned int new_block_pos,
-		    zft_position *pos, const zft_volinfo *volume,
-		    __u8 *buffer);
-	void (*lock)   (void);
-	void (*reset)  (void);
-	void (*cleanup)(void);
-};
-
-extern struct zft_cmpr_ops *zft_cmpr_ops;
-/* zftape-init.c defined global functions.
- */
-extern int                  zft_cmpr_register(struct zft_cmpr_ops *new_ops);
-extern int                  zft_cmpr_lock(int try_to_load);
-
-#endif
-
-
diff --git a/drivers/char/ftape/zftape/zftape-read.c b/drivers/char/ftape/zftape/zftape-read.c
deleted file mode 100644
index 214bf03..0000000
--- a/drivers/char/ftape/zftape/zftape-read.c
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- *      Copyright (C) 1996, 1997 Claus-Justus Heine
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/zftape/zftape-read.c,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:19:06 $
- *
- *      This file contains the high level reading code
- *      for the QIC-117 floppy-tape driver for Linux.
- */
-
-#include <linux/errno.h>
-#include <linux/mm.h>
-
-#include <linux/zftape.h>
-
-#include <asm/uaccess.h>
-
-#include "../zftape/zftape-init.h"
-#include "../zftape/zftape-eof.h"
-#include "../zftape/zftape-ctl.h"
-#include "../zftape/zftape-write.h"
-#include "../zftape/zftape-read.h"
-#include "../zftape/zftape-rw.h"
-#include "../zftape/zftape-vtbl.h"
-
-/*      Global vars.
- */
-int zft_just_before_eof;
-
-/*      Local vars.
- */
-static int buf_len_rd;
-
-void zft_zap_read_buffers(void)
-{
-	buf_len_rd = 0;
-}
-
-int zft_read_header_segments(void)      
-{
-	TRACE_FUN(ft_t_flow);
-
-	zft_header_read = 0;
-	TRACE_CATCH(zft_vmalloc_once(&zft_hseg_buf, FT_SEGMENT_SIZE),);
-	TRACE_CATCH(ftape_read_header_segment(zft_hseg_buf),);
-	TRACE(ft_t_info, "Segments written since first format: %d",
-	      (int)GET4(zft_hseg_buf, FT_SEG_CNT));
-	zft_qic113 = (ft_format_code != fmt_normal &&
-		      ft_format_code != fmt_1100ft &&
-		      ft_format_code != fmt_425ft);
-	TRACE(ft_t_info, "ft_first_data_segment: %d, ft_last_data_segment: %d", 
-	      ft_first_data_segment, ft_last_data_segment);
-	zft_capacity = zft_get_capacity();
-	zft_old_ftape = zft_ftape_validate_label(&zft_hseg_buf[FT_LABEL]);
-	if (zft_old_ftape) {
-		TRACE(ft_t_info, 
-"Found old ftaped tape, emulating eof marks, entering read-only mode");
-		zft_ftape_extract_file_marks(zft_hseg_buf);
-		TRACE_CATCH(zft_fake_volume_headers(zft_eof_map, 
-						    zft_nr_eof_marks),);
-	} else {
-		/* the specs say that the volume table must be
-		 * initialized with zeroes during formatting, so it
-		 * MUST be readable, i.e. contain vaid ECC
-		 * information.  
-		 */
-		TRACE_CATCH(ftape_read_segment(ft_first_data_segment, 
-					       zft_deblock_buf, 
-					       FT_RD_SINGLE),);
-		TRACE_CATCH(zft_extract_volume_headers(zft_deblock_buf),);
-	}
-	zft_header_read = 1;
-	zft_set_flags(zft_unit);
-	zft_reset_position(&zft_pos);
-	TRACE_EXIT 0;
-}
-
-int zft_fetch_segment_fraction(const unsigned int segment, void *buffer,
-			       const ft_read_mode_t read_mode,
-			       const unsigned int start,
-			       const unsigned int size)
-{
-	int seg_sz;
-	TRACE_FUN(ft_t_flow);
-
-	if (segment == zft_deblock_segment) {
-		TRACE(ft_t_data_flow,
-		      "re-using segment %d already in deblock buffer",
-		      segment);
-		seg_sz = zft_get_seg_sz(segment);
-		if (start > seg_sz) {
-			TRACE_ABORT(-EINVAL, ft_t_bug,
-				    "trying to read beyond end of segment:\n"
-				    KERN_INFO "seg_sz : %d\n"
-				    KERN_INFO "start  : %d\n"
-				    KERN_INFO "segment: %d",
-				    seg_sz, start, segment);
-		}
-		if ((start + size) > seg_sz) {
-			TRACE_EXIT seg_sz - start;
-		}
-		TRACE_EXIT size;
-	}
-	seg_sz = ftape_read_segment_fraction(segment, buffer, read_mode,
-					     start, size);
-	TRACE(ft_t_data_flow, "segment %d, result %d", segment, seg_sz);
-	if ((int)seg_sz >= 0 && start == 0 && size == FT_SEGMENT_SIZE) {
-		/*  this implicitly assumes that we are always called with
-		 *  buffer == zft_deblock_buf 
-		 */
-		zft_deblock_segment = segment;
-	} else {
-		zft_deblock_segment = -1;
-	}
-	TRACE_EXIT seg_sz;
-}
-
-/*
- * out:
- *
- * int *read_cnt: the number of bytes we removed from the
- *                zft_deblock_buf (result)
- *
- * int *to_do   : the remaining size of the read-request. Is changed.
- *
- * in:
- *
- * char *buff      : buff is the address of the upper part of the user
- *                   buffer, that hasn't been filled with data yet.
- * int buf_pos_read: copy of buf_pos_rd
- * int buf_len_read: copy of buf_len_rd
- * char *zft_deblock_buf: ftape_zft_deblock_buf
- *
- * returns the amount of data actually copied to the user-buffer
- *
- * to_do MUST NOT SHRINK except to indicate an EOT. In this case to_do
- * has to be set to 0. We cannot return -ENOSPC, because we return the
- * amount of data actually * copied to the user-buffer
- */
-static int zft_simple_read (int *read_cnt, 
-			    __u8  __user *dst_buf, 
-			    const int to_do, 
-			    const __u8 *src_buf, 
-			    const int seg_sz, 
-			    const zft_position *pos,
-			    const zft_volinfo *volume)
-{
-	TRACE_FUN(ft_t_flow);
-
-	if (seg_sz - pos->seg_byte_pos < to_do) {
-		*read_cnt = seg_sz - pos->seg_byte_pos;
-	} else {
-		*read_cnt = to_do;
-	}
-	if (copy_to_user(dst_buf, 
-			 src_buf + pos->seg_byte_pos, *read_cnt) != 0) {
-		TRACE_EXIT -EFAULT;
-	}
-	TRACE(ft_t_noise, "nr bytes just read: %d", *read_cnt);
-	TRACE_EXIT *read_cnt;
-}
-
-/* req_len: gets clipped due to EOT of EOF.
- * req_clipped: is a flag indicating whether req_len was clipped or not
- * volume: contains information on current volume (blk_sz etc.)
- */
-static int check_read_access(int *req_len, 
-			     const zft_volinfo **volume,
-			     int *req_clipped, 
-			     const zft_position *pos)
-{
-	static __s64 remaining;
-	static int eod;
-	TRACE_FUN(ft_t_flow);
-	
-	if (zft_io_state != zft_reading) {
-		if (zft_offline) { /* offline includes no_tape */
-			TRACE_ABORT(-ENXIO, ft_t_warn,
-				    "tape is offline or no cartridge");
-		}
-		if (!ft_formatted) {
-			TRACE_ABORT(-EACCES,
-				    ft_t_warn, "tape is not formatted");
-		}
-		/*  now enter defined state, read header segment if not
-		 *  already done and flush write buffers
-		 */
-		TRACE_CATCH(zft_def_idle_state(),);
-		zft_io_state = zft_reading;
-		if (zft_tape_at_eod(pos)) {
-			eod = 1;
-			TRACE_EXIT 1;
-		}
-		eod = 0;
-		*volume = zft_find_volume(pos->seg_pos);
-		/* get the space left until EOF */
-		remaining = zft_check_for_eof(*volume, pos);
-		buf_len_rd = 0;
-		TRACE(ft_t_noise, "remaining: " LL_X ", vol_no: %d",
-		      LL(remaining), (*volume)->count);
-	} else if (zft_tape_at_eod(pos)) {
-		if (++eod > 2) {
-			TRACE_EXIT -EIO; /* st.c also returns -EIO */
-		} else {
-			TRACE_EXIT 1;
-		}
-	}
-	if ((*req_len % (*volume)->blk_sz) != 0) {
-		/*  this message is informational only. The user gets the
-		 *  proper return value
-		 */
-		TRACE_ABORT(-EINVAL, ft_t_info,
-			    "req_len %d not a multiple of block size %d",
-			    *req_len, (*volume)->blk_sz);
-	}
-	/* As GNU tar doesn't accept partial read counts when the
-	 * multiple volume flag is set, we make sure to return the
-	 * requested amount of data. Except, of course, at the end of
-	 * the tape or file mark.  
-	 */
-	remaining -= *req_len;
-	if (remaining <= 0) {
-		TRACE(ft_t_noise, 
-		      "clipped request from %d to %d.", 
-		      *req_len, (int)(*req_len + remaining));
-		*req_len += remaining;
-		*req_clipped = 1;
-	} else {
-		*req_clipped = 0;
-	}
-	TRACE_EXIT 0;
-}
-
-/* this_segs_size: the current segment's size.
- * buff: the USER-SPACE buffer provided by the calling function.
- * req_len: how much data should be read at most.
- * volume: contains information on current volume (blk_sz etc.)
- */  
-static int empty_deblock_buf(__u8 __user *usr_buf, const int req_len,
-			     const __u8 *src_buf, const int seg_sz,
-			     zft_position *pos,
-			     const zft_volinfo *volume)
-{
-	int cnt;
-	int result = 0;
-	TRACE_FUN(ft_t_flow);
-
-	TRACE(ft_t_data_flow, "this_segs_size: %d", seg_sz);
-	if (zft_use_compression && volume->use_compression) {
-		TRACE_CATCH(zft_cmpr_lock(1 /* try to load */),);
-		TRACE_CATCH(result= (*zft_cmpr_ops->read)(&cnt,
-							  usr_buf, req_len,
-							  src_buf, seg_sz,
-							  pos, volume),);
-	} else {                                  
-		TRACE_CATCH(result= zft_simple_read (&cnt,
-						     usr_buf, req_len,
-						     src_buf, seg_sz,
-						     pos, volume),);
-	}
-	pos->volume_pos   += result;
-        pos->tape_pos     += cnt;
-	pos->seg_byte_pos += cnt;
-	buf_len_rd        -= cnt; /* remaining bytes in buffer */
-	TRACE(ft_t_data_flow, "buf_len_rd: %d, cnt: %d", buf_len_rd, cnt);
-	if(pos->seg_byte_pos >= seg_sz) {
-		pos->seg_pos++;
-		pos->seg_byte_pos = 0;
-	}
-	TRACE(ft_t_data_flow, "bytes moved out of deblock-buffer: %d", cnt);
-	TRACE_EXIT result;
-}
-
-
-/* note: we store the segment id of the segment that is inside the
- * deblock buffer. This spares a lot of ftape_read_segment()s when we
- * use small block-sizes. The block-size may be 1kb (SECTOR_SIZE). In
- * this case a MTFSR 28 maybe still inside the same segment.
- */
-int _zft_read(char __user *buff, int req_len)
-{
-	int req_clipped;
-	int result     = 0;
-	int bytes_read = 0;
-	static unsigned int seg_sz = 0;
-	static const zft_volinfo *volume = NULL;
-	TRACE_FUN(ft_t_flow);
-	
-	zft_resid = req_len;
-	result = check_read_access(&req_len, &volume,
-				   &req_clipped, &zft_pos);
-	switch(result) {
-	case 0: 
-		break; /* nothing special */
-	case 1: 
-		TRACE(ft_t_noise, "EOD reached");
-		TRACE_EXIT 0;   /* EOD */
-	default:
-		TRACE_ABORT(result, ft_t_noise,
-			    "check_read_access() failed with result %d",
-			    result);
-		TRACE_EXIT result;
-	}
-	while (req_len > 0) { 
-		/*  Allow escape from this loop on signal !
-		 */
-		FT_SIGNAL_EXIT(_DONT_BLOCK);
-		/* buf_len_rd == 0 means that we need to read a new
-		 * segment.
-		 */
-		if (buf_len_rd == 0) {
-			while((result = zft_fetch_segment(zft_pos.seg_pos,
-							  zft_deblock_buf,
-							  FT_RD_AHEAD)) == 0) {
-				zft_pos.seg_pos ++;
-				zft_pos.seg_byte_pos = 0;
-			}
-			if (result < 0) {
-				zft_resid -= bytes_read;
-				TRACE_ABORT(result, ft_t_noise,
-					    "zft_fetch_segment(): %d",
-					    result);
-			}
-			seg_sz = result;
-			buf_len_rd = seg_sz - zft_pos.seg_byte_pos;
-		}
-		TRACE_CATCH(result = empty_deblock_buf(buff, 
-						       req_len,
-						       zft_deblock_buf, 
-						       seg_sz, 
-						       &zft_pos,
-						       volume),
-			    zft_resid -= bytes_read);
-		TRACE(ft_t_data_flow, "bytes just read: %d", result);
-		bytes_read += result; /* what we got so far       */
-		buff       += result; /* index in user-buffer     */
-		req_len    -= result; /* what's left from req_len */
-	} /* while (req_len  > 0) */
-	if (req_clipped) {
-		TRACE(ft_t_data_flow,
-		      "maybe partial count because of eof mark");
-		if (zft_just_before_eof && bytes_read == 0) {
-			/* req_len was > 0, but user didn't get
-			 * anything the user has read in the eof-mark 
-			 */
-			zft_move_past_eof(&zft_pos);
-			ftape_abort_operation();
-		} else {
-			/* don't skip to the next file before the user
-			 * tried to read a second time past EOF Just
-			 * mark that we are at EOF and maybe decrement
-			 * zft_seg_pos to stay in the same volume;
-			 */
-			zft_just_before_eof = 1;
-			zft_position_before_eof(&zft_pos, volume);
-			TRACE(ft_t_noise, "just before eof");
-		}
-	}
-	zft_resid -= result; /* for MTSTATUS       */
-	TRACE_EXIT bytes_read;
-}
diff --git a/drivers/char/ftape/zftape/zftape-read.h b/drivers/char/ftape/zftape/zftape-read.h
deleted file mode 100644
index 42941de..0000000
--- a/drivers/char/ftape/zftape/zftape-read.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef _ZFTAPE_READ_H
-#define _ZFTAPE_READ_H
-
-/*
- * Copyright (C) 1996, 1997 Claus-Justus Heine
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/zftape/zftape-read.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:19:07 $
- *
- *      This file contains the definitions for the read functions
- *      for the zftape driver for Linux.
- *
- */
-
-#include "../lowlevel/ftape-read.h"
-
-/*      ftape-read.c defined global vars.
- */
-extern int zft_just_before_eof;
-	
-/*      ftape-read.c defined global functions.
- */
-extern void zft_zap_read_buffers(void);
-extern int  zft_read_header_segments(void);
-extern int  zft_fetch_segment_fraction(const unsigned int segment,
-				       void *buffer,
-				       const ft_read_mode_t read_mode,
-				       const unsigned int start,
-				       const unsigned int size);
-#define zft_fetch_segment(segment, address, read_mode)		\
-	zft_fetch_segment_fraction(segment, address, read_mode,	\
-				   0, FT_SEGMENT_SIZE)
-/*   hook for the VFS interface
- */
-extern int  _zft_read(char __user *buff, int req_len);
-
-#endif /* _ZFTAPE_READ_H */
diff --git a/drivers/char/ftape/zftape/zftape-rw.c b/drivers/char/ftape/zftape/zftape-rw.c
deleted file mode 100644
index dab6346..0000000
--- a/drivers/char/ftape/zftape/zftape-rw.c
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- *      Copyright (C) 1996, 1997 Claus-Justus Heine
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/zftape/zftape-rw.c,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:19:08 $
- *
- *      This file contains some common code for the r/w code for
- *      zftape.
- */
-
-#include <linux/errno.h>
-#include <linux/mm.h>
-
-#include <linux/zftape.h>
-#include "../zftape/zftape-init.h"
-#include "../zftape/zftape-eof.h"
-#include "../zftape/zftape-ctl.h"
-#include "../zftape/zftape-write.h"
-#include "../zftape/zftape-read.h"
-#include "../zftape/zftape-rw.h"
-#include "../zftape/zftape-vtbl.h"
-
-/*      Global vars.
- */
-
-__u8 *zft_deblock_buf;
-__u8 *zft_hseg_buf;
-int zft_deblock_segment = -1;
-zft_status_enum zft_io_state = zft_idle;
-int zft_header_changed;
-int zft_qic113; /* conform to old specs. and old zftape */
-int zft_use_compression;
-zft_position zft_pos = {
-	-1, /* seg_pos */
-	0,  /* seg_byte_pos */
-	0,  /* tape_pos */
-	0   /* volume_pos */
-};
-unsigned int zft_blk_sz = CONFIG_ZFT_DFLT_BLK_SZ;
-__s64 zft_capacity;
-
-unsigned int zft_written_segments;
-int zft_label_changed;
-
-/*      Local vars.
- */
-
-unsigned int zft_get_seg_sz(unsigned int segment)
-{
-	int size;
-	TRACE_FUN(ft_t_any);
-	
-	size = FT_SEGMENT_SIZE - 
-		count_ones(ftape_get_bad_sector_entry(segment))*FT_SECTOR_SIZE;
-	if (size > 0) {
-		TRACE_EXIT (unsigned)size; 
-	} else {
-		TRACE_EXIT 0;
-	}
-}
-
-/* ftape_set_flags(). Claus-Justus Heine, 1994/1995
- */
-void zft_set_flags(unsigned minor_unit)
-{     
-	TRACE_FUN(ft_t_flow);
-	
-	zft_use_compression = zft_qic_mode = 0;
-	switch (minor_unit & ZFT_MINOR_OP_MASK) {
-	case (ZFT_Q80_MODE | ZFT_ZIP_MODE):
-	case ZFT_ZIP_MODE:
-		zft_use_compression = 1;
-	case 0:
-	case ZFT_Q80_MODE:
-		zft_qic_mode = 1;
-		if (zft_mt_compression) { /* override the default */
-			zft_use_compression = 1;
-		}
-		break;
-	case ZFT_RAW_MODE:
-		TRACE(ft_t_noise, "switching to raw mode");
-		break;
-	default:
-		TRACE(ft_t_warn, "Warning:\n"
-		      KERN_INFO "Wrong combination of minor device bits.\n"
-		      KERN_INFO "Switching to raw read-only mode.");
-		zft_write_protected = 1;
-		break;
-	}
-	TRACE_EXIT;
-}
-
-/* computes the segment and byte offset inside the segment
- * corresponding to tape_pos.
- *
- * tape_pos gives the offset in bytes from the beginning of the
- * ft_first_data_segment *seg_byte_pos is the offset in the current
- * segment in bytes
- *
- * Of, if this routine was called often one should cache the last data
- * pos it was called with, but actually this is only needed in
- * ftape_seek_block(), that is, almost never.
- */
-int zft_calc_seg_byte_coord(int *seg_byte_pos, __s64 tape_pos)
-{
-	int segment;
-	int seg_sz;
-	TRACE_FUN(ft_t_flow);
-	
-	if (tape_pos == 0) {
-		*seg_byte_pos = 0;
-		segment = ft_first_data_segment;
-	} else {
-		seg_sz = 0;
-		
-		for (segment = ft_first_data_segment; 
-		     ((tape_pos > 0) && (segment <= ft_last_data_segment));
-		     segment++) {
-			seg_sz = zft_get_seg_sz(segment); 
-			tape_pos -= seg_sz;
-		}
-		if(tape_pos >= 0) {
-			/* the case tape_pos > != 0 means that the
-			 * argument tape_pos lies beyond the EOT.
-			 */
-			*seg_byte_pos= 0;
-		} else { /* tape_pos < 0 */
-			segment--;
-			*seg_byte_pos= tape_pos + seg_sz;
-		}
-	}
-	TRACE_EXIT(segment);
-}
-
-/* ftape_calc_tape_pos().
- *
- * computes the offset in bytes from the beginning of the
- * ft_first_data_segment inverse to ftape_calc_seg_byte_coord
- *
- * We should do some caching. But how:
- *
- * Each time the header segments are read in, this routine is called
- * with ft_tracks_per_tape*segments_per_track argumnet. So this should be
- * the time to reset the cache.
- *
- * Also, it might be in the future that the bad sector map gets
- * changed.  -> reset the cache
- */
-static int seg_pos;
-static __s64 tape_pos;
-
-__s64 zft_get_capacity(void)
-{
-	seg_pos  = ft_first_data_segment;
-	tape_pos = 0;
-
-	while (seg_pos <= ft_last_data_segment) {
-		tape_pos += zft_get_seg_sz(seg_pos ++);
-	}
-	return tape_pos;
-}
-
-__s64 zft_calc_tape_pos(int segment)
-{
-	int d1, d2, d3;
-	TRACE_FUN(ft_t_any);
-	
-	if (segment > ft_last_data_segment) {
-	        TRACE_EXIT zft_capacity;
-	}
-	if (segment < ft_first_data_segment) {
-		TRACE_EXIT 0;
-	}
-	d2 = segment - seg_pos;
-	if (-d2 > 10) {
-		d1 = segment - ft_first_data_segment;
-		if (-d2 > d1) {
-			tape_pos = 0;
-			seg_pos = ft_first_data_segment;
-			d2 = d1;
-		}
-	}
-	if (d2 > 10) {
-		d3 = ft_last_data_segment - segment;
-		if (d2 > d3) {
-			tape_pos = zft_capacity;
-			seg_pos  = ft_last_data_segment + 1;
-			d2 = -d3;
-		}
-	}		
-	if (d2 > 0) {
-		while (seg_pos < segment) {
-			tape_pos +=  zft_get_seg_sz(seg_pos++);
-		}
-	} else {
-		while (seg_pos > segment) {
-			tape_pos -=  zft_get_seg_sz(--seg_pos);
-		}
-	}
-	TRACE(ft_t_noise, "new cached pos: %d", seg_pos);
-
-	TRACE_EXIT tape_pos;
-}
-
-/* copy Z-label string to buffer, keeps track of the correct offset in
- * `buffer' 
- */
-void zft_update_label(__u8 *buffer)
-{ 
-	TRACE_FUN(ft_t_flow);
-	
-	if (strncmp(&buffer[FT_LABEL], ZFTAPE_LABEL, 
-		    sizeof(ZFTAPE_LABEL)-1) != 0) {
-		TRACE(ft_t_info, "updating label from \"%s\" to \"%s\"",
-		      &buffer[FT_LABEL], ZFTAPE_LABEL);
-		strcpy(&buffer[FT_LABEL], ZFTAPE_LABEL);
-		memset(&buffer[FT_LABEL] + sizeof(ZFTAPE_LABEL) - 1, ' ', 
-		       FT_LABEL_SZ - sizeof(ZFTAPE_LABEL + 1));
-		PUT4(buffer, FT_LABEL_DATE, 0);
-		zft_label_changed = zft_header_changed = 1; /* changed */
-	}
-	TRACE_EXIT;
-}
-
-int zft_verify_write_segments(unsigned int segment, 
-			      __u8 *data, size_t size,
-			      __u8 *buffer)
-{
-	int result;
-	__u8 *write_buf;
-	__u8 *src_buf;
-	int single;
-	int seg_pos;
-	int seg_sz;
-	int remaining;
-	ft_write_mode_t write_mode;
-	TRACE_FUN(ft_t_flow);
-
-	seg_pos   = segment;
-	seg_sz    = zft_get_seg_sz(seg_pos);
-	src_buf   = data;
-	single    = size <= seg_sz;
-	remaining = size;
-	do {
-		TRACE(ft_t_noise, "\n"
-		      KERN_INFO "remaining: %d\n"
-		      KERN_INFO "seg_sz   : %d\n"
-		      KERN_INFO "segment  : %d",
-		      remaining, seg_sz, seg_pos);
-		if (remaining == seg_sz) {
-			write_buf = src_buf;
-			write_mode = single ? FT_WR_SINGLE : FT_WR_MULTI;
-			remaining = 0;
-		} else if (remaining > seg_sz) {
-			write_buf = src_buf;
-			write_mode = FT_WR_ASYNC; /* don't start tape */
-			remaining -= seg_sz;
-		} else { /* remaining < seg_sz */
-			write_buf = buffer;
-			memcpy(write_buf, src_buf, remaining);
-			memset(&write_buf[remaining],'\0',seg_sz-remaining);
-			write_mode = single ? FT_WR_SINGLE : FT_WR_MULTI;
-			remaining = 0;
-		}
-		if ((result = ftape_write_segment(seg_pos, 
-						  write_buf, 
-						  write_mode)) != seg_sz) {
-			TRACE(ft_t_err, "Error: "
-			      "Couldn't write segment %d", seg_pos);
-			TRACE_EXIT result < 0 ? result : -EIO; /* bail out */
-		}
-		zft_written_segments ++;
-		seg_sz = zft_get_seg_sz(++seg_pos);
-		src_buf += result;
-	} while (remaining > 0);
-	if (ftape_get_status()->fti_state == writing) {
-		TRACE_CATCH(ftape_loop_until_writes_done(),);
-		TRACE_CATCH(ftape_abort_operation(),);
-		zft_prevent_flush();
-	}
-	seg_pos = segment;
-	src_buf = data;
-	remaining = size;
-	do {
-		TRACE_CATCH(result = ftape_read_segment(seg_pos, buffer, 
-							single ? FT_RD_SINGLE
-							: FT_RD_AHEAD),);
-		if (memcmp(src_buf, buffer, 
-			   remaining > result ? result : remaining) != 0) {
-			TRACE_ABORT(-EIO, ft_t_err,
-				    "Failed to verify written segment %d",
-				    seg_pos);
-		}
-		remaining -= result;
-		TRACE(ft_t_noise, "verify successful:\n"
-		      KERN_INFO "segment  : %d\n"
-		      KERN_INFO "segsize  : %d\n"
-		      KERN_INFO "remaining: %d",
-		      seg_pos, result, remaining);
-		src_buf   += seg_sz;
-		seg_pos++;
-	} while (remaining > 0);
-	TRACE_EXIT size;
-}
-
-
-/* zft_erase().  implemented compression-handling
- *
- * calculate the first data-segment when using/not using compression.
- *
- * update header-segment and compression-map-segment.
- */
-int zft_erase(void)
-{
-	int result = 0;
-	TRACE_FUN(ft_t_flow);
-	
-	if (!zft_header_read) {
-		TRACE_CATCH(zft_vmalloc_once((void **)&zft_hseg_buf,
-					     FT_SEGMENT_SIZE),);
-		/* no need to read the vtbl and compression map */
-		TRACE_CATCH(ftape_read_header_segment(zft_hseg_buf),);
-		if ((zft_old_ftape = 
-		     zft_ftape_validate_label(&zft_hseg_buf[FT_LABEL]))) {
-			zft_ftape_extract_file_marks(zft_hseg_buf);
-		}
-		TRACE(ft_t_noise,
-		      "ft_first_data_segment: %d, ft_last_data_segment: %d", 
-		      ft_first_data_segment, ft_last_data_segment);
-		zft_qic113 = (ft_format_code != fmt_normal &&
-			      ft_format_code != fmt_1100ft &&
-			      ft_format_code != fmt_425ft);
-	}
-	if (zft_old_ftape) {
-		zft_clear_ftape_file_marks();
-		zft_old_ftape = 0; /* no longer old ftape */
-	}
-	PUT2(zft_hseg_buf, FT_CMAP_START, 0);
-	zft_volume_table_changed = 1;
-	zft_capacity = zft_get_capacity();
-	zft_init_vtbl();
-	/* the rest must be done in ftape_update_header_segments 
-	 */
-	zft_header_read = 1;
-	zft_header_changed = 1; /* force update of timestamp */
-	result = zft_update_header_segments();
-
-	ftape_abort_operation();
-
-	zft_reset_position(&zft_pos);
-	zft_set_flags (zft_unit);
-	TRACE_EXIT result;
-}
-
-unsigned int zft_get_time(void) 
-{
-	unsigned int date = FT_TIME_STAMP(2097, 11, 30, 23, 59, 59); /* fun */
-	return date;
-}
diff --git a/drivers/char/ftape/zftape/zftape-rw.h b/drivers/char/ftape/zftape/zftape-rw.h
deleted file mode 100644
index 1ceec22..0000000
--- a/drivers/char/ftape/zftape/zftape-rw.h
+++ /dev/null
@@ -1,101 +0,0 @@
-#ifndef _ZFTAPE_RW_H
-#define _ZFTAPE_RW_H
-
-/*
- * Copyright (C) 1996, 1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/zftape/zftape-rw.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:19:09 $
- *
- *      This file contains the definitions for the read and write
- *      functions for the QIC-117 floppy-tape driver for Linux.
- *
- */
-
-#include "../zftape/zftape-buffers.h"
-
-#define SEGMENTS_PER_TAPE  (ft_segments_per_track * ft_tracks_per_tape)
-
-/*  QIC-113 Rev. G says that `a maximum of 63488 raw bytes may be
- *  compressed into a single frame'.
- *  Maybe we should stick to 32kb to make it more `beautiful'
- */
-#define ZFT_MAX_BLK_SZ           (62*1024) /* bytes */
-#if !defined(CONFIG_ZFT_DFLT_BLK_SZ)
-# define CONFIG_ZFT_DFLT_BLK_SZ   (10*1024) /* bytes, default of gnu tar */
-#elif CONFIG_ZFT_DFLT_BLK_SZ == 0
-# undef  CONFIG_ZFT_DFLT_BLK_SZ
-# define CONFIG_ZFT_DFLT_BLK_SZ 1
-#elif (CONFIG_ZFT_DFLT_BLK_SZ % 1024) != 0
-# error CONFIG_ZFT_DFLT_BLK_SZ must be 1 or a multiple of 1024
-#endif
-/* The *optional* compression routines need some overhead per tape
- *  block for their purposes. Instead of asking the actual compression
- *  implementation how much it needs, we restrict this overhead to be
- *  maximal of ZFT_CMPT_OVERHEAD size. We need this for EOT
- *  conditions. The tape is assumed to be logical at EOT when the
- *  distance from the physical EOT is less than 
- *  one tape block + ZFT_CMPR_OVERHEAD 
- */
-#define ZFT_CMPR_OVERHEAD 16        /* bytes */
-
-typedef enum
-{ 
-	zft_idle = 0,
-	zft_reading,
-	zft_writing,
-} zft_status_enum;
-
-typedef struct               /*  all values measured in bytes */
-{
-	int   seg_pos;       /*  segment currently positioned at */
-	int   seg_byte_pos;  /*  offset in current segment */ 
-	__s64 tape_pos;      /*  real offset from BOT */
-	__s64 volume_pos;    /*  pos. in uncompressed data stream in
-			      *  current volume 
-			      */
-} zft_position; 
-
-extern zft_position zft_pos;
-extern __u8 *zft_deblock_buf;
-extern __u8 *zft_hseg_buf;
-extern int zft_deblock_segment;
-extern zft_status_enum zft_io_state;
-extern int zft_header_changed;
-extern int zft_qic113; /* conform to old specs. and old zftape */
-extern int zft_use_compression;
-extern unsigned int zft_blk_sz;
-extern __s64 zft_capacity;
-extern unsigned int zft_written_segments;
-extern int zft_label_changed;
-
-/*  zftape-rw.c exported functions
- */
-extern unsigned int zft_get_seg_sz(unsigned int segment);
-extern void  zft_set_flags(unsigned int minor_unit);
-extern int   zft_calc_seg_byte_coord(int *seg_byte_pos, __s64 tape_pos);
-extern __s64 zft_calc_tape_pos(int segment);
-extern __s64 zft_get_capacity(void);
-extern void  zft_update_label(__u8 *buffer);
-extern int   zft_erase(void);
-extern int   zft_verify_write_segments(unsigned int segment, 
-				       __u8 *data, size_t size, __u8 *buffer);
-extern unsigned int zft_get_time(void);
-#endif /* _ZFTAPE_RW_H */
-
diff --git a/drivers/char/ftape/zftape/zftape-vtbl.c b/drivers/char/ftape/zftape/zftape-vtbl.c
deleted file mode 100644
index ad7f8be..0000000
--- a/drivers/char/ftape/zftape/zftape-vtbl.c
+++ /dev/null
@@ -1,757 +0,0 @@
-/*
- *      Copyright (c) 1995-1997 Claus-Justus Heine 
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
- 
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- General Public License for more details.
- 
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/zftape/zftape-vtbl.c,v $
- * $Revision: 1.7.6.1 $
- * $Date: 1997/11/24 13:48:31 $
- *
- *      This file defines a volume table as defined in various QIC
- *      standards.
- * 
- *      This is a minimal implementation, just allowing ordinary DOS
- *      :( prgrams to identify the cartridge as used.
- */
-
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-
-#include <linux/zftape.h>
-#include "../zftape/zftape-init.h"
-#include "../zftape/zftape-eof.h"
-#include "../zftape/zftape-ctl.h"
-#include "../zftape/zftape-write.h"
-#include "../zftape/zftape-read.h"
-#include "../zftape/zftape-rw.h"
-#include "../zftape/zftape-vtbl.h"
-
-#define ZFT_CMAP_HACK /* leave this defined to hide the compression map */
-
-/*
- *  global variables 
- */
-int zft_qic_mode   = 1; /* use the vtbl */
-int zft_old_ftape; /* prevents old ftaped tapes to be overwritten */
-int zft_volume_table_changed; /* for write_header_segments() */
-
-/*
- *  private variables (only exported for inline functions)
- */
-LIST_HEAD(zft_vtbl);
-
-/*  We could also allocate these dynamically when extracting the volume table
- *  sizeof(zft_volinfo) is about 32 or something close to that
- */
-static zft_volinfo  tape_vtbl;
-static zft_volinfo  eot_vtbl;
-static zft_volinfo *cur_vtbl;
-
-static inline void zft_new_vtbl_entry(void)
-{
-	struct list_head *tmp = &zft_last_vtbl->node;
-	zft_volinfo *new = zft_kmalloc(sizeof(zft_volinfo));
-
-	list_add(&new->node, tmp);
-	new->count = zft_eom_vtbl->count ++;
-}
-
-void zft_free_vtbl(void)
-{
-	for (;;) {
-		struct list_head *tmp = zft_vtbl.prev;
-		zft_volinfo *vtbl;
-
-		if (tmp == &zft_vtbl)
-			break;
-		list_del(tmp);
-		vtbl = list_entry(tmp, zft_volinfo, node);
-		zft_kfree(vtbl, sizeof(zft_volinfo));
-	}
-	INIT_LIST_HEAD(&zft_vtbl);
-	cur_vtbl = NULL;
-}
-
-/*  initialize vtbl, called by ftape_new_cartridge()
- */
-void zft_init_vtbl(void)
-{ 
-	zft_volinfo *new;
-
-	zft_free_vtbl();
-	
-	/*  Create the two dummy vtbl entries
-	 */
-	new = zft_kmalloc(sizeof(zft_volinfo));
-	list_add(&new->node, &zft_vtbl);
-	new = zft_kmalloc(sizeof(zft_volinfo));
-	list_add(&new->node, &zft_vtbl);
-	zft_head_vtbl->end_seg   = ft_first_data_segment;
-	zft_head_vtbl->blk_sz    = zft_blk_sz;
-	zft_head_vtbl->count     = -1;
-	zft_eom_vtbl->start_seg  = ft_first_data_segment + 1;
-	zft_eom_vtbl->end_seg    = ft_last_data_segment + 1;
-	zft_eom_vtbl->blk_sz     = zft_blk_sz;
-	zft_eom_vtbl->count      = 0;
-
-	/*  Reset the pointer for zft_find_volume()
-	 */
-	cur_vtbl = zft_eom_vtbl;
-
-	/* initialize the dummy vtbl entries for zft_qic_mode == 0
-	 */
-	eot_vtbl.start_seg       = ft_last_data_segment + 1;
-	eot_vtbl.end_seg         = ft_last_data_segment + 1;
-	eot_vtbl.blk_sz          = zft_blk_sz;
-	eot_vtbl.count           = -1;
-	tape_vtbl.start_seg = ft_first_data_segment;
-	tape_vtbl.end_seg   = ft_last_data_segment;
-	tape_vtbl.blk_sz    = zft_blk_sz;
-	tape_vtbl.size      = zft_capacity;
-	tape_vtbl.count     = 0;
-}
-
-/* check for a valid VTBL signature. 
- */
-static int vtbl_signature_valid(__u8 signature[4])
-{
-	const char *vtbl_ids[] = VTBL_IDS; /* valid signatures */
-	int j;
-	
-	for (j = 0; 
-	     (j < NR_ITEMS(vtbl_ids)) && (memcmp(signature, vtbl_ids[j], 4) != 0);
-	     j++);
-	return j < NR_ITEMS(vtbl_ids);
-}
-
-/* We used to store the block-size of the volume in the volume-label,
- * using the keyword "blocksize". The blocksize written to the
- * volume-label is in bytes.
- *
- * We use this now only for compatibility with old zftape version. We
- * store the blocksize directly as binary number in the vendor
- * extension part of the volume entry.
- */
-static int check_volume_label(const char *label, int *blk_sz)
-{ 
-	int valid_format;
-	char *blocksize;
-	TRACE_FUN(ft_t_flow);
-	
-	TRACE(ft_t_noise, "called with \"%s\" / \"%s\"", label, ZFT_VOL_NAME);
-	if (strncmp(label, ZFT_VOL_NAME, strlen(ZFT_VOL_NAME)) != 0) {
-		*blk_sz = 1; /* smallest block size that we allow */
-		valid_format = 0;
-	} else {
-		TRACE(ft_t_noise, "got old style zftape vtbl entry");
-		/* get the default blocksize */
-		/* use the kernel strstr()   */
-		blocksize= strstr(label, " blocksize ");
-		if (blocksize) {
-			blocksize += strlen(" blocksize ");
-			for(*blk_sz= 0; 
-			    *blocksize >= '0' && *blocksize <= '9'; 
-			    blocksize++) {
-				*blk_sz *= 10;
-				*blk_sz += *blocksize - '0';
-			}
-			if (*blk_sz > ZFT_MAX_BLK_SZ) {
-				*blk_sz= 1;
-				valid_format= 0;
-			} else {
-				valid_format = 1;
-			}
-		} else {
-			*blk_sz= 1;
-			valid_format= 0;
-		}
-	}
-	TRACE_EXIT valid_format;
-}
-
-/*   check for a zftape volume
- */
-static int check_volume(__u8 *entry, zft_volinfo *volume)
-{
-	TRACE_FUN(ft_t_flow);
-	
-	if(strncmp(&entry[VTBL_EXT+EXT_ZFTAPE_SIG], ZFTAPE_SIG,
-		   strlen(ZFTAPE_SIG)) == 0) {
-		TRACE(ft_t_noise, "got new style zftape vtbl entry");
-		volume->blk_sz = GET2(entry, VTBL_EXT+EXT_ZFTAPE_BLKSZ);
-		volume->qic113 = entry[VTBL_EXT+EXT_ZFTAPE_QIC113];
-		TRACE_EXIT 1;
-	} else {
-		TRACE_EXIT check_volume_label(&entry[VTBL_DESC], &volume->blk_sz);
-	}
-}
-
-
-/* create zftape specific vtbl entry, the volume bounds are inserted
- * in the calling function, zft_create_volume_headers()
- */
-static void create_zft_volume(__u8 *entry, zft_volinfo *vtbl)
-{
-	TRACE_FUN(ft_t_flow);
-
-	memset(entry, 0, VTBL_SIZE);
-	memcpy(&entry[VTBL_SIG], VTBL_ID, 4);
-	sprintf(&entry[VTBL_DESC], ZFT_VOL_NAME" %03d", vtbl->count);
-	entry[VTBL_FLAGS] = (VTBL_FL_NOT_VERIFIED | VTBL_FL_SEG_SPANNING);
-	entry[VTBL_M_NO] = 1; /* multi_cartridge_count */
-	strcpy(&entry[VTBL_EXT+EXT_ZFTAPE_SIG], ZFTAPE_SIG);
-	PUT2(entry, VTBL_EXT+EXT_ZFTAPE_BLKSZ, vtbl->blk_sz);
-	if (zft_qic113) {
-		PUT8(entry, VTBL_DATA_SIZE, vtbl->size);
-		entry[VTBL_CMPR] = VTBL_CMPR_UNREG; 
-		if (vtbl->use_compression) { /* use compression: */
-			entry[VTBL_CMPR] |= VTBL_CMPR_USED;
-		}
-		entry[VTBL_EXT+EXT_ZFTAPE_QIC113] = 1;
-	} else {
-		PUT4(entry, VTBL_DATA_SIZE, vtbl->size);
-		entry[VTBL_K_CMPR] = VTBL_CMPR_UNREG; 
-		if (vtbl->use_compression) { /* use compression: */
-			entry[VTBL_K_CMPR] |= VTBL_CMPR_USED;
-		}
-	}
-	if (ft_format_code == fmt_big) {
-		/* SCSI like vtbl, store the number of used
-		 * segments as 4 byte value 
-		 */
-		PUT4(entry, VTBL_SCSI_SEGS, vtbl->end_seg-vtbl->start_seg + 1);
-	} else {
-		/* normal, QIC-80MC like vtbl 
-		 */
-		PUT2(entry, VTBL_START, vtbl->start_seg);
-		PUT2(entry, VTBL_END, vtbl->end_seg);
-	}
-	TRACE_EXIT;
-}
-
-/* this one creates the volume headers for each volume. It is assumed
- * that buffer already contains the old volume-table, so that vtbl
- * entries without the zft_volume flag set can savely be ignored.
- */
-static void zft_create_volume_headers(__u8 *buffer)
-{   
-	__u8 *entry;
-	struct list_head *tmp;
-	zft_volinfo *vtbl;
-	TRACE_FUN(ft_t_flow);
-	
-#ifdef ZFT_CMAP_HACK
-	if((strncmp(&buffer[VTBL_EXT+EXT_ZFTAPE_SIG], ZFTAPE_SIG,
-		    strlen(ZFTAPE_SIG)) == 0) && 
-	   buffer[VTBL_EXT+EXT_ZFTAPE_CMAP] != 0) {
-		TRACE(ft_t_noise, "deleting cmap volume");
-		memmove(buffer, buffer + VTBL_SIZE,
-			FT_SEGMENT_SIZE - VTBL_SIZE);
-	}
-#endif
-	entry = buffer;
-	for (tmp = zft_head_vtbl->node.next;
-	     tmp != &zft_eom_vtbl->node;
-	     tmp = tmp->next) {
-		vtbl = list_entry(tmp, zft_volinfo, node);
-		/* we now fill in the values only for newly created volumes.
-		 */
-		if (vtbl->new_volume) {
-			create_zft_volume(entry, vtbl);
-			vtbl->new_volume = 0; /* clear the flag */
-		}
-		
-		DUMP_VOLINFO(ft_t_noise, &entry[VTBL_DESC], vtbl);
-		entry += VTBL_SIZE;
-	}
-	memset(entry, 0, FT_SEGMENT_SIZE - zft_eom_vtbl->count * VTBL_SIZE);
-	TRACE_EXIT;
-}
-
-/*  write volume table to tape. Calls zft_create_volume_headers()
- */
-int zft_update_volume_table(unsigned int segment)
-{
-	int result = 0;
-	__u8 *verify_buf = NULL;
-	TRACE_FUN(ft_t_flow);
-
-	TRACE_CATCH(result = ftape_read_segment(ft_first_data_segment, 
-						zft_deblock_buf,
-						FT_RD_SINGLE),);
-	zft_create_volume_headers(zft_deblock_buf);
-	TRACE(ft_t_noise, "writing volume table segment %d", segment);
-	if (zft_vmalloc_once(&verify_buf, FT_SEGMENT_SIZE) == 0) {
-		TRACE_CATCH(zft_verify_write_segments(segment, 
-						      zft_deblock_buf, result,
-						      verify_buf),
-			    zft_vfree(&verify_buf, FT_SEGMENT_SIZE));
-		zft_vfree(&verify_buf, FT_SEGMENT_SIZE);
-	} else {
-		TRACE_CATCH(ftape_write_segment(segment, zft_deblock_buf, 
-						FT_WR_SINGLE),);
-	}
-	TRACE_EXIT 0;
-}
-
-/* non zftape volumes are handled in raw mode. Thus we need to
- * calculate the raw amount of data contained in those segments.  
- */
-static void extract_alien_volume(__u8 *entry, zft_volinfo *vtbl)
-{
-	TRACE_FUN(ft_t_flow);
-
-	vtbl->size  = (zft_calc_tape_pos(zft_last_vtbl->end_seg+1) -
-		       zft_calc_tape_pos(zft_last_vtbl->start_seg));
-	vtbl->use_compression = 0;
-	vtbl->qic113 = zft_qic113;
-	if (vtbl->qic113) {
-		TRACE(ft_t_noise, 
-		      "Fake alien volume's size from " LL_X " to " LL_X, 
-		      LL(GET8(entry, VTBL_DATA_SIZE)), LL(vtbl->size));
-	} else {
-		TRACE(ft_t_noise,
-		      "Fake alien volume's size from %d to " LL_X, 
-		      (int)GET4(entry, VTBL_DATA_SIZE), LL(vtbl->size));
-	}
-	TRACE_EXIT;
-}
-
-
-/* extract an zftape specific volume
- */
-static void extract_zft_volume(__u8 *entry, zft_volinfo *vtbl)
-{
-	TRACE_FUN(ft_t_flow);
-
-	if (vtbl->qic113) {
-		vtbl->size = GET8(entry, VTBL_DATA_SIZE);
-		vtbl->use_compression = 
-			(entry[VTBL_CMPR] & VTBL_CMPR_USED) != 0; 
-	} else {
-		vtbl->size = GET4(entry, VTBL_DATA_SIZE);
-		if (entry[VTBL_K_CMPR] & VTBL_CMPR_UNREG) {
-			vtbl->use_compression = 
-				(entry[VTBL_K_CMPR] & VTBL_CMPR_USED) != 0;
-		} else if (entry[VTBL_CMPR] & VTBL_CMPR_UNREG) {
-			vtbl->use_compression = 
-				(entry[VTBL_CMPR] & VTBL_CMPR_USED) != 0; 
-		} else {
-			TRACE(ft_t_warn, "Geeh! There is something wrong:\n"
-			      KERN_INFO "QIC compression (Rev = K): %x\n"
-			      KERN_INFO "QIC compression (Rev > K): %x",
-			      entry[VTBL_K_CMPR], entry[VTBL_CMPR]);
-		}
-	}
-	TRACE_EXIT;
-}
-
-/* extract the volume table from buffer. "buffer" must already contain
- * the vtbl-segment 
- */
-int zft_extract_volume_headers(__u8 *buffer)
-{                            
-        __u8 *entry;
-	TRACE_FUN(ft_t_flow);
-	
-	zft_init_vtbl();
-	entry = buffer;
-#ifdef ZFT_CMAP_HACK
-	if ((strncmp(&entry[VTBL_EXT+EXT_ZFTAPE_SIG], ZFTAPE_SIG,
-		     strlen(ZFTAPE_SIG)) == 0) &&
-	    entry[VTBL_EXT+EXT_ZFTAPE_CMAP] != 0) {
-		TRACE(ft_t_noise, "ignoring cmap volume");
-		entry += VTBL_SIZE;
-	} 
-#endif
-	/* the end of the vtbl is indicated by an invalid signature 
-	 */
-	while (vtbl_signature_valid(&entry[VTBL_SIG]) &&
-	       (entry - buffer) < FT_SEGMENT_SIZE) {
-		zft_new_vtbl_entry();
-		if (ft_format_code == fmt_big) {
-			/* SCSI like vtbl, stores only the number of
-			 * segments used 
-			 */
-			unsigned int num_segments= GET4(entry, VTBL_SCSI_SEGS);
-			zft_last_vtbl->start_seg = zft_eom_vtbl->start_seg;
-			zft_last_vtbl->end_seg = 
-				zft_last_vtbl->start_seg + num_segments - 1;
-		} else {
-			/* `normal', QIC-80 like vtbl 
-			 */
-			zft_last_vtbl->start_seg = GET2(entry, VTBL_START);
-			zft_last_vtbl->end_seg   = GET2(entry, VTBL_END);
-		}
-		zft_eom_vtbl->start_seg  = zft_last_vtbl->end_seg + 1;
-		/* check if we created this volume and get the
-		 * blk_sz 
-		 */
-		zft_last_vtbl->zft_volume = check_volume(entry, zft_last_vtbl);
-		if (zft_last_vtbl->zft_volume == 0) {
-			extract_alien_volume(entry, zft_last_vtbl);
-		} else {
-			extract_zft_volume(entry, zft_last_vtbl);
-		}
-		DUMP_VOLINFO(ft_t_noise, &entry[VTBL_DESC], zft_last_vtbl);
-		entry +=VTBL_SIZE;
-	}
-#if 0
-/*
- *  undefine to test end of tape handling
- */
-	zft_new_vtbl_entry();
-	zft_last_vtbl->start_seg = zft_eom_vtbl->start_seg;
-	zft_last_vtbl->end_seg   = ft_last_data_segment - 10;
-	zft_last_vtbl->blk_sz          = zft_blk_sz;
-	zft_last_vtbl->zft_volume      = 1;
-	zft_last_vtbl->qic113          = zft_qic113;
-	zft_last_vtbl->size = (zft_calc_tape_pos(zft_last_vtbl->end_seg+1)
-			       - zft_calc_tape_pos(zft_last_vtbl->start_seg));
-#endif
-	TRACE_EXIT 0;
-}
-
-/* this functions translates the failed_sector_log, misused as
- * EOF-marker list, into a virtual volume table. The table mustn't be
- * written to tape, because this would occupy the first data segment,
- * which should be the volume table, but is actually the first segment
- * that is filled with data (when using standard ftape).  We assume,
- * that we get a non-empty failed_sector_log.
- */
-int zft_fake_volume_headers (eof_mark_union *eof_map, int num_failed_sectors)
-{
-	unsigned int segment, sector;
-	int have_eom = 0;
-	int vol_no;
-	TRACE_FUN(ft_t_flow);
-
-	if ((num_failed_sectors >= 2) &&
-	    (GET2(&eof_map[num_failed_sectors - 1].mark.segment, 0) 
-	     == 
-	     GET2(&eof_map[num_failed_sectors - 2].mark.segment, 0) + 1) &&
-	    (GET2(&eof_map[num_failed_sectors - 1].mark.date, 0) == 1)) {
-		/* this should be eom. We keep the remainder of the
-		 * tape as another volume.
-		 */
-		have_eom = 1;
-	}
-	zft_init_vtbl();
-	zft_eom_vtbl->start_seg = ft_first_data_segment;
-	for(vol_no = 0; vol_no < num_failed_sectors - have_eom; vol_no ++) {
-		zft_new_vtbl_entry();
-
-		segment = GET2(&eof_map[vol_no].mark.segment, 0);
-		sector  = GET2(&eof_map[vol_no].mark.date, 0);
-
-		zft_last_vtbl->start_seg  = zft_eom_vtbl->start_seg;
-		zft_last_vtbl->end_seg    = segment;
-		zft_eom_vtbl->start_seg  = segment + 1;
-		zft_last_vtbl->blk_sz     = 1;
-		zft_last_vtbl->size       = 
-			(zft_calc_tape_pos(zft_last_vtbl->end_seg)
-			 - zft_calc_tape_pos(zft_last_vtbl->start_seg)
-			 + (sector-1) * FT_SECTOR_SIZE);
-		TRACE(ft_t_noise, 
-		      "failed sector log: segment: %d, sector: %d", 
-		      segment, sector);
-		DUMP_VOLINFO(ft_t_noise, "Faked volume", zft_last_vtbl);
-	}
-	if (!have_eom) {
-		zft_new_vtbl_entry();
-		zft_last_vtbl->start_seg = zft_eom_vtbl->start_seg;
-		zft_last_vtbl->end_seg   = ft_last_data_segment;
-		zft_eom_vtbl->start_seg  = ft_last_data_segment + 1;
-		zft_last_vtbl->size      = zft_capacity;
-		zft_last_vtbl->size     -= zft_calc_tape_pos(zft_last_vtbl->start_seg);
-		zft_last_vtbl->blk_sz    = 1;
-		DUMP_VOLINFO(ft_t_noise, "Faked volume",zft_last_vtbl);
-	}
-	TRACE_EXIT 0;
-}
-
-/* update the internal volume table
- *
- * if before start of last volume: erase all following volumes if
- * inside a volume: set end of volume to infinity
- *
- * this function is intended to be called every time _ftape_write() is
- * called
- *
- * return: 0 if no new volume was created, 1 if a new volume was
- * created
- *
- * NOTE: we don't need to check for zft_mode as ftape_write() does
- * that already. This function gets never called without accessing
- * zftape via the *qft* devices 
- */
-
-int zft_open_volume(zft_position *pos, int blk_sz, int use_compression)
-{ 
-	TRACE_FUN(ft_t_flow);
-	
-	if (!zft_qic_mode) {
-		TRACE_EXIT 0;
-	}
-	if (zft_tape_at_lbot(pos)) {
-		zft_init_vtbl();
-		if(zft_old_ftape) {
-			/* clear old ftape's eof marks */
-			zft_clear_ftape_file_marks();
-			zft_old_ftape = 0; /* no longer old ftape */
-		}
-		zft_reset_position(pos);
-	}
-	if (pos->seg_pos != zft_last_vtbl->end_seg + 1) {
-		TRACE_ABORT(-EIO, ft_t_bug, 
-		      "BUG: seg_pos: %d, zft_last_vtbl->end_seg: %d", 
-		      pos->seg_pos, zft_last_vtbl->end_seg);
-	}                            
-	TRACE(ft_t_noise, "create new volume");
-	if (zft_eom_vtbl->count >= ZFT_MAX_VOLUMES) {
-		TRACE_ABORT(-ENOSPC, ft_t_err,
-			    "Error: maximal number of volumes exhausted "
-			    "(maximum is %d)", ZFT_MAX_VOLUMES);
-	}
-	zft_new_vtbl_entry();
-	pos->volume_pos = pos->seg_byte_pos = 0;
-	zft_last_vtbl->start_seg       = pos->seg_pos;
-	zft_last_vtbl->end_seg         = ft_last_data_segment; /* infinity */
-	zft_last_vtbl->blk_sz          = blk_sz;
-	zft_last_vtbl->size            = zft_capacity;
-	zft_last_vtbl->zft_volume      = 1;
-	zft_last_vtbl->use_compression = use_compression;
-	zft_last_vtbl->qic113          = zft_qic113;
-	zft_last_vtbl->new_volume      = 1;
-	zft_last_vtbl->open            = 1;
-	zft_volume_table_changed = 1;
-	zft_eom_vtbl->start_seg  = ft_last_data_segment + 1;
-	TRACE_EXIT 0;
-}
-
-/*  perform mtfsf, mtbsf, not allowed without zft_qic_mode
- */
-int zft_skip_volumes(int count, zft_position *pos)
-{ 
-	const zft_volinfo *vtbl;
-	TRACE_FUN(ft_t_flow);
-	
-	TRACE(ft_t_noise, "count: %d", count);
-	
-	vtbl= zft_find_volume(pos->seg_pos);
-	while (count > 0 && vtbl != zft_eom_vtbl) {
-		vtbl = list_entry(vtbl->node.next, zft_volinfo, node);
-		count --;
-	}
-	while (count < 0 && vtbl != zft_first_vtbl) {
-		vtbl = list_entry(vtbl->node.prev, zft_volinfo, node);
-		count ++;
-	}
-	pos->seg_pos        = vtbl->start_seg;
-	pos->seg_byte_pos   = 0;
-	pos->volume_pos     = 0;
-	pos->tape_pos       = zft_calc_tape_pos(pos->seg_pos);
-	zft_just_before_eof = vtbl->size == 0;
-	if (zft_cmpr_ops) {
-		(*zft_cmpr_ops->reset)();
-	}
-	zft_deblock_segment = -1; /* no need to keep cache */
-	TRACE(ft_t_noise, "repositioning to:\n"
-	      KERN_INFO "zft_seg_pos        : %d\n"
-	      KERN_INFO "zft_seg_byte_pos   : %d\n"
-	      KERN_INFO "zft_tape_pos       : " LL_X "\n"
-	      KERN_INFO "zft_volume_pos     : " LL_X "\n"
-	      KERN_INFO "file number        : %d",
-	      pos->seg_pos, pos->seg_byte_pos, 
-	      LL(pos->tape_pos), LL(pos->volume_pos), vtbl->count);
-	zft_resid = count < 0 ? -count : count;
-	TRACE_EXIT zft_resid ? -EINVAL : 0;
-}
-
-/* the following simply returns the raw data position of the EOM
- * marker, MTIOCSIZE ioctl 
- */
-__s64 zft_get_eom_pos(void)
-{
-	if (zft_qic_mode) {
-		return zft_calc_tape_pos(zft_eom_vtbl->start_seg);
-	} else {
-		/* there is only one volume in raw mode */
-		return zft_capacity;
-	}
-}
-
-/* skip to eom, used for MTEOM
- */
-void zft_skip_to_eom(zft_position *pos)
-{
-	TRACE_FUN(ft_t_flow);
-	pos->seg_pos      = zft_eom_vtbl->start_seg;
-	pos->seg_byte_pos = 
-		pos->volume_pos     = 
-		zft_just_before_eof = 0;
-	pos->tape_pos = zft_calc_tape_pos(pos->seg_pos);
-	TRACE(ft_t_noise, "ftape positioned to segment %d, data pos " LL_X, 
-	      pos->seg_pos, LL(pos->tape_pos));
-	TRACE_EXIT;
-}
-
-/*  write an EOF-marker by setting zft_last_vtbl->end_seg to seg_pos.
- *  NOTE: this function assumes that zft_last_vtbl points to a valid
- *  vtbl entry
- *
- *  NOTE: this routine always positions before the EOF marker
- */
-int zft_close_volume(zft_position *pos)
-{
-	TRACE_FUN(ft_t_any);
-
-	if (zft_vtbl_empty || !zft_last_vtbl->open) { /* should not happen */
-		TRACE(ft_t_noise, "There are no volumes to finish");
-		TRACE_EXIT -EIO;
-	}
-	if (pos->seg_byte_pos == 0 && 
-	    pos->seg_pos != zft_last_vtbl->start_seg) {
-		pos->seg_pos --;
-		pos->seg_byte_pos      = zft_get_seg_sz(pos->seg_pos);
-	}
-	zft_last_vtbl->end_seg   = pos->seg_pos;
-	zft_last_vtbl->size      = pos->volume_pos;
-	zft_volume_table_changed = 1;
-	zft_just_before_eof      = 1;
-	zft_eom_vtbl->start_seg  = zft_last_vtbl->end_seg + 1;
-	zft_last_vtbl->open      = 0; /* closed */
-	TRACE_EXIT 0;
-}
-
-/* write count file-marks at current position. 
- *
- *  The tape is positioned after the eof-marker, that is at byte 0 of
- *  the segment following the eof-marker
- *
- *  this function is only allowed in zft_qic_mode
- *
- *  Only allowed when tape is at BOT or EOD.
- */
-int zft_weof(unsigned int count, zft_position *pos)
-{
-	
-	TRACE_FUN(ft_t_flow);
-
-	if (!count) { /* write zero EOF marks should be a real no-op */
-		TRACE_EXIT 0;
-	}
-	zft_volume_table_changed = 1;
-	if (zft_tape_at_lbot(pos)) {
-		zft_init_vtbl();
-		if(zft_old_ftape) {
-			/* clear old ftape's eof marks */
-			zft_clear_ftape_file_marks();
-			zft_old_ftape = 0;    /* no longer old ftape */
-		}
-	}
-	if (zft_last_vtbl->open) {
-		zft_close_volume(pos);
-		zft_move_past_eof(pos);
-		count --;
-	}
-	/* now it's easy, just append eof-marks, that is empty
-	 * volumes, to the end of the already recorded media.
-	 */
-	while (count > 0 && 
-	       pos->seg_pos <= ft_last_data_segment && 
-	       zft_eom_vtbl->count < ZFT_MAX_VOLUMES) {
-		TRACE(ft_t_noise,
-		      "Writing zero sized file at segment %d", pos->seg_pos);
-		zft_new_vtbl_entry();
-		zft_last_vtbl->start_seg       = pos->seg_pos;
-		zft_last_vtbl->end_seg         = pos->seg_pos;
-		zft_last_vtbl->size            = 0;
-		zft_last_vtbl->blk_sz          = zft_blk_sz;
-		zft_last_vtbl->zft_volume      = 1;
-		zft_last_vtbl->use_compression = 0;
-		pos->tape_pos += zft_get_seg_sz(pos->seg_pos);
-		zft_eom_vtbl->start_seg = ++ pos->seg_pos;
-		count --;
-	} 
-	if (count > 0) {
-		/*  there are two possibilities: end of tape, or the
-		 *  maximum number of files is exhausted.
-		 */
-		zft_resid = count;
-		TRACE(ft_t_noise,"Number of marks NOT written: %d", zft_resid);
-		if (zft_eom_vtbl->count == ZFT_MAX_VOLUMES) {
-			TRACE_ABORT(-EINVAL, ft_t_warn,
-				    "maximum allowed number of files "
-				    "exhausted: %d", ZFT_MAX_VOLUMES);
-		} else {
-			TRACE_ABORT(-ENOSPC,
-				    ft_t_noise, "reached end of tape");
-		}
-	}
-	TRACE_EXIT 0;
-}
-
-const zft_volinfo *zft_find_volume(unsigned int seg_pos)
-{
-	TRACE_FUN(ft_t_flow);
-	
-	TRACE(ft_t_any, "called with seg_pos %d",seg_pos);
-	if (!zft_qic_mode) {
-		if (seg_pos > ft_last_data_segment) {
-			TRACE_EXIT &eot_vtbl;
-		}
-		tape_vtbl.blk_sz =  zft_blk_sz;
-		TRACE_EXIT &tape_vtbl;
-	}
-	if (seg_pos < zft_first_vtbl->start_seg) {
-		TRACE_EXIT (cur_vtbl = zft_first_vtbl);
-	}
-	while (seg_pos > cur_vtbl->end_seg) {
-		cur_vtbl = list_entry(cur_vtbl->node.next, zft_volinfo, node);
-		TRACE(ft_t_noise, "%d - %d", cur_vtbl->start_seg, cur_vtbl->end_seg);
-	}
-	while (seg_pos < cur_vtbl->start_seg) {
-		cur_vtbl = list_entry(cur_vtbl->node.prev, zft_volinfo, node);
-		TRACE(ft_t_noise, "%d - %d", cur_vtbl->start_seg, cur_vtbl->end_seg);
-	}
-	if (seg_pos > cur_vtbl->end_seg || seg_pos < cur_vtbl->start_seg) {
-		TRACE(ft_t_bug, "This cannot happen");
-	}
-	DUMP_VOLINFO(ft_t_noise, "", cur_vtbl);
-	TRACE_EXIT cur_vtbl;
-}
-
-/* this function really assumes that we are just before eof
- */
-void zft_move_past_eof(zft_position *pos)
-{ 
-	TRACE_FUN(ft_t_flow);
-
-	TRACE(ft_t_noise, "old seg. pos: %d", pos->seg_pos);
-	pos->tape_pos += zft_get_seg_sz(pos->seg_pos++) - pos->seg_byte_pos;
-	pos->seg_byte_pos = 0;
-	pos->volume_pos   = 0;
-	if (zft_cmpr_ops) {
-		(*zft_cmpr_ops->reset)();
-	}
-	zft_just_before_eof =  0;
-	zft_deblock_segment = -1; /* no need to cache it anymore */
-	TRACE(ft_t_noise, "new seg. pos: %d", pos->seg_pos);
-	TRACE_EXIT;
-}
diff --git a/drivers/char/ftape/zftape/zftape-vtbl.h b/drivers/char/ftape/zftape/zftape-vtbl.h
deleted file mode 100644
index f31d196..0000000
--- a/drivers/char/ftape/zftape/zftape-vtbl.h
+++ /dev/null
@@ -1,227 +0,0 @@
-#ifndef _ZFTAPE_VTBL_H
-#define _ZFTAPE_VTBL_H
-
-/*
- *      Copyright (c) 1995-1997  Claus-Justus Heine
-
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2, or (at
- your option) any later version.
- 
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- General Public License for more details.
- 
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/zftape/zftape-vtbl.h,v $
- * $Revision: 1.3 $
- * $Date: 1997/10/28 14:30:09 $
- *
- *      This file defines a volume table as defined in the QIC-80
- *      development standards.
- */
-
-#include <linux/list.h>
-
-#include "../lowlevel/ftape-tracing.h"
-
-#include "../zftape/zftape-eof.h"
-#include "../zftape/zftape-ctl.h"
-#include "../zftape/zftape-rw.h"
-
-#define VTBL_SIZE 128 /* bytes */
-
-/* The following are offsets in the vtbl.  */
-#define VTBL_SIG   0
-#define VTBL_START 4
-#define VTBL_END   6
-#define VTBL_DESC  8
-#define VTBL_DATE  52
-#define VTBL_FLAGS 56
-#define VTBL_FL_VENDOR_SPECIFIC (1<<0)
-#define VTBL_FL_MUTLI_CARTRIDGE (1<<1)
-#define VTBL_FL_NOT_VERIFIED    (1<<2)
-#define VTBL_FL_REDIR_INHIBIT   (1<<3)
-#define VTBL_FL_SEG_SPANNING    (1<<4)
-#define VTBL_FL_DIRECTORY_LAST  (1<<5)
-#define VTBL_FL_RESERVED_6      (1<<6)
-#define VTBL_FL_RESERVED_7      (1<<7)
-#define VTBL_M_NO  57
-#define VTBL_EXT   58
-#define EXT_ZFTAPE_SIG     0
-#define EXT_ZFTAPE_BLKSZ  10
-#define EXT_ZFTAPE_CMAP   12
-#define EXT_ZFTAPE_QIC113 13
-#define VTBL_PWD   84
-#define VTBL_DIR_SIZE 92
-#define VTBL_DATA_SIZE 96
-#define VTBL_OS_VERSION 104
-#define VTBL_SRC_DRIVE  106
-#define VTBL_DEV        122
-#define VTBL_RESERVED_1 123
-#define VTBL_CMPR       124
-#define VTBL_CMPR_UNREG 0x3f
-#define VTBL_CMPR_USED  0x80
-#define VTBL_FMT        125
-#define VTBL_RESERVED_2 126
-#define VTBL_RESERVED_3 127
-/* compatibility with pre revision K */
-#define VTBL_K_CMPR     120 
-
-/*  the next is used by QIC-3020 tapes with format code 6 (>2^16
- *  segments). It is specified in QIC-113, Rev. G, Section 5 (SCSI
- *  volume table). The difference is simply that we only store the
- *  number of segments used, not the starting segment.
- */
-#define VTBL_SCSI_SEGS  4 /* is a 4 byte value */
-
-/*  one vtbl is 128 bytes, that results in a maximum number of
- *  29*1024/128 = 232 volumes.
- */
-#define ZFT_MAX_VOLUMES (FT_SEGMENT_SIZE/VTBL_SIZE)
-#define VTBL_ID  "VTBL"
-#define VTBL_IDS { VTBL_ID, "XTBL", "UTID", "EXVT" } /* other valid ids */
-#define ZFT_VOL_NAME "zftape volume" /* volume label used by me */
-#define ZFTAPE_SIG "LINUX ZFT"
-
-/*  global variables
- */
-typedef struct zft_internal_vtbl
-{
-	struct list_head node;
-	int          count;
-	unsigned int start_seg;         /* 32 bits are enough for now */
-	unsigned int end_seg;           /* 32 bits are enough for now */
-	__s64        size;              /* uncompressed size */
-        unsigned int blk_sz;            /* block size for this volume */
-	unsigned int zft_volume     :1; /* zftape created this volume */
-	unsigned int use_compression:1; /* compressed volume  */
-	unsigned int qic113         :1; /* layout of compressed block
-					 * info and vtbl conforms to
-					 * QIC-113, Rev. G 
-					 */
-	unsigned int new_volume     :1; /* it was created by us, this
-					 * run.  this allows the
-					 * fields that aren't really
-					 * used by zftape to be filled
-					 * in by some user level
-					 * program.
-					 */
-	unsigned int open           :1; /* just in progress of being 
-					 * written
-					 */
-} zft_volinfo;
-
-extern struct list_head zft_vtbl;
-#define zft_head_vtbl  list_entry(zft_vtbl.next, zft_volinfo, node)
-#define zft_eom_vtbl   list_entry(zft_vtbl.prev, zft_volinfo, node)
-#define zft_last_vtbl  list_entry(zft_eom_vtbl->node.prev, zft_volinfo, node)
-#define zft_first_vtbl list_entry(zft_head_vtbl->node.next, zft_volinfo, node)
-#define zft_vtbl_empty (zft_eom_vtbl->node.prev == &zft_head_vtbl->node)
-
-#define DUMP_VOLINFO(level, desc, info)					\
-{									\
-	char tmp[21];							\
-	strlcpy(tmp, desc, sizeof(tmp));				\
-	TRACE(level, "Volume %d:\n"					\
-	      KERN_INFO "description  : %s\n"				\
-	      KERN_INFO "first segment: %d\n"				\
-	      KERN_INFO "last  segment: %d\n"				\
-	      KERN_INFO "size         : " LL_X "\n"			\
-	      KERN_INFO "block size   : %d\n"				\
-	      KERN_INFO "compression  : %d\n"				\
-	      KERN_INFO "zftape volume: %d\n"				\
-	      KERN_INFO "QIC-113 conf.: %d",				\
-	      (info)->count, tmp, (info)->start_seg, (info)->end_seg,	\
-	      LL((info)->size), (info)->blk_sz,				\
-	      (info)->use_compression != 0, (info)->zft_volume != 0,	\
-	      (info)->qic113 != 0);					\
-}
-
-extern int zft_qic_mode;
-extern int zft_old_ftape;
-extern int zft_volume_table_changed;
-
-/* exported functions */
-extern void  zft_init_vtbl             (void);
-extern void  zft_free_vtbl             (void);
-extern int   zft_extract_volume_headers(__u8 *buffer);
-extern int   zft_update_volume_table   (unsigned int segment);
-extern int   zft_open_volume           (zft_position *pos,
-					int blk_sz, int use_compression);
-extern int   zft_close_volume          (zft_position *pos);
-extern const zft_volinfo *zft_find_volume(unsigned int seg_pos);
-extern int   zft_skip_volumes          (int count, zft_position *pos);
-extern __s64 zft_get_eom_pos           (void);
-extern void  zft_skip_to_eom           (zft_position *pos);
-extern int   zft_fake_volume_headers   (eof_mark_union *eof_map, 
-					int num_failed_sectors);
-extern int   zft_weof                  (unsigned int count, zft_position *pos);
-extern void  zft_move_past_eof         (zft_position *pos);
-
-static inline int   zft_tape_at_eod         (const zft_position *pos);
-static inline int   zft_tape_at_lbot        (const zft_position *pos);
-static inline void  zft_position_before_eof (zft_position *pos, 
-					     const zft_volinfo *volume);
-static inline __s64 zft_check_for_eof(const zft_volinfo *vtbl,
-				      const zft_position *pos);
-
-/* this function decrements the zft_seg_pos counter if we are right
- * at the beginning of a segment. This is to handle fsfm/bsfm -- we
- * need to position before the eof mark.  NOTE: zft_tape_pos is not
- * changed 
- */
-static inline void zft_position_before_eof(zft_position *pos, 
-					   const zft_volinfo *volume)
-{ 
-	TRACE_FUN(ft_t_flow);
-
-	if (pos->seg_pos == volume->end_seg + 1 &&  pos->seg_byte_pos == 0) {
-		pos->seg_pos --;
-		pos->seg_byte_pos = zft_get_seg_sz(pos->seg_pos);
-	}
-	TRACE_EXIT;
-}
-
-/*  Mmmh. Is the position at the end of the last volume, that is right
- *  before the last EOF mark, also logically an EOD condition?
- */
-static inline int zft_tape_at_eod(const zft_position *pos)
-{ 
-	TRACE_FUN(ft_t_any);
-
-	if (zft_qic_mode) {
-		TRACE_EXIT (pos->seg_pos >= zft_eom_vtbl->start_seg ||
-			    zft_last_vtbl->open);
-	} else {
-		TRACE_EXIT pos->seg_pos > ft_last_data_segment;
-	}
-}
-
-static inline int zft_tape_at_lbot(const zft_position *pos)
-{
-	if (zft_qic_mode) {
-		return (pos->seg_pos <= zft_first_vtbl->start_seg &&
-			pos->volume_pos == 0);
-	} else {
-		return (pos->seg_pos <= ft_first_data_segment && 
-			pos->volume_pos == 0);
-	}
-}
-
-/* This one checks for EOF.  return remaining space (may be negative)
- */
-static inline __s64 zft_check_for_eof(const zft_volinfo *vtbl,
-				      const zft_position *pos)
-{     
-	return (__s64)(vtbl->size - pos->volume_pos);
-}
-
-#endif /* _ZFTAPE_VTBL_H */
diff --git a/drivers/char/ftape/zftape/zftape-write.c b/drivers/char/ftape/zftape/zftape-write.c
deleted file mode 100644
index 94327b8..0000000
--- a/drivers/char/ftape/zftape/zftape-write.c
+++ /dev/null
@@ -1,483 +0,0 @@
-/*
- *      Copyright (C) 1996, 1997 Claus Heine
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/zftape/zftape-write.c,v $
- * $Revision: 1.3 $
- * $Date: 1997/11/06 00:50:29 $
- *
- *      This file contains the writing code
- *      for the QIC-117 floppy-tape driver for Linux.
- */
-
-#include <linux/errno.h>
-#include <linux/mm.h>
-
-#include <linux/zftape.h>
-
-#include <asm/uaccess.h>
-
-#include "../zftape/zftape-init.h"
-#include "../zftape/zftape-eof.h"
-#include "../zftape/zftape-ctl.h"
-#include "../zftape/zftape-write.h"
-#include "../zftape/zftape-read.h"
-#include "../zftape/zftape-rw.h"
-#include "../zftape/zftape-vtbl.h"
-
-/*      Global vars.
- */
-
-/*      Local vars.
- */
-static int last_write_failed;
-static int need_flush;
-
-void zft_prevent_flush(void)
-{
-	need_flush = 0;
-}
-
-static int zft_write_header_segments(__u8* buffer)
-{
-	int header_1_ok = 0;
-	int header_2_ok = 0;
-	unsigned int time_stamp;
-	TRACE_FUN(ft_t_noise);
-	
-	TRACE_CATCH(ftape_abort_operation(),);
-	ftape_seek_to_bot();    /* prevents extra rewind */
-	if (GET4(buffer, 0) != FT_HSEG_MAGIC) {
-		TRACE_ABORT(-EIO, ft_t_err,
-			    "wrong header signature found, aborting");
-	} 
-	/*   Be optimistic: */
-	PUT4(buffer, FT_SEG_CNT,
-	     zft_written_segments + GET4(buffer, FT_SEG_CNT) + 2);
-	if ((time_stamp = zft_get_time()) != 0) {
-		PUT4(buffer, FT_WR_DATE, time_stamp);
-		if (zft_label_changed) {
-			PUT4(buffer, FT_LABEL_DATE, time_stamp);
-		}
-	}
-	TRACE(ft_t_noise,
-	      "writing first header segment %d", ft_header_segment_1);
-	header_1_ok = zft_verify_write_segments(ft_header_segment_1, 
-						buffer, FT_SEGMENT_SIZE,
-						zft_deblock_buf) >= 0;
-	TRACE(ft_t_noise,
-	      "writing second header segment %d", ft_header_segment_2);
-	header_2_ok = zft_verify_write_segments(ft_header_segment_2, 
-						buffer, FT_SEGMENT_SIZE,
-						zft_deblock_buf) >= 0;
-	if (!header_1_ok) {
-		TRACE(ft_t_warn, "Warning: "
-		      "update of first header segment failed");
-	}
-	if (!header_2_ok) {
-		TRACE(ft_t_warn, "Warning: "
-		      "update of second header segment failed");
-	}
-	if (!header_1_ok && !header_2_ok) {
-		TRACE_ABORT(-EIO, ft_t_err, "Error: "
-		      "update of both header segments failed.");
-	}
-	TRACE_EXIT 0;
-}
-
-int zft_update_header_segments(void)
-{
-	TRACE_FUN(ft_t_noise);
-	
-	/*  must NOT use zft_write_protected, as it also includes the
-	 *  file access mode. But we also want to update when soft
-	 *  write protection is enabled (O_RDONLY)
-	 */
-	if (ft_write_protected || zft_old_ftape) {
-		TRACE_ABORT(0, ft_t_noise, "Tape set read-only: no update");
-	} 
-	if (!zft_header_read) {
-		TRACE_ABORT(0, ft_t_noise, "Nothing to update");
-	}
-	if (!zft_header_changed) {
-		zft_header_changed = zft_written_segments > 0;
-	}
-	if (!zft_header_changed && !zft_volume_table_changed) {
-		TRACE_ABORT(0, ft_t_noise, "Nothing to update");
-	}
-	TRACE(ft_t_noise, "Updating header segments");
-	if (ftape_get_status()->fti_state == writing) {
-		TRACE_CATCH(ftape_loop_until_writes_done(),);
-	}
-	TRACE_CATCH(ftape_abort_operation(),);
-	
-	zft_deblock_segment = -1; /* invalidate the cache */
-	if (zft_header_changed) {
-		TRACE_CATCH(zft_write_header_segments(zft_hseg_buf),);
-	}
-	if (zft_volume_table_changed) {
-		TRACE_CATCH(zft_update_volume_table(ft_first_data_segment),);
-	}
-	zft_header_changed =
-		zft_volume_table_changed = 
-		zft_label_changed        =
-		zft_written_segments     = 0;
-	TRACE_CATCH(ftape_abort_operation(),);
-	ftape_seek_to_bot();
-	TRACE_EXIT 0;
-}
-
-static int read_merge_buffer(int seg_pos, __u8 *buffer, int offset, int seg_sz)
-{
-	int result = 0;
-	const ft_trace_t old_tracing = TRACE_LEVEL;
-	TRACE_FUN(ft_t_flow);
-	
-	if (zft_qic_mode) {
-		/*  writing in the middle of a volume is NOT allowed
-		 *
-		 */
-		TRACE(ft_t_noise, "No need to read a segment");
-		memset(buffer + offset, 0, seg_sz - offset);
-		TRACE_EXIT 0;
-	}
-	TRACE(ft_t_any, "waiting");
-	ftape_start_writing(FT_WR_MULTI);
-	TRACE_CATCH(ftape_loop_until_writes_done(),);
-	
-	TRACE(ft_t_noise, "trying to read segment %d from offset %d",
-	      seg_pos, offset);
-	SET_TRACE_LEVEL(ft_t_bug);
-	result = zft_fetch_segment_fraction(seg_pos, buffer, 
-					    FT_RD_SINGLE,
-					    offset, seg_sz - offset);
-	SET_TRACE_LEVEL(old_tracing);
-	if (result != (seg_sz - offset)) {
-		TRACE(ft_t_noise, "Ignore error: read_segment() result: %d",
-		      result);
-		memset(buffer + offset, 0, seg_sz - offset);
-	}
-	TRACE_EXIT 0;
-}
-
-/* flush the write buffer to tape and write an eof-marker at the
- * current position if not in raw mode.  This function always
- * positions the tape before the eof-marker.  _ftape_close() should
- * then advance to the next segment.
- *
- * the parameter "finish_volume" describes whether to position before
- * or after the possibly created file-mark. We always position after
- * the file-mark when called from ftape_close() and a flush was needed
- * (that is ftape_write() was the last tape operation before calling
- * ftape_flush) But we always position before the file-mark when this
- * function gets called from outside ftape_close()
- */
-int zft_flush_buffers(void)
-{
-	int result;
-	int data_remaining;
-	int this_segs_size;
-	TRACE_FUN(ft_t_flow);
-
-	TRACE(ft_t_data_flow,
-	      "entered, ftape_state = %d", ftape_get_status()->fti_state);
-	if (ftape_get_status()->fti_state != writing && !need_flush) {
-		TRACE_ABORT(0, ft_t_noise, "no need for flush");
-	}
-	zft_io_state = zft_idle; /*  triggers some initializations for the
-				  *  read and write routines 
-				  */
-	if (last_write_failed) {
-		ftape_abort_operation();
-		TRACE_EXIT -EIO;
-	}
-	TRACE(ft_t_noise, "flushing write buffers");
-	this_segs_size = zft_get_seg_sz(zft_pos.seg_pos);
-	if (this_segs_size == zft_pos.seg_byte_pos) {
-		zft_pos.seg_pos ++;
-		data_remaining = zft_pos.seg_byte_pos = 0;
-	} else {
-		data_remaining = zft_pos.seg_byte_pos;
-	}
-	/* If there is any data not written to tape yet, append zero's
-	 * up to the end of the sector (if using compression) or merge
-	 * it with the data existing on the tape Then write the
-	 * segment(s) to tape.
-	 */
-	TRACE(ft_t_noise, "Position:\n"
-	      KERN_INFO "seg_pos  : %d\n"
-	      KERN_INFO "byte pos : %d\n"
-	      KERN_INFO "remaining: %d",
-	      zft_pos.seg_pos, zft_pos.seg_byte_pos, data_remaining);
-	if (data_remaining > 0) {
-		do {
-			this_segs_size = zft_get_seg_sz(zft_pos.seg_pos);
-			if (this_segs_size > data_remaining) {
-				TRACE_CATCH(read_merge_buffer(zft_pos.seg_pos,
-							      zft_deblock_buf,
-							      data_remaining,
-							      this_segs_size),
-					    last_write_failed = 1);
-			}
-			result = ftape_write_segment(zft_pos.seg_pos, 
-						     zft_deblock_buf,
-						     FT_WR_MULTI);
-			if (result != this_segs_size) {
-				TRACE(ft_t_err, "flush buffers failed");
-				zft_pos.tape_pos    -= zft_pos.seg_byte_pos;
-				zft_pos.seg_byte_pos = 0;
-
-				last_write_failed = 1;
-				TRACE_EXIT result;
-			}
-			zft_written_segments ++;
-			TRACE(ft_t_data_flow,
-			      "flush, moved out buffer: %d", result);
-			/* need next segment for more data (empty segments?)
-			 */
-			if (result < data_remaining) { 
-				if (result > 0) {       
-					/* move remainder to buffer beginning 
-					 */
-					memmove(zft_deblock_buf, 
-						zft_deblock_buf + result,
-						FT_SEGMENT_SIZE - result);
-				}
-			} 
-			data_remaining -= result;
-			zft_pos.seg_pos ++;
-		} while (data_remaining > 0);
-		TRACE(ft_t_any, "result: %d", result);
-		zft_deblock_segment = --zft_pos.seg_pos;
-		if (data_remaining == 0) {  /* first byte next segment */
-			zft_pos.seg_byte_pos = this_segs_size;
-		} else { /* after data previous segment, data_remaining < 0 */
-			zft_pos.seg_byte_pos = data_remaining + result;
-		}
-	} else {
-		TRACE(ft_t_noise, "zft_deblock_buf empty");
-		zft_pos.seg_pos --;
-		zft_pos.seg_byte_pos = zft_get_seg_sz (zft_pos.seg_pos);
-		ftape_start_writing(FT_WR_MULTI);
-	}
-	TRACE(ft_t_any, "waiting");
-	if ((result = ftape_loop_until_writes_done()) < 0) {
-		/* that's really bad. What to do with zft_tape_pos?
-		 */
-		TRACE(ft_t_err, "flush buffers failed");
-	}
-	TRACE(ft_t_any, "zft_seg_pos: %d, zft_seg_byte_pos: %d",
-	      zft_pos.seg_pos, zft_pos.seg_byte_pos);
-	last_write_failed  =
-		need_flush = 0;
-	TRACE_EXIT result;
-}
-
-/* return-value: the number of bytes removed from the user-buffer
- *
- * out: 
- *      int *write_cnt: how much actually has been moved to the
- *                      zft_deblock_buf
- *      int req_len  : MUST NOT BE CHANGED, except at EOT, in 
- *                      which case it may be adjusted
- * in : 
- *      char *buff        : the user buffer
- *      int buf_pos_write : copy of buf_len_wr int
- *      this_segs_size    : the size in bytes of the actual segment
- *                          char
- *      *zft_deblock_buf   : zft_deblock_buf
- */
-static int zft_simple_write(int *cnt,
-			    __u8 *dst_buf, const int seg_sz,
-			    const __u8 __user *src_buf, const int req_len, 
-			    const zft_position *pos,const zft_volinfo *volume)
-{
-	int space_left;
-	TRACE_FUN(ft_t_flow);
-
-	/* volume->size holds the tape capacity while volume is open */
-	if (pos->tape_pos + volume->blk_sz > volume->size) {
-		TRACE_EXIT -ENOSPC;
-	}
-	/*  remaining space in this segment, NOT zft_deblock_buf
-	 */
-	space_left = seg_sz - pos->seg_byte_pos;
-	*cnt = req_len < space_left ? req_len : space_left;
-	if (copy_from_user(dst_buf + pos->seg_byte_pos, src_buf, *cnt) != 0) {
-		TRACE_EXIT -EFAULT;
-	}
-	TRACE_EXIT *cnt;
-}
-
-static int check_write_access(int req_len,
-			      const zft_volinfo **volume,
-			      zft_position *pos,
-			      const unsigned int blk_sz)
-{
-	int result;
-	TRACE_FUN(ft_t_flow);
-
-	if ((req_len % zft_blk_sz) != 0) {
-		TRACE_ABORT(-EINVAL, ft_t_info,
-			    "write-count %d must be multiple of block-size %d",
-			    req_len, blk_sz);
-	}
-	if (zft_io_state == zft_writing) {
-		/*  all other error conditions have been checked earlier
-		 */
-		TRACE_EXIT 0;
-	}
-	zft_io_state = zft_idle;
-	TRACE_CATCH(zft_check_write_access(pos),);
-	/*  If we haven't read the header segment yet, do it now.
-	 *  This will verify the configuration, get the bad sector
-	 *  table and read the volume table segment 
-	 */
-	if (!zft_header_read) {
-		TRACE_CATCH(zft_read_header_segments(),);
-	}
-	/*  fine. Now the tape is either at BOT or at EOD,
-	 *  Write start of volume now
-	 */
-	TRACE_CATCH(zft_open_volume(pos, blk_sz, zft_use_compression),);
-	*volume = zft_find_volume(pos->seg_pos);
-	DUMP_VOLINFO(ft_t_noise, "", *volume);
-	zft_just_before_eof = 0;
-	/* now merge with old data if necessary */
-	if (!zft_qic_mode && pos->seg_byte_pos != 0){
-		result = zft_fetch_segment(pos->seg_pos,
-					   zft_deblock_buf,
-					   FT_RD_SINGLE);
-		if (result < 0) {
-			if (result == -EINTR || result == -ENOSPC) {
-				TRACE_EXIT result;
-			}
-			TRACE(ft_t_noise, 
-			      "ftape_read_segment() result: %d. "
-			      "This might be normal when using "
-			      "a newly\nformatted tape", result);
-			memset(zft_deblock_buf, '\0', pos->seg_byte_pos);
-		}
-	}
-	zft_io_state = zft_writing;
-	TRACE_EXIT 0;
-}
-
-static int fill_deblock_buf(__u8 *dst_buf, const int seg_sz,
-			    zft_position *pos, const zft_volinfo *volume,
-			    const char __user *usr_buf, const int req_len)
-{
-	int cnt = 0;
-	int result = 0;
-	TRACE_FUN(ft_t_flow);
-
-	if (seg_sz == 0) {
-		TRACE_ABORT(0, ft_t_data_flow, "empty segment");
-	}
-	TRACE(ft_t_data_flow, "\n"
-	      KERN_INFO "remaining req_len: %d\n"
-	      KERN_INFO "          buf_pos: %d", 
-	      req_len, pos->seg_byte_pos);
-	/* zft_deblock_buf will not contain a valid segment any longer */
-	zft_deblock_segment = -1;
-	if (zft_use_compression) {
-		TRACE_CATCH(zft_cmpr_lock(1 /* try to load */),);
-		TRACE_CATCH(result= (*zft_cmpr_ops->write)(&cnt,
-							   dst_buf, seg_sz,
-							   usr_buf, req_len,
-							   pos, volume),);
-	} else {
-		TRACE_CATCH(result= zft_simple_write(&cnt,
-						     dst_buf, seg_sz,
-						     usr_buf, req_len,
-						     pos, volume),);
-	}
-	pos->volume_pos   += result;
-	pos->seg_byte_pos += cnt;
-	pos->tape_pos     += cnt;
-	TRACE(ft_t_data_flow, "\n"
-	      KERN_INFO "removed from user-buffer : %d bytes.\n"
-	      KERN_INFO "copied to zft_deblock_buf: %d bytes.\n"
-	      KERN_INFO "zft_tape_pos             : " LL_X " bytes.",
-	      result, cnt, LL(pos->tape_pos));
-	TRACE_EXIT result;
-}
-
-
-/*  called by the kernel-interface routine "zft_write()"
- */
-int _zft_write(const char __user *buff, int req_len)
-{
-	int result = 0;
-	int written = 0;
-	int write_cnt;
-	int seg_sz;
-	static const zft_volinfo *volume = NULL;
-	TRACE_FUN(ft_t_flow);
-	
-	zft_resid         = req_len;	
-	last_write_failed = 1; /* reset to 0 when successful */
-	/* check if write is allowed 
-	 */
-	TRACE_CATCH(check_write_access(req_len, &volume,&zft_pos,zft_blk_sz),);
-	while (req_len > 0) {
-		/* Allow us to escape from this loop with a signal !
-		 */
-		FT_SIGNAL_EXIT(_DONT_BLOCK);
-		seg_sz = zft_get_seg_sz(zft_pos.seg_pos);
-		if ((write_cnt = fill_deblock_buf(zft_deblock_buf,
-						  seg_sz,
-						  &zft_pos,
-						  volume,
-						  buff,
-						  req_len)) < 0) {
-			zft_resid -= written;
-			if (write_cnt == -ENOSPC) {
-				/* leave the remainder to flush_buffers()
-				 */
-				TRACE(ft_t_info, "No space left on device");
-				last_write_failed = 0;
-				if (!need_flush) {
-					need_flush = written > 0;
-				}
-				TRACE_EXIT written > 0 ? written : -ENOSPC;
-			} else {
-				TRACE_EXIT result;
-			}
-		}
-		if (zft_pos.seg_byte_pos == seg_sz) {
-			TRACE_CATCH(ftape_write_segment(zft_pos.seg_pos, 
-							zft_deblock_buf,
-							FT_WR_ASYNC),
-				    zft_resid -= written);
-			zft_written_segments ++;
-			zft_pos.seg_byte_pos =  0;
-			zft_deblock_segment  = zft_pos.seg_pos;
-			++zft_pos.seg_pos;
-		}
-		written += write_cnt;
-		buff    += write_cnt;
-		req_len -= write_cnt;
-	} /* while (req_len > 0) */
-	TRACE(ft_t_data_flow, "remaining in blocking buffer: %d",
-	       zft_pos.seg_byte_pos);
-	TRACE(ft_t_data_flow, "just written bytes: %d", written);
-	last_write_failed = 0;
-	zft_resid -= written;
-	need_flush = need_flush || written > 0;
-	TRACE_EXIT written;               /* bytes written */
-}
diff --git a/drivers/char/ftape/zftape/zftape-write.h b/drivers/char/ftape/zftape/zftape-write.h
deleted file mode 100644
index ea88701..0000000
--- a/drivers/char/ftape/zftape/zftape-write.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef _ZFTAPE_WRITE_H
-#define _ZFTAPE_WRITE_H
-
-/*
- * Copyright (C) 1996, 1997 Claus-Justus Heine
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/zftape/zftape-write.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:19:13 $
- *
- *      This file contains the definitions for the write functions
- *      for the zftape driver for Linux.
- *
- */
-
-extern int  zft_flush_buffers(void);
-extern int  zft_update_header_segments(void);
-extern void zft_prevent_flush(void);
-
-/*  hook for the VFS interface 
- */
-extern int _zft_write(const char __user *buff, int req_len);
-#endif /* _ZFTAPE_WRITE_H */
diff --git a/drivers/char/ftape/zftape/zftape_syms.c b/drivers/char/ftape/zftape/zftape_syms.c
deleted file mode 100644
index 2db1401..0000000
--- a/drivers/char/ftape/zftape/zftape_syms.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- *      Copyright (C) 1997 Claus-Justus Heine
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/ftape/zftape/zftape_syms.c,v $
- * $Revision: 1.3 $
- * $Date: 1997/10/05 19:19:14 $
- *
- *      This file contains the symbols that the zftape frontend to 
- *      the ftape floppy tape driver exports 
- */		 
-
-#include <linux/module.h>
-
-#include <linux/zftape.h>
-
-#include "../zftape/zftape-init.h"
-#include "../zftape/zftape-read.h"
-#include "../zftape/zftape-buffers.h"
-#include "../zftape/zftape-ctl.h"
-
-/* zftape-init.c */
-EXPORT_SYMBOL(zft_cmpr_register);
-/* zftape-read.c */
-EXPORT_SYMBOL(zft_fetch_segment_fraction);
-/* zftape-buffers.c */
-EXPORT_SYMBOL(zft_vmalloc_once);
-EXPORT_SYMBOL(zft_vmalloc_always);
-EXPORT_SYMBOL(zft_vfree);
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 817dc40..23b25ad 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -102,7 +102,7 @@
  * Routine to poll RTC seconds field for change as often as possible,
  * after first RTC_UIE use timer to reduce polling
  */
-static void genrtc_troutine(void *data)
+static void genrtc_troutine(struct work_struct *work)
 {
 	unsigned int tmp = get_rtc_ss();
 	
@@ -255,7 +255,7 @@
 		irq_active = 1;
 		stop_rtc_timers = 0;
 		lostint = 0;
-		INIT_WORK(&genrtc_task, genrtc_troutine, NULL);
+		INIT_WORK(&genrtc_task, genrtc_troutine);
 		oldsecs = get_rtc_ss();
 		init_timer(&timer_task);
 
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 091a11c..20dc3be 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -21,6 +21,7 @@
 #include <linux/fcntl.h>
 #include <linux/init.h>
 #include <linux/poll.h>
+#include <linux/mm.h>
 #include <linux/proc_fs.h>
 #include <linux/spinlock.h>
 #include <linux/sysctl.h>
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 9902ffa..cc2cd46 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -38,6 +38,7 @@
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/delay.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 8728255..d090622 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -337,11 +337,6 @@
 static void hvcs_close(struct tty_struct *tty, struct file *filp);
 static void hvcs_hangup(struct tty_struct * tty);
 
-static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd);
-static void hvcs_remove_device_attrs(struct vio_dev *vdev);
-static void hvcs_create_driver_attrs(void);
-static void hvcs_remove_driver_attrs(void);
-
 static int __devinit hvcs_probe(struct vio_dev *dev,
 		const struct vio_device_id *id);
 static int __devexit hvcs_remove(struct vio_dev *dev);
@@ -353,6 +348,172 @@
 #define HVCS_TRY_WRITE	0x00000004
 #define HVCS_READ_MASK	(HVCS_SCHED_READ | HVCS_QUICK_READ)
 
+static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
+{
+	return viod->dev.driver_data;
+}
+/* The sysfs interface for the driver and devices */
+
+static ssize_t hvcs_partner_vtys_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct vio_dev *viod = to_vio_dev(dev);
+	struct hvcs_struct *hvcsd = from_vio_dev(viod);
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&hvcsd->lock, flags);
+	retval = sprintf(buf, "%X\n", hvcsd->p_unit_address);
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	return retval;
+}
+static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL);
+
+static ssize_t hvcs_partner_clcs_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct vio_dev *viod = to_vio_dev(dev);
+	struct hvcs_struct *hvcsd = from_vio_dev(viod);
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&hvcsd->lock, flags);
+	retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	return retval;
+}
+static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL);
+
+static ssize_t hvcs_current_vty_store(struct device *dev, struct device_attribute *attr, const char * buf,
+		size_t count)
+{
+	/*
+	 * Don't need this feature at the present time because firmware doesn't
+	 * yet support multiple partners.
+	 */
+	printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n");
+	return -EPERM;
+}
+
+static ssize_t hvcs_current_vty_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct vio_dev *viod = to_vio_dev(dev);
+	struct hvcs_struct *hvcsd = from_vio_dev(viod);
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&hvcsd->lock, flags);
+	retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	return retval;
+}
+
+static DEVICE_ATTR(current_vty,
+	S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store);
+
+static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribute *attr, const char *buf,
+		size_t count)
+{
+	struct vio_dev *viod = to_vio_dev(dev);
+	struct hvcs_struct *hvcsd = from_vio_dev(viod);
+	unsigned long flags;
+
+	/* writing a '0' to this sysfs entry will result in the disconnect. */
+	if (simple_strtol(buf, NULL, 0) != 0)
+		return -EINVAL;
+
+	spin_lock_irqsave(&hvcsd->lock, flags);
+
+	if (hvcsd->open_count > 0) {
+		spin_unlock_irqrestore(&hvcsd->lock, flags);
+		printk(KERN_INFO "HVCS: vterm state unchanged.  "
+				"The hvcs device node is still in use.\n");
+		return -EPERM;
+	}
+
+	if (hvcsd->connected == 0) {
+		spin_unlock_irqrestore(&hvcsd->lock, flags);
+		printk(KERN_INFO "HVCS: vterm state unchanged. The"
+				" vty-server is not connected to a vty.\n");
+		return -EPERM;
+	}
+
+	hvcs_partner_free(hvcsd);
+	printk(KERN_INFO "HVCS: Closed vty-server@%X and"
+			" partner vty@%X:%d connection.\n",
+			hvcsd->vdev->unit_address,
+			hvcsd->p_unit_address,
+			(uint32_t)hvcsd->p_partition_ID);
+
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	return count;
+}
+
+static ssize_t hvcs_vterm_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct vio_dev *viod = to_vio_dev(dev);
+	struct hvcs_struct *hvcsd = from_vio_dev(viod);
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&hvcsd->lock, flags);
+	retval = sprintf(buf, "%d\n", hvcsd->connected);
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	return retval;
+}
+static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR,
+		hvcs_vterm_state_show, hvcs_vterm_state_store);
+
+static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct vio_dev *viod = to_vio_dev(dev);
+	struct hvcs_struct *hvcsd = from_vio_dev(viod);
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&hvcsd->lock, flags);
+	retval = sprintf(buf, "%d\n", hvcsd->index);
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	return retval;
+}
+
+static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL);
+
+static struct attribute *hvcs_attrs[] = {
+	&dev_attr_partner_vtys.attr,
+	&dev_attr_partner_clcs.attr,
+	&dev_attr_current_vty.attr,
+	&dev_attr_vterm_state.attr,
+	&dev_attr_index.attr,
+	NULL,
+};
+
+static struct attribute_group hvcs_attr_group = {
+	.attrs = hvcs_attrs,
+};
+
+static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf)
+{
+	/* A 1 means it is updating, a 0 means it is done updating */
+	return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status);
+}
+
+static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf,
+		size_t count)
+{
+	if ((simple_strtol(buf, NULL, 0) != 1)
+		&& (hvcs_rescan_status != 0))
+		return -EINVAL;
+
+	hvcs_rescan_status = 1;
+	printk(KERN_INFO "HVCS: rescanning partner info for all"
+		" vty-servers.\n");
+	hvcs_rescan_devices_list();
+	hvcs_rescan_status = 0;
+	return count;
+}
+
+static DRIVER_ATTR(rescan,
+	S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store);
+
 static void hvcs_kick(void)
 {
 	hvcs_kicked = 1;
@@ -575,7 +736,7 @@
 	spin_unlock_irqrestore(&hvcsd->lock, flags);
 	spin_unlock(&hvcs_structs_lock);
 
-	hvcs_remove_device_attrs(vdev);
+	sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group);
 
 	kfree(hvcsd);
 }
@@ -608,6 +769,7 @@
 {
 	struct hvcs_struct *hvcsd;
 	int index;
+	int retval;
 
 	if (!dev || !id) {
 		printk(KERN_ERR "HVCS: probed with invalid parameter.\n");
@@ -658,14 +820,16 @@
 	 * the hvcs_struct has been added to the devices list then the user app
 	 * will get -ENODEV.
 	 */
-
 	spin_lock(&hvcs_structs_lock);
-
 	list_add_tail(&(hvcsd->next), &hvcs_structs);
-
 	spin_unlock(&hvcs_structs_lock);
 
-	hvcs_create_device_attrs(hvcsd);
+	retval = sysfs_create_group(&dev->dev.kobj, &hvcs_attr_group);
+	if (retval) {
+		printk(KERN_ERR "HVCS: Can't create sysfs attrs for vty-server@%X\n",
+		       hvcsd->vdev->unit_address);
+		return retval;
+	}
 
 	printk(KERN_INFO "HVCS: vty-server@%X added to the vio bus.\n", dev->unit_address);
 
@@ -1354,8 +1518,10 @@
 	if (!hvcs_tty_driver)
 		return -ENOMEM;
 
-	if (hvcs_alloc_index_list(num_ttys_to_alloc))
-		return -ENOMEM;
+	if (hvcs_alloc_index_list(num_ttys_to_alloc)) {
+		rc = -ENOMEM;
+		goto index_fail;
+	}
 
 	hvcs_tty_driver->owner = THIS_MODULE;
 
@@ -1385,41 +1551,57 @@
 	 * dynamically assigned major and minor numbers for our devices.
 	 */
 	if (tty_register_driver(hvcs_tty_driver)) {
-		printk(KERN_ERR "HVCS: registration "
-			" as a tty driver failed.\n");
-		hvcs_free_index_list();
-		put_tty_driver(hvcs_tty_driver);
-		return -EIO;
+		printk(KERN_ERR "HVCS: registration as a tty driver failed.\n");
+		rc = -EIO;
+		goto register_fail;
 	}
 
 	hvcs_pi_buff = kmalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!hvcs_pi_buff) {
-		tty_unregister_driver(hvcs_tty_driver);
-		hvcs_free_index_list();
-		put_tty_driver(hvcs_tty_driver);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto buff_alloc_fail;
 	}
 
 	hvcs_task = kthread_run(khvcsd, NULL, "khvcsd");
 	if (IS_ERR(hvcs_task)) {
 		printk(KERN_ERR "HVCS: khvcsd creation failed.  Driver not loaded.\n");
-		kfree(hvcs_pi_buff);
-		tty_unregister_driver(hvcs_tty_driver);
-		hvcs_free_index_list();
-		put_tty_driver(hvcs_tty_driver);
-		return -EIO;
+		rc = -EIO;
+		goto kthread_fail;
 	}
 
 	rc = vio_register_driver(&hvcs_vio_driver);
+	if (rc) {
+		printk(KERN_ERR "HVCS: can't register vio driver\n");
+		goto vio_fail;
+	}
 
 	/*
 	 * This needs to be done AFTER the vio_register_driver() call or else
 	 * the kobjects won't be initialized properly.
 	 */
-	hvcs_create_driver_attrs();
+	rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan);
+	if (rc) {
+		printk(KERN_ERR "HVCS: sysfs attr create failed\n");
+		goto attr_fail;
+	}
 
 	printk(KERN_INFO "HVCS: driver module inserted.\n");
 
+	return 0;
+
+attr_fail:
+	vio_unregister_driver(&hvcs_vio_driver);
+vio_fail:
+	kthread_stop(hvcs_task);
+kthread_fail:
+	kfree(hvcs_pi_buff);
+buff_alloc_fail:
+	tty_unregister_driver(hvcs_tty_driver);
+register_fail:
+	hvcs_free_index_list();
+index_fail:
+	put_tty_driver(hvcs_tty_driver);
+	hvcs_tty_driver = NULL;
 	return rc;
 }
 
@@ -1441,7 +1623,7 @@
 	hvcs_pi_buff = NULL;
 	spin_unlock(&hvcs_pi_lock);
 
-	hvcs_remove_driver_attrs();
+	driver_remove_file(&hvcs_vio_driver.driver, &driver_attr_rescan);
 
 	vio_unregister_driver(&hvcs_vio_driver);
 
@@ -1456,191 +1638,3 @@
 
 module_init(hvcs_module_init);
 module_exit(hvcs_module_exit);
-
-static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
-{
-	return viod->dev.driver_data;
-}
-/* The sysfs interface for the driver and devices */
-
-static ssize_t hvcs_partner_vtys_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct vio_dev *viod = to_vio_dev(dev);
-	struct hvcs_struct *hvcsd = from_vio_dev(viod);
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&hvcsd->lock, flags);
-	retval = sprintf(buf, "%X\n", hvcsd->p_unit_address);
-	spin_unlock_irqrestore(&hvcsd->lock, flags);
-	return retval;
-}
-static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL);
-
-static ssize_t hvcs_partner_clcs_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct vio_dev *viod = to_vio_dev(dev);
-	struct hvcs_struct *hvcsd = from_vio_dev(viod);
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&hvcsd->lock, flags);
-	retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
-	spin_unlock_irqrestore(&hvcsd->lock, flags);
-	return retval;
-}
-static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL);
-
-static ssize_t hvcs_current_vty_store(struct device *dev, struct device_attribute *attr, const char * buf,
-		size_t count)
-{
-	/*
-	 * Don't need this feature at the present time because firmware doesn't
-	 * yet support multiple partners.
-	 */
-	printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n");
-	return -EPERM;
-}
-
-static ssize_t hvcs_current_vty_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct vio_dev *viod = to_vio_dev(dev);
-	struct hvcs_struct *hvcsd = from_vio_dev(viod);
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&hvcsd->lock, flags);
-	retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
-	spin_unlock_irqrestore(&hvcsd->lock, flags);
-	return retval;
-}
-
-static DEVICE_ATTR(current_vty,
-	S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store);
-
-static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribute *attr, const char *buf,
-		size_t count)
-{
-	struct vio_dev *viod = to_vio_dev(dev);
-	struct hvcs_struct *hvcsd = from_vio_dev(viod);
-	unsigned long flags;
-
-	/* writing a '0' to this sysfs entry will result in the disconnect. */
-	if (simple_strtol(buf, NULL, 0) != 0)
-		return -EINVAL;
-
-	spin_lock_irqsave(&hvcsd->lock, flags);
-
-	if (hvcsd->open_count > 0) {
-		spin_unlock_irqrestore(&hvcsd->lock, flags);
-		printk(KERN_INFO "HVCS: vterm state unchanged.  "
-				"The hvcs device node is still in use.\n");
-		return -EPERM;
-	}
-
-	if (hvcsd->connected == 0) {
-		spin_unlock_irqrestore(&hvcsd->lock, flags);
-		printk(KERN_INFO "HVCS: vterm state unchanged. The"
-				" vty-server is not connected to a vty.\n");
-		return -EPERM;
-	}
-
-	hvcs_partner_free(hvcsd);
-	printk(KERN_INFO "HVCS: Closed vty-server@%X and"
-			" partner vty@%X:%d connection.\n",
-			hvcsd->vdev->unit_address,
-			hvcsd->p_unit_address,
-			(uint32_t)hvcsd->p_partition_ID);
-
-	spin_unlock_irqrestore(&hvcsd->lock, flags);
-	return count;
-}
-
-static ssize_t hvcs_vterm_state_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct vio_dev *viod = to_vio_dev(dev);
-	struct hvcs_struct *hvcsd = from_vio_dev(viod);
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&hvcsd->lock, flags);
-	retval = sprintf(buf, "%d\n", hvcsd->connected);
-	spin_unlock_irqrestore(&hvcsd->lock, flags);
-	return retval;
-}
-static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR,
-		hvcs_vterm_state_show, hvcs_vterm_state_store);
-
-static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct vio_dev *viod = to_vio_dev(dev);
-	struct hvcs_struct *hvcsd = from_vio_dev(viod);
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&hvcsd->lock, flags);
-	retval = sprintf(buf, "%d\n", hvcsd->index);
-	spin_unlock_irqrestore(&hvcsd->lock, flags);
-	return retval;
-}
-
-static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL);
-
-static struct attribute *hvcs_attrs[] = {
-	&dev_attr_partner_vtys.attr,
-	&dev_attr_partner_clcs.attr,
-	&dev_attr_current_vty.attr,
-	&dev_attr_vterm_state.attr,
-	&dev_attr_index.attr,
-	NULL,
-};
-
-static struct attribute_group hvcs_attr_group = {
-	.attrs = hvcs_attrs,
-};
-
-static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd)
-{
-	struct vio_dev *vdev = hvcsd->vdev;
-	sysfs_create_group(&vdev->dev.kobj, &hvcs_attr_group);
-}
-
-static void hvcs_remove_device_attrs(struct vio_dev *vdev)
-{
-	sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group);
-}
-
-static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf)
-{
-	/* A 1 means it is updating, a 0 means it is done updating */
-	return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status);
-}
-
-static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf,
-		size_t count)
-{
-	if ((simple_strtol(buf, NULL, 0) != 1)
-		&& (hvcs_rescan_status != 0))
-		return -EINVAL;
-
-	hvcs_rescan_status = 1;
-	printk(KERN_INFO "HVCS: rescanning partner info for all"
-		" vty-servers.\n");
-	hvcs_rescan_devices_list();
-	hvcs_rescan_status = 0;
-	return count;
-}
-static DRIVER_ATTR(rescan,
-	S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store);
-
-static void hvcs_create_driver_attrs(void)
-{
-	struct device_driver *driverfs = &(hvcs_vio_driver.driver);
-	driver_create_file(driverfs, &driver_attr_rescan);
-}
-
-static void hvcs_remove_driver_attrs(void)
-{
-	struct device_driver *driverfs = &(hvcs_vio_driver.driver);
-	driver_remove_file(driverfs, &driver_attr_rescan);
-}
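
The hvcs changes above fold the per-attribute wrapper functions into a single
struct attribute_group that is created with sysfs_create_group() at probe time
(with its return value propagated) and torn down with sysfs_remove_group().
Below is a minimal sketch of that pattern, assuming placeholder foo_* names
that are not taken from this patch:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t foo_state_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 1);		/* report some state */
	}
	static DEVICE_ATTR(state, S_IRUGO, foo_state_show, NULL);

	static struct attribute *foo_attrs[] = {
		&dev_attr_state.attr,
		NULL,				/* list must be NULL-terminated */
	};

	static struct attribute_group foo_attr_group = {
		.attrs = foo_attrs,
	};

	static int foo_probe(struct device *dev)
	{
		/* one call creates every attribute; failure is reported */
		return sysfs_create_group(&dev->kobj, &foo_attr_group);
	}

	static void foo_remove(struct device *dev)
	{
		sysfs_remove_group(&dev->kobj, &foo_attr_group);
	}
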
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 2cf63e7..82a41d5 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -69,7 +69,7 @@
 #define __ALIGNED__	__attribute__((__aligned__(sizeof(long))))
 
 struct hvsi_struct {
-	struct work_struct writer;
+	struct delayed_work writer;
 	struct work_struct handshaker;
 	wait_queue_head_t emptyq; /* woken when outbuf is emptied */
 	wait_queue_head_t stateq; /* woken when HVSI state changes */
@@ -744,9 +744,10 @@
 	return 0;
 }
 
-static void hvsi_handshaker(void *arg)
+static void hvsi_handshaker(struct work_struct *work)
 {
-	struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+	struct hvsi_struct *hp =
+		container_of(work, struct hvsi_struct, handshaker);
 
 	if (hvsi_handshake(hp) >= 0)
 		return;
@@ -951,9 +952,10 @@
 }
 
 /* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
-static void hvsi_write_worker(void *arg)
+static void hvsi_write_worker(struct work_struct *work)
 {
-	struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+	struct hvsi_struct *hp =
+		container_of(work, struct hvsi_struct, writer.work);
 	unsigned long flags;
 #ifdef DEBUG
 	static long start_j = 0;
@@ -1287,8 +1289,8 @@
 		}
 
 		hp = &hvsi_ports[hvsi_count];
-		INIT_WORK(&hp->writer, hvsi_write_worker, hp);
-		INIT_WORK(&hp->handshaker, hvsi_handshaker, hp);
+		INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
+		INIT_WORK(&hp->handshaker, hvsi_handshaker);
 		init_waitqueue_head(&hp->emptyq);
 		init_waitqueue_head(&hp->stateq);
 		spin_lock_init(&hp->lock);
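
The hvsi hunk above illustrates the reworked workqueue calling convention:
INIT_WORK() no longer takes a data pointer, handlers receive the
struct work_struct itself, per-object state is recovered with container_of(),
and deferred work moves to struct delayed_work.  A minimal sketch of that
convention, where struct foo and the foo_* functions are illustrative
placeholders rather than code from this patch:

	#include <linux/workqueue.h>

	struct foo {
		struct work_struct work;	/* immediate work item */
		struct delayed_work dwork;	/* timer-deferred work item */
		int value;
	};

	static void foo_work_fn(struct work_struct *work)
	{
		/* no more void *data: recover the containing object */
		struct foo *f = container_of(work, struct foo, work);

		f->value++;
	}

	static void foo_dwork_fn(struct work_struct *work)
	{
		/* delayed_work embeds a work_struct, hence ".dwork.work" */
		struct foo *f = container_of(work, struct foo, dwork.work);

		f->value--;
	}

	static void foo_setup(struct foo *f)
	{
		INIT_WORK(&f->work, foo_work_fn);
		INIT_DELAYED_WORK(&f->dwork, foo_dwork_fn);
	}
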
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 9f7635f..5f3acd8 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -3,17 +3,20 @@
 #
 
 config HW_RANDOM
-	bool "Hardware Random Number Generator Core support"
-	default y
+	tristate "Hardware Random Number Generator Core support"
+	default m
 	---help---
 	  Hardware Random Number Generator Core infrastructure.
 
+	  To compile this driver as a module, choose M here: the
+	  module will be called rng-core.
+
 	  If unsure, say Y.
 
 config HW_RANDOM_INTEL
 	tristate "Intel HW Random Number Generator support"
 	depends on HW_RANDOM && (X86 || IA64) && PCI
-	default y
+	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
 	  Generator hardware found on Intel i8xx-based motherboards.
@@ -26,7 +29,7 @@
 config HW_RANDOM_AMD
 	tristate "AMD HW Random Number Generator support"
 	depends on HW_RANDOM && X86 && PCI
-	default y
+	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
 	  Generator hardware found on AMD 76x-based motherboards.
@@ -39,7 +42,7 @@
 config HW_RANDOM_GEODE
 	tristate "AMD Geode HW Random Number Generator support"
 	depends on HW_RANDOM && X86 && PCI
-	default y
+	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
 	  Generator hardware found on the AMD Geode LX.
@@ -52,7 +55,7 @@
 config HW_RANDOM_VIA
 	tristate "VIA HW Random Number Generator support"
 	depends on HW_RANDOM && X86_32
-	default y
+	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
 	  Generator hardware found on VIA based motherboards.
@@ -65,7 +68,7 @@
 config HW_RANDOM_IXP4XX
 	tristate "Intel IXP4xx NPU HW Random Number Generator support"
 	depends on HW_RANDOM && ARCH_IXP4XX
-	default y
+	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random
 	  Number Generator hardware found on the Intel IXP4xx NPU.
@@ -78,7 +81,7 @@
 config HW_RANDOM_OMAP
 	tristate "OMAP Random Number Generator support"
 	depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP24XX)
-	default y
+	default HW_RANDOM
  	---help---
  	  This driver provides kernel-side support for the Random Number
 	  Generator hardware found on OMAP16xx and OMAP24xx multimedia
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index e263ae9..c41fa19 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -2,7 +2,8 @@
 # Makefile for HW Random Number Generator (RNG) device drivers.
 #
 
-obj-$(CONFIG_HW_RANDOM) += core.o
+obj-$(CONFIG_HW_RANDOM) += rng-core.o
+rng-core-y := core.o
 obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
 obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
 obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 154a81d..26a860a 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -36,6 +36,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
+#include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/miscdevice.h>
 #include <linux/delay.h>
@@ -162,7 +163,8 @@
 };
 
 
-static ssize_t hwrng_attr_current_store(struct class_device *class,
+static ssize_t hwrng_attr_current_store(struct device *dev,
+					struct device_attribute *attr,
 					const char *buf, size_t len)
 {
 	int err;
@@ -192,7 +194,8 @@
 	return err ? : len;
 }
 
-static ssize_t hwrng_attr_current_show(struct class_device *class,
+static ssize_t hwrng_attr_current_show(struct device *dev,
+				       struct device_attribute *attr,
 				       char *buf)
 {
 	int err;
@@ -210,7 +213,8 @@
 	return ret;
 }
 
-static ssize_t hwrng_attr_available_show(struct class_device *class,
+static ssize_t hwrng_attr_available_show(struct device *dev,
+					 struct device_attribute *attr,
 					 char *buf)
 {
 	int err;
@@ -234,20 +238,18 @@
 	return ret;
 }
 
-static CLASS_DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
-			 hwrng_attr_current_show,
-			 hwrng_attr_current_store);
-static CLASS_DEVICE_ATTR(rng_available, S_IRUGO,
-			 hwrng_attr_available_show,
-			 NULL);
+static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
+		   hwrng_attr_current_show,
+		   hwrng_attr_current_store);
+static DEVICE_ATTR(rng_available, S_IRUGO,
+		   hwrng_attr_available_show,
+		   NULL);
 
 
 static void unregister_miscdev(void)
 {
-	class_device_remove_file(rng_miscdev.class,
-				 &class_device_attr_rng_available);
-	class_device_remove_file(rng_miscdev.class,
-				 &class_device_attr_rng_current);
+	device_remove_file(rng_miscdev.this_device, &dev_attr_rng_available);
+	device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
 	misc_deregister(&rng_miscdev);
 }
 
@@ -258,20 +260,19 @@
 	err = misc_register(&rng_miscdev);
 	if (err)
 		goto out;
-	err = class_device_create_file(rng_miscdev.class,
-				       &class_device_attr_rng_current);
+	err = device_create_file(rng_miscdev.this_device,
+				 &dev_attr_rng_current);
 	if (err)
 		goto err_misc_dereg;
-	err = class_device_create_file(rng_miscdev.class,
-				       &class_device_attr_rng_available);
+	err = device_create_file(rng_miscdev.this_device,
+				 &dev_attr_rng_available);
 	if (err)
 		goto err_remove_current;
 out:
 	return err;
 
 err_remove_current:
-	class_device_remove_file(rng_miscdev.class,
-				 &class_device_attr_rng_current);
+	device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
 err_misc_dereg:
 	misc_deregister(&rng_miscdev);
 	goto out;
diff --git a/drivers/char/ip2/i2cmd.h b/drivers/char/ip2/i2cmd.h
index baa4e72..29277ec 100644
--- a/drivers/char/ip2/i2cmd.h
+++ b/drivers/char/ip2/i2cmd.h
@@ -367,11 +367,6 @@
 #define CSE_NULL  3  // Replace with a null
 #define CSE_MARK  4  // Replace with a 3-character sequence (as Unix)
 
-#define  CMD_SET_REPLACEMENT(arg,ch)   \
-			(((cmdSyntaxPtr)(ct36a))->cmd[1] = (arg), \
-			(((cmdSyntaxPtr)(ct36a))->cmd[2] = (ch),  \
-			(cmdSyntaxPtr)(ct36a))
-
 #define CSE_REPLACE  0x8	// Replace the errored character with the
 							// replacement character defined here
 
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c
index 54d93f0..7804576 100644
--- a/drivers/char/ip2/i2lib.c
+++ b/drivers/char/ip2/i2lib.c
@@ -84,8 +84,8 @@
 static void serviceOutgoingFifo(i2eBordStrPtr);
 
 // Functions defined in ip2.c as part of interrupt handling
-static void do_input(void *);
-static void do_status(void *);
+static void do_input(struct work_struct *);
+static void do_status(struct work_struct *);
 
 //***************
 //* Debug  Data *
@@ -331,8 +331,8 @@
 		pCh->ClosingWaitTime  = 30*HZ;
 
 		// Initialize task queue objects
-		INIT_WORK(&pCh->tqueue_input, do_input, pCh);
-		INIT_WORK(&pCh->tqueue_status, do_status, pCh);
+		INIT_WORK(&pCh->tqueue_input, do_input);
+		INIT_WORK(&pCh->tqueue_status, do_status);
 
 #ifdef IP2DEBUG_TRACE
 		pCh->trace = ip2trace;
@@ -1016,7 +1016,6 @@
 	unsigned short channel;
 	unsigned short stuffIndex;
 	unsigned long flags;
-	int rc = 0;
 
 	int bailout = 10;
 
@@ -1573,7 +1572,7 @@
 #ifdef USE_IQ
 			schedule_work(&pCh->tqueue_input);
 #else
-			do_input(pCh);
+			do_input(&pCh->tqueue_input);
 #endif
 
 			// Note we do not need to maintain any flow-control credits at this
@@ -1810,7 +1809,7 @@
 #ifdef USE_IQ
 						schedule_work(&pCh->tqueue_status);
 #else
-						do_status(pCh);
+						do_status(&pCh->tqueue_status);
 #endif
 					}
 				}
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index a3f32d4..cda2459 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -189,12 +189,12 @@
 			 unsigned int set, unsigned int clear);
 
 static void set_irq(int, int);
-static void ip2_interrupt_bh(i2eBordStrPtr pB);
+static void ip2_interrupt_bh(struct work_struct *work);
 static irqreturn_t ip2_interrupt(int irq, void *dev_id);
 static void ip2_poll(unsigned long arg);
 static inline void service_all_boards(void);
-static void do_input(void *p);
-static void do_status(void *p);
+static void do_input(struct work_struct *);
+static void do_status(struct work_struct *);
 
 static void ip2_wait_until_sent(PTTY,int);
 
@@ -918,7 +918,7 @@
 		pCh++;
 	}
 ex_exit:
-	INIT_WORK(&pB->tqueue_interrupt, (void(*)(void*)) ip2_interrupt_bh, pB);
+	INIT_WORK(&pB->tqueue_interrupt, ip2_interrupt_bh);
 	return;
 
 err_release_region:
@@ -1125,8 +1125,8 @@
 
 
 /******************************************************************************/
-/* Function:   ip2_interrupt_bh(pB)                                           */
-/* Parameters: pB - pointer to the board structure                            */
+/* Function:   ip2_interrupt_bh(work)                                         */
+/* Parameters: work - pointer to the board structure                          */
 /* Returns:    Nothing                                                        */
 /*                                                                            */
 /* Description:                                                               */
@@ -1135,8 +1135,9 @@
 /*                                                                            */
 /******************************************************************************/
 static void
-ip2_interrupt_bh(i2eBordStrPtr pB)
+ip2_interrupt_bh(struct work_struct *work)
 {
+	i2eBordStrPtr pB = container_of(work, i2eBordStr, tqueue_interrupt);
 //	pB better well be set or we have a problem!  We can only get
 //	here from the IMMEDIATE queue.  Here, we process the boards.
 //	Checking pB doesn't cost much and it saves us from the sanity checkers.
@@ -1245,9 +1246,9 @@
 	ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 );
 }
 
-static void do_input(void *p)
+static void do_input(struct work_struct *work)
 {
-	i2ChanStrPtr pCh = p;
+	i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_input);
 	unsigned long flags;
 
 	ip2trace(CHANN, ITRC_INPUT, 21, 0 );
@@ -1279,9 +1280,9 @@
 	}
 }
 
-static void do_status(void *p)
+static void do_status(struct work_struct *work)
 {
-	i2ChanStrPtr pCh = p;
+	i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_status);
 	int status;
 
 	status =  i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) );
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index 0030cd8..6c59baa 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -33,11 +33,13 @@
 #include <linux/ipmi_msgdefs.h>		/* for completion codes */
 #include "ipmi_si_sm.h"
 
-static int bt_debug = 0x00;	/* Production value 0, see following flags */
+#define BT_DEBUG_OFF	0	/* Used in production */
+#define BT_DEBUG_ENABLE	1	/* Generic messages */
+#define BT_DEBUG_MSG	2	/* Prints all request/response buffers */
+#define BT_DEBUG_STATES	4	/* Verbose look at state changes */
 
-#define	BT_DEBUG_ENABLE	1
-#define BT_DEBUG_MSG	2
-#define BT_DEBUG_STATES	4
+static int bt_debug = BT_DEBUG_OFF;
+
 module_param(bt_debug, int, 0644);
 MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
 
@@ -47,38 +49,54 @@
    Since the Open IPMI architecture is single-message oriented at this
    stage, the queue depth of BT is of no concern. */
 
-#define BT_NORMAL_TIMEOUT	5000000	/* seconds in microseconds */
-#define BT_RETRY_LIMIT		2
-#define BT_RESET_DELAY		6000000	/* 6 seconds after warm reset */
+#define BT_NORMAL_TIMEOUT	5	/* seconds */
+#define BT_NORMAL_RETRY_LIMIT	2
+#define BT_RESET_DELAY		6	/* seconds after warm reset */
+
+/* States are written in chronological order and usually cover
+   multiple rows of the state table discussion in the IPMI spec. */
 
 enum bt_states {
-	BT_STATE_IDLE,
+	BT_STATE_IDLE = 0,	/* Order is critical in this list */
 	BT_STATE_XACTION_START,
 	BT_STATE_WRITE_BYTES,
-	BT_STATE_WRITE_END,
 	BT_STATE_WRITE_CONSUME,
-	BT_STATE_B2H_WAIT,
-	BT_STATE_READ_END,
-	BT_STATE_RESET1,		/* These must come last */
+	BT_STATE_READ_WAIT,
+	BT_STATE_CLEAR_B2H,
+	BT_STATE_READ_BYTES,
+	BT_STATE_RESET1,	/* These must come last */
 	BT_STATE_RESET2,
 	BT_STATE_RESET3,
 	BT_STATE_RESTART,
-	BT_STATE_HOSED
+	BT_STATE_PRINTME,
+	BT_STATE_CAPABILITIES_BEGIN,
+	BT_STATE_CAPABILITIES_END,
+	BT_STATE_LONG_BUSY	/* BT doesn't get hosed :-) */
 };
 
+/* Macros seen at the end of state "case" blocks.  They help with legibility
+   and debugging. */
+
+#define BT_STATE_CHANGE(X,Y) { bt->state = X; return Y; }
+
+#define BT_SI_SM_RETURN(Y)   { last_printed = BT_STATE_PRINTME; return Y; }
+
 struct si_sm_data {
 	enum bt_states	state;
-	enum bt_states	last_state;	/* assist printing and resets */
 	unsigned char	seq;		/* BT sequence number */
 	struct si_sm_io	*io;
-        unsigned char	write_data[IPMI_MAX_MSG_LENGTH];
-        int		write_count;
-        unsigned char	read_data[IPMI_MAX_MSG_LENGTH];
-        int		read_count;
-        int		truncated;
-        long		timeout;
-        unsigned int	error_retries;	/* end of "common" fields */
+	unsigned char	write_data[IPMI_MAX_MSG_LENGTH];
+	int		write_count;
+	unsigned char	read_data[IPMI_MAX_MSG_LENGTH];
+	int		read_count;
+	int		truncated;
+	long		timeout;	/* microseconds countdown */
+	int		error_retries;	/* end of "common" fields */
 	int		nonzero_status;	/* hung BMCs stay all 0 */
+	enum bt_states	complete;	/* to divert the state machine */
+	int		BT_CAP_outreqs;
+	long		BT_CAP_req2rsp;
+	int		BT_CAP_retries;	/* Recommended retries */
 };
 
 #define BT_CLR_WR_PTR	0x01	/* See IPMI 1.5 table 11.6.4 */
@@ -111,86 +129,118 @@
 static char *state2txt(unsigned char state)
 {
 	switch (state) {
-		case BT_STATE_IDLE:		return("IDLE");
-		case BT_STATE_XACTION_START:	return("XACTION");
-		case BT_STATE_WRITE_BYTES:	return("WR_BYTES");
-		case BT_STATE_WRITE_END:	return("WR_END");
-		case BT_STATE_WRITE_CONSUME:	return("WR_CONSUME");
-		case BT_STATE_B2H_WAIT:		return("B2H_WAIT");
-		case BT_STATE_READ_END:		return("RD_END");
-		case BT_STATE_RESET1:		return("RESET1");
-		case BT_STATE_RESET2:		return("RESET2");
-		case BT_STATE_RESET3:		return("RESET3");
-		case BT_STATE_RESTART:		return("RESTART");
-		case BT_STATE_HOSED:		return("HOSED");
+	case BT_STATE_IDLE:		return("IDLE");
+	case BT_STATE_XACTION_START:	return("XACTION");
+	case BT_STATE_WRITE_BYTES:	return("WR_BYTES");
+	case BT_STATE_WRITE_CONSUME:	return("WR_CONSUME");
+	case BT_STATE_READ_WAIT:	return("RD_WAIT");
+	case BT_STATE_CLEAR_B2H:	return("CLEAR_B2H");
+	case BT_STATE_READ_BYTES:	return("RD_BYTES");
+	case BT_STATE_RESET1:		return("RESET1");
+	case BT_STATE_RESET2:		return("RESET2");
+	case BT_STATE_RESET3:		return("RESET3");
+	case BT_STATE_RESTART:		return("RESTART");
+	case BT_STATE_LONG_BUSY:	return("LONG_BUSY");
+	case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN");
+	case BT_STATE_CAPABILITIES_END:	return("CAP_END");
 	}
 	return("BAD STATE");
 }
 #define STATE2TXT state2txt(bt->state)
 
-static char *status2txt(unsigned char status, char *buf)
+static char *status2txt(unsigned char status)
 {
+	/*
+	 * This cannot be called by two threads at the same time and
+	 * the buffer is always consumed immediately, so the static is
+	 * safe to use.
+	 */
+	static char buf[40];
+
 	strcpy(buf, "[ ");
-	if (status & BT_B_BUSY) strcat(buf, "B_BUSY ");
-	if (status & BT_H_BUSY) strcat(buf, "H_BUSY ");
-	if (status & BT_OEM0) strcat(buf, "OEM0 ");
-	if (status & BT_SMS_ATN) strcat(buf, "SMS ");
-	if (status & BT_B2H_ATN) strcat(buf, "B2H ");
-	if (status & BT_H2B_ATN) strcat(buf, "H2B ");
+	if (status & BT_B_BUSY)
+		strcat(buf, "B_BUSY ");
+	if (status & BT_H_BUSY)
+		strcat(buf, "H_BUSY ");
+	if (status & BT_OEM0)
+		strcat(buf, "OEM0 ");
+	if (status & BT_SMS_ATN)
+		strcat(buf, "SMS ");
+	if (status & BT_B2H_ATN)
+		strcat(buf, "B2H ");
+	if (status & BT_H2B_ATN)
+		strcat(buf, "H2B ");
 	strcat(buf, "]");
 	return buf;
 }
-#define STATUS2TXT(buf) status2txt(status, buf)
+#define STATUS2TXT status2txt(status)
 
-/* This will be called from within this module on a hosed condition */
-#define FIRST_SEQ	0
+/* called externally at insmod time, and internally on cleanup */
+
 static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
 {
-	bt->state = BT_STATE_IDLE;
-	bt->last_state = BT_STATE_IDLE;
-	bt->seq = FIRST_SEQ;
-	bt->io = io;
-	bt->write_count = 0;
-	bt->read_count = 0;
-	bt->error_retries = 0;
-	bt->nonzero_status = 0;
-	bt->truncated = 0;
-	bt->timeout = BT_NORMAL_TIMEOUT;
+	memset(bt, 0, sizeof(struct si_sm_data));
+	if (bt->io != io) {		/* external: one-time only things */
+		bt->io = io;
+		bt->seq = 0;
+	}
+	bt->state = BT_STATE_IDLE;	/* start here */
+	bt->complete = BT_STATE_IDLE;	/* end here */
+	bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * 1000000;
+	bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
+	/* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
 	return 3; /* We claim 3 bytes of space; ought to check SPMI table */
 }
 
+/* Jam a completion code (probably an error) into a response */
+
+static void force_result(struct si_sm_data *bt, unsigned char completion_code)
+{
+	bt->read_data[0] = 4;				/* # following bytes */
+	bt->read_data[1] = bt->write_data[1] | 4;	/* Odd NetFn/LUN */
+	bt->read_data[2] = bt->write_data[2];		/* seq (ignored) */
+	bt->read_data[3] = bt->write_data[3];		/* Command */
+	bt->read_data[4] = completion_code;
+	bt->read_count = 5;
+}
+
+/* The upper state machine starts here */
+
 static int bt_start_transaction(struct si_sm_data *bt,
 				unsigned char *data,
 				unsigned int size)
 {
 	unsigned int i;
 
-	if ((size < 2) || (size > (IPMI_MAX_MSG_LENGTH - 2)))
-	       return -1;
+	if (size < 2)
+		return IPMI_REQ_LEN_INVALID_ERR;
+	if (size > IPMI_MAX_MSG_LENGTH)
+		return IPMI_REQ_LEN_EXCEEDED_ERR;
 
-	if ((bt->state != BT_STATE_IDLE) && (bt->state != BT_STATE_HOSED))
-		return -2;
+	if (bt->state == BT_STATE_LONG_BUSY)
+		return IPMI_NODE_BUSY_ERR;
+
+	if (bt->state != BT_STATE_IDLE)
+		return IPMI_NOT_IN_MY_STATE_ERR;
 
 	if (bt_debug & BT_DEBUG_MSG) {
-    		printk(KERN_WARNING "+++++++++++++++++++++++++++++++++++++\n");
-		printk(KERN_WARNING "BT: write seq=0x%02X:", bt->seq);
+		printk(KERN_WARNING "BT: +++++++++++++++++ New command\n");
+		printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2);
 		for (i = 0; i < size; i ++)
-		       printk (" %02x", data[i]);
+			printk (" %02x", data[i]);
 		printk("\n");
 	}
 	bt->write_data[0] = size + 1;	/* all data plus seq byte */
 	bt->write_data[1] = *data;	/* NetFn/LUN */
-	bt->write_data[2] = bt->seq;
+	bt->write_data[2] = bt->seq++;
 	memcpy(bt->write_data + 3, data + 1, size - 1);
 	bt->write_count = size + 2;
-
 	bt->error_retries = 0;
 	bt->nonzero_status = 0;
-	bt->read_count = 0;
 	bt->truncated = 0;
 	bt->state = BT_STATE_XACTION_START;
-	bt->last_state = BT_STATE_IDLE;
-	bt->timeout = BT_NORMAL_TIMEOUT;
+	bt->timeout = bt->BT_CAP_req2rsp;
+	force_result(bt, IPMI_ERR_UNSPECIFIED);
 	return 0;
 }
 
@@ -198,38 +248,30 @@
    it calls this.  Strip out the length and seq bytes. */
 
 static int bt_get_result(struct si_sm_data *bt,
-			   unsigned char *data,
-			   unsigned int length)
+			 unsigned char *data,
+			 unsigned int length)
 {
 	int i, msg_len;
 
 	msg_len = bt->read_count - 2;		/* account for length & seq */
-	/* Always NetFn, Cmd, cCode */
 	if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) {
-		printk(KERN_DEBUG "BT results: bad msg_len = %d\n", msg_len);
-		data[0] = bt->write_data[1] | 0x4;	/* Kludge a response */
-		data[1] = bt->write_data[3];
-		data[2] = IPMI_ERR_UNSPECIFIED;
+		force_result(bt, IPMI_ERR_UNSPECIFIED);
 		msg_len = 3;
-	} else {
-		data[0] = bt->read_data[1];
-		data[1] = bt->read_data[3];
-		if (length < msg_len)
-		       bt->truncated = 1;
-		if (bt->truncated) {	/* can be set in read_all_bytes() */
-			data[2] = IPMI_ERR_MSG_TRUNCATED;
-			msg_len = 3;
-		} else
-		       memcpy(data + 2, bt->read_data + 4, msg_len - 2);
-
-		if (bt_debug & BT_DEBUG_MSG) {
-			printk (KERN_WARNING "BT: res (raw)");
-			for (i = 0; i < msg_len; i++)
-			       printk(" %02x", data[i]);
-			printk ("\n");
-		}
 	}
-	bt->read_count = 0;	/* paranoia */
+	data[0] = bt->read_data[1];
+	data[1] = bt->read_data[3];
+	if (length < msg_len || bt->truncated) {
+		data[2] = IPMI_ERR_MSG_TRUNCATED;
+		msg_len = 3;
+	} else
+		memcpy(data + 2, bt->read_data + 4, msg_len - 2);
+
+	if (bt_debug & BT_DEBUG_MSG) {
+		printk (KERN_WARNING "BT: result %d bytes:", msg_len);
+		for (i = 0; i < msg_len; i++)
+			printk(" %02x", data[i]);
+		printk ("\n");
+	}
 	return msg_len;
 }
 
@@ -238,22 +280,40 @@
 
 static void reset_flags(struct si_sm_data *bt)
 {
+	if (bt_debug)
+		printk(KERN_WARNING "IPMI BT: flag reset %s\n",
+					status2txt(BT_STATUS));
 	if (BT_STATUS & BT_H_BUSY)
-	       BT_CONTROL(BT_H_BUSY);
-	if (BT_STATUS & BT_B_BUSY)
-	       BT_CONTROL(BT_B_BUSY);
-	BT_CONTROL(BT_CLR_WR_PTR);
-	BT_CONTROL(BT_SMS_ATN);
+		BT_CONTROL(BT_H_BUSY);	/* force clear */
+	BT_CONTROL(BT_CLR_WR_PTR);	/* always reset */
+	BT_CONTROL(BT_SMS_ATN);		/* always clear */
+	BT_INTMASK_W(BT_BMC_HWRST);
+}
 
-	if (BT_STATUS & BT_B2H_ATN) {
-		int i;
-		BT_CONTROL(BT_H_BUSY);
-		BT_CONTROL(BT_B2H_ATN);
-		BT_CONTROL(BT_CLR_RD_PTR);
-		for (i = 0; i < IPMI_MAX_MSG_LENGTH + 2; i++)
-		       BMC2HOST;
-		BT_CONTROL(BT_H_BUSY);
-	}
+/* Get rid of an unwanted/stale response.  This should only be needed for
+   BMCs that support multiple outstanding requests. */
+
+static void drain_BMC2HOST(struct si_sm_data *bt)
+{
+	int i, size;
+
+	if (!(BT_STATUS & BT_B2H_ATN)) 	/* Not signalling a response */
+		return;
+
+	BT_CONTROL(BT_H_BUSY);		/* now set */
+	BT_CONTROL(BT_B2H_ATN);		/* always clear */
+	BT_STATUS;			/* pause */
+	BT_CONTROL(BT_B2H_ATN);		/* some BMCs are stubborn */
+	BT_CONTROL(BT_CLR_RD_PTR);	/* always reset */
+	if (bt_debug)
+		printk(KERN_WARNING "IPMI BT: stale response %s; ",
+			status2txt(BT_STATUS));
+	size = BMC2HOST;
+	for (i = 0; i < size ; i++)
+		BMC2HOST;
+	BT_CONTROL(BT_H_BUSY);		/* now clear */
+	if (bt_debug)
+		printk("drained %d bytes\n", size + 1);
 }
 
 static inline void write_all_bytes(struct si_sm_data *bt)
@@ -261,201 +321,256 @@
 	int i;
 
 	if (bt_debug & BT_DEBUG_MSG) {
-    		printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
+		printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
 			bt->write_count, bt->seq);
 		for (i = 0; i < bt->write_count; i++)
 			printk (" %02x", bt->write_data[i]);
 		printk ("\n");
 	}
 	for (i = 0; i < bt->write_count; i++)
-	       HOST2BMC(bt->write_data[i]);
+		HOST2BMC(bt->write_data[i]);
 }
 
 static inline int read_all_bytes(struct si_sm_data *bt)
 {
 	unsigned char i;
 
+	/* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
+	   Keep layout of first four bytes aligned with write_data[] */
+
 	bt->read_data[0] = BMC2HOST;
 	bt->read_count = bt->read_data[0];
-	if (bt_debug & BT_DEBUG_MSG)
-    		printk(KERN_WARNING "BT: read %d bytes:", bt->read_count);
 
-	/* minimum: length, NetFn, Seq, Cmd, cCode == 5 total, or 4 more
-	   following the length byte. */
 	if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) {
 		if (bt_debug & BT_DEBUG_MSG)
-			printk("bad length %d\n", bt->read_count);
+			printk(KERN_WARNING "BT: bad raw rsp len=%d\n",
+				bt->read_count);
 		bt->truncated = 1;
 		return 1;	/* let next XACTION START clean it up */
 	}
 	for (i = 1; i <= bt->read_count; i++)
-	       bt->read_data[i] = BMC2HOST;
-	bt->read_count++;	/* account for the length byte */
+		bt->read_data[i] = BMC2HOST;
+	bt->read_count++;	/* Account internally for length byte */
 
 	if (bt_debug & BT_DEBUG_MSG) {
-	    	for (i = 0; i < bt->read_count; i++)
-			printk (" %02x", bt->read_data[i]);
-	    	printk ("\n");
-	}
-	if (bt->seq != bt->write_data[2])	/* idiot check */
-		printk(KERN_DEBUG "BT: internal error: sequence mismatch\n");
+		int max = bt->read_count;
 
-	/* per the spec, the (NetFn, Seq, Cmd) tuples should match */
-	if ((bt->read_data[3] == bt->write_data[3]) &&		/* Cmd */
-        	(bt->read_data[2] == bt->write_data[2]) &&	/* Sequence */
-        	((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
+		printk(KERN_WARNING "BT: got %d bytes seq=0x%02X",
+			max, bt->read_data[2]);
+		if (max > 16)
+			max = 16;
+		for (i = 0; i < max; i++)
+			printk (" %02x", bt->read_data[i]);
+		printk ("%s\n", bt->read_count == max ? "" : " ...");
+	}
+
+	/* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */
+	if ((bt->read_data[3] == bt->write_data[3]) &&
+	    (bt->read_data[2] == bt->write_data[2]) &&
+	    ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
 			return 1;
 
 	if (bt_debug & BT_DEBUG_MSG)
-	       printk(KERN_WARNING "BT: bad packet: "
+		printk(KERN_WARNING "IPMI BT: bad packet: "
 		"want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n",
-		bt->write_data[1], bt->write_data[2], bt->write_data[3],
+		bt->write_data[1] | 0x04, bt->write_data[2], bt->write_data[3],
 		bt->read_data[1],  bt->read_data[2],  bt->read_data[3]);
 	return 0;
 }
 
-/* Modifies bt->state appropriately, need to get into the bt_event() switch */
+/* Restart if retries are left, or return an error completion code */
 
-static void error_recovery(struct si_sm_data *bt, char *reason)
+static enum si_sm_result error_recovery(struct si_sm_data *bt,
+					unsigned char status,
+					unsigned char cCode)
 {
-	unsigned char status;
-	char buf[40]; /* For getting status */
+	char *reason;
 
-	bt->timeout = BT_NORMAL_TIMEOUT; /* various places want to retry */
+	bt->timeout = bt->BT_CAP_req2rsp;
 
-	status = BT_STATUS;
-	printk(KERN_DEBUG "BT: %s in %s %s\n", reason, STATE2TXT,
-	       STATUS2TXT(buf));
+	switch (cCode) {
+	case IPMI_TIMEOUT_ERR:
+		reason = "timeout";
+		break;
+	default:
+		reason = "internal error";
+		break;
+	}
 
+	printk(KERN_WARNING "IPMI BT: %s in %s %s ", 	/* open-ended line */
+		reason, STATE2TXT, STATUS2TXT);
+
+	/* Per the IPMI spec, retries are based on the sequence number
+	   known only to this module, so manage a restart here. */
 	(bt->error_retries)++;
-	if (bt->error_retries > BT_RETRY_LIMIT) {
-		printk(KERN_DEBUG "retry limit (%d) exceeded\n", BT_RETRY_LIMIT);
-		bt->state = BT_STATE_HOSED;
-		if (!bt->nonzero_status)
-			printk(KERN_ERR "IPMI: BT stuck, try power cycle\n");
-		else if (bt->error_retries <= BT_RETRY_LIMIT + 1) {
-			printk(KERN_DEBUG "IPMI: BT reset (takes 5 secs)\n");
-        		bt->state = BT_STATE_RESET1;
+	if (bt->error_retries < bt->BT_CAP_retries) {
+		printk("%d retries left\n",
+			bt->BT_CAP_retries - bt->error_retries);
+		bt->state = BT_STATE_RESTART;
+		return SI_SM_CALL_WITHOUT_DELAY;
+	}
+
+	printk("failed %d retries, sending error response\n",
+		bt->BT_CAP_retries);
+	if (!bt->nonzero_status)
+		printk(KERN_ERR "IPMI BT: stuck, try power cycle\n");
+
+	/* this is most likely during insmod */
+	else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) {
+		printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n");
+		bt->state = BT_STATE_RESET1;
+		return SI_SM_CALL_WITHOUT_DELAY;
+	}
+
+	/* Concoct a useful error message, set up the next state, and
+	   be done with this sequence. */
+
+	bt->state = BT_STATE_IDLE;
+	switch (cCode) {
+	case IPMI_TIMEOUT_ERR:
+		if (status & BT_B_BUSY) {
+			cCode = IPMI_NODE_BUSY_ERR;
+			bt->state = BT_STATE_LONG_BUSY;
 		}
-	return;
+		break;
+	default:
+		break;
 	}
-
-	/* Sometimes the BMC queues get in an "off-by-one" state...*/
-	if ((bt->state == BT_STATE_B2H_WAIT) && (status & BT_B2H_ATN)) {
-    		printk(KERN_DEBUG "retry B2H_WAIT\n");
-		return;
-	}
-
-	printk(KERN_DEBUG "restart command\n");
-	bt->state = BT_STATE_RESTART;
+	force_result(bt, cCode);
+	return SI_SM_TRANSACTION_COMPLETE;
 }
 
-/* Check the status and (possibly) advance the BT state machine.  The
-   default return is SI_SM_CALL_WITH_DELAY. */
+/* Check status and (usually) take action and change this state machine. */
 
 static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 {
-	unsigned char status;
-	char buf[40]; /* For getting status */
+	unsigned char status, BT_CAP[8];
+	static enum bt_states last_printed = BT_STATE_PRINTME;
 	int i;
 
 	status = BT_STATUS;
 	bt->nonzero_status |= status;
-
-	if ((bt_debug & BT_DEBUG_STATES) && (bt->state != bt->last_state))
+	if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) {
 		printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n",
 			STATE2TXT,
-			STATUS2TXT(buf),
+			STATUS2TXT,
 			bt->timeout,
 			time);
-	bt->last_state = bt->state;
+		last_printed = bt->state;
+	}
 
-	if (bt->state == BT_STATE_HOSED)
-	       return SI_SM_HOSED;
+	/* Commands that time out may still (eventually) provide a response.
+	   This stale response will get in the way of a new response so remove
+	   it if possible (hopefully during IDLE).  Even if it comes up later
+	   it will be rejected by its (now-forgotten) seq number. */
 
-	if (bt->state != BT_STATE_IDLE) {	/* do timeout test */
+	if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
+		drain_BMC2HOST(bt);
+		BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+	}
+
+	if ((bt->state != BT_STATE_IDLE) &&
+	    (bt->state <  BT_STATE_PRINTME)) {		/* check timeout */
 		bt->timeout -= time;
-		if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) {
-			error_recovery(bt, "timed out");
-			return SI_SM_CALL_WITHOUT_DELAY;
-		}
+		if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
+			return error_recovery(bt,
+					      status,
+					      IPMI_TIMEOUT_ERR);
 	}
 
 	switch (bt->state) {
 
-    	case BT_STATE_IDLE:	/* check for asynchronous messages */
+	/* Idle state first checks for asynchronous messages from another
+	   channel, then does some opportunistic housekeeping. */
+
+	case BT_STATE_IDLE:
 		if (status & BT_SMS_ATN) {
 			BT_CONTROL(BT_SMS_ATN);	/* clear it */
 			return SI_SM_ATTN;
 		}
-		return SI_SM_IDLE;
+
+		if (status & BT_H_BUSY)		/* clear a leftover H_BUSY */
+			BT_CONTROL(BT_H_BUSY);
+
+		/* Read BT capabilities if it hasn't been done yet */
+		if (!bt->BT_CAP_outreqs)
+			BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
+					SI_SM_CALL_WITHOUT_DELAY);
+		bt->timeout = bt->BT_CAP_req2rsp;
+		BT_SI_SM_RETURN(SI_SM_IDLE);
 
 	case BT_STATE_XACTION_START:
-		if (status & BT_H_BUSY) {
-			BT_CONTROL(BT_H_BUSY);
-			break;
-		}
-    		if (status & BT_B2H_ATN)
-		       break;
-		bt->state = BT_STATE_WRITE_BYTES;
-		return SI_SM_CALL_WITHOUT_DELAY;	/* for logging */
+		if (status & (BT_B_BUSY | BT_H2B_ATN))
+			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+		if (BT_STATUS & BT_H_BUSY)
+			BT_CONTROL(BT_H_BUSY);	/* force clear */
+		BT_STATE_CHANGE(BT_STATE_WRITE_BYTES,
+				SI_SM_CALL_WITHOUT_DELAY);
 
 	case BT_STATE_WRITE_BYTES:
-		if (status & (BT_B_BUSY | BT_H2B_ATN))
-		       break;
+		if (status & BT_H_BUSY)
+			BT_CONTROL(BT_H_BUSY);	/* clear */
 		BT_CONTROL(BT_CLR_WR_PTR);
 		write_all_bytes(bt);
-		BT_CONTROL(BT_H2B_ATN);	/* clears too fast to catch? */
-		bt->state = BT_STATE_WRITE_CONSUME;
-		return SI_SM_CALL_WITHOUT_DELAY; /* it MIGHT sail through */
+		BT_CONTROL(BT_H2B_ATN);	/* can clear too fast to catch */
+		BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME,
+				SI_SM_CALL_WITHOUT_DELAY);
 
-	case BT_STATE_WRITE_CONSUME: /* BMCs usually blow right thru here */
-        	if (status & (BT_H2B_ATN | BT_B_BUSY))
-		       break;
-		bt->state = BT_STATE_B2H_WAIT;
-		/* fall through with status */
+	case BT_STATE_WRITE_CONSUME:
+		if (status & (BT_B_BUSY | BT_H2B_ATN))
+			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+		BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+				SI_SM_CALL_WITHOUT_DELAY);
 
-	/* Stay in BT_STATE_B2H_WAIT until a packet matches.  However, spinning
-	   hard here, constantly reading status, seems to hold off the
-	   generation of B2H_ATN so ALWAYS return CALL_WITH_DELAY. */
+	/* Spinning hard can suppress B2H_ATN and force a timeout */
 
-	case BT_STATE_B2H_WAIT:
-    		if (!(status & BT_B2H_ATN))
-		       break;
+	case BT_STATE_READ_WAIT:
+		if (!(status & BT_B2H_ATN))
+			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+		BT_CONTROL(BT_H_BUSY);		/* set */
 
-		/* Assume ordered, uncached writes: no need to wait */
-		if (!(status & BT_H_BUSY))
-		       BT_CONTROL(BT_H_BUSY); /* set */
-		BT_CONTROL(BT_B2H_ATN);		/* clear it, ACK to the BMC */
-		BT_CONTROL(BT_CLR_RD_PTR);	/* reset the queue */
-		i = read_all_bytes(bt);
-		BT_CONTROL(BT_H_BUSY);		/* clear */
-		if (!i)				/* Try this state again */
-		       break;
-		bt->state = BT_STATE_READ_END;
-		return SI_SM_CALL_WITHOUT_DELAY;	/* for logging */
+		/* Uncached, ordered writes should just proceed serially but
+		   some BMCs don't clear B2H_ATN with one hit.  Fast-path a
+		   workaround without too much penalty to the general case. */
 
-    	case BT_STATE_READ_END:
+		BT_CONTROL(BT_B2H_ATN);		/* clear it to ACK the BMC */
+		BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
+				SI_SM_CALL_WITHOUT_DELAY);
 
-		/* I could wait on BT_H_BUSY to go clear for a truly clean
-		   exit.  However, this is already done in XACTION_START
-		   and the (possible) extra loop/status/possible wait affects
-		   performance.  So, as long as it works, just ignore H_BUSY */
+	case BT_STATE_CLEAR_B2H:
+		if (status & BT_B2H_ATN) {	/* keep hitting it */
+			BT_CONTROL(BT_B2H_ATN);
+			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+		}
+		BT_STATE_CHANGE(BT_STATE_READ_BYTES,
+				SI_SM_CALL_WITHOUT_DELAY);
 
-#ifdef MAKE_THIS_TRUE_IF_NECESSARY
+	case BT_STATE_READ_BYTES:
+		if (!(status & BT_H_BUSY))	/* check in case of retry */
+			BT_CONTROL(BT_H_BUSY);
+		BT_CONTROL(BT_CLR_RD_PTR);	/* start of BMC2HOST buffer */
+		i = read_all_bytes(bt);		/* true == packet seq match */
+		BT_CONTROL(BT_H_BUSY);		/* NOW clear */
+		if (!i) 			/* Not my message */
+			BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+					SI_SM_CALL_WITHOUT_DELAY);
+		bt->state = bt->complete;
+		return bt->state == BT_STATE_IDLE ?	/* where to next? */
+			SI_SM_TRANSACTION_COMPLETE :	/* normal */
+			SI_SM_CALL_WITHOUT_DELAY;	/* Startup magic */
 
-		if (status & BT_H_BUSY)
-		       break;
-#endif
-		bt->seq++;
-		bt->state = BT_STATE_IDLE;
-		return SI_SM_TRANSACTION_COMPLETE;
+	case BT_STATE_LONG_BUSY:	/* For example: after FW update */
+		if (!(status & BT_B_BUSY)) {
+			reset_flags(bt);	/* next state is now IDLE */
+			bt_init_data(bt, bt->io);
+		}
+		return SI_SM_CALL_WITH_DELAY;	/* No repeat printing */
 
 	case BT_STATE_RESET1:
-    		reset_flags(bt);
-    		bt->timeout = BT_RESET_DELAY;
-		bt->state = BT_STATE_RESET2;
-		break;
+		reset_flags(bt);
+		drain_BMC2HOST(bt);
+		BT_STATE_CHANGE(BT_STATE_RESET2,
+				SI_SM_CALL_WITH_DELAY);
 
 	case BT_STATE_RESET2:		/* Send a soft reset */
 		BT_CONTROL(BT_CLR_WR_PTR);
@@ -464,29 +579,59 @@
 		HOST2BMC(42);		/* Sequence number */
 		HOST2BMC(3);		/* Cmd == Soft reset */
 		BT_CONTROL(BT_H2B_ATN);
-		bt->state = BT_STATE_RESET3;
-		break;
+		bt->timeout = BT_RESET_DELAY * 1000000;
+		BT_STATE_CHANGE(BT_STATE_RESET3,
+				SI_SM_CALL_WITH_DELAY);
 
-	case BT_STATE_RESET3:
+	case BT_STATE_RESET3:		/* Hold off everything for a bit */
 		if (bt->timeout > 0)
-		       return SI_SM_CALL_WITH_DELAY;
-		bt->state = BT_STATE_RESTART;	/* printk in debug modes */
-		break;
+			return SI_SM_CALL_WITH_DELAY;
+		drain_BMC2HOST(bt);
+		BT_STATE_CHANGE(BT_STATE_RESTART,
+				SI_SM_CALL_WITH_DELAY);
 
-	case BT_STATE_RESTART:		/* don't reset retries! */
-		reset_flags(bt);
-		bt->write_data[2] = ++bt->seq;
+	case BT_STATE_RESTART:		/* don't reset retries or seq! */
 		bt->read_count = 0;
 		bt->nonzero_status = 0;
-		bt->timeout = BT_NORMAL_TIMEOUT;
-		bt->state = BT_STATE_XACTION_START;
-		break;
+		bt->timeout = bt->BT_CAP_req2rsp;
+		BT_STATE_CHANGE(BT_STATE_XACTION_START,
+				SI_SM_CALL_WITH_DELAY);
 
-	default:	/* HOSED is supposed to be caught much earlier */
-		error_recovery(bt, "internal logic error");
-		break;
-  	}
-  	return SI_SM_CALL_WITH_DELAY;
+	/* Get BT Capabilities, using timing of upper level state machine.
+	   Set outreqs to prevent infinite loop on timeout. */
+	case BT_STATE_CAPABILITIES_BEGIN:
+		bt->BT_CAP_outreqs = 1;
+		{
+			unsigned char GetBT_CAP[] = { 0x18, 0x36 };
+			bt->state = BT_STATE_IDLE;
+			bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
+		}
+		bt->complete = BT_STATE_CAPABILITIES_END;
+		BT_STATE_CHANGE(BT_STATE_XACTION_START,
+				SI_SM_CALL_WITH_DELAY);
+
+	case BT_STATE_CAPABILITIES_END:
+		i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
+		bt_init_data(bt, bt->io);
+		if ((i == 8) && !BT_CAP[2]) {
+			bt->BT_CAP_outreqs = BT_CAP[3];
+			bt->BT_CAP_req2rsp = BT_CAP[6] * 1000000;
+			bt->BT_CAP_retries = BT_CAP[7];
+		} else
+			printk(KERN_WARNING "IPMI BT: using default values\n");
+		if (!bt->BT_CAP_outreqs)
+			bt->BT_CAP_outreqs = 1;
+		printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
+			bt->BT_CAP_req2rsp / 1000000L, bt->BT_CAP_retries);
+		bt->timeout = bt->BT_CAP_req2rsp;
+		return SI_SM_CALL_WITHOUT_DELAY;
+
+	default:	/* should never occur */
+		return error_recovery(bt,
+				      status,
+				      IPMI_ERR_UNSPECIFIED);
+	}
+	return SI_SM_CALL_WITH_DELAY;
 }
 
 static int bt_detect(struct si_sm_data *bt)
@@ -497,7 +642,7 @@
 	   test that first.  The calling routine uses negative logic. */
 
 	if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
-	       return 1;
+		return 1;
 	reset_flags(bt);
 	return 0;
 }
@@ -513,11 +658,11 @@
 
 struct si_sm_handlers bt_smi_handlers =
 {
-	.init_data         = bt_init_data,
-	.start_transaction = bt_start_transaction,
-	.get_result        = bt_get_result,
-	.event             = bt_event,
-	.detect            = bt_detect,
-	.cleanup           = bt_cleanup,
-	.size              = bt_size,
+	.init_data		= bt_init_data,
+	.start_transaction	= bt_start_transaction,
+	.get_result		= bt_get_result,
+	.event			= bt_event,
+	.detect			= bt_detect,
+	.cleanup		= bt_cleanup,
+	.size			= bt_size,
 };
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 81fcf0c..375d337 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -596,6 +596,31 @@
 		rv = 0;
 		break;
 	}
+
+	case IPMICTL_GET_MAINTENANCE_MODE_CMD:
+	{
+		int mode;
+
+		mode = ipmi_get_maintenance_mode(priv->user);
+		if (copy_to_user(arg, &mode, sizeof(mode))) {
+			rv = -EFAULT;
+			break;
+		}
+		rv = 0;
+		break;
+	}
+
+	case IPMICTL_SET_MAINTENANCE_MODE_CMD:
+	{
+		int mode;
+
+		if (copy_from_user(&mode, arg, sizeof(mode))) {
+			rv = -EFAULT;
+			break;
+		}
+		rv = ipmi_set_maintenance_mode(priv->user, mode);
+		break;
+	}
 	}
   
 	return rv;
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index 2062675..c1b8228 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -93,8 +93,8 @@
 				   state machine. */
 };
 
-#define MAX_KCS_READ_SIZE 80
-#define MAX_KCS_WRITE_SIZE 80
+#define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH
+#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
 
 /* Timeouts in microseconds. */
 #define IBF_RETRY_TIMEOUT 1000000
@@ -261,12 +261,14 @@
 {
 	unsigned int i;
 
-	if ((size < 2) || (size > MAX_KCS_WRITE_SIZE)) {
-		return -1;
-	}
-	if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
-		return -2;
-	}
+	if (size < 2)
+		return IPMI_REQ_LEN_INVALID_ERR;
+	if (size > MAX_KCS_WRITE_SIZE)
+		return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+	if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED))
+		return IPMI_NOT_IN_MY_STATE_ERR;
+
 	if (kcs_debug & KCS_DEBUG_MSG) {
 		printk(KERN_DEBUG "start_kcs_transaction -");
 		for (i = 0; i < size; i ++) {
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c47add8..5703ee2 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -48,7 +48,7 @@
 
 #define PFX "IPMI message handler: "
 
-#define IPMI_DRIVER_VERSION "39.0"
+#define IPMI_DRIVER_VERSION "39.1"
 
 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
@@ -59,6 +59,9 @@
 static struct proc_dir_entry *proc_ipmi_root = NULL;
 #endif /* CONFIG_PROC_FS */
 
+/* Remain in auto-maintenance mode for this amount of time (in ms). */
+#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
+
 #define MAX_EVENTS_IN_QUEUE	25
 
 /* Don't let a message sit in a queue forever, always time it with at least
@@ -193,17 +196,28 @@
 
 	struct kref refcount;
 
+	/* Used for a list of interfaces. */
+	struct list_head link;
+
 	/* The list of upper layers that are using me.  seq_lock
 	 * protects this. */
 	struct list_head users;
 
+	/* Information to supply to users. */
+	unsigned char ipmi_version_major;
+	unsigned char ipmi_version_minor;
+
 	/* Used for wake ups at startup. */
 	wait_queue_head_t waitq;
 
 	struct bmc_device *bmc;
 	char *my_dev_name;
+	char *sysfs_name;
 
-	/* This is the lower-layer's sender routine. */
+	/* This is the lower-layer's sender routine.  Note that you
+	 * must either be holding the ipmi_interfaces_mutex or be in
+	 * a non-preemptible region to use this.  You must fetch the
+	 * value into a local variable and make sure it is not NULL. */
 	struct ipmi_smi_handlers *handlers;
 	void                     *send_info;
 
@@ -242,6 +256,7 @@
 	spinlock_t       events_lock; /* For dealing with event stuff. */
 	struct list_head waiting_events;
 	unsigned int     waiting_events_count; /* How many events in queue? */
+	int              delivering_events;
 
 	/* The event receiver for my BMC, only really used at panic
 	   shutdown as a place to store this. */
@@ -250,6 +265,12 @@
 	unsigned char local_sel_device;
 	unsigned char local_event_generator;
 
+	/* For handling of maintenance mode. */
+	int maintenance_mode;
+	int maintenance_mode_enable;
+	int auto_maintenance_timeout;
+	spinlock_t maintenance_mode_lock; /* Used in a timer... */
+
 	/* A cheap hack, if this is non-null and a message to an
 	   interface comes in with a NULL user, call this routine with
 	   it.  Note that the message will still be freed by the
@@ -338,13 +359,6 @@
 };
 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
 
-/* Used to mark an interface entry that cannot be used but is not a
- * free entry, either, primarily used at creation and deletion time so
- * a slot doesn't get reused too quickly. */
-#define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
-#define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
-				   || (i == IPMI_INVALID_INTERFACE_ENTRY))
-
 /**
  * The driver model view of the IPMI messaging driver.
  */
@@ -354,16 +368,13 @@
 };
 static DEFINE_MUTEX(ipmidriver_mutex);
 
-#define MAX_IPMI_INTERFACES 4
-static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
-
-/* Directly protects the ipmi_interfaces data structure. */
-static DEFINE_SPINLOCK(interfaces_lock);
+static struct list_head ipmi_interfaces = LIST_HEAD_INIT(ipmi_interfaces);
+static DEFINE_MUTEX(ipmi_interfaces_mutex);
 
 /* List of watchers that want to know when smi's are added and
    deleted. */
 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
-static DECLARE_RWSEM(smi_watchers_sem);
+static DEFINE_MUTEX(smi_watchers_mutex);
 
 
 static void free_recv_msg_list(struct list_head *q)
@@ -423,48 +434,84 @@
 	kfree(intf);
 }
 
+struct watcher_entry {
+	int              intf_num;
+	ipmi_smi_t       intf;
+	struct list_head link;
+};
+
 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
 {
-	int           i;
-	unsigned long flags;
+	ipmi_smi_t intf;
+	struct list_head to_deliver = LIST_HEAD_INIT(to_deliver);
+	struct watcher_entry *e, *e2;
 
-	down_write(&smi_watchers_sem);
-	list_add(&(watcher->link), &smi_watchers);
-	up_write(&smi_watchers_sem);
-	spin_lock_irqsave(&interfaces_lock, flags);
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-		ipmi_smi_t intf = ipmi_interfaces[i];
-		if (IPMI_INVALID_INTERFACE(intf))
+	mutex_lock(&smi_watchers_mutex);
+
+	mutex_lock(&ipmi_interfaces_mutex);
+
+	/* Build a list of things to deliver. */
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (intf->intf_num == -1)
 			continue;
-		spin_unlock_irqrestore(&interfaces_lock, flags);
-		watcher->new_smi(i, intf->si_dev);
-		spin_lock_irqsave(&interfaces_lock, flags);
+		e = kmalloc(sizeof(*e), GFP_KERNEL);
+		if (!e)
+			goto out_err;
+		kref_get(&intf->refcount);
+		e->intf = intf;
+		e->intf_num = intf->intf_num;
+		list_add_tail(&e->link, &to_deliver);
 	}
-	spin_unlock_irqrestore(&interfaces_lock, flags);
+
+	/* We will succeed, so add it to the list. */
+	list_add(&watcher->link, &smi_watchers);
+
+	mutex_unlock(&ipmi_interfaces_mutex);
+
+	list_for_each_entry_safe(e, e2, &to_deliver, link) {
+		list_del(&e->link);
+		watcher->new_smi(e->intf_num, e->intf->si_dev);
+		kref_put(&e->intf->refcount, intf_free);
+		kfree(e);
+	}
+
+	mutex_unlock(&smi_watchers_mutex);
+
 	return 0;
+
+ out_err:
+	mutex_unlock(&ipmi_interfaces_mutex);
+	mutex_unlock(&smi_watchers_mutex);
+	list_for_each_entry_safe(e, e2, &to_deliver, link) {
+		list_del(&e->link);
+		kref_put(&e->intf->refcount, intf_free);
+		kfree(e);
+	}
+	return -ENOMEM;
 }
 
 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
 {
-	down_write(&smi_watchers_sem);
+	mutex_lock(&smi_watchers_mutex);
 	list_del(&(watcher->link));
-	up_write(&smi_watchers_sem);
+	mutex_unlock(&smi_watchers_mutex);
 	return 0;
 }
 
+/*
+ * Must be called with smi_watchers_mutex held.
+ */
 static void
 call_smi_watchers(int i, struct device *dev)
 {
 	struct ipmi_smi_watcher *w;
 
-	down_read(&smi_watchers_sem);
 	list_for_each_entry(w, &smi_watchers, link) {
 		if (try_module_get(w->owner)) {
 			w->new_smi(i, dev);
 			module_put(w->owner);
 		}
 	}
-	up_read(&smi_watchers_sem);
 }
 
 static int
@@ -590,6 +637,17 @@
 	}
 }
 
+static void
+deliver_err_response(struct ipmi_recv_msg *msg, int err)
+{
+	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+	msg->msg_data[0] = err;
+	msg->msg.netfn |= 1; /* Convert to a response. */
+	msg->msg.data_len = 1;
+	msg->msg.data = msg->msg_data;
+	deliver_response(msg);
+}
+
 /* Find the next sequence number not being used and add the given
    message with the given timeout to the sequence table.  This must be
    called with the interface's seq_lock held. */
@@ -727,14 +785,8 @@
 	}
 	spin_unlock_irqrestore(&(intf->seq_lock), flags);
 
-	if (msg) {
-		msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
-		msg->msg_data[0] = err;
-		msg->msg.netfn |= 1; /* Convert to a response. */
-		msg->msg.data_len = 1;
-		msg->msg.data = msg->msg_data;
-		deliver_response(msg);
-	}
+	if (msg)
+		deliver_err_response(msg, err);
 
 	return rv;
 }
@@ -776,17 +828,18 @@
 	if (!new_user)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&interfaces_lock, flags);
-	intf = ipmi_interfaces[if_num];
-	if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) {
-		spin_unlock_irqrestore(&interfaces_lock, flags);
-		rv = -EINVAL;
-		goto out_kfree;
+	mutex_lock(&ipmi_interfaces_mutex);
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (intf->intf_num == if_num)
+			goto found;
 	}
+	/* Not found, return an error */
+	rv = -EINVAL;
+	goto out_kfree;
 
+ found:
 	/* Note that each existing user holds a refcount to the interface. */
 	kref_get(&intf->refcount);
-	spin_unlock_irqrestore(&interfaces_lock, flags);
 
 	kref_init(&new_user->refcount);
 	new_user->handler = handler;
@@ -807,6 +860,10 @@
 		}
 	}
 
+	/* Hold the lock so intf->handlers is guaranteed to be good
+	 * until now */
+	mutex_unlock(&ipmi_interfaces_mutex);
+
 	new_user->valid = 1;
 	spin_lock_irqsave(&intf->seq_lock, flags);
 	list_add_rcu(&new_user->link, &intf->users);
@@ -817,6 +874,7 @@
 out_kref:
 	kref_put(&intf->refcount, intf_free);
 out_kfree:
+	mutex_unlock(&ipmi_interfaces_mutex);
 	kfree(new_user);
 	return rv;
 }
@@ -846,6 +904,7 @@
 		    && (intf->seq_table[i].recv_msg->user == user))
 		{
 			intf->seq_table[i].inuse = 0;
+			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
 		}
 	}
 	spin_unlock_irqrestore(&intf->seq_lock, flags);
@@ -872,9 +931,13 @@
 		kfree(rcvr);
 	}
 
-	module_put(intf->handlers->owner);
-	if (intf->handlers->dec_usecount)
-		intf->handlers->dec_usecount(intf->send_info);
+	mutex_lock(&ipmi_interfaces_mutex);
+	if (intf->handlers) {
+		module_put(intf->handlers->owner);
+		if (intf->handlers->dec_usecount)
+			intf->handlers->dec_usecount(intf->send_info);
+	}
+	mutex_unlock(&ipmi_interfaces_mutex);
 
 	kref_put(&intf->refcount, intf_free);
 
@@ -887,8 +950,8 @@
 		      unsigned char *major,
 		      unsigned char *minor)
 {
-	*major = ipmi_version_major(&user->intf->bmc->id);
-	*minor = ipmi_version_minor(&user->intf->bmc->id);
+	*major = user->intf->ipmi_version_major;
+	*minor = user->intf->ipmi_version_minor;
 }
 
 int ipmi_set_my_address(ipmi_user_t   user,
@@ -931,6 +994,65 @@
 	return 0;
 }
 
+int ipmi_get_maintenance_mode(ipmi_user_t user)
+{
+	int           mode;
+	unsigned long flags;
+
+	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
+	mode = user->intf->maintenance_mode;
+	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
+
+	return mode;
+}
+EXPORT_SYMBOL(ipmi_get_maintenance_mode);
+
+static void maintenance_mode_update(ipmi_smi_t intf)
+{
+	if (intf->handlers->set_maintenance_mode)
+		intf->handlers->set_maintenance_mode(
+			intf->send_info, intf->maintenance_mode_enable);
+}
+
+int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
+{
+	int           rv = 0;
+	unsigned long flags;
+	ipmi_smi_t    intf = user->intf;
+
+	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+	if (intf->maintenance_mode != mode) {
+		switch (mode) {
+		case IPMI_MAINTENANCE_MODE_AUTO:
+			intf->maintenance_mode = mode;
+			intf->maintenance_mode_enable
+				= (intf->auto_maintenance_timeout > 0);
+			break;
+
+		case IPMI_MAINTENANCE_MODE_OFF:
+			intf->maintenance_mode = mode;
+			intf->maintenance_mode_enable = 0;
+			break;
+
+		case IPMI_MAINTENANCE_MODE_ON:
+			intf->maintenance_mode = mode;
+			intf->maintenance_mode_enable = 1;
+			break;
+
+		default:
+			rv = -EINVAL;
+			goto out_unlock;
+		}
+
+		maintenance_mode_update(intf);
+	}
+ out_unlock:
+	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_set_maintenance_mode);
+
 int ipmi_set_gets_events(ipmi_user_t user, int val)
 {
 	unsigned long        flags;
@@ -943,20 +1065,33 @@
 	spin_lock_irqsave(&intf->events_lock, flags);
 	user->gets_events = val;
 
-	if (val) {
-		/* Deliver any queued events. */
+	if (intf->delivering_events)
+		/*
+		 * Another thread is delivering events for this, so
+		 * let it handle any new events.
+		 */
+		goto out;
+
+	/* Deliver any queued events. */
+	while (user->gets_events && !list_empty(&intf->waiting_events)) {
 		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
 			list_move_tail(&msg->link, &msgs);
 		intf->waiting_events_count = 0;
+
+		intf->delivering_events = 1;
+		spin_unlock_irqrestore(&intf->events_lock, flags);
+
+		list_for_each_entry_safe(msg, msg2, &msgs, link) {
+			msg->user = user;
+			kref_get(&user->refcount);
+			deliver_response(msg);
+		}
+
+		spin_lock_irqsave(&intf->events_lock, flags);
+		intf->delivering_events = 0;
 	}
 
-	/* Hold the events lock while doing this to preserve order. */
-	list_for_each_entry_safe(msg, msg2, &msgs, link) {
-		msg->user = user;
-		kref_get(&user->refcount);
-		deliver_response(msg);
-	}
-
+ out:
 	spin_unlock_irqrestore(&intf->events_lock, flags);
 
 	return 0;
@@ -1067,7 +1202,8 @@
 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
 {
 	ipmi_smi_t intf = user->intf;
-	intf->handlers->set_run_to_completion(intf->send_info, val);
+	if (intf->handlers)
+		intf->handlers->set_run_to_completion(intf->send_info, val);
 }
 
 static unsigned char
@@ -1178,10 +1314,11 @@
 			  int                  retries,
 			  unsigned int         retry_time_ms)
 {
-	int                  rv = 0;
-	struct ipmi_smi_msg  *smi_msg;
-	struct ipmi_recv_msg *recv_msg;
-	unsigned long        flags;
+	int                      rv = 0;
+	struct ipmi_smi_msg      *smi_msg;
+	struct ipmi_recv_msg     *recv_msg;
+	unsigned long            flags;
+	struct ipmi_smi_handlers *handlers;
 
 
 	if (supplied_recv) {
@@ -1204,6 +1341,13 @@
 		}
 	}
 
+	rcu_read_lock();
+	handlers = intf->handlers;
+	if (!handlers) {
+		rv = -ENODEV;
+		goto out_err;
+	}
+
 	recv_msg->user = user;
 	if (user)
 		kref_get(&user->refcount);
@@ -1246,6 +1390,24 @@
 			goto out_err;
 		}
 
+		if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
+		      && ((msg->cmd == IPMI_COLD_RESET_CMD)
+			  || (msg->cmd == IPMI_WARM_RESET_CMD)))
+		     || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST))
+		{
+			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+			intf->auto_maintenance_timeout
+				= IPMI_MAINTENANCE_MODE_TIMEOUT;
+			if (!intf->maintenance_mode
+			    && !intf->maintenance_mode_enable)
+			{
+				intf->maintenance_mode_enable = 1;
+				maintenance_mode_update(intf);
+			}
+			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+					       flags);
+		}
+
 		if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
 			spin_lock_irqsave(&intf->counter_lock, flags);
 			intf->sent_invalid_commands++;
@@ -1520,11 +1682,14 @@
 		printk("\n");
 	}
 #endif
-	intf->handlers->sender(intf->send_info, smi_msg, priority);
+
+	handlers->sender(intf->send_info, smi_msg, priority);
+	rcu_read_unlock();
 
 	return 0;
 
  out_err:
+	rcu_read_unlock();
 	ipmi_free_smi_msg(smi_msg);
 	ipmi_free_recv_msg(recv_msg);
 	return rv;
@@ -1604,6 +1769,7 @@
 			      -1, 0);
 }
 
+#ifdef CONFIG_PROC_FS
 static int ipmb_file_read_proc(char *page, char **start, off_t off,
 			       int count, int *eof, void *data)
 {
@@ -1692,6 +1858,7 @@
 
 	return (out - ((char *) page));
 }
+#endif /* CONFIG_PROC_FS */
 
 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
 			    read_proc_t *read_proc, write_proc_t *write_proc,
@@ -1817,13 +1984,12 @@
 	struct bmc_device *bmc = dev_get_drvdata(dev);
 
 	return (bmc->id.product_id == id->product_id
-		&& bmc->id.product_id == id->product_id
 		&& bmc->id.device_id == id->device_id);
 }
 
 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
 	struct device_driver *drv,
-	unsigned char product_id, unsigned char device_id)
+	unsigned int product_id, unsigned char device_id)
 {
 	struct prod_dev_id id = {
 		.product_id = product_id,
@@ -1940,6 +2106,9 @@
 
 static void remove_files(struct bmc_device *bmc)
 {
+	if (!bmc->dev)
+		return;
+
 	device_remove_file(&bmc->dev->dev,
 			   &bmc->device_id_attr);
 	device_remove_file(&bmc->dev->dev,
@@ -1973,7 +2142,8 @@
 	bmc = container_of(ref, struct bmc_device, refcount);
 
 	remove_files(bmc);
-	platform_device_unregister(bmc->dev);
+	if (bmc->dev)
+		platform_device_unregister(bmc->dev);
 	kfree(bmc);
 }
 
@@ -1981,7 +2151,11 @@
 {
 	struct bmc_device *bmc = intf->bmc;
 
-	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+	if (intf->sysfs_name) {
+		sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
+		kfree(intf->sysfs_name);
+		intf->sysfs_name = NULL;
+	}
 	if (intf->my_dev_name) {
 		sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
 		kfree(intf->my_dev_name);
@@ -1990,6 +2164,7 @@
 
 	mutex_lock(&ipmidriver_mutex);
 	kref_put(&bmc->refcount, cleanup_bmc_device);
+	intf->bmc = NULL;
 	mutex_unlock(&ipmidriver_mutex);
 }
 
@@ -1997,6 +2172,56 @@
 {
 	int err;
 
+	bmc->device_id_attr.attr.name = "device_id";
+	bmc->device_id_attr.attr.owner = THIS_MODULE;
+	bmc->device_id_attr.attr.mode = S_IRUGO;
+	bmc->device_id_attr.show = device_id_show;
+
+	bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
+	bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
+	bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
+	bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
+
+	bmc->revision_attr.attr.name = "revision";
+	bmc->revision_attr.attr.owner = THIS_MODULE;
+	bmc->revision_attr.attr.mode = S_IRUGO;
+	bmc->revision_attr.show = revision_show;
+
+	bmc->firmware_rev_attr.attr.name = "firmware_revision";
+	bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
+	bmc->firmware_rev_attr.attr.mode = S_IRUGO;
+	bmc->firmware_rev_attr.show = firmware_rev_show;
+
+	bmc->version_attr.attr.name = "ipmi_version";
+	bmc->version_attr.attr.owner = THIS_MODULE;
+	bmc->version_attr.attr.mode = S_IRUGO;
+	bmc->version_attr.show = ipmi_version_show;
+
+	bmc->add_dev_support_attr.attr.name = "additional_device_support";
+	bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
+	bmc->add_dev_support_attr.attr.mode = S_IRUGO;
+	bmc->add_dev_support_attr.show = add_dev_support_show;
+
+	bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
+	bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
+	bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
+	bmc->manufacturer_id_attr.show = manufacturer_id_show;
+
+	bmc->product_id_attr.attr.name = "product_id";
+	bmc->product_id_attr.attr.owner = THIS_MODULE;
+	bmc->product_id_attr.attr.mode = S_IRUGO;
+	bmc->product_id_attr.show = product_id_show;
+
+	bmc->guid_attr.attr.name = "guid";
+	bmc->guid_attr.attr.owner = THIS_MODULE;
+	bmc->guid_attr.attr.mode = S_IRUGO;
+	bmc->guid_attr.show = guid_show;
+
+	bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
+	bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
+	bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
+	bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
+
 	err = device_create_file(&bmc->dev->dev,
 			   &bmc->device_id_attr);
 	if (err) goto out;
@@ -2066,7 +2291,8 @@
 	return err;
 }
 
-static int ipmi_bmc_register(ipmi_smi_t intf)
+static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
+			     const char *sysfs_name)
 {
 	int               rv;
 	struct bmc_device *bmc = intf->bmc;
@@ -2106,9 +2332,39 @@
 		       bmc->id.product_id,
 		       bmc->id.device_id);
 	} else {
-		bmc->dev = platform_device_alloc("ipmi_bmc",
-						 bmc->id.device_id);
+		char name[14];
+		unsigned char orig_dev_id = bmc->id.device_id;
+		int warn_printed = 0;
+
+		snprintf(name, sizeof(name),
+			 "ipmi_bmc.%4.4x", bmc->id.product_id);
+
+		while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
+						 bmc->id.product_id,
+						 bmc->id.device_id))
+		{
+			if (!warn_printed) {
+				printk(KERN_WARNING PFX
+				       "This machine has two different BMCs"
+				       " with the same product id and device"
+				       " id.  This is an error in the"
+				       " firmware; the device id is being"
+				       " incremented to work around the problem."
+				       " Prod ID = 0x%x, Dev ID = 0x%x\n",
+				       bmc->id.product_id, bmc->id.device_id);
+				warn_printed = 1;
+			}
+			bmc->id.device_id++; /* Wraps at 255 */
+			if (bmc->id.device_id == orig_dev_id) {
+				printk(KERN_ERR PFX
+				       "Out of device ids!\n");
+				break;
+			}
+		}
+
+		bmc->dev = platform_device_alloc(name, bmc->id.device_id);
 		if (!bmc->dev) {
+			mutex_unlock(&ipmidriver_mutex);
 			printk(KERN_ERR
 			       "ipmi_msghandler:"
 			       " Unable to allocate platform device\n");
@@ -2121,6 +2377,8 @@
 		rv = platform_device_add(bmc->dev);
 		mutex_unlock(&ipmidriver_mutex);
 		if (rv) {
+			platform_device_put(bmc->dev);
+			bmc->dev = NULL;
 			printk(KERN_ERR
 			       "ipmi_msghandler:"
 			       " Unable to register bmc device: %d\n",
@@ -2130,57 +2388,6 @@
 			return rv;
 		}
 
-		bmc->device_id_attr.attr.name = "device_id";
-		bmc->device_id_attr.attr.owner = THIS_MODULE;
-		bmc->device_id_attr.attr.mode = S_IRUGO;
-		bmc->device_id_attr.show = device_id_show;
-
-		bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
-		bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
-		bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
-		bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
-
-		bmc->revision_attr.attr.name = "revision";
-		bmc->revision_attr.attr.owner = THIS_MODULE;
-		bmc->revision_attr.attr.mode = S_IRUGO;
-		bmc->revision_attr.show = revision_show;
-
-		bmc->firmware_rev_attr.attr.name = "firmware_revision";
-		bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
-		bmc->firmware_rev_attr.attr.mode = S_IRUGO;
-		bmc->firmware_rev_attr.show = firmware_rev_show;
-
-		bmc->version_attr.attr.name = "ipmi_version";
-		bmc->version_attr.attr.owner = THIS_MODULE;
-		bmc->version_attr.attr.mode = S_IRUGO;
-		bmc->version_attr.show = ipmi_version_show;
-
-		bmc->add_dev_support_attr.attr.name
-			= "additional_device_support";
-		bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
-		bmc->add_dev_support_attr.attr.mode = S_IRUGO;
-		bmc->add_dev_support_attr.show = add_dev_support_show;
-
-		bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
-		bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
-		bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
-		bmc->manufacturer_id_attr.show = manufacturer_id_show;
-
-		bmc->product_id_attr.attr.name = "product_id";
-		bmc->product_id_attr.attr.owner = THIS_MODULE;
-		bmc->product_id_attr.attr.mode = S_IRUGO;
-		bmc->product_id_attr.show = product_id_show;
-
-		bmc->guid_attr.attr.name = "guid";
-		bmc->guid_attr.attr.owner = THIS_MODULE;
-		bmc->guid_attr.attr.mode = S_IRUGO;
-		bmc->guid_attr.show = guid_show;
-
-		bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
-		bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
-		bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
-		bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
-
 		rv = create_files(bmc);
 		if (rv) {
 			mutex_lock(&ipmidriver_mutex);
@@ -2202,29 +2409,44 @@
 	 * create symlink from system interface device to bmc device
 	 * and back.
 	 */
+	intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
+	if (!intf->sysfs_name) {
+		rv = -ENOMEM;
+		printk(KERN_ERR
+		       "ipmi_msghandler: allocate link to BMC: %d\n",
+		       rv);
+		goto out_err;
+	}
+
 	rv = sysfs_create_link(&intf->si_dev->kobj,
-			       &bmc->dev->dev.kobj, "bmc");
+			       &bmc->dev->dev.kobj, intf->sysfs_name);
 	if (rv) {
+		kfree(intf->sysfs_name);
+		intf->sysfs_name = NULL;
 		printk(KERN_ERR
 		       "ipmi_msghandler: Unable to create bmc symlink: %d\n",
 		       rv);
 		goto out_err;
 	}
 
-	size = snprintf(dummy, 0, "ipmi%d", intf->intf_num);
+	size = snprintf(dummy, 0, "ipmi%d", ifnum);
 	intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
 	if (!intf->my_dev_name) {
+		kfree(intf->sysfs_name);
+		intf->sysfs_name = NULL;
 		rv = -ENOMEM;
 		printk(KERN_ERR
 		       "ipmi_msghandler: allocate link from BMC: %d\n",
 		       rv);
 		goto out_err;
 	}
-	snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num);
+	snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
 
 	rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
 			       intf->my_dev_name);
 	if (rv) {
+		kfree(intf->sysfs_name);
+		intf->sysfs_name = NULL;
 		kfree(intf->my_dev_name);
 		intf->my_dev_name = NULL;
 		printk(KERN_ERR
@@ -2409,17 +2631,14 @@
 		      void		       *send_info,
 		      struct ipmi_device_id    *device_id,
 		      struct device            *si_dev,
+		      const char               *sysfs_name,
 		      unsigned char            slave_addr)
 {
 	int              i, j;
 	int              rv;
 	ipmi_smi_t       intf;
-	unsigned long    flags;
-	int              version_major;
-	int              version_minor;
-
-	version_major = ipmi_version_major(device_id);
-	version_minor = ipmi_version_minor(device_id);
+	ipmi_smi_t       tintf;
+	struct list_head *link;
 
 	/* Make sure the driver is actually initialized, this handles
 	   problems with initialization order. */
@@ -2437,12 +2656,16 @@
 	if (!intf)
 		return -ENOMEM;
 	memset(intf, 0, sizeof(*intf));
+
+	intf->ipmi_version_major = ipmi_version_major(device_id);
+	intf->ipmi_version_minor = ipmi_version_minor(device_id);
+
 	intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
 	if (!intf->bmc) {
 		kfree(intf);
 		return -ENOMEM;
 	}
-	intf->intf_num = -1;
+	intf->intf_num = -1; /* Mark it invalid for now. */
 	kref_init(&intf->refcount);
 	intf->bmc->id = *device_id;
 	intf->si_dev = si_dev;
@@ -2470,26 +2693,30 @@
 	INIT_LIST_HEAD(&intf->waiting_events);
 	intf->waiting_events_count = 0;
 	mutex_init(&intf->cmd_rcvrs_mutex);
+	spin_lock_init(&intf->maintenance_mode_lock);
 	INIT_LIST_HEAD(&intf->cmd_rcvrs);
 	init_waitqueue_head(&intf->waitq);
 
 	spin_lock_init(&intf->counter_lock);
 	intf->proc_dir = NULL;
 
-	rv = -ENOMEM;
-	spin_lock_irqsave(&interfaces_lock, flags);
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-		if (ipmi_interfaces[i] == NULL) {
-			intf->intf_num = i;
-			/* Reserve the entry till we are done. */
-			ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
-			rv = 0;
+	mutex_lock(&smi_watchers_mutex);
+	mutex_lock(&ipmi_interfaces_mutex);
+	/* Look for a hole in the numbers. */
+	i = 0;
+	link = &ipmi_interfaces;
+	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
+		if (tintf->intf_num != i) {
+			link = &tintf->link;
 			break;
 		}
+		i++;
 	}
-	spin_unlock_irqrestore(&interfaces_lock, flags);
-	if (rv)
-		goto out;
+	/* Add the new interface in numeric order. */
+	if (i == 0)
+		list_add_rcu(&intf->link, &ipmi_interfaces);
+	else
+		list_add_tail_rcu(&intf->link, link);
 
 	rv = handlers->start_processing(send_info, intf);
 	if (rv)
@@ -2497,8 +2724,9 @@
 
 	get_guid(intf);
 
-	if ((version_major > 1)
-	    || ((version_major == 1) && (version_minor >= 5)))
+	if ((intf->ipmi_version_major > 1)
+	    || ((intf->ipmi_version_major == 1)
+		&& (intf->ipmi_version_minor >= 5)))
 	{
 		/* Start scanning the channels to see what is
 		   available. */
@@ -2521,64 +2749,67 @@
 	if (rv == 0)
 		rv = add_proc_entries(intf, i);
 
-	rv = ipmi_bmc_register(intf);
+	rv = ipmi_bmc_register(intf, i, sysfs_name);
 
  out:
 	if (rv) {
 		if (intf->proc_dir)
 			remove_proc_entries(intf);
+		intf->handlers = NULL;
+		list_del_rcu(&intf->link);
+		mutex_unlock(&ipmi_interfaces_mutex);
+		mutex_unlock(&smi_watchers_mutex);
+		synchronize_rcu();
 		kref_put(&intf->refcount, intf_free);
-		if (i < MAX_IPMI_INTERFACES) {
-			spin_lock_irqsave(&interfaces_lock, flags);
-			ipmi_interfaces[i] = NULL;
-			spin_unlock_irqrestore(&interfaces_lock, flags);
-		}
 	} else {
-		spin_lock_irqsave(&interfaces_lock, flags);
-		ipmi_interfaces[i] = intf;
-		spin_unlock_irqrestore(&interfaces_lock, flags);
+		/* After this point the interface is legal to use. */
+		intf->intf_num = i;
+		mutex_unlock(&ipmi_interfaces_mutex);
 		call_smi_watchers(i, intf->si_dev);
+		mutex_unlock(&smi_watchers_mutex);
 	}
 
 	return rv;
 }
 
+static void cleanup_smi_msgs(ipmi_smi_t intf)
+{
+	int              i;
+	struct seq_table *ent;
+
+	/* No need for locks, the interface is down. */
+	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+		ent = &(intf->seq_table[i]);
+		if (!ent->inuse)
+			continue;
+		deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
+	}
+}
+
 int ipmi_unregister_smi(ipmi_smi_t intf)
 {
-	int                     i;
 	struct ipmi_smi_watcher *w;
-	unsigned long           flags;
+	int    intf_num = intf->intf_num;
 
 	ipmi_bmc_unregister(intf);
 
-	spin_lock_irqsave(&interfaces_lock, flags);
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-		if (ipmi_interfaces[i] == intf) {
-			/* Set the interface number reserved until we
-			 * are done. */
-			ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
-			intf->intf_num = -1;
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&interfaces_lock,flags);
+	mutex_lock(&smi_watchers_mutex);
+	mutex_lock(&ipmi_interfaces_mutex);
+	intf->intf_num = -1;
+	intf->handlers = NULL;
+	list_del_rcu(&intf->link);
+	mutex_unlock(&ipmi_interfaces_mutex);
+	synchronize_rcu();
 
-	if (i == MAX_IPMI_INTERFACES)
-		return -ENODEV;
+	cleanup_smi_msgs(intf);
 
 	remove_proc_entries(intf);
 
 	/* Call all the watcher interfaces to tell them that
 	   an interface is gone. */
-	down_read(&smi_watchers_sem);
 	list_for_each_entry(w, &smi_watchers, link)
-		w->smi_gone(i);
-	up_read(&smi_watchers_sem);
-
-	/* Allow the entry to be reused now. */
-	spin_lock_irqsave(&interfaces_lock, flags);
-	ipmi_interfaces[i] = NULL;
-	spin_unlock_irqrestore(&interfaces_lock,flags);
+		w->smi_gone(intf_num);
+	mutex_unlock(&smi_watchers_mutex);
 
 	kref_put(&intf->refcount, intf_free);
 	return 0;
@@ -2660,6 +2891,7 @@
 	struct ipmi_ipmb_addr    *ipmb_addr;
 	struct ipmi_recv_msg     *recv_msg;
 	unsigned long            flags;
+	struct ipmi_smi_handlers *handlers;
 
 	if (msg->rsp_size < 10) {
 		/* Message not big enough, just ignore it. */
@@ -2716,10 +2948,16 @@
 		printk("\n");
 	}
 #endif
-		intf->handlers->sender(intf->send_info, msg, 0);
-
-		rv = -1; /* We used the message, so return the value that
-			    causes it to not be freed or queued. */
+		rcu_read_lock();
+		handlers = intf->handlers;
+		if (handlers) {
+			handlers->sender(intf->send_info, msg, 0);
+			/* We used the message, so return the value
+			   that causes it to not be freed or
+			   queued. */
+			rv = -1;
+		}
+		rcu_read_unlock();
 	} else {
 		/* Deliver the message to the user. */
 		spin_lock_irqsave(&intf->counter_lock, flags);
@@ -3309,16 +3547,6 @@
 	rcu_read_unlock();
 }
 
-static void
-handle_msg_timeout(struct ipmi_recv_msg *msg)
-{
-	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
-	msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
-	msg->msg.netfn |= 1; /* Convert to a response. */
-	msg->msg.data_len = 1;
-	msg->msg.data = msg->msg_data;
-	deliver_response(msg);
-}
 
 static struct ipmi_smi_msg *
 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
@@ -3350,7 +3578,11 @@
 			      struct list_head *timeouts, long timeout_period,
 			      int slot, unsigned long *flags)
 {
-	struct ipmi_recv_msg *msg;
+	struct ipmi_recv_msg     *msg;
+	struct ipmi_smi_handlers *handlers;
+
+	if (intf->intf_num == -1)
+		return;
 
 	if (!ent->inuse)
 		return;
@@ -3393,13 +3625,19 @@
 			return;
 
 		spin_unlock_irqrestore(&intf->seq_lock, *flags);
+
 		/* Send the new message.  We send with a zero
 		 * priority.  It timed out, I doubt time is
 		 * that critical now, and high priority
 		 * messages are really only for messages to the
 		 * local MC, which don't get resent. */
-		intf->handlers->sender(intf->send_info,
-				       smi_msg, 0);
+		handlers = intf->handlers;
+		if (handlers)
+			handlers->sender(intf->send_info,
+					 smi_msg, 0);
+		else
+			ipmi_free_smi_msg(smi_msg);
+
 		spin_lock_irqsave(&intf->seq_lock, *flags);
 	}
 }
@@ -3411,18 +3649,12 @@
 	struct ipmi_recv_msg *msg, *msg2;
 	struct ipmi_smi_msg  *smi_msg, *smi_msg2;
 	unsigned long        flags;
-	int                  i, j;
+	int                  i;
 
 	INIT_LIST_HEAD(&timeouts);
 
-	spin_lock(&interfaces_lock);
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-		intf = ipmi_interfaces[i];
-		if (IPMI_INVALID_INTERFACE(intf))
-			continue;
-		kref_get(&intf->refcount);
-		spin_unlock(&interfaces_lock);
-
+	rcu_read_lock();
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
 		/* See if any waiting messages need to be processed. */
 		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
 		list_for_each_entry_safe(smi_msg, smi_msg2,
@@ -3442,35 +3674,60 @@
 		   have timed out, putting them in the timeouts
 		   list. */
 		spin_lock_irqsave(&intf->seq_lock, flags);
-		for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++)
-			check_msg_timeout(intf, &(intf->seq_table[j]),
-					  &timeouts, timeout_period, j,
+		for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+			check_msg_timeout(intf, &(intf->seq_table[i]),
+					  &timeouts, timeout_period, i,
 					  &flags);
 		spin_unlock_irqrestore(&intf->seq_lock, flags);
 
 		list_for_each_entry_safe(msg, msg2, &timeouts, link)
-			handle_msg_timeout(msg);
+			deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
 
-		kref_put(&intf->refcount, intf_free);
-		spin_lock(&interfaces_lock);
+		/*
+		 * Maintenance mode handling.  Check the timeout
+		 * optimistically before we claim the lock.  It may
+		 * mean a timeout gets missed occasionally, but that
+		 * only means the timeout gets extended by one period
+		 * in that case.  No big deal, and it avoids the lock
+		 * most of the time.
+		 */
+		if (intf->auto_maintenance_timeout > 0) {
+			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+			if (intf->auto_maintenance_timeout > 0) {
+				intf->auto_maintenance_timeout
+					-= timeout_period;
+				if (!intf->maintenance_mode
+				    && (intf->auto_maintenance_timeout <= 0))
+				{
+					intf->maintenance_mode_enable = 0;
+					maintenance_mode_update(intf);
+				}
+			}
+			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+					       flags);
+		}
 	}
-	spin_unlock(&interfaces_lock);
+	rcu_read_unlock();
 }
 
 static void ipmi_request_event(void)
 {
-	ipmi_smi_t intf;
-	int        i;
+	ipmi_smi_t               intf;
+	struct ipmi_smi_handlers *handlers;
 
-	spin_lock(&interfaces_lock);
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-		intf = ipmi_interfaces[i];
-		if (IPMI_INVALID_INTERFACE(intf))
+	rcu_read_lock();
+	/* Called from the timer, no need to check if handlers is
+	 * valid. */
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		/* No event requests when in maintenance mode. */
+		if (intf->maintenance_mode_enable)
 			continue;
 
-		intf->handlers->request_events(intf->send_info);
+		handlers = intf->handlers;
+		if (handlers)
+			handlers->request_events(intf->send_info);
 	}
-	spin_unlock(&interfaces_lock);
+	rcu_read_unlock();
 }
 
 static struct timer_list ipmi_timer;
@@ -3599,7 +3856,6 @@
 	struct kernel_ipmi_msg            msg;
 	ipmi_smi_t                        intf;
 	unsigned char                     data[16];
-	int                               i;
 	struct ipmi_system_interface_addr *si;
 	struct ipmi_addr                  addr;
 	struct ipmi_smi_msg               smi_msg;
@@ -3633,9 +3889,9 @@
 	recv_msg.done = dummy_recv_done_handler;
 
 	/* For every registered interface, send the event. */
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-		intf = ipmi_interfaces[i];
-		if (IPMI_INVALID_INTERFACE(intf))
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (!intf->handlers)
+			/* Interface is not ready. */
 			continue;
 
 		/* Send the event announcing the panic. */
@@ -3660,13 +3916,14 @@
 	if (!str) 
 		return;
 
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
+	/* For every registered interface, send the event. */
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
 		char                  *p = str;
 		struct ipmi_ipmb_addr *ipmb;
 		int                   j;
 
-		intf = ipmi_interfaces[i];
-		if (IPMI_INVALID_INTERFACE(intf))
+		if (intf->intf_num == -1)
+			/* Interface was not ready yet. */
 			continue;
 
 		/* First job here is to figure out where to send the
@@ -3792,7 +4049,6 @@
 		       unsigned long         event,
                        void                  *ptr)
 {
-	int        i;
 	ipmi_smi_t intf;
 
 	if (has_panicked)
@@ -3800,9 +4056,9 @@
 	has_panicked = 1;
 
 	/* For every registered interface, set it to run to completion. */
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-		intf = ipmi_interfaces[i];
-		if (IPMI_INVALID_INTERFACE(intf))
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (!intf->handlers)
+			/* Interface is not ready. */
 			continue;
 
 		intf->handlers->set_run_to_completion(intf->send_info, 1);
@@ -3823,7 +4079,6 @@
 
 static int ipmi_init_msghandler(void)
 {
-	int i;
 	int rv;
 
 	if (initialized)
@@ -3838,9 +4093,6 @@
 	printk(KERN_INFO "ipmi message handler version "
 	       IPMI_DRIVER_VERSION "\n");
 
-	for (i = 0; i < MAX_IPMI_INTERFACES; i++)
-		ipmi_interfaces[i] = NULL;
-
 #ifdef CONFIG_PROC_FS
 	proc_ipmi_root = proc_mkdir("ipmi", NULL);
 	if (!proc_ipmi_root) {
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 8d941db..597eb4f 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -43,6 +43,9 @@
 
 #define PFX "IPMI poweroff: "
 
+static void ipmi_po_smi_gone(int if_num);
+static void ipmi_po_new_smi(int if_num, struct device *device);
+
 /* Definitions for controlling power off (if the system supports it).  It
  * conveniently matches the IPMI chassis control values. */
 #define IPMI_CHASSIS_POWER_DOWN		0	/* power down, the default. */
@@ -51,6 +54,37 @@
 /* the IPMI data command */
 static int poweroff_powercycle;
 
+/* Which interface to use, -1 means the first we see. */
+static int ifnum_to_use = -1;
+
+/* Our local state. */
+static int ready = 0;
+static ipmi_user_t ipmi_user;
+static int ipmi_ifnum;
+static void (*specific_poweroff_func)(ipmi_user_t user) = NULL;
+
+/* Holds the old poweroff function so we can restore it on removal. */
+static void (*old_poweroff_func)(void);
+
+static int set_param_ifnum(const char *val, struct kernel_param *kp)
+{
+	int rv = param_set_int(val, kp);
+	if (rv)
+		return rv;
+	if ((ifnum_to_use < 0) || (ifnum_to_use == ipmi_ifnum))
+		return 0;
+
+	ipmi_po_smi_gone(ipmi_ifnum);
+	ipmi_po_new_smi(ifnum_to_use, NULL);
+	return 0;
+}
+
+module_param_call(ifnum_to_use, set_param_ifnum, param_get_int,
+		  &ifnum_to_use, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the"
+		 " poweroff.  Setting to -1 defaults to the first registered"
+		 " interface");
+
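+/*
+ * Example (illustrative): the 0644 permission above exposes the parameter
+ * through sysfs, so the poweroff handler can be moved to IPMI interface 1
+ * at runtime with:
+ *   echo 1 > /sys/module/ipmi_poweroff/parameters/ifnum_to_use
+ */
+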
 /* parameter definition to allow user to flag power cycle */
 module_param(poweroff_powercycle, int, 0644);
 MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down.");
@@ -142,6 +176,42 @@
 #define IPMI_ATCA_GET_ADDR_INFO_CMD	0x01
 #define IPMI_PICMG_ID			0
 
+#define IPMI_NETFN_OEM				0x2e
+#define IPMI_ATCA_PPS_GRACEFUL_RESTART		0x11
+#define IPMI_ATCA_PPS_IANA			"\x00\x40\x0A"
+#define IPMI_MOTOROLA_MANUFACTURER_ID		0x0000A1
+#define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID	0x0051
+
+static void (*atca_oem_poweroff_hook)(ipmi_user_t user) = NULL;
+
+static void pps_poweroff_atca(ipmi_user_t user)
+{
+	struct ipmi_system_interface_addr smi_addr;
+	struct kernel_ipmi_msg            send_msg;
+	int                               rv;
+
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
+
+	printk(KERN_INFO PFX "PPS powerdown hook used\n");
+
+	send_msg.netfn = IPMI_NETFN_OEM;
+	send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
+	send_msg.data = IPMI_ATCA_PPS_IANA;
+	send_msg.data_len = 3;
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+		printk(KERN_ERR PFX "Unable to send ATCA PPS restart,"
+		       " IPMI error 0x%x\n", rv);
+	}
+}
+
 static int ipmi_atca_detect (ipmi_user_t user)
 {
 	struct ipmi_system_interface_addr smi_addr;
@@ -167,6 +237,13 @@
 	rv = ipmi_request_wait_for_response(user,
 					    (struct ipmi_addr *) &smi_addr,
 					    &send_msg);
+
+	printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n", mfg_id, prod_id);
+	if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
+	    && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
+		printk(KERN_INFO PFX "Installing Pigeon Point Systems Poweroff Hook\n");
+		atca_oem_poweroff_hook = pps_poweroff_atca;
+	}
 	return !rv;
 }
 
@@ -200,12 +277,19 @@
 	rv = ipmi_request_in_rc_mode(user,
 				     (struct ipmi_addr *) &smi_addr,
 				     &send_msg);
-	if (rv) {
+	/*
+	 * At this point the system may be shutting down, and most serial
+	 * drivers (if used) will have interrupts turned off; it may be
+	 * better to ignore an IPMI_UNKNOWN_ERR_COMPLETION_CODE return code.
+	 */
+	if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
 		printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
 		       " IPMI error 0x%x\n", rv);
 		goto out;
 	}
 
+	if (atca_oem_poweroff_hook)
+		return atca_oem_poweroff_hook(user);
  out:
 	return;
 }
@@ -440,15 +524,6 @@
 		      / sizeof(struct poweroff_function))
 
 
-/* Our local state. */
-static int ready = 0;
-static ipmi_user_t ipmi_user;
-static void (*specific_poweroff_func)(ipmi_user_t user) = NULL;
-
-/* Holds the old poweroff function so we can restore it on removal. */
-static void (*old_poweroff_func)(void);
-
-
 /* Called on a powerdown request. */
 static void ipmi_poweroff_function (void)
 {
@@ -473,6 +548,9 @@
 	if (ready)
 		return;
 
+	if ((ifnum_to_use >= 0) && (ifnum_to_use != if_num))
+		return;
+
 	rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL,
 			      &ipmi_user);
 	if (rv) {
@@ -481,6 +559,8 @@
 		return;
 	}
 
+	ipmi_ifnum = if_num;
+
         /*
          * Do a get device id and store some results, since this is
 	 * used by several functions.
@@ -541,9 +621,15 @@
 
 static void ipmi_po_smi_gone(int if_num)
 {
-	/* This can never be called, because once poweroff driver is
-	   registered, the interface can't go away until the power
-	   driver is unregistered. */
+	if (!ready)
+		return;
+
+	if (ipmi_ifnum != if_num)
+		return;
+
+	ready = 0;
+	ipmi_destroy_user(ipmi_user);
+	pm_power_off = old_poweroff_func;
 }
 
 static struct ipmi_smi_watcher smi_watcher =
@@ -616,9 +702,9 @@
 		printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv);
 		goto out_err;
 	}
-#endif
 
  out_err:
+#endif
 	return rv;
 }
 
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index bb1fac1..81a0c89 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -61,6 +61,10 @@
 #include "ipmi_si_sm.h"
 #include <linux/init.h>
 #include <linux/dmi.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+#define PFX "ipmi_si: "
 
 /* Measure times between events in the driver. */
 #undef DEBUG_TIMING
@@ -92,7 +96,7 @@
 enum si_type {
     SI_KCS, SI_SMIC, SI_BT
 };
-static char *si_to_str[] = { "KCS", "SMIC", "BT" };
+static char *si_to_str[] = { "kcs", "smic", "bt" };
 
 #define DEVICE_NAME "ipmi_si"
 
@@ -222,7 +226,10 @@
 static int force_kipmid[SI_MAX_PARMS];
 static int num_force_kipmid;
 
+static int unload_when_empty = 1;
+
 static int try_smi_init(struct smi_info *smi);
+static void cleanup_one_si(struct smi_info *to_clean);
 
 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
 static int register_xaction_notifier(struct notifier_block * nb)
@@ -240,14 +247,18 @@
 	spin_lock(&(smi_info->si_lock));
 }
 
-static void return_hosed_msg(struct smi_info *smi_info)
+static void return_hosed_msg(struct smi_info *smi_info, int cCode)
 {
 	struct ipmi_smi_msg *msg = smi_info->curr_msg;
 
+	if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
+		cCode = IPMI_ERR_UNSPECIFIED;
+	/* else use it as is */
+
 	/* Make it a reponse */
 	msg->rsp[0] = msg->data[0] | 4;
 	msg->rsp[1] = msg->data[1];
-	msg->rsp[2] = 0xFF; /* Unknown error. */
+	msg->rsp[2] = cCode;
 	msg->rsp_size = 3;
 
 	smi_info->curr_msg = NULL;
@@ -298,7 +309,7 @@
 			smi_info->curr_msg->data,
 			smi_info->curr_msg->data_size);
 		if (err) {
-			return_hosed_msg(smi_info);
+			return_hosed_msg(smi_info, err);
 		}
 
 		rv = SI_SM_CALL_WITHOUT_DELAY;
@@ -640,7 +651,7 @@
 			/* If we were handling a user message, format
                            a response to send to the upper layer to
                            tell it about the error. */
-			return_hosed_msg(smi_info);
+			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
 		}
 		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
 	}
@@ -684,22 +695,24 @@
 	{
 		/* We are idle and the upper layer requested that I fetch
 		   events, so do so. */
-		unsigned char msg[2];
-
-		spin_lock(&smi_info->count_lock);
-		smi_info->flag_fetches++;
-		spin_unlock(&smi_info->count_lock);
-
 		atomic_set(&smi_info->req_events, 0);
-		msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
-		msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+		smi_info->curr_msg = ipmi_alloc_smi_msg();
+		if (!smi_info->curr_msg)
+			goto out;
+
+		smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+		smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+		smi_info->curr_msg->data_size = 2;
 
 		smi_info->handlers->start_transaction(
-			smi_info->si_sm, msg, 2);
-		smi_info->si_state = SI_GETTING_FLAGS;
+			smi_info->si_sm,
+			smi_info->curr_msg->data,
+			smi_info->curr_msg->data_size);
+		smi_info->si_state = SI_GETTING_EVENTS;
 		goto restart;
 	}
-
+ out:
 	return si_sm_result;
 }
 
@@ -714,6 +727,15 @@
 	struct timeval    t;
 #endif
 
+	if (atomic_read(&smi_info->stop_operation)) {
+		msg->rsp[0] = msg->data[0] | 4;
+		msg->rsp[1] = msg->data[1];
+		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+		msg->rsp_size = 3;
+		deliver_recv_msg(smi_info, msg);
+		return;
+	}
+
 	spin_lock_irqsave(&(smi_info->msg_lock), flags);
 #ifdef DEBUG_TIMING
 	do_gettimeofday(&t);
@@ -805,13 +827,21 @@
 {
 	struct smi_info *smi_info = send_info;
 
-	smi_event_handler(smi_info, 0);
+	/*
+	 * Make sure there is some delay in the poll loop so we can
+	 * drive time forward and timeout things.
+	 */
+	udelay(10);
+	smi_event_handler(smi_info, 10);
 }
 
 static void request_events(void *send_info)
 {
 	struct smi_info *smi_info = send_info;
 
+	if (atomic_read(&smi_info->stop_operation))
+		return;
+
 	atomic_set(&smi_info->req_events, 1);
 }
 
@@ -949,12 +979,21 @@
 	return 0;
 }
 
+static void set_maintenance_mode(void *send_info, int enable)
+{
+	struct smi_info   *smi_info = send_info;
+
+	if (!enable)
+		atomic_set(&smi_info->req_events, 0);
+}
+
 static struct ipmi_smi_handlers handlers =
 {
 	.owner                  = THIS_MODULE,
 	.start_processing       = smi_start_processing,
 	.sender			= sender,
 	.request_events		= request_events,
+	.set_maintenance_mode   = set_maintenance_mode,
 	.set_run_to_completion  = set_run_to_completion,
 	.poll			= poll,
 };
@@ -987,6 +1026,16 @@
 static int slave_addrs[SI_MAX_PARMS];
 static int num_slave_addrs = 0;
 
+#define IPMI_IO_ADDR_SPACE  0
+#define IPMI_MEM_ADDR_SPACE 1
+static char *addr_space_to_str[] = { "I/O", "mem" };
+
+static int hotmod_handler(const char *val, struct kernel_param *kp);
+
+module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
+MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
+		 " Documentation/IPMI.txt in the kernel sources for the"
+		 " gory details.");
 
 module_param_named(trydefaults, si_trydefaults, bool, 0);
 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
@@ -1038,12 +1087,12 @@
 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
 		 " disabled(0).  Normally the IPMI driver auto-detects"
 		 " this, but the value may be overridden by this parm.");
+module_param(unload_when_empty, int, 0);
+MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
+		 " specified or found, default is 1.  Setting to 0"
+		 " is useful for hot add of devices using hotmod.");
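+/*
+ * Illustrative use: loading with no interfaces preconfigured but keeping the
+ * module resident so interfaces can be added later through hotmod:
+ *   modprobe ipmi_si unload_when_empty=0
+ */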
 
 
-#define IPMI_IO_ADDR_SPACE  0
-#define IPMI_MEM_ADDR_SPACE 1
-static char *addr_space_to_str[] = { "I/O", "memory" };
-
 static void std_irq_cleanup(struct smi_info *info)
 {
 	if (info->si_type == SI_BT)
@@ -1317,6 +1366,234 @@
 	return 0;
 }
 
+/*
+ * Parms come in as <op1>[:op2[:op3...]].  ops are:
+ *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
+ * Options are:
+ *   rsp=<regspacing>
+ *   rsi=<regsize>
+ *   rsh=<regshift>
+ *   irq=<irq>
+ *   ipmb=<ipmb addr>
+ */
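+/*
+ * Illustrative use, assuming the module is loaded and sysfs is mounted
+ * (the hotmod module_param_call earlier in this file exposes the parameter):
+ *   echo "add,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
+ */
+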
+enum hotmod_op { HM_ADD, HM_REMOVE };
+struct hotmod_vals {
+	char *name;
+	int  val;
+};
+static struct hotmod_vals hotmod_ops[] = {
+	{ "add",	HM_ADD },
+	{ "remove",	HM_REMOVE },
+	{ NULL }
+};
+static struct hotmod_vals hotmod_si[] = {
+	{ "kcs",	SI_KCS },
+	{ "smic",	SI_SMIC },
+	{ "bt",		SI_BT },
+	{ NULL }
+};
+static struct hotmod_vals hotmod_as[] = {
+	{ "mem",	IPMI_MEM_ADDR_SPACE },
+	{ "i/o",	IPMI_IO_ADDR_SPACE },
+	{ NULL }
+};
+static int ipmi_strcasecmp(const char *s1, const char *s2)
+{
+	while (*s1 || *s2) {
+		if (!*s1)
+			return -1;
+		if (!*s2)
+			return 1;
+		if (tolower(*s1) != tolower(*s2))
+			return tolower(*s1) - tolower(*s2);
+		s1++;
+		s2++;
+	}
+	return 0;
+}
+static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
+{
+	char *s;
+	int  i;
+
+	s = strchr(*curr, ',');
+	if (!s) {
+		printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
+		return -EINVAL;
+	}
+	*s = '\0';
+	s++;
+	for (i = 0; v[i].name; i++) {
+		if (ipmi_strcasecmp(*curr, v[i].name) == 0) {
+			*val = v[i].val;
+			*curr = s;
+			return 0;
+		}
+	}
+
+	printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
+	return -EINVAL;
+}
+
+static int hotmod_handler(const char *val, struct kernel_param *kp)
+{
+	char *str = kstrdup(val, GFP_KERNEL);
+	int  rv = -EINVAL;
+	char *next, *curr, *s, *n, *o;
+	enum hotmod_op op;
+	enum si_type si_type;
+	int  addr_space;
+	unsigned long addr;
+	int regspacing;
+	int regsize;
+	int regshift;
+	int irq;
+	int ipmb;
+	int ival;
+	struct smi_info *info;
+
+	if (!str)
+		return -ENOMEM;
+
+	/* Kill any trailing spaces, as we can get a "\n" from echo. */
+	ival = strlen(str) - 1;
+	while ((ival >= 0) && isspace(str[ival])) {
+		str[ival] = '\0';
+		ival--;
+	}
+
+	for (curr = str; curr; curr = next) {
+		regspacing = 1;
+		regsize = 1;
+		regshift = 0;
+		irq = 0;
+		ipmb = 0x20;
+
+		next = strchr(curr, ':');
+		if (next) {
+			*next = '\0';
+			next++;
+		}
+
+		rv = parse_str(hotmod_ops, &ival, "operation", &curr);
+		if (rv)
+			break;
+		op = ival;
+
+		rv = parse_str(hotmod_si, &ival, "interface type", &curr);
+		if (rv)
+			break;
+		si_type = ival;
+
+		rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
+		if (rv)
+			break;
+
+		s = strchr(curr, ',');
+		if (s) {
+			*s = '\0';
+			s++;
+		}
+		addr = simple_strtoul(curr, &n, 0);
+		if ((*n != '\0') || (*curr == '\0')) {
+			printk(KERN_WARNING PFX "Invalid hotmod address"
+			       " '%s'\n", curr);
+			break;
+		}
+
+		while (s) {
+			curr = s;
+			s = strchr(curr, ',');
+			if (s) {
+				*s = '\0';
+				s++;
+			}
+			o = strchr(curr, '=');
+			if (o) {
+				*o = '\0';
+				o++;
+			}
+#define HOTMOD_INT_OPT(name, val) \
+			if (ipmi_strcasecmp(curr, name) == 0) {		\
+				if (!o) {				\
+					printk(KERN_WARNING PFX		\
+					       "No option given for '%s'\n", \
+						curr);			\
+					goto out;			\
+				}					\
+				val = simple_strtoul(o, &n, 0);		\
+				if ((*n != '\0') || (*o == '\0')) {	\
+					printk(KERN_WARNING PFX		\
+					       "Bad option given for '%s'\n", \
+					       curr);			\
+					goto out;			\
+				}					\
+			}
+
+			HOTMOD_INT_OPT("rsp", regspacing)
+			else HOTMOD_INT_OPT("rsi", regsize)
+			else HOTMOD_INT_OPT("rsh", regshift)
+			else HOTMOD_INT_OPT("irq", irq)
+			else HOTMOD_INT_OPT("ipmb", ipmb)
+			else {
+				printk(KERN_WARNING PFX
+				       "Invalid hotmod option '%s'\n",
+				       curr);
+				goto out;
+			}
+#undef HOTMOD_INT_OPT
+		}
+
+		if (op == HM_ADD) {
+			info = kzalloc(sizeof(*info), GFP_KERNEL);
+			if (!info) {
+				rv = -ENOMEM;
+				goto out;
+			}
+
+			info->addr_source = "hotmod";
+			info->si_type = si_type;
+			info->io.addr_data = addr;
+			info->io.addr_type = addr_space;
+			if (addr_space == IPMI_MEM_ADDR_SPACE)
+				info->io_setup = mem_setup;
+			else
+				info->io_setup = port_setup;
+
+			info->io.addr = NULL;
+			info->io.regspacing = regspacing;
+			if (!info->io.regspacing)
+				info->io.regspacing = DEFAULT_REGSPACING;
+			info->io.regsize = regsize;
+			if (!info->io.regsize)
+				info->io.regsize = DEFAULT_REGSPACING;
+			info->io.regshift = regshift;
+			info->irq = irq;
+			if (info->irq)
+				info->irq_setup = std_irq_setup;
+			info->slave_addr = ipmb;
+
+			try_smi_init(info);
+		} else {
+			/* remove */
+			struct smi_info *e, *tmp_e;
+
+			mutex_lock(&smi_infos_lock);
+			list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
+				if (e->io.addr_type != addr_space)
+					continue;
+				if (e->si_type != si_type)
+					continue;
+				if (e->io.addr_data == addr)
+					cleanup_one_si(e);
+			}
+			mutex_unlock(&smi_infos_lock);
+		}
+	}
+ out:
+	kfree(str);
+	return rv;
+}
 
 static __devinit void hardcode_find_bmc(void)
 {
@@ -1333,11 +1610,11 @@
 
 		info->addr_source = "hardcoded";
 
-		if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
+		if (!si_type[i] || ipmi_strcasecmp(si_type[i], "kcs") == 0) {
 			info->si_type = SI_KCS;
-		} else if (strcmp(si_type[i], "smic") == 0) {
+		} else if (ipmi_strcasecmp(si_type[i], "smic") == 0) {
 			info->si_type = SI_SMIC;
-		} else if (strcmp(si_type[i], "bt") == 0) {
+		} else if (ipmi_strcasecmp(si_type[i], "bt") == 0) {
 			info->si_type = SI_BT;
 		} else {
 			printk(KERN_WARNING
@@ -1952,19 +2229,9 @@
 static int type_file_read_proc(char *page, char **start, off_t off,
 			       int count, int *eof, void *data)
 {
-	char            *out = (char *) page;
 	struct smi_info *smi = data;
 
-	switch (smi->si_type) {
-	    case SI_KCS:
-		return sprintf(out, "kcs\n");
-	    case SI_SMIC:
-		return sprintf(out, "smic\n");
-	    case SI_BT:
-		return sprintf(out, "bt\n");
-	    default:
-		return 0;
-	}
+	return sprintf(page, "%s\n", si_to_str[smi->si_type]);
 }
 
 static int stat_file_read_proc(char *page, char **start, off_t off,
@@ -2000,7 +2267,24 @@
 	out += sprintf(out, "incoming_messages:     %ld\n",
 		       smi->incoming_messages);
 
-	return (out - ((char *) page));
+	return out - page;
+}
+
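+/*
+ * Report this interface's settings under /proc/ipmi in roughly the same
+ * comma-separated form the "hotmod" parameter accepts, so the current
+ * configuration can be read back and reused.
+ */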
+static int param_read_proc(char *page, char **start, off_t off,
+			   int count, int *eof, void *data)
+{
+	struct smi_info *smi = data;
+
+	return sprintf(page,
+		       "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
+		       si_to_str[smi->si_type],
+		       addr_space_to_str[smi->io.addr_type],
+		       smi->io.addr_data,
+		       smi->io.regspacing,
+		       smi->io.regsize,
+		       smi->io.regshift,
+		       smi->irq,
+		       smi->slave_addr);
 }
 
 /*
@@ -2362,6 +2646,7 @@
 			       new_smi,
 			       &new_smi->device_id,
 			       new_smi->dev,
+			       "bmc",
 			       new_smi->slave_addr);
 	if (rv) {
 		printk(KERN_ERR
@@ -2390,6 +2675,16 @@
 		goto out_err_stop_timer;
 	}
 
+	rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
+				     param_read_proc, NULL,
+				     new_smi, THIS_MODULE);
+	if (rv) {
+		printk(KERN_ERR
+		       "ipmi_si: Unable to create proc entry: %d\n",
+		       rv);
+		goto out_err_stop_timer;
+	}
+
 	list_add_tail(&new_smi->link, &smi_infos);
 
 	mutex_unlock(&smi_infos_lock);
@@ -2483,7 +2778,12 @@
 #endif
 
 #ifdef CONFIG_PCI
-	pci_module_init(&ipmi_pci_driver);
+	rv = pci_register_driver(&ipmi_pci_driver);
+	if (rv) {
+		printk(KERN_ERR
+		       "init_ipmi_si: Unable to register PCI driver: %d\n",
+		       rv);
+	}
 #endif
 
 	if (si_trydefaults) {
@@ -2498,7 +2798,7 @@
 	}
 
 	mutex_lock(&smi_infos_lock);
-	if (list_empty(&smi_infos)) {
+	if (unload_when_empty && list_empty(&smi_infos)) {
 		mutex_unlock(&smi_infos_lock);
 #ifdef CONFIG_PCI
 		pci_unregister_driver(&ipmi_pci_driver);
@@ -2513,7 +2813,7 @@
 }
 module_init(init_ipmi_si);
 
-static void __devexit cleanup_one_si(struct smi_info *to_clean)
+static void cleanup_one_si(struct smi_info *to_clean)
 {
 	int           rv;
 	unsigned long flags;
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
index 39d7e5e..e64ea7d 100644
--- a/drivers/char/ipmi/ipmi_smic_sm.c
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -141,12 +141,14 @@
 {
 	unsigned int i;
 
-	if ((size < 2) || (size > MAX_SMIC_WRITE_SIZE)) {
-		return -1;
-	}
-	if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED)) {
-		return -2;
-	}
+	if (size < 2)
+		return IPMI_REQ_LEN_INVALID_ERR;
+	if (size > MAX_SMIC_WRITE_SIZE)
+		return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+	if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED))
+		return IPMI_NOT_IN_MY_STATE_ERR;
+
 	if (smic_debug & SMIC_DEBUG_MSG) {
 		printk(KERN_INFO "start_smic_transaction -");
 		for (i = 0; i < size; i ++) {
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 73f759e..90fb2a5 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -135,6 +135,7 @@
 static int nowayout = WATCHDOG_NOWAYOUT;
 
 static ipmi_user_t watchdog_user = NULL;
+static int watchdog_ifnum;
 
 /* Default the timeout to 10 seconds. */
 static int timeout = 10;
@@ -161,6 +162,8 @@
 static char pretimeout_since_last_heartbeat = 0;
 static char expect_close;
 
+static int ifnum_to_use = -1;
+
 static DECLARE_RWSEM(register_sem);
 
 /* Parameters to ipmi_set_timeout */
@@ -169,6 +172,8 @@
 #define IPMI_SET_TIMEOUT_FORCE_HB		2
 
 static int ipmi_set_timeout(int do_heartbeat);
+static void ipmi_register_watchdog(int ipmi_intf);
+static void ipmi_unregister_watchdog(int ipmi_intf);
 
 /* If true, the driver will start running as soon as it is configured
    and ready. */
@@ -245,6 +250,26 @@
 	return strlen(buffer);
 }
 
+
+static int set_param_wdog_ifnum(const char *val, struct kernel_param *kp)
+{
+	int rv = param_set_int(val, kp);
+	if (rv)
+		return rv;
+	if ((ifnum_to_use < 0) || (ifnum_to_use == watchdog_ifnum))
+		return 0;
+
+	ipmi_unregister_watchdog(watchdog_ifnum);
+	ipmi_register_watchdog(ifnum_to_use);
+	return 0;
+}
+
+module_param_call(ifnum_to_use, set_param_wdog_ifnum, get_param_int,
+		  &ifnum_to_use, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
+		 "timer.  Setting to -1 defaults to the first registered "
+		 "interface");
+
 module_param_call(timeout, set_param_int, get_param_int, &timeout, 0644);
 MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
 
@@ -263,12 +288,13 @@
 MODULE_PARM_DESC(preop, "Pretimeout driver operation.  One of: "
 		 "preop_none, preop_panic, preop_give_data.");
 
-module_param(start_now, int, 0);
+module_param(start_now, int, 0444);
 MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as "
 		 "soon as the driver is loaded.");
 
 module_param(nowayout, int, 0644);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+		 "(default=CONFIG_WATCHDOG_NOWAYOUT)");
 
 /* Default state of the timer. */
 static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
@@ -872,6 +898,11 @@
 	if (watchdog_user)
 		goto out;
 
+	if ((ifnum_to_use >= 0) && (ifnum_to_use != ipmi_intf))
+		goto out;
+
+	watchdog_ifnum = ipmi_intf;
+
 	rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user);
 	if (rv < 0) {
 		printk(KERN_CRIT PFX "Unable to register with ipmi\n");
@@ -901,6 +932,39 @@
 	}
 }
 
+static void ipmi_unregister_watchdog(int ipmi_intf)
+{
+	int rv;
+
+	down_write(&register_sem);
+
+	if (!watchdog_user)
+		goto out;
+
+	if (watchdog_ifnum != ipmi_intf)
+		goto out;
+
+	/* Make sure no one can call us any more. */
+	misc_deregister(&ipmi_wdog_miscdev);
+
+	/* Wait to make sure the message makes it out.  The lower layer has
+	   pointers to our buffers, we want to make sure they are done before
+	   we release our memory. */
+	while (atomic_read(&set_timeout_tofree))
+		schedule_timeout_uninterruptible(1);
+
+	/* Disconnect from IPMI. */
+	rv = ipmi_destroy_user(watchdog_user);
+	if (rv) {
+		printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
+		       rv);
+	}
+	watchdog_user = NULL;
+
+ out:
+	up_write(&register_sem);
+}
+
 #ifdef HAVE_NMI_HANDLER
 static int
 ipmi_nmi(void *dev_id, int cpu, int handled)
@@ -1004,9 +1068,7 @@
 
 static void ipmi_smi_gone(int if_num)
 {
-	/* This can never be called, because once the watchdog is
-	   registered, the interface can't go away until the watchdog
-	   is unregistered. */
+	ipmi_unregister_watchdog(if_num);
 }
 
 static struct ipmi_smi_watcher smi_watcher =
@@ -1148,30 +1210,32 @@
 
 	check_parms();
 
+	register_reboot_notifier(&wdog_reboot_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&wdog_panic_notifier);
+
 	rv = ipmi_smi_watcher_register(&smi_watcher);
 	if (rv) {
 #ifdef HAVE_NMI_HANDLER
 		if (preaction_val == WDOG_PRETIMEOUT_NMI)
 			release_nmi(&ipmi_nmi_handler);
 #endif
+		atomic_notifier_chain_unregister(&panic_notifier_list,
+						 &wdog_panic_notifier);
+		unregister_reboot_notifier(&wdog_reboot_notifier);
 		printk(KERN_WARNING PFX "can't register smi watcher\n");
 		return rv;
 	}
 
-	register_reboot_notifier(&wdog_reboot_notifier);
-	atomic_notifier_chain_register(&panic_notifier_list,
-			&wdog_panic_notifier);
-
 	printk(KERN_INFO PFX "driver initialized\n");
 
 	return 0;
 }
 
-static __exit void ipmi_unregister_watchdog(void)
+static void __exit ipmi_wdog_exit(void)
 {
-	int rv;
-
-	down_write(&register_sem);
+	ipmi_smi_watcher_unregister(&smi_watcher);
+	ipmi_unregister_watchdog(watchdog_ifnum);
 
 #ifdef HAVE_NMI_HANDLER
 	if (nmi_handler_registered)
@@ -1179,37 +1243,8 @@
 #endif
 
 	atomic_notifier_chain_unregister(&panic_notifier_list,
-			&wdog_panic_notifier);
+					 &wdog_panic_notifier);
 	unregister_reboot_notifier(&wdog_reboot_notifier);
-
-	if (! watchdog_user)
-		goto out;
-
-	/* Make sure no one can call us any more. */
-	misc_deregister(&ipmi_wdog_miscdev);
-
-	/* Wait to make sure the message makes it out.  The lower layer has
-	   pointers to our buffers, we want to make sure they are done before
-	   we release our memory. */
-	while (atomic_read(&set_timeout_tofree))
-		schedule_timeout_uninterruptible(1);
-
-	/* Disconnect from IPMI. */
-	rv = ipmi_destroy_user(watchdog_user);
-	if (rv) {
-		printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
-		       rv);
-	}
-	watchdog_user = NULL;
-
- out:
-	up_write(&register_sem);
-}
-
-static void __exit ipmi_wdog_exit(void)
-{
-	ipmi_smi_watcher_unregister(&smi_watcher);
-	ipmi_unregister_watchdog();
 }
 module_exit(ipmi_wdog_exit);
 module_init(ipmi_wdog_init);
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 58c955e..1637c1d 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -530,9 +530,9 @@
 /* 	Interrupt handlers 	*/
 
 
-static void isicom_bottomhalf(void *data)
+static void isicom_bottomhalf(struct work_struct *work)
 {
-	struct isi_port *port = (struct isi_port *) data;
+	struct isi_port *port = container_of(work, struct isi_port, bh_tqueue);
 	struct tty_struct *tty = port->tty;
 
 	if (!tty)
@@ -1474,9 +1474,9 @@
 }
 
 /* hangup et all */
-static void do_isicom_hangup(void *data)
+static void do_isicom_hangup(struct work_struct *work)
 {
-	struct isi_port *port = data;
+	struct isi_port *port = container_of(work, struct isi_port, hangup_tq);
 	struct tty_struct *tty;
 
 	tty = port->tty;
@@ -1966,8 +1966,8 @@
 			port->channel = channel;
 			port->close_delay = 50 * HZ/100;
 			port->closing_wait = 3000 * HZ/100;
-			INIT_WORK(&port->hangup_tq, do_isicom_hangup, port);
-			INIT_WORK(&port->bh_tqueue, isicom_bottomhalf, port);
+			INIT_WORK(&port->hangup_tq, do_isicom_hangup);
+			INIT_WORK(&port->bh_tqueue, isicom_bottomhalf);
 			port->status = 0;
 			init_waitqueue_head(&port->open_wait);
 			init_waitqueue_head(&port->close_wait);
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index ffdf9df..8f59194 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -663,7 +663,7 @@
 static int	stli_rawopen(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, int wait);
 static int	stli_rawclose(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, int wait);
 static int	stli_waitcarrier(stlibrd_t *brdp, stliport_t *portp, struct file *filp);
-static void	stli_dohangup(void *arg);
+static void	stli_dohangup(struct work_struct *);
 static int	stli_setport(stliport_t *portp);
 static int	stli_cmdwait(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback);
 static void	stli_sendcmd(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback);
@@ -1990,9 +1990,9 @@
  *	aren't that time critical).
  */
 
-static void stli_dohangup(void *arg)
+static void stli_dohangup(struct work_struct *ugly_api)
 {
-	stliport_t *portp = (stliport_t *) arg;
+	stliport_t *portp = container_of(ugly_api, stliport_t, tqhangup);
 	if (portp->tty != NULL) {
 		tty_hangup(portp->tty);
 	}
@@ -2898,7 +2898,7 @@
 		portp->baud_base = STL_BAUDBASE;
 		portp->close_delay = STL_CLOSEDELAY;
 		portp->closing_wait = 30 * HZ;
-		INIT_WORK(&portp->tqhangup, stli_dohangup, portp);
+		INIT_WORK(&portp->tqhangup, stli_dohangup);
 		init_waitqueue_head(&portp->open_wait);
 		init_waitqueue_head(&portp->close_wait);
 		init_waitqueue_head(&portp->raw_wait);
@@ -3476,6 +3476,8 @@
 	if (sig.magic != cpu_to_le32(ECP_MAGIC))
 	{
 		release_region(brdp->iobase, brdp->iosize);
+		iounmap(brdp->membase);
+		brdp->membase = NULL;
 		return -ENODEV;
 	}
 
@@ -3632,6 +3634,8 @@
 	    sig.magic3 != cpu_to_le16(ONB_MAGIC3))
 	{
 		release_region(brdp->iobase, brdp->iosize);
+		iounmap(brdp->membase);
+		brdp->membase = NULL;
 		return -ENODEV;
 	}
 
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 5547337..e67eef4 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -980,10 +980,10 @@
 
 	mem_class = class_create(THIS_MODULE, "mem");
 	for (i = 0; i < ARRAY_SIZE(devlist); i++)
-		class_device_create(mem_class, NULL,
-					MKDEV(MEM_MAJOR, devlist[i].minor),
-					NULL, devlist[i].name);
-	
+		device_create(mem_class, NULL,
+			      MKDEV(MEM_MAJOR, devlist[i].minor),
+			      devlist[i].name);
+
 	return 0;
 }
 
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 62ebe09..7e975f6 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -169,11 +169,6 @@
 	return err;
 }
 
-/* 
- * TODO for 2.7:
- *  - add a struct kref to struct miscdevice and make all usages of
- *    them dynamic.
- */
 static struct class *misc_class;
 
 static const struct file_operations misc_fops = {
@@ -204,6 +199,8 @@
 	dev_t dev;
 	int err = 0;
 
+	INIT_LIST_HEAD(&misc->list);
+
 	down(&misc_sem);
 	list_for_each_entry(c, &misc_list, list) {
 		if (c->minor == misc->minor) {
@@ -228,10 +225,10 @@
 		misc_minors[misc->minor >> 3] |= 1 << (misc->minor & 7);
 	dev = MKDEV(MISC_MAJOR, misc->minor);
 
-	misc->class = class_device_create(misc_class, NULL, dev, misc->dev,
+	misc->this_device = device_create(misc_class, misc->parent, dev,
 					  "%s", misc->name);
-	if (IS_ERR(misc->class)) {
-		err = PTR_ERR(misc->class);
+	if (IS_ERR(misc->this_device)) {
+		err = PTR_ERR(misc->this_device);
 		goto out;
 	}
 
@@ -264,7 +261,7 @@
 
 	down(&misc_sem);
 	list_del(&misc->list);
-	class_device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor));
+	device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor));
 	if (i < DYNAMIC_MINORS && i>0) {
 		misc_minors[i>>3] &= ~(1 << (misc->minor & 7));
 	}
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index 22b9905..c091603 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -680,7 +680,7 @@
 	if (sn_rtc_cycles_per_second < 100000) {
 		printk(KERN_ERR "%s: unable to determine clock frequency\n",
 		       MMTIMER_NAME);
-		return -1;
+		goto out1;
 	}
 
 	mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second /
@@ -689,13 +689,13 @@
 	if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) {
 		printk(KERN_WARNING "%s: unable to allocate interrupt.",
 			MMTIMER_NAME);
-		return -1;
+		goto out1;
 	}
 
 	if (misc_register(&mmtimer_miscdev)) {
 		printk(KERN_ERR "%s: failed to register device\n",
 		       MMTIMER_NAME);
-		return -1;
+		goto out2;
 	}
 
 	/* Get max numbered node, calculate slots needed */
@@ -709,16 +709,18 @@
 	if (timers == NULL) {
 		printk(KERN_ERR "%s: failed to allocate memory for device\n",
 				MMTIMER_NAME);
-		return -1;
+		goto out3;
 	}
 
+	memset(timers, 0, sizeof(mmtimer_t *) * maxn);
+
 	/* Allocate mmtimer_t's for each online node */
 	for_each_online_node(node) {
 		timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node);
 		if (timers[node] == NULL) {
 			printk(KERN_ERR "%s: failed to allocate memory for device\n",
 				MMTIMER_NAME);
-			return -1;
+			goto out4;
 		}
 		for (i=0; i< NUM_COMPARATORS; i++) {
 			mmtimer_t * base = timers[node] + i;
@@ -739,6 +741,17 @@
 	       sn_rtc_cycles_per_second/(unsigned long)1E6);
 
 	return 0;
+
+out4:
+	for_each_online_node(node) {
+		kfree(timers[node]);
+	}
+out3:
+	misc_deregister(&mmtimer_miscdev);
+out2:
+	free_irq(SGI_MMTIMER_VECTOR, NULL);
+out1:
+	return -1;
 }
 
 module_init(mmtimer_init);
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 96cb1f0..8b31695 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -222,7 +222,7 @@
 /*
  * static functions:
  */
-static void do_moxa_softint(void *);
+static void do_moxa_softint(struct work_struct *);
 static int moxa_open(struct tty_struct *, struct file *);
 static void moxa_close(struct tty_struct *, struct file *);
 static int moxa_write(struct tty_struct *, const unsigned char *, int);
@@ -363,7 +363,7 @@
 	for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) {
 		ch->type = PORT_16550A;
 		ch->port = i;
-		INIT_WORK(&ch->tqueue, do_moxa_softint, ch);
+		INIT_WORK(&ch->tqueue, do_moxa_softint);
 		ch->tty = NULL;
 		ch->close_delay = 5 * HZ / 10;
 		ch->closing_wait = 30 * HZ;
@@ -498,9 +498,12 @@
 		printk("Couldn't unregister MOXA Intellio family serial driver\n");
 	put_tty_driver(moxaDriver);
 
-	for (i = 0; i < MAX_BOARDS; i++)
+	for (i = 0; i < MAX_BOARDS; i++) {
+		if (moxaBaseAddr[i])
+			iounmap(moxaBaseAddr[i]);
 		if (moxa_boards[i].busType == MOXA_BUS_TYPE_PCI)
 			pci_dev_put(moxa_boards[i].pciInfo.pdev);
+	}
 
 	if (verbose)
 		printk("Done\n");
@@ -509,9 +512,9 @@
 module_init(moxa_init);
 module_exit(moxa_exit);
 
-static void do_moxa_softint(void *private_)
+static void do_moxa_softint(struct work_struct *work)
 {
-	struct moxa_str *ch = (struct moxa_str *) private_;
+	struct moxa_str *ch = container_of(work, struct moxa_str, tqueue);
 	struct tty_struct *tty;
 
 	if (ch && (tty = ch->tty)) {
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 048d911..5ed2486 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -389,7 +389,7 @@
 /* static void   mxser_poll(unsigned long); */
 static int mxser_get_ISA_conf(int, struct mxser_hwconf *);
 static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *);
-static void mxser_do_softint(void *);
+static void mxser_do_softint(struct work_struct *);
 static int mxser_open(struct tty_struct *, struct file *);
 static void mxser_close(struct tty_struct *, struct file *);
 static int mxser_write(struct tty_struct *, const unsigned char *, int);
@@ -590,7 +590,7 @@
 		info->custom_divisor = hwconf->baud_base[i] * 16;
 		info->close_delay = 5 * HZ / 10;
 		info->closing_wait = 30 * HZ;
-		INIT_WORK(&info->tqueue, mxser_do_softint, info);
+		INIT_WORK(&info->tqueue, mxser_do_softint);
 		info->normal_termios = mxvar_sdriver->init_termios;
 		init_waitqueue_head(&info->open_wait);
 		init_waitqueue_head(&info->close_wait);
@@ -917,9 +917,10 @@
 	return 0;
 }
 
-static void mxser_do_softint(void *private_)
+static void mxser_do_softint(struct work_struct *work)
 {
-	struct mxser_struct *info = private_;
+	struct mxser_struct *info =
+		container_of(work, struct mxser_struct, tqueue);
 	struct tty_struct *tty;
 
 	tty = info->tty;
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 50d20aa..211c93f 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1764,29 +1764,11 @@
 	int rc;
 
 	/* read the config-tuples */
-	tuple.DesiredTuple = CISTPL_CONFIG;
 	tuple.Attributes = 0;
 	tuple.TupleData = buf;
 	tuple.TupleDataMax = sizeof(buf);
 	tuple.TupleOffset = 0;
 
-	if ((fail_rc = pcmcia_get_first_tuple(link, &tuple)) != CS_SUCCESS) {
-		fail_fn = GetFirstTuple;
-		goto cs_failed;
-	}
-	if ((fail_rc = pcmcia_get_tuple_data(link, &tuple)) != CS_SUCCESS) {
-		fail_fn = GetTupleData;
-		goto cs_failed;
-	}
-	if ((fail_rc =
-	     pcmcia_parse_tuple(link, &tuple, &parse)) != CS_SUCCESS) {
-		fail_fn = ParseTuple;
-		goto cs_failed;
-	}
-
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present = parse.config.rmask[0];
-
 	link->io.BasePort2 = 0;
 	link->io.NumPorts2 = 0;
 	link->io.Attributes2 = 0;
@@ -1841,8 +1823,6 @@
 
 	return 0;
 
-cs_failed:
-	cs_error(link, fail_fn, fail_rc);
 cs_release:
 	cm4000_release(link);
 	return -ENODEV;
@@ -1973,14 +1953,14 @@
 	printk(KERN_INFO "%s\n", version);
 
 	cmm_class = class_create(THIS_MODULE, "cardman_4000");
-	if (!cmm_class)
-		return -1;
+	if (IS_ERR(cmm_class))
+		return PTR_ERR(cmm_class);
 
 	major = register_chrdev(0, DEVICE_NAME, &cm4000_fops);
 	if (major < 0) {
 		printk(KERN_WARNING MODULE_NAME
 			": could not get major number\n");
-		return -1;
+		return major;
 	}
 
 	rc = pcmcia_register_driver(&cm4000_driver);
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 55cf4be..9b1ff7e 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -523,29 +523,11 @@
 	int fail_fn, fail_rc;
 	int rc;
 
-	tuple.DesiredTuple = CISTPL_CONFIG;
 	tuple.Attributes = 0;
 	tuple.TupleData = buf;
 	tuple.TupleDataMax = sizeof(buf);
  	tuple.TupleOffset = 0;
 
-	if ((fail_rc = pcmcia_get_first_tuple(link, &tuple)) != CS_SUCCESS) {
-		fail_fn = GetFirstTuple;
-		goto cs_failed;
-	}
-	if ((fail_rc = pcmcia_get_tuple_data(link, &tuple)) != CS_SUCCESS) {
-		fail_fn = GetTupleData;
-		goto cs_failed;
-	}
-	if ((fail_rc = pcmcia_parse_tuple(link, &tuple, &parse))
-							!= CS_SUCCESS) {
-		fail_fn = ParseTuple;
-		goto cs_failed;
-	}
-
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present = parse.config.rmask[0];
-
 	link->io.BasePort2 = 0;
 	link->io.NumPorts2 = 0;
 	link->io.Attributes2 = 0;
@@ -609,8 +591,6 @@
 
 	return 0;
 
-cs_failed:
-	cs_error(link, fail_fn, fail_rc);
 cs_release:
 	reader_release(link);
 	return -ENODEV;
@@ -721,14 +701,14 @@
 
 	printk(KERN_INFO "%s\n", version);
 	cmx_class = class_create(THIS_MODULE, "cardman_4040");
-	if (!cmx_class)
-		return -1;
+	if (IS_ERR(cmx_class))
+		return PTR_ERR(cmx_class);
 
 	major = register_chrdev(0, DEVICE_NAME, &reader_fops);
 	if (major < 0) {
 		printk(KERN_WARNING MODULE_NAME
 			": could not get major number\n");
-		return -1;
+		return major;
 	}
 
 	rc = pcmcia_register_driver(&reader_driver);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 1a0bc30..74d21c1 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -75,8 +75,10 @@
 #include <pcmcia/cisreg.h>
 #include <pcmcia/ds.h>
 
-#ifdef CONFIG_HDLC_MODULE
-#define CONFIG_HDLC 1
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_CS_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
 #endif
 
 #define GET_USER(error,value,addr) error = get_user(value,addr)
@@ -235,7 +237,7 @@
 	int dosyncppp;
 	spinlock_t netlock;
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	struct net_device *netdev;
 #endif
 
@@ -392,7 +394,7 @@
 
 static int ioctl_common(MGSLPC_INFO *info, unsigned int cmd, unsigned long arg);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
 static void hdlcdev_tx_done(MGSLPC_INFO *info);
 static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size);
@@ -421,7 +423,7 @@
 /*
  * Bottom half interrupt handlers
  */
-static void bh_handler(void* Context);
+static void bh_handler(struct work_struct *work);
 static void bh_transmit(MGSLPC_INFO *info);
 static void bh_status(MGSLPC_INFO *info);
 
@@ -547,7 +549,7 @@
 
     memset(info, 0, sizeof(MGSLPC_INFO));
     info->magic = MGSLPC_MAGIC;
-    INIT_WORK(&info->task, bh_handler, info);
+    INIT_WORK(&info->task, bh_handler);
     info->max_frame_size = 4096;
     info->close_delay = 5*HZ/10;
     info->closing_wait = 30*HZ;
@@ -604,17 +606,10 @@
     if (debug_level >= DEBUG_LEVEL_INFO)
 	    printk("mgslpc_config(0x%p)\n", link);
 
-    /* read CONFIG tuple to find its configuration registers */
-    tuple.DesiredTuple = CISTPL_CONFIG;
     tuple.Attributes = 0;
     tuple.TupleData = buf;
     tuple.TupleDataMax = sizeof(buf);
     tuple.TupleOffset = 0;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
 
     /* get CIS configuration entry */
 
@@ -842,9 +837,9 @@
 	return rc;
 }
 
-static void bh_handler(void* Context)
+static void bh_handler(struct work_struct *work)
 {
-	MGSLPC_INFO *info = (MGSLPC_INFO*)Context;
+	MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task);
 	int action;
 
 	if (!info)
@@ -1060,7 +1055,7 @@
 		info->drop_rts_on_tx_done = 0;
 	}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (info->netcount)
 		hdlcdev_tx_done(info);
 	else 
@@ -1171,7 +1166,7 @@
 	}
 	else
 		info->input_signal_events.dcd_down++;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (info->netcount) {
 		if (info->serial_signals & SerialSignal_DCD)
 			netif_carrier_on(info->netdev);
@@ -2960,7 +2955,7 @@
 	printk( "SyncLink PC Card %s:IO=%04X IRQ=%d\n",
 		info->device_name, info->io_base, info->irq_level);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	hdlcdev_init(info);
 #endif
 }
@@ -2976,7 +2971,7 @@
 				last->next_device = info->next_device;
 			else
 				mgslpc_device_list = info->next_device;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 			hdlcdev_exit(info);
 #endif
 			release_resources(info);
@@ -3908,7 +3903,7 @@
 				return_frame = 1;
 		}
 		framesize = 0;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 		{
 			struct net_device_stats *stats = hdlc_stats(info->netdev);
 			stats->rx_errors++;
@@ -3942,7 +3937,7 @@
 				++framesize;
 			}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 			if (info->netcount)
 				hdlcdev_rx(info, buf->data, framesize);
 			else
@@ -4098,7 +4093,7 @@
 
 	spin_unlock_irqrestore(&info->lock,flags);
 	
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (info->netcount)
 		hdlcdev_tx_done(info);
 	else
@@ -4106,7 +4101,7 @@
 		bh_transmit(info);
 }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 
 /**
  * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
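For reference, a minimal standalone sketch (not part of this patch) of the workqueue API conversion applied throughout these drivers/char hunks: the handler loses its void * context argument, takes the struct work_struct * instead, and recovers its private structure with container_of(); INIT_WORK() correspondingly drops the data argument. The struct my_info name below is hypothetical.

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct my_info {
		int pending;
		struct work_struct task;	/* embedded work item */
	};

	/* Old style: static void my_bh_handler(void *context)
	 * New style: the work item itself is passed in; the driver data is
	 * recovered with container_of() on the embedding structure.
	 */
	static void my_bh_handler(struct work_struct *work)
	{
		struct my_info *info = container_of(work, struct my_info, task);

		info->pending = 0;
	}

	static void my_setup(struct my_info *info)
	{
		/* Old: INIT_WORK(&info->task, my_bh_handler, info); */
		INIT_WORK(&info->task, my_bh_handler);
		schedule_work(&info->task);
	}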
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index efc485e..c1e3dd8 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -752,13 +752,13 @@
 
 static void pp_attach(struct parport *port)
 {
-	class_device_create(ppdev_class, NULL, MKDEV(PP_MAJOR, port->number),
-			NULL, "parport%d", port->number);
+	device_create(ppdev_class, NULL, MKDEV(PP_MAJOR, port->number),
+			"parport%d", port->number);
 }
 
 static void pp_detach(struct parport *port)
 {
-	class_device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number));
+	device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number));
 }
 
 static struct parport_driver pp_driver = {
diff --git a/drivers/char/random.c b/drivers/char/random.c
index eb6b13f..4c6782a 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1422,9 +1422,9 @@
 
 static unsigned int ip_cnt;
 
-static void rekey_seq_generator(void *private_);
+static void rekey_seq_generator(struct work_struct *work);
 
-static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
+static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
 
 /*
  * Lock avoidance:
@@ -1438,7 +1438,7 @@
  * happen, and even if that happens only a not perfectly compliant
  * ISN is generated, nothing fatal.
  */
-static void rekey_seq_generator(void *private_)
+static void rekey_seq_generator(struct work_struct *work)
 {
 	struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
 
@@ -1466,8 +1466,8 @@
 late_initcall(seqgen_init);
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-__u32 secure_tcpv6_sequence_number(__u32 *saddr, __u32 *daddr,
-				   __u16 sport, __u16 dport)
+__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+				   __be16 sport, __be16 dport)
 {
 	struct timeval tv;
 	__u32 seq;
@@ -1479,10 +1479,10 @@
 	 */
 
 	memcpy(hash, saddr, 16);
-	hash[4]=(sport << 16) + dport;
+	hash[4]=((__force u16)sport << 16) + (__force u16)dport;
 	memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
 
-	seq = twothirdsMD4Transform(daddr, hash) & HASH_MASK;
+	seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
 	seq += keyptr->count;
 
 	do_gettimeofday(&tv);
@@ -1496,7 +1496,7 @@
 /*  The code below is shamelessly stolen from secure_tcp_sequence_number().
  *  All blames to Andrey V. Savochkin <saw@msu.ru>.
  */
-__u32 secure_ip_id(__u32 daddr)
+__u32 secure_ip_id(__be32 daddr)
 {
 	struct keydata *keyptr;
 	__u32 hash[4];
@@ -1508,7 +1508,7 @@
 	 *  The dest ip address is placed in the starting vector,
 	 *  which is then hashed with random data.
 	 */
-	hash[0] = daddr;
+	hash[0] = (__force __u32)daddr;
 	hash[1] = keyptr->secret[9];
 	hash[2] = keyptr->secret[10];
 	hash[3] = keyptr->secret[11];
@@ -1518,8 +1518,8 @@
 
 #ifdef CONFIG_INET
 
-__u32 secure_tcp_sequence_number(__u32 saddr, __u32 daddr,
-				 __u16 sport, __u16 dport)
+__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+				 __be16 sport, __be16 dport)
 {
 	struct timeval tv;
 	__u32 seq;
@@ -1532,9 +1532,9 @@
 	 *  Note that the words are placed into the starting vector, which is
 	 *  then mixed with a partial MD4 over random data.
 	 */
-	hash[0]=saddr;
-	hash[1]=daddr;
-	hash[2]=(sport << 16) + dport;
+	hash[0]=(__force u32)saddr;
+	hash[1]=(__force u32)daddr;
+	hash[2]=((__force u16)sport << 16) + (__force u16)dport;
 	hash[3]=keyptr->secret[11];
 
 	seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
@@ -1559,7 +1559,7 @@
 EXPORT_SYMBOL(secure_tcp_sequence_number);
 
 /* Generate secure starting point for ephemeral IPV4 transport port search */
-u32 secure_ipv4_port_ephemeral(__u32 saddr, __u32 daddr, __u16 dport)
+u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
 {
 	struct keydata *keyptr = get_keyptr();
 	u32 hash[4];
@@ -1568,25 +1568,25 @@
 	 *  Pick a unique starting offset for each ephemeral port search
 	 *  (saddr, daddr, dport) and 48bits of random data.
 	 */
-	hash[0] = saddr;
-	hash[1] = daddr;
-	hash[2] = dport ^ keyptr->secret[10];
+	hash[0] = (__force u32)saddr;
+	hash[1] = (__force u32)daddr;
+	hash[2] = (__force u32)dport ^ keyptr->secret[10];
 	hash[3] = keyptr->secret[11];
 
 	return half_md4_transform(hash, keyptr->secret);
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-u32 secure_ipv6_port_ephemeral(const __u32 *saddr, const __u32 *daddr, __u16 dport)
+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport)
 {
 	struct keydata *keyptr = get_keyptr();
 	u32 hash[12];
 
 	memcpy(hash, saddr, 16);
-	hash[4] = dport;
+	hash[4] = (__force u32)dport;
 	memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
 
-	return twothirdsMD4Transform(daddr, hash);
+	return twothirdsMD4Transform((const __u32 *)daddr, hash);
 }
 #endif
 
@@ -1595,17 +1595,17 @@
  * bit's 32-47 increase every key exchange
  *       0-31  hash(source, dest)
  */
-u64 secure_dccp_sequence_number(__u32 saddr, __u32 daddr,
-				__u16 sport, __u16 dport)
+u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+				__be16 sport, __be16 dport)
 {
 	struct timeval tv;
 	u64 seq;
 	__u32 hash[4];
 	struct keydata *keyptr = get_keyptr();
 
-	hash[0] = saddr;
-	hash[1] = daddr;
-	hash[2] = (sport << 16) + dport;
+	hash[0] = (__force u32)saddr;
+	hash[1] = (__force u32)daddr;
+	hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
 	hash[3] = keyptr->secret[11];
 
 	seq = half_md4_transform(hash, keyptr->secret);
@@ -1641,7 +1641,7 @@
 	 * drain on it), and uses halfMD4Transform within the second. We
 	 * also mix it with jiffies and the PID:
 	 */
-	return secure_ip_id(current->pid + jiffies);
+	return secure_ip_id((__force __be32)(current->pid + jiffies));
 }
 
 /*
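For reference, a minimal sketch (not part of this patch) of the sparse endianness annotations introduced in the random.c hunks above: __be32/__be16 mark big-endian (network byte order) values, and a __force cast documents that hashing the raw bit pattern is intentional. The hash_flow() helper below is hypothetical; the real code feeds the array into half_md4_transform().

	#include <linux/compiler.h>
	#include <linux/types.h>

	static u32 hash_flow(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport)
	{
		u32 hash[3];

		/* strip the endian annotation explicitly before mixing raw bits */
		hash[0] = (__force u32)saddr;
		hash[1] = (__force u32)daddr;
		hash[2] = ((__force u16)sport << 16) + (__force u16)dport;

		return hash[0] ^ hash[1] ^ hash[2];	/* placeholder mix only */
	}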
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 89b718e..3b32313 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -127,9 +127,9 @@
 
 static void bind_device(struct raw_config_request *rq)
 {
-	class_device_destroy(raw_class, MKDEV(RAW_MAJOR, rq->raw_minor));
-	class_device_create(raw_class, NULL, MKDEV(RAW_MAJOR, rq->raw_minor),
-				      NULL, "raw%d", rq->raw_minor);
+	device_destroy(raw_class, MKDEV(RAW_MAJOR, rq->raw_minor));
+	device_create(raw_class, NULL, MKDEV(RAW_MAJOR, rq->raw_minor),
+		      "raw%d", rq->raw_minor);
 }
 
 /*
@@ -200,7 +200,7 @@
 			if (rq.block_major == 0 && rq.block_minor == 0) {
 				/* unbind */
 				rawdev->binding = NULL;
-				class_device_destroy(raw_class,
+				device_destroy(raw_class,
 						MKDEV(RAW_MAJOR, rq.raw_minor));
 			} else {
 				rawdev->binding = bdget(dev);
@@ -283,7 +283,7 @@
 		ret = PTR_ERR(raw_class);
 		goto error_region;
 	}
-	class_device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");
+	device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), "rawctl");
 
 	return 0;
 
@@ -295,7 +295,7 @@
 
 static void __exit raw_exit(void)
 {
-	class_device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
+	device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
 	class_destroy(raw_class);
 	cdev_del(&raw_cdev);
 	unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), MAX_RAW_MINORS);
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c
index 7ac68cb..e79b2ed 100644
--- a/drivers/char/rio/rio_linux.c
+++ b/drivers/char/rio/rio_linux.c
@@ -1026,6 +1026,7 @@
 			found++;
 		} else {
 			iounmap(p->RIOHosts[p->RIONumHosts].Caddr);
+			p->RIOHosts[p->RIONumHosts].Caddr = NULL;
 		}
 	}
 
@@ -1078,6 +1079,7 @@
 			found++;
 		} else {
 			iounmap(p->RIOHosts[p->RIONumHosts].Caddr);
+			p->RIOHosts[p->RIONumHosts].Caddr = NULL;
 		}
 #else
 		printk(KERN_ERR "Found an older RIO PCI card, but the driver is not " "compiled to support it.\n");
@@ -1117,8 +1119,10 @@
 				}
 			}
 
-			if (!okboard)
+			if (!okboard) {
 				iounmap(hp->Caddr);
+				hp->Caddr = NULL;
+			}
 		}
 	}
 
@@ -1188,6 +1192,8 @@
 		}
 		/* It is safe/allowed to del_timer a non-active timer */
 		del_timer(&hp->timer);
+		if (hp->Caddr)
+			iounmap(hp->Caddr);
 		if (hp->Type == RIO_PCI)
 			pci_dev_put(hp->pdev);
 	}
diff --git a/drivers/char/rio/riocmd.c b/drivers/char/rio/riocmd.c
index 4df6ab2..167ebc8 100644
--- a/drivers/char/rio/riocmd.c
+++ b/drivers/char/rio/riocmd.c
@@ -922,7 +922,7 @@
 ** 
 ** Packet is an actual packet structure to be filled in with the packet
 ** information associated with the command. You need to fill in everything,
-** as the command processore doesn't process the command packet in any way.
+** as the command processor doesn't process the command packet in any way.
 ** 
 ** The PreFuncP is called before the packet is enqueued on the host rup.
 ** PreFuncP is called as (*PreFuncP)(PreArg, CmdBlkP);. PreFuncP must
diff --git a/drivers/char/rio/rioinit.c b/drivers/char/rio/rioinit.c
index 99f3df0..0794844 100644
--- a/drivers/char/rio/rioinit.c
+++ b/drivers/char/rio/rioinit.c
@@ -222,7 +222,7 @@
 ** which value will be written into memory.
 ** Call with op set to zero means that the RAM will not be read and checked
 ** before it is written.
-** Call with op not zero, and the RAM will be read and compated with val[op-1]
+** Call with op not zero and the RAM will be read and compared with val[op-1]
 ** to check that the data from the previous phase was retained.
 */
 
diff --git a/drivers/char/rio/rioparam.c b/drivers/char/rio/rioparam.c
index 1066d97..bb498d2 100644
--- a/drivers/char/rio/rioparam.c
+++ b/drivers/char/rio/rioparam.c
@@ -87,8 +87,8 @@
 ** command bit set onto the port. The command bit is in the len field,
 ** and gets ORed in with the actual byte count.
 **
-** When you send a packet with the command bit set, then the first
-** data byte ( data[0] ) is interpretted as the command to execute.
+** When you send a packet with the command bit set the first
+** data byte (data[0]) is interpreted as the command to execute.
 ** It also governs what data structure overlay should accompany the packet.
 ** Commands are defined in cirrus/cirrus.h
 **
@@ -103,7 +103,7 @@
 **
 ** Most commands do not use the remaining bytes in the data array. The
 ** exceptions are OPEN MOPEN and CONFIG. (NB. As with the SI CONFIG and
-** OPEN are currently analagous). With these three commands the following
+** OPEN are currently analogous). With these three commands the following
 ** 11 data bytes are all used to pass config information such as baud rate etc.
 ** The fields are also defined in cirrus.h. Some contain straightforward
 ** information such as the transmit XON character. Two contain the transmit and
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 5ab32b3..0a77bfc 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -82,11 +82,6 @@
 static struct riscom_board * IRQ_to_board[16];
 static struct tty_driver *riscom_driver;
 
-static unsigned long baud_table[] =  {
-	0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
-	9600, 19200, 38400, 57600, 76800, 0, 
-};
-
 static struct riscom_board rc_board[RC_NBOARD] =  {
 	{
 		.base	= RC_IOBASE1,
@@ -1516,9 +1511,9 @@
  * 	do_rc_hangup() -> tty->hangup() -> rc_hangup()
  * 
  */
-static void do_rc_hangup(void *private_)
+static void do_rc_hangup(struct work_struct *ugly_api)
 {
-	struct riscom_port	*port = (struct riscom_port *) private_;
+	struct riscom_port	*port = container_of(ugly_api, struct riscom_port, tqueue_hangup);
 	struct tty_struct	*tty;
 	
 	tty = port->tty;
@@ -1567,9 +1562,9 @@
 	}
 }
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *ugly_api)
 {
-	struct riscom_port	*port = (struct riscom_port *) private_;
+	struct riscom_port	*port = container_of(ugly_api, struct riscom_port, tqueue);
 	struct tty_struct	*tty;
 	
 	if(!(tty = port->tty)) 
@@ -1632,8 +1627,8 @@
 	memset(rc_port, 0, sizeof(rc_port));
 	for (i = 0; i < RC_NPORT * RC_NBOARD; i++)  {
 		rc_port[i].magic = RISCOM8_MAGIC;
-		INIT_WORK(&rc_port[i].tqueue, do_softint, &rc_port[i]);
-		INIT_WORK(&rc_port[i].tqueue_hangup, do_rc_hangup, &rc_port[i]);
+		INIT_WORK(&rc_port[i].tqueue, do_softint);
+		INIT_WORK(&rc_port[i].tqueue_hangup, do_rc_hangup);
 		rc_port[i].close_delay = 50 * HZ/100;
 		rc_port[i].closing_wait = 3000 * HZ/100;
 		init_waitqueue_head(&rc_port[i].open_wait);
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 3af7f09..9ba13af 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -706,9 +706,9 @@
  * had to poll every port to see if that port needed servicing.
  */
 static void
-do_softint(void *private_)
+do_softint(struct work_struct *ugly_api)
 {
-  struct cyclades_port *info = (struct cyclades_port *) private_;
+  struct cyclades_port *info = container_of(ugly_api, struct cyclades_port, tqueue);
   struct tty_struct    *tty;
 
     tty = info->tty;
@@ -2273,7 +2273,7 @@
 		info->blocked_open = 0;
 		info->default_threshold = 0;
 		info->default_timeout = 0;
-		INIT_WORK(&info->tqueue, do_softint, info);
+		INIT_WORK(&info->tqueue, do_softint);
 		init_waitqueue_head(&info->open_wait);
 		init_waitqueue_head(&info->close_wait);
 		/* info->session */
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index c084149..fc87070 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -765,7 +765,7 @@
 	sonypi_device.bluetooth_power = state;
 }
 
-static void input_keyrelease(void *data)
+static void input_keyrelease(struct work_struct *work)
 {
 	struct sonypi_keypress kp;
 
@@ -1412,7 +1412,7 @@
 			goto err_inpdev_unregister;
 		}
 
-		INIT_WORK(&sonypi_device.input_work, input_keyrelease, NULL);
+		INIT_WORK(&sonypi_device.input_work, input_keyrelease);
 	}
 
 	sonypi_enable(0);
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 7e1bd95..99137ab 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -2261,9 +2261,10 @@
  * 	do_sx_hangup() -> tty->hangup() -> sx_hangup()
  *
  */
-static void do_sx_hangup(void *private_)
+static void do_sx_hangup(struct work_struct *work)
 {
-	struct specialix_port	*port = (struct specialix_port *) private_;
+	struct specialix_port	*port =
+		container_of(work, struct specialix_port, tqueue_hangup);
 	struct tty_struct	*tty;
 
 	func_enter();
@@ -2336,9 +2337,10 @@
 }
 
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
 {
-	struct specialix_port	*port = (struct specialix_port *) private_;
+	struct specialix_port	*port =
+		container_of(work, struct specialix_port, tqueue);
 	struct tty_struct	*tty;
 
 	func_enter();
@@ -2411,8 +2413,8 @@
 	memset(sx_port, 0, sizeof(sx_port));
 	for (i = 0; i < SX_NPORT * SX_NBOARD; i++) {
 		sx_port[i].magic = SPECIALIX_MAGIC;
-		INIT_WORK(&sx_port[i].tqueue, do_softint, &sx_port[i]);
-		INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup, &sx_port[i]);
+		INIT_WORK(&sx_port[i].tqueue, do_softint);
+		INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup);
 		sx_port[i].close_delay = 50 * HZ/100;
 		sx_port[i].closing_wait = 3000 * HZ/100;
 		init_waitqueue_head(&sx_port[i].open_wait);
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 522e88e..5e2de62 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -500,7 +500,7 @@
 static int	stl_echmcaintr(stlbrd_t *brdp);
 static int	stl_echpciintr(stlbrd_t *brdp);
 static int	stl_echpci64intr(stlbrd_t *brdp);
-static void	stl_offintr(void *private);
+static void	stl_offintr(struct work_struct *);
 static stlbrd_t *stl_allocbrd(void);
 static stlport_t *stl_getport(int brdnr, int panelnr, int portnr);
 
@@ -2081,14 +2081,12 @@
 /*
  *	Service an off-level request for some channel.
  */
-static void stl_offintr(void *private)
+static void stl_offintr(struct work_struct *work)
 {
-	stlport_t		*portp;
+	stlport_t		*portp = container_of(work, stlport_t, tqueue);
 	struct tty_struct	*tty;
 	unsigned int		oldsigs;
 
-	portp = private;
-
 #ifdef DEBUG
 	printk("stl_offintr(portp=%x)\n", (int) portp);
 #endif
@@ -2156,7 +2154,7 @@
 		portp->baud_base = STL_BAUDBASE;
 		portp->close_delay = STL_CLOSEDELAY;
 		portp->closing_wait = 30 * HZ;
-		INIT_WORK(&portp->tqueue, stl_offintr, portp);
+		INIT_WORK(&portp->tqueue, stl_offintr);
 		init_waitqueue_head(&portp->open_wait);
 		init_waitqueue_head(&portp->close_wait);
 		portp->stats.brd = portp->brdnr;
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index 06784ad..645187b 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -101,8 +101,10 @@
 #include <linux/hdlc.h>
 #include <linux/dma-mapping.h>
 
-#ifdef CONFIG_HDLC_MODULE
-#define CONFIG_HDLC 1
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
 #endif
 
 #define GET_USER(error,value,addr) error = get_user(value,addr)
@@ -320,7 +322,7 @@
 	int dosyncppp;
 	spinlock_t netlock;
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	struct net_device *netdev;
 #endif
 };
@@ -728,7 +730,7 @@
 
 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
 static void hdlcdev_tx_done(struct mgsl_struct *info);
 static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
@@ -802,7 +804,7 @@
 /*
  * Bottom half interrupt handlers
  */
-static void mgsl_bh_handler(void* Context);
+static void mgsl_bh_handler(struct work_struct *work);
 static void mgsl_bh_receive(struct mgsl_struct *info);
 static void mgsl_bh_transmit(struct mgsl_struct *info);
 static void mgsl_bh_status(struct mgsl_struct *info);
@@ -1071,9 +1073,10 @@
 /*
  * 	Perform bottom half processing of work items queued by ISR.
  */
-static void mgsl_bh_handler(void* Context)
+static void mgsl_bh_handler(struct work_struct *work)
 {
-	struct mgsl_struct *info = (struct mgsl_struct*)Context;
+	struct mgsl_struct *info =
+		container_of(work, struct mgsl_struct, task);
 	int action;
 
 	if (!info)
@@ -1276,7 +1279,7 @@
 		info->drop_rts_on_tx_done = 0;
 	}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (info->netcount)
 		hdlcdev_tx_done(info);
 	else 
@@ -1341,7 +1344,7 @@
 				info->input_signal_events.dcd_up++;
 			} else
 				info->input_signal_events.dcd_down++;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 			if (info->netcount) {
 				if (status & MISCSTATUS_DCD)
 					netif_carrier_on(info->netdev);
@@ -4312,7 +4315,7 @@
 		     	info->max_frame_size );
 	}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	hdlcdev_init(info);
 #endif
 
@@ -4337,7 +4340,7 @@
 	} else {
 		memset(info, 0, sizeof(struct mgsl_struct));
 		info->magic = MGSL_MAGIC;
-		INIT_WORK(&info->task, mgsl_bh_handler, info);
+		INIT_WORK(&info->task, mgsl_bh_handler);
 		info->max_frame_size = 4096;
 		info->close_delay = 5*HZ/10;
 		info->closing_wait = 30*HZ;
@@ -4470,7 +4473,7 @@
 
 	info = mgsl_device_list;
 	while(info) {
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 		hdlcdev_exit(info);
 #endif
 		mgsl_release_resources(info);
@@ -6644,7 +6647,7 @@
 				return_frame = 1;
 		}
 		framesize = 0;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 		{
 			struct net_device_stats *stats = hdlc_stats(info->netdev);
 			stats->rx_errors++;
@@ -6720,7 +6723,7 @@
 						*ptmp);
 			}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 			if (info->netcount)
 				hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
 			else
@@ -7624,7 +7627,7 @@
 
 	spin_unlock_irqrestore(&info->irq_spinlock,flags);
 	
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (info->netcount)
 		hdlcdev_tx_done(info);
 	else
@@ -7700,7 +7703,7 @@
  	return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
 }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 
 /**
  * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index d4334c7..e4730a7 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -83,8 +83,10 @@
 
 #include "linux/synclink.h"
 
-#ifdef CONFIG_HDLC_MODULE
-#define CONFIG_HDLC 1
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
 #endif
 
 /*
@@ -171,7 +173,7 @@
 /*
  * generic HDLC support and callbacks
  */
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
 static void hdlcdev_tx_done(struct slgt_info *info);
 static void hdlcdev_rx(struct slgt_info *info, char *buf, int size);
@@ -359,7 +361,7 @@
 	int netcount;
 	int dosyncppp;
 	spinlock_t netlock;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	struct net_device *netdev;
 #endif
 
@@ -485,7 +487,7 @@
 static void set_rate(struct slgt_info *info, u32 data_rate);
 
 static int  bh_action(struct slgt_info *info);
-static void bh_handler(void* context);
+static void bh_handler(struct work_struct *work);
 static void bh_transmit(struct slgt_info *info);
 static void isr_serial(struct slgt_info *info);
 static void isr_rdma(struct slgt_info *info);
@@ -1354,7 +1356,7 @@
 	spin_unlock_irqrestore(&info->lock,flags);
 }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 
 /**
  * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
@@ -1878,9 +1880,9 @@
 /*
  * perform bottom half processing
  */
-static void bh_handler(void* context)
+static void bh_handler(struct work_struct *work)
 {
-	struct slgt_info *info = context;
+	struct slgt_info *info = container_of(work, struct slgt_info, task);
 	int action;
 
 	if (!info)
@@ -2002,7 +2004,7 @@
 	} else {
 		info->input_signal_events.dcd_down++;
 	}
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (info->netcount) {
 		if (info->signals & SerialSignal_DCD)
 			netif_carrier_on(info->netdev);
@@ -2180,7 +2182,7 @@
 			set_signals(info);
 		}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 		if (info->netcount)
 			hdlcdev_tx_done(info);
 		else
@@ -3306,7 +3308,7 @@
 		devstr, info->device_name, info->phys_reg_addr,
 		info->irq_level, info->max_frame_size);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	hdlcdev_init(info);
 #endif
 }
@@ -3326,7 +3328,7 @@
 	} else {
 		memset(info, 0, sizeof(struct slgt_info));
 		info->magic = MGSL_MAGIC;
-		INIT_WORK(&info->task, bh_handler, info);
+		INIT_WORK(&info->task, bh_handler);
 		info->max_frame_size = 4096;
 		info->raw_rx_size = DMABUFSIZE;
 		info->close_delay = 5*HZ/10;
@@ -3488,7 +3490,7 @@
 	/* release devices */
 	info = slgt_device_list;
 	while(info) {
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 		hdlcdev_exit(info);
 #endif
 		free_dma_bufs(info);
@@ -3522,6 +3524,7 @@
 
 	if (!slgt_device_list) {
 		printk("%s no devices found\n",driver_name);
+		pci_unregister_driver(&pci_driver);
 		return -ENODEV;
 	}
 
@@ -4433,7 +4436,7 @@
 			framesize = 0;
 	}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (framesize == 0) {
 		struct net_device_stats *stats = hdlc_stats(info->netdev);
 		stats->rx_errors++;
@@ -4476,7 +4479,7 @@
 				framesize++;
 			}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 			if (info->netcount)
 				hdlcdev_rx(info,info->tmp_rbuf, framesize);
 			else
@@ -4779,7 +4782,7 @@
 	info->tx_count = 0;
 	spin_unlock_irqrestore(&info->lock,flags);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (info->netcount)
 		hdlcdev_tx_done(info);
 	else
@@ -4799,6 +4802,6 @@
 	spin_lock_irqsave(&info->lock, flags);
 	info->pending_bh |= BH_RECEIVE;
 	spin_unlock_irqrestore(&info->lock, flags);
-	bh_handler(info);
+	bh_handler(&info->task);
 }
 
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 3e932b6..20a96ef 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -67,8 +67,10 @@
 #include <linux/workqueue.h>
 #include <linux/hdlc.h>
 
-#ifdef CONFIG_HDLC_MODULE
-#define CONFIG_HDLC 1
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINKMP_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
 #endif
 
 #define GET_USER(error,value,addr) error = get_user(value,addr)
@@ -280,7 +282,7 @@
 	int dosyncppp;
 	spinlock_t netlock;
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	struct net_device *netdev;
 #endif
 
@@ -536,7 +538,7 @@
 static void unthrottle(struct tty_struct * tty);
 static void set_break(struct tty_struct *tty, int break_state);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
 static void hdlcdev_tx_done(SLMP_INFO *info);
 static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size);
@@ -602,7 +604,7 @@
 static void set_rate(SLMP_INFO *info, u32 data_rate);
 
 static int  bh_action(SLMP_INFO *info);
-static void bh_handler(void* Context);
+static void bh_handler(struct work_struct *work);
 static void bh_receive(SLMP_INFO *info);
 static void bh_transmit(SLMP_INFO *info);
 static void bh_status(SLMP_INFO *info);
@@ -1607,7 +1609,7 @@
 	spin_unlock_irqrestore(&info->lock,flags);
 }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 
 /**
  * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
@@ -2063,9 +2065,9 @@
 
 /* Perform bottom half processing of work items queued by ISR.
  */
-void bh_handler(void* Context)
+void bh_handler(struct work_struct *work)
 {
-	SLMP_INFO *info = (SLMP_INFO*)Context;
+	SLMP_INFO *info = container_of(work, SLMP_INFO, task);
 	int action;
 
 	if (!info)
@@ -2339,7 +2341,7 @@
 			set_signals(info);
 		}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 		if (info->netcount)
 			hdlcdev_tx_done(info);
 		else
@@ -2523,7 +2525,7 @@
 				info->input_signal_events.dcd_up++;
 			} else
 				info->input_signal_events.dcd_down++;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 			if (info->netcount) {
 				if (status & SerialSignal_DCD)
 					netif_carrier_on(info->netdev);
@@ -3783,7 +3785,7 @@
 		info->irq_level,
 		info->max_frame_size );
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	hdlcdev_init(info);
 #endif
 }
@@ -3805,7 +3807,7 @@
 	} else {
 		memset(info, 0, sizeof(SLMP_INFO));
 		info->magic = MGSL_MAGIC;
-		INIT_WORK(&info->task, bh_handler, info);
+		INIT_WORK(&info->task, bh_handler);
 		info->max_frame_size = 4096;
 		info->close_delay = 5*HZ/10;
 		info->closing_wait = 30*HZ;
@@ -3977,7 +3979,7 @@
 	/* release devices */
 	info = synclinkmp_device_list;
 	while(info) {
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 		hdlcdev_exit(info);
 #endif
 		free_dma_bufs(info);
@@ -4979,7 +4981,7 @@
 			info->icount.rxcrc++;
 
 		framesize = 0;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 		{
 			struct net_device_stats *stats = hdlc_stats(info->netdev);
 			stats->rx_errors++;
@@ -5020,7 +5022,7 @@
 					index = 0;
 			}
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 			if (info->netcount)
 				hdlcdev_rx(info,info->tmp_rx_buf,framesize);
 			else
@@ -5531,7 +5533,7 @@
 
 	spin_unlock_irqrestore(&info->lock,flags);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 	if (info->netcount)
 		hdlcdev_tx_done(info);
 	else
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 5f49280..05810c8 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -182,6 +182,18 @@
 	.enable_mask	= SYSRQ_ENABLE_DUMP,
 };
 
+static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty)
+{
+	show_state_filter(TASK_UNINTERRUPTIBLE);
+}
+static struct sysrq_key_op sysrq_showstate_blocked_op = {
+	.handler	= sysrq_handle_showstate_blocked,
+	.help_msg	= "showBlockedTasks",
+	.action_msg	= "Show Blocked State",
+	.enable_mask	= SYSRQ_ENABLE_DUMP,
+};
+
+
 static void sysrq_handle_showmem(int key, struct tty_struct *tty)
 {
 	show_mem();
@@ -219,13 +231,13 @@
 	.enable_mask	= SYSRQ_ENABLE_SIGNAL,
 };
 
-static void moom_callback(void *ignored)
+static void moom_callback(struct work_struct *ignored)
 {
 	out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL],
 			GFP_KERNEL, 0);
 }
 
-static DECLARE_WORK(moom_work, moom_callback, NULL);
+static DECLARE_WORK(moom_work, moom_callback);
 
 static void sysrq_handle_moom(int key, struct tty_struct *tty)
 {
@@ -304,7 +316,7 @@
 	/* May be assigned at init time by SMP VOYAGER */
 	NULL,				/* v */
 	NULL,				/* w */
-	NULL,				/* x */
+	&sysrq_showstate_blocked_op,	/* x */
 	NULL,				/* y */
 	NULL				/* z */
 };
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index dd36fd0..07067c3 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -249,6 +249,7 @@
 
 	return eax;
 }
+EXPORT_SYMBOL(tosh_smm);
 
 
 static int tosh_ioctl(struct inode *ip, struct file *fp, unsigned int cmd,
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 6ad2d3b..33e1f66 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -325,9 +325,9 @@
 	schedule_work(&chip->work);
 }
 
-static void timeout_work(void *ptr)
+static void timeout_work(struct work_struct *work)
 {
-	struct tpm_chip *chip = ptr;
+	struct tpm_chip *chip = container_of(work, struct tpm_chip, work);
 
 	down(&chip->buffer_mutex);
 	atomic_set(&chip->data_pending, 0);
@@ -1105,7 +1105,7 @@
 	init_MUTEX(&chip->tpm_mutex);
 	INIT_LIST_HEAD(&chip->list);
 
-	INIT_WORK(&chip->work, timeout_work, chip);
+	INIT_WORK(&chip->work, timeout_work);
 
 	init_timer(&chip->user_read_timer);
 	chip->user_read_timer.function = user_reader_timeout;
@@ -1130,7 +1130,7 @@
 	scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num);
 	chip->vendor.miscdev.name = devname;
 
-	chip->vendor.miscdev.dev = dev;
+	chip->vendor.miscdev.parent = dev;
 	chip->dev = get_device(dev);
 
 	if (misc_register(&chip->vendor.miscdev)) {
@@ -1155,6 +1155,7 @@
 
 	if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
 		list_del(&chip->list);
+		misc_deregister(&chip->vendor.miscdev);
 		put_device(dev);
 		clear_bit(chip->dev_num, dev_mask);
 		kfree(chip);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 050ced2..bb9a43c 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -22,6 +22,7 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
+#include <linux/sched.h>
 #include <linux/miscdevice.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index e90ea39..b3cfc8b 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1254,7 +1254,7 @@
 	
 /**
  *	do_tty_hangup		-	actual handler for hangup events
- *	@data: tty device
+ *	@work: tty device
  *
  *	This can be called by the "eventd" kernel thread.  That is process
  *	synchronous but doesn't hold any locks, so we need to make sure we
@@ -1274,9 +1274,10 @@
  *		tasklist_lock to walk task list for hangup event
  *
  */
-static void do_tty_hangup(void *data)
+static void do_tty_hangup(struct work_struct *work)
 {
-	struct tty_struct *tty = (struct tty_struct *) data;
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, hangup_work);
 	struct file * cons_filp = NULL;
 	struct file *filp, *f = NULL;
 	struct task_struct *p;
@@ -1433,7 +1434,7 @@
 
 	printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
 #endif
-	do_tty_hangup((void *) tty);
+	do_tty_hangup(&tty->hangup_work);
 }
 EXPORT_SYMBOL(tty_vhangup);
 
@@ -3304,12 +3305,13 @@
  * Nasty bug: do_SAK is being called in interrupt context.  This can
  * deadlock.  We punt it up to process context.  AKPM - 16Mar2001
  */
-static void __do_SAK(void *arg)
+static void __do_SAK(struct work_struct *work)
 {
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, SAK_work);
 #ifdef TTY_SOFT_SAK
 	tty_hangup(tty);
 #else
-	struct tty_struct *tty = arg;
 	struct task_struct *g, *p;
 	int session;
 	int		i;
@@ -3388,7 +3390,7 @@
 {
 	if (!tty)
 		return;
-	PREPARE_WORK(&tty->SAK_work, __do_SAK, tty);
+	PREPARE_WORK(&tty->SAK_work, __do_SAK);
 	schedule_work(&tty->SAK_work);
 }
 
@@ -3396,7 +3398,7 @@
 
 /**
  *	flush_to_ldisc
- *	@private_: tty structure passed from work queue.
+ *	@work: tty structure passed from work queue.
  *
  *	This routine is called out of the software interrupt to flush data
  *	from the buffer chain to the line discipline.
@@ -3406,9 +3408,10 @@
  *	receive_buf method is single threaded for each tty instance.
  */
  
-static void flush_to_ldisc(void *private_)
+static void flush_to_ldisc(struct work_struct *work)
 {
-	struct tty_struct *tty = (struct tty_struct *) private_;
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, buf.work.work);
 	unsigned long 	flags;
 	struct tty_ldisc *disc;
 	struct tty_buffer *tbuf, *head;
@@ -3553,7 +3556,7 @@
 	spin_unlock_irqrestore(&tty->buf.lock, flags);
 
 	if (tty->low_latency)
-		flush_to_ldisc((void *) tty);
+		flush_to_ldisc(&tty->buf.work.work);
 	else
 		schedule_delayed_work(&tty->buf.work, 1);
 }
@@ -3580,17 +3583,17 @@
 	tty->overrun_time = jiffies;
 	tty->buf.head = tty->buf.tail = NULL;
 	tty_buffer_init(tty);
-	INIT_WORK(&tty->buf.work, flush_to_ldisc, tty);
+	INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
 	init_MUTEX(&tty->buf.pty_sem);
 	mutex_init(&tty->termios_mutex);
 	init_waitqueue_head(&tty->write_wait);
 	init_waitqueue_head(&tty->read_wait);
-	INIT_WORK(&tty->hangup_work, do_tty_hangup, tty);
+	INIT_WORK(&tty->hangup_work, do_tty_hangup);
 	mutex_init(&tty->atomic_read_lock);
 	mutex_init(&tty->atomic_write_lock);
 	spin_lock_init(&tty->read_lock);
 	INIT_LIST_HEAD(&tty->tty_files);
-	INIT_WORK(&tty->SAK_work, NULL, NULL);
+	INIT_WORK(&tty->SAK_work, NULL);
 }
 
 /*
@@ -3612,7 +3615,8 @@
  *		This field is optional, if there is no known struct device
  *		for this tty device it can be set to NULL safely.
  *
- *	Returns a pointer to the class device (or ERR_PTR(-EFOO) on error).
+ *	Returns a pointer to the struct device for this tty device
+ *	(or ERR_PTR(-EFOO) on error).
  *
  *	This call is required to be made to register an individual tty device
  *	if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set.  If
@@ -3622,8 +3626,8 @@
  *	Locking: ??
  */
 
-struct class_device *tty_register_device(struct tty_driver *driver,
-					 unsigned index, struct device *device)
+struct device *tty_register_device(struct tty_driver *driver, unsigned index,
+				   struct device *device)
 {
 	char name[64];
 	dev_t dev = MKDEV(driver->major, driver->minor_start) + index;
@@ -3639,7 +3643,7 @@
 	else
 		tty_line_name(driver, index, name);
 
-	return class_device_create(tty_class, NULL, dev, device, "%s", name);
+	return device_create(tty_class, device, dev, name);
 }
 
 /**
@@ -3655,7 +3659,7 @@
 
 void tty_unregister_device(struct tty_driver *driver, unsigned index)
 {
-	class_device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index);
+	device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index);
 }
 
 EXPORT_SYMBOL(tty_register_device);
@@ -3895,20 +3899,20 @@
 	if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
 	    register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
 		panic("Couldn't register /dev/tty driver\n");
-	class_device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 0), NULL, "tty");
+	device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 0), "tty");
 
 	cdev_init(&console_cdev, &console_fops);
 	if (cdev_add(&console_cdev, MKDEV(TTYAUX_MAJOR, 1), 1) ||
 	    register_chrdev_region(MKDEV(TTYAUX_MAJOR, 1), 1, "/dev/console") < 0)
 		panic("Couldn't register /dev/console driver\n");
-	class_device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 1), NULL, "console");
+	device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 1), "console");
 
 #ifdef CONFIG_UNIX98_PTYS
 	cdev_init(&ptmx_cdev, &ptmx_fops);
 	if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
 	    register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
 		panic("Couldn't register /dev/ptmx driver\n");
-	class_device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 2), NULL, "ptmx");
+	device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 2), "ptmx");
 #endif
 
 #ifdef CONFIG_VT
@@ -3916,7 +3920,7 @@
 	if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
 	    register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
 		panic("Couldn't register /dev/tty0 driver\n");
-	class_device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), NULL, "tty0");
+	device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), "tty0");
 
 	vty_init();
 #endif
diff --git a/drivers/char/vc_screen.c b/drivers/char/vc_screen.c
index bd7a98c..f442b57 100644
--- a/drivers/char/vc_screen.c
+++ b/drivers/char/vc_screen.c
@@ -476,16 +476,16 @@
 
 void vcs_make_sysfs(struct tty_struct *tty)
 {
-	class_device_create(vc_class, NULL, MKDEV(VCS_MAJOR, tty->index + 1),
-			NULL, "vcs%u", tty->index + 1);
-	class_device_create(vc_class, NULL, MKDEV(VCS_MAJOR, tty->index + 129),
-			NULL, "vcsa%u", tty->index + 1);
+	device_create(vc_class, NULL, MKDEV(VCS_MAJOR, tty->index + 1),
+			"vcs%u", tty->index + 1);
+	device_create(vc_class, NULL, MKDEV(VCS_MAJOR, tty->index + 129),
+			"vcsa%u", tty->index + 1);
 }
 
 void vcs_remove_sysfs(struct tty_struct *tty)
 {
-	class_device_destroy(vc_class, MKDEV(VCS_MAJOR, tty->index + 1));
-	class_device_destroy(vc_class, MKDEV(VCS_MAJOR, tty->index + 129));
+	device_destroy(vc_class, MKDEV(VCS_MAJOR, tty->index + 1));
+	device_destroy(vc_class, MKDEV(VCS_MAJOR, tty->index + 129));
 }
 
 int __init vcs_init(void)
@@ -494,7 +494,7 @@
 		panic("unable to get major %d for vcs device", VCS_MAJOR);
 	vc_class = class_create(THIS_MODULE, "vc");
 
-	class_device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 0), NULL, "vcs");
-	class_device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 128), NULL, "vcsa");
+	device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 0), "vcs");
+	device_create(vc_class, NULL, MKDEV(VCS_MAJOR, 128), "vcsa");
 	return 0;
 }
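For reference, a minimal sketch (not part of this patch) of the class_device to struct device conversion seen in the character-device hunks above: class_device_create()/class_device_destroy() are replaced by device_create()/device_destroy(), which take an optional parent device and drop the extra NULL data argument. The names demo_class and DEMO_MAJOR below are hypothetical.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/kdev_t.h>

	#define DEMO_MAJOR	240		/* hypothetical example major */

	static struct class *demo_class;

	static int demo_add_node(int minor)
	{
		struct device *dev;

		/* Old: class_device_create(demo_class, NULL,
		 *	MKDEV(DEMO_MAJOR, minor), NULL, "demo%d", minor);  */
		dev = device_create(demo_class, NULL, MKDEV(DEMO_MAJOR, minor),
				    "demo%d", minor);
		return IS_ERR(dev) ? PTR_ERR(dev) : 0;
	}

	static void demo_remove_node(int minor)
	{
		/* Old: class_device_destroy(demo_class, MKDEV(DEMO_MAJOR, minor)); */
		device_destroy(demo_class, MKDEV(DEMO_MAJOR, minor));
	}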
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 8e4413f..a8239da 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -112,7 +112,7 @@
 struct con_driver {
 	const struct consw *con;
 	const char *desc;
-	struct class_device *class_dev;
+	struct device *dev;
 	int node;
 	int first;
 	int last;
@@ -152,10 +152,10 @@
 static void save_cur(struct vc_data *vc);
 static void reset_terminal(struct vc_data *vc, int do_clear);
 static void con_flush_chars(struct tty_struct *tty);
-static void set_vesa_blanking(char __user *p);
+static int set_vesa_blanking(char __user *p);
 static void set_cursor(struct vc_data *vc);
 static void hide_cursor(struct vc_data *vc);
-static void console_callback(void *ignored);
+static void console_callback(struct work_struct *ignored);
 static void blank_screen_t(unsigned long dummy);
 static void set_palette(struct vc_data *vc);
 
@@ -174,7 +174,7 @@
 static int blankinterval = 10*60*HZ;
 static int vesa_off_interval;
 
-static DECLARE_WORK(console_work, console_callback, NULL);
+static DECLARE_WORK(console_work, console_callback);
 
 /*
  * fg_console is the current virtual console,
@@ -2154,7 +2154,7 @@
  * with other console code and prevention of re-entrancy is
  * ensured with console_sem.
  */
-static void console_callback(void *ignored)
+static void console_callback(struct work_struct *ignored)
 {
 	acquire_console_sem();
 
@@ -2369,7 +2369,7 @@
 			ret = __put_user(data, p);
 			break;
 		case TIOCL_SETVESABLANK:
-			set_vesa_blanking(p);
+			ret = set_vesa_blanking(p);
 			break;
 		case TIOCL_GETKMSGREDIRECT:
 			data = kmsg_redirect;
@@ -3023,10 +3023,10 @@
 }
 #endif /* CONFIG_VT_HW_CONSOLE_BINDING */
 
-static ssize_t store_bind(struct class_device *class_device,
+static ssize_t store_bind(struct device *dev, struct device_attribute *attr,
 			  const char *buf, size_t count)
 {
-	struct con_driver *con = class_get_devdata(class_device);
+	struct con_driver *con = dev_get_drvdata(dev);
 	int bind = simple_strtoul(buf, NULL, 0);
 
 	if (bind)
@@ -3037,17 +3037,19 @@
 	return count;
 }
 
-static ssize_t show_bind(struct class_device *class_device, char *buf)
+static ssize_t show_bind(struct device *dev, struct device_attribute *attr,
+			 char *buf)
 {
-	struct con_driver *con = class_get_devdata(class_device);
+	struct con_driver *con = dev_get_drvdata(dev);
 	int bind = con_is_bound(con->con);
 
 	return snprintf(buf, PAGE_SIZE, "%i\n", bind);
 }
 
-static ssize_t show_name(struct class_device *class_device, char *buf)
+static ssize_t show_name(struct device *dev, struct device_attribute *attr,
+			 char *buf)
 {
-	struct con_driver *con = class_get_devdata(class_device);
+	struct con_driver *con = dev_get_drvdata(dev);
 
 	return snprintf(buf, PAGE_SIZE, "%s %s\n",
 			(con->flag & CON_DRIVER_FLAG_MODULE) ? "(M)" : "(S)",
@@ -3055,43 +3057,40 @@
 
 }
 
-static struct class_device_attribute class_device_attrs[] = {
+static struct device_attribute device_attrs[] = {
 	__ATTR(bind, S_IRUGO|S_IWUSR, show_bind, store_bind),
 	__ATTR(name, S_IRUGO, show_name, NULL),
 };
 
-static int vtconsole_init_class_device(struct con_driver *con)
+static int vtconsole_init_device(struct con_driver *con)
 {
 	int i;
 	int error = 0;
 
 	con->flag |= CON_DRIVER_FLAG_ATTR;
-	class_set_devdata(con->class_dev, con);
-	for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) {
-		error = class_device_create_file(con->class_dev,
-					 &class_device_attrs[i]);
+	dev_set_drvdata(con->dev, con);
+	for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
+		error = device_create_file(con->dev, &device_attrs[i]);
 		if (error)
 			break;
 	}
 
 	if (error) {
 		while (--i >= 0)
-			class_device_remove_file(con->class_dev,
-					 &class_device_attrs[i]);
+			device_remove_file(con->dev, &device_attrs[i]);
 		con->flag &= ~CON_DRIVER_FLAG_ATTR;
 	}
 
 	return error;
 }
 
-static void vtconsole_deinit_class_device(struct con_driver *con)
+static void vtconsole_deinit_device(struct con_driver *con)
 {
 	int i;
 
 	if (con->flag & CON_DRIVER_FLAG_ATTR) {
-		for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
-			class_device_remove_file(con->class_dev,
-						 &class_device_attrs[i]);
+		for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
+			device_remove_file(con->dev, &device_attrs[i]);
 		con->flag &= ~CON_DRIVER_FLAG_ATTR;
 	}
 }
@@ -3179,18 +3178,17 @@
 	if (retval)
 		goto err;
 
-	con_driver->class_dev = class_device_create(vtconsole_class, NULL,
-						    MKDEV(0, con_driver->node),
-						    NULL, "vtcon%i",
-						    con_driver->node);
+	con_driver->dev = device_create(vtconsole_class, NULL,
+					MKDEV(0, con_driver->node),
+					"vtcon%i", con_driver->node);
 
-	if (IS_ERR(con_driver->class_dev)) {
-		printk(KERN_WARNING "Unable to create class_device for %s; "
+	if (IS_ERR(con_driver->dev)) {
+		printk(KERN_WARNING "Unable to create device for %s; "
 		       "errno = %ld\n", con_driver->desc,
-		       PTR_ERR(con_driver->class_dev));
-		con_driver->class_dev = NULL;
+		       PTR_ERR(con_driver->dev));
+		con_driver->dev = NULL;
 	} else {
-		vtconsole_init_class_device(con_driver);
+		vtconsole_init_device(con_driver);
 	}
 
 err:
@@ -3226,12 +3224,12 @@
 
 		if (con_driver->con == csw &&
 		    con_driver->flag & CON_DRIVER_FLAG_MODULE) {
-			vtconsole_deinit_class_device(con_driver);
-			class_device_destroy(vtconsole_class,
-					     MKDEV(0, con_driver->node));
+			vtconsole_deinit_device(con_driver);
+			device_destroy(vtconsole_class,
+				       MKDEV(0, con_driver->node));
 			con_driver->con = NULL;
 			con_driver->desc = NULL;
-			con_driver->class_dev = NULL;
+			con_driver->dev = NULL;
 			con_driver->node = 0;
 			con_driver->flag = 0;
 			con_driver->first = 0;
@@ -3289,19 +3287,18 @@
 	for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
 		struct con_driver *con = &registered_con_driver[i];
 
-		if (con->con && !con->class_dev) {
-			con->class_dev =
-				class_device_create(vtconsole_class, NULL,
-						    MKDEV(0, con->node), NULL,
-						    "vtcon%i", con->node);
+		if (con->con && !con->dev) {
+			con->dev = device_create(vtconsole_class, NULL,
+						 MKDEV(0, con->node),
+						 "vtcon%i", con->node);
 
-			if (IS_ERR(con->class_dev)) {
+			if (IS_ERR(con->dev)) {
 				printk(KERN_WARNING "Unable to create "
-				       "class_device for %s; errno = %ld\n",
-				       con->desc, PTR_ERR(con->class_dev));
-				con->class_dev = NULL;
+				       "device for %s; errno = %ld\n",
+				       con->desc, PTR_ERR(con->dev));
+				con->dev = NULL;
 			} else {
-				vtconsole_init_class_device(con);
+				vtconsole_init_device(con);
 			}
 		}
 	}
@@ -3316,11 +3313,15 @@
  *	Screen blanking
  */
 
-static void set_vesa_blanking(char __user *p)
+static int set_vesa_blanking(char __user *p)
 {
-    unsigned int mode;
-    get_user(mode, p + 1);
-    vesa_blank_mode = (mode < 4) ? mode : 0;
+	unsigned int mode;
+
+	if (get_user(mode, p + 1))
+		return -EFAULT;
+
+	vesa_blank_mode = (mode < 4) ? mode : 0;
+	return 0;
 }
 
 void do_blank_screen(int entering_gfx)
diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig
index 0187b11..ea09d0c 100644
--- a/drivers/char/watchdog/Kconfig
+++ b/drivers/char/watchdog/Kconfig
@@ -340,6 +340,14 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called iTCO_wdt.
 
+config ITCO_VENDOR_SUPPORT
+	bool "Intel TCO Timer/Watchdog Specific Vendor Support"
+	depends on ITCO_WDT
+	---help---
+	  Add vendor specific support to the intel TCO timer based watchdog
+	  devices. At this moment we only have additional support for some
+	  SuperMicro Inc. motherboards.
+
 config SC1200_WDT
 	tristate "National Semiconductor PC87307/PC97307 (ala SC1200) Watchdog"
 	depends on WATCHDOG && X86
@@ -363,6 +371,20 @@
 
 	  If compiled as a module, it will be called scx200_wdt.
 
+config PC87413_WDT
+	tristate "NS PC87413 watchdog"
+	depends on WATCHDOG && X86
+	---help---
+	  This is the driver for the hardware watchdog on the PC87413 chipset.
+	  This watchdog simply watches your kernel to make sure it doesn't
+	  freeze, and if it does, it reboots your computer after a certain
+	  amount of time.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called pc87413_wdt.
+
+	  Most people will say N.
+ 
 config 60XX_WDT
 	tristate "SBC-60XX Watchdog Timer"
 	depends on WATCHDOG && X86
@@ -553,6 +575,16 @@
 	  timer expired and no process has written to /dev/watchdog during
 	  that time.
 
+config WDT_RM9K_GPI
+	tristate "RM9000/GPI hardware watchdog"
+	depends on WATCHDOG && CPU_RM9000
+	help
+	  Watchdog implementation using the GPI hardware found on
+	  PMC-Sierra RM9xxx CPUs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called rm9k_wdt.
+
 # S390 Architecture
 
 config ZVM_WATCHDOG
diff --git a/drivers/char/watchdog/Makefile b/drivers/char/watchdog/Makefile
index 3644049..2cd8ff8 100644
--- a/drivers/char/watchdog/Makefile
+++ b/drivers/char/watchdog/Makefile
@@ -47,9 +47,10 @@
 obj-$(CONFIG_WAFER_WDT) += wafer5823wdt.o
 obj-$(CONFIG_I6300ESB_WDT) += i6300esb.o
 obj-$(CONFIG_I8XX_TCO) += i8xx_tco.o
-obj-$(CONFIG_ITCO_WDT) += iTCO_wdt.o
+obj-$(CONFIG_ITCO_WDT) += iTCO_wdt.o iTCO_vendor_support.o
 obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o
 obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o
+obj-$(CONFIG_PC87413_WDT) += pc87413_wdt.o
 obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o
 obj-$(CONFIG_SBC8360_WDT) += sbc8360.o
 obj-$(CONFIG_CPU5_WDT) += cpu5wdt.o
@@ -72,6 +73,7 @@
 
 # MIPS Architecture
 obj-$(CONFIG_INDYDOG) += indydog.o
+obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o
 
 # S390 Architecture
 
diff --git a/drivers/char/watchdog/iTCO_vendor_support.c b/drivers/char/watchdog/iTCO_vendor_support.c
new file mode 100644
index 0000000..4150839
--- /dev/null
+++ b/drivers/char/watchdog/iTCO_vendor_support.c
@@ -0,0 +1,307 @@
+/*
+ *	intel TCO vendor specific watchdog driver support
+ *
+ *	(c) Copyright 2006 Wim Van Sebroeck <wim@iguana.be>.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	Neither Wim Van Sebroeck nor Iguana vzw. admit liability nor
+ *	provide warranty for any of this software. This material is
+ *	provided "AS-IS" and at no charge.
+ */
+
+/*
+ *	Includes, defines, variables, module parameters, ...
+ */
+
+/* Module and version information */
+#define DRV_NAME        "iTCO_vendor_support"
+#define DRV_VERSION     "1.01"
+#define DRV_RELDATE     "11-Nov-2006"
+#define PFX		DRV_NAME ": "
+
+/* Includes */
+#include <linux/module.h>		/* For module specific items */
+#include <linux/moduleparam.h>		/* For new moduleparam's */
+#include <linux/types.h>		/* For standard types (like size_t) */
+#include <linux/errno.h>		/* For the -ENODEV/... values */
+#include <linux/kernel.h>		/* For printk/panic/... */
+#include <linux/init.h>			/* For __init/__exit/... */
+#include <linux/ioport.h>		/* For io-port access */
+
+#include <asm/io.h>			/* For inb/outb/... */
+
+/* iTCO defines */
+#define	SMI_EN		acpibase + 0x30	/* SMI Control and Enable Register */
+#define	TCOBASE		acpibase + 0x60	/* TCO base address		*/
+#define	TCO1_STS	TCOBASE + 0x04	/* TCO1 Status Register		*/
+
+/* List of vendor support modes */
+#define SUPERMICRO_OLD_BOARD	1	/* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */
+#define SUPERMICRO_NEW_BOARD	2	/* SuperMicro Pentium 4 / Xeon 4 / EMT64T Era Systems */
+
+static int vendorsupport = 0;
+module_param(vendorsupport, int, 0);
+MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default=0 (none), 1=SuperMicro Pent3, 2=SuperMicro Pent4+");
+
+/*
+ *	Vendor Specific Support
+ */
+
+/*
+ *	Vendor Support: 1
+ *	Board: Super Micro Computer Inc. 370SSE+-OEM1/P3TSSE
+ *	iTCO chipset: ICH2
+ *
+ *	Code contributed by: R. Seretny <lkpatches@paypc.com>
+ *	Documentation obtained by R. Seretny from SuperMicro Technical Support
+ *
+ *	To enable Watchdog function:
+ *	    BIOS setup -> Power -> TCO Logic SMI Enable -> Within5Minutes
+ *	    This setting enables SMI to clear the watchdog expired flag.
+ *	    If BIOS or CPU fail which may cause SMI hang, then system will
+ *	    reboot. When application starts to use watchdog function,
+ *	    application has to take over the control from SMI.
+ *
+ *	    For P3TSSE, J36 jumper needs to be removed to enable the Watchdog
+ *	    function.
+ *
+ *	    Note: The system will reboot when Expire Flag is set TWICE.
+ *	    So, if the watchdog timer is 20 seconds, then the maximum hang
+ *	    time is about 40 seconds, and the minimum hang time is about
+ *	    20.6 seconds.
+ */
+
+static void supermicro_old_pre_start(unsigned long acpibase)
+{
+	unsigned long val32;
+
+	val32 = inl(SMI_EN);
+	val32 &= 0xffffdfff;	/* Turn off SMI clearing watchdog */
+	outl(val32, SMI_EN);	/* Needed to activate watchdog */
+}
+
+static void supermicro_old_pre_stop(unsigned long acpibase)
+{
+	unsigned long val32;
+
+	val32 = inl(SMI_EN);
+	val32 &= 0x00002000;	/* Turn on SMI clearing watchdog */
+	outl(val32, SMI_EN);	/* Needed to deactivate watchdog */
+}
+
+static void supermicro_old_pre_keepalive(unsigned long acpibase)
+{
+	/* Reload TCO Timer (done in iTCO_wdt_keepalive) + */
+	/* Clear "Expire Flag" (Bit 3 of TC01_STS register) */
+	outb(0x08, TCO1_STS);
+}
+
+/*
+ *	Vendor Support: 2
+ *	Board: Super Micro Computer Inc. P4SBx, P4DPx
+ *	iTCO chipset: ICH4
+ *
+ *	Code contributed by: R. Seretny <lkpatches@paypc.com>
+ *	Documentation obtained by R. Seretny from SuperMicro Technical Support
+ *
+ *	To enable Watchdog function:
+ *	 1. BIOS
+ *	  For P4SBx:
+ *	  BIOS setup -> Advanced -> Integrated Peripherals -> Watch Dog Feature
+ *	  For P4DPx:
+ *	  BIOS setup -> Advanced -> I/O Device Configuration -> Watch Dog
+ *	 This setting enables or disables Watchdog function. When enabled, the
+ *	 default watchdog timer is set to be 5 minutes (about 4’35”). It is
+ *	 enough to load and run the OS. The application (service or driver) has
+ *	 to take over the control once OS is running up and before watchdog
+ *	 expires.
+ *
+ *	 2. JUMPER
+ *	  For P4SBx: JP39
+ *	  For P4DPx: JP37
+ *	  This jumper is used for safety.  Closed is enabled. This jumper
+ *	  prevents the user from enabling the watchdog in BIOS by accident.
+ *
+ *	 To enable Watch Dog function, both BIOS and JUMPER must be enabled.
+ *
+ *	The documentation lists motherboards P4SBx and P4DPx series as of
+ *	20-March-2002. However, this code works flawlessly with much newer
+ *	motherboards, such as my X6DHR-8G2 (SuperServer 6014H-82).
+ *
+ *	The original iTCO driver as written does not actually reset the
+ *	watchdog timer on these machines; as a result, they reboot after five
+ *	minutes.
+ *
+ *	NOTE: You may leave the Watchdog function disabled in the SuperMicro
+ *	BIOS to avoid a "boot-race"... This driver will enable watchdog
+ *	functionality even if it's disabled in the BIOS once the /dev/watchdog
+ *	file is opened.
+ */
+
+/* I/O Port's */
+#define SM_REGINDEX	0x2e		/* SuperMicro ICH4+ Register Index */
+#define SM_DATAIO	0x2f		/* SuperMicro ICH4+ Register Data I/O */
+
+/* Control Register's */
+#define SM_CTLPAGESW	0x07		/* SuperMicro ICH4+ Control Page Switch */
+#define SM_CTLPAGE		0x08	/* SuperMicro ICH4+ Control Page Num */
+
+#define SM_WATCHENABLE	0x30		/* Watchdog enable: Bit 0: 0=off, 1=on */
+
+#define SM_WATCHPAGE	0x87		/* Watchdog unlock control page */
+
+#define SM_ENDWATCH	0xAA		/* Watchdog lock control page */
+
+#define SM_COUNTMODE	0xf5		/* Watchdog count mode select */
+					/* (Bit 3: 0 = seconds, 1 = minutes */
+
+#define SM_WATCHTIMER	0xf6		/* 8-bits, Watchdog timer counter (RW) */
+
+#define SM_RESETCONTROL	0xf7		/* Watchdog reset control */
+					/* Bit 6: timer is reset by kbd interrupt */
+					/* Bit 7: timer is reset by mouse interrupt */
+
+static void supermicro_new_unlock_watchdog(void)
+{
+	outb(SM_WATCHPAGE, SM_REGINDEX);	/* Write 0x87 to port 0x2e twice */
+	outb(SM_WATCHPAGE, SM_REGINDEX);
+
+	outb(SM_CTLPAGESW, SM_REGINDEX);	/* Switch to watchdog control page */
+	outb(SM_CTLPAGE, SM_DATAIO);
+}
+
+static void supermicro_new_lock_watchdog(void)
+{
+	outb(SM_ENDWATCH, SM_REGINDEX);
+}
+
+static void supermicro_new_pre_start(unsigned int heartbeat)
+{
+	unsigned int val;
+
+	supermicro_new_unlock_watchdog();
+
+	/* Watchdog timer setting needs to be in seconds */
+	outb(SM_COUNTMODE, SM_REGINDEX);
+	val = inb(SM_DATAIO);
+	val &= 0xF7;
+	outb(val, SM_DATAIO);
+
+	/* Write heartbeat interval to WDOG */
+	outb (SM_WATCHTIMER, SM_REGINDEX);
+	outb((heartbeat & 255), SM_DATAIO);
+
+	/* Make sure keyboard/mouse interrupts don't interfere */
+	outb(SM_RESETCONTROL, SM_REGINDEX);
+	val = inb(SM_DATAIO);
+	val &= 0x3f;
+	outb(val, SM_DATAIO);
+
+	/* enable watchdog by setting bit 0 of Watchdog Enable to 1 */
+	outb(SM_WATCHENABLE, SM_REGINDEX);
+	val = inb(SM_DATAIO);
+	val |= 0x01;
+	outb(val, SM_DATAIO);
+
+	supermicro_new_lock_watchdog();
+}
+
+static void supermicro_new_pre_stop(void)
+{
+	unsigned int val;
+
+	supermicro_new_unlock_watchdog();
+
+	/* disable watchdog by setting bit 0 of Watchdog Enable to 0 */
+	outb(SM_WATCHENABLE, SM_REGINDEX);
+	val = inb(SM_DATAIO);
+	val &= 0xFE;
+	outb(val, SM_DATAIO);
+
+	supermicro_new_lock_watchdog();
+}
+
+static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat)
+{
+	supermicro_new_unlock_watchdog();
+
+	/* reset watchdog timeout to heartbeat value */
+	outb(SM_WATCHTIMER, SM_REGINDEX);
+	outb((heartbeat & 255), SM_DATAIO);
+
+	supermicro_new_lock_watchdog();
+}
+
+/*
+ *	Generic Support Functions
+ */
+
+void iTCO_vendor_pre_start(unsigned long acpibase,
+			   unsigned int heartbeat)
+{
+	if (vendorsupport == SUPERMICRO_OLD_BOARD)
+		supermicro_old_pre_start(acpibase);
+	else if (vendorsupport == SUPERMICRO_NEW_BOARD)
+		supermicro_new_pre_start(heartbeat);
+}
+EXPORT_SYMBOL(iTCO_vendor_pre_start);
+
+void iTCO_vendor_pre_stop(unsigned long acpibase)
+{
+	if (vendorsupport == SUPERMICRO_OLD_BOARD)
+		supermicro_old_pre_stop(acpibase);
+	else if (vendorsupport == SUPERMICRO_NEW_BOARD)
+		supermicro_new_pre_stop();
+}
+EXPORT_SYMBOL(iTCO_vendor_pre_stop);
+
+void iTCO_vendor_pre_keepalive(unsigned long acpibase, unsigned int heartbeat)
+{
+	if (vendorsupport == SUPERMICRO_OLD_BOARD)
+		supermicro_old_pre_keepalive(acpibase);
+	else if (vendorsupport == SUPERMICRO_NEW_BOARD)
+		supermicro_new_pre_set_heartbeat(heartbeat);
+}
+EXPORT_SYMBOL(iTCO_vendor_pre_keepalive);
+
+void iTCO_vendor_pre_set_heartbeat(unsigned int heartbeat)
+{
+	if (vendorsupport == SUPERMICRO_NEW_BOARD)
+		supermicro_new_pre_set_heartbeat(heartbeat);
+}
+EXPORT_SYMBOL(iTCO_vendor_pre_set_heartbeat);
+
+int iTCO_vendor_check_noreboot_on(void)
+{
+	switch(vendorsupport) {
+	case SUPERMICRO_OLD_BOARD:
+		return 0;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(iTCO_vendor_check_noreboot_on);
+
+static int __init iTCO_vendor_init_module(void)
+{
+	printk (KERN_INFO PFX "vendor-support=%d\n", vendorsupport);
+	return 0;
+}
+
+static void __exit iTCO_vendor_exit_module(void)
+{
+	printk (KERN_INFO PFX "Module Unloaded\n");
+}
+
+module_init(iTCO_vendor_init_module);
+module_exit(iTCO_vendor_exit_module);
+
+MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>, R. Seretny <lkpatches@paypc.com>");
+MODULE_DESCRIPTION("Intel TCO Vendor Specific WatchDog Timer Driver Support");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/char/watchdog/iTCO_wdt.c b/drivers/char/watchdog/iTCO_wdt.c
index b6f29cb..7eac922 100644
--- a/drivers/char/watchdog/iTCO_wdt.c
+++ b/drivers/char/watchdog/iTCO_wdt.c
@@ -48,8 +48,8 @@
 
 /* Module and version information */
 #define DRV_NAME        "iTCO_wdt"
-#define DRV_VERSION     "1.00"
-#define DRV_RELDATE     "08-Oct-2006"
+#define DRV_VERSION     "1.01"
+#define DRV_RELDATE     "11-Nov-2006"
 #define PFX		DRV_NAME ": "
 
 /* Includes */
@@ -189,6 +189,21 @@
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
 
+/* iTCO Vendor Specific Support hooks */
+#ifdef CONFIG_ITCO_VENDOR_SUPPORT
+extern void iTCO_vendor_pre_start(unsigned long, unsigned int);
+extern void iTCO_vendor_pre_stop(unsigned long);
+extern void iTCO_vendor_pre_keepalive(unsigned long, unsigned int);
+extern void iTCO_vendor_pre_set_heartbeat(unsigned int);
+extern int iTCO_vendor_check_noreboot_on(void);
+#else
+#define iTCO_vendor_pre_start(acpibase, heartbeat)	{}
+#define iTCO_vendor_pre_stop(acpibase)			{}
+#define iTCO_vendor_pre_keepalive(acpibase,heartbeat)	{}
+#define iTCO_vendor_pre_set_heartbeat(heartbeat)	{}
+#define iTCO_vendor_check_noreboot_on()			1	/* 1=check noreboot; 0=don't check */
+#endif
+
 /*
  * Some TCO specific functions
  */
@@ -249,6 +264,8 @@
 
 	spin_lock(&iTCO_wdt_private.io_lock);
 
+	iTCO_vendor_pre_start(iTCO_wdt_private.ACPIBASE, heartbeat);
+
 	/* disable chipset's NO_REBOOT bit */
 	if (iTCO_wdt_unset_NO_REBOOT_bit()) {
 		printk(KERN_ERR PFX "failed to reset NO_REBOOT flag, reboot disabled by hardware\n");
@@ -273,6 +290,8 @@
 
 	spin_lock(&iTCO_wdt_private.io_lock);
 
+	iTCO_vendor_pre_stop(iTCO_wdt_private.ACPIBASE);
+
 	/* Bit 11: TCO Timer Halt -> 1 = The TCO timer is disabled */
 	val = inw(TCO1_CNT);
 	val |= 0x0800;
@@ -293,6 +312,8 @@
 {
 	spin_lock(&iTCO_wdt_private.io_lock);
 
+	iTCO_vendor_pre_keepalive(iTCO_wdt_private.ACPIBASE, heartbeat);
+
 	/* Reload the timer by writing to the TCO Timer Counter register */
 	if (iTCO_wdt_private.iTCO_version == 2) {
 		outw(0x01, TCO_RLD);
@@ -319,6 +340,8 @@
 	    ((iTCO_wdt_private.iTCO_version == 1) && (tmrval > 0x03f)))
 		return -EINVAL;
 
+	iTCO_vendor_pre_set_heartbeat(tmrval);
+
 	/* Write new heartbeat to watchdog */
 	if (iTCO_wdt_private.iTCO_version == 2) {
 		spin_lock(&iTCO_wdt_private.io_lock);
@@ -569,7 +592,7 @@
 	}
 
 	/* Check chipset's NO_REBOOT bit */
-	if (iTCO_wdt_unset_NO_REBOOT_bit()) {
+	if (iTCO_wdt_unset_NO_REBOOT_bit() && iTCO_vendor_check_noreboot_on()) {
 		printk(KERN_ERR PFX "failed to reset NO_REBOOT flag, reboot disabled by hardware\n");
 		ret = -ENODEV;	/* Cannot reset NO_REBOOT bit */
 		goto out;
diff --git a/drivers/char/watchdog/pc87413_wdt.c b/drivers/char/watchdog/pc87413_wdt.c
new file mode 100644
index 0000000..1d447e3
--- /dev/null
+++ b/drivers/char/watchdog/pc87413_wdt.c
@@ -0,0 +1,635 @@
+/*
+ *      NS pc87413-wdt Watchdog Timer driver for Linux 2.6.x.x
+ *
+ *      This code is based on wdt.c with original copyright.
+ *
+ *      (C) Copyright 2006 Sven Anders, <anders@anduras.de>
+ *                     and Marcus Junker, <junker@anduras.de>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ *
+ *      Neither Sven Anders, Marcus Junker nor ANDURAS AG
+ *      admit liability nor provide warranty for any of this software.
+ *      This material is provided "AS-IS" and at no charge.
+ *
+ *      Release 1.1
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/notifier.h>
+#include <linux/fs.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+/* #define DEBUG 1 */
+
+#define DEFAULT_TIMEOUT     1            /* 1 minute */
+#define MAX_TIMEOUT         255
+
+#define VERSION             "1.1"
+#define MODNAME             "pc87413 WDT"
+#define PFX                 MODNAME ": "
+#define DPFX                MODNAME " - DEBUG: "
+
+#define WDT_INDEX_IO_PORT   (io+0)       /* I/O port base (index register) */
+#define WDT_DATA_IO_PORT    (WDT_INDEX_IO_PORT+1)
+#define SWC_LDN             0x04
+#define SIOCFG2             0x22         /* Serial IO register */
+#define WDCTL               0x10         /* Watchdog Timer Control Register */
+#define WDTO                0x11         /* Watchdog timeout register */
+#define WDCFG               0x12         /* Watchdog config register */
+
+static int io = 0x2E;		         /* Address used on Portwell Boards */
+
+static int timeout = DEFAULT_TIMEOUT;    /* timeout value */
+static unsigned long timer_enabled = 0;  /* is the timer enabled? */
+
+static char expect_close;                /* is the close expected? */
+
+static spinlock_t io_lock;               /* to guard the watchdog from io races */
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+
+/* -- Low level function ----------------------------------------*/
+
+/* Select pins for Watchdog output */
+
+static inline void pc87413_select_wdt_out (void)
+{
+	unsigned int cr_data = 0;
+
+	/* Step 1: Select multiple pin, pin55, as WDT output */
+
+	outb_p(SIOCFG2, WDT_INDEX_IO_PORT);
+
+	cr_data = inb (WDT_DATA_IO_PORT);
+
+	cr_data |= 0x80; /* Set Bit7 to 1 */
+	outb_p(SIOCFG2, WDT_INDEX_IO_PORT);
+
+	outb_p(cr_data, WDT_DATA_IO_PORT);
+
+#ifdef DEBUG
+	printk(KERN_INFO DPFX "Select multiple pin, pin55, as WDT output:"
+	                      " Bit7 to 1: %d\n", cr_data);
+#endif
+}
+
+/* Enable SWC functions */
+
+static inline void pc87413_enable_swc(void)
+{
+	unsigned int cr_data = 0;
+
+	/* Step 2: Enable SWC functions */
+
+	outb_p(0x07, WDT_INDEX_IO_PORT);        /* Point SWC_LDN (LDN=4) */
+	outb_p(SWC_LDN, WDT_DATA_IO_PORT);
+
+	outb_p(0x30, WDT_INDEX_IO_PORT);        /* Read Index 0x30 First */
+	cr_data = inb(WDT_DATA_IO_PORT);
+	cr_data |= 0x01;                        /* Set Bit0 to 1 */
+	outb_p(0x30, WDT_INDEX_IO_PORT);
+	outb_p(cr_data, WDT_DATA_IO_PORT);      /* Index0x30_bit0P1 */
+
+#ifdef DEBUG
+	printk(KERN_INFO DPFX "pc87413 - Enable SWC functions\n");
+#endif
+}
+
+/* Read SWC I/O base address */
+
+static inline unsigned int pc87413_get_swc_base(void)
+{
+	unsigned int  swc_base_addr = 0;
+	unsigned char addr_l, addr_h = 0;
+
+	/* Step 3: Read SWC I/O Base Address */
+
+	outb_p(0x60, WDT_INDEX_IO_PORT);        /* Read Index 0x60 */
+	addr_h = inb(WDT_DATA_IO_PORT);
+
+	outb_p(0x61, WDT_INDEX_IO_PORT);        /* Read Index 0x61 */
+
+	addr_l = inb(WDT_DATA_IO_PORT);
+
+	swc_base_addr = (addr_h << 8) + addr_l;
+
+#ifdef DEBUG
+	printk(KERN_INFO DPFX "Read SWC I/O Base Address: low %d, high %d,"
+	                      " res %d\n", addr_l, addr_h, swc_base_addr);
+#endif
+
+	return swc_base_addr;
+}
+
+/* Select Bank 3 of SWC */
+
+static inline void pc87413_swc_bank3(unsigned int swc_base_addr)
+{
+	/* Step 4: Select Bank3 of SWC */
+
+	outb_p(inb(swc_base_addr + 0x0f) | 0x03, swc_base_addr + 0x0f);
+
+#ifdef DEBUG
+	printk(KERN_INFO DPFX "Select Bank3 of SWC\n");
+#endif
+}
+
+/* Set watchdog timeout to x minutes */
+
+static inline void pc87413_programm_wdto(unsigned int swc_base_addr,
+					 char pc87413_time)
+{
+	/* Step 5: Program WDTO, Twd. */
+
+	outb_p(pc87413_time, swc_base_addr + WDTO);
+
+#ifdef DEBUG
+	printk(KERN_INFO DPFX "Set WDTO to %d minutes\n", pc87413_time);
+#endif
+}
+
+/* Enable WDEN */
+
+static inline void pc87413_enable_wden(unsigned int swc_base_addr)
+{
+	/* Step 6: Enable WDEN */
+
+	outb_p(inb (swc_base_addr + WDCTL) | 0x01, swc_base_addr + WDCTL);
+
+#ifdef DEBUG
+	printk(KERN_INFO DPFX "Enable WDEN\n");
+#endif
+}
+
+/* Enable SW_WD_TREN */
+static inline void pc87413_enable_sw_wd_tren(unsigned int swc_base_addr)
+{
+	/* Enable SW_WD_TREN */
+
+	outb_p(inb (swc_base_addr + WDCFG) | 0x80, swc_base_addr + WDCFG);
+
+#ifdef DEBUG
+	printk(KERN_INFO DPFX "Enable SW_WD_TREN\n");
+#endif
+}
+
+/* Disable SW_WD_TREN */
+
+static inline void pc87413_disable_sw_wd_tren(unsigned int swc_base_addr)
+{
+	/* Disable SW_WD_TREN */
+
+	outb_p(inb (swc_base_addr + WDCFG) & 0x7f, swc_base_addr + WDCFG);
+
+#ifdef DEBUG
+	printk(KERN_INFO DPFX "pc87413 - Disable SW_WD_TREN\n");
+#endif
+}
+
+/* Enable SW_WD_TRG */
+
+static inline void pc87413_enable_sw_wd_trg(unsigned int swc_base_addr)
+{
+	/* Enable SW_WD_TRG */
+
+	outb_p(inb (swc_base_addr + WDCTL) | 0x80, swc_base_addr + WDCTL);
+
+#ifdef DEBUG
+	printk(KERN_INFO DPFX "pc87413 - Enable SW_WD_TRG\n");
+#endif
+}
+
+/* Disable SW_WD_TRG */
+
+static inline void pc87413_disable_sw_wd_trg(unsigned int swc_base_addr)
+{
+	/* Disable SW_WD_TRG */
+
+	outb_p(inb (swc_base_addr + WDCTL) & 0x7f, swc_base_addr + WDCTL);
+
+#ifdef DEBUG
+	printk(KERN_INFO DPFX "Disable SW_WD_TRG\n");
+#endif
+}
+
+/* -- Higher level functions ------------------------------------*/
+
+/* Enable the watchdog */
+
+static void pc87413_enable(void)
+{
+	unsigned int swc_base_addr;
+
+	spin_lock(&io_lock);
+
+	pc87413_select_wdt_out();
+	pc87413_enable_swc();
+	swc_base_addr = pc87413_get_swc_base();
+	pc87413_swc_bank3(swc_base_addr);
+	pc87413_programm_wdto(swc_base_addr, timeout);
+	pc87413_enable_wden(swc_base_addr);
+	pc87413_enable_sw_wd_tren(swc_base_addr);
+	pc87413_enable_sw_wd_trg(swc_base_addr);
+
+	spin_unlock(&io_lock);
+}
+
+/* Disable the watchdog */
+
+static void pc87413_disable(void)
+{
+	unsigned int swc_base_addr;
+
+	spin_lock(&io_lock);
+
+	pc87413_select_wdt_out();
+	pc87413_enable_swc();
+	swc_base_addr = pc87413_get_swc_base();
+	pc87413_swc_bank3(swc_base_addr);
+	pc87413_disable_sw_wd_tren(swc_base_addr);
+	pc87413_disable_sw_wd_trg(swc_base_addr);
+	pc87413_programm_wdto(swc_base_addr, 0);
+
+	spin_unlock(&io_lock);
+}
+
+/* Refresh the watchdog */
+
+static void pc87413_refresh(void)
+{
+	unsigned int swc_base_addr;
+
+	spin_lock(&io_lock);
+
+	pc87413_select_wdt_out();
+	pc87413_enable_swc();
+	swc_base_addr = pc87413_get_swc_base();
+	pc87413_swc_bank3(swc_base_addr);
+	pc87413_disable_sw_wd_tren(swc_base_addr);
+	pc87413_disable_sw_wd_trg(swc_base_addr);
+	pc87413_programm_wdto(swc_base_addr, timeout);
+	pc87413_enable_wden(swc_base_addr);
+	pc87413_enable_sw_wd_tren(swc_base_addr);
+	pc87413_enable_sw_wd_trg(swc_base_addr);
+
+	spin_unlock(&io_lock);
+}
+
+/* -- File operations -------------------------------------------*/
+
+/**
+ *	pc87413_open:
+ *	@inode: inode of device
+ *	@file: file handle to device
+ *
+ */
+
+static int pc87413_open(struct inode *inode, struct file *file)
+{
+	/* /dev/watchdog can only be opened once */
+
+	if (test_and_set_bit(0, &timer_enabled))
+		return -EBUSY;
+
+	if (nowayout)
+		__module_get(THIS_MODULE);
+
+	/* Reload and activate timer */
+	pc87413_refresh();
+
+	printk(KERN_INFO PFX "Watchdog enabled. Timeout set to"
+	                         " %d minute(s).\n", timeout);
+
+	return nonseekable_open(inode, file);
+}
+
+/**
+ *	pc87413_release:
+ *	@inode: inode to board
+ *	@file: file handle to board
+ *
+ *	The watchdog has a configurable API. There is a religious dispute
+ *	between people who want their watchdog to be able to shut down and
+ *	those who want to be sure if the watchdog manager dies the machine
+ *	reboots. In the former case we disable the counters, in the latter
+ *	case you have to open it again very soon.
+ */
+
+static int pc87413_release(struct inode *inode, struct file *file)
+{
+	/* Shut off the timer. */
+
+	if (expect_close == 42) {
+		pc87413_disable();
+		printk(KERN_INFO PFX "Watchdog disabled,"
+		                         " sleeping again...\n");
+	} else {
+		printk(KERN_CRIT PFX "Unexpected close, not stopping"
+		                         " watchdog!\n");
+		pc87413_refresh();
+	}
+
+	clear_bit(0, &timer_enabled);
+	expect_close = 0;
+
+	return 0;
+}
+
+/**
+ *	pc87413_status:
+ *
+ *      Return whether the watchdog is enabled (i.e. the timeout is set).
+ */
+
+static int pc87413_status(void)
+{
+	return 0; /* currently not supported */
+}
+
+/**
+ *	pc87413_write:
+ *	@file: file handle to the watchdog
+ *	@data: data buffer to write
+ *	@len: length in bytes
+ *	@ppos: pointer to the position to write. No seeks allowed
+ *
+ *	A write to a watchdog device is defined as a keepalive signal. Any
+ *	write of data will do, as we don't define content meaning.
+ */
+
+static ssize_t pc87413_write(struct file *file, const char __user *data,
+			     size_t len, loff_t *ppos)
+{
+	/* See if we got the magic character 'V' and reload the timer */
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			/* reset expect flag */
+			expect_close = 0;
+
+			/* scan to see whether or not we got the magic character */
+			for (i = 0; i != len; i++) {
+				char c;
+				if (get_user(c, data+i))
+					return -EFAULT;
+				if (c == 'V')
+					expect_close = 42;
+			}
+		}
+
+		/* someone wrote to us, we should reload the timer */
+		pc87413_refresh();
+	}
+	return len;
+}
+
+/**
+ *	pc87413_ioctl:
+ *	@inode: inode of the device
+ *	@file: file handle to the device
+ *	@cmd: watchdog command
+ *	@arg: argument pointer
+ *
+ *	The watchdog API defines a common set of functions for all watchdogs
+ *	according to their available features. We only actually usefully support
+ *	querying capabilities and current status.
+ */
+
+static int pc87413_ioctl(struct inode *inode, struct file *file,
+			 unsigned int cmd, unsigned long arg)
+{
+	int new_timeout;
+
+	union {
+		struct watchdog_info __user *ident;
+		int __user *i;
+	} uarg;
+
+	static struct watchdog_info ident = {
+		.options          = WDIOF_KEEPALIVEPING |
+		                    WDIOF_SETTIMEOUT |
+		                    WDIOF_MAGICCLOSE,
+		.firmware_version = 1,
+		.identity         = "PC87413(HF/F) watchdog"
+	};
+
+	uarg.i = (int __user *)arg;
+
+	switch(cmd) {
+		default:
+			return -ENOTTY;
+
+		case WDIOC_GETSUPPORT:
+			return copy_to_user(uarg.ident, &ident,
+				sizeof(ident)) ? -EFAULT : 0;
+
+		case WDIOC_GETSTATUS:
+			return put_user(pc87413_status(), uarg.i);
+
+		case WDIOC_GETBOOTSTATUS:
+			return put_user(0, uarg.i);
+
+		case WDIOC_KEEPALIVE:
+			pc87413_refresh();
+#ifdef DEBUG
+	                printk(KERN_INFO DPFX "keepalive\n");
+#endif
+			return 0;
+
+		case WDIOC_SETTIMEOUT:
+			if (get_user(new_timeout, uarg.i))
+				return -EFAULT;
+
+			/* the API states this is given in seconds */
+			new_timeout /= 60;
+
+			if (new_timeout < 0 || new_timeout > MAX_TIMEOUT)
+				return -EINVAL;
+
+			timeout = new_timeout;
+			pc87413_refresh();
+
+			/* fall through and return the new timeout... */
+
+		case WDIOC_GETTIMEOUT:
+
+		        new_timeout = timeout * 60;
+
+			return put_user(new_timeout, uarg.i);
+
+		case WDIOC_SETOPTIONS:
+		{
+			int options, retval = -EINVAL;
+
+			if (get_user(options, uarg.i))
+				return -EFAULT;
+
+			if (options & WDIOS_DISABLECARD) {
+			        pc87413_disable();
+				retval = 0;
+			}
+
+			if (options & WDIOS_ENABLECARD) {
+				pc87413_enable();
+				retval = 0;
+			}
+
+			return retval;
+		}
+	}
+}
+
+/* -- Notifier functions ----------------------------------------*/
+
+/**
+ *	notify_sys:
+ *	@this: our notifier block
+ *	@code: the event being reported
+ *	@unused: unused
+ *
+ *	Our notifier is called on system shutdowns. We want to turn the card
+ *	off at reboot otherwise the machine will reboot again during memory
+ *	test or worse yet during the following fsck. This would suck, in fact
+ *	trust me - if it happens it does suck.
+ */
+
+static int pc87413_notify_sys(struct notifier_block *this,
+			      unsigned long code,
+			      void *unused)
+{
+	if (code == SYS_DOWN || code == SYS_HALT)
+	{
+		/* Turn the card off */
+		pc87413_disable();
+	}
+	return NOTIFY_DONE;
+}
+
+/* -- Module's structures ---------------------------------------*/
+
+static struct file_operations pc87413_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.write		= pc87413_write,
+	.ioctl		= pc87413_ioctl,
+	.open		= pc87413_open,
+	.release	= pc87413_release,
+};
+
+static struct notifier_block pc87413_notifier =
+{
+	.notifier_call  = pc87413_notify_sys,
+};
+
+static struct miscdevice pc87413_miscdev=
+{
+	.minor          = WATCHDOG_MINOR,
+	.name           = "watchdog",
+	.fops           = &pc87413_fops
+};
+
+/* -- Module init functions -------------------------------------*/
+
+/**
+ * 	pc87413_init: module's "constructor"
+ *
+ *	Set up the WDT watchdog board. All we have to do is grab the
+ *	resources we require and bitch if anyone beat us to them.
+ *	The open() function will actually kick the board off.
+ */
+
+static int __init pc87413_init(void)
+{
+	int ret;
+
+	spin_lock_init(&io_lock);
+
+	printk(KERN_INFO PFX "Version " VERSION " at io 0x%X\n", WDT_INDEX_IO_PORT);
+
+	/* request_region(io, 2, "pc87413"); */
+
+	ret = register_reboot_notifier(&pc87413_notifier);
+	if (ret != 0) {
+		printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n",
+			ret);
+	}
+
+	ret = misc_register(&pc87413_miscdev);
+
+	if (ret != 0) {
+		printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n",
+			WATCHDOG_MINOR, ret);
+		unregister_reboot_notifier(&pc87413_notifier);
+		return ret;
+	}
+
+	printk(KERN_INFO PFX "initialized. timeout=%d min\n", timeout);
+
+	pc87413_enable();
+
+	return 0;
+}
+
+/**
+ *	pc87413_exit: module's "destructor"
+ *
+ *	Unload the watchdog. You cannot do this with any file handles open.
+ *	If your watchdog is set to continue ticking on close and you unload
+ *	it, well it keeps ticking. We won't get the interrupt but the board
+ *	will not touch PC memory so all is fine. You just have to load a new
+ *	module in 60 seconds or reboot.
+ */
+
+static void __exit pc87413_exit(void)
+{
+	/* Stop the timer before we leave */
+	if (!nowayout)
+	{
+		pc87413_disable();
+		printk(KERN_INFO PFX "Watchdog disabled.\n");
+	}
+
+	misc_deregister(&pc87413_miscdev);
+	unregister_reboot_notifier(&pc87413_notifier);
+	/* release_region(io,2); */
+
+	printk(KERN_INFO PFX "watchdog component driver removed.\n");
+}
+
+module_init(pc87413_init);
+module_exit(pc87413_exit);
+
+MODULE_AUTHOR("Sven Anders <anders@anduras.de>, Marcus Junker <junker@anduras.de>");
+MODULE_DESCRIPTION("PC87413 WDT driver");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+
+module_param(io, int, 0);
+MODULE_PARM_DESC(io, MODNAME " I/O port (default: " __MODULE_STRING(io) ").");
+
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in minutes (default=" __MODULE_STRING(timeout) ").");
+
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
+
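Both new watchdog drivers above expose the generic /dev/watchdog interface: any write counts as a keepalive, a trailing 'V' arms the magic-close path, and WDIOC_SETTIMEOUT takes a value in seconds (which pc87413 then rounds down to whole minutes). As an illustrative userspace sketch only -- not part of this patch, and assuming nothing beyond the standard <linux/watchdog.h> ioctls -- a minimal pinger could look like this:

/*
 * Hypothetical userspace watchdog pinger (illustration only, not part of
 * this series).  Opens /dev/watchdog, sets the timeout, sends keepalives,
 * then uses the magic 'V' close so the timer is actually stopped (unless
 * the driver was loaded with nowayout).
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int timeout = 120;			/* seconds, per the watchdog API */
	int i;

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}

	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* pc87413 converts this to minutes */

	for (i = 0; i < 10; i++) {		/* ping for a while */
		ioctl(fd, WDIOC_KEEPALIVE, 0);
		sleep(timeout / 2);
	}

	write(fd, "V", 1);			/* sets expect_close in the driver */
	close(fd);
	return 0;
}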
diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c
index bda4533..6113872 100644
--- a/drivers/char/watchdog/pcwd_usb.c
+++ b/drivers/char/watchdog/pcwd_usb.c
@@ -561,8 +561,7 @@
  */
 static inline void usb_pcwd_delete (struct usb_pcwd_private *usb_pcwd)
 {
-	if (usb_pcwd->intr_urb != NULL)
-		usb_free_urb (usb_pcwd->intr_urb);
+	usb_free_urb(usb_pcwd->intr_urb);
 	if (usb_pcwd->intr_buffer != NULL)
 		usb_buffer_free(usb_pcwd->udev, usb_pcwd->intr_size,
 				usb_pcwd->intr_buffer, usb_pcwd->intr_dma);
@@ -635,7 +634,7 @@
 	usb_pcwd->intr_size = (le16_to_cpu(endpoint->wMaxPacketSize) > 8 ? le16_to_cpu(endpoint->wMaxPacketSize) : 8);
 
 	/* set up the memory buffers */
-	if (!(usb_pcwd->intr_buffer = usb_buffer_alloc(udev, usb_pcwd->intr_size, SLAB_ATOMIC, &usb_pcwd->intr_dma))) {
+	if (!(usb_pcwd->intr_buffer = usb_buffer_alloc(udev, usb_pcwd->intr_size, GFP_ATOMIC, &usb_pcwd->intr_dma))) {
 		printk(KERN_ERR PFX "Out of memory\n");
 		goto error;
 	}
diff --git a/drivers/char/watchdog/rm9k_wdt.c b/drivers/char/watchdog/rm9k_wdt.c
new file mode 100644
index 0000000..ec39093
--- /dev/null
+++ b/drivers/char/watchdog/rm9k_wdt.c
@@ -0,0 +1,420 @@
+/*
+ *  Watchdog implementation for GPI h/w found on PMC-Sierra RM9xxx
+ *  chips.
+ *
+ *  Copyright (C) 2004 by Basler Vision Technologies AG
+ *  Author: Thomas Koeller <thomas.koeller@baslerweb.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/reboot.h>
+#include <linux/notifier.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/rm9k-ocd.h>
+
+#include <rm9k_wdt.h>
+
+
+#define CLOCK                  125000000
+#define MAX_TIMEOUT_SECONDS    32
+#define CPCCR                  0x0080
+#define CPGIG1SR               0x0044
+#define CPGIG1ER               0x0054
+
+
+/* Function prototypes */
+static irqreturn_t wdt_gpi_irqhdl(int, void *, struct pt_regs *);
+static void wdt_gpi_start(void);
+static void wdt_gpi_stop(void);
+static void wdt_gpi_set_timeout(unsigned int);
+static int wdt_gpi_open(struct inode *, struct file *);
+static int wdt_gpi_release(struct inode *, struct file *);
+static ssize_t wdt_gpi_write(struct file *, const char __user *, size_t, loff_t *);
+static long wdt_gpi_ioctl(struct file *, unsigned int, unsigned long);
+static int wdt_gpi_notify(struct notifier_block *, unsigned long, void *);
+static const struct resource *wdt_gpi_get_resource(struct platform_device *, const char *, unsigned int);
+static int __init wdt_gpi_probe(struct device *);
+static int __exit wdt_gpi_remove(struct device *);
+
+
+static const char wdt_gpi_name[] = "wdt_gpi";
+static atomic_t opencnt;
+static int expect_close;
+static int locked;
+
+
+/* These are set from device resources */
+static void __iomem * wd_regs;
+static unsigned int wd_irq, wd_ctr;
+
+
+/* Module arguments */
+static int timeout = MAX_TIMEOUT_SECONDS;
+module_param(timeout, int, 0444);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds");
+
+static unsigned long resetaddr = 0xbffdc200;
+module_param(resetaddr, ulong, 0444);
+MODULE_PARM_DESC(resetaddr, "Address to write to in order to force a reset");
+
+static unsigned long flagaddr = 0xbffdc104;
+module_param(flagaddr, ulong, 0444);
+MODULE_PARM_DESC(flagaddr, "Address to write boot flags to");
+
+static int powercycle;
+module_param(powercycle, bool, 0444);
+MODULE_PARM_DESC(powercycle, "Cycle power if watchdog expires");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0444);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be disabled once started");
+
+
+/* Interrupt handler */
+static irqreturn_t wdt_gpi_irqhdl(int irq, void *ctxt, struct pt_regs *regs)
+{
+	if (!unlikely(__raw_readl(wd_regs + 0x0008) & 0x1))
+		return IRQ_NONE;
+	__raw_writel(0x1, wd_regs + 0x0008);
+
+
+	printk(KERN_CRIT "%s: watchdog expired - resetting system\n",
+		wdt_gpi_name);
+
+	*(volatile char *) flagaddr |= 0x01;
+	*(volatile char *) resetaddr = powercycle ? 0x01 : 0x2;
+	iob();
+	while (1)
+		cpu_relax();
+}
+
+
+/* Watchdog functions */
+static void wdt_gpi_start(void)
+{
+	u32 reg;
+
+	lock_titan_regs();
+	reg = titan_readl(CPGIG1ER);
+	titan_writel(reg | (0x100 << wd_ctr), CPGIG1ER);
+	iob();
+	unlock_titan_regs();
+}
+
+static void wdt_gpi_stop(void)
+{
+	u32 reg;
+
+	lock_titan_regs();
+	reg = titan_readl(CPCCR) & ~(0xf << (wd_ctr * 4));
+	titan_writel(reg, CPCCR);
+	reg = titan_readl(CPGIG1ER);
+	titan_writel(reg & ~(0x100 << wd_ctr), CPGIG1ER);
+	iob();
+	unlock_titan_regs();
+}
+
+static void wdt_gpi_set_timeout(unsigned int to)
+{
+	u32 reg;
+	const u32 wdval = (to * CLOCK) & ~0x0000000f;
+
+	lock_titan_regs();
+	reg = titan_readl(CPCCR) & ~(0xf << (wd_ctr * 4));
+	titan_writel(reg, CPCCR);
+	wmb();
+	__raw_writel(wdval, wd_regs + 0x0000);
+	wmb();
+	titan_writel(reg | (0x2 << (wd_ctr * 4)), CPCCR);
+	wmb();
+	titan_writel(reg | (0x5 << (wd_ctr * 4)), CPCCR);
+	iob();
+	unlock_titan_regs();
+}
+
+
+/* /dev/watchdog operations */
+static int wdt_gpi_open(struct inode *inode, struct file *file)
+{
+	int res;
+
+	if (unlikely(atomic_dec_if_positive(&opencnt) < 0))
+		return -EBUSY;
+
+	expect_close = 0;
+	if (locked) {
+		module_put(THIS_MODULE);
+		free_irq(wd_irq, &miscdev);
+		locked = 0;
+	}
+
+	res = request_irq(wd_irq, wdt_gpi_irqhdl, SA_SHIRQ | SA_INTERRUPT,
+			  wdt_gpi_name, &miscdev);
+	if (unlikely(res))
+		return res;
+
+	wdt_gpi_set_timeout(timeout);
+	wdt_gpi_start();
+
+	printk(KERN_INFO "%s: watchdog started, timeout = %u seconds\n",
+		wdt_gpi_name, timeout);
+	return nonseekable_open(inode, file);
+}
+
+static int wdt_gpi_release(struct inode *inode, struct file *file)
+{
+	if (nowayout) {
+		printk(KERN_INFO "%s: no way out - watchdog left running\n",
+			wdt_gpi_name);
+		__module_get(THIS_MODULE);
+		locked = 1;
+	} else {
+		if (expect_close) {
+			wdt_gpi_stop();
+			free_irq(wd_irq, &miscdev);
+			printk(KERN_INFO "%s: watchdog stopped\n", wdt_gpi_name);
+		} else {
+			printk(KERN_CRIT "%s: unexpected close() -"
+				" watchdog left running\n",
+				wdt_gpi_name);
+			wdt_gpi_set_timeout(timeout);
+			__module_get(THIS_MODULE);
+			locked = 1;
+		}
+	}
+
+	atomic_inc(&opencnt);
+	return 0;
+}
+
+static ssize_t
+wdt_gpi_write(struct file *f, const char __user *d, size_t s, loff_t *o)
+{
+	char val;
+
+	wdt_gpi_set_timeout(timeout);
+	expect_close = (s > 0) && !get_user(val, d) && (val == 'V');
+	return s ? 1 : 0;
+}
+
+static long
+wdt_gpi_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+	long res = -ENOTTY;
+	const long size = _IOC_SIZE(cmd);
+	int stat;
+	void __user *argp = (void __user *)arg;
+	static struct watchdog_info wdinfo = {
+		.identity               = "RM9xxx/GPI watchdog",
+		.firmware_version       = 0,
+		.options                = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING
+	};
+
+	if (unlikely(_IOC_TYPE(cmd) != WATCHDOG_IOCTL_BASE))
+		return -ENOTTY;
+
+	if ((_IOC_DIR(cmd) & _IOC_READ)
+	    && !access_ok(VERIFY_WRITE, arg, size))
+		return -EFAULT;
+
+	if ((_IOC_DIR(cmd) & _IOC_WRITE)
+	    && !access_ok(VERIFY_READ, arg, size))
+		return -EFAULT;
+
+	expect_close = 0;
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		wdinfo.options = nowayout ?
+			WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING :
+			WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE;
+		res = __copy_to_user(argp, &wdinfo, size) ?  -EFAULT : size;
+		break;
+
+	case WDIOC_GETSTATUS:
+		break;
+
+	case WDIOC_GETBOOTSTATUS:
+		stat = (*(volatile char *) flagaddr & 0x01)
+			? WDIOF_CARDRESET : 0;
+		res = __copy_to_user(argp, &stat, size) ?
+			-EFAULT : size;
+		break;
+
+	case WDIOC_SETOPTIONS:
+		break;
+
+	case WDIOC_KEEPALIVE:
+		wdt_gpi_set_timeout(timeout);
+		res = size;
+		break;
+
+	case WDIOC_SETTIMEOUT:
+		{
+			int val;
+			if (unlikely(__copy_from_user(&val, argp, size))) {
+				res = -EFAULT;
+				break;
+			}
+
+			if (val > MAX_TIMEOUT_SECONDS)
+				val = MAX_TIMEOUT_SECONDS;
+			timeout = val;
+			wdt_gpi_set_timeout(val);
+			res = size;
+			printk(KERN_INFO "%s: timeout set to %u seconds\n",
+				wdt_gpi_name, timeout);
+		}
+		break;
+
+	case WDIOC_GETTIMEOUT:
+		res = __copy_to_user(argp, &timeout, size) ?
+			-EFAULT : size;
+		break;
+	}
+
+	return res;
+}
+
+
+/* Shutdown notifier */
+static int
+wdt_gpi_notify(struct notifier_block *this, unsigned long code, void *unused)
+{
+	if (code == SYS_DOWN || code == SYS_HALT)
+		wdt_gpi_stop();
+
+	return NOTIFY_DONE;
+}
+
+
+/* Kernel interfaces */
+static struct file_operations fops = {
+	.owner		= THIS_MODULE,
+	.open		= wdt_gpi_open,
+	.release	= wdt_gpi_release,
+	.write		= wdt_gpi_write,
+	.unlocked_ioctl	= wdt_gpi_ioctl,
+};
+
+static struct miscdevice miscdev = {
+	.minor		= WATCHDOG_MINOR,
+	.name		= wdt_gpi_name,
+	.fops		= &fops,
+};
+
+static struct notifier_block wdt_gpi_shutdown = {
+	.notifier_call	= wdt_gpi_notify,
+};
+
+
+/* Init & exit procedures */
+static const struct resource *
+wdt_gpi_get_resource(struct platform_device *pdv, const char *name,
+		      unsigned int type)
+{
+	char buf[80];
+	if (snprintf(buf, sizeof buf, "%s_0", name) >= sizeof buf)
+		return NULL;
+	return platform_get_resource_byname(pdv, type, buf);
+}
+
+/* No hotplugging on the platform bus - use __init */
+static int __init wdt_gpi_probe(struct device *dev)
+{
+	int res;
+	struct platform_device * const pdv = to_platform_device(dev);
+	const struct resource
+		* const rr = wdt_gpi_get_resource(pdv, WDT_RESOURCE_REGS,
+						  IORESOURCE_MEM),
+		* const ri = wdt_gpi_get_resource(pdv, WDT_RESOURCE_IRQ,
+						  IORESOURCE_IRQ),
+		* const rc = wdt_gpi_get_resource(pdv, WDT_RESOURCE_COUNTER,
+						  0);
+
+	if (unlikely(!rr || !ri || !rc))
+		return -ENXIO;
+
+	wd_regs = ioremap_nocache(rr->start, rr->end + 1 - rr->start);
+	if (unlikely(!wd_regs))
+		return -ENOMEM;
+	wd_irq = ri->start;
+	wd_ctr = rc->start;
+	res = misc_register(&miscdev);
+	if (res)
+		iounmap(wd_regs);
+	else
+		register_reboot_notifier(&wdt_gpi_shutdown);
+	return res;
+}
+
+static int __exit wdt_gpi_remove(struct device *dev)
+{
+	int res;
+
+	unregister_reboot_notifier(&wdt_gpi_shutdown);
+	res = misc_deregister(&miscdev);
+	iounmap(wd_regs);
+	wd_regs = NULL;
+	return res;
+}
+
+
+/* Device driver init & exit */
+static struct device_driver wdt_gpi_driver = {
+	.name		= (char *) wdt_gpi_name,
+	.bus		= &platform_bus_type,
+	.owner		= THIS_MODULE,
+	.probe		= wdt_gpi_probe,
+	.remove		= __exit_p(wdt_gpi_remove),
+	.shutdown	= NULL,
+	.suspend	= NULL,
+	.resume		= NULL,
+};
+
+static int __init wdt_gpi_init_module(void)
+{
+	atomic_set(&opencnt, 1);
+	if (timeout > MAX_TIMEOUT_SECONDS)
+		timeout = MAX_TIMEOUT_SECONDS;
+	return driver_register(&wdt_gpi_driver);
+}
+
+static void __exit wdt_gpi_cleanup_module(void)
+{
+	driver_unregister(&wdt_gpi_driver);
+}
+
+module_init(wdt_gpi_init_module);
+module_exit(wdt_gpi_cleanup_module);
+
+MODULE_AUTHOR("Thomas Koeller <thomas.koeller@baslerweb.com>");
+MODULE_DESCRIPTION("Basler eXcite watchdog driver for gpi devices");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 05f8ce2..b418b16 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,9 +31,11 @@
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-void cn_queue_wrapper(void *data)
+void cn_queue_wrapper(struct work_struct *work)
 {
-	struct cn_callback_data *d = data;
+	struct cn_callback_entry *cbq =
+		container_of(work, struct cn_callback_entry, work.work);
+	struct cn_callback_data *d = &cbq->data;
 
 	d->callback(d->callback_priv);
 
@@ -57,7 +59,7 @@
 	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
 	cbq->data.callback = callback;
 	
-	INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data);
+	INIT_DELAYED_WORK(&cbq->work, &cn_queue_wrapper);
 	return cbq;
 }
 
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index b49bacf..5e7cd45 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -135,40 +135,39 @@
 	spin_lock_bh(&dev->cbdev->queue_lock);
 	list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
 		if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
-			if (likely(!test_bit(0, &__cbq->work.pending) &&
+			if (likely(!test_bit(WORK_STRUCT_PENDING,
+					     &__cbq->work.work.management) &&
 					__cbq->data.ddata == NULL)) {
 				__cbq->data.callback_priv = msg;
 
 				__cbq->data.ddata = data;
 				__cbq->data.destruct_data = destruct_data;
 
-				if (queue_work(dev->cbdev->cn_queue,
-						&__cbq->work))
+				if (queue_delayed_work(
+					    dev->cbdev->cn_queue,
+					    &__cbq->work, 0))
 					err = 0;
 			} else {
-				struct work_struct *w;
 				struct cn_callback_data *d;
 				
-				w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC);
-				if (w) {
-					d = (struct cn_callback_data *)(w+1);
-
+				__cbq = kzalloc(sizeof(*__cbq), GFP_ATOMIC);
+				if (__cbq) {
+					d = &__cbq->data;
 					d->callback_priv = msg;
 					d->callback = __cbq->data.callback;
 					d->ddata = data;
 					d->destruct_data = destruct_data;
-					d->free = w;
+					d->free = __cbq;
 
-					INIT_LIST_HEAD(&w->entry);
-					w->pending = 0;
-					w->func = &cn_queue_wrapper;
-					w->data = d;
-					init_timer(&w->timer);
+					INIT_DELAYED_WORK(&__cbq->work,
+							  &cn_queue_wrapper);
 					
-					if (queue_work(dev->cbdev->cn_queue, w))
+					if (queue_delayed_work(
+						    dev->cbdev->cn_queue,
+						    &__cbq->work, 0))
 						err = 0;
 					else {
-						kfree(w);
+						kfree(__cbq);
 						err = -EINVAL;
 					}
 				} else
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index dd0c262..47ab42d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -42,7 +42,7 @@
 
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
-static void handle_update(void *data);
+static void handle_update(struct work_struct *work);
 
 /**
  * Two notifier lists: the "policy" list is involved in the
@@ -665,7 +665,7 @@
 	mutex_init(&policy->lock);
 	mutex_lock(&policy->lock);
 	init_completion(&policy->kobj_unregister);
-	INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
+	INIT_WORK(&policy->update, handle_update);
 
 	/* call driver. From then on the cpufreq must be able
 	 * to accept all calls to ->verify and ->setpolicy for this CPU
@@ -895,9 +895,11 @@
 }
 
 
-static void handle_update(void *data)
+static void handle_update(struct work_struct *work)
 {
-	unsigned int cpu = (unsigned int)(long)data;
+	struct cpufreq_policy *policy =
+		container_of(work, struct cpufreq_policy, update);
+	unsigned int cpu = policy->cpu;
 	dprintk("handle_update for cpu %u called\n", cpu);
 	cpufreq_update_policy(cpu);
 }
@@ -1535,7 +1537,6 @@
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int cpufreq_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
@@ -1575,7 +1576,6 @@
 {
     .notifier_call = cpufreq_cpu_callback,
 };
-#endif /* CONFIG_HOTPLUG_CPU */
 
 /*********************************************************************
  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c4c578d..5ef5ede 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -59,7 +59,7 @@
 #define MAX_SAMPLING_DOWN_FACTOR		(10)
 #define TRANSITION_LATENCY_LIMIT		(10 * 1000)
 
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
 
 struct cpu_dbs_info_s {
 	struct cpufreq_policy 	*cur_policy;
@@ -82,7 +82,7 @@
  * is recursive for the same process. -Venki
  */
 static DEFINE_MUTEX 	(dbs_mutex);
-static DECLARE_WORK	(dbs_work, do_dbs_timer, NULL);
+static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
 
 struct dbs_tuners {
 	unsigned int 		sampling_rate;
@@ -420,7 +420,7 @@
 	}
 }
 
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
 { 
 	int i;
 	lock_cpu_hotplug();
@@ -435,7 +435,6 @@
 
 static inline void dbs_timer_init(void)
 {
-	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	return;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index bf8aa45..e1cc511 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -47,13 +47,17 @@
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
 #define TRANSITION_LATENCY_LIMIT		(10 * 1000)
 
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
+
+/* Sampling types */
+enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 
 struct cpu_dbs_info_s {
 	cputime64_t prev_cpu_idle;
 	cputime64_t prev_cpu_wall;
 	struct cpufreq_policy *cur_policy;
- 	struct work_struct work;
+ 	struct delayed_work work;
+	enum dbs_sample sample_type;
 	unsigned int enable;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int freq_lo;
@@ -407,30 +411,31 @@
 	}
 }
 
-/* Sampling types */
-enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
-
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
 {
 	unsigned int cpu = smp_processor_id();
 	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	enum dbs_sample sample_type = dbs_info->sample_type;
 	/* We want all CPUs to do sampling nearly on same jiffy */
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+	/* Permit rescheduling of this work item */
+	work_release(work);
+
 	delay -= jiffies % delay;
 
 	if (!dbs_info->enable)
 		return;
 	/* Common NORMAL_SAMPLE setup */
-	INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE);
+	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	if (!dbs_tuners_ins.powersave_bias ||
-	    (unsigned long) data == DBS_NORMAL_SAMPLE) {
+	    sample_type == DBS_NORMAL_SAMPLE) {
 		lock_cpu_hotplug();
 		dbs_check_cpu(dbs_info);
 		unlock_cpu_hotplug();
 		if (dbs_info->freq_lo) {
 			/* Setup timer for SUB_SAMPLE */
-			INIT_WORK(&dbs_info->work, do_dbs_timer,
-					(void *)DBS_SUB_SAMPLE);
+			dbs_info->sample_type = DBS_SUB_SAMPLE;
 			delay = dbs_info->freq_hi_jiffies;
 		}
 	} else {
@@ -449,7 +454,8 @@
 	delay -= jiffies % delay;
 
 	ondemand_powersave_bias_init();
-	INIT_WORK(&dbs_info->work, do_dbs_timer, NULL);
+	INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
+	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
 }
 
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index adb5541..e816535 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -51,4 +51,17 @@
 	  If unsure say M. The compiled module will be
 	  called padlock-sha.ko
 
+config CRYPTO_DEV_GEODE
+	tristate "Support for the Geode LX AES engine"
+	depends on CRYPTO && X86_32
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
+	default m
+	help
+	  Say 'Y' here to use the AMD Geode LX processor on-board AES
+	  engine for the CryptoAPI AES algorithm.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called geode-aes.
+
 endmenu
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 4c3d0ec..6059cf8 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
+obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
new file mode 100644
index 0000000..43a6839
--- /dev/null
+++ b/drivers/crypto/geode-aes.c
@@ -0,0 +1,474 @@
+ /* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <crypto/algapi.h>
+
+#include <asm/io.h>
+#include <asm/delay.h>
+
+#include "geode-aes.h"
+
+/* Register definitions */
+
+#define AES_CTRLA_REG  0x0000
+
+#define AES_CTRL_START     0x01
+#define AES_CTRL_DECRYPT   0x00
+#define AES_CTRL_ENCRYPT   0x02
+#define AES_CTRL_WRKEY     0x04
+#define AES_CTRL_DCA       0x08
+#define AES_CTRL_SCA       0x10
+#define AES_CTRL_CBC       0x20
+
+#define AES_INTR_REG  0x0008
+
+#define AES_INTRA_PENDING (1 << 16)
+#define AES_INTRB_PENDING (1 << 17)
+
+#define AES_INTR_PENDING  (AES_INTRA_PENDING | AES_INTRB_PENDING)
+#define AES_INTR_MASK     0x07
+
+#define AES_SOURCEA_REG   0x0010
+#define AES_DSTA_REG      0x0014
+#define AES_LENA_REG      0x0018
+#define AES_WRITEKEY0_REG 0x0030
+#define AES_WRITEIV0_REG  0x0040
+
+/*  A very large counter that is used to gracefully bail out of an
+ *  operation in case of trouble
+ */
+
+#define AES_OP_TIMEOUT    0x50000
+
+/* Static structures */
+
+static void __iomem * _iobase;
+static spinlock_t lock;
+
+/* Write a 128 bit field (either a writable key or IV) */
+static inline void
+_writefield(u32 offset, void *value)
+{
+	int i;
+	for(i = 0; i < 4; i++)
+		iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
+}
+
+/* Read a 128 bit field (either a writable key or IV) */
+static inline void
+_readfield(u32 offset, void *value)
+{
+	int i;
+	for(i = 0; i < 4; i++)
+		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
+}
+
+static int
+do_crypt(void *src, void *dst, int len, u32 flags)
+{
+	u32 status;
+	u32 counter = AES_OP_TIMEOUT;
+
+	iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
+	iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
+	iowrite32(len,  _iobase + AES_LENA_REG);
+
+	/* Start the operation */
+	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);
+
+	do
+		status = ioread32(_iobase + AES_INTR_REG);
+	while(!(status & AES_INTRA_PENDING) && --counter);
+
+	/* Clear the event */
+	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
+	return counter ? 0 : 1;
+}
+
+static unsigned int
+geode_aes_crypt(struct geode_aes_op *op)
+{
+
+	u32 flags = 0;
+	unsigned long iflags;
+
+	if (op->len == 0 || op->src == op->dst)
+		return 0;
+
+	if (op->flags & AES_FLAGS_COHERENT)
+		flags |= (AES_CTRL_DCA | AES_CTRL_SCA);
+
+	if (op->dir == AES_DIR_ENCRYPT)
+		flags |= AES_CTRL_ENCRYPT;
+
+	/* Start the critical section */
+
+	spin_lock_irqsave(&lock, iflags);
+
+	if (op->mode == AES_MODE_CBC) {
+		flags |= AES_CTRL_CBC;
+		_writefield(AES_WRITEIV0_REG, op->iv);
+	}
+
+	if (op->flags & AES_FLAGS_USRKEY) {
+		flags |= AES_CTRL_WRKEY;
+		_writefield(AES_WRITEKEY0_REG, op->key);
+	}
+
+	do_crypt(op->src, op->dst, op->len, flags);
+
+	if (op->mode == AES_MODE_CBC)
+		_readfield(AES_WRITEIV0_REG, op->iv);
+
+	spin_unlock_irqrestore(&lock, iflags);
+
+	return op->len;
+}
+
+/* CRYPTO-API Functions */
+
+static int
+geode_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int len)
+{
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	if (len != AES_KEY_LENGTH) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+
+	memcpy(op->key, key, len);
+	return 0;
+}
+
+static void
+geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	if ((out == NULL) || (in == NULL))
+		return;
+
+	op->src = (void *) in;
+	op->dst = (void *) out;
+	op->mode = AES_MODE_ECB;
+	op->flags = 0;
+	op->len = AES_MIN_BLOCK_SIZE;
+	op->dir = AES_DIR_ENCRYPT;
+
+	geode_aes_crypt(op);
+}
+
+
+static void
+geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+	if ((out == NULL) || (in == NULL))
+		return;
+
+	op->src = (void *) in;
+	op->dst = (void *) out;
+	op->mode = AES_MODE_ECB;
+	op->flags = 0;
+	op->len = AES_MIN_BLOCK_SIZE;
+	op->dir = AES_DIR_DECRYPT;
+
+	geode_aes_crypt(op);
+}
+
+
+static struct crypto_alg geode_alg = {
+	.cra_name               =       "aes",
+	.cra_driver_name	=       "geode-aes-128",
+	.cra_priority           =       300,
+	.cra_alignmask          =       15,
+	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct geode_aes_op),
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(geode_alg.cra_list),
+	.cra_u			=	{
+		.cipher = {
+			.cia_min_keysize	=  AES_KEY_LENGTH,
+			.cia_max_keysize	=  AES_KEY_LENGTH,
+			.cia_setkey		=  geode_setkey,
+			.cia_encrypt		=  geode_encrypt,
+			.cia_decrypt		=  geode_decrypt
+		}
+	}
+};
+
+static int
+geode_cbc_decrypt(struct blkcipher_desc *desc,
+		  struct scatterlist *dst, struct scatterlist *src,
+		  unsigned int nbytes)
+{
+	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err, ret;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while((nbytes = walk.nbytes)) {
+		op->src = walk.src.virt.addr;
+		op->dst = walk.dst.virt.addr;
+		op->mode = AES_MODE_CBC;
+		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+		op->dir = AES_DIR_DECRYPT;
+
+		memcpy(op->iv, walk.iv, AES_IV_LENGTH);
+
+		ret = geode_aes_crypt(op);
+
+		memcpy(walk.iv, op->iv, AES_IV_LENGTH);
+		nbytes -= ret;
+
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
+}
+
+static int
+geode_cbc_encrypt(struct blkcipher_desc *desc,
+		  struct scatterlist *dst, struct scatterlist *src,
+		  unsigned int nbytes)
+{
+	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err, ret;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while((nbytes = walk.nbytes)) {
+		op->src = walk.src.virt.addr;
+		op->dst = walk.dst.virt.addr;
+		op->mode = AES_MODE_CBC;
+		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+		op->dir = AES_DIR_ENCRYPT;
+
+		memcpy(op->iv, walk.iv, AES_IV_LENGTH);
+
+		ret = geode_aes_crypt(op);
+		nbytes -= ret;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
+}
+
+static struct crypto_alg geode_cbc_alg = {
+	.cra_name		=	"cbc(aes)",
+	.cra_driver_name	=	"cbc-aes-geode-128",
+	.cra_priority		=	400,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct geode_aes_op),
+	.cra_alignmask		=	15,
+	.cra_type		=	&crypto_blkcipher_type,
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(geode_cbc_alg.cra_list),
+	.cra_u			=	{
+		.blkcipher = {
+			.min_keysize		=	AES_KEY_LENGTH,
+			.max_keysize		=	AES_KEY_LENGTH,
+			.setkey			=	geode_setkey,
+			.encrypt		=	geode_cbc_encrypt,
+			.decrypt		=	geode_cbc_decrypt,
+		}
+	}
+};
+
+static int
+geode_ecb_decrypt(struct blkcipher_desc *desc,
+		  struct scatterlist *dst, struct scatterlist *src,
+		  unsigned int nbytes)
+{
+	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err, ret;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while((nbytes = walk.nbytes)) {
+		op->src = walk.src.virt.addr;
+		op->dst = walk.dst.virt.addr;
+		op->mode = AES_MODE_ECB;
+		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+		op->dir = AES_DIR_DECRYPT;
+
+		ret = geode_aes_crypt(op);
+		nbytes -= ret;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
+}
+
+static int
+geode_ecb_encrypt(struct blkcipher_desc *desc,
+		  struct scatterlist *dst, struct scatterlist *src,
+		  unsigned int nbytes)
+{
+	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+	struct blkcipher_walk walk;
+	int err, ret;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while((nbytes = walk.nbytes)) {
+		op->src = walk.src.virt.addr;
+		op->dst = walk.dst.virt.addr;
+		op->mode = AES_MODE_ECB;
+		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+		op->dir = AES_DIR_ENCRYPT;
+
+		ret = geode_aes_crypt(op);
+		nbytes -= ret;
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
+}
+
+static struct crypto_alg geode_ecb_alg = {
+	.cra_name		=	"ecb(aes)",
+	.cra_driver_name	=	"ecb-aes-geode-128",
+	.cra_priority		=	400,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
+	.cra_ctxsize		=	sizeof(struct geode_aes_op),
+	.cra_alignmask		=	15,
+	.cra_type		=	&crypto_blkcipher_type,
+	.cra_module		=	THIS_MODULE,
+	.cra_list		=	LIST_HEAD_INIT(geode_ecb_alg.cra_list),
+	.cra_u			=	{
+		.blkcipher = {
+			.min_keysize		=	AES_KEY_LENGTH,
+			.max_keysize		=	AES_KEY_LENGTH,
+			.setkey			=	geode_setkey,
+			.encrypt		=	geode_ecb_encrypt,
+			.decrypt		=	geode_ecb_decrypt,
+		}
+	}
+};
+
+static void
+geode_aes_remove(struct pci_dev *dev)
+{
+	crypto_unregister_alg(&geode_alg);
+	crypto_unregister_alg(&geode_ecb_alg);
+	crypto_unregister_alg(&geode_cbc_alg);
+
+	pci_iounmap(dev, _iobase);
+	_iobase = NULL;
+
+	pci_release_regions(dev);
+	pci_disable_device(dev);
+}
+
+
+static int
+geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	int ret;
+
+	if ((ret = pci_enable_device(dev)))
+		return ret;
+
+	if ((ret = pci_request_regions(dev, "geode-aes-128")))
+		goto eenable;
+
+	_iobase = pci_iomap(dev, 0, 0);
+
+	if (_iobase == NULL) {
+		ret = -ENOMEM;
+		goto erequest;
+	}
+
+	spin_lock_init(&lock);
+
+	/* Clear any pending activity */
+	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
+
+	if ((ret = crypto_register_alg(&geode_alg)))
+		goto eiomap;
+
+	if ((ret = crypto_register_alg(&geode_ecb_alg)))
+		goto ealg;
+
+	if ((ret = crypto_register_alg(&geode_cbc_alg)))
+		goto eecb;
+
+	printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
+	return 0;
+
+ eecb:
+	crypto_unregister_alg(&geode_ecb_alg);
+
+ ealg:
+	crypto_unregister_alg(&geode_alg);
+
+ eiomap:
+	pci_iounmap(dev, _iobase);
+
+ erequest:
+	pci_release_regions(dev);
+
+ eenable:
+	pci_disable_device(dev);
+
+	printk(KERN_ERR "geode-aes: GEODE AES initialization failed.\n");
+	return ret;
+}
+
+static struct pci_device_id geode_aes_tbl[] = {
+	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, PCI_ANY_ID, PCI_ANY_ID} ,
+	{ 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, geode_aes_tbl);
+
+static struct pci_driver geode_aes_driver = {
+	.name = "Geode LX AES",
+	.id_table = geode_aes_tbl,
+	.probe = geode_aes_probe,
+	.remove = __devexit_p(geode_aes_remove)
+};
+
+static int __init
+geode_aes_init(void)
+{
+	return pci_module_init(&geode_aes_driver);
+}
+
+static void __exit
+geode_aes_exit(void)
+{
+	pci_unregister_driver(&geode_aes_driver);
+}
+
+MODULE_AUTHOR("Advanced Micro Devices, Inc.");
+MODULE_DESCRIPTION("Geode LX Hardware AES driver");
+MODULE_LICENSE("GPL");
+
+module_init(geode_aes_init);
+module_exit(geode_aes_exit);
diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
new file mode 100644
index 0000000..8003a36
--- /dev/null
+++ b/drivers/crypto/geode-aes.h
@@ -0,0 +1,40 @@
+/* Copyright (C) 2003-2006, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _GEODE_AES_H_
+#define _GEODE_AES_H_
+
+#define AES_KEY_LENGTH 16
+#define AES_IV_LENGTH  16
+
+#define AES_MIN_BLOCK_SIZE 16
+
+#define AES_MODE_ECB 0
+#define AES_MODE_CBC 1
+
+#define AES_DIR_DECRYPT 0
+#define AES_DIR_ENCRYPT 1
+
+#define AES_FLAGS_USRKEY   (1 << 0)
+#define AES_FLAGS_COHERENT (1 << 1)
+
+struct geode_aes_op {
+
+	void *src;
+	void *dst;
+
+	u32 mode;
+	u32 dir;
+	u32 flags;
+	int len;
+
+	u8 key[AES_KEY_LENGTH];
+	u8 iv[AES_IV_LENGTH];
+};
+
+#endif
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index 0358419..8e87261 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -636,10 +636,10 @@
 	dma_cookie_t cookie;
 	int err = 0;
 
-	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL);
+	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
 	if (!src)
 		return -ENOMEM;
-	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL);
+	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
 	if (!dest) {
 		kfree(src);
 		return -ENOMEM;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 75e9e38..1b4fc92 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -28,6 +28,7 @@
 #include <linux/sysdev.h>
 #include <linux/ctype.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/page.h>
 #include <asm/edac.h>
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index e5cb0fd..b1dc63e 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -21,6 +21,7 @@
     etc voltage & frequency control is not supported!
 */
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/jiffies.h>
diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c
index 26be4ea..e8ef62b 100644
--- a/drivers/hwmon/hdaps.c
+++ b/drivers/hwmon/hdaps.c
@@ -33,6 +33,7 @@
 #include <linux/module.h>
 #include <linux/timer.h>
 #include <linux/dmi.h>
+#include <linux/jiffies.h>
 #include <asm/io.h>
 
 #define HDAPS_LOW_PORT		0x1600	/* first port used by hdaps */
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 5cbf8b9..90f91d0 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -125,6 +125,7 @@
 	    ICH7
 	    ESB2
 	    ICH8
+	    ICH9
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index bbb2fbe..c7be2fd 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -33,6 +33,7 @@
     ICH7		27DA
     ESB2		269B
     ICH8		283E
+    ICH9		2930
     This driver supports several versions of Intel's I/O Controller Hubs (ICH).
     For SMBus support, they are similar to the PIIX4 and are part
     of Intel's '810' and other chipsets.
@@ -457,6 +458,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_17) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_17) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) },
 	{ 0, }
 };
 
diff --git a/drivers/i2c/chips/ds1374.c b/drivers/i2c/chips/ds1374.c
index 4630f19..15edf40 100644
--- a/drivers/i2c/chips/ds1374.c
+++ b/drivers/i2c/chips/ds1374.c
@@ -140,12 +140,14 @@
 	return t1;
 }
 
-static void ds1374_set_work(void *arg)
+static ulong new_time;
+
+static void ds1374_set_work(struct work_struct *work)
 {
 	ulong t1, t2;
 	int limit = 10;		/* arbitrary retry limit */
 
-	t1 = *(ulong *) arg;
+	t1 = new_time;
 
 	mutex_lock(&ds1374_mutex);
 
@@ -167,11 +169,9 @@
 			 "can't confirm time set from rtc chip\n");
 }
 
-static ulong new_time;
-
 static struct workqueue_struct *ds1374_workqueue;
 
-static DECLARE_WORK(ds1374_work, ds1374_set_work, &new_time);
+static DECLARE_WORK(ds1374_work, ds1374_set_work);
 
 int ds1374_set_rtc_time(ulong nowtime)
 {
@@ -180,7 +180,7 @@
 	if (in_interrupt())
 		queue_work(ds1374_workqueue, &ds1374_work);
 	else
-		ds1374_set_work(&new_time);
+		ds1374_set_work(NULL);
 
 	return 0;
 }
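
The ds1374 change illustrates the new workqueue calling convention used by most of
the conversions below: handlers now receive the struct work_struct itself rather
than a caller-supplied void pointer, so per-work data has to live either in
file-scope state (as above) or in a structure the work item is embedded in. A
minimal sketch of the embedded form, all names hypothetical:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
        int value;
        struct work_struct work;
};

static void my_work_fn(struct work_struct *work)
{
        /* Recover the owning object from the work item itself. */
        struct my_dev *dev = container_of(work, struct my_dev, work);

        printk(KERN_INFO "value = %d\n", dev->value);
}

static void my_dev_start(struct my_dev *dev)
{
        INIT_WORK(&dev->work, my_work_fn);      /* no data argument any more */
        schedule_work(&dev->work);
}
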
diff --git a/drivers/i2c/chips/m41t00.c b/drivers/i2c/chips/m41t00.c
index 2dd0a34..420377c 100644
--- a/drivers/i2c/chips/m41t00.c
+++ b/drivers/i2c/chips/m41t00.c
@@ -215,8 +215,15 @@
 }
 
 static ulong new_time;
+/* well, isn't this API just _lovely_? */
+static void
+m41t00_barf(struct work_struct *unusable)
+{
+	m41t00_set(&new_time);
+}
+
 static struct workqueue_struct *m41t00_wq;
-static DECLARE_WORK(m41t00_work, m41t00_set, &new_time);
+static DECLARE_WORK(m41t00_work, m41t00_barf);
 
 int
 m41t00_set_rtc_time(ulong nowtime)
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 3f86903..94a4e9a 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -42,7 +42,7 @@
 struct i2c_dev {
 	struct list_head list;
 	struct i2c_adapter *adap;
-	struct class_device *class_dev;
+	struct device *dev;
 };
 
 #define I2C_MINORS	256
@@ -92,15 +92,16 @@
 	spin_unlock(&i2c_dev_list_lock);
 }
 
-static ssize_t show_adapter_name(struct class_device *class_dev, char *buf)
+static ssize_t show_adapter_name(struct device *dev,
+				 struct device_attribute *attr, char *buf)
 {
-	struct i2c_dev *i2c_dev = i2c_dev_get_by_minor(MINOR(class_dev->devt));
+	struct i2c_dev *i2c_dev = i2c_dev_get_by_minor(MINOR(dev->devt));
 
 	if (!i2c_dev)
 		return -ENODEV;
 	return sprintf(buf, "%s\n", i2c_dev->adap->name);
 }
-static CLASS_DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL);
+static DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL);
 
 static ssize_t i2cdev_read (struct file *file, char __user *buf, size_t count,
                             loff_t *offset)
@@ -413,15 +414,14 @@
 		return PTR_ERR(i2c_dev);
 
 	/* register this i2c device with the driver core */
-	i2c_dev->class_dev = class_device_create(i2c_dev_class, NULL,
-						 MKDEV(I2C_MAJOR, adap->nr),
-						 &adap->dev, "i2c-%d",
-						 adap->nr);
-	if (!i2c_dev->class_dev) {
+	i2c_dev->dev = device_create(i2c_dev_class, &adap->dev,
+				     MKDEV(I2C_MAJOR, adap->nr),
+				     "i2c-%d", adap->nr);
+	if (!i2c_dev->dev) {
 		res = -ENODEV;
 		goto error;
 	}
-	res = class_device_create_file(i2c_dev->class_dev, &class_device_attr_name);
+	res = device_create_file(i2c_dev->dev, &dev_attr_name);
 	if (res)
 		goto error_destroy;
 
@@ -429,7 +429,7 @@
 		 adap->name, adap->nr);
 	return 0;
 error_destroy:
-	class_device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
+	device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
 error:
 	return_i2c_dev(i2c_dev);
 	kfree(i2c_dev);
@@ -444,9 +444,9 @@
 	if (!i2c_dev) /* attach_adapter must have failed */
 		return 0;
 
-	class_device_remove_file(i2c_dev->class_dev, &class_device_attr_name);
+	device_remove_file(i2c_dev->dev, &dev_attr_name);
 	return_i2c_dev(i2c_dev);
-	class_device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
+	device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
 	kfree(i2c_dev);
 
 	pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
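
The class_device to struct device conversion above follows the usual driver-core
recipe: device_create()/device_destroy() replace the class_device calls, and
attributes become DEVICE_ATTR with the extra device_attribute argument in the show
routine. A small sketch under those assumptions (names hypothetical):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/stat.h>

static ssize_t show_label(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "example\n");
}
static DEVICE_ATTR(label, S_IRUGO, show_label, NULL);

static int example_register(struct class *cls, struct device *parent,
                            dev_t devt)
{
        /* Creates /sys/class/<cls>/example-0 and its "label" attribute. */
        struct device *d = device_create(cls, parent, devt, "example-%d", 0);

        if (IS_ERR(d))
                return PTR_ERR(d);
        return device_create_file(d, &dev_attr_label);
}
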
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 0c68d0f..e23bc0d 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -389,14 +389,6 @@
 	  Linux. This may slow disk throughput by a few percent, but at least
 	  things will operate 100% reliably.
 
-config BLK_DEV_SL82C105
-	tristate "Winbond SL82c105 support"
-	depends on PCI && (PPC || ARM) && BLK_DEV_IDEPCI
-	help
-	  If you have a Winbond SL82c105 IDE controller, say Y here to enable
-	  special configuration for this chip. This is common on various CHRP
-	  motherboards, but could be used elsewhere. If in doubt, say Y.
-
 config BLK_DEV_IDEDMA_PCI
 	bool "Generic PCI bus-master DMA support"
 	depends on PCI && BLK_DEV_IDEPCI
@@ -712,6 +704,14 @@
 
 	  Please read the comments at the top of <file:drivers/ide/pci/sis5513.c>.
 
+config BLK_DEV_SL82C105
+	tristate "Winbond SL82c105 support"
+	depends on (PPC || ARM)
+	help
+	  If you have a Winbond SL82c105 IDE controller, say Y here to enable
+	  special configuration for this chip. This is common on various CHRP
+	  motherboards, but could be used elsewhere. If in doubt, say Y.
+
 config BLK_DEV_SLC90E66
 	tristate "SLC90E66 chipset support"
 	help
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 8ccee9c..e3a2676 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -1635,7 +1635,7 @@
 /*
 ** Get ATAPI_FORMAT_UNIT progress indication.
 **
-** Userland gives a pointer to an int.  The int is set to a progresss
+** Userland gives a pointer to an int.  The int is set to a progress
 ** indicator 0-65536, with 65536=100%.
 **
 ** If the drive does not support format progress indication, we just check
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 287a662..1689076 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -973,8 +973,8 @@
  *	@drive: drive
  *
  *	Automatically remove all the driver specific settings for this
- *	drive. This function may sleep and must not be called from IRQ
- *	context. The caller must hold ide_setting_sem.
+ *	drive. This function may not be called from IRQ context. The
+ *	caller must hold ide_setting_sem.
  */
  
 static void auto_remove_settings (ide_drive_t *drive)
@@ -1874,11 +1874,22 @@
 {
 	unsigned long flags;
 	
-	down(&ide_setting_sem);
-	spin_lock_irqsave(&ide_lock, flags);
 #ifdef CONFIG_PROC_FS
 	ide_remove_proc_entries(drive->proc, driver->proc);
 #endif
+	down(&ide_setting_sem);
+	spin_lock_irqsave(&ide_lock, flags);
+	/*
+	 * ide_setting_sem protects the settings list
+	 * ide_lock protects the use of settings
+	 *
+	 * so we need to hold both, ide_setting_sem because we want to
+	 * modify the settings list, and ide_lock because we cannot take
+	 * a setting out that is being used.
+	 *
+	 * OTOH both ide_{read,write}_setting are only ever used under
+	 * ide_setting_sem.
+	 */
 	auto_remove_settings(drive);
 	spin_unlock_irqrestore(&ide_lock, flags);
 	up(&ide_setting_sem);
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index bef4759..7efd28a 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -192,20 +192,10 @@
     tuple.TupleOffset = 0;
     tuple.TupleDataMax = 255;
     tuple.Attributes = 0;
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &stk->parse));
-    link->conf.ConfigBase = stk->parse.config.base;
-    link->conf.Present = stk->parse.config.rmask[0];
 
-    tuple.DesiredTuple = CISTPL_MANFID;
-    if (!pcmcia_get_first_tuple(link, &tuple) &&
-	!pcmcia_get_tuple_data(link, &tuple) &&
-	!pcmcia_parse_tuple(link, &tuple, &stk->parse))
-	is_kme = ((stk->parse.manfid.manf == MANFID_KME) &&
-		  ((stk->parse.manfid.card == PRODID_KME_KXLC005_A) ||
-		   (stk->parse.manfid.card == PRODID_KME_KXLC005_B)));
+    is_kme = ((link->manf_id == MANFID_KME) &&
+	      ((link->card_id == PRODID_KME_KXLC005_A) ||
+	       (link->card_id == PRODID_KME_KXLC005_B)));
 
     /* Not sure if this is right... look up the current Vcc */
     CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &stk->conf));
@@ -408,8 +398,10 @@
 	PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
 	PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
 	PCMCIA_DEVICE_PROD_ID1("TRANSCEND    512M   ", 0xd0909443),
+	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
 	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
 	PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
+	PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
 	PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
 	PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
 	PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index 2af634d..61f1a96 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -35,7 +35,7 @@
 #include <linux/ide.h>
 #include <asm/io.h>
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
+#ifdef CONFIG_PPC_CHRP
 #include <asm/processor.h>
 #endif
 
@@ -282,11 +282,11 @@
 	 * Find the ISA bridge to see how good the IDE is.
 	 */
 	via_config = via_config_find(&isa);
-	if (!via_config->id) {
-		printk(KERN_WARNING "VP_IDE: Unknown VIA SouthBridge, disabling DMA.\n");
-		pci_dev_put(isa);
-		return -ENODEV;
-	}
+
+	/* We checked this earlier so if it fails here deep badness
+	   is involved */
+
+	BUG_ON(!via_config->id);
 
 	/*
 	 * Setup or disable Clk66 if appropriate
@@ -442,7 +442,7 @@
 	hwif->speedproc = &via_set_drive;
 
 
-#if defined(CONFIG_PPC_CHRP) && defined(CONFIG_PPC32)
+#ifdef CONFIG_PPC_CHRP
 	if(machine_is(chrp) && _chrp_type == _CHRP_Pegasos) {
 		hwif->irq = hwif->channel ? 15 : 14;
 	}
@@ -494,6 +494,17 @@
 
 static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
+	struct pci_dev *isa = NULL;
+	struct via_isa_bridge *via_config;
+	/*
+	 * Find the ISA bridge and check we know what it is.
+	 */
+	via_config = via_config_find(&isa);
+	pci_dev_put(isa);
+	if (!via_config->id) {
+		printk(KERN_WARNING "VP_IDE: Unknown VIA SouthBridge, disabling DMA.\n");
+		return -ENODEV;
+	}
 	return ide_setup_pci_device(dev, &via82cxxx_chipsets[id->driver_data]);
 }
 
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 31e5cc4..27d6c64 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -133,7 +133,7 @@
 #define ETH1394_DRIVER_NAME "eth1394"
 static const char driver_name[] = ETH1394_DRIVER_NAME;
 
-static kmem_cache_t *packet_task_cache;
+static struct kmem_cache *packet_task_cache;
 
 static struct hpsb_highlevel eth1394_highlevel;
 
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index d90a3a1..b935e08 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -31,9 +31,10 @@
 #include "config_roms.h"
 
 
-static void delayed_reset_bus(void * __reset_info)
+static void delayed_reset_bus(struct work_struct *work)
 {
-	struct hpsb_host *host = (struct hpsb_host*)__reset_info;
+	struct hpsb_host *host =
+		container_of(work, struct hpsb_host, delayed_reset.work);
 	int generation = host->csr.generation + 1;
 
 	/* The generation field rolls over to 2 rather than 0 per IEEE
@@ -122,7 +123,7 @@
 	int i;
 	int hostnum = 0;
 
-	h = kzalloc(sizeof(*h) + extra, SLAB_KERNEL);
+	h = kzalloc(sizeof(*h) + extra, GFP_KERNEL);
 	if (!h)
 		return NULL;
 
@@ -145,7 +146,7 @@
 
 	atomic_set(&h->generation, 0);
 
-	INIT_WORK(&h->delayed_reset, delayed_reset_bus, h);
+	INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus);
 	
 	init_timer(&h->timeout);
 	h->timeout.data = (unsigned long) h;
@@ -234,7 +235,7 @@
 		 * Config ROM in the near future. */
 		reset_delay = HZ;
 
-	PREPARE_WORK(&host->delayed_reset, delayed_reset_bus, host);
+	PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus);
 	schedule_delayed_work(&host->delayed_reset, reset_delay);
 
 	return 0;
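
delayed_reset_bus shows the delayed-work flavour of the conversion: struct
delayed_work wraps a work_struct plus a timer, so the handler steps back to its
owner through the .work member. A minimal sketch with hypothetical names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_host {
        int generation;
        struct delayed_work delayed_reset;
};

static void my_delayed_reset(struct work_struct *work)
{
        /* 'work' is &my_host->delayed_reset.work; step back to the host. */
        struct my_host *host =
                container_of(work, struct my_host, delayed_reset.work);

        host->generation++;
}

static void my_host_start(struct my_host *host)
{
        INIT_DELAYED_WORK(&host->delayed_reset, my_delayed_reset);
        schedule_delayed_work(&host->delayed_reset, HZ);        /* ~1 second */
}
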
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index bc6dbfa..d553e38 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -62,7 +62,7 @@
 	struct class_device class_dev;
 
 	int update_config_rom;
-	struct work_struct delayed_reset;
+	struct delayed_work delayed_reset;
 	unsigned int config_roms;
 
 	struct list_head addr_space;
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 8e7b83f..e829c93 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/moduleparam.h>
+#include <linux/freezer.h>
 #include <asm/atomic.h>
 
 #include "csr.h"
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 6e8ea91..eae97d8 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -1225,7 +1225,7 @@
 	int ctx;
 	int ret = -ENOMEM;
 
-	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
+	recv = kmalloc(sizeof(*recv), GFP_KERNEL);
 	if (!recv)
 		return -ENOMEM;
 
@@ -1918,7 +1918,7 @@
 	int ctx;
 	int ret = -ENOMEM;
 
-	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
+	xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
 	if (!xmit)
 		return -ENOMEM;
 
@@ -3021,7 +3021,7 @@
 			return -ENOMEM;
 		}
 
-		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
+		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
 		OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
 
                 if (d->prg_cpu[i] != NULL) {
@@ -3117,7 +3117,7 @@
 	OHCI_DMA_ALLOC("dma_rcv prg pool");
 
 	for (i = 0; i < d->num_desc; i++) {
-		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
+		d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
 		OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
 
                 if (d->prg_cpu[i] != NULL) {
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index 0a7412e..9cab1d6 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -1428,7 +1428,7 @@
         	struct i2c_algo_bit_data i2c_adapter_data;
 
         	error = -ENOMEM;
-		i2c_ad = kmalloc(sizeof(*i2c_ad), SLAB_KERNEL);
+		i2c_ad = kmalloc(sizeof(*i2c_ad), GFP_KERNEL);
         	if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
 
 		memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 5ec4f5e..bf71e06 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -112,7 +112,7 @@
 
 static inline struct pending_request *alloc_pending_request(void)
 {
-	return __alloc_pending_request(SLAB_KERNEL);
+	return __alloc_pending_request(GFP_KERNEL);
 }
 
 static void free_pending_request(struct pending_request *req)
@@ -259,7 +259,7 @@
 	if (hi != NULL) {
 		list_for_each_entry(fi, &hi->file_info_list, list) {
 			if (fi->notification == RAW1394_NOTIFY_ON) {
-				req = __alloc_pending_request(SLAB_ATOMIC);
+				req = __alloc_pending_request(GFP_ATOMIC);
 
 				if (req != NULL) {
 					req->file_info = fi;
@@ -306,13 +306,13 @@
 			if (!(fi->listen_channels & (1ULL << channel)))
 				continue;
 
-			req = __alloc_pending_request(SLAB_ATOMIC);
+			req = __alloc_pending_request(GFP_ATOMIC);
 			if (!req)
 				break;
 
 			if (!ibs) {
 				ibs = kmalloc(sizeof(*ibs) + length,
-					      SLAB_ATOMIC);
+					      GFP_ATOMIC);
 				if (!ibs) {
 					kfree(req);
 					break;
@@ -367,13 +367,13 @@
 			if (!fi->fcp_buffer)
 				continue;
 
-			req = __alloc_pending_request(SLAB_ATOMIC);
+			req = __alloc_pending_request(GFP_ATOMIC);
 			if (!req)
 				break;
 
 			if (!ibs) {
 				ibs = kmalloc(sizeof(*ibs) + length,
-					      SLAB_ATOMIC);
+					      GFP_ATOMIC);
 				if (!ibs) {
 					kfree(req);
 					break;
@@ -593,7 +593,7 @@
 	switch (req->req.type) {
 	case RAW1394_REQ_LIST_CARDS:
 		spin_lock_irqsave(&host_info_lock, flags);
-		khl = kmalloc(sizeof(*khl) * host_count, SLAB_ATOMIC);
+		khl = kmalloc(sizeof(*khl) * host_count, GFP_ATOMIC);
 
 		if (khl) {
 			req->req.misc = host_count;
@@ -1045,7 +1045,7 @@
 	}
 	if (arm_addr->notification_options & ARM_READ) {
 		DBGMSG("arm_read -> entering notification-section");
-		req = __alloc_pending_request(SLAB_ATOMIC);
+		req = __alloc_pending_request(GFP_ATOMIC);
 		if (!req) {
 			DBGMSG("arm_read -> rcode_conflict_error");
 			spin_unlock_irqrestore(&host_info_lock, irqflags);
@@ -1064,7 +1064,7 @@
 			    sizeof(struct arm_response) +
 			    sizeof(struct arm_request_response);
 		}
-		req->data = kmalloc(size, SLAB_ATOMIC);
+		req->data = kmalloc(size, GFP_ATOMIC);
 		if (!(req->data)) {
 			free_pending_request(req);
 			DBGMSG("arm_read -> rcode_conflict_error");
@@ -1198,7 +1198,7 @@
 	}
 	if (arm_addr->notification_options & ARM_WRITE) {
 		DBGMSG("arm_write -> entering notification-section");
-		req = __alloc_pending_request(SLAB_ATOMIC);
+		req = __alloc_pending_request(GFP_ATOMIC);
 		if (!req) {
 			DBGMSG("arm_write -> rcode_conflict_error");
 			spin_unlock_irqrestore(&host_info_lock, irqflags);
@@ -1209,7 +1209,7 @@
 		    sizeof(struct arm_request) + sizeof(struct arm_response) +
 		    (length) * sizeof(byte_t) +
 		    sizeof(struct arm_request_response);
-		req->data = kmalloc(size, SLAB_ATOMIC);
+		req->data = kmalloc(size, GFP_ATOMIC);
 		if (!(req->data)) {
 			free_pending_request(req);
 			DBGMSG("arm_write -> rcode_conflict_error");
@@ -1400,7 +1400,7 @@
 	if (arm_addr->notification_options & ARM_LOCK) {
 		byte_t *buf1, *buf2;
 		DBGMSG("arm_lock -> entering notification-section");
-		req = __alloc_pending_request(SLAB_ATOMIC);
+		req = __alloc_pending_request(GFP_ATOMIC);
 		if (!req) {
 			DBGMSG("arm_lock -> rcode_conflict_error");
 			spin_unlock_irqrestore(&host_info_lock, irqflags);
@@ -1408,7 +1408,7 @@
 							   The request may be retried */
 		}
 		size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response);	/* maximum */
-		req->data = kmalloc(size, SLAB_ATOMIC);
+		req->data = kmalloc(size, GFP_ATOMIC);
 		if (!(req->data)) {
 			free_pending_request(req);
 			DBGMSG("arm_lock -> rcode_conflict_error");
@@ -1628,7 +1628,7 @@
 	if (arm_addr->notification_options & ARM_LOCK) {
 		byte_t *buf1, *buf2;
 		DBGMSG("arm_lock64 -> entering notification-section");
-		req = __alloc_pending_request(SLAB_ATOMIC);
+		req = __alloc_pending_request(GFP_ATOMIC);
 		if (!req) {
 			spin_unlock_irqrestore(&host_info_lock, irqflags);
 			DBGMSG("arm_lock64 -> rcode_conflict_error");
@@ -1636,7 +1636,7 @@
 							   The request may be retried */
 		}
 		size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response);	/* maximum */
-		req->data = kmalloc(size, SLAB_ATOMIC);
+		req->data = kmalloc(size, GFP_ATOMIC);
 		if (!(req->data)) {
 			free_pending_request(req);
 			spin_unlock_irqrestore(&host_info_lock, irqflags);
@@ -1737,7 +1737,7 @@
 		return (-EINVAL);
 	}
 	/* addr-list-entry for fileinfo */
-	addr = kmalloc(sizeof(*addr), SLAB_KERNEL);
+	addr = kmalloc(sizeof(*addr), GFP_KERNEL);
 	if (!addr) {
 		req->req.length = 0;
 		return (-ENOMEM);
@@ -2103,7 +2103,7 @@
 static int get_config_rom(struct file_info *fi, struct pending_request *req)
 {
 	int ret = sizeof(struct raw1394_request);
-	quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
+	quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL);
 	int status;
 
 	if (!data)
@@ -2133,7 +2133,7 @@
 static int update_config_rom(struct file_info *fi, struct pending_request *req)
 {
 	int ret = sizeof(struct raw1394_request);
-	quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
+	quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 	if (copy_from_user(data, int2ptr(req->req.sendb), req->req.length)) {
@@ -2443,7 +2443,7 @@
 	/* only one ISO activity event may be in the queue */
 	if (!__rawiso_event_in_queue(fi)) {
 		struct pending_request *req =
-		    __alloc_pending_request(SLAB_ATOMIC);
+		    __alloc_pending_request(GFP_ATOMIC);
 
 		if (req) {
 			req->file_info = fi;
@@ -2779,7 +2779,7 @@
 {
 	struct file_info *fi;
 
-	fi = kzalloc(sizeof(*fi), SLAB_KERNEL);
+	fi = kzalloc(sizeof(*fi), GFP_KERNEL);
 	if (!fi)
 		return -ENOMEM;
 
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 6986ac1..cd156d4 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -493,20 +493,25 @@
 		scsi_unblock_requests(scsi_id->scsi_host);
 }
 
-static void sbp2util_write_orb_pointer(void *p)
+static void sbp2util_write_orb_pointer(struct work_struct *work)
 {
+	struct scsi_id_instance_data *scsi_id =
+		container_of(work, struct scsi_id_instance_data,
+			     protocol_work.work);
 	quadlet_t data[2];
 
-	data[0] = ORB_SET_NODE_ID(
-			((struct scsi_id_instance_data *)p)->hi->host->node_id);
-	data[1] = ((struct scsi_id_instance_data *)p)->last_orb_dma;
+	data[0] = ORB_SET_NODE_ID(scsi_id->hi->host->node_id);
+	data[1] = scsi_id->last_orb_dma;
 	sbp2util_cpu_to_be32_buffer(data, 8);
-	sbp2util_notify_fetch_agent(p, SBP2_ORB_POINTER_OFFSET, data, 8);
+	sbp2util_notify_fetch_agent(scsi_id, SBP2_ORB_POINTER_OFFSET, data, 8);
 }
 
-static void sbp2util_write_doorbell(void *p)
+static void sbp2util_write_doorbell(struct work_struct *work)
 {
-	sbp2util_notify_fetch_agent(p, SBP2_DOORBELL_OFFSET, NULL, 4);
+	struct scsi_id_instance_data *scsi_id =
+		container_of(work, struct scsi_id_instance_data,
+			     protocol_work.work);
+	sbp2util_notify_fetch_agent(scsi_id, SBP2_DOORBELL_OFFSET, NULL, 4);
 }
 
 /*
@@ -843,7 +848,7 @@
 	INIT_LIST_HEAD(&scsi_id->scsi_list);
 	spin_lock_init(&scsi_id->sbp2_command_orb_lock);
 	atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING);
-	INIT_WORK(&scsi_id->protocol_work, NULL, NULL);
+	INIT_DELAYED_WORK(&scsi_id->protocol_work, NULL);
 
 	ud->device.driver_data = scsi_id;
 
@@ -2047,11 +2052,10 @@
 		 * We do not accept new commands until the job is over.
 		 */
 		scsi_block_requests(scsi_id->scsi_host);
-		PREPARE_WORK(&scsi_id->protocol_work,
+		PREPARE_DELAYED_WORK(&scsi_id->protocol_work,
 			     last_orb ? sbp2util_write_doorbell:
-					sbp2util_write_orb_pointer,
-			     scsi_id);
-		schedule_work(&scsi_id->protocol_work);
+					sbp2util_write_orb_pointer);
+		schedule_delayed_work(&scsi_id->protocol_work, 0);
 	}
 }
 
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index abbe48e..1b16d6b 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -348,7 +348,7 @@
 	unsigned workarounds;
 
 	atomic_t state;
-	struct work_struct protocol_work;
+	struct delayed_work protocol_work;
 };
 
 /* For use in scsi_id_instance_data.state */
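
sbp2 picks its fetch-agent handler at runtime, hence PREPARE_DELAYED_WORK() to
re-point an idle work item and schedule_delayed_work(..., 0) to keep the old
immediate-schedule behaviour. A hedged sketch of that pattern, names hypothetical:

#include <linux/workqueue.h>

struct my_lu {
        struct delayed_work protocol_work;
};

static void my_write_doorbell(struct work_struct *work)
{
        /* ring the doorbell register ... */
}

static void my_write_orb_pointer(struct work_struct *work)
{
        /* write the next ORB pointer ... */
}

static void my_kick(struct my_lu *lu, int ring_doorbell)
{
        /* Re-point the (currently idle) work item at the handler needed now,
         * then queue it with zero delay - the old schedule_work() behaviour. */
        PREPARE_DELAYED_WORK(&lu->protocol_work,
                             ring_doorbell ? my_write_doorbell
                                           : my_write_orb_pointer);
        schedule_delayed_work(&lu->protocol_work, 0);
}
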
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index e11187e..af93979 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -55,11 +55,11 @@
 	int status;
 };
 
-static void process_req(void *data);
+static void process_req(struct work_struct *work);
 
 static DEFINE_MUTEX(lock);
 static LIST_HEAD(req_list);
-static DECLARE_WORK(work, process_req, NULL);
+static DECLARE_DELAYED_WORK(work, process_req);
 static struct workqueue_struct *addr_wq;
 
 void rdma_addr_register_client(struct rdma_addr_client *client)
@@ -139,7 +139,7 @@
 
 	mutex_lock(&lock);
 	list_for_each_entry_reverse(temp_req, &req_list, list) {
-		if (time_after(req->timeout, temp_req->timeout))
+		if (time_after_eq(req->timeout, temp_req->timeout))
 			break;
 	}
 
@@ -215,7 +215,7 @@
 	return ret;
 }
 
-static void process_req(void *data)
+static void process_req(struct work_struct *work)
 {
 	struct addr_req *req, *temp_req;
 	struct sockaddr_in *src_in, *dst_in;
@@ -225,19 +225,17 @@
 
 	mutex_lock(&lock);
 	list_for_each_entry_safe(req, temp_req, &req_list, list) {
-		if (req->status) {
+		if (req->status == -ENODATA) {
 			src_in = (struct sockaddr_in *) &req->src_addr;
 			dst_in = (struct sockaddr_in *) &req->dst_addr;
 			req->status = addr_resolve_remote(src_in, dst_in,
 							  req->addr);
+			if (req->status && time_after_eq(jiffies, req->timeout))
+				req->status = -ETIMEDOUT;
+			else if (req->status == -ENODATA)
+				continue;
 		}
-		if (req->status && time_after(jiffies, req->timeout))
-			req->status = -ETIMEDOUT;
-		else if (req->status == -ENODATA)
-			continue;
-
-		list_del(&req->list);
-		list_add_tail(&req->list, &done_list);
+		list_move_tail(&req->list, &done_list);
 	}
 
 	if (!list_empty(&req_list)) {
@@ -347,8 +345,7 @@
 		if (req->addr == addr) {
 			req->status = -ECANCELED;
 			req->timeout = jiffies;
-			list_del(&req->list);
-			list_add(&req->list, &req_list);
+			list_move(&req->list, &req_list);
 			set_timeout(req->timeout);
 			break;
 		}
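
Besides the workqueue conversion, addr.c swaps the open-coded list_del() plus
list_add()/list_add_tail() pairs for the list_move helpers, which do the same
unlink-and-relink in one call. A minimal sketch (hypothetical names):

#include <linux/list.h>

struct my_req {
        struct list_head list;
};

static LIST_HEAD(pending);
static LIST_HEAD(done);

static void my_submit(struct my_req *req)
{
        list_add_tail(&req->list, &pending);
}

static void my_complete(struct my_req *req)
{
        /* Same as list_del(&req->list) followed by
         * list_add_tail(&req->list, &done), in a single helper. */
        list_move_tail(&req->list, &done);
}
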
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 20e9f64..98272fb 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -285,9 +285,10 @@
 	kfree(tprops);
 }
 
-static void ib_cache_task(void *work_ptr)
+static void ib_cache_task(struct work_struct *_work)
 {
-	struct ib_update_work *work = work_ptr;
+	struct ib_update_work *work =
+		container_of(_work, struct ib_update_work, work);
 
 	ib_cache_update(work->device, work->port_num);
 	kfree(work);
@@ -306,7 +307,7 @@
 	    event->event == IB_EVENT_CLIENT_REREGISTER) {
 		work = kmalloc(sizeof *work, GFP_ATOMIC);
 		if (work) {
-			INIT_WORK(&work->work, ib_cache_task, work);
+			INIT_WORK(&work->work, ib_cache_task);
 			work->device   = event->device;
 			work->port_num = event->element.port_num;
 			schedule_work(&work->work);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 25b1018..79c937b 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -101,7 +101,7 @@
 };
 
 struct cm_work {
-	struct work_struct work;
+	struct delayed_work work;
 	struct list_head list;
 	struct cm_port *port;
 	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
@@ -147,12 +147,12 @@
 	__be32 rq_psn;
 	int timeout_ms;
 	enum ib_mtu path_mtu;
+	__be16 pkey;
 	u8 private_data_len;
 	u8 max_cm_retries;
 	u8 peer_to_peer;
 	u8 responder_resources;
 	u8 initiator_depth;
-	u8 local_ack_timeout;
 	u8 retry_count;
 	u8 rnr_retry_count;
 	u8 service_timeout;
@@ -161,7 +161,7 @@
 	atomic_t work_count;
 };
 
-static void cm_work_handler(void *data);
+static void cm_work_handler(struct work_struct *work);
 
 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
 {
@@ -240,11 +240,10 @@
 	if (!private_data || !private_data_len)
 		return NULL;
 
-	data = kmalloc(private_data_len, GFP_KERNEL);
+	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
 	if (!data)
 		return ERR_PTR(-ENOMEM);
 
-	memcpy(data, private_data, private_data_len);
 	return data;
 }
 
@@ -669,8 +668,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	timewait_info->work.local_id = local_id;
-	INIT_WORK(&timewait_info->work.work, cm_work_handler,
-		  &timewait_info->work);
+	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
 	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
 	return timewait_info;
 }
@@ -691,7 +689,7 @@
 	 * timewait before notifying the user that we've exited timewait.
 	 */
 	cm_id_priv->id.state = IB_CM_TIMEWAIT;
-	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
+	wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
 	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
 			   msecs_to_jiffies(wait_time));
 	cm_id_priv->timewait_info = NULL;
@@ -1010,6 +1008,7 @@
 	cm_id_priv->responder_resources = param->responder_resources;
 	cm_id_priv->retry_count = param->retry_count;
 	cm_id_priv->path_mtu = param->primary_path->mtu;
+	cm_id_priv->pkey = param->primary_path->pkey;
 	cm_id_priv->qp_type = param->qp_type;
 
 	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
@@ -1024,8 +1023,6 @@
 
 	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
 	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
-	cm_id_priv->local_ack_timeout =
-				cm_req_get_primary_local_ack_timeout(req_msg);
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
@@ -1410,9 +1407,8 @@
 	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
 	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
 	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
+	cm_id_priv->pkey = req_msg->pkey;
 	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
-	cm_id_priv->local_ack_timeout =
-				cm_req_get_primary_local_ack_timeout(req_msg);
 	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
 	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
 	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
@@ -1716,7 +1712,7 @@
 	unsigned long flags;
 	int ret;
 
-	/* See comment in ib_cm_establish about lookup. */
+	/* See comment in cm_establish about lookup. */
 	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
 	if (!cm_id_priv)
 		return -EINVAL;
@@ -2402,11 +2398,16 @@
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	if (cm_id->state != IB_CM_ESTABLISHED ||
-	    cm_id->lap_state != IB_CM_LAP_IDLE) {
+	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
+	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
+	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+	if (ret)
+		goto out;
+
 	ret = cm_alloc_msg(cm_id_priv, &msg);
 	if (ret)
 		goto out;
@@ -2431,7 +2432,8 @@
 }
 EXPORT_SYMBOL(ib_send_cm_lap);
 
-static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
+static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
+				    struct ib_sa_path_rec *path,
 				    struct cm_lap_msg *lap_msg)
 {
 	memset(path, 0, sizeof *path);
@@ -2443,10 +2445,10 @@
 	path->hop_limit = lap_msg->alt_hop_limit;
 	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
 	path->reversible = 1;
-	/* pkey is same as in REQ */
+	path->pkey = cm_id_priv->pkey;
 	path->sl = cm_lap_get_sl(lap_msg);
 	path->mtu_selector = IB_SA_EQ;
-	/* mtu is same as in REQ */
+	path->mtu = cm_id_priv->path_mtu;
 	path->rate_selector = IB_SA_EQ;
 	path->rate = cm_lap_get_packet_rate(lap_msg);
 	path->packet_life_time_selector = IB_SA_EQ;
@@ -2472,7 +2474,7 @@
 
 	param = &work->cm_event.param.lap_rcvd;
 	param->alternate_path = &work->path[0];
-	cm_format_path_from_lap(param->alternate_path, lap_msg);
+	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
 	work->cm_event.private_data = &lap_msg->private_data;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -2480,6 +2482,7 @@
 		goto unlock;
 
 	switch (cm_id_priv->id.lap_state) {
+	case IB_CM_LAP_UNINIT:
 	case IB_CM_LAP_IDLE:
 		break;
 	case IB_CM_MRA_LAP_SENT:
@@ -2502,6 +2505,10 @@
 
 	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
 	cm_id_priv->tid = lap_msg->hdr.tid;
+	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+				work->mad_recv_wc->recv_buf.grh,
+				&cm_id_priv->av);
+	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -2987,9 +2994,9 @@
 	}
 }
 
-static void cm_work_handler(void *data)
+static void cm_work_handler(struct work_struct *_work)
 {
-	struct cm_work *work = data;
+	struct cm_work *work = container_of(_work, struct cm_work, work.work);
 	int ret;
 
 	switch (work->cm_event.event) {
@@ -3040,7 +3047,7 @@
 		cm_free_work(work);
 }
 
-int ib_cm_establish(struct ib_cm_id *cm_id)
+static int cm_establish(struct ib_cm_id *cm_id)
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_work *work;
@@ -3079,16 +3086,53 @@
 	 * we need to find the cm_id once we're in the context of the
 	 * worker thread, rather than holding a reference on it.
 	 */
-	INIT_WORK(&work->work, cm_work_handler, work);
+	INIT_DELAYED_WORK(&work->work, cm_work_handler);
 	work->local_id = cm_id->local_id;
 	work->remote_id = cm_id->remote_id;
 	work->mad_recv_wc = NULL;
 	work->cm_event.event = IB_CM_USER_ESTABLISHED;
-	queue_work(cm.wq, &work->work);
+	queue_delayed_work(cm.wq, &work->work, 0);
 out:
 	return ret;
 }
-EXPORT_SYMBOL(ib_cm_establish);
+
+static int cm_migrate(struct ib_cm_id *cm_id)
+{
+	struct cm_id_private *cm_id_priv;
+	unsigned long flags;
+	int ret = 0;
+
+	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	if (cm_id->state == IB_CM_ESTABLISHED &&
+	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
+	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
+		cm_id->lap_state = IB_CM_LAP_IDLE;
+		cm_id_priv->av = cm_id_priv->alt_av;
+	} else
+		ret = -EINVAL;
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+	return ret;
+}
+
+int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
+{
+	int ret;
+
+	switch (event) {
+	case IB_EVENT_COMM_EST:
+		ret = cm_establish(cm_id);
+		break;
+	case IB_EVENT_PATH_MIG:
+		ret = cm_migrate(cm_id);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(ib_cm_notify);
 
 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
 			    struct ib_mad_recv_wc *mad_recv_wc)
@@ -3146,11 +3190,11 @@
 		return;
 	}
 
-	INIT_WORK(&work->work, cm_work_handler, work);
+	INIT_DELAYED_WORK(&work->work, cm_work_handler);
 	work->cm_event.event = event;
 	work->mad_recv_wc = mad_recv_wc;
 	work->port = (struct cm_port *)mad_agent->context;
-	queue_work(cm.wq, &work->work);
+	queue_delayed_work(cm.wq, &work->work, 0);
 }
 
 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
@@ -3173,8 +3217,7 @@
 	case IB_CM_ESTABLISHED:
 		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
 				IB_QP_PKEY_INDEX | IB_QP_PORT;
-		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
-					   IB_ACCESS_REMOTE_WRITE;
+		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
 		if (cm_id_priv->responder_resources)
 			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
 						    IB_ACCESS_REMOTE_ATOMIC;
@@ -3222,6 +3265,9 @@
 		if (cm_id_priv->alt_av.ah_attr.dlid) {
 			*qp_attr_mask |= IB_QP_ALT_PATH;
 			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+			qp_attr->alt_timeout =
+					cm_id_priv->alt_av.packet_life_time + 1;
 			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
 		}
 		ret = 0;
@@ -3248,19 +3294,31 @@
 	case IB_CM_REP_SENT:
 	case IB_CM_MRA_REP_RCVD:
 	case IB_CM_ESTABLISHED:
-		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
-		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
-		if (cm_id_priv->qp_type == IB_QPT_RC) {
-			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
-					 IB_QP_RNR_RETRY |
-					 IB_QP_MAX_QP_RD_ATOMIC;
-			qp_attr->timeout = cm_id_priv->local_ack_timeout;
-			qp_attr->retry_cnt = cm_id_priv->retry_count;
-			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
-			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
-		}
-		if (cm_id_priv->alt_av.ah_attr.dlid) {
-			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
+			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
+			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
+			if (cm_id_priv->qp_type == IB_QPT_RC) {
+				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
+						 IB_QP_RNR_RETRY |
+						 IB_QP_MAX_QP_RD_ATOMIC;
+				qp_attr->timeout =
+					cm_id_priv->av.packet_life_time + 1;
+				qp_attr->retry_cnt = cm_id_priv->retry_count;
+				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
+				qp_attr->max_rd_atomic =
+					cm_id_priv->initiator_depth;
+			}
+			if (cm_id_priv->alt_av.ah_attr.dlid) {
+				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+				qp_attr->path_mig_state = IB_MIG_REARM;
+			}
+		} else {
+			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
+			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+			qp_attr->alt_timeout =
+				cm_id_priv->alt_av.packet_life_time + 1;
+			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
 			qp_attr->path_mig_state = IB_MIG_REARM;
 		}
 		ret = 0;
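
The private-data copies in cm.c (and in iwcm.c and ucm.c further down) now go
through kmemdup(), which allocates and copies in one step and returns NULL on
failure just like kmalloc(). A one-function sketch with a hypothetical name:

#include <linux/slab.h>
#include <linux/string.h>

static void *copy_blob(const void *src, size_t len)
{
        /* Replaces kmalloc() followed by memcpy(); NULL on allocation failure. */
        return kmemdup(src, len, GFP_KERNEL);
}
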
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 845090b..985a6b5 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -344,7 +344,7 @@
 		return ret;
 
 	qp_attr.qp_state = IB_QPS_INIT;
-	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+	qp_attr.qp_access_flags = 0;
 	qp_attr.port_num = id_priv->id.port_num;
 	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
 					  IB_QP_PKEY_INDEX | IB_QP_PORT);
@@ -935,13 +935,8 @@
 	mutex_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
 	mutex_unlock(&lock);
-	if (ret) {
-		ret = -ENODEV;
-		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
-		rdma_destroy_id(&conn_id->id);
-		goto out;
-	}
+	if (ret)
+		goto release_conn_id;
 
 	conn_id->cm_id.ib = cm_id;
 	cm_id->context = conn_id;
@@ -951,13 +946,17 @@
 	ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
 			      ib_event->private_data + offset,
 			      IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
-	if (ret) {
-		/* Destroy the CM ID by returning a non-zero value. */
-		conn_id->cm_id.ib = NULL;
-		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
-		rdma_destroy_id(&conn_id->id);
-	}
+	if (!ret)
+		goto out;
+
+	/* Destroy the CM ID by returning a non-zero value. */
+	conn_id->cm_id.ib = NULL;
+
+release_conn_id:
+	cma_exch(conn_id, CMA_DESTROYING);
+	cma_release_remove(conn_id);
+	rdma_destroy_id(&conn_id->id);
+
 out:
 	cma_release_remove(listen_id);
 	return ret;
@@ -1341,9 +1340,9 @@
 	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
 }
 
-static void cma_work_handler(void *data)
+static void cma_work_handler(struct work_struct *_work)
 {
-	struct cma_work *work = data;
+	struct cma_work *work = container_of(_work, struct cma_work, work);
 	struct rdma_id_private *id_priv = work->id;
 	int destroy = 0;
 
@@ -1374,7 +1373,7 @@
 		return -ENOMEM;
 
 	work->id = id_priv;
-	INIT_WORK(&work->work, cma_work_handler, work);
+	INIT_WORK(&work->work, cma_work_handler);
 	work->old_state = CMA_ROUTE_QUERY;
 	work->new_state = CMA_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1431,7 +1430,7 @@
 		return -ENOMEM;
 
 	work->id = id_priv;
-	INIT_WORK(&work->work, cma_work_handler, work);
+	INIT_WORK(&work->work, cma_work_handler);
 	work->old_state = CMA_ROUTE_QUERY;
 	work->new_state = CMA_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1481,19 +1480,18 @@
 	u8 p;
 
 	mutex_lock(&lock);
-	list_for_each_entry(cma_dev, &dev_list, list)
-		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
-			if (!ib_query_port (cma_dev->device, p, &port_attr) &&
-			    port_attr.state == IB_PORT_ACTIVE)
-				goto port_found;
-
-	if (!list_empty(&dev_list)) {
-		p = 1;
-		cma_dev = list_entry(dev_list.next, struct cma_device, list);
-	} else {
+	if (list_empty(&dev_list)) {
 		ret = -ENODEV;
 		goto out;
 	}
+	list_for_each_entry(cma_dev, &dev_list, list)
+		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
+			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
+			    port_attr.state == IB_PORT_ACTIVE)
+				goto port_found;
+
+	p = 1;
+	cma_dev = list_entry(dev_list.next, struct cma_device, list);
 
 port_found:
 	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
@@ -1585,7 +1583,7 @@
 	}
 
 	work->id = id_priv;
-	INIT_WORK(&work->work, cma_work_handler, work);
+	INIT_WORK(&work->work, cma_work_handler);
 	work->old_state = CMA_ADDR_QUERY;
 	work->new_state = CMA_ADDR_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
@@ -2123,8 +2121,6 @@
 
 	cma_dev->device = device;
 	cma_dev->node_guid = device->node_guid;
-	if (!cma_dev->node_guid)
-		goto err;
 
 	init_completion(&cma_dev->comp);
 	atomic_set(&cma_dev->refcount, 1);
@@ -2136,9 +2132,6 @@
 	list_for_each_entry(id_priv, &listen_any_list, list)
 		cma_listen_on_dev(id_priv, cma_dev);
 	mutex_unlock(&lock);
-	return;
-err:
-	kfree(cma_dev);
 }
 
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index c3fb304..1039ad5 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -80,7 +80,7 @@
  * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
  *    the backlog is exceeded, then no more connection request events will
  *    be processed.  cm_event_handler() returns -ENOMEM in this case.  Its up
- *    to the provider to reject the connectino request.
+ *    to the provider to reject the connection request.
  * 2) in the connection request workqueue handler, cm_conn_req_handler().
  *    If work elements cannot be allocated for the new connect request cm_id,
  *    then IWCM will call the provider reject method.  This is ok since
@@ -131,26 +131,25 @@
 }
 
 /*
- * Save private data from incoming connection requests in the
- * cm_id_priv so the low level driver doesn't have to.  Adjust
+ * Save private data from incoming connection requests to
+ * iw_cm_event, so the low level driver doesn't have to. Adjust
  * the event ptr to point to the local copy.
  */
-static int copy_private_data(struct iwcm_id_private *cm_id_priv,
-		       struct iw_cm_event *event)
+static int copy_private_data(struct iw_cm_event *event)
 {
 	void *p;
 
-	p = kmalloc(event->private_data_len, GFP_ATOMIC);
+	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
 	if (!p)
 		return -ENOMEM;
-	memcpy(p, event->private_data, event->private_data_len);
 	event->private_data = p;
 	return 0;
 }
 
 /*
- * Release a reference on cm_id. If the last reference is being removed
- * and iw_destroy_cm_id is waiting, wake up the waiting thread.
+ * Release a reference on cm_id. If the last reference is being
+ * released, enable the waiting thread (in iw_destroy_cm_id) to
+ * get woken up, and return 1 if a thread is already waiting.
  */
 static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
 {
@@ -243,7 +242,7 @@
 /*
  * CM_ID <-- CLOSING
  *
- * Block if a passive or active connection is currenlty being processed. Then
+ * Block if a passive or active connection is currently being processed. Then
  * process the event as follows:
  * - If we are ESTABLISHED, move to CLOSING and modify the QP state
  *   based on the abrupt flag
@@ -408,7 +407,7 @@
 {
 	struct iwcm_id_private *cm_id_priv;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
@@ -535,7 +534,7 @@
 int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 {
 	struct iwcm_id_private *cm_id_priv;
-	int ret = 0;
+	int ret;
 	unsigned long flags;
 	struct ib_qp *qp;
 
@@ -620,7 +619,7 @@
 	spin_lock_irqsave(&listen_id_priv->lock, flags);
 	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
 		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
-		return;
+		goto out;
 	}
 	spin_unlock_irqrestore(&listen_id_priv->lock, flags);
 
@@ -629,7 +628,7 @@
 				listen_id_priv->id.context);
 	/* If the cm_id could not be created, ignore the request */
 	if (IS_ERR(cm_id))
-		return;
+		goto out;
 
 	cm_id->provider_data = iw_event->provider_data;
 	cm_id->local_addr = iw_event->local_addr;
@@ -642,7 +641,7 @@
 	if (ret) {
 		iw_cm_reject(cm_id, NULL, 0);
 		iw_destroy_cm_id(cm_id);
-		return;
+		goto out;
 	}
 
 	/* Call the client CM handler */
@@ -654,6 +653,7 @@
 			kfree(cm_id);
 	}
 
+out:
 	if (iw_event->private_data_len)
 		kfree(iw_event->private_data);
 }
@@ -674,7 +674,7 @@
 			       struct iw_cm_event *iw_event)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 
@@ -704,7 +704,7 @@
 			       struct iw_cm_event *iw_event)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	/*
@@ -828,9 +828,10 @@
  * thread asleep on the destroy_comp list vs. an object destroyed
  * here synchronously when the last reference is removed.
  */
-static void cm_work_handler(void *arg)
+static void cm_work_handler(struct work_struct *_work)
 {
-	struct iwcm_work *work = arg, lwork;
+	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
+	struct iw_cm_event levent;
 	struct iwcm_id_private *cm_id_priv = work->cm_id;
 	unsigned long flags;
 	int empty;
@@ -843,11 +844,11 @@
 				  struct iwcm_work, list);
 		list_del_init(&work->list);
 		empty = list_empty(&cm_id_priv->work_list);
-		lwork = *work;
+		levent = work->event;
 		put_work(work);
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-		ret = process_event(cm_id_priv, &work->event);
+		ret = process_event(cm_id_priv, &levent);
 		if (ret) {
 			set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
 			destroy_cm_id(&cm_id_priv->id);
@@ -899,14 +900,14 @@
 		goto out;
 	}
 
-	INIT_WORK(&work->work, cm_work_handler, work);
+	INIT_WORK(&work->work, cm_work_handler);
 	work->cm_id = cm_id_priv;
 	work->event = *iw_event;
 
 	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
 	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
 	    work->event.private_data_len) {
-		ret = copy_private_data(cm_id_priv, &work->event);
+		ret = copy_private_data(&work->event);
 		if (ret) {
 			put_work(work);
 			goto out;
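
The cm_work_handler change in iwcm.c is also a lifetime fix: the event is copied
into the local levent before put_work() recycles the work element, and
process_event() then runs on the copy. A sketch of that copy-before-release
pattern with hypothetical types:

#include <linux/slab.h>

struct my_event {
        int type;
};

struct my_work {
        struct my_event event;
};

static void my_process(struct my_event *ev)
{
        /* handle the event ... */
}

static void my_handle_and_free(struct my_work *work)
{
        struct my_event levent = work->event;   /* copy out first...            */

        kfree(work);                            /* ...the work item is now gone */
        my_process(&levent);                    /* safe: we use the local copy  */
}
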
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index a72bcea..15f38d9 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -46,7 +46,7 @@
 MODULE_AUTHOR("Hal Rosenstock");
 MODULE_AUTHOR("Sean Hefty");
 
-static kmem_cache_t *ib_mad_cache;
+static struct kmem_cache *ib_mad_cache;
 
 static struct list_head ib_mad_port_list;
 static u32 ib_mad_client_id = 0;
@@ -65,8 +65,8 @@
 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 				    struct ib_mad_private *mad);
 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
-static void timeout_sends(void *data);
-static void local_completions(void *data);
+static void timeout_sends(struct work_struct *work);
+static void local_completions(struct work_struct *work);
 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 			      struct ib_mad_agent_private *agent_priv,
 			      u8 mgmt_class);
@@ -356,10 +356,9 @@
 	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
 	INIT_LIST_HEAD(&mad_agent_priv->done_list);
 	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
-	INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
+	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
 	INIT_LIST_HEAD(&mad_agent_priv->local_list);
-	INIT_WORK(&mad_agent_priv->local_work, local_completions,
-		   mad_agent_priv);
+	INIT_WORK(&mad_agent_priv->local_work, local_completions);
 	atomic_set(&mad_agent_priv->refcount, 1);
 	init_completion(&mad_agent_priv->comp);
 
@@ -2198,12 +2197,12 @@
 /*
  * IB MAD completion callback
  */
-static void ib_mad_completion_handler(void *data)
+static void ib_mad_completion_handler(struct work_struct *work)
 {
 	struct ib_mad_port_private *port_priv;
 	struct ib_wc wc;
 
-	port_priv = (struct ib_mad_port_private *)data;
+	port_priv = container_of(work, struct ib_mad_port_private, work);
 	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
 
 	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
@@ -2324,7 +2323,7 @@
 }
 EXPORT_SYMBOL(ib_cancel_mad);
 
-static void local_completions(void *data)
+static void local_completions(struct work_struct *work)
 {
 	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_mad_local_private *local;
@@ -2334,7 +2333,8 @@
 	struct ib_wc wc;
 	struct ib_mad_send_wc mad_send_wc;
 
-	mad_agent_priv = (struct ib_mad_agent_private *)data;
+	mad_agent_priv =
+		container_of(work, struct ib_mad_agent_private, local_work);
 
 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
 	while (!list_empty(&mad_agent_priv->local_list)) {
@@ -2434,14 +2434,15 @@
 	return ret;
 }
 
-static void timeout_sends(void *data)
+static void timeout_sends(struct work_struct *work)
 {
 	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_mad_send_wr_private *mad_send_wr;
 	struct ib_mad_send_wc mad_send_wc;
 	unsigned long flags, delay;
 
-	mad_agent_priv = (struct ib_mad_agent_private *)data;
+	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
+				      timed_work.work);
 	mad_send_wc.vendor_err = 0;
 
 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
@@ -2799,7 +2800,7 @@
 		ret = -ENOMEM;
 		goto error8;
 	}
-	INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
+	INIT_WORK(&port_priv->work, ib_mad_completion_handler);
 
 	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
 	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d06b590..d5548e7 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -102,7 +102,7 @@
 	struct list_head send_list;
 	struct list_head wait_list;
 	struct list_head done_list;
-	struct work_struct timed_work;
+	struct delayed_work timed_work;
 	unsigned long timeout;
 	struct list_head local_list;
 	struct work_struct local_work;
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 1ef79d0..3663fd7 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -45,8 +45,8 @@
 struct mad_rmpp_recv {
 	struct ib_mad_agent_private *agent;
 	struct list_head list;
-	struct work_struct timeout_work;
-	struct work_struct cleanup_work;
+	struct delayed_work timeout_work;
+	struct delayed_work cleanup_work;
 	struct completion comp;
 	enum rmpp_state state;
 	spinlock_t lock;
@@ -233,9 +233,10 @@
 	}
 }
 
-static void recv_timeout_handler(void *data)
+static void recv_timeout_handler(struct work_struct *work)
 {
-	struct mad_rmpp_recv *rmpp_recv = data;
+	struct mad_rmpp_recv *rmpp_recv =
+		container_of(work, struct mad_rmpp_recv, timeout_work.work);
 	struct ib_mad_recv_wc *rmpp_wc;
 	unsigned long flags;
 
@@ -254,9 +255,10 @@
 	ib_free_recv_mad(rmpp_wc);
 }
 
-static void recv_cleanup_handler(void *data)
+static void recv_cleanup_handler(struct work_struct *work)
 {
-	struct mad_rmpp_recv *rmpp_recv = data;
+	struct mad_rmpp_recv *rmpp_recv =
+		container_of(work, struct mad_rmpp_recv, cleanup_work.work);
 	unsigned long flags;
 
 	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
@@ -285,8 +287,8 @@
 
 	rmpp_recv->agent = agent;
 	init_completion(&rmpp_recv->comp);
-	INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
-	INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
+	INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
+	INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
 	spin_lock_init(&rmpp_recv->lock);
 	rmpp_recv->state = RMPP_STATE_ACTIVE;
 	atomic_set(&rmpp_recv->refcount, 1);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1706d3c..e45afba 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -360,9 +360,10 @@
 	kfree(sm_ah);
 }
 
-static void update_sm_ah(void *port_ptr)
+static void update_sm_ah(struct work_struct *work)
 {
-	struct ib_sa_port *port = port_ptr;
+	struct ib_sa_port *port =
+		container_of(work, struct ib_sa_port, update_task);
 	struct ib_sa_sm_ah *new_ah, *old_ah;
 	struct ib_port_attr port_attr;
 	struct ib_ah_attr   ah_attr;
@@ -992,8 +993,7 @@
 		if (IS_ERR(sa_dev->port[i].agent))
 			goto err;
 
-		INIT_WORK(&sa_dev->port[i].update_task,
-			  update_sm_ah, &sa_dev->port[i]);
+		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
 	}
 
 	ib_set_client_data(device, &sa_client, sa_dev);
@@ -1010,7 +1010,7 @@
 		goto err;
 
 	for (i = 0; i <= e - s; ++i)
-		update_sm_ah(&sa_dev->port[i]);
+		update_sm_ah(&sa_dev->port[i].update_task);
 
 	return;
 
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index ad4f4d5..f15220a 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -161,12 +161,14 @@
 				    struct ib_ucm_event, ctx_list);
 		list_del(&uevent->file_list);
 		list_del(&uevent->ctx_list);
+		mutex_unlock(&ctx->file->file_mutex);
 
 		/* clear incoming connections. */
 		if (ib_ucm_new_cm_id(uevent->resp.event))
 			ib_destroy_cm_id(uevent->cm_id);
 
 		kfree(uevent);
+		mutex_lock(&ctx->file->file_mutex);
 	}
 	mutex_unlock(&ctx->file->file_mutex);
 }
@@ -328,20 +330,18 @@
 	}
 
 	if (uvt->data_len) {
-		uvt->data = kmalloc(uvt->data_len, GFP_KERNEL);
+		uvt->data = kmemdup(evt->private_data, uvt->data_len, GFP_KERNEL);
 		if (!uvt->data)
 			goto err1;
 
-		memcpy(uvt->data, evt->private_data, uvt->data_len);
 		uvt->resp.present |= IB_UCM_PRES_DATA;
 	}
 
 	if (uvt->info_len) {
-		uvt->info = kmalloc(uvt->info_len, GFP_KERNEL);
+		uvt->info = kmemdup(info, uvt->info_len, GFP_KERNEL);
 		if (!uvt->info)
 			goto err2;
 
-		memcpy(uvt->info, info, uvt->info_len);
 		uvt->resp.present |= IB_UCM_PRES_INFO;
 	}
 	return 0;
@@ -685,11 +685,11 @@
 	return result;
 }
 
-static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
-				const char __user *inbuf,
-				int in_len, int out_len)
+static ssize_t ib_ucm_notify(struct ib_ucm_file *file,
+			     const char __user *inbuf,
+			     int in_len, int out_len)
 {
-	struct ib_ucm_establish cmd;
+	struct ib_ucm_notify cmd;
 	struct ib_ucm_context *ctx;
 	int result;
 
@@ -700,7 +700,7 @@
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	result = ib_cm_establish(ctx->cm_id);
+	result = ib_cm_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
 	ib_ucm_ctx_put(ctx);
 	return result;
 }
@@ -1107,7 +1107,7 @@
 	[IB_USER_CM_CMD_DESTROY_ID]    = ib_ucm_destroy_id,
 	[IB_USER_CM_CMD_ATTR_ID]       = ib_ucm_attr_id,
 	[IB_USER_CM_CMD_LISTEN]        = ib_ucm_listen,
-	[IB_USER_CM_CMD_ESTABLISH]     = ib_ucm_establish,
+	[IB_USER_CM_CMD_NOTIFY]        = ib_ucm_notify,
 	[IB_USER_CM_CMD_SEND_REQ]      = ib_ucm_send_req,
 	[IB_USER_CM_CMD_SEND_REP]      = ib_ucm_send_rep,
 	[IB_USER_CM_CMD_SEND_RTU]      = ib_ucm_send_rtu,
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index efe147d..db12cc0 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -179,9 +179,10 @@
 	up_write(&current->mm->mmap_sem);
 }
 
-static void ib_umem_account(void *work_ptr)
+static void ib_umem_account(struct work_struct *_work)
 {
-	struct ib_umem_account_work *work = work_ptr;
+	struct ib_umem_account_work *work =
+		container_of(_work, struct ib_umem_account_work, work);
 
 	down_write(&work->mm->mmap_sem);
 	work->mm->locked_vm -= work->diff;
@@ -216,7 +217,7 @@
 		return;
 	}
 
-	INIT_WORK(&work->work, ib_umem_account, work);
+	INIT_WORK(&work->work, ib_umem_account);
 	work->mm   = mm;
 	work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
 
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index 1b17dcd..04a9db5 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -302,7 +302,7 @@
 	unsigned long pa;	/* PA device memory */
 	void **qptr_array;
 
-	kmem_cache_t *host_msg_cache;
+	struct kmem_cache *host_msg_cache;
 
 	struct list_head cca_link;		/* adapter list */
 	struct list_head eh_wakeup_list;	/* event wakeup list */
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 5bcf697..179d005 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -564,6 +564,32 @@
 	return err;
 }
 
+static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+	if (send_cq == recv_cq)
+		spin_lock_irq(&send_cq->lock);
+	else if (send_cq > recv_cq) {
+		spin_lock_irq(&send_cq->lock);
+		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+	} else {
+		spin_lock_irq(&recv_cq->lock);
+		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+	}
+}
+
+static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+	if (send_cq == recv_cq)
+		spin_unlock_irq(&send_cq->lock);
+	else if (send_cq > recv_cq) {
+		spin_unlock(&recv_cq->lock);
+		spin_unlock_irq(&send_cq->lock);
+	} else {
+		spin_unlock(&send_cq->lock);
+		spin_unlock_irq(&recv_cq->lock);
+	}
+}
+
 void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
 {
 	struct c2_cq *send_cq;
@@ -576,15 +602,9 @@
 	 * Lock CQs here, so that CQ polling code can do QP lookup
 	 * without taking a lock.
 	 */
-	spin_lock_irq(&send_cq->lock);
-	if (send_cq != recv_cq)
-		spin_lock(&recv_cq->lock);
-
+	c2_lock_cqs(send_cq, recv_cq);
 	c2_free_qpn(c2dev, qp->qpn);
-
-	if (send_cq != recv_cq)
-		spin_unlock(&recv_cq->lock);
-	spin_unlock_irq(&send_cq->lock);
+	c2_unlock_cqs(send_cq, recv_cq);
 
 	/*
 	 * Destory qp in the rnic...
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 623dc95..1687c51 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -441,7 +441,7 @@
  * involves initializing the various limits and resource pools that
  * comprise the RNIC instance.
  */
-int c2_rnic_init(struct c2_dev *c2dev)
+int __devinit c2_rnic_init(struct c2_dev *c2dev)
 {
 	int err;
 	u32 qsize, msgsize;
@@ -611,7 +611,7 @@
 /*
  * Called by c2_remove to cleanup the RNIC resources.
  */
-void c2_rnic_term(struct c2_dev *c2dev)
+void __devexit c2_rnic_term(struct c2_dev *c2dev)
 {
 
 	/* Close the open adapter instance */
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c
index 40caeb5..36620a2 100644
--- a/drivers/infiniband/hw/amso1100/c2_vq.c
+++ b/drivers/infiniband/hw/amso1100/c2_vq.c
@@ -164,7 +164,7 @@
  */
 void *vq_repbuf_alloc(struct c2_dev *c2dev)
 {
-	return kmem_cache_alloc(c2dev->host_msg_cache, SLAB_ATOMIC);
+	return kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC);
 }
 
 /*
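
[Editor's note] The SLAB_ATOMIC -> GFP_ATOMIC change above is the first of many in this merge; the ehca, ads7846, and gigaset hunks below repeat it. The legacy SLAB_* allocation flags are being retired in favour of the GFP_* names. This is a pure rename with no behavioural change, since the old macros were simple aliases, roughly (2.6.18-era <linux/slab.h>, quoted from memory):

#define SLAB_ATOMIC	GFP_ATOMIC
#define SLAB_KERNEL	GFP_KERNEL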
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
index 214e2fd..0d6e2c4 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -57,7 +57,7 @@
 	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
 					      ib_device);
 
-	av = kmem_cache_alloc(av_cache, SLAB_KERNEL);
+	av = kmem_cache_alloc(av_cache, GFP_KERNEL);
 	if (!av) {
 		ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
 			 pd, ah_attr);
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 458fe19..93995b6 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -134,7 +134,7 @@
 	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
 		return ERR_PTR(-EINVAL);
 
-	my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL);
+	my_cq = kmem_cache_alloc(cq_cache, GFP_KERNEL);
 	if (!my_cq) {
 		ehca_err(device, "Out of memory for ehca_cq struct device=%p",
 			 device);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 01f5aa9..cc47e4c 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0018");
+MODULE_VERSION("SVNEHCA_0019");
 
 int ehca_open_aqp1     = 0;
 int ehca_debug_level   = 0;
@@ -108,7 +108,7 @@
 
 void *ehca_alloc_fw_ctrlblock(void)
 {
-	void *ret = kmem_cache_zalloc(ctblk_cache, SLAB_KERNEL);
+	void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL);
 	if (!ret)
 		ehca_gen_err("Out of memory for ctblk");
 	return ret;
@@ -790,7 +790,7 @@
 	int ret;
 
 	printk(KERN_INFO "eHCA Infiniband Device Driver "
-	                 "(Rel.: SVNEHCA_0018)\n");
+	                 "(Rel.: SVNEHCA_0019)\n");
 	idr_init(&ehca_qp_idr);
 	idr_init(&ehca_cq_idr);
 	spin_lock_init(&ehca_qp_idr_lock);
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index abce676..0a5e221 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -53,7 +53,7 @@
 {
 	struct ehca_mr *me;
 
-	me = kmem_cache_alloc(mr_cache, SLAB_KERNEL);
+	me = kmem_cache_alloc(mr_cache, GFP_KERNEL);
 	if (me) {
 		memset(me, 0, sizeof(struct ehca_mr));
 		spin_lock_init(&me->mrlock);
@@ -72,7 +72,7 @@
 {
 	struct ehca_mw *me;
 
-	me = kmem_cache_alloc(mw_cache, SLAB_KERNEL);
+	me = kmem_cache_alloc(mw_cache, GFP_KERNEL);
 	if (me) {
 		memset(me, 0, sizeof(struct ehca_mw));
 		spin_lock_init(&me->mwlock);
diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/infiniband/hw/ehca/ehca_pd.c
index 2c3cdc6..d5345e5 100644
--- a/drivers/infiniband/hw/ehca/ehca_pd.c
+++ b/drivers/infiniband/hw/ehca/ehca_pd.c
@@ -50,7 +50,7 @@
 {
 	struct ehca_pd *pd;
 
-	pd = kmem_cache_alloc(pd_cache, SLAB_KERNEL);
+	pd = kmem_cache_alloc(pd_cache, GFP_KERNEL);
 	if (!pd) {
 		ehca_err(device, "device=%p context=%p out of memory",
 			 device, context);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index cf3e50e..c6c9cef 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -450,7 +450,7 @@
 	if (pd->uobject && udata)
 		context = pd->uobject->context;
 
-	my_qp = kmem_cache_alloc(qp_cache, SLAB_KERNEL);
+	my_qp = kmem_cache_alloc(qp_cache, GFP_KERNEL);
 	if (!my_qp) {
 		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
 		return ERR_PTR(-ENOMEM);
@@ -732,8 +732,7 @@
 	u64 h_ret;
 	struct ipz_queue *squeue;
 	void *bad_send_wqe_p, *bad_send_wqe_v;
-	void *squeue_start_p, *squeue_end_p;
-	void *squeue_start_v, *squeue_end_v;
+	u64 q_ofs;
 	struct ehca_wqe *wqe;
 	int qp_num = my_qp->ib_qp.qp_num;
 
@@ -755,26 +754,23 @@
 	if (ehca_debug_level)
 		ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
 	squeue = &my_qp->ipz_squeue;
-	squeue_start_p = (void*)virt_to_abs(ipz_qeit_calc(squeue, 0L));
-	squeue_end_p = squeue_start_p+squeue->queue_length;
-	squeue_start_v = abs_to_virt((u64)squeue_start_p);
-	squeue_end_v = abs_to_virt((u64)squeue_end_p);
-	ehca_dbg(&shca->ib_device, "qp_num=%x squeue_start_v=%p squeue_end_v=%p",
-		 qp_num, squeue_start_v, squeue_end_v);
+	if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
+		ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
+			 " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
+		return -EFAULT;
+	}
 
 	/* loop sets wqe's purge bit */
-	wqe = (struct ehca_wqe*)bad_send_wqe_v;
+	wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
 	*bad_wqe_cnt = 0;
 	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
 		if (ehca_debug_level)
 			ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
 		wqe->nr_of_data_seg = 0; /* suppress data access */
 		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
-		wqe = (struct ehca_wqe*)((u8*)wqe+squeue->qe_size);
+		q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
+		wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
 		*bad_wqe_cnt = (*bad_wqe_cnt)+1;
-		if ((void*)wqe >= squeue_end_v) {
-			wqe = squeue_start_v;
-		}
 	}
 	/*
 	 * bad wqe will be reprocessed and ignored when pol_cq() is called,
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index e028ff1..bf7a400 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -70,6 +70,19 @@
 	return ret;
 }
 
+int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
+{
+	int i;
+	for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
+		u64 page = (u64)virt_to_abs(queue->queue_pages[i]);
+		if (addr >= page && addr < page + queue->pagesize) {
+			*q_offset = addr - page + i * queue->pagesize;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
 int ipz_queue_ctor(struct ipz_queue *queue,
 		   const u32 nr_of_pages,
 		   const u32 pagesize, const u32 qe_size, const u32 nr_of_sg)
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
index 2f13509..dc3bda2 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
@@ -150,6 +150,21 @@
 	return ipz_qeit_get(queue);
 }
 
+/*
+ * return the q_offset corresponding to an absolute address
+ */
+int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
+
+/*
+ * return the next queue offset. don't modify the queue.
+ */
+static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset)
+{
+	offset += queue->qe_size;
+	if (offset >= queue->queue_length) offset = 0;
+	return offset;
+}
+
 /* struct generic page table */
 struct ipz_pt {
 	u64 entries[EHCA_PT_ENTRIES];
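
[Editor's note] ipz_queue_abs_to_offset() and ipz_queue_advance_offset() above let ehca_qp.c walk the send queue by linear offset instead of by raw virtual addresses, removing pointer arithmetic across queue pages that need not be contiguous. A self-contained userspace sketch of the same two operations (types and names are illustrative, not the driver's):

#include <stdint.h>

struct toy_queue {
	void     **pages;	/* queue pages, each pagesize bytes */
	uint64_t   queue_length;/* total queue size in bytes */
	uint32_t   pagesize;
	uint32_t   qe_size;	/* size of one queue entry in bytes */
};

/* map an absolute address back to a linear queue offset; -1 if not found */
int queue_addr_to_offset(const struct toy_queue *q, uintptr_t addr,
			 uint64_t *q_ofs)
{
	uint64_t i;

	for (i = 0; i < q->queue_length / q->pagesize; i++) {
		uintptr_t page = (uintptr_t)q->pages[i];

		if (addr >= page && addr < page + q->pagesize) {
			*q_ofs = addr - page + i * q->pagesize;
			return 0;
		}
	}
	return -1;
}

/* advance by one entry, wrapping at the end of the queue */
uint64_t queue_advance_offset(const struct toy_queue *q, uint64_t ofs)
{
	ofs += q->qe_size;
	return ofs >= q->queue_length ? 0 : ofs;
}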
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 413754b..8536aeb 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -214,9 +214,10 @@
 	unsigned long num_pages;
 };
 
-static void user_pages_account(void *ptr)
+static void user_pages_account(struct work_struct *_work)
 {
-	struct ipath_user_pages_work *work = ptr;
+	struct ipath_user_pages_work *work =
+		container_of(_work, struct ipath_user_pages_work, work);
 
 	down_write(&work->mm->mmap_sem);
 	work->mm->locked_vm -= work->num_pages;
@@ -242,7 +243,7 @@
 
 	goto bail;
 
-	INIT_WORK(&work->work, user_pages_account, work);
+	INIT_WORK(&work->work, user_pages_account);
 	work->mm = mm;
 	work->num_pages = num_pages;
 
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index a545610..acdee33 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1487,7 +1487,7 @@
 	idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
 	idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
 	idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
-	idev->pma_counter_select[5] = IB_PMA_PORT_XMIT_WAIT;
+	idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
 	idev->link_width_enabled = 3;	/* 1x or 4x */
 
 	/* Snapshot current HW counters to "clear" them. */
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index 6959945..27caf3b 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -33,7 +33,6 @@
  * $Id: mthca_av.c 1349 2004-12-16 21:09:43Z roland $
  */
 
-#include <linux/init.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 
@@ -190,7 +189,7 @@
 on_hca_fail:
 	if (ah->type == MTHCA_AH_PCI_POOL) {
 		ah->av = pci_pool_alloc(dev->av_table.pool,
-					SLAB_ATOMIC, &ah->avdma);
+					GFP_ATOMIC, &ah->avdma);
 		if (!ah->av)
 			return -ENOMEM;
 
@@ -323,7 +322,7 @@
 	return 0;
 }
 
-int __devinit mthca_init_av_table(struct mthca_dev *dev)
+int mthca_init_av_table(struct mthca_dev *dev)
 {
 	int err;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index cd044ea..e948158 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -57,7 +57,7 @@
 module_param_named(catas_reset_disable, catas_reset_disable, int, 0644);
 MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero");
 
-static void catas_reset(void *work_ptr)
+static void catas_reset(struct work_struct *work)
 {
 	struct mthca_dev *dev, *tmpdev;
 	LIST_HEAD(tlist);
@@ -203,7 +203,7 @@
 
 int __init mthca_catas_init(void)
 {
-	INIT_WORK(&catas_work, catas_reset, NULL);
+	INIT_WORK(&catas_work, catas_reset);
 
 	catas_wq = create_singlethread_workqueue("mthca_catas");
 	if (!catas_wq)
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 149b369..283d50b 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -36,7 +36,6 @@
  * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $
  */
 
-#include <linux/init.h>
 #include <linux/hardirq.h>
 
 #include <asm/io.h>
@@ -970,7 +969,7 @@
 	mthca_free_mailbox(dev, mailbox);
 }
 
-int __devinit mthca_init_cq_table(struct mthca_dev *dev)
+int mthca_init_cq_table(struct mthca_dev *dev)
 {
 	int err;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index e284e06..8ec9fa1 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -33,7 +33,6 @@
  * $Id: mthca_eq.c 1382 2004-12-24 02:21:02Z roland $
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
@@ -479,10 +478,10 @@
 	return IRQ_HANDLED;
 }
 
-static int __devinit mthca_create_eq(struct mthca_dev *dev,
-				     int nent,
-				     u8 intr,
-				     struct mthca_eq *eq)
+static int mthca_create_eq(struct mthca_dev *dev,
+			   int nent,
+			   u8 intr,
+			   struct mthca_eq *eq)
 {
 	int npages;
 	u64 *dma_list = NULL;
@@ -664,9 +663,9 @@
 				 dev->eq_table.eq + i);
 }
 
-static int __devinit mthca_map_reg(struct mthca_dev *dev,
-				   unsigned long offset, unsigned long size,
-				   void __iomem **map)
+static int mthca_map_reg(struct mthca_dev *dev,
+			 unsigned long offset, unsigned long size,
+			 void __iomem **map)
 {
 	unsigned long base = pci_resource_start(dev->pdev, 0);
 
@@ -691,7 +690,7 @@
 	iounmap(map);
 }
 
-static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
+static int mthca_map_eq_regs(struct mthca_dev *dev)
 {
 	if (mthca_is_memfree(dev)) {
 		/*
@@ -781,7 +780,7 @@
 	}
 }
 
-int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
+int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
 {
 	int ret;
 	u8 status;
@@ -825,7 +824,7 @@
 	__free_page(dev->eq_table.icm_page);
 }
 
-int __devinit mthca_init_eq_table(struct mthca_dev *dev)
+int mthca_init_eq_table(struct mthca_dev *dev)
 {
 	int err;
 	u8 status;
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 45e106f..acfa41d 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -317,7 +317,7 @@
 	return ret;
 }
 
-void __devexit mthca_free_agents(struct mthca_dev *dev)
+void mthca_free_agents(struct mthca_dev *dev)
 {
 	struct ib_mad_agent *agent;
 	int p, q;
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 47ea021..0491ec7 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -98,7 +98,7 @@
 	.uarc_size	   = 1 << 18,	/* Arbel only */
 };
 
-static int __devinit mthca_tune_pci(struct mthca_dev *mdev)
+static int mthca_tune_pci(struct mthca_dev *mdev)
 {
 	int cap;
 	u16 val;
@@ -143,7 +143,7 @@
 	return 0;
 }
 
-static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
+static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
 {
 	int err;
 	u8 status;
@@ -255,7 +255,7 @@
 	return 0;
 }
 
-static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
+static int mthca_init_tavor(struct mthca_dev *mdev)
 {
 	u8 status;
 	int err;
@@ -333,7 +333,7 @@
 	return err;
 }
 
-static int __devinit mthca_load_fw(struct mthca_dev *mdev)
+static int mthca_load_fw(struct mthca_dev *mdev)
 {
 	u8 status;
 	int err;
@@ -379,10 +379,10 @@
 	return err;
 }
 
-static int __devinit mthca_init_icm(struct mthca_dev *mdev,
-				    struct mthca_dev_lim *dev_lim,
-				    struct mthca_init_hca_param *init_hca,
-				    u64 icm_size)
+static int mthca_init_icm(struct mthca_dev *mdev,
+			  struct mthca_dev_lim *dev_lim,
+			  struct mthca_init_hca_param *init_hca,
+			  u64 icm_size)
 {
 	u64 aux_pages;
 	u8 status;
@@ -575,7 +575,7 @@
 	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
 }
 
-static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
+static int mthca_init_arbel(struct mthca_dev *mdev)
 {
 	struct mthca_dev_lim        dev_lim;
 	struct mthca_profile        profile;
@@ -683,7 +683,7 @@
 		mthca_SYS_DIS(mdev, &status);
 }
 
-static int __devinit mthca_init_hca(struct mthca_dev *mdev)
+static int mthca_init_hca(struct mthca_dev *mdev)
 {
 	u8 status;
 	int err;
@@ -720,7 +720,7 @@
 	return err;
 }
 
-static int __devinit mthca_setup_hca(struct mthca_dev *dev)
+static int mthca_setup_hca(struct mthca_dev *dev)
 {
 	int err;
 	u8 status;
@@ -875,8 +875,7 @@
 	return err;
 }
 
-static int __devinit mthca_request_regions(struct pci_dev *pdev,
-					   int ddr_hidden)
+static int mthca_request_regions(struct pci_dev *pdev, int ddr_hidden)
 {
 	int err;
 
@@ -928,7 +927,7 @@
 			   MTHCA_HCR_SIZE);
 }
 
-static int __devinit mthca_enable_msi_x(struct mthca_dev *mdev)
+static int mthca_enable_msi_x(struct mthca_dev *mdev)
 {
 	struct msix_entry entries[3];
 	int err;
@@ -1213,7 +1212,7 @@
 }
 
 static int __devinit mthca_init_one(struct pci_dev *pdev,
-			     const struct pci_device_id *id)
+				    const struct pci_device_id *id)
 {
 	static int mthca_version_printed = 0;
 	int ret;
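
[Editor's note] The mthca hunks in this area also drop __devinit/__devexit from the various table-init and setup helpers. The likely reason (not stated in the diff itself) is that these functions are no longer reachable only from PCI probe: the catastrophic-error handling in mthca_catas.c restarts the device from a workqueue at runtime, and code placed in the __devinit section may already have been discarded by then when hotplug support is disabled. Only mthca_init_one(), the actual probe routine, keeps its __devinit annotation.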
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 47ca8a9..a8ad072 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -32,7 +32,6 @@
  * $Id: mthca_mcg.c 1349 2004-12-16 21:09:43Z roland $
  */
 
-#include <linux/init.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 
@@ -371,7 +370,7 @@
 	return err;
 }
 
-int __devinit mthca_init_mcg_table(struct mthca_dev *dev)
+int mthca_init_mcg_table(struct mthca_dev *dev)
 {
 	int err;
 	int table_size = dev->limits.num_mgms + dev->limits.num_amgms;
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index a486dec..f71ffa8 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -34,7 +34,6 @@
  */
 
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/errno.h>
 
 #include "mthca_dev.h"
@@ -135,7 +134,7 @@
 	spin_unlock(&buddy->lock);
 }
 
-static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
+static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
 {
 	int i, s;
 
@@ -759,7 +758,7 @@
 	*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
 }
 
-int __devinit mthca_init_mr_table(struct mthca_dev *dev)
+int mthca_init_mr_table(struct mthca_dev *dev)
 {
 	unsigned long addr;
 	int err, i;
diff --git a/drivers/infiniband/hw/mthca/mthca_pd.c b/drivers/infiniband/hw/mthca/mthca_pd.c
index 59df516..c1e9507 100644
--- a/drivers/infiniband/hw/mthca/mthca_pd.c
+++ b/drivers/infiniband/hw/mthca/mthca_pd.c
@@ -34,7 +34,6 @@
  * $Id: mthca_pd.c 1349 2004-12-16 21:09:43Z roland $
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 
 #include "mthca_dev.h"
@@ -69,7 +68,7 @@
 	mthca_free(&dev->pd_table.alloc, pd->pd_num);
 }
 
-int __devinit mthca_init_pd_table(struct mthca_dev *dev)
+int mthca_init_pd_table(struct mthca_dev *dev)
 {
 	return mthca_alloc_init(&dev->pd_table.alloc,
 				dev->limits.num_pds,
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index fc67f78..21422a3 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1100,11 +1100,10 @@
 	struct mthca_fmr *fmr;
 	int err;
 
-	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
+	fmr = kmemdup(fmr_attr, sizeof *fmr, GFP_KERNEL);
 	if (!fmr)
 		return ERR_PTR(-ENOMEM);
 
-	memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
 	err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
 			     convert_access(mr_access_flags), fmr);
 
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 6a7822e..33e3ba7 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -35,7 +35,6 @@
  * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
  */
 
-#include <linux/init.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 
@@ -2241,7 +2240,7 @@
 		*new_wqe = 0;
 }
 
-int __devinit mthca_init_qp_table(struct mthca_dev *dev)
+int mthca_init_qp_table(struct mthca_dev *dev)
 {
 	int err;
 	u8 status;
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index f5d7677..34d2c47 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -120,7 +120,7 @@
 
 	memset(context, 0, sizeof *context);
 
-	logsize = long_log2(srq->max) + srq->wqe_shift;
+	logsize = long_log2(srq->max);
 	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
 	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
 	context->db_index = cpu_to_be32(srq->db_index);
@@ -715,7 +715,7 @@
 		     sizeof (struct mthca_data_seg));
 }
 
-int __devinit mthca_init_srq_table(struct mthca_dev *dev)
+int mthca_init_srq_table(struct mthca_dev *dev)
 {
 	int err;
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 0b8a79d..9954799 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -136,11 +136,11 @@
 	struct list_head multicast_list;
 	struct rb_root multicast_tree;
 
-	struct work_struct pkey_task;
-	struct work_struct mcast_task;
+	struct delayed_work pkey_task;
+	struct delayed_work mcast_task;
 	struct work_struct flush_task;
 	struct work_struct restart_task;
-	struct work_struct ah_reap_task;
+	struct delayed_work ah_reap_task;
 
 	struct ib_device *ca;
 	u8            	  port;
@@ -233,7 +233,7 @@
 }
 
 struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh);
-void ipoib_neigh_free(struct ipoib_neigh *neigh);
+void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh);
 
 extern struct workqueue_struct *ipoib_workqueue;
 
@@ -254,13 +254,13 @@
 
 void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		struct ipoib_ah *address, u32 qpn);
-void ipoib_reap_ah(void *dev_ptr);
+void ipoib_reap_ah(struct work_struct *work);
 
 void ipoib_flush_paths(struct net_device *dev);
 struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
 
 int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
-void ipoib_ib_dev_flush(void *dev);
+void ipoib_ib_dev_flush(struct work_struct *work);
 void ipoib_ib_dev_cleanup(struct net_device *dev);
 
 int ipoib_ib_dev_open(struct net_device *dev);
@@ -271,10 +271,10 @@
 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
 void ipoib_dev_cleanup(struct net_device *dev);
 
-void ipoib_mcast_join_task(void *dev_ptr);
+void ipoib_mcast_join_task(struct work_struct *work);
 void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
 
-void ipoib_mcast_restart_task(void *dev_ptr);
+void ipoib_mcast_restart_task(struct work_struct *work);
 int ipoib_mcast_start_thread(struct net_device *dev);
 int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
 
@@ -312,7 +312,7 @@
 int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
 int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
 
-void ipoib_pkey_poll(void *dev);
+void ipoib_pkey_poll(struct work_struct *work);
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
 
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 8bf5e9e..f10fba5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -400,10 +400,11 @@
 	spin_unlock_irq(&priv->tx_lock);
 }
 
-void ipoib_reap_ah(void *dev_ptr)
+void ipoib_reap_ah(struct work_struct *work)
 {
-	struct net_device *dev = dev_ptr;
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	struct ipoib_dev_priv *priv =
+		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
+	struct net_device *dev = priv->dev;
 
 	__ipoib_reap_ah(dev);
 
@@ -613,10 +614,11 @@
 	return 0;
 }
 
-void ipoib_ib_dev_flush(void *_dev)
+void ipoib_ib_dev_flush(struct work_struct *work)
 {
-	struct net_device *dev = (struct net_device *)_dev;
-	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;
+	struct ipoib_dev_priv *cpriv, *priv =
+		container_of(work, struct ipoib_dev_priv, flush_task);
+	struct net_device *dev = priv->dev;
 
 	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) {
 		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
@@ -638,14 +640,14 @@
 	 */
 	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
 		ipoib_ib_dev_up(dev);
-		ipoib_mcast_restart_task(dev);
+		ipoib_mcast_restart_task(&priv->restart_task);
 	}
 
 	mutex_lock(&priv->vlan_mutex);
 
 	/* Flush any child interfaces too */
 	list_for_each_entry(cpriv, &priv->child_intfs, list)
-		ipoib_ib_dev_flush(cpriv->dev);
+		ipoib_ib_dev_flush(&cpriv->flush_task);
 
 	mutex_unlock(&priv->vlan_mutex);
 }
@@ -672,10 +674,11 @@
  * change async notification is available.
  */
 
-void ipoib_pkey_poll(void *dev_ptr)
+void ipoib_pkey_poll(struct work_struct *work)
 {
-	struct net_device *dev = dev_ptr;
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	struct ipoib_dev_priv *priv =
+		container_of(work, struct ipoib_dev_priv, pkey_task.work);
+	struct net_device *dev = priv->dev;
 
 	ipoib_pkey_dev_check_presence(dev);
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 85522da..c092802 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -264,7 +264,7 @@
 		if (neigh->ah)
 			ipoib_put_ah(neigh->ah);
 
-		ipoib_neigh_free(neigh);
+		ipoib_neigh_free(dev, neigh);
 	}
 
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -525,10 +525,11 @@
 		ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha));
 	} else {
 		neigh->ah  = NULL;
-		__skb_queue_tail(&neigh->queue, skb);
 
 		if (!path->query && path_rec_start(dev, path))
 			goto err_list;
+
+		__skb_queue_tail(&neigh->queue, skb);
 	}
 
 	spin_unlock(&priv->lock);
@@ -538,7 +539,7 @@
 	list_del(&neigh->list);
 
 err_path:
-	ipoib_neigh_free(neigh);
+	ipoib_neigh_free(dev, neigh);
 	++priv->stats.tx_dropped;
 	dev_kfree_skb_any(skb);
 
@@ -655,7 +656,7 @@
 				 */
 				ipoib_put_ah(neigh->ah);
 				list_del(&neigh->list);
-				ipoib_neigh_free(neigh);
+				ipoib_neigh_free(dev, neigh);
 				spin_unlock(&priv->lock);
 				ipoib_path_lookup(skb, dev);
 				goto out;
@@ -786,7 +787,7 @@
 		if (neigh->ah)
 			ah = neigh->ah;
 		list_del(&neigh->list);
-		ipoib_neigh_free(neigh);
+		ipoib_neigh_free(n->dev, neigh);
 	}
 
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -809,9 +810,15 @@
 	return neigh;
 }
 
-void ipoib_neigh_free(struct ipoib_neigh *neigh)
+void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
 {
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	struct sk_buff *skb;
 	*to_ipoib_neigh(neigh->neighbour) = NULL;
+	while ((skb = __skb_dequeue(&neigh->queue))) {
+		++priv->stats.tx_dropped;
+		dev_kfree_skb_any(skb);
+	}
 	kfree(neigh);
 }
 
@@ -933,11 +940,11 @@
 	INIT_LIST_HEAD(&priv->dead_ahs);
 	INIT_LIST_HEAD(&priv->multicast_list);
 
-	INIT_WORK(&priv->pkey_task,    ipoib_pkey_poll,          priv->dev);
-	INIT_WORK(&priv->mcast_task,   ipoib_mcast_join_task,    priv->dev);
-	INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush,       priv->dev);
-	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
-	INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah,            priv->dev);
+	INIT_DELAYED_WORK(&priv->pkey_task,    ipoib_pkey_poll);
+	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
+	INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush);
+	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
+	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
 }
 
 struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
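
[Editor's note] The IPoIB hunks convert the work items that get rearmed with a delay (pkey_task, mcast_task, ah_reap_task) from struct work_struct to struct delayed_work, while flush_task and restart_task stay plain work items; queue_delayed_work(wq, dw, 0) queues immediately, which is why the former queue_work() calls in ipoib_multicast.c below gain a trailing 0. Because a delayed_work embeds the work_struct, the handlers recover their device through a two-level member path, e.g. container_of(work, struct ipoib_dev_priv, mcast_task.work). A plain-C sketch of that two-level container_of (names are illustrative, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {			/* stands in for struct work_struct */
	void (*fn)(struct work_item *);
};

struct delayed_item {			/* stands in for struct delayed_work */
	struct work_item work;		/* embedded plain work item */
	unsigned long expires;		/* the timer part, unused here */
};

struct dev_priv {			/* stands in for ipoib_dev_priv */
	const char *name;
	struct delayed_item mcast_task;
};

static void mcast_join(struct work_item *work)
{
	/* two-level member path: the work item sits inside the delayed item */
	struct dev_priv *priv =
		container_of(work, struct dev_priv, mcast_task.work);

	printf("multicast join on %s\n", priv->name);
}

int main(void)
{
	struct dev_priv priv = { .name = "ib0" };

	priv.mcast_task.work.fn = mcast_join;
	priv.mcast_task.work.fn(&priv.mcast_task.work);
	return 0;
}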
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 3faa182..b04b72c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -114,7 +114,7 @@
 		 */
 		if (neigh->ah)
 			ipoib_put_ah(neigh->ah);
-		ipoib_neigh_free(neigh);
+		ipoib_neigh_free(dev, neigh);
 	}
 
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -399,7 +399,8 @@
 		mcast->backoff = 1;
 		mutex_lock(&mcast_mutex);
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-			queue_work(ipoib_workqueue, &priv->mcast_task);
+			queue_delayed_work(ipoib_workqueue,
+					   &priv->mcast_task, 0);
 		mutex_unlock(&mcast_mutex);
 		complete(&mcast->done);
 		return;
@@ -435,7 +436,8 @@
 
 	if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
 		if (status == -ETIMEDOUT)
-			queue_work(ipoib_workqueue, &priv->mcast_task);
+			queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
+					   0);
 		else
 			queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
 					   mcast->backoff * HZ);
@@ -517,10 +519,11 @@
 		mcast->query_id = ret;
 }
 
-void ipoib_mcast_join_task(void *dev_ptr)
+void ipoib_mcast_join_task(struct work_struct *work)
 {
-	struct net_device *dev = dev_ptr;
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	struct ipoib_dev_priv *priv =
+		container_of(work, struct ipoib_dev_priv, mcast_task.work);
+	struct net_device *dev = priv->dev;
 
 	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
 		return;
@@ -610,7 +613,7 @@
 
 	mutex_lock(&mcast_mutex);
 	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-		queue_work(ipoib_workqueue, &priv->mcast_task);
+		queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
 	mutex_unlock(&mcast_mutex);
 
 	spin_lock_irq(&priv->lock);
@@ -818,10 +821,11 @@
 	}
 }
 
-void ipoib_mcast_restart_task(void *dev_ptr)
+void ipoib_mcast_restart_task(struct work_struct *work)
 {
-	struct net_device *dev = dev_ptr;
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	struct ipoib_dev_priv *priv =
+		container_of(work, struct ipoib_dev_priv, restart_task);
+	struct net_device *dev = priv->dev;
 	struct dev_mc_list *mclist;
 	struct ipoib_mcast *mcast, *tmcast;
 	LIST_HEAD(remove_list);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 9c53916..234e5b0 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -283,7 +283,7 @@
 	struct mutex      connlist_mutex;
 	struct list_head  connlist;		/* all iSER IB connections */
 
-	kmem_cache_t *desc_cache;
+	struct kmem_cache *desc_cache;
 };
 
 extern struct iser_global ig;
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 0606744..5e12250 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -35,6 +35,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <asm/io.h>
 #include <asm/scatterlist.h>
 #include <linux/scatterlist.h>
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 18a0000..693b770 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -48,7 +48,7 @@
 
 static void iser_cq_tasklet_fn(unsigned long data);
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
-static void iser_comp_error_worker(void *data);
+static void iser_comp_error_worker(struct work_struct *work);
 
 static void iser_cq_event_callback(struct ib_event *cause, void *context)
 {
@@ -480,8 +480,7 @@
 	init_waitqueue_head(&ib_conn->wait);
 	atomic_set(&ib_conn->post_recv_buf_count, 0);
 	atomic_set(&ib_conn->post_send_buf_count, 0);
-	INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker,
-		  ib_conn);
+	INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker);
 	INIT_LIST_HEAD(&ib_conn->conn_list);
 	spin_lock_init(&ib_conn->lock);
 
@@ -754,9 +753,10 @@
 	return ret_val;
 }
 
-static void iser_comp_error_worker(void *data)
+static void iser_comp_error_worker(struct work_struct *work)
 {
-	struct iser_conn *ib_conn = data;
+	struct iser_conn *ib_conn =
+		container_of(work, struct iser_conn, comperror_work);
 
 	/* getting here when the state is UP means that the conn is being *
 	 * terminated asynchronously from the iSCSI layer's perspective.  */
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 4b09147..a628959 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -390,9 +390,10 @@
 	wait_for_completion(&target->done);
 }
 
-static void srp_remove_work(void *target_ptr)
+static void srp_remove_work(struct work_struct *work)
 {
-	struct srp_target_port *target = target_ptr;
+	struct srp_target_port *target =
+		container_of(work, struct srp_target_port, work);
 
 	spin_lock_irq(target->scsi_host->host_lock);
 	if (target->state != SRP_TARGET_DEAD) {
@@ -575,7 +576,7 @@
 	spin_lock_irq(target->scsi_host->host_lock);
 	if (target->state == SRP_TARGET_CONNECTING) {
 		target->state = SRP_TARGET_DEAD;
-		INIT_WORK(&target->work, srp_remove_work, target);
+		INIT_WORK(&target->work, srp_remove_work);
 		schedule_work(&target->work);
 	}
 	spin_unlock_irq(target->scsi_host->host_lock);
@@ -1176,9 +1177,11 @@
 			break;
 		}
 
-		target->status = srp_alloc_iu_bufs(target);
-		if (target->status)
-			break;
+		if (!target->rx_ring[0]) {
+			target->status = srp_alloc_iu_bufs(target);
+			if (target->status)
+				break;
+		}
 
 		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
 		if (!qp_attr) {
@@ -1716,7 +1719,8 @@
 	if (!target_host)
 		return -ENOMEM;
 
-	target_host->max_lun = SRP_MAX_LUN;
+	target_host->max_lun     = SRP_MAX_LUN;
+	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
 
 	target = host_to_target(target_host);
 
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index a0af97e..79dfb4b 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -23,6 +23,7 @@
 #include <linux/kthread.h>
 #include <linux/sched.h>	/* HZ */
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 /*#include <asm/io.h>*/
 
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index 105112f..80cdebc 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -178,9 +178,9 @@
 
 fail:
 	if (iforce) {
-		if (iforce->irq) usb_free_urb(iforce->irq);
-		if (iforce->out) usb_free_urb(iforce->out);
-		if (iforce->ctrl) usb_free_urb(iforce->ctrl);
+		usb_free_urb(iforce->irq);
+		usb_free_urb(iforce->out);
+		usb_free_urb(iforce->ctrl);
 		kfree(iforce);
 	}
 
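
[Editor's note] The iforce error path above can drop its explicit NULL checks because usb_free_urb() accepts a NULL pointer and does nothing, just like kfree(); the gigaset USB hunks later in this series apply the same simplification, including for usb_kill_urb(). The idiom is the same as free(NULL) in plain C:

#include <stdlib.h>

struct resource {
	char *buf;
};

/* NULL-safe destructor: callers may pass a pointer that was never allocated */
static void resource_free(struct resource *r)
{
	if (!r)
		return;
	free(r->buf);		/* free(NULL) is defined to do nothing */
	free(r);
}

int main(void)
{
	struct resource *lost = NULL;			/* earlier allocation failed */
	struct resource *ok = calloc(1, sizeof(*ok));

	resource_free(lost);	/* no "if (lost)" needed at the call site */
	resource_free(ok);
	return 0;
}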
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index cbb9366..8451b29 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -567,9 +567,9 @@
  * interrupt context.
  */
 
-static void atkbd_event_work(void *data)
+static void atkbd_event_work(struct work_struct *work)
 {
-	struct atkbd *atkbd = data;
+	struct atkbd *atkbd = container_of(work, struct atkbd, event_work);
 
 	mutex_lock(&atkbd->event_mutex);
 
@@ -943,7 +943,7 @@
 
 	atkbd->dev = dev;
 	ps2_init(&atkbd->ps2dev, serio);
-	INIT_WORK(&atkbd->event_work, atkbd_event_work, atkbd);
+	INIT_WORK(&atkbd->event_work, atkbd_event_work);
 	mutex_init(&atkbd->event_mutex);
 
 	switch (serio->id.type) {
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index 979b93e..b7f049b 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -572,9 +572,9 @@
  * were in.
  */
 static void
-lkkbd_reinit (void *data)
+lkkbd_reinit (struct work_struct *work)
 {
-	struct lkkbd *lk = data;
+	struct lkkbd *lk = container_of(work, struct lkkbd, tq);
 	int division;
 	unsigned char leds_on = 0;
 	unsigned char leds_off = 0;
@@ -651,7 +651,7 @@
 
 	lk->serio = serio;
 	lk->dev = input_dev;
-	INIT_WORK (&lk->tq, lkkbd_reinit, lk);
+	INIT_WORK (&lk->tq, lkkbd_reinit);
 	lk->bell_volume = bell_volume;
 	lk->keyclick_volume = keyclick_volume;
 	lk->ctrlclick_volume = ctrlclick_volume;
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index cac4781..6cd887c 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -208,9 +208,9 @@
  * were in.
  */
 
-static void sunkbd_reinit(void *data)
+static void sunkbd_reinit(struct work_struct *work)
 {
-	struct sunkbd *sunkbd = data;
+	struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
 
 	wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ);
 
@@ -248,7 +248,7 @@
 	sunkbd->serio = serio;
 	sunkbd->dev = input_dev;
 	init_waitqueue_head(&sunkbd->wait);
-	INIT_WORK(&sunkbd->tq, sunkbd_reinit, sunkbd);
+	INIT_WORK(&sunkbd->tq, sunkbd_reinit);
 	snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys);
 
 	serio_set_drvdata(serio, sunkbd);
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index ab4da79..31d5a13 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -695,7 +695,9 @@
 
 	if ((ret = hp_sdc_request_timer_irq(&hp_sdc_rtc_isr)))
 		return ret;
-	misc_register(&hp_sdc_rtc_dev);
+	if (misc_register(&hp_sdc_rtc_dev) != 0)
+		printk(KERN_INFO "Could not register misc. dev for i8042 rtc\n");
+
         create_proc_read_entry ("driver/rtc", 0, NULL,
 				hp_sdc_rtc_read_proc, NULL);
 
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 6f9b2c7..52bb222 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -888,9 +888,10 @@
  * psmouse_resync() attempts to re-validate current protocol.
  */
 
-static void psmouse_resync(void *p)
+static void psmouse_resync(struct work_struct *work)
 {
-	struct psmouse *psmouse = p, *parent = NULL;
+	struct psmouse *parent = NULL, *psmouse =
+		container_of(work, struct psmouse, resync_work);
 	struct serio *serio = psmouse->ps2dev.serio;
 	psmouse_ret_t rc = PSMOUSE_GOOD_DATA;
 	int failed = 0, enabled = 0;
@@ -1121,7 +1122,7 @@
 		goto out;
 
 	ps2_init(&psmouse->ps2dev, serio);
-	INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse);
+	INIT_WORK(&psmouse->resync_work, psmouse_resync);
 	psmouse->dev = input_dev;
 	snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys);
 
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index e5b1b60..b3e84d3 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -251,9 +251,9 @@
  * ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.)
  */
 
-static void ps2_execute_scheduled_command(void *data)
+static void ps2_execute_scheduled_command(struct work_struct *work)
 {
-	struct ps2work *ps2work = data;
+	struct ps2work *ps2work = container_of(work, struct ps2work, work);
 
 	ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command);
 	kfree(ps2work);
@@ -278,7 +278,7 @@
 	ps2work->ps2dev = ps2dev;
 	ps2work->command = command;
 	memcpy(ps2work->param, param, send);
-	INIT_WORK(&ps2work->work, ps2_execute_scheduled_command, ps2work);
+	INIT_WORK(&ps2work->work, ps2_execute_scheduled_command);
 
 	if (!schedule_work(&ps2work->work)) {
 		kfree(ps2work);
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 211943f..5f1d403 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
 MODULE_DESCRIPTION("Serio abstraction core");
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index ba2a203..7c8d039 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -297,7 +297,7 @@
 
 	serio_raw->dev.minor = PSMOUSE_MINOR;
 	serio_raw->dev.name = serio_raw->name;
-	serio_raw->dev.dev = &serio->dev;
+	serio_raw->dev.parent = &serio->dev;
 	serio_raw->dev.fops = &serio_raw_fops;
 
 	err = misc_register(&serio_raw->dev);
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index f56d6a0..0517c73 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -189,7 +189,7 @@
 {
 	struct spi_device	*spi = to_spi_device(dev);
 	struct ads7846		*ts = dev_get_drvdata(dev);
-	struct ser_req		*req = kzalloc(sizeof *req, SLAB_KERNEL);
+	struct ser_req		*req = kzalloc(sizeof *req, GFP_KERNEL);
 	int			status;
 	int			sample;
 	int			i;
diff --git a/drivers/isdn/act2000/capi.c b/drivers/isdn/act2000/capi.c
index 6ae6eb3..946c38c 100644
--- a/drivers/isdn/act2000/capi.c
+++ b/drivers/isdn/act2000/capi.c
@@ -627,8 +627,10 @@
 }
 
 void
-actcapi_dispatch(act2000_card *card)
+actcapi_dispatch(struct work_struct *work)
 {
+	struct act2000_card *card =
+		container_of(work, struct act2000_card, rcv_tq);
 	struct sk_buff *skb;
 	actcapi_msg *msg;
 	__u16 ccmd;
diff --git a/drivers/isdn/act2000/capi.h b/drivers/isdn/act2000/capi.h
index 49f453c..e55f6a9 100644
--- a/drivers/isdn/act2000/capi.h
+++ b/drivers/isdn/act2000/capi.h
@@ -356,7 +356,7 @@
 extern void actcapi_select_b2_protocol_req(act2000_card *, act2000_chan *);
 extern void actcapi_disconnect_b3_req(act2000_card *, act2000_chan *);
 extern void actcapi_connect_resp(act2000_card *, act2000_chan *, __u8);
-extern void actcapi_dispatch(act2000_card *);
+extern void actcapi_dispatch(struct work_struct *);
 #ifdef DEBUG_MSG
 extern void actcapi_debug_msg(struct sk_buff *skb, int);
 #else
diff --git a/drivers/isdn/act2000/module.c b/drivers/isdn/act2000/module.c
index d89dcde..90593e2 100644
--- a/drivers/isdn/act2000/module.c
+++ b/drivers/isdn/act2000/module.c
@@ -192,8 +192,11 @@
 }
 
 static void
-act2000_transmit(struct act2000_card *card)
+act2000_transmit(struct work_struct *work)
 {
+	struct act2000_card *card =
+		container_of(work, struct act2000_card, snd_tq);
+
 	switch (card->bus) {
 		case ACT2000_BUS_ISA:
 			act2000_isa_send(card);
@@ -207,8 +210,11 @@
 }
 
 static void
-act2000_receive(struct act2000_card *card)
+act2000_receive(struct work_struct *work)
 {
+	struct act2000_card *card =
+		container_of(work, struct act2000_card, poll_tq);
+
 	switch (card->bus) {
 		case ACT2000_BUS_ISA:
 			act2000_isa_receive(card);
@@ -227,7 +233,7 @@
 	act2000_card * card = (act2000_card *)data;
 	unsigned long flags;
 
-	act2000_receive(card);
+	act2000_receive(&card->poll_tq);
 	spin_lock_irqsave(&card->lock, flags);
 	mod_timer(&card->ptimer, jiffies+3);
 	spin_unlock_irqrestore(&card->lock, flags);
@@ -578,9 +584,9 @@
 	skb_queue_head_init(&card->sndq);
 	skb_queue_head_init(&card->rcvq);
 	skb_queue_head_init(&card->ackq);
-	INIT_WORK(&card->snd_tq, (void *) (void *) act2000_transmit, card);
-	INIT_WORK(&card->rcv_tq, (void *) (void *) actcapi_dispatch, card);
-	INIT_WORK(&card->poll_tq, (void *) (void *) act2000_receive, card);
+	INIT_WORK(&card->snd_tq, act2000_transmit);
+	INIT_WORK(&card->rcv_tq, actcapi_dispatch);
+	INIT_WORK(&card->poll_tq, act2000_receive);
 	init_timer(&card->ptimer);
 	card->interface.owner = THIS_MODULE;
         card->interface.channels = ACT2000_BCH;
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 8c4fcb9..783a255 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -208,9 +208,10 @@
 	}
 }
 
-static void notify_handler(void *data)
+static void notify_handler(struct work_struct *work)
 {
-	struct capi_notifier *np = data;
+	struct capi_notifier *np =
+		container_of(work, struct capi_notifier, work);
 
 	switch (np->cmd) {
 	case KCI_CONTRUP:
@@ -235,7 +236,7 @@
 	if (!np)
 		return -ENOMEM;
 
-	INIT_WORK(&np->work, notify_handler, np);
+	INIT_WORK(&np->work, notify_handler);
 	np->cmd = cmd;
 	np->controller = controller;
 	np->applid = applid;
@@ -248,10 +249,11 @@
 	
 /* -------- Receiver ------------------------------------------ */
 
-static void recv_handler(void *_ap)
+static void recv_handler(struct work_struct *work)
 {
 	struct sk_buff *skb;
-	struct capi20_appl *ap = (struct capi20_appl *) _ap;
+	struct capi20_appl *ap =
+		container_of(work, struct capi20_appl, recv_work);
 
 	if ((!ap) || (ap->release_in_progress))
 		return;
@@ -527,7 +529,7 @@
 	ap->callback = NULL;
 	init_MUTEX(&ap->recv_sem);
 	skb_queue_head_init(&ap->recv_queue);
-	INIT_WORK(&ap->recv_work, recv_handler, (void *)ap);
+	INIT_WORK(&ap->recv_work, recv_handler);
 	ap->release_in_progress = 0;
 
 	write_unlock_irqrestore(&application_lock, flags);
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 1f5ebe9..03319ea 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -10,6 +10,8 @@
  */
 
 #include <linux/proc_fs.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
 
 #include "isdn_divert.h"
 
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 0c93732..63b629b 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -572,7 +572,7 @@
 			     ucs->rcvbuf, ucs->rcvbuf_size,
 			     read_ctrl_callback, cs->inbuf);
 
-	if ((ret = usb_submit_urb(ucs->urb_cmd_in, SLAB_ATOMIC)) != 0) {
+	if ((ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC)) != 0) {
 		update_basstate(ucs, 0, BS_ATRDPEND);
 		dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n",
 			get_usb_rcmsg(ret));
@@ -747,7 +747,7 @@
 	check_pending(ucs);
 
 resubmit:
-	rc = usb_submit_urb(urb, SLAB_ATOMIC);
+	rc = usb_submit_urb(urb, GFP_ATOMIC);
 	if (unlikely(rc != 0 && rc != -ENODEV)) {
 		dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
 			get_usb_rcmsg(rc));
@@ -807,7 +807,7 @@
 			urb->number_of_packets = BAS_NUMFRAMES;
 			gig_dbg(DEBUG_ISO, "%s: isoc read overrun/resubmit",
 				__func__);
-			rc = usb_submit_urb(urb, SLAB_ATOMIC);
+			rc = usb_submit_urb(urb, GFP_ATOMIC);
 			if (unlikely(rc != 0 && rc != -ENODEV)) {
 				dev_err(bcs->cs->dev,
 					"could not resubmit isochronous read "
@@ -900,7 +900,7 @@
 		}
 
 		dump_urb(DEBUG_ISO, "Initial isoc read", urb);
-		if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0)
+		if ((rc = usb_submit_urb(urb, GFP_ATOMIC)) != 0)
 			goto error;
 	}
 
@@ -935,7 +935,7 @@
 	/* submit two URBs, keep third one */
 	for (k = 0; k < 2; ++k) {
 		dump_urb(DEBUG_ISO, "Initial isoc write", urb);
-		rc = usb_submit_urb(ubc->isoouturbs[k].urb, SLAB_ATOMIC);
+		rc = usb_submit_urb(ubc->isoouturbs[k].urb, GFP_ATOMIC);
 		if (rc != 0)
 			goto error;
 	}
@@ -1042,7 +1042,7 @@
 		return 0;	/* no data to send */
 	urb->number_of_packets = nframe;
 
-	rc = usb_submit_urb(urb, SLAB_ATOMIC);
+	rc = usb_submit_urb(urb, GFP_ATOMIC);
 	if (unlikely(rc)) {
 		if (rc == -ENODEV)
 			/* device removed - give up silently */
@@ -1341,7 +1341,7 @@
 		urb->dev = bcs->cs->hw.bas->udev;
 		urb->transfer_flags = URB_ISO_ASAP;
 		urb->number_of_packets = BAS_NUMFRAMES;
-		rc = usb_submit_urb(urb, SLAB_ATOMIC);
+		rc = usb_submit_urb(urb, GFP_ATOMIC);
 		if (unlikely(rc != 0 && rc != -ENODEV)) {
 			dev_err(cs->dev,
 				"could not resubmit isochronous read URB: %s\n",
@@ -1458,7 +1458,7 @@
 			   ucs->retry_ctrl);
 		/* urb->dev is clobbered by USB subsystem */
 		urb->dev = ucs->udev;
-		rc = usb_submit_urb(urb, SLAB_ATOMIC);
+		rc = usb_submit_urb(urb, GFP_ATOMIC);
 		if (unlikely(rc)) {
 			dev_err(&ucs->interface->dev,
 				"could not resubmit request 0x%02x: %s\n",
@@ -1517,7 +1517,7 @@
 			     (unsigned char*) &ucs->dr_ctrl, NULL, 0,
 			     write_ctrl_callback, ucs);
 	ucs->retry_ctrl = 0;
-	ret = usb_submit_urb(ucs->urb_ctrl, SLAB_ATOMIC);
+	ret = usb_submit_urb(ucs->urb_ctrl, GFP_ATOMIC);
 	if (unlikely(ret)) {
 		dev_err(bcs->cs->dev, "could not submit request 0x%02x: %s\n",
 			req, get_usb_rcmsg(ret));
@@ -1763,7 +1763,7 @@
 			     usb_sndctrlpipe(ucs->udev, 0),
 			     (unsigned char*) &ucs->dr_cmd_out, buf, len,
 			     write_command_callback, cs);
-	rc = usb_submit_urb(ucs->urb_cmd_out, SLAB_ATOMIC);
+	rc = usb_submit_urb(ucs->urb_cmd_out, GFP_ATOMIC);
 	if (unlikely(rc)) {
 		update_basstate(ucs, 0, BS_ATWRPEND);
 		dev_err(cs->dev, "could not submit HD_WRITE_ATMESSAGE: %s\n",
@@ -2218,21 +2218,21 @@
 	 * - three for the different uses of the default control pipe
 	 * - three for each isochronous pipe
 	 */
-	if (!(ucs->urb_int_in = usb_alloc_urb(0, SLAB_KERNEL)) ||
-	    !(ucs->urb_cmd_in = usb_alloc_urb(0, SLAB_KERNEL)) ||
-	    !(ucs->urb_cmd_out = usb_alloc_urb(0, SLAB_KERNEL)) ||
-	    !(ucs->urb_ctrl = usb_alloc_urb(0, SLAB_KERNEL)))
+	if (!(ucs->urb_int_in = usb_alloc_urb(0, GFP_KERNEL)) ||
+	    !(ucs->urb_cmd_in = usb_alloc_urb(0, GFP_KERNEL)) ||
+	    !(ucs->urb_cmd_out = usb_alloc_urb(0, GFP_KERNEL)) ||
+	    !(ucs->urb_ctrl = usb_alloc_urb(0, GFP_KERNEL)))
 		goto allocerr;
 
 	for (j = 0; j < 2; ++j) {
 		ubc = cs->bcs[j].hw.bas;
 		for (i = 0; i < BAS_OUTURBS; ++i)
 			if (!(ubc->isoouturbs[i].urb =
-			      usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL)))
+			      usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL)))
 				goto allocerr;
 		for (i = 0; i < BAS_INURBS; ++i)
 			if (!(ubc->isoinurbs[i] =
-			      usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL)))
+			      usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL)))
 				goto allocerr;
 	}
 
@@ -2246,7 +2246,7 @@
 					(endpoint->bEndpointAddress) & 0x0f),
 			 ucs->int_in_buf, 3, read_int_callback, cs,
 			 endpoint->bInterval);
-	if ((rc = usb_submit_urb(ucs->urb_int_in, SLAB_KERNEL)) != 0) {
+	if ((rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL)) != 0) {
 		dev_err(cs->dev, "could not submit interrupt URB: %s\n",
 			get_usb_rcmsg(rc));
 		goto error;
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 5800bee..defd5743 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -702,7 +702,7 @@
 	cs->open_count = 0;
 	cs->dev = NULL;
 	cs->tty = NULL;
-	cs->class = NULL;
+	cs->tty_dev = NULL;
 	cs->cidmode = cidmode != 0;
 
 	//if(onechannel) { //FIXME
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 884bd72..06298cc 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -444,7 +444,7 @@
 	struct gigaset_driver *driver;
 	unsigned minor_index;
 	struct device *dev;
-	struct class_device *class;
+	struct device *tty_dev;
 
 	const struct gigaset_ops *ops;
 
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 596f3ae..7edea01 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -625,13 +625,13 @@
 		return;
 
 	tasklet_init(&cs->if_wake_tasklet, &if_wake, (unsigned long) cs);
-	cs->class = tty_register_device(drv->tty, cs->minor_index, NULL);
+	cs->tty_dev = tty_register_device(drv->tty, cs->minor_index, NULL);
 
-	if (!IS_ERR(cs->class))
-		class_set_devdata(cs->class, cs);
+	if (!IS_ERR(cs->tty_dev))
+		dev_set_drvdata(cs->tty_dev, cs);
 	else {
 		warn("could not register device to the tty subsystem");
-		cs->class = NULL;
+		cs->tty_dev = NULL;
 	}
 }
 
@@ -645,7 +645,7 @@
 
 	tasklet_disable(&cs->if_wake_tasklet);
 	tasklet_kill(&cs->if_wake_tasklet);
-	cs->class = NULL;
+	cs->tty_dev = NULL;
 	tty_unregister_device(drv->tty, cs->minor_index);
 }
 
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c
index 9ad840e..e767afa 100644
--- a/drivers/isdn/gigaset/proc.c
+++ b/drivers/isdn/gigaset/proc.c
@@ -16,11 +16,12 @@
 #include "gigaset.h"
 #include <linux/ctype.h>
 
-static ssize_t show_cidmode(struct class_device *class, char *buf)
+static ssize_t show_cidmode(struct device *dev,
+			    struct device_attribute *attr, char *buf)
 {
 	int ret;
 	unsigned long flags;
-	struct cardstate *cs = class_get_devdata(class);
+	struct cardstate *cs = dev_get_drvdata(dev);
 
 	spin_lock_irqsave(&cs->lock, flags);
 	ret = sprintf(buf, "%u\n", cs->cidmode);
@@ -29,10 +30,10 @@
 	return ret;
 }
 
-static ssize_t set_cidmode(struct class_device *class,
+static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr,
 			   const char *buf, size_t count)
 {
-	struct cardstate *cs = class_get_devdata(class);
+	struct cardstate *cs = dev_get_drvdata(dev);
 	long int value;
 	char *end;
 
@@ -64,25 +65,25 @@
 	return count;
 }
 
-static CLASS_DEVICE_ATTR(cidmode, S_IRUGO|S_IWUSR, show_cidmode, set_cidmode);
+static DEVICE_ATTR(cidmode, S_IRUGO|S_IWUSR, show_cidmode, set_cidmode);
 
 /* free sysfs for device */
 void gigaset_free_dev_sysfs(struct cardstate *cs)
 {
-	if (!cs->class)
+	if (!cs->tty_dev)
 		return;
 
 	gig_dbg(DEBUG_INIT, "removing sysfs entries");
-	class_device_remove_file(cs->class, &class_device_attr_cidmode);
+	device_remove_file(cs->tty_dev, &dev_attr_cidmode);
 }
 
 /* initialize sysfs for device */
 void gigaset_init_dev_sysfs(struct cardstate *cs)
 {
-	if (!cs->class)
+	if (!cs->tty_dev)
 		return;
 
 	gig_dbg(DEBUG_INIT, "setting up sysfs");
-	if (class_device_create_file(cs->class, &class_device_attr_cidmode))
+	if (device_create_file(cs->tty_dev, &dev_attr_cidmode))
 		dev_err(cs->dev, "could not create sysfs attribute\n");
 }
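
[Editor's note] The gigaset changes in common.c, gigaset.h, interface.c and proc.c above follow the driver-core move away from struct class_device: tty_register_device() now hands back a struct device, so the card state stores it as tty_dev, the cidmode attribute becomes a DEVICE_ATTR (generating dev_attr_cidmode) whose show/store callbacks take the extra struct device_attribute argument, and per-device data moves to dev_set_drvdata()/dev_get_drvdata(). The attribute itself keeps its name and permissions; only the registration API changes.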
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 4ffa9eb..04f2ad7 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -410,7 +410,7 @@
 
 	if (resubmit) {
 		spin_lock_irqsave(&cs->lock, flags);
-		r = cs->connected ? usb_submit_urb(urb, SLAB_ATOMIC) : -ENODEV;
+		r = cs->connected ? usb_submit_urb(urb, GFP_ATOMIC) : -ENODEV;
 		spin_unlock_irqrestore(&cs->lock, flags);
 		if (r)
 			dev_err(cs->dev, "error %d when resubmitting urb.\n",
@@ -486,7 +486,7 @@
 			atomic_set(&ucs->busy, 1);
 
 			spin_lock_irqsave(&cs->lock, flags);
-			status = cs->connected ? usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC) : -ENODEV;
+			status = cs->connected ? usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) : -ENODEV;
 			spin_unlock_irqrestore(&cs->lock, flags);
 
 			if (status) {
@@ -664,7 +664,7 @@
 						  ucs->bulk_out_endpointAddr & 0x0f),
 				  ucs->bulk_out_buffer, count,
 				  gigaset_write_bulk_callback, cs);
-		ret = usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC);
+		ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC);
 	} else {
 		ret = -ENODEV;
 	}
@@ -763,7 +763,7 @@
 		goto error;
 	}
 
-	ucs->bulk_out_urb = usb_alloc_urb(0, SLAB_KERNEL);
+	ucs->bulk_out_urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!ucs->bulk_out_urb) {
 		dev_err(cs->dev, "Couldn't allocate bulk_out_urb\n");
 		retval = -ENOMEM;
@@ -774,7 +774,7 @@
 
 	atomic_set(&ucs->busy, 0);
 
-	ucs->read_urb = usb_alloc_urb(0, SLAB_KERNEL);
+	ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!ucs->read_urb) {
 		dev_err(cs->dev, "No free urbs available\n");
 		retval = -ENOMEM;
@@ -797,7 +797,7 @@
 			 gigaset_read_int_callback,
 			 cs->inbuf + 0, endpoint->bInterval);
 
-	retval = usb_submit_urb(ucs->read_urb, SLAB_KERNEL);
+	retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL);
 	if (retval) {
 		dev_err(cs->dev, "Could not submit URB (error %d)\n", -retval);
 		goto error;
@@ -815,14 +815,11 @@
 	return 0;
 
 error:
-	if (ucs->read_urb)
-		usb_kill_urb(ucs->read_urb);
+	usb_kill_urb(ucs->read_urb);
 	kfree(ucs->bulk_out_buffer);
-	if (ucs->bulk_out_urb != NULL)
-		usb_free_urb(ucs->bulk_out_urb);
+	usb_free_urb(ucs->bulk_out_urb);
 	kfree(cs->inbuf[0].rcvbuf);
-	if (ucs->read_urb != NULL)
-		usb_free_urb(ucs->read_urb);
+	usb_free_urb(ucs->read_urb);
 	usb_set_intfdata(interface, NULL);
 	ucs->read_urb = ucs->bulk_out_urb = NULL;
 	cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL;
@@ -850,11 +847,9 @@
 	usb_kill_urb(ucs->bulk_out_urb);	/* FIXME: only if active? */
 
 	kfree(ucs->bulk_out_buffer);
-	if (ucs->bulk_out_urb != NULL)
-		usb_free_urb(ucs->bulk_out_urb);
+	usb_free_urb(ucs->bulk_out_urb);
 	kfree(cs->inbuf[0].rcvbuf);
-	if (ucs->read_urb != NULL)
-		usb_free_urb(ucs->read_urb);
+	usb_free_urb(ucs->read_urb);
 	ucs->read_urb = ucs->bulk_out_urb = NULL;
 	cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL;
 
diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c
index 7bbfd85..fd5d736 100644
--- a/drivers/isdn/hardware/avm/avm_cs.c
+++ b/drivers/isdn/hardware/avm/avm_cs.c
@@ -194,41 +194,11 @@
 
     dev = link->priv;
 
-    /*
-       This reads the card's CONFIG tuple to find its configuration
-       registers.
-    */
     do {
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	i = pcmcia_get_first_tuple(link, &tuple);
-	if (i != CS_SUCCESS) break;
-	tuple.TupleData = buf;
-	tuple.TupleDataMax = 64;
-	tuple.TupleOffset = 0;
-	i = pcmcia_get_tuple_data(link, &tuple);
-	if (i != CS_SUCCESS) break;
-	i = pcmcia_parse_tuple(link, &tuple, &parse);
-	if (i != CS_SUCCESS) break;
-	link->conf.ConfigBase = parse.config.base;
-    } while (0);
-    if (i != CS_SUCCESS) {
-	cs_error(link, ParseTuple, i);
-	return -ENODEV;
-    }
-
-    do {
-
-	tuple.Attributes = 0;
-	tuple.TupleData = buf;
-	tuple.TupleDataMax = 254;
-	tuple.TupleOffset = 0;
-	tuple.DesiredTuple = CISTPL_VERS_1;
-
 	devname[0] = 0;
-	if( !first_tuple(link, &tuple, &parse) && parse.version_1.ns > 1 ) {
-	    strlcpy(devname,parse.version_1.str + parse.version_1.ofs[1], 
-			sizeof(devname));
-	}
+	if (link->prod_id[1])
+		strlcpy(devname, link->prod_id[1], sizeof(devname));
+
 	/*
          * find IO port
          */
diff --git a/drivers/isdn/hardware/eicon/os_4bri.c b/drivers/isdn/hardware/eicon/os_4bri.c
index 11e6f93..7b4ec3f 100644
--- a/drivers/isdn/hardware/eicon/os_4bri.c
+++ b/drivers/isdn/hardware/eicon/os_4bri.c
@@ -464,7 +464,7 @@
 
 /*
 **  Cleanup function will be called for master adapter only
-**  this is garanteed by design: cleanup callback is set
+**  this is guaranteed by design: cleanup callback is set
 **  by master adapter only
 */
 static int diva_4bri_cleanup_adapter(diva_os_xdi_adapter_t * a)
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index bec5901..3b19cae 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -232,9 +232,10 @@
 
 
 static void
-Amd7930_bh(struct IsdnCardState *cs)
+Amd7930_bh(struct work_struct *work)
 {
-
+	struct IsdnCardState *cs =
+		container_of(work, struct IsdnCardState, tqueue);
         struct PStack *stptr;
 
 	if (!cs)
@@ -789,7 +790,7 @@
 void __devinit
 setup_Amd7930(struct IsdnCardState *cs)
 {
-        INIT_WORK(&cs->tqueue, (void *)(void *) Amd7930_bh, cs);
+        INIT_WORK(&cs->tqueue, Amd7930_bh);
 	cs->dbusytimer.function = (void *) dbusy_timer_handler;
 	cs->dbusytimer.data = (long) cs;
 	init_timer(&cs->dbusytimer);
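Most of the hisax and other ISDN hunks that follow repeat the same 2.6.20 workqueue conversion seen here: the bottom-half handler now takes the struct work_struct pointer itself, recovers its containing object with container_of(), and INIT_WORK() loses its data argument (along with the (void *)(void *) cast that used to hide the type mismatch). A minimal sketch of the before/after, with hypothetical names standing in for IsdnCardState and friends:

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct demo_card {
	int event;
	struct work_struct tqueue;
};

/* new-style handler: the work item itself is the argument ... */
static void demo_bh(struct work_struct *work)
{
	/* ... and the owning object is recovered via container_of() */
	struct demo_card *card = container_of(work, struct demo_card, tqueue);

	card->event = 0;
}

void demo_setup(struct demo_card *card)
{
	/* old: INIT_WORK(&card->tqueue, (void *)(void *) demo_bh, card); */
	INIT_WORK(&card->tqueue, demo_bh);
	schedule_work(&card->tqueue);
}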
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index ac28e32..876fec6 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -216,41 +216,11 @@
 
     DEBUG(0, "avma1cs_config(0x%p)\n", link);
 
-    /*
-       This reads the card's CONFIG tuple to find its configuration
-       registers.
-    */
     do {
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	i = pcmcia_get_first_tuple(link, &tuple);
-	if (i != CS_SUCCESS) break;
-	tuple.TupleData = buf;
-	tuple.TupleDataMax = 64;
-	tuple.TupleOffset = 0;
-	i = pcmcia_get_tuple_data(link, &tuple);
-	if (i != CS_SUCCESS) break;
-	i = pcmcia_parse_tuple(link, &tuple, &parse);
-	if (i != CS_SUCCESS) break;
-	link->conf.ConfigBase = parse.config.base;
-    } while (0);
-    if (i != CS_SUCCESS) {
-	cs_error(link, ParseTuple, i);
-	return -ENODEV;
-    }
-
-    do {
-
-	tuple.Attributes = 0;
-	tuple.TupleData = buf;
-	tuple.TupleDataMax = 254;
-	tuple.TupleOffset = 0;
-	tuple.DesiredTuple = CISTPL_VERS_1;
-
 	devname[0] = 0;
-	if( !first_tuple(link, &tuple, &parse) && parse.version_1.ns > 1 ) {
-	    strlcpy(devname,parse.version_1.str + parse.version_1.ofs[1], 
-			sizeof(devname));
-	}
+	if (link->prod_id[1])
+		strlcpy(devname, link->prod_id[1], sizeof(devname));
+
 	/*
          * find IO port
          */
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 785b085..cede72c 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1137,7 +1137,6 @@
 	cs->tx_skb = NULL;
 	cs->tx_cnt = 0;
 	cs->event = 0;
-	cs->tqueue.data = cs;
 
 	skb_queue_head_init(&cs->rq);
 	skb_queue_head_init(&cs->sq);
@@ -1554,7 +1553,7 @@
 static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg);
 static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs);
 static void hisax_bc_close(struct BCState *bcs);
-static void hisax_bh(struct IsdnCardState *cs);
+static void hisax_bh(struct work_struct *work);
 static void EChannel_proc_rcv(struct hisax_d_if *d_if);
 
 int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[],
@@ -1586,7 +1585,7 @@
 	hisax_d_if->cs = cs;
 	cs->hw.hisax_d_if = hisax_d_if;
 	cs->cardmsg = hisax_cardmsg;
-	INIT_WORK(&cs->tqueue, (void *)(void *)hisax_bh, cs);
+	INIT_WORK(&cs->tqueue, hisax_bh);
 	cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1;
 	for (i = 0; i < 2; i++) {
 		cs->bcs[i].BC_SetStack = hisax_bc_setstack;
@@ -1618,8 +1617,10 @@
 	schedule_work(&cs->tqueue);
 }
 
-static void hisax_bh(struct IsdnCardState *cs)
+static void hisax_bh(struct work_struct *work)
 {
+	struct IsdnCardState *cs =
+		container_of(work, struct IsdnCardState, tqueue);
 	struct PStack *st;
 	int pr;
 
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index e18e75b..4e180d2 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -242,23 +242,6 @@
     DEBUG(0, "elsa_config(0x%p)\n", link);
     dev = link->priv;
 
-    /*
-       This reads the card's CONFIG tuple to find its configuration
-       registers.
-    */
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    tuple.TupleData = (cisdata_t *)buf;
-    tuple.TupleDataMax = 255;
-    tuple.TupleOffset = 0;
-    tuple.Attributes = 0;
-    i = first_tuple(link, &tuple, &parse);
-    if (i != CS_SUCCESS) {
-        last_fn = ParseTuple;
-	goto cs_failed;
-    }
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
-
     tuple.TupleData = (cisdata_t *)buf;
     tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
     tuple.Attributes = 0;
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index d852c9d..de9b1a4 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -1083,8 +1083,9 @@
 /* bottom half handler for interrupt */
 /*************************************/
 static void
-hfc4s8s_bh(hfc4s8s_hw * hw)
+hfc4s8s_bh(struct work_struct *work)
 {
+	hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue);
 	u_char b;
 	struct hfc4s8s_l1 *l1p;
 	volatile u_char *fifo_stat;
@@ -1550,7 +1551,7 @@
 		goto out;
 	}
 
-	INIT_WORK(&hw->tqueue, (void *) (void *) hfc4s8s_bh, hw);
+	INIT_WORK(&hw->tqueue, hfc4s8s_bh);
 
 	if (request_irq
 	    (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) {
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.h b/drivers/isdn/hisax/hfc4s8s_l1.h
index e8f9c07..9d5d2a5 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.h
+++ b/drivers/isdn/hisax/hfc4s8s_l1.h
@@ -16,7 +16,7 @@
 
 /*
 *  include Genero generated HFC-4S/8S header file hfc48scu.h
-*  for comlete register description. This will define _HFC48SCU_H_
+*  for complete register description. This will define _HFC48SCU_H_
 *  to prevent redefinitions
 */
 
diff --git a/drivers/isdn/hisax/hfc_2bds0.c b/drivers/isdn/hisax/hfc_2bds0.c
index 6360e82..8d98644 100644
--- a/drivers/isdn/hisax/hfc_2bds0.c
+++ b/drivers/isdn/hisax/hfc_2bds0.c
@@ -549,10 +549,11 @@
 }
 
 static void
-hfcd_bh(struct IsdnCardState *cs)
+hfcd_bh(struct work_struct *work)
 {
-	if (!cs)
-		return;
+	struct IsdnCardState *cs =
+		container_of(work, struct IsdnCardState, tqueue);
+
 	if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
 		switch (cs->dc.hfcd.ph_state) {
 			case (0):
@@ -1072,5 +1073,5 @@
 	cs->dbusytimer.function = (void *) hfc_dbusy_timer;
 	cs->dbusytimer.data = (long) cs;
 	init_timer(&cs->dbusytimer);
-	INIT_WORK(&cs->tqueue, (void *)(void *) hfcd_bh, cs);
+	INIT_WORK(&cs->tqueue, hfcd_bh);
 }
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 93f60b5..5db0a85 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1506,8 +1506,10 @@
 /* handle L1 state changes */
 /***************************/
 static void
-hfcpci_bh(struct IsdnCardState *cs)
+hfcpci_bh(struct work_struct *work)
 {
+	struct IsdnCardState *cs =
+		container_of(work, struct IsdnCardState, tqueue);
 	u_long	flags;
 //      struct PStack *stptr;
 
@@ -1722,7 +1724,7 @@
 		Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
 		/* At this point the needed PCI config is done */
 		/* fifos are still not enabled */
-		INIT_WORK(&cs->tqueue, (void *)(void *) hfcpci_bh, cs);
+		INIT_WORK(&cs->tqueue, hfcpci_bh);
 		cs->setstack_d = setstack_hfcpci;
 		cs->BC_Send_Data = &hfcpci_send_data;
 		cs->readisac = NULL;
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 954d153..4fd09d2 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -1251,8 +1251,10 @@
 /* handle L1 state changes */
 /***************************/
 static void
-hfcsx_bh(struct IsdnCardState *cs)
+hfcsx_bh(struct work_struct *work)
 {
+	struct IsdnCardState *cs =
+		container_of(work, struct IsdnCardState, tqueue);
 	u_long flags;
 
 	if (!cs)
@@ -1499,7 +1501,7 @@
 	cs->dbusytimer.function = (void *) hfcsx_dbusy_timer;
 	cs->dbusytimer.data = (long) cs;
 	init_timer(&cs->dbusytimer);
-	INIT_WORK(&cs->tqueue, (void *)(void *) hfcsx_bh, cs);
+	INIT_WORK(&cs->tqueue, hfcsx_bh);
 	cs->readisac = NULL;
 	cs->writeisac = NULL;
 	cs->readisacfifo = NULL;
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index da70692..682cac3 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -77,8 +77,10 @@
 }
 
 static void
-icc_bh(struct IsdnCardState *cs)
+icc_bh(struct work_struct *work)
 {
+	struct IsdnCardState *cs =
+		container_of(work, struct IsdnCardState, tqueue);
 	struct PStack *stptr;
 	
 	if (!cs)
@@ -674,7 +676,7 @@
 void __devinit
 setup_icc(struct IsdnCardState *cs)
 {
-	INIT_WORK(&cs->tqueue, (void *)(void *) icc_bh, cs);
+	INIT_WORK(&cs->tqueue, icc_bh);
 	cs->dbusytimer.function = (void *) dbusy_timer_handler;
 	cs->dbusytimer.data = (long) cs;
 	init_timer(&cs->dbusytimer);
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index 282f349..4e9f238 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -81,8 +81,10 @@
 }
 
 static void
-isac_bh(struct IsdnCardState *cs)
+isac_bh(struct work_struct *work)
 {
+	struct IsdnCardState *cs =
+		container_of(work, struct IsdnCardState, tqueue);
 	struct PStack *stptr;
 	
 	if (!cs)
@@ -674,7 +676,7 @@
 void __devinit
 setup_isac(struct IsdnCardState *cs)
 {
-	INIT_WORK(&cs->tqueue, (void *)(void *) isac_bh, cs);
+	INIT_WORK(&cs->tqueue, isac_bh);
 	cs->dbusytimer.function = (void *) dbusy_timer_handler;
 	cs->dbusytimer.data = (long) cs;
 	init_timer(&cs->dbusytimer);
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 674af67..6f1a658 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -437,8 +437,10 @@
 #define B_LL_OK		10
 
 static void
-isar_bh(struct BCState *bcs)
+isar_bh(struct work_struct *work)
 {
+	struct BCState *bcs = container_of(work, struct BCState, tqueue);
+
 	BChannel_bh(bcs);
 	if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event))
 		ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR);
@@ -1580,7 +1582,7 @@
 		cs->bcs[i].mode = 0;
 		cs->bcs[i].hw.isar.dpath = i + 1;
 		modeisar(&cs->bcs[i], 0, 0);
-		INIT_WORK(&cs->bcs[i].tqueue, (void *)(void *) isar_bh, &cs->bcs[i]);
+		INIT_WORK(&cs->bcs[i].tqueue, isar_bh);
 	}
 }
 
diff --git a/drivers/isdn/hisax/isdnhdlc.h b/drivers/isdn/hisax/isdnhdlc.h
index 2693159..5655b5f 100644
--- a/drivers/isdn/hisax/isdnhdlc.h
+++ b/drivers/isdn/hisax/isdnhdlc.h
@@ -41,10 +41,10 @@
 	unsigned char shift_reg;
 	unsigned char ffvalue;
 
-	int data_received:1; 	// set if transferring data
-	int dchannel:1; 	// set if D channel (send idle instead of flags)
-	int do_adapt56:1; 	// set if 56K adaptation
-        int do_closing:1; 	// set if in closing phase (need to send CRC + flag
+	unsigned int data_received:1; 	// set if transferring data
+	unsigned int dchannel:1; 	// set if D channel (send idle instead of flags)
+	unsigned int do_adapt56:1; 	// set if 56K adaptation
+	unsigned int do_closing:1; 	// set if in closing phase (need to send CRC + flag)
 };
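The point of the bitfield change: with gcc's defaults a plain int bitfield is signed, so a 1-bit field can only hold 0 and -1; storing 1 actually yields -1, an explicit comparison like flag == 1 is never true, and sparse warns about a "dubious one-bit signed bitfield". Declaring the flags unsigned makes them hold 0 and 1 as intended. A small standalone illustration (not from the driver):

#include <stdio.h>

struct flags_bad  { int          busy:1; };	/* signed 1-bit field: can hold only 0 and -1 */
struct flags_good { unsigned int busy:1; };	/* unsigned 1-bit field: holds 0 and 1 */

int main(void)
{
	struct flags_bad  b = { 0 };
	struct flags_good g = { 0 };

	b.busy = 1;
	g.busy = 1;

	printf("signed:   busy=%d  busy==1 -> %d\n", b.busy, b.busy == 1);	/* -1, 0 (with gcc) */
	printf("unsigned: busy=%d  busy==1 -> %d\n", g.busy, g.busy == 1);	/*  1, 1 */
	return 0;
}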
 
 
diff --git a/drivers/isdn/hisax/isdnl1.c b/drivers/isdn/hisax/isdnl1.c
index bab3568..a14204e 100644
--- a/drivers/isdn/hisax/isdnl1.c
+++ b/drivers/isdn/hisax/isdnl1.c
@@ -315,8 +315,10 @@
 }
 
 void
-BChannel_bh(struct BCState *bcs)
+BChannel_bh(struct work_struct *work)
 {
+	struct BCState *bcs = container_of(work, struct BCState, tqueue);
+
 	if (!bcs)
 		return;
 	if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event))
@@ -362,7 +364,7 @@
 
 	bcs->cs = cs;
 	bcs->channel = bc;
-	INIT_WORK(&bcs->tqueue, (void *)(void *) BChannel_bh, bcs);
+	INIT_WORK(&bcs->tqueue, BChannel_bh);
 	spin_lock_init(&bcs->aclock);
 	bcs->BC_SetStack = NULL;
 	bcs->BC_Close = NULL;
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index 6d04317..cd3b5ad 100644
--- a/drivers/isdn/hisax/isdnl2.c
+++ b/drivers/isdn/hisax/isdnl2.c
@@ -1442,7 +1442,7 @@
 }
 
 static void
-l2_st14_persistant_da(struct FsmInst *fi, int event, void *arg)
+l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg)
 {
 	struct PStack *st = fi->userdata;
 	
@@ -1453,7 +1453,7 @@
 }
 
 static void
-l2_st5_persistant_da(struct FsmInst *fi, int event, void *arg)
+l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg)
 {
 	struct PStack *st = fi->userdata;
 
@@ -1466,7 +1466,7 @@
 }
 
 static void
-l2_st6_persistant_da(struct FsmInst *fi, int event, void *arg)
+l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg)
 {
 	struct PStack *st = fi->userdata;
 
@@ -1477,7 +1477,7 @@
 }
 
 static void
-l2_persistant_da(struct FsmInst *fi, int event, void *arg)
+l2_persistent_da(struct FsmInst *fi, int event, void *arg)
 {
 	struct PStack *st = fi->userdata;
 
@@ -1612,14 +1612,14 @@
 	{ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
 	{ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
 	{ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
-	{ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistant_da},
+	{ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da},
 	{ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
 	{ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
-	{ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistant_da},
-	{ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistant_da},
-	{ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistant_da},
-	{ST_L2_7, EV_L1_DEACTIVATE, l2_persistant_da},
-	{ST_L2_8, EV_L1_DEACTIVATE, l2_persistant_da},
+	{ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da},
+	{ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da},
+	{ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da},
+	{ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da},
+	{ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da},
 };
 
 #define L2_FN_COUNT (sizeof(L2FnList)/sizeof(struct FsmNode))
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index f9c14a2..46ed653 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -233,20 +233,10 @@
 
     DEBUG(0, "sedlbauer_config(0x%p)\n", link);
 
-    /*
-       This reads the card's CONFIG tuple to find its configuration
-       registers.
-    */
-    tuple.DesiredTuple = CISTPL_CONFIG;
     tuple.Attributes = 0;
     tuple.TupleData = buf;
     tuple.TupleDataMax = sizeof(buf);
     tuple.TupleOffset = 0;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
 
     CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &conf));
 
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index afcc2ae..6b754f1 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -232,23 +232,6 @@
     DEBUG(0, "teles_config(0x%p)\n", link);
     dev = link->priv;
 
-    /*
-       This reads the card's CONFIG tuple to find its configuration
-       registers.
-    */
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    tuple.TupleData = (cisdata_t *)buf;
-    tuple.TupleDataMax = 255;
-    tuple.TupleOffset = 0;
-    tuple.Attributes = 0;
-    i = first_tuple(link, &tuple, &parse);
-    if (i != CS_SUCCESS) {
-        last_fn = ParseTuple;
-	goto cs_failed;
-    }
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
-
     tuple.TupleData = (cisdata_t *)buf;
     tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
     tuple.Attributes = 0;
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index 1655341..3aeceaf 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -101,8 +101,10 @@
 }
 
 static void
-W6692_bh(struct IsdnCardState *cs)
+W6692_bh(struct work_struct *work)
 {
+	struct IsdnCardState *cs =
+		container_of(work, struct IsdnCardState, tqueue);
 	struct PStack *stptr;
 
 	if (!cs)
@@ -1070,7 +1072,7 @@
 	       id_list[cs->subtyp].card_name, cs->irq,
 	       cs->hw.w6692.iobase);
 
-	INIT_WORK(&cs->tqueue, (void *)(void *) W6692_bh, cs);
+	INIT_WORK(&cs->tqueue, W6692_bh);
 	cs->readW6692 = &ReadW6692;
 	cs->writeW6692 = &WriteW6692;
 	cs->readisacfifo = &ReadISACfifo;
diff --git a/drivers/isdn/hysdn/boardergo.c b/drivers/isdn/hysdn/boardergo.c
index 82e42a8..a120649 100644
--- a/drivers/isdn/hysdn/boardergo.c
+++ b/drivers/isdn/hysdn/boardergo.c
@@ -71,8 +71,9 @@
 /* may be queued from everywhere (interrupts included).                       */
 /******************************************************************************/
 static void
-ergo_irq_bh(hysdn_card * card)
+ergo_irq_bh(struct work_struct *ugli_api)
 {
+	hysdn_card * card = container_of(ugli_api, hysdn_card, irq_queue);
 	tErgDpram *dpr;
 	int again;
 	unsigned long flags;
@@ -442,7 +443,7 @@
 	card->writebootseq = ergo_writebootseq;
 	card->waitpofready = ergo_waitpofready;
 	card->set_errlog_state = ergo_set_errlog_state;
-	INIT_WORK(&card->irq_queue, (void *) (void *) ergo_irq_bh, card);
+	INIT_WORK(&card->irq_queue, ergo_irq_bh);
 	card->hysdn_lock = SPIN_LOCK_UNLOCKED;
 
 	return (0);
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 1f8d6ae..2e4daeb 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -984,9 +984,9 @@
 /*
  * called from tq_immediate
  */
-static void isdn_net_softint(void *private)
+static void isdn_net_softint(struct work_struct *work)
 {
-	isdn_net_local *lp = private;
+	isdn_net_local *lp = container_of(work, isdn_net_local, tqueue);
 	struct sk_buff *skb;
 
 	spin_lock_bh(&lp->xmit_lock);
@@ -2596,7 +2596,7 @@
 	netdev->local->netdev = netdev;
 	netdev->local->next = netdev->local;
 
-	INIT_WORK(&netdev->local->tqueue, (void *)(void *) isdn_net_softint, netdev->local);
+	INIT_WORK(&netdev->local->tqueue, isdn_net_softint);
 	spin_lock_init(&netdev->local->xmit_lock);
 
 	netdev->local->isdn_device = -1;
diff --git a/drivers/isdn/pcbit/drv.c b/drivers/isdn/pcbit/drv.c
index 6ead5e1..1966f34 100644
--- a/drivers/isdn/pcbit/drv.c
+++ b/drivers/isdn/pcbit/drv.c
@@ -68,8 +68,6 @@
 static int pcbit_check_msn(struct pcbit_dev *dev, char *msn);
 
 
-extern void pcbit_deliver(void * data);
-
 int pcbit_init_dev(int board, int mem_base, int irq)
 {
 	struct pcbit_dev *dev;
@@ -129,7 +127,7 @@
 	memset(dev->b2, 0, sizeof(struct pcbit_chan));
 	dev->b2->id = 1;
 
-	INIT_WORK(&dev->qdelivery, pcbit_deliver, dev);
+	INIT_WORK(&dev->qdelivery, pcbit_deliver);
 
 	/*
 	 *  interrupts
diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/isdn/pcbit/layer2.c
index 937fd21..0c9f6df 100644
--- a/drivers/isdn/pcbit/layer2.c
+++ b/drivers/isdn/pcbit/layer2.c
@@ -67,7 +67,6 @@
  *  Prototypes
  */
 
-void pcbit_deliver(void *data);
 static void pcbit_transmit(struct pcbit_dev *dev);
 
 static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack);
@@ -299,11 +298,12 @@
  */
 
 void
-pcbit_deliver(void *data)
+pcbit_deliver(struct work_struct *work)
 {
 	struct frame_buf *frame;
 	unsigned long flags, msg;
-	struct pcbit_dev *dev = (struct pcbit_dev *) data;
+	struct pcbit_dev *dev =
+		container_of(work, struct pcbit_dev, qdelivery);
 
 	spin_lock_irqsave(&dev->lock, flags);
 
diff --git a/drivers/isdn/pcbit/pcbit.h b/drivers/isdn/pcbit/pcbit.h
index 388bace..19c18e8 100644
--- a/drivers/isdn/pcbit/pcbit.h
+++ b/drivers/isdn/pcbit/pcbit.h
@@ -166,4 +166,6 @@
 #define L2_RUNNING  5
 #define L2_ERROR    6
 
+extern void pcbit_deliver(struct work_struct *work);
+
 #endif
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 9c39b98..176142c 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -76,6 +76,12 @@
 	  This option enables support for the Soekris net4801 and net4826 error
 	  LED.
 
+config LEDS_WRAP
+	tristate "LED Support for the WRAP series LEDs"
+	depends on LEDS_CLASS && SCx200_GPIO
+	help
+	  This option enables support for the PCEngines WRAP programmable LEDs.
+
 comment "LED Triggers"
 
 config LEDS_TRIGGERS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 6aa2aed..500de3dc 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -13,6 +13,7 @@
 obj-$(CONFIG_LEDS_S3C24XX)		+= leds-s3c24xx.o
 obj-$(CONFIG_LEDS_AMS_DELTA)		+= leds-ams-delta.o
 obj-$(CONFIG_LEDS_NET48XX)		+= leds-net48xx.o
+obj-$(CONFIG_LEDS_WRAP)			+= leds-wrap.o
 
 # LED Triggers
 obj-$(CONFIG_LEDS_TRIGGER_TIMER)	+= ledtrig-timer.o
diff --git a/drivers/leds/leds-wrap.c b/drivers/leds/leds-wrap.c
new file mode 100644
index 0000000..27fb2d8
--- /dev/null
+++ b/drivers/leds/leds-wrap.c
@@ -0,0 +1,142 @@
+/*
+ * LEDs driver for PCEngines WRAP
+ *
+ * Copyright (C) 2006 Kristian Kielhofner <kris@krisk.org>
+ *
+ * Based on leds-net48xx.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/err.h>
+#include <asm/io.h>
+#include <linux/scx200_gpio.h>
+
+#define DRVNAME "wrap-led"
+#define WRAP_ERROR_LED_GPIO	3
+#define	WRAP_EXTRA_LED_GPIO	18
+
+static struct platform_device *pdev;
+
+static void wrap_error_led_set(struct led_classdev *led_cdev,
+		enum led_brightness value)
+{
+	if (value)
+		scx200_gpio_set_low(WRAP_ERROR_LED_GPIO);
+	else
+		scx200_gpio_set_high(WRAP_ERROR_LED_GPIO);
+}
+
+static void wrap_extra_led_set(struct led_classdev *led_cdev,
+		enum led_brightness value)
+{
+	if (value)
+		scx200_gpio_set_low(WRAP_EXTRA_LED_GPIO);
+	else
+		scx200_gpio_set_high(WRAP_EXTRA_LED_GPIO);
+}
+
+static struct led_classdev wrap_error_led = {
+	.name		= "wrap:error",
+	.brightness_set	= wrap_error_led_set,
+};
+
+static struct led_classdev wrap_extra_led = {
+	.name           = "wrap:extra",
+	.brightness_set = wrap_extra_led_set,
+};
+
+#ifdef CONFIG_PM
+static int wrap_led_suspend(struct platform_device *dev,
+		pm_message_t state)
+{
+	led_classdev_suspend(&wrap_error_led);
+	led_classdev_suspend(&wrap_extra_led);
+	return 0;
+}
+
+static int wrap_led_resume(struct platform_device *dev)
+{
+	led_classdev_resume(&wrap_error_led);
+	led_classdev_resume(&wrap_extra_led);
+	return 0;
+}
+#else
+#define wrap_led_suspend NULL
+#define wrap_led_resume NULL
+#endif
+
+static int wrap_led_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	ret = led_classdev_register(&pdev->dev, &wrap_error_led);
+	if (ret == 0) {
+		ret = led_classdev_register(&pdev->dev, &wrap_extra_led);
+		if (ret < 0)
+			led_classdev_unregister(&wrap_error_led);
+	}
+	return ret;
+}
+
+static int wrap_led_remove(struct platform_device *pdev)
+{
+	led_classdev_unregister(&wrap_error_led);
+	led_classdev_unregister(&wrap_extra_led);
+	return 0;
+}
+
+static struct platform_driver wrap_led_driver = {
+	.probe		= wrap_led_probe,
+	.remove		= wrap_led_remove,
+	.suspend	= wrap_led_suspend,
+	.resume		= wrap_led_resume,
+	.driver		= {
+		.name		= DRVNAME,
+		.owner		= THIS_MODULE,
+	},
+};
+
+static int __init wrap_led_init(void)
+{
+	int ret;
+
+	if (!scx200_gpio_present()) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ret = platform_driver_register(&wrap_led_driver);
+	if (ret < 0)
+		goto out;
+
+	pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0);
+	if (IS_ERR(pdev)) {
+		ret = PTR_ERR(pdev);
+		platform_driver_unregister(&wrap_led_driver);
+		goto out;
+	}
+
+out:
+	return ret;
+}
+
+static void __exit wrap_led_exit(void)
+{
+	platform_device_unregister(pdev);
+	platform_driver_unregister(&wrap_led_driver);
+}
+
+module_init(wrap_led_init);
+module_exit(wrap_led_exit);
+
+MODULE_AUTHOR("Kristian Kielhofner <kris@krisk.org>");
+MODULE_DESCRIPTION("PCEngines WRAP LED driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/leds/ledtrig-ide-disk.c b/drivers/leds/ledtrig-ide-disk.c
index fa65188..54b155c 100644
--- a/drivers/leds/ledtrig-ide-disk.c
+++ b/drivers/leds/ledtrig-ide-disk.c
@@ -12,6 +12,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/timer.h>
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 29a8818a..d756bdb 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -12,6 +12,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/list.h>
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 7f8477d..92ccee8 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -228,4 +228,11 @@
 	tristate "Support for ANS LCD display"
 	depends on ADB_CUDA && PPC_PMAC
 
+config PMAC_RACKMETER
+	tristate "Support for Apple XServe front panel LEDs"
+	depends on PPC_PMAC
+	help
+	  This driver provides some support to control the front panel
+	  blue LEDs "vu-meter" of the XServe macs.
+
 endmenu
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index b53d45f..2dfc3f4 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -42,3 +42,4 @@
 				   windfarm_smu_sensors.o \
 				   windfarm_max6690_sensor.o \
 				   windfarm_lm75_sensor.o windfarm_pid.o
+obj-$(CONFIG_PMAC_RACKMETER)	+= rack-meter.o
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index be0bd34..d43ea81 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -267,12 +267,12 @@
 }
 
 static void
-__adb_probe_task(void *data)
+__adb_probe_task(struct work_struct *bullshit)
 {
 	adb_probe_task_pid = kernel_thread(adb_probe_task, NULL, SIGCHLD | CLONE_KERNEL);
 }
 
-static DECLARE_WORK(adb_reset_work, __adb_probe_task, NULL);
+static DECLARE_WORK(adb_reset_work, __adb_probe_task);
 
 int
 adb_reset_bus(void)
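The adb.c hunk is the static counterpart of the same workqueue conversion: DECLARE_WORK() now takes only the work item name and the handler, and a handler with no per-instance state simply ignores its work_struct argument. A minimal sketch with hypothetical names:

#include <linux/workqueue.h>

static void demo_reset_handler(struct work_struct *unused)
{
	/* one-shot action with no per-instance data; the argument is unused */
}

/* old: static DECLARE_WORK(demo_reset_work, demo_reset_handler, NULL); */
static DECLARE_WORK(demo_reset_work, demo_reset_handler);

void demo_trigger_reset(void)
{
	schedule_work(&demo_reset_work);
}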
diff --git a/drivers/macintosh/apm_emu.c b/drivers/macintosh/apm_emu.c
index 1293876..8862a83 100644
--- a/drivers/macintosh/apm_emu.c
+++ b/drivers/macintosh/apm_emu.c
@@ -529,7 +529,8 @@
 	if (apm_proc)
 		apm_proc->owner = THIS_MODULE;
 
-	misc_register(&apm_device);
+	if (misc_register(&apm_device) != 0)
+		printk(KERN_INFO "Could not create misc. device for apm\n");
 
 	pmu_register_sleep_notifier(&apm_sleep_notifier);
 
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
new file mode 100644
index 0000000..5ed41fe
--- /dev/null
+++ b/drivers/macintosh/rack-meter.c
@@ -0,0 +1,616 @@
+/*
+ * RackMac vu-meter driver
+ *
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                    <benh@kernel.crashing.org>
+ *
+ * Released under the term of the GNU GPL v2.
+ *
+ * Support the CPU-meter LEDs of the Xserve G5
+ *
+ * TODO: Implement PWM to do variable intensity and provide userland
+ * interface for fun. Also, the CPU-meter could be made nicer by being
+ * a bit less "immediate" but giving instead a more average load over
+ * time. Patches welcome :-)
+ *
+ */
+#undef DEBUG
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/dbdma.h>
+#include <asm/macio.h>
+#include <asm/keylargo.h>
+
+/* Number of samples in a sample buffer */
+#define SAMPLE_COUNT		256
+
+/* CPU meter sampling rate in ms */
+#define CPU_SAMPLING_RATE	250
+
+struct rackmeter_dma {
+	struct dbdma_cmd	cmd[4]			____cacheline_aligned;
+	u32			mark			____cacheline_aligned;
+	u32			buf1[SAMPLE_COUNT]	____cacheline_aligned;
+	u32			buf2[SAMPLE_COUNT]	____cacheline_aligned;
+} ____cacheline_aligned;
+
+struct rackmeter_cpu {
+	struct delayed_work	sniffer;
+	struct rackmeter	*rm;
+	cputime64_t		prev_wall;
+	cputime64_t		prev_idle;
+	int			zero;
+} ____cacheline_aligned;
+
+struct rackmeter {
+	struct macio_dev		*mdev;
+	unsigned int			irq;
+	struct device_node		*i2s;
+	u8				*ubuf;
+	struct dbdma_regs __iomem	*dma_regs;
+	void __iomem			*i2s_regs;
+	dma_addr_t			dma_buf_p;
+	struct rackmeter_dma		*dma_buf_v;
+	int				stale_irq;
+	struct rackmeter_cpu		cpu[2];
+	int				paused;
+	struct mutex			sem;
+};
+
+/* To be set as a tunable */
+static int rackmeter_ignore_nice;
+
+/* This GPIO is whacked by the OS X driver when initializing */
+#define RACKMETER_MAGIC_GPIO	0x78
+
+/* This is copied from cpufreq_ondemand, maybe we should put it in
+ * a common header somewhere
+ */
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+{
+	cputime64_t retval;
+
+	retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
+			kstat_cpu(cpu).cpustat.iowait);
+
+	if (rackmeter_ignore_nice)
+		retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+
+	return retval;
+}
+
+static void rackmeter_setup_i2s(struct rackmeter *rm)
+{
+	struct macio_chip *macio = rm->mdev->bus->chip;
+
+	/* First whack magic GPIO */
+	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, RACKMETER_MAGIC_GPIO, 5);
+
+
+	/* Call feature code to enable the sound channel and the proper
+	 * clock sources
+	 */
+	pmac_call_feature(PMAC_FTR_SOUND_CHIP_ENABLE, rm->i2s, 0, 1);
+
+	/* Power i2s and stop i2s clock. We whack MacIO FCRs directly for now.
+	 * This is a bit racy, thus we should add new platform functions to
+	 * handle that. snd-aoa needs that too
+	 */
+	MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_ENABLE);
+	MACIO_BIC(KEYLARGO_FCR1, KL1_I2S0_CLK_ENABLE_BIT);
+	(void)MACIO_IN32(KEYLARGO_FCR1);
+	udelay(10);
+
+	/* Then setup i2s. For now, we use the same magic value that
+	 * the OS X driver seems to use. We might want to play around
+	 * with the clock divisors later
+	 */
+	out_le32(rm->i2s_regs + 0x10, 0x01fa0000);
+	(void)in_le32(rm->i2s_regs + 0x10);
+	udelay(10);
+
+	/* Fully restart i2s*/
+	MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_CELL_ENABLE |
+		  KL1_I2S0_CLK_ENABLE_BIT);
+	(void)MACIO_IN32(KEYLARGO_FCR1);
+	udelay(10);
+}
+
+static void rackmeter_set_default_pattern(struct rackmeter *rm)
+{
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		if (i < 8)
+			rm->ubuf[i] = (i & 1) * 255;
+		else
+			rm->ubuf[i] = ((~i) & 1) * 255;
+	}
+}
+
+static void rackmeter_do_pause(struct rackmeter *rm, int pause)
+{
+	struct rackmeter_dma *rdma = rm->dma_buf_v;
+
+	pr_debug("rackmeter: %s\n", pause ? "paused" : "started");
+
+	rm->paused = pause;
+	if (pause) {
+		DBDMA_DO_STOP(rm->dma_regs);
+		return;
+	}
+	memset(rdma->buf1, 0, SAMPLE_COUNT * sizeof(u32));
+	memset(rdma->buf2, 0, SAMPLE_COUNT * sizeof(u32));
+
+	rm->dma_buf_v->mark = 0;
+
+	mb();
+	out_le32(&rm->dma_regs->cmdptr_hi, 0);
+	out_le32(&rm->dma_regs->cmdptr, rm->dma_buf_p);
+	out_le32(&rm->dma_regs->control, (RUN << 16) | RUN);
+}
+
+static void rackmeter_setup_dbdma(struct rackmeter *rm)
+{
+	struct rackmeter_dma *db = rm->dma_buf_v;
+	struct dbdma_cmd *cmd = db->cmd;
+
+	/* Make sure dbdma is reset */
+	DBDMA_DO_RESET(rm->dma_regs);
+
+	pr_debug("rackmeter: mark offset=0x%lx\n",
+		 offsetof(struct rackmeter_dma, mark));
+	pr_debug("rackmeter: buf1 offset=0x%lx\n",
+		 offsetof(struct rackmeter_dma, buf1));
+	pr_debug("rackmeter: buf2 offset=0x%lx\n",
+		 offsetof(struct rackmeter_dma, buf2));
+
+	/* Prepare 4 dbdma commands for the 2 buffers */
+	memset(cmd, 0, 4 * sizeof(struct dbdma_cmd));
+	st_le16(&cmd->req_count, 4);
+	st_le16(&cmd->command, STORE_WORD | INTR_ALWAYS | KEY_SYSTEM);
+	st_le32(&cmd->phy_addr, rm->dma_buf_p +
+		offsetof(struct rackmeter_dma, mark));
+	st_le32(&cmd->cmd_dep, 0x02000000);
+	cmd++;
+
+	st_le16(&cmd->req_count, SAMPLE_COUNT * 4);
+	st_le16(&cmd->command, OUTPUT_MORE);
+	st_le32(&cmd->phy_addr, rm->dma_buf_p +
+		offsetof(struct rackmeter_dma, buf1));
+	cmd++;
+
+	st_le16(&cmd->req_count, 4);
+	st_le16(&cmd->command, STORE_WORD | INTR_ALWAYS | KEY_SYSTEM);
+	st_le32(&cmd->phy_addr, rm->dma_buf_p +
+		offsetof(struct rackmeter_dma, mark));
+	st_le32(&cmd->cmd_dep, 0x01000000);
+	cmd++;
+
+	st_le16(&cmd->req_count, SAMPLE_COUNT * 4);
+	st_le16(&cmd->command, OUTPUT_MORE | BR_ALWAYS);
+	st_le32(&cmd->phy_addr, rm->dma_buf_p +
+		offsetof(struct rackmeter_dma, buf2));
+	st_le32(&cmd->cmd_dep, rm->dma_buf_p);
+
+	rackmeter_do_pause(rm, 0);
+}
+
+static void rackmeter_do_timer(struct work_struct *work)
+{
+	struct rackmeter_cpu *rcpu =
+		container_of(work, struct rackmeter_cpu, sniffer.work);
+	struct rackmeter *rm = rcpu->rm;
+	unsigned int cpu = smp_processor_id();
+	cputime64_t cur_jiffies, total_idle_ticks;
+	unsigned int total_ticks, idle_ticks;
+	int i, offset, load, cumm, pause;
+
+	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+	total_ticks = (unsigned int)cputime64_sub(cur_jiffies,
+						  rcpu->prev_wall);
+	rcpu->prev_wall = cur_jiffies;
+
+	total_idle_ticks = get_cpu_idle_time(cpu);
+	idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
+				rcpu->prev_idle);
+	rcpu->prev_idle = total_idle_ticks;
+
+	/* We do a very dumb calculation to update the LEDs for now,
+	 * we'll do better once we have actual PWM implemented
+	 */
+	load = (9 * (total_ticks - idle_ticks)) / total_ticks;
+
+	offset = cpu << 3;
+	cumm = 0;
+	for (i = 0; i < 8; i++) {
+		u8 ub = (load > i) ? 0xff : 0;
+		rm->ubuf[i + offset] = ub;
+		cumm |= ub;
+	}
+	rcpu->zero = (cumm == 0);
+
+	/* Now check if LEDs are all 0, we can stop DMA */
+	pause = (rm->cpu[0].zero && rm->cpu[1].zero);
+	if (pause != rm->paused) {
+		mutex_lock(&rm->sem);
+		pause = (rm->cpu[0].zero && rm->cpu[1].zero);
+		rackmeter_do_pause(rm, pause);
+		mutex_unlock(&rm->sem);
+	}
+	schedule_delayed_work_on(cpu, &rcpu->sniffer,
+				 msecs_to_jiffies(CPU_SAMPLING_RATE));
+}
+
+static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm)
+{
+	unsigned int cpu;
+
+	/* This driver works only with 1 or 2 CPUs numbered 0 and 1,
+	 * but that's really all we have on Apple Xserve. It doesn't
+	 * play very nicely with CPU hotplug either, but we don't do that
+	 * on those machines yet.
+	 */
+
+	rm->cpu[0].rm = rm;
+	INIT_DELAYED_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer);
+	rm->cpu[1].rm = rm;
+	INIT_DELAYED_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer);
+
+	for_each_online_cpu(cpu) {
+		struct rackmeter_cpu *rcpu;
+
+		if (cpu > 1)
+			continue;
+		rcpu = &rm->cpu[cpu];
+		rcpu->prev_idle = get_cpu_idle_time(cpu);
+		rcpu->prev_wall = jiffies64_to_cputime64(get_jiffies_64());
+		schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer,
+					 msecs_to_jiffies(CPU_SAMPLING_RATE));
+	}
+}
+
+static void __devexit rackmeter_stop_cpu_sniffer(struct rackmeter *rm)
+{
+	cancel_rearming_delayed_work(&rm->cpu[0].sniffer);
+	cancel_rearming_delayed_work(&rm->cpu[1].sniffer);
+}
+
+static int rackmeter_setup(struct rackmeter *rm)
+{
+	pr_debug("rackmeter: setting up i2s..\n");
+	rackmeter_setup_i2s(rm);
+
+	pr_debug("rackmeter: setting up default pattern..\n");
+	rackmeter_set_default_pattern(rm);
+
+	pr_debug("rackmeter: setting up dbdma..\n");
+	rackmeter_setup_dbdma(rm);
+
+	pr_debug("rackmeter: start CPU measurements..\n");
+	rackmeter_init_cpu_sniffer(rm);
+
+	printk(KERN_INFO "RackMeter initialized\n");
+
+	return 0;
+}
+
+/*  XXX FIXME: No PWM yet, this is 0/1 */
+static u32 rackmeter_calc_sample(struct rackmeter *rm, unsigned int index)
+{
+	int led;
+	u32 sample = 0;
+
+	for (led = 0; led < 16; led++) {
+		sample >>= 1;
+		sample |= ((rm->ubuf[led] >= 0x80) << 15);
+	}
+	return (sample << 17) | (sample >> 15);
+}
+
+static irqreturn_t rackmeter_irq(int irq, void *arg)
+{
+	struct rackmeter *rm = arg;
+	struct rackmeter_dma *db = rm->dma_buf_v;
+	unsigned int mark, i;
+	u32 *buf;
+
+	/* Flush PCI buffers with an MMIO read. Maybe we could actually
+	 * check the status one day ... in case things go wrong, though
+	 * this never happened to me
+	 */
+	(void)in_le32(&rm->dma_regs->status);
+
+	/* Make sure the CPU gets us in order */
+	rmb();
+
+	/* Read mark */
+	mark = db->mark;
+	if (mark != 1 && mark != 2) {
+		printk(KERN_WARNING "rackmeter: Incorrect DMA mark 0x%08x\n",
+		       mark);
+		/* We allow for 3 errors like that (stale DBDMA irqs) */
+		if (++rm->stale_irq > 3) {
+			printk(KERN_ERR "rackmeter: Too many errors,"
+			       " stopping DMA\n");
+			DBDMA_DO_RESET(rm->dma_regs);
+		}
+		return IRQ_HANDLED;
+	}
+
+	/* Next buffer we need to fill is mark value */
+	buf = mark == 1 ? db->buf1 : db->buf2;
+
+	/* Fill it now. This routine converts the 8 bits depth sample array
+	 * into the PWM bitmap for each LED.
+	 */
+	for (i = 0; i < SAMPLE_COUNT; i++)
+		buf[i] = rackmeter_calc_sample(rm, i);
+
+
+	return IRQ_HANDLED;
+}
+
+static int __devinit rackmeter_probe(struct macio_dev* mdev,
+				     const struct of_device_id *match)
+{
+	struct device_node *i2s = NULL, *np = NULL;
+	struct rackmeter *rm = NULL;
+	struct resource ri2s, rdma;
+	int rc = -ENODEV;
+
+	pr_debug("rackmeter_probe()\n");
+
+	/* Get i2s-a node */
+	while ((i2s = of_get_next_child(mdev->ofdev.node, i2s)) != NULL)
+	       if (strcmp(i2s->name, "i2s-a") == 0)
+		       break;
+	if (i2s == NULL) {
+		pr_debug("  i2s-a child not found\n");
+		goto bail;
+	}
+	/* Get lightshow or virtual sound */
+	while ((np = of_get_next_child(i2s, np)) != NULL) {
+	       if (strcmp(np->name, "lightshow") == 0)
+		       break;
+	       if ((strcmp(np->name, "sound") == 0) &&
+		   get_property(np, "virtual", NULL) != NULL)
+		       break;
+	}
+	if (np == NULL) {
+		pr_debug("  lightshow or sound+virtual child not found\n");
+		goto bail;
+	}
+
+	/* Create and initialize our instance data */
+	rm = kzalloc(sizeof(struct rackmeter), GFP_KERNEL);
+	if (rm == NULL) {
+		printk(KERN_ERR "rackmeter: failed to allocate memory !\n");
+		rc = -ENOMEM;
+		goto bail_release;
+	}
+	rm->mdev = mdev;
+	rm->i2s = i2s;
+	mutex_init(&rm->sem);
+	dev_set_drvdata(&mdev->ofdev.dev, rm);
+	/* Check resources availability. We need at least resource 0 and 1 */
+#if 0 /* Use that when i2s-a is finally an mdev per-se */
+	if (macio_resource_count(mdev) < 2 || macio_irq_count(mdev) < 2) {
+		printk(KERN_ERR
+		       "rackmeter: found match but lacks resources: %s"
+		       " (%d resources, %d interrupts)\n",
+		       mdev->ofdev.node->full_name);
+		rc = -ENXIO;
+		goto bail_free;
+	}
+	if (macio_request_resources(mdev, "rackmeter")) {
+		printk(KERN_ERR
+		       "rackmeter: failed to request resources: %s\n",
+		       mdev->ofdev.node->full_name);
+		rc = -EBUSY;
+		goto bail_free;
+	}
+	rm->irq = macio_irq(mdev, 1);
+#else
+	rm->irq = irq_of_parse_and_map(i2s, 1);
+	if (rm->irq == NO_IRQ ||
+	    of_address_to_resource(i2s, 0, &ri2s) ||
+	    of_address_to_resource(i2s, 1, &rdma)) {
+		printk(KERN_ERR
+		       "rackmeter: found match but lacks resources: %s",
+		       mdev->ofdev.node->full_name);
+		rc = -ENXIO;
+		goto bail_free;
+	}
+#endif
+
+	pr_debug("  i2s @0x%08x\n", (unsigned int)ri2s.start);
+	pr_debug("  dma @0x%08x\n", (unsigned int)rdma.start);
+	pr_debug("  irq %d\n", rm->irq);
+
+	rm->ubuf = (u8 *)__get_free_page(GFP_KERNEL);
+	if (rm->ubuf == NULL) {
+		printk(KERN_ERR
+		       "rackmeter: failed to allocate samples page !\n");
+		rc = -ENOMEM;
+		goto bail_release;
+	}
+
+	rm->dma_buf_v = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev,
+					   sizeof(struct rackmeter_dma),
+					   &rm->dma_buf_p, GFP_KERNEL);
+	if (rm->dma_buf_v == NULL) {
+		printk(KERN_ERR
+		       "rackmeter: failed to allocate dma buffer !\n");
+		rc = -ENOMEM;
+		goto bail_free_samples;
+	}
+#if 0
+	rm->i2s_regs = ioremap(macio_resource_start(mdev, 0), 0x1000);
+#else
+	rm->i2s_regs = ioremap(ri2s.start, 0x1000);
+#endif
+	if (rm->i2s_regs == NULL) {
+		printk(KERN_ERR
+		       "rackmeter: failed to map i2s registers !\n");
+		rc = -ENXIO;
+		goto bail_free_dma;
+	}
+#if 0
+	rm->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x100);
+#else
+	rm->dma_regs = ioremap(rdma.start, 0x100);
+#endif
+	if (rm->dma_regs == NULL) {
+		printk(KERN_ERR
+		       "rackmeter: failed to map dma registers !\n");
+		rc = -ENXIO;
+		goto bail_unmap_i2s;
+	}
+
+	rc = rackmeter_setup(rm);
+	if (rc) {
+		printk(KERN_ERR
+		       "rackmeter: failed to initialize !\n");
+		rc = -ENXIO;
+		goto bail_unmap_dma;
+	}
+
+	rc = request_irq(rm->irq, rackmeter_irq, 0, "rackmeter", rm);
+	if (rc != 0) {
+		printk(KERN_ERR
+		       "rackmeter: failed to request interrupt !\n");
+		goto bail_stop_dma;
+	}
+	of_node_put(np);
+	return 0;
+
+ bail_stop_dma:
+	DBDMA_DO_RESET(rm->dma_regs);
+ bail_unmap_dma:
+	iounmap(rm->dma_regs);
+ bail_unmap_i2s:
+	iounmap(rm->i2s_regs);
+ bail_free_dma:
+	dma_free_coherent(&macio_get_pci_dev(mdev)->dev,
+			  sizeof(struct rackmeter_dma),
+			  rm->dma_buf_v, rm->dma_buf_p);
+ bail_free_samples:
+	free_page((unsigned long)rm->ubuf);
+ bail_release:
+#if 0
+	macio_release_resources(mdev);
+#endif
+ bail_free:
+	kfree(rm);
+ bail:
+	of_node_put(i2s);
+	of_node_put(np);
+	dev_set_drvdata(&mdev->ofdev.dev, NULL);
+	return rc;
+}
+
+static int __devexit rackmeter_remove(struct macio_dev* mdev)
+{
+	struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);
+
+	/* Stop CPU sniffer timer & work queues */
+	rackmeter_stop_cpu_sniffer(rm);
+
+	/* Clear reference to private data */
+	dev_set_drvdata(&mdev->ofdev.dev, NULL);
+
+	/* Stop/reset dbdma */
+	DBDMA_DO_RESET(rm->dma_regs);
+
+	/* Release the IRQ */
+	free_irq(rm->irq, rm);
+
+	/* Unmap registers */
+	iounmap(rm->dma_regs);
+	iounmap(rm->i2s_regs);
+
+	/* Free DMA */
+	dma_free_coherent(&macio_get_pci_dev(mdev)->dev,
+			  sizeof(struct rackmeter_dma),
+			  rm->dma_buf_v, rm->dma_buf_p);
+
+	/* Free samples */
+	free_page((unsigned long)rm->ubuf);
+
+#if 0
+	/* Release resources */
+	macio_release_resources(mdev);
+#endif
+
+	/* Get rid of me */
+	kfree(rm);
+
+	return 0;
+}
+
+static int rackmeter_shutdown(struct macio_dev* mdev)
+{
+	struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);
+
+	if (rm == NULL)
+		return -ENODEV;
+
+	/* Stop CPU sniffer timer & work queues */
+	rackmeter_stop_cpu_sniffer(rm);
+
+	/* Stop/reset dbdma */
+	DBDMA_DO_RESET(rm->dma_regs);
+
+	return 0;
+}
+
+static struct of_device_id rackmeter_match[] = {
+	{ .name = "i2s" },
+	{ }
+};
+
+static struct macio_driver rackmeter_drv = {
+	.name = "rackmeter",
+	.owner = THIS_MODULE,
+	.match_table = rackmeter_match,
+	.probe = rackmeter_probe,
+	.remove = rackmeter_remove,
+	.shutdown = rackmeter_shutdown,
+};
+
+
+static int __init rackmeter_init(void)
+{
+	pr_debug("rackmeter_init()\n");
+
+	return macio_register_driver(&rackmeter_drv);
+}
+
+static void __exit rackmeter_exit(void)
+{
+	pr_debug("rackmeter_exit()\n");
+
+	macio_unregister_driver(&rackmeter_drv);
+}
+
+module_init(rackmeter_init);
+module_exit(rackmeter_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("RackMeter: Support vu-meter on XServe front panel");
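rackmeter_do_timer() above maps CPU load onto the eight LEDs driven for each CPU with a deliberately crude calculation: load = 9 * busy_ticks / total_ticks gives a 0-9 bar level, and each LED is either fully on (0xff) or off until real PWM support exists. A standalone sketch of that mapping, assuming the same 8-LEDs-per-CPU layout:

#include <stdio.h>

#define LEDS_PER_CPU	8

/* 0..9 bar level from tick counts, as in rackmeter_do_timer() */
static unsigned int bar_level(unsigned int total_ticks, unsigned int idle_ticks)
{
	return (9 * (total_ticks - idle_ticks)) / total_ticks;
}

int main(void)
{
	unsigned char ubuf[LEDS_PER_CPU];
	unsigned int load = bar_level(250, 100);	/* 60% busy -> level 5 */
	int i;

	/* LED i lights while the level exceeds its position; no PWM yet */
	for (i = 0; i < LEDS_PER_CPU; i++)
		ubuf[i] = (load > i) ? 0xff : 0x00;

	for (i = 0; i < LEDS_PER_CPU; i++)
		printf("%c", ubuf[i] ? '#' : '.');
	printf("  (level %u)\n", load);
	return 0;
}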
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index ade25b3..6dde27a 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -46,6 +46,7 @@
 #include <asm/abs_addr.h>
 #include <asm/uaccess.h>
 #include <asm/of_device.h>
+#include <asm/of_platform.h>
 
 #define VERSION "0.7"
 #define AUTHOR  "(c) 2005 Benjamin Herrenschmidt, IBM Corp."
@@ -600,7 +601,7 @@
  * sysfs visibility
  */
 
-static void smu_expose_childs(void *unused)
+static void smu_expose_childs(struct work_struct *unused)
 {
 	struct device_node *np;
 
@@ -610,7 +611,7 @@
 						  &smu->of_dev->dev);
 }
 
-static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL);
+static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs);
 
 static int smu_platform_probe(struct of_device* dev,
 			      const struct of_device_id *match)
@@ -653,7 +654,7 @@
 	 * I'm a bit too far from figuring out how that works with those
 	 * new chipsets, but that will come back and bite us
 	 */
-	of_register_driver(&smu_of_platform_driver);
+	of_register_platform_driver(&smu_of_platform_driver);
 	return 0;
 }
 
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index a0f30d0..3d3bf16 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -24,13 +24,14 @@
 #include <linux/suspend.h>
 #include <linux/kthread.h>
 #include <linux/moduleparam.h>
+#include <linux/freezer.h>
 
 #include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/system.h>
 #include <asm/sections.h>
-#include <asm/of_device.h>
+#include <asm/of_platform.h>
 
 #undef DEBUG
 
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index d00c0c3..2e4ad44 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -129,6 +129,7 @@
 #include <asm/sections.h>
 #include <asm/of_device.h>
 #include <asm/macio.h>
+#include <asm/of_platform.h>
 
 #include "therm_pm72.h"
 
@@ -2236,14 +2237,14 @@
 		return -ENODEV;
 	}
 
-	of_register_driver(&fcu_of_platform_driver);
+	of_register_platform_driver(&fcu_of_platform_driver);
 	
 	return 0;
 }
 
 static void __exit therm_pm72_exit(void)
 {
-	of_unregister_driver(&fcu_of_platform_driver);
+	of_unregister_platform_driver(&fcu_of_platform_driver);
 
 	if (of_dev)
 		of_device_unregister(of_dev);
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 738faab..a1d3a98 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -36,12 +36,13 @@
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+
 #include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/system.h>
 #include <asm/sections.h>
-#include <asm/of_device.h>
+#include <asm/of_platform.h>
 #include <asm/macio.h>
 
 #define LOG_TEMP		0			/* continously log temperature */
@@ -511,14 +512,14 @@
 		return -ENODEV;
 	}
 
-	of_register_driver( &therm_of_driver );
+	of_register_platform_driver( &therm_of_driver );
 	return 0;
 }
 
 static void __exit
 g4fan_exit( void )
 {
-	of_unregister_driver( &therm_of_driver );
+	of_unregister_platform_driver( &therm_of_driver );
 
 	if( x.of_dev )
 		of_device_unregister( x.of_dev );
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index e63ea1c..c8558d4 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -42,7 +42,7 @@
 #include <linux/interrupt.h>
 #include <linux/device.h>
 #include <linux/sysdev.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/syscalls.h>
 #include <linux/cpu.h>
 #include <asm/prom.h>
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index ab3faa7..e947af9 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -34,6 +34,7 @@
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 #include <asm/prom.h>
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 08a40f4..a1086ee 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -20,6 +20,7 @@
 #include <asm/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
+#include <asm/unaligned.h>
 
 #include "dm.h"
 
@@ -85,7 +86,10 @@
 	 */
 	struct crypt_iv_operations *iv_gen_ops;
 	char *iv_mode;
-	struct crypto_cipher *iv_gen_private;
+	union {
+		struct crypto_cipher *essiv_tfm;
+		int benbi_shift;
+	} iv_gen_private;
 	sector_t iv_offset;
 	unsigned int iv_size;
 
@@ -101,7 +105,7 @@
 #define MIN_POOL_PAGES 32
 #define MIN_BIO_PAGES  8
 
-static kmem_cache_t *_crypt_io_pool;
+static struct kmem_cache *_crypt_io_pool;
 
 /*
  * Different IV generation algorithms:
@@ -113,6 +117,9 @@
  *        encrypted with the bulk cipher using a salt as key. The salt
  *        should be derived from the bulk cipher's key via hashing.
  *
+ * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
+ *        (needed for LRW-32-AES and possibly other narrow block modes)
+ *
  * plumb: unimplemented, see:
  * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
  */
@@ -191,21 +198,61 @@
 	}
 	kfree(salt);
 
-	cc->iv_gen_private = essiv_tfm;
+	cc->iv_gen_private.essiv_tfm = essiv_tfm;
 	return 0;
 }
 
 static void crypt_iv_essiv_dtr(struct crypt_config *cc)
 {
-	crypto_free_cipher(cc->iv_gen_private);
-	cc->iv_gen_private = NULL;
+	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
+	cc->iv_gen_private.essiv_tfm = NULL;
 }
 
 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 {
 	memset(iv, 0, cc->iv_size);
 	*(u64 *)iv = cpu_to_le64(sector);
-	crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv);
+	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
+	return 0;
+}
+
+static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
+			      const char *opts)
+{
+	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
+	int log = long_log2(bs);
+
+	/* we need to calculate how far we must shift the sector count
+	 * to get the cipher block count, we use this shift in _gen */
+
+	if (1 << log != bs) {
+		ti->error = "cypher blocksize is not a power of 2";
+		return -EINVAL;
+	}
+
+	if (log > 9) {
+		ti->error = "cypher blocksize is > 512";
+		return -EINVAL;
+	}
+
+	cc->iv_gen_private.benbi_shift = 9 - log;
+
+	return 0;
+}
+
+static void crypt_iv_benbi_dtr(struct crypt_config *cc)
+{
+}
+
+static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+{
+	__be64 val;
+
+	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
+
+	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
+	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
+
 	return 0;
 }
 
@@ -219,13 +266,18 @@
 	.generator = crypt_iv_essiv_gen
 };
 
+static struct crypt_iv_operations crypt_iv_benbi_ops = {
+	.ctr	   = crypt_iv_benbi_ctr,
+	.dtr	   = crypt_iv_benbi_dtr,
+	.generator = crypt_iv_benbi_gen
+};
 
 static int
 crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                           struct scatterlist *in, unsigned int length,
                           int write, sector_t sector)
 {
-	u8 iv[cc->iv_size];
+	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
 	struct blkcipher_desc desc = {
 		.tfm = cc->tfm,
 		.info = iv,
@@ -458,11 +510,11 @@
  * interrupt context.
  */
 static struct workqueue_struct *_kcryptd_workqueue;
-static void kcryptd_do_work(void *data);
+static void kcryptd_do_work(struct work_struct *work);
 
 static void kcryptd_queue_io(struct crypt_io *io)
 {
-	INIT_WORK(&io->work, kcryptd_do_work, io);
+	INIT_WORK(&io->work, kcryptd_do_work);
 	queue_work(_kcryptd_workqueue, &io->work);
 }
 
@@ -618,9 +670,9 @@
 	dec_pending(io, crypt_convert(cc, &ctx));
 }
 
-static void kcryptd_do_work(void *data)
+static void kcryptd_do_work(struct work_struct *work)
 {
-	struct crypt_io *io = data;
+	struct crypt_io *io = container_of(work, struct crypt_io, work);
 
 	if (io->post_process)
 		process_read_endio(io);
@@ -768,7 +820,7 @@
 	cc->tfm = tfm;
 
 	/*
-	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>".
+	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
 	 * See comments at iv code
 	 */
 
@@ -778,6 +830,8 @@
 		cc->iv_gen_ops = &crypt_iv_plain_ops;
 	else if (strcmp(ivmode, "essiv") == 0)
 		cc->iv_gen_ops = &crypt_iv_essiv_ops;
+	else if (strcmp(ivmode, "benbi") == 0)
+		cc->iv_gen_ops = &crypt_iv_benbi_ops;
 	else {
 		ti->error = "Invalid IV mode";
 		goto bad2;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d754e0b..cf8bf05 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -101,11 +101,11 @@
 
 #define MIN_IOS 256	/* Mempool size */
 
-static kmem_cache_t *_mpio_cache;
+static struct kmem_cache *_mpio_cache;
 
 struct workqueue_struct *kmultipathd;
-static void process_queued_ios(void *data);
-static void trigger_event(void *data);
+static void process_queued_ios(struct work_struct *work);
+static void trigger_event(struct work_struct *work);
 
 
 /*-----------------------------------------------
@@ -173,8 +173,8 @@
 		INIT_LIST_HEAD(&m->priority_groups);
 		spin_lock_init(&m->lock);
 		m->queue_io = 1;
-		INIT_WORK(&m->process_queued_ios, process_queued_ios, m);
-		INIT_WORK(&m->trigger_event, trigger_event, m);
+		INIT_WORK(&m->process_queued_ios, process_queued_ios);
+		INIT_WORK(&m->trigger_event, trigger_event);
 		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
 		if (!m->mpio_pool) {
 			kfree(m);
@@ -379,9 +379,10 @@
 	}
 }
 
-static void process_queued_ios(void *data)
+static void process_queued_ios(struct work_struct *work)
 {
-	struct multipath *m = (struct multipath *) data;
+	struct multipath *m =
+		container_of(work, struct multipath, process_queued_ios);
 	struct hw_handler *hwh = &m->hw_handler;
 	struct pgpath *pgpath = NULL;
 	unsigned init_required = 0, must_queue = 1;
@@ -421,9 +422,10 @@
  * An event is triggered whenever a path is taken out of use.
  * Includes path failure and PG bypass.
  */
-static void trigger_event(void *data)
+static void trigger_event(struct work_struct *work)
 {
-	struct multipath *m = (struct multipath *) data;
+	struct multipath *m =
+		container_of(work, struct multipath, trigger_event);
 
 	dm_table_event(m->ti->table);
 }
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 48a653b..fc8cbb1 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -883,7 +883,7 @@
 	do_writes(ms, &writes);
 }
 
-static void do_work(void *ignored)
+static void do_work(struct work_struct *ignored)
 {
 	struct mirror_set *ms;
 
@@ -1269,7 +1269,7 @@
 		dm_dirty_log_exit();
 		return r;
 	}
-	INIT_WORK(&_kmirrord_work, do_work, NULL);
+	INIT_WORK(&_kmirrord_work, do_work);
 
 	r = dm_register_target(&mirror_target);
 	if (r < 0) {
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 5281e00..b0ce2ce 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -40,7 +40,7 @@
 #define SNAPSHOT_PAGES 256
 
 struct workqueue_struct *ksnapd;
-static void flush_queued_bios(void *data);
+static void flush_queued_bios(struct work_struct *work);
 
 struct pending_exception {
 	struct exception e;
@@ -88,8 +88,8 @@
  * Hash table mapping origin volumes to lists of snapshots and
  * a lock to protect it
  */
-static kmem_cache_t *exception_cache;
-static kmem_cache_t *pending_cache;
+static struct kmem_cache *exception_cache;
+static struct kmem_cache *pending_cache;
 static mempool_t *pending_pool;
 
 /*
@@ -228,7 +228,7 @@
 	return 0;
 }
 
-static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem)
+static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
 {
 	struct list_head *slot;
 	struct exception *ex, *next;
@@ -528,7 +528,7 @@
 	}
 
 	bio_list_init(&s->queued_bios);
-	INIT_WORK(&s->queued_bios_work, flush_queued_bios, s);
+	INIT_WORK(&s->queued_bios_work, flush_queued_bios);
 
 	/* Add snapshot to the list of snapshots for this origin */
 	/* Exceptions aren't triggered till snapshot_resume() is called */
@@ -603,9 +603,10 @@
 	}
 }
 
-static void flush_queued_bios(void *data)
+static void flush_queued_bios(struct work_struct *work)
 {
-	struct dm_snapshot *s = (struct dm_snapshot *) data;
+	struct dm_snapshot *s =
+		container_of(work, struct dm_snapshot, queued_bios_work);
 	struct bio *queued_bios;
 	unsigned long flags;
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index fc4f743..7ec1b11 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -121,8 +121,8 @@
 };
 
 #define MIN_IOS 256
-static kmem_cache_t *_io_cache;
-static kmem_cache_t *_tio_cache;
+static struct kmem_cache *_io_cache;
+static struct kmem_cache *_tio_cache;
 
 static int __init local_init(void)
 {
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index f1db6ef..b46f6c5 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -203,7 +203,7 @@
 /* FIXME: this should scale with the number of pages */
 #define MIN_JOBS 512
 
-static kmem_cache_t *_job_cache;
+static struct kmem_cache *_job_cache;
 static mempool_t *_job_pool;
 
 /*
@@ -417,7 +417,7 @@
 /*
  * kcopyd does this every time it's woken up.
  */
-static void do_work(void *ignored)
+static void do_work(struct work_struct *ignored)
 {
 	/*
 	 * The order that these are called is *very* important.
@@ -628,7 +628,7 @@
 	}
 
 	kcopyd_clients++;
-	INIT_WORK(&_kcopyd_work, do_work, NULL);
+	INIT_WORK(&_kcopyd_work, do_work);
 	mutex_unlock(&kcopyd_init_lock);
 	return 0;
 }
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8cbf9c9..6c4345b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -39,10 +39,10 @@
 #include <linux/raid/bitmap.h>
 #include <linux/sysctl.h>
 #include <linux/buffer_head.h> /* for invalidate_bdev */
-#include <linux/suspend.h>
 #include <linux/poll.h>
 #include <linux/mutex.h>
 #include <linux/ctype.h>
+#include <linux/freezer.h>
 
 #include <linux/init.h>
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 69c3e20..52914d5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -348,7 +348,7 @@
 
 static int grow_stripes(raid5_conf_t *conf, int num)
 {
-	kmem_cache_t *sc;
+	struct kmem_cache *sc;
 	int devs = conf->raid_disks;
 
 	sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
@@ -397,7 +397,7 @@
 	LIST_HEAD(newstripes);
 	struct disk_info *ndisks;
 	int err = 0;
-	kmem_cache_t *sc;
+	struct kmem_cache *sc;
 	int i;
 
 	if (newsize <= conf->pool_size)
diff --git a/drivers/media/dvb/b2c2/flexcop-pci.c b/drivers/media/dvb/b2c2/flexcop-pci.c
index 0689324..6e16680 100644
--- a/drivers/media/dvb/b2c2/flexcop-pci.c
+++ b/drivers/media/dvb/b2c2/flexcop-pci.c
@@ -63,7 +63,7 @@
 
 	unsigned long last_irq;
 
-	struct work_struct irq_check_work;
+	struct delayed_work irq_check_work;
 
 	struct flexcop_device *fc_dev;
 };
@@ -97,9 +97,10 @@
 	return 0;
 }
 
-static void flexcop_pci_irq_check_work(void *data)
+static void flexcop_pci_irq_check_work(struct work_struct *work)
 {
-	struct flexcop_pci *fc_pci = data;
+	struct flexcop_pci *fc_pci =
+		container_of(work, struct flexcop_pci, irq_check_work.work);
 	struct flexcop_device *fc = fc_pci->fc_dev;
 
 	flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714);
@@ -371,7 +372,7 @@
 	if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
 		goto err_fc_exit;
 
-	INIT_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work, fc_pci);
+	INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);
 
 	return ret;
 
diff --git a/drivers/media/dvb/cinergyT2/cinergyT2.c b/drivers/media/dvb/cinergyT2/cinergyT2.c
index ff7d4f5..9123147 100644
--- a/drivers/media/dvb/cinergyT2/cinergyT2.c
+++ b/drivers/media/dvb/cinergyT2/cinergyT2.c
@@ -30,6 +30,7 @@
 #include <linux/input.h>
 #include <linux/dvb/frontend.h>
 #include <linux/mutex.h>
+#include <linux/mm.h>
 
 #include "dmxdev.h"
 #include "dvb_demux.h"
@@ -127,7 +128,7 @@
 
 	struct dvbt_set_parameters_msg param;
 	struct dvbt_get_status_msg status;
-	struct work_struct query_work;
+	struct delayed_work query_work;
 
 	wait_queue_head_t poll_wq;
 	int pending_fe_events;
@@ -141,7 +142,7 @@
 #ifdef ENABLE_RC
 	struct input_dev *rc_input_dev;
 	char phys[64];
-	struct work_struct rc_query_work;
+	struct delayed_work rc_query_work;
 	int rc_input_event;
 	u32 rc_last_code;
 	unsigned long last_event_jiffies;
@@ -275,8 +276,7 @@
 	int i;
 
 	for (i=0; i<STREAM_URB_COUNT; i++)
-		if (cinergyt2->stream_urb[i])
-			usb_free_urb(cinergyt2->stream_urb[i]);
+		usb_free_urb(cinergyt2->stream_urb[i]);
 
 	usb_buffer_free(cinergyt2->udev, STREAM_URB_COUNT*STREAM_BUF_SIZE,
 			    cinergyt2->streambuf, cinergyt2->streambuf_dmahandle);
@@ -287,7 +287,7 @@
 	int i;
 
 	cinergyt2->streambuf = usb_buffer_alloc(cinergyt2->udev, STREAM_URB_COUNT*STREAM_BUF_SIZE,
-					      SLAB_KERNEL, &cinergyt2->streambuf_dmahandle);
+					      GFP_KERNEL, &cinergyt2->streambuf_dmahandle);
 	if (!cinergyt2->streambuf) {
 		dprintk(1, "failed to alloc consistent stream memory area, bailing out!\n");
 		return -ENOMEM;
@@ -320,8 +320,7 @@
 	cinergyt2_control_stream_transfer(cinergyt2, 0);
 
 	for (i=0; i<STREAM_URB_COUNT; i++)
-		if (cinergyt2->stream_urb[i])
-			usb_kill_urb(cinergyt2->stream_urb[i]);
+		usb_kill_urb(cinergyt2->stream_urb[i]);
 }
 
 static int cinergyt2_start_stream_xfer (struct cinergyt2 *cinergyt2)
@@ -724,9 +723,10 @@
 
 #ifdef ENABLE_RC
 
-static void cinergyt2_query_rc (void *data)
+static void cinergyt2_query_rc (struct work_struct *work)
 {
-	struct cinergyt2 *cinergyt2 = data;
+	struct cinergyt2 *cinergyt2 =
+		container_of(work, struct cinergyt2, rc_query_work.work);
 	char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS };
 	struct cinergyt2_rc_event rc_events[12];
 	int n, len, i;
@@ -807,7 +807,7 @@
 	strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys));
 	cinergyt2->rc_input_event = KEY_MAX;
 	cinergyt2->rc_last_code = ~0;
-	INIT_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc, cinergyt2);
+	INIT_DELAYED_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc);
 
 	input_dev->name = DRIVER_NAME " remote control";
 	input_dev->phys = cinergyt2->phys;
@@ -848,9 +848,10 @@
 
 #endif /* ENABLE_RC */
 
-static void cinergyt2_query (void *data)
+static void cinergyt2_query (struct work_struct *work)
 {
-	struct cinergyt2 *cinergyt2 = (struct cinergyt2 *) data;
+	struct cinergyt2 *cinergyt2 =
+		container_of(work, struct cinergyt2, query_work.work);
 	char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS };
 	struct dvbt_get_status_msg *s = &cinergyt2->status;
 	uint8_t lock_bits;
@@ -894,7 +895,7 @@
 
 	mutex_init(&cinergyt2->sem);
 	init_waitqueue_head (&cinergyt2->poll_wq);
-	INIT_WORK(&cinergyt2->query_work, cinergyt2_query, cinergyt2);
+	INIT_DELAYED_WORK(&cinergyt2->query_work, cinergyt2_query);
 
 	cinergyt2->udev = interface_to_usbdev(intf);
 	cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
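
A second recurring cleanup in the cinergyT2.c hunks (and in ttusb_dec.c, pvrusb2 and pwc below) is dropping the NULL checks around usb_free_urb() and usb_kill_urb(), since these helpers accept NULL as a no-op, much like free(). An illustrative userspace sketch of why the per-call guards become redundant once the callee tolerates NULL (the urb_like type is hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct urb_like { int length; };

    static void free_urb_like(struct urb_like *u)
    {
        if (!u)                 /* NULL is explicitly tolerated */
            return;
        free(u);
    }

    int main(void)
    {
        struct urb_like *urbs[3] = { NULL, NULL, NULL };

        urbs[1] = calloc(1, sizeof(*urbs[1]));  /* only one slot allocated */

        for (int i = 0; i < 3; i++)
            free_urb_like(urbs[i]);             /* no NULL checks at call sites */

        puts("all slots released");
        return 0;
    }
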
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index a2ab2ee..e859722 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -34,7 +34,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/list.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/jiffies.h>
 #include <asm/processor.h>
 
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 8859ab7..ebf4dc5 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -127,6 +127,7 @@
 	int in_use;
 	struct net_device_stats stats;
 	u16 pid;
+	struct net_device *net;
 	struct dvb_net *host;
 	struct dmx_demux *demux;
 	struct dmx_section_feed *secfeed;
@@ -1123,10 +1124,11 @@
 }
 
 
-static void wq_set_multicast_list (void *data)
+static void wq_set_multicast_list (struct work_struct *work)
 {
-	struct net_device *dev = data;
-	struct dvb_net_priv *priv = dev->priv;
+	struct dvb_net_priv *priv =
+		container_of(work, struct dvb_net_priv, set_multicast_list_wq);
+	struct net_device *dev = priv->net;
 
 	dvb_net_feed_stop(dev);
 	priv->rx_mode = RX_MODE_UNI;
@@ -1167,9 +1169,11 @@
 }
 
 
-static void wq_restart_net_feed (void *data)
+static void wq_restart_net_feed (struct work_struct *work)
 {
-	struct net_device *dev = data;
+	struct dvb_net_priv *priv =
+		container_of(work, struct dvb_net_priv, restart_net_feed_wq);
+	struct net_device *dev = priv->net;
 
 	if (netif_running(dev)) {
 		dvb_net_feed_stop(dev);
@@ -1276,6 +1280,7 @@
 	dvbnet->device[if_num] = net;
 
 	priv = net->priv;
+	priv->net = net;
 	priv->demux = dvbnet->demux;
 	priv->pid = pid;
 	priv->rx_mode = RX_MODE_UNI;
@@ -1284,8 +1289,8 @@
 	priv->feedtype = feedtype;
 	reset_ule(priv);
 
-	INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list, net);
-	INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed, net);
+	INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list);
+	INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed);
 	mutex_init(&priv->mutex);
 
 	net->base_addr = pid;
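
The dvb_net.c conversion shows the variant where the old data pointer was not the structure embedding the work item: the handlers need the net_device, but container_of() can only yield the private data, so the patch adds a priv->net back-pointer filled in once at device setup. A small userspace sketch of that arrangement (all type and field names illustrative):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };
    struct net_device_like;

    struct net_priv_like {
        struct net_device_like *net;            /* back-pointer, as priv->net */
        struct work_struct set_multicast_wq;
    };

    struct net_device_like {
        const char *name;
        struct net_priv_like priv;
    };

    static void set_multicast_like(struct work_struct *work)
    {
        struct net_priv_like *priv =
            container_of(work, struct net_priv_like, set_multicast_wq);
        printf("reprogramming filters on %s\n", priv->net->name);
    }

    int main(void)
    {
        struct net_device_like dev = { .name = "dvb0_0" };
        dev.priv.net = &dev;                    /* the one-time priv->net = net */
        set_multicast_like(&dev.priv.set_multicast_wq);
        return 0;
    }
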
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
index 0a3a0b6..794e447 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
@@ -13,9 +13,10 @@
  *
  * TODO: Fix the repeat rate of the input device.
  */
-static void dvb_usb_read_remote_control(void *data)
+static void dvb_usb_read_remote_control(struct work_struct *work)
 {
-	struct dvb_usb_device *d = data;
+	struct dvb_usb_device *d =
+		container_of(work, struct dvb_usb_device, rc_query_work.work);
 	u32 event;
 	int state;
 
@@ -128,7 +129,7 @@
 
 	input_register_device(d->rc_input_dev);
 
-	INIT_WORK(&d->rc_query_work, dvb_usb_read_remote_control, d);
+	INIT_DELAYED_WORK(&d->rc_query_work, dvb_usb_read_remote_control);
 
 	info("schedule remote query interval to %d msecs.", d->props.rc_interval);
 	schedule_delayed_work(&d->rc_query_work,msecs_to_jiffies(d->props.rc_interval));
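
dvb-usb-remote.c, like flexcop-pci.c and cinergyT2.c above, additionally moves periodically rescheduled work onto struct delayed_work, whose first member is the plain work_struct; handlers therefore step back through the nested ".work" member. A hedged userspace sketch of that nesting (types and the dvb_usb_device_like name are illustrative only):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct  { int pending; };
    struct delayed_work { struct work_struct work; long expires; };

    struct dvb_usb_device_like {
        int rc_events;
        struct delayed_work rc_query_work;      /* delayed, not plain, work */
    };

    static void read_remote_control_like(struct work_struct *work)
    {
        /* Walk back through the nested ".work" member to the device. */
        struct dvb_usb_device_like *d =
            container_of(work, struct dvb_usb_device_like, rc_query_work.work);
        d->rc_events++;
        printf("polled remote, %d event(s) so far\n", d->rc_events);
    }

    int main(void)
    {
        struct dvb_usb_device_like dev = { 0 };
        read_remote_control_like(&dev.rc_query_work.work);
        return 0;
    }
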
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 376c45a..0d72173 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -369,7 +369,7 @@
 	/* remote control */
 	struct input_dev *rc_input_dev;
 	char rc_phys[64];
-	struct work_struct rc_query_work;
+	struct delayed_work rc_query_work;
 	u32 last_event;
 	int last_state;
 
diff --git a/drivers/media/dvb/dvb-usb/usb-urb.c b/drivers/media/dvb/dvb-usb/usb-urb.c
index 78035ee..397f51a 100644
--- a/drivers/media/dvb/dvb-usb/usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/usb-urb.c
@@ -116,7 +116,7 @@
 	for (stream->buf_num = 0; stream->buf_num < num; stream->buf_num++) {
 		deb_mem("allocating buffer %d\n",stream->buf_num);
 		if (( stream->buf_list[stream->buf_num] =
-					usb_buffer_alloc(stream->udev, size, SLAB_ATOMIC,
+					usb_buffer_alloc(stream->udev, size, GFP_ATOMIC,
 					&stream->dma_addr[stream->buf_num]) ) == NULL) {
 			deb_mem("not enough memory for urb-buffer allocation.\n");
 			usb_free_stream_buffers(stream);
diff --git a/drivers/media/dvb/frontends/l64781.c b/drivers/media/dvb/frontends/l64781.c
index f3bc82e..1aeacb1 100644
--- a/drivers/media/dvb/frontends/l64781.c
+++ b/drivers/media/dvb/frontends/l64781.c
@@ -36,7 +36,7 @@
 	struct dvb_frontend frontend;
 
 	/* private demodulator data */
-	int first:1;
+	unsigned int first:1;
 };
 
 #define dprintk(args...) \
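
The one-character l64781.c change above is easy to miss: a plain "int" one-bit bitfield is signed on the usual ABIs, so it can only hold 0 and -1, and assigning 1 reads back as -1, making tests like (first == 1) quietly fail. A short standalone demonstration (the signed behaviour is implementation-defined but matches gcc on the relevant targets):

    #include <stdio.h>

    struct old_flags { int first:1; };          /* as l64781.c had it */
    struct new_flags { unsigned int first:1; }; /* as the patch makes it */

    int main(void)
    {
        struct old_flags o = { 0 };
        struct new_flags n = { 0 };

        o.first = 1;
        n.first = 1;

        printf("signed   1-bit field reads back as %d (first == 1 is %s)\n",
               (int)o.first, o.first == 1 ? "true" : "false");
        printf("unsigned 1-bit field reads back as %u (first == 1 is %s)\n",
               (unsigned int)n.first, n.first == 1 ? "true" : "false");
        return 0;
    }
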
diff --git a/drivers/media/dvb/ttpci/budget-patch.c b/drivers/media/dvb/ttpci/budget-patch.c
index fc1267b..9a15539 100644
--- a/drivers/media/dvb/ttpci/budget-patch.c
+++ b/drivers/media/dvb/ttpci/budget-patch.c
@@ -500,14 +500,14 @@
 
 /*      New design (By Emard)
 **      this rps1 code will copy internal HS event to GPIO3 pin.
-**      GPIO3 is in budget-patch hardware connectd to port B VSYNC
+**      GPIO3 is in budget-patch hardware connected to port B VSYNC
 
 **      HS is an internal event of 7146, accessible with RPS
 **      and temporarily raised high every n lines
 **      (n is defined in the RPS_THRESH1 counter threshold)
 **      I think HS is raised high on the beginning of the n-th line
 **      and remains high until this n-th line that triggered
-**      it is completely received. When the receiption of n-th line
+**      it is completely received. When the reception of n-th line
 **      ends, HS is lowered.
 
 **      To transmit data over DMA, 7146 needs changing state at
@@ -541,7 +541,7 @@
 **      hardware debug note: a working budget card (including budget patch)
 **      with vpeirq() interrupt setup in mode "0x90" (every 64K) will
 **      generate 3 interrupts per 25-Hz DMA frame of 2*188*512 bytes
-**      and that means 3*25=75 Hz of interrupt freqency, as seen by
+**      and that means 3*25=75 Hz of interrupt frequency, as seen by
 **      watch cat /proc/interrupts
 **
 **      If this frequency is 3x lower (and data received in the DMA
@@ -550,7 +550,7 @@
 **      this means VSYNC line is not connected in the hardware.
 **      (check soldering pcb and pins)
 **      The same behaviour of missing VSYNC can be duplicated on budget
-**      cards, by seting DD1_INIT trigger mode 7 in 3rd nibble.
+**      cards, by setting DD1_INIT trigger mode 7 in 3rd nibble.
 */
 
 	// Setup RPS1 "program" (p35)
diff --git a/drivers/media/dvb/ttusb-dec/ttusb_dec.c b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
index a1c9fa9..10b121a 100644
--- a/drivers/media/dvb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
@@ -1135,8 +1135,7 @@
 	dprintk("%s\n", __FUNCTION__);
 
 	for (i = 0; i < ISO_BUF_COUNT; i++)
-		if (dec->iso_urb[i])
-			usb_free_urb(dec->iso_urb[i]);
+		usb_free_urb(dec->iso_urb[i]);
 
 	pci_free_consistent(NULL,
 			    ISO_FRAME_SIZE * (FRAMES_PER_ISO_BUF *
@@ -1245,7 +1244,7 @@
 			return -ENOMEM;
 		}
 		dec->irq_buffer = usb_buffer_alloc(dec->udev,IRQ_PACKET_SIZE,
-					SLAB_ATOMIC, &dec->irq_dma_handle);
+					GFP_ATOMIC, &dec->irq_dma_handle);
 		if(!dec->irq_buffer) {
 			return -ENOMEM;
 		}
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 6d96b17..920b63f 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -173,38 +173,6 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called radio-maestro.
 
-config RADIO_MIROPCM20
-	tristate "miroSOUND PCM20 radio"
-	depends on ISA && VIDEO_V4L1 && SOUND_ACI_MIXER
-	---help---
-	  Choose Y here if you have this FM radio card. You also need to say Y
-	  to "ACI mixer (miroSOUND PCM1-pro/PCM12/PCM20 radio)" (in "Sound")
-	  for this to work.
-
-	  In order to control your radio card, you will need to use programs
-	  that are compatible with the Video For Linux API.  Information on
-	  this API and pointers to "v4l" programs may be found at
-	  <file:Documentation/video4linux/API.html>.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called miropcm20.
-
-config RADIO_MIROPCM20_RDS
-	tristate "miroSOUND PCM20 radio RDS user interface (EXPERIMENTAL)"
-	depends on RADIO_MIROPCM20 && EXPERIMENTAL
-	---help---
-	  Choose Y here if you want to see RDS/RBDS information like
-	  RadioText, Programme Service name, Clock Time and date, Programme
-	  Type and Traffic Announcement/Programme identification.
-
-	  It's not possible to read the raw RDS packets from the device, so
-	  the driver cant provide an V4L interface for this.  But the
-	  availability of RDS is reported over V4L by the basic driver
-	  already.  Here RDS can be read from files in /dev/v4l/rds.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called miropcm20-rds.
-
 config RADIO_SF16FMI
 	tristate "SF16FMI Radio"
 	depends on ISA && VIDEO_V4L2
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index bf26755..b8fde5c 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -24,7 +24,7 @@
 	  decode audio/video standards. This option will autoselect
 	  all pertinent modules to each selected video module.
 
-	  Unselect this only if you know exaclty what you are doing, since
+	  Unselect this only if you know exactly what you are doing, since
 	  it may break support on some boards.
 
 	  In doubt, say Y.
diff --git a/drivers/media/video/cpia_pp.c b/drivers/media/video/cpia_pp.c
index 41f4b8d..b12cec9 100644
--- a/drivers/media/video/cpia_pp.c
+++ b/drivers/media/video/cpia_pp.c
@@ -82,6 +82,8 @@
 	struct pardevice *pdev;
 	struct parport *port;
 	struct work_struct cb_task;
+	void (*cb_func)(void *cbdata);
+	void *cb_data;
 	int open_count;
 	wait_queue_head_t wq_stream;
 	/* image state flags */
@@ -130,6 +132,20 @@
 #define PARPORT_CHUNK_SIZE	PAGE_SIZE
 
 
+static void cpia_pp_run_callback(struct work_struct *work)
+{
+	void (*cb_func)(void *cbdata);
+	void *cb_data;
+	struct pp_cam_entry *cam;
+
+	cam = container_of(work, struct pp_cam_entry, cb_task);
+	cb_func = cam->cb_func;
+	cb_data = cam->cb_data;
+	work_release(work);
+
+	cb_func(cb_data);
+}
+
 /****************************************************************************
  *
  *  CPiA-specific  low-level parport functions for nibble uploads
@@ -664,7 +680,9 @@
 	int retval = 0;
 
 	if(cam->port->irq != PARPORT_IRQ_NONE) {
-		INIT_WORK(&cam->cb_task, cb, cbdata);
+		cam->cb_func = cb;
+		cam->cb_data = cbdata;
+		INIT_WORK_NAR(&cam->cb_task, cpia_pp_run_callback);
 	} else {
 		retval = -1;
 	}
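
cpia_pp.c cannot simply retype its callback, because the function and its argument arrive from outside at registration time; the conversion therefore stores the legacy (func, data) pair in the camera structure and interposes cpia_pp_run_callback() as the new-style handler. The work_release()/INIT_WORK_NAR pair is specific to this kernel's transitional workqueue API; the stash-and-call-through idea itself can be sketched in plain userspace C (illustrative names throughout):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };

    struct cam_entry_like {
        struct work_struct cb_task;
        void (*cb_func)(void *cbdata);          /* stashed legacy callback */
        void *cb_data;                          /* stashed legacy argument */
    };

    static void run_callback_like(struct work_struct *work)
    {
        struct cam_entry_like *cam =
            container_of(work, struct cam_entry_like, cb_task);

        cam->cb_func(cam->cb_data);             /* call through unchanged */
    }

    static void legacy_callback(void *cbdata)
    {
        printf("legacy callback received \"%s\"\n", (const char *)cbdata);
    }

    int main(void)
    {
        struct cam_entry_like cam = {
            .cb_func = legacy_callback,
            .cb_data = "grab complete",
        };
        run_callback_like(&cam.cb_task);
        return 0;
    }
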
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 57e1c02..e60a0a5 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -145,9 +145,9 @@
 	schedule_work(&ir->work);
 }
 
-static void cx88_ir_work(void *data)
+static void cx88_ir_work(struct work_struct *work)
 {
-	struct cx88_IR *ir = data;
+	struct cx88_IR *ir = container_of(work, struct cx88_IR, work);
 	unsigned long timeout;
 
 	cx88_ir_handle_key(ir);
@@ -308,7 +308,7 @@
 	core->ir = ir;
 
 	if (ir->polling) {
-		INIT_WORK(&ir->work, cx88_ir_work, ir);
+		INIT_WORK(&ir->work, cx88_ir_work);
 		init_timer(&ir->timer);
 		ir->timer.function = ir_timer;
 		ir->timer.data = (unsigned long)ir;
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 1457b16..ab87e7b 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -268,9 +268,9 @@
 	schedule_work(&ir->work);
 }
 
-static void ir_work(void *data)
+static void ir_work(struct work_struct *work)
 {
-	struct IR_i2c *ir = data;
+	struct IR_i2c *ir = container_of(work, struct IR_i2c, work);
 	ir_key_poll(ir);
 	mod_timer(&ir->timer, jiffies+HZ/10);
 }
@@ -400,7 +400,7 @@
 	       ir->input->name,ir->input->phys,adap->name);
 
 	/* start polling via eventd */
-	INIT_WORK(&ir->work, ir_work, ir);
+	INIT_WORK(&ir->work, ir_work);
 	init_timer(&ir->timer);
 	ir->timer.function = ir_timer;
 	ir->timer.data     = (unsigned long)ir;
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index cf43df3..e1b56dc 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -56,7 +56,7 @@
 #include <media/tvaudio.h>
 #include <media/msp3400.h>
 #include <linux/kthread.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include "msp3400-driver.h"
 
 /* ---------------------------------------------------------------------- */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.c b/drivers/media/video/pvrusb2/pvrusb2-context.c
index f129f31..cf12974 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-context.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-context.c
@@ -45,16 +45,21 @@
 }
 
 
-static void pvr2_context_poll(struct pvr2_context *mp)
+static void pvr2_context_poll(struct work_struct *work)
 {
+	struct pvr2_context *mp =
+		container_of(work, struct pvr2_context, workpoll);
 	pvr2_context_enter(mp); do {
 		pvr2_hdw_poll(mp->hdw);
 	} while (0); pvr2_context_exit(mp);
 }
 
 
-static void pvr2_context_setup(struct pvr2_context *mp)
+static void pvr2_context_setup(struct work_struct *work)
 {
+	struct pvr2_context *mp =
+		container_of(work, struct pvr2_context, workinit);
+
 	pvr2_context_enter(mp); do {
 		if (!pvr2_hdw_dev_ok(mp->hdw)) break;
 		pvr2_hdw_setup(mp->hdw);
@@ -92,8 +97,8 @@
 	}
 
 	mp->workqueue = create_singlethread_workqueue("pvrusb2");
-	INIT_WORK(&mp->workinit,(void (*)(void*))pvr2_context_setup,mp);
-	INIT_WORK(&mp->workpoll,(void (*)(void*))pvr2_context_poll,mp);
+	INIT_WORK(&mp->workinit, pvr2_context_setup);
+	INIT_WORK(&mp->workpoll, pvr2_context_poll);
 	queue_work(mp->workqueue,&mp->workinit);
  done:
 	return mp;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index f920e0c..1f78733 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -1953,8 +1953,8 @@
 	return hdw;
  fail:
 	if (hdw) {
-		if (hdw->ctl_read_urb) usb_free_urb(hdw->ctl_read_urb);
-		if (hdw->ctl_write_urb) usb_free_urb(hdw->ctl_write_urb);
+		usb_free_urb(hdw->ctl_read_urb);
+		usb_free_urb(hdw->ctl_write_urb);
 		if (hdw->ctl_read_buffer) kfree(hdw->ctl_read_buffer);
 		if (hdw->ctl_write_buffer) kfree(hdw->ctl_write_buffer);
 		if (hdw->controls) kfree(hdw->controls);
@@ -2575,12 +2575,10 @@
 	struct pvr2_hdw *hdw = (struct pvr2_hdw *)data;
 	if (hdw->ctl_write_pend_flag || hdw->ctl_read_pend_flag) {
 		hdw->ctl_timeout_flag = !0;
-		if (hdw->ctl_write_pend_flag && hdw->ctl_write_urb) {
+		if (hdw->ctl_write_pend_flag)
 			usb_unlink_urb(hdw->ctl_write_urb);
-		}
-		if (hdw->ctl_read_pend_flag && hdw->ctl_read_urb) {
+		if (hdw->ctl_read_pend_flag)
 			usb_unlink_urb(hdw->ctl_read_urb);
-		}
 	}
 }
 
diff --git a/drivers/media/video/pvrusb2/pvrusb2-io.c b/drivers/media/video/pvrusb2/pvrusb2-io.c
index 70aa63e..57fb320 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-io.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-io.c
@@ -289,7 +289,7 @@
 	pvr2_buffer_set_none(bp);
 	bp->signature = 0;
 	bp->stream = NULL;
-	if (bp->purb) usb_free_urb(bp->purb);
+	usb_free_urb(bp->purb);
 	pvr2_trace(PVR2_TRACE_BUF_POOL,"/*---TRACE_FLOW---*/"
 		   " bufferDone     %p",bp);
 }
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 46c1148..a996aad 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -866,11 +866,9 @@
 	}
 	if (ret) {
 		/* De-allocate in reverse order */
-		while (i >= 0) {
-			if (pdev->sbuf[i].urb != NULL)
-				usb_free_urb(pdev->sbuf[i].urb);
+		while (i--) {
+			usb_free_urb(pdev->sbuf[i].urb);
 			pdev->sbuf[i].urb = NULL;
-			i--;
 		}
 		return ret;
 	}
@@ -1095,8 +1093,7 @@
 	PWC_DEBUG_OPEN(">> video_open called(vdev = 0x%p).\n", vdev);
 
 	pdev = (struct pwc_device *)vdev->priv;
-	if (pdev == NULL)
-		BUG();
+	BUG_ON(!pdev);
 	if (pdev->vopen) {
 		PWC_DEBUG_OPEN("I'm busy, someone is using the device.\n");
 		return -EBUSY;
diff --git a/drivers/media/video/saa6588.c b/drivers/media/video/saa6588.c
index 7b9859c..92eabf8 100644
--- a/drivers/media/video/saa6588.c
+++ b/drivers/media/video/saa6588.c
@@ -324,9 +324,9 @@
 	schedule_work(&s->work);
 }
 
-static void saa6588_work(void *data)
+static void saa6588_work(struct work_struct *work)
 {
-	struct saa6588 *s = (struct saa6588 *)data;
+	struct saa6588 *s = container_of(work, struct saa6588, work);
 
 	saa6588_i2c_poll(s);
 	mod_timer(&s->timer, jiffies + msecs_to_jiffies(20));
@@ -419,7 +419,7 @@
 	saa6588_configure(s);
 
 	/* start polling via eventd */
-	INIT_WORK(&s->work, saa6588_work, s);
+	INIT_WORK(&s->work, saa6588_work);
 	init_timer(&s->timer);
 	s->timer.function = saa6588_timer;
 	s->timer.data = (unsigned long)s;
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 65d0440..daaae87 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -343,9 +343,10 @@
 	.minor	       = -1,
 };
 
-static void empress_signal_update(void* data)
+static void empress_signal_update(struct work_struct *work)
 {
-	struct saa7134_dev* dev = (struct saa7134_dev*) data;
+	struct saa7134_dev* dev =
+		container_of(work, struct saa7134_dev, empress_workqueue);
 
 	if (dev->nosignal) {
 		dprintk("no video signal\n");
@@ -378,7 +379,7 @@
 		 "%s empress (%s)", dev->name,
 		 saa7134_boards[dev->board].name);
 
-	INIT_WORK(&dev->empress_workqueue, empress_signal_update, (void*) dev);
+	INIT_WORK(&dev->empress_workqueue, empress_signal_update);
 
 	err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER,
 				    empress_nr[dev->nr]);
@@ -399,7 +400,7 @@
 			    sizeof(struct saa7134_buf),
 			    dev);
 
-	empress_signal_update(dev);
+	empress_signal_update(&dev->empress_workqueue);
 	return 0;
 }
 
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 42fb60d..18458d4 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -775,7 +775,7 @@
 	return 0;
 
 free_urbs:
-	for (i = 0; (i < SN9C102_URBS) &&  cam->urb[i]; i++)
+	for (i = 0; i < SN9C102_URBS; i++)
 		usb_free_urb(cam->urb[i]);
 
 free_buffers:
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index fcaef4b..d506dfa 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -29,6 +29,7 @@
 #include <linux/init.h>
 #include <linux/smp_lock.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 #include <media/tvaudio.h>
 #include <media/v4l2-common.h>
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
index 9a26b94..bbf2bee 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.c
+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
@@ -190,8 +190,7 @@
 
 static void qcm_free_int(struct qcm *cam)
 {
-	if (cam->button_urb)
-		usb_free_urb(cam->button_urb);
+	usb_free_urb(cam->button_urb);
 }
 #endif /* CONFIG_INPUT */
 
diff --git a/drivers/media/video/video-buf-dvb.c b/drivers/media/video/video-buf-dvb.c
index f53edf1..fcc5467 100644
--- a/drivers/media/video/video-buf-dvb.c
+++ b/drivers/media/video/video-buf-dvb.c
@@ -20,7 +20,7 @@
 #include <linux/fs.h>
 #include <linux/kthread.h>
 #include <linux/file.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 
 #include <media/video-buf.h>
 #include <media/video-buf-dvb.h>
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 3c8dc72..9986de5 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -36,6 +36,7 @@
 #include <media/v4l2-common.h>
 #include <linux/kthread.h>
 #include <linux/highmem.h>
+#include <linux/freezer.h>
 
 /* Wake up at about 30 fps */
 #define WAKE_NUMERATOR 30
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index 5b55634..52d0f75 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -489,7 +489,7 @@
 	return 0;
 
 free_urbs:
-	for (i = 0; (i < ZC0301_URBS) &&  cam->urb[i]; i++)
+	for (i = 0; i < ZC0301_URBS; i++)
 		usb_free_urb(cam->urb[i]);
 
 free_buffers:
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index e5c7271..6e068cf 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -347,7 +347,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
  *	@irq: irq number (not used)
  *	@bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
@@ -387,14 +387,16 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	mpt_base_reply - MPT base driver's callback routine; all base driver
- *	"internal" request/reply processing is routed here.
- *	Currently used for EventNotification and EventAck handling.
+/**
+ *	mpt_base_reply - MPT base driver's callback routine
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@mf: Pointer to original MPT request frame
  *	@reply: Pointer to MPT reply frame (NULL if TurboReply)
  *
+ *	MPT base driver's callback routine; all base driver
+ *	"internal" request/reply processing is routed here.
+ *	Currently used for EventNotification and EventAck handling.
+ *
  *	Returns 1 indicating original alloc'd request frame ptr
  *	should be freed, or 0 if it shouldn't.
  */
@@ -530,7 +532,7 @@
  *	@dclass: Protocol driver's class (%MPT_DRIVER_CLASS enum value)
  *
  *	This routine is called by a protocol-specific driver (SCSI host,
- *	LAN, SCSI target) to register it's reply callback routine.  Each
+ *	LAN, SCSI target) to register its reply callback routine.  Each
  *	protocol-specific driver must do this before it will be able to
  *	use any IOC resources, such as obtaining request frames.
  *
@@ -572,7 +574,7 @@
  *	mpt_deregister - Deregister a protocol drivers resources.
  *	@cb_idx: previously registered callback handle
  *
- *	Each protocol-specific driver should call this routine when it's
+ *	Each protocol-specific driver should call this routine when its
  *	module is unloaded.
  */
 void
@@ -617,7 +619,7 @@
  *
  *	Each protocol-specific driver should call this routine
  *	when it does not (or can no longer) handle events,
- *	or when it's module is unloaded.
+ *	or when its module is unloaded.
  */
 void
 mpt_event_deregister(int cb_idx)
@@ -656,7 +658,7 @@
  *
  *	Each protocol-specific driver should call this routine
  *	when it does not (or can no longer) handle IOC reset handling,
- *	or when it's module is unloaded.
+ *	or when its module is unloaded.
  */
 void
 mpt_reset_deregister(int cb_idx)
@@ -670,6 +672,8 @@
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	mpt_device_driver_register - Register device driver hooks
+ *	@dd_cbfunc: driver callbacks struct
+ *	@cb_idx: MPT protocol driver index
  */
 int
 mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx)
@@ -696,6 +700,7 @@
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	mpt_device_driver_deregister - DeRegister device driver hooks
+ *	@cb_idx: MPT protocol driver index
  */
 void
 mpt_device_driver_deregister(int cb_idx)
@@ -887,8 +892,7 @@
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mpt_send_handshake_request - Send MPT request via doorbell
- *	handshake method.
+ *	mpt_send_handshake_request - Send MPT request via doorbell handshake method.
  *	@handle: Handle of registered MPT protocol driver
  *	@ioc: Pointer to MPT adapter structure
  *	@reqBytes: Size of the request in bytes
@@ -981,10 +985,13 @@
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- * mpt_host_page_access_control - provides mechanism for the host
- * driver to control the IOC's Host Page Buffer access.
+ * mpt_host_page_access_control - control the IOC's Host Page Buffer access
  * @ioc: Pointer to MPT adapter structure
  * @access_control_value: define bits below
+ * @sleepFlag: Specifies whether the process can sleep
+ *
+ * Provides mechanism for the host driver to control the IOC's
+ * Host Page Buffer access.
  *
  * Access Control Value - bits[15:12]
  * 0h Reserved
@@ -1022,10 +1029,10 @@
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	mpt_host_page_alloc - allocate system memory for the fw
- *	If we already allocated memory in past, then resend the same pointer.
- *	ioc@: Pointer to pointer to IOC adapter
- *	ioc_init@: Pointer to ioc init config page
+ *	@ioc: Pointer to pointer to IOC adapter
+ *	@ioc_init: Pointer to ioc init config page
  *
+ *	If we already allocated memory in past, then resend the same pointer.
  *	Returns 0 for success, non-zero for failure.
  */
 static int
@@ -1091,12 +1098,15 @@
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mpt_verify_adapter - Given a unique IOC identifier, set pointer to
- *	the associated MPT adapter structure.
+ *	mpt_verify_adapter - Given IOC identifier, set pointer to its adapter structure.
  *	@iocid: IOC unique identifier (integer)
  *	@iocpp: Pointer to pointer to IOC adapter
  *
- *	Returns iocid and sets iocpp.
+ *	Given a unique IOC identifier, set pointer to the associated MPT
+ *	adapter structure.
+ *
+ *	Returns iocid and sets iocpp if iocid is found.
+ *	Returns -1 if iocid is not found.
  */
 int
 mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
@@ -1115,9 +1125,10 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_attach - Install a PCI intelligent MPT adapter.
  *	@pdev: Pointer to pci_dev structure
+ *	@id: PCI device ID information
  *
  *	This routine performs all the steps necessary to bring the IOC of
  *	a MPT adapter to a OPERATIONAL state.  This includes registering
@@ -1417,10 +1428,9 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_detach - Remove a PCI intelligent MPT adapter.
  *	@pdev: Pointer to pci_dev structure
- *
  */
 
 void
@@ -1466,10 +1476,10 @@
  */
 #ifdef CONFIG_PM
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_suspend - Fusion MPT base driver suspend routine.
- *
- *
+ *	@pdev: Pointer to pci_dev structure
+ *	@state: new state to enter
  */
 int
 mpt_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -1505,10 +1515,9 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_resume - Fusion MPT base driver resume routine.
- *
- *
+ *	@pdev: Pointer to pci_dev structure
  */
 int
 mpt_resume(struct pci_dev *pdev)
@@ -1566,7 +1575,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_do_ioc_recovery - Initialize or recover MPT adapter.
  *	@ioc: Pointer to MPT adapter structure
  *	@reason: Event word / reason
@@ -1892,13 +1901,15 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	mpt_detect_bound_ports - Search for PCI bus/dev_function
- *	which matches PCI bus/dev_function (+/-1) for newly discovered 929,
- *	929X, 1030 or 1035.
+/**
+ *	mpt_detect_bound_ports - Search for matching PCI bus/dev_function
  *	@ioc: Pointer to MPT adapter structure
  *	@pdev: Pointer to (struct pci_dev) structure
  *
+ *	Search for PCI bus/dev_function which matches
+ *	PCI bus/dev_function (+/-1) for newly discovered 929,
+ *	929X, 1030 or 1035.
+ *
  *	If match on PCI dev_function +/-1 is found, bind the two MPT adapters
  *	using alt_ioc pointer fields in their %MPT_ADAPTER structures.
  */
@@ -1945,9 +1956,9 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_adapter_disable - Disable misbehaving MPT adapter.
- *	@this: Pointer to MPT adapter structure
+ *	@ioc: Pointer to MPT adapter structure
  */
 static void
 mpt_adapter_disable(MPT_ADAPTER *ioc)
@@ -2046,9 +2057,8 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	mpt_adapter_dispose - Free all resources associated with a MPT
- *	adapter.
+/**
+ *	mpt_adapter_dispose - Free all resources associated with an MPT adapter
  *	@ioc: Pointer to MPT adapter structure
  *
  *	This routine unregisters h/w resources and frees all alloc'd memory
@@ -2099,8 +2109,8 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	MptDisplayIocCapabilities - Disply IOC's capacilities.
+/**
+ *	MptDisplayIocCapabilities - Display IOC's capabilities.
  *	@ioc: Pointer to MPT adapter structure
  */
 static void
@@ -2142,7 +2152,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	MakeIocReady - Get IOC to a READY state, using KickStart if needed.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@force: Force hard KickStart of IOC
@@ -2279,7 +2289,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_GetIocState - Get the current state of a MPT adapter.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@cooked: Request raw or cooked IOC state
@@ -2304,7 +2314,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	GetIocFacts - Send IOCFacts request to MPT adapter.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@sleepFlag: Specifies whether the process can sleep
@@ -2478,7 +2488,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	GetPortFacts - Send PortFacts request to MPT adapter.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@portnum: Port number
@@ -2545,7 +2555,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	SendIocInit - Send IOCInit request to MPT adapter.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@sleepFlag: Specifies whether the process can sleep
@@ -2630,7 +2640,7 @@
 	}
 
 	/* No need to byte swap the multibyte fields in the reply
-	 * since we don't even look at it's contents.
+	 * since we don't even look at its contents.
 	 */
 
 	dhsprintk((MYIOC_s_INFO_FMT "Sending PortEnable (req @ %p)\n",
@@ -2672,7 +2682,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	SendPortEnable - Send PortEnable request to MPT adapter port.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@portnum: Port number to enable
@@ -2723,9 +2733,13 @@
 	return rc;
 }
 
-/*
- *	ioc: Pointer to MPT_ADAPTER structure
- *      size - total FW bytes
+/**
+ *	mpt_alloc_fw_memory - allocate firmware memory
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *      @size: total FW bytes
+ *
+ *	If memory has already been allocated, the same (cached) value
+ *	is returned.
  */
 void
 mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
@@ -2742,9 +2756,12 @@
 			ioc->alloc_total += size;
 	}
 }
-/*
- * If alt_img is NULL, delete from ioc structure.
- * Else, delete a secondary image in same format.
+/**
+ *	mpt_free_fw_memory - free firmware memory
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *
+ *	If alt_img is NULL, delete from ioc structure.
+ *	Else, delete a secondary image in same format.
  */
 void
 mpt_free_fw_memory(MPT_ADAPTER *ioc)
@@ -2763,7 +2780,7 @@
 
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_do_upload - Construct and Send FWUpload request to MPT adapter port.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@sleepFlag: Specifies whether the process can sleep
@@ -2865,10 +2882,10 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_downloadboot - DownloadBoot code
  *	@ioc: Pointer to MPT_ADAPTER structure
- *	@flag: Specify which part of IOC memory is to be uploaded.
+ *	@pFwHeader: Pointer to firmware header info
  *	@sleepFlag: Specifies whether the process can sleep
  *
  *	FwDownloadBoot requires Programmed IO access.
@@ -3071,7 +3088,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	KickStart - Perform hard reset of MPT adapter.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@force: Force hard reset
@@ -3145,12 +3162,12 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_diag_reset - Perform hard reset of the adapter.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@ignore: Set if to honor and clear to ignore
  *		the reset history bit
- *	@sleepflag: CAN_SLEEP if called in a non-interrupt thread,
+ *	@sleepFlag: CAN_SLEEP if called in a non-interrupt thread,
  *		else set to NO_SLEEP (use mdelay instead)
  *
  *	This routine places the adapter in diagnostic mode via the
@@ -3436,11 +3453,12 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	SendIocReset - Send IOCReset request to MPT adapter.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@reset_type: reset type, expected values are
  *	%MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET or %MPI_FUNCTION_IO_UNIT_RESET
+ *	@sleepFlag: Specifies whether the process can sleep
  *
  *	Send IOCReset request to the MPT adapter.
  *
@@ -3494,11 +3512,12 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	initChainBuffers - Allocate memory for and initialize
- *	chain buffers, chain buffer control arrays and spinlock.
- *	@hd: Pointer to MPT_SCSI_HOST structure
- *	@init: If set, initialize the spin lock.
+/**
+ *	initChainBuffers - Allocate memory for and initialize chain buffers
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *
+ *	Allocates memory for and initializes chain buffers,
+ *	chain buffer control arrays and spinlock.
  */
 static int
 initChainBuffers(MPT_ADAPTER *ioc)
@@ -3594,7 +3613,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	PrimeIocFifos - Initialize IOC request and reply FIFOs.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *
@@ -3891,15 +3910,15 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	WaitForDoorbellAck - Wait for IOC to clear the IOP_DOORBELL_STATUS bit
- *	in it's IntStatus register.
+/**
+ *	WaitForDoorbellAck - Wait for IOC doorbell handshake acknowledge
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@howlong: How long to wait (in seconds)
  *	@sleepFlag: Specifies whether the process can sleep
  *
  *	This routine waits (up to ~2 seconds max) for IOC doorbell
- *	handshake ACKnowledge.
+ *	handshake ACKnowledge, indicated by the IOP_DOORBELL_STATUS
+ *	bit in its IntStatus register being clear.
  *
  *	Returns a negative value on failure, else wait loop count.
  */
@@ -3942,14 +3961,14 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	WaitForDoorbellInt - Wait for IOC to set the HIS_DOORBELL_INTERRUPT bit
- *	in it's IntStatus register.
+/**
+ *	WaitForDoorbellInt - Wait for IOC to set its doorbell interrupt bit
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@howlong: How long to wait (in seconds)
  *	@sleepFlag: Specifies whether the process can sleep
  *
- *	This routine waits (up to ~2 seconds max) for IOC doorbell interrupt.
+ *	This routine waits (up to ~2 seconds max) for IOC doorbell interrupt
+ *	(MPI_HIS_DOORBELL_INTERRUPT) to be set in the IntStatus register.
  *
  *	Returns a negative value on failure, else wait loop count.
  */
@@ -3991,8 +4010,8 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	WaitForDoorbellReply - Wait for and capture a IOC handshake reply.
+/**
+ *	WaitForDoorbellReply - Wait for and capture an IOC handshake reply.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@howlong: How long to wait (in seconds)
  *	@sleepFlag: Specifies whether the process can sleep
@@ -4077,7 +4096,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	GetLanConfigPages - Fetch LANConfig pages.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *
@@ -4188,12 +4207,9 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	mptbase_sas_persist_operation - Perform operation on SAS Persitent Table
+/**
+ *	mptbase_sas_persist_operation - Perform operation on SAS Persistent Table
  *	@ioc: Pointer to MPT_ADAPTER structure
- *	@sas_address: 64bit SAS Address for operation.
- *	@target_id: specified target for operation
- *	@bus: specified bus for operation
  *	@persist_opcode: see below
  *
  *	MPI_SAS_OP_CLEAR_NOT_PRESENT - Free all persist TargetID mappings for
@@ -4202,7 +4218,7 @@
  *
 *	NOTE: Don't use this function during interrupt time.
  *
- *	Returns: 0 for success, non-zero error
+ *	Returns 0 for success, non-zero error
  */
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -4399,7 +4415,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	GetIoUnitPage2 - Retrieve BIOS version and boot order information.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *
@@ -4457,7 +4473,8 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*	mpt_GetScsiPortSettings - read SCSI Port Page 0 and 2
+/**
+ *	mpt_GetScsiPortSettings - read SCSI Port Page 0 and 2
 *	@ioc: Pointer to an Adapter Structure
  *	@portnum: IOC port number
  *
@@ -4644,7 +4661,8 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*	mpt_readScsiDevicePageHeaders - save version and length of SDP1
+/**
+ *	mpt_readScsiDevicePageHeaders - save version and length of SDP1
 *	@ioc: Pointer to an Adapter Structure
  *	@portnum: IOC port number
  *
@@ -4996,9 +5014,8 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	SendEventNotification - Send EventNotification (on or off) request
- *	to MPT adapter.
+/**
+ *	SendEventNotification - Send EventNotification (on or off) request to adapter
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@EvSwitch: Event switch flags
  */
@@ -5062,8 +5079,8 @@
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	mpt_config - Generic function to issue config message
- *	@ioc - Pointer to an adapter structure
- *	@cfg - Pointer to a configuration structure. Struct contains
+ *	@ioc:   Pointer to an adapter structure
+ *	@pCfg:  Pointer to a configuration structure. Struct contains
  *		action, page address, direction, physical address
  *		and pointer to a configuration page header
  *		Page header is updated.
@@ -5188,8 +5205,8 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	mpt_timer_expired - Call back for timer process.
+/**
+ *	mpt_timer_expired - Callback for timer process.
 *	Used only by internal config functionality.
  *	@data: Pointer to MPT_SCSI_HOST recast as an unsigned long
  */
@@ -5214,12 +5231,12 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_ioc_reset - Base cleanup for hard reset
  *	@ioc: Pointer to the adapter structure
  *	@reset_phase: Indicates pre- or post-reset functionality
  *
- *	Remark: Free's resources with internally generated commands.
+ *	Remark: Frees resources with internally generated commands.
  */
 static int
 mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
@@ -5271,7 +5288,7 @@
  *	procfs (%MPT_PROCFS_MPTBASEDIR/...) support stuff...
  */
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	procmpt_create - Create %MPT_PROCFS_MPTBASEDIR entries.
  *
  *	Returns 0 for success, non-zero for failure.
@@ -5297,7 +5314,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	procmpt_destroy - Tear down %MPT_PROCFS_MPTBASEDIR entries.
  *
  *	Returns 0 for success, non-zero for failure.
@@ -5311,16 +5328,16 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	procmpt_summary_read - Handle read request from /proc/mpt/summary
- *	or from /proc/mpt/iocN/summary.
+/**
+ *	procmpt_summary_read - Handle read request of a summary file
  *	@buf: Pointer to area to write information
  *	@start: Pointer to start pointer
  *	@offset: Offset to start writing
- *	@request:
+ *	@request: Amount of read data requested
  *	@eof: Pointer to EOF integer
  *	@data: Pointer
  *
+ *	Handles read request from /proc/mpt/summary or /proc/mpt/iocN/summary.
  *	Returns number of characters written to process performing the read.
  */
 static int
@@ -5355,12 +5372,12 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	procmpt_version_read - Handle read request from /proc/mpt/version.
  *	@buf: Pointer to area to write information
  *	@start: Pointer to start pointer
  *	@offset: Offset to start writing
- *	@request:
+ *	@request: Amount of read data requested
  *	@eof: Pointer to EOF integer
  *	@data: Pointer
  *
@@ -5411,12 +5428,12 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	procmpt_iocinfo_read - Handle read request from /proc/mpt/iocN/info.
  *	@buf: Pointer to area to write information
  *	@start: Pointer to start pointer
  *	@offset: Offset to start writing
- *	@request:
+ *	@request: Amount of read data requested
  *	@eof: Pointer to EOF integer
  *	@data: Pointer
  *
@@ -5577,16 +5594,17 @@
  */
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mpt_HardResetHandler - Generic reset handler, issue SCSI Task
- *	Management call based on input arg values.  If TaskMgmt fails,
- *	return associated SCSI request.
+ *	mpt_HardResetHandler - Generic reset handler
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@sleepFlag: Indicates if sleep or schedule must be called.
  *
+ *	Issues SCSI Task Management call based on input arg values.
+ *	If TaskMgmt fails, returns associated SCSI request.
+ *
  *	Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
  *	or a non-interrupt thread.  In the former, must not call schedule().
  *
- *	Remark: A return of -1 is a FATAL error case, as it means a
+ *	Note: A return of -1 is a FATAL error case, as it means a
  *	FW reload/initialization failed.
  *
  *	Returns 0 for SUCCESS or -1 if FAILED.
@@ -5935,13 +5953,14 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	ProcessEventNotification - Route a received EventNotificationReply to
- *	all currently regeistered event handlers.
+/**
+ *	ProcessEventNotification - Route EventNotificationReply to all event handlers
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@pEventReply: Pointer to EventNotification reply frame
  *	@evHandlers: Pointer to integer, number of event handlers
  *
+ *	Routes a received EventNotificationReply to all currently registered
+ *	event handlers.
  *	Returns sum of event handlers return values.
  */
 static int
@@ -6056,7 +6075,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_fc_log_info - Log information returned from Fibre Channel IOC.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@log_info: U32 LogInfo reply word from the IOC
@@ -6077,7 +6096,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_spi_log_info - Log information returned from SCSI Parallel IOC.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@mr: Pointer to MPT reply frame
@@ -6185,7 +6204,7 @@
 		"Abort",					/* 12h */
 		"IO Not Yet Executed",				/* 13h */
 		"IO Executed",					/* 14h */
-		"Persistant Reservation Out Not Affiliation Owner", /* 15h */
+		"Persistent Reservation Out Not Affiliation Owner", /* 15h */
 		"Open Transmit DMA Abort",			/* 16h */
 		"IO Device Missing Delay Retry",		/* 17h */
 		NULL,						/* 18h */
@@ -6200,7 +6219,7 @@
 	};
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_sas_log_info - Log information returned from SAS IOC.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@log_info: U32 LogInfo reply word from the IOC
@@ -6255,7 +6274,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	mpt_sp_ioc_info - IOC information returned from SCSI Parallel IOC.
  *	@ioc: Pointer to MPT_ADAPTER structure
  *	@ioc_status: U32 IOCStatus word from IOC
@@ -6416,7 +6435,7 @@
 EXPORT_SYMBOL(mptbase_sas_persist_operation);
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	fusion_init - Fusion MPT base driver initialization routine.
  *
  *	Returns 0 for success, non-zero for failure.
@@ -6456,7 +6475,7 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *	fusion_exit - Perform driver unload cleanup.
  *
  *	This routine frees all resources associated with each MPT adapter
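
Most of the mptbase.c hunks are documentation-only: comments are promoted from "/*" to "/**" and reshaped so scripts/kernel-doc can parse them, with a one-line "name - summary", one "@arg:" line per parameter, then the long description and the "Returns" text. A hypothetical example of the target layout (not taken from the driver):

    /**
     * example_reset_handler - Generic reset handler
     * @id: adapter identifier
     * @force: nonzero to request a hard reset
     *
     * The one-line summary comes first, then one "@name:" line per
     * parameter, then the longer discussion after a blank comment line,
     * so scripts/kernel-doc can extract each piece separately.
     *
     * Returns 0 for success, non-zero for failure.
     */
    static int example_reset_handler(int id, int force)
    {
        /* A do-nothing body; only the comment layout matters here. */
        return (id >= 0 && force >= 0) ? 0 : -1;
    }

    int main(void)
    {
        return example_reset_handler(0, 1);
    }
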
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 1dd4917..ca2f910 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1018,9 +1018,10 @@
 }
 
 static void
-mptfc_setup_reset(void *arg)
+mptfc_setup_reset(struct work_struct *work)
 {
-	MPT_ADAPTER		*ioc = (MPT_ADAPTER *)arg;
+	MPT_ADAPTER		*ioc =
+		container_of(work, MPT_ADAPTER, fc_setup_reset_work);
 	u64			pn;
 	struct mptfc_rport_info *ri;
 
@@ -1043,9 +1044,10 @@
 }
 
 static void
-mptfc_rescan_devices(void *arg)
+mptfc_rescan_devices(struct work_struct *work)
 {
-	MPT_ADAPTER		*ioc = (MPT_ADAPTER *)arg;
+	MPT_ADAPTER		*ioc =
+		container_of(work, MPT_ADAPTER, fc_rescan_work);
 	int			ii;
 	u64			pn;
 	struct mptfc_rport_info *ri;
@@ -1154,8 +1156,8 @@
         }
 
 	spin_lock_init(&ioc->fc_rescan_work_lock);
-	INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices,(void *)ioc);
-	INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset, (void *)ioc);
+	INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices);
+	INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset);
 
 	spin_lock_irqsave(&ioc->FreeQlock, flags);
 
@@ -1393,8 +1395,7 @@
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mptfc_init - Register MPT adapter(s) as SCSI host(s) with
- *	linux scsi mid-layer.
+ *	mptfc_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer.
  *
  *	Returns 0 for success, non-zero for failure.
  */
@@ -1438,7 +1439,7 @@
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mptfc_remove - Removed fc infrastructure for devices
+ *	mptfc_remove - Remove fc infrastructure for devices
  *	@pdev: Pointer to pci_dev structure
  *
  */
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 314c3a2..b7c4407 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -111,7 +111,8 @@
 	u32 total_received;
 	struct net_device_stats stats;	/* Per device statistics */
 
-	struct work_struct post_buckets_task;
+	struct delayed_work post_buckets_task;
+	struct net_device *dev;
 	unsigned long post_buckets_active;
 };
 
@@ -132,7 +133,7 @@
 static int  mpt_lan_open(struct net_device *dev);
 static int  mpt_lan_reset(struct net_device *dev);
 static int  mpt_lan_close(struct net_device *dev);
-static void mpt_lan_post_receive_buckets(void *dev_id);
+static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
 					   int priority);
 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
@@ -345,7 +346,7 @@
 			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
 		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
 	} else {
-		mpt_lan_post_receive_buckets(dev);
+		mpt_lan_post_receive_buckets(priv);
 		netif_wake_queue(dev);
 	}
 
@@ -441,7 +442,7 @@
 
 	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
 
-	mpt_lan_post_receive_buckets(dev);
+	mpt_lan_post_receive_buckets(priv);
 	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
 			IOC_AND_NETDEV_NAMES_s_s(dev));
 
@@ -854,7 +855,7 @@
 	
 	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
 		if (priority) {
-			schedule_work(&priv->post_buckets_task);
+			schedule_delayed_work(&priv->post_buckets_task, 0);
 		} else {
 			schedule_delayed_work(&priv->post_buckets_task, 1);
 			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
@@ -1188,10 +1189,9 @@
 /* Simple SGE's only at the moment */
 
 static void
-mpt_lan_post_receive_buckets(void *dev_id)
+mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
 {
-	struct net_device *dev = dev_id;
-	struct mpt_lan_priv *priv = dev->priv;
+	struct net_device *dev = priv->dev;
 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
 	MPT_FRAME_HDR *mf;
 	LANReceivePostRequest_t *pRecvReq;
@@ -1335,6 +1335,13 @@
 	clear_bit(0, &priv->post_buckets_active);
 }
 
+static void
+mpt_lan_post_receive_buckets_work(struct work_struct *work)
+{
+	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
+						  post_buckets_task.work));
+}
+
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 static struct net_device *
 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
@@ -1350,11 +1357,13 @@
 
 	priv = netdev_priv(dev);
 
+	priv->dev = dev;
 	priv->mpt_dev = mpt_dev;
 	priv->pnum = pnum;
 
-	memset(&priv->post_buckets_task, 0, sizeof(struct work_struct));
-	INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
+	memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task));
+	INIT_DELAYED_WORK(&priv->post_buckets_task,
+			  mpt_lan_post_receive_buckets_work);
 	priv->post_buckets_active = 0;
 
 	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index b752a47..4f0c530 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2006,9 +2006,10 @@
  *(Mutex LOCKED)
  */
 static void
-mptsas_discovery_work(void * arg)
+mptsas_discovery_work(struct work_struct *work)
 {
-	struct mptsas_discovery_event *ev = arg;
+	struct mptsas_discovery_event *ev =
+		container_of(work, struct mptsas_discovery_event, work);
 	MPT_ADAPTER *ioc = ev->ioc;
 
 	mutex_lock(&ioc->sas_discovery_mutex);
@@ -2068,9 +2069,9 @@
 * Work queue thread to clear the persistency table
  */
 static void
-mptsas_persist_clear_table(void * arg)
+mptsas_persist_clear_table(struct work_struct *work)
 {
-	MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+	MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
 
 	mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
 }
@@ -2093,9 +2094,10 @@
  * Work queue thread to handle SAS hotplug events
  */
 static void
-mptsas_hotplug_work(void *arg)
+mptsas_hotplug_work(struct work_struct *work)
 {
-	struct mptsas_hotplug_event *ev = arg;
+	struct mptsas_hotplug_event *ev =
+		container_of(work, struct mptsas_hotplug_event, work);
 	MPT_ADAPTER *ioc = ev->ioc;
 	struct mptsas_phyinfo *phy_info;
 	struct sas_rphy *rphy;
@@ -2341,7 +2343,7 @@
 			break;
 		}
 
-		INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+		INIT_WORK(&ev->work, mptsas_hotplug_work);
 		ev->ioc = ioc;
 		ev->handle = le16_to_cpu(sas_event_data->DevHandle);
 		ev->parent_handle =
@@ -2366,7 +2368,7 @@
 	 * Persistent table is full.
 	 */
 		INIT_WORK(&ioc->sas_persist_task,
-		    mptsas_persist_clear_table, (void *)ioc);
+		    mptsas_persist_clear_table);
 		schedule_work(&ioc->sas_persist_task);
 		break;
 	case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
@@ -2395,7 +2397,7 @@
 		return;
 	}
 
-	INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+	INIT_WORK(&ev->work, mptsas_hotplug_work);
 	ev->ioc = ioc;
 	ev->id = raid_event_data->VolumeID;
 	ev->event_type = MPTSAS_IGNORE_EVENT;
@@ -2474,7 +2476,7 @@
 	ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
 	if (!ev)
 		return;
-	INIT_WORK(&ev->work, mptsas_discovery_work, ev);
+	INIT_WORK(&ev->work, mptsas_discovery_work);
 	ev->ioc = ioc;
 	schedule_work(&ev->work);
 };
@@ -2511,8 +2513,7 @@
 		break;
 	case MPI_EVENT_PERSISTENT_TABLE_FULL:
 		INIT_WORK(&ioc->sas_persist_task,
-		    mptsas_persist_clear_table,
-		    (void *)ioc);
+		    mptsas_persist_clear_table);
 		schedule_work(&ioc->sas_persist_task);
 		break;
 	 case MPI_EVENT_SAS_DISCOVERY:
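
The mptsas hunks use the other common shape of the same conversion: a per-event structure allocated at notification time, queued with the two-argument INIT_WORK(), and recovered (and freed) by the handler through container_of(). A rough sketch, again with illustrative foo_* names rather than the driver's own:

	#include <linux/workqueue.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct foo_event {
		struct work_struct work;	/* must outlive the queueing call */
		u16 handle;
	};

	static void foo_event_work(struct work_struct *work)
	{
		struct foo_event *ev = container_of(work, struct foo_event, work);

		/* ... act on ev->handle ... */
		kfree(ev);			/* one-shot: the handler owns the allocation */
	}

	static void foo_queue_event(u16 handle)
	{
		struct foo_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

		if (!ev)
			return;
		INIT_WORK(&ev->work, foo_event_work);
		ev->handle = handle;
		schedule_work(&ev->work);
	}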
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 30524dc..2c72c36 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1230,15 +1230,15 @@
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	mptscsih_proc_info - Return information about MPT adapter
+ * 	@host:   scsi host struct
+ * 	@buffer: if write, user data; if read, buffer for user
+ *	@start: returns the buffer address
+ * 	@offset: if write, 0; if read, the current offset into the buffer from
+ * 		 the previous read.
+ * 	@length: if write, return length;
+ *	@func:   write = 1; read = 0
  *
  *	(linux scsi_host_template.info routine)
- *
- * 	buffer: if write, user data; if read, buffer for user
- * 	length: if write, return length;
- * 	offset: if write, 0; if read, the current offset into the buffer from
- * 		the previous read.
- * 	hostno: scsi host number
- *	func:   if write = 1; if read = 0
  */
 int
 mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
@@ -1902,8 +1902,7 @@
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mptscsih_host_reset - Perform a SCSI host adapter RESET!
- *	new_eh variant
+ *	mptscsih_host_reset - Perform a SCSI host adapter RESET (new_eh variant)
  *	@SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
  *
  *	(linux scsi_host_template.eh_host_reset_handler routine)
@@ -1949,8 +1948,7 @@
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mptscsih_tm_pending_wait - wait for pending task management request to
- *		complete.
+ *	mptscsih_tm_pending_wait - wait for pending task management request to complete
  *	@hd: Pointer to MPT host structure.
  *
  *	Returns {SUCCESS,FAILED}.
@@ -1982,6 +1980,7 @@
 /**
  *	mptscsih_tm_wait_for_completion - wait for completion of TM task
  *	@hd: Pointer to MPT host structure.
+ *	@timeout: timeout in seconds
  *
  *	Returns {SUCCESS,FAILED}.
  */
@@ -3429,8 +3428,7 @@
 /**
  *	mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks.
  *	@hd: Pointer to a SCSI HOST structure
- *	@vtarget: per device private data
- *	@lun: lun
+ *	@vdevice: virtual target device
  *
  *	Uses the ISR, but with special processing.
  *	MUST be single-threaded.
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index e4cc3dd..36641da 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -646,9 +646,10 @@
 	int			disk;
 };
 
-static void mpt_work_wrapper(void *data)
+static void mpt_work_wrapper(struct work_struct *work)
 {
-	struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+	struct work_queue_wrapper *wqw =
+		container_of(work, struct work_queue_wrapper, work);
 	struct _MPT_SCSI_HOST *hd = wqw->hd;
 	struct Scsi_Host *shost = hd->ioc->sh;
 	struct scsi_device *sdev;
@@ -695,7 +696,7 @@
 			   disk);
 		return;
 	}
-	INIT_WORK(&wqw->work, mpt_work_wrapper, wqw);
+	INIT_WORK(&wqw->work, mpt_work_wrapper);
 	wqw->hd = hd;
 	wqw->disk = disk;
 
@@ -784,9 +785,10 @@
  * renegotiate for a given target
  */
 static void
-mptspi_dv_renegotiate_work(void *data)
+mptspi_dv_renegotiate_work(struct work_struct *work)
 {
-	struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+	struct work_queue_wrapper *wqw =
+		container_of(work, struct work_queue_wrapper, work);
 	struct _MPT_SCSI_HOST *hd = wqw->hd;
 	struct scsi_device *sdev;
 
@@ -804,7 +806,7 @@
 	if (!wqw)
 		return;
 
-	INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw);
+	INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work);
 	wqw->hd = hd;
 
 	schedule_work(&wqw->work);
@@ -1098,8 +1100,7 @@
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mptspi_init - Register MPT adapter(s) as SCSI host(s) with
- *	linux scsi mid-layer.
+ *	mptspi_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer.
  *
  *	Returns 0 for success, non-zero for failure.
  */
@@ -1133,7 +1134,6 @@
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *	mptspi_exit - Unregisters MPT adapter(s)
- *
  */
 static void __exit
 mptspi_exit(void)
diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c
index d96c687..c463dc2 100644
--- a/drivers/message/i2o/bus-osm.c
+++ b/drivers/message/i2o/bus-osm.c
@@ -56,6 +56,9 @@
 /**
  *	i2o_bus_store_scan - Scan the I2O Bus Adapter
  *	@d: device which should be scanned
+ *	@attr: device_attribute
+ *	@buf: output buffer
+ *	@count: buffer size
  *
  *	Returns count.
  */
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index ee18305..b9df143 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -54,8 +54,8 @@
  *	@dev: I2O device to claim
  *	@drv: I2O driver which wants to claim the device
  *
- *	Do the leg work to assign a device to a given OSM. If the claim succeed
- *	the owner of the rimary. If the attempt fails a negative errno code
+ *	Do the leg work to assign a device to a given OSM. If the claim succeeds,
+ *	the owner is the primary. If the attempt fails a negative errno code
  *	is returned. On success zero is returned.
  */
 int i2o_device_claim(struct i2o_device *dev)
@@ -208,24 +208,23 @@
 
 /**
  *	i2o_device_add - allocate a new I2O device and add it to the IOP
- *	@iop: I2O controller where the device is on
+ *	@c: I2O controller that the device is on
  *	@entry: LCT entry of the I2O device
  *
  *	Allocate a new I2O device and initialize it with the LCT entry. The
  *	device is appended to the device list of the controller.
  *
- *	Returns a pointer to the I2O device on success or negative error code
- *	on failure.
+ *	Returns zero on success, or a negative errno on failure.
  */
-static struct i2o_device *i2o_device_add(struct i2o_controller *c,
-					 i2o_lct_entry * entry)
+static int i2o_device_add(struct i2o_controller *c, i2o_lct_entry *entry)
 {
 	struct i2o_device *i2o_dev, *tmp;
+	int rc;
 
 	i2o_dev = i2o_device_alloc();
 	if (IS_ERR(i2o_dev)) {
 		printk(KERN_ERR "i2o: unable to allocate i2o device\n");
-		return i2o_dev;
+		return PTR_ERR(i2o_dev);
 	}
 
 	i2o_dev->lct_data = *entry;
@@ -236,7 +235,9 @@
 	i2o_dev->iop = c;
 	i2o_dev->device.parent = &c->device;
 
-	device_register(&i2o_dev->device);
+	rc = device_register(&i2o_dev->device);
+	if (rc)
+		goto err;
 
 	list_add_tail(&i2o_dev->list, &c->devices);
 
@@ -270,12 +271,16 @@
 
 	pr_debug("i2o: device %s added\n", i2o_dev->device.bus_id);
 
-	return i2o_dev;
+	return 0;
+
+err:
+	kfree(i2o_dev);
+	return rc;
 }
 
 /**
  *	i2o_device_remove - remove an I2O device from the I2O core
- *	@dev: I2O device which should be released
+ *	@i2o_dev: I2O device which should be released
  *
  *	Is used on I2O controller removal or LCT modification, when the device
  *	is removed from the system. Note that the device could still hang
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 6413022..9104b65 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -34,9 +34,7 @@
 static struct i2o_driver **i2o_drivers;
 
 /**
- *	i2o_bus_match - Tell if a I2O device class id match the class ids of
- *			the I2O driver (OSM)
- *
+ *	i2o_bus_match - Tell if I2O device class id matches the class ids of the I2O driver (OSM)
  *	@dev: device which should be verified
  *	@drv: the driver to match against
  *
@@ -232,7 +230,7 @@
 			break;
 		}
 
-		INIT_WORK(&evt->work, (void (*)(void *))drv->event, evt);
+		INIT_WORK(&evt->work, drv->event);
 		queue_work(drv->event_queue, &evt->work);
 		return 1;
 	}
@@ -248,7 +246,7 @@
 
 /**
  *	i2o_driver_notify_controller_add_all - Send notify of added controller
- *					       to all I2O drivers
+ *	@c: newly added controller
  *
  *	Send notifications to all registered drivers that a new controller was
  *	added.
@@ -267,8 +265,8 @@
 }
 
 /**
- *	i2o_driver_notify_controller_remove_all - Send notify of removed
- *						  controller to all I2O drivers
+ *	i2o_driver_notify_controller_remove_all - Send notify of removed controller
+ *	@c: controller that is being removed
  *
  *	Send notifications to all registered drivers that a controller was
  *	removed.
@@ -287,8 +285,8 @@
 }
 
 /**
- *	i2o_driver_notify_device_add_all - Send notify of added device to all
- *					   I2O drivers
+ *	i2o_driver_notify_device_add_all - Send notify of added device
+ *	@i2o_dev: newly added I2O device
  *
  *	Send notifications to all registered drivers that a device was added.
  */
@@ -306,8 +304,8 @@
 }
 
 /**
- *	i2o_driver_notify_device_remove_all - Send notify of removed device to
- *					      all I2O drivers
+ *	i2o_driver_notify_device_remove_all - Send notify of removed device
+ *	@i2o_dev: device that is being removed
  *
  *	Send notifications to all registered drivers that a device was removed.
  */
@@ -362,7 +360,7 @@
 /**
  *	i2o_driver_exit - clean up I2O drivers (OSMs)
  *
- *	Unregisters the I2O bus and free driver array.
+ *	Unregisters the I2O bus and frees driver array.
  */
 void __exit i2o_driver_exit(void)
 {
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index a235064..902753b 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -94,8 +94,8 @@
 };
 
 /**
- *	i2o_exec_wait_free - Free a i2o_exec_wait struct
- *	@i2o_exec_wait: I2O wait data which should be cleaned up
+ *	i2o_exec_wait_free - Free an i2o_exec_wait struct
+ *	@wait: I2O wait data which should be cleaned up
  */
 static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
 {
@@ -105,7 +105,7 @@
 /**
  * 	i2o_msg_post_wait_mem - Post and wait a message with DMA buffers
  *	@c: controller
- *	@m: message to post
+ *	@msg: message to post
  *	@timeout: time in seconds to wait
  *	@dma: i2o_dma struct of the DMA buffer to free on failure
  *
@@ -269,6 +269,7 @@
 /**
  *	i2o_exec_show_vendor_id - Displays Vendor ID of controller
  *	@d: device of which the Vendor ID should be displayed
+ *	@attr: device_attribute to display
  *	@buf: buffer into which the Vendor ID should be printed
  *
  *	Returns number of bytes printed into buffer.
@@ -290,6 +291,7 @@
 /**
  *	i2o_exec_show_product_id - Displays Product ID of controller
  *	@d: device of which the Product ID should be displayed
+ *	@attr: device_attribute to display
  *	@buf: buffer into which the Product ID should be printed
  *
  *	Returns number of bytes printed into buffer.
@@ -365,14 +367,16 @@
 
 /**
  *	i2o_exec_lct_modified - Called on LCT NOTIFY reply
- *	@c: I2O controller on which the LCT has modified
+ *	@work: work struct for a specific controller
  *
 *	This function handles asynchronous LCT NOTIFY replies. It parses the
 *	new LCT and if the buffer for the LCT was too small sends a LCT NOTIFY
  *	again, otherwise send LCT NOTIFY to get informed on next LCT change.
  */
-static void i2o_exec_lct_modified(struct i2o_exec_lct_notify_work *work)
+static void i2o_exec_lct_modified(struct work_struct *_work)
 {
+	struct i2o_exec_lct_notify_work *work =
+		container_of(_work, struct i2o_exec_lct_notify_work, work);
 	u32 change_ind = 0;
 	struct i2o_controller *c = work->c;
 
@@ -439,8 +443,7 @@
 
 		work->c = c;
 
-		INIT_WORK(&work->work, (void (*)(void *))i2o_exec_lct_modified,
-			  work);
+		INIT_WORK(&work->work, i2o_exec_lct_modified);
 		queue_work(i2o_exec_driver.event_queue, &work->work);
 		return 1;
 	}
@@ -460,13 +463,15 @@
 
 /**
  *	i2o_exec_event - Event handling function
- *	@evt: Event which occurs
+ *	@work: Work item in occurring event
  *
 *	Handles events sent by the Executive device. At the moment it does not do
  *	anything useful.
  */
-static void i2o_exec_event(struct i2o_event *evt)
+static void i2o_exec_event(struct work_struct *work)
 {
+	struct i2o_event *evt = container_of(work, struct i2o_event, work);
+
 	if (likely(evt->i2o_dev))
 		osm_debug("Event received from device: %d\n",
 			  evt->i2o_dev->lct_data.tid);
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index eaba81b..da9859f 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -259,7 +259,7 @@
 /**
  *	i2o_block_device_power - Power management for device dev
  *	@dev: I2O device which should receive the power management request
- *	@operation: Operation which should be send
+ *	@op: Operation to send
  *
  *	Send a power management request to the device dev.
  *
@@ -315,7 +315,7 @@
  *	i2o_block_request_free - Frees a I2O block request
  *	@ireq: I2O block request which should be freed
  *
- *	Fres the allocated memory (give it back to the request mempool).
+ *	Frees the allocated memory (give it back to the request mempool).
  */
 static inline void i2o_block_request_free(struct i2o_block_request *ireq)
 {
@@ -326,6 +326,7 @@
  *	i2o_block_sglist_alloc - Allocate the SG list and map it
  *	@c: I2O controller to which the request belongs
  *	@ireq: I2O block request
+ *	@mptr: message body pointer
  *
 *	Builds the SG list and maps it to be accessible by the controller.
  *
@@ -419,16 +420,18 @@
 
 /**
  *	i2o_block_delayed_request_fn - delayed request queue function
- *	delayed_request: the delayed request with the queue to start
+ *	@work: the delayed request with the queue to start
  *
  *	If the request queue is stopped for a disk, and there is no open
  *	request, a new event is created, which calls this function to start
  *	the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
  *	be started again.
  */
-static void i2o_block_delayed_request_fn(void *delayed_request)
+static void i2o_block_delayed_request_fn(struct work_struct *work)
 {
-	struct i2o_block_delayed_request *dreq = delayed_request;
+	struct i2o_block_delayed_request *dreq =
+		container_of(work, struct i2o_block_delayed_request,
+			     work.work);
 	struct request_queue *q = dreq->queue;
 	unsigned long flags;
 
@@ -488,7 +491,7 @@
  *	i2o_block_reply - Block OSM reply handler.
  *	@c: I2O controller from which the message arrives
  *	@m: message id of reply
- *	qmsg: the actuall I2O message reply
+ *	@msg: the actual I2O message reply
  *
  *	This function gets all the message replies.
  *
@@ -538,8 +541,9 @@
 	return 1;
 };
 
-static void i2o_block_event(struct i2o_event *evt)
+static void i2o_block_event(struct work_struct *work)
 {
+	struct i2o_event *evt = container_of(work, struct i2o_event, work);
 	osm_debug("event received\n");
 	kfree(evt);
 };
@@ -599,6 +603,8 @@
 
 /**
  *	i2o_block_open - Open the block device
+ *	@inode: inode for block device being opened
+ *	@file: file to open
  *
  *	Power up the device, mount and lock the media. This function is called,
  *	if the block device is opened for access.
@@ -626,6 +632,8 @@
 
 /**
  *	i2o_block_release - Release the I2O block device
+ *	@inode: inode for block device being released
+ *	@file: file to close
  *
  *	Unlock and unmount the media, and power down the device. Gets called if
  *	the block device is closed.
@@ -672,6 +680,8 @@
 
 /**
  *	i2o_block_ioctl - Issue device specific ioctl calls.
+ *	@inode: inode for block device ioctl
+ *	@file: file for ioctl
  *	@cmd: ioctl command
  *	@arg: arg
  *
@@ -899,7 +909,7 @@
 
 /**
  *	i2o_block_request_fn - request queue handling function
- *	q: request queue from which the request could be fetched
+ *	@q: request queue from which the request could be fetched
  *
  *	Takes the next request from the queue, transfers it and if no error
  *	occurs dequeue it from the queue. On arrival of the reply the message
@@ -938,8 +948,8 @@
 				continue;
 
 			dreq->queue = q;
-			INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
-				  dreq);
+			INIT_DELAYED_WORK(&dreq->work,
+					  i2o_block_delayed_request_fn);
 
 			if (!queue_delayed_work(i2o_block_driver.event_queue,
 						&dreq->work,
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/message/i2o/i2o_block.h
index 4fdaa5b..67f921b 100644
--- a/drivers/message/i2o/i2o_block.h
+++ b/drivers/message/i2o/i2o_block.h
@@ -64,7 +64,7 @@
 
 /* I2O Block OSM mempool struct */
 struct i2o_block_mempool {
-	kmem_cache_t *slab;
+	struct kmem_cache *slab;
 	mempool_t *pool;
 };
 
@@ -96,7 +96,7 @@
 
 /* I2O Block device delayed request */
 struct i2o_block_delayed_request {
-	struct work_struct work;
+	struct delayed_work work;
 	struct request_queue *queue;
 };
 
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 7d23e08..1de30d7 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -265,7 +265,11 @@
 		return -ENOMEM;
 	}
 
-	__copy_from_user(buffer.virt, kxfer.buf, fragsize);
+	if (__copy_from_user(buffer.virt, kxfer.buf, fragsize)) {
+		i2o_msg_nop(c, msg);
+		i2o_dma_free(&c->pdev->dev, &buffer);
+		return -EFAULT;
+	}
 
 	msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
 	msg->u.head[1] =
@@ -516,7 +520,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_I2O_EXT_ADAPTEC
 #ifdef CONFIG_COMPAT
 static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
 			      unsigned long arg)
@@ -759,6 +762,7 @@
 
 #endif
 
+#ifdef CONFIG_I2O_EXT_ADAPTEC
 static int i2o_cfg_passthru(unsigned long arg)
 {
 	struct i2o_cmd_passthru __user *cmd =
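
The ioctl fix a few hunks up is the standard user-copy pattern: copy_from_user()/__copy_from_user() return the number of bytes left uncopied, so any non-zero result means -EFAULT and whatever was set up before the copy has to be torn down. A generic sketch of that pattern (foo_copy_fragment and its buffer handling are simplified, not the i2o_config internals):

	#include <linux/uaccess.h>
	#include <linux/slab.h>

	static int foo_copy_fragment(void __user *ubuf, size_t len)
	{
		void *kbuf = kmalloc(len, GFP_KERNEL);
		int ret = 0;

		if (!kbuf)
			return -ENOMEM;

		if (copy_from_user(kbuf, ubuf, len)) {
			/* non-zero return = bytes not copied; undo and bail out */
			ret = -EFAULT;
			goto out;
		}

		/* ... hand kbuf to the controller ... */
	out:
		kfree(kbuf);
		return ret;
	}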
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 3d2e76e..a61cb17 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -163,7 +163,7 @@
  *	i2o_get_class_name - 	do i2o class name lookup
  *	@class: class number
  *
- *	Return a descriptive string for an i2o class
+ *	Return a descriptive string for an i2o class.
  */
 static const char *i2o_get_class_name(int class)
 {
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 6ebf382..1045c8a 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -220,7 +220,7 @@
 	u32 id = -1;
 	u64 lun = -1;
 	int channel = -1;
-	int i;
+	int i, rc;
 
 	i2o_shost = i2o_scsi_get_host(c);
 	if (!i2o_shost)
@@ -304,14 +304,20 @@
 		return PTR_ERR(scsi_dev);
 	}
 
-	sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj,
-			  "scsi");
+	rc = sysfs_create_link(&i2o_dev->device.kobj,
+			       &scsi_dev->sdev_gendev.kobj, "scsi");
+	if (rc)
+		goto err;
 
 	osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %ld\n",
 		 i2o_dev->lct_data.tid, channel, le32_to_cpu(id),
 		 (long unsigned int)le64_to_cpu(lun));
 
 	return 0;
+
+err:
+	scsi_remove_device(scsi_dev);
+	return rc;
 };
 
 static const char *i2o_scsi_info(struct Scsi_Host *SChost)
@@ -405,8 +411,7 @@
 };
 
 /**
- *	i2o_scsi_notify_device_remove - Retrieve notifications of removed
- *				        devices
+ *	i2o_scsi_notify_device_remove - Retrieve notifications of removed devices
  *	@i2o_dev: the I2O device which was removed
  *
  *	If a I2O device is removed, we catch the notification to remove the
@@ -426,8 +431,7 @@
 };
 
 /**
- *	i2o_scsi_notify_controller_add - Retrieve notifications of added
- *					 controllers
+ *	i2o_scsi_notify_controller_add - Retrieve notifications of added controllers
  *	@c: the controller which was added
  *
  *	If a I2O controller is added, we catch the notification to add a
@@ -457,8 +461,7 @@
 };
 
 /**
- *	i2o_scsi_notify_controller_remove - Retrieve notifications of removed
- *					    controllers
+ *	i2o_scsi_notify_controller_remove - Retrieve notifications of removed controllers
  *	@c: the controller which was removed
  *
  *	If a I2O controller is removed, we catch the notification to remove the
@@ -745,7 +748,7 @@
  *	@capacity: size in sectors
  *	@ip: geometry array
  *
- *	This is anyones guess quite frankly. We use the same rules everyone
+ *	This is anyone's guess quite frankly. We use the same rules everyone
  *	else appears to and hope. It seems to work.
  */
 
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index 62f1ac0..3661e6e 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -259,6 +259,7 @@
 
 /**
  *	i2o_pci_irq_enable - Allocate interrupt for I2O controller
+ *	@c: i2o_controller that the request is for
  *
  *	Allocate an interrupt for the I2O controller, and activate interrupts
  *	on the I2O controller.
@@ -305,7 +306,7 @@
 
 /**
  *	i2o_pci_probe - Probe the PCI device for an I2O controller
- *	@dev: PCI device to test
+ *	@pdev: PCI device to test
  *	@id: id which matched with the PCI device id table
  *
 *	Probe the PCI device for any device which is a member of the
@@ -320,7 +321,6 @@
 	struct i2o_controller *c;
 	int rc;
 	struct pci_dev *i960 = NULL;
-	int enabled = pdev->is_enabled;
 
 	printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n");
 
@@ -330,12 +330,11 @@
 		return -ENODEV;
 	}
 
-	if (!enabled)
-		if ((rc = pci_enable_device(pdev))) {
-			printk(KERN_WARNING "i2o: couldn't enable device %s\n",
-			       pci_name(pdev));
-			return rc;
-		}
+	if ((rc = pci_enable_device(pdev))) {
+		printk(KERN_WARNING "i2o: couldn't enable device %s\n",
+		       pci_name(pdev));
+		return rc;
+	}
 
 	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
 		printk(KERN_WARNING "i2o: no suitable DMA found for %s\n",
@@ -442,15 +441,14 @@
 	i2o_iop_free(c);
 
       disable:
-	if (!enabled)
-		pci_disable_device(pdev);
+	pci_disable_device(pdev);
 
 	return rc;
 }
 
 /**
  *	i2o_pci_remove - Removes a I2O controller from the system
- *	pdev: I2O controller which should be removed
+ *	@pdev: I2O controller which should be removed
  *
  *	Reset the I2O controller, disable interrupts and remove all allocated
  *	resources.
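
The PCI probe change above drops the "already enabled" bookkeeping: pci_enable_device() is now called unconditionally and paired with pci_disable_device() on the error path. A bare-bones sketch of that pairing (foo_pci_probe and the setup step are placeholders, not the i2o code):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int rc;

		rc = pci_enable_device(pdev);
		if (rc) {
			dev_warn(&pdev->dev, "couldn't enable device\n");
			return rc;
		}

		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
			rc = -ENODEV;
			goto disable;
		}

		/* ... map BARs, request the IRQ, register the controller ... */
		return 0;

	disable:
		pci_disable_device(pdev);	/* undo exactly what succeeded */
		return rc;
	}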
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 82938ad..ce1a481 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -28,7 +28,7 @@
 #include <linux/string.h>
 #include <linux/input.h>
 #include <linux/device.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/slab.h>
 #include <linux/kthread.h>
 
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 1ba8754..2ab7add 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -33,9 +33,10 @@
 	spin_unlock_irqrestore(&fm->lock, flags);
 }
 
-static void tifm_7xx1_remove_media(void *adapter)
+static void tifm_7xx1_remove_media(struct work_struct *work)
 {
-	struct tifm_adapter *fm = adapter;
+	struct tifm_adapter *fm =
+		container_of(work, struct tifm_adapter, media_remover);
 	unsigned long flags;
 	int cnt;
 	struct tifm_dev *sock;
@@ -169,9 +170,10 @@
 	return base_addr + ((sock_num + 1) << 10);
 }
 
-static void tifm_7xx1_insert_media(void *adapter)
+static void tifm_7xx1_insert_media(struct work_struct *work)
 {
-	struct tifm_adapter *fm = adapter;
+	struct tifm_adapter *fm =
+		container_of(work, struct tifm_adapter, media_inserter);
 	unsigned long flags;
 	tifm_media_id media_id;
 	char *card_name = "xx";
@@ -261,7 +263,7 @@
 	spin_unlock_irqrestore(&fm->lock, flags);
 	flush_workqueue(fm->wq);
 
-	tifm_7xx1_remove_media(fm);
+	tifm_7xx1_remove_media(&fm->media_remover);
 
 	pci_set_power_state(dev, PCI_D3hot);
         pci_disable_device(dev);
@@ -328,8 +330,8 @@
 	if (!fm->sockets)
 		goto err_out_free;
 
-	INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media, fm);
-	INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media, fm);
+	INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media);
+	INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media);
 	fm->eject = tifm_7xx1_eject;
 	pci_set_drvdata(dev, fm);
 
@@ -384,7 +386,7 @@
 
 	flush_workqueue(fm->wq);
 
-	tifm_7xx1_remove_media(fm);
+	tifm_7xx1_remove_media(&fm->media_remover);
 
 	writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
 	free_irq(dev->irq, fm);
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index ee32613..d61df5c 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -219,8 +219,9 @@
 	struct tifm_driver *drv = fm_dev->drv;
 
 	if (drv) {
-		if (drv->remove) drv->remove(fm_dev);
-		fm_dev->drv = 0;
+		if (drv->remove)
+			drv->remove(fm_dev);
+		fm_dev->drv = NULL;
 	}
 
 	put_device(dev);
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index fbef8da..4224686 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -40,7 +40,7 @@
 	  If unsure, say N.
 
 config MMC_PXA
-	tristate "Intel PXA255 Multimedia Card Interface support"
+	tristate "Intel PXA25x/26x/27x Multimedia Card Interface support"
 	depends on ARCH_PXA && MMC
 	help
 	  This selects the Intel(R) PXA(R) Multimedia card Interface.
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
index 41761f7..4633dbc 100644
--- a/drivers/mmc/at91_mci.c
+++ b/drivers/mmc/at91_mci.c
@@ -793,7 +793,7 @@
 	return read_only;
 }
 
-static struct mmc_host_ops at91_mci_ops = {
+static const struct mmc_host_ops at91_mci_ops = {
 	.request	= at91_mci_request,
 	.set_ios	= at91_mci_set_ios,
 	.get_ro		= at91_mci_get_ro,
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index 53ffcbb..447fba5 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -875,7 +875,7 @@
 	host->rx_chan = rxchan;
 }
 
-struct mmc_host_ops au1xmmc_ops = {
+const struct mmc_host_ops au1xmmc_ops = {
 	.request	= au1xmmc_request,
 	.set_ios	= au1xmmc_set_ios,
 };
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index 659d4a8..06e7fcd 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -877,7 +877,7 @@
 	}
 }
 
-static struct mmc_host_ops imxmci_ops = {
+static const struct mmc_host_ops imxmci_ops = {
 	.request	= imxmci_request,
 	.set_ios	= imxmci_set_ios,
 };
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 766bc54..6f2a282 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -4,6 +4,7 @@
  *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
  *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
  *  SD support Copyright (C) 2005 Pierre Ossman, All Rights Reserved.
+ *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -396,23 +397,23 @@
 		return err;
 
 	/*
-	 * Default bus width is 1 bit.
-	 */
-	host->ios.bus_width = MMC_BUS_WIDTH_1;
-
-	/*
-	 * We can only change the bus width of the selected
-	 * card so therefore we have to put the handling
+	 * We can only change the bus width of SD cards when
+	 * they are selected so we have to put the handling
 	 * here.
+	 *
+	 * The card is in 1 bit mode by default so
+	 * we only need to change if it supports the
+	 * wider version.
 	 */
-	if (host->caps & MMC_CAP_4_BIT_DATA) {
+	if (mmc_card_sd(card) &&
+		(card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
+
 		/*
-		 * The card is in 1 bit mode by default so
-		 * we only need to change if it supports the
-		 * wider version.
-		 */
-		if (mmc_card_sd(card) &&
-			(card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
+		 * Default bus width is 1 bit.
+		 */
+		host->ios.bus_width = MMC_BUS_WIDTH_1;
+
+		if (host->caps & MMC_CAP_4_BIT_DATA) {
 			struct mmc_command cmd;
 			cmd.opcode = SD_APP_SET_BUS_WIDTH;
 			cmd.arg = SD_BUS_WIDTH_4;
@@ -453,11 +454,11 @@
 
 static inline void mmc_delay(unsigned int ms)
 {
-	if (ms < HZ / 1000) {
-		yield();
+	if (ms < 1000 / HZ) {
+		cond_resched();
 		mdelay(ms);
 	} else {
-		msleep_interruptible (ms);
+		msleep(ms);
 	}
 }
 
@@ -953,6 +954,137 @@
 	}
 }
 
+static void mmc_process_ext_csds(struct mmc_host *host)
+{
+	int err;
+	struct mmc_card *card;
+
+	struct mmc_request mrq;
+	struct mmc_command cmd;
+	struct mmc_data data;
+
+	struct scatterlist sg;
+
+	/*
+	 * As the ext_csd is so large and mostly unused, we don't store the
+	 * raw block in mmc_card.
+	 */
+	u8 *ext_csd;
+	ext_csd = kmalloc(512, GFP_KERNEL);
+	if (!ext_csd) {
+		printk("%s: could not allocate a buffer to receive the ext_csd. "
+		       "mmc v4 cards will be treated as v3.\n",
+			mmc_hostname(host));
+		return;
+	}
+
+	list_for_each_entry(card, &host->cards, node) {
+		if (card->state & (MMC_STATE_DEAD|MMC_STATE_PRESENT))
+			continue;
+		if (mmc_card_sd(card))
+			continue;
+		if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
+			continue;
+
+		err = mmc_select_card(host, card);
+		if (err != MMC_ERR_NONE) {
+			mmc_card_set_dead(card);
+			continue;
+		}
+
+		memset(&cmd, 0, sizeof(struct mmc_command));
+
+		cmd.opcode = MMC_SEND_EXT_CSD;
+		cmd.arg = 0;
+		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+		memset(&data, 0, sizeof(struct mmc_data));
+
+		mmc_set_data_timeout(&data, card, 0);
+
+		data.blksz = 512;
+		data.blocks = 1;
+		data.flags = MMC_DATA_READ;
+		data.sg = &sg;
+		data.sg_len = 1;
+
+		memset(&mrq, 0, sizeof(struct mmc_request));
+
+		mrq.cmd = &cmd;
+		mrq.data = &data;
+
+		sg_init_one(&sg, ext_csd, 512);
+
+		mmc_wait_for_req(host, &mrq);
+
+		if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
+			mmc_card_set_dead(card);
+			continue;
+		}
+
+		switch (ext_csd[EXT_CSD_CARD_TYPE]) {
+		case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
+			card->ext_csd.hs_max_dtr = 52000000;
+			break;
+		case EXT_CSD_CARD_TYPE_26:
+			card->ext_csd.hs_max_dtr = 26000000;
+			break;
+		default:
+			/* MMC v4 spec says this cannot happen */
+			printk("%s: card is mmc v4 but doesn't support "
+			       "any high-speed modes.\n",
+				mmc_hostname(card->host));
+			mmc_card_set_bad(card);
+			continue;
+		}
+
+		/* Activate highspeed support. */
+		cmd.opcode = MMC_SWITCH;
+		cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+			  (EXT_CSD_HS_TIMING << 16) |
+			  (1 << 8) |
+			  EXT_CSD_CMD_SET_NORMAL;
+		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
+		err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
+		if (err != MMC_ERR_NONE) {
+			printk("%s: failed to switch card to mmc v4 "
+			       "high-speed mode.\n",
+			       mmc_hostname(card->host));
+			continue;
+		}
+
+		mmc_card_set_highspeed(card);
+
+		/* Check for host support for wide-bus modes. */
+		if (!(host->caps & MMC_CAP_4_BIT_DATA)) {
+			continue;
+		}
+
+		/* Activate 4-bit support. */
+		cmd.opcode = MMC_SWITCH;
+		cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+			  (EXT_CSD_BUS_WIDTH << 16) |
+			  (EXT_CSD_BUS_WIDTH_4 << 8) |
+			  EXT_CSD_CMD_SET_NORMAL;
+		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
+		err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
+		if (err != MMC_ERR_NONE) {
+			printk("%s: failed to switch card to "
+			       "mmc v4 4-bit bus mode.\n",
+			       mmc_hostname(card->host));
+			continue;
+		}
+
+		host->ios.bus_width = MMC_BUS_WIDTH_4;
+	}
+
+	kfree(ext_csd);
+
+	mmc_deselect_cards(host);
+}
+
 static void mmc_read_scrs(struct mmc_host *host)
 {
 	int err;
@@ -1025,14 +1157,133 @@
 	mmc_deselect_cards(host);
 }
 
+static void mmc_read_switch_caps(struct mmc_host *host)
+{
+	int err;
+	struct mmc_card *card;
+	struct mmc_request mrq;
+	struct mmc_command cmd;
+	struct mmc_data data;
+	unsigned char *status;
+	struct scatterlist sg;
+
+	status = kmalloc(64, GFP_KERNEL);
+	if (!status) {
+		printk(KERN_WARNING "%s: Unable to allocate buffer for "
+			"reading switch capabilities.\n",
+			mmc_hostname(host));
+		return;
+	}
+
+	list_for_each_entry(card, &host->cards, node) {
+		if (card->state & (MMC_STATE_DEAD|MMC_STATE_PRESENT))
+			continue;
+		if (!mmc_card_sd(card))
+			continue;
+		if (card->scr.sda_vsn < SCR_SPEC_VER_1)
+			continue;
+
+		err = mmc_select_card(host, card);
+		if (err != MMC_ERR_NONE) {
+			mmc_card_set_dead(card);
+			continue;
+		}
+
+		memset(&cmd, 0, sizeof(struct mmc_command));
+
+		cmd.opcode = SD_SWITCH;
+		cmd.arg = 0x00FFFFF1;
+		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+		memset(&data, 0, sizeof(struct mmc_data));
+
+		mmc_set_data_timeout(&data, card, 0);
+
+		data.blksz = 64;
+		data.blocks = 1;
+		data.flags = MMC_DATA_READ;
+		data.sg = &sg;
+		data.sg_len = 1;
+
+		memset(&mrq, 0, sizeof(struct mmc_request));
+
+		mrq.cmd = &cmd;
+		mrq.data = &data;
+
+		sg_init_one(&sg, status, 64);
+
+		mmc_wait_for_req(host, &mrq);
+
+		if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
+			mmc_card_set_dead(card);
+			continue;
+		}
+
+		if (status[13] & 0x02)
+			card->sw_caps.hs_max_dtr = 50000000;
+
+		memset(&cmd, 0, sizeof(struct mmc_command));
+
+		cmd.opcode = SD_SWITCH;
+		cmd.arg = 0x80FFFFF1;
+		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+		memset(&data, 0, sizeof(struct mmc_data));
+
+		mmc_set_data_timeout(&data, card, 0);
+
+		data.blksz = 64;
+		data.blocks = 1;
+		data.flags = MMC_DATA_READ;
+		data.sg = &sg;
+		data.sg_len = 1;
+
+		memset(&mrq, 0, sizeof(struct mmc_request));
+
+		mrq.cmd = &cmd;
+		mrq.data = &data;
+
+		sg_init_one(&sg, status, 64);
+
+		mmc_wait_for_req(host, &mrq);
+
+		if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
+			mmc_card_set_dead(card);
+			continue;
+		}
+
+		if ((status[16] & 0xF) != 1) {
+			printk(KERN_WARNING "%s: Problem switching card "
+				"into high-speed mode!\n",
+				mmc_hostname(host));
+			continue;
+		}
+
+		mmc_card_set_highspeed(card);
+	}
+
+	kfree(status);
+
+	mmc_deselect_cards(host);
+}
+
 static unsigned int mmc_calculate_clock(struct mmc_host *host)
 {
 	struct mmc_card *card;
 	unsigned int max_dtr = host->f_max;
 
 	list_for_each_entry(card, &host->cards, node)
-		if (!mmc_card_dead(card) && max_dtr > card->csd.max_dtr)
-			max_dtr = card->csd.max_dtr;
+		if (!mmc_card_dead(card)) {
+			if (mmc_card_highspeed(card) && mmc_card_sd(card)) {
+				if (max_dtr > card->sw_caps.hs_max_dtr)
+					max_dtr = card->sw_caps.hs_max_dtr;
+			} else if (mmc_card_highspeed(card) && !mmc_card_sd(card)) {
+				if (max_dtr > card->ext_csd.hs_max_dtr)
+					max_dtr = card->ext_csd.hs_max_dtr;
+			} else if (max_dtr > card->csd.max_dtr) {
+				max_dtr = card->csd.max_dtr;
+			}
+		}
 
 	pr_debug("%s: selected %d.%03dMHz transfer rate\n",
 		 mmc_hostname(host),
@@ -1150,8 +1401,11 @@
 
 	mmc_read_csds(host);
 
-	if (host->mode == MMC_MODE_SD)
+	if (host->mode == MMC_MODE_SD) {
 		mmc_read_scrs(host);
+		mmc_read_switch_caps(host);
+	} else
+		mmc_process_ext_csds(host);
 }
 
 
@@ -1165,18 +1419,16 @@
  */
 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
 {
-	if (delay)
-		mmc_schedule_delayed_work(&host->detect, delay);
-	else
-		mmc_schedule_work(&host->detect);
+	mmc_schedule_delayed_work(&host->detect, delay);
 }
 
 EXPORT_SYMBOL(mmc_detect_change);
 
 
-static void mmc_rescan(void *data)
+static void mmc_rescan(struct work_struct *work)
 {
-	struct mmc_host *host = data;
+	struct mmc_host *host =
+		container_of(work, struct mmc_host, detect.work);
 	struct list_head *l, *n;
 	unsigned char power_mode;
 
@@ -1259,7 +1511,7 @@
 		spin_lock_init(&host->lock);
 		init_waitqueue_head(&host->wq);
 		INIT_LIST_HEAD(&host->cards);
-		INIT_WORK(&host->detect, mmc_rescan, host);
+		INIT_DELAYED_WORK(&host->detect, mmc_rescan);
 
 		/*
 		 * By default, hosts do not support SGIO or large requests.
@@ -1357,7 +1609,7 @@
  */
 int mmc_resume_host(struct mmc_host *host)
 {
-	mmc_rescan(host);
+	mmc_rescan(&host->detect.work);
 
 	return 0;
 }
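
The new EXT_CSD handling in mmc.c issues CMD6 (MMC_SWITCH) twice, once for HS_TIMING and once for BUS_WIDTH, and in both cases packs the 32-bit argument as access mode, byte index, value and command set. The packing in isolation, as a small helper (the helper itself is illustrative, not part of the patch; the field names are the ones used above):

	#include <linux/types.h>

	/* CMD6 argument layout used in mmc_process_ext_csds():
	 *   [25:24] access mode, [23:16] EXT_CSD byte index,
	 *   [15:8]  value to write, [7:0] command set
	 */
	static u32 foo_switch_arg(u8 mode, u8 index, u8 value, u8 cmd_set)
	{
		return (mode << 24) | (index << 16) | (value << 8) | cmd_set;
	}

	/* e.g. enabling high-speed timing, as above:
	 *	cmd.arg = foo_switch_arg(MMC_SWITCH_MODE_WRITE_BYTE,
	 *				 EXT_CSD_HS_TIMING, 1,
	 *				 EXT_CSD_CMD_SET_NORMAL);
	 */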
diff --git a/drivers/mmc/mmc.h b/drivers/mmc/mmc.h
index cd5e0ab..149affe 100644
--- a/drivers/mmc/mmc.h
+++ b/drivers/mmc/mmc.h
@@ -20,6 +20,6 @@
 void mmc_free_host_sysfs(struct mmc_host *host);
 
 int mmc_schedule_work(struct work_struct *work);
-int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay);
+int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay);
 void mmc_flush_scheduled_work(void);
 #endif
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index f9027c8..8771357 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -83,7 +83,6 @@
 	md->usage--;
 	if (md->usage == 0) {
 		put_disk(md->disk);
-		mmc_cleanup_queue(&md->queue);
 		kfree(md);
 	}
 	mutex_unlock(&open_lock);
@@ -225,10 +224,10 @@
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request brq;
-	int ret;
+	int ret = 1;
 
 	if (mmc_card_claim_host(card))
-		goto cmd_err;
+		goto flush_queue;
 
 	do {
 		struct mmc_command cmd;
@@ -345,8 +344,6 @@
 	return 1;
 
  cmd_err:
-	ret = 1;
-
  	/*
  	 * If this is an SD card and we're writing, we can first
  	 * mark the known good sectors as ok.
@@ -380,6 +377,7 @@
 
 	mmc_card_release_host(card);
 
+flush_queue:
 	spin_lock_irq(&md->lock);
 	while (ret) {
 		ret = end_that_request_chunk(req, 0,
@@ -553,12 +551,11 @@
 	if (md) {
 		int devidx;
 
+		/* Stop new requests from getting into the queue */
 		del_gendisk(md->disk);
 
-		/*
-		 * I think this is needed.
-		 */
-		md->disk->queue = NULL;
+		/* Then flush out any already in there */
+		mmc_cleanup_queue(&md->queue);
 
 		devidx = md->disk->first_minor >> MMC_SHIFT;
 		__clear_bit(devidx, dev_use);
diff --git a/drivers/mmc/mmc_queue.c b/drivers/mmc/mmc_queue.c
index 4ccdd82..a17423a 100644
--- a/drivers/mmc/mmc_queue.c
+++ b/drivers/mmc/mmc_queue.c
@@ -10,13 +10,13 @@
  */
 #include <linux/module.h>
 #include <linux/blkdev.h>
+#include <linux/kthread.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
 #include "mmc_queue.h"
 
-#define MMC_QUEUE_EXIT		(1 << 0)
-#define MMC_QUEUE_SUSPENDED	(1 << 1)
+#define MMC_QUEUE_SUSPENDED	(1 << 0)
 
 /*
  * Prepare a MMC request.  Essentially, this means passing the
@@ -59,7 +59,6 @@
 {
 	struct mmc_queue *mq = d;
 	struct request_queue *q = mq->queue;
-	DECLARE_WAITQUEUE(wait, current);
 
 	/*
 	 * Set iothread to ensure that we aren't put to sleep by
@@ -67,12 +66,7 @@
 	 */
 	current->flags |= PF_MEMALLOC|PF_NOFREEZE;
 
-	daemonize("mmcqd");
-
-	complete(&mq->thread_complete);
-
 	down(&mq->thread_sem);
-	add_wait_queue(&mq->thread_wq, &wait);
 	do {
 		struct request *req = NULL;
 
@@ -84,7 +78,7 @@
 		spin_unlock_irq(q->queue_lock);
 
 		if (!req) {
-			if (mq->flags & MMC_QUEUE_EXIT)
+			if (kthread_should_stop())
 				break;
 			up(&mq->thread_sem);
 			schedule();
@@ -95,10 +89,8 @@
 
 		mq->issue_fn(mq, req);
 	} while (1);
-	remove_wait_queue(&mq->thread_wq, &wait);
 	up(&mq->thread_sem);
 
-	complete_and_exit(&mq->thread_complete, 0);
 	return 0;
 }
 
@@ -111,9 +103,22 @@
 static void mmc_request(request_queue_t *q)
 {
 	struct mmc_queue *mq = q->queuedata;
+	struct request *req;
+	int ret;
+
+	if (!mq) {
+		printk(KERN_ERR "MMC: killing requests for dead queue\n");
+		while ((req = elv_next_request(q)) != NULL) {
+			do {
+				ret = end_that_request_chunk(req, 0,
+					req->current_nr_sectors << 9);
+			} while (ret);
+		}
+		return;
+	}
 
 	if (!mq->req)
-		wake_up(&mq->thread_wq);
+		wake_up_process(mq->thread);
 }
 
 /**
@@ -130,8 +135,8 @@
 	u64 limit = BLK_BOUNCE_HIGH;
 	int ret;
 
-	if (host->dev->dma_mask && *host->dev->dma_mask)
-		limit = *host->dev->dma_mask;
+	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+		limit = *mmc_dev(host)->dma_mask;
 
 	mq->card = card;
 	mq->queue = blk_init_queue(mmc_request, lock);
@@ -152,36 +157,40 @@
 			 GFP_KERNEL);
 	if (!mq->sg) {
 		ret = -ENOMEM;
-		goto cleanup;
+		goto cleanup_queue;
 	}
 
-	init_completion(&mq->thread_complete);
-	init_waitqueue_head(&mq->thread_wq);
 	init_MUTEX(&mq->thread_sem);
 
-	ret = kernel_thread(mmc_queue_thread, mq, CLONE_KERNEL);
-	if (ret >= 0) {
-		wait_for_completion(&mq->thread_complete);
-		init_completion(&mq->thread_complete);
-		ret = 0;
-		goto out;
+	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
+	if (IS_ERR(mq->thread)) {
+		ret = PTR_ERR(mq->thread);
+		goto free_sg;
 	}
 
- cleanup:
+	return 0;
+
+ free_sg:
 	kfree(mq->sg);
 	mq->sg = NULL;
-
+ cleanup_queue:
 	blk_cleanup_queue(mq->queue);
- out:
 	return ret;
 }
 EXPORT_SYMBOL(mmc_init_queue);
 
 void mmc_cleanup_queue(struct mmc_queue *mq)
 {
-	mq->flags |= MMC_QUEUE_EXIT;
-	wake_up(&mq->thread_wq);
-	wait_for_completion(&mq->thread_complete);
+	request_queue_t *q = mq->queue;
+	unsigned long flags;
+
+	/* Mark that we should start throwing out stragglers */
+	spin_lock_irqsave(q->queue_lock, flags);
+	q->queuedata = NULL;
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	/* Then terminate our worker thread */
+	kthread_stop(mq->thread);
 
 	kfree(mq->sg);
 	mq->sg = NULL;
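
The queue-thread rework above is the usual kthread conversion: kernel_thread()/daemonize() and the completion handshake go away in favour of kthread_run(), and the private exit flag becomes kthread_should_stop() paired with kthread_stop(). A stripped-down sketch of that lifecycle (foo_* names are illustrative):

	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int foo_thread(void *data)
	{
		do {
			set_current_state(TASK_INTERRUPTIBLE);
			if (kthread_should_stop())
				break;
			schedule();			/* sleep until woken */
			__set_current_state(TASK_RUNNING);
			/* ... service whatever the waker queued ... */
		} while (1);
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/* kthread_run() creates and wakes the thread in one step */
	static struct task_struct *foo_start(void *queue)
	{
		return kthread_run(foo_thread, queue, "food");
	}

	static void foo_stop(struct task_struct *tsk)
	{
		kthread_stop(tsk);	/* raises should_stop, wakes it, waits for exit */
	}

The producer side then only needs wake_up_process() on the stored task_struct, which is what mmc_request() does once the queue is live.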
diff --git a/drivers/mmc/mmc_queue.h b/drivers/mmc/mmc_queue.h
index 7182d2f..c9f139e 100644
--- a/drivers/mmc/mmc_queue.h
+++ b/drivers/mmc/mmc_queue.h
@@ -6,8 +6,7 @@
 
 struct mmc_queue {
 	struct mmc_card		*card;
-	struct completion	thread_complete;
-	wait_queue_head_t	thread_wq;
+	struct task_struct	*thread;
 	struct semaphore	thread_sem;
 	unsigned int		flags;
 	struct request		*req;
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
index 10cc973..e334acd 100644
--- a/drivers/mmc/mmc_sysfs.c
+++ b/drivers/mmc/mmc_sysfs.c
@@ -199,7 +199,7 @@
 	memset(card, 0, sizeof(struct mmc_card));
 	card->host = host;
 	device_initialize(&card->dev);
-	card->dev.parent = card->host->dev;
+	card->dev.parent = mmc_dev(host);
 	card->dev.bus = &mmc_bus_type;
 	card->dev.release = mmc_release_card;
 }
@@ -242,7 +242,7 @@
 }
 
 
-static void mmc_host_classdev_release(struct class_device *dev)
+static void mmc_host_classdev_release(struct device *dev)
 {
 	struct mmc_host *host = cls_dev_to_mmc_host(dev);
 	kfree(host);
@@ -250,7 +250,7 @@
 
 static struct class mmc_host_class = {
 	.name		= "mmc_host",
-	.release	= mmc_host_classdev_release,
+	.dev_release	= mmc_host_classdev_release,
 };
 
 static DEFINE_IDR(mmc_host_idr);
@@ -267,10 +267,10 @@
 	if (host) {
 		memset(host, 0, sizeof(struct mmc_host) + extra);
 
-		host->dev = dev;
-		host->class_dev.dev = host->dev;
+		host->parent = dev;
+		host->class_dev.parent = dev;
 		host->class_dev.class = &mmc_host_class;
-		class_device_initialize(&host->class_dev);
+		device_initialize(&host->class_dev);
 	}
 
 	return host;
@@ -292,10 +292,10 @@
 	if (err)
 		return err;
 
-	snprintf(host->class_dev.class_id, BUS_ID_SIZE,
+	snprintf(host->class_dev.bus_id, BUS_ID_SIZE,
 		 "mmc%d", host->index);
 
-	return class_device_add(&host->class_dev);
+	return device_add(&host->class_dev);
 }
 
 /*
@@ -303,7 +303,7 @@
  */
 void mmc_remove_host_sysfs(struct mmc_host *host)
 {
-	class_device_del(&host->class_dev);
+	device_del(&host->class_dev);
 
 	spin_lock(&mmc_host_lock);
 	idr_remove(&mmc_host_idr, host->index);
@@ -315,23 +315,15 @@
  */
 void mmc_free_host_sysfs(struct mmc_host *host)
 {
-	class_device_put(&host->class_dev);
+	put_device(&host->class_dev);
 }
 
 static struct workqueue_struct *workqueue;
 
 /*
- * Internal function. Schedule work in the MMC work queue.
- */
-int mmc_schedule_work(struct work_struct *work)
-{
-	return queue_work(workqueue, work);
-}
-
-/*
  * Internal function. Schedule delayed work in the MMC work queue.
  */
-int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay)
 {
 	return queue_delayed_work(workqueue, work, delay);
 }
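
The sysfs rework switches the MMC host from a class_device to a plain struct device embedded in mmc_host, so the class_device_* calls map one-for-one onto device_* calls. A compressed sketch of the lifecycle the file now follows (hypothetical foo_host, most error handling omitted; bus_id/BUS_ID_SIZE are the pre-dev_set_name() interface of this era):

	#include <linux/device.h>

	struct foo_host {
		struct device class_dev;	/* embedded, refcounted by the driver core */
	};

	static int foo_add(struct foo_host *host, struct device *parent,
			   struct class *cls, int index)
	{
		device_initialize(&host->class_dev);		/* was class_device_initialize() */
		host->class_dev.parent = parent;		/* was class_dev.dev = parent   */
		host->class_dev.class  = cls;
		snprintf(host->class_dev.bus_id, BUS_ID_SIZE, "foo%d", index);
		return device_add(&host->class_dev);		/* was class_device_add() */
	}

	static void foo_remove(struct foo_host *host)
	{
		device_del(&host->class_dev);			/* was class_device_del() */
		put_device(&host->class_dev);			/* was class_device_put() */
	}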
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index 828503c..e9b80e9 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -443,7 +443,7 @@
 	}
 }
 
-static struct mmc_host_ops mmci_ops = {
+static const struct mmc_host_ops mmci_ops = {
 	.request	= mmci_request,
 	.set_ios	= mmci_set_ios,
 };
diff --git a/drivers/mmc/omap.c b/drivers/mmc/omap.c
index 762fa28..435d331 100644
--- a/drivers/mmc/omap.c
+++ b/drivers/mmc/omap.c
@@ -38,7 +38,57 @@
 #include <asm/arch/fpga.h>
 #include <asm/arch/tps65010.h>
 
-#include "omap.h"
+#define	OMAP_MMC_REG_CMD	0x00
+#define	OMAP_MMC_REG_ARGL	0x04
+#define	OMAP_MMC_REG_ARGH	0x08
+#define	OMAP_MMC_REG_CON	0x0c
+#define	OMAP_MMC_REG_STAT	0x10
+#define	OMAP_MMC_REG_IE		0x14
+#define	OMAP_MMC_REG_CTO	0x18
+#define	OMAP_MMC_REG_DTO	0x1c
+#define	OMAP_MMC_REG_DATA	0x20
+#define	OMAP_MMC_REG_BLEN	0x24
+#define	OMAP_MMC_REG_NBLK	0x28
+#define	OMAP_MMC_REG_BUF	0x2c
+#define OMAP_MMC_REG_SDIO	0x34
+#define	OMAP_MMC_REG_REV	0x3c
+#define	OMAP_MMC_REG_RSP0	0x40
+#define	OMAP_MMC_REG_RSP1	0x44
+#define	OMAP_MMC_REG_RSP2	0x48
+#define	OMAP_MMC_REG_RSP3	0x4c
+#define	OMAP_MMC_REG_RSP4	0x50
+#define	OMAP_MMC_REG_RSP5	0x54
+#define	OMAP_MMC_REG_RSP6	0x58
+#define	OMAP_MMC_REG_RSP7	0x5c
+#define	OMAP_MMC_REG_IOSR	0x60
+#define	OMAP_MMC_REG_SYSC	0x64
+#define	OMAP_MMC_REG_SYSS	0x68
+
+#define	OMAP_MMC_STAT_CARD_ERR		(1 << 14)
+#define	OMAP_MMC_STAT_CARD_IRQ		(1 << 13)
+#define	OMAP_MMC_STAT_OCR_BUSY		(1 << 12)
+#define	OMAP_MMC_STAT_A_EMPTY		(1 << 11)
+#define	OMAP_MMC_STAT_A_FULL		(1 << 10)
+#define	OMAP_MMC_STAT_CMD_CRC		(1 <<  8)
+#define	OMAP_MMC_STAT_CMD_TOUT		(1 <<  7)
+#define	OMAP_MMC_STAT_DATA_CRC		(1 <<  6)
+#define	OMAP_MMC_STAT_DATA_TOUT		(1 <<  5)
+#define	OMAP_MMC_STAT_END_BUSY		(1 <<  4)
+#define	OMAP_MMC_STAT_END_OF_DATA	(1 <<  3)
+#define	OMAP_MMC_STAT_CARD_BUSY		(1 <<  2)
+#define	OMAP_MMC_STAT_END_OF_CMD	(1 <<  0)
+
+#define OMAP_MMC_READ(host, reg)	__raw_readw((host)->virt_base + OMAP_MMC_REG_##reg)
+#define OMAP_MMC_WRITE(host, reg, val)	__raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg)
+
+/*
+ * Command types
+ */
+#define OMAP_MMC_CMDTYPE_BC	0
+#define OMAP_MMC_CMDTYPE_BCR	1
+#define OMAP_MMC_CMDTYPE_AC	2
+#define OMAP_MMC_CMDTYPE_ADTC	3
+
 
 #define DRIVER_NAME "mmci-omap"
 #define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
@@ -60,8 +110,9 @@
 	unsigned char		id; /* 16xx chips have 2 MMC blocks */
 	struct clk *		iclk;
 	struct clk *		fclk;
-	struct resource		*res;
-	void __iomem		*base;
+	struct resource		*mem_res;
+	void __iomem		*virt_base;
+	unsigned int		phys_base;
 	int			irq;
 	unsigned char		bus_mode;
 	unsigned char		hw_bus_mode;
@@ -191,16 +242,16 @@
 
 	clk_enable(host->fclk);
 
-	OMAP_MMC_WRITE(host->base, CTO, 200);
-	OMAP_MMC_WRITE(host->base, ARGL, cmd->arg & 0xffff);
-	OMAP_MMC_WRITE(host->base, ARGH, cmd->arg >> 16);
-	OMAP_MMC_WRITE(host->base, IE,
+	OMAP_MMC_WRITE(host, CTO, 200);
+	OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff);
+	OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16);
+	OMAP_MMC_WRITE(host, IE,
 		       OMAP_MMC_STAT_A_EMPTY    | OMAP_MMC_STAT_A_FULL    |
 		       OMAP_MMC_STAT_CMD_CRC    | OMAP_MMC_STAT_CMD_TOUT  |
 		       OMAP_MMC_STAT_DATA_CRC   | OMAP_MMC_STAT_DATA_TOUT |
 		       OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR  |
 		       OMAP_MMC_STAT_END_OF_DATA);
-	OMAP_MMC_WRITE(host->base, CMD, cmdreg);
+	OMAP_MMC_WRITE(host, CMD, cmdreg);
 }
 
 static void
@@ -296,22 +347,22 @@
 		if (cmd->flags & MMC_RSP_136) {
 			/* response type 2 */
 			cmd->resp[3] =
-				OMAP_MMC_READ(host->base, RSP0) |
-				(OMAP_MMC_READ(host->base, RSP1) << 16);
+				OMAP_MMC_READ(host, RSP0) |
+				(OMAP_MMC_READ(host, RSP1) << 16);
 			cmd->resp[2] =
-				OMAP_MMC_READ(host->base, RSP2) |
-				(OMAP_MMC_READ(host->base, RSP3) << 16);
+				OMAP_MMC_READ(host, RSP2) |
+				(OMAP_MMC_READ(host, RSP3) << 16);
 			cmd->resp[1] =
-				OMAP_MMC_READ(host->base, RSP4) |
-				(OMAP_MMC_READ(host->base, RSP5) << 16);
+				OMAP_MMC_READ(host, RSP4) |
+				(OMAP_MMC_READ(host, RSP5) << 16);
 			cmd->resp[0] =
-				OMAP_MMC_READ(host->base, RSP6) |
-				(OMAP_MMC_READ(host->base, RSP7) << 16);
+				OMAP_MMC_READ(host, RSP6) |
+				(OMAP_MMC_READ(host, RSP7) << 16);
 		} else {
 			/* response types 1, 1b, 3, 4, 5, 6 */
 			cmd->resp[0] =
-				OMAP_MMC_READ(host->base, RSP6) |
-				(OMAP_MMC_READ(host->base, RSP7) << 16);
+				OMAP_MMC_READ(host, RSP6) |
+				(OMAP_MMC_READ(host, RSP7) << 16);
 		}
 	}
 
@@ -354,9 +405,9 @@
 	host->data->bytes_xfered += n;
 
 	if (write) {
-		__raw_writesw(host->base + OMAP_MMC_REG_DATA, host->buffer, n);
+		__raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
 	} else {
-		__raw_readsw(host->base + OMAP_MMC_REG_DATA, host->buffer, n);
+		__raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
 	}
 }
 
@@ -386,11 +437,11 @@
 	int transfer_error;
 
 	if (host->cmd == NULL && host->data == NULL) {
-		status = OMAP_MMC_READ(host->base, STAT);
+		status = OMAP_MMC_READ(host, STAT);
 		dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status);
 		if (status != 0) {
-			OMAP_MMC_WRITE(host->base, STAT, status);
-			OMAP_MMC_WRITE(host->base, IE, 0);
+			OMAP_MMC_WRITE(host, STAT, status);
+			OMAP_MMC_WRITE(host, IE, 0);
 		}
 		return IRQ_HANDLED;
 	}
@@ -399,8 +450,8 @@
 	end_transfer = 0;
 	transfer_error = 0;
 
-	while ((status = OMAP_MMC_READ(host->base, STAT)) != 0) {
-		OMAP_MMC_WRITE(host->base, STAT, status);
+	while ((status = OMAP_MMC_READ(host, STAT)) != 0) {
+		OMAP_MMC_WRITE(host, STAT, status);
 #ifdef CONFIG_MMC_DEBUG
 		dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
 			status, host->cmd != NULL ? host->cmd->opcode : -1);
@@ -470,8 +521,8 @@
 
 		if (status & OMAP_MMC_STAT_CARD_ERR) {
 			if (host->cmd && host->cmd->opcode == MMC_STOP_TRANSMISSION) {
-				u32 response = OMAP_MMC_READ(host->base, RSP6)
-					| (OMAP_MMC_READ(host->base, RSP7) << 16);
+				u32 response = OMAP_MMC_READ(host, RSP6)
+					| (OMAP_MMC_READ(host, RSP7) << 16);
 				/* STOP sometimes sets must-ignore bits */
 				if (!(response & (R1_CC_ERROR
 								| R1_ILLEGAL_COMMAND
@@ -530,12 +581,6 @@
 	schedule_work(&host->switch_work);
 }
 
-/* FIXME: Handle card insertion and removal properly. Maybe use a mask
- * for MMC state? */
-static void mmc_omap_switch_callback(unsigned long data, u8 mmc_mask)
-{
-}
-
 static void mmc_omap_switch_handler(void *data)
 {
 	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
@@ -581,7 +626,7 @@
 	int dst_port = 0;
 	int sync_dev = 0;
 
-	data_addr = io_v2p((u32) host->base) + OMAP_MMC_REG_DATA;
+	data_addr = host->phys_base + OMAP_MMC_REG_DATA;
 	frame = data->blksz;
 	count = sg_dma_len(sg);
 
@@ -640,10 +685,9 @@
 	}
 
 	/* Max limit for DMA frame count is 0xffff */
-	if (unlikely(count > 0xffff))
-		BUG();
+	BUG_ON(count > 0xffff);
 
-	OMAP_MMC_WRITE(host->base, BUF, buf);
+	OMAP_MMC_WRITE(host, BUF, buf);
 	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
 				     frame, count, OMAP_DMA_SYNC_FRAME,
 				     sync_dev, 0);
@@ -728,11 +772,11 @@
 {
 	u16 reg;
 
-	reg = OMAP_MMC_READ(host->base, SDIO);
+	reg = OMAP_MMC_READ(host, SDIO);
 	reg &= ~(1 << 5);
-	OMAP_MMC_WRITE(host->base, SDIO, reg);
+	OMAP_MMC_WRITE(host, SDIO, reg);
 	/* Set maximum timeout */
-	OMAP_MMC_WRITE(host->base, CTO, 0xff);
+	OMAP_MMC_WRITE(host, CTO, 0xff);
 }
 
 static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
@@ -746,14 +790,14 @@
 	timeout = req->data->timeout_clks + req->data->timeout_ns / 500;
 
 	/* Check if we need to use timeout multiplier register */
-	reg = OMAP_MMC_READ(host->base, SDIO);
+	reg = OMAP_MMC_READ(host, SDIO);
 	if (timeout > 0xffff) {
 		reg |= (1 << 5);
 		timeout /= 1024;
 	} else
 		reg &= ~(1 << 5);
-	OMAP_MMC_WRITE(host->base, SDIO, reg);
-	OMAP_MMC_WRITE(host->base, DTO, timeout);
+	OMAP_MMC_WRITE(host, SDIO, reg);
+	OMAP_MMC_WRITE(host, DTO, timeout);
 }
 
 static void
@@ -765,19 +809,18 @@
 
 	host->data = data;
 	if (data == NULL) {
-		OMAP_MMC_WRITE(host->base, BLEN, 0);
-		OMAP_MMC_WRITE(host->base, NBLK, 0);
-		OMAP_MMC_WRITE(host->base, BUF, 0);
+		OMAP_MMC_WRITE(host, BLEN, 0);
+		OMAP_MMC_WRITE(host, NBLK, 0);
+		OMAP_MMC_WRITE(host, BUF, 0);
 		host->dma_in_use = 0;
 		set_cmd_timeout(host, req);
 		return;
 	}
 
-
 	block_size = data->blksz;
 
-	OMAP_MMC_WRITE(host->base, NBLK, data->blocks - 1);
-	OMAP_MMC_WRITE(host->base, BLEN, block_size - 1);
+	OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
+	OMAP_MMC_WRITE(host, BLEN, block_size - 1);
 	set_data_timeout(host, req);
 
 	/* cope with calling layer confusion; it issues "single
@@ -819,7 +862,7 @@
 
 	/* Revert to PIO? */
 	if (!use_dma) {
-		OMAP_MMC_WRITE(host->base, BUF, 0x1f1f);
+		OMAP_MMC_WRITE(host, BUF, 0x1f1f);
 		host->total_bytes_left = data->blocks * block_size;
 		host->sg_len = sg_len;
 		mmc_omap_sg_to_buf(host);
@@ -845,7 +888,6 @@
 static void innovator_fpga_socket_power(int on)
 {
 #if defined(CONFIG_MACH_OMAP_INNOVATOR) && defined(CONFIG_ARCH_OMAP15XX)
-
 	if (on) {
 		fpga_write(fpga_read(OMAP1510_FPGA_POWER) | (1 << 3),
 		     OMAP1510_FPGA_POWER);
@@ -871,8 +913,8 @@
 			/* GPIO 4 of TPS65010 sends SD_EN signal */
 			tps65010_set_gpio_out_value(GPIO4, HIGH);
 		else if (cpu_is_omap24xx()) {
-			u16 reg = OMAP_MMC_READ(host->base, CON);
-			OMAP_MMC_WRITE(host->base, CON, reg | (1 << 11));
+			u16 reg = OMAP_MMC_READ(host, CON);
+			OMAP_MMC_WRITE(host, CON, reg | (1 << 11));
 		} else
 			if (host->power_pin >= 0)
 				omap_set_gpio_dataout(host->power_pin, 1);
@@ -884,8 +926,8 @@
 		else if (machine_is_omap_h3())
 			tps65010_set_gpio_out_value(GPIO4, LOW);
 		else if (cpu_is_omap24xx()) {
-			u16 reg = OMAP_MMC_READ(host->base, CON);
-			OMAP_MMC_WRITE(host->base, CON, reg & ~(1 << 11));
+			u16 reg = OMAP_MMC_READ(host, CON);
+			OMAP_MMC_WRITE(host, CON, reg & ~(1 << 11));
 		} else
 			if (host->power_pin >= 0)
 				omap_set_gpio_dataout(host->power_pin, 0);
@@ -927,7 +969,7 @@
 	case MMC_POWER_UP:
 	case MMC_POWER_ON:
 		mmc_omap_power(host, 1);
-		dsor |= 1<<11;
+		dsor |= 1 << 11;
 		break;
 	}
 
@@ -941,14 +983,14 @@
 	 * which results in the while loop below getting stuck.
 	 * Writing to the CON register twice seems to do the trick. */
 	for (i = 0; i < 2; i++)
-		OMAP_MMC_WRITE(host->base, CON, dsor);
+		OMAP_MMC_WRITE(host, CON, dsor);
 	if (ios->power_mode == MMC_POWER_UP) {
 		/* Send clock cycles, poll completion */
-		OMAP_MMC_WRITE(host->base, IE, 0);
-		OMAP_MMC_WRITE(host->base, STAT, 0xffff);
-		OMAP_MMC_WRITE(host->base, CMD, 1<<7);
-		while (0 == (OMAP_MMC_READ(host->base, STAT) & 1));
-		OMAP_MMC_WRITE(host->base, STAT, 1);
+		OMAP_MMC_WRITE(host, IE, 0);
+		OMAP_MMC_WRITE(host, STAT, 0xffff);
+		OMAP_MMC_WRITE(host, CMD, 1 << 7);
+		while ((OMAP_MMC_READ(host, STAT) & 1) == 0);
+		OMAP_MMC_WRITE(host, STAT, 1);
 	}
 	clk_disable(host->fclk);
 }
@@ -960,7 +1002,7 @@
 	return host->wp_pin && omap_get_gpio_datain(host->wp_pin);
 }
 
-static struct mmc_host_ops mmc_omap_ops = {
+static const struct mmc_host_ops mmc_omap_ops = {
 	.request	= mmc_omap_request,
 	.set_ios	= mmc_omap_set_ios,
 	.get_ro		= mmc_omap_get_ro,
@@ -971,25 +1013,29 @@
 	struct omap_mmc_conf *minfo = pdev->dev.platform_data;
 	struct mmc_host *mmc;
 	struct mmc_omap_host *host = NULL;
-	struct resource *r;
+	struct resource *res;
 	int ret = 0;
 	int irq;
-	
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (minfo == NULL) {
+		dev_err(&pdev->dev, "platform data missing\n");
+		return -ENXIO;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
-	if (!r || irq < 0)
+	if (res == NULL || irq < 0)
 		return -ENXIO;
 
-	r = request_mem_region(pdev->resource[0].start,
-				pdev->resource[0].end - pdev->resource[0].start + 1,
-			       pdev->name);
-	if (!r)
+	res = request_mem_region(res->start, res->end - res->start + 1,
+			         pdev->name);
+	if (res == NULL)
 		return -EBUSY;
 
 	mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev);
-	if (!mmc) {
+	if (mmc == NULL) {
 		ret = -ENOMEM;
-		goto out;
+		goto err_free_mem_region;
 	}
 
 	host = mmc_priv(mmc);
@@ -1001,13 +1047,13 @@
 	host->dma_timer.data = (unsigned long) host;
 
 	host->id = pdev->id;
-	host->res = r;
+	host->mem_res = res;
 	host->irq = irq;
 
 	if (cpu_is_omap24xx()) {
 		host->iclk = clk_get(&pdev->dev, "mmc_ick");
 		if (IS_ERR(host->iclk))
-			goto out;
+			goto err_free_mmc_host;
 		clk_enable(host->iclk);
 	}
 
@@ -1018,7 +1064,7 @@
 
 	if (IS_ERR(host->fclk)) {
 		ret = PTR_ERR(host->fclk);
-		goto out;
+		goto err_free_iclk;
 	}
 
 	/* REVISIT:
@@ -1031,14 +1077,15 @@
 	host->use_dma = 1;
 	host->dma_ch = -1;
 
-	host->irq = pdev->resource[1].start;
-	host->base = (void __iomem*)IO_ADDRESS(r->start);
+	host->irq = irq;
+	host->phys_base = host->mem_res->start;
+	host->virt_base = (void __iomem *) IO_ADDRESS(host->phys_base);
 
 	mmc->ops = &mmc_omap_ops;
 	mmc->f_min = 400000;
 	mmc->f_max = 24000000;
-	mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
-	mmc->caps = MMC_CAP_BYTEBLOCK;
+	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+	mmc->caps = MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK;
 
 	if (minfo->wire4)
 		 mmc->caps |= MMC_CAP_4_BIT_DATA;
@@ -1056,20 +1103,18 @@
 		if ((ret = omap_request_gpio(host->power_pin)) != 0) {
 			dev_err(mmc_dev(host->mmc),
 				"Unable to get GPIO pin for MMC power\n");
-			goto out;
+			goto err_free_fclk;
 		}
 		omap_set_gpio_direction(host->power_pin, 0);
 	}
 
 	ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
 	if (ret)
-		goto out;
+		goto err_free_power_gpio;
 
 	host->dev = &pdev->dev;
 	platform_set_drvdata(pdev, host);
 
-	mmc_add_host(mmc);
-
 	if (host->switch_pin >= 0) {
 		INIT_WORK(&host->switch_work, mmc_omap_switch_handler, host);
 		init_timer(&host->switch_timer);
@@ -1107,10 +1152,11 @@
 			schedule_work(&host->switch_work);
 	}
 
-no_switch:
+	mmc_add_host(mmc);
+
 	return 0;
 
-out:
+no_switch:
 	/* FIXME: Free other resources too. */
 	if (host) {
 		if (host->iclk && !IS_ERR(host->iclk))
@@ -1119,6 +1165,20 @@
 			clk_put(host->fclk);
 		mmc_free_host(host->mmc);
 	}
+err_free_power_gpio:
+	if (host->power_pin >= 0)
+		omap_free_gpio(host->power_pin);
+err_free_fclk:
+	clk_put(host->fclk);
+err_free_iclk:
+	if (host->iclk != NULL) {
+		clk_disable(host->iclk);
+		clk_put(host->iclk);
+	}
+err_free_mmc_host:
+	mmc_free_host(host->mmc);
+err_free_mem_region:
+	release_mem_region(res->start, res->end - res->start + 1);
 	return ret;
 }
 
@@ -1128,30 +1188,31 @@
 
 	platform_set_drvdata(pdev, NULL);
 
-	if (host) {
-		mmc_remove_host(host->mmc);
-		free_irq(host->irq, host);
+	BUG_ON(host == NULL);
 
-		if (host->power_pin >= 0)
-			omap_free_gpio(host->power_pin);
-		if (host->switch_pin >= 0) {
-			device_remove_file(&pdev->dev, &dev_attr_enable_poll);
-			device_remove_file(&pdev->dev, &dev_attr_cover_switch);
-			free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
-			omap_free_gpio(host->switch_pin);
-			host->switch_pin = -1;
-			del_timer_sync(&host->switch_timer);
-			flush_scheduled_work();
-		}
-		if (host->iclk && !IS_ERR(host->iclk))
-			clk_put(host->iclk);
-		if (host->fclk && !IS_ERR(host->fclk))
-			clk_put(host->fclk);
-		mmc_free_host(host->mmc);
+	mmc_remove_host(host->mmc);
+	free_irq(host->irq, host);
+
+	if (host->power_pin >= 0)
+		omap_free_gpio(host->power_pin);
+	if (host->switch_pin >= 0) {
+		device_remove_file(&pdev->dev, &dev_attr_enable_poll);
+		device_remove_file(&pdev->dev, &dev_attr_cover_switch);
+		free_irq(OMAP_GPIO_IRQ(host->switch_pin), host);
+		omap_free_gpio(host->switch_pin);
+		host->switch_pin = -1;
+		del_timer_sync(&host->switch_timer);
+		flush_scheduled_work();
 	}
+	if (host->iclk && !IS_ERR(host->iclk))
+		clk_put(host->iclk);
+	if (host->fclk && !IS_ERR(host->fclk))
+		clk_put(host->fclk);
 
 	release_mem_region(pdev->resource[0].start,
-			pdev->resource[0].end - pdev->resource[0].start + 1);
+			   pdev->resource[0].end - pdev->resource[0].start + 1);
+
+	mmc_free_host(host->mmc);
 
 	return 0;
 }
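
Editor's note: the probe error path above is restructured from a single catch-all label into ordered err_free_* labels, each of which undoes only what was acquired before the failing step. Below is a minimal user-space sketch of that unwind idiom; the resource names ("mem region", "host", "clock") and the helpers grab()/release()/probe() are purely illustrative, not the driver's own.

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-ins for the resources a probe routine acquires. */
    static void *grab(const char *what, int fail)
    {
        if (fail) {
            fprintf(stderr, "failed to acquire %s\n", what);
            return NULL;
        }
        printf("acquired %s\n", what);
        return malloc(1);
    }

    static void release(const char *what, void *res)
    {
        printf("released %s\n", what);
        free(res);
    }

    /* Acquire resources in order; on failure, jump to the label that frees
     * everything acquired so far and nothing more. */
    static int probe(int fail_at)
    {
        void *mem, *host, *clk;

        mem = grab("mem region", fail_at == 1);
        if (!mem)
            return -1;

        host = grab("host", fail_at == 2);
        if (!host)
            goto err_free_mem;

        clk = grab("clock", fail_at == 3);
        if (!clk)
            goto err_free_host;

        printf("probe ok\n");
        release("clock", clk);
        release("host", host);
        release("mem region", mem);
        return 0;

    err_free_host:
        release("host", host);
    err_free_mem:
        release("mem region", mem);
        return -1;
    }

    int main(void)
    {
        int i;

        for (i = 0; i <= 3; i++)
            probe(i);
        return 0;
    }

Falling through from a later label into the earlier ones releases resources in reverse order of acquisition, which is what the err_free_power_gpio -> err_free_fclk -> err_free_iclk -> err_free_mmc_host -> err_free_mem_region chain above relies on.
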
diff --git a/drivers/mmc/omap.h b/drivers/mmc/omap.h
deleted file mode 100644
index c954d35..0000000
--- a/drivers/mmc/omap.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef	DRIVERS_MEDIA_MMC_OMAP_H
-#define	DRIVERS_MEDIA_MMC_OMAP_H
-
-#define	OMAP_MMC_REG_CMD	0x00
-#define	OMAP_MMC_REG_ARGL	0x04
-#define	OMAP_MMC_REG_ARGH	0x08
-#define	OMAP_MMC_REG_CON	0x0c
-#define	OMAP_MMC_REG_STAT	0x10
-#define	OMAP_MMC_REG_IE		0x14
-#define	OMAP_MMC_REG_CTO	0x18
-#define	OMAP_MMC_REG_DTO	0x1c
-#define	OMAP_MMC_REG_DATA	0x20
-#define	OMAP_MMC_REG_BLEN	0x24
-#define	OMAP_MMC_REG_NBLK	0x28
-#define	OMAP_MMC_REG_BUF	0x2c
-#define OMAP_MMC_REG_SDIO	0x34
-#define	OMAP_MMC_REG_REV	0x3c
-#define	OMAP_MMC_REG_RSP0	0x40
-#define	OMAP_MMC_REG_RSP1	0x44
-#define	OMAP_MMC_REG_RSP2	0x48
-#define	OMAP_MMC_REG_RSP3	0x4c
-#define	OMAP_MMC_REG_RSP4	0x50
-#define	OMAP_MMC_REG_RSP5	0x54
-#define	OMAP_MMC_REG_RSP6	0x58
-#define	OMAP_MMC_REG_RSP7	0x5c
-#define	OMAP_MMC_REG_IOSR	0x60
-#define	OMAP_MMC_REG_SYSC	0x64
-#define	OMAP_MMC_REG_SYSS	0x68
-
-#define	OMAP_MMC_STAT_CARD_ERR		(1 << 14)
-#define	OMAP_MMC_STAT_CARD_IRQ		(1 << 13)
-#define	OMAP_MMC_STAT_OCR_BUSY		(1 << 12)
-#define	OMAP_MMC_STAT_A_EMPTY		(1 << 11)
-#define	OMAP_MMC_STAT_A_FULL		(1 << 10)
-#define	OMAP_MMC_STAT_CMD_CRC		(1 <<  8)
-#define	OMAP_MMC_STAT_CMD_TOUT		(1 <<  7)
-#define	OMAP_MMC_STAT_DATA_CRC		(1 <<  6)
-#define	OMAP_MMC_STAT_DATA_TOUT		(1 <<  5)
-#define	OMAP_MMC_STAT_END_BUSY		(1 <<  4)
-#define	OMAP_MMC_STAT_END_OF_DATA	(1 <<  3)
-#define	OMAP_MMC_STAT_CARD_BUSY		(1 <<  2)
-#define	OMAP_MMC_STAT_END_OF_CMD	(1 <<  0)
-
-#define OMAP_MMC_READ(base, reg)	__raw_readw((base) + OMAP_MMC_REG_##reg)
-#define OMAP_MMC_WRITE(base, reg, val)	__raw_writew((val), (base) + OMAP_MMC_REG_##reg)
-
-/*
- * Command types
- */
-#define OMAP_MMC_CMDTYPE_BC	0
-#define OMAP_MMC_CMDTYPE_BCR	1
-#define OMAP_MMC_CMDTYPE_AC	2
-#define OMAP_MMC_CMDTYPE_ADTC	3
-
-#endif
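
Editor's note: with this header gone, the OMAP_MMC_READ()/OMAP_MMC_WRITE() accessors take the host structure rather than a raw base address (compare the CON/IE/STAT/CMD accesses earlier in this patch), presumably redefined in omap.c in terms of host->virt_base. A hypothetical user-space sketch of the token-pasting accessor style, with a plain array standing in for the ioremap()ed register window; MY_READ/MY_WRITE, struct my_host and the MY_REG_* offsets are invented names:

    #include <stdint.h>
    #include <stdio.h>

    /* Register byte offsets, in the style of the deleted header. */
    #define MY_REG_CON   0x0c
    #define MY_REG_STAT  0x10

    struct my_host {
        volatile uint16_t *virt_base;   /* would be an ioremap()ed pointer */
    };

    /* Token pasting turns MY_READ(host, CON) into an access at MY_REG_CON. */
    #define MY_READ(host, reg) \
        ((host)->virt_base[MY_REG_##reg / 2])
    #define MY_WRITE(host, reg, val) \
        ((host)->virt_base[MY_REG_##reg / 2] = (val))

    int main(void)
    {
        static uint16_t fake_regs[64];  /* stands in for the device */
        struct my_host host = { .virt_base = fake_regs };

        MY_WRITE(&host, CON, 0x1234);
        printf("CON = 0x%04x, STAT = 0x%04x\n",
               MY_READ(&host, CON), MY_READ(&host, STAT));
        return 0;
    }

Passing the host instead of the base pointer lets the register layout live next to the driver code and keeps call sites like OMAP_MMC_WRITE(host, CON, dsor) independent of how the mapping is stored.
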
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index a526698..471e9f4 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -393,7 +393,7 @@
 		 host->clkrt, host->cmdat);
 }
 
-static struct mmc_host_ops pxamci_ops = {
+static const struct mmc_host_ops pxamci_ops = {
 	.request	= pxamci_request,
 	.get_ro		= pxamci_get_ro,
 	.set_ios	= pxamci_set_ios,
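
Editor's note: several host drivers in this patch (omap, pxamci, sdhci, wbsd) constify their mmc_host_ops tables. A quick illustration of why that is cheap for a table that only holds function pointers fixed at build time; struct widget_ops and the functions below are invented:

    #include <stdio.h>

    struct widget_ops {
        int (*start)(void);
        void (*stop)(void);
    };

    static int my_start(void) { printf("start\n"); return 0; }
    static void my_stop(void)  { printf("stop\n"); }

    /* const lets the compiler place the table in read-only data and
     * catches accidental writes at build time. */
    static const struct widget_ops my_ops = {
        .start = my_start,
        .stop  = my_stop,
    };

    int main(void)
    {
        my_ops.start();
        my_ops.stop();
        return 0;
    }
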
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c
index 9a7d39b..cd98117 100644
--- a/drivers/mmc/sdhci.c
+++ b/drivers/mmc/sdhci.c
@@ -616,6 +616,7 @@
 static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
 {
 	int div;
+	u8 ctrl;
 	u16 clk;
 	unsigned long timeout;
 
@@ -624,6 +625,13 @@
 
 	writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
 
+	ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
+	if (clock > 25000000)
+		ctrl |= SDHCI_CTRL_HISPD;
+	else
+		ctrl &= ~SDHCI_CTRL_HISPD;
+	writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
+
 	if (clock == 0)
 		goto out;
 
@@ -784,7 +792,7 @@
 	return !(present & SDHCI_WRITE_PROTECT);
 }
 
-static struct mmc_host_ops sdhci_ops = {
+static const struct mmc_host_ops sdhci_ops = {
 	.request	= sdhci_request,
 	.set_ios	= sdhci_set_ios,
 	.get_ro		= sdhci_get_ro,
@@ -1291,6 +1299,13 @@
 	else if (caps & SDHCI_CAN_VDD_180)
 		mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19;
 
+	if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) {
+		printk(KERN_ERR "%s: Controller reports > 25 MHz base clock,"
+			" but no high speed support.\n",
+			host->slot_descr);
+		mmc->f_max = 25000000;
+	}
+
 	if (mmc->ocr_avail == 0) {
 		printk(KERN_ERR "%s: Hardware doesn't report any "
 			"support voltages.\n", host->slot_descr);
diff --git a/drivers/mmc/sdhci.h b/drivers/mmc/sdhci.h
index 72a6793..f9d1a0a 100644
--- a/drivers/mmc/sdhci.h
+++ b/drivers/mmc/sdhci.h
@@ -71,6 +71,7 @@
 #define SDHCI_HOST_CONTROL 	0x28
 #define  SDHCI_CTRL_LED		0x01
 #define  SDHCI_CTRL_4BITBUS	0x02
+#define  SDHCI_CTRL_HISPD	0x04
 
 #define SDHCI_POWER_CONTROL	0x29
 #define  SDHCI_POWER_ON		0x01
@@ -138,6 +139,7 @@
 #define  SDHCI_CLOCK_BASE_SHIFT	8
 #define  SDHCI_MAX_BLOCK_MASK	0x00030000
 #define  SDHCI_MAX_BLOCK_SHIFT  16
+#define  SDHCI_CAN_DO_HISPD	0x00200000
 #define  SDHCI_CAN_DO_DMA	0x00400000
 #define  SDHCI_CAN_VDD_330	0x01000000
 #define  SDHCI_CAN_VDD_300	0x02000000
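
Editor's note: the sdhci hunks read the capabilities register for SDHCI_CAN_DO_HISPD, clamp the advertised maximum clock to 25 MHz when high-speed timing is absent, and flip SDHCI_CTRL_HISPD in HOST_CONTROL according to the requested clock. A user-space paraphrase of those two decisions, reusing the constants added above; max_freq() and host_control() are illustrative helpers, not driver functions:

    #include <stdint.h>
    #include <stdio.h>

    #define SDHCI_CAN_DO_HISPD  0x00200000
    #define SDHCI_CTRL_HISPD    0x04

    /* Clamp the advertised maximum frequency when the controller claims a
     * base clock above 25 MHz but no high-speed capability. */
    static unsigned int max_freq(uint32_t caps, unsigned int base_clk_hz)
    {
        if (base_clk_hz > 25000000 && !(caps & SDHCI_CAN_DO_HISPD))
            return 25000000;
        return base_clk_hz;
    }

    /* Decide whether the high-speed bit in HOST_CONTROL should be set. */
    static uint8_t host_control(uint8_t ctrl, unsigned int clock_hz)
    {
        if (clock_hz > 25000000)
            ctrl |= SDHCI_CTRL_HISPD;
        else
            ctrl &= ~SDHCI_CTRL_HISPD;
        return ctrl;
    }

    int main(void)
    {
        printf("f_max = %u\n", max_freq(0, 48000000));
        printf("f_max = %u\n", max_freq(SDHCI_CAN_DO_HISPD, 48000000));
        printf("ctrl  = 0x%02x\n", host_control(0, 50000000));
        return 0;
    }
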
diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c
index 0fdc55b..e846499 100644
--- a/drivers/mmc/tifm_sd.c
+++ b/drivers/mmc/tifm_sd.c
@@ -99,7 +99,7 @@
 
 	struct mmc_request    *req;
 	struct work_struct    cmd_handler;
-	struct work_struct    abort_handler;
+	struct delayed_work   abort_handler;
 	wait_queue_head_t     can_eject;
 
 	size_t                written_blocks;
@@ -496,9 +496,9 @@
 	mmc_request_done(mmc, mrq);
 }
 
-static void tifm_sd_end_cmd(void *data)
+static void tifm_sd_end_cmd(struct work_struct *work)
 {
-	struct tifm_sd *host = data;
+	struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
 	struct tifm_dev *sock = host->dev;
 	struct mmc_host *mmc = tifm_get_drvdata(sock);
 	struct mmc_request *mrq;
@@ -608,9 +608,9 @@
 	mmc_request_done(mmc, mrq);
 }
 
-static void tifm_sd_end_cmd_nodma(void *data)
+static void tifm_sd_end_cmd_nodma(struct work_struct *work)
 {
-	struct tifm_sd *host = (struct tifm_sd*)data;
+	struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
 	struct tifm_dev *sock = host->dev;
 	struct mmc_host *mmc = tifm_get_drvdata(sock);
 	struct mmc_request *mrq;
@@ -661,11 +661,14 @@
 	mmc_request_done(mmc, mrq);
 }
 
-static void tifm_sd_abort(void *data)
+static void tifm_sd_abort(struct work_struct *work)
 {
+	struct tifm_sd *host =
+		container_of(work, struct tifm_sd, abort_handler.work);
+
 	printk(KERN_ERR DRIVER_NAME
 		": card failed to respond for a long period of time");
-	tifm_eject(((struct tifm_sd*)data)->dev);
+	tifm_eject(host->dev);
 }
 
 static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -762,9 +765,9 @@
 	.get_ro  = tifm_sd_ro
 };
 
-static void tifm_sd_register_host(void *data)
+static void tifm_sd_register_host(struct work_struct *work)
 {
-	struct tifm_sd *host = (struct tifm_sd*)data;
+	struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
 	struct tifm_dev *sock = host->dev;
 	struct mmc_host *mmc = tifm_get_drvdata(sock);
 	unsigned long flags;
@@ -772,8 +775,7 @@
 	spin_lock_irqsave(&sock->lock, flags);
 	host->flags |= HOST_REG;
 	PREPARE_WORK(&host->cmd_handler,
-			no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd,
-			data);
+			no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd);
 	spin_unlock_irqrestore(&sock->lock, flags);
 	dev_dbg(&sock->dev, "adding host\n");
 	mmc_add_host(mmc);
@@ -799,8 +801,8 @@
 	host->dev = sock;
 	host->clk_div = 61;
 	init_waitqueue_head(&host->can_eject);
-	INIT_WORK(&host->cmd_handler, tifm_sd_register_host, host);
-	INIT_WORK(&host->abort_handler, tifm_sd_abort, host);
+	INIT_WORK(&host->cmd_handler, tifm_sd_register_host);
+	INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort);
 
 	tifm_set_drvdata(sock, mmc);
 	sock->signal_irq = tifm_sd_signal_irq;
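
Editor's note: the tifm_sd changes (and the 8139too ones below) follow the workqueue API rework: handlers now receive the struct work_struct itself and recover the surrounding object with container_of(), and delayed work uses struct delayed_work, reached through ->work as in tifm_sd_abort(). A user-space sketch of the container_of() recovery; the macro is spelled out locally since this is not kernel code, and struct my_host/my_handler are invented names:

    #include <stddef.h>
    #include <stdio.h>

    /* The same pointer arithmetic the kernel macro performs. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct {
        void (*func)(struct work_struct *work);
    };

    struct my_host {
        int id;
        struct work_struct cmd_handler;
    };

    /* New-style handler: gets the work item, recovers the host around it. */
    static void my_handler(struct work_struct *work)
    {
        struct my_host *host = container_of(work, struct my_host, cmd_handler);

        printf("handling work for host %d\n", host->id);
    }

    int main(void)
    {
        struct my_host host = { .id = 7 };

        host.cmd_handler.func = my_handler;
        host.cmd_handler.func(&host.cmd_handler);  /* what the workqueue would do */
        return 0;
    }

Because the handler no longer takes a void * context argument, INIT_WORK() and INIT_DELAYED_WORK() drop their data parameter, which is exactly the change visible in the hunks above.
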
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index ced309b..7a28267 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -1021,7 +1021,7 @@
 	return csr & WBSD_WRPT;
 }
 
-static struct mmc_host_ops wbsd_ops = {
+static const struct mmc_host_ops wbsd_ops = {
 	.request	= wbsd_request,
 	.set_ios	= wbsd_set_ios,
 	.get_ro		= wbsd_get_ro,
@@ -1488,7 +1488,7 @@
 	/*
 	 * Translate the address to a physical address.
 	 */
-	host->dma_addr = dma_map_single(host->mmc->dev, host->dma_buffer,
+	host->dma_addr = dma_map_single(mmc_dev(host->mmc), host->dma_buffer,
 		WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
 
 	/*
@@ -1512,7 +1512,7 @@
 	 */
 	BUG_ON(1);
 
-	dma_unmap_single(host->mmc->dev, host->dma_addr,
+	dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
 		WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
 	host->dma_addr = (dma_addr_t)NULL;
 
@@ -1530,7 +1530,7 @@
 static void __devexit wbsd_release_dma(struct wbsd_host *host)
 {
 	if (host->dma_addr) {
-		dma_unmap_single(host->mmc->dev, host->dma_addr,
+		dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
 			WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
 	}
 	kfree(host->dma_buffer);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index ef4a731..334e078f 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -451,7 +451,7 @@
 		return -ENODEV;
 	}
 
-	flash = kzalloc(sizeof *flash, SLAB_KERNEL);
+	flash = kzalloc(sizeof *flash, GFP_KERNEL);
 	if (!flash)
 		return -ENOMEM;
 
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 24747bd..d132ed5 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -607,7 +607,7 @@
 	default "4"
 
 config MTD_SHARP_SL
-	bool "ROM maped on Sharp SL Series"
+	bool "ROM mapped on Sharp SL Series"
 	depends on MTD && ARCH_PXA
 	help
 	  This enables access to the flash chip on the Sharp SL Series of PDAs.
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index 92b5d88..65e5ee5 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -80,7 +80,7 @@
 		.size =		FLASH_PARTITION2_SIZE
 	},
 	{
-		.name =		"Persistant storage",
+		.name =		"Persistent storage",
 		.offset =	FLASH_PARTITION3_ADDR,
 		.size =		FLASH_PARTITION3_SIZE
 	}
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 11d170a..06e3378 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -922,7 +922,7 @@
  * and then free up the resources we took when the card was found.
  */
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	struct net_device *dev = dev_3c501;
 	unregister_netdev(dev);
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index a34b220..7e34c4f 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -726,7 +726,7 @@
 		iounmap(ei_status.mem);
 }
 
-void
+void __exit
 cleanup_module(void)
 {
 	int this_dev;
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 458cb9c..702bfb2 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -1670,7 +1670,7 @@
 	return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	int this_dev;
 
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index aa43563..54e1d5a 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -940,7 +940,7 @@
 	return IS_ERR(dev_3c507) ? PTR_ERR(dev_3c507) : 0;
 }
 
-void
+void __exit
 cleanup_module(void)
 {
 	struct net_device *dev = dev_3c507;
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 9184946..17d61eb 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -1302,7 +1302,7 @@
 	} else return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	int this_dev;
 	for (this_dev=0; this_dev<MAX_3C523_CARDS; this_dev++) {
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index f4aca53..6c7437e 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1659,7 +1659,7 @@
  *	transmit operations are allowed to start scribbling into memory.
  */
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	unregister_netdev(this_device);
 	cleanup_card(this_device);
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index d02ed51..931028f 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -594,7 +594,7 @@
 	u32 rx_config;
 	struct rtl_extra_stats xstats;
 
-	struct work_struct thread;
+	struct delayed_work thread;
 
 	struct mii_if_info mii;
 	unsigned int regs_len;
@@ -636,8 +636,8 @@
 static void rtl8139_set_rx_mode (struct net_device *dev);
 static void __set_rx_mode (struct net_device *dev);
 static void rtl8139_hw_start (struct net_device *dev);
-static void rtl8139_thread (void *_data);
-static void rtl8139_tx_timeout_task(void *_data);
+static void rtl8139_thread (struct work_struct *work);
+static void rtl8139_tx_timeout_task(struct work_struct *work);
 static const struct ethtool_ops rtl8139_ethtool_ops;
 
 /* write MMIO register, with flush */
@@ -1010,7 +1010,7 @@
 		(debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1));
 	spin_lock_init (&tp->lock);
 	spin_lock_init (&tp->rx_lock);
-	INIT_WORK(&tp->thread, rtl8139_thread, dev);
+	INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
 	tp->mii.dev = dev;
 	tp->mii.mdio_read = mdio_read;
 	tp->mii.mdio_write = mdio_write;
@@ -1596,15 +1596,16 @@
 		 RTL_R8 (Config1));
 }
 
-static void rtl8139_thread (void *_data)
+static void rtl8139_thread (struct work_struct *work)
 {
-	struct net_device *dev = _data;
-	struct rtl8139_private *tp = netdev_priv(dev);
+	struct rtl8139_private *tp =
+		container_of(work, struct rtl8139_private, thread.work);
+	struct net_device *dev = tp->mii.dev;
 	unsigned long thr_delay = next_tick;
 
 	if (tp->watchdog_fired) {
 		tp->watchdog_fired = 0;
-		rtl8139_tx_timeout_task(_data);
+		rtl8139_tx_timeout_task(work);
 	} else if (rtnl_trylock()) {
 		rtl8139_thread_iter (dev, tp, tp->mmio_addr);
 		rtnl_unlock ();
@@ -1646,10 +1647,11 @@
 	/* XXX account for unsent Tx packets in tp->stats.tx_dropped */
 }
 
-static void rtl8139_tx_timeout_task (void *_data)
+static void rtl8139_tx_timeout_task (struct work_struct *work)
 {
-	struct net_device *dev = _data;
-	struct rtl8139_private *tp = netdev_priv(dev);
+	struct rtl8139_private *tp =
+		container_of(work, struct rtl8139_private, thread.work);
+	struct net_device *dev = tp->mii.dev;
 	void __iomem *ioaddr = tp->mmio_addr;
 	int i;
 	u8 tmp8;
@@ -1695,7 +1697,7 @@
 	struct rtl8139_private *tp = netdev_priv(dev);
 
 	if (!tp->have_thread) {
-		INIT_WORK(&tp->thread, rtl8139_tx_timeout_task, dev);
+		INIT_DELAYED_WORK(&tp->thread, rtl8139_tx_timeout_task);
 		schedule_delayed_work(&tp->thread, next_tick);
 	} else
 		tp->watchdog_fired = 1;
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index 3d1c599..a828076 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -1,1104 +1,40 @@
-/* 8390.c: A general NS8390 ethernet driver core for linux. */
-/*
-	Written 1992-94 by Donald Becker.
-
-	Copyright 1993 United States Government as represented by the
-	Director, National Security Agency.
-
-	This software may be used and distributed according to the terms
-	of the GNU General Public License, incorporated herein by reference.
-
-	The author may be reached as becker@scyld.com, or C/O
-	Scyld Computing Corporation
-	410 Severn Ave., Suite 210
-	Annapolis MD 21403
-
-
-  This is the chip-specific code for many 8390-based ethernet adaptors.
-  This is not a complete driver, it must be combined with board-specific
-  code such as ne.c, wd.c, 3c503.c, etc.
-
-  Seeing how at least eight drivers use this code, (not counting the
-  PCMCIA ones either) it is easy to break some card by what seems like
-  a simple innocent change. Please contact me or Donald if you think
-  you have found something that needs changing. -- PG
-
-
-  Changelog:
-
-  Paul Gortmaker	: remove set_bit lock, other cleanups.
-  Paul Gortmaker	: add ei_get_8390_hdr() so we can pass skb's to
-			  ei_block_input() for eth_io_copy_and_sum().
-  Paul Gortmaker	: exchange static int ei_pingpong for a #define,
-			  also add better Tx error handling.
-  Paul Gortmaker	: rewrite Rx overrun handling as per NS specs.
-  Alexey Kuznetsov	: use the 8390's six bit hash multicast filter.
-  Paul Gortmaker	: tweak ANK's above multicast changes a bit.
-  Paul Gortmaker	: update packet statistics for v2.1.x
-  Alan Cox		: support arbitary stupid port mappings on the
-  			  68K Macintosh. Support >16bit I/O spaces
-  Paul Gortmaker	: add kmod support for auto-loading of the 8390
-			  module by all drivers that require it.
-  Alan Cox		: Spinlocking work, added 'BUG_83C690'
-  Paul Gortmaker	: Separate out Tx timeout code from Tx path.
-  Paul Gortmaker	: Remove old unused single Tx buffer code.
-  Hayato Fujiwara	: Add m32r support.
-  Paul Gortmaker	: use skb_padto() instead of stack scratch area
-
-  Sources:
-  The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
-
-  */
+/* 8390 core for usual drivers */
 
 static const char version[] =
     "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/jiffies.h>
-#include <linux/fs.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/bitops.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fcntl.h>
-#include <linux/in.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
+#include "lib8390.c"
 
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-
-#define NS8390_CORE
-#include "8390.h"
-
-#define BUG_83C690
-
-/* These are the operational function interfaces to board-specific
-   routines.
-	void reset_8390(struct net_device *dev)
-		Resets the board associated with DEV, including a hardware reset of
-		the 8390.  This is only called when there is a transmit timeout, and
-		it is always followed by 8390_init().
-	void block_output(struct net_device *dev, int count, const unsigned char *buf,
-					  int start_page)
-		Write the COUNT bytes of BUF to the packet buffer at START_PAGE.  The
-		"page" value uses the 8390's 256-byte pages.
-	void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
-		Read the 4 byte, page aligned 8390 header. *If* there is a
-		subsequent read, it will be of the rest of the packet.
-	void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-		Read COUNT bytes from the packet buffer into the skb data area. Start
-		reading from RING_OFFSET, the address as the 8390 sees it.  This will always
-		follow the read of the 8390 header.
-*/
-#define ei_reset_8390 (ei_local->reset_8390)
-#define ei_block_output (ei_local->block_output)
-#define ei_block_input (ei_local->block_input)
-#define ei_get_8390_hdr (ei_local->get_8390_hdr)
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef ei_debug
-int ei_debug = 1;
-#endif
-
-/* Index to functions. */
-static void ei_tx_intr(struct net_device *dev);
-static void ei_tx_err(struct net_device *dev);
-static void ei_tx_timeout(struct net_device *dev);
-static void ei_receive(struct net_device *dev);
-static void ei_rx_overrun(struct net_device *dev);
-
-/* Routines generic to NS8390-based boards. */
-static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
-								int start_page);
-static void set_multicast_list(struct net_device *dev);
-static void do_set_multicast_list(struct net_device *dev);
-
-/*
- *	SMP and the 8390 setup.
- *
- *	The 8390 isnt exactly designed to be multithreaded on RX/TX. There is
- *	a page register that controls bank and packet buffer access. We guard
- *	this with ei_local->page_lock. Nobody should assume or set the page other
- *	than zero when the lock is not held. Lock holders must restore page 0
- *	before unlocking. Even pure readers must take the lock to protect in
- *	page 0.
- *
- *	To make life difficult the chip can also be very slow. We therefore can't
- *	just use spinlocks. For the longer lockups we disable the irq the device
- *	sits on and hold the lock. We must hold the lock because there is a dual
- *	processor case other than interrupts (get stats/set multicast list in
- *	parallel with each other and transmit).
- *
- *	Note: in theory we can just disable the irq on the card _but_ there is
- *	a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs"
- *	enter lock, take the queued irq. So we waddle instead of flying.
- *
- *	Finally by special arrangement for the purpose of being generally
- *	annoying the transmit function is called bh atomic. That places
- *	restrictions on the user context callers as disable_irq won't save
- *	them.
- */
-
-
-
-/**
- * ei_open - Open/initialize the board.
- * @dev: network device to initialize
- *
- * This routine goes all-out, setting everything
- * up anew at each open, even though many of these registers should only
- * need to be set once at boot.
- */
 int ei_open(struct net_device *dev)
 {
-	unsigned long flags;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
-
-	/* The card I/O part of the driver (e.g. 3c503) can hook a Tx timeout
-	    wrapper that does e.g. media check & then calls ei_tx_timeout. */
-	if (dev->tx_timeout == NULL)
-		 dev->tx_timeout = ei_tx_timeout;
-	if (dev->watchdog_timeo <= 0)
-		 dev->watchdog_timeo = TX_TIMEOUT;
-
-	/*
-	 *	Grab the page lock so we own the register set, then call
-	 *	the init function.
-	 */
-
-      	spin_lock_irqsave(&ei_local->page_lock, flags);
-	NS8390_init(dev, 1);
-	/* Set the flag before we drop the lock, That way the IRQ arrives
-	   after its set and we get no silly warnings */
-	netif_start_queue(dev);
-      	spin_unlock_irqrestore(&ei_local->page_lock, flags);
-	ei_local->irqlock = 0;
-	return 0;
+	return __ei_open(dev);
 }
 
-/**
- * ei_close - shut down network device
- * @dev: network device to close
- *
- * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
- */
 int ei_close(struct net_device *dev)
 {
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
-	unsigned long flags;
-
-	/*
-	 *	Hold the page lock during close
-	 */
-
-      	spin_lock_irqsave(&ei_local->page_lock, flags);
-	NS8390_init(dev, 0);
-      	spin_unlock_irqrestore(&ei_local->page_lock, flags);
-	netif_stop_queue(dev);
-	return 0;
+	return __ei_close(dev);
 }
 
-/**
- * ei_tx_timeout - handle transmit time out condition
- * @dev: network device which has apparently fallen asleep
- *
- * Called by kernel when device never acknowledges a transmit has
- * completed (or failed) - i.e. never posted a Tx related interrupt.
- */
-
-void ei_tx_timeout(struct net_device *dev)
-{
-	long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
-	int txsr, isr, tickssofar = jiffies - dev->trans_start;
-	unsigned long flags;
-
-#if defined(CONFIG_M32R) && defined(CONFIG_SMP)
-	unsigned long icucr;
-
-	local_irq_save(flags);
-	icucr = inl(M32R_ICU_CR1_PORTL);
-	icucr |= M32R_ICUCR_ISMOD11;
-	outl(icucr, M32R_ICU_CR1_PORTL);
-	local_irq_restore(flags);
-#endif
-	ei_local->stat.tx_errors++;
-
-	spin_lock_irqsave(&ei_local->page_lock, flags);
-	txsr = inb(e8390_base+EN0_TSR);
-	isr = inb(e8390_base+EN0_ISR);
-	spin_unlock_irqrestore(&ei_local->page_lock, flags);
-
-	printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
-		dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
-		(isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
-
-	if (!isr && !ei_local->stat.tx_packets)
-	{
-		/* The 8390 probably hasn't gotten on the cable yet. */
-		ei_local->interface_num ^= 1;   /* Try a different xcvr.  */
-	}
-
-	/* Ugly but a reset can be slow, yet must be protected */
-
-	disable_irq_nosync_lockdep(dev->irq);
-	spin_lock(&ei_local->page_lock);
-
-	/* Try to restart the card.  Perhaps the user has fixed something. */
-	ei_reset_8390(dev);
-	NS8390_init(dev, 1);
-
-	spin_unlock(&ei_local->page_lock);
-	enable_irq_lockdep(dev->irq);
-	netif_wake_queue(dev);
-}
-
-/**
- * ei_start_xmit - begin packet transmission
- * @skb: packet to be sent
- * @dev: network device to which packet is sent
- *
- * Sends a packet to an 8390 network device.
- */
-
-static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-	long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
-	int send_length = skb->len, output_page;
-	unsigned long flags;
-	char buf[ETH_ZLEN];
-	char *data = skb->data;
-
-	if (skb->len < ETH_ZLEN) {
-		memset(buf, 0, ETH_ZLEN);	/* more efficient than doing just the needed bits */
-		memcpy(buf, data, skb->len);
-		send_length = ETH_ZLEN;
-		data = buf;
-	}
-
-	/* Mask interrupts from the ethercard.
-	   SMP: We have to grab the lock here otherwise the IRQ handler
-	   on another CPU can flip window and race the IRQ mask set. We end
-	   up trashing the mcast filter not disabling irqs if we don't lock */
-
-	spin_lock_irqsave(&ei_local->page_lock, flags);
-	outb_p(0x00, e8390_base + EN0_IMR);
-	spin_unlock_irqrestore(&ei_local->page_lock, flags);
-
-
-	/*
-	 *	Slow phase with lock held.
-	 */
-
-	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
-
-	spin_lock(&ei_local->page_lock);
-
-	ei_local->irqlock = 1;
-
-	/*
-	 * We have two Tx slots available for use. Find the first free
-	 * slot, and then perform some sanity checks. With two Tx bufs,
-	 * you get very close to transmitting back-to-back packets. With
-	 * only one Tx buf, the transmitter sits idle while you reload the
-	 * card, leaving a substantial gap between each transmitted packet.
-	 */
-
-	if (ei_local->tx1 == 0)
-	{
-		output_page = ei_local->tx_start_page;
-		ei_local->tx1 = send_length;
-		if (ei_debug  &&  ei_local->tx2 > 0)
-			printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
-				dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
-	}
-	else if (ei_local->tx2 == 0)
-	{
-		output_page = ei_local->tx_start_page + TX_PAGES/2;
-		ei_local->tx2 = send_length;
-		if (ei_debug  &&  ei_local->tx1 > 0)
-			printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
-				dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
-	}
-	else
-	{	/* We should never get here. */
-		if (ei_debug)
-			printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n",
-				dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
-		ei_local->irqlock = 0;
-		netif_stop_queue(dev);
-		outb_p(ENISR_ALL, e8390_base + EN0_IMR);
-		spin_unlock(&ei_local->page_lock);
-		enable_irq_lockdep_irqrestore(dev->irq, &flags);
-		ei_local->stat.tx_errors++;
-		return 1;
-	}
-
-	/*
-	 * Okay, now upload the packet and trigger a send if the transmitter
-	 * isn't already sending. If it is busy, the interrupt handler will
-	 * trigger the send later, upon receiving a Tx done interrupt.
-	 */
-
-	ei_block_output(dev, send_length, data, output_page);
-
-	if (! ei_local->txing)
-	{
-		ei_local->txing = 1;
-		NS8390_trigger_send(dev, send_length, output_page);
-		dev->trans_start = jiffies;
-		if (output_page == ei_local->tx_start_page)
-		{
-			ei_local->tx1 = -1;
-			ei_local->lasttx = -1;
-		}
-		else
-		{
-			ei_local->tx2 = -1;
-			ei_local->lasttx = -2;
-		}
-	}
-	else ei_local->txqueue++;
-
-	if (ei_local->tx1  &&  ei_local->tx2)
-		netif_stop_queue(dev);
-	else
-		netif_start_queue(dev);
-
-	/* Turn 8390 interrupts back on. */
-	ei_local->irqlock = 0;
-	outb_p(ENISR_ALL, e8390_base + EN0_IMR);
-
-	spin_unlock(&ei_local->page_lock);
-	enable_irq_lockdep_irqrestore(dev->irq, &flags);
-
-	dev_kfree_skb (skb);
-	ei_local->stat.tx_bytes += send_length;
-
-	return 0;
-}
-
-/**
- * ei_interrupt - handle the interrupts from an 8390
- * @irq: interrupt number
- * @dev_id: a pointer to the net_device
- *
- * Handle the ether interface interrupts. We pull packets from
- * the 8390 via the card specific functions and fire them at the networking
- * stack. We also handle transmit completions and wake the transmit path if
- * necessary. We also update the counters and do other housekeeping as
- * needed.
- */
-
 irqreturn_t ei_interrupt(int irq, void *dev_id)
 {
-	struct net_device *dev = dev_id;
-	long e8390_base;
-	int interrupts, nr_serviced = 0;
-	struct ei_device *ei_local;
-
-	e8390_base = dev->base_addr;
-	ei_local = netdev_priv(dev);
-
-	/*
-	 *	Protect the irq test too.
-	 */
-
-	spin_lock(&ei_local->page_lock);
-
-	if (ei_local->irqlock)
-	{
-#if 1 /* This might just be an interrupt for a PCI device sharing this line */
-		/* The "irqlock" check is only for testing. */
-		printk(ei_local->irqlock
-			   ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
-			   : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
-			   dev->name, inb_p(e8390_base + EN0_ISR),
-			   inb_p(e8390_base + EN0_IMR));
-#endif
-		spin_unlock(&ei_local->page_lock);
-		return IRQ_NONE;
-	}
-
-	/* Change to page 0 and read the intr status reg. */
-	outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
-	if (ei_debug > 3)
-		printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name,
-			   inb_p(e8390_base + EN0_ISR));
-
-	/* !!Assumption!! -- we stay in page 0.	 Don't break this. */
-	while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0
-		   && ++nr_serviced < MAX_SERVICE)
-	{
-		if (!netif_running(dev)) {
-			printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
-			/* rmk - acknowledge the interrupts */
-			outb_p(interrupts, e8390_base + EN0_ISR);
-			interrupts = 0;
-			break;
-		}
-		if (interrupts & ENISR_OVER)
-			ei_rx_overrun(dev);
-		else if (interrupts & (ENISR_RX+ENISR_RX_ERR))
-		{
-			/* Got a good (?) packet. */
-			ei_receive(dev);
-		}
-		/* Push the next to-transmit packet through. */
-		if (interrupts & ENISR_TX)
-			ei_tx_intr(dev);
-		else if (interrupts & ENISR_TX_ERR)
-			ei_tx_err(dev);
-
-		if (interrupts & ENISR_COUNTERS)
-		{
-			ei_local->stat.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0);
-			ei_local->stat.rx_crc_errors   += inb_p(e8390_base + EN0_COUNTER1);
-			ei_local->stat.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2);
-			outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
-		}
-
-		/* Ignore any RDC interrupts that make it back to here. */
-		if (interrupts & ENISR_RDC)
-		{
-			outb_p(ENISR_RDC, e8390_base + EN0_ISR);
-		}
-
-		outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
-	}
-
-	if (interrupts && ei_debug)
-	{
-		outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
-		if (nr_serviced >= MAX_SERVICE)
-		{
-			/* 0xFF is valid for a card removal */
-			if(interrupts!=0xFF)
-				printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n",
-				   dev->name, interrupts);
-			outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
-		} else {
-			printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts);
-			outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
-		}
-	}
-	spin_unlock(&ei_local->page_lock);
-	return IRQ_RETVAL(nr_serviced > 0);
+	return __ei_interrupt(irq, dev_id);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 void ei_poll(struct net_device *dev)
 {
-	disable_irq_lockdep(dev->irq);
-	ei_interrupt(dev->irq, dev);
-	enable_irq_lockdep(dev->irq);
+	__ei_poll(dev);
 }
 #endif
 
-/**
- * ei_tx_err - handle transmitter error
- * @dev: network device which threw the exception
- *
- * A transmitter error has happened. Most likely excess collisions (which
- * is a fairly normal condition). If the error is one where the Tx will
- * have been aborted, we try and send another one right away, instead of
- * letting the failed packet sit and collect dust in the Tx buffer. This
- * is a much better solution as it avoids kernel based Tx timeouts, and
- * an unnecessary card reset.
- *
- * Called with lock held.
- */
-
-static void ei_tx_err(struct net_device *dev)
-{
-	long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
-	unsigned char txsr = inb_p(e8390_base+EN0_TSR);
-	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
-
-#ifdef VERBOSE_ERROR_DUMP
-	printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
-	if (txsr & ENTSR_ABT)
-		printk("excess-collisions ");
-	if (txsr & ENTSR_ND)
-		printk("non-deferral ");
-	if (txsr & ENTSR_CRS)
-		printk("lost-carrier ");
-	if (txsr & ENTSR_FU)
-		printk("FIFO-underrun ");
-	if (txsr & ENTSR_CDH)
-		printk("lost-heartbeat ");
-	printk("\n");
-#endif
-
-	outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */
-
-	if (tx_was_aborted)
-		ei_tx_intr(dev);
-	else
-	{
-		ei_local->stat.tx_errors++;
-		if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++;
-		if (txsr & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++;
-		if (txsr & ENTSR_OWC) ei_local->stat.tx_window_errors++;
-	}
-}
-
-/**
- * ei_tx_intr - transmit interrupt handler
- * @dev: network device for which tx intr is handled
- *
- * We have finished a transmit: check for errors and then trigger the next
- * packet to be sent. Called with lock held.
- */
-
-static void ei_tx_intr(struct net_device *dev)
-{
-	long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
-	int status = inb(e8390_base + EN0_TSR);
-
-	outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
-
-	/*
-	 * There are two Tx buffers, see which one finished, and trigger
-	 * the send of another one if it exists.
-	 */
-	ei_local->txqueue--;
-
-	if (ei_local->tx1 < 0)
-	{
-		if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
-			printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n",
-				ei_local->name, ei_local->lasttx, ei_local->tx1);
-		ei_local->tx1 = 0;
-		if (ei_local->tx2 > 0)
-		{
-			ei_local->txing = 1;
-			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
-			dev->trans_start = jiffies;
-			ei_local->tx2 = -1,
-			ei_local->lasttx = 2;
-		}
-		else ei_local->lasttx = 20, ei_local->txing = 0;
-	}
-	else if (ei_local->tx2 < 0)
-	{
-		if (ei_local->lasttx != 2  &&  ei_local->lasttx != -2)
-			printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
-				ei_local->name, ei_local->lasttx, ei_local->tx2);
-		ei_local->tx2 = 0;
-		if (ei_local->tx1 > 0)
-		{
-			ei_local->txing = 1;
-			NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
-			dev->trans_start = jiffies;
-			ei_local->tx1 = -1;
-			ei_local->lasttx = 1;
-		}
-		else
-			ei_local->lasttx = 10, ei_local->txing = 0;
-	}
-//	else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
-//			dev->name, ei_local->lasttx);
-
-	/* Minimize Tx latency: update the statistics after we restart TXing. */
-	if (status & ENTSR_COL)
-		ei_local->stat.collisions++;
-	if (status & ENTSR_PTX)
-		ei_local->stat.tx_packets++;
-	else
-	{
-		ei_local->stat.tx_errors++;
-		if (status & ENTSR_ABT)
-		{
-			ei_local->stat.tx_aborted_errors++;
-			ei_local->stat.collisions += 16;
-		}
-		if (status & ENTSR_CRS)
-			ei_local->stat.tx_carrier_errors++;
-		if (status & ENTSR_FU)
-			ei_local->stat.tx_fifo_errors++;
-		if (status & ENTSR_CDH)
-			ei_local->stat.tx_heartbeat_errors++;
-		if (status & ENTSR_OWC)
-			ei_local->stat.tx_window_errors++;
-	}
-	netif_wake_queue(dev);
-}
-
-/**
- * ei_receive - receive some packets
- * @dev: network device with which receive will be run
- *
- * We have a good packet(s), get it/them out of the buffers.
- * Called with lock held.
- */
-
-static void ei_receive(struct net_device *dev)
-{
-	long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
-	unsigned char rxing_page, this_frame, next_frame;
-	unsigned short current_offset;
-	int rx_pkt_count = 0;
-	struct e8390_pkt_hdr rx_frame;
-	int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
-
-	while (++rx_pkt_count < 10)
-	{
-		int pkt_len, pkt_stat;
-
-		/* Get the rx page (incoming packet pointer). */
-		outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
-		rxing_page = inb_p(e8390_base + EN1_CURPAG);
-		outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
-
-		/* Remove one frame from the ring.  Boundary is always a page behind. */
-		this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1;
-		if (this_frame >= ei_local->stop_page)
-			this_frame = ei_local->rx_start_page;
-
-		/* Someday we'll omit the previous, iff we never get this message.
-		   (There is at least one clone claimed to have a problem.)
-
-		   Keep quiet if it looks like a card removal. One problem here
-		   is that some clones crash in roughly the same way.
-		 */
-		if (ei_debug > 0  &&  this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
-			printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n",
-				   dev->name, this_frame, ei_local->current_page);
-
-		if (this_frame == rxing_page)	/* Read all the frames? */
-			break;				/* Done for now */
-
-		current_offset = this_frame << 8;
-		ei_get_8390_hdr(dev, &rx_frame, this_frame);
-
-		pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
-		pkt_stat = rx_frame.status;
-
-		next_frame = this_frame + 1 + ((pkt_len+4)>>8);
-
-		/* Check for bogosity warned by 3c503 book: the status byte is never
-		   written.  This happened a lot during testing! This code should be
-		   cleaned up someday. */
-		if (rx_frame.next != next_frame
-			&& rx_frame.next != next_frame + 1
-			&& rx_frame.next != next_frame - num_rx_pages
-			&& rx_frame.next != next_frame + 1 - num_rx_pages) {
-			ei_local->current_page = rxing_page;
-			outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
-			ei_local->stat.rx_errors++;
-			continue;
-		}
-
-		if (pkt_len < 60  ||  pkt_len > 1518)
-		{
-			if (ei_debug)
-				printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
-					   dev->name, rx_frame.count, rx_frame.status,
-					   rx_frame.next);
-			ei_local->stat.rx_errors++;
-			ei_local->stat.rx_length_errors++;
-		}
-		 else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
-		{
-			struct sk_buff *skb;
-
-			skb = dev_alloc_skb(pkt_len+2);
-			if (skb == NULL)
-			{
-				if (ei_debug > 1)
-					printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
-						   dev->name, pkt_len);
-				ei_local->stat.rx_dropped++;
-				break;
-			}
-			else
-			{
-				skb_reserve(skb,2);	/* IP headers on 16 byte boundaries */
-				skb->dev = dev;
-				skb_put(skb, pkt_len);	/* Make room */
-				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
-				skb->protocol=eth_type_trans(skb,dev);
-				netif_rx(skb);
-				dev->last_rx = jiffies;
-				ei_local->stat.rx_packets++;
-				ei_local->stat.rx_bytes += pkt_len;
-				if (pkt_stat & ENRSR_PHY)
-					ei_local->stat.multicast++;
-			}
-		}
-		else
-		{
-			if (ei_debug)
-				printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
-					   dev->name, rx_frame.status, rx_frame.next,
-					   rx_frame.count);
-			ei_local->stat.rx_errors++;
-			/* NB: The NIC counts CRC, frame and missed errors. */
-			if (pkt_stat & ENRSR_FO)
-				ei_local->stat.rx_fifo_errors++;
-		}
-		next_frame = rx_frame.next;
-
-		/* This _should_ never happen: it's here for avoiding bad clones. */
-		if (next_frame >= ei_local->stop_page) {
-			printk("%s: next frame inconsistency, %#2x\n", dev->name,
-				   next_frame);
-			next_frame = ei_local->rx_start_page;
-		}
-		ei_local->current_page = next_frame;
-		outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
-	}
-
-	/* We used to also ack ENISR_OVER here, but that would sometimes mask
-	   a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
-	outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
-	return;
-}
-
-/**
- * ei_rx_overrun - handle receiver overrun
- * @dev: network device which threw exception
- *
- * We have a receiver overrun: we have to kick the 8390 to get it started
- * again. Problem is that you have to kick it exactly as NS prescribes in
- * the updated datasheets, or "the NIC may act in an unpredictable manner."
- * This includes causing "the NIC to defer indefinitely when it is stopped
- * on a busy network."  Ugh.
- * Called with lock held. Don't call this with the interrupts off or your
- * computer will hate you - it takes 10ms or so.
- */
-
-static void ei_rx_overrun(struct net_device *dev)
-{
-	long e8390_base = dev->base_addr;
-	unsigned char was_txing, must_resend = 0;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
-
-	/*
-	 * Record whether a Tx was in progress and then issue the
-	 * stop command.
-	 */
-	was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
-	outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
-
-	if (ei_debug > 1)
-		printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
-	ei_local->stat.rx_over_errors++;
-
-	/*
-	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
-	 * Early datasheets said to poll the reset bit, but now they say that
-	 * it "is not a reliable indicator and subsequently should be ignored."
-	 * We wait at least 10ms.
-	 */
-
-	mdelay(10);
-
-	/*
-	 * Reset RBCR[01] back to zero as per magic incantation.
-	 */
-	outb_p(0x00, e8390_base+EN0_RCNTLO);
-	outb_p(0x00, e8390_base+EN0_RCNTHI);
-
-	/*
-	 * See if any Tx was interrupted or not. According to NS, this
-	 * step is vital, and skipping it will cause no end of havoc.
-	 */
-
-	if (was_txing)
-	{
-		unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
-		if (!tx_completed)
-			must_resend = 1;
-	}
-
-	/*
-	 * Have to enter loopback mode and then restart the NIC before
-	 * you are allowed to slurp packets up off the ring.
-	 */
-	outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
-	outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
-
-	/*
-	 * Clear the Rx ring of all the debris, and ack the interrupt.
-	 */
-	ei_receive(dev);
-	outb_p(ENISR_OVER, e8390_base+EN0_ISR);
-
-	/*
-	 * Leave loopback mode, and resend any packet that got stopped.
-	 */
-	outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
-	if (must_resend)
-    		outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
-}
-
-/*
- *	Collect the stats. This is called unlocked and from several contexts.
- */
-
-static struct net_device_stats *get_stats(struct net_device *dev)
-{
-	long ioaddr = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
-	unsigned long flags;
-
-	/* If the card is stopped, just return the present stats. */
-	if (!netif_running(dev))
-		return &ei_local->stat;
-
-	spin_lock_irqsave(&ei_local->page_lock,flags);
-	/* Read the counter registers, assuming we are in page 0. */
-	ei_local->stat.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0);
-	ei_local->stat.rx_crc_errors   += inb_p(ioaddr + EN0_COUNTER1);
-	ei_local->stat.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2);
-	spin_unlock_irqrestore(&ei_local->page_lock, flags);
-
-	return &ei_local->stat;
-}
-
-/*
- * Form the 64 bit 8390 multicast table from the linked list of addresses
- * associated with this dev structure.
- */
-
-static inline void make_mc_bits(u8 *bits, struct net_device *dev)
-{
-	struct dev_mc_list *dmi;
-
-	for (dmi=dev->mc_list; dmi; dmi=dmi->next)
-	{
-		u32 crc;
-		if (dmi->dmi_addrlen != ETH_ALEN)
-		{
-			printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
-			continue;
-		}
-		crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
-		/*
-		 * The 8390 uses the 6 most significant bits of the
-		 * CRC to index the multicast table.
-		 */
-		bits[crc>>29] |= (1<<((crc>>26)&7));
-	}
-}
-
-/**
- * do_set_multicast_list - set/clear multicast filter
- * @dev: net device for which multicast filter is adjusted
- *
- *	Set or clear the multicast filter for this adaptor. May be called
- *	from a BH in 2.1.x. Must be called with lock held.
- */
-
-static void do_set_multicast_list(struct net_device *dev)
-{
-	long e8390_base = dev->base_addr;
-	int i;
-	struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
-
-	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
-	{
-		memset(ei_local->mcfilter, 0, 8);
-		if (dev->mc_list)
-			make_mc_bits(ei_local->mcfilter, dev);
-	}
-	else
-		memset(ei_local->mcfilter, 0xFF, 8);	/* mcast set to accept-all */
-
-	/*
-	 * DP8390 manuals don't specify any magic sequence for altering
-	 * the multicast regs on an already running card. To be safe, we
-	 * ensure multicast mode is off prior to loading up the new hash
-	 * table. If this proves to be not enough, we can always resort
-	 * to stopping the NIC, loading the table and then restarting.
-	 *
-	 * Bug Alert!  The MC regs on the SMC 83C690 (SMC Elite and SMC
-	 * Elite16) appear to be write-only. The NS 8390 data sheet lists
-	 * them as r/w so this is a bug.  The SMC 83C790 (SMC Ultra and
-	 * Ultra32 EISA) appears to have this bug fixed.
-	 */
-
-	if (netif_running(dev))
-		outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
-	outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
-	for(i = 0; i < 8; i++)
-	{
-		outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
-#ifndef BUG_83C690
-		if(inb_p(e8390_base + EN1_MULT_SHIFT(i))!=ei_local->mcfilter[i])
-			printk(KERN_ERR "Multicast filter read/write mismap %d\n",i);
-#endif
-	}
-	outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
-
-  	if(dev->flags&IFF_PROMISC)
-  		outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
-	else if(dev->flags&IFF_ALLMULTI || dev->mc_list)
-  		outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
-  	else
-  		outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
- }
-
-/*
- *	Called without lock held. This is invoked from user context and may
- *	be parallel to just about everything else. Its also fairly quick and
- *	not called too often. Must protect against both bh and irq users
- */
-
-static void set_multicast_list(struct net_device *dev)
-{
-	unsigned long flags;
-	struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
-
-	spin_lock_irqsave(&ei_local->page_lock, flags);
-	do_set_multicast_list(dev);
-	spin_unlock_irqrestore(&ei_local->page_lock, flags);
-}
-
-/**
- * ethdev_setup - init rest of 8390 device struct
- * @dev: network device structure to init
- *
- * Initialize the rest of the 8390 device structure.  Do NOT __init
- * this, as it is used by 8390 based modular drivers too.
- */
-
-static void ethdev_setup(struct net_device *dev)
-{
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
-	if (ei_debug > 1)
-		printk(version);
-
-	dev->hard_start_xmit = &ei_start_xmit;
-	dev->get_stats	= get_stats;
-	dev->set_multicast_list = &set_multicast_list;
-
-	ether_setup(dev);
-
-	spin_lock_init(&ei_local->page_lock);
-}
-
-/**
- * alloc_ei_netdev - alloc_etherdev counterpart for 8390
- * @size: extra bytes to allocate
- *
- * Allocate 8390-specific net_device.
- */
 struct net_device *__alloc_ei_netdev(int size)
 {
-	return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
-				ethdev_setup);
+	return ____alloc_ei_netdev(size);
 }
 
-
-
-
-/* This page of functions should be 8390 generic */
-/* Follow National Semi's recommendations for initializing the "NIC". */
-
-/**
- * NS8390_init - initialize 8390 hardware
- * @dev: network device to initialize
- * @startp: boolean.  non-zero value to initiate chip processing
- *
- *	Must be called with lock held.
- */
-
 void NS8390_init(struct net_device *dev, int startp)
 {
-	long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
-	int i;
-	int endcfg = ei_local->word16
-	    ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
-	    : 0x48;
-
-	if(sizeof(struct e8390_pkt_hdr)!=4)
-    		panic("8390.c: header struct mispacked\n");
-	/* Follow National Semi's recommendations for initing the DP83902. */
-	outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
-	outb_p(endcfg, e8390_base + EN0_DCFG);	/* 0x48 or 0x49 */
-	/* Clear the remote byte count registers. */
-	outb_p(0x00,  e8390_base + EN0_RCNTLO);
-	outb_p(0x00,  e8390_base + EN0_RCNTHI);
-	/* Set to monitor and loopback mode -- this is vital!. */
-	outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
-	outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
-	/* Set the transmit page and receive ring. */
-	outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
-	ei_local->tx1 = ei_local->tx2 = 0;
-	outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
-	outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY);	/* 3c503 says 0x3f,NS0x26*/
-	ei_local->current_page = ei_local->rx_start_page;		/* assert boundary+1 */
-	outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
-	/* Clear the pending interrupts and mask. */
-	outb_p(0xFF, e8390_base + EN0_ISR);
-	outb_p(0x00,  e8390_base + EN0_IMR);
-
-	/* Copy the station address into the DS8390 registers. */
-
-	outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
-	for(i = 0; i < 6; i++)
-	{
-		outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
-		if (ei_debug > 1 && inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
-			printk(KERN_ERR "Hw. address read/write mismap %d\n",i);
-	}
-
-	outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
-	outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
-
-	netif_start_queue(dev);
-	ei_local->tx1 = ei_local->tx2 = 0;
-	ei_local->txing = 0;
-
-	if (startp)
-	{
-		outb_p(0xff,  e8390_base + EN0_ISR);
-		outb_p(ENISR_ALL,  e8390_base + EN0_IMR);
-		outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
-		outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
-		/* 3c503 TechMan says rxconfig only after the NIC is started. */
-		outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on,  */
-		do_set_multicast_list(dev);	/* (re)load the mcast table */
-	}
-}
-
-/* Trigger a transmit start, assuming the length is valid.
-   Always called with the page lock held */
-
-static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
-								int start_page)
-{
-	long e8390_base = dev->base_addr;
- 	struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);
-
-	outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
-
-	if (inb_p(e8390_base + E8390_CMD) & E8390_TRANS)
-	{
-		printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
-			dev->name);
-		return;
-	}
-	outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
-	outb_p(length >> 8, e8390_base + EN0_TCNTHI);
-	outb_p(start_page, e8390_base + EN0_TPSR);
-	outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
+	return __NS8390_init(dev, startp);
 }
 
 EXPORT_SYMBOL(ei_open);
diff --git a/drivers/net/8390.h b/drivers/net/8390.h
index f44f122..414de5b 100644
--- a/drivers/net/8390.h
+++ b/drivers/net/8390.h
@@ -107,35 +107,14 @@
  *      - removed AMIGA_PCMCIA from this list, handled as ISA io now
  */
 
-#if defined(CONFIG_MAC) ||  \
-    defined(CONFIG_ZORRO8390) || defined(CONFIG_ZORRO8390_MODULE) || \
-    defined(CONFIG_HYDRA) || defined(CONFIG_HYDRA_MODULE)
-#define EI_SHIFT(x)	(ei_local->reg_offset[x])
-#undef inb
-#undef inb_p
-#undef outb
-#undef outb_p
+#ifndef ei_inb
+#define ei_inb(_p)	inb(_p)
+#define ei_outb(_v,_p)	outb(_v,_p)
+#define ei_inb_p(_p)	inb_p(_p)
+#define ei_outb_p(_v,_p) outb_p(_v,_p)
+#endif
 
-#define inb(port)   in_8(port)
-#define outb(val,port)  out_8(port,val)
-#define inb_p(port)   in_8(port)
-#define outb_p(val,port)  out_8(port,val)
-
-#elif defined(CONFIG_ARM_ETHERH) || defined(CONFIG_ARM_ETHERH_MODULE)
-#define EI_SHIFT(x)	(ei_local->reg_offset[x])
-#undef inb
-#undef inb_p
-#undef outb
-#undef outb_p
-
-#define inb(_p)		readb(_p)
-#define outb(_v,_p)	writeb(_v,_p)
-#define inb_p(_p)	inb(_p)
-#define outb_p(_v,_p)	outb(_v,_p)
-
-#elif defined(CONFIG_NE_H8300) || defined(CONFIG_NE_H8300_MODULE)
-#define EI_SHIFT(x)	(ei_local->reg_offset[x])
-#else
+#ifndef EI_SHIFT
 #define EI_SHIFT(x)	(x)
 #endif
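
Editor's note: 8390.h now provides default ei_inb()/ei_outb() wrappers and a default EI_SHIFT() guarded by #ifndef, so a board file can override them before including the shared core instead of redefining inb()/outb() behind platform #ifs. A self-contained sketch of that override pattern; MY_SHIFT, my_inb and fake_inb are invented stand-ins:

    #include <stdio.h>

    static unsigned char regs[32];

    static unsigned char fake_inb(unsigned int port)
    {
        return regs[port];
    }

    /* A board with unusual register spacing defines its accessors before the
     * shared core is pulled in; boards that don't get the defaults below. */
    #define MY_SHIFT(x)  ((x) << 1)

    /* ---- what would live in the shared core header ---- */
    #ifndef MY_SHIFT
    #define MY_SHIFT(x)  (x)
    #endif
    #ifndef my_inb
    #define my_inb(p)    fake_inb(p)
    #endif
    /* ---- end of shared core header ---- */

    int main(void)
    {
        regs[MY_SHIFT(2)] = 0xab;
        printf("register 2 reads 0x%02x\n", my_inb(MY_SHIFT(2)));
        return 0;
    }
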
 
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6e863aa..9de0eed 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -32,7 +32,7 @@
 	tristate "Intermediate Functional Block support"
 	depends on NET_CLS_ACT
 	---help---
-	  This is an intermidiate driver that allows sharing of
+	  This is an intermediate driver that allows sharing of
 	  resources.
 	  To compile this driver as a module, choose M here: the module
 	  will be called ifb.  If you want to use more than one ifb
@@ -188,6 +188,17 @@
 	  or internal device.  It is safe to say Y or M here even if your
 	  ethernet card lack MII.
 
+config MACB
+	tristate "Atmel MACB support"
+	depends on NET_ETHERNET && AVR32
+	select MII
+	help
+	  The Atmel MACB ethernet interface is found on many AT32 and AT91
+	  parts. Say Y to include support for the MACB chip.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called macb.
+
 source "drivers/net/arm/Kconfig"
 
 config MACE
@@ -1769,8 +1780,8 @@
 	  information.
 
 config LAN_SAA9730
-	bool "Philips SAA9730 Ethernet support (EXPERIMENTAL)"
-	depends on NET_PCI && EXPERIMENTAL && MIPS
+	bool "Philips SAA9730 Ethernet support"
+	depends on NET_PCI && PCI && MIPS_ATLAS
 	help
 	  The SAA9730 is a combined multimedia and peripheral controller used
 	  in thin clients, Internet access terminals, and diskless
@@ -2136,7 +2147,7 @@
 	  This driver supports the original Yukon chipset. A cleaner driver is 
 	  also available (skge) which seems to work better than this one.
 
-	  This driver does not support the newer Yukon2 chipset. A seperate
+	  This driver does not support the newer Yukon2 chipset. A separate
 	  driver, sky2, is provided to support Yukon2-based adapters.
 
 	  The following adapters are supported by this driver:
@@ -2251,6 +2262,14 @@
 	  This driver supports the Gigabit Ethernet chips present on the
 	  Cell Processor-Based Blades from IBM.
 
+config TSI108_ETH
+	   tristate "Tundra TSI108 gigabit Ethernet support"
+	   depends on TSI108_BRIDGE
+	   help
+	     This driver supports Tundra TSI108 gigabit Ethernet ports.
+	     To compile this driver as a module, choose M here: the module
+	     will be called tsi108_eth.
+
 config GIANFAR
 	tristate "Gianfar Ethernet"
 	depends on 85xx || 83xx || PPC_86xx
@@ -2341,10 +2360,11 @@
 config CHELSIO_T1
         tristate "Chelsio 10Gb Ethernet support"
         depends on PCI
+	select CRC32
         help
-          This driver supports Chelsio N110 and N210 models 10Gb Ethernet
-          cards. More information about adapter features and performance
-          tuning is in <file:Documentation/networking/cxgb.txt>.
+          This driver supports Chelsio gigabit and 10-gigabit
+          Ethernet cards. More information about adapter features and
+	  performance tuning is in <file:Documentation/networking/cxgb.txt>.
 
           For general information about Chelsio and our products, visit
           our website at <http://www.chelsio.com>.
@@ -2357,6 +2377,13 @@
           To compile this driver as a module, choose M here: the module
           will be called cxgb.
 
+config CHELSIO_T1_1G
+        bool "Chelsio gigabit Ethernet support"
+        depends on CHELSIO_T1
+        help
+          Enables support for Chelsio's gigabit Ethernet PCI cards.  If you
+          are using only 10G cards say 'N' here.
+
 config EHEA
 	tristate "eHEA Ethernet support"
 	depends on IBMEBUS
@@ -2447,6 +2474,12 @@
 	  <file:Documentation/networking/net-modules.txt>.  The module
 	  will be called myri10ge.
 
+config NETXEN_NIC
+	tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
+	depends on PCI
+	help
+	  This enables support for NetXen's Gigabit Ethernet card.
+
 endmenu
 
 source "drivers/net/tokenring/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index f270bc4..4c0d4e5 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -82,7 +82,7 @@
 obj-$(CONFIG_NET) += Space.o loopback.o
 obj-$(CONFIG_SEEQ8005) += seeq8005.o
 obj-$(CONFIG_NET_SB1000) += sb1000.o
-obj-$(CONFIG_MAC8390) += mac8390.o 8390.o
+obj-$(CONFIG_MAC8390) += mac8390.o
 obj-$(CONFIG_APNE) += apne.o 8390.o
 obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
 obj-$(CONFIG_SHAPER) += shaper.o
@@ -90,7 +90,6 @@
 obj-$(CONFIG_SMC9194) += smc9194.o
 obj-$(CONFIG_FEC) += fec.o
 obj-$(CONFIG_68360_ENET) += 68360enet.o
-obj-$(CONFIG_ARM_ETHERH) += 8390.o
 obj-$(CONFIG_WD80x3) += wd.o 8390.o
 obj-$(CONFIG_EL2) += 3c503.o 8390.o
 obj-$(CONFIG_NE2000) += ne.o 8390.o
@@ -107,8 +106,9 @@
 obj-$(CONFIG_NET_SB1250_MAC) += sb1250-mac.o
 obj-$(CONFIG_B44) += b44.o
 obj-$(CONFIG_FORCEDETH) += forcedeth.o
-obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
+obj-$(CONFIG_NE_H8300) += ne-h8300.o
 
+obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
 obj-$(CONFIG_QLA3XXX) += qla3xxx.o
 
@@ -165,7 +165,7 @@
 obj-$(CONFIG_LP486E) += lp486e.o
 
 obj-$(CONFIG_ETH16I) += eth16i.o
-obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
+obj-$(CONFIG_ZORRO8390) += zorro8390.o
 obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
 obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
 obj-$(CONFIG_EQUALIZER) += eql.o
@@ -178,7 +178,7 @@
 obj-$(CONFIG_ATARI_BIONET) += atari_bionet.o
 obj-$(CONFIG_ATARI_PAMSNET) += atari_pamsnet.o
 obj-$(CONFIG_A2065) += a2065.o
-obj-$(CONFIG_HYDRA) += hydra.o 8390.o
+obj-$(CONFIG_HYDRA) += hydra.o
 obj-$(CONFIG_ARIADNE) += ariadne.o
 obj-$(CONFIG_CS89x0) += cs89x0.o
 obj-$(CONFIG_MACSONIC) += macsonic.o
@@ -197,6 +197,8 @@
 obj-$(CONFIG_DM9000) += dm9000.o
 obj-$(CONFIG_FEC_8XX) += fec_8xx/
 
+obj-$(CONFIG_MACB) += macb.o
+
 obj-$(CONFIG_ARM) += arm/
 obj-$(CONFIG_DEV_APPLETALK) += appletalk/
 obj-$(CONFIG_TR) += tokenring/
@@ -214,3 +216,4 @@
 
 obj-$(CONFIG_FS_ENET) += fs_enet/
 
+obj-$(CONFIG_NETXEN_NIC) += netxen/
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index a67f5ef..602ed31 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -33,7 +33,6 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/netlink.h>
-#include <linux/divert.h>
 
 /* A unified ethernet device probe.  This is the easiest way to have every
    ethernet adaptor have the name "eth[0123...]".
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 0dca8bb..c01f87f 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -405,7 +405,7 @@
 	iounmap(ei_status.mem);
 }
 
-void
+void __exit
 cleanup_module(void)
 {
 	int this_dev;
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index ef65e59..18896f2 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1490,32 +1490,7 @@
 	buf[12] = readl(mmio + STAT0);
 }
 
-/*
-amd8111e crc generator implementation is different from the kernel
-ether_crc() function.
-*/
-static int amd8111e_ether_crc(int len, char* mac_addr)
-{
-	int i,byte;
-	unsigned char octet;
-	u32 crc= INITCRC;
 
-	for(byte=0; byte < len; byte++){
-		octet = mac_addr[byte];
-		for( i=0;i < 8; i++){
-			/*If the next bit form the input stream is 1,subtract				 the divisor (CRC32) from the dividend(crc).*/
-			if( (octet & 0x1) ^ (crc & 0x1) ){
-				crc >>= 1;
-				crc ^= CRC32;
-			}
-			else
-				crc >>= 1;
-
-			octet >>= 1;
-		}
-	}
-	return crc;
-}
 /*
 This function sets promiscuos mode, all-multi mode or the multicast address
 list to the device.
@@ -1556,7 +1531,7 @@
 	mc_filter[1] = mc_filter[0] = 0;
 	for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
 		     i++, mc_ptr = mc_ptr->next) {
-		bit_num = ( amd8111e_ether_crc(ETH_ALEN,mc_ptr->dmi_addr)							 >> 26 ) & 0x3f;
+		bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f;
 		mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
 	}
 	amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
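
For context, a minimal sketch (not part of the patch) of the hash computation the driver now relies on: the kernel's ether_crc_le() replaces the open-coded CRC, the top six bits of the little-endian CRC select one of the 64 logical-address filter bits, and that bit is set in the two 32-bit LADRF words. ether_crc_le() and ETH_ALEN are the real kernel helpers; the function and variable names are illustrative.

#include <linux/crc32.h>	/* ether_crc_le() */
#include <linux/if_ether.h>	/* ETH_ALEN */

static void example_set_ladrf_bit(u32 mc_filter[2], const u8 *addr)
{
	/* Upper 6 bits of the CRC pick one of 64 filter bits. */
	int bit_num = (ether_crc_le(ETH_ALEN, addr) >> 26) & 0x3f;

	mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
}
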
diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h
index 7727d32..2007510 100644
--- a/drivers/net/amd8111e.h
+++ b/drivers/net/amd8111e.h
@@ -651,10 +651,6 @@
 /* driver ioctl parameters */
 #define AMD8111E_REG_DUMP_LEN	 13*sizeof(u32)
 
-/* crc generator constants */
-#define CRC32 0xedb88320
-#define INITCRC 0xFFFFFFFF
-
 /* amd8111e desriptor format */
 
 struct amd8111e_tx_dr{
diff --git a/drivers/net/apne.c b/drivers/net/apne.c
index 9164d8c..d4e4081 100644
--- a/drivers/net/apne.c
+++ b/drivers/net/apne.c
@@ -568,7 +568,7 @@
 #ifdef MODULE
 static struct net_device *apne_dev;
 
-int init_module(void)
+int __init init_module(void)
 {
 	apne_dev = apne_probe(-1);
 	if (IS_ERR(apne_dev))
@@ -576,7 +576,7 @@
 	return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	unregister_netdev(apne_dev);
 
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index cc1a27e..dba5e51 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -1041,7 +1041,7 @@
         return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	unregister_netdev(cops_dev);
 	cleanup_card(cops_dev);
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index b54b857..fada15d 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -41,9 +41,6 @@
 #define DRV_NAME	"at91_ether"
 #define DRV_VERSION	"1.0"
 
-static struct net_device *at91_dev;
-
-static struct timer_list check_timer;
 #define LINK_POLL_INTERVAL	(HZ)
 
 /* ..................................................................... */
@@ -146,7 +143,7 @@
  */
 static void update_linkspeed(struct net_device *dev, int silent)
 {
-	struct at91_private *lp = (struct at91_private *) dev->priv;
+	struct at91_private *lp = netdev_priv(dev);
 	unsigned int bmsr, bmcr, lpa, mac_cfg;
 	unsigned int speed, duplex;
 
@@ -199,7 +196,7 @@
 static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *) dev_id;
-	struct at91_private *lp = (struct at91_private *) dev->priv;
+	struct at91_private *lp = netdev_priv(dev);
 	unsigned int phy;
 
 	/*
@@ -242,7 +239,7 @@
  */
 static void enable_phyirq(struct net_device *dev)
 {
-	struct at91_private *lp = (struct at91_private *) dev->priv;
+	struct at91_private *lp = netdev_priv(dev);
 	unsigned int dsintr, irq_number;
 	int status;
 
@@ -252,8 +249,7 @@
 		 * PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L),
 		 * or board does not have it connected.
 		 */
-		check_timer.expires = jiffies + LINK_POLL_INTERVAL;
-		add_timer(&check_timer);
+		mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
 		return;
 	}
 
@@ -294,13 +290,13 @@
  */
 static void disable_phyirq(struct net_device *dev)
 {
-	struct at91_private *lp = (struct at91_private *) dev->priv;
+	struct at91_private *lp = netdev_priv(dev);
 	unsigned int dsintr;
 	unsigned int irq_number;
 
 	irq_number = lp->board_data.phy_irq_pin;
 	if (!irq_number) {
-		del_timer_sync(&check_timer);
+		del_timer_sync(&lp->check_timer);
 		return;
 	}
 
@@ -340,7 +336,7 @@
 #if 0
 static void reset_phy(struct net_device *dev)
 {
-	struct at91_private *lp = (struct at91_private *) dev->priv;
+	struct at91_private *lp = netdev_priv(dev);
 	unsigned int bmcr;
 
 	spin_lock_irq(&lp->lock);
@@ -362,13 +358,13 @@
 static void at91ether_check_link(unsigned long dev_id)
 {
 	struct net_device *dev = (struct net_device *) dev_id;
+	struct at91_private *lp = netdev_priv(dev);
 
 	enable_mdi();
 	update_linkspeed(dev, 1);
 	disable_mdi();
 
-	check_timer.expires = jiffies + LINK_POLL_INTERVAL;
-	add_timer(&check_timer);
+	mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
 }
 
 /* ......................... ADDRESS MANAGEMENT ........................ */
@@ -590,7 +586,7 @@
 
 static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
-	struct at91_private *lp = (struct at91_private *) dev->priv;
+	struct at91_private *lp = netdev_priv(dev);
 	int ret;
 
 	spin_lock_irq(&lp->lock);
@@ -611,7 +607,7 @@
 
 static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
-	struct at91_private *lp = (struct at91_private *) dev->priv;
+	struct at91_private *lp = netdev_priv(dev);
 	int ret;
 
 	spin_lock_irq(&lp->lock);
@@ -627,7 +623,7 @@
 
 static int at91ether_nwayreset(struct net_device *dev)
 {
-	struct at91_private *lp = (struct at91_private *) dev->priv;
+	struct at91_private *lp = netdev_priv(dev);
 	int ret;
 
 	spin_lock_irq(&lp->lock);
@@ -658,7 +654,7 @@
 
 static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	struct at91_private *lp = (struct at91_private *) dev->priv;
+	struct at91_private *lp = netdev_priv(dev);
 	int res;
 
 	if (!netif_running(dev))
@@ -680,7 +676,7 @@
  */
 static void at91ether_start(struct net_device *dev)
 {
-	struct at91_private *lp = (struct at91_private *) dev->priv;
+	struct at91_private *lp = netdev_priv(dev);
 	struct recv_desc_bufs *dlist, *dlist_phys;
 	int i;
 	unsigned long ctl;
@@ -712,7 +708,7 @@
  */
 static int at91ether_open(struct net_device *dev)
 {
-	struct at91_private *lp = (struct at91_private *) dev->priv;
+	struct at91_private *lp = netdev_priv(dev);
 	unsigned long ctl;
 
 	if (!is_valid_ether_addr(dev->dev_addr))
@@ -752,7 +748,7 @@
  */
 static int at91ether_close(struct net_device *dev)
 {
-	struct at91_private *lp = (struct at91_private *) dev->priv;
+	struct at91_private *lp = netdev_priv(dev);
 	unsigned long ctl;
 
 	/* Disable Receiver and Transmitter */
@@ -779,7 +775,7 @@
  */
 static int at91ether_tx(struct sk_buff *skb, struct net_device *dev)
 {
-	struct at91_private *lp = (struct at91_private *) dev->priv;
+	struct at91_private *lp = netdev_priv(dev);
 
 	if (at91_emac_read(AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) {
 		netif_stop_queue(dev);
@@ -811,7 +807,7 @@
  */
 static struct net_device_stats *at91ether_stats(struct net_device *dev)
 {
-	struct at91_private *lp = (struct at91_private *) dev->priv;
+	struct at91_private *lp = netdev_priv(dev);
 	int ale, lenerr, seqe, lcol, ecol;
 
 	if (netif_running(dev)) {
@@ -847,7 +843,7 @@
  */
 static void at91ether_rx(struct net_device *dev)
 {
-	struct at91_private *lp = (struct at91_private *) dev->priv;
+	struct at91_private *lp = netdev_priv(dev);
 	struct recv_desc_bufs *dlist;
 	unsigned char *p_recv;
 	struct sk_buff *skb;
@@ -857,14 +853,13 @@
 	while (dlist->descriptors[lp->rxBuffIndex].addr & EMAC_DESC_DONE) {
 		p_recv = dlist->recv_buf[lp->rxBuffIndex];
 		pktlen = dlist->descriptors[lp->rxBuffIndex].size & 0x7ff;	/* Length of frame including FCS */
-		skb = alloc_skb(pktlen + 2, GFP_ATOMIC);
+		skb = dev_alloc_skb(pktlen + 2);
 		if (skb != NULL) {
 			skb_reserve(skb, 2);
 			memcpy(skb_put(skb, pktlen), p_recv, pktlen);
 
 			skb->dev = dev;
 			skb->protocol = eth_type_trans(skb, dev);
-			skb->len = pktlen;
 			dev->last_rx = jiffies;
 			lp->stats.rx_bytes += pktlen;
 			netif_rx(skb);
@@ -891,7 +886,7 @@
 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *) dev_id;
-	struct at91_private *lp = (struct at91_private *) dev->priv;
+	struct at91_private *lp = netdev_priv(dev);
 	unsigned long intstatus, ctl;
 
 	/* MAC Interrupt Status register indicates what interrupts are pending.
@@ -927,6 +922,17 @@
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void at91ether_poll_controller(struct net_device *dev)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	at91ether_interrupt(dev->irq, dev);
+	local_irq_restore(flags);
+}
+#endif
+
 /*
  * Initialize the ethernet interface
  */
@@ -939,9 +945,6 @@
 	unsigned int val;
 	int res;
 
-	if (at91_dev)			/* already initialized */
-		return 0;
-
 	dev = alloc_etherdev(sizeof(struct at91_private));
 	if (!dev)
 		return -ENOMEM;
@@ -957,7 +960,7 @@
 	}
 
 	/* Allocate memory for DMA Receive descriptors */
-	lp = (struct at91_private *)dev->priv;
+	lp = netdev_priv(dev);
 	lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL);
 	if (lp->dlist == NULL) {
 		free_irq(dev->irq, dev);
@@ -979,6 +982,9 @@
 	dev->set_mac_address = set_mac_address;
 	dev->ethtool_ops = &at91ether_ethtool_ops;
 	dev->do_ioctl = at91ether_ioctl;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = at91ether_poll_controller;
+#endif
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
@@ -1024,7 +1030,6 @@
 		dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
 		return res;
 	}
-	at91_dev = dev;
 
 	/* Determine current link speed */
 	spin_lock_irq(&lp->lock);
@@ -1036,9 +1041,9 @@
 
 	/* If board has no PHY IRQ, use a timer to poll the PHY */
 	if (!lp->board_data.phy_irq_pin) {
-		init_timer(&check_timer);
-		check_timer.data = (unsigned long)dev;
-		check_timer.function = at91ether_check_link;
+		init_timer(&lp->check_timer);
+		lp->check_timer.data = (unsigned long)dev;
+		lp->check_timer.function = at91ether_check_link;
 	}
 
 	/* Display ethernet banner */
@@ -1115,15 +1120,16 @@
 
 static int __devexit at91ether_remove(struct platform_device *pdev)
 {
-	struct at91_private *lp = (struct at91_private *) at91_dev->priv;
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct at91_private *lp = netdev_priv(dev);
 
-	unregister_netdev(at91_dev);
-	free_irq(at91_dev->irq, at91_dev);
+	unregister_netdev(dev);
+	free_irq(dev->irq, dev);
 	dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
 	clk_put(lp->ether_clk);
 
-	free_netdev(at91_dev);
-	at91_dev = NULL;
+	platform_set_drvdata(pdev, NULL);
+	free_netdev(dev);
 	return 0;
 }
 
@@ -1131,8 +1137,8 @@
 
 static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
 {
-	struct at91_private *lp = (struct at91_private *) at91_dev->priv;
 	struct net_device *net_dev = platform_get_drvdata(pdev);
+	struct at91_private *lp = netdev_priv(net_dev);
 	int phy_irq = lp->board_data.phy_irq_pin;
 
 	if (netif_running(net_dev)) {
@@ -1149,8 +1155,8 @@
 
 static int at91ether_resume(struct platform_device *pdev)
 {
-	struct at91_private *lp = (struct at91_private *) at91_dev->priv;
 	struct net_device *net_dev = platform_get_drvdata(pdev);
+	struct at91_private *lp = netdev_priv(net_dev);
 	int phy_irq = lp->board_data.phy_irq_pin;
 
 	if (netif_running(net_dev)) {
diff --git a/drivers/net/arm/at91_ether.h b/drivers/net/arm/at91_ether.h
index d1e72e0..b6b665d 100644
--- a/drivers/net/arm/at91_ether.h
+++ b/drivers/net/arm/at91_ether.h
@@ -87,6 +87,7 @@
 	spinlock_t lock;			/* lock for MDI interface */
 	short phy_media;			/* media interface type */
 	unsigned short phy_address;		/* 5-bit MDI address of PHY (0..31) */
+	struct timer_list check_timer;		/* Poll link status */
 
 	/* Transmit */
 	struct sk_buff *skb;			/* holds skb until xmit interrupt completes */
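
The at91_ether conversion above moves the link-poll timer out of file-scope statics and into the per-device private area returned by netdev_priv(), so the driver no longer assumes a single instance. A minimal sketch of that pattern with the timer API of this kernel generation; apart from netdev_priv(), init_timer() and mod_timer(), the names are illustrative.

#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_priv {
	struct timer_list check_timer;		/* polls link when no PHY IRQ */
};

static void example_check_link(unsigned long dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct example_priv *lp = netdev_priv(dev);

	/* ... read the PHY and update carrier state here ... */

	mod_timer(&lp->check_timer, jiffies + HZ);	/* re-arm for the next poll */
}

static void example_start_polling(struct net_device *dev)
{
	struct example_priv *lp = netdev_priv(dev);	/* allocated via alloc_etherdev(sizeof(*lp)) */

	init_timer(&lp->check_timer);
	lp->check_timer.data = (unsigned long)dev;
	lp->check_timer.function = example_check_link;
	mod_timer(&lp->check_timer, jiffies + HZ);
}
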
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index f3478a3..d6da3ce 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -254,7 +254,7 @@
 	} while (thislen);
 }
 
-static int __init
+static int __devinit
 ether1_ramtest(struct net_device *dev, unsigned char byte)
 {
 	unsigned char *buffer = kmalloc (BUFFER_SIZE, GFP_KERNEL);
@@ -308,7 +308,7 @@
 	return BUS_16;
 }
 
-static int __init
+static int __devinit
 ether1_init_2(struct net_device *dev)
 {
 	int i;
@@ -986,7 +986,7 @@
 
 /* ------------------------------------------------------------------------- */
 
-static void __init ether1_banner(void)
+static void __devinit ether1_banner(void)
 {
 	static unsigned int version_printed = 0;
 
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 84686c8..4fc2347 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -198,7 +198,7 @@
  * Read the ethernet address string from the on board rom.
  * This is an ascii string!!!
  */
-static int __init
+static int __devinit
 ether3_addr(char *addr, struct expansion_card *ec)
 {
 	struct in_chunk_dir cd;
@@ -223,7 +223,7 @@
 
 /* --------------------------------------------------------------------------- */
 
-static int __init
+static int __devinit
 ether3_ramtest(struct net_device *dev, unsigned char byte)
 {
 	unsigned char *buffer = kmalloc(RX_END, GFP_KERNEL);
@@ -272,7 +272,7 @@
 
 /* ------------------------------------------------------------------------------- */
 
-static int __init ether3_init_2(struct net_device *dev)
+static int __devinit ether3_init_2(struct net_device *dev)
 {
 	int i;
 
@@ -765,7 +765,7 @@
 	}
 }
 
-static void __init ether3_banner(void)
+static void __devinit ether3_banner(void)
 {
 	static unsigned version_printed = 0;
 
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 4ae9897..f3faa4f 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -52,7 +52,12 @@
 #include <asm/ecard.h>
 #include <asm/io.h>
 
-#include "../8390.h"
+#define EI_SHIFT(x)	(ei_local->reg_offset[x])
+
+#define ei_inb(_p)	 readb((void __iomem *)_p)
+#define ei_outb(_v,_p)	 writeb(_v,(void __iomem *)_p)
+#define ei_inb_p(_p)	 readb((void __iomem *)_p)
+#define ei_outb_p(_v,_p) writeb(_v,(void __iomem *)_p)
 
 #define NET_DEBUG  0
 #define DEBUG_INIT 2
@@ -60,6 +65,11 @@
 #define DRV_NAME	"etherh"
 #define DRV_VERSION	"1.11"
 
+static char version[] __initdata =
+	"EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n";
+
+#include "../lib8390.c"
+
 static unsigned int net_debug = NET_DEBUG;
 
 struct etherh_priv {
@@ -87,9 +97,6 @@
 MODULE_DESCRIPTION("EtherH/EtherM driver");
 MODULE_LICENSE("GPL");
 
-static char version[] __initdata =
-	"EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n";
-
 #define ETHERH500_DATAPORT	0x800	/* MEMC */
 #define ETHERH500_NS8390	0x000	/* MEMC */
 #define ETHERH500_CTRLPORT	0x800	/* IOC  */
@@ -177,7 +184,7 @@
 	switch (etherh_priv(dev)->id) {
 	case PROD_I3_ETHERLAN600:
 	case PROD_I3_ETHERLAN600A:
-		addr = (void *)dev->base_addr + EN0_RCNTHI;
+		addr = (void __iomem *)dev->base_addr + EN0_RCNTHI;
 
 		switch (dev->if_port) {
 		case IF_PORT_10BASE2:
@@ -218,7 +225,7 @@
 	switch (etherh_priv(dev)->id) {
 	case PROD_I3_ETHERLAN600:
 	case PROD_I3_ETHERLAN600A:
-		addr = (void *)dev->base_addr + EN0_RCNTHI;
+		addr = (void __iomem *)dev->base_addr + EN0_RCNTHI;
 		switch (dev->if_port) {
 		case IF_PORT_10BASE2:
 			stat = 1;
@@ -281,7 +288,7 @@
 etherh_reset(struct net_device *dev)
 {
 	struct ei_device *ei_local = netdev_priv(dev);
-	void __iomem *addr = (void *)dev->base_addr;
+	void __iomem *addr = (void __iomem *)dev->base_addr;
 
 	writeb(E8390_NODMA+E8390_PAGE0+E8390_STOP, addr);
 
@@ -327,7 +334,7 @@
 
 	ei_local->dmaing = 1;
 
-	addr = (void *)dev->base_addr;
+	addr = (void __iomem *)dev->base_addr;
 	dma_base = etherh_priv(dev)->dma_base;
 
 	count = (count + 1) & ~1;
@@ -360,7 +367,7 @@
 			printk(KERN_ERR "%s: timeout waiting for TX RDC\n",
 				dev->name);
 			etherh_reset (dev);
-			NS8390_init (dev, 1);
+			__NS8390_init (dev, 1);
 			break;
 		}
 
@@ -387,7 +394,7 @@
 
 	ei_local->dmaing = 1;
 
-	addr = (void *)dev->base_addr;
+	addr = (void __iomem *)dev->base_addr;
 	dma_base = etherh_priv(dev)->dma_base;
 
 	buf = skb->data;
@@ -427,7 +434,7 @@
 
 	ei_local->dmaing = 1;
 
-	addr = (void *)dev->base_addr;
+	addr = (void __iomem *)dev->base_addr;
 	dma_base = etherh_priv(dev)->dma_base;
 
 	writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
@@ -465,7 +472,7 @@
 		return -EINVAL;
 	}
 
-	if (request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))
+	if (request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev))
 		return -EAGAIN;
 
 	/*
@@ -491,7 +498,7 @@
 		etherh_setif(dev);
 
 	etherh_reset(dev);
-	ei_open(dev);
+	__ei_open(dev);
 
 	return 0;
 }
@@ -502,7 +509,7 @@
 static int
 etherh_close(struct net_device *dev)
 {
-	ei_close (dev);
+	__ei_close (dev);
 	free_irq (dev->irq, dev);
 	return 0;
 }
@@ -650,7 +657,7 @@
 	if (ret)
 		goto out;
 
-	dev = __alloc_ei_netdev(sizeof(struct etherh_priv));
+	dev = ____alloc_ei_netdev(sizeof(struct etherh_priv));
 	if (!dev) {
 		ret = -ENOMEM;
 		goto release;
@@ -736,7 +743,7 @@
 	ei_local->interface_num = 0;
 
 	etherh_reset(dev);
-	NS8390_init(dev, 0);
+	__NS8390_init(dev, 0);
 
 	ret = register_netdev(dev);
 	if (ret)
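
The etherh conversion above follows the lib8390.c scheme: the driver defines its register accessors and EI_SHIFT() before textually including ../lib8390.c, and then calls the double-underscore entry points the library builds with those accessors. A compressed, illustrative restatement of that arrangement:

/* Per-driver accessors must be visible before the library is included. */
#define EI_SHIFT(x)	(ei_local->reg_offset[x])
#define ei_inb(_p)	readb((void __iomem *)(_p))
#define ei_outb(_v,_p)	writeb(_v, (void __iomem *)(_p))
#define ei_inb_p(_p)	ei_inb(_p)
#define ei_outb_p(_v,_p) ei_outb(_v, _p)

#include "../lib8390.c"	/* provides __ei_open, __ei_close, __ei_interrupt,
			 * __NS8390_init and ____alloc_ei_netdev built with
			 * the accessors above */
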
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 8620a5b..56ae8ba 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -908,7 +908,7 @@
 	return 0;
 }
 
-void
+void __exit
 cleanup_module(void)
 {
 	unregister_netdev(dev_at1700);
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index d79489e..7e37ac8 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -1179,7 +1179,7 @@
 #ifdef MODULE
 static struct net_device *atarilance_dev;
 
-int init_module(void)
+int __init init_module(void)
 {
 	atarilance_dev = atarilance_probe(-1);
 	if (IS_ERR(atarilance_dev))
@@ -1187,7 +1187,7 @@
 	return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	unregister_netdev(atarilance_dev);
 	free_irq(atarilance_dev->irq, atarilance_dev);
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 7db3c8a..f0b6879 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -360,7 +360,8 @@
 	BUG_ON(!phydev);
 	BUG_ON(phydev->attached_dev);
 
-	phydev = phy_connect(dev, phydev->dev.bus_id, &au1000_adjust_link, 0);
+	phydev = phy_connect(dev, phydev->dev.bus_id, &au1000_adjust_link, 0,
+			PHY_INTERFACE_MODE_MII);
 
 	if (IS_ERR(phydev)) {
 		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 01b76d3..5bacb75 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -53,11 +53,12 @@
 
 #include "bnx2.h"
 #include "bnx2_fw.h"
+#include "bnx2_fw2.h"
 
 #define DRV_MODULE_NAME		"bnx2"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"1.4.45"
-#define DRV_MODULE_RELDATE	"September 29, 2006"
+#define DRV_MODULE_VERSION	"1.5.1"
+#define DRV_MODULE_RELDATE	"November 15, 2006"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -85,6 +86,7 @@
 	NC370F,
 	BCM5708,
 	BCM5708S,
+	BCM5709,
 } board_t;
 
 /* indexed by board_t, above */
@@ -98,6 +100,7 @@
 	{ "HP NC370F Multifunction Gigabit Server Adapter" },
 	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
 	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
+	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
 	};
 
 static struct pci_device_id bnx2_pci_tbl[] = {
@@ -115,6 +118,8 @@
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
 	{ 0, }
 };
 
@@ -236,8 +241,23 @@
 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
 {
 	offset += cid_addr;
-	REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
-	REG_WR(bp, BNX2_CTX_DATA, val);
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		int i;
+
+		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
+		REG_WR(bp, BNX2_CTX_CTX_CTRL,
+		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
+		for (i = 0; i < 5; i++) {
+			u32 val;
+			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
+			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
+				break;
+			udelay(5);
+		}
+	} else {
+		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
+		REG_WR(bp, BNX2_CTX_DATA, val);
+	}
 }
 
 static int
@@ -403,6 +423,14 @@
 {
 	int i;
 
+	for (i = 0; i < bp->ctx_pages; i++) {
+		if (bp->ctx_blk[i]) {
+			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
+					    bp->ctx_blk[i],
+					    bp->ctx_blk_mapping[i]);
+			bp->ctx_blk[i] = NULL;
+		}
+	}
 	if (bp->status_blk) {
 		pci_free_consistent(bp->pdev, bp->status_stats_size,
 				    bp->status_blk, bp->status_blk_mapping);
@@ -481,6 +509,18 @@
 
 	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
 
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
+		if (bp->ctx_pages == 0)
+			bp->ctx_pages = 1;
+		for (i = 0; i < bp->ctx_pages; i++) {
+			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
+						BCM_PAGE_SIZE,
+						&bp->ctx_blk_mapping[i]);
+			if (bp->ctx_blk[i] == NULL)
+				goto alloc_mem_err;
+		}
+	}
 	return 0;
 
 alloc_mem_err:
@@ -803,13 +843,13 @@
 
 	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
 		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
-		BNX2_EMAC_MODE_25G);
+		BNX2_EMAC_MODE_25G_MODE);
 
 	if (bp->link_up) {
 		switch (bp->line_speed) {
 			case SPEED_10:
-				if (CHIP_NUM(bp) == CHIP_NUM_5708) {
-					val |= BNX2_EMAC_MODE_PORT_MII_10;
+				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
+					val |= BNX2_EMAC_MODE_PORT_MII_10M;
 					break;
 				}
 				/* fall through */
@@ -817,7 +857,7 @@
 				val |= BNX2_EMAC_MODE_PORT_MII;
 				break;
 			case SPEED_2500:
-				val |= BNX2_EMAC_MODE_25G;
+				val |= BNX2_EMAC_MODE_25G_MODE;
 				/* fall through */
 			case SPEED_1000:
 				val |= BNX2_EMAC_MODE_PORT_GMII;
@@ -860,7 +900,7 @@
 	u32 bmsr;
 	u8 link_up;
 
-	if (bp->loopback == MAC_LOOPBACK) {
+	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
 		bp->link_up = 1;
 		return 0;
 	}
@@ -902,6 +942,7 @@
 			u32 bmcr;
 
 			bnx2_read_phy(bp, MII_BMCR, &bmcr);
+			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
 			if (!(bmcr & BMCR_ANENABLE)) {
 				bnx2_write_phy(bp, MII_BMCR, bmcr |
 					BMCR_ANENABLE);
@@ -988,7 +1029,21 @@
 		u32 new_bmcr;
 		int force_link_down = 0;
 
-		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
+		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
+
+		bnx2_read_phy(bp, MII_BMCR, &bmcr);
+		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
+		new_bmcr |= BMCR_SPEED1000;
+		if (bp->req_line_speed == SPEED_2500) {
+			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
+			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
+			if (!(up1 & BCM5708S_UP1_2G5)) {
+				up1 |= BCM5708S_UP1_2G5;
+				bnx2_write_phy(bp, BCM5708S_UP1, up1);
+				force_link_down = 1;
+			}
+		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
 			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
 			if (up1 & BCM5708S_UP1_2G5) {
 				up1 &= ~BCM5708S_UP1_2G5;
@@ -997,12 +1052,6 @@
 			}
 		}
 
-		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
-		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
-
-		bnx2_read_phy(bp, MII_BMCR, &bmcr);
-		new_bmcr = bmcr & ~BMCR_ANENABLE;
-		new_bmcr |= BMCR_SPEED1000;
 		if (bp->req_duplex == DUPLEX_FULL) {
 			adv |= ADVERTISE_1000XFULL;
 			new_bmcr |= BMCR_FULLDPLX;
@@ -1023,6 +1072,7 @@
 				bp->link_up = 0;
 				netif_carrier_off(bp->dev);
 				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
+				bnx2_report_link(bp);
 			}
 			bnx2_write_phy(bp, MII_ADVERTISE, adv);
 			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
@@ -1048,30 +1098,26 @@
 	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
 		/* Force a link down visible on the other side */
 		if (bp->link_up) {
-			int i;
-
 			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
-			for (i = 0; i < 110; i++) {
-				udelay(100);
-			}
+			spin_unlock_bh(&bp->phy_lock);
+			msleep(20);
+			spin_lock_bh(&bp->phy_lock);
 		}
 
 		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
 		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
 			BMCR_ANENABLE);
-		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
-			/* Speed up link-up time when the link partner
-			 * does not autonegotiate which is very common
-			 * in blade servers. Some blade servers use
-			 * IPMI for kerboard input and it's important
-			 * to minimize link disruptions. Autoneg. involves
-			 * exchanging base pages plus 3 next pages and
-			 * normally completes in about 120 msec.
-			 */
-			bp->current_interval = SERDES_AN_TIMEOUT;
-			bp->serdes_an_pending = 1;
-			mod_timer(&bp->timer, jiffies + bp->current_interval);
-		}
+		/* Speed up link-up time when the link partner
+		 * does not autonegotiate which is very common
+		 * in blade servers. Some blade servers use
+		 * IPMI for keyboard input and it's important
+		 * to minimize link disruptions. Autoneg. involves
+		 * exchanging base pages plus 3 next pages and
+		 * normally completes in about 120 msec.
+		 */
+		bp->current_interval = SERDES_AN_TIMEOUT;
+		bp->serdes_an_pending = 1;
+		mod_timer(&bp->timer, jiffies + bp->current_interval);
 	}
 
 	return 0;
@@ -1153,7 +1199,6 @@
 	}
 	if (new_bmcr != bmcr) {
 		u32 bmsr;
-		int i = 0;
 
 		bnx2_read_phy(bp, MII_BMSR, &bmsr);
 		bnx2_read_phy(bp, MII_BMSR, &bmsr);
@@ -1161,12 +1206,12 @@
 		if (bmsr & BMSR_LSTATUS) {
 			/* Force link down */
 			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
-			do {
-				udelay(100);
-				bnx2_read_phy(bp, MII_BMSR, &bmsr);
-				bnx2_read_phy(bp, MII_BMSR, &bmsr);
-				i++;
-			} while ((bmsr & BMSR_LSTATUS) && (i < 620));
+			spin_unlock_bh(&bp->phy_lock);
+			msleep(50);
+			spin_lock_bh(&bp->phy_lock);
+
+			bnx2_read_phy(bp, MII_BMSR, &bmsr);
+			bnx2_read_phy(bp, MII_BMSR, &bmsr);
 		}
 
 		bnx2_write_phy(bp, MII_BMCR, new_bmcr);
@@ -1258,9 +1303,8 @@
 {
 	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
 
-	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
-        	REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
-	}
+	if (CHIP_NUM(bp) == CHIP_NUM_5706)
+		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
 
 	if (bp->dev->mtu > 1500) {
 		u32 val;
@@ -1397,13 +1441,13 @@
 	for (i = 0; i < 10; i++) {
 		if (bnx2_test_link(bp) == 0)
 			break;
-		udelay(10);
+		msleep(100);
 	}
 
 	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
-		      BNX2_EMAC_MODE_25G);
+		      BNX2_EMAC_MODE_25G_MODE);
 
 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
 	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
@@ -1454,6 +1498,40 @@
 	return 0;
 }
 
+static int
+bnx2_init_5709_context(struct bnx2 *bp)
+{
+	int i, ret = 0;
+	u32 val;
+
+	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
+	val |= (BCM_PAGE_BITS - 8) << 16;
+	REG_WR(bp, BNX2_CTX_COMMAND, val);
+	for (i = 0; i < bp->ctx_pages; i++) {
+		int j;
+
+		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
+		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
+		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+		       (u64) bp->ctx_blk_mapping[i] >> 32);
+		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
+		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
+		for (j = 0; j < 10; j++) {
+
+			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
+				break;
+			udelay(5);
+		}
+		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
+			ret = -EBUSY;
+			break;
+		}
+	}
+	return ret;
+}
+
 static void
 bnx2_init_context(struct bnx2 *bp)
 {
@@ -1576,9 +1654,8 @@
 		return -ENOMEM;
 	}
 
-	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
-		skb_reserve(skb, 8 - align);
-	}
+	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
+		skb_reserve(skb, BNX2_RX_ALIGN - align);
 
 	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
 		PCI_DMA_FROMDEVICE);
@@ -2040,7 +2117,8 @@
 	if (dev->flags & IFF_PROMISC) {
 		/* Promiscuous mode. */
 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
-		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
+		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
+			     BNX2_RPM_SORT_USER0_PROM_VLAN;
 	}
 	else if (dev->flags & IFF_ALLMULTI) {
 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
@@ -2208,11 +2286,12 @@
 	}
 }
 
-static void
+static int
 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
 {
 	u32 offset;
 	u32 val;
+	int rc;
 
 	/* Halt the CPU. */
 	val = REG_RD_IND(bp, cpu_reg->mode);
@@ -2222,7 +2301,18 @@
 
 	/* Load the Text area. */
 	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
-	if (fw->text) {
+	if (fw->gz_text) {
+		u32 text_len;
+		void *text;
+
+		rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
+				 &text_len);
+		if (rc)
+			return rc;
+
+		fw->text = text;
+	}
+	if (fw->gz_text) {
 		int j;
 
 		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
@@ -2280,13 +2370,15 @@
 	val &= ~cpu_reg->mode_value_halt;
 	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
 	REG_WR_IND(bp, cpu_reg->mode, val);
+
+	return 0;
 }
 
 static int
 bnx2_init_cpus(struct bnx2 *bp)
 {
 	struct cpu_reg cpu_reg;
-	struct fw_info fw;
+	struct fw_info *fw;
 	int rc = 0;
 	void *text;
 	u32 text_len;
@@ -2323,44 +2415,15 @@
 	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
 	cpu_reg.mips_view_base = 0x8000000;
 
-	fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
-	fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
-	fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
-	fw.start_addr = bnx2_RXP_b06FwStartAddr;
+	if (CHIP_NUM(bp) == CHIP_NUM_5709)
+		fw = &bnx2_rxp_fw_09;
+	else
+		fw = &bnx2_rxp_fw_06;
 
-	fw.text_addr = bnx2_RXP_b06FwTextAddr;
-	fw.text_len = bnx2_RXP_b06FwTextLen;
-	fw.text_index = 0;
-
-	rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
-			 &text, &text_len);
+	rc = load_cpu_fw(bp, &cpu_reg, fw);
 	if (rc)
 		goto init_cpu_err;
 
-	fw.text = text;
-
-	fw.data_addr = bnx2_RXP_b06FwDataAddr;
-	fw.data_len = bnx2_RXP_b06FwDataLen;
-	fw.data_index = 0;
-	fw.data = bnx2_RXP_b06FwData;
-
-	fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
-	fw.sbss_len = bnx2_RXP_b06FwSbssLen;
-	fw.sbss_index = 0;
-	fw.sbss = bnx2_RXP_b06FwSbss;
-
-	fw.bss_addr = bnx2_RXP_b06FwBssAddr;
-	fw.bss_len = bnx2_RXP_b06FwBssLen;
-	fw.bss_index = 0;
-	fw.bss = bnx2_RXP_b06FwBss;
-
-	fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
-	fw.rodata_len = bnx2_RXP_b06FwRodataLen;
-	fw.rodata_index = 0;
-	fw.rodata = bnx2_RXP_b06FwRodata;
-
-	load_cpu_fw(bp, &cpu_reg, &fw);
-
 	/* Initialize the TX Processor. */
 	cpu_reg.mode = BNX2_TXP_CPU_MODE;
 	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
@@ -2375,44 +2438,15 @@
 	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
 	cpu_reg.mips_view_base = 0x8000000;
 
-	fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
-	fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
-	fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
-	fw.start_addr = bnx2_TXP_b06FwStartAddr;
+	if (CHIP_NUM(bp) == CHIP_NUM_5709)
+		fw = &bnx2_txp_fw_09;
+	else
+		fw = &bnx2_txp_fw_06;
 
-	fw.text_addr = bnx2_TXP_b06FwTextAddr;
-	fw.text_len = bnx2_TXP_b06FwTextLen;
-	fw.text_index = 0;
-
-	rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
-			 &text, &text_len);
+	rc = load_cpu_fw(bp, &cpu_reg, fw);
 	if (rc)
 		goto init_cpu_err;
 
-	fw.text = text;
-
-	fw.data_addr = bnx2_TXP_b06FwDataAddr;
-	fw.data_len = bnx2_TXP_b06FwDataLen;
-	fw.data_index = 0;
-	fw.data = bnx2_TXP_b06FwData;
-
-	fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
-	fw.sbss_len = bnx2_TXP_b06FwSbssLen;
-	fw.sbss_index = 0;
-	fw.sbss = bnx2_TXP_b06FwSbss;
-
-	fw.bss_addr = bnx2_TXP_b06FwBssAddr;
-	fw.bss_len = bnx2_TXP_b06FwBssLen;
-	fw.bss_index = 0;
-	fw.bss = bnx2_TXP_b06FwBss;
-
-	fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
-	fw.rodata_len = bnx2_TXP_b06FwRodataLen;
-	fw.rodata_index = 0;
-	fw.rodata = bnx2_TXP_b06FwRodata;
-
-	load_cpu_fw(bp, &cpu_reg, &fw);
-
 	/* Initialize the TX Patch-up Processor. */
 	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
 	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
@@ -2427,44 +2461,15 @@
 	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
 	cpu_reg.mips_view_base = 0x8000000;
 
-	fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
-	fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
-	fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
-	fw.start_addr = bnx2_TPAT_b06FwStartAddr;
+	if (CHIP_NUM(bp) == CHIP_NUM_5709)
+		fw = &bnx2_tpat_fw_09;
+	else
+		fw = &bnx2_tpat_fw_06;
 
-	fw.text_addr = bnx2_TPAT_b06FwTextAddr;
-	fw.text_len = bnx2_TPAT_b06FwTextLen;
-	fw.text_index = 0;
-
-	rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
-			 &text, &text_len);
+	rc = load_cpu_fw(bp, &cpu_reg, fw);
 	if (rc)
 		goto init_cpu_err;
 
-	fw.text = text;
-
-	fw.data_addr = bnx2_TPAT_b06FwDataAddr;
-	fw.data_len = bnx2_TPAT_b06FwDataLen;
-	fw.data_index = 0;
-	fw.data = bnx2_TPAT_b06FwData;
-
-	fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
-	fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
-	fw.sbss_index = 0;
-	fw.sbss = bnx2_TPAT_b06FwSbss;
-
-	fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
-	fw.bss_len = bnx2_TPAT_b06FwBssLen;
-	fw.bss_index = 0;
-	fw.bss = bnx2_TPAT_b06FwBss;
-
-	fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
-	fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
-	fw.rodata_index = 0;
-	fw.rodata = bnx2_TPAT_b06FwRodata;
-
-	load_cpu_fw(bp, &cpu_reg, &fw);
-
 	/* Initialize the Completion Processor. */
 	cpu_reg.mode = BNX2_COM_CPU_MODE;
 	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
@@ -2479,44 +2484,36 @@
 	cpu_reg.spad_base = BNX2_COM_SCRATCH;
 	cpu_reg.mips_view_base = 0x8000000;
 
-	fw.ver_major = bnx2_COM_b06FwReleaseMajor;
-	fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
-	fw.ver_fix = bnx2_COM_b06FwReleaseFix;
-	fw.start_addr = bnx2_COM_b06FwStartAddr;
+	if (CHIP_NUM(bp) == CHIP_NUM_5709)
+		fw = &bnx2_com_fw_09;
+	else
+		fw = &bnx2_com_fw_06;
 
-	fw.text_addr = bnx2_COM_b06FwTextAddr;
-	fw.text_len = bnx2_COM_b06FwTextLen;
-	fw.text_index = 0;
-
-	rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
-			 &text, &text_len);
+	rc = load_cpu_fw(bp, &cpu_reg, fw);
 	if (rc)
 		goto init_cpu_err;
 
-	fw.text = text;
+	/* Initialize the Command Processor. */
+	cpu_reg.mode = BNX2_CP_CPU_MODE;
+	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
+	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
+	cpu_reg.state = BNX2_CP_CPU_STATE;
+	cpu_reg.state_value_clear = 0xffffff;
+	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
+	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
+	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
+	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
+	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
+	cpu_reg.spad_base = BNX2_CP_SCRATCH;
+	cpu_reg.mips_view_base = 0x8000000;
 
-	fw.data_addr = bnx2_COM_b06FwDataAddr;
-	fw.data_len = bnx2_COM_b06FwDataLen;
-	fw.data_index = 0;
-	fw.data = bnx2_COM_b06FwData;
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		fw = &bnx2_cp_fw_09;
 
-	fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
-	fw.sbss_len = bnx2_COM_b06FwSbssLen;
-	fw.sbss_index = 0;
-	fw.sbss = bnx2_COM_b06FwSbss;
-
-	fw.bss_addr = bnx2_COM_b06FwBssAddr;
-	fw.bss_len = bnx2_COM_b06FwBssLen;
-	fw.bss_index = 0;
-	fw.bss = bnx2_COM_b06FwBss;
-
-	fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
-	fw.rodata_len = bnx2_COM_b06FwRodataLen;
-	fw.rodata_index = 0;
-	fw.rodata = bnx2_COM_b06FwRodata;
-
-	load_cpu_fw(bp, &cpu_reg, &fw);
-
+		rc = load_cpu_fw(bp, &cpu_reg, fw);
+		if (rc)
+			goto init_cpu_err;
+	}
 init_cpu_err:
 	bnx2_gunzip_end(bp);
 	return rc;
@@ -3288,31 +3285,44 @@
 	 * before we issue a reset. */
 	val = REG_RD(bp, BNX2_MISC_ID);
 
-	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
-	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
-	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
+		REG_RD(bp, BNX2_MISC_COMMAND);
+		udelay(5);
 
-	/* Chip reset. */
-	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
 
-	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
-	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
-		msleep(15);
+		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
 
-	/* Reset takes approximate 30 usec */
-	for (i = 0; i < 10; i++) {
-		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
-		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
-			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
-			break;
+	} else {
+		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
+
+		/* Chip reset. */
+		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+
+		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
+		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
+			current->state = TASK_UNINTERRUPTIBLE;
+			schedule_timeout(HZ / 50);
 		}
-		udelay(10);
-	}
 
-	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
-		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
-		printk(KERN_ERR PFX "Chip reset did not complete\n");
-		return -EBUSY;
+		/* Reset takes approximate 30 usec */
+		for (i = 0; i < 10; i++) {
+			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
+			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
+				break;
+			udelay(10);
+		}
+
+		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
+			printk(KERN_ERR PFX "Chip reset did not complete\n");
+			return -EBUSY;
+		}
 	}
 
 	/* Make sure byte swapping is properly configured. */
@@ -3390,7 +3400,10 @@
 
 	/* Initialize context mapping and zero out the quick contexts.  The
 	 * context block must have already been enabled. */
-	bnx2_init_context(bp);
+	if (CHIP_NUM(bp) == CHIP_NUM_5709)
+		bnx2_init_5709_context(bp);
+	else
+		bnx2_init_context(bp);
 
 	if ((rc = bnx2_init_cpus(bp)) != 0)
 		return rc;
@@ -3501,12 +3514,40 @@
 	return rc;
 }
 
+static void
+bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
+{
+	u32 val, offset0, offset1, offset2, offset3;
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		offset0 = BNX2_L2CTX_TYPE_XI;
+		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
+		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
+		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
+	} else {
+		offset0 = BNX2_L2CTX_TYPE;
+		offset1 = BNX2_L2CTX_CMD_TYPE;
+		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
+		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
+	}
+	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
+	CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
+
+	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
+	CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
+
+	val = (u64) bp->tx_desc_mapping >> 32;
+	CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
+
+	val = (u64) bp->tx_desc_mapping & 0xffffffff;
+	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
+}
 
 static void
 bnx2_init_tx_ring(struct bnx2 *bp)
 {
 	struct tx_bd *txbd;
-	u32 val;
+	u32 cid;
 
 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
 
@@ -3520,19 +3561,11 @@
 	bp->hw_tx_cons = 0;
 	bp->tx_prod_bseq = 0;
 
-	val = BNX2_L2CTX_TYPE_TYPE_L2;
-	val |= BNX2_L2CTX_TYPE_SIZE_L2;
-	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
+	cid = TX_CID;
+	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
+	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
 
-	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
-	val |= 8 << 16;
-	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
-
-	val = (u64) bp->tx_desc_mapping >> 32;
-	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
-
-	val = (u64) bp->tx_desc_mapping & 0xffffffff;
-	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
+	bnx2_init_tx_context(bp, cid);
 }
 
 static void
@@ -3545,8 +3578,8 @@
 
 	/* 8 for CRC and VLAN */
 	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
-	/* 8 for alignment */
-	bp->rx_buf_size = bp->rx_buf_use_size + 8;
+	/* hw alignment */
+	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
 
 	ring_prod = prod = bp->rx_prod = 0;
 	bp->rx_cons = 0;
@@ -3712,7 +3745,9 @@
 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
 		return rc;
 
+	spin_lock_bh(&bp->phy_lock);
 	bnx2_init_phy(bp);
+	spin_unlock_bh(&bp->phy_lock);
 	bnx2_set_link(bp);
 	return 0;
 }
@@ -3952,7 +3987,7 @@
 		bnx2_set_mac_loopback(bp);
 	}
 	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
-		bp->loopback = 0;
+		bp->loopback = PHY_LOOPBACK;
 		bnx2_set_phy_loopback(bp);
 	}
 	else
@@ -3992,8 +4027,8 @@
 	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
 	bp->tx_prod_bseq += pkt_size;
 
-	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
-	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
+	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
+	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
 
 	udelay(100);
 
@@ -4162,6 +4197,96 @@
 }
 
 static void
+bnx2_5706_serdes_timer(struct bnx2 *bp)
+{
+	spin_lock(&bp->phy_lock);
+	if (bp->serdes_an_pending)
+		bp->serdes_an_pending--;
+	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
+		u32 bmcr;
+
+		bp->current_interval = bp->timer_interval;
+
+		bnx2_read_phy(bp, MII_BMCR, &bmcr);
+
+		if (bmcr & BMCR_ANENABLE) {
+			u32 phy1, phy2;
+
+			bnx2_write_phy(bp, 0x1c, 0x7c00);
+			bnx2_read_phy(bp, 0x1c, &phy1);
+
+			bnx2_write_phy(bp, 0x17, 0x0f01);
+			bnx2_read_phy(bp, 0x15, &phy2);
+			bnx2_write_phy(bp, 0x17, 0x0f01);
+			bnx2_read_phy(bp, 0x15, &phy2);
+
+			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
+				!(phy2 & 0x20)) {	/* no CONFIG */
+
+				bmcr &= ~BMCR_ANENABLE;
+				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
+				bnx2_write_phy(bp, MII_BMCR, bmcr);
+				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
+			}
+		}
+	}
+	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
+		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
+		u32 phy2;
+
+		bnx2_write_phy(bp, 0x17, 0x0f01);
+		bnx2_read_phy(bp, 0x15, &phy2);
+		if (phy2 & 0x20) {
+			u32 bmcr;
+
+			bnx2_read_phy(bp, MII_BMCR, &bmcr);
+			bmcr |= BMCR_ANENABLE;
+			bnx2_write_phy(bp, MII_BMCR, bmcr);
+
+			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
+		}
+	} else
+		bp->current_interval = bp->timer_interval;
+
+	spin_unlock(&bp->phy_lock);
+}
+
+static void
+bnx2_5708_serdes_timer(struct bnx2 *bp)
+{
+	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
+		bp->serdes_an_pending = 0;
+		return;
+	}
+
+	spin_lock(&bp->phy_lock);
+	if (bp->serdes_an_pending)
+		bp->serdes_an_pending--;
+	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
+		u32 bmcr;
+
+		bnx2_read_phy(bp, MII_BMCR, &bmcr);
+
+		if (bmcr & BMCR_ANENABLE) {
+			bmcr &= ~BMCR_ANENABLE;
+			bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
+			bnx2_write_phy(bp, MII_BMCR, bmcr);
+			bp->current_interval = SERDES_FORCED_TIMEOUT;
+		} else {
+			bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
+			bmcr |= BMCR_ANENABLE;
+			bnx2_write_phy(bp, MII_BMCR, bmcr);
+			bp->serdes_an_pending = 2;
+			bp->current_interval = bp->timer_interval;
+		}
+
+	} else
+		bp->current_interval = bp->timer_interval;
+
+	spin_unlock(&bp->phy_lock);
+}
+
+static void
 bnx2_timer(unsigned long data)
 {
 	struct bnx2 *bp = (struct bnx2 *) data;
@@ -4178,64 +4303,11 @@
 
 	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
 
-	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
-	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
-
-		spin_lock(&bp->phy_lock);
-		if (bp->serdes_an_pending) {
-			bp->serdes_an_pending--;
-		}
-		else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
-			u32 bmcr;
-
-			bp->current_interval = bp->timer_interval;
-
-			bnx2_read_phy(bp, MII_BMCR, &bmcr);
-
-			if (bmcr & BMCR_ANENABLE) {
-				u32 phy1, phy2;
-
-				bnx2_write_phy(bp, 0x1c, 0x7c00);
-				bnx2_read_phy(bp, 0x1c, &phy1);
-
-				bnx2_write_phy(bp, 0x17, 0x0f01);
-				bnx2_read_phy(bp, 0x15, &phy2);
-				bnx2_write_phy(bp, 0x17, 0x0f01);
-				bnx2_read_phy(bp, 0x15, &phy2);
-
-				if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
-					!(phy2 & 0x20)) {	/* no CONFIG */
-
-					bmcr &= ~BMCR_ANENABLE;
-					bmcr |= BMCR_SPEED1000 |
-						BMCR_FULLDPLX;
-					bnx2_write_phy(bp, MII_BMCR, bmcr);
-					bp->phy_flags |=
-						PHY_PARALLEL_DETECT_FLAG;
-				}
-			}
-		}
-		else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
-			(bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
-			u32 phy2;
-
-			bnx2_write_phy(bp, 0x17, 0x0f01);
-			bnx2_read_phy(bp, 0x15, &phy2);
-			if (phy2 & 0x20) {
-				u32 bmcr;
-
-				bnx2_read_phy(bp, MII_BMCR, &bmcr);
-				bmcr |= BMCR_ANENABLE;
-				bnx2_write_phy(bp, MII_BMCR, bmcr);
-
-				bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
-
-			}
-		}
-		else
-			bp->current_interval = bp->timer_interval;
-
-		spin_unlock(&bp->phy_lock);
+	if (bp->phy_flags & PHY_SERDES_FLAG) {
+		if (CHIP_NUM(bp) == CHIP_NUM_5706)
+			bnx2_5706_serdes_timer(bp);
+		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
+			bnx2_5708_serdes_timer(bp);
 	}
 
 bnx2_restart_timer:
@@ -4339,9 +4411,9 @@
 }
 
 static void
-bnx2_reset_task(void *data)
+bnx2_reset_task(struct work_struct *work)
 {
-	struct bnx2 *bp = data;
+	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
 
 	if (!netif_running(bp->dev))
 		return;
@@ -4508,8 +4580,8 @@
 	prod = NEXT_TX_BD(prod);
 	bp->tx_prod_bseq += skb->len;
 
-	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
-	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
+	REG_WR16(bp, bp->tx_bidx_addr, prod);
+	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
 
 	mmiowb();
 
@@ -4743,10 +4815,14 @@
 	}
 	else {
 		if (bp->phy_flags & PHY_SERDES_FLAG) {
-			if ((cmd->speed != SPEED_1000) ||
-				(cmd->duplex != DUPLEX_FULL)) {
+			if ((cmd->speed != SPEED_1000 &&
+			     cmd->speed != SPEED_2500) ||
+			    (cmd->duplex != DUPLEX_FULL))
 				return -EINVAL;
-			}
+
+			if (cmd->speed == SPEED_2500 &&
+			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
+				return -EINVAL;
 		}
 		else if (cmd->speed == SPEED_1000) {
 			return -EINVAL;
@@ -4903,11 +4979,10 @@
 		msleep(20);
 
 		spin_lock_bh(&bp->phy_lock);
-		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
-			bp->current_interval = SERDES_AN_TIMEOUT;
-			bp->serdes_an_pending = 1;
-			mod_timer(&bp->timer, jiffies + bp->current_interval);
-		}
+
+		bp->current_interval = SERDES_AN_TIMEOUT;
+		bp->serdes_an_pending = 1;
+		mod_timer(&bp->timer, jiffies + bp->current_interval);
 	}
 
 	bnx2_read_phy(bp, MII_BMCR, &bmcr);
@@ -5288,6 +5363,8 @@
 
 	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
+		int i;
+
 		bnx2_netif_stop(bp);
 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
 		bnx2_free_skbs(bp);
@@ -5312,9 +5389,11 @@
 		}
 
 		/* wait for link up */
-		msleep_interruptible(3000);
-		if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
-			msleep_interruptible(4000);
+		for (i = 0; i < 7; i++) {
+			if (bp->link_up)
+				break;
+			msleep_interruptible(1000);
+		}
 	}
 
 	if (bnx2_test_nvram(bp) != 0) {
@@ -5604,13 +5683,6 @@
 		goto err_out_release;
 	}
 
-	bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
-	if (bp->pcix_cap == 0) {
-		dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
-		rc = -EIO;
-		goto err_out_release;
-	}
-
 	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
 		bp->flags |= USING_DAC_FLAG;
 		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
@@ -5630,10 +5702,10 @@
 	bp->pdev = pdev;
 
 	spin_lock_init(&bp->phy_lock);
-	INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
+	INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
-	mem_len = MB_GET_CID_ADDR(17);
+	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
 	dev->mem_end = dev->mem_start + mem_len;
 	dev->irq = pdev->irq;
 
@@ -5657,6 +5729,16 @@
 
 	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
 
+	if (CHIP_NUM(bp) != CHIP_NUM_5709) {
+		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
+		if (bp->pcix_cap == 0) {
+			dev_err(&pdev->dev,
+				"Cannot find PCIX capability, aborting.\n");
+			rc = -EIO;
+			goto err_out_unmap;
+		}
+	}
+
 	/* Get bus information. */
 	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
 	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
@@ -5776,10 +5858,15 @@
 	bp->phy_addr = 1;
 
 	/* Disable WOL support if we are running on a SERDES chip. */
-	if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		if (CHIP_BOND_ID(bp) != BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
+			bp->phy_flags |= PHY_SERDES_FLAG;
+	} else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
 		bp->phy_flags |= PHY_SERDES_FLAG;
+
+	if (bp->phy_flags & PHY_SERDES_FLAG) {
 		bp->flags |= NO_WOL_FLAG;
-		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
 			bp->phy_addr = 2;
 			reg = REG_RD_IND(bp, bp->shmem_base +
 					 BNX2_SHARED_HW_CFG_CONFIG);
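
Among the bnx2 changes above is the conversion to the no-data work_struct API: the work item is embedded in the adapter structure, the handler takes a struct work_struct * and recovers its container with container_of(), and INIT_WORK() drops its data argument. A minimal sketch of that pattern; INIT_WORK() and container_of() are the real kernel interfaces, everything else is illustrative.

#include <linux/workqueue.h>

struct example_adapter {
	struct work_struct reset_task;
	/* ... device state ... */
};

static void example_reset_task(struct work_struct *work)
{
	struct example_adapter *ap =
		container_of(work, struct example_adapter, reset_task);

	/* ... bring the device back up using 'ap' ... */
}

static void example_probe(struct example_adapter *ap)
{
	INIT_WORK(&ap->reset_task, example_reset_task);	/* no data pointer any more */
}
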
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index ca31904..13b6f9b 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -56,6 +56,7 @@
 
 };
 
+#define BNX2_RX_ALIGN			16
 
 /*
  *  status_block definition
@@ -90,6 +91,7 @@
 		#define STATUS_ATTN_BITS_DMAE_ABORT		(1L<<25)
 		#define STATUS_ATTN_BITS_FLSH_ABORT		(1L<<26)
 		#define STATUS_ATTN_BITS_GRC_ABORT		(1L<<27)
+		#define STATUS_ATTN_BITS_EPB_ERROR		(1L<<30)
 		#define STATUS_ATTN_BITS_PARITY_ERROR		(1L<<31)
 
 	u32 status_attn_bits_ack;
@@ -117,7 +119,8 @@
 	u16 status_completion_producer_index;
 	u16 status_cmd_consumer_index;
 	u16 status_idx;
-	u16 status_unused;
+	u8 status_unused;
+	u8 status_blk_num;
 #elif defined(__LITTLE_ENDIAN)
 	u16 status_tx_quick_consumer_index1;
 	u16 status_tx_quick_consumer_index0;
@@ -141,7 +144,8 @@
 	u16 status_rx_quick_consumer_index14;
 	u16 status_cmd_consumer_index;
 	u16 status_completion_producer_index;
-	u16 status_unused;
+	u8 status_blk_num;
+	u8 status_unused;
 	u16 status_idx;
 #endif
 };
@@ -301,6 +305,10 @@
 #define BNX2_L2CTX_TXP_BIDX				0x000000a8
 #define BNX2_L2CTX_TXP_BSEQ				0x000000ac
 
+#define BNX2_L2CTX_TYPE_XI				0x00000080
+#define BNX2_L2CTX_CMD_TYPE_XI				0x00000240
+#define BNX2_L2CTX_TBDR_BHADDR_HI_XI			0x00000258
+#define BNX2_L2CTX_TBDR_BHADDR_LO_XI			0x0000025c
 
 /*
  *  l2_bd_chain_context definition
@@ -328,11 +336,15 @@
 #define BNX2_PCICFG_MISC_CONFIG				0x00000068
 #define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP	 (1L<<2)
 #define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP	 (1L<<3)
+#define BNX2_PCICFG_MISC_CONFIG_RESERVED1		 (1L<<4)
 #define BNX2_PCICFG_MISC_CONFIG_CLOCK_CTL_ENA		 (1L<<5)
 #define BNX2_PCICFG_MISC_CONFIG_TARGET_GRC_WORD_SWAP	 (1L<<6)
 #define BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA		 (1L<<7)
 #define BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ		 (1L<<8)
 #define BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY		 (1L<<9)
+#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN1_SWAP_EN	 (1L<<10)
+#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN2_SWAP_EN	 (1L<<11)
+#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN3_SWAP_EN	 (1L<<12)
 #define BNX2_PCICFG_MISC_CONFIG_ASIC_METAL_REV		 (0xffL<<16)
 #define BNX2_PCICFG_MISC_CONFIG_ASIC_BASE_REV		 (0xfL<<24)
 #define BNX2_PCICFG_MISC_CONFIG_ASIC_ID			 (0xfL<<28)
@@ -347,6 +359,7 @@
 #define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_100		 (1L<<4)
 #define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_133		 (2L<<4)
 #define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_PCI_MODE	 (3L<<4)
+#define BNX2_PCICFG_MISC_STATUS_BAD_MEM_WRITE_BE	 (1L<<8)
 
 #define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS		0x00000070
 #define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET	 (0xfL<<0)
@@ -366,7 +379,7 @@
 #define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12	 (1L<<8)
 #define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_6	 (2L<<8)
 #define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_62	 (4L<<8)
-#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PLAY_DEAD	 (1L<<11)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_MIN_POWER	 (1L<<11)
 #define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED	 (0xfL<<12)
 #define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_100	 (0L<<12)
 #define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_80	 (1L<<12)
@@ -374,18 +387,21 @@
 #define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_40	 (4L<<12)
 #define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_25	 (8L<<12)
 #define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_STOP	 (1L<<16)
-#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_PLL_STOP	 (1L<<17)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_17	 (1L<<17)
 #define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_18	 (1L<<18)
-#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_USE_SPD_DET	 (1L<<19)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_19	 (1L<<19)
 #define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED	 (0xfffL<<20)
 
 #define BNX2_PCICFG_REG_WINDOW_ADDRESS			0x00000078
+#define BNX2_PCICFG_REG_WINDOW_ADDRESS_VAL		 (0xfffffL<<2)
+
 #define BNX2_PCICFG_REG_WINDOW				0x00000080
 #define BNX2_PCICFG_INT_ACK_CMD				0x00000084
 #define BNX2_PCICFG_INT_ACK_CMD_INDEX			 (0xffffL<<0)
 #define BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID		 (1L<<16)
 #define BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM	 (1L<<17)
 #define BNX2_PCICFG_INT_ACK_CMD_MASK_INT		 (1L<<18)
+#define BNX2_PCICFG_INT_ACK_CMD_INTERRUPT_NUM		 (0xfL<<24)
 
 #define BNX2_PCICFG_STATUS_BIT_SET_CMD			0x00000088
 #define BNX2_PCICFG_STATUS_BIT_CLEAR_CMD		0x0000008c
@@ -398,9 +414,11 @@
  *  offset: 0x400
  */
 #define BNX2_PCI_GRC_WINDOW_ADDR			0x00000400
-#define BNX2_PCI_GRC_WINDOW_ADDR_PCI_GRC_WINDOW_ADDR_VALUE	 (0x3ffffL<<8)
+#define BNX2_PCI_GRC_WINDOW_ADDR_VALUE			 (0x1ffL<<13)
+#define BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN		 (1L<<31)
 
 #define BNX2_PCI_CONFIG_1				0x00000404
+#define BNX2_PCI_CONFIG_1_RESERVED0			 (0xffL<<0)
 #define BNX2_PCI_CONFIG_1_READ_BOUNDARY			 (0x7L<<8)
 #define BNX2_PCI_CONFIG_1_READ_BOUNDARY_OFF		 (0L<<8)
 #define BNX2_PCI_CONFIG_1_READ_BOUNDARY_16		 (1L<<8)
@@ -419,6 +437,7 @@
 #define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_256		 (5L<<11)
 #define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_512		 (6L<<11)
 #define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_1024		 (7L<<11)
+#define BNX2_PCI_CONFIG_1_RESERVED1			 (0x3ffffL<<14)
 
 #define BNX2_PCI_CONFIG_2				0x00000408
 #define BNX2_PCI_CONFIG_2_BAR1_SIZE			 (0xfL<<0)
@@ -468,9 +487,13 @@
 #define BNX2_PCI_CONFIG_2_FORCE_32_BIT_MSTR		 (1L<<23)
 #define BNX2_PCI_CONFIG_2_FORCE_32_BIT_TGT		 (1L<<24)
 #define BNX2_PCI_CONFIG_2_KEEP_REQ_ASSERT		 (1L<<25)
+#define BNX2_PCI_CONFIG_2_RESERVED0			 (0x3fL<<26)
+#define BNX2_PCI_CONFIG_2_BAR_PREFETCH_XI		 (1L<<16)
+#define BNX2_PCI_CONFIG_2_RESERVED0_XI			 (0x7fffL<<17)
 
 #define BNX2_PCI_CONFIG_3				0x0000040c
 #define BNX2_PCI_CONFIG_3_STICKY_BYTE			 (0xffL<<0)
+#define BNX2_PCI_CONFIG_3_REG_STICKY_BYTE		 (0xffL<<8)
 #define BNX2_PCI_CONFIG_3_FORCE_PME			 (1L<<24)
 #define BNX2_PCI_CONFIG_3_PME_STATUS			 (1L<<25)
 #define BNX2_PCI_CONFIG_3_PME_ENABLE			 (1L<<26)
@@ -501,8 +524,10 @@
 #define BNX2_PCI_VPD_INTF_INTF_REQ			 (1L<<0)
 
 #define BNX2_PCI_VPD_ADDR_FLAG				0x0000042c
-#define BNX2_PCI_VPD_ADDR_FLAG_ADDRESS			 (0x1fff<<2)
-#define BNX2_PCI_VPD_ADDR_FLAG_WR			 (1<<15)
+#define BNX2_PCI_VPD_ADDR_FLAG_MSK			0x0000ffff
+#define BNX2_PCI_VPD_ADDR_FLAG_SL			0L
+#define BNX2_PCI_VPD_ADDR_FLAG_ADDRESS			 (0x1fffL<<2)
+#define BNX2_PCI_VPD_ADDR_FLAG_WR			 (1L<<15)
 
 #define BNX2_PCI_VPD_DATA				0x00000430
 #define BNX2_PCI_ID_VAL1				0x00000434
@@ -535,19 +560,26 @@
 #define BNX2_PCI_ID_VAL4_CAP_ENA_13			 (13L<<0)
 #define BNX2_PCI_ID_VAL4_CAP_ENA_14			 (14L<<0)
 #define BNX2_PCI_ID_VAL4_CAP_ENA_15			 (15L<<0)
+#define BNX2_PCI_ID_VAL4_RESERVED0			 (0x3L<<4)
 #define BNX2_PCI_ID_VAL4_PM_SCALE_PRG			 (0x3L<<6)
 #define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_0			 (0L<<6)
 #define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_1			 (1L<<6)
 #define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_2			 (2L<<6)
 #define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_3			 (3L<<6)
+#define BNX2_PCI_ID_VAL4_MSI_PV_MASK_CAP		 (1L<<8)
 #define BNX2_PCI_ID_VAL4_MSI_LIMIT			 (0x7L<<9)
-#define BNX2_PCI_ID_VAL4_MSI_ADVERTIZE			 (0x7L<<12)
+#define BNX2_PCI_ID_VAL4_MULTI_MSG_CAP			 (0x7L<<12)
 #define BNX2_PCI_ID_VAL4_MSI_ENABLE			 (1L<<15)
 #define BNX2_PCI_ID_VAL4_MAX_64_ADVERTIZE		 (1L<<16)
 #define BNX2_PCI_ID_VAL4_MAX_133_ADVERTIZE		 (1L<<17)
-#define BNX2_PCI_ID_VAL4_MAX_MEM_READ_SIZE		 (0x3L<<21)
-#define BNX2_PCI_ID_VAL4_MAX_SPLIT_SIZE			 (0x7L<<23)
-#define BNX2_PCI_ID_VAL4_MAX_CUMULATIVE_SIZE		 (0x7L<<26)
+#define BNX2_PCI_ID_VAL4_RESERVED2			 (0x7L<<18)
+#define BNX2_PCI_ID_VAL4_MAX_CUMULATIVE_SIZE_B21	 (0x3L<<21)
+#define BNX2_PCI_ID_VAL4_MAX_SPLIT_SIZE_B21		 (0x3L<<23)
+#define BNX2_PCI_ID_VAL4_MAX_CUMULATIVE_SIZE_B0		 (1L<<25)
+#define BNX2_PCI_ID_VAL4_MAX_MEM_READ_SIZE_B10		 (0x3L<<26)
+#define BNX2_PCI_ID_VAL4_MAX_SPLIT_SIZE_B0		 (1L<<28)
+#define BNX2_PCI_ID_VAL4_RESERVED3			 (0x7L<<29)
+#define BNX2_PCI_ID_VAL4_RESERVED3_XI			 (0xffffL<<16)
 
 #define BNX2_PCI_ID_VAL5				0x00000444
 #define BNX2_PCI_ID_VAL5_D1_SUPPORT			 (1L<<0)
@@ -556,6 +588,10 @@
 #define BNX2_PCI_ID_VAL5_PME_IN_D1			 (1L<<3)
 #define BNX2_PCI_ID_VAL5_PME_IN_D2			 (1L<<4)
 #define BNX2_PCI_ID_VAL5_PME_IN_D3_HOT			 (1L<<5)
+#define BNX2_PCI_ID_VAL5_RESERVED0_TE			 (0x3ffffffL<<6)
+#define BNX2_PCI_ID_VAL5_PM_VERSION_XI			 (0x7L<<6)
+#define BNX2_PCI_ID_VAL5_NO_SOFT_RESET_XI		 (1L<<9)
+#define BNX2_PCI_ID_VAL5_RESERVED0_XI			 (0x3fffffL<<10)
 
 #define BNX2_PCI_PCIX_EXTENDED_STATUS			0x00000448
 #define BNX2_PCI_PCIX_EXTENDED_STATUS_NO_SNOOP		 (1L<<8)
@@ -567,12 +603,91 @@
 #define BNX2_PCI_ID_VAL6_MAX_LAT			 (0xffL<<0)
 #define BNX2_PCI_ID_VAL6_MIN_GNT			 (0xffL<<8)
 #define BNX2_PCI_ID_VAL6_BIST				 (0xffL<<16)
+#define BNX2_PCI_ID_VAL6_RESERVED0			 (0xffL<<24)
 
 #define BNX2_PCI_MSI_DATA				0x00000450
-#define BNX2_PCI_MSI_DATA_PCI_MSI_DATA			 (0xffffL<<0)
+#define BNX2_PCI_MSI_DATA_MSI_DATA			 (0xffffL<<0)
 
 #define BNX2_PCI_MSI_ADDR_H				0x00000454
 #define BNX2_PCI_MSI_ADDR_L				0x00000458
+#define BNX2_PCI_MSI_ADDR_L_VAL				 (0x3fffffffL<<2)
+
+#define BNX2_PCI_CFG_ACCESS_CMD				0x0000045c
+#define BNX2_PCI_CFG_ACCESS_CMD_ADR			 (0x3fL<<2)
+#define BNX2_PCI_CFG_ACCESS_CMD_RD_REQ			 (1L<<27)
+#define BNX2_PCI_CFG_ACCESS_CMD_WR_REQ			 (0xfL<<28)
+
+#define BNX2_PCI_CFG_ACCESS_DATA			0x00000460
+#define BNX2_PCI_MSI_MASK				0x00000464
+#define BNX2_PCI_MSI_MASK_MSI_MASK			 (0xffffffffL<<0)
+
+#define BNX2_PCI_MSI_PEND				0x00000468
+#define BNX2_PCI_MSI_PEND_MSI_PEND			 (0xffffffffL<<0)
+
+#define BNX2_PCI_PM_DATA_C				0x0000046c
+#define BNX2_PCI_PM_DATA_C_PM_DATA_8_PRG		 (0xffL<<0)
+#define BNX2_PCI_PM_DATA_C_RESERVED0			 (0xffffffL<<8)
+
+#define BNX2_PCI_MSIX_CONTROL				0x000004c0
+#define BNX2_PCI_MSIX_CONTROL_MSIX_TBL_SIZ		 (0x7ffL<<0)
+#define BNX2_PCI_MSIX_CONTROL_RESERVED0			 (0x1fffffL<<11)
+
+#define BNX2_PCI_MSIX_TBL_OFF_BIR			0x000004c4
+#define BNX2_PCI_MSIX_TBL_OFF_BIR_MSIX_TBL_BIR		 (0x7L<<0)
+#define BNX2_PCI_MSIX_TBL_OFF_BIR_MSIX_TBL_OFF		 (0x1fffffffL<<3)
+
+#define BNX2_PCI_MSIX_PBA_OFF_BIT			0x000004c8
+#define BNX2_PCI_MSIX_PBA_OFF_BIT_MSIX_PBA_BIR		 (0x7L<<0)
+#define BNX2_PCI_MSIX_PBA_OFF_BIT_MSIX_PBA_OFF		 (0x1fffffffL<<3)
+
+#define BNX2_PCI_PCIE_CAPABILITY			0x000004d0
+#define BNX2_PCI_PCIE_CAPABILITY_INTERRUPT_MSG_NUM	 (0x1fL<<0)
+#define BNX2_PCI_PCIE_CAPABILITY_COMPLY_PCIE_1_1	 (1L<<5)
+
+#define BNX2_PCI_DEVICE_CAPABILITY			0x000004d4
+#define BNX2_PCI_DEVICE_CAPABILITY_MAX_PL_SIZ_SUPPORTED	 (0x7L<<0)
+#define BNX2_PCI_DEVICE_CAPABILITY_EXTENDED_TAG_SUPPORT	 (1L<<5)
+#define BNX2_PCI_DEVICE_CAPABILITY_L0S_ACCEPTABLE_LATENCY	 (0x7L<<6)
+#define BNX2_PCI_DEVICE_CAPABILITY_L1_ACCEPTABLE_LATENCY	 (0x7L<<9)
+#define BNX2_PCI_DEVICE_CAPABILITY_ROLE_BASED_ERR_RPT	 (1L<<15)
+
+#define BNX2_PCI_LINK_CAPABILITY			0x000004dc
+#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED		 (0xfL<<0)
+#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED_0001	 (1L<<0)
+#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED_0010	 (1L<<0)
+#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_WIDTH		 (0x1fL<<4)
+#define BNX2_PCI_LINK_CAPABILITY_CLK_POWER_MGMT		 (1L<<9)
+#define BNX2_PCI_LINK_CAPABILITY_ASPM_SUPPORT		 (0x3L<<10)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT		 (0x7L<<12)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT_101	 (5L<<12)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT_110	 (6L<<12)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT		 (0x7L<<15)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT_001	 (1L<<15)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT_010	 (2L<<15)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT	 (0x7L<<18)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT_101	 (5L<<18)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT_110	 (6L<<18)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT	 (0x7L<<21)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT_001	 (1L<<21)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT_010	 (2L<<21)
+#define BNX2_PCI_LINK_CAPABILITY_PORT_NUM		 (0xffL<<24)
+
+#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2		0x000004e4
+#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_CMPL_TO_RANGE_SUPP	 (0xfL<<0)
+#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_CMPL_TO_DISABL_SUPP	 (1L<<4)
+#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_RESERVED	 (0x7ffffffL<<5)
+
+#define BNX2_PCI_PCIE_LINK_CAPABILITY_2			0x000004e8
+#define BNX2_PCI_PCIE_LINK_CAPABILITY_2_RESERVED	 (0xffffffffL<<0)
+
+#define BNX2_PCI_GRC_WINDOW1_ADDR			0x00000610
+#define BNX2_PCI_GRC_WINDOW1_ADDR_VALUE			 (0x1ffL<<13)
+
+#define BNX2_PCI_GRC_WINDOW2_ADDR			0x00000614
+#define BNX2_PCI_GRC_WINDOW2_ADDR_VALUE			 (0x1ffL<<13)
+
+#define BNX2_PCI_GRC_WINDOW3_ADDR			0x00000618
+#define BNX2_PCI_GRC_WINDOW3_ADDR_VALUE			 (0x1ffL<<13)
 
 
 /*
@@ -582,13 +697,23 @@
 #define BNX2_MISC_COMMAND				0x00000800
 #define BNX2_MISC_COMMAND_ENABLE_ALL			 (1L<<0)
 #define BNX2_MISC_COMMAND_DISABLE_ALL			 (1L<<1)
-#define BNX2_MISC_COMMAND_CORE_RESET			 (1L<<4)
-#define BNX2_MISC_COMMAND_HARD_RESET			 (1L<<5)
+#define BNX2_MISC_COMMAND_SW_RESET			 (1L<<4)
+#define BNX2_MISC_COMMAND_POR_RESET			 (1L<<5)
+#define BNX2_MISC_COMMAND_HD_RESET			 (1L<<6)
+#define BNX2_MISC_COMMAND_CMN_SW_RESET			 (1L<<7)
 #define BNX2_MISC_COMMAND_PAR_ERROR			 (1L<<8)
+#define BNX2_MISC_COMMAND_CS16_ERR			 (1L<<9)
+#define BNX2_MISC_COMMAND_CS16_ERR_LOC			 (0xfL<<12)
 #define BNX2_MISC_COMMAND_PAR_ERR_RAM			 (0x7fL<<16)
+#define BNX2_MISC_COMMAND_POWERDOWN_EVENT		 (1L<<23)
+#define BNX2_MISC_COMMAND_SW_SHUTDOWN			 (1L<<24)
+#define BNX2_MISC_COMMAND_SHUTDOWN_EN			 (1L<<25)
+#define BNX2_MISC_COMMAND_DINTEG_ATTN_EN		 (1L<<26)
+#define BNX2_MISC_COMMAND_PCIE_LINK_IN_L23		 (1L<<27)
+#define BNX2_MISC_COMMAND_PCIE_DIS			 (1L<<28)
 
 #define BNX2_MISC_CFG					0x00000804
-#define BNX2_MISC_CFG_PCI_GRC_TMOUT			 (1L<<0)
+#define BNX2_MISC_CFG_GRC_TMOUT				 (1L<<0)
 #define BNX2_MISC_CFG_NVM_WR_EN				 (0x3L<<1)
 #define BNX2_MISC_CFG_NVM_WR_EN_PROTECT			 (0L<<1)
 #define BNX2_MISC_CFG_NVM_WR_EN_PCI			 (1L<<1)
@@ -596,16 +721,45 @@
 #define BNX2_MISC_CFG_NVM_WR_EN_ALLOW2			 (3L<<1)
 #define BNX2_MISC_CFG_BIST_EN				 (1L<<3)
 #define BNX2_MISC_CFG_CK25_OUT_ALT_SRC			 (1L<<4)
-#define BNX2_MISC_CFG_BYPASS_BSCAN			 (1L<<5)
-#define BNX2_MISC_CFG_BYPASS_EJTAG			 (1L<<6)
+#define BNX2_MISC_CFG_RESERVED5_TE			 (1L<<5)
+#define BNX2_MISC_CFG_RESERVED6_TE			 (1L<<6)
 #define BNX2_MISC_CFG_CLK_CTL_OVERRIDE			 (1L<<7)
-#define BNX2_MISC_CFG_LEDMODE				 (0x3L<<8)
+#define BNX2_MISC_CFG_LEDMODE				 (0x7L<<8)
 #define BNX2_MISC_CFG_LEDMODE_MAC			 (0L<<8)
-#define BNX2_MISC_CFG_LEDMODE_GPHY1			 (1L<<8)
-#define BNX2_MISC_CFG_LEDMODE_GPHY2			 (2L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY1_TE			 (1L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY2_TE			 (2L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY3_TE			 (3L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY4_TE			 (4L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY5_TE			 (5L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY6_TE			 (6L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY7_TE			 (7L<<8)
+#define BNX2_MISC_CFG_MCP_GRC_TMOUT_TE			 (1L<<11)
+#define BNX2_MISC_CFG_DBU_GRC_TMOUT_TE			 (1L<<12)
+#define BNX2_MISC_CFG_LEDMODE_XI			 (0xfL<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC_XI			 (0L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY1_XI			 (1L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY2_XI			 (2L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY3_XI			 (3L<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC2_XI			 (4L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY4_XI			 (5L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY5_XI			 (6L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY6_XI			 (7L<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC3_XI			 (8L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY7_XI			 (9L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY8_XI			 (10L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY9_XI			 (11L<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC4_XI			 (12L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY10_XI			 (13L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY11_XI			 (14L<<8)
+#define BNX2_MISC_CFG_LEDMODE_UNUSED_XI			 (15L<<8)
+#define BNX2_MISC_CFG_PORT_SELECT_XI			 (1L<<13)
+#define BNX2_MISC_CFG_PARITY_MODE_XI			 (1L<<14)
 
 #define BNX2_MISC_ID					0x00000808
 #define BNX2_MISC_ID_BOND_ID				 (0xfL<<0)
+#define BNX2_MISC_ID_BOND_ID_X				 (0L<<0)
+#define BNX2_MISC_ID_BOND_ID_C				 (3L<<0)
+#define BNX2_MISC_ID_BOND_ID_S				 (12L<<0)
 #define BNX2_MISC_ID_CHIP_METAL				 (0xffL<<4)
 #define BNX2_MISC_ID_CHIP_REV				 (0xfL<<12)
 #define BNX2_MISC_ID_CHIP_NUM				 (0xffffL<<16)
@@ -639,6 +793,8 @@
 #define BNX2_MISC_ENABLE_STATUS_BITS_TIMER_ENABLE	 (1L<<25)
 #define BNX2_MISC_ENABLE_STATUS_BITS_DMA_ENGINE_ENABLE	 (1L<<26)
 #define BNX2_MISC_ENABLE_STATUS_BITS_UMP_ENABLE		 (1L<<27)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RV2P_CMD_SCHEDULER_ENABLE	 (1L<<28)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RSVD_FUTURE_ENABLE	 (0x7L<<29)
 
 #define BNX2_MISC_ENABLE_SET_BITS			0x00000810
 #define BNX2_MISC_ENABLE_SET_BITS_TX_SCHEDULER_ENABLE	 (1L<<0)
@@ -669,6 +825,8 @@
 #define BNX2_MISC_ENABLE_SET_BITS_TIMER_ENABLE		 (1L<<25)
 #define BNX2_MISC_ENABLE_SET_BITS_DMA_ENGINE_ENABLE	 (1L<<26)
 #define BNX2_MISC_ENABLE_SET_BITS_UMP_ENABLE		 (1L<<27)
+#define BNX2_MISC_ENABLE_SET_BITS_RV2P_CMD_SCHEDULER_ENABLE	 (1L<<28)
+#define BNX2_MISC_ENABLE_SET_BITS_RSVD_FUTURE_ENABLE	 (0x7L<<29)
 
 #define BNX2_MISC_ENABLE_CLR_BITS			0x00000814
 #define BNX2_MISC_ENABLE_CLR_BITS_TX_SCHEDULER_ENABLE	 (1L<<0)
@@ -699,6 +857,8 @@
 #define BNX2_MISC_ENABLE_CLR_BITS_TIMER_ENABLE		 (1L<<25)
 #define BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE	 (1L<<26)
 #define BNX2_MISC_ENABLE_CLR_BITS_UMP_ENABLE		 (1L<<27)
+#define BNX2_MISC_ENABLE_CLR_BITS_RV2P_CMD_SCHEDULER_ENABLE	 (1L<<28)
+#define BNX2_MISC_ENABLE_CLR_BITS_RSVD_FUTURE_ENABLE	 (0x7L<<29)
 
 #define BNX2_MISC_CLOCK_CONTROL_BITS			0x00000818
 #define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET	 (0xfL<<0)
@@ -718,30 +878,41 @@
 #define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12	 (1L<<8)
 #define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_6	 (2L<<8)
 #define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_62	 (4L<<8)
-#define BNX2_MISC_CLOCK_CONTROL_BITS_PLAY_DEAD		 (1L<<11)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED0_XI	 (0x7L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_MIN_POWER		 (1L<<11)
 #define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED	 (0xfL<<12)
 #define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_100	 (0L<<12)
 #define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_80	 (1L<<12)
 #define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_50	 (2L<<12)
 #define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_40	 (4L<<12)
 #define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_25	 (8L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED1_XI	 (0xfL<<12)
 #define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_STOP	 (1L<<16)
-#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_PLL_STOP	 (1L<<17)
-#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_18	 (1L<<18)
-#define BNX2_MISC_CLOCK_CONTROL_BITS_USE_SPD_DET	 (1L<<19)
-#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED		 (0xfffL<<20)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_17_TE	 (1L<<17)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_18_TE	 (1L<<18)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_19_TE	 (1L<<19)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_TE	 (0xfffL<<20)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_MGMT_XI	 (1L<<17)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED2_XI	 (0x3fL<<18)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_VCO_XI	 (0x7L<<24)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED3_XI	 (1L<<27)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_XI	 (0xfL<<28)
 
-#define BNX2_MISC_GPIO					0x0000081c
-#define BNX2_MISC_GPIO_VALUE				 (0xffL<<0)
-#define BNX2_MISC_GPIO_SET				 (0xffL<<8)
-#define BNX2_MISC_GPIO_CLR				 (0xffL<<16)
-#define BNX2_MISC_GPIO_FLOAT				 (0xffL<<24)
+#define BNX2_MISC_SPIO					0x0000081c
+#define BNX2_MISC_SPIO_VALUE				 (0xffL<<0)
+#define BNX2_MISC_SPIO_SET				 (0xffL<<8)
+#define BNX2_MISC_SPIO_CLR				 (0xffL<<16)
+#define BNX2_MISC_SPIO_FLOAT				 (0xffL<<24)
 
-#define BNX2_MISC_GPIO_INT				0x00000820
-#define BNX2_MISC_GPIO_INT_INT_STATE			 (0xfL<<0)
-#define BNX2_MISC_GPIO_INT_OLD_VALUE			 (0xfL<<8)
-#define BNX2_MISC_GPIO_INT_OLD_SET			 (0xfL<<16)
-#define BNX2_MISC_GPIO_INT_OLD_CLR			 (0xfL<<24)
+#define BNX2_MISC_SPIO_INT				0x00000820
+#define BNX2_MISC_SPIO_INT_INT_STATE_TE			 (0xfL<<0)
+#define BNX2_MISC_SPIO_INT_OLD_VALUE_TE			 (0xfL<<8)
+#define BNX2_MISC_SPIO_INT_OLD_SET_TE			 (0xfL<<16)
+#define BNX2_MISC_SPIO_INT_OLD_CLR_TE			 (0xfL<<24)
+#define BNX2_MISC_SPIO_INT_INT_STATE_XI			 (0xffL<<0)
+#define BNX2_MISC_SPIO_INT_OLD_VALUE_XI			 (0xffL<<8)
+#define BNX2_MISC_SPIO_INT_OLD_SET_XI			 (0xffL<<16)
+#define BNX2_MISC_SPIO_INT_OLD_CLR_XI			 (0xffL<<24)
 
 #define BNX2_MISC_CONFIG_LFSR				0x00000824
 #define BNX2_MISC_CONFIG_LFSR_DIV			 (0xffffL<<0)
@@ -775,6 +946,8 @@
 #define BNX2_MISC_LFSR_MASK_BITS_TIMER_ENABLE		 (1L<<25)
 #define BNX2_MISC_LFSR_MASK_BITS_DMA_ENGINE_ENABLE	 (1L<<26)
 #define BNX2_MISC_LFSR_MASK_BITS_UMP_ENABLE		 (1L<<27)
+#define BNX2_MISC_LFSR_MASK_BITS_RV2P_CMD_SCHEDULER_ENABLE	 (1L<<28)
+#define BNX2_MISC_LFSR_MASK_BITS_RSVD_FUTURE_ENABLE	 (0x7L<<29)
 
 #define BNX2_MISC_ARB_REQ0				0x0000082c
 #define BNX2_MISC_ARB_REQ1				0x00000830
@@ -831,22 +1004,12 @@
 #define BNX2_MISC_ARB_GNT3_30				 (0x7L<<24)
 #define BNX2_MISC_ARB_GNT3_31				 (0x7L<<28)
 
-#define BNX2_MISC_PRBS_CONTROL				0x00000878
-#define BNX2_MISC_PRBS_CONTROL_EN			 (1L<<0)
-#define BNX2_MISC_PRBS_CONTROL_RSTB			 (1L<<1)
-#define BNX2_MISC_PRBS_CONTROL_INV			 (1L<<2)
-#define BNX2_MISC_PRBS_CONTROL_ERR_CLR			 (1L<<3)
-#define BNX2_MISC_PRBS_CONTROL_ORDER			 (0x3L<<4)
-#define BNX2_MISC_PRBS_CONTROL_ORDER_7TH		 (0L<<4)
-#define BNX2_MISC_PRBS_CONTROL_ORDER_15TH		 (1L<<4)
-#define BNX2_MISC_PRBS_CONTROL_ORDER_23RD		 (2L<<4)
-#define BNX2_MISC_PRBS_CONTROL_ORDER_31ST		 (3L<<4)
+#define BNX2_MISC_RESERVED1				0x00000878
+#define BNX2_MISC_RESERVED1_MISC_RESERVED1_VALUE	 (0x3fL<<0)
 
-#define BNX2_MISC_PRBS_STATUS				0x0000087c
-#define BNX2_MISC_PRBS_STATUS_LOCK			 (1L<<0)
-#define BNX2_MISC_PRBS_STATUS_STKY			 (1L<<1)
-#define BNX2_MISC_PRBS_STATUS_ERRORS			 (0x3fffL<<2)
-#define BNX2_MISC_PRBS_STATUS_STATE			 (0xfL<<16)
+#define BNX2_MISC_RESERVED2				0x0000087c
+#define BNX2_MISC_RESERVED2_PCIE_DIS			 (1L<<0)
+#define BNX2_MISC_RESERVED2_LINK_IN_L23			 (1L<<1)
 
 #define BNX2_MISC_SM_ASF_CONTROL			0x00000880
 #define BNX2_MISC_SM_ASF_CONTROL_ASF_RST		 (1L<<0)
@@ -857,13 +1020,15 @@
 #define BNX2_MISC_SM_ASF_CONTROL_PL_TO			 (1L<<5)
 #define BNX2_MISC_SM_ASF_CONTROL_RT_TO			 (1L<<6)
 #define BNX2_MISC_SM_ASF_CONTROL_SMB_EVENT		 (1L<<7)
-#define BNX2_MISC_SM_ASF_CONTROL_RES			 (0xfL<<8)
+#define BNX2_MISC_SM_ASF_CONTROL_STRETCH_EN		 (1L<<8)
+#define BNX2_MISC_SM_ASF_CONTROL_STRETCH_PULSE		 (1L<<9)
+#define BNX2_MISC_SM_ASF_CONTROL_RES			 (0x3L<<10)
 #define BNX2_MISC_SM_ASF_CONTROL_SMB_EN			 (1L<<12)
 #define BNX2_MISC_SM_ASF_CONTROL_SMB_BB_EN		 (1L<<13)
 #define BNX2_MISC_SM_ASF_CONTROL_SMB_NO_ADDR_FILT	 (1L<<14)
 #define BNX2_MISC_SM_ASF_CONTROL_SMB_AUTOREAD		 (1L<<15)
-#define BNX2_MISC_SM_ASF_CONTROL_NIC_SMB_ADDR1		 (0x3fL<<16)
-#define BNX2_MISC_SM_ASF_CONTROL_NIC_SMB_ADDR2		 (0x3fL<<24)
+#define BNX2_MISC_SM_ASF_CONTROL_NIC_SMB_ADDR1		 (0x7fL<<16)
+#define BNX2_MISC_SM_ASF_CONTROL_NIC_SMB_ADDR2		 (0x7fL<<23)
 #define BNX2_MISC_SM_ASF_CONTROL_EN_NIC_SMB_ADDR_0	 (1L<<30)
 #define BNX2_MISC_SM_ASF_CONTROL_SMB_EARLY_ATTN		 (1L<<31)
 
@@ -891,13 +1056,13 @@
 #define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS		 (0xfL<<20)
 #define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_OK		 (0L<<20)
 #define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_FIRST_NACK	 (1L<<20)
-#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_SUB_NACK	 (9L<<20)
 #define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_UFLOW		 (2L<<20)
 #define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_STOP		 (3L<<20)
 #define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_TIMEOUT	 (4L<<20)
 #define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_FIRST_LOST	 (5L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_BADACK		 (6L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_SUB_NACK	 (9L<<20)
 #define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_SUB_LOST	 (0xdL<<20)
-#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_BADACK		 (0x6L<<20)
 #define BNX2_MISC_SMB_OUT_SMB_OUT_SLAVEMODE		 (1L<<24)
 #define BNX2_MISC_SMB_OUT_SMB_OUT_DAT_EN		 (1L<<25)
 #define BNX2_MISC_SMB_OUT_SMB_OUT_DAT_IN		 (1L<<26)
@@ -955,6 +1120,38 @@
 #define BNX2_MISC_PERR_ENA0_RDE_MISC_RPC		 (1L<<29)
 #define BNX2_MISC_PERR_ENA0_RDE_MISC_RPM		 (1L<<30)
 #define BNX2_MISC_PERR_ENA0_RV2P_MISC_CB0REGS		 (1L<<31)
+#define BNX2_MISC_PERR_ENA0_COM_DMAE_PERR_EN_XI		 (1L<<0)
+#define BNX2_MISC_PERR_ENA0_CP_DMAE_PERR_EN_XI		 (1L<<1)
+#define BNX2_MISC_PERR_ENA0_RPM_ACPIBEMEM_PERR_EN_XI	 (1L<<2)
+#define BNX2_MISC_PERR_ENA0_CTX_USAGE_CNT_PERR_EN_XI	 (1L<<3)
+#define BNX2_MISC_PERR_ENA0_CTX_PGTBL_PERR_EN_XI	 (1L<<4)
+#define BNX2_MISC_PERR_ENA0_CTX_CACHE_PERR_EN_XI	 (1L<<5)
+#define BNX2_MISC_PERR_ENA0_CTX_MIRROR_PERR_EN_XI	 (1L<<6)
+#define BNX2_MISC_PERR_ENA0_COM_CTXC_PERR_EN_XI		 (1L<<7)
+#define BNX2_MISC_PERR_ENA0_COM_SCPAD_PERR_EN_XI	 (1L<<8)
+#define BNX2_MISC_PERR_ENA0_CP_CTXC_PERR_EN_XI		 (1L<<9)
+#define BNX2_MISC_PERR_ENA0_CP_SCPAD_PERR_EN_XI		 (1L<<10)
+#define BNX2_MISC_PERR_ENA0_RXP_RBUFC_PERR_EN_XI	 (1L<<11)
+#define BNX2_MISC_PERR_ENA0_RXP_CTXC_PERR_EN_XI		 (1L<<12)
+#define BNX2_MISC_PERR_ENA0_RXP_SCPAD_PERR_EN_XI	 (1L<<13)
+#define BNX2_MISC_PERR_ENA0_TPAT_SCPAD_PERR_EN_XI	 (1L<<14)
+#define BNX2_MISC_PERR_ENA0_TXP_CTXC_PERR_EN_XI		 (1L<<15)
+#define BNX2_MISC_PERR_ENA0_TXP_SCPAD_PERR_EN_XI	 (1L<<16)
+#define BNX2_MISC_PERR_ENA0_CS_TMEM_PERR_EN_XI		 (1L<<17)
+#define BNX2_MISC_PERR_ENA0_MQ_CTX_PERR_EN_XI		 (1L<<18)
+#define BNX2_MISC_PERR_ENA0_RPM_DFIFOMEM_PERR_EN_XI	 (1L<<19)
+#define BNX2_MISC_PERR_ENA0_RPC_DFIFOMEM_PERR_EN_XI	 (1L<<20)
+#define BNX2_MISC_PERR_ENA0_RBUF_PTRMEM_PERR_EN_XI	 (1L<<21)
+#define BNX2_MISC_PERR_ENA0_RBUF_DATAMEM_PERR_EN_XI	 (1L<<22)
+#define BNX2_MISC_PERR_ENA0_RV2P_P2IRAM_PERR_EN_XI	 (1L<<23)
+#define BNX2_MISC_PERR_ENA0_RV2P_P1IRAM_PERR_EN_XI	 (1L<<24)
+#define BNX2_MISC_PERR_ENA0_RV2P_CB1REGS_PERR_EN_XI	 (1L<<25)
+#define BNX2_MISC_PERR_ENA0_RV2P_CB0REGS_PERR_EN_XI	 (1L<<26)
+#define BNX2_MISC_PERR_ENA0_TPBUF_PERR_EN_XI		 (1L<<27)
+#define BNX2_MISC_PERR_ENA0_THBUF_PERR_EN_XI		 (1L<<28)
+#define BNX2_MISC_PERR_ENA0_TDMA_PERR_EN_XI		 (1L<<29)
+#define BNX2_MISC_PERR_ENA0_TBDC_PERR_EN_XI		 (1L<<30)
+#define BNX2_MISC_PERR_ENA0_TSCH_LR_PERR_EN_XI		 (1L<<31)
 
 #define BNX2_MISC_PERR_ENA1				0x000008a8
 #define BNX2_MISC_PERR_ENA1_RV2P_MISC_CB1REGS		 (1L<<0)
@@ -989,6 +1186,35 @@
 #define BNX2_MISC_PERR_ENA1_RXPQ_MISC			 (1L<<29)
 #define BNX2_MISC_PERR_ENA1_RXPCQ_MISC			 (1L<<30)
 #define BNX2_MISC_PERR_ENA1_RLUPQ_MISC			 (1L<<31)
+#define BNX2_MISC_PERR_ENA1_RBDC_PERR_EN_XI		 (1L<<0)
+#define BNX2_MISC_PERR_ENA1_RDMA_DFIFO_PERR_EN_XI	 (1L<<2)
+#define BNX2_MISC_PERR_ENA1_HC_STATS_PERR_EN_XI		 (1L<<3)
+#define BNX2_MISC_PERR_ENA1_HC_MSIX_PERR_EN_XI		 (1L<<4)
+#define BNX2_MISC_PERR_ENA1_HC_PRODUCSTB_PERR_EN_XI	 (1L<<5)
+#define BNX2_MISC_PERR_ENA1_HC_CONSUMSTB_PERR_EN_XI	 (1L<<6)
+#define BNX2_MISC_PERR_ENA1_TPATQ_PERR_EN_XI		 (1L<<7)
+#define BNX2_MISC_PERR_ENA1_MCPQ_PERR_EN_XI		 (1L<<8)
+#define BNX2_MISC_PERR_ENA1_TDMAQ_PERR_EN_XI		 (1L<<9)
+#define BNX2_MISC_PERR_ENA1_TXPQ_PERR_EN_XI		 (1L<<10)
+#define BNX2_MISC_PERR_ENA1_COMTQ_PERR_EN_XI		 (1L<<11)
+#define BNX2_MISC_PERR_ENA1_COMQ_PERR_EN_XI		 (1L<<12)
+#define BNX2_MISC_PERR_ENA1_RLUPQ_PERR_EN_XI		 (1L<<13)
+#define BNX2_MISC_PERR_ENA1_RXPQ_PERR_EN_XI		 (1L<<14)
+#define BNX2_MISC_PERR_ENA1_RV2PPQ_PERR_EN_XI		 (1L<<15)
+#define BNX2_MISC_PERR_ENA1_RDMAQ_PERR_EN_XI		 (1L<<16)
+#define BNX2_MISC_PERR_ENA1_TASQ_PERR_EN_XI		 (1L<<17)
+#define BNX2_MISC_PERR_ENA1_TBDRQ_PERR_EN_XI		 (1L<<18)
+#define BNX2_MISC_PERR_ENA1_TSCHQ_PERR_EN_XI		 (1L<<19)
+#define BNX2_MISC_PERR_ENA1_COMXQ_PERR_EN_XI		 (1L<<20)
+#define BNX2_MISC_PERR_ENA1_RXPCQ_PERR_EN_XI		 (1L<<21)
+#define BNX2_MISC_PERR_ENA1_RV2PTQ_PERR_EN_XI		 (1L<<22)
+#define BNX2_MISC_PERR_ENA1_RV2PMQ_PERR_EN_XI		 (1L<<23)
+#define BNX2_MISC_PERR_ENA1_CPQ_PERR_EN_XI		 (1L<<24)
+#define BNX2_MISC_PERR_ENA1_CSQ_PERR_EN_XI		 (1L<<25)
+#define BNX2_MISC_PERR_ENA1_RLUP_CID_PERR_EN_XI		 (1L<<26)
+#define BNX2_MISC_PERR_ENA1_RV2PCS_TMEM_PERR_EN_XI	 (1L<<27)
+#define BNX2_MISC_PERR_ENA1_RV2PCSQ_PERR_EN_XI		 (1L<<28)
+#define BNX2_MISC_PERR_ENA1_MQ_IDX_PERR_EN_XI		 (1L<<29)
 
 #define BNX2_MISC_PERR_ENA2				0x000008ac
 #define BNX2_MISC_PERR_ENA2_COMQ_MISC			 (1L<<0)
@@ -1000,19 +1226,498 @@
 #define BNX2_MISC_PERR_ENA2_TDMAQ_MISC			 (1L<<6)
 #define BNX2_MISC_PERR_ENA2_TPATQ_MISC			 (1L<<7)
 #define BNX2_MISC_PERR_ENA2_TASQ_MISC			 (1L<<8)
+#define BNX2_MISC_PERR_ENA2_TGT_FIFO_PERR_EN_XI		 (1L<<0)
+#define BNX2_MISC_PERR_ENA2_UMP_TX_PERR_EN_XI		 (1L<<1)
+#define BNX2_MISC_PERR_ENA2_UMP_RX_PERR_EN_XI		 (1L<<2)
+#define BNX2_MISC_PERR_ENA2_MCP_ROM_PERR_EN_XI		 (1L<<3)
+#define BNX2_MISC_PERR_ENA2_MCP_SCPAD_PERR_EN_XI	 (1L<<4)
+#define BNX2_MISC_PERR_ENA2_HB_MEM_PERR_EN_XI		 (1L<<5)
+#define BNX2_MISC_PERR_ENA2_PCIE_REPLAY_PERR_EN_XI	 (1L<<6)
 
 #define BNX2_MISC_DEBUG_VECTOR_SEL			0x000008b0
 #define BNX2_MISC_DEBUG_VECTOR_SEL_0			 (0xfffL<<0)
 #define BNX2_MISC_DEBUG_VECTOR_SEL_1			 (0xfffL<<12)
+#define BNX2_MISC_DEBUG_VECTOR_SEL_1_XI			 (0xfffL<<15)
 
 #define BNX2_MISC_VREG_CONTROL				0x000008b4
 #define BNX2_MISC_VREG_CONTROL_1_2			 (0xfL<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_XI		 (0xfL<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS14_XI	 (0L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS12_XI	 (1L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS10_XI	 (2L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS8_XI	 (3L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS6_XI	 (4L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS4_XI	 (5L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS2_XI	 (6L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_NOM_XI		 (7L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS2_XI	 (8L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS4_XI	 (9L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS6_XI	 (10L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS8_XI	 (11L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS10_XI	 (12L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS12_XI	 (13L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS14_XI	 (14L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS16_XI	 (15L<<0)
 #define BNX2_MISC_VREG_CONTROL_2_5			 (0xfL<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS14		 (0L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS12		 (1L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS10		 (2L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS8		 (3L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS6		 (4L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS4		 (5L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS2		 (6L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_NOM			 (7L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS2		 (8L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS4		 (9L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS6		 (10L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS8		 (11L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS10		 (12L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS12		 (13L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS14		 (14L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS16		 (15L<<4)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT			 (0xfL<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS14		 (0L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS12		 (1L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS10		 (2L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS8		 (3L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS6		 (4L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS4		 (5L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS2		 (6L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_NOM		 (7L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS2		 (8L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS4		 (9L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS6		 (10L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS8		 (11L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS10		 (12L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS12		 (13L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS14		 (14L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS16		 (15L<<8)
 
 #define BNX2_MISC_FINAL_CLK_CTL_VAL			0x000008b8
 #define BNX2_MISC_FINAL_CLK_CTL_VAL_MISC_FINAL_CLK_CTL_VAL	 (0x3ffffffL<<6)
 
-#define BNX2_MISC_UNUSED0				0x000008bc
+#define BNX2_MISC_GP_HW_CTL0				0x000008bc
+#define BNX2_MISC_GP_HW_CTL0_TX_DRIVE			 (1L<<0)
+#define BNX2_MISC_GP_HW_CTL0_RMII_MODE			 (1L<<1)
+#define BNX2_MISC_GP_HW_CTL0_RMII_CRSDV_SEL		 (1L<<2)
+#define BNX2_MISC_GP_HW_CTL0_RVMII_MODE			 (1L<<3)
+#define BNX2_MISC_GP_HW_CTL0_FLASH_SAMP_SCLK_NEGEDGE_TE	 (1L<<4)
+#define BNX2_MISC_GP_HW_CTL0_HIDDEN_REVISION_ID_TE	 (1L<<5)
+#define BNX2_MISC_GP_HW_CTL0_HC_CNTL_TMOUT_CTR_RST_TE	 (1L<<6)
+#define BNX2_MISC_GP_HW_CTL0_RESERVED1_XI		 (0x7L<<4)
+#define BNX2_MISC_GP_HW_CTL0_ENA_CORE_RST_ON_MAIN_PWR_GOING_AWAY	 (1L<<7)
+#define BNX2_MISC_GP_HW_CTL0_ENA_SEL_VAUX_B_IN_L2_TE	 (1L<<8)
+#define BNX2_MISC_GP_HW_CTL0_GRC_BNK_FREE_FIX_TE	 (1L<<9)
+#define BNX2_MISC_GP_HW_CTL0_LED_ACT_SEL_TE		 (1L<<10)
+#define BNX2_MISC_GP_HW_CTL0_RESERVED2_XI		 (0x7L<<8)
+#define BNX2_MISC_GP_HW_CTL0_UP1_DEF0			 (1L<<11)
+#define BNX2_MISC_GP_HW_CTL0_FIBER_MODE_DIS_DEF		 (1L<<12)
+#define BNX2_MISC_GP_HW_CTL0_FORCE2500_DEF		 (1L<<13)
+#define BNX2_MISC_GP_HW_CTL0_AUTODETECT_DIS_DEF		 (1L<<14)
+#define BNX2_MISC_GP_HW_CTL0_PARALLEL_DETECT_DEF	 (1L<<15)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI		 (0xfL<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_3MA		 (0L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_2P5MA		 (1L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_2P0MA		 (3L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_1P5MA		 (5L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_1P0MA		 (7L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_PWRDN		 (15L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PRE2DIS		 (1L<<20)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PRE1DIS		 (1L<<21)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT		 (0x3L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_M6P		 (0L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_M0P		 (1L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_P0P		 (2L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_P6P		 (3L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT		 (0x3L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_M6P		 (0L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_M0P		 (1L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_P0P		 (2L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_P6P		 (3L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ		 (0x3L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_240UA	 (0L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_160UA	 (1L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_400UA	 (2L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_320UA	 (3L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ		 (0x3L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_240UA	 (0L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_160UA	 (1L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_400UA	 (2L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_320UA	 (3L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ		 (0x3L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P57	 (0L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P45	 (1L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P62	 (2L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P66	 (3L<<30)
+
+#define BNX2_MISC_GP_HW_CTL1				0x000008c0
+#define BNX2_MISC_GP_HW_CTL1_1_ATTN_BTN_PRSNT_TE	 (1L<<0)
+#define BNX2_MISC_GP_HW_CTL1_1_ATTN_IND_PRSNT_TE	 (1L<<1)
+#define BNX2_MISC_GP_HW_CTL1_1_PWR_IND_PRSNT_TE		 (1L<<2)
+#define BNX2_MISC_GP_HW_CTL1_0_PCIE_LOOPBACK_TE		 (1L<<3)
+#define BNX2_MISC_GP_HW_CTL1_RESERVED_SOFT_XI		 (0xffffL<<0)
+#define BNX2_MISC_GP_HW_CTL1_RESERVED_HARD_XI		 (0xffffL<<16)
+
+#define BNX2_MISC_NEW_HW_CTL				0x000008c4
+#define BNX2_MISC_NEW_HW_CTL_MAIN_POR_BYPASS		 (1L<<0)
+#define BNX2_MISC_NEW_HW_CTL_RINGOSC_ENABLE		 (1L<<1)
+#define BNX2_MISC_NEW_HW_CTL_RINGOSC_SEL0		 (1L<<2)
+#define BNX2_MISC_NEW_HW_CTL_RINGOSC_SEL1		 (1L<<3)
+#define BNX2_MISC_NEW_HW_CTL_RESERVED_SHARED		 (0xfffL<<4)
+#define BNX2_MISC_NEW_HW_CTL_RESERVED_SPLIT		 (0xffffL<<16)
+
+#define BNX2_MISC_NEW_CORE_CTL				0x000008c8
+#define BNX2_MISC_NEW_CORE_CTL_LINK_HOLDOFF_SUCCESS	 (1L<<0)
+#define BNX2_MISC_NEW_CORE_CTL_LINK_HOLDOFF_REQ		 (1L<<1)
+#define BNX2_MISC_NEW_CORE_CTL_RESERVED_CMN		 (0x3fffL<<2)
+#define BNX2_MISC_NEW_CORE_CTL_RESERVED_TC		 (0xffffL<<16)
+
+#define BNX2_MISC_ECO_HW_CTL				0x000008cc
+#define BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN		 (1L<<0)
+#define BNX2_MISC_ECO_HW_CTL_RESERVED_SOFT		 (0x7fffL<<1)
+#define BNX2_MISC_ECO_HW_CTL_RESERVED_HARD		 (0xffffL<<16)
+
+#define BNX2_MISC_ECO_CORE_CTL				0x000008d0
+#define BNX2_MISC_ECO_CORE_CTL_RESERVED_SOFT		 (0xffffL<<0)
+#define BNX2_MISC_ECO_CORE_CTL_RESERVED_HARD		 (0xffffL<<16)
+
+#define BNX2_MISC_PPIO					0x000008d4
+#define BNX2_MISC_PPIO_VALUE				 (0xfL<<0)
+#define BNX2_MISC_PPIO_SET				 (0xfL<<8)
+#define BNX2_MISC_PPIO_CLR				 (0xfL<<16)
+#define BNX2_MISC_PPIO_FLOAT				 (0xfL<<24)
+
+#define BNX2_MISC_PPIO_INT				0x000008d8
+#define BNX2_MISC_PPIO_INT_INT_STATE			 (0xfL<<0)
+#define BNX2_MISC_PPIO_INT_OLD_VALUE			 (0xfL<<8)
+#define BNX2_MISC_PPIO_INT_OLD_SET			 (0xfL<<16)
+#define BNX2_MISC_PPIO_INT_OLD_CLR			 (0xfL<<24)
+
+#define BNX2_MISC_RESET_NUMS				0x000008dc
+#define BNX2_MISC_RESET_NUMS_NUM_HARD_RESETS		 (0x7L<<0)
+#define BNX2_MISC_RESET_NUMS_NUM_PCIE_RESETS		 (0x7L<<4)
+#define BNX2_MISC_RESET_NUMS_NUM_PERSTB_RESETS		 (0x7L<<8)
+#define BNX2_MISC_RESET_NUMS_NUM_CMN_RESETS		 (0x7L<<12)
+#define BNX2_MISC_RESET_NUMS_NUM_PORT_RESETS		 (0x7L<<16)
+
+#define BNX2_MISC_CS16_ERR				0x000008e0
+#define BNX2_MISC_CS16_ERR_ENA_PCI			 (1L<<0)
+#define BNX2_MISC_CS16_ERR_ENA_RDMA			 (1L<<1)
+#define BNX2_MISC_CS16_ERR_ENA_TDMA			 (1L<<2)
+#define BNX2_MISC_CS16_ERR_ENA_EMAC			 (1L<<3)
+#define BNX2_MISC_CS16_ERR_ENA_CTX			 (1L<<4)
+#define BNX2_MISC_CS16_ERR_ENA_TBDR			 (1L<<5)
+#define BNX2_MISC_CS16_ERR_ENA_RBDC			 (1L<<6)
+#define BNX2_MISC_CS16_ERR_ENA_COM			 (1L<<7)
+#define BNX2_MISC_CS16_ERR_ENA_CP			 (1L<<8)
+#define BNX2_MISC_CS16_ERR_STA_PCI			 (1L<<16)
+#define BNX2_MISC_CS16_ERR_STA_RDMA			 (1L<<17)
+#define BNX2_MISC_CS16_ERR_STA_TDMA			 (1L<<18)
+#define BNX2_MISC_CS16_ERR_STA_EMAC			 (1L<<19)
+#define BNX2_MISC_CS16_ERR_STA_CTX			 (1L<<20)
+#define BNX2_MISC_CS16_ERR_STA_TBDR			 (1L<<21)
+#define BNX2_MISC_CS16_ERR_STA_RBDC			 (1L<<22)
+#define BNX2_MISC_CS16_ERR_STA_COM			 (1L<<23)
+#define BNX2_MISC_CS16_ERR_STA_CP			 (1L<<24)
+
+#define BNX2_MISC_SPIO_EVENT				0x000008e4
+#define BNX2_MISC_SPIO_EVENT_ENABLE			 (0xffL<<0)
+
+#define BNX2_MISC_PPIO_EVENT				0x000008e8
+#define BNX2_MISC_PPIO_EVENT_ENABLE			 (0xfL<<0)
+
+#define BNX2_MISC_DUAL_MEDIA_CTRL			0x000008ec
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID		 (0xffL<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_X		 (0L<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C		 (3L<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S		 (12L<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP	 (0x7L<<8)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PORT_SWAP_PIN		 (1L<<11)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES1_SIGDET	 (1L<<12)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES0_SIGDET	 (1L<<13)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY1_SIGDET		 (1L<<14)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY0_SIGDET		 (1L<<15)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_LCPLL_RST		 (1L<<16)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES1_RST		 (1L<<17)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES0_RST		 (1L<<18)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY1_RST		 (1L<<19)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY0_RST		 (1L<<20)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL		 (0x7L<<21)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PORT_SWAP		 (1L<<24)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE	 (1L<<25)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ	 (0xfL<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_SER1_IDDQ	 (1L<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_SER0_IDDQ	 (2L<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_PHY1_IDDQ	 (4L<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_PHY0_IDDQ	 (8L<<26)
+
+#define BNX2_MISC_OTP_CMD1				0x000008f0
+#define BNX2_MISC_OTP_CMD1_FMODE			 (0x7L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_IDLE			 (0L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_WRITE			 (1L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_INIT			 (2L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_SET			 (3L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_RST			 (4L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_VERIFY			 (5L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_RESERVED0		 (6L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_RESERVED1		 (7L<<0)
+#define BNX2_MISC_OTP_CMD1_USEPINS			 (1L<<8)
+#define BNX2_MISC_OTP_CMD1_PROGSEL			 (1L<<9)
+#define BNX2_MISC_OTP_CMD1_PROGSTART			 (1L<<10)
+#define BNX2_MISC_OTP_CMD1_PCOUNT			 (0x7L<<16)
+#define BNX2_MISC_OTP_CMD1_PBYP				 (1L<<19)
+#define BNX2_MISC_OTP_CMD1_VSEL				 (0xfL<<20)
+#define BNX2_MISC_OTP_CMD1_TM				 (0x7L<<27)
+#define BNX2_MISC_OTP_CMD1_SADBYP			 (1L<<30)
+#define BNX2_MISC_OTP_CMD1_DEBUG			 (1L<<31)
+
+#define BNX2_MISC_OTP_CMD2				0x000008f4
+#define BNX2_MISC_OTP_CMD2_OTP_ROM_ADDR			 (0x3ffL<<0)
+#define BNX2_MISC_OTP_CMD2_DOSEL			 (0x7fL<<16)
+#define BNX2_MISC_OTP_CMD2_DOSEL_0			 (0L<<16)
+#define BNX2_MISC_OTP_CMD2_DOSEL_1			 (1L<<16)
+#define BNX2_MISC_OTP_CMD2_DOSEL_127			 (127L<<16)
+
+#define BNX2_MISC_OTP_STATUS				0x000008f8
+#define BNX2_MISC_OTP_STATUS_DATA			 (0xffL<<0)
+#define BNX2_MISC_OTP_STATUS_VALID			 (1L<<8)
+#define BNX2_MISC_OTP_STATUS_BUSY			 (1L<<9)
+#define BNX2_MISC_OTP_STATUS_BUSYSM			 (1L<<10)
+#define BNX2_MISC_OTP_STATUS_DONE			 (1L<<11)
+
+#define BNX2_MISC_OTP_SHIFT1_CMD			0x000008fc
+#define BNX2_MISC_OTP_SHIFT1_CMD_RESET_MODE_N		 (1L<<0)
+#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_DONE		 (1L<<1)
+#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_START		 (1L<<2)
+#define BNX2_MISC_OTP_SHIFT1_CMD_LOAD_DATA		 (1L<<3)
+#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_SELECT		 (0x1fL<<8)
+
+#define BNX2_MISC_OTP_SHIFT1_DATA			0x00000900
+#define BNX2_MISC_OTP_SHIFT2_CMD			0x00000904
+#define BNX2_MISC_OTP_SHIFT2_CMD_RESET_MODE_N		 (1L<<0)
+#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_DONE		 (1L<<1)
+#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_START		 (1L<<2)
+#define BNX2_MISC_OTP_SHIFT2_CMD_LOAD_DATA		 (1L<<3)
+#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_SELECT		 (0x1fL<<8)
+
+#define BNX2_MISC_OTP_SHIFT2_DATA			0x00000908
+#define BNX2_MISC_BIST_CS0				0x0000090c
+#define BNX2_MISC_BIST_CS0_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS0_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS0_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS0_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS0_MBIST_GO			 (1L<<9)
+#define BNX2_MISC_BIST_CS0_BIST_OVERRIDE		 (1L<<31)
+
+#define BNX2_MISC_BIST_MEMSTATUS0			0x00000910
+#define BNX2_MISC_BIST_CS1				0x00000914
+#define BNX2_MISC_BIST_CS1_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS1_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS1_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS1_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS1_MBIST_GO			 (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS1			0x00000918
+#define BNX2_MISC_BIST_CS2				0x0000091c
+#define BNX2_MISC_BIST_CS2_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS2_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS2_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS2_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS2_MBIST_GO			 (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS2			0x00000920
+#define BNX2_MISC_BIST_CS3				0x00000924
+#define BNX2_MISC_BIST_CS3_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS3_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS3_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS3_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS3_MBIST_GO			 (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS3			0x00000928
+#define BNX2_MISC_BIST_CS4				0x0000092c
+#define BNX2_MISC_BIST_CS4_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS4_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS4_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS4_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS4_MBIST_GO			 (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS4			0x00000930
+#define BNX2_MISC_BIST_CS5				0x00000934
+#define BNX2_MISC_BIST_CS5_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS5_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS5_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS5_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS5_MBIST_GO			 (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS5			0x00000938
+#define BNX2_MISC_MEM_TM0				0x0000093c
+#define BNX2_MISC_MEM_TM0_PCIE_REPLAY_TM		 (0xfL<<0)
+#define BNX2_MISC_MEM_TM0_MCP_SCPAD			 (0xfL<<8)
+#define BNX2_MISC_MEM_TM0_UMP_TM			 (0xffL<<16)
+#define BNX2_MISC_MEM_TM0_HB_MEM_TM			 (0xfL<<24)
+
+#define BNX2_MISC_USPLL_CTRL				0x00000940
+#define BNX2_MISC_USPLL_CTRL_PH_DET_DIS			 (1L<<0)
+#define BNX2_MISC_USPLL_CTRL_FREQ_DET_DIS		 (1L<<1)
+#define BNX2_MISC_USPLL_CTRL_LCPX			 (0x3fL<<2)
+#define BNX2_MISC_USPLL_CTRL_RX				 (0x3L<<8)
+#define BNX2_MISC_USPLL_CTRL_VC_EN			 (1L<<10)
+#define BNX2_MISC_USPLL_CTRL_VCO_MG			 (0x3L<<11)
+#define BNX2_MISC_USPLL_CTRL_KVCO_XF			 (0x7L<<13)
+#define BNX2_MISC_USPLL_CTRL_KVCO_XS			 (0x7L<<16)
+#define BNX2_MISC_USPLL_CTRL_TESTD_EN			 (1L<<19)
+#define BNX2_MISC_USPLL_CTRL_TESTD_SEL			 (0x7L<<20)
+#define BNX2_MISC_USPLL_CTRL_TESTA_EN			 (1L<<23)
+#define BNX2_MISC_USPLL_CTRL_TESTA_SEL			 (0x3L<<24)
+#define BNX2_MISC_USPLL_CTRL_ATTEN_FREF			 (1L<<26)
+#define BNX2_MISC_USPLL_CTRL_DIGITAL_RST		 (1L<<27)
+#define BNX2_MISC_USPLL_CTRL_ANALOG_RST			 (1L<<28)
+#define BNX2_MISC_USPLL_CTRL_LOCK			 (1L<<29)
+
+#define BNX2_MISC_PERR_STATUS0				0x00000944
+#define BNX2_MISC_PERR_STATUS0_COM_DMAE_PERR		 (1L<<0)
+#define BNX2_MISC_PERR_STATUS0_CP_DMAE_PERR		 (1L<<1)
+#define BNX2_MISC_PERR_STATUS0_RPM_ACPIBEMEM_PERR	 (1L<<2)
+#define BNX2_MISC_PERR_STATUS0_CTX_USAGE_CNT_PERR	 (1L<<3)
+#define BNX2_MISC_PERR_STATUS0_CTX_PGTBL_PERR		 (1L<<4)
+#define BNX2_MISC_PERR_STATUS0_CTX_CACHE_PERR		 (1L<<5)
+#define BNX2_MISC_PERR_STATUS0_CTX_MIRROR_PERR		 (1L<<6)
+#define BNX2_MISC_PERR_STATUS0_COM_CTXC_PERR		 (1L<<7)
+#define BNX2_MISC_PERR_STATUS0_COM_SCPAD_PERR		 (1L<<8)
+#define BNX2_MISC_PERR_STATUS0_CP_CTXC_PERR		 (1L<<9)
+#define BNX2_MISC_PERR_STATUS0_CP_SCPAD_PERR		 (1L<<10)
+#define BNX2_MISC_PERR_STATUS0_RXP_RBUFC_PERR		 (1L<<11)
+#define BNX2_MISC_PERR_STATUS0_RXP_CTXC_PERR		 (1L<<12)
+#define BNX2_MISC_PERR_STATUS0_RXP_SCPAD_PERR		 (1L<<13)
+#define BNX2_MISC_PERR_STATUS0_TPAT_SCPAD_PERR		 (1L<<14)
+#define BNX2_MISC_PERR_STATUS0_TXP_CTXC_PERR		 (1L<<15)
+#define BNX2_MISC_PERR_STATUS0_TXP_SCPAD_PERR		 (1L<<16)
+#define BNX2_MISC_PERR_STATUS0_CS_TMEM_PERR		 (1L<<17)
+#define BNX2_MISC_PERR_STATUS0_MQ_CTX_PERR		 (1L<<18)
+#define BNX2_MISC_PERR_STATUS0_RPM_DFIFOMEM_PERR	 (1L<<19)
+#define BNX2_MISC_PERR_STATUS0_RPC_DFIFOMEM_PERR	 (1L<<20)
+#define BNX2_MISC_PERR_STATUS0_RBUF_PTRMEM_PERR		 (1L<<21)
+#define BNX2_MISC_PERR_STATUS0_RBUF_DATAMEM_PERR	 (1L<<22)
+#define BNX2_MISC_PERR_STATUS0_RV2P_P2IRAM_PERR		 (1L<<23)
+#define BNX2_MISC_PERR_STATUS0_RV2P_P1IRAM_PERR		 (1L<<24)
+#define BNX2_MISC_PERR_STATUS0_RV2P_CB1REGS_PERR	 (1L<<25)
+#define BNX2_MISC_PERR_STATUS0_RV2P_CB0REGS_PERR	 (1L<<26)
+#define BNX2_MISC_PERR_STATUS0_TPBUF_PERR		 (1L<<27)
+#define BNX2_MISC_PERR_STATUS0_THBUF_PERR		 (1L<<28)
+#define BNX2_MISC_PERR_STATUS0_TDMA_PERR		 (1L<<29)
+#define BNX2_MISC_PERR_STATUS0_TBDC_PERR		 (1L<<30)
+#define BNX2_MISC_PERR_STATUS0_TSCH_LR_PERR		 (1L<<31)
+
+#define BNX2_MISC_PERR_STATUS1				0x00000948
+#define BNX2_MISC_PERR_STATUS1_RBDC_PERR		 (1L<<0)
+#define BNX2_MISC_PERR_STATUS1_RDMA_DFIFO_PERR		 (1L<<2)
+#define BNX2_MISC_PERR_STATUS1_HC_STATS_PERR		 (1L<<3)
+#define BNX2_MISC_PERR_STATUS1_HC_MSIX_PERR		 (1L<<4)
+#define BNX2_MISC_PERR_STATUS1_HC_PRODUCSTB_PERR	 (1L<<5)
+#define BNX2_MISC_PERR_STATUS1_HC_CONSUMSTB_PERR	 (1L<<6)
+#define BNX2_MISC_PERR_STATUS1_TPATQ_PERR		 (1L<<7)
+#define BNX2_MISC_PERR_STATUS1_MCPQ_PERR		 (1L<<8)
+#define BNX2_MISC_PERR_STATUS1_TDMAQ_PERR		 (1L<<9)
+#define BNX2_MISC_PERR_STATUS1_TXPQ_PERR		 (1L<<10)
+#define BNX2_MISC_PERR_STATUS1_COMTQ_PERR		 (1L<<11)
+#define BNX2_MISC_PERR_STATUS1_COMQ_PERR		 (1L<<12)
+#define BNX2_MISC_PERR_STATUS1_RLUPQ_PERR		 (1L<<13)
+#define BNX2_MISC_PERR_STATUS1_RXPQ_PERR		 (1L<<14)
+#define BNX2_MISC_PERR_STATUS1_RV2PPQ_PERR		 (1L<<15)
+#define BNX2_MISC_PERR_STATUS1_RDMAQ_PERR		 (1L<<16)
+#define BNX2_MISC_PERR_STATUS1_TASQ_PERR		 (1L<<17)
+#define BNX2_MISC_PERR_STATUS1_TBDRQ_PERR		 (1L<<18)
+#define BNX2_MISC_PERR_STATUS1_TSCHQ_PERR		 (1L<<19)
+#define BNX2_MISC_PERR_STATUS1_COMXQ_PERR		 (1L<<20)
+#define BNX2_MISC_PERR_STATUS1_RXPCQ_PERR		 (1L<<21)
+#define BNX2_MISC_PERR_STATUS1_RV2PTQ_PERR		 (1L<<22)
+#define BNX2_MISC_PERR_STATUS1_RV2PMQ_PERR		 (1L<<23)
+#define BNX2_MISC_PERR_STATUS1_CPQ_PERR			 (1L<<24)
+#define BNX2_MISC_PERR_STATUS1_CSQ_PERR			 (1L<<25)
+#define BNX2_MISC_PERR_STATUS1_RLUP_CID_PERR		 (1L<<26)
+#define BNX2_MISC_PERR_STATUS1_RV2PCS_TMEM_PERR		 (1L<<27)
+#define BNX2_MISC_PERR_STATUS1_RV2PCSQ_PERR		 (1L<<28)
+#define BNX2_MISC_PERR_STATUS1_MQ_IDX_PERR		 (1L<<29)
+
+#define BNX2_MISC_PERR_STATUS2				0x0000094c
+#define BNX2_MISC_PERR_STATUS2_TGT_FIFO_PERR		 (1L<<0)
+#define BNX2_MISC_PERR_STATUS2_UMP_TX_PERR		 (1L<<1)
+#define BNX2_MISC_PERR_STATUS2_UMP_RX_PERR		 (1L<<2)
+#define BNX2_MISC_PERR_STATUS2_MCP_ROM_PERR		 (1L<<3)
+#define BNX2_MISC_PERR_STATUS2_MCP_SCPAD_PERR		 (1L<<4)
+#define BNX2_MISC_PERR_STATUS2_HB_MEM_PERR		 (1L<<5)
+#define BNX2_MISC_PERR_STATUS2_PCIE_REPLAY_PERR		 (1L<<6)
+
+#define BNX2_MISC_LCPLL_CTRL0				0x00000950
+#define BNX2_MISC_LCPLL_CTRL0_OAC			 (0x7L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_NEGTWENTY		 (0L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_ZERO			 (1L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_TWENTY		 (3L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_FORTY			 (7L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL			 (0x7L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_360		 (0L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_480		 (1L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_600		 (3L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_720		 (7L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_BIAS_CTRL			 (0x3L<<6)
+#define BNX2_MISC_LCPLL_CTRL0_PLL_OBSERVE		 (0x7L<<8)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL			 (0x3L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_0		 (0L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_1		 (1L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_2		 (2L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_PLLSEQSTART		 (1L<<13)
+#define BNX2_MISC_LCPLL_CTRL0_RESERVED			 (1L<<14)
+#define BNX2_MISC_LCPLL_CTRL0_CAPRETRY_EN		 (1L<<15)
+#define BNX2_MISC_LCPLL_CTRL0_FREQMONITOR_EN		 (1L<<16)
+#define BNX2_MISC_LCPLL_CTRL0_FREQDETRESTART_EN		 (1L<<17)
+#define BNX2_MISC_LCPLL_CTRL0_FREQDETRETRY_EN		 (1L<<18)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFDONE_EN		 (1L<<19)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFDONE		 (1L<<20)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFPASS		 (1L<<21)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPDONE_EN	 (1L<<22)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPDONE		 (1L<<23)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPPASS_EN	 (1L<<24)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPPASS		 (1L<<25)
+#define BNX2_MISC_LCPLL_CTRL0_CAPRESTART		 (1L<<26)
+#define BNX2_MISC_LCPLL_CTRL0_CAPSELECTM_EN		 (1L<<27)
+
+#define BNX2_MISC_LCPLL_CTRL1				0x00000954
+#define BNX2_MISC_LCPLL_CTRL1_CAPSELECTM		 (0x1fL<<0)
+#define BNX2_MISC_LCPLL_CTRL1_CAPFORCESLOWDOWN_EN	 (1L<<5)
+#define BNX2_MISC_LCPLL_CTRL1_CAPFORCESLOWDOWN		 (1L<<6)
+#define BNX2_MISC_LCPLL_CTRL1_SLOWDN_XOR		 (1L<<7)
+
+#define BNX2_MISC_LCPLL_STATUS				0x00000958
+#define BNX2_MISC_LCPLL_STATUS_FREQDONE_SM		 (1L<<0)
+#define BNX2_MISC_LCPLL_STATUS_FREQPASS_SM		 (1L<<1)
+#define BNX2_MISC_LCPLL_STATUS_PLLSEQDONE		 (1L<<2)
+#define BNX2_MISC_LCPLL_STATUS_PLLSEQPASS		 (1L<<3)
+#define BNX2_MISC_LCPLL_STATUS_PLLSTATE			 (0x7L<<4)
+#define BNX2_MISC_LCPLL_STATUS_CAPSTATE			 (0x7L<<7)
+#define BNX2_MISC_LCPLL_STATUS_CAPSELECT		 (0x1fL<<10)
+#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR		 (1L<<15)
+#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR_0	 (0L<<15)
+#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR_1	 (1L<<15)
+
+#define BNX2_MISC_OSCFUNDS_CTRL				0x0000095c
+#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON		 (1L<<5)
+#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON_OFF		 (0L<<5)
+#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON_ON		 (1L<<5)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM		 (0x3L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_0		 (0L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_1		 (1L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_2		 (2L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_3		 (3L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ		 (0x3L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_0		 (0L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_1		 (1L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_2		 (2L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_3		 (3L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ		 (0x3L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_0		 (0L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_1		 (1L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_2		 (2L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_3		 (3L<<10)
 
 
 /*
@@ -1031,11 +1736,35 @@
 #define BNX2_NVM_COMMAND_WRDI				 (1L<<17)
 #define BNX2_NVM_COMMAND_EWSR				 (1L<<18)
 #define BNX2_NVM_COMMAND_WRSR				 (1L<<19)
+#define BNX2_NVM_COMMAND_RD_ID				 (1L<<20)
+#define BNX2_NVM_COMMAND_RD_STATUS			 (1L<<21)
+#define BNX2_NVM_COMMAND_MODE_256			 (1L<<22)
 
 #define BNX2_NVM_STATUS					0x00006404
 #define BNX2_NVM_STATUS_PI_FSM_STATE			 (0xfL<<0)
 #define BNX2_NVM_STATUS_EE_FSM_STATE			 (0xfL<<4)
 #define BNX2_NVM_STATUS_EQ_FSM_STATE			 (0xfL<<8)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_XI		 (0x1fL<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_IDLE_XI	 (0L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD0_XI	 (1L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD1_XI	 (2L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD_FINISH0_XI	 (3L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD_FINISH1_XI	 (4L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_ADDR0_XI	 (5L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA0_XI	 (6L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA1_XI	 (7L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA2_XI	 (8L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA0_XI	 (9L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA1_XI	 (10L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA2_XI	 (11L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID0_XI	 (12L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID1_XI	 (13L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID2_XI	 (14L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID3_XI	 (15L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID4_XI	 (16L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CHECK_BUSY0_XI	 (17L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_ST_WREN_XI	 (18L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WAIT_XI	 (19L<<0)
 
 #define BNX2_NVM_WRITE					0x00006408
 #define BNX2_NVM_WRITE_NVM_WRITE_VALUE			 (0xffffffffL<<0)
@@ -1046,6 +1775,10 @@
 #define BNX2_NVM_WRITE_NVM_WRITE_VALUE_CS_B		 (8L<<0)
 #define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SO		 (16L<<0)
 #define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SI		 (32L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SI_XI		 (1L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SO_XI		 (2L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_CS_B_XI		 (4L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SCLK_XI		 (8L<<0)
 
 #define BNX2_NVM_ADDR					0x0000640c
 #define BNX2_NVM_ADDR_NVM_ADDR_VALUE			 (0xffffffL<<0)
@@ -1056,6 +1789,10 @@
 #define BNX2_NVM_ADDR_NVM_ADDR_VALUE_CS_B		 (8L<<0)
 #define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SO			 (16L<<0)
 #define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SI			 (32L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SI_XI		 (1L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SO_XI		 (2L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_CS_B_XI		 (4L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SCLK_XI		 (8L<<0)
 
 #define BNX2_NVM_READ					0x00006410
 #define BNX2_NVM_READ_NVM_READ_VALUE			 (0xffffffffL<<0)
@@ -1066,6 +1803,10 @@
 #define BNX2_NVM_READ_NVM_READ_VALUE_CS_B		 (8L<<0)
 #define BNX2_NVM_READ_NVM_READ_VALUE_SO			 (16L<<0)
 #define BNX2_NVM_READ_NVM_READ_VALUE_SI			 (32L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SI_XI		 (1L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SO_XI		 (2L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_CS_B_XI		 (4L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SCLK_XI		 (8L<<0)
 
 #define BNX2_NVM_CFG1					0x00006414
 #define BNX2_NVM_CFG1_FLASH_MODE			 (1L<<0)
@@ -1077,14 +1818,21 @@
 #define BNX2_NVM_CFG1_STATUS_BIT_BUFFER_RDY		 (7L<<4)
 #define BNX2_NVM_CFG1_SPI_CLK_DIV			 (0xfL<<7)
 #define BNX2_NVM_CFG1_SEE_CLK_DIV			 (0x7ffL<<11)
+#define BNX2_NVM_CFG1_STRAP_CONTROL_0			 (1L<<23)
 #define BNX2_NVM_CFG1_PROTECT_MODE			 (1L<<24)
 #define BNX2_NVM_CFG1_FLASH_SIZE			 (1L<<25)
+#define BNX2_NVM_CFG1_FW_USTRAP_1			 (1L<<26)
+#define BNX2_NVM_CFG1_FW_USTRAP_0			 (1L<<27)
+#define BNX2_NVM_CFG1_FW_USTRAP_2			 (1L<<28)
+#define BNX2_NVM_CFG1_FW_USTRAP_3			 (1L<<29)
+#define BNX2_NVM_CFG1_FW_FLASH_TYPE_EN			 (1L<<30)
 #define BNX2_NVM_CFG1_COMPAT_BYPASSS			 (1L<<31)
 
 #define BNX2_NVM_CFG2					0x00006418
 #define BNX2_NVM_CFG2_ERASE_CMD				 (0xffL<<0)
 #define BNX2_NVM_CFG2_DUMMY				 (0xffL<<8)
 #define BNX2_NVM_CFG2_STATUS_CMD			 (0xffL<<16)
+#define BNX2_NVM_CFG2_READ_ID				 (0xffL<<24)
 
 #define BNX2_NVM_CFG3					0x0000641c
 #define BNX2_NVM_CFG3_BUFFER_RD_CMD			 (0xffL<<0)
@@ -1119,6 +1867,35 @@
 #define BNX2_NVM_WRITE1_WRDI_CMD			 (0xffL<<8)
 #define BNX2_NVM_WRITE1_SR_DATA				 (0xffL<<16)
 
+#define BNX2_NVM_CFG4					0x0000642c
+#define BNX2_NVM_CFG4_FLASH_SIZE			 (0x7L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_1MBIT			 (0L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_2MBIT			 (1L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_4MBIT			 (2L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_8MBIT			 (3L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_16MBIT			 (4L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_32MBIT			 (5L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_64MBIT			 (6L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_128MBIT		 (7L<<0)
+#define BNX2_NVM_CFG4_FLASH_VENDOR			 (1L<<3)
+#define BNX2_NVM_CFG4_FLASH_VENDOR_ST			 (0L<<3)
+#define BNX2_NVM_CFG4_FLASH_VENDOR_ATMEL		 (1L<<3)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC		 (0x3L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT8	 (0L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT9	 (1L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT10	 (2L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT11	 (3L<<4)
+#define BNX2_NVM_CFG4_STATUS_BIT_POLARITY		 (1L<<6)
+#define BNX2_NVM_CFG4_RESERVED				 (0x1ffffffL<<7)
+
+#define BNX2_NVM_RECONFIG				0x00006430
+#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE		 (0xfL<<0)
+#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE_ST		 (0L<<0)
+#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE_ATMEL	 (1L<<0)
+#define BNX2_NVM_RECONFIG_RECONFIG_STRAP_VALUE		 (0xfL<<4)
+#define BNX2_NVM_RECONFIG_RESERVED			 (0x7fffffL<<8)
+#define BNX2_NVM_RECONFIG_RECONFIG_DONE			 (1L<<31)
+
 
 
 /*
@@ -1140,6 +1917,8 @@
 #define BNX2_DMA_STATUS_BIG_WRITE_TRANSFERS_STAT	 (1L<<23)
 #define BNX2_DMA_STATUS_BIG_WRITE_DELAY_PCI_CLKS_STAT	 (1L<<24)
 #define BNX2_DMA_STATUS_BIG_WRITE_RETRY_AFTER_DATA_STAT	 (1L<<25)
+#define BNX2_DMA_STATUS_GLOBAL_ERR_XI			 (1L<<0)
+#define BNX2_DMA_STATUS_BME_XI				 (1L<<4)
 
 #define BNX2_DMA_CONFIG					0x00000c08
 #define BNX2_DMA_CONFIG_DATA_BYTE_SWAP			 (1L<<0)
@@ -1161,85 +1940,315 @@
 #define BNX2_DMA_CONFIG_BIG_SIZE_128			 (0x2L<<24)
 #define BNX2_DMA_CONFIG_BIG_SIZE_256			 (0x4L<<24)
 #define BNX2_DMA_CONFIG_BIG_SIZE_512			 (0x8L<<24)
+#define BNX2_DMA_CONFIG_DAT_WBSWAP_MODE_XI		 (0x3L<<0)
+#define BNX2_DMA_CONFIG_CTL_WBSWAP_MODE_XI		 (0x3L<<4)
+#define BNX2_DMA_CONFIG_MAX_PL_XI			 (0x7L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_128B_XI			 (0L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_256B_XI			 (1L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_512B_XI			 (2L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_EN_XI			 (1L<<15)
+#define BNX2_DMA_CONFIG_MAX_RRS_XI			 (0x7L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_128B_XI			 (0L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_256B_XI			 (1L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_512B_XI			 (2L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_1024B_XI		 (3L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_2048B_XI		 (4L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_4096B_XI		 (5L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_EN_XI			 (1L<<19)
+#define BNX2_DMA_CONFIG_NO_64SWAP_EN_XI			 (1L<<31)
 
 #define BNX2_DMA_BLACKOUT				0x00000c0c
 #define BNX2_DMA_BLACKOUT_RD_RETRY_BLACKOUT		 (0xffL<<0)
 #define BNX2_DMA_BLACKOUT_2ND_RD_RETRY_BLACKOUT		 (0xffL<<8)
 #define BNX2_DMA_BLACKOUT_WR_RETRY_BLACKOUT		 (0xffL<<16)
 
-#define BNX2_DMA_RCHAN_STAT				0x00000c30
-#define BNX2_DMA_RCHAN_STAT_COMP_CODE_0			 (0x7L<<0)
-#define BNX2_DMA_RCHAN_STAT_PAR_ERR_0			 (1L<<3)
-#define BNX2_DMA_RCHAN_STAT_COMP_CODE_1			 (0x7L<<4)
-#define BNX2_DMA_RCHAN_STAT_PAR_ERR_1			 (1L<<7)
-#define BNX2_DMA_RCHAN_STAT_COMP_CODE_2			 (0x7L<<8)
-#define BNX2_DMA_RCHAN_STAT_PAR_ERR_2			 (1L<<11)
-#define BNX2_DMA_RCHAN_STAT_COMP_CODE_3			 (0x7L<<12)
-#define BNX2_DMA_RCHAN_STAT_PAR_ERR_3			 (1L<<15)
-#define BNX2_DMA_RCHAN_STAT_COMP_CODE_4			 (0x7L<<16)
-#define BNX2_DMA_RCHAN_STAT_PAR_ERR_4			 (1L<<19)
-#define BNX2_DMA_RCHAN_STAT_COMP_CODE_5			 (0x7L<<20)
-#define BNX2_DMA_RCHAN_STAT_PAR_ERR_5			 (1L<<23)
-#define BNX2_DMA_RCHAN_STAT_COMP_CODE_6			 (0x7L<<24)
-#define BNX2_DMA_RCHAN_STAT_PAR_ERR_6			 (1L<<27)
-#define BNX2_DMA_RCHAN_STAT_COMP_CODE_7			 (0x7L<<28)
-#define BNX2_DMA_RCHAN_STAT_PAR_ERR_7			 (1L<<31)
+#define BNX2_DMA_READ_MASTER_SETTING_0			0x00000c10
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_NO_SNOOP	 (1L<<0)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_RELAX_ORDER	 (1L<<1)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_PRIORITY	 (1L<<2)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_TRAFFIC_CLASS	 (0x7L<<4)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_PARAM_EN	 (1L<<7)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_NO_SNOOP	 (1L<<8)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_RELAX_ORDER	 (1L<<9)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_PRIORITY	 (1L<<10)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_TRAFFIC_CLASS	 (0x7L<<12)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_PARAM_EN	 (1L<<15)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_NO_SNOOP	 (1L<<16)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_RELAX_ORDER	 (1L<<17)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_PRIORITY	 (1L<<18)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_TRAFFIC_CLASS	 (0x7L<<20)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_PARAM_EN	 (1L<<23)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_NO_SNOOP	 (1L<<24)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_RELAX_ORDER	 (1L<<25)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_PRIORITY	 (1L<<26)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_TRAFFIC_CLASS	 (0x7L<<28)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_PARAM_EN	 (1L<<31)
 
-#define BNX2_DMA_WCHAN_STAT				0x00000c34
-#define BNX2_DMA_WCHAN_STAT_COMP_CODE_0			 (0x7L<<0)
-#define BNX2_DMA_WCHAN_STAT_PAR_ERR_0			 (1L<<3)
-#define BNX2_DMA_WCHAN_STAT_COMP_CODE_1			 (0x7L<<4)
-#define BNX2_DMA_WCHAN_STAT_PAR_ERR_1			 (1L<<7)
-#define BNX2_DMA_WCHAN_STAT_COMP_CODE_2			 (0x7L<<8)
-#define BNX2_DMA_WCHAN_STAT_PAR_ERR_2			 (1L<<11)
-#define BNX2_DMA_WCHAN_STAT_COMP_CODE_3			 (0x7L<<12)
-#define BNX2_DMA_WCHAN_STAT_PAR_ERR_3			 (1L<<15)
-#define BNX2_DMA_WCHAN_STAT_COMP_CODE_4			 (0x7L<<16)
-#define BNX2_DMA_WCHAN_STAT_PAR_ERR_4			 (1L<<19)
-#define BNX2_DMA_WCHAN_STAT_COMP_CODE_5			 (0x7L<<20)
-#define BNX2_DMA_WCHAN_STAT_PAR_ERR_5			 (1L<<23)
-#define BNX2_DMA_WCHAN_STAT_COMP_CODE_6			 (0x7L<<24)
-#define BNX2_DMA_WCHAN_STAT_PAR_ERR_6			 (1L<<27)
-#define BNX2_DMA_WCHAN_STAT_COMP_CODE_7			 (0x7L<<28)
-#define BNX2_DMA_WCHAN_STAT_PAR_ERR_7			 (1L<<31)
+#define BNX2_DMA_READ_MASTER_SETTING_1			0x00000c14
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_NO_SNOOP	 (1L<<0)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_RELAX_ORDER	 (1L<<1)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_PRIORITY	 (1L<<2)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_TRAFFIC_CLASS	 (0x7L<<4)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_PARAM_EN	 (1L<<7)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_NO_SNOOP	 (1L<<8)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_RELAX_ORDER	 (1L<<9)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_PRIORITY	 (1L<<10)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_TRAFFIC_CLASS	 (0x7L<<12)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_PARAM_EN	 (1L<<15)
 
-#define BNX2_DMA_RCHAN_ASSIGNMENT			0x00000c38
-#define BNX2_DMA_RCHAN_ASSIGNMENT_0			 (0xfL<<0)
-#define BNX2_DMA_RCHAN_ASSIGNMENT_1			 (0xfL<<4)
-#define BNX2_DMA_RCHAN_ASSIGNMENT_2			 (0xfL<<8)
-#define BNX2_DMA_RCHAN_ASSIGNMENT_3			 (0xfL<<12)
-#define BNX2_DMA_RCHAN_ASSIGNMENT_4			 (0xfL<<16)
-#define BNX2_DMA_RCHAN_ASSIGNMENT_5			 (0xfL<<20)
-#define BNX2_DMA_RCHAN_ASSIGNMENT_6			 (0xfL<<24)
-#define BNX2_DMA_RCHAN_ASSIGNMENT_7			 (0xfL<<28)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0			0x00000c18
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_NO_SNOOP	 (1L<<0)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_RELAX_ORDER	 (1L<<1)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_PRIORITY	 (1L<<2)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_CS_VLD	 (1L<<3)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_TRAFFIC_CLASS	 (0x7L<<4)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_PARAM_EN	 (1L<<7)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_NO_SNOOP	 (1L<<8)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_RELAX_ORDER	 (1L<<9)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_PRIORITY	 (1L<<10)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_CS_VLD	 (1L<<11)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_TRAFFIC_CLASS	 (0x7L<<12)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_PARAM_EN	 (1L<<15)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_NO_SNOOP	 (1L<<24)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_RELAX_ORDER	 (1L<<25)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_PRIORITY	 (1L<<26)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_CS_VLD	 (1L<<27)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_TRAFFIC_CLASS	 (0x7L<<28)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_PARAM_EN	 (1L<<31)
 
-#define BNX2_DMA_WCHAN_ASSIGNMENT			0x00000c3c
-#define BNX2_DMA_WCHAN_ASSIGNMENT_0			 (0xfL<<0)
-#define BNX2_DMA_WCHAN_ASSIGNMENT_1			 (0xfL<<4)
-#define BNX2_DMA_WCHAN_ASSIGNMENT_2			 (0xfL<<8)
-#define BNX2_DMA_WCHAN_ASSIGNMENT_3			 (0xfL<<12)
-#define BNX2_DMA_WCHAN_ASSIGNMENT_4			 (0xfL<<16)
-#define BNX2_DMA_WCHAN_ASSIGNMENT_5			 (0xfL<<20)
-#define BNX2_DMA_WCHAN_ASSIGNMENT_6			 (0xfL<<24)
-#define BNX2_DMA_WCHAN_ASSIGNMENT_7			 (0xfL<<28)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1			0x00000c1c
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_NO_SNOOP	 (1L<<0)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_RELAX_ORDER	 (1L<<1)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_PRIORITY	 (1L<<2)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_CS_VLD	 (1L<<3)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_TRAFFIC_CLASS	 (0x7L<<4)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_PARAM_EN	 (1L<<7)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_NO_SNOOP	 (1L<<8)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_RELAX_ORDER	 (1L<<9)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_PRIORITY	 (1L<<10)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_CS_VLD	 (1L<<11)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_TRAFFIC_CLASS	 (0x7L<<12)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_PARAM_EN	 (1L<<15)
 
-#define BNX2_DMA_RCHAN_STAT_00				0x00000c40
-#define BNX2_DMA_RCHAN_STAT_00_RCHAN_STA_HOST_ADDR_LOW	 (0xffffffffL<<0)
+#define BNX2_DMA_ARBITER				0x00000c20
+#define BNX2_DMA_ARBITER_NUM_READS			 (0x7L<<0)
+#define BNX2_DMA_ARBITER_WR_ARB_MODE			 (1L<<4)
+#define BNX2_DMA_ARBITER_WR_ARB_MODE_STRICT		 (0L<<4)
+#define BNX2_DMA_ARBITER_WR_ARB_MODE_RND_RBN		 (1L<<4)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE			 (0x3L<<5)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE_STRICT		 (0L<<5)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE_RND_RBN		 (1L<<5)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE_WGT_RND_RBN	 (2L<<5)
+#define BNX2_DMA_ARBITER_ALT_MODE_EN			 (1L<<8)
+#define BNX2_DMA_ARBITER_RR_MODE			 (1L<<9)
+#define BNX2_DMA_ARBITER_TIMER_MODE			 (1L<<10)
+#define BNX2_DMA_ARBITER_OUSTD_READ_REQ			 (0xfL<<12)
 
-#define BNX2_DMA_RCHAN_STAT_01				0x00000c44
-#define BNX2_DMA_RCHAN_STAT_01_RCHAN_STA_HOST_ADDR_HIGH	 (0xffffffffL<<0)
+#define BNX2_DMA_ARB_TIMERS				0x00000c24
+#define BNX2_DMA_ARB_TIMERS_RD_DRR_WAIT_TIME		 (0xffL<<0)
+#define BNX2_DMA_ARB_TIMERS_TM_MIN_TIMEOUT		 (0xffL<<12)
+#define BNX2_DMA_ARB_TIMERS_TM_MAX_TIMEOUT		 (0xfffL<<20)
 
-#define BNX2_DMA_RCHAN_STAT_02				0x00000c48
-#define BNX2_DMA_RCHAN_STAT_02_LENGTH			 (0xffffL<<0)
-#define BNX2_DMA_RCHAN_STAT_02_WORD_SWAP		 (1L<<16)
-#define BNX2_DMA_RCHAN_STAT_02_BYTE_SWAP		 (1L<<17)
-#define BNX2_DMA_RCHAN_STAT_02_PRIORITY_LVL		 (1L<<18)
+#define BNX2_DMA_DEBUG_VECT_PEEK			0x00000c2c
+#define BNX2_DMA_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_DMA_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_DMA_DEBUG_VECT_PEEK_1_SEL			 (0xfL<<12)
+#define BNX2_DMA_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_DMA_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_DMA_DEBUG_VECT_PEEK_2_SEL			 (0xfL<<28)
 
-#define BNX2_DMA_RCHAN_STAT_10				0x00000c4c
-#define BNX2_DMA_RCHAN_STAT_11				0x00000c50
-#define BNX2_DMA_RCHAN_STAT_12				0x00000c54
-#define BNX2_DMA_RCHAN_STAT_20				0x00000c58
-#define BNX2_DMA_RCHAN_STAT_21				0x00000c5c
+#define BNX2_DMA_TAG_RAM_00				0x00000c30
+#define BNX2_DMA_TAG_RAM_00_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_00_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_00_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_00_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_00_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_00_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_00_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_00_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_01				0x00000c34
+#define BNX2_DMA_TAG_RAM_01_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_01_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_01_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_01_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_01_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_01_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_01_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_01_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_02				0x00000c38
+#define BNX2_DMA_TAG_RAM_02_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_02_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_02_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_02_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_02_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_02_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_02_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_02_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_03				0x00000c3c
+#define BNX2_DMA_TAG_RAM_03_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_03_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_03_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_03_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_03_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_03_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_03_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_03_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_04				0x00000c40
+#define BNX2_DMA_TAG_RAM_04_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_04_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_04_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_04_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_04_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_04_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_04_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_04_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_05				0x00000c44
+#define BNX2_DMA_TAG_RAM_05_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_05_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_05_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_05_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_05_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_05_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_05_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_05_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_06				0x00000c48
+#define BNX2_DMA_TAG_RAM_06_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_06_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_06_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_06_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_06_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_06_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_06_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_06_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_07				0x00000c4c
+#define BNX2_DMA_TAG_RAM_07_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_07_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_07_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_07_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_07_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_07_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_07_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_07_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_08				0x00000c50
+#define BNX2_DMA_TAG_RAM_08_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_08_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_08_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_08_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_08_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_08_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_08_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_08_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_09				0x00000c54
+#define BNX2_DMA_TAG_RAM_09_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_09_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_09_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_09_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_09_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_09_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_09_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_09_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_10				0x00000c58
+#define BNX2_DMA_TAG_RAM_10_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_10_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_10_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_10_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_10_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_10_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_10_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_10_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_11				0x00000c5c
+#define BNX2_DMA_TAG_RAM_11_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_11_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_11_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_11_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_11_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_11_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_11_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_11_VALID			 (1L<<10)
+
 #define BNX2_DMA_RCHAN_STAT_22				0x00000c60
 #define BNX2_DMA_RCHAN_STAT_30				0x00000c64
 #define BNX2_DMA_RCHAN_STAT_31				0x00000c68
@@ -1336,6 +2345,25 @@
  */
 #define BNX2_CTX_COMMAND				0x00001000
 #define BNX2_CTX_COMMAND_ENABLED			 (1L<<0)
+#define BNX2_CTX_COMMAND_DISABLE_USAGE_CNT		 (1L<<1)
+#define BNX2_CTX_COMMAND_DISABLE_PLRU			 (1L<<2)
+#define BNX2_CTX_COMMAND_DISABLE_COMBINE_READ		 (1L<<3)
+#define BNX2_CTX_COMMAND_FLUSH_AHEAD			 (0x1fL<<8)
+#define BNX2_CTX_COMMAND_MEM_INIT			 (1L<<13)
+#define BNX2_CTX_COMMAND_PAGE_SIZE			 (0xfL<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_256			 (0L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_512			 (1L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_1K			 (2L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_2K			 (3L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_4K			 (4L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_8K			 (5L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_16K			 (6L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_32K			 (7L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_64K			 (8L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_128K			 (9L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_256K			 (10L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_512K			 (11L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_1M			 (12L<<16)
 
 #define BNX2_CTX_STATUS					0x00001004
 #define BNX2_CTX_STATUS_LOCK_WAIT			 (1L<<0)
@@ -1343,6 +2371,13 @@
 #define BNX2_CTX_STATUS_WRITE_STAT			 (1L<<17)
 #define BNX2_CTX_STATUS_ACC_STALL_STAT			 (1L<<18)
 #define BNX2_CTX_STATUS_LOCK_STALL_STAT			 (1L<<19)
+#define BNX2_CTX_STATUS_EXT_READ_STAT			 (1L<<20)
+#define BNX2_CTX_STATUS_EXT_WRITE_STAT			 (1L<<21)
+#define BNX2_CTX_STATUS_MISS_STAT			 (1L<<22)
+#define BNX2_CTX_STATUS_HIT_STAT			 (1L<<23)
+#define BNX2_CTX_STATUS_DEAD_LOCK			 (1L<<24)
+#define BNX2_CTX_STATUS_USAGE_CNT_ERR			 (1L<<25)
+#define BNX2_CTX_STATUS_INVALID_PAGE			 (1L<<26)
 
 #define BNX2_CTX_VIRT_ADDR				0x00001008
 #define BNX2_CTX_VIRT_ADDR_VIRT_ADDR			 (0x7fffL<<6)
@@ -1357,10 +2392,15 @@
 #define BNX2_CTX_LOCK					0x00001018
 #define BNX2_CTX_LOCK_TYPE				 (0x7L<<0)
 #define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_VOID		 (0x0L<<0)
-#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_COMPLETE		 (0x7L<<0)
 #define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_PROTOCOL		 (0x1L<<0)
 #define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_TX			 (0x2L<<0)
 #define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_TIMER		 (0x4L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_COMPLETE		 (0x7L<<0)
+#define BNX2_CTX_LOCK_TYPE_VOID_XI			 (0L<<0)
+#define BNX2_CTX_LOCK_TYPE_PROTOCOL_XI			 (1L<<0)
+#define BNX2_CTX_LOCK_TYPE_TX_XI			 (2L<<0)
+#define BNX2_CTX_LOCK_TYPE_TIMER_XI			 (4L<<0)
+#define BNX2_CTX_LOCK_TYPE_COMPLETE_XI			 (7L<<0)
 #define BNX2_CTX_LOCK_CID_VALUE				 (0x3fffL<<7)
 #define BNX2_CTX_LOCK_GRANTED				 (1L<<26)
 #define BNX2_CTX_LOCK_MODE				 (0x7L<<27)
@@ -1370,21 +2410,89 @@
 #define BNX2_CTX_LOCK_STATUS				 (1L<<30)
 #define BNX2_CTX_LOCK_REQ				 (1L<<31)
 
+#define BNX2_CTX_CTX_CTRL				0x0000101c
+#define BNX2_CTX_CTX_CTRL_CTX_ADDR			 (0x7ffffL<<2)
+#define BNX2_CTX_CTX_CTRL_MOD_USAGE_CNT			 (0x3L<<21)
+#define BNX2_CTX_CTX_CTRL_NO_RAM_ACC			 (1L<<23)
+#define BNX2_CTX_CTX_CTRL_PREFETCH_SIZE			 (0x3L<<24)
+#define BNX2_CTX_CTX_CTRL_ATTR				 (1L<<26)
+#define BNX2_CTX_CTX_CTRL_WRITE_REQ			 (1L<<30)
+#define BNX2_CTX_CTX_CTRL_READ_REQ			 (1L<<31)
+
+#define BNX2_CTX_CTX_DATA				0x00001020
 #define BNX2_CTX_ACCESS_STATUS				0x00001040
 #define BNX2_CTX_ACCESS_STATUS_MASTERENCODED		 (0xfL<<0)
 #define BNX2_CTX_ACCESS_STATUS_ACCESSMEMORYSM		 (0x3L<<10)
 #define BNX2_CTX_ACCESS_STATUS_PAGETABLEINITSM		 (0x3L<<12)
 #define BNX2_CTX_ACCESS_STATUS_ACCESSMEMORYINITSM	 (0x3L<<14)
 #define BNX2_CTX_ACCESS_STATUS_QUALIFIED_REQUEST	 (0x7ffL<<17)
+#define BNX2_CTX_ACCESS_STATUS_CAMMASTERENCODED_XI	 (0x1fL<<0)
+#define BNX2_CTX_ACCESS_STATUS_CACHEMASTERENCODED_XI	 (0x1fL<<5)
+#define BNX2_CTX_ACCESS_STATUS_REQUEST_XI		 (0x3fffffL<<10)
 
 #define BNX2_CTX_DBG_LOCK_STATUS			0x00001044
 #define BNX2_CTX_DBG_LOCK_STATUS_SM			 (0x3ffL<<0)
 #define BNX2_CTX_DBG_LOCK_STATUS_MATCH			 (0x3ffL<<22)
 
+#define BNX2_CTX_CACHE_CTRL_STATUS			0x00001048
+#define BNX2_CTX_CACHE_CTRL_STATUS_RFIFO_OVERFLOW	 (1L<<0)
+#define BNX2_CTX_CACHE_CTRL_STATUS_INVALID_READ_COMP	 (1L<<1)
+#define BNX2_CTX_CACHE_CTRL_STATUS_FLUSH_START		 (1L<<6)
+#define BNX2_CTX_CACHE_CTRL_STATUS_FREE_ENTRY_CNT	 (0x3fL<<7)
+#define BNX2_CTX_CACHE_CTRL_STATUS_CACHE_ENTRY_NEEDED	 (0x3fL<<13)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN0_ACTIVE	 (1L<<19)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN1_ACTIVE	 (1L<<20)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN2_ACTIVE	 (1L<<21)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN3_ACTIVE	 (1L<<22)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN4_ACTIVE	 (1L<<23)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN5_ACTIVE	 (1L<<24)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN6_ACTIVE	 (1L<<25)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN7_ACTIVE	 (1L<<26)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN8_ACTIVE	 (1L<<27)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN9_ACTIVE	 (1L<<28)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN10_ACTIVE	 (1L<<29)
+
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS			0x0000104c
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_DWC		 (0x7L<<0)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_WFIFOC		 (0x7L<<3)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_RTAGC		 (0x7L<<6)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_RFIFOC		 (0x7L<<9)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_INVALID_BLK_ADDR	 (0x7fffL<<16)
+
+#define BNX2_CTX_CACHE_STATUS				0x00001050
+#define BNX2_CTX_CACHE_STATUS_HELD_ENTRIES		 (0x3ffL<<0)
+#define BNX2_CTX_CACHE_STATUS_MAX_HELD_ENTRIES		 (0x3ffL<<16)
+
+#define BNX2_CTX_DMA_STATUS				0x00001054
+#define BNX2_CTX_DMA_STATUS_RD_CHAN0_STATUS		 (0x3L<<0)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN1_STATUS		 (0x3L<<2)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN2_STATUS		 (0x3L<<4)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN3_STATUS		 (0x3L<<6)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN4_STATUS		 (0x3L<<8)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN5_STATUS		 (0x3L<<10)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN6_STATUS		 (0x3L<<12)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN7_STATUS		 (0x3L<<14)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN8_STATUS		 (0x3L<<16)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN9_STATUS		 (0x3L<<18)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN10_STATUS		 (0x3L<<20)
+
+#define BNX2_CTX_REP_STATUS				0x00001058
+#define BNX2_CTX_REP_STATUS_ERROR_ENTRY			 (0x3ffL<<0)
+#define BNX2_CTX_REP_STATUS_ERROR_CLIENT_ID		 (0x1fL<<10)
+#define BNX2_CTX_REP_STATUS_USAGE_CNT_MAX_ERR		 (1L<<16)
+#define BNX2_CTX_REP_STATUS_USAGE_CNT_MIN_ERR		 (1L<<17)
+#define BNX2_CTX_REP_STATUS_USAGE_CNT_MISS_ERR		 (1L<<18)
+
+#define BNX2_CTX_CKSUM_ERROR_STATUS			0x0000105c
+#define BNX2_CTX_CKSUM_ERROR_STATUS_CALCULATED		 (0xffffL<<0)
+#define BNX2_CTX_CKSUM_ERROR_STATUS_EXPECTED		 (0xffffL<<16)
+
 #define BNX2_CTX_CHNL_LOCK_STATUS_0			0x00001080
 #define BNX2_CTX_CHNL_LOCK_STATUS_0_CID			 (0x3fffL<<0)
 #define BNX2_CTX_CHNL_LOCK_STATUS_0_TYPE		 (0x3L<<14)
 #define BNX2_CTX_CHNL_LOCK_STATUS_0_MODE		 (1L<<16)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_MODE_XI		 (1L<<14)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_TYPE_XI		 (0x7L<<15)
 
 #define BNX2_CTX_CHNL_LOCK_STATUS_1			0x00001084
 #define BNX2_CTX_CHNL_LOCK_STATUS_2			0x00001088
@@ -1394,6 +2502,26 @@
 #define BNX2_CTX_CHNL_LOCK_STATUS_6			0x00001098
 #define BNX2_CTX_CHNL_LOCK_STATUS_7			0x0000109c
 #define BNX2_CTX_CHNL_LOCK_STATUS_8			0x000010a0
+#define BNX2_CTX_CHNL_LOCK_STATUS_9			0x000010a4
+
+#define BNX2_CTX_CACHE_DATA				0x000010c4
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL			0x000010c8
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL_PAGE_TBL_ADDR	 (0x1ffL<<0)
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ		 (1L<<30)
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL_READ_REQ		 (1L<<31)
+
+#define BNX2_CTX_HOST_PAGE_TBL_DATA0			0x000010cc
+#define BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID		 (1L<<0)
+#define BNX2_CTX_HOST_PAGE_TBL_DATA0_VALUE		 (0xffffffL<<8)
+
+#define BNX2_CTX_HOST_PAGE_TBL_DATA1			0x000010d0
+#define BNX2_CTX_CAM_CTRL				0x000010d4
+#define BNX2_CTX_CAM_CTRL_CAM_ADDR			 (0x3ffL<<0)
+#define BNX2_CTX_CAM_CTRL_RESET				 (1L<<27)
+#define BNX2_CTX_CAM_CTRL_INVALIDATE			 (1L<<28)
+#define BNX2_CTX_CAM_CTRL_SEARCH			 (1L<<29)
+#define BNX2_CTX_CAM_CTRL_WRITE_REQ			 (1L<<30)
+#define BNX2_CTX_CAM_CTRL_READ_REQ			 (1L<<31)
 
 
 /*
@@ -1407,14 +2535,16 @@
 #define BNX2_EMAC_MODE_PORT_NONE			 (0L<<2)
 #define BNX2_EMAC_MODE_PORT_MII				 (1L<<2)
 #define BNX2_EMAC_MODE_PORT_GMII			 (2L<<2)
-#define BNX2_EMAC_MODE_PORT_MII_10			 (3L<<2)
+#define BNX2_EMAC_MODE_PORT_MII_10M			 (3L<<2)
 #define BNX2_EMAC_MODE_MAC_LOOP				 (1L<<4)
-#define BNX2_EMAC_MODE_25G				 (1L<<5)
+#define BNX2_EMAC_MODE_25G_MODE				 (1L<<5)
 #define BNX2_EMAC_MODE_TAGGED_MAC_CTL			 (1L<<7)
 #define BNX2_EMAC_MODE_TX_BURST				 (1L<<8)
 #define BNX2_EMAC_MODE_MAX_DEFER_DROP_ENA		 (1L<<9)
 #define BNX2_EMAC_MODE_EXT_LINK_POL			 (1L<<10)
 #define BNX2_EMAC_MODE_FORCE_LINK			 (1L<<11)
+#define BNX2_EMAC_MODE_SERDES_MODE			 (1L<<12)
+#define BNX2_EMAC_MODE_BOND_OVRD			 (1L<<13)
 #define BNX2_EMAC_MODE_MPKT				 (1L<<18)
 #define BNX2_EMAC_MODE_MPKT_RCVD			 (1L<<19)
 #define BNX2_EMAC_MODE_ACPI_RCVD			 (1L<<20)
@@ -1422,6 +2552,11 @@
 #define BNX2_EMAC_STATUS				0x00001404
 #define BNX2_EMAC_STATUS_LINK				 (1L<<11)
 #define BNX2_EMAC_STATUS_LINK_CHANGE			 (1L<<12)
+#define BNX2_EMAC_STATUS_SERDES_AUTONEG_COMPLETE	 (1L<<13)
+#define BNX2_EMAC_STATUS_SERDES_AUTONEG_CHANGE		 (1L<<14)
+#define BNX2_EMAC_STATUS_SERDES_NXT_PG_CHANGE		 (1L<<16)
+#define BNX2_EMAC_STATUS_SERDES_RX_CONFIG_IS_0		 (1L<<17)
+#define BNX2_EMAC_STATUS_SERDES_RX_CONFIG_IS_0_CHANGE	 (1L<<18)
 #define BNX2_EMAC_STATUS_MI_COMPLETE			 (1L<<22)
 #define BNX2_EMAC_STATUS_MI_INT				 (1L<<23)
 #define BNX2_EMAC_STATUS_AP_ERROR			 (1L<<24)
@@ -1429,6 +2564,9 @@
 
 #define BNX2_EMAC_ATTENTION_ENA				0x00001408
 #define BNX2_EMAC_ATTENTION_ENA_LINK			 (1L<<11)
+#define BNX2_EMAC_ATTENTION_ENA_AUTONEG_CHANGE		 (1L<<14)
+#define BNX2_EMAC_ATTENTION_ENA_NXT_PG_CHANGE		 (1L<<16)
+#define BNX2_EMAC_ATTENTION_ENA_SERDES_RX_CONFIG_IS_0_CHANGE	 (1L<<18)
 #define BNX2_EMAC_ATTENTION_ENA_MI_COMPLETE		 (1L<<22)
 #define BNX2_EMAC_ATTENTION_ENA_MI_INT			 (1L<<23)
 #define BNX2_EMAC_ATTENTION_ENA_AP_ERROR		 (1L<<24)
@@ -1445,6 +2583,13 @@
 #define BNX2_EMAC_LED_100MB				 (1L<<8)
 #define BNX2_EMAC_LED_10MB				 (1L<<9)
 #define BNX2_EMAC_LED_TRAFFIC_STAT			 (1L<<10)
+#define BNX2_EMAC_LED_2500MB				 (1L<<11)
+#define BNX2_EMAC_LED_2500MB_OVERRIDE			 (1L<<12)
+#define BNX2_EMAC_LED_ACTIVITY_SEL			 (0x3L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_0			 (0L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_1			 (1L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_2			 (2L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_3			 (3L<<17)
 #define BNX2_EMAC_LED_BLNK_RATE				 (0xfffL<<19)
 #define BNX2_EMAC_LED_BLNK_RATE_ENA			 (1L<<31)
 
@@ -1515,9 +2660,15 @@
 #define BNX2_EMAC_MDIO_COMM_PHY_ADDR			 (0x1fL<<21)
 #define BNX2_EMAC_MDIO_COMM_COMMAND			 (0x3L<<26)
 #define BNX2_EMAC_MDIO_COMM_COMMAND_UNDEFINED_0		 (0L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_ADDRESS		 (0L<<26)
 #define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE		 (1L<<26)
 #define BNX2_EMAC_MDIO_COMM_COMMAND_READ		 (2L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE_22_XI		 (1L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE_45_XI		 (1L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_22_XI		 (2L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_INC_45_XI	 (2L<<26)
 #define BNX2_EMAC_MDIO_COMM_COMMAND_UNDEFINED_3		 (3L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_45		 (3L<<26)
 #define BNX2_EMAC_MDIO_COMM_FAIL			 (1L<<28)
 #define BNX2_EMAC_MDIO_COMM_START_BUSY			 (1L<<29)
 #define BNX2_EMAC_MDIO_COMM_DISEXT			 (1L<<30)
@@ -1534,13 +2685,17 @@
 #define BNX2_EMAC_MDIO_MODE_MDIO_OE			 (1L<<10)
 #define BNX2_EMAC_MDIO_MODE_MDC				 (1L<<11)
 #define BNX2_EMAC_MDIO_MODE_MDINT			 (1L<<12)
+#define BNX2_EMAC_MDIO_MODE_EXT_MDINT			 (1L<<13)
 #define BNX2_EMAC_MDIO_MODE_CLOCK_CNT			 (0x1fL<<16)
+#define BNX2_EMAC_MDIO_MODE_CLOCK_CNT_XI		 (0x3fL<<16)
+#define BNX2_EMAC_MDIO_MODE_CLAUSE_45_XI		 (1L<<31)
 
 #define BNX2_EMAC_MDIO_AUTO_STATUS			0x000014b8
 #define BNX2_EMAC_MDIO_AUTO_STATUS_AUTO_ERR		 (1L<<0)
 
 #define BNX2_EMAC_TX_MODE				0x000014bc
 #define BNX2_EMAC_TX_MODE_RESET				 (1L<<0)
+#define BNX2_EMAC_TX_MODE_CS16_TEST			 (1L<<2)
 #define BNX2_EMAC_TX_MODE_EXT_PAUSE_EN			 (1L<<3)
 #define BNX2_EMAC_TX_MODE_FLOW_EN			 (1L<<4)
 #define BNX2_EMAC_TX_MODE_BIG_BACKOFF			 (1L<<5)
@@ -1553,6 +2708,7 @@
 #define BNX2_EMAC_TX_STATUS_XON_SENT			 (1L<<2)
 #define BNX2_EMAC_TX_STATUS_LINK_UP			 (1L<<3)
 #define BNX2_EMAC_TX_STATUS_UNDERRUN			 (1L<<4)
+#define BNX2_EMAC_TX_STATUS_CS16_ERROR			 (1L<<5)
 
 #define BNX2_EMAC_TX_LENGTHS				0x000014c4
 #define BNX2_EMAC_TX_LENGTHS_SLOT			 (0xffL<<0)
@@ -1586,6 +2742,10 @@
 #define BNX2_EMAC_MULTICAST_HASH5			0x000014e4
 #define BNX2_EMAC_MULTICAST_HASH6			0x000014e8
 #define BNX2_EMAC_MULTICAST_HASH7			0x000014ec
+#define BNX2_EMAC_CKSUM_ERROR_STATUS			0x000014f0
+#define BNX2_EMAC_CKSUM_ERROR_STATUS_CALCULATED		 (0xffffL<<0)
+#define BNX2_EMAC_CKSUM_ERROR_STATUS_EXPECTED		 (0xffffL<<16)
+
 #define BNX2_EMAC_RX_STAT_IFHCINOCTETS			0x00001500
 #define BNX2_EMAC_RX_STAT_IFHCINBADOCTETS		0x00001504
 #define BNX2_EMAC_RX_STAT_ETHERSTATSFRAGMENTS		0x00001508
@@ -1608,7 +2768,7 @@
 #define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS256OCTETSTO511OCTETS	0x0000154c
 #define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS512OCTETSTO1023OCTETS	0x00001550
 #define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS1024OCTETSTO1522OCTETS	0x00001554
-#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS1523OCTETSTO9022OCTETS	0x00001558
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTSOVER1522OCTETS	0x00001558
 #define BNX2_EMAC_RXMAC_DEBUG0				0x0000155c
 #define BNX2_EMAC_RXMAC_DEBUG1				0x00001560
 #define BNX2_EMAC_RXMAC_DEBUG1_LENGTH_NE_BYTE_COUNT	 (1L<<0)
@@ -1661,9 +2821,9 @@
 #define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UMAC2		 (0x1L<<16)
 #define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UMAC3		 (0x2L<<16)
 #define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UNI		 (0x3L<<16)
-#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MMAC2		 (0x7L<<16)
 #define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MMAC3		 (0x5L<<16)
 #define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA1		 (0x6L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MMAC2		 (0x7L<<16)
 #define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA2		 (0x7L<<16)
 #define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA3		 (0x8L<<16)
 #define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC2		 (0x9L<<16)
@@ -1701,7 +2861,7 @@
 #define BNX2_EMAC_RXMAC_DEBUG4_SLOT_FILLED		 (1L<<23)
 #define BNX2_EMAC_RXMAC_DEBUG4_FALSE_CARRIER		 (1L<<24)
 #define BNX2_EMAC_RXMAC_DEBUG4_LAST_DATA		 (1L<<25)
-#define BNX2_EMAC_RXMAC_DEBUG4_sfd_FOUND		 (1L<<26)
+#define BNX2_EMAC_RXMAC_DEBUG4_SFD_FOUND		 (1L<<26)
 #define BNX2_EMAC_RXMAC_DEBUG4_ADVANCE			 (1L<<27)
 #define BNX2_EMAC_RXMAC_DEBUG4_START			 (1L<<28)
 
@@ -1733,6 +2893,7 @@
 #define BNX2_EMAC_RXMAC_DEBUG5_IDI_RPM_ACCEPT		 (1L<<19)
 #define BNX2_EMAC_RXMAC_DEBUG5_FMLEN			 (0xfffL<<20)
 
+#define BNX2_EMAC_RX_STAT_FALSECARRIERERRORS		0x00001574
 #define BNX2_EMAC_RX_STAT_AC0				0x00001580
 #define BNX2_EMAC_RX_STAT_AC1				0x00001584
 #define BNX2_EMAC_RX_STAT_AC2				0x00001588
@@ -1757,6 +2918,7 @@
 #define BNX2_EMAC_RX_STAT_AC21				0x000015d4
 #define BNX2_EMAC_RX_STAT_AC22				0x000015d8
 #define BNX2_EMAC_RXMAC_SUC_DBG_OVERRUNVEC		0x000015dc
+#define BNX2_EMAC_RX_STAT_AC_28				0x000015f4
 #define BNX2_EMAC_TX_STAT_IFHCOUTOCTETS			0x00001600
 #define BNX2_EMAC_TX_STAT_IFHCOUTBADOCTETS		0x00001604
 #define BNX2_EMAC_TX_STAT_ETHERSTATSCOLLISIONS		0x00001608
@@ -1777,7 +2939,7 @@
 #define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS256OCTETSTO511OCTETS	0x00001644
 #define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS512OCTETSTO1023OCTETS	0x00001648
 #define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS1024OCTETSTO1522OCTETS	0x0000164c
-#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS1523OCTETSTO9022OCTETS	0x00001650
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTSOVER1522OCTETS	0x00001650
 #define BNX2_EMAC_TX_STAT_DOT3STATSINTERNALMACTRANSMITERRORS	0x00001654
 #define BNX2_EMAC_TXMAC_DEBUG0				0x00001658
 #define BNX2_EMAC_TXMAC_DEBUG1				0x0000165c
@@ -1843,16 +3005,16 @@
 #define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_IDLE		 (0x0L<<16)
 #define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA1		 (0x2L<<16)
 #define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA2		 (0x3L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC3		 (0x4L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC2		 (0x5L<<16)
 #define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA3		 (0x6L<<16)
 #define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC1		 (0x7L<<16)
-#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC2		 (0x5L<<16)
-#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC3		 (0x4L<<16)
-#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_TYPE		 (0xcL<<16)
-#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CMD		 (0xeL<<16)
-#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_TIME		 (0xaL<<16)
 #define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CRC1		 (0x8L<<16)
 #define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CRC2		 (0x9L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_TIME		 (0xaL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_TYPE		 (0xcL<<16)
 #define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_WAIT		 (0xdL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CMD		 (0xeL<<16)
 #define BNX2_EMAC_TXMAC_DEBUG4_STATS0_VALID		 (1L<<20)
 #define BNX2_EMAC_TXMAC_DEBUG4_APPEND_CRC		 (1L<<21)
 #define BNX2_EMAC_TXMAC_DEBUG4_SLOT_FILLED		 (1L<<22)
@@ -1887,8 +3049,11 @@
 #define BNX2_EMAC_TX_STAT_AC18				0x000016c8
 #define BNX2_EMAC_TX_STAT_AC19				0x000016cc
 #define BNX2_EMAC_TX_STAT_AC20				0x000016d0
-#define BNX2_EMAC_TX_STAT_AC21				0x000016d4
 #define BNX2_EMAC_TXMAC_SUC_DBG_OVERRUNVEC		0x000016d8
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL			0x000016fc
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_TX_THROTTLE_INC	 (0x7fL<<0)
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_TX_THROTTLE_NUM	 (0x7fL<<16)
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_RATE_LIMITER_EN	 (1L<<31)
 
 
 /*
@@ -1909,8 +3074,15 @@
 #define BNX2_RPM_CONFIG_ACPI_KEEP			 (1L<<2)
 #define BNX2_RPM_CONFIG_MP_KEEP				 (1L<<3)
 #define BNX2_RPM_CONFIG_SORT_VECT_VAL			 (0xfL<<4)
+#define BNX2_RPM_CONFIG_DISABLE_WOL_ASSERT		 (1L<<30)
 #define BNX2_RPM_CONFIG_IGNORE_VLAN			 (1L<<31)
 
+#define BNX2_RPM_MGMT_PKT_CTRL				0x0000180c
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_SORT		 (0xfL<<0)
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_RULE		 (0xfL<<4)
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_DISCARD_EN		 (1L<<30)
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_EN			 (1L<<31)
+
 #define BNX2_RPM_VLAN_MATCH0				0x00001810
 #define BNX2_RPM_VLAN_MATCH0_RPM_VLAN_MTCH0_VALUE	 (0xfffL<<0)
 
@@ -1931,6 +3103,7 @@
 #define BNX2_RPM_SORT_USER0_PROM_EN			 (1L<<19)
 #define BNX2_RPM_SORT_USER0_VLAN_EN			 (0xfL<<20)
 #define BNX2_RPM_SORT_USER0_PROM_VLAN			 (1L<<24)
+#define BNX2_RPM_SORT_USER0_VLAN_NOTMATCH		 (1L<<25)
 #define BNX2_RPM_SORT_USER0_ENA				 (1L<<31)
 
 #define BNX2_RPM_SORT_USER1				0x00001824
@@ -1968,11 +3141,187 @@
 #define BNX2_RPM_STAT_IFINFTQDISCARDS			0x00001848
 #define BNX2_RPM_STAT_IFINMBUFDISCARD			0x0000184c
 #define BNX2_RPM_STAT_RULE_CHECKER_P4_HIT		0x00001850
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0		0x00001854
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1		0x00001858
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2		0x0000185c
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3		0x00001860
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4		0x00001864
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5		0x00001868
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6		0x0000186c
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7		0x00001870
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_EN	 (1L<<31)
+
 #define BNX2_RPM_STAT_AC0				0x00001880
 #define BNX2_RPM_STAT_AC1				0x00001884
 #define BNX2_RPM_STAT_AC2				0x00001888
 #define BNX2_RPM_STAT_AC3				0x0000188c
 #define BNX2_RPM_STAT_AC4				0x00001890
+#define BNX2_RPM_RC_CNTL_16				0x000018e0
+#define BNX2_RPM_RC_CNTL_16_OFFSET			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_16_CLASS			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_16_PRIORITY			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_16_P4				 (1L<<12)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_START		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_IP			 (1L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_TCP		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_UDP		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_DATA		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_TCP_UDP		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_ICMPV6		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_16_COMP			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_EQUAL			 (0L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_NEQUAL			 (1L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_GREATER		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_LESS			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_16_MAP				 (1L<<18)
+#define BNX2_RPM_RC_CNTL_16_SBIT			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_16_CMDSEL			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_16_DISCARD			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_16_MASK			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_16_P1				 (1L<<27)
+#define BNX2_RPM_RC_CNTL_16_P2				 (1L<<28)
+#define BNX2_RPM_RC_CNTL_16_P3				 (1L<<29)
+#define BNX2_RPM_RC_CNTL_16_NBIT			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_16			0x000018e4
+#define BNX2_RPM_RC_VALUE_MASK_16_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_16_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_17				0x000018e8
+#define BNX2_RPM_RC_CNTL_17_OFFSET			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_17_CLASS			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_17_PRIORITY			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_17_P4				 (1L<<12)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_START		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_IP			 (1L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_TCP		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_UDP		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_DATA		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_TCP_UDP		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_ICMPV6		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_17_COMP			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_EQUAL			 (0L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_NEQUAL			 (1L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_GREATER		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_LESS			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_17_MAP				 (1L<<18)
+#define BNX2_RPM_RC_CNTL_17_SBIT			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_17_CMDSEL			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_17_DISCARD			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_17_MASK			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_17_P1				 (1L<<27)
+#define BNX2_RPM_RC_CNTL_17_P2				 (1L<<28)
+#define BNX2_RPM_RC_CNTL_17_P3				 (1L<<29)
+#define BNX2_RPM_RC_CNTL_17_NBIT			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_17			0x000018ec
+#define BNX2_RPM_RC_VALUE_MASK_17_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_17_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_18				0x000018f0
+#define BNX2_RPM_RC_CNTL_18_OFFSET			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_18_CLASS			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_18_PRIORITY			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_18_P4				 (1L<<12)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_START		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_IP			 (1L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_TCP		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_UDP		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_DATA		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_TCP_UDP		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_ICMPV6		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_18_COMP			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_EQUAL			 (0L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_NEQUAL			 (1L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_GREATER		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_LESS			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_18_MAP				 (1L<<18)
+#define BNX2_RPM_RC_CNTL_18_SBIT			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_18_CMDSEL			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_18_DISCARD			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_18_MASK			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_18_P1				 (1L<<27)
+#define BNX2_RPM_RC_CNTL_18_P2				 (1L<<28)
+#define BNX2_RPM_RC_CNTL_18_P3				 (1L<<29)
+#define BNX2_RPM_RC_CNTL_18_NBIT			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_18			0x000018f4
+#define BNX2_RPM_RC_VALUE_MASK_18_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_18_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_19				0x000018f8
+#define BNX2_RPM_RC_CNTL_19_OFFSET			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_19_CLASS			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_19_PRIORITY			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_19_P4				 (1L<<12)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_START		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_IP			 (1L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_TCP		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_UDP		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_DATA		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_TCP_UDP		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_ICMPV6		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_19_COMP			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_EQUAL			 (0L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_NEQUAL			 (1L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_GREATER		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_LESS			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_19_MAP				 (1L<<18)
+#define BNX2_RPM_RC_CNTL_19_SBIT			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_19_CMDSEL			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_19_DISCARD			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_19_MASK			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_19_P1				 (1L<<27)
+#define BNX2_RPM_RC_CNTL_19_P2				 (1L<<28)
+#define BNX2_RPM_RC_CNTL_19_P3				 (1L<<29)
+#define BNX2_RPM_RC_CNTL_19_NBIT			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_19			0x000018fc
+#define BNX2_RPM_RC_VALUE_MASK_19_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_19_MASK			 (0xffffL<<16)
+
 #define BNX2_RPM_RC_CNTL_0				0x00001900
 #define BNX2_RPM_RC_CNTL_0_OFFSET			 (0xffL<<0)
 #define BNX2_RPM_RC_CNTL_0_CLASS			 (0x7L<<8)
@@ -1984,14 +3333,18 @@
 #define BNX2_RPM_RC_CNTL_0_HDR_TYPE_TCP			 (2L<<13)
 #define BNX2_RPM_RC_CNTL_0_HDR_TYPE_UDP			 (3L<<13)
 #define BNX2_RPM_RC_CNTL_0_HDR_TYPE_DATA		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_TCP_UDP		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_ICMPV6		 (6L<<13)
 #define BNX2_RPM_RC_CNTL_0_COMP				 (0x3L<<16)
 #define BNX2_RPM_RC_CNTL_0_COMP_EQUAL			 (0L<<16)
 #define BNX2_RPM_RC_CNTL_0_COMP_NEQUAL			 (1L<<16)
 #define BNX2_RPM_RC_CNTL_0_COMP_GREATER			 (2L<<16)
 #define BNX2_RPM_RC_CNTL_0_COMP_LESS			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_0_MAP_XI			 (1L<<18)
 #define BNX2_RPM_RC_CNTL_0_SBIT				 (1L<<19)
 #define BNX2_RPM_RC_CNTL_0_CMDSEL			 (0xfL<<20)
 #define BNX2_RPM_RC_CNTL_0_MAP				 (1L<<24)
+#define BNX2_RPM_RC_CNTL_0_CMDSEL_XI			 (0x1fL<<20)
 #define BNX2_RPM_RC_CNTL_0_DISCARD			 (1L<<25)
 #define BNX2_RPM_RC_CNTL_0_MASK				 (1L<<26)
 #define BNX2_RPM_RC_CNTL_0_P1				 (1L<<27)
@@ -2006,81 +3359,518 @@
 #define BNX2_RPM_RC_CNTL_1				0x00001908
 #define BNX2_RPM_RC_CNTL_1_A				 (0x3ffffL<<0)
 #define BNX2_RPM_RC_CNTL_1_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_1_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_1_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_1_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_1_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_1_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_1_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_1_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_1_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_1_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_1_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_1_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_1_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_1_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_1_NBIT_XI			 (1L<<30)
 
 #define BNX2_RPM_RC_VALUE_MASK_1			0x0000190c
+#define BNX2_RPM_RC_VALUE_MASK_1_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_1_MASK			 (0xffffL<<16)
+
 #define BNX2_RPM_RC_CNTL_2				0x00001910
 #define BNX2_RPM_RC_CNTL_2_A				 (0x3ffffL<<0)
 #define BNX2_RPM_RC_CNTL_2_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_2_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_2_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_2_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_2_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_2_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_2_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_2_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_2_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_2_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_2_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_2_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_2_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_2_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_2_NBIT_XI			 (1L<<30)
 
 #define BNX2_RPM_RC_VALUE_MASK_2			0x00001914
+#define BNX2_RPM_RC_VALUE_MASK_2_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_2_MASK			 (0xffffL<<16)
+
 #define BNX2_RPM_RC_CNTL_3				0x00001918
 #define BNX2_RPM_RC_CNTL_3_A				 (0x3ffffL<<0)
 #define BNX2_RPM_RC_CNTL_3_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_3_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_3_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_3_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_3_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_3_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_3_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_3_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_3_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_3_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_3_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_3_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_3_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_3_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_3_NBIT_XI			 (1L<<30)
 
 #define BNX2_RPM_RC_VALUE_MASK_3			0x0000191c
+#define BNX2_RPM_RC_VALUE_MASK_3_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_3_MASK			 (0xffffL<<16)
+
 #define BNX2_RPM_RC_CNTL_4				0x00001920
 #define BNX2_RPM_RC_CNTL_4_A				 (0x3ffffL<<0)
 #define BNX2_RPM_RC_CNTL_4_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_4_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_4_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_4_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_4_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_4_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_4_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_4_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_4_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_4_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_4_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_4_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_4_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_4_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_4_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_4_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_4_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_4_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_4_NBIT_XI			 (1L<<30)
 
 #define BNX2_RPM_RC_VALUE_MASK_4			0x00001924
+#define BNX2_RPM_RC_VALUE_MASK_4_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_4_MASK			 (0xffffL<<16)
+
 #define BNX2_RPM_RC_CNTL_5				0x00001928
 #define BNX2_RPM_RC_CNTL_5_A				 (0x3ffffL<<0)
 #define BNX2_RPM_RC_CNTL_5_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_5_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_5_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_5_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_5_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_5_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_5_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_5_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_5_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_5_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_5_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_5_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_5_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_5_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_5_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_5_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_5_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_5_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_5_NBIT_XI			 (1L<<30)
 
 #define BNX2_RPM_RC_VALUE_MASK_5			0x0000192c
+#define BNX2_RPM_RC_VALUE_MASK_5_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_5_MASK			 (0xffffL<<16)
+
 #define BNX2_RPM_RC_CNTL_6				0x00001930
 #define BNX2_RPM_RC_CNTL_6_A				 (0x3ffffL<<0)
 #define BNX2_RPM_RC_CNTL_6_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_6_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_6_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_6_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_6_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_6_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_6_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_6_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_6_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_6_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_6_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_6_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_6_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_6_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_6_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_6_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_6_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_6_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_6_NBIT_XI			 (1L<<30)
 
 #define BNX2_RPM_RC_VALUE_MASK_6			0x00001934
+#define BNX2_RPM_RC_VALUE_MASK_6_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_6_MASK			 (0xffffL<<16)
+
 #define BNX2_RPM_RC_CNTL_7				0x00001938
 #define BNX2_RPM_RC_CNTL_7_A				 (0x3ffffL<<0)
 #define BNX2_RPM_RC_CNTL_7_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_7_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_7_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_7_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_7_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_7_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_7_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_7_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_7_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_7_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_7_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_7_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_7_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_7_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_7_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_7_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_7_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_7_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_7_NBIT_XI			 (1L<<30)
 
 #define BNX2_RPM_RC_VALUE_MASK_7			0x0000193c
+#define BNX2_RPM_RC_VALUE_MASK_7_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_7_MASK			 (0xffffL<<16)
+
 #define BNX2_RPM_RC_CNTL_8				0x00001940
 #define BNX2_RPM_RC_CNTL_8_A				 (0x3ffffL<<0)
 #define BNX2_RPM_RC_CNTL_8_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_8_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_8_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_8_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_8_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_8_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_8_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_8_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_8_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_8_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_8_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_8_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_8_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_8_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_8_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_8_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_8_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_8_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_8_NBIT_XI			 (1L<<30)
 
 #define BNX2_RPM_RC_VALUE_MASK_8			0x00001944
+#define BNX2_RPM_RC_VALUE_MASK_8_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_8_MASK			 (0xffffL<<16)
+
 #define BNX2_RPM_RC_CNTL_9				0x00001948
 #define BNX2_RPM_RC_CNTL_9_A				 (0x3ffffL<<0)
 #define BNX2_RPM_RC_CNTL_9_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_9_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_9_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_9_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_9_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_9_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_9_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_9_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_9_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_9_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_9_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_9_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_9_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_9_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_9_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_9_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_9_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_9_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_9_NBIT_XI			 (1L<<30)
 
 #define BNX2_RPM_RC_VALUE_MASK_9			0x0000194c
+#define BNX2_RPM_RC_VALUE_MASK_9_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_9_MASK			 (0xffffL<<16)
+
 #define BNX2_RPM_RC_CNTL_10				0x00001950
 #define BNX2_RPM_RC_CNTL_10_A				 (0x3ffffL<<0)
 #define BNX2_RPM_RC_CNTL_10_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_10_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_10_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_10_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_10_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_10_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_10_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_10_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_10_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_10_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_10_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_10_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_10_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_10_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_10_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_10_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_10_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_10_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_10_NBIT_XI			 (1L<<30)
 
 #define BNX2_RPM_RC_VALUE_MASK_10			0x00001954
+#define BNX2_RPM_RC_VALUE_MASK_10_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_10_MASK			 (0xffffL<<16)
+
 #define BNX2_RPM_RC_CNTL_11				0x00001958
 #define BNX2_RPM_RC_CNTL_11_A				 (0x3ffffL<<0)
 #define BNX2_RPM_RC_CNTL_11_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_11_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_11_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_11_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_11_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_11_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_11_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_11_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_11_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_11_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_11_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_11_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_11_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_11_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_11_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_11_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_11_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_11_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_11_NBIT_XI			 (1L<<30)
 
 #define BNX2_RPM_RC_VALUE_MASK_11			0x0000195c
+#define BNX2_RPM_RC_VALUE_MASK_11_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_11_MASK			 (0xffffL<<16)
+
 #define BNX2_RPM_RC_CNTL_12				0x00001960
 #define BNX2_RPM_RC_CNTL_12_A				 (0x3ffffL<<0)
 #define BNX2_RPM_RC_CNTL_12_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_12_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_12_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_12_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_12_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_12_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_12_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_12_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_12_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_12_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_12_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_12_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_12_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_12_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_12_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_12_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_12_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_12_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_12_NBIT_XI			 (1L<<30)
 
 #define BNX2_RPM_RC_VALUE_MASK_12			0x00001964
+#define BNX2_RPM_RC_VALUE_MASK_12_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_12_MASK			 (0xffffL<<16)
+
 #define BNX2_RPM_RC_CNTL_13				0x00001968
 #define BNX2_RPM_RC_CNTL_13_A				 (0x3ffffL<<0)
 #define BNX2_RPM_RC_CNTL_13_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_13_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_13_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_13_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_13_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_13_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_13_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_13_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_13_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_13_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_13_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_13_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_13_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_13_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_13_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_13_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_13_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_13_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_13_NBIT_XI			 (1L<<30)
 
 #define BNX2_RPM_RC_VALUE_MASK_13			0x0000196c
+#define BNX2_RPM_RC_VALUE_MASK_13_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_13_MASK			 (0xffffL<<16)
+
 #define BNX2_RPM_RC_CNTL_14				0x00001970
 #define BNX2_RPM_RC_CNTL_14_A				 (0x3ffffL<<0)
 #define BNX2_RPM_RC_CNTL_14_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_14_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_14_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_14_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_14_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_14_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_14_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_14_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_14_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_14_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_14_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_14_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_14_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_14_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_14_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_14_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_14_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_14_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_14_NBIT_XI			 (1L<<30)
 
 #define BNX2_RPM_RC_VALUE_MASK_14			0x00001974
+#define BNX2_RPM_RC_VALUE_MASK_14_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_14_MASK			 (0xffffL<<16)
+
 #define BNX2_RPM_RC_CNTL_15				0x00001978
 #define BNX2_RPM_RC_CNTL_15_A				 (0x3ffffL<<0)
 #define BNX2_RPM_RC_CNTL_15_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_15_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_15_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_15_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_15_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_15_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_15_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_15_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_15_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_15_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_15_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_15_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_15_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_15_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_15_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_15_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_15_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_15_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_15_NBIT_XI			 (1L<<30)
 
 #define BNX2_RPM_RC_VALUE_MASK_15			0x0000197c
+#define BNX2_RPM_RC_VALUE_MASK_15_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_15_MASK			 (0xffffL<<16)
+
 #define BNX2_RPM_RC_CONFIG				0x00001980
 #define BNX2_RPM_RC_CONFIG_RULE_ENABLE			 (0xffffL<<0)
+#define BNX2_RPM_RC_CONFIG_RULE_ENABLE_XI		 (0xfffffL<<0)
 #define BNX2_RPM_RC_CONFIG_DEF_CLASS			 (0x7L<<24)
+#define BNX2_RPM_RC_CONFIG_KNUM_OVERWRITE		 (1L<<31)
 
 #define BNX2_RPM_DEBUG0					0x00001984
 #define BNX2_RPM_DEBUG0_FM_BCNT				 (0xffffL<<0)
@@ -2236,6 +4026,16 @@
 #define BNX2_RPM_DEBUG9_INFIFO_OVERRUN_OCCURRED		 (1L<<29)
 #define BNX2_RPM_DEBUG9_ACPI_MATCH_INT			 (1L<<30)
 #define BNX2_RPM_DEBUG9_ACPI_ENABLE_SYN			 (1L<<31)
+#define BNX2_RPM_DEBUG9_BEMEM_R_XI			 (0x1fL<<0)
+#define BNX2_RPM_DEBUG9_EO_XI				 (1L<<5)
+#define BNX2_RPM_DEBUG9_AEOF_DE_XI			 (1L<<6)
+#define BNX2_RPM_DEBUG9_SO_XI				 (1L<<7)
+#define BNX2_RPM_DEBUG9_WD64_CT_XI			 (0x1fL<<8)
+#define BNX2_RPM_DEBUG9_EOF_VLDBYTE_XI			 (0x7L<<13)
+#define BNX2_RPM_DEBUG9_ACPI_RDE_PAT_ID_XI		 (0xfL<<16)
+#define BNX2_RPM_DEBUG9_CALCRC_RESULT_XI		 (0x3ffL<<20)
+#define BNX2_RPM_DEBUG9_DATA_IN_VL_XI			 (1L<<30)
+#define BNX2_RPM_DEBUG9_CALCRC_BUFFER_VLD_XI		 (1L<<31)
 
 #define BNX2_RPM_ACPI_DBG_BUF_W00			0x000019c0
 #define BNX2_RPM_ACPI_DBG_BUF_W01			0x000019c4
@@ -2253,6 +4053,56 @@
 #define BNX2_RPM_ACPI_DBG_BUF_W31			0x000019f4
 #define BNX2_RPM_ACPI_DBG_BUF_W32			0x000019f8
 #define BNX2_RPM_ACPI_DBG_BUF_W33			0x000019fc
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL			0x00001a00
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_BYTE_ADDRESS	 (0xffffL<<0)
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_DEBUGRD		 (1L<<28)
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_MODE		 (1L<<29)
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_INIT		 (1L<<30)
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_WR		 (1L<<31)
+
+#define BNX2_RPM_ACPI_PATTERN_CTRL			0x00001a04
+#define BNX2_RPM_ACPI_PATTERN_CTRL_PATTERN_ID		 (0xfL<<0)
+#define BNX2_RPM_ACPI_PATTERN_CTRL_CRC_SM_CLR		 (1L<<30)
+#define BNX2_RPM_ACPI_PATTERN_CTRL_WR			 (1L<<31)
+
+#define BNX2_RPM_ACPI_DATA				0x00001a08
+#define BNX2_RPM_ACPI_DATA_PATTERN_BE			 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_LEN0			0x00001a0c
+#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN3		 (0xffL<<0)
+#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN2		 (0xffL<<8)
+#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN1		 (0xffL<<16)
+#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN0		 (0xffL<<24)
+
+#define BNX2_RPM_ACPI_PATTERN_LEN1			0x00001a10
+#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN7		 (0xffL<<0)
+#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN6		 (0xffL<<8)
+#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN5		 (0xffL<<16)
+#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN4		 (0xffL<<24)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC0			0x00001a18
+#define BNX2_RPM_ACPI_PATTERN_CRC0_PATTERN_CRC0		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC1			0x00001a1c
+#define BNX2_RPM_ACPI_PATTERN_CRC1_PATTERN_CRC1		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC2			0x00001a20
+#define BNX2_RPM_ACPI_PATTERN_CRC2_PATTERN_CRC2		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC3			0x00001a24
+#define BNX2_RPM_ACPI_PATTERN_CRC3_PATTERN_CRC3		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC4			0x00001a28
+#define BNX2_RPM_ACPI_PATTERN_CRC4_PATTERN_CRC4		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC5			0x00001a2c
+#define BNX2_RPM_ACPI_PATTERN_CRC5_PATTERN_CRC5		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC6			0x00001a30
+#define BNX2_RPM_ACPI_PATTERN_CRC6_PATTERN_CRC6		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC7			0x00001a34
+#define BNX2_RPM_ACPI_PATTERN_CRC7_PATTERN_CRC7		 (0xffffffffL<<0)
 
 
 /*
@@ -2263,15 +4113,20 @@
 #define BNX2_RBUF_COMMAND_ENABLED			 (1L<<0)
 #define BNX2_RBUF_COMMAND_FREE_INIT			 (1L<<1)
 #define BNX2_RBUF_COMMAND_RAM_INIT			 (1L<<2)
+#define BNX2_RBUF_COMMAND_PKT_OFFSET_OVFL		 (1L<<3)
 #define BNX2_RBUF_COMMAND_OVER_FREE			 (1L<<4)
 #define BNX2_RBUF_COMMAND_ALLOC_REQ			 (1L<<5)
+#define BNX2_RBUF_COMMAND_EN_PRI_CHNGE_TE		 (1L<<6)
+#define BNX2_RBUF_COMMAND_CU_ISOLATE_XI			 (1L<<5)
+#define BNX2_RBUF_COMMAND_EN_PRI_CHANGE_XI		 (1L<<6)
+#define BNX2_RBUF_COMMAND_GRC_ENDIAN_CONV_DIS_XI	 (1L<<7)
 
 #define BNX2_RBUF_STATUS1				0x00200004
 #define BNX2_RBUF_STATUS1_FREE_COUNT			 (0x3ffL<<0)
 
 #define BNX2_RBUF_STATUS2				0x00200008
-#define BNX2_RBUF_STATUS2_FREE_TAIL			 (0x3ffL<<0)
-#define BNX2_RBUF_STATUS2_FREE_HEAD			 (0x3ffL<<16)
+#define BNX2_RBUF_STATUS2_FREE_TAIL			 (0x1ffL<<0)
+#define BNX2_RBUF_STATUS2_FREE_HEAD			 (0x1ffL<<16)
 
 #define BNX2_RBUF_CONFIG				0x0020000c
 #define BNX2_RBUF_CONFIG_XOFF_TRIP			 (0x3ffL<<0)
@@ -2279,16 +4134,21 @@
 
 #define BNX2_RBUF_FW_BUF_ALLOC				0x00200010
 #define BNX2_RBUF_FW_BUF_ALLOC_VALUE			 (0x1ffL<<7)
+#define BNX2_RBUF_FW_BUF_ALLOC_TYPE			 (1L<<16)
+#define BNX2_RBUF_FW_BUF_ALLOC_ALLOC_REQ		 (1L<<31)
 
 #define BNX2_RBUF_FW_BUF_FREE				0x00200014
 #define BNX2_RBUF_FW_BUF_FREE_COUNT			 (0x7fL<<0)
 #define BNX2_RBUF_FW_BUF_FREE_TAIL			 (0x1ffL<<7)
 #define BNX2_RBUF_FW_BUF_FREE_HEAD			 (0x1ffL<<16)
+#define BNX2_RBUF_FW_BUF_FREE_TYPE			 (1L<<25)
+#define BNX2_RBUF_FW_BUF_FREE_FREE_REQ			 (1L<<31)
 
 #define BNX2_RBUF_FW_BUF_SEL				0x00200018
 #define BNX2_RBUF_FW_BUF_SEL_COUNT			 (0x7fL<<0)
 #define BNX2_RBUF_FW_BUF_SEL_TAIL			 (0x1ffL<<7)
 #define BNX2_RBUF_FW_BUF_SEL_HEAD			 (0x1ffL<<16)
+#define BNX2_RBUF_FW_BUF_SEL_SEL_REQ			 (1L<<31)
 
 #define BNX2_RBUF_CONFIG2				0x0020001c
 #define BNX2_RBUF_CONFIG2_MAC_DROP_TRIP			 (0x3ffL<<0)
@@ -2376,6 +4236,8 @@
 #define BNX2_RV2P_INSTR_HIGH_HIGH			 (0x1fL<<0)
 
 #define BNX2_RV2P_INSTR_LOW				0x00002834
+#define BNX2_RV2P_INSTR_LOW_LOW				 (0xffffffffL<<0)
+
 #define BNX2_RV2P_PROC1_ADDR_CMD			0x00002838
 #define BNX2_RV2P_PROC1_ADDR_CMD_ADD			 (0x3ffL<<0)
 #define BNX2_RV2P_PROC1_ADDR_CMD_RDWR			 (1L<<31)
@@ -2395,7 +4257,29 @@
 #define BNX2_RV2P_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
 #define BNX2_RV2P_DEBUG_VECT_PEEK_2_SEL			 (0xfL<<28)
 
-#define BNX2_RV2P_PFTQ_DATA				0x00002b40
+#define BNX2_RV2P_MPFE_PFE_CTL				0x00002afc
+#define BNX2_RV2P_MPFE_PFE_CTL_INC_USAGE_CNT		 (1L<<0)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE			 (0xfL<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_0		 (0L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_1		 (1L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_2		 (2L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_3		 (3L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_4		 (4L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_5		 (5L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_6		 (6L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_7		 (7L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_8		 (8L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_9		 (9L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_10		 (10L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_11		 (11L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_12		 (12L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_13		 (13L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_14		 (14L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_15		 (15L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_COUNT		 (0xfL<<12)
+#define BNX2_RV2P_MPFE_PFE_CTL_OFFSET			 (0x1ffL<<16)
+
+#define BNX2_RV2P_RV2PPQ				0x00002b40
 #define BNX2_RV2P_PFTQ_CMD				0x00002b78
 #define BNX2_RV2P_PFTQ_CMD_OFFSET			 (0x3ffL<<0)
 #define BNX2_RV2P_PFTQ_CMD_WR_TOP			 (1L<<10)
@@ -2416,7 +4300,7 @@
 #define BNX2_RV2P_PFTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
 #define BNX2_RV2P_PFTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
 
-#define BNX2_RV2P_TFTQ_DATA				0x00002b80
+#define BNX2_RV2P_RV2PTQ				0x00002b80
 #define BNX2_RV2P_TFTQ_CMD				0x00002bb8
 #define BNX2_RV2P_TFTQ_CMD_OFFSET			 (0x3ffL<<0)
 #define BNX2_RV2P_TFTQ_CMD_WR_TOP			 (1L<<10)
@@ -2437,7 +4321,7 @@
 #define BNX2_RV2P_TFTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
 #define BNX2_RV2P_TFTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
 
-#define BNX2_RV2P_MFTQ_DATA				0x00002bc0
+#define BNX2_RV2P_RV2PMQ				0x00002bc0
 #define BNX2_RV2P_MFTQ_CMD				0x00002bf8
 #define BNX2_RV2P_MFTQ_CMD_OFFSET			 (0x3ffL<<0)
 #define BNX2_RV2P_MFTQ_CMD_WR_TOP			 (1L<<10)
@@ -2466,18 +4350,26 @@
  */
 #define BNX2_MQ_COMMAND					0x00003c00
 #define BNX2_MQ_COMMAND_ENABLED				 (1L<<0)
+#define BNX2_MQ_COMMAND_INIT				 (1L<<1)
 #define BNX2_MQ_COMMAND_OVERFLOW			 (1L<<4)
 #define BNX2_MQ_COMMAND_WR_ERROR			 (1L<<5)
 #define BNX2_MQ_COMMAND_RD_ERROR			 (1L<<6)
+#define BNX2_MQ_COMMAND_IDB_CFG_ERROR			 (1L<<7)
+#define BNX2_MQ_COMMAND_IDB_OVERFLOW			 (1L<<10)
+#define BNX2_MQ_COMMAND_NO_BIN_ERROR			 (1L<<11)
+#define BNX2_MQ_COMMAND_NO_MAP_ERROR			 (1L<<12)
 
 #define BNX2_MQ_STATUS					0x00003c04
 #define BNX2_MQ_STATUS_CTX_ACCESS_STAT			 (1L<<16)
 #define BNX2_MQ_STATUS_CTX_ACCESS64_STAT		 (1L<<17)
 #define BNX2_MQ_STATUS_PCI_STALL_STAT			 (1L<<18)
+#define BNX2_MQ_STATUS_IDB_OFLOW_STAT			 (1L<<19)
 
 #define BNX2_MQ_CONFIG					0x00003c08
 #define BNX2_MQ_CONFIG_TX_HIGH_PRI			 (1L<<0)
 #define BNX2_MQ_CONFIG_HALT_DIS				 (1L<<1)
+#define BNX2_MQ_CONFIG_BIN_MQ_MODE			 (1L<<2)
+#define BNX2_MQ_CONFIG_DIS_IDB_DROP			 (1L<<3)
 #define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE			 (0x7L<<4)
 #define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256		 (0L<<4)
 #define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_512		 (1L<<4)
@@ -2533,6 +4425,7 @@
 
 #define BNX2_MQ_MEM_WR_DATA2				0x00003c80
 #define BNX2_MQ_MEM_WR_DATA2_VALUE			 (0x3fffffffL<<0)
+#define BNX2_MQ_MEM_WR_DATA2_VALUE_XI			 (0x7fffffffL<<0)
 
 #define BNX2_MQ_MEM_RD_ADDR				0x00003c84
 #define BNX2_MQ_MEM_RD_ADDR_VALUE			 (0x3fL<<0)
@@ -2545,6 +4438,16 @@
 
 #define BNX2_MQ_MEM_RD_DATA2				0x00003c90
 #define BNX2_MQ_MEM_RD_DATA2_VALUE			 (0x3fffffffL<<0)
+#define BNX2_MQ_MEM_RD_DATA2_VALUE_XI			 (0x7fffffffL<<0)
+
+
+/*
+ *  tsch_reg definition
+ *  offset: 0x4c00
+ */
+#define BNX2_TSCH_TSS_CFG				0x00004c1c
+#define BNX2_TSCH_TSS_CFG_TSS_START_CID			 (0x7ffL<<8)
+#define BNX2_TSCH_TSS_CFG_NUM_OF_TSS_CON		 (0xfL<<24)
 
 
 
@@ -2594,7 +4497,11 @@
 #define BNX2_TBDR_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
 #define BNX2_TBDR_DEBUG_VECT_PEEK_2_SEL			 (0xfL<<28)
 
-#define BNX2_TBDR_FTQ_DATA				0x000053c0
+#define BNX2_TBDR_CKSUM_ERROR_STATUS			0x00005010
+#define BNX2_TBDR_CKSUM_ERROR_STATUS_CALCULATED		 (0xffffL<<0)
+#define BNX2_TBDR_CKSUM_ERROR_STATUS_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_TBDR_TBDRQ					0x000053c0
 #define BNX2_TBDR_FTQ_CMD				0x000053f8
 #define BNX2_TBDR_FTQ_CMD_OFFSET			 (0x3ffL<<0)
 #define BNX2_TBDR_FTQ_CMD_WR_TOP			 (1L<<10)
@@ -2624,7 +4531,15 @@
 #define BNX2_TDMA_COMMAND				0x00005c00
 #define BNX2_TDMA_COMMAND_ENABLED			 (1L<<0)
 #define BNX2_TDMA_COMMAND_MASTER_ABORT			 (1L<<4)
+#define BNX2_TDMA_COMMAND_CS16_ERR			 (1L<<5)
 #define BNX2_TDMA_COMMAND_BAD_L2_LENGTH_ABORT		 (1L<<7)
+#define BNX2_TDMA_COMMAND_MASK_CS1			 (1L<<20)
+#define BNX2_TDMA_COMMAND_MASK_CS2			 (1L<<21)
+#define BNX2_TDMA_COMMAND_MASK_CS3			 (1L<<22)
+#define BNX2_TDMA_COMMAND_MASK_CS4			 (1L<<23)
+#define BNX2_TDMA_COMMAND_FORCE_ILOCK_CKERR		 (1L<<24)
+#define BNX2_TDMA_COMMAND_OFIFO_CLR			 (1L<<30)
+#define BNX2_TDMA_COMMAND_IFIFO_CLR			 (1L<<31)
 
 #define BNX2_TDMA_STATUS				0x00005c04
 #define BNX2_TDMA_STATUS_DMA_WAIT			 (1L<<0)
@@ -2633,10 +4548,18 @@
 #define BNX2_TDMA_STATUS_LOCK_WAIT			 (1L<<3)
 #define BNX2_TDMA_STATUS_FTQ_ENTRY_CNT			 (1L<<16)
 #define BNX2_TDMA_STATUS_BURST_CNT			 (1L<<17)
+#define BNX2_TDMA_STATUS_MAX_IFIFO_DEPTH		 (0x3fL<<20)
+#define BNX2_TDMA_STATUS_OFIFO_OVERFLOW			 (1L<<30)
+#define BNX2_TDMA_STATUS_IFIFO_OVERFLOW			 (1L<<31)
 
 #define BNX2_TDMA_CONFIG				0x00005c08
 #define BNX2_TDMA_CONFIG_ONE_DMA			 (1L<<0)
 #define BNX2_TDMA_CONFIG_ONE_RECORD			 (1L<<1)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN			 (0x3L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_0			 (0L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_1			 (1L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_2			 (2L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_3			 (3L<<2)
 #define BNX2_TDMA_CONFIG_LIMIT_SZ			 (0xfL<<4)
 #define BNX2_TDMA_CONFIG_LIMIT_SZ_64			 (0L<<4)
 #define BNX2_TDMA_CONFIG_LIMIT_SZ_128			 (0x4L<<4)
@@ -2649,7 +4572,35 @@
 #define BNX2_TDMA_CONFIG_LINE_SZ_512			 (8L<<8)
 #define BNX2_TDMA_CONFIG_ALIGN_ENA			 (1L<<15)
 #define BNX2_TDMA_CONFIG_CHK_L2_BD			 (1L<<16)
+#define BNX2_TDMA_CONFIG_CMPL_ENTRY			 (1L<<17)
+#define BNX2_TDMA_CONFIG_OFIFO_CMP			 (1L<<19)
+#define BNX2_TDMA_CONFIG_OFIFO_CMP_3			 (0L<<19)
+#define BNX2_TDMA_CONFIG_OFIFO_CMP_2			 (1L<<19)
 #define BNX2_TDMA_CONFIG_FIFO_CMP			 (0xfL<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_XI			 (0x7L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_0_XI		 (0L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_4_XI		 (1L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_8_XI		 (2L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_16_XI		 (3L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_32_XI		 (4L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_64_XI		 (5L<<20)
+#define BNX2_TDMA_CONFIG_FIFO_CMP_EN_XI			 (1L<<23)
+#define BNX2_TDMA_CONFIG_BYTES_OST_XI			 (0x7L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_512_XI		 (0L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_1024_XI		 (1L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_2048_XI		 (2L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_4096_XI		 (3L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_8192_XI		 (4L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_16384_XI		 (5L<<24)
+#define BNX2_TDMA_CONFIG_HC_BYPASS_XI			 (1L<<27)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_XI			 (0x7L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_128_XI		 (0L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_256_XI		 (1L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_512_XI		 (2L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_1024_XI		 (3L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_2048_XI		 (4L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_4096_XI		 (5L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_EN_XI			 (1L<<31)
 
 #define BNX2_TDMA_PAYLOAD_PROD				0x00005c0c
 #define BNX2_TDMA_PAYLOAD_PROD_VALUE			 (0x1fffL<<3)
@@ -2685,7 +4636,22 @@
 #define BNX2_TDMA_DR_INTF_STATUS_NXT_PNTR		 (0xfL<<12)
 #define BNX2_TDMA_DR_INTF_STATUS_BYTE_COUNT		 (0x7L<<16)
 
-#define BNX2_TDMA_FTQ_DATA				0x00005fc0
+#define BNX2_TDMA_PUSH_FSM				0x00005c90
+#define BNX2_TDMA_BD_IF_DEBUG				0x00005c94
+#define BNX2_TDMA_DMAD_IF_DEBUG				0x00005c98
+#define BNX2_TDMA_CTX_IF_DEBUG				0x00005c9c
+#define BNX2_TDMA_TPBUF_IF_DEBUG			0x00005ca0
+#define BNX2_TDMA_DR_IF_DEBUG				0x00005ca4
+#define BNX2_TDMA_TPATQ_IF_DEBUG			0x00005ca8
+#define BNX2_TDMA_TDMA_ILOCK_CKSUM			0x00005cac
+#define BNX2_TDMA_TDMA_ILOCK_CKSUM_CALCULATED		 (0xffffL<<0)
+#define BNX2_TDMA_TDMA_ILOCK_CKSUM_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_TDMA_TDMA_PCIE_CKSUM			0x00005cb0
+#define BNX2_TDMA_TDMA_PCIE_CKSUM_CALCULATED		 (0xffffL<<0)
+#define BNX2_TDMA_TDMA_PCIE_CKSUM_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_TDMA_TDMAQ					0x00005fc0
 #define BNX2_TDMA_FTQ_CMD				0x00005ff8
 #define BNX2_TDMA_FTQ_CMD_OFFSET			 (0x3ffL<<0)
 #define BNX2_TDMA_FTQ_CMD_WR_TOP			 (1L<<10)
@@ -2724,6 +4690,8 @@
 #define BNX2_HC_COMMAND_FORCE_INT_LOW			 (2L<<19)
 #define BNX2_HC_COMMAND_FORCE_INT_FREE			 (3L<<19)
 #define BNX2_HC_COMMAND_CLR_STAT_NOW			 (1L<<21)
+#define BNX2_HC_COMMAND_MAIN_PWR_INT			 (1L<<22)
+#define BNX2_HC_COMMAND_COAL_ON_NEXT_EVENT		 (1L<<27)
 
 #define BNX2_HC_STATUS					0x00006804
 #define BNX2_HC_STATUS_MASTER_ABORT			 (1L<<0)
@@ -2746,6 +4714,23 @@
 #define BNX2_HC_CONFIG_STATISTIC_PRIORITY		 (1L<<5)
 #define BNX2_HC_CONFIG_STATUS_PRIORITY			 (1L<<6)
 #define BNX2_HC_CONFIG_STAT_MEM_ADDR			 (0xffL<<8)
+#define BNX2_HC_CONFIG_PER_MODE				 (1L<<16)
+#define BNX2_HC_CONFIG_ONE_SHOT				 (1L<<17)
+#define BNX2_HC_CONFIG_USE_INT_PARAM			 (1L<<18)
+#define BNX2_HC_CONFIG_SET_MASK_AT_RD			 (1L<<19)
+#define BNX2_HC_CONFIG_PER_COLLECT_LIMIT		 (0xfL<<20)
+#define BNX2_HC_CONFIG_SB_ADDR_INC			 (0x7L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_64B			 (0L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_128B			 (1L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_256B			 (2L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_512B			 (3L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_1024B		 (4L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_2048B		 (5L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_4096B		 (6L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_8192B		 (7L<<24)
+#define BNX2_HC_CONFIG_GEN_STAT_AVG_INTR		 (1L<<29)
+#define BNX2_HC_CONFIG_UNMASK_ALL			 (1L<<30)
+#define BNX2_HC_CONFIG_TX_SEL				 (1L<<31)
 
 #define BNX2_HC_ATTN_BITS_ENABLE			0x0000680c
 #define BNX2_HC_STATUS_ADDR_L				0x00006810
@@ -2782,6 +4767,7 @@
 
 #define BNX2_HC_PERIODIC_TICKS				0x0000683c
 #define BNX2_HC_PERIODIC_TICKS_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
 
 #define BNX2_HC_STAT_COLLECT_TICKS			0x00006840
 #define BNX2_HC_STAT_COLLECT_TICKS_HC_STAT_COLL_TICKS	 (0xffL<<4)
@@ -2789,6 +4775,10 @@
 #define BNX2_HC_STATS_TICKS				0x00006844
 #define BNX2_HC_STATS_TICKS_HC_STAT_TICKS		 (0xffffL<<8)
 
+#define BNX2_HC_STATS_INTERRUPT_STATUS			0x00006848
+#define BNX2_HC_STATS_INTERRUPT_STATUS_SB_STATUS	 (0x1ffL<<0)
+#define BNX2_HC_STATS_INTERRUPT_STATUS_INT_STATUS	 (0x1ffL<<16)
+
 #define BNX2_HC_STAT_MEM_DATA				0x0000684c
 #define BNX2_HC_STAT_GEN_SEL_0				0x00006850
 #define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0		 (0x7fL<<0)
@@ -2917,24 +4907,108 @@
 #define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_1		 (0x7fL<<8)
 #define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_2		 (0x7fL<<16)
 #define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_3		 (0x7fL<<24)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_XI		 (0xffL<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UMP_RX_FRAME_DROP_XI	 (52L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S0_XI	 (57L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S1_XI	 (58L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S2_XI	 (85L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S3_XI	 (86L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S4_XI	 (87L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S5_XI	 (88L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S6_XI	 (89L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S7_XI	 (90L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S8_XI	 (91L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S9_XI	 (92L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S10_XI	 (93L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MQ_IDB_OFLOW_XI	 (94L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_BLK_RD_CNT_XI	 (123L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_BLK_WR_CNT_XI	 (124L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_HITS_XI	 (125L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_MISSES_XI	 (126L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC1_XI	 (128L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC1_XI	 (129L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC1_XI	 (130L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC1_XI	 (131L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC1_XI	 (132L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC1_XI	 (133L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC2_XI	 (134L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC2_XI	 (135L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC2_XI	 (136L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC2_XI	 (137L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC2_XI	 (138L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC2_XI	 (139L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC3_XI	 (140L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC3_XI	 (141L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC3_XI	 (142L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC3_XI	 (143L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC3_XI	 (144L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC3_XI	 (145L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC4_XI	 (146L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC4_XI	 (147L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC4_XI	 (148L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC4_XI	 (149L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC4_XI	 (150L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC4_XI	 (151L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC5_XI	 (152L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC5_XI	 (153L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC5_XI	 (154L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC5_XI	 (155L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC5_XI	 (156L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC5_XI	 (157L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC6_XI	 (158L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC6_XI	 (159L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC6_XI	 (160L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC6_XI	 (161L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC6_XI	 (162L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC6_XI	 (163L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC7_XI	 (164L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC7_XI	 (165L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC7_XI	 (166L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC7_XI	 (167L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC7_XI	 (168L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC7_XI	 (169L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC8_XI	 (170L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC8_XI	 (171L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC8_XI	 (172L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC8_XI	 (173L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC8_XI	 (174L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC8_XI	 (175L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCS_CMD_CNT_XI	 (176L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCS_SLOT_CNT_XI	 (177L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI	 (178L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_1_XI		 (0xffL<<8)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_2_XI		 (0xffL<<16)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_3_XI		 (0xffL<<24)
 
 #define BNX2_HC_STAT_GEN_SEL_1				0x00006854
 #define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_4		 (0x7fL<<0)
 #define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_5		 (0x7fL<<8)
 #define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_6		 (0x7fL<<16)
 #define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_7		 (0x7fL<<24)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_4_XI		 (0xffL<<0)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_5_XI		 (0xffL<<8)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_6_XI		 (0xffL<<16)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_7_XI		 (0xffL<<24)
 
 #define BNX2_HC_STAT_GEN_SEL_2				0x00006858
 #define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_8		 (0x7fL<<0)
 #define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_9		 (0x7fL<<8)
 #define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_10		 (0x7fL<<16)
 #define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_11		 (0x7fL<<24)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_8_XI		 (0xffL<<0)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_9_XI		 (0xffL<<8)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_10_XI		 (0xffL<<16)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_11_XI		 (0xffL<<24)
 
 #define BNX2_HC_STAT_GEN_SEL_3				0x0000685c
 #define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_12		 (0x7fL<<0)
 #define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_13		 (0x7fL<<8)
 #define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_14		 (0x7fL<<16)
 #define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_15		 (0x7fL<<24)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_12_XI		 (0xffL<<0)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_13_XI		 (0xffL<<8)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_14_XI		 (0xffL<<16)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_15_XI		 (0xffL<<24)
 
 #define BNX2_HC_STAT_GEN_STAT0				0x00006888
 #define BNX2_HC_STAT_GEN_STAT1				0x0000688c
@@ -2968,6 +5042,7 @@
 #define BNX2_HC_STAT_GEN_STAT_AC13			0x000068fc
 #define BNX2_HC_STAT_GEN_STAT_AC14			0x00006900
 #define BNX2_HC_STAT_GEN_STAT_AC15			0x00006904
+#define BNX2_HC_STAT_GEN_STAT_AC			0x000068c8
 #define BNX2_HC_VIS					0x00006908
 #define BNX2_HC_VIS_STAT_BUILD_STATE			 (0xfL<<0)
 #define BNX2_HC_VIS_STAT_BUILD_STATE_IDLE		 (0L<<0)
@@ -3038,6 +5113,349 @@
 #define BNX2_HC_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
 #define BNX2_HC_DEBUG_VECT_PEEK_2_SEL			 (0xfL<<28)
 
+#define BNX2_HC_COALESCE_NOW				0x00006914
+#define BNX2_HC_COALESCE_NOW_COAL_NOW			 (0x1ffL<<1)
+#define BNX2_HC_COALESCE_NOW_COAL_NOW_WO_INT		 (0x1ffL<<11)
+#define BNX2_HC_COALESCE_NOW_COAL_ON_NXT_EVENT		 (0x1ffL<<21)
+
+#define BNX2_HC_MSIX_BIT_VECTOR				0x00006918
+#define BNX2_HC_MSIX_BIT_VECTOR_VAL			 (0x1ffL<<0)
+
+#define BNX2_HC_SB_CONFIG_1				0x00006a00
+#define BNX2_HC_SB_CONFIG_1_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_1_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_1_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_1_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_1_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_1_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_1_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_1_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_1			0x00006a04
+#define BNX2_HC_TX_QUICK_CONS_TRIP_1_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_1_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_1			0x00006a08
+#define BNX2_HC_COMP_PROD_TRIP_1_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_1_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_1			0x00006a0c
+#define BNX2_HC_RX_QUICK_CONS_TRIP_1_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_1_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_1				0x00006a10
+#define BNX2_HC_RX_TICKS_1_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_1_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_1				0x00006a14
+#define BNX2_HC_TX_TICKS_1_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_1_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_1				0x00006a18
+#define BNX2_HC_COM_TICKS_1_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_1_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_1				0x00006a1c
+#define BNX2_HC_CMD_TICKS_1_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_1_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_1			0x00006a20
+#define BNX2_HC_PERIODIC_TICKS_1_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_1_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_2				0x00006a24
+#define BNX2_HC_SB_CONFIG_2_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_2_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_2_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_2_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_2_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_2_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_2_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_2_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_2			0x00006a28
+#define BNX2_HC_TX_QUICK_CONS_TRIP_2_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_2_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_2			0x00006a2c
+#define BNX2_HC_COMP_PROD_TRIP_2_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_2_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_2			0x00006a30
+#define BNX2_HC_RX_QUICK_CONS_TRIP_2_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_2_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_2				0x00006a34
+#define BNX2_HC_RX_TICKS_2_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_2_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_2				0x00006a38
+#define BNX2_HC_TX_TICKS_2_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_2_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_2				0x00006a3c
+#define BNX2_HC_COM_TICKS_2_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_2_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_2				0x00006a40
+#define BNX2_HC_CMD_TICKS_2_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_2_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_2			0x00006a44
+#define BNX2_HC_PERIODIC_TICKS_2_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_2_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_3				0x00006a48
+#define BNX2_HC_SB_CONFIG_3_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_3_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_3_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_3_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_3_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_3_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_3_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_3_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_3			0x00006a4c
+#define BNX2_HC_TX_QUICK_CONS_TRIP_3_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_3_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_3			0x00006a50
+#define BNX2_HC_COMP_PROD_TRIP_3_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_3_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_3			0x00006a54
+#define BNX2_HC_RX_QUICK_CONS_TRIP_3_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_3_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_3				0x00006a58
+#define BNX2_HC_RX_TICKS_3_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_3_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_3				0x00006a5c
+#define BNX2_HC_TX_TICKS_3_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_3_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_3				0x00006a60
+#define BNX2_HC_COM_TICKS_3_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_3_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_3				0x00006a64
+#define BNX2_HC_CMD_TICKS_3_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_3_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_3			0x00006a68
+#define BNX2_HC_PERIODIC_TICKS_3_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_3_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_4				0x00006a6c
+#define BNX2_HC_SB_CONFIG_4_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_4_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_4_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_4_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_4_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_4_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_4_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_4_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_4			0x00006a70
+#define BNX2_HC_TX_QUICK_CONS_TRIP_4_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_4_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_4			0x00006a74
+#define BNX2_HC_COMP_PROD_TRIP_4_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_4_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_4			0x00006a78
+#define BNX2_HC_RX_QUICK_CONS_TRIP_4_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_4_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_4				0x00006a7c
+#define BNX2_HC_RX_TICKS_4_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_4_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_4				0x00006a80
+#define BNX2_HC_TX_TICKS_4_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_4_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_4				0x00006a84
+#define BNX2_HC_COM_TICKS_4_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_4_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_4				0x00006a88
+#define BNX2_HC_CMD_TICKS_4_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_4_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_4			0x00006a8c
+#define BNX2_HC_PERIODIC_TICKS_4_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_4_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_5				0x00006a90
+#define BNX2_HC_SB_CONFIG_5_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_5_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_5_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_5_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_5_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_5_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_5_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_5_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_5			0x00006a94
+#define BNX2_HC_TX_QUICK_CONS_TRIP_5_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_5_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_5			0x00006a98
+#define BNX2_HC_COMP_PROD_TRIP_5_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_5_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_5			0x00006a9c
+#define BNX2_HC_RX_QUICK_CONS_TRIP_5_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_5_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_5				0x00006aa0
+#define BNX2_HC_RX_TICKS_5_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_5_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_5				0x00006aa4
+#define BNX2_HC_TX_TICKS_5_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_5_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_5				0x00006aa8
+#define BNX2_HC_COM_TICKS_5_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_5_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_5				0x00006aac
+#define BNX2_HC_CMD_TICKS_5_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_5_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_5			0x00006ab0
+#define BNX2_HC_PERIODIC_TICKS_5_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_5_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_6				0x00006ab4
+#define BNX2_HC_SB_CONFIG_6_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_6_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_6_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_6_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_6_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_6_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_6_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_6_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_6			0x00006ab8
+#define BNX2_HC_TX_QUICK_CONS_TRIP_6_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_6_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_6			0x00006abc
+#define BNX2_HC_COMP_PROD_TRIP_6_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_6_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_6			0x00006ac0
+#define BNX2_HC_RX_QUICK_CONS_TRIP_6_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_6_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_6				0x00006ac4
+#define BNX2_HC_RX_TICKS_6_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_6_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_6				0x00006ac8
+#define BNX2_HC_TX_TICKS_6_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_6_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_6				0x00006acc
+#define BNX2_HC_COM_TICKS_6_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_6_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_6				0x00006ad0
+#define BNX2_HC_CMD_TICKS_6_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_6_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_6			0x00006ad4
+#define BNX2_HC_PERIODIC_TICKS_6_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_6_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_7				0x00006ad8
+#define BNX2_HC_SB_CONFIG_7_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_7_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_7_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_7_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_7_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_7_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_7_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_7_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_7			0x00006adc
+#define BNX2_HC_TX_QUICK_CONS_TRIP_7_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_7_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_7			0x00006ae0
+#define BNX2_HC_COMP_PROD_TRIP_7_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_7_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_7			0x00006ae4
+#define BNX2_HC_RX_QUICK_CONS_TRIP_7_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_7_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_7				0x00006ae8
+#define BNX2_HC_RX_TICKS_7_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_7_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_7				0x00006aec
+#define BNX2_HC_TX_TICKS_7_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_7_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_7				0x00006af0
+#define BNX2_HC_COM_TICKS_7_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_7_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_7				0x00006af4
+#define BNX2_HC_CMD_TICKS_7_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_7_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_7			0x00006af8
+#define BNX2_HC_PERIODIC_TICKS_7_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_7_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_8				0x00006afc
+#define BNX2_HC_SB_CONFIG_8_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_8_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_8_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_8_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_8_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_8_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_8_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_8_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_8			0x00006b00
+#define BNX2_HC_TX_QUICK_CONS_TRIP_8_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_8_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_8			0x00006b04
+#define BNX2_HC_COMP_PROD_TRIP_8_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_8_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_8			0x00006b08
+#define BNX2_HC_RX_QUICK_CONS_TRIP_8_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_8_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_8				0x00006b0c
+#define BNX2_HC_RX_TICKS_8_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_8_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_8				0x00006b10
+#define BNX2_HC_TX_TICKS_8_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_8_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_8				0x00006b14
+#define BNX2_HC_COM_TICKS_8_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_8_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_8				0x00006b18
+#define BNX2_HC_CMD_TICKS_8_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_8_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_8			0x00006b1c
+#define BNX2_HC_PERIODIC_TICKS_8_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_8_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
 
 
 /*
@@ -3063,7 +5481,7 @@
 #define BNX2_TXP_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
 #define BNX2_TXP_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
 #define BNX2_TXP_CPU_STATE_BAD_DATA_ADDR_HALTED		 (1L<<5)
-#define BNX2_TXP_CPU_STATE_BAD_pc_HALTED		 (1L<<6)
+#define BNX2_TXP_CPU_STATE_BAD_PC_HALTED		 (1L<<6)
 #define BNX2_TXP_CPU_STATE_ALIGN_HALTED			 (1L<<7)
 #define BNX2_TXP_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
 #define BNX2_TXP_CPU_STATE_SOFT_HALTED			 (1L<<10)
@@ -3111,7 +5529,7 @@
 #define BNX2_TXP_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
 
 #define BNX2_TXP_CPU_REG_FILE				0x00045200
-#define BNX2_TXP_FTQ_DATA				0x000453c0
+#define BNX2_TXP_TXPQ					0x000453c0
 #define BNX2_TXP_FTQ_CMD				0x000453f8
 #define BNX2_TXP_FTQ_CMD_OFFSET				 (0x3ffL<<0)
 #define BNX2_TXP_FTQ_CMD_WR_TOP				 (1L<<10)
@@ -3158,7 +5576,7 @@
 #define BNX2_TPAT_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
 #define BNX2_TPAT_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
 #define BNX2_TPAT_CPU_STATE_BAD_DATA_ADDR_HALTED	 (1L<<5)
-#define BNX2_TPAT_CPU_STATE_BAD_pc_HALTED		 (1L<<6)
+#define BNX2_TPAT_CPU_STATE_BAD_PC_HALTED		 (1L<<6)
 #define BNX2_TPAT_CPU_STATE_ALIGN_HALTED		 (1L<<7)
 #define BNX2_TPAT_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
 #define BNX2_TPAT_CPU_STATE_SOFT_HALTED			 (1L<<10)
@@ -3206,7 +5624,7 @@
 #define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
 
 #define BNX2_TPAT_CPU_REG_FILE				0x00085200
-#define BNX2_TPAT_FTQ_DATA				0x000853c0
+#define BNX2_TPAT_TPATQ					0x000853c0
 #define BNX2_TPAT_FTQ_CMD				0x000853f8
 #define BNX2_TPAT_FTQ_CMD_OFFSET			 (0x3ffL<<0)
 #define BNX2_TPAT_FTQ_CMD_WR_TOP			 (1L<<10)
@@ -3253,7 +5671,7 @@
 #define BNX2_RXP_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
 #define BNX2_RXP_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
 #define BNX2_RXP_CPU_STATE_BAD_DATA_ADDR_HALTED		 (1L<<5)
-#define BNX2_RXP_CPU_STATE_BAD_pc_HALTED		 (1L<<6)
+#define BNX2_RXP_CPU_STATE_BAD_PC_HALTED		 (1L<<6)
 #define BNX2_RXP_CPU_STATE_ALIGN_HALTED			 (1L<<7)
 #define BNX2_RXP_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
 #define BNX2_RXP_CPU_STATE_SOFT_HALTED			 (1L<<10)
@@ -3301,7 +5719,29 @@
 #define BNX2_RXP_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
 
 #define BNX2_RXP_CPU_REG_FILE				0x000c5200
-#define BNX2_RXP_CFTQ_DATA				0x000c5380
+#define BNX2_RXP_PFE_PFE_CTL				0x000c537c
+#define BNX2_RXP_PFE_PFE_CTL_INC_USAGE_CNT		 (1L<<0)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE			 (0xfL<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_0			 (0L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_1			 (1L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_2			 (2L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_3			 (3L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_4			 (4L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_5			 (5L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_6			 (6L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_7			 (7L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_8			 (8L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_9			 (9L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_10		 (10L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_11		 (11L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_12		 (12L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_13		 (13L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_14		 (14L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_15		 (15L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_COUNT			 (0xfL<<12)
+#define BNX2_RXP_PFE_PFE_CTL_OFFSET			 (0x1ffL<<16)
+
+#define BNX2_RXP_RXPCQ					0x000c5380
 #define BNX2_RXP_CFTQ_CMD				0x000c53b8
 #define BNX2_RXP_CFTQ_CMD_OFFSET			 (0x3ffL<<0)
 #define BNX2_RXP_CFTQ_CMD_WR_TOP			 (1L<<10)
@@ -3322,7 +5762,7 @@
 #define BNX2_RXP_CFTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
 #define BNX2_RXP_CFTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
 
-#define BNX2_RXP_FTQ_DATA				0x000c53c0
+#define BNX2_RXP_RXPQ					0x000c53c0
 #define BNX2_RXP_FTQ_CMD				0x000c53f8
 #define BNX2_RXP_FTQ_CMD_OFFSET				 (0x3ffL<<0)
 #define BNX2_RXP_FTQ_CMD_WR_TOP				 (1L<<10)
@@ -3350,6 +5790,10 @@
  *  com_reg definition
  *  offset: 0x100000
  */
+#define BNX2_COM_CKSUM_ERROR_STATUS			0x00100000
+#define BNX2_COM_CKSUM_ERROR_STATUS_CALCULATED		 (0xffffL<<0)
+#define BNX2_COM_CKSUM_ERROR_STATUS_EXPECTED		 (0xffffL<<16)
+
 #define BNX2_COM_CPU_MODE				0x00105000
 #define BNX2_COM_CPU_MODE_LOCAL_RST			 (1L<<0)
 #define BNX2_COM_CPU_MODE_STEP_ENA			 (1L<<1)
@@ -3369,7 +5813,7 @@
 #define BNX2_COM_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
 #define BNX2_COM_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
 #define BNX2_COM_CPU_STATE_BAD_DATA_ADDR_HALTED		 (1L<<5)
-#define BNX2_COM_CPU_STATE_BAD_pc_HALTED		 (1L<<6)
+#define BNX2_COM_CPU_STATE_BAD_PC_HALTED		 (1L<<6)
 #define BNX2_COM_CPU_STATE_ALIGN_HALTED			 (1L<<7)
 #define BNX2_COM_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
 #define BNX2_COM_CPU_STATE_SOFT_HALTED			 (1L<<10)
@@ -3417,7 +5861,29 @@
 #define BNX2_COM_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
 
 #define BNX2_COM_CPU_REG_FILE				0x00105200
-#define BNX2_COM_COMXQ_FTQ_DATA				0x00105340
+#define BNX2_COM_COMTQ_PFE_PFE_CTL			0x001052bc
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_INC_USAGE_CNT	 (1L<<0)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE		 (0xfL<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_0		 (0L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_1		 (1L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_2		 (2L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_3		 (3L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_4		 (4L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_5		 (5L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_6		 (6L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_7		 (7L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_8		 (8L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_9		 (9L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_10		 (10L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_11		 (11L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_12		 (12L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_13		 (13L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_14		 (14L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_15		 (15L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_COUNT		 (0xfL<<12)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_OFFSET		 (0x1ffL<<16)
+
+#define BNX2_COM_COMXQ					0x00105340
 #define BNX2_COM_COMXQ_FTQ_CMD				0x00105378
 #define BNX2_COM_COMXQ_FTQ_CMD_OFFSET			 (0x3ffL<<0)
 #define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP			 (1L<<10)
@@ -3438,7 +5904,7 @@
 #define BNX2_COM_COMXQ_FTQ_CTL_MAX_DEPTH		 (0x3ffL<<12)
 #define BNX2_COM_COMXQ_FTQ_CTL_CUR_DEPTH		 (0x3ffL<<22)
 
-#define BNX2_COM_COMTQ_FTQ_DATA				0x00105380
+#define BNX2_COM_COMTQ					0x00105380
 #define BNX2_COM_COMTQ_FTQ_CMD				0x001053b8
 #define BNX2_COM_COMTQ_FTQ_CMD_OFFSET			 (0x3ffL<<0)
 #define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP			 (1L<<10)
@@ -3459,7 +5925,7 @@
 #define BNX2_COM_COMTQ_FTQ_CTL_MAX_DEPTH		 (0x3ffL<<12)
 #define BNX2_COM_COMTQ_FTQ_CTL_CUR_DEPTH		 (0x3ffL<<22)
 
-#define BNX2_COM_COMQ_FTQ_DATA				0x001053c0
+#define BNX2_COM_COMQ					0x001053c0
 #define BNX2_COM_COMQ_FTQ_CMD				0x001053f8
 #define BNX2_COM_COMQ_FTQ_CMD_OFFSET			 (0x3ffL<<0)
 #define BNX2_COM_COMQ_FTQ_CMD_WR_TOP			 (1L<<10)
@@ -3489,6 +5955,10 @@
  *  cp_reg definition
  *  offset: 0x180000
  */
+#define BNX2_CP_CKSUM_ERROR_STATUS			0x00180000
+#define BNX2_CP_CKSUM_ERROR_STATUS_CALCULATED		 (0xffffL<<0)
+#define BNX2_CP_CKSUM_ERROR_STATUS_EXPECTED		 (0xffffL<<16)
+
 #define BNX2_CP_CPU_MODE				0x00185000
 #define BNX2_CP_CPU_MODE_LOCAL_RST			 (1L<<0)
 #define BNX2_CP_CPU_MODE_STEP_ENA			 (1L<<1)
@@ -3508,7 +5978,7 @@
 #define BNX2_CP_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
 #define BNX2_CP_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
 #define BNX2_CP_CPU_STATE_BAD_DATA_ADDR_HALTED		 (1L<<5)
-#define BNX2_CP_CPU_STATE_BAD_pc_HALTED			 (1L<<6)
+#define BNX2_CP_CPU_STATE_BAD_PC_HALTED			 (1L<<6)
 #define BNX2_CP_CPU_STATE_ALIGN_HALTED			 (1L<<7)
 #define BNX2_CP_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
 #define BNX2_CP_CPU_STATE_SOFT_HALTED			 (1L<<10)
@@ -3556,7 +6026,29 @@
 #define BNX2_CP_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
 
 #define BNX2_CP_CPU_REG_FILE				0x00185200
-#define BNX2_CP_CPQ_FTQ_DATA				0x001853c0
+#define BNX2_CP_CPQ_PFE_PFE_CTL				0x001853bc
+#define BNX2_CP_CPQ_PFE_PFE_CTL_INC_USAGE_CNT		 (1L<<0)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE		 (0xfL<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_0		 (0L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_1		 (1L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_2		 (2L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_3		 (3L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_4		 (4L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_5		 (5L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_6		 (6L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_7		 (7L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_8		 (8L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_9		 (9L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_10		 (10L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_11		 (11L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_12		 (12L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_13		 (13L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_14		 (14L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_15		 (15L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_COUNT		 (0xfL<<12)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_OFFSET			 (0x1ffL<<16)
+
+#define BNX2_CP_CPQ					0x001853c0
 #define BNX2_CP_CPQ_FTQ_CMD				0x001853f8
 #define BNX2_CP_CPQ_FTQ_CMD_OFFSET			 (0x3ffL<<0)
 #define BNX2_CP_CPQ_FTQ_CMD_WR_TOP			 (1L<<10)
@@ -3584,6 +6076,59 @@
  *  mcp_reg definition
  *  offset: 0x140000
  */
+#define BNX2_MCP_MCP_CONTROL				0x00140080
+#define BNX2_MCP_MCP_CONTROL_SMBUS_SEL			 (1L<<30)
+#define BNX2_MCP_MCP_CONTROL_MCP_ISOLATE		 (1L<<31)
+
+#define BNX2_MCP_MCP_ATTENTION_STATUS			0x00140084
+#define BNX2_MCP_MCP_ATTENTION_STATUS_DRV_DOORBELL	 (1L<<29)
+#define BNX2_MCP_MCP_ATTENTION_STATUS_WATCHDOG_TIMEOUT	 (1L<<30)
+#define BNX2_MCP_MCP_ATTENTION_STATUS_CPU_EVENT		 (1L<<31)
+
+#define BNX2_MCP_MCP_HEARTBEAT_CONTROL			0x00140088
+#define BNX2_MCP_MCP_HEARTBEAT_CONTROL_MCP_HEARTBEAT_ENABLE	 (1L<<31)
+
+#define BNX2_MCP_MCP_HEARTBEAT_STATUS			0x0014008c
+#define BNX2_MCP_MCP_HEARTBEAT_STATUS_MCP_HEARTBEAT_PERIOD	 (0x7ffL<<0)
+#define BNX2_MCP_MCP_HEARTBEAT_STATUS_VALID		 (1L<<31)
+
+#define BNX2_MCP_MCP_HEARTBEAT				0x00140090
+#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_COUNT	 (0x3fffffffL<<0)
+#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_INC	 (1L<<30)
+#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_RESET	 (1L<<31)
+
+#define BNX2_MCP_WATCHDOG_RESET				0x00140094
+#define BNX2_MCP_WATCHDOG_RESET_WATCHDOG_RESET		 (1L<<31)
+
+#define BNX2_MCP_WATCHDOG_CONTROL			0x00140098
+#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_TIMEOUT	 (0xfffffffL<<0)
+#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_ATTN		 (1L<<29)
+#define BNX2_MCP_WATCHDOG_CONTROL_MCP_RST_ENABLE	 (1L<<30)
+#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_ENABLE	 (1L<<31)
+
+#define BNX2_MCP_ACCESS_LOCK				0x0014009c
+#define BNX2_MCP_ACCESS_LOCK_LOCK			 (1L<<31)
+
+#define BNX2_MCP_TOE_ID					0x001400a0
+#define BNX2_MCP_TOE_ID_FUNCTION_ID			 (1L<<31)
+
+#define BNX2_MCP_MAILBOX_CFG				0x001400a4
+#define BNX2_MCP_MAILBOX_CFG_MAILBOX_OFFSET		 (0x3fffL<<0)
+#define BNX2_MCP_MAILBOX_CFG_MAILBOX_SIZE		 (0xfffL<<20)
+
+#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC			0x001400a8
+#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC_MAILBOX_OFFSET	 (0x3fffL<<0)
+#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC_MAILBOX_SIZE	 (0xfffL<<20)
+
+#define BNX2_MCP_MCP_DOORBELL				0x001400ac
+#define BNX2_MCP_MCP_DOORBELL_MCP_DOORBELL		 (1L<<31)
+
+#define BNX2_MCP_DRIVER_DOORBELL			0x001400b0
+#define BNX2_MCP_DRIVER_DOORBELL_DRIVER_DOORBELL	 (1L<<31)
+
+#define BNX2_MCP_DRIVER_DOORBELL_OTHER_FUNC		0x001400b4
+#define BNX2_MCP_DRIVER_DOORBELL_OTHER_FUNC_DRIVER_DOORBELL	 (1L<<31)
+
 #define BNX2_MCP_CPU_MODE				0x00145000
 #define BNX2_MCP_CPU_MODE_LOCAL_RST			 (1L<<0)
 #define BNX2_MCP_CPU_MODE_STEP_ENA			 (1L<<1)
@@ -3603,7 +6148,7 @@
 #define BNX2_MCP_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
 #define BNX2_MCP_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
 #define BNX2_MCP_CPU_STATE_BAD_DATA_ADDR_HALTED		 (1L<<5)
-#define BNX2_MCP_CPU_STATE_BAD_pc_HALTED		 (1L<<6)
+#define BNX2_MCP_CPU_STATE_BAD_PC_HALTED		 (1L<<6)
 #define BNX2_MCP_CPU_STATE_ALIGN_HALTED			 (1L<<7)
 #define BNX2_MCP_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
 #define BNX2_MCP_CPU_STATE_SOFT_HALTED			 (1L<<10)
@@ -3651,7 +6196,7 @@
 #define BNX2_MCP_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
 
 #define BNX2_MCP_CPU_REG_FILE				0x00145200
-#define BNX2_MCP_MCPQ_FTQ_DATA				0x001453c0
+#define BNX2_MCP_MCPQ					0x001453c0
 #define BNX2_MCP_MCPQ_FTQ_CMD				0x001453f8
 #define BNX2_MCP_MCPQ_FTQ_CMD_OFFSET			 (0x3ffL<<0)
 #define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP			 (1L<<10)
@@ -3696,6 +6241,8 @@
 
 /* 5708 Serdes PHY registers */
 
+#define BCM5708S_BMCR_FORCE_2500		0x20
+
 #define BCM5708S_UP1				0xb
 
 #define BCM5708S_UP1_2G5			0x1
@@ -3804,6 +6351,7 @@
 #define INVALID_CID_ADDR            0xffffffff
 
 #define TX_CID		16
+#define TX_TSS_CID	32
 #define RX_CID		0
 
 #define MB_TX_CID_ADDR	MB_GET_CID_ADDR(TX_CID)
@@ -3889,6 +6437,8 @@
 
 	u32		tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES)));
 	u16		tx_prod;
+	u32		tx_bidx_addr;
+	u32		tx_bseq_addr;
 
 	u16		tx_cons __attribute__((aligned(L1_CACHE_BYTES)));
 	u16		hw_tx_cons;
@@ -3945,6 +6495,7 @@
 #define CHIP_NUM(bp)			(((bp)->chip_id) & 0xffff0000)
 #define CHIP_NUM_5706			0x57060000
 #define CHIP_NUM_5708			0x57080000
+#define CHIP_NUM_5709			0x57090000
 
 #define CHIP_REV(bp)			(((bp)->chip_id) & 0x0000f000)
 #define CHIP_REV_Ax			0x00000000
@@ -4007,6 +6558,10 @@
 	struct statistics_block	*stats_blk;
 	dma_addr_t		stats_blk_mapping;
 
+	int			ctx_pages;
+	void			*ctx_blk[4];
+	dma_addr_t		ctx_blk_mapping[4];
+
 	u32			hc_cmd;
 	u32			rx_mode;
 
@@ -4038,6 +6593,7 @@
 
 	u8			serdes_an_pending;
 #define SERDES_AN_TIMEOUT	(HZ / 3)
+#define SERDES_FORCED_TIMEOUT	(HZ / 10)
 
 	u8			mac_addr[8];
 
@@ -4104,41 +6660,43 @@
 };
 
 struct fw_info {
-	u32 ver_major;
-	u32 ver_minor;
-	u32 ver_fix;
+	const u32 ver_major;
+	const u32 ver_minor;
+	const u32 ver_fix;
 
-	u32 start_addr;
+	const u32 start_addr;
 
 	/* Text section. */
-	u32 text_addr;
-	u32 text_len;
-	u32 text_index;
+	const u32 text_addr;
+	const u32 text_len;
+	const u32 text_index;
 	u32 *text;
+	u8 *gz_text;
+	const u32 gz_text_len;
 
 	/* Data section. */
-	u32 data_addr;
-	u32 data_len;
-	u32 data_index;
-	u32 *data;
+	const u32 data_addr;
+	const u32 data_len;
+	const u32 data_index;
+	const u32 *data;
 
 	/* SBSS section. */
-	u32 sbss_addr;
-	u32 sbss_len;
-	u32 sbss_index;
-	u32 *sbss;
+	const u32 sbss_addr;
+	const u32 sbss_len;
+	const u32 sbss_index;
+	const u32 *sbss;
 
 	/* BSS section. */
-	u32 bss_addr;
-	u32 bss_len;
-	u32 bss_index;
-	u32 *bss;
+	const u32 bss_addr;
+	const u32 bss_len;
+	const u32 bss_index;
+	const u32 *bss;
 
 	/* Read-only section. */
-	u32 rodata_addr;
-	u32 rodata_len;
-	u32 rodata_index;
-	u32 *rodata;
+	const u32 rodata_addr;
+	const u32 rodata_len;
+	const u32 rodata_index;
+	const u32 *rodata;
 };
 
 #define RV2P_PROC1                              0
diff --git a/drivers/net/bnx2_fw.h b/drivers/net/bnx2_fw.h
index 2d753dc..21d368f 100644
--- a/drivers/net/bnx2_fw.h
+++ b/drivers/net/bnx2_fw.h
@@ -14,20 +14,6 @@
  * accompanying it.
  */
 
-static const int bnx2_COM_b06FwReleaseMajor = 0x1;
-static const int bnx2_COM_b06FwReleaseMinor = 0x0;
-static const int bnx2_COM_b06FwReleaseFix = 0x0;
-static const u32 bnx2_COM_b06FwStartAddr = 0x080008b4;
-static const u32 bnx2_COM_b06FwTextAddr = 0x08000000;
-static const int bnx2_COM_b06FwTextLen = 0x57bc;
-static const u32 bnx2_COM_b06FwDataAddr = 0x08005840;
-static const int bnx2_COM_b06FwDataLen = 0x0;
-static const u32 bnx2_COM_b06FwRodataAddr = 0x080057c0;
-static const int bnx2_COM_b06FwRodataLen = 0x58;
-static const u32 bnx2_COM_b06FwBssAddr = 0x08005860;
-static const int bnx2_COM_b06FwBssLen = 0x88;
-static const u32 bnx2_COM_b06FwSbssAddr = 0x08005840;
-static const int bnx2_COM_b06FwSbssLen = 0x1c;
 static u8 bnx2_COM_b06FwText[] = {
 	0x1f, 0x8b, 0x08, 0x08, 0x09, 0x83, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65,
 	0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xec, 0x5b, 0x7d, 0x6c,
@@ -673,389 +659,752 @@
 static u32 bnx2_COM_b06FwBss[(0x88/4) + 1] = { 0x0 };
 static u32 bnx2_COM_b06FwSbss[(0x1c/4) + 1] = { 0x0 };
 
-static int bnx2_RXP_b06FwReleaseMajor = 0x1;
-static int bnx2_RXP_b06FwReleaseMinor = 0x0;
-static int bnx2_RXP_b06FwReleaseFix = 0x0;
-static u32 bnx2_RXP_b06FwStartAddr = 0x08003184;
-static u32 bnx2_RXP_b06FwTextAddr = 0x08000000;
-static int bnx2_RXP_b06FwTextLen = 0x588c;
-static u32 bnx2_RXP_b06FwDataAddr = 0x080058e0;
-static int bnx2_RXP_b06FwDataLen = 0x0;
-static u32 bnx2_RXP_b06FwRodataAddr = 0x08005890;
-static int bnx2_RXP_b06FwRodataLen = 0x28;
-static u32 bnx2_RXP_b06FwBssAddr = 0x08005900;
-static int bnx2_RXP_b06FwBssLen = 0x13a4;
-static u32 bnx2_RXP_b06FwSbssAddr = 0x080058e0;
-static int bnx2_RXP_b06FwSbssLen = 0x1c;
+static struct fw_info bnx2_com_fw_06 = {
+	.ver_major			= 0x1,
+	.ver_minor			= 0x0,
+	.ver_fix			= 0x0,
+
+	.start_addr			= 0x080008b4,
+
+	.text_addr			= 0x08000000,
+	.text_len			= 0x57bc,
+	.text_index			= 0x0,
+	.gz_text			= bnx2_COM_b06FwText,
+	.gz_text_len			= sizeof(bnx2_COM_b06FwText),
+
+	.data_addr			= 0x08005840,
+	.data_len			= 0x0,
+	.data_index			= 0x0,
+	.data				= bnx2_COM_b06FwData,
+
+	.sbss_addr			= 0x08005840,
+	.sbss_len			= 0x1c,
+	.sbss_index			= 0x0,
+	.sbss				= bnx2_COM_b06FwSbss,
+
+	.bss_addr			= 0x08005860,
+	.bss_len			= 0x88,
+	.bss_index			= 0x0,
+	.bss				= bnx2_COM_b06FwBss,
+
+	.rodata_addr			= 0x080057c0,
+	.rodata_len			= 0x58,
+	.rodata_index			= 0x0,
+	.rodata				= bnx2_COM_b06FwRodata,
+};
+
 static u8 bnx2_RXP_b06FwText[] = {
-	0x1f, 0x8b, 0x08, 0x08, 0x07, 0x87, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65,
-	0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xed, 0x5c, 0x5d, 0x6c,
-	0x1c, 0xd7, 0x75, 0x3e, 0xf3, 0x43, 0x71, 0x49, 0x91, 0xd4, 0x70, 0xb9,
-	0x62, 0x57, 0x12, 0x65, 0xed, 0x8a, 0x43, 0x71, 0x6d, 0x31, 0xce, 0x50,
-	0x58, 0xdb, 0x82, 0xb1, 0x48, 0xc7, 0xb3, 0xa4, 0xc8, 0x24, 0x02, 0x42,
-	0x1b, 0x42, 0xab, 0xa4, 0xa9, 0xc1, 0x90, 0x72, 0x91, 0x22, 0x2c, 0xa0,
-	0x1a, 0x79, 0xf0, 0x43, 0x10, 0x2f, 0x56, 0x3f, 0xa6, 0xd1, 0x8d, 0x96,
-	0xb6, 0x1c, 0x53, 0x08, 0x82, 0x82, 0xe5, 0x52, 0x52, 0x0b, 0x2c, 0xb4,
-	0x96, 0xed, 0x36, 0x7e, 0xa8, 0x23, 0x9a, 0x92, 0x8d, 0xa6, 0x68, 0x81,
-	0x22, 0xad, 0xd1, 0xf4, 0x4d, 0x95, 0x9a, 0x4a, 0x75, 0x5f, 0xd4, 0xa2,
-	0x48, 0xda, 0x46, 0xcd, 0xf4, 0xfb, 0xee, 0xcc, 0x88, 0xd4, 0x9a, 0xb2,
-	0x2c, 0x3b, 0x0d, 0x62, 0x74, 0x0e, 0x30, 0xd8, 0xb9, 0x7f, 0xe7, 0xef,
-	0x9e, 0x73, 0xee, 0x39, 0x77, 0x28, 0x7d, 0xa5, 0x43, 0xda, 0x25, 0x84,
-	0x4e, 0x3c, 0x99, 0xc3, 0xcf, 0x3c, 0xfd, 0xe0, 0xc3, 0x0f, 0xee, 0xc1,
-	0xeb, 0xb0, 0xa1, 0x6d, 0xd0, 0xa3, 0xfe, 0x18, 0x62, 0x88, 0x21, 0x86,
-	0x18, 0x62, 0x88, 0x21, 0x86, 0x18, 0x62, 0x88, 0x21, 0x86, 0x18, 0x62,
-	0x88, 0x21, 0x86, 0x18, 0x62, 0x88, 0x21, 0x86, 0x18, 0x62, 0x88, 0x21,
-	0x86, 0x18, 0x62, 0x88, 0x21, 0x86, 0x18, 0x62, 0x88, 0x21, 0x86, 0x18,
-	0x62, 0x88, 0x21, 0x86, 0x18, 0x62, 0x88, 0x21, 0x86, 0x18, 0x62, 0x88,
-	0x21, 0x86, 0x18, 0x62, 0x88, 0x21, 0x86, 0xff, 0xef, 0x60, 0x88, 0x58,
-	0xfc, 0xed, 0x0c, 0x1f, 0x49, 0xe8, 0x85, 0xcb, 0x07, 0x3d, 0x5b, 0x12,
-	0x46, 0x61, 0x69, 0x66, 0xda, 0x16, 0x71, 0xeb, 0xbb, 0x33, 0x45, 0xf9,
-	0x1f, 0xbf, 0x94, 0x32, 0x85, 0xfd, 0xdb, 0x0b, 0x37, 0x9f, 0x7d, 0xf3,
-	0x91, 0xec, 0x8d, 0x05, 0x43, 0x12, 0x56, 0xe1, 0xe8, 0xb0, 0xb5, 0x4b,
-	0x12, 0x7d, 0x58, 0xf3, 0xdd, 0xc1, 0xcf, 0x59, 0xd2, 0x15, 0xe1, 0xba,
-	0xee, 0xbf, 0x39, 0x68, 0xc9, 0x2b, 0x8d, 0x94, 0x5c, 0x68, 0x6c, 0xdf,
-	0x24, 0x5d, 0xd9, 0x52, 0x09, 0xfd, 0x6e, 0x8a, 0xe3, 0x96, 0x94, 0xab,
-	0x2d, 0xe2, 0x2a, 0xba, 0x7d, 0x5a, 0x71, 0xfe, 0x3e, 0xcd, 0x9b, 0x7f,
-	0x9e, 0xff, 0x1e, 0x24, 0xa5, 0xcb, 0x7d, 0x68, 0xf7, 0xa1, 0xcd, 0xf7,
-	0x81, 0xf4, 0x94, 0x98, 0x72, 0xa4, 0x91, 0x90, 0xa3, 0xd5, 0x8c, 0xe8,
-	0x05, 0x71, 0xbd, 0xbc, 0x9d, 0x2e, 0xa3, 0x6f, 0xea, 0x00, 0xdb, 0x29,
-	0xe0, 0xf9, 0x0e, 0xd7, 0x59, 0x5e, 0x5e, 0x4a, 0xb7, 0xc6, 0x14, 0x0d,
-	0x8e, 0xb1, 0x0f, 0xbf, 0x58, 0x5f, 0xae, 0x76, 0x00, 0x6f, 0xd6, 0x71,
-	0x41, 0xdc, 0x73, 0x2c, 0xd0, 0xf6, 0xfd, 0xdf, 0x75, 0x32, 0xb2, 0xe2,
-	0x74, 0x81, 0xa7, 0x16, 0x69, 0xb5, 0xc5, 0xd2, 0x0b, 0xb6, 0xb5, 0x22,
-	0x6d, 0x1c, 0xeb, 0x34, 0x0a, 0xbe, 0x3f, 0x9d, 0x97, 0xae, 0xa0, 0x6f,
-	0xb7, 0xe2, 0x63, 0x72, 0x42, 0xc3, 0xbc, 0x57, 0x49, 0x0f, 0x3a, 0xe2,
-	0x3b, 0x7f, 0xf3, 0x52, 0xac, 0x6c, 0x97, 0xc9, 0x54, 0xf6, 0xa0, 0x1b,
-	0xd0, 0x74, 0x3d, 0x67, 0x2b, 0x70, 0x6a, 0xe0, 0x4f, 0xdb, 0x81, 0xf5,
-	0xee, 0x0a, 0x68, 0x1a, 0x85, 0xcd, 0x62, 0x6c, 0x66, 0x9f, 0xe8, 0x3b,
-	0x87, 0x93, 0xe1, 0x78, 0x97, 0x36, 0x32, 0x6f, 0x88, 0x6e, 0xff, 0x81,
-	0xe6, 0xd5, 0x7a, 0xe5, 0xd8, 0xbc, 0x8e, 0x77, 0x5d, 0xae, 0xe6, 0x4b,
-	0x9a, 0xdb, 0xa8, 0x68, 0xde, 0xd9, 0x59, 0xad, 0x78, 0xd6, 0x94, 0xa3,
-	0xb6, 0x7f, 0xe1, 0xb4, 0x73, 0x42, 0x1b, 0x39, 0x7b, 0x46, 0x1b, 0x3d,
-	0xfb, 0x86, 0x36, 0xde, 0xd8, 0xb2, 0x49, 0xda, 0xb3, 0xd0, 0x1e, 0x71,
-	0x90, 0xbf, 0x4f, 0x87, 0xba, 0xec, 0xa2, 0xde, 0x4a, 0xe4, 0x7d, 0x9f,
-	0xf3, 0x86, 0xe6, 0x55, 0x6d, 0x8b, 0xfb, 0xe6, 0xa6, 0x22, 0x1a, 0xed,
-	0x72, 0x74, 0xde, 0x94, 0x63, 0xd5, 0x94, 0x3c, 0x57, 0x2d, 0x29, 0x5a,
-	0x86, 0x5d, 0xd2, 0xbc, 0x06, 0xc7, 0x2b, 0xa0, 0x75, 0x42, 0xdb, 0x07,
-	0x9a, 0xde, 0x59, 0x29, 0x5d, 0x71, 0xe6, 0x40, 0xaf, 0x03, 0x78, 0xff,
-	0x58, 0x1b, 0x6d, 0xf4, 0x6a, 0xde, 0xc9, 0x9b, 0xe2, 0x39, 0x59, 0xeb,
-	0x4b, 0x62, 0xba, 0xb0, 0x01, 0xc8, 0x0c, 0xfd, 0x38, 0xd0, 0x49, 0xca,
-	0xf7, 0xf5, 0x82, 0xff, 0x2c, 0x74, 0x6f, 0x5d, 0xa1, 0xfc, 0x8d, 0x5e,
-	0x29, 0xcf, 0x53, 0xd7, 0xa6, 0x36, 0x52, 0xf5, 0x2f, 0x78, 0x8e, 0xf4,
-	0x19, 0xe2, 0xfb, 0x47, 0x9d, 0x81, 0xf4, 0x21, 0x39, 0x03, 0xdc, 0x75,
-	0xad, 0xd8, 0xa0, 0xae, 0xc1, 0xdf, 0x2d, 0x39, 0x02, 0xbd, 0x15, 0x9d,
-	0x5e, 0x99, 0xb4, 0xb2, 0x2e, 0xf6, 0x68, 0x53, 0x20, 0x57, 0x32, 0xb4,
-	0x17, 0xd2, 0xe7, 0xde, 0x67, 0xd3, 0x9e, 0xa1, 0xcb, 0x53, 0x2f, 0x3d,
-	0xdf, 0xb3, 0x38, 0xb4, 0x91, 0x32, 0x43, 0xff, 0xf2, 0x45, 0xcf, 0xf6,
-	0xb6, 0xb4, 0x48, 0x29, 0x6d, 0x48, 0x16, 0xfb, 0xb4, 0x43, 0x4e, 0x3b,
-	0x22, 0x87, 0x2a, 0xd0, 0x8d, 0x6d, 0x5a, 0x8b, 0x62, 0x67, 0xca, 0x32,
-	0x50, 0x32, 0x75, 0x74, 0x26, 0x49, 0x97, 0x3a, 0xd2, 0xe5, 0x7a, 0x9e,
-	0x7a, 0xa2, 0x3d, 0x7f, 0x28, 0x5d, 0x69, 0xab, 0xba, 0x5a, 0xd5, 0xd3,
-	0xf8, 0x2f, 0x5d, 0x4f, 0xd4, 0xc9, 0x72, 0x28, 0xb7, 0x03, 0xdc, 0x8f,
-	0x40, 0x5f, 0xe2, 0xea, 0xc3, 0x0f, 0xb1, 0x6f, 0x93, 0x51, 0xb0, 0xd3,
-	0x17, 0x61, 0x14, 0x7a, 0x61, 0x37, 0x64, 0x19, 0xa6, 0xee, 0xe0, 0xc3,
-	0x1f, 0x49, 0x5e, 0xf9, 0xd5, 0x90, 0x97, 0xfc, 0xdb, 0x32, 0x55, 0x49,
-	0x80, 0x06, 0x65, 0xd4, 0xe5, 0xbd, 0x7c, 0x64, 0x1b, 0x7b, 0x20, 0x5f,
-	0x5e, 0xa6, 0xbe, 0x45, 0x7f, 0xa2, 0xfd, 0xf3, 0x9d, 0xb2, 0xfe, 0xcc,
-	0xbf, 0xee, 0x7c, 0x92, 0xf6, 0x96, 0x7c, 0x27, 0xe4, 0x78, 0x35, 0xc9,
-	0x3d, 0xd4, 0x56, 0x54, 0x6c, 0x8a, 0x64, 0x14, 0xdd, 0x28, 0x74, 0x48,
-	0x51, 0xed, 0xf7, 0x5e, 0xd0, 0x43, 0x2c, 0xa8, 0xf2, 0xbd, 0xa0, 0x64,
-	0x9b, 0xb6, 0xed, 0xcc, 0x11, 0xc9, 0xc2, 0xbe, 0x45, 0x8e, 0xcc, 0x99,
-	0x32, 0x6d, 0xff, 0x63, 0xa7, 0xb4, 0x2f, 0xdf, 0x6f, 0xa8, 0xb8, 0xae,
-	0xf7, 0x6e, 0x90, 0x4d, 0xe0, 0x77, 0xf9, 0x7e, 0x5d, 0xe4, 0xa6, 0x59,
-	0xc8, 0x5a, 0x23, 0x08, 0xf6, 0x46, 0x81, 0xb1, 0x4c, 0x43, 0x2c, 0x93,
-	0x44, 0x8b, 0x4d, 0x7d, 0xf9, 0xfe, 0xf8, 0xf0, 0xdd, 0xf5, 0x75, 0x64,
-	0x9e, 0xb4, 0xa9, 0x2f, 0xc6, 0xa8, 0x12, 0xf4, 0xc1, 0xf8, 0x74, 0xbb,
-	0xae, 0x8a, 0xa1, 0xae, 0x46, 0xfe, 0xef, 0xed, 0xc2, 0xf5, 0xaa, 0xa2,
-	0x79, 0xce, 0xbb, 0xa1, 0x2f, 0xd8, 0x32, 0x02, 0x7f, 0x37, 0xec, 0x4f,
-	0xcb, 0x91, 0x54, 0x76, 0xc2, 0x95, 0xc0, 0xe6, 0xaf, 0xad, 0xb1, 0xf9,
-	0xd1, 0xbb, 0xc8, 0x75, 0x3c, 0x94, 0xcb, 0x0d, 0xe5, 0x1a, 0x85, 0x5c,
-	0x63, 0x90, 0x6b, 0xe5, 0x23, 0xc8, 0xb5, 0xf2, 0x91, 0xe5, 0xd2, 0xa4,
-	0xec, 0x3c, 0x08, 0x5a, 0xa6, 0xfc, 0xab, 0x13, 0xd8, 0xf2, 0xbf, 0x38,
-	0x9f, 0x14, 0x19, 0x7c, 0x7f, 0x70, 0xd8, 0x16, 0xef, 0x5b, 0xe0, 0xd5,
-	0x71, 0x40, 0x8b, 0xef, 0xef, 0x97, 0xe1, 0x6e, 0xfe, 0x38, 0x8b, 0x7d,
-	0x5d, 0xcf, 0x1f, 0x29, 0x87, 0x3e, 0x7c, 0xef, 0xfe, 0xa8, 0x6b, 0x1f,
-	0x55, 0x0e, 0xc6, 0x9c, 0x4f, 0x35, 0x9d, 0xab, 0x1f, 0x56, 0x86, 0xf5,
-	0x63, 0xca, 0x2f, 0x4f, 0x86, 0xc7, 0x64, 0x72, 0x33, 0xed, 0xa9, 0xa4,
-	0x8d, 0x0c, 0x92, 0xef, 0xb5, 0xfc, 0x4a, 0x26, 0xe0, 0x0d, 0x39, 0xd1,
-	0xd2, 0x46, 0x39, 0xb2, 0x60, 0x49, 0x69, 0xe9, 0x4e, 0x71, 0x57, 0x03,
-	0x6f, 0xb4, 0x47, 0xf6, 0x7d, 0xd2, 0x7c, 0x2a, 0xc8, 0x2b, 0x2e, 0x54,
-	0x91, 0x83, 0x56, 0x13, 0x72, 0xd9, 0x48, 0xcb, 0x9b, 0x83, 0x87, 0xe5,
-	0xf3, 0xd5, 0x24, 0xe8, 0x31, 0x9f, 0x2c, 0xe7, 0x10, 0x17, 0xb5, 0xb2,
-	0x63, 0x08, 0x79, 0xaf, 0xd9, 0x9c, 0x13, 0xc4, 0x96, 0x72, 0x10, 0x83,
-	0x5d, 0x6f, 0x50, 0xe5, 0x14, 0x90, 0x4f, 0x64, 0x0c, 0xb1, 0xb7, 0x66,
-	0xb3, 0xcd, 0xfe, 0xa0, 0xef, 0xb3, 0x95, 0x5e, 0xad, 0xc8, 0xbc, 0x64,
-	0xf0, 0xa6, 0x4c, 0x3b, 0x41, 0xdf, 0xe7, 0x2a, 0xa3, 0x9b, 0x98, 0x1f,
-	0x1a, 0x05, 0xc9, 0x94, 0x9d, 0xf7, 0x7c, 0xd7, 0xba, 0x7d, 0xcd, 0xfa,
-	0x78, 0xb2, 0x13, 0x81, 0xce, 0x45, 0xfb, 0xaa, 0xad, 0xf7, 0xb6, 0x4a,
-	0x09, 0x27, 0x5d, 0xd6, 0x1a, 0x47, 0xe7, 0xbe, 0x4a, 0x79, 0x5b, 0xab,
-	0xdc, 0x34, 0x80, 0x3f, 0x6d, 0x68, 0x62, 0x1e, 0xaa, 0x94, 0xbb, 0xd9,
-	0xa6, 0xbe, 0x74, 0x4d, 0x12, 0xa3, 0x15, 0x5f, 0xae, 0x3a, 0x41, 0xee,
-	0x63, 0x68, 0x7a, 0x6f, 0x5b, 0xb8, 0x56, 0xd7, 0x76, 0x39, 0x97, 0x44,
-	0x3a, 0x0e, 0x55, 0xc4, 0x2a, 0x56, 0x76, 0x39, 0x6f, 0x4b, 0xb9, 0xa7,
-	0x6d, 0x75, 0x5d, 0x8a, 0xeb, 0x76, 0x0e, 0xaf, 0x9d, 0xbb, 0xcb, 0xb9,
-	0x28, 0xe5, 0x2d, 0x6d, 0xab, 0xb4, 0xd2, 0x58, 0xdb, 0x17, 0xac, 0xe5,
-	0xf8, 0x66, 0x71, 0xbb, 0x39, 0x47, 0xef, 0x6d, 0xbf, 0x45, 0x43, 0x32,
-	0xc5, 0x4a, 0xb9, 0xa7, 0x7d, 0x15, 0xaf, 0x4d, 0xbc, 0xde, 0x1a, 0xbc,
-	0xc4, 0xd9, 0xbe, 0x8a, 0x33, 0x07, 0x9c, 0x43, 0xab, 0x38, 0x39, 0x7e,
-	0x58, 0x8a, 0x38, 0xd3, 0x5a, 0x0a, 0x32, 0xbc, 0x54, 0xc9, 0x48, 0x79,
-	0x28, 0x01, 0xdd, 0xf7, 0x1f, 0xfc, 0x9a, 0xaa, 0x43, 0xcc, 0x61, 0x0f,
-	0xba, 0x32, 0x55, 0x5e, 0x87, 0xd8, 0x08, 0xdb, 0xf8, 0x5a, 0x5d, 0x86,
-	0x17, 0xeb, 0xa6, 0x1c, 0x6f, 0x70, 0xbf, 0x98, 0xe3, 0x05, 0x75, 0xc6,
-	0x85, 0x46, 0x4e, 0xdb, 0x87, 0xbd, 0x66, 0x9d, 0xb0, 0xaf, 0x61, 0x6a,
-	0xa3, 0x3c, 0x1f, 0x80, 0x97, 0x76, 0x7e, 0xac, 0x41, 0xdb, 0x79, 0x03,
-	0xb6, 0x41, 0xce, 0xa3, 0x9c, 0xbd, 0x95, 0xb9, 0x53, 0x66, 0xd1, 0x51,
-	0x75, 0x88, 0x56, 0xcb, 0x77, 0x20, 0x07, 0x4d, 0xa0, 0xd6, 0x80, 0xcd,
-	0xdb, 0x78, 0x6f, 0x70, 0xde, 0x32, 0xe6, 0x6d, 0xe0, 0x3c, 0xec, 0xcd,
-	0x25, 0xe5, 0x0f, 0xa6, 0xcd, 0xf1, 0x77, 0xb1, 0xc7, 0x68, 0xd7, 0x59,
-	0x57, 0x58, 0x02, 0x5f, 0xc1, 0x3e, 0xa2, 0x6e, 0x48, 0xed, 0x60, 0x7e,
-	0x8f, 0xb9, 0x19, 0xcc, 0xcd, 0x66, 0x18, 0xcf, 0x3d, 0xfb, 0x99, 0x0e,
-	0xe9, 0x42, 0xbb, 0xce, 0x35, 0xd9, 0x0c, 0x72, 0x5b, 0xdf, 0xcb, 0xb7,
-	0xc9, 0x4a, 0xca, 0xbf, 0x60, 0xd8, 0xd1, 0xdc, 0x08, 0x6f, 0xf3, 0x5c,
-	0xe6, 0xc5, 0xc4, 0xbd, 0x21, 0xcc, 0x83, 0xc7, 0xc5, 0x6d, 0xfc, 0x49,
-	0xb7, 0x74, 0xb9, 0xf8, 0x8d, 0xe6, 0x4c, 0x6f, 0x0e, 0x6a, 0x2e, 0xbe,
-	0xb7, 0x50, 0x3e, 0x17, 0xe7, 0xa1, 0x56, 0xac, 0x66, 0x26, 0x59, 0x1f,
-	0x15, 0xeb, 0x6c, 0xef, 0x85, 0x3f, 0x04, 0x75, 0xd7, 0x85, 0x5b, 0xbe,
-	0x70, 0x19, 0x7a, 0x4b, 0x43, 0x6f, 0x29, 0x39, 0xdf, 0x60, 0x9d, 0xe6,
-	0x42, 0x5f, 0x19, 0xf1, 0x1a, 0xe3, 0x58, 0x2b, 0x87, 0x81, 0x03, 0x3a,
-	0x17, 0x47, 0x2f, 0x64, 0x65, 0xca, 0xda, 0x1d, 0xf1, 0x00, 0x5c, 0x88,
-	0x1f, 0x85, 0x36, 0xf4, 0xf1, 0x1d, 0x9a, 0x53, 0xff, 0x86, 0x7f, 0x94,
-	0xed, 0x09, 0xbd, 0x30, 0xd6, 0xd4, 0xbf, 0x6e, 0xfc, 0xa1, 0x1c, 0x68,
-	0x33, 0x06, 0x31, 0xfe, 0xe8, 0xa8, 0xf3, 0x18, 0x8b, 0x48, 0xd7, 0x92,
-	0x23, 0x4b, 0x23, 0xdc, 0x37, 0x8b, 0xf1, 0xa7, 0x5c, 0xe7, 0x9e, 0x29,
-	0x5c, 0xc0, 0x19, 0xad, 0xf1, 0xfd, 0x11, 0x87, 0x6b, 0x7c, 0x99, 0x70,
-	0x3a, 0xc4, 0x48, 0x96, 0xb4, 0xc7, 0x07, 0x11, 0x7b, 0x1e, 0xe0, 0x3e,
-	0x32, 0x06, 0x6d, 0x17, 0xb0, 0xea, 0xb4, 0x3c, 0x3c, 0xc8, 0x75, 0xa0,
-	0xdd, 0x2a, 0x7a, 0x92, 0x34, 0xf3, 0x21, 0x4f, 0x43, 0xdd, 0x81, 0xbe,
-	0x06, 0xac, 0x40, 0x7f, 0x9f, 0xe9, 0x5e, 0xd5, 0x1f, 0xd7, 0x35, 0xf3,
-	0xcb, 0x18, 0x96, 0x90, 0x81, 0x33, 0x1b, 0x65, 0xe7, 0xa2, 0x25, 0xf6,
-	0x99, 0x55, 0xfe, 0x76, 0x9e, 0x5b, 0xcb, 0x5f, 0xf4, 0x7f, 0x15, 0x5c,
-	0xd0, 0xc5, 0x8e, 0xfa, 0x1e, 0x4b, 0x05, 0xb8, 0xa3, 0xf6, 0x7b, 0xe1,
-	0x5e, 0xf1, 0xfd, 0x99, 0x70, 0x4f, 0xb0, 0x07, 0x88, 0x95, 0xe7, 0x6f,
-	0xc5, 0xa9, 0x0c, 0xf6, 0x06, 0xb6, 0xa7, 0xe2, 0x11, 0xe3, 0x18, 0xed,
-	0xbb, 0x63, 0xd2, 0x2c, 0xb0, 0x8e, 0xe6, 0x3e, 0xc9, 0x44, 0xb9, 0x22,
-	0xa5, 0xad, 0x85, 0x67, 0x7d, 0xd8, 0xcf, 0xa4, 0xa5, 0x6c, 0xaf, 0x63,
-	0xaf, 0x97, 0x37, 0xa0, 0x1b, 0x8c, 0xc1, 0x26, 0xf5, 0x42, 0x42, 0x8a,
-	0x8d, 0x44, 0xc2, 0x3c, 0x31, 0xf0, 0x23, 0xcf, 0x48, 0x24, 0xf4, 0x13,
-	0x81, 0x9d, 0x4d, 0xd6, 0x6f, 0x20, 0x56, 0x6a, 0x72, 0x74, 0xe8, 0x86,
-	0xcf, 0x1a, 0xd8, 0xdb, 0x0b, 0x9b, 0x1b, 0x82, 0xcf, 0x80, 0x8f, 0x72,
-	0xa3, 0xa3, 0x37, 0xe0, 0xed, 0x2b, 0x11, 0x8f, 0xa6, 0x8e, 0xdc, 0xd3,
-	0xcb, 0xfb, 0xbe, 0x51, 0xd8, 0x90, 0x98, 0xce, 0x8f, 0x6f, 0xd1, 0xcf,
-	0xed, 0xdf, 0x62, 0x9c, 0x2b, 0x6d, 0x01, 0x3e, 0xdd, 0xcb, 0xe3, 0xf7,
-	0x9c, 0xc8, 0x44, 0x15, 0x3a, 0xdf, 0x03, 0x3d, 0x59, 0xf0, 0xc5, 0x3d,
-	0xa6, 0xca, 0xd1, 0xf5, 0x3d, 0x2f, 0x6e, 0x0a, 0x70, 0xf0, 0xfd, 0x27,
-	0x7e, 0x70, 0x86, 0x5e, 0x0e, 0xfb, 0x7e, 0x3f, 0xdc, 0x87, 0x5f, 0x45,
-	0xb9, 0x78, 0x5e, 0x44, 0xb2, 0xad, 0x3d, 0x37, 0xb2, 0xe3, 0x25, 0x9c,
-	0x33, 0xa7, 0x1d, 0xdf, 0x7f, 0x07, 0xcf, 0x35, 0xa7, 0xd9, 0x46, 0xde,
-	0x7f, 0xf6, 0x31, 0x07, 0xf8, 0x2c, 0xce, 0xbd, 0xd1, 0xa6, 0xb3, 0xff,
-	0x5e, 0xcf, 0xbd, 0x7b, 0x3f, 0xfb, 0xc9, 0xf3, 0x1d, 0x7d, 0xef, 0x03,
-	0xce, 0xfe, 0x0f, 0x5c, 0x77, 0x0f, 0x3e, 0x1b, 0xd8, 0x6d, 0xb1, 0xd1,
-	0x1c, 0x5f, 0xee, 0xd5, 0x7f, 0x7f, 0xad, 0xfb, 0x76, 0xff, 0xb5, 0xbb,
-	0x6f, 0xf7, 0xdf, 0xcd, 0xdd, 0xbf, 0x18, 0xff, 0xcd, 0x01, 0x0f, 0x7d,
-	0x70, 0xad, 0xff, 0xae, 0xe7, 0x93, 0xd4, 0xf7, 0xf3, 0x3d, 0xe5, 0xa1,
-	0xce, 0x30, 0x1f, 0x52, 0xe7, 0xf5, 0x17, 0xa7, 0x6d, 0xef, 0x7e, 0x53,
-	0x4a, 0xb9, 0x16, 0xc9, 0xe6, 0x6a, 0xb2, 0x43, 0x8e, 0x3b, 0x22, 0x4b,
-	0xaa, 0x16, 0x31, 0x51, 0x8b, 0x0f, 0xa0, 0x3e, 0x0b, 0xf4, 0xba, 0xa4,
-	0xf4, 0xf2, 0x02, 0x78, 0x89, 0xf0, 0x74, 0xdd, 0x05, 0x0f, 0x71, 0x10,
-	0x17, 0xf1, 0x0c, 0xe2, 0x7c, 0xb7, 0xd7, 0xc1, 0x85, 0x73, 0xea, 0x25,
-	0xd4, 0x64, 0xb6, 0xde, 0xa3, 0x07, 0x67, 0xb2, 0x5b, 0x96, 0xdd, 0xe9,
-	0xeb, 0xf2, 0x05, 0x9e, 0x59, 0x0a, 0xae, 0xce, 0x21, 0x56, 0x0f, 0x8d,
-	0x85, 0x75, 0xd2, 0xdc, 0x41, 0xcf, 0x8e, 0xee, 0x49, 0x78, 0x47, 0x92,
-	0x90, 0x92, 0x9a, 0xb5, 0x04, 0x1d, 0x68, 0x72, 0x0d, 0x67, 0xd0, 0xd5,
-	0xb9, 0x76, 0xe0, 0x45, 0xee, 0x77, 0x20, 0xbb, 0x57, 0xb4, 0x7e, 0xab,
-	0x55, 0x6b, 0x87, 0x2f, 0x65, 0xc4, 0x55, 0x6d, 0x9e, 0xd3, 0xa7, 0x66,
-	0x16, 0x2b, 0xc8, 0x03, 0x6d, 0x9c, 0xaf, 0x79, 0xbc, 0xd7, 0x49, 0x43,
-	0x93, 0x2b, 0x73, 0xba, 0xfc, 0xd3, 0x9c, 0x21, 0xff, 0x8c, 0x3a, 0xf4,
-	0x9a, 0x7d, 0x6a, 0xe6, 0xb4, 0x2d, 0xf7, 0x81, 0xd5, 0xf0, 0x0e, 0x4f,
-	0x76, 0x9a, 0x42, 0x5b, 0x1d, 0x48, 0xff, 0x8e, 0x20, 0xff, 0xc1, 0x9a,
-	0x2b, 0x73, 0xa4, 0xb5, 0x76, 0x8d, 0xf4, 0x22, 0x1f, 0x83, 0x5d, 0x0f,
-	0x30, 0x27, 0xe2, 0x7c, 0xd4, 0xab, 0x03, 0xd6, 0x3e, 0xc5, 0x5b, 0x42,
-	0x16, 0xeb, 0x9c, 0x6f, 0x82, 0xb7, 0x2e, 0x9c, 0x31, 0x59, 0x6b, 0x52,
-	0xfe, 0xb0, 0x5b, 0xe5, 0xaa, 0x1a, 0xfb, 0x0d, 0xb5, 0xc7, 0xef, 0xef,
-	0xe7, 0xde, 0x1b, 0x32, 0x95, 0x62, 0x9b, 0x63, 0x59, 0xd4, 0x9c, 0xc4,
-	0x97, 0xdd, 0xeb, 0x0a, 0x79, 0x0e, 0xde, 0xaf, 0x08, 0x65, 0xdb, 0x6d,
-	0x5d, 0x97, 0xd7, 0x7d, 0xf7, 0x00, 0xe5, 0x89, 0x72, 0x8b, 0x39, 0x9f,
-	0xb1, 0xd8, 0x28, 0xcc, 0xc0, 0x8e, 0xbf, 0x2a, 0xdf, 0x6f, 0x1c, 0x92,
-	0xef, 0x35, 0x26, 0xe5, 0xcf, 0x1a, 0x5f, 0x96, 0x3f, 0x6d, 0x1c, 0x94,
-	0xd7, 0x1b, 0x07, 0xe4, 0xb5, 0xc6, 0x84, 0xbc, 0xda, 0xd8, 0x0f, 0x1b,
-	0x1f, 0x87, 0x8d, 0x9f, 0x9a, 0x99, 0xac, 0xf7, 0xcb, 0xd4, 0x49, 0xc4,
-	0x20, 0xe7, 0x1b, 0xba, 0xba, 0xe3, 0xb3, 0xe9, 0xe7, 0x2d, 0x32, 0xad,
-	0xee, 0xaf, 0x34, 0xe4, 0x89, 0x2d, 0xbc, 0x2b, 0x7c, 0xc5, 0x33, 0x2e,
-	0x87, 0xf1, 0xe8, 0xe1, 0x94, 0xb4, 0x03, 0xbf, 0xca, 0x4b, 0x4d, 0x9e,
-	0xdb, 0x62, 0x86, 0xf7, 0x9c, 0x87, 0x24, 0xc9, 0xfb, 0xb0, 0x9c, 0x67,
-	0xa0, 0xde, 0x5e, 0xd7, 0x27, 0x73, 0xb4, 0x65, 0xe8, 0xc6, 0x95, 0x43,
-	0xb0, 0x53, 0xc3, 0x7e, 0xcb, 0xa5, 0x1e, 0x16, 0x97, 0x28, 0xf7, 0x46,
-	0x59, 0x5c, 0xa0, 0x6f, 0xff, 0x1b, 0x64, 0x6c, 0x97, 0xda, 0x82, 0x89,
-	0xb9, 0x6e, 0x98, 0xab, 0x6c, 0xa7, 0x3d, 0x00, 0x1f, 0xf1, 0x7e, 0x10,
-	0x4e, 0xab, 0x09, 0x27, 0xf1, 0x24, 0x54, 0x0c, 0x08, 0x70, 0x5b, 0x52,
-	0x5b, 0x4a, 0xca, 0xc2, 0x42, 0x0f, 0x9e, 0x94, 0x2c, 0xd4, 0x6d, 0x3c,
-	0x39, 0x3c, 0x43, 0x78, 0xd2, 0xb0, 0x53, 0xca, 0xc8, 0xd8, 0x12, 0xc9,
-	0x88, 0x78, 0x5c, 0xed, 0x0d, 0x6b, 0x2a, 0xf2, 0xa3, 0x85, 0xfc, 0x74,
-	0x87, 0x7d, 0x1d, 0x52, 0xab, 0x38, 0x32, 0x55, 0xfd, 0x94, 0x3e, 0xa5,
-	0x74, 0x07, 0xfc, 0x95, 0x21, 0xb4, 0xef, 0x0f, 0xdb, 0x8f, 0xca, 0xf4,
-	0xbc, 0xc8, 0xca, 0xcb, 0x03, 0x7a, 0x51, 0xb5, 0xf7, 0xa2, 0xad, 0xa3,
-	0x9d, 0x0d, 0xdb, 0xcc, 0x8f, 0x0e, 0xe0, 0x71, 0xd5, 0xf3, 0xf5, 0xea,
-	0xb8, 0x3c, 0x55, 0xed, 0x77, 0x5e, 0x87, 0xcd, 0xbd, 0x65, 0x46, 0xf7,
-	0xd2, 0x04, 0x24, 0x79, 0xf6, 0x56, 0x75, 0xf7, 0xf1, 0x04, 0xe2, 0xad,
-	0x9b, 0x34, 0xe5, 0x6f, 0x4f, 0x64, 0xad, 0xa7, 0xf5, 0x5c, 0x52, 0xda,
-	0x7d, 0xff, 0x71, 0x3b, 0x3b, 0x3b, 0xa9, 0x77, 0xca, 0xdf, 0xbf, 0x98,
-	0x91, 0x85, 0xb3, 0x5b, 0x65, 0xa1, 0x06, 0x99, 0x1a, 0xbf, 0x8e, 0x7d,
-	0x35, 0xe5, 0xea, 0x9e, 0x47, 0xb1, 0x27, 0x8c, 0x5d, 0x49, 0xe4, 0x6c,
-	0x1b, 0xc4, 0xec, 0x25, 0x5d, 0x49, 0x98, 0x85, 0x9c, 0x1c, 0x81, 0xdf,
-	0x4f, 0xdb, 0xb9, 0x1e, 0x69, 0xc7, 0x7b, 0x7d, 0x04, 0x7c, 0x5b, 0x32,
-	0xd5, 0x6b, 0xc9, 0x99, 0xc1, 0x68, 0xff, 0xb6, 0x62, 0x6e, 0x46, 0x16,
-	0xcf, 0x66, 0xf0, 0x9b, 0x83, 0xfd, 0xec, 0x94, 0x57, 0x6a, 0xfd, 0xb2,
-	0x54, 0xdb, 0x2a, 0x8b, 0xb5, 0xe6, 0x7d, 0xe8, 0xec, 0x09, 0xe2, 0x1d,
-	0xf1, 0xf4, 0x5b, 0x53, 0xfa, 0x56, 0x71, 0xcd, 0x7e, 0xeb, 0x29, 0xfd,
-	0x1f, 0xe4, 0x31, 0x33, 0xa0, 0xa9, 0x17, 0x7e, 0xa4, 0xee, 0x84, 0x26,
-	0x79, 0xf6, 0x2a, 0xbc, 0x4f, 0x26, 0x49, 0xfb, 0xf5, 0xc6, 0x07, 0xd1,
-	0x59, 0xcb, 0xcf, 0x9d, 0x68, 0x52, 0x06, 0xe2, 0xec, 0xbf, 0x71, 0x52,
-	0xef, 0x95, 0xe5, 0x6d, 0x0f, 0x58, 0x4f, 0xea, 0xad, 0x88, 0x01, 0x3f,
-	0x97, 0x9f, 0xee, 0xd9, 0x24, 0x3f, 0xfc, 0xcd, 0xec, 0xa9, 0x6f, 0x22,
-	0xd9, 0xbf, 0xb2, 0xa7, 0x83, 0x71, 0x01, 0xef, 0xec, 0xcf, 0xde, 0x70,
-	0x75, 0xea, 0xe1, 0x2f, 0xa0, 0x87, 0xec, 0x9c, 0xba, 0x9b, 0x56, 0x3c,
-	0x90, 0x3e, 0xf5, 0x52, 0x06, 0x6f, 0x18, 0xab, 0xf7, 0x03, 0x57, 0x59,
-	0xe9, 0xf9, 0x09, 0x27, 0x7b, 0x03, 0xe9, 0xb0, 0xbf, 0x68, 0xf7, 0xa7,
-	0x77, 0xea, 0x3b, 0x64, 0x32, 0xfd, 0x80, 0xf5, 0xb4, 0x6c, 0x21, 0xce,
-	0xd9, 0x05, 0xc1, 0xda, 0x79, 0xe2, 0xfb, 0x2b, 0xe0, 0x0b, 0x70, 0x28,
-	0xff, 0x51, 0x38, 0x77, 0x59, 0x5f, 0xd7, 0x79, 0xc6, 0x63, 0x0c, 0x71,
-	0xe1, 0xe2, 0x10, 0x65, 0x40, 0x82, 0x95, 0xca, 0xa6, 0x5d, 0xfd, 0xc3,
-	0xc8, 0x47, 0xfc, 0xfd, 0x56, 0x51, 0x27, 0x0f, 0xe7, 0xc0, 0xcb, 0x4f,
-	0xc0, 0x7f, 0x3f, 0x70, 0xa2, 0xf6, 0x48, 0x47, 0x74, 0xff, 0x4e, 0xd1,
-	0x7d, 0xad, 0x21, 0xe6, 0x2a, 0x5d, 0xf4, 0xd5, 0x75, 0xc8, 0xdd, 0x07,
-	0x7b, 0xb5, 0xf0, 0xcb, 0xbd, 0xe9, 0x0c, 0xf7, 0x98, 0xeb, 0x22, 0xba,
-	0x11, 0xbf, 0x5c, 0x73, 0x27, 0x1e, 0xee, 0x75, 0x3e, 0xea, 0xd4, 0x03,
-	0x09, 0x79, 0xf7, 0x44, 0xb4, 0x37, 0x07, 0x64, 0xba, 0x0a, 0xdd, 0xed,
-	0xea, 0x0f, 0xfc, 0x27, 0x1d, 0xf1, 0x40, 0xde, 0xff, 0x06, 0xbc, 0x07,
-	0xb8, 0x5b, 0x0b, 0xcd, 0xba, 0xc3, 0x58, 0x3d, 0xa0, 0x31, 0xb6, 0x0e,
-	0x4f, 0x57, 0xf6, 0x44, 0xbe, 0x98, 0x84, 0x5f, 0xed, 0xb6, 0x9e, 0x10,
-	0xd6, 0x63, 0xc4, 0x9b, 0x94, 0x1f, 0xbe, 0x0c, 0x1e, 0x92, 0xf4, 0x93,
-	0x7f, 0x5f, 0xe3, 0x27, 0x1c, 0xdb, 0x2a, 0x35, 0xd4, 0xd4, 0x5e, 0xde,
-	0x94, 0x69, 0x25, 0x03, 0xda, 0x35, 0xfa, 0x77, 0x29, 0xf4, 0xef, 0x47,
-	0x80, 0xa3, 0x5d, 0x8c, 0x47, 0x1f, 0xc7, 0x59, 0x9d, 0xcd, 0x2c, 0xeb,
-	0xcc, 0x03, 0x76, 0x4b, 0x51, 0xdd, 0x4f, 0xdf, 0x8b, 0xee, 0xa2, 0xd8,
-	0x94, 0x96, 0x8b, 0x95, 0x28, 0x2e, 0xa5, 0x71, 0x9e, 0xb4, 0xcb, 0xa5,
-	0xb9, 0x28, 0xe6, 0xb5, 0xcb, 0x12, 0xf2, 0x9a, 0x95, 0x97, 0x2c, 0x8c,
-	0x25, 0xe5, 0xe2, 0x5c, 0x12, 0x31, 0xab, 0x47, 0x56, 0xe6, 0x7a, 0x30,
-	0x96, 0xc2, 0xba, 0x14, 0xe6, 0xdb, 0xb2, 0x52, 0xb1, 0x81, 0x27, 0x87,
-	0x76, 0x0e, 0xed, 0x21, 0xb9, 0xa4, 0xbe, 0x17, 0x30, 0x2f, 0x18, 0x42,
-	0xdc, 0x62, 0x5e, 0x30, 0x82, 0x18, 0x32, 0x81, 0x27, 0x8a, 0x5d, 0xa7,
-	0x66, 0xa6, 0x2a, 0xbc, 0x73, 0x84, 0x0e, 0xac, 0x53, 0x33, 0xd3, 0xb6,
-	0x89, 0xba, 0xed, 0x1b, 0xda, 0x54, 0x83, 0x72, 0x41, 0xb7, 0x43, 0x1d,
-	0xa2, 0x3f, 0x4a, 0x9b, 0xe4, 0x79, 0x67, 0x20, 0xc6, 0x77, 0x01, 0x9f,
-	0x23, 0xfa, 0x6f, 0xd0, 0x17, 0xa0, 0xc3, 0x27, 0xba, 0xe4, 0xd2, 0xcb,
-	0x8c, 0x35, 0xae, 0xbc, 0x7a, 0x96, 0x3a, 0x2c, 0xf6, 0xac, 0xea, 0x90,
-	0x63, 0x0f, 0xe1, 0x8c, 0xd8, 0x0f, 0x7b, 0x32, 0x33, 0x87, 0x90, 0xcb,
-	0x7c, 0x1b, 0xf6, 0x59, 0x66, 0xcd, 0x9d, 0x0e, 0x6a, 0x84, 0x20, 0x06,
-	0xa0, 0xdd, 0x47, 0x5d, 0xb1, 0xdd, 0x07, 0xbb, 0xe3, 0x58, 0x9f, 0x1a,
-	0x5b, 0x04, 0x8e, 0x60, 0x8c, 0xed, 0xcd, 0xb2, 0xa8, 0xc6, 0x0e, 0xaa,
-	0xb1, 0xb2, 0xb2, 0x0f, 0x8e, 0x1d, 0x52, 0xb1, 0xe9, 0x7c, 0x23, 0xea,
-	0xdf, 0x88, 0x58, 0xc2, 0x7e, 0xf6, 0xe5, 0x61, 0xeb, 0x7b, 0x71, 0xae,
-	0x15, 0x64, 0xa9, 0x81, 0x3a, 0x30, 0xff, 0x7b, 0x98, 0xcb, 0x3d, 0xc8,
-	0x9e, 0x2a, 0xe9, 0xe4, 0xf1, 0x20, 0xce, 0x83, 0xfd, 0x21, 0xad, 0xb6,
-	0x90, 0xaf, 0x03, 0x61, 0xbb, 0x25, 0xa4, 0x4d, 0x3c, 0x36, 0x70, 0x1c,
-	0xc3, 0x5a, 0x17, 0x38, 0x18, 0x63, 0x11, 0x23, 0x52, 0x29, 0xe8, 0x82,
-	0x34, 0xdb, 0xa4, 0xac, 0xde, 0xf7, 0xc3, 0x76, 0xb9, 0x16, 0x3a, 0xb4,
-	0xa2, 0x75, 0xa5, 0x70, 0xcf, 0x53, 0xea, 0x9c, 0xd1, 0x93, 0x9b, 0xc3,
-	0x9c, 0x10, 0x7a, 0x45, 0x9c, 0xd5, 0x93, 0x8c, 0x37, 0xef, 0x84, 0x76,
-	0xda, 0x8b, 0xbe, 0x87, 0x44, 0xef, 0x65, 0xdf, 0x51, 0xe0, 0x61, 0xed,
-	0x3c, 0x0c, 0x99, 0xd9, 0xe6, 0xfa, 0x6c, 0xd3, 0xfa, 0xc4, 0x3a, 0xeb,
-	0x3b, 0x9a, 0xfa, 0x32, 0x52, 0x9b, 0xef, 0x52, 0xf1, 0xf2, 0x7c, 0x18,
-	0x2f, 0x17, 0x6b, 0x94, 0x05, 0x7e, 0x96, 0x7f, 0x5b, 0xe9, 0xa2, 0x76,
-	0x36, 0xb0, 0xf5, 0xa5, 0x93, 0x3c, 0x17, 0x57, 0xe7, 0xd5, 0xd4, 0xbc,
-	0xdf, 0x06, 0xff, 0xba, 0x1c, 0x55, 0x32, 0x70, 0x3e, 0xe6, 0xd5, 0x02,
-	0xbf, 0x31, 0x6c, 0xce, 0xa1, 0x8f, 0x44, 0x6b, 0x38, 0xff, 0xe7, 0xa8,
-	0x55, 0xbe, 0xac, 0xd6, 0xac, 0xfa, 0x0c, 0xf9, 0x71, 0x42, 0x9e, 0x7b,
-	0xc0, 0x5f, 0x67, 0x28, 0x43, 0x7b, 0x28, 0x03, 0xf1, 0xfd, 0x27, 0x70,
-	0xb7, 0x61, 0x1e, 0x79, 0xdd, 0x86, 0x3e, 0xbe, 0xff, 0x17, 0xfa, 0x76,
-	0x23, 0xff, 0x23, 0x6f, 0x89, 0x26, 0xde, 0xfe, 0x03, 0x63, 0x3d, 0x4a,
-	0xb7, 0x35, 0xd4, 0x26, 0x53, 0xbc, 0xef, 0x48, 0xe1, 0x1c, 0x38, 0xb9,
-	0x4d, 0xd1, 0xad, 0x9d, 0xbd, 0x86, 0xf1, 0x5e, 0xac, 0x89, 0xda, 0xcd,
-	0xb2, 0xe9, 0x58, 0xfb, 0x53, 0x25, 0xcf, 0x62, 0xed, 0x4e, 0xf2, 0x6f,
-	0x5b, 0x23, 0x3b, 0xe5, 0x26, 0x4f, 0xe4, 0xa7, 0x1f, 0x4f, 0x2b, 0x72,
-	0x21, 0xd8, 0x6d, 0xd2, 0x90, 0xd1, 0x7c, 0x9a, 0xdf, 0xf9, 0x12, 0xbc,
-	0x17, 0x1d, 0x19, 0xe4, 0x9e, 0xa1, 0xdd, 0x60, 0x4e, 0x47, 0x7f, 0x4b,
-	0xc8, 0x31, 0xd4, 0x24, 0xe5, 0x85, 0x8c, 0x56, 0x3c, 0x99, 0x45, 0x16,
-	0xad, 0xbe, 0xd5, 0xc9, 0x8b, 0x4b, 0xb6, 0x7c, 0x1b, 0x7e, 0x7a, 0xb2,
-	0x9e, 0x4d, 0x7f, 0x13, 0xf9, 0xc1, 0x91, 0x25, 0xe6, 0x13, 0x3d, 0x29,
-	0x65, 0x9b, 0xf3, 0x9a, 0x6c, 0x60, 0x4c, 0x9b, 0x47, 0x7e, 0x6a, 0xdd,
-	0x2d, 0x47, 0x82, 0x9f, 0x57, 0xd7, 0xc6, 0x0c, 0xca, 0xb1, 0x36, 0x66,
-	0x10, 0x0f, 0x63, 0xc6, 0x4e, 0xec, 0x13, 0x63, 0x06, 0xf6, 0xff, 0x24,
-	0x63, 0x86, 0x8d, 0x75, 0x8c, 0x19, 0x79, 0x59, 0xac, 0x32, 0x66, 0xec,
-	0x45, 0x9b, 0x31, 0xa3, 0x80, 0x76, 0x10, 0x2f, 0x16, 0x55, 0xbc, 0xc8,
-	0x5a, 0xcb, 0xc2, 0x38, 0x81, 0x3c, 0xb1, 0x8a, 0x3c, 0xb1, 0x8a, 0x3c,
-	0xb1, 0x8a, 0x3c, 0xb1, 0x8a, 0x3c, 0x11, 0xb6, 0xfe, 0x5a, 0x15, 0x79,
-	0x22, 0xfc, 0xe7, 0x3c, 0x72, 0x92, 0xa0, 0xa6, 0x38, 0x8c, 0x9a, 0xc2,
-	0xd5, 0xc6, 0xaa, 0xe3, 0xda, 0xbe, 0x2a, 0x6a, 0x43, 0xf5, 0x9d, 0x58,
-	0x1f, 0xda, 0x80, 0xba, 0xa8, 0xe6, 0x6c, 0x01, 0x5f, 0xd7, 0xe0, 0x1b,
-	0xd4, 0xd3, 0x56, 0x99, 0xca, 0xed, 0x80, 0x7c, 0xd8, 0x7f, 0xfb, 0xfb,
-	0xe8, 0x43, 0x3e, 0x9f, 0x63, 0x0d, 0xc2, 0x78, 0xb5, 0x0f, 0x6d, 0x1d,
-	0x6d, 0xec, 0xe9, 0x04, 0x7c, 0xc4, 0x7e, 0x90, 0xf9, 0x62, 0x7a, 0x41,
-	0x9e, 0xdc, 0x1c, 0xd8, 0xf4, 0x6f, 0x31, 0x27, 0x5e, 0xd3, 0xde, 0x88,
-	0x39, 0xf0, 0x17, 0xd8, 0x97, 0x5a, 0x03, 0x5c, 0xba, 0xfd, 0xe7, 0xc4,
-	0xd1, 0xb7, 0xe1, 0xd6, 0x1c, 0xda, 0xd5, 0xf7, 0x9a, 0xfa, 0xb2, 0x98,
-	0xcf, 0xef, 0xe2, 0x3b, 0xf0, 0xfb, 0x16, 0x7e, 0x61, 0x77, 0xf6, 0x05,
-	0xcc, 0xe9, 0xc3, 0xef, 0x77, 0x9a, 0xe6, 0x42, 0x0a, 0xfb, 0x2f, 0xd1,
-	0x77, 0x31, 0xa4, 0xc1, 0x6f, 0x89, 0x5f, 0x6a, 0xe2, 0xe3, 0x07, 0xe8,
-	0xfb, 0x6b, 0xf4, 0xf9, 0xfe, 0xdb, 0x4e, 0xd4, 0x27, 0xa5, 0x96, 0x70,
-	0xef, 0x46, 0xd5, 0xde, 0x69, 0xca, 0xe6, 0x8f, 0x2c, 0xe9, 0xaa, 0x0e,
-	0x7a, 0xae, 0x8e, 0xea, 0x08, 0x71, 0xbe, 0xbc, 0x10, 0xd4, 0xad, 0xc7,
-	0x51, 0x73, 0x16, 0xab, 0xb4, 0x91, 0x1c, 0xfa, 0x6d, 0x9c, 0x69, 0x32,
-	0x69, 0xdc, 0xaa, 0x63, 0x13, 0x89, 0xc9, 0x7a, 0x9b, 0x48, 0x37, 0x69,
-	0x32, 0x4f, 0x22, 0x8e, 0xd9, 0x99, 0xe2, 0xc2, 0xec, 0x8c, 0x07, 0x9c,
-	0x63, 0x75, 0xae, 0xe5, 0x3c, 0x93, 0xf7, 0x63, 0x4d, 0x74, 0x69, 0x13,
-	0x60, 0x06, 0xf4, 0x9e, 0xab, 0x93, 0x7e, 0x40, 0xb3, 0xac, 0x68, 0xda,
-	0xe8, 0x8f, 0xea, 0xc7, 0x1c, 0x6a, 0x5d, 0x99, 0x64, 0xed, 0x5c, 0x0c,
-	0x69, 0xba, 0x75, 0x49, 0x24, 0x0a, 0xcd, 0xf8, 0x82, 0x8c, 0xf3, 0xb9,
-	0xfa, 0xec, 0x8c, 0xfe, 0x42, 0x36, 0xc7, 0x3b, 0x11, 0xd7, 0x9a, 0x9d,
-	0x69, 0x1d, 0x48, 0xc8, 0x8f, 0x91, 0xbb, 0x1d, 0x53, 0x34, 0x66, 0x67,
-	0x8c, 0x17, 0x02, 0x5b, 0x0c, 0xe8, 0xe0, 0x3c, 0xc9, 0xb7, 0x43, 0x4e,
-	0xd2, 0x62, 0x4d, 0x1d, 0x8c, 0x4f, 0xaa, 0x7a, 0xd1, 0x94, 0x2b, 0x15,
-	0x45, 0x3b, 0xac, 0xdb, 0xc9, 0xc3, 0xec, 0x8c, 0xfc, 0xd1, 0x2d, 0x1e,
-	0xd6, 0x91, 0x87, 0x78, 0x49, 0x27, 0xd0, 0x5b, 0xc0, 0x7f, 0x12, 0xf5,
-	0x7b, 0x54, 0xab, 0xfb, 0xfe, 0x8a, 0x93, 0x43, 0x5c, 0xe0, 0x3e, 0xb6,
-	0xa8, 0x3c, 0xd7, 0x73, 0x32, 0xbc, 0xef, 0x9b, 0xe3, 0xdf, 0x39, 0x78,
-	0xf9, 0x01, 0xd4, 0x4d, 0xbc, 0x1b, 0xa4, 0x7f, 0xe1, 0xf7, 0x36, 0xff,
-	0xe2, 0x7c, 0xf6, 0x93, 0xe7, 0x81, 0xf4, 0x55, 0xf0, 0xe7, 0xe5, 0xd1,
-	0x87, 0x58, 0x51, 0x6c, 0x44, 0xb8, 0x78, 0xc7, 0xce, 0x39, 0x2a, 0xff,
-	0x6e, 0xf2, 0xd1, 0x96, 0xf0, 0xdc, 0xa5, 0x8e, 0xc8, 0x27, 0xf9, 0xe9,
-	0x84, 0x4d, 0x90, 0x17, 0xce, 0x8f, 0xee, 0x25, 0xd8, 0xfe, 0xb8, 0x36,
-	0x12, 0xdd, 0xa9, 0x7d, 0x9c, 0x3d, 0x8f, 0x74, 0x76, 0x37, 0x7e, 0x88,
-	0x83, 0xb4, 0x23, 0xbe, 0x22, 0x9e, 0x88, 0x8f, 0xfc, 0x44, 0xbc, 0x28,
-	0x1b, 0x5d, 0x97, 0x9f, 0x60, 0x5d, 0xc0, 0x4f, 0x69, 0x21, 0x0d, 0x9d,
-	0x90, 0xa7, 0x11, 0x6d, 0xa4, 0xba, 0xde, 0x1d, 0xc7, 0x0f, 0x5c, 0xc6,
-	0xd5, 0xb1, 0x06, 0xef, 0xa1, 0x48, 0x97, 0x7f, 0x3b, 0xb2, 0xa4, 0x8d,
-	0x34, 0xf8, 0x9d, 0xa9, 0xae, 0xb9, 0x8d, 0x88, 0xde, 0x5a, 0x9d, 0x46,
-	0xbf, 0xbc, 0x2b, 0xff, 0x0c, 0xf6, 0xa9, 0x3b, 0xf8, 0xbb, 0x14, 0x55,
-	0x47, 0xb1, 0x6f, 0xb9, 0xd5, 0x73, 0xa2, 0xbf, 0xd3, 0xd9, 0x1f, 0xe6,
-	0x43, 0x51, 0x6d, 0x1c, 0xd5, 0x59, 0xea, 0x9e, 0x7d, 0xaf, 0xe7, 0x68,
-	0xc8, 0x4f, 0x99, 0x33, 0x05, 0x3a, 0x08, 0xf1, 0xde, 0x91, 0xcf, 0x91,
-	0x26, 0x3e, 0x47, 0xc1, 0xe7, 0x3e, 0xf0, 0x39, 0x76, 0x8b, 0xcf, 0x5b,
-	0xb6, 0x97, 0x29, 0xc3, 0xf6, 0x46, 0xd6, 0xb5, 0xbd, 0x55, 0x3a, 0xab,
-	0x73, 0x83, 0xfb, 0x9a, 0x91, 0x86, 0x2f, 0xc7, 0x9d, 0x8f, 0x53, 0x37,
-	0xb7, 0xcb, 0x99, 0x85, 0xbb, 0xd5, 0xb7, 0x11, 0xaf, 0x2a, 0x77, 0x94,
-	0x4b, 0xf5, 0x80, 0x9f, 0x1f, 0x2f, 0xb1, 0x3d, 0x12, 0xea, 0x8a, 0x3a,
-	0xcb, 0x3a, 0x25, 0xb9, 0x1b, 0x2f, 0xbf, 0xf8, 0x9c, 0x76, 0xa5, 0x12,
-	0x9d, 0x4f, 0x5a, 0x78, 0xc6, 0xae, 0xe5, 0x29, 0xfa, 0x6e, 0x32, 0x66,
-	0x45, 0xf7, 0x67, 0x22, 0xfc, 0xfe, 0xc0, 0xef, 0x75, 0x6b, 0xbf, 0x13,
-	0xf0, 0x7c, 0x8a, 0x78, 0xd7, 0x53, 0x3c, 0x9f, 0xc6, 0x9c, 0x66, 0x19,
-	0x5c, 0xd8, 0xa4, 0x9e, 0xe4, 0x98, 0xe7, 0xd0, 0x2f, 0x4c, 0xd0, 0x0c,
-	0xee, 0xdd, 0x6a, 0x4b, 0xbe, 0x5c, 0x74, 0x36, 0x06, 0xe7, 0x28, 0x64,
-	0xba, 0x6c, 0xf1, 0xfe, 0x0a, 0x31, 0x8c, 0x67, 0x83, 0xb2, 0xb5, 0x16,
-	0xf5, 0x5c, 0x39, 0xd0, 0x0e, 0x1d, 0xb3, 0xdd, 0xd6, 0xcb, 0xfb, 0x0a,
-	0xca, 0xbc, 0xa0, 0xf6, 0x21, 0xd2, 0x71, 0xf4, 0x7d, 0xae, 0x55, 0x96,
-	0xc3, 0xbb, 0xad, 0xc5, 0x8a, 0xef, 0xbf, 0x83, 0x3c, 0xfc, 0x34, 0x74,
-	0x5f, 0xae, 0xff, 0xcc, 0x5f, 0x4e, 0xf1, 0x6f, 0xa5, 0x22, 0x9b, 0xd8,
-	0xd1, 0xcb, 0x7b, 0x20, 0xf8, 0x96, 0x1c, 0xaf, 0x87, 0x65, 0xbf, 0x70,
-	0x9c, 0x7d, 0xff, 0x0d, 0xbe, 0x7d, 0xff, 0xf4, 0xaa, 0x9d, 0x02, 0xfe,
-	0x17, 0x33, 0xe1, 0x9b, 0xdd, 0x90, 0x58, 0x00, 0x00, 0x00 };
+	0x1f, 0x8b, 0x08, 0x08, 0xcb, 0xa3, 0x46, 0x45, 0x00, 0x03, 0x74, 0x65,
+	0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xec, 0x5c, 0x6f, 0x6c,
+	0x1c, 0xc7, 0x75, 0x7f, 0x3b, 0xbb, 0xa4, 0x4e, 0xd4, 0x91, 0x5c, 0x1e,
+	0x4f, 0xf4, 0x49, 0x66, 0x94, 0x5d, 0x71, 0x25, 0x5e, 0x2d, 0xc6, 0x5d,
+	0x31, 0x57, 0x9b, 0x08, 0xce, 0xf1, 0x79, 0xef, 0x64, 0xb1, 0x86, 0x0a,
+	0x51, 0x0d, 0x1d, 0x1b, 0x85, 0x6b, 0xb0, 0x47, 0x39, 0xae, 0xdb, 0x7e,
+	0x90, 0x65, 0x1b, 0x30, 0xda, 0x10, 0xbe, 0x1c, 0xe9, 0x46, 0x75, 0x2f,
+	0xdc, 0x8b, 0xc4, 0x98, 0x06, 0xfa, 0x07, 0x57, 0x92, 0xfa, 0x83, 0xe0,
+	0xa0, 0x93, 0xe2, 0x26, 0xf5, 0x17, 0x57, 0x84, 0x2a, 0xc7, 0xf9, 0xe0,
+	0x02, 0x4e, 0x63, 0x20, 0x06, 0xea, 0x16, 0xaa, 0xec, 0xd8, 0x46, 0x81,
+	0xa2, 0x42, 0x1c, 0xd8, 0x46, 0xfc, 0x67, 0xfb, 0x7b, 0x33, 0xbb, 0xd4,
+	0x91, 0x96, 0x6d, 0xa0, 0x1f, 0xfa, 0xa5, 0x3b, 0xc0, 0x61, 0x67, 0x66,
+	0xe7, 0xbd, 0x79, 0xf3, 0xfe, 0xbf, 0x59, 0x4a, 0x7f, 0x90, 0xa4, 0x2e,
+	0x0a, 0x5b, 0x37, 0x7e, 0xd6, 0x91, 0xc7, 0x8f, 0xde, 0x3c, 0x76, 0xf3,
+	0x28, 0xd1, 0x97, 0x47, 0xf5, 0x1b, 0x12, 0x22, 0x9a, 0x8f, 0x5b, 0xdc,
+	0xe2, 0x16, 0xb7, 0xb8, 0xc5, 0x2d, 0x6e, 0x71, 0x8b, 0x5b, 0xdc, 0xe2,
+	0x16, 0xb7, 0xb8, 0xc5, 0x2d, 0x6e, 0x71, 0x8b, 0x5b, 0xdc, 0xe2, 0x16,
+	0xb7, 0xb8, 0xc5, 0x2d, 0x6e, 0x71, 0x8b, 0x5b, 0xdc, 0xe2, 0x16, 0xb7,
+	0xb8, 0xc5, 0x2d, 0x6e, 0x71, 0x8b, 0x5b, 0xdc, 0xe2, 0x16, 0xb7, 0xb8,
+	0xc5, 0x2d, 0x6e, 0x71, 0x8b, 0x5b, 0xdc, 0xe2, 0x16, 0xb7, 0xb8, 0xc5,
+	0x2d, 0x6e, 0x71, 0xfb, 0xff, 0xde, 0x74, 0x22, 0x93, 0x9f, 0xdd, 0xe1,
+	0x8f, 0x12, 0x22, 0xbf, 0xfa, 0x90, 0xe7, 0x50, 0x42, 0xcf, 0xbf, 0x34,
+	0x33, 0xed, 0x10, 0x15, 0x9a, 0x7b, 0xac, 0x22, 0x7d, 0x14, 0x54, 0xd2,
+	0x06, 0xf1, 0xfc, 0x17, 0xf2, 0x1f, 0x3e, 0xf1, 0xfc, 0xad, 0xf6, 0xd5,
+	0x86, 0x4e, 0x09, 0x33, 0x3f, 0xb7, 0xd7, 0xdc, 0x4d, 0x89, 0x41, 0xc0,
+	0xfc, 0xf5, 0xf0, 0x7f, 0xf4, 0x50, 0x0f, 0x5d, 0xc3, 0xe3, 0x24, 0xe8,
+	0xb2, 0xfe, 0x9c, 0xe6, 0xb5, 0x82, 0xe0, 0xa4, 0x1b, 0x04, 0x3f, 0xc6,
+	0xef, 0x2d, 0x17, 0x63, 0xff, 0xe3, 0xa0, 0x60, 0xe8, 0x24, 0x9c, 0xbf,
+	0xd4, 0xbc, 0xe5, 0x2e, 0xaa, 0x2e, 0x1a, 0x34, 0xeb, 0xa7, 0xe9, 0x98,
+	0x5f, 0xd1, 0x4a, 0xad, 0x9a, 0xb6, 0xef, 0xf4, 0xbc, 0x76, 0xe7, 0xe9,
+	0x63, 0xda, 0xfe, 0xd3, 0x75, 0xcd, 0x3b, 0x4d, 0x15, 0xb1, 0x37, 0x49,
+	0x05, 0xf3, 0x8c, 0x56, 0x6c, 0x0d, 0x68, 0xde, 0x89, 0x0f, 0xc9, 0x73,
+	0x6d, 0xf3, 0xf7, 0xc8, 0x28, 0x80, 0x16, 0xf2, 0x6a, 0x41, 0xe0, 0xb9,
+	0x06, 0x15, 0xd2, 0x41, 0x20, 0xf2, 0xc1, 0x13, 0x5e, 0xce, 0x31, 0x85,
+	0x96, 0xa6, 0x6a, 0x6b, 0x00, 0x78, 0x93, 0x5a, 0x71, 0xd1, 0xd0, 0x4a,
+	0x7e, 0x70, 0xc1, 0x73, 0x69, 0x50, 0xa7, 0x20, 0x98, 0x73, 0x77, 0x65,
+	0x0e, 0xd3, 0x29, 0xe0, 0x6d, 0x02, 0x1f, 0x99, 0x22, 0xcf, 0xf4, 0x31,
+	0x9d, 0x4c, 0x72, 0x45, 0x2b, 0x0e, 0x47, 0xf4, 0x91, 0xc5, 0xf4, 0x97,
+	0x57, 0x04, 0xe8, 0xdc, 0x42, 0xe5, 0x86, 0x49, 0x53, 0x2b, 0x1b, 0xd7,
+	0x5f, 0x0e, 0x9e, 0x1f, 0x36, 0xe9, 0x5c, 0xcb, 0xae, 0x54, 0x28, 0x41,
+	0x73, 0xbe, 0x45, 0x22, 0x4f, 0x05, 0x2f, 0x37, 0x48, 0x17, 0x5a, 0x19,
+	0xfa, 0x41, 0xcb, 0xc9, 0x54, 0x69, 0x13, 0x95, 0xd3, 0x69, 0x3a, 0xdf,
+	0x4a, 0xe3, 0x8c, 0xc1, 0x05, 0xe1, 0x38, 0x66, 0x15, 0x6b, 0xab, 0xad,
+	0x97, 0xf8, 0xdf, 0xbf, 0x98, 0xd3, 0x39, 0x09, 0x53, 0x01, 0xdd, 0xe1,
+	0x5a, 0x3e, 0x87, 0x5c, 0x2b, 0xcf, 0xa2, 0xd6, 0x52, 0x65, 0x3a, 0x87,
+	0xb9, 0xd6, 0x1d, 0x6b, 0xfc, 0x2d, 0xa4, 0xf9, 0x69, 0x52, 0xd5, 0xef,
+	0x00, 0x6f, 0xb8, 0x3f, 0x88, 0xb3, 0xee, 0xd0, 0xbc, 0xc5, 0x7f, 0x65,
+	0xbc, 0x69, 0x41, 0x3b, 0x30, 0x1e, 0xc4, 0x98, 0xfb, 0xbb, 0x32, 0x65,
+	0x02, 0x8f, 0x5b, 0x49, 0x8c, 0x99, 0xce, 0x20, 0xd8, 0xef, 0x92, 0x59,
+	0x75, 0x7b, 0x01, 0x6b, 0x51, 0xd5, 0xed, 0x01, 0xbe, 0x0e, 0xea, 0x73,
+	0xf8, 0x7c, 0xbc, 0xe7, 0x66, 0xcc, 0x07, 0xdd, 0x7a, 0x3e, 0x08, 0xa6,
+	0x73, 0xd4, 0xa3, 0xe6, 0xf6, 0x48, 0x1c, 0x53, 0x13, 0x1a, 0xd6, 0xbd,
+	0xc3, 0x7b, 0x24, 0x52, 0x79, 0xee, 0xf3, 0x33, 0x47, 0xde, 0xfc, 0x8e,
+	0x90, 0xa6, 0x0c, 0x68, 0xba, 0x31, 0xec, 0x43, 0x0e, 0x3e, 0xf8, 0xe1,
+	0xde, 0x80, 0xb1, 0xf6, 0x45, 0xe0, 0xc9, 0x56, 0x89, 0xf7, 0xe8, 0xa7,
+	0xa5, 0x34, 0x89, 0x2b, 0x6e, 0x5f, 0xb8, 0xae, 0x07, 0xb4, 0x46, 0xfa,
+	0x30, 0x40, 0x73, 0x8b, 0xcc, 0xf3, 0x1a, 0x64, 0x24, 0x68, 0xe7, 0x2d,
+	0x15, 0xad, 0xd0, 0x3a, 0x86, 0xbe, 0x41, 0xd3, 0x4e, 0x70, 0x61, 0xce,
+	0x9d, 0xd7, 0x8a, 0xa7, 0x4f, 0x69, 0xa5, 0xd3, 0xcf, 0x69, 0xfb, 0x5a,
+	0x2f, 0x76, 0x53, 0x97, 0x8d, 0xd3, 0x27, 0xe8, 0x49, 0x5f, 0x23, 0xa6,
+	0x73, 0x09, 0x3c, 0x2c, 0x98, 0x15, 0x32, 0x9c, 0x1e, 0xed, 0x4e, 0xe0,
+	0xe9, 0x70, 0xfe, 0x2c, 0x49, 0x3d, 0x3a, 0x6d, 0x72, 0xa2, 0xb5, 0x69,
+	0xfa, 0x73, 0xd0, 0x74, 0xd1, 0xdd, 0xca, 0x7c, 0xeb, 0x55, 0x30, 0xa9,
+	0x90, 0x0e, 0xd6, 0x2f, 0x96, 0x9f, 0x6d, 0x7a, 0xba, 0xa0, 0xd2, 0xc2,
+	0x5f, 0xf4, 0x57, 0x47, 0xb6, 0xf0, 0x3a, 0xd8, 0xc2, 0xd5, 0x87, 0xa6,
+	0x1d, 0xaf, 0xcf, 0xa0, 0x8a, 0x29, 0xc8, 0x36, 0x8b, 0xf4, 0x45, 0x9a,
+	0x73, 0x89, 0x8a, 0x35, 0xec, 0xeb, 0x18, 0xe0, 0x8f, 0x03, 0xfe, 0xec,
+	0xaa, 0xe8, 0xe2, 0x1e, 0xa0, 0xa9, 0x68, 0x46, 0xc8, 0xcb, 0x25, 0xba,
+	0x4b, 0xc2, 0x8b, 0xbc, 0x0b, 0x5d, 0xed, 0xe2, 0x3e, 0xf6, 0x4e, 0xc8,
+	0xbd, 0xf5, 0xbc, 0x93, 0x59, 0x26, 0xd2, 0x44, 0x7e, 0x0f, 0xf0, 0xb1,
+	0x0e, 0xf3, 0xba, 0x79, 0xd0, 0xc9, 0xf4, 0x73, 0xdf, 0x01, 0x4c, 0x02,
+	0xfa, 0xde, 0xdd, 0x46, 0x2b, 0xe8, 0x49, 0x33, 0xbf, 0x99, 0x7f, 0xf2,
+	0xac, 0xda, 0xb5, 0xb3, 0x7e, 0x10, 0x0c, 0x8f, 0x1a, 0xf4, 0x63, 0x79,
+	0x66, 0xb6, 0x37, 0x5e, 0x97, 0x0e, 0xf5, 0x23, 0x01, 0x9d, 0x22, 0xad,
+	0xec, 0x9a, 0x6b, 0xb8, 0xca, 0x44, 0x42, 0xcf, 0x27, 0xa9, 0x28, 0xe9,
+	0x1b, 0xc3, 0x5e, 0x6c, 0x87, 0xb0, 0x27, 0x87, 0xcf, 0xc2, 0x73, 0x79,
+	0xd8, 0xbb, 0xcd, 0x7d, 0x2a, 0xd7, 0xd9, 0xf6, 0x99, 0xb6, 0x55, 0x5b,
+	0xfe, 0xd3, 0x2c, 0x89, 0x4f, 0x0c, 0xe8, 0xd4, 0x4b, 0x13, 0xee, 0x87,
+	0x81, 0xd8, 0x8d, 0xf7, 0x23, 0x19, 0xd0, 0x66, 0x5b, 0xb0, 0xca, 0x94,
+	0x4e, 0x1a, 0xe8, 0xde, 0x93, 0x31, 0xc9, 0xc1, 0xd9, 0xc0, 0xdf, 0x89,
+	0x55, 0x30, 0xff, 0xd3, 0xe8, 0x54, 0x78, 0x41, 0x66, 0xc1, 0x03, 0x8d,
+	0x9e, 0xfb, 0x65, 0xc9, 0x33, 0x13, 0xe7, 0xd7, 0xe7, 0x99, 0xbf, 0x5d,
+	0xb0, 0x0b, 0x8d, 0xca, 0x2e, 0xe3, 0x8e, 0x70, 0x08, 0x1a, 0xbe, 0xa5,
+	0x1d, 0x47, 0x24, 0x5f, 0xd6, 0x5d, 0x83, 0x46, 0x47, 0x79, 0x2d, 0xaf,
+	0xe3, 0xf5, 0xf6, 0x98, 0x25, 0x3e, 0x08, 0xf6, 0xae, 0xdb, 0xd3, 0x21,
+	0x31, 0x0f, 0x9a, 0x95, 0x2c, 0xc0, 0xc3, 0xcf, 0x5b, 0xcb, 0x72, 0xd8,
+	0xc8, 0x6f, 0x5e, 0xdb, 0xbe, 0x0e, 0xfa, 0x3c, 0xc0, 0x34, 0x9c, 0x4c,
+	0x2a, 0x3b, 0x8d, 0x68, 0x8a, 0x64, 0xa9, 0x85, 0x38, 0x3e, 0xeb, 0x1c,
+	0xbc, 0x1e, 0xfe, 0xc3, 0x87, 0xff, 0x80, 0x4f, 0x3c, 0xef, 0xc3, 0xbf,
+	0xf8, 0xec, 0x6f, 0x2c, 0x7a, 0x7e, 0x18, 0xfe, 0xf1, 0x9a, 0x7f, 0x42,
+	0x1b, 0x47, 0x5f, 0x90, 0x0e, 0xff, 0x34, 0xdb, 0x10, 0xb0, 0x73, 0xf8,
+	0x8a, 0x15, 0x9e, 0x83, 0x5f, 0x58, 0x29, 0xe1, 0xe9, 0x50, 0xb5, 0xc9,
+	0x7a, 0x18, 0xf9, 0x61, 0xf6, 0x57, 0x19, 0xf8, 0x26, 0xf6, 0x47, 0xec,
+	0xb7, 0x78, 0x6d, 0x10, 0x94, 0x5c, 0x86, 0x0d, 0x20, 0x47, 0xb6, 0xbb,
+	0x24, 0x89, 0x54, 0x45, 0x3b, 0x34, 0x0c, 0x7b, 0xbc, 0x89, 0x7d, 0x0b,
+	0xdb, 0xe5, 0x8d, 0x44, 0x9d, 0xbc, 0xdf, 0xaf, 0xbb, 0xd5, 0xbf, 0xd9,
+	0xdb, 0x84, 0x35, 0xf2, 0xd9, 0xa3, 0xc6, 0x66, 0xe8, 0x97, 0xf8, 0xbd,
+	0x6d, 0x15, 0x68, 0x57, 0x38, 0xe6, 0xfe, 0x1a, 0xbd, 0xae, 0xb8, 0x25,
+	0x41, 0x3b, 0x4f, 0x29, 0x7f, 0xba, 0x73, 0x09, 0x9a, 0x71, 0x4a, 0xd1,
+	0xb8, 0xf3, 0x6c, 0xe4, 0x57, 0x93, 0xc0, 0x07, 0xfa, 0xfc, 0xb5, 0x18,
+	0x82, 0xf6, 0x9e, 0x06, 0xd3, 0xc2, 0xdc, 0x46, 0x5e, 0xb0, 0x2f, 0x67,
+	0xfb, 0x34, 0xdb, 0xed, 0x73, 0x2f, 0xec, 0xd3, 0xed, 0x24, 0xdb, 0xfd,
+	0x27, 0xd8, 0xe7, 0xb7, 0x5d, 0x0d, 0xbc, 0x21, 0xba, 0x54, 0xcb, 0xc0,
+	0x3f, 0x18, 0x99, 0xd7, 0x69, 0x97, 0x35, 0x0b, 0xbd, 0x3c, 0xc9, 0x73,
+	0x4d, 0xcc, 0x49, 0x3f, 0xae, 0xfc, 0xc7, 0x65, 0xfd, 0xbb, 0xa0, 0x2b,
+	0x08, 0x66, 0x81, 0xb3, 0x3c, 0xa2, 0x87, 0xb6, 0x18, 0xcd, 0x5f, 0x45,
+	0x3c, 0xf4, 0x7e, 0x43, 0xa7, 0x4a, 0xb6, 0x83, 0xec, 0xec, 0x12, 0x70,
+	0x4f, 0xbb, 0xca, 0xee, 0xd9, 0x36, 0x96, 0x81, 0x7f, 0xce, 0x1f, 0x86,
+	0x5f, 0x60, 0xbb, 0x01, 0x5d, 0xc0, 0xbf, 0x0c, 0xfc, 0x73, 0xad, 0x0e,
+	0xfa, 0x96, 0x11, 0xc5, 0xd9, 0xe8, 0x3c, 0xdb, 0xb0, 0x2c, 0xda, 0xf7,
+	0x08, 0xdd, 0xe5, 0xa7, 0xe0, 0x73, 0xd8, 0x27, 0x57, 0xb3, 0xb0, 0x2b,
+	0xad, 0xea, 0xf2, 0xde, 0x3a, 0x2d, 0xaf, 0xad, 0xa1, 0x42, 0x55, 0xd9,
+	0x6c, 0xc1, 0x1b, 0xae, 0x64, 0x74, 0xe9, 0x7b, 0x88, 0xee, 0x84, 0xad,
+	0x2e, 0x3b, 0x3c, 0xe6, 0x79, 0x35, 0x37, 0x5e, 0x1b, 0xd0, 0x8a, 0xec,
+	0xbf, 0x86, 0x3f, 0x04, 0x7d, 0x6a, 0xee, 0xb7, 0x6b, 0x0f, 0xb1, 0x8c,
+	0x70, 0x16, 0xb2, 0xaa, 0xee, 0x7f, 0x06, 0xd0, 0xdf, 0x75, 0x30, 0xd7,
+	0xc7, 0x63, 0x8f, 0x2b, 0x9d, 0x25, 0x6d, 0xbf, 0x23, 0x06, 0x3a, 0x43,
+	0x9f, 0xb7, 0x1f, 0x93, 0xfb, 0x6a, 0xd5, 0xfe, 0x4e, 0xfa, 0x50, 0xe7,
+	0x18, 0x7c, 0x85, 0xc8, 0xf0, 0x6a, 0xbb, 0xc1, 0x8f, 0x6a, 0x5f, 0xdb,
+	0x5c, 0xa2, 0x54, 0x0b, 0xe8, 0xa2, 0xab, 0x60, 0x30, 0x4e, 0x16, 0x6b,
+	0x62, 0x20, 0x41, 0x6b, 0x63, 0x93, 0x61, 0x56, 0x68, 0x77, 0x76, 0x99,
+	0x24, 0x6c, 0x7f, 0xe2, 0x1a, 0x6c, 0xba, 0x54, 0xab, 0xf6, 0xb5, 0x8d,
+	0x33, 0x45, 0xe0, 0x12, 0x7b, 0xd7, 0x60, 0x07, 0xaf, 0xc1, 0x6e, 0x25,
+	0xab, 0x8f, 0xe1, 0xc5, 0xc0, 0xe6, 0x6b, 0xb8, 0xad, 0x90, 0x9e, 0xfe,
+	0xcd, 0xd7, 0x70, 0x38, 0x8c, 0xb3, 0x6d, 0x9c, 0x65, 0x9c, 0x3b, 0xaf,
+	0xe1, 0x1c, 0x59, 0x4f, 0xcf, 0x11, 0x82, 0x0f, 0x4a, 0x74, 0xe6, 0x69,
+	0xef, 0xa5, 0xda, 0xd0, 0xc4, 0x5d, 0x84, 0xf8, 0x38, 0xb2, 0x29, 0xf4,
+	0xe1, 0xc6, 0x5e, 0x0f, 0xbc, 0x32, 0x88, 0x7d, 0xa2, 0x46, 0x55, 0xc8,
+	0xf9, 0x8f, 0x9a, 0xb4, 0xf7, 0x62, 0xd3, 0x08, 0x75, 0x89, 0x75, 0xe2,
+	0x6d, 0xd8, 0x58, 0x72, 0xca, 0x40, 0x0c, 0xbf, 0x20, 0x6d, 0x8c, 0x26,
+	0xaa, 0x35, 0xaa, 0x6c, 0xcf, 0x3f, 0x11, 0x40, 0x17, 0xa7, 0xe0, 0xd3,
+	0xc0, 0xe3, 0xe4, 0x98, 0x97, 0xc3, 0x7c, 0x93, 0x6d, 0x0b, 0x7e, 0x05,
+	0xb0, 0xd0, 0xb5, 0x84, 0x3e, 0xbf, 0xeb, 0x55, 0x4f, 0xe7, 0x7d, 0x2c,
+	0xe4, 0x5c, 0x89, 0x84, 0x98, 0xbf, 0x1a, 0xb0, 0x9e, 0x4d, 0x8f, 0x5c,
+	0x45, 0x8e, 0x63, 0x92, 0x37, 0x06, 0xff, 0x01, 0x7d, 0x9f, 0x6d, 0x11,
+	0x72, 0x82, 0x3f, 0xed, 0x55, 0x36, 0xf6, 0x9d, 0xad, 0xea, 0x49, 0x06,
+	0xfb, 0xf2, 0xe9, 0x1c, 0xe7, 0x0f, 0x9d, 0x09, 0x2f, 0x37, 0xbe, 0x4d,
+	0x3f, 0x7b, 0x60, 0x9b, 0x38, 0x5b, 0xd9, 0x26, 0x10, 0x03, 0x60, 0x53,
+	0xc2, 0xcb, 0xa1, 0x7f, 0x36, 0xb2, 0xa1, 0x0c, 0x6c, 0x88, 0x69, 0x65,
+	0x3a, 0x7f, 0x04, 0x7b, 0x95, 0xb4, 0xd2, 0x84, 0x0f, 0x9a, 0x46, 0x3f,
+	0x82, 0x9e, 0xe0, 0x2c, 0xf0, 0x81, 0x05, 0x70, 0x49, 0x8c, 0xfe, 0x2a,
+	0xb4, 0x67, 0xee, 0xbf, 0x1b, 0xa8, 0x78, 0x72, 0x30, 0xdc, 0xff, 0x17,
+	0xa1, 0x0f, 0x88, 0x70, 0x31, 0x9e, 0xac, 0x36, 0x81, 0x5c, 0x68, 0xa2,
+	0x65, 0x68, 0xec, 0xcf, 0x8b, 0x3e, 0xe7, 0x30, 0x9c, 0xbf, 0x3c, 0x1e,
+	0xfa, 0x45, 0xce, 0x5b, 0x92, 0x21, 0x4f, 0x73, 0x51, 0x5c, 0x94, 0xf6,
+	0x86, 0x18, 0x65, 0x95, 0x5d, 0x99, 0xd3, 0x68, 0xd3, 0xb9, 0x24, 0xd6,
+	0x61, 0xae, 0x85, 0x73, 0xc3, 0x2f, 0x21, 0x0f, 0xe2, 0xdc, 0x14, 0xeb,
+	0x3b, 0x43, 0x9b, 0xbf, 0x44, 0x65, 0xf8, 0x54, 0xc3, 0xe1, 0xf7, 0x5e,
+	0x2f, 0x75, 0x61, 0xdc, 0xc4, 0x5e, 0xf0, 0x13, 0xba, 0xe4, 0x33, 0x62,
+	0x41, 0xfa, 0x06, 0xce, 0xaf, 0xb0, 0xd6, 0xc2, 0x5a, 0xf6, 0xbb, 0xbc,
+	0xf6, 0x3c, 0xe8, 0xc0, 0xb8, 0xc9, 0x30, 0xec, 0xa3, 0x82, 0xf7, 0xbc,
+	0xdc, 0x1e, 0x73, 0x82, 0x12, 0x21, 0x5e, 0xab, 0x0d, 0xef, 0xc6, 0xb5,
+	0x9c, 0xd3, 0x04, 0x17, 0x74, 0x87, 0xf1, 0x47, 0x79, 0x18, 0xd1, 0xf4,
+	0x09, 0xe4, 0x71, 0x4e, 0x07, 0xe2, 0x14, 0x8f, 0x86, 0x4c, 0x75, 0xce,
+	0x08, 0xe6, 0x81, 0xfe, 0xf5, 0xe3, 0xbb, 0x53, 0xd7, 0xfc, 0x23, 0x5b,
+	0x17, 0x15, 0x10, 0x1f, 0xc0, 0x27, 0x6b, 0x8a, 0x73, 0xbf, 0x62, 0x53,
+	0xa2, 0xc4, 0xdc, 0x18, 0x7c, 0xa2, 0xca, 0xa3, 0x2e, 0xf8, 0x1b, 0xe5,
+	0x66, 0x82, 0xd7, 0x05, 0xf0, 0xd7, 0x82, 0xfe, 0x8c, 0x03, 0x96, 0x8e,
+	0x00, 0x07, 0xc7, 0x6a, 0x57, 0xe4, 0x53, 0x54, 0x36, 0x39, 0xa7, 0xe8,
+	0x64, 0xfa, 0x0a, 0x6c, 0xfb, 0x22, 0xbf, 0x19, 0x73, 0xdc, 0x7f, 0xac,
+	0x57, 0xc9, 0xab, 0x9b, 0xc7, 0x13, 0x22, 0xdf, 0xbb, 0x61, 0xfe, 0x9f,
+	0xbb, 0x15, 0x6d, 0x72, 0x8c, 0xf9, 0x7f, 0xdb, 0x30, 0xfe, 0xe3, 0xd4,
+	0xfa, 0xf1, 0xdd, 0xdb, 0x42, 0xfd, 0x43, 0xff, 0xf1, 0x90, 0x5e, 0xd0,
+	0xb6, 0x46, 0x6b, 0x94, 0x2b, 0x53, 0x5d, 0x20, 0x5f, 0xf4, 0x72, 0xbb,
+	0xac, 0x2a, 0x6c, 0xaa, 0xd4, 0x02, 0xdd, 0x6b, 0x71, 0x6c, 0x6d, 0x4d,
+	0xe5, 0xda, 0x1a, 0xe5, 0xe7, 0x4b, 0xad, 0x00, 0x79, 0x56, 0x7b, 0xcc,
+	0xcb, 0xa2, 0x5f, 0xc1, 0x3e, 0x05, 0x9a, 0xf6, 0x2f, 0x16, 0x84, 0x73,
+	0x4c, 0xe6, 0x89, 0xc2, 0x79, 0x4a, 0x2b, 0x2d, 0x73, 0xfe, 0x08, 0x5b,
+	0x72, 0x64, 0xdd, 0x80, 0x98, 0x72, 0x5c, 0x2b, 0x9c, 0x5e, 0x40, 0xfe,
+	0xb8, 0x82, 0xdf, 0x19, 0xfc, 0x9a, 0xf8, 0x45, 0xf9, 0xfb, 0x33, 0xc8,
+	0xff, 0xa5, 0x7f, 0x45, 0x2c, 0x50, 0xfb, 0xff, 0x62, 0x05, 0x3a, 0xb6,
+	0x90, 0xa6, 0x6f, 0x3b, 0xa2, 0x5f, 0x28, 0x9f, 0x52, 0x40, 0xde, 0x6b,
+	0xbe, 0x4d, 0xbf, 0x13, 0xe6, 0x50, 0x44, 0xaf, 0xd7, 0xc1, 0xc7, 0x91,
+	0xfd, 0xa1, 0xbe, 0x66, 0x1f, 0xf4, 0xa4, 0xef, 0x0c, 0x73, 0x24, 0xe4,
+	0x6a, 0x05, 0xb9, 0xea, 0xfb, 0xe0, 0x8d, 0x46, 0x6f, 0x41, 0x7f, 0x5e,
+	0xaf, 0x77, 0x81, 0x1e, 0x87, 0xca, 0x93, 0xf6, 0x18, 0x69, 0x43, 0xe6,
+	0x26, 0xad, 0x0b, 0x36, 0x0c, 0xfb, 0x96, 0x63, 0x4a, 0x74, 0xe4, 0xcf,
+	0xcd, 0x2c, 0xd5, 0x04, 0xd6, 0x22, 0xef, 0xc9, 0xa1, 0x0f, 0xd9, 0x5f,
+	0xa9, 0x33, 0x9c, 0xa0, 0x37, 0xea, 0x3a, 0xbd, 0x89, 0xbc, 0xeb, 0x2d,
+	0xe7, 0xdc, 0x0c, 0x62, 0xd6, 0x00, 0xe2, 0x03, 0x6a, 0x98, 0x5d, 0xec,
+	0xa3, 0x77, 0x1a, 0x78, 0x96, 0xf0, 0xbb, 0x13, 0x79, 0xe3, 0xf5, 0x61,
+	0x3e, 0x6d, 0x3d, 0xd3, 0x96, 0x00, 0x0c, 0xaf, 0x37, 0x40, 0x5b, 0x0f,
+	0xe4, 0x6f, 0x9b, 0x53, 0xf4, 0xcb, 0x5e, 0x99, 0xab, 0x68, 0x3c, 0xaf,
+	0xfc, 0xd2, 0x27, 0xe7, 0x99, 0xcf, 0x3a, 0x74, 0x9c, 0xc7, 0xfc, 0x8e,
+	0xfd, 0x27, 0xe3, 0xb3, 0xc7, 0x0a, 0x38, 0xcc, 0x95, 0xba, 0xea, 0x47,
+	0x73, 0xa4, 0x45, 0x31, 0x8c, 0xfd, 0x62, 0x89, 0x0a, 0x92, 0xef, 0x13,
+	0x24, 0x65, 0xb0, 0x4e, 0x9e, 0x94, 0x30, 0xf2, 0xf5, 0x99, 0x39, 0x87,
+	0xe5, 0x0a, 0xff, 0x56, 0x8b, 0xe4, 0xca, 0x32, 0xea, 0xa4, 0x6a, 0xfd,
+	0x29, 0xc8, 0x55, 0x84, 0xf5, 0x01, 0xec, 0x7b, 0x81, 0xe5, 0x8b, 0xba,
+	0xb1, 0x8e, 0xbc, 0xa7, 0x4e, 0x29, 0x55, 0xdf, 0x1c, 0x47, 0x5d, 0x00,
+	0xf9, 0xd5, 0x16, 0x80, 0x03, 0x36, 0x5a, 0x5b, 0xc1, 0x13, 0xb5, 0x48,
+	0xed, 0x0c, 0x9e, 0x83, 0x78, 0x36, 0x59, 0x37, 0xc3, 0x3c, 0xe3, 0x13,
+	0xf4, 0xc0, 0x9e, 0x4a, 0x6c, 0x4f, 0xf4, 0x8f, 0xad, 0x3c, 0xfd, 0x43,
+	0x6b, 0x8c, 0x7e, 0xd4, 0xca, 0xd1, 0x0f, 0x5b, 0x2e, 0xfd, 0x7d, 0x6b,
+	0x84, 0x9e, 0x6d, 0x65, 0xb9, 0x96, 0x43, 0xce, 0x64, 0x71, 0xce, 0x44,
+	0x0f, 0xfa, 0xb7, 0xc3, 0xde, 0x59, 0xfe, 0xe7, 0x66, 0x0a, 0xcd, 0x21,
+	0x2a, 0x9f, 0x80, 0x6f, 0x76, 0x6f, 0xe3, 0x1a, 0x94, 0x1e, 0x73, 0xb9,
+	0x86, 0xe8, 0xe0, 0xf7, 0xa8, 0x23, 0xe0, 0xbb, 0xe1, 0xcb, 0xa6, 0xd2,
+	0xf6, 0x39, 0x4f, 0x1f, 0x08, 0x7d, 0xc0, 0x5d, 0x29, 0xea, 0xc2, 0x5e,
+	0xf0, 0x7f, 0x17, 0x9f, 0x86, 0x0d, 0xc8, 0x1a, 0x28, 0x01, 0x5f, 0xc3,
+	0x79, 0x80, 0xc1, 0x76, 0xcc, 0xf5, 0x87, 0xe5, 0xe9, 0x5c, 0x17, 0xb2,
+	0x3d, 0xeb, 0x08, 0x1a, 0x0c, 0x37, 0x69, 0xb2, 0xdc, 0x0c, 0x87, 0x7d,
+	0x6a, 0x21, 0xf4, 0x6f, 0x89, 0x50, 0x2f, 0x4d, 0xcc, 0x3f, 0x15, 0xfa,
+	0xe3, 0x8d, 0xfb, 0x20, 0x56, 0x20, 0x97, 0x54, 0xeb, 0x18, 0x56, 0x0b,
+	0x61, 0xfb, 0xc3, 0xb9, 0x24, 0xf8, 0xed, 0x52, 0xd9, 0x7f, 0x43, 0xe3,
+	0x1c, 0x5b, 0x38, 0xcc, 0xff, 0x11, 0x8c, 0x2f, 0x87, 0xe3, 0xaf, 0xd0,
+	0xf4, 0x22, 0x81, 0xd6, 0xd7, 0xb4, 0xa2, 0x1c, 0x8f, 0x61, 0x2c, 0x30,
+	0xd6, 0xb9, 0x6e, 0xe0, 0x0c, 0x23, 0xc5, 0xba, 0x2e, 0x9c, 0x71, 0xf0,
+	0x71, 0x12, 0xbf, 0x82, 0xfc, 0x3d, 0xe2, 0x0f, 0x15, 0xde, 0x41, 0xbc,
+	0xd0, 0x3a, 0xa2, 0xdc, 0x67, 0x3b, 0x6a, 0xcf, 0x20, 0x38, 0x84, 0x5a,
+	0xdd, 0x4a, 0x19, 0xf4, 0x2f, 0xf3, 0xb6, 0x79, 0x48, 0xcc, 0xe1, 0x4c,
+	0x41, 0x30, 0xe1, 0xd8, 0x95, 0x82, 0xe8, 0xa6, 0x9f, 0x1f, 0xe7, 0xb8,
+	0x5b, 0x9f, 0x79, 0x01, 0xba, 0xd7, 0x58, 0xe9, 0xa4, 0x46, 0xc3, 0xa0,
+	0x2b, 0xa3, 0x43, 0xa0, 0xd3, 0xa4, 0x46, 0x33, 0x85, 0x5c, 0x6e, 0x33,
+	0xa1, 0x3c, 0x94, 0x0e, 0x43, 0xcf, 0x67, 0xa5, 0x8f, 0xf6, 0x1c, 0x3c,
+	0x9b, 0x1f, 0xf4, 0xae, 0x3f, 0x73, 0x09, 0xf4, 0xf7, 0xa0, 0x0a, 0xd9,
+	0x2e, 0xe5, 0x5c, 0xf6, 0x87, 0x4c, 0x4f, 0x20, 0x6e, 0x19, 0x43, 0xe6,
+	0x7e, 0xf1, 0xab, 0xe0, 0x0e, 0x83, 0x65, 0xf7, 0xaa, 0xac, 0x77, 0x64,
+	0x9c, 0xc3, 0x7e, 0x4b, 0x2b, 0xaf, 0x81, 0x16, 0x93, 0x9e, 0x6d, 0x6e,
+	0x0f, 0xc7, 0x96, 0xe4, 0xc5, 0xb3, 0xcd, 0x2e, 0xfa, 0x61, 0x63, 0x0b,
+	0x2d, 0x37, 0xf8, 0x7d, 0x27, 0x2d, 0x35, 0x86, 0xae, 0x1e, 0x15, 0x03,
+	0xb4, 0x7a, 0xe3, 0x4d, 0xe6, 0x57, 0x05, 0xf2, 0x82, 0xc9, 0x8f, 0xe9,
+	0xbd, 0xd1, 0x5e, 0xfa, 0xe9, 0x3d, 0x76, 0xfd, 0x7e, 0x01, 0x1b, 0x18,
+	0x4d, 0xb2, 0x6d, 0xa3, 0xcf, 0xf3, 0xf6, 0x55, 0x4b, 0xb0, 0x6e, 0xff,
+	0x04, 0x3c, 0xb5, 0x8f, 0x29, 0x3b, 0x60, 0xdc, 0x8c, 0x17, 0xba, 0xe1,
+	0xbc, 0x08, 0x9c, 0x78, 0xd7, 0x1c, 0x02, 0xae, 0x17, 0x25, 0x2f, 0x0e,
+	0xb9, 0xf6, 0x55, 0x42, 0x0e, 0x79, 0xc5, 0x19, 0xca, 0x0a, 0xb1, 0x9d,
+	0x1a, 0x99, 0x9b, 0xcc, 0xf3, 0xf0, 0xff, 0xa8, 0xab, 0x2a, 0x97, 0xa9,
+	0x3e, 0x73, 0xc9, 0x61, 0xfd, 0x67, 0xbf, 0xf1, 0x12, 0xf2, 0x4e, 0x93,
+	0x4e, 0x34, 0xd9, 0x5f, 0x32, 0x2e, 0xce, 0xfd, 0x77, 0x9b, 0x5f, 0x13,
+	0x9c, 0x23, 0xe0, 0x1d, 0xe6, 0xf5, 0x2f, 0xb1, 0x9c, 0x3b, 0x18, 0x36,
+	0x6b, 0x89, 0x60, 0x03, 0x8f, 0x86, 0xcc, 0x5d, 0x82, 0xf7, 0xfb, 0x6f,
+	0xec, 0xfb, 0x2e, 0x68, 0x1d, 0x02, 0x2c, 0xe2, 0x65, 0xa6, 0x7d, 0x8f,
+	0x57, 0xe4, 0x1e, 0xc7, 0x9b, 0xc8, 0xf3, 0xd6, 0xf6, 0xc0, 0x5c, 0x53,
+	0xe0, 0x9c, 0x86, 0x94, 0xcb, 0x95, 0x51, 0xe6, 0xef, 0x6d, 0x7d, 0x9c,
+	0x63, 0xea, 0xf9, 0xbf, 0x09, 0xa2, 0x5a, 0xf3, 0x95, 0xf9, 0x49, 0xf8,
+	0xe7, 0x20, 0xa8, 0xee, 0x1e, 0x52, 0x71, 0x68, 0x90, 0xdf, 0x1f, 0x90,
+	0xb2, 0xa8, 0x8a, 0x4e, 0xba, 0xc3, 0xb0, 0x00, 0xcb, 0x73, 0x2f, 0x87,
+	0x72, 0x84, 0x11, 0x75, 0xa1, 0xdf, 0x8c, 0xf4, 0x32, 0x05, 0x1d, 0xdb,
+	0x63, 0x1e, 0x0a, 0x63, 0x32, 0xc7, 0xb4, 0x9f, 0x42, 0xe7, 0xac, 0x14,
+	0xeb, 0x4d, 0xaa, 0xef, 0x9a, 0xde, 0xf0, 0xbb, 0xfa, 0x8c, 0x07, 0xda,
+	0x8a, 0x0b, 0x9d, 0x54, 0xaa, 0x27, 0x90, 0x03, 0x19, 0x34, 0x97, 0xc3,
+	0x18, 0x3a, 0x54, 0x6a, 0xb0, 0xce, 0x57, 0x42, 0x9d, 0x4f, 0x86, 0xb8,
+	0x4f, 0x82, 0x17, 0xb6, 0xb5, 0x2a, 0xb8, 0x76, 0xda, 0x26, 0xeb, 0x5f,
+	0x1d, 0xb6, 0x5c, 0xae, 0x71, 0xed, 0x89, 0xfc, 0xdb, 0x3c, 0x37, 0x33,
+	0xed, 0x18, 0xa0, 0x6b, 0x44, 0x2b, 0xb7, 0x1c, 0xad, 0xec, 0x33, 0x7d,
+	0xbb, 0x41, 0xb7, 0x26, 0x6b, 0xdc, 0xa5, 0xd6, 0x7b, 0xc1, 0xd2, 0xee,
+	0x4d, 0xe8, 0x43, 0xe7, 0x27, 0x58, 0xae, 0x5f, 0x60, 0xba, 0xac, 0x82,
+	0x60, 0x3e, 0xa7, 0xe9, 0xd4, 0xf0, 0xdf, 0xf5, 0x72, 0x3e, 0x75, 0x7a,
+	0x98, 0xf1, 0x83, 0x8e, 0x74, 0x9a, 0x96, 0x7d, 0xde, 0xa3, 0x3e, 0xc3,
+	0x3c, 0x2c, 0x2f, 0x98, 0xf4, 0x88, 0x94, 0xdb, 0x6b, 0xd2, 0xa6, 0xcb,
+	0x2b, 0xb0, 0xa5, 0xd4, 0x90, 0x79, 0x94, 0xec, 0xab, 0x17, 0x75, 0xbb,
+	0x3e, 0x05, 0x7b, 0x5e, 0x5a, 0xd4, 0x69, 0xa7, 0xac, 0xb1, 0x58, 0x36,
+	0xf6, 0x31, 0x58, 0x7c, 0x78, 0xf6, 0x43, 0x6d, 0x67, 0xef, 0xa1, 0x4b,
+	0x4f, 0xff, 0x16, 0x7c, 0x0d, 0xf3, 0xd5, 0xb0, 0x0e, 0x23, 0x9f, 0x58,
+	0x40, 0xee, 0x51, 0x45, 0x6e, 0x5c, 0xc8, 0x30, 0x6c, 0xc4, 0xef, 0xad,
+	0x92, 0xff, 0x42, 0xf2, 0x7f, 0x07, 0x55, 0xa5, 0x0d, 0x65, 0xe4, 0x3b,
+	0x01, 0x1c, 0xea, 0x1d, 0x8f, 0x91, 0x2b, 0xc9, 0x77, 0xf7, 0x2a, 0x38,
+	0xf6, 0x11, 0x19, 0x7e, 0x77, 0x14, 0x7b, 0x32, 0x8f, 0xa3, 0xf9, 0x6e,
+	0x52, 0x36, 0x14, 0xf1, 0x1d, 0x89, 0x44, 0x33, 0x4d, 0xbf, 0x8b, 0x9a,
+	0x67, 0xb2, 0x39, 0x48, 0xa5, 0xa6, 0x05, 0x19, 0xcc, 0xf4, 0xf1, 0xd9,
+	0x8a, 0x2b, 0x38, 0x8f, 0x60, 0x5a, 0xef, 0xa5, 0xc3, 0x7e, 0x44, 0x4f,
+	0x32, 0xa4, 0x6f, 0x32, 0x1c, 0x27, 0x42, 0x1a, 0xda, 0xf1, 0x25, 0x81,
+	0x0b, 0x31, 0x3e, 0xf7, 0x57, 0x21, 0x1e, 0xf6, 0x1f, 0xa0, 0x75, 0x32,
+	0x43, 0x2b, 0x3e, 0xd3, 0xb1, 0x85, 0xaa, 0x69, 0xee, 0x1f, 0x80, 0x9e,
+	0x31, 0x9e, 0x4d, 0x9c, 0xc7, 0xac, 0xe3, 0xf1, 0x91, 0x66, 0x05, 0x3c,
+	0x66, 0xfe, 0xf2, 0xba, 0x24, 0x2d, 0x7d, 0x85, 0xe5, 0xb7, 0x07, 0xf9,
+	0x3b, 0xeb, 0xc2, 0x96, 0x50, 0xaf, 0xd4, 0x9e, 0xa5, 0x85, 0x1e, 0xc8,
+	0x8a, 0xf7, 0xed, 0xa2, 0xbb, 0x61, 0xef, 0xc5, 0x06, 0xef, 0x3f, 0x09,
+	0x3d, 0x7a, 0x59, 0xee, 0x5f, 0x5a, 0x19, 0x08, 0xe1, 0x19, 0xb6, 0x67,
+	0x03, 0x6c, 0x27, 0xed, 0xab, 0x9b, 0xd7, 0x81, 0xff, 0x7d, 0xc0, 0x0b,
+	0x3a, 0x99, 0x63, 0x78, 0xc6, 0x83, 0x75, 0x8d, 0xf4, 0x67, 0xe0, 0x49,
+	0xc9, 0x5a, 0xbe, 0xd8, 0xe8, 0xa4, 0x62, 0x3d, 0xc2, 0xc5, 0x78, 0x3e,
+	0x46, 0xad, 0x7b, 0x9f, 0xc4, 0x35, 0x2d, 0x71, 0xe1, 0x7d, 0x83, 0x7d,
+	0xcd, 0xad, 0x80, 0x47, 0xbd, 0xee, 0x80, 0xb6, 0x54, 0x37, 0x2d, 0xc9,
+	0x7a, 0xbd, 0x4b, 0xf9, 0x98, 0xd4, 0x66, 0xbc, 0xdf, 0x02, 0x5b, 0xdf,
+	0x83, 0x3c, 0xa6, 0x07, 0x73, 0xd6, 0x86, 0xb9, 0x8d, 0xf4, 0x27, 0x36,
+	0xd0, 0xdf, 0x89, 0x75, 0xfd, 0xd8, 0x53, 0xad, 0x2b, 0x61, 0xdd, 0xec,
+	0x02, 0x6c, 0x82, 0x73, 0xf3, 0x34, 0xc7, 0xe4, 0x1b, 0x25, 0x2d, 0xb3,
+	0x2b, 0xef, 0xe1, 0x5c, 0x03, 0x80, 0x8d, 0xc6, 0x8a, 0x0f, 0x75, 0xe0,
+	0xf9, 0x5e, 0x43, 0xde, 0x4b, 0x40, 0x06, 0x9b, 0x53, 0x7c, 0xf6, 0x6a,
+	0xe3, 0xf3, 0x78, 0x76, 0x63, 0x1b, 0xbf, 0x98, 0x57, 0x4c, 0x2f, 0xd3,
+	0x0a, 0x3d, 0x25, 0xd8, 0x9b, 0x8b, 0x9a, 0x2f, 0xa5, 0x53, 0x29, 0x87,
+	0x78, 0xee, 0xf3, 0x5d, 0x2d, 0xdb, 0xe5, 0xa0, 0xaa, 0x0b, 0x1c, 0x8e,
+	0xeb, 0x86, 0x3c, 0xfb, 0xe1, 0x15, 0xbe, 0xaf, 0xb5, 0x10, 0x4f, 0xed,
+	0x2c, 0xe1, 0xec, 0x0f, 0xaf, 0x38, 0xf4, 0x68, 0x33, 0x4b, 0x47, 0x9b,
+	0xb6, 0x79, 0x3f, 0x7c, 0x40, 0x79, 0xed, 0x1e, 0x77, 0x57, 0x8a, 0xfd,
+	0x96, 0x81, 0x9c, 0xb3, 0xc3, 0x51, 0x39, 0x48, 0x95, 0xeb, 0xb1, 0x05,
+	0x9b, 0xef, 0x68, 0xcc, 0x06, 0x6d, 0xcc, 0x53, 0xfe, 0x2f, 0x73, 0x14,
+	0xde, 0x9f, 0xfd, 0x34, 0x72, 0x12, 0x1f, 0x39, 0x89, 0x8f, 0x9c, 0xc4,
+	0x47, 0x4e, 0xe2, 0x23, 0x27, 0xf1, 0x91, 0x93, 0xf8, 0xc8, 0x49, 0x7c,
+	0xe4, 0x24, 0xc8, 0xff, 0x55, 0x5d, 0x30, 0x8e, 0x5c, 0x1b, 0xfe, 0xcb,
+	0xff, 0x6a, 0x98, 0x53, 0x44, 0x31, 0x99, 0xe7, 0x56, 0x37, 0x79, 0x6e,
+	0x74, 0x4f, 0x7c, 0x00, 0x73, 0x13, 0x61, 0xee, 0xc3, 0x6b, 0xa2, 0x98,
+	0xcd, 0xeb, 0x68, 0xcc, 0x43, 0xbd, 0x59, 0x98, 0xe4, 0xdc, 0x48, 0xc5,
+	0x2a, 0x95, 0x97, 0xbf, 0x8a, 0xfc, 0xc8, 0x42, 0x7e, 0x34, 0x88, 0x5c,
+	0x88, 0xef, 0xb5, 0xa3, 0xfb, 0xa3, 0x82, 0x76, 0xc8, 0x1f, 0xd7, 0xbe,
+	0xe6, 0x73, 0xde, 0xee, 0x58, 0x65, 0x21, 0x16, 0xfa, 0x29, 0xa0, 0xe2,
+	0xe8, 0xb7, 0x90, 0x23, 0x7f, 0x4f, 0xde, 0x95, 0x4d, 0x0c, 0xb3, 0xcc,
+	0x27, 0x3e, 0x25, 0x4f, 0x8e, 0xf8, 0xab, 0xee, 0xf8, 0xc4, 0x12, 0xf3,
+	0x8f, 0xa8, 0xef, 0x2c, 0x18, 0x7e, 0x36, 0x41, 0xa9, 0x53, 0x5b, 0x30,
+	0x67, 0x52, 0xbf, 0xbc, 0x27, 0x82, 0x28, 0xcf, 0xfe, 0x1a, 0xf2, 0x72,
+	0x48, 0x9c, 0xe5, 0xdb, 0x04, 0xc6, 0xcb, 0xfe, 0xb5, 0x32, 0x53, 0x6c,
+	0x54, 0xa4, 0x4e, 0x1d, 0x6a, 0x96, 0x90, 0x3f, 0xf5, 0xf6, 0x53, 0x97,
+	0x81, 0x1a, 0x2a, 0xc2, 0xcd, 0x38, 0x7f, 0x99, 0x92, 0xb5, 0xcd, 0xd9,
+	0x35, 0x79, 0x42, 0xd6, 0xbc, 0x4f, 0x65, 0xa6, 0x5a, 0xb7, 0x33, 0x5c,
+	0xd7, 0x82, 0xd6, 0x99, 0x27, 0x81, 0x63, 0x19, 0x39, 0x81, 0x2e, 0xf7,
+	0xae, 0xcc, 0xcc, 0xd6, 0xd5, 0x5d, 0x95, 0xa2, 0x01, 0xf1, 0x2f, 0xd7,
+	0x45, 0xfa, 0x92, 0xba, 0xb3, 0x12, 0x12, 0x96, 0xe1, 0x18, 0xde, 0x00,
+	0x1c, 0xcb, 0x2d, 0x0b, 0x58, 0x96, 0x1d, 0xd3, 0x50, 0x99, 0xa9, 0x34,
+	0xda, 0x69, 0x60, 0x3c, 0x8c, 0x37, 0x3a, 0x0f, 0x9f, 0x25, 0x45, 0xe2,
+	0x54, 0x10, 0x94, 0x47, 0x07, 0xc3, 0x3a, 0x12, 0xf5, 0xe3, 0x09, 0x43,
+	0xea, 0xb9, 0x1a, 0x7f, 0x53, 0xc6, 0x29, 0x4b, 0xf0, 0x3c, 0x3f, 0xf1,
+	0x2e, 0xf7, 0x24, 0xe6, 0x30, 0x5e, 0x8e, 0xd6, 0x8a, 0x70, 0x6d, 0x77,
+	0x1b, 0x3f, 0x3b, 0xc2, 0xfd, 0x98, 0x26, 0x3e, 0xe7, 0x65, 0xec, 0xc5,
+	0x74, 0xf1, 0x1a, 0x13, 0xb4, 0x41, 0x96, 0xfe, 0xff, 0x96, 0xf7, 0xed,
+	0x67, 0x62, 0x9e, 0x1a, 0x80, 0xe1, 0xf5, 0x8c, 0x23, 0x82, 0xc1, 0x8b,
+	0xb3, 0x0a, 0x4e, 0xac, 0xdd, 0xed, 0x7d, 0xd6, 0xbe, 0xed, 0xb4, 0x46,
+	0xfb, 0x47, 0x78, 0xb2, 0x4a, 0x6e, 0x6b, 0xf0, 0xf2, 0xff, 0x0a, 0xc3,
+	0x13, 0xba, 0xf8, 0x89, 0x3b, 0xd2, 0x6c, 0x5b, 0x6d, 0x1c, 0xdd, 0x35,
+	0x70, 0xcd, 0xcf, 0x35, 0x3c, 0x7f, 0x47, 0x68, 0xaf, 0x4b, 0x4b, 0x61,
+	0x2c, 0x83, 0x2e, 0xaa, 0xbb, 0xd4, 0x70, 0x6c, 0x70, 0x6c, 0x43, 0xe3,
+	0x1c, 0x3f, 0xb2, 0x91, 0xf6, 0x7b, 0x42, 0x95, 0x9b, 0x9c, 0x59, 0x8c,
+	0x7c, 0x0e, 0xfc, 0xc1, 0xb0, 0x11, 0xfa, 0xed, 0x24, 0xfc, 0x56, 0x0f,
+	0xed, 0x83, 0xbf, 0xb9, 0x13, 0xfe, 0x66, 0x3f, 0xea, 0xca, 0xf1, 0x95,
+	0xf6, 0xfb, 0x57, 0xae, 0x65, 0xab, 0x74, 0x58, 0xca, 0xae, 0x12, 0xe8,
+	0xce, 0xc7, 0x90, 0xdf, 0x2e, 0x99, 0xa3, 0x29, 0x79, 0xc2, 0x57, 0xba,
+	0xfc, 0x2d, 0x62, 0xe3, 0x3d, 0x6f, 0x16, 0x7a, 0xdd, 0x55, 0x10, 0x32,
+	0xff, 0x52, 0x7c, 0xab, 0x36, 0x14, 0xdf, 0xe0, 0x53, 0x81, 0xdf, 0xa0,
+	0x4a, 0xd3, 0xa4, 0x0a, 0xf6, 0xad, 0x60, 0xdf, 0x0a, 0xea, 0xc1, 0xd9,
+	0x66, 0xfb, 0x77, 0xaa, 0xee, 0xb0, 0xc6, 0x66, 0xd8, 0xa8, 0x6f, 0x86,
+	0xe7, 0xd2, 0xda, 0x9e, 0xc7, 0xc0, 0xbb, 0x47, 0xc1, 0xbb, 0x23, 0xa8,
+	0x83, 0xfe, 0x04, 0x75, 0xd0, 0x1f, 0xa2, 0x0e, 0x3a, 0x8c, 0x3a, 0x68,
+	0x0a, 0x75, 0xd0, 0x7d, 0xb0, 0xfd, 0x7b, 0x61, 0xfb, 0x93, 0xb0, 0xfd,
+	0x09, 0x79, 0xc7, 0x73, 0xc8, 0xdf, 0x78, 0xef, 0x11, 0xed, 0xc5, 0xed,
+	0x4d, 0x22, 0x88, 0xaf, 0x7c, 0x62, 0x9c, 0x1a, 0x2d, 0xae, 0x87, 0x5c,
+	0x79, 0x7f, 0x35, 0xed, 0x4e, 0x6a, 0x53, 0xc8, 0xb9, 0xef, 0x1f, 0xe1,
+	0x3a, 0x29, 0xa5, 0xee, 0x2b, 0x73, 0xf6, 0x73, 0x1e, 0xd2, 0x2e, 0xe4,
+	0x6d, 0x38, 0xb3, 0x7d, 0xa6, 0xa8, 0x47, 0x35, 0x4a, 0xdf, 0x5a, 0x8d,
+	0xb2, 0x3c, 0xcf, 0x35, 0xca, 0xab, 0x6b, 0x35, 0xca, 0xf2, 0x3c, 0xd7,
+	0x28, 0xaf, 0xac, 0xab, 0x51, 0xae, 0x3c, 0xfd, 0xf2, 0xba, 0x1a, 0xe5,
+	0xca, 0xd3, 0x2f, 0x85, 0x63, 0xa6, 0x03, 0x7e, 0xc9, 0x0d, 0x69, 0x35,
+	0x5d, 0x3c, 0x7b, 0xc3, 0x7c, 0xe1, 0xfb, 0xfd, 0xeb, 0xff, 0x1f, 0x3a,
+	0x6e, 0x9d, 0x1a, 0x39, 0xdf, 0xd8, 0xaa, 0xea, 0x9a, 0xf6, 0xf9, 0xde,
+	0xb6, 0xf9, 0x55, 0xf9, 0x6d, 0xb4, 0x5c, 0xdb, 0xfc, 0x3e, 0xbc, 0x27,
+	0xad, 0x0c, 0xdb, 0xf5, 0x02, 0x7d, 0x1c, 0xf0, 0xf7, 0x3d, 0x4f, 0x74,
+	0xc9, 0xef, 0x6a, 0x9e, 0xcc, 0x91, 0x61, 0xa3, 0xa3, 0x47, 0xb7, 0x2a,
+	0x3b, 0xe6, 0x7e, 0x5a, 0x53, 0xbe, 0xf9, 0x41, 0xe0, 0x01, 0xaf, 0x7d,
+	0x43, 0xde, 0xe1, 0xa8, 0xf3, 0xaa, 0xbb, 0x6c, 0x23, 0xbf, 0x8a, 0x38,
+	0x03, 0x59, 0x4b, 0xdc, 0x5c, 0xf3, 0x71, 0x9d, 0x18, 0xf9, 0xef, 0x08,
+	0xd7, 0xcf, 0xd2, 0x8a, 0xee, 0xdb, 0x50, 0xef, 0xf1, 0x9a, 0x68, 0xdc,
+	0x5e, 0x1f, 0x26, 0xc3, 0xfb, 0xac, 0x55, 0x95, 0x13, 0x49, 0x7c, 0x46,
+	0x88, 0xef, 0xbf, 0x02, 0xe5, 0x37, 0x18, 0xde, 0x6c, 0x83, 0x1f, 0x47,
+	0x9e, 0xc6, 0x77, 0x2b, 0x9c, 0x6f, 0x19, 0xf4, 0xee, 0x7c, 0x37, 0xbd,
+	0x73, 0x1c, 0xf9, 0xa6, 0x6b, 0x67, 0x5f, 0x46, 0xbd, 0x70, 0x8a, 0xf3,
+	0xe2, 0x51, 0xa6, 0x73, 0xc8, 0x9a, 0x25, 0xab, 0x5f, 0xe5, 0xd1, 0x47,
+	0xb4, 0x4f, 0xd2, 0x2d, 0xc2, 0x7d, 0x7e, 0xd6, 0xb6, 0x8f, 0xd5, 0xb6,
+	0x4f, 0x81, 0xed, 0xad, 0xf1, 0x75, 0x9c, 0xb9, 0xb2, 0xfd, 0x26, 0x33,
+	0x1d, 0xd6, 0x52, 0x8f, 0x8c, 0x6e, 0xa6, 0xfa, 0x80, 0x7d, 0xee, 0x15,
+	0xe4, 0xda, 0xe5, 0x51, 0xcc, 0xa5, 0x87, 0xf0, 0x8e, 0xe7, 0xed, 0x06,
+	0x09, 0xfb, 0x5c, 0x83, 0x90, 0x4c, 0x77, 0xd9, 0x15, 0xbe, 0x63, 0x4b,
+	0x0b, 0xee, 0x4b, 0xda, 0x1a, 0xa1, 0xfd, 0x66, 0x2e, 0xe2, 0xcc, 0x53,
+	0xa8, 0x99, 0x8e, 0xa8, 0xbb, 0xaf, 0x70, 0x9f, 0x5b, 0xb4, 0x8b, 0x32,
+	0xaf, 0xcd, 0x69, 0x95, 0xb4, 0x3a, 0xe3, 0x37, 0x60, 0xeb, 0xba, 0x60,
+	0xd8, 0x77, 0x81, 0x5b, 0xa3, 0xa5, 0xe3, 0xba, 0xbc, 0xeb, 0x2c, 0x8f,
+	0xb2, 0xac, 0xf9, 0x79, 0x3d, 0xde, 0x45, 0x67, 0xfa, 0xdb, 0xf0, 0x4c,
+	0x5f, 0x0a, 0x6b, 0xed, 0xe8, 0x4c, 0x09, 0x7a, 0x63, 0xde, 0x04, 0xec,
+	0x08, 0xf8, 0x51, 0xa2, 0x95, 0x96, 0xf5, 0x39, 0x78, 0x6a, 0x6d, 0xbc,
+	0x31, 0x36, 0xc8, 0x30, 0xaa, 0x59, 0xc0, 0x83, 0x89, 0x0c, 0xec, 0x70,
+	0xba, 0x3f, 0xba, 0x83, 0xd5, 0x1d, 0xa1, 0xa9, 0xda, 0x9b, 0xe7, 0x07,
+	0x61, 0x8b, 0x16, 0xec, 0x93, 0xf3, 0x9d, 0x12, 0xd7, 0x19, 0xe1, 0xf7,
+	0x4b, 0xdb, 0x9c, 0xa4, 0x2c, 0x6a, 0x15, 0x3e, 0x7f, 0x9e, 0x96, 0x5b,
+	0x11, 0x0d, 0x39, 0xd8, 0xe3, 0x18, 0x7e, 0x23, 0x78, 0xe7, 0xe2, 0xc7,
+	0x75, 0x4e, 0x81, 0x1e, 0x93, 0x79, 0x34, 0xf2, 0xe4, 0x61, 0xa6, 0xef,
+	0x00, 0xd6, 0xb3, 0x3e, 0xb3, 0x9e, 0x1e, 0x20, 0x6f, 0x80, 0x7d, 0x45,
+	0x06, 0xb8, 0x01, 0xe3, 0xbf, 0x0e, 0x5b, 0x1f, 0xc4, 0xd3, 0x36, 0xcb,
+	0xcc, 0x5b, 0x89, 0x3f, 0x08, 0xf4, 0x1c, 0x7f, 0x3b, 0x18, 0x0f, 0xc7,
+	0x43, 0xe6, 0xdd, 0xac, 0x7b, 0x99, 0x1d, 0x74, 0x6e, 0x31, 0x8a, 0x61,
+	0x33, 0xb0, 0x41, 0xbe, 0x53, 0x1d, 0x07, 0x5f, 0x78, 0xac, 0x85, 0xb1,
+	0x0c, 0xf3, 0xcb, 0x0b, 0x38, 0x77, 0x9e, 0x4e, 0xa1, 0x66, 0xa7, 0x01,
+	0x7e, 0x22, 0x57, 0xf5, 0xb7, 0x84, 0xfa, 0xbe, 0x1e, 0x5e, 0x77, 0xb8,
+	0x3f, 0x0e, 0xfa, 0x8c, 0x36, 0x78, 0x86, 0x51, 0xb5, 0xc5, 0x45, 0x42,
+	0x2c, 0xcd, 0x04, 0xb7, 0x8b, 0xfc, 0x7d, 0xf4, 0x80, 0x3c, 0x53, 0x9e,
+	0x0e, 0x2f, 0x06, 0x81, 0x97, 0x1b, 0xca, 0x2e, 0x93, 0x9d, 0x7d, 0x92,
+	0xf6, 0x98, 0xfb, 0x48, 0x97, 0xdf, 0xe0, 0x50, 0x13, 0xdf, 0xde, 0x91,
+	0x0f, 0x82, 0x93, 0xa0, 0xfd, 0x05, 0xb9, 0xcf, 0x7d, 0xa0, 0x1f, 0xbc,
+	0x92, 0xf5, 0x04, 0xd3, 0x0a, 0xde, 0xa4, 0x99, 0xde, 0x24, 0x1d, 0x6e,
+	0x9d, 0x0f, 0x65, 0xf3, 0x28, 0x79, 0xfe, 0xdb, 0x3a, 0xdf, 0x47, 0x97,
+	0x5b, 0x4f, 0x86, 0xb4, 0xe5, 0x41, 0x2f, 0xf6, 0x6f, 0xbd, 0x90, 0x66,
+	0xdf, 0xc0, 0x32, 0xf7, 0x90, 0xf1, 0x79, 0xa3, 0xcf, 0x40, 0x07, 0x3f,
+	0xcd, 0x0f, 0xa4, 0x68, 0xbd, 0x1f, 0x60, 0xb8, 0xd4, 0x75, 0x74, 0x85,
+	0xe9, 0x20, 0xe9, 0x3f, 0x85, 0xb3, 0x19, 0xf4, 0x30, 0x3e, 0x7d, 0x83,
+	0x2f, 0xa8, 0xc8, 0xe7, 0xaa, 0xce, 0xbe, 0x89, 0xe3, 0x14, 0xeb, 0x70,
+	0x0f, 0xfc, 0x1f, 0x74, 0x10, 0x76, 0x5c, 0x5c, 0xe4, 0x3b, 0x85, 0x61,
+	0xbe, 0x87, 0x3a, 0x53, 0x82, 0x6c, 0x97, 0xf8, 0xbb, 0x60, 0x5a, 0xe5,
+	0x82, 0xaa, 0x76, 0xb2, 0xd8, 0x17, 0x32, 0xaf, 0xa5, 0x9f, 0x2c, 0xc9,
+	0xef, 0x80, 0x29, 0xac, 0x09, 0xf0, 0x6c, 0xff, 0x9b, 0x88, 0x9f, 0x14,
+	0xd4, 0xdf, 0x44, 0x84, 0xdf, 0x64, 0x1b, 0x2a, 0x07, 0x78, 0xb8, 0x69,
+	0xd0, 0x54, 0x33, 0xfa, 0x1b, 0x09, 0x96, 0x83, 0x83, 0x3a, 0x3e, 0x8a,
+	0xfb, 0x81, 0x8c, 0x2f, 0xd5, 0x75, 0xb2, 0xfc, 0x66, 0x98, 0xcf, 0x70,
+	0xfe, 0xce, 0x3c, 0xc4, 0x78, 0x59, 0xc9, 0x6f, 0x49, 0xec, 0x84, 0xfc,
+	0xc0, 0x73, 0xdf, 0x80, 0x2d, 0x65, 0xc2, 0x98, 0x6c, 0x72, 0x7d, 0x18,
+	0xd6, 0xac, 0xdb, 0xa9, 0x3a, 0xc9, 0xef, 0x13, 0xf4, 0xfa, 0xfc, 0xa0,
+	0x7c, 0x5f, 0xa6, 0x44, 0xf8, 0x9e, 0xc7, 0x29, 0x2a, 0xcb, 0xf7, 0xf7,
+	0x86, 0xf8, 0x50, 0x63, 0xdd, 0x1b, 0x8d, 0x33, 0x90, 0xa3, 0x82, 0x9b,
+	0x46, 0x2c, 0x7b, 0x0c, 0x71, 0x6c, 0x1a, 0x7c, 0x2f, 0x4e, 0x54, 0x68,
+	0x87, 0xc3, 0x3a, 0x0e, 0x99, 0xa5, 0x58, 0xc7, 0x58, 0xbf, 0x18, 0xa6,
+	0x17, 0x79, 0x26, 0xce, 0x3b, 0x4a, 0x53, 0x7a, 0xfe, 0xfd, 0x83, 0xe5,
+	0x9a, 0x6d, 0x16, 0xe8, 0xa3, 0xc0, 0x33, 0x78, 0xbc, 0x7a, 0xf0, 0x61,
+	0x75, 0x4f, 0x2f, 0x44, 0xfe, 0xd2, 0xc1, 0xb2, 0xea, 0xe3, 0xcc, 0xef,
+	0x87, 0x7d, 0x86, 0xd3, 0xe5, 0xf7, 0xd3, 0x7f, 0xbf, 0xd5, 0xa0, 0x8b,
+	0xb7, 0x06, 0xc1, 0xfd, 0xfc, 0x0d, 0x27, 0xac, 0x41, 0xd5, 0x77, 0x71,
+	0x8e, 0x13, 0xa8, 0x37, 0x46, 0x2d, 0xad, 0x04, 0xdb, 0x3d, 0xe5, 0xa3,
+	0x5e, 0x11, 0xf6, 0xd8, 0xaa, 0x30, 0x11, 0x7f, 0xb9, 0x96, 0xff, 0xcd,
+	0x7e, 0xfe, 0x26, 0x3c, 0xe7, 0xf2, 0x9a, 0x6d, 0xea, 0xae, 0xea, 0xe6,
+	0xdb, 0xa4, 0xcf, 0x25, 0x0a, 0xe3, 0xd0, 0xcd, 0xed, 0xf6, 0xd1, 0x9e,
+	0x23, 0xb2, 0x5d, 0xd0, 0x94, 0x01, 0x7a, 0xaa, 0xb5, 0x28, 0xdf, 0xe2,
+	0xef, 0xfd, 0xab, 0x07, 0xbf, 0xdb, 0xbc, 0x74, 0x70, 0x16, 0xf2, 0xe1,
+	0x33, 0xcd, 0x36, 0x23, 0xfd, 0x8b, 0x72, 0x7e, 0xee, 0x23, 0xfe, 0xfb,
+	0x88, 0xff, 0x3e, 0xe2, 0xbf, 0x8f, 0xf8, 0xef, 0x23, 0xfe, 0xfb, 0x88,
+	0xff, 0xe0, 0xe1, 0x0f, 0xa0, 0x2f, 0xe7, 0xfd, 0x89, 0x30, 0xdf, 0x7a,
+	0x7c, 0x2d, 0xdf, 0x3a, 0xd7, 0xe2, 0x6f, 0x3f, 0x92, 0x96, 0x4a, 0x85,
+	0x54, 0xbe, 0x4a, 0x82, 0xf3, 0x9b, 0x28, 0x5f, 0xbd, 0xfe, 0x37, 0x0c,
+	0x05, 0xc7, 0xb9, 0x1a, 0xc3, 0x55, 0x34, 0xe1, 0x30, 0x9c, 0xca, 0xd7,
+	0xb8, 0x46, 0x5a, 0x0f, 0xc3, 0xdf, 0xc9, 0xd8, 0xb7, 0xa9, 0x6f, 0x34,
+	0xea, 0x7b, 0xd0, 0xe3, 0x5f, 0xf7, 0x10, 0x8b, 0xcb, 0x4d, 0x19, 0x8f,
+	0x31, 0x7e, 0x06, 0x63, 0x83, 0xf5, 0x8f, 0xdf, 0xdd, 0xc3, 0x75, 0x41,
+	0xb9, 0x89, 0xbc, 0x68, 0x39, 0xca, 0x85, 0x00, 0xe7, 0xbf, 0xa9, 0x95,
+	0xea, 0x2c, 0x67, 0x41, 0xb3, 0x69, 0x30, 0xc5, 0x69, 0xaf, 0x75, 0x5e,
+	0x96, 0xb5, 0x8e, 0xfa, 0x9b, 0x9e, 0x11, 0xd0, 0x16, 0xdd, 0xfd, 0x12,
+	0xe9, 0xf3, 0x69, 0xf9, 0x77, 0x00, 0x29, 0x67, 0x58, 0xfe, 0x3d, 0x42,
+	0x1f, 0xf6, 0x11, 0xf3, 0x3b, 0xdb, 0xee, 0x56, 0xa9, 0xa0, 0x7c, 0x76,
+	0xa7, 0xfa, 0x3b, 0x08, 0x91, 0x86, 0xed, 0xde, 0xb6, 0x0d, 0x67, 0x83,
+	0x5c, 0x5f, 0xdd, 0x2a, 0xf3, 0x67, 0xf8, 0xd1, 0x93, 0xc3, 0x7d, 0x03,
+	0xd4, 0xb3, 0x9d, 0x4e, 0x0d, 0x73, 0xad, 0xb5, 0x19, 0xf8, 0x78, 0xad,
+	0x9d, 0x2d, 0x88, 0xed, 0x74, 0x7a, 0x11, 0x7e, 0x76, 0xd1, 0x76, 0x59,
+	0x97, 0x97, 0x86, 0xd3, 0xf0, 0xcf, 0x63, 0x03, 0x1c, 0x9f, 0x97, 0x5b,
+	0xac, 0x2b, 0x7d, 0x80, 0x1f, 0x84, 0x5e, 0x6e, 0x82, 0x3d, 0x09, 0xec,
+	0x1f, 0xe1, 0xfe, 0xb9, 0xc4, 0xdd, 0xe7, 0xec, 0xd9, 0x26, 0x75, 0x43,
+	0xd8, 0xa6, 0x25, 0x40, 0xfb, 0x27, 0x6a, 0x44, 0x97, 0xf8, 0x6c, 0xb3,
+	0x7e, 0xfb, 0xb7, 0xba, 0x37, 0xb5, 0x72, 0x9d, 0xff, 0x0e, 0x61, 0x98,
+	0xf6, 0x41, 0xbf, 0x4c, 0xe7, 0x4d, 0xed, 0x81, 0xc6, 0xff, 0x14, 0x6e,
+	0x75, 0xb1, 0x71, 0x5c, 0x55, 0xf8, 0xdc, 0x59, 0xaf, 0xed, 0x38, 0x6b,
+	0x67, 0xe2, 0x6c, 0xec, 0xb5, 0x15, 0xc4, 0xce, 0x7a, 0x12, 0x4f, 0xb5,
+	0x8e, 0x3a, 0xb6, 0x12, 0xb4, 0x42, 0x96, 0x58, 0xed, 0x7a, 0x5d, 0x87,
+	0x92, 0xb2, 0x85, 0x50, 0x05, 0x09, 0x55, 0x96, 0x9d, 0xd2, 0x54, 0x80,
+	0x90, 0xfa, 0x80, 0x78, 0xcb, 0x6a, 0x6d, 0x87, 0xa4, 0xec, 0x76, 0x6d,
+	0x62, 0xd7, 0x2f, 0x3c, 0x2c, 0xeb, 0x75, 0x6a, 0xbb, 0x9b, 0xac, 0x42,
+	0xfb, 0x50, 0x9e, 0x62, 0x99, 0x92, 0xc2, 0x4b, 0x85, 0xc4, 0x03, 0x02,
+	0x54, 0xa9, 0x4a, 0xda, 0x34, 0x0f, 0x25, 0x11, 0xbc, 0x50, 0x0a, 0xd2,
+	0xf0, 0x7d, 0x77, 0x66, 0x1d, 0x27, 0x50, 0x61, 0x69, 0x35, 0x77, 0xee,
+	0xdc, 0x3b, 0x73, 0x7f, 0xce, 0xf9, 0xce, 0x77, 0xce, 0x3d, 0x66, 0x1b,
+	0x1b, 0x65, 0xfa, 0xd3, 0xab, 0x6a, 0xa6, 0xda, 0x2b, 0x0b, 0x90, 0xe3,
+	0xe2, 0x48, 0x38, 0x88, 0x97, 0x76, 0x05, 0xfa, 0x0c, 0xc7, 0xdf, 0xb7,
+	0x57, 0x9a, 0x57, 0x16, 0xcd, 0x4e, 0xcd, 0xab, 0x1e, 0x7d, 0x76, 0x0a,
+	0x63, 0x8a, 0x61, 0x1d, 0xba, 0xfb, 0x34, 0x36, 0x19, 0xbc, 0xef, 0x7f,
+	0xec, 0xbe, 0xef, 0xb1, 0xfb, 0xc3, 0xff, 0xa3, 0x3d, 0xcb, 0x8f, 0xcb,
+	0x03, 0xc7, 0x69, 0xa5, 0xf8, 0x95, 0x62, 0xc9, 0x36, 0x66, 0x4b, 0x56,
+	0x9a, 0xbc, 0x20, 0x2b, 0x9e, 0xca, 0xba, 0xed, 0xc0, 0xbb, 0x76, 0x99,
+	0x5f, 0x86, 0xcc, 0x63, 0x1e, 0x1d, 0x36, 0xcf, 0xb4, 0x13, 0x7d, 0xd4,
+	0x99, 0x4e, 0x6c, 0x83, 0x61, 0x0f, 0xc5, 0xd0, 0xce, 0x7b, 0xc9, 0x4d,
+	0x9a, 0xe7, 0x74, 0x1c, 0x86, 0x7c, 0xc6, 0x53, 0x45, 0x9d, 0x9f, 0xc1,
+	0x36, 0x6d, 0x72, 0xc7, 0xce, 0xf4, 0x06, 0xf9, 0x3e, 0xf0, 0x5b, 0xc7,
+	0xfa, 0xc8, 0x35, 0x5e, 0x74, 0x77, 0xeb, 0xcc, 0xdb, 0xc2, 0x3c, 0x2a,
+	0x08, 0xcd, 0xb3, 0x22, 0xd5, 0xba, 0xc8, 0xeb, 0xf8, 0xfd, 0xae, 0x1e,
+	0xf8, 0x0a, 0x8a, 0x3e, 0xf3, 0xb8, 0x6c, 0x55, 0xbe, 0x2c, 0x0d, 0xd8,
+	0x9f, 0x4d, 0xd7, 0xf3, 0xee, 0xb9, 0x71, 0xbd, 0xe6, 0x3f, 0x29, 0x29,
+	0x49, 0x8c, 0xd2, 0xbe, 0xb5, 0xcb, 0x4f, 0x97, 0xdb, 0x64, 0xdb, 0xb4,
+	0xcc, 0x7b, 0xc2, 0x5c, 0xb6, 0x98, 0x4c, 0x45, 0x43, 0x9a, 0xa3, 0xca,
+	0xb7, 0xc0, 0xa0, 0xf1, 0xec, 0xee, 0xf2, 0x33, 0x7d, 0x8c, 0x9d, 0x7c,
+	0xb4, 0xcc, 0x7b, 0x03, 0x57, 0x43, 0x76, 0xec, 0x10, 0xb8, 0x2c, 0x40,
+	0xc8, 0xe4, 0xba, 0x73, 0xbe, 0xcf, 0x71, 0x6c, 0xa8, 0xa3, 0x2f, 0xda,
+	0x2e, 0xc5, 0xa3, 0xc0, 0x44, 0x35, 0xa4, 0x73, 0x8a, 0x76, 0xa2, 0x1a,
+	0xa3, 0x43, 0x35, 0xe6, 0xc8, 0x99, 0xfb, 0x35, 0x5e, 0x67, 0xae, 0x7d,
+	0x5f, 0xcf, 0x05, 0xe5, 0x42, 0xcd, 0xa5, 0xac, 0x9a, 0xb2, 0x09, 0x5d,
+	0xdb, 0x68, 0x2e, 0xf5, 0x73, 0xaf, 0xb6, 0x9a, 0x3f, 0xe8, 0xf3, 0x7d,
+	0x2d, 0xd6, 0xfd, 0xb0, 0xcf, 0xaf, 0x8b, 0x07, 0xbe, 0x13, 0x7d, 0xac,
+	0x2a, 0xe6, 0xf6, 0xb2, 0x34, 0x57, 0x7f, 0x2c, 0x6f, 0x57, 0x7e, 0x24,
+	0xbf, 0x5a, 0x3d, 0x0b, 0xfe, 0x61, 0x55, 0x0b, 0xb0, 0x27, 0x37, 0x9a,
+	0x9e, 0x77, 0xc3, 0x3d, 0x03, 0x5f, 0xc1, 0xf3, 0xfe, 0xe0, 0x6e, 0x4b,
+	0x62, 0xec, 0x3b, 0x98, 0x73, 0x1e, 0x3a, 0x44, 0x2c, 0x9c, 0x82, 0xbc,
+	0x25, 0xfb, 0xa5, 0x2b, 0xa2, 0xe5, 0x64, 0x68, 0x2c, 0x8c, 0x39, 0x18,
+	0x01, 0x27, 0xe7, 0x5c, 0x46, 0xfa, 0x29, 0x33, 0x46, 0xf3, 0x15, 0x7c,
+	0x3f, 0x0c, 0xbd, 0xd8, 0x8f, 0x9f, 0x92, 0x7b, 0xa3, 0x18, 0xeb, 0x28,
+	0x65, 0x2f, 0x2c, 0x89, 0x27, 0x31, 0x8f, 0x7c, 0x9b, 0xdc, 0x2f, 0x5d,
+	0xe9, 0x63, 0x5c, 0xee, 0x7e, 0x89, 0x65, 0xe3, 0x4b, 0x3d, 0xe2, 0x49,
+	0x1b, 0x6c, 0xf9, 0xfc, 0x09, 0x9f, 0x37, 0xfd, 0x5a, 0x0d, 0xa3, 0xbd,
+	0x5d, 0x78, 0x47, 0x91, 0xe7, 0x15, 0xbc, 0x30, 0x78, 0x79, 0x0e, 0x7c,
+	0x28, 0xd3, 0xbc, 0x20, 0x3b, 0xa3, 0x11, 0xb4, 0x21, 0x5f, 0xd1, 0x58,
+	0x22, 0xd9, 0x12, 0x73, 0xb0, 0x98, 0x0f, 0x85, 0x31, 0x9e, 0x21, 0x6e,
+	0x70, 0x8c, 0xed, 0x3c, 0xb7, 0x0b, 0xea, 0x6c, 0xc8, 0x08, 0xeb, 0x28,
+	0xdf, 0x69, 0xcd, 0xa9, 0x60, 0x43, 0xf1, 0xbe, 0x11, 0xc9, 0xe8, 0x72,
+	0x0f, 0xde, 0x77, 0x41, 0xe7, 0x25, 0xfa, 0xef, 0x4c, 0xa1, 0x0d, 0x71,
+	0x26, 0x05, 0x2e, 0xf1, 0xa1, 0x9a, 0x00, 0xbd, 0x99, 0x29, 0xf5, 0xc9,
+	0x84, 0xb9, 0x6f, 0xcf, 0x1c, 0x0b, 0xda, 0x57, 0x30, 0x8c, 0x91, 0x60,
+	0x4c, 0x3d, 0x7b, 0xc6, 0xc4, 0xfe, 0xf8, 0xc1, 0xc7, 0xcd, 0x2c, 0x2f,
+	0x02, 0xa7, 0x16, 0x7f, 0x9b, 0x71, 0x9f, 0x97, 0x6c, 0xb4, 0x5d, 0xfb,
+	0x36, 0x35, 0xec, 0x4b, 0xb6, 0xc4, 0x78, 0xd4, 0xb7, 0x81, 0x43, 0xfb,
+	0x82, 0x3a, 0xb6, 0x15, 0x23, 0x83, 0xb5, 0x4f, 0x6b, 0x3d, 0x64, 0xdd,
+	0x17, 0x25, 0xb3, 0x98, 0x97, 0x49, 0xdd, 0x8f, 0x6b, 0x38, 0xa8, 0x79,
+	0x08, 0x75, 0x35, 0x71, 0x08, 0x6b, 0x99, 0x0c, 0x07, 0x6d, 0xf7, 0x91,
+	0xc9, 0xe3, 0xef, 0xd3, 0x40, 0x67, 0xf1, 0xec, 0x10, 0xf7, 0xa8, 0x5d,
+	0x12, 0xdf, 0x84, 0xbd, 0x2c, 0xb5, 0xea, 0x23, 0xf2, 0x49, 0xe9, 0xb3,
+	0x3e, 0x9e, 0x93, 0xfc, 0xb5, 0x64, 0xca, 0x47, 0x25, 0x7d, 0x7e, 0x3a,
+	0x1d, 0x12, 0xeb, 0xbc, 0xef, 0x67, 0x1f, 0x9d, 0x9e, 0x57, 0x7c, 0x7e,
+	0xf4, 0xfc, 0xba, 0xea, 0x44, 0xdb, 0x08, 0xda, 0x71, 0x1c, 0xa6, 0xe4,
+	0x4a, 0x7f, 0xf7, 0x66, 0x8e, 0x79, 0xde, 0xa4, 0xce, 0xe1, 0x4a, 0x9a,
+	0xf3, 0xaa, 0xc5, 0xcf, 0x1d, 0x29, 0x45, 0x3b, 0xf0, 0xad, 0xa4, 0xb9,
+	0xae, 0x8e, 0x62, 0x3c, 0x2c, 0x1f, 0xa2, 0x4e, 0xc4, 0xb6, 0x85, 0xef,
+	0xb7, 0xa6, 0xd6, 0x54, 0x32, 0x3e, 0xa4, 0xac, 0x74, 0x11, 0xbf, 0x36,
+	0xa5, 0xcf, 0x1e, 0x63, 0x71, 0x05, 0xdd, 0xc5, 0x9c, 0xec, 0xe3, 0x9e,
+	0x37, 0x65, 0xb3, 0x3e, 0x69, 0x46, 0x14, 0xe3, 0x26, 0x5d, 0xfa, 0x8c,
+	0xf2, 0xd2, 0xe1, 0xa4, 0x79, 0x5c, 0x1d, 0x0c, 0xee, 0x53, 0xc0, 0xcc,
+	0xdd, 0xf7, 0x9d, 0x5d, 0x53, 0xa6, 0x5c, 0x2e, 0x25, 0xe3, 0xb3, 0xca,
+	0xca, 0xe3, 0x9d, 0xf9, 0x09, 0x45, 0xdc, 0x48, 0x9a, 0x5d, 0x8a, 0xb1,
+	0xcd, 0x0e, 0x3d, 0xef, 0x29, 0xf4, 0x4f, 0xaa, 0xb6, 0x60, 0x3c, 0xdc,
+	0xaf, 0xcb, 0xfd, 0xbe, 0xce, 0x10, 0x73, 0x06, 0x8c, 0x99, 0x45, 0xe6,
+	0x83, 0xe9, 0x3c, 0x84, 0x74, 0x62, 0x8c, 0xf7, 0x86, 0x3c, 0x38, 0xf9,
+	0x0f, 0xd4, 0xa1, 0x5c, 0x65, 0x9d, 0x13, 0xe8, 0xdb, 0x31, 0xcd, 0x9f,
+	0x1f, 0x9c, 0x2c, 0xe8, 0xfc, 0xc4, 0x1d, 0x95, 0x08, 0xe6, 0xbd, 0xbb,
+	0x67, 0xf1, 0x8c, 0xfb, 0x05, 0xbe, 0x67, 0x31, 0x34, 0xde, 0x21, 0xcc,
+	0x07, 0xcd, 0x55, 0x5a, 0xb2, 0xc1, 0xd8, 0x00, 0xcf, 0xf7, 0x5b, 0x67,
+	0xe5, 0x17, 0xc4, 0x18, 0xeb, 0xdc, 0x23, 0x27, 0xe0, 0x9d, 0xe0, 0xab,
+	0x75, 0xbc, 0xa7, 0xb8, 0x2c, 0x05, 0xbf, 0xbf, 0x74, 0x32, 0xff, 0xb4,
+	0x58, 0xff, 0xbc, 0x77, 0xf8, 0x36, 0x30, 0x87, 0xfb, 0x07, 0x27, 0x29,
+	0x9f, 0x5c, 0x9b, 0xb8, 0x9a, 0xbc, 0xc2, 0xf1, 0x0c, 0x4a, 0x6e, 0x19,
+	0xdc, 0x08, 0xbf, 0xf9, 0x65, 0x7f, 0xdf, 0xd6, 0xc1, 0xb3, 0x73, 0x25,
+	0x53, 0xeb, 0xeb, 0xac, 0xcb, 0xb3, 0x0f, 0xe8, 0x8a, 0xce, 0x7b, 0x62,
+	0x5f, 0xe6, 0x0a, 0x1e, 0xa1, 0x7d, 0x74, 0x6a, 0x12, 0x45, 0x5b, 0x72,
+	0x56, 0xd6, 0x83, 0xbf, 0xc3, 0x66, 0x16, 0x5f, 0x8d, 0x08, 0x30, 0x39,
+	0x15, 0x0f, 0x1d, 0x90, 0x79, 0xd7, 0x95, 0x46, 0xf3, 0x84, 0x5c, 0x6b,
+	0x3a, 0xfa, 0x19, 0xed, 0xd9, 0xc2, 0x6b, 0xfa, 0x5c, 0x3a, 0xfe, 0xa1,
+	0xb2, 0x9c, 0xab, 0xf0, 0x6b, 0xbe, 0x7b, 0x8c, 0x79, 0xc2, 0xe1, 0x81,
+	0x87, 0x79, 0x70, 0xc0, 0x0e, 0x70, 0x8e, 0xb7, 0xc0, 0x39, 0xde, 0x04,
+	0xe7, 0xf8, 0x25, 0x38, 0xf6, 0x8d, 0xca, 0x54, 0x80, 0xff, 0xd3, 0xc0,
+	0x21, 0xda, 0x6a, 0xeb, 0x2c, 0xf6, 0x74, 0xba, 0x00, 0x19, 0xfc, 0x00,
+	0xfe, 0xc7, 0x56, 0x25, 0x23, 0x1b, 0xab, 0x93, 0xb2, 0xb9, 0xea, 0xe7,
+	0x1c, 0xbf, 0xcb, 0x3c, 0xad, 0x51, 0xee, 0x93, 0x03, 0x1c, 0xda, 0x27,
+	0x89, 0xe3, 0xc4, 0x8f, 0x4e, 0x59, 0x2b, 0xaf, 0x69, 0x1c, 0x5a, 0x2b,
+	0xb3, 0x1c, 0x12, 0x9d, 0xf3, 0x75, 0x66, 0x5b, 0x6a, 0xee, 0x16, 0xea,
+	0xbb, 0x99, 0xdb, 0x15, 0xc4, 0xd6, 0x89, 0x97, 0x7f, 0x0e, 0xf6, 0x5e,
+	0xe9, 0x5c, 0xb8, 0x19, 0xf3, 0x00, 0xda, 0xb5, 0xb0, 0x6b, 0xc8, 0x3f,
+	0x27, 0x57, 0x7f, 0x41, 0x1b, 0x7c, 0x03, 0x9c, 0xf1, 0x2a, 0x6c, 0xc8,
+	0x8e, 0x73, 0x40, 0x73, 0xbf, 0x1d, 0xe7, 0x88, 0xce, 0xad, 0xe5, 0x7b,
+	0x8a, 0x65, 0x5b, 0xe6, 0xca, 0x56, 0xbc, 0x00, 0xf9, 0xbb, 0x06, 0xbf,
+	0x6d, 0x03, 0x7b, 0xb0, 0x89, 0xb5, 0xd8, 0x6a, 0xd2, 0xce, 0xbf, 0xaf,
+	0xb1, 0x77, 0xad, 0xf9, 0x27, 0xbc, 0xc7, 0x3a, 0x9b, 0x96, 0x3f, 0xf6,
+	0x13, 0x03, 0x99, 0x8f, 0x97, 0xd5, 0xfd, 0xfd, 0x7e, 0x1b, 0x68, 0xbb,
+	0xd9, 0x24, 0x1e, 0x8b, 0x5c, 0x2c, 0xd9, 0xb0, 0x25, 0x17, 0x63, 0xe4,
+	0x00, 0x55, 0xd5, 0xea, 0xe7, 0x05, 0x63, 0xf6, 0xbc, 0xfd, 0x36, 0xc7,
+	0xe5, 0x04, 0xb8, 0x4d, 0xdb, 0xbf, 0xad, 0xb9, 0x4d, 0xa9, 0xf2, 0xbc,
+	0x5c, 0x5f, 0x4d, 0x05, 0x1c, 0x27, 0x2f, 0x6f, 0x80, 0xe3, 0x35, 0x2b,
+	0xad, 0x1c, 0xed, 0x71, 0xac, 0x53, 0x45, 0xcd, 0x2d, 0x75, 0xc9, 0xa5,
+	0x95, 0xa2, 0xba, 0xbc, 0x52, 0x52, 0xaf, 0x2c, 0x95, 0x55, 0x71, 0xc9,
+	0xf3, 0xfe, 0xe9, 0xce, 0xc8, 0xdb, 0xab, 0x9e, 0x9c, 0x76, 0x8d, 0x81,
+	0x90, 0xb4, 0xf2, 0xdf, 0x3c, 0xaf, 0x13, 0xd8, 0xbc, 0x75, 0xd8, 0xf3,
+	0x9e, 0x18, 0x1d, 0x15, 0xe7, 0x30, 0x39, 0xca, 0x70, 0x8c, 0x39, 0xac,
+	0xc4, 0x9c, 0x8c, 0x6d, 0x9f, 0xaf, 0x29, 0x05, 0x7c, 0x3b, 0xe0, 0xf3,
+	0x97, 0x27, 0xbb, 0x83, 0x33, 0x8f, 0xb3, 0x2f, 0x31, 0x26, 0x1c, 0xfb,
+	0xaf, 0x98, 0xb0, 0x29, 0xe7, 0xca, 0x58, 0x88, 0xae, 0xa8, 0x7c, 0xaf,
+	0x1c, 0x79, 0xac, 0x6c, 0xe2, 0xea, 0x18, 0xc5, 0xf2, 0x7d, 0x6f, 0x48,
+	0xc7, 0xfe, 0xc1, 0x49, 0x4c, 0xcf, 0x9b, 0x75, 0xf9, 0xbd, 0x03, 0x8c,
+	0xc9, 0x98, 0xdd, 0xb0, 0xff, 0xa7, 0xb5, 0x7d, 0xae, 0xaa, 0x8c, 0x4d,
+	0xfd, 0x8e, 0xca, 0x44, 0x19, 0x36, 0x5e, 0x31, 0x2f, 0x94, 0x5c, 0xc1,
+	0x8a, 0xcd, 0x02, 0x3b, 0x66, 0x80, 0x37, 0x4f, 0xeb, 0xb3, 0xd1, 0x43,
+	0x1a, 0x7b, 0xe6, 0x58, 0xce, 0x4b, 0xba, 0xe6, 0xf6, 0xea, 0xf5, 0xbb,
+	0x7d, 0xad, 0x18, 0xf3, 0xf7, 0x1c, 0x7a, 0x9c, 0xe7, 0xf9, 0x40, 0xaf,
+	0x64, 0xd7, 0xcf, 0x40, 0x27, 0x62, 0x58, 0xdb, 0xb0, 0xd6, 0x87, 0x1d,
+	0xd8, 0xef, 0x1d, 0x27, 0x1c, 0x60, 0x6a, 0x27, 0xee, 0xd9, 0x6e, 0x12,
+	0xfd, 0x3a, 0x24, 0xb3, 0xd4, 0xae, 0x71, 0xf5, 0xd1, 0xba, 0x34, 0x78,
+	0x48, 0x0e, 0xe5, 0x10, 0xea, 0xe2, 0x41, 0x99, 0xdc, 0x6b, 0x1a, 0xe5,
+	0x36, 0x5c, 0xd9, 0xe6, 0x28, 0x78, 0x05, 0xae, 0xbf, 0xc0, 0xfb, 0x46,
+	0x31, 0xe6, 0xbc, 0x29, 0xef, 0x9d, 0xa4, 0x2d, 0x71, 0x0c, 0xe6, 0x1a,
+	0xcf, 0xda, 0xb8, 0x36, 0xca, 0x2a, 0xbb, 0xc8, 0x32, 0xae, 0x55, 0xff,
+	0xf9, 0x23, 0x98, 0x84, 0x3e, 0x99, 0x15, 0x1f, 0x93, 0xde, 0xdb, 0xc5,
+	0x24, 0xd6, 0x75, 0xc8, 0xc4, 0x52, 0x5c, 0x9d, 0xba, 0x62, 0x42, 0xde,
+	0xba, 0x24, 0xbb, 0x12, 0xd5, 0x7c, 0xb4, 0x06, 0x59, 0x5c, 0x87, 0x5c,
+	0xad, 0x41, 0xa6, 0x32, 0x65, 0x2b, 0x35, 0xad, 0xe2, 0x3a, 0x2e, 0x30,
+	0x05, 0x79, 0x0d, 0xbf, 0x4a, 0x2e, 0x4a, 0xfd, 0x75, 0xd0, 0x46, 0x68,
+	0x47, 0xd3, 0x61, 0x65, 0x43, 0x0e, 0x21, 0x97, 0x65, 0x5f, 0x7f, 0xdf,
+	0x51, 0x1a, 0x57, 0x53, 0x77, 0x24, 0xe9, 0xdc, 0x11, 0xcb, 0xdd, 0xc1,
+	0xef, 0x37, 0xe2, 0xca, 0x55, 0xe8, 0xfb, 0xeb, 0xf8, 0x4e, 0xf8, 0x35,
+	0x43, 0x8e, 0x0d, 0x6b, 0x9d, 0x4e, 0x49, 0xc8, 0x72, 0x36, 0xc5, 0xd7,
+	0xf1, 0x75, 0xad, 0xe3, 0x90, 0x37, 0x60, 0x90, 0xaf, 0xd3, 0xe9, 0x40,
+	0x46, 0xbf, 0x01, 0xfd, 0xb5, 0xe0, 0x95, 0xc5, 0x65, 0x1e, 0xfa, 0x7f,
+	0x15, 0xcf, 0x6f, 0x36, 0x3f, 0x56, 0x73, 0x8b, 0x2a, 0xc8, 0x3f, 0x79,
+	0x0e, 0x3c, 0xf9, 0xf7, 0x58, 0xbb, 0x1e, 0xcd, 0xdd, 0x13, 0xa3, 0x3c,
+	0x07, 0xfb, 0xb7, 0xba, 0x64, 0x1f, 0x97, 0xdb, 0x23, 0x27, 0x50, 0xee,
+	0xc6, 0xd5, 0xc0, 0x3a, 0x44, 0xf4, 0xf9, 0xf5, 0x5a, 0x69, 0xc4, 0x28,
+	0xea, 0x33, 0xe6, 0x31, 0xf4, 0x25, 0x96, 0x1d, 0xc6, 0x73, 0xc6, 0x65,
+	0x38, 0x37, 0x70, 0x26, 0x15, 0xd3, 0x39, 0xa1, 0x35, 0x70, 0x89, 0x75,
+	0xbc, 0xef, 0x16, 0xe3, 0x7a, 0x0d, 0xe8, 0xf0, 0xc8, 0x67, 0x5e, 0x3a,
+	0xca, 0xbc, 0xf3, 0xf7, 0x63, 0xbe, 0xfd, 0xfb, 0xc4, 0xbb, 0x6d, 0xcf,
+	0xa5, 0x0c, 0xdc, 0x7c, 0x60, 0x02, 0xef, 0xc8, 0xdb, 0x61, 0x8b, 0xaa,
+	0x5a, 0x7e, 0xd9, 0xce, 0xef, 0x5b, 0x6c, 0x24, 0xcd, 0x77, 0xc5, 0xef,
+	0x3b, 0x6f, 0xd3, 0xee, 0x74, 0x00, 0x5f, 0xe2, 0x9a, 0x57, 0xde, 0xb2,
+	0x0b, 0x40, 0x05, 0x2b, 0x3e, 0x05, 0x19, 0x6d, 0x17, 0xcb, 0xc9, 0xc9,
+	0xc3, 0xef, 0xce, 0xea, 0xbe, 0x6c, 0xdb, 0xea, 0xdb, 0xfa, 0x2e, 0xc7,
+	0xcf, 0xb9, 0x70, 0x0e, 0xf0, 0x6d, 0x4c, 0x53, 0xcb, 0xe8, 0x4e, 0xc3,
+	0x18, 0xf0, 0x65, 0xb4, 0x35, 0x8f, 0xe8, 0xff, 0x99, 0x07, 0xe5, 0x64,
+	0xc4, 0xf0, 0xcf, 0xdb, 0x71, 0x6d, 0x70, 0x3d, 0x3f, 0x06, 0xbf, 0xdf,
+	0x2b, 0x3f, 0xad, 0x38, 0xa3, 0x2f, 0x3f, 0x4f, 0xec, 0xca, 0x0f, 0x7d,
+	0xd4, 0x2e, 0xc9, 0xad, 0xd8, 0x32, 0x59, 0xd6, 0xfb, 0x0d, 0xae, 0xc9,
+	0xf8, 0xd1, 0x09, 0xc8, 0x0d, 0x65, 0x9d, 0xba, 0x65, 0x4a, 0x15, 0x72,
+	0x54, 0x05, 0x3e, 0x55, 0x21, 0x53, 0xe4, 0x40, 0x55, 0xe0, 0x5b, 0xb5,
+	0x69, 0x39, 0x75, 0xcc, 0x99, 0x36, 0x7b, 0x1d, 0x72, 0x74, 0xb5, 0xc9,
+	0xfd, 0xd7, 0x63, 0x36, 0x69, 0x07, 0x6f, 0xee, 0xee, 0xfd, 0xa7, 0xd8,
+	0xfb, 0x23, 0x72, 0x0d, 0x7e, 0xcb, 0xf5, 0xca, 0x08, 0x30, 0x49, 0x80,
+	0x51, 0x2e, 0x64, 0x23, 0x25, 0x1b, 0x95, 0x71, 0xd9, 0x84, 0x7d, 0xda,
+	0x5a, 0x4d, 0x80, 0x4f, 0x03, 0x47, 0xaf, 0x1c, 0x93, 0x37, 0x56, 0x95,
+	0xcc, 0xd8, 0xb0, 0x33, 0x6b, 0x8c, 0xc1, 0x43, 0x9e, 0xab, 0x5d, 0xfa,
+	0xbc, 0x7d, 0xa2, 0xee, 0xc7, 0xe2, 0x73, 0xf5, 0x1e, 0x99, 0xac, 0x9b,
+	0xf2, 0x54, 0xbd, 0x57, 0xbe, 0x5a, 0x8f, 0xca, 0xe9, 0x46, 0x4c, 0xbe,
+	0x56, 0x1f, 0x94, 0xa7, 0xeb, 0x47, 0xe4, 0x99, 0x46, 0x5c, 0xbe, 0x0e,
+	0xbf, 0x30, 0xdf, 0x70, 0x64, 0xaa, 0x31, 0x22, 0xa7, 0x1a, 0x8c, 0xb1,
+	0xe3, 0x7b, 0xf8, 0x65, 0x77, 0x63, 0x17, 0x1c, 0x57, 0x27, 0xc6, 0xe5,
+	0xa8, 0x9c, 0x3e, 0x6f, 0x94, 0xbc, 0x1f, 0xff, 0x10, 0x79, 0x01, 0x7d,
+	0x17, 0xae, 0x28, 0xa9, 0xe9, 0xef, 0xb7, 0xfe, 0x47, 0x24, 0xa2, 0x7d,
+	0xa3, 0x17, 0xaa, 0x83, 0x68, 0x63, 0xd3, 0x27, 0x09, 0xe2, 0x20, 0xad,
+	0xf8, 0x7f, 0xcb, 0xf7, 0x32, 0x74, 0x0c, 0xfb, 0x26, 0x7d, 0x2f, 0xbd,
+	0xf6, 0xc4, 0x0f, 0xfa, 0x39, 0xf4, 0xb5, 0xf6, 0x9e, 0x51, 0xb4, 0xbe,
+	0xbb, 0x90, 0x7f, 0xf4, 0x7f, 0x51, 0xfc, 0xb3, 0xa6, 0x73, 0x8d, 0x41,
+	0xfe, 0x4f, 0x0a, 0xc6, 0xf2, 0xf9, 0xf9, 0xdd, 0x93, 0x95, 0x09, 0xf5,
+	0x54, 0x85, 0x8c, 0xc6, 0x93, 0x85, 0xdd, 0x3c, 0xba, 0xaf, 0xc8, 0x9a,
+	0x1b, 0xd1, 0x63, 0xf0, 0xe3, 0xf6, 0x69, 0x9d, 0x53, 0x37, 0x31, 0x4c,
+	0xf9, 0xe3, 0x19, 0x5a, 0x4f, 0x70, 0xb6, 0x00, 0x6e, 0xeb, 0x9a, 0x72,
+	0xb1, 0xee, 0xc7, 0xaf, 0xe6, 0xb4, 0xbc, 0x5c, 0x87, 0xcc, 0xf1, 0xfc,
+	0xc1, 0xbf, 0x16, 0xaa, 0x7e, 0xdf, 0xec, 0xb0, 0x43, 0x7f, 0x1c, 0xf3,
+	0x35, 0x7a, 0xf9, 0x2d, 0xfe, 0x4f, 0x0e, 0xca, 0xc1, 0x78, 0x99, 0x0f,
+	0x6c, 0x6b, 0x59, 0xf4, 0xcf, 0x67, 0x1d, 0x79, 0x11, 0x7b, 0x51, 0x33,
+	0x39, 0xfe, 0x4e, 0xa9, 0x39, 0xf4, 0x6d, 0x89, 0xdf, 0xc3, 0x52, 0xc5,
+	0x77, 0x6a, 0x4e, 0x2b, 0x36, 0xe6, 0xe3, 0x6c, 0xcd, 0x7c, 0xf8, 0xdd,
+	0xe9, 0xea, 0x41, 0xdc, 0xa3, 0xce, 0x01, 0x67, 0x3a, 0xc3, 0xfb, 0x05,
+	0x94, 0x19, 0x1b, 0x99, 0xc3, 0x35, 0x16, 0xd4, 0xfd, 0x7c, 0x40, 0x73,
+	0xf5, 0xf1, 0x87, 0xfd, 0x66, 0xaa, 0x56, 0x21, 0x13, 0xba, 0xab, 0x8c,
+	0x9f, 0xad, 0x0f, 0x10, 0x73, 0x0f, 0xda, 0xfc, 0x45, 0xe4, 0x6f, 0xa6,
+	0x8e, 0x29, 0x04, 0xcf, 0xf6, 0xc9, 0xb3, 0x26, 0x73, 0xcd, 0xd3, 0x6a,
+	0xa2, 0xf2, 0x72, 0x90, 0x57, 0x7b, 0x57, 0x1d, 0xac, 0x35, 0x07, 0xfc,
+	0xbc, 0x74, 0xbe, 0x7b, 0x6f, 0x2e, 0xfa, 0x5e, 0x39, 0x61, 0x4e, 0x7a,
+	0x07, 0x78, 0xab, 0x36, 0x62, 0xd0, 0x41, 0xe0, 0x9d, 0xdd, 0xa6, 0xf5,
+	0xb1, 0xd8, 0xf8, 0x97, 0xb7, 0xad, 0xf5, 0xb9, 0x15, 0x63, 0xb8, 0x35,
+	0x40, 0xdf, 0x96, 0xb8, 0x71, 0xd1, 0x8f, 0x1b, 0x69, 0x1f, 0x1a, 0x58,
+	0x81, 0x3a, 0xea, 0x2a, 0xf4, 0x64, 0xb7, 0x2d, 0xff, 0xfe, 0x03, 0x7d,
+	0xe7, 0x95, 0xf0, 0x2c, 0x67, 0x00, 0x00, 0x00 };
 
 static u32 bnx2_RXP_b06FwData[(0x0/4) + 1] = { 0x0 };
-static u32 bnx2_RXP_b06FwRodata[(0x28/4) + 1] = {
-	0x0800468c, 0x0800458c, 0x08004630, 0x08004648, 0x08004660, 0x08004680,
-	0x0800468c, 0x0800468c, 0x08004594, 0x00000000, 0x00000000 };
-static u32 bnx2_RXP_b06FwBss[(0x13a4/4) + 1] = { 0x0 };
-static u32 bnx2_RXP_b06FwSbss[(0x1c/4) + 1] = { 0x0 };
+static u32 bnx2_RXP_b06FwRodata[(0x278/4) + 1] = {
+	0x08003fdc, 0x08003edc, 0x08003f80, 0x08003f98, 0x08003fb0, 0x08003fd0,
+	0x08003fdc, 0x08003fdc, 0x08003ee4, 0x00000000, 0x08004a04, 0x08004a3c,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004a74, 0x08004c38,
+	0x08004b80, 0x08004bb8, 0x08004c38, 0x08004b08, 0x08004c38, 0x08004c38,
+	0x08004bb8, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004bf8,
+	0x08004c38, 0x08004bf8, 0x08004b80, 0x08004c38, 0x08004c38, 0x08004bf8,
+	0x08004bf8, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38, 0x08004c38,
+	0x08004ae4, 0x00000000, 0x08006018, 0x08006030, 0x08006030, 0x08006030,
+	0x08006018, 0x08006030, 0x08006030, 0x08006030, 0x08006018, 0x08006030,
+	0x08006030, 0x08006030, 0x08006018, 0x08006030, 0x08006030, 0x08006030,
+	0x08006024, 0x00000000, 0x00000000 };
+
+static u32 bnx2_RXP_b06FwBss[(0x13dc/4) + 1] = { 0x0 };
+static u32 bnx2_RXP_b06FwSbss[(0x2c/4) + 1] = { 0x0 };
+
+static struct fw_info bnx2_rxp_fw_06 = {
+	.ver_major			= 0x2,
+	.ver_minor			= 0x8,
+	.ver_fix			= 0x17,
+
+	.start_addr			= 0x08003184,
+
+	.text_addr			= 0x08000000,
+	.text_len			= 0x6728,
+	.text_index			= 0x0,
+	.gz_text			= bnx2_RXP_b06FwText,
+	.gz_text_len			= sizeof(bnx2_RXP_b06FwText),
+
+	.data_addr			= 0x080069c0,
+	.data_len			= 0x0,
+	.data_index			= 0x0,
+	.data				= bnx2_RXP_b06FwData,
+
+	.sbss_addr			= 0x080069c0,
+	.sbss_len			= 0x2c,
+	.sbss_index			= 0x0,
+	.sbss				= bnx2_RXP_b06FwSbss,
+
+	.bss_addr			= 0x080069f0,
+	.bss_len			= 0x13dc,
+	.bss_index			= 0x0,
+	.bss				= bnx2_RXP_b06FwBss,
+
+	.rodata_addr			= 0x08006728,
+	.rodata_len			= 0x278,
+	.rodata_index			= 0x0,
+	.rodata				= bnx2_RXP_b06FwRodata,
+};
 
 static u8 bnx2_rv2p_proc1[] = {
 	0x1f, 0x8b, 0x08, 0x08, 0x5e, 0xd0, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65,
@@ -1316,20 +1665,6 @@
 	0x63, 0xd6, 0x11, 0x8f, 0x47, 0xd5, 0x5f, 0x3f, 0x97, 0x8f, 0x31, 0xd8,
 	0x17, 0x00, 0x00, 0x00 };
 
-static const int bnx2_TPAT_b06FwReleaseMajor = 0x1;
-static const int bnx2_TPAT_b06FwReleaseMinor = 0x0;
-static const int bnx2_TPAT_b06FwReleaseFix = 0x0;
-static const u32 bnx2_TPAT_b06FwStartAddr = 0x08000860;
-static const u32 bnx2_TPAT_b06FwTextAddr = 0x08000800;
-static const int bnx2_TPAT_b06FwTextLen = 0x122c;
-static const u32 bnx2_TPAT_b06FwDataAddr = 0x08001a60;
-static const int bnx2_TPAT_b06FwDataLen = 0x0;
-static const u32 bnx2_TPAT_b06FwRodataAddr = 0x00000000;
-static const int bnx2_TPAT_b06FwRodataLen = 0x0;
-static const u32 bnx2_TPAT_b06FwBssAddr = 0x08001aa0;
-static const int bnx2_TPAT_b06FwBssLen = 0x250;
-static const u32 bnx2_TPAT_b06FwSbssAddr = 0x08001a60;
-static const int bnx2_TPAT_b06FwSbssLen = 0x34;
 static u8 bnx2_TPAT_b06FwText[] = {
 	0x1f, 0x8b, 0x08, 0x08, 0x47, 0xd2, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65,
 	0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xc5, 0x57, 0x4d, 0x68,
@@ -1529,20 +1864,40 @@
 static u32 bnx2_TPAT_b06FwBss[(0x250/4) + 1] = { 0x0 };
 static u32 bnx2_TPAT_b06FwSbss[(0x34/4) + 1] = { 0x0 };
 
-static const int bnx2_TXP_b06FwReleaseMajor = 0x1;
-static const int bnx2_TXP_b06FwReleaseMinor = 0x0;
-static const int bnx2_TXP_b06FwReleaseFix = 0x0;
-static const u32 bnx2_TXP_b06FwStartAddr = 0x080034b0;
-static const u32 bnx2_TXP_b06FwTextAddr = 0x08000000;
-static const int bnx2_TXP_b06FwTextLen = 0x5748;
-static const u32 bnx2_TXP_b06FwDataAddr = 0x08005760;
-static const int bnx2_TXP_b06FwDataLen = 0x0;
-static const u32 bnx2_TXP_b06FwRodataAddr = 0x00000000;
-static const int bnx2_TXP_b06FwRodataLen = 0x0;
-static const u32 bnx2_TXP_b06FwBssAddr = 0x080057a0;
-static const int bnx2_TXP_b06FwBssLen = 0x1c4;
-static const u32 bnx2_TXP_b06FwSbssAddr = 0x08005760;
-static const int bnx2_TXP_b06FwSbssLen = 0x38;
+static struct fw_info bnx2_tpat_fw_06 = {
+	.ver_major			= 0x1,
+	.ver_minor			= 0x0,
+	.ver_fix			= 0x0,
+
+	.start_addr			= 0x08000860,
+
+	.text_addr			= 0x08000800,
+	.text_len			= 0x122c,
+	.text_index			= 0x0,
+	.gz_text			= bnx2_TPAT_b06FwText,
+	.gz_text_len			= sizeof(bnx2_TPAT_b06FwText),
+
+	.data_addr			= 0x08001a60,
+	.data_len			= 0x0,
+	.data_index			= 0x0,
+	.data				= bnx2_TPAT_b06FwData,
+
+	.sbss_addr			= 0x08001a60,
+	.sbss_len			= 0x34,
+	.sbss_index			= 0x0,
+	.sbss				= bnx2_TPAT_b06FwSbss,
+
+	.bss_addr			= 0x08001aa0,
+	.bss_len			= 0x250,
+	.bss_index			= 0x0,
+	.bss				= bnx2_TPAT_b06FwBss,
+
+	.rodata_addr			= 0x00000000,
+	.rodata_len			= 0x0,
+	.rodata_index			= 0x0,
+	.rodata				= bnx2_TPAT_b06FwRodata,
+};
+
 static u8 bnx2_TXP_b06FwText[] = {
 	0x1f, 0x8b, 0x08, 0x08, 0x21, 0xd3, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65,
 	0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xed, 0x5c, 0x6d, 0x6c,
@@ -1964,3 +2319,38 @@
 static u32 bnx2_TXP_b06FwRodata[(0x0/4) + 1] = { 0x0 };
 static u32 bnx2_TXP_b06FwBss[(0x1c4/4) + 1] = { 0x0 };
 static u32 bnx2_TXP_b06FwSbss[(0x38/4) + 1] = { 0x0 };
+
+static struct fw_info bnx2_txp_fw_06 = {
+	.ver_major			= 0x1,
+	.ver_minor			= 0x0,
+	.ver_fix			= 0x0,
+
+	.start_addr			= 0x080034b0,
+
+	.text_addr			= 0x08000000,
+	.text_len			= 0x5748,
+	.text_index			= 0x0,
+	.gz_text			= bnx2_TXP_b06FwText,
+	.gz_text_len			= sizeof(bnx2_TXP_b06FwText),
+
+	.data_addr			= 0x08005760,
+	.data_len			= 0x0,
+	.data_index			= 0x0,
+	.data				= bnx2_TXP_b06FwData,
+
+	.sbss_addr			= 0x08005760,
+	.sbss_len			= 0x38,
+	.sbss_index			= 0x0,
+	.sbss				= bnx2_TXP_b06FwSbss,
+
+	.bss_addr			= 0x080057a0,
+	.bss_len			= 0x1c4,
+	.bss_index			= 0x0,
+	.bss				= bnx2_TXP_b06FwBss,
+
+	.rodata_addr			= 0x00000000,
+	.rodata_len			= 0x0,
+	.rodata_index			= 0x0,
+	.rodata				= bnx2_TXP_b06FwRodata,
+};
+
diff --git a/drivers/net/bnx2_fw2.h b/drivers/net/bnx2_fw2.h
new file mode 100644
index 0000000..680c769
--- /dev/null
+++ b/drivers/net/bnx2_fw2.h
@@ -0,0 +1,4086 @@
+/* bnx2_fw2.h: Broadcom NX2 network driver.
+ *
+ * Copyright (c) 2006 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, except as noted below.
+ *
+ * This file contains firmware data derived from proprietary unpublished
+ * source code, Copyright (c) 2006 Broadcom Corporation.
+ *
+ * Permission is hereby granted for the distribution of this firmware data
+ * in hexadecimal or equivalent format, provided this copyright notice is
+ * accompanying it.
+ */
+
+static u8 bnx2_COM_b09FwText[] = {
+	0x1f, 0x8b, 0x08, 0x08, 0xac, 0xfb, 0x2f, 0x45, 0x00, 0x03, 0x74, 0x65,
+	0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xdc, 0x5b, 0x6b, 0x70,
+	0x1b, 0xd7, 0x75, 0x3e, 0xfb, 0x00, 0x09, 0x91, 0x10, 0xb5, 0xa4, 0x60,
+	0x1a, 0x96, 0x68, 0x07, 0x20, 0x57, 0x22, 0x6a, 0xb1, 0x29, 0x4c, 0x33,
+	0x16, 0x9b, 0xc2, 0x12, 0x02, 0x50, 0xae, 0x26, 0xc3, 0x3a, 0x94, 0xcd,
+	0xd8, 0x4a, 0xaa, 0xc9, 0x30, 0x00, 0xa5, 0xf4, 0x61, 0xb7, 0x92, 0xab,
+	0xa9, 0x5d, 0xd7, 0xaa, 0x21, 0x92, 0x6a, 0xf5, 0x83, 0xe5, 0x2a, 0x16,
+	0x43, 0xa9, 0xd3, 0x74, 0xc2, 0x12, 0x56, 0xac, 0x4e, 0x31, 0x85, 0xfc,
+	0xd6, 0x38, 0xb1, 0xc9, 0x4a, 0x76, 0xeb, 0xf4, 0xe1, 0xa6, 0x33, 0xcd,
+	0xa3, 0x9d, 0x36, 0xf6, 0xa8, 0x3f, 0xea, 0xe9, 0xd3, 0x33, 0x6e, 0xa7,
+	0xea, 0xd8, 0x0e, 0xfa, 0x7d, 0x77, 0x77, 0x81, 0x25, 0x48, 0xbd, 0xfc,
+	0xc8, 0x8f, 0x70, 0x06, 0xb3, 0x7b, 0xef, 0xde, 0xbd, 0xf7, 0xdc, 0xf3,
+	0xf8, 0xce, 0x63, 0x2f, 0xfb, 0x44, 0x5a, 0xc4, 0xfb, 0x5b, 0x8b, 0x5f,
+	0xfc, 0xfe, 0x5f, 0x2d, 0x7c, 0x7c, 0xf0, 0xe3, 0xfd, 0x22, 0xb7, 0xdc,
+	0xa2, 0xb7, 0x86, 0x75, 0xf6, 0x1b, 0xf8, 0x45, 0xf1, 0xeb, 0xf7, 0xee,
+	0x57, 0xfb, 0xb3, 0xf0, 0x7b, 0x13, 0x0f, 0xc7, 0xfe, 0x55, 0x44, 0xbb,
+	0xc4, 0x98, 0xe0, 0x5f, 0xb5, 0x7a, 0xf9, 0xe7, 0x5c, 0x38, 0x7e, 0x89,
+	0x67, 0x86, 0xbb, 0x9c, 0xa2, 0x97, 0x3f, 0x09, 0xeb, 0x69, 0x39, 0x94,
+	0xb5, 0x25, 0x6c, 0xa4, 0xdf, 0x3c, 0x54, 0xb0, 0x45, 0x32, 0xe5, 0x2d,
+	0xf1, 0x9c, 0xbc, 0x57, 0x2d, 0x46, 0x4d, 0x61, 0xff, 0x8d, 0xe9, 0x77,
+	0xbf, 0xf6, 0xe2, 0xd6, 0xc4, 0x5b, 0xf3, 0x86, 0x84, 0xad, 0xf4, 0x19,
+	0xb1, 0x36, 0x4b, 0xb8, 0x0b, 0xef, 0x7c, 0xb5, 0xf7, 0x3d, 0x91, 0x36,
+	0x7f, 0xae, 0x37, 0xab, 0x2f, 0xf6, 0x4a, 0x71, 0x43, 0x3a, 0x2c, 0x7a,
+	0x7a, 0xd3, 0xf7, 0xb3, 0x86, 0x35, 0x66, 0xa4, 0x2d, 0x59, 0xac, 0xc8,
+	0xc8, 0xde, 0x69, 0x09, 0x87, 0xd3, 0x47, 0x9b, 0x9b, 0x37, 0x49, 0xd8,
+	0x4c, 0x8f, 0x1d, 0xfa, 0x6d, 0xfb, 0xd1, 0xaa, 0x6e, 0xdb, 0xc9, 0x05,
+	0x89, 0x0c, 0x9e, 0x1a, 0xc0, 0xf3, 0x72, 0x22, 0x29, 0xb2, 0x55, 0x74,
+	0xbb, 0x18, 0x31, 0xec, 0xb0, 0x64, 0x2b, 0xb6, 0xe4, 0x2a, 0x22, 0x7f,
+	0x5e, 0xd6, 0xe4, 0x94, 0xdd, 0x29, 0x0b, 0x7d, 0xef, 0x56, 0x33, 0xa0,
+	0xe5, 0xcf, 0xec, 0xb1, 0x43, 0x53, 0x36, 0xe9, 0x9d, 0x6d, 0x76, 0xe9,
+	0x9d, 0x6a, 0x2a, 0xd8, 0xa6, 0x4c, 0x94, 0xd9, 0x37, 0xa2, 0xb3, 0x2f,
+	0x94, 0x7e, 0x68, 0xcd, 0x29, 0x3b, 0xe2, 0xf5, 0xed, 0xdc, 0x9e, 0xc5,
+	0x7c, 0x93, 0x65, 0x8e, 0x3d, 0x93, 0x2a, 0xd8, 0x51, 0xaf, 0x3f, 0x79,
+	0x5b, 0xd6, 0x8e, 0xa1, 0xbf, 0xcb, 0x7b, 0x76, 0xf2, 0xbe, 0x82, 0x6d,
+	0x7b, 0xcf, 0xbe, 0x8a, 0xb9, 0x93, 0x5e, 0xff, 0x7d, 0xdb, 0x0a, 0x76,
+	0x9f, 0xd7, 0x3f, 0xbd, 0x2d, 0x6b, 0xa7, 0xbc, 0xfe, 0xe4, 0xee, 0x82,
+	0x3d, 0xe0, 0xf5, 0x9f, 0xbd, 0x3d, 0x6b, 0x0f, 0x7a, 0xfd, 0x0f, 0x6d,
+	0x2d, 0xd8, 0x69, 0xf4, 0x1f, 0x6d, 0xd6, 0x37, 0x59, 0x72, 0xa4, 0x1c,
+	0xc7, 0x2f, 0x83, 0x67, 0x43, 0xe8, 0xdb, 0x89, 0xdf, 0x30, 0x7e, 0xbf,
+	0xb8, 0x4e, 0xda, 0x46, 0x70, 0xfd, 0xc6, 0x46, 0x97, 0x77, 0xe0, 0x91,
+	0x13, 0x96, 0x37, 0x8c, 0x98, 0xbc, 0xd8, 0xfb, 0x06, 0x78, 0x68, 0xc9,
+	0x99, 0x8a, 0x68, 0x23, 0xbd, 0x31, 0xf0, 0x2e, 0x2a, 0x4f, 0x56, 0x5a,
+	0xc5, 0x78, 0xcc, 0x00, 0x6f, 0x3e, 0x2f, 0xf9, 0x68, 0x58, 0xda, 0xe7,
+	0x34, 0xe9, 0xee, 0x0f, 0x4b, 0xc6, 0x52, 0x72, 0x13, 0x7d, 0x26, 0x2a,
+	0xc6, 0x5c, 0x66, 0xbd, 0x2e, 0x9b, 0xac, 0x9c, 0x14, 0xc1, 0xbb, 0xef,
+	0x51, 0x27, 0xf1, 0x2c, 0x2e, 0xb9, 0xe9, 0x9b, 0x65, 0xcc, 0x22, 0x5d,
+	0x3b, 0x6f, 0x74, 0xd7, 0x0a, 0x6b, 0xd9, 0x13, 0x23, 0x72, 0xc4, 0x89,
+	0x68, 0xb9, 0x13, 0xdb, 0x24, 0x9b, 0x92, 0x28, 0xde, 0x8b, 0xe5, 0xf1,
+	0xa4, 0x54, 0x1e, 0x91, 0x29, 0x47, 0xb4, 0xac, 0x43, 0x7e, 0x76, 0xe2,
+	0x79, 0x9b, 0x1a, 0x8b, 0xbe, 0x2e, 0x43, 0xcd, 0x1d, 0x46, 0xbf, 0x85,
+	0xfe, 0x0e, 0x6d, 0x48, 0xcd, 0xa1, 0xfa, 0xe3, 0x93, 0x12, 0x91, 0xc7,
+	0xcb, 0x51, 0x6f, 0x6c, 0xb5, 0x9a, 0x4d, 0x59, 0x18, 0x37, 0x22, 0x93,
+	0x4e, 0x54, 0xc6, 0x70, 0x9d, 0x70, 0xb8, 0x7e, 0x0c, 0x3a, 0xf5, 0xda,
+	0xa1, 0xfc, 0xac, 0x9a, 0x2f, 0x6e, 0xa4, 0x39, 0x5f, 0x17, 0xc6, 0x4d,
+	0x80, 0x2e, 0x4d, 0x4c, 0x25, 0xcb, 0x8c, 0xe4, 0xa7, 0x35, 0xe8, 0x1b,
+	0xae, 0x8a, 0xaf, 0x43, 0xa0, 0xdf, 0x14, 0xbb, 0x5f, 0x93, 0x02, 0x64,
+	0x55, 0xb4, 0xd0, 0x2e, 0x9f, 0xd5, 0xb3, 0x4e, 0xb3, 0xe4, 0xcc, 0xb8,
+	0x18, 0x33, 0x4a, 0x97, 0x64, 0x12, 0xef, 0xe8, 0x36, 0xc7, 0x5c, 0xc4,
+	0xbe, 0xc7, 0x94, 0x1c, 0x9a, 0xd2, 0x45, 0x3d, 0x57, 0xe9, 0x14, 0x7d,
+	0x6e, 0x8f, 0xbc, 0x3c, 0x2d, 0x96, 0x91, 0x7e, 0xb7, 0x9a, 0xb5, 0xa7,
+	0xf4, 0xec, 0x13, 0xa6, 0x84, 0x66, 0x34, 0x99, 0xb2, 0x13, 0xb0, 0x80,
+	0xa3, 0xfa, 0x8e, 0xca, 0x59, 0x8c, 0xe3, 0x7b, 0x18, 0x57, 0xd6, 0xc1,
+	0x57, 0xde, 0x6f, 0xb1, 0x74, 0xa5, 0xcf, 0x1c, 0x03, 0x19, 0x60, 0x1f,
+	0x4f, 0x3a, 0x90, 0x89, 0x92, 0x51, 0x1c, 0x32, 0x7a, 0x15, 0x32, 0x1a,
+	0x80, 0x6c, 0x52, 0xf2, 0x52, 0xa5, 0x4f, 0x9e, 0xaf, 0x24, 0xe5, 0x39,
+	0xe8, 0xeb, 0xb3, 0x95, 0xb8, 0x3c, 0x53, 0xe9, 0x92, 0xa7, 0x2b, 0x31,
+	0x79, 0x4a, 0xc9, 0x2d, 0x07, 0xdb, 0x50, 0xb2, 0x0c, 0x5f, 0x9f, 0x96,
+	0x70, 0x27, 0xe4, 0xd1, 0x01, 0xfd, 0x69, 0x87, 0x6e, 0x7e, 0xa5, 0x37,
+	0x2c, 0xb3, 0xbd, 0x92, 0x59, 0x8f, 0xfe, 0x9b, 0xd2, 0xa6, 0xe2, 0x91,
+	0x89, 0xe7, 0x93, 0xd3, 0x21, 0xc9, 0x59, 0x8f, 0xcb, 0x85, 0x19, 0x53,
+	0x26, 0x2b, 0xdb, 0x6f, 0x72, 0x65, 0xc6, 0xf6, 0xbc, 0x9c, 0x9f, 0x69,
+	0xc2, 0xb3, 0x79, 0x79, 0x79, 0xb3, 0x2e, 0x13, 0xb3, 0x6f, 0x89, 0x09,
+	0x1e, 0x0e, 0x29, 0x79, 0x3f, 0x2e, 0xff, 0xfc, 0x27, 0x22, 0x23, 0xe0,
+	0x8b, 0xde, 0xff, 0xef, 0xd5, 0x8c, 0x05, 0x7e, 0xf4, 0xf7, 0x41, 0x3f,
+	0x74, 0x5c, 0x29, 0xcf, 0x38, 0xc6, 0x98, 0x5a, 0xce, 0x39, 0x0d, 0x9b,
+	0x6a, 0xd5, 0xb2, 0xc7, 0x45, 0x0a, 0xc7, 0xab, 0x52, 0x48, 0x85, 0xe4,
+	0x01, 0xab, 0x2a, 0x43, 0xa9, 0x26, 0x39, 0x60, 0x75, 0xca, 0x44, 0xdf,
+	0xcf, 0x68, 0x3e, 0x96, 0x7d, 0xa5, 0x92, 0xc6, 0x3d, 0xfb, 0x44, 0x66,
+	0xd5, 0xbd, 0xdb, 0x5f, 0xac, 0x84, 0x24, 0x13, 0x2d, 0xc6, 0x4c, 0xb9,
+	0xa0, 0xb9, 0xb4, 0xed, 0xf4, 0x9f, 0x41, 0x5e, 0x63, 0xc0, 0x90, 0x84,
+	0xd2, 0xa5, 0xfc, 0xf4, 0x9a, 0x8b, 0x19, 0xd5, 0x1d, 0x52, 0x7a, 0x6a,
+	0xa4, 0x4d, 0xd2, 0x31, 0xa6, 0xa5, 0xa3, 0xd2, 0xad, 0xec, 0x64, 0x00,
+	0x63, 0x06, 0xb5, 0xbb, 0x2b, 0x94, 0x37, 0xee, 0xcb, 0xa4, 0x75, 0x03,
+	0xc6, 0x9a, 0xb8, 0x66, 0x3c, 0x9a, 0x83, 0x74, 0x72, 0x2e, 0xd2, 0xc9,
+	0xeb, 0xde, 0x00, 0x9d, 0xfb, 0x6a, 0xf7, 0xb3, 0x81, 0xfb, 0x62, 0xe5,
+	0xd7, 0x5b, 0x5c, 0xfa, 0xc8, 0xd7, 0x41, 0x99, 0x98, 0x7e, 0xc8, 0x5b,
+	0x0b, 0xf7, 0x65, 0xae, 0xb1, 0x00, 0x3e, 0xa9, 0x91, 0x57, 0x58, 0xab,
+	0x18, 0x58, 0xeb, 0x70, 0x60, 0xad, 0xc3, 0x81, 0xb5, 0x8a, 0xe0, 0xad,
+	0xac, 0xd3, 0x81, 0x33, 0x79, 0xc2, 0xbc, 0x1c, 0xc5, 0x9c, 0x6f, 0x88,
+	0x91, 0xa6, 0x2d, 0xf8, 0x36, 0xf9, 0x07, 0x18, 0x9f, 0x96, 0x73, 0x0e,
+	0x78, 0x73, 0x3c, 0x24, 0x77, 0xa9, 0x71, 0xff, 0xb1, 0xc6, 0xa5, 0x31,
+	0xf8, 0x2c, 0x2c, 0xbb, 0xa2, 0xbc, 0xf7, 0x9f, 0x99, 0xe0, 0x37, 0xdb,
+	0x93, 0x37, 0xb8, 0x6d, 0xde, 0x9f, 0xf5, 0xf6, 0xd2, 0xee, 0xbe, 0x57,
+	0x79, 0x53, 0x61, 0xc6, 0x62, 0x85, 0xb6, 0x2d, 0x29, 0xc3, 0x96, 0xfd,
+	0x43, 0xa9, 0x4e, 0x99, 0xb4, 0xb4, 0xd4, 0x44, 0xb2, 0x99, 0xfc, 0xcf,
+	0xe8, 0x76, 0x2b, 0xec, 0x47, 0xe2, 0x3a, 0x71, 0x51, 0xed, 0xeb, 0x5b,
+	0x1e, 0xfd, 0x16, 0xdb, 0x23, 0xba, 0xdd, 0xd1, 0xd0, 0x4f, 0xfd, 0xff,
+	0x4b, 0xdc, 0xd3, 0x06, 0xfa, 0x75, 0x77, 0xed, 0xbf, 0x42, 0x9b, 0x58,
+	0x15, 0xf1, 0xda, 0xfe, 0xf3, 0xff, 0x32, 0x96, 0xb7, 0x8f, 0x6d, 0x5c,
+	0xde, 0xf6, 0x6d, 0x29, 0x88, 0x73, 0xdc, 0x2b, 0x6c, 0xd8, 0xa6, 0xfe,
+	0x85, 0x40, 0x6b, 0x0a, 0x36, 0xdc, 0xec, 0xd1, 0xf0, 0xba, 0x47, 0x03,
+	0x68, 0xc5, 0xb8, 0x89, 0x0a, 0xdf, 0x51, 0xa2, 0x6c, 0x68, 0x93, 0xf7,
+	0xfe, 0xfd, 0x5a, 0xf5, 0xfc, 0x0d, 0x83, 0xeb, 0xf8, 0x57, 0xd1, 0x86,
+	0x60, 0x67, 0x93, 0xb3, 0xa6, 0xe4, 0x53, 0x31, 0x65, 0x0f, 0xf9, 0x54,
+	0x1d, 0x3f, 0x26, 0xa7, 0x1b, 0xf1, 0x83, 0xef, 0x11, 0x3f, 0x5c, 0xec,
+	0x98, 0x98, 0x25, 0x8e, 0xd4, 0x71, 0xe3, 0xc8, 0xb4, 0x8f, 0x25, 0x9c,
+	0x9b, 0x18, 0xe2, 0xe3, 0x07, 0xdf, 0x23, 0x7e, 0x18, 0x90, 0x15, 0xe7,
+	0xf4, 0xd7, 0x9f, 0x6a, 0x98, 0x7b, 0x4a, 0x61, 0x93, 0x8b, 0xcb, 0x6f,
+	0x06, 0x70, 0xbe, 0x0b, 0x18, 0x1d, 0x85, 0xfc, 0x7c, 0x8c, 0x26, 0x76,
+	0xc6, 0x80, 0xeb, 0xe0, 0x91, 0xc2, 0xe4, 0x08, 0x70, 0xcc, 0xf4, 0x30,
+	0x35, 0xec, 0x61, 0x6a, 0x04, 0x78, 0xca, 0xb6, 0xe5, 0xb5, 0xa3, 0x5e,
+	0x3b, 0x86, 0x36, 0xfc, 0xef, 0x1c, 0x6d, 0xec, 0xb5, 0x43, 0xe3, 0xb3,
+	0x0a, 0xa7, 0x89, 0xf1, 0xc0, 0x0a, 0xe2, 0x2c, 0xf1, 0xb6, 0x4b, 0x16,
+	0xca, 0x58, 0xaf, 0x86, 0x69, 0x94, 0x47, 0x90, 0x1e, 0xd2, 0xb2, 0x46,
+	0xf4, 0xc7, 0xdc, 0xfd, 0xe8, 0xe9, 0xcf, 0xeb, 0xd2, 0xc2, 0x7d, 0x90,
+	0xee, 0x1b, 0x41, 0x2b, 0xf7, 0xf6, 0xa3, 0xa4, 0x95, 0xeb, 0x35, 0xd2,
+	0x7b, 0x1a, 0xf4, 0x66, 0x80, 0xb7, 0xa2, 0x8d, 0xf6, 0x8e, 0x82, 0xde,
+	0x11, 0x60, 0xf1, 0x30, 0xb0, 0x78, 0x27, 0xb0, 0x78, 0x08, 0x58, 0x9c,
+	0x06, 0x0e, 0x0f, 0x02, 0x87, 0x07, 0x80, 0xc3, 0x29, 0xec, 0x2b, 0x2a,
+	0xf3, 0xc0, 0xe5, 0x79, 0xe0, 0xf3, 0x3c, 0xe4, 0x35, 0x31, 0x27, 0xda,
+	0x17, 0xb0, 0xfe, 0x63, 0x33, 0x89, 0xd3, 0xd0, 0xcd, 0x58, 0x51, 0x87,
+	0x3d, 0xa5, 0x06, 0xa1, 0x23, 0x49, 0x29, 0x55, 0x46, 0xa5, 0x40, 0x3f,
+	0xb6, 0xb9, 0x07, 0xb6, 0x0b, 0xfb, 0x89, 0xf9, 0x71, 0xd3, 0x5a, 0xef,
+	0xfa, 0xf7, 0x22, 0xf6, 0x1f, 0x83, 0x27, 0x89, 0xb8, 0xc8, 0xb0, 0xe4,
+	0x9d, 0x1e, 0x2b, 0xab, 0x27, 0x31, 0x8e, 0xed, 0xb8, 0xb6, 0xf7, 0x78,
+	0x42, 0x1b, 0x3f, 0xce, 0x3d, 0x4d, 0x03, 0xe3, 0xaa, 0x32, 0x95, 0xa2,
+	0xad, 0x56, 0xe5, 0x54, 0x2a, 0x31, 0x58, 0x94, 0x56, 0x39, 0x12, 0x9d,
+	0x56, 0xfe, 0xcd, 0x4c, 0x1f, 0x53, 0xfa, 0x51, 0xb0, 0x71, 0x2d, 0x77,
+	0x6b, 0xf9, 0xe3, 0xf4, 0x3b, 0x3d, 0xf8, 0x85, 0x40, 0x0b, 0xe7, 0x37,
+	0x65, 0x68, 0x40, 0xb4, 0x7d, 0xbd, 0x45, 0xa0, 0x62, 0xc2, 0x3a, 0x87,
+	0x95, 0x73, 0xd3, 0x3d, 0xb1, 0x9c, 0x6e, 0xca, 0x98, 0xa9, 0xc9, 0x04,
+	0xec, 0x65, 0x28, 0xf5, 0x7f, 0xd5, 0x23, 0x51, 0x3e, 0x6f, 0x96, 0xdf,
+	0x51, 0x38, 0x8b, 0xb5, 0x4b, 0xb3, 0x58, 0x37, 0x04, 0xfe, 0x71, 0x5d,
+	0xce, 0x83, 0x36, 0x30, 0xcf, 0xb4, 0x13, 0xa7, 0x8b, 0xb2, 0x1d, 0x76,
+	0xb7, 0x4e, 0xb2, 0x7d, 0x4d, 0x92, 0x19, 0x89, 0xcb, 0xc4, 0xcc, 0x76,
+	0xe0, 0x1e, 0x64, 0x60, 0xb7, 0x48, 0x7e, 0x34, 0x2e, 0x5f, 0x9e, 0x61,
+	0x5f, 0x06, 0xfb, 0x4f, 0x1c, 0xcd, 0x08, 0xf7, 0x1f, 0x52, 0xfb, 0x8a,
+	0xeb, 0x19, 0x39, 0xe0, 0xbc, 0xa4, 0xbb, 0x76, 0xe9, 0xb6, 0xf7, 0x42,
+	0x1e, 0xa7, 0xc0, 0xef, 0xbc, 0x63, 0xcb, 0x02, 0xfc, 0x4a, 0xee, 0x38,
+	0x70, 0xd5, 0x6e, 0x03, 0x06, 0x26, 0xce, 0xd2, 0x3e, 0x0c, 0xc4, 0x5a,
+	0x25, 0xc5, 0xeb, 0x2e, 0x39, 0x3e, 0xa3, 0xcb, 0xb3, 0xb7, 0xc5, 0xd1,
+	0x06, 0xd6, 0xa6, 0x12, 0x67, 0xc6, 0xf4, 0x2e, 0xb9, 0xb5, 0x23, 0x86,
+	0xf7, 0x52, 0x5a, 0xde, 0xf9, 0x37, 0xf2, 0xf2, 0x64, 0x5c, 0xe7, 0x58,
+	0x5d, 0x72, 0x29, 0x03, 0x3a, 0x56, 0xc4, 0xf8, 0x7f, 0x40, 0x7f, 0x97,
+	0xcc, 0x21, 0xbe, 0x99, 0x03, 0x4d, 0xd9, 0x14, 0xb1, 0x30, 0x71, 0x72,
+	0x49, 0x07, 0x66, 0xcd, 0x41, 0x37, 0x47, 0x11, 0x3f, 0xcc, 0xfc, 0x37,
+	0xc6, 0xc4, 0x21, 0xd3, 0x1e, 0x6b, 0x02, 0xf8, 0x92, 0xe9, 0xe2, 0x3d,
+	0xe7, 0xb4, 0xe5, 0x94, 0x43, 0x1d, 0x8a, 0xcb, 0xe3, 0x15, 0xbe, 0xd7,
+	0x73, 0xf6, 0x69, 0xb1, 0xe5, 0x41, 0xe7, 0x7f, 0x30, 0xfe, 0x1d, 0xc4,
+	0x9e, 0x96, 0x94, 0x20, 0xb7, 0x02, 0x78, 0x99, 0x89, 0xb9, 0xed, 0x89,
+	0xb9, 0xc4, 0xd9, 0x0b, 0x3a, 0xef, 0xed, 0xe2, 0x82, 0x7e, 0xb3, 0x48,
+	0x07, 0xf9, 0x99, 0x02, 0x2f, 0x6d, 0x4b, 0xd7, 0x37, 0x7b, 0xf1, 0x16,
+	0x6d, 0xc0, 0x06, 0x7d, 0xa6, 0x2c, 0xf4, 0x07, 0x6d, 0x80, 0x7e, 0xd6,
+	0xb7, 0x81, 0x44, 0x6c, 0x49, 0xd7, 0xf1, 0xdc, 0x94, 0x63, 0xaa, 0xad,
+	0x81, 0xd6, 0x44, 0x8c, 0xfb, 0x9b, 0x2c, 0x27, 0xe5, 0x71, 0x87, 0xe3,
+	0xc1, 0xe7, 0xe9, 0x88, 0x37, 0x1e, 0xf1, 0x8e, 0xc3, 0x98, 0x29, 0x09,
+	0x9a, 0x5d, 0xbb, 0x58, 0x98, 0x8e, 0xaa, 0x67, 0x47, 0x1c, 0x37, 0x36,
+	0xd2, 0x11, 0x3f, 0xcd, 0x23, 0x7e, 0xca, 0x29, 0x1b, 0xb1, 0x32, 0x88,
+	0xaf, 0xe1, 0x67, 0x5d, 0xfb, 0x28, 0x95, 0x49, 0xcb, 0x3d, 0xa0, 0x2f,
+	0x51, 0x04, 0x31, 0xc7, 0x74, 0xb8, 0xeb, 0xec, 0x80, 0x14, 0x19, 0x63,
+	0x9d, 0x33, 0x1e, 0x91, 0xb1, 0x12, 0xfd, 0x1b, 0x7e, 0x8e, 0x6d, 0x31,
+	0xa6, 0xcf, 0x28, 0xdf, 0xd3, 0x03, 0x3d, 0x80, 0x5f, 0x4a, 0xb5, 0x8b,
+	0xeb, 0x07, 0xf7, 0x40, 0x9e, 0xc3, 0x90, 0x7b, 0x5a, 0xc6, 0x4f, 0x8c,
+	0x53, 0xa7, 0x93, 0x25, 0x49, 0x24, 0x8f, 0xc8, 0x16, 0x6b, 0x01, 0xbe,
+	0x30, 0x33, 0x5a, 0xdd, 0xae, 0xa7, 0xf9, 0xce, 0xa3, 0x78, 0x07, 0xd7,
+	0xd2, 0xb8, 0x3c, 0x50, 0x61, 0xdf, 0x9d, 0x86, 0xb4, 0xc0, 0x56, 0x06,
+	0xf6, 0x78, 0x76, 0x80, 0xf9, 0x4c, 0x7f, 0xbe, 0x71, 0x6f, 0x3e, 0x8e,
+	0xe3, 0x18, 0xbe, 0x53, 0x9f, 0x77, 0x07, 0x7d, 0x1b, 0xb0, 0x64, 0x87,
+	0x5e, 0xdd, 0x1e, 0xc2, 0xf3, 0x53, 0x03, 0xbc, 0xc7, 0x3c, 0xf0, 0x6d,
+	0x96, 0x3d, 0x8c, 0xb1, 0xa3, 0x98, 0x73, 0x8d, 0x64, 0x3b, 0x7d, 0x7a,
+	0xa9, 0x03, 0x8c, 0x3f, 0xd8, 0x8e, 0xae, 0x77, 0x79, 0xff, 0x25, 0xc3,
+	0xd5, 0xc9, 0x11, 0xb4, 0x69, 0x7f, 0x07, 0x25, 0xe7, 0x24, 0xb0, 0x4f,
+	0xf0, 0xb6, 0x32, 0xe1, 0xed, 0x11, 0xfc, 0x1f, 0x39, 0x0c, 0x3e, 0x48,
+	0xd1, 0xe5, 0x0d, 0xf9, 0x42, 0x9e, 0xfc, 0x16, 0x74, 0xff, 0x61, 0x8c,
+	0x81, 0x7f, 0x50, 0x3c, 0x58, 0xea, 0x70, 0x63, 0xd1, 0x44, 0x31, 0xc3,
+	0xfc, 0xa9, 0x83, 0x98, 0x07, 0xfc, 0xa9, 0x40, 0xb1, 0x30, 0xf7, 0x92,
+	0xbe, 0x86, 0xf4, 0xc6, 0x97, 0x0c, 0x83, 0xed, 0xe4, 0x12, 0x74, 0xb8,
+	0x04, 0xf9, 0x64, 0xfb, 0x68, 0xb3, 0x36, 0xe4, 0x31, 0x63, 0x50, 0x5f,
+	0x4b, 0x88, 0x05, 0xf3, 0xce, 0x16, 0xeb, 0x5e, 0xf2, 0xcd, 0xb2, 0xe4,
+	0x69, 0x27, 0x88, 0x1d, 0x3b, 0x30, 0x94, 0x7a, 0x18, 0x85, 0x1e, 0x98,
+	0xf0, 0xc9, 0x31, 0xc8, 0xfc, 0xc5, 0x0e, 0x77, 0x2f, 0xbc, 0x37, 0x65,
+	0xde, 0xc2, 0x9a, 0xce, 0xef, 0xaf, 0x73, 0xfb, 0x78, 0xcf, 0xb8, 0xc8,
+	0x97, 0xab, 0x4f, 0x3b, 0xe5, 0xdb, 0x28, 0xd3, 0x43, 0xd8, 0x0b, 0xfb,
+	0x71, 0x2d, 0x1d, 0x94, 0x71, 0xd0, 0x56, 0x18, 0xd8, 0x14, 0x3b, 0x8f,
+	0xf1, 0x39, 0xe0, 0x79, 0xd1, 0xe4, 0xb3, 0x8b, 0x5a, 0xfd, 0x1d, 0xc4,
+	0x5c, 0x36, 0xfd, 0xd9, 0x92, 0xf6, 0x85, 0xca, 0xcb, 0x5a, 0xb6, 0x74,
+	0x51, 0xcb, 0x41, 0x4f, 0x4a, 0x0e, 0x73, 0x06, 0xda, 0x8f, 0x85, 0xb5,
+	0x13, 0xb1, 0xb7, 0xf5, 0x9e, 0xf8, 0x02, 0xb0, 0x60, 0x2f, 0x6c, 0x3a,
+	0x67, 0xee, 0x94, 0x02, 0xb0, 0x35, 0x7f, 0x62, 0x0b, 0xec, 0x2d, 0x1e,
+	0xa0, 0x8b, 0x78, 0x56, 0xa4, 0x4f, 0xd5, 0x76, 0x38, 0x52, 0x6c, 0x4a,
+	0x13, 0xd7, 0x36, 0x41, 0x77, 0xd0, 0x57, 0xae, 0xeb, 0xdf, 0x1d, 0x2b,
+	0x68, 0x45, 0x7e, 0x39, 0xb0, 0x9c, 0xde, 0x92, 0x5c, 0x99, 0xde, 0x1d,
+	0x35, 0x7a, 0x89, 0x19, 0xc0, 0x7f, 0xd8, 0xcd, 0x4b, 0xd0, 0xdf, 0xe7,
+	0x1d, 0xe0, 0xbf, 0x03, 0xfc, 0x87, 0x4d, 0x3d, 0x03, 0xdd, 0x7b, 0xda,
+	0x81, 0x0f, 0x70, 0xe0, 0x03, 0x1c, 0xf8, 0x00, 0x27, 0x0b, 0x39, 0x11,
+	0xe7, 0xe9, 0x43, 0x76, 0xd7, 0x7c, 0x9e, 0x1b, 0x37, 0xdd, 0xe0, 0xc5,
+	0x22, 0xa3, 0x88, 0x45, 0x36, 0xc8, 0x44, 0xf2, 0x7a, 0xec, 0xad, 0x05,
+	0xd7, 0x56, 0x5c, 0xb1, 0x46, 0xf2, 0x76, 0xcf, 0x4e, 0x1e, 0x06, 0x5d,
+	0x88, 0xbf, 0x93, 0x3f, 0x0d, 0x3d, 0x6c, 0x02, 0x3d, 0x3f, 0xe5, 0xc5,
+	0x2c, 0x0f, 0x9a, 0xae, 0x1e, 0xb6, 0xa2, 0xef, 0x93, 0xe8, 0x6b, 0xc5,
+	0x98, 0x03, 0x18, 0xc3, 0x98, 0xa7, 0xcd, 0xeb, 0x0b, 0x8e, 0x63, 0xec,
+	0xf3, 0x19, 0xac, 0x95, 0xc0, 0xb8, 0x36, 0xcc, 0xdd, 0x85, 0x31, 0xdb,
+	0x30, 0xe6, 0x46, 0xb4, 0x19, 0x33, 0x6f, 0x44, 0xfb, 0x13, 0x0d, 0xef,
+	0x7c, 0x0c, 0x7d, 0xb7, 0x37, 0xf4, 0x9d, 0x43, 0x1f, 0xf2, 0x50, 0xeb,
+	0xbc, 0xf7, 0x5e, 0x11, 0xed, 0xce, 0x86, 0x31, 0xaf, 0xa2, 0x0f, 0x71,
+	0xaf, 0xf5, 0x2d, 0x5c, 0x91, 0x7f, 0x5a, 0xa4, 0xc9, 0x7f, 0xc6, 0xb8,
+	0x37, 0x8e, 0xfe, 0x90, 0x17, 0xbb, 0xfe, 0xa6, 0x09, 0xbd, 0xd3, 0x86,
+	0x9c, 0xdf, 0x30, 0xdd, 0x58, 0xef, 0x4e, 0xcb, 0xd5, 0x43, 0xbf, 0xfd,
+	0x70, 0x43, 0x9b, 0x63, 0x17, 0x1a, 0xfa, 0xfe, 0xa5, 0xa1, 0xfd, 0xdd,
+	0xd0, 0xca, 0x77, 0x06, 0xdb, 0x97, 0xf7, 0x15, 0x3a, 0x96, 0xb7, 0xed,
+	0xa6, 0x95, 0xef, 0xe8, 0xeb, 0x96, 0xf7, 0xdd, 0xb8, 0xbe, 0x61, 0x0c,
+	0x74, 0x2a, 0x8a, 0x1c, 0xc9, 0x1f, 0x1f, 0xbe, 0xce, 0x7d, 0x4e, 0xfe,
+	0x36, 0xea, 0x92, 0xda, 0x3a, 0xda, 0x3a, 0xe4, 0xb0, 0xa4, 0xc1, 0x9e,
+	0x2c, 0x3d, 0xfd, 0xb2, 0x96, 0x83, 0x4e, 0x65, 0x2b, 0xfe, 0x7c, 0xb4,
+	0xd9, 0xc6, 0xdc, 0xdc, 0xcf, 0xc9, 0x19, 0x2b, 0x45, 0xa0, 0x37, 0xf7,
+	0xd0, 0x27, 0x1d, 0x2d, 0x4a, 0xdd, 0x3e, 0xbb, 0xf5, 0x4b, 0xd9, 0xe7,
+	0xed, 0x1e, 0x46, 0x1d, 0x06, 0x9d, 0x55, 0x19, 0x49, 0x35, 0xd3, 0xc7,
+	0x78, 0xd8, 0x45, 0xdc, 0xa9, 0x56, 0x8d, 0xcd, 0x55, 0xd9, 0x9f, 0x7a,
+	0xa7, 0x2a, 0x0a, 0xf3, 0x06, 0x15, 0xee, 0xc4, 0xf5, 0x1e, 0xc8, 0xc8,
+	0x42, 0x6e, 0x82, 0x7c, 0x3a, 0x4a, 0x9f, 0x74, 0x90, 0xf1, 0xc9, 0xa3,
+	0x2e, 0xa6, 0x12, 0x77, 0xd0, 0x46, 0x5e, 0x96, 0x3f, 0xce, 0xf5, 0x71,
+	0x2d, 0x11, 0xc7, 0x47, 0x95, 0x4f, 0xc9, 0x5b, 0x9c, 0x77, 0x35, 0x6c,
+	0x3c, 0x6b, 0x32, 0xa6, 0x33, 0xed, 0xd3, 0xf0, 0x6f, 0x7c, 0xc6, 0x58,
+	0xe1, 0x34, 0xe3, 0x92, 0x00, 0x56, 0x6d, 0x35, 0xe0, 0x32, 0x8b, 0xcb,
+	0xf7, 0xb5, 0x81, 0x79, 0xc4, 0x55, 0xec, 0x75, 0x75, 0x2c, 0xea, 0xd1,
+	0xaf, 0x6c, 0xdb, 0xbb, 0x6a, 0xb6, 0xed, 0xeb, 0xde, 0x6a, 0x39, 0xf8,
+	0xf7, 0x95, 0x2c, 0x9e, 0xaa, 0x24, 0x8e, 0x15, 0x61, 0x4b, 0x8b, 0x2a,
+	0xef, 0xf6, 0xe5, 0xc2, 0x18, 0x27, 0x71, 0x72, 0x1e, 0x6f, 0x8e, 0xab,
+	0x1c, 0x83, 0xf9, 0x45, 0x55, 0x76, 0xa4, 0x5a, 0xa3, 0xe4, 0x43, 0x46,
+	0xff, 0x76, 0x88, 0x31, 0xc3, 0xa2, 0x43, 0x9e, 0xa5, 0xf0, 0x3c, 0x05,
+	0x4c, 0xf8, 0x27, 0xc9, 0x45, 0xd9, 0xf7, 0x76, 0x75, 0x01, 0x71, 0x95,
+	0x8a, 0x8f, 0x94, 0xbf, 0x67, 0x7c, 0xb7, 0x1f, 0xfc, 0x22, 0x4f, 0x47,
+	0xc0, 0x67, 0x3f, 0x06, 0x78, 0x8d, 0x75, 0x15, 0x59, 0x1e, 0x07, 0x8b,
+	0x3c, 0x50, 0x7e, 0x19, 0x73, 0xea, 0x6e, 0xac, 0xc2, 0x3c, 0xdc, 0x66,
+	0x7f, 0x47, 0x88, 0xb1, 0x9c, 0xeb, 0xeb, 0x0d, 0xac, 0x87, 0xdc, 0xbe,
+	0xfc, 0x8f, 0x2a, 0x6e, 0x2a, 0x28, 0x79, 0x20, 0x86, 0xaa, 0xf0, 0x19,
+	0xfb, 0xc2, 0x5e, 0xec, 0x1c, 0xf1, 0x62, 0x65, 0xcb, 0x8b, 0x95, 0x49,
+	0x07, 0x6b, 0x6f, 0x7e, 0x5c, 0x40, 0x99, 0x2d, 0x1d, 0xd2, 0x37, 0x33,
+	0x2e, 0x68, 0x93, 0xd5, 0xe3, 0x02, 0x9f, 0xa6, 0x6d, 0xa0, 0x89, 0x71,
+	0x9e, 0xaa, 0xbd, 0x74, 0xb8, 0xf5, 0x1e, 0xd2, 0xe0, 0xfb, 0x47, 0xe5,
+	0x87, 0x8f, 0xc2, 0xe5, 0x61, 0x6f, 0x69, 0xd0, 0xba, 0x53, 0xb2, 0xd3,
+	0xdb, 0x3c, 0x7f, 0xcb, 0x1c, 0x80, 0xf1, 0xb7, 0xab, 0xb3, 0xd9, 0xd4,
+	0x84, 0x3f, 0x4f, 0x27, 0x3c, 0x64, 0xa0, 0x2e, 0xc4, 0xb5, 0x18, 0xc7,
+	0xf8, 0x31, 0xcd, 0x4e, 0x2f, 0xa6, 0x19, 0x96, 0xfd, 0x8e, 0x1b, 0xf3,
+	0x8f, 0xa0, 0x3f, 0xef, 0x28, 0xda, 0x63, 0x8c, 0x2d, 0x75, 0xc4, 0xdc,
+	0x99, 0x3d, 0x09, 0x24, 0x0f, 0xee, 0x5e, 0xba, 0xb1, 0x97, 0x52, 0x6d,
+	0x2f, 0xad, 0x4b, 0xcb, 0xf7, 0x32, 0xaa, 0xde, 0x9d, 0x5a, 0xf1, 0xae,
+	0x60, 0x1f, 0xbb, 0x2f, 0xf1, 0x8c, 0x7b, 0x64, 0xdc, 0x60, 0x79, 0x7b,
+	0xf4, 0xe5, 0x74, 0x00, 0x7b, 0x4c, 0x6a, 0x79, 0x15, 0x6b, 0xed, 0x51,
+	0x3c, 0xcf, 0x97, 0xc7, 0x70, 0xa5, 0x7d, 0xa8, 0x79, 0x94, 0x8d, 0x4c,
+	0x28, 0x3e, 0x8f, 0xab, 0x7d, 0x2c, 0x94, 0x7f, 0x41, 0x0a, 0x27, 0x7e,
+	0x09, 0x7e, 0x2f, 0x58, 0x0f, 0x63, 0x2d, 0x91, 0xfc, 0x28, 0x06, 0xf0,
+	0x93, 0x7b, 0x65, 0xad, 0xeb, 0x0f, 0x43, 0x6e, 0x7e, 0x10, 0x81, 0x8c,
+	0x35, 0xf7, 0xb9, 0x5a, 0xdf, 0xe7, 0x6b, 0x53, 0x80, 0x9e, 0x2a, 0x62,
+	0xce, 0x18, 0x68, 0x08, 0xbe, 0x73, 0x50, 0x86, 0x1c, 0xca, 0xa3, 0x27,
+	0x36, 0x2e, 0xb6, 0x95, 0x17, 0x3f, 0xce, 0xe0, 0xfa, 0xb4, 0xf9, 0x5c,
+	0xcc, 0x10, 0xd6, 0x2f, 0x7d, 0xde, 0xf9, 0x7c, 0x8b, 0x2c, 0x35, 0xea,
+	0xc0, 0x14, 0xe8, 0x29, 0x38, 0xe4, 0x93, 0xaf, 0x9b, 0xfe, 0xda, 0xaf,
+	0xaa, 0xfd, 0x4c, 0xaa, 0x9a, 0xdd, 0x73, 0x35, 0x1d, 0x9d, 0x40, 0x0c,
+	0xe2, 0xea, 0xdc, 0x7d, 0x1e, 0x6f, 0x7c, 0xdd, 0x8c, 0x78, 0x72, 0x66,
+	0x1e, 0x47, 0xdb, 0xf1, 0xf5, 0x60, 0x93, 0x75, 0xb7, 0xe2, 0x05, 0x9f,
+	0x11, 0x53, 0x5c, 0x59, 0x8e, 0xd5, 0x64, 0xb9, 0xb6, 0x41, 0x2f, 0xbf,
+	0xb7, 0xce, 0xb5, 0x43, 0xda, 0x1b, 0xec, 0x16, 0xf4, 0x3d, 0xb5, 0xcc,
+	0xbe, 0x93, 0x97, 0xa8, 0x83, 0x46, 0xc4, 0x98, 0xfb, 0x53, 0xf0, 0xf2,
+	0x63, 0xc8, 0x55, 0x44, 0xcc, 0x19, 0xe2, 0x10, 0xe3, 0x8d, 0x7a, 0xbc,
+	0xbb, 0x20, 0xab, 0xc5, 0xba, 0x57, 0x8a, 0x35, 0x7e, 0xf2, 0x2a, 0x63,
+	0x8d, 0x78, 0x93, 0xb4, 0x10, 0x8b, 0x86, 0x11, 0xdb, 0x6a, 0xd2, 0x64,
+	0x3f, 0x08, 0x1f, 0x76, 0xc6, 0x6c, 0xb6, 0x7d, 0x4c, 0x88, 0x48, 0xfb,
+	0xdc, 0x06, 0x85, 0x0b, 0xd6, 0x4c, 0x1d, 0x17, 0x26, 0xc0, 0xfb, 0x11,
+	0xb7, 0xb6, 0x1a, 0x6d, 0x97, 0xab, 0xcd, 0x8d, 0xeb, 0x71, 0xff, 0x58,
+	0x2d, 0xee, 0xbf, 0xa1, 0x81, 0x8f, 0xab, 0xe1, 0xe2, 0x19, 0xf0, 0x2d,
+	0x8d, 0xfc, 0x97, 0x79, 0xed, 0x10, 0xf2, 0x61, 0xe6, 0x62, 0x19, 0xe4,
+	0xc4, 0x89, 0x33, 0xc0, 0x2a, 0xe4, 0xc8, 0x89, 0xb7, 0xe0, 0x57, 0x90,
+	0x37, 0x27, 0xe6, 0x99, 0xbb, 0x2e, 0x22, 0x3f, 0x7e, 0x1a, 0xf9, 0xf1,
+	0x53, 0x95, 0x3e, 0xf0, 0x37, 0xa9, 0xb0, 0x73, 0xef, 0x71, 0xd1, 0xee,
+	0x52, 0xf5, 0x61, 0xda, 0x73, 0x14, 0x7e, 0xb4, 0x5a, 0x3d, 0x90, 0xea,
+	0x41, 0x4e, 0x1e, 0x97, 0x4f, 0x99, 0xcc, 0x63, 0x35, 0xb3, 0xbb, 0x7f,
+	0xc1, 0x08, 0xc6, 0xa4, 0xd9, 0x2b, 0xfa, 0x81, 0x95, 0xbc, 0xcf, 0x29,
+	0x5f, 0x70, 0xcc, 0xb8, 0x1c, 0xef, 0xef, 0xaa, 0xf1, 0xfe, 0xc2, 0x1a,
+	0x69, 0x19, 0x56, 0x35, 0x80, 0xee, 0xfe, 0x03, 0xc4, 0xab, 0x14, 0xfc,
+	0x3a, 0xfc, 0x6f, 0x55, 0xee, 0x48, 0x5d, 0xac, 0x9e, 0xb7, 0xd7, 0x49,
+	0xbe, 0xef, 0x8b, 0x1e, 0x66, 0x8f, 0x3d, 0x92, 0xb5, 0x8b, 0xb0, 0x0f,
+	0xb7, 0x16, 0x39, 0x3e, 0x1d, 0x46, 0x14, 0xca, 0xbf, 0x0e, 0x59, 0x18,
+	0xfc, 0x1b, 0xc8, 0x70, 0xcb, 0x69, 0x16, 0xb0, 0x74, 0xe0, 0xf0, 0x42,
+	0x34, 0xa2, 0xea, 0x33, 0xd7, 0xd9, 0xec, 0xb7, 0x20, 0xd3, 0x51, 0x59,
+	0x40, 0xfc, 0x50, 0x1a, 0x04, 0x8d, 0x7d, 0x9d, 0x18, 0x4f, 0xbb, 0x23,
+	0xcf, 0x47, 0xe1, 0x7b, 0xc9, 0xd3, 0x28, 0xc6, 0xef, 0xc2, 0x98, 0x0e,
+	0x5c, 0xbf, 0x68, 0x2c, 0x58, 0xcc, 0x9d, 0x7f, 0x0e, 0x6d, 0xce, 0x11,
+	0xf4, 0x9d, 0x9f, 0x0e, 0x89, 0x9a, 0x93, 0xef, 0x74, 0x2a, 0xfb, 0xaf,
+	0xaf, 0xc5, 0x75, 0xf8, 0xec, 0xbd, 0xea, 0x2d, 0xfd, 0x83, 0x81, 0xf5,
+	0xda, 0x02, 0xeb, 0x0d, 0x06, 0xd6, 0x23, 0x9d, 0x1d, 0x01, 0x3a, 0x3b,
+	0xf0, 0x7e, 0x0e, 0x6b, 0x0f, 0xab, 0x98, 0xa7, 0xbe, 0xe6, 0xfd, 0x81,
+	0x35, 0xfd, 0xfd, 0x75, 0x06, 0xde, 0x7b, 0x07, 0xeb, 0xb1, 0x2f, 0x1a,
+	0xe8, 0x23, 0x0d, 0xeb, 0xd1, 0xc7, 0x76, 0x47, 0x80, 0x2e, 0xd2, 0xba,
+	0x16, 0xfd, 0x2a, 0x7e, 0x02, 0x9f, 0x5b, 0xe0, 0xb7, 0x74, 0xf8, 0x0e,
+	0xd6, 0xa0, 0x1b, 0xf7, 0xfa, 0x65, 0xac, 0xeb, 0xcf, 0x17, 0xc5, 0x1c,
+	0x1c, 0xcf, 0xb1, 0x86, 0xf7, 0x3e, 0xfb, 0xf9, 0xfc, 0x1b, 0xd5, 0xaf,
+	0x2b, 0xbe, 0xad, 0x07, 0xed, 0xaa, 0xee, 0x22, 0xf3, 0x1d, 0x26, 0xe4,
+	0xc9, 0xfc, 0x58, 0x93, 0x9b, 0x6c, 0x5d, 0xeb, 0xe9, 0xa7, 0xec, 0xd7,
+	0x79, 0x58, 0xda, 0xa2, 0x65, 0x8f, 0xb3, 0x5e, 0xd0, 0xea, 0xe5, 0x7c,
+	0xc8, 0x3d, 0x94, 0x8f, 0x31, 0xbd, 0xe7, 0xf4, 0x31, 0x8c, 0x5b, 0xe8,
+	0x3f, 0x33, 0xde, 0x3d, 0xae, 0xd0, 0xe1, 0x7d, 0xa5, 0x0e, 0x39, 0xaf,
+	0x78, 0x6a, 0xc9, 0xb9, 0x1a, 0x4f, 0x43, 0xde, 0xb7, 0x90, 0x83, 0xde,
+	0x77, 0x06, 0x03, 0x71, 0x11, 0xee, 0xcb, 0x19, 0xd0, 0x10, 0x97, 0x9e,
+	0x7e, 0xe6, 0x6e, 0x45, 0x5c, 0x59, 0xa7, 0xd0, 0x70, 0x75, 0xeb, 0x17,
+	0x3d, 0xfd, 0xf0, 0x4b, 0xc0, 0xa1, 0x9e, 0xfe, 0xef, 0xa8, 0x7c, 0xae,
+	0x54, 0xb1, 0xb4, 0x3b, 0x1c, 0xb7, 0x46, 0x74, 0xce, 0xbe, 0x5c, 0x8d,
+	0x68, 0xa0, 0x99, 0x75, 0x0d, 0xbf, 0x46, 0x74, 0x4e, 0x54, 0x8d, 0xe8,
+	0xe4, 0x15, 0x6a, 0x44, 0x99, 0xab, 0xaf, 0x11, 0x71, 0x7e, 0x53, 0xee,
+	0x1e, 0x10, 0xed, 0x4b, 0x5e, 0x8d, 0xe8, 0x82, 0xb8, 0x35, 0xa2, 0xf3,
+	0xb2, 0x7a, 0x8d, 0xe8, 0x68, 0x43, 0x8d, 0x68, 0xbd, 0xaa, 0x11, 0x71,
+	0x1e, 0xb7, 0x46, 0xc4, 0x76, 0xbe, 0x7f, 0x30, 0x50, 0xeb, 0x00, 0xfe,
+	0x3a, 0xb7, 0x82, 0x6f, 0x96, 0x36, 0xea, 0xf8, 0x98, 0x46, 0xec, 0xbf,
+	0xbe, 0xe6, 0xbf, 0xea, 0xf8, 0xa6, 0x29, 0x9d, 0xbb, 0x12, 0xbe, 0x8d,
+	0xba, 0x71, 0xc9, 0x32, 0x6c, 0x9b, 0xaa, 0xc5, 0x2e, 0xbf, 0xdc, 0xcc,
+	0x1c, 0x7a, 0xb2, 0x5c, 0x9f, 0x77, 0x12, 0xf2, 0x1e, 0xab, 0xd5, 0x49,
+	0x2e, 0x15, 0x1f, 0x45, 0xe5, 0xe0, 0xaa, 0xdf, 0x9a, 0x62, 0x99, 0x95,
+	0xdf, 0x9a, 0x34, 0x89, 0x82, 0xce, 0x7c, 0x7f, 0x5e, 0xe5, 0x5d, 0x0b,
+	0xce, 0xcf, 0xcb, 0xd2, 0xbd, 0x16, 0xf0, 0xc7, 0xaf, 0x9f, 0x50, 0xbe,
+	0x75, 0x9f, 0x92, 0xd5, 0x3f, 0xba, 0x1a, 0xca, 0x3e, 0x55, 0x43, 0xf9,
+	0x5a, 0x73, 0xb0, 0x86, 0x72, 0x4e, 0x2e, 0x5f, 0x43, 0xd9, 0xb7, 0x4a,
+	0x0d, 0xe5, 0x15, 0xa9, 0xd7, 0x50, 0x5e, 0x11, 0xbf, 0x86, 0x62, 0xc8,
+	0xd2, 0x7a, 0xce, 0xb3, 0x1f, 0xef, 0x8c, 0xe0, 0x37, 0x8c, 0x9f, 0x5b,
+	0x53, 0x39, 0x57, 0xa3, 0x7f, 0xb5, 0x9a, 0xca, 0x37, 0x9b, 0xdf, 0x4f,
+	0x4d, 0xc5, 0xf5, 0x01, 0x7e, 0x4d, 0xa5, 0x05, 0xf1, 0x0e, 0x7c, 0x8e,
+	0x1e, 0xac, 0xa9, 0xfc, 0x2d, 0xed, 0x01, 0x7d, 0x2a, 0x46, 0x40, 0x3f,
+	0xec, 0x02, 0x7e, 0x29, 0xa3, 0x6a, 0x1c, 0x9f, 0xf6, 0x78, 0xb8, 0x1b,
+	0x7b, 0x8e, 0x43, 0x16, 0xe4, 0x63, 0x8f, 0x8a, 0x2d, 0x33, 0x66, 0x4c,
+	0xcb, 0xf6, 0xc2, 0x9b, 0x4d, 0xf3, 0x5b, 0x74, 0x4c, 0xc6, 0x2b, 0xd4,
+	0xf1, 0x2e, 0xc4, 0xe2, 0x26, 0xfa, 0x76, 0xa3, 0xed, 0xc7, 0x54, 0xfd,
+	0xb5, 0x39, 0x68, 0x9b, 0x0b, 0xc0, 0x59, 0xe0, 0xc4, 0x55, 0xf8, 0xa8,
+	0x6d, 0xa0, 0x39, 0xb8, 0x8f, 0x22, 0xfc, 0x13, 0xfa, 0x94, 0xcc, 0x19,
+	0x5b, 0xfa, 0xb4, 0xc4, 0x69, 0xe7, 0x57, 0x31, 0x1f, 0xfb, 0xb6, 0xa9,
+	0x7c, 0xac, 0x30, 0xc0, 0xbd, 0xd2, 0xd7, 0x2d, 0x82, 0x3e, 0xf4, 0x95,
+	0x98, 0x03, 0xd2, 0xef, 0xf9, 0x39, 0x5a, 0x44, 0xe5, 0x68, 0x9d, 0x8a,
+	0x1f, 0xe4, 0xf5, 0x8d, 0x61, 0x62, 0x65, 0xa7, 0xcd, 0x3d, 0x0c, 0x7b,
+	0x58, 0xc7, 0xb6, 0x9b, 0x0b, 0x66, 0x74, 0xde, 0x3f, 0x02, 0xb9, 0xb2,
+	0x4e, 0xe3, 0xcb, 0xef, 0x21, 0x6f, 0xdf, 0x83, 0x52, 0xec, 0x94, 0xf0,
+	0x7a, 0xd0, 0x93, 0x9f, 0x61, 0xdc, 0xfd, 0x09, 0x95, 0x83, 0x44, 0xed,
+	0x4b, 0xdb, 0xed, 0x5d, 0xd7, 0x60, 0xb7, 0x23, 0x97, 0xb5, 0xdb, 0xcf,
+	0x85, 0x83, 0x76, 0x7b, 0xd7, 0x35, 0xd8, 0xed, 0xfe, 0x6b, 0xb2, 0x5b,
+	0xee, 0x8d, 0x98, 0xe4, 0xd7, 0xc4, 0x56, 0xc6, 0x59, 0xfe, 0xba, 0x13,
+	0x58, 0x33, 0x73, 0x89, 0x35, 0xc7, 0x2e, 0x59, 0x5b, 0x6d, 0x8c, 0xb1,
+	0xae, 0x46, 0xde, 0xcc, 0xad, 0xe8, 0x6f, 0x23, 0x9e, 0x5f, 0xba, 0xdd,
+	0xcb, 0xe7, 0xfd, 0xbc, 0x3e, 0x68, 0x3f, 0xd4, 0x0b, 0xea, 0xc2, 0x63,
+	0xe0, 0x17, 0xf5, 0xc1, 0xb7, 0xb9, 0x9e, 0x06, 0x1d, 0x5c, 0x44, 0xbe,
+	0xdf, 0xe3, 0xe9, 0x20, 0x65, 0xdd, 0xab, 0xbe, 0x11, 0x95, 0x9c, 0x47,
+	0xdc, 0x3c, 0x1f, 0x3a, 0x90, 0x2f, 0xf9, 0xb6, 0x06, 0x9e, 0x44, 0xfd,
+	0x67, 0xe4, 0xa3, 0x8d, 0x98, 0x67, 0x0b, 0xe2, 0x35, 0xf0, 0x48, 0xf5,
+	0x2f, 0xaf, 0x09, 0x5f, 0x1e, 0xcf, 0xa4, 0x18, 0xc2, 0xd8, 0x53, 0x03,
+	0xb0, 0xf1, 0x01, 0x62, 0x54, 0x1a, 0x79, 0x0f, 0xf5, 0x90, 0xba, 0xb9,
+	0x29, 0xb9, 0x43, 0x67, 0x4c, 0xb5, 0x07, 0xb6, 0x47, 0x7d, 0x8d, 0xcb,
+	0x8e, 0xca, 0xa6, 0x33, 0xe7, 0x74, 0xae, 0x51, 0xad, 0xe6, 0x99, 0x2b,
+	0x5a, 0xa2, 0x77, 0xf7, 0xff, 0x45, 0x98, 0x7e, 0xe9, 0x7a, 0xdb, 0xf0,
+	0x74, 0x2d, 0x83, 0x7b, 0xea, 0xed, 0xeb, 0xf0, 0xf7, 0xfc, 0xc6, 0xfe,
+	0x03, 0xf4, 0xc7, 0x60, 0xf3, 0xf4, 0xef, 0xcc, 0x47, 0xb6, 0x7a, 0xe3,
+	0x7a, 0xd4, 0xf7, 0xcf, 0x6c, 0xea, 0x56, 0xef, 0xbb, 0x13, 0xfd, 0x4f,
+	0x82, 0x3e, 0x7b, 0x99, 0x9c, 0x79, 0x46, 0x21, 0xa7, 0xf2, 0x19, 0xbe,
+	0xaf, 0x74, 0x12, 0x39, 0x88, 0x19, 0xa8, 0xa5, 0x87, 0xbd, 0xdc, 0x8d,
+	0x36, 0x16, 0x81, 0x0c, 0xb7, 0x7b, 0xb9, 0x0a, 0xf3, 0xd7, 0xe5, 0x67,
+	0x13, 0x56, 0xd7, 0x81, 0x0d, 0xef, 0x43, 0x07, 0x1a, 0xe5, 0x17, 0x86,
+	0xed, 0xfb, 0xf2, 0xf3, 0xe3, 0x98, 0x79, 0x6f, 0xdf, 0x3d, 0xae, 0x0c,
+	0x7f, 0x2c, 0xf6, 0xa9, 0x05, 0xf6, 0xe9, 0xe3, 0xd1, 0x3e, 0x6f, 0x9f,
+	0x5b, 0x1b, 0xf0, 0x68, 0xa4, 0xc1, 0x66, 0x3f, 0x4a, 0x3c, 0x3a, 0xb4,
+	0xe6, 0xa3, 0xc7, 0x23, 0xee, 0x6b, 0xe3, 0xaa, 0x38, 0xe4, 0xee, 0xe3,
+	0x77, 0x45, 0x4f, 0x7f, 0x98, 0xf9, 0xde, 0xfb, 0x91, 0x4f, 0x10, 0x47,
+	0x28, 0x93, 0x36, 0x15, 0xc3, 0xba, 0xb6, 0x07, 0x5f, 0x5e, 0x0a, 0xc9,
+	0x1b, 0xf7, 0x84, 0xe5, 0x7f, 0x6f, 0xe3, 0xf7, 0x30, 0xd3, 0xab, 0x69,
+	0xb1, 0xfd, 0xc2, 0x1a, 0xd7, 0x0f, 0xbd, 0xd0, 0xee, 0xfa, 0x1d, 0xbe,
+	0xe3, 0xdb, 0xb3, 0x85, 0xe7, 0x7c, 0xb6, 0x91, 0x5f, 0x4c, 0xae, 0x21,
+	0x07, 0xdc, 0x64, 0x5d, 0xd0, 0x57, 0xcb, 0x01, 0x2f, 0x5f, 0x0f, 0xac,
+	0xe7, 0x80, 0xc4, 0xd9, 0x0e, 0xa5, 0x1b, 0xf9, 0x28, 0x73, 0x1f, 0xc3,
+	0xc3, 0x4e, 0xde, 0x23, 0xb7, 0x75, 0x90, 0xef, 0x42, 0xb6, 0xcf, 0x21,
+	0x5e, 0x7a, 0xd6, 0x41, 0x8e, 0xeb, 0x20, 0xb7, 0x75, 0x90, 0xdb, 0x3a,
+	0xc8, 0x6d, 0x9d, 0xa4, 0x97, 0x23, 0x8f, 0x78, 0x75, 0x7f, 0x7e, 0xe3,
+	0x66, 0x7d, 0xa1, 0x08, 0x5f, 0x32, 0xc5, 0x73, 0x13, 0x7a, 0x36, 0xb5,
+	0xc6, 0xdb, 0x9f, 0x5f, 0x13, 0xef, 0xf2, 0x6a, 0x36, 0xdf, 0x54, 0x75,
+	0x43, 0xd1, 0x1f, 0x68, 0x71, 0xbf, 0x83, 0xf3, 0x7c, 0xc7, 0xaf, 0x21,
+	0x2e, 0x51, 0x67, 0x88, 0x68, 0xa3, 0x55, 0x3d, 0xcd, 0x9a, 0x8c, 0xe8,
+	0x7a, 0xfa, 0x16, 0xbc, 0xb3, 0xc5, 0xcd, 0x09, 0xa2, 0x62, 0xe8, 0xe9,
+	0x56, 0xf2, 0x54, 0xd3, 0xd3, 0x6b, 0xbd, 0xb9, 0xf6, 0xb7, 0xb8, 0xb1,
+	0x55, 0x2f, 0xdb, 0xa6, 0xce, 0x38, 0x41, 0xc5, 0xda, 0x7e, 0xff, 0xc5,
+	0xf6, 0xe5, 0x6b, 0x85, 0x14, 0xbe, 0x67, 0x53, 0xf7, 0x62, 0x3e, 0xb6,
+	0xeb, 0xfc, 0xd6, 0x2f, 0xc9, 0xef, 0x90, 0xc7, 0x6f, 0x97, 0xc7, 0x06,
+	0xc7, 0xa9, 0xba, 0x30, 0x79, 0xed, 0xcf, 0xa7, 0xea, 0x7a, 0x58, 0x47,
+	0x9d, 0xcd, 0xc0, 0xf5, 0x07, 0xa6, 0xb4, 0x8d, 0xee, 0x0e, 0xd9, 0xc1,
+	0x75, 0xfd, 0x6f, 0xe2, 0x57, 0xb3, 0x66, 0x8f, 0xfa, 0x8e, 0xe6, 0xfa,
+	0x8c, 0x90, 0xd2, 0x41, 0x33, 0xcd, 0x7d, 0xfd, 0x50, 0x9d, 0xa9, 0xa1,
+	0xfe, 0xe5, 0x90, 0xc7, 0x4c, 0x0d, 0x6c, 0x8a, 0x9b, 0xfa, 0x48, 0x0b,
+	0xeb, 0xaf, 0x43, 0x15, 0x1f, 0xf7, 0xb8, 0x5e, 0xa3, 0x1f, 0x67, 0x5d,
+	0xcd, 0xc7, 0x33, 0xd9, 0xe0, 0xd6, 0xdb, 0x3e, 0x88, 0x2d, 0xb5, 0x34,
+	0xd8, 0x92, 0xbf, 0x4f, 0xee, 0x9f, 0xd7, 0xd5, 0xcf, 0x43, 0x2c, 0x56,
+	0x02, 0xdf, 0x47, 0x6a, 0xba, 0xc1, 0xb3, 0x2a, 0x9f, 0x85, 0x0e, 0xf2,
+	0xdb, 0xc0, 0x4e, 0xd8, 0x51, 0xb5, 0x3a, 0xc4, 0x1a, 0x73, 0xdf, 0x67,
+	0x54, 0x7e, 0xa9, 0xa7, 0xe7, 0x55, 0xfd, 0xc1, 0x5c, 0x51, 0x7f, 0x18,
+	0x82, 0xae, 0x20, 0x06, 0x70, 0xda, 0x54, 0x4c, 0xa7, 0xe2, 0x85, 0x4a,
+	0xe3, 0xf7, 0x97, 0xfb, 0x5b, 0x5d, 0x3e, 0xfc, 0x5d, 0x8b, 0xfb, 0x0d,
+	0xe2, 0x8f, 0xa2, 0xcb, 0xdb, 0x7c, 0xff, 0xaf, 0x5b, 0xfc, 0xb3, 0x3b,
+	0x85, 0x13, 0x43, 0xd0, 0x45, 0xe4, 0xe4, 0x6a, 0x3e, 0xc4, 0xbb, 0x4f,
+	0xcc, 0x76, 0x2c, 0x1f, 0x8f, 0xbe, 0x13, 0xfe, 0xf8, 0x8e, 0x86, 0xf1,
+	0x1d, 0x18, 0xff, 0x7b, 0x0d, 0xe3, 0x3b, 0x02, 0xe3, 0xa3, 0x0d, 0xe3,
+	0xa3, 0x18, 0xff, 0x7c, 0xc3, 0xf8, 0x68, 0x60, 0x7c, 0x67, 0xc3, 0xf8,
+	0x4e, 0x8c, 0x7f, 0xa1, 0x61, 0x3c, 0xfa, 0x4e, 0x34, 0x79, 0xdf, 0xc5,
+	0x88, 0xb1, 0xfb, 0xbd, 0x5c, 0x1c, 0xd7, 0x72, 0xe3, 0xb7, 0x16, 0xea,
+	0x5d, 0x17, 0x64, 0xe0, 0x9f, 0xa7, 0xa3, 0xbd, 0x66, 0x60, 0xaf, 0xf5,
+	0x58, 0xc6, 0xd5, 0xc7, 0xa0, 0x2e, 0x12, 0x1f, 0x8a, 0x62, 0xd8, 0xd0,
+	0x9d, 0x12, 0x74, 0xa8, 0xe4, 0xfb, 0x24, 0x9e, 0x83, 0xe2, 0x19, 0x53,
+	0xd7, 0xf7, 0x86, 0xec, 0x45, 0x2f, 0x07, 0x7b, 0x9b, 0xb4, 0x03, 0x2f,
+	0x7d, 0xcc, 0x94, 0x63, 0xae, 0xdd, 0x50, 0x7f, 0x39, 0xbf, 0x67, 0x3f,
+	0xd4, 0x55, 0x6f, 0x9d, 0xa1, 0x15, 0xb8, 0x16, 0x5f, 0x51, 0xdb, 0x32,
+	0xae, 0x02, 0xd7, 0x46, 0x6a, 0xb8, 0xf6, 0x59, 0x99, 0xaf, 0xe5, 0xdb,
+	0xc3, 0x72, 0xc0, 0xd9, 0xc5, 0x33, 0x36, 0xc7, 0x32, 0xf2, 0xe1, 0xe4,
+	0xdb, 0xbb, 0x6a, 0x7e, 0x92, 0x67, 0x3a, 0x96, 0x0e, 0x31, 0x87, 0xf2,
+	0x6b, 0xb3, 0x53, 0xce, 0xcf, 0xb6, 0x42, 0x2e, 0xb0, 0x8d, 0x6b, 0xcd,
+	0xb7, 0x39, 0x5f, 0x54, 0x0e, 0xb8, 0xe7, 0x1d, 0x6a, 0xf3, 0x16, 0x6b,
+	0xf3, 0xc6, 0x3c, 0x7b, 0xa3, 0x0f, 0xae, 0xfb, 0xcb, 0x1c, 0xfc, 0xe5,
+	0x18, 0x72, 0xee, 0x45, 0x67, 0xb5, 0xfa, 0xe8, 0xb5, 0xfa, 0xcb, 0xc6,
+	0x3a, 0x73, 0xa3, 0xbf, 0xe4, 0x3a, 0x8d, 0xb5, 0xe5, 0x78, 0x03, 0xfe,
+	0x53, 0x9f, 0x0e, 0x7b, 0x31, 0x35, 0xae, 0xa5, 0xc3, 0xb0, 0x47, 0x5d,
+	0xc6, 0x94, 0xfe, 0xb2, 0xed, 0xe7, 0x96, 0xbb, 0x6b, 0xb9, 0x65, 0x3d,
+	0x1f, 0x44, 0xec, 0x9a, 0xfc, 0xa4, 0x87, 0x8f, 0x8c, 0x91, 0xa7, 0xd0,
+	0x7f, 0x0c, 0x3a, 0xc0, 0x67, 0xac, 0x97, 0xde, 0x2c, 0x9f, 0x32, 0x5d,
+	0xff, 0xe4, 0xd6, 0xa6, 0x76, 0xab, 0xf8, 0x9f, 0xdf, 0x0b, 0x0a, 0xa9,
+	0x76, 0x2f, 0xde, 0xbb, 0x12, 0xae, 0x2e, 0xcf, 0x4d, 0x75, 0xfd, 0x51,
+	0xbc, 0xcb, 0xdc, 0xd4, 0x8c, 0x10, 0x43, 0xb3, 0x95, 0xcb, 0xbe, 0x5f,
+	0xa4, 0x7f, 0x29, 0xa8, 0xef, 0x82, 0x2a, 0x0f, 0xc5, 0xb8, 0x45, 0xef,
+	0x7d, 0x37, 0x0f, 0xcd, 0x56, 0xbe, 0xdd, 0xea, 0xe2, 0xe0, 0xe5, 0x72,
+	0x96, 0x9f, 0x88, 0xb0, 0xae, 0xb7, 0xe8, 0x5c, 0x89, 0xd6, 0x95, 0x79,
+	0xaf, 0xb1, 0x22, 0xef, 0x1d, 0xf5, 0xf2, 0xda, 0xcf, 0xa9, 0xbc, 0xd7,
+	0xe5, 0x31, 0xf7, 0x12, 0xcc, 0xa3, 0x6c, 0x60, 0x21, 0xbf, 0xa9, 0x10,
+	0x1f, 0x26, 0x94, 0xdf, 0xca, 0x4f, 0xdf, 0x09, 0x3e, 0x47, 0x57, 0xd1,
+	0x9b, 0x8f, 0xda, 0x4f, 0xf8, 0x7b, 0x3f, 0x2c, 0x6e, 0xbd, 0x6e, 0x27,
+	0x68, 0x61, 0x6e, 0x15, 0xf2, 0xf4, 0xe1, 0xbb, 0xde, 0x39, 0x53, 0x7f,
+	0x9c, 0x9f, 0xc7, 0xd7, 0xbe, 0xbb, 0x16, 0x33, 0xcb, 0xea, 0x27, 0x1b,
+	0x09, 0xc3, 0x90, 0x7b, 0xe6, 0x1a, 0xbe, 0x5b, 0x7c, 0x90, 0xf3, 0x11,
+	0x8d, 0x7e, 0x8d, 0xdf, 0x4d, 0xf9, 0xad, 0x54, 0xb4, 0xbb, 0x7b, 0x6d,
+	0xd8, 0x00, 0xcf, 0x2c, 0x07, 0xf1, 0x35, 0x2c, 0xf9, 0x39, 0x09, 0x47,
+	0xd3, 0xfc, 0x06, 0x40, 0xff, 0xff, 0xba, 0xb7, 0xcf, 0x98, 0xec, 0x9f,
+	0x71, 0x6b, 0x9e, 0xfa, 0x65, 0xcf, 0xc5, 0x1d, 0x00, 0x1f, 0x12, 0x47,
+	0xfd, 0x9a, 0xa7, 0xee, 0x9e, 0x8b, 0x3b, 0xfa, 0xe1, 0x9d, 0x8b, 0xe3,
+	0xfc, 0xa6, 0xec, 0x5a, 0xe5, 0x5c, 0x9c, 0x71, 0x95, 0xe7, 0xe2, 0xda,
+	0x55, 0xcd, 0x93, 0xf3, 0xb8, 0x35, 0x4f, 0xb6, 0xbb, 0xfb, 0x59, 0x2b,
+	0xe1, 0xd9, 0xb7, 0x01, 0x75, 0x06, 0xb9, 0xbb, 0xff, 0x47, 0x91, 0xa3,
+	0x7c, 0x3d, 0xf2, 0xd1, 0xe7, 0x28, 0xdc, 0xcb, 0xaf, 0xb8, 0xdf, 0x77,
+	0xe5, 0x5a, 0xea, 0x00, 0x1f, 0xac, 0xae, 0xb9, 0x5f, 0xd5, 0x35, 0xbf,
+	0x13, 0x09, 0xd6, 0x35, 0xf5, 0x2b, 0x9c, 0x0d, 0xdb, 0xbf, 0x4a, 0x5d,
+	0x33, 0x14, 0x38, 0x1b, 0x16, 0xf2, 0xce, 0x86, 0xb5, 0xdb, 0xc8, 0x25,
+	0xbd, 0x3a, 0xa6, 0x7e, 0xd9, 0xb3, 0x61, 0xff, 0x19, 0xf9, 0xe0, 0x75,
+	0xcc, 0x15, 0x67, 0xc3, 0xe0, 0xeb, 0x36, 0x48, 0xfc, 0x9a, 0xf2, 0x9e,
+	0x0f, 0x92, 0xf3, 0xf0, 0xbc, 0x7e, 0x13, 0xf6, 0x1c, 0x92, 0x5d, 0x51,
+	0xea, 0x27, 0xcf, 0x36, 0xf6, 0xc2, 0x16, 0x70, 0xad, 0xb0, 0x9d, 0xa4,
+	0x8c, 0xb4, 0x91, 0xde, 0xe5, 0xe7, 0x10, 0xea, 0xe7, 0x71, 0xc3, 0xb5,
+	0xf3, 0xb8, 0x47, 0xa0, 0x37, 0xfa, 0x4c, 0x58, 0x16, 0x02, 0x3a, 0x35,
+	0x85, 0x78, 0x4f, 0x9f, 0xb3, 0xbc, 0xe7, 0xfc, 0x9f, 0x8a, 0x28, 0x30,
+	0x8f, 0x67, 0x78, 0xdb, 0xc4, 0x98, 0x73, 0xbf, 0x59, 0xba, 0xff, 0x57,
+	0x12, 0xc3, 0x18, 0x9e, 0xf1, 0x0c, 0xc9, 0x01, 0x55, 0xb3, 0xf0, 0x75,
+	0x79, 0xc7, 0x5a, 0x69, 0x59, 0x9f, 0xa9, 0xb7, 0xa3, 0xab, 0xf8, 0x7d,
+	0xc4, 0x91, 0x33, 0xd4, 0xe7, 0x5b, 0x25, 0xe7, 0xd5, 0x83, 0x0a, 0x95,
+	0x6d, 0x5e, 0x7e, 0xa1, 0xbe, 0xed, 0x80, 0x97, 0xdd, 0x9e, 0x0f, 0xc6,
+	0xb5, 0xd4, 0x4d, 0x9f, 0x87, 0x35, 0x4e, 0xca, 0xd0, 0xf4, 0x96, 0xd8,
+	0x38, 0xf0, 0x6e, 0x4c, 0xad, 0x79, 0x2d, 0x3c, 0xd7, 0x2e, 0xf1, 0xbd,
+	0xf1, 0x6a, 0xf9, 0xee, 0xc7, 0xc7, 0x8f, 0x62, 0x7f, 0xdd, 0xd0, 0x8f,
+	0x87, 0x25, 0x77, 0xe2, 0x66, 0x19, 0x9a, 0x4d, 0x80, 0x9e, 0x1f, 0x56,
+	0x0b, 0x29, 0xc4, 0xd2, 0x4f, 0xf0, 0xdc, 0x18, 0x30, 0x14, 0x7c, 0x7b,
+	0x66, 0xc5, 0x77, 0xec, 0xe0, 0x59, 0xb3, 0x64, 0xed, 0xec, 0xd0, 0x53,
+	0x15, 0x09, 0x77, 0x90, 0xe6, 0x99, 0xfa, 0xd9, 0xef, 0xc5, 0xca, 0x0e,
+	0xe5, 0xdb, 0x9e, 0xac, 0x2c, 0xab, 0xfd, 0x28, 0x19, 0x4e, 0x94, 0x9f,
+	0x04, 0x2f, 0x5e, 0x51, 0xfe, 0xed, 0x88, 0x23, 0x37, 0x19, 0x42, 0x79,
+	0x88, 0x06, 0x1e, 0xa8, 0x33, 0x1c, 0xee, 0xf7, 0xfd, 0x2e, 0x25, 0x57,
+	0x17, 0x2b, 0x76, 0x06, 0xce, 0x60, 0xd4, 0x65, 0xeb, 0x9e, 0xcd, 0x70,
+	0x65, 0xe1, 0x9e, 0x1f, 0x21, 0x3f, 0x97, 0x0e, 0xed, 0xb2, 0xdd, 0xf3,
+	0x23, 0x3d, 0x73, 0xec, 0xeb, 0x6c, 0xf0, 0x7d, 0x61, 0xe8, 0x00, 0xcf,
+	0x1d, 0xf1, 0xcc, 0x37, 0x69, 0x56, 0xb5, 0x8e, 0x55, 0xbf, 0x6d, 0x5f,
+	0x5b, 0xcd, 0xd5, 0x5d, 0xb3, 0x5b, 0xad, 0x79, 0x9d, 0x87, 0x59, 0xfe,
+	0x59, 0xef, 0x94, 0xf6, 0xff, 0xd4, 0x5d, 0x7b, 0x6c, 0x1b, 0xf7, 0x7d,
+	0xff, 0xf2, 0x48, 0x3d, 0xac, 0xe7, 0x49, 0xa6, 0x64, 0x5a, 0x52, 0x94,
+	0x3b, 0xe9, 0x64, 0x29, 0xb1, 0x12, 0x70, 0x9e, 0xba, 0x0a, 0x88, 0x9a,
+	0xb0, 0x24, 0xfd, 0x58, 0x10, 0x0c, 0xb4, 0xad, 0x64, 0xee, 0x92, 0xad,
+	0x0e, 0x25, 0xa7, 0x1d, 0x30, 0x60, 0x6e, 0xd6, 0x02, 0x69, 0x07, 0xc7,
+	0x0c, 0x65, 0x27, 0xc6, 0xaa, 0x88, 0x4c, 0xcc, 0x6a, 0x1d, 0xb0, 0x62,
+	0x1c, 0xa5, 0x38, 0x69, 0xa7, 0x80, 0x69, 0xda, 0x04, 0xc5, 0xfe, 0xb1,
+	0x26, 0x3b, 0x7b, 0x61, 0x7f, 0x04, 0xdb, 0x80, 0x1a, 0x5b, 0x81, 0xba,
+	0x76, 0x8a, 0x65, 0x1b, 0xe0, 0x34, 0xdb, 0xb0, 0x75, 0x58, 0x0b, 0xee,
+	0xfb, 0xf9, 0x3d, 0xc8, 0x23, 0x79, 0xd4, 0xc3, 0x71, 0x06, 0x4c, 0x80,
+	0x40, 0xde, 0xf1, 0x77, 0x77, 0xbf, 0xdf, 0xf7, 0xf7, 0x7d, 0xbf, 0x6e,
+	0x2e, 0x33, 0x1e, 0xf2, 0x33, 0x7e, 0xcf, 0x15, 0x61, 0x5f, 0x37, 0xd2,
+	0xe1, 0x36, 0x83, 0x67, 0xa4, 0x0e, 0x9e, 0xd5, 0x34, 0xc1, 0xf6, 0x78,
+	0x99, 0x77, 0x4b, 0xd8, 0xc9, 0xf3, 0xc8, 0x63, 0xd7, 0x39, 0x0e, 0x12,
+	0x76, 0x65, 0x1a, 0x5a, 0x72, 0xe7, 0x37, 0x54, 0x60, 0x77, 0xb2, 0x0c,
+	0xbb, 0x3d, 0xff, 0x8f, 0x60, 0x77, 0x4d, 0xe8, 0xbf, 0xdf, 0x2e, 0x22,
+	0x6f, 0x4d, 0xeb, 0x00, 0xba, 0x6e, 0x09, 0x70, 0x04, 0x3f, 0xb5, 0xf3,
+	0xeb, 0x04, 0x9e, 0x8a, 0xbc, 0xe2, 0x52, 0xe9, 0x3b, 0xe1, 0xb2, 0x9f,
+	0x92, 0xed, 0x12, 0xd8, 0x27, 0xf0, 0xe7, 0x35, 0x96, 0x91, 0x47, 0x6f,
+	0x4b, 0x46, 0x42, 0x57, 0xaa, 0xb5, 0x4f, 0x7e, 0xbb, 0xcb, 0x6d, 0x9f,
+	0x1c, 0xdd, 0xa1, 0x7d, 0x72, 0x5a, 0xda, 0x27, 0xa9, 0xed, 0xdb, 0x27,
+	0x03, 0x75, 0x79, 0x5d, 0x95, 0xf5, 0xec, 0xdc, 0x3e, 0x31, 0x36, 0xb5,
+	0x4f, 0x46, 0x5d, 0xbe, 0x18, 0xcc, 0xf7, 0x57, 0x29, 0x75, 0x0c, 0x3c,
+	0x4e, 0xc3, 0x19, 0x30, 0x3e, 0x56, 0xe3, 0x17, 0xfe, 0x38, 0x61, 0xfd,
+	0xd7, 0xff, 0xc7, 0xb0, 0x1e, 0xac, 0xf3, 0x79, 0x57, 0xd6, 0x03, 0x21,
+	0xfe, 0x51, 0x60, 0x3d, 0xd8, 0xd0, 0x77, 0xda, 0x38, 0x67, 0xb1, 0xda,
+	0x77, 0x3a, 0x62, 0x34, 0xe2, 0xed, 0x7f, 0xe4, 0xf2, 0xa9, 0xba, 0xf9,
+	0x3b, 0x68, 0x8a, 0x7c, 0x47, 0xc7, 0xf5, 0xb3, 0x40, 0x4b, 0x76, 0x2a,
+	0x45, 0xb0, 0x99, 0xf0, 0xbc, 0x90, 0xa0, 0xb5, 0x1a, 0x7d, 0x8b, 0x9f,
+	0xc7, 0xeb, 0x7b, 0xf5, 0x09, 0x21, 0xa7, 0xa4, 0xff, 0x01, 0xe3, 0x27,
+	0x7c, 0xf3, 0x62, 0xac, 0xcc, 0x6f, 0x52, 0xfe, 0x08, 0xa5, 0xfb, 0x37,
+	0xf2, 0x43, 0xd4, 0xcb, 0xbc, 0x9d, 0xd9, 0x0a, 0x9a, 0xc6, 0xef, 0xe6,
+	0x7d, 0x09, 0x55, 0xd9, 0x5a, 0xe0, 0x9f, 0xa7, 0x59, 0x2f, 0x18, 0x29,
+	0xeb, 0x04, 0xd5, 0x7b, 0x73, 0x4e, 0xd8, 0x74, 0x9a, 0x77, 0x26, 0x64,
+	0xee, 0xa9, 0x38, 0x0f, 0x3d, 0x4d, 0xf3, 0xce, 0x5a, 0x3d, 0xf8, 0x6e,
+	0x0f, 0xbc, 0xf0, 0xca, 0x69, 0x2a, 0xef, 0x9d, 0x85, 0x9c, 0xf3, 0xb8,
+	0xe7, 0xde, 0x95, 0x6b, 0xc2, 0x52, 0x95, 0xb1, 0xf2, 0xfa, 0xb8, 0x58,
+	0xd7, 0x0f, 0x8e, 0x44, 0x51, 0xfb, 0x56, 0xae, 0x17, 0xab, 0xad, 0x77,
+	0x82, 0x1c, 0xd0, 0x74, 0xa8, 0x6b, 0xa2, 0x01, 0x8b, 0x61, 0x8f, 0x7a,
+	0x27, 0xb7, 0x2c, 0xc1, 0x75, 0xb5, 0xb0, 0xa8, 0xc8, 0x91, 0xf3, 0x4a,
+	0x8e, 0x14, 0x5c, 0x7c, 0xbc, 0x5e, 0x6f, 0xef, 0xf5, 0xd0, 0xdb, 0xbd,
+	0x6a, 0x9e, 0x30, 0xa7, 0x67, 0x58, 0x0f, 0xb9, 0x1f, 0x7a, 0x88, 0x89,
+	0xba, 0x25, 0xa9, 0x8b, 0xe0, 0x77, 0x96, 0x35, 0xaf, 0x86, 0x18, 0x57,
+	0x8e, 0xd0, 0x53, 0xac, 0x6b, 0x5f, 0xa2, 0x7b, 0x94, 0x7d, 0x16, 0x71,
+	0xe5, 0x99, 0x22, 0x8f, 0xdf, 0x47, 0xa9, 0x27, 0xec, 0x89, 0x08, 0x1d,
+	0xa1, 0x53, 0x22, 0x67, 0x06, 0xf1, 0x3d, 0xe4, 0x1c, 0xdc, 0x2b, 0x9e,
+	0x2f, 0x7d, 0x19, 0x77, 0x22, 0xa7, 0x6e, 0xfb, 0xf9, 0xfb, 0xba, 0x56,
+	0x2f, 0x2a, 0x9e, 0xb9, 0xaa, 0x68, 0x4a, 0x9c, 0xe3, 0xeb, 0x9f, 0x31,
+	0xea, 0xaf, 0x8f, 0x18, 0xf1, 0x62, 0xdc, 0x88, 0xae, 0x60, 0xdc, 0x33,
+	0x46, 0xac, 0x08, 0x1b, 0x52, 0xe3, 0x88, 0x1d, 0x06, 0xbd, 0x6d, 0xd0,
+	0xd6, 0xb1, 0x88, 0x02, 0xd5, 0xd4, 0x49, 0x6c, 0x63, 0xde, 0x87, 0xaa,
+	0xe6, 0xad, 0xe1, 0x8b, 0xef, 0xf0, 0xf7, 0x44, 0x18, 0xa6, 0x5a, 0xaf,
+	0x6d, 0x83, 0x7f, 0x7d, 0x22, 0x45, 0x9b, 0xe9, 0xb5, 0x76, 0x9d, 0x5e,
+	0x5b, 0xd8, 0x72, 0xde, 0x1f, 0x95, 0xc6, 0x65, 0x3d, 0xa2, 0xdf, 0x11,
+	0xfa, 0x2b, 0xcf, 0xbb, 0x4a, 0xb7, 0xad, 0xc1, 0x29, 0x8c, 0xd1, 0x7e,
+	0x70, 0xed, 0x07, 0xeb, 0x52, 0xf9, 0xc0, 0x3a, 0x3f, 0xa1, 0x0d, 0xf5,
+	0x5e, 0xa6, 0xcc, 0x6b, 0x85, 0x8d, 0xb5, 0xce, 0xf3, 0x83, 0xbd, 0xf5,
+	0xa0, 0x98, 0x23, 0xdb, 0x5b, 0x56, 0x8c, 0xa4, 0xaf, 0x7b, 0xbe, 0x58,
+	0x55, 0xff, 0xe9, 0x51, 0x07, 0x39, 0xe2, 0x51, 0x07, 0xe9, 0xa6, 0xb5,
+	0x80, 0x8b, 0xd6, 0x42, 0x2e, 0xbd, 0x6d, 0x88, 0xed, 0x96, 0x0e, 0xe6,
+	0x21, 0xb0, 0x5b, 0xda, 0xc8, 0xff, 0xb2, 0xdb, 0x6e, 0xa9, 0xad, 0x45,
+	0x07, 0xdd, 0x41, 0x37, 0x93, 0x36, 0x4c, 0x3c, 0x57, 0xae, 0x63, 0xe7,
+	0x75, 0x57, 0x6a, 0x0e, 0x57, 0xea, 0xea, 0x23, 0xbd, 0xe6, 0x3b, 0x5c,
+	0x37, 0x5f, 0xc8, 0xaf, 0x48, 0x43, 0x9d, 0xce, 0xcb, 0xae, 0xba, 0x53,
+	0xf3, 0xab, 0xe5, 0x67, 0x78, 0xd6, 0x88, 0xf0, 0x79, 0xa7, 0xca, 0xbc,
+	0x6c, 0x5a, 0xce, 0x37, 0x53, 0x6d, 0x67, 0xf8, 0x97, 0x48, 0xc1, 0xce,
+	0x9b, 0xb7, 0xef, 0xcc, 0x7f, 0xd6, 0x5e, 0x23, 0x77, 0xdf, 0x33, 0xa5,
+	0x5f, 0xac, 0x49, 0xe5, 0x61, 0xf7, 0x29, 0x7b, 0x6f, 0x2b, 0x7c, 0xc7,
+	0xb9, 0x26, 0xe5, 0x4b, 0xb4, 0xad, 0x3c, 0x01, 0xcf, 0x8f, 0x9d, 0x68,
+	0x72, 0x4c, 0x15, 0xcb, 0x42, 0xbc, 0x0a, 0x78, 0xaf, 0xef, 0x0f, 0x9e,
+	0xbd, 0x9d, 0x3d, 0xb3, 0xea, 0xf6, 0x4c, 0xe2, 0x15, 0x6c, 0x2d, 0xe4,
+	0x17, 0x4f, 0xd6, 0xe4, 0x78, 0x7f, 0x14, 0x58, 0x74, 0x79, 0xe4, 0x3d,
+	0x23, 0x6f, 0xb9, 0xd1, 0x3c, 0xaf, 0xbb, 0xf4, 0x72, 0xcc, 0xb7, 0x54,
+	0x7a, 0x23, 0x3c, 0x20, 0x65, 0x71, 0xd1, 0x5b, 0x47, 0x32, 0xb7, 0x3d,
+	0xbf, 0x5a, 0xd9, 0xbb, 0x77, 0x9b, 0xb2, 0x57, 0xf4, 0xf4, 0xf0, 0x1d,
+	0x14, 0x3c, 0xa0, 0x83, 0x56, 0x72, 0xc8, 0xbf, 0xfe, 0x05, 0xd0, 0x3c,
+	0xf3, 0x59, 0x57, 0x4d, 0x9a, 0xf7, 0x3e, 0x96, 0x63, 0x2a, 0x81, 0x19,
+	0xc4, 0xfe, 0x90, 0x5b, 0xd2, 0xcb, 0xbc, 0x07, 0xe3, 0xc7, 0xac, 0xab,
+	0xf0, 0xf7, 0x2a, 0xff, 0x53, 0x5c, 0xc9, 0x97, 0x83, 0xdb, 0x88, 0xad,
+	0xec, 0x8c, 0x4f, 0xdb, 0xd6, 0x3a, 0x21, 0xee, 0x83, 0x7c, 0xe1, 0xfb,
+	0xba, 0xa8, 0xeb, 0x33, 0x2d, 0x2d, 0xce, 0x97, 0x7a, 0x64, 0x2c, 0x0a,
+	0xbf, 0x75, 0xd0, 0x2b, 0x39, 0xe4, 0x72, 0xe3, 0xb7, 0xdf, 0xe0, 0xdf,
+	0xbc, 0x78, 0x94, 0xce, 0x45, 0x87, 0x2e, 0x27, 0xf7, 0x27, 0x4f, 0xb0,
+	0x95, 0x4a, 0xf4, 0xb7, 0xe1, 0x5f, 0x94, 0xf1, 0x8c, 0xe2, 0x9d, 0x8e,
+	0xd5, 0x78, 0xf9, 0x0b, 0xdf, 0xec, 0xb9, 0xdd, 0xdc, 0xc8, 0x2f, 0x6c,
+	0xcb, 0x5f, 0x88, 0x38, 0xff, 0x76, 0x62, 0x26, 0x3a, 0x36, 0x3c, 0x25,
+	0x6a, 0x4e, 0xdd, 0x78, 0x70, 0x67, 0xe2, 0xc3, 0xc0, 0x87, 0xe1, 0x3a,
+	0x5e, 0xf5, 0xd1, 0xfd, 0xfd, 0xb5, 0x70, 0x6d, 0xf3, 0xf4, 0x55, 0x79,
+	0xc7, 0x81, 0x11, 0xf3, 0x87, 0x9f, 0xfa, 0x21, 0x9a, 0xbf, 0x08, 0x1c,
+	0x36, 0x18, 0xdb, 0x46, 0x69, 0x21, 0x88, 0xba, 0x22, 0x51, 0x9b, 0xa3,
+	0xe2, 0x86, 0xb2, 0x56, 0x68, 0x5e, 0xd4, 0x40, 0x8e, 0x85, 0x6e, 0xf2,
+	0xbc, 0xe7, 0x8b, 0x29, 0x3a, 0xc5, 0x32, 0xf6, 0xd4, 0x4a, 0x45, 0x77,
+	0xaf, 0xaf, 0x83, 0xac, 0xc6, 0xf1, 0x9b, 0x02, 0xc7, 0x87, 0x36, 0xc5,
+	0xf1, 0xc3, 0x65, 0x1c, 0xff, 0x44, 0xaf, 0xc4, 0xe7, 0x67, 0xf9, 0x5e,
+	0x5d, 0x74, 0x50, 0xdc, 0x37, 0xc5, 0xdf, 0xdb, 0xe9, 0xa0, 0xec, 0x61,
+	0xc1, 0xcf, 0x66, 0x1e, 0x9f, 0x49, 0xd1, 0x53, 0x17, 0x53, 0xbe, 0xb8,
+	0xa8, 0x5f, 0x70, 0xf7, 0xe8, 0xd0, 0xd7, 0x63, 0x5c, 0x23, 0xfc, 0xd7,
+	0x7c, 0x49, 0xd6, 0x5c, 0xe5, 0x25, 0x7f, 0xa2, 0x77, 0xc3, 0x83, 0x35,
+	0xf8, 0x5f, 0x6d, 0x3b, 0x9e, 0x56, 0x32, 0xf0, 0xd8, 0x26, 0x7e, 0x8d,
+	0x7a, 0xbc, 0xec, 0xf1, 0xd0, 0x87, 0x7f, 0xbd, 0x57, 0xc6, 0xa9, 0x36,
+	0xf3, 0x6b, 0xb8, 0x71, 0xb4, 0x2a, 0x6e, 0xcf, 0x7c, 0xff, 0xbf, 0x55,
+	0x1c, 0xfd, 0xa5, 0x5e, 0x29, 0x2f, 0x50, 0x1f, 0x98, 0x60, 0x38, 0x9c,
+	0x64, 0x5d, 0x65, 0x90, 0x9a, 0x5f, 0xd6, 0x6b, 0x1d, 0x14, 0xfc, 0xd6,
+	0xed, 0xa7, 0x39, 0xa7, 0x6a, 0xbb, 0xd3, 0xae, 0x35, 0x9d, 0x13, 0x36,
+	0x4e, 0x63, 0x7a, 0x6b, 0x9c, 0x73, 0x35, 0x54, 0x23, 0x13, 0x6a, 0xf1,
+	0x0d, 0xbd, 0x4f, 0xb0, 0xbf, 0x64, 0x48, 0x3d, 0x78, 0x9a, 0xf5, 0xdb,
+	0x9d, 0xc6, 0x8b, 0x3e, 0xaa, 0x8e, 0x58, 0xdb, 0x53, 0xa3, 0xf6, 0x3b,
+	0xf6, 0x41, 0xda, 0x1c, 0xc9, 0x57, 0x1f, 0x12, 0xbc, 0xe0, 0xdc, 0x64,
+	0x89, 0x62, 0xe1, 0x4e, 0x4a, 0x4e, 0xf2, 0xb3, 0xa7, 0x1d, 0xb6, 0xbd,
+	0xfc, 0x94, 0x62, 0xfa, 0x4d, 0x4e, 0xee, 0x52, 0xfa, 0xa2, 0xf6, 0xa7,
+	0xb7, 0xa8, 0x3c, 0x87, 0x67, 0x45, 0x5c, 0x52, 0xf6, 0xc6, 0xe0, 0xef,
+	0x2b, 0xfa, 0xde, 0xcf, 0x8a, 0xf8, 0x68, 0xf2, 0x62, 0xb3, 0x1a, 0xd7,
+	0xee, 0x1a, 0x87, 0x31, 0xed, 0x6a, 0x2c, 0xee, 0xa9, 0x75, 0x8a, 0x56,
+	0xc5, 0x6f, 0x1f, 0x11, 0x75, 0x60, 0xb2, 0x56, 0x0f, 0xbf, 0x9f, 0xa6,
+	0xb9, 0xf2, 0x5a, 0xda, 0x79, 0xec, 0xcf, 0x4a, 0x11, 0x61, 0xcb, 0xb5,
+	0xb3, 0xce, 0x8b, 0x79, 0xd7, 0xcf, 0x09, 0x6b, 0xf1, 0x8b, 0xf8, 0x10,
+	0x7f, 0x57, 0xcf, 0x39, 0x59, 0x9e, 0x13, 0x72, 0x34, 0xec, 0x90, 0xbc,
+	0x97, 0x1e, 0xd7, 0xee, 0x1a, 0xa7, 0x79, 0x85, 0x8e, 0x3f, 0xfc, 0x80,
+	0xe7, 0xf1, 0x37, 0x2a, 0x87, 0xd7, 0x14, 0xf1, 0x53, 0x99, 0xa3, 0xa1,
+	0xbf, 0xc3, 0xbf, 0x8c, 0x9c, 0x0a, 0xe4, 0x49, 0xb8, 0xf9, 0x8d, 0x5c,
+	0x6f, 0x00, 0xb2, 0xa8, 0x88, 0xb8, 0x29, 0xe2, 0x15, 0x8d, 0x74, 0xe7,
+	0xbd, 0xc8, 0xcd, 0xdf, 0x81, 0x0e, 0xba, 0x1d, 0xfa, 0xb3, 0x3c, 0xe8,
+	0xcf, 0xfd, 0x7c, 0xd4, 0xc1, 0xa1, 0x1e, 0x2e, 0x35, 0x61, 0x50, 0x89,
+	0x6d, 0x05, 0x83, 0xf2, 0xa6, 0x8f, 0x9e, 0x72, 0xec, 0xf0, 0x0a, 0xc9,
+	0x9a, 0xc9, 0xd8, 0xa2, 0x3d, 0xb1, 0x4e, 0xfb, 0x45, 0xcd, 0x38, 0x7a,
+	0x1f, 0xe4, 0x59, 0x06, 0x9f, 0xa4, 0x09, 0xb6, 0x8f, 0xd8, 0xfe, 0x9c,
+	0x45, 0xbc, 0x45, 0xef, 0x0b, 0x6a, 0xe0, 0xf1, 0x39, 0xc1, 0x70, 0x7a,
+	0x6c, 0x37, 0xb5, 0x45, 0xf8, 0x9e, 0x13, 0xe0, 0x4f, 0xe8, 0xe7, 0x45,
+	0x51, 0xb6, 0x93, 0x60, 0xb3, 0x9e, 0x9c, 0xb5, 0xcd, 0x3c, 0x19, 0x3c,
+	0x16, 0xb6, 0x2b, 0xee, 0x83, 0xeb, 0x23, 0x66, 0x13, 0xd5, 0xd6, 0xe4,
+	0x3e, 0x2b, 0xea, 0x14, 0xdf, 0x0d, 0xdf, 0x47, 0x46, 0x3f, 0xf8, 0x15,
+	0xf6, 0xed, 0x5e, 0x15, 0x27, 0x3a, 0xcb, 0xdf, 0xc7, 0xd5, 0xf7, 0xaf,
+	0x88, 0xfd, 0x94, 0xdf, 0x35, 0x7e, 0xe3, 0xef, 0x5f, 0x5a, 0xc8, 0xf9,
+	0xa1, 0xca, 0x59, 0xa9, 0xca, 0x05, 0x09, 0x8d, 0x1a, 0x5f, 0xa1, 0xd3,
+	0x2b, 0x9b, 0xf9, 0x5f, 0xbc, 0x6a, 0x5d, 0xbb, 0xb7, 0x59, 0xeb, 0xfa,
+	0x07, 0xbb, 0x65, 0x6d, 0x99, 0x7b, 0x2e, 0xff, 0xc9, 0x73, 0xf1, 0xd2,
+	0xc9, 0xea, 0xf4, 0x44, 0x5e, 0x6f, 0x89, 0xfe, 0x29, 0xfc, 0x49, 0xba,
+	0x1e, 0x0c, 0xa9, 0x9c, 0x25, 0xe4, 0x28, 0xdd, 0xa7, 0xf0, 0x5a, 0xf3,
+	0x7e, 0xf2, 0xe0, 0xfd, 0x8f, 0x89, 0x5c, 0x4d, 0x29, 0x3b, 0x06, 0x15,
+	0x3c, 0x00, 0xb3, 0x90, 0x0b, 0x66, 0x7d, 0x2e, 0x98, 0x19, 0xea, 0x7b,
+	0xa7, 0x38, 0x3e, 0xbd, 0xf2, 0x99, 0x6e, 0x59, 0x2f, 0x8e, 0x58, 0xe2,
+	0xbc, 0xfa, 0xbe, 0xd5, 0x7a, 0x7f, 0xce, 0x6b, 0x15, 0xfe, 0x26, 0xd7,
+	0x5a, 0x5f, 0x27, 0x72, 0x5a, 0x82, 0xf5, 0x30, 0xf8, 0x8e, 0xeb, 0x3c,
+	0xe6, 0x38, 0xe6, 0x9a, 0xe3, 0x88, 0x6b, 0x8e, 0x77, 0x37, 0x98, 0x23,
+	0xf3, 0xf8, 0xe2, 0x69, 0xfe, 0xbf, 0xdd, 0xb9, 0xca, 0x79, 0xce, 0x0b,
+	0x78, 0xb6, 0x53, 0x3a, 0x18, 0x52, 0xb2, 0xe3, 0xfb, 0xaa, 0x16, 0xdd,
+	0x6b, 0xce, 0xff, 0x40, 0x8d, 0xf7, 0xcd, 0x8d, 0xab, 0xee, 0xfa, 0xe3,
+	0x97, 0x28, 0x26, 0xeb, 0xc8, 0x15, 0x6d, 0x7f, 0xb5, 0x81, 0x1f, 0xfa,
+	0x41, 0xa1, 0xff, 0xcc, 0xcb, 0x78, 0xd0, 0x80, 0xec, 0xbf, 0x16, 0xa0,
+	0xd5, 0x72, 0x2d, 0xaf, 0x5f, 0xd5, 0xee, 0xdc, 0x1f, 0xbc, 0xb3, 0x75,
+	0xbc, 0x38, 0xff, 0x88, 0xf0, 0xe5, 0xc9, 0xf8, 0x51, 0x42, 0xd5, 0x23,
+	0xdb, 0x16, 0x72, 0x03, 0x0a, 0x6b, 0xf0, 0xbf, 0x36, 0xaa, 0xdd, 0xc5,
+	0xb5, 0xf0, 0x03, 0x6a, 0x3b, 0xfe, 0x84, 0xe0, 0x89, 0xd2, 0x3f, 0x26,
+	0xeb, 0x6f, 0x0b, 0x6b, 0x27, 0x45, 0xcd, 0x6b, 0x54, 0xd5, 0xf1, 0x26,
+	0xa9, 0x43, 0xe8, 0xb9, 0xb7, 0x5f, 0x7f, 0xfb, 0x5c, 0x70, 0xe7, 0xf5,
+	0xb7, 0xee, 0x6b, 0x76, 0x56, 0x7f, 0x6b, 0xf2, 0xda, 0x8d, 0x65, 0x59,
+	0x7f, 0x5b, 0x1d, 0x93, 0x91, 0xfe, 0xc0, 0xa4, 0x4b, 0x7f, 0x90, 0xfa,
+	0xfa, 0x6f, 0xb9, 0xf2, 0xb7, 0x65, 0x6d, 0x6d, 0xa1, 0xac, 0xb3, 0xca,
+	0xda, 0x5a, 0x99, 0xef, 0xed, 0xee, 0x03, 0x23, 0x63, 0x3f, 0xf2, 0x39,
+	0x9d, 0x35, 0xb1, 0x1f, 0x59, 0x53, 0x6b, 0x19, 0x8d, 0x6c, 0x38, 0xd1,
+	0xe7, 0xa2, 0x8f, 0xba, 0x22, 0x8c, 0xbb, 0xed, 0x0d, 0xfa, 0x21, 0x44,
+	0x1a, 0xf4, 0x43, 0x70, 0xf3, 0x7e, 0xb7, 0x8e, 0x05, 0x9d, 0x18, 0xb2,
+	0x11, 0xba, 0x30, 0xfa, 0x19, 0x84, 0xe9, 0x74, 0x59, 0xf7, 0xbc, 0x8f,
+	0x12, 0x4a, 0xf7, 0x3c, 0xbd, 0xa2, 0xf9, 0xd1, 0x48, 0x0d, 0x3f, 0xf2,
+	0xd2, 0x45, 0x6d, 0x95, 0xe7, 0xa3, 0xe9, 0x35, 0xe5, 0xa2, 0xd7, 0x94,
+	0x07, 0xbd, 0x8a, 0x67, 0x34, 0x98, 0xf7, 0xf7, 0xd5, 0x35, 0xf8, 0x4f,
+	0x84, 0xd0, 0xb3, 0x85, 0x79, 0x6a, 0x50, 0xe9, 0x7f, 0x2e, 0x7a, 0x3d,
+	0xc5, 0xf4, 0xaa, 0xcf, 0x63, 0xbe, 0x0d, 0x73, 0x41, 0x95, 0xce, 0x38,
+	0xe8, 0x3b, 0x74, 0xf1, 0x1b, 0x22, 0x4f, 0xaa, 0xda, 0x5e, 0xd4, 0xfa,
+	0xc4, 0x3e, 0x41, 0x4b, 0xd7, 0xfd, 0xc8, 0x5b, 0xd1, 0xe7, 0x4c, 0xe5,
+	0x27, 0xd3, 0xb0, 0x68, 0xae, 0xd2, 0x39, 0x2a, 0xfa, 0x86, 0xc8, 0xf1,
+	0x75, 0xcd, 0xed, 0x43, 0x9e, 0x9b, 0x3e, 0xaf, 0x65, 0xe6, 0xb5, 0x2a,
+	0x7f, 0xc6, 0xe5, 0xaa, 0x9e, 0x83, 0xf0, 0x1d, 0x75, 0x26, 0x0c, 0x27,
+	0x2e, 0x72, 0x4c, 0x7b, 0x1c, 0xf8, 0xc9, 0xa2, 0x4c, 0xfb, 0x3d, 0x09,
+	0xe4, 0x33, 0xf7, 0x2c, 0x59, 0x74, 0x3c, 0x73, 0xff, 0x5d, 0x12, 0x57,
+	0xce, 0x8a, 0x3e, 0x92, 0xe8, 0x67, 0x16, 0x63, 0xf9, 0x1c, 0xf5, 0x4f,
+	0xd3, 0xf9, 0x62, 0x0b, 0x15, 0x58, 0xbb, 0xf7, 0x3b, 0x79, 0xe1, 0xeb,
+	0x63, 0x9e, 0x94, 0x45, 0x2f, 0x51, 0x63, 0xb9, 0x99, 0xef, 0xdb, 0x4f,
+	0xab, 0xb9, 0x31, 0xd1, 0x13, 0x4a, 0xf6, 0x17, 0xc1, 0x58, 0x1f, 0xf5,
+	0x3a, 0x07, 0xfb, 0xa8, 0xed, 0xb3, 0x22, 0xc7, 0xb2, 0x90, 0x3d, 0x2b,
+	0x3f, 0xf3, 0x0f, 0xa8, 0x67, 0xf0, 0xf3, 0x8a, 0x7f, 0x4a, 0x91, 0x5e,
+	0xcb, 0x65, 0xcb, 0xb9, 0xff, 0xbc, 0xf5, 0x95, 0xa3, 0x3b, 0xd2, 0x57,
+	0x52, 0x89, 0x8a, 0xbe, 0xe2, 0xbe, 0x77, 0x39, 0x07, 0xa6, 0x5f, 0xf6,
+	0x7b, 0x00, 0x0c, 0xda, 0xa1, 0x8b, 0x25, 0x00, 0x4b, 0x63, 0xc6, 0x0e,
+	0x45, 0xfd, 0x53, 0xb4, 0x50, 0x1c, 0x32, 0x92, 0x59, 0xe8, 0xcc, 0xfc,
+	0x99, 0x8f, 0xee, 0x91, 0x3e, 0x1a, 0x7d, 0x0d, 0xf8, 0xca, 0x6e, 0x1e,
+	0xff, 0x7a, 0xbf, 0xcc, 0xcb, 0x76, 0x9f, 0xef, 0xe2, 0xf3, 0x7b, 0x42,
+	0xd5, 0xe7, 0x77, 0xf1, 0xf9, 0xde, 0x04, 0xf6, 0xd0, 0x58, 0x82, 0x5f,
+	0xd2, 0xa1, 0x34, 0xef, 0xcd, 0x42, 0x91, 0x65, 0xeb, 0xcb, 0xcc, 0x47,
+	0x57, 0xf4, 0xb8, 0x3e, 0xd4, 0xec, 0x88, 0x3d, 0x31, 0x78, 0xcc, 0xb9,
+	0xcc, 0x04, 0x8f, 0x1b, 0x24, 0xff, 0xcb, 0x6c, 0x8b, 0xae, 0x68, 0x5c,
+	0xd5, 0xf9, 0xf6, 0xdf, 0xe8, 0x93, 0x39, 0x55, 0xdf, 0xdd, 0x23, 0xe1,
+	0xe7, 0x08, 0x9e, 0x72, 0x9e, 0xe1, 0xf2, 0xbc, 0xc0, 0x43, 0x7b, 0xda,
+	0x2a, 0x3f, 0xbf, 0x13, 0x78, 0xd5, 0x8a, 0xbc, 0xd9, 0xc0, 0x12, 0xf3,
+	0xc5, 0x19, 0xc7, 0x4c, 0x97, 0x73, 0xd5, 0x1e, 0x1f, 0x90, 0xd7, 0xbf,
+	0xd9, 0x27, 0xfb, 0x83, 0x7e, 0x6b, 0x40, 0xf7, 0x48, 0x94, 0x32, 0x07,
+	0xf9, 0xcb, 0x3e, 0x01, 0x1b, 0xff, 0x32, 0xf8, 0xa5, 0xc1, 0xdf, 0x79,
+	0x3d, 0x09, 0xcc, 0xf1, 0x4a, 0x9f, 0xee, 0x17, 0x23, 0xd7, 0x15, 0xe7,
+	0xf9, 0x46, 0x78, 0x5d, 0xfa, 0xfc, 0x0c, 0x1f, 0x7b, 0xed, 0x2f, 0xee,
+	0xd5, 0x96, 0x90, 0xfd, 0xc5, 0xda, 0x12, 0xc9, 0x09, 0xb9, 0xcf, 0x15,
+	0x9f, 0x6e, 0xa8, 0xec, 0xd3, 0x3d, 0x9f, 0xb9, 0xd5, 0x07, 0xff, 0x86,
+	0xb1, 0xc4, 0xfb, 0x1d, 0x7c, 0x9e, 0xc7, 0xa2, 0x56, 0x21, 0xcd, 0x9f,
+	0x1d, 0x2a, 0xaf, 0xa7, 0x1e, 0x57, 0x64, 0x9e, 0x84, 0x96, 0x5b, 0xb8,
+	0xf6, 0x43, 0xbe, 0x87, 0x94, 0x5d, 0x8d, 0x9f, 0x43, 0x75, 0x79, 0x30,
+	0xf5, 0x38, 0xb6, 0x99, 0x1f, 0x56, 0xc4, 0x13, 0x3d, 0xf0, 0x6c, 0xb3,
+	0x7e, 0x06, 0xd7, 0x84, 0x1f, 0x2d, 0x56, 0x47, 0xaf, 0xa0, 0xe3, 0x00,
+	0xfd, 0xce, 0x62, 0x8a, 0x76, 0xf1, 0x5e, 0xfd, 0xa6, 0xf1, 0x00, 0xe2,
+	0xed, 0x24, 0x73, 0x9e, 0x18, 0xc6, 0x19, 0x67, 0xe2, 0x94, 0x11, 0x01,
+	0xbf, 0x2c, 0x05, 0x9c, 0x0e, 0x6a, 0x66, 0x5a, 0xfd, 0x65, 0x1a, 0x65,
+	0xfb, 0x0f, 0x34, 0xeb, 0x84, 0xe2, 0x04, 0x7a, 0xb3, 0xcd, 0x43, 0xac,
+	0x13, 0xc7, 0x8a, 0xc0, 0x67, 0x83, 0x3e, 0x9f, 0x23, 0xfa, 0x5c, 0x6e,
+	0xd4, 0xfc, 0x26, 0x39, 0x56, 0xe5, 0x77, 0xdb, 0x8c, 0xf2, 0x3c, 0xe2,
+	0xc5, 0x2f, 0xd3, 0xfb, 0xa2, 0xcf, 0x09, 0xe0, 0xa8, 0xf7, 0xfd, 0x4b,
+	0x74, 0x32, 0x81, 0x79, 0x6f, 0x9f, 0x3e, 0x8f, 0xef, 0x88, 0x3e, 0xdb,
+	0x3c, 0xe8, 0xf3, 0xc5, 0x7e, 0x89, 0x37, 0x25, 0xc6, 0xd1, 0x36, 0x9a,
+	0xcb, 0x22, 0x07, 0xec, 0xd3, 0xe8, 0x3b, 0x95, 0x4d, 0x32, 0x5f, 0x4a,
+	0x56, 0xf8, 0xd2, 0x85, 0x28, 0x1b, 0xc3, 0x4c, 0xe3, 0xe8, 0xcb, 0xa6,
+	0xf2, 0x7e, 0xb0, 0x8e, 0x01, 0x1a, 0x5d, 0x6e, 0xe7, 0x6b, 0x69, 0x3d,
+	0x3a, 0x15, 0x51, 0xb5, 0xfe, 0xb6, 0x15, 0x63, 0xfe, 0x78, 0x9e, 0x69,
+	0x39, 0x9d, 0xbd, 0x97, 0x0a, 0xc1, 0x21, 0x1a, 0x59, 0xd6, 0xfd, 0x4d,
+	0x44, 0xce, 0xc6, 0xa0, 0xe4, 0x49, 0x7a, 0xdd, 0x9f, 0x10, 0xbe, 0x0b,
+	0xeb, 0xd2, 0xc7, 0xb5, 0xee, 0xf6, 0x2d, 0xf8, 0xd2, 0x25, 0x45, 0xb3,
+	0xa5, 0xcb, 0xd1, 0x30, 0xa5, 0xa2, 0x53, 0xaf, 0xf4, 0x03, 0xff, 0x47,
+	0x2e, 0xc1, 0x0f, 0x07, 0x1e, 0x6d, 0x51, 0x22, 0x53, 0x0b, 0x8b, 0x21,
+	0x5e, 0x37, 0x7e, 0x2f, 0x7d, 0x30, 0x17, 0x7e, 0x40, 0xc8, 0xfe, 0xd1,
+	0x4b, 0x3c, 0x4e, 0xca, 0x26, 0xc5, 0x37, 0xbc, 0xf0, 0x50, 0xf7, 0xc5,
+	0xd4, 0xb8, 0x28, 0x73, 0x3d, 0x59, 0x7f, 0x33, 0x13, 0xfe, 0x5a, 0x9c,
+	0xbc, 0xe6, 0x3b, 0xba, 0x68, 0xd1, 0xb1, 0x8c, 0xfd, 0xf5, 0x14, 0x4d,
+	0x31, 0x5d, 0xbb, 0xe5, 0x05, 0x8f, 0x27, 0xe0, 0xd9, 0x34, 0xd3, 0x3e,
+	0xdb, 0xcd, 0x59, 0x4b, 0xe6, 0xdd, 0x89, 0xde, 0x73, 0x38, 0x46, 0xdd,
+	0xf1, 0x5f, 0xf5, 0x6b, 0x79, 0x90, 0xcc, 0xa2, 0x8e, 0x90, 0x3f, 0xf3,
+	0x3c, 0x1e, 0xb9, 0xff, 0x39, 0xdc, 0x07, 0xf2, 0x0e, 0x73, 0xe7, 0xe3,
+	0x55, 0xb9, 0xaf, 0x23, 0x7c, 0x6f, 0xd4, 0xdd, 0x1f, 0x2b, 0x4e, 0xf2,
+	0xfe, 0x76, 0x09, 0xde, 0x2c, 0xf7, 0x73, 0x9a, 0xce, 0x79, 0xf2, 0x15,
+	0xb9, 0x2f, 0x49, 0x17, 0x7d, 0x27, 0x05, 0x7d, 0x4f, 0x8b, 0xfd, 0x48,
+	0xe6, 0x0c, 0xd6, 0xd7, 0xb4, 0xef, 0x81, 0xed, 0xec, 0x5c, 0x40, 0xe7,
+	0x06, 0xf2, 0xf7, 0x0f, 0xfb, 0x45, 0x5e, 0x22, 0xec, 0xef, 0x1c, 0x3e,
+	0xa7, 0xe9, 0x79, 0x96, 0xeb, 0x2f, 0x64, 0x5a, 0xe8, 0x6a, 0xb6, 0x85,
+	0xde, 0xc9, 0x0e, 0xd1, 0x95, 0xc5, 0x6e, 0x3a, 0xc7, 0x3a, 0xf3, 0x39,
+	0x27, 0x60, 0xa5, 0xa9, 0x1b, 0xf1, 0x45, 0xe4, 0x0c, 0x31, 0xdd, 0x61,
+	0x3c, 0xf4, 0xbf, 0xe8, 0x5e, 0xc6, 0x39, 0xd6, 0xbd, 0x5b, 0xe9, 0x3d,
+	0x7e, 0x66, 0x3a, 0xa3, 0x73, 0x1d, 0xe0, 0x93, 0x1f, 0x2b, 0xeb, 0xaf,
+	0x5b, 0xe3, 0x88, 0xb9, 0x05, 0x8e, 0x4c, 0x8b, 0xf8, 0xd6, 0xc2, 0x22,
+	0xff, 0xbe, 0x08, 0xff, 0x39, 0xc3, 0x9b, 0xf9, 0xf3, 0x93, 0x01, 0x8c,
+	0xc7, 0x39, 0x47, 0xe6, 0x4a, 0x8a, 0xb5, 0x85, 0xf8, 0xd8, 0x27, 0x6a,
+	0xa4, 0x25, 0x1c, 0x5a, 0x79, 0x7d, 0x3e, 0x31, 0x3e, 0xb9, 0xda, 0x4a,
+	0xf3, 0x39, 0xd6, 0x41, 0x72, 0x7e, 0xb6, 0x61, 0x30, 0xf6, 0xef, 0x54,
+	0x6f, 0x61, 0xdc, 0xbf, 0x8b, 0xd2, 0x62, 0x1c, 0x7f, 0xae, 0x76, 0xd1,
+	0x42, 0xae, 0x43, 0x1d, 0xdf, 0x2b, 0x72, 0xdc, 0x65, 0x1f, 0x23, 0xfc,
+	0xb6, 0x19, 0x7f, 0x7b, 0x97, 0x71, 0x0a, 0x32, 0x55, 0xda, 0xa5, 0xe0,
+	0x35, 0x97, 0xeb, 0x7a, 0x22, 0x03, 0xe7, 0xa6, 0xe8, 0x25, 0x96, 0xb7,
+	0x23, 0x2f, 0xc3, 0x7f, 0xfc, 0x38, 0xf0, 0x26, 0x9f, 0xa2, 0x41, 0x3e,
+	0x46, 0x5f, 0x24, 0xbf, 0xa8, 0x73, 0x8a, 0x05, 0x27, 0x44, 0x6d, 0x88,
+	0xa4, 0xd1, 0x59, 0xd1, 0x8b, 0xee, 0x2d, 0xc1, 0x9b, 0xec, 0x94, 0x65,
+	0x40, 0x1f, 0x81, 0x0f, 0x46, 0xe6, 0x60, 0x1d, 0x77, 0x7a, 0xde, 0xed,
+	0x9b, 0x19, 0xa7, 0x48, 0x3f, 0xf0, 0x5e, 0xd2, 0xac, 0xea, 0x2f, 0x20,
+	0xf8, 0xbd, 0xb9, 0x4f, 0xd7, 0x4b, 0xea, 0x63, 0x2d, 0x2b, 0xf4, 0x71,
+	0x47, 0xcd, 0xef, 0x66, 0xcd, 0xef, 0xe5, 0x7c, 0x39, 0x96, 0x79, 0x2c,
+	0xe7, 0x49, 0xf6, 0x28, 0x4a, 0x2e, 0x4b, 0xfc, 0x33, 0xf7, 0x8d, 0x99,
+	0x8f, 0x2a, 0x1d, 0x3c, 0xb9, 0x36, 0x1a, 0xea, 0x31, 0x26, 0x8c, 0xe4,
+	0xe4, 0x3f, 0x96, 0x22, 0x09, 0xe8, 0x45, 0x4f, 0xee, 0x51, 0xf9, 0xa7,
+	0x3c, 0xaf, 0x54, 0x18, 0xaa, 0xdb, 0xec, 0x5a, 0x07, 0xad, 0x8b, 0x9e,
+	0x5c, 0x42, 0xc7, 0xe0, 0xeb, 0x71, 0x9f, 0x94, 0xd9, 0x44, 0xe8, 0x73,
+	0x0e, 0x1a, 0xdf, 0x1f, 0xba, 0xc4, 0xfb, 0x19, 0x5f, 0xfb, 0x69, 0xe9,
+	0xa4, 0xe8, 0x71, 0x83, 0xb1, 0x5d, 0x34, 0x27, 0x74, 0x7e, 0xd6, 0x5f,
+	0xaa, 0xec, 0xaa, 0x29, 0xcc, 0x33, 0x85, 0xd8, 0x8a, 0xe1, 0xfc, 0xbe,
+	0x2f, 0x99, 0x97, 0xb1, 0xf2, 0x78, 0x4d, 0xac, 0x7c, 0x56, 0xc4, 0xca,
+	0x11, 0x27, 0x07, 0x5c, 0x01, 0x4b, 0xaf, 0x9c, 0x16, 0xec, 0x63, 0x98,
+	0x90, 0x1b, 0x7e, 0xee, 0xa2, 0xe0, 0x37, 0xe1, 0x98, 0x5f, 0xe6, 0x57,
+	0xc7, 0x79, 0xc6, 0x06, 0x5d, 0x60, 0x7c, 0xb0, 0x27, 0x36, 0x58, 0x97,
+	0x58, 0xc9, 0x7e, 0x99, 0xae, 0xe4, 0x9b, 0x58, 0xd7, 0x5b, 0xa0, 0x8d,
+	0x3c, 0xb1, 0x4e, 0xd8, 0x4d, 0x0b, 0x61, 0xc6, 0xb1, 0x89, 0x36, 0xde,
+	0x4f, 0xd6, 0x6b, 0x27, 0x98, 0xee, 0x78, 0xee, 0x2b, 0xb9, 0xd2, 0x8f,
+	0xd2, 0xe1, 0x88, 0x15, 0x9d, 0xea, 0x60, 0xbb, 0xc5, 0xe4, 0x7f, 0x87,
+	0xff, 0x77, 0x85, 0x00, 0x93, 0xc2, 0x2a, 0x7e, 0x67, 0x9d, 0x27, 0x53,
+	0xfa, 0xd1, 0x1c, 0x8f, 0x99, 0x9b, 0x82, 0xfd, 0x03, 0x3b, 0xcf, 0xe1,
+	0x7f, 0x39, 0x66, 0x65, 0x95, 0xf1, 0xfb, 0x62, 0x2a, 0x64, 0x08, 0xde,
+	0xbe, 0xce, 0x3c, 0xfe, 0x02, 0xcd, 0xf1, 0x1c, 0xae, 0x10, 0xae, 0xb5,
+	0x28, 0x19, 0xde, 0xc7, 0x78, 0xdf, 0xcd, 0x9f, 0xa8, 0xbf, 0x6a, 0xa7,
+	0x85, 0xc9, 0x31, 0x55, 0x7f, 0xf5, 0xbd, 0x06, 0xf5, 0x57, 0xb8, 0x8e,
+	0xe5, 0xfe, 0x62, 0xe9, 0xe6, 0x5c, 0xd8, 0xfd, 0x3c, 0x32, 0x92, 0xe1,
+	0x4e, 0xa1, 0x23, 0xad, 0xac, 0xfa, 0xf8, 0xd9, 0x11, 0x2b, 0x39, 0xc5,
+	0x73, 0xcc, 0xb9, 0xe7, 0x5d, 0xba, 0x19, 0x0b, 0x63, 0x9c, 0xbf, 0x66,
+	0x1c, 0xdb, 0xca, 0x53, 0x72, 0x3d, 0x85, 0x5c, 0xe9, 0xe7, 0xd1, 0xb0,
+	0x5e, 0x9f, 0xfb, 0x5a, 0xac, 0x03, 0xf4, 0xc4, 0x9f, 0x2b, 0x5d, 0xbe,
+	0x2b, 0x59, 0xd8, 0xdf, 0x06, 0xe3, 0x39, 0x66, 0x34, 0x44, 0xa9, 0x15,
+	0xa6, 0xef, 0x8b, 0x1d, 0xbe, 0x8d, 0xec, 0x95, 0x52, 0xb2, 0x2a, 0x97,
+	0xa5, 0xda, 0xef, 0x2e, 0x6d, 0xae, 0x21, 0x72, 0x96, 0x20, 0x33, 0x21,
+	0x2f, 0x53, 0x25, 0xbf, 0x03, 0xfd, 0x0e, 0xb6, 0xd0, 0x59, 0xe6, 0x57,
+	0x32, 0x1f, 0x89, 0x79, 0x27, 0xf3, 0x2c, 0x49, 0x2f, 0xf1, 0xaa, 0xd7,
+	0x0d, 0x48, 0x9c, 0x1d, 0xa9, 0xe4, 0x41, 0xba, 0xe2, 0xe9, 0x01, 0x57,
+	0x3c, 0xdd, 0x74, 0xe5, 0x41, 0x06, 0x85, 0x3e, 0x56, 0xd1, 0xa1, 0x82,
+	0x4a, 0x87, 0x82, 0xae, 0x25, 0x79, 0x59, 0xa1, 0xcc, 0xcb, 0x76, 0x6f,
+	0xc1, 0xcb, 0xbc, 0x6c, 0xd3, 0x75, 0xc5, 0x37, 0xec, 0x30, 0xe4, 0xfc,
+	0xe5, 0xe2, 0x34, 0xbd, 0xcd, 0x3c, 0xe2, 0xad, 0x62, 0x98, 0xf9, 0xc6,
+	0x24, 0xf3, 0x8d, 0x09, 0xe6, 0x1b, 0x0e, 0xc3, 0xc0, 0xe2, 0xb5, 0x5f,
+	0xf3, 0x5d, 0x59, 0x84, 0xbc, 0x98, 0xa2, 0xe7, 0x8b, 0xe0, 0xc1, 0x93,
+	0xac, 0xf3, 0x5c, 0xf3, 0x6d, 0x2c, 0x76, 0x31, 0xbe, 0x4a, 0x3d, 0xa7,
+	0xda, 0x8e, 0x41, 0xaf, 0x15, 0xf8, 0x87, 0xaf, 0x82, 0xcf, 0xbc, 0x91,
+	0xa2, 0x4e, 0x86, 0x3d, 0xe0, 0xbc, 0x8e, 0xde, 0x14, 0xaf, 0x81, 0x96,
+	0xd1, 0x13, 0xf8, 0xbb, 0xe3, 0x53, 0x3c, 0xf7, 0x4e, 0xdf, 0x02, 0xef,
+	0xcb, 0xd3, 0xe1, 0x94, 0xd9, 0xcb, 0x38, 0x7f, 0xac, 0x82, 0xf3, 0xa9,
+	0x34, 0xaf, 0xa0, 0x67, 0xb9, 0x9b, 0xc6, 0x0e, 0x44, 0xf7, 0xf6, 0x30,
+	0x9d, 0x22, 0x37, 0xa2, 0xd2, 0xa7, 0xc7, 0x4f, 0x27, 0x83, 0x6d, 0xaa,
+	0xbf, 0x8f, 0xc5, 0xf2, 0xf1, 0x03, 0xbe, 0xcf, 0x2d, 0x5f, 0x3a, 0xfb,
+	0x2a, 0x3f, 0x03, 0xc7, 0x5f, 0x85, 0xff, 0x93, 0xed, 0x83, 0x56, 0xe1,
+	0x3f, 0x2a, 0x88, 0xb1, 0x38, 0xb6, 0x27, 0x98, 0x97, 0x85, 0xd7, 0x0d,
+	0x7b, 0x3a, 0x62, 0x30, 0xd1, 0x75, 0x99, 0xbc, 0xde, 0xd2, 0xa0, 0x8c,
+	0xc1, 0xed, 0xdd, 0x2b, 0xf9, 0x06, 0xe3, 0x66, 0x30, 0x22, 0x6c, 0xb4,
+	0xa6, 0x25, 0x29, 0x27, 0x0b, 0xbc, 0xcf, 0x2b, 0xe1, 0x09, 0xde, 0xe7,
+	0x0e, 0x25, 0x23, 0x53, 0xfc, 0xbb, 0x90, 0xbf, 0x2c, 0x2b, 0x87, 0xd0,
+	0xb3, 0xda, 0x14, 0xfd, 0x20, 0x66, 0xd1, 0x6f, 0xa7, 0x83, 0xef, 0x6b,
+	0x33, 0xd6, 0x82, 0x4f, 0x7c, 0xe0, 0x4b, 0x66, 0xf1, 0x5c, 0xe0, 0x21,
+	0x7f, 0xcf, 0x4f, 0xd1, 0x85, 0x8c, 0x9e, 0xc3, 0x80, 0x61, 0xbc, 0x84,
+	0x79, 0xf8, 0x68, 0xb7, 0xf3, 0x43, 0x86, 0x17, 0x1f, 0xff, 0x71, 0xed,
+	0x9c, 0x86, 0xd5, 0x9c, 0xd0, 0xd3, 0xb2, 0x05, 0x3d, 0x7c, 0x08, 0xbd,
+	0x8f, 0x0a, 0xa2, 0xe7, 0x64, 0xb3, 0xb0, 0x4d, 0x0b, 0xc2, 0xc6, 0x28,
+	0x85, 0x2a, 0x7d, 0x30, 0xef, 0xa9, 0x39, 0xf7, 0x13, 0x5f, 0x7a, 0xf1,
+	0xa0, 0xd0, 0xc5, 0x46, 0x0e, 0xec, 0x55, 0xf5, 0xa7, 0x5d, 0xe2, 0xbe,
+	0xc6, 0x32, 0x7e, 0x7b, 0x50, 0xfd, 0xf6, 0x49, 0xa1, 0x03, 0x23, 0x2f,
+	0x2e, 0xb0, 0x24, 0xf0, 0x9c, 0xf7, 0xd7, 0x99, 0x60, 0x3c, 0x0f, 0xad,
+	0xc0, 0x77, 0x2f, 0xe0, 0xa9, 0xe1, 0x01, 0x58, 0x00, 0xf7, 0x3b, 0x14,
+	0xde, 0xdb, 0x56, 0xdc, 0xaf, 0xd7, 0xdd, 0x08, 0xce, 0xac, 0xd3, 0x64,
+	0xb0, 0x56, 0xac, 0x69, 0x8f, 0x2f, 0x92, 0xb7, 0x8c, 0xf4, 0x22, 0x6c,
+	0x1a, 0xd4, 0xb5, 0xdc, 0x85, 0xbc, 0x29, 0x9e, 0xc3, 0x1e, 0x8a, 0x24,
+	0x30, 0x2f, 0x8c, 0xd3, 0x30, 0xf8, 0xb7, 0x1a, 0x58, 0xb8, 0xaf, 0xeb,
+	0x56, 0xd7, 0xb5, 0x8a, 0xbd, 0x20, 0x03, 0xcf, 0xd1, 0xcf, 0xc6, 0x73,
+	0xf1, 0x7c, 0x5c, 0x87, 0xfb, 0xc9, 0xfb, 0xf6, 0x31, 0x7f, 0x8e, 0x4e,
+	0xc9, 0x7b, 0x19, 0x97, 0xe4, 0x6f, 0x7d, 0x8e, 0xf7, 0x7c, 0xe5, 0xfe,
+	0xf9, 0x54, 0xbf, 0x1e, 0xec, 0x5f, 0x37, 0xe5, 0x85, 0x8f, 0x13, 0xbf,
+	0x75, 0x8a, 0xdf, 0xa2, 0x4e, 0xa7, 0xd8, 0xd7, 0xf3, 0x7c, 0x3c, 0x9f,
+	0xed, 0xf2, 0xc1, 0x36, 0x4f, 0x27, 0x3a, 0x7d, 0xf9, 0x3c, 0xd6, 0xdb,
+	0xe9, 0x8b, 0x33, 0xee, 0xc7, 0xb2, 0xf1, 0xd2, 0x82, 0xe0, 0x31, 0xac,
+	0xd3, 0xf6, 0xda, 0xe6, 0x49, 0xe3, 0x4f, 0x86, 0x64, 0x6f, 0x5b, 0x7c,
+	0x67, 0xfa, 0xcb, 0x30, 0xfd, 0x65, 0x98, 0xfe, 0x32, 0x4c, 0x7f, 0x19,
+	0xa6, 0x3f, 0xb6, 0x4b, 0xdf, 0x64, 0x99, 0xf1, 0x6d, 0x96, 0x19, 0x92,
+	0x66, 0x23, 0xca, 0x8f, 0xa9, 0x69, 0xb6, 0xb6, 0x3e, 0x53, 0xd3, 0x28,
+	0xe4, 0x34, 0xf9, 0x0e, 0x8f, 0x57, 0xd3, 0xea, 0x55, 0xa6, 0xd5, 0xa6,
+	0x99, 0x7e, 0xba, 0x91, 0xc3, 0x9e, 0xd9, 0xd6, 0x79, 0xe6, 0xd1, 0x71,
+	0x3f, 0x74, 0xaa, 0x00, 0xd3, 0x13, 0x74, 0x4a, 0x9b, 0xe1, 0xde, 0x4f,
+	0x37, 0x99, 0x4f, 0xdf, 0xc8, 0x81, 0x76, 0xef, 0x52, 0xc7, 0x19, 0xa6,
+	0x5d, 0xc8, 0xb9, 0x25, 0xdf, 0xd5, 0xac, 0xc1, 0xba, 0x57, 0xc0, 0x4c,
+	0x12, 0xf8, 0xa8, 0xd0, 0xc7, 0x78, 0xdf, 0xd7, 0x99, 0xdf, 0xc3, 0x57,
+	0x87, 0xbe, 0x5f, 0x79, 0x1f, 0xcb, 0x89, 0xd0, 0x15, 0xe6, 0xa3, 0xa7,
+	0x73, 0x4b, 0x4c, 0xef, 0xbd, 0xf4, 0x85, 0x1c, 0xe4, 0x31, 0x60, 0xc4,
+	0xc7, 0x79, 0x12, 0x3e, 0x30, 0x63, 0x06, 0x6b, 0x1f, 0x4b, 0x19, 0x02,
+	0x4f, 0x9e, 0x01, 0x1c, 0x18, 0xf6, 0x67, 0xf6, 0xa2, 0x67, 0x7d, 0xc4,
+	0x68, 0x56, 0x3e, 0x45, 0x7c, 0xc7, 0x78, 0x8c, 0x05, 0xdc, 0x70, 0xdc,
+	0x28, 0xfe, 0x88, 0xf7, 0x42, 0x84, 0x19, 0x1e, 0xb5, 0x7c, 0xeb, 0x02,
+	0x7a, 0x91, 0x02, 0x5e, 0xd3, 0x51, 0x3f, 0x6a, 0xc5, 0xe9, 0x39, 0xbc,
+	0xff, 0xe0, 0x85, 0x22, 0xe6, 0xbd, 0x48, 0x0b, 0x41, 0xf0, 0x21, 0x3b,
+	0x7c, 0x9d, 0x24, 0xec, 0x5a, 0x59, 0xbf, 0xfc, 0xbc, 0x37, 0x6f, 0xb3,
+	0xa2, 0x42, 0x1f, 0x6e, 0x61, 0xfb, 0x06, 0xb0, 0x79, 0x8b, 0x71, 0x2d,
+	0x0c, 0x9b, 0x5f, 0xf1, 0xb5, 0x37, 0x99, 0xe7, 0x60, 0xcf, 0x3a, 0x85,
+	0x8c, 0xf1, 0xe2, 0x65, 0x1b, 0x8a, 0x97, 0x39, 0x2e, 0x5e, 0x96, 0x2e,
+	0xf3, 0x32, 0xc6, 0x09, 0xc1, 0xc3, 0xc0, 0xa3, 0x66, 0x59, 0x4f, 0x94,
+	0xdf, 0xa1, 0xff, 0xed, 0x16, 0x3c, 0x8b, 0x79, 0x3d, 0xdb, 0x0d, 0x85,
+	0x62, 0xca, 0x77, 0x48, 0xf0, 0x0e, 0x8d, 0xd7, 0xff, 0xa3, 0xe8, 0xa1,
+	0x55, 0xf0, 0x81, 0xf4, 0x2c, 0xf8, 0x95, 0xd7, 0xf8, 0xff, 0x02, 0x6c,
+	0x79, 0xbc, 0x13, 0x7a, 0x8d, 0xf9, 0x58, 0x21, 0x0c, 0x9b, 0xb5, 0x43,
+	0xd9, 0x36, 0xe8, 0xbb, 0xb5, 0x07, 0xb9, 0x96, 0x56, 0xb4, 0xcc, 0xc7,
+	0x76, 0x2b, 0xbf, 0x05, 0xfc, 0x8c, 0xd8, 0xeb, 0x3a, 0x5d, 0xc0, 0x82,
+	0x2e, 0xc0, 0x63, 0x03, 0x0c, 0x1f, 0xd1, 0x1b, 0x9c, 0xe8, 0x16, 0xc3,
+	0x01, 0xfb, 0x7c, 0x0b, 0xfb, 0xcc, 0xba, 0x2c, 0x05, 0xe6, 0xa6, 0x02,
+	0x03, 0x98, 0xdf, 0xc2, 0x6a, 0x85, 0x1f, 0x9e, 0xcf, 0x0c, 0x18, 0x85,
+	0xac, 0x9c, 0xe3, 0xca, 0xb8, 0xe4, 0x79, 0x85, 0x3c, 0x7a, 0x7b, 0x89,
+	0xb9, 0xf2, 0x1c, 0xf5, 0xfa, 0x04, 0xff, 0x52, 0x74, 0xbf, 0x1d, 0x5a,
+	0x4b, 0x30, 0x5d, 0x61, 0x4f, 0x52, 0x2e, 0x9c, 0x79, 0x94, 0x9f, 0x8f,
+	0x73, 0x8d, 0xd7, 0x71, 0xb3, 0xbc, 0x8e, 0x08, 0xaf, 0x03, 0x63, 0x6f,
+	0xf9, 0x6e, 0xa8, 0x75, 0xdc, 0x28, 0xaf, 0x63, 0x56, 0xad, 0x83, 0xd2,
+	0xc6, 0xcc, 0x6e, 0xa5, 0xc7, 0x6f, 0x79, 0xcf, 0xd6, 0x28, 0xeb, 0x27,
+	0xe9, 0x55, 0xc0, 0xf3, 0x1e, 0x85, 0x2f, 0x6e, 0x7f, 0xa8, 0x7b, 0x6e,
+	0xf6, 0xc4, 0x75, 0xfa, 0x5d, 0xba, 0x29, 0xf4, 0x93, 0x61, 0xd6, 0x4f,
+	0x70, 0x9e, 0x16, 0xc0, 0x87, 0xd3, 0x41, 0xf4, 0x9b, 0x1d, 0x64, 0x98,
+	0xb1, 0x5d, 0x35, 0xc5, 0x9f, 0xc2, 0x4f, 0x86, 0xfb, 0xe8, 0xeb, 0xbf,
+	0x48, 0x37, 0x17, 0xc1, 0xab, 0xa1, 0x8f, 0xca, 0x9e, 0xb4, 0x37, 0xd7,
+	0xa4, 0x9f, 0x36, 0xee, 0xe9, 0xa7, 0x85, 0x8f, 0x36, 0x0c, 0x7d, 0xdd,
+	0x84, 0x3f, 0x37, 0x26, 0xde, 0x67, 0xc1, 0xc7, 0x45, 0xdc, 0xcb, 0x8b,
+	0xef, 0x4c, 0xbb, 0x72, 0xdc, 0x90, 0x73, 0x92, 0x62, 0x3e, 0xe2, 0x98,
+	0x4d, 0x86, 0xac, 0x9d, 0xb9, 0x5c, 0xd4, 0x3a, 0x51, 0x9c, 0xf7, 0xc8,
+	0x09, 0x1b, 0x46, 0x44, 0xf8, 0x0c, 0x5a, 0x9d, 0x0e, 0x6a, 0x61, 0x39,
+	0x79, 0x8a, 0xd0, 0x13, 0xcd, 0xb6, 0xe0, 0xcb, 0xbf, 0xc0, 0xb8, 0xb7,
+	0x10, 0xb6, 0x43, 0x9f, 0x13, 0xf6, 0x25, 0xe4, 0x07, 0xde, 0xa7, 0x02,
+	0x18, 0x63, 0x0e, 0xfc, 0x7d, 0x15, 0xfd, 0x30, 0xc3, 0xbc, 0x7e, 0xf8,
+	0x81, 0x47, 0xad, 0x77, 0x58, 0xee, 0x5c, 0x10, 0xfe, 0x95, 0xb3, 0x94,
+	0x66, 0x3a, 0x3c, 0x2c, 0xe8, 0xd0, 0x18, 0x66, 0x6a, 0x61, 0xfa, 0x41,
+	0x8e, 0xc1, 0x98, 0xe8, 0xbf, 0x23, 0x6d, 0x16, 0x5e, 0xe5, 0x9a, 0xea,
+	0x6b, 0x90, 0x00, 0x6f, 0xd8, 0xbe, 0x6f, 0x21, 0xf1, 0x91, 0x7d, 0x2a,
+	0x6e, 0x5d, 0xab, 0xd6, 0x87, 0x0d, 0xfb, 0xcc, 0x12, 0x7d, 0x1f, 0x01,
+	0x3b, 0xe1, 0x17, 0x34, 0x26, 0x19, 0x6e, 0xfa, 0xdd, 0x35, 0x6e, 0xfb,
+	0xff, 0x29, 0x51, 0x9f, 0xff, 0x46, 0x51, 0xca, 0xd8, 0x34, 0xdb, 0xe6,
+	0x0b, 0x07, 0xdc, 0x3a, 0x87, 0x9d, 0x8d, 0x09, 0x9f, 0xcc, 0x00, 0x45,
+	0x97, 0x27, 0xe9, 0xb1, 0x0c, 0x78, 0x14, 0x5d, 0x8f, 0x3a, 0x78, 0xc3,
+	0x06, 0x68, 0x79, 0x92, 0xe2, 0x45, 0xc0, 0xc8, 0x47, 0x0b, 0x2c, 0x05,
+	0xd2, 0x59, 0xc4, 0xee, 0xf9, 0x7b, 0x1e, 0xef, 0x57, 0xf9, 0x15, 0xe5,
+	0xf7, 0x1e, 0xa2, 0xd8, 0x32, 0xa5, 0x92, 0xe1, 0x87, 0x45, 0xcf, 0xea,
+	0x64, 0x78, 0x5c, 0xf9, 0x68, 0x42, 0x7c, 0x1e, 0x7e, 0x2f, 0x8b, 0x1e,
+	0xcd, 0xd8, 0xa9, 0x24, 0x49, 0xdf, 0x03, 0xf1, 0x1c, 0x0c, 0x96, 0xad,
+	0xbb, 0x99, 0x57, 0x1c, 0x17, 0xfe, 0x07, 0xd6, 0x44, 0x16, 0x31, 0x1e,
+	0xbe, 0x83, 0x5e, 0x82, 0xbd, 0x95, 0xcc, 0x3e, 0xa0, 0xc6, 0x96, 0xc8,
+	0x64, 0x5c, 0x30, 0x7f, 0xc9, 0x49, 0x85, 0x8d, 0xca, 0xf5, 0xf0, 0x5d,
+	0x1c, 0x17, 0xfa, 0xe1, 0x30, 0xdb, 0x30, 0x62, 0x5c, 0x69, 0x4e, 0xf8,
+	0x21, 0xf8, 0x38, 0xff, 0xd3, 0x01, 0xfd, 0x6e, 0x03, 0x9c, 0x97, 0xfe,
+	0x09, 0xbe, 0x67, 0x9e, 0xe7, 0x51, 0x95, 0x17, 0x3f, 0x44, 0x91, 0x1d,
+	0xf8, 0x8b, 0x66, 0xef, 0xa8, 0xbf, 0x88, 0x61, 0xcd, 0xb2, 0xe5, 0x32,
+	0xd3, 0xc6, 0xdb, 0x5b, 0xda, 0x71, 0xef, 0x6b, 0x19, 0xcd, 0xb0, 0x32,
+	0xc5, 0xfb, 0x2f, 0xd0, 0xeb, 0x73, 0xa1, 0xf8, 0x29, 0xbc, 0x47, 0xc6,
+	0x97, 0x10, 0x3a, 0x6f, 0x88, 0x75, 0x17, 0xe8, 0x30, 0xa3, 0x22, 0xbe,
+	0x15, 0x79, 0xc2, 0x32, 0x16, 0xd6, 0xfa, 0xc9, 0x0f, 0xbf, 0x9a, 0xa3,
+	0x73, 0x22, 0x5a, 0x45, 0xfe, 0xba, 0x8c, 0x2b, 0x42, 0xfe, 0x82, 0x07,
+	0xfe, 0xc4, 0x97, 0x5c, 0xf3, 0xf7, 0xe9, 0x7c, 0xb7, 0x48, 0xb0, 0x9c,
+	0x4f, 0xa3, 0x78, 0x8a, 0xc6, 0x3d, 0x1d, 0xb3, 0x70, 0xbf, 0xd7, 0x0a,
+	0xb4, 0xeb, 0xd6, 0x19, 0xe0, 0x67, 0x12, 0x7b, 0x74, 0x01, 0x71, 0x5c,
+	0xa3, 0x2a, 0x1e, 0xd1, 0xc2, 0xfb, 0x04, 0x3b, 0x0f, 0xfe, 0xbb, 0xcf,
+	0xf2, 0x27, 0xe2, 0x0a, 0x27, 0x07, 0xa1, 0x27, 0xf5, 0x38, 0x8c, 0x33,
+	0x53, 0x38, 0xee, 0x67, 0xbb, 0x4b, 0xeb, 0xb5, 0xd2, 0xa7, 0xc4, 0xb6,
+	0x98, 0xda, 0x2f, 0xf8, 0x93, 0x46, 0x54, 0xbf, 0x01, 0x9b, 0xac, 0x5e,
+	0xc0, 0xe9, 0xe3, 0xa2, 0xc7, 0xad, 0x62, 0x10, 0xdb, 0xc9, 0x59, 0xc2,
+	0x3b, 0xb7, 0xd0, 0x77, 0xf3, 0x6e, 0xc0, 0x9e, 0xf7, 0xc8, 0x1d, 0xa3,
+	0xf8, 0x94, 0x7a, 0xff, 0xcf, 0x9d, 0xda, 0xb7, 0x5d, 0x1e, 0xfb, 0xf6,
+	0xbd, 0x41, 0x19, 0x03, 0xbb, 0x4b, 0x8d, 0xf1, 0xca, 0x53, 0xfd, 0xfb,
+	0xa7, 0xe1, 0x4f, 0xaa, 0xd4, 0x51, 0x5c, 0x13, 0x7c, 0xa5, 0xde, 0xa7,
+	0x1d, 0x62, 0x7e, 0x2a, 0xe9, 0xf8, 0xb8, 0x07, 0x1d, 0xf7, 0xce, 0x40,
+	0x2f, 0xb9, 0x7d, 0x3a, 0x3e, 0xd6, 0x90, 0x8e, 0xff, 0x75, 0x50, 0xfa,
+	0x54, 0xeb, 0xe9, 0x18, 0xb5, 0x3c, 0xc7, 0x8b, 0x8d, 0xfc, 0x57, 0xd8,
+	0x07, 0xd4, 0xa4, 0xc3, 0xe7, 0x01, 0x58, 0x69, 0xbf, 0x07, 0xe2, 0x7e,
+	0xc0, 0x47, 0xc4, 0x4e, 0xfe, 0x90, 0xe2, 0x8b, 0xb5, 0xb1, 0xd0, 0xcd,
+	0xae, 0xf9, 0x96, 0xc7, 0x35, 0xd0, 0xc5, 0x41, 0x0b, 0x76, 0x48, 0xda,
+	0xf4, 0x1a, 0x5e, 0xef, 0xf9, 0x0e, 0xe5, 0xec, 0x54, 0x9e, 0xe0, 0xa3,
+	0x0e, 0xd2, 0x53, 0x88, 0x2b, 0x2b, 0x1f, 0xf0, 0xd1, 0x8c, 0x5c, 0xb7,
+	0x79, 0x40, 0xe0, 0x03, 0xf4, 0xd5, 0x50, 0xc2, 0x9f, 0xe0, 0x3d, 0x95,
+	0xfe, 0xdf, 0xe4, 0x6a, 0x48, 0xed, 0x13, 0x8f, 0xc5, 0xfd, 0x3c, 0xeb,
+	0xfc, 0xb0, 0x3f, 0xf6, 0xd7, 0xd7, 0xcb, 0x79, 0xc5, 0x90, 0x05, 0x25,
+	0xfa, 0x0f, 0x96, 0x73, 0xfe, 0x03, 0xa6, 0xe8, 0xb9, 0x70, 0xb9, 0x78,
+	0x80, 0xf5, 0x47, 0xec, 0x21, 0x7c, 0x87, 0xda, 0xb7, 0xfb, 0xf6, 0x30,
+	0x75, 0xed, 0x67, 0xa9, 0x6f, 0x90, 0xc3, 0x7a, 0xa3, 0x71, 0x00, 0xf9,
+	0xe1, 0x16, 0x5f, 0x83, 0x5e, 0x51, 0x63, 0x56, 0x9c, 0x3a, 0xe0, 0x4f,
+	0x40, 0x0f, 0x68, 0x2b, 0x5d, 0x45, 0x53, 0xb3, 0x82, 0xa6, 0xe2, 0x6b,
+	0xb3, 0x8a, 0xa6, 0x66, 0x95, 0xff, 0x7c, 0x56, 0xd1, 0xd4, 0xac, 0xa2,
+	0xa9, 0x59, 0x45, 0x53, 0xb3, 0x8c, 0xd7, 0xa3, 0xac, 0xaf, 0x42, 0xf7,
+	0xd0, 0xfe, 0xcb, 0x2e, 0x4a, 0xe6, 0x70, 0x1e, 0xf2, 0xb8, 0x96, 0xae,
+	0x7e, 0x6d, 0x58, 0xfb, 0x47, 0x0b, 0x32, 0xcf, 0x8e, 0x9f, 0x85, 0x3d,
+	0x78, 0x98, 0xe1, 0x77, 0xcd, 0x37, 0xbf, 0x88, 0xb9, 0xfa, 0x28, 0x26,
+	0x7a, 0xc0, 0x36, 0x51, 0xd4, 0xad, 0xe3, 0x9a, 0xa8, 0xeb, 0x92, 0xb6,
+	0x5c, 0xaa, 0x61, 0x8d, 0x97, 0xc6, 0x8b, 0x69, 0xb5, 0x5f, 0xb5, 0x76,
+	0x4e, 0x0b, 0x25, 0xb2, 0x80, 0x2b, 0x72, 0x21, 0x2d, 0xde, 0x1b, 0x01,
+	0xa7, 0x94, 0xe9, 0x01, 0x83, 0xa3, 0x0a, 0x06, 0x4f, 0x8b, 0x35, 0x22,
+	0x97, 0x10, 0x3e, 0xc8, 0xc6, 0x70, 0x48, 0x67, 0x46, 0xf9, 0x3e, 0x8c,
+	0xfb, 0x07, 0x42, 0xcc, 0x83, 0xb6, 0x0b, 0x07, 0xf7, 0xda, 0x1b, 0xf1,
+	0x9a, 0xed, 0xd6, 0xd3, 0x5c, 0x77, 0xc9, 0x8e, 0x90, 0x92, 0x1b, 0x52,
+	0xef, 0xdd, 0xe5, 0xd8, 0x89, 0x14, 0xcf, 0xed, 0x2f, 0xc2, 0x7f, 0x39,
+	0x44, 0x6d, 0x25, 0x3a, 0x12, 0x06, 0x3e, 0x77, 0xb1, 0x5d, 0xc9, 0x73,
+	0x18, 0x2b, 0xd1, 0x85, 0xf0, 0x3e, 0xb6, 0x5d, 0xf6, 0xb3, 0x0e, 0x3a,
+	0xca, 0xff, 0x4e, 0xc4, 0xef, 0xc3, 0xbc, 0x3a, 0xf8, 0xda, 0x7e, 0x32,
+	0x7a, 0x52, 0x66, 0x2b, 0xeb, 0x07, 0x47, 0x2a, 0xf6, 0x88, 0x05, 0xff,
+	0x1c, 0xeb, 0xb6, 0xc6, 0x5c, 0xb8, 0x5b, 0xd5, 0x9c, 0xc1, 0x87, 0x8d,
+	0xf8, 0xd6, 0x3f, 0x97, 0x64, 0xaf, 0x80, 0x21, 0x75, 0xfc, 0xe3, 0x52,
+	0x64, 0x08, 0xc7, 0x78, 0xe7, 0x90, 0x3d, 0x11, 0xf1, 0xfd, 0x58, 0xea,
+	0xf2, 0x3e, 0xfb, 0x88, 0x7c, 0x3f, 0x80, 0x6d, 0x5a, 0x3e, 0x2f, 0xbc,
+	0x97, 0x3a, 0x4f, 0x25, 0x5f, 0x15, 0x74, 0x50, 0xa2, 0x7f, 0x67, 0x9a,
+	0x35, 0x09, 0xb1, 0x8c, 0x29, 0x51, 0x0b, 0x8d, 0x7c, 0xe5, 0xf9, 0x45,
+	0x3d, 0x2f, 0x47, 0xed, 0xf5, 0xfd, 0xc8, 0x37, 0xcb, 0x16, 0x68, 0x73,
+	0x99, 0x01, 0x3f, 0xda, 0xe8, 0xf2, 0x46, 0x4f, 0x50, 0xd4, 0x66, 0x77,
+	0xb3, 0x8e, 0xa3, 0xf3, 0x92, 0xc7, 0xf8, 0xfe, 0x01, 0xf1, 0xbe, 0xb9,
+	0xd8, 0x12, 0xc6, 0x35, 0xd3, 0xc8, 0x72, 0xe9, 0x21, 0xfe, 0x5d, 0xc4,
+	0x11, 0x93, 0xd4, 0xaa, 0x62, 0x04, 0x1d, 0x2a, 0xae, 0x14, 0x62, 0x5a,
+	0xaa, 0xd4, 0x1c, 0x8f, 0x94, 0x7d, 0x6d, 0xc0, 0xf1, 0x5a, 0x5f, 0xdb,
+	0x73, 0x5b, 0xc8, 0x9b, 0xad, 0xf0, 0x1a, 0x39, 0xa5, 0x2d, 0xa4, 0x7c,
+	0x88, 0xd6, 0x02, 0x6d, 0xb7, 0xb6, 0x6e, 0xc7, 0xd7, 0xb4, 0x36, 0xcf,
+	0xac, 0x9f, 0x79, 0xc7, 0x69, 0x53, 0xf8, 0xd4, 0x4c, 0xf3, 0xb9, 0x36,
+	0x96, 0xd9, 0xa8, 0x97, 0x02, 0xbc, 0xfc, 0x43, 0xa8, 0x37, 0x79, 0x32,
+	0xd0, 0x4c, 0xab, 0xab, 0xc8, 0x79, 0x78, 0xfc, 0x2e, 0x99, 0xe7, 0xfb,
+	0x08, 0xc3, 0x65, 0x3f, 0xcb, 0x37, 0x43, 0xc5, 0x70, 0x70, 0x0e, 0xbc,
+	0x41, 0xf4, 0xfd, 0x0c, 0x3c, 0x3c, 0xde, 0xc6, 0x7a, 0xbd, 0x8c, 0x01,
+	0x1c, 0xe4, 0x7b, 0x7f, 0x33, 0xf7, 0x08, 0xfc, 0x59, 0xe6, 0x61, 0xbe,
+	0x7f, 0x8c, 0xf5, 0x81, 0x08, 0x35, 0xd3, 0xca, 0x6a, 0x33, 0xeb, 0xf5,
+	0xcd, 0xac, 0x0f, 0x8c, 0x9a, 0x23, 0x3e, 0xf1, 0x2c, 0x51, 0xdb, 0xf2,
+	0xe9, 0xc0, 0x7e, 0xc6, 0x41, 0x3c, 0xeb, 0x8b, 0xea, 0x59, 0xb5, 0xcf,
+	0xb8, 0x55, 0xc2, 0xf1, 0x61, 0xff, 0xfa, 0x99, 0xab, 0x78, 0x2f, 0xd4,
+	0xe2, 0x34, 0xeb, 0xbe, 0x41, 0xf1, 0x6e, 0x46, 0x63, 0x66, 0x86, 0xed,
+	0x80, 0x30, 0x1f, 0x1f, 0xa1, 0x54, 0x31, 0x41, 0xbf, 0x57, 0x74, 0xfb,
+	0x6a, 0x8f, 0xf0, 0x9c, 0x65, 0x6d, 0x7d, 0x0b, 0xcf, 0xeb, 0x7d, 0xa7,
+	0x96, 0x67, 0xb4, 0x91, 0xff, 0x6b, 0x41, 0x6a, 0x7e, 0x11, 0xbe, 0x91,
+	0x12, 0x65, 0xc3, 0xf6, 0x85, 0xeb, 0xe2, 0xbd, 0x1b, 0x16, 0xbd, 0x22,
+	0xf2, 0x5b, 0xf9, 0x7a, 0xbe, 0xe7, 0x79, 0x8c, 0x7b, 0xc5, 0xa2, 0x2b,
+	0x8e, 0x84, 0xf7, 0x9f, 0x05, 0x82, 0xe4, 0x7f, 0x1d, 0x39, 0x48, 0xd0,
+	0xb5, 0xd6, 0xcf, 0x38, 0xfb, 0x98, 0x5f, 0xbf, 0x88, 0xeb, 0xf8, 0xf3,
+	0x75, 0x1c, 0xb7, 0xf1, 0x3a, 0x21, 0x6f, 0x91, 0x77, 0x02, 0x3e, 0xb7,
+	0x3f, 0x64, 0x0a, 0xfc, 0x3b, 0xc2, 0x38, 0xd5, 0x24, 0x7c, 0x81, 0xbd,
+	0x18, 0xeb, 0x0c, 0xb2, 0x6e, 0xb0, 0x7e, 0x66, 0x7c, 0x1f, 0x8e, 0x23,
+	0x3d, 0x7e, 0x86, 0x91, 0xc4, 0xa1, 0xb0, 0x78, 0xff, 0xa1, 0xeb, 0x2f,
+	0x70, 0x70, 0x9c, 0x78, 0x3f, 0xa1, 0x3f, 0xf0, 0x7e, 0x9a, 0xe8, 0xb3,
+	0xd4, 0x46, 0x71, 0x7e, 0x46, 0x2c, 0x27, 0xd7, 0x7d, 0xbe, 0xe8, 0x27,
+	0xe9, 0x47, 0x6a, 0x1e, 0xd6, 0xef, 0x29, 0xa4, 0x7e, 0xdc, 0x5b, 0xd3,
+	0x0a, 0xbe, 0x77, 0xd1, 0x8d, 0x5c, 0x07, 0xdd, 0x54, 0xb1, 0xa5, 0x1b,
+	0xc2, 0xae, 0x62, 0x9e, 0x9c, 0xe8, 0xa2, 0xeb, 0xab, 0x4d, 0x44, 0xbd,
+	0x6d, 0x22, 0xf6, 0x7b, 0x23, 0x97, 0xc7, 0xf3, 0x87, 0xa5, 0xdf, 0xa5,
+	0x82, 0x23, 0x37, 0x3c, 0x70, 0xe4, 0x3d, 0x81, 0x23, 0xef, 0x6d, 0x81,
+	0x23, 0x7b, 0x95, 0x2d, 0xd1, 0x46, 0xcd, 0x0a, 0x3f, 0x5e, 0x63, 0xfc,
+	0x78, 0x81, 0xf1, 0xe3, 0x50, 0x03, 0xfc, 0x30, 0x6a, 0xf0, 0xe3, 0xb0,
+	0xc0, 0x8f, 0x9f, 0x6d, 0x8a, 0x1f, 0x87, 0xfc, 0x9b, 0xf9, 0x82, 0x34,
+	0x6e, 0x0e, 0xd0, 0x4a, 0xce, 0xa1, 0xd5, 0x45, 0x9b, 0x2d, 0x7b, 0xd8,
+	0xe6, 0x88, 0x19, 0xce, 0x88, 0x7a, 0x97, 0x82, 0xc0, 0x2b, 0x96, 0xe3,
+	0x33, 0xa8, 0x69, 0xaa, 0xdb, 0x03, 0x12, 0xef, 0xa5, 0x14, 0xf0, 0x97,
+	0x7b, 0x12, 0xcb, 0xac, 0x9f, 0xf9, 0x73, 0xde, 0xc7, 0x2b, 0x6b, 0x81,
+	0x00, 0x7e, 0xf3, 0xcf, 0x04, 0x69, 0x63, 0x8d, 0xed, 0x54, 0xc6, 0xb1,
+	0xab, 0xb9, 0x21, 0xba, 0x92, 0x1b, 0xa0, 0x8d, 0xdc, 0x30, 0xbd, 0x93,
+	0xc3, 0x33, 0x00, 0x73, 0x3e, 0x16, 0x30, 0x37, 0xe8, 0x60, 0x90, 0xc7,
+	0xac, 0x0e, 0xd0, 0xfa, 0xaa, 0xc6, 0x57, 0xe0, 0x2a, 0xf6, 0x3f, 0xd2,
+	0x23, 0xeb, 0xd0, 0xea, 0x71, 0x20, 0x56, 0x85, 0x03, 0xf2, 0x1a, 0xec,
+	0xfd, 0x42, 0x7d, 0x0d, 0x6d, 0xab, 0x39, 0x83, 0x1c, 0xb8, 0x36, 0xb6,
+	0xc9, 0x6d, 0xe1, 0x73, 0x3d, 0xe8, 0x87, 0x4e, 0x6b, 0xdc, 0x4d, 0x5d,
+	0xbc, 0x07, 0x0e, 0xf2, 0x87, 0x86, 0x59, 0x3f, 0xed, 0x16, 0xfa, 0x68,
+	0xd4, 0x09, 0x84, 0x62, 0x54, 0x3a, 0x6b, 0x38, 0xe8, 0x93, 0xf8, 0x08,
+	0xdf, 0xcf, 0x50, 0x7e, 0x9e, 0x4e, 0x17, 0x3e, 0xd5, 0xea, 0x9e, 0x88,
+	0xd1, 0x9e, 0xe0, 0x39, 0x43, 0x4e, 0x56, 0xe2, 0x22, 0x54, 0x8e, 0x8b,
+	0xb4, 0xf2, 0xba, 0x25, 0x2d, 0xcd, 0x39, 0x3c, 0xae, 0xc8, 0xe3, 0x8a,
+	0x88, 0xa9, 0xf1, 0xf9, 0x55, 0xc4, 0x73, 0x87, 0x68, 0x63, 0x11, 0x34,
+	0x07, 0xff, 0x44, 0x25, 0x86, 0xba, 0xb1, 0x86, 0xf3, 0xf0, 0x51, 0x54,
+	0x62, 0xa8, 0x1b, 0x2a, 0x86, 0xba, 0xb1, 0x36, 0x2d, 0xf8, 0xf0, 0x42,
+	0x8e, 0x79, 0x40, 0xce, 0xaf, 0xf2, 0x07, 0xf7, 0xa9, 0x77, 0xf6, 0x9c,
+	0x10, 0x3e, 0xe4, 0x1e, 0x67, 0x73, 0x18, 0x1e, 0xac, 0x83, 0xe1, 0xb4,
+	0xd0, 0x83, 0xe2, 0x7c, 0xcf, 0x58, 0xee, 0x04, 0xc3, 0x73, 0x96, 0x69,
+	0x69, 0xb7, 0xa2, 0x25, 0x1d, 0x93, 0xed, 0x26, 0xf5, 0xfe, 0x1f, 0xa1,
+	0xeb, 0x4b, 0xfe, 0x33, 0x54, 0xc3, 0x7f, 0x28, 0x10, 0x1d, 0x97, 0xd7,
+	0xa7, 0x8b, 0xaf, 0x0c, 0x6b, 0xff, 0x5b, 0x9a, 0xef, 0xbb, 0x90, 0xdb,
+	0x49, 0x4c, 0x97, 0xe5, 0xa6, 0x67, 0xce, 0xe0, 0x76, 0x9f, 0xad, 0x71,
+	0xe1, 0xc4, 0x6d, 0xe0, 0x93, 0xbc, 0x47, 0x05, 0x9f, 0xfe, 0x77, 0x16,
+	0xc0, 0xb2, 0x93, 0x05, 0x98, 0x57, 0x84, 0x80, 0xf5, 0x03, 0x03, 0xb4,
+	0x8e, 0x39, 0x00, 0x1e, 0x53, 0x68, 0x02, 0xe6, 0x19, 0xa7, 0xf5, 0x40,
+	0xfb, 0xfb, 0x95, 0xc1, 0xeb, 0x5d, 0x1b, 0xa0, 0x67, 0x4f, 0x2d, 0xea,
+	0x79, 0x2c, 0x07, 0xca, 0xa3, 0x4e, 0x2a, 0x0c, 0x24, 0xe6, 0x27, 0x90,
+	0xff, 0x80, 0xfe, 0x00, 0xf9, 0x11, 0x98, 0x9f, 0x9c, 0x81, 0x72, 0xa0,
+	0x35, 0x53, 0xcd, 0x6b, 0x40, 0xfa, 0x40, 0x61, 0x08, 0x2a, 0x53, 0x41,
+	0x63, 0x1d, 0x40, 0xf6, 0x12, 0x21, 0x68, 0xd8, 0x01, 0x69, 0x20, 0xbb,
+	0x79, 0x8a, 0x08, 0x98, 0x9f, 0x14, 0x20, 0xc4, 0xd0, 0x00, 0xcf, 0x4f,
+	0xec, 0x40, 0x97, 0xc2, 0xdc, 0xf4, 0xff, 0xff, 0x31, 0x15, 0x16, 0x60,
+	0xda, 0x03, 0xad, 0xf9, 0xfc, 0xfd, 0xff, 0x80, 0x08, 0x0b, 0x43, 0x0b,
+	0x7c, 0xed, 0x9e, 0xb0, 0x3c, 0xa8, 0x9c, 0x5b, 0x00, 0x64, 0xb5, 0xc1,
+	0xeb, 0x6d, 0x16, 0xf0, 0x7d, 0xc4, 0x0b, 0x18, 0x7e, 0x01, 0xcb, 0x95,
+	0xff, 0xff, 0x97, 0xc2, 0xd5, 0x82, 0x00, 0x00, 0xd4, 0xc2, 0xcb, 0x42,
+	0x60, 0x7c, 0x00, 0x00, 0x00 };
+static u32 bnx2_COM_b09FwData[(0x0/4) + 1] = { 0x0 };
+static u32 bnx2_COM_b09FwRodata[(0x88/4) + 1] = {
+	0x08001ad8, 0x08001b14, 0x08001b14, 0x08001b14, 0x08001b14, 0x08001b14,
+	0x08001a24, 0x08001b14, 0x08001a98, 0x08001b14, 0x080019ac, 0x08001b14,
+	0x08001b14, 0x08001b14, 0x080019b8, 0x0, 0x08002a2c, 0x08002a7c,
+	0x08002aac, 0x08002adc, 0x08002b0c, 0x0, 0x08005fac, 0x08005fac,
+	0x08005fac, 0x08005fac, 0x08005fac, 0x08005fd8, 0x08005fd8, 0x08006018,
+	0x08006024, 0x08006024, 0x08005fac, 0x0, 0x0 };
+static u32 bnx2_COM_b09FwBss[(0x88/4) + 1] = { 0x0 };
+static u32 bnx2_COM_b09FwSbss[(0x5c/4) + 1] = { 0x0 };
+
+static struct fw_info bnx2_com_fw_09 = {
+	.ver_major			= 0x1,
+	.ver_minor			= 0x0,
+	.ver_fix			= 0x0,
+
+	.start_addr			= 0x080000b0,
+
+	.text_addr			= 0x08000000,
+	.text_len			= 0x7c5c,
+	.text_index			= 0x0,
+	.gz_text			= bnx2_COM_b09FwText,
+	.gz_text_len			= sizeof(bnx2_COM_b09FwText),
+
+	.data_addr			= 0x08007d00,
+	.data_len			= 0x0,
+	.data_index			= 0x0,
+	.data				= bnx2_COM_b09FwData,
+
+	.sbss_addr			= 0x08007d00,
+	.sbss_len			= 0x5c,
+	.sbss_index			= 0x0,
+	.sbss				= bnx2_COM_b09FwSbss,
+
+	.bss_addr			= 0x08007d60,
+	.bss_len			= 0x88,
+	.bss_index			= 0x0,
+	.bss				= bnx2_COM_b09FwBss,
+
+	.rodata_addr			= 0x08007c60,
+	.rodata_len			= 0x88,
+	.rodata_index			= 0x0,
+	.rodata				= bnx2_COM_b09FwRodata,
+};
+
+static u8 bnx2_CP_b09FwText[] = {
+	0x1f, 0x8b, 0x08, 0x08, 0x8e, 0xfc, 0x2f, 0x45, 0x00, 0x03, 0x74, 0x65,
+	0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xbd, 0x7d, 0x0d, 0x74,
+	0x5c, 0x57, 0x7d, 0xe7, 0xff, 0xdd, 0x79, 0x92, 0xc6, 0xb2, 0x2c, 0x3f,
+	0xcb, 0x63, 0x65, 0x22, 0x0b, 0x7b, 0x46, 0x7a, 0xb2, 0x95, 0x58, 0x64,
+	0xc7, 0xae, 0x00, 0x6d, 0x3b, 0x85, 0xe9, 0x48, 0xb2, 0x9d, 0x0f, 0x8a,
+	0x4c, 0x44, 0x4f, 0x5a, 0xe8, 0x22, 0xc6, 0x76, 0x48, 0x80, 0xb2, 0x4e,
+	0x09, 0x69, 0x80, 0x04, 0x0f, 0x23, 0xf9, 0x83, 0x74, 0xec, 0x51, 0x12,
+	0xc5, 0x76, 0x4f, 0x73, 0x58, 0x55, 0x92, 0x1d, 0x43, 0xa7, 0x1e, 0x27,
+	0x71, 0x68, 0xf6, 0x6c, 0x68, 0xb4, 0x4a, 0xe2, 0xa6, 0x3d, 0xd9, 0xd6,
+	0xf4, 0x84, 0x6e, 0xda, 0x43, 0x77, 0x85, 0x71, 0x88, 0x4b, 0xb3, 0x4b,
+	0xf8, 0x68, 0x61, 0xa1, 0xe5, 0xed, 0xef, 0x77, 0xef, 0x7d, 0xd2, 0xe8,
+	0xc3, 0x09, 0xa1, 0xbb, 0xf5, 0x39, 0xcf, 0x6f, 0xde, 0xfd, 0xfc, 0xdf,
+	0xff, 0xfd, 0x7f, 0xdf, 0x0f, 0xad, 0x17, 0xa9, 0x17, 0xfb, 0x6f, 0x15,
+	0x9e, 0x6d, 0x89, 0x7d, 0xbb, 0xb7, 0x5e, 0xd7, 0x73, 0x1d, 0x7e, 0x6e,
+	0x75, 0x57, 0x46, 0x95, 0xbc, 0x89, 0x7f, 0x89, 0x9f, 0xa1, 0x4c, 0x44,
+	0xc4, 0x0b, 0xfb, 0xe2, 0x23, 0x51, 0x95, 0x1e, 0xfc, 0x64, 0xd6, 0x97,
+	0x68, 0x24, 0x7d, 0xf6, 0xb3, 0xbb, 0x7d, 0x91, 0x4c, 0x79, 0x4b, 0xa2,
+	0x57, 0xfe, 0x25, 0xc8, 0xc7, 0x5c, 0x61, 0xfa, 0x5b, 0xd2, 0xff, 0xfc,
+	0x9f, 0xbe, 0xf2, 0x8e, 0xe4, 0x6b, 0xe3, 0x11, 0x89, 0x7a, 0xe9, 0x8f,
+	0x89, 0xb7, 0x49, 0xa2, 0xad, 0xe9, 0x81, 0x4f, 0x3e, 0xbc, 0xf9, 0x6f,
+	0x44, 0x1a, 0xc3, 0xb6, 0x2e, 0x07, 0x5f, 0xd9, 0x2c, 0xf9, 0x96, 0x74,
+	0x7c, 0xc8, 0x4d, 0x7b, 0xf2, 0x74, 0x45, 0x06, 0x0a, 0xc5, 0xa8, 0x44,
+	0xd2, 0x1d, 0x2f, 0xf5, 0x46, 0xf6, 0x07, 0x11, 0xdf, 0xf7, 0x7a, 0xa5,
+	0xa1, 0x27, 0xdb, 0x8d, 0xf4, 0xf2, 0x56, 0x51, 0x7e, 0x54, 0xb2, 0x15,
+	0x69, 0x50, 0xbe, 0x8f, 0x77, 0xbd, 0xa8, 0x74, 0xd2, 0xcb, 0x46, 0x5c,
+	0x29, 0x54, 0x2e, 0xac, 0x30, 0x6d, 0x96, 0xec, 0xfb, 0x6f, 0xa2, 0xe6,
+	0x8d, 0x36, 0x4b, 0x51, 0x99, 0x8d, 0xc4, 0x05, 0xfd, 0x00, 0xe6, 0x06,
+	0x19, 0x2e, 0x25, 0x24, 0x5b, 0x64, 0xbf, 0xae, 0xe4, 0x3c, 0xf6, 0xd9,
+	0x80, 0xfa, 0x2b, 0x9d, 0xe5, 0xcb, 0xb3, 0xec, 0x4b, 0x28, 0x9b, 0x40,
+	0xb9, 0x56, 0x79, 0xbc, 0x12, 0x97, 0xc7, 0x2a, 0x31, 0x79, 0xb4, 0x72,
+	0x87, 0x64, 0x50, 0xf7, 0x6c, 0x05, 0x7d, 0x97, 0x6a, 0xa5, 0x77, 0xac,
+	0x5e, 0xb2, 0x63, 0xed, 0xf1, 0x9c, 0x04, 0xc1, 0x27, 0x52, 0x1f, 0x95,
+	0xa1, 0x26, 0x94, 0x2f, 0x31, 0x2f, 0xbe, 0x20, 0x2f, 0x97, 0xda, 0xe2,
+	0xe5, 0x94, 0x23, 0x99, 0xc1, 0x64, 0x7c, 0x48, 0xf1, 0xbb, 0x46, 0xb2,
+	0x5d, 0xf8, 0x1e, 0x70, 0x25, 0xe2, 0x07, 0xc1, 0x1d, 0xa9, 0x26, 0xc0,
+	0x91, 0x4c, 0x24, 0x14, 0xeb, 0xb2, 0x5e, 0x32, 0x9f, 0x50, 0x51, 0xc9,
+	0x57, 0xae, 0x93, 0x44, 0x53, 0x10, 0xbc, 0x37, 0xe5, 0x21, 0x5d, 0xa4,
+	0xb7, 0x28, 0xfb, 0x54, 0xda, 0x47, 0x9b, 0x92, 0x52, 0xe9, 0xb5, 0x18,
+	0xc7, 0x16, 0xe0, 0xa9, 0x56, 0x32, 0x31, 0xc9, 0xa8, 0xb4, 0x24, 0x54,
+	0x7a, 0x05, 0xd2, 0x1c, 0xa9, 0xf1, 0xa7, 0x2c, 0x9d, 0xac, 0xc6, 0xb7,
+	0x0c, 0xa8, 0x74, 0xd3, 0xa2, 0xf4, 0x64, 0x42, 0xd4, 0x8f, 0xea, 0xd0,
+	0x67, 0x67, 0x46, 0x31, 0x0d, 0x6f, 0x9d, 0x76, 0xfd, 0x32, 0x69, 0x1f,
+	0x74, 0x16, 0xa6, 0x3d, 0xb5, 0x8a, 0xb0, 0x8a, 0xe2, 0xef, 0x28, 0xe0,
+	0x6a, 0x41, 0xff, 0xed, 0x5e, 0x0d, 0xc6, 0x35, 0x90, 0x4a, 0x7a, 0xfd,
+	0xea, 0xc5, 0x40, 0x9a, 0x09, 0x33, 0xf3, 0x14, 0xf2, 0x50, 0x34, 0x9d,
+	0xc2, 0xbc, 0xb9, 0x72, 0x08, 0x63, 0xbb, 0x38, 0x96, 0xf4, 0xda, 0x14,
+	0xde, 0x53, 0xfc, 0xdd, 0x34, 0x14, 0x49, 0x07, 0x41, 0x36, 0x35, 0x2e,
+	0xb9, 0x72, 0xd2, 0x9b, 0x05, 0x70, 0xbd, 0x63, 0x71, 0x8c, 0x1f, 0xe3,
+	0x88, 0x65, 0x92, 0x6b, 0xa4, 0xcb, 0xce, 0xcf, 0x5f, 0xa2, 0xef, 0x76,
+	0xef, 0x0e, 0xd5, 0xee, 0xa5, 0x54, 0xd2, 0x9b, 0x90, 0x3f, 0xc4, 0x77,
+	0x10, 0xec, 0x4a, 0x25, 0xe3, 0x79, 0xcc, 0xdd, 0xa5, 0x62, 0x4c, 0x5e,
+	0x2e, 0x26, 0x41, 0xa9, 0xc9, 0xce, 0x49, 0xd9, 0x92, 0x9a, 0x04, 0xdc,
+	0x05, 0x3c, 0x07, 0x99, 0x57, 0x46, 0x5e, 0x99, 0x75, 0x83, 0xe0, 0xe6,
+	0xd4, 0x89, 0x60, 0xa8, 0xd9, 0xd0, 0xfe, 0xd3, 0x25, 0xcc, 0x2b, 0xe6,
+	0xe9, 0xb1, 0x12, 0xe6, 0xb5, 0x84, 0x39, 0xd5, 0xf3, 0xdf, 0x89, 0xf9,
+	0x27, 0x8d, 0x90, 0x3e, 0xb6, 0x59, 0x7a, 0x7d, 0xb7, 0x7d, 0x8b, 0x64,
+	0x4b, 0x8e, 0x64, 0x53, 0x3f, 0x09, 0x32, 0x9a, 0x27, 0xc4, 0xe9, 0x2d,
+	0x91, 0x26, 0x6b, 0x00, 0x2b, 0x3f, 0x7f, 0xdd, 0x96, 0x8b, 0x3a, 0x18,
+	0x06, 0xe7, 0x83, 0xf9, 0x51, 0xe5, 0xd7, 0xd9, 0xfc, 0x90, 0xf6, 0xf9,
+	0x0f, 0x74, 0xe7, 0xcf, 0x97, 0xcb, 0x92, 0x36, 0x2b, 0x22, 0xb9, 0x07,
+	0x03, 0xe9, 0x4d, 0x01, 0x5f, 0x6c, 0xd3, 0x4b, 0x89, 0xae, 0xeb, 0xb1,
+	0x8c, 0x2e, 0x8b, 0x7f, 0x3f, 0xae, 0x41, 0x1f, 0x4e, 0x5f, 0x69, 0xbe,
+	0x6e, 0x5f, 0xe9, 0x85, 0x98, 0x85, 0x0f, 0xdf, 0x3d, 0x4e, 0xb6, 0xf2,
+	0x77, 0x76, 0x8e, 0xc3, 0x71, 0x74, 0x2d, 0x43, 0xe3, 0x2e, 0xf8, 0xc1,
+	0x93, 0x5c, 0xb1, 0x07, 0xfd, 0xc6, 0xf0, 0x0e, 0x82, 0x91, 0x54, 0x26,
+	0xe9, 0x4a, 0x1a, 0xdf, 0x03, 0x98, 0xaf, 0x0e, 0xe0, 0x4f, 0xdc, 0xec,
+	0xe6, 0x94, 0xf4, 0x55, 0x40, 0x7b, 0x95, 0x37, 0x96, 0x14, 0x7a, 0x0c,
+	0xa9, 0x7f, 0xb1, 0xb8, 0x61, 0x3f, 0x7c, 0xbb, 0x32, 0x02, 0xfa, 0x28,
+	0x8c, 0xf9, 0x32, 0x5c, 0x9c, 0xf6, 0x94, 0x24, 0x41, 0xbb, 0x69, 0xe9,
+	0xad, 0xf8, 0x52, 0x28, 0xe2, 0x5d, 0x6a, 0x07, 0xfd, 0xba, 0x92, 0x89,
+	0x9b, 0x39, 0x29, 0x14, 0x7f, 0x09, 0xe3, 0x02, 0x8e, 0x7d, 0xfe, 0xee,
+	0xb1, 0xb0, 0x80, 0xf7, 0xbb, 0x53, 0x1a, 0x3f, 0x6f, 0x0e, 0x06, 0xf6,
+	0x8d, 0x31, 0x60, 0x9c, 0x85, 0xb2, 0x8b, 0x77, 0x0c, 0xef, 0x90, 0x16,
+	0xe3, 0x80, 0xa9, 0x55, 0x86, 0x41, 0x8b, 0xbd, 0x82, 0xdf, 0x53, 0x84,
+	0x91, 0xfd, 0xb6, 0xe8, 0xdf, 0xc3, 0x63, 0x1b, 0xf4, 0x77, 0x6e, 0xa0,
+	0x45, 0xf2, 0x53, 0xe1, 0x58, 0x28, 0x0f, 0x28, 0x03, 0x92, 0x87, 0x45,
+	0x28, 0x13, 0x82, 0xe0, 0xc1, 0x14, 0xe5, 0x42, 0x10, 0x3c, 0x96, 0xa2,
+	0x9c, 0x38, 0x07, 0xfe, 0xa7, 0x6c, 0x20, 0xaf, 0xae, 0x55, 0x9c, 0x83,
+	0x6c, 0x11, 0x7d, 0x40, 0x4e, 0xe4, 0xba, 0x4e, 0x40, 0x6e, 0x50, 0xae,
+	0x5c, 0xf8, 0x44, 0xd6, 0xcf, 0xc7, 0x23, 0x1a, 0x0f, 0x98, 0x6f, 0xc8,
+	0xbc, 0x8c, 0x86, 0xbc, 0x4d, 0x0a, 0x5d, 0xa3, 0xb6, 0xcc, 0x65, 0x5d,
+	0xc6, 0x5d, 0x52, 0xe6, 0x76, 0x65, 0xf8, 0xae, 0x15, 0xf3, 0xb1, 0x42,
+	0x11, 0x4f, 0x6d, 0x9b, 0xf8, 0x2d, 0xd1, 0x9a, 0xf4, 0x97, 0x90, 0x37,
+	0x7d, 0xd7, 0x49, 0x7f, 0xb9, 0xbc, 0x59, 0x77, 0x69, 0xde, 0x88, 0xb8,
+	0x7e, 0xb2, 0x73, 0x97, 0x9a, 0x01, 0x3d, 0x05, 0xc1, 0xc9, 0x54, 0x98,
+	0xfe, 0x8f, 0xee, 0xd2, 0x3e, 0x12, 0x35, 0x4b, 0xd3, 0xee, 0x5d, 0x26,
+	0xed, 0xc4, 0x32, 0x69, 0x1b, 0x6a, 0x97, 0xa6, 0xbd, 0x7f, 0x99, 0xb4,
+	0xfb, 0x97, 0x49, 0xfb, 0x5f, 0xcb, 0xa4, 0x7d, 0x67, 0x99, 0xb4, 0xef,
+	0x2d, 0x93, 0xd6, 0x52, 0xb7, 0x34, 0xcd, 0x05, 0x3f, 0x6d, 0x92, 0x42,
+	0xec, 0x73, 0x1c, 0xbb, 0xc5, 0xcd, 0xfe, 0xc8, 0x52, 0xdc, 0xd4, 0xa0,
+	0x5c, 0xeb, 0xa2, 0x72, 0x53, 0xcb, 0x94, 0xab, 0x45, 0xb9, 0xa6, 0x45,
+	0xe5, 0x92, 0xcb, 0xe0, 0xba, 0x4e, 0xeb, 0xaf, 0x85, 0xe5, 0x0a, 0xcb,
+	0x94, 0x63, 0xfa, 0x1e, 0xdb, 0xcf, 0x16, 0x68, 0x99, 0xd7, 0x9b, 0xaf,
+	0x5a, 0x91, 0x66, 0xa6, 0xb7, 0x42, 0x47, 0xac, 0x50, 0x86, 0xdf, 0x29,
+	0x5b, 0x98, 0xe6, 0x81, 0xee, 0xa3, 0xa0, 0x3b, 0xca, 0x47, 0xf0, 0x91,
+	0x4f, 0xfe, 0x5d, 0x25, 0x43, 0xb1, 0x2d, 0xde, 0x2f, 0xa8, 0x06, 0xd0,
+	0x58, 0xd2, 0x4b, 0x28, 0xf2, 0x97, 0xe4, 0x23, 0x69, 0x3f, 0xdf, 0x2b,
+	0x2a, 0xa6, 0x24, 0x90, 0xbe, 0x94, 0x6a, 0x52, 0xb2, 0x1f, 0xfc, 0x93,
+	0x81, 0x4e, 0xda, 0x15, 0xf4, 0x6a, 0x1e, 0x32, 0x65, 0xaf, 0x2c, 0x2b,
+	0x7d, 0x39, 0x48, 0x19, 0x97, 0xce, 0xdc, 0x95, 0xf5, 0xa7, 0x7b, 0x6a,
+	0x41, 0xb3, 0x17, 0x51, 0x67, 0x07, 0x6a, 0xee, 0x2d, 0xbb, 0xd2, 0x57,
+	0xee, 0x04, 0x2f, 0x38, 0x72, 0xde, 0x5f, 0x2d, 0xe7, 0x53, 0x28, 0x5b,
+	0x89, 0xc8, 0x4c, 0xcc, 0x91, 0x19, 0x7c, 0x67, 0x53, 0xc8, 0xab, 0x84,
+	0xbc, 0xd5, 0x29, 0x07, 0x4a, 0xbe, 0x1c, 0x2e, 0xfd, 0x92, 0x0a, 0xf5,
+	0x56, 0x7f, 0x6a, 0xa5, 0x9c, 0xf6, 0x4c, 0xdb, 0x3b, 0xfc, 0x69, 0x68,
+	0x4c, 0x57, 0x2e, 0xfa, 0xc9, 0xf8, 0x8c, 0xe6, 0x89, 0x1f, 0x06, 0x7d,
+	0x68, 0x67, 0xc2, 0x4f, 0x7a, 0x7f, 0x8a, 0xef, 0xa1, 0x32, 0xed, 0x90,
+	0xf9, 0xb6, 0x86, 0xd1, 0xd6, 0xa1, 0xd2, 0x2a, 0xf9, 0xb0, 0xad, 0xbf,
+	0xdd, 0x9f, 0xee, 0x04, 0xcf, 0x79, 0xa7, 0x28, 0x23, 0x8a, 0x80, 0x6b,
+	0x10, 0xbc, 0x8d, 0xba, 0xcf, 0x69, 0x39, 0x05, 0xbb, 0xa5, 0xb8, 0x1a,
+	0x72, 0xf7, 0x1f, 0x83, 0x0f, 0xc7, 0x58, 0x9e, 0x69, 0xd4, 0x25, 0x32,
+	0xaa, 0xd2, 0x90, 0x09, 0xdd, 0x94, 0x85, 0x09, 0xc8, 0x41, 0xc8, 0x96,
+	0xd2, 0x4f, 0x83, 0x8c, 0x5b, 0x2d, 0xdf, 0x24, 0x3f, 0x5f, 0x86, 0x69,
+	0x09, 0x23, 0x2f, 0x4b, 0xb3, 0x73, 0xb2, 0x22, 0x0f, 0xf9, 0xf2, 0x74,
+	0x85, 0x72, 0xe1, 0x7a, 0xf0, 0x68, 0xab, 0xf4, 0x15, 0x93, 0xf9, 0x8c,
+	0x6c, 0xc2, 0xfc, 0x7d, 0x1e, 0x73, 0xea, 0xe2, 0xb9, 0xaf, 0x5e, 0x1a,
+	0x53, 0xd0, 0xcd, 0x4c, 0x47, 0xa3, 0xcd, 0x51, 0xc8, 0xa8, 0xdf, 0x03,
+	0x1e, 0x86, 0x39, 0xe7, 0xf1, 0x6c, 0xc4, 0x19, 0xa0, 0x3d, 0x32, 0x40,
+	0xfd, 0x50, 0x66, 0xdb, 0x84, 0x37, 0x6e, 0x7f, 0x47, 0xb5, 0x8c, 0x31,
+	0xbf, 0x1b, 0xf0, 0x3b, 0x61, 0x7f, 0x7b, 0xf8, 0xed, 0xdb, 0xdf, 0x31,
+	0xfc, 0xee, 0xb4, 0xbf, 0xa1, 0x5b, 0x8b, 0x5d, 0xfa, 0xf7, 0x48, 0x69,
+	0xfb, 0x76, 0xe5, 0x5f, 0x27, 0xb9, 0xa9, 0x56, 0x39, 0x50, 0xf4, 0xad,
+	0x6c, 0xc1, 0x23, 0x4f, 0x3a, 0x66, 0x9c, 0x80, 0x9b, 0xb2, 0xb3, 0x94,
+	0x77, 0x06, 0x08, 0x3f, 0x68, 0xa0, 0xb7, 0xb8, 0xc5, 0x5b, 0x23, 0xa4,
+	0x81, 0x11, 0xa7, 0xb7, 0xe2, 0x64, 0x60, 0xaf, 0xc5, 0x87, 0xe5, 0x30,
+	0x7e, 0x8b, 0x17, 0x49, 0x3f, 0x89, 0xb7, 0xc1, 0x01, 0xf5, 0xce, 0x70,
+	0x89, 0xf2, 0xd2, 0xc7, 0xd8, 0x13, 0x72, 0x6e, 0x81, 0x0d, 0x45, 0x5c,
+	0x28, 0xc9, 0x8d, 0x25, 0x4f, 0xe4, 0x25, 0x99, 0x1f, 0x07, 0x43, 0xec,
+	0x4a, 0xb9, 0xf2, 0xde, 0x14, 0x68, 0xf7, 0x3a, 0x47, 0xb6, 0x5f, 0xe7,
+	0xc2, 0xe6, 0xf1, 0xc7, 0xb7, 0x83, 0xfe, 0x31, 0xcf, 0x9a, 0x1e, 0xd4,
+	0x19, 0x81, 0x9d, 0x08, 0x6c, 0x9f, 0xe9, 0xea, 0x1b, 0x2e, 0xe6, 0x3e,
+	0xa6, 0xd2, 0xfb, 0x3e, 0x95, 0xed, 0xbe, 0x46, 0x72, 0x83, 0x0a, 0x38,
+	0x6a, 0x1e, 0x82, 0x1e, 0xc4, 0xb8, 0x82, 0x00, 0xf4, 0x0c, 0x79, 0x7e,
+	0xf3, 0xcd, 0x91, 0x74, 0x8d, 0xf4, 0x0e, 0x36, 0xa3, 0x0e, 0xf3, 0x88,
+	0xaf, 0xaf, 0xa2, 0x9d, 0x64, 0xa2, 0x4f, 0xe4, 0x9e, 0x91, 0xee, 0x59,
+	0x67, 0x78, 0xf4, 0x37, 0xc0, 0x93, 0x5b, 0x51, 0xff, 0x01, 0xd4, 0x7f,
+	0xcd, 0x29, 0x8c, 0xfd, 0xc8, 0x19, 0x1e, 0xfb, 0x9e, 0x33, 0x32, 0xb6,
+	0x61, 0x43, 0x7f, 0xcf, 0x86, 0x0d, 0xbb, 0x7b, 0x5c, 0x99, 0x00, 0x8f,
+	0x65, 0xbc, 0x0d, 0x1b, 0x46, 0x7a, 0xba, 0x80, 0x83, 0x2d, 0x5e, 0x9f,
+	0xf8, 0xde, 0x76, 0x01, 0xff, 0xc4, 0xd8, 0x67, 0x14, 0xf9, 0x49, 0xe4,
+	0xb3, 0x7e, 0x5c, 0xe7, 0xf7, 0xca, 0x96, 0x78, 0x93, 0xb0, 0xff, 0x88,
+	0x2d, 0x53, 0x13, 0x91, 0xfa, 0x07, 0xec, 0xfc, 0x66, 0x9c, 0x1a, 0x9f,
+	0xe9, 0x1c, 0x0b, 0xd3, 0x39, 0xb7, 0x7f, 0x67, 0x6d, 0xd5, 0xd5, 0x48,
+	0xe7, 0x37, 0x71, 0x46, 0xbc, 0xd0, 0xc6, 0xa8, 0xd1, 0xb6, 0x61, 0xae,
+	0x48, 0x9a, 0x71, 0x65, 0x4f, 0xd1, 0x41, 0x1d, 0xd0, 0xc5, 0x19, 0xfb,
+	0x1c, 0x05, 0x6c, 0x83, 0x68, 0xeb, 0xe8, 0x21, 0xd4, 0xa3, 0xcc, 0x48,
+	0x76, 0x8a, 0xfa, 0x00, 0xca, 0x6c, 0xf1, 0xd6, 0x0a, 0x6d, 0x89, 0x3b,
+	0x25, 0x57, 0x22, 0x7f, 0x77, 0x00, 0x9e, 0xa8, 0x24, 0x9a, 0xf1, 0x5d,
+	0x81, 0x4d, 0xf1, 0x60, 0x8d, 0x58, 0xdb, 0x45, 0xe6, 0x6d, 0x91, 0x3b,
+	0x94, 0xc0, 0xde, 0x18, 0x9a, 0x5c, 0x8f, 0x72, 0x0e, 0xf0, 0x42, 0xfb,
+	0x03, 0xb4, 0x36, 0x99, 0x91, 0xec, 0x26, 0xf0, 0xc9, 0xa4, 0x87, 0x6f,
+	0xc0, 0x35, 0xf9, 0x16, 0xbc, 0x23, 0xfa, 0xdb, 0xc0, 0x09, 0xbc, 0xa6,
+	0x22, 0x56, 0x67, 0x75, 0xa1, 0xef, 0xf7, 0x48, 0x76, 0x34, 0x4e, 0x5b,
+	0x62, 0x75, 0xd6, 0xcf, 0x40, 0xd7, 0x2b, 0x28, 0x41, 0x8c, 0x61, 0xd2,
+	0x81, 0x3c, 0xa9, 0x95, 0xdd, 0x8f, 0xe0, 0xf7, 0x83, 0xc6, 0xe6, 0xdd,
+	0x3d, 0xc9, 0x7e, 0x1a, 0x00, 0x13, 0x6c, 0x90, 0x47, 0x60, 0x9b, 0x3e,
+	0x02, 0x1b, 0xe4, 0x91, 0x66, 0x3c, 0x1c, 0x1b, 0xdb, 0x9f, 0x59, 0x03,
+	0x31, 0xa9, 0xbf, 0x73, 0xa4, 0x57, 0xd8, 0xea, 0xb9, 0x62, 0xca, 0x94,
+	0x2f, 0x76, 0xeb, 0xb7, 0xa1, 0xeb, 0x1e, 0xfb, 0x3b, 0xae, 0xf9, 0x3a,
+	0xdf, 0x04, 0x9a, 0xaf, 0x74, 0x69, 0x99, 0x93, 0xf5, 0xf1, 0x86, 0xcd,
+	0x99, 0x69, 0xe2, 0x18, 0xe3, 0x36, 0x2d, 0xae, 0xd3, 0x12, 0x4d, 0xd6,
+	0xde, 0x28, 0x59, 0x5b, 0x03, 0xb8, 0x19, 0x6a, 0x06, 0xc4, 0x94, 0xcf,
+	0x12, 0xe2, 0x93, 0x32, 0x00, 0xf4, 0x0b, 0x9b, 0xe2, 0xdc, 0x15, 0xe5,
+	0xdf, 0xac, 0xb6, 0xb1, 0xce, 0x56, 0x48, 0xc7, 0xa4, 0xed, 0x20, 0xb8,
+	0x3f, 0x55, 0x87, 0xf6, 0xc9, 0xf3, 0xb0, 0x40, 0x8e, 0x02, 0x26, 0x60,
+	0xa2, 0xc6, 0x3f, 0xab, 0x69, 0xa0, 0xd6, 0x27, 0x0d, 0x57, 0xf3, 0x97,
+	0xe8, 0xf1, 0x9e, 0x05, 0x8f, 0xc1, 0xbe, 0x81, 0xfd, 0xd6, 0x01, 0xdb,
+	0x98, 0x7d, 0x1c, 0xe6, 0xb7, 0xa7, 0xc0, 0x53, 0xd9, 0x39, 0x9e, 0x12,
+	0x99, 0x28, 0x12, 0x37, 0xa1, 0x5d, 0xc7, 0x79, 0x26, 0x7e, 0x32, 0x18,
+	0x33, 0xdf, 0x7d, 0x16, 0x4f, 0x3b, 0x2d, 0x9e, 0x6e, 0xb2, 0xef, 0x11,
+	0xbc, 0x69, 0xe3, 0x0d, 0xe0, 0xcd, 0xf9, 0x19, 0xc4, 0x9b, 0xbc, 0x75,
+	0x0b, 0xde, 0x28, 0x5b, 0xca, 0xc8, 0x6e, 0x6d, 0x87, 0x45, 0xe4, 0x57,
+	0xb4, 0x6c, 0xfb, 0x02, 0xe6, 0xb2, 0x48, 0xfa, 0x95, 0x7c, 0x2c, 0x02,
+	0x9c, 0x14, 0xf0, 0xfb, 0x4e, 0xd7, 0xd0, 0x2a, 0x71, 0xb2, 0xc0, 0x57,
+	0xaa, 0x82, 0x29, 0x66, 0xe5, 0x5c, 0x42, 0xdb, 0xfa, 0xb9, 0xe2, 0x07,
+	0x34, 0x5c, 0xb7, 0x42, 0xde, 0xe5, 0x45, 0x35, 0x43, 0x37, 0x80, 0x16,
+	0x54, 0x0c, 0x9a, 0x2b, 0x78, 0x06, 0x7a, 0x29, 0x37, 0x49, 0xdb, 0xb8,
+	0x8d, 0x7e, 0x49, 0x34, 0xd7, 0xd5, 0x48, 0x3a, 0x52, 0x0a, 0xf6, 0x17,
+	0xbe, 0x55, 0xae, 0x4b, 0xd3, 0xa9, 0xa3, 0xfc, 0x98, 0xb6, 0x7f, 0x5d,
+	0x1f, 0xd2, 0xd6, 0xf8, 0xbd, 0xae, 0xf2, 0xd7, 0x2e, 0x4e, 0x4b, 0x50,
+	0x0f, 0xa3, 0x5e, 0x22, 0xd7, 0xd5, 0x4c, 0x1e, 0xf3, 0x40, 0xbf, 0x19,
+	0xe5, 0x6b, 0xdf, 0x27, 0xaf, 0xba, 0x57, 0x2f, 0x2a, 0xaf, 0xdf, 0x8e,
+	0xfd, 0x76, 0xed, 0xdb, 0xb3, 0xef, 0x84, 0x7d, 0xe7, 0xdd, 0x6e, 0xbe,
+	0x1d, 0x71, 0xd3, 0x7c, 0x83, 0x92, 0xd3, 0x6c, 0x43, 0xf3, 0x95, 0x95,
+	0x33, 0x1d, 0x5e, 0x41, 0xc8, 0x57, 0x9f, 0x93, 0x5b, 0x27, 0x8d, 0xfc,
+	0xdd, 0x0e, 0x19, 0x04, 0xff, 0xcc, 0x9b, 0x11, 0xc0, 0x3f, 0x98, 0x96,
+	0x5b, 0x2b, 0xc4, 0xdb, 0xef, 0x02, 0x7f, 0x60, 0xe2, 0x7a, 0xea, 0x74,
+	0xca, 0xdd, 0x3b, 0x61, 0xf7, 0xa2, 0x7c, 0x91, 0x38, 0x1f, 0xd2, 0x73,
+	0x53, 0x28, 0xee, 0xd1, 0x73, 0x73, 0xb0, 0x38, 0x03, 0xfc, 0xdc, 0x06,
+	0xba, 0x0f, 0x82, 0x99, 0x54, 0x01, 0x94, 0xf3, 0x11, 0xfc, 0x86, 0x1d,
+	0x50, 0xfc, 0x18, 0xf2, 0x1b, 0xa5, 0x30, 0x4a, 0x9e, 0x73, 0x2d, 0x0f,
+	0xbf, 0x13, 0xfc, 0x14, 0x45, 0xbb, 0x48, 0xeb, 0xe6, 0xef, 0x9f, 0x20,
+	0x0f, 0xef, 0x49, 0x4c, 0x62, 0x33, 0x6d, 0x1d, 0xf6, 0xcd, 0xb9, 0xe3,
+	0x9c, 0xc5, 0xb4, 0x2c, 0x3f, 0x3b, 0x37, 0x6f, 0x97, 0xe7, 0xe8, 0x36,
+	0x4f, 0x1f, 0x8f, 0xf9, 0x1a, 0x56, 0xd2, 0xfd, 0xb7, 0xb4, 0x5c, 0x72,
+	0x8f, 0xce, 0xac, 0x30, 0xef, 0xc5, 0x75, 0x39, 0xe7, 0xd5, 0x34, 0x48,
+	0xbf, 0x25, 0xd9, 0x93, 0x07, 0x3f, 0x61, 0x9c, 0xd2, 0xa7, 0x7d, 0x1d,
+	0xd2, 0x04, 0x69, 0x60, 0xdc, 0xd2, 0xe6, 0x94, 0xa5, 0xcd, 0x27, 0xf1,
+	0xc6, 0x53, 0xba, 0x60, 0x69, 0xf3, 0x29, 0xbc, 0xf1, 0x94, 0x5e, 0x9c,
+	0xe3, 0xe3, 0x5e, 0xf8, 0x72, 0xdb, 0xa1, 0xdf, 0x76, 0x57, 0x40, 0xbf,
+	0xe0, 0xbb, 0x1c, 0x7c, 0x80, 0x5c, 0x69, 0x1f, 0xde, 0xec, 0x67, 0xa3,
+	0x6d, 0x3f, 0x23, 0x7b, 0x4a, 0x01, 0xc6, 0x78, 0x37, 0xc6, 0xfb, 0x39,
+	0xbc, 0x3f, 0xa3, 0xe5, 0x8c, 0xf2, 0x0f, 0x5b, 0x79, 0xf5, 0x79, 0xbc,
+	0xdb, 0xe3, 0x07, 0xa5, 0xdd, 0x8b, 0xc8, 0x34, 0xda, 0xfa, 0xba, 0xec,
+	0xa9, 0xcc, 0xe2, 0xb9, 0x84, 0xe7, 0x55, 0x3c, 0x97, 0xd1, 0xde, 0x0b,
+	0x48, 0x5f, 0x29, 0xd3, 0x5e, 0x3d, 0xca, 0xbf, 0x86, 0xdf, 0xcf, 0xcb,
+	0xd0, 0x23, 0x2f, 0xe1, 0xf9, 0x01, 0xf2, 0x9f, 0x45, 0xfd, 0x60, 0xf5,
+	0x8c, 0x4f, 0x19, 0xf6, 0x9c, 0x6d, 0x3b, 0xe5, 0xe4, 0x2a, 0xa0, 0xe9,
+	0xd2, 0x00, 0xfa, 0xde, 0xa3, 0x79, 0xa6, 0x0f, 0x32, 0x3f, 0x07, 0x19,
+	0x37, 0xa4, 0x61, 0x6a, 0x07, 0x7c, 0x79, 0xcc, 0x05, 0xde, 0x93, 0xb5,
+	0x32, 0x1b, 0xa3, 0x1d, 0x79, 0x93, 0x2e, 0x9f, 0x2b, 0x35, 0x69, 0xbb,
+	0x7a, 0x7c, 0x09, 0xff, 0xd0, 0xef, 0x0a, 0xe5, 0x81, 0x91, 0xc6, 0x13,
+	0x45, 0xca, 0x02, 0xe8, 0x9f, 0xe2, 0x08, 0xde, 0xb5, 0x5a, 0x26, 0x14,
+	0x24, 0x94, 0x07, 0xac, 0x47, 0x99, 0x50, 0x2d, 0x77, 0x28, 0x6b, 0x28,
+	0x7b, 0x28, 0x4b, 0xcc, 0x7c, 0xec, 0x7e, 0x90, 0x32, 0x1c, 0xb4, 0x10,
+	0xa3, 0xfd, 0xe1, 0x19, 0x1f, 0x64, 0xec, 0x3e, 0x2b, 0x4f, 0x47, 0xf5,
+	0x5c, 0xec, 0x29, 0xaa, 0x98, 0x2b, 0xa7, 0x91, 0x86, 0xe7, 0xf8, 0xc3,
+	0x78, 0x7f, 0x49, 0xf6, 0xe0, 0xc9, 0x1d, 0xff, 0x02, 0x7e, 0x73, 0x6e,
+	0xca, 0x28, 0x87, 0xa7, 0x74, 0x02, 0x6f, 0x3c, 0xa5, 0x31, 0x2b, 0x47,
+	0xc6, 0xad, 0x1c, 0xe1, 0x9c, 0xde, 0x04, 0x3c, 0x70, 0x7c, 0x4a, 0xc7,
+	0x17, 0xc0, 0xcf, 0x4e, 0x6e, 0xf2, 0x5d, 0xd6, 0x8f, 0x6d, 0x14, 0xc3,
+	0x83, 0x78, 0x3a, 0xc9, 0xcf, 0x0d, 0xda, 0x0e, 0xce, 0x69, 0xda, 0xfd,
+	0x2b, 0xd7, 0xf0, 0x62, 0xcc, 0xe8, 0x14, 0xaf, 0x59, 0x34, 0xef, 0xcf,
+	0xe1, 0x31, 0x63, 0xf1, 0xc8, 0xdf, 0xca, 0xfe, 0x86, 0xdc, 0x82, 0x4d,
+	0x9b, 0xf5, 0x7d, 0xcc, 0x03, 0xc6, 0x72, 0x7c, 0x14, 0x7d, 0x3b, 0xb2,
+	0xdb, 0xa7, 0x0c, 0x67, 0x0c, 0x81, 0xe3, 0x63, 0xbb, 0x48, 0xd7, 0x38,
+	0x48, 0xc9, 0xbc, 0x6f, 0x7e, 0x13, 0xe6, 0x2c, 0x23, 0x7b, 0x4b, 0xf7,
+	0x6a, 0x5f, 0xb9, 0xf6, 0x68, 0x93, 0xf5, 0x73, 0xc2, 0x72, 0xa0, 0xd5,
+	0x18, 0x6d, 0x9b, 0x2f, 0xc5, 0x0c, 0xcd, 0xf3, 0x37, 0xe5, 0x73, 0xb5,
+	0xbc, 0x37, 0x76, 0x4d, 0x61, 0x81, 0xac, 0xa3, 0x6d, 0x81, 0x39, 0x2b,
+	0x57, 0xe3, 0x9d, 0xbe, 0x3b, 0xf9, 0x8a, 0xfc, 0x74, 0x10, 0x3c, 0xf1,
+	0x5d, 0xcb, 0xfb, 0xf4, 0x35, 0xd8, 0xe7, 0x62, 0x7e, 0xf2, 0x60, 0xfb,
+	0xba, 0x72, 0x0a, 0xb6, 0xdb, 0xf6, 0xb9, 0x36, 0xae, 0x06, 0x3c, 0x51,
+	0x79, 0xa4, 0xd8, 0x20, 0x93, 0x45, 0xd5, 0x1c, 0xb1, 0xb2, 0x33, 0x22,
+	0x09, 0x4d, 0xdf, 0xb4, 0xef, 0x7a, 0xc7, 0x22, 0x96, 0xee, 0xd6, 0xd5,
+	0x48, 0xfd, 0xef, 0x42, 0xc7, 0xa6, 0xa1, 0x63, 0x1b, 0xa1, 0x83, 0x17,
+	0xcb, 0x88, 0x35, 0x35, 0x4b, 0x65, 0x04, 0xeb, 0x24, 0xe1, 0x75, 0x1f,
+	0x44, 0xbd, 0x90, 0xfe, 0xa2, 0x9a, 0xd6, 0x72, 0x92, 0x77, 0xb6, 0x57,
+	0x46, 0x9c, 0x1d, 0x95, 0xc5, 0x3a, 0x68, 0x8b, 0xe7, 0x8a, 0x81, 0xf5,
+	0x91, 0x22, 0x6d, 0xd4, 0x64, 0x2a, 0x0b, 0x9c, 0xec, 0x00, 0xcc, 0xcf,
+	0x8c, 0xc2, 0x4f, 0xa7, 0x5c, 0x06, 0xcc, 0xa7, 0x01, 0xf3, 0xc4, 0xa8,
+	0x13, 0xda, 0x06, 0xc2, 0xa0, 0xc8, 0xc4, 0x58, 0x97, 0xcc, 0x4c, 0x91,
+	0x0e, 0x21, 0x03, 0x46, 0x31, 0x9f, 0xa9, 0x15, 0xb0, 0x03, 0xd8, 0x3f,
+	0xe4, 0xf6, 0x58, 0x8b, 0xce, 0x33, 0xfa, 0xbc, 0x55, 0x66, 0xca, 0x69,
+	0x0b, 0xdb, 0xe1, 0x2a, 0xd8, 0x56, 0xcc, 0xc1, 0xb6, 0x03, 0xb0, 0xed,
+	0x5c, 0x16, 0xb6, 0xe5, 0x74, 0x71, 0x1b, 0x6c, 0x1a, 0xa3, 0x8b, 0x0d,
+	0x5e, 0x9b, 0x2d, 0x3d, 0xbc, 0xdf, 0xda, 0xbb, 0xb4, 0x89, 0x7e, 0x0a,
+	0x78, 0x48, 0x63, 0xf8, 0x3d, 0x79, 0x2f, 0x65, 0x19, 0xd2, 0xf9, 0xbd,
+	0x07, 0x65, 0xf0, 0x3d, 0xf9, 0x67, 0x2b, 0x4c, 0xd9, 0xbb, 0x2d, 0x2c,
+	0xb4, 0x13, 0x32, 0xb0, 0x89, 0xfb, 0x9c, 0xec, 0x24, 0x61, 0xf8, 0x8f,
+	0x80, 0x17, 0x79, 0x95, 0xea, 0x36, 0xf9, 0x66, 0xbb, 0xd7, 0xda, 0x76,
+	0xd8, 0x76, 0x38, 0x96, 0x95, 0x56, 0xcf, 0x87, 0xf4, 0x15, 0xda, 0xd7,
+	0x23, 0x4e, 0x66, 0xc9, 0xb8, 0xaa, 0x69, 0x8e, 0xf2, 0xd6, 0x95, 0x7e,
+	0xd0, 0x49, 0xff, 0x02, 0x5a, 0x33, 0x72, 0xc3, 0xd0, 0xf1, 0x0a, 0x3b,
+	0xbe, 0x1a, 0xc3, 0x37, 0xa9, 0x28, 0xf4, 0x21, 0xe5, 0xcd, 0x0e, 0xe3,
+	0x9b, 0xcb, 0x43, 0x80, 0x35, 0xfc, 0x3e, 0xa8, 0x6d, 0xce, 0xa7, 0x4b,
+	0x94, 0x49, 0xf3, 0xb4, 0x68, 0x7c, 0x97, 0x56, 0xf4, 0x55, 0x6d, 0xaf,
+	0xbb, 0x32, 0x60, 0xe6, 0xfc, 0x30, 0xe7, 0x9c, 0xbe, 0x48, 0xfb, 0x03,
+	0x03, 0x96, 0xbf, 0x92, 0xa3, 0x79, 0x79, 0xbb, 0x1d, 0xfb, 0x1f, 0x2e,
+	0x33, 0x77, 0x8d, 0x73, 0x73, 0x37, 0x50, 0x59, 0x3c, 0x46, 0x91, 0xb6,
+	0x07, 0x58, 0xcf, 0x85, 0x8d, 0x94, 0x92, 0x5a, 0x9f, 0xf2, 0x93, 0xb6,
+	0x12, 0xd2, 0x27, 0xb6, 0x78, 0x4d, 0xf0, 0x01, 0x9e, 0x5e, 0x62, 0x77,
+	0x25, 0xac, 0xdc, 0xa4, 0x1f, 0x1c, 0xf6, 0x91, 0xb7, 0x72, 0x32, 0x8f,
+	0xf6, 0x47, 0x9c, 0xfe, 0xca, 0x72, 0xf2, 0x32, 0x94, 0x93, 0x1c, 0x8f,
+	0x23, 0x77, 0x3c, 0x48, 0x1e, 0x7d, 0xbf, 0xb6, 0xaf, 0xb7, 0x6e, 0xab,
+	0x01, 0xfe, 0x08, 0xc7, 0xcc, 0x1a, 0xa2, 0x33, 0xf7, 0x08, 0x6c, 0x22,
+	0x3b, 0x6f, 0xbb, 0xe7, 0xe6, 0x5f, 0xd3, 0x05, 0x7e, 0x33, 0x8e, 0x6a,
+	0x68, 0xa4, 0xc6, 0x77, 0x34, 0x2d, 0xd4, 0x2e, 0xb1, 0x65, 0x39, 0x06,
+	0xda, 0xb3, 0xb5, 0xc6, 0x16, 0x2c, 0xd1, 0xfe, 0xa4, 0xec, 0xa2, 0xfd,
+	0xf9, 0x43, 0xe0, 0x88, 0xe3, 0xe9, 0xb2, 0x69, 0xb4, 0x53, 0x17, 0x8f,
+	0x6f, 0xb1, 0xff, 0x48, 0x38, 0x09, 0xb7, 0xa1, 0xad, 0x84, 0x22, 0x6c,
+	0x81, 0x0c, 0x80, 0x97, 0x39, 0x07, 0x8a, 0xb6, 0xeb, 0xb6, 0xbf, 0xa8,
+	0x31, 0x31, 0xe4, 0xd5, 0xb5, 0x52, 0xcf, 0x3e, 0xc9, 0x7f, 0x7c, 0xaf,
+	0xd2, 0xf6, 0xef, 0x52, 0x59, 0x56, 0xad, 0x7b, 0xae, 0x9e, 0xc3, 0x5f,
+	0xff, 0x82, 0x39, 0x0a, 0xf1, 0x17, 0xd2, 0x45, 0x35, 0x0e, 0x49, 0x13,
+	0x86, 0x16, 0x0c, 0x2d, 0x6e, 0xb4, 0xfa, 0x26, 0xa4, 0xbd, 0xab, 0x40,
+	0x7b, 0xf7, 0x81, 0xc6, 0x28, 0xc3, 0x19, 0x97, 0x5b, 0x8b, 0xef, 0x23,
+	0xf8, 0x0e, 0xf9, 0xe4, 0x4a, 0x32, 0x9c, 0xf2, 0x9b, 0x75, 0xb2, 0x56,
+	0xee, 0x87, 0x7e, 0x2e, 0xeb, 0x70, 0xdc, 0x94, 0xff, 0xff, 0x15, 0xed,
+	0xac, 0xad, 0x35, 0xf6, 0xca, 0x8d, 0xb5, 0x94, 0xaf, 0x6b, 0xe4, 0x60,
+	0x55, 0xda, 0x95, 0xe4, 0x77, 0xf5, 0x98, 0xd7, 0xff, 0x3f, 0x18, 0x73,
+	0x7c, 0xd1, 0x98, 0x3d, 0x3b, 0xe6, 0x77, 0x21, 0xbf, 0xc9, 0xf8, 0x38,
+	0x1e, 0xf9, 0x2e, 0x1c, 0xb3, 0xc5, 0x85, 0x1e, 0x57, 0xb5, 0x9c, 0x08,
+	0x65, 0x04, 0xc7, 0x35, 0x60, 0xc7, 0xf0, 0xb9, 0xaa, 0x71, 0x0d, 0xbc,
+	0x89, 0x71, 0xb5, 0x2e, 0x18, 0xd7, 0xf6, 0x2b, 0x8e, 0x6b, 0x39, 0x1e,
+	0x27, 0x2f, 0x87, 0xe3, 0x8b, 0xca, 0xae, 0x22, 0xc7, 0xd8, 0x8f, 0x31,
+	0x1e, 0xd4, 0xfe, 0x80, 0x19, 0x63, 0xda, 0x8e, 0x51, 0x54, 0xdb, 0xb6,
+	0x7f, 0x8f, 0xdf, 0xd5, 0xe3, 0xa3, 0xee, 0xff, 0x3e, 0x68, 0xba, 0x4e,
+	0xb2, 0x5d, 0x75, 0x56, 0xfe, 0xdf, 0x24, 0x1f, 0x2e, 0x71, 0xae, 0x93,
+	0x19, 0x91, 0x51, 0xe8, 0xe0, 0xff, 0x5c, 0xcb, 0xd8, 0xfd, 0xf6, 0x94,
+	0xd5, 0x63, 0xd0, 0x17, 0x3b, 0x60, 0xf3, 0xf5, 0x17, 0x55, 0x77, 0x44,
+	0x82, 0xe0, 0xb6, 0xd4, 0xa7, 0xd1, 0xf7, 0x7e, 0xed, 0xab, 0x2e, 0x8d,
+	0x9b, 0x3f, 0x57, 0x2b, 0x3e, 0xed, 0x0d, 0xea, 0x73, 0xe8, 0xbb, 0xe3,
+	0xb4, 0xc1, 0xb2, 0xb0, 0x93, 0x33, 0xf1, 0x88, 0xb6, 0xc5, 0xa8, 0x13,
+	0x93, 0xf1, 0x8c, 0xa4, 0xd1, 0x5f, 0x26, 0xae, 0x84, 0x7d, 0xc0, 0x56,
+	0x83, 0x0d, 0xf9, 0xe1, 0xca, 0x3e, 0x3c, 0x0f, 0xcb, 0xad, 0xb0, 0x77,
+	0x6e, 0x7d, 0xe4, 0x0b, 0x72, 0x1b, 0x6c, 0x9d, 0xdb, 0x1e, 0x19, 0x93,
+	0xbd, 0xb0, 0x6d, 0xf6, 0xc2, 0xce, 0xd9, 0x5b, 0xa1, 0xed, 0x39, 0x8e,
+	0xb2, 0xad, 0x55, 0xb4, 0x46, 0x1b, 0x87, 0xe3, 0x23, 0xee, 0x0f, 0x72,
+	0x0e, 0x52, 0x09, 0xf5, 0x8a, 0x9e, 0x97, 0xa6, 0x05, 0x69, 0xaf, 0x27,
+	0xab, 0x42, 0xfd, 0xb4, 0xca, 0xc6, 0x8d, 0x8c, 0x0d, 0x78, 0x65, 0xda,
+	0x22, 0x8d, 0x78, 0xc0, 0x33, 0xf1, 0x47, 0xda, 0xaa, 0x1e, 0x7f, 0x63,
+	0x9d, 0xf8, 0x2b, 0xeb, 0xa4, 0xfe, 0x73, 0x90, 0xaf, 0xd5, 0x34, 0xc5,
+	0xb7, 0x67, 0x75, 0x0d, 0x69, 0x8b, 0x32, 0x38, 0xa4, 0x87, 0x8d, 0xaf,
+	0x23, 0x7f, 0xaf, 0x48, 0x4f, 0xfb, 0xb9, 0x2e, 0xb3, 0xbb, 0x5b, 0x56,
+	0x33, 0x1e, 0x90, 0xad, 0xcc, 0xc7, 0x04, 0x94, 0x5f, 0x1d, 0x13, 0xa0,
+	0x9f, 0xf5, 0x01, 0xe0, 0xec, 0x16, 0x3c, 0xfb, 0x64, 0x88, 0x71, 0x87,
+	0x4a, 0x68, 0x97, 0x7f, 0xd5, 0xda, 0xe5, 0x21, 0x1c, 0x09, 0xc0, 0x61,
+	0xe4, 0xf3, 0x52, 0x3d, 0xb7, 0x50, 0x7f, 0xe7, 0xe7, 0x6c, 0xda, 0x84,
+	0xec, 0x2a, 0x71, 0xdc, 0x94, 0xc1, 0xc4, 0x4d, 0xb5, 0x0c, 0x8e, 0x5b,
+	0x3b, 0x0a, 0x65, 0xb4, 0xfc, 0x5c, 0x2a, 0x3b, 0x29, 0xf7, 0x18, 0x9f,
+	0x7f, 0x20, 0x45, 0x5a, 0x7f, 0xb7, 0x64, 0xe6, 0xe2, 0xf3, 0x02, 0x7a,
+	0x93, 0x54, 0x24, 0xad, 0xd7, 0xd3, 0xbc, 0x09, 0xd9, 0x21, 0xbd, 0x31,
+	0xc6, 0x3a, 0x19, 0xcf, 0xf3, 0xf3, 0x13, 0xb0, 0x1f, 0x86, 0x4b, 0x0a,
+	0x16, 0x7c, 0xad, 0x0c, 0x79, 0x81, 0x6c, 0x4f, 0x39, 0x3a, 0x76, 0x6c,
+	0x74, 0x6d, 0xa9, 0xce, 0xd8, 0xae, 0x8e, 0x8e, 0xff, 0xce, 0x80, 0xfa,
+	0x66, 0xb4, 0x7d, 0xab, 0xb4, 0xfe, 0x9d, 0xd6, 0x65, 0x46, 0xeb, 0xc2,
+	0x38, 0xe6, 0x8c, 0x17, 0xb1, 0xe5, 0xaa, 0xd3, 0xa7, 0xea, 0x42, 0x5b,
+	0xb0, 0x50, 0x09, 0xd3, 0x9e, 0x5c, 0x26, 0xed, 0x85, 0x65, 0xd2, 0xfe,
+	0x76, 0x99, 0x34, 0x13, 0x17, 0xec, 0x2f, 0x5e, 0x46, 0xde, 0x88, 0xe6,
+	0x55, 0x69, 0x36, 0xf6, 0x75, 0x7e, 0xae, 0xcc, 0x2a, 0xeb, 0x97, 0x31,
+	0x46, 0x6c, 0x62, 0xc3, 0x39, 0x1d, 0x1b, 0xde, 0xe2, 0x6d, 0x53, 0x8c,
+	0x75, 0x11, 0x17, 0x09, 0xd9, 0xab, 0xf1, 0x42, 0x9c, 0x7c, 0x85, 0x31,
+	0xe0, 0x3c, 0xd7, 0x5a, 0x13, 0xea, 0x4a, 0xb4, 0x3d, 0x6f, 0x9b, 0x98,
+	0x79, 0x8b, 0xe9, 0x75, 0xd5, 0x3e, 0xd8, 0x0a, 0xfd, 0xc5, 0x26, 0xd9,
+	0x3e, 0x96, 0x58, 0x41, 0xbd, 0xb5, 0x63, 0xcc, 0xf8, 0x83, 0x7b, 0xc1,
+	0x57, 0x19, 0x21, 0x8c, 0xc9, 0x94, 0x08, 0x6d, 0xe2, 0xa5, 0xb6, 0xf0,
+	0xeb, 0xb7, 0xd7, 0x7b, 0x85, 0xf6, 0x1c, 0xd8, 0x0e, 0x3f, 0x6b, 0x7b,
+	0xf5, 0xd2, 0x37, 0x16, 0xe2, 0x4a, 0xfd, 0x9c, 0xf5, 0x22, 0x57, 0xa8,
+	0xa7, 0xed, 0x12, 0x79, 0x66, 0x4e, 0x16, 0x6f, 0x84, 0xcd, 0x24, 0x41,
+	0xb6, 0x5b, 0x5a, 0x23, 0xa2, 0x63, 0x3c, 0x29, 0x23, 0x9b, 0x3b, 0xb8,
+	0xb6, 0x03, 0xfa, 0x37, 0xb6, 0x8a, 0x89, 0x9b, 0x86, 0x76, 0xca, 0x72,
+	0xb4, 0x7b, 0xbd, 0xa5, 0x5d, 0xae, 0xa9, 0xee, 0xa0, 0xcc, 0xc5, 0x9c,
+	0x18, 0x3a, 0xde, 0x5e, 0x94, 0x44, 0x48, 0xc7, 0x33, 0xf0, 0x8b, 0xab,
+	0xe9, 0x78, 0x46, 0x52, 0x9a, 0x8e, 0x6b, 0x17, 0xd0, 0x71, 0xab, 0xa5,
+	0xe3, 0x77, 0x44, 0x0d, 0x5d, 0x28, 0xad, 0xa7, 0x48, 0xa7, 0x86, 0x8e,
+	0x1d, 0x4d, 0xc7, 0x33, 0x78, 0xbb, 0x7e, 0x8f, 0x2d, 0x13, 0xb1, 0x69,
+	0xfc, 0x1d, 0xa6, 0x51, 0x2e, 0xfe, 0x66, 0xd4, 0xe8, 0xa5, 0x14, 0xe8,
+	0x28, 0x4c, 0xff, 0x60, 0xd4, 0xd0, 0x67, 0x75, 0x9a, 0x89, 0x8f, 0xf4,
+	0x17, 0xdf, 0x13, 0x5d, 0x48, 0x9f, 0x29, 0xd0, 0x67, 0x58, 0xe6, 0xf5,
+	0xe8, 0xb3, 0xde, 0xae, 0x5b, 0x44, 0xf5, 0xba, 0x7b, 0x26, 0x66, 0x68,
+	0xf5, 0x56, 0x3d, 0x76, 0x8e, 0xfb, 0xd9, 0x9f, 0x81, 0x56, 0xcd, 0xdc,
+	0x9c, 0x9f, 0xf7, 0xb7, 0x19, 0x8b, 0x4a, 0x98, 0x18, 0x36, 0xe3, 0xa4,
+	0x57, 0xb2, 0x1d, 0x8d, 0x7c, 0xaa, 0xd1, 0xf2, 0xa9, 0x71, 0x48, 0xa5,
+	0xab, 0x65, 0x76, 0x37, 0x74, 0x05, 0x6d, 0x6c, 0x2d, 0xa7, 0x91, 0xd7,
+	0x9a, 0xc8, 0x16, 0xff, 0xd9, 0xee, 0x5f, 0xe0, 0xba, 0x80, 0x0c, 0x39,
+	0x48, 0x6b, 0x2b, 0x9b, 0x71, 0x29, 0xbf, 0x11, 0xdf, 0xdd, 0xd2, 0x56,
+	0x56, 0x72, 0xfb, 0x58, 0x83, 0xec, 0x2b, 0xba, 0xf2, 0x51, 0xd4, 0xff,
+	0x48, 0xd1, 0x83, 0x3f, 0x3e, 0x1e, 0xa5, 0x5d, 0xb8, 0xb7, 0xc8, 0xf5,
+	0x49, 0xc7, 0xac, 0x19, 0x2d, 0x58, 0xf3, 0x8c, 0x48, 0x5b, 0x47, 0x01,
+	0x9e, 0x8a, 0xb8, 0x3b, 0x01, 0x47, 0x5d, 0x3a, 0x2d, 0xaf, 0x74, 0x0f,
+	0x38, 0xda, 0x97, 0x70, 0x7a, 0xe4, 0xc6, 0x4a, 0x5a, 0x6e, 0xa8, 0x98,
+	0x75, 0xd2, 0xf9, 0x75, 0xd0, 0xa4, 0x37, 0x0d, 0x9d, 0x93, 0xf1, 0x82,
+	0xe0, 0x3c, 0xe4, 0xb7, 0x3a, 0xe2, 0x4a, 0xb4, 0x23, 0x19, 0x9f, 0x16,
+	0xf3, 0x7d, 0xb1, 0xfc, 0xe3, 0x60, 0x28, 0xe6, 0xca, 0x2b, 0x3e, 0xc7,
+	0xd5, 0x23, 0xd7, 0x97, 0xab, 0xfb, 0xe3, 0x5a, 0xe9, 0x13, 0x51, 0xae,
+	0x4d, 0x64, 0x2b, 0xe5, 0x28, 0xe3, 0xe7, 0x22, 0x79, 0x69, 0x7b, 0x2b,
+	0x7c, 0x37, 0x48, 0xeb, 0xb6, 0xb7, 0x82, 0x56, 0x62, 0xd0, 0xf3, 0x5b,
+	0x01, 0xd7, 0x56, 0xc6, 0xbb, 0x18, 0xe7, 0xe2, 0xf7, 0x5f, 0xa2, 0x5f,
+	0xd6, 0xfd, 0x5d, 0xbd, 0x66, 0x25, 0x8a, 0x73, 0x6e, 0xf8, 0x65, 0x79,
+	0x5d, 0xd3, 0x38, 0x14, 0x4d, 0x8b, 0x13, 0x7d, 0x5b, 0x5c, 0x56, 0xf8,
+	0xd5, 0xfd, 0x73, 0xed, 0x57, 0x14, 0x70, 0xe8, 0xee, 0xd8, 0xdc, 0x23,
+	0x7d, 0x18, 0x5f, 0xff, 0x92, 0xf1, 0xed, 0x17, 0xc6, 0x54, 0x2f, 0x16,
+	0x39, 0x86, 0xf9, 0x71, 0xa9, 0x3f, 0x32, 0xe3, 0x8a, 0x76, 0x2c, 0x1e,
+	0x8f, 0xae, 0xaf, 0x4e, 0x01, 0x96, 0xe7, 0xf4, 0x1e, 0x81, 0x20, 0xb8,
+	0xa6, 0xe3, 0x62, 0x90, 0x58, 0x97, 0xec, 0x9c, 0x9e, 0x5f, 0xd3, 0x19,
+	0x8a, 0xa4, 0x33, 0x1a, 0xff, 0xf8, 0x4e, 0xe4, 0xca, 0xdd, 0x98, 0x3b,
+	0x71, 0x73, 0x5d, 0xae, 0xe6, 0x8d, 0x9c, 0xdf, 0x6d, 0xd7, 0xad, 0x42,
+	0xbf, 0x29, 0x08, 0x94, 0xbf, 0x58, 0x56, 0x50, 0x47, 0x61, 0xec, 0xb2,
+	0xdb, 0xee, 0x4b, 0x49, 0x31, 0x6e, 0x38, 0xe4, 0xa6, 0xa3, 0x89, 0x42,
+	0xb9, 0x0b, 0xbf, 0x1b, 0xf0, 0xfe, 0x45, 0xd8, 0x28, 0x3d, 0xb0, 0x61,
+	0x24, 0xa6, 0x8c, 0x3c, 0x00, 0xfd, 0x76, 0xe4, 0x95, 0x22, 0x3f, 0x7a,
+	0x89, 0xe1, 0x72, 0x2c, 0x31, 0x5a, 0xde, 0xcb, 0xfa, 0x28, 0x7b, 0xa5,
+	0xf8, 0x1d, 0xfb, 0x62, 0x1f, 0xf4, 0x79, 0x7f, 0x96, 0x3e, 0x5c, 0xdb,
+	0x36, 0xdb, 0x0c, 0xf1, 0xe2, 0xd2, 0x0d, 0xc7, 0xbf, 0x6e, 0xeb, 0x8f,
+	0x70, 0x7c, 0x7b, 0x2d, 0xdc, 0x8b, 0xfb, 0x7d, 0x49, 0xdb, 0x2c, 0x8f,
+	0x55, 0x68, 0x27, 0x72, 0x4d, 0x27, 0x79, 0x62, 0x5c, 0x08, 0x47, 0x10,
+	0x5c, 0x48, 0x19, 0x7d, 0xfd, 0x74, 0x85, 0xeb, 0x1a, 0x41, 0xf0, 0x5d,
+	0xda, 0xc2, 0x83, 0x25, 0xf4, 0x17, 0xe2, 0x60, 0x63, 0xde, 0x85, 0x2c,
+	0x1c, 0xe9, 0x26, 0x7e, 0x05, 0x5e, 0x69, 0x87, 0xb7, 0x4b, 0xa2, 0x89,
+	0xdf, 0x2e, 0x37, 0x24, 0x3e, 0x51, 0xf6, 0x80, 0x67, 0x8e, 0x3b, 0x96,
+	0xd8, 0x63, 0xc7, 0xcc, 0xfd, 0x20, 0xaf, 0xbf, 0x4f, 0xe3, 0xa5, 0x05,
+	0x3e, 0x12, 0x61, 0x9a, 0x87, 0x85, 0xb0, 0x25, 0x2c, 0x6e, 0x82, 0xe0,
+	0xfb, 0x29, 0xf6, 0xd9, 0xcd, 0xfd, 0x00, 0x23, 0xe8, 0x37, 0xbf, 0x56,
+	0x11, 0x0f, 0xd1, 0xc4, 0x1d, 0xe8, 0xfb, 0xb7, 0xd1, 0xf7, 0xbe, 0x32,
+	0xfb, 0x83, 0x7c, 0xc0, 0xd8, 0x47, 0x2a, 0x21, 0xbc, 0xcb, 0xf5, 0x1d,
+	0xce, 0x79, 0xa7, 0xb5, 0xeb, 0xc2, 0x6f, 0x8d, 0x48, 0x4f, 0xc1, 0x97,
+	0xcb, 0x56, 0x66, 0xd6, 0xb8, 0xf2, 0x2e, 0xc8, 0xda, 0x40, 0x4e, 0x42,
+	0x86, 0xcd, 0x68, 0xba, 0xc9, 0xae, 0xe7, 0xff, 0x11, 0xf9, 0xe4, 0x0a,
+	0xc6, 0x94, 0x7b, 0x7d, 0xda, 0xab, 0xb3, 0xc1, 0x8c, 0x4f, 0x99, 0xbc,
+	0x4a, 0xc6, 0xbd, 0x7c, 0x27, 0xf4, 0x03, 0xd2, 0x1a, 0xe9, 0x63, 0x27,
+	0xb2, 0x91, 0x64, 0x62, 0x58, 0xb8, 0xc7, 0x89, 0xfb, 0x13, 0xb8, 0xef,
+	0x87, 0xf2, 0xc0, 0x85, 0x9c, 0xe3, 0x1c, 0x9a, 0xfe, 0x86, 0xcb, 0xf3,
+	0x65, 0x0f, 0x08, 0xd7, 0x09, 0x93, 0xf1, 0xbd, 0xda, 0x26, 0x01, 0xd5,
+	0x15, 0x59, 0x76, 0x33, 0x2c, 0x12, 0xbf, 0xaa, 0xbc, 0xde, 0x73, 0x05,
+	0x3e, 0x67, 0x1c, 0x21, 0x1a, 0xcd, 0x16, 0xe5, 0xb5, 0x48, 0xb7, 0xbc,
+	0x96, 0x4d, 0xd5, 0x4b, 0xaf, 0x96, 0xf9, 0xcc, 0xd3, 0xe9, 0xb3, 0x26,
+	0xdd, 0x85, 0x2e, 0xe1, 0x9c, 0xf4, 0x40, 0x46, 0x4f, 0x00, 0x6e, 0xe2,
+	0xb0, 0x87, 0x32, 0x89, 0xf3, 0xa7, 0x54, 0x3a, 0x16, 0xcd, 0x95, 0xa5,
+	0x2f, 0x57, 0xb4, 0xb1, 0x9e, 0x01, 0x8e, 0x7f, 0x95, 0xc5, 0x43, 0xa3,
+	0xb8, 0x80, 0xad, 0x2f, 0x92, 0x70, 0xe0, 0x2b, 0x43, 0xd7, 0x3f, 0xba,
+	0x4a, 0x1a, 0x89, 0x9b, 0x1e, 0xf0, 0x52, 0x0d, 0x74, 0xd1, 0xfd, 0xcd,
+	0x5c, 0x37, 0xd5, 0x36, 0x64, 0xec, 0x63, 0xbf, 0xac, 0xd2, 0x7f, 0x1b,
+	0x57, 0xe9, 0x51, 0x2b, 0x2f, 0xa3, 0x7d, 0x94, 0x97, 0x4f, 0x97, 0x08,
+	0x8f, 0x78, 0x11, 0x3f, 0xd1, 0xd7, 0x5b, 0x16, 0x15, 0x49, 0x7b, 0xd1,
+	0xde, 0xf2, 0x42, 0xfa, 0x7f, 0xba, 0xf2, 0x61, 0x6b, 0x0b, 0x56, 0xc7,
+	0x54, 0xab, 0xf3, 0xc8, 0x83, 0xcb, 0xe5, 0x11, 0x26, 0x89, 0xae, 0x48,
+	0x5f, 0xf8, 0x54, 0x7b, 0x47, 0xde, 0xab, 0x15, 0xe2, 0x39, 0x80, 0xdc,
+	0x06, 0xae, 0xcb, 0x5c, 0xaf, 0xde, 0x8f, 0x79, 0xfb, 0x3f, 0x41, 0x26,
+	0xc6, 0x7c, 0x4f, 0xea, 0xe0, 0xdb, 0xbe, 0x0c, 0xdd, 0xf9, 0x8a, 0x7f,
+	0xe1, 0x53, 0x9d, 0x1d, 0x41, 0xf0, 0xac, 0x9f, 0x4f, 0xb8, 0x90, 0x1f,
+	0x87, 0x2d, 0xbe, 0x87, 0x81, 0xef, 0x89, 0x39, 0x7c, 0x27, 0xe4, 0x62,
+	0xd7, 0xf7, 0x03, 0xae, 0xf5, 0x0d, 0x97, 0x6f, 0xbd, 0x55, 0xa5, 0x3f,
+	0xfe, 0xa1, 0x6c, 0x37, 0xfb, 0x1b, 0x91, 0xc3, 0x95, 0x9b, 0x88, 0xbf,
+	0x28, 0xc6, 0x7a, 0x4f, 0x9f, 0x6f, 0xfa, 0xed, 0x5b, 0xd0, 0x2f, 0xe9,
+	0xe5, 0x47, 0xac, 0x8b, 0x32, 0xd5, 0x75, 0x33, 0xa0, 0xcb, 0xbc, 0xad,
+	0x3b, 0x70, 0x85, 0xba, 0xde, 0x15, 0xea, 0x1e, 0x46, 0xdd, 0x3d, 0xb6,
+	0xee, 0x85, 0xcf, 0xbc, 0xb9, 0x7e, 0x07, 0xb8, 0xc7, 0x0e, 0x3e, 0x80,
+	0xb8, 0x11, 0xff, 0x36, 0xfc, 0xbe, 0x85, 0xed, 0x28, 0xda, 0xf7, 0x23,
+	0x95, 0x21, 0x19, 0xae, 0xec, 0xc4, 0x33, 0x88, 0xb4, 0x3e, 0x3c, 0xfb,
+	0xf0, 0x3b, 0x8d, 0x47, 0xa2, 0x6e, 0xfa, 0xc2, 0x5d, 0xc3, 0x7e, 0x88,
+	0x57, 0xae, 0xcd, 0xb3, 0x0f, 0xd8, 0x17, 0x5d, 0x3f, 0x41, 0x1f, 0x61,
+	0xfa, 0x07, 0x50, 0x67, 0x1a, 0x69, 0x2b, 0x69, 0x7b, 0x62, 0xae, 0xab,
+	0xeb, 0x54, 0xc3, 0x36, 0x1d, 0xce, 0x05, 0xf2, 0x0d, 0x8d, 0xf6, 0x16,
+	0x43, 0x18, 0xef, 0x44, 0x1b, 0xe3, 0x57, 0x29, 0xff, 0x1e, 0xc2, 0x15,
+	0x57, 0xfe, 0xc7, 0xf0, 0x7e, 0x2d, 0xd8, 0x9d, 0x62, 0x4c, 0x9e, 0xf3,
+	0x7e, 0xdd, 0xaa, 0xa5, 0x7b, 0x9f, 0x42, 0x1a, 0xe8, 0x84, 0x4e, 0x69,
+	0xb0, 0x74, 0x5a, 0x80, 0xe5, 0x43, 0x1a, 0xe5, 0x98, 0x17, 0x97, 0x4d,
+	0x76, 0xe6, 0xa5, 0x07, 0xba, 0x8c, 0xb2, 0xf6, 0xd3, 0xf5, 0x26, 0x0e,
+	0x03, 0xcb, 0xd1, 0xef, 0x04, 0x3d, 0x37, 0x88, 0x87, 0xfa, 0x03, 0x11,
+	0x0f, 0x34, 0x18, 0xd6, 0x4f, 0x7a, 0x03, 0x11, 0x8e, 0x19, 0x1c, 0x5f,
+	0xe6, 0xba, 0x34, 0x6d, 0x6b, 0xd6, 0x0f, 0x6d, 0x1c, 0xfe, 0x7b, 0x59,
+	0xc4, 0x67, 0x1a, 0xdb, 0x0b, 0xde, 0x55, 0xe3, 0x2f, 0x59, 0x63, 0xc1,
+	0x38, 0xf3, 0x49, 0x68, 0x30, 0xaf, 0x4f, 0xcb, 0xe9, 0xfc, 0x35, 0x35,
+	0xd2, 0xe0, 0xf5, 0xeb, 0xdf, 0x2c, 0xd3, 0xe0, 0x81, 0x4f, 0x17, 0x95,
+	0x61, 0x1a, 0xf3, 0x0a, 0x6b, 0x94, 0xde, 0xab, 0xa4, 0xf7, 0x28, 0xc9,
+	0x83, 0xa9, 0x64, 0x62, 0x48, 0x25, 0xbd, 0x71, 0xd9, 0x0f, 0xb9, 0x43,
+	0x39, 0x39, 0x73, 0x7f, 0x44, 0xb8, 0x9f, 0xef, 0x5d, 0x92, 0xf5, 0x29,
+	0x3f, 0x0b, 0x9f, 0x57, 0x94, 0x75, 0x95, 0x97, 0x1a, 0xcc, 0xd8, 0xb8,
+	0x0f, 0x01, 0x70, 0x36, 0xd1, 0x86, 0xbb, 0xb5, 0x81, 0x3c, 0x94, 0x50,
+	0x11, 0xd9, 0x45, 0x3f, 0x5f, 0x7d, 0xb1, 0x5e, 0xea, 0xa7, 0xd7, 0x78,
+	0x52, 0xd1, 0xe9, 0x66, 0x7f, 0x60, 0xb2, 0x73, 0x48, 0x89, 0x1e, 0x7b,
+	0x46, 0xbd, 0x91, 0xcc, 0x9e, 0xb5, 0xfa, 0x23, 0x90, 0xc7, 0xb4, 0xbe,
+	0x98, 0xf9, 0xbc, 0x2b, 0x17, 0x82, 0xb6, 0x4d, 0x17, 0xda, 0xb3, 0x5d,
+	0xb4, 0x73, 0x57, 0xd9, 0xfd, 0x95, 0x8c, 0x63, 0xbd, 0x4b, 0x9e, 0xf3,
+	0x0b, 0x18, 0xf7, 0x7e, 0xb9, 0xe0, 0xb3, 0xbf, 0x99, 0xcf, 0x79, 0xc2,
+	0x74, 0xc2, 0x6e, 0xfa, 0x13, 0xf5, 0xa7, 0x80, 0x87, 0x7d, 0x52, 0x07,
+	0x5f, 0xc9, 0xee, 0x4b, 0x0e, 0xe4, 0x45, 0xcf, 0x4b, 0x8f, 0xa0, 0xad,
+	0x15, 0x3e, 0xf8, 0x10, 0x76, 0x73, 0xcd, 0x91, 0xab, 0x21, 0x77, 0x1d,
+	0xbd, 0xc7, 0x02, 0x93, 0xe1, 0x4d, 0x61, 0xde, 0x33, 0x03, 0x2c, 0x57,
+	0x2f, 0xd3, 0x31, 0xf2, 0xba, 0xe6, 0x97, 0x4f, 0x65, 0xfd, 0x76, 0x4f,
+	0x39, 0xc3, 0x8c, 0x31, 0x00, 0xaf, 0xa4, 0xcd, 0x54, 0x6c, 0xbb, 0xcf,
+	0xb6, 0x58, 0xe6, 0x2a, 0xf9, 0xf6, 0xc0, 0x85, 0x7f, 0x78, 0xd6, 0xff,
+	0x7b, 0xc0, 0x91, 0x81, 0x4c, 0xe0, 0xf3, 0x6a, 0x90, 0x8f, 0x31, 0xa6,
+	0xf5, 0xbf, 0xeb, 0xad, 0x9d, 0xac, 0x79, 0x7f, 0x58, 0xef, 0x93, 0x79,
+	0xfe, 0x33, 0x59, 0xae, 0x77, 0xc0, 0x36, 0xc9, 0x69, 0xb9, 0x18, 0xfd,
+	0x69, 0x0e, 0xf0, 0x14, 0x2a, 0xb4, 0x43, 0xfe, 0x06, 0x76, 0x88, 0xd6,
+	0x93, 0xf2, 0xed, 0x41, 0xe6, 0xb1, 0xdd, 0xec, 0xd5, 0xae, 0xd6, 0x0b,
+	0x21, 0x2c, 0xc9, 0xce, 0x1c, 0xf2, 0x47, 0xb4, 0x1d, 0xef, 0xc9, 0xac,
+	0xe7, 0xea, 0x7d, 0x27, 0xf9, 0xc1, 0x20, 0x78, 0xc5, 0x77, 0xe5, 0xa4,
+	0x86, 0xf9, 0x05, 0xf4, 0xe1, 0xc8, 0xc4, 0x80, 0xfb, 0xd3, 0x93, 0x3e,
+	0xc7, 0xc7, 0x3c, 0xae, 0x2b, 0x6d, 0x8e, 0x1b, 0xf8, 0x68, 0x9b, 0x7e,
+	0x2f, 0x98, 0x8d, 0x71, 0xdd, 0x02, 0x3c, 0x5d, 0x6a, 0xf7, 0x6e, 0x90,
+	0xdb, 0xe6, 0x6c, 0x9a, 0x69, 0x31, 0x36, 0xa3, 0xd1, 0x69, 0x17, 0xfe,
+	0x61, 0xc4, 0xbf, 0xb0, 0xba, 0x80, 0xb9, 0x81, 0x0e, 0x5b, 0x0c, 0x4b,
+	0x8a, 0xb0, 0x0c, 0x6b, 0x58, 0x62, 0xc0, 0xa5, 0x0b, 0xd9, 0x77, 0x9b,
+	0x1c, 0x02, 0xde, 0x87, 0x06, 0x45, 0x9e, 0x85, 0x4d, 0x76, 0xbe, 0x0a,
+	0x9e, 0x19, 0xc0, 0x73, 0xde, 0xe7, 0x5e, 0x00, 0xe6, 0xf9, 0xde, 0xb0,
+	0x70, 0x2f, 0x00, 0x71, 0xd8, 0x81, 0xdf, 0x22, 0x33, 0xd0, 0xbf, 0x27,
+	0xfd, 0xd7, 0x82, 0xf1, 0x18, 0x75, 0x23, 0xda, 0x99, 0xdb, 0x1b, 0x14,
+	0xc8, 0xe7, 0x53, 0xd4, 0x43, 0xb5, 0xd2, 0xb6, 0x8e, 0x7e, 0x88, 0x91,
+	0x9f, 0x37, 0xf8, 0x19, 0xf4, 0xf5, 0x5b, 0x2b, 0xa5, 0x3e, 0x2f, 0xfd,
+	0x1d, 0x75, 0xc8, 0x73, 0x6d, 0xde, 0x80, 0xce, 0xeb, 0xef, 0x38, 0x8c,
+	0xfc, 0x8f, 0xaf, 0x64, 0xbc, 0xdb, 0xf5, 0xd7, 0x4b, 0xdb, 0x1a, 0xe6,
+	0x55, 0xf3, 0xe0, 0xab, 0xdc, 0x83, 0x69, 0x75, 0x38, 0x64, 0x59, 0x29,
+	0xef, 0x71, 0xa7, 0xdd, 0x21, 0xcc, 0xc5, 0x6e, 0x9f, 0xb2, 0xed, 0xbf,
+	0xa3, 0x6e, 0x4a, 0x6e, 0xf4, 0x07, 0x91, 0x37, 0x8d, 0xbc, 0xc3, 0x36,
+	0x6f, 0xd0, 0xe6, 0x6d, 0x43, 0xde, 0x3e, 0xe0, 0xef, 0x6e, 0x9d, 0x9e,
+	0xe5, 0x6f, 0x53, 0xc7, 0x5b, 0xd9, 0x71, 0xe1, 0x33, 0x37, 0xf8, 0x84,
+	0x0b, 0x79, 0x25, 0xae, 0x8b, 0xde, 0x26, 0x79, 0x2f, 0x79, 0x0b, 0x7b,
+	0xfd, 0x66, 0xb1, 0x0e, 0xb2, 0x89, 0x7b, 0x80, 0x69, 0xb3, 0x6e, 0xf1,
+	0x5e, 0x96, 0xff, 0x40, 0xba, 0xeb, 0xc9, 0x38, 0x5f, 0x5b, 0xc9, 0xb8,
+	0xd8, 0x88, 0x4f, 0xfb, 0x3a, 0x90, 0x9c, 0x5e, 0x3f, 0xa1, 0x7f, 0x5b,
+	0x44, 0x3a, 0xe9, 0x41, 0x35, 0x45, 0xf4, 0x3e, 0x2d, 0x7e, 0x47, 0x61,
+	0xf7, 0x06, 0x42, 0x9f, 0x8f, 0x36, 0x5d, 0xc6, 0xe3, 0xda, 0x50, 0x98,
+	0xe7, 0x21, 0x4f, 0x6d, 0x8a, 0xc0, 0x26, 0xaa, 0xf5, 0x1d, 0x1d, 0xa3,
+	0x2e, 0xe8, 0x75, 0x00, 0xc6, 0xdf, 0x32, 0xf0, 0x63, 0x02, 0xe9, 0x47,
+	0xdf, 0xa4, 0xfd, 0x68, 0x87, 0xef, 0x1d, 0x10, 0xee, 0x67, 0x65, 0x7a,
+	0xbb, 0xf7, 0x6d, 0x99, 0xa7, 0xf3, 0x19, 0x49, 0x66, 0x94, 0x03, 0xff,
+	0x75, 0xab, 0x23, 0xf5, 0xb0, 0x3d, 0x6e, 0x30, 0xfa, 0xcd, 0xe3, 0x9e,
+	0xc4, 0x8b, 0xda, 0x56, 0x6b, 0xb4, 0xf3, 0x91, 0x05, 0x6e, 0xb8, 0x1f,
+	0x7c, 0xe0, 0x9e, 0xdd, 0x7e, 0x21, 0x09, 0x6a, 0xd4, 0xba, 0x71, 0x18,
+	0xb4, 0x91, 0x4d, 0x19, 0xdd, 0x78, 0xc3, 0x9c, 0x6e, 0xfc, 0xf3, 0x95,
+	0xe4, 0x89, 0xe1, 0x72, 0x1c, 0x75, 0xf5, 0x3a, 0x4a, 0x82, 0x75, 0x6b,
+	0x31, 0x9f, 0xe7, 0xfd, 0xec, 0x35, 0xa0, 0x2f, 0xc8, 0xe1, 0x64, 0xe7,
+	0x29, 0xd4, 0x2d, 0xa0, 0xee, 0xe4, 0x5c, 0x5d, 0x47, 0x46, 0x7c, 0xbd,
+	0xef, 0x59, 0x26, 0xcb, 0x21, 0x1d, 0x26, 0xe3, 0xb7, 0x6a, 0x5e, 0xe0,
+	0x7e, 0x30, 0x37, 0x71, 0x9f, 0x6c, 0xd6, 0xb4, 0xdd, 0x27, 0xdc, 0x27,
+	0xc5, 0xb6, 0xef, 0x0b, 0xda, 0xd6, 0x10, 0xbe, 0x12, 0xde, 0xa4, 0x8d,
+	0x31, 0xbc, 0xc3, 0xf9, 0x37, 0xf3, 0x3e, 0xe4, 0x10, 0xdf, 0xbf, 0x1f,
+	0xe4, 0x07, 0x39, 0x2f, 0xfc, 0x9e, 0xa7, 0xb9, 0x11, 0xd0, 0x5c, 0xc4,
+	0x7f, 0xbb, 0x0c, 0xeb, 0x3d, 0x10, 0x29, 0x99, 0xd0, 0xf1, 0xcc, 0x0b,
+	0xc1, 0x23, 0x0b, 0xe4, 0xf8, 0x47, 0x94, 0xa1, 0x21, 0xfe, 0x2e, 0x24,
+	0xea, 0x64, 0x66, 0x4d, 0x9d, 0xde, 0xf1, 0x41, 0x7c, 0x8c, 0xde, 0x73,
+	0x3b, 0xf8, 0xf5, 0xfa, 0xb9, 0xb1, 0x00, 0xdf, 0xc0, 0xe3, 0x4e, 0x63,
+	0xeb, 0x63, 0x1c, 0x19, 0xed, 0xbf, 0x67, 0x8b, 0x4a, 0xef, 0x0b, 0xa2,
+	0x8e, 0x3f, 0x00, 0x9d, 0x6a, 0xf6, 0xa4, 0xe0, 0x5d, 0xe1, 0xbc, 0x29,
+	0xed, 0x73, 0x1c, 0x04, 0x0f, 0x1f, 0xf4, 0xb3, 0x6b, 0x6a, 0x75, 0xdb,
+	0x49, 0xef, 0x7a, 0x6d, 0x13, 0x6e, 0x94, 0x99, 0x14, 0xdb, 0x23, 0x5e,
+	0xfe, 0x47, 0x30, 0xe4, 0xcd, 0xa0, 0x7f, 0x43, 0xff, 0x59, 0x5f, 0xb5,
+	0xd4, 0x49, 0xf5, 0x3e, 0x53, 0xe2, 0xc9, 0x85, 0xbd, 0xd2, 0x01, 0xfc,
+	0x18, 0x78, 0x73, 0xe5, 0xb7, 0x49, 0x21, 0xe6, 0xda, 0xb1, 0x45, 0xb4,
+	0x2f, 0x37, 0x91, 0xaa, 0x83, 0x2d, 0xf8, 0x17, 0xc1, 0xe4, 0x82, 0x31,
+	0x1e, 0xac, 0x1a, 0xe3, 0x4c, 0x02, 0xd8, 0x68, 0x89, 0xcc, 0xc9, 0x01,
+	0xf6, 0x65, 0x64, 0x52, 0x38, 0xc6, 0x3a, 0x8c, 0x71, 0xc7, 0xdc, 0x18,
+	0x0f, 0x2f, 0x1a, 0xe3, 0x61, 0x8c, 0x11, 0xf6, 0x42, 0x29, 0xd3, 0xe9,
+	0xce, 0xcf, 0xfb, 0xd5, 0x35, 0x73, 0xf3, 0x29, 0xdc, 0xeb, 0x84, 0xf1,
+	0xd3, 0xa6, 0xd8, 0x08, 0x78, 0x74, 0x5b, 0x90, 0x71, 0x0e, 0x64, 0x5b,
+	0x76, 0x4d, 0x8d, 0x1d, 0xff, 0x76, 0x96, 0x2b, 0x1b, 0x1c, 0x9c, 0x4c,
+	0xb9, 0x9d, 0x8f, 0xa0, 0xbf, 0xbd, 0x76, 0x5c, 0xbd, 0xe5, 0xab, 0x31,
+	0xae, 0x0b, 0xdf, 0xc1, 0x18, 0xe0, 0xb3, 0x9d, 0xa0, 0x0f, 0x9c, 0x18,
+	0x92, 0x05, 0xb2, 0xeb, 0x33, 0xf3, 0x72, 0xd4, 0xc0, 0x4c, 0xdb, 0xba,
+	0x30, 0x07, 0xf3, 0xdd, 0x8b, 0x60, 0xbe, 0x1b, 0x30, 0xef, 0xb3, 0xf3,
+	0xb2, 0xaf, 0x6a, 0xcf, 0x62, 0x48, 0x47, 0xfc, 0xfd, 0xbc, 0xf5, 0x45,
+	0x3e, 0x20, 0xf7, 0x97, 0x3a, 0xe5, 0xcb, 0x95, 0xe4, 0x59, 0xc6, 0xd1,
+	0xcf, 0x55, 0x92, 0xe3, 0x22, 0x5d, 0xf2, 0xc7, 0xb0, 0x73, 0xae, 0x82,
+	0x6f, 0xf1, 0x34, 0xfc, 0xd7, 0x3f, 0xa9, 0xf8, 0xf2, 0xc4, 0xdc, 0x7e,
+	0x38, 0xea, 0xba, 0xb4, 0x9c, 0x84, 0x4f, 0xbb, 0xed, 0x68, 0x1b, 0xf7,
+	0x2a, 0x11, 0xbe, 0xbb, 0xa8, 0x73, 0xda, 0x94, 0xe6, 0xc5, 0xef, 0x62,
+	0xbc, 0xa7, 0xa9, 0x6b, 0xd6, 0xfa, 0xbe, 0x77, 0xb3, 0x5a, 0x47, 0x99,
+	0x90, 0xff, 0x5a, 0xe4, 0x03, 0xf5, 0x26, 0xc6, 0x92, 0xf1, 0x9a, 0xc9,
+	0x1b, 0xdb, 0x3a, 0x12, 0x7d, 0x42, 0x5b, 0x82, 0xfe, 0x36, 0x6c, 0xa1,
+	0xd2, 0xe6, 0xf8, 0x5a, 0xa1, 0x4c, 0xa2, 0x5d, 0x94, 0x96, 0x09, 0xc0,
+	0x3e, 0x06, 0x89, 0x50, 0x68, 0xf6, 0x47, 0x7b, 0xd5, 0x44, 0x03, 0x79,
+	0x70, 0xdb, 0x19, 0xd0, 0xd7, 0x36, 0x8c, 0xa9, 0x2b, 0x79, 0x76, 0x46,
+	0x65, 0x4e, 0xac, 0x95, 0x57, 0x82, 0xa1, 0x66, 0x47, 0x9e, 0xd8, 0xc4,
+	0x3c, 0x2d, 0xb7, 0x3f, 0xd5, 0x0b, 0xf9, 0xd4, 0xce, 0x73, 0x0b, 0x03,
+	0xf2, 0x2f, 0x77, 0x80, 0x06, 0x7f, 0xb8, 0xe9, 0x6b, 0xc1, 0x6c, 0xb3,
+	0x2b, 0x5b, 0x37, 0x25, 0xbd, 0xbc, 0xc2, 0x78, 0x4a, 0x18, 0x4f, 0x09,
+	0xe3, 0xe3, 0x98, 0x4b, 0x18, 0xd7, 0x15, 0xf7, 0x4a, 0xf5, 0x2c, 0x88,
+	0xcb, 0x1a, 0x3f, 0x2d, 0x93, 0x77, 0x65, 0x83, 0xdd, 0x2b, 0x35, 0x5c,
+	0x1f, 0xae, 0xb1, 0x65, 0x64, 0x3c, 0x28, 0xf8, 0x7f, 0x70, 0x55, 0xb6,
+	0x2b, 0xb6, 0x58, 0xe7, 0xdc, 0x35, 0xaf, 0x73, 0x44, 0x9e, 0x33, 0xf3,
+	0x86, 0x39, 0xf3, 0xbd, 0x49, 0x6e, 0x86, 0x87, 0x0e, 0xdc, 0xaa, 0xf7,
+	0x3c, 0x77, 0xe0, 0x9b, 0x36, 0xd5, 0xa7, 0xf5, 0x3a, 0xe2, 0x4c, 0xf9,
+	0x1e, 0x3b, 0x77, 0xf7, 0x68, 0x3d, 0xbb, 0x75, 0xd3, 0xa5, 0x80, 0xfb,
+	0xdc, 0xbc, 0x4d, 0xcb, 0xc5, 0x22, 0x68, 0xef, 0xd5, 0x69, 0xde, 0xe7,
+	0x7a, 0x76, 0x41, 0x9f, 0x13, 0x01, 0xde, 0xe6, 0xe2, 0x62, 0xf5, 0x48,
+	0xa3, 0xbe, 0xf8, 0x69, 0x83, 0x59, 0x47, 0xa5, 0x6c, 0x58, 0x83, 0x34,
+	0xd7, 0xec, 0x05, 0x5e, 0x90, 0xf7, 0xdf, 0xea, 0xcd, 0x9e, 0x8f, 0xea,
+	0xb2, 0xe0, 0x31, 0xbd, 0x2f, 0x84, 0xfb, 0x03, 0x7f, 0x79, 0xa5, 0xb1,
+	0x4d, 0xc3, 0x7c, 0xa6, 0xff, 0x38, 0x98, 0xd0, 0x31, 0x36, 0xf6, 0xf5,
+	0x43, 0xfc, 0x5e, 0xbc, 0x5f, 0x24, 0xb4, 0x5d, 0xeb, 0x40, 0xf7, 0xda,
+	0x5f, 0x16, 0x94, 0x89, 0xe7, 0x25, 0x22, 0x13, 0x55, 0x30, 0x4e, 0x10,
+	0xee, 0x52, 0xd7, 0xaa, 0xf9, 0xd8, 0xdd, 0x6a, 0xa4, 0x11, 0xc6, 0x75,
+	0x8b, 0xf2, 0xc8, 0x1b, 0xad, 0x2b, 0x49, 0x37, 0xd3, 0xc2, 0xb4, 0xf9,
+	0x31, 0xcd, 0x68, 0xfb, 0xb9, 0x6d, 0x95, 0xde, 0xfb, 0xc4, 0x35, 0x46,
+	0xc6, 0x08, 0x63, 0x26, 0xdf, 0xf5, 0xff, 0x56, 0xd7, 0x19, 0x9a, 0xab,
+	0xa3, 0xe7, 0x02, 0xf9, 0x6e, 0x55, 0x5e, 0x35, 0xdc, 0xd4, 0x5f, 0x43,
+	0x9d, 0x75, 0xd0, 0x89, 0x17, 0x53, 0xab, 0xc3, 0xbd, 0xf0, 0xb0, 0x21,
+	0xb2, 0xd7, 0xd4, 0x5a, 0x99, 0x3f, 0x81, 0x79, 0x7d, 0x26, 0x65, 0x78,
+	0x51, 0xf3, 0x61, 0xf1, 0x36, 0xf8, 0xeb, 0xa1, 0xde, 0xa0, 0x9c, 0x26,
+	0x6f, 0x22, 0xad, 0x42, 0x9f, 0xe0, 0xc2, 0xea, 0x99, 0xae, 0x57, 0x03,
+	0xee, 0xb3, 0x7c, 0x45, 0xdb, 0x51, 0x43, 0xb2, 0xb0, 0xed, 0xd1, 0x7b,
+	0x5e, 0xbf, 0xed, 0xa1, 0x65, 0xda, 0x1e, 0xb2, 0x6d, 0x8b, 0x6b, 0xda,
+	0x8e, 0x5e, 0xa1, 0xed, 0x81, 0x37, 0x68, 0x7b, 0x70, 0x99, 0xb6, 0x07,
+	0xc3, 0xb6, 0x95, 0x69, 0xdb, 0x0b, 0xdb, 0x4e, 0x2c, 0xc2, 0xc9, 0x67,
+	0x5e, 0xbf, 0xed, 0x7d, 0xcb, 0xb4, 0xbd, 0x6f, 0x11, 0xdc, 0xc4, 0x49,
+	0x2d, 0x74, 0xff, 0x3d, 0xda, 0xe6, 0xac, 0x03, 0xdf, 0x5c, 0x84, 0xfc,
+	0x36, 0xfe, 0xc8, 0x85, 0xbb, 0x66, 0xcb, 0xe0, 0x2b, 0xf8, 0xd7, 0x99,
+	0x72, 0x03, 0x9e, 0x71, 0xd8, 0x33, 0x28, 0x07, 0x7b, 0xbc, 0x26, 0x1d,
+	0xc8, 0xc9, 0x6e, 0x96, 0xcd, 0xc7, 0x6b, 0xe7, 0xf4, 0xc6, 0x3d, 0xe8,
+	0x8f, 0x6d, 0xfb, 0x5e, 0xbf, 0xbc, 0xa6, 0xfb, 0xcb, 0x95, 0xe9, 0x8f,
+	0x21, 0xbd, 0x42, 0x1f, 0x97, 0xf5, 0x42, 0x19, 0x58, 0x67, 0xd7, 0x3e,
+	0x68, 0x6b, 0x32, 0x0e, 0xa7, 0xed, 0x51, 0x29, 0x94, 0x7f, 0x12, 0x4c,
+	0x83, 0x2e, 0x46, 0xe6, 0x74, 0xc8, 0x93, 0xab, 0x68, 0xb3, 0x8f, 0x53,
+	0xb3, 0x54, 0xc5, 0xa0, 0x46, 0x7c, 0xa6, 0xfd, 0x98, 0x6d, 0xc2, 0x0e,
+	0x0c, 0xcb, 0x32, 0x6e, 0x6c, 0x62, 0x4e, 0x67, 0x21, 0x33, 0xcd, 0x9e,
+	0x0e, 0xfa, 0x2a, 0x4f, 0x81, 0x97, 0xf7, 0x43, 0x76, 0x24, 0xf3, 0x22,
+	0x3d, 0x8d, 0xe6, 0xac, 0x45, 0x4c, 0x72, 0x5d, 0xbf, 0x69, 0xf1, 0xb8,
+	0xef, 0xce, 0xe5, 0xcf, 0x59, 0x40, 0x3e, 0x38, 0x94, 0x91, 0xd7, 0x37,
+	0x9a, 0x75, 0xbb, 0xb7, 0x36, 0x32, 0x1e, 0xa3, 0x36, 0x75, 0xaf, 0xd6,
+	0xf2, 0xc7, 0x09, 0xbf, 0xbf, 0xb2, 0xe8, 0x3b, 0xac, 0xf7, 0x93, 0xd5,
+	0x0b, 0xeb, 0x85, 0xe9, 0x70, 0x4d, 0x16, 0xa4, 0x1f, 0x58, 0xb3, 0xb0,
+	0x7e, 0xac, 0x69, 0xe1, 0xf7, 0xe0, 0xa2, 0xef, 0xcf, 0x2c, 0xfa, 0x7e,
+	0x61, 0xd1, 0xf7, 0x75, 0x6b, 0x17, 0x95, 0x5f, 0xf4, 0xfd, 0xe5, 0xb5,
+	0xcb, 0xc3, 0xfb, 0x57, 0x6b, 0x17, 0xc2, 0xf5, 0x94, 0x5e, 0x73, 0x1d,
+	0xaf, 0xb8, 0xb2, 0xbd, 0x88, 0x7c, 0xe7, 0xd6, 0x18, 0xf2, 0xe1, 0xcb,
+	0x54, 0xe7, 0x73, 0x8d, 0xe3, 0x1d, 0xb1, 0x85, 0xed, 0xcd, 0xd7, 0xdb,
+	0x31, 0x5f, 0x2f, 0x35, 0x5f, 0xcf, 0xf8, 0x23, 0x13, 0x15, 0xe6, 0x31,
+	0x3d, 0x6c, 0xd7, 0xd4, 0x1d, 0x29, 0x79, 0xfa, 0x3c, 0xc2, 0x80, 0x3e,
+	0x8f, 0x90, 0x80, 0x6f, 0xf4, 0x94, 0x8e, 0xeb, 0xaf, 0x51, 0x48, 0xaf,
+	0x34, 0xea, 0xd8, 0xbe, 0xe8, 0x33, 0x09, 0x03, 0xb0, 0xb9, 0x78, 0x0e,
+	0x21, 0x90, 0x9d, 0x29, 0xf3, 0x36, 0xe7, 0x12, 0x0e, 0x07, 0xbd, 0x5e,
+	0x10, 0x0c, 0xfb, 0x67, 0xad, 0x2c, 0xc7, 0xbb, 0x62, 0xea, 0xd0, 0xd7,
+	0x7c, 0x14, 0xfa, 0x66, 0xde, 0xc7, 0x7c, 0x8a, 0xf6, 0x3a, 0x68, 0xa6,
+	0x1b, 0x7a, 0x37, 0xf9, 0xa4, 0x68, 0xdd, 0xd1, 0x05, 0x9d, 0xeb, 0xdd,
+	0xfb, 0x3e, 0xd8, 0x3a, 0x5f, 0x06, 0xad, 0x1f, 0x4b, 0xf5, 0x68, 0xff,
+	0xff, 0x1c, 0x74, 0x31, 0xe3, 0x84, 0x8f, 0x69, 0xda, 0x22, 0x8d, 0x35,
+	0xe8, 0xb3, 0x50, 0x27, 0x53, 0x4e, 0x34, 0xdb, 0x75, 0xde, 0xc4, 0xcd,
+	0x53, 0xed, 0xde, 0x73, 0xe0, 0xb5, 0x7e, 0x7f, 0x03, 0x6c, 0x66, 0xd1,
+	0x3a, 0xbf, 0x50, 0x5a, 0x6f, 0x6d, 0x83, 0x66, 0x19, 0x77, 0xb9, 0x56,
+	0x93, 0xec, 0x19, 0x32, 0x3e, 0x66, 0x3c, 0xa1, 0x18, 0x23, 0xe6, 0xfa,
+	0x05, 0xcf, 0x39, 0x70, 0x9d, 0x9b, 0xf1, 0x90, 0xf1, 0x7b, 0x47, 0xfc,
+	0xbc, 0x17, 0xb1, 0x67, 0x23, 0xb2, 0x45, 0x43, 0x9b, 0x7b, 0xb4, 0xad,
+	0x1a, 0x05, 0x3f, 0x7d, 0x0f, 0x74, 0xcf, 0xba, 0xa4, 0xfd, 0xef, 0x04,
+	0x93, 0xae, 0x89, 0x4f, 0x29, 0xd4, 0xcb, 0x6a, 0x5c, 0x3d, 0x25, 0x07,
+	0x4a, 0xe4, 0xff, 0xa8, 0x96, 0xe5, 0xbb, 0x53, 0x94, 0x07, 0x51, 0xe0,
+	0x71, 0x0a, 0xf8, 0x6b, 0x90, 0xdd, 0x5d, 0x45, 0x94, 0x89, 0xc8, 0xd0,
+	0x40, 0x03, 0x78, 0x8f, 0x76, 0x09, 0xdf, 0x2e, 0xca, 0x7b, 0x32, 0x55,
+	0x1c, 0xd7, 0x7b, 0x9e, 0x1f, 0x43, 0xdd, 0xc7, 0xf1, 0x4c, 0x14, 0xcb,
+	0xa8, 0xf3, 0xb0, 0x2e, 0x3f, 0x31, 0xca, 0x73, 0x22, 0x02, 0x7b, 0xff,
+	0x49, 0x29, 0x4c, 0xb6, 0xc1, 0x2f, 0x99, 0x1e, 0x77, 0xe7, 0xe2, 0xe4,
+	0xff, 0xa5, 0x91, 0xeb, 0xcc, 0x85, 0xeb, 0xb8, 0x27, 0x47, 0xdc, 0x81,
+	0xcd, 0xaa, 0xb3, 0x49, 0xaf, 0xf9, 0xf4, 0x48, 0x3f, 0x6c, 0x8a, 0x9b,
+	0x2b, 0xcf, 0xc4, 0xcc, 0xda, 0xc0, 0x82, 0xf5, 0x86, 0xc3, 0xc4, 0x8a,
+	0x3a, 0xea, 0xf2, 0xdc, 0xa7, 0x4c, 0x9c, 0x81, 0xf6, 0x39, 0x1a, 0xae,
+	0xe7, 0x30, 0xcd, 0x93, 0xb6, 0xeb, 0x00, 0xd7, 0x99, 0x7f, 0xd2, 0xf2,
+	0xf5, 0x89, 0x4d, 0x61, 0x5f, 0xf9, 0x60, 0x6c, 0x53, 0x5e, 0x3e, 0x81,
+	0x27, 0x77, 0x5d, 0x72, 0x34, 0xab, 0xd8, 0xef, 0x37, 0x02, 0xc6, 0x02,
+	0x54, 0xba, 0x55, 0xf2, 0x4d, 0xd5, 0xfd, 0x33, 0xad, 0xc3, 0x2b, 0xa8,
+	0xd7, 0x83, 0x63, 0x26, 0x11, 0x03, 0x0e, 0xf2, 0x6f, 0x08, 0xcf, 0x16,
+	0xcf, 0x57, 0xcb, 0xc1, 0x73, 0xc2, 0xae, 0xd7, 0x70, 0x0d, 0x66, 0x05,
+	0xf0, 0xd2, 0x80, 0xf4, 0x09, 0x19, 0x39, 0xfe, 0x3b, 0x31, 0xee, 0x17,
+	0xaa, 0xd1, 0x7e, 0xf5, 0x7d, 0xf5, 0x26, 0x06, 0xf2, 0x2c, 0xca, 0x30,
+	0x7f, 0x1c, 0x75, 0x92, 0xf9, 0x6c, 0x64, 0xad, 0x0c, 0xe9, 0x7e, 0x83,
+	0x48, 0xdb, 0xb6, 0x7a, 0xbd, 0x4f, 0x5f, 0xce, 0x30, 0x6e, 0x11, 0xd6,
+	0x7d, 0x56, 0xef, 0x83, 0x73, 0xd3, 0xc9, 0x7c, 0x5f, 0x84, 0xf2, 0xa9,
+	0x53, 0x7a, 0xb9, 0xce, 0x73, 0x66, 0x5c, 0xd3, 0x76, 0xfb, 0x26, 0x9e,
+	0x07, 0xdd, 0x02, 0xfb, 0xef, 0x3b, 0x80, 0x89, 0x30, 0x9e, 0x40, 0x3a,
+	0x7c, 0xc2, 0xd7, 0x85, 0x61, 0xfa, 0x4d, 0xc2, 0x30, 0xfd, 0x26, 0x61,
+	0x20, 0x2e, 0x00, 0x47, 0xa5, 0x7d, 0x75, 0x68, 0x53, 0x5c, 0x85, 0x71,
+	0x1c, 0x2c, 0x4d, 0xc3, 0xbf, 0xd5, 0x31, 0x94, 0xce, 0x69, 0x45, 0x9e,
+	0xf7, 0xc0, 0x73, 0xe0, 0xad, 0x12, 0x78, 0x0f, 0xb6, 0xe1, 0x97, 0x61,
+	0x1b, 0x3e, 0x01, 0xdb, 0xf0, 0x1c, 0x6c, 0xc3, 0xc7, 0x31, 0x37, 0x8f,
+	0x2d, 0xe0, 0xd5, 0x8c, 0xe6, 0xd5, 0x42, 0xe9, 0x02, 0x78, 0xb5, 0xeb,
+	0x0a, 0xfc, 0xe8, 0xc2, 0xc6, 0xa7, 0x0d, 0xed, 0xc0, 0x96, 0xff, 0xb8,
+	0xf6, 0x8b, 0x1f, 0x4c, 0x8d, 0xb1, 0x0e, 0x68, 0x38, 0x49, 0x9f, 0x16,
+	0xf2, 0x3f, 0x99, 0x07, 0xef, 0x61, 0xac, 0x8e, 0xa3, 0xae, 0x5b, 0x23,
+	0xd4, 0x1f, 0xee, 0x36, 0xee, 0xef, 0xe6, 0x58, 0x13, 0x8b, 0xf0, 0x64,
+	0xf8, 0x73, 0x8f, 0x4f, 0x3d, 0x42, 0xbe, 0x4c, 0x7c, 0x76, 0xc4, 0xaf,
+	0xe6, 0xc5, 0x1d, 0x1c, 0x5f, 0xe0, 0x6d, 0x5a, 0xae, 0xee, 0x7c, 0xf9,
+	0x35, 0x73, 0xe5, 0x75, 0xff, 0xa3, 0xe4, 0x37, 0xe8, 0x6e, 0xe2, 0x3e,
+	0x91, 0x8d, 0x6c, 0xb0, 0xb8, 0xdf, 0x2f, 0x6d, 0xdb, 0x60, 0xaf, 0x0f,
+	0x82, 0x7e, 0xa7, 0x02, 0xf1, 0xb7, 0x85, 0x6d, 0xce, 0xb7, 0xe3, 0xd9,
+	0x76, 0x76, 0xc3, 0x96, 0xed, 0xdb, 0xc4, 0xb5, 0x5e, 0xd8, 0xf2, 0xa9,
+	0x70, 0x3e, 0x60, 0xf9, 0xea, 0x39, 0xa7, 0x0c, 0xa5, 0xec, 0x6c, 0xb0,
+	0xf1, 0x7e, 0xb6, 0x77, 0x61, 0xd1, 0x3c, 0x5d, 0x0a, 0x78, 0xce, 0x76,
+	0xc4, 0x1f, 0xab, 0xa2, 0x95, 0xbf, 0xb2, 0xb4, 0xa2, 0x16, 0x8d, 0xe3,
+	0x9c, 0xa5, 0x95, 0x10, 0xde, 0x58, 0x48, 0x2b, 0x75, 0x21, 0xad, 0xe4,
+	0xc7, 0x43, 0x5a, 0x61, 0xdd, 0x73, 0x21, 0xad, 0x24, 0xaa, 0x69, 0x25,
+	0x3f, 0xee, 0xe0, 0x59, 0x0c, 0x07, 0xe9, 0x85, 0xed, 0x90, 0x5e, 0x00,
+	0x4b, 0xa5, 0x32, 0x47, 0x2f, 0x31, 0xb4, 0x73, 0xa8, 0xa4, 0x34, 0xad,
+	0x0c, 0xa9, 0x50, 0x47, 0x78, 0x98, 0x73, 0xcc, 0xfd, 0x15, 0x69, 0x24,
+	0x65, 0x69, 0x64, 0xfe, 0x2c, 0xd1, 0x22, 0xda, 0x00, 0xee, 0x79, 0x5e,
+	0x60, 0xb3, 0xa6, 0x8d, 0xfb, 0x53, 0x2f, 0xa0, 0xec, 0x28, 0x68, 0x23,
+	0xc4, 0xc1, 0x03, 0x16, 0x07, 0x8b, 0xe7, 0xf2, 0xb4, 0xc5, 0xc1, 0xa8,
+	0xc5, 0x81, 0xe6, 0x97, 0x3c, 0xe7, 0x4c, 0x69, 0x1c, 0xd4, 0x69, 0x1c,
+	0x88, 0x0a, 0xeb, 0x9e, 0x5e, 0x06, 0x07, 0x2c, 0x33, 0xaa, 0xc7, 0x1f,
+	0xc1, 0xf8, 0xf7, 0x61, 0xfc, 0x4a, 0x8f, 0x9f, 0xf3, 0xc0, 0xf1, 0x03,
+	0x96, 0xca, 0x77, 0xe6, 0xc6, 0xdf, 0x84, 0x36, 0x0e, 0x6a, 0xdb, 0x99,
+	0xf1, 0x54, 0xea, 0x46, 0x33, 0xfe, 0xc7, 0x2a, 0xe6, 0x8c, 0xc9, 0x63,
+	0x4b, 0xf4, 0xd8, 0x0b, 0x96, 0x37, 0x7c, 0xbd, 0xce, 0xc6, 0x73, 0x6d,
+	0xe7, 0xa0, 0xbb, 0xc6, 0x52, 0x09, 0x7b, 0xe6, 0xd4, 0xd8, 0x43, 0x5f,
+	0x4d, 0x91, 0x77, 0x3e, 0xaa, 0xf7, 0xfa, 0x9d, 0xa5, 0x5d, 0x54, 0x6a,
+	0x92, 0xbe, 0xb1, 0x6a, 0xb8, 0x09, 0x6f, 0x3e, 0x50, 0x3e, 0x63, 0x37,
+	0xfb, 0xa1, 0x3b, 0x4c, 0xdc, 0x1a, 0xb4, 0x84, 0xf4, 0x64, 0xbe, 0x37,
+	0x52, 0x27, 0xea, 0x81, 0x0f, 0x60, 0xcc, 0x2e, 0x7c, 0xcc, 0x76, 0x6f,
+	0x9b, 0xa2, 0xae, 0xbb, 0xba, 0x4a, 0xd7, 0x35, 0x5b, 0x5d, 0xb7, 0x86,
+	0xba, 0x0e, 0x70, 0x3f, 0x25, 0x87, 0x4b, 0x9c, 0xbf, 0x7c, 0xa2, 0x4e,
+	0xc7, 0x40, 0x1d, 0x1b, 0xe7, 0x4b, 0xc6, 0x0f, 0x6b, 0x5a, 0xa6, 0xce,
+	0x4a, 0xea, 0xb8, 0xe4, 0x4c, 0xd7, 0x3f, 0xd9, 0x75, 0x10, 0xea, 0xb5,
+	0xef, 0x07, 0x7f, 0xb0, 0x8c, 0x5e, 0x83, 0xfe, 0xd1, 0xf6, 0x59, 0x0d,
+	0x64, 0xad, 0x9c, 0x6a, 0xc6, 0xb3, 0x9a, 0xe7, 0xc1, 0x3a, 0x3b, 0x54,
+	0xbd, 0xd4, 0x9c, 0x6a, 0x94, 0x3d, 0x63, 0x7a, 0xdd, 0x5c, 0xd4, 0x29,
+	0xe0, 0xff, 0x14, 0xcf, 0x14, 0x88, 0x3e, 0x03, 0x95, 0x1b, 0x85, 0x3f,
+	0x33, 0xf1, 0x94, 0xd9, 0x1b, 0x38, 0x56, 0xa3, 0x7f, 0xd3, 0xc6, 0x28,
+	0xa4, 0x32, 0xfa, 0xec, 0xd0, 0x1e, 0xb4, 0xd9, 0xbe, 0xa9, 0x16, 0x63,
+	0x8e, 0xa1, 0x2e, 0xf7, 0x16, 0xaa, 0x36, 0x57, 0x6a, 0xc5, 0x9d, 0x88,
+	0xea, 0xf3, 0x4b, 0x3c, 0x7f, 0x9f, 0xed, 0x69, 0x42, 0x5e, 0x44, 0xaf,
+	0x15, 0xd4, 0x9c, 0x9a, 0x3f, 0xa7, 0xae, 0x8e, 0x8a, 0x5d, 0xc3, 0x4f,
+	0x6b, 0xbd, 0x12, 0x39, 0x4a, 0x9d, 0xc3, 0xfd, 0x55, 0x3d, 0x98, 0xf7,
+	0xe5, 0xf4, 0x8d, 0x31, 0x62, 0xb3, 0x98, 0x3f, 0x75, 0x86, 0x67, 0x8d,
+	0x5b, 0xf1, 0x0e, 0xdb, 0x0b, 0xf5, 0x08, 0x74, 0xdf, 0xdb, 0x3f, 0xe1,
+	0x49, 0x3d, 0xf0, 0x3d, 0xa1, 0x80, 0x6b, 0x57, 0xd3, 0x42, 0x5e, 0x85,
+	0xb1, 0x69, 0x43, 0x0f, 0x8f, 0xbf, 0x21, 0x3f, 0x90, 0x26, 0x3a, 0x6d,
+	0x6c, 0xc1, 0xb7, 0x31, 0x7e, 0xd2, 0xb6, 0xa1, 0x87, 0x47, 0x53, 0x19,
+	0xc5, 0xbd, 0x51, 0x66, 0x1d, 0x94, 0xb4, 0x41, 0x9a, 0x4f, 0xe8, 0xf5,
+	0xd1, 0x8c, 0xbc, 0x2c, 0x99, 0xa6, 0x76, 0xd8, 0x5d, 0xff, 0xb6, 0x73,
+	0x6c, 0xee, 0x2e, 0xd0, 0x34, 0x07, 0xdd, 0xc4, 0x7d, 0xca, 0x9d, 0xf2,
+	0x5e, 0x9e, 0x57, 0x98, 0x70, 0xa0, 0x94, 0x9f, 0xd2, 0x7b, 0xbf, 0x77,
+	0x14, 0x57, 0xcb, 0xad, 0xa9, 0xa8, 0x5d, 0xe7, 0xac, 0x05, 0x1d, 0x40,
+	0x50, 0x9f, 0xaa, 0xc5, 0x13, 0x75, 0x38, 0x7f, 0x17, 0x53, 0x99, 0xa4,
+	0x22, 0xb3, 0xc3, 0xe7, 0x9f, 0x91, 0x2d, 0xde, 0x1e, 0x7d, 0xce, 0x4e,
+	0x9c, 0xba, 0x53, 0x7f, 0xe9, 0xd1, 0x06, 0x25, 0xfd, 0xcc, 0xf8, 0xb5,
+	0x7a, 0x5d, 0xab, 0x3f, 0x15, 0x04, 0x39, 0xcc, 0x5f, 0x41, 0x4c, 0xfc,
+	0x6c, 0xc2, 0x67, 0x1a, 0xfd, 0xda, 0x06, 0xa7, 0xf6, 0x4c, 0xa3, 0x63,
+	0x68, 0x45, 0x22, 0x2a, 0x5d, 0xef, 0xd4, 0x9c, 0xba, 0x93, 0x73, 0x06,
+	0xba, 0xf2, 0x1c, 0x43, 0x57, 0x31, 0x67, 0x9e, 0xae, 0xd6, 0xd9, 0xdf,
+	0x2a, 0x5d, 0x27, 0x99, 0x64, 0x1d, 0xc6, 0xdb, 0x5b, 0x0c, 0x61, 0x3c,
+	0x0c, 0xb8, 0x08, 0xcf, 0xdd, 0x18, 0xc3, 0x30, 0x9e, 0x3c, 0x60, 0x01,
+	0xb3, 0x9f, 0x2a, 0x00, 0xe6, 0x83, 0x78, 0x18, 0x27, 0x6b, 0x76, 0x22,
+	0x13, 0xd5, 0xf0, 0x12, 0xc6, 0x1f, 0x5b, 0x78, 0x5f, 0x0f, 0x56, 0x4f,
+	0x66, 0xba, 0x8b, 0x80, 0x87, 0x70, 0xde, 0x07, 0x18, 0x69, 0x97, 0x8e,
+	0xe2, 0xdb, 0x03, 0x7c, 0x63, 0x16, 0x26, 0xd0, 0xe3, 0xd8, 0x43, 0xf3,
+	0xbf, 0x8b, 0xb4, 0x93, 0x8f, 0xd9, 0xef, 0xd6, 0x45, 0x32, 0xe0, 0x15,
+	0x87, 0x78, 0x1e, 0x29, 0xbd, 0xe6, 0xc0, 0x0e, 0x00, 0xdf, 0xbf, 0xe4,
+	0x44, 0xce, 0xc4, 0xe5, 0x50, 0x91, 0x31, 0x84, 0xe3, 0x0e, 0xe7, 0x41,
+	0xf9, 0x57, 0xa1, 0x4c, 0x5c, 0xc9, 0xc4, 0xd5, 0x78, 0xde, 0x82, 0x67,
+	0x03, 0x9e, 0x8d, 0x78, 0xd6, 0xe3, 0x69, 0xc5, 0xf3, 0x2d, 0x94, 0x53,
+	0xb1, 0x3a, 0xe1, 0x7e, 0xd5, 0x16, 0xa5, 0x34, 0x1f, 0x71, 0xcf, 0xc2,
+	0x65, 0xc0, 0xe5, 0x2b, 0xd0, 0x3b, 0x1e, 0x9e, 0xf1, 0xf8, 0x3a, 0xfa,
+	0x98, 0xc5, 0xd3, 0xa9, 0xe4, 0x4c, 0x17, 0x9e, 0x14, 0x9e, 0x6e, 0x3c,
+	0x3d, 0x78, 0xd2, 0x78, 0x5e, 0x75, 0x0c, 0xcf, 0x5d, 0x02, 0xbe, 0x42,
+	0x1e, 0x01, 0xce, 0x17, 0xf0, 0x9c, 0xe7, 0xbc, 0x09, 0x9e, 0x73, 0x2c,
+	0xcf, 0x39, 0xf3, 0x3c, 0x57, 0xeb, 0xa8, 0x63, 0xf5, 0x4e, 0xe4, 0x18,
+	0x7d, 0x85, 0x5a, 0xc7, 0xf0, 0x7f, 0x44, 0x7a, 0x07, 0x41, 0x4b, 0xc7,
+	0x30, 0x67, 0xc7, 0x48, 0x57, 0x2e, 0xd2, 0xc7, 0x16, 0xf5, 0x3b, 0xfa,
+	0x26, 0xfa, 0x3d, 0x61, 0xfb, 0x7d, 0xb8, 0xaa, 0xdf, 0x83, 0x68, 0xfb,
+	0x3e, 0xdb, 0xef, 0xc1, 0xaa, 0x7e, 0x41, 0x2b, 0xc7, 0xf2, 0x78, 0x48,
+	0x17, 0x23, 0x48, 0x0f, 0x65, 0xc2, 0xdd, 0x6b, 0xa4, 0xbe, 0x46, 0x9f,
+	0x27, 0x8d, 0xf9, 0x35, 0x73, 0xba, 0x31, 0x53, 0xa5, 0x1f, 0x7e, 0x16,
+	0xfd, 0x38, 0x5c, 0xa2, 0x8d, 0x38, 0x5d, 0x25, 0x17, 0xe8, 0xfb, 0x04,
+	0x72, 0x5c, 0xfb, 0x39, 0xf4, 0x79, 0xe8, 0xff, 0x2c, 0xb6, 0xad, 0x3e,
+	0xae, 0xf7, 0xe7, 0xde, 0x55, 0x6c, 0x95, 0x4f, 0x14, 0x69, 0x13, 0x92,
+	0x5e, 0x82, 0x60, 0xcf, 0x36, 0xda, 0xa7, 0xf9, 0x60, 0x9d, 0x9f, 0xd4,
+	0xb1, 0xb5, 0x4f, 0x2e, 0xd5, 0x19, 0xa3, 0xbd, 0xf0, 0xcd, 0xb3, 0x47,
+	0x3f, 0x08, 0x9d, 0x51, 0x03, 0xb8, 0x9f, 0xd2, 0x77, 0x80, 0xec, 0x1a,
+	0x55, 0x23, 0x6b, 0x25, 0x2e, 0x37, 0x17, 0x6b, 0x61, 0xf7, 0x30, 0x56,
+	0x5e, 0x2f, 0xed, 0xdb, 0xa2, 0xe6, 0x6c, 0x8d, 0x17, 0xc3, 0x6f, 0xcf,
+	0x9c, 0xf5, 0x89, 0xc5, 0x91, 0x1f, 0x69, 0xa2, 0x1c, 0x8c, 0xf9, 0xef,
+	0xd4, 0xfb, 0x26, 0xdb, 0xb6, 0xd1, 0x6e, 0xb9, 0x41, 0xeb, 0x70, 0x77,
+	0x89, 0x9d, 0xa4, 0x5a, 0x3c, 0x99, 0xb7, 0xd1, 0x76, 0x17, 0x93, 0x09,
+	0xc2, 0xf5, 0x90, 0x70, 0x3f, 0xc1, 0x7e, 0x29, 0xa4, 0x1a, 0x25, 0x92,
+	0xe6, 0xba, 0x5c, 0xb2, 0x93, 0xb6, 0xd1, 0xc4, 0x98, 0x67, 0xcf, 0x9e,
+	0xac, 0x96, 0x0b, 0xba, 0x9f, 0x5a, 0x0d, 0xa3, 0x39, 0x8f, 0xc6, 0x35,
+	0x2f, 0x9e, 0x81, 0x72, 0xf1, 0x6e, 0xd0, 0x7a, 0x67, 0xa2, 0xcc, 0xb3,
+	0x4e, 0xf0, 0x97, 0xca, 0x31, 0x7d, 0xc6, 0xd4, 0x7b, 0x3b, 0xfc, 0xd8,
+	0xf2, 0x06, 0xd9, 0x3d, 0xb6, 0x82, 0xeb, 0x28, 0xb1, 0xb5, 0xd0, 0x1f,
+	0xac, 0xd3, 0xb6, 0x0d, 0xfe, 0xdf, 0xf8, 0x46, 0x79, 0x7c, 0x9c, 0x6d,
+	0xb7, 0xc8, 0xe4, 0x94, 0x38, 0xde, 0xdb, 0x57, 0xa2, 0x8c, 0xc7, 0xf1,
+	0x08, 0xf7, 0x3c, 0xb5, 0x6d, 0x13, 0xe5, 0xbd, 0xdd, 0x95, 0xf3, 0xdd,
+	0x11, 0xbd, 0x26, 0xe3, 0x82, 0x4e, 0xd8, 0xde, 0xf9, 0xee, 0x56, 0x39,
+	0x3b, 0x05, 0x9a, 0x80, 0xdc, 0xef, 0x3b, 0x45, 0x98, 0x44, 0xb6, 0x4f,
+	0xc0, 0x5e, 0x90, 0x76, 0x3c, 0xa0, 0x0f, 0xc8, 0xef, 0x5b, 0xbb, 0xd9,
+	0x17, 0xf4, 0x12, 0x74, 0x5c, 0xdb, 0x36, 0x23, 0x0b, 0x32, 0x13, 0x35,
+	0x48, 0x67, 0xbb, 0xf0, 0x0f, 0x07, 0xd9, 0x4e, 0x58, 0x57, 0x61, 0x4c,
+	0xb5, 0x9a, 0x5e, 0x66, 0x17, 0xe9, 0x8f, 0x73, 0x3f, 0x97, 0xfd, 0xcd,
+	0x36, 0x3a, 0x41, 0x2b, 0xbe, 0xde, 0xc3, 0x63, 0x6c, 0x2b, 0xce, 0x09,
+	0x6d, 0x22, 0xda, 0x55, 0xd7, 0x6a, 0xfb, 0x62, 0xb2, 0xc2, 0x19, 0xe4,
+	0xda, 0x48, 0x38, 0x47, 0x71, 0x39, 0x59, 0x9a, 0x9b, 0xa7, 0x0d, 0x35,
+	0x0b, 0xe7, 0x89, 0xb4, 0x92, 0x1a, 0xb2, 0xb6, 0xc7, 0x8c, 0x3c, 0x0f,
+	0xbb, 0xac, 0x53, 0xcf, 0xd9, 0x0c, 0x6c, 0x59, 0x3b, 0x67, 0xda, 0x9e,
+	0x2d, 0x84, 0x73, 0x36, 0x00, 0x8d, 0x53, 0xbe, 0x41, 0xcf, 0x99, 0x07,
+	0xba, 0xc9, 0x03, 0xef, 0x79, 0xcc, 0x53, 0x1e, 0x73, 0x94, 0x2f, 0xb7,
+	0xc8, 0xc4, 0x71, 0xd5, 0x5a, 0x23, 0x92, 0xd8, 0xed, 0xb7, 0xc8, 0xf0,
+	0x14, 0x63, 0x05, 0x1b, 0x60, 0x83, 0x6d, 0xc4, 0xd3, 0x8a, 0x6f, 0xd6,
+	0xe3, 0x1d, 0x1f, 0x0a, 0x75, 0xeb, 0x96, 0xd8, 0x59, 0x67, 0xd1, 0xf7,
+	0xd3, 0xc0, 0xc3, 0xa3, 0xc0, 0xc3, 0x3c, 0xef, 0xbc, 0x50, 0x15, 0x5f,
+	0xe2, 0x58, 0xb5, 0x0e, 0xc5, 0x78, 0x63, 0x7a, 0x3e, 0x75, 0x9c, 0xa9,
+	0x54, 0xfb, 0x66, 0xec, 0xa9, 0x38, 0xed, 0xa9, 0xdc, 0xa8, 0x67, 0xce,
+	0x60, 0x0d, 0xc0, 0x77, 0xf2, 0xf7, 0x69, 0x5a, 0x1f, 0x1a, 0x27, 0x5c,
+	0xd1, 0x10, 0xae, 0x05, 0x73, 0xc6, 0x33, 0xb3, 0x4b, 0xe3, 0x18, 0x2f,
+	0xcc, 0xed, 0x11, 0x87, 0x2e, 0x97, 0xd1, 0x14, 0xe3, 0x24, 0xad, 0xcb,
+	0xc0, 0xf4, 0x94, 0xb6, 0x61, 0x45, 0x9d, 0x96, 0x03, 0x25, 0x9e, 0xb7,
+	0xe5, 0x1a, 0xcc, 0xef, 0x31, 0x7e, 0xd4, 0x39, 0x21, 0xc7, 0xd0, 0x37,
+	0xd7, 0xc5, 0x95, 0x8d, 0xcf, 0xac, 0xb2, 0x7b, 0xf2, 0xaa, 0x63, 0x34,
+	0x66, 0xdd, 0x7c, 0xe1, 0xd9, 0x93, 0xe4, 0xc0, 0xac, 0x5e, 0x77, 0xe5,
+	0x9a, 0xa1, 0x8c, 0x46, 0xa0, 0xfd, 0x76, 0x77, 0x27, 0x7b, 0xcc, 0x59,
+	0xc3, 0x84, 0xf4, 0x97, 0xcc, 0xf8, 0x2f, 0xea, 0x7d, 0x93, 0x66, 0x7f,
+	0xb8, 0xd9, 0x53, 0xb9, 0x5f, 0x2e, 0xa6, 0xa2, 0x55, 0x73, 0x5b, 0x27,
+	0xc3, 0xc0, 0x85, 0x5e, 0xcb, 0x84, 0x5d, 0x9c, 0xeb, 0x7e, 0xbc, 0x89,
+	0x67, 0xd1, 0xa2, 0x98, 0x9f, 0xc2, 0x38, 0xcf, 0xa7, 0xb3, 0xdd, 0x2b,
+	0xb5, 0x45, 0x31, 0xcb, 0xb3, 0x4e, 0x90, 0x95, 0x6f, 0xdd, 0x12, 0xaf,
+	0xd7, 0xf9, 0x2b, 0xec, 0x99, 0x16, 0xd8, 0x0d, 0xbb, 0x02, 0xf9, 0x33,
+	0xe8, 0xc9, 0xd3, 0x76, 0x4c, 0x09, 0x1d, 0x93, 0x92, 0xe0, 0x7c, 0x2a,
+	0x6e, 0xe3, 0xce, 0x1c, 0xcb, 0x98, 0xa5, 0x6f, 0x63, 0xff, 0xcc, 0xdb,
+	0xd0, 0x5d, 0x9a, 0xd6, 0x1f, 0xd7, 0xb2, 0xb0, 0xcb, 0xda, 0xce, 0x3a,
+	0x8e, 0x73, 0x42, 0xf4, 0x1e, 0xac, 0xd0, 0x37, 0xea, 0xa8, 0xf2, 0x0b,
+	0x8c, 0x2f, 0x57, 0x18, 0x5b, 0x4e, 0x46, 0xcd, 0xfb, 0x84, 0xf4, 0xe5,
+	0xf6, 0x6c, 0xe2, 0xdd, 0x30, 0xa1, 0x2f, 0xd7, 0x65, 0x7d, 0xb9, 0x46,
+	0xed, 0xcb, 0x99, 0xd8, 0x43, 0xe3, 0x9c, 0x2f, 0x57, 0x18, 0xcb, 0x83,
+	0x56, 0x6a, 0xed, 0x59, 0x09, 0x63, 0x0b, 0x0d, 0x17, 0x5d, 0xbd, 0x6f,
+	0x24, 0x37, 0xa0, 0xe0, 0x37, 0x18, 0x1f, 0x8b, 0xb1, 0x0a, 0xa5, 0xfe,
+	0xce, 0xfa, 0x17, 0x1b, 0x24, 0xd3, 0xbc, 0x02, 0xe3, 0x7e, 0x4a, 0xcf,
+	0xb9, 0x59, 0xc3, 0x82, 0x5c, 0x1b, 0x64, 0xcc, 0x87, 0x67, 0x47, 0x35,
+	0x7f, 0x25, 0x7a, 0x23, 0x9d, 0xc6, 0x9e, 0xf5, 0x13, 0x6b, 0xa5, 0xfe,
+	0xb8, 0x53, 0x18, 0x8f, 0xda, 0x7e, 0x13, 0x80, 0xa9, 0x06, 0x73, 0xf3,
+	0x4e, 0x2b, 0x93, 0xd9, 0xf7, 0x3b, 0xea, 0x18, 0x1b, 0x98, 0x2a, 0x9a,
+	0x18, 0x60, 0x5f, 0x31, 0x12, 0x9e, 0x5b, 0x57, 0x5c, 0x47, 0xce, 0x0c,
+	0xae, 0x00, 0x2c, 0x2b, 0x96, 0xb5, 0x59, 0x1f, 0x7b, 0x43, 0x1d, 0x45,
+	0x9a, 0x7a, 0x4a, 0xef, 0x2f, 0x5c, 0xd9, 0x9d, 0xdc, 0xa9, 0xcf, 0x23,
+	0xe9, 0x58, 0x62, 0x5e, 0xb8, 0x7f, 0xf7, 0x9b, 0xf2, 0x36, 0x2d, 0xfb,
+	0x0f, 0xa4, 0xa8, 0xc7, 0xb6, 0xe9, 0xdf, 0xb5, 0xe9, 0x20, 0x38, 0xdf,
+	0xfd, 0x2c, 0x6c, 0x16, 0xdf, 0xfb, 0x96, 0xb4, 0xc7, 0x7b, 0xb5, 0x0d,
+	0x85, 0xb9, 0x1a, 0xac, 0x97, 0x15, 0xfe, 0xb8, 0xdd, 0xab, 0x68, 0xd6,
+	0x03, 0x0b, 0xc2, 0xfb, 0x17, 0x3a, 0x6c, 0x5e, 0x3e, 0xa8, 0x07, 0x3d,
+	0x7d, 0x44, 0x8c, 0xac, 0xc9, 0xcd, 0xcb, 0x1a, 0xee, 0xa7, 0xcb, 0x90,
+	0xa0, 0xdd, 0x23, 0x92, 0xe4, 0xdd, 0x49, 0xec, 0xbb, 0x20, 0x57, 0x41,
+	0x3f, 0xb3, 0x1e, 0x6d, 0x56, 0x7e, 0x73, 0x0f, 0x8a, 0xef, 0x1d, 0x84,
+	0x8e, 0xb9, 0x61, 0xa9, 0x8e, 0x89, 0xd3, 0xbf, 0xcf, 0x8d, 0xd2, 0x47,
+	0x5c, 0x89, 0x3a, 0x2d, 0xf2, 0xd1, 0xb1, 0xdf, 0x5a, 0x4b, 0x1e, 0x1b,
+	0x82, 0x7c, 0x57, 0xf7, 0x87, 0xe7, 0x2e, 0x99, 0xc6, 0x7c, 0xb6, 0x5b,
+	0x27, 0x89, 0xf7, 0x79, 0xf2, 0xc5, 0x4a, 0x32, 0x31, 0x0b, 0x1d, 0x35,
+	0xe4, 0x0c, 0xb7, 0x9a, 0xd8, 0xe9, 0xa7, 0xd6, 0x9a, 0x73, 0x5a, 0xf5,
+	0xc0, 0x69, 0x18, 0x4f, 0xad, 0xa6, 0xdd, 0x59, 0x2b, 0x97, 0x83, 0xa0,
+	0xbe, 0x5b, 0xcb, 0xe2, 0x9d, 0x94, 0xc5, 0x07, 0x52, 0x1d, 0x86, 0x07,
+	0xb4, 0xef, 0xc4, 0x3d, 0x00, 0xc0, 0x43, 0xb7, 0xcb, 0xbd, 0xd0, 0x96,
+	0x4f, 0xfd, 0xcc, 0x8c, 0x95, 0x4f, 0xca, 0x59, 0xca, 0x9f, 0x6a, 0x6b,
+	0x74, 0x81, 0xec, 0x3d, 0x34, 0x46, 0xbd, 0x9c, 0x9a, 0xfe, 0x26, 0xe4,
+	0x55, 0x4e, 0xe3, 0xa1, 0x45, 0xee, 0x1b, 0x93, 0xcc, 0x45, 0xe8, 0xac,
+	0xc2, 0xd4, 0x42, 0x1e, 0x5d, 0xda, 0x1e, 0xc7, 0x7a, 0x7a, 0xad, 0xf1,
+	0x71, 0x17, 0x8e, 0x75, 0x9a, 0x7b, 0x8c, 0xf4, 0x58, 0xb9, 0x37, 0xff,
+	0x9c, 0x1d, 0xeb, 0xca, 0x70, 0xac, 0x3d, 0x0b, 0xc7, 0x1a, 0xfa, 0xf8,
+	0xa1, 0xfc, 0x4d, 0xe8, 0xb3, 0x49, 0xfa, 0x4c, 0xcc, 0xd8, 0x4a, 0xe9,
+	0x1d, 0x6d, 0xb4, 0x72, 0xd3, 0x83, 0x0e, 0xe2, 0x79, 0xa1, 0xe9, 0xcf,
+	0x79, 0x62, 0x71, 0xa6, 0x88, 0x07, 0xca, 0xdc, 0x26, 0x7d, 0x9e, 0x71,
+	0x02, 0x7e, 0xd6, 0x87, 0x8b, 0x2c, 0x1b, 0xe6, 0x5f, 0x29, 0x46, 0x1c,
+	0xfa, 0xd6, 0xf4, 0x9f, 0x3a, 0x97, 0xc4, 0x16, 0x4c, 0x1c, 0x98, 0xf1,
+	0x5f, 0x73, 0xcf, 0x02, 0xf7, 0x7d, 0xdf, 0x01, 0xde, 0xfa, 0xed, 0x62,
+	0xb2, 0x27, 0x1b, 0xa1, 0x3c, 0x9d, 0x95, 0x43, 0x95, 0x3e, 0x69, 0xd3,
+	0x67, 0xed, 0xdf, 0x30, 0x46, 0x9c, 0xa9, 0x8e, 0x11, 0x8b, 0x63, 0x62,
+	0xc4, 0x3b, 0x7f, 0x8e, 0x18, 0xb1, 0x38, 0x26, 0x46, 0xbc, 0x9c, 0x9f,
+	0x35, 0x52, 0x9a, 0xc5, 0xb8, 0xea, 0x21, 0x53, 0x94, 0x93, 0x9b, 0x6a,
+	0xc0, 0xbb, 0x16, 0x6f, 0xc0, 0x32, 0x56, 0xc0, 0xdb, 0xc3, 0xfb, 0x20,
+	0xde, 0x31, 0x19, 0x99, 0xd3, 0x1d, 0xb3, 0x90, 0x1f, 0xd4, 0x69, 0xac,
+	0x6b, 0xfc, 0x82, 0xc9, 0x72, 0x33, 0xca, 0x5d, 0x72, 0x26, 0x58, 0xaf,
+	0xd4, 0x28, 0xc3, 0x63, 0x94, 0xdd, 0x4d, 0x32, 0x3a, 0x16, 0xda, 0xb8,
+	0x9f, 0x5d, 0xcf, 0xb5, 0x81, 0x21, 0x09, 0x6d, 0xd8, 0x67, 0xd6, 0x9b,
+	0xb5, 0xdb, 0x2d, 0x31, 0xa9, 0x5f, 0x8d, 0x39, 0x38, 0xee, 0x5c, 0x1c,
+	0x5f, 0xbd, 0xc0, 0x96, 0x4d, 0xd8, 0xd8, 0xe0, 0xb8, 0xd5, 0xc1, 0xcb,
+	0xcb, 0x88, 0xea, 0xf9, 0x8f, 0xdb, 0x73, 0xbc, 0x51, 0x7b, 0xd7, 0x5f,
+	0x42, 0xcf, 0xcf, 0x40, 0x65, 0x16, 0xfd, 0xad, 0x57, 0x99, 0x71, 0x8e,
+	0x73, 0xee, 0x7e, 0x1e, 0xc8, 0xc5, 0x56, 0x35, 0x34, 0xbe, 0x80, 0x2e,
+	0x41, 0xb7, 0x1c, 0x9b, 0x03, 0xda, 0xbd, 0x57, 0x26, 0x46, 0x09, 0x5f,
+	0x47, 0x3c, 0xa2, 0xcf, 0xf5, 0xe2, 0x7b, 0xdc, 0x9c, 0x27, 0xea, 0xad,
+	0x84, 0x67, 0x7a, 0xd7, 0x00, 0xde, 0xc5, 0xe7, 0x7a, 0xad, 0x9e, 0xd6,
+	0x36, 0x04, 0xcf, 0xf7, 0x86, 0x63, 0x58, 0x8e, 0x9e, 0x02, 0x19, 0xd6,
+	0xfb, 0x7d, 0xd7, 0xca, 0xe9, 0x07, 0xe7, 0xce, 0x17, 0x34, 0xc1, 0x56,
+	0x69, 0x85, 0xa9, 0x3c, 0xe0, 0xa6, 0xb9, 0xef, 0x82, 0xfb, 0x0b, 0x3a,
+	0xe2, 0xb7, 0xe9, 0x73, 0x1f, 0xf3, 0x67, 0xac, 0xe7, 0xcf, 0x7e, 0x84,
+	0x67, 0x5a, 0xe3, 0xd2, 0x07, 0x3a, 0xec, 0xd7, 0xe9, 0x31, 0x8c, 0x87,
+	0x6b, 0xbe, 0x1a, 0x0f, 0x90, 0x3d, 0x5c, 0xfb, 0xc5, 0xd8, 0x2b, 0x2d,
+	0x2a, 0xa7, 0xcf, 0x58, 0x47, 0x2d, 0x8d, 0x5d, 0x76, 0xf6, 0x94, 0x13,
+	0x6a, 0x4f, 0xd9, 0x57, 0x7b, 0xcb, 0x36, 0xaf, 0xfb, 0x01, 0xcc, 0x07,
+	0x7e, 0x8f, 0x17, 0x9d, 0x21, 0xe0, 0xab, 0x50, 0x3a, 0xe2, 0x64, 0xf4,
+	0xfb, 0xa8, 0x7d, 0x43, 0x0e, 0x60, 0xae, 0x7a, 0xc7, 0xa3, 0x5a, 0xde,
+	0xcf, 0xdf, 0xd3, 0x17, 0xce, 0xeb, 0x0b, 0x7a, 0x0d, 0x68, 0x5a, 0x88,
+	0x6b, 0xcf, 0xda, 0x10, 0xc7, 0x9d, 0x9c, 0xc6, 0x3d, 0xcb, 0x7c, 0x4b,
+	0xff, 0x06, 0x9d, 0x2b, 0xd3, 0x5e, 0x2b, 0xde, 0x8b, 0xf7, 0x4d, 0x86,
+	0xfa, 0x86, 0x70, 0xdf, 0x09, 0xbd, 0x16, 0xec, 0x37, 0xf2, 0x6a, 0x56,
+	0x46, 0x2a, 0x5c, 0xc3, 0x64, 0x3b, 0x48, 0x2f, 0xd7, 0xc0, 0x1e, 0x58,
+	0x78, 0xbe, 0xba, 0x7f, 0x7e, 0x1e, 0x12, 0xe3, 0x42, 0x58, 0xee, 0xd6,
+	0x67, 0x17, 0xab, 0xef, 0x1e, 0xb9, 0xf2, 0xbf, 0x70, 0xfd, 0xd0, 0xc8,
+	0x50, 0x0b, 0x47, 0x86, 0xf2, 0xce, 0xc8, 0x95, 0xaf, 0xcb, 0x41, 0xe0,
+	0xf1, 0x30, 0x60, 0x52, 0xf7, 0xf3, 0xce, 0xab, 0x57, 0xa5, 0x30, 0x59,
+	0x2f, 0xea, 0xa1, 0x82, 0xe3, 0x3e, 0x54, 0x2b, 0x91, 0x87, 0x94, 0x53,
+	0xf3, 0x50, 0xbb, 0xf6, 0xcf, 0x77, 0xa4, 0xda, 0xe3, 0x7b, 0xe5, 0xb8,
+	0xe3, 0xde, 0xaf, 0xf4, 0x59, 0xdb, 0x82, 0xc7, 0x58, 0xdf, 0x71, 0x27,
+	0x72, 0x7f, 0xd4, 0x9e, 0xd3, 0x37, 0xf1, 0xbd, 0x59, 0xcd, 0xf7, 0xdf,
+	0x58, 0x47, 0x9c, 0xcd, 0x0a, 0xf1, 0xf1, 0x59, 0xc8, 0xad, 0x4f, 0x4b,
+	0x76, 0x34, 0x31, 0x57, 0xc6, 0xec, 0xb3, 0xdf, 0xb0, 0xce, 0xf0, 0x0b,
+	0xcb, 0xbc, 0xe2, 0xf0, 0xce, 0x1c, 0xa3, 0x33, 0x3e, 0xdf, 0x12, 0xee,
+	0xb9, 0x37, 0x73, 0xca, 0xfc, 0xc6, 0x75, 0x52, 0xff, 0x0a, 0xe6, 0x8b,
+	0xfd, 0x11, 0x57, 0xab, 0xf4, 0x3d, 0x05, 0x9e, 0x6c, 0x89, 0xd7, 0xcd,
+	0xd9, 0x43, 0x46, 0xf6, 0xd6, 0x01, 0x6e, 0xc0, 0x6f, 0xec, 0x3b, 0x21,
+	0x9d, 0x0a, 0x24, 0x37, 0x69, 0xb6, 0xa3, 0x67, 0x87, 0x98, 0x39, 0x33,
+	0x34, 0xb3, 0xc2, 0xd8, 0x91, 0xf8, 0x36, 0x74, 0xa1, 0x64, 0xfb, 0xd8,
+	0x4b, 0x4e, 0x3f, 0xcf, 0x3c, 0x8a, 0xb6, 0x1b, 0x97, 0xb3, 0x09, 0xc1,
+	0x4b, 0xcf, 0x5b, 0xff, 0x32, 0x08, 0xc6, 0x52, 0x29, 0xde, 0x2b, 0xb8,
+	0x8c, 0x4f, 0xb9, 0xca, 0x99, 0x1c, 0x6d, 0x70, 0x26, 0x46, 0x03, 0xd9,
+	0x93, 0xe2, 0x9d, 0x49, 0xdc, 0x93, 0xa0, 0xe3, 0xe3, 0x48, 0x6b, 0x87,
+	0x6e, 0x7d, 0xc7, 0x3a, 0xee, 0x71, 0xbb, 0xd9, 0x6f, 0xb4, 0xe5, 0x88,
+	0x63, 0xfa, 0xca, 0xed, 0x27, 0x72, 0xc2, 0xbb, 0x8b, 0xb6, 0xc4, 0x63,
+	0x7a, 0x7f, 0xe2, 0x17, 0x50, 0x0f, 0x7d, 0x94, 0xd8, 0xaf, 0xeb, 0x4c,
+	0x40, 0x9e, 0x4d, 0x8e, 0xf1, 0xbe, 0x14, 0x9e, 0x63, 0x88, 0xb4, 0x2a,
+	0xb9, 0xd6, 0x1b, 0xb6, 0xf7, 0x69, 0xe6, 0xe1, 0x0a, 0x45, 0x74, 0xda,
+	0x16, 0x6f, 0xf7, 0xdc, 0x1d, 0x9b, 0x61, 0x5a, 0x78, 0xd7, 0xa6, 0xd2,
+	0x67, 0x56, 0xe0, 0xd3, 0x9e, 0x1e, 0x92, 0xb8, 0x33, 0x55, 0x6c, 0x75,
+	0x4e, 0x16, 0x33, 0x5b, 0xd7, 0x81, 0x3e, 0xce, 0xa7, 0x3e, 0x46, 0xf9,
+	0x05, 0xdb, 0xef, 0x45, 0xc9, 0x57, 0x3e, 0x24, 0xe3, 0x2d, 0xed, 0xde,
+	0xfd, 0x7a, 0x6e, 0x2e, 0x03, 0x67, 0x2d, 0x2a, 0x3b, 0xfa, 0xc4, 0x3a,
+	0xea, 0xb7, 0xdd, 0x45, 0x05, 0x5e, 0x56, 0xbf, 0x88, 0x07, 0x36, 0x6e,
+	0xad, 0xb6, 0x51, 0xf6, 0xa6, 0x58, 0xae, 0xc1, 0xe9, 0x1d, 0x5d, 0x85,
+	0x79, 0xdc, 0x05, 0xfd, 0xe9, 0xc0, 0x46, 0x22, 0xae, 0x1b, 0x9c, 0x3d,
+	0xa3, 0x79, 0xf4, 0xc8, 0x7d, 0xd6, 0xbc, 0xf7, 0xf0, 0x30, 0xc6, 0xa8,
+	0xe5, 0x2b, 0x78, 0xf7, 0x12, 0xd7, 0xdb, 0x83, 0x49, 0xd8, 0x06, 0xb9,
+	0xae, 0x7f, 0x67, 0xd7, 0xab, 0xa7, 0xaf, 0xb0, 0x5e, 0xed, 0xc9, 0x23,
+	0x15, 0x7d, 0x6f, 0x48, 0xe7, 0xb8, 0xe2, 0x3a, 0x6e, 0xf3, 0x55, 0x7a,
+	0x7e, 0x54, 0x87, 0xdd, 0x1b, 0x78, 0x72, 0x9d, 0xbd, 0xd3, 0x06, 0x70,
+	0x5c, 0x05, 0x18, 0x36, 0x62, 0xfc, 0x84, 0xc1, 0xd4, 0x11, 0x75, 0x4b,
+	0x9c, 0x3a, 0x70, 0x56, 0x4e, 0xaf, 0x0b, 0xf7, 0x7b, 0xa0, 0x1d, 0xc8,
+	0xb5, 0x47, 0xe3, 0x46, 0x37, 0xae, 0x5d, 0xa6, 0x9d, 0x70, 0x3c, 0x8e,
+	0x1d, 0x0f, 0x69, 0x75, 0x43, 0x0b, 0xfd, 0x89, 0x59, 0xa9, 0x5b, 0x54,
+	0x9e, 0xf1, 0xfc, 0x5d, 0xad, 0x66, 0xdf, 0x11, 0xcb, 0x7a, 0xb0, 0x4b,
+	0x69, 0xe3, 0x12, 0x77, 0x7a, 0xae, 0x8a, 0xdc, 0x5b, 0x9c, 0xf3, 0x2f,
+	0x43, 0x9e, 0x5c, 0xeb, 0xbd, 0x4d, 0x91, 0xf6, 0x42, 0xfc, 0x12, 0xb7,
+	0x09, 0xe0, 0x95, 0x71, 0x95, 0xd3, 0x41, 0x66, 0x80, 0x7c, 0xc5, 0x36,
+	0x98, 0xff, 0xa2, 0x8e, 0xe5, 0x0e, 0xa6, 0x18, 0x27, 0x6a, 0x3f, 0x71,
+	0x87, 0x0a, 0x65, 0xd3, 0x2c, 0xd7, 0x10, 0x1c, 0xde, 0x21, 0xba, 0x0b,
+	0x1d, 0x5e, 0x9c, 0x52, 0xce, 0x37, 0xc7, 0x5c, 0x7c, 0xd7, 0xd8, 0xfb,
+	0x42, 0x8d, 0x6e, 0x12, 0xf9, 0xeb, 0x70, 0xbc, 0xf1, 0x3c, 0xe6, 0xfb,
+	0x12, 0xe6, 0x7b, 0xf9, 0xfb, 0x41, 0x91, 0x57, 0x46, 0x5e, 0xf9, 0x43,
+	0x41, 0xa6, 0x89, 0xf4, 0x47, 0x9a, 0x7b, 0x3d, 0x9f, 0x59, 0xef, 0x63,
+	0x02, 0x6c, 0x67, 0xc1, 0x0b, 0x19, 0xae, 0x25, 0x07, 0xc7, 0x52, 0x37,
+	0x81, 0x17, 0x76, 0xca, 0x9f, 0xc0, 0x16, 0xf8, 0xe3, 0x4a, 0x1a, 0x3c,
+	0xd1, 0x03, 0x1e, 0xe9, 0x06, 0x5f, 0xa4, 0xb4, 0x5d, 0xfc, 0x28, 0x74,
+	0xde, 0xd9, 0x4a, 0xc9, 0xd9, 0x3b, 0x5a, 0x74, 0x72, 0xa3, 0x47, 0x41,
+	0x17, 0xdc, 0x03, 0xab, 0xae, 0xa9, 0x11, 0x37, 0x3e, 0x29, 0xa4, 0xff,
+	0x76, 0xee, 0xed, 0x68, 0x06, 0xae, 0xce, 0x10, 0x57, 0x93, 0x95, 0x2d,
+	0xde, 0x3a, 0xf0, 0x41, 0xb3, 0xe6, 0x83, 0x46, 0x27, 0xe3, 0xdd, 0x64,
+	0xf9, 0x60, 0x04, 0x7c, 0x50, 0x58, 0xc2, 0x07, 0xcf, 0x58, 0x9a, 0x9f,
+	0xae, 0xe2, 0x83, 0x49, 0x9b, 0x36, 0x7e, 0x05, 0x3e, 0xb8, 0xca, 0x4f,
+	0x3e, 0x39, 0x24, 0x27, 0xc0, 0x07, 0x0f, 0x6b, 0x3e, 0xb8, 0x4a, 0xf3,
+	0x01, 0xe3, 0x46, 0xe4, 0x85, 0x56, 0xc8, 0x0e, 0xf2, 0xc2, 0xb3, 0x32,
+	0x0b, 0x5e, 0x78, 0x51, 0xb1, 0xef, 0xcb, 0xb4, 0x0f, 0x46, 0xe9, 0x8f,
+	0x9d, 0x2a, 0x15, 0xc1, 0xbb, 0x4a, 0xbe, 0x30, 0x16, 0x04, 0x33, 0xf0,
+	0xd1, 0x1f, 0x84, 0x0d, 0xef, 0xea, 0x3b, 0x69, 0xa7, 0x61, 0xbb, 0x10,
+	0x36, 0xda, 0xe4, 0xe3, 0x0e, 0xe8, 0xfd, 0xf0, 0x04, 0xc6, 0xb0, 0x47,
+	0xfd, 0x3e, 0xfc, 0x60, 0x0f, 0xf3, 0x4a, 0xdb, 0xfe, 0xb8, 0xe6, 0x9b,
+	0x1a, 0xe8, 0x80, 0x93, 0xdd, 0x8c, 0x33, 0xf9, 0xde, 0x5e, 0xd5, 0x9e,
+	0xef, 0x03, 0xcc, 0x11, 0x75, 0xbf, 0x30, 0xc6, 0xd1, 0xb4, 0xc8, 0xb6,
+	0xa7, 0x5c, 0x18, 0x90, 0xfb, 0x6c, 0x5e, 0x3e, 0xa8, 0x83, 0x1d, 0x5a,
+	0xa7, 0x8c, 0x5d, 0xae, 0xb6, 0x25, 0xbd, 0xdf, 0x80, 0xd0, 0xac, 0x4d,
+	0x9b, 0x3d, 0x81, 0x7d, 0xc5, 0x6a, 0xbb, 0xfe, 0x5e, 0xd8, 0xf5, 0xac,
+	0x23, 0xae, 0xb1, 0xeb, 0xef, 0xb2, 0xbc, 0xc6, 0xdf, 0x9e, 0xb6, 0xf1,
+	0x0f, 0x00, 0xbe, 0x1d, 0x73, 0x36, 0x3e, 0xdb, 0xa0, 0xad, 0x21, 0x72,
+	0x03, 0xec, 0xbc, 0x1b, 0xc1, 0x83, 0x37, 0xc1, 0x8f, 0x7a, 0x77, 0xd1,
+	0x93, 0x9d, 0xc5, 0x66, 0xf8, 0xdb, 0xad, 0xf2, 0xab, 0x63, 0x1b, 0xa5,
+	0x7f, 0xf4, 0x77, 0x9a, 0xa1, 0x57, 0x61, 0x97, 0xbe, 0x08, 0x38, 0x23,
+	0x56, 0x56, 0x47, 0xc1, 0x03, 0xed, 0x89, 0x1f, 0xa8, 0x44, 0xab, 0x91,
+	0xed, 0x3c, 0x4b, 0xbe, 0x5c, 0x3b, 0x31, 0xd4, 0x67, 0x1c, 0xa5, 0x45,
+	0xce, 0x1c, 0xa7, 0xe7, 0x95, 0x80, 0x2d, 0x9e, 0x82, 0x1d, 0xb2, 0x01,
+	0xed, 0x31, 0x96, 0xbc, 0x5a, 0x9e, 0xd9, 0xea, 0xde, 0x9d, 0xd3, 0x7c,
+	0x78, 0xc9, 0xc9, 0x8e, 0xdd, 0x24, 0x85, 0xc1, 0x28, 0xc6, 0xa0, 0x9a,
+	0xd7, 0xca, 0xf5, 0xd2, 0xaf, 0xc7, 0x73, 0x59, 0x0e, 0x42, 0x1f, 0xff,
+	0x69, 0xb1, 0x5f, 0x66, 0x07, 0x9a, 0xf0, 0x1d, 0x95, 0x67, 0x8a, 0x5b,
+	0xe0, 0xef, 0xfc, 0x0a, 0x70, 0x54, 0x8b, 0xef, 0x5a, 0xe9, 0x5d, 0x47,
+	0x5e, 0x6d, 0x90, 0x19, 0xa4, 0xdf, 0x28, 0xbf, 0x64, 0xd3, 0x99, 0x46,
+	0xde, 0x68, 0x40, 0xdd, 0xa8, 0x9c, 0x2f, 0xd2, 0x96, 0xd4, 0x3c, 0xd1,
+	0xf3, 0xb2, 0x6c, 0xc9, 0xbc, 0x0c, 0xdb, 0xf4, 0x59, 0x3c, 0xcf, 0x4b,
+	0x72, 0xe7, 0x6e, 0x67, 0x4b, 0xa2, 0xdd, 0x81, 0xbe, 0xc4, 0xe3, 0x3a,
+	0x5b, 0xbc, 0x5a, 0xe7, 0x5a, 0xdb, 0x46, 0x8d, 0x3c, 0x3f, 0xa8, 0xe2,
+	0x0d, 0x98, 0x93, 0xcd, 0x4e, 0x87, 0x4d, 0xe3, 0xb7, 0xbe, 0x2f, 0x51,
+	0xda, 0xcf, 0xa8, 0x0d, 0xab, 0x44, 0xda, 0x1a, 0x60, 0xe7, 0xec, 0x11,
+	0xd5, 0xdc, 0x20, 0xae, 0xb4, 0x4f, 0xa8, 0x56, 0xa4, 0xf9, 0x36, 0x2d,
+	0xd6, 0x00, 0x9d, 0x80, 0xb4, 0x16, 0xa4, 0x6d, 0xb2, 0x69, 0x4d, 0x0d,
+	0x52, 0x8b, 0xb4, 0xcb, 0x9a, 0xe7, 0x2f, 0x76, 0xf8, 0x5e, 0xce, 0xa9,
+	0x97, 0xb6, 0x53, 0x0d, 0x90, 0x0d, 0xab, 0x65, 0x66, 0x6b, 0x9d, 0xb4,
+	0x21, 0x8f, 0x31, 0xee, 0xd4, 0xa9, 0xa8, 0xbc, 0xf3, 0x54, 0x7b, 0xfc,
+	0xa3, 0x18, 0x43, 0xfb, 0x19, 0xc6, 0xbc, 0xff, 0xac, 0x99, 0x31, 0x9f,
+	0xb6, 0x33, 0x7c, 0xd7, 0x69, 0xf9, 0x43, 0x7c, 0x98, 0x3b, 0xdf, 0x60,
+	0x63, 0x94, 0x8e, 0x3b, 0xc3, 0xa3, 0xd4, 0xdb, 0xed, 0xf6, 0x7e, 0xa2,
+	0xff, 0xd9, 0x4c, 0x5f, 0x6d, 0x82, 0x36, 0x54, 0x89, 0xfc, 0x48, 0xdd,
+	0x83, 0xf7, 0xb8, 0x23, 0x85, 0x79, 0x99, 0x35, 0x45, 0xbe, 0x3a, 0xae,
+	0xb8, 0x4f, 0x05, 0x69, 0x95, 0x77, 0x05, 0x66, 0x8e, 0xc9, 0x0b, 0x46,
+	0x2e, 0xfd, 0x9a, 0x91, 0x4b, 0xa7, 0xcf, 0x2d, 0x90, 0x4b, 0x05, 0x2d,
+	0x97, 0x06, 0x05, 0xef, 0xa9, 0x02, 0xe4, 0xd2, 0x08, 0xbe, 0x3d, 0x2d,
+	0x97, 0x62, 0x62, 0x6d, 0x64, 0x89, 0x5e, 0xc5, 0xfe, 0x27, 0x4b, 0xae,
+	0xb6, 0xa5, 0x0a, 0xe3, 0xb0, 0x43, 0x4a, 0x23, 0x56, 0x67, 0x4b, 0xba,
+	0x49, 0x3a, 0x7a, 0x7e, 0x2a, 0xa1, 0x9d, 0x39, 0xdb, 0xcc, 0x3b, 0x8f,
+	0x5f, 0x54, 0x94, 0x61, 0x27, 0x20, 0xc3, 0x1e, 0xbe, 0x82, 0x0c, 0x43,
+	0x5e, 0x19, 0x79, 0x65, 0xb6, 0xfb, 0xdd, 0x9f, 0x0e, 0x79, 0x94, 0x1f,
+	0x94, 0x19, 0x90, 0x49, 0x25, 0xc8, 0xa4, 0x12, 0xe4, 0x54, 0x09, 0x72,
+	0xa9, 0x04, 0xb9, 0x54, 0x82, 0x5c, 0x2a, 0x41, 0x2e, 0x41, 0xc6, 0x3d,
+	0x0a, 0x19, 0x67, 0x64, 0xda, 0x00, 0xed, 0x35, 0xb9, 0xcf, 0xea, 0x77,
+	0x13, 0x27, 0xe9, 0xb2, 0x7e, 0x91, 0xd9, 0xb3, 0x7a, 0xae, 0x2a, 0x2e,
+	0xb8, 0xeb, 0x88, 0xe6, 0x77, 0xcf, 0x57, 0xd7, 0x3a, 0xdc, 0x1f, 0xf3,
+	0x03, 0xed, 0xb3, 0x6f, 0xe6, 0x6f, 0xa9, 0x03, 0x5f, 0xbf, 0x62, 0xf9,
+	0x7a, 0xf3, 0x1c, 0x5f, 0x27, 0x1d, 0xc6, 0x89, 0x97, 0xe7, 0xeb, 0x16,
+	0x9b, 0x97, 0x0f, 0x56, 0x80, 0xaf, 0x57, 0x2c, 0xe2, 0xeb, 0x28, 0xf8,
+	0x7a, 0xe7, 0x12, 0xbe, 0x5e, 0xe5, 0xf4, 0xea, 0x3a, 0x3c, 0x83, 0xc6,
+	0xef, 0x5a, 0x67, 0x9e, 0xaf, 0xf7, 0x6b, 0xbe, 0x3e, 0x04, 0xbe, 0xbe,
+	0xbe, 0x8a, 0xaf, 0x77, 0x4a, 0xf2, 0x96, 0x6c, 0x64, 0xa3, 0xec, 0xbe,
+	0x5f, 0x35, 0xaf, 0x91, 0x7f, 0x11, 0x53, 0xdf, 0xf0, 0x58, 0xef, 0x58,
+	0xb3, 0xe4, 0x1e, 0xfa, 0x11, 0xd7, 0x06, 0xc8, 0x23, 0x43, 0x19, 0xc7,
+	0x93, 0x83, 0x47, 0x7e, 0x20, 0xd3, 0x9a, 0xb7, 0x44, 0xf6, 0x1c, 0x89,
+	0xca, 0xf0, 0x11, 0xc6, 0x1e, 0xbe, 0x63, 0xe9, 0xbd, 0x4e, 0x86, 0x07,
+	0xb9, 0x5f, 0xd2, 0x95, 0xdd, 0x47, 0xe0, 0x63, 0x1d, 0x61, 0xec, 0xe1,
+	0xf2, 0x1c, 0x8f, 0x4d, 0x43, 0xb6, 0xec, 0x3e, 0xa2, 0xe7, 0x1a, 0xed,
+	0x34, 0xc8, 0xa1, 0x23, 0x22, 0xb7, 0x1d, 0x71, 0xe5, 0xf6, 0x23, 0x73,
+	0xbc, 0x36, 0x10, 0xf2, 0xda, 0x9f, 0x83, 0xd7, 0xda, 0x2d, 0xaf, 0xa9,
+	0x39, 0x5e, 0xfb, 0x5a, 0x15, 0xaf, 0xb1, 0x3e, 0x79, 0xed, 0x82, 0x4d,
+	0xe3, 0xb7, 0x2b, 0x7b, 0x8f, 0xb4, 0xca, 0xee, 0x87, 0xde, 0x22, 0x7b,
+	0xee, 0x27, 0xac, 0xe6, 0x9e, 0x3c, 0xda, 0x5f, 0xe3, 0x95, 0x76, 0xb4,
+	0x1f, 0xee, 0x0f, 0xd2, 0x77, 0x65, 0x75, 0x4e, 0x48, 0x32, 0xcf, 0xfe,
+	0x6a, 0xe1, 0x3b, 0x9f, 0x82, 0x4f, 0xb1, 0x17, 0x30, 0xdd, 0x7a, 0x44,
+	0x92, 0xae, 0xbc, 0x26, 0x23, 0xa9, 0x47, 0x5b, 0x8d, 0x3d, 0x71, 0x09,
+	0xbc, 0x42, 0xfa, 0xcf, 0x48, 0xee, 0xed, 0x81, 0xf6, 0x2b, 0x46, 0xcb,
+	0x42, 0xff, 0x9f, 0x31, 0x73, 0xc7, 0xdc, 0x77, 0xc7, 0xf3, 0xbe, 0x35,
+	0xfa, 0xbc, 0x9b, 0x8e, 0xd7, 0x76, 0x33, 0xbf, 0x46, 0xef, 0x37, 0xcd,
+	0xe9, 0xb3, 0xdc, 0xac, 0xcf, 0x76, 0x62, 0x3a, 0x9e, 0x5e, 0x28, 0xf3,
+	0x8e, 0x30, 0xde, 0xbd, 0xcc, 0xbb, 0x06, 0xff, 0xf8, 0x2a, 0x13, 0x9b,
+	0x25, 0xdf, 0x7d, 0xdd, 0xc9, 0x15, 0x2f, 0xe9, 0x7d, 0x85, 0x59, 0x1f,
+	0xbf, 0xcb, 0xfc, 0x66, 0xf9, 0x4b, 0x8c, 0x71, 0x24, 0x12, 0xea, 0x81,
+	0x56, 0xee, 0x3b, 0x18, 0x9c, 0x32, 0x76, 0x94, 0xe1, 0xd1, 0x06, 0xed,
+	0x6b, 0x8c, 0xe0, 0x7b, 0xf7, 0x68, 0xa3, 0x53, 0xa0, 0x6d, 0x32, 0xd0,
+	0xe0, 0xe4, 0xc7, 0xf7, 0xb4, 0x1a, 0x9b, 0x79, 0x20, 0xce, 0x3d, 0x85,
+	0x19, 0xb5, 0x54, 0x26, 0x9f, 0x92, 0x50, 0x26, 0x27, 0x6f, 0xc9, 0xc0,
+	0xb6, 0xce, 0x1d, 0xd1, 0xf7, 0xf7, 0x25, 0xda, 0x15, 0xc7, 0xf4, 0x09,
+	0xc8, 0xd7, 0x90, 0x16, 0xe2, 0xf2, 0xf1, 0x23, 0xa4, 0x07, 0x15, 0x6b,
+	0x94, 0xdf, 0xb2, 0xf4, 0x70, 0x59, 0x8a, 0x90, 0x3b, 0x47, 0x8e, 0xdc,
+	0x2e, 0xe3, 0xbb, 0x16, 0xd3, 0xc3, 0x9e, 0x79, 0x7a, 0x88, 0xc1, 0x3e,
+	0x73, 0xaa, 0xe9, 0xe1, 0x37, 0xe7, 0xe8, 0x61, 0xdc, 0xf9, 0xd7, 0xd2,
+	0xc3, 0x0d, 0x0b, 0xe8, 0x61, 0x44, 0xd3, 0x43, 0xff, 0x1c, 0x3d, 0x8c,
+	0x1c, 0x61, 0xbf, 0x7a, 0x5d, 0xd4, 0x9b, 0x71, 0x38, 0xe7, 0x73, 0xb4,
+	0x90, 0x18, 0xd6, 0xfb, 0x44, 0x93, 0x79, 0x9e, 0x25, 0x5d, 0xa5, 0x18,
+	0x1b, 0x99, 0x9f, 0xff, 0xc6, 0x7f, 0xd3, 0xf9, 0x7f, 0x47, 0xfc, 0xff,
+	0xef, 0xfc, 0x5f, 0x8f, 0xf6, 0x29, 0x8b, 0x43, 0x79, 0x1c, 0xd2, 0xc3,
+	0x7b, 0xe2, 0x46, 0x2f, 0x70, 0x8e, 0xf9, 0x6d, 0xf6, 0xac, 0x9f, 0x83,
+	0xfc, 0x7b, 0x1c, 0xf2, 0xef, 0xb1, 0x05, 0xeb, 0x01, 0x3d, 0x36, 0x06,
+	0x11, 0xc8, 0xc1, 0xd4, 0x3c, 0x3e, 0x66, 0xba, 0x89, 0x0f, 0xb3, 0xf7,
+	0xe4, 0x6c, 0x65, 0x31, 0x4e, 0x5c, 0xbd, 0xdf, 0xe8, 0x64, 0xaa, 0x1a,
+	0x27, 0x84, 0x7b, 0xb6, 0x6a, 0x8c, 0xf8, 0x5d, 0xe6, 0xf7, 0x65, 0xbd,
+	0x87, 0xa4, 0xa0, 0xd7, 0x9f, 0x88, 0x17, 0xae, 0x3f, 0x11, 0x27, 0xae,
+	0xb6, 0xf7, 0x0b, 0xe5, 0x3a, 0xbd, 0x2f, 0xfc, 0xc0, 0x54, 0x4c, 0x66,
+	0x62, 0x8c, 0xeb, 0xf1, 0xde, 0x57, 0xfa, 0xca, 0x7e, 0xbc, 0x20, 0x79,
+	0x7b, 0xd6, 0x67, 0x95, 0xa5, 0x6d, 0xc6, 0x03, 0x79, 0x27, 0x42, 0xb8,
+	0x0e, 0xd1, 0x69, 0x65, 0x5d, 0x43, 0x55, 0x9c, 0x12, 0x78, 0x1f, 0x93,
+	0x44, 0xb6, 0x1b, 0xef, 0x29, 0xf6, 0xfd, 0xa4, 0x8c, 0x3c, 0x58, 0x86,
+	0x2d, 0xf7, 0x30, 0x74, 0x8e, 0x23, 0x10, 0x93, 0xfa, 0x2e, 0x14, 0xc2,
+	0x30, 0xa1, 0xef, 0xf5, 0xa3, 0xdf, 0x47, 0x7a, 0x88, 0xe3, 0xfb, 0xb2,
+	0x8d, 0x25, 0xc5, 0xa5, 0x50, 0xfc, 0x01, 0xe0, 0xe7, 0x1d, 0x94, 0x3f,
+	0xc2, 0xfb, 0x8d, 0xe6, 0xc3, 0xf8, 0x21, 0x03, 0xfa, 0xcd, 0xb9, 0x79,
+	0xcd, 0xc9, 0x94, 0xcd, 0xfe, 0x96, 0xaa, 0xfb, 0xf5, 0xe5, 0xb0, 0xb6,
+	0x9f, 0xd3, 0x76, 0x5f, 0x0b, 0xcf, 0xe7, 0x19, 0x1b, 0xfa, 0xcb, 0xb0,
+	0xa1, 0x9f, 0xa8, 0x64, 0xf4, 0x1a, 0xd6, 0x63, 0xb0, 0xa1, 0x1f, 0x85,
+	0xee, 0xa1, 0xce, 0x89, 0x59, 0x9d, 0x33, 0xa2, 0x76, 0x69, 0x9d, 0xf3,
+	0xd7, 0x5a, 0xe7, 0xfc, 0xea, 0x12, 0x9d, 0x73, 0x48, 0xb5, 0x8f, 0x52,
+	0xe7, 0xf4, 0xaa, 0x9d, 0x0e, 0xed, 0xc5, 0xb5, 0xcb, 0xe8, 0x9c, 0xf7,
+	0xca, 0xaf, 0xd8, 0xbc, 0xfd, 0xf2, 0xbe, 0x6d, 0x7a, 0xdd, 0xc6, 0x9b,
+	0x50, 0xbc, 0xcb, 0xce, 0xe8, 0xa0, 0xeb, 0x55, 0xa7, 0x5e, 0xef, 0xfd,
+	0x6a, 0x95, 0xce, 0x69, 0x53, 0xdd, 0x4e, 0xaf, 0xae, 0xc3, 0x78, 0x04,
+	0xbf, 0x53, 0x4e, 0x66, 0xa0, 0x0e, 0xdf, 0x71, 0x89, 0x1c, 0xc1, 0xd8,
+	0xcd, 0x7d, 0x7b, 0xca, 0xe4, 0x5d, 0x63, 0xf3, 0x54, 0x98, 0xee, 0x9a,
+	0xf4, 0x76, 0x9b, 0x6e, 0x74, 0x55, 0x9b, 0x6a, 0xd5, 0xba, 0x6a, 0x33,
+	0x18, 0x6a, 0x02, 0xfa, 0x75, 0xa2, 0x14, 0xea, 0x2c, 0xfe, 0x66, 0xbc,
+	0x99, 0x71, 0x89, 0x30, 0x6e, 0x9d, 0x40, 0x19, 0x3c, 0xa5, 0xd0, 0xa6,
+	0xe4, 0x6f, 0xf8, 0x0a, 0x78, 0xa6, 0x80, 0xd7, 0x5b, 0xc0, 0x3f, 0xbf,
+	0x5e, 0x64, 0xdc, 0xb3, 0x59, 0x8e, 0x8e, 0x55, 0xe7, 0xb5, 0xca, 0xbb,
+	0xc7, 0x36, 0xc8, 0xbe, 0x51, 0xff, 0x6a, 0xa9, 0xdf, 0x28, 0x23, 0xa3,
+	0x2f, 0xea, 0xfb, 0x40, 0xd6, 0xe8, 0x7b, 0x92, 0x78, 0x7f, 0x98, 0x91,
+	0x91, 0xfd, 0x8e, 0x91, 0x91, 0x19, 0x35, 0x6f, 0xb3, 0x86, 0x6d, 0xf2,
+	0x6e, 0xa6, 0xbe, 0xd1, 0xb8, 0xbe, 0x43, 0x7a, 0xa2, 0x72, 0xad, 0xfc,
+	0xd1, 0x71, 0x75, 0xa7, 0x9a, 0xbf, 0x4b, 0x41, 0xdb, 0xac, 0x93, 0x0b,
+	0x6c, 0xd6, 0xbf, 0x97, 0x99, 0xf7, 0x45, 0x31, 0x4e, 0xd0, 0xf0, 0x75,
+	0x2f, 0x73, 0x1d, 0xb4, 0x39, 0x26, 0x97, 0xa4, 0x4f, 0xe3, 0x8f, 0xf2,
+	0xb4, 0x01, 0x72, 0x70, 0x56, 0xeb, 0xd7, 0xb5, 0xbc, 0xf3, 0xf8, 0x08,
+	0x6d, 0xd7, 0xaf, 0x6b, 0x79, 0xb6, 0xd6, 0xda, 0xae, 0xd3, 0x90, 0xd3,
+	0x94, 0xa3, 0x37, 0xca, 0x5f, 0xdb, 0x74, 0xa6, 0x25, 0xe3, 0xb3, 0x42,
+	0x7d, 0x17, 0x83, 0x0c, 0xa5, 0x3c, 0xfd, 0x59, 0x6d, 0xd7, 0xe7, 0x6c,
+	0x1b, 0x94, 0x9f, 0x46, 0x76, 0x6f, 0x76, 0xa6, 0x6d, 0x1a, 0xbf, 0xc3,
+	0x18, 0xba, 0x9f, 0xc9, 0x59, 0x3e, 0x53, 0xce, 0x93, 0xc8, 0x5f, 0x83,
+	0x7c, 0xf2, 0xd9, 0x63, 0x9a, 0xcf, 0xb4, 0x7d, 0xe2, 0x74, 0xd9, 0x35,
+	0x85, 0xb9, 0xf5, 0x80, 0x3c, 0xf9, 0x4c, 0x1d, 0xf5, 0xa6, 0x8d, 0x3c,
+	0xf0, 0x90, 0xfe, 0x45, 0xe8, 0x0e, 0xd6, 0x45, 0xfa, 0xb1, 0x0c, 0xe6,
+	0xf0, 0x24, 0xfc, 0x9f, 0x46, 0x7c, 0x37, 0xe3, 0x7b, 0x42, 0x7e, 0x75,
+	0x30, 0xaa, 0xc7, 0x3d, 0x82, 0x71, 0x1c, 0x38, 0x82, 0x31, 0x39, 0xc6,
+	0x76, 0x76, 0xcf, 0xb8, 0x52, 0x73, 0x86, 0x7c, 0xc7, 0x33, 0x86, 0x41,
+	0xb0, 0xb7, 0x8b, 0x74, 0x9b, 0xf4, 0xfa, 0xf5, 0xf9, 0xb7, 0xcd, 0xf1,
+	0x08, 0x70, 0x72, 0x00, 0xf3, 0x31, 0x52, 0xf4, 0xbd, 0xac, 0xe3, 0xc7,
+	0x31, 0x4e, 0xd8, 0x80, 0xed, 0xb0, 0x05, 0xdb, 0x61, 0x07, 0xb6, 0xc3,
+	0x0e, 0x5c, 0x2d, 0xa7, 0xb6, 0x72, 0x7f, 0x49, 0xfe, 0x9d, 0xbc, 0x77,
+	0xf9, 0x1b, 0x3a, 0x36, 0x5f, 0x7b, 0x4b, 0x1f, 0x7c, 0x76, 0xf1, 0x92,
+	0x03, 0xdc, 0x63, 0x3f, 0xeb, 0xd5, 0xde, 0xd2, 0x2f, 0xed, 0x3d, 0xc8,
+	0xef, 0xb9, 0x24, 0x1d, 0xb7, 0x7c, 0xd8, 0xa9, 0x1d, 0xe8, 0x03, 0x1e,
+	0x33, 0x4e, 0x32, 0x3e, 0xe4, 0x30, 0x4e, 0x91, 0xdd, 0x1c, 0xd1, 0x67,
+	0xc4, 0xa6, 0x19, 0x8b, 0xb8, 0xa5, 0x3d, 0xb2, 0x25, 0xb1, 0xdb, 0x49,
+	0x0e, 0xa8, 0x48, 0x72, 0xa0, 0xcf, 0x09, 0xcb, 0xf1, 0x0e, 0x6a, 0xc8,
+	0x19, 0xc0, 0x7a, 0xa0, 0xf4, 0x75, 0xd0, 0xd3, 0x79, 0x29, 0x1c, 0x6f,
+	0x90, 0xa9, 0x62, 0xbb, 0x97, 0x55, 0x31, 0xe1, 0xbe, 0x12, 0x75, 0x0a,
+	0x44, 0x7f, 0x26, 0x2a, 0x13, 0xa3, 0x1b, 0x45, 0x69, 0xdb, 0xbd, 0x45,
+	0xb2, 0x63, 0xa3, 0x72, 0xbe, 0x5b, 0x9a, 0x14, 0xda, 0xe7, 0xdd, 0xde,
+	0xea, 0x14, 0xd7, 0x11, 0x43, 0x5e, 0x58, 0x4f, 0x3e, 0x19, 0x05, 0x0e,
+	0x41, 0xb7, 0x8c, 0xeb, 0xd6, 0x09, 0xe5, 0xde, 0xed, 0x3a, 0x66, 0xca,
+	0x38, 0x6d, 0xf5, 0x7a, 0x03, 0xf9, 0x23, 0xba, 0x2c, 0x7f, 0x4c, 0x96,
+	0xb8, 0x36, 0x23, 0x79, 0x97, 0x71, 0x61, 0x1f, 0xbf, 0xc7, 0x59, 0xb6,
+	0x4e, 0x46, 0xba, 0xf3, 0x76, 0x8f, 0xc7, 0x37, 0xc1, 0x07, 0x1c, 0x9f,
+	0x5e, 0x27, 0x01, 0xaf, 0x2f, 0x5e, 0xcf, 0x88, 0x56, 0xc9, 0x03, 0x47,
+	0x66, 0x46, 0xc3, 0xf5, 0x0f, 0xb6, 0x87, 0xef, 0x71, 0x23, 0x6f, 0xb3,
+	0x4b, 0xea, 0x11, 0x2e, 0xae, 0x55, 0x2e, 0x94, 0xb1, 0x4a, 0x9f, 0x13,
+	0xf6, 0xb4, 0x7c, 0x3d, 0x5d, 0x31, 0xb2, 0x75, 0xbc, 0x12, 0xea, 0x96,
+	0xa8, 0xd1, 0xa5, 0x4b, 0xf4, 0x89, 0x89, 0x60, 0xce, 0xeb, 0x93, 0x4b,
+	0x3a, 0x46, 0xf7, 0x6b, 0x53, 0x2d, 0xe2, 0x1e, 0x93, 0xd9, 0x11, 0xff,
+	0x54, 0x2b, 0xf7, 0x69, 0x8c, 0xa4, 0xde, 0x8c, 0x7e, 0x8c, 0xb5, 0x50,
+	0x1f, 0x0e, 0xa9, 0xb5, 0x78, 0xaf, 0xd1, 0xf4, 0x07, 0x9e, 0xc2, 0xb7,
+	0xf1, 0x13, 0xbe, 0x0c, 0x3f, 0xe1, 0x09, 0xe8, 0xba, 0x73, 0xf0, 0x13,
+	0x1e, 0x87, 0x9f, 0xf0, 0x18, 0xfc, 0x84, 0x47, 0xa1, 0x27, 0xab, 0xfd,
+	0x83, 0xe1, 0x05, 0xfe, 0x41, 0xa0, 0xf9, 0x9f, 0x31, 0xc0, 0xc7, 0xab,
+	0x7c, 0x83, 0xbd, 0x46, 0x5f, 0xc1, 0xef, 0x37, 0x7c, 0xd4, 0xa6, 0x6e,
+	0xd6, 0xfa, 0xd1, 0xec, 0xd9, 0x1d, 0x98, 0xd3, 0x57, 0x6d, 0xca, 0xe8,
+	0xab, 0x89, 0x79, 0x7d, 0x65, 0xf8, 0xe8, 0xd8, 0xa8, 0x44, 0xfc, 0xd1,
+	0xe9, 0x6c, 0x6a, 0xbb, 0xe6, 0xa1, 0x26, 0x7f, 0xa3, 0x44, 0x1e, 0x50,
+	0xcd, 0x35, 0x92, 0xb5, 0xdf, 0xa0, 0xaf, 0xa3, 0x5f, 0x47, 0x5b, 0xef,
+	0x94, 0x9c, 0xb6, 0xcf, 0xae, 0x8c, 0xef, 0x47, 0x17, 0xe1, 0xbb, 0x50,
+	0x7a, 0x56, 0xe3, 0xfc, 0x7e, 0x7d, 0x26, 0xbf, 0x41, 0x86, 0xcb, 0x21,
+	0xce, 0x79, 0x06, 0x8e, 0xfb, 0x30, 0x5a, 0x25, 0x72, 0xac, 0x45, 0xfa,
+	0x53, 0xa2, 0x72, 0xa9, 0x95, 0x7a, 0xff, 0xca, 0xa9, 0x6e, 0x89, 0xe7,
+	0xba, 0x49, 0xab, 0xf7, 0xc9, 0x84, 0x9e, 0x8b, 0x16, 0xa9, 0x39, 0x46,
+	0x1b, 0x25, 0x5c, 0xc3, 0xbb, 0xbd, 0xc5, 0xde, 0x41, 0x1d, 0x35, 0xe5,
+	0x44, 0x0e, 0xea, 0xf9, 0x9a, 0xd5, 0x7b, 0x0c, 0x6f, 0x9e, 0x62, 0x2c,
+	0x9e, 0xf7, 0xfd, 0x31, 0x0e, 0xff, 0xaf, 0x99, 0xbf, 0x42, 0x8b, 0xb1,
+	0x67, 0xd6, 0x58, 0x3b, 0xc6, 0xc4, 0xa9, 0x96, 0xb7, 0x61, 0xd8, 0x4e,
+	0xf5, 0x1d, 0xb5, 0xab, 0xe0, 0x03, 0x37, 0xa0, 0x4d, 0xae, 0x63, 0xdb,
+	0xbf, 0x17, 0xe4, 0xfd, 0xb3, 0x73, 0xc0, 0x5f, 0x85, 0xb4, 0x06, 0xe4,
+	0x31, 0x66, 0xf3, 0x85, 0x16, 0xc6, 0x65, 0xb3, 0x7e, 0xa3, 0x4d, 0x5b,
+	0xe5, 0x8c, 0x8c, 0xb6, 0xc3, 0x37, 0xe7, 0x39, 0x76, 0xe6, 0xf7, 0x73,
+	0xee, 0x84, 0x7f, 0xab, 0x69, 0x12, 0xf2, 0x67, 0x8f, 0x5c, 0x6b, 0xe3,
+	0xce, 0xd4, 0xc3, 0xbf, 0xb8, 0x60, 0xbd, 0xf6, 0x10, 0xf4, 0xd8, 0xad,
+	0x90, 0x47, 0xd4, 0xc3, 0x87, 0xe4, 0x17, 0x2c, 0x3d, 0x2f, 0xd4, 0xc3,
+	0x17, 0x85, 0xb1, 0xe1, 0x2e, 0xe4, 0xe5, 0x83, 0x28, 0xe8, 0xe1, 0x70,
+	0x95, 0xaf, 0x46, 0xbf, 0xaf, 0x2e, 0x6d, 0xd6, 0xc0, 0x16, 0xfa, 0x7d,
+	0x90, 0x03, 0xb1, 0xd0, 0xcf, 0xab, 0x9d, 0x5b, 0xa3, 0xdd, 0x69, 0xeb,
+	0x8e, 0xa4, 0x5e, 0x22, 0x8e, 0x12, 0x87, 0xe4, 0xf6, 0xf5, 0xbc, 0x26,
+	0xcf, 0xf5, 0xbf, 0xa5, 0x71, 0x26, 0x8a, 0xb4, 0xb7, 0x46, 0xc3, 0x68,
+	0xe5, 0x7c, 0x22, 0xdc, 0xbf, 0x51, 0xb0, 0x75, 0xf7, 0xd8, 0xf5, 0xf8,
+	0x82, 0x7c, 0x9b, 0x71, 0xce, 0x44, 0x5f, 0x64, 0x25, 0xcf, 0x64, 0xa3,
+	0xee, 0xed, 0xda, 0x6f, 0xcf, 0x48, 0xd8, 0x16, 0xbf, 0x6b, 0xaa, 0xda,
+	0xa6, 0x1d, 0xc5, 0xf7, 0xe2, 0xfb, 0x1b, 0x9e, 0xd7, 0x6b, 0x8b, 0xe6,
+	0x6e, 0x9a, 0x90, 0x4f, 0xc8, 0x3b, 0x09, 0x7d, 0x8e, 0xc9, 0x3f, 0x46,
+	0xbb, 0x87, 0xeb, 0xae, 0xde, 0xf4, 0x70, 0xea, 0x23, 0xfa, 0x0e, 0xd5,
+	0x71, 0x11, 0xa7, 0x90, 0xda, 0xab, 0xf7, 0x9d, 0x14, 0x74, 0x7c, 0x39,
+	0x8f, 0xf7, 0xbc, 0x8f, 0xda, 0x76, 0x8c, 0x7f, 0x0b, 0x88, 0x69, 0x1f,
+	0x04, 0x6c, 0xd4, 0x21, 0x94, 0xbd, 0x31, 0x69, 0x3b, 0xfa, 0x7e, 0xcd,
+	0x0b, 0x6b, 0xe1, 0x0b, 0xf4, 0x1e, 0x85, 0xae, 0x3e, 0x1a, 0x97, 0xfe,
+	0xa3, 0x5a, 0x37, 0x66, 0x96, 0xc6, 0x0a, 0xb6, 0x78, 0x2e, 0xfd, 0x89,
+	0x98, 0x27, 0xd7, 0x1c, 0x8d, 0xc8, 0xe1, 0xd8, 0x16, 0xaf, 0xc3, 0xb9,
+	0xd1, 0xea, 0x42, 0x43, 0x7f, 0xa0, 0x15, 0xd4, 0x37, 0xeb, 0x90, 0xbd,
+	0xf3, 0xb1, 0x6b, 0xd4, 0x7f, 0x49, 0x46, 0xc8, 0x4b, 0x95, 0x88, 0x8c,
+	0x0f, 0xb6, 0x02, 0x9e, 0xb7, 0xae, 0x07, 0x0e, 0x40, 0x53, 0x98, 0x1f,
+	0xfd, 0xf7, 0x3c, 0xdc, 0x38, 0xe5, 0x57, 0x1b, 0xfa, 0xef, 0x3b, 0x4a,
+	0x1d, 0xe6, 0x6b, 0xbe, 0x46, 0xbf, 0x5e, 0x8d, 0xf6, 0x3d, 0xc8, 0x8b,
+	0x6f, 0x11, 0xff, 0x01, 0xc8, 0xb5, 0xa3, 0x51, 0xe9, 0x38, 0xda, 0x20,
+	0x9b, 0x8e, 0xd2, 0xf7, 0xa8, 0xf6, 0x45, 0x69, 0x8b, 0x5e, 0xc2, 0xb8,
+	0x6e, 0x34, 0xf7, 0x0d, 0x4e, 0x45, 0x65, 0x1f, 0xf9, 0x15, 0x65, 0x73,
+	0xb0, 0x93, 0xb3, 0x47, 0x3d, 0xbd, 0x16, 0x9a, 0xc5, 0x38, 0xf9, 0x37,
+	0x2c, 0xfa, 0x8e, 0x1a, 0x39, 0x53, 0xa0, 0x6f, 0x32, 0xd0, 0x02, 0xbc,
+	0x3e, 0x60, 0xf9, 0xe5, 0x3d, 0xeb, 0x2d, 0x5f, 0xfe, 0x9c, 0xfc, 0x96,
+	0x5b, 0x6f, 0xe4, 0xe5, 0x87, 0xd6, 0x73, 0x2f, 0xd2, 0x5a, 0x9f, 0xef,
+	0x3a, 0x6d, 0x43, 0x18, 0xb9, 0xf9, 0x7a, 0xfc, 0x27, 0xc0, 0x51, 0xb8,
+	0xfe, 0x44, 0x3e, 0xe4, 0x1a, 0xb2, 0x3e, 0xb3, 0x92, 0x9a, 0xd1, 0x7f,
+	0x53, 0x89, 0x6b, 0x61, 0xf3, 0xf7, 0x59, 0x6d, 0xaf, 0x30, 0x36, 0xfe,
+	0x4c, 0xf8, 0x37, 0x9c, 0xaa, 0xf6, 0x19, 0x56, 0xaf, 0x75, 0x31, 0xbe,
+	0x34, 0xb7, 0x17, 0x28, 0x18, 0xd5, 0x77, 0xc2, 0xc5, 0x9c, 0x8b, 0xc5,
+	0x5a, 0xe7, 0x9b, 0x63, 0x12, 0xb8, 0x7e, 0xdc, 0xf9, 0x96, 0xcf, 0xb5,
+	0x71, 0xcf, 0x79, 0xb9, 0xe8, 0x83, 0xf7, 0xfe, 0x02, 0xe3, 0x68, 0x75,
+	0x5e, 0xc1, 0x9c, 0x1e, 0x2c, 0x65, 0x92, 0x9e, 0x8d, 0x83, 0x3f, 0x5b,
+	0x6c, 0x75, 0x9e, 0x9b, 0x8f, 0x21, 0xf5, 0x84, 0x74, 0x71, 0x88, 0x79,
+	0x65, 0xe4, 0x95, 0x19, 0xeb, 0xad, 0x77, 0x26, 0xc7, 0xec, 0x7e, 0x12,
+	0xa3, 0x8b, 0xe6, 0xd6, 0x5f, 0x06, 0xf4, 0xfa, 0x84, 0xeb, 0x4c, 0x4e,
+	0x4d, 0xaf, 0x37, 0xfb, 0x8a, 0x6a, 0x91, 0x67, 0xf6, 0x58, 0x4e, 0x4c,
+	0xd5, 0xa2, 0x4c, 0xbd, 0x33, 0xa1, 0x63, 0x5e, 0xda, 0xf6, 0x70, 0xc6,
+	0xa7, 0xea, 0x9d, 0x29, 0xbd, 0xd6, 0x1c, 0x75, 0x4e, 0x8e, 0xb1, 0xed,
+	0x28, 0xca, 0x88, 0x73, 0x0a, 0xed, 0x4d, 0x8d, 0xb5, 0xc7, 0xf7, 0x49,
+	0x3b, 0x6c, 0x01, 0xfe, 0x8d, 0x34, 0xde, 0x17, 0xe0, 0x3a, 0x53, 0x73,
+	0xed, 0x2a, 0xb4, 0xc3, 0xb2, 0xa4, 0x41, 0xf6, 0xeb, 0xa2, 0xfd, 0xa5,
+	0x6b, 0x52, 0x4b, 0x71, 0x32, 0x06, 0x9c, 0x1c, 0xb4, 0x38, 0x39, 0x61,
+	0x71, 0x32, 0x5a, 0x85, 0x93, 0x87, 0x17, 0xe1, 0xe4, 0x04, 0x70, 0xf2,
+	0xf0, 0x15, 0x70, 0x82, 0xbc, 0xf2, 0xc3, 0x16, 0x27, 0xf7, 0x2d, 0xc2,
+	0x49, 0x7e, 0x2e, 0x16, 0x6f, 0x70, 0x32, 0x02, 0x9c, 0xd4, 0xb4, 0x1a,
+	0xd8, 0x0f, 0x5a, 0x9c, 0xe0, 0x3d, 0x75, 0x10, 0x65, 0xee, 0xab, 0xc2,
+	0xc9, 0x41, 0xe0, 0xe4, 0x3e, 0x8b, 0x93, 0xc3, 0x16, 0x27, 0x87, 0x51,
+	0x26, 0x0f, 0x9c, 0x14, 0x96, 0xc1, 0xc9, 0x08, 0x70, 0x12, 0xb6, 0x5b,
+	0x40, 0x3b, 0x87, 0xab, 0x70, 0x32, 0xb2, 0x0c, 0x4e, 0xb8, 0xe6, 0x1a,
+	0xee, 0xe1, 0xbe, 0xfc, 0x06, 0x7b, 0xb8, 0x53, 0x9f, 0x7d, 0xe3, 0x3d,
+	0xdc, 0x2c, 0x73, 0xb9, 0xea, 0xcc, 0xfb, 0xb3, 0x76, 0x4f, 0x9a, 0xd9,
+	0xfb, 0x37, 0x7f, 0x0f, 0x5e, 0x3b, 0xf8, 0xbc, 0x90, 0xf7, 0xc4, 0xec,
+	0x21, 0x75, 0xb7, 0x4d, 0x81, 0xd7, 0x8e, 0xca, 0x81, 0xe3, 0xb5, 0x87,
+	0x73, 0x36, 0xcd, 0xdf, 0xd6, 0x9e, 0x57, 0x8a, 0x79, 0xe1, 0xde, 0x83,
+	0x17, 0xcd, 0x5d, 0x50, 0x31, 0x9e, 0xc7, 0xa8, 0x5e, 0x7b, 0x7e, 0xd1,
+	0xde, 0x55, 0xe4, 0xdd, 0x9b, 0xf5, 0xa7, 0x13, 0xdc, 0x57, 0x55, 0xd0,
+	0xf0, 0x72, 0x2d, 0xad, 0x47, 0xef, 0xa5, 0xca, 0x16, 0x69, 0x67, 0x27,
+	0xb8, 0x27, 0x0d, 0xf6, 0x31, 0xf7, 0xed, 0x9a, 0x7d, 0xba, 0xbd, 0x0b,
+	0xf6, 0xe9, 0x56, 0x9f, 0xef, 0x26, 0xdf, 0xcd, 0xd3, 0xcd, 0xc1, 0xb9,
+	0xbb, 0x57, 0x8f, 0x3b, 0xcf, 0xe8, 0xf8, 0x70, 0x3d, 0xe6, 0x27, 0x08,
+	0x4e, 0xa7, 0x4c, 0x5c, 0x76, 0x46, 0xc7, 0x65, 0x05, 0x1e, 0xf8, 0xb0,
+	0x8d, 0xcd, 0x76, 0xf4, 0x5c, 0x9e, 0x8b, 0xcb, 0x2e, 0xd8, 0xa3, 0xa3,
+	0xef, 0xff, 0xc8, 0x8e, 0x5e, 0xd2, 0x7b, 0x71, 0xfa, 0x52, 0x8e, 0x14,
+	0x20, 0x23, 0xf6, 0x8c, 0xbf, 0x2a, 0xc3, 0x0f, 0xf2, 0x9b, 0x3a, 0x2d,
+	0x02, 0xbd, 0x45, 0xb9, 0x9d, 0x97, 0x6c, 0x0f, 0xd3, 0x4c, 0x9d, 0x3e,
+	0xed, 0x23, 0x1f, 0x77, 0x7a, 0xe7, 0xfa, 0x27, 0x7e, 0xc3, 0x35, 0x70,
+	0xfe, 0xa6, 0x9d, 0x93, 0x71, 0xb2, 0x15, 0xe6, 0x87, 0x6b, 0xe1, 0x77,
+	0xdb, 0xfb, 0x08, 0x99, 0x5f, 0x7d, 0xff, 0xb5, 0xe1, 0xd3, 0xac, 0xfe,
+	0x3b, 0x22, 0x23, 0x4e, 0x1f, 0xea, 0x4c, 0x7b, 0x0d, 0x03, 0x2a, 0x7d,
+	0xd3, 0x00, 0xcf, 0xca, 0x4d, 0x2c, 0xf9, 0xfb, 0x01, 0xf3, 0xba, 0xb0,
+	0xa0, 0xe7, 0x94, 0xfb, 0xb0, 0xa6, 0x41, 0x8b, 0x9a, 0xb6, 0x34, 0xfd,
+	0x1f, 0x98, 0xd3, 0x91, 0xd4, 0xad, 0xd4, 0x93, 0xa1, 0x8e, 0x4c, 0xc6,
+	0xfb, 0x78, 0x7f, 0x84, 0xa6, 0x71, 0x7b, 0x97, 0xc4, 0xd4, 0x39, 0xad,
+	0xdf, 0x47, 0x52, 0xbc, 0x5f, 0x66, 0x99, 0xb2, 0xa3, 0x55, 0x65, 0xf5,
+	0xb8, 0x3d, 0xf9, 0x43, 0xcc, 0xcd, 0x17, 0x61, 0x6f, 0xf6, 0x8e, 0xbd,
+	0x0a, 0x9f, 0x31, 0x2e, 0x5f, 0x2a, 0xbd, 0x04, 0x7a, 0xcd, 0xaf, 0xb5,
+	0x77, 0xe1, 0x65, 0x01, 0x37, 0xcf, 0x38, 0xeb, 0xfd, 0xc3, 0x91, 0x3f,
+	0x02, 0x5d, 0xfc, 0xc1, 0x4b, 0xec, 0x03, 0xb0, 0x44, 0x60, 0xcf, 0xc3,
+	0x36, 0x18, 0x7f, 0x49, 0xef, 0x95, 0xbb, 0xbe, 0xfc, 0x92, 0x8e, 0x53,
+	0xf4, 0x97, 0x5b, 0x65, 0x7b, 0xb9, 0x41, 0x76, 0x40, 0x2f, 0xec, 0x28,
+	0xfb, 0x78, 0xa2, 0x72, 0x63, 0xd9, 0xcc, 0xd3, 0x47, 0xca, 0x9c, 0xef,
+	0x6d, 0x32, 0x71, 0xbc, 0x9a, 0x66, 0xa7, 0xed, 0xde, 0x31, 0xd2, 0x0f,
+	0x9e, 0x52, 0x32, 0x3f, 0xad, 0xc7, 0xce, 0x5d, 0xac, 0xc9, 0xc3, 0xb3,
+	0xc2, 0xbd, 0xf8, 0xfc, 0x1b, 0x74, 0xdf, 0x68, 0xe5, 0x19, 0x77, 0xde,
+	0x8f, 0xd8, 0x5f, 0x09, 0xf7, 0x86, 0xbf, 0xfe, 0x19, 0x10, 0xfd, 0x77,
+	0x5d, 0xf4, 0xde, 0x70, 0x4d, 0x7b, 0xd2, 0x76, 0x26, 0xa6, 0x75, 0x84,
+	0xa1, 0xf1, 0xf9, 0xbf, 0xe7, 0x22, 0xf2, 0x7f, 0x01, 0x95, 0xf6, 0x2d,
+	0x58, 0xd0, 0x73, 0x00, 0x00, 0x00 };
+static u32 bnx2_CP_b09FwData[(0x50/4) + 1] = {
+	0x00010030, 0x00000030, 0x00000000, 0x00000001, 0x00010fd0, 0x00000fd0,
+	0x00001430, 0x0000007f, 0x00030400, 0x00001000, 0x00000030, 0x00000020,
+	0x00050200, 0x00001000, 0x00000030, 0x00000010, 0x00010400, 0x00000400,
+	0x00001030, 0x00000020, 0x00000000 };
+static u32 bnx2_CP_b09FwRodata[(0x118/4) + 1] = {
+	0x080005d8, 0x080007f8, 0x0800073c, 0x08000764, 0x0800078c, 0x080007b4,
+	0x08000610, 0x080005fc, 0x08000820, 0x08000820, 0x0800062c, 0x08000648,
+	0x08000648, 0x08000820, 0x08000660, 0x08000674, 0x08000820, 0x08000688,
+	0x08000820, 0x08000820, 0x0800069c, 0x08000820, 0x08000820, 0x08000820,
+	0x08000820, 0x08000820, 0x08000820, 0x08000820, 0x08000820, 0x08000820,
+	0x08000820, 0x080006b0, 0x08000820, 0x080006c4, 0x080006d8, 0x080006ec,
+	0x08000820, 0x08000700, 0x08000714, 0x08000728, 0x08003740, 0x08003758,
+	0x08003768, 0x08003778, 0x08003790, 0x080037a8, 0x080037b8, 0x080037c8,
+	0x080037e8, 0x080037f8, 0x08003808, 0x08003898, 0x080037d8, 0x08003818,
+	0x08003828, 0x08003840, 0x08003860, 0x08003898, 0x08003878, 0x08003878,
+	0x080055f0, 0x080055f0, 0x080055f0, 0x080055f0, 0x080055f0, 0x08005618,
+	0x08005618, 0x08005640, 0x08005690, 0x08005660, 0x00000000 };
+static u32 bnx2_CP_b09FwBss[(0x870/4) + 1] = { 0x0 };
+static u32 bnx2_CP_b09FwSbss[(0xe9/4) + 1] = { 0x0 };
+
+static struct fw_info bnx2_cp_fw_09 = {
+	.ver_major			= 0x1,
+	.ver_minor			= 0x0,
+	.ver_fix			= 0x0,
+
+	.start_addr			= 0x0800006c,
+
+	.text_addr			= 0x08000000,
+	.text_len			= 0x73cc,
+	.text_index			= 0x0,
+	.gz_text			= bnx2_CP_b09FwText,
+	.gz_text_len			= sizeof(bnx2_CP_b09FwText),
+
+	.data_addr			= 0x08007500,
+	.data_len			= 0x50,
+	.data_index			= 0x0,
+	.data				= bnx2_CP_b09FwData,
+
+	.sbss_addr			= 0x08007554,
+	.sbss_len			= 0xe9,
+	.sbss_index			= 0x0,
+	.sbss				= bnx2_CP_b09FwSbss,
+
+	.bss_addr			= 0x08007640,
+	.bss_len			= 0x870,
+	.bss_index			= 0x0,
+	.bss				= bnx2_CP_b09FwBss,
+
+	.rodata_addr			= 0x080073d0,
+	.rodata_len			= 0x118,
+	.rodata_index			= 0x0,
+	.rodata				= bnx2_CP_b09FwRodata,
+};
+
+static u8 bnx2_RXP_b09FwText[] = {
+	0x1f, 0x8b, 0x08, 0x08, 0x19, 0xfd, 0x2f, 0x45, 0x00, 0x03, 0x74, 0x65,
+	0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xec, 0x5c, 0x6b, 0x6c,
+	0x1c, 0xd7, 0x75, 0x3e, 0xf3, 0x20, 0xb5, 0xa2, 0xf8, 0x18, 0x2e, 0x57,
+	0xcc, 0x4a, 0x66, 0xec, 0x5d, 0x71, 0x24, 0xb2, 0x16, 0x6b, 0x8c, 0xd8,
+	0xad, 0x4d, 0x04, 0x6b, 0x7b, 0x33, 0xbb, 0x92, 0x98, 0x54, 0x85, 0x29,
+	0x87, 0x75, 0x0c, 0xc3, 0x75, 0xd9, 0xa5, 0x1a, 0xbb, 0xae, 0x51, 0xc8,
+	0x8f, 0xc4, 0x06, 0x6a, 0xd6, 0x9b, 0x25, 0xdd, 0xa8, 0xe9, 0x82, 0x43,
+	0x4b, 0xaa, 0xe9, 0x02, 0x69, 0xbb, 0x20, 0xa9, 0xc7, 0x8f, 0x85, 0x56,
+	0x76, 0x52, 0xc7, 0xf9, 0xe1, 0x48, 0x50, 0x95, 0x20, 0x28, 0x0c, 0x43,
+	0x48, 0x8d, 0xd6, 0x3f, 0xda, 0x40, 0x95, 0x9f, 0x68, 0x92, 0x42, 0x41,
+	0x0b, 0xc7, 0x68, 0x6c, 0x4f, 0xbf, 0xef, 0xce, 0x0c, 0xb9, 0xa4, 0x5f,
+	0x40, 0x7f, 0xf4, 0x4f, 0xe7, 0x02, 0x8b, 0xb9, 0xf7, 0xce, 0x3d, 0xe7,
+	0x9e, 0x7b, 0xde, 0xe7, 0x0e, 0xa5, 0xdf, 0xef, 0x94, 0x0e, 0x09, 0x5b,
+	0x17, 0x7e, 0x99, 0xc3, 0x8f, 0x3d, 0x74, 0xc3, 0xd8, 0x0d, 0xa3, 0x22,
+	0x7b, 0xf6, 0x18, 0x5b, 0x12, 0x7a, 0x34, 0x1f, 0xb7, 0xb8, 0xc5, 0x2d,
+	0x6e, 0x71, 0x8b, 0x5b, 0xdc, 0xe2, 0x16, 0xb7, 0xb8, 0xc5, 0x2d, 0x6e,
+	0x71, 0x8b, 0x5b, 0xdc, 0xe2, 0x16, 0xb7, 0xb8, 0xc5, 0x2d, 0x6e, 0x71,
+	0x8b, 0x5b, 0xdc, 0xe2, 0x16, 0xb7, 0xb8, 0xc5, 0x2d, 0x6e, 0x71, 0x8b,
+	0x5b, 0xdc, 0xe2, 0x16, 0xb7, 0xb8, 0xc5, 0x2d, 0x6e, 0x71, 0x8b, 0x5b,
+	0xdc, 0xe2, 0x16, 0xb7, 0xb8, 0xc5, 0x2d, 0x6e, 0x71, 0x8b, 0x5b, 0xdc,
+	0xe2, 0xf6, 0xff, 0xbd, 0x19, 0x22, 0x16, 0x9f, 0x5d, 0xe1, 0x4f, 0x12,
+	0x7a, 0xfe, 0xf2, 0x1f, 0xba, 0xb6, 0x24, 0x8c, 0xfc, 0xcf, 0x66, 0xa6,
+	0x6d, 0x91, 0x42, 0x63, 0x77, 0xa6, 0x28, 0xef, 0xfb, 0x95, 0x94, 0x29,
+	0x9c, 0xff, 0x6c, 0xfe, 0xbd, 0xbf, 0x7d, 0xf1, 0xa6, 0xec, 0xd5, 0xba,
+	0x21, 0x09, 0x2b, 0x3f, 0xb7, 0xc7, 0xda, 0x25, 0x89, 0x01, 0xc0, 0x7c,
+	0x6b, 0xe8, 0xc7, 0xdd, 0xd2, 0x2d, 0x6b, 0x78, 0xec, 0x84, 0x5c, 0x36,
+	0x5e, 0xd0, 0xdc, 0xa6, 0xef, 0x9f, 0x70, 0x7c, 0xff, 0x87, 0xf8, 0xbd,
+	0xe5, 0x60, 0xec, 0x7d, 0xe0, 0x17, 0x4c, 0x43, 0x74, 0xfb, 0x2f, 0x34,
+	0x77, 0xb9, 0x43, 0xaa, 0x8b, 0xa6, 0xcc, 0x7a, 0x29, 0x39, 0xe2, 0x55,
+	0xb4, 0x52, 0xb3, 0xa6, 0xed, 0x3d, 0x35, 0xaf, 0xed, 0x3b, 0x75, 0x44,
+	0xdb, 0x7f, 0x6a, 0x41, 0x73, 0x4f, 0x49, 0x45, 0xdf, 0xd3, 0x29, 0x05,
+	0xeb, 0xb4, 0x56, 0x6c, 0xf6, 0x6b, 0xee, 0xe2, 0x7b, 0xbe, 0xeb, 0x64,
+	0xad, 0xbb, 0xc4, 0x2c, 0x80, 0x16, 0x71, 0x6b, 0x3e, 0xc6, 0xa6, 0x14,
+	0x52, 0xbe, 0xaf, 0xe7, 0xfd, 0x27, 0xdc, 0x9c, 0x6d, 0xe9, 0x5a, 0x4a,
+	0xaa, 0xcd, 0x7e, 0xe0, 0xed, 0xd4, 0x8a, 0x8b, 0xa6, 0x56, 0xf2, 0xfc,
+	0x73, 0xae, 0x23, 0x03, 0x86, 0xf8, 0xfe, 0x9c, 0xb3, 0x33, 0x7d, 0x48,
+	0x4e, 0x02, 0x6f, 0x03, 0xf8, 0xc4, 0xd2, 0xf3, 0xa4, 0x8f, 0x74, 0x92,
+	0xe4, 0x8a, 0x56, 0x1c, 0x8a, 0xe8, 0x93, 0x0c, 0xe9, 0x2f, 0xaf, 0xe8,
+	0xa0, 0x73, 0x8b, 0x94, 0xeb, 0x96, 0x4c, 0xad, 0x6c, 0x5c, 0x7f, 0xd9,
+	0x7f, 0x71, 0x28, 0x25, 0xcf, 0x36, 0xb3, 0x47, 0x2a, 0x92, 0x90, 0x39,
+	0x2f, 0x23, 0x7a, 0x5e, 0x0a, 0x6e, 0x6e, 0x40, 0xce, 0x35, 0xd3, 0xf2,
+	0x5c, 0xd3, 0x4e, 0x57, 0x65, 0x93, 0x94, 0x53, 0x96, 0x9c, 0x6d, 0xa6,
+	0x70, 0x46, 0xff, 0x9c, 0x6e, 0xdb, 0x56, 0x15, 0x6b, 0xab, 0xcd, 0x97,
+	0xf8, 0xef, 0x5f, 0xac, 0xe9, 0x9c, 0x82, 0xa9, 0x80, 0xee, 0x70, 0x2d,
+	0xcf, 0xa1, 0xd6, 0xaa, 0xb3, 0x04, 0x6b, 0xa5, 0x32, 0x9d, 0xc3, 0x5c,
+	0x73, 0x34, 0xe4, 0xef, 0x16, 0x9c, 0x97, 0x4f, 0x4b, 0xaa, 0x9e, 0x01,
+	0xde, 0xb0, 0xff, 0xcf, 0xc0, 0x37, 0x80, 0xf3, 0x5e, 0xab, 0x7e, 0xee,
+	0xa2, 0xa4, 0x74, 0xd9, 0x99, 0x2e, 0x0b, 0x78, 0xdb, 0xec, 0xc4, 0x98,
+	0xf4, 0xf9, 0xfe, 0x7e, 0x47, 0xac, 0xaa, 0xd3, 0x03, 0x98, 0x8c, 0x54,
+	0x9d, 0x6e, 0xe0, 0x69, 0x13, 0xcb, 0xe6, 0xb9, 0xb8, 0xd7, 0x66, 0xcc,
+	0xfb, 0x5d, 0x46, 0xde, 0xf7, 0xa7, 0x73, 0xd2, 0x1d, 0xcc, 0xed, 0x56,
+	0x38, 0xa6, 0x26, 0x34, 0xac, 0xfb, 0x05, 0x69, 0x4e, 0x24, 0xf3, 0xec,
+	0xf3, 0x99, 0x13, 0x77, 0xfe, 0xda, 0x90, 0x96, 0x34, 0x68, 0xb9, 0x26,
+	0xec, 0x83, 0xff, 0x1e, 0xf8, 0xe0, 0x7c, 0x06, 0x63, 0xed, 0x3a, 0xe0,
+	0x19, 0xae, 0x0a, 0xf7, 0xe8, 0x93, 0xa5, 0x94, 0xe8, 0x57, 0x9c, 0xde,
+	0x70, 0x5d, 0x37, 0x68, 0x8d, 0xf4, 0xa0, 0x5f, 0xe6, 0x16, 0xc9, 0xeb,
+	0x1a, 0x64, 0x83, 0xe7, 0x8d, 0x15, 0xad, 0xd0, 0x3c, 0x82, 0xbe, 0x29,
+	0xd3, 0xb6, 0x7f, 0x6e, 0xce, 0x99, 0xd7, 0x8a, 0xa7, 0x4e, 0x6a, 0xa5,
+	0x53, 0x2f, 0x68, 0x7b, 0x9b, 0x2f, 0x74, 0x49, 0x47, 0x16, 0xa7, 0x4e,
+	0xc8, 0x93, 0x9e, 0x26, 0xa4, 0x73, 0x09, 0xbc, 0x2b, 0x58, 0x15, 0x31,
+	0xed, 0x6e, 0x6d, 0x1f, 0xf0, 0xb4, 0xd9, 0x7f, 0xd2, 0x29, 0xdd, 0x86,
+	0x6c, 0xb2, 0xa3, 0xb5, 0x29, 0xf9, 0x33, 0xd0, 0x74, 0xc1, 0x49, 0x91,
+	0x5f, 0x3d, 0x01, 0x4c, 0x44, 0x07, 0xf5, 0x8a, 0x3a, 0xa5, 0x17, 0x4a,
+	0xc7, 0xff, 0xbc, 0xaf, 0x3a, 0xb2, 0x85, 0x6b, 0xa0, 0xff, 0xd6, 0xfd,
+	0xd3, 0xb6, 0xdb, 0x6b, 0x4a, 0xc5, 0xd2, 0x25, 0x6b, 0x15, 0xe5, 0x3a,
+	0x99, 0x73, 0x44, 0x8a, 0x35, 0xec, 0x69, 0x9b, 0xe0, 0x8d, 0x0d, 0xde,
+	0xec, 0x3c, 0x32, 0xa8, 0xff, 0x96, 0x64, 0xfa, 0x2b, 0x9a, 0x19, 0xf2,
+	0x71, 0x49, 0x6e, 0x51, 0xf0, 0x7a, 0xde, 0x81, 0x7e, 0x76, 0xb0, 0x8f,
+	0x7d, 0x13, 0x6a, 0x5f, 0x23, 0x6f, 0xa7, 0x97, 0x45, 0x34, 0x3d, 0xbf,
+	0x1b, 0xf8, 0xa8, 0xb7, 0x5c, 0xf7, 0x04, 0x68, 0x24, 0xed, 0xec, 0xdb,
+	0x80, 0x49, 0x88, 0xeb, 0x74, 0xb5, 0xd0, 0x49, 0x79, 0x93, 0xd7, 0xe4,
+	0x9d, 0x3a, 0xa7, 0xb6, 0x76, 0xce, 0x5f, 0xf9, 0x9b, 0x46, 0x4d, 0xf9,
+	0xa1, 0x3a, 0x2f, 0x6d, 0x8c, 0xeb, 0x52, 0xa1, 0x4e, 0x24, 0xa0, 0x47,
+	0xa2, 0x95, 0x1d, 0x6b, 0x15, 0x57, 0x59, 0x44, 0x37, 0xf2, 0x9d, 0x52,
+	0x54, 0xf4, 0x8d, 0x61, 0x2f, 0xda, 0x1e, 0x6c, 0xc8, 0xe6, 0x59, 0x38,
+	0x97, 0x87, 0x8d, 0x67, 0xd9, 0x97, 0xf2, 0x02, 0xed, 0x9d, 0xb4, 0x9d,
+	0xcf, 0xaa, 0x7f, 0x8e, 0x45, 0x5d, 0x3c, 0xd6, 0x03, 0xda, 0x38, 0x86,
+	0x1d, 0xda, 0x78, 0x3f, 0xa2, 0x89, 0x3b, 0x36, 0x08, 0xfe, 0x70, 0x9d,
+	0x9d, 0x81, 0x9c, 0x0b, 0x2e, 0xf6, 0x74, 0x9d, 0xdf, 0x50, 0x3c, 0xe8,
+	0xc5, 0x79, 0x06, 0xe7, 0xc9, 0xaf, 0x0e, 0xe8, 0xb6, 0x26, 0x65, 0x27,
+	0x9b, 0xa1, 0xdc, 0x03, 0xda, 0x75, 0xd9, 0x74, 0x63, 0x2b, 0xed, 0x91,
+	0xac, 0xa8, 0x87, 0xa6, 0x24, 0x47, 0xb9, 0x96, 0xeb, 0xb8, 0x3e, 0x3b,
+	0x26, 0xfa, 0xaf, 0x7c, 0x6b, 0xdd, 0x59, 0x6d, 0xd9, 0x31, 0x0f, 0x1a,
+	0x02, 0xde, 0x82, 0x27, 0x9f, 0xb6, 0x96, 0x7c, 0xdd, 0xc8, 0x3f, 0xae,
+	0x6d, 0x5d, 0x07, 0x9d, 0xe8, 0x27, 0x0d, 0x27, 0x3a, 0x03, 0x5b, 0x8b,
+	0x68, 0x8a, 0x64, 0xa3, 0x85, 0x38, 0x3e, 0xe9, 0x1c, 0x5c, 0x0f, 0x1f,
+	0xe0, 0xc1, 0x07, 0xc0, 0xaf, 0x3d, 0xeb, 0xc1, 0xfe, 0x3d, 0xfa, 0x8c,
+	0x8c, 0xbc, 0x38, 0x04, 0x1f, 0xb7, 0xe6, 0x63, 0xd0, 0xc6, 0xd1, 0xd7,
+	0xc5, 0x80, 0x8f, 0x99, 0xad, 0xeb, 0xb0, 0x59, 0xf0, 0x78, 0x85, 0x73,
+	0xb0, 0xed, 0x95, 0x12, 0x9e, 0xb6, 0x54, 0x1b, 0xd4, 0xab, 0xc8, 0x97,
+	0xd2, 0xe7, 0xa4, 0xe1, 0x5f, 0xe8, 0x77, 0xe8, 0x57, 0xb8, 0xd6, 0xf7,
+	0x4b, 0x0e, 0x61, 0x7d, 0x99, 0x70, 0x68, 0x43, 0x9d, 0xa2, 0x27, 0x2b,
+	0xda, 0xc1, 0x21, 0xd8, 0xd6, 0xf5, 0x6d, 0xa0, 0x95, 0x36, 0x76, 0x8d,
+	0x48, 0x3b, 0xf7, 0xfb, 0x69, 0x57, 0xf0, 0xef, 0xee, 0x36, 0x61, 0x0d,
+	0x9f, 0xef, 0x86, 0x63, 0x2d, 0xf4, 0x2d, 0x7c, 0x9f, 0xcd, 0x14, 0xa4,
+	0x3f, 0x1c, 0xb3, 0xbf, 0x4a, 0xaf, 0xa3, 0xdf, 0x98, 0x90, 0x1d, 0x27,
+	0x03, 0x9f, 0xb8, 0x63, 0xc9, 0x12, 0xfb, 0x64, 0x40, 0xe3, 0x8e, 0x33,
+	0x91, 0x6f, 0x7c, 0x1f, 0xf0, 0xa0, 0xcf, 0x5b, 0x8d, 0x03, 0x68, 0x3f,
+	0xd3, 0x60, 0x2a, 0x98, 0xdb, 0xc8, 0x0b, 0xfa, 0x63, 0xda, 0x9b, 0xd5,
+	0x6a, 0x6f, 0x7b, 0x60, 0x6f, 0x4e, 0xbb, 0x64, 0x9d, 0xbf, 0x87, 0xbd,
+	0x7d, 0xc3, 0xd1, 0xc0, 0x1b, 0x91, 0x8b, 0xb5, 0x34, 0x6c, 0xdd, 0x4c,
+	0xbf, 0x26, 0x3b, 0x33, 0xb3, 0xa2, 0xc9, 0x09, 0xce, 0x35, 0x30, 0xa7,
+	0x7c, 0x71, 0xe0, 0x0b, 0x2e, 0x1b, 0x4f, 0x81, 0x2e, 0xdf, 0x9f, 0x05,
+	0xce, 0xf2, 0x88, 0x11, 0xda, 0x56, 0x34, 0x6f, 0xdd, 0xef, 0xda, 0xee,
+	0xaf, 0x19, 0x52, 0x19, 0x6e, 0x93, 0xec, 0xf0, 0x12, 0x70, 0x4f, 0x3b,
+	0x81, 0x1d, 0x53, 0xd7, 0x97, 0x81, 0x7f, 0xce, 0x1b, 0x82, 0x1e, 0xd3,
+	0x0e, 0x40, 0x17, 0xf0, 0x2f, 0x03, 0xff, 0x5c, 0xb3, 0x4d, 0xbe, 0x6e,
+	0x46, 0xb1, 0x32, 0x3a, 0x4f, 0x37, 0x96, 0x45, 0xfb, 0x1e, 0x96, 0x2f,
+	0x7a, 0x49, 0xcd, 0x3d, 0x46, 0xff, 0x5a, 0x1d, 0x86, 0x9d, 0x68, 0x55,
+	0x87, 0x7b, 0x1b, 0xb2, 0xbc, 0xba, 0x46, 0x0a, 0xd5, 0xc0, 0x06, 0x0b,
+	0xee, 0x50, 0x25, 0x6d, 0x28, 0x5f, 0x22, 0xb2, 0x0f, 0xb6, 0xb7, 0x6c,
+	0x73, 0xcc, 0xf9, 0x60, 0x6e, 0xbc, 0xd6, 0x0f, 0x9f, 0xc8, 0xf1, 0x7b,
+	0xfe, 0xb4, 0x13, 0xcc, 0x7d, 0xa1, 0x76, 0x57, 0x37, 0xfd, 0x2e, 0xe2,
+	0x44, 0xa6, 0xea, 0xfc, 0xbb, 0x0f, 0xfd, 0x5d, 0x07, 0xf3, 0xd1, 0x78,
+	0xb2, 0xe3, 0x81, 0xce, 0x8a, 0xb6, 0xdf, 0xd6, 0xfb, 0xdb, 0x43, 0x1f,
+	0xb6, 0x1f, 0x93, 0x7b, 0x6b, 0xd5, 0xbe, 0x76, 0x79, 0xcf, 0x60, 0x1c,
+	0xbd, 0x22, 0x62, 0xba, 0xb5, 0x5d, 0xe0, 0x47, 0xb5, 0xb7, 0x65, 0x2e,
+	0x51, 0xaa, 0xf9, 0x72, 0xc1, 0x09, 0x60, 0x30, 0xee, 0x2c, 0xd6, 0xf4,
+	0xfe, 0x84, 0xac, 0x8e, 0x2d, 0xc2, 0xac, 0xc8, 0xae, 0xe1, 0x65, 0x51,
+	0xb0, 0x7d, 0x89, 0x35, 0xd8, 0x54, 0xa9, 0x56, 0xed, 0x6d, 0x19, 0xa7,
+	0x8b, 0xc0, 0xa5, 0xef, 0x59, 0x85, 0x1d, 0x58, 0x83, 0xdd, 0x2a, 0x99,
+	0x5e, 0xc2, 0xeb, 0xfd, 0x9b, 0xd7, 0x70, 0x67, 0x42, 0x7a, 0xfa, 0x36,
+	0xaf, 0xe1, 0xb0, 0x89, 0xb3, 0x65, 0x3c, 0x4c, 0x9c, 0x3b, 0xd6, 0x70,
+	0x8e, 0xac, 0xa7, 0xe7, 0xb0, 0xc0, 0x07, 0x25, 0xda, 0xf3, 0xb2, 0xe7,
+	0x62, 0x6d, 0x70, 0xe2, 0x8b, 0x82, 0x58, 0x37, 0xb2, 0x29, 0xf4, 0xc9,
+	0xe6, 0x1e, 0x17, 0xbc, 0x32, 0x85, 0x3e, 0x4e, 0x93, 0x2a, 0xe4, 0x7c,
+	0x7f, 0x43, 0xf6, 0x5c, 0x68, 0x98, 0xa1, 0x2e, 0x51, 0x27, 0xde, 0x86,
+	0x8d, 0x75, 0x4e, 0x99, 0x88, 0xc3, 0xe7, 0x94, 0x8d, 0xc9, 0x44, 0xb5,
+	0x26, 0x95, 0xed, 0xf9, 0x27, 0x7c, 0xe8, 0xe2, 0x94, 0x05, 0x3f, 0x5a,
+	0x94, 0xce, 0x31, 0x37, 0x87, 0xf9, 0x06, 0x6d, 0x0b, 0x7e, 0x05, 0xb0,
+	0xd0, 0xb5, 0x84, 0x31, 0xbf, 0xf3, 0x55, 0xd7, 0xe0, 0x3e, 0x19, 0xe4,
+	0x4d, 0x89, 0x84, 0x3e, 0x7f, 0xd5, 0xa7, 0x9e, 0x4d, 0x8f, 0x5c, 0x45,
+	0x9e, 0x62, 0xc1, 0x57, 0xc2, 0x7f, 0x40, 0xdf, 0x67, 0x9b, 0x82, 0xb8,
+	0xfe, 0x40, 0x4f, 0x60, 0x63, 0x47, 0xb7, 0x06, 0x4f, 0x31, 0xe9, 0x9b,
+	0xa7, 0x73, 0xcc, 0x01, 0xda, 0x13, 0x6e, 0x6e, 0x7c, 0x9b, 0x71, 0xe6,
+	0xc0, 0x36, 0xfd, 0x4c, 0x65, 0x9b, 0x0e, 0x9f, 0x0e, 0x9b, 0xd2, 0xdd,
+	0x1c, 0xfa, 0x67, 0x22, 0x1b, 0x4a, 0xc3, 0x86, 0xde, 0x56, 0x39, 0xc8,
+	0xb9, 0xe6, 0x29, 0xd8, 0xab, 0xa2, 0x55, 0x26, 0x90, 0x13, 0xe8, 0xa3,
+	0xef, 0x43, 0x4f, 0x70, 0x16, 0xf8, 0xc0, 0x02, 0xb8, 0xa4, 0x8f, 0xbe,
+	0x11, 0xda, 0x33, 0xfb, 0xef, 0xf8, 0x41, 0x7c, 0xf8, 0x7c, 0xb8, 0xff,
+	0x3f, 0x75, 0x07, 0x3e, 0x20, 0xc2, 0x45, 0x3c, 0xc3, 0xda, 0x04, 0xf2,
+	0x99, 0x89, 0xa6, 0xa9, 0xd1, 0x9f, 0x17, 0x3d, 0xe6, 0x21, 0xcc, 0x41,
+	0x1e, 0x0b, 0xfd, 0x22, 0x73, 0x8f, 0xce, 0x90, 0xa7, 0xb9, 0x28, 0xce,
+	0x29, 0x7b, 0x43, 0xcc, 0xc9, 0x94, 0x9d, 0x36, 0xe5, 0x93, 0xa7, 0x73,
+	0x9d, 0x58, 0x87, 0xb9, 0x26, 0xce, 0x0d, 0xbf, 0x84, 0x5c, 0x06, 0x6b,
+	0xce, 0x63, 0x7d, 0x7b, 0x68, 0xf3, 0x17, 0xa5, 0x0c, 0x9f, 0x6a, 0xda,
+	0x7c, 0x9f, 0xeb, 0x91, 0x0e, 0x8c, 0x1b, 0xd8, 0x0b, 0x7e, 0xc2, 0x50,
+	0x7c, 0x46, 0x2c, 0x48, 0x5d, 0xc7, 0x1c, 0x09, 0x6b, 0x33, 0x58, 0x4b,
+	0xbf, 0xcb, 0xb5, 0xcf, 0x82, 0x0e, 0x8c, 0x1b, 0x84, 0xa1, 0x8f, 0x12,
+	0xdf, 0xcd, 0x6d, 0x86, 0x26, 0xf9, 0xe7, 0x0c, 0x3b, 0x5a, 0x1b, 0xe1,
+	0xdd, 0xb8, 0x96, 0xf9, 0x09, 0x71, 0xf7, 0x84, 0xf1, 0x7f, 0x5c, 0x0a,
+	0xcd, 0x02, 0x7e, 0x22, 0xd3, 0xc7, 0x90, 0x8f, 0xd9, 0x6d, 0x88, 0x55,
+	0x9c, 0xdf, 0x6a, 0x05, 0x67, 0x8d, 0xe0, 0xee, 0xef, 0x5b, 0x3f, 0xfe,
+	0x42, 0x72, 0xcd, 0x47, 0xd2, 0xc2, 0xa4, 0x80, 0x18, 0x01, 0x5e, 0x65,
+	0xa6, 0x98, 0xc3, 0x15, 0x1b, 0x4a, 0xa6, 0x98, 0x1b, 0x83, 0x5f, 0x0c,
+	0xf2, 0xa2, 0x73, 0xde, 0x46, 0xd9, 0x59, 0xe0, 0x77, 0x01, 0x3c, 0xce,
+	0x40, 0x87, 0xc6, 0x01, 0x2b, 0x87, 0x81, 0x83, 0xf1, 0xd7, 0xd1, 0xf3,
+	0x49, 0x29, 0x5b, 0xcc, 0x13, 0xda, 0x49, 0x67, 0x81, 0xf6, 0xaf, 0xe7,
+	0x37, 0x63, 0x8e, 0xfd, 0x7b, 0x7b, 0x02, 0x99, 0x75, 0x71, 0x3c, 0xa1,
+	0xe7, 0x7b, 0x36, 0xcc, 0x7f, 0xbf, 0x2b, 0xa0, 0x4d, 0x8d, 0x31, 0xff,
+	0xf2, 0x86, 0xf1, 0xef, 0x25, 0xd7, 0x8f, 0xef, 0xda, 0x16, 0xea, 0x20,
+	0xfa, 0x8f, 0x85, 0xf4, 0x82, 0xb6, 0x55, 0x5a, 0xa3, 0x9c, 0x57, 0x16,
+	0x74, 0xe4, 0x7f, 0x6e, 0x6e, 0x27, 0x62, 0x7d, 0x46, 0x4a, 0x4d, 0xd0,
+	0xbd, 0x1a, 0xcb, 0x56, 0xd7, 0x54, 0xd6, 0xd6, 0x04, 0xbe, 0xbe, 0xd4,
+	0xf4, 0x91, 0x3b, 0xb5, 0xc6, 0xbd, 0x61, 0xf4, 0x2b, 0xd8, 0xa7, 0x20,
+	0xd3, 0xde, 0x85, 0x82, 0x6e, 0x1f, 0x09, 0xf2, 0x3e, 0xfb, 0x9b, 0x5a,
+	0x69, 0x99, 0xf9, 0x20, 0xec, 0xc9, 0x56, 0xf9, 0x3f, 0xe2, 0xca, 0x51,
+	0xad, 0x70, 0xea, 0x38, 0xf2, 0xc1, 0x15, 0xfc, 0x4e, 0xe3, 0xd7, 0xc0,
+	0x2f, 0xca, 0xc3, 0x9f, 0x41, 0x1e, 0xaf, 0x7c, 0x2c, 0xe2, 0x41, 0xb0,
+	0xff, 0x1b, 0x2b, 0xd0, 0xb3, 0xe3, 0x29, 0xf9, 0x86, 0xad, 0xf7, 0xe9,
+	0x81, 0x5f, 0x29, 0x20, 0x8f, 0xb5, 0xde, 0x96, 0xdf, 0x0e, 0xf3, 0x22,
+	0x91, 0xd7, 0x16, 0xc0, 0xc7, 0x91, 0xfd, 0xa1, 0xce, 0x16, 0xee, 0x75,
+	0x95, 0xff, 0x0c, 0xf3, 0x1e, 0xe4, 0x5f, 0x05, 0xb5, 0xea, 0x5b, 0xe0,
+	0x8d, 0x26, 0x6f, 0x41, 0x87, 0x5e, 0x5b, 0xe8, 0x00, 0x3d, 0xb6, 0x94,
+	0x27, 0x91, 0x2f, 0x68, 0x83, 0xd6, 0x26, 0xad, 0x03, 0x76, 0x0c, 0x1b,
+	0x57, 0x63, 0x49, 0xb4, 0xe5, 0x2f, 0xcd, 0x2c, 0xd5, 0x74, 0xac, 0x45,
+	0xee, 0x93, 0x43, 0x1f, 0xb2, 0xbf, 0xb2, 0x40, 0x38, 0x5d, 0x5e, 0x5f,
+	0x30, 0xe4, 0x4d, 0xe4, 0x52, 0x6f, 0xd9, 0x97, 0x66, 0x10, 0xb7, 0xfa,
+	0x11, 0x23, 0x50, 0x8b, 0xec, 0xa4, 0x9f, 0xde, 0x61, 0xe2, 0x59, 0xc2,
+	0x6f, 0x1f, 0x72, 0xc1, 0x8f, 0x86, 0xf9, 0xb8, 0xf5, 0xa4, 0x2d, 0x01,
+	0x18, 0xae, 0x37, 0x41, 0x5b, 0x37, 0xe4, 0x9f, 0xb5, 0xa6, 0xe4, 0xf5,
+	0x1e, 0x95, 0xaf, 0x68, 0x9c, 0x0f, 0x7c, 0xd3, 0x87, 0xe7, 0xc9, 0x67,
+	0x03, 0x3a, 0xce, 0x31, 0xdf, 0xd1, 0x87, 0x12, 0x5f, 0x76, 0xac, 0x80,
+	0xc3, 0x5c, 0x59, 0x08, 0xfa, 0xd1, 0x9c, 0x68, 0x51, 0x1c, 0xa3, 0x6f,
+	0x2c, 0xc1, 0x4e, 0x38, 0x9e, 0x10, 0x25, 0x83, 0x75, 0xf2, 0x94, 0x84,
+	0x99, 0x3f, 0x3b, 0x33, 0x67, 0x53, 0xae, 0xf0, 0x71, 0xb5, 0x48, 0xae,
+	0x94, 0x51, 0xbb, 0x54, 0x17, 0xbe, 0x09, 0xb9, 0xea, 0x61, 0xbe, 0x0f,
+	0x1b, 0x3f, 0x4e, 0xf9, 0xa2, 0xfe, 0x5b, 0x40, 0xee, 0xb3, 0x20, 0xc9,
+	0xa0, 0x5e, 0x39, 0x8a, 0x3c, 0x1f, 0xf2, 0xab, 0x1d, 0x07, 0x0e, 0xd8,
+	0x69, 0x6d, 0x05, 0x4f, 0xd4, 0x16, 0xb5, 0xd3, 0x78, 0x0e, 0xe0, 0xd9,
+	0xa0, 0x6e, 0x86, 0xb9, 0xc6, 0x87, 0xe8, 0x81, 0x3d, 0x95, 0x68, 0x4f,
+	0xf2, 0xfd, 0x66, 0x5e, 0xbe, 0xdb, 0x1c, 0x93, 0xe7, 0x9b, 0x39, 0xf9,
+	0xbb, 0xa6, 0x23, 0xdf, 0x69, 0x8e, 0xc8, 0xb7, 0x9b, 0xc3, 0xac, 0xc9,
+	0x90, 0x37, 0x65, 0x98, 0x37, 0xc9, 0xbd, 0xde, 0xad, 0xb0, 0x77, 0xca,
+	0xff, 0xd2, 0x4c, 0xa1, 0x31, 0x28, 0xe5, 0x63, 0xf0, 0xcf, 0xce, 0xcd,
+	0xac, 0x25, 0xe5, 0x11, 0x87, 0x35, 0x41, 0x1b, 0xdf, 0xa3, 0xce, 0x84,
+	0xff, 0x86, 0x3f, 0x9b, 0x4a, 0x65, 0x4f, 0xbb, 0x46, 0x47, 0xe8, 0x03,
+	0x6e, 0x49, 0x4a, 0x07, 0xf6, 0x82, 0x0f, 0x5c, 0x7a, 0x1a, 0x36, 0xa0,
+	0x6a, 0x9a, 0x04, 0xfc, 0x0d, 0x73, 0x01, 0x93, 0x76, 0x8c, 0x3a, 0x30,
+	0x9b, 0x71, 0x0d, 0xd6, 0x77, 0xb4, 0x67, 0x03, 0x81, 0x83, 0x70, 0xfb,
+	0x2c, 0xca, 0xcd, 0xb4, 0xe9, 0x57, 0x0b, 0xa1, 0x8f, 0x4b, 0x84, 0x7a,
+	0x69, 0x61, 0xfe, 0xf1, 0xd0, 0x27, 0x6f, 0xdc, 0x07, 0xf1, 0x02, 0xf9,
+	0x64, 0xb0, 0x8e, 0xb0, 0x5a, 0x08, 0xdb, 0x17, 0xce, 0x75, 0x82, 0xdf,
+	0x8e, 0x94, 0xbd, 0x37, 0x35, 0xe6, 0xd9, 0xc8, 0x77, 0x30, 0x1e, 0xc1,
+	0xf8, 0x4a, 0x38, 0xfe, 0x9c, 0x4c, 0x2f, 0x0a, 0x68, 0xfd, 0x89, 0x56,
+	0x54, 0xe3, 0x31, 0x8c, 0x75, 0x8c, 0x0d, 0xd6, 0x02, 0x68, 0x37, 0x27,
+	0xa9, 0xeb, 0xba, 0x4d, 0x5f, 0x38, 0x19, 0xfa, 0xc3, 0x82, 0x1c, 0xf6,
+	0x06, 0x0b, 0x57, 0x11, 0x33, 0xb4, 0xb6, 0x28, 0xff, 0xd9, 0x0e, 0xbe,
+	0xf8, 0xfe, 0xed, 0xac, 0xb9, 0x93, 0xa6, 0x7c, 0x7b, 0x3e, 0x6b, 0x3d,
+	0xa4, 0x7f, 0x0d, 0x67, 0xf2, 0xfd, 0x83, 0x76, 0xf6, 0xc8, 0x94, 0xde,
+	0x25, 0xdf, 0x3d, 0xca, 0xd8, 0x7b, 0x76, 0xe6, 0x07, 0xd0, 0xbd, 0xfa,
+	0x4a, 0xbb, 0xd4, 0xeb, 0xa6, 0x5c, 0x19, 0x1d, 0x04, 0x9d, 0x96, 0xd4,
+	0x1b, 0x49, 0xe4, 0x73, 0x9b, 0x65, 0xb6, 0x5f, 0x19, 0x18, 0xfc, 0xf4,
+	0xb0, 0xf2, 0xd3, 0xae, 0x8d, 0x67, 0xe3, 0xe7, 0x3d, 0xeb, 0xcf, 0x5c,
+	0x02, 0xfd, 0xd0, 0xeb, 0xe4, 0x76, 0x25, 0xe7, 0xb2, 0x37, 0x68, 0x95,
+	0x75, 0xc4, 0x2e, 0x73, 0xd0, 0xba, 0x57, 0xff, 0x2f, 0xff, 0xf3, 0x26,
+	0x65, 0xf7, 0xaa, 0xaa, 0x61, 0x54, 0xac, 0xc3, 0x7e, 0x4b, 0x2b, 0x2f,
+	0x83, 0x16, 0xf8, 0xd8, 0xc6, 0xf6, 0x70, 0x9c, 0x51, 0xbc, 0x38, 0xdb,
+	0xe8, 0x90, 0xef, 0xd4, 0xb7, 0xc8, 0x72, 0x9d, 0xef, 0xdb, 0x65, 0xa9,
+	0x3e, 0x78, 0xb5, 0x4f, 0xef, 0x97, 0xf3, 0xd7, 0x5c, 0x6f, 0xdd, 0xa3,
+	0x23, 0x37, 0x98, 0xfc, 0x40, 0x7e, 0x39, 0xda, 0x23, 0x3f, 0xfe, 0x72,
+	0xf6, 0x99, 0x3f, 0xd5, 0x61, 0x03, 0xa3, 0x9d, 0xb4, 0x6d, 0xf4, 0x39,
+	0x9f, 0xbd, 0x5a, 0xd0, 0xa9, 0xdb, 0x3f, 0x02, 0x4f, 0xb3, 0x0b, 0x81,
+	0x1d, 0x10, 0x37, 0xf1, 0x42, 0x37, 0xec, 0xef, 0x01, 0x27, 0xde, 0x35,
+	0x06, 0x81, 0xeb, 0x7b, 0x8a, 0x17, 0xb7, 0x3b, 0xd9, 0xab, 0x08, 0x49,
+	0xfe, 0x15, 0x7b, 0x70, 0x78, 0x87, 0xbe, 0x5d, 0xea, 0xe9, 0xeb, 0xad,
+	0xe7, 0xe0, 0xff, 0x0b, 0xa9, 0xec, 0x91, 0xcb, 0x72, 0x76, 0xe6, 0xa2,
+	0x4d, 0xfd, 0xa7, 0xdf, 0x78, 0x09, 0xb9, 0xa7, 0x25, 0x0b, 0x0d, 0xfa,
+	0x4b, 0xe2, 0x62, 0xfe, 0xbf, 0xcb, 0x3a, 0xac, 0x33, 0x4f, 0xc0, 0x3b,
+	0xcc, 0x1b, 0xbf, 0x4e, 0x39, 0xb7, 0x11, 0x76, 0xb8, 0xa0, 0xff, 0x62,
+	0x03, 0x8f, 0x06, 0xad, 0xbd, 0x3a, 0xf7, 0xfb, 0x37, 0xec, 0xfb, 0x0e,
+	0x68, 0x1d, 0x04, 0x2c, 0x62, 0x66, 0xba, 0x75, 0x8f, 0x57, 0xd4, 0x1e,
+	0xc7, 0x1a, 0xc8, 0xf5, 0x56, 0xf7, 0xc0, 0x5c, 0x43, 0xc7, 0x39, 0x4d,
+	0x25, 0x97, 0x2b, 0xa3, 0xe4, 0xef, 0x9e, 0x5e, 0xe6, 0x99, 0x46, 0xfe,
+	0xaf, 0xfd, 0xa8, 0x7e, 0x7c, 0x7e, 0x7e, 0x12, 0xfe, 0xd9, 0xf7, 0x2f,
+	0xec, 0x1a, 0x04, 0x0d, 0xa8, 0x43, 0xd3, 0xe4, 0xf9, 0xd9, 0x19, 0x17,
+	0x38, 0x8a, 0x0a, 0xf7, 0x25, 0x59, 0x01, 0xee, 0x09, 0xf2, 0x01, 0xb8,
+	0xe7, 0x38, 0xaf, 0x64, 0x80, 0xf9, 0x46, 0x06, 0x78, 0x23, 0xfd, 0x4c,
+	0x42, 0xd7, 0x76, 0x5b, 0xb7, 0x07, 0xba, 0x9e, 0x60, 0x6c, 0x7b, 0x0e,
+	0xba, 0x57, 0x48, 0x52, 0x7f, 0xda, 0x7b, 0xd7, 0xf4, 0xa7, 0x15, 0x7f,
+	0xbb, 0x94, 0x16, 0x12, 0xc0, 0x6b, 0xca, 0x5c, 0x8e, 0x78, 0x31, 0xae,
+	0x53, 0xf7, 0x2b, 0xa1, 0xee, 0x77, 0x86, 0xb8, 0x17, 0xc1, 0x93, 0x6c,
+	0xa6, 0xae, 0xb3, 0x8e, 0xda, 0xa6, 0x6a, 0x5b, 0x03, 0x36, 0x5d, 0xae,
+	0xb1, 0x0e, 0xe5, 0xfd, 0xc8, 0xa5, 0x99, 0x69, 0xd4, 0xaa, 0xe5, 0xda,
+	0x88, 0x56, 0x6e, 0xda, 0x5a, 0xd9, 0xa3, 0xbe, 0xed, 0xb2, 0x2e, 0x28,
+	0x1e, 0xa7, 0x65, 0xa9, 0xf9, 0x4b, 0xbf, 0xba, 0x6b, 0x13, 0xfa, 0xd0,
+	0xfd, 0x09, 0xca, 0xf7, 0xb3, 0xa4, 0x0b, 0x41, 0x9c, 0xfc, 0x4e, 0xc9,
+	0xc9, 0x21, 0x14, 0xbd, 0xc8, 0xad, 0x4e, 0x0d, 0x11, 0x3f, 0xe8, 0x48,
+	0xa5, 0x64, 0xd9, 0xe3, 0x1e, 0x67, 0x67, 0xc8, 0xcb, 0xf2, 0x71, 0x4b,
+	0x0e, 0x2b, 0xf9, 0xbd, 0xac, 0x6c, 0xbb, 0xbc, 0x62, 0xc8, 0x74, 0x72,
+	0xd0, 0x7a, 0x58, 0xb2, 0x57, 0x2f, 0x18, 0xd9, 0x67, 0xa6, 0x60, 0xd7,
+	0x4b, 0x8b, 0x86, 0xb8, 0xaa, 0xde, 0xa2, 0x8c, 0xb2, 0x0b, 0xb0, 0xfc,
+	0xf0, 0xec, 0x7b, 0x5b, 0xce, 0xde, 0x2d, 0x2b, 0x4f, 0xff, 0x26, 0x7c,
+	0xce, 0x01, 0xc8, 0xc2, 0xcc, 0x1c, 0x42, 0x5e, 0xf1, 0xb4, 0x0c, 0x5a,
+	0x55, 0xe4, 0xc9, 0xe0, 0x3b, 0xda, 0x01, 0x65, 0x03, 0x17, 0x74, 0x8c,
+	0x07, 0xc8, 0x27, 0x8e, 0xaf, 0x95, 0x0b, 0xca, 0x96, 0xd2, 0xea, 0xdd,
+	0x0e, 0xe0, 0x08, 0xde, 0x71, 0xfc, 0x19, 0xd9, 0xa1, 0xde, 0xdd, 0xa9,
+	0xde, 0x55, 0xe9, 0x2b, 0x94, 0xfc, 0xbe, 0x82, 0x3d, 0xc9, 0xe3, 0x68,
+	0xbe, 0x4b, 0x02, 0x5b, 0x8a, 0xf8, 0x6e, 0xc9, 0xc1, 0x46, 0x4a, 0xbe,
+	0x84, 0xfa, 0xe7, 0x8e, 0xc6, 0x80, 0x94, 0x20, 0xc7, 0xe9, 0xdc, 0x83,
+	0xbd, 0x3c, 0x5b, 0x71, 0x25, 0xfb, 0x8c, 0xe8, 0xa4, 0xf5, 0x4e, 0x39,
+	0xe4, 0x45, 0xf4, 0x74, 0x86, 0xf4, 0x4d, 0x86, 0xe3, 0x44, 0x48, 0x43,
+	0x2b, 0xbe, 0x4e, 0xe0, 0x42, 0xac, 0xcf, 0x79, 0x21, 0x1e, 0xfa, 0x11,
+	0xd0, 0x3a, 0x99, 0x96, 0x15, 0x8f, 0x74, 0x6c, 0x91, 0x6a, 0x8a, 0xfd,
+	0x03, 0xd0, 0x37, 0xe2, 0xd9, 0xc4, 0x7c, 0x66, 0x1d, 0x8f, 0x1f, 0x6c,
+	0x54, 0xc0, 0x63, 0xf2, 0x97, 0xeb, 0x10, 0x23, 0x3e, 0x47, 0xf9, 0xed,
+	0x46, 0x2e, 0x6f, 0x07, 0xba, 0x69, 0xad, 0xed, 0x59, 0x3a, 0xde, 0x0d,
+	0x59, 0x71, 0xdf, 0x0e, 0x99, 0x84, 0xdd, 0x17, 0xeb, 0xdc, 0x7f, 0x12,
+	0x7a, 0x74, 0x51, 0xed, 0x5f, 0x5a, 0xe9, 0x0f, 0xe1, 0x09, 0xdb, 0xbd,
+	0x01, 0xb6, 0x5d, 0xf6, 0x2e, 0x58, 0x1f, 0x01, 0xff, 0xbb, 0x80, 0xd7,
+	0xe5, 0x44, 0x8e, 0xf0, 0xc4, 0x83, 0x75, 0xf5, 0xd4, 0x27, 0xe0, 0x49,
+	0xaa, 0xba, 0xbe, 0x58, 0x6f, 0x97, 0xe2, 0x42, 0x84, 0x8b, 0x78, 0x3e,
+	0x40, 0xdd, 0x7b, 0xb7, 0xc2, 0x35, 0xad, 0x70, 0xe1, 0x7d, 0x9d, 0x3e,
+	0xe7, 0x26, 0xc0, 0xa3, 0x76, 0xb7, 0x41, 0x5b, 0xb2, 0x4b, 0xaa, 0xaa,
+	0x76, 0xef, 0x50, 0xbe, 0xa6, 0x9a, 0xdc, 0x8c, 0xf7, 0x3e, 0xf6, 0xdc,
+	0x8d, 0x7c, 0xa6, 0x1b, 0x73, 0x99, 0x0d, 0x73, 0x1b, 0xe9, 0x4f, 0x6c,
+	0xa0, 0xff, 0xbf, 0x7b, 0x19, 0x52, 0xe6, 0x72, 0xc1, 0xba, 0x12, 0xd6,
+	0xcd, 0x1e, 0x87, 0x4d, 0x30, 0x4f, 0x4f, 0x31, 0x36, 0x5f, 0xa3, 0x68,
+	0x99, 0x5d, 0xf9, 0x29, 0xd6, 0xf5, 0x03, 0x36, 0x1a, 0x07, 0x7c, 0x78,
+	0x0a, 0x78, 0x8e, 0xd7, 0xd5, 0x1d, 0x05, 0x64, 0xf0, 0xbe, 0x3a, 0x7b,
+	0xb5, 0xfe, 0x69, 0x3c, 0xbb, 0xa6, 0x85, 0x5f, 0xe4, 0x15, 0xe9, 0x25,
+	0xad, 0xbc, 0x2f, 0x82, 0xbd, 0x39, 0xd0, 0xe3, 0xa4, 0x21, 0xa5, 0x1c,
+	0xe2, 0xba, 0xc7, 0xbb, 0x57, 0xda, 0xe5, 0x40, 0x50, 0x23, 0xd8, 0x8c,
+	0xef, 0xa6, 0x3a, 0xfb, 0xa1, 0x15, 0xde, 0xbf, 0x66, 0x78, 0x47, 0x37,
+	0x8c, 0xda, 0x5b, 0x1e, 0x5a, 0xb1, 0xe5, 0x6b, 0x8d, 0x61, 0x79, 0xb8,
+	0x91, 0xb5, 0xee, 0x81, 0x0f, 0x28, 0xaf, 0xde, 0xcb, 0x6e, 0x4b, 0xd2,
+	0x7f, 0x99, 0xc8, 0x3d, 0xdb, 0xec, 0x20, 0x17, 0xa9, 0xb2, 0x36, 0x3b,
+	0x9e, 0xe5, 0x7d, 0x8d, 0x55, 0x97, 0x8d, 0xf9, 0xca, 0xff, 0x65, 0xae,
+	0xc2, 0xfd, 0xe9, 0xaf, 0x91, 0x9b, 0x78, 0xc8, 0x4d, 0x3c, 0xe4, 0x26,
+	0x1e, 0x72, 0x13, 0x0f, 0xb9, 0x89, 0x87, 0xdc, 0xc4, 0x43, 0x6e, 0xe2,
+	0x21, 0x37, 0x41, 0x1d, 0x10, 0xd4, 0x07, 0xe3, 0xc8, 0xb9, 0xe1, 0xbf,
+	0xbc, 0x5b, 0xc2, 0xdc, 0x22, 0x8a, 0xcd, 0x9c, 0x3b, 0xbf, 0xc9, 0x0d,
+	0xea, 0x2b, 0xe5, 0x13, 0x0a, 0xcd, 0x89, 0x30, 0x07, 0xe2, 0x9a, 0x28,
+	0x76, 0x73, 0x9d, 0x8c, 0xb9, 0xa8, 0x3d, 0x0b, 0x93, 0xcc, 0x91, 0x82,
+	0x98, 0x15, 0xe4, 0xe7, 0xaf, 0x22, 0x4f, 0xca, 0x20, 0x4f, 0x1a, 0x40,
+	0x4e, 0xc4, 0x7b, 0xea, 0xe8, 0x2e, 0xa9, 0xa0, 0x1d, 0xf4, 0xc6, 0xb5,
+	0x2f, 0x79, 0xcc, 0xdf, 0xed, 0x4c, 0x59, 0xd7, 0x8f, 0xf7, 0x89, 0x2f,
+	0xc5, 0xd1, 0xaf, 0x23, 0x57, 0xfe, 0x4b, 0x75, 0x6f, 0x36, 0x31, 0x44,
+	0x99, 0xdf, 0xf7, 0x31, 0xf9, 0x72, 0xc4, 0xdf, 0xe0, 0xbe, 0x4f, 0x5f,
+	0x22, 0xff, 0x44, 0x7a, 0xcf, 0x80, 0xe1, 0x67, 0x12, 0x92, 0x3c, 0xb9,
+	0x05, 0x73, 0x96, 0xf4, 0xa9, 0x3b, 0x23, 0x88, 0xf2, 0xcc, 0x7f, 0x40,
+	0x5e, 0xb6, 0xe8, 0x67, 0x78, 0xb3, 0x40, 0xbc, 0xf4, 0xaf, 0xf5, 0x99,
+	0x62, 0xbd, 0xae, 0x74, 0xea, 0x60, 0xa3, 0x84, 0x3c, 0xca, 0xe8, 0x93,
+	0x0e, 0x13, 0xb5, 0x54, 0x84, 0x9b, 0x38, 0xdf, 0x4c, 0xaa, 0x1a, 0xe7,
+	0xcc, 0xaa, 0x3c, 0x21, 0x6b, 0xee, 0x53, 0x9f, 0xa9, 0x2e, 0x64, 0xd3,
+	0xac, 0x71, 0x0b, 0x56, 0x7d, 0xe6, 0x49, 0xe0, 0x58, 0x46, 0x6e, 0x60,
+	0xa8, 0xbd, 0xeb, 0x33, 0xb3, 0x0b, 0xc1, 0xbd, 0x55, 0x40, 0x03, 0xe3,
+	0x55, 0x87, 0x18, 0x4b, 0xc1, 0xfd, 0x95, 0xae, 0x60, 0x09, 0x47, 0x78,
+	0x13, 0x70, 0x94, 0xdb, 0x30, 0x60, 0x29, 0x3b, 0xd2, 0x50, 0x9f, 0xa9,
+	0xd4, 0x5b, 0x69, 0x20, 0x1e, 0xe2, 0x8d, 0xce, 0xc3, 0xb3, 0x24, 0x45,
+	0x3f, 0xe9, 0xfb, 0xe5, 0xd1, 0x81, 0xb0, 0xae, 0x44, 0x1d, 0x79, 0xcc,
+	0x0c, 0xf4, 0x5c, 0x8d, 0xff, 0x58, 0xc5, 0xa9, 0x8c, 0xce, 0x79, 0x3e,
+	0xf1, 0x2e, 0xf7, 0x28, 0xe6, 0x30, 0x5e, 0x8e, 0xd6, 0xea, 0xe1, 0xda,
+	0xae, 0x16, 0x7e, 0xb6, 0x85, 0xfb, 0x91, 0x26, 0x9e, 0xf3, 0x15, 0xec,
+	0x45, 0xba, 0xb8, 0xc6, 0x04, 0x6d, 0x90, 0xa5, 0xf7, 0xbf, 0xe5, 0x7d,
+	0xeb, 0x99, 0xc8, 0x53, 0x13, 0x30, 0x5c, 0x4f, 0x1c, 0x11, 0x0c, 0x5e,
+	0x9c, 0x09, 0xe0, 0xf4, 0xd5, 0x7b, 0xbe, 0x4f, 0xda, 0xb7, 0x95, 0xd6,
+	0x68, 0xff, 0x08, 0xcf, 0x70, 0x20, 0xb7, 0x55, 0x78, 0xf5, 0x7f, 0x7f,
+	0xe1, 0x09, 0x5d, 0xfc, 0xd0, 0x7d, 0xe9, 0x70, 0x4b, 0x8d, 0x1c, 0xdd,
+	0x3b, 0xb0, 0xfe, 0x67, 0x3d, 0xcf, 0xef, 0x03, 0xad, 0xf5, 0x69, 0x29,
+	0x8c, 0x65, 0xdb, 0xa4, 0x60, 0xb2, 0x56, 0x19, 0x0f, 0xc7, 0x5b, 0x11,
+	0xdb, 0x38, 0xbe, 0x15, 0xfc, 0x85, 0x2e, 0x3b, 0x1d, 0x61, 0xad, 0x94,
+	0x0c, 0xbe, 0xeb, 0x0c, 0xd3, 0x8e, 0x58, 0x6b, 0x6e, 0x0e, 0xe7, 0x22,
+	0x3b, 0xa2, 0x1f, 0x36, 0xc3, 0x39, 0xfa, 0x5b, 0x1d, 0xf5, 0x12, 0xfb,
+	0xc0, 0xb3, 0xdc, 0x6a, 0x4b, 0xd1, 0x33, 0x29, 0xa7, 0x17, 0x23, 0xbf,
+	0x05, 0x9f, 0x32, 0x64, 0x86, 0xbe, 0xbf, 0x13, 0xbe, 0xaf, 0x5b, 0xf6,
+	0xc2, 0x67, 0xed, 0x83, 0xcf, 0xda, 0x8f, 0x1a, 0x75, 0x7c, 0xa5, 0xf5,
+	0x3e, 0x97, 0x75, 0x71, 0x55, 0x0e, 0x29, 0xf9, 0x57, 0x7c, 0xc3, 0xfe,
+	0x00, 0x3a, 0xb0, 0x53, 0xe5, 0x7b, 0x81, 0x4e, 0xc0, 0xdf, 0x3a, 0x49,
+	0xe8, 0xc4, 0xc6, 0x7b, 0xe3, 0x61, 0xd8, 0x46, 0x47, 0x41, 0xc5, 0x86,
+	0x95, 0x80, 0xf7, 0xd5, 0x7a, 0xc0, 0x7b, 0xf8, 0x65, 0xe0, 0x37, 0xa5,
+	0xd2, 0xb0, 0xa4, 0x82, 0x7d, 0x2b, 0xd8, 0xb7, 0x82, 0xda, 0x72, 0xb6,
+	0xd1, 0xfa, 0xed, 0xaa, 0x2b, 0xa4, 0x9d, 0xb0, 0x51, 0xdf, 0x6a, 0x39,
+	0x7f, 0xf4, 0x3c, 0x02, 0xfe, 0x3f, 0x02, 0xfe, 0x1f, 0x46, 0x4d, 0xf5,
+	0x00, 0x6a, 0xaa, 0xfb, 0x50, 0x53, 0x1d, 0x42, 0x4d, 0x35, 0x85, 0x9a,
+	0xea, 0x6e, 0xf8, 0x8f, 0x3b, 0xe1, 0x3f, 0x26, 0xe1, 0x3f, 0x26, 0xd4,
+	0x9d, 0xd1, 0x41, 0x6f, 0xe3, 0x1d, 0x4a, 0xb4, 0x17, 0xdb, 0x1b, 0x22,
+	0x50, 0x81, 0xf2, 0xb1, 0x71, 0xa9, 0x37, 0x59, 0x5b, 0x39, 0xea, 0x3e,
+	0x6c, 0xda, 0x99, 0xd4, 0xa6, 0x90, 0xbf, 0xdf, 0x33, 0xc2, 0x9a, 0x2b,
+	0xa9, 0x15, 0x55, 0xcd, 0x95, 0x7d, 0xc1, 0x45, 0x8a, 0x84, 0xdc, 0x0f,
+	0x67, 0xce, 0x9e, 0x2e, 0x1a, 0x51, 0xbd, 0xd3, 0xbb, 0x5a, 0xef, 0x2c,
+	0xcf, 0xb3, 0xde, 0x79, 0x75, 0xb5, 0xde, 0x59, 0x9e, 0x67, 0xbd, 0xf3,
+	0xca, 0xba, 0x7a, 0xe7, 0xca, 0xd3, 0x97, 0xd6, 0xd5, 0x3b, 0x57, 0x9e,
+	0x7e, 0x29, 0x1c, 0x4b, 0xa8, 0x0f, 0x21, 0xad, 0x96, 0x83, 0x67, 0x4f,
+	0x98, 0x73, 0x34, 0xfb, 0xd6, 0xff, 0xdf, 0x74, 0xca, 0x96, 0x35, 0xb1,
+	0xff, 0x68, 0x6b, 0x50, 0x23, 0xb5, 0xce, 0x77, 0xb7, 0xcc, 0x5f, 0x56,
+	0xdf, 0x4b, 0xcb, 0xb5, 0xcd, 0xef, 0xc2, 0x03, 0xcb, 0xca, 0x10, 0xf3,
+	0xbc, 0x0f, 0x7c, 0x7e, 0xf3, 0x73, 0xf5, 0x0e, 0xf5, 0xcd, 0xcd, 0x55,
+	0xf9, 0x36, 0xec, 0x7c, 0xf4, 0xd1, 0xad, 0x81, 0x2f, 0x60, 0x3f, 0xa5,
+	0x05, 0xfe, 0xfd, 0x01, 0xe0, 0x01, 0xaf, 0x3d, 0x53, 0xdd, 0x07, 0x05,
+	0xe7, 0x0d, 0xee, 0xc6, 0xcd, 0xfc, 0xe5, 0x19, 0xe6, 0xd6, 0x55, 0x85,
+	0x9b, 0xf5, 0x23, 0x6b, 0xce, 0x28, 0x06, 0x44, 0xb8, 0x5e, 0x4d, 0x05,
+	0x74, 0xbb, 0xa8, 0x1d, 0xb9, 0x26, 0x1a, 0xb7, 0xd6, 0x9a, 0x9d, 0xe1,
+	0xbd, 0xdb, 0xe5, 0x20, 0xaf, 0x52, 0xf8, 0xcc, 0x10, 0xdf, 0xcf, 0xfd,
+	0xc0, 0xf7, 0x10, 0xde, 0x6a, 0x81, 0x1f, 0x47, 0xae, 0xc7, 0x7b, 0x1a,
+	0xe6, 0x6c, 0xa6, 0xbc, 0x33, 0xdf, 0x25, 0xff, 0x79, 0xd4, 0xf7, 0x27,
+	0x9c, 0xec, 0xf0, 0x25, 0xd4, 0x1e, 0x27, 0x69, 0x27, 0xa3, 0xa4, 0x73,
+	0x30, 0x33, 0x2b, 0xa9, 0x3e, 0xd2, 0x72, 0x5e, 0x3f, 0xac, 0x7d, 0x98,
+	0x6e, 0x3d, 0xdc, 0xe7, 0x1f, 0x5b, 0xf6, 0xc9, 0xb4, 0xec, 0x53, 0xa0,
+	0xcd, 0xd6, 0xef, 0xc0, 0x99, 0x2b, 0xdb, 0xaf, 0xb7, 0x52, 0x61, 0x5d,
+	0xf6, 0xf0, 0xe8, 0x66, 0x59, 0xe8, 0xcf, 0x9e, 0x7d, 0x05, 0xf9, 0x7a,
+	0x79, 0x14, 0x73, 0xa9, 0x41, 0xbc, 0xe3, 0x7c, 0xb6, 0x8e, 0x5c, 0xf4,
+	0x6c, 0x5d, 0xb6, 0x01, 0x3e, 0x5b, 0x11, 0xe1, 0x3c, 0xfb, 0x8a, 0xb6,
+	0x7a, 0xe8, 0x03, 0xd2, 0x17, 0x70, 0xe6, 0x29, 0xd4, 0x5f, 0x87, 0x83,
+	0x7b, 0xb4, 0x70, 0x9f, 0x1b, 0xb5, 0x20, 0x6f, 0xce, 0x69, 0x95, 0xf0,
+	0x0e, 0xf0, 0x2b, 0xf0, 0x17, 0x86, 0x4e, 0xd8, 0x77, 0x80, 0x5b, 0x93,
+	0xa5, 0xa3, 0x86, 0xba, 0x3b, 0x2d, 0x8f, 0x52, 0xd6, 0x7c, 0x7e, 0x14,
+	0xef, 0xa2, 0x33, 0xfd, 0x4d, 0x78, 0xa6, 0xd1, 0xb0, 0x6e, 0x8f, 0xce,
+	0x94, 0x90, 0xd7, 0xe7, 0x2d, 0xc0, 0x8e, 0x80, 0x1f, 0x25, 0x59, 0x69,
+	0x66, 0x3e, 0x05, 0x4f, 0xad, 0x85, 0x37, 0xe6, 0x06, 0x19, 0x46, 0x75,
+	0x0f, 0x78, 0x30, 0x91, 0x86, 0x1d, 0xde, 0xd7, 0x17, 0xdd, 0xe9, 0x1a,
+	0xb6, 0xae, 0x05, 0x75, 0x3c, 0xe7, 0x07, 0x60, 0x8b, 0x19, 0xd8, 0x27,
+	0x73, 0xa6, 0x12, 0x6b, 0x15, 0xda, 0x93, 0xe5, 0x1a, 0x59, 0x6b, 0x52,
+	0x86, 0x51, 0xef, 0xf0, 0xfc, 0x79, 0x59, 0x6e, 0x46, 0x34, 0xe4, 0x60,
+	0x8f, 0x63, 0xf8, 0x8d, 0xe0, 0x9d, 0x83, 0x1f, 0x6b, 0xa5, 0x82, 0x7c,
+	0x55, 0xe5, 0xe2, 0xc8, 0xb5, 0x87, 0x48, 0xdf, 0x01, 0xac, 0xa7, 0x3e,
+	0x53, 0x4f, 0x0f, 0x88, 0xdb, 0x4f, 0x5f, 0x91, 0x06, 0x6e, 0xc0, 0x78,
+	0xaf, 0xc1, 0xd6, 0x07, 0xf0, 0xcc, 0x5a, 0x65, 0xf2, 0x56, 0xe1, 0xf7,
+	0x7d, 0x23, 0xc7, 0x6f, 0x11, 0xe3, 0xe1, 0x78, 0xd0, 0xfa, 0x1d, 0xea,
+	0x5e, 0xfa, 0x5a, 0x39, 0xbb, 0x18, 0xc5, 0xc1, 0x19, 0xd8, 0x20, 0xef,
+	0x68, 0xc7, 0xc1, 0x17, 0x8e, 0xb5, 0x30, 0x1e, 0x62, 0x7e, 0xf9, 0xaf,
+	0x70, 0xee, 0xbc, 0x9c, 0x44, 0xfd, 0x2f, 0xfd, 0x7c, 0x66, 0x80, 0x7f,
+	0x4b, 0xa8, 0xef, 0xeb, 0xe1, 0x0d, 0x9b, 0xfd, 0x71, 0xd0, 0x67, 0xb6,
+	0xc0, 0x13, 0x26, 0xac, 0x4f, 0x04, 0xf1, 0x38, 0xed, 0xdf, 0xaa, 0xe7,
+	0xef, 0x96, 0x3f, 0x50, 0x67, 0xca, 0xcb, 0xa1, 0x45, 0xdf, 0x77, 0x73,
+	0x83, 0xc3, 0xcb, 0x92, 0x1d, 0x7e, 0x52, 0x76, 0x5b, 0x7b, 0x59, 0x8f,
+	0x59, 0xc4, 0xe3, 0xdf, 0xda, 0x96, 0xf7, 0xfd, 0x13, 0xa0, 0xfd, 0x07,
+	0x6a, 0x9f, 0xbb, 0x41, 0x3f, 0x78, 0xa5, 0x6a, 0x12, 0xd2, 0x0a, 0xde,
+	0xa4, 0x48, 0x6f, 0xa7, 0x1c, 0x6a, 0x3e, 0x1f, 0xca, 0xe6, 0x11, 0x71,
+	0xbd, 0xb7, 0x0d, 0xde, 0x6f, 0x97, 0x9b, 0x8f, 0x86, 0xb4, 0xe5, 0x41,
+	0x2f, 0xf6, 0x6f, 0xfe, 0x43, 0x8a, 0xbe, 0x81, 0x32, 0x77, 0x91, 0x35,
+	0xba, 0xa3, 0xcf, 0x40, 0x07, 0x3f, 0xce, 0x0f, 0x24, 0x65, 0xbd, 0x1f,
+	0x20, 0x5c, 0xf2, 0x23, 0x74, 0x85, 0x74, 0x88, 0xf2, 0x9f, 0x2a, 0x6e,
+	0x29, 0x7c, 0xc6, 0x06, 0x5f, 0xf0, 0xa4, 0x7a, 0x9e, 0x37, 0xe8, 0x9b,
+	0x18, 0xff, 0xa8, 0xc3, 0xdd, 0xf0, 0x7f, 0xd0, 0x41, 0xd8, 0x71, 0x71,
+	0x91, 0xf7, 0x13, 0x43, 0xea, 0x4e, 0xab, 0x04, 0xd9, 0x2e, 0xf1, 0x3b,
+	0x63, 0x2a, 0xc8, 0x27, 0x83, 0xfa, 0x2b, 0x43, 0x5f, 0x88, 0xf6, 0xb8,
+	0xf2, 0x93, 0x25, 0xf5, 0x5d, 0x31, 0x89, 0x35, 0x3e, 0x9e, 0xad, 0x7f,
+	0x27, 0xf1, 0xa3, 0x42, 0xf0, 0x77, 0x12, 0xe1, 0x37, 0xde, 0x7a, 0x90,
+	0x47, 0x3c, 0xd8, 0x30, 0x65, 0xaa, 0x11, 0xfd, 0xdd, 0x04, 0xe5, 0x60,
+	0x4b, 0xb9, 0x11, 0xe5, 0x0e, 0x7e, 0x50, 0xd3, 0xac, 0x93, 0xe5, 0xe3,
+	0x61, 0x4e, 0xc4, 0x1a, 0x80, 0x3c, 0xc4, 0x78, 0x39, 0x90, 0xdf, 0x92,
+	0xbe, 0x03, 0xf2, 0x03, 0xcf, 0x3d, 0x13, 0xb6, 0x94, 0x0e, 0xe3, 0xba,
+	0xc5, 0x1a, 0x33, 0xac, 0x7b, 0xb7, 0x4b, 0x75, 0x92, 0xef, 0x13, 0xf2,
+	0xda, 0xfc, 0x40, 0x70, 0x4f, 0x24, 0x89, 0xf0, 0x3d, 0xc7, 0x49, 0x29,
+	0xab, 0xf7, 0x77, 0x86, 0xf8, 0x50, 0xa7, 0xdd, 0x19, 0x8d, 0xd3, 0x90,
+	0x63, 0x00, 0x37, 0x8d, 0x58, 0xf6, 0x55, 0xc4, 0xb1, 0x69, 0xf0, 0xbd,
+	0x38, 0x51, 0x91, 0x6b, 0x6d, 0x4b, 0xc5, 0x7d, 0x37, 0x49, 0x1d, 0xa3,
+	0x7e, 0x11, 0xa6, 0x07, 0xb9, 0x2a, 0xce, 0x3b, 0x2a, 0x53, 0x46, 0xfe,
+	0xdd, 0xdb, 0xca, 0xb5, 0xac, 0x55, 0x90, 0xf7, 0x7d, 0xd7, 0xe4, 0xf8,
+	0xfc, 0x6d, 0x0f, 0x06, 0x77, 0xfe, 0xba, 0x9e, 0xbf, 0x78, 0x5b, 0x39,
+	0xe8, 0xe3, 0xcc, 0xef, 0x86, 0x7d, 0xc2, 0x19, 0xea, 0x7b, 0xec, 0x4f,
+	0x6e, 0x32, 0xe5, 0xc2, 0x4d, 0xbe, 0x7f, 0x0f, 0xbf, 0x09, 0x85, 0x75,
+	0xac, 0xa5, 0xea, 0xd8, 0x0e, 0x95, 0x8f, 0xb8, 0xa3, 0x19, 0xad, 0x04,
+	0xdb, 0x3d, 0xe9, 0xa1, 0xe6, 0xd1, 0xb3, 0x63, 0xe7, 0x75, 0x0b, 0xf1,
+	0x37, 0x9b, 0x39, 0x2e, 0xb9, 0x3e, 0x7e, 0x63, 0x9e, 0x73, 0xb8, 0x66,
+	0x5b, 0x70, 0xef, 0x75, 0x83, 0xab, 0x7c, 0xae, 0x48, 0x18, 0x87, 0x6e,
+	0x68, 0xb5, 0x8f, 0xd6, 0x3c, 0x93, 0x76, 0x21, 0x53, 0x26, 0xe8, 0xa9,
+	0xd6, 0xa2, 0x9c, 0x8d, 0x7f, 0x0f, 0x70, 0xfe, 0xb6, 0xa7, 0x1a, 0x17,
+	0x6f, 0x9b, 0x85, 0x7c, 0x78, 0xa6, 0xd9, 0x46, 0xa4, 0x7f, 0x51, 0xdd,
+	0xc0, 0x3e, 0xe2, 0xbf, 0x87, 0xf8, 0xef, 0x21, 0xfe, 0x7b, 0x88, 0xff,
+	0x1e, 0xe2, 0xbf, 0x87, 0xf8, 0x0f, 0x1e, 0x3e, 0x07, 0x7d, 0x79, 0xd6,
+	0x9b, 0x08, 0x73, 0xb6, 0xc7, 0x56, 0x73, 0x36, 0xfe, 0xcd, 0xcb, 0xb9,
+	0xa6, 0xa2, 0xa5, 0x52, 0x91, 0x20, 0xe7, 0x15, 0x9d, 0xf9, 0x4d, 0x94,
+	0xf3, 0x7e, 0xf4, 0xf7, 0x90, 0x00, 0x8e, 0xf9, 0x1e, 0xe1, 0x2a, 0x9a,
+	0x6e, 0x13, 0x2e, 0xc8, 0xf9, 0x58, 0x67, 0xad, 0x87, 0xe1, 0x77, 0x37,
+	0xfa, 0xb6, 0xe0, 0x9b, 0x4f, 0xf0, 0x7d, 0xa9, 0x76, 0x87, 0x8b, 0x58,
+	0x5c, 0x6e, 0xa8, 0x78, 0x8c, 0x71, 0xe3, 0x0e, 0xfe, 0xad, 0x02, 0x64,
+	0xc0, 0x77, 0x5f, 0x66, 0x6d, 0x51, 0x6e, 0x20, 0x2f, 0x5a, 0x8e, 0x72,
+	0x21, 0xc0, 0x79, 0x6f, 0x6a, 0xa5, 0x05, 0xca, 0x59, 0x97, 0xd9, 0x14,
+	0x98, 0x62, 0xb7, 0xe6, 0x78, 0x97, 0x54, 0xbd, 0x74, 0xb6, 0x49, 0x7a,
+	0x46, 0x40, 0x5b, 0x74, 0x8f, 0x2c, 0x62, 0xcc, 0xa7, 0x44, 0x9f, 0x47,
+	0x7e, 0x6b, 0x0f, 0xa9, 0xbf, 0x6f, 0xe8, 0xc5, 0x3e, 0xfa, 0xfc, 0x8e,
+	0x96, 0x7b, 0x5a, 0x29, 0x04, 0x3e, 0x9b, 0xb1, 0x87, 0xe7, 0x48, 0xc1,
+	0x76, 0xdd, 0x6d, 0x38, 0x1b, 0xe4, 0xfa, 0xaf, 0x5b, 0x55, 0x0e, 0x0e,
+	0x3f, 0x7a, 0x62, 0xa8, 0xbf, 0x5f, 0xba, 0xb7, 0xcb, 0xc9, 0x21, 0xd6,
+	0x6b, 0x9b, 0x81, 0x8f, 0x6b, 0x79, 0xff, 0xb4, 0x5d, 0x4e, 0x2d, 0xc2,
+	0xcf, 0x2e, 0x66, 0x1d, 0xea, 0xf2, 0xd2, 0x50, 0x0a, 0xfe, 0xf9, 0xe6,
+	0x7e, 0xc6, 0xe7, 0xe5, 0x26, 0x75, 0xa5, 0x17, 0xf0, 0x03, 0xd0, 0xcb,
+	0x4d, 0xb0, 0x27, 0x1d, 0xfb, 0x47, 0xb8, 0xff, 0x45, 0xe1, 0xee, 0xb5,
+	0x9d, 0x6d, 0x4a, 0x37, 0xf4, 0xac, 0x95, 0xd1, 0x41, 0xfb, 0xff, 0x14,
+	0x6e, 0x6d, 0x31, 0x71, 0x5c, 0x67, 0xf8, 0x3f, 0xb3, 0xdc, 0x8c, 0xd7,
+	0x30, 0x86, 0xf5, 0xb2, 0x58, 0xae, 0xba, 0x03, 0x63, 0x33, 0xd1, 0x62,
+	0x65, 0xb0, 0xec, 0x16, 0x55, 0x96, 0xba, 0xda, 0x05, 0x42, 0xe2, 0x3a,
+	0xdd, 0x24, 0xb4, 0x72, 0xd5, 0x2a, 0x42, 0x60, 0x37, 0x8e, 0xfa, 0xd2,
+	0x46, 0x55, 0xdb, 0x37, 0xaf, 0x16, 0xec, 0x38, 0xcd, 0xac, 0x17, 0x37,
+	0x38, 0xf4, 0x71, 0xb3, 0x2c, 0x0e, 0x90, 0x75, 0x56, 0x6e, 0xf2, 0x90,
+	0x3e, 0x19, 0x6d, 0xa2, 0x24, 0x55, 0xa5, 0xbc, 0x54, 0x7d, 0xab, 0x5a,
+	0x0b, 0x27, 0xc4, 0x0f, 0xa9, 0xad, 0xf6, 0xa5, 0x77, 0x4d, 0xbf, 0xef,
+	0xcc, 0x2c, 0xc6, 0xa4, 0x51, 0x91, 0x56, 0x73, 0xe6, 0xcc, 0xb9, 0x9f,
+	0xff, 0xf2, 0xfd, 0x17, 0x3e, 0x67, 0x67, 0xba, 0xc2, 0xbd, 0xcd, 0x95,
+	0x76, 0xc6, 0xfe, 0xb6, 0xd4, 0xcc, 0x02, 0xf3, 0x1a, 0x86, 0x64, 0xdc,
+	0x64, 0xae, 0xcf, 0x96, 0x3a, 0x57, 0x66, 0x1b, 0x1b, 0x65, 0xda, 0xe4,
+	0x4b, 0x6a, 0xa6, 0xdc, 0x23, 0x17, 0x41, 0xc7, 0x85, 0xe1, 0xd6, 0xd0,
+	0xf7, 0xda, 0x19, 0xf2, 0x73, 0x77, 0x3c, 0xd4, 0x57, 0x1a, 0x57, 0x16,
+	0x34, 0xb6, 0xee, 0xde, 0xf5, 0xed, 0x49, 0xac, 0x29, 0x41, 0x5f, 0x79,
+	0x5c, 0xcb, 0x26, 0x83, 0xef, 0x7d, 0xbb, 0xde, 0xe3, 0xbb, 0xde, 0x0f,
+	0xfc, 0x8f, 0xf6, 0x2c, 0xef, 0xa6, 0x07, 0xae, 0xd3, 0x1a, 0xe5, 0x2c,
+	0x05, 0xcf, 0x36, 0x66, 0x3d, 0x2b, 0x4d, 0x5c, 0x90, 0x15, 0x5f, 0x65,
+	0xdd, 0x36, 0xc8, 0xbb, 0x36, 0x99, 0x5f, 0x04, 0xcd, 0x63, 0x1f, 0xed,
+	0x36, 0x63, 0xe4, 0x43, 0x71, 0xf2, 0x4c, 0x07, 0xae, 0xc1, 0xb0, 0x07,
+	0x13, 0x68, 0xe7, 0x3f, 0xef, 0xa6, 0xcc, 0xb3, 0xda, 0x97, 0x43, 0x3c,
+	0xe3, 0xab, 0x82, 0xce, 0xf7, 0x60, 0x9b, 0x16, 0xb9, 0x63, 0x67, 0x7a,
+	0xc2, 0x7c, 0x20, 0xd8, 0xbe, 0x5f, 0x8d, 0x13, 0x6b, 0x3c, 0xe7, 0x6e,
+	0xd7, 0x99, 0x9b, 0xc2, 0x1c, 0x2b, 0x10, 0xcd, 0x53, 0x22, 0xe5, 0xaa,
+	0xc8, 0xeb, 0xf8, 0xfd, 0xa6, 0x1a, 0xc6, 0x4f, 0x14, 0xed, 0xee, 0x93,
+	0xb2, 0x5e, 0xfa, 0x9a, 0xd4, 0xa0, 0x7f, 0xd6, 0x5c, 0xdf, 0xbf, 0xeb,
+	0x26, 0xf5, 0x99, 0xbf, 0xe8, 0x29, 0x19, 0x18, 0xa1, 0x7e, 0x6b, 0x93,
+	0x97, 0x17, 0x5b, 0x64, 0xc3, 0xb4, 0xcc, 0xbb, 0x44, 0x01, 0x5e, 0x42,
+	0x26, 0x63, 0x11, 0x8d, 0x51, 0xe5, 0x5b, 0x22, 0x5b, 0xf8, 0xb6, 0xb5,
+	0xf8, 0x4c, 0x9c, 0xfe, 0x97, 0x4f, 0x16, 0xf9, 0x6e, 0xe0, 0x69, 0x48,
+	0xc3, 0x8e, 0x00, 0xcb, 0x42, 0x08, 0x99, 0x3c, 0x77, 0xee, 0xf7, 0xbb,
+	0x5c, 0x1b, 0xea, 0x68, 0xcf, 0xb6, 0x49, 0xe1, 0x30, 0x64, 0xa2, 0x1a,
+	0xd4, 0x39, 0x47, 0x8d, 0x98, 0x96, 0xd1, 0x91, 0x0a, 0x7d, 0xf8, 0xe6,
+	0x5e, 0x2d, 0xaf, 0x33, 0x37, 0x7e, 0xa4, 0xf7, 0x82, 0x72, 0xbe, 0xe2,
+	0x92, 0x56, 0x4d, 0x59, 0x03, 0xaf, 0xad, 0xd6, 0x5f, 0xed, 0xe3, 0x5d,
+	0xad, 0xd7, 0x5f, 0x88, 0x07, 0xf6, 0x1a, 0xeb, 0x7e, 0x1c, 0x0f, 0xea,
+	0x92, 0xa1, 0xfd, 0x45, 0x3b, 0xad, 0x8c, 0xbd, 0xbd, 0x20, 0xf5, 0xa5,
+	0x9f, 0xc9, 0x3b, 0xa5, 0x9f, 0xc8, 0xaf, 0x97, 0xce, 0x00, 0x7f, 0x58,
+	0xe5, 0x3c, 0xf4, 0xc9, 0xcd, 0xba, 0xef, 0xdf, 0x74, 0xa7, 0x60, 0x2b,
+	0xf8, 0xfe, 0xef, 0xdc, 0x0d, 0x19, 0x38, 0xf6, 0x3d, 0xec, 0x39, 0x07,
+	0x1e, 0xa2, 0x2c, 0x9c, 0x04, 0xbd, 0xb9, 0x7d, 0xd2, 0x19, 0xd5, 0x74,
+	0x32, 0x78, 0xac, 0x15, 0x7b, 0x30, 0x42, 0x4c, 0xce, 0xbd, 0x8c, 0xf4,
+	0x91, 0x66, 0x8c, 0x7a, 0x09, 0xf3, 0xb7, 0x82, 0x2f, 0xf6, 0xe2, 0xa7,
+	0xe4, 0xee, 0x08, 0xd6, 0x3a, 0x42, 0xda, 0x6b, 0x95, 0x81, 0x47, 0xb1,
+	0x8f, 0x5c, 0x8b, 0xdc, 0xf3, 0x7e, 0x19, 0xa7, 0x6f, 0xef, 0x9e, 0xc7,
+	0xb2, 0xf1, 0x95, 0x2e, 0xf1, 0xa5, 0x05, 0xba, 0x7c, 0xfe, 0x78, 0x80,
+	0x9b, 0xde, 0x55, 0x43, 0x68, 0x6f, 0xe7, 0xdf, 0x53, 0xc4, 0x79, 0x79,
+	0xbf, 0x15, 0xb8, 0x7c, 0x1c, 0x78, 0x28, 0x53, 0xbf, 0x20, 0x8d, 0x91,
+	0x28, 0xda, 0x10, 0xaf, 0x68, 0x59, 0x22, 0x59, 0x8f, 0x39, 0x5a, 0xcc,
+	0x97, 0xc2, 0x1a, 0xa7, 0x74, 0x2e, 0x57, 0x1f, 0xcf, 0x9c, 0x58, 0x3e,
+	0xa8, 0xb3, 0x41, 0x23, 0xac, 0x23, 0x7d, 0xa7, 0x35, 0xa6, 0x82, 0x0e,
+	0xc5, 0x78, 0xc3, 0x92, 0xd1, 0xe5, 0x2e, 0x8c, 0x77, 0x41, 0x32, 0x6e,
+	0x73, 0xcc, 0x51, 0xb4, 0xa1, 0x9c, 0x19, 0x05, 0x96, 0xf8, 0x58, 0x8d,
+	0x2d, 0xc6, 0xa0, 0xcb, 0xe3, 0x32, 0x66, 0xee, 0xd9, 0xb1, 0xc7, 0xbc,
+	0xb6, 0x15, 0x0c, 0x63, 0x38, 0x5c, 0x53, 0xd7, 0x8e, 0x35, 0xb1, 0x3f,
+	0x7e, 0xb0, 0x93, 0x33, 0x8b, 0x0b, 0x90, 0x53, 0x0b, 0x1f, 0x66, 0xdc,
+	0x67, 0x25, 0x1b, 0x6b, 0xd3, 0xb6, 0x4d, 0x05, 0xf7, 0x92, 0xf5, 0xe8,
+	0xd3, 0xfa, 0x0e, 0xe4, 0xd0, 0x9e, 0xb0, 0x8e, 0x6d, 0xc5, 0xc8, 0xe0,
+	0xec, 0x03, 0x1b, 0x97, 0x75, 0x5f, 0x96, 0xcc, 0x42, 0x4e, 0x26, 0x74,
+	0x3f, 0x9e, 0xe1, 0x41, 0x8d, 0x43, 0xc8, 0xab, 0x03, 0xbd, 0x38, 0xcb,
+	0xd4, 0x03, 0x7b, 0x38, 0xd9, 0x4b, 0x2e, 0xfa, 0x4f, 0xc8, 0xb3, 0xf8,
+	0xd6, 0xcb, 0x3b, 0x6a, 0x93, 0x81, 0x67, 0xa0, 0x2f, 0xbd, 0x66, 0x7d,
+	0x54, 0x3e, 0xf3, 0xfc, 0x38, 0x63, 0x2e, 0x7f, 0xf6, 0x4c, 0xf9, 0xc4,
+	0xd3, 0xb1, 0xd8, 0xe9, 0x88, 0x58, 0xe7, 0x03, 0x5b, 0xfd, 0xf0, 0xf4,
+	0xbc, 0xe2, 0xf7, 0xc3, 0xe7, 0x57, 0x54, 0x07, 0xda, 0x46, 0xd1, 0x8e,
+	0xeb, 0x30, 0x65, 0xdc, 0xfb, 0xab, 0x3f, 0x73, 0xc4, 0xf7, 0x27, 0x74,
+	0x4e, 0x58, 0xca, 0x9c, 0x57, 0x4d, 0x7c, 0xee, 0x88, 0x17, 0x6b, 0xc7,
+	0x5c, 0x29, 0x73, 0x45, 0x1d, 0xc6, 0x7a, 0x58, 0xee, 0x25, 0x4f, 0x24,
+	0x36, 0x84, 0xe3, 0x5b, 0x93, 0xcb, 0x2a, 0x95, 0x1c, 0x54, 0x56, 0xba,
+	0x80, 0x5f, 0x8b, 0xd2, 0x71, 0xcc, 0x44, 0x52, 0x81, 0x77, 0xb1, 0x27,
+	0xfb, 0xa8, 0xef, 0x4f, 0xda, 0xac, 0x4f, 0x99, 0x51, 0x45, 0xdf, 0x4b,
+	0xa7, 0x8e, 0x77, 0x5e, 0x3e, 0x90, 0x32, 0x8f, 0xaa, 0xfd, 0xe1, 0xfb,
+	0x28, 0x64, 0xe6, 0xf6, 0x78, 0x67, 0x96, 0x95, 0x29, 0x2f, 0x79, 0xa9,
+	0xe4, 0xac, 0xb2, 0x72, 0x18, 0x33, 0x37, 0xa6, 0x28, 0x37, 0x52, 0x66,
+	0xa7, 0xa2, 0x7f, 0xb4, 0x5d, 0xef, 0x7b, 0x12, 0xfd, 0x53, 0xaa, 0x25,
+	0x5c, 0x0f, 0xef, 0xeb, 0x4a, 0x5f, 0xc0, 0x33, 0x94, 0x39, 0xfd, 0xc6,
+	0xcc, 0x02, 0xf3, 0xcb, 0x74, 0x5e, 0x43, 0x7a, 0xe0, 0x18, 0xdf, 0x0d,
+	0xb9, 0x7f, 0xe2, 0x6f, 0xa8, 0x43, 0xb9, 0xcc, 0x3a, 0x27, 0xe4, 0xb7,
+	0x23, 0x1a, 0x3f, 0xdf, 0x3f, 0x91, 0xd7, 0xb9, 0x8b, 0x0d, 0x35, 0x10,
+	0xee, 0x7b, 0xfb, 0xce, 0x92, 0x19, 0xf7, 0x4b, 0x1c, 0x67, 0x21, 0x72,
+	0xb2, 0x5d, 0x98, 0x23, 0x3a, 0x5e, 0x6a, 0xd2, 0x06, 0x7d, 0x03, 0xcc,
+	0x15, 0x68, 0xc6, 0xdd, 0x2f, 0x88, 0x71, 0xac, 0x63, 0x07, 0x9d, 0x00,
+	0x77, 0x02, 0xaf, 0x56, 0x31, 0x4e, 0x61, 0x51, 0xf2, 0x41, 0x7f, 0xe9,
+	0x60, 0x4e, 0x6a, 0xa1, 0xfa, 0x45, 0x63, 0x04, 0x3a, 0x70, 0x1c, 0xef,
+	0xf7, 0x4f, 0x90, 0x3e, 0x79, 0x36, 0x49, 0x35, 0xbe, 0xc4, 0xf5, 0x1c,
+	0x94, 0x89, 0x45, 0x60, 0x23, 0xfc, 0xe6, 0x17, 0x83, 0x7b, 0xbb, 0x0e,
+	0x9c, 0x3d, 0xe1, 0x99, 0x9a, 0x5f, 0x67, 0x5d, 0xc6, 0x4f, 0xc0, 0x2b,
+	0x3a, 0x8f, 0x8a, 0x7d, 0x99, 0x4b, 0x78, 0x88, 0xfa, 0xd1, 0x6d, 0x48,
+	0x0c, 0x6d, 0x89, 0x59, 0x59, 0x6f, 0x8d, 0x26, 0x23, 0xdd, 0x32, 0x0f,
+	0x79, 0x57, 0x81, 0xee, 0x2c, 0x5c, 0x89, 0xca, 0xac, 0xa7, 0xe3, 0xd9,
+	0xc9, 0x8f, 0x95, 0x2b, 0xb5, 0xfa, 0x71, 0xb9, 0x51, 0x77, 0xf4, 0x37,
+	0xea, 0xb5, 0xc2, 0xab, 0x86, 0x7c, 0xff, 0x88, 0xce, 0xa5, 0x73, 0x2a,
+	0xd2, 0xd9, 0x4f, 0xcc, 0xb3, 0xa2, 0xf3, 0xea, 0x20, 0x3b, 0x80, 0x39,
+	0xde, 0x06, 0xe6, 0x78, 0x0b, 0x98, 0xe3, 0x57, 0xc0, 0xd8, 0x37, 0x4b,
+	0x93, 0xa1, 0xfc, 0x9f, 0x86, 0x1c, 0xa2, 0xae, 0xb6, 0xce, 0xe0, 0x4e,
+	0xa7, 0xf3, 0xa0, 0xc1, 0xdb, 0xb0, 0x3f, 0xd6, 0x4b, 0x19, 0x59, 0x5d,
+	0x9a, 0x90, 0xb5, 0xa5, 0x20, 0x0f, 0xf9, 0x03, 0xe6, 0x7d, 0x8d, 0xf0,
+	0x9e, 0x1c, 0xc8, 0xa1, 0x3d, 0x32, 0x70, 0x94, 0xf2, 0xa3, 0x43, 0x96,
+	0x8b, 0xab, 0x5a, 0x0e, 0x2d, 0x17, 0x59, 0x8e, 0x88, 0xce, 0x21, 0x9b,
+	0xda, 0x90, 0x8a, 0x5b, 0x47, 0xfd, 0x3e, 0xed, 0x0f, 0x0a, 0xfc, 0xf3,
+	0x94, 0x97, 0x7f, 0x0a, 0xef, 0x5e, 0xe9, 0xdc, 0xba, 0x19, 0xb3, 0x1b,
+	0xed, 0x9a, 0xb2, 0x6b, 0x30, 0x88, 0xb9, 0xab, 0xdb, 0x68, 0x83, 0x39,
+	0x80, 0x19, 0xaf, 0x43, 0x87, 0x34, 0x9c, 0x6e, 0x8d, 0xfd, 0x1a, 0xce,
+	0x21, 0x9d, 0x77, 0xcb, 0x71, 0x0a, 0x45, 0x5b, 0xe6, 0x8a, 0x56, 0x32,
+	0x0f, 0xfa, 0xbb, 0x01, 0xbb, 0x6d, 0x15, 0x77, 0xb0, 0x86, 0x33, 0x58,
+	0xaf, 0x53, 0xcf, 0x6f, 0x6a, 0xd9, 0xbb, 0x5c, 0xff, 0x23, 0xc6, 0xb1,
+	0xce, 0xa4, 0xe5, 0x0f, 0x7d, 0x94, 0x81, 0xf4, 0x4d, 0x65, 0x75, 0xff,
+	0xa0, 0xdf, 0x2a, 0xda, 0xae, 0xd5, 0x29, 0x8f, 0x45, 0x2e, 0x79, 0x36,
+	0x74, 0xc9, 0xcb, 0x09, 0x62, 0x80, 0xb2, 0x6a, 0xf6, 0xf3, 0xc3, 0x35,
+	0xfb, 0xfe, 0x5e, 0x9b, 0xeb, 0x72, 0x42, 0xb9, 0x4d, 0xdd, 0xbf, 0xa1,
+	0xb1, 0x8d, 0x57, 0x7a, 0x56, 0xde, 0xc4, 0x7d, 0x07, 0x18, 0x27, 0x27,
+	0x6f, 0x00, 0xe3, 0xd5, 0x4b, 0xcd, 0xbc, 0xed, 0x93, 0x38, 0xa7, 0x92,
+	0x9a, 0xbb, 0xda, 0x29, 0x97, 0xaf, 0x15, 0xd4, 0x4b, 0xd7, 0x3c, 0xf5,
+	0xf3, 0xab, 0x45, 0x55, 0xb8, 0xea, 0xfb, 0xff, 0x70, 0x67, 0xe4, 0x9d,
+	0x25, 0x5f, 0x4e, 0xbb, 0x46, 0x7f, 0x44, 0x9a, 0xf9, 0x74, 0xbe, 0xdf,
+	0x01, 0xd9, 0xbc, 0x7e, 0xc0, 0xf7, 0x1f, 0x19, 0x19, 0x11, 0xe7, 0x00,
+	0x31, 0xca, 0x70, 0x82, 0x39, 0xae, 0x94, 0x39, 0x19, 0xdb, 0x3e, 0x5f,
+	0x51, 0x0a, 0xf2, 0xad, 0x3b, 0xc0, 0x2f, 0x8f, 0xee, 0x0b, 0xe3, 0x26,
+	0x3f, 0x7c, 0x9e, 0x7e, 0xe5, 0xc4, 0xe7, 0xfc, 0xca, 0xa6, 0x9c, 0x2d,
+	0xf6, 0xa2, 0x7f, 0x4c, 0x7e, 0x50, 0x8c, 0xee, 0x2a, 0x9b, 0x78, 0x3a,
+	0x46, 0xa1, 0x78, 0xcf, 0x1f, 0xd4, 0xf1, 0x03, 0x60, 0x12, 0xd3, 0xf7,
+	0x67, 0x5d, 0xce, 0xd7, 0x8d, 0xf9, 0x36, 0xcc, 0x7d, 0xd0, 0xff, 0xa7,
+	0xb5, 0x7e, 0x2e, 0x2b, 0xd8, 0xc1, 0xe0, 0xef, 0x98, 0x8c, 0x15, 0xa1,
+	0xe3, 0x15, 0xf3, 0x4c, 0x89, 0x15, 0xac, 0xc4, 0x2c, 0x64, 0xc7, 0x0c,
+	0xe4, 0xcd, 0x29, 0x1d, 0x67, 0xed, 0xd5, 0xb2, 0x67, 0x8e, 0xe5, 0x9c,
+	0xa4, 0x2b, 0x6e, 0x8f, 0x3e, 0xbf, 0xcd, 0x1b, 0x2f, 0x26, 0x82, 0x3b,
+	0x07, 0x1f, 0xe7, 0x94, 0xb4, 0xc1, 0x1e, 0xca, 0xae, 0x4c, 0x81, 0x27,
+	0x12, 0x38, 0xdb, 0x56, 0xcd, 0x0f, 0x0d, 0xe8, 0xef, 0x86, 0xf6, 0x29,
+	0x06, 0xb1, 0x8b, 0x86, 0xc9, 0x76, 0xa7, 0xd0, 0xaf, 0x5d, 0x32, 0x57,
+	0xdb, 0xb4, 0x5c, 0x7d, 0xb8, 0x2e, 0x0d, 0x1c, 0xf2, 0x04, 0xca, 0x11,
+	0xd4, 0x25, 0xc3, 0xb2, 0x81, 0xf2, 0x34, 0xca, 0x2d, 0x78, 0xb2, 0xcd,
+	0x61, 0xe0, 0x0a, 0x3c, 0x5f, 0xc3, 0x78, 0x23, 0x58, 0x73, 0xce, 0x94,
+	0x8f, 0x4e, 0x50, 0x97, 0x38, 0x06, 0x73, 0x91, 0x67, 0x6d, 0x3c, 0x6b,
+	0x45, 0x95, 0x5d, 0x60, 0x19, 0xcf, 0x72, 0xf0, 0xfd, 0x21, 0x99, 0x84,
+	0x3e, 0x99, 0x6b, 0x81, 0x4c, 0xfa, 0x68, 0x5b, 0x26, 0xb1, 0xae, 0x5d,
+	0xc6, 0xae, 0x92, 0xd7, 0x4d, 0xd0, 0x5b, 0xa7, 0x64, 0xaf, 0xc5, 0x34,
+	0x1e, 0xad, 0x80, 0x16, 0xaf, 0x83, 0xae, 0x96, 0x41, 0x53, 0x99, 0xa2,
+	0x35, 0x3a, 0xad, 0x92, 0xda, 0x2f, 0xf0, 0x38, 0xe8, 0xb5, 0xe3, 0x0a,
+	0xb1, 0x28, 0x79, 0xd9, 0x01, 0xed, 0x89, 0xdf, 0x61, 0xdb, 0x93, 0x8e,
+	0xb2, 0x41, 0x83, 0xa0, 0xcb, 0x62, 0xc0, 0xd3, 0xef, 0x29, 0x2d, 0x57,
+	0x47, 0xef, 0x48, 0x2a, 0x7d, 0x47, 0x2c, 0xc8, 0x02, 0xcb, 0xfd, 0x50,
+	0x5c, 0x8c, 0x79, 0x5c, 0x5e, 0xc7, 0x3c, 0x06, 0xf8, 0xfb, 0xc8, 0x90,
+	0xe6, 0xef, 0x51, 0x89, 0xec, 0xe6, 0x71, 0xd0, 0x1b, 0x64, 0x50, 0xc0,
+	0xd3, 0xe9, 0x90, 0x46, 0x9f, 0x06, 0xff, 0x5a, 0xb0, 0xca, 0x92, 0x32,
+	0x0f, 0xfe, 0xbf, 0x8e, 0xef, 0xb7, 0xea, 0x9f, 0xaa, 0xb9, 0x05, 0x15,
+	0xe6, 0xb2, 0x7c, 0x1b, 0x38, 0xf9, 0xf7, 0x38, 0xbb, 0x2e, 0x8d, 0xdd,
+	0x07, 0x46, 0x18, 0x4b, 0xfb, 0xb7, 0xba, 0x6c, 0x1f, 0x95, 0xcd, 0xe1,
+	0xe3, 0x28, 0xef, 0xc3, 0xd3, 0xc0, 0x39, 0x44, 0x75, 0x2c, 0x7c, 0xd9,
+	0x1b, 0x36, 0x0a, 0x3a, 0xef, 0xe0, 0x98, 0xce, 0xcf, 0x37, 0xec, 0x03,
+	0xf8, 0x4e, 0xbf, 0x0c, 0xf7, 0x06, 0xcc, 0xa4, 0x12, 0x3a, 0xc7, 0xb4,
+	0x02, 0x2c, 0xb1, 0x82, 0xf1, 0xde, 0xa7, 0x5f, 0xaf, 0x06, 0x1e, 0x1e,
+	0xfe, 0xa7, 0x9f, 0x8e, 0x31, 0x27, 0x7d, 0x33, 0x11, 0xe8, 0xbf, 0xcf,
+	0xfc, 0x4d, 0x7b, 0x6e, 0xd4, 0xc0, 0xcb, 0x6d, 0x33, 0x86, 0xb6, 0xd0,
+	0x65, 0xd0, 0x45, 0x65, 0x4d, 0xbf, 0x6c, 0x17, 0xf4, 0x2d, 0xd4, 0x52,
+	0xe6, 0x07, 0x12, 0xf4, 0x9d, 0xb7, 0xa9, 0x77, 0xda, 0x21, 0x5f, 0x92,
+	0x1a, 0x57, 0xbe, 0x6f, 0xe7, 0x21, 0x15, 0xac, 0xe4, 0x24, 0x68, 0xb4,
+	0x4d, 0x2c, 0x67, 0x5c, 0x1e, 0xcc, 0x3b, 0xab, 0xfb, 0xb2, 0x6d, 0xb3,
+	0x6f, 0x73, 0x5e, 0xae, 0x9f, 0x7b, 0xe1, 0x1e, 0xe8, 0xa7, 0x36, 0x35,
+	0x8d, 0x36, 0x6a, 0xed, 0xfd, 0x01, 0x8d, 0x36, 0xf7, 0x11, 0xfb, 0x3f,
+	0xfb, 0x20, 0x9d, 0x0c, 0x1b, 0x41, 0x0e, 0x06, 0x9e, 0x35, 0x9e, 0xe7,
+	0xa7, 0xc0, 0xf7, 0x3b, 0xe9, 0xa7, 0xe9, 0x67, 0x0c, 0xe8, 0xe7, 0x91,
+	0x6d, 0xfa, 0x21, 0xdd, 0x74, 0xca, 0xd8, 0x35, 0x5b, 0x26, 0x8a, 0xfa,
+	0xbe, 0x81, 0x35, 0xe9, 0x3f, 0x3a, 0x0e, 0xba, 0x21, 0xad, 0x93, 0xb7,
+	0x4c, 0x29, 0x83, 0x8e, 0xca, 0x90, 0x4f, 0x65, 0xd0, 0x14, 0x31, 0x50,
+	0x19, 0xf2, 0xad, 0x5c, 0xb7, 0x9c, 0x2a, 0xf6, 0x4c, 0x9d, 0xbd, 0x02,
+	0x3a, 0xba, 0x5e, 0xe7, 0xfd, 0xeb, 0x35, 0x9b, 0xd4, 0x83, 0xb7, 0xb6,
+	0xef, 0xfe, 0xef, 0xb8, 0xfb, 0x43, 0x72, 0x03, 0x76, 0xcb, 0x9b, 0xa5,
+	0x61, 0xc8, 0x24, 0x21, 0x5e, 0x04, 0x6d, 0x8c, 0xca, 0x6a, 0xe9, 0xa4,
+	0xac, 0x41, 0x3f, 0xad, 0x2f, 0x0d, 0x00, 0x4f, 0x43, 0x8e, 0xbe, 0x72,
+	0x44, 0xde, 0x58, 0x52, 0x32, 0x63, 0x43, 0xbf, 0x2c, 0xd3, 0x07, 0x0f,
+	0x7a, 0x2e, 0x77, 0xea, 0x98, 0xfd, 0x58, 0x35, 0xf0, 0xc5, 0x8f, 0x57,
+	0xbb, 0x64, 0xa2, 0x6a, 0xca, 0x63, 0xd5, 0x1e, 0x79, 0xa2, 0x1a, 0x93,
+	0xd3, 0xb5, 0x84, 0x7c, 0xa3, 0x7a, 0x50, 0x4e, 0x55, 0x0f, 0xc9, 0x93,
+	0xb5, 0xa4, 0x7c, 0x13, 0x76, 0x61, 0xae, 0xe6, 0xc8, 0x64, 0x6d, 0x58,
+	0x1e, 0xaf, 0xd1, 0xc7, 0x8e, 0xf9, 0xf0, 0xcb, 0x6e, 0xfb, 0x2e, 0xb8,
+	0xae, 0x0e, 0xac, 0xcb, 0x51, 0xe3, 0x3a, 0x66, 0x29, 0xb9, 0xc0, 0xff,
+	0x21, 0x72, 0x0e, 0x7d, 0x2f, 0xbe, 0xa2, 0xa4, 0xa2, 0xe7, 0x6f, 0xfe,
+	0xdf, 0x48, 0x54, 0xdb, 0x46, 0xe7, 0xca, 0x07, 0xd1, 0xc6, 0xa6, 0x4d,
+	0x12, 0xfa, 0x41, 0x9a, 0xfe, 0xff, 0xa6, 0xed, 0x65, 0x68, 0x1f, 0xf6,
+	0x2d, 0xda, 0x5e, 0xfa, 0xec, 0x29, 0x3f, 0x68, 0xe7, 0xd0, 0xd6, 0xda,
+	0x19, 0xe7, 0x68, 0xce, 0x7b, 0x31, 0xf7, 0xf0, 0xff, 0xa7, 0x04, 0xf1,
+	0xaa, 0xb3, 0xb5, 0x83, 0xfc, 0x3f, 0x15, 0xac, 0xe5, 0x8b, 0xf3, 0xc5,
+	0x27, 0x4a, 0x63, 0xea, 0xb1, 0x12, 0x11, 0x8d, 0x2f, 0x17, 0xb7, 0x73,
+	0xf2, 0xbe, 0x2e, 0xcb, 0x6e, 0x54, 0xaf, 0x21, 0xf0, 0xdb, 0xa7, 0x75,
+	0x7e, 0xde, 0xd8, 0x10, 0xe9, 0x8f, 0x71, 0xb8, 0xae, 0x30, 0xb6, 0x00,
+	0x6c, 0xeb, 0x9a, 0x72, 0xa9, 0x1a, 0xf8, 0xaf, 0xe6, 0x34, 0xbd, 0xbc,
+	0x05, 0x9a, 0x63, 0xfc, 0x21, 0x78, 0xe6, 0xcb, 0x41, 0xdf, 0xec, 0x90,
+	0x43, 0x7b, 0x1c, 0xfb, 0x35, 0x7a, 0x38, 0x17, 0xff, 0x4f, 0x07, 0xe5,
+	0x70, 0xbd, 0xcc, 0x2f, 0xb6, 0x35, 0x2d, 0x06, 0x31, 0x5e, 0x47, 0x9e,
+	0xc3, 0x5d, 0x54, 0x4c, 0xae, 0xbf, 0x43, 0x2a, 0x0e, 0x6d, 0x5b, 0xca,
+	0xef, 0x21, 0x29, 0x63, 0x9e, 0x8a, 0xd3, 0xf4, 0x8d, 0x05, 0x72, 0xb6,
+	0x62, 0x3e, 0x98, 0x77, 0xba, 0xbc, 0x1f, 0xef, 0xa8, 0x73, 0x80, 0x99,
+	0xa6, 0xf8, 0x7e, 0x11, 0x65, 0xfa, 0x46, 0xe6, 0xf0, 0x4c, 0x84, 0x75,
+	0xaf, 0xf5, 0x6b, 0xac, 0x7e, 0xf2, 0x41, 0xbf, 0x99, 0xb2, 0x95, 0xcf,
+	0x44, 0xb6, 0x94, 0xf1, 0x8b, 0xf5, 0x7e, 0xca, 0xdc, 0xfd, 0x36, 0x7f,
+	0x51, 0xf9, 0x8b, 0xa9, 0x7d, 0x0a, 0xe1, 0xb7, 0x3d, 0xf2, 0x94, 0xc9,
+	0xdc, 0xf5, 0xb4, 0x1a, 0x2b, 0xfd, 0x34, 0xcc, 0xd3, 0xdd, 0x52, 0xfb,
+	0x2b, 0x6f, 0xf7, 0x07, 0x79, 0xee, 0x1c, 0x7b, 0x67, 0x6e, 0xfb, 0x4e,
+	0x3a, 0x61, 0x8e, 0x7b, 0x3b, 0x70, 0xab, 0x56, 0x62, 0xe0, 0x41, 0xc8,
+	0x3b, 0xbb, 0x45, 0xf3, 0x63, 0xa1, 0xf6, 0x2f, 0x7f, 0x43, 0xf3, 0x73,
+	0xd3, 0xc7, 0xf0, 0xdb, 0x7e, 0xda, 0xb6, 0x94, 0x1b, 0x97, 0x02, 0xbf,
+	0x91, 0xb6, 0xa1, 0x21, 0x2b, 0x50, 0x47, 0x5e, 0x05, 0x9f, 0x6c, 0xb7,
+	0xe5, 0xdf, 0x7f, 0x01, 0x99, 0xe7, 0xd3, 0x46, 0x40, 0x67, 0x00, 0x00,
+	0x00 };
+static u32 bnx2_RXP_b09FwData[(0x0/4) + 1] = { 0x0 };
+static u32 bnx2_RXP_b09FwRodata[(0x278/4) + 1] = {
+	0x08003fa4, 0x08003ea4, 0x08003f48, 0x08003f60, 0x08003f78, 0x08003f98,
+	0x08003fa4, 0x08003fa4, 0x08003eac, 0x00000000, 0x080049d4, 0x08004a0c,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004a44, 0x08004c08,
+	0x08004b50, 0x08004b88, 0x08004c08, 0x08004ad8, 0x08004c08, 0x08004c08,
+	0x08004b88, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004bc8,
+	0x08004c08, 0x08004bc8, 0x08004b50, 0x08004c08, 0x08004c08, 0x08004bc8,
+	0x08004bc8, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08, 0x08004c08,
+	0x08004ab4, 0x00000000, 0x0800602c, 0x08006044, 0x08006044, 0x08006044,
+	0x0800602c, 0x08006044, 0x08006044, 0x08006044, 0x0800602c, 0x08006044,
+	0x08006044, 0x08006044, 0x0800602c, 0x08006044, 0x08006044, 0x08006044,
+	0x08006038, 0x00000000, 0x00000000 };
+static u32 bnx2_RXP_b09FwBss[(0x13dc/4) + 1] = { 0x0 };
+static u32 bnx2_RXP_b09FwSbss[(0x2c/4) + 1] = { 0x0 };
+
+static struct fw_info bnx2_rxp_fw_09 = {
+	.ver_major			= 0x1,
+	.ver_minor			= 0x0,
+	.ver_fix			= 0x0,
+
+	.start_addr			= 0x08003184,
+
+	.text_addr			= 0x08000000,
+	.text_len			= 0x673c,
+	.text_index			= 0x0,
+	.gz_text			= bnx2_RXP_b09FwText,
+	.gz_text_len			= sizeof(bnx2_RXP_b09FwText),
+
+	.data_addr			= 0x080069e0,
+	.data_len			= 0x0,
+	.data_index			= 0x0,
+	.data				= bnx2_RXP_b09FwData,
+
+	.sbss_addr			= 0x080069e0,
+	.sbss_len			= 0x2c,
+	.sbss_index			= 0x0,
+	.sbss				= bnx2_RXP_b09FwSbss,
+
+	.bss_addr			= 0x08006a10,
+	.bss_len			= 0x13dc,
+	.bss_index			= 0x0,
+	.bss				= bnx2_RXP_b09FwBss,
+
+	.rodata_addr			= 0x08006740,
+	.rodata_len			= 0x278,
+	.rodata_index			= 0x0,
+	.rodata				= bnx2_RXP_b09FwRodata,
+};
+
+static u8 bnx2_TPAT_b09FwText[] = {
+	0x1f, 0x8b, 0x08, 0x08, 0xdb, 0xfd, 0x2f, 0x45, 0x00, 0x03, 0x74, 0x65,
+	0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xc5, 0x58, 0x5d, 0x6c,
+	0x1c, 0x57, 0x15, 0x3e, 0xf3, 0xbb, 0x13, 0x77, 0xed, 0xbd, 0x49, 0x97,
+	0x6a, 0x13, 0xb9, 0x74, 0xc6, 0x1e, 0x3b, 0x8b, 0x1c, 0x35, 0x93, 0xb0,
+	0x24, 0x16, 0x5a, 0xd1, 0xc9, 0xcc, 0xae, 0x6b, 0xe5, 0x29, 0x86, 0xbc,
+	0xf1, 0xb2, 0xac, 0xed, 0x46, 0x54, 0x48, 0x4d, 0x51, 0x84, 0x22, 0x81,
+	0x94, 0x65, 0x76, 0x53, 0x40, 0x5a, 0x65, 0xc1, 0xa0, 0x04, 0x21, 0x84,
+	0x22, 0x9b, 0x66, 0x91, 0x58, 0x3c, 0x4d, 0xe9, 0x6b, 0x94, 0xbc, 0x90,
+	0x96, 0x17, 0x9e, 0x4b, 0x9e, 0xac, 0x02, 0x12, 0x0f, 0xa8, 0x8a, 0x78,
+	0x40, 0x15, 0x0d, 0x1e, 0xbe, 0x33, 0x3f, 0x9b, 0x5d, 0xd7, 0x29, 0x79,
+	0xa8, 0x84, 0xa5, 0xf1, 0xcc, 0xfd, 0x39, 0xf7, 0xe7, 0x7c, 0xdf, 0x77,
+	0xee, 0xb9, 0x5b, 0x92, 0x69, 0x82, 0xd2, 0xbf, 0x49, 0x3c, 0x97, 0xbe,
+	0x71, 0xf1, 0xd2, 0xe2, 0x8b, 0x27, 0x4d, 0x3a, 0x71, 0xe2, 0x45, 0xe9,
+	0x19, 0x43, 0xa6, 0xcf, 0xe0, 0x4f, 0x21, 0x12, 0xd9, 0xf8, 0xfc, 0x90,
+	0x21, 0x57, 0x6f, 0x4e, 0x7b, 0x36, 0x19, 0x4a, 0xd5, 0x79, 0x61, 0xd5,
+	0x26, 0x72, 0x07, 0x0b, 0xa6, 0x4f, 0xff, 0x89, 0x5a, 0x45, 0x95, 0xb8,
+	0xfe, 0xf9, 0xea, 0xa3, 0xe3, 0x77, 0x4e, 0x5b, 0x0f, 0x6f, 0x2a, 0x64,
+	0x88, 0x6a, 0xc3, 0x10, 0xf3, 0x64, 0x4c, 0xc3, 0xe6, 0x97, 0x47, 0x57,
+	0x34, 0x9a, 0xca, 0xc6, 0x12, 0x14, 0xf4, 0x0c, 0xaa, 0x77, 0x31, 0x8e,
+	0x7d, 0x59, 0xf2, 0x43, 0x55, 0xf2, 0x6f, 0x18, 0x24, 0x57, 0x5d, 0xc9,
+	0x0b, 0x6d, 0xb4, 0x49, 0xe4, 0x39, 0x39, 0x72, 0x45, 0x14, 0x7d, 0xd3,
+	0x91, 0x49, 0xb6, 0x77, 0xa3, 0xd9, 0xb9, 0x25, 0xc9, 0xeb, 0x2f, 0x4b,
+	0x7e, 0xdf, 0xe3, 0x7d, 0x63, 0x1d, 0x4b, 0x92, 0xdb, 0xe7, 0x77, 0xd5,
+	0xf0, 0xbb, 0x53, 0xd4, 0x28, 0x52, 0x41, 0xb6, 0xd9, 0xd6, 0x24, 0xdf,
+	0x59, 0x28, 0x29, 0x34, 0x8b, 0xe7, 0x00, 0xad, 0x3b, 0x94, 0xf7, 0x1c,
+	0x52, 0x15, 0x5b, 0x26, 0xbf, 0x28, 0xd1, 0xaf, 0x2b, 0x1a, 0x9e, 0xb3,
+	0x52, 0xad, 0xbf, 0x96, 0x8e, 0x53, 0xa4, 0x36, 0xd6, 0xd2, 0x2c, 0xf2,
+	0xda, 0x12, 0x7b, 0xcf, 0x59, 0x10, 0x32, 0xcd, 0xe2, 0x99, 0xc4, 0x77,
+	0x13, 0xfd, 0x34, 0xf2, 0x2a, 0x7b, 0xdb, 0x0e, 0xe0, 0x1b, 0xeb, 0xc4,
+	0x58, 0x5e, 0xbc, 0x0e, 0x13, 0xeb, 0xb0, 0xa9, 0xd3, 0x5b, 0xc6, 0x3e,
+	0xe6, 0x4a, 0x4d, 0xd2, 0xa9, 0x13, 0xaf, 0x7d, 0x92, 0x02, 0xa1, 0x50,
+	0x70, 0x4c, 0x23, 0xf7, 0x9c, 0x8a, 0xf2, 0x21, 0x6a, 0x09, 0x09, 0x7d,
+	0x3a, 0x29, 0x7e, 0x39, 0xb4, 0xeb, 0xa8, 0x2f, 0x50, 0x50, 0x3c, 0x28,
+	0xc9, 0xd5, 0xef, 0xa1, 0x7e, 0x4e, 0x34, 0xe9, 0xbb, 0x78, 0x4b, 0x28,
+	0x1f, 0xe4, 0xf1, 0x50, 0x96, 0x48, 0xb1, 0x49, 0x78, 0xa1, 0x49, 0xed,
+	0x30, 0xb3, 0xe5, 0xfa, 0xa4, 0xae, 0x15, 0xee, 0xc5, 0x0e, 0xfd, 0x7a,
+	0x75, 0x6a, 0x08, 0x6a, 0xa9, 0x55, 0xf4, 0xe9, 0xd9, 0xa2, 0x06, 0x9c,
+	0xdc, 0x18, 0xcf, 0x97, 0xb8, 0x9e, 0xff, 0x50, 0x6f, 0x92, 0x52, 0xb5,
+	0x85, 0x4f, 0x5f, 0xa6, 0xa4, 0x8d, 0xf7, 0x29, 0x63, 0x6f, 0xa7, 0xd2,
+	0x72, 0x51, 0x78, 0x37, 0xbe, 0x48, 0x6e, 0xec, 0x1f, 0x03, 0xdf, 0x02,
+	0x7b, 0xd4, 0x81, 0x75, 0xe0, 0xca, 0xd4, 0x2a, 0x19, 0x64, 0x2d, 0xae,
+	0xa1, 0xe5, 0x6f, 0x5d, 0x05, 0x7e, 0x67, 0xdc, 0xd4, 0xd4, 0x8e, 0x71,
+	0xfe, 0x23, 0xd6, 0xd9, 0x12, 0x06, 0xf0, 0x6e, 0x9c, 0x8f, 0xa2, 0x37,
+	0x9d, 0x28, 0xd2, 0xab, 0x76, 0xf9, 0x16, 0x2d, 0x94, 0x34, 0x9a, 0x17,
+	0x78, 0xc3, 0x8f, 0x36, 0x7c, 0xa5, 0x65, 0xeb, 0xc9, 0x78, 0x86, 0xbf,
+	0xcb, 0x12, 0x96, 0x42, 0x1f, 0x74, 0xdf, 0x63, 0x7f, 0x94, 0x97, 0x62,
+	0x9b, 0x28, 0xda, 0x5c, 0xfc, 0x34, 0x9b, 0xef, 0xa7, 0x36, 0x51, 0x54,
+	0xaf, 0xf0, 0xbc, 0x16, 0xf6, 0xcc, 0x5c, 0x25, 0xaa, 0x0f, 0x1c, 0xa3,
+	0xd9, 0xc5, 0xfa, 0x6c, 0xbc, 0x07, 0x25, 0xec, 0xc1, 0x2a, 0x9b, 0x92,
+	0x41, 0x81, 0x1d, 0xbd, 0x00, 0x7e, 0xb8, 0xbe, 0x6d, 0xbd, 0xef, 0x2b,
+	0x05, 0xda, 0x72, 0xf2, 0xd4, 0x09, 0x4b, 0x14, 0x84, 0x1d, 0xf2, 0x42,
+	0x19, 0x73, 0x14, 0x68, 0xd3, 0x7e, 0x18, 0xd5, 0x1d, 0x07, 0x7e, 0x21,
+	0xb6, 0x2b, 0xd5, 0x69, 0x1a, 0xed, 0x0b, 0x62, 0x8d, 0x1c, 0x60, 0x21,
+	0xc3, 0x37, 0xb3, 0xf1, 0x77, 0x10, 0x3a, 0x68, 0xa7, 0x96, 0x5c, 0xb1,
+	0x44, 0x40, 0x56, 0xc9, 0x53, 0x48, 0xc8, 0x55, 0x81, 0x3e, 0x2d, 0xaa,
+	0x85, 0x06, 0xed, 0x28, 0x97, 0x63, 0x7e, 0xb7, 0x7b, 0x3b, 0xd1, 0x9d,
+	0xa3, 0x25, 0xba, 0x1b, 0x16, 0xe9, 0x76, 0x48, 0x72, 0xd3, 0x01, 0x37,
+	0x8a, 0x82, 0xde, 0x0a, 0x47, 0xf7, 0xf2, 0x1b, 0xec, 0x25, 0x38, 0xa2,
+	0x40, 0x83, 0xab, 0xce, 0x3d, 0x30, 0xc8, 0x02, 0x46, 0x2d, 0xec, 0x3d,
+	0x7b, 0xf3, 0xbe, 0x76, 0xa6, 0x57, 0x6d, 0xeb, 0x87, 0x3e, 0xa3, 0x76,
+	0x4d, 0x43, 0xed, 0x5e, 0x7f, 0x0c, 0x30, 0x86, 0xa0, 0xab, 0xd0, 0x93,
+	0x0c, 0xbf, 0xcc, 0x6c, 0x1b, 0xd4, 0xef, 0xe6, 0xc8, 0xdc, 0x54, 0xa9,
+	0xd9, 0x2b, 0x92, 0x33, 0x6f, 0x99, 0x24, 0xcb, 0x45, 0x99, 0x54, 0x9a,
+	0xd9, 0x8c, 0x68, 0x09, 0xeb, 0xb8, 0x6f, 0xff, 0x48, 0xa7, 0xa9, 0xc0,
+	0xd1, 0x89, 0xfb, 0x18, 0x34, 0x73, 0xcb, 0x90, 0xfc, 0x1e, 0xef, 0x83,
+	0x7d, 0x6e, 0xa4, 0x3e, 0x57, 0x25, 0xef, 0x46, 0x8e, 0x66, 0x37, 0xfe,
+	0x11, 0x79, 0x36, 0x7c, 0x0d, 0x9e, 0xaf, 0x56, 0xbe, 0xa0, 0xd0, 0x04,
+	0xea, 0x36, 0xb9, 0xed, 0x61, 0x5a, 0xcf, 0x63, 0x44, 0x91, 0xe7, 0x3c,
+	0x4b, 0x1e, 0xf3, 0xff, 0x3c, 0xdb, 0xe4, 0x68, 0x66, 0x83, 0x75, 0x83,
+	0xf7, 0x26, 0x97, 0x79, 0x6d, 0x07, 0xa8, 0x89, 0x1d, 0x35, 0xcb, 0x45,
+	0xf8, 0x41, 0x8e, 0x35, 0xd2, 0xc4, 0x8e, 0x65, 0x7b, 0x02, 0x6f, 0x9e,
+	0xef, 0xac, 0x92, 0xf0, 0x9d, 0xe3, 0x46, 0x9e, 0x7c, 0xe0, 0xab, 0x62,
+	0x3d, 0x6b, 0x34, 0x57, 0x5a, 0x8f, 0xdb, 0x50, 0x37, 0xe0, 0x36, 0xb1,
+	0xa7, 0x0d, 0xe5, 0x41, 0xb6, 0x06, 0x70, 0xda, 0x6e, 0x63, 0x16, 0x2d,
+	0xde, 0x6b, 0xdd, 0xe1, 0xfe, 0xdc, 0xb7, 0x55, 0xd6, 0xc8, 0x2a, 0x6f,
+	0x62, 0xf4, 0x7e, 0x17, 0xfb, 0xbd, 0xce, 0xb1, 0xc8, 0x36, 0xff, 0x4a,
+	0xdc, 0x7f, 0x16, 0x7b, 0x9e, 0x5b, 0x6c, 0x73, 0xdb, 0x40, 0x23, 0x7b,
+	0xa3, 0x25, 0x54, 0xf8, 0x5f, 0x86, 0xf3, 0xfd, 0x1f, 0xff, 0x2b, 0xd2,
+	0xaa, 0xe0, 0x74, 0xa5, 0x00, 0x7c, 0x2c, 0xb3, 0x0d, 0xbd, 0xdb, 0x18,
+	0x37, 0x70, 0x14, 0xd8, 0x25, 0x38, 0x71, 0xbf, 0xa5, 0x6e, 0x44, 0xed,
+	0x78, 0xae, 0x2b, 0x3c, 0x17, 0x62, 0x92, 0xbd, 0xf8, 0x07, 0x70, 0xa3,
+	0x49, 0x79, 0x9a, 0xdf, 0xce, 0xd3, 0x85, 0x41, 0x9e, 0x66, 0xae, 0xe9,
+	0xf0, 0x43, 0x14, 0x75, 0x2a, 0xac, 0x51, 0xe0, 0x6d, 0x73, 0x3f, 0xab,
+	0xa4, 0xc8, 0xbc, 0x0e, 0xb4, 0x6f, 0x13, 0xad, 0x0d, 0x74, 0xf8, 0x4d,
+	0x1d, 0x19, 0x5b, 0xa6, 0x97, 0x7f, 0x46, 0xf4, 0xf2, 0x80, 0x6d, 0x79,
+	0xfc, 0xc4, 0xa6, 0x89, 0x3d, 0xcb, 0xc0, 0xfc, 0xc2, 0x40, 0x46, 0xbc,
+	0x40, 0x3c, 0xed, 0x7b, 0x88, 0x93, 0x35, 0x3c, 0x4b, 0x88, 0x9d, 0x8c,
+	0x0d, 0xc7, 0x91, 0x5d, 0xe0, 0xb3, 0x8c, 0xb6, 0xb3, 0xa8, 0x4b, 0xf4,
+	0xae, 0xd8, 0x3a, 0xd5, 0x9c, 0x49, 0x6a, 0x67, 0xb1, 0x4a, 0x70, 0xac,
+	0x3a, 0x08, 0x4e, 0x1d, 0x40, 0xfc, 0xf9, 0x9d, 0x32, 0x1e, 0xab, 0x10,
+	0xd3, 0x8a, 0x87, 0x11, 0x9b, 0xfa, 0xa8, 0xe7, 0xf1, 0x6e, 0xe1, 0x7d,
+	0x00, 0xe5, 0xc3, 0xe8, 0x3b, 0x1a, 0xa7, 0x32, 0xbb, 0x27, 0xc5, 0x28,
+	0xf0, 0x6e, 0xc3, 0x40, 0x7f, 0x13, 0xba, 0x61, 0x7f, 0xe7, 0x10, 0x3f,
+	0xd8, 0xe7, 0x39, 0xf8, 0x54, 0xc7, 0xdc, 0x82, 0x66, 0xb7, 0xa9, 0xa5,
+	0xa4, 0xf1, 0xcb, 0x1f, 0xc6, 0xaf, 0x52, 0xcc, 0x83, 0x20, 0x14, 0xb0,
+	0x61, 0xfd, 0x66, 0x7a, 0x65, 0xec, 0xc8, 0xf5, 0xa0, 0x65, 0x4f, 0x89,
+	0xa2, 0x55, 0xa7, 0x40, 0x4d, 0xe0, 0xee, 0x42, 0xc3, 0x4d, 0x68, 0xd8,
+	0x1f, 0xd1, 0xb0, 0xff, 0x3f, 0x35, 0x0c, 0x7d, 0x42, 0x23, 0xb7, 0xc1,
+	0xa9, 0xb7, 0x7a, 0xfb, 0xe9, 0x99, 0xb5, 0xcc, 0x9a, 0x36, 0xe9, 0xce,
+	0xd1, 0xa7, 0xd5, 0x74, 0x49, 0x7e, 0x4a, 0x4d, 0xb7, 0x58, 0xd3, 0x2a,
+	0x6b, 0xba, 0xb8, 0x57, 0xd3, 0xd3, 0x18, 0x23, 0xd1, 0xe6, 0x19, 0xb5,
+	0x48, 0xda, 0x3c, 0xf0, 0xd8, 0xc8, 0x93, 0x72, 0xed, 0x31, 0xef, 0x98,
+	0xcb, 0xfe, 0x00, 0xff, 0xb6, 0x35, 0xb4, 0x49, 0xe3, 0xf5, 0x88, 0x81,
+	0x6a, 0xd5, 0x2a, 0xad, 0xc5, 0x7d, 0x54, 0xd2, 0xe1, 0xff, 0xd7, 0x8f,
+	0x5a, 0xa6, 0x29, 0x8f, 0x6a, 0x1f, 0xea, 0xdf, 0x88, 0xae, 0x68, 0x55,
+	0x9e, 0xa7, 0x65, 0x82, 0xf3, 0xe6, 0x4f, 0x80, 0x55, 0xbb, 0xcb, 0x7c,
+	0xb7, 0x45, 0x3d, 0xe6, 0x19, 0xca, 0xd0, 0x84, 0x06, 0xde, 0xe6, 0xd0,
+	0x4f, 0xdd, 0x48, 0x74, 0x74, 0x1b, 0xe3, 0x6e, 0x75, 0x99, 0x67, 0x06,
+	0xe9, 0xd7, 0xed, 0xd2, 0x85, 0x38, 0x06, 0xcf, 0x8a, 0x25, 0x62, 0x0d,
+	0xf2, 0xb9, 0x88, 0xf6, 0x41, 0x8e, 0x94, 0x58, 0xf7, 0x13, 0xa9, 0xee,
+	0x9f, 0x87, 0xaf, 0x26, 0x50, 0x66, 0xed, 0x1f, 0x4e, 0xb5, 0x3f, 0x85,
+	0x37, 0xd7, 0xad, 0xa8, 0x09, 0x87, 0xc0, 0xc7, 0x0d, 0xc6, 0x37, 0x8f,
+	0x58, 0xc7, 0xf3, 0xff, 0x33, 0x5a, 0xb5, 0x19, 0x63, 0xdb, 0xfc, 0x01,
+	0xcd, 0x41, 0x7f, 0xa8, 0xdf, 0xe6, 0xbe, 0x6c, 0x93, 0xf5, 0x15, 0x69,
+	0xdf, 0x0f, 0xf7, 0xf4, 0x45, 0xfd, 0x36, 0xf7, 0x63, 0x7d, 0x1c, 0x22,
+	0xe5, 0x3a, 0x9f, 0xdb, 0x1e, 0xeb, 0x03, 0x76, 0x35, 0xd4, 0x71, 0x6e,
+	0xc1, 0xf6, 0x7c, 0x86, 0xf3, 0x3a, 0x39, 0xef, 0xe0, 0x73, 0x7e, 0xcf,
+	0x79, 0x3e, 0xd4, 0xc8, 0x19, 0xf0, 0xfe, 0x3b, 0xea, 0x27, 0x35, 0xb2,
+	0x02, 0x4d, 0x5c, 0x54, 0x13, 0x8d, 0xbc, 0x86, 0xf7, 0x19, 0x94, 0x57,
+	0xf6, 0x68, 0x24, 0xb3, 0x7b, 0xf2, 0x39, 0x1e, 0xf4, 0x4a, 0xf1, 0x99,
+	0xcb, 0xf3, 0x29, 0x1b, 0xd4, 0xd2, 0x52, 0x3d, 0xd4, 0x87, 0x7a, 0x98,
+	0x40, 0xcc, 0xc8, 0xa5, 0x5c, 0xc7, 0xdb, 0xfe, 0x48, 0xf1, 0x1d, 0x4b,
+	0xb4, 0x89, 0xb5, 0x31, 0x7a, 0x9e, 0xfd, 0xbf, 0xf4, 0x41, 0xe0, 0x51,
+	0x3c, 0x37, 0x72, 0x11, 0x3e, 0x17, 0xa2, 0xe8, 0x15, 0x07, 0xed, 0x59,
+	0x4e, 0x12, 0x63, 0x9f, 0xc3, 0xd9, 0xcb, 0x78, 0x20, 0x0f, 0xb4, 0x67,
+	0xa1, 0x07, 0x8e, 0x05, 0xbb, 0xd1, 0x96, 0xed, 0xa1, 0xae, 0x06, 0xff,
+	0x33, 0x26, 0xcb, 0xd2, 0x52, 0xdf, 0x60, 0x3b, 0xe8, 0x6d, 0xbf, 0x5c,
+	0x4c, 0x87, 0xae, 0x1e, 0xe3, 0xc4, 0x3c, 0x6a, 0x8e, 0xe0, 0xd4, 0x88,
+	0x71, 0xda, 0x19, 0xe2, 0xd4, 0x4c, 0x71, 0x6a, 0xc6, 0x38, 0x3d, 0x48,
+	0x71, 0xfa, 0xf3, 0x13, 0x70, 0xda, 0x79, 0x0a, 0x9c, 0x0c, 0xda, 0xb2,
+	0x4b, 0x38, 0x6f, 0xf5, 0x38, 0x77, 0xbd, 0xef, 0xec, 0x97, 0x7b, 0xb1,
+	0xdf, 0xc7, 0xb0, 0x8a, 0x18, 0xab, 0x2d, 0x1a, 0xcd, 0x43, 0x2c, 0xf3,
+	0x1e, 0x15, 0x70, 0x6e, 0xe4, 0xe9, 0xea, 0x9e, 0x5c, 0x24, 0x00, 0x4e,
+	0xb5, 0x14, 0xa7, 0xab, 0xc0, 0xa9, 0x96, 0xe2, 0xb4, 0x3e, 0x82, 0xd3,
+	0xfa, 0x18, 0x4e, 0x1c, 0x53, 0x2a, 0xc6, 0x7a, 0x37, 0xc3, 0x28, 0xc3,
+	0x47, 0xa7, 0x9b, 0x62, 0x0a, 0xfb, 0x3f, 0x4e, 0xed, 0x9f, 0xaa, 0x9c,
+	0xff, 0x02, 0xbb, 0x97, 0x54, 0x39, 0x3e, 0x17, 0xf8, 0xfb, 0x71, 0xbe,
+	0x82, 0xb9, 0x5c, 0xcf, 0xe1, 0x3d, 0x21, 0xcf, 0xb5, 0x47, 0x63, 0xd1,
+	0x07, 0x88, 0x45, 0x5c, 0xc7, 0xfd, 0x54, 0xa9, 0x06, 0xcd, 0x2b, 0xc8,
+	0xe1, 0xfd, 0x61, 0x0e, 0x9f, 0xf8, 0xe1, 0x6a, 0x9a, 0xc3, 0x6f, 0xd9,
+	0x9c, 0xc3, 0x9f, 0xd0, 0x68, 0x62, 0x39, 0xc5, 0x93, 0x79, 0x3d, 0x89,
+	0xb6, 0xb3, 0x31, 0xee, 0x6d, 0xc4, 0xf2, 0x55, 0xf8, 0xa0, 0x19, 0xf3,
+	0x13, 0x79, 0x57, 0xca, 0x5d, 0xe4, 0xbb, 0xe4, 0x87, 0x09, 0x4f, 0x3f,
+	0xdb, 0x5c, 0xec, 0xef, 0x88, 0xd9, 0x46, 0x43, 0xc5, 0x1d, 0xe0, 0x6e,
+	0x18, 0xc7, 0xea, 0x73, 0x41, 0x97, 0x5a, 0x47, 0xaa, 0x57, 0x22, 0xe0,
+	0xee, 0x7e, 0xfd, 0x34, 0x9f, 0x39, 0xf9, 0x45, 0xaf, 0x82, 0xfa, 0x81,
+	0x41, 0xc8, 0x83, 0x70, 0xa7, 0xa1, 0x96, 0x77, 0x5a, 0x42, 0xbe, 0x83,
+	0x32, 0x6c, 0x82, 0x70, 0xba, 0x21, 0x57, 0x4b, 0xe0, 0x43, 0x8b, 0x5c,
+	0xac, 0xd3, 0x0d, 0xe3, 0x7b, 0x4d, 0x43, 0xa9, 0x1a, 0xc8, 0x37, 0xc9,
+	0xc0, 0x99, 0x0f, 0x9f, 0x98, 0x46, 0x7b, 0x80, 0x9c, 0x08, 0x79, 0x80,
+	0xb7, 0x08, 0xbf, 0x1c, 0x03, 0x76, 0xa1, 0x0a, 0xdb, 0x6f, 0xe9, 0xc9,
+	0x9d, 0x88, 0xc8, 0x8b, 0xfd, 0xf5, 0x71, 0xca, 0x91, 0x38, 0xe7, 0x92,
+	0x6a, 0x3d, 0x32, 0x9b, 0x0e, 0xb8, 0x8e, 0x33, 0xa5, 0x13, 0x72, 0x5e,
+	0x7d, 0xcc, 0x90, 0xaf, 0x71, 0x3c, 0x7f, 0x00, 0x1f, 0xe2, 0x7b, 0x9b,
+	0xcf, 0x19, 0x85, 0x73, 0x73, 0xdc, 0x7d, 0xca, 0x88, 0x37, 0x34, 0x89,
+	0xd8, 0x87, 0xd8, 0x3b, 0xcd, 0x58, 0xb9, 0xc9, 0x19, 0xc4, 0xe3, 0x1d,
+	0x97, 0x93, 0x79, 0xfe, 0xa4, 0x25, 0x1c, 0xc6, 0x7d, 0x07, 0xfe, 0x5b,
+	0xed, 0x39, 0x1c, 0x73, 0x3f, 0xaf, 0xd0, 0x43, 0x8a, 0x39, 0x29, 0x4e,
+	0x20, 0x16, 0x9f, 0x86, 0x8d, 0x1b, 0xeb, 0x31, 0xc9, 0xbd, 0x32, 0x9b,
+	0x0f, 0xf7, 0x8c, 0xf1, 0x17, 0x65, 0xbc, 0xec, 0x82, 0xd3, 0x95, 0x74,
+	0xbe, 0x51, 0x8e, 0x2c, 0x20, 0xe5, 0x79, 0xa0, 0x0d, 0xf3, 0xb7, 0xa2,
+	0x8e, 0x7e, 0xbc, 0x46, 0xd6, 0x25, 0xdb, 0x1c, 0xd1, 0xc6, 0xc7, 0x99,
+	0xdd, 0x67, 0x8c, 0xea, 0xc8, 0x18, 0x45, 0xde, 0x9b, 0x68, 0x3a, 0xcf,
+	0xa4, 0xf7, 0x0c, 0x8e, 0x2d, 0x02, 0x3a, 0x95, 0x9f, 0x93, 0xb1, 0x0f,
+	0x0f, 0x7b, 0xf6, 0xe3, 0xfa, 0x5f, 0xe9, 0xe3, 0xe3, 0xfe, 0x56, 0x4d,
+	0xca, 0xc7, 0x12, 0x6e, 0xda, 0x78, 0x87, 0x0f, 0x46, 0xd6, 0xae, 0xed,
+	0x33, 0xef, 0xd7, 0x38, 0x5d, 0x43, 0xbc, 0x21, 0x57, 0xc1, 0x1d, 0xcc,
+	0x27, 0x7c, 0x87, 0xaf, 0x67, 0x3e, 0x04, 0x6f, 0xe8, 0x5c, 0x3b, 0xe5,
+	0x8b, 0x9c, 0xf0, 0x85, 0xf3, 0xba, 0xc5, 0x55, 0xf0, 0xa5, 0x0d, 0xbe,
+	0xc0, 0xae, 0xa1, 0x55, 0xa7, 0xc1, 0x05, 0x8e, 0x4d, 0x28, 0x87, 0xcc,
+	0x1d, 0xe6, 0x0a, 0xf3, 0xe6, 0x31, 0x5f, 0x5e, 0xe9, 0x1a, 0xc6, 0xe6,
+	0xa7, 0x70, 0xe5, 0x8d, 0x98, 0x2b, 0xcc, 0xd9, 0x24, 0x7e, 0x74, 0x80,
+	0x55, 0x90, 0xc6, 0x8f, 0x00, 0xf1, 0xa3, 0xc6, 0xf9, 0x4f, 0x1c, 0x0b,
+	0x12, 0xfd, 0xac, 0x41, 0x3f, 0x35, 0x85, 0xf3, 0x23, 0xd6, 0x0e, 0xdb,
+	0xb1, 0x7e, 0xd8, 0xae, 0x90, 0xda, 0x8d, 0xc7, 0x91, 0x76, 0xcf, 0x32,
+	0xb3, 0x38, 0xd2, 0x86, 0x76, 0x3a, 0xa9, 0x8e, 0xda, 0xa9, 0x8e, 0xd0,
+	0xa7, 0xa5, 0x54, 0xf8, 0x4c, 0xb0, 0x4c, 0x1f, 0xf1, 0xa3, 0x13, 0x8f,
+	0xd9, 0xa2, 0xe4, 0x2e, 0xc3, 0xda, 0xe6, 0xb8, 0x3b, 0x12, 0x6f, 0xd3,
+	0x7b, 0x6e, 0x23, 0xbe, 0xe7, 0x7e, 0x45, 0x1f, 0x8f, 0xb7, 0x38, 0x6b,
+	0xe2, 0x7b, 0xee, 0x29, 0x9d, 0xef, 0xb9, 0x01, 0x7d, 0x49, 0x1f, 0xbd,
+	0xe7, 0x06, 0x63, 0xf7, 0xdc, 0xcc, 0x96, 0xeb, 0xf7, 0x8b, 0xbb, 0x99,
+	0x4f, 0x38, 0xf6, 0x32, 0x9f, 0xf6, 0xcb, 0x15, 0xb3, 0x3e, 0x1c, 0x93,
+	0x58, 0xef, 0x1c, 0xcb, 0x92, 0xdc, 0xec, 0x6e, 0x98, 0xe9, 0xe2, 0x55,
+	0xcc, 0x83, 0x72, 0x6f, 0x3f, 0x5d, 0x18, 0xa9, 0x2e, 0x26, 0x13, 0x9b,
+	0xde, 0xa8, 0x36, 0x5e, 0xd5, 0xc7, 0xb5, 0x91, 0x8d, 0x93, 0x69, 0x23,
+	0x19, 0x73, 0x47, 0x29, 0xe1, 0x0c, 0x2c, 0x23, 0x1e, 0x09, 0xbe, 0xa3,
+	0x21, 0x5e, 0x54, 0xf3, 0xb8, 0xa7, 0x14, 0x78, 0xec, 0x76, 0xf8, 0x2c,
+	0x35, 0x8a, 0x8c, 0x0b, 0xaf, 0xff, 0x61, 0x7c, 0x7f, 0xc0, 0xba, 0x0b,
+	0x01, 0xff, 0xfe, 0xf1, 0x09, 0x3e, 0xbe, 0x06, 0x3e, 0x66, 0xfb, 0x19,
+	0xad, 0xbf, 0x34, 0x52, 0x5f, 0x4e, 0x31, 0x4f, 0x7c, 0x7e, 0x2f, 0xd5,
+	0xc8, 0x26, 0x72, 0xb7, 0xfb, 0xc8, 0x8b, 0xde, 0x44, 0xfc, 0x0e, 0x06,
+	0x1f, 0x47, 0xf7, 0x8a, 0x2a, 0x75, 0x86, 0x36, 0xbf, 0xc0, 0xba, 0x2d,
+	0x71, 0x13, 0x5f, 0x6f, 0x0c, 0xb2, 0xb1, 0xb9, 0x9d, 0xeb, 0xfe, 0x8d,
+	0xf3, 0x19, 0x79, 0xdf, 0xb0, 0xef, 0xfb, 0x11, 0xe7, 0xbb, 0x77, 0x81,
+	0xc5, 0x3b, 0xe1, 0x34, 0xfd, 0x1e, 0x1c, 0x7b, 0x3b, 0xce, 0x79, 0x93,
+	0x5c, 0x17, 0xfe, 0xc3, 0x99, 0xc7, 0x67, 0xbd, 0xf7, 0x39, 0x99, 0x2e,
+	0xd3, 0x57, 0x1d, 0xae, 0x93, 0xa9, 0x7e, 0x2a, 0x8a, 0x2e, 0xe2, 0xdc,
+	0x5f, 0x19, 0x3b, 0xf7, 0x71, 0x07, 0x3c, 0xc9, 0xf9, 0x7f, 0x96, 0xf3,
+	0xef, 0x46, 0x33, 0xf3, 0xd6, 0x4d, 0x97, 0x5c, 0xa9, 0xde, 0xe7, 0x7c,
+	0x6c, 0x98, 0x8b, 0x11, 0x1d, 0x7a, 0x14, 0xc9, 0xf3, 0x7c, 0x36, 0xbd,
+	0x9b, 0xfa, 0x1c, 0x6d, 0x37, 0x1e, 0xe1, 0x1e, 0x53, 0x8b, 0x7f, 0x17,
+	0x72, 0xfb, 0x3c, 0x0f, 0x97, 0xf1, 0x0e, 0x39, 0x47, 0x78, 0xd2, 0x6f,
+	0x35, 0x2a, 0xf0, 0xb5, 0xcc, 0x75, 0x85, 0xe2, 0x7b, 0x21, 0xee, 0x6e,
+	0x3f, 0x6f, 0x53, 0x12, 0x3b, 0x6a, 0xce, 0x39, 0xac, 0x05, 0x98, 0x88,
+	0x06, 0x30, 0x9e, 0x47, 0xac, 0xb2, 0xcc, 0x93, 0x72, 0xf2, 0x5b, 0xd5,
+	0x1a, 0xc6, 0x56, 0x4e, 0x72, 0x2e, 0xf9, 0x51, 0xb4, 0x36, 0x88, 0xcf,
+	0x44, 0x87, 0xb9, 0xe6, 0x87, 0x07, 0x65, 0x7e, 0xbb, 0x21, 0x7f, 0xeb,
+	0x98, 0xc7, 0x7c, 0x02, 0x0f, 0x8b, 0xa2, 0x76, 0xc3, 0x14, 0xf5, 0x9e,
+	0x29, 0x96, 0x7a, 0x32, 0x54, 0x52, 0xc8, 0xd1, 0x14, 0xe7, 0x08, 0x3a,
+	0xd1, 0x73, 0x58, 0xcb, 0x2d, 0x53, 0xf8, 0xc8, 0xa3, 0xbe, 0xad, 0x58,
+	0x62, 0x85, 0x76, 0xb1, 0xc7, 0x47, 0x51, 0x72, 0xa7, 0x35, 0x45, 0x6d,
+	0x38, 0xf7, 0x23, 0xcc, 0xcd, 0x6b, 0x62, 0x2d, 0xf3, 0x79, 0xb6, 0x2c,
+	0x9d, 0x83, 0x8f, 0xce, 0xf7, 0x77, 0x11, 0x43, 0xf9, 0x3c, 0xcb, 0x23,
+	0xe6, 0x59, 0x26, 0x5f, 0xf6, 0xef, 0x62, 0xff, 0xef, 0xf4, 0x80, 0x0f,
+	0x72, 0xc7, 0xb7, 0x87, 0x79, 0x1a, 0x63, 0x58, 0x06, 0x17, 0xd9, 0x3e,
+	0x8a, 0x82, 0xc5, 0x38, 0x47, 0xc1, 0x5a, 0xe6, 0xca, 0xb7, 0x90, 0xa7,
+	0xd7, 0x69, 0xa1, 0x5c, 0x8f, 0xdf, 0x11, 0x72, 0x12, 0xfe, 0x5d, 0xc0,
+	0x12, 0x4d, 0x7c, 0xd7, 0xd2, 0xef, 0x80, 0x73, 0xf8, 0x45, 0x1e, 0x83,
+	0x73, 0x79, 0xd6, 0xe1, 0x7f, 0x01, 0x17, 0xc6, 0xf1, 0xb2, 0x84, 0x14,
+	0x00, 0x00, 0x00 };
+static u32 bnx2_TPAT_b09FwData[(0x0/4) + 1] = { 0x0 };
+static u32 bnx2_TPAT_b09FwRodata[(0x0/4) + 1] = { 0x0 };
+static u32 bnx2_TPAT_b09FwBss[(0x250/4) + 1] = { 0x0 };
+static u32 bnx2_TPAT_b09FwSbss[(0x34/4) + 1] = { 0x0 };
+
+static struct fw_info bnx2_tpat_fw_09 = {
+	.ver_major			= 0x1,
+	.ver_minor			= 0x0,
+	.ver_fix			= 0x0,
+
+	.start_addr			= 0x08000860,
+
+	.text_addr			= 0x08000800,
+	.text_len			= 0x1480,
+	.text_index			= 0x0,
+	.gz_text			= bnx2_TPAT_b09FwText,
+	.gz_text_len			= sizeof(bnx2_TPAT_b09FwText),
+
+	.data_addr			= 0x08001ca0,
+	.data_len			= 0x0,
+	.data_index			= 0x0,
+	.data				= bnx2_TPAT_b09FwData,
+
+	.sbss_addr			= 0x08001ca0,
+	.sbss_len			= 0x34,
+	.sbss_index			= 0x0,
+	.sbss				= bnx2_TPAT_b09FwSbss,
+
+	.bss_addr			= 0x08001ce0,
+	.bss_len			= 0x250,
+	.bss_index			= 0x0,
+	.bss				= bnx2_TPAT_b09FwBss,
+
+	.rodata_addr			= 0x00000000,
+	.rodata_len			= 0x0,
+	.rodata_index			= 0x0,
+	.rodata				= bnx2_TPAT_b09FwRodata,
+};
+
+static u8 bnx2_TXP_b09FwText[] = {
+	0x1f, 0x8b, 0x08, 0x08, 0x51, 0xfe, 0x2f, 0x45, 0x00, 0x03, 0x74, 0x65,
+	0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xcd, 0x7b, 0x7f, 0x70,
+	0x1b, 0xe7, 0x99, 0xde, 0xbb, 0x0b, 0x80, 0x04, 0x29, 0x8a, 0x5a, 0x31,
+	0x30, 0x83, 0x38, 0xb4, 0x8d, 0x15, 0x17, 0x34, 0x6d, 0xf2, 0x1c, 0x58,
+	0xe5, 0xf9, 0xd8, 0x06, 0xb5, 0xd7, 0xc0, 0x92, 0xa2, 0x63, 0x26, 0x47,
+	0xbb, 0xcc, 0x9d, 0x92, 0x51, 0x7d, 0x28, 0x48, 0x29, 0x6e, 0xe3, 0xb4,
+	0xaa, 0xe3, 0x3f, 0x34, 0x4d, 0x5b, 0xc3, 0x00, 0x25, 0xcb, 0x2e, 0x44,
+	0xd0, 0x16, 0x63, 0xa5, 0x33, 0x37, 0x53, 0x18, 0x80, 0x28, 0xe7, 0xba,
+	0x24, 0xdc, 0xe4, 0x2e, 0xe9, 0x1f, 0xc9, 0x99, 0xa5, 0x6c, 0xc5, 0x6d,
+	0xae, 0x33, 0xbe, 0x3f, 0xda, 0xa6, 0x37, 0xd7, 0x19, 0x8d, 0xfc, 0x23,
+	0xce, 0x8f, 0xb9, 0xb8, 0x69, 0x7a, 0x56, 0x5b, 0xd9, 0xe8, 0xf3, 0x7c,
+	0xbb, 0x4b, 0x82, 0x32, 0x15, 0x5b, 0xd7, 0x76, 0xa6, 0x9c, 0xc1, 0x10,
+	0xfb, 0xed, 0xb7, 0xdf, 0xf7, 0xfe, 0x7e, 0xdf, 0xe7, 0xfd, 0x16, 0x71,
+	0x91, 0x6e, 0xf1, 0xff, 0x76, 0xe3, 0x93, 0x38, 0x7a, 0xec, 0xb1, 0x3b,
+	0xc6, 0xef, 0xd8, 0x2f, 0x72, 0xe7, 0x9d, 0xb2, 0x2b, 0xaa, 0xf3, 0xe6,
+	0xdb, 0x21, 0x91, 0xdc, 0x4f, 0xe5, 0xaf, 0xfc, 0x87, 0xc7, 0x8d, 0x60,
+	0x7d, 0x7e, 0x24, 0xaa, 0xa7, 0x5f, 0xcc, 0x64, 0x2c, 0x89, 0x86, 0xd2,
+	0x33, 0x9f, 0x9d, 0xb3, 0x44, 0x6c, 0x77, 0x24, 0x91, 0x95, 0xf7, 0x5a,
+	0x85, 0x58, 0x58, 0x38, 0x7e, 0x53, 0xfa, 0xca, 0xe3, 0xdf, 0xff, 0x2d,
+	0xf3, 0x9d, 0x6a, 0x48, 0xa2, 0x46, 0x3a, 0x27, 0xc6, 0x90, 0x44, 0x07,
+	0xf0, 0xcc, 0xef, 0xdf, 0x3a, 0xa5, 0x4b, 0x6f, 0xb0, 0x56, 0x5c, 0x16,
+	0x2a, 0x6f, 0xb7, 0xbe, 0x7f, 0x6b, 0x4c, 0xfe, 0x55, 0xd3, 0x90, 0x17,
+	0x9b, 0x61, 0x6d, 0xb2, 0xd2, 0x23, 0xa5, 0x8a, 0x2b, 0xc7, 0xcb, 0x05,
+	0xc9, 0x36, 0x5f, 0x90, 0xe2, 0xb2, 0xd1, 0x9b, 0x39, 0xf7, 0x07, 0x52,
+	0x5a, 0xee, 0xeb, 0xcd, 0x9e, 0x73, 0xa5, 0x58, 0x8e, 0xf7, 0x66, 0x9a,
+	0x46, 0x6f, 0xf6, 0x4c, 0x0c, 0xd7, 0x7d, 0xbd, 0x99, 0x33, 0x66, 0x41,
+	0xa4, 0x1f, 0x73, 0xe2, 0xbd, 0xd9, 0x8a, 0x99, 0x13, 0x19, 0x4c, 0xbd,
+	0x22, 0x03, 0xbd, 0xd9, 0x66, 0x4d, 0x5b, 0x37, 0x34, 0x29, 0xfe, 0x86,
+	0x18, 0xbd, 0xe9, 0xcb, 0xad, 0x4f, 0x58, 0x86, 0xec, 0xb5, 0x64, 0xcf,
+	0x1e, 0x4b, 0x9e, 0x88, 0xa7, 0xa3, 0x92, 0x3f, 0xdd, 0x25, 0xb6, 0xe2,
+	0xc9, 0x90, 0xfc, 0x99, 0x11, 0x63, 0x43, 0x22, 0x62, 0xc7, 0x82, 0xeb,
+	0x56, 0x2b, 0x93, 0xfa, 0x02, 0xe5, 0x8a, 0xbd, 0xa4, 0x77, 0xb2, 0x29,
+	0x92, 0xa9, 0x44, 0x25, 0x93, 0x7a, 0xaf, 0xe5, 0x3d, 0x13, 0xc5, 0xbe,
+	0xe1, 0xde, 0x89, 0x4a, 0xab, 0xe5, 0xa4, 0xb0, 0x47, 0x2a, 0x78, 0x36,
+	0x22, 0xd5, 0x98, 0x5d, 0x2d, 0xa5, 0x4c, 0xdd, 0xd3, 0x09, 0x79, 0xe4,
+	0xb5, 0x2d, 0xba, 0xf5, 0xdb, 0x92, 0x8f, 0x49, 0xb5, 0x98, 0xba, 0x4b,
+	0x9e, 0x4e, 0x19, 0x72, 0x12, 0xeb, 0x3d, 0x95, 0x82, 0x1c, 0xad, 0x63,
+	0x5a, 0xa6, 0x69, 0xc6, 0x45, 0x7b, 0x5a, 0x32, 0x67, 0x06, 0x8d, 0xac,
+	0x60, 0x6f, 0xab, 0x75, 0x4b, 0x26, 0x85, 0xfd, 0x46, 0xff, 0x67, 0xcb,
+	0x8e, 0x99, 0xb9, 0xaa, 0x0c, 0x48, 0xb1, 0x32, 0x98, 0xfa, 0x13, 0xd1,
+	0xa4, 0xd3, 0xa2, 0x7c, 0x5a, 0x72, 0x3f, 0xf6, 0xcd, 0x58, 0x18, 0x6f,
+	0x8a, 0xad, 0x27, 0x23, 0xf2, 0x0f, 0x0c, 0x33, 0x91, 0x09, 0xf5, 0x4b,
+	0xf1, 0x74, 0x27, 0xe8, 0xb4, 0xfb, 0x74, 0xcc, 0x3d, 0x30, 0x26, 0xb1,
+	0x5d, 0x22, 0x5a, 0x28, 0x9d, 0xc4, 0xba, 0x22, 0x45, 0x77, 0x00, 0xcf,
+	0x26, 0xc7, 0x7f, 0x2a, 0x7b, 0x24, 0xb1, 0x37, 0x2c, 0x25, 0xb7, 0x1b,
+	0x72, 0x34, 0xa0, 0x83, 0xe4, 0xf8, 0x5f, 0x40, 0x29, 0xba, 0x95, 0x8c,
+	0x1f, 0x93, 0x9c, 0x96, 0x6d, 0x76, 0x48, 0x29, 0x19, 0x95, 0x05, 0xd0,
+	0xb1, 0x90, 0xfa, 0xa2, 0x96, 0x39, 0x77, 0x50, 0xcb, 0x9e, 0xc3, 0xbc,
+	0x66, 0xdd, 0xb7, 0x35, 0x03, 0xeb, 0xe8, 0x52, 0x4c, 0x1e, 0xc4, 0xbd,
+	0xa8, 0xcc, 0x61, 0xde, 0x1c, 0x78, 0x2a, 0x35, 0xf7, 0xc8, 0xfa, 0x6c,
+	0xac, 0x37, 0x03, 0x1d, 0x16, 0x71, 0xff, 0xb7, 0x67, 0x34, 0x31, 0x2c,
+	0x5b, 0x7e, 0x3c, 0x06, 0x1d, 0x9e, 0x81, 0xfe, 0xce, 0xc4, 0xe5, 0x78,
+	0x45, 0x62, 0xba, 0x24, 0xe3, 0x79, 0x79, 0x41, 0xea, 0x2e, 0xf5, 0x0f,
+	0x7d, 0x42, 0xdf, 0x45, 0x97, 0xcf, 0x41, 0x6f, 0x15, 0x07, 0xf2, 0x98,
+	0x02, 0x0d, 0x0f, 0x6a, 0xf7, 0xd7, 0x67, 0xb5, 0x03, 0xcd, 0x1f, 0x6b,
+	0xd2, 0x7d, 0x4c, 0xfb, 0x5c, 0xf3, 0x88, 0xe6, 0xcb, 0x1e, 0xba, 0x8b,
+	0x8a, 0x3d, 0x13, 0x95, 0x95, 0xa6, 0xa7, 0xbb, 0x1a, 0xec, 0xd3, 0x36,
+	0x6c, 0xe8, 0xe1, 0x6f, 0x6f, 0xce, 0x59, 0x69, 0xc6, 0x64, 0x01, 0xb4,
+	0x1d, 0x6f, 0x72, 0xfe, 0xef, 0x41, 0x3f, 0x51, 0x71, 0x6f, 0xed, 0x91,
+	0x1c, 0xc6, 0x8b, 0x67, 0xc4, 0xce, 0xa4, 0x74, 0x3c, 0xd3, 0x2b, 0x21,
+	0xab, 0x1f, 0x9f, 0x6e, 0x99, 0xab, 0x77, 0xda, 0x21, 0x2b, 0x26, 0x73,
+	0x4d, 0xca, 0x10, 0xff, 0x2b, 0x81, 0x1c, 0x49, 0x2b, 0xc7, 0xf9, 0x1c,
+	0xc7, 0x0d, 0x8c, 0xb7, 0x8f, 0xd1, 0x2e, 0x7a, 0x41, 0x8f, 0x39, 0x2c,
+	0x18, 0xcb, 0x57, 0x92, 0xc6, 0xe7, 0xf8, 0xbf, 0x49, 0xd9, 0x06, 0x32,
+	0x0d, 0x63, 0xae, 0x2e, 0xf9, 0x3a, 0xf6, 0x39, 0x7d, 0xa5, 0x15, 0x19,
+	0xc3, 0xb5, 0xf5, 0x4b, 0xc8, 0x92, 0xfb, 0x86, 0x41, 0x93, 0x2e, 0xb9,
+	0x3a, 0xd7, 0xe2, 0x7d, 0x81, 0xee, 0x8b, 0x7b, 0x75, 0x19, 0x86, 0x7e,
+	0x4d, 0xec, 0xd3, 0x85, 0x39, 0x3d, 0x90, 0x1f, 0x78, 0x3d, 0x87, 0xef,
+	0xe0, 0x5d, 0xb7, 0x74, 0x3c, 0xdf, 0x29, 0x73, 0x29, 0xda, 0x0b, 0xe9,
+	0xdc, 0x85, 0xb5, 0xbb, 0x64, 0xfe, 0x34, 0xe5, 0x01, 0xbb, 0xaa, 0xc4,
+	0xa4, 0x74, 0xc6, 0x34, 0x1c, 0x31, 0x21, 0x1b, 0x1b, 0xf3, 0x3a, 0x25,
+	0x67, 0xb4, 0x5a, 0x13, 0xa9, 0x11, 0xe3, 0x9b, 0xca, 0xce, 0x47, 0x8c,
+	0xa4, 0x26, 0x85, 0x8e, 0xf4, 0x10, 0x64, 0x6b, 0x1e, 0x14, 0xe1, 0xf5,
+	0x0f, 0xc4, 0x9e, 0xa5, 0xff, 0xc4, 0xb8, 0x17, 0xfc, 0xa9, 0x1f, 0xf4,
+	0xd3, 0xe7, 0x06, 0xa0, 0x97, 0xb8, 0xf2, 0x83, 0x89, 0x1d, 0xfd, 0xc0,
+	0x9c, 0xaa, 0x82, 0xdf, 0xe2, 0xb9, 0x30, 0xfd, 0x2f, 0x05, 0x73, 0x93,
+	0x5d, 0x56, 0x14, 0xb6, 0x40, 0x5a, 0xc6, 0xb1, 0x7e, 0xab, 0xf5, 0xd9,
+	0x94, 0x47, 0x53, 0xf1, 0x8c, 0x8d, 0x67, 0xc3, 0x90, 0xbb, 0xf9, 0x70,
+	0x42, 0xed, 0x3f, 0xee, 0xef, 0x6f, 0xc8, 0x1c, 0xe8, 0x2e, 0x56, 0x42,
+	0x92, 0x35, 0xb8, 0xc6, 0x9f, 0x71, 0x3c, 0xe7, 0xad, 0x05, 0xbb, 0x3d,
+	0x35, 0x68, 0xdc, 0x07, 0x5f, 0xa2, 0x8f, 0x15, 0x57, 0x29, 0x63, 0xac,
+	0x33, 0x46, 0x19, 0x1b, 0x8a, 0xc6, 0xcc, 0x19, 0xda, 0x91, 0x0c, 0x84,
+	0x84, 0x76, 0x8e, 0x98, 0x01, 0xbb, 0x2a, 0xf9, 0x76, 0x95, 0x77, 0xa9,
+	0xff, 0xbb, 0x7d, 0xff, 0xd4, 0x65, 0x28, 0x49, 0x7b, 0x7f, 0x5a, 0xb2,
+	0xf0, 0xf1, 0x39, 0xec, 0x54, 0x07, 0x4f, 0xb5, 0xca, 0x20, 0x64, 0x15,
+	0xf8, 0x1d, 0xf4, 0x3b, 0xfa, 0x6e, 0x2b, 0x88, 0x05, 0xc5, 0x0a, 0x7d,
+	0xa6, 0x68, 0xe8, 0x52, 0xc0, 0x07, 0x76, 0x63, 0x99, 0xc3, 0x99, 0x90,
+	0x39, 0x93, 0x03, 0x6d, 0xb0, 0x7b, 0xc9, 0xdc, 0x49, 0x7b, 0xc6, 0x9c,
+	0xa6, 0xec, 0x0f, 0xfc, 0xac, 0xe6, 0x52, 0x4f, 0xdd, 0xd8, 0x37, 0xa0,
+	0x29, 0x8c, 0x31, 0xae, 0x13, 0x85, 0xcd, 0x07, 0x36, 0x43, 0xfb, 0x33,
+	0xed, 0x75, 0xe9, 0x90, 0xe1, 0x24, 0x62, 0xd9, 0x19, 0x1d, 0xfa, 0x1b,
+	0x40, 0x4c, 0x09, 0xcb, 0x11, 0xc8, 0xea, 0x4b, 0x15, 0xd2, 0xe7, 0xc0,
+	0xef, 0x10, 0xdb, 0xce, 0x4c, 0xc2, 0xcf, 0xa6, 0xb4, 0x09, 0xf8, 0xc4,
+	0x67, 0xea, 0xa4, 0xa9, 0x25, 0xf4, 0x4b, 0xe7, 0x5c, 0x4e, 0x9b, 0x6c,
+	0x1e, 0xd4, 0xa6, 0xce, 0xd1, 0x4f, 0xe8, 0x23, 0xa6, 0xf1, 0x80, 0x78,
+	0x3c, 0x14, 0x9b, 0xaf, 0x68, 0xf4, 0xd5, 0xe2, 0xa9, 0x2e, 0xd0, 0xb1,
+	0x0b, 0xf4, 0x18, 0xf0, 0x3d, 0xd8, 0x97, 0x65, 0xce, 0xd0, 0x66, 0x9c,
+	0xa4, 0x95, 0xf8, 0xe7, 0xf2, 0x41, 0x39, 0x4c, 0x6c, 0xca, 0x61, 0x04,
+	0x32, 0xd9, 0x2e, 0x87, 0x85, 0x0f, 0xca, 0xc1, 0x2e, 0x40, 0x0e, 0x0b,
+	0x88, 0x43, 0x0b, 0x4d, 0xf2, 0xdc, 0x12, 0xfd, 0x4e, 0x81, 0x75, 0xca,
+	0xbd, 0x7a, 0x9a, 0x36, 0x4a, 0x3f, 0x49, 0x26, 0x4a, 0x58, 0xa1, 0xe1,
+	0xf6, 0x28, 0xdf, 0x98, 0x54, 0xb2, 0xf8, 0x30, 0x7e, 0xc9, 0xdf, 0x16,
+	0xcf, 0x53, 0x75, 0xc6, 0x1b, 0xd8, 0x79, 0xd2, 0x32, 0xbe, 0x20, 0x5b,
+	0x7c, 0xdf, 0xb7, 0xc5, 0x37, 0xf6, 0x09, 0x62, 0x10, 0x79, 0x0e, 0xe2,
+	0x31, 0x6d, 0xe5, 0xa5, 0x56, 0xc8, 0xb2, 0xa0, 0x03, 0xda, 0x0b, 0x69,
+	0x30, 0x8d, 0xcf, 0x0a, 0xfe, 0x23, 0x2e, 0xd0, 0x97, 0x72, 0x6a, 0x5e,
+	0x87, 0xe4, 0xf6, 0x7a, 0xf3, 0xe7, 0x2a, 0xad, 0x5f, 0xe8, 0xe9, 0xf7,
+	0x5b, 0x99, 0x31, 0xcb, 0xf7, 0xf1, 0xa8, 0x7c, 0xb9, 0x6e, 0xe6, 0x12,
+	0x5a, 0x8f, 0x14, 0x6e, 0x40, 0x5c, 0xa9, 0xd0, 0x3f, 0xfa, 0xaf, 0x11,
+	0xcb, 0x06, 0xfc, 0x58, 0xf6, 0x13, 0xc8, 0x9e, 0xb9, 0xe7, 0xf0, 0xfb,
+	0xeb, 0x31, 0xfe, 0x4f, 0x1a, 0x33, 0xf2, 0x05, 0xe6, 0x9b, 0x3d, 0xba,
+	0x8a, 0xdf, 0x16, 0x73, 0x41, 0x21, 0x9c, 0xee, 0x96, 0xc2, 0x5e, 0x29,
+	0x84, 0xd2, 0xf4, 0x23, 0xfa, 0x46, 0x87, 0x4f, 0x77, 0x90, 0x3b, 0xf8,
+	0x77, 0x4c, 0x17, 0x8b, 0x73, 0x90, 0x27, 0x2a, 0xe4, 0xe3, 0xbd, 0x40,
+	0x27, 0x78, 0x46, 0x22, 0x9e, 0xcd, 0x4d, 0x23, 0x66, 0x52, 0xa6, 0xed,
+	0xf6, 0xc2, 0x58, 0x2a, 0x09, 0xdd, 0x62, 0x2c, 0x15, 0x23, 0x94, 0x7e,
+	0x50, 0xb3, 0xeb, 0x5f, 0xd4, 0x6c, 0xc8, 0xce, 0x86, 0xec, 0x6c, 0xc8,
+	0x2e, 0x03, 0xd9, 0x65, 0x9b, 0xa4, 0x87, 0xb4, 0x78, 0xeb, 0x3b, 0xde,
+	0xfa, 0xa0, 0xb3, 0x5f, 0xf2, 0xca, 0xc7, 0xc9, 0x2f, 0x62, 0xb2, 0x8a,
+	0x07, 0x93, 0x9a, 0x17, 0x0f, 0xb8, 0xde, 0x14, 0x9e, 0xbf, 0x1b, 0x79,
+	0xce, 0xd6, 0x75, 0x6b, 0x4b, 0x26, 0x0b, 0x6d, 0x32, 0x29, 0xb9, 0x94,
+	0x11, 0xe7, 0xd3, 0x97, 0x5d, 0xe8, 0x3d, 0x90, 0xcb, 0x34, 0x68, 0xe8,
+	0x24, 0xef, 0x3e, 0x1f, 0x5c, 0xbf, 0xcf, 0x5f, 0xff, 0xd3, 0x58, 0x93,
+	0xbe, 0xbb, 0xd3, 0xbe, 0xdc, 0x93, 0xb9, 0xf4, 0xd7, 0xf1, 0x83, 0x5a,
+	0x02, 0x31, 0xfa, 0x45, 0xf8, 0xda, 0xc5, 0x50, 0x5c, 0xbe, 0x7f, 0xeb,
+	0x6b, 0xa8, 0x2f, 0xa4, 0x70, 0x63, 0xba, 0x95, 0x08, 0xa7, 0xdf, 0x6b,
+	0x2d, 0x8c, 0x21, 0x7e, 0xa6, 0xcd, 0x78, 0x26, 0x34, 0x2a, 0x2f, 0x35,
+	0x87, 0xe5, 0x3b, 0x4d, 0x4b, 0xfe, 0xa8, 0x99, 0x90, 0x3f, 0x6c, 0x0e,
+	0xc8, 0xb7, 0x9b, 0x71, 0xf9, 0x56, 0x33, 0xa8, 0x45, 0xe2, 0xb4, 0xa5,
+	0x5e, 0xa7, 0xb9, 0x53, 0x3d, 0x04, 0x3b, 0xc7, 0x5a, 0x99, 0xb1, 0x70,
+	0x2e, 0x94, 0x56, 0x35, 0xc2, 0xcc, 0xd1, 0xf2, 0xe3, 0x2d, 0xdd, 0xb2,
+	0x0a, 0xba, 0xde, 0x33, 0x6e, 0xdc, 0x25, 0x39, 0x3d, 0x8d, 0x31, 0x77,
+	0x3c, 0xec, 0x94, 0xbb, 0x90, 0x5f, 0xa2, 0xa8, 0x65, 0x06, 0xa4, 0x80,
+	0x75, 0x0b, 0xcd, 0x56, 0x6b, 0x29, 0xf5, 0x0f, 0x3f, 0x65, 0xfc, 0x8d,
+	0x7f, 0xd9, 0x29, 0xbd, 0xdf, 0x5e, 0x37, 0x86, 0xfe, 0xbb, 0x5f, 0x0f,
+	0xa1, 0xc6, 0xea, 0x57, 0x8b, 0xe7, 0xb4, 0xf4, 0xa8, 0x93, 0x70, 0x37,
+	0x70, 0x5f, 0xa2, 0xfd, 0xd6, 0xcf, 0x51, 0x85, 0xc8, 0xee, 0x98, 0xc5,
+	0x9a, 0x6b, 0x26, 0xfb, 0x79, 0xfc, 0xff, 0x58, 0x5a, 0xf6, 0xf4, 0xe1,
+	0xff, 0xde, 0x34, 0x4c, 0x2a, 0xcd, 0x98, 0xac, 0xb5, 0xc5, 0x64, 0xd1,
+	0x1c, 0xe4, 0xdf, 0x05, 0xf0, 0xe4, 0x40, 0x1e, 0xbf, 0xd3, 0x8c, 0x6a,
+	0xd9, 0xd3, 0xfd, 0x52, 0xaa, 0x33, 0xaf, 0x71, 0x5e, 0xd4, 0xaf, 0x7b,
+	0x78, 0xdd, 0x81, 0x6b, 0x41, 0xae, 0xf9, 0x94, 0x48, 0xaf, 0xf9, 0xa3,
+	0xcf, 0x4b, 0xdd, 0xaf, 0x5b, 0x22, 0xb2, 0xac, 0x6c, 0x8c, 0xe3, 0xaf,
+	0x65, 0xbf, 0x36, 0xb4, 0x35, 0xfe, 0xec, 0xe6, 0xf8, 0x3b, 0xd9, 0x4f,
+	0x6f, 0x8e, 0x77, 0x87, 0x3d, 0x1e, 0xc6, 0xb5, 0x99, 0x66, 0xc1, 0x1f,
+	0xbb, 0x0c, 0xb9, 0xb7, 0x5a, 0x0b, 0xc8, 0x3d, 0x45, 0xeb, 0x32, 0xea,
+	0x24, 0xc6, 0x9f, 0xeb, 0x89, 0x37, 0xdb, 0x62, 0x8d, 0x91, 0x09, 0x51,
+	0x9f, 0x51, 0xf1, 0xd6, 0xe4, 0xfd, 0x4e, 0xc4, 0x9d, 0xcb, 0xf8, 0xce,
+	0x3c, 0x17, 0xc4, 0x3c, 0xce, 0xe1, 0xf3, 0x6f, 0x5f, 0x43, 0xe7, 0x31,
+	0xe8, 0xfc, 0xff, 0x1b, 0xdd, 0xe2, 0x4f, 0xe9, 0x56, 0xc5, 0x9d, 0x97,
+	0xb6, 0xd9, 0x2c, 0xe9, 0xef, 0xf6, 0x69, 0x96, 0x68, 0x38, 0x6d, 0x38,
+	0x0b, 0xd6, 0x8d, 0x12, 0x41, 0x0d, 0x4b, 0x9b, 0x2d, 0x35, 0xbf, 0x8b,
+	0xe7, 0x99, 0x27, 0x25, 0x1a, 0x49, 0xd3, 0x2e, 0xd6, 0x07, 0x32, 0xd6,
+	0x31, 0xa7, 0xe6, 0x1e, 0x73, 0xce, 0x2a, 0x3b, 0x59, 0xbf, 0xc9, 0xab,
+	0xcd, 0x7f, 0x74, 0x13, 0x6a, 0x73, 0x3c, 0xcf, 0x98, 0xcb, 0xf1, 0x46,
+	0x4f, 0xc6, 0x62, 0x0e, 0x5a, 0x72, 0x8a, 0xf8, 0x2c, 0xa8, 0xb9, 0xaf,
+	0x0e, 0x70, 0x6e, 0x67, 0x3a, 0x76, 0xd3, 0x8f, 0xf1, 0xbf, 0x23, 0xfd,
+	0xce, 0x4d, 0x17, 0x2c, 0xae, 0x3b, 0x75, 0xd3, 0x59, 0xb5, 0x46, 0x18,
+	0xf1, 0x8c, 0xf3, 0x2e, 0xdf, 0xc4, 0x67, 0x9f, 0x44, 0x1c, 0x3f, 0xe1,
+	0x42, 0x97, 0xee, 0x8b, 0x4e, 0x1e, 0x9f, 0x39, 0xd2, 0x54, 0xe1, 0x7d,
+	0xe3, 0xe6, 0x8c, 0x15, 0x56, 0xf9, 0xf6, 0x4b, 0x98, 0x73, 0x04, 0x73,
+	0x0e, 0xbb, 0x01, 0x3f, 0xea, 0xbe, 0x93, 0xc5, 0xfd, 0xc3, 0x65, 0xc3,
+	0x71, 0xca, 0xe6, 0x38, 0x6a, 0x8e, 0xf8, 0x71, 0xe4, 0xe3, 0x1c, 0x72,
+	0xa0, 0x2d, 0xe6, 0x70, 0x41, 0xd2, 0x5d, 0x93, 0xa8, 0xe5, 0x56, 0x90,
+	0x4f, 0x50, 0x87, 0xa4, 0xaa, 0x32, 0xd8, 0x95, 0x39, 0xad, 0xc3, 0x3e,
+	0xef, 0x80, 0xbd, 0x1a, 0x8e, 0x9e, 0x44, 0x5c, 0x47, 0xdc, 0x5c, 0xa8,
+	0x58, 0x5a, 0xb6, 0x3c, 0x68, 0x94, 0xe4, 0x56, 0x59, 0x37, 0xcc, 0xf8,
+	0xa4, 0xec, 0x92, 0x6c, 0x18, 0xf3, 0x86, 0x3f, 0x2e, 0xb9, 0xb8, 0x86,
+	0xd8, 0x70, 0x03, 0xe2, 0x16, 0xeb, 0xe4, 0xf6, 0x18, 0xfa, 0x0b, 0x11,
+	0xeb, 0x8b, 0x21, 0xc6, 0x9e, 0x4e, 0x8b, 0x75, 0x3f, 0xe7, 0xed, 0x92,
+	0x8d, 0x0f, 0xcc, 0x7b, 0xb7, 0x6d, 0x5e, 0xfb, 0xf8, 0x7b, 0x18, 0xdf,
+	0x25, 0x17, 0x41, 0x47, 0x38, 0x39, 0x26, 0x25, 0xf0, 0x10, 0x39, 0xd5,
+	0x6a, 0x5d, 0x00, 0x3f, 0x3a, 0xf8, 0x2f, 0x56, 0x59, 0x0b, 0x84, 0xa4,
+	0x6a, 0xe0, 0x9e, 0xdb, 0x6a, 0xd5, 0x10, 0x46, 0xf5, 0x55, 0xd2, 0x1c,
+	0x95, 0x49, 0x77, 0x48, 0xec, 0x06, 0xe5, 0x60, 0xc2, 0xeb, 0xfe, 0xac,
+	0x2b, 0x7b, 0x86, 0x39, 0x13, 0x16, 0xb1, 0xfa, 0xe7, 0x5d, 0x19, 0xe4,
+	0x3e, 0x7d, 0xf5, 0x62, 0x57, 0x16, 0x7a, 0x0f, 0xad, 0xfe, 0xe7, 0x2e,
+	0xe7, 0x34, 0xe9, 0x0a, 0x21, 0xf7, 0xdd, 0x22, 0x45, 0xa3, 0x25, 0xdf,
+	0x44, 0x8d, 0x50, 0x1c, 0x46, 0x2e, 0x83, 0x17, 0xe8, 0xa0, 0xbb, 0x60,
+	0x48, 0xb4, 0x3b, 0xfd, 0x7d, 0xd0, 0x37, 0x06, 0xd9, 0xec, 0xc2, 0x9c,
+	0x10, 0xc6, 0x87, 0xf0, 0xbf, 0x7d, 0xfc, 0x8d, 0x2e, 0xe4, 0x05, 0xc4,
+	0x60, 0x89, 0x66, 0xc6, 0x7a, 0xb0, 0xfe, 0xf7, 0x30, 0x8e, 0x09, 0xc9,
+	0xcd, 0xf1, 0x27, 0xbc, 0xf1, 0xb7, 0x41, 0x0b, 0x9f, 0x63, 0x8d, 0x22,
+	0xd1, 0xb9, 0x31, 0x03, 0x34, 0x70, 0x6e, 0x4c, 0xcd, 0x75, 0xce, 0xd0,
+	0x06, 0x0c, 0xa7, 0x66, 0xdd, 0x2c, 0xd9, 0xe5, 0x7e, 0x99, 0x5c, 0xee,
+	0x93, 0x03, 0xcb, 0xe6, 0x4c, 0x95, 0xd8, 0x0f, 0x3c, 0x0b, 0xea, 0x30,
+	0x7d, 0x55, 0x20, 0x01, 0x33, 0x7e, 0x44, 0x06, 0xe3, 0x5f, 0x92, 0x5f,
+	0xb6, 0x90, 0xef, 0x91, 0xeb, 0x7b, 0x24, 0xac, 0xd6, 0x89, 0x07, 0x7b,
+	0xd2, 0x46, 0xb7, 0xed, 0xeb, 0x9c, 0xb9, 0xd6, 0xba, 0x70, 0xfe, 0xd5,
+	0xf8, 0x55, 0xeb, 0xfe, 0x85, 0xbf, 0xae, 0x81, 0x75, 0x07, 0xb0, 0x26,
+	0x79, 0x34, 0xbb, 0x26, 0x4e, 0x8b, 0xdd, 0x09, 0xfa, 0x9c, 0xe4, 0x8d,
+	0xc0, 0x86, 0xfd, 0x72, 0x62, 0x99, 0xf1, 0x42, 0xfa, 0xf1, 0x19, 0x8d,
+	0x48, 0x72, 0xf8, 0x1c, 0xea, 0xae, 0x09, 0xb5, 0x86, 0x57, 0x93, 0xe9,
+	0xab, 0x29, 0xd4, 0xc4, 0x3f, 0x05, 0x3d, 0xac, 0x15, 0xc8, 0x73, 0x18,
+	0xfc, 0xa6, 0x50, 0x8b, 0x11, 0x47, 0xb5, 0x1e, 0xcf, 0xa4, 0xf0, 0xfd,
+	0x5c, 0xa2, 0x2b, 0x8b, 0x98, 0x08, 0xff, 0xbe, 0x39, 0xa4, 0x72, 0x18,
+	0xf5, 0x32, 0xda, 0x45, 0x3c, 0x83, 0xe7, 0xa1, 0x27, 0xca, 0x68, 0xbc,
+	0xcb, 0xa9, 0x50, 0x46, 0x02, 0x7a, 0x2c, 0xd8, 0x64, 0x58, 0x61, 0x29,
+	0x7d, 0xd5, 0xc6, 0xbc, 0xb7, 0x42, 0xac, 0x77, 0x33, 0x16, 0xbf, 0x23,
+	0xe6, 0xac, 0x4e, 0x61, 0x2e, 0xbf, 0xdf, 0x85, 0x75, 0x07, 0x87, 0x8b,
+	0xd2, 0x31, 0x7c, 0x18, 0xf1, 0x4e, 0x1f, 0x1b, 0x01, 0x6d, 0xb4, 0xf3,
+	0x16, 0xb0, 0xc0, 0x6f, 0x81, 0x1f, 0xf8, 0x46, 0xd2, 0x92, 0xf9, 0x25,
+	0xca, 0x55, 0x3e, 0x0e, 0x1e, 0xc0, 0x7f, 0x12, 0x71, 0x8d, 0x3c, 0x70,
+	0x6f, 0x41, 0x8e, 0xbe, 0x5b, 0xf2, 0x4b, 0x51, 0x55, 0xeb, 0xdb, 0x06,
+	0xf7, 0xd7, 0x34, 0x3d, 0xdd, 0x0d, 0x1d, 0x93, 0xb7, 0x1c, 0x68, 0x7b,
+	0x0c, 0x79, 0x80, 0xbc, 0x91, 0x2f, 0xfa, 0xca, 0x28, 0xfc, 0x84, 0xf4,
+	0xfb, 0xb6, 0xa7, 0xad, 0x23, 0xa6, 0xa8, 0x38, 0x98, 0xca, 0x20, 0xb0,
+	0xbd, 0xd4, 0x1c, 0x97, 0x3f, 0x6e, 0x8e, 0xc9, 0x77, 0x9b, 0x29, 0xe4,
+	0xc0, 0x51, 0xe4, 0xc0, 0x61, 0xe4, 0x40, 0x0b, 0x39, 0x30, 0x81, 0x1c,
+	0x38, 0x80, 0x1c, 0x18, 0x47, 0x9c, 0x14, 0x39, 0xa1, 0xf2, 0x6d, 0x2c,
+	0x0a, 0xcc, 0x1d, 0xb5, 0x9b, 0x0e, 0x78, 0x99, 0xc1, 0x5e, 0xb3, 0xe0,
+	0xeb, 0x50, 0xd7, 0x44, 0x65, 0x1c, 0x31, 0xd7, 0x42, 0x3c, 0x4a, 0x20,
+	0xdf, 0x8c, 0x01, 0x6b, 0x89, 0x6c, 0x2c, 0x25, 0x10, 0x13, 0x5b, 0xe2,
+	0x00, 0x13, 0x97, 0x8c, 0x14, 0x9e, 0xdd, 0xab, 0xec, 0x33, 0x94, 0xbe,
+	0x3b, 0x2c, 0xdd, 0xa3, 0x92, 0x2f, 0x9f, 0xc4, 0x58, 0x1c, 0xeb, 0x75,
+	0x21, 0x2f, 0x31, 0x2e, 0x30, 0x06, 0x2c, 0x39, 0xbf, 0x6b, 0xd1, 0xd7,
+	0xba, 0xb5, 0xcc, 0xe9, 0x82, 0x30, 0x96, 0x23, 0x0f, 0xc0, 0x1e, 0x38,
+	0x36, 0x89, 0xe7, 0xf8, 0xfd, 0x2f, 0xfd, 0x98, 0xf9, 0xb1, 0x4e, 0x81,
+	0xd1, 0xbe, 0xc4, 0x9c, 0x67, 0x61, 0x3d, 0xb7, 0xdd, 0x4f, 0x9f, 0x47,
+	0xad, 0x14, 0xdc, 0x27, 0xae, 0x66, 0x3f, 0xe1, 0x24, 0x68, 0x1e, 0x04,
+	0xbe, 0x47, 0x6d, 0x75, 0xb0, 0x8a, 0xef, 0xed, 0xf3, 0x5d, 0xcc, 0x57,
+	0x63, 0x51, 0x23, 0x6d, 0xb1, 0x9e, 0x43, 0xac, 0x3c, 0x86, 0xb8, 0x68,
+	0x3b, 0xfa, 0x5a, 0x03, 0x7c, 0x42, 0x8e, 0x65, 0xdb, 0x09, 0x0f, 0xbd,
+	0xd6, 0x7a, 0xd6, 0x1a, 0x96, 0x89, 0xb5, 0x31, 0xc9, 0xae, 0x0d, 0xc6,
+	0xcf, 0x4b, 0xd7, 0x65, 0x5b, 0x5e, 0x6b, 0x95, 0x5c, 0xf3, 0xa4, 0x0d,
+	0xbb, 0xdc, 0xb7, 0xdf, 0x90, 0x1a, 0x30, 0xdc, 0xbe, 0xfd, 0x9d, 0xac,
+	0xe9, 0x5f, 0x14, 0x3d, 0x21, 0x99, 0x45, 0x5b, 0xc6, 0xf6, 0x07, 0xb5,
+	0xe7, 0x2f, 0x3b, 0xa4, 0x1b, 0x63, 0x6b, 0x09, 0xcc, 0x61, 0xdd, 0xaf,
+	0xfa, 0x27, 0xe0, 0x59, 0xf3, 0x9e, 0x51, 0x39, 0x8f, 0x98, 0x19, 0xbc,
+	0x37, 0x6d, 0xe7, 0xfc, 0x22, 0x70, 0x0d, 0xe4, 0x99, 0x59, 0x24, 0xee,
+	0xda, 0x05, 0x39, 0x45, 0x60, 0x23, 0xd4, 0xfd, 0x20, 0x9e, 0x6d, 0xc9,
+	0x57, 0x53, 0xb4, 0x87, 0xc7, 0x20, 0x4b, 0xac, 0x15, 0x0e, 0xf8, 0xf9,
+	0x9a, 0xcc, 0x2d, 0x51, 0x7e, 0x71, 0xd4, 0x96, 0xdc, 0x5b, 0xa2, 0x5d,
+	0xe9, 0xab, 0xeb, 0x46, 0xdb, 0xd9, 0x58, 0xc4, 0xfa, 0x43, 0xc4, 0xd8,
+	0x88, 0xd5, 0x65, 0xf6, 0x06, 0x58, 0x53, 0x1d, 0x80, 0x4e, 0xa6, 0x15,
+	0xe6, 0xce, 0xd4, 0x53, 0x62, 0x9d, 0x62, 0xac, 0x92, 0x44, 0xc8, 0x22,
+	0xbe, 0x17, 0x43, 0x4f, 0xcf, 0xe2, 0x1e, 0xe5, 0xc9, 0x5a, 0x1f, 0xf7,
+	0x57, 0xff, 0xa3, 0xd2, 0x49, 0x08, 0xba, 0xcb, 0xef, 0x67, 0x11, 0x22,
+	0x4b, 0xa1, 0x34, 0x62, 0xe0, 0x18, 0x79, 0x50, 0x7b, 0xa3, 0x9e, 0xa4,
+	0xdf, 0x81, 0x67, 0xd8, 0x46, 0x5b, 0x5d, 0xa9, 0xfe, 0x4a, 0x95, 0x08,
+	0x6c, 0x59, 0x0a, 0x91, 0x34, 0x78, 0x1a, 0xc3, 0x77, 0x38, 0xff, 0x09,
+	0xe8, 0xf3, 0x2c, 0x9e, 0x5f, 0x00, 0x5f, 0x1b, 0x65, 0xd2, 0x9d, 0x4c,
+	0x1c, 0x57, 0xbe, 0x8b, 0x6b, 0x97, 0xb5, 0xcc, 0xd7, 0xe4, 0xbc, 0xe2,
+	0xef, 0x13, 0xac, 0x9d, 0xa1, 0xa7, 0xeb, 0xe1, 0x6f, 0xf2, 0x3a, 0xf9,
+	0xf3, 0xd6, 0x67, 0xce, 0xca, 0x58, 0x09, 0xc9, 0x96, 0x5f, 0x6a, 0x85,
+	0x2d, 0x2b, 0x3e, 0xef, 0xeb, 0x31, 0xeb, 0x46, 0x41, 0x07, 0xfb, 0x00,
+	0xfb, 0x95, 0x2e, 0x41, 0x07, 0x6d, 0xa7, 0x10, 0x4d, 0x3f, 0x2e, 0x2b,
+	0x4b, 0xff, 0x54, 0x6a, 0x4b, 0x05, 0xa9, 0x2f, 0xfd, 0x23, 0x39, 0xb7,
+	0xd4, 0x92, 0x0b, 0x29, 0x15, 0x93, 0xac, 0x0e, 0xe5, 0xcf, 0x72, 0xa3,
+	0x87, 0x07, 0x93, 0xe3, 0x97, 0x20, 0xc0, 0x95, 0xaa, 0x47, 0xfb, 0x54,
+	0x1b, 0xed, 0x17, 0x60, 0x6b, 0xaf, 0x58, 0xa4, 0x7f, 0x4c, 0x6a, 0x65,
+	0xd2, 0xfe, 0xa0, 0xa2, 0xfd, 0xc0, 0x26, 0xed, 0x92, 0x0b, 0x59, 0xa4,
+	0x7f, 0x27, 0xda, 0x81, 0xf3, 0xfb, 0x49, 0x7f, 0x02, 0xcf, 0x7e, 0xd0,
+	0xfe, 0x6a, 0xee, 0x6b, 0xad, 0x8d, 0x72, 0x44, 0xd1, 0x1c, 0x4a, 0x8f,
+	0x41, 0x3e, 0xaf, 0xb5, 0xd6, 0x5d, 0xfa, 0x11, 0xbe, 0xbb, 0xf7, 0x20,
+	0x46, 0xf5, 0x61, 0xaf, 0x5e, 0xc9, 0xcf, 0x46, 0x11, 0x27, 0xc7, 0xa1,
+	0xdb, 0x2e, 0xe5, 0x87, 0x08, 0x17, 0xd0, 0xd9, 0x34, 0xe6, 0x1f, 0xa2,
+	0xbf, 0x29, 0xb9, 0x38, 0x90, 0x4b, 0xb1, 0x9c, 0x8e, 0xa0, 0xfe, 0xc7,
+	0x3e, 0x86, 0x93, 0x73, 0xf9, 0xcc, 0x00, 0x62, 0x1a, 0xff, 0x7f, 0x64,
+	0x7b, 0x28, 0x20, 0xd6, 0x42, 0xe7, 0x3d, 0x90, 0x1f, 0xe8, 0x18, 0x9b,
+	0x41, 0x6e, 0x4d, 0x0e, 0xd7, 0x54, 0x7f, 0x91, 0x71, 0xe5, 0x28, 0xf2,
+	0xe9, 0x21, 0x7c, 0xbc, 0xfd, 0x26, 0x9a, 0xdc, 0x73, 0x3b, 0x4f, 0x45,
+	0x77, 0x7d, 0x2f, 0x01, 0x52, 0xa6, 0xc9, 0x7d, 0x0b, 0x12, 0x4a, 0x87,
+	0xb0, 0x2f, 0xc7, 0x7a, 0x10, 0x63, 0x06, 0xa2, 0xd9, 0xe6, 0xcf, 0x31,
+	0x4e, 0x5f, 0x66, 0x7c, 0x0f, 0x68, 0x1f, 0xc5, 0x9a, 0x8c, 0xbb, 0x63,
+	0xe0, 0x99, 0x35, 0x26, 0xe3, 0x26, 0xf2, 0x48, 0xe3, 0x47, 0xcc, 0x2d,
+	0xf8, 0x3e, 0xe0, 0x7f, 0xe7, 0x7d, 0x89, 0xde, 0x9c, 0x36, 0xab, 0x05,
+	0x31, 0xb1, 0x27, 0x74, 0x6e, 0xc5, 0xa5, 0xd8, 0x30, 0x5f, 0x20, 0x66,
+	0xd4, 0x29, 0x83, 0x35, 0xca, 0x89, 0xfd, 0x27, 0xd4, 0x7f, 0xb5, 0xe7,
+	0x21, 0x8f, 0xa8, 0xec, 0xb5, 0x0e, 0x22, 0xa6, 0x80, 0xfe, 0xca, 0x18,
+	0x78, 0x63, 0x8f, 0x66, 0x10, 0xf9, 0x2b, 0x04, 0x21, 0xa0, 0x96, 0x5a,
+	0x0b, 0xc9, 0xbd, 0xe1, 0x11, 0xa3, 0x28, 0x8f, 0x46, 0x58, 0x36, 0x17,
+	0xd6, 0x98, 0x07, 0xc2, 0xb2, 0xb0, 0x26, 0x72, 0x69, 0x91, 0x71, 0x45,
+	0xfd, 0x41, 0xe6, 0x86, 0x33, 0x8f, 0x3c, 0x5b, 0x5a, 0x62, 0x8c, 0x61,
+	0x9c, 0xb8, 0x01, 0xba, 0x48, 0x7e, 0xe3, 0xab, 0xc8, 0x49, 0xa5, 0xf2,
+	0x20, 0x62, 0xa6, 0xac, 0xeb, 0x90, 0x29, 0x72, 0x19, 0x6b, 0xd4, 0x1d,
+	0xfa, 0x32, 0x41, 0x4f, 0x26, 0x2a, 0xc5, 0x45, 0xf6, 0x63, 0xa2, 0xa0,
+	0x85, 0x35, 0x76, 0x48, 0xd5, 0x3f, 0x37, 0xa8, 0xd8, 0xca, 0xff, 0xe1,
+	0xb6, 0x7d, 0x93, 0x27, 0xf7, 0xe9, 0x8c, 0x63, 0x37, 0x8b, 0x3d, 0x63,
+	0x77, 0x1d, 0xa8, 0x74, 0x48, 0xb5, 0x8f, 0x76, 0x49, 0xfd, 0xbf, 0xa0,
+	0x62, 0xed, 0x02, 0x78, 0x2a, 0x2e, 0x12, 0xe3, 0x86, 0x31, 0x2f, 0xe6,
+	0xcf, 0xa3, 0x5c, 0xff, 0x89, 0xcc, 0xed, 0x7f, 0x17, 0x74, 0x79, 0x71,
+	0x2d, 0xbf, 0x1f, 0xf1, 0x76, 0x46, 0x97, 0x3b, 0xef, 0x1a, 0xc7, 0xb3,
+	0xcc, 0x81, 0xef, 0xf8, 0x78, 0x92, 0x63, 0xec, 0x61, 0x81, 0xbe, 0x15,
+	0x03, 0xff, 0xfb, 0xa4, 0xb0, 0x12, 0x85, 0x1c, 0x90, 0x4b, 0x6b, 0xde,
+	0x5a, 0xac, 0x77, 0x4f, 0x42, 0x47, 0xfa, 0xa9, 0xa8, 0x44, 0x4e, 0xf5,
+	0x49, 0xf8, 0xeb, 0xdd, 0xd2, 0xf1, 0xf5, 0x21, 0x09, 0x7d, 0xdd, 0x64,
+	0x4e, 0x4f, 0x9c, 0x80, 0xbe, 0xe6, 0x65, 0x5c, 0x9e, 0x44, 0xde, 0x62,
+	0x5e, 0x57, 0x76, 0x6a, 0xf4, 0x4b, 0x08, 0x05, 0xab, 0xfe, 0x8c, 0x2d,
+	0x8f, 0xee, 0xff, 0x85, 0xea, 0x33, 0x01, 0xc3, 0x8b, 0xfe, 0xfc, 0x94,
+	0xd8, 0xcd, 0x77, 0x21, 0x6b, 0xc3, 0x79, 0xed, 0xd6, 0xa0, 0xa6, 0x1c,
+	0x56, 0xfd, 0xc2, 0x47, 0xf7, 0x7b, 0x35, 0x25, 0xf0, 0xb8, 0xe6, 0xa8,
+	0x9a, 0x12, 0xf1, 0x35, 0xcc, 0x79, 0xfd, 0xa2, 0x63, 0xaf, 0xbc, 0x0c,
+	0x42, 0x4f, 0xb7, 0x88, 0x7d, 0x08, 0x7e, 0xf1, 0x9c, 0x2c, 0xe9, 0x69,
+	0x4d, 0xad, 0x19, 0x7a, 0x86, 0x71, 0x8a, 0xf1, 0x8b, 0x36, 0x9e, 0x4c,
+	0x14, 0x61, 0x7f, 0xa1, 0xe7, 0x19, 0xa3, 0x3c, 0xdb, 0x9e, 0x68, 0x8b,
+	0x75, 0x0b, 0x95, 0x7b, 0xa0, 0x43, 0xd4, 0xf2, 0x16, 0xe2, 0x9c, 0x81,
+	0x5c, 0x6e, 0xf1, 0xda, 0xeb, 0xe1, 0xe5, 0x63, 0x31, 0x75, 0x5d, 0xac,
+	0x7a, 0x18, 0xdc, 0x5b, 0x9f, 0x75, 0x07, 0x62, 0x4c, 0x93, 0x74, 0x70,
+	0xdf, 0x01, 0x09, 0x3d, 0x17, 0x93, 0xf0, 0x73, 0xb4, 0x3f, 0x33, 0xe1,
+	0x40, 0x7e, 0x0b, 0x16, 0x31, 0xd0, 0x0a, 0xb0, 0xc5, 0xcd, 0xa2, 0xaf,
+	0x0c, 0xc0, 0x77, 0xcc, 0x78, 0x55, 0x92, 0x12, 0xaa, 0x45, 0xe5, 0xad,
+	0x45, 0x33, 0x41, 0x7b, 0x39, 0x6b, 0x61, 0xbc, 0xd9, 0x75, 0x79, 0x5d,
+	0x51, 0xc1, 0xb1, 0x2f, 0x87, 0x80, 0x19, 0x86, 0x6d, 0xbd, 0x47, 0x5e,
+	0x87, 0xbe, 0x73, 0x6a, 0xec, 0x66, 0xac, 0x0b, 0x1a, 0x9e, 0x33, 0xc1,
+	0x03, 0xd7, 0xfd, 0x1e, 0xd6, 0x54, 0xf8, 0xca, 0xd9, 0x60, 0x4d, 0xba,
+	0x48, 0xdb, 0xed, 0x83, 0xdd, 0xe1, 0xba, 0xd9, 0x21, 0xb9, 0xd9, 0x84,
+	0xe8, 0x8b, 0x9f, 0x91, 0xc1, 0xfd, 0xba, 0xc7, 0x8f, 0xe2, 0x91, 0x63,
+	0xec, 0xc7, 0xdd, 0xae, 0xfc, 0x51, 0x5f, 0x83, 0xcd, 0x3c, 0x48, 0x1d,
+	0x23, 0xf7, 0x23, 0x8f, 0x31, 0x8e, 0x85, 0x90, 0xc7, 0xb2, 0x4d, 0x4f,
+	0xef, 0xd5, 0x07, 0xfb, 0xe5, 0xc9, 0xe7, 0x68, 0x4f, 0xb8, 0xb7, 0x69,
+	0x53, 0x41, 0x0f, 0x98, 0xf7, 0x2c, 0x39, 0xf9, 0x6c, 0x50, 0x73, 0xb0,
+	0xbe, 0x32, 0xe3, 0x07, 0xc0, 0x8f, 0x7e, 0x27, 0xe3, 0x81, 0xae, 0x6c,
+	0x37, 0x6f, 0x59, 0x5e, 0xdd, 0x51, 0x49, 0xb0, 0x2f, 0x6e, 0xb0, 0x4e,
+	0xb3, 0xe3, 0x9e, 0xbc, 0x8b, 0x18, 0x2b, 0x35, 0x67, 0x11, 0xa3, 0x23,
+	0x72, 0x71, 0xd6, 0x86, 0xee, 0x3f, 0x0b, 0xba, 0x0e, 0x75, 0x11, 0x23,
+	0x5f, 0x9c, 0x75, 0x70, 0x7d, 0x48, 0xd5, 0x66, 0xa1, 0x3b, 0x61, 0xc7,
+	0xcd, 0x7e, 0xfa, 0x91, 0xaf, 0xa7, 0x84, 0x56, 0x5c, 0x32, 0xb5, 0x12,
+	0x62, 0xf6, 0x64, 0x8a, 0x39, 0xbe, 0x53, 0xf5, 0x4d, 0xd9, 0xaf, 0xc9,
+	0x2b, 0xbc, 0xb0, 0x4f, 0x2b, 0x56, 0x19, 0xe7, 0x0b, 0xf1, 0x0e, 0x21,
+	0x0e, 0x11, 0xad, 0x66, 0x51, 0x27, 0x9a, 0x9c, 0x57, 0xbd, 0x58, 0x11,
+	0xc7, 0x3d, 0x42, 0x19, 0x68, 0xf5, 0xea, 0x3e, 0xad, 0x50, 0x0d, 0xc9,
+	0xc5, 0x18, 0xe9, 0x4e, 0xa8, 0xfa, 0x7d, 0xbf, 0xb2, 0xb5, 0x1e, 0xe4,
+	0x12, 0xd8, 0x4c, 0xea, 0x93, 0xd8, 0x57, 0x8d, 0xc1, 0xa6, 0xa8, 0x7b,
+	0xea, 0x5d, 0xc5, 0x48, 0x5f, 0xf7, 0x3b, 0xe5, 0x4c, 0xd0, 0x51, 0x26,
+	0x7e, 0xef, 0xf4, 0xf1, 0xfb, 0xa2, 0x5f, 0x0f, 0x3d, 0x26, 0xac, 0x53,
+	0x16, 0x2a, 0xa4, 0x05, 0xf1, 0xd6, 0xdd, 0xc9, 0x96, 0x28, 0x47, 0x2f,
+	0xa6, 0x1c, 0x45, 0x1d, 0xa3, 0xaf, 0x19, 0xbe, 0x0d, 0xf0, 0x6f, 0x14,
+	0xf7, 0xbc, 0x5a, 0xaa, 0xd8, 0x8c, 0xc0, 0xdf, 0xa7, 0x21, 0x23, 0xea,
+	0x06, 0xfa, 0x5b, 0xe3, 0x99, 0x0a, 0xf4, 0xb7, 0xf6, 0xf2, 0xfb, 0x76,
+	0x1f, 0x63, 0xde, 0xb0, 0x3c, 0x89, 0xf1, 0x13, 0x67, 0x48, 0xcf, 0xb8,
+	0x8f, 0xc7, 0x12, 0x90, 0x09, 0x63, 0xfc, 0xa8, 0xbc, 0xd5, 0x70, 0x14,
+	0xfe, 0xdb, 0xb7, 0x7f, 0x46, 0xe6, 0xdd, 0x59, 0xe0, 0x3f, 0xc8, 0xdf,
+	0x48, 0xc0, 0x3f, 0xe3, 0x2a, 0x3e, 0x1e, 0xfe, 0x68, 0x35, 0x49, 0xd8,
+	0xcb, 0xd9, 0xf7, 0x5e, 0x67, 0xce, 0xde, 0x0d, 0xfc, 0xf5, 0x91, 0xd6,
+	0x0f, 0x79, 0xeb, 0xff, 0x17, 0xe8, 0xea, 0x73, 0xd8, 0x23, 0x0a, 0xfa,
+	0xfa, 0x29, 0xd3, 0x0f, 0x7b, 0x4e, 0xf7, 0x9e, 0xbb, 0xff, 0x3a, 0xe9,
+	0x32, 0xa4, 0x01, 0x8c, 0x50, 0x50, 0x79, 0x94, 0xb5, 0x62, 0xc4, 0xd7,
+	0xdf, 0x31, 0x60, 0x67, 0xae, 0x1b, 0xc4, 0xde, 0x4e, 0x29, 0xf4, 0x05,
+	0xf5, 0x27, 0x62, 0xf6, 0xe6, 0x78, 0x50, 0xcf, 0xf2, 0xf9, 0x94, 0x93,
+	0x2f, 0xb3, 0x4f, 0xc8, 0x5c, 0xc0, 0x31, 0x65, 0x87, 0x1f, 0x42, 0xb7,
+	0x09, 0xcf, 0x20, 0xdd, 0xf7, 0x29, 0xba, 0x1d, 0x45, 0x37, 0xfd, 0x8b,
+	0x67, 0x3a, 0xec, 0xa3, 0x05, 0x7d, 0x33, 0xae, 0x07, 0x4c, 0x00, 0x7d,
+	0x7f, 0x17, 0x3a, 0xfe, 0x4e, 0x05, 0x98, 0xa0, 0x02, 0x4c, 0x80, 0x3d,
+	0xbe, 0x0d, 0x1d, 0x7f, 0xab, 0x02, 0x4c, 0x50, 0x89, 0xfb, 0x3d, 0x0a,
+	0x9b, 0x98, 0xfe, 0x23, 0xda, 0x6e, 0xd0, 0x93, 0xb9, 0xda, 0x2e, 0x39,
+	0xce, 0xf9, 0x01, 0x36, 0x8e, 0xc2, 0x8e, 0x78, 0x6e, 0x11, 0xf4, 0x3b,
+	0xfc, 0x1c, 0xd1, 0xe0, 0xb9, 0x00, 0x72, 0x44, 0x83, 0xe7, 0x18, 0x23,
+	0xf1, 0x10, 0x30, 0x61, 0x48, 0xe2, 0xc2, 0x5e, 0xef, 0xdc, 0x18, 0xd6,
+	0x1a, 0x1d, 0x84, 0x27, 0x75, 0xa8, 0xbe, 0xd6, 0x71, 0xd5, 0x6f, 0x40,
+	0x5c, 0xa8, 0x06, 0xb5, 0x5b, 0x52, 0x26, 0x96, 0x88, 0x33, 0x65, 0xaf,
+	0x9e, 0x86, 0x0e, 0x5c, 0x62, 0xc3, 0xcd, 0xbe, 0xf4, 0x70, 0x1d, 0x7b,
+	0x16, 0x2d, 0x8f, 0xbe, 0xe3, 0xee, 0xd6, 0x33, 0x07, 0x10, 0x9f, 0xa7,
+	0xca, 0x09, 0x99, 0x2c, 0x7b, 0x98, 0x00, 0xf5, 0xcf, 0x55, 0xfd, 0x51,
+	0x9b, 0x7a, 0x80, 0xfe, 0x36, 0x6d, 0x23, 0x71, 0x3e, 0x45, 0x19, 0x53,
+	0xff, 0xd3, 0xaa, 0x67, 0x7d, 0xa0, 0xee, 0xf5, 0xe5, 0x27, 0x95, 0x2d,
+	0x84, 0x19, 0x67, 0xa8, 0x3f, 0xcf, 0x87, 0x61, 0x17, 0x79, 0x37, 0x90,
+	0x4b, 0x3b, 0x1e, 0xf9, 0xbc, 0x26, 0xd6, 0x4e, 0xe3, 0xb9, 0xb6, 0xf1,
+	0xcd, 0xfb, 0x3e, 0xbd, 0x88, 0x7d, 0x9b, 0x3d, 0x06, 0xc6, 0xa9, 0xad,
+	0xf1, 0x10, 0xea, 0x87, 0xb0, 0xba, 0x8f, 0x18, 0xde, 0x88, 0x49, 0xb6,
+	0x61, 0x89, 0x53, 0xe5, 0x3c, 0xf6, 0x2d, 0x18, 0x8f, 0x9e, 0x90, 0xec,
+	0x52, 0xaf, 0xe4, 0x62, 0x66, 0xca, 0x96, 0xbf, 0x27, 0x1b, 0xcb, 0x85,
+	0x04, 0xcf, 0x0d, 0x0b, 0x33, 0x1a, 0x9e, 0x7b, 0x18, 0xd7, 0xa4, 0xd9,
+	0x92, 0xc3, 0x65, 0xe6, 0x9d, 0x91, 0x78, 0x03, 0xf7, 0x72, 0xb3, 0xec,
+	0xd5, 0x54, 0x61, 0x93, 0x66, 0xa2, 0x8a, 0x78, 0xf0, 0x72, 0x99, 0xfb,
+	0x01, 0x1b, 0x95, 0xd9, 0xcf, 0x09, 0xee, 0x3f, 0x01, 0x1c, 0x88, 0x58,
+	0x1d, 0xf3, 0xe7, 0x28, 0x5e, 0x6d, 0x23, 0x2c, 0x81, 0xae, 0x3b, 0x65,
+	0xdd, 0x8f, 0xbb, 0xb5, 0xb2, 0xd7, 0x47, 0x39, 0x4b, 0x7a, 0xdc, 0xff,
+	0xd5, 0x5a, 0x8f, 0xa1, 0x16, 0xda, 0xe4, 0xf5, 0x8f, 0xb9, 0x8f, 0x81,
+	0xb0, 0x2b, 0x27, 0xdc, 0x40, 0x26, 0xbc, 0xcf, 0x31, 0x9e, 0x8d, 0xb6,
+	0x5a, 0x67, 0xad, 0xf6, 0x9e, 0xdf, 0xf5, 0xf4, 0xcc, 0xde, 0xb8, 0x2d,
+	0x63, 0xbd, 0xe6, 0xa0, 0x26, 0xf6, 0x7b, 0x66, 0x87, 0x46, 0xbc, 0x9e,
+	0xd9, 0xfc, 0xc8, 0xf6, 0x9e, 0xd9, 0xcf, 0x6f, 0xf3, 0x7a, 0x66, 0x17,
+	0x9d, 0x22, 0x3e, 0x5e, 0xcf, 0x6c, 0xf8, 0x76, 0xaf, 0x67, 0xf6, 0xf0,
+	0xed, 0x5e, 0xcf, 0xec, 0x91, 0x11, 0xaf, 0x67, 0xf6, 0xfb, 0xb7, 0x6f,
+	0xef, 0x99, 0x3d, 0x36, 0xb2, 0xbd, 0x67, 0x26, 0x13, 0xc8, 0x77, 0x13,
+	0x5b, 0x3d, 0xb3, 0xf2, 0xc8, 0xb5, 0x7b, 0x66, 0xaf, 0x06, 0x78, 0x1d,
+	0xfc, 0x8c, 0x81, 0x87, 0x14, 0xf0, 0xfa, 0x28, 0xf0, 0xfa, 0xaf, 0xeb,
+	0x59, 0x87, 0xc1, 0xe7, 0xcd, 0x7e, 0x5e, 0xb8, 0x1e, 0xdc, 0x7e, 0xbb,
+	0xff, 0x8c, 0xa0, 0xde, 0x4d, 0xf8, 0xb5, 0x0a, 0xb1, 0xfb, 0x1e, 0xbf,
+	0x66, 0xfb, 0x6b, 0xd1, 0xad, 0xf3, 0xec, 0xf6, 0xff, 0x37, 0xa0, 0xf4,
+	0x0e, 0xf0, 0x3c, 0xf9, 0x79, 0x0d, 0xb5, 0x1f, 0xf9, 0x47, 0xa2, 0xef,
+	0xbe, 0xe8, 0x7c, 0xd5, 0x22, 0xc6, 0x7f, 0x1c, 0xbe, 0x6a, 0xef, 0x0d,
+	0xc9, 0x3a, 0xfc, 0x96, 0x39, 0xea, 0xa4, 0x64, 0x31, 0x3f, 0xab, 0xe6,
+	0x27, 0x26, 0xb6, 0xe6, 0xa7, 0x26, 0xbe, 0xaa, 0x6a, 0x52, 0xf3, 0x5f,
+	0xe3, 0xf3, 0x0d, 0x65, 0xdf, 0x96, 0x87, 0xe1, 0x9d, 0x4a, 0x80, 0xb7,
+	0xc2, 0x3e, 0x76, 0x36, 0x1c, 0xdb, 0x9d, 0xc0, 0x33, 0xe6, 0x8b, 0xb6,
+	0x34, 0x14, 0x7e, 0x0f, 0xa5, 0xcd, 0x17, 0x73, 0xaa, 0x5e, 0x33, 0x9c,
+	0xbc, 0x1b, 0xd4, 0xdf, 0xa8, 0xa1, 0x86, 0x06, 0xd5, 0xf9, 0x9b, 0xbe,
+	0x36, 0x8c, 0x3c, 0xd6, 0x5e, 0x63, 0xb3, 0xae, 0xd6, 0xfd, 0xba, 0xda,
+	0x90, 0xbb, 0xf7, 0xb7, 0x63, 0x73, 0x99, 0xf8, 0x5b, 0x0a, 0x9b, 0xef,
+	0x42, 0x6d, 0x4e, 0xec, 0x4d, 0x1c, 0x43, 0x0c, 0x41, 0x7c, 0xce, 0x7e,
+	0x01, 0xeb, 0x19, 0xe6, 0x46, 0xd6, 0x37, 0x31, 0x7c, 0xf8, 0xbe, 0x41,
+	0x80, 0xd1, 0x3b, 0xfc, 0xf8, 0xce, 0xba, 0x28, 0xc0, 0x2a, 0x77, 0x75,
+	0x7b, 0xb5, 0xd1, 0x2e, 0xcd, 0xab, 0x3f, 0x13, 0xfe, 0x9c, 0xf0, 0x26,
+	0x16, 0x0e, 0x6f, 0x62, 0xe1, 0x6d, 0xe7, 0x30, 0xa2, 0xde, 0x6d, 0x50,
+	0xe7, 0x39, 0x3c, 0xdf, 0x11, 0x4d, 0x4f, 0xf3, 0x8c, 0x07, 0x38, 0xc7,
+	0xe2, 0x99, 0x0f, 0x7d, 0xe9, 0x41, 0x2d, 0x5b, 0x37, 0x10, 0xef, 0x99,
+	0x7f, 0x90, 0x6b, 0xcb, 0xc1, 0xd9, 0x62, 0xa0, 0x27, 0xca, 0x8e, 0x63,
+	0x7f, 0xaa, 0xa1, 0xe6, 0x4d, 0x45, 0xac, 0x43, 0xa0, 0x65, 0x0a, 0xff,
+	0x03, 0x99, 0xde, 0xa3, 0x72, 0x5f, 0x27, 0x6c, 0xf6, 0x78, 0x85, 0xd8,
+	0xf5, 0x71, 0x69, 0xf8, 0xf8, 0x75, 0x65, 0xc9, 0xc3, 0xae, 0xe1, 0xed,
+	0xd8, 0x35, 0xb5, 0x21, 0x1e, 0x8d, 0x07, 0x76, 0xa4, 0xd1, 0x70, 0x5e,
+	0x19, 0x22, 0x66, 0x25, 0x9d, 0xcc, 0x3d, 0xd3, 0x88, 0x81, 0xcc, 0x39,
+	0xcc, 0x37, 0xc4, 0xa5, 0xd7, 0xa2, 0x4f, 0x8d, 0x1d, 0xed, 0xb0, 0xa2,
+	0xf8, 0xcc, 0x83, 0x8e, 0x19, 0x3c, 0x93, 0x96, 0x85, 0xd3, 0x5f, 0xd1,
+	0x9c, 0xfa, 0x3c, 0xe8, 0x99, 0x42, 0xae, 0xa3, 0x2d, 0x15, 0x0c, 0xcf,
+	0x8e, 0xd6, 0x11, 0xf7, 0x5d, 0xc6, 0x02, 0xd4, 0xae, 0xa8, 0x47, 0xca,
+	0x8c, 0xbd, 0x3c, 0xeb, 0x0a, 0x62, 0x2e, 0xfb, 0x26, 0xa8, 0x59, 0x59,
+	0xbb, 0x2e, 0x72, 0xdf, 0xed, 0xba, 0xa8, 0xb9, 0xc4, 0x5d, 0x86, 0xb3,
+	0xbe, 0x46, 0xdc, 0xf8, 0x51, 0x31, 0xa4, 0xe1, 0xbc, 0x3c, 0x44, 0x1c,
+	0x79, 0x3d, 0xf8, 0xd1, 0x84, 0x34, 0xcd, 0x17, 0xd6, 0xf5, 0x76, 0xfc,
+	0xe8, 0x61, 0xc7, 0xcc, 0xda, 0x41, 0xac, 0xc9, 0xda, 0x8c, 0x38, 0xd1,
+	0x44, 0x98, 0x1b, 0xc4, 0xb3, 0x83, 0xe0, 0xc7, 0xc3, 0x8a, 0x59, 0x60,
+	0xc5, 0xbf, 0x03, 0xac, 0x58, 0x92, 0xf7, 0xa2, 0xc4, 0x8a, 0xb6, 0x8f,
+	0x15, 0x1d, 0xd8, 0x71, 0x7e, 0x9b, 0x1d, 0x6b, 0xaa, 0x07, 0xc5, 0x7b,
+	0x79, 0x60, 0xbd, 0xec, 0xa2, 0x79, 0x1d, 0xf8, 0x50, 0x93, 0x98, 0x3a,
+	0xaf, 0x0f, 0xb7, 0xad, 0x19, 0xe0, 0xc0, 0x7d, 0x0a, 0xdf, 0xdd, 0x57,
+	0xd9, 0x85, 0xda, 0x44, 0xe1, 0x3d, 0xff, 0x9c, 0x2f, 0x7c, 0xd5, 0xd9,
+	0x67, 0xb8, 0xed, 0xec, 0x73, 0x0b, 0x17, 0xe2, 0x39, 0xbf, 0xc7, 0x17,
+	0x81, 0xde, 0xfe, 0x07, 0x69, 0x82, 0x5f, 0xd1, 0x07, 0x34, 0xcf, 0x4f,
+	0xb6, 0xe1, 0xc3, 0xff, 0x7a, 0x15, 0x3e, 0x44, 0xce, 0x5a, 0x89, 0x49,
+	0x06, 0xd8, 0xd0, 0x5e, 0xe3, 0x5a, 0xf4, 0xe5, 0x51, 0xe9, 0x00, 0x7f,
+	0x9d, 0x8b, 0x7d, 0xc0, 0x44, 0xdd, 0x12, 0x05, 0x36, 0x8a, 0x28, 0x6c,
+	0x34, 0x44, 0x0c, 0x33, 0x7c, 0x18, 0x98, 0xa6, 0xb1, 0x89, 0x8f, 0xcc,
+	0xd4, 0x0f, 0xa0, 0x97, 0x87, 0x95, 0xad, 0x8c, 0xcb, 0x53, 0x88, 0x9d,
+	0x1d, 0x6b, 0xc0, 0x75, 0x2b, 0x1e, 0x6e, 0x8a, 0x5c, 0x85, 0x9b, 0x8e,
+	0xec, 0x88, 0x9b, 0x54, 0xbf, 0x7e, 0x9c, 0x32, 0x79, 0xdd, 0xf5, 0xfa,
+	0xf5, 0x97, 0x5c, 0xaf, 0x5f, 0xff, 0xba, 0xdb, 0xde, 0xaf, 0xbf, 0x49,
+	0x8a, 0x86, 0x69, 0x5f, 0x94, 0xab, 0xfa, 0xf5, 0x33, 0xec, 0x7f, 0x57,
+	0xbb, 0xbc, 0xbe, 0x7c, 0xb7, 0xdf, 0xaf, 0x37, 0xa5, 0xb8, 0x6d, 0xdc,
+	0x90, 0xb7, 0xad, 0xa0, 0x5f, 0xff, 0x2f, 0x30, 0xd6, 0x83, 0x3d, 0xb6,
+	0xf7, 0xea, 0x2f, 0xb9, 0xec, 0xd5, 0xc7, 0x38, 0xcf, 0xef, 0xd5, 0x73,
+	0x1e, 0x6a, 0x78, 0x97, 0x7d, 0xfa, 0x9b, 0x21, 0x8b, 0x7e, 0xc8, 0xa1,
+	0x4f, 0x3a, 0x9e, 0x8b, 0x73, 0x8e, 0xea, 0xcf, 0x5f, 0x74, 0x63, 0x78,
+	0xce, 0xeb, 0xa3, 0x1f, 0x86, 0x5d, 0x1d, 0xd9, 0xec, 0xcf, 0x7b, 0x7b,
+	0xbc, 0xe1, 0x6e, 0x5f, 0x7f, 0xfb, 0x3a, 0x03, 0xfe, 0x3a, 0x31, 0xac,
+	0x13, 0xbf, 0x6a, 0x9d, 0xad, 0x7e, 0xfc, 0x1b, 0xae, 0xd7, 0x8b, 0x77,
+	0x4e, 0x8b, 0xdd, 0x81, 0x98, 0xfc, 0xe2, 0xd0, 0x8d, 0xfe, 0x1a, 0x9b,
+	0xbd, 0x78, 0xc6, 0x0e, 0xe0, 0x75, 0xc6, 0x0f, 0x3e, 0xff, 0xff, 0xbe,
+	0x17, 0xcf, 0x3e, 0xbc, 0x77, 0x9e, 0x42, 0xff, 0x04, 0x2e, 0x7f, 0xd6,
+	0xeb, 0xc1, 0x4f, 0x54, 0x82, 0xde, 0x3a, 0xeb, 0xc6, 0xe0, 0xbd, 0x8c,
+	0xc1, 0xc4, 0x71, 0xa1, 0xad, 0x90, 0x3e, 0xae, 0xdb, 0x23, 0x73, 0x0a,
+	0x17, 0xc1, 0xa6, 0x92, 0xd7, 0xc6, 0xc6, 0xb5, 0xc5, 0x00, 0x1b, 0xc7,
+	0x14, 0x36, 0xae, 0xad, 0x05, 0xd8, 0x38, 0x73, 0x0d, 0x6c, 0xfc, 0xdf,
+	0xba, 0xbc, 0xf8, 0x1f, 0x95, 0x82, 0xc2, 0xc6, 0xd7, 0x7a, 0xc7, 0x86,
+	0xf7, 0xba, 0x89, 0x03, 0xc4, 0x3b, 0x17, 0xef, 0xbb, 0x86, 0xaf, 0x05,
+	0x78, 0x99, 0xb9, 0xbe, 0x5f, 0x66, 0x9e, 0xdb, 0xc2, 0xcb, 0x1e, 0x26,
+	0x36, 0x13, 0x47, 0x55, 0x2e, 0x04, 0x3e, 0x68, 0xb2, 0xef, 0x7d, 0x48,
+	0xd9, 0x6e, 0xa9, 0x32, 0xab, 0x70, 0x59, 0x1e, 0xb5, 0x6d, 0x51, 0xe1,
+	0x60, 0x62, 0xe0, 0x2e, 0x91, 0xbe, 0x20, 0x17, 0x05, 0x18, 0x33, 0xec,
+	0xc7, 0x67, 0x9e, 0x29, 0xbc, 0x1d, 0xca, 0x10, 0x03, 0xbb, 0x41, 0x8d,
+	0x40, 0x39, 0x0f, 0x23, 0x76, 0x19, 0xbe, 0xac, 0x3c, 0x9f, 0xdd, 0xb7,
+	0xff, 0x87, 0xef, 0xdb, 0x06, 0xe3, 0x5a, 0x80, 0x11, 0x51, 0x03, 0x55,
+	0xc6, 0xd5, 0xbb, 0x0e, 0x1e, 0x46, 0xf4, 0xf0, 0x61, 0xd6, 0x9d, 0x01,
+	0x4e, 0x9e, 0x95, 0x09, 0xe0, 0xf3, 0xf5, 0xdf, 0x65, 0xef, 0x29, 0xc0,
+	0x44, 0x36, 0xfe, 0xb7, 0xf7, 0xa2, 0x78, 0xdd, 0xa1, 0xce, 0xfe, 0xce,
+	0x0f, 0x45, 0xdb, 0xc6, 0xff, 0x3e, 0xe2, 0x37, 0xea, 0xa1, 0x0a, 0x73,
+	0x1d, 0xb1, 0xd0, 0x6f, 0x40, 0x07, 0x63, 0xd7, 0xc0, 0x42, 0x57, 0xe7,
+	0x26, 0xe6, 0xcb, 0xad, 0xbc, 0xe4, 0x6c, 0xe6, 0x25, 0xee, 0xf1, 0xeb,
+	0x72, 0x27, 0xc7, 0x6c, 0x23, 0x62, 0x4d, 0xe1, 0x33, 0x8f, 0x7c, 0xbd,
+	0x3d, 0x37, 0xcd, 0x5d, 0x47, 0x6e, 0x9a, 0x50, 0xb9, 0x89, 0xf4, 0xa2,
+	0x9e, 0x83, 0x4c, 0xbe, 0x0b, 0x59, 0x7e, 0x07, 0xb4, 0xff, 0x11, 0xf8,
+	0xf9, 0x43, 0x60, 0xac, 0x6f, 0x03, 0x63, 0x7d, 0xab, 0xd2, 0xfe, 0x0e,
+	0xc3, 0xb8, 0xb0, 0x0e, 0xf4, 0xea, 0x66, 0x0f, 0xc3, 0x1f, 0x86, 0x57,
+	0x35, 0xca, 0x86, 0x33, 0x57, 0x1e, 0x31, 0xe6, 0xbd, 0xf3, 0xd2, 0x44,
+	0x4e, 0xd2, 0x88, 0x11, 0xcc, 0x15, 0xea, 0x3a, 0xce, 0x7e, 0x25, 0xb1,
+	0x42, 0x5d, 0xd5, 0x93, 0x43, 0x52, 0x6d, 0x78, 0xf8, 0x6a, 0xe1, 0x8c,
+	0xb7, 0xc6, 0x9c, 0x8f, 0xaf, 0xf2, 0x3e, 0xbe, 0xca, 0x35, 0x36, 0x12,
+	0xac, 0xcf, 0x17, 0x52, 0xdb, 0x31, 0xd5, 0x61, 0x1f, 0x53, 0xcd, 0xff,
+	0x15, 0x31, 0x15, 0xf7, 0xca, 0xe3, 0x99, 0xc9, 0xa5, 0x84, 0x1c, 0x80,
+	0x7c, 0x27, 0xca, 0xd4, 0x93, 0x69, 0xc3, 0x6e, 0x3e, 0x44, 0x57, 0x8e,
+	0xd2, 0x4b, 0x28, 0xe9, 0xe9, 0x69, 0x12, 0x7a, 0x9a, 0xf8, 0xb5, 0xf5,
+	0x8d, 0x1a, 0x33, 0xde, 0x1c, 0x8b, 0xe2, 0xf3, 0x7f, 0xaa, 0x23, 0xd2,
+	0x4f, 0x3d, 0x5d, 0x8d, 0xb9, 0xae, 0x07, 0x7b, 0x6d, 0xc7, 0x5d, 0xb6,
+	0xc2, 0x5d, 0x1d, 0xfe, 0x9c, 0x99, 0x89, 0x49, 0xe8, 0xf0, 0xdf, 0x63,
+	0xce, 0x9f, 0xc0, 0xb7, 0x7e, 0x88, 0x78, 0xfd, 0xef, 0xa0, 0x8b, 0x7f,
+	0x8b, 0xda, 0xe0, 0x55, 0xe4, 0x9f, 0x1f, 0x60, 0x6c, 0x0b, 0xc7, 0xa8,
+	0x33, 0xfa, 0xd1, 0x8c, 0x95, 0x98, 0x28, 0xba, 0x89, 0x09, 0x0f, 0x7f,
+	0xfc, 0xea, 0x6f, 0x66, 0xac, 0x29, 0xbe, 0xc3, 0x00, 0xf9, 0xfe, 0xf9,
+	0xdd, 0x73, 0x0a, 0x7b, 0x04, 0x98, 0x23, 0x67, 0x73, 0xff, 0x92, 0x9b,
+	0x9a, 0xa8, 0xe1, 0xe3, 0x61, 0x9b, 0xef, 0xd9, 0x1e, 0xb6, 0x59, 0xfa,
+	0xeb, 0xd4, 0xbb, 0x87, 0x6b, 0x1e, 0x4e, 0xd3, 0xaf, 0xeb, 0xc0, 0x1c,
+	0x35, 0xf8, 0x64, 0xa1, 0x69, 0xab, 0xcf, 0xf1, 0x8a, 0x6d, 0x46, 0x20,
+	0x1f, 0xf6, 0x58, 0x4f, 0xd1, 0x0b, 0x5d, 0xd3, 0x28, 0x13, 0x81, 0xba,
+	0x66, 0xfc, 0x9f, 0xf9, 0xd7, 0x4f, 0xfb, 0xd7, 0x4f, 0xf9, 0xd7, 0x27,
+	0x91, 0x77, 0x9f, 0x54, 0xb9, 0x93, 0xe3, 0x1c, 0x83, 0x72, 0x5d, 0xac,
+	0x85, 0xf5, 0xce, 0x8e, 0xfe, 0xb4, 0x55, 0x8d, 0x79, 0xfe, 0x5c, 0x68,
+	0x3a, 0xf8, 0xfc, 0x63, 0x7c, 0x0e, 0xe2, 0x33, 0x8d, 0xcf, 0x63, 0xf8,
+	0x6c, 0xca, 0x54, 0xcb, 0x56, 0xa8, 0xa3, 0x61, 0xc9, 0x62, 0xbc, 0x88,
+	0x3a, 0x34, 0x93, 0x7a, 0x44, 0x8a, 0xf5, 0x92, 0x94, 0x96, 0x34, 0xe9,
+	0xb6, 0xd2, 0x52, 0xaa, 0x1f, 0x93, 0xe3, 0x4b, 0xde, 0xb9, 0x61, 0x57,
+	0xda, 0xc6, 0xdc, 0x96, 0x3c, 0x9c, 0x7a, 0x5c, 0xf4, 0x3b, 0x8f, 0x61,
+	0x9e, 0xe8, 0xc5, 0xd1, 0xdb, 0xd4, 0xf9, 0x58, 0x3d, 0xe5, 0xc9, 0xf8,
+	0x80, 0x65, 0x9b, 0xc8, 0x5b, 0xc3, 0x4f, 0x62, 0xed, 0x8c, 0x7a, 0x3f,
+	0x30, 0x2d, 0x27, 0x4e, 0x6f, 0xec, 0xf5, 0x62, 0xa9, 0x69, 0xbc, 0x81,
+	0x4d, 0xeb, 0xe0, 0xc3, 0x46, 0xec, 0x9b, 0x82, 0x9d, 0x1f, 0x71, 0xc3,
+	0xda, 0x04, 0x62, 0xe0, 0x84, 0xab, 0x6a, 0x3d, 0xc4, 0x2a, 0xc3, 0x49,
+	0x9e, 0x8a, 0xe1, 0x9a, 0xef, 0xd0, 0x20, 0x0f, 0x2a, 0x5b, 0xd9, 0x00,
+	0x8e, 0xd1, 0x54, 0xbf, 0xaf, 0xb4, 0x79, 0x0e, 0xa4, 0xfa, 0xff, 0x4e,
+	0x32, 0xa9, 0x4b, 0x7e, 0x8c, 0x38, 0xd6, 0x56, 0xb9, 0xa8, 0x51, 0xb6,
+	0x3f, 0xc1, 0xda, 0xf0, 0x75, 0x61, 0x5e, 0xbb, 0x07, 0xf3, 0x06, 0x10,
+	0x7f, 0x71, 0xaf, 0x09, 0x5e, 0x4e, 0x93, 0x57, 0x3e, 0x83, 0xda, 0xb6,
+	0xfa, 0x49, 0xbd, 0xb8, 0xe4, 0xd7, 0x44, 0xaa, 0x76, 0x48, 0x48, 0x7d,
+	0xf3, 0xcc, 0xc9, 0xeb, 0x93, 0xd4, 0xdd, 0x00, 0x3b, 0xf4, 0x60, 0x0e,
+	0xeb, 0x08, 0xc8, 0xc8, 0x3b, 0x27, 0x53, 0x67, 0x64, 0x45, 0xf7, 0x93,
+	0x7a, 0x69, 0x29, 0x83, 0x71, 0xf6, 0xa4, 0xf1, 0xbd, 0xaa, 0x2b, 0xec,
+	0x7f, 0x31, 0x74, 0x58, 0x1a, 0xd5, 0x16, 0xe8, 0x45, 0x8e, 0xdd, 0x7b,
+	0x58, 0x6a, 0xd5, 0x79, 0x79, 0xa1, 0xba, 0xb1, 0x0b, 0xd8, 0x09, 0x32,
+	0x25, 0xfd, 0x3d, 0x3e, 0xfd, 0x54, 0x41, 0x30, 0x0e, 0x79, 0x9e, 0x2e,
+	0xc4, 0xbd, 0xba, 0x96, 0xb8, 0xed, 0x24, 0xdf, 0x1b, 0x8c, 0xf3, 0x3d,
+	0xbe, 0x23, 0xcb, 0xb4, 0xc9, 0x8b, 0xf7, 0x2e, 0x58, 0x9f, 0x92, 0x0b,
+	0xa9, 0x3d, 0xb2, 0x91, 0x52, 0xf5, 0x2f, 0xf1, 0x01, 0x7c, 0xdb, 0x34,
+	0xd6, 0xe5, 0x6e, 0x39, 0x01, 0x3f, 0xbd, 0x90, 0xca, 0xa9, 0xf3, 0x9b,
+	0xe3, 0x4d, 0x62, 0xfb, 0x79, 0xd6, 0x54, 0xb2, 0xae, 0x7a, 0x60, 0xad,
+	0xd6, 0x64, 0x8a, 0xb1, 0xa7, 0x43, 0x36, 0x14, 0xd6, 0xf2, 0x7a, 0xe3,
+	0x1b, 0xb3, 0x9e, 0x6f, 0x84, 0x94, 0xbd, 0xff, 0x1b, 0xd0, 0x71, 0x0c,
+	0x36, 0x1b, 0x51, 0x73, 0x42, 0xe9, 0x4e, 0x7f, 0x8e, 0xc2, 0x66, 0x6d,
+	0x73, 0xc6, 0xc7, 0x33, 0x96, 0xf1, 0xa9, 0x8c, 0x35, 0x36, 0xe1, 0xf5,
+	0x53, 0x4c, 0xc3, 0xd6, 0x2e, 0xb6, 0x82, 0xf7, 0x4d, 0xa6, 0xe0, 0x4f,
+	0x2f, 0x6d, 0x62, 0x63, 0x18, 0xe7, 0xf3, 0x0b, 0xd0, 0x6b, 0x58, 0x3a,
+	0x4e, 0xb5, 0xee, 0x99, 0x4b, 0x8d, 0x24, 0x8e, 0x08, 0xdf, 0x30, 0x62,
+	0xfd, 0x6c, 0x82, 0xda, 0x05, 0xe4, 0xc3, 0x2b, 0xc4, 0x08, 0xc3, 0xe7,
+	0xe5, 0xca, 0x3d, 0x99, 0xd4, 0x7e, 0xad, 0x36, 0x8b, 0xea, 0xe4, 0xf9,
+	0x29, 0xe6, 0xd3, 0xa3, 0xec, 0x7d, 0x86, 0x4e, 0xbd, 0xad, 0x39, 0x65,
+	0xf5, 0x8e, 0x39, 0xf4, 0xd2, 0xd2, 0xe6, 0x21, 0x37, 0x3c, 0x3f, 0xc3,
+	0x04, 0xa8, 0x5b, 0x83, 0x09, 0x47, 0x72, 0xec, 0x71, 0x49, 0x7e, 0x59,
+	0xf6, 0x65, 0x10, 0x47, 0xed, 0x99, 0x0e, 0x99, 0x6f, 0x18, 0xce, 0xe0,
+	0xe2, 0x51, 0xac, 0x31, 0x87, 0xb5, 0xa6, 0x51, 0x83, 0xcc, 0x22, 0x27,
+	0x53, 0xae, 0x8c, 0xd5, 0x0f, 0x43, 0x46, 0x37, 0xf1, 0xcc, 0x78, 0x3c,
+	0x27, 0xe6, 0x4c, 0x41, 0xad, 0xfb, 0x9e, 0x96, 0x1f, 0xfd, 0x18, 0x72,
+	0x5a, 0x58, 0x0e, 0x24, 0x45, 0x9f, 0x4e, 0x86, 0xdf, 0x9f, 0xb3, 0x38,
+	0x16, 0xe5, 0x98, 0x8e, 0xb1, 0xf0, 0xe7, 0x92, 0x51, 0x3d, 0x93, 0x34,
+	0xc7, 0xd9, 0xef, 0x0d, 0x59, 0x73, 0x12, 0x7a, 0xbe, 0xb7, 0x47, 0xba,
+	0xa7, 0xa5, 0x77, 0xd5, 0x1c, 0x7f, 0x1d, 0xb4, 0x84, 0x55, 0x6c, 0x9f,
+	0x13, 0xdd, 0x1f, 0xef, 0xd9, 0x1c, 0x0f, 0xfb, 0xe3, 0xd3, 0xd2, 0xbd,
+	0x3a, 0x62, 0xbc, 0x29, 0x87, 0xb1, 0x66, 0x48, 0x2e, 0xa1, 0xa6, 0xb1,
+	0x86, 0xe6, 0xe0, 0x73, 0x0f, 0x91, 0x96, 0x83, 0xc0, 0x14, 0xf0, 0x09,
+	0xd4, 0xd9, 0xd6, 0x27, 0xe5, 0x4b, 0x46, 0x97, 0xe4, 0x55, 0x4d, 0x1b,
+	0xf6, 0x7a, 0xa5, 0xb0, 0xf3, 0x5b, 0x87, 0xa6, 0x77, 0x7b, 0xfd, 0x00,
+	0x9e, 0x67, 0x8c, 0x62, 0xec, 0x4a, 0x6b, 0xc5, 0xe2, 0x18, 0xef, 0x5d,
+	0x69, 0xd5, 0xad, 0x11, 0x23, 0xab, 0x45, 0xfd, 0x73, 0xed, 0x79, 0xc5,
+	0x7b, 0xa1, 0x3a, 0x68, 0xd4, 0xe4, 0x16, 0x2d, 0x7b, 0x03, 0xf2, 0x83,
+	0xfb, 0x19, 0xcc, 0xbd, 0xd2, 0xca, 0x58, 0xf3, 0xaa, 0x7f, 0x5f, 0x93,
+	0xe0, 0x9a, 0xeb, 0x8c, 0x18, 0x93, 0xea, 0xd9, 0x11, 0xe3, 0x84, 0xd6,
+	0xfe, 0xac, 0xa1, 0x4d, 0x6e, 0x7b, 0xb6, 0x5b, 0xc9, 0x28, 0x64, 0x79,
+	0x73, 0x4a, 0xd5, 0x69, 0x79, 0xda, 0xe5, 0xbc, 0x2b, 0xad, 0xac, 0x15,
+	0xd1, 0x4e, 0xdc, 0xc0, 0x18, 0xc8, 0xb9, 0x97, 0xaf, 0xda, 0x87, 0xd7,
+	0xd7, 0xda, 0xe3, 0x5d, 0xd9, 0xbe, 0xc7, 0x2e, 0x35, 0xe7, 0x82, 0x9a,
+	0x13, 0x56, 0xb2, 0xde, 0xbe, 0xcf, 0xcf, 0x64, 0xfb, 0x3e, 0xdd, 0x9b,
+	0x3c, 0x97, 0xb0, 0xe6, 0x93, 0x98, 0x5b, 0x76, 0x07, 0xe3, 0x75, 0xb9,
+	0xdc, 0xca, 0x5b, 0x17, 0xe5, 0xc2, 0xe6, 0xda, 0xbf, 0xc2, 0x75, 0x3b,
+	0x4d, 0xbf, 0xf2, 0x69, 0xe4, 0x77, 0x8e, 0x3d, 0xaa, 0xe4, 0xbd, 0xdb,
+	0x1a, 0x3c, 0x58, 0xd3, 0xcc, 0xf1, 0x9f, 0x09, 0x75, 0x75, 0x44, 0xc5,
+	0x98, 0xdb, 0xa0, 0xa7, 0x7d, 0xcf, 0xc0, 0x67, 0x47, 0x6d, 0x35, 0xe7,
+	0x92, 0x35, 0x2d, 0xfb, 0x4e, 0x0d, 0x1a, 0x97, 0x10, 0xdb, 0x9c, 0x18,
+	0xaf, 0x51, 0x2b, 0x59, 0x7c, 0x27, 0xfe, 0x4e, 0xd6, 0x01, 0xd0, 0xe5,
+	0xe0, 0xf0, 0xcf, 0xe4, 0xa8, 0x9c, 0xa8, 0x1c, 0x43, 0x2e, 0x9c, 0x93,
+	0xe1, 0x67, 0x90, 0x37, 0x2a, 0x05, 0x3c, 0x49, 0x9f, 0xf5, 0x72, 0xe0,
+	0x9c, 0x7a, 0xcf, 0xfc, 0x24, 0xea, 0x64, 0xd8, 0x6e, 0x79, 0x70, 0x78,
+	0x05, 0xcf, 0xbc, 0xa0, 0x70, 0xa8, 0x2b, 0x0d, 0xf8, 0x40, 0xe2, 0xf9,
+	0x3d, 0xb2, 0xfb, 0x01, 0xda, 0x22, 0x32, 0xfd, 0x6d, 0x11, 0xf5, 0xee,
+	0xbd, 0x6e, 0x75, 0x8a, 0xec, 0xa5, 0xdd, 0x34, 0x61, 0x63, 0x73, 0xde,
+	0x99, 0xd6, 0xb6, 0x6b, 0x73, 0xe6, 0xa2, 0xac, 0x2a, 0xfb, 0xbb, 0x7d,
+	0xd5, 0xfb, 0x3f, 0xba, 0x8a, 0x72, 0x38, 0x39, 0x2d, 0x77, 0xac, 0x7a,
+	0xf6, 0x56, 0x5a, 0x3a, 0xaa, 0xe4, 0x3a, 0xa7, 0xe4, 0xda, 0x92, 0xc3,
+	0x29, 0xca, 0x9c, 0xbc, 0xf0, 0xbd, 0x40, 0x4f, 0x16, 0xf7, 0xfb, 0xf6,
+	0x33, 0xf8, 0x0c, 0x7f, 0x57, 0x42, 0xd9, 0xb0, 0xee, 0xbe, 0x7f, 0x37,
+	0xcf, 0x59, 0xf7, 0xad, 0x92, 0xcf, 0x1b, 0xb7, 0xf1, 0xf9, 0x14, 0x62,
+	0xea, 0xd0, 0x90, 0xc7, 0xeb, 0xab, 0x4b, 0x1f, 0xce, 0xeb, 0x37, 0x37,
+	0x79, 0x0d, 0x49, 0x43, 0xd5, 0xaf, 0x5d, 0xbd, 0xd2, 0x8d, 0xa8, 0x07,
+	0x7b, 0xf8, 0x09, 0xf6, 0x9a, 0x12, 0xd2, 0xe0, 0xed, 0xb7, 0xe1, 0x92,
+	0x96, 0x80, 0x76, 0xd2, 0x73, 0x9f, 0xaf, 0x2f, 0xee, 0x7f, 0x74, 0xc7,
+	0x7b, 0x97, 0xc4, 0x70, 0x86, 0x31, 0xa6, 0x2b, 0x9d, 0x65, 0x7d, 0xff,
+	0x9a, 0x16, 0x5d, 0xe9, 0xcc, 0xde, 0xd4, 0xd9, 0xeb, 0xd0, 0x59, 0x5d,
+	0x7e, 0x13, 0xbc, 0xc0, 0x9f, 0x9f, 0x19, 0x31, 0x0e, 0x13, 0x5b, 0x18,
+	0x5c, 0x0f, 0x31, 0xd4, 0xd7, 0x5d, 0xc7, 0x47, 0xd0, 0xdd, 0x9b, 0xa2,
+	0xf4, 0x07, 0x7e, 0x90, 0x7f, 0xd4, 0xf3, 0x8c, 0x61, 0xe4, 0xa9, 0x43,
+	0xf9, 0x3e, 0x69, 0x53, 0x67, 0xfc, 0x33, 0x9e, 0x3e, 0x95, 0x6f, 0xfb,
+	0xfa, 0xcc, 0xcd, 0x50, 0x67, 0xe6, 0x6e, 0x4f, 0x7f, 0x9d, 0x6a, 0xce,
+	0x62, 0x32, 0xa1, 0xfc, 0xda, 0x1a, 0xba, 0x65, 0x37, 0x75, 0xf8, 0xb4,
+	0xeb, 0xfd, 0x2f, 0xbb, 0xd3, 0xb2, 0xe8, 0x7e, 0x98, 0x1e, 0x3d, 0x1d,
+	0x4e, 0x88, 0xe7, 0x3f, 0x57, 0xeb, 0x4f, 0x5f, 0x0d, 0x2b, 0x5b, 0x9d,
+	0x80, 0xec, 0x4e, 0x56, 0x3e, 0xe6, 0xdb, 0xb7, 0xc7, 0xeb, 0xd0, 0x87,
+	0xf0, 0x7a, 0xb8, 0x3c, 0x68, 0xbc, 0x8d, 0xb5, 0x26, 0x15, 0x86, 0x8b,
+	0x88, 0xe3, 0xf3, 0x9a, 0xd8, 0xe4, 0x35, 0xa0, 0xcd, 0x9b, 0x97, 0x65,
+	0x5d, 0xea, 0x32, 0x3e, 0x3d, 0xaa, 0xde, 0xbf, 0x7f, 0xa3, 0xcc, 0xb8,
+	0x0c, 0xcc, 0x13, 0xeb, 0x93, 0x4b, 0x8d, 0x84, 0x5c, 0x22, 0x96, 0x18,
+	0xc3, 0x7f, 0xf7, 0x98, 0x9f, 0x9b, 0xa3, 0xf2, 0x66, 0xb9, 0xbd, 0x56,
+	0x1c, 0x95, 0xd7, 0xcb, 0x41, 0xbd, 0x48, 0x2c, 0xcb, 0xfc, 0x3f, 0x27,
+	0x6f, 0x2d, 0x0d, 0xca, 0xfa, 0x0c, 0xf2, 0xf8, 0x10, 0x65, 0x30, 0x62,
+	0x7c, 0x46, 0xfd, 0xbe, 0xe2, 0x4a, 0xeb, 0xbc, 0x85, 0x75, 0x97, 0x5b,
+	0x72, 0x84, 0xe7, 0xd0, 0xfc, 0xde, 0xf8, 0x04, 0x56, 0xe1, 0xbc, 0x3e,
+	0xa9, 0x2d, 0xa3, 0x2e, 0x2f, 0x73, 0x5d, 0xca, 0x69, 0x5a, 0x7d, 0x9f,
+	0xc4, 0x3e, 0xf7, 0xf3, 0xbd, 0xf4, 0x18, 0x75, 0x71, 0xa5, 0xb5, 0x61,
+	0xf1, 0x1c, 0x72, 0x4e, 0x1a, 0xd0, 0xd7, 0x97, 0x93, 0x3c, 0x27, 0xcf,
+	0x0b, 0x7f, 0x9f, 0x52, 0x6b, 0xcc, 0xa0, 0x16, 0xb8, 0xd2, 0x5a, 0xb0,
+	0x9e, 0x52, 0x7a, 0x6a, 0x54, 0x1f, 0xf0, 0xc7, 0x79, 0xcd, 0x7b, 0x86,
+	0xb3, 0x6f, 0x88, 0xf5, 0xe7, 0x03, 0xc8, 0xff, 0xac, 0x3d, 0x89, 0xb7,
+	0x28, 0x8b, 0x04, 0x6a, 0x5c, 0xae, 0xc5, 0xdf, 0x04, 0x25, 0x87, 0xf3,
+	0x32, 0x09, 0x7a, 0x80, 0xcb, 0x5c, 0xc6, 0xfd, 0x5b, 0x65, 0x23, 0xe6,
+	0xc5, 0x77, 0xbe, 0xaf, 0xb5, 0x81, 0x98, 0xbf, 0xb1, 0x19, 0xf3, 0xfb,
+	0x71, 0x6d, 0x38, 0xa9, 0xa1, 0xff, 0x84, 0xf5, 0xd9, 0x77, 0x61, 0xcc,
+	0x1f, 0xc7, 0x7c, 0x8e, 0xf5, 0x49, 0x69, 0x59, 0x6c, 0xf6, 0x99, 0x6a,
+	0xc2, 0x77, 0x31, 0x72, 0xb2, 0xd8, 0x18, 0x8c, 0x9f, 0xd7, 0x1c, 0xf5,
+	0xce, 0x46, 0x72, 0x88, 0x7d, 0xb7, 0x3e, 0x69, 0x2c, 0x4b, 0x22, 0x94,
+	0x7e, 0x48, 0xdc, 0x86, 0x87, 0xb9, 0x17, 0x34, 0xf6, 0xdf, 0x6c, 0x69,
+	0x6c, 0x9f, 0x63, 0x84, 0xd2, 0x87, 0xe4, 0x0f, 0xfc, 0x39, 0x8e, 0x9a,
+	0xf3, 0x1f, 0x76, 0xf3, 0xec, 0xab, 0xe1, 0xf6, 0x82, 0x06, 0xd2, 0x76,
+	0x63, 0xfb, 0xbe, 0x89, 0xad, 0x7d, 0xb9, 0x27, 0x6a, 0x98, 0xbd, 0x36,
+	0xf6, 0x7d, 0x15, 0xcf, 0x3c, 0x04, 0x3a, 0xae, 0x84, 0x74, 0xeb, 0x21,
+	0x29, 0x36, 0xae, 0xde, 0xa3, 0x9d, 0x06, 0x3e, 0xc3, 0xf5, 0xb9, 0xcf,
+	0x21, 0xd0, 0x77, 0x45, 0xd3, 0xad, 0x43, 0x90, 0xa5, 0xb7, 0x47, 0xe8,
+	0x39, 0xd3, 0xf8, 0xa1, 0x0c, 0x89, 0xbe, 0xa2, 0x29, 0xf9, 0xeb, 0xb5,
+	0x51, 0x38, 0xc4, 0x94, 0x74, 0xaf, 0xcd, 0x4a, 0x68, 0x8d, 0x3d, 0x00,
+	0xda, 0x22, 0xf5, 0xb8, 0x0b, 0x7e, 0x2c, 0x76, 0xd8, 0x22, 0xde, 0x67,
+	0x1f, 0x77, 0xb5, 0x57, 0x7a, 0x89, 0xf7, 0x59, 0x0f, 0x1c, 0xc4, 0x7f,
+	0xd6, 0x04, 0x2f, 0xb5, 0x32, 0xa9, 0x77, 0x54, 0xde, 0xcc, 0x37, 0x78,
+	0xdf, 0x4c, 0x88, 0xf0, 0x1e, 0xe3, 0x43, 0x9f, 0x44, 0xbe, 0x3e, 0x8c,
+	0x98, 0x90, 0x03, 0x76, 0xc6, 0xba, 0xa7, 0x86, 0x24, 0xec, 0xbd, 0xeb,
+	0xa0, 0xfa, 0x25, 0x6f, 0x2d, 0x9b, 0xfe, 0xef, 0x53, 0x64, 0xdf, 0xf9,
+	0x14, 0x7b, 0x9a, 0x03, 0xb0, 0x53, 0xd6, 0x23, 0xa2, 0x6f, 0xa0, 0xde,
+	0xbc, 0xd4, 0x88, 0xf6, 0xf2, 0x7d, 0xcb, 0xd7, 0x5d, 0x5c, 0x13, 0xbb,
+	0xc7, 0x14, 0x56, 0xf4, 0xef, 0xf1, 0x3b, 0xea, 0xa0, 0x6d, 0x98, 0x32,
+	0x01, 0x4c, 0xc9, 0x3a, 0x69, 0xca, 0x7f, 0xe7, 0xcd, 0x70, 0x4e, 0x6c,
+	0xab, 0x95, 0x86, 0x65, 0x03, 0x38, 0x6b, 0xdd, 0xb5, 0x10, 0x07, 0xdf,
+	0xd6, 0xea, 0x65, 0xf5, 0xbb, 0x34, 0xed, 0x01, 0x60, 0xac, 0x44, 0x9f,
+	0xaa, 0x75, 0x4e, 0x3e, 0x20, 0x9e, 0xbd, 0xc3, 0xca, 0x54, 0xcc, 0x5a,
+	0xaf, 0x7a, 0xb5, 0xc5, 0x46, 0x75, 0x4a, 0xfe, 0xd4, 0x5d, 0x50, 0xbd,
+	0xd2, 0x25, 0xd4, 0x1b, 0xe1, 0x45, 0x55, 0x6b, 0xb5, 0xe1, 0x54, 0xc4,
+	0xb7, 0x67, 0x8f, 0xc0, 0x07, 0x4d, 0xf5, 0x6e, 0x81, 0xbe, 0xd2, 0x6a,
+	0x65, 0x11, 0x2f, 0x74, 0xcb, 0x32, 0x8a, 0xc8, 0x73, 0x59, 0xf5, 0x7e,
+	0x0a, 0xfd, 0xf7, 0xf7, 0x54, 0x1c, 0x96, 0x1a, 0x64, 0xf3, 0x5c, 0x02,
+	0xeb, 0x68, 0xca, 0x3e, 0x43, 0x4a, 0x0f, 0x0f, 0x28, 0xec, 0x1a, 0x5a,
+	0x41, 0x80, 0x5a, 0x1b, 0x12, 0x59, 0x81, 0xbf, 0xc2, 0x77, 0xc3, 0x6b,
+	0xd4, 0x01, 0x65, 0x3b, 0x2b, 0x11, 0xc8, 0x9e, 0x58, 0x22, 0xb4, 0x48,
+	0x19, 0xc7, 0x61, 0x17, 0x5c, 0x07, 0x32, 0xe6, 0xbb, 0x2c, 0xcb, 0x1d,
+	0xf2, 0x4c, 0xc3, 0xf4, 0xdf, 0x3d, 0x7f, 0x89, 0xef, 0xa3, 0xeb, 0x73,
+	0x63, 0x03, 0xc4, 0x4f, 0x52, 0x6a, 0x00, 0x63, 0x9c, 0x66, 0x0d, 0xce,
+	0x18, 0x50, 0x88, 0x47, 0x94, 0xaf, 0xb3, 0x06, 0xf6, 0x7c, 0x9f, 0xf8,
+	0x3a, 0x62, 0x11, 0xdb, 0x8e, 0x62, 0x8f, 0x9d, 0xe4, 0xea, 0xd5, 0x9e,
+	0x93, 0xa0, 0xf3, 0xfc, 0x92, 0x39, 0x55, 0x90, 0x14, 0xdf, 0x71, 0x9e,
+	0xb1, 0xc1, 0xf7, 0x06, 0xe2, 0xe4, 0x42, 0x85, 0xef, 0x33, 0x17, 0xe1,
+	0x59, 0x53, 0x72, 0xbe, 0xcc, 0x1a, 0xf0, 0x76, 0xe8, 0x8b, 0xd7, 0xc5,
+	0xf1, 0x10, 0xfc, 0xff, 0xa2, 0xc1, 0xdf, 0x91, 0xf1, 0x77, 0x41, 0x66,
+	0x2a, 0xa1, 0x1d, 0x84, 0x8e, 0x0b, 0x46, 0xc4, 0xb7, 0x03, 0xa7, 0x4c,
+	0x8c, 0x35, 0x62, 0x9c, 0xc3, 0xf7, 0x97, 0xdd, 0xcb, 0x2d, 0xd6, 0x3f,
+	0x17, 0x10, 0xe7, 0xa6, 0x92, 0x53, 0xb0, 0x9d, 0x42, 0xbc, 0x13, 0xb4,
+	0xfe, 0x5d, 0xdc, 0xcb, 0xbb, 0xdc, 0xc7, 0x4c, 0x5d, 0x94, 0x22, 0x30,
+	0xfd, 0x48, 0xe2, 0x65, 0xd9, 0x83, 0x3a, 0x55, 0x93, 0x37, 0x2d, 0x73,
+	0x5c, 0x34, 0xb5, 0xde, 0xf0, 0x7d, 0xb0, 0xbd, 0x37, 0x10, 0xdf, 0x3a,
+	0xfc, 0xda, 0x3d, 0x5b, 0x26, 0x16, 0x3a, 0xaa, 0xde, 0x05, 0xb8, 0x60,
+	0xb1, 0x7f, 0xc7, 0xdf, 0x44, 0xfe, 0xa5, 0xda, 0x63, 0xeb, 0x8c, 0x8d,
+	0xfd, 0x63, 0xd2, 0xe7, 0xf1, 0x78, 0xc0, 0xf2, 0x68, 0xe4, 0x3a, 0x91,
+	0xb6, 0x75, 0xce, 0xfb, 0xeb, 0x9c, 0xf5, 0xd7, 0xa9, 0xf9, 0xeb, 0x5c,
+	0xd8, 0x5c, 0xe7, 0x6e, 0xe8, 0xbf, 0xd5, 0x7a, 0x0a, 0xf8, 0x21, 0x93,
+	0x6a, 0xb5, 0x1c, 0xd4, 0x59, 0xa5, 0xd1, 0x79, 0x75, 0x46, 0xaa, 0xa7,
+	0xbf, 0x71, 0x6f, 0xc6, 0x2a, 0xc4, 0xc3, 0x0a, 0x7b, 0xa0, 0x92, 0x82,
+	0x1d, 0x16, 0xc4, 0xc3, 0xdc, 0x3c, 0xb7, 0xf3, 0xce, 0xf5, 0xba, 0xa1,
+	0xc3, 0x1c, 0x72, 0x86, 0x91, 0x39, 0x67, 0x49, 0x61, 0xdf, 0x6f, 0xea,
+	0xb0, 0xf3, 0x5e, 0xe4, 0x87, 0x9f, 0xc0, 0x66, 0x8c, 0x4c, 0xbd, 0x91,
+	0x43, 0xbd, 0xc3, 0xf9, 0x77, 0x40, 0x8f, 0x85, 0x4c, 0xad, 0x51, 0xc8,
+	0x9c, 0xe5, 0x79, 0x0e, 0xe6, 0xd5, 0x1a, 0x3d, 0x90, 0x7b, 0x8f, 0xea,
+	0x8b, 0xbc, 0x5c, 0x8e, 0x31, 0x06, 0xc1, 0xd6, 0x63, 0x18, 0x8b, 0xab,
+	0xdf, 0x68, 0xd5, 0xdd, 0x65, 0xf8, 0x74, 0x02, 0xe3, 0xd5, 0xae, 0x49,
+	0x85, 0x47, 0x2d, 0x59, 0x71, 0x7f, 0xa5, 0x15, 0xcb, 0x97, 0xb5, 0x52,
+	0x79, 0x18, 0x73, 0x46, 0xf9, 0x5b, 0x9f, 0x3d, 0xc0, 0x49, 0x53, 0xd5,
+	0x1d, 0x69, 0x4a, 0x80, 0x26, 0xbd, 0x8d, 0xa6, 0x04, 0xe8, 0x41, 0xcc,
+	0x3c, 0xc5, 0xde, 0xf1, 0xa8, 0x9c, 0x28, 0xf3, 0x9d, 0x26, 0xfe, 0x46,
+	0xd5, 0x90, 0x30, 0xb0, 0x65, 0xe4, 0x94, 0x19, 0x5f, 0x57, 0xbd, 0x1a,
+	0x73, 0xb8, 0x2e, 0x23, 0xa9, 0xba, 0xa8, 0xfc, 0x92, 0x38, 0x81, 0x7c,
+	0xf5, 0x86, 0xdb, 0x23, 0x6f, 0xfa, 0x7b, 0x5d, 0x14, 0x9e, 0x33, 0x6e,
+	0xdf, 0xeb, 0xc9, 0x4a, 0x2a, 0xf3, 0x8a, 0x15, 0xf2, 0xf9, 0xea, 0xc3,
+	0x5e, 0x7b, 0x30, 0x37, 0x95, 0x39, 0xdf, 0xd8, 0x69, 0xae, 0x83, 0xb9,
+	0x91, 0xb6, 0xb9, 0x0e, 0xe6, 0xf5, 0x20, 0xef, 0xf5, 0x28, 0x9e, 0x4a,
+	0xa0, 0xeb, 0x52, 0x99, 0x3c, 0xf1, 0x0c, 0x82, 0x7b, 0x1a, 0xc4, 0xc6,
+	0x53, 0xe2, 0x9f, 0xd9, 0xf2, 0xf7, 0x7a, 0x57, 0xf5, 0x6b, 0x94, 0x0d,
+	0x4c, 0x58, 0x3c, 0x9b, 0x99, 0xd1, 0xb2, 0xf5, 0x3c, 0x72, 0xd5, 0x8d,
+	0xc4, 0x43, 0x29, 0x1b, 0xb9, 0x92, 0xe7, 0x3c, 0x8d, 0x72, 0x81, 0xef,
+	0x3d, 0xc3, 0x2e, 0xde, 0x21, 0x5e, 0xbe, 0x31, 0xa4, 0xde, 0x43, 0x70,
+	0xfc, 0x73, 0x20, 0x31, 0x32, 0x63, 0x7c, 0xf7, 0xe0, 0x6e, 0xa9, 0x2f,
+	0x7f, 0x11, 0x63, 0x19, 0xe4, 0xc5, 0x43, 0x5a, 0xe6, 0xdc, 0x24, 0xae,
+	0x1f, 0xc2, 0x35, 0xe2, 0xf0, 0x72, 0x0e, 0xf7, 0x1f, 0xc2, 0xf5, 0xbc,
+	0x96, 0x6d, 0xe6, 0x70, 0xfd, 0x30, 0xae, 0x27, 0x48, 0x9b, 0xf3, 0x8a,
+	0x35, 0xa5, 0xd9, 0x58, 0xcb, 0x3e, 0x37, 0x89, 0x4f, 0xfb, 0x7a, 0xbc,
+	0x07, 0x3d, 0x95, 0x79, 0x3e, 0x96, 0x04, 0x4d, 0x0f, 0x6a, 0x4e, 0xbd,
+	0x1b, 0x6b, 0x0c, 0xe1, 0x79, 0xda, 0x54, 0xfb, 0x39, 0xd4, 0x6d, 0xaa,
+	0x67, 0x14, 0x4a, 0xa7, 0x81, 0x77, 0x1f, 0x41, 0xde, 0xd7, 0xc4, 0xb1,
+	0x1e, 0x97, 0x62, 0x2a, 0x2d, 0x0b, 0xf5, 0x90, 0x64, 0x63, 0x05, 0x7c,
+	0x2f, 0x48, 0x66, 0x1c, 0xf7, 0xeb, 0xb4, 0x05, 0xce, 0x2b, 0x49, 0xb1,
+	0x4a, 0xfc, 0xce, 0x7e, 0xd1, 0x57, 0xc0, 0x37, 0xfb, 0x44, 0x79, 0xc8,
+	0x20, 0x46, 0xfb, 0xdd, 0xa1, 0xa7, 0xe5, 0xbd, 0xd3, 0x8c, 0x7c, 0xac,
+	0x65, 0xea, 0xfe, 0x59, 0x9d, 0xc5, 0xdf, 0x2b, 0xb1, 0x47, 0x25, 0xc5,
+	0x50, 0x9a, 0x7d, 0x0e, 0xd5, 0x17, 0x4f, 0x79, 0x67, 0x7a, 0xed, 0xef,
+	0x90, 0x04, 0xfe, 0xc2, 0x7d, 0xbf, 0x82, 0xe7, 0xbd, 0xbe, 0x54, 0xb6,
+	0xf9, 0x41, 0x5d, 0xf0, 0x5d, 0xfd, 0x15, 0xe8, 0xe2, 0xfc, 0x87, 0xf6,
+	0xb9, 0xd8, 0xe3, 0x9a, 0x47, 0x2c, 0x62, 0x7f, 0x2c, 0x90, 0xdf, 0xd5,
+	0x34, 0x92, 0xbe, 0xc3, 0x58, 0x4b, 0x52, 0x8c, 0xb3, 0xb9, 0x58, 0x42,
+	0xd5, 0xbe, 0x1b, 0x4b, 0xf2, 0xc4, 0x16, 0xbd, 0xa4, 0x95, 0x72, 0x78,
+	0x04, 0xf5, 0x1a, 0x7f, 0xff, 0xf0, 0xb8, 0xe4, 0x53, 0xec, 0xd1, 0x84,
+	0x90, 0x0b, 0x0b, 0xf8, 0xbe, 0x25, 0xb7, 0x92, 0x2f, 0xb7, 0x7c, 0xf5,
+	0x5b, 0x4a, 0x77, 0x35, 0x8b, 0xfb, 0x05, 0xbd, 0x8b, 0x69, 0xa5, 0xb3,
+	0x9a, 0x7a, 0xcf, 0x36, 0xe0, 0x3d, 0xe8, 0xbf, 0xed, 0x6c, 0x73, 0x93,
+	0x16, 0x69, 0xfb, 0x38, 0xdf, 0x5b, 0x18, 0xb6, 0x85, 0xf4, 0x93, 0x0f,
+	0xe6, 0xac, 0xe0, 0x5c, 0x34, 0xe0, 0x21, 0xe0, 0xf3, 0xa3, 0xca, 0x85,
+	0x74, 0xee, 0x31, 0xa4, 0x7b, 0xca, 0x08, 0x59, 0xcc, 0x01, 0x9f, 0xf6,
+	0xfb, 0xf8, 0xff, 0x37, 0xe5, 0xea, 0xf1, 0x1e, 0x86, 0xa8, 0xfc, 0xdf,
+	0x8b, 0xee, 0xa0, 0xf7, 0xab, 0xcf, 0x80, 0x0d, 0xe7, 0xac, 0xb5, 0xc5,
+	0x67, 0x6d, 0x07, 0x3e, 0x6b, 0x3e, 0x9f, 0x1f, 0x7e, 0x4e, 0xea, 0xd1,
+	0x59, 0x5b, 0xb2, 0xc1, 0x23, 0x6d, 0x6a, 0x27, 0x7b, 0xe3, 0x6f, 0x9c,
+	0xd4, 0xef, 0xad, 0xa2, 0xb6, 0x7b, 0xad, 0x5e, 0x27, 0xeb, 0x64, 0xcf,
+	0xee, 0xce, 0x22, 0xd7, 0x55, 0xab, 0x5e, 0xcd, 0x5c, 0x75, 0xd9, 0x6b,
+	0xde, 0x69, 0x6f, 0x0d, 0x34, 0xff, 0x8e, 0x7a, 0xef, 0xa4, 0xe4, 0x7a,
+	0x7d, 0xa9, 0x6a, 0xb5, 0x3d, 0x57, 0xde, 0xc0, 0x3c, 0x39, 0x5c, 0x90,
+	0x19, 0xe8, 0x31, 0x89, 0xeb, 0x9b, 0xe5, 0xe5, 0x65, 0x75, 0x86, 0xe4,
+	0x9f, 0xd5, 0xf0, 0x0c, 0x46, 0x9d, 0x43, 0x23, 0x5e, 0xcd, 0xaa, 0x78,
+	0xbd, 0xb1, 0xac, 0xee, 0xa9, 0xdf, 0x3c, 0xd4, 0xdd, 0x19, 0xc4, 0x73,
+	0xd4, 0x06, 0xd6, 0x6e, 0x29, 0xa2, 0x86, 0x3e, 0x6b, 0xcd, 0x18, 0xc4,
+	0x29, 0x5c, 0x6b, 0x03, 0x6b, 0x9d, 0x5f, 0x96, 0xbd, 0x7c, 0xa7, 0xa3,
+	0xaa, 0xce, 0xbd, 0xbc, 0x7e, 0xf5, 0xbc, 0x04, 0xbf, 0xd7, 0x8d, 0xfa,
+	0x39, 0x8e, 0xef, 0x95, 0xf0, 0xb7, 0xa7, 0x8c, 0x01, 0xa8, 0x6b, 0x66,
+	0x0a, 0x58, 0xaf, 0xd5, 0xf2, 0xfa, 0xd9, 0x2d, 0xd8, 0x7d, 0x84, 0xbf,
+	0x65, 0xc0, 0xdf, 0x23, 0xb0, 0x13, 0xf8, 0xc1, 0xe6, 0x38, 0xaf, 0x59,
+	0x4b, 0x04, 0xd7, 0x4c, 0x58, 0xff, 0x1b, 0xed, 0xcc, 0xfa, 0xa1, 0x98,
+	0x41, 0x00, 0x00, 0x00 };
+static u32 bnx2_TXP_b09FwData[(0xd0/4) + 1] = {
+	0x00000000, 0x00000014, 0x00000014, 0x00000014, 0x00000014, 0x00000010,
+	0x00000030, 0x00000030, 0x00000000, 0x00000000, 0x00000000, 0x00000010,
+	0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00008002, 0x00000000,
+	0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000004, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000006,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000000,
+	0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 };
+static u32 bnx2_TXP_b09FwRodata[(0x30/4) + 1] = {
+	0x08003be8, 0x08003c14, 0x08003c5c, 0x08003c5c, 0x08003ae8, 0x08003b14,
+	0x08003b14, 0x08003c5c, 0x08003c5c, 0x08003c5c, 0x08003b7c, 0x00000000,
+	0x00000000 };
+static u32 bnx2_TXP_b09FwBss[(0xa20/4) + 1] = { 0x0 };
+static u32 bnx2_TXP_b09FwSbss[(0x80/4) + 1] = { 0x0 };
+
+static struct fw_info bnx2_txp_fw_09 = {
+	.ver_major			= 0x1,
+	.ver_minor			= 0x0,
+	.ver_fix			= 0x0,
+
+	.start_addr			= 0x08000060,
+
+	.text_addr			= 0x08000000,
+	.text_len			= 0x4194,
+	.text_index			= 0x0,
+	.gz_text			= bnx2_TXP_b09FwText,
+	.gz_text_len			= sizeof(bnx2_TXP_b09FwText),
+
+	.data_addr			= 0x080041e0,
+	.data_len			= 0xd0,
+	.data_index			= 0x0,
+	.data				= bnx2_TXP_b09FwData,
+
+	.sbss_addr			= 0x080042b0,
+	.sbss_len			= 0x80,
+	.sbss_index			= 0x0,
+	.sbss				= bnx2_TXP_b09FwSbss,
+
+	.bss_addr			= 0x08004330,
+	.bss_len			= 0xa20,
+	.bss_index			= 0x0,
+	.bss				= bnx2_TXP_b09FwBss,
+
+	.rodata_addr			= 0x08004198,
+	.rodata_len			= 0x30,
+	.rodata_index			= 0x0,
+	.rodata				= bnx2_TXP_b09FwRodata,
+};
+
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 17a4611..6482aed 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1336,6 +1336,13 @@
 		goto err_undo_flags;
 	}
 
+	if (slave_dev->get_stats == NULL) {
+		printk(KERN_NOTICE DRV_NAME
+			": %s: the driver for slave device %s does not provide "
+			"get_stats function, network statistics will be "
+			"inaccurate.\n", bond_dev->name, slave_dev->name);
+	}
+
 	new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL);
 	if (!new_slave) {
 		res = -ENOMEM;
@@ -3605,33 +3612,35 @@
 	read_lock_bh(&bond->lock);
 
 	bond_for_each_slave(bond, slave, i) {
-		sstats = slave->dev->get_stats(slave->dev);
+		if (slave->dev->get_stats) {
+			sstats = slave->dev->get_stats(slave->dev);
 
-		stats->rx_packets += sstats->rx_packets;
-		stats->rx_bytes += sstats->rx_bytes;
-		stats->rx_errors += sstats->rx_errors;
-		stats->rx_dropped += sstats->rx_dropped;
+			stats->rx_packets += sstats->rx_packets;
+			stats->rx_bytes += sstats->rx_bytes;
+			stats->rx_errors += sstats->rx_errors;
+			stats->rx_dropped += sstats->rx_dropped;
 
-		stats->tx_packets += sstats->tx_packets;
-		stats->tx_bytes += sstats->tx_bytes;
-		stats->tx_errors += sstats->tx_errors;
-		stats->tx_dropped += sstats->tx_dropped;
+			stats->tx_packets += sstats->tx_packets;
+			stats->tx_bytes += sstats->tx_bytes;
+			stats->tx_errors += sstats->tx_errors;
+			stats->tx_dropped += sstats->tx_dropped;
 
-		stats->multicast += sstats->multicast;
-		stats->collisions += sstats->collisions;
+			stats->multicast += sstats->multicast;
+			stats->collisions += sstats->collisions;
 
-		stats->rx_length_errors += sstats->rx_length_errors;
-		stats->rx_over_errors += sstats->rx_over_errors;
-		stats->rx_crc_errors += sstats->rx_crc_errors;
-		stats->rx_frame_errors += sstats->rx_frame_errors;
-		stats->rx_fifo_errors += sstats->rx_fifo_errors;
-		stats->rx_missed_errors += sstats->rx_missed_errors;
+			stats->rx_length_errors += sstats->rx_length_errors;
+			stats->rx_over_errors += sstats->rx_over_errors;
+			stats->rx_crc_errors += sstats->rx_crc_errors;
+			stats->rx_frame_errors += sstats->rx_frame_errors;
+			stats->rx_fifo_errors += sstats->rx_fifo_errors;
+			stats->rx_missed_errors += sstats->rx_missed_errors;
 
-		stats->tx_aborted_errors += sstats->tx_aborted_errors;
-		stats->tx_carrier_errors += sstats->tx_carrier_errors;
-		stats->tx_fifo_errors += sstats->tx_fifo_errors;
-		stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
-		stats->tx_window_errors += sstats->tx_window_errors;
+			stats->tx_aborted_errors += sstats->tx_aborted_errors;
+			stats->tx_carrier_errors += sstats->tx_carrier_errors;
+			stats->tx_fifo_errors += sstats->tx_fifo_errors;
+			stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
+			stats->tx_window_errors += sstats->tx_window_errors;
+		}
 	}
 
 	read_unlock_bh(&bond->lock);
@@ -3675,7 +3684,7 @@
 			mii->val_out = 0;
 			read_lock_bh(&bond->lock);
 			read_lock(&bond->curr_slave_lock);
-			if (bond->curr_active_slave) {
+			if (netif_carrier_ok(bond->dev)) {
 				mii->val_out = BMSR_LSTATUS;
 			}
 			read_unlock(&bond->curr_slave_lock);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 521c5b7..c812648 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2825,7 +2825,7 @@
 		u64 csum_start_off, csum_stuff_off;
 
 		csum_start_off = (u64) (skb->h.raw - skb->data);
-		csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
+		csum_stuff_off = csum_start_off + skb->csum_offset;
 
 		ctrl =  TX_DESC_CSUM_EN |
 			CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
@@ -4066,9 +4066,9 @@
 	return 0;
 }
 
-static void cas_reset_task(void *data)
+static void cas_reset_task(struct work_struct *work)
 {
-	struct cas *cp = (struct cas *) data;
+	struct cas *cp = container_of(work, struct cas, reset_task);
 #if 0
 	int pending = atomic_read(&cp->reset_task_pending);
 #else
@@ -5006,7 +5006,7 @@
 	atomic_set(&cp->reset_task_pending_spare, 0);
 	atomic_set(&cp->reset_task_pending_mtu, 0);
 #endif
-	INIT_WORK(&cp->reset_task, cas_reset_task, cp);
+	INIT_WORK(&cp->reset_task, cas_reset_task);
 
 	/* Default link parameters */
 	if (link_mode >= 0 && link_mode <= 6)
diff --git a/drivers/net/chelsio/Makefile b/drivers/net/chelsio/Makefile
index 54c78d9..382d23f 100644
--- a/drivers/net/chelsio/Makefile
+++ b/drivers/net/chelsio/Makefile
@@ -1,11 +1,11 @@
 #
-# Chelsio 10Gb NIC driver for Linux.
+# Chelsio T1 driver
 #
 
 obj-$(CONFIG_CHELSIO_T1) += cxgb.o
 
-EXTRA_CFLAGS += -Idrivers/net/chelsio $(DEBUG_FLAGS)
+cxgb-$(CONFIG_CHELSIO_T1_1G) += ixf1010.o mac.o mv88e1xxx.o vsc7326.o vsc8244.o
+cxgb-objs := cxgb2.o espi.o tp.o pm3393.o sge.o subr.o \
+	     mv88x201x.o my3126.o $(cxgb-y)
 
 
-cxgb-objs := cxgb2.o espi.o pm3393.o sge.o subr.o mv88x201x.o
-
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 5d9dd14..74758d2 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -45,6 +45,7 @@
 #include <linux/delay.h>
 #include <linux/pci.h>
 #include <linux/ethtool.h>
+#include <linux/if_vlan.h>
 #include <linux/mii.h>
 #include <linux/crc32.h>
 #include <linux/init.h>
@@ -53,13 +54,30 @@
 
 #define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
 #define DRV_NAME "cxgb"
-#define DRV_VERSION "2.1.1"
+#define DRV_VERSION "2.2"
 #define PFX      DRV_NAME ": "
 
 #define CH_ERR(fmt, ...)   printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
 #define CH_WARN(fmt, ...)  printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
 #define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
 
+/*
+ * More powerful macro that selectively prints messages based on msg_enable.
+ * For info and debugging messages.
+ */
+#define CH_MSG(adapter, level, category, fmt, ...) do { \
+	if ((adapter)->msg_enable & NETIF_MSG_##category) \
+		printk(KERN_##level PFX "%s: " fmt, (adapter)->name, \
+		       ## __VA_ARGS__); \
+} while (0)
+
+#ifdef DEBUG
+# define CH_DBG(adapter, category, fmt, ...) \
+	CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
+#else
+# define CH_DBG(fmt, ...)
+#endif
+
 #define CH_DEVICE(devid, ssid, idx) \
 	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
 
@@ -71,10 +89,6 @@
 
 typedef struct adapter adapter_t;
 
-void t1_elmer0_ext_intr(adapter_t *adapter);
-void t1_link_changed(adapter_t *adapter, int port_id, int link_status,
-			int speed, int duplex, int fc);
-
 struct t1_rx_mode {
 	struct net_device *dev;
 	u32 idx;
@@ -97,26 +111,53 @@
 }
 
 #define	MAX_NPORTS 4
+#define PORT_MASK ((1 << MAX_NPORTS) - 1)
+#define NMTUS      8
+#define TCB_SIZE   128
 
 #define SPEED_INVALID 0xffff
 #define DUPLEX_INVALID 0xff
 
 enum {
 	CHBT_BOARD_N110,
-	CHBT_BOARD_N210
+	CHBT_BOARD_N210,
+	CHBT_BOARD_7500,
+	CHBT_BOARD_8000,
+	CHBT_BOARD_CHT101,
+	CHBT_BOARD_CHT110,
+	CHBT_BOARD_CHT210,
+	CHBT_BOARD_CHT204,
+	CHBT_BOARD_CHT204V,
+	CHBT_BOARD_CHT204E,
+	CHBT_BOARD_CHN204,
+	CHBT_BOARD_COUGAR,
+	CHBT_BOARD_6800,
+	CHBT_BOARD_SIMUL,
 };
 
 enum {
+	CHBT_TERM_FPGA,
 	CHBT_TERM_T1,
-	CHBT_TERM_T2
+	CHBT_TERM_T2,
+	CHBT_TERM_T3
 };
 
 enum {
+	CHBT_MAC_CHELSIO_A,
+	CHBT_MAC_IXF1010,
 	CHBT_MAC_PM3393,
+	CHBT_MAC_VSC7321,
+	CHBT_MAC_DUMMY
 };
 
 enum {
+	CHBT_PHY_88E1041,
+	CHBT_PHY_88E1111,
 	CHBT_PHY_88X2010,
+	CHBT_PHY_XPAK,
+	CHBT_PHY_MY3126,
+	CHBT_PHY_8244,
+	CHBT_PHY_DUMMY
 };
 
 enum {
@@ -150,16 +191,44 @@
 	unsigned char  is_pcix;
 };
 
+struct tp_params {
+	unsigned int pm_size;
+	unsigned int cm_size;
+	unsigned int pm_rx_base;
+	unsigned int pm_tx_base;
+	unsigned int pm_rx_pg_size;
+	unsigned int pm_tx_pg_size;
+	unsigned int pm_rx_num_pgs;
+	unsigned int pm_tx_num_pgs;
+	unsigned int rx_coalescing_size;
+	unsigned int use_5tuple_mode;
+};
+
+struct mc5_params {
+	unsigned int mode;       /* selects MC5 width */
+	unsigned int nservers;   /* size of server region */
+	unsigned int nroutes;    /* size of routing region */
+};
+
+/* Default MC5 region sizes */
+#define DEFAULT_SERVER_REGION_LEN 256
+#define DEFAULT_RT_REGION_LEN 1024
+
 struct adapter_params {
 	struct sge_params sge;
+	struct mc5_params mc5;
+	struct tp_params  tp;
 	struct chelsio_pci_params pci;
 
 	const struct board_info *brd_info;
 
+	unsigned short mtus[NMTUS];
 	unsigned int   nports;          /* # of ethernet ports */
 	unsigned int   stats_update_period;
 	unsigned short chip_revision;
 	unsigned char  chip_version;
+	unsigned char  is_asic;
+	unsigned char  has_msi;
 };
 
 struct link_config {
@@ -207,17 +276,20 @@
 	/* Terminator modules. */
 	struct sge    *sge;
 	struct peespi *espi;
+	struct petp   *tp;
 
 	struct port_info port[MAX_NPORTS];
-	struct work_struct stats_update_task;
+	struct delayed_work stats_update_task;
 	struct timer_list stats_update_timer;
 
-	struct semaphore mib_mutex;
 	spinlock_t tpi_lock;
 	spinlock_t work_lock;
+	spinlock_t mac_lock;
+
 	/* guards async operations */
 	spinlock_t async_lock ____cacheline_aligned;
 	u32 slow_intr_mask;
+	int t1powersave;
 };
 
 enum {                                           /* adapter flags */
@@ -256,6 +328,11 @@
 	const char             *desc;
 };
 
+static inline int t1_is_asic(const adapter_t *adapter)
+{
+	return adapter->params.is_asic;
+}
+
 extern struct pci_device_id t1_pci_tbl[];
 
 static inline int adapter_matches_type(const adapter_t *adapter,
@@ -285,13 +362,15 @@
 	return board_info(adap)->clock_core / 1000000;
 }
 
+extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
+extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
 extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
 extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
 
 extern void t1_interrupts_enable(adapter_t *adapter);
 extern void t1_interrupts_disable(adapter_t *adapter);
 extern void t1_interrupts_clear(adapter_t *adapter);
-extern int elmer0_ext_intr_handler(adapter_t *adapter);
+extern int t1_elmer0_ext_intr_handler(adapter_t *adapter);
 extern int t1_slow_intr_handler(adapter_t *adapter);
 
 extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
@@ -305,9 +384,7 @@
 extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
 extern void t1_free_sw_modules(adapter_t *adapter);
 extern void t1_fatal_err(adapter_t *adapter);
-
-extern void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable);
-extern void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable);
-extern void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable);
-
+extern void t1_link_changed(adapter_t *adapter, int port_id);
+extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
+			    int speed, int duplex, int pause);
 #endif /* _CXGB_COMMON_H_ */
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h
index 3412342..cf91434 100644
--- a/drivers/net/chelsio/cphy.h
+++ b/drivers/net/chelsio/cphy.h
@@ -52,7 +52,14 @@
 /* PHY interrupt types */
 enum {
 	cphy_cause_link_change = 0x1,
-	cphy_cause_error = 0x2
+	cphy_cause_error = 0x2,
+	cphy_cause_fifo_error = 0x3
+};
+
+enum {
+	PHY_LINK_UP = 0x1,
+	PHY_AUTONEG_RDY = 0x2,
+	PHY_AUTONEG_EN = 0x4
 };
 
 struct cphy;
@@ -81,7 +88,18 @@
 /* A PHY instance */
 struct cphy {
 	int addr;                            /* PHY address */
+	int state;	/* Link status state machine */
 	adapter_t *adapter;                  /* associated adapter */
+
+	struct delayed_work phy_update;
+
+	u16 bmsr;
+	int count;
+	int act_count;
+	int act_on;
+
+	u32 elmer_gpo;
+
 	struct cphy_ops *ops;                /* PHY operations */
 	int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr,
 			 int reg_addr, unsigned int *val);
@@ -142,6 +160,10 @@
 	int (*reset)(adapter_t *adapter);
 };
 
+extern struct gphy t1_my3126_ops;
+extern struct gphy t1_mv88e1xxx_ops;
+extern struct gphy t1_vsc8244_ops;
+extern struct gphy t1_xpak_ops;
 extern struct gphy t1_mv88x201x_ops;
 extern struct gphy t1_dummy_phy_ops;
 
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h
index 5b357d9..35f565b 100644
--- a/drivers/net/chelsio/cpl5_cmd.h
+++ b/drivers/net/chelsio/cpl5_cmd.h
@@ -46,24 +46,385 @@
 #endif
 
 enum CPL_opcode {
+	CPL_PASS_OPEN_REQ     = 0x1,
+	CPL_PASS_OPEN_RPL     = 0x2,
+	CPL_PASS_ESTABLISH    = 0x3,
+	CPL_PASS_ACCEPT_REQ   = 0xE,
+	CPL_PASS_ACCEPT_RPL   = 0x4,
+	CPL_ACT_OPEN_REQ      = 0x5,
+	CPL_ACT_OPEN_RPL      = 0x6,
+	CPL_CLOSE_CON_REQ     = 0x7,
+	CPL_CLOSE_CON_RPL     = 0x8,
+	CPL_CLOSE_LISTSRV_REQ = 0x9,
+	CPL_CLOSE_LISTSRV_RPL = 0xA,
+	CPL_ABORT_REQ         = 0xB,
+	CPL_ABORT_RPL         = 0xC,
+	CPL_PEER_CLOSE        = 0xD,
+	CPL_ACT_ESTABLISH     = 0x17,
+
+	CPL_GET_TCB           = 0x24,
+	CPL_GET_TCB_RPL       = 0x25,
+	CPL_SET_TCB           = 0x26,
+	CPL_SET_TCB_FIELD     = 0x27,
+	CPL_SET_TCB_RPL       = 0x28,
+	CPL_PCMD              = 0x29,
+
+	CPL_PCMD_READ         = 0x31,
+	CPL_PCMD_READ_RPL     = 0x32,
+
+
+	CPL_RX_DATA           = 0xA0,
+	CPL_RX_DATA_DDP       = 0xA1,
+	CPL_RX_DATA_ACK       = 0xA3,
 	CPL_RX_PKT            = 0xAD,
+	CPL_RX_ISCSI_HDR      = 0xAF,
+	CPL_TX_DATA_ACK       = 0xB0,
+	CPL_TX_DATA           = 0xB1,
 	CPL_TX_PKT            = 0xB2,
 	CPL_TX_PKT_LSO        = 0xB6,
+
+	CPL_RTE_DELETE_REQ    = 0xC0,
+	CPL_RTE_DELETE_RPL    = 0xC1,
+	CPL_RTE_WRITE_REQ     = 0xC2,
+	CPL_RTE_WRITE_RPL     = 0xD3,
+	CPL_RTE_READ_REQ      = 0xC3,
+	CPL_RTE_READ_RPL      = 0xC4,
+	CPL_L2T_WRITE_REQ     = 0xC5,
+	CPL_L2T_WRITE_RPL     = 0xD4,
+	CPL_L2T_READ_REQ      = 0xC6,
+	CPL_L2T_READ_RPL      = 0xC7,
+	CPL_SMT_WRITE_REQ     = 0xC8,
+	CPL_SMT_WRITE_RPL     = 0xD5,
+	CPL_SMT_READ_REQ      = 0xC9,
+	CPL_SMT_READ_RPL      = 0xCA,
+	CPL_ARP_MISS_REQ      = 0xCD,
+	CPL_ARP_MISS_RPL      = 0xCE,
+	CPL_MIGRATE_C2T_REQ   = 0xDC,
+	CPL_MIGRATE_C2T_RPL   = 0xDD,
+	CPL_ERROR             = 0xD7,
+
+	/* internal: driver -> TOM */
+	CPL_MSS_CHANGE        = 0xE1
 };
 
-enum {                /* TX_PKT_LSO ethernet types */
+#define NUM_CPL_CMDS 256
+
+enum CPL_error {
+	CPL_ERR_NONE               = 0,
+	CPL_ERR_TCAM_PARITY        = 1,
+	CPL_ERR_TCAM_FULL          = 3,
+	CPL_ERR_CONN_RESET         = 20,
+	CPL_ERR_CONN_EXIST         = 22,
+	CPL_ERR_ARP_MISS           = 23,
+	CPL_ERR_BAD_SYN            = 24,
+	CPL_ERR_CONN_TIMEDOUT      = 30,
+	CPL_ERR_XMIT_TIMEDOUT      = 31,
+	CPL_ERR_PERSIST_TIMEDOUT   = 32,
+	CPL_ERR_FINWAIT2_TIMEDOUT  = 33,
+	CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
+	CPL_ERR_ABORT_FAILED       = 42,
+	CPL_ERR_GENERAL            = 99
+};
+
+enum {
+	CPL_CONN_POLICY_AUTO = 0,
+	CPL_CONN_POLICY_ASK  = 1,
+	CPL_CONN_POLICY_DENY = 3
+};
+
+enum {
+	ULP_MODE_NONE   = 0,
+	ULP_MODE_TCPDDP = 1,
+	ULP_MODE_ISCSI  = 2,
+	ULP_MODE_IWARP  = 3,
+	ULP_MODE_SSL    = 4
+};
+
+enum {
+	CPL_PASS_OPEN_ACCEPT,
+	CPL_PASS_OPEN_REJECT
+};
+
+enum {
+	CPL_ABORT_SEND_RST = 0,
+	CPL_ABORT_NO_RST,
+	CPL_ABORT_POST_CLOSE_REQ = 2
+};
+
+enum {                // TX_PKT_LSO ethernet types
 	CPL_ETH_II,
 	CPL_ETH_II_VLAN,
 	CPL_ETH_802_3,
 	CPL_ETH_802_3_VLAN
 };
 
-struct cpl_rx_data {
+union opcode_tid {
+	u32 opcode_tid;
+	u8 opcode;
+};
+
+#define S_OPCODE 24
+#define V_OPCODE(x) ((x) << S_OPCODE)
+#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
+#define G_TID(x)    ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
+
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
+
+struct tcp_options {
+	u16 mss;
+	u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 rsvd:4;
+	u8 ecn:1;
+	u8 sack:1;
+	u8 tstamp:1;
+#else
+	u8 tstamp:1;
+	u8 sack:1;
+	u8 ecn:1;
+	u8 rsvd:4;
+#endif
+};
+
+struct cpl_pass_open_req {
+	union opcode_tid ot;
+	u16 local_port;
+	u16 peer_port;
+	u32 local_ip;
+	u32 peer_ip;
+	u32 opt0h;
+	u32 opt0l;
+	u32 peer_netmask;
+	u32 opt1;
+};
+
+struct cpl_pass_open_rpl {
+	union opcode_tid ot;
+	u16 local_port;
+	u16 peer_port;
+	u32 local_ip;
+	u32 peer_ip;
+	u8 resvd[7];
+	u8 status;
+};
+
+struct cpl_pass_establish {
+	union opcode_tid ot;
+	u16 local_port;
+	u16 peer_port;
+	u32 local_ip;
+	u32 peer_ip;
+	u32 tos_tid;
+	u8  l2t_idx;
+	u8  rsvd[3];
+	u32 snd_isn;
+	u32 rcv_isn;
+};
+
+struct cpl_pass_accept_req {
+	union opcode_tid ot;
+	u16 local_port;
+	u16 peer_port;
+	u32 local_ip;
+	u32 peer_ip;
+	u32 tos_tid;
+	struct tcp_options tcp_options;
+	u8  dst_mac[6];
+	u16 vlan_tag;
+	u8  src_mac[6];
+	u8  rsvd[2];
+	u32 rcv_isn;
+	u32 unknown_tcp_options;
+};
+
+struct cpl_pass_accept_rpl {
+	union opcode_tid ot;
 	u32 rsvd0;
+	u32 rsvd1;
+	u32 peer_ip;
+	u32 opt0h;
+	union {
+		u32 opt0l;
+		struct {
+			u8 rsvd[3];
+			u8 status;
+		};
+	};
+};
+
+struct cpl_act_open_req {
+	union opcode_tid ot;
+	u16 local_port;
+	u16 peer_port;
+	u32 local_ip;
+	u32 peer_ip;
+	u32 opt0h;
+	u32 opt0l;
+	u32 iff_vlantag;
+	u32 rsvd;
+};
+
+struct cpl_act_open_rpl {
+	union opcode_tid ot;
+	u16 local_port;
+	u16 peer_port;
+	u32 local_ip;
+	u32 peer_ip;
+	u32 new_tid;
+	u8  rsvd[3];
+	u8  status;
+};
+
+struct cpl_act_establish {
+	union opcode_tid ot;
+	u16 local_port;
+	u16 peer_port;
+	u32 local_ip;
+	u32 peer_ip;
+	u32 tos_tid;
+	u32 rsvd;
+	u32 snd_isn;
+	u32 rcv_isn;
+};
+
+struct cpl_get_tcb {
+	union opcode_tid ot;
+	u32 rsvd;
+};
+
+struct cpl_get_tcb_rpl {
+	union opcode_tid ot;
+	u16 len;
+	u8 rsvd;
+	u8 status;
+};
+
+struct cpl_set_tcb {
+	union opcode_tid ot;
+	u16 len;
+	u16 rsvd;
+};
+
+struct cpl_set_tcb_field {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 offset;
+	u32 mask;
+	u32 val;
+};
+
+struct cpl_set_tcb_rpl {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 status;
+};
+
+struct cpl_pcmd {
+	union opcode_tid ot;
+	u16 dlen_in;
+	u16 dlen_out;
+	u32 pcmd_parm[2];
+};
+
+struct cpl_pcmd_read {
+	union opcode_tid ot;
+	u32 rsvd1;
+	u16 rsvd2;
+	u32 addr;
+	u16 len;
+};
+
+struct cpl_pcmd_read_rpl {
+	union opcode_tid ot;
+	u16 len;
+};
+
+struct cpl_close_con_req {
+	union opcode_tid ot;
+	u32 rsvd;
+};
+
+struct cpl_close_con_rpl {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 status;
+	u32 snd_nxt;
+	u32 rcv_nxt;
+};
+
+struct cpl_close_listserv_req {
+	union opcode_tid ot;
+	u32 rsvd;
+};
+
+struct cpl_close_listserv_rpl {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 status;
+};
+
+struct cpl_abort_req {
+	union opcode_tid ot;
+	u32 rsvd0;
+	u8  rsvd1;
+	u8  cmd;
+	u8  rsvd2[6];
+};
+
+struct cpl_abort_rpl {
+	union opcode_tid ot;
+	u32 rsvd0;
+	u8  rsvd1;
+	u8  status;
+	u8  rsvd2[6];
+};
+
+struct cpl_peer_close {
+	union opcode_tid ot;
+	u32 rsvd;
+};
+
+struct cpl_tx_data {
+	union opcode_tid ot;
+	u32 len;
+	u32 rsvd0;
+	u16 urg;
+	u16 flags;
+};
+
+struct cpl_tx_data_ack {
+	union opcode_tid ot;
+	u32 ack_seq;
+};
+
+struct cpl_rx_data {
+	union opcode_tid ot;
 	u32 len;
 	u32 seq;
 	u16 urg;
-	u8  rsvd1;
+	u8  rsvd;
+	u8  status;
+};
+
+struct cpl_rx_data_ack {
+	union opcode_tid ot;
+	u32 credit;
+};
+
+struct cpl_rx_data_ddp {
+	union opcode_tid ot;
+	u32 len;
+	u32 seq;
+	u32 nxt_seq;
+	u32 ulp_crc;
+	u16 ddp_status;
+	u8  rsvd;
 	u8  status;
 };
 
@@ -99,9 +460,9 @@
 	u8 ip_csum_dis:1;
 	u8 l4_csum_dis:1;
 	u8 vlan_valid:1;
-	u8 rsvd:1;
+	u8 :1;
 #else
-	u8 rsvd:1;
+	u8 :1;
 	u8 vlan_valid:1;
 	u8 l4_csum_dis:1;
 	u8 ip_csum_dis:1;
@@ -110,8 +471,7 @@
 	u16 vlan;
 	__be32 len;
 
-	u32 rsvd2;
-	u8 rsvd3;
+	u8 rsvd[5];
 #if defined(__LITTLE_ENDIAN_BITFIELD)
 	u8 tcp_hdr_words:4;
 	u8 ip_hdr_words:4;
@@ -138,8 +498,142 @@
 	u8 iff:4;
 #endif
 	u16 csum;
-	__be16 vlan;
+	u16 vlan;
 	u16 len;
 };
 
+struct cpl_l2t_write_req {
+	union opcode_tid ot;
+	u32 params;
+	u8 rsvd1[2];
+	u8 dst_mac[6];
+};
+
+struct cpl_l2t_write_rpl {
+	union opcode_tid ot;
+	u8 status;
+	u8 rsvd[3];
+};
+
+struct cpl_l2t_read_req {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 l2t_idx;
+};
+
+struct cpl_l2t_read_rpl {
+	union opcode_tid ot;
+	u32 params;
+	u8 rsvd1[2];
+	u8 dst_mac[6];
+};
+
+struct cpl_smt_write_req {
+	union opcode_tid ot;
+	u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 rsvd1:1;
+	u8 mtu_idx:3;
+	u8 iff:4;
+#else
+	u8 iff:4;
+	u8 mtu_idx:3;
+	u8 rsvd1:1;
+#endif
+	u16 rsvd2;
+	u16 rsvd3;
+	u8  src_mac1[6];
+	u16 rsvd4;
+	u8  src_mac0[6];
+};
+
+struct cpl_smt_write_rpl {
+	union opcode_tid ot;
+	u8 status;
+	u8 rsvd[3];
+};
+
+struct cpl_smt_read_req {
+	union opcode_tid ot;
+	u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 rsvd1:4;
+	u8 iff:4;
+#else
+	u8 iff:4;
+	u8 rsvd1:4;
+#endif
+	u16 rsvd2;
+};
+
+struct cpl_smt_read_rpl {
+	union opcode_tid ot;
+	u8 status;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 rsvd1:1;
+	u8 mtu_idx:3;
+	u8 rsvd0:4;
+#else
+	u8 rsvd0:4;
+	u8 mtu_idx:3;
+	u8 rsvd1:1;
+#endif
+	u16 rsvd2;
+	u16 rsvd3;
+	u8  src_mac1[6];
+	u16 rsvd4;
+	u8  src_mac0[6];
+};
+
+struct cpl_rte_delete_req {
+	union opcode_tid ot;
+	u32 params;
+};
+
+struct cpl_rte_delete_rpl {
+	union opcode_tid ot;
+	u8 status;
+	u8 rsvd[3];
+};
+
+struct cpl_rte_write_req {
+	union opcode_tid ot;
+	u32 params;
+	u32 netmask;
+	u32 faddr;
+};
+
+struct cpl_rte_write_rpl {
+	union opcode_tid ot;
+	u8 status;
+	u8 rsvd[3];
+};
+
+struct cpl_rte_read_req {
+	union opcode_tid ot;
+	u32 params;
+};
+
+struct cpl_rte_read_rpl {
+	union opcode_tid ot;
+	u8 status;
+	u8 rsvd0[2];
+	u8 l2t_idx;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 rsvd1:7;
+	u8 select:1;
+#else
+	u8 select:1;
+	u8 rsvd1:7;
+#endif
+	u8 rsvd2[3];
+	u32 addr;
+};
+
+struct cpl_mss_change {
+	union opcode_tid ot;
+	u32 mss;
+};
+
 #endif /* _CXGB_CPL5_CMD_H_ */
+
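
A quick illustration of the new opcode/TID helpers: the 32-bit opcode_tid word carries the CPL opcode in bits 31:24 and a 24-bit TID below it, and GET_TID() runs the word through ntohl() because the field is carried in network byte order. The standalone sketch below reuses the macro definitions from the header with a hypothetical TID value; the opcode is CPL_TX_PKT (0xB2) from the enum above:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Same packing helpers as in cpl5_cmd.h */
#define S_OPCODE 24
#define V_OPCODE(x) ((x) << S_OPCODE)
#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
#define G_TID(x)    ((x) & 0xFFFFFF)
#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))

int main(void)
{
	uint32_t host = MK_OPCODE_TID((uint32_t)0xB2, 0x1234); /* CPL_TX_PKT, TID 0x1234 */
	uint32_t wire = htonl(host);	/* as the field travels on the wire */

	printf("opcode=%#x tid=%#x\n",
	       (unsigned)G_OPCODE(ntohl(wire)), (unsigned)G_TID(ntohl(wire)));
	return 0;
}
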
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index ad7ff96..de48ead 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -45,7 +45,6 @@
 #include <linux/if_vlan.h>
 #include <linux/mii.h>
 #include <linux/sockios.h>
-#include <linux/proc_fs.h>
 #include <linux/dma-mapping.h>
 #include <asm/uaccess.h>
 
@@ -54,36 +53,10 @@
 #include "gmac.h"
 #include "cphy.h"
 #include "sge.h"
+#include "tp.h"
 #include "espi.h"
+#include "elmer0.h"
 
-#ifdef work_struct
-#include <linux/tqueue.h>
-#define INIT_WORK INIT_TQUEUE
-#define schedule_work schedule_task
-#define flush_scheduled_work flush_scheduled_tasks
-
-static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
-{
-	mod_timer(&ap->stats_update_timer, jiffies + secs * HZ);
-}
-
-static inline void cancel_mac_stats_update(struct adapter *ap)
-{
-	del_timer_sync(&ap->stats_update_timer);
-	flush_scheduled_tasks();
-}
-
-/*
- * Stats update timer for 2.4.  It schedules a task to do the actual update as
- * we need to access MAC statistics in process context.
- */
-static void mac_stats_timer(unsigned long data)
-{
-	struct adapter *ap = (struct adapter *)data;
-
-	schedule_task(&ap->stats_update_task);
-}
-#else
 #include <linux/workqueue.h>
 
 static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
@@ -95,7 +68,6 @@
 {
 	cancel_delayed_work(&ap->stats_update_task);
 }
-#endif
 
 #define MAX_CMDQ_ENTRIES 16384
 #define MAX_CMDQ1_ENTRIES 1024
@@ -103,10 +75,9 @@
 #define MAX_RX_JUMBO_BUFFERS 16384
 #define MAX_TX_BUFFERS_HIGH	16384U
 #define MAX_TX_BUFFERS_LOW	1536U
+#define MAX_TX_BUFFERS		1460U
 #define MIN_FL_ENTRIES 32
 
-#define PORT_MASK ((1 << MAX_NPORTS) - 1)
-
 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -124,8 +95,21 @@
 static int dflt_msg_enable = DFLT_MSG_ENABLE;
 
 module_param(dflt_msg_enable, int, 0);
-MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
 
+#define HCLOCK 0x0
+#define LCLOCK 0x1
+
+/* T1 cards powersave mode */
+static int t1_clock(struct adapter *adapter, int mode);
+static int t1powersave = 1;	/* HW default is powersave mode. */
+
+module_param(t1powersave, int, 0);
+MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
+
+static int disable_msi = 0;
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
 
 static const char pci_speed[][4] = {
 	"33", "66", "100", "133"
@@ -149,7 +133,7 @@
 static void link_report(struct port_info *p)
 {
 	if (!netif_carrier_ok(p->dev))
-        	printk(KERN_INFO "%s: link down\n", p->dev->name);
+		printk(KERN_INFO "%s: link down\n", p->dev->name);
 	else {
 		const char *s = "10Mbps";
 
@@ -159,13 +143,13 @@
 			case SPEED_100:   s = "100Mbps"; break;
 		}
 
-        printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
+		printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
 		       p->dev->name, s,
 		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
 	}
 }
 
-void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
+void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
 			int speed, int duplex, int pause)
 {
 	struct port_info *p = &adapter->port[port_id];
@@ -177,6 +161,22 @@
 			netif_carrier_off(p->dev);
 		link_report(p);
 
+		/* multi-port: inform the TOE of the negotiated speed */
+		if ((speed > 0) && (adapter->params.nports > 1)) {
+			unsigned int sched_speed = 10;
+			switch (speed) {
+			case SPEED_1000:
+				sched_speed = 1000;
+				break;
+			case SPEED_100:
+				sched_speed = 100;
+				break;
+			case SPEED_10:
+				sched_speed = 10;
+				break;
+			}
+			t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
+		}
 	}
 }
 
@@ -195,8 +195,10 @@
 static void enable_hw_csum(struct adapter *adapter)
 {
 	if (adapter->flags & TSO_CAPABLE)
-		t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */
-	t1_tp_set_tcp_checksum_offload(adapter, 1);
+		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
+	if (adapter->flags & UDP_CSUM_CAPABLE)
+		t1_tp_set_udp_checksum_offload(adapter->tp, 1);
+	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
 }
 
 /*
@@ -217,11 +219,19 @@
 	}
 
 	t1_interrupts_clear(adapter);
-	if ((err = request_irq(adapter->pdev->irq,
-			       t1_select_intr_handler(adapter), IRQF_SHARED,
-			       adapter->name, adapter))) {
+
+	adapter->params.has_msi = !disable_msi && pci_enable_msi(adapter->pdev) == 0;
+	err = request_irq(adapter->pdev->irq,
+			  t1_select_intr_handler(adapter),
+			  adapter->params.has_msi ? 0 : IRQF_SHARED,
+			  adapter->name, adapter);
+	if (err) {
+		if (adapter->params.has_msi)
+			pci_disable_msi(adapter->pdev);
+
 		goto out_err;
 	}
+
 	t1_sge_start(adapter->sge);
 	t1_interrupts_enable(adapter);
  out_err:
@@ -236,6 +246,8 @@
 	t1_sge_stop(adapter->sge);
 	t1_interrupts_disable(adapter);
 	free_irq(adapter->pdev->irq, adapter);
+	if (adapter->params.has_msi)
+		pci_disable_msi(adapter->pdev);
 }
 
 static int cxgb_open(struct net_device *dev)
@@ -290,7 +302,7 @@
 
 	/* Do a full update of the MAC stats */
 	pstats = p->mac->ops->statistics_update(p->mac,
-						      MAC_STATS_UPDATE_FULL);
+						MAC_STATS_UPDATE_FULL);
 
 	ns->tx_packets = pstats->TxUnicastFramesOK +
 		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
@@ -344,47 +356,53 @@
 }
 
 static char stats_strings[][ETH_GSTRING_LEN] = {
-        "TxOctetsOK",
-        "TxOctetsBad",
-        "TxUnicastFramesOK",
-        "TxMulticastFramesOK",
-        "TxBroadcastFramesOK",
-        "TxPauseFrames",
-        "TxFramesWithDeferredXmissions",
-        "TxLateCollisions",
-        "TxTotalCollisions",
-        "TxFramesAbortedDueToXSCollisions",
-        "TxUnderrun",
-        "TxLengthErrors",
-        "TxInternalMACXmitError",
-        "TxFramesWithExcessiveDeferral",
-        "TxFCSErrors",
+	"TxOctetsOK",
+	"TxOctetsBad",
+	"TxUnicastFramesOK",
+	"TxMulticastFramesOK",
+	"TxBroadcastFramesOK",
+	"TxPauseFrames",
+	"TxFramesWithDeferredXmissions",
+	"TxLateCollisions",
+	"TxTotalCollisions",
+	"TxFramesAbortedDueToXSCollisions",
+	"TxUnderrun",
+	"TxLengthErrors",
+	"TxInternalMACXmitError",
+	"TxFramesWithExcessiveDeferral",
+	"TxFCSErrors",
 
-        "RxOctetsOK",
-        "RxOctetsBad",
-        "RxUnicastFramesOK",
-        "RxMulticastFramesOK",
-        "RxBroadcastFramesOK",
-        "RxPauseFrames",
-        "RxFCSErrors",
-        "RxAlignErrors",
-        "RxSymbolErrors",
-        "RxDataErrors",
-        "RxSequenceErrors",
-        "RxRuntErrors",
-        "RxJabberErrors",
-        "RxInternalMACRcvError",
-        "RxInRangeLengthErrors",
-        "RxOutOfRangeLengthField",
-        "RxFrameTooLongErrors",
+	"RxOctetsOK",
+	"RxOctetsBad",
+	"RxUnicastFramesOK",
+	"RxMulticastFramesOK",
+	"RxBroadcastFramesOK",
+	"RxPauseFrames",
+	"RxFCSErrors",
+	"RxAlignErrors",
+	"RxSymbolErrors",
+	"RxDataErrors",
+	"RxSequenceErrors",
+	"RxRuntErrors",
+	"RxJabberErrors",
+	"RxInternalMACRcvError",
+	"RxInRangeLengthErrors",
+	"RxOutOfRangeLengthField",
+	"RxFrameTooLongErrors",
 
-	"TSO",
-	"VLANextractions",
-	"VLANinsertions",
+	/* Port stats */
+	"RxPackets",
 	"RxCsumGood",
+	"TxPackets",
 	"TxCsumOffload",
-	"RxDrops"
+	"TxTso",
+	"RxVlan",
+	"TxVlan",
 
+	/* Interrupt stats */
+	"rx drops",
+	"pure_rsps",
+	"unhandled irqs",
 	"respQ_empty",
 	"respQ_overflow",
 	"freelistQ_empty",
@@ -392,11 +410,7 @@
 	"pkt_mismatch",
 	"cmdQ_full0",
 	"cmdQ_full1",
-	"tx_ipfrags",
-	"tx_reg_pkts",
-	"tx_lso_pkts",
-	"tx_do_cksum",
-	
+
 	"espi_DIP2ParityErr",
 	"espi_DIP4Err",
 	"espi_RxDrops",
@@ -404,7 +418,7 @@
 	"espi_RxOvfl",
 	"espi_ParityErr"
 };
- 
+
 #define T2_REGMAP_SIZE (3 * 1024)
 
 static int get_regs_len(struct net_device *dev)
@@ -439,65 +453,77 @@
 	struct adapter *adapter = dev->priv;
 	struct cmac *mac = adapter->port[dev->if_port].mac;
 	const struct cmac_statistics *s;
-	const struct sge_port_stats *ss;
 	const struct sge_intr_counts *t;
+	struct sge_port_stats ss;
 
 	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
-	ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
+
+	*data++ = s->TxOctetsOK;
+	*data++ = s->TxOctetsBad;
+	*data++ = s->TxUnicastFramesOK;
+	*data++ = s->TxMulticastFramesOK;
+	*data++ = s->TxBroadcastFramesOK;
+	*data++ = s->TxPauseFrames;
+	*data++ = s->TxFramesWithDeferredXmissions;
+	*data++ = s->TxLateCollisions;
+	*data++ = s->TxTotalCollisions;
+	*data++ = s->TxFramesAbortedDueToXSCollisions;
+	*data++ = s->TxUnderrun;
+	*data++ = s->TxLengthErrors;
+	*data++ = s->TxInternalMACXmitError;
+	*data++ = s->TxFramesWithExcessiveDeferral;
+	*data++ = s->TxFCSErrors;
+
+	*data++ = s->RxOctetsOK;
+	*data++ = s->RxOctetsBad;
+	*data++ = s->RxUnicastFramesOK;
+	*data++ = s->RxMulticastFramesOK;
+	*data++ = s->RxBroadcastFramesOK;
+	*data++ = s->RxPauseFrames;
+	*data++ = s->RxFCSErrors;
+	*data++ = s->RxAlignErrors;
+	*data++ = s->RxSymbolErrors;
+	*data++ = s->RxDataErrors;
+	*data++ = s->RxSequenceErrors;
+	*data++ = s->RxRuntErrors;
+	*data++ = s->RxJabberErrors;
+	*data++ = s->RxInternalMACRcvError;
+	*data++ = s->RxInRangeLengthErrors;
+	*data++ = s->RxOutOfRangeLengthField;
+	*data++ = s->RxFrameTooLongErrors;
+
+	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
+	*data++ = ss.rx_packets;
+	*data++ = ss.rx_cso_good;
+	*data++ = ss.tx_packets;
+	*data++ = ss.tx_cso;
+	*data++ = ss.tx_tso;
+	*data++ = ss.vlan_xtract;
+	*data++ = ss.vlan_insert;
+
 	t = t1_sge_get_intr_counts(adapter->sge);
+	*data++ = t->rx_drops;
+	*data++ = t->pure_rsps;
+	*data++ = t->unhandled_irqs;
+	*data++ = t->respQ_empty;
+	*data++ = t->respQ_overflow;
+	*data++ = t->freelistQ_empty;
+	*data++ = t->pkt_too_big;
+	*data++ = t->pkt_mismatch;
+	*data++ = t->cmdQ_full[0];
+	*data++ = t->cmdQ_full[1];
 
-        *data++ = s->TxOctetsOK;
-        *data++ = s->TxOctetsBad;
-        *data++ = s->TxUnicastFramesOK;
-        *data++ = s->TxMulticastFramesOK;
-        *data++ = s->TxBroadcastFramesOK;
-        *data++ = s->TxPauseFrames;
-        *data++ = s->TxFramesWithDeferredXmissions;
-        *data++ = s->TxLateCollisions;
-        *data++ = s->TxTotalCollisions;
-        *data++ = s->TxFramesAbortedDueToXSCollisions;
-        *data++ = s->TxUnderrun;
-        *data++ = s->TxLengthErrors;
-        *data++ = s->TxInternalMACXmitError;
-        *data++ = s->TxFramesWithExcessiveDeferral;
-        *data++ = s->TxFCSErrors;
+	if (adapter->espi) {
+		const struct espi_intr_counts *e;
 
-        *data++ = s->RxOctetsOK;
-        *data++ = s->RxOctetsBad;
-        *data++ = s->RxUnicastFramesOK;
-        *data++ = s->RxMulticastFramesOK;
-        *data++ = s->RxBroadcastFramesOK;
-        *data++ = s->RxPauseFrames;
-        *data++ = s->RxFCSErrors;
-        *data++ = s->RxAlignErrors;
-        *data++ = s->RxSymbolErrors;
-        *data++ = s->RxDataErrors;
-        *data++ = s->RxSequenceErrors;
-        *data++ = s->RxRuntErrors;
-        *data++ = s->RxJabberErrors;
-        *data++ = s->RxInternalMACRcvError;
-        *data++ = s->RxInRangeLengthErrors;
-        *data++ = s->RxOutOfRangeLengthField;
-        *data++ = s->RxFrameTooLongErrors;
-
-	*data++ = ss->tso;
-	*data++ = ss->vlan_xtract;
-	*data++ = ss->vlan_insert;
-	*data++ = ss->rx_cso_good;
-	*data++ = ss->tx_cso;
-	*data++ = ss->rx_drops;
-
-	*data++ = (u64)t->respQ_empty;
-	*data++ = (u64)t->respQ_overflow;
-	*data++ = (u64)t->freelistQ_empty;
-	*data++ = (u64)t->pkt_too_big;
-	*data++ = (u64)t->pkt_mismatch;
-	*data++ = (u64)t->cmdQ_full[0];
-	*data++ = (u64)t->cmdQ_full[1];
-	*data++ = (u64)t->tx_ipfrags;
-	*data++ = (u64)t->tx_reg_pkts;
-	*data++ = (u64)t->tx_lso_pkts;
-	*data++ = (u64)t->tx_do_cksum;
+		e = t1_espi_get_intr_counts(adapter->espi);
+		*data++ = e->DIP2_parity_err;
+		*data++ = e->DIP4_err;
+		*data++ = e->rx_drops;
+		*data++ = e->tx_drops;
+		*data++ = e->rx_ovflw;
+		*data++ = e->parity_err;
+	}
 }
 
 static inline void reg_block_dump(struct adapter *ap, void *buf,
@@ -521,6 +547,15 @@
 
 	memset(buf, 0, T2_REGMAP_SIZE);
 	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
+	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
+	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
+	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
+	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
+	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
+	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
+	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
+	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
+	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
 }
 
 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -539,12 +574,12 @@
 		cmd->duplex = -1;
 	}
 
-        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
-        cmd->phy_address = p->phy->addr;
-        cmd->transceiver = XCVR_EXTERNAL;
-        cmd->autoneg = p->link_config.autoneg;
-        cmd->maxtxpkt = 0;
-        cmd->maxrxpkt = 0;
+	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+	cmd->phy_address = p->phy->addr;
+	cmd->transceiver = XCVR_EXTERNAL;
+	cmd->autoneg = p->link_config.autoneg;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
 	return 0;
 }
 
@@ -715,7 +750,7 @@
 		return -EINVAL;
 
 	if (adapter->flags & FULL_INIT_DONE)
-        return -EBUSY;
+		return -EBUSY;
 
 	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
 	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
@@ -759,7 +794,9 @@
 
 static int get_eeprom_len(struct net_device *dev)
 {
-    return EEPROM_SIZE;
+	struct adapter *adapter = dev->priv;
+
+	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
 }
 
 #define EEPROM_MAGIC(ap) \
@@ -809,47 +846,36 @@
 	.set_tso           = set_tso,
 };
 
-static void cxgb_proc_cleanup(struct adapter *adapter,
-					struct proc_dir_entry *dir)
-{
-	const char *name;
-	name = adapter->name;
-	remove_proc_entry(name, dir);
-}
-//#define chtoe_setup_toedev(adapter) NULL
-#define update_mtu_tab(adapter)
-#define write_smt_entry(adapter, idx)
-
 static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 {
-        struct adapter *adapter = dev->priv;
-        struct mii_ioctl_data *data = if_mii(req);
+	struct adapter *adapter = dev->priv;
+	struct mii_ioctl_data *data = if_mii(req);
 
 	switch (cmd) {
-        case SIOCGMIIPHY:
-                data->phy_id = adapter->port[dev->if_port].phy->addr;
-                /* FALLTHRU */
-        case SIOCGMIIREG: {
+	case SIOCGMIIPHY:
+		data->phy_id = adapter->port[dev->if_port].phy->addr;
+		/* FALLTHRU */
+	case SIOCGMIIREG: {
 		struct cphy *phy = adapter->port[dev->if_port].phy;
 		u32 val;
 
 		if (!phy->mdio_read)
-            return -EOPNOTSUPP;
+			return -EOPNOTSUPP;
 		phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
 			       &val);
-                data->val_out = val;
-                break;
+		data->val_out = val;
+		break;
 	}
-        case SIOCSMIIREG: {
+	case SIOCSMIIREG: {
 		struct cphy *phy = adapter->port[dev->if_port].phy;
 
-                if (!capable(CAP_NET_ADMIN))
-                    return -EPERM;
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
 		if (!phy->mdio_write)
-            return -EOPNOTSUPP;
+			return -EOPNOTSUPP;
 		phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
 			        data->val_in);
-                break;
+		break;
 	}
 
 	default:
@@ -865,9 +891,9 @@
 	struct cmac *mac = adapter->port[dev->if_port].mac;
 
 	if (!mac->ops->set_mtu)
-        return -EOPNOTSUPP;
+		return -EOPNOTSUPP;
 	if (new_mtu < 68)
-        return -EINVAL;
+		return -EINVAL;
 	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
 		return ret;
 	dev->mtu = new_mtu;
@@ -918,7 +944,7 @@
 	struct adapter *adapter = dev->priv;
 
 	local_irq_save(flags);
-        t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
+	t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
 	local_irq_restore(flags);
 }
 #endif
@@ -927,10 +953,11 @@
  * Periodic accumulation of MAC statistics.  This is used only if the MAC
  * does not have any other way to prevent stats counter overflow.
  */
-static void mac_stats_task(void *data)
+static void mac_stats_task(struct work_struct *work)
 {
 	int i;
-	struct adapter *adapter = data;
+	struct adapter *adapter =
+		container_of(work, struct adapter, stats_update_task.work);
 
 	for_each_port(adapter, i) {
 		struct port_info *p = &adapter->port[i];
@@ -951,18 +978,19 @@
 /*
  * Processes elmer0 external interrupts in process context.
  */
-static void ext_intr_task(void *data)
+static void ext_intr_task(struct work_struct *work)
 {
-	struct adapter *adapter = data;
+	struct adapter *adapter =
+		container_of(work, struct adapter, ext_intr_handler_task);
 
-	elmer0_ext_intr_handler(adapter);
+	t1_elmer0_ext_intr_handler(adapter);
 
 	/* Now reenable external interrupts */
 	spin_lock_irq(&adapter->async_lock);
 	adapter->slow_intr_mask |= F_PL_INTR_EXT;
 	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
 	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
-                   adapter->regs + A_PL_ENABLE);
+		   adapter->regs + A_PL_ENABLE);
 	spin_unlock_irq(&adapter->async_lock);
 }
 
@@ -978,7 +1006,7 @@
 	 */
 	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
 	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
-                   adapter->regs + A_PL_ENABLE);
+		   adapter->regs + A_PL_ENABLE);
 	schedule_work(&adapter->ext_intr_handler_task);
 }
 
@@ -1011,7 +1039,7 @@
 
 	err = pci_enable_device(pdev);
 	if (err)
-        return err;
+		return err;
 
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 		CH_ERR("%s: cannot find PCI device memory base address\n",
@@ -1043,7 +1071,7 @@
 
 	pci_set_master(pdev);
 
-    mmio_start = pci_resource_start(pdev, 0);
+	mmio_start = pci_resource_start(pdev, 0);
 	mmio_len = pci_resource_len(pdev, 0);
 	bi = t1_get_board_info(ent->driver_data);
 
@@ -1081,21 +1109,15 @@
 			adapter->msg_enable = dflt_msg_enable;
 			adapter->mmio_len = mmio_len;
 
-			init_MUTEX(&adapter->mib_mutex);
 			spin_lock_init(&adapter->tpi_lock);
 			spin_lock_init(&adapter->work_lock);
 			spin_lock_init(&adapter->async_lock);
+			spin_lock_init(&adapter->mac_lock);
 
 			INIT_WORK(&adapter->ext_intr_handler_task,
-				  ext_intr_task, adapter);
-			INIT_WORK(&adapter->stats_update_task, mac_stats_task,
-				  adapter);
-#ifdef work_struct
-			init_timer(&adapter->stats_update_timer);
-			adapter->stats_update_timer.function = mac_stats_timer;
-			adapter->stats_update_timer.data =
-				(unsigned long)adapter;
-#endif
+				  ext_intr_task);
+			INIT_DELAYED_WORK(&adapter->stats_update_task,
+					  mac_stats_task);
 
 			pci_set_drvdata(pdev, netdev);
 		}
@@ -1122,16 +1144,19 @@
 			netdev->vlan_rx_register = vlan_rx_register;
 			netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
 #endif
-			adapter->flags |= TSO_CAPABLE;
-			netdev->features |= NETIF_F_TSO;
+
+			/* T204: disable TSO */
+			if (!is_T2(adapter) || bi->port_number != 4) {
+				adapter->flags |= TSO_CAPABLE;
+				netdev->features |= NETIF_F_TSO;
+			}
 		}
 
 		netdev->open = cxgb_open;
 		netdev->stop = cxgb_close;
 		netdev->hard_start_xmit = t1_start_xmit;
 		netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
-			sizeof(struct cpl_tx_pkt_lso) :
-			sizeof(struct cpl_tx_pkt);
+			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
 		netdev->get_stats = t1_get_stats;
 		netdev->set_multicast_list = t1_set_rxmode;
 		netdev->do_ioctl = t1_ioctl;
@@ -1142,7 +1167,7 @@
 #endif
 		netdev->weight = 64;
 
-        SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
+		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
 	}
 
 	if (t1_init_sw_modules(adapter, bi) < 0) {
@@ -1169,7 +1194,7 @@
 			if (!adapter->registered_device_map)
 				adapter->name = adapter->port[i].dev->name;
 
-                __set_bit(i, &adapter->registered_device_map);
+			__set_bit(i, &adapter->registered_device_map);
 		}
 	}
 	if (!adapter->registered_device_map) {
@@ -1182,18 +1207,28 @@
 	       bi->desc, adapter->params.chip_revision,
 	       adapter->params.pci.is_pcix ? "PCIX" : "PCI",
 	       adapter->params.pci.speed, adapter->params.pci.width);
+
+	/*
+	 * Set the T1B ASIC and memory clocks.
+	 */
+	if (t1powersave)
+		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
+	else
+		adapter->t1powersave = HCLOCK;
+	if (t1_is_T1B(adapter))
+		t1_clock(adapter, t1powersave);
+
 	return 0;
 
  out_release_adapter_res:
 	t1_free_sw_modules(adapter);
  out_free_dev:
 	if (adapter) {
-		if (adapter->regs) iounmap(adapter->regs);
+		if (adapter->regs)
+			iounmap(adapter->regs);
 		for (i = bi->port_number - 1; i >= 0; --i)
-			if (adapter->port[i].dev) {
-				cxgb_proc_cleanup(adapter, proc_root_driver);
-				kfree(adapter->port[i].dev);
-			}
+			if (adapter->port[i].dev)
+				free_netdev(adapter->port[i].dev);
 	}
 	pci_release_regions(pdev);
  out_disable_pdev:
@@ -1202,6 +1237,155 @@
 	return err;
 }
 
+static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
+{
+	int data;
+	int i;
+	u32 val;
+
+	enum {
+		S_CLOCK = 1 << 3,
+		S_DATA = 1 << 4
+	};
+
+	for (i = (nbits - 1); i > -1; i--) {
+
+		udelay(50);
+
+		data = ((bitdata >> i) & 0x1);
+		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+
+		if (data)
+			val |= S_DATA;
+		else
+			val &= ~S_DATA;
+
+		udelay(50);
+
+		/* Set SCLOCK low */
+		val &= ~S_CLOCK;
+		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+		udelay(50);
+
+		/* Write SCLOCK high */
+		val |= S_CLOCK;
+		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+	}
+}
+
+static int t1_clock(struct adapter *adapter, int mode)
+{
+	u32 val;
+	int M_CORE_VAL;
+	int M_MEM_VAL;
+
+	enum {
+		M_CORE_BITS = 9,
+		T_CORE_VAL = 0,
+		T_CORE_BITS = 2,
+		N_CORE_VAL = 0,
+		N_CORE_BITS = 2,
+		M_MEM_BITS = 9,
+		T_MEM_VAL = 0,
+		T_MEM_BITS = 2,
+		N_MEM_VAL = 0,
+		N_MEM_BITS = 2,
+		NP_LOAD = 1 << 17,
+		S_LOAD_MEM = 1 << 5,
+		S_LOAD_CORE = 1 << 6,
+		S_CLOCK = 1 << 3
+	};
+
+	if (!t1_is_T1B(adapter))
+		return -ENODEV;	/* Can't re-clock this chip. */
+
+	if (mode & 2)
+		return 0;	/* show current mode. */
+
+	if ((adapter->t1powersave & 1) == (mode & 1))
+		return -EALREADY;	/* ASIC already running in mode. */
+
+	if ((mode & 1) == HCLOCK) {
+		M_CORE_VAL = 0x14;
+		M_MEM_VAL = 0x18;
+		adapter->t1powersave = HCLOCK;	/* overclock */
+	} else {
+		M_CORE_VAL = 0xe;
+		M_MEM_VAL = 0x10;
+		adapter->t1powersave = LCLOCK;	/* underclock */
+	}
+
+	/* Don't interrupt this serial stream! */
+	spin_lock(&adapter->tpi_lock);
+
+	/* Initialize for ASIC core */
+	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val |= NP_LOAD;
+	udelay(50);
+	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(50);
+	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~S_LOAD_CORE;
+	val &= ~S_CLOCK;
+	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(50);
+
+	/* Serial program the ASIC clock synthesizer */
+	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
+	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
+	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
+	udelay(50);
+
+	/* Finish ASIC core */
+	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val |= S_LOAD_CORE;
+	udelay(50);
+	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(50);
+	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~S_LOAD_CORE;
+	udelay(50);
+	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(50);
+
+	/* Initialize for memory */
+	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val |= NP_LOAD;
+	udelay(50);
+	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(50);
+	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~S_LOAD_MEM;
+	val &= ~S_CLOCK;
+	udelay(50);
+	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(50);
+
+	/* Serial program the memory clock synthesizer */
+	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
+	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
+	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
+	udelay(50);
+
+	/* Finish memory */
+	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val |= S_LOAD_MEM;
+	udelay(50);
+	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(50);
+	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~S_LOAD_MEM;
+	udelay(50);
+	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+	spin_unlock(&adapter->tpi_lock);
+
+	return 0;
+}
+
 static inline void t1_sw_reset(struct pci_dev *pdev)
 {
 	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
@@ -1223,10 +1407,9 @@
 		t1_free_sw_modules(adapter);
 		iounmap(adapter->regs);
 		while (--i >= 0)
-			if (adapter->port[i].dev) {
-				cxgb_proc_cleanup(adapter, proc_root_driver);
-				kfree(adapter->port[i].dev);
-			}
+			if (adapter->port[i].dev)
+				free_netdev(adapter->port[i].dev);
+
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
 		pci_set_drvdata(pdev, NULL);
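
The conversion above drops the old INIT_WORK(task, fn, data) calling style: with the 2.6.20 workqueue API the handler receives a struct work_struct * and recovers its adapter with container_of(), and the periodic stats task becomes a struct delayed_work whose embedded work member is what gets passed in. A small user-space sketch of that pointer recovery (the structures here are simplified stand-ins, not the kernel definitions):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-ins for the kernel types */
struct work_struct { int pending; };
struct delayed_work { struct work_struct work; };

struct adapter {
	int id;
	struct delayed_work stats_update_task;
};

/* The handler gets only the work pointer, like mac_stats_task() above */
static void mac_stats_task(struct work_struct *work)
{
	struct adapter *adapter =
		container_of(work, struct adapter, stats_update_task.work);

	printf("stats tick for adapter %d\n", adapter->id);
}

int main(void)
{
	struct adapter a = { .id = 3 };

	mac_stats_task(&a.stats_update_task.work);
	return 0;
}
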
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h
index 5590cb2..9ebecaa 100644
--- a/drivers/net/chelsio/elmer0.h
+++ b/drivers/net/chelsio/elmer0.h
@@ -39,6 +39,12 @@
 #ifndef _CXGB_ELMER0_H_
 #define _CXGB_ELMER0_H_
 
+/* ELMER0 flavors */
+enum {
+	ELMER0_XC2S300E_6FT256_C,
+	ELMER0_XC2S100E_6TQ144_C
+};
+
 /* ELMER0 registers */
 #define A_ELMER0_VERSION 0x100000
 #define A_ELMER0_PHY_CFG 0x100004
@@ -149,3 +155,4 @@
 #define MI1_OP_INDIRECT_READ     3
 
 #endif /* _CXGB_ELMER0_H_ */
+
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
index 542e5e0..4192f0f 100644
--- a/drivers/net/chelsio/espi.c
+++ b/drivers/net/chelsio/espi.c
@@ -81,46 +81,36 @@
 	return busy;
 }
 
-/* 1. Deassert rx_reset_core. */
-/* 2. Program TRICN_CNFG registers. */
-/* 3. Deassert rx_reset_link */
 static int tricn_init(adapter_t *adapter)
 {
-	int     i               = 0;
-	int     stat            = 0;
-	int     timeout         = 0;
-	int     is_ready        = 0;
+	int i, sme = 1;
 
-	/* 1 */
-	timeout=1000;
-	do {
-		stat = readl(adapter->regs + A_ESPI_RX_RESET);
-		is_ready = (stat & 0x4);
-		timeout--;
-		udelay(5);
-	} while (!is_ready || (timeout==0));
-	writel(0x2, adapter->regs + A_ESPI_RX_RESET);
-	if (timeout==0)
-	{
-		CH_ERR("ESPI : ERROR : Timeout tricn_init() \n");
-		t1_fatal_err(adapter);
+	if (!(readl(adapter->regs + A_ESPI_RX_RESET)  & F_RX_CLK_STATUS)) {
+		CH_ERR("%s: ESPI clock not ready\n", adapter->name);
+		return -1;
 	}
 
-	/* 2 */
-	tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
-	tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
-	tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
-	for (i=1; i<= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
-	for (i=1; i<= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
-	for (i=1; i<= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
-	for (i=4; i<= 4; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
-	for (i=5; i<= 5; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
-	for (i=6; i<= 6; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
-	for (i=7; i<= 7; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0x80);
-	for (i=8; i<= 8; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
+	writel(F_ESPI_RX_CORE_RST, adapter->regs + A_ESPI_RX_RESET);
 
-	/* 3 */
-	writel(0x3, adapter->regs + A_ESPI_RX_RESET);
+	if (sme) {
+		tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
+		tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
+		tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
+	}
+	for (i = 1; i <= 8; i++)
+		tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
+	for (i = 1; i <= 2; i++)
+		tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
+	for (i = 1; i <= 3; i++)
+		tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
+	tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1);
+	tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1);
+	tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1);
+	tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80);
+	tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1);
+
+	writel(F_ESPI_RX_CORE_RST | F_ESPI_RX_LNK_RST,
+	       adapter->regs + A_ESPI_RX_RESET);
 
 	return 0;
 }
@@ -143,6 +133,7 @@
 
 void t1_espi_intr_clear(struct peespi *espi)
 {
+	readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
 	writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS);
 	writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE);
 }
@@ -157,7 +148,6 @@
 
 int t1_espi_intr_handler(struct peespi *espi)
 {
-	u32 cnt;
 	u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
 
 	if (status & F_DIP4ERR)
@@ -177,7 +167,7 @@
 		 * Must read the error count to clear the interrupt
 		 * that it causes.
 		 */
-		cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
+		readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
 	}
 
 	/*
@@ -192,7 +182,7 @@
 
 const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi)
 {
-    return &espi->intr_cnt;
+	return &espi->intr_cnt;
 }
 
 static void espi_setup_for_pm3393(adapter_t *adapter)
@@ -210,17 +200,45 @@
 	writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG);
 }
 
-/* T2 Init part --  */
-/* 1. Set T_ESPI_MISCCTRL_ADDR */
-/* 2. Init ESPI registers. */
-/* 3. Init TriCN Hard Macro */
+static void espi_setup_for_vsc7321(adapter_t *adapter)
+{
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
+	writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1);
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
+	writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+	writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+	writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
+	writel(V_RX_NPORTS(4) | V_TX_NPORTS(4), adapter->regs + A_PORT_CONFIG);
+
+	writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
+}
+
+/*
+ * Note that T1B requires at least 2 ports for IXF1010 due to a HW bug.
+ */
+static void espi_setup_for_ixf1010(adapter_t *adapter, int nports)
+{
+	writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
+	if (nports == 4) {
+		if (is_T2(adapter)) {
+			writel(0xf00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+			writel(0x3c0, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+		} else {
+			writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+			writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+		}
+	} else {
+		writel(0x1fff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+		writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+	}
+	writel(V_RX_NPORTS(nports) | V_TX_NPORTS(nports), adapter->regs + A_PORT_CONFIG);
+
+}
+
 int t1_espi_init(struct peespi *espi, int mac_type, int nports)
 {
-	u32 cnt;
-
 	u32 status_enable_extra = 0;
 	adapter_t *adapter = espi->adapter;
-	u32 status, burstval = 0x800100;
 
 	/* Disable ESPI training.  MACs that can handle it enable it below. */
 	writel(0, adapter->regs + A_ESPI_TRAIN);
@@ -229,38 +247,20 @@
 		writel(V_OUT_OF_SYNC_COUNT(4) |
 		       V_DIP2_PARITY_ERR_THRES(3) |
 		       V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
-		if (nports == 4) {
-			/* T204: maxburst1 = 0x40, maxburst2 = 0x20 */
-			burstval = 0x200040;
-		}
-	}
-	writel(burstval, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
+		writel(nports == 4 ? 0x200040 : 0x1000080,
+		       adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
+	} else
+		writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
 
-	switch (mac_type) {
-	case CHBT_MAC_PM3393:
+	if (mac_type == CHBT_MAC_PM3393)
 		espi_setup_for_pm3393(adapter);
-		break;
-	default:
+	else if (mac_type == CHBT_MAC_VSC7321)
+		espi_setup_for_vsc7321(adapter);
+	else if (mac_type == CHBT_MAC_IXF1010) {
+		status_enable_extra = F_INTEL1010MODE;
+		espi_setup_for_ixf1010(adapter, nports);
+	} else
 		return -1;
-	}
-
-	/*
-	 * Make sure any pending interrupts from the SPI are
-	 * Cleared before enabling the interrupt.
-	 */
-	writel(ESPI_INTR_MASK, espi->adapter->regs + A_ESPI_INTR_ENABLE);
-	status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
-	if (status & F_DIP2PARITYERR) {
-		cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
-	}
-
-	/*
-	 * For T1B we need to write 1 to clear ESPI interrupts.  For T2+ we
-	 * write the status as is.
-	 */
-	if (status && t1_is_T1B(espi->adapter))
-		status = 1;
-	writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
 
 	writel(status_enable_extra | F_RXSTATUSENABLE,
 	       adapter->regs + A_ESPI_FIFO_STATUS_ENABLE);
@@ -271,9 +271,11 @@
 		 * Always position the control at the 1st port egress IN
 		 * (sop,eop) counter to reduce PIOs for T/N210 workaround.
 		 */
-		espi->misc_ctrl = (readl(adapter->regs + A_ESPI_MISC_CONTROL)
-				   & ~MON_MASK) | (F_MONITORED_DIRECTION
-				   | F_MONITORED_INTERFACE);
+		espi->misc_ctrl = readl(adapter->regs + A_ESPI_MISC_CONTROL);
+		espi->misc_ctrl &= ~MON_MASK;
+		espi->misc_ctrl |= F_MONITORED_DIRECTION;
+		if (adapter->params.nports == 1)
+			espi->misc_ctrl |= F_MONITORED_INTERFACE;
 		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
 		spin_lock_init(&espi->lock);
 	}
@@ -299,8 +301,7 @@
 {
 	struct peespi *espi = adapter->espi;
 
-	if (!is_T2(adapter))
-		return;
+	if (!is_T2(adapter)) return;
 	spin_lock(&espi->lock);
 	espi->misc_ctrl = (val & ~MON_MASK) |
 			  (espi->misc_ctrl & MON_MASK);
@@ -310,27 +311,61 @@
 
 u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
 {
-	u32 sel;
-
 	struct peespi *espi = adapter->espi;
+	u32 sel;
 
 	if (!is_T2(adapter))
 		return 0;
+
 	sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2);
 	if (!wait) {
 		if (!spin_trylock(&espi->lock))
 			return 0;
-	}
-	else
+	} else
 		spin_lock(&espi->lock);
+
 	if ((sel != (espi->misc_ctrl & MON_MASK))) {
 		writel(((espi->misc_ctrl & ~MON_MASK) | sel),
 		       adapter->regs + A_ESPI_MISC_CONTROL);
 		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
 		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
-	}
-	else
+	} else
 		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
 	spin_unlock(&espi->lock);
 	return sel;
 }
+
+/*
+ * This function is for T204 only.
+ * Unlike t1_espi_get_mon(), it reads espiInTxSop[0 ~ 3] in one shot,
+ * since there is no per-port counter on the output side.
+ */
+int
+t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
+{
+	struct peespi *espi = adapter->espi;
+	u8 i, nport = (u8)adapter->params.nports;
+
+	if (!wait) {
+		if (!spin_trylock(&espi->lock))
+			return -1;
+	} else
+		spin_lock(&espi->lock);
+
+	if ((espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION) {
+		espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) |
+				  F_MONITORED_DIRECTION;
+		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+	}
+	for (i = 0; i < nport; i++, valp++) {
+		if (i) {
+			writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i),
+			       adapter->regs + A_ESPI_MISC_CONTROL);
+		}
+		*valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
+	}
+
+	writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+	spin_unlock(&espi->lock);
+	return 0;
+}
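
Both monitor readers take a "wait" flag: callers that must not spin pass 0 and simply bail out if spin_trylock() fails, while callers that can afford to wait pass a non-zero value and always acquire espi->lock. A user-space analogue of that trylock-or-block split, with a pthread mutex in place of the spinlock and a dummy value in place of the SCH_TOKEN3 read:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mon_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int mon_value = 0x5a;	/* stand-in for the monitor register */

/*
 * wait == 0: give up immediately if the lock is contended;
 * wait != 0: block until the lock is acquired.
 */
static int read_monitor(unsigned int *valp, int wait)
{
	if (!wait) {
		if (pthread_mutex_trylock(&mon_lock) != 0)
			return -1;
	} else {
		pthread_mutex_lock(&mon_lock);
	}

	*valp = mon_value;
	pthread_mutex_unlock(&mon_lock);
	return 0;
}

int main(void)
{
	unsigned int v;

	if (read_monitor(&v, 0) == 0)
		printf("monitor = %#x\n", v);
	return 0;
}
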
diff --git a/drivers/net/chelsio/espi.h b/drivers/net/chelsio/espi.h
index c90e37f..84f2c98 100644
--- a/drivers/net/chelsio/espi.h
+++ b/drivers/net/chelsio/espi.h
@@ -64,5 +64,6 @@
 
 void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val);
 u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait);
+int t1_espi_get_mon_t204(adapter_t *, u32 *, u8);
 
 #endif /* _CXGB_ESPI_H_ */
diff --git a/drivers/net/chelsio/fpga_defs.h b/drivers/net/chelsio/fpga_defs.h
new file mode 100644
index 0000000..17a3c2b
--- /dev/null
+++ b/drivers/net/chelsio/fpga_defs.h
@@ -0,0 +1,232 @@
+/* $Date: 2005/03/07 23:59:05 $ $RCSfile: fpga_defs.h,v $ $Revision: 1.4 $ */
+
+/*
+ * FPGA specific definitions
+ */
+
+#ifndef __CHELSIO_FPGA_DEFS_H__
+#define __CHELSIO_FPGA_DEFS_H__
+
+#define FPGA_PCIX_ADDR_VERSION               0xA08
+#define FPGA_PCIX_ADDR_STAT                  0xA0C
+
+/* FPGA master interrupt Cause/Enable bits */
+#define FPGA_PCIX_INTERRUPT_SGE_ERROR        0x1
+#define FPGA_PCIX_INTERRUPT_SGE_DATA         0x2
+#define FPGA_PCIX_INTERRUPT_TP               0x4
+#define FPGA_PCIX_INTERRUPT_MC3              0x8
+#define FPGA_PCIX_INTERRUPT_GMAC             0x10
+#define FPGA_PCIX_INTERRUPT_PCIX             0x20
+
+/* TP interrupt register addresses */
+#define FPGA_TP_ADDR_INTERRUPT_ENABLE        0xA10
+#define FPGA_TP_ADDR_INTERRUPT_CAUSE         0xA14
+#define FPGA_TP_ADDR_VERSION                 0xA18
+
+/* TP interrupt Cause/Enable bits */
+#define FPGA_TP_INTERRUPT_MC4                0x1
+#define FPGA_TP_INTERRUPT_MC5                0x2
+
+/*
+ * PM interrupt register addresses
+ */
+#define FPGA_MC3_REG_INTRENABLE              0xA20
+#define FPGA_MC3_REG_INTRCAUSE               0xA24
+#define FPGA_MC3_REG_VERSION                 0xA28
+
+/*
+ * GMAC interrupt register addresses
+ */
+#define FPGA_GMAC_ADDR_INTERRUPT_ENABLE      0xA30
+#define FPGA_GMAC_ADDR_INTERRUPT_CAUSE       0xA34
+#define FPGA_GMAC_ADDR_VERSION               0xA38
+
+/* GMAC Cause/Enable bits */
+#define FPGA_GMAC_INTERRUPT_PORT0            0x1
+#define FPGA_GMAC_INTERRUPT_PORT1            0x2
+#define FPGA_GMAC_INTERRUPT_PORT2            0x4
+#define FPGA_GMAC_INTERRUPT_PORT3            0x8
+
+/* MI0 registers */
+#define A_MI0_CLK 0xb00
+
+#define S_MI0_CLK_DIV    0
+#define M_MI0_CLK_DIV    0xff
+#define V_MI0_CLK_DIV(x) ((x) << S_MI0_CLK_DIV)
+#define G_MI0_CLK_DIV(x) (((x) >> S_MI0_CLK_DIV) & M_MI0_CLK_DIV)
+
+#define S_MI0_CLK_CNT    8
+#define M_MI0_CLK_CNT    0xff
+#define V_MI0_CLK_CNT(x) ((x) << S_MI0_CLK_CNT)
+#define G_MI0_CLK_CNT(x) (((x) >> S_MI0_CLK_CNT) & M_MI0_CLK_CNT)
+
+#define A_MI0_CSR 0xb04
+
+#define S_MI0_CSR_POLL    0
+#define V_MI0_CSR_POLL(x) ((x) << S_MI0_CSR_POLL)
+#define F_MI0_CSR_POLL    V_MI0_CSR_POLL(1U)
+
+#define S_MI0_PREAMBLE    1
+#define V_MI0_PREAMBLE(x) ((x) << S_MI0_PREAMBLE)
+#define F_MI0_PREAMBLE    V_MI0_PREAMBLE(1U)
+
+#define S_MI0_INTR_ENABLE    2
+#define V_MI0_INTR_ENABLE(x) ((x) << S_MI0_INTR_ENABLE)
+#define F_MI0_INTR_ENABLE    V_MI0_INTR_ENABLE(1U)
+
+#define S_MI0_BUSY    3
+#define V_MI0_BUSY(x) ((x) << S_MI0_BUSY)
+#define F_MI0_BUSY    V_MI0_BUSY(1U)
+
+#define S_MI0_MDIO    4
+#define V_MI0_MDIO(x) ((x) << S_MI0_MDIO)
+#define F_MI0_MDIO    V_MI0_MDIO(1U)
+
+#define A_MI0_ADDR 0xb08
+
+#define S_MI0_PHY_REG_ADDR    0
+#define M_MI0_PHY_REG_ADDR    0x1f
+#define V_MI0_PHY_REG_ADDR(x) ((x) << S_MI0_PHY_REG_ADDR)
+#define G_MI0_PHY_REG_ADDR(x) (((x) >> S_MI0_PHY_REG_ADDR) & M_MI0_PHY_REG_ADDR)
+
+#define S_MI0_PHY_ADDR    5
+#define M_MI0_PHY_ADDR    0x1f
+#define V_MI0_PHY_ADDR(x) ((x) << S_MI0_PHY_ADDR)
+#define G_MI0_PHY_ADDR(x) (((x) >> S_MI0_PHY_ADDR) & M_MI0_PHY_ADDR)
+
+#define A_MI0_DATA_EXT 0xb0c
+#define A_MI0_DATA_INT 0xb10
+
+/* GMAC registers */
+#define A_GMAC_MACID_LO 0x28
+#define A_GMAC_MACID_HI 0x2c
+#define A_GMAC_CSR 0x30
+
+#define S_INTERFACE    0
+#define M_INTERFACE    0x3
+#define V_INTERFACE(x) ((x) << S_INTERFACE)
+#define G_INTERFACE(x) (((x) >> S_INTERFACE) & M_INTERFACE)
+
+#define S_MAC_TX_ENABLE    2
+#define V_MAC_TX_ENABLE(x) ((x) << S_MAC_TX_ENABLE)
+#define F_MAC_TX_ENABLE    V_MAC_TX_ENABLE(1U)
+
+#define S_MAC_RX_ENABLE    3
+#define V_MAC_RX_ENABLE(x) ((x) << S_MAC_RX_ENABLE)
+#define F_MAC_RX_ENABLE    V_MAC_RX_ENABLE(1U)
+
+#define S_MAC_LB_ENABLE    4
+#define V_MAC_LB_ENABLE(x) ((x) << S_MAC_LB_ENABLE)
+#define F_MAC_LB_ENABLE    V_MAC_LB_ENABLE(1U)
+
+#define S_MAC_SPEED    5
+#define M_MAC_SPEED    0x3
+#define V_MAC_SPEED(x) ((x) << S_MAC_SPEED)
+#define G_MAC_SPEED(x) (((x) >> S_MAC_SPEED) & M_MAC_SPEED)
+
+#define S_MAC_HD_FC_ENABLE    7
+#define V_MAC_HD_FC_ENABLE(x) ((x) << S_MAC_HD_FC_ENABLE)
+#define F_MAC_HD_FC_ENABLE    V_MAC_HD_FC_ENABLE(1U)
+
+#define S_MAC_HALF_DUPLEX    8
+#define V_MAC_HALF_DUPLEX(x) ((x) << S_MAC_HALF_DUPLEX)
+#define F_MAC_HALF_DUPLEX    V_MAC_HALF_DUPLEX(1U)
+
+#define S_MAC_PROMISC    9
+#define V_MAC_PROMISC(x) ((x) << S_MAC_PROMISC)
+#define F_MAC_PROMISC    V_MAC_PROMISC(1U)
+
+#define S_MAC_MC_ENABLE    10
+#define V_MAC_MC_ENABLE(x) ((x) << S_MAC_MC_ENABLE)
+#define F_MAC_MC_ENABLE    V_MAC_MC_ENABLE(1U)
+
+#define S_MAC_RESET    11
+#define V_MAC_RESET(x) ((x) << S_MAC_RESET)
+#define F_MAC_RESET    V_MAC_RESET(1U)
+
+#define S_MAC_RX_PAUSE_ENABLE    12
+#define V_MAC_RX_PAUSE_ENABLE(x) ((x) << S_MAC_RX_PAUSE_ENABLE)
+#define F_MAC_RX_PAUSE_ENABLE    V_MAC_RX_PAUSE_ENABLE(1U)
+
+#define S_MAC_TX_PAUSE_ENABLE    13
+#define V_MAC_TX_PAUSE_ENABLE(x) ((x) << S_MAC_TX_PAUSE_ENABLE)
+#define F_MAC_TX_PAUSE_ENABLE    V_MAC_TX_PAUSE_ENABLE(1U)
+
+#define S_MAC_LWM_ENABLE    14
+#define V_MAC_LWM_ENABLE(x) ((x) << S_MAC_LWM_ENABLE)
+#define F_MAC_LWM_ENABLE    V_MAC_LWM_ENABLE(1U)
+
+#define S_MAC_MAGIC_PKT_ENABLE    15
+#define V_MAC_MAGIC_PKT_ENABLE(x) ((x) << S_MAC_MAGIC_PKT_ENABLE)
+#define F_MAC_MAGIC_PKT_ENABLE    V_MAC_MAGIC_PKT_ENABLE(1U)
+
+#define S_MAC_ISL_ENABLE    16
+#define V_MAC_ISL_ENABLE(x) ((x) << S_MAC_ISL_ENABLE)
+#define F_MAC_ISL_ENABLE    V_MAC_ISL_ENABLE(1U)
+
+#define S_MAC_JUMBO_ENABLE    17
+#define V_MAC_JUMBO_ENABLE(x) ((x) << S_MAC_JUMBO_ENABLE)
+#define F_MAC_JUMBO_ENABLE    V_MAC_JUMBO_ENABLE(1U)
+
+#define S_MAC_RX_PAD_ENABLE    18
+#define V_MAC_RX_PAD_ENABLE(x) ((x) << S_MAC_RX_PAD_ENABLE)
+#define F_MAC_RX_PAD_ENABLE    V_MAC_RX_PAD_ENABLE(1U)
+
+#define S_MAC_RX_CRC_ENABLE    19
+#define V_MAC_RX_CRC_ENABLE(x) ((x) << S_MAC_RX_CRC_ENABLE)
+#define F_MAC_RX_CRC_ENABLE    V_MAC_RX_CRC_ENABLE(1U)
+
+#define A_GMAC_IFS 0x34
+
+#define S_MAC_IFS2    0
+#define M_MAC_IFS2    0x3f
+#define V_MAC_IFS2(x) ((x) << S_MAC_IFS2)
+#define G_MAC_IFS2(x) (((x) >> S_MAC_IFS2) & M_MAC_IFS2)
+
+#define S_MAC_IFS1    8
+#define M_MAC_IFS1    0x7f
+#define V_MAC_IFS1(x) ((x) << S_MAC_IFS1)
+#define G_MAC_IFS1(x) (((x) >> S_MAC_IFS1) & M_MAC_IFS1)
+
+#define A_GMAC_JUMBO_FRAME_LEN 0x38
+#define A_GMAC_LNK_DLY 0x3c
+#define A_GMAC_PAUSETIME 0x40
+#define A_GMAC_MCAST_LO 0x44
+#define A_GMAC_MCAST_HI 0x48
+#define A_GMAC_MCAST_MASK_LO 0x4c
+#define A_GMAC_MCAST_MASK_HI 0x50
+#define A_GMAC_RMT_CNT 0x54
+#define A_GMAC_RMT_DATA 0x58
+#define A_GMAC_BACKOFF_SEED 0x5c
+#define A_GMAC_TXF_THRES 0x60
+
+#define S_TXF_READ_THRESHOLD    0
+#define M_TXF_READ_THRESHOLD    0xff
+#define V_TXF_READ_THRESHOLD(x) ((x) << S_TXF_READ_THRESHOLD)
+#define G_TXF_READ_THRESHOLD(x) (((x) >> S_TXF_READ_THRESHOLD) & M_TXF_READ_THRESHOLD)
+
+#define S_TXF_WRITE_THRESHOLD    16
+#define M_TXF_WRITE_THRESHOLD    0xff
+#define V_TXF_WRITE_THRESHOLD(x) ((x) << S_TXF_WRITE_THRESHOLD)
+#define G_TXF_WRITE_THRESHOLD(x) (((x) >> S_TXF_WRITE_THRESHOLD) & M_TXF_WRITE_THRESHOLD)
+
+#define MAC_REG_BASE 0x600
+#define MAC_REG_ADDR(idx, reg) (MAC_REG_BASE + (idx) * 128 + (reg))
+
+#define MAC_REG_IDLO(idx)              MAC_REG_ADDR(idx, A_GMAC_MACID_LO)
+#define MAC_REG_IDHI(idx)              MAC_REG_ADDR(idx, A_GMAC_MACID_HI)
+#define MAC_REG_CSR(idx)               MAC_REG_ADDR(idx, A_GMAC_CSR)
+#define MAC_REG_IFS(idx)               MAC_REG_ADDR(idx, A_GMAC_IFS)
+#define MAC_REG_LARGEFRAMELENGTH(idx) MAC_REG_ADDR(idx, A_GMAC_JUMBO_FRAME_LEN)
+#define MAC_REG_LINKDLY(idx)           MAC_REG_ADDR(idx, A_GMAC_LNK_DLY)
+#define MAC_REG_PAUSETIME(idx)         MAC_REG_ADDR(idx, A_GMAC_PAUSETIME)
+#define MAC_REG_CASTLO(idx)            MAC_REG_ADDR(idx, A_GMAC_MCAST_LO)
+#define MAC_REG_MCASTHI(idx)           MAC_REG_ADDR(idx, A_GMAC_MCAST_HI)
+#define MAC_REG_CASTMASKLO(idx)        MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_LO)
+#define MAC_REG_MCASTMASKHI(idx)       MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_HI)
+#define MAC_REG_RMCNT(idx)             MAC_REG_ADDR(idx, A_GMAC_RMT_CNT)
+#define MAC_REG_RMDATA(idx)            MAC_REG_ADDR(idx, A_GMAC_RMT_DATA)
+#define MAC_REG_GMRANDBACKOFFSEED(idx) MAC_REG_ADDR(idx, A_GMAC_BACKOFF_SEED)
+#define MAC_REG_TXFTHRESHOLDS(idx)     MAC_REG_ADDR(idx, A_GMAC_TXF_THRES)
+
+#endif
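
The new header follows the driver's usual register-field convention: for a field FOO, S_FOO is the bit offset, M_FOO the field mask, V_FOO(x) places a value into the field and G_FOO(x) extracts it. A standalone sketch using the two MI0_CLK fields defined above (bits 7:0 and 15:8 of A_MI0_CLK), with arbitrary example values:

#include <stdint.h>
#include <stdio.h>

/* Field layout copied from the header above */
#define S_MI0_CLK_DIV    0
#define M_MI0_CLK_DIV    0xff
#define V_MI0_CLK_DIV(x) ((x) << S_MI0_CLK_DIV)
#define G_MI0_CLK_DIV(x) (((x) >> S_MI0_CLK_DIV) & M_MI0_CLK_DIV)

#define S_MI0_CLK_CNT    8
#define M_MI0_CLK_CNT    0xff
#define V_MI0_CLK_CNT(x) ((x) << S_MI0_CLK_CNT)
#define G_MI0_CLK_CNT(x) (((x) >> S_MI0_CLK_CNT) & M_MI0_CLK_CNT)

int main(void)
{
	uint32_t reg = V_MI0_CLK_DIV(0x20) | V_MI0_CLK_CNT(0x10);

	printf("reg=%#x div=%#x cnt=%#x\n", (unsigned)reg,
	       (unsigned)G_MI0_CLK_DIV(reg), (unsigned)G_MI0_CLK_CNT(reg));
	return 0;
}
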
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h
index 746b0ee..a2b8ad9 100644
--- a/drivers/net/chelsio/gmac.h
+++ b/drivers/net/chelsio/gmac.h
@@ -62,6 +62,8 @@
 	u64 TxInternalMACXmitError;
 	u64 TxFramesWithExcessiveDeferral;
 	u64 TxFCSErrors;
+	u64 TxJumboFramesOK;
+	u64 TxJumboOctetsOK;
 
 	/* Receive */
 	u64 RxOctetsOK;
@@ -81,6 +83,8 @@
 	u64 RxInRangeLengthErrors;
 	u64 RxOutOfRangeLengthField;
 	u64 RxFrameTooLongErrors;
+	u64 RxJumboFramesOK;
+	u64 RxJumboOctetsOK;
 };
 
 struct cmac_ops {
@@ -128,6 +132,7 @@
 extern struct gmac t1_pm3393_ops;
 extern struct gmac t1_chelsio_mac_ops;
 extern struct gmac t1_vsc7321_ops;
+extern struct gmac t1_vsc7326_ops;
 extern struct gmac t1_ixf1010_ops;
 extern struct gmac t1_dummy_mac_ops;
 
diff --git a/drivers/net/chelsio/ixf1010.c b/drivers/net/chelsio/ixf1010.c
new file mode 100644
index 0000000..5b8f144
--- /dev/null
+++ b/drivers/net/chelsio/ixf1010.c
@@ -0,0 +1,485 @@
+/* $Date: 2005/11/12 02:13:49 $ $RCSfile: ixf1010.c,v $ $Revision: 1.36 $ */
+#include "gmac.h"
+#include "elmer0.h"
+
+/* Update fast changing statistics every 15 seconds */
+#define STATS_TICK_SECS 15
+/* 30 minutes for full statistics update */
+#define MAJOR_UPDATE_TICKS (1800 / STATS_TICK_SECS)
+
+/*
+ * The IXF1010 can handle frames up to 16383 bytes but it's optimized for
+ * frames up to 9831 (0x2667) bytes, so we limit jumbo frame size to this.
+ * This length includes ethernet header and FCS.
+ */
+#define MAX_FRAME_SIZE 0x2667
+
+/* MAC registers */
+enum {
+	/* Per-port registers */
+	REG_MACADDR_LOW = 0,
+	REG_MACADDR_HIGH = 0x4,
+	REG_FDFC_TYPE = 0xC,
+	REG_FC_TX_TIMER_VALUE = 0x1c,
+	REG_IPG_RX_TIME1 = 0x28,
+	REG_IPG_RX_TIME2 = 0x2c,
+	REG_IPG_TX_TIME = 0x30,
+	REG_PAUSE_THRES = 0x38,
+	REG_MAX_FRAME_SIZE = 0x3c,
+	REG_RGMII_SPEED = 0x40,
+	REG_FC_ENABLE = 0x48,
+	REG_DISCARD_CTRL_FRAMES = 0x54,
+	REG_DIVERSE_CONFIG = 0x60,
+	REG_RX_FILTER = 0x64,
+	REG_MC_ADDR_LOW = 0x68,
+	REG_MC_ADDR_HIGH = 0x6c,
+
+	REG_RX_OCTETS_OK = 0x80,
+	REG_RX_OCTETS_BAD = 0x84,
+	REG_RX_UC_PKTS = 0x88,
+	REG_RX_MC_PKTS = 0x8c,
+	REG_RX_BC_PKTS = 0x90,
+	REG_RX_FCS_ERR = 0xb0,
+	REG_RX_TAGGED = 0xb4,
+	REG_RX_DATA_ERR = 0xb8,
+	REG_RX_ALIGN_ERR = 0xbc,
+	REG_RX_LONG_ERR = 0xc0,
+	REG_RX_JABBER_ERR = 0xc4,
+	REG_RX_PAUSE_FRAMES = 0xc8,
+	REG_RX_UNKNOWN_CTRL_FRAMES = 0xcc,
+	REG_RX_VERY_LONG_ERR = 0xd0,
+	REG_RX_RUNT_ERR = 0xd4,
+	REG_RX_SHORT_ERR = 0xd8,
+	REG_RX_SYMBOL_ERR = 0xe4,
+
+	REG_TX_OCTETS_OK = 0x100,
+	REG_TX_OCTETS_BAD = 0x104,
+	REG_TX_UC_PKTS = 0x108,
+	REG_TX_MC_PKTS = 0x10c,
+	REG_TX_BC_PKTS = 0x110,
+	REG_TX_EXCESSIVE_LEN_DROP = 0x14c,
+	REG_TX_UNDERRUN = 0x150,
+	REG_TX_TAGGED = 0x154,
+	REG_TX_PAUSE_FRAMES = 0x15C,
+
+	/* Global registers */
+	REG_PORT_ENABLE = 0x1400,
+
+	REG_JTAG_ID = 0x1430,
+
+	RX_FIFO_HIGH_WATERMARK_BASE = 0x1600,
+	RX_FIFO_LOW_WATERMARK_BASE = 0x1628,
+	RX_FIFO_FRAMES_REMOVED_BASE = 0x1650,
+
+	REG_RX_ERR_DROP = 0x167c,
+	REG_RX_FIFO_OVERFLOW_EVENT = 0x1680,
+
+	TX_FIFO_HIGH_WATERMARK_BASE = 0x1800,
+	TX_FIFO_LOW_WATERMARK_BASE = 0x1828,
+	TX_FIFO_XFER_THRES_BASE = 0x1850,
+
+	REG_TX_FIFO_OVERFLOW_EVENT = 0x1878,
+	REG_TX_FIFO_OOS_EVENT = 0x1884,
+
+	TX_FIFO_FRAMES_REMOVED_BASE = 0x1888,
+
+	REG_SPI_RX_BURST = 0x1c00,
+	REG_SPI_RX_TRAINING = 0x1c04,
+	REG_SPI_RX_CALENDAR = 0x1c08,
+	REG_SPI_TX_SYNC = 0x1c0c
+};
+
+enum {                     /* RMON registers */
+	REG_RxOctetsTotalOK = 0x80,
+	REG_RxOctetsBad = 0x84,
+	REG_RxUCPkts = 0x88,
+	REG_RxMCPkts = 0x8c,
+	REG_RxBCPkts = 0x90,
+	REG_RxJumboPkts = 0xac,
+	REG_RxFCSErrors = 0xb0,
+	REG_RxDataErrors = 0xb8,
+	REG_RxAlignErrors = 0xbc,
+	REG_RxLongErrors = 0xc0,
+	REG_RxJabberErrors = 0xc4,
+	REG_RxPauseMacControlCounter = 0xc8,
+	REG_RxVeryLongErrors = 0xd0,
+	REG_RxRuntErrors = 0xd4,
+	REG_RxShortErrors = 0xd8,
+	REG_RxSequenceErrors = 0xe0,
+	REG_RxSymbolErrors = 0xe4,
+
+	REG_TxOctetsTotalOK = 0x100,
+	REG_TxOctetsBad = 0x104,
+	REG_TxUCPkts = 0x108,
+	REG_TxMCPkts = 0x10c,
+	REG_TxBCPkts = 0x110,
+	REG_TxJumboPkts = 0x12C,
+	REG_TxTotalCollisions = 0x134,
+	REG_TxExcessiveLengthDrop = 0x14c,
+	REG_TxUnderrun = 0x150,
+	REG_TxCRCErrors = 0x158,
+	REG_TxPauseFrames = 0x15c
+};
+
+enum {
+	DIVERSE_CONFIG_PAD_ENABLE = 0x80,
+	DIVERSE_CONFIG_CRC_ADD = 0x40
+};
+
+#define MACREG_BASE            0
+#define MACREG(mac, mac_reg)   ((mac)->instance->mac_base + (mac_reg))
+
+struct _cmac_instance {
+	u32 mac_base;
+	u32 index;
+	u32 version;
+	u32 ticks;
+};
+
+static void disable_port(struct cmac *mac)
+{
+	u32 val;
+
+	t1_tpi_read(mac->adapter, REG_PORT_ENABLE, &val);
+	val &= ~(1 << mac->instance->index);
+	t1_tpi_write(mac->adapter, REG_PORT_ENABLE, val);
+}
+
+#define RMON_UPDATE(mac, name, stat_name) \
+	t1_tpi_read((mac)->adapter, MACREG(mac, REG_##name), &val); \
+	(mac)->stats.stat_name += val;
+
+/*
+ * Read the current values of the RMON counters and add them to the cumulative
+ * port statistics.  The HW RMON counters are cleared by this operation.
+ */
+static void port_stats_update(struct cmac *mac)
+{
+	u32 val;
+
+	/* Rx stats */
+	RMON_UPDATE(mac, RxOctetsTotalOK, RxOctetsOK);
+	RMON_UPDATE(mac, RxOctetsBad, RxOctetsBad);
+	RMON_UPDATE(mac, RxUCPkts, RxUnicastFramesOK);
+	RMON_UPDATE(mac, RxMCPkts, RxMulticastFramesOK);
+	RMON_UPDATE(mac, RxBCPkts, RxBroadcastFramesOK);
+	RMON_UPDATE(mac, RxJumboPkts, RxJumboFramesOK);
+	RMON_UPDATE(mac, RxFCSErrors, RxFCSErrors);
+	RMON_UPDATE(mac, RxAlignErrors, RxAlignErrors);
+	RMON_UPDATE(mac, RxLongErrors, RxFrameTooLongErrors);
+	RMON_UPDATE(mac, RxVeryLongErrors, RxFrameTooLongErrors);
+	RMON_UPDATE(mac, RxPauseMacControlCounter, RxPauseFrames);
+	RMON_UPDATE(mac, RxDataErrors, RxDataErrors);
+	RMON_UPDATE(mac, RxJabberErrors, RxJabberErrors);
+	RMON_UPDATE(mac, RxRuntErrors, RxRuntErrors);
+	RMON_UPDATE(mac, RxShortErrors, RxRuntErrors);
+	RMON_UPDATE(mac, RxSequenceErrors, RxSequenceErrors);
+	RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
+
+	/* Tx stats (skip collision stats as we are full-duplex only) */
+	RMON_UPDATE(mac, TxOctetsTotalOK, TxOctetsOK);
+	RMON_UPDATE(mac, TxOctetsBad, TxOctetsBad);
+	RMON_UPDATE(mac, TxUCPkts, TxUnicastFramesOK);
+	RMON_UPDATE(mac, TxMCPkts, TxMulticastFramesOK);
+	RMON_UPDATE(mac, TxBCPkts, TxBroadcastFramesOK);
+	RMON_UPDATE(mac, TxJumboPkts, TxJumboFramesOK);
+	RMON_UPDATE(mac, TxPauseFrames, TxPauseFrames);
+	RMON_UPDATE(mac, TxExcessiveLengthDrop, TxLengthErrors);
+	RMON_UPDATE(mac, TxUnderrun, TxUnderrun);
+	RMON_UPDATE(mac, TxCRCErrors, TxFCSErrors);
+}
+
+/* No-op interrupt operation as this MAC does not support interrupts */
+static int mac_intr_op(struct cmac *mac)
+{
+	return 0;
+}
+
+/* Expect MAC address to be in network byte order. */
+static int mac_set_address(struct cmac *mac, u8 addr[6])
+{
+	u32 addr_lo, addr_hi;
+
+	addr_lo = addr[2];
+	addr_lo = (addr_lo << 8) | addr[3];
+	addr_lo = (addr_lo << 8) | addr[4];
+	addr_lo = (addr_lo << 8) | addr[5];
+
+	addr_hi = addr[0];
+	addr_hi = (addr_hi << 8) | addr[1];
+
+	t1_tpi_write(mac->adapter, MACREG(mac, REG_MACADDR_LOW), addr_lo);
+	t1_tpi_write(mac->adapter, MACREG(mac, REG_MACADDR_HIGH), addr_hi);
+	return 0;
+}
+
+static int mac_get_address(struct cmac *mac, u8 addr[6])
+{
+	u32 addr_lo, addr_hi;
+
+	t1_tpi_read(mac->adapter, MACREG(mac, REG_MACADDR_LOW), &addr_lo);
+	t1_tpi_read(mac->adapter, MACREG(mac, REG_MACADDR_HIGH), &addr_hi);
+
+	addr[0] = (u8) (addr_hi >> 8);
+	addr[1] = (u8) addr_hi;
+	addr[2] = (u8) (addr_lo >> 24);
+	addr[3] = (u8) (addr_lo >> 16);
+	addr[4] = (u8) (addr_lo >> 8);
+	addr[5] = (u8) addr_lo;
+	return 0;
+}
+
+/* This is intended to reset a port, not the whole MAC */
+static int mac_reset(struct cmac *mac)
+{
+	return 0;
+}
+
+static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm)
+{
+	u32 val, new_mode;
+	adapter_t *adapter = mac->adapter;
+	u32 addr_lo, addr_hi;
+	u8 *addr;
+
+	t1_tpi_read(adapter, MACREG(mac, REG_RX_FILTER), &val);
+	new_mode = val & ~7;
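+	/*
+	 * Bit 0 appears to enable unicast (station address) filtering and
+	 * bit 1 multicast filtering; see also enable_port().
+	 */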
+	if (!t1_rx_mode_promisc(rm) && mac->instance->version > 0)
+		new_mode |= 1;     /* only set if version > 0 due to erratum */
+	if (!t1_rx_mode_promisc(rm) && !t1_rx_mode_allmulti(rm)
+	     && t1_rx_mode_mc_cnt(rm) <= 1)
+		new_mode |= 2;
+	if (new_mode != val)
+		t1_tpi_write(adapter, MACREG(mac, REG_RX_FILTER), new_mode);
+	switch (t1_rx_mode_mc_cnt(rm)) {
+	case 0:
+		t1_tpi_write(adapter, MACREG(mac, REG_MC_ADDR_LOW), 0);
+		t1_tpi_write(adapter, MACREG(mac, REG_MC_ADDR_HIGH), 0);
+		break;
+	case 1:
+		addr = t1_get_next_mcaddr(rm);
+		addr_lo = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
+			addr[5];
+		addr_hi = (addr[0] << 8) | addr[1];
+		t1_tpi_write(adapter, MACREG(mac, REG_MC_ADDR_LOW), addr_lo);
+		t1_tpi_write(adapter, MACREG(mac, REG_MC_ADDR_HIGH), addr_hi);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int mac_set_mtu(struct cmac *mac, int mtu)
+{
+	/* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
+	if (mtu > (MAX_FRAME_SIZE - 14 - 4)) return -EINVAL;
+	t1_tpi_write(mac->adapter, MACREG(mac, REG_MAX_FRAME_SIZE),
+		     mtu + 14 + 4);
+	return 0;
+}
+
+static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex,
+				   int fc)
+{
+	u32 val;
+
+	if (speed >= 0 && speed != SPEED_100 && speed != SPEED_1000)
+		return -1;
+	if (duplex >= 0 && duplex != DUPLEX_FULL)
+		return -1;
+
+	if (speed >= 0) {
+		val = speed == SPEED_100 ? 1 : 2;
+		t1_tpi_write(mac->adapter, MACREG(mac, REG_RGMII_SPEED), val);
+	}
+
+	t1_tpi_read(mac->adapter, MACREG(mac, REG_FC_ENABLE), &val);
+	val &= ~3;
+	if (fc & PAUSE_RX)
+		val |= 1;
+	if (fc & PAUSE_TX)
+		val |= 2;
+	t1_tpi_write(mac->adapter, MACREG(mac, REG_FC_ENABLE), val);
+	return 0;
+}
+
+static int mac_get_speed_duplex_fc(struct cmac *mac, int *speed, int *duplex,
+				   int *fc)
+{
+	u32 val;
+
+	if (duplex)
+		*duplex = DUPLEX_FULL;
+	if (speed) {
+		t1_tpi_read(mac->adapter, MACREG(mac, REG_RGMII_SPEED),
+			 &val);
+		*speed = (val & 2) ? SPEED_1000 : SPEED_100;
+	}
+	if (fc) {
+		t1_tpi_read(mac->adapter, MACREG(mac, REG_FC_ENABLE), &val);
+		*fc = 0;
+		if (val & 1)
+			*fc |= PAUSE_RX;
+		if (val & 2)
+			*fc |= PAUSE_TX;
+	}
+	return 0;
+}
+
+static void enable_port(struct cmac *mac)
+{
+	u32 val;
+	u32 index = mac->instance->index;
+	adapter_t *adapter = mac->adapter;
+
+	t1_tpi_read(adapter, MACREG(mac, REG_DIVERSE_CONFIG), &val);
+	val |= DIVERSE_CONFIG_CRC_ADD | DIVERSE_CONFIG_PAD_ENABLE;
+	t1_tpi_write(adapter, MACREG(mac, REG_DIVERSE_CONFIG), val);
+	if (mac->instance->version > 0)
+		t1_tpi_write(adapter, MACREG(mac, REG_RX_FILTER), 3);
+	else /* Don't enable unicast address filtering due to IXF1010 bug */
+		t1_tpi_write(adapter, MACREG(mac, REG_RX_FILTER), 2);
+
+	t1_tpi_read(adapter, REG_RX_ERR_DROP, &val);
+	val |= (1 << index);
+	t1_tpi_write(adapter, REG_RX_ERR_DROP, val);
+
+	/*
+	 * Clear the port RMON registers by adding their current values to the
+	 * cumulative port stats and then clearing the stats.  Really.
+	 */
+	port_stats_update(mac);
+	memset(&mac->stats, 0, sizeof(struct cmac_statistics));
+	mac->instance->ticks = 0;
+
+	t1_tpi_read(adapter, REG_PORT_ENABLE, &val);
+	val |= (1 << index);
+	t1_tpi_write(adapter, REG_PORT_ENABLE, val);
+
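+	/* The per-port FIFO watermark registers are spaced 4 bytes apart */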
+	index <<= 2;
+	if (is_T2(adapter)) {
+		/* T204: set the Fifo water level & threshold */
+		t1_tpi_write(adapter, RX_FIFO_HIGH_WATERMARK_BASE + index, 0x740);
+		t1_tpi_write(adapter, RX_FIFO_LOW_WATERMARK_BASE + index, 0x730);
+		t1_tpi_write(adapter, TX_FIFO_HIGH_WATERMARK_BASE + index, 0x600);
+		t1_tpi_write(adapter, TX_FIFO_LOW_WATERMARK_BASE + index, 0x1d0);
+		t1_tpi_write(adapter, TX_FIFO_XFER_THRES_BASE + index, 0x1100);
+	} else {
+		/*
+		 * Set the TX FIFO threshold to 0x400 instead of 0x100 to
+		 * work around an underrun problem.  Intel has blessed this
+		 * solution.
+		 */
+		t1_tpi_write(adapter, TX_FIFO_XFER_THRES_BASE + index, 0x400);
+	}
+}
+
+/* IXF1010 ports do not have separate enables for TX and RX */
+static int mac_enable(struct cmac *mac, int which)
+{
+	if (which & (MAC_DIRECTION_RX | MAC_DIRECTION_TX))
+		enable_port(mac);
+	return 0;
+}
+
+static int mac_disable(struct cmac *mac, int which)
+{
+	if (which & (MAC_DIRECTION_RX | MAC_DIRECTION_TX))
+		disable_port(mac);
+	return 0;
+}
+
+/*
+ * This function is called periodically to accumulate the current values of the
+ * RMON counters into the port statistics.  Since the counters are only 32 bits
+ * some of them can overflow in less than a minute at GigE speeds, so this
+ * function should be called every 30 seconds or so.
+ *
+ * To cut down on reading costs we update only the octet counters at each tick
+ * and do a full update at major ticks, which can be every 30 minutes or more.
+ */
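+/*
+ * At 1 Gb/s the octet counters advance at roughly 125 MB/s, so a 32-bit
+ * counter wraps in about 2^32 / 125e6, i.e. ~34 seconds.
+ */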
+static const struct cmac_statistics *mac_update_statistics(struct cmac *mac,
+							   int flag)
+{
+	if (flag == MAC_STATS_UPDATE_FULL ||
+	    MAJOR_UPDATE_TICKS <= mac->instance->ticks) {
+		port_stats_update(mac);
+		mac->instance->ticks = 0;
+	} else {
+		u32 val;
+
+		RMON_UPDATE(mac, RxOctetsTotalOK, RxOctetsOK);
+		RMON_UPDATE(mac, TxOctetsTotalOK, TxOctetsOK);
+		mac->instance->ticks++;
+	}
+	return &mac->stats;
+}
+
+static void mac_destroy(struct cmac *mac)
+{
+	kfree(mac);
+}
+
+static struct cmac_ops ixf1010_ops = {
+	.destroy                  = mac_destroy,
+	.reset                    = mac_reset,
+	.interrupt_enable         = mac_intr_op,
+	.interrupt_disable        = mac_intr_op,
+	.interrupt_clear          = mac_intr_op,
+	.enable                   = mac_enable,
+	.disable                  = mac_disable,
+	.set_mtu                  = mac_set_mtu,
+	.set_rx_mode              = mac_set_rx_mode,
+	.set_speed_duplex_fc      = mac_set_speed_duplex_fc,
+	.get_speed_duplex_fc      = mac_get_speed_duplex_fc,
+	.statistics_update        = mac_update_statistics,
+	.macaddress_get           = mac_get_address,
+	.macaddress_set           = mac_set_address,
+};
+
+static int ixf1010_mac_reset(adapter_t *adapter)
+{
+	u32 val;
+
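+	/*
+	 * ELMER0 GPO bit 0 presumably drives the IXF1010 reset line: force it
+	 * low, then high again, and leave all ports disabled.
+	 */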
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	if ((val & 1) != 0) {
+		val &= ~1;
+		t1_tpi_write(adapter, A_ELMER0_GPO, val);
+		udelay(2);
+	}
+	val |= 1;
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(2);
+
+	t1_tpi_write(adapter, REG_PORT_ENABLE, 0);
+	return 0;
+}
+
+static struct cmac *ixf1010_mac_create(adapter_t *adapter, int index)
+{
+	struct cmac *mac;
+	u32 val;
+
+	if (index > 9) return NULL;
+
+	mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
+	if (!mac) return NULL;
+
+	mac->ops = &ixf1010_ops;
+	mac->instance = (cmac_instance *)(mac + 1);
+
+	mac->instance->mac_base = MACREG_BASE + (index * 0x200);
+	mac->instance->index    = index;
+	mac->adapter  = adapter;
+	mac->instance->ticks    = 0;
+
+	t1_tpi_read(adapter, REG_JTAG_ID, &val);
+	mac->instance->version = val >> 28;
+	return mac;
+}
+
+struct gmac t1_ixf1010_ops = {
+	STATS_TICK_SECS,
+	ixf1010_mac_create,
+	ixf1010_mac_reset
+};
diff --git a/drivers/net/chelsio/mac.c b/drivers/net/chelsio/mac.c
new file mode 100644
index 0000000..6af39dc
--- /dev/null
+++ b/drivers/net/chelsio/mac.c
@@ -0,0 +1,368 @@
+/* $Date: 2005/10/22 00:42:59 $ $RCSfile: mac.c,v $ $Revision: 1.32 $ */
+#include "gmac.h"
+#include "regs.h"
+#include "fpga_defs.h"
+
+#define MAC_CSR_INTERFACE_GMII      0x0
+#define MAC_CSR_INTERFACE_TBI       0x1
+#define MAC_CSR_INTERFACE_MII       0x2
+#define MAC_CSR_INTERFACE_RMII      0x3
+
+/* Chelsio's MAC statistics. */
+struct mac_statistics {
+
+	/* Transmit */
+	u32 TxFramesTransmittedOK;
+	u32 TxReserved1;
+	u32 TxReserved2;
+	u32 TxOctetsTransmittedOK;
+	u32 TxFramesWithDeferredXmissions;
+	u32 TxLateCollisions;
+	u32 TxFramesAbortedDueToXSCollisions;
+	u32 TxFramesLostDueToIntMACXmitError;
+	u32 TxReserved3;
+	u32 TxMulticastFrameXmittedOK;
+	u32 TxBroadcastFramesXmittedOK;
+	u32 TxFramesWithExcessiveDeferral;
+	u32 TxPAUSEMACCtrlFramesTransmitted;
+
+	/* Receive */
+	u32 RxFramesReceivedOK;
+	u32 RxFrameCheckSequenceErrors;
+	u32 RxAlignmentErrors;
+	u32 RxOctetsReceivedOK;
+	u32 RxFramesLostDueToIntMACRcvError;
+	u32 RxMulticastFramesReceivedOK;
+	u32 RxBroadcastFramesReceivedOK;
+	u32 RxInRangeLengthErrors;
+	u32 RxTxOutOfRangeLengthField;
+	u32 RxFrameTooLongErrors;
+	u32 RxPAUSEMACCtrlFramesReceived;
+};
+
+static int static_aPorts[] = {
+	FPGA_GMAC_INTERRUPT_PORT0,
+	FPGA_GMAC_INTERRUPT_PORT1,
+	FPGA_GMAC_INTERRUPT_PORT2,
+	FPGA_GMAC_INTERRUPT_PORT3
+};
+
+struct _cmac_instance {
+	u32 index;
+};
+
+static int mac_intr_enable(struct cmac *mac)
+{
+	u32 mac_intr;
+
+	if (t1_is_asic(mac->adapter)) {
+		/* ASIC */
+
+		/* We don't use the on chip MAC for ASIC products. */
+	} else {
+		/* FPGA */
+
+		/* Set parent gmac interrupt. */
+		mac_intr = readl(mac->adapter->regs + A_PL_ENABLE);
+		mac_intr |= FPGA_PCIX_INTERRUPT_GMAC;
+		writel(mac_intr, mac->adapter->regs + A_PL_ENABLE);
+
+		mac_intr = readl(mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE);
+		mac_intr |= static_aPorts[mac->instance->index];
+		writel(mac_intr,
+		       mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE);
+	}
+
+	return 0;
+}
+
+static int mac_intr_disable(struct cmac *mac)
+{
+	u32 mac_intr;
+
+	if (t1_is_asic(mac->adapter)) {
+		/* ASIC */
+
+		/* We don't use the on chip MAC for ASIC products. */
+	} else {
+		/* FPGA */
+
+		/* Set parent gmac interrupt. */
+		mac_intr = readl(mac->adapter->regs + A_PL_ENABLE);
+		mac_intr &= ~FPGA_PCIX_INTERRUPT_GMAC;
+		writel(mac_intr, mac->adapter->regs + A_PL_ENABLE);
+
+		mac_intr = readl(mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE);
+		mac_intr &= ~(static_aPorts[mac->instance->index]);
+		writel(mac_intr,
+		       mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE);
+	}
+
+	return 0;
+}
+
+static int mac_intr_clear(struct cmac *mac)
+{
+	u32 mac_intr;
+
+	if (t1_is_asic(mac->adapter)) {
+		/* ASIC */
+
+		/* We don't use the on chip MAC for ASIC products. */
+	} else {
+		/* FPGA */
+
+		/* Set parent gmac interrupt. */
+		writel(FPGA_PCIX_INTERRUPT_GMAC,
+		       mac->adapter->regs +  A_PL_CAUSE);
+		mac_intr = readl(mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE);
+		mac_intr |= (static_aPorts[mac->instance->index]);
+		writel(mac_intr,
+		       mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE);
+	}
+
+	return 0;
+}
+
+static int mac_get_address(struct cmac *mac, u8 addr[6])
+{
+	u32 data32_lo, data32_hi;
+
+	data32_lo = readl(mac->adapter->regs
+			  + MAC_REG_IDLO(mac->instance->index));
+	data32_hi = readl(mac->adapter->regs
+			  + MAC_REG_IDHI(mac->instance->index));
+
+	addr[0] = (u8) ((data32_hi >> 8) & 0xFF);
+	addr[1] = (u8) ((data32_hi) & 0xFF);
+	addr[2] = (u8) ((data32_lo >> 24) & 0xFF);
+	addr[3] = (u8) ((data32_lo >> 16) & 0xFF);
+	addr[4] = (u8) ((data32_lo >> 8) & 0xFF);
+	addr[5] = (u8) ((data32_lo) & 0xFF);
+	return 0;
+}
+
+static int mac_reset(struct cmac *mac)
+{
+	u32 data32;
+	int mac_in_reset, time_out = 100;
+	int idx = mac->instance->index;
+
+	data32 = readl(mac->adapter->regs + MAC_REG_CSR(idx));
+	writel(data32 | F_MAC_RESET,
+	       mac->adapter->regs + MAC_REG_CSR(idx));
+
+	do {
+		data32 = readl(mac->adapter->regs + MAC_REG_CSR(idx));
+
+		mac_in_reset = data32 & F_MAC_RESET;
+		if (mac_in_reset)
+			udelay(1);
+	} while (mac_in_reset && --time_out);
+
+	if (mac_in_reset) {
+		CH_ERR("%s: MAC %d reset timed out\n",
+		       mac->adapter->name, idx);
+		return 2;
+	}
+
+	return 0;
+}
+
+static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm)
+{
+	u32 val;
+
+	val = readl(mac->adapter->regs
+			    + MAC_REG_CSR(mac->instance->index));
+	val &= ~(F_MAC_PROMISC | F_MAC_MC_ENABLE);
+	val |= V_MAC_PROMISC(t1_rx_mode_promisc(rm) != 0);
+	val |= V_MAC_MC_ENABLE(t1_rx_mode_allmulti(rm) != 0);
+	writel(val,
+	       mac->adapter->regs + MAC_REG_CSR(mac->instance->index));
+
+	return 0;
+}
+
+static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex,
+				   int fc)
+{
+	u32 data32;
+
+	data32 = readl(mac->adapter->regs
+			       + MAC_REG_CSR(mac->instance->index));
+	data32 &= ~(F_MAC_HALF_DUPLEX | V_MAC_SPEED(M_MAC_SPEED) |
+		V_INTERFACE(M_INTERFACE) | F_MAC_TX_PAUSE_ENABLE |
+		F_MAC_RX_PAUSE_ENABLE);
+
+	switch (speed) {
+	case SPEED_10:
+	case SPEED_100:
+		data32 |= V_INTERFACE(MAC_CSR_INTERFACE_MII);
+		data32 |= V_MAC_SPEED(speed == SPEED_10 ? 0 : 1);
+		break;
+	case SPEED_1000:
+		data32 |= V_INTERFACE(MAC_CSR_INTERFACE_GMII);
+		data32 |= V_MAC_SPEED(2);
+		break;
+	}
+
+	if (duplex >= 0)
+		data32 |= V_MAC_HALF_DUPLEX(duplex == DUPLEX_HALF);
+
+	if (fc >= 0) {
+		data32 |= V_MAC_RX_PAUSE_ENABLE((fc & PAUSE_RX) != 0);
+		data32 |= V_MAC_TX_PAUSE_ENABLE((fc & PAUSE_TX) != 0);
+	}
+
+	writel(data32,
+	       mac->adapter->regs + MAC_REG_CSR(mac->instance->index));
+	return 0;
+}
+
+static int mac_enable(struct cmac *mac, int which)
+{
+	u32 val;
+
+	val = readl(mac->adapter->regs
+			    + MAC_REG_CSR(mac->instance->index));
+	if (which & MAC_DIRECTION_RX)
+		val |= F_MAC_RX_ENABLE;
+	if (which & MAC_DIRECTION_TX)
+		val |= F_MAC_TX_ENABLE;
+	writel(val,
+	       mac->adapter->regs + MAC_REG_CSR(mac->instance->index));
+	return 0;
+}
+
+static int mac_disable(struct cmac *mac, int which)
+{
+	u32 val;
+
+	val = readl(mac->adapter->regs
+			    + MAC_REG_CSR(mac->instance->index));
+	if (which & MAC_DIRECTION_RX)
+		val &= ~F_MAC_RX_ENABLE;
+	if (which & MAC_DIRECTION_TX)
+		val &= ~F_MAC_TX_ENABLE;
+	writel(val,
+	       mac->adapter->regs + MAC_REG_CSR(mac->instance->index));
+	return 0;
+}
+
+#if 0
+static int mac_set_ifs(struct cmac *mac, u32 mode)
+{
+	t1_write_reg_4(mac->adapter,
+		       MAC_REG_IFS(mac->instance->index),
+		       mode);
+	return 0;
+}
+
+static int mac_enable_isl(struct cmac *mac)
+{
+	u32 data32 = readl(mac->adapter->regs
+				   + MAC_REG_CSR(mac->instance->index));
+	data32 |= F_MAC_RX_ENABLE | F_MAC_TX_ENABLE;
+	t1_write_reg_4(mac->adapter,
+		       MAC_REG_CSR(mac->instance->index),
+		       data32);
+	return 0;
+}
+#endif
+
+static int mac_set_mtu(struct cmac *mac, int mtu)
+{
+	if (mtu > 9600)
+		return -EINVAL;
+	writel(mtu + ETH_HLEN + VLAN_HLEN,
+	       mac->adapter->regs + MAC_REG_LARGEFRAMELENGTH(mac->instance->index));
+
+	return 0;
+}
+
+static const struct cmac_statistics *mac_update_statistics(struct cmac *mac,
+							   int flag)
+{
+	struct mac_statistics st;
+	u32 *p = (u32 *)&st, i;
+
+	writel(0,
+	       mac->adapter->regs + MAC_REG_RMCNT(mac->instance->index));
+
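+	/*
+	 * Resetting the RMON counter index to 0 and then repeatedly reading
+	 * the data register presumably steps through the hardware counters.
+	 */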
+	for (i = 0; i < sizeof(st) / sizeof(u32); i++)
+		*p++ = readl(mac->adapter->regs
+			     + MAC_REG_RMDATA(mac->instance->index));
+
+	/* XXX convert stats */
+	return &mac->stats;
+}
+
+static void mac_destroy(struct cmac *mac)
+{
+	kfree(mac);
+}
+
+static struct cmac_ops chelsio_mac_ops = {
+	.destroy                 = mac_destroy,
+	.reset                   = mac_reset,
+	.interrupt_enable        = mac_intr_enable,
+	.interrupt_disable       = mac_intr_disable,
+	.interrupt_clear         = mac_intr_clear,
+	.enable                  = mac_enable,
+	.disable                 = mac_disable,
+	.set_mtu                 = mac_set_mtu,
+	.set_rx_mode             = mac_set_rx_mode,
+	.set_speed_duplex_fc     = mac_set_speed_duplex_fc,
+	.macaddress_get          = mac_get_address,
+	.statistics_update       = mac_update_statistics,
+};
+
+static struct cmac *mac_create(adapter_t *adapter, int index)
+{
+	struct cmac *mac;
+	u32 data32;
+
+	if (index >= 4)
+		return NULL;
+
+	mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
+	if (!mac)
+		return NULL;
+
+	mac->ops = &chelsio_mac_ops;
+	mac->instance = (cmac_instance *) (mac + 1);
+
+	mac->instance->index = index;
+	mac->adapter = adapter;
+
+	data32 = readl(adapter->regs + MAC_REG_CSR(mac->instance->index));
+	data32 &= ~(F_MAC_RESET | F_MAC_PROMISC | F_MAC_LB_ENABLE |
+		    F_MAC_RX_ENABLE | F_MAC_TX_ENABLE);
+	data32 |= F_MAC_JUMBO_ENABLE;
+	writel(data32, adapter->regs + MAC_REG_CSR(mac->instance->index));
+
+	/* Initialize the random backoff seed. */
+	data32 = 0x55aa + (3 * index);
+	writel(data32,
+	       adapter->regs + MAC_REG_GMRANDBACKOFFSEED(mac->instance->index));
+
+	/* Check to see if the mac address needs to be set manually. */
+	data32 = readl(adapter->regs + MAC_REG_IDLO(mac->instance->index));
+	if (data32 == 0 || data32 == 0xffffffff) {
+		/*
+		 * Add a default MAC address if we can't read one.
+		 */
+		writel(0x43FFFFFF - index,
+		       adapter->regs + MAC_REG_IDLO(mac->instance->index));
+		writel(0x0007,
+		       adapter->regs + MAC_REG_IDHI(mac->instance->index));
+	}
+
+	(void) mac_set_mtu(mac, 1500);
+	return mac;
+}
+
+struct gmac t1_chelsio_mac_ops = {
+	.create = mac_create
+};
diff --git a/drivers/net/chelsio/mv88e1xxx.c b/drivers/net/chelsio/mv88e1xxx.c
new file mode 100644
index 0000000..28ac93f
--- /dev/null
+++ b/drivers/net/chelsio/mv88e1xxx.c
@@ -0,0 +1,397 @@
+/* $Date: 2005/10/24 23:18:13 $ $RCSfile: mv88e1xxx.c,v $ $Revision: 1.49 $ */
+#include "common.h"
+#include "mv88e1xxx.h"
+#include "cphy.h"
+#include "elmer0.h"
+
+/* MV88E1XXX MDI crossover register values */
+#define CROSSOVER_MDI   0
+#define CROSSOVER_MDIX  1
+#define CROSSOVER_AUTO  3
+
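+/*
+ * Downshift, FIFO over/underflow, link change, autoneg done, duplex change
+ * and speed change interrupts (see the MV88E1XXX_INTR_* bits in mv88e1xxx.h).
+ */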
+#define INTR_ENABLE_MASK 0x6CA0
+
+/*
+ * Set the bits given by 'bitval' in PHY register 'reg'.
+ */
+static void mdio_set_bit(struct cphy *cphy, int reg, u32 bitval)
+{
+	u32 val;
+
+	(void) simple_mdio_read(cphy, reg, &val);
+	(void) simple_mdio_write(cphy, reg, val | bitval);
+}
+
+/*
+ * Clear the bits given by 'bitval' in PHY register 'reg'.
+ */
+static void mdio_clear_bit(struct cphy *cphy, int reg, u32 bitval)
+{
+	u32 val;
+
+	(void) simple_mdio_read(cphy, reg, &val);
+	(void) simple_mdio_write(cphy, reg, val & ~bitval);
+}
+
+/*
+ * NAME:   phy_reset
+ *
+ * DESC:   Reset the given PHY's port. NOTE: This is not a global
+ *         chip reset.
+ *
+ * PARAMS: cphy     - Pointer to PHY instance data.
+ *
+ * RETURN:  0 - Successful reset.
+ *         -1 - Timeout.
+ */
+static int mv88e1xxx_reset(struct cphy *cphy, int wait)
+{
+	u32 ctl;
+	int time_out = 1000;
+
+	mdio_set_bit(cphy, MII_BMCR, BMCR_RESET);
+
+	do {
+		(void) simple_mdio_read(cphy, MII_BMCR, &ctl);
+		ctl &= BMCR_RESET;
+		if (ctl)
+			udelay(1);
+	} while (ctl && --time_out);
+
+	return ctl ? -1 : 0;
+}
+
+static int mv88e1xxx_interrupt_enable(struct cphy *cphy)
+{
+	/* Enable PHY interrupts. */
+	(void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER,
+		   INTR_ENABLE_MASK);
+
+	/* Enable Marvell interrupts through Elmer0. */
+	if (t1_is_asic(cphy->adapter)) {
+		u32 elmer;
+
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+		elmer |= ELMER0_GP_BIT1;
+		if (is_T2(cphy->adapter)) {
+			elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
+		}
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	}
+	return 0;
+}
+
+static int mv88e1xxx_interrupt_disable(struct cphy *cphy)
+{
+	/* Disable all phy interrupts. */
+	(void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER, 0);
+
+	/* Disable Marvell interrupts through Elmer0. */
+	if (t1_is_asic(cphy->adapter)) {
+		u32 elmer;
+
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+		elmer &= ~ELMER0_GP_BIT1;
+		if (is_T2(cphy->adapter)) {
+			elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4);
+		}
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	}
+	return 0;
+}
+
+static int mv88e1xxx_interrupt_clear(struct cphy *cphy)
+{
+	u32 elmer;
+
+	/* Clear PHY interrupts by reading the register. */
+	(void) simple_mdio_read(cphy,
+			MV88E1XXX_INTERRUPT_STATUS_REGISTER, &elmer);
+
+	/* Clear Marvell interrupts through Elmer0. */
+	if (t1_is_asic(cphy->adapter)) {
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
+		elmer |= ELMER0_GP_BIT1;
+		if (is_T2(cphy->adapter)) {
+			elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
+		}
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
+	}
+	return 0;
+}
+
+/*
+ * Set the PHY speed and duplex.  This also disables auto-negotiation, except
+ * for 1Gb/s, where auto-negotiation is mandatory.
+ */
+static int mv88e1xxx_set_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+	u32 ctl;
+
+	(void) simple_mdio_read(phy, MII_BMCR, &ctl);
+	if (speed >= 0) {
+		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
+		if (speed == SPEED_100)
+			ctl |= BMCR_SPEED100;
+		else if (speed == SPEED_1000)
+			ctl |= BMCR_SPEED1000;
+	}
+	if (duplex >= 0) {
+		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
+		if (duplex == DUPLEX_FULL)
+			ctl |= BMCR_FULLDPLX;
+	}
+	if (ctl & BMCR_SPEED1000)  /* auto-negotiation required for 1Gb/s */
+		ctl |= BMCR_ANENABLE;
+	(void) simple_mdio_write(phy, MII_BMCR, ctl);
+	return 0;
+}
+
+static int mv88e1xxx_crossover_set(struct cphy *cphy, int crossover)
+{
+	u32 data32;
+
+	(void) simple_mdio_read(cphy,
+			MV88E1XXX_SPECIFIC_CNTRL_REGISTER, &data32);
+	data32 &= ~V_PSCR_MDI_XOVER_MODE(M_PSCR_MDI_XOVER_MODE);
+	data32 |= V_PSCR_MDI_XOVER_MODE(crossover);
+	(void) simple_mdio_write(cphy,
+			MV88E1XXX_SPECIFIC_CNTRL_REGISTER, data32);
+	return 0;
+}
+
+static int mv88e1xxx_autoneg_enable(struct cphy *cphy)
+{
+	u32 ctl;
+
+	(void) mv88e1xxx_crossover_set(cphy, CROSSOVER_AUTO);
+
+	(void) simple_mdio_read(cphy, MII_BMCR, &ctl);
+	/* restart autoneg for change to take effect */
+	ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
+	(void) simple_mdio_write(cphy, MII_BMCR, ctl);
+	return 0;
+}
+
+static int mv88e1xxx_autoneg_disable(struct cphy *cphy)
+{
+	u32 ctl;
+
+	/*
+	 * Crossover *must* be set to manual in order to disable auto-neg.
+	 * The Alaska FAQs document highlights this point.
+	 */
+	(void) mv88e1xxx_crossover_set(cphy, CROSSOVER_MDI);
+
+	/*
+	 * Must include autoneg reset when disabling auto-neg. This
+	 * is described in the Alaska FAQ document.
+	 */
+	(void) simple_mdio_read(cphy, MII_BMCR, &ctl);
+	ctl &= ~BMCR_ANENABLE;
+	(void) simple_mdio_write(cphy, MII_BMCR, ctl | BMCR_ANRESTART);
+	return 0;
+}
+
+static int mv88e1xxx_autoneg_restart(struct cphy *cphy)
+{
+	mdio_set_bit(cphy, MII_BMCR, BMCR_ANRESTART);
+	return 0;
+}
+
+static int mv88e1xxx_advertise(struct cphy *phy, unsigned int advertise_map)
+{
+	u32 val = 0;
+
+	if (advertise_map &
+	    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
+		(void) simple_mdio_read(phy, MII_GBCR, &val);
+		val &= ~(GBCR_ADV_1000HALF | GBCR_ADV_1000FULL);
+		if (advertise_map & ADVERTISED_1000baseT_Half)
+			val |= GBCR_ADV_1000HALF;
+		if (advertise_map & ADVERTISED_1000baseT_Full)
+			val |= GBCR_ADV_1000FULL;
+	}
+	(void) simple_mdio_write(phy, MII_GBCR, val);
+
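+	/* Start with the selector field set to IEEE 802.3 (ADVERTISE_CSMA) */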
+	val = 1;
+	if (advertise_map & ADVERTISED_10baseT_Half)
+		val |= ADVERTISE_10HALF;
+	if (advertise_map & ADVERTISED_10baseT_Full)
+		val |= ADVERTISE_10FULL;
+	if (advertise_map & ADVERTISED_100baseT_Half)
+		val |= ADVERTISE_100HALF;
+	if (advertise_map & ADVERTISED_100baseT_Full)
+		val |= ADVERTISE_100FULL;
+	if (advertise_map & ADVERTISED_PAUSE)
+		val |= ADVERTISE_PAUSE;
+	if (advertise_map & ADVERTISED_ASYM_PAUSE)
+		val |= ADVERTISE_PAUSE_ASYM;
+	(void) simple_mdio_write(phy, MII_ADVERTISE, val);
+	return 0;
+}
+
+static int mv88e1xxx_set_loopback(struct cphy *cphy, int on)
+{
+	if (on)
+		mdio_set_bit(cphy, MII_BMCR, BMCR_LOOPBACK);
+	else
+		mdio_clear_bit(cphy, MII_BMCR, BMCR_LOOPBACK);
+	return 0;
+}
+
+static int mv88e1xxx_get_link_status(struct cphy *cphy, int *link_ok,
+				     int *speed, int *duplex, int *fc)
+{
+	u32 status;
+	int sp = -1, dplx = -1, pause = 0;
+
+	(void) simple_mdio_read(cphy,
+			MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status);
+	if ((status & V_PSSR_STATUS_RESOLVED) != 0) {
+		if (status & V_PSSR_RX_PAUSE)
+			pause |= PAUSE_RX;
+		if (status & V_PSSR_TX_PAUSE)
+			pause |= PAUSE_TX;
+		dplx = (status & V_PSSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
+		sp = G_PSSR_SPEED(status);
+		if (sp == 0)
+			sp = SPEED_10;
+		else if (sp == 1)
+			sp = SPEED_100;
+		else
+			sp = SPEED_1000;
+	}
+	if (link_ok)
+		*link_ok = (status & V_PSSR_LINK) != 0;
+	if (speed)
+		*speed = sp;
+	if (duplex)
+		*duplex = dplx;
+	if (fc)
+		*fc = pause;
+	return 0;
+}
+
+static int mv88e1xxx_downshift_set(struct cphy *cphy, int downshift_enable)
+{
+	u32 val;
+
+	(void) simple_mdio_read(cphy,
+		MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, &val);
+
+	/*
+	 * Set the downshift counter to 2 so we try to establish Gb link
+	 * twice before downshifting.
+	 */
+	val &= ~(V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(M_DOWNSHIFT_CNT));
+
+	if (downshift_enable)
+		val |= V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(2);
+	(void) simple_mdio_write(cphy,
+			MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, val);
+	return 0;
+}
+
+static int mv88e1xxx_interrupt_handler(struct cphy *cphy)
+{
+	int cphy_cause = 0;
+	u32 status;
+
+	/*
+	 * Loop until cause reads zero. Need to handle bouncing interrupts.
+	 */
+	while (1) {
+		u32 cause;
+
+		(void) simple_mdio_read(cphy,
+				MV88E1XXX_INTERRUPT_STATUS_REGISTER,
+				&cause);
+		cause &= INTR_ENABLE_MASK;
+		if (!cause) break;
+
+		if (cause & MV88E1XXX_INTR_LINK_CHNG) {
+			(void) simple_mdio_read(cphy,
+				MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status);
+
+			if (status & MV88E1XXX_INTR_LINK_CHNG) {
+				cphy->state |= PHY_LINK_UP;
+			} else {
+				cphy->state &= ~PHY_LINK_UP;
+				if (cphy->state & PHY_AUTONEG_EN)
+					cphy->state &= ~PHY_AUTONEG_RDY;
+				cphy_cause |= cphy_cause_link_change;
+			}
+		}
+
+		if (cause & MV88E1XXX_INTR_AUTONEG_DONE)
+			cphy->state |= PHY_AUTONEG_RDY;
+
+		if ((cphy->state & (PHY_LINK_UP | PHY_AUTONEG_RDY)) ==
+			(PHY_LINK_UP | PHY_AUTONEG_RDY))
+				cphy_cause |= cphy_cause_link_change;
+	}
+	return cphy_cause;
+}
+
+static void mv88e1xxx_destroy(struct cphy *cphy)
+{
+	kfree(cphy);
+}
+
+static struct cphy_ops mv88e1xxx_ops = {
+	.destroy              = mv88e1xxx_destroy,
+	.reset                = mv88e1xxx_reset,
+	.interrupt_enable     = mv88e1xxx_interrupt_enable,
+	.interrupt_disable    = mv88e1xxx_interrupt_disable,
+	.interrupt_clear      = mv88e1xxx_interrupt_clear,
+	.interrupt_handler    = mv88e1xxx_interrupt_handler,
+	.autoneg_enable       = mv88e1xxx_autoneg_enable,
+	.autoneg_disable      = mv88e1xxx_autoneg_disable,
+	.autoneg_restart      = mv88e1xxx_autoneg_restart,
+	.advertise            = mv88e1xxx_advertise,
+	.set_loopback         = mv88e1xxx_set_loopback,
+	.set_speed_duplex     = mv88e1xxx_set_speed_duplex,
+	.get_link_status      = mv88e1xxx_get_link_status,
+};
+
+static struct cphy *mv88e1xxx_phy_create(adapter_t *adapter, int phy_addr,
+					 struct mdio_ops *mdio_ops)
+{
+	struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
+
+	if (!cphy) return NULL;
+
+	cphy_init(cphy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops);
+
+	/* Configure particular PHY's to run in a different mode. */
+	if ((board_info(adapter)->caps & SUPPORTED_TP) &&
+	    board_info(adapter)->chip_phy == CHBT_PHY_88E1111) {
+		/*
+		 * Configure the PHY transmitter as class A to reduce EMI.
+		 */
+		(void) simple_mdio_write(cphy,
+				MV88E1XXX_EXTENDED_ADDR_REGISTER, 0xB);
+		(void) simple_mdio_write(cphy,
+				MV88E1XXX_EXTENDED_REGISTER, 0x8004);
+	}
+	(void) mv88e1xxx_downshift_set(cphy, 1);   /* Enable downshift */
+
+	/* LED */
+	if (is_T2(adapter)) {
+		(void) simple_mdio_write(cphy,
+				MV88E1XXX_LED_CONTROL_REGISTER, 0x1);
+	}
+
+	return cphy;
+}
+
+static int mv88e1xxx_phy_reset(adapter_t* adapter)
+{
+	return 0;
+}
+
+struct gphy t1_mv88e1xxx_ops = {
+	mv88e1xxx_phy_create,
+	mv88e1xxx_phy_reset
+};
diff --git a/drivers/net/chelsio/mv88e1xxx.h b/drivers/net/chelsio/mv88e1xxx.h
new file mode 100644
index 0000000..967cc42
--- /dev/null
+++ b/drivers/net/chelsio/mv88e1xxx.h
@@ -0,0 +1,127 @@
+/* $Date: 2005/03/07 23:59:05 $ $RCSfile: mv88e1xxx.h,v $ $Revision: 1.13 $ */
+#ifndef CHELSIO_MV8E1XXX_H
+#define CHELSIO_MV8E1XXX_H
+
+#ifndef BMCR_SPEED1000
+# define BMCR_SPEED1000 0x40
+#endif
+
+#ifndef ADVERTISE_PAUSE
+# define ADVERTISE_PAUSE 0x400
+#endif
+#ifndef ADVERTISE_PAUSE_ASYM
+# define ADVERTISE_PAUSE_ASYM 0x800
+#endif
+
+/* Gigabit MII registers */
+#define MII_GBCR 9       /* 1000Base-T control register */
+#define MII_GBSR 10      /* 1000Base-T status register */
+
+/* 1000Base-T control register fields */
+#define GBCR_ADV_1000HALF         0x100
+#define GBCR_ADV_1000FULL         0x200
+#define GBCR_PREFER_MASTER        0x400
+#define GBCR_MANUAL_AS_MASTER     0x800
+#define GBCR_MANUAL_CONFIG_ENABLE 0x1000
+
+/* 1000Base-T status register fields */
+#define GBSR_LP_1000HALF  0x400
+#define GBSR_LP_1000FULL  0x800
+#define GBSR_REMOTE_OK    0x1000
+#define GBSR_LOCAL_OK     0x2000
+#define GBSR_LOCAL_MASTER 0x4000
+#define GBSR_MASTER_FAULT 0x8000
+
+/* Marvell PHY interrupt status bits. */
+#define MV88E1XXX_INTR_JABBER          0x0001
+#define MV88E1XXX_INTR_POLARITY_CHNG   0x0002
+#define MV88E1XXX_INTR_ENG_DETECT_CHNG 0x0010
+#define MV88E1XXX_INTR_DOWNSHIFT       0x0020
+#define MV88E1XXX_INTR_MDI_XOVER_CHNG  0x0040
+#define MV88E1XXX_INTR_FIFO_OVER_UNDER 0x0080
+#define MV88E1XXX_INTR_FALSE_CARRIER   0x0100
+#define MV88E1XXX_INTR_SYMBOL_ERROR    0x0200
+#define MV88E1XXX_INTR_LINK_CHNG       0x0400
+#define MV88E1XXX_INTR_AUTONEG_DONE    0x0800
+#define MV88E1XXX_INTR_PAGE_RECV       0x1000
+#define MV88E1XXX_INTR_DUPLEX_CHNG     0x2000
+#define MV88E1XXX_INTR_SPEED_CHNG      0x4000
+#define MV88E1XXX_INTR_AUTONEG_ERR     0x8000
+
+/* Marvell PHY specific registers. */
+#define MV88E1XXX_SPECIFIC_CNTRL_REGISTER               16
+#define MV88E1XXX_SPECIFIC_STATUS_REGISTER              17
+#define MV88E1XXX_INTERRUPT_ENABLE_REGISTER             18
+#define MV88E1XXX_INTERRUPT_STATUS_REGISTER             19
+#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER       20
+#define MV88E1XXX_RECV_ERR_CNTR_REGISTER                21
+#define MV88E1XXX_RES_REGISTER                          22
+#define MV88E1XXX_GLOBAL_STATUS_REGISTER                23
+#define MV88E1XXX_LED_CONTROL_REGISTER                  24
+#define MV88E1XXX_MANUAL_LED_OVERRIDE_REGISTER          25
+#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_2_REGISTER     26
+#define MV88E1XXX_EXT_PHY_SPECIFIC_STATUS_REGISTER      27
+#define MV88E1XXX_VIRTUAL_CABLE_TESTER_REGISTER         28
+#define MV88E1XXX_EXTENDED_ADDR_REGISTER                29
+#define MV88E1XXX_EXTENDED_REGISTER                     30
+
+/* PHY specific control register fields */
+#define S_PSCR_MDI_XOVER_MODE    5
+#define M_PSCR_MDI_XOVER_MODE    0x3
+#define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE)
+#define G_PSCR_MDI_XOVER_MODE(x) (((x) >> S_PSCR_MDI_XOVER_MODE) & M_PSCR_MDI_XOVER_MODE)
+
+/* Extended PHY specific control register fields */
+#define S_DOWNSHIFT_ENABLE 8
+#define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE)
+
+#define S_DOWNSHIFT_CNT    9
+#define M_DOWNSHIFT_CNT    0x7
+#define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT)
+#define G_DOWNSHIFT_CNT(x) (((x) >> S_DOWNSHIFT_CNT) & M_DOWNSHIFT_CNT)
+
+/* PHY specific status register fields */
+#define S_PSSR_JABBER 0
+#define V_PSSR_JABBER (1 << S_PSSR_JABBER)
+
+#define S_PSSR_POLARITY 1
+#define V_PSSR_POLARITY (1 << S_PSSR_POLARITY)
+
+#define S_PSSR_RX_PAUSE 2
+#define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE)
+
+#define S_PSSR_TX_PAUSE 3
+#define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE)
+
+#define S_PSSR_ENERGY_DETECT 4
+#define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT)
+
+#define S_PSSR_DOWNSHIFT_STATUS 5
+#define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS)
+
+#define S_PSSR_MDI 6
+#define V_PSSR_MDI (1 << S_PSSR_MDI)
+
+#define S_PSSR_CABLE_LEN    7
+#define M_PSSR_CABLE_LEN    0x7
+#define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN)
+#define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN)
+
+#define S_PSSR_LINK 10
+#define V_PSSR_LINK (1 << S_PSSR_LINK)
+
+#define S_PSSR_STATUS_RESOLVED 11
+#define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED)
+
+#define S_PSSR_PAGE_RECEIVED 12
+#define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED)
+
+#define S_PSSR_DUPLEX 13
+#define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX)
+
+#define S_PSSR_SPEED    14
+#define M_PSSR_SPEED    0x3
+#define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED)
+#define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED)
+
+#endif
diff --git a/drivers/net/chelsio/mv88x201x.c b/drivers/net/chelsio/mv88x201x.c
index db503428..c8e8948 100644
--- a/drivers/net/chelsio/mv88x201x.c
+++ b/drivers/net/chelsio/mv88x201x.c
@@ -85,29 +85,33 @@
 
 static int mv88x201x_interrupt_enable(struct cphy *cphy)
 {
-	u32 elmer;
-
 	/* Enable PHY LASI interrupts. */
 	mdio_write(cphy, 0x1, 0x9002, 0x1);
 
 	/* Enable Marvell interrupts through Elmer0. */
-	t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
-	elmer |= ELMER0_GP_BIT6;
-	t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	if (t1_is_asic(cphy->adapter)) {
+		u32 elmer;
+
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+		elmer |= ELMER0_GP_BIT6;
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	}
 	return 0;
 }
 
 static int mv88x201x_interrupt_disable(struct cphy *cphy)
 {
-	u32 elmer;
-
 	/* Disable PHY LASI interrupts. */
 	mdio_write(cphy, 0x1, 0x9002, 0x0);
 
 	/* Disable Marvell interrupts through Elmer0. */
-	t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
-	elmer &= ~ELMER0_GP_BIT6;
-	t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	if (t1_is_asic(cphy->adapter)) {
+		u32 elmer;
+
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+		elmer &= ~ELMER0_GP_BIT6;
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	}
 	return 0;
 }
 
@@ -140,9 +144,11 @@
 #endif
 
 	/* Clear Marvell interrupts through Elmer0. */
-	t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
-	elmer |= ELMER0_GP_BIT6;
-	t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
+	if (t1_is_asic(cphy->adapter)) {
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
+		elmer |= ELMER0_GP_BIT6;
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
+	}
 	return 0;
 }
 
@@ -205,11 +211,11 @@
 					 struct mdio_ops *mdio_ops)
 {
 	u32 val;
-	struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL);
+	struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
 
 	if (!cphy)
 		return NULL;
-	memset(cphy, 0, sizeof(*cphy));
+
 	cphy_init(cphy, adapter, phy_addr, &mv88x201x_ops, mdio_ops);
 
 	/* Commands the PHY to enable XFP's clock. */
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c
new file mode 100644
index 0000000..c7731b6
--- /dev/null
+++ b/drivers/net/chelsio/my3126.c
@@ -0,0 +1,206 @@
+/* $Date: 2005/11/12 02:13:49 $ $RCSfile: my3126.c,v $ $Revision: 1.15 $ */
+#include "cphy.h"
+#include "elmer0.h"
+#include "suni1x10gexp_regs.h"
+
+/* Port Reset */
+static int my3126_reset(struct cphy *cphy, int wait)
+{
+	/*
+	 * This can be done through registers.  It is not required since
+	 * a full chip reset is used.
+	 */
+	return (0);
+}
+
+static int my3126_interrupt_enable(struct cphy *cphy)
+{
+	schedule_delayed_work(&cphy->phy_update, HZ/30);
+	t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo);
+	return (0);
+}
+
+static int my3126_interrupt_disable(struct cphy *cphy)
+{
+	cancel_rearming_delayed_work(&cphy->phy_update);
+	return (0);
+}
+
+static int my3126_interrupt_clear(struct cphy *cphy)
+{
+	return (0);
+}
+
+#define OFFSET(REG_ADDR)    (REG_ADDR << 2)
+
+static int my3126_interrupt_handler(struct cphy *cphy)
+{
+	u32 val;
+	u16 val16;
+	u16 status;
+	u32 act_count;
+	adapter_t *adapter;
+	adapter = cphy->adapter;
+
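+	/* The link status (BMSR) is sampled only on every 50th poll */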
+	if (cphy->count == 50) {
+		mdio_read(cphy, 0x1, 0x1, &val);
+		val16 = (u16) val;
+		status = cphy->bmsr ^ val16;
+
+		if (status & BMSR_LSTATUS)
+			t1_link_changed(adapter, 0);
+		cphy->bmsr = val16;
+
+		/*
+		 * We have only enabled link change interrupts, so it must be
+		 * a link change.
+		 */
+		cphy->count = 0;
+	}
+
+	t1_tpi_write(adapter, OFFSET(SUNI1x10GEXP_REG_MSTAT_CONTROL),
+		SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
+	t1_tpi_read(adapter,
+		OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW), &act_count);
+	t1_tpi_read(adapter,
+		OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW), &val);
+	act_count += val;
+
+	/* Populate elmer_gpo with the register value */
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	cphy->elmer_gpo = val;
+
+	if ((val & (1 << 8)) || (val & (1 << 19)) ||
+	    (cphy->act_count == act_count) || cphy->act_on) {
+		if (is_T2(adapter))
+			val |= (1 << 9);
+		else if (t1_is_T1B(adapter))
+			val |= (1 << 20);
+		cphy->act_on = 0;
+	} else {
+		if (is_T2(adapter))
+			val &= ~(1 << 9);
+		else if (t1_is_T1B(adapter))
+			val &= ~(1 << 20);
+		cphy->act_on = 1;
+	}
+
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+	cphy->elmer_gpo = val;
+	cphy->act_count = act_count;
+	cphy->count++;
+
+	return cphy_cause_link_change;
+}
+
+static void my3216_poll(struct work_struct *work)
+{
+	struct cphy *cphy = container_of(work, struct cphy, phy_update.work);
+
+	my3126_interrupt_handler(cphy);
+}
+
+static int my3126_set_loopback(struct cphy *cphy, int on)
+{
+	return (0);
+}
+
+/* To check the activity LED */
+static int my3126_get_link_status(struct cphy *cphy,
+			int *link_ok, int *speed, int *duplex, int *fc)
+{
+	u32 val;
+	u16 val16;
+	adapter_t *adapter;
+
+	adapter = cphy->adapter;
+	mdio_read(cphy, 0x1, 0x1, &val);
+	val16 = (u16) val;
+
+	/* Populate elmer_gpo with the register value */
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	cphy->elmer_gpo = val;
+
+	*link_ok = (val16 & BMSR_LSTATUS);
+
+	if (*link_ok) {
+		/* Turn on the LED. */
+		if (is_T2(adapter))
+			 val &= ~(1 << 8);
+		else if (t1_is_T1B(adapter))
+			 val &= ~(1 << 19);
+	} else {
+		/* Turn off the LED. */
+		if (is_T2(adapter))
+			 val |= (1 << 8);
+		else if (t1_is_T1B(adapter))
+			 val |= (1 << 19);
+	}
+
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	cphy->elmer_gpo = val;
+	*speed = SPEED_10000;
+	*duplex = DUPLEX_FULL;
+
+	/* need to add flow control */
+	if (fc)
+		*fc = PAUSE_RX | PAUSE_TX;
+
+	return (0);
+}
+
+static void my3126_destroy(struct cphy *cphy)
+{
+	kfree(cphy);
+}
+
+static struct cphy_ops my3126_ops = {
+	.destroy		= my3126_destroy,
+	.reset			= my3126_reset,
+	.interrupt_enable	= my3126_interrupt_enable,
+	.interrupt_disable	= my3126_interrupt_disable,
+	.interrupt_clear	= my3126_interrupt_clear,
+	.interrupt_handler	= my3126_interrupt_handler,
+	.get_link_status	= my3126_get_link_status,
+	.set_loopback		= my3126_set_loopback,
+};
+
+static struct cphy *my3126_phy_create(adapter_t *adapter,
+			int phy_addr, struct mdio_ops *mdio_ops)
+{
+	struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL);
+
+	if (!cphy)
+		return NULL;
+
+	cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops);
+	INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
+	cphy->bmsr = 0;
+
+	return (cphy);
+}
+
+/* Chip Reset */
+static int my3126_phy_reset(adapter_t * adapter)
+{
+	u32 val;
+
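+	/*
+	 * GPO bit 2 presumably drives the PHY reset: hold it low for 100 ms,
+	 * then release it and give the chip a second to come up.
+	 */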
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~4;
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	msleep(100);
+
+	t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
+	msleep(1000);
+
+	/* Now let's enable the laser.  Delay 100us. */
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val |= 0x8000;
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(100);
+	return (0);
+}
+
+struct gphy t1_my3126_ops = {
+	my3126_phy_create,
+	my3126_phy_reset
+};
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 04a1404..63cabeb 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -43,21 +43,7 @@
 #include "elmer0.h"
 #include "suni1x10gexp_regs.h"
 
-/* 802.3ae 10Gb/s MDIO Manageable Device(MMD)
- */
-enum {
-    MMD_RESERVED,
-    MMD_PMAPMD,
-    MMD_WIS,
-    MMD_PCS,
-    MMD_PHY_XGXS,	/* XGMII Extender Sublayer */
-    MMD_DTE_XGXS,
-};
-
-enum {
-    PHY_XGXS_CTRL_1,
-    PHY_XGXS_STATUS_1
-};
+#include <linux/crc32.h>
 
 #define OFFSET(REG_ADDR)    (REG_ADDR << 2)
 
@@ -88,6 +74,8 @@
 	RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
 	RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
 	RxUndersizedFrames =  SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
+	RxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW,
+	RxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW,
 
 	TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
 	TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
@@ -95,7 +83,9 @@
 	TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
 	TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
 	TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
-	TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW
+	TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW,
+	TxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW,
+	TxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW
 };
 
 struct _cmac_instance {
@@ -124,12 +114,12 @@
 
 /*
  * Enable interrupts for the PM3393
-
-	1. Enable PM3393 BLOCK interrupts.
-	2. Enable PM3393 Master Interrupt bit(INTE)
-	3. Enable ELMER's PM3393 bit.
-	4. Enable Terminator external interrupt.
-*/
+ *
+ *	1. Enable PM3393 BLOCK interrupts.
+ *	2. Enable PM3393 Master Interrupt bit(INTE)
+ *	3. Enable ELMER's PM3393 bit.
+ *	4. Enable Terminator external interrupt.
+ */
 static int pm3393_interrupt_enable(struct cmac *cmac)
 {
 	u32 pl_intr;
@@ -257,14 +247,12 @@
 static int pm3393_interrupt_handler(struct cmac *cmac)
 {
 	u32 master_intr_status;
-/*
-	1. Read master interrupt register.
-	2. Read BLOCK's interrupt status registers.
-	3. Handle BLOCK interrupts.
-*/
+
 	/* Read the master interrupt status register. */
 	pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
 	       &master_intr_status);
+	CH_DBG(cmac->adapter, INTR, "PM3393 intr cause 0x%x\n",
+	       master_intr_status);
 
 	/* TBD XXX Lets just clear everything for now */
 	pm3393_interrupt_clear(cmac);
@@ -307,11 +295,7 @@
 	 * The PHY doesn't give us link status indication on its own so have
 	 * the link management code query it instead.
 	 */
-	{
-		extern void link_changed(adapter_t *adapter, int port_id);
-
-		link_changed(cmac->adapter, 0);
-	}
+	t1_link_changed(cmac->adapter, 0);
 	return 0;
 }
 
@@ -363,33 +347,6 @@
 	return 0;
 }
 
-static u32 calc_crc(u8 *b, int len)
-{
-	int i;
-	u32 crc = (u32)~0;
-
-	/* calculate crc one bit at a time */
-	while (len--) {
-		crc ^= *b++;
-		for (i = 0; i < 8; i++) {
-			if (crc & 0x1)
-				crc = (crc >> 1) ^ 0xedb88320;
-			else
-				crc = (crc >> 1);
-		}
-	}
-
-	/* reverse bits */
-	crc = ((crc >> 4) & 0x0f0f0f0f) | ((crc << 4) & 0xf0f0f0f0);
-	crc = ((crc >> 2) & 0x33333333) | ((crc << 2) & 0xcccccccc);
-	crc = ((crc >> 1) & 0x55555555) | ((crc << 1) & 0xaaaaaaaa);
-	/* swap bytes */
-	crc = (crc >> 16) | (crc << 16);
-	crc = (crc >> 8 & 0x00ff00ff) | (crc << 8 & 0xff00ff00);
-
-	return crc;
-}
-
 static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
 {
 	int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
@@ -423,7 +380,7 @@
 		u16 mc_filter[4] = { 0, };
 
 		while ((addr = t1_get_next_mcaddr(rm))) {
-			bit = (calc_crc(addr, ETH_ALEN) >> 23) & 0x3f;	/* bit[23:28] */
+			bit = (ether_crc(ETH_ALEN, addr) >> 23) & 0x3f;	/* bit[23:28] */
 			mc_filter[bit >> 4] |= 1 << (bit & 0xf);
 		}
 		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
@@ -471,20 +428,29 @@
 	return 0;
 }
 
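+/*
+ * Read one MSTAT counter that the hardware exposes as three consecutive
+ * registers (two 16-bit halves plus an 8-bit top), fold the 40-bit value
+ * into the cumulative 64-bit statistic, and add 1 to bit 40 when 'over'
+ * is non-zero.
+ */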
+static void pm3393_rmon_update(struct adapter *adapter, u32 offs, u64 *val,
+			       int over)
+{
+	u32 val0, val1, val2;
+
+	t1_tpi_read(adapter, offs, &val0);
+	t1_tpi_read(adapter, offs + 4, &val1);
+	t1_tpi_read(adapter, offs + 8, &val2);
+
+	*val &= ~0ull << 40;
+	*val |= val0 & 0xffff;
+	*val |= (val1 & 0xffff) << 16;
+	*val |= (u64)(val2 & 0xff) << 32;
+
+	if (over)
+		*val += 1ull << 40;
+}
+
 #define RMON_UPDATE(mac, name, stat_name) \
-	{ \
-		t1_tpi_read((mac)->adapter, OFFSET(name), &val0);	\
-		t1_tpi_read((mac)->adapter, OFFSET(((name)+1)), &val1); \
-		t1_tpi_read((mac)->adapter, OFFSET(((name)+2)), &val2); \
-		(mac)->stats.stat_name = ((u64)val0 & 0xffff) | \
-						(((u64)val1 & 0xffff) << 16) | \
-						(((u64)val2 & 0xff) << 32) | \
-						((mac)->stats.stat_name & \
-							(~(u64)0 << 40)); \
-		if (ro &	\
-			((name -  SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2)) \
-			(mac)->stats.stat_name += ((u64)1 << 40); \
-	}
+	pm3393_rmon_update((mac)->adapter, OFFSET(name),		\
+			   &(mac)->stats.stat_name,			\
+		   (ro & ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2)))
+
 
 static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
 							      int flag)
@@ -519,6 +485,8 @@
 	RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
 	RMON_UPDATE(mac, RxFragments, RxRuntErrors);
 	RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
+	RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
+	RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);
 
 	/* Tx stats */
 	RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
@@ -529,6 +497,8 @@
 	RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
 	RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
 	RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
+	RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
+	RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);
 
 	return &mac->stats;
 }
@@ -631,10 +601,9 @@
 {
 	struct cmac *cmac;
 
-	cmac = kmalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
+	cmac = kzalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
 	if (!cmac)
 		return NULL;
-	memset(cmac, 0, sizeof(*cmac));
 
 	cmac->ops = &pm3393_ops;
 	cmac->instance = (cmac_instance *) (cmac + 1);
@@ -815,6 +784,12 @@
 
 		successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
 				    && is_xaui_mabc_pll_locked);
+
+		CH_DBG(adapter, HW,
+		       "PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, "
+		       "is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n",
+		       i, is_pl4_reset_finished, val, is_pl4_outof_lock,
+		       is_xaui_mabc_pll_locked);
 	}
 	return successful_reset ? 0 : 1;
 }
diff --git a/drivers/net/chelsio/regs.h b/drivers/net/chelsio/regs.h
index b90e11f..c80bf4d 100644
--- a/drivers/net/chelsio/regs.h
+++ b/drivers/net/chelsio/regs.h
@@ -71,6 +71,10 @@
 #define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY)
 #define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY)
 
+#define S_DISABLE_CMDQ0_GTS    8
+#define V_DISABLE_CMDQ0_GTS(x) ((x) << S_DISABLE_CMDQ0_GTS)
+#define F_DISABLE_CMDQ0_GTS    V_DISABLE_CMDQ0_GTS(1U)
+
 #define S_DISABLE_CMDQ1_GTS    9
 #define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS)
 #define F_DISABLE_CMDQ1_GTS    V_DISABLE_CMDQ1_GTS(1U)
@@ -87,12 +91,18 @@
 #define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN)
 #define F_ENABLE_BIG_ENDIAN    V_ENABLE_BIG_ENDIAN(1U)
 
+#define S_FL_SELECTION_CRITERIA    13
+#define V_FL_SELECTION_CRITERIA(x) ((x) << S_FL_SELECTION_CRITERIA)
+#define F_FL_SELECTION_CRITERIA    V_FL_SELECTION_CRITERIA(1U)
+
 #define S_ISCSI_COALESCE    14
 #define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE)
 #define F_ISCSI_COALESCE    V_ISCSI_COALESCE(1U)
 
 #define S_RX_PKT_OFFSET    15
+#define M_RX_PKT_OFFSET    0x7
 #define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET)
+#define G_RX_PKT_OFFSET(x) (((x) >> S_RX_PKT_OFFSET) & M_RX_PKT_OFFSET)
 
 #define S_VLAN_XTRACT    18
 #define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT)
@@ -108,16 +118,114 @@
 #define A_SG_FL1BASELWR 0x20
 #define A_SG_FL1BASEUPR 0x24
 #define A_SG_CMD0SIZE 0x28
+
+#define S_CMDQ0_SIZE    0
+#define M_CMDQ0_SIZE    0x1ffff
+#define V_CMDQ0_SIZE(x) ((x) << S_CMDQ0_SIZE)
+#define G_CMDQ0_SIZE(x) (((x) >> S_CMDQ0_SIZE) & M_CMDQ0_SIZE)
+
 #define A_SG_FL0SIZE 0x2c
+
+#define S_FL0_SIZE    0
+#define M_FL0_SIZE    0x1ffff
+#define V_FL0_SIZE(x) ((x) << S_FL0_SIZE)
+#define G_FL0_SIZE(x) (((x) >> S_FL0_SIZE) & M_FL0_SIZE)
+
 #define A_SG_RSPSIZE 0x30
+
+#define S_RESPQ_SIZE    0
+#define M_RESPQ_SIZE    0x1ffff
+#define V_RESPQ_SIZE(x) ((x) << S_RESPQ_SIZE)
+#define G_RESPQ_SIZE(x) (((x) >> S_RESPQ_SIZE) & M_RESPQ_SIZE)
+
 #define A_SG_RSPBASELWR 0x34
 #define A_SG_RSPBASEUPR 0x38
 #define A_SG_FLTHRESHOLD 0x3c
+
+#define S_FL_THRESHOLD    0
+#define M_FL_THRESHOLD    0xffff
+#define V_FL_THRESHOLD(x) ((x) << S_FL_THRESHOLD)
+#define G_FL_THRESHOLD(x) (((x) >> S_FL_THRESHOLD) & M_FL_THRESHOLD)
+
 #define A_SG_RSPQUEUECREDIT 0x40
+
+#define S_RESPQ_CREDIT    0
+#define M_RESPQ_CREDIT    0x1ffff
+#define V_RESPQ_CREDIT(x) ((x) << S_RESPQ_CREDIT)
+#define G_RESPQ_CREDIT(x) (((x) >> S_RESPQ_CREDIT) & M_RESPQ_CREDIT)
+
 #define A_SG_SLEEPING 0x48
+
+#define S_SLEEPING    0
+#define M_SLEEPING    0xffff
+#define V_SLEEPING(x) ((x) << S_SLEEPING)
+#define G_SLEEPING(x) (((x) >> S_SLEEPING) & M_SLEEPING)
+
 #define A_SG_INTRTIMER 0x4c
+
+#define S_INTERRUPT_TIMER_COUNT    0
+#define M_INTERRUPT_TIMER_COUNT    0xffffff
+#define V_INTERRUPT_TIMER_COUNT(x) ((x) << S_INTERRUPT_TIMER_COUNT)
+#define G_INTERRUPT_TIMER_COUNT(x) (((x) >> S_INTERRUPT_TIMER_COUNT) & M_INTERRUPT_TIMER_COUNT)
+
+#define A_SG_CMD0PTR 0x50
+
+#define S_CMDQ0_POINTER    0
+#define M_CMDQ0_POINTER    0xffff
+#define V_CMDQ0_POINTER(x) ((x) << S_CMDQ0_POINTER)
+#define G_CMDQ0_POINTER(x) (((x) >> S_CMDQ0_POINTER) & M_CMDQ0_POINTER)
+
+#define S_CURRENT_GENERATION_BIT    16
+#define V_CURRENT_GENERATION_BIT(x) ((x) << S_CURRENT_GENERATION_BIT)
+#define F_CURRENT_GENERATION_BIT    V_CURRENT_GENERATION_BIT(1U)
+
+#define A_SG_CMD1PTR 0x54
+
+#define S_CMDQ1_POINTER    0
+#define M_CMDQ1_POINTER    0xffff
+#define V_CMDQ1_POINTER(x) ((x) << S_CMDQ1_POINTER)
+#define G_CMDQ1_POINTER(x) (((x) >> S_CMDQ1_POINTER) & M_CMDQ1_POINTER)
+
+#define A_SG_FL0PTR 0x58
+
+#define S_FL0_POINTER    0
+#define M_FL0_POINTER    0xffff
+#define V_FL0_POINTER(x) ((x) << S_FL0_POINTER)
+#define G_FL0_POINTER(x) (((x) >> S_FL0_POINTER) & M_FL0_POINTER)
+
+#define A_SG_FL1PTR 0x5c
+
+#define S_FL1_POINTER    0
+#define M_FL1_POINTER    0xffff
+#define V_FL1_POINTER(x) ((x) << S_FL1_POINTER)
+#define G_FL1_POINTER(x) (((x) >> S_FL1_POINTER) & M_FL1_POINTER)
+
+#define A_SG_VERSION 0x6c
+
+#define S_DAY    0
+#define M_DAY    0x1f
+#define V_DAY(x) ((x) << S_DAY)
+#define G_DAY(x) (((x) >> S_DAY) & M_DAY)
+
+#define S_MONTH    5
+#define M_MONTH    0xf
+#define V_MONTH(x) ((x) << S_MONTH)
+#define G_MONTH(x) (((x) >> S_MONTH) & M_MONTH)
+
 #define A_SG_CMD1SIZE 0xb0
+
+#define S_CMDQ1_SIZE    0
+#define M_CMDQ1_SIZE    0x1ffff
+#define V_CMDQ1_SIZE(x) ((x) << S_CMDQ1_SIZE)
+#define G_CMDQ1_SIZE(x) (((x) >> S_CMDQ1_SIZE) & M_CMDQ1_SIZE)
+
 #define A_SG_FL1SIZE 0xb4
+
+#define S_FL1_SIZE    0
+#define M_FL1_SIZE    0x1ffff
+#define V_FL1_SIZE(x) ((x) << S_FL1_SIZE)
+#define G_FL1_SIZE(x) (((x) >> S_FL1_SIZE) & M_FL1_SIZE)
+
 #define A_SG_INT_ENABLE 0xb8
 
 #define S_RESPQ_EXHAUSTED    0
@@ -144,21 +252,369 @@
 #define A_SG_RESPACCUTIMER 0xc0
 
 /* MC3 registers */
+#define A_MC3_CFG 0x100
+
+#define S_CLK_ENABLE    0
+#define V_CLK_ENABLE(x) ((x) << S_CLK_ENABLE)
+#define F_CLK_ENABLE    V_CLK_ENABLE(1U)
 
 #define S_READY    1
 #define V_READY(x) ((x) << S_READY)
 #define F_READY    V_READY(1U)
 
-/* MC4 registers */
+#define S_READ_TO_WRITE_DELAY    2
+#define M_READ_TO_WRITE_DELAY    0x7
+#define V_READ_TO_WRITE_DELAY(x) ((x) << S_READ_TO_WRITE_DELAY)
+#define G_READ_TO_WRITE_DELAY(x) (((x) >> S_READ_TO_WRITE_DELAY) & M_READ_TO_WRITE_DELAY)
 
+#define S_WRITE_TO_READ_DELAY    5
+#define M_WRITE_TO_READ_DELAY    0x7
+#define V_WRITE_TO_READ_DELAY(x) ((x) << S_WRITE_TO_READ_DELAY)
+#define G_WRITE_TO_READ_DELAY(x) (((x) >> S_WRITE_TO_READ_DELAY) & M_WRITE_TO_READ_DELAY)
+
+#define S_MC3_BANK_CYCLE    8
+#define M_MC3_BANK_CYCLE    0xf
+#define V_MC3_BANK_CYCLE(x) ((x) << S_MC3_BANK_CYCLE)
+#define G_MC3_BANK_CYCLE(x) (((x) >> S_MC3_BANK_CYCLE) & M_MC3_BANK_CYCLE)
+
+#define S_REFRESH_CYCLE    12
+#define M_REFRESH_CYCLE    0xf
+#define V_REFRESH_CYCLE(x) ((x) << S_REFRESH_CYCLE)
+#define G_REFRESH_CYCLE(x) (((x) >> S_REFRESH_CYCLE) & M_REFRESH_CYCLE)
+
+#define S_PRECHARGE_CYCLE    16
+#define M_PRECHARGE_CYCLE    0x3
+#define V_PRECHARGE_CYCLE(x) ((x) << S_PRECHARGE_CYCLE)
+#define G_PRECHARGE_CYCLE(x) (((x) >> S_PRECHARGE_CYCLE) & M_PRECHARGE_CYCLE)
+
+#define S_ACTIVE_TO_READ_WRITE_DELAY    18
+#define V_ACTIVE_TO_READ_WRITE_DELAY(x) ((x) << S_ACTIVE_TO_READ_WRITE_DELAY)
+#define F_ACTIVE_TO_READ_WRITE_DELAY    V_ACTIVE_TO_READ_WRITE_DELAY(1U)
+
+#define S_ACTIVE_TO_PRECHARGE_DELAY    19
+#define M_ACTIVE_TO_PRECHARGE_DELAY    0x7
+#define V_ACTIVE_TO_PRECHARGE_DELAY(x) ((x) << S_ACTIVE_TO_PRECHARGE_DELAY)
+#define G_ACTIVE_TO_PRECHARGE_DELAY(x) (((x) >> S_ACTIVE_TO_PRECHARGE_DELAY) & M_ACTIVE_TO_PRECHARGE_DELAY)
+
+#define S_WRITE_RECOVERY_DELAY    22
+#define M_WRITE_RECOVERY_DELAY    0x3
+#define V_WRITE_RECOVERY_DELAY(x) ((x) << S_WRITE_RECOVERY_DELAY)
+#define G_WRITE_RECOVERY_DELAY(x) (((x) >> S_WRITE_RECOVERY_DELAY) & M_WRITE_RECOVERY_DELAY)
+
+#define S_DENSITY    24
+#define M_DENSITY    0x3
+#define V_DENSITY(x) ((x) << S_DENSITY)
+#define G_DENSITY(x) (((x) >> S_DENSITY) & M_DENSITY)
+
+#define S_ORGANIZATION    26
+#define V_ORGANIZATION(x) ((x) << S_ORGANIZATION)
+#define F_ORGANIZATION    V_ORGANIZATION(1U)
+
+#define S_BANKS    27
+#define V_BANKS(x) ((x) << S_BANKS)
+#define F_BANKS    V_BANKS(1U)
+
+#define S_UNREGISTERED    28
+#define V_UNREGISTERED(x) ((x) << S_UNREGISTERED)
+#define F_UNREGISTERED    V_UNREGISTERED(1U)
+
+#define S_MC3_WIDTH    29
+#define M_MC3_WIDTH    0x3
+#define V_MC3_WIDTH(x) ((x) << S_MC3_WIDTH)
+#define G_MC3_WIDTH(x) (((x) >> S_MC3_WIDTH) & M_MC3_WIDTH)
+
+#define S_MC3_SLOW    31
+#define V_MC3_SLOW(x) ((x) << S_MC3_SLOW)
+#define F_MC3_SLOW    V_MC3_SLOW(1U)
+
+#define A_MC3_MODE 0x104
+
+#define S_MC3_MODE    0
+#define M_MC3_MODE    0x3fff
+#define V_MC3_MODE(x) ((x) << S_MC3_MODE)
+#define G_MC3_MODE(x) (((x) >> S_MC3_MODE) & M_MC3_MODE)
+
+#define S_BUSY    31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY    V_BUSY(1U)
+
+#define A_MC3_EXT_MODE 0x108
+
+#define S_MC3_EXTENDED_MODE    0
+#define M_MC3_EXTENDED_MODE    0x3fff
+#define V_MC3_EXTENDED_MODE(x) ((x) << S_MC3_EXTENDED_MODE)
+#define G_MC3_EXTENDED_MODE(x) (((x) >> S_MC3_EXTENDED_MODE) & M_MC3_EXTENDED_MODE)
+
+#define A_MC3_PRECHARG 0x10c
+#define A_MC3_REFRESH 0x110
+
+#define S_REFRESH_ENABLE    0
+#define V_REFRESH_ENABLE(x) ((x) << S_REFRESH_ENABLE)
+#define F_REFRESH_ENABLE    V_REFRESH_ENABLE(1U)
+
+#define S_REFRESH_DIVISOR    1
+#define M_REFRESH_DIVISOR    0x3fff
+#define V_REFRESH_DIVISOR(x) ((x) << S_REFRESH_DIVISOR)
+#define G_REFRESH_DIVISOR(x) (((x) >> S_REFRESH_DIVISOR) & M_REFRESH_DIVISOR)
+
+#define A_MC3_STROBE 0x114
+
+#define S_MASTER_DLL_RESET    0
+#define V_MASTER_DLL_RESET(x) ((x) << S_MASTER_DLL_RESET)
+#define F_MASTER_DLL_RESET    V_MASTER_DLL_RESET(1U)
+
+#define S_MASTER_DLL_TAP_COUNT    1
+#define M_MASTER_DLL_TAP_COUNT    0xff
+#define V_MASTER_DLL_TAP_COUNT(x) ((x) << S_MASTER_DLL_TAP_COUNT)
+#define G_MASTER_DLL_TAP_COUNT(x) (((x) >> S_MASTER_DLL_TAP_COUNT) & M_MASTER_DLL_TAP_COUNT)
+
+#define S_MASTER_DLL_LOCKED    9
+#define V_MASTER_DLL_LOCKED(x) ((x) << S_MASTER_DLL_LOCKED)
+#define F_MASTER_DLL_LOCKED    V_MASTER_DLL_LOCKED(1U)
+
+#define S_MASTER_DLL_MAX_TAP_COUNT    10
+#define V_MASTER_DLL_MAX_TAP_COUNT(x) ((x) << S_MASTER_DLL_MAX_TAP_COUNT)
+#define F_MASTER_DLL_MAX_TAP_COUNT    V_MASTER_DLL_MAX_TAP_COUNT(1U)
+
+#define S_MASTER_DLL_TAP_COUNT_OFFSET    11
+#define M_MASTER_DLL_TAP_COUNT_OFFSET    0x3f
+#define V_MASTER_DLL_TAP_COUNT_OFFSET(x) ((x) << S_MASTER_DLL_TAP_COUNT_OFFSET)
+#define G_MASTER_DLL_TAP_COUNT_OFFSET(x) (((x) >> S_MASTER_DLL_TAP_COUNT_OFFSET) & M_MASTER_DLL_TAP_COUNT_OFFSET)
+
+#define S_SLAVE_DLL_RESET    11
+#define V_SLAVE_DLL_RESET(x) ((x) << S_SLAVE_DLL_RESET)
+#define F_SLAVE_DLL_RESET    V_SLAVE_DLL_RESET(1U)
+
+#define S_SLAVE_DLL_DELTA    12
+#define M_SLAVE_DLL_DELTA    0xf
+#define V_SLAVE_DLL_DELTA(x) ((x) << S_SLAVE_DLL_DELTA)
+#define G_SLAVE_DLL_DELTA(x) (((x) >> S_SLAVE_DLL_DELTA) & M_SLAVE_DLL_DELTA)
+
+#define S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT    17
+#define M_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT    0x3f
+#define V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT(x) ((x) << S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT)
+#define G_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT(x) (((x) >> S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT) & M_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT)
+
+#define S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE    23
+#define V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE(x) ((x) << S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE)
+#define F_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE    V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE(1U)
+
+#define S_SLAVE_DELAY_LINE_TAP_COUNT    24
+#define M_SLAVE_DELAY_LINE_TAP_COUNT    0x3f
+#define V_SLAVE_DELAY_LINE_TAP_COUNT(x) ((x) << S_SLAVE_DELAY_LINE_TAP_COUNT)
+#define G_SLAVE_DELAY_LINE_TAP_COUNT(x) (((x) >> S_SLAVE_DELAY_LINE_TAP_COUNT) & M_SLAVE_DELAY_LINE_TAP_COUNT)
+
+#define A_MC3_ECC_CNTL 0x118
+
+#define S_ECC_GENERATION_ENABLE    0
+#define V_ECC_GENERATION_ENABLE(x) ((x) << S_ECC_GENERATION_ENABLE)
+#define F_ECC_GENERATION_ENABLE    V_ECC_GENERATION_ENABLE(1U)
+
+#define S_ECC_CHECK_ENABLE    1
+#define V_ECC_CHECK_ENABLE(x) ((x) << S_ECC_CHECK_ENABLE)
+#define F_ECC_CHECK_ENABLE    V_ECC_CHECK_ENABLE(1U)
+
+#define S_CORRECTABLE_ERROR_COUNT    2
+#define M_CORRECTABLE_ERROR_COUNT    0xff
+#define V_CORRECTABLE_ERROR_COUNT(x) ((x) << S_CORRECTABLE_ERROR_COUNT)
+#define G_CORRECTABLE_ERROR_COUNT(x) (((x) >> S_CORRECTABLE_ERROR_COUNT) & M_CORRECTABLE_ERROR_COUNT)
+
+#define S_UNCORRECTABLE_ERROR_COUNT    10
+#define M_UNCORRECTABLE_ERROR_COUNT    0xff
+#define V_UNCORRECTABLE_ERROR_COUNT(x) ((x) << S_UNCORRECTABLE_ERROR_COUNT)
+#define G_UNCORRECTABLE_ERROR_COUNT(x) (((x) >> S_UNCORRECTABLE_ERROR_COUNT) & M_UNCORRECTABLE_ERROR_COUNT)
+
+#define A_MC3_CE_ADDR 0x11c
+
+#define S_MC3_CE_ADDR    4
+#define M_MC3_CE_ADDR    0xfffffff
+#define V_MC3_CE_ADDR(x) ((x) << S_MC3_CE_ADDR)
+#define G_MC3_CE_ADDR(x) (((x) >> S_MC3_CE_ADDR) & M_MC3_CE_ADDR)
+
+#define A_MC3_CE_DATA0 0x120
+#define A_MC3_CE_DATA1 0x124
+#define A_MC3_CE_DATA2 0x128
+#define A_MC3_CE_DATA3 0x12c
+#define A_MC3_CE_DATA4 0x130
+#define A_MC3_UE_ADDR 0x134
+
+#define S_MC3_UE_ADDR    4
+#define M_MC3_UE_ADDR    0xfffffff
+#define V_MC3_UE_ADDR(x) ((x) << S_MC3_UE_ADDR)
+#define G_MC3_UE_ADDR(x) (((x) >> S_MC3_UE_ADDR) & M_MC3_UE_ADDR)
+
+#define A_MC3_UE_DATA0 0x138
+#define A_MC3_UE_DATA1 0x13c
+#define A_MC3_UE_DATA2 0x140
+#define A_MC3_UE_DATA3 0x144
+#define A_MC3_UE_DATA4 0x148
+#define A_MC3_BD_ADDR 0x14c
+#define A_MC3_BD_DATA0 0x150
+#define A_MC3_BD_DATA1 0x154
+#define A_MC3_BD_DATA2 0x158
+#define A_MC3_BD_DATA3 0x15c
+#define A_MC3_BD_DATA4 0x160
+#define A_MC3_BD_OP 0x164
+
+#define S_BACK_DOOR_OPERATION    0
+#define V_BACK_DOOR_OPERATION(x) ((x) << S_BACK_DOOR_OPERATION)
+#define F_BACK_DOOR_OPERATION    V_BACK_DOOR_OPERATION(1U)
+
+#define A_MC3_BIST_ADDR_BEG 0x168
+#define A_MC3_BIST_ADDR_END 0x16c
+#define A_MC3_BIST_DATA 0x170
+#define A_MC3_BIST_OP 0x174
+
+#define S_OP    0
+#define V_OP(x) ((x) << S_OP)
+#define F_OP    V_OP(1U)
+
+#define S_DATA_PATTERN    1
+#define M_DATA_PATTERN    0x3
+#define V_DATA_PATTERN(x) ((x) << S_DATA_PATTERN)
+#define G_DATA_PATTERN(x) (((x) >> S_DATA_PATTERN) & M_DATA_PATTERN)
+
+#define S_CONTINUOUS    3
+#define V_CONTINUOUS(x) ((x) << S_CONTINUOUS)
+#define F_CONTINUOUS    V_CONTINUOUS(1U)
+
+#define A_MC3_INT_ENABLE 0x178
+
+#define S_MC3_CORR_ERR    0
+#define V_MC3_CORR_ERR(x) ((x) << S_MC3_CORR_ERR)
+#define F_MC3_CORR_ERR    V_MC3_CORR_ERR(1U)
+
+#define S_MC3_UNCORR_ERR    1
+#define V_MC3_UNCORR_ERR(x) ((x) << S_MC3_UNCORR_ERR)
+#define F_MC3_UNCORR_ERR    V_MC3_UNCORR_ERR(1U)
+
+#define S_MC3_PARITY_ERR    2
+#define M_MC3_PARITY_ERR    0xff
+#define V_MC3_PARITY_ERR(x) ((x) << S_MC3_PARITY_ERR)
+#define G_MC3_PARITY_ERR(x) (((x) >> S_MC3_PARITY_ERR) & M_MC3_PARITY_ERR)
+
+#define S_MC3_ADDR_ERR    10
+#define V_MC3_ADDR_ERR(x) ((x) << S_MC3_ADDR_ERR)
+#define F_MC3_ADDR_ERR    V_MC3_ADDR_ERR(1U)
+
+#define A_MC3_INT_CAUSE 0x17c
+
+/* MC4 registers */
 #define A_MC4_CFG 0x180
+
+#define S_POWER_UP    0
+#define V_POWER_UP(x) ((x) << S_POWER_UP)
+#define F_POWER_UP    V_POWER_UP(1U)
+
+#define S_MC4_BANK_CYCLE    8
+#define M_MC4_BANK_CYCLE    0x7
+#define V_MC4_BANK_CYCLE(x) ((x) << S_MC4_BANK_CYCLE)
+#define G_MC4_BANK_CYCLE(x) (((x) >> S_MC4_BANK_CYCLE) & M_MC4_BANK_CYCLE)
+
+#define S_MC4_NARROW    24
+#define V_MC4_NARROW(x) ((x) << S_MC4_NARROW)
+#define F_MC4_NARROW    V_MC4_NARROW(1U)
+
 #define S_MC4_SLOW    25
 #define V_MC4_SLOW(x) ((x) << S_MC4_SLOW)
 #define F_MC4_SLOW    V_MC4_SLOW(1U)
 
-/* TPI registers */
+#define S_MC4A_WIDTH    24
+#define M_MC4A_WIDTH    0x3
+#define V_MC4A_WIDTH(x) ((x) << S_MC4A_WIDTH)
+#define G_MC4A_WIDTH(x) (((x) >> S_MC4A_WIDTH) & M_MC4A_WIDTH)
 
+#define S_MC4A_SLOW    26
+#define V_MC4A_SLOW(x) ((x) << S_MC4A_SLOW)
+#define F_MC4A_SLOW    V_MC4A_SLOW(1U)
+
+#define A_MC4_MODE 0x184
+
+#define S_MC4_MODE    0
+#define M_MC4_MODE    0x7fff
+#define V_MC4_MODE(x) ((x) << S_MC4_MODE)
+#define G_MC4_MODE(x) (((x) >> S_MC4_MODE) & M_MC4_MODE)
+
+#define A_MC4_EXT_MODE 0x188
+
+#define S_MC4_EXTENDED_MODE    0
+#define M_MC4_EXTENDED_MODE    0x7fff
+#define V_MC4_EXTENDED_MODE(x) ((x) << S_MC4_EXTENDED_MODE)
+#define G_MC4_EXTENDED_MODE(x) (((x) >> S_MC4_EXTENDED_MODE) & M_MC4_EXTENDED_MODE)
+
+#define A_MC4_REFRESH 0x190
+#define A_MC4_STROBE 0x194
+#define A_MC4_ECC_CNTL 0x198
+#define A_MC4_CE_ADDR 0x19c
+
+#define S_MC4_CE_ADDR    4
+#define M_MC4_CE_ADDR    0xffffff
+#define V_MC4_CE_ADDR(x) ((x) << S_MC4_CE_ADDR)
+#define G_MC4_CE_ADDR(x) (((x) >> S_MC4_CE_ADDR) & M_MC4_CE_ADDR)
+
+#define A_MC4_CE_DATA0 0x1a0
+#define A_MC4_CE_DATA1 0x1a4
+#define A_MC4_CE_DATA2 0x1a8
+#define A_MC4_CE_DATA3 0x1ac
+#define A_MC4_CE_DATA4 0x1b0
+#define A_MC4_UE_ADDR 0x1b4
+
+#define S_MC4_UE_ADDR    4
+#define M_MC4_UE_ADDR    0xffffff
+#define V_MC4_UE_ADDR(x) ((x) << S_MC4_UE_ADDR)
+#define G_MC4_UE_ADDR(x) (((x) >> S_MC4_UE_ADDR) & M_MC4_UE_ADDR)
+
+#define A_MC4_UE_DATA0 0x1b8
+#define A_MC4_UE_DATA1 0x1bc
+#define A_MC4_UE_DATA2 0x1c0
+#define A_MC4_UE_DATA3 0x1c4
+#define A_MC4_UE_DATA4 0x1c8
+#define A_MC4_BD_ADDR 0x1cc
+
+#define S_MC4_BACK_DOOR_ADDR    0
+#define M_MC4_BACK_DOOR_ADDR    0xfffffff
+#define V_MC4_BACK_DOOR_ADDR(x) ((x) << S_MC4_BACK_DOOR_ADDR)
+#define G_MC4_BACK_DOOR_ADDR(x) (((x) >> S_MC4_BACK_DOOR_ADDR) & M_MC4_BACK_DOOR_ADDR)
+
+#define A_MC4_BD_DATA0 0x1d0
+#define A_MC4_BD_DATA1 0x1d4
+#define A_MC4_BD_DATA2 0x1d8
+#define A_MC4_BD_DATA3 0x1dc
+#define A_MC4_BD_DATA4 0x1e0
+#define A_MC4_BD_OP 0x1e4
+
+#define S_OPERATION    0
+#define V_OPERATION(x) ((x) << S_OPERATION)
+#define F_OPERATION    V_OPERATION(1U)
+
+#define A_MC4_BIST_ADDR_BEG 0x1e8
+#define A_MC4_BIST_ADDR_END 0x1ec
+#define A_MC4_BIST_DATA 0x1f0
+#define A_MC4_BIST_OP 0x1f4
+#define A_MC4_INT_ENABLE 0x1f8
+
+#define S_MC4_CORR_ERR    0
+#define V_MC4_CORR_ERR(x) ((x) << S_MC4_CORR_ERR)
+#define F_MC4_CORR_ERR    V_MC4_CORR_ERR(1U)
+
+#define S_MC4_UNCORR_ERR    1
+#define V_MC4_UNCORR_ERR(x) ((x) << S_MC4_UNCORR_ERR)
+#define F_MC4_UNCORR_ERR    V_MC4_UNCORR_ERR(1U)
+
+#define S_MC4_ADDR_ERR    2
+#define V_MC4_ADDR_ERR(x) ((x) << S_MC4_ADDR_ERR)
+#define F_MC4_ADDR_ERR    V_MC4_ADDR_ERR(1U)
+
+#define A_MC4_INT_CAUSE 0x1fc
+
+/* TPI registers */
 #define A_TPI_ADDR 0x280
+
+#define S_TPI_ADDRESS    0
+#define M_TPI_ADDRESS    0xffffff
+#define V_TPI_ADDRESS(x) ((x) << S_TPI_ADDRESS)
+#define G_TPI_ADDRESS(x) (((x) >> S_TPI_ADDRESS) & M_TPI_ADDRESS)
+
 #define A_TPI_WR_DATA 0x284
 #define A_TPI_RD_DATA 0x288
 #define A_TPI_CSR 0x28c
@@ -171,6 +627,10 @@
 #define V_TPIRDY(x) ((x) << S_TPIRDY)
 #define F_TPIRDY    V_TPIRDY(1U)
 
+#define S_INT_DIR    31
+#define V_INT_DIR(x) ((x) << S_INT_DIR)
+#define F_INT_DIR    V_INT_DIR(1U)
+
 #define A_TPI_PAR 0x29c
 
 #define S_TPIPAR    0
@@ -178,14 +638,26 @@
 #define V_TPIPAR(x) ((x) << S_TPIPAR)
 #define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR)
 
-/* TP registers */
 
+/* TP registers */
 #define A_TP_IN_CONFIG 0x300
 
+#define S_TP_IN_CSPI_TUNNEL    0
+#define V_TP_IN_CSPI_TUNNEL(x) ((x) << S_TP_IN_CSPI_TUNNEL)
+#define F_TP_IN_CSPI_TUNNEL    V_TP_IN_CSPI_TUNNEL(1U)
+
+#define S_TP_IN_CSPI_ETHERNET    1
+#define V_TP_IN_CSPI_ETHERNET(x) ((x) << S_TP_IN_CSPI_ETHERNET)
+#define F_TP_IN_CSPI_ETHERNET    V_TP_IN_CSPI_ETHERNET(1U)
+
 #define S_TP_IN_CSPI_CPL    3
 #define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL)
 #define F_TP_IN_CSPI_CPL    V_TP_IN_CSPI_CPL(1U)
 
+#define S_TP_IN_CSPI_POS    4
+#define V_TP_IN_CSPI_POS(x) ((x) << S_TP_IN_CSPI_POS)
+#define F_TP_IN_CSPI_POS    V_TP_IN_CSPI_POS(1U)
+
 #define S_TP_IN_CSPI_CHECK_IP_CSUM    5
 #define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM)
 #define F_TP_IN_CSPI_CHECK_IP_CSUM    V_TP_IN_CSPI_CHECK_IP_CSUM(1U)
@@ -194,10 +666,22 @@
 #define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM)
 #define F_TP_IN_CSPI_CHECK_TCP_CSUM    V_TP_IN_CSPI_CHECK_TCP_CSUM(1U)
 
+#define S_TP_IN_ESPI_TUNNEL    7
+#define V_TP_IN_ESPI_TUNNEL(x) ((x) << S_TP_IN_ESPI_TUNNEL)
+#define F_TP_IN_ESPI_TUNNEL    V_TP_IN_ESPI_TUNNEL(1U)
+
 #define S_TP_IN_ESPI_ETHERNET    8
 #define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET)
 #define F_TP_IN_ESPI_ETHERNET    V_TP_IN_ESPI_ETHERNET(1U)
 
+#define S_TP_IN_ESPI_CPL    10
+#define V_TP_IN_ESPI_CPL(x) ((x) << S_TP_IN_ESPI_CPL)
+#define F_TP_IN_ESPI_CPL    V_TP_IN_ESPI_CPL(1U)
+
+#define S_TP_IN_ESPI_POS    11
+#define V_TP_IN_ESPI_POS(x) ((x) << S_TP_IN_ESPI_POS)
+#define F_TP_IN_ESPI_POS    V_TP_IN_ESPI_POS(1U)
+
 #define S_TP_IN_ESPI_CHECK_IP_CSUM    12
 #define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM)
 #define F_TP_IN_ESPI_CHECK_IP_CSUM    V_TP_IN_ESPI_CHECK_IP_CSUM(1U)
@@ -212,14 +696,42 @@
 
 #define A_TP_OUT_CONFIG 0x304
 
+#define S_TP_OUT_C_ETH    0
+#define V_TP_OUT_C_ETH(x) ((x) << S_TP_OUT_C_ETH)
+#define F_TP_OUT_C_ETH    V_TP_OUT_C_ETH(1U)
+
 #define S_TP_OUT_CSPI_CPL    2
 #define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL)
 #define F_TP_OUT_CSPI_CPL    V_TP_OUT_CSPI_CPL(1U)
 
+#define S_TP_OUT_CSPI_POS    3
+#define V_TP_OUT_CSPI_POS(x) ((x) << S_TP_OUT_CSPI_POS)
+#define F_TP_OUT_CSPI_POS    V_TP_OUT_CSPI_POS(1U)
+
+#define S_TP_OUT_CSPI_GENERATE_IP_CSUM    4
+#define V_TP_OUT_CSPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_CSPI_GENERATE_IP_CSUM)
+#define F_TP_OUT_CSPI_GENERATE_IP_CSUM    V_TP_OUT_CSPI_GENERATE_IP_CSUM(1U)
+
+#define S_TP_OUT_CSPI_GENERATE_TCP_CSUM    5
+#define V_TP_OUT_CSPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_CSPI_GENERATE_TCP_CSUM)
+#define F_TP_OUT_CSPI_GENERATE_TCP_CSUM    V_TP_OUT_CSPI_GENERATE_TCP_CSUM(1U)
+
 #define S_TP_OUT_ESPI_ETHERNET    6
 #define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET)
 #define F_TP_OUT_ESPI_ETHERNET    V_TP_OUT_ESPI_ETHERNET(1U)
 
+#define S_TP_OUT_ESPI_TAG_ETHERNET    7
+#define V_TP_OUT_ESPI_TAG_ETHERNET(x) ((x) << S_TP_OUT_ESPI_TAG_ETHERNET)
+#define F_TP_OUT_ESPI_TAG_ETHERNET    V_TP_OUT_ESPI_TAG_ETHERNET(1U)
+
+#define S_TP_OUT_ESPI_CPL    8
+#define V_TP_OUT_ESPI_CPL(x) ((x) << S_TP_OUT_ESPI_CPL)
+#define F_TP_OUT_ESPI_CPL    V_TP_OUT_ESPI_CPL(1U)
+
+#define S_TP_OUT_ESPI_POS    9
+#define V_TP_OUT_ESPI_POS(x) ((x) << S_TP_OUT_ESPI_POS)
+#define F_TP_OUT_ESPI_POS    V_TP_OUT_ESPI_POS(1U)
+
 #define S_TP_OUT_ESPI_GENERATE_IP_CSUM    10
 #define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM)
 #define F_TP_OUT_ESPI_GENERATE_IP_CSUM    V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U)
@@ -233,6 +745,16 @@
 #define S_IP_TTL    0
 #define M_IP_TTL    0xff
 #define V_IP_TTL(x) ((x) << S_IP_TTL)
+#define G_IP_TTL(x) (((x) >> S_IP_TTL) & M_IP_TTL)
+
+#define S_TCAM_SERVER_REGION_USAGE    8
+#define M_TCAM_SERVER_REGION_USAGE    0x3
+#define V_TCAM_SERVER_REGION_USAGE(x) ((x) << S_TCAM_SERVER_REGION_USAGE)
+#define G_TCAM_SERVER_REGION_USAGE(x) (((x) >> S_TCAM_SERVER_REGION_USAGE) & M_TCAM_SERVER_REGION_USAGE)
+
+#define S_QOS_MAPPING    10
+#define V_QOS_MAPPING(x) ((x) << S_QOS_MAPPING)
+#define F_QOS_MAPPING    V_QOS_MAPPING(1U)
 
 #define S_TCP_CSUM    11
 #define V_TCP_CSUM(x) ((x) << S_TCP_CSUM)
@@ -246,31 +768,476 @@
 #define V_IP_CSUM(x) ((x) << S_IP_CSUM)
 #define F_IP_CSUM    V_IP_CSUM(1U)
 
+#define S_IP_ID_SPLIT    14
+#define V_IP_ID_SPLIT(x) ((x) << S_IP_ID_SPLIT)
+#define F_IP_ID_SPLIT    V_IP_ID_SPLIT(1U)
+
 #define S_PATH_MTU    15
 #define V_PATH_MTU(x) ((x) << S_PATH_MTU)
 #define F_PATH_MTU    V_PATH_MTU(1U)
 
 #define S_5TUPLE_LOOKUP    17
+#define M_5TUPLE_LOOKUP    0x3
 #define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP)
+#define G_5TUPLE_LOOKUP(x) (((x) >> S_5TUPLE_LOOKUP) & M_5TUPLE_LOOKUP)
+
+#define S_IP_FRAGMENT_DROP    19
+#define V_IP_FRAGMENT_DROP(x) ((x) << S_IP_FRAGMENT_DROP)
+#define F_IP_FRAGMENT_DROP    V_IP_FRAGMENT_DROP(1U)
+
+#define S_PING_DROP    20
+#define V_PING_DROP(x) ((x) << S_PING_DROP)
+#define F_PING_DROP    V_PING_DROP(1U)
+
+#define S_PROTECT_MODE    21
+#define V_PROTECT_MODE(x) ((x) << S_PROTECT_MODE)
+#define F_PROTECT_MODE    V_PROTECT_MODE(1U)
+
+#define S_SYN_COOKIE_ALGORITHM    22
+#define V_SYN_COOKIE_ALGORITHM(x) ((x) << S_SYN_COOKIE_ALGORITHM)
+#define F_SYN_COOKIE_ALGORITHM    V_SYN_COOKIE_ALGORITHM(1U)
+
+#define S_ATTACK_FILTER    23
+#define V_ATTACK_FILTER(x) ((x) << S_ATTACK_FILTER)
+#define F_ATTACK_FILTER    V_ATTACK_FILTER(1U)
+
+#define S_INTERFACE_TYPE    24
+#define V_INTERFACE_TYPE(x) ((x) << S_INTERFACE_TYPE)
+#define F_INTERFACE_TYPE    V_INTERFACE_TYPE(1U)
+
+#define S_DISABLE_RX_FLOW_CONTROL    25
+#define V_DISABLE_RX_FLOW_CONTROL(x) ((x) << S_DISABLE_RX_FLOW_CONTROL)
+#define F_DISABLE_RX_FLOW_CONTROL    V_DISABLE_RX_FLOW_CONTROL(1U)
 
 #define S_SYN_COOKIE_PARAMETER    26
+#define M_SYN_COOKIE_PARAMETER    0x3f
 #define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER)
+#define G_SYN_COOKIE_PARAMETER(x) (((x) >> S_SYN_COOKIE_PARAMETER) & M_SYN_COOKIE_PARAMETER)
+
+#define A_TP_GLOBAL_RX_CREDITS 0x30c
+#define A_TP_CM_SIZE 0x310
+#define A_TP_CM_MM_BASE 0x314
+
+#define S_CM_MEMMGR_BASE    0
+#define M_CM_MEMMGR_BASE    0xfffffff
+#define V_CM_MEMMGR_BASE(x) ((x) << S_CM_MEMMGR_BASE)
+#define G_CM_MEMMGR_BASE(x) (((x) >> S_CM_MEMMGR_BASE) & M_CM_MEMMGR_BASE)
+
+#define A_TP_CM_TIMER_BASE 0x318
+
+#define S_CM_TIMER_BASE    0
+#define M_CM_TIMER_BASE    0xfffffff
+#define V_CM_TIMER_BASE(x) ((x) << S_CM_TIMER_BASE)
+#define G_CM_TIMER_BASE(x) (((x) >> S_CM_TIMER_BASE) & M_CM_TIMER_BASE)
+
+#define A_TP_PM_SIZE 0x31c
+#define A_TP_PM_TX_BASE 0x320
+#define A_TP_PM_DEFRAG_BASE 0x324
+#define A_TP_PM_RX_BASE 0x328
+#define A_TP_PM_RX_PG_SIZE 0x32c
+#define A_TP_PM_RX_MAX_PGS 0x330
+#define A_TP_PM_TX_PG_SIZE 0x334
+#define A_TP_PM_TX_MAX_PGS 0x338
+#define A_TP_TCP_OPTIONS 0x340
+
+#define S_TIMESTAMP    0
+#define M_TIMESTAMP    0x3
+#define V_TIMESTAMP(x) ((x) << S_TIMESTAMP)
+#define G_TIMESTAMP(x) (((x) >> S_TIMESTAMP) & M_TIMESTAMP)
+
+#define S_WINDOW_SCALE    2
+#define M_WINDOW_SCALE    0x3
+#define V_WINDOW_SCALE(x) ((x) << S_WINDOW_SCALE)
+#define G_WINDOW_SCALE(x) (((x) >> S_WINDOW_SCALE) & M_WINDOW_SCALE)
+
+#define S_SACK    4
+#define M_SACK    0x3
+#define V_SACK(x) ((x) << S_SACK)
+#define G_SACK(x) (((x) >> S_SACK) & M_SACK)
+
+#define S_ECN    6
+#define M_ECN    0x3
+#define V_ECN(x) ((x) << S_ECN)
+#define G_ECN(x) (((x) >> S_ECN) & M_ECN)
+
+#define S_SACK_ALGORITHM    8
+#define M_SACK_ALGORITHM    0x3
+#define V_SACK_ALGORITHM(x) ((x) << S_SACK_ALGORITHM)
+#define G_SACK_ALGORITHM(x) (((x) >> S_SACK_ALGORITHM) & M_SACK_ALGORITHM)
+
+#define S_MSS    10
+#define V_MSS(x) ((x) << S_MSS)
+#define F_MSS    V_MSS(1U)
+
+#define S_DEFAULT_PEER_MSS    16
+#define M_DEFAULT_PEER_MSS    0xffff
+#define V_DEFAULT_PEER_MSS(x) ((x) << S_DEFAULT_PEER_MSS)
+#define G_DEFAULT_PEER_MSS(x) (((x) >> S_DEFAULT_PEER_MSS) & M_DEFAULT_PEER_MSS)
+
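+/*
+ * Illustrative use of the accessors above (values are placeholders, not
+ * recommended settings): a register word is built by OR-ing V_xxx() terms
+ * and decoded with G_xxx(), e.g.
+ *   val = V_TIMESTAMP(1) | V_WINDOW_SCALE(1) | V_DEFAULT_PEER_MSS(1460);
+ *   mss = G_DEFAULT_PEER_MSS(val);
+ */
+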
+#define A_TP_DACK_CONFIG 0x344
+
+#define S_DACK_MODE    0
+#define V_DACK_MODE(x) ((x) << S_DACK_MODE)
+#define F_DACK_MODE    V_DACK_MODE(1U)
+
+#define S_DACK_AUTO_MGMT    1
+#define V_DACK_AUTO_MGMT(x) ((x) << S_DACK_AUTO_MGMT)
+#define F_DACK_AUTO_MGMT    V_DACK_AUTO_MGMT(1U)
+
+#define S_DACK_AUTO_CAREFUL    2
+#define V_DACK_AUTO_CAREFUL(x) ((x) << S_DACK_AUTO_CAREFUL)
+#define F_DACK_AUTO_CAREFUL    V_DACK_AUTO_CAREFUL(1U)
+
+#define S_DACK_MSS_SELECTOR    3
+#define M_DACK_MSS_SELECTOR    0x3
+#define V_DACK_MSS_SELECTOR(x) ((x) << S_DACK_MSS_SELECTOR)
+#define G_DACK_MSS_SELECTOR(x) (((x) >> S_DACK_MSS_SELECTOR) & M_DACK_MSS_SELECTOR)
+
+#define S_DACK_BYTE_THRESHOLD    5
+#define M_DACK_BYTE_THRESHOLD    0xfffff
+#define V_DACK_BYTE_THRESHOLD(x) ((x) << S_DACK_BYTE_THRESHOLD)
+#define G_DACK_BYTE_THRESHOLD(x) (((x) >> S_DACK_BYTE_THRESHOLD) & M_DACK_BYTE_THRESHOLD)
 
 #define A_TP_PC_CONFIG 0x348
+
+#define S_TP_ACCESS_LATENCY    0
+#define M_TP_ACCESS_LATENCY    0xf
+#define V_TP_ACCESS_LATENCY(x) ((x) << S_TP_ACCESS_LATENCY)
+#define G_TP_ACCESS_LATENCY(x) (((x) >> S_TP_ACCESS_LATENCY) & M_TP_ACCESS_LATENCY)
+
+#define S_HELD_FIN_DISABLE    4
+#define V_HELD_FIN_DISABLE(x) ((x) << S_HELD_FIN_DISABLE)
+#define F_HELD_FIN_DISABLE    V_HELD_FIN_DISABLE(1U)
+
+#define S_DDP_FC_ENABLE    5
+#define V_DDP_FC_ENABLE(x) ((x) << S_DDP_FC_ENABLE)
+#define F_DDP_FC_ENABLE    V_DDP_FC_ENABLE(1U)
+
+#define S_RDMA_ERR_ENABLE    6
+#define V_RDMA_ERR_ENABLE(x) ((x) << S_RDMA_ERR_ENABLE)
+#define F_RDMA_ERR_ENABLE    V_RDMA_ERR_ENABLE(1U)
+
+#define S_FAST_PDU_DELIVERY    7
+#define V_FAST_PDU_DELIVERY(x) ((x) << S_FAST_PDU_DELIVERY)
+#define F_FAST_PDU_DELIVERY    V_FAST_PDU_DELIVERY(1U)
+
+#define S_CLEAR_FIN    8
+#define V_CLEAR_FIN(x) ((x) << S_CLEAR_FIN)
+#define F_CLEAR_FIN    V_CLEAR_FIN(1U)
+
 #define S_DIS_TX_FILL_WIN_PUSH    12
 #define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH)
 #define F_DIS_TX_FILL_WIN_PUSH    V_DIS_TX_FILL_WIN_PUSH(1U)
 
 #define S_TP_PC_REV    30
 #define M_TP_PC_REV    0x3
+#define V_TP_PC_REV(x) ((x) << S_TP_PC_REV)
 #define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV)
+
+#define A_TP_BACKOFF0 0x350
+
+#define S_ELEMENT0    0
+#define M_ELEMENT0    0xff
+#define V_ELEMENT0(x) ((x) << S_ELEMENT0)
+#define G_ELEMENT0(x) (((x) >> S_ELEMENT0) & M_ELEMENT0)
+
+#define S_ELEMENT1    8
+#define M_ELEMENT1    0xff
+#define V_ELEMENT1(x) ((x) << S_ELEMENT1)
+#define G_ELEMENT1(x) (((x) >> S_ELEMENT1) & M_ELEMENT1)
+
+#define S_ELEMENT2    16
+#define M_ELEMENT2    0xff
+#define V_ELEMENT2(x) ((x) << S_ELEMENT2)
+#define G_ELEMENT2(x) (((x) >> S_ELEMENT2) & M_ELEMENT2)
+
+#define S_ELEMENT3    24
+#define M_ELEMENT3    0xff
+#define V_ELEMENT3(x) ((x) << S_ELEMENT3)
+#define G_ELEMENT3(x) (((x) >> S_ELEMENT3) & M_ELEMENT3)
+
+#define A_TP_BACKOFF1 0x354
+#define A_TP_BACKOFF2 0x358
+#define A_TP_BACKOFF3 0x35c
+#define A_TP_PARA_REG0 0x360
+
+#define S_VAR_MULT    0
+#define M_VAR_MULT    0xf
+#define V_VAR_MULT(x) ((x) << S_VAR_MULT)
+#define G_VAR_MULT(x) (((x) >> S_VAR_MULT) & M_VAR_MULT)
+
+#define S_VAR_GAIN    4
+#define M_VAR_GAIN    0xf
+#define V_VAR_GAIN(x) ((x) << S_VAR_GAIN)
+#define G_VAR_GAIN(x) (((x) >> S_VAR_GAIN) & M_VAR_GAIN)
+
+#define S_SRTT_GAIN    8
+#define M_SRTT_GAIN    0xf
+#define V_SRTT_GAIN(x) ((x) << S_SRTT_GAIN)
+#define G_SRTT_GAIN(x) (((x) >> S_SRTT_GAIN) & M_SRTT_GAIN)
+
+#define S_RTTVAR_INIT    12
+#define M_RTTVAR_INIT    0xf
+#define V_RTTVAR_INIT(x) ((x) << S_RTTVAR_INIT)
+#define G_RTTVAR_INIT(x) (((x) >> S_RTTVAR_INIT) & M_RTTVAR_INIT)
+
+#define S_DUP_THRESH    20
+#define M_DUP_THRESH    0xf
+#define V_DUP_THRESH(x) ((x) << S_DUP_THRESH)
+#define G_DUP_THRESH(x) (((x) >> S_DUP_THRESH) & M_DUP_THRESH)
+
+#define S_INIT_CONG_WIN    24
+#define M_INIT_CONG_WIN    0x7
+#define V_INIT_CONG_WIN(x) ((x) << S_INIT_CONG_WIN)
+#define G_INIT_CONG_WIN(x) (((x) >> S_INIT_CONG_WIN) & M_INIT_CONG_WIN)
+
+#define A_TP_PARA_REG1 0x364
+
+#define S_INITIAL_SLOW_START_THRESHOLD    0
+#define M_INITIAL_SLOW_START_THRESHOLD    0xffff
+#define V_INITIAL_SLOW_START_THRESHOLD(x) ((x) << S_INITIAL_SLOW_START_THRESHOLD)
+#define G_INITIAL_SLOW_START_THRESHOLD(x) (((x) >> S_INITIAL_SLOW_START_THRESHOLD) & M_INITIAL_SLOW_START_THRESHOLD)
+
+#define S_RECEIVE_BUFFER_SIZE    16
+#define M_RECEIVE_BUFFER_SIZE    0xffff
+#define V_RECEIVE_BUFFER_SIZE(x) ((x) << S_RECEIVE_BUFFER_SIZE)
+#define G_RECEIVE_BUFFER_SIZE(x) (((x) >> S_RECEIVE_BUFFER_SIZE) & M_RECEIVE_BUFFER_SIZE)
+
+#define A_TP_PARA_REG2 0x368
+
+#define S_RX_COALESCE_SIZE    0
+#define M_RX_COALESCE_SIZE    0xffff
+#define V_RX_COALESCE_SIZE(x) ((x) << S_RX_COALESCE_SIZE)
+#define G_RX_COALESCE_SIZE(x) (((x) >> S_RX_COALESCE_SIZE) & M_RX_COALESCE_SIZE)
+
+#define S_MAX_RX_SIZE    16
+#define M_MAX_RX_SIZE    0xffff
+#define V_MAX_RX_SIZE(x) ((x) << S_MAX_RX_SIZE)
+#define G_MAX_RX_SIZE(x) (((x) >> S_MAX_RX_SIZE) & M_MAX_RX_SIZE)
+
+#define A_TP_PARA_REG3 0x36c
+
+#define S_RX_COALESCING_PSH_DELIVER    0
+#define V_RX_COALESCING_PSH_DELIVER(x) ((x) << S_RX_COALESCING_PSH_DELIVER)
+#define F_RX_COALESCING_PSH_DELIVER    V_RX_COALESCING_PSH_DELIVER(1U)
+
+#define S_RX_COALESCING_ENABLE    1
+#define V_RX_COALESCING_ENABLE(x) ((x) << S_RX_COALESCING_ENABLE)
+#define F_RX_COALESCING_ENABLE    V_RX_COALESCING_ENABLE(1U)
+
+#define S_TAHOE_ENABLE    2
+#define V_TAHOE_ENABLE(x) ((x) << S_TAHOE_ENABLE)
+#define F_TAHOE_ENABLE    V_TAHOE_ENABLE(1U)
+
+#define S_MAX_REORDER_FRAGMENTS    12
+#define M_MAX_REORDER_FRAGMENTS    0x7
+#define V_MAX_REORDER_FRAGMENTS(x) ((x) << S_MAX_REORDER_FRAGMENTS)
+#define G_MAX_REORDER_FRAGMENTS(x) (((x) >> S_MAX_REORDER_FRAGMENTS) & M_MAX_REORDER_FRAGMENTS)
+
+#define A_TP_TIMER_RESOLUTION 0x390
+
+#define S_DELAYED_ACK_TIMER_RESOLUTION    0
+#define M_DELAYED_ACK_TIMER_RESOLUTION    0x3f
+#define V_DELAYED_ACK_TIMER_RESOLUTION(x) ((x) << S_DELAYED_ACK_TIMER_RESOLUTION)
+#define G_DELAYED_ACK_TIMER_RESOLUTION(x) (((x) >> S_DELAYED_ACK_TIMER_RESOLUTION) & M_DELAYED_ACK_TIMER_RESOLUTION)
+
+#define S_GENERIC_TIMER_RESOLUTION    16
+#define M_GENERIC_TIMER_RESOLUTION    0x3f
+#define V_GENERIC_TIMER_RESOLUTION(x) ((x) << S_GENERIC_TIMER_RESOLUTION)
+#define G_GENERIC_TIMER_RESOLUTION(x) (((x) >> S_GENERIC_TIMER_RESOLUTION) & M_GENERIC_TIMER_RESOLUTION)
+
+#define A_TP_2MSL 0x394
+
+#define S_2MSL    0
+#define M_2MSL    0x3fffffff
+#define V_2MSL(x) ((x) << S_2MSL)
+#define G_2MSL(x) (((x) >> S_2MSL) & M_2MSL)
+
+#define A_TP_RXT_MIN 0x398
+
+#define S_RETRANSMIT_TIMER_MIN    0
+#define M_RETRANSMIT_TIMER_MIN    0xffff
+#define V_RETRANSMIT_TIMER_MIN(x) ((x) << S_RETRANSMIT_TIMER_MIN)
+#define G_RETRANSMIT_TIMER_MIN(x) (((x) >> S_RETRANSMIT_TIMER_MIN) & M_RETRANSMIT_TIMER_MIN)
+
+#define A_TP_RXT_MAX 0x39c
+
+#define S_RETRANSMIT_TIMER_MAX    0
+#define M_RETRANSMIT_TIMER_MAX    0x3fffffff
+#define V_RETRANSMIT_TIMER_MAX(x) ((x) << S_RETRANSMIT_TIMER_MAX)
+#define G_RETRANSMIT_TIMER_MAX(x) (((x) >> S_RETRANSMIT_TIMER_MAX) & M_RETRANSMIT_TIMER_MAX)
+
+#define A_TP_PERS_MIN 0x3a0
+
+#define S_PERSIST_TIMER_MIN    0
+#define M_PERSIST_TIMER_MIN    0xffff
+#define V_PERSIST_TIMER_MIN(x) ((x) << S_PERSIST_TIMER_MIN)
+#define G_PERSIST_TIMER_MIN(x) (((x) >> S_PERSIST_TIMER_MIN) & M_PERSIST_TIMER_MIN)
+
+#define A_TP_PERS_MAX 0x3a4
+
+#define S_PERSIST_TIMER_MAX    0
+#define M_PERSIST_TIMER_MAX    0x3fffffff
+#define V_PERSIST_TIMER_MAX(x) ((x) << S_PERSIST_TIMER_MAX)
+#define G_PERSIST_TIMER_MAX(x) (((x) >> S_PERSIST_TIMER_MAX) & M_PERSIST_TIMER_MAX)
+
+#define A_TP_KEEP_IDLE 0x3ac
+
+#define S_KEEP_ALIVE_IDLE_TIME    0
+#define M_KEEP_ALIVE_IDLE_TIME    0x3fffffff
+#define V_KEEP_ALIVE_IDLE_TIME(x) ((x) << S_KEEP_ALIVE_IDLE_TIME)
+#define G_KEEP_ALIVE_IDLE_TIME(x) (((x) >> S_KEEP_ALIVE_IDLE_TIME) & M_KEEP_ALIVE_IDLE_TIME)
+
+#define A_TP_KEEP_INTVL 0x3b0
+
+#define S_KEEP_ALIVE_INTERVAL_TIME    0
+#define M_KEEP_ALIVE_INTERVAL_TIME    0x3fffffff
+#define V_KEEP_ALIVE_INTERVAL_TIME(x) ((x) << S_KEEP_ALIVE_INTERVAL_TIME)
+#define G_KEEP_ALIVE_INTERVAL_TIME(x) (((x) >> S_KEEP_ALIVE_INTERVAL_TIME) & M_KEEP_ALIVE_INTERVAL_TIME)
+
+#define A_TP_INIT_SRTT 0x3b4
+
+#define S_INITIAL_SRTT    0
+#define M_INITIAL_SRTT    0xffff
+#define V_INITIAL_SRTT(x) ((x) << S_INITIAL_SRTT)
+#define G_INITIAL_SRTT(x) (((x) >> S_INITIAL_SRTT) & M_INITIAL_SRTT)
+
+#define A_TP_DACK_TIME 0x3b8
+
+#define S_DELAYED_ACK_TIME    0
+#define M_DELAYED_ACK_TIME    0x7ff
+#define V_DELAYED_ACK_TIME(x) ((x) << S_DELAYED_ACK_TIME)
+#define G_DELAYED_ACK_TIME(x) (((x) >> S_DELAYED_ACK_TIME) & M_DELAYED_ACK_TIME)
+
+#define A_TP_FINWAIT2_TIME 0x3bc
+
+#define S_FINWAIT2_TIME    0
+#define M_FINWAIT2_TIME    0x3fffffff
+#define V_FINWAIT2_TIME(x) ((x) << S_FINWAIT2_TIME)
+#define G_FINWAIT2_TIME(x) (((x) >> S_FINWAIT2_TIME) & M_FINWAIT2_TIME)
+
+#define A_TP_FAST_FINWAIT2_TIME 0x3c0
+
+#define S_FAST_FINWAIT2_TIME    0
+#define M_FAST_FINWAIT2_TIME    0x3fffffff
+#define V_FAST_FINWAIT2_TIME(x) ((x) << S_FAST_FINWAIT2_TIME)
+#define G_FAST_FINWAIT2_TIME(x) (((x) >> S_FAST_FINWAIT2_TIME) & M_FAST_FINWAIT2_TIME)
+
+#define A_TP_SHIFT_CNT 0x3c4
+
+#define S_KEEPALIVE_MAX    0
+#define M_KEEPALIVE_MAX    0xff
+#define V_KEEPALIVE_MAX(x) ((x) << S_KEEPALIVE_MAX)
+#define G_KEEPALIVE_MAX(x) (((x) >> S_KEEPALIVE_MAX) & M_KEEPALIVE_MAX)
+
+#define S_WINDOWPROBE_MAX    8
+#define M_WINDOWPROBE_MAX    0xff
+#define V_WINDOWPROBE_MAX(x) ((x) << S_WINDOWPROBE_MAX)
+#define G_WINDOWPROBE_MAX(x) (((x) >> S_WINDOWPROBE_MAX) & M_WINDOWPROBE_MAX)
+
+#define S_RETRANSMISSION_MAX    16
+#define M_RETRANSMISSION_MAX    0xff
+#define V_RETRANSMISSION_MAX(x) ((x) << S_RETRANSMISSION_MAX)
+#define G_RETRANSMISSION_MAX(x) (((x) >> S_RETRANSMISSION_MAX) & M_RETRANSMISSION_MAX)
+
+#define S_SYN_MAX    24
+#define M_SYN_MAX    0xff
+#define V_SYN_MAX(x) ((x) << S_SYN_MAX)
+#define G_SYN_MAX(x) (((x) >> S_SYN_MAX) & M_SYN_MAX)
+
+#define A_TP_QOS_REG0 0x3e0
+
+#define S_L3_VALUE    0
+#define M_L3_VALUE    0x3f
+#define V_L3_VALUE(x) ((x) << S_L3_VALUE)
+#define G_L3_VALUE(x) (((x) >> S_L3_VALUE) & M_L3_VALUE)
+
+#define A_TP_QOS_REG1 0x3e4
+#define A_TP_QOS_REG2 0x3e8
+#define A_TP_QOS_REG3 0x3ec
+#define A_TP_QOS_REG4 0x3f0
+#define A_TP_QOS_REG5 0x3f4
+#define A_TP_QOS_REG6 0x3f8
+#define A_TP_QOS_REG7 0x3fc
+#define A_TP_MTU_REG0 0x404
+#define A_TP_MTU_REG1 0x408
+#define A_TP_MTU_REG2 0x40c
+#define A_TP_MTU_REG3 0x410
+#define A_TP_MTU_REG4 0x414
+#define A_TP_MTU_REG5 0x418
+#define A_TP_MTU_REG6 0x41c
+#define A_TP_MTU_REG7 0x420
 #define A_TP_RESET 0x44c
+
 #define S_TP_RESET    0
 #define V_TP_RESET(x) ((x) << S_TP_RESET)
 #define F_TP_RESET    V_TP_RESET(1U)
 
+#define S_CM_MEMMGR_INIT    1
+#define V_CM_MEMMGR_INIT(x) ((x) << S_CM_MEMMGR_INIT)
+#define F_CM_MEMMGR_INIT    V_CM_MEMMGR_INIT(1U)
+
+#define A_TP_MIB_INDEX 0x450
+#define A_TP_MIB_DATA 0x454
+#define A_TP_SYNC_TIME_HI 0x458
+#define A_TP_SYNC_TIME_LO 0x45c
+#define A_TP_CM_MM_RX_FLST_BASE 0x460
+
+#define S_CM_MEMMGR_RX_FREE_LIST_BASE    0
+#define M_CM_MEMMGR_RX_FREE_LIST_BASE    0xfffffff
+#define V_CM_MEMMGR_RX_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_RX_FREE_LIST_BASE)
+#define G_CM_MEMMGR_RX_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_RX_FREE_LIST_BASE) & M_CM_MEMMGR_RX_FREE_LIST_BASE)
+
+#define A_TP_CM_MM_TX_FLST_BASE 0x464
+
+#define S_CM_MEMMGR_TX_FREE_LIST_BASE    0
+#define M_CM_MEMMGR_TX_FREE_LIST_BASE    0xfffffff
+#define V_CM_MEMMGR_TX_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_TX_FREE_LIST_BASE)
+#define G_CM_MEMMGR_TX_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_TX_FREE_LIST_BASE) & M_CM_MEMMGR_TX_FREE_LIST_BASE)
+
+#define A_TP_CM_MM_P_FLST_BASE 0x468
+
+#define S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE    0
+#define M_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE    0xfffffff
+#define V_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE)
+#define G_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE) & M_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE)
+
+#define A_TP_CM_MM_MAX_P 0x46c
+
+#define S_CM_MEMMGR_MAX_PSTRUCT    0
+#define M_CM_MEMMGR_MAX_PSTRUCT    0xfffffff
+#define V_CM_MEMMGR_MAX_PSTRUCT(x) ((x) << S_CM_MEMMGR_MAX_PSTRUCT)
+#define G_CM_MEMMGR_MAX_PSTRUCT(x) (((x) >> S_CM_MEMMGR_MAX_PSTRUCT) & M_CM_MEMMGR_MAX_PSTRUCT)
+
 #define A_TP_INT_ENABLE 0x470
+
+#define S_TX_FREE_LIST_EMPTY    0
+#define V_TX_FREE_LIST_EMPTY(x) ((x) << S_TX_FREE_LIST_EMPTY)
+#define F_TX_FREE_LIST_EMPTY    V_TX_FREE_LIST_EMPTY(1U)
+
+#define S_RX_FREE_LIST_EMPTY    1
+#define V_RX_FREE_LIST_EMPTY(x) ((x) << S_RX_FREE_LIST_EMPTY)
+#define F_RX_FREE_LIST_EMPTY    V_RX_FREE_LIST_EMPTY(1U)
+
 #define A_TP_INT_CAUSE 0x474
+#define A_TP_TIMER_SEPARATOR 0x4a4
+
+#define S_DISABLE_PAST_TIMER_INSERTION    0
+#define V_DISABLE_PAST_TIMER_INSERTION(x) ((x) << S_DISABLE_PAST_TIMER_INSERTION)
+#define F_DISABLE_PAST_TIMER_INSERTION    V_DISABLE_PAST_TIMER_INSERTION(1U)
+
+#define S_MODULATION_TIMER_SEPARATOR    1
+#define M_MODULATION_TIMER_SEPARATOR    0x7fff
+#define V_MODULATION_TIMER_SEPARATOR(x) ((x) << S_MODULATION_TIMER_SEPARATOR)
+#define G_MODULATION_TIMER_SEPARATOR(x) (((x) >> S_MODULATION_TIMER_SEPARATOR) & M_MODULATION_TIMER_SEPARATOR)
+
+#define S_GLOBAL_TIMER_SEPARATOR    16
+#define M_GLOBAL_TIMER_SEPARATOR    0xffff
+#define V_GLOBAL_TIMER_SEPARATOR(x) ((x) << S_GLOBAL_TIMER_SEPARATOR)
+#define G_GLOBAL_TIMER_SEPARATOR(x) (((x) >> S_GLOBAL_TIMER_SEPARATOR) & M_GLOBAL_TIMER_SEPARATOR)
+
+#define A_TP_CM_FC_MODE 0x4b0
+#define A_TP_PC_CONGESTION_CNTL 0x4b4
 #define A_TP_TX_DROP_CONFIG 0x4b8
 
 #define S_ENABLE_TX_DROP    31
@@ -282,12 +1249,108 @@
 #define F_ENABLE_TX_ERROR    V_ENABLE_TX_ERROR(1U)
 
 #define S_DROP_TICKS_CNT    4
+#define M_DROP_TICKS_CNT    0x3ffffff
 #define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT)
+#define G_DROP_TICKS_CNT(x) (((x) >> S_DROP_TICKS_CNT) & M_DROP_TICKS_CNT)
 
 #define S_NUM_PKTS_DROPPED    0
+#define M_NUM_PKTS_DROPPED    0xf
 #define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED)
+#define G_NUM_PKTS_DROPPED(x) (((x) >> S_NUM_PKTS_DROPPED) & M_NUM_PKTS_DROPPED)
+
+#define A_TP_TX_DROP_COUNT 0x4bc
+
+/* RAT registers */
+#define A_RAT_ROUTE_CONTROL 0x580
+
+#define S_USE_ROUTE_TABLE    0
+#define V_USE_ROUTE_TABLE(x) ((x) << S_USE_ROUTE_TABLE)
+#define F_USE_ROUTE_TABLE    V_USE_ROUTE_TABLE(1U)
+
+#define S_ENABLE_CSPI    1
+#define V_ENABLE_CSPI(x) ((x) << S_ENABLE_CSPI)
+#define F_ENABLE_CSPI    V_ENABLE_CSPI(1U)
+
+#define S_ENABLE_PCIX    2
+#define V_ENABLE_PCIX(x) ((x) << S_ENABLE_PCIX)
+#define F_ENABLE_PCIX    V_ENABLE_PCIX(1U)
+
+#define A_RAT_ROUTE_TABLE_INDEX 0x584
+
+#define S_ROUTE_TABLE_INDEX    0
+#define M_ROUTE_TABLE_INDEX    0xf
+#define V_ROUTE_TABLE_INDEX(x) ((x) << S_ROUTE_TABLE_INDEX)
+#define G_ROUTE_TABLE_INDEX(x) (((x) >> S_ROUTE_TABLE_INDEX) & M_ROUTE_TABLE_INDEX)
+
+#define A_RAT_ROUTE_TABLE_DATA 0x588
+#define A_RAT_NO_ROUTE 0x58c
+
+#define S_CPL_OPCODE    0
+#define M_CPL_OPCODE    0xff
+#define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE)
+#define G_CPL_OPCODE(x) (((x) >> S_CPL_OPCODE) & M_CPL_OPCODE)
+
+#define A_RAT_INTR_ENABLE 0x590
+
+#define S_ZEROROUTEERROR    0
+#define V_ZEROROUTEERROR(x) ((x) << S_ZEROROUTEERROR)
+#define F_ZEROROUTEERROR    V_ZEROROUTEERROR(1U)
+
+#define S_CSPIFRAMINGERROR    1
+#define V_CSPIFRAMINGERROR(x) ((x) << S_CSPIFRAMINGERROR)
+#define F_CSPIFRAMINGERROR    V_CSPIFRAMINGERROR(1U)
+
+#define S_SGEFRAMINGERROR    2
+#define V_SGEFRAMINGERROR(x) ((x) << S_SGEFRAMINGERROR)
+#define F_SGEFRAMINGERROR    V_SGEFRAMINGERROR(1U)
+
+#define S_TPFRAMINGERROR    3
+#define V_TPFRAMINGERROR(x) ((x) << S_TPFRAMINGERROR)
+#define F_TPFRAMINGERROR    V_TPFRAMINGERROR(1U)
+
+#define A_RAT_INTR_CAUSE 0x594
 
 /* CSPI registers */
+#define A_CSPI_RX_AE_WM 0x810
+#define A_CSPI_RX_AF_WM 0x814
+#define A_CSPI_CALENDAR_LEN 0x818
+
+#define S_CALENDARLENGTH    0
+#define M_CALENDARLENGTH    0xffff
+#define V_CALENDARLENGTH(x) ((x) << S_CALENDARLENGTH)
+#define G_CALENDARLENGTH(x) (((x) >> S_CALENDARLENGTH) & M_CALENDARLENGTH)
+
+#define A_CSPI_FIFO_STATUS_ENABLE 0x820
+
+#define S_FIFOSTATUSENABLE    0
+#define V_FIFOSTATUSENABLE(x) ((x) << S_FIFOSTATUSENABLE)
+#define F_FIFOSTATUSENABLE    V_FIFOSTATUSENABLE(1U)
+
+#define A_CSPI_MAXBURST1_MAXBURST2 0x828
+
+#define S_MAXBURST1    0
+#define M_MAXBURST1    0xffff
+#define V_MAXBURST1(x) ((x) << S_MAXBURST1)
+#define G_MAXBURST1(x) (((x) >> S_MAXBURST1) & M_MAXBURST1)
+
+#define S_MAXBURST2    16
+#define M_MAXBURST2    0xffff
+#define V_MAXBURST2(x) ((x) << S_MAXBURST2)
+#define G_MAXBURST2(x) (((x) >> S_MAXBURST2) & M_MAXBURST2)
+
+#define A_CSPI_TRAIN 0x82c
+
+#define S_CSPI_TRAIN_ALPHA    0
+#define M_CSPI_TRAIN_ALPHA    0xffff
+#define V_CSPI_TRAIN_ALPHA(x) ((x) << S_CSPI_TRAIN_ALPHA)
+#define G_CSPI_TRAIN_ALPHA(x) (((x) >> S_CSPI_TRAIN_ALPHA) & M_CSPI_TRAIN_ALPHA)
+
+#define S_CSPI_TRAIN_DATA_MAXT    16
+#define M_CSPI_TRAIN_DATA_MAXT    0xffff
+#define V_CSPI_TRAIN_DATA_MAXT(x) ((x) << S_CSPI_TRAIN_DATA_MAXT)
+#define G_CSPI_TRAIN_DATA_MAXT(x) (((x) >> S_CSPI_TRAIN_DATA_MAXT) & M_CSPI_TRAIN_DATA_MAXT)
+
+#define A_CSPI_INTR_STATUS 0x848
 
 #define S_DIP4ERR    0
 #define V_DIP4ERR(x) ((x) << S_DIP4ERR)
@@ -309,22 +1372,63 @@
 #define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR)
 #define F_RAMPARITYERR    V_RAMPARITYERR(1U)
 
-/* ESPI registers */
+#define A_CSPI_INTR_ENABLE 0x84c
 
+/* ESPI registers */
 #define A_ESPI_SCH_TOKEN0 0x880
+
+#define S_SCHTOKEN0    0
+#define M_SCHTOKEN0    0xffff
+#define V_SCHTOKEN0(x) ((x) << S_SCHTOKEN0)
+#define G_SCHTOKEN0(x) (((x) >> S_SCHTOKEN0) & M_SCHTOKEN0)
+
 #define A_ESPI_SCH_TOKEN1 0x884
+
+#define S_SCHTOKEN1    0
+#define M_SCHTOKEN1    0xffff
+#define V_SCHTOKEN1(x) ((x) << S_SCHTOKEN1)
+#define G_SCHTOKEN1(x) (((x) >> S_SCHTOKEN1) & M_SCHTOKEN1)
+
 #define A_ESPI_SCH_TOKEN2 0x888
+
+#define S_SCHTOKEN2    0
+#define M_SCHTOKEN2    0xffff
+#define V_SCHTOKEN2(x) ((x) << S_SCHTOKEN2)
+#define G_SCHTOKEN2(x) (((x) >> S_SCHTOKEN2) & M_SCHTOKEN2)
+
 #define A_ESPI_SCH_TOKEN3 0x88c
+
+#define S_SCHTOKEN3    0
+#define M_SCHTOKEN3    0xffff
+#define V_SCHTOKEN3(x) ((x) << S_SCHTOKEN3)
+#define G_SCHTOKEN3(x) (((x) >> S_SCHTOKEN3) & M_SCHTOKEN3)
+
 #define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890
+
+#define S_ALMOSTEMPTY    0
+#define M_ALMOSTEMPTY    0xffff
+#define V_ALMOSTEMPTY(x) ((x) << S_ALMOSTEMPTY)
+#define G_ALMOSTEMPTY(x) (((x) >> S_ALMOSTEMPTY) & M_ALMOSTEMPTY)
+
 #define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894
+
+#define S_ALMOSTFULL    0
+#define M_ALMOSTFULL    0xffff
+#define V_ALMOSTFULL(x) ((x) << S_ALMOSTFULL)
+#define G_ALMOSTFULL(x) (((x) >> S_ALMOSTFULL) & M_ALMOSTFULL)
+
 #define A_ESPI_CALENDAR_LENGTH 0x898
 #define A_PORT_CONFIG 0x89c
 
 #define S_RX_NPORTS    0
+#define M_RX_NPORTS    0xff
 #define V_RX_NPORTS(x) ((x) << S_RX_NPORTS)
+#define G_RX_NPORTS(x) (((x) >> S_RX_NPORTS) & M_RX_NPORTS)
 
 #define S_TX_NPORTS    8
+#define M_TX_NPORTS    0xff
 #define V_TX_NPORTS(x) ((x) << S_TX_NPORTS)
+#define G_TX_NPORTS(x) (((x) >> S_TX_NPORTS) & M_TX_NPORTS)
 
 #define A_ESPI_FIFO_STATUS_ENABLE 0x8a0
 
@@ -332,12 +1436,124 @@
 #define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE)
 #define F_RXSTATUSENABLE    V_RXSTATUSENABLE(1U)
 
+#define S_TXDROPENABLE    1
+#define V_TXDROPENABLE(x) ((x) << S_TXDROPENABLE)
+#define F_TXDROPENABLE    V_TXDROPENABLE(1U)
+
+#define S_RXENDIANMODE    2
+#define V_RXENDIANMODE(x) ((x) << S_RXENDIANMODE)
+#define F_RXENDIANMODE    V_RXENDIANMODE(1U)
+
+#define S_TXENDIANMODE    3
+#define V_TXENDIANMODE(x) ((x) << S_TXENDIANMODE)
+#define F_TXENDIANMODE    V_TXENDIANMODE(1U)
+
 #define S_INTEL1010MODE    4
 #define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE)
 #define F_INTEL1010MODE    V_INTEL1010MODE(1U)
 
 #define A_ESPI_MAXBURST1_MAXBURST2 0x8a8
 #define A_ESPI_TRAIN 0x8ac
+
+#define S_MAXTRAINALPHA    0
+#define M_MAXTRAINALPHA    0xffff
+#define V_MAXTRAINALPHA(x) ((x) << S_MAXTRAINALPHA)
+#define G_MAXTRAINALPHA(x) (((x) >> S_MAXTRAINALPHA) & M_MAXTRAINALPHA)
+
+#define S_MAXTRAINDATA    16
+#define M_MAXTRAINDATA    0xffff
+#define V_MAXTRAINDATA(x) ((x) << S_MAXTRAINDATA)
+#define G_MAXTRAINDATA(x) (((x) >> S_MAXTRAINDATA) & M_MAXTRAINDATA)
+
+#define A_RAM_STATUS 0x8b0
+
+#define S_RXFIFOPARITYERROR    0
+#define M_RXFIFOPARITYERROR    0x3ff
+#define V_RXFIFOPARITYERROR(x) ((x) << S_RXFIFOPARITYERROR)
+#define G_RXFIFOPARITYERROR(x) (((x) >> S_RXFIFOPARITYERROR) & M_RXFIFOPARITYERROR)
+
+#define S_TXFIFOPARITYERROR    10
+#define M_TXFIFOPARITYERROR    0x3ff
+#define V_TXFIFOPARITYERROR(x) ((x) << S_TXFIFOPARITYERROR)
+#define G_TXFIFOPARITYERROR(x) (((x) >> S_TXFIFOPARITYERROR) & M_TXFIFOPARITYERROR)
+
+#define S_RXFIFOOVERFLOW    20
+#define M_RXFIFOOVERFLOW    0x3ff
+#define V_RXFIFOOVERFLOW(x) ((x) << S_RXFIFOOVERFLOW)
+#define G_RXFIFOOVERFLOW(x) (((x) >> S_RXFIFOOVERFLOW) & M_RXFIFOOVERFLOW)
+
+#define A_TX_DROP_COUNT0 0x8b4
+
+#define S_TXPORT0DROPCNT    0
+#define M_TXPORT0DROPCNT    0xffff
+#define V_TXPORT0DROPCNT(x) ((x) << S_TXPORT0DROPCNT)
+#define G_TXPORT0DROPCNT(x) (((x) >> S_TXPORT0DROPCNT) & M_TXPORT0DROPCNT)
+
+#define S_TXPORT1DROPCNT    16
+#define M_TXPORT1DROPCNT    0xffff
+#define V_TXPORT1DROPCNT(x) ((x) << S_TXPORT1DROPCNT)
+#define G_TXPORT1DROPCNT(x) (((x) >> S_TXPORT1DROPCNT) & M_TXPORT1DROPCNT)
+
+#define A_TX_DROP_COUNT1 0x8b8
+
+#define S_TXPORT2DROPCNT    0
+#define M_TXPORT2DROPCNT    0xffff
+#define V_TXPORT2DROPCNT(x) ((x) << S_TXPORT2DROPCNT)
+#define G_TXPORT2DROPCNT(x) (((x) >> S_TXPORT2DROPCNT) & M_TXPORT2DROPCNT)
+
+#define S_TXPORT3DROPCNT    16
+#define M_TXPORT3DROPCNT    0xffff
+#define V_TXPORT3DROPCNT(x) ((x) << S_TXPORT3DROPCNT)
+#define G_TXPORT3DROPCNT(x) (((x) >> S_TXPORT3DROPCNT) & M_TXPORT3DROPCNT)
+
+#define A_RX_DROP_COUNT0 0x8bc
+
+#define S_RXPORT0DROPCNT    0
+#define M_RXPORT0DROPCNT    0xffff
+#define V_RXPORT0DROPCNT(x) ((x) << S_RXPORT0DROPCNT)
+#define G_RXPORT0DROPCNT(x) (((x) >> S_RXPORT0DROPCNT) & M_RXPORT0DROPCNT)
+
+#define S_RXPORT1DROPCNT    16
+#define M_RXPORT1DROPCNT    0xffff
+#define V_RXPORT1DROPCNT(x) ((x) << S_RXPORT1DROPCNT)
+#define G_RXPORT1DROPCNT(x) (((x) >> S_RXPORT1DROPCNT) & M_RXPORT1DROPCNT)
+
+#define A_RX_DROP_COUNT1 0x8c0
+
+#define S_RXPORT2DROPCNT    0
+#define M_RXPORT2DROPCNT    0xffff
+#define V_RXPORT2DROPCNT(x) ((x) << S_RXPORT2DROPCNT)
+#define G_RXPORT2DROPCNT(x) (((x) >> S_RXPORT2DROPCNT) & M_RXPORT2DROPCNT)
+
+#define S_RXPORT3DROPCNT    16
+#define M_RXPORT3DROPCNT    0xffff
+#define V_RXPORT3DROPCNT(x) ((x) << S_RXPORT3DROPCNT)
+#define G_RXPORT3DROPCNT(x) (((x) >> S_RXPORT3DROPCNT) & M_RXPORT3DROPCNT)
+
+#define A_DIP4_ERROR_COUNT 0x8c4
+
+#define S_DIP4ERRORCNT    0
+#define M_DIP4ERRORCNT    0xfff
+#define V_DIP4ERRORCNT(x) ((x) << S_DIP4ERRORCNT)
+#define G_DIP4ERRORCNT(x) (((x) >> S_DIP4ERRORCNT) & M_DIP4ERRORCNT)
+
+#define S_DIP4ERRORCNTSHADOW    12
+#define M_DIP4ERRORCNTSHADOW    0xfff
+#define V_DIP4ERRORCNTSHADOW(x) ((x) << S_DIP4ERRORCNTSHADOW)
+#define G_DIP4ERRORCNTSHADOW(x) (((x) >> S_DIP4ERRORCNTSHADOW) & M_DIP4ERRORCNTSHADOW)
+
+#define S_TRICN_RX_TRAIN_ERR    24
+#define V_TRICN_RX_TRAIN_ERR(x) ((x) << S_TRICN_RX_TRAIN_ERR)
+#define F_TRICN_RX_TRAIN_ERR    V_TRICN_RX_TRAIN_ERR(1U)
+
+#define S_TRICN_RX_TRAINING    25
+#define V_TRICN_RX_TRAINING(x) ((x) << S_TRICN_RX_TRAINING)
+#define F_TRICN_RX_TRAINING    V_TRICN_RX_TRAINING(1U)
+
+#define S_TRICN_RX_TRAIN_OK    26
+#define V_TRICN_RX_TRAIN_OK(x) ((x) << S_TRICN_RX_TRAIN_OK)
+#define F_TRICN_RX_TRAIN_OK    V_TRICN_RX_TRAIN_OK(1U)
+
 #define A_ESPI_INTR_STATUS 0x8c8
 
 #define S_DIP2PARITYERR    5
@@ -347,19 +1563,56 @@
 #define A_ESPI_INTR_ENABLE 0x8cc
 #define A_RX_DROP_THRESHOLD 0x8d0
 #define A_ESPI_RX_RESET 0x8ec
+
+#define S_ESPI_RX_LNK_RST    0
+#define V_ESPI_RX_LNK_RST(x) ((x) << S_ESPI_RX_LNK_RST)
+#define F_ESPI_RX_LNK_RST    V_ESPI_RX_LNK_RST(1U)
+
+#define S_ESPI_RX_CORE_RST    1
+#define V_ESPI_RX_CORE_RST(x) ((x) << S_ESPI_RX_CORE_RST)
+#define F_ESPI_RX_CORE_RST    V_ESPI_RX_CORE_RST(1U)
+
+#define S_RX_CLK_STATUS    2
+#define V_RX_CLK_STATUS(x) ((x) << S_RX_CLK_STATUS)
+#define F_RX_CLK_STATUS    V_RX_CLK_STATUS(1U)
+
 #define A_ESPI_MISC_CONTROL 0x8f0
 
 #define S_OUT_OF_SYNC_COUNT    0
+#define M_OUT_OF_SYNC_COUNT    0xf
 #define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT)
+#define G_OUT_OF_SYNC_COUNT(x) (((x) >> S_OUT_OF_SYNC_COUNT) & M_OUT_OF_SYNC_COUNT)
+
+#define S_DIP2_COUNT_MODE_ENABLE    4
+#define V_DIP2_COUNT_MODE_ENABLE(x) ((x) << S_DIP2_COUNT_MODE_ENABLE)
+#define F_DIP2_COUNT_MODE_ENABLE    V_DIP2_COUNT_MODE_ENABLE(1U)
 
 #define S_DIP2_PARITY_ERR_THRES    5
+#define M_DIP2_PARITY_ERR_THRES    0xf
 #define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES)
+#define G_DIP2_PARITY_ERR_THRES(x) (((x) >> S_DIP2_PARITY_ERR_THRES) & M_DIP2_PARITY_ERR_THRES)
 
 #define S_DIP4_THRES    9
+#define M_DIP4_THRES    0xfff
 #define V_DIP4_THRES(x) ((x) << S_DIP4_THRES)
+#define G_DIP4_THRES(x) (((x) >> S_DIP4_THRES) & M_DIP4_THRES)
+
+#define S_DIP4_THRES_ENABLE    21
+#define V_DIP4_THRES_ENABLE(x) ((x) << S_DIP4_THRES_ENABLE)
+#define F_DIP4_THRES_ENABLE    V_DIP4_THRES_ENABLE(1U)
+
+#define S_FORCE_DISABLE_STATUS    22
+#define V_FORCE_DISABLE_STATUS(x) ((x) << S_FORCE_DISABLE_STATUS)
+#define F_FORCE_DISABLE_STATUS    V_FORCE_DISABLE_STATUS(1U)
+
+#define S_DYNAMIC_DESKEW    23
+#define V_DYNAMIC_DESKEW(x) ((x) << S_DYNAMIC_DESKEW)
+#define F_DYNAMIC_DESKEW    V_DYNAMIC_DESKEW(1U)
 
 #define S_MONITORED_PORT_NUM    25
+#define M_MONITORED_PORT_NUM    0x3
 #define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM)
+#define G_MONITORED_PORT_NUM(x) (((x) >> S_MONITORED_PORT_NUM) & M_MONITORED_PORT_NUM)
 
 #define S_MONITORED_DIRECTION    27
 #define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION)
@@ -370,33 +1623,125 @@
 #define F_MONITORED_INTERFACE    V_MONITORED_INTERFACE(1U)
 
 #define A_ESPI_DIP2_ERR_COUNT 0x8f4
+
+#define S_DIP2_ERR_CNT    0
+#define M_DIP2_ERR_CNT    0xf
+#define V_DIP2_ERR_CNT(x) ((x) << S_DIP2_ERR_CNT)
+#define G_DIP2_ERR_CNT(x) (((x) >> S_DIP2_ERR_CNT) & M_DIP2_ERR_CNT)
+
 #define A_ESPI_CMD_ADDR 0x8f8
 
 #define S_WRITE_DATA    0
+#define M_WRITE_DATA    0xff
 #define V_WRITE_DATA(x) ((x) << S_WRITE_DATA)
+#define G_WRITE_DATA(x) (((x) >> S_WRITE_DATA) & M_WRITE_DATA)
 
 #define S_REGISTER_OFFSET    8
+#define M_REGISTER_OFFSET    0xf
 #define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET)
+#define G_REGISTER_OFFSET(x) (((x) >> S_REGISTER_OFFSET) & M_REGISTER_OFFSET)
 
 #define S_CHANNEL_ADDR    12
+#define M_CHANNEL_ADDR    0xf
 #define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR)
+#define G_CHANNEL_ADDR(x) (((x) >> S_CHANNEL_ADDR) & M_CHANNEL_ADDR)
 
 #define S_MODULE_ADDR    16
+#define M_MODULE_ADDR    0x3
 #define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR)
+#define G_MODULE_ADDR(x) (((x) >> S_MODULE_ADDR) & M_MODULE_ADDR)
 
 #define S_BUNDLE_ADDR    20
+#define M_BUNDLE_ADDR    0x3
 #define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR)
+#define G_BUNDLE_ADDR(x) (((x) >> S_BUNDLE_ADDR) & M_BUNDLE_ADDR)
 
 #define S_SPI4_COMMAND    24
+#define M_SPI4_COMMAND    0xff
 #define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND)
+#define G_SPI4_COMMAND(x) (((x) >> S_SPI4_COMMAND) & M_SPI4_COMMAND)
 
 #define A_ESPI_GOSTAT 0x8fc
+
+#define S_READ_DATA    0
+#define M_READ_DATA    0xff
+#define V_READ_DATA(x) ((x) << S_READ_DATA)
+#define G_READ_DATA(x) (((x) >> S_READ_DATA) & M_READ_DATA)
+
 #define S_ESPI_CMD_BUSY    8
 #define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY)
 #define F_ESPI_CMD_BUSY    V_ESPI_CMD_BUSY(1U)
 
-/* PL registers */
+#define S_ERROR_ACK    9
+#define V_ERROR_ACK(x) ((x) << S_ERROR_ACK)
+#define F_ERROR_ACK    V_ERROR_ACK(1U)
 
+#define S_UNMAPPED_ERR    10
+#define V_UNMAPPED_ERR(x) ((x) << S_UNMAPPED_ERR)
+#define F_UNMAPPED_ERR    V_UNMAPPED_ERR(1U)
+
+#define S_TRANSACTION_TIMER    16
+#define M_TRANSACTION_TIMER    0xff
+#define V_TRANSACTION_TIMER(x) ((x) << S_TRANSACTION_TIMER)
+#define G_TRANSACTION_TIMER(x) (((x) >> S_TRANSACTION_TIMER) & M_TRANSACTION_TIMER)
+
+
+/* ULP registers */
+#define A_ULP_ULIMIT 0x980
+#define A_ULP_TAGMASK 0x984
+#define A_ULP_HREG_INDEX 0x988
+#define A_ULP_HREG_DATA 0x98c
+#define A_ULP_INT_ENABLE 0x990
+#define A_ULP_INT_CAUSE 0x994
+
+#define S_HREG_PAR_ERR    0
+#define V_HREG_PAR_ERR(x) ((x) << S_HREG_PAR_ERR)
+#define F_HREG_PAR_ERR    V_HREG_PAR_ERR(1U)
+
+#define S_EGRS_DATA_PAR_ERR    1
+#define V_EGRS_DATA_PAR_ERR(x) ((x) << S_EGRS_DATA_PAR_ERR)
+#define F_EGRS_DATA_PAR_ERR    V_EGRS_DATA_PAR_ERR(1U)
+
+#define S_INGRS_DATA_PAR_ERR    2
+#define V_INGRS_DATA_PAR_ERR(x) ((x) << S_INGRS_DATA_PAR_ERR)
+#define F_INGRS_DATA_PAR_ERR    V_INGRS_DATA_PAR_ERR(1U)
+
+#define S_PM_INTR    3
+#define V_PM_INTR(x) ((x) << S_PM_INTR)
+#define F_PM_INTR    V_PM_INTR(1U)
+
+#define S_PM_E2C_SYNC_ERR    4
+#define V_PM_E2C_SYNC_ERR(x) ((x) << S_PM_E2C_SYNC_ERR)
+#define F_PM_E2C_SYNC_ERR    V_PM_E2C_SYNC_ERR(1U)
+
+#define S_PM_C2E_SYNC_ERR    5
+#define V_PM_C2E_SYNC_ERR(x) ((x) << S_PM_C2E_SYNC_ERR)
+#define F_PM_C2E_SYNC_ERR    V_PM_C2E_SYNC_ERR(1U)
+
+#define S_PM_E2C_EMPTY_ERR    6
+#define V_PM_E2C_EMPTY_ERR(x) ((x) << S_PM_E2C_EMPTY_ERR)
+#define F_PM_E2C_EMPTY_ERR    V_PM_E2C_EMPTY_ERR(1U)
+
+#define S_PM_C2E_EMPTY_ERR    7
+#define V_PM_C2E_EMPTY_ERR(x) ((x) << S_PM_C2E_EMPTY_ERR)
+#define F_PM_C2E_EMPTY_ERR    V_PM_C2E_EMPTY_ERR(1U)
+
+#define S_PM_PAR_ERR    8
+#define M_PM_PAR_ERR    0xffff
+#define V_PM_PAR_ERR(x) ((x) << S_PM_PAR_ERR)
+#define G_PM_PAR_ERR(x) (((x) >> S_PM_PAR_ERR) & M_PM_PAR_ERR)
+
+#define S_PM_E2C_WRT_FULL    24
+#define V_PM_E2C_WRT_FULL(x) ((x) << S_PM_E2C_WRT_FULL)
+#define F_PM_E2C_WRT_FULL    V_PM_E2C_WRT_FULL(1U)
+
+#define S_PM_C2E_WRT_FULL    25
+#define V_PM_C2E_WRT_FULL(x) ((x) << S_PM_C2E_WRT_FULL)
+#define F_PM_C2E_WRT_FULL    V_PM_C2E_WRT_FULL(1U)
+
+#define A_ULP_PIO_CTRL 0x998
+
+/* PL registers */
 #define A_PL_ENABLE 0xa00
 
 #define S_PL_INTR_SGE_ERR    0
@@ -407,14 +1752,38 @@
 #define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA)
 #define F_PL_INTR_SGE_DATA    V_PL_INTR_SGE_DATA(1U)
 
+#define S_PL_INTR_MC3    2
+#define V_PL_INTR_MC3(x) ((x) << S_PL_INTR_MC3)
+#define F_PL_INTR_MC3    V_PL_INTR_MC3(1U)
+
+#define S_PL_INTR_MC4    3
+#define V_PL_INTR_MC4(x) ((x) << S_PL_INTR_MC4)
+#define F_PL_INTR_MC4    V_PL_INTR_MC4(1U)
+
+#define S_PL_INTR_MC5    4
+#define V_PL_INTR_MC5(x) ((x) << S_PL_INTR_MC5)
+#define F_PL_INTR_MC5    V_PL_INTR_MC5(1U)
+
+#define S_PL_INTR_RAT    5
+#define V_PL_INTR_RAT(x) ((x) << S_PL_INTR_RAT)
+#define F_PL_INTR_RAT    V_PL_INTR_RAT(1U)
+
 #define S_PL_INTR_TP    6
 #define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP)
 #define F_PL_INTR_TP    V_PL_INTR_TP(1U)
 
+#define S_PL_INTR_ULP    7
+#define V_PL_INTR_ULP(x) ((x) << S_PL_INTR_ULP)
+#define F_PL_INTR_ULP    V_PL_INTR_ULP(1U)
+
 #define S_PL_INTR_ESPI    8
 #define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI)
 #define F_PL_INTR_ESPI    V_PL_INTR_ESPI(1U)
 
+#define S_PL_INTR_CSPI    9
+#define V_PL_INTR_CSPI(x) ((x) << S_PL_INTR_CSPI)
+#define F_PL_INTR_CSPI    V_PL_INTR_CSPI(1U)
+
 #define S_PL_INTR_PCIX    10
 #define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX)
 #define F_PL_INTR_PCIX    V_PL_INTR_PCIX(1U)
@@ -426,43 +1795,374 @@
 #define A_PL_CAUSE 0xa04
 
 /* MC5 registers */
-
 #define A_MC5_CONFIG 0xc04
 
+#define S_MODE    0
+#define V_MODE(x) ((x) << S_MODE)
+#define F_MODE    V_MODE(1U)
+
 #define S_TCAM_RESET    1
 #define V_TCAM_RESET(x) ((x) << S_TCAM_RESET)
 #define F_TCAM_RESET    V_TCAM_RESET(1U)
 
+#define S_TCAM_READY    2
+#define V_TCAM_READY(x) ((x) << S_TCAM_READY)
+#define F_TCAM_READY    V_TCAM_READY(1U)
+
+#define S_DBGI_ENABLE    4
+#define V_DBGI_ENABLE(x) ((x) << S_DBGI_ENABLE)
+#define F_DBGI_ENABLE    V_DBGI_ENABLE(1U)
+
 #define S_M_BUS_ENABLE    5
 #define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE)
 #define F_M_BUS_ENABLE    V_M_BUS_ENABLE(1U)
 
-/* PCICFG registers */
+#define S_PARITY_ENABLE    6
+#define V_PARITY_ENABLE(x) ((x) << S_PARITY_ENABLE)
+#define F_PARITY_ENABLE    V_PARITY_ENABLE(1U)
 
+#define S_SYN_ISSUE_MODE    7
+#define M_SYN_ISSUE_MODE    0x3
+#define V_SYN_ISSUE_MODE(x) ((x) << S_SYN_ISSUE_MODE)
+#define G_SYN_ISSUE_MODE(x) (((x) >> S_SYN_ISSUE_MODE) & M_SYN_ISSUE_MODE)
+
+#define S_BUILD    16
+#define V_BUILD(x) ((x) << S_BUILD)
+#define F_BUILD    V_BUILD(1U)
+
+#define S_COMPRESSION_ENABLE    17
+#define V_COMPRESSION_ENABLE(x) ((x) << S_COMPRESSION_ENABLE)
+#define F_COMPRESSION_ENABLE    V_COMPRESSION_ENABLE(1U)
+
+#define S_NUM_LIP    18
+#define M_NUM_LIP    0x3f
+#define V_NUM_LIP(x) ((x) << S_NUM_LIP)
+#define G_NUM_LIP(x) (((x) >> S_NUM_LIP) & M_NUM_LIP)
+
+#define S_TCAM_PART_CNT    24
+#define M_TCAM_PART_CNT    0x3
+#define V_TCAM_PART_CNT(x) ((x) << S_TCAM_PART_CNT)
+#define G_TCAM_PART_CNT(x) (((x) >> S_TCAM_PART_CNT) & M_TCAM_PART_CNT)
+
+#define S_TCAM_PART_TYPE    26
+#define M_TCAM_PART_TYPE    0x3
+#define V_TCAM_PART_TYPE(x) ((x) << S_TCAM_PART_TYPE)
+#define G_TCAM_PART_TYPE(x) (((x) >> S_TCAM_PART_TYPE) & M_TCAM_PART_TYPE)
+
+#define S_TCAM_PART_SIZE    28
+#define M_TCAM_PART_SIZE    0x3
+#define V_TCAM_PART_SIZE(x) ((x) << S_TCAM_PART_SIZE)
+#define G_TCAM_PART_SIZE(x) (((x) >> S_TCAM_PART_SIZE) & M_TCAM_PART_SIZE)
+
+#define S_TCAM_PART_TYPE_HI    30
+#define V_TCAM_PART_TYPE_HI(x) ((x) << S_TCAM_PART_TYPE_HI)
+#define F_TCAM_PART_TYPE_HI    V_TCAM_PART_TYPE_HI(1U)
+
+#define A_MC5_SIZE 0xc08
+
+#define S_SIZE    0
+#define M_SIZE    0x3fffff
+#define V_SIZE(x) ((x) << S_SIZE)
+#define G_SIZE(x) (((x) >> S_SIZE) & M_SIZE)
+
+#define A_MC5_ROUTING_TABLE_INDEX 0xc0c
+
+#define S_START_OF_ROUTING_TABLE    0
+#define M_START_OF_ROUTING_TABLE    0x3fffff
+#define V_START_OF_ROUTING_TABLE(x) ((x) << S_START_OF_ROUTING_TABLE)
+#define G_START_OF_ROUTING_TABLE(x) (((x) >> S_START_OF_ROUTING_TABLE) & M_START_OF_ROUTING_TABLE)
+
+#define A_MC5_SERVER_INDEX 0xc14
+
+#define S_START_OF_SERVER_INDEX    0
+#define M_START_OF_SERVER_INDEX    0x3fffff
+#define V_START_OF_SERVER_INDEX(x) ((x) << S_START_OF_SERVER_INDEX)
+#define G_START_OF_SERVER_INDEX(x) (((x) >> S_START_OF_SERVER_INDEX) & M_START_OF_SERVER_INDEX)
+
+#define A_MC5_LIP_RAM_ADDR 0xc18
+
+#define S_LOCAL_IP_RAM_ADDR    0
+#define M_LOCAL_IP_RAM_ADDR    0x3f
+#define V_LOCAL_IP_RAM_ADDR(x) ((x) << S_LOCAL_IP_RAM_ADDR)
+#define G_LOCAL_IP_RAM_ADDR(x) (((x) >> S_LOCAL_IP_RAM_ADDR) & M_LOCAL_IP_RAM_ADDR)
+
+#define S_RAM_WRITE_ENABLE    8
+#define V_RAM_WRITE_ENABLE(x) ((x) << S_RAM_WRITE_ENABLE)
+#define F_RAM_WRITE_ENABLE    V_RAM_WRITE_ENABLE(1U)
+
+#define A_MC5_LIP_RAM_DATA 0xc1c
+#define A_MC5_RSP_LATENCY 0xc20
+
+#define S_SEARCH_RESPONSE_LATENCY    0
+#define M_SEARCH_RESPONSE_LATENCY    0x1f
+#define V_SEARCH_RESPONSE_LATENCY(x) ((x) << S_SEARCH_RESPONSE_LATENCY)
+#define G_SEARCH_RESPONSE_LATENCY(x) (((x) >> S_SEARCH_RESPONSE_LATENCY) & M_SEARCH_RESPONSE_LATENCY)
+
+#define S_LEARN_RESPONSE_LATENCY    8
+#define M_LEARN_RESPONSE_LATENCY    0x1f
+#define V_LEARN_RESPONSE_LATENCY(x) ((x) << S_LEARN_RESPONSE_LATENCY)
+#define G_LEARN_RESPONSE_LATENCY(x) (((x) >> S_LEARN_RESPONSE_LATENCY) & M_LEARN_RESPONSE_LATENCY)
+
+#define A_MC5_PARITY_LATENCY 0xc24
+
+#define S_SRCHLAT    0
+#define M_SRCHLAT    0x1f
+#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
+#define G_SRCHLAT(x) (((x) >> S_SRCHLAT) & M_SRCHLAT)
+
+#define S_PARLAT    8
+#define M_PARLAT    0x1f
+#define V_PARLAT(x) ((x) << S_PARLAT)
+#define G_PARLAT(x) (((x) >> S_PARLAT) & M_PARLAT)
+
+#define A_MC5_WR_LRN_VERIFY 0xc28
+
+#define S_POVEREN    0
+#define V_POVEREN(x) ((x) << S_POVEREN)
+#define F_POVEREN    V_POVEREN(1U)
+
+#define S_LRNVEREN    1
+#define V_LRNVEREN(x) ((x) << S_LRNVEREN)
+#define F_LRNVEREN    V_LRNVEREN(1U)
+
+#define S_VWVEREN    2
+#define V_VWVEREN(x) ((x) << S_VWVEREN)
+#define F_VWVEREN    V_VWVEREN(1U)
+
+#define A_MC5_PART_ID_INDEX 0xc2c
+
+#define S_IDINDEX    0
+#define M_IDINDEX    0xf
+#define V_IDINDEX(x) ((x) << S_IDINDEX)
+#define G_IDINDEX(x) (((x) >> S_IDINDEX) & M_IDINDEX)
+
+#define A_MC5_RESET_MAX 0xc30
+
+#define S_RSTMAX    0
+#define M_RSTMAX    0x1ff
+#define V_RSTMAX(x) ((x) << S_RSTMAX)
+#define G_RSTMAX(x) (((x) >> S_RSTMAX) & M_RSTMAX)
+
+#define A_MC5_INT_ENABLE 0xc40
+
+#define S_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR    0
+#define V_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR(x) ((x) << S_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR)
+#define F_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR    V_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR(1U)
+
+#define S_MC5_INT_HIT_IN_ACTIVE_REGION_ERR    1
+#define V_MC5_INT_HIT_IN_ACTIVE_REGION_ERR(x) ((x) << S_MC5_INT_HIT_IN_ACTIVE_REGION_ERR)
+#define F_MC5_INT_HIT_IN_ACTIVE_REGION_ERR    V_MC5_INT_HIT_IN_ACTIVE_REGION_ERR(1U)
+
+#define S_MC5_INT_HIT_IN_RT_REGION_ERR    2
+#define V_MC5_INT_HIT_IN_RT_REGION_ERR(x) ((x) << S_MC5_INT_HIT_IN_RT_REGION_ERR)
+#define F_MC5_INT_HIT_IN_RT_REGION_ERR    V_MC5_INT_HIT_IN_RT_REGION_ERR(1U)
+
+#define S_MC5_INT_MISS_ERR    3
+#define V_MC5_INT_MISS_ERR(x) ((x) << S_MC5_INT_MISS_ERR)
+#define F_MC5_INT_MISS_ERR    V_MC5_INT_MISS_ERR(1U)
+
+#define S_MC5_INT_LIP0_ERR    4
+#define V_MC5_INT_LIP0_ERR(x) ((x) << S_MC5_INT_LIP0_ERR)
+#define F_MC5_INT_LIP0_ERR    V_MC5_INT_LIP0_ERR(1U)
+
+#define S_MC5_INT_LIP_MISS_ERR    5
+#define V_MC5_INT_LIP_MISS_ERR(x) ((x) << S_MC5_INT_LIP_MISS_ERR)
+#define F_MC5_INT_LIP_MISS_ERR    V_MC5_INT_LIP_MISS_ERR(1U)
+
+#define S_MC5_INT_PARITY_ERR    6
+#define V_MC5_INT_PARITY_ERR(x) ((x) << S_MC5_INT_PARITY_ERR)
+#define F_MC5_INT_PARITY_ERR    V_MC5_INT_PARITY_ERR(1U)
+
+#define S_MC5_INT_ACTIVE_REGION_FULL    7
+#define V_MC5_INT_ACTIVE_REGION_FULL(x) ((x) << S_MC5_INT_ACTIVE_REGION_FULL)
+#define F_MC5_INT_ACTIVE_REGION_FULL    V_MC5_INT_ACTIVE_REGION_FULL(1U)
+
+#define S_MC5_INT_NFA_SRCH_ERR    8
+#define V_MC5_INT_NFA_SRCH_ERR(x) ((x) << S_MC5_INT_NFA_SRCH_ERR)
+#define F_MC5_INT_NFA_SRCH_ERR    V_MC5_INT_NFA_SRCH_ERR(1U)
+
+#define S_MC5_INT_SYN_COOKIE    9
+#define V_MC5_INT_SYN_COOKIE(x) ((x) << S_MC5_INT_SYN_COOKIE)
+#define F_MC5_INT_SYN_COOKIE    V_MC5_INT_SYN_COOKIE(1U)
+
+#define S_MC5_INT_SYN_COOKIE_BAD    10
+#define V_MC5_INT_SYN_COOKIE_BAD(x) ((x) << S_MC5_INT_SYN_COOKIE_BAD)
+#define F_MC5_INT_SYN_COOKIE_BAD    V_MC5_INT_SYN_COOKIE_BAD(1U)
+
+#define S_MC5_INT_SYN_COOKIE_OFF    11
+#define V_MC5_INT_SYN_COOKIE_OFF(x) ((x) << S_MC5_INT_SYN_COOKIE_OFF)
+#define F_MC5_INT_SYN_COOKIE_OFF    V_MC5_INT_SYN_COOKIE_OFF(1U)
+
+#define S_MC5_INT_UNKNOWN_CMD    15
+#define V_MC5_INT_UNKNOWN_CMD(x) ((x) << S_MC5_INT_UNKNOWN_CMD)
+#define F_MC5_INT_UNKNOWN_CMD    V_MC5_INT_UNKNOWN_CMD(1U)
+
+#define S_MC5_INT_REQUESTQ_PARITY_ERR    16
+#define V_MC5_INT_REQUESTQ_PARITY_ERR(x) ((x) << S_MC5_INT_REQUESTQ_PARITY_ERR)
+#define F_MC5_INT_REQUESTQ_PARITY_ERR    V_MC5_INT_REQUESTQ_PARITY_ERR(1U)
+
+#define S_MC5_INT_DISPATCHQ_PARITY_ERR    17
+#define V_MC5_INT_DISPATCHQ_PARITY_ERR(x) ((x) << S_MC5_INT_DISPATCHQ_PARITY_ERR)
+#define F_MC5_INT_DISPATCHQ_PARITY_ERR    V_MC5_INT_DISPATCHQ_PARITY_ERR(1U)
+
+#define S_MC5_INT_DEL_ACT_EMPTY    18
+#define V_MC5_INT_DEL_ACT_EMPTY(x) ((x) << S_MC5_INT_DEL_ACT_EMPTY)
+#define F_MC5_INT_DEL_ACT_EMPTY    V_MC5_INT_DEL_ACT_EMPTY(1U)
+
+#define A_MC5_INT_CAUSE 0xc44
+#define A_MC5_INT_TID 0xc48
+#define A_MC5_INT_PTID 0xc4c
+#define A_MC5_DBGI_CONFIG 0xc74
+#define A_MC5_DBGI_REQ_CMD 0xc78
+
+#define S_CMDMODE    0
+#define M_CMDMODE    0x7
+#define V_CMDMODE(x) ((x) << S_CMDMODE)
+#define G_CMDMODE(x) (((x) >> S_CMDMODE) & M_CMDMODE)
+
+#define S_SADRSEL    4
+#define V_SADRSEL(x) ((x) << S_SADRSEL)
+#define F_SADRSEL    V_SADRSEL(1U)
+
+#define S_WRITE_BURST_SIZE    22
+#define M_WRITE_BURST_SIZE    0x3ff
+#define V_WRITE_BURST_SIZE(x) ((x) << S_WRITE_BURST_SIZE)
+#define G_WRITE_BURST_SIZE(x) (((x) >> S_WRITE_BURST_SIZE) & M_WRITE_BURST_SIZE)
+
+#define A_MC5_DBGI_REQ_ADDR0 0xc7c
+#define A_MC5_DBGI_REQ_ADDR1 0xc80
+#define A_MC5_DBGI_REQ_ADDR2 0xc84
+#define A_MC5_DBGI_REQ_DATA0 0xc88
+#define A_MC5_DBGI_REQ_DATA1 0xc8c
+#define A_MC5_DBGI_REQ_DATA2 0xc90
+#define A_MC5_DBGI_REQ_DATA3 0xc94
+#define A_MC5_DBGI_REQ_DATA4 0xc98
+#define A_MC5_DBGI_REQ_MASK0 0xc9c
+#define A_MC5_DBGI_REQ_MASK1 0xca0
+#define A_MC5_DBGI_REQ_MASK2 0xca4
+#define A_MC5_DBGI_REQ_MASK3 0xca8
+#define A_MC5_DBGI_REQ_MASK4 0xcac
+#define A_MC5_DBGI_RSP_STATUS 0xcb0
+
+#define S_DBGI_RSP_VALID    0
+#define V_DBGI_RSP_VALID(x) ((x) << S_DBGI_RSP_VALID)
+#define F_DBGI_RSP_VALID    V_DBGI_RSP_VALID(1U)
+
+#define S_DBGI_RSP_HIT    1
+#define V_DBGI_RSP_HIT(x) ((x) << S_DBGI_RSP_HIT)
+#define F_DBGI_RSP_HIT    V_DBGI_RSP_HIT(1U)
+
+#define S_DBGI_RSP_ERR    2
+#define V_DBGI_RSP_ERR(x) ((x) << S_DBGI_RSP_ERR)
+#define F_DBGI_RSP_ERR    V_DBGI_RSP_ERR(1U)
+
+#define S_DBGI_RSP_ERR_REASON    8
+#define M_DBGI_RSP_ERR_REASON    0x7
+#define V_DBGI_RSP_ERR_REASON(x) ((x) << S_DBGI_RSP_ERR_REASON)
+#define G_DBGI_RSP_ERR_REASON(x) (((x) >> S_DBGI_RSP_ERR_REASON) & M_DBGI_RSP_ERR_REASON)
+
+#define A_MC5_DBGI_RSP_DATA0 0xcb4
+#define A_MC5_DBGI_RSP_DATA1 0xcb8
+#define A_MC5_DBGI_RSP_DATA2 0xcbc
+#define A_MC5_DBGI_RSP_DATA3 0xcc0
+#define A_MC5_DBGI_RSP_DATA4 0xcc4
+#define A_MC5_DBGI_RSP_LAST_CMD 0xcc8
+#define A_MC5_POPEN_DATA_WR_CMD 0xccc
+#define A_MC5_POPEN_MASK_WR_CMD 0xcd0
+#define A_MC5_AOPEN_SRCH_CMD 0xcd4
+#define A_MC5_AOPEN_LRN_CMD 0xcd8
+#define A_MC5_SYN_SRCH_CMD 0xcdc
+#define A_MC5_SYN_LRN_CMD 0xce0
+#define A_MC5_ACK_SRCH_CMD 0xce4
+#define A_MC5_ACK_LRN_CMD 0xce8
+#define A_MC5_ILOOKUP_CMD 0xcec
+#define A_MC5_ELOOKUP_CMD 0xcf0
+#define A_MC5_DATA_WRITE_CMD 0xcf4
+#define A_MC5_DATA_READ_CMD 0xcf8
+#define A_MC5_MASK_WRITE_CMD 0xcfc
+
+/* PCICFG registers */
 #define A_PCICFG_PM_CSR 0x44
 #define A_PCICFG_VPD_ADDR 0x4a
 
+#define S_VPD_ADDR    0
+#define M_VPD_ADDR    0x7fff
+#define V_VPD_ADDR(x) ((x) << S_VPD_ADDR)
+#define G_VPD_ADDR(x) (((x) >> S_VPD_ADDR) & M_VPD_ADDR)
+
 #define S_VPD_OP_FLAG    15
 #define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG)
 #define F_VPD_OP_FLAG    V_VPD_OP_FLAG(1U)
 
 #define A_PCICFG_VPD_DATA 0x4c
-
+#define A_PCICFG_PCIX_CMD 0x60
 #define A_PCICFG_INTR_ENABLE 0xf4
-#define A_PCICFG_INTR_CAUSE 0xf8
 
+#define S_MASTER_PARITY_ERR    0
+#define V_MASTER_PARITY_ERR(x) ((x) << S_MASTER_PARITY_ERR)
+#define F_MASTER_PARITY_ERR    V_MASTER_PARITY_ERR(1U)
+
+#define S_SIG_TARGET_ABORT    1
+#define V_SIG_TARGET_ABORT(x) ((x) << S_SIG_TARGET_ABORT)
+#define F_SIG_TARGET_ABORT    V_SIG_TARGET_ABORT(1U)
+
+#define S_RCV_TARGET_ABORT    2
+#define V_RCV_TARGET_ABORT(x) ((x) << S_RCV_TARGET_ABORT)
+#define F_RCV_TARGET_ABORT    V_RCV_TARGET_ABORT(1U)
+
+#define S_RCV_MASTER_ABORT    3
+#define V_RCV_MASTER_ABORT(x) ((x) << S_RCV_MASTER_ABORT)
+#define F_RCV_MASTER_ABORT    V_RCV_MASTER_ABORT(1U)
+
+#define S_SIG_SYS_ERR    4
+#define V_SIG_SYS_ERR(x) ((x) << S_SIG_SYS_ERR)
+#define F_SIG_SYS_ERR    V_SIG_SYS_ERR(1U)
+
+#define S_DET_PARITY_ERR    5
+#define V_DET_PARITY_ERR(x) ((x) << S_DET_PARITY_ERR)
+#define F_DET_PARITY_ERR    V_DET_PARITY_ERR(1U)
+
+#define S_PIO_PARITY_ERR    6
+#define V_PIO_PARITY_ERR(x) ((x) << S_PIO_PARITY_ERR)
+#define F_PIO_PARITY_ERR    V_PIO_PARITY_ERR(1U)
+
+#define S_WF_PARITY_ERR    7
+#define V_WF_PARITY_ERR(x) ((x) << S_WF_PARITY_ERR)
+#define F_WF_PARITY_ERR    V_WF_PARITY_ERR(1U)
+
+#define S_RF_PARITY_ERR    8
+#define M_RF_PARITY_ERR    0x3
+#define V_RF_PARITY_ERR(x) ((x) << S_RF_PARITY_ERR)
+#define G_RF_PARITY_ERR(x) (((x) >> S_RF_PARITY_ERR) & M_RF_PARITY_ERR)
+
+#define S_CF_PARITY_ERR    10
+#define M_CF_PARITY_ERR    0x3
+#define V_CF_PARITY_ERR(x) ((x) << S_CF_PARITY_ERR)
+#define G_CF_PARITY_ERR(x) (((x) >> S_CF_PARITY_ERR) & M_CF_PARITY_ERR)
+
+#define A_PCICFG_INTR_CAUSE 0xf8
 #define A_PCICFG_MODE 0xfc
 
 #define S_PCI_MODE_64BIT    0
 #define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT)
 #define F_PCI_MODE_64BIT    V_PCI_MODE_64BIT(1U)
 
+#define S_PCI_MODE_66MHZ    1
+#define V_PCI_MODE_66MHZ(x) ((x) << S_PCI_MODE_66MHZ)
+#define F_PCI_MODE_66MHZ    V_PCI_MODE_66MHZ(1U)
+
+#define S_PCI_MODE_PCIX_INITPAT    2
+#define M_PCI_MODE_PCIX_INITPAT    0x7
+#define V_PCI_MODE_PCIX_INITPAT(x) ((x) << S_PCI_MODE_PCIX_INITPAT)
+#define G_PCI_MODE_PCIX_INITPAT(x) (((x) >> S_PCI_MODE_PCIX_INITPAT) & M_PCI_MODE_PCIX_INITPAT)
+
 #define S_PCI_MODE_PCIX    5
 #define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX)
 #define F_PCI_MODE_PCIX    V_PCI_MODE_PCIX(1U)
 
 #define S_PCI_MODE_CLK    6
 #define M_PCI_MODE_CLK    0x3
+#define V_PCI_MODE_CLK(x) ((x) << S_PCI_MODE_CLK)
 #define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK)
 
 #endif /* _CXGB_REGS_H_ */
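The register definitions above all follow one naming convention: each field gets a shift (S_*), a mask for multi-bit fields (M_*), a value builder (V_*), an extractor (G_*), and, for single-bit fields, a ready-made flag (F_*). As a hedged, standalone illustration (user-space only, not part of the driver), the sketch below reuses the VPD_ADDR and VPD_OP_FLAG definitions from this header to compose and decode a register word:

#include <stdio.h>

/* Field definitions lifted from the VPD_ADDR register above (regs.h). */
#define S_VPD_ADDR    0
#define M_VPD_ADDR    0x7fff
#define V_VPD_ADDR(x) ((x) << S_VPD_ADDR)
#define G_VPD_ADDR(x) (((x) >> S_VPD_ADDR) & M_VPD_ADDR)

#define S_VPD_OP_FLAG    15
#define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG)
#define F_VPD_OP_FLAG    V_VPD_OP_FLAG(1U)

int main(void)
{
	/* Compose a register word: VPD address 0x1234 with the op flag set. */
	unsigned int reg = V_VPD_ADDR(0x1234) | F_VPD_OP_FLAG;

	/* Decode it again with the G_ (get) and F_ (flag) helpers. */
	printf("addr=0x%x op_flag=%d\n",
	       G_VPD_ADDR(reg), (reg & F_VPD_OP_FLAG) != 0);
	return 0;
}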
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 9799c12..0ca8d87 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -42,12 +42,14 @@
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/pci.h>
+#include <linux/ktime.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/skbuff.h>
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/tcp.h>
 #include <linux/ip.h>
 #include <linux/in.h>
 #include <linux/if_arp.h>
@@ -57,10 +59,8 @@
 #include "regs.h"
 #include "espi.h"
 
-
-#ifdef NETIF_F_TSO
-#include <linux/tcp.h>
-#endif
+/* This belongs in if_ether.h */
+#define ETH_P_CPL5 0xf
 
 #define SGE_CMDQ_N		2
 #define SGE_FREELQ_N		2
@@ -73,6 +73,7 @@
 #define SGE_INTRTIMER_NRES	1000
 #define SGE_RX_COPY_THRES	256
 #define SGE_RX_SM_BUF_SIZE	1536
+#define SGE_TX_DESC_MAX_PLEN	16384
 
 # define SGE_RX_DROP_THRES 2
 
@@ -184,17 +185,17 @@
 	unsigned long   status;         /* HW DMA fetch status */
 	unsigned int    in_use;         /* # of in-use command descriptors */
 	unsigned int	size;	        /* # of descriptors */
-	unsigned int	processed;      /* total # of descs HW has processed */
-	unsigned int	cleaned;        /* total # of descs SW has reclaimed */
-	unsigned int	stop_thres;     /* SW TX queue suspend threshold */
+	unsigned int    processed;      /* total # of descs HW has processed */
+	unsigned int    cleaned;        /* total # of descs SW has reclaimed */
+	unsigned int    stop_thres;     /* SW TX queue suspend threshold */
 	u16		pidx;           /* producer index (SW) */
 	u16		cidx;           /* consumer index (HW) */
 	u8		genbit;         /* current generation (=valid) bit */
-	u8		sop;            /* is next entry start of packet? */
+	u8              sop;            /* is next entry start of packet? */
 	struct cmdQ_e  *entries;        /* HW command descriptor Q */
 	struct cmdQ_ce *centries;       /* SW command context descriptor Q */
-	spinlock_t	lock;           /* Lock to protect cmdQ enqueuing */
 	dma_addr_t	dma_addr;       /* DMA addr HW command descriptor Q */
+	spinlock_t	lock;           /* Lock to protect cmdQ enqueuing */
 };
 
 struct freelQ {
@@ -203,8 +204,8 @@
 	u16		pidx;           /* producer index (SW) */
 	u16		cidx;           /* consumer index (HW) */
 	u16		rx_buffer_size; /* Buffer size on this free list */
-	u16		dma_offset;     /* DMA offset to align IP headers */
-	u16		recycleq_idx;   /* skb recycle q to use */
+	u16             dma_offset;     /* DMA offset to align IP headers */
+	u16             recycleq_idx;   /* skb recycle q to use */
 	u8		genbit;	        /* current generation (=valid) bit */
 	struct freelQ_e	*entries;       /* HW freelist descriptor Q */
 	struct freelQ_ce *centries;     /* SW freelist context descriptor Q */
@@ -226,6 +227,29 @@
 	CMDQ_STAT_LAST_PKT_DB = 2       /* last packet rung the doorbell */
 };
 
+/* T204 TX SW scheduler */
+
+/* Per T204 TX port */
+struct sched_port {
+	unsigned int	avail;		/* available bits - quota */
+	unsigned int	drain_bits_per_1024ns; /* drain rate */
+	unsigned int	speed;		/* drain rate, mbps */
+	unsigned int	mtu;		/* mtu size */
+	struct sk_buff_head skbq;	/* pending skbs */
+};
+
+/* Per T204 device */
+struct sched {
+	ktime_t         last_updated;   /* last time quotas were computed */
+	unsigned int 	max_avail;	/* max bits to be sent to any port */
+	unsigned int 	port;		/* port index (round robin ports) */
+	unsigned int 	num;		/* num skbs in per port queues */
+	struct sched_port p[MAX_NPORTS];
+	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
+};
+static void restart_sched(unsigned long);
+
+
 /*
  * Main SGE data structure
  *
@@ -243,18 +267,240 @@
 	unsigned int	rx_pkt_pad;     /* RX padding for L2 packets */
 	unsigned int	jumbo_fl;       /* jumbo freelist Q index */
 	unsigned int	intrtimer_nres;	/* no-resource interrupt timer */
-	unsigned int	fixed_intrtimer;/* non-adaptive interrupt timer */
+	unsigned int    fixed_intrtimer;/* non-adaptive interrupt timer */
 	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
 	struct timer_list espibug_timer;
-	unsigned int	espibug_timeout;
-	struct sk_buff	*espibug_skb;
+	unsigned long	espibug_timeout;
+	struct sk_buff	*espibug_skb[MAX_NPORTS];
 	u32		sge_control;	/* shadow value of sge control reg */
 	struct sge_intr_counts stats;
-	struct sge_port_stats port_stats[MAX_NPORTS];
+	struct sge_port_stats *port_stats[MAX_NPORTS];
+	struct sched	*tx_sched;
 	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
 };
 
 /*
+ * stop tasklet and free all pending skb's
+ */
+static void tx_sched_stop(struct sge *sge)
+{
+	struct sched *s = sge->tx_sched;
+	int i;
+
+	tasklet_kill(&s->sched_tsk);
+
+	for (i = 0; i < MAX_NPORTS; i++)
+		__skb_queue_purge(&s->p[i].skbq);
+}
+
+/*
+ * t1_sched_update_parms() is called when the MTU or link speed changes. It
+ * re-computes scheduler parameters to cope with the change.
+ */
+unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
+				   unsigned int mtu, unsigned int speed)
+{
+	struct sched *s = sge->tx_sched;
+	struct sched_port *p = &s->p[port];
+	unsigned int max_avail_segs;
+
+	pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed);
+	if (speed)
+		p->speed = speed;
+	if (mtu)
+		p->mtu = mtu;
+
+	if (speed || mtu) {
+		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
+		do_div(drain, (p->mtu + 50) * 1000);
+		p->drain_bits_per_1024ns = (unsigned int) drain;
+
+		if (p->speed < 1000)
+			p->drain_bits_per_1024ns =
+				90 * p->drain_bits_per_1024ns / 100;
+	}
+
+	if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
+		p->drain_bits_per_1024ns -= 16;
+		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
+		max_avail_segs = max(1U, 4096 / (p->mtu - 40));
+	} else {
+		s->max_avail = 16384;
+		max_avail_segs = max(1U, 9000 / (p->mtu - 40));
+	}
+
+	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
+		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
+		 p->speed, s->max_avail, max_avail_segs,
+		 p->drain_bits_per_1024ns);
+
+	return max_avail_segs * (p->mtu - 40);
+}
+
+/*
+ * t1_sched_set_max_avail_bytes() tells the scheduler the maximum amount of
+ * data that can be pushed per port.
+ */
+void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
+{
+	struct sched *s = sge->tx_sched;
+	unsigned int i;
+
+	s->max_avail = val;
+	for (i = 0; i < MAX_NPORTS; i++)
+		t1_sched_update_parms(sge, i, 0, 0);
+}
+
+/*
+ * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
+ * is draining.
+ */
+void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
+					 unsigned int val)
+{
+	struct sched *s = sge->tx_sched;
+	struct sched_port *p = &s->p[port];
+	p->drain_bits_per_1024ns = val * 1024 / 1000;
+	t1_sched_update_parms(sge, port, 0, 0);
+}
+
+
+/*
+ * get_clock() implements a ns clock (see ktime_get)
+ */
+static inline ktime_t get_clock(void)
+{
+	struct timespec ts;
+
+	ktime_get_ts(&ts);
+	return timespec_to_ktime(ts);
+}
+
+/*
+ * tx_sched_init() allocates resources and does basic initialization.
+ */
+static int tx_sched_init(struct sge *sge)
+{
+	struct sched *s;
+	int i;
+
+	s = kzalloc(sizeof (struct sched), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	pr_debug("tx_sched_init\n");
+	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
+	sge->tx_sched = s;
+
+	for (i = 0; i < MAX_NPORTS; i++) {
+		skb_queue_head_init(&s->p[i].skbq);
+		t1_sched_update_parms(sge, i, 1500, 1000);
+	}
+
+	return 0;
+}
+
+/*
+ * sched_update_avail() computes the delta since the last time it was called
+ * and updates the per port quota (number of bits that can be sent to any
+ * port).
+ */
+static inline int sched_update_avail(struct sge *sge)
+{
+	struct sched *s = sge->tx_sched;
+	ktime_t now = get_clock();
+	unsigned int i;
+	long long delta_time_ns;
+
+	delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));
+
+	pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
+	if (delta_time_ns < 15000)
+		return 0;
+
+	for (i = 0; i < MAX_NPORTS; i++) {
+		struct sched_port *p = &s->p[i];
+		unsigned int delta_avail;
+
+		delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
+		p->avail = min(p->avail + delta_avail, s->max_avail);
+	}
+
+	s->last_updated = now;
+
+	return 1;
+}
+
+/*
+ * sched_skb() is called from two different places. In the tx path, any
+ * packet generating load on an output port will call sched_skb()
+ * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
+ * context (skb == NULL).
+ * The scheduler only returns a skb (which will then be sent) if the
+ * length of the skb is <= the current quota of the output port.
+ */
+static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
+				unsigned int credits)
+{
+	struct sched *s = sge->tx_sched;
+	struct sk_buff_head *skbq;
+	unsigned int i, len, update = 1;
+
+	pr_debug("sched_skb %p\n", skb);
+	if (!skb) {
+		if (!s->num)
+			return NULL;
+	} else {
+		skbq = &s->p[skb->dev->if_port].skbq;
+		__skb_queue_tail(skbq, skb);
+		s->num++;
+		skb = NULL;
+	}
+
+	if (credits < MAX_SKB_FRAGS + 1)
+		goto out;
+
+ again:
+	for (i = 0; i < MAX_NPORTS; i++) {
+		s->port = ++s->port & (MAX_NPORTS - 1);
+		skbq = &s->p[s->port].skbq;
+
+		skb = skb_peek(skbq);
+
+		if (!skb)
+			continue;
+
+		len = skb->len;
+		if (len <= s->p[s->port].avail) {
+			s->p[s->port].avail -= len;
+			s->num--;
+			__skb_unlink(skb, skbq);
+			goto out;
+		}
+		skb = NULL;
+	}
+
+	if (update-- && sched_update_avail(sge))
+		goto again;
+
+ out:
+	/* If there are more pending skbs, we use the hardware to schedule us
+	 * again.
+	 */
+	if (s->num && !skb) {
+		struct cmdQ *q = &sge->cmdQ[0];
+		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
+		}
+	}
+	pr_debug("sched_skb ret %p\n", skb);
+
+	return skb;
+}
+
+/*
  * PIO to indicate that memory mapped Q contains valid descriptor(s).
  */
 static inline void doorbell_pio(struct adapter *adapter, u32 val)
@@ -335,10 +581,9 @@
 			goto err_no_mem;
 		memset(q->entries, 0, size);
 		size = sizeof(struct freelQ_ce) * q->size;
-		q->centries = kmalloc(size, GFP_KERNEL);
+		q->centries = kzalloc(size, GFP_KERNEL);
 		if (!q->centries)
 			goto err_no_mem;
-		memset(q->centries, 0, size);
 	}
 
 	/*
@@ -351,8 +596,11 @@
 	sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
 		sizeof(struct cpl_rx_data) +
 		sge->freelQ[!sge->jumbo_fl].dma_offset;
-	sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
-		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+		size = (16 * 1024) -
+		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
 
 	/*
 	 * Setup which skb recycle Q should be used when recycling buffers from
@@ -389,17 +637,23 @@
 	q->in_use -= n;
 	ce = &q->centries[cidx];
 	while (n--) {
-		if (q->sop)
-			pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
-			 		 pci_unmap_len(ce, dma_len),
-					 PCI_DMA_TODEVICE);
-		else
-			pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
-			 	       pci_unmap_len(ce, dma_len),
-				       PCI_DMA_TODEVICE);
-		q->sop = 0;
+		if (q->sop) {
+			if (likely(pci_unmap_len(ce, dma_len))) {
+				pci_unmap_single(pdev,
+						 pci_unmap_addr(ce, dma_addr),
+			 			 pci_unmap_len(ce, dma_len),
+						 PCI_DMA_TODEVICE);
+				q->sop = 0;
+			}
+		} else {
+			if (likely(pci_unmap_len(ce, dma_len))) {
+				pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
+			 		       pci_unmap_len(ce, dma_len),
+					       PCI_DMA_TODEVICE);
+			}
+		}
 		if (ce->skb) {
-			dev_kfree_skb(ce->skb);
+			dev_kfree_skb_any(ce->skb);
 			q->sop = 1;
 		}
 		ce++;
@@ -463,10 +717,9 @@
 			goto err_no_mem;
 		memset(q->entries, 0, size);
 		size = sizeof(struct cmdQ_ce) * q->size;
-		q->centries = kmalloc(size, GFP_KERNEL);
+		q->centries = kzalloc(size, GFP_KERNEL);
 		if (!q->centries)
 			goto err_no_mem;
-		memset(q->centries, 0, size);
 	}
 
 	/*
@@ -506,7 +759,7 @@
 		sge->sge_control |= F_VLAN_XTRACT;
 	if (adapter->open_device_map) {
 		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
-		readl(adapter->regs + A_SG_CONTROL); /* flush */
+		readl(adapter->regs + A_SG_CONTROL);   /* flush */
 	}
 }
 
@@ -540,7 +793,6 @@
 	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
 		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
 		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
-		F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
 		V_RX_PKT_OFFSET(sge->rx_pkt_pad);
 
 #if defined(__BIG_ENDIAN_BITFIELD)
@@ -568,9 +820,12 @@
  */
 void t1_sge_destroy(struct sge *sge)
 {
-	if (sge->espibug_skb)
-		kfree_skb(sge->espibug_skb);
+	int i;
 
+	for_each_port(sge->adapter, i)
+		free_percpu(sge->port_stats[i]);
+
+	kfree(sge->tx_sched);
 	free_tx_resources(sge);
 	free_rx_resources(sge);
 	kfree(sge);
@@ -735,14 +990,28 @@
 	return 0;
 }
 
-const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge)
+const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
 {
 	return &sge->stats;
 }
 
-const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port)
+void t1_sge_get_port_stats(const struct sge *sge, int port,
+			   struct sge_port_stats *ss)
 {
-	return &sge->port_stats[port];
+	int cpu;
+
+	memset(ss, 0, sizeof(*ss));
+	for_each_possible_cpu(cpu) {
+		struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
+
+		ss->rx_packets += st->rx_packets;
+		ss->rx_cso_good += st->rx_cso_good;
+		ss->tx_packets += st->tx_packets;
+		ss->tx_cso += st->tx_cso;
+		ss->tx_tso += st->tx_tso;
+		ss->vlan_xtract += st->vlan_xtract;
+		ss->vlan_insert += st->vlan_insert;
+	}
 }
 
 /**
@@ -856,6 +1125,99 @@
 }
 
 /*
+ * T1/T2 SGE limits the maximum DMA size per TX descriptor to
+ * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
+ * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
+ * Note that the *_large_page_tx_descs stuff will be optimized out when
+ * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
+ *
+ * compute_large_page_tx_descs() computes how many additional descriptors are
+ * required to break down the stack's request.
+ */
+static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
+{
+	unsigned int count = 0;
+	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
+		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+		unsigned int i, len = skb->len - skb->data_len;
+		while (len > SGE_TX_DESC_MAX_PLEN) {
+			count++;
+			len -= SGE_TX_DESC_MAX_PLEN;
+		}
+		for (i = 0; nfrags--; i++) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			len = frag->size;
+			while (len > SGE_TX_DESC_MAX_PLEN) {
+				count++;
+				len -= SGE_TX_DESC_MAX_PLEN;
+			}
+		}
+	}
+	return count;
+}
+
+/*
+ * Write a cmdQ entry.
+ *
+ * Since this function writes the 'flags' field, it must not be used to
+ * write the first cmdQ entry.
+ */
+static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
+				 unsigned int len, unsigned int gen,
+				 unsigned int eop)
+{
+	if (unlikely(len > SGE_TX_DESC_MAX_PLEN))
+		BUG();
+	e->addr_lo = (u32)mapping;
+	e->addr_hi = (u64)mapping >> 32;
+	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
+	e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
+}
+
+/*
+ * See comment for previous function.
+ *
+ * write_large_page_tx_descs() writes additional SGE tx descriptors if
+ * *desc_len exceeds HW's capability.
+ */
+static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
+						     struct cmdQ_e **e,
+						     struct cmdQ_ce **ce,
+						     unsigned int *gen,
+						     dma_addr_t *desc_mapping,
+						     unsigned int *desc_len,
+						     unsigned int nfrags,
+						     struct cmdQ *q)
+{
+	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
+		struct cmdQ_e *e1 = *e;
+		struct cmdQ_ce *ce1 = *ce;
+
+		while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
+			*desc_len -= SGE_TX_DESC_MAX_PLEN;
+			write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
+				      *gen, nfrags == 0 && *desc_len == 0);
+			ce1->skb = NULL;
+			pci_unmap_len_set(ce1, dma_len, 0);
+			*desc_mapping += SGE_TX_DESC_MAX_PLEN;
+			if (*desc_len) {
+				ce1++;
+				e1++;
+				if (++pidx == q->size) {
+					pidx = 0;
+					*gen ^= 1;
+					ce1 = q->centries;
+					e1 = q->entries;
+				}
+			}
+		}
+		*e = e1;
+		*ce = ce1;
+	}
+	return pidx;
+}
+
+/*
  * Write the command descriptors to transmit the given skb starting at
  * descriptor pidx with the given generation.
  */
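compute_large_page_tx_descs() and write_large_page_tx_descs() above only come into play when PAGE_SIZE exceeds the 16KB per-descriptor DMA limit: each additional full 16KB chunk of a contiguous area costs one more descriptor. A minimal user-space restatement of that counting loop, assuming a hypothetical 64KB buffer purely for illustration:

#include <stdio.h>

#define SGE_TX_DESC_MAX_PLEN	16384	/* per-descriptor DMA limit (16KB) */

/* Extra descriptors one contiguous buffer needs, mirroring the inner loop of
 * compute_large_page_tx_descs(): each full 16KB chunk beyond the first costs
 * one additional descriptor.
 */
static unsigned int extra_descs(unsigned int len)
{
	unsigned int count = 0;

	while (len > SGE_TX_DESC_MAX_PLEN) {
		count++;
		len -= SGE_TX_DESC_MAX_PLEN;
	}
	return count;
}

int main(void)
{
	/* A 64KB linear skb area (as seen with 64KB pages) takes 3 descriptors
	 * on top of the first one, i.e. 4 in total.
	 */
	printf("extra descriptors for 65536 bytes: %u\n", extra_descs(65536));
	return 0;
}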
@@ -863,50 +1225,84 @@
 				  unsigned int pidx, unsigned int gen,
 				  struct cmdQ *q)
 {
-	dma_addr_t mapping;
+	dma_addr_t mapping, desc_mapping;
 	struct cmdQ_e *e, *e1;
 	struct cmdQ_ce *ce;
-	unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags;
+	unsigned int i, flags, first_desc_len, desc_len,
+	    nfrags = skb_shinfo(skb)->nr_frags;
+
+	e = e1 = &q->entries[pidx];
+	ce = &q->centries[pidx];
 
 	mapping = pci_map_single(adapter->pdev, skb->data,
-				 skb->len - skb->data_len, PCI_DMA_TODEVICE);
-	ce = &q->centries[pidx];
+				skb->len - skb->data_len, PCI_DMA_TODEVICE);
+
+	desc_mapping = mapping;
+	desc_len = skb->len - skb->data_len;
+
+	flags = F_CMD_DATAVALID | F_CMD_SOP |
+	    V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
+	    V_CMD_GEN2(gen);
+	first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
+	    desc_len : SGE_TX_DESC_MAX_PLEN;
+	e->addr_lo = (u32)desc_mapping;
+	e->addr_hi = (u64)desc_mapping >> 32;
+	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
+	ce->skb = NULL;
+	pci_unmap_len_set(ce, dma_len, 0);
+
+	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
+	    desc_len > SGE_TX_DESC_MAX_PLEN) {
+		desc_mapping += first_desc_len;
+		desc_len -= first_desc_len;
+		e1++;
+		ce++;
+		if (++pidx == q->size) {
+			pidx = 0;
+			gen ^= 1;
+			e1 = q->entries;
+			ce = q->centries;
+		}
+		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
+						 &desc_mapping, &desc_len,
+						 nfrags, q);
+
+		if (likely(desc_len))
+			write_tx_desc(e1, desc_mapping, desc_len, gen,
+				      nfrags == 0);
+	}
+
 	ce->skb = NULL;
 	pci_unmap_addr_set(ce, dma_addr, mapping);
 	pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
 
-	flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) |
-		V_CMD_GEN2(gen);
-	e = &q->entries[pidx];
-	e->addr_lo = (u32)mapping;
-	e->addr_hi = (u64)mapping >> 32;
-	e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen);
-	for (e1 = e, i = 0; nfrags--; i++) {
+	for (i = 0; nfrags--; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-		ce++;
 		e1++;
+		ce++;
 		if (++pidx == q->size) {
 			pidx = 0;
 			gen ^= 1;
-			ce = q->centries;
 			e1 = q->entries;
+			ce = q->centries;
 		}
 
 		mapping = pci_map_page(adapter->pdev, frag->page,
 				       frag->page_offset, frag->size,
 				       PCI_DMA_TODEVICE);
+		desc_mapping = mapping;
+		desc_len = frag->size;
+
+		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
+						 &desc_mapping, &desc_len,
+						 nfrags, q);
+		if (likely(desc_len))
+			write_tx_desc(e1, desc_mapping, desc_len, gen,
+				      nfrags == 0);
 		ce->skb = NULL;
 		pci_unmap_addr_set(ce, dma_addr, mapping);
 		pci_unmap_len_set(ce, dma_len, frag->size);
-
-		e1->addr_lo = (u32)mapping;
-		e1->addr_hi = (u64)mapping >> 32;
-		e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen);
-		e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) |
-			    V_CMD_GEN2(gen);
 	}
-
 	ce->skb = skb;
 	wmb();
 	e->flags = flags;
@@ -920,26 +1316,56 @@
 	unsigned int reclaim = q->processed - q->cleaned;
 
 	if (reclaim) {
+		pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
+			 q->processed, q->cleaned);
 		free_cmdQ_buffers(sge, q, reclaim);
 		q->cleaned += reclaim;
 	}
 }
 
-#ifndef SET_ETHTOOL_OPS
-# define __netif_rx_complete(dev) netif_rx_complete(dev)
-#endif
-
 /*
- * We cannot use the standard netif_rx_schedule_prep() because we have multiple
- * ports plus the TOE all multiplexing onto a single response queue, therefore
- * accepting new responses cannot depend on the state of any particular port.
- * So define our own equivalent that omits the netif_running() test.
+ * Called from tasklet. Checks the scheduler for any
+ * pending skbs that can be sent.
  */
-static inline int napi_schedule_prep(struct net_device *dev)
+static void restart_sched(unsigned long arg)
 {
-	return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
-}
+	struct sge *sge = (struct sge *) arg;
+	struct adapter *adapter = sge->adapter;
+	struct cmdQ *q = &sge->cmdQ[0];
+	struct sk_buff *skb;
+	unsigned int credits, queued_skb = 0;
 
+	spin_lock(&q->lock);
+	reclaim_completed_tx(sge, q);
+
+	credits = q->size - q->in_use;
+	pr_debug("restart_sched credits=%d\n", credits);
+	while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
+		unsigned int genbit, pidx, count;
+	        count = 1 + skb_shinfo(skb)->nr_frags;
+       		count += compute_large_page_tx_descs(skb);
+		q->in_use += count;
+		genbit = q->genbit;
+		pidx = q->pidx;
+		q->pidx += count;
+		if (q->pidx >= q->size) {
+			q->pidx -= q->size;
+			q->genbit ^= 1;
+		}
+		write_tx_descs(adapter, skb, pidx, genbit, q);
+	        credits = q->size - q->in_use;
+		queued_skb = 1;
+	}
+
+	if (queued_skb) {
+		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+		}
+	}
+	spin_unlock(&q->lock);
+}
 
 /**
  *	sge_rx - process an ingress ethernet packet
@@ -954,31 +1380,39 @@
 	struct sk_buff *skb;
 	struct cpl_rx_pkt *p;
 	struct adapter *adapter = sge->adapter;
+	struct sge_port_stats *st;
 
-	sge->stats.ethernet_pkts++;
 	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
 			 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
 			 SGE_RX_DROP_THRES);
-	if (!skb) {
-		sge->port_stats[0].rx_drops++; /* charge only port 0 for now */
+	if (unlikely(!skb)) {
+		sge->stats.rx_drops++;
 		return 0;
 	}
 
 	p = (struct cpl_rx_pkt *)skb->data;
 	skb_pull(skb, sizeof(*p));
+	if (p->iff >= adapter->params.nports) {
+		kfree_skb(skb);
+		return 0;
+	}
+
 	skb->dev = adapter->port[p->iff].dev;
 	skb->dev->last_rx = jiffies;
+	st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
+	st->rx_packets++;
+
 	skb->protocol = eth_type_trans(skb, skb->dev);
 	if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
 	    skb->protocol == htons(ETH_P_IP) &&
 	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
-		sge->port_stats[p->iff].rx_cso_good++;
+		++st->rx_cso_good;
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	} else
 		skb->ip_summed = CHECKSUM_NONE;
 
 	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
-		sge->port_stats[p->iff].vlan_xtract++;
+		st->vlan_xtract++;
 		if (adapter->params.sge.polling)
 			vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
 						 ntohs(p->vlan));
@@ -1039,18 +1473,24 @@
 	struct cmdQ *cmdq = &sge->cmdQ[0];
 
 	cmdq->processed += pr0;
-
+	if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
+		freelQs_empty(sge);
+		flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
+	}
 	if (flags & F_CMDQ0_ENABLE) {
 		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
-	
+
 		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
 		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
 			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
 			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
 		}
-	 	flags &= ~F_CMDQ0_ENABLE;
+		if (sge->tx_sched)
+			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
+
+		flags &= ~F_CMDQ0_ENABLE;
 	}
-	
+
 	if (unlikely(sge->stopped_tx_queues != 0))
 		restart_tx_queues(sge);
 
@@ -1241,20 +1681,21 @@
 		if (e->GenerationBit == q->genbit) {
 			if (e->DataValid ||
 			    process_pure_responses(adapter, e)) {
-				if (likely(napi_schedule_prep(sge->netdev)))
+				if (likely(__netif_rx_schedule_prep(sge->netdev)))
 					__netif_rx_schedule(sge->netdev);
-				else
-					printk(KERN_CRIT
+				else if (net_ratelimit())
+					printk(KERN_INFO
 					       "NAPI schedule failure!\n");
 			} else
-			writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+				writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+
 			handled = 1;
 			goto unlock;
 		} else
-		writel(q->cidx, adapter->regs + A_SG_SLEEPING);
-	}  else
-	if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA)
-		printk(KERN_ERR "data interrupt while NAPI running\n");
+			writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+	}  else if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA) {
+	        printk(KERN_ERR "data interrupt while NAPI running\n");
+	}
 	
 	handled = t1_slow_intr_handler(adapter);
 	if (!handled)
@@ -1335,34 +1776,59 @@
 {
 	struct sge *sge = adapter->sge;
 	struct cmdQ *q = &sge->cmdQ[qid];
-	unsigned int credits, pidx, genbit, count;
+	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
 
-	spin_lock(&q->lock);
+	if (!spin_trylock(&q->lock))
+		return NETDEV_TX_LOCKED;
+
 	reclaim_completed_tx(sge, q);
 
 	pidx = q->pidx;
 	credits = q->size - q->in_use;
 	count = 1 + skb_shinfo(skb)->nr_frags;
+	count += compute_large_page_tx_descs(skb);
 
-	{	/* Ethernet packet */
-	 	if (unlikely(credits < count)) {
+	/* Ethernet packet */
+	if (unlikely(credits < count)) {
+		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 			set_bit(dev->if_port, &sge->stopped_tx_queues);
 			sge->stats.cmdQ_full[2]++;
-			spin_unlock(&q->lock);
-			if (!netif_queue_stopped(dev))
-				CH_ERR("%s: Tx ring full while queue awake!\n",
-				       adapter->name);
-			return NETDEV_TX_BUSY;
+			CH_ERR("%s: Tx ring full while queue awake!\n",
+			       adapter->name);
 		}
-		if (unlikely(credits - count < q->stop_thres)) {
-			sge->stats.cmdQ_full[2]++;
-			netif_stop_queue(dev);
-			set_bit(dev->if_port, &sge->stopped_tx_queues);
-		}
+		spin_unlock(&q->lock);
+		return NETDEV_TX_BUSY;
 	}
+
+	if (unlikely(credits - count < q->stop_thres)) {
+		netif_stop_queue(dev);
+		set_bit(dev->if_port, &sge->stopped_tx_queues);
+		sge->stats.cmdQ_full[2]++;
+	}
+
+	/* T204 cmdQ0 skbs that are destined for a certain port have to go
+	 * through the scheduler.
+	 */
+	if (sge->tx_sched && !qid && skb->dev) {
+	use_sched:
+		use_sched_skb = 1;
+		/* Note that the scheduler might return a different skb than
+		 * the one passed in.
+		 */
+		skb = sched_skb(sge, skb, credits);
+		if (!skb) {
+			spin_unlock(&q->lock);
+			return NETDEV_TX_OK;
+		}
+		pidx = q->pidx;
+		count = 1 + skb_shinfo(skb)->nr_frags;
+		count += compute_large_page_tx_descs(skb);
+	}
+
 	q->in_use += count;
 	genbit = q->genbit;
+	pidx = q->pidx;
 	q->pidx += count;
 	if (q->pidx >= q->size) {
 		q->pidx -= q->size;
@@ -1388,6 +1854,14 @@
 			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
 		}
 	}
+
+	if (use_sched_skb) {
+		if (spin_trylock(&q->lock)) {
+			credits = q->size - q->in_use;
+			skb = NULL;
+			goto use_sched;
+		}
+	}
 	return NETDEV_TX_OK;
 }
 
@@ -1412,16 +1886,20 @@
 int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct adapter *adapter = dev->priv;
-	struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port];
 	struct sge *sge = adapter->sge;
+	struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], smp_processor_id());
 	struct cpl_tx_pkt *cpl;
+	struct sk_buff *orig_skb = skb;
+	int ret;
 
-#ifdef NETIF_F_TSO
-	if (skb_is_gso(skb)) {
+	if (skb->protocol == htons(ETH_P_CPL5))
+		goto send;
+
+	if (skb_shinfo(skb)->gso_size) {
 		int eth_type;
 		struct cpl_tx_pkt_lso *hdr;
 
-		st->tso++;
+		++st->tx_tso;
 
 		eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
 			CPL_ETH_II : CPL_ETH_II_VLAN;
@@ -1432,13 +1910,10 @@
 		hdr->ip_hdr_words = skb->nh.iph->ihl;
 		hdr->tcp_hdr_words = skb->h.th->doff;
 		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
-						skb_shinfo(skb)->gso_size));
+							  skb_shinfo(skb)->gso_size));
 		hdr->len = htonl(skb->len - sizeof(*hdr));
 		cpl = (struct cpl_tx_pkt *)hdr;
-		sge->stats.tx_lso_pkts++;
-	} else
-#endif
-	{
+	} else {
 		/*
 	 	 * Packets shorter than ETH_HLEN can break the MAC, drop them
 		 * early.  Also, we may get oversized packets because some
@@ -1447,6 +1922,8 @@
 		 */
 		if (unlikely(skb->len < ETH_HLEN ||
 			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
+			pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name,
+				 skb->len, eth_hdr_len(skb->data), dev->mtu);
 			dev_kfree_skb_any(skb);
 			return NETDEV_TX_OK;
 		}
@@ -1456,9 +1933,9 @@
 		 * components, such as pktgen, do not handle it right.
 		 * Complain when this happens but try to fix things up.
 		 */
-		if (unlikely(skb_headroom(skb) <
-			     dev->hard_header_len - ETH_HLEN)) {
-			struct sk_buff *orig_skb = skb;
+		if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
+			pr_debug("%s: headroom %d header_len %d\n", dev->name,
+				 skb_headroom(skb), dev->hard_header_len);
 
 			if (net_ratelimit())
 				printk(KERN_ERR "%s: inadequate headroom in "
@@ -1471,19 +1948,21 @@
 
 		if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
 		    skb->ip_summed == CHECKSUM_PARTIAL &&
-		    skb->nh.iph->protocol == IPPROTO_UDP)
+		    skb->nh.iph->protocol == IPPROTO_UDP) {
 			if (unlikely(skb_checksum_help(skb))) {
+				pr_debug("%s: unable to do udp checksum\n", dev->name);
 				dev_kfree_skb_any(skb);
 				return NETDEV_TX_OK;
 			}
+		}
 
 		/* Hmmm, assuming to catch the gratious arp... and we'll use
 		 * it to flush out stuck espi packets...
-		  */
-		if (unlikely(!adapter->sge->espibug_skb)) {
+		 */
+		if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
 			if (skb->protocol == htons(ETH_P_ARP) &&
 			    skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
-				adapter->sge->espibug_skb = skb;
+				adapter->sge->espibug_skb[dev->if_port] = skb;
 				/* We want to re-use this skb later. We
 				 * simply bump the reference count and it
 				 * will not be freed...
@@ -1499,8 +1978,6 @@
 		/* the length field isn't used so don't bother setting it */
 
 		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
-		sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_PARTIAL);
-		sge->stats.tx_reg_pkts++;
 	}
 	cpl->iff = dev->if_port;
 
@@ -1513,8 +1990,19 @@
 #endif
 		cpl->vlan_valid = 0;
 
+send:
+	st->tx_packets++;
 	dev->trans_start = jiffies;
-	return t1_sge_tx(skb, adapter, 0, dev);
+	ret = t1_sge_tx(skb, adapter, 0, dev);
+
+	/* If the transmit was busy and we reallocated the skb due to the
+	 * headroom limit, silently discard it to avoid a leak.
+	 */
+	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
+		dev_kfree_skb_any(skb);
+		ret = NETDEV_TX_OK;
+	}
+	return ret;
 }
 
 /*
@@ -1532,10 +2020,9 @@
 			continue;
 
 		reclaim_completed_tx(sge, q);
-		if (i == 0 && q->in_use)   /* flush pending credits */
-			writel(F_CMDQ0_ENABLE,
-				sge->adapter->regs + A_SG_DOORBELL);
-
+		if (i == 0 && q->in_use) {    /* flush pending credits */
+			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
+		}
 		spin_unlock(&q->lock);
 	}
 	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
@@ -1582,11 +2069,20 @@
  */
 void t1_sge_stop(struct sge *sge)
 {
+	int i;
 	writel(0, sge->adapter->regs + A_SG_CONTROL);
-	(void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+
 	if (is_T2(sge->adapter))
 		del_timer_sync(&sge->espibug_timer);
+
 	del_timer_sync(&sge->tx_reclaim_timer);
+	if (sge->tx_sched)
+		tx_sched_stop(sge);
+
+	for (i = 0; i < MAX_NPORTS; i++)
+		if (sge->espibug_skb[i])
+			kfree_skb(sge->espibug_skb[i]);
 }
 
 /*
@@ -1599,44 +2095,82 @@
 
 	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
 	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
-	(void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
 
 	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
 
-	if (is_T2(sge->adapter)) 
+	if (is_T2(sge->adapter))
 		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
 }
 
 /*
  * Callback for the T2 ESPI 'stuck packet feature' workaorund
  */
-static void espibug_workaround(void *data)
+static void espibug_workaround_t204(unsigned long data)
+{
+	struct adapter *adapter = (struct adapter *)data;
+	struct sge *sge = adapter->sge;
+	unsigned int nports = adapter->params.nports;
+	u32 seop[MAX_NPORTS];
+
+	if (adapter->open_device_map & PORT_MASK) {
+		int i;
+		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0) {
+			return;
+		}
+		for (i = 0; i < nports; i++) {
+	        	struct sk_buff *skb = sge->espibug_skb[i];
+			if ( (netif_running(adapter->port[i].dev)) &&
+			     !(netif_queue_stopped(adapter->port[i].dev)) &&
+			     (seop[i] && ((seop[i] & 0xfff) == 0)) &&
+			     skb ) {
+	                	if (!skb->cb[0]) {
+	                        	u8 ch_mac_addr[ETH_ALEN] =
+	                            	{0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
+	                        	memcpy(skb->data + sizeof(struct cpl_tx_pkt),
+	                               	ch_mac_addr, ETH_ALEN);
+	                        	memcpy(skb->data + skb->len - 10,
+						ch_mac_addr, ETH_ALEN);
+	                        	skb->cb[0] = 0xff;
+	                	}
+
+	                	/* bump the reference count to avoid freeing of
+	                 	 * the skb once the DMA has completed.
+	                 	 */
+	                	skb = skb_get(skb);
+	                	t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
+			}
+		}
+	}
+	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+static void espibug_workaround(unsigned long data)
 {
 	struct adapter *adapter = (struct adapter *)data;
 	struct sge *sge = adapter->sge;
 
 	if (netif_running(adapter->port[0].dev)) {
-		struct sk_buff *skb = sge->espibug_skb;
+	        struct sk_buff *skb = sge->espibug_skb[0];
+	        u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
 
-		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
+	        if ((seop & 0xfff0fff) == 0xfff && skb) {
+	                if (!skb->cb[0]) {
+	                        u8 ch_mac_addr[ETH_ALEN] =
+	                            {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
+	                        memcpy(skb->data + sizeof(struct cpl_tx_pkt),
+	                               ch_mac_addr, ETH_ALEN);
+	                        memcpy(skb->data + skb->len - 10, ch_mac_addr,
+	                               ETH_ALEN);
+	                        skb->cb[0] = 0xff;
+	                }
 
-		if ((seop & 0xfff0fff) == 0xfff && skb) {
-			if (!skb->cb[0]) {
-				u8 ch_mac_addr[ETH_ALEN] =
-				    {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
-				memcpy(skb->data + sizeof(struct cpl_tx_pkt),
-				    ch_mac_addr, ETH_ALEN);
-				memcpy(skb->data + skb->len - 10, ch_mac_addr,
-				    ETH_ALEN);
-				skb->cb[0] = 0xff;
-			}
-
-			/* bump the reference count to avoid freeing of the
-			 * skb once the DMA has completed.
-			 */
-			skb = skb_get(skb);
-			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
-		}
+	                /* bump the reference count to avoid freeing of the
+	                 * skb once the DMA has completed.
+	                 */
+	                skb = skb_get(skb);
+	                t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
+	        }
 	}
 	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
 }
@@ -1647,26 +2181,42 @@
 struct sge * __devinit t1_sge_create(struct adapter *adapter,
 				     struct sge_params *p)
 {
-	struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);
+	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
+	int i;
 
 	if (!sge)
 		return NULL;
-	memset(sge, 0, sizeof(*sge));
 
 	sge->adapter = adapter;
 	sge->netdev = adapter->port[0].dev;
 	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
 	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
 
+	for_each_port(adapter, i) {
+		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
+		if (!sge->port_stats[i])
+			goto nomem_port;
+	}
+
 	init_timer(&sge->tx_reclaim_timer);
 	sge->tx_reclaim_timer.data = (unsigned long)sge;
 	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
 
 	if (is_T2(sge->adapter)) {
 		init_timer(&sge->espibug_timer);
-		sge->espibug_timer.function = (void *)&espibug_workaround;
+
+		if (adapter->params.nports > 1) {
+			tx_sched_init(sge);
+			sge->espibug_timer.function = espibug_workaround_t204;
+		} else {
+			sge->espibug_timer.function = espibug_workaround;
+		}
 		sge->espibug_timer.data = (unsigned long)sge->adapter;
+
 		sge->espibug_timeout = 1;
+		/* for T204, every 10ms */
+		if (adapter->params.nports > 1)
+			sge->espibug_timeout = HZ/100;
 	}
 	 
 
@@ -1674,10 +2224,25 @@
 	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
 	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
 	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
-	p->rx_coalesce_usecs =  50;
+	if (sge->tx_sched) {
+		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
+			p->rx_coalesce_usecs = 15;
+		else
+			p->rx_coalesce_usecs = 50;
+	} else
+		p->rx_coalesce_usecs = 50;
+
 	p->coalesce_enable = 0;
 	p->sample_interval_usecs = 0;
 	p->polling = 0;
 
 	return sge;
+nomem_port:
+	while (i >= 0) {
+		free_percpu(sge->port_stats[i]);
+		--i;
+	}
+	kfree(sge);
+	return NULL;
+
 }
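The T204 scheduler added above rations each port by a drain rate expressed in bits per 1024 ns; sched_update_avail() then grows the per-port quota by (drain_bits_per_1024ns * delta_ns) >> 13, where the shift folds together the /1024 for the rate's time base and a /8 so the quota is directly comparable with skb->len in sched_skb(). A standalone sketch of that arithmetic, using an assumed 1 Gb/s port with a 1500-byte MTU (numbers are illustrative only, and the CHT204-specific adjustments are left out):

#include <stdio.h>

/* Re-derive the per-port drain rate the way t1_sched_update_parms() does:
 * 1024 * speed(Mb/s) * (mtu - 40) / ((mtu + 50) * 1000), in bits per 1024 ns.
 */
static unsigned int drain_bits_per_1024ns(unsigned int mtu, unsigned int speed)
{
	unsigned long long drain = 1024ULL * speed * (mtu - 40);

	drain /= (unsigned long long)(mtu + 50) * 1000;
	if (speed < 1000)		/* sub-gigabit ports keep a 10% margin */
		drain = 90 * drain / 100;
	return (unsigned int)drain;
}

int main(void)
{
	unsigned int rate = drain_bits_per_1024ns(1500, 1000);
	long long delta_ns = 100000;	/* pretend 100 us have elapsed */

	/* Quota replenishment as in sched_update_avail(): rate * delta_ns / 1024
	 * bits, then / 8, folded into a single >> 13.
	 */
	unsigned int delta_avail = (unsigned int)((rate * delta_ns) >> 13);

	printf("rate=%u bits/1024ns, quota gained in 100us: %u\n",
	       rate, delta_avail);
	return 0;
}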
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
index 91af47b..7ceb011 100644
--- a/drivers/net/chelsio/sge.h
+++ b/drivers/net/chelsio/sge.h
@@ -44,6 +44,9 @@
 #include <asm/byteorder.h>
 
 struct sge_intr_counts {
+	unsigned int rx_drops;        /* # of packets dropped due to no mem */
+	unsigned int pure_rsps;        /* # of non-payload responses */
+	unsigned int unhandled_irqs;   /* # of unhandled interrupts */
 	unsigned int respQ_empty;      /* # times respQ empty */
 	unsigned int respQ_overflow;   /* # respQ overflow (fatal) */
 	unsigned int freelistQ_empty;  /* # times freelist empty */
@@ -51,24 +54,16 @@
 	unsigned int pkt_mismatch;
 	unsigned int cmdQ_full[3];     /* not HW IRQ, host cmdQ[] full */
 	unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */
-	unsigned int ethernet_pkts;    /* # of Ethernet packets received */
-	unsigned int offload_pkts;     /* # of offload packets received */
-	unsigned int offload_bundles;  /* # of offload pkt bundles delivered */
-	unsigned int pure_rsps;        /* # of non-payload responses */
-	unsigned int unhandled_irqs;   /* # of unhandled interrupts */
-	unsigned int tx_ipfrags;
-	unsigned int tx_reg_pkts;
-	unsigned int tx_lso_pkts;
-	unsigned int tx_do_cksum;
 };
 
 struct sge_port_stats {
-	unsigned long rx_cso_good;     /* # of successful RX csum offloads */
-	unsigned long tx_cso;          /* # of TX checksum offloads */
-	unsigned long vlan_xtract;     /* # of VLAN tag extractions */
-	unsigned long vlan_insert;     /* # of VLAN tag extractions */
-	unsigned long tso;             /* # of TSO requests */
-	unsigned long rx_drops;        /* # of packets dropped due to no mem */
+	u64 rx_packets;      /* # of Ethernet packets received */
+	u64 rx_cso_good;     /* # of successful RX csum offloads */
+	u64 tx_packets;      /* # of TX packets */
+	u64 tx_cso;          /* # of TX checksum offloads */
+	u64 tx_tso;          /* # of TSO requests */
+	u64 vlan_xtract;     /* # of VLAN tag extractions */
+	u64 vlan_insert;     /* # of VLAN tag insertions */
 };
 
 struct sk_buff;
@@ -90,7 +85,11 @@
 void t1_sge_intr_enable(struct sge *);
 void t1_sge_intr_disable(struct sge *);
 void t1_sge_intr_clear(struct sge *);
-const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge);
-const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port);
+const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge);
+void t1_sge_get_port_stats(const struct sge *sge, int port, struct sge_port_stats *);
+void t1_sched_set_max_avail_bytes(struct sge *, unsigned int);
+void t1_sched_set_drain_bits_per_us(struct sge *, unsigned int, unsigned int);
+unsigned int t1_sched_update_parms(struct sge *, unsigned int, unsigned int,
+			   unsigned int);
 
 #endif /* _CXGB_SGE_H_ */
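The statistics rework visible in this header pairs with sge.c keeping one sge_port_stats copy per CPU and summing them on demand in t1_sge_get_port_stats(). The pattern is sketched below in plain user-space C, with a fixed-size array standing in for the kernel's alloc_percpu()/per_cpu_ptr() machinery; it illustrates the idea only and is not driver code:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4			/* stand-in for the real CPU count */

struct port_stats {
	uint64_t rx_packets;
	uint64_t tx_packets;
};

/* One private copy per CPU, so the hot path never writes a shared counter. */
static struct port_stats percpu_stats[NR_CPUS];

/* Reader side: fold every CPU's copy into a caller-supplied structure,
 * the same shape as t1_sge_get_port_stats() summing the per-CPU copies.
 */
static void get_port_stats(struct port_stats *ss)
{
	int cpu;

	ss->rx_packets = ss->tx_packets = 0;
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		ss->rx_packets += percpu_stats[cpu].rx_packets;
		ss->tx_packets += percpu_stats[cpu].tx_packets;
	}
}

int main(void)
{
	struct port_stats ss;

	percpu_stats[0].rx_packets = 10;	/* as if CPU0 received 10 packets */
	percpu_stats[2].rx_packets = 5;		/* ... and CPU2 received 5 */
	get_port_stats(&ss);
	printf("rx=%llu tx=%llu\n",
	       (unsigned long long)ss.rx_packets,
	       (unsigned long long)ss.tx_packets);
	return 0;
}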
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 12e4e96..22ed9a3 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -43,6 +43,7 @@
 #include "gmac.h"
 #include "cphy.h"
 #include "sge.h"
+#include "tp.h"
 #include "espi.h"
 
 /**
@@ -59,7 +60,7 @@
  *	otherwise.
  */
 static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity,
-		    int attempts, int delay)
+			   int attempts, int delay)
 {
 	while (1) {
 		u32 val = readl(adapter->regs + reg) & mask;
@@ -78,7 +79,7 @@
 /*
  * Write a register over the TPI interface (unlocked and locked versions).
  */
-static int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
+int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
 {
 	int tpi_busy;
 
@@ -98,16 +99,16 @@
 {
 	int ret;
 
-	spin_lock(&(adapter)->tpi_lock);
+	spin_lock(&adapter->tpi_lock);
 	ret = __t1_tpi_write(adapter, addr, value);
-	spin_unlock(&(adapter)->tpi_lock);
+	spin_unlock(&adapter->tpi_lock);
 	return ret;
 }
 
 /*
  * Read a register over the TPI interface (unlocked and locked versions).
  */
-static int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
+int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
 {
 	int tpi_busy;
 
@@ -128,18 +129,26 @@
 {
 	int ret;
 
-	spin_lock(&(adapter)->tpi_lock);
+	spin_lock(&adapter->tpi_lock);
 	ret = __t1_tpi_read(adapter, addr, valp);
-	spin_unlock(&(adapter)->tpi_lock);
+	spin_unlock(&adapter->tpi_lock);
 	return ret;
 }
 
 /*
+ * Set a TPI parameter.
+ */
+static void t1_tpi_par(adapter_t *adapter, u32 value)
+{
+	writel(V_TPIPAR(value), adapter->regs + A_TPI_PAR);
+}
+
+/*
  * Called when a port's link settings change to propagate the new values to the
  * associated PHY and MAC.  After performing the common tasks it invokes an
  * OS-specific handler.
  */
-/* static */ void link_changed(adapter_t *adapter, int port_id)
+void t1_link_changed(adapter_t *adapter, int port_id)
 {
 	int link_ok, speed, duplex, fc;
 	struct cphy *phy = adapter->port[port_id].phy;
@@ -159,23 +168,83 @@
 		mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc);
 		lc->fc = (unsigned char)fc;
 	}
-	t1_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
+	t1_link_negotiated(adapter, port_id, link_ok, speed, duplex, fc);
 }
 
 static int t1_pci_intr_handler(adapter_t *adapter)
 {
 	u32 pcix_cause;
 
-    	pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause);
+	pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause);
 
 	if (pcix_cause) {
 		pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE,
-					 pcix_cause);
+				       pcix_cause);
 		t1_fatal_err(adapter);    /* PCI errors are fatal */
 	}
 	return 0;
 }
 
+#ifdef CONFIG_CHELSIO_T1_COUGAR
+#include "cspi.h"
+#endif
+#ifdef CONFIG_CHELSIO_T1_1G
+#include "fpga_defs.h"
+
+/*
+ * PHY interrupt handler for FPGA boards.
+ */
+static int fpga_phy_intr_handler(adapter_t *adapter)
+{
+	int p;
+	u32 cause = readl(adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE);
+
+	for_each_port(adapter, p)
+		if (cause & (1 << p)) {
+			struct cphy *phy = adapter->port[p].phy;
+			int phy_cause = phy->ops->interrupt_handler(phy);
+
+			if (phy_cause & cphy_cause_link_change)
+				t1_link_changed(adapter, p);
+		}
+	writel(cause, adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE);
+	return 0;
+}
+
+/*
+ * Slow path interrupt handler for FPGAs.
+ */
+static int fpga_slow_intr(adapter_t *adapter)
+{
+	u32 cause = readl(adapter->regs + A_PL_CAUSE);
+
+	cause &= ~F_PL_INTR_SGE_DATA;
+	if (cause & F_PL_INTR_SGE_ERR)
+		t1_sge_intr_error_handler(adapter->sge);
+
+	if (cause & FPGA_PCIX_INTERRUPT_GMAC)
+                fpga_phy_intr_handler(adapter);
+
+	if (cause & FPGA_PCIX_INTERRUPT_TP) {
+                /*
+		 * FPGA doesn't support MC4 interrupts and it requires
+		 * this odd layer of indirection for MC5.
+                 */
+		u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
+
+		/* Clear TP interrupt */
+		writel(tp_cause, adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
+	}
+	if (cause & FPGA_PCIX_INTERRUPT_PCIX)
+		t1_pci_intr_handler(adapter);
+
+	/* Clear the interrupts just processed. */
+	if (cause)
+		writel(cause, adapter->regs + A_PL_CAUSE);
+
+	return cause != 0;
+}
+#endif
 
 /*
  * Wait until Elmer's MI1 interface is ready for new operations.
@@ -212,12 +281,62 @@
 	t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
 }
 
+#if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR)
+/*
+ * Elmer MI1 MDIO read/write operations.
+ */
+static int mi1_mdio_read(adapter_t *adapter, int phy_addr, int mmd_addr,
+			 int reg_addr, unsigned int *valp)
+{
+	u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr);
+
+	if (mmd_addr)
+		return -EINVAL;
+
+	spin_lock(&adapter->tpi_lock);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+	__t1_tpi_write(adapter,
+			A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_READ);
+	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+	__t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
+	spin_unlock(&adapter->tpi_lock);
+	return 0;
+}
+
+static int mi1_mdio_write(adapter_t *adapter, int phy_addr, int mmd_addr,
+			  int reg_addr, unsigned int val)
+{
+	u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr);
+
+	if (mmd_addr)
+		return -EINVAL;
+
+	spin_lock(&adapter->tpi_lock);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
+	__t1_tpi_write(adapter,
+			A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_WRITE);
+	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+	spin_unlock(&adapter->tpi_lock);
+	return 0;
+}
+
+#if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR)
+static struct mdio_ops mi1_mdio_ops = {
+	mi1_mdio_init,
+	mi1_mdio_read,
+	mi1_mdio_write
+};
+#endif
+
+#endif
+
 static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
 			     int reg_addr, unsigned int *valp)
 {
 	u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
 
-	spin_lock(&(adapter)->tpi_lock);
+	spin_lock(&adapter->tpi_lock);
 
 	/* Write the address we want. */
 	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
@@ -227,12 +346,13 @@
 	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
 
 	/* Write the operation we want. */
-	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
+	__t1_tpi_write(adapter,
+			A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
 	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
 
 	/* Read the data. */
 	__t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
-	spin_unlock(&(adapter)->tpi_lock);
+	spin_unlock(&adapter->tpi_lock);
 	return 0;
 }
 
@@ -241,7 +361,7 @@
 {
 	u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
 
-	spin_lock(&(adapter)->tpi_lock);
+	spin_lock(&adapter->tpi_lock);
 
 	/* Write the address we want. */
 	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
@@ -254,7 +374,7 @@
 	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
 	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE);
 	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
-	spin_unlock(&(adapter)->tpi_lock);
+	spin_unlock(&adapter->tpi_lock);
 	return 0;
 }
 
@@ -265,12 +385,25 @@
 };
 
 enum {
+	CH_BRD_T110_1CU,
 	CH_BRD_N110_1F,
 	CH_BRD_N210_1F,
+	CH_BRD_T210_1F,
+	CH_BRD_T210_1CU,
+	CH_BRD_N204_4CU,
 };
 
 static struct board_info t1_board[] = {
 
+{ CHBT_BOARD_CHT110, 1/*ports#*/,
+  SUPPORTED_10000baseT_Full /*caps*/, CHBT_TERM_T1,
+  CHBT_MAC_PM3393, CHBT_PHY_MY3126,
+  125000000/*clk-core*/, 150000000/*clk-mc3*/, 125000000/*clk-mc4*/,
+  1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 1/*mdien*/,
+  1/*mdiinv*/, 1/*mdc*/, 1/*phybaseaddr*/, &t1_pm3393_ops,
+  &t1_my3126_ops, &mi1_mdio_ext_ops,
+  "Chelsio T110 1x10GBase-CX4 TOE" },
+
 { CHBT_BOARD_N110, 1/*ports#*/,
   SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T1,
   CHBT_MAC_PM3393, CHBT_PHY_88X2010,
@@ -289,12 +422,47 @@
   &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
   "Chelsio N210 1x10GBaseX NIC" },
 
+{ CHBT_BOARD_CHT210, 1/*ports#*/,
+  SUPPORTED_10000baseT_Full /*caps*/, CHBT_TERM_T2,
+  CHBT_MAC_PM3393, CHBT_PHY_88X2010,
+  125000000/*clk-core*/, 133000000/*clk-mc3*/, 125000000/*clk-mc4*/,
+  1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
+  0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
+  &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
+  "Chelsio T210 1x10GBaseX TOE" },
+
+{ CHBT_BOARD_CHT210, 1/*ports#*/,
+  SUPPORTED_10000baseT_Full /*caps*/, CHBT_TERM_T2,
+  CHBT_MAC_PM3393, CHBT_PHY_MY3126,
+  125000000/*clk-core*/, 133000000/*clk-mc3*/, 125000000/*clk-mc4*/,
+  1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 1/*mdien*/,
+  1/*mdiinv*/, 1/*mdc*/, 1/*phybaseaddr*/, &t1_pm3393_ops,
+  &t1_my3126_ops, &mi1_mdio_ext_ops,
+  "Chelsio T210 1x10GBase-CX4 TOE" },
+
+#ifdef CONFIG_CHELSIO_T1_1G
+{ CHBT_BOARD_CHN204, 4/*ports#*/,
+  SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half |
+  SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+  SUPPORTED_PAUSE | SUPPORTED_TP /*caps*/, CHBT_TERM_T2, CHBT_MAC_VSC7321, CHBT_PHY_88E1111,
+  100000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
+  4/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
+  0/*mdiinv*/, 1/*mdc*/, 4/*phybaseaddr*/, &t1_vsc7326_ops,
+  &t1_mv88e1xxx_ops, &mi1_mdio_ops,
+  "Chelsio N204 4x100/1000BaseT NIC" },
+#endif
+
 };
 
 struct pci_device_id t1_pci_tbl[] = {
+	CH_DEVICE(8, 0, CH_BRD_T110_1CU),
+	CH_DEVICE(8, 1, CH_BRD_T110_1CU),
 	CH_DEVICE(7, 0, CH_BRD_N110_1F),
 	CH_DEVICE(10, 1, CH_BRD_N210_1F),
-	{ 0, }
+	CH_DEVICE(11, 1, CH_BRD_T210_1F),
+	CH_DEVICE(14, 1, CH_BRD_T210_1CU),
+	CH_DEVICE(16, 1, CH_BRD_N204_4CU),
+	{ 0 }
 };
 
 MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
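The new probe-table entries use the driver's CH_DEVICE() helper, which presumably packs a Chelsio device ID, a subsystem ID and an index into t1_board[] into a struct pci_device_id; a plausible expansion (an assumption, not necessarily the driver's exact macro) is:

	#define CH_DEVICE(devid, ssid, idx) \
		{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, (ssid), 0, 0, (idx) }

so that, for example, CH_DEVICE(8, 0, CH_BRD_T110_1CU) matches device 0x0008 and records the T110 board description as driver_data.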
@@ -390,9 +558,14 @@
 	if (lc->supported & SUPPORTED_Autoneg) {
 		lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE);
 		if (fc) {
-			lc->advertising |= ADVERTISED_ASYM_PAUSE;
-			if (fc == (PAUSE_RX | PAUSE_TX))
+			if (fc == ((PAUSE_RX | PAUSE_TX) &
+				   (mac->adapter->params.nports < 2)))
 				lc->advertising |= ADVERTISED_PAUSE;
+			else {
+				lc->advertising |= ADVERTISED_ASYM_PAUSE;
+				if (fc == PAUSE_RX)
+					lc->advertising |= ADVERTISED_PAUSE;
+			}
 		}
 		phy->ops->advertise(phy, lc->advertising);
 
@@ -403,11 +576,15 @@
 			mac->ops->set_speed_duplex_fc(mac, lc->speed,
 						      lc->duplex, fc);
 			/* Also disables autoneg */
+			phy->state = PHY_AUTONEG_RDY;
 			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
 			phy->ops->reset(phy, 0);
-		} else
+		} else {
+			phy->state = PHY_AUTONEG_EN;
 			phy->ops->autoneg_enable(phy); /* also resets PHY */
+		}
 	} else {
+		phy->state = PHY_AUTONEG_RDY;
 		mac->ops->set_speed_duplex_fc(mac, -1, -1, fc);
 		lc->fc = (unsigned char)fc;
 		phy->ops->reset(phy, 0);
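Apart from the extra single-port (nports < 2) gating, the reworked flow-control advertisement above follows the usual IEEE 802.3 Annex 28B mapping: symmetric pause advertises only the Pause bit, receive-only pause advertises Pause plus Asym_Pause, and transmit-only pause advertises Asym_Pause alone. A stand-alone restatement of that mapping (a sketch, not the driver's code) is:

	static unsigned int pause_to_advertise(unsigned int fc)
	{
		unsigned int adv = 0;

		if (fc == (PAUSE_RX | PAUSE_TX))	/* symmetric pause */
			adv = ADVERTISED_PAUSE;
		else if (fc == PAUSE_RX)		/* rx-only: pause + asym */
			adv = ADVERTISED_PAUSE | ADVERTISED_ASYM_PAUSE;
		else if (fc == PAUSE_TX)		/* tx-only: asym only */
			adv = ADVERTISED_ASYM_PAUSE;
		return adv;
	}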
@@ -418,24 +595,109 @@
 /*
  * External interrupt handler for boards using elmer0.
  */
-int elmer0_ext_intr_handler(adapter_t *adapter)
+int t1_elmer0_ext_intr_handler(adapter_t *adapter)
 {
-    	struct cphy *phy;
+	struct cphy *phy;
 	int phy_cause;
-    	u32 cause;
+	u32 cause;
 
 	t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause);
 
 	switch (board_info(adapter)->board) {
+#ifdef CONFIG_CHELSIO_T1_1G
+        case CHBT_BOARD_CHT204:
+        case CHBT_BOARD_CHT204E:
+        case CHBT_BOARD_CHN204:
+        case CHBT_BOARD_CHT204V: {
+                int i, port_bit;
+		for_each_port(adapter, i) {
+			port_bit = i + 1;
+			if (!(cause & (1 << port_bit))) continue;
+
+	                phy = adapter->port[i].phy;
+			phy_cause = phy->ops->interrupt_handler(phy);
+			if (phy_cause & cphy_cause_link_change)
+				t1_link_changed(adapter, i);
+		}
+                break;
+        }
+	case CHBT_BOARD_CHT101:
+		if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */
+			phy = adapter->port[0].phy;
+			phy_cause = phy->ops->interrupt_handler(phy);
+			if (phy_cause & cphy_cause_link_change)
+				t1_link_changed(adapter, 0);
+		}
+		break;
+	case CHBT_BOARD_7500: {
+		int p;
+    		/*
+		 * Elmer0's interrupt cause isn't useful here because there is
+		 * only one bit that can be set for all 4 ports.  This means
+		 * we are forced to check every PHY's interrupt status
+		 * register to see who initiated the interrupt.
+     		 */
+    		for_each_port(adapter, p) {
+			phy = adapter->port[p].phy;
+			phy_cause = phy->ops->interrupt_handler(phy);
+			if (phy_cause & cphy_cause_link_change)
+			    t1_link_changed(adapter, p);
+		}
+		break;
+	}
+#endif
+	case CHBT_BOARD_CHT210:
 	case CHBT_BOARD_N210:
 	case CHBT_BOARD_N110:
 		if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */
 			phy = adapter->port[0].phy;
 			phy_cause = phy->ops->interrupt_handler(phy);
 			if (phy_cause & cphy_cause_link_change)
-				link_changed(adapter, 0);
+				t1_link_changed(adapter, 0);
 		}
 		break;
+	case CHBT_BOARD_8000:
+	case CHBT_BOARD_CHT110:
+    		CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n",
+		       cause);
+		if (cause & ELMER0_GP_BIT1) {        /* PMC3393 INTB */
+			struct cmac *mac = adapter->port[0].mac;
+
+			mac->ops->interrupt_handler(mac);
+		}
+		if (cause & ELMER0_GP_BIT5) {        /* XPAK MOD_DETECT */
+			u32 mod_detect;
+
+			t1_tpi_read(adapter,
+					A_ELMER0_GPI_STAT, &mod_detect);
+	    		CH_MSG(adapter, INFO, LINK, "XPAK %s\n",
+			       mod_detect ? "removed" : "inserted");
+    		}
+		break;
+#ifdef CONFIG_CHELSIO_T1_COUGAR
+	case CHBT_BOARD_COUGAR:
+		if (adapter->params.nports == 1) {
+			if (cause & ELMER0_GP_BIT1) {         /* Vitesse MAC */
+				struct cmac *mac = adapter->port[0].mac;
+				mac->ops->interrupt_handler(mac);
+			}
+			if (cause & ELMER0_GP_BIT5) {     /* XPAK MOD_DETECT */
+			}
+		} else {
+			int i, port_bit;
+
+			for_each_port(adapter, i) {
+				port_bit = i ? i + 1 : 0;
+				if (!(cause & (1 << port_bit))) continue;
+
+				phy = adapter->port[i].phy;
+				phy_cause = phy->ops->interrupt_handler(phy);
+				if (phy_cause & cphy_cause_link_change)
+					t1_link_changed(adapter, i);
+			}
+		}
+		break;
+#endif
 	}
 	t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
 	return 0;
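The per-port loops in the new 1G and Cougar cases iterate with for_each_port(); presumably (an assumption) this is a simple macro over the configured port count, along the lines of:

	#define for_each_port(adapter, iter) \
		for ((iter) = 0; (iter) < (adapter)->params.nports; ++(iter))

with the Elmer0 cause bit for port i taken as bit (i + 1) on the CHT204/CHN204-style boards, and bit 0 for the first port on multi-port Cougar boards.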
@@ -445,11 +707,11 @@
 void t1_interrupts_enable(adapter_t *adapter)
 {
 	unsigned int i;
-	u32 pl_intr;
 
-	adapter->slow_intr_mask = F_PL_INTR_SGE_ERR;
+	adapter->slow_intr_mask = F_PL_INTR_SGE_ERR | F_PL_INTR_TP;
 
 	t1_sge_intr_enable(adapter->sge);
+	t1_tp_intr_enable(adapter->tp);
 	if (adapter->espi) {
 		adapter->slow_intr_mask |= F_PL_INTR_ESPI;
 		t1_espi_intr_enable(adapter->espi);
@@ -462,15 +724,17 @@
 	}
 
 	/* Enable PCIX & external chip interrupts on ASIC boards. */
-	pl_intr = readl(adapter->regs + A_PL_ENABLE);
+	if (t1_is_asic(adapter)) {
+		u32 pl_intr = readl(adapter->regs + A_PL_ENABLE);
 
-	/* PCI-X interrupts */
-	pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
-			       0xffffffff);
+		/* PCI-X interrupts */
+		pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
+				       0xffffffff);
 
-	adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
-	pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
-	writel(pl_intr, adapter->regs + A_PL_ENABLE);
+		adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
+		pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
+		writel(pl_intr, adapter->regs + A_PL_ENABLE);
+	}
 }
 
 /* Disables all interrupts. */
@@ -479,6 +743,7 @@
 	unsigned int i;
 
 	t1_sge_intr_disable(adapter->sge);
+	t1_tp_intr_disable(adapter->tp);
 	if (adapter->espi)
 		t1_espi_intr_disable(adapter->espi);
 
@@ -489,7 +754,8 @@
 	}
 
 	/* Disable PCIX & external chip interrupts. */
-	writel(0, adapter->regs + A_PL_ENABLE);
+	if (t1_is_asic(adapter))
+	    	writel(0, adapter->regs + A_PL_ENABLE);
 
 	/* PCI-X interrupts */
 	pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
@@ -501,10 +767,9 @@
 void t1_interrupts_clear(adapter_t* adapter)
 {
 	unsigned int i;
-	u32 pl_intr;
-
 
 	t1_sge_intr_clear(adapter->sge);
+	t1_tp_intr_clear(adapter->tp);
 	if (adapter->espi)
 		t1_espi_intr_clear(adapter->espi);
 
@@ -515,10 +780,12 @@
 	}
 
 	/* Enable interrupts for external devices. */
-    	pl_intr = readl(adapter->regs + A_PL_CAUSE);
+	if (t1_is_asic(adapter)) {
+		u32 pl_intr = readl(adapter->regs + A_PL_CAUSE);
 
-	writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX,
-	       adapter->regs + A_PL_CAUSE);
+		writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX,
+		       adapter->regs + A_PL_CAUSE);
+	}
 
 	/* PCI-X interrupts */
 	pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff);
@@ -527,7 +794,7 @@
 /*
  * Slow path interrupt handler for ASICs.
  */
-int t1_slow_intr_handler(adapter_t *adapter)
+static int asic_slow_intr(adapter_t *adapter)
 {
 	u32 cause = readl(adapter->regs + A_PL_CAUSE);
 
@@ -536,89 +803,54 @@
 		return 0;
 	if (cause & F_PL_INTR_SGE_ERR)
 		t1_sge_intr_error_handler(adapter->sge);
+	if (cause & F_PL_INTR_TP)
+		t1_tp_intr_handler(adapter->tp);
 	if (cause & F_PL_INTR_ESPI)
 		t1_espi_intr_handler(adapter->espi);
 	if (cause & F_PL_INTR_PCIX)
 		t1_pci_intr_handler(adapter);
 	if (cause & F_PL_INTR_EXT)
-		t1_elmer0_ext_intr(adapter);
+		t1_elmer0_ext_intr_handler(adapter);
 
 	/* Clear the interrupts just processed. */
 	writel(cause, adapter->regs + A_PL_CAUSE);
-	(void)readl(adapter->regs + A_PL_CAUSE); /* flush writes */
+	readl(adapter->regs + A_PL_CAUSE); /* flush writes */
 	return 1;
 }
 
-/* Pause deadlock avoidance parameters */
-#define DROP_MSEC 16
-#define DROP_PKTS_CNT  1
-
-static void set_csum_offload(adapter_t *adapter, u32 csum_bit, int enable)
+int t1_slow_intr_handler(adapter_t *adapter)
 {
-	u32 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
-
-	if (enable)
-		val |= csum_bit;
-	else
-		val &= ~csum_bit;
-	writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
+#ifdef CONFIG_CHELSIO_T1_1G
+	if (!t1_is_asic(adapter))
+		return fpga_slow_intr(adapter);
+#endif
+	return asic_slow_intr(adapter);
 }
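t1_slow_intr_handler() now dispatches on t1_is_asic(); given that t1_get_board_rev() below derives params.is_asic from the chip type, t1_is_asic() is presumably just an accessor for that flag (sketch under that assumption):

	static inline int t1_is_asic(const adapter_t *adapter)
	{
		return adapter->params.is_asic;
	}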
 
-void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable)
+/* Power sequencing is a work-around for Intel's XPAKs. */
+static void power_sequence_xpak(adapter_t* adapter)
 {
-	set_csum_offload(adapter, F_IP_CSUM, enable);
-}
+    	u32 mod_detect;
+    	u32 gpo;
 
-void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable)
-{
-	set_csum_offload(adapter, F_UDP_CSUM, enable);
-}
-
-void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable)
-{
-	set_csum_offload(adapter, F_TCP_CSUM, enable);
-}
-
-static void t1_tp_reset(adapter_t *adapter, unsigned int tp_clk)
-{
-	u32 val;
-
-	val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
-	      F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
-	val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
-	       F_TP_IN_ESPI_CHECK_TCP_CSUM;
-	writel(val, adapter->regs + A_TP_IN_CONFIG);
-	writel(F_TP_OUT_CSPI_CPL |
-	       F_TP_OUT_ESPI_ETHERNET |
-	       F_TP_OUT_ESPI_GENERATE_IP_CSUM |
-	       F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
-	       adapter->regs + A_TP_OUT_CONFIG);
-
-	val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
-	val &= ~(F_IP_CSUM | F_UDP_CSUM | F_TCP_CSUM);
-	writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
-
-	/*
-	 * Enable pause frame deadlock prevention.
-	 */
-	if (is_T2(adapter)) {
-		u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
-
-		writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
-		       V_DROP_TICKS_CNT(drop_ticks) |
-		       V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
-		       adapter->regs + A_TP_TX_DROP_CONFIG);
+    	/* Check for XPAK */
+    	t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect);
+	if (!(ELMER0_GP_BIT5 & mod_detect)) {
+		/* XPAK is present */
+		t1_tpi_read(adapter, A_ELMER0_GPO, &gpo);
+		gpo |= ELMER0_GP_BIT18;
+		t1_tpi_write(adapter, A_ELMER0_GPO, gpo);
 	}
-
-	writel(F_TP_RESET, adapter->regs + A_TP_RESET);
 }
 
 int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
 			       struct adapter_params *p)
 {
 	p->chip_version = bi->chip_term;
+	p->is_asic = (p->chip_version != CHBT_TERM_FPGA);
 	if (p->chip_version == CHBT_TERM_T1 ||
-	    p->chip_version == CHBT_TERM_T2) {
+	    p->chip_version == CHBT_TERM_T2 ||
+	    p->chip_version == CHBT_TERM_FPGA) {
 		u32 val = readl(adapter->regs + A_TP_PC_CONFIG);
 
 		val = G_TP_PC_REV(val);
@@ -640,11 +872,38 @@
 static int board_init(adapter_t *adapter, const struct board_info *bi)
 {
 	switch (bi->board) {
+	case CHBT_BOARD_8000:
 	case CHBT_BOARD_N110:
 	case CHBT_BOARD_N210:
-		writel(V_TPIPAR(0xf), adapter->regs + A_TPI_PAR);
+	case CHBT_BOARD_CHT210:
+	case CHBT_BOARD_COUGAR:
+    		t1_tpi_par(adapter, 0xf);
     		t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
 		break;
+	case CHBT_BOARD_CHT110:
+    		t1_tpi_par(adapter, 0xf);
+    		t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800);
+
+    		/* TBD XXX Might not need.  This fixes a problem
+     		 *         described in the Intel SR XPAK errata.
+     		 */
+    		power_sequence_xpak(adapter);
+		break;
+#ifdef CONFIG_CHELSIO_T1_1G
+    case CHBT_BOARD_CHT204E:
+		        /* add config space write here */
+	case CHBT_BOARD_CHT204:
+	case CHBT_BOARD_CHT204V:
+	case CHBT_BOARD_CHN204:
+                t1_tpi_par(adapter, 0xf);
+                t1_tpi_write(adapter, A_ELMER0_GPO, 0x804);
+                break;
+	case CHBT_BOARD_CHT101:
+	case CHBT_BOARD_7500:
+    		t1_tpi_par(adapter, 0xf);
+    		t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804);
+		break;
+#endif
 	}
 	return 0;
 }
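board_init() now programs the TPI parity setting through t1_tpi_par() instead of an open-coded register write; a minimal sketch of such a helper, assuming it simply wraps the old V_TPIPAR()/A_TPI_PAR write, is:

	void t1_tpi_par(adapter_t *adapter, u32 value)
	{
		writel(V_TPIPAR(value), adapter->regs + A_TPI_PAR);
	}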
@@ -666,11 +925,16 @@
 		       adapter->regs + A_MC5_CONFIG);
 	}
 
+#ifdef CONFIG_CHELSIO_T1_COUGAR
+	if (adapter->cspi && t1_cspi_init(adapter->cspi))
+		goto out_err;
+#endif
 	if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
 					  bi->espi_nports))
 		goto out_err;
 
-	t1_tp_reset(adapter, bi->clock_core);
+	if (t1_tp_reset(adapter->tp, &adapter->params.tp, bi->clock_core))
+		goto out_err;
 
 	err = t1_sge_configure(adapter->sge, &adapter->params.sge);
 	if (err)
@@ -714,8 +978,14 @@
 
 	if (adapter->sge)
 		t1_sge_destroy(adapter->sge);
+	if (adapter->tp)
+		t1_tp_destroy(adapter->tp);
 	if (adapter->espi)
 		t1_espi_destroy(adapter->espi);
+#ifdef CONFIG_CHELSIO_T1_COUGAR
+        if (adapter->cspi)
+		t1_cspi_destroy(adapter->cspi);
+#endif
 }
 
 static void __devinit init_link_config(struct link_config *lc,
@@ -735,6 +1005,13 @@
 	}
 }
 
+#ifdef CONFIG_CHELSIO_T1_COUGAR
+	if (bi->clock_cspi && !(adapter->cspi = t1_cspi_create(adapter))) {
+		CH_ERR("%s: CSPI initialization failed\n",
+		       adapter->name);
+		goto error;
+        }
+#endif
 
 /*
  * Allocate and initialize the data structures that hold the SW state of
@@ -762,6 +1039,13 @@
 		goto error;
 	}
 
+	adapter->tp = t1_tp_create(adapter, &adapter->params.tp);
+	if (!adapter->tp) {
+		CH_ERR("%s: TP initialization failed\n",
+		       adapter->name);
+		goto error;
+	}
+
 	board_init(adapter, bi);
 	bi->mdio_ops->init(adapter, bi);
 	if (bi->gphy->reset)
@@ -793,7 +1077,9 @@
 		 * Get the port's MAC addresses either from the EEPROM if one
 		 * exists or the one hardcoded in the MAC.
 		 */
-		if (vpd_macaddress_get(adapter, i, hw_addr)) {
+		if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY)
+			mac->ops->macaddress_get(mac, hw_addr);
+		else if (vpd_macaddress_get(adapter, i, hw_addr)) {
 			CH_ERR("%s: could not read MAC address from VPD ROM\n",
 			       adapter->port[i].dev->name);
 			goto error;
@@ -806,7 +1092,7 @@
 	t1_interrupts_clear(adapter);
 	return 0;
 
- error:
+error:
 	t1_free_sw_modules(adapter);
 	return -1;
 }
diff --git a/drivers/net/chelsio/suni1x10gexp_regs.h b/drivers/net/chelsio/suni1x10gexp_regs.h
index 81816c2..269d097 100644
--- a/drivers/net/chelsio/suni1x10gexp_regs.h
+++ b/drivers/net/chelsio/suni1x10gexp_regs.h
@@ -32,6 +32,30 @@
 #ifndef _CXGB_SUNI1x10GEXP_REGS_H_
 #define _CXGB_SUNI1x10GEXP_REGS_H_
 
+/*
+** Space allocated for each Exact Match Filter
+**     There are 8 filter configurations
+*/
+#define SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER 0x0003
+
+#define mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId)       ( (filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER )
+
+/*
+** Space allocated for VLAN-Id Filter
+**      There are 8 filter configurations
+*/
+#define SUNI1x10GEXP_REG_SIZEOF_MAC_VID_FILTER 0x0001
+
+#define mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId)   ( (filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_VID_FILTER )
+
+/*
+** Space allocated for each MSTAT Counter
+*/
+#define SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT 0x0004
+
+#define mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId)       ( (countId) * SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT )
+
+
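These per-instance strides feed the parameterized register macros defined below. For example, with a MAC filter stride of 3, mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_LOW(1) resolves to 0x204A + 1*3 = 0x204D, i.e. SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, and with a 4-register MSTAT stride, mSUNI1x10GEXP_REG_MSTAT_COUNTER_LOW(5) resolves to 0x2110 + 5*4 = 0x2124, i.e. SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW.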
 /******************************************************************************/
 /** S/UNI-1x10GE-XP REGISTER ADDRESS MAP                                     **/
 /******************************************************************************/
@@ -39,33 +63,125 @@
 /* to the S/UNI-1x10GE-XP Data Sheet for the signification of each bit        */
 /******************************************************************************/
 
+
+#define SUNI1x10GEXP_REG_IDENTIFICATION                                  0x0000
+#define SUNI1x10GEXP_REG_PRODUCT_REVISION                                0x0001
+#define SUNI1x10GEXP_REG_CONFIG_AND_RESET_CONTROL                        0x0002
+#define SUNI1x10GEXP_REG_LOOPBACK_MISC_CTRL                              0x0003
 #define SUNI1x10GEXP_REG_DEVICE_STATUS                                   0x0004
+#define SUNI1x10GEXP_REG_GLOBAL_PERFORMANCE_MONITOR_UPDATE               0x0005
+
+#define SUNI1x10GEXP_REG_MDIO_COMMAND                                    0x0006
+#define SUNI1x10GEXP_REG_MDIO_INTERRUPT_ENABLE                           0x0007
+#define SUNI1x10GEXP_REG_MDIO_INTERRUPT_STATUS                           0x0008
+#define SUNI1x10GEXP_REG_MMD_PHY_ADDRESS                                 0x0009
+#define SUNI1x10GEXP_REG_MMD_CONTROL_ADDRESS_DATA                        0x000A
+#define SUNI1x10GEXP_REG_MDIO_READ_STATUS_DATA                           0x000B
+
+#define SUNI1x10GEXP_REG_OAM_INTF_CTRL                                   0x000C
 #define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS                         0x000D
 #define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE                         0x000E
+#define SUNI1x10GEXP_REG_FREE                                            0x000F
+
+#define SUNI1x10GEXP_REG_XTEF_MISC_CTRL                                  0x0010
+#define SUNI1x10GEXP_REG_XRF_MISC_CTRL                                   0x0011
+
+#define SUNI1x10GEXP_REG_SERDES_3125_CONFIG_1                            0x0100
+#define SUNI1x10GEXP_REG_SERDES_3125_CONFIG_2                            0x0101
 #define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE                    0x0102
+#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_VISIBLE                   0x0103
 #define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS                    0x0104
+#define SUNI1x10GEXP_REG_SERDES_3125_TEST_CONFIG                         0x0107
+
 #define SUNI1x10GEXP_REG_RXXG_CONFIG_1                                   0x2040
+#define SUNI1x10GEXP_REG_RXXG_CONFIG_2                                   0x2041
 #define SUNI1x10GEXP_REG_RXXG_CONFIG_3                                   0x2042
 #define SUNI1x10GEXP_REG_RXXG_INTERRUPT                                  0x2043
 #define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH                           0x2045
 #define SUNI1x10GEXP_REG_RXXG_SA_15_0                                    0x2046
 #define SUNI1x10GEXP_REG_RXXG_SA_31_16                                   0x2047
 #define SUNI1x10GEXP_REG_RXXG_SA_47_32                                   0x2048
+#define SUNI1x10GEXP_REG_RXXG_RECEIVE_FIFO_THRESHOLD                     0x2049
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_LOW(filterId) (0x204A + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId))
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_MID(filterId) (0x204B + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId))
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_HIGH(filterId)(0x204C + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId))
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID(filterId)      (0x2062 + mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId))
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_LOW                     0x204A
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_MID                     0x204B
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_HIGH                    0x204C
 #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW                     0x204D
 #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID                     0x204E
 #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH                    0x204F
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_LOW                     0x2050
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_MID                     0x2051
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_HIGH                    0x2052
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_LOW                     0x2053
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_MID                     0x2054
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_HIGH                    0x2055
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_LOW                     0x2056
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_MID                     0x2057
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_HIGH                    0x2058
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_LOW                     0x2059
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_MID                     0x205A
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_HIGH                    0x205B
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_LOW                     0x205C
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_MID                     0x205D
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_HIGH                    0x205E
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_LOW                     0x205F
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_MID                     0x2060
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_HIGH                    0x2061
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_0                          0x2062
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_1                          0x2063
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_2                          0x2064
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_3                          0x2065
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_4                          0x2066
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_5                          0x2067
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_6                          0x2068
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_7                          0x2069
 #define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW                         0x206A
 #define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW                      0x206B
 #define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH                     0x206C
 #define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH                        0x206D
 #define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0                   0x206E
+#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_1                   0x206F
 #define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2                   0x2070
+
+#define SUNI1x10GEXP_REG_XRF_PATTERN_GEN_CTRL                            0x2081
+#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_0                       0x2084
+#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_1                       0x2085
+#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_2                       0x2086
+#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_3                       0x2087
 #define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE                            0x2088
 #define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS                            0x2089
+#define SUNI1x10GEXP_REG_XRF_ERR_STATUS                                  0x208A
 #define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE                       0x208B
 #define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS                       0x208C
+#define SUNI1x10GEXP_REG_XRF_CODE_ERR_THRES                              0x2092
+
+#define SUNI1x10GEXP_REG_RXOAM_CONFIG                                    0x20C0
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_CONFIG                           0x20C1
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_CONFIG                           0x20C2
+#define SUNI1x10GEXP_REG_RXOAM_CONFIG_2                                  0x20C3
+#define SUNI1x10GEXP_REG_RXOAM_HEC_CONFIG                                0x20C4
+#define SUNI1x10GEXP_REG_RXOAM_HEC_ERR_THRES                             0x20C5
 #define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE                          0x20C7
 #define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS                          0x20C8
+#define SUNI1x10GEXP_REG_RXOAM_STATUS                                    0x20C9
+#define SUNI1x10GEXP_REG_RXOAM_HEC_ERR_COUNT                             0x20CA
+#define SUNI1x10GEXP_REG_RXOAM_FIFO_OVERFLOW_COUNT                       0x20CB
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_COUNT_LSB                 0x20CC
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_COUNT_MSB                 0x20CD
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_MISMATCH_COUNT_LSB               0x20CE
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_MISMATCH_COUNT_MSB               0x20CF
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_MISMATCH_COUNT_LSB               0x20D0
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_MISMATCH_COUNT_MSB               0x20D1
+#define SUNI1x10GEXP_REG_RXOAM_OAM_EXTRACT_COUNT_LSB                     0x20D2
+#define SUNI1x10GEXP_REG_RXOAM_OAM_EXTRACT_COUNT_MSB                     0x20D3
+#define SUNI1x10GEXP_REG_RXOAM_MINI_PACKET_COUNT_LSB                     0x20D4
+#define SUNI1x10GEXP_REG_RXOAM_MINI_PACKET_COUNT_MSB                     0x20D5
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_THRES_LSB                 0x20D6
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_THRES_MSB                 0x20D7
+
 #define SUNI1x10GEXP_REG_MSTAT_CONTROL                                   0x2100
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0                        0x2101
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1                        0x2102
@@ -75,50 +191,321 @@
 #define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1                          0x2106
 #define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2                          0x2107
 #define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3                          0x2108
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_ADDRESS                     0x2109
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_LOW                    0x210A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_MIDDLE                 0x210B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_HIGH                   0x210C
+#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_LOW(countId)   (0x2110 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId))
+#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_MID(countId)   (0x2111 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId))
+#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_HIGH(countId)  (0x2112 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId))
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW                             0x2110
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_MID                             0x2111
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_HIGH                            0x2112
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_RESVD                           0x2113
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW                             0x2114
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_MID                             0x2115
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_HIGH                            0x2116
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_RESVD                           0x2117
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_LOW                             0x2118
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_MID                             0x2119
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_HIGH                            0x211A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_RESVD                           0x211B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_LOW                             0x211C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_MID                             0x211D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_HIGH                            0x211E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_RESVD                           0x211F
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW                             0x2120
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_MID                             0x2121
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_HIGH                            0x2122
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_RESVD                           0x2123
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW                             0x2124
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_MID                             0x2125
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_HIGH                            0x2126
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_RESVD                           0x2127
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW                             0x2128
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_MID                             0x2129
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_HIGH                            0x212A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_RESVD                           0x212B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_LOW                             0x212C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_MID                             0x212D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_HIGH                            0x212E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_RESVD                           0x212F
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW                             0x2130
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_MID                             0x2131
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_HIGH                            0x2132
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_RESVD                           0x2133
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_LOW                             0x2134
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_MID                             0x2135
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_HIGH                            0x2136
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_RESVD                           0x2137
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW                            0x2138
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_MID                            0x2139
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_HIGH                           0x213A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_RESVD                          0x213B
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW                            0x213C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_MID                            0x213D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_HIGH                           0x213E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_RESVD                          0x213F
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW                            0x2140
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_MID                            0x2141
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_HIGH                           0x2142
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_RESVD                          0x2143
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW                            0x2144
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_MID                            0x2145
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_HIGH                           0x2146
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_RESVD                          0x2147
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_LOW                            0x2148
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_MID                            0x2149
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_HIGH                           0x214A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_RESVD                          0x214B
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW                            0x214C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_MID                            0x214D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_HIGH                           0x214E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_RESVD                          0x214F
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW                            0x2150
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_MID                            0x2151
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_HIGH                           0x2152
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_RESVD                          0x2153
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW                            0x2154
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_MID                            0x2155
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_HIGH                           0x2156
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_RESVD                          0x2157
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW                            0x2158
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_MID                            0x2159
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_HIGH                           0x215A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_RESVD                          0x215B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_LOW                            0x215C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_MID                            0x215D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_HIGH                           0x215E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_RESVD                          0x215F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_LOW                            0x2160
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_MID                            0x2161
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_HIGH                           0x2162
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_RESVD                          0x2163
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_LOW                            0x2164
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_MID                            0x2165
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_HIGH                           0x2166
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_RESVD                          0x2167
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_LOW                            0x2168
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_MID                            0x2169
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_HIGH                           0x216A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_RESVD                          0x216B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_LOW                            0x216C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_MID                            0x216D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_HIGH                           0x216E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_RESVD                          0x216F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_LOW                            0x2170
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_MID                            0x2171
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_HIGH                           0x2172
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_RESVD                          0x2173
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW                            0x2174
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_MID                            0x2175
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_HIGH                           0x2176
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_RESVD                          0x2177
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW                            0x2178
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_MID                            0x2179
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_HIGH                           0x217a
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_RESVD                          0x217b
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_LOW                            0x217c
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_MID                            0x217d
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_HIGH                           0x217e
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_RESVD                          0x217f
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_LOW                            0x2180
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_MID                            0x2181
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_HIGH                           0x2182
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_RESVD                          0x2183
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_LOW                            0x2184
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_MID                            0x2185
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_HIGH                           0x2186
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_RESVD                          0x2187
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_LOW                            0x2188
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_MID                            0x2189
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_HIGH                           0x218A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_RESVD                          0x218B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_LOW                            0x218C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_MID                            0x218D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_HIGH                           0x218E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_RESVD                          0x218F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_LOW                            0x2190
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_MID                            0x2191
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_HIGH                           0x2192
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_RESVD                          0x2193
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW                            0x2194
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_MID                            0x2195
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_HIGH                           0x2196
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_RESVD                          0x2197
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_LOW                            0x2198
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_MID                            0x2199
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_HIGH                           0x219A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_RESVD                          0x219B
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW                            0x219C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_MID                            0x219D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_HIGH                           0x219E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_RESVD                          0x219F
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW                            0x21A0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_MID                            0x21A1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_HIGH                           0x21A2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_RESVD                          0x21A3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_LOW                            0x21A4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_MID                            0x21A5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_HIGH                           0x21A6
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_RESVD                          0x21A7
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW                            0x21A8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_MID                            0x21A9
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_HIGH                           0x21AA
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_RESVD                          0x21AB
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_LOW                            0x21AC
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_MID                            0x21AD
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_HIGH                           0x21AE
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_RESVD                          0x21AF
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW                            0x21B0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_MID                            0x21B1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_HIGH                           0x21B2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_RESVD                          0x21B3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_LOW                            0x21B4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_MID                            0x21B5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_HIGH                           0x21B6
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_RESVD                          0x21B7
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW                            0x21B8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_MID                            0x21B9
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_HIGH                           0x21BA
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_RESVD                          0x21BB
 #define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW                            0x21BC
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_MID                            0x21BD
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_HIGH                           0x21BE
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_RESVD                          0x21BF
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_LOW                            0x21C0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_MID                            0x21C1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_HIGH                           0x21C2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_RESVD                          0x21C3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_LOW                            0x21C4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_MID                            0x21C5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_HIGH                           0x21C6
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_RESVD                          0x21C7
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_LOW                            0x21C8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_MID                            0x21C9
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_HIGH                           0x21CA
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_RESVD                          0x21CB
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_LOW                            0x21CC
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_MID                            0x21CD
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_HIGH                           0x21CE
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_RESVD                          0x21CF
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_LOW                            0x21D0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_MID                            0x21D1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_HIGH                           0x21D2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_RESVD                          0x21D3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_LOW                            0x21D4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_MID                            0x21D5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_HIGH                           0x21D6
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_RESVD                          0x21D7
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_LOW                            0x21D8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_MID                            0x21D9
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_HIGH                           0x21DA
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_RESVD                          0x21DB
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW                            0x21DC
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_MID                            0x21DD
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_HIGH                           0x21DE
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_RESVD                          0x21DF
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW                            0x21E0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_MID                            0x21E1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_HIGH                           0x21E2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_RESVD                          0x21E3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_LOW                            0x21E4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_MID                            0x21E5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_HIGH                           0x21E6
+#define SUNI1x10GEXP_CNTR_MAC_ETHERNET_NUM                               51
+
+#define SUNI1x10GEXP_REG_IFLX_GLOBAL_CONFIG                              0x2200
+#define SUNI1x10GEXP_REG_IFLX_CHANNEL_PROVISION                          0x2201
 #define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE                       0x2209
 #define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT                    0x220A
+#define SUNI1x10GEXP_REG_IFLX_INDIR_CHANNEL_ADDRESS                      0x220D
+#define SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_LOW_LIMIT_PROVISION     0x220E
+#define SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_HIGH_LIMIT              0x220F
+#define SUNI1x10GEXP_REG_IFLX_INDIR_FULL_ALMOST_FULL_STATUS_LIMIT        0x2210
+#define SUNI1x10GEXP_REG_IFLX_INDIR_EMPTY_ALMOST_EMPTY_STATUS_LIMIT      0x2211
+
+#define SUNI1x10GEXP_REG_PL4MOS_CONFIG                                   0x2240
+#define SUNI1x10GEXP_REG_PL4MOS_MASK                                     0x2241
+#define SUNI1x10GEXP_REG_PL4MOS_FAIRNESS_MASKING                         0x2242
+#define SUNI1x10GEXP_REG_PL4MOS_MAXBURST1                                0x2243
+#define SUNI1x10GEXP_REG_PL4MOS_MAXBURST2                                0x2244
+#define SUNI1x10GEXP_REG_PL4MOS_TRANSFER_SIZE                            0x2245
+
+#define SUNI1x10GEXP_REG_PL4ODP_CONFIG                                   0x2280
 #define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK                           0x2282
 #define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT                                0x2283
+#define SUNI1x10GEXP_REG_PL4ODP_CONFIG_MAX_T                             0x2284
+
 #define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS                        0x2300
 #define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE                        0x2301
 #define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK                          0x2302
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_LIMITS                        0x2303
+#define SUNI1x10GEXP_REG_PL4IO_CALENDAR_REPETITIONS                      0x2304
+#define SUNI1x10GEXP_REG_PL4IO_CONFIG                                    0x2305
+
 #define SUNI1x10GEXP_REG_TXXG_CONFIG_1                                   0x3040
+#define SUNI1x10GEXP_REG_TXXG_CONFIG_2                                   0x3041
 #define SUNI1x10GEXP_REG_TXXG_CONFIG_3                                   0x3042
 #define SUNI1x10GEXP_REG_TXXG_INTERRUPT                                  0x3043
+#define SUNI1x10GEXP_REG_TXXG_STATUS                                     0x3044
 #define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE                             0x3045
+#define SUNI1x10GEXP_REG_TXXG_MIN_FRAME_SIZE                             0x3046
 #define SUNI1x10GEXP_REG_TXXG_SA_15_0                                    0x3047
 #define SUNI1x10GEXP_REG_TXXG_SA_31_16                                   0x3048
 #define SUNI1x10GEXP_REG_TXXG_SA_47_32                                   0x3049
+#define SUNI1x10GEXP_REG_TXXG_PAUSE_TIMER                                0x304D
+#define SUNI1x10GEXP_REG_TXXG_PAUSE_TIMER_INTERVAL                       0x304E
+#define SUNI1x10GEXP_REG_TXXG_FILTER_ERROR_COUNTER                       0x3051
+#define SUNI1x10GEXP_REG_TXXG_PAUSE_QUANTUM_CONFIG                       0x3052
+
+#define SUNI1x10GEXP_REG_XTEF_CTRL                                       0x3080
 #define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS                           0x3084
 #define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE                           0x3085
+#define SUNI1x10GEXP_REG_XTEF_VISIBILITY                                 0x3086
+
+#define SUNI1x10GEXP_REG_TXOAM_OAM_CONFIG                                0x30C0
+#define SUNI1x10GEXP_REG_TXOAM_MINI_RATE_CONFIG                          0x30C1
+#define SUNI1x10GEXP_REG_TXOAM_MINI_GAP_FIFO_CONFIG                      0x30C2
+#define SUNI1x10GEXP_REG_TXOAM_P1P2_STATIC_VALUES                        0x30C3
+#define SUNI1x10GEXP_REG_TXOAM_P3P4_STATIC_VALUES                        0x30C4
+#define SUNI1x10GEXP_REG_TXOAM_P5P6_STATIC_VALUES                        0x30C5
 #define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE                          0x30C6
 #define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS                          0x30C7
+#define SUNI1x10GEXP_REG_TXOAM_INSERT_COUNT_LSB                          0x30C8
+#define SUNI1x10GEXP_REG_TXOAM_INSERT_COUNT_MSB                          0x30C9
+#define SUNI1x10GEXP_REG_TXOAM_OAM_MINI_COUNT_LSB                        0x30CA
+#define SUNI1x10GEXP_REG_TXOAM_OAM_MINI_COUNT_MSB                        0x30CB
+#define SUNI1x10GEXP_REG_TXOAM_P1P2_MINI_MASK                            0x30CC
+#define SUNI1x10GEXP_REG_TXOAM_P3P4_MINI_MASK                            0x30CD
+#define SUNI1x10GEXP_REG_TXOAM_P5P6_MINI_MASK                            0x30CE
+#define SUNI1x10GEXP_REG_TXOAM_COSET                                     0x30CF
+#define SUNI1x10GEXP_REG_TXOAM_EMPTY_FIFO_INS_OP_CNT_LSB                 0x30D0
+#define SUNI1x10GEXP_REG_TXOAM_EMPTY_FIFO_INS_OP_CNT_MSB                 0x30D1
+#define SUNI1x10GEXP_REG_TXOAM_STATIC_VALUE_MINI_COUNT_LSB               0x30D2
+#define SUNI1x10GEXP_REG_TXOAM_STATIC_VALUE_MINI_COUNT_MSB               0x30D3
+
+
+#define SUNI1x10GEXP_REG_EFLX_GLOBAL_CONFIG                              0x3200
+#define SUNI1x10GEXP_REG_EFLX_ERCU_GLOBAL_STATUS                         0x3201
+#define SUNI1x10GEXP_REG_EFLX_INDIR_CHANNEL_ADDRESS                      0x3202
+#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_LOW_LIMIT                       0x3203
+#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_HIGH_LIMIT                      0x3204
+#define SUNI1x10GEXP_REG_EFLX_INDIR_FULL_ALMOST_FULL_STATUS_AND_LIMIT    0x3205
+#define SUNI1x10GEXP_REG_EFLX_INDIR_EMPTY_ALMOST_EMPTY_STATUS_AND_LIMIT  0x3206
+#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_CUT_THROUGH_THRESHOLD           0x3207
 #define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE                 0x320C
 #define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION             0x320D
+#define SUNI1x10GEXP_REG_EFLX_CHANNEL_PROVISION                          0x3210
+
+#define SUNI1x10GEXP_REG_PL4IDU_CONFIG                                   0x3280
 #define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK                           0x3282
 #define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT                                0x3283
 
+
+/*----------------------------------------*/
+#define SUNI1x10GEXP_REG_MAX_OFFSET                                      0x3480
+
 /******************************************************************************/
 /*                 -- End register offset definitions --                      */
 /******************************************************************************/
@@ -127,6 +514,81 @@
 /** SUNI-1x10GE-XP REGISTER BIT MASKS                                        **/
 /******************************************************************************/
 
+#define SUNI1x10GEXP_BITMSK_BITS_1   0x00001
+#define SUNI1x10GEXP_BITMSK_BITS_2   0x00003
+#define SUNI1x10GEXP_BITMSK_BITS_3   0x00007
+#define SUNI1x10GEXP_BITMSK_BITS_4   0x0000f
+#define SUNI1x10GEXP_BITMSK_BITS_5   0x0001f
+#define SUNI1x10GEXP_BITMSK_BITS_6   0x0003f
+#define SUNI1x10GEXP_BITMSK_BITS_7   0x0007f
+#define SUNI1x10GEXP_BITMSK_BITS_8   0x000ff
+#define SUNI1x10GEXP_BITMSK_BITS_9   0x001ff
+#define SUNI1x10GEXP_BITMSK_BITS_10  0x003ff
+#define SUNI1x10GEXP_BITMSK_BITS_11  0x007ff
+#define SUNI1x10GEXP_BITMSK_BITS_12  0x00fff
+#define SUNI1x10GEXP_BITMSK_BITS_13  0x01fff
+#define SUNI1x10GEXP_BITMSK_BITS_14  0x03fff
+#define SUNI1x10GEXP_BITMSK_BITS_15  0x07fff
+#define SUNI1x10GEXP_BITMSK_BITS_16  0x0ffff
+
+#define mSUNI1x10GEXP_CLR_MSBITS_1(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_15)
+#define mSUNI1x10GEXP_CLR_MSBITS_2(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_14)
+#define mSUNI1x10GEXP_CLR_MSBITS_3(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_13)
+#define mSUNI1x10GEXP_CLR_MSBITS_4(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_12)
+#define mSUNI1x10GEXP_CLR_MSBITS_5(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_11)
+#define mSUNI1x10GEXP_CLR_MSBITS_6(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_10)
+#define mSUNI1x10GEXP_CLR_MSBITS_7(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_9)
+#define mSUNI1x10GEXP_CLR_MSBITS_8(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_8)
+#define mSUNI1x10GEXP_CLR_MSBITS_9(v)  ((v) & SUNI1x10GEXP_BITMSK_BITS_7)
+#define mSUNI1x10GEXP_CLR_MSBITS_10(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_6)
+#define mSUNI1x10GEXP_CLR_MSBITS_11(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_5)
+#define mSUNI1x10GEXP_CLR_MSBITS_12(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_4)
+#define mSUNI1x10GEXP_CLR_MSBITS_13(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_3)
+#define mSUNI1x10GEXP_CLR_MSBITS_14(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_2)
+#define mSUNI1x10GEXP_CLR_MSBITS_15(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_1)
+
+#define mSUNI1x10GEXP_GET_BIT(val, bitMsk) (((val)&(bitMsk)) ? 1:0)
+
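The helper masks and mSUNI1x10GEXP_GET_BIT() are meant for picking fields out of 16-bit register values; a hypothetical use (assuming 'status' already holds the value read from SUNI1x10GEXP_REG_DEVICE_STATUS and 'value' some counter field) would be:

	/* Hypothetical: test a single status bit. */
	if (mSUNI1x10GEXP_GET_BIT(status, SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED))
		/* the SXRA interval timer expired */;

	/* Hypothetical: keep only the low 12 bits of a value. */
	low12 = mSUNI1x10GEXP_CLR_MSBITS_4(value);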
+
+
+/*----------------------------------------------------------------------------
+ * Register 0x0001: S/UNI-1x10GE-XP Product Revision
+ *    Bit 3-0  REVISION
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_REVISION  0x000F
+
+/*----------------------------------------------------------------------------
+ * Register 0x0002: S/UNI-1x10GE-XP Configuration and Reset Control
+ *    Bit 2  XAUI_ARESETB
+ *    Bit 1  PL4_ARESETB
+ *    Bit 0  DRESETB
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XAUI_ARESET  0x0004
+#define SUNI1x10GEXP_BITMSK_PL4_ARESET   0x0002
+#define SUNI1x10GEXP_BITMSK_DRESETB      0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0003: S/UNI-1x10GE-XP Loop Back and Miscellaneous Control
+ *    Bit 11  PL4IO_OUTCLKSEL
+ *    Bit 9   SYSPCSLB
+ *    Bit 8   LINEPCSLB
+ *    Bit 7   MSTAT_BYPASS
+ *    Bit 6   RXXG_BYPASS
+ *    Bit 5   TXXG_BYPASS
+ *    Bit 4   SOP_PAD_EN
+ *    Bit 1   LOS_INV
+ *    Bit 0   OVERRIDE_LOS
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUTCLKSEL  0x0800
+#define SUNI1x10GEXP_BITMSK_SYSPCSLB         0x0200
+#define SUNI1x10GEXP_BITMSK_LINEPCSLB        0x0100
+#define SUNI1x10GEXP_BITMSK_MSTAT_BYPASS     0x0080
+#define SUNI1x10GEXP_BITMSK_RXXG_BYPASS      0x0040
+#define SUNI1x10GEXP_BITMSK_TXXG_BYPASS      0x0020
+#define SUNI1x10GEXP_BITMSK_SOP_PAD_EN       0x0010
+#define SUNI1x10GEXP_BITMSK_LOS_INV          0x0002
+#define SUNI1x10GEXP_BITMSK_OVERRIDE_LOS     0x0001
+
 /*----------------------------------------------------------------------------
  * Register 0x0004: S/UNI-1x10GE-XP Device Status
  *    Bit 9 TOP_SXRA_EXPIRED
@@ -141,7 +603,10 @@
  *    Bit 0 TOP_PL4_OUT_ROOL
  *----------------------------------------------------------------------------*/
 #define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED  0x0200
+#define SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY     0x0100
+#define SUNI1x10GEXP_BITMSK_TOP_DTRB          0x0080
 #define SUNI1x10GEXP_BITMSK_TOP_EXPIRED       0x0040
+#define SUNI1x10GEXP_BITMSK_TOP_PAUSED        0x0020
 #define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL   0x0010
 #define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL   0x0008
 #define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL   0x0004
@@ -149,12 +614,219 @@
 #define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL  0x0001
 
 /*----------------------------------------------------------------------------
+ * Register 0x0005: Global Performance Update and Clock Monitors
+ *    Bit 15 TIP
+ *    Bit 8  XAUI_REF_CLKA
+ *    Bit 7  RXLANE3CLKA
+ *    Bit 6  RXLANE2CLKA
+ *    Bit 5  RXLANE1CLKA
+ *    Bit 4  RXLANE0CLKA
+ *    Bit 3  CSUCLKA
+ *    Bit 2  TDCLKA
+ *    Bit 1  RSCLKA
+ *    Bit 0  RDCLKA
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TIP            0x8000
+#define SUNI1x10GEXP_BITMSK_XAUI_REF_CLKA  0x0100
+#define SUNI1x10GEXP_BITMSK_RXLANE3CLKA    0x0080
+#define SUNI1x10GEXP_BITMSK_RXLANE2CLKA    0x0040
+#define SUNI1x10GEXP_BITMSK_RXLANE1CLKA    0x0020
+#define SUNI1x10GEXP_BITMSK_RXLANE0CLKA    0x0010
+#define SUNI1x10GEXP_BITMSK_CSUCLKA        0x0008
+#define SUNI1x10GEXP_BITMSK_TDCLKA         0x0004
+#define SUNI1x10GEXP_BITMSK_RSCLKA         0x0002
+#define SUNI1x10GEXP_BITMSK_RDCLKA         0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0006: MDIO Command
+ *    Bit 4 MDIO_RDINC
+ *    Bit 3 MDIO_RSTAT
+ *    Bit 2 MDIO_LCTLD
+ *    Bit 1 MDIO_LCTLA
+ *    Bit 0 MDIO_SPRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDIO_RDINC  0x0010
+#define SUNI1x10GEXP_BITMSK_MDIO_RSTAT  0x0008
+#define SUNI1x10GEXP_BITMSK_MDIO_LCTLD  0x0004
+#define SUNI1x10GEXP_BITMSK_MDIO_LCTLA  0x0002
+#define SUNI1x10GEXP_BITMSK_MDIO_SPRE   0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0007: MDIO Interrupt Enable
+ *    Bit 0 MDIO_BUSY_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDIO_BUSY_EN  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0008: MDIO Interrupt Status
+ *    Bit 0 MDIO_BUSYI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDIO_BUSYI  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0009: MMD PHY Address
+ *    Bit 12-8 MDIO_DEVADR
+ *    Bit 4-0 MDIO_PRTADR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDIO_DEVADR  0x1F00
+#define SUNI1x10GEXP_BITOFF_MDIO_DEVADR  8
+#define SUNI1x10GEXP_BITMSK_MDIO_PRTADR  0x001F
+#define SUNI1x10GEXP_BITOFF_MDIO_PRTADR  0
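Every multi-bit field in this header comes as a BITMSK_*/BITOFF_* pair: mask then shift down to read a field, shift up then mask to write one. A minimal sketch using the MMD PHY Address fields above (these helpers are illustrative only, not part of the driver):

/* Pack and unpack register 0x0009 using the mask/offset pairs above. */
static inline u16 suni_mmd_addr_pack(unsigned int devadr, unsigned int prtadr)
{
	return ((devadr << SUNI1x10GEXP_BITOFF_MDIO_DEVADR) &
		SUNI1x10GEXP_BITMSK_MDIO_DEVADR) |
	       ((prtadr << SUNI1x10GEXP_BITOFF_MDIO_PRTADR) &
		SUNI1x10GEXP_BITMSK_MDIO_PRTADR);
}

static inline unsigned int suni_mmd_devadr_get(u16 reg)
{
	return (reg & SUNI1x10GEXP_BITMSK_MDIO_DEVADR) >>
	       SUNI1x10GEXP_BITOFF_MDIO_DEVADR;
}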
+
+/*----------------------------------------------------------------------------
+ * Register 0x000C: OAM Interface Control
+ *    Bit 6 MDO_OD_ENB
+ *    Bit 5 MDI_INV
+ *    Bit 4 MDI_SEL
+ *    Bit 3 RXOAMEN
+ *    Bit 2 RXOAMCLKEN
+ *    Bit 1 TXOAMEN
+ *    Bit 0 TXOAMCLKEN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDO_OD_ENB  0x0040
+#define SUNI1x10GEXP_BITMSK_MDI_INV     0x0020
+#define SUNI1x10GEXP_BITMSK_MDI_SEL     0x0010
+#define SUNI1x10GEXP_BITMSK_RXOAMEN     0x0008
+#define SUNI1x10GEXP_BITMSK_RXOAMCLKEN  0x0004
+#define SUNI1x10GEXP_BITMSK_TXOAMEN     0x0002
+#define SUNI1x10GEXP_BITMSK_TXOAMCLKEN  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x000D: S/UNI-1x10GE-XP Master Interrupt Status
+ *    Bit 15 TOP_PL4IO_INT
+ *    Bit 14 TOP_IRAM_INT
+ *    Bit 13 TOP_ERAM_INT
+ *    Bit 12 TOP_XAUI_INT
+ *    Bit 11 TOP_MSTAT_INT
+ *    Bit 10 TOP_RXXG_INT
+ *    Bit 9 TOP_TXXG_INT
+ *    Bit 8 TOP_XRF_INT
+ *    Bit 7 TOP_XTEF_INT
+ *    Bit 6 TOP_MDIO_BUSY_INT
+ *    Bit 5 TOP_RXOAM_INT
+ *    Bit 4 TOP_TXOAM_INT
+ *    Bit 3 TOP_IFLX_INT
+ *    Bit 2 TOP_EFLX_INT
+ *    Bit 1 TOP_PL4ODP_INT
+ *    Bit 0 TOP_PL4IDU_INT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TOP_PL4IO_INT      0x8000
+#define SUNI1x10GEXP_BITMSK_TOP_IRAM_INT       0x4000
+#define SUNI1x10GEXP_BITMSK_TOP_ERAM_INT       0x2000
+#define SUNI1x10GEXP_BITMSK_TOP_XAUI_INT       0x1000
+#define SUNI1x10GEXP_BITMSK_TOP_MSTAT_INT      0x0800
+#define SUNI1x10GEXP_BITMSK_TOP_RXXG_INT       0x0400
+#define SUNI1x10GEXP_BITMSK_TOP_TXXG_INT       0x0200
+#define SUNI1x10GEXP_BITMSK_TOP_XRF_INT        0x0100
+#define SUNI1x10GEXP_BITMSK_TOP_XTEF_INT       0x0080
+#define SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY_INT  0x0040
+#define SUNI1x10GEXP_BITMSK_TOP_RXOAM_INT      0x0020
+#define SUNI1x10GEXP_BITMSK_TOP_TXOAM_INT      0x0010
+#define SUNI1x10GEXP_BITMSK_TOP_IFLX_INT       0x0008
+#define SUNI1x10GEXP_BITMSK_TOP_EFLX_INT       0x0004
+#define SUNI1x10GEXP_BITMSK_TOP_PL4ODP_INT     0x0002
+#define SUNI1x10GEXP_BITMSK_TOP_PL4IDU_INT     0x0001
+
+/*----------------------------------------------------------------------------
  * Register 0x000E:PM3393 Global interrupt enable
  *    Bit 15 TOP_INTE
  *----------------------------------------------------------------------------*/
 #define SUNI1x10GEXP_BITMSK_TOP_INTE  0x8000
 
 /*----------------------------------------------------------------------------
+ * Register 0x0010: XTEF Miscellaneous Control
+ *    Bit 7 RF_VAL
+ *    Bit 6 RF_OVERRIDE
+ *    Bit 5 LF_VAL
+ *    Bit 4 LF_OVERRIDE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RF_VAL             0x0080
+#define SUNI1x10GEXP_BITMSK_RF_OVERRIDE        0x0040
+#define SUNI1x10GEXP_BITMSK_LF_VAL             0x0020
+#define SUNI1x10GEXP_BITMSK_LF_OVERRIDE        0x0010
+#define SUNI1x10GEXP_BITMSK_LFRF_OVERRIDE_VAL  0x00F0
+
+/*----------------------------------------------------------------------------
+ * Register 0x0011: XRF Miscellaneous Control
+ *    Bit 6-4 EN_IDLE_REP
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EN_IDLE_REP  0x0070
+
+/*----------------------------------------------------------------------------
+ * Register 0x0100: SERDES 3125 Configuration Register 1
+ *    Bit 10 RXEQB_3
+ *    Bit 8  RXEQB_2
+ *    Bit 6  RXEQB_1
+ *    Bit 4  RXEQB_0
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXEQB    0x0FF0
+#define SUNI1x10GEXP_BITOFF_RXEQB_3  10
+#define SUNI1x10GEXP_BITOFF_RXEQB_2  8
+#define SUNI1x10GEXP_BITOFF_RXEQB_1  6
+#define SUNI1x10GEXP_BITOFF_RXEQB_0  4
+
+/*----------------------------------------------------------------------------
+ * Register 0x0101: SERDES 3125 Configuration Register 2
+ *    Bit 12 YSEL
+ *    Bit  7 PRE_EMPH_3
+ *    Bit  6 PRE_EMPH_2
+ *    Bit  5 PRE_EMPH_1
+ *    Bit  4 PRE_EMPH_0
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_YSEL        0x1000
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH    0x00F0
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH_3  0x0080
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH_2  0x0040
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH_1  0x0020
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH_0  0x0010
+
+/*----------------------------------------------------------------------------
+ * Register 0x0102: SERDES 3125 Interrupt Enable Register
+ *    Bit 3 LASIE
+ *    Bit 2 SPLL_RAE
+ *    Bit 1 MPLL_RAE
+ *    Bit 0 PLL_LOCKE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LASIE      0x0008
+#define SUNI1x10GEXP_BITMSK_SPLL_RAE   0x0004
+#define SUNI1x10GEXP_BITMSK_MPLL_RAE   0x0002
+#define SUNI1x10GEXP_BITMSK_PLL_LOCKE  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0103: SERDES 3125 Interrupt Visibility Register
+ *    Bit 3 LASIV
+ *    Bit 2 SPLL_RAV
+ *    Bit 1 MPLL_RAV
+ *    Bit 0 PLL_LOCKV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LASIV      0x0008
+#define SUNI1x10GEXP_BITMSK_SPLL_RAV   0x0004
+#define SUNI1x10GEXP_BITMSK_MPLL_RAV   0x0002
+#define SUNI1x10GEXP_BITMSK_PLL_LOCKV  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0104: SERDES 3125 Interrupt Status Register
+ *    Bit 3 LASII
+ *    Bit 2 SPLL_RAI
+ *    Bit 1 MPLL_RAI
+ *    Bit 0 PLL_LOCKI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LASII      0x0008
+#define SUNI1x10GEXP_BITMSK_SPLL_RAI   0x0004
+#define SUNI1x10GEXP_BITMSK_MPLL_RAI   0x0002
+#define SUNI1x10GEXP_BITMSK_PLL_LOCKI  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0107: SERDES 3125 Test Configuration
+ *    Bit 12 DUALTX
+ *    Bit 10 HC_1
+ *    Bit  9 HC_0
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_DUALTX  0x1000
+#define SUNI1x10GEXP_BITMSK_HC      0x0600
+#define SUNI1x10GEXP_BITOFF_HC_0    9
+
+/*----------------------------------------------------------------------------
  * Register 0x2040: RXXG Configuration 1
  *    Bit 15  RXXG_RXEN
  *    Bit 14  RXXG_ROCF
@@ -168,11 +840,84 @@
  *    Bit 2-0 RXXG_MIFG
  *----------------------------------------------------------------------------*/
 #define SUNI1x10GEXP_BITMSK_RXXG_RXEN       0x8000
+#define SUNI1x10GEXP_BITMSK_RXXG_ROCF       0x4000
+#define SUNI1x10GEXP_BITMSK_RXXG_PAD_STRIP  0x2000
 #define SUNI1x10GEXP_BITMSK_RXXG_PUREP      0x0400
+#define SUNI1x10GEXP_BITMSK_RXXG_LONGP      0x0200
+#define SUNI1x10GEXP_BITMSK_RXXG_PARF       0x0100
 #define SUNI1x10GEXP_BITMSK_RXXG_FLCHK      0x0080
+#define SUNI1x10GEXP_BITMSK_RXXG_PASS_CTRL  0x0020
 #define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP  0x0008
 
 /*----------------------------------------------------------------------------
+ * Register 0x2041: RXXG Configuration 2
+ *    Bit 7-0 RXXG_HDRSIZE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_HDRSIZE  0x00FF
+
+/*----------------------------------------------------------------------------
+ * Register 0x2042: RXXG Configuration 3
+ *    Bit 15 RXXG_MIN_LERRE
+ *    Bit 14 RXXG_MAX_LERRE
+ *    Bit 12 RXXG_LINE_ERRE
+ *    Bit 10 RXXG_RX_OVRE
+ *    Bit 9  RXXG_ADR_FILTERE
+ *    Bit 8  RXXG_ERR_FILTERE
+ *    Bit 5  RXXG_PRMB_ERRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_MIN_LERRE     0x8000
+#define SUNI1x10GEXP_BITMSK_RXXG_MAX_LERRE     0x4000
+#define SUNI1x10GEXP_BITMSK_RXXG_LINE_ERRE     0x1000
+#define SUNI1x10GEXP_BITMSK_RXXG_RX_OVRE       0x0400
+#define SUNI1x10GEXP_BITMSK_RXXG_ADR_FILTERE   0x0200
+#define SUNI1x10GEXP_BITMSK_RXXG_ERR_FILTERRE  0x0100
+#define SUNI1x10GEXP_BITMSK_RXXG_PRMB_ERRE     0x0020
+
+/*----------------------------------------------------------------------------
+ * Register 0x2043: RXXG Interrupt
+ *    Bit 15 RXXG_MIN_LERRI
+ *    Bit 14 RXXG_MAX_LERRI
+ *    Bit 12 RXXG_LINE_ERRI
+ *    Bit 10 RXXG_RX_OVRI
+ *    Bit 9  RXXG_ADR_FILTERI
+ *    Bit 8  RXXG_ERR_FILTERI
+ *    Bit 5  RXXG_PRMB_ERRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_MIN_LERRI    0x8000
+#define SUNI1x10GEXP_BITMSK_RXXG_MAX_LERRI    0x4000
+#define SUNI1x10GEXP_BITMSK_RXXG_LINE_ERRI    0x1000
+#define SUNI1x10GEXP_BITMSK_RXXG_RX_OVRI      0x0400
+#define SUNI1x10GEXP_BITMSK_RXXG_ADR_FILTERI  0x0200
+#define SUNI1x10GEXP_BITMSK_RXXG_ERR_FILTERI  0x0100
+#define SUNI1x10GEXP_BITMSK_RXXG_PRMB_ERRE    0x0020
+
+/*----------------------------------------------------------------------------
+ * Register 0x2049: RXXG Receive FIFO Threshold
+ *    Bit 2-0 RXXG_CUT_THRU
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_CUT_THRU  0x0007
+#define SUNI1x10GEXP_BITOFF_RXXG_CUT_THRU  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2062 - 0x2069: RXXG Exact Match VID
+ *    Bit 11-0 RXXG_VID_MATCH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_VID_MATCH  0x0FFF
+#define SUNI1x10GEXP_BITOFF_RXXG_VID_MATCH  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x206E - 0x206F: RXXG Address Filter Control
+ *    Bit 3 RXXG_FORWARD_ENABLE
+ *    Bit 2 RXXG_VLAN_ENABLE
+ *    Bit 1 RXXG_SRC_ADDR
+ *    Bit 0 RXXG_MATCH_ENABLE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_FORWARD_ENABLE  0x0008
+#define SUNI1x10GEXP_BITMSK_RXXG_VLAN_ENABLE     0x0004
+#define SUNI1x10GEXP_BITMSK_RXXG_SRC_ADDR        0x0002
+#define SUNI1x10GEXP_BITMSK_RXXG_MATCH_ENABLE    0x0001
+
+/*----------------------------------------------------------------------------
  * Register 0x2070: RXXG Address Filter Control 2
  *    Bit 1 RXXG_PMODE
  *    Bit 0 RXXG_MHASH_EN
@@ -181,15 +926,446 @@
 #define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN  0x0001
 
 /*----------------------------------------------------------------------------
+ * Register 0x2081: XRF Control Register 2
+ *    Bit 6   EN_PKT_GEN
+ *    Bit 4-2 PATT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EN_PKT_GEN  0x0040
+#define SUNI1x10GEXP_BITMSK_PATT        0x001C
+#define SUNI1x10GEXP_BITOFF_PATT        2
+
+/*----------------------------------------------------------------------------
+ * Register 0x2088: XRF Interrupt Enable
+ *    Bit 12-9 LANE_HICERE
+ *    Bit 8-5  HS_SD_LANEE
+ *    Bit 4    ALIGN_STATUS_ERRE
+ *    Bit 3-0  LANE_SYNC_STAT_ERRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LANE_HICERE          0x1E00
+#define SUNI1x10GEXP_BITOFF_LANE_HICERE          9
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANEE          0x01E0
+#define SUNI1x10GEXP_BITOFF_HS_SD_LANEE          5
+#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERRE    0x0010
+#define SUNI1x10GEXP_BITMSK_LANE_SYNC_STAT_ERRE  0x000F
+#define SUNI1x10GEXP_BITOFF_LANE_SYNC_STAT_ERRE  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2089: XRF Interrupt Status
+ *    Bit 12-9 LANE_HICERI
+ *    Bit 8-5  HS_SD_LANEI
+ *    Bit 4    ALIGN_STATUS_ERRI
+ *    Bit 3-0  LANE_SYNC_STAT_ERRI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LANE_HICERI          0x1E00
+#define SUNI1x10GEXP_BITOFF_LANE_HICERI          9
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANEI          0x01E0
+#define SUNI1x10GEXP_BITOFF_HS_SD_LANEI          5
+#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERRI    0x0010
+#define SUNI1x10GEXP_BITMSK_LANE_SYNC_STAT_ERRI  0x000F
+#define SUNI1x10GEXP_BITOFF_LANE_SYNC_STAT_ERRI  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x208A: XRF Error Status
+ *    Bit 8-5  HS_SD_LANE
+ *    Bit 4    ALIGN_STATUS_ERR
+ *    Bit 3-0  LANE_SYNC_STAT_ERR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANE3          0x0100
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANE2          0x0080
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANE1          0x0040
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANE0          0x0020
+#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERR     0x0010
+#define SUNI1x10GEXP_BITMSK_LANE3_SYNC_STAT_ERR  0x0008
+#define SUNI1x10GEXP_BITMSK_LANE2_SYNC_STAT_ERR  0x0004
+#define SUNI1x10GEXP_BITMSK_LANE1_SYNC_STAT_ERR  0x0002
+#define SUNI1x10GEXP_BITMSK_LANE0_SYNC_STAT_ERR  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x208B: XRF Diagnostic Interrupt Enable
+ *    Bit 7-4 LANE_OVERRUNE
+ *    Bit 3-0 LANE_UNDERRUNE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LANE_OVERRUNE   0x00F0
+#define SUNI1x10GEXP_BITOFF_LANE_OVERRUNE   4
+#define SUNI1x10GEXP_BITMSK_LANE_UNDERRUNE  0x000F
+#define SUNI1x10GEXP_BITOFF_LANE_UNDERRUNE  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x208C: XRF Diagnostic Interrupt Status
+ *    Bit 7-4 LANE_OVERRUNI
+ *    Bit 3-0 LANE_UNDERRUNI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LANE_OVERRUNI   0x00F0
+#define SUNI1x10GEXP_BITOFF_LANE_OVERRUNI   4
+#define SUNI1x10GEXP_BITMSK_LANE_UNDERRUNI  0x000F
+#define SUNI1x10GEXP_BITOFF_LANE_UNDERRUNI  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C0: RXOAM Configuration
+ *    Bit 15    RXOAM_BUSY
+ *    Bit 14-12 RXOAM_F2_SEL
+ *    Bit 10-8  RXOAM_F1_SEL
+ *    Bit 7-6   RXOAM_FILTER_CTRL
+ *    Bit 5-0   RXOAM_PX_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_BUSY         0x8000
+#define SUNI1x10GEXP_BITMSK_RXOAM_F2_SEL       0x7000
+#define SUNI1x10GEXP_BITOFF_RXOAM_F2_SEL       12
+#define SUNI1x10GEXP_BITMSK_RXOAM_F1_SEL       0x0700
+#define SUNI1x10GEXP_BITOFF_RXOAM_F1_SEL       8
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_CTRL  0x00C0
+#define SUNI1x10GEXP_BITOFF_RXOAM_FILTER_CTRL  6
+#define SUNI1x10GEXP_BITMSK_RXOAM_PX_EN        0x003F
+#define SUNI1x10GEXP_BITOFF_RXOAM_PX_EN        0
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C1,0x20C2: RXOAM Filter Configuration
+ *    Bit 15-8 RXOAM_FX_MASK
+ *    Bit 7-0  RXOAM_FX_VAL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_FX_MASK  0xFF00
+#define SUNI1x10GEXP_BITOFF_RXOAM_FX_MASK  8
+#define SUNI1x10GEXP_BITMSK_RXOAM_FX_VAL   0x00FF
+#define SUNI1x10GEXP_BITOFF_RXOAM_FX_VAl   0
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C3: RXOAM Configuration Register 2
+ *    Bit 13    RXOAM_REC_BYTE_VAL
+ *    Bit 11-10 RXOAM_BYPASS_MODE
+ *    Bit 5-0   RXOAM_PX_CLEAR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_REC_BYTE_VAL  0x2000
+#define SUNI1x10GEXP_BITMSK_RXOAM_BYPASS_MODE   0x0C00
+#define SUNI1x10GEXP_BITOFF_RXOAM_BYPASS_MODE   10
+#define SUNI1x10GEXP_BITMSK_RXOAM_PX_CLEAR      0x003F
+#define SUNI1x10GEXP_BITOFF_RXOAM_PX_CLEAR      0
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C4: RXOAM HEC Configuration
+ *    Bit 15-8 RXOAM_COSET
+ *    Bit 2    RXOAM_HEC_ERR_PKT
+ *    Bit 0    RXOAM_HEC_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_COSET        0xFF00
+#define SUNI1x10GEXP_BITOFF_RXOAM_COSET        8
+#define SUNI1x10GEXP_BITMSK_RXOAM_HEC_ERR_PKT  0x0004
+#define SUNI1x10GEXP_BITMSK_RXOAM_HEC_EN       0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C7: RXOAM Interrupt Enable
+ *    Bit 10 RXOAM_FILTER_THRSHE
+ *    Bit 9  RXOAM_OAM_ERRE
+ *    Bit 8  RXOAM_HECE_THRSHE
+ *    Bit 7  RXOAM_SOPE
+ *    Bit 6  RXOAM_RFE
+ *    Bit 5  RXOAM_LFE
+ *    Bit 4  RXOAM_DV_ERRE
+ *    Bit 3  RXOAM_DATA_INVALIDE
+ *    Bit 2  RXOAM_FILTER_DROPE
+ *    Bit 1  RXOAM_HECE
+ *    Bit 0  RXOAM_OFLE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHE  0x0400
+#define SUNI1x10GEXP_BITMSK_RXOAM_OAM_ERRE       0x0200
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHE    0x0100
+#define SUNI1x10GEXP_BITMSK_RXOAM_SOPE           0x0080
+#define SUNI1x10GEXP_BITMSK_RXOAM_RFE            0x0040
+#define SUNI1x10GEXP_BITMSK_RXOAM_LFE            0x0020
+#define SUNI1x10GEXP_BITMSK_RXOAM_DV_ERRE        0x0010
+#define SUNI1x10GEXP_BITMSK_RXOAM_DATA_INVALIDE  0x0008
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_DROPE   0x0004
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECE           0x0002
+#define SUNI1x10GEXP_BITMSK_RXOAM_OFLE           0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C8: RXOAM Interrupt Status
+ *    Bit 10 RXOAM_FILTER_THRSHI
+ *    Bit 9  RXOAM_OAM_ERRI
+ *    Bit 8  RXOAM_HECE_THRSHI
+ *    Bit 7  RXOAM_SOPI
+ *    Bit 6  RXOAM_RFI
+ *    Bit 5  RXOAM_LFI
+ *    Bit 4  RXOAM_DV_ERRI
+ *    Bit 3  RXOAM_DATA_INVALIDI
+ *    Bit 2  RXOAM_FILTER_DROPI
+ *    Bit 1  RXOAM_HECI
+ *    Bit 0  RXOAM_OFLI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHI  0x0400
+#define SUNI1x10GEXP_BITMSK_RXOAM_OAM_ERRI       0x0200
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHI    0x0100
+#define SUNI1x10GEXP_BITMSK_RXOAM_SOPI           0x0080
+#define SUNI1x10GEXP_BITMSK_RXOAM_RFI            0x0040
+#define SUNI1x10GEXP_BITMSK_RXOAM_LFI            0x0020
+#define SUNI1x10GEXP_BITMSK_RXOAM_DV_ERRI        0x0010
+#define SUNI1x10GEXP_BITMSK_RXOAM_DATA_INVALIDI  0x0008
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_DROPI   0x0004
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECI           0x0002
+#define SUNI1x10GEXP_BITMSK_RXOAM_OFLI           0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C9: RXOAM Status
+ *    Bit 10 RXOAM_FILTER_THRSHV
+ *    Bit 8  RXOAM_HECE_THRSHV
+ *    Bit 6  RXOAM_RFV
+ *    Bit 5  RXOAM_LFV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHV  0x0400
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHV    0x0100
+#define SUNI1x10GEXP_BITMSK_RXOAM_RFV            0x0040
+#define SUNI1x10GEXP_BITMSK_RXOAM_LFV            0x0020
+
+/*----------------------------------------------------------------------------
  * Register 0x2100: MSTAT Control
  *    Bit 2 MSTAT_WRITE
  *    Bit 1 MSTAT_CLEAR
  *    Bit 0 MSTAT_SNAP
  *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MSTAT_WRITE  0x0004
 #define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR  0x0002
 #define SUNI1x10GEXP_BITMSK_MSTAT_SNAP   0x0001
 
 /*----------------------------------------------------------------------------
+ * Register 0x2109: MSTAT Counter Write Address
+ *    Bit 5-0 MSTAT_WRITE_ADDRESS
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MSTAT_WRITE_ADDRESS 0x003F
+#define SUNI1x10GEXP_BITOFF_MSTAT_WRITE_ADDRESS 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2200: IFLX Global Configuration Register
+ *    Bit 15   IFLX_IRCU_ENABLE
+ *    Bit 14   IFLX_IDSWT_ENABLE
+ *    Bit 13-0 IFLX_IFD_CNT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_IRCU_ENABLE   0x8000
+#define SUNI1x10GEXP_BITMSK_IFLX_IDSWT_ENABLE  0x4000
+#define SUNI1x10GEXP_BITMSK_IFLX_IFD_CNT       0x3FFF
+#define SUNI1x10GEXP_BITOFF_IFLX_IFD_CNT       0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2209: IFLX FIFO Overflow Enable
+ *    Bit 0 IFLX_OVFE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_OVFE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x220A: IFLX FIFO Overflow Interrupt
+ *    Bit 0 IFLX_OVFI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_OVFI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x220D: IFLX Indirect Channel Address
+ *    Bit 15 IFLX_BUSY
+ *    Bit 14 IFLX_RWB
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_BUSY  0x8000
+#define SUNI1x10GEXP_BITMSK_IFLX_RWB   0x4000
+
+/*----------------------------------------------------------------------------
+ * Register 0x220E: IFLX Indirect Logical FIFO Low Limit & Provision
+ *    Bit 9-0 IFLX_LOLIM
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_LOLIM  0x03FF
+#define SUNI1x10GEXP_BITOFF_IFLX_LOLIM  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x220F: IFLX Indirect Logical FIFO High Limit
+ *    Bit 9-0 IFLX_HILIM
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_HILIM  0x03FF
+#define SUNI1x10GEXP_BITOFF_IFLX_HILIM  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2210: IFLX Indirect Full/Almost Full Status & Limit
+ *    Bit 15   IFLX_FULL
+ *    Bit 14   IFLX_AFULL
+ *    Bit 13-0 IFLX_AFTH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_FULL   0x8000
+#define SUNI1x10GEXP_BITMSK_IFLX_AFULL  0x4000
+#define SUNI1x10GEXP_BITMSK_IFLX_AFTH   0x3FFF
+#define SUNI1x10GEXP_BITOFF_IFLX_AFTH   0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2211: IFLX Indirect Empty/Almost Empty Status & Limit
+ *    Bit 15   IFLX_EMPTY
+ *    Bit 14   IFLX_AEMPTY
+ *    Bit 13-0 IFLX_AETH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_EMPTY   0x8000
+#define SUNI1x10GEXP_BITMSK_IFLX_AEMPTY  0x4000
+#define SUNI1x10GEXP_BITMSK_IFLX_AETH    0x3FFF
+#define SUNI1x10GEXP_BITOFF_IFLX_AETH    0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2240: PL4MOS Configuration Register
+ *    Bit 3 PL4MOS_RE_INIT
+ *    Bit 2 PL4MOS_EN
+ *    Bit 1 PL4MOS_NO_STATUS
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4MOS_RE_INIT          0x0008
+#define SUNI1x10GEXP_BITMSK_PL4MOS_EN               0x0004
+#define SUNI1x10GEXP_BITMSK_PL4MOS_NO_STATUS        0x0002
+
+/*----------------------------------------------------------------------------
+ * Register 0x2243: PL4MOS MaxBurst1 Register
+ *    Bit 11-0 PL4MOS_MAX_BURST1
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_BURST1  0x0FFF
+#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_BURST1  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2244: PL4MOS MaxBurst2 Register
+ *    Bit 11-0 PL4MOS_MAX_BURST2
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_BURST2  0x0FFF
+#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_BURST2  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2245: PL4MOS Transfer Size Register
+ *    Bit 7-0 PL4MOS_MAX_TRANSFER
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_TRANSFER  0x00FF
+#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_TRANSFER  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2280: PL4ODP Configuration
+ *    Bit 15-12 PL4ODP_REPEAT_T
+ *    Bit 8     PL4ODP_SOP_RULE
+ *    Bit 1     PL4ODP_EN_PORTS
+ *    Bit 0     PL4ODP_EN_DFWD
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4ODP_REPEAT_T   0xF000
+#define SUNI1x10GEXP_BITOFF_PL4ODP_REPEAT_T   12
+#define SUNI1x10GEXP_BITMSK_PL4ODP_SOP_RULE   0x0100
+#define SUNI1x10GEXP_BITMSK_PL4ODP_EN_PORTS   0x0002
+#define SUNI1x10GEXP_BITMSK_PL4ODP_EN_DFWD    0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2282: PL4ODP Interrupt Mask
+ *    Bit 0 PL4ODP_OUT_DISE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4ODP_OUT_DISE     0x0001
+
+
+
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_EOPEOBE  0x0080
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_ERREOPE  0x0040
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MEOPE    0x0008
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MSOPE    0x0004
+#define SUNI1x10GEXP_BITMSK_PL4ODP_ES_OVRE      0x0002
+
+
+/*----------------------------------------------------------------------------
+ * Register 0x2283: PL4ODP Interrupt
+ *    Bit 0 PL4ODP_OUT_DISI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4ODP_OUT_DISI     0x0001
+
+
+
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_EOPEOBI  0x0080
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_ERREOPI  0x0040
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MEOPI    0x0008
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MSOPI    0x0004
+#define SUNI1x10GEXP_BITMSK_PL4ODP_ES_OVRI      0x0002
+
+/*----------------------------------------------------------------------------
+ * Register 0x2300:  PL4IO Lock Detect Status
+ *    Bit 15 PL4IO_OUT_ROOLV
+ *    Bit 12 PL4IO_IS_ROOLV
+ *    Bit 11 PL4IO_DIP2_ERRV
+ *    Bit 8  PL4IO_ID_ROOLV
+ *    Bit 4  PL4IO_IS_DOOLV
+ *    Bit 0  PL4IO_ID_DOOLV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLV  0x8000
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLV   0x1000
+#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRV  0x0800
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLV   0x0100
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLV   0x0010
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLV   0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2301:  PL4IO Lock Detect Change
+ *    Bit 15 PL4IO_OUT_ROOLI
+ *    Bit 12 PL4IO_IS_ROOLI
+ *    Bit 11 PL4IO_DIP2_ERRI
+ *    Bit 8  PL4IO_ID_ROOLI
+ *    Bit 4  PL4IO_IS_DOOLI
+ *    Bit 0  PL4IO_ID_DOOLI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLI  0x8000
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLI   0x1000
+#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRI  0x0800
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLI   0x0100
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLI   0x0010
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLI   0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2302:  PL4IO Lock Detect Mask
+ *    Bit 15 PL4IO_OUT_ROOLE
+ *    Bit 12 PL4IO_IS_ROOLE
+ *    Bit 11 PL4IO_DIP2_ERRE
+ *    Bit 8  PL4IO_ID_ROOLE
+ *    Bit 4  PL4IO_IS_DOOLE
+ *    Bit 0  PL4IO_ID_DOOLE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLE  0x8000
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLE   0x1000
+#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRE  0x0800
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLE   0x0100
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLE   0x0010
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLE   0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2303:  PL4IO Lock Detect Limits
+ *    Bit 15-8 PL4IO_REF_LIMIT
+ *    Bit 7-0  PL4IO_TRAN_LIMIT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_REF_LIMIT   0xFF00
+#define SUNI1x10GEXP_BITOFF_PL4IO_REF_LIMIT   8
+#define SUNI1x10GEXP_BITMSK_PL4IO_TRAN_LIMIT  0x00FF
+#define SUNI1x10GEXP_BITOFF_PL4IO_TRAN_LIMIT  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2304:  PL4IO Calendar Repetitions
+ *    Bit 15-8 PL4IO_IN_MUL
+ *    Bit 7-0  PL4IO_OUT_MUL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_IN_MUL   0xFF00
+#define SUNI1x10GEXP_BITOFF_PL4IO_IN_MUL   8
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_MUL  0x00FF
+#define SUNI1x10GEXP_BITOFF_PL4IO_OUT_MUL  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2305:  PL4IO Configuration
+ *    Bit 15  PL4IO_DIP2_ERR_CHK
+ *    Bit 11  PL4IO_ODAT_DIS
+ *    Bit 10  PL4IO_TRAIN_DIS
+ *    Bit 9   PL4IO_OSTAT_DIS
+ *    Bit 8   PL4IO_ISTAT_DIS
+ *    Bit 7   PL4IO_NO_ISTAT
+ *    Bit 6   PL4IO_STAT_OUTSEL
+ *    Bit 5   PL4IO_INSEL
+ *    Bit 4   PL4IO_DLSEL
+ *    Bit 1-0 PL4IO_OUTSEL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERR_CHK  0x8000
+#define SUNI1x10GEXP_BITMSK_PL4IO_ODAT_DIS      0x0800
+#define SUNI1x10GEXP_BITMSK_PL4IO_TRAIN_DIS     0x0400
+#define SUNI1x10GEXP_BITMSK_PL4IO_OSTAT_DIS     0x0200
+#define SUNI1x10GEXP_BITMSK_PL4IO_ISTAT_DIS     0x0100
+#define SUNI1x10GEXP_BITMSK_PL4IO_NO_ISTAT      0x0080
+#define SUNI1x10GEXP_BITMSK_PL4IO_STAT_OUTSEL   0x0040
+#define SUNI1x10GEXP_BITMSK_PL4IO_INSEL         0x0020
+#define SUNI1x10GEXP_BITMSK_PL4IO_DLSEL         0x0010
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUTSEL        0x0003
+#define SUNI1x10GEXP_BITOFF_PL4IO_OUTSEL        0
+
+/*----------------------------------------------------------------------------
  * Register 0x3040: TXXG Configuration Register 1
  *    Bit 15   TXXG_TXEN0
  *    Bit 13   TXXG_HOSTPAUSE
@@ -202,12 +1378,266 @@
  *    Bit 0    TXXG_SPRE
  *----------------------------------------------------------------------------*/
 #define SUNI1x10GEXP_BITMSK_TXXG_TXEN0        0x8000
+#define SUNI1x10GEXP_BITMSK_TXXG_HOSTPAUSE    0x2000
+#define SUNI1x10GEXP_BITMSK_TXXG_IPGT         0x1F80
 #define SUNI1x10GEXP_BITOFF_TXXG_IPGT         7
 #define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN  0x0020
 #define SUNI1x10GEXP_BITMSK_TXXG_CRCEN        0x0010
 #define SUNI1x10GEXP_BITMSK_TXXG_FCTX         0x0008
 #define SUNI1x10GEXP_BITMSK_TXXG_FCRX         0x0004
 #define SUNI1x10GEXP_BITMSK_TXXG_PADEN        0x0002
+#define SUNI1x10GEXP_BITMSK_TXXG_SPRE         0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3041: TXXG Configuration Register 2
+ *    Bit 7-0   TXXG_HDRSIZE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_HDRSIZE  0x00FF
+
+/*----------------------------------------------------------------------------
+ * Register 0x3042: TXXG Configuration Register 3
+ *    Bit 15 TXXG_FIFO_ERRE
+ *    Bit 14 TXXG_FIFO_UDRE
+ *    Bit 13 TXXG_MAX_LERRE
+ *    Bit 12 TXXG_MIN_LERRE
+ *    Bit 11 TXXG_XFERE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_ERRE  0x8000
+#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_UDRE  0x4000
+#define SUNI1x10GEXP_BITMSK_TXXG_MAX_LERRE  0x2000
+#define SUNI1x10GEXP_BITMSK_TXXG_MIN_LERRE  0x1000
+#define SUNI1x10GEXP_BITMSK_TXXG_XFERE      0x0800
+
+/*----------------------------------------------------------------------------
+ * Register 0x3043: TXXG Interrupt
+ *    Bit 15 TXXG_FIFO_ERRI
+ *    Bit 14 TXXG_FIFO_UDRI
+ *    Bit 13 TXXG_MAX_LERRI
+ *    Bit 12 TXXG_MIN_LERRI
+ *    Bit 11 TXXG_XFERI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_ERRI  0x8000
+#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_UDRI  0x4000
+#define SUNI1x10GEXP_BITMSK_TXXG_MAX_LERRI  0x2000
+#define SUNI1x10GEXP_BITMSK_TXXG_MIN_LERRI  0x1000
+#define SUNI1x10GEXP_BITMSK_TXXG_XFERI      0x0800
+
+/*----------------------------------------------------------------------------
+ * Register 0x3044: TXXG Status Register
+ *    Bit 1 TXXG_TXACTIVE
+ *    Bit 0 TXXG_PAUSED
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_TXACTIVE  0x0002
+#define SUNI1x10GEXP_BITMSK_TXXG_PAUSED    0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3046: TXXG TX_MINFR -  Transmit Min Frame Size Register
+ *    Bit 7-0 TXXG_TX_MINFR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_TX_MINFR  0x00FF
+#define SUNI1x10GEXP_BITOFF_TXXG_TX_MINFR  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3052: TXXG Pause Quantum Value Configuration Register
+ *    Bit 7-0 TXXG_FC_PAUSE_QNTM
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_FC_PAUSE_QNTM  0x00FF
+#define SUNI1x10GEXP_BITOFF_TXXG_FC_PAUSE_QNTM  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3080: XTEF Control
+ *    Bit 3-0 XTEF_FORCE_PARITY_ERR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XTEF_FORCE_PARITY_ERR  0x000F
+#define SUNI1x10GEXP_BITOFF_XTEF_FORCE_PARITY_ERR  0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3084: XTEF Interrupt Event Register
+ *    Bit 0 XTEF_LOST_SYNCI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCI  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3085: XTEF Interrupt Enable Register
+ *    Bit 0 XTEF_LOST_SYNCE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCE  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3086: XTEF Visibility Register
+ *    Bit 0 XTEF_LOST_SYNCV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCV  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C0: TXOAM OAM Configuration
+ *    Bit 15   TXOAM_HEC_EN
+ *    Bit 14   TXOAM_EMPTYCODE_EN
+ *    Bit 13   TXOAM_FORCE_IDLE
+ *    Bit 12   TXOAM_IGNORE_IDLE
+ *    Bit 11-6 TXOAM_PX_OVERWRITE
+ *    Bit 5-0  TXOAM_PX_SEL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_HEC_EN        0x8000
+#define SUNI1x10GEXP_BITMSK_TXOAM_EMPTYCODE_EN  0x4000
+#define SUNI1x10GEXP_BITMSK_TXOAM_FORCE_IDLE    0x2000
+#define SUNI1x10GEXP_BITMSK_TXOAM_IGNORE_IDLE   0x1000
+#define SUNI1x10GEXP_BITMSK_TXOAM_PX_OVERWRITE  0x0FC0
+#define SUNI1x10GEXP_BITOFF_TXOAM_PX_OVERWRITE  6
+#define SUNI1x10GEXP_BITMSK_TXOAM_PX_SEL        0x003F
+#define SUNI1x10GEXP_BITOFF_TXOAM_PX_SEL        0
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C1: TXOAM Mini-Packet Rate Configuration
+ *    Bit 15   TXOAM_MINIDIS
+ *    Bit 14   TXOAM_BUSY
+ *    Bit 13   TXOAM_TRANS_EN
+ *    Bit 10-0 TXOAM_MINIRATE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_MINIDIS   0x8000
+#define SUNI1x10GEXP_BITMSK_TXOAM_BUSY      0x4000
+#define SUNI1x10GEXP_BITMSK_TXOAM_TRANS_EN  0x2000
+#define SUNI1x10GEXP_BITMSK_TXOAM_MINIRATE  0x07FF
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C2: TXOAM Mini-Packet Gap and FIFO Configuration
+ *    Bit 13-10 TXOAM_FTHRESH
+ *    Bit 9-6   TXOAM_MINIPOST
+ *    Bit 5-0   TXOAM_MINIPRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_FTHRESH   0x3C00
+#define SUNI1x10GEXP_BITOFF_TXOAM_FTHRESH   10
+#define SUNI1x10GEXP_BITMSK_TXOAM_MINIPOST  0x03C0
+#define SUNI1x10GEXP_BITOFF_TXOAM_MINIPOST  6
+#define SUNI1x10GEXP_BITMSK_TXOAM_MINIPRE   0x003F
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C6: TXOAM Interrupt Enable
+ *    Bit 2 TXOAM_SOP_ERRE
+ *    Bit 1 TXOAM_OFLE
+ *    Bit 0 TXOAM_ERRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_SOP_ERRE    0x0004
+#define SUNI1x10GEXP_BITMSK_TXOAM_OFLE        0x0002
+#define SUNI1x10GEXP_BITMSK_TXOAM_ERRE        0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C7: TXOAM Interrupt Status
+ *    Bit 2 TXOAM_SOP_ERRI
+ *    Bit 1 TXOAM_OFLI
+ *    Bit 0 TXOAM_ERRI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_SOP_ERRI    0x0004
+#define SUNI1x10GEXP_BITMSK_TXOAM_OFLI        0x0002
+#define SUNI1x10GEXP_BITMSK_TXOAM_ERRI        0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x30CF: TXOAM Coset
+ *    Bit 7-0 TXOAM_COSET
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_COSET  0x00FF
+
+/*----------------------------------------------------------------------------
+ * Register 0x3200: EFLX Global Configuration
+ *    Bit 15 EFLX_ERCU_EN
+ *    Bit 7  EFLX_EN_EDSWT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_ERCU_EN   0x8000
+#define SUNI1x10GEXP_BITMSK_EFLX_EN_EDSWT  0x0080
+
+/*----------------------------------------------------------------------------
+ * Register 0x3201: EFLX ERCU Global Status
+ *    Bit 13 EFLX_OVF_ERR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_OVF_ERR  0x2000
+
+/*----------------------------------------------------------------------------
+ * Register 0x3202: EFLX Indirect Channel Address
+ *    Bit 15 EFLX_BUSY
+ *    Bit 14 EFLX_RDWRB
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_BUSY   0x8000
+#define SUNI1x10GEXP_BITMSK_EFLX_RDWRB  0x4000
+
+/*----------------------------------------------------------------------------
+ * Register 0x3203: EFLX Indirect Logical FIFO Low Limit
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_LOLIM                    0x03FF
+#define SUNI1x10GEXP_BITOFF_EFLX_LOLIM                    0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3204: EFLX Indirect Logical FIFO High Limit
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_HILIM                    0x03FF
+#define SUNI1x10GEXP_BITOFF_EFLX_HILIM                    0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3205: EFLX Indirect Full/Almost-Full Status and Limit
+ *    Bit 15   EFLX_FULL
+ *    Bit 14   EFLX_AFULL
+ *    Bit 13-0 EFLX_AFTH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_FULL   0x8000
+#define SUNI1x10GEXP_BITMSK_EFLX_AFULL  0x4000
+#define SUNI1x10GEXP_BITMSK_EFLX_AFTH   0x3FFF
+#define SUNI1x10GEXP_BITOFF_EFLX_AFTH   0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3206: EFLX Indirect Empty/Almost-Empty Status and Limit
+ *    Bit 15   EFLX_EMPTY
+ *    Bit 14   EFLX_AEMPTY
+ *    Bit 13-0 EFLX_AETH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_EMPTY   0x8000
+#define SUNI1x10GEXP_BITMSK_EFLX_AEMPTY  0x4000
+#define SUNI1x10GEXP_BITMSK_EFLX_AETH    0x3FFF
+#define SUNI1x10GEXP_BITOFF_EFLX_AETH    0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3207: EFLX Indirect FIFO Cut-Through Threshold
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_CUT_THRU                 0x3FFF
+#define SUNI1x10GEXP_BITOFF_EFLX_CUT_THRU                 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x320C: EFLX FIFO Overflow Error Enable
+ *    Bit 0 EFLX_OVFE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_OVFE  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x320D: EFLX FIFO Overflow Error Indication
+ *    Bit 0 EFLX_OVFI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_OVFI  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3210: EFLX Channel Provision
+ *    Bit 0 EFLX_PROV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_PROV  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3280: PL4IDU Configuration
+ *    Bit 2 PL4IDU_SYNCH_ON_TRAIN
+ *    Bit 1 PL4IDU_EN_PORTS
+ *    Bit 0 PL4IDU_EN_DFWD
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IDU_SYNCH_ON_TRAIN  0x0004
+#define SUNI1x10GEXP_BITMSK_PL4IDU_EN_PORTS        0x0002
+#define SUNI1x10GEXP_BITMSK_PL4IDU_EN_DFWD         0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3282: PL4IDU Interrupt Mask
+ *    Bit 1 PL4IDU_DIP4E
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4E       0x0002
+
+/*----------------------------------------------------------------------------
+ * Register 0x3283: PL4IDU Interrupt
+ *    Bit 1 PL4IDU_DIP4I
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4I       0x0002
 
 #endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */
 
diff --git a/drivers/net/chelsio/tp.c b/drivers/net/chelsio/tp.c
new file mode 100644
index 0000000..0ca0b6e
--- /dev/null
+++ b/drivers/net/chelsio/tp.c
@@ -0,0 +1,178 @@
+/* $Date: 2006/02/07 04:21:54 $ $RCSfile: tp.c,v $ $Revision: 1.73 $ */
+#include "common.h"
+#include "regs.h"
+#include "tp.h"
+#ifdef CONFIG_CHELSIO_T1_1G
+#include "fpga_defs.h"
+#endif
+
+struct petp {
+	adapter_t *adapter;
+};
+
+/* Pause deadlock avoidance parameters */
+#define DROP_MSEC 16
+#define DROP_PKTS_CNT  1
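DROP_MSEC is a wall-clock window; tp_init() below converts it to TP core-clock ticks with DROP_MSEC * (tp_clk / 1000). A worked example, assuming (hypothetically) that tp_clk is supplied in Hz and the core runs at 165 MHz:

/* Illustration only: 16 ms at an assumed 165 MHz TP clock given in Hz. */
static inline u32 tp_drop_ticks_example(void)
{
	unsigned int tp_clk = 165000000;	/* assumed: 165 MHz, in Hz */

	return DROP_MSEC * (tp_clk / 1000);	/* 16 * 165000 = 2640000 ticks */
}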
+
+static void tp_init(adapter_t * ap, const struct tp_params *p,
+		    unsigned int tp_clk)
+{
+	if (t1_is_asic(ap)) {
+		u32 val;
+
+		val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
+		    F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
+		if (!p->pm_size)
+			val |= F_OFFLOAD_DISABLE;
+		else
+			val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
+			    F_TP_IN_ESPI_CHECK_TCP_CSUM;
+		writel(val, ap->regs + A_TP_IN_CONFIG);
+		writel(F_TP_OUT_CSPI_CPL |
+		       F_TP_OUT_ESPI_ETHERNET |
+		       F_TP_OUT_ESPI_GENERATE_IP_CSUM |
+		       F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
+		       ap->regs + A_TP_OUT_CONFIG);
+		writel(V_IP_TTL(64) |
+		       F_PATH_MTU /* IP DF bit */  |
+		       V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
+		       V_SYN_COOKIE_PARAMETER(29),
+		       ap->regs + A_TP_GLOBAL_CONFIG);
+		/*
+		 * Enable pause frame deadlock prevention.
+		 */
+		if (is_T2(ap) && ap->params.nports > 1) {
+			u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
+
+			writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
+			       V_DROP_TICKS_CNT(drop_ticks) |
+			       V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
+			       ap->regs + A_TP_TX_DROP_CONFIG);
+		}
+
+	}
+}
+
+void t1_tp_destroy(struct petp *tp)
+{
+	kfree(tp);
+}
+
+struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p)
+{
+	struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL);
+	if (!tp)
+		return NULL;
+
+	tp->adapter = adapter;
+
+	return tp;
+}
+
+void t1_tp_intr_enable(struct petp *tp)
+{
+	u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE);
+
+#ifdef CONFIG_CHELSIO_T1_1G
+	if (!t1_is_asic(tp->adapter)) {
+		/* FPGA */
+		writel(0xffffffff,
+		       tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE);
+		writel(tp_intr | FPGA_PCIX_INTERRUPT_TP,
+		       tp->adapter->regs + A_PL_ENABLE);
+	} else
+#endif
+	{
+		/* We don't use any TP interrupts */
+		writel(0, tp->adapter->regs + A_TP_INT_ENABLE);
+		writel(tp_intr | F_PL_INTR_TP,
+		       tp->adapter->regs + A_PL_ENABLE);
+	}
+}
+
+void t1_tp_intr_disable(struct petp *tp)
+{
+	u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE);
+
+#ifdef CONFIG_CHELSIO_T1_1G
+	if (!t1_is_asic(tp->adapter)) {
+		/* FPGA */
+		writel(0, tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE);
+		writel(tp_intr & ~FPGA_PCIX_INTERRUPT_TP,
+		       tp->adapter->regs + A_PL_ENABLE);
+	} else
+#endif
+	{
+		writel(0, tp->adapter->regs + A_TP_INT_ENABLE);
+		writel(tp_intr & ~F_PL_INTR_TP,
+		       tp->adapter->regs + A_PL_ENABLE);
+	}
+}
+
+void t1_tp_intr_clear(struct petp *tp)
+{
+#ifdef CONFIG_CHELSIO_T1_1G
+	if (!t1_is_asic(tp->adapter)) {
+		writel(0xffffffff,
+		       tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
+		writel(FPGA_PCIX_INTERRUPT_TP, tp->adapter->regs + A_PL_CAUSE);
+		return;
+	}
+#endif
+	writel(0xffffffff, tp->adapter->regs + A_TP_INT_CAUSE);
+	writel(F_PL_INTR_TP, tp->adapter->regs + A_PL_CAUSE);
+}
+
+int t1_tp_intr_handler(struct petp *tp)
+{
+	u32 cause;
+
+#ifdef CONFIG_CHELSIO_T1_1G
+	/* FPGA doesn't support TP interrupts. */
+	if (!t1_is_asic(tp->adapter))
+		return 1;
+#endif
+
+	cause = readl(tp->adapter->regs + A_TP_INT_CAUSE);
+	writel(cause, tp->adapter->regs + A_TP_INT_CAUSE);
+	return 0;
+}
+
+static void set_csum_offload(struct petp *tp, u32 csum_bit, int enable)
+{
+	u32 val = readl(tp->adapter->regs + A_TP_GLOBAL_CONFIG);
+
+	if (enable)
+		val |= csum_bit;
+	else
+		val &= ~csum_bit;
+	writel(val, tp->adapter->regs + A_TP_GLOBAL_CONFIG);
+}
+
+void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable)
+{
+	set_csum_offload(tp, F_IP_CSUM, enable);
+}
+
+void t1_tp_set_udp_checksum_offload(struct petp *tp, int enable)
+{
+	set_csum_offload(tp, F_UDP_CSUM, enable);
+}
+
+void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable)
+{
+	set_csum_offload(tp, F_TCP_CSUM, enable);
+}
+
+/*
+ * Initialize TP state.  tp_params contains initial settings for some TP
+ * parameters, particularly the one-time PM and CM settings.
+ */
+int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk)
+{
+	adapter_t *adapter = tp->adapter;
+
+	tp_init(adapter, p, tp_clk);
+	writel(F_TP_RESET, adapter->regs +  A_TP_RESET);
+	return 0;
+}
diff --git a/drivers/net/chelsio/tp.h b/drivers/net/chelsio/tp.h
new file mode 100644
index 0000000..32fc71e
--- /dev/null
+++ b/drivers/net/chelsio/tp.h
@@ -0,0 +1,73 @@
+/* $Date: 2005/03/07 23:59:05 $ $RCSfile: tp.h,v $ $Revision: 1.20 $ */
+#ifndef CHELSIO_TP_H
+#define CHELSIO_TP_H
+
+#include "common.h"
+
+#define TP_MAX_RX_COALESCING_SIZE 16224U
+
+struct tp_mib_statistics {
+
+	/* IP */
+	u32 ipInReceive_hi;
+	u32 ipInReceive_lo;
+	u32 ipInHdrErrors_hi;
+	u32 ipInHdrErrors_lo;
+	u32 ipInAddrErrors_hi;
+	u32 ipInAddrErrors_lo;
+	u32 ipInUnknownProtos_hi;
+	u32 ipInUnknownProtos_lo;
+	u32 ipInDiscards_hi;
+	u32 ipInDiscards_lo;
+	u32 ipInDelivers_hi;
+	u32 ipInDelivers_lo;
+	u32 ipOutRequests_hi;
+	u32 ipOutRequests_lo;
+	u32 ipOutDiscards_hi;
+	u32 ipOutDiscards_lo;
+	u32 ipOutNoRoutes_hi;
+	u32 ipOutNoRoutes_lo;
+	u32 ipReasmTimeout;
+	u32 ipReasmReqds;
+	u32 ipReasmOKs;
+	u32 ipReasmFails;
+
+	u32 reserved[8];
+
+	/* TCP */
+	u32 tcpActiveOpens;
+	u32 tcpPassiveOpens;
+	u32 tcpAttemptFails;
+	u32 tcpEstabResets;
+	u32 tcpOutRsts;
+	u32 tcpCurrEstab;
+	u32 tcpInSegs_hi;
+	u32 tcpInSegs_lo;
+	u32 tcpOutSegs_hi;
+	u32 tcpOutSegs_lo;
+	u32 tcpRetransSeg_hi;
+	u32 tcpRetransSeg_lo;
+	u32 tcpInErrs_hi;
+	u32 tcpInErrs_lo;
+	u32 tcpRtoMin;
+	u32 tcpRtoMax;
+};
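Each wide counter above is split across a _hi/_lo pair of 32-bit values; a 64-bit count is recovered with a shift-and-or. A minimal sketch (the helper name is illustrative, not a driver function):

static inline u64 tp_mib_counter64(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;
}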
+
+struct petp;
+struct tp_params;
+
+struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p);
+void t1_tp_destroy(struct petp *tp);
+
+void t1_tp_intr_disable(struct petp *tp);
+void t1_tp_intr_enable(struct petp *tp);
+void t1_tp_intr_clear(struct petp *tp);
+int t1_tp_intr_handler(struct petp *tp);
+
+void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps);
+void t1_tp_set_udp_checksum_offload(struct petp *tp, int enable);
+void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable);
+void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable);
+int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size);
+int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk);
+#endif
diff --git a/drivers/net/chelsio/vsc7326.c b/drivers/net/chelsio/vsc7326.c
new file mode 100644
index 0000000..85dc3b1
--- /dev/null
+++ b/drivers/net/chelsio/vsc7326.c
@@ -0,0 +1,725 @@
+/* $Date: 2006/04/28 19:20:06 $ $RCSfile: vsc7326.c,v $ $Revision: 1.19 $ */
+
+/* Driver for Vitesse VSC7326 (Schaumburg) MAC */
+
+#include "gmac.h"
+#include "elmer0.h"
+#include "vsc7326_reg.h"
+
+/* Update fast changing statistics every 15 seconds */
+#define STATS_TICK_SECS 15
+/* 30 minutes for full statistics update */
+#define MAJOR_UPDATE_TICKS (1800 / STATS_TICK_SECS)
+
+#define MAX_MTU 9600
+
+/* The egress WM value 0x01a01fff should be used only when the
+ * interface is down (MAC port disabled). This is a workaround
+ * for disabling the T2/MAC flow-control. When the interface is
+ * enabled, the WM value should be set to 0x014a03F0.
+ */
+#define WM_DISABLE	0x01a01fff
+#define WM_ENABLE	0x014a03F0
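A sketch of how the workaround above might be applied per port, assuming bank 1 of REG_HIGH_LOW_WM is the one toggled (as in the port-init tables below) and using the driver's vsc_write() accessor defined later in this file; the helper itself is illustrative, not part of the driver:

static void vsc7326_set_egress_wm(adapter_t *adapter, int port, int enabled)
{
	/* WM_ENABLE while the MAC port is up, WM_DISABLE while it is down. */
	vsc_write(adapter, REG_HIGH_LOW_WM(1, port),
		  enabled ? WM_ENABLE : WM_DISABLE);
}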
+
+struct init_table {
+	u32 addr;
+	u32 data;
+};
+
+struct _cmac_instance {
+	u32 index;
+	u32 ticks;
+};
+
+#define INITBLOCK_SLEEP	0xffffffff
+
+static void vsc_read(adapter_t *adapter, u32 addr, u32 *val)
+{
+	u32 status, vlo, vhi;
+	int i;
+
+	spin_lock_bh(&adapter->mac_lock);
+	t1_tpi_read(adapter, (addr << 2) + 4, &vlo);
+	i = 0;
+	do {
+		t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo);
+		t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi);
+		status = (vhi << 16) | vlo;
+		i++;
+	} while (((status & 1) == 0) && (i < 50));
+	if (i == 50)
+		CH_ERR("Invalid tpi read from MAC, breaking loop.\n");
+
+	t1_tpi_read(adapter, (REG_LOCAL_DATA << 2) + 4, &vlo);
+	t1_tpi_read(adapter, REG_LOCAL_DATA << 2, &vhi);
+
+	*val = (vhi << 16) | vlo;
+
+	/* CH_ERR("rd: block: 0x%x  sublock: 0x%x  reg: 0x%x  data: 0x%x\n",
+		((addr&0xe000)>>13), ((addr&0x1e00)>>9),
+		((addr&0x01fe)>>1), *val); */
+	spin_unlock_bh(&adapter->mac_lock);
+}
+
+static void vsc_write(adapter_t *adapter, u32 addr, u32 data)
+{
+	spin_lock_bh(&adapter->mac_lock);
+	t1_tpi_write(adapter, (addr << 2) + 4, data & 0xFFFF);
+	t1_tpi_write(adapter, addr << 2, (data >> 16) & 0xFFFF);
+	/* CH_ERR("wr: block: 0x%x  sublock: 0x%x  reg: 0x%x  data: 0x%x\n",
+		((addr&0xe000)>>13), ((addr&0x1e00)>>9),
+		((addr&0x01fe)>>1), data); */
+	spin_unlock_bh(&adapter->mac_lock);
+}
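vsc_read() and vsc_write() above are the only register access path to the VSC7326, so a read-modify-write is simply their composition. A minimal sketch (not part of the driver; note that each primitive takes adapter->mac_lock itself, so the pair is not atomic as a whole):

static void vsc_set_bits(adapter_t *adapter, u32 addr, u32 bits)
{
	u32 v;

	vsc_read(adapter, addr, &v);
	vsc_write(adapter, addr, v | bits);
}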
+
+/* Hard reset the MAC.  This wipes out *all* configuration. */
+static void vsc7326_full_reset(adapter_t* adapter)
+{
+	u32 val;
+	u32 result = 0xffff;
+
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~1;
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(2);
+	val |= 0x1;	/* Enable the MAC itself */
+	val |= 0x800;	/* Turn off the red LED */
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	mdelay(1);
+	vsc_write(adapter, REG_SW_RESET, 0x80000001);
+	do {
+		mdelay(1);
+		vsc_read(adapter, REG_SW_RESET, &result);
+	} while (result != 0x0);
+}
+
+static struct init_table vsc7326_reset[] = {
+	{      REG_IFACE_MODE, 0x00000000 },
+	{         REG_CRC_CFG, 0x00000020 },
+	{   REG_PLL_CLK_SPEED, 0x00050c00 },
+	{   REG_PLL_CLK_SPEED, 0x00050c00 },
+	{            REG_MSCH, 0x00002f14 },
+	{       REG_SPI4_MISC, 0x00040409 },
+	{     REG_SPI4_DESKEW, 0x00080000 },
+	{ REG_SPI4_ING_SETUP2, 0x08080004 },
+	{ REG_SPI4_ING_SETUP0, 0x04111004 },
+	{ REG_SPI4_EGR_SETUP0, 0x80001a04 },
+	{ REG_SPI4_ING_SETUP1, 0x02010000 },
+	{      REG_AGE_INC(0), 0x00000000 },
+	{      REG_AGE_INC(1), 0x00000000 },
+	{     REG_ING_CONTROL, 0x0a200011 },
+	{     REG_EGR_CONTROL, 0xa0010091 },
+};
+
+static struct init_table vsc7326_portinit[4][22] = {
+	{	/* Port 0 */
+			/* FIFO setup */
+		{           REG_DBG(0), 0x000004f0 },
+		{           REG_HDX(0), 0x00073101 },
+		{        REG_TEST(0,0), 0x00000022 },
+		{        REG_TEST(1,0), 0x00000022 },
+		{  REG_TOP_BOTTOM(0,0), 0x003f0000 },
+		{  REG_TOP_BOTTOM(1,0), 0x00120000 },
+		{ REG_HIGH_LOW_WM(0,0), 0x07460757 },
+		{ REG_HIGH_LOW_WM(1,0), WM_DISABLE },
+		{   REG_CT_THRHLD(0,0), 0x00000000 },
+		{   REG_CT_THRHLD(1,0), 0x00000000 },
+		{         REG_BUCKE(0), 0x0002ffff },
+		{         REG_BUCKI(0), 0x0002ffff },
+		{        REG_TEST(0,0), 0x00000020 },
+		{        REG_TEST(1,0), 0x00000020 },
+			/* Port config */
+		{       REG_MAX_LEN(0), 0x00002710 },
+		{     REG_PORT_FAIL(0), 0x00000002 },
+		{    REG_NORMALIZER(0), 0x00000a64 },
+		{        REG_DENORM(0), 0x00000010 },
+		{     REG_STICK_BIT(0), 0x03baa370 },
+		{     REG_DEV_SETUP(0), 0x00000083 },
+		{     REG_DEV_SETUP(0), 0x00000082 },
+		{      REG_MODE_CFG(0), 0x0200259f },
+	},
+	{	/* Port 1 */
+			/* FIFO setup */
+		{           REG_DBG(1), 0x000004f0 },
+		{           REG_HDX(1), 0x00073101 },
+		{        REG_TEST(0,1), 0x00000022 },
+		{        REG_TEST(1,1), 0x00000022 },
+		{  REG_TOP_BOTTOM(0,1), 0x007e003f },
+		{  REG_TOP_BOTTOM(1,1), 0x00240012 },
+		{ REG_HIGH_LOW_WM(0,1), 0x07460757 },
+		{ REG_HIGH_LOW_WM(1,1), WM_DISABLE },
+		{   REG_CT_THRHLD(0,1), 0x00000000 },
+		{   REG_CT_THRHLD(1,1), 0x00000000 },
+		{         REG_BUCKE(1), 0x0002ffff },
+		{         REG_BUCKI(1), 0x0002ffff },
+		{        REG_TEST(0,1), 0x00000020 },
+		{        REG_TEST(1,1), 0x00000020 },
+			/* Port config */
+		{       REG_MAX_LEN(1), 0x00002710 },
+		{     REG_PORT_FAIL(1), 0x00000002 },
+		{    REG_NORMALIZER(1), 0x00000a64 },
+		{        REG_DENORM(1), 0x00000010 },
+		{     REG_STICK_BIT(1), 0x03baa370 },
+		{     REG_DEV_SETUP(1), 0x00000083 },
+		{     REG_DEV_SETUP(1), 0x00000082 },
+		{      REG_MODE_CFG(1), 0x0200259f },
+	},
+	{	/* Port 2 */
+			/* FIFO setup */
+		{           REG_DBG(2), 0x000004f0 },
+		{           REG_HDX(2), 0x00073101 },
+		{        REG_TEST(0,2), 0x00000022 },
+		{        REG_TEST(1,2), 0x00000022 },
+		{  REG_TOP_BOTTOM(0,2), 0x00bd007e },
+		{  REG_TOP_BOTTOM(1,2), 0x00360024 },
+		{ REG_HIGH_LOW_WM(0,2), 0x07460757 },
+		{ REG_HIGH_LOW_WM(1,2), WM_DISABLE },
+		{   REG_CT_THRHLD(0,2), 0x00000000 },
+		{   REG_CT_THRHLD(1,2), 0x00000000 },
+		{         REG_BUCKE(2), 0x0002ffff },
+		{         REG_BUCKI(2), 0x0002ffff },
+		{        REG_TEST(0,2), 0x00000020 },
+		{        REG_TEST(1,2), 0x00000020 },
+			/* Port config */
+		{       REG_MAX_LEN(2), 0x00002710 },
+		{     REG_PORT_FAIL(2), 0x00000002 },
+		{    REG_NORMALIZER(2), 0x00000a64 },
+		{        REG_DENORM(2), 0x00000010 },
+		{     REG_STICK_BIT(2), 0x03baa370 },
+		{     REG_DEV_SETUP(2), 0x00000083 },
+		{     REG_DEV_SETUP(2), 0x00000082 },
+		{      REG_MODE_CFG(2), 0x0200259f },
+	},
+	{	/* Port 3 */
+			/* FIFO setup */
+		{           REG_DBG(3), 0x000004f0 },
+		{           REG_HDX(3), 0x00073101 },
+		{        REG_TEST(0,3), 0x00000022 },
+		{        REG_TEST(1,3), 0x00000022 },
+		{  REG_TOP_BOTTOM(0,3), 0x00fc00bd },
+		{  REG_TOP_BOTTOM(1,3), 0x00480036 },
+		{ REG_HIGH_LOW_WM(0,3), 0x07460757 },
+		{ REG_HIGH_LOW_WM(1,3), WM_DISABLE },
+		{   REG_CT_THRHLD(0,3), 0x00000000 },
+		{   REG_CT_THRHLD(1,3), 0x00000000 },
+		{         REG_BUCKE(3), 0x0002ffff },
+		{         REG_BUCKI(3), 0x0002ffff },
+		{        REG_TEST(0,3), 0x00000020 },
+		{        REG_TEST(1,3), 0x00000020 },
+			/* Port config */
+		{       REG_MAX_LEN(3), 0x00002710 },
+		{     REG_PORT_FAIL(3), 0x00000002 },
+		{    REG_NORMALIZER(3), 0x00000a64 },
+		{        REG_DENORM(3), 0x00000010 },
+		{     REG_STICK_BIT(3), 0x03baa370 },
+		{     REG_DEV_SETUP(3), 0x00000083 },
+		{     REG_DEV_SETUP(3), 0x00000082 },
+		{      REG_MODE_CFG(3), 0x0200259f },
+	},
+};
+
+static void run_table(adapter_t *adapter, struct init_table *ib, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		if (ib[i].addr == INITBLOCK_SLEEP) {
+			udelay(ib[i].data);
+			CH_ERR("sleep %d us\n", ib[i].data);
+		} else {
+			vsc_write(adapter, ib[i].addr, ib[i].data);
+		}
+	}
+}
+
+static int bist_rd(adapter_t *adapter, int moduleid, int address)
+{
+	int data=0;
+	u32 result=0;
+
+	if(	(address != 0x0) &&
+		(address != 0x1) &&
+		(address != 0x2) &&
+		(address != 0xd) &&
+		(address != 0xe))
+			CH_ERR("No bist address: 0x%x\n", address);
+
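+	/* BIST command word: [31:24] opcode (0 = read, 1 = write),
+	 * [23:16] address, [15:8] write data (unused here), [7:0] module id. */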
+	data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) |
+		((moduleid & 0xff) << 0));
+	vsc_write(adapter, REG_RAM_BIST_CMD, data);
+
+	udelay(10);
+
+	vsc_read(adapter, REG_RAM_BIST_RESULT, &result);
+	if((result & (1<<9)) != 0x0)
+		CH_ERR("Still in bist read: 0x%x\n", result);
+	else if((result & (1<<8)) != 0x0)
+		CH_ERR("bist read error: 0x%x\n", result);
+
+	return(result & 0xff);
+}
+
+static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
+{
+	int data=0;
+	u32 result=0;
+
+	if(	(address != 0x0) &&
+		(address != 0x1) &&
+		(address != 0x2) &&
+		(address != 0xd) &&
+		(address != 0xe))
+			CH_ERR("No bist address: 0x%x\n", address);
+
+	if( value>255 )
+		CH_ERR("Suspicious write out of range value: 0x%x\n", value);
+
+	data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) |
+		((moduleid & 0xff) << 0));
+	vsc_write(adapter, REG_RAM_BIST_CMD, data);
+
+	udelay(5);
+
+	vsc_read(adapter, REG_RAM_BIST_CMD, &result);
+	if((result & (1<<27)) != 0x0)
+		CH_ERR("Still in bist write: 0x%x\n", result);
+	else if((result & (1<<26)) != 0x0)
+		CH_ERR("bist write error: 0x%x\n", result);
+
+	return(0);
+}
+
+static int run_bist(adapter_t *adapter, int moduleid)
+{
+	/*run bist*/
+	(void) bist_wr(adapter,moduleid, 0x00, 0x02);
+	(void) bist_wr(adapter,moduleid, 0x01, 0x01);
+
+	return(0);
+}
+
+static int check_bist(adapter_t *adapter, int moduleid)
+{
+	int result=0;
+	int column=0;
+	/*check bist*/
+	result = bist_rd(adapter,moduleid, 0x02);
+	column = ((bist_rd(adapter,moduleid, 0x0e)<<8) +
+			(bist_rd(adapter,moduleid, 0x0d)));
+	if ((result & 3) != 0x3)
+		CH_ERR("Result: 0x%x  BIST error in ram %d, column: 0x%04x\n",
+			result, moduleid, column);
+	return(0);
+}
+
+static int enable_mem(adapter_t *adapter, int moduleid)
+{
+	/*enable mem*/
+	(void) bist_wr(adapter,moduleid, 0x00, 0x00);
+	return(0);
+}
+
+static int run_bist_all(adapter_t *adapter)
+{
+	int port=0;
+	u32 val=0;
+
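+	/* Put the RAMs into BIST mode and take all ports down, then run the
+	 * built-in self test on modules 13, 14, 20 and 21 before restoring
+	 * normal operation. */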
+	vsc_write(adapter, REG_MEM_BIST, 0x5);
+	vsc_read(adapter, REG_MEM_BIST, &val);
+
+	for(port=0; port<12; port++){
+		vsc_write(adapter, REG_DEV_SETUP(port), 0x0);
+	}
+
+	udelay(300);
+	vsc_write(adapter, REG_SPI4_MISC, 0x00040409);
+	udelay(300);
+
+	(void) run_bist(adapter,13);
+	(void) run_bist(adapter,14);
+	(void) run_bist(adapter,20);
+	(void) run_bist(adapter,21);
+	mdelay(200);
+	(void) check_bist(adapter,13);
+	(void) check_bist(adapter,14);
+	(void) check_bist(adapter,20);
+	(void) check_bist(adapter,21);
+	udelay(100);
+	(void) enable_mem(adapter,13);
+	(void) enable_mem(adapter,14);
+	(void) enable_mem(adapter,20);
+	(void) enable_mem(adapter,21);
+	udelay(300);
+	vsc_write(adapter, REG_SPI4_MISC, 0x60040400);
+	udelay(300);
+	for(port=0; port<12; port++){
+		vsc_write(adapter, REG_DEV_SETUP(port), 0x1);
+	}
+	udelay(300);
+	vsc_write(adapter, REG_MEM_BIST, 0x0);
+	mdelay(10);
+	return(0);
+}
+
+static int mac_intr_handler(struct cmac *mac)
+{
+	return 0;
+}
+
+static int mac_intr_enable(struct cmac *mac)
+{
+	return 0;
+}
+
+static int mac_intr_disable(struct cmac *mac)
+{
+	return 0;
+}
+
+static int mac_intr_clear(struct cmac *mac)
+{
+	return 0;
+}
+
+/* Expect MAC address to be in network byte order. */
+static int mac_set_address(struct cmac* mac, u8 addr[6])
+{
+	u32 val;
+	int port = mac->instance->index;
+
+	vsc_write(mac->adapter, REG_MAC_LOW_ADDR(port),
+		  (addr[3] << 16) | (addr[4] << 8) | addr[5]);
+	vsc_write(mac->adapter, REG_MAC_HIGH_ADDR(port),
+		  (addr[0] << 16) | (addr[1] << 8) | addr[2]);
+
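+	/* Mirror the address into the ingress frame filter registers for
+	 * this port. */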
+	vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &val);
+	val &= ~0xf0000000;
+	vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, val | (port << 28));
+
+	vsc_write(mac->adapter, REG_ING_FFILT_MASK0,
+		  0xffff0000 | (addr[4] << 8) | addr[5]);
+	vsc_write(mac->adapter, REG_ING_FFILT_MASK1,
+		  0xffff0000 | (addr[2] << 8) | addr[3]);
+	vsc_write(mac->adapter, REG_ING_FFILT_MASK2,
+		  0xffff0000 | (addr[0] << 8) | addr[1]);
+	return 0;
+}
+
+static int mac_get_address(struct cmac *mac, u8 addr[6])
+{
+	u32 addr_lo, addr_hi;
+	int port = mac->instance->index;
+
+	vsc_read(mac->adapter, REG_MAC_LOW_ADDR(port), &addr_lo);
+	vsc_read(mac->adapter, REG_MAC_HIGH_ADDR(port), &addr_hi);
+
+	addr[0] = (u8) (addr_hi >> 16);
+	addr[1] = (u8) (addr_hi >> 8);
+	addr[2] = (u8) addr_hi;
+	addr[3] = (u8) (addr_lo >> 16);
+	addr[4] = (u8) (addr_lo >> 8);
+	addr[5] = (u8) addr_lo;
+	return 0;
+}
+
+/* This is intended to reset a port, not the whole MAC */
+static int mac_reset(struct cmac *mac)
+{
+	int index = mac->instance->index;
+
+	run_table(mac->adapter, vsc7326_portinit[index],
+		  ARRAY_SIZE(vsc7326_portinit[index]));
+
+	return 0;
+}
+
+static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm)
+{
+	u32 v;
+	int port = mac->instance->index;
+
+	vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &v);
+	v |= 1 << 12;
+
+	if (t1_rx_mode_promisc(rm))
+		v &= ~(1 << (port + 16));
+	else
+		v |= 1 << (port + 16);
+
+	vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, v);
+	return 0;
+}
+
+static int mac_set_mtu(struct cmac *mac, int mtu)
+{
+	int port = mac->instance->index;
+
+	if (mtu > MAX_MTU)
+		return -EINVAL;
+
+	/* max_len includes the 14-byte Ethernet header and 4-byte FCS */
+	vsc_write(mac->adapter, REG_MAX_LEN(port), mtu + 14 + 4);
+	return 0;
+}
+
+static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex,
+				   int fc)
+{
+	u32 v;
+	int enable, port = mac->instance->index;
+
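+	/* Program MODE_CFG, DEV_SETUP, DBG and the Tx IFG for the requested
+	 * speed, then update the port's pause-frame configuration. */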
+	if (speed >= 0 && speed != SPEED_10 && speed != SPEED_100 &&
+	    speed != SPEED_1000)
+		return -1;
+	if (duplex > 0 && duplex != DUPLEX_FULL)
+		return -1;
+
+	if (speed >= 0) {
+		vsc_read(mac->adapter, REG_MODE_CFG(port), &v);
+		enable = v & 3;             /* save tx/rx enables */
+		v &= ~0xf;
+		v |= 4;                     /* full duplex */
+		if (speed == SPEED_1000)
+			v |= 8;             /* GigE */
+		enable |= v;
+		vsc_write(mac->adapter, REG_MODE_CFG(port), v);
+
+		if (speed == SPEED_1000)
+			v = 0x82;
+		else if (speed == SPEED_100)
+			v = 0x84;
+		else	/* SPEED_10 */
+			v = 0x86;
+		vsc_write(mac->adapter, REG_DEV_SETUP(port), v | 1); /* reset */
+		vsc_write(mac->adapter, REG_DEV_SETUP(port), v);
+		vsc_read(mac->adapter, REG_DBG(port), &v);
+		v &= ~0xff00;
+		if (speed == SPEED_1000)
+			v |= 0x400;
+		else if (speed == SPEED_100)
+			v |= 0x2000;
+		else	/* SPEED_10 */
+			v |= 0xff00;
+		vsc_write(mac->adapter, REG_DBG(port), v);
+
+		vsc_write(mac->adapter, REG_TX_IFG(port),
+			  speed == SPEED_1000 ? 5 : 0x11);
+		if (duplex == DUPLEX_HALF)
+			enable = 0x0;	/* 100 or 10 */
+		else if (speed == SPEED_1000)
+			enable = 0xc;
+		else	/* SPEED_100 or 10 */
+			enable = 0x4;
+		enable |= 0x9 << 10;	/* IFG1 */
+		enable |= 0x6 << 6;	/* IFG2 */
+		enable |= 0x1 << 4;	/* VLAN */
+		enable |= 0x3;		/* RX/TX EN */
+		vsc_write(mac->adapter, REG_MODE_CFG(port), enable);
+
+	}
+
+	vsc_read(mac->adapter, REG_PAUSE_CFG(port), &v);
+	v &= 0xfff0ffff;
+	v |= 0x20000;      /* xon/xoff */
+	if (fc & PAUSE_RX)
+		v |= 0x40000;
+	if (fc & PAUSE_TX)
+		v |= 0x80000;
+	if (fc == (PAUSE_RX | PAUSE_TX))
+		v |= 0x10000;
+	vsc_write(mac->adapter, REG_PAUSE_CFG(port), v);
+	return 0;
+}
+
+static int mac_enable(struct cmac *mac, int which)
+{
+	u32 val;
+	int port = mac->instance->index;
+
+	/* Write the correct WM value when the port is enabled. */
+	vsc_write(mac->adapter, REG_HIGH_LOW_WM(1,port), WM_ENABLE);
+
+	vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
+	if (which & MAC_DIRECTION_RX)
+		val |= 0x2;
+	if (which & MAC_DIRECTION_TX)
+		val |= 1;
+	vsc_write(mac->adapter, REG_MODE_CFG(port), val);
+	return 0;
+}
+
+static int mac_disable(struct cmac *mac, int which)
+{
+	u32 val;
+	int i, port = mac->instance->index;
+
+	/* Reset the port, this also writes the correct WM value */
+	mac_reset(mac);
+
+	vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
+	if (which & MAC_DIRECTION_RX)
+		val &= ~0x2;
+	if (which & MAC_DIRECTION_TX)
+		val &= ~0x1;
+	vsc_write(mac->adapter, REG_MODE_CFG(port), val);
+	vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
+
+	/* Clear stats */
+	for (i = 0; i <= 0x3a; ++i)
+		vsc_write(mac->adapter, CRA(4, port, i), 0);
+
+	/* Clear software counters */
+	memset(&mac->stats, 0, sizeof(struct cmac_statistics));
+
+	return 0;
+}
+
+static void rmon_update(struct cmac *mac, unsigned int addr, u64 *stat)
+{
+	u32 v, lo;
+
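+	/* Fold the 32-bit hardware counter into the 64-bit software
+	 * statistic: replace the low word and add 2^32 whenever the
+	 * hardware counter has wrapped since the last read. */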
+	vsc_read(mac->adapter, addr, &v);
+	lo = *stat;
+	*stat = *stat - lo + v;
+
+	if (v == 0)
+		return;
+
+	if (v < lo)
+		*stat += (1ULL << 32);
+}
+
+static void port_stats_update(struct cmac *mac)
+{
+	int port = mac->instance->index;
+
+	/* Rx stats */
+	rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK);
+	rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad);
+	rmon_update(mac, REG_RX_UNICAST(port), &mac->stats.RxUnicastFramesOK);
+	rmon_update(mac, REG_RX_MULTICAST(port),
+		    &mac->stats.RxMulticastFramesOK);
+	rmon_update(mac, REG_RX_BROADCAST(port),
+		    &mac->stats.RxBroadcastFramesOK);
+	rmon_update(mac, REG_CRC(port), &mac->stats.RxFCSErrors);
+	rmon_update(mac, REG_RX_ALIGNMENT(port), &mac->stats.RxAlignErrors);
+	rmon_update(mac, REG_RX_OVERSIZE(port),
+		    &mac->stats.RxFrameTooLongErrors);
+	rmon_update(mac, REG_RX_PAUSE(port), &mac->stats.RxPauseFrames);
+	rmon_update(mac, REG_RX_JABBERS(port), &mac->stats.RxJabberErrors);
+	rmon_update(mac, REG_RX_FRAGMENTS(port), &mac->stats.RxRuntErrors);
+	rmon_update(mac, REG_RX_UNDERSIZE(port), &mac->stats.RxRuntErrors);
+	rmon_update(mac, REG_RX_SYMBOL_CARRIER(port),
+		    &mac->stats.RxSymbolErrors);
+	rmon_update(mac, REG_RX_SIZE_1519_TO_MAX(port),
+		    &mac->stats.RxJumboFramesOK);
+
+	/* Tx stats (skip collision stats as we are full-duplex only) */
+	rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK);
+	rmon_update(mac, REG_TX_UNICAST(port), &mac->stats.TxUnicastFramesOK);
+	rmon_update(mac, REG_TX_MULTICAST(port),
+		    &mac->stats.TxMulticastFramesOK);
+	rmon_update(mac, REG_TX_BROADCAST(port),
+		    &mac->stats.TxBroadcastFramesOK);
+	rmon_update(mac, REG_TX_PAUSE(port), &mac->stats.TxPauseFrames);
+	rmon_update(mac, REG_TX_UNDERRUN(port), &mac->stats.TxUnderrun);
+	rmon_update(mac, REG_TX_SIZE_1519_TO_MAX(port),
+		    &mac->stats.TxJumboFramesOK);
+}
+
+/*
+ * This function is called periodically to accumulate the current values of the
+ * RMON counters into the port statistics.  Since the counters are only 32 bits
+ * some of them can overflow in less than a minute at GigE speeds, so this
+ * function should be called every 30 seconds or so.
+ *
+ * To cut down on reading costs we update only the octet counters at each tick
+ * and do a full update at major ticks, which can be every 30 minutes or more.
+ */
+static const struct cmac_statistics *mac_update_statistics(struct cmac *mac,
+							   int flag)
+{
+	if (flag == MAC_STATS_UPDATE_FULL ||
+	    mac->instance->ticks >= MAJOR_UPDATE_TICKS) {
+		port_stats_update(mac);
+		mac->instance->ticks = 0;
+	} else {
+		int port = mac->instance->index;
+
+		rmon_update(mac, REG_RX_OK_BYTES(port),
+			    &mac->stats.RxOctetsOK);
+		rmon_update(mac, REG_RX_BAD_BYTES(port),
+			    &mac->stats.RxOctetsBad);
+		rmon_update(mac, REG_TX_OK_BYTES(port),
+			    &mac->stats.TxOctetsOK);
+		mac->instance->ticks++;
+	}
+	return &mac->stats;
+}
+
+static void mac_destroy(struct cmac *mac)
+{
+	kfree(mac);
+}
+
+static struct cmac_ops vsc7326_ops = {
+	.destroy                  = mac_destroy,
+	.reset                    = mac_reset,
+	.interrupt_handler        = mac_intr_handler,
+	.interrupt_enable         = mac_intr_enable,
+	.interrupt_disable        = mac_intr_disable,
+	.interrupt_clear          = mac_intr_clear,
+	.enable                   = mac_enable,
+	.disable                  = mac_disable,
+	.set_mtu                  = mac_set_mtu,
+	.set_rx_mode              = mac_set_rx_mode,
+	.set_speed_duplex_fc      = mac_set_speed_duplex_fc,
+	.statistics_update        = mac_update_statistics,
+	.macaddress_get           = mac_get_address,
+	.macaddress_set           = mac_set_address,
+};
+
+static struct cmac *vsc7326_mac_create(adapter_t *adapter, int index)
+{
+	struct cmac *mac;
+	u32 val;
+	int i;
+
+	mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
+	if (!mac) return NULL;
+
+	mac->ops = &vsc7326_ops;
+	mac->instance = (cmac_instance *)(mac + 1);
+	mac->adapter  = adapter;
+
+	mac->instance->index = index;
+	mac->instance->ticks = 0;
+
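+	/* Poll the local CPU status register until the chip answers with
+	 * something other than all-ones, giving up after 10000 attempts. */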
+	i = 0;
+	do {
+		u32 vhi, vlo;
+
+		vhi = vlo = 0;
+		t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo);
+		udelay(1);
+		t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi);
+		udelay(5);
+		val = (vhi << 16) | vlo;
+	} while ((++i < 10000) && (val == 0xffffffff));
+
+	return mac;
+}
+
+static int vsc7326_mac_reset(adapter_t *adapter)
+{
+	vsc7326_full_reset(adapter);
+	(void) run_bist_all(adapter);
+	run_table(adapter, vsc7326_reset, ARRAY_SIZE(vsc7326_reset));
+	return 0;
+}
+
+struct gmac t1_vsc7326_ops = {
+	.stats_update_period = STATS_TICK_SECS,
+	.create              = vsc7326_mac_create,
+	.reset               = vsc7326_mac_reset,
+};
diff --git a/drivers/net/chelsio/vsc7326_reg.h b/drivers/net/chelsio/vsc7326_reg.h
new file mode 100644
index 0000000..491bcf7
--- /dev/null
+++ b/drivers/net/chelsio/vsc7326_reg.h
@@ -0,0 +1,286 @@
+/* $Date: 2006/04/28 19:20:17 $ $RCSfile: vsc7326_reg.h,v $ $Revision: 1.5 $ */
+#ifndef _VSC7321_REG_H_
+#define _VSC7321_REG_H_
+
+/* Register definitions for Vitesse VSC7321 (Meigs II) MAC
+ *
+ * Straight off the data sheet, VMDS-10038 Rev 2.0 and
+ * PD0011-01-14-Meigs-II 2002-12-12
+ */
+
+/* Just 'cause it's in here doesn't mean it's used. */
+
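+/* CRA() assembles a register address from a block number (3 bits), a
+ * subblock (4 bits) and a register number (8 bits); bit 0 is left clear. */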
+#define CRA(blk,sub,adr) ((((blk) & 0x7) << 13) | (((sub) & 0xf) << 9) | (((adr) & 0xff) << 1))
+
+/* System and CPU comm's registers */
+#define REG_CHIP_ID		CRA(0x7,0xf,0x00)	/* Chip ID */
+#define REG_BLADE_ID		CRA(0x7,0xf,0x01)	/* Blade ID */
+#define REG_SW_RESET		CRA(0x7,0xf,0x02)	/* Global Soft Reset */
+#define REG_MEM_BIST		CRA(0x7,0xf,0x04)	/* mem */
+#define REG_IFACE_MODE		CRA(0x7,0xf,0x07)	/* Interface mode */
+#define REG_MSCH		CRA(0x7,0x2,0x06)	/* CRC error count */
+#define REG_CRC_CNT		CRA(0x7,0x2,0x0a)	/* CRC error count */
+#define REG_CRC_CFG		CRA(0x7,0x2,0x0b)	/* CRC config */
+#define REG_SI_TRANSFER_SEL	CRA(0x7,0xf,0x18)	/* SI Transfer Select */
+#define REG_PLL_CLK_SPEED	CRA(0x7,0xf,0x19)	/* Clock Speed Selection */
+#define REG_SYS_CLK_SELECT	CRA(0x7,0xf,0x1c)	/* System Clock Select */
+#define REG_GPIO_CTRL		CRA(0x7,0xf,0x1d)	/* GPIO Control */
+#define REG_GPIO_OUT		CRA(0x7,0xf,0x1e)	/* GPIO Out */
+#define REG_GPIO_IN		CRA(0x7,0xf,0x1f)	/* GPIO In */
+#define REG_CPU_TRANSFER_SEL	CRA(0x7,0xf,0x20)	/* CPU Transfer Select */
+#define REG_LOCAL_DATA		CRA(0x7,0xf,0xfe)	/* Local CPU Data Register */
+#define REG_LOCAL_STATUS	CRA(0x7,0xf,0xff)	/* Local CPU Status Register */
+
+/* Aggregator registers */
+#define REG_AGGR_SETUP		CRA(0x7,0x1,0x00)	/* Aggregator Setup */
+#define REG_PMAP_TABLE		CRA(0x7,0x1,0x01)	/* Port map table */
+#define REG_MPLS_BIT0		CRA(0x7,0x1,0x08)	/* MPLS bit0 position */
+#define REG_MPLS_BIT1		CRA(0x7,0x1,0x09)	/* MPLS bit1 position */
+#define REG_MPLS_BIT2		CRA(0x7,0x1,0x0a)	/* MPLS bit2 position */
+#define REG_MPLS_BIT3		CRA(0x7,0x1,0x0b)	/* MPLS bit3 position */
+#define REG_MPLS_BITMASK	CRA(0x7,0x1,0x0c)	/* MPLS bit mask */
+#define REG_PRE_BIT0POS		CRA(0x7,0x1,0x10)	/* Preamble bit0 position */
+#define REG_PRE_BIT1POS		CRA(0x7,0x1,0x11)	/* Preamble bit1 position */
+#define REG_PRE_BIT2POS		CRA(0x7,0x1,0x12)	/* Preamble bit2 position */
+#define REG_PRE_BIT3POS		CRA(0x7,0x1,0x13)	/* Preamble bit3 position */
+#define REG_PRE_ERR_CNT		CRA(0x7,0x1,0x14)	/* Preamble parity error count */
+
+/* BIST registers */
+/*#define REG_RAM_BIST_CMD	CRA(0x7,0x2,0x00)*/	/* RAM BIST Command Register */
+/*#define REG_RAM_BIST_RESULT	CRA(0x7,0x2,0x01)*/	/* RAM BIST Read Status/Result */
+#define REG_RAM_BIST_CMD	CRA(0x7,0x1,0x00)	/* RAM BIST Command Register */
+#define REG_RAM_BIST_RESULT	CRA(0x7,0x1,0x01)	/* RAM BIST Read Status/Result */
+#define   BIST_PORT_SELECT	0x00			/* BIST port select */
+#define   BIST_COMMAND		0x01			/* BIST enable/disable */
+#define   BIST_STATUS		0x02			/* BIST operation status */
+#define   BIST_ERR_CNT_LSB	0x03			/* BIST error count lo 8b */
+#define   BIST_ERR_CNT_MSB	0x04			/* BIST error count hi 8b */
+#define   BIST_ERR_SEL_LSB	0x05			/* BIST error select lo 8b */
+#define   BIST_ERR_SEL_MSB	0x06			/* BIST error select hi 8b */
+#define   BIST_ERROR_STATE	0x07			/* BIST engine internal state */
+#define   BIST_ERR_ADR0		0x08			/* BIST error address lo 8b */
+#define   BIST_ERR_ADR1		0x09			/* BIST error address lomid 8b */
+#define   BIST_ERR_ADR2		0x0a			/* BIST error address himid 8b */
+#define   BIST_ERR_ADR3		0x0b			/* BIST error address hi 8b */
+
+/* FIFO registers
+ *   ie = 0 for ingress, 1 for egress
+ *   fn = FIFO number, 0-9
+ */
+#define REG_TEST(ie,fn)		CRA(0x2,ie&1,0x00+fn)	/* Mode & Test Register */
+#define REG_TOP_BOTTOM(ie,fn)	CRA(0x2,ie&1,0x10+fn)	/* FIFO Buffer Top & Bottom */
+#define REG_TAIL(ie,fn)		CRA(0x2,ie&1,0x20+fn)	/* FIFO Write Pointer */
+#define REG_HEAD(ie,fn)		CRA(0x2,ie&1,0x30+fn)	/* FIFO Read Pointer */
+#define REG_HIGH_LOW_WM(ie,fn)	CRA(0x2,ie&1,0x40+fn)	/* Flow Control Water Marks */
+#define REG_CT_THRHLD(ie,fn)	CRA(0x2,ie&1,0x50+fn)	/* Cut Through Threshold */
+#define REG_FIFO_DROP_CNT(ie,fn) CRA(0x2,ie&1,0x60+fn)	/* Drop & CRC Error Counter */
+#define REG_DEBUG_BUF_CNT(ie,fn) CRA(0x2,ie&1,0x70+fn)	/* Input Side Debug Counter */
+#define REG_BUCKI(fn) CRA(0x2,2,0x20+fn)	/* Input Side Debug Counter */
+#define REG_BUCKE(fn) CRA(0x2,3,0x20+fn)	/* Input Side Debug Counter */
+
+/* Traffic shaper buckets
+ *   ie = 0 for ingress, 1 for egress
+ *   bn = bucket number 0-10 (yes, 11 buckets)
+ */
+/* OK, this one's kinda ugly.  Some hardware designers are perverse. */
+#define REG_TRAFFIC_SHAPER_BUCKET(ie,bn) CRA(0x2,ie&1,0x0a + (bn>7) | ((bn&7)<<4))
+#define REG_TRAFFIC_SHAPER_CONTROL(ie)	CRA(0x2,ie&1,0x3b)
+
+#define REG_SRAM_ADR(ie)	CRA(0x2,ie&1,0x0e)	/* FIFO SRAM address */
+#define REG_SRAM_WR_STRB(ie)	CRA(0x2,ie&1,0x1e)	/* FIFO SRAM write strobe */
+#define REG_SRAM_RD_STRB(ie)	CRA(0x2,ie&1,0x2e)	/* FIFO SRAM read strobe */
+#define REG_SRAM_DATA_0(ie)	CRA(0x2,ie&1,0x3e)	/* FIFO SRAM data lo 8b */
+#define REG_SRAM_DATA_1(ie)	CRA(0x2,ie&1,0x4e)	/* FIFO SRAM data lomid 8b */
+#define REG_SRAM_DATA_2(ie)	CRA(0x2,ie&1,0x5e)	/* FIFO SRAM data himid 8b */
+#define REG_SRAM_DATA_3(ie)	CRA(0x2,ie&1,0x6e)	/* FIFO SRAM data hi 8b */
+#define REG_SRAM_DATA_BLK_TYPE(ie) CRA(0x2,ie&1,0x7e)	/* FIFO SRAM tag */
+/* REG_ING_CONTROL equals REG_CONTROL with ie = 0, likewise REG_EGR_CONTROL is ie = 1 */
+#define REG_CONTROL(ie)		CRA(0x2,ie&1,0x0f)	/* FIFO control */
+#define REG_ING_CONTROL		CRA(0x2,0x0,0x0f)	/* Ingress control (alias) */
+#define REG_EGR_CONTROL		CRA(0x2,0x1,0x0f)	/* Egress control (alias) */
+#define REG_AGE_TIMER(ie)	CRA(0x2,ie&1,0x1f)	/* Aging timer */
+#define REG_AGE_INC(ie)		CRA(0x2,ie&1,0x2f)	/* Aging increment */
+#define DEBUG_OUT(ie)		CRA(0x2,ie&1,0x3f)	/* Output debug counter control */
+#define DEBUG_CNT(ie)		CRA(0x2,ie&1,0x4f)	/* Output debug counter */
+
+/* SPI4 interface */
+#define REG_SPI4_MISC		CRA(0x5,0x0,0x00)	/* Misc Register */
+#define REG_SPI4_STATUS		CRA(0x5,0x0,0x01)	/* CML Status */
+#define REG_SPI4_ING_SETUP0	CRA(0x5,0x0,0x02)	/* Ingress Status Channel Setup */
+#define REG_SPI4_ING_SETUP1	CRA(0x5,0x0,0x03)	/* Ingress Data Training Setup */
+#define REG_SPI4_ING_SETUP2	CRA(0x5,0x0,0x04)	/* Ingress Data Burst Size Setup */
+#define REG_SPI4_EGR_SETUP0	CRA(0x5,0x0,0x05)	/* Egress Status Channel Setup */
+#define REG_SPI4_DBG_CNT(n)	CRA(0x5,0x0,0x10+n)	/* Debug counters 0-9 */
+#define REG_SPI4_DBG_SETUP	CRA(0x5,0x0,0x1A)	/* Debug counters setup */
+#define REG_SPI4_TEST		CRA(0x5,0x0,0x20)	/* Test Setup Register */
+#define REG_TPGEN_UP0		CRA(0x5,0x0,0x21)	/* Test Pattern generator user pattern 0 */
+#define REG_TPGEN_UP1		CRA(0x5,0x0,0x22)	/* Test Pattern generator user pattern 1 */
+#define REG_TPCHK_UP0		CRA(0x5,0x0,0x23)	/* Test Pattern checker user pattern 0 */
+#define REG_TPCHK_UP1		CRA(0x5,0x0,0x24)	/* Test Pattern checker user pattern 1 */
+#define REG_TPSAM_P0		CRA(0x5,0x0,0x25)	/* Sampled pattern 0 */
+#define REG_TPSAM_P1		CRA(0x5,0x0,0x26)	/* Sampled pattern 1 */
+#define REG_TPERR_CNT		CRA(0x5,0x0,0x27)	/* Pattern checker error counter */
+#define REG_SPI4_STICKY		CRA(0x5,0x0,0x30)	/* Sticky bits register */
+#define REG_SPI4_DBG_INH	CRA(0x5,0x0,0x31)	/* Core egress & ingress inhibit */
+#define REG_SPI4_DBG_STATUS	CRA(0x5,0x0,0x32)	/* Sampled ingress status */
+#define REG_SPI4_DBG_GRANT	CRA(0x5,0x0,0x33)	/* Ingress granted credit value */
+
+#define REG_SPI4_DESKEW		CRA(0x5,0x0,0x43)	/* Ingress deskew */
+
+/* 10GbE MAC Block Registers */
+/* Note that those registers that are exactly the same for 10GbE as for
+ * tri-speed are only defined with the version that needs a port number.
+ * Pass 0xa in those cases.
+ *
+ * Also note that despite the presence of a MAC address register, this part
+ * does no ingress MAC address filtering.  That register is used only for
+ * pause frame detection and generation.
+ */
+/* 10GbE specific, and different from tri-speed */
+#define REG_MISC_10G		CRA(0x1,0xa,0x00)	/* Misc 10GbE setup */
+#define REG_PAUSE_10G		CRA(0x1,0xa,0x01)	/* Pause register */
+#define REG_NORMALIZER_10G	CRA(0x1,0xa,0x05)	/* 10G normalizer */
+#define REG_STICKY_RX		CRA(0x1,0xa,0x06)	/* RX debug register */
+#define REG_DENORM_10G		CRA(0x1,0xa,0x07)	/* Denormalizer  */
+#define REG_STICKY_TX		CRA(0x1,0xa,0x08)	/* TX sticky bits */
+#define REG_MAX_RXHIGH		CRA(0x1,0xa,0x0a)	/* XGMII lane 0-3 debug */
+#define REG_MAX_RXLOW		CRA(0x1,0xa,0x0b)	/* XGMII lane 4-7 debug */
+#define REG_MAC_TX_STICKY	CRA(0x1,0xa,0x0c)	/* MAC Tx state sticky debug */
+#define REG_MAC_TX_RUNNING	CRA(0x1,0xa,0x0d)	/* MAC Tx state running debug */
+#define REG_TX_ABORT_AGE	CRA(0x1,0xa,0x14)	/* Aged Tx frames discarded */
+#define REG_TX_ABORT_SHORT	CRA(0x1,0xa,0x15)	/* Short Tx frames discarded */
+#define REG_TX_ABORT_TAXI	CRA(0x1,0xa,0x16)	/* Taxi error frames discarded */
+#define REG_TX_ABORT_UNDERRUN	CRA(0x1,0xa,0x17)	/* Tx Underrun abort counter */
+#define REG_TX_DENORM_DISCARD	CRA(0x1,0xa,0x18)	/* Tx denormalizer discards */
+#define REG_XAUI_STAT_A		CRA(0x1,0xa,0x20)	/* XAUI status A */
+#define REG_XAUI_STAT_B		CRA(0x1,0xa,0x21)	/* XAUI status B */
+#define REG_XAUI_STAT_C		CRA(0x1,0xa,0x22)	/* XAUI status C */
+#define REG_XAUI_CONF_A		CRA(0x1,0xa,0x23)	/* XAUI configuration A */
+#define REG_XAUI_CONF_B		CRA(0x1,0xa,0x24)	/* XAUI configuration B */
+#define REG_XAUI_CODE_GRP_CNT	CRA(0x1,0xa,0x25)	/* XAUI code group error count */
+#define REG_XAUI_CONF_TEST_A	CRA(0x1,0xa,0x26)	/* XAUI test register A */
+#define REG_PDERRCNT		CRA(0x1,0xa,0x27)	/* XAUI test register B */
+
+/* pn = port number 0-9 for tri-speed, 10 for 10GbE */
+/* Both tri-speed and 10GbE */
+#define REG_MAX_LEN(pn)		CRA(0x1,pn,0x02)	/* Max length */
+#define REG_MAC_HIGH_ADDR(pn)	CRA(0x1,pn,0x03)	/* Upper 24 bits of MAC addr */
+#define REG_MAC_LOW_ADDR(pn)	CRA(0x1,pn,0x04)	/* Lower 24 bits of MAC addr */
+
+/* tri-speed only
+ * pn = port number, 0-9
+ */
+#define REG_MODE_CFG(pn)	CRA(0x1,pn,0x00)	/* Mode configuration */
+#define REG_PAUSE_CFG(pn)	CRA(0x1,pn,0x01)	/* Pause configuration */
+#define REG_NORMALIZER(pn)	CRA(0x1,pn,0x05)	/* Normalizer */
+#define REG_TBI_STATUS(pn)	CRA(0x1,pn,0x06)	/* TBI status */
+#define REG_PCS_STATUS_DBG(pn)	CRA(0x1,pn,0x07)	/* PCS status debug */
+#define REG_PCS_CTRL(pn)	CRA(0x1,pn,0x08)	/* PCS control */
+#define REG_TBI_CONFIG(pn)	CRA(0x1,pn,0x09)	/* TBI configuration */
+#define REG_STICK_BIT(pn)	CRA(0x1,pn,0x0a)	/* Sticky bits */
+#define REG_DEV_SETUP(pn)	CRA(0x1,pn,0x0b)	/* MAC clock/reset setup */
+#define REG_DROP_CNT(pn)	CRA(0x1,pn,0x0c)	/* Drop counter */
+#define REG_PORT_POS(pn)	CRA(0x1,pn,0x0d)	/* Preamble port position */
+#define REG_PORT_FAIL(pn)	CRA(0x1,pn,0x0e)	/* Preamble port position */
+#define REG_SERDES_CONF(pn)	CRA(0x1,pn,0x0f)	/* SerDes configuration */
+#define REG_SERDES_TEST(pn)	CRA(0x1,pn,0x10)	/* SerDes test */
+#define REG_SERDES_STAT(pn)	CRA(0x1,pn,0x11)	/* SerDes status */
+#define REG_SERDES_COM_CNT(pn)	CRA(0x1,pn,0x12)	/* SerDes comma counter */
+#define REG_DENORM(pn)		CRA(0x1,pn,0x15)	/* Frame denormalization */
+#define REG_DBG(pn)		CRA(0x1,pn,0x16)	/* Device 1G debug */
+#define REG_TX_IFG(pn)		CRA(0x1,pn,0x18)	/* Tx IFG config */
+#define REG_HDX(pn)		CRA(0x1,pn,0x19)	/* Half-duplex config */
+
+/* Statistics */
+/* pn = port number, 0-a, a = 10GbE */
+#define REG_RX_IN_BYTES(pn)	CRA(0x4,pn,0x00)	/* # Rx in octets */
+#define REG_RX_SYMBOL_CARRIER(pn) CRA(0x4,pn,0x01)	/* Frames w/ symbol errors */
+#define REG_RX_PAUSE(pn)	CRA(0x4,pn,0x02)	/* # pause frames received */
+#define REG_RX_UNSUP_OPCODE(pn)	CRA(0x4,pn,0x03)	/* # control frames with unsupported opcode */
+#define REG_RX_OK_BYTES(pn)	CRA(0x4,pn,0x04)	/* # octets in good frames */
+#define REG_RX_BAD_BYTES(pn)	CRA(0x4,pn,0x05)	/* # octets in bad frames */
+#define REG_RX_UNICAST(pn)	CRA(0x4,pn,0x06)	/* # good unicast frames */
+#define REG_RX_MULTICAST(pn)	CRA(0x4,pn,0x07)	/* # good multicast frames */
+#define REG_RX_BROADCAST(pn)	CRA(0x4,pn,0x08)	/* # good broadcast frames */
+#define REG_CRC(pn)		CRA(0x4,pn,0x09)	/* # frames w/ bad CRC only */
+#define REG_RX_ALIGNMENT(pn)	CRA(0x4,pn,0x0a)	/* # frames w/ alignment err */
+#define REG_RX_UNDERSIZE(pn)	CRA(0x4,pn,0x0b)	/* # frames undersize */
+#define REG_RX_FRAGMENTS(pn)	CRA(0x4,pn,0x0c)	/* # frames undersize w/ crc err */
+#define REG_RX_IN_RANGE_LENGTH_ERROR(pn) CRA(0x4,pn,0x0d)	/* # frames with length error */
+#define REG_RX_OUT_OF_RANGE_ERROR(pn) CRA(0x4,pn,0x0e)	/* # frames with illegal length field */
+#define REG_RX_OVERSIZE(pn)	CRA(0x4,pn,0x0f)	/* # frames oversize */
+#define REG_RX_JABBERS(pn)	CRA(0x4,pn,0x10)	/* # frames oversize w/ crc err */
+#define REG_RX_SIZE_64(pn)	CRA(0x4,pn,0x11)	/* # frames 64 octets long */
+#define REG_RX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x12)	/* # frames 65-127 octets */
+#define REG_RX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x13)	/* # frames 128-255 */
+#define REG_RX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x14)	/* # frames 256-511 */
+#define REG_RX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x15)	/* # frames 512-1023 */
+#define REG_RX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x16)	/* # frames 1024-1518 */
+#define REG_RX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x17)	/* # frames 1519-max */
+
+#define REG_TX_OUT_BYTES(pn)	CRA(0x4,pn,0x18)	/* # octets tx */
+#define REG_TX_PAUSE(pn)	CRA(0x4,pn,0x19)	/* # pause frames sent */
+#define REG_TX_OK_BYTES(pn)	CRA(0x4,pn,0x1a)	/* # octets tx OK */
+#define REG_TX_UNICAST(pn)	CRA(0x4,pn,0x1b)	/* # frames unicast */
+#define REG_TX_MULTICAST(pn)	CRA(0x4,pn,0x1c)	/* # frames multicast */
+#define REG_TX_BROADCAST(pn)	CRA(0x4,pn,0x1d)	/* # frames broadcast */
+#define REG_TX_MULTIPLE_COLL(pn) CRA(0x4,pn,0x1e)	/* # frames tx after multiple collisions */
+#define REG_TX_LATE_COLL(pn)	CRA(0x4,pn,0x1f)	/* # late collisions detected */
+#define REG_TX_XCOLL(pn)	CRA(0x4,pn,0x20)	/* # frames lost, excessive collisions */
+#define REG_TX_DEFER(pn)	CRA(0x4,pn,0x21)	/* # frames deferred on first tx attempt */
+#define REG_TX_XDEFER(pn)	CRA(0x4,pn,0x22)	/* # frames excessively deferred */
+#define REG_TX_CSENSE(pn)	CRA(0x4,pn,0x23)	/* carrier sense errors at frame end */
+#define REG_TX_SIZE_64(pn)	CRA(0x4,pn,0x24)	/* # frames 64 octets long */
+#define REG_TX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x25)	/* # frames 65-127 octets */
+#define REG_TX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x26)	/* # frames 128-255 */
+#define REG_TX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x27)	/* # frames 256-511 */
+#define REG_TX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x28)	/* # frames 512-1023 */
+#define REG_TX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x29)	/* # frames 1024-1518 */
+#define REG_TX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x2a)	/* # frames 1519-max */
+#define REG_TX_SINGLE_COLL(pn)	CRA(0x4,pn,0x2b)	/* # frames tx after single collision */
+#define REG_TX_BACKOFF2(pn)	CRA(0x4,pn,0x2c)	/* # frames tx ok after 2 backoffs/collisions */
+#define REG_TX_BACKOFF3(pn)	CRA(0x4,pn,0x2d)	/*   after 3 backoffs/collisions */
+#define REG_TX_BACKOFF4(pn)	CRA(0x4,pn,0x2e)	/*   after 4 */
+#define REG_TX_BACKOFF5(pn)	CRA(0x4,pn,0x2f)	/*   after 5 */
+#define REG_TX_BACKOFF6(pn)	CRA(0x4,pn,0x30)	/*   after 6 */
+#define REG_TX_BACKOFF7(pn)	CRA(0x4,pn,0x31)	/*   after 7 */
+#define REG_TX_BACKOFF8(pn)	CRA(0x4,pn,0x32)	/*   after 8 */
+#define REG_TX_BACKOFF9(pn)	CRA(0x4,pn,0x33)	/*   after 9 */
+#define REG_TX_BACKOFF10(pn)	CRA(0x4,pn,0x34)	/*   after 10 */
+#define REG_TX_BACKOFF11(pn)	CRA(0x4,pn,0x35)	/*   after 11 */
+#define REG_TX_BACKOFF12(pn)	CRA(0x4,pn,0x36)	/*   after 12 */
+#define REG_TX_BACKOFF13(pn)	CRA(0x4,pn,0x37)	/*   after 13 */
+#define REG_TX_BACKOFF14(pn)	CRA(0x4,pn,0x38)	/*   after 14 */
+#define REG_TX_BACKOFF15(pn)	CRA(0x4,pn,0x39)	/*   after 15 */
+#define REG_TX_UNDERRUN(pn)	CRA(0x4,pn,0x3a)	/* # frames dropped from underrun */
+#define REG_RX_XGMII_PROT_ERR	CRA(0x4,0xa,0x3b)	/* # protocol errors detected on XGMII interface */
+#define REG_RX_IPG_SHRINK(pn)	CRA(0x4,pn,0x3c)	/* # of IPG shrinks detected */
+
+#define REG_STAT_STICKY1G(pn)	CRA(0x4,pn,0x3e)	/* tri-speed sticky bits */
+#define REG_STAT_STICKY10G	CRA(0x4,0xa,0x3e)	/* 10GbE sticky bits */
+#define REG_STAT_INIT(pn)	CRA(0x4,pn,0x3f)	/* Clear all statistics */
+
+/* MII-Management Block registers */
+/* These are for MII-M interface 0, which is the bidirectional LVTTL one.  If
+ * we hooked up to the one with separate directions, the middle 0x0 needs to
+ * change to 0x1.  And the current errata states that MII-M 1 doesn't work.
+ */
+
+#define REG_MIIM_STATUS		CRA(0x3,0x0,0x00)	/* MII-M Status */
+#define REG_MIIM_CMD		CRA(0x3,0x0,0x01)	/* MII-M Command */
+#define REG_MIIM_DATA		CRA(0x3,0x0,0x02)	/* MII-M Data */
+#define REG_MIIM_PRESCALE	CRA(0x3,0x0,0x03)	/* MII-M MDC Prescale */
+
+#define REG_ING_FFILT_UM_EN	CRA(0x2, 0, 0xd)
+#define REG_ING_FFILT_BE_EN	CRA(0x2, 0, 0x1d)
+#define REG_ING_FFILT_VAL0	CRA(0x2, 0, 0x2d)
+#define REG_ING_FFILT_VAL1	CRA(0x2, 0, 0x3d)
+#define REG_ING_FFILT_MASK0	CRA(0x2, 0, 0x4d)
+#define REG_ING_FFILT_MASK1	CRA(0x2, 0, 0x5d)
+#define REG_ING_FFILT_MASK2	CRA(0x2, 0, 0x6d)
+#define REG_ING_FFILT_ETYPE	CRA(0x2, 0, 0x7d)
+
+
+/* Whew. */
+
+#endif
diff --git a/drivers/net/chelsio/vsc8244.c b/drivers/net/chelsio/vsc8244.c
new file mode 100644
index 0000000..c493e78
--- /dev/null
+++ b/drivers/net/chelsio/vsc8244.c
@@ -0,0 +1,368 @@
+/*
+ * This file is part of the Chelsio T2 Ethernet driver.
+ *
+ * Copyright (C) 2005 Chelsio Communications.  All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+
+#include "common.h"
+#include "cphy.h"
+#include "elmer0.h"
+
+#ifndef ADVERTISE_PAUSE_CAP
+# define ADVERTISE_PAUSE_CAP 0x400
+#endif
+#ifndef ADVERTISE_PAUSE_ASYM
+# define ADVERTISE_PAUSE_ASYM 0x800
+#endif
+
+/* Gigabit MII registers */
+#ifndef MII_CTRL1000
+# define MII_CTRL1000 9
+#endif
+
+#ifndef ADVERTISE_1000FULL
+# define ADVERTISE_1000FULL 0x200
+# define ADVERTISE_1000HALF 0x100
+#endif
+
+/* VSC8244 PHY specific registers. */
+enum {
+	VSC8244_INTR_ENABLE   = 25,
+	VSC8244_INTR_STATUS   = 26,
+	VSC8244_AUX_CTRL_STAT = 28,
+};
+
+enum {
+	VSC_INTR_RX_ERR     = 1 << 0,
+	VSC_INTR_MS_ERR     = 1 << 1,  /* master/slave resolution error */
+	VSC_INTR_CABLE      = 1 << 2,  /* cable impairment */
+	VSC_INTR_FALSE_CARR = 1 << 3,  /* false carrier */
+	VSC_INTR_MEDIA_CHG  = 1 << 4,  /* AMS media change */
+	VSC_INTR_RX_FIFO    = 1 << 5,  /* Rx FIFO over/underflow */
+	VSC_INTR_TX_FIFO    = 1 << 6,  /* Tx FIFO over/underflow */
+	VSC_INTR_DESCRAMBL  = 1 << 7,  /* descrambler lock-lost */
+	VSC_INTR_SYMBOL_ERR = 1 << 8,  /* symbol error */
+	VSC_INTR_NEG_DONE   = 1 << 10, /* autoneg done */
+	VSC_INTR_NEG_ERR    = 1 << 11, /* autoneg error */
+	VSC_INTR_LINK_CHG   = 1 << 13, /* link change */
+	VSC_INTR_ENABLE     = 1 << 15, /* interrupt enable */
+};
+
+#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
+	 		   VSC_INTR_NEG_DONE)
+#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
+		   VSC_INTR_ENABLE)
+
+/* PHY specific auxiliary control & status register fields */
+#define S_ACSR_ACTIPHY_TMR    0
+#define M_ACSR_ACTIPHY_TMR    0x3
+#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)
+
+#define S_ACSR_SPEED    3
+#define M_ACSR_SPEED    0x3
+#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)
+
+#define S_ACSR_DUPLEX 5
+#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)
+
+#define S_ACSR_ACTIPHY 6
+#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
+
+/*
+ * Reset the PHY.  This PHY completes reset immediately so we never wait.
+ */
+static int vsc8244_reset(struct cphy *cphy, int wait)
+{
+	int err;
+	unsigned int ctl;
+
+	err = simple_mdio_read(cphy, MII_BMCR, &ctl);
+	if (err)
+		return err;
+
+	ctl &= ~BMCR_PDOWN;
+	ctl |= BMCR_RESET;
+	return simple_mdio_write(cphy, MII_BMCR, ctl);
+}
+
+static int vsc8244_intr_enable(struct cphy *cphy)
+{
+	simple_mdio_write(cphy, VSC8244_INTR_ENABLE, INTR_MASK);
+
+	/* Enable interrupts through Elmer */
+	if (t1_is_asic(cphy->adapter)) {
+		u32 elmer;
+
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+		elmer |= ELMER0_GP_BIT1;
+		if (is_T2(cphy->adapter)) {
+			elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
+		}
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	}
+
+	return 0;
+}
+
+static int vsc8244_intr_disable(struct cphy *cphy)
+{
+	simple_mdio_write(cphy, VSC8244_INTR_ENABLE, 0);
+
+	if (t1_is_asic(cphy->adapter)) {
+		u32 elmer;
+
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+		elmer &= ~ELMER0_GP_BIT1;
+		if (is_T2(cphy->adapter)) {
+			elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4);
+		}
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	}
+
+	return 0;
+}
+
+static int vsc8244_intr_clear(struct cphy *cphy)
+{
+	u32 val;
+	u32 elmer;
+
+	/* Clear PHY interrupts by reading the register. */
+	simple_mdio_read(cphy, VSC8244_INTR_ENABLE, &val);
+
+	if (t1_is_asic(cphy->adapter)) {
+		t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
+		elmer |= ELMER0_GP_BIT1;
+		if (is_T2(cphy->adapter)) {
+			elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
+		}
+		t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
+	}
+
+	return 0;
+}
+
+/*
+ * Force the PHY speed and duplex.  This also disables auto-negotiation, except
+ * for 1Gb/s, where auto-negotiation is mandatory.
+ */
+static int vsc8244_set_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+	int err;
+	unsigned int ctl;
+
+	err = simple_mdio_read(phy, MII_BMCR, &ctl);
+	if (err)
+		return err;
+
+	if (speed >= 0) {
+		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
+		if (speed == SPEED_100)
+			ctl |= BMCR_SPEED100;
+		else if (speed == SPEED_1000)
+			ctl |= BMCR_SPEED1000;
+	}
+	if (duplex >= 0) {
+		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
+		if (duplex == DUPLEX_FULL)
+			ctl |= BMCR_FULLDPLX;
+	}
+	if (ctl & BMCR_SPEED1000)  /* auto-negotiation required for 1Gb/s */
+		ctl |= BMCR_ANENABLE;
+	return simple_mdio_write(phy, MII_BMCR, ctl);
+}
+
+int t1_mdio_set_bits(struct cphy *phy, int mmd, int reg, unsigned int bits)
+{
+	int ret;
+	unsigned int val;
+
+	ret = mdio_read(phy, mmd, reg, &val);
+	if (!ret)
+		ret = mdio_write(phy, mmd, reg, val | bits);
+	return ret;
+}
+
+static int vsc8244_autoneg_enable(struct cphy *cphy)
+{
+	return t1_mdio_set_bits(cphy, 0, MII_BMCR,
+				BMCR_ANENABLE | BMCR_ANRESTART);
+}
+
+static int vsc8244_autoneg_restart(struct cphy *cphy)
+{
+	return t1_mdio_set_bits(cphy, 0, MII_BMCR, BMCR_ANRESTART);
+}
+
+static int vsc8244_advertise(struct cphy *phy, unsigned int advertise_map)
+{
+	int err;
+	unsigned int val = 0;
+
+	err = simple_mdio_read(phy, MII_CTRL1000, &val);
+	if (err)
+		return err;
+
+	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+	if (advertise_map & ADVERTISED_1000baseT_Half)
+		val |= ADVERTISE_1000HALF;
+	if (advertise_map & ADVERTISED_1000baseT_Full)
+		val |= ADVERTISE_1000FULL;
+
+	err = simple_mdio_write(phy, MII_CTRL1000, val);
+	if (err)
+		return err;
+
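+	/* Start with val = 1: the selector field of MII_ADVERTISE, which
+	 * selects IEEE 802.3; the ability bits are ORed in below. */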
+	val = 1;
+	if (advertise_map & ADVERTISED_10baseT_Half)
+		val |= ADVERTISE_10HALF;
+	if (advertise_map & ADVERTISED_10baseT_Full)
+		val |= ADVERTISE_10FULL;
+	if (advertise_map & ADVERTISED_100baseT_Half)
+		val |= ADVERTISE_100HALF;
+	if (advertise_map & ADVERTISED_100baseT_Full)
+		val |= ADVERTISE_100FULL;
+	if (advertise_map & ADVERTISED_PAUSE)
+		val |= ADVERTISE_PAUSE_CAP;
+	if (advertise_map & ADVERTISED_ASYM_PAUSE)
+		val |= ADVERTISE_PAUSE_ASYM;
+	return simple_mdio_write(phy, MII_ADVERTISE, val);
+}
+
+static int vsc8244_get_link_status(struct cphy *cphy, int *link_ok,
+				     int *speed, int *duplex, int *fc)
+{
+	unsigned int bmcr, status, lpa, adv;
+	int err, sp = -1, dplx = -1, pause = 0;
+
+	err = simple_mdio_read(cphy, MII_BMCR, &bmcr);
+	if (!err)
+		err = simple_mdio_read(cphy, MII_BMSR, &status);
+	if (err)
+		return err;
+
+	if (link_ok) {
+		/*
+		 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
+		 * once more to get the current link state.
+		 */
+		if (!(status & BMSR_LSTATUS))
+			err = simple_mdio_read(cphy, MII_BMSR, &status);
+		if (err)
+			return err;
+		*link_ok = (status & BMSR_LSTATUS) != 0;
+	}
+	if (!(bmcr & BMCR_ANENABLE)) {
+		dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+		if (bmcr & BMCR_SPEED1000)
+			sp = SPEED_1000;
+		else if (bmcr & BMCR_SPEED100)
+			sp = SPEED_100;
+		else
+			sp = SPEED_10;
+	} else if (status & BMSR_ANEGCOMPLETE) {
+		err = simple_mdio_read(cphy, VSC8244_AUX_CTRL_STAT, &status);
+		if (err)
+			return err;
+
+		dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
+		sp = G_ACSR_SPEED(status);
+		if (sp == 0)
+			sp = SPEED_10;
+		else if (sp == 1)
+			sp = SPEED_100;
+		else
+			sp = SPEED_1000;
+
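+		/* Resolve flow control from our advertisement and the link
+		 * partner's abilities (symmetric/asymmetric pause
+		 * resolution). */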
+		if (fc && dplx == DUPLEX_FULL) {
+			err = simple_mdio_read(cphy, MII_LPA, &lpa);
+			if (!err)
+				err = simple_mdio_read(cphy, MII_ADVERTISE,
+						       &adv);
+			if (err)
+				return err;
+
+			if (lpa & adv & ADVERTISE_PAUSE_CAP)
+				pause = PAUSE_RX | PAUSE_TX;
+			else if ((lpa & ADVERTISE_PAUSE_CAP) &&
+				 (lpa & ADVERTISE_PAUSE_ASYM) &&
+				 (adv & ADVERTISE_PAUSE_ASYM))
+				pause = PAUSE_TX;
+			else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
+				 (adv & ADVERTISE_PAUSE_CAP))
+				pause = PAUSE_RX;
+		}
+	}
+	if (speed)
+		*speed = sp;
+	if (duplex)
+		*duplex = dplx;
+	if (fc)
+		*fc = pause;
+	return 0;
+}
+
+static int vsc8244_intr_handler(struct cphy *cphy)
+{
+	unsigned int cause;
+	int err, cphy_cause = 0;
+
+	err = simple_mdio_read(cphy, VSC8244_INTR_STATUS, &cause);
+	if (err)
+		return err;
+
+	cause &= INTR_MASK;
+	if (cause & CFG_CHG_INTR_MASK)
+		cphy_cause |= cphy_cause_link_change;
+	if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
+		cphy_cause |= cphy_cause_fifo_error;
+	return cphy_cause;
+}
+
+static void vsc8244_destroy(struct cphy *cphy)
+{
+	kfree(cphy);
+}
+
+static struct cphy_ops vsc8244_ops = {
+	.destroy              = vsc8244_destroy,
+	.reset                = vsc8244_reset,
+	.interrupt_enable     = vsc8244_intr_enable,
+	.interrupt_disable    = vsc8244_intr_disable,
+	.interrupt_clear      = vsc8244_intr_clear,
+	.interrupt_handler    = vsc8244_intr_handler,
+	.autoneg_enable       = vsc8244_autoneg_enable,
+	.autoneg_restart      = vsc8244_autoneg_restart,
+	.advertise            = vsc8244_advertise,
+	.set_speed_duplex     = vsc8244_set_speed_duplex,
+	.get_link_status      = vsc8244_get_link_status
+};
+
+static struct cphy* vsc8244_phy_create(adapter_t *adapter, int phy_addr, struct mdio_ops *mdio_ops)
+{
+	struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
+
+	if (!cphy) return NULL;
+
+	cphy_init(cphy, adapter, phy_addr, &vsc8244_ops, mdio_ops);
+
+	return cphy;
+}
+
+
+static int vsc8244_phy_reset(adapter_t* adapter)
+{
+	return 0;
+}
+
+struct gphy t1_vsc8244_ops = {
+	vsc8244_phy_create,
+	vsc8244_phy_reset
+};
+
+
diff --git a/drivers/net/chelsio/vsc8244_reg.h b/drivers/net/chelsio/vsc8244_reg.h
new file mode 100644
index 0000000..d3c1829
--- /dev/null
+++ b/drivers/net/chelsio/vsc8244_reg.h
@@ -0,0 +1,172 @@
+/* $Date: 2005/11/23 16:28:53 $ $RCSfile: vsc8244_reg.h,v $ $Revision: 1.1 $ */
+#ifndef CHELSIO_VSC8244_REG_H
+#define CHELSIO_VSC8244_REG_H
+
+#ifndef BMCR_SPEED1000
+# define BMCR_SPEED1000 0x40
+#endif
+
+#ifndef ADVERTISE_PAUSE
+# define ADVERTISE_PAUSE 0x400
+#endif
+#ifndef ADVERTISE_PAUSE_ASYM
+# define ADVERTISE_PAUSE_ASYM 0x800
+#endif
+
+/* Gigabit MII registers */
+#define MII_GBMR 1       /* 1000Base-T mode register */
+#define MII_GBCR 9       /* 1000Base-T control register */
+#define MII_GBSR 10      /* 1000Base-T status register */
+
+/* 1000Base-T control register fields */
+#define GBCR_ADV_1000HALF         0x100
+#define GBCR_ADV_1000FULL         0x200
+#define GBCR_PREFER_MASTER        0x400
+#define GBCR_MANUAL_AS_MASTER     0x800
+#define GBCR_MANUAL_CONFIG_ENABLE 0x1000
+
+/* 1000Base-T status register fields */
+#define GBSR_LP_1000HALF  0x400
+#define GBSR_LP_1000FULL  0x800
+#define GBSR_REMOTE_OK    0x1000
+#define GBSR_LOCAL_OK     0x2000
+#define GBSR_LOCAL_MASTER 0x4000
+#define GBSR_MASTER_FAULT 0x8000
+
+/* Vitesse PHY interrupt status bits. */
+#if 0
+#define VSC8244_INTR_JABBER          0x0001
+#define VSC8244_INTR_POLARITY_CHNG   0x0002
+#define VSC8244_INTR_ENG_DETECT_CHNG 0x0010
+#define VSC8244_INTR_DOWNSHIFT       0x0020
+#define VSC8244_INTR_MDI_XOVER_CHNG  0x0040
+#define VSC8244_INTR_FIFO_OVER_UNDER 0x0080
+#define VSC8244_INTR_FALSE_CARRIER   0x0100
+#define VSC8244_INTR_SYMBOL_ERROR    0x0200
+#define VSC8244_INTR_LINK_CHNG       0x0400
+#define VSC8244_INTR_AUTONEG_DONE    0x0800
+#define VSC8244_INTR_PAGE_RECV       0x1000
+#define VSC8244_INTR_DUPLEX_CHNG     0x2000
+#define VSC8244_INTR_SPEED_CHNG      0x4000
+#define VSC8244_INTR_AUTONEG_ERR     0x8000
+#else
+//#define VSC8244_INTR_JABBER          0x0001
+//#define VSC8244_INTR_POLARITY_CHNG   0x0002
+//#define VSC8244_INTR_BIT2            0x0004
+//#define VSC8244_INTR_BIT3            0x0008
+#define VSC8244_INTR_RX_ERR          0x0001
+#define VSC8244_INTR_MASTER_SLAVE    0x0002
+#define VSC8244_INTR_CABLE_IMPAIRED  0x0004
+#define VSC8244_INTR_FALSE_CARRIER   0x0008
+//#define VSC8244_INTR_ENG_DETECT_CHNG 0x0010
+//#define VSC8244_INTR_DOWNSHIFT       0x0020
+//#define VSC8244_INTR_MDI_XOVER_CHNG  0x0040
+//#define VSC8244_INTR_FIFO_OVER_UNDER 0x0080
+#define VSC8244_INTR_BIT4            0x0010
+#define VSC8244_INTR_FIFO_RX         0x0020
+#define VSC8244_INTR_FIFO_OVER_UNDER 0x0040
+#define VSC8244_INTR_LOCK_LOST       0x0080
+//#define VSC8244_INTR_FALSE_CARRIER   0x0100
+//#define VSC8244_INTR_SYMBOL_ERROR    0x0200
+//#define VSC8244_INTR_LINK_CHNG       0x0400
+//#define VSC8244_INTR_AUTONEG_DONE    0x0800
+#define VSC8244_INTR_SYMBOL_ERROR    0x0100
+#define VSC8244_INTR_ENG_DETECT_CHNG 0x0200
+#define VSC8244_INTR_AUTONEG_DONE    0x0400
+#define VSC8244_INTR_AUTONEG_ERR     0x0800
+//#define VSC8244_INTR_PAGE_RECV       0x1000
+//#define VSC8244_INTR_DUPLEX_CHNG     0x2000
+//#define VSC8244_INTR_SPEED_CHNG      0x4000
+//#define VSC8244_INTR_AUTONEG_ERR     0x8000
+#define VSC8244_INTR_DUPLEX_CHNG     0x1000
+#define VSC8244_INTR_LINK_CHNG       0x2000
+#define VSC8244_INTR_SPEED_CHNG      0x4000
+#define VSC8244_INTR_STATUS          0x8000
+#endif
+
+
+/* Vitesse PHY specific registers. */
+#define VSC8244_SPECIFIC_CNTRL_REGISTER               16
+#define VSC8244_SPECIFIC_STATUS_REGISTER              0x1c
+#define VSC8244_INTERRUPT_ENABLE_REGISTER             0x19
+#define VSC8244_INTERRUPT_STATUS_REGISTER             0x1a
+#define VSC8244_EXT_PHY_SPECIFIC_CNTRL_REGISTER       20
+#define VSC8244_RECV_ERR_CNTR_REGISTER                21
+#define VSC8244_RES_REGISTER                          22
+#define VSC8244_GLOBAL_STATUS_REGISTER                23
+#define VSC8244_LED_CONTROL_REGISTER                  24
+#define VSC8244_MANUAL_LED_OVERRIDE_REGISTER          25
+#define VSC8244_EXT_PHY_SPECIFIC_CNTRL_2_REGISTER     26
+#define VSC8244_EXT_PHY_SPECIFIC_STATUS_REGISTER      27
+#define VSC8244_VIRTUAL_CABLE_TESTER_REGISTER         28
+#define VSC8244_EXTENDED_ADDR_REGISTER                29
+#define VSC8244_EXTENDED_REGISTER                     30
+
+/* PHY specific control register fields */
+#define S_PSCR_MDI_XOVER_MODE    5
+#define M_PSCR_MDI_XOVER_MODE    0x3
+#define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE)
+#define G_PSCR_MDI_XOVER_MODE(x) (((x) >> S_PSCR_MDI_XOVER_MODE) & M_PSCR_MDI_XOVER_MODE)
+
+/* Extended PHY specific control register fields */
+#define S_DOWNSHIFT_ENABLE 8
+#define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE)
+
+#define S_DOWNSHIFT_CNT    9
+#define M_DOWNSHIFT_CNT    0x7
+#define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT)
+#define G_DOWNSHIFT_CNT(x) (((x) >> S_DOWNSHIFT_CNT) & M_DOWNSHIFT_CNT)
+
+/* PHY specific status register fields */
+#define S_PSSR_JABBER 0
+#define V_PSSR_JABBER (1 << S_PSSR_JABBER)
+
+#define S_PSSR_POLARITY 1
+#define V_PSSR_POLARITY (1 << S_PSSR_POLARITY)
+
+#define S_PSSR_RX_PAUSE 2
+#define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE)
+
+#define S_PSSR_TX_PAUSE 3
+#define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE)
+
+#define S_PSSR_ENERGY_DETECT 4
+#define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT)
+
+#define S_PSSR_DOWNSHIFT_STATUS 5
+#define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS)
+
+#define S_PSSR_MDI 6
+#define V_PSSR_MDI (1 << S_PSSR_MDI)
+
+#define S_PSSR_CABLE_LEN    7
+#define M_PSSR_CABLE_LEN    0x7
+#define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN)
+#define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN)
+
+//#define S_PSSR_LINK 10
+//#define S_PSSR_LINK 13
+#define S_PSSR_LINK 2
+#define V_PSSR_LINK (1 << S_PSSR_LINK)
+
+//#define S_PSSR_STATUS_RESOLVED 11
+//#define S_PSSR_STATUS_RESOLVED 10
+#define S_PSSR_STATUS_RESOLVED 15
+#define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED)
+
+#define S_PSSR_PAGE_RECEIVED 12
+#define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED)
+
+//#define S_PSSR_DUPLEX 13
+//#define S_PSSR_DUPLEX 12
+#define S_PSSR_DUPLEX 5
+#define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX)
+
+//#define S_PSSR_SPEED    14
+//#define S_PSSR_SPEED    14
+#define S_PSSR_SPEED    3
+#define M_PSSR_SPEED    0x3
+#define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED)
+#define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED)
+
+#endif
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 4ffc9b4..4612f71 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -588,10 +588,10 @@
 				goto out2;
 			}
 	}
-	printk(KERN_DEBUG "PP_addr at %x[%x]: 0x%x\n",
-			ioaddr, ADD_PORT, readword(ioaddr, ADD_PORT));
 
 	ioaddr &= ~3;
+	printk(KERN_DEBUG "PP_addr at %x[%x]: 0x%x\n",
+			ioaddr, ADD_PORT, readword(ioaddr, ADD_PORT));
 	writeword(ioaddr, ADD_PORT, PP_ChipID);
 
 	tmp = readword(ioaddr, DATA_PORT);
@@ -1974,7 +1974,7 @@
 	return ret;
 }
 
-void
+void __exit
 cleanup_module(void)
 {
 	unregister_netdev(dev_cs89x0);
diff --git a/drivers/net/de600.c b/drivers/net/de600.c
index 690bb40..8396e41 100644
--- a/drivers/net/de600.c
+++ b/drivers/net/de600.c
@@ -43,7 +43,6 @@
  * modify the following "#define": (see <asm/io.h> for more info)
 #define REALLY_SLOW_IO
  */
-#define SLOW_IO_BY_JUMPING /* Looks "better" than dummy write to port 0x80 :-) */
 
 /* use 0 for production, 1 for verification, >2 for debug */
 #ifdef DE600_DEBUG
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 00e2a8a..4ae0fed 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -40,6 +40,10 @@
  *
  *      v0.009: Module support fixes, multiple interfaces support, various
  *              bits. macro
+ *
+ *      v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the
+ *              PMAX requirement to only use halfword accesses to the
+ *              buffer. macro
  */
 
 #include <linux/crc32.h>
@@ -54,6 +58,7 @@
 #include <linux/spinlock.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
+#include <linux/types.h>
 
 #include <asm/addrspace.h>
 #include <asm/system.h>
@@ -67,7 +72,7 @@
 #include <asm/dec/tc.h>
 
 static char version[] __devinitdata =
-"declance.c: v0.009 by Linux MIPS DECstation task force\n";
+"declance.c: v0.010 by Linux MIPS DECstation task force\n";
 
 MODULE_AUTHOR("Linux MIPS DECstation task force");
 MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
@@ -110,24 +115,25 @@
 #define	LE_C3_BCON	0x1	/* Byte control */
 
 /* Receive message descriptor 1 */
-#define LE_R1_OWN       0x80	/* Who owns the entry */
-#define LE_R1_ERR       0x40	/* Error: if FRA, OFL, CRC or BUF is set */
-#define LE_R1_FRA       0x20	/* FRA: Frame error */
-#define LE_R1_OFL       0x10	/* OFL: Frame overflow */
-#define LE_R1_CRC       0x08	/* CRC error */
-#define LE_R1_BUF       0x04	/* BUF: Buffer error */
-#define LE_R1_SOP       0x02	/* Start of packet */
-#define LE_R1_EOP       0x01	/* End of packet */
-#define LE_R1_POK       0x03	/* Packet is complete: SOP + EOP */
+#define LE_R1_OWN	0x8000	/* Who owns the entry */
+#define LE_R1_ERR	0x4000	/* Error: if FRA, OFL, CRC or BUF is set */
+#define LE_R1_FRA	0x2000	/* FRA: Frame error */
+#define LE_R1_OFL	0x1000	/* OFL: Frame overflow */
+#define LE_R1_CRC	0x0800	/* CRC error */
+#define LE_R1_BUF	0x0400	/* BUF: Buffer error */
+#define LE_R1_SOP	0x0200	/* Start of packet */
+#define LE_R1_EOP	0x0100	/* End of packet */
+#define LE_R1_POK	0x0300	/* Packet is complete: SOP + EOP */
 
-#define LE_T1_OWN       0x80	/* Lance owns the packet */
-#define LE_T1_ERR       0x40	/* Error summary */
-#define LE_T1_EMORE     0x10	/* Error: more than one retry needed */
-#define LE_T1_EONE      0x08	/* Error: one retry needed */
-#define LE_T1_EDEF      0x04	/* Error: deferred */
-#define LE_T1_SOP       0x02	/* Start of packet */
-#define LE_T1_EOP       0x01	/* End of packet */
-#define LE_T1_POK	0x03	/* Packet is complete: SOP + EOP */
+/* Transmit message descriptor 1 */
+#define LE_T1_OWN	0x8000	/* Lance owns the packet */
+#define LE_T1_ERR	0x4000	/* Error summary */
+#define LE_T1_EMORE	0x1000	/* Error: more than one retry needed */
+#define LE_T1_EONE	0x0800	/* Error: one retry needed */
+#define LE_T1_EDEF	0x0400	/* Error: deferred */
+#define LE_T1_SOP	0x0200	/* Start of packet */
+#define LE_T1_EOP	0x0100	/* End of packet */
+#define LE_T1_POK	0x0300	/* Packet is complete: SOP + EOP */
 
 #define LE_T3_BUF       0x8000	/* Buffer error */
 #define LE_T3_UFL       0x4000	/* Error underflow */
@@ -156,69 +162,57 @@
 #undef TEST_HITS
 #define ZERO 0
 
-/* The DS2000/3000 have a linear 64 KB buffer.
-
- * The PMAD-AA has 128 kb buffer on-board.
+/*
+ * The DS2100/3100 have a linear 64 kB buffer which supports halfword
+ * accesses only.  Each halfword of the buffer is word-aligned in the
+ * CPU address space.
  *
- * The IOASIC LANCE devices use a shared memory region. This region as seen
- * from the CPU is (max) 128 KB long and has to be on an 128 KB boundary.
- * The LANCE sees this as a 64 KB long continuous memory region.
+ * The PMAD-AA has a 128 kB buffer on-board.
  *
- * The LANCE's DMA address is used as an index in this buffer and DMA takes
- * place in bursts of eight 16-Bit words which are packed into four 32-Bit words
- * by the IOASIC. This leads to a strange padding: 16 bytes of valid data followed
- * by a 16 byte gap :-(.
+ * The IOASIC LANCE devices use a shared memory region.  This region
+ * as seen from the CPU is (max) 128 kB long and has to be on an 128 kB
+ * boundary.  The LANCE sees this as a 64 kB long continuous memory
+ * region.
+ *
+ * The LANCE's DMA address is used as an index in this buffer and DMA
+ * takes place in bursts of eight 16-bit words which are packed into
+ * four 32-bit words by the IOASIC.  This leads to a strange padding:
+ * 16 bytes of valid data followed by a 16 byte gap :-(.
  */
 
 struct lance_rx_desc {
 	unsigned short rmd0;		/* low address of packet */
-	short gap0;
-	unsigned char rmd1_hadr;	/* high address of packet */
-	unsigned char rmd1_bits;	/* descriptor bits */
-	short gap1;
+	unsigned short rmd1;		/* high address of packet
+					   and descriptor bits */
 	short length;			/* 2s complement (negative!)
 					   of buffer length */
-	short gap2;
 	unsigned short mblength;	/* actual number of bytes received */
-	short gap3;
 };
 
 struct lance_tx_desc {
 	unsigned short tmd0;		/* low address of packet */
-	short gap0;
-	unsigned char tmd1_hadr;	/* high address of packet */
-	unsigned char tmd1_bits;	/* descriptor bits */
-	short gap1;
+	unsigned short tmd1;		/* high address of packet
+					   and descriptor bits */
 	short length;			/* 2s complement (negative!)
 					   of buffer length */
-	short gap2;
 	unsigned short misc;
-	short gap3;
 };
 
 
 /* First part of the LANCE initialization block, described in databook. */
 struct lance_init_block {
 	unsigned short mode;		/* pre-set mode (reg. 15) */
-	short gap0;
 
-	unsigned char phys_addr[12];	/* physical ethernet address
-					   only 0, 1, 4, 5, 8, 9 are valid
-					   2, 3, 6, 7, 10, 11 are gaps */
-	unsigned short filter[8];	/* multicast filter
-					   only 0, 2, 4, 6 are valid
-					   1, 3, 5, 7 are gaps */
+	unsigned short phys_addr[3];	/* physical ethernet address */
+	unsigned short filter[4];	/* multicast filter */
 
 	/* Receive and transmit ring base, along with extra bits. */
 	unsigned short rx_ptr;		/* receive descriptor addr */
-	short gap1;
 	unsigned short rx_len;		/* receive len and high addr */
-	short gap2;
 	unsigned short tx_ptr;		/* transmit descriptor addr */
-	short gap3;
 	unsigned short tx_len;		/* transmit len and high addr */
-	short gap4;
-	short gap5[8];
+
+	short gap[4];
 
 	/* The buffer descriptors */
 	struct lance_rx_desc brx_ring[RX_RING_SIZE];
@@ -226,15 +220,28 @@
 };
 
 #define BUF_OFFSET_CPU sizeof(struct lance_init_block)
-#define BUF_OFFSET_LNC (sizeof(struct lance_init_block)>>1)
+#define BUF_OFFSET_LNC sizeof(struct lance_init_block)
 
-#define libdesc_offset(rt, elem) \
-((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem])))))
+#define shift_off(off, type)						\
+	(type == ASIC_LANCE || type == PMAX_LANCE ? off << 1 : off)
 
-/*
- * This works *only* for the ring descriptors
- */
-#define LANCE_ADDR(x) (CPHYSADDR(x) >> 1)
+#define lib_off(rt, type)						\
+	shift_off(offsetof(struct lance_init_block, rt), type)
+
+#define lib_ptr(ib, rt, type) 						\
+	((volatile u16 *)((u8 *)(ib) + lib_off(rt, type)))
+
+#define rds_off(rt, type)						\
+	shift_off(offsetof(struct lance_rx_desc, rt), type)
+
+#define rds_ptr(rd, rt, type) 						\
+	((volatile u16 *)((u8 *)(rd) + rds_off(rt, type)))
+
+#define tds_off(rt, type)						\
+	shift_off(offsetof(struct lance_tx_desc, rt), type)
+
+#define tds_ptr(td, rt, type) 						\
+	((volatile u16 *)((u8 *)(td) + tds_off(rt, type)))
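
Editor's note: the shift_off()/lib_ptr()/rds_ptr()/tds_ptr() helpers above
all apply the same rule: on the PMAX and IOASIC variants each 16-bit word of
the init block occupies a 32-bit slot in CPU space, so a logical structure
offset has to be doubled before it is added to the CPU base address, while
the dense PMAD buffer uses it as-is.  A standalone sketch of that
translation follows; the struct and function names here are invented for
illustration.

#include <stddef.h>	/* offsetof() */

/* Mirrors the logical, ungapped lance_rx_desc layout: four 16-bit words. */
struct sketch_rx_desc {
	unsigned short rmd0;
	unsigned short rmd1;
	short length;
	unsigned short mblength;
};

/* Translate a logical byte offset into a CPU-visible byte offset. */
static size_t sketch_cpu_off(size_t off, int halfwords_word_aligned)
{
	return halfwords_word_aligned ? off << 1 : off;
}

/*
 * sketch_cpu_off(offsetof(struct sketch_rx_desc, mblength), 1) == 12 for
 * the PMAX/IOASIC layout, while the dense PMAD layout keeps the logical
 * offset of 6.
 */
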
 
 struct lance_private {
 	struct net_device *next;
@@ -242,7 +249,6 @@
 	int slot;
 	int dma_irq;
 	volatile struct lance_regs *ll;
-	volatile struct lance_init_block *init_block;
 
 	spinlock_t	lock;
 
@@ -260,8 +266,8 @@
 	char *tx_buf_ptr_cpu[TX_RING_SIZE];
 
 	/* Pointers to the ring buffers as seen from the LANCE */
-	char *rx_buf_ptr_lnc[RX_RING_SIZE];
-	char *tx_buf_ptr_lnc[TX_RING_SIZE];
+	uint rx_buf_ptr_lnc[RX_RING_SIZE];
+	uint tx_buf_ptr_lnc[TX_RING_SIZE];
 };
 
 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
@@ -294,7 +300,7 @@
 static void load_csrs(struct lance_private *lp)
 {
 	volatile struct lance_regs *ll = lp->ll;
-	int leptr;
+	uint leptr;
 
 	/* The address space as seen from the LANCE
 	 * begins at address 0. HK
@@ -316,12 +322,14 @@
  * Our specialized copy routines
  *
  */
-void cp_to_buf(const int type, void *to, const void *from, int len)
+static void cp_to_buf(const int type, void *to, const void *from, int len)
 {
 	unsigned short *tp, *fp, clen;
 	unsigned char *rtp, *rfp;
 
-	if (type == PMAX_LANCE) {
+	if (type == PMAD_LANCE) {
+		memcpy(to, from, len);
+	} else if (type == PMAX_LANCE) {
 		clen = len >> 1;
 		tp = (unsigned short *) to;
 		fp = (unsigned short *) from;
@@ -370,12 +378,14 @@
 	iob();
 }
 
-void cp_from_buf(const int type, void *to, const void *from, int len)
+static void cp_from_buf(const int type, void *to, const void *from, int len)
 {
 	unsigned short *tp, *fp, clen;
 	unsigned char *rtp, *rfp;
 
-	if (type == PMAX_LANCE) {
+	if (type == PMAD_LANCE) {
+		memcpy(to, from, len);
+	} else if (type == PMAX_LANCE) {
 		clen = len >> 1;
 		tp = (unsigned short *) to;
 		fp = (unsigned short *) from;
@@ -431,12 +441,10 @@
 static void lance_init_ring(struct net_device *dev)
 {
 	struct lance_private *lp = netdev_priv(dev);
-	volatile struct lance_init_block *ib;
-	int leptr;
+	volatile u16 *ib = (volatile u16 *)dev->mem_start;
+	uint leptr;
 	int i;
 
-	ib = (struct lance_init_block *) (dev->mem_start);
-
 	/* Lock out other processes while setting up hardware */
 	netif_stop_queue(dev);
 	lp->rx_new = lp->tx_new = 0;
@@ -445,55 +453,64 @@
 	/* Copy the ethernet address to the lance init block.
 	 * XXX bit 0 of the physical address registers has to be zero
 	 */
-	ib->phys_addr[0] = dev->dev_addr[0];
-	ib->phys_addr[1] = dev->dev_addr[1];
-	ib->phys_addr[4] = dev->dev_addr[2];
-	ib->phys_addr[5] = dev->dev_addr[3];
-	ib->phys_addr[8] = dev->dev_addr[4];
-	ib->phys_addr[9] = dev->dev_addr[5];
+	*lib_ptr(ib, phys_addr[0], lp->type) = (dev->dev_addr[1] << 8) |
+				     dev->dev_addr[0];
+	*lib_ptr(ib, phys_addr[1], lp->type) = (dev->dev_addr[3] << 8) |
+				     dev->dev_addr[2];
+	*lib_ptr(ib, phys_addr[2], lp->type) = (dev->dev_addr[5] << 8) |
+				     dev->dev_addr[4];
 	/* Setup the initialization block */
 
 	/* Setup rx descriptor pointer */
-	leptr = LANCE_ADDR(libdesc_offset(brx_ring, 0));
-	ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16);
-	ib->rx_ptr = leptr;
+	leptr = offsetof(struct lance_init_block, brx_ring);
+	*lib_ptr(ib, rx_len, lp->type) = (LANCE_LOG_RX_BUFFERS << 13) |
+					 (leptr >> 16);
+	*lib_ptr(ib, rx_ptr, lp->type) = leptr;
 	if (ZERO)
-		printk("RX ptr: %8.8x(%8.8x)\n", leptr, libdesc_offset(brx_ring, 0));
+		printk("RX ptr: %8.8x(%8.8x)\n",
+		       leptr, lib_off(brx_ring, lp->type));
 
 	/* Setup tx descriptor pointer */
-	leptr = LANCE_ADDR(libdesc_offset(btx_ring, 0));
-	ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16);
-	ib->tx_ptr = leptr;
+	leptr = offsetof(struct lance_init_block, btx_ring);
+	*lib_ptr(ib, tx_len, lp->type) = (LANCE_LOG_TX_BUFFERS << 13) |
+					 (leptr >> 16);
+	*lib_ptr(ib, tx_ptr, lp->type) = leptr;
 	if (ZERO)
-		printk("TX ptr: %8.8x(%8.8x)\n", leptr, libdesc_offset(btx_ring, 0));
+		printk("TX ptr: %8.8x(%8.8x)\n",
+		       leptr, lib_off(btx_ring, lp->type));
 
 	if (ZERO)
 		printk("TX rings:\n");
 
 	/* Setup the Tx ring entries */
 	for (i = 0; i < TX_RING_SIZE; i++) {
-		leptr = (int) lp->tx_buf_ptr_lnc[i];
-		ib->btx_ring[i].tmd0 = leptr;
-		ib->btx_ring[i].tmd1_hadr = leptr >> 16;
-		ib->btx_ring[i].tmd1_bits = 0;
-		ib->btx_ring[i].length = 0xf000;	/* The ones required by tmd2 */
-		ib->btx_ring[i].misc = 0;
+		leptr = lp->tx_buf_ptr_lnc[i];
+		*lib_ptr(ib, btx_ring[i].tmd0, lp->type) = leptr;
+		*lib_ptr(ib, btx_ring[i].tmd1, lp->type) = (leptr >> 16) &
+							   0xff;
+		*lib_ptr(ib, btx_ring[i].length, lp->type) = 0xf000;
+						/* The ones required by tmd2 */
+		*lib_ptr(ib, btx_ring[i].misc, lp->type) = 0;
 		if (i < 3 && ZERO)
-			printk("%d: 0x%8.8x(0x%8.8x)\n", i, leptr, (int) lp->tx_buf_ptr_cpu[i]);
+			printk("%d: 0x%8.8x(0x%8.8x)\n",
+			       i, leptr, (uint)lp->tx_buf_ptr_cpu[i]);
 	}
 
 	/* Setup the Rx ring entries */
 	if (ZERO)
 		printk("RX rings:\n");
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		leptr = (int) lp->rx_buf_ptr_lnc[i];
-		ib->brx_ring[i].rmd0 = leptr;
-		ib->brx_ring[i].rmd1_hadr = leptr >> 16;
-		ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
-		ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
-		ib->brx_ring[i].mblength = 0;
+		leptr = lp->rx_buf_ptr_lnc[i];
+		*lib_ptr(ib, brx_ring[i].rmd0, lp->type) = leptr;
+		*lib_ptr(ib, brx_ring[i].rmd1, lp->type) = ((leptr >> 16) &
+							    0xff) |
+							   LE_R1_OWN;
+		*lib_ptr(ib, brx_ring[i].length, lp->type) = -RX_BUFF_SIZE |
+							     0xf000;
+		*lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0;
 		if (i < 3 && ZERO)
-			printk("%d: 0x%8.8x(0x%8.8x)\n", i, leptr, (int) lp->rx_buf_ptr_cpu[i]);
+			printk("%d: 0x%8.8x(0x%8.8x)\n",
+			       i, leptr, (uint)lp->rx_buf_ptr_cpu[i]);
 	}
 	iob();
 }
@@ -511,11 +528,13 @@
 		udelay(10);
 	}
 	if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
-		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, ll->rdp);
+		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
+		       i, ll->rdp);
 		return -1;
 	}
 	if ((ll->rdp & LE_C0_ERR)) {
-		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, ll->rdp);
+		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
+		       i, ll->rdp);
 		return -1;
 	}
 	writereg(&ll->rdp, LE_C0_IDON);
@@ -528,12 +547,11 @@
 static int lance_rx(struct net_device *dev)
 {
 	struct lance_private *lp = netdev_priv(dev);
-	volatile struct lance_init_block *ib;
-	volatile struct lance_rx_desc *rd = 0;
-	unsigned char bits;
-	int len = 0;
-	struct sk_buff *skb = 0;
-	ib = (struct lance_init_block *) (dev->mem_start);
+	volatile u16 *ib = (volatile u16 *)dev->mem_start;
+	volatile u16 *rd;
+	unsigned short bits;
+	int entry, len;
+	struct sk_buff *skb;
 
 #ifdef TEST_HITS
 	{
@@ -542,19 +560,22 @@
 		printk("[");
 		for (i = 0; i < RX_RING_SIZE; i++) {
 			if (i == lp->rx_new)
-				printk("%s", ib->brx_ring[i].rmd1_bits &
+				printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
+						      lp->type) &
 					     LE_R1_OWN ? "_" : "X");
 			else
-				printk("%s", ib->brx_ring[i].rmd1_bits &
+				printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
+						      lp->type) &
 					     LE_R1_OWN ? "." : "1");
 		}
 		printk("]");
 	}
 #endif
 
-	for (rd = &ib->brx_ring[lp->rx_new];
-	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
-	     rd = &ib->brx_ring[lp->rx_new]) {
+	for (rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type);
+	     !((bits = *rds_ptr(rd, rmd1, lp->type)) & LE_R1_OWN);
+	     rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type)) {
+		entry = lp->rx_new;
 
 		/* We got an incomplete frame? */
 		if ((bits & LE_R1_POK) != LE_R1_POK) {
@@ -575,16 +596,18 @@
 			if (bits & LE_R1_EOP)
 				lp->stats.rx_errors++;
 		} else {
-			len = (rd->mblength & 0xfff) - 4;
+			len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
 			skb = dev_alloc_skb(len + 2);
 
 			if (skb == 0) {
 				printk("%s: Memory squeeze, deferring packet.\n",
 				       dev->name);
 				lp->stats.rx_dropped++;
-				rd->mblength = 0;
-				rd->rmd1_bits = LE_R1_OWN;
-				lp->rx_new = (lp->rx_new + 1) & RX_RING_MOD_MASK;
+				*rds_ptr(rd, mblength, lp->type) = 0;
+				*rds_ptr(rd, rmd1, lp->type) =
+					((lp->rx_buf_ptr_lnc[entry] >> 16) &
+					 0xff) | LE_R1_OWN;
+				lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
 				return 0;
 			}
 			lp->stats.rx_bytes += len;
@@ -594,8 +617,7 @@
 			skb_put(skb, len);	/* make room */
 
 			cp_from_buf(lp->type, skb->data,
-				    (char *)lp->rx_buf_ptr_cpu[lp->rx_new],
-				    len);
+				    (char *)lp->rx_buf_ptr_cpu[entry], len);
 
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_rx(skb);
@@ -604,10 +626,11 @@
 		}
 
 		/* Return the packet to the pool */
-		rd->mblength = 0;
-		rd->length = -RX_BUFF_SIZE | 0xf000;
-		rd->rmd1_bits = LE_R1_OWN;
-		lp->rx_new = (lp->rx_new + 1) & RX_RING_MOD_MASK;
+		*rds_ptr(rd, mblength, lp->type) = 0;
+		*rds_ptr(rd, length, lp->type) = -RX_BUFF_SIZE | 0xf000;
+		*rds_ptr(rd, rmd1, lp->type) =
+			((lp->rx_buf_ptr_lnc[entry] >> 16) & 0xff) | LE_R1_OWN;
+		lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
 	}
 	return 0;
 }
@@ -615,24 +638,24 @@
 static void lance_tx(struct net_device *dev)
 {
 	struct lance_private *lp = netdev_priv(dev);
-	volatile struct lance_init_block *ib;
+	volatile u16 *ib = (volatile u16 *)dev->mem_start;
 	volatile struct lance_regs *ll = lp->ll;
-	volatile struct lance_tx_desc *td;
+	volatile u16 *td;
 	int i, j;
 	int status;
-	ib = (struct lance_init_block *) (dev->mem_start);
+
 	j = lp->tx_old;
 
 	spin_lock(&lp->lock);
 
 	for (i = j; i != lp->tx_new; i = j) {
-		td = &ib->btx_ring[i];
+		td = lib_ptr(ib, btx_ring[i], lp->type);
 		/* If we hit a packet not owned by us, stop */
-		if (td->tmd1_bits & LE_T1_OWN)
+		if (*tds_ptr(td, tmd1, lp->type) & LE_T1_OWN)
 			break;
 
-		if (td->tmd1_bits & LE_T1_ERR) {
-			status = td->misc;
+		if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) {
+			status = *tds_ptr(td, misc, lp->type);
 
 			lp->stats.tx_errors++;
 			if (status & LE_T3_RTY)
@@ -667,18 +690,19 @@
 				init_restart_lance(lp);
 				goto out;
 			}
-		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+		} else if ((*tds_ptr(td, tmd1, lp->type) & LE_T1_POK) ==
+			   LE_T1_POK) {
 			/*
 			 * So we don't count the packet more than once.
 			 */
-			td->tmd1_bits &= ~(LE_T1_POK);
+			*tds_ptr(td, tmd1, lp->type) &= ~(LE_T1_POK);
 
 			/* One collision before packet was sent. */
-			if (td->tmd1_bits & LE_T1_EONE)
+			if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE)
 				lp->stats.collisions++;
 
 			/* More than one collision, be optimistic. */
-			if (td->tmd1_bits & LE_T1_EMORE)
+			if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE)
 				lp->stats.collisions += 2;
 
 			lp->stats.tx_packets++;
@@ -752,7 +776,7 @@
 
 static int lance_open(struct net_device *dev)
 {
-	volatile struct lance_init_block *ib = (struct lance_init_block *) (dev->mem_start);
+	volatile u16 *ib = (volatile u16 *)dev->mem_start;
 	struct lance_private *lp = netdev_priv(dev);
 	volatile struct lance_regs *ll = lp->ll;
 	int status = 0;
@@ -769,11 +793,11 @@
 	 *
 	 * BTW it is common bug in all lance drivers! --ANK
 	 */
-	ib->mode = 0;
-	ib->filter [0] = 0;
-	ib->filter [2] = 0;
-	ib->filter [4] = 0;
-	ib->filter [6] = 0;
+	*lib_ptr(ib, mode, lp->type) = 0;
+	*lib_ptr(ib, filter[0], lp->type) = 0;
+	*lib_ptr(ib, filter[1], lp->type) = 0;
+	*lib_ptr(ib, filter[2], lp->type) = 0;
+	*lib_ptr(ib, filter[3], lp->type) = 0;
 
 	lance_init_ring(dev);
 	load_csrs(lp);
@@ -874,12 +898,10 @@
 {
 	struct lance_private *lp = netdev_priv(dev);
 	volatile struct lance_regs *ll = lp->ll;
-	volatile struct lance_init_block *ib = (struct lance_init_block *) (dev->mem_start);
-	int entry, skblen, len;
+	volatile u16 *ib = (volatile u16 *)dev->mem_start;
+	int entry, len;
 
-	skblen = skb->len;
-
-	len = skblen;
+	len = skb->len;
 
 	if (len < ETH_ZLEN) {
 		if (skb_padto(skb, ETH_ZLEN))
@@ -889,23 +911,17 @@
 
 	lp->stats.tx_bytes += len;
 
-	entry = lp->tx_new & TX_RING_MOD_MASK;
-	ib->btx_ring[entry].length = (-len);
-	ib->btx_ring[entry].misc = 0;
+	entry = lp->tx_new;
+	*lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
+	*lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
 
-	cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data,
-		  skblen);
-
-	/* Clear the slack of the packet, do I need this? */
-	/* For a firewall it's a good idea - AC */
-/*
-   if (len != skblen)
-   memset ((char *) &ib->tx_buf [entry][skblen], 0, (len - skblen) << 1);
- */
+	cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len);
 
 	/* Now, give the packet to the lance */
-	ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
-	lp->tx_new = (lp->tx_new + 1) & TX_RING_MOD_MASK;
+	*lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
+		((lp->tx_buf_ptr_lnc[entry] >> 16) & 0xff) |
+		(LE_T1_POK | LE_T1_OWN);
+	lp->tx_new = (entry + 1) & TX_RING_MOD_MASK;
 
 	if (TX_BUFFS_AVAIL <= 0)
 		netif_stop_queue(dev);
@@ -930,8 +946,8 @@
 
 static void lance_load_multicast(struct net_device *dev)
 {
-	volatile struct lance_init_block *ib = (struct lance_init_block *) (dev->mem_start);
-	volatile u16 *mcast_table = (u16 *) & ib->filter;
+	struct lance_private *lp = netdev_priv(dev);
+	volatile u16 *ib = (volatile u16 *)dev->mem_start;
 	struct dev_mc_list *dmi = dev->mc_list;
 	char *addrs;
 	int i;
@@ -939,17 +955,17 @@
 
 	/* set all multicast bits */
 	if (dev->flags & IFF_ALLMULTI) {
-		ib->filter[0] = 0xffff;
-		ib->filter[2] = 0xffff;
-		ib->filter[4] = 0xffff;
-		ib->filter[6] = 0xffff;
+		*lib_ptr(ib, filter[0], lp->type) = 0xffff;
+		*lib_ptr(ib, filter[1], lp->type) = 0xffff;
+		*lib_ptr(ib, filter[2], lp->type) = 0xffff;
+		*lib_ptr(ib, filter[3], lp->type) = 0xffff;
 		return;
 	}
 	/* clear the multicast filter */
-	ib->filter[0] = 0;
-	ib->filter[2] = 0;
-	ib->filter[4] = 0;
-	ib->filter[6] = 0;
+	*lib_ptr(ib, filter[0], lp->type) = 0;
+	*lib_ptr(ib, filter[1], lp->type) = 0;
+	*lib_ptr(ib, filter[2], lp->type) = 0;
+	*lib_ptr(ib, filter[3], lp->type) = 0;
 
 	/* Add addresses */
 	for (i = 0; i < dev->mc_count; i++) {
@@ -962,7 +978,7 @@
 
 		crc = ether_crc_le(ETH_ALEN, addrs);
 		crc = crc >> 26;
-		mcast_table[2 * (crc >> 4)] |= 1 << (crc & 0xf);
+		*lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
 	}
 	return;
 }
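
Editor's note: in lance_load_multicast() above, the top six bits of the
little-endian CRC-32 select one bit of the 64-bit logical address filter,
which the init block stores as four 16-bit words.  A hypothetical helper
that makes the word/bit split explicit (illustrative only, assuming
hash == crc >> 26):

/* hash is a 6-bit value: bits 5:4 pick the filter word, bits 3:0 the bit. */
static void sketch_set_mcast_bit(unsigned short filter[4], unsigned int hash)
{
	filter[(hash >> 4) & 3] |= 1U << (hash & 0xf);
}
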
@@ -970,11 +986,9 @@
 static void lance_set_multicast(struct net_device *dev)
 {
 	struct lance_private *lp = netdev_priv(dev);
-	volatile struct lance_init_block *ib;
+	volatile u16 *ib = (volatile u16 *)dev->mem_start;
 	volatile struct lance_regs *ll = lp->ll;
 
-	ib = (struct lance_init_block *) (dev->mem_start);
-
 	if (!netif_running(dev))
 		return;
 
@@ -992,9 +1006,9 @@
 	lance_init_ring(dev);
 
 	if (dev->flags & IFF_PROMISC) {
-		ib->mode |= LE_MO_PROM;
+		*lib_ptr(ib, mode, lp->type) |= LE_MO_PROM;
 	} else {
-		ib->mode &= ~LE_MO_PROM;
+		*lib_ptr(ib, mode, lp->type) &= ~LE_MO_PROM;
 		lance_load_multicast(dev);
 	}
 	load_csrs(lp);
@@ -1051,7 +1065,6 @@
 	lp->type = type;
 	lp->slot = slot;
 	switch (type) {
-#ifdef CONFIG_TC
 	case ASIC_LANCE:
 		dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
 
@@ -1073,20 +1086,20 @@
 		 */
 		for (i = 0; i < RX_RING_SIZE; i++) {
 			lp->rx_buf_ptr_cpu[i] =
-				(char *)(dev->mem_start + BUF_OFFSET_CPU +
+				(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
 					 2 * i * RX_BUFF_SIZE);
 			lp->rx_buf_ptr_lnc[i] =
-				(char *)(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+				(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
 		}
 		for (i = 0; i < TX_RING_SIZE; i++) {
 			lp->tx_buf_ptr_cpu[i] =
-				(char *)(dev->mem_start + BUF_OFFSET_CPU +
+				(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
 					 2 * RX_RING_SIZE * RX_BUFF_SIZE +
 					 2 * i * TX_BUFF_SIZE);
 			lp->tx_buf_ptr_lnc[i] =
-				(char *)(BUF_OFFSET_LNC +
-					 RX_RING_SIZE * RX_BUFF_SIZE +
-					 i * TX_BUFF_SIZE);
+				(BUF_OFFSET_LNC +
+				 RX_RING_SIZE * RX_BUFF_SIZE +
+				 i * TX_BUFF_SIZE);
 		}
 
 		/* Setup I/O ASIC LANCE DMA.  */
@@ -1095,11 +1108,12 @@
 			     CPHYSADDR(dev->mem_start) << 3);
 
 		break;
-
+#ifdef CONFIG_TC
 	case PMAD_LANCE:
 		claim_tc_card(slot);
 
 		dev->mem_start = CKSEG1ADDR(get_tc_base_addr(slot));
+		dev->mem_end = dev->mem_start + 0x100000;
 		dev->base_addr = dev->mem_start + 0x100000;
 		dev->irq = get_tc_irq_nr(slot);
 		esar_base = dev->mem_start + 0x1c0002;
@@ -1110,7 +1124,7 @@
 				(char *)(dev->mem_start + BUF_OFFSET_CPU +
 					 i * RX_BUFF_SIZE);
 			lp->rx_buf_ptr_lnc[i] =
-				(char *)(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+				(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
 		}
 		for (i = 0; i < TX_RING_SIZE; i++) {
 			lp->tx_buf_ptr_cpu[i] =
@@ -1118,18 +1132,18 @@
 					 RX_RING_SIZE * RX_BUFF_SIZE +
 					 i * TX_BUFF_SIZE);
 			lp->tx_buf_ptr_lnc[i] =
-				(char *)(BUF_OFFSET_LNC +
-					 RX_RING_SIZE * RX_BUFF_SIZE +
-					 i * TX_BUFF_SIZE);
+				(BUF_OFFSET_LNC +
+				 RX_RING_SIZE * RX_BUFF_SIZE +
+				 i * TX_BUFF_SIZE);
 		}
 
 		break;
 #endif
-
 	case PMAX_LANCE:
 		dev->irq = dec_interrupt[DEC_IRQ_LANCE];
 		dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE);
 		dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM);
+		dev->mem_end = dev->mem_start + KN01_SLOT_SIZE;
 		esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1);
 		lp->dma_irq = -1;
 
@@ -1138,20 +1152,20 @@
 		 */
 		for (i = 0; i < RX_RING_SIZE; i++) {
 			lp->rx_buf_ptr_cpu[i] =
-				(char *)(dev->mem_start + BUF_OFFSET_CPU +
+				(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
 					 2 * i * RX_BUFF_SIZE);
 			lp->rx_buf_ptr_lnc[i] =
-				(char *)(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+				(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
 		}
 		for (i = 0; i < TX_RING_SIZE; i++) {
 			lp->tx_buf_ptr_cpu[i] =
-				(char *)(dev->mem_start + BUF_OFFSET_CPU +
+				(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
 					 2 * RX_RING_SIZE * RX_BUFF_SIZE +
 					 2 * i * TX_BUFF_SIZE);
 			lp->tx_buf_ptr_lnc[i] =
-				(char *)(BUF_OFFSET_LNC +
-					 RX_RING_SIZE * RX_BUFF_SIZE +
-					 i * TX_BUFF_SIZE);
+				(BUF_OFFSET_LNC +
+				 RX_RING_SIZE * RX_BUFF_SIZE +
+				 i * TX_BUFF_SIZE);
 		}
 
 		break;
@@ -1279,10 +1293,8 @@
 	/* Then handle onboard devices. */
 	if (dec_interrupt[DEC_IRQ_LANCE] >= 0) {
 		if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) {
-#ifdef CONFIG_TC
 			if (dec_lance_init(ASIC_LANCE, -1) >= 0)
 				count++;
-#endif
 		} else if (!TURBOCHANNEL) {
 			if (dec_lance_init(PMAX_LANCE, -1) >= 0)
 				count++;
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 8f514cc..dc3ab3b 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -192,6 +192,7 @@
  *		04 Aug 2003	macro		Converted to the DMA API.
  *		14 Aug 2004	macro		Fix device names reported.
  *		14 Jun 2005	macro		Use irqreturn_t.
+ *		23 Oct 2006	macro		Big-endian host support.
  */
 
 /* Include files */
@@ -218,8 +219,8 @@
 
 /* Version information string should be updated prior to each new release!  */
 #define DRV_NAME "defxx"
-#define DRV_VERSION "v1.08"
-#define DRV_RELDATE "2005/06/14"
+#define DRV_VERSION "v1.09"
+#define DRV_RELDATE "2006/10/23"
 
 static char version[] __devinitdata =
 	DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
@@ -859,6 +860,7 @@
 		       print_name);
 		return(DFX_K_FAILURE);
 	}
+	data = cpu_to_le32(data);
 	memcpy(&bp->factory_mac_addr[0], &data, sizeof(u32));
 
 	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
@@ -867,6 +869,7 @@
 		       print_name);
 		return(DFX_K_FAILURE);
 	}
+	data = cpu_to_le32(data);
 	memcpy(&bp->factory_mac_addr[4], &data, sizeof(u16));
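
Editor's note: the two cpu_to_le32() conversions added above keep the byte
order of factory_mac_addr[] identical on little- and big-endian hosts; the
port-control data word carries the address bytes in little-endian order, so
converting before the memcpy() stores byte 0 first everywhere.  A standalone
equivalent, with hypothetical names, would be:

#include <stdint.h>

/* Store a 32-bit register value as little-endian bytes. */
static void sketch_store_mac_word(uint8_t dst[4], uint32_t reg)
{
	dst[0] = reg & 0xff;		/* lowest byte first */
	dst[1] = (reg >> 8) & 0xff;
	dst[2] = (reg >> 16) & 0xff;
	dst[3] = (reg >> 24) & 0xff;
}
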
 
 	/*
@@ -1085,27 +1088,23 @@
 		}
 
 	/*
-	 * Set base address of Descriptor Block and bring adapter to DMA_AVAILABLE state
+	 * Set the base address of the descriptor block and bring the
+	 * adapter to the DMA_AVAILABLE state.
 	 *
-	 * Note: We also set the literal and data swapping requirements in this
-	 *	     command.  Since this driver presently runs on Intel platforms
-	 *		 which are Little Endian, we'll tell the adapter to byte swap
-	 *		 data only.  This code will need to change when we support
-	 *		 Big Endian systems (eg. PowerPC).
+	 * Note: We also set the literal and data swapping requirements
+	 *       in this command.
 	 *
-	 * Assumption: 32-bit physical address of descriptor block is 8Kbyte
-	 *             aligned.  That is, bits 0-12 of the address must be zero.
+	 * Assumption: 32-bit physical address of descriptor block
+	 *       is 8Kbyte aligned.
 	 */
-
-	if (dfx_hw_port_ctrl_req(bp,
-							PI_PCTRL_M_INIT,
-							(u32) (bp->descr_block_phys | PI_PDATA_A_INIT_M_BSWAP_DATA),
-							0,
-							NULL) != DFX_K_SUCCESS)
-		{
-		printk("%s: Could not set descriptor block address!\n", bp->dev->name);
-		return(DFX_K_FAILURE);
-		}
+	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
+				 (u32)(bp->descr_block_phys |
+				       PI_PDATA_A_INIT_M_BSWAP_INIT),
+				 0, NULL) != DFX_K_SUCCESS) {
+		printk("%s: Could not set descriptor block address!\n",
+		       bp->dev->name);
+		return DFX_K_FAILURE;
+	}
 
 	/* Set transmit flush timeout value */
 
diff --git a/drivers/net/defxx.h b/drivers/net/defxx.h
index 8b1e9a1..2ce8f97 100644
--- a/drivers/net/defxx.h
+++ b/drivers/net/defxx.h
@@ -25,6 +25,7 @@
  *							macros to DEFXX.C.
  *		12-Sep-96	LVS		Removed packet request header pointers.
  *		04 Aug 2003	macro		Converted to the DMA API.
+ *		23 Oct 2006	macro		Big-endian host support.
  */
 
 #ifndef _DEFXX_H_
@@ -1344,7 +1345,7 @@
 
 /* Register definition structures are defined for both big and little endian systems */
 
-#ifndef  BIG_ENDIAN
+#ifndef __BIG_ENDIAN
 
 /* Little endian format of Type 1 Producer register */
 
@@ -1402,7 +1403,11 @@
 		} index;
 	} PI_TYPE_2_CONSUMER;
 
-#else
+/* Define swapping required by DMA transfers.  */
+#define PI_PDATA_A_INIT_M_BSWAP_INIT	\
+	(PI_PDATA_A_INIT_M_BSWAP_DATA)
+
+#else /* __BIG_ENDIAN */
 
 /* Big endian format of Type 1 Producer register */
 
@@ -1460,7 +1465,11 @@
 		} index;
 	} PI_TYPE_2_CONSUMER;
 
-#endif	/* #ifndef BIG_ENDIAN */
+/* Define swapping required by DMA transfers.  */
+#define PI_PDATA_A_INIT_M_BSWAP_INIT	\
+	(PI_PDATA_A_INIT_M_BSWAP_DATA | PI_PDATA_A_INIT_M_BSWAP_LITERAL)
+
+#endif /* __BIG_ENDIAN */
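
Editor's note: the PI_PDATA_A_INIT_M_BSWAP_INIT selection above resolves at
compile time; little-endian hosts only ask the adapter to byte-swap DMA
data, big-endian hosts additionally request literal swapping.  The same idea
in miniature, with made-up flag values, purely for illustration:

#define SKETCH_BSWAP_DATA	0x1	/* hypothetical flag values */
#define SKETCH_BSWAP_LITERAL	0x2

#ifdef __BIG_ENDIAN
#define SKETCH_BSWAP_INIT	(SKETCH_BSWAP_DATA | SKETCH_BSWAP_LITERAL)
#else
#define SKETCH_BSWAP_INIT	(SKETCH_BSWAP_DATA)
#endif
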
 
 /* Define EISA controller register offsets */
 
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index f87f6e3..5113eef 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1252,24 +1252,22 @@
 	struct depca_private *lp = (struct depca_private *) dev->priv;
 	u_long ioaddr = dev->base_addr;
 
-	if (dev) {
-		netif_stop_queue(dev);
-		while (lp->tx_old != lp->tx_new);	/* Wait for the ring to empty */
+	netif_stop_queue(dev);
+	while (lp->tx_old != lp->tx_new);	/* Wait for the ring to empty */
 
-		STOP_DEPCA;	/* Temporarily stop the depca.  */
-		depca_init_ring(dev);	/* Initialize the descriptor rings */
+	STOP_DEPCA;	/* Temporarily stop the depca.  */
+	depca_init_ring(dev);	/* Initialize the descriptor rings */
 
-		if (dev->flags & IFF_PROMISC) {	/* Set promiscuous mode */
-			lp->init_block.mode |= PROM;
-		} else {
-			SetMulticastFilter(dev);
-			lp->init_block.mode &= ~PROM;	/* Unset promiscuous mode */
-		}
-
-		LoadCSRs(dev);	/* Reload CSR3 */
-		InitRestartDepca(dev);	/* Resume normal operation. */
-		netif_start_queue(dev);	/* Unlock the TX ring */
+	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous mode */
+		lp->init_block.mode |= PROM;
+	} else {
+		SetMulticastFilter(dev);
+		lp->init_block.mode &= ~PROM;	/* Unset promiscuous mode */
 	}
+
+	LoadCSRs(dev);	/* Reload CSR3 */
+	InitRestartDepca(dev);	/* Resume normal operation. */
+	netif_start_queue(dev);	/* Unlock the TX ring */
 }
 
 /*
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 19ab344..03bf164 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1215,7 +1215,7 @@
 *  the literal in the instruction before the code is loaded, the
 *  driver can change the algorithm.
 *
-*  INTDELAY - This loads the dead-man timer with its inital value.
+*  INTDELAY - This loads the dead-man timer with its initial value.
 *    When this timer expires the interrupt is asserted, and the
 *    timer is reset each time a new packet is received.  (see
 *    BUNDLEMAX below to set the limit on number of chained packets)
@@ -2102,9 +2102,10 @@
 	schedule_work(&nic->tx_timeout_task);
 }
 
-static void e100_tx_timeout_task(struct net_device *netdev)
+static void e100_tx_timeout_task(struct work_struct *work)
 {
-	struct nic *nic = netdev_priv(netdev);
+	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
+	struct net_device *netdev = nic->netdev;
 
 	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
 		readb(&nic->csr->scb.status));
@@ -2637,8 +2638,7 @@
 	nic->blink_timer.function = e100_blink_led;
 	nic->blink_timer.data = (unsigned long)nic;
 
-	INIT_WORK(&nic->tx_timeout_task,
-		(void (*)(void *))e100_tx_timeout_task, netdev);
+	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
 
 	if((err = e100_alloc(nic))) {
 		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
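
Editor's note: the e100 hunks above follow the 2.6.20-era workqueue API:
INIT_WORK() no longer carries a data pointer, so the callback takes the
work_struct itself and recovers its container with container_of().  A
minimal sketch of that pattern, with invented names, assuming a private
struct that embeds the work item:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct sketch_nic {
	struct net_device *netdev;
	struct work_struct tx_timeout_task;	/* embedded work item */
};

static void sketch_tx_timeout_task(struct work_struct *work)
{
	/* Recover the owning structure from the embedded member. */
	struct sketch_nic *nic =
		container_of(work, struct sketch_nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	(void)netdev;		/* the real handler would reset the device */
}

/* At probe time: INIT_WORK(&nic->tx_timeout_task, sketch_tx_timeout_task); */
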
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 7ecce43..f091042 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -59,6 +59,9 @@
 #include <linux/capability.h>
 #include <linux/in.h>
 #include <linux/ip.h>
+#ifdef NETIF_F_TSO6
+#include <linux/ipv6.h>
+#endif
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <net/pkt_sched.h>
@@ -254,6 +257,17 @@
 	spinlock_t tx_queue_lock;
 #endif
 	atomic_t irq_sem;
+	unsigned int detect_link;
+	unsigned int total_tx_bytes;
+	unsigned int total_tx_packets;
+	unsigned int total_rx_bytes;
+	unsigned int total_rx_packets;
+	/* Interrupt Throttle Rate */
+	uint32_t itr;
+	uint32_t itr_setting;
+	uint16_t tx_itr;
+	uint16_t rx_itr;
+
 	struct work_struct reset_task;
 	uint8_t fc_autoneg;
 
@@ -262,6 +276,7 @@
 
 	/* TX */
 	struct e1000_tx_ring *tx_ring;      /* One per active queue */
+	unsigned int restart_queue;
 	unsigned long tx_queue_len;
 	uint32_t txd_cmd;
 	uint32_t tx_int_delay;
@@ -310,8 +325,6 @@
 	uint64_t gorcl_old;
 	uint16_t rx_ps_bsize0;
 
-	/* Interrupt Throttle Rate */
-	uint32_t itr;
 
 	/* OS defined structs */
 	struct net_device *netdev;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c564adb..da459f7 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -85,6 +85,7 @@
 	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
 	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
 	{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
+	{ "tx_restart_queue", E1000_STAT(restart_queue) },
 	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
 	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
 	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
@@ -133,9 +134,7 @@
 
 		if (hw->autoneg == 1) {
 			ecmd->advertising |= ADVERTISED_Autoneg;
-
 			/* the e1000 autoneg seems to match ethtool nicely */
-
 			ecmd->advertising |= hw->autoneg_advertised;
 		}
 
@@ -285,7 +284,7 @@
 			e1000_reset(adapter);
 	} else
 		retval = ((hw->media_type == e1000_media_type_fiber) ?
-			   e1000_setup_link(hw) : e1000_force_mac_fc(hw));
+			  e1000_setup_link(hw) : e1000_force_mac_fc(hw));
 
 	clear_bit(__E1000_RESETTING, &adapter->flags);
 	return retval;
@@ -350,6 +349,13 @@
 	else
 		netdev->features &= ~NETIF_F_TSO;
 
+#ifdef NETIF_F_TSO6
+	if (data)
+		netdev->features |= NETIF_F_TSO6;
+	else
+		netdev->features &= ~NETIF_F_TSO6;
+#endif
+
 	DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
 	adapter->tso_force = TRUE;
 	return 0;
@@ -774,7 +780,7 @@
 	/* The status register is Read Only, so a write should fail.
 	 * Some bits that get toggled are ignored.
 	 */
-        switch (adapter->hw.mac_type) {
+	switch (adapter->hw.mac_type) {
 	/* there are several bits on newer hardware that are r/w */
 	case e1000_82571:
 	case e1000_82572:
@@ -802,12 +808,14 @@
 	}
 	/* restore previous status */
 	E1000_WRITE_REG(&adapter->hw, STATUS, before);
+
 	if (adapter->hw.mac_type != e1000_ich8lan) {
 		REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
 		REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
 		REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
 		REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
 	}
+
 	REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
 	REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
 	REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
@@ -820,8 +828,9 @@
 	REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);
 
 	REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
+
 	before = (adapter->hw.mac_type == e1000_ich8lan ?
-			0x06C3B33E : 0x06DFB3FE);
+	          0x06C3B33E : 0x06DFB3FE);
 	REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
 	REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
 
@@ -834,10 +843,10 @@
 		REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
 		REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
 		value = (adapter->hw.mac_type == e1000_ich8lan ?
-				E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
+		         E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
 		for (i = 0; i < value; i++) {
 			REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
-					 0xFFFFFFFF);
+			                 0xFFFFFFFF);
 		}
 
 	} else {
@@ -883,8 +892,7 @@
 }
 
 static irqreturn_t
-e1000_test_intr(int irq,
-		void *data)
+e1000_test_intr(int irq, void *data)
 {
 	struct net_device *netdev = (struct net_device *) data;
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -905,11 +913,11 @@
 
 	/* NOTE: we don't test MSI interrupts here, yet */
 	/* Hook up test interrupt handler just for this test */
-	if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED,
-			 netdev->name, netdev))
+	if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
+	                 netdev))
 		shared_int = FALSE;
 	else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
-			      netdev->name, netdev)) {
+	         netdev->name, netdev)) {
 		*data = 1;
 		return -1;
 	}
@@ -925,6 +933,7 @@
 
 		if (adapter->hw.mac_type == e1000_ich8lan && i == 8)
 			continue;
+
 		/* Interrupt to test */
 		mask = 1 << i;
 
@@ -1674,7 +1683,7 @@
 		if (e1000_link_test(adapter, &data[4]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
-		/* Offline tests aren't run; pass by default */
+		/* Online tests aren't run; pass by default */
 		data[0] = 0;
 		data[1] = 0;
 		data[2] = 0;
@@ -1717,6 +1726,7 @@
 		retval = 0;
 		break;
 	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
 		/* quad port adapters only support WoL on port A */
 		if (!adapter->quad_port_a) {
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 65077f3..3655d90 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -385,6 +385,7 @@
     case E1000_DEV_ID_82571EB_FIBER:
     case E1000_DEV_ID_82571EB_SERDES:
     case E1000_DEV_ID_82571EB_QUAD_COPPER:
+    case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
             hw->mac_type = e1000_82571;
         break;
     case E1000_DEV_ID_82572EI_COPPER:
@@ -408,6 +409,8 @@
     case E1000_DEV_ID_ICH8_IGP_AMT:
     case E1000_DEV_ID_ICH8_IGP_C:
     case E1000_DEV_ID_ICH8_IFE:
+    case E1000_DEV_ID_ICH8_IFE_GT:
+    case E1000_DEV_ID_ICH8_IFE_G:
     case E1000_DEV_ID_ICH8_IGP_M:
         hw->mac_type = e1000_ich8lan;
         break;
@@ -2367,6 +2370,7 @@
 
         /* Need to reset the PHY or these changes will be ignored */
         mii_ctrl_reg |= MII_CR_RESET;
+
     /* Disable MDI-X support for 10/100 */
     } else if (hw->phy_type == e1000_phy_ife) {
         ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
@@ -2379,6 +2383,7 @@
         ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data);
         if (ret_val)
             return ret_val;
+
     } else {
         /* Clear Auto-Crossover to force MDI manually.  IGP requires MDI
          * forced whenever speed or duplex are forced.
@@ -3868,7 +3873,7 @@
 *
 * hw - Struct containing variables accessed by shared code
 *
-* Sets bit 15 of the MII Control regiser
+* Sets bit 15 of the MII Control register
 ******************************************************************************/
 int32_t
 e1000_phy_reset(struct e1000_hw *hw)
@@ -3940,14 +3945,15 @@
         E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
                         E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
 
-        /* Write VR power-down enable */
+        /* Write VR power-down enable - bits 9:8 should be 10b */
         e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
-        e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data |
-                            IGP3_VR_CTRL_MODE_SHUT);
+        phy_data |= (1 << 9);
+        phy_data &= ~(1 << 8);
+        e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data);
 
         /* Read it back and test */
         e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
-        if ((phy_data & IGP3_VR_CTRL_MODE_SHUT) || retry)
+        if (((phy_data & IGP3_VR_CTRL_MODE_MASK) == IGP3_VR_CTRL_MODE_SHUT) || retry)
             break;
 
         /* Issue PHY reset and repeat at most one more time */
@@ -4549,7 +4555,7 @@
     case e1000_ich8lan:
         {
         int32_t  i = 0;
-        uint32_t flash_size = E1000_READ_ICH8_REG(hw, ICH8_FLASH_GFPREG);
+        uint32_t flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG);
 
         eeprom->type = e1000_eeprom_ich8;
         eeprom->use_eerd = FALSE;
@@ -4565,12 +4571,14 @@
             }
         }
 
-        hw->flash_base_addr = (flash_size & ICH8_GFPREG_BASE_MASK) *
-                              ICH8_FLASH_SECTOR_SIZE;
+        hw->flash_base_addr = (flash_size & ICH_GFPREG_BASE_MASK) *
+                              ICH_FLASH_SECTOR_SIZE;
 
-        hw->flash_bank_size = ((flash_size >> 16) & ICH8_GFPREG_BASE_MASK) + 1;
-        hw->flash_bank_size -= (flash_size & ICH8_GFPREG_BASE_MASK);
-        hw->flash_bank_size *= ICH8_FLASH_SECTOR_SIZE;
+        hw->flash_bank_size = ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
+        hw->flash_bank_size -= (flash_size & ICH_GFPREG_BASE_MASK);
+
+        hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
+
         hw->flash_bank_size /= 2 * sizeof(uint16_t);
 
         break;
@@ -5620,8 +5628,8 @@
                  * signature is valid.  We want to do this after the write
                  * has completed so that we don't mark the segment valid
                  * while the write is still in progress */
-                if (i == E1000_ICH8_NVM_SIG_WORD)
-                    high_byte = E1000_ICH8_NVM_SIG_MASK | high_byte;
+                if (i == E1000_ICH_NVM_SIG_WORD)
+                    high_byte = E1000_ICH_NVM_SIG_MASK | high_byte;
 
                 error = e1000_verify_write_ich8_byte(hw,
                             (i << 1) + new_bank_offset + 1, high_byte);
@@ -5643,18 +5651,18 @@
              * erase as well since these bits are 11 to start with
              * and we need to change bit 14 to 0b */
             e1000_read_ich8_byte(hw,
-                                 E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
+                                 E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
                                  &high_byte);
             high_byte &= 0xBF;
             error = e1000_verify_write_ich8_byte(hw,
-                        E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset, high_byte);
+                        E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset, high_byte);
             /* And invalidate the previously valid segment by setting
              * its signature word (0x13) high_byte to 0b. This can be
              * done without an erase because flash erase sets all bits
              * to 1's. We can write 1's to 0's without an erase */
             if (error == E1000_SUCCESS) {
                 error = e1000_verify_write_ich8_byte(hw,
-                            E1000_ICH8_NVM_SIG_WORD * 2 + 1 + old_bank_offset, 0);
+                            E1000_ICH_NVM_SIG_WORD * 2 + 1 + old_bank_offset, 0);
             }
 
             /* Clear the now not used entry in the cache */
@@ -5841,6 +5849,7 @@
     hash_reg = (hash_value >> 5) & 0x7F;
     if (hw->mac_type == e1000_ich8lan)
         hash_reg &= 0x1F;
+
     hash_bit = hash_value & 0x1F;
 
     mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg);
@@ -6026,6 +6035,7 @@
         else
             eeprom_data = ID_LED_DEFAULT;
     }
+
     for (i = 0; i < 4; i++) {
         temp = (eeprom_data >> (i << 2)) & led_mask;
         switch (temp) {
@@ -8486,7 +8496,7 @@
 
     DEBUGFUNC("e1000_ich8_cycle_init");
 
-    hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+    hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 
     /* May be check the Flash Des Valid bit in Hw status */
     if (hsfsts.hsf_status.fldesvalid == 0) {
@@ -8499,7 +8509,7 @@
     hsfsts.hsf_status.flcerr = 1;
     hsfsts.hsf_status.dael = 1;
 
-    E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+    E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
 
     /* Either we should have a hardware SPI cycle in progress bit to check
      * against, in order to start a new cycle or FDONE bit should be changed
@@ -8514,13 +8524,13 @@
         /* There is no cycle running at present, so we can start a cycle */
         /* Begin by setting Flash Cycle Done. */
         hsfsts.hsf_status.flcdone = 1;
-        E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+        E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
         error = E1000_SUCCESS;
     } else {
         /* otherwise poll for sometime so the current cycle has a chance
          * to end before giving up. */
-        for (i = 0; i < ICH8_FLASH_COMMAND_TIMEOUT; i++) {
-            hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+        for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
+            hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
             if (hsfsts.hsf_status.flcinprog == 0) {
                 error = E1000_SUCCESS;
                 break;
@@ -8531,7 +8541,7 @@
             /* Successful in waiting for previous cycle to timeout,
              * now set the Flash Cycle Done. */
             hsfsts.hsf_status.flcdone = 1;
-            E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+            E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
         } else {
             DEBUGOUT("Flash controller busy, cannot get access");
         }
@@ -8553,13 +8563,13 @@
     uint32_t i = 0;
 
     /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
-    hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+    hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
     hsflctl.hsf_ctrl.flcgo = 1;
-    E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+    E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
 
     /* wait till FDONE bit is set to 1 */
     do {
-        hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+        hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
         if (hsfsts.hsf_status.flcdone == 1)
             break;
         udelay(1);
@@ -8593,10 +8603,10 @@
     DEBUGFUNC("e1000_read_ich8_data");
 
     if (size < 1  || size > 2 || data == 0x0 ||
-        index > ICH8_FLASH_LINEAR_ADDR_MASK)
+        index > ICH_FLASH_LINEAR_ADDR_MASK)
         return error;
 
-    flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) +
+    flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
                            hw->flash_base_addr;
 
     do {
@@ -8606,25 +8616,25 @@
         if (error != E1000_SUCCESS)
             break;
 
-        hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+        hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
         /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
         hsflctl.hsf_ctrl.fldbcount = size - 1;
-        hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_READ;
-        E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+        E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
 
         /* Write the last 24 bits of index into Flash Linear address field in
          * Flash Address */
         /* TODO: TBD maybe check the index against the size of flash */
 
-        E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+        E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
 
-        error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT);
+        error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
 
         /* Check if FCERR is set to 1, if set to 1, clear it and try the whole
          * sequence a few more times, else read in (shift in) the Flash Data0,
          * the order is least significant byte first msb to lsb */
         if (error == E1000_SUCCESS) {
-            flash_data = E1000_READ_ICH8_REG(hw, ICH8_FLASH_FDATA0);
+            flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0);
             if (size == 1) {
                 *data = (uint8_t)(flash_data & 0x000000FF);
             } else if (size == 2) {
@@ -8634,9 +8644,9 @@
         } else {
             /* If we've gotten here, then things are probably completely hosed,
              * but if the error condition is detected, it won't hurt to give
-             * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times.
+             * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
              */
-            hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+            hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
             if (hsfsts.hsf_status.flcerr == 1) {
                 /* Repeat for some time before giving up. */
                 continue;
@@ -8645,7 +8655,7 @@
                 break;
             }
         }
-    } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT);
+    } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
 
     return error;
 }
@@ -8672,10 +8682,10 @@
     DEBUGFUNC("e1000_write_ich8_data");
 
     if (size < 1  || size > 2 || data > size * 0xff ||
-        index > ICH8_FLASH_LINEAR_ADDR_MASK)
+        index > ICH_FLASH_LINEAR_ADDR_MASK)
         return error;
 
-    flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) +
+    flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
                            hw->flash_base_addr;
 
     do {
@@ -8685,34 +8695,34 @@
         if (error != E1000_SUCCESS)
             break;
 
-        hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+        hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
         /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
         hsflctl.hsf_ctrl.fldbcount = size -1;
-        hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_WRITE;
-        E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+        E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
 
         /* Write the last 24 bits of index into Flash Linear address field in
          * Flash Address */
-        E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+        E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
 
         if (size == 1)
             flash_data = (uint32_t)data & 0x00FF;
         else
             flash_data = (uint32_t)data;
 
-        E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FDATA0, flash_data);
+        E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
 
         /* check if FCERR is set to 1 , if set to 1, clear it and try the whole
          * sequence a few more times else done */
-        error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT);
+        error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
         if (error == E1000_SUCCESS) {
             break;
         } else {
             /* If we're here, then things are most likely completely hosed,
              * but if the error condition is detected, it won't hurt to give
-             * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times.
+             * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
              */
-            hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+            hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
             if (hsfsts.hsf_status.flcerr == 1) {
                 /* Repeat for some time before giving up. */
                 continue;
@@ -8721,7 +8731,7 @@
                 break;
             }
         }
-    } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT);
+    } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
 
     return error;
 }
@@ -8840,7 +8850,7 @@
     int32_t  j = 0;
     int32_t  error_flag = 0;
 
-    hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+    hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 
     /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */
     /* 00: The Hw sector is 256 bytes, hence we need to erase 16
@@ -8853,19 +8863,14 @@
      * 11: The Hw sector size is 64K bytes */
     if (hsfsts.hsf_status.berasesz == 0x0) {
         /* Hw sector size 256 */
-        sub_sector_size = ICH8_FLASH_SEG_SIZE_256;
-        bank_size = ICH8_FLASH_SECTOR_SIZE;
-        iteration = ICH8_FLASH_SECTOR_SIZE / ICH8_FLASH_SEG_SIZE_256;
+        sub_sector_size = ICH_FLASH_SEG_SIZE_256;
+        bank_size = ICH_FLASH_SECTOR_SIZE;
+        iteration = ICH_FLASH_SECTOR_SIZE / ICH_FLASH_SEG_SIZE_256;
     } else if (hsfsts.hsf_status.berasesz == 0x1) {
-        bank_size = ICH8_FLASH_SEG_SIZE_4K;
-        iteration = 1;
-    } else if (hw->mac_type != e1000_ich8lan &&
-               hsfsts.hsf_status.berasesz == 0x2) {
-        /* 8K erase size invalid for ICH8 - added in for ICH9 */
-        bank_size = ICH9_FLASH_SEG_SIZE_8K;
+        bank_size = ICH_FLASH_SEG_SIZE_4K;
         iteration = 1;
     } else if (hsfsts.hsf_status.berasesz == 0x3) {
-        bank_size = ICH8_FLASH_SEG_SIZE_64K;
+        bank_size = ICH_FLASH_SEG_SIZE_64K;
         iteration = 1;
     } else {
         return error;
@@ -8883,9 +8888,9 @@
 
             /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash
              * Control */
-            hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
-            hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_ERASE;
-            E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+            hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+            hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
+            E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
 
             /* Write the last 24 bits of an index within the block into Flash
              * Linear address field in Flash Address.  This probably needs to
@@ -8893,17 +8898,17 @@
              * the software bank size (4, 8 or 64 KBytes) */
             flash_linear_address = bank * bank_size + j * sub_sector_size;
             flash_linear_address += hw->flash_base_addr;
-            flash_linear_address &= ICH8_FLASH_LINEAR_ADDR_MASK;
+            flash_linear_address &= ICH_FLASH_LINEAR_ADDR_MASK;
 
-            E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+            E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
 
-            error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_ERASE_TIMEOUT);
+            error = e1000_ich8_flash_cycle(hw, ICH_FLASH_ERASE_TIMEOUT);
             /* Check if FCERR is set to 1.  If 1, clear it and try the whole
              * sequence a few more times else Done */
             if (error == E1000_SUCCESS) {
                 break;
             } else {
-                hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+                hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
                 if (hsfsts.hsf_status.flcerr == 1) {
                     /* repeat for some time before giving up */
                     continue;
@@ -8912,7 +8917,7 @@
                     break;
                 }
             }
-        } while ((count < ICH8_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
+        } while ((count < ICH_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
         if (error_flag == 1)
             break;
     }
@@ -9013,5 +9018,3 @@
     return E1000_SUCCESS;
 }
 
-
-
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 449a603..3321fb1 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -128,11 +128,13 @@
 /* PCI bus widths */
 typedef enum {
     e1000_bus_width_unknown = 0,
+    /* These PCIe values should literally match the possible return values
+     * from config space */
+    e1000_bus_width_pciex_1 = 1,
+    e1000_bus_width_pciex_2 = 2,
+    e1000_bus_width_pciex_4 = 4,
     e1000_bus_width_32,
     e1000_bus_width_64,
-    e1000_bus_width_pciex_1,
-    e1000_bus_width_pciex_2,
-    e1000_bus_width_pciex_4,
     e1000_bus_width_reserved
 } e1000_bus_width;
 
@@ -326,6 +328,7 @@
 int32_t e1000_phy_reset(struct e1000_hw *hw);
 int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
 int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
+
 void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
 
 /* EEPROM Functions */
@@ -390,7 +393,6 @@
                                   uint16_t length);
 boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
 boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
-
 int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
 int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw);
 int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
@@ -473,6 +475,7 @@
 #define E1000_DEV_ID_82571EB_FIBER       0x105F
 #define E1000_DEV_ID_82571EB_SERDES      0x1060
 #define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE  0x10BC
 #define E1000_DEV_ID_82572EI_COPPER      0x107D
 #define E1000_DEV_ID_82572EI_FIBER       0x107E
 #define E1000_DEV_ID_82572EI_SERDES      0x107F
@@ -490,6 +493,8 @@
 #define E1000_DEV_ID_ICH8_IGP_AMT        0x104A
 #define E1000_DEV_ID_ICH8_IGP_C          0x104B
 #define E1000_DEV_ID_ICH8_IFE            0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT         0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G          0x10C5
 #define E1000_DEV_ID_ICH8_IGP_M          0x104D
 
 
@@ -576,6 +581,7 @@
  * E1000_RAR_ENTRIES - 1 multicast addresses.
  */
 #define E1000_RAR_ENTRIES 15
+
 #define E1000_RAR_ENTRIES_ICH8LAN  6
 
 #define MIN_NUMBER_OF_DESCRIPTORS  8
@@ -1335,9 +1341,9 @@
     uint64_t gotch;
     uint64_t rnbc;
     uint64_t ruc;
+    uint64_t rfc;
     uint64_t roc;
     uint64_t rlerrc;
-    uint64_t rfc;
     uint64_t rjc;
     uint64_t mgprc;
     uint64_t mgpdc;
@@ -1577,8 +1583,8 @@
 #define E1000_HICR_FW_RESET  0xC0
 
 #define E1000_SHADOW_RAM_WORDS     2048
-#define E1000_ICH8_NVM_SIG_WORD    0x13
-#define E1000_ICH8_NVM_SIG_MASK    0xC0
+#define E1000_ICH_NVM_SIG_WORD     0x13
+#define E1000_ICH_NVM_SIG_MASK     0xC0
 
 /* EEPROM Read */
 #define E1000_EERD_START      0x00000001 /* Start Read */
@@ -3172,6 +3178,7 @@
 #define IGP3_VR_CTRL \
         PHY_REG(776, 18) /* Voltage regulator control register */
 #define IGP3_VR_CTRL_MODE_SHUT       0x0200 /* Enter powerdown, shutdown VRs */
+#define IGP3_VR_CTRL_MODE_MASK       0x0300 /* Shutdown VR Mask */
 
 #define IGP3_CAPABILITY \
         PHY_REG(776, 19) /* IGP3 Capability Register */
@@ -3256,41 +3263,40 @@
 #define IFE_PSCL_PROBE_LEDS_OFF              0x0006  /* Force LEDs 0 and 2 off */
 #define IFE_PSCL_PROBE_LEDS_ON               0x0007  /* Force LEDs 0 and 2 on */
 
-#define ICH8_FLASH_COMMAND_TIMEOUT           5000    /* 5000 uSecs - adjusted */
-#define ICH8_FLASH_ERASE_TIMEOUT             3000000 /* Up to 3 seconds - worst case */
-#define ICH8_FLASH_CYCLE_REPEAT_COUNT        10      /* 10 cycles */
-#define ICH8_FLASH_SEG_SIZE_256              256
-#define ICH8_FLASH_SEG_SIZE_4K               4096
-#define ICH9_FLASH_SEG_SIZE_8K               8192
-#define ICH8_FLASH_SEG_SIZE_64K              65536
+#define ICH_FLASH_COMMAND_TIMEOUT            5000    /* 5000 uSecs - adjusted */
+#define ICH_FLASH_ERASE_TIMEOUT              3000000 /* Up to 3 seconds - worst case */
+#define ICH_FLASH_CYCLE_REPEAT_COUNT         10      /* 10 cycles */
+#define ICH_FLASH_SEG_SIZE_256               256
+#define ICH_FLASH_SEG_SIZE_4K                4096
+#define ICH_FLASH_SEG_SIZE_64K               65536
 
-#define ICH8_CYCLE_READ                      0x0
-#define ICH8_CYCLE_RESERVED                  0x1
-#define ICH8_CYCLE_WRITE                     0x2
-#define ICH8_CYCLE_ERASE                     0x3
+#define ICH_CYCLE_READ                       0x0
+#define ICH_CYCLE_RESERVED                   0x1
+#define ICH_CYCLE_WRITE                      0x2
+#define ICH_CYCLE_ERASE                      0x3
 
-#define ICH8_FLASH_GFPREG   0x0000
-#define ICH8_FLASH_HSFSTS   0x0004
-#define ICH8_FLASH_HSFCTL   0x0006
-#define ICH8_FLASH_FADDR    0x0008
-#define ICH8_FLASH_FDATA0   0x0010
-#define ICH8_FLASH_FRACC    0x0050
-#define ICH8_FLASH_FREG0    0x0054
-#define ICH8_FLASH_FREG1    0x0058
-#define ICH8_FLASH_FREG2    0x005C
-#define ICH8_FLASH_FREG3    0x0060
-#define ICH8_FLASH_FPR0     0x0074
-#define ICH8_FLASH_FPR1     0x0078
-#define ICH8_FLASH_SSFSTS   0x0090
-#define ICH8_FLASH_SSFCTL   0x0092
-#define ICH8_FLASH_PREOP    0x0094
-#define ICH8_FLASH_OPTYPE   0x0096
-#define ICH8_FLASH_OPMENU   0x0098
+#define ICH_FLASH_GFPREG   0x0000
+#define ICH_FLASH_HSFSTS   0x0004
+#define ICH_FLASH_HSFCTL   0x0006
+#define ICH_FLASH_FADDR    0x0008
+#define ICH_FLASH_FDATA0   0x0010
+#define ICH_FLASH_FRACC    0x0050
+#define ICH_FLASH_FREG0    0x0054
+#define ICH_FLASH_FREG1    0x0058
+#define ICH_FLASH_FREG2    0x005C
+#define ICH_FLASH_FREG3    0x0060
+#define ICH_FLASH_FPR0     0x0074
+#define ICH_FLASH_FPR1     0x0078
+#define ICH_FLASH_SSFSTS   0x0090
+#define ICH_FLASH_SSFCTL   0x0092
+#define ICH_FLASH_PREOP    0x0094
+#define ICH_FLASH_OPTYPE   0x0096
+#define ICH_FLASH_OPMENU   0x0098
 
-#define ICH8_FLASH_REG_MAPSIZE      0x00A0
-#define ICH8_FLASH_SECTOR_SIZE      4096
-#define ICH8_GFPREG_BASE_MASK       0x1FFF
-#define ICH8_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+#define ICH_FLASH_REG_MAPSIZE      0x00A0
+#define ICH_FLASH_SECTOR_SIZE      4096
+#define ICH_GFPREG_BASE_MASK       0x1FFF
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
 
 /* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
 /* Offset 04h HSFSTS */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 726ec5e..73f3a85 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -27,6 +27,7 @@
 *******************************************************************************/
 
 #include "e1000.h"
+#include <net/ip6_checksum.h>
 
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@ -35,7 +36,7 @@
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "7.2.9-k4"DRIVERNAPI
+#define DRV_VERSION "7.3.15-k2"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -103,6 +104,9 @@
 	INTEL_E1000_ETHERNET_DEVICE(0x10B9),
 	INTEL_E1000_ETHERNET_DEVICE(0x10BA),
 	INTEL_E1000_ETHERNET_DEVICE(0x10BB),
+	INTEL_E1000_ETHERNET_DEVICE(0x10BC),
+	INTEL_E1000_ETHERNET_DEVICE(0x10C4),
+	INTEL_E1000_ETHERNET_DEVICE(0x10C5),
 	/* required last entry */
 	{0,}
 };
@@ -154,6 +158,9 @@
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 static int e1000_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t e1000_intr(int irq, void *data);
+#ifdef CONFIG_PCI_MSI
+static irqreturn_t e1000_intr_msi(int irq, void *data);
+#endif
 static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
                                     struct e1000_tx_ring *tx_ring);
 #ifdef CONFIG_E1000_NAPI
@@ -183,7 +190,7 @@
 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
 static void e1000_tx_timeout(struct net_device *dev);
-static void e1000_reset_task(struct net_device *dev);
+static void e1000_reset_task(struct work_struct *work);
 static void e1000_smartspeed(struct e1000_adapter *adapter);
 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                        struct sk_buff *skb);
@@ -285,7 +292,7 @@
 
 	flags = IRQF_SHARED;
 #ifdef CONFIG_PCI_MSI
-	if (adapter->hw.mac_type > e1000_82547_rev_2) {
+	if (adapter->hw.mac_type >= e1000_82571) {
 		adapter->have_msi = TRUE;
 		if ((err = pci_enable_msi(adapter->pdev))) {
 			DPRINTK(PROBE, ERR,
@@ -293,8 +300,14 @@
 			adapter->have_msi = FALSE;
 		}
 	}
-	if (adapter->have_msi)
+	if (adapter->have_msi) {
 		flags &= ~IRQF_SHARED;
+		err = request_irq(adapter->pdev->irq, &e1000_intr_msi, flags,
+		                  netdev->name, netdev);
+		if (err)
+			DPRINTK(PROBE, ERR,
+			       "Unable to allocate interrupt Error: %d\n", err);
+	} else
 #endif
 	if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
 	                       netdev->name, netdev)))
@@ -375,7 +388,7 @@
  * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
  * For ASF and Pass Through versions of f/w this means that the
  * driver is no longer loaded. For AMT version (only with 82573) i
- * of the f/w this means that the netowrk i/f is closed.
+ * of the f/w this means that the network i/f is closed.
  *
  **/
 
@@ -416,7 +429,7 @@
  * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
  * For ASF and Pass Through versions of f/w this means that
  * the driver is loaded. For AMT version (only with 82573)
- * of the f/w this means that the netowrk i/f is open.
+ * of the f/w this means that the network i/f is open.
  *
  **/
 
@@ -426,6 +439,7 @@
 	uint32_t ctrl_ext;
 	uint32_t swsm;
 	uint32_t extcnf;
+
 	/* Let firmware know the driver has taken over */
 	switch (adapter->hw.mac_type) {
 	case e1000_82571:
@@ -601,9 +615,6 @@
 e1000_reset(struct e1000_adapter *adapter)
 {
 	uint32_t pba, manc;
-#ifdef DISABLE_MULR
-	uint32_t tctl;
-#endif
 	uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
 
 	/* Repartition Pba for greater than 9k mtu
@@ -670,12 +681,7 @@
 	e1000_reset_hw(&adapter->hw);
 	if (adapter->hw.mac_type >= e1000_82544)
 		E1000_WRITE_REG(&adapter->hw, WUC, 0);
-#ifdef DISABLE_MULR
-	/* disable Multiple Reads in Transmit Control Register for debugging */
-	tctl = E1000_READ_REG(hw, TCTL);
-	E1000_WRITE_REG(hw, TCTL, tctl & ~E1000_TCTL_MULR);
 
-#endif
 	if (e1000_init_hw(&adapter->hw))
 		DPRINTK(PROBE, ERR, "Hardware Error\n");
 	e1000_update_mng_vlan(adapter);
@@ -851,9 +857,9 @@
 	   (adapter->hw.mac_type != e1000_82547))
 		netdev->features |= NETIF_F_TSO;
 
-#ifdef NETIF_F_TSO_IPV6
+#ifdef NETIF_F_TSO6
 	if (adapter->hw.mac_type > e1000_82547_rev_2)
-		netdev->features |= NETIF_F_TSO_IPV6;
+		netdev->features |= NETIF_F_TSO6;
 #endif
 #endif
 	if (pci_using_dac)
@@ -908,8 +914,7 @@
 	adapter->phy_info_timer.function = &e1000_update_phy_info;
 	adapter->phy_info_timer.data = (unsigned long) adapter;
 
-	INIT_WORK(&adapter->reset_task,
-		(void (*)(void *))e1000_reset_task, netdev);
+	INIT_WORK(&adapter->reset_task, e1000_reset_task);
 
 	e1000_check_options(adapter);
 
@@ -968,6 +973,7 @@
 		break;
 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
 	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
 		/* if quad port adapter, disable WoL on all but port A */
 		if (global_quad_port_a != 0)
 			adapter->eeprom_wol = 0;
@@ -1279,12 +1285,10 @@
 		return -EBUSY;
 
 	/* allocate transmit descriptors */
-
 	if ((err = e1000_setup_all_tx_resources(adapter)))
 		goto err_setup_tx;
 
 	/* allocate receive descriptors */
-
 	if ((err = e1000_setup_all_rx_resources(adapter)))
 		goto err_setup_rx;
 
@@ -1569,6 +1573,8 @@
 
 	if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
 		tarc = E1000_READ_REG(hw, TARC0);
+		/* set the speed mode bit; we'll clear it later if we're not
+		 * at gigabit link */
 		tarc |= (1 << 21);
 		E1000_WRITE_REG(hw, TARC0, tarc);
 	} else if (hw->mac_type == e1000_80003es2lan) {
@@ -1583,8 +1589,11 @@
 	e1000_config_collision_dist(hw);
 
 	/* Setup Transmit Descriptor Settings for eop descriptor */
-	adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
-		E1000_TXD_CMD_IFCS;
+	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+	/* only set IDE if we are delaying interrupts using the timers */
+	if (adapter->tx_int_delay)
+		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
 
 	if (hw->mac_type < e1000_82543)
 		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
@@ -1821,8 +1830,11 @@
 		/* Configure extra packet-split registers */
 		rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
 		rfctl |= E1000_RFCTL_EXTEN;
-		/* disable IPv6 packet split support */
-		rfctl |= E1000_RFCTL_IPV6_DIS;
+		/* disable packet split support for IPv6 extension headers,
+		 * because some malformed IPv6 headers can hang the RX */
+		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
+		          E1000_RFCTL_NEW_IPV6_EXT_DIS);
+
 		E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
 
 		rctl |= E1000_RCTL_DTYP_PS;
@@ -1885,7 +1897,7 @@
 
 	if (hw->mac_type >= e1000_82540) {
 		E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
-		if (adapter->itr > 1)
+		if (adapter->itr_setting != 0)
 			E1000_WRITE_REG(hw, ITR,
 				1000000000 / (adapter->itr * 256));
 	}
@@ -1895,11 +1907,11 @@
 		/* Reset delay timers after every interrupt */
 		ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
 #ifdef CONFIG_E1000_NAPI
-		/* Auto-Mask interrupts upon ICR read. */
+		/* Auto-Mask interrupts upon ICR access */
 		ctrl_ext |= E1000_CTRL_EXT_IAME;
+		E1000_WRITE_REG(hw, IAM, 0xffffffff);
 #endif
 		E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
-		E1000_WRITE_REG(hw, IAM, ~0);
 		E1000_WRITE_FLUSH(hw);
 	}
 
@@ -1938,6 +1950,12 @@
 		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
 	}
 
+	/* enable early receives on 82573; only takes effect with a total
+	 * frame size > 2048 bytes, i.e. only for jumbo frames */
+#define E1000_ERT_2048 0x100
+	if (hw->mac_type == e1000_82573)
+		E1000_WRITE_REG(hw, ERT, E1000_ERT_2048);
+
 	/* Enable Receives */
 	E1000_WRITE_REG(hw, RCTL, rctl);
 }
@@ -1991,10 +2009,13 @@
 				buffer_info->dma,
 				buffer_info->length,
 				PCI_DMA_TODEVICE);
+		buffer_info->dma = 0;
 	}
-	if (buffer_info->skb)
+	if (buffer_info->skb) {
 		dev_kfree_skb_any(buffer_info->skb);
-	memset(buffer_info, 0, sizeof(struct e1000_buffer));
+		buffer_info->skb = NULL;
+	}
+	/* buffer_info must be completely set up in the transmit path */
 }
 
 /**
@@ -2418,6 +2439,7 @@
 		DPRINTK(LINK, INFO,
 			"Gigabit has been disabled, downgrading speed\n");
 	}
+
 	if (adapter->hw.mac_type == e1000_82573) {
 		e1000_enable_tx_pkt_filtering(&adapter->hw);
 		if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
@@ -2462,13 +2484,12 @@
 			if ((adapter->hw.mac_type == e1000_82571 ||
 			     adapter->hw.mac_type == e1000_82572) &&
 			    txb2b == 0) {
-#define SPEED_MODE_BIT (1 << 21)
 				uint32_t tarc0;
 				tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
-				tarc0 &= ~SPEED_MODE_BIT;
+				tarc0 &= ~(1 << 21);
 				E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
 			}
-				
+
 #ifdef NETIF_F_TSO
 			/* disable TSO for pcie and 10/100 speeds, to avoid
 			 * some hardware issues */
@@ -2480,9 +2501,15 @@
 					DPRINTK(PROBE,INFO,
 				        "10/100 speed: disabling TSO\n");
 					netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+					netdev->features &= ~NETIF_F_TSO6;
+#endif
 					break;
 				case SPEED_1000:
 					netdev->features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+					netdev->features |= NETIF_F_TSO6;
+#endif
 					break;
 				default:
 					/* oops */
@@ -2549,19 +2576,6 @@
 		}
 	}
 
-	/* Dynamic mode for Interrupt Throttle Rate (ITR) */
-	if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
-		/* Symmetric Tx/Rx gets a reduced ITR=2000; Total
-		 * asymmetrical Tx or Rx gets ITR=8000; everyone
-		 * else is between 2000-8000. */
-		uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
-		uint32_t dif = (adapter->gotcl > adapter->gorcl ?
-			adapter->gotcl - adapter->gorcl :
-			adapter->gorcl - adapter->gotcl) / 10000;
-		uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
-		E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
-	}
-
 	/* Cause software interrupt to ensure rx ring is cleaned */
 	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
 
@@ -2577,6 +2591,135 @@
 	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
 }
 
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+/**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ *      Stores a new ITR value based on packets and byte
+ *      counts during the last interrupt.  The advantage of per interrupt
+ *      computation is faster updates and more accurate ITR for the current
+ *      traffic pattern.  Constants in this function were computed
+ *      based on theoretical maximum wire speed and thresholds were set based
+ *      on testing data as well as attempting to minimize response time
+ *      while increasing bulk throughput.
+ *      This functionality is controlled by the InterruptThrottleRate module
+ *      parameter (see e1000_param.c).
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ **/
+static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+                                   uint16_t itr_setting,
+                                   int packets,
+                                   int bytes)
+{
+	unsigned int retval = itr_setting;
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (unlikely(hw->mac_type < e1000_82540))
+		goto update_itr_done;
+
+	if (packets == 0)
+		goto update_itr_done;
+
+	switch (itr_setting) {
+	case lowest_latency:
+		if ((packets < 5) && (bytes > 512))
+			retval = low_latency;
+		break;
+	case low_latency:  /* 50 usec aka 20000 ints/s */
+		if (bytes > 10000) {
+			if ((packets < 10) ||
+			     ((bytes/packets) > 1200))
+				retval = bulk_latency;
+			else if ((packets > 35))
+				retval = lowest_latency;
+		} else if (packets <= 2 && bytes < 512)
+			retval = lowest_latency;
+		break;
+	case bulk_latency: /* 250 usec aka 4000 ints/s */
+		if (bytes > 25000) {
+			if (packets > 35)
+				retval = low_latency;
+		} else {
+			if (bytes < 6000)
+				retval = low_latency;
+		}
+		break;
+	}
+
+update_itr_done:
+	return retval;
+}
+
+static void e1000_set_itr(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	uint16_t current_itr;
+	uint32_t new_itr = adapter->itr;
+
+	if (unlikely(hw->mac_type < e1000_82540))
+		return;
+
+	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+	if (unlikely(adapter->link_speed != SPEED_1000)) {
+		current_itr = 0;
+		new_itr = 4000;
+		goto set_itr_now;
+	}
+
+	adapter->tx_itr = e1000_update_itr(adapter,
+	                            adapter->tx_itr,
+	                            adapter->total_tx_packets,
+	                            adapter->total_tx_bytes);
+	adapter->rx_itr = e1000_update_itr(adapter,
+	                            adapter->rx_itr,
+	                            adapter->total_rx_packets,
+	                            adapter->total_rx_bytes);
+
+	current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+	/* conservative mode eliminates the lowest_latency setting */
+	if (current_itr == lowest_latency && (adapter->itr_setting == 3))
+		current_itr = low_latency;
+
+	switch (current_itr) {
+	/* counts and packets in update_itr are dependent on these numbers */
+	case lowest_latency:
+		new_itr = 70000;
+		break;
+	case low_latency:
+		new_itr = 20000; /* aka hwitr = ~200 */
+		break;
+	case bulk_latency:
+		new_itr = 4000;
+		break;
+	default:
+		break;
+	}
+
+set_itr_now:
+	if (new_itr != adapter->itr) {
+		/* this attempts to bias the interrupt rate towards Bulk
+		 * by adding intermediate steps when interrupt rate is
+		 * increasing */
+		new_itr = new_itr > adapter->itr ?
+		             min(adapter->itr + (new_itr >> 2), new_itr) :
+		             new_itr;
+		adapter->itr = new_itr;
+		E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256));
+	}
+
+	return;
+}
+
 #define E1000_TX_FLAGS_CSUM		0x00000001
 #define E1000_TX_FLAGS_VLAN		0x00000002
 #define E1000_TX_FLAGS_TSO		0x00000004
@@ -2617,7 +2760,7 @@
 						   0);
 			cmd_length = E1000_TXD_CMD_IP;
 			ipcse = skb->h.raw - skb->data - 1;
-#ifdef NETIF_F_TSO_IPV6
+#ifdef NETIF_F_TSO6
 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
 			skb->nh.ipv6h->payload_len = 0;
 			skb->h.th->check =
@@ -2653,6 +2796,7 @@
 		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
 
 		buffer_info->time_stamp = jiffies;
+		buffer_info->next_to_watch = i;
 
 		if (++i == tx_ring->count) i = 0;
 		tx_ring->next_to_use = i;
@@ -2681,12 +2825,13 @@
 		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
 
 		context_desc->upper_setup.tcp_fields.tucss = css;
-		context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
+		context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
 		context_desc->upper_setup.tcp_fields.tucse = 0;
 		context_desc->tcp_seg_setup.data = 0;
 		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
 
 		buffer_info->time_stamp = jiffies;
+		buffer_info->next_to_watch = i;
 
 		if (unlikely(++i == tx_ring->count)) i = 0;
 		tx_ring->next_to_use = i;
@@ -2755,6 +2900,7 @@
 				size,
 				PCI_DMA_TODEVICE);
 		buffer_info->time_stamp = jiffies;
+		buffer_info->next_to_watch = i;
 
 		len -= size;
 		offset += size;
@@ -2794,6 +2940,7 @@
 					size,
 					PCI_DMA_TODEVICE);
 			buffer_info->time_stamp = jiffies;
+			buffer_info->next_to_watch = i;
 
 			len -= size;
 			offset += size;
@@ -2859,6 +3006,9 @@
 
 	tx_ring->next_to_use = i;
 	writel(i, adapter->hw.hw_addr + tx_ring->tdt);
+	/* we need this if more than one processor can write to our tail
+	 * at a time; it synchronizes I/O on IA64/Altix systems */
+	mmiowb();
 }
 
 /**
@@ -2952,6 +3102,7 @@
 
 	/* A reprieve! */
 	netif_start_queue(netdev);
+	++adapter->restart_queue;
 	return 0;
 }
 
@@ -3010,9 +3161,9 @@
 		max_per_txd = min(mss << 2, max_per_txd);
 		max_txd_pwr = fls(max_per_txd) - 1;
 
-	/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
-	 * points to just header, pull a few bytes of payload from
-	 * frags into skb->data */
+		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
+		 * points to just header, pull a few bytes of payload from
+		 * frags into skb->data */
 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
 		if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
 			switch (adapter->hw.mac_type) {
@@ -3154,9 +3305,10 @@
 }
 
 static void
-e1000_reset_task(struct net_device *netdev)
+e1000_reset_task(struct work_struct *work)
 {
-	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_adapter *adapter =
+		container_of(work, struct e1000_adapter, reset_task);
 
 	e1000_reinit_locked(adapter);
 }
@@ -3316,12 +3468,12 @@
 	adapter->stats.roc += E1000_READ_REG(hw, ROC);
 
 	if (adapter->hw.mac_type != e1000_ich8lan) {
-	adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
-	adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
-	adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
-	adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
-	adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
-	adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
+		adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
+		adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
+		adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
+		adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
+		adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
+		adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
 	}
 
 	adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
@@ -3352,12 +3504,12 @@
 	adapter->stats.tpr += E1000_READ_REG(hw, TPR);
 
 	if (adapter->hw.mac_type != e1000_ich8lan) {
-	adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
-	adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
-	adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
-	adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
-	adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
-	adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
+		adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
+		adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
+		adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
+		adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
+		adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
+		adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
 	}
 
 	adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
@@ -3383,18 +3535,17 @@
 		adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
 
 		if (adapter->hw.mac_type != e1000_ich8lan) {
-		adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
-		adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
-		adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
-		adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
-		adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
-		adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
-		adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
+			adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
+			adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
+			adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
+			adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
+			adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
+			adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
+			adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
 		}
 	}
 
 	/* Fill out the OS statistics structure */
-
 	adapter->net_stats.rx_packets = adapter->stats.gprc;
 	adapter->net_stats.tx_packets = adapter->stats.gptc;
 	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
@@ -3426,7 +3577,6 @@
 	/* Tx Dropped needs to be maintained elsewhere */
 
 	/* Phy Stats */
-
 	if (hw->media_type == e1000_media_type_copper) {
 		if ((adapter->link_speed == SPEED_1000) &&
 		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
@@ -3442,6 +3592,95 @@
 
 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
+#ifdef CONFIG_PCI_MSI
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+static
+irqreturn_t e1000_intr_msi(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+#ifndef CONFIG_E1000_NAPI
+	int i;
+#endif
+
+	/* this code avoids the read of ICR but has to get 1000 interrupts
+	 * at every link change event before it will notice the change */
+	if (++adapter->detect_link >= 1000) {
+		uint32_t icr = E1000_READ_REG(hw, ICR);
+#ifdef CONFIG_E1000_NAPI
+		/* reading ICR disables interrupts using IAM, so keep up with our
+		 * enable/disable accounting */
+		atomic_inc(&adapter->irq_sem);
+#endif
+		adapter->detect_link = 0;
+		if ((icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) &&
+		    (icr & E1000_ICR_INT_ASSERTED)) {
+			hw->get_link_status = 1;
+			/* 80003ES2LAN workaround--
+			 * For packet buffer work-around on link down event;
+			 * disable receives here in the ISR and
+			 * reset adapter in watchdog
+			 */
+			if (netif_carrier_ok(netdev) &&
+			    (adapter->hw.mac_type == e1000_80003es2lan)) {
+				/* disable receives */
+				uint32_t rctl = E1000_READ_REG(hw, RCTL);
+				E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
+			}
+			/* guard against interrupt when we're going down */
+			if (!test_bit(__E1000_DOWN, &adapter->flags))
+				mod_timer(&adapter->watchdog_timer,
+				          jiffies + 1);
+		}
+	} else {
+		E1000_WRITE_REG(hw, ICR, (0xffffffff & ~(E1000_ICR_RXSEQ |
+		                                         E1000_ICR_LSC)));
+		/* bummer we have to flush here, but things break otherwise as
+		 * some event appears to be lost or delayed and throughput
+		 * drops.  In almost all tests this flush is unnecessary */
+		E1000_WRITE_FLUSH(hw);
+#ifdef CONFIG_E1000_NAPI
+		/* Interrupt Auto-Mask (IAM)...upon writing ICR, interrupts are
+		 * masked.  No need for the IMC write, but it does mean we
+		 * should account for it ASAP. */
+		atomic_inc(&adapter->irq_sem);
+#endif
+	}
+
+#ifdef CONFIG_E1000_NAPI
+	if (likely(netif_rx_schedule_prep(netdev))) {
+		adapter->total_tx_bytes = 0;
+		adapter->total_tx_packets = 0;
+		adapter->total_rx_bytes = 0;
+		adapter->total_rx_packets = 0;
+		__netif_rx_schedule(netdev);
+	} else
+		e1000_irq_enable(adapter);
+#else
+	adapter->total_tx_bytes = 0;
+	adapter->total_rx_bytes = 0;
+	adapter->total_tx_packets = 0;
+	adapter->total_rx_packets = 0;
+
+	for (i = 0; i < E1000_MAX_INTR; i++)
+		if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+		   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
+			break;
+
+	if (likely(adapter->itr_setting & 3))
+		e1000_set_itr(adapter);
+#endif
+
+	return IRQ_HANDLED;
+}
+#endif
 
 /**
  * e1000_intr - Interrupt Handler
@@ -3458,7 +3697,17 @@
 	uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
 #ifndef CONFIG_E1000_NAPI
 	int i;
-#else
+#endif
+	if (unlikely(!icr))
+		return IRQ_NONE;  /* Not our interrupt */
+
+#ifdef CONFIG_E1000_NAPI
+	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+	 * not set, then the adapter didn't send an interrupt */
+	if (unlikely(hw->mac_type >= e1000_82571 &&
+	             !(icr & E1000_ICR_INT_ASSERTED)))
+		return IRQ_NONE;
+
 	/* Interrupt Auto-Mask...upon reading ICR,
 	 * interrupts are masked.  No need for the
 	 * IMC write, but it does mean we should
@@ -3467,14 +3716,6 @@
 		atomic_inc(&adapter->irq_sem);
 #endif
 
-	if (unlikely(!icr)) {
-#ifdef CONFIG_E1000_NAPI
-		if (hw->mac_type >= e1000_82571)
-			e1000_irq_enable(adapter);
-#endif
-		return IRQ_NONE;  /* Not our interrupt */
-	}
-
 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
 		hw->get_link_status = 1;
 		/* 80003ES2LAN workaround--
@@ -3495,13 +3736,20 @@
 
 #ifdef CONFIG_E1000_NAPI
 	if (unlikely(hw->mac_type < e1000_82571)) {
+		/* disable interrupts, without the synchronize_irq bit */
 		atomic_inc(&adapter->irq_sem);
 		E1000_WRITE_REG(hw, IMC, ~0);
 		E1000_WRITE_FLUSH(hw);
 	}
-	if (likely(netif_rx_schedule_prep(netdev)))
+	if (likely(netif_rx_schedule_prep(netdev))) {
+		adapter->total_tx_bytes = 0;
+		adapter->total_tx_packets = 0;
+		adapter->total_rx_bytes = 0;
+		adapter->total_rx_packets = 0;
 		__netif_rx_schedule(netdev);
-	else
+	} else
+		/* this really should not happen! if it does it is basically a
+		 * bug, but not a hard error, so enable ints and continue */
 		e1000_irq_enable(adapter);
 #else
 	/* Writing IMC and IMS is needed for 82547.
@@ -3519,16 +3767,23 @@
 		E1000_WRITE_REG(hw, IMC, ~0);
 	}
 
+	adapter->total_tx_bytes = 0;
+	adapter->total_rx_bytes = 0;
+	adapter->total_tx_packets = 0;
+	adapter->total_rx_packets = 0;
+
 	for (i = 0; i < E1000_MAX_INTR; i++)
 		if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
 		   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
 			break;
 
+	if (likely(adapter->itr_setting & 3))
+		e1000_set_itr(adapter);
+
 	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
 		e1000_irq_enable(adapter);
 
 #endif
-
 	return IRQ_HANDLED;
 }
 
@@ -3572,6 +3827,8 @@
 	if ((!tx_cleaned && (work_done == 0)) ||
 	   !netif_running(poll_dev)) {
 quit_polling:
+		if (likely(adapter->itr_setting & 3))
+			e1000_set_itr(adapter);
 		netif_rx_complete(poll_dev);
 		e1000_irq_enable(adapter);
 		return 0;
@@ -3598,6 +3855,7 @@
 	unsigned int count = 0;
 #endif
 	boolean_t cleaned = FALSE;
+	unsigned int total_tx_bytes=0, total_tx_packets=0;
 
 	i = tx_ring->next_to_clean;
 	eop = tx_ring->buffer_info[i].next_to_watch;
@@ -3609,13 +3867,19 @@
 			buffer_info = &tx_ring->buffer_info[i];
 			cleaned = (i == eop);
 
+			if (cleaned) {
+				/* this packet count is wrong for TSO but has a
+				 * tendency to make dynamic ITR change more
+				 * towards bulk */
+				total_tx_packets++;
+				total_tx_bytes += buffer_info->skb->len;
+			}
 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
-			memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
+			tx_desc->upper.data = 0;
 
 			if (unlikely(++i == tx_ring->count)) i = 0;
 		}
 
-
 		eop = tx_ring->buffer_info[i].next_to_watch;
 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
 #ifdef CONFIG_E1000_NAPI
@@ -3634,8 +3898,10 @@
 		 * sees the new next_to_clean.
 		 */
 		smp_mb();
-		if (netif_queue_stopped(netdev))
+		if (netif_queue_stopped(netdev)) {
 			netif_wake_queue(netdev);
+			++adapter->restart_queue;
+		}
 	}
 
 	if (adapter->detect_tx_hung) {
@@ -3673,6 +3939,8 @@
 			netif_stop_queue(netdev);
 		}
 	}
+	adapter->total_tx_bytes += total_tx_bytes;
+	adapter->total_tx_packets += total_tx_packets;
 	return cleaned;
 }
 
@@ -3752,6 +4020,7 @@
 	unsigned int i;
 	int cleaned_count = 0;
 	boolean_t cleaned = FALSE;
+	unsigned int total_rx_bytes=0, total_rx_packets=0;
 
 	i = rx_ring->next_to_clean;
 	rx_desc = E1000_RX_DESC(*rx_ring, i);
@@ -3760,6 +4029,7 @@
 	while (rx_desc->status & E1000_RXD_STAT_DD) {
 		struct sk_buff *skb;
 		u8 status;
+
 #ifdef CONFIG_E1000_NAPI
 		if (*work_done >= work_to_do)
 			break;
@@ -3817,6 +4087,10 @@
 		 * done after the TBI_ACCEPT workaround above */
 		length -= 4;
 
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += length;
+		total_rx_packets++;
+
 		/* code added for copybreak, this should improve
 		 * performance for small packets with large amounts
 		 * of reassembly being done in the stack */
@@ -3832,12 +4106,11 @@
 				/* save the skb in buffer_info as good */
 				buffer_info->skb = skb;
 				skb = new_skb;
-				skb_put(skb, length);
 			}
-		} else
-			skb_put(skb, length);
-
+			/* else just continue with the old one */
+		}
 		/* end copybreak code */
+		skb_put(skb, length);
 
 		/* Receive Checksum Offload */
 		e1000_rx_checksum(adapter,
@@ -3886,6 +4159,8 @@
 	if (cleaned_count)
 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
 
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
 	return cleaned;
 }
 
@@ -3915,6 +4190,7 @@
 	uint32_t length, staterr;
 	int cleaned_count = 0;
 	boolean_t cleaned = FALSE;
+	unsigned int total_rx_bytes=0, total_rx_packets=0;
 
 	i = rx_ring->next_to_clean;
 	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
@@ -3999,7 +4275,7 @@
 			goto copydone;
 		} /* if */
 		}
-		
+
 		for (j = 0; j < adapter->rx_ps_pages; j++) {
 			if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j])))
 				break;
@@ -4019,6 +4295,9 @@
 		pskb_trim(skb, skb->len - 4);
 
 copydone:
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
 		e1000_rx_checksum(adapter, staterr,
 				  le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
 		skb->protocol = eth_type_trans(skb, netdev);
@@ -4067,6 +4346,8 @@
 	if (cleaned_count)
 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
 
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
 	return cleaned;
 }
 
@@ -4234,7 +4515,7 @@
 		}
 
 		skb = netdev_alloc_skb(netdev,
-				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+		                       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
 
 		if (unlikely(!skb)) {
 			adapter->alloc_rx_buff_failed++;
@@ -4511,7 +4792,6 @@
     return E1000_SUCCESS;
 }
 
-
 void
 e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
 {
@@ -4534,12 +4814,12 @@
 		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
 
 		if (adapter->hw.mac_type != e1000_ich8lan) {
-		/* enable VLAN receive filtering */
-		rctl = E1000_READ_REG(&adapter->hw, RCTL);
-		rctl |= E1000_RCTL_VFE;
-		rctl &= ~E1000_RCTL_CFIEN;
-		E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
-		e1000_update_mng_vlan(adapter);
+			/* enable VLAN receive filtering */
+			rctl = E1000_READ_REG(&adapter->hw, RCTL);
+			rctl |= E1000_RCTL_VFE;
+			rctl &= ~E1000_RCTL_CFIEN;
+			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+			e1000_update_mng_vlan(adapter);
 		}
 	} else {
 		/* disable VLAN tag insert/strip */
@@ -4548,14 +4828,16 @@
 		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
 
 		if (adapter->hw.mac_type != e1000_ich8lan) {
-		/* disable VLAN filtering */
-		rctl = E1000_READ_REG(&adapter->hw, RCTL);
-		rctl &= ~E1000_RCTL_VFE;
-		E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
-		if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
-			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
-			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
-		}
+			/* disable VLAN filtering */
+			rctl = E1000_READ_REG(&adapter->hw, RCTL);
+			rctl &= ~E1000_RCTL_VFE;
+			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+			if (adapter->mng_vlan_id !=
+			    (uint16_t)E1000_MNG_VLAN_NONE) {
+				e1000_vlan_rx_kill_vid(netdev,
+				                       adapter->mng_vlan_id);
+				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+			}
 		}
 	}
 
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index a464cb2..18afc0c 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -107,17 +107,16 @@
 
 #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS)
 
-#define E1000_WRITE_ICH8_REG(a, reg, value) ( \
+#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \
     writel((value), ((a)->flash_address + reg)))
 
-#define E1000_READ_ICH8_REG(a, reg) ( \
+#define E1000_READ_ICH_FLASH_REG(a, reg) ( \
     readl((a)->flash_address + reg))
 
-#define E1000_WRITE_ICH8_REG16(a, reg, value) ( \
+#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \
     writew((value), ((a)->flash_address + reg)))
 
-#define E1000_READ_ICH8_REG16(a, reg) ( \
+#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \
     readw((a)->flash_address + reg))
 
-
 #endif /* _E1000_OSDEP_H_ */
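Note on the e1000_main.c hunks above: the once-per-watchdog dynamic ITR calculation is replaced by per-interrupt traffic accounting (e1000_update_itr/e1000_set_itr).  The fragment below is a minimal standalone sketch of that idea, not the driver code: classify the last interval's traffic, then step the throttle rate toward the new target, damping upward moves so the rate drifts toward bulk.  Names are illustrative; the thresholds are copied loosely from the hunks.

enum latency_range { lowest_latency, low_latency, bulk_latency };

/* pick a latency class from the packet/byte counts of the last interval */
static enum latency_range classify_traffic(enum latency_range prev,
					    int packets, int bytes)
{
	if (packets == 0)
		return prev;

	switch (prev) {
	case lowest_latency:
		if (packets < 5 && bytes > 512)
			return low_latency;
		return lowest_latency;
	case low_latency:
		if (bytes > 10000) {
			if (packets < 10 || bytes / packets > 1200)
				return bulk_latency;
			if (packets > 35)
				return lowest_latency;
		} else if (packets <= 2 && bytes < 512) {
			return lowest_latency;
		}
		return low_latency;
	default: /* bulk_latency */
		if (bytes > 25000 ? packets > 35 : bytes < 6000)
			return low_latency;
		return bulk_latency;
	}
}

/* step the current rate toward the target; upward moves are damped so
 * the rate ramps through intermediate values */
static unsigned int ramp_itr(unsigned int cur, unsigned int target)
{
	if (target > cur && cur + (target >> 2) < target)
		return cur + (target >> 2);
	return target;
}

The per-class targets in the hunk are 70000, 20000 and 4000 interrupts/s, and the chosen rate is programmed as 1000000000 / (itr * 256), matching the existing ITR register write.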
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 9c3c1ac..cbfcd7f 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -44,16 +44,6 @@
  */
 
 #define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
-/* Module Parameters are always initialized to -1, so that the driver
- * can tell the difference between no user specified value or the
- * user asking for the default value.
- * The true default values are loaded in when e1000_check_options is called.
- *
- * This is a GCC extension to ANSI C.
- * See the item "Labeled Elements in Initializers" in the section
- * "Extensions to the C Language Family" of the GCC documentation.
- */
-
 #define E1000_PARAM(X, desc) \
 	static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
 	static int num_##X = 0; \
@@ -67,7 +57,6 @@
  *
  * Default Value: 256
  */
-
 E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
 
 /* Receive Descriptor Count
@@ -77,7 +66,6 @@
  *
  * Default Value: 256
  */
-
 E1000_PARAM(RxDescriptors, "Number of receive descriptors");
 
 /* User Specified Speed Override
@@ -90,7 +78,6 @@
  *
  * Default Value: 0
  */
-
 E1000_PARAM(Speed, "Speed setting");
 
 /* User Specified Duplex Override
@@ -102,7 +89,6 @@
  *
  * Default Value: 0
  */
-
 E1000_PARAM(Duplex, "Duplex setting");
 
 /* Auto-negotiation Advertisement Override
@@ -119,8 +105,9 @@
  *
  * Default Value: 0x2F (copper); 0x20 (fiber)
  */
-
 E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
+#define AUTONEG_ADV_DEFAULT  0x2F
+#define AUTONEG_ADV_MASK     0x2F
 
 /* User Specified Flow Control Override
  *
@@ -132,8 +119,8 @@
  *
  * Default Value: Read flow control settings from the EEPROM
  */
-
 E1000_PARAM(FlowControl, "Flow Control setting");
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
 
 /* XsumRX - Receive Checksum Offload Enable/Disable
  *
@@ -144,53 +131,54 @@
  *
  * Default Value: 1
  */
-
 E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
 
 /* Transmit Interrupt Delay in units of 1.024 microseconds
+ *  Tx interrupt delay typically needs to be set to something non-zero
  *
  * Valid Range: 0-65535
- *
- * Default Value: 64
  */
-
 E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+#define DEFAULT_TIDV                   8
+#define MAX_TXDELAY               0xFFFF
+#define MIN_TXDELAY                    0
 
 /* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
  *
  * Valid Range: 0-65535
- *
- * Default Value: 0
  */
-
 E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+#define DEFAULT_TADV                  32
+#define MAX_TXABSDELAY            0xFFFF
+#define MIN_TXABSDELAY                 0
 
 /* Receive Interrupt Delay in units of 1.024 microseconds
+ *   hardware will likely hang if you set this to anything but zero.
  *
  * Valid Range: 0-65535
- *
- * Default Value: 0
  */
-
 E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define DEFAULT_RDTR                   0
+#define MAX_RXDELAY               0xFFFF
+#define MIN_RXDELAY                    0
 
 /* Receive Absolute Interrupt Delay in units of 1.024 microseconds
  *
  * Valid Range: 0-65535
- *
- * Default Value: 128
  */
-
 E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define DEFAULT_RADV                   8
+#define MAX_RXABSDELAY            0xFFFF
+#define MIN_RXABSDELAY                 0
 
 /* Interrupt Throttle Rate (interrupts/sec)
  *
- * Valid Range: 100-100000 (0=off, 1=dynamic)
- *
- * Default Value: 8000
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
  */
-
 E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+#define DEFAULT_ITR                    3
+#define MAX_ITR                   100000
+#define MIN_ITR                      100
 
 /* Enable Smart Power Down of the PHY
  *
@@ -198,7 +186,6 @@
  *
  * Default Value: 0 (disabled)
  */
-
 E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
 
 /* Enable Kumeran Lock Loss workaround
@@ -207,33 +194,8 @@
  *
  * Default Value: 1 (enabled)
  */
-
 E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
 
-#define AUTONEG_ADV_DEFAULT  0x2F
-#define AUTONEG_ADV_MASK     0x2F
-#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
-
-#define DEFAULT_RDTR                   0
-#define MAX_RXDELAY               0xFFFF
-#define MIN_RXDELAY                    0
-
-#define DEFAULT_RADV                 128
-#define MAX_RXABSDELAY            0xFFFF
-#define MIN_RXABSDELAY                 0
-
-#define DEFAULT_TIDV                  64
-#define MAX_TXDELAY               0xFFFF
-#define MIN_TXDELAY                    0
-
-#define DEFAULT_TADV                  64
-#define MAX_TXABSDELAY            0xFFFF
-#define MIN_TXABSDELAY                 0
-
-#define DEFAULT_ITR                 8000
-#define MAX_ITR                   100000
-#define MIN_ITR                      100
-
 struct e1000_option {
 	enum { enable_option, range_option, list_option } type;
 	char *name;
@@ -510,15 +472,27 @@
 				break;
 			case 1:
 				DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
-				        opt.name);
+					opt.name);
+				adapter->itr_setting = adapter->itr;
+				adapter->itr = 20000;
+				break;
+			case 3:
+				DPRINTK(PROBE, INFO,
+				        "%s set to dynamic conservative mode\n",
+					opt.name);
+				adapter->itr_setting = adapter->itr;
+				adapter->itr = 20000;
 				break;
 			default:
 				e1000_validate_option(&adapter->itr, &opt,
-				                      adapter);
+				        adapter);
+				/* save the setting, because the dynamic bits change itr */
+				adapter->itr_setting = adapter->itr;
 				break;
 			}
 		} else {
-			adapter->itr = opt.def;
+			adapter->itr_setting = opt.def;
+			adapter->itr = 20000;
 		}
 	}
 	{ /* Smart Power Down */
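The e1000_param.c hunk above re-purposes InterruptThrottleRate: 1 and the new default 3 select the dynamic modes and seed the working rate at 20000 ints/s, while an explicit value stays a fixed rate.  A rough sketch of that mapping follows; the struct name is hypothetical and the behaviour for 0 (throttling off) is assumed, since that branch is not shown in the hunk.

struct itr_cfg {
	unsigned int itr_setting;	/* what the user asked for */
	unsigned int itr;		/* working rate in ints/s */
};

static void apply_itr_option(struct itr_cfg *cfg, unsigned int opt)
{
	switch (opt) {
	case 0:				/* throttling off (assumed) */
		cfg->itr_setting = 0;
		cfg->itr = 0;
		break;
	case 1:				/* dynamic */
	case 3:				/* dynamic conservative (default) */
		cfg->itr_setting = opt;
		cfg->itr = 20000;	/* starting rate until stats kick in */
		break;
	default:			/* fixed rate, 100..100000 */
		cfg->itr_setting = opt;
		cfg->itr = opt;
		break;
	}
}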
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index d39e848..c62d9c6 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -463,7 +463,7 @@
 	release_region(dev->base_addr, E21_IO_EXTENT);
 }
 
-void
+void __exit
 cleanup_module(void)
 {
 	int this_dev;
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index a4eb0dc..b446309 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1827,7 +1827,7 @@
 	return n_eepro ? 0 : -ENODEV;
 }
 
-void
+void __exit
 cleanup_module(void)
 {
 	int i;
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index e14be02..4a50fcb 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1719,7 +1719,7 @@
 	return -ENXIO;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	int this_dev;
 
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 6ad6961..83fa32f 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2224,11 +2224,12 @@
 	return ret;
 }
 
-static void ehea_reset_port(void *data)
+static void ehea_reset_port(struct work_struct *work)
 {
 	int ret;
-	struct net_device *dev = data;
-	struct ehea_port *port = netdev_priv(dev);
+	struct ehea_port *port =
+		container_of(work, struct ehea_port, reset_task);
+	struct net_device *dev = port->netdev;
 
 	port->resets++;
 	down(&port->port_lock);
@@ -2379,7 +2380,7 @@
 	dev->tx_timeout = &ehea_tx_watchdog;
 	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
 
-	INIT_WORK(&port->reset_task, ehea_reset_port, dev);
+	INIT_WORK(&port->reset_task, ehea_reset_port);
 
 	ehea_set_ethtool_ops(dev);
 
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 72ef7bd..f143e13 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -26,6 +26,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/mm.h>
 #include "ehea.h"
 #include "ehea_phyp.h"
 #include "ehea_qmr.h"
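Both the e1000_reset_task and ehea_reset_port hunks adapt to the workqueue API change in this kernel: INIT_WORK() no longer carries a data pointer, so handlers take the work_struct itself and recover their private state with container_of().  A generic sketch of the pattern, with illustrative names:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct example_port {
	struct net_device *netdev;
	struct work_struct reset_task;
};

static void example_reset_task(struct work_struct *work)
{
	struct example_port *port =
		container_of(work, struct example_port, reset_task);

	/* reinitialize port->netdev here */
}

static void example_port_setup(struct example_port *port)
{
	/* only the handler is registered; no data argument any more */
	INIT_WORK(&port->reset_task, example_reset_task);
}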
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
index fd7b32a..2d2ea94 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/es3210.c
@@ -455,7 +455,7 @@
 	iounmap(ei_status.mem);
 }
 
-void
+void __exit
 cleanup_module(void)
 {
 	int this_dev;
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index b7b8bc2..93283e3 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1475,7 +1475,7 @@
 	return -ENXIO;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	int this_dev;
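Several of the old ISA drivers here (e2100, eepro, eexpress, es3210, eth16i) gain the __exit annotation on cleanup_module() so the exit path can be discarded when the code is built in.  A minimal module skeleton using the same section annotations, purely for illustration (modern module_init/module_exit style rather than the legacy cleanup_module name):

#include <linux/init.h>
#include <linux/module.h>

static int __init example_init(void)
{
	/* probe and register devices here */
	return 0;
}

static void __exit example_exit(void)
{
	/* unregister devices; discarded for built-in code */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");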
 
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index c5ed635..439f413 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -110,6 +110,8 @@
  *	0.55: 22 Mar 2006: Add flow control (pause frame).
  *	0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
  *	0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
+ *	0.58: 30 Oct 2006: Added support for sideband management unit.
+ *	0.59: 30 Oct 2006: Added support for recoverable error.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -126,7 +128,7 @@
 #else
 #define DRIVERNAPI
 #endif
-#define FORCEDETH_VERSION		"0.57"
+#define FORCEDETH_VERSION		"0.59"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -174,11 +176,12 @@
 #define DEV_HAS_PAUSEFRAME_TX   0x0200  /* device supports tx pause frames */
 #define DEV_HAS_STATISTICS      0x0400  /* device supports hw statistics */
 #define DEV_HAS_TEST_EXTENDED   0x0800  /* device supports extended diagnostic test */
+#define DEV_HAS_MGMT_UNIT       0x1000  /* device supports management unit */
 
 enum {
 	NvRegIrqStatus = 0x000,
 #define NVREG_IRQSTAT_MIIEVENT	0x040
-#define NVREG_IRQSTAT_MASK		0x1ff
+#define NVREG_IRQSTAT_MASK		0x81ff
 	NvRegIrqMask = 0x004,
 #define NVREG_IRQ_RX_ERROR		0x0001
 #define NVREG_IRQ_RX			0x0002
@@ -189,15 +192,16 @@
 #define NVREG_IRQ_LINK			0x0040
 #define NVREG_IRQ_RX_FORCED		0x0080
 #define NVREG_IRQ_TX_FORCED		0x0100
+#define NVREG_IRQ_RECOVER_ERROR		0x8000
 #define NVREG_IRQMASK_THROUGHPUT	0x00df
 #define NVREG_IRQMASK_CPU		0x0040
 #define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
 #define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
-#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK)
+#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
 
 #define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
 					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
-					NVREG_IRQ_TX_FORCED))
+					NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))
 
 	NvRegUnknownSetupReg6 = 0x008,
 #define NVREG_UNKSETUP6_VAL		3
@@ -222,6 +226,15 @@
 #define NVREG_MAC_RESET_ASSERT	0x0F3
 	NvRegTransmitterControl = 0x084,
 #define NVREG_XMITCTL_START	0x01
+#define NVREG_XMITCTL_MGMT_ST	0x40000000
+#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
+#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
+#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
+#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
+#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
+#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
+#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
+#define NVREG_XMITCTL_HOST_LOADED	0x00004000
 	NvRegTransmitterStatus = 0x088,
 #define NVREG_XMITSTAT_BUSY	0x01
 
@@ -304,8 +317,8 @@
 #define NVREG_MIISTAT_LINKCHANGE	0x0008
 #define NVREG_MIISTAT_MASK		0x000f
 #define NVREG_MIISTAT_MASK2		0x000f
-	NvRegUnknownSetupReg4 = 0x184,
-#define NVREG_UNKSETUP4_VAL	8
+	NvRegMIIMask = 0x184,
+#define NVREG_MII_LINKCHANGE		0x0008
 
 	NvRegAdapterControl = 0x188,
 #define NVREG_ADAPTCTL_START	0x02
@@ -707,6 +720,7 @@
 	unsigned int phy_model;
 	u16 gigabit;
 	int intr_test;
+	int recover_error;
 
 	/* General data: RO fields */
 	dma_addr_t ring_addr;
@@ -719,6 +733,7 @@
 	u32 driver_data;
 	u32 register_size;
 	int rx_csum;
+	u32 mac_in_use;
 
 	void __iomem *base;
 
@@ -2443,6 +2458,23 @@
 			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
 						dev->name, events);
 		}
+		if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
+			spin_lock(&np->lock);
+			/* disable interrupts on the nic */
+			if (!(np->msi_flags & NV_MSI_X_ENABLED))
+				writel(0, base + NvRegIrqMask);
+			else
+				writel(np->irqmask, base + NvRegIrqMask);
+			pci_push(base);
+
+			if (!np->in_shutdown) {
+				np->nic_poll_irq = np->irqmask;
+				np->recover_error = 1;
+				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
+			spin_unlock(&np->lock);
+			break;
+		}
 #ifdef CONFIG_FORCEDETH_NAPI
 		if (events & NVREG_IRQ_RX_ALL) {
 			netif_rx_schedule(dev);
@@ -2673,6 +2705,20 @@
 			spin_unlock_irqrestore(&np->lock, flags);
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
+		if (events & NVREG_IRQ_RECOVER_ERROR) {
+			spin_lock_irq(&np->lock);
+			/* disable interrupts on the nic */
+			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
+			pci_push(base);
+
+			if (!np->in_shutdown) {
+				np->nic_poll_irq |= NVREG_IRQ_OTHER;
+				np->recover_error = 1;
+				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
+			spin_unlock_irq(&np->lock);
+			break;
+		}
 		if (events & (NVREG_IRQ_UNKNOWN)) {
 			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
 						dev->name, events);
@@ -2902,6 +2948,42 @@
 	}
 	np->nic_poll_irq = 0;
 
+	if (np->recover_error) {
+		np->recover_error = 0;
+		printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
+		if (netif_running(dev)) {
+			netif_tx_lock_bh(dev);
+			spin_lock(&np->lock);
+			/* stop engines */
+			nv_stop_rx(dev);
+			nv_stop_tx(dev);
+			nv_txrx_reset(dev);
+			/* drain rx queue */
+			nv_drain_rx(dev);
+			nv_drain_tx(dev);
+			/* reinit driver view of the rx queue */
+			set_bufsize(dev);
+			if (nv_init_ring(dev)) {
+				if (!np->in_shutdown)
+					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+			}
+			/* reinit nic view of the rx queue */
+			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+				base + NvRegRingSizes);
+			pci_push(base);
+			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+			pci_push(base);
+
+			/* restart rx engine */
+			nv_start_rx(dev);
+			nv_start_tx(dev);
+			spin_unlock(&np->lock);
+			netif_tx_unlock_bh(dev);
+		}
+	}
+
 	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
 
 	writel(mask, base + NvRegIrqMask);
@@ -4030,6 +4112,54 @@
 	/* nothing to do */
 };
 
+/* The mgmt unit and driver use a semaphore to access the phy during init */
+static int nv_mgmt_acquire_sema(struct net_device *dev)
+{
+	u8 __iomem *base = get_hwbase(dev);
+	int i;
+	u32 tx_ctrl, mgmt_sema;
+
+	for (i = 0; i < 10; i++) {
+		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
+		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
+			break;
+		msleep(500);
+	}
+
+	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
+		return 0;
+
+	for (i = 0; i < 2; i++) {
+		tx_ctrl = readl(base + NvRegTransmitterControl);
+		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
+		writel(tx_ctrl, base + NvRegTransmitterControl);
+
+		/* verify that semaphore was acquired */
+		tx_ctrl = readl(base + NvRegTransmitterControl);
+		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
+		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
+			return 1;
+		else
+			udelay(50);
+	}
+
+	return 0;
+}
+
+/* Indicate to mgmt unit whether driver is loaded or not */
+static void nv_mgmt_driver_loaded(struct net_device *dev, int loaded)
+{
+	u8 __iomem *base = get_hwbase(dev);
+	u32 tx_ctrl;
+
+	tx_ctrl = readl(base + NvRegTransmitterControl);
+	if (loaded)
+		tx_ctrl |= NVREG_XMITCTL_HOST_LOADED;
+	else
+		tx_ctrl &= ~NVREG_XMITCTL_HOST_LOADED;
+	writel(tx_ctrl, base + NvRegTransmitterControl);
+}
+
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -4085,7 +4215,7 @@
 			NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
 			KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
 
-	writel(0, base + NvRegUnknownSetupReg4);
+	writel(0, base + NvRegMIIMask);
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
 
@@ -4111,7 +4241,7 @@
 	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
 			base + NvRegAdapterControl);
 	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
-	writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
+	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
 	if (np->wolenabled)
 		writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
 
@@ -4230,6 +4360,8 @@
 	u8 __iomem *base;
 	int err, i;
 	u32 powerstate, txreg;
+	u32 phystate_orig = 0, phystate;
+	int phyinitialized = 0;
 
 	dev = alloc_etherdev(sizeof(struct fe_priv));
 	err = -ENOMEM;
@@ -4514,6 +4646,48 @@
 		np->need_linktimer = 0;
 	}
 
+	/* clear phy state and temporarily halt phy interrupts */
+	writel(0, base + NvRegMIIMask);
+	phystate = readl(base + NvRegAdapterControl);
+	if (phystate & NVREG_ADAPTCTL_RUNNING) {
+		phystate_orig = 1;
+		phystate &= ~NVREG_ADAPTCTL_RUNNING;
+		writel(phystate, base + NvRegAdapterControl);
+	}
+	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+
+	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
+		writel(0x1, base + 0x204); pci_push(base);
+		msleep(500);
+		/* management unit running on the mac? */
+		np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
+		if (np->mac_in_use) {
+			u32 mgmt_sync;
+			/* management unit setup the phy already? */
+			mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK;
+			if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) {
+				if (!nv_mgmt_acquire_sema(dev)) {
+					for (i = 0; i < 5000; i++) {
+						msleep(1);
+						mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK;
+						if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY)
+							continue;
+						if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT)
+							phyinitialized = 1;
+						break;
+					}
+				} else {
+					/* we need to init the phy */
+				}
+			} else if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) {
+				/* phy is inited by SMU */
+				phyinitialized = 1;
+			} else {
+				/* we need to init the phy */
+			}
+		}
+	}
+
 	/* find a suitable phy */
 	for (i = 1; i <= 32; i++) {
 		int id1, id2;
@@ -4545,8 +4719,14 @@
 		goto out_error;
 	}
 
-	/* reset it */
-	phy_init(dev);
+	if (!phyinitialized) {
+		/* reset it */
+		phy_init(dev);
+	}
+
+	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
+		nv_mgmt_driver_loaded(dev, 1);
+	}
 
 	/* set default link speed settings */
 	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
@@ -4565,6 +4745,10 @@
 	return 0;
 
 out_error:
+	if (phystate_orig)
+		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
+	if (np->mac_in_use)
+		nv_mgmt_driver_loaded(dev, 0);
 	pci_set_drvdata(pci_dev, NULL);
 out_freering:
 	free_rings(dev);
@@ -4594,6 +4778,9 @@
 	writel(np->orig_mac[0], base + NvRegMacAddrA);
 	writel(np->orig_mac[1], base + NvRegMacAddrB);
 
+	if (np->mac_in_use)
+		nv_mgmt_driver_loaded(dev, 0);
+
 	/* free all structures */
 	free_rings(dev);
 	iounmap(get_hwbase(dev));
@@ -4603,6 +4790,50 @@
 	pci_set_drvdata(pci_dev, NULL);
 }
 
+#ifdef CONFIG_PM
+static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct fe_priv *np = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		goto out;
+
+	netif_device_detach(dev);
+
+	// Gross.
+	nv_close(dev);
+
+	pci_save_state(pdev);
+	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+out:
+	return 0;
+}
+
+static int nv_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	int rc = 0;
+
+	if (!netif_running(dev))
+		goto out;
+
+	netif_device_attach(dev);
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	pci_enable_wake(pdev, PCI_D0, 0);
+
+	rc = nv_open(dev);
+out:
+	return rc;
+}
+#else
+#define nv_suspend NULL
+#define nv_resume NULL
+#endif /* CONFIG_PM */
+
 static struct pci_device_id pci_tbl[] = {
 	{	/* nForce Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
@@ -4658,43 +4889,59 @@
 	},
 	{	/* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
 	},
 	{	/* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
 	},
 	{	/* MCP61 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
 	},
 	{	/* MCP61 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
 	},
 	{	/* MCP61 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
 	},
 	{	/* MCP61 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
 	},
 	{	/* MCP65 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
 	},
 	{	/* MCP65 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
 	},
 	{	/* MCP65 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
 	},
 	{	/* MCP65 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+	},
+	{	/* MCP67 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+	},
+	{	/* MCP67 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+	},
+	{	/* MCP67 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+	},
+	{	/* MCP67 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
 	},
 	{0,},
 };
@@ -4704,9 +4951,10 @@
 	.id_table = pci_tbl,
 	.probe = nv_probe,
 	.remove = __devexit_p(nv_remove),
+	.suspend = nv_suspend,
+	.resume	= nv_resume,
 };
 
-
 static int __init init_nic(void)
 {
 	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index cb39587..889d3a1 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -779,7 +779,8 @@
 	fep->oldspeed = 0;
 	fep->oldduplex = -1;
 	if(fep->fpi->bus_id)
-		phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0);
+		phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0,
+				PHY_INTERFACE_MODE_MII);
 	else {
 		printk("No phy bus ID specified in BSP code\n");
 		return -EINVAL;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index a06d8d1..baa3514 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -9,7 +9,7 @@
  * Author: Andy Fleming
  * Maintainer: Kumar Gala
  *
- * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
+ * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -133,6 +133,9 @@
 #ifdef CONFIG_GFAR_NAPI
 static int gfar_poll(struct net_device *dev, int *budget);
 #endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void gfar_netpoll(struct net_device *dev);
+#endif
 int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
 static void gfar_vlan_rx_register(struct net_device *netdev,
@@ -260,6 +263,9 @@
 	dev->poll = gfar_poll;
 	dev->weight = GFAR_DEV_WEIGHT;
 #endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = gfar_netpoll;
+#endif
 	dev->stop = gfar_close;
 	dev->get_stats = gfar_get_stats;
 	dev->change_mtu = gfar_change_mtu;
@@ -392,6 +398,38 @@
 }
 
 
+/* Reads the controller's registers to determine what interface
+ * connects it to the PHY.
+ */
+static phy_interface_t gfar_get_interface(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	u32 ecntrl = gfar_read(&priv->regs->ecntrl);
+
+	if (ecntrl & ECNTRL_SGMII_MODE)
+		return PHY_INTERFACE_MODE_SGMII;
+
+	if (ecntrl & ECNTRL_TBI_MODE) {
+		if (ecntrl & ECNTRL_REDUCED_MODE)
+			return PHY_INTERFACE_MODE_RTBI;
+		else
+			return PHY_INTERFACE_MODE_TBI;
+	}
+
+	if (ecntrl & ECNTRL_REDUCED_MODE) {
+		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
+			return PHY_INTERFACE_MODE_RMII;
+		else
+			return PHY_INTERFACE_MODE_RGMII;
+	}
+
+	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
+		return PHY_INTERFACE_MODE_GMII;
+
+	return PHY_INTERFACE_MODE_MII;
+}
+
+
 /* Initializes driver's PHY state, and attaches to the PHY.
  * Returns 0 on success.
  */
@@ -403,6 +441,7 @@
 		SUPPORTED_1000baseT_Full : 0;
 	struct phy_device *phydev;
 	char phy_id[BUS_ID_SIZE];
+	phy_interface_t interface;
 
 	priv->oldlink = 0;
 	priv->oldspeed = 0;
@@ -410,7 +449,9 @@
 
 	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);
 
-	phydev = phy_connect(dev, phy_id, &adjust_link, 0);
+	interface = gfar_get_interface(dev);
+
+	phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface);
 
 	if (IS_ERR(phydev)) {
 		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
@@ -1536,6 +1577,33 @@
 }
 #endif
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void gfar_netpoll(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+
+	/* If the device has multiple interrupts, run tx/rx */
+	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+		disable_irq(priv->interruptTransmit);
+		disable_irq(priv->interruptReceive);
+		disable_irq(priv->interruptError);
+		gfar_interrupt(priv->interruptTransmit, dev);
+		enable_irq(priv->interruptError);
+		enable_irq(priv->interruptReceive);
+		enable_irq(priv->interruptTransmit);
+	} else {
+		disable_irq(priv->interruptTransmit);
+		gfar_interrupt(priv->interruptTransmit, dev);
+		enable_irq(priv->interruptTransmit);
+	}
+}
+#endif
+
 /* The interrupt handler for devices with one interrupt */
 static irqreturn_t gfar_interrupt(int irq, void *dev_id)
 {
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 9e81a50..39e9e32 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -160,7 +160,10 @@
 
 #define ECNTRL_INIT_SETTINGS	0x00001000
 #define ECNTRL_TBI_MODE         0x00000020
+#define ECNTRL_REDUCED_MODE	0x00000010
 #define ECNTRL_R100		0x00000008
+#define ECNTRL_REDUCED_MII_MODE	0x00000004
+#define ECNTRL_SGMII_MODE	0x00000002
 
 #define MRBLR_INIT_SETTINGS	DEFAULT_RX_BUFFER_SIZE
 
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 1ed9ccc..3c33d6f 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -168,8 +168,9 @@
 	int magic;
 
         struct pardevice *pdev;
+	struct net_device *dev;
 	unsigned int work_running;
-	struct work_struct run_work;
+	struct delayed_work run_work;
 	unsigned int modem;
 	unsigned int bitrate;
 	unsigned char stat;
@@ -659,16 +660,18 @@
 #define GETTICK(x)
 #endif /* __i386__ */
 
-static void epp_bh(struct net_device *dev)
+static void epp_bh(struct work_struct *work)
 {
+	struct net_device *dev;
 	struct baycom_state *bc;
 	struct parport *pp;
 	unsigned char stat;
 	unsigned char tmp[2];
 	unsigned int time1 = 0, time2 = 0, time3 = 0;
 	int cnt, cnt2;
-	
-	bc = netdev_priv(dev);
+
+	bc = container_of(work, struct baycom_state, run_work.work);
+	dev = bc->dev;
 	if (!bc->work_running)
 		return;
 	baycom_int_freq(bc);
@@ -889,7 +892,7 @@
                 return -EBUSY;
         }
         dev->irq = /*pp->irq*/ 0;
-	INIT_WORK(&bc->run_work, (void *)(void *)epp_bh, dev);
+	INIT_DELAYED_WORK(&bc->run_work, epp_bh);
 	bc->work_running = 1;
 	bc->modem = EPP_CONVENTIONAL;
 	if (eppconfig(bc))
@@ -1213,6 +1216,7 @@
 	/*
 	 * initialize part of the baycom_state struct
 	 */
+	bc->dev = dev;
 	bc->magic = BAYCOM_MAGIC;
 	bc->cfg.fclk = 19666600;
 	bc->cfg.bps = 9600;
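The baycom_epp conversion above (and the same pattern in dmascc, mcs7780, sir_dev, iseries_veth and ixgb further down) moves to the workqueue convention where the handler receives a struct work_struct * and recovers its private data with container_of(), and periodic work is kept in a struct delayed_work. A minimal sketch of the pattern, with hypothetical foo_ names standing in for baycom_state and epp_bh:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo_priv {
	int work_running;
	struct delayed_work run_work;	/* replaces the old struct work_struct */
};

static void foo_bh(struct work_struct *work)
{
	/* run_work.work is the work_struct embedded in the delayed_work */
	struct foo_priv *p = container_of(work, struct foo_priv, run_work.work);

	if (!p->work_running)
		return;
	/* ... do the periodic work using p ... */
	schedule_delayed_work(&p->run_work, HZ / 100);	/* re-arm */
}

static void foo_start(struct foo_priv *p)
{
	INIT_DELAYED_WORK(&p->run_work, foo_bh);	/* no data argument any more */
	p->work_running = 1;
	schedule_delayed_work(&p->run_work, 0);		/* run as soon as possible */
}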
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 0f8b9af..e6e721a 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -252,7 +252,7 @@
 static irqreturn_t scc_isr(int irq, void *dev_id);
 static void rx_isr(struct scc_priv *priv);
 static void special_condition(struct scc_priv *priv, int rc);
-static void rx_bh(void *arg);
+static void rx_bh(struct work_struct *);
 static void tx_isr(struct scc_priv *priv);
 static void es_isr(struct scc_priv *priv);
 static void tm_isr(struct scc_priv *priv);
@@ -579,7 +579,7 @@
 		priv->param.clocks = TCTRxCP | RCRTxCP;
 		priv->param.persist = 256;
 		priv->param.dma = -1;
-		INIT_WORK(&priv->rx_work, rx_bh, priv);
+		INIT_WORK(&priv->rx_work, rx_bh);
 		dev->priv = priv;
 		sprintf(dev->name, "dmascc%i", 2 * n + i);
 		dev->base_addr = card_base;
@@ -1272,9 +1272,9 @@
 }
 
 
-static void rx_bh(void *arg)
+static void rx_bh(struct work_struct *ugli_api)
 {
-	struct scc_priv *priv = arg;
+	struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
 	int i = priv->rx_tail;
 	int cb;
 	unsigned long flags;
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index 6abcfd2..99a36cc 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -482,7 +482,7 @@
 	release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
 }
 
-void
+void __exit
 cleanup_module(void)
 {
 	int this_dev;
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index 2947097..635b13c 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -444,7 +444,7 @@
 	release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
 }
 
-void
+void __exit
 cleanup_module(void)
 {
 	int this_dev;
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index 91326ea..f970bfb 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -31,7 +31,16 @@
 #include <asm/amigahw.h>
 #include <linux/zorro.h>
 
-#include "8390.h"
+#define EI_SHIFT(x)	(ei_local->reg_offset[x])
+#define ei_inb(port)   in_8(port)
+#define ei_outb(val,port)  out_8(port,val)
+#define ei_inb_p(port)   in_8(port)
+#define ei_outb_p(val,port)  out_8(port,val)
+
+static const char version[] =
+    "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include "lib8390.c"
 
 #define NE_EN0_DCFG     (0x0e*2)
 
@@ -100,7 +109,7 @@
 	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
     };
 
-    dev = alloc_ei_netdev();
+    dev = ____alloc_ei_netdev(0);
     if (!dev)
 	return -ENOMEM;
     SET_MODULE_OWNER(dev);
@@ -117,7 +126,7 @@
     dev->irq = IRQ_AMIGA_PORTS;
 
     /* Install the Interrupt handler */
-    if (request_irq(IRQ_AMIGA_PORTS, ei_interrupt, IRQF_SHARED, "Hydra Ethernet",
+    if (request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, IRQF_SHARED, "Hydra Ethernet",
 		    dev)) {
 	free_netdev(dev);
 	return -EAGAIN;
@@ -139,10 +148,10 @@
     dev->open = &hydra_open;
     dev->stop = &hydra_close;
 #ifdef CONFIG_NET_POLL_CONTROLLER
-    dev->poll_controller = ei_poll;
+    dev->poll_controller = __ei_poll;
 #endif
 
-    NS8390_init(dev, 0);
+    __NS8390_init(dev, 0);
 
     err = register_netdev(dev);
     if (err) {
@@ -164,7 +173,7 @@
 
 static int hydra_open(struct net_device *dev)
 {
-    ei_open(dev);
+    __ei_open(dev);
     return 0;
 }
 
@@ -172,7 +181,7 @@
 {
     if (ei_debug > 1)
 	printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
-    ei_close(dev);
+    __ei_close(dev);
     return 0;
 }
 
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h
index f73f10a..407d2ac 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.h
+++ b/drivers/net/ibm_emac/ibm_emac_mal.h
@@ -24,6 +24,7 @@
 #include <linux/netdevice.h>
 
 #include <asm/io.h>
+#include <asm/dcr.h>
 
 /*
  * These MAL "versions" probably aren't the real versions IBM uses for these 
@@ -191,6 +192,7 @@
 
 struct ibm_ocp_mal {
 	int			dcrbase;
+	dcr_host_t		dcrhost;
 
 	struct list_head	poll_list;
 	struct net_device	poll_dev;
@@ -207,12 +209,12 @@
 
 static inline u32 get_mal_dcrn(struct ibm_ocp_mal *mal, int reg)
 {
-	return mfdcr(mal->dcrbase + reg);
+	return dcr_read(mal->dcrhost, mal->dcrbase + reg);
 }
 
 static inline void set_mal_dcrn(struct ibm_ocp_mal *mal, int reg, u32 val)
 {
-	mtdcr(mal->dcrbase + reg, val);
+	dcr_write(mal->dcrhost, mal->dcrbase + reg, val);
 }
 
 /* Register MAL devices */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 44c9f99..99343b5 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -50,7 +50,6 @@
 #include <asm/semaphore.h>
 #include <asm/hvcall.h>
 #include <asm/atomic.h>
-#include <asm/iommu.h>
 #include <asm/vio.h>
 #include <asm/uaccess.h>
 #include <linux/seq_file.h>
@@ -1000,8 +999,6 @@
 	adapter->mac_addr = 0;
 	memcpy(&adapter->mac_addr, mac_addr_p, 6);
 
-	adapter->liobn = dev->iommu_table->it_index;
-
 	netdev->irq = dev->irq;
 	netdev->open               = ibmveth_open;
 	netdev->poll               = ibmveth_poll;
@@ -1115,7 +1112,6 @@
 	seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
 
 	seq_printf(seq, "Unit Address:    0x%x\n", adapter->vdev->unit_address);
-	seq_printf(seq, "LIOBN:           0x%lx\n", adapter->liobn);
 	seq_printf(seq, "Current MAC:     %02X:%02X:%02X:%02X:%02X:%02X\n",
 		   current_mac[0], current_mac[1], current_mac[2],
 		   current_mac[3], current_mac[4], current_mac[5]);
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index f5b25bf..bb69cca 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -118,7 +118,6 @@
     struct net_device_stats stats;
     unsigned int mcastFilterSize;
     unsigned long mac_addr;
-    unsigned long liobn;
     void * buffer_list_addr;
     void * filter_list_addr;
     dma_addr_t buffer_list_dma;
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index f56b00e..f0d30cf 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -57,7 +57,6 @@
 #include <net/ip.h>
 
 #include <asm/byteorder.h>
-#include <asm/checksum.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 14bda76..6e95645 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1793,10 +1793,8 @@
 err_out_2:
 	usb_free_urb(self->tx_urb);
 err_out_1:
-	for (i = 0; i < self->max_rx_urb; i++) {
-		if (self->rx_urb[i])
-			usb_free_urb(self->rx_urb[i]);
-	}
+	for (i = 0; i < self->max_rx_urb; i++)
+		usb_free_urb(self->rx_urb[i]);
 	free_netdev(net);
 err_out:
 	return ret;
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index b32c52e..f0c61f3 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -560,9 +560,9 @@
 	return ret;
 }
 
-static void mcs_speed_work(void *arg)
+static void mcs_speed_work(struct work_struct *work)
 {
-	struct mcs_cb *mcs = arg;
+	struct mcs_cb *mcs = container_of(work, struct mcs_cb, work);
 	struct net_device *netdev = mcs->netdev;
 
 	mcs_speed_change(mcs);
@@ -927,7 +927,7 @@
 	irda_qos_bits_to_value(&mcs->qos);
 
 	/* Speed change work initialisation*/
-	INIT_WORK(&mcs->work, mcs_speed_work, mcs);
+	INIT_WORK(&mcs->work, mcs_speed_work);
 
 	/* Override the network functions we need to use */
 	ndev->hard_start_xmit = mcs_hard_xmit;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index f9a1c88..9137e23 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -704,9 +704,9 @@
 	return 0;
 }
 
-static int pxa_irda_suspend(struct device *_dev, pm_message_t state)
+static int pxa_irda_suspend(struct platform_device *_dev, pm_message_t state)
 {
-	struct net_device *dev = dev_get_drvdata(_dev);
+	struct net_device *dev = platform_get_drvdata(_dev);
 	struct pxa_irda *si;
 
 	if (dev && netif_running(dev)) {
@@ -718,9 +718,9 @@
 	return 0;
 }
 
-static int pxa_irda_resume(struct device *_dev)
+static int pxa_irda_resume(struct platform_device *_dev)
 {
-	struct net_device *dev = dev_get_drvdata(_dev);
+	struct net_device *dev = platform_get_drvdata(_dev);
 	struct pxa_irda *si;
 
 	if (dev && netif_running(dev)) {
@@ -746,9 +746,8 @@
 	return io->head ? 0 : -ENOMEM;
 }
 
-static int pxa_irda_probe(struct device *_dev)
+static int pxa_irda_probe(struct platform_device *pdev)
 {
-	struct platform_device *pdev = to_platform_device(_dev);
 	struct net_device *dev;
 	struct pxa_irda *si;
 	unsigned int baudrate_mask;
@@ -822,9 +821,9 @@
 	return err;
 }
 
-static int pxa_irda_remove(struct device *_dev)
+static int pxa_irda_remove(struct platform_device *_dev)
 {
-	struct net_device *dev = dev_get_drvdata(_dev);
+	struct net_device *dev = platform_get_drvdata(_dev);
 
 	if (dev) {
 		struct pxa_irda *si = netdev_priv(dev);
@@ -840,9 +839,10 @@
 	return 0;
 }
 
-static struct device_driver pxa_ir_driver = {
-	.name		= "pxa2xx-ir",
-	.bus		= &platform_bus_type,
+static struct platform_driver pxa_ir_driver = {
+	.driver         = {
+		.name   = "pxa2xx-ir",
+	},
 	.probe		= pxa_irda_probe,
 	.remove		= pxa_irda_remove,
 	.suspend	= pxa_irda_suspend,
@@ -851,12 +851,12 @@
 
 static int __init pxa_irda_init(void)
 {
-	return driver_register(&pxa_ir_driver);
+	return platform_driver_register(&pxa_ir_driver);
 }
 
 static void __exit pxa_irda_exit(void)
 {
-	driver_unregister(&pxa_ir_driver);
+	platform_driver_unregister(&pxa_ir_driver);
 }
 
 module_init(pxa_irda_init);
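The pxaficp_ir change above converts a raw struct device_driver into a struct platform_driver, so probe/remove/suspend/resume take a struct platform_device * directly and registration goes through platform_driver_register(). A minimal skeleton of that shape, with hypothetical foo_ names in place of pxa_ir_driver and pxa_irda_*:

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	platform_set_drvdata(pdev, NULL);	/* stash private data for later callbacks */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	void *priv = platform_get_drvdata(pdev);	/* fetched from the device, no to_platform_device() needed */

	(void)priv;	/* tear down whatever probe set up */
	return 0;
}

static struct platform_driver foo_driver = {
	.driver	= { .name = "foo" },	/* still wraps a device_driver underneath */
	.probe	= foo_probe,
	.remove	= foo_remove,
};
/* registered from module init with platform_driver_register(&foo_driver) */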
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index 9fa294a..2a57bc6 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -22,7 +22,7 @@
 
 struct sir_fsm {
 	struct semaphore	sem;
-	struct work_struct      work;
+	struct delayed_work	work;
 	unsigned		state, substate;
 	int			param;
 	int			result;
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 3b5854d..17b0c3a 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -100,9 +100,9 @@
  * Both must be unlocked/restarted on completion - but only on final exit.
  */
 
-static void sirdev_config_fsm(void *data)
+static void sirdev_config_fsm(struct work_struct *work)
 {
-	struct sir_dev *dev = data;
+	struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
 	struct sir_fsm *fsm = &dev->fsm;
 	int next_state;
 	int ret = -1;
@@ -309,8 +309,8 @@
 	fsm->param = param;
 	fsm->result = 0;
 
-	INIT_WORK(&fsm->work, sirdev_config_fsm, dev);
-	queue_work(irda_sir_wq, &fsm->work);
+	INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
+	queue_delayed_work(irda_sir_wq, &fsm->work, 0);
 	return 0;
 }
 
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 3b4c478..c14a746 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -50,6 +50,7 @@
 #include <linux/usb.h>
 #include <linux/crc32.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 #include <net/irda/irda.h>
 #include <net/irda/irlap.h>
 #include <net/irda/irda_device.h>
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 2284e2c..d6f4f18 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -166,7 +166,7 @@
 
 struct veth_lpar_connection {
 	HvLpIndex remote_lp;
-	struct work_struct statemachine_wq;
+	struct delayed_work statemachine_wq;
 	struct veth_msg *msgs;
 	int num_events;
 	struct veth_cap_data local_caps;
@@ -456,7 +456,7 @@
 
 static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx)
 {
-	schedule_work(&cnx->statemachine_wq);
+	schedule_delayed_work(&cnx->statemachine_wq, 0);
 }
 
 static void veth_take_cap(struct veth_lpar_connection *cnx,
@@ -638,9 +638,11 @@
 }
 
 /* FIXME: The gotos here are a bit dubious */
-static void veth_statemachine(void *p)
+static void veth_statemachine(struct work_struct *work)
 {
-	struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)p;
+	struct veth_lpar_connection *cnx =
+		container_of(work, struct veth_lpar_connection,
+			     statemachine_wq.work);
 	int rlp = cnx->remote_lp;
 	int rc;
 
@@ -827,7 +829,7 @@
 
 	cnx->remote_lp = rlp;
 	spin_lock_init(&cnx->lock);
-	INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx);
+	INIT_DELAYED_WORK(&cnx->statemachine_wq, veth_statemachine);
 
 	init_timer(&cnx->ack_timer);
 	cnx->ack_timer.function = veth_timed_ack;
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index e09f575..e628126 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -106,7 +106,7 @@
 static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
 void ixgb_set_ethtool_ops(struct net_device *netdev);
 static void ixgb_tx_timeout(struct net_device *dev);
-static void ixgb_tx_timeout_task(struct net_device *dev);
+static void ixgb_tx_timeout_task(struct work_struct *work);
 static void ixgb_vlan_rx_register(struct net_device *netdev,
 				  struct vlan_group *grp);
 static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
@@ -489,8 +489,7 @@
 	adapter->watchdog_timer.function = &ixgb_watchdog;
 	adapter->watchdog_timer.data = (unsigned long)adapter;
 
-	INIT_WORK(&adapter->tx_timeout_task,
-		  (void (*)(void *))ixgb_tx_timeout_task, netdev);
+	INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
 
 	strcpy(netdev->name, "eth%d");
 	if((err = register_netdev(netdev)))
@@ -1249,7 +1248,7 @@
 	if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
 		struct ixgb_buffer *buffer_info;
 		css = skb->h.raw - skb->data;
-		cso = (skb->h.raw + skb->csum) - skb->data;
+		cso = css + skb->csum_offset;
 
 		i = adapter->tx_ring.next_to_use;
 		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
@@ -1493,9 +1492,10 @@
 }
 
 static void
-ixgb_tx_timeout_task(struct net_device *netdev)
+ixgb_tx_timeout_task(struct work_struct *work)
 {
-	struct ixgb_adapter *adapter = netdev_priv(netdev);
+	struct ixgb_adapter *adapter =
+		container_of(work, struct ixgb_adapter, tx_timeout_task);
 
 	adapter->tx_timeout_count++;
 	ixgb_down(adapter, TRUE);
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 6efbd49..a384332 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -57,6 +57,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
+#include <linux/mm.h>
 #include <linux/bitops.h>
 
 #include <asm/io.h>
@@ -367,7 +368,7 @@
 	kfree(lp);
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	int this_dev;
 
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c
index f4d815b..ea392f2 100644
--- a/drivers/net/lasi_82596.c
+++ b/drivers/net/lasi_82596.c
@@ -119,14 +119,14 @@
 #define DEB(x,y)	if (i596_debug & (x)) { y; }
 
 
-#define  CHECK_WBACK(addr,len) \
-	do { dma_cache_sync((void *)addr, len, DMA_TO_DEVICE); } while (0)
+#define  CHECK_WBACK(priv, addr,len) \
+	do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_TO_DEVICE); } while (0)
 
-#define  CHECK_INV(addr,len) \
-	do { dma_cache_sync((void *)addr, len, DMA_FROM_DEVICE); } while(0)
+#define  CHECK_INV(priv, addr,len) \
+	do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_FROM_DEVICE); } while(0)
 
-#define  CHECK_WBACK_INV(addr,len) \
-	do { dma_cache_sync((void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
+#define  CHECK_WBACK_INV(priv, addr,len) \
+	do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
 
 
 #define PA_I82596_RESET		0	/* Offsets relative to LASI-LAN-Addr.*/
@@ -449,10 +449,10 @@
 
 static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
 {
-	CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
+	CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
 	while (--delcnt && lp->iscp.stat) {
 		udelay(10);
-		CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
+		CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
 	}
 	if (!delcnt) {
 		printk("%s: %s, iscp.stat %04x, didn't clear\n",
@@ -466,10 +466,10 @@
 
 static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
 {
-	CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
+	CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
 	while (--delcnt && lp->scb.command) {
 		udelay(10);
-		CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
+		CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
 	}
 	if (!delcnt) {
 		printk("%s: %s, status %4.4x, cmd %4.4x.\n",
@@ -522,7 +522,7 @@
 			rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
 		rbd = rbd->v_next;
 	} while (rbd != lp->rbd_head);
-	CHECK_INV(lp, sizeof(struct i596_private));
+	CHECK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 
@@ -592,7 +592,7 @@
 	rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
 	rfd->cmd = CMD_EOL|CMD_FLEX;
 
-	CHECK_WBACK_INV(lp, sizeof(struct i596_private));
+	CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 static inline void remove_rx_bufs(struct net_device *dev)
@@ -629,7 +629,7 @@
 	lp->rbd_head = lp->rbds;
 	lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));
 
-	CHECK_WBACK_INV(lp, sizeof(struct i596_private));
+	CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 
@@ -663,8 +663,8 @@
 
 	DEB(DEB_INIT, printk("%s: starting i82596.\n", dev->name));
 
-	CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp));
-	CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp));
+	CHECK_WBACK(lp, &(lp->scp), sizeof(struct i596_scp));
+	CHECK_WBACK(lp, &(lp->iscp), sizeof(struct i596_iscp));
 
 	MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp));
 
@@ -678,25 +678,25 @@
 	rebuild_rx_bufs(dev);
 
 	lp->scb.command = 0;
-	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 
 	enable_irq(dev->irq);	/* enable IRQs from LAN */
 
 	DEB(DEB_INIT, printk("%s: queuing CmdConfigure\n", dev->name));
 	memcpy(lp->cf_cmd.i596_config, init_setup, 14);
 	lp->cf_cmd.cmd.command = CmdConfigure;
-	CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd));
+	CHECK_WBACK(lp, &(lp->cf_cmd), sizeof(struct cf_cmd));
 	i596_add_cmd(dev, &lp->cf_cmd.cmd);
 
 	DEB(DEB_INIT, printk("%s: queuing CmdSASetup\n", dev->name));
 	memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
 	lp->sa_cmd.cmd.command = CmdSASetup;
-	CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd));
+	CHECK_WBACK(lp, &(lp->sa_cmd), sizeof(struct sa_cmd));
 	i596_add_cmd(dev, &lp->sa_cmd.cmd);
 
 	DEB(DEB_INIT, printk("%s: queuing CmdTDR\n", dev->name));
 	lp->tdr_cmd.cmd.command = CmdTDR;
-	CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd));
+	CHECK_WBACK(lp, &(lp->tdr_cmd), sizeof(struct tdr_cmd));
 	i596_add_cmd(dev, &lp->tdr_cmd.cmd);
 
 	spin_lock_irqsave (&lp->lock, flags);
@@ -708,7 +708,7 @@
 	DEB(DEB_INIT, printk("%s: Issuing RX_START\n", dev->name));
 	lp->scb.command = RX_START;
 	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
-	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 
 	CA(dev);
 
@@ -740,13 +740,13 @@
 
 	rfd = lp->rfd_head;		/* Ref next frame to check */
 
-	CHECK_INV(rfd, sizeof(struct i596_rfd));
+	CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
 	while ((rfd->stat) & STAT_C) {	/* Loop while complete frames */
 		if (rfd->rbd == I596_NULL)
 			rbd = NULL;
 		else if (rfd->rbd == lp->rbd_head->b_addr) {
 			rbd = lp->rbd_head;
-			CHECK_INV(rbd, sizeof(struct i596_rbd));
+			CHECK_INV(lp, rbd, sizeof(struct i596_rbd));
 		}
 		else {
 			printk("%s: rbd chain broken!\n", dev->name);
@@ -790,7 +790,7 @@
 				dma_addr = dma_map_single(lp->dev, newskb->data, PKT_BUF_SZ, DMA_FROM_DEVICE);
 				rbd->v_data = newskb->data;
 				rbd->b_data = WSWAPchar(dma_addr);
-				CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
+				CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
 			}
 			else
 				skb = dev_alloc_skb(pkt_len + 2);
@@ -842,7 +842,7 @@
 		if (rbd != NULL && (rbd->count & 0x4000)) {
 			rbd->count = 0;
 			lp->rbd_head = rbd->v_next;
-			CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
+			CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
 		}
 
 		/* Tidy the frame descriptor, marking it as end of list */
@@ -860,10 +860,10 @@
 
 		lp->scb.rfd = rfd->b_next;
 		lp->rfd_head = rfd->v_next;
-		CHECK_WBACK_INV(rfd->v_prev, sizeof(struct i596_rfd));
-		CHECK_WBACK_INV(rfd, sizeof(struct i596_rfd));
+		CHECK_WBACK_INV(lp, rfd->v_prev, sizeof(struct i596_rfd));
+		CHECK_WBACK_INV(lp, rfd, sizeof(struct i596_rfd));
 		rfd = lp->rfd_head;
-		CHECK_INV(rfd, sizeof(struct i596_rfd));
+		CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
 	}
 
 	DEB(DEB_RXFRAME, printk("frames %d\n", frames));
@@ -902,12 +902,12 @@
 			ptr->v_next = NULL;
 			ptr->b_next = I596_NULL;
 		}
-		CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd));
+		CHECK_WBACK_INV(lp, ptr, sizeof(struct i596_cmd));
 	}
 
 	wait_cmd(dev, lp, 100, "i596_cleanup_cmd timed out");
 	lp->scb.cmd = I596_NULL;
-	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 }
 
 
@@ -925,7 +925,7 @@
 
 	/* FIXME: this command might cause an lpmc */
 	lp->scb.command = CUC_ABORT | RX_ABORT;
-	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 	CA(dev);
 
 	/* wait for shutdown */
@@ -951,20 +951,20 @@
 	cmd->command |= (CMD_EOL | CMD_INTR);
 	cmd->v_next = NULL;
 	cmd->b_next = I596_NULL;
-	CHECK_WBACK(cmd, sizeof(struct i596_cmd));
+	CHECK_WBACK(lp, cmd, sizeof(struct i596_cmd));
 
 	spin_lock_irqsave (&lp->lock, flags);
 
 	if (lp->cmd_head != NULL) {
 		lp->cmd_tail->v_next = cmd;
 		lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
-		CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd));
+		CHECK_WBACK(lp, lp->cmd_tail, sizeof(struct i596_cmd));
 	} else {
 		lp->cmd_head = cmd;
 		wait_cmd(dev, lp, 100, "i596_add_cmd timed out");
 		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
 		lp->scb.command = CUC_START;
-		CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+		CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 		CA(dev);
 	}
 	lp->cmd_tail = cmd;
@@ -998,12 +998,12 @@
 	data = virt_to_dma(lp,tint);
 
 	tint[1] = -1;
-	CHECK_WBACK(tint,PAGE_SIZE);
+	CHECK_WBACK(lp, tint, PAGE_SIZE);
 
 	MPU_PORT(dev, 1, data);
 
 	for(data = 1000000; data; data--) {
-		CHECK_INV(tint,PAGE_SIZE);
+		CHECK_INV(lp, tint, PAGE_SIZE);
 		if(tint[1] != -1)
 			break;
 
@@ -1061,7 +1061,7 @@
 		/* Issue a channel attention signal */
 		DEB(DEB_ERRORS, printk("Kicking board.\n"));
 		lp->scb.command = CUC_START | RX_START;
-		CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb));
+		CHECK_WBACK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
 		CA (dev);
 		lp->last_restart = lp->stats.tx_packets;
 	}
@@ -1118,8 +1118,8 @@
 		tbd->data = WSWAPchar(tx_cmd->dma_addr);
 
 		DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
-		CHECK_WBACK_INV(tx_cmd, sizeof(struct tx_cmd));
-		CHECK_WBACK_INV(tbd, sizeof(struct i596_tbd));
+		CHECK_WBACK_INV(lp, tx_cmd, sizeof(struct tx_cmd));
+		CHECK_WBACK_INV(lp, tbd, sizeof(struct i596_tbd));
 		i596_add_cmd(dev, &tx_cmd->cmd);
 
 		lp->stats.tx_packets++;
@@ -1228,7 +1228,7 @@
 	lp->dma_addr = dma_addr;
 	lp->dev = gen_dev;
 
-	CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private));
+	CHECK_WBACK_INV(lp, dev->mem_start, sizeof(struct i596_private));
 
 	i = register_netdev(dev);
 	if (i) {
@@ -1295,7 +1295,7 @@
 			DEB(DEB_INTS, printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
 
 		while (lp->cmd_head != NULL) {
-			CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd));
+			CHECK_INV(lp, lp->cmd_head, sizeof(struct i596_cmd));
 			if (!(lp->cmd_head->status & STAT_C))
 				break;
 
@@ -1358,7 +1358,7 @@
 			}
 			ptr->v_next = NULL;
 		        ptr->b_next = I596_NULL;
-			CHECK_WBACK(ptr, sizeof(struct i596_cmd));
+			CHECK_WBACK(lp, ptr, sizeof(struct i596_cmd));
 			lp->last_cmd = jiffies;
 		}
 
@@ -1372,13 +1372,13 @@
 
 			ptr->command &= 0x1fff;
 			ptr = ptr->v_next;
-			CHECK_WBACK_INV(prev, sizeof(struct i596_cmd));
+			CHECK_WBACK_INV(lp, prev, sizeof(struct i596_cmd));
 		}
 
 		if ((lp->cmd_head != NULL))
 			ack_cmd |= CUC_START;
 		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
-		CHECK_WBACK_INV(&lp->scb, sizeof(struct i596_scb));
+		CHECK_WBACK_INV(lp, &lp->scb, sizeof(struct i596_scb));
 	}
 	if ((status & 0x1000) || (status & 0x4000)) {
 		if ((status & 0x4000))
@@ -1397,7 +1397,7 @@
 	}
 	wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
 	lp->scb.command = ack_cmd;
-	CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));
 
 	/* DANGER: I suspect that some kind of interrupt
 	 acknowledgement aside from acking the 82596 might be needed
@@ -1426,7 +1426,7 @@
 
 	wait_cmd(dev, lp, 100, "close1 timed out");
 	lp->scb.command = CUC_ABORT | RX_ABORT;
-	CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
+	CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));
 
 	CA(dev);
 
@@ -1486,7 +1486,7 @@
 			       dev->name);
 		else {
 			lp->cf_cmd.cmd.command = CmdConfigure;
-			CHECK_WBACK_INV(&lp->cf_cmd, sizeof(struct cf_cmd));
+			CHECK_WBACK_INV(lp, &lp->cf_cmd, sizeof(struct cf_cmd));
 			i596_add_cmd(dev, &lp->cf_cmd.cmd);
 		}
 	}
@@ -1514,7 +1514,7 @@
 				DEB(DEB_MULTI, printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
 						dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
 		}
-		CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd));
+		CHECK_WBACK_INV(lp, &lp->mc_cmd, sizeof(struct mc_cmd));
 		i596_add_cmd(dev, &cmd->cmd);
 	}
 }
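The lasi_82596 CHECK_* macros above now pass the device doing the DMA, matching dma_cache_sync()'s extended signature, which takes a struct device * so the right per-device cache maintenance can be chosen. A minimal sketch of a caller, with a hypothetical helper name:

#include <linux/dma-mapping.h>

/* Hypothetical helper; the driver above wraps this call in its CHECK_WBACK macro. */
static void foo_publish_descriptor(struct device *dev, void *desc, size_t len)
{
	/* flush CPU writes so the device sees an up-to-date descriptor */
	dma_cache_sync(dev, desc, len, DMA_TO_DEVICE);
}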
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
new file mode 100644
index 0000000..e726c06
--- /dev/null
+++ b/drivers/net/lib8390.c
@@ -0,0 +1,1097 @@
+/* 8390.c: A general NS8390 ethernet driver core for linux. */
+/*
+	Written 1992-94 by Donald Becker.
+
+	Copyright 1993 United States Government as represented by the
+	Director, National Security Agency.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	The author may be reached as becker@scyld.com, or C/O
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+
+  This is the chip-specific code for many 8390-based ethernet adaptors.
+  This is not a complete driver, it must be combined with board-specific
+  code such as ne.c, wd.c, 3c503.c, etc.
+
+  Seeing how at least eight drivers use this code, (not counting the
+  PCMCIA ones either) it is easy to break some card by what seems like
+  a simple innocent change. Please contact me or Donald if you think
+  you have found something that needs changing. -- PG
+
+
+  Changelog:
+
+  Paul Gortmaker	: remove set_bit lock, other cleanups.
+  Paul Gortmaker	: add ei_get_8390_hdr() so we can pass skb's to
+			  ei_block_input() for eth_io_copy_and_sum().
+  Paul Gortmaker	: exchange static int ei_pingpong for a #define,
+			  also add better Tx error handling.
+  Paul Gortmaker	: rewrite Rx overrun handling as per NS specs.
+  Alexey Kuznetsov	: use the 8390's six bit hash multicast filter.
+  Paul Gortmaker	: tweak ANK's above multicast changes a bit.
+  Paul Gortmaker	: update packet statistics for v2.1.x
+  Alan Cox		: support arbitary stupid port mappings on the
+  			  68K Macintosh. Support >16bit I/O spaces
+  Paul Gortmaker	: add kmod support for auto-loading of the 8390
+			  module by all drivers that require it.
+  Alan Cox		: Spinlocking work, added 'BUG_83C690'
+  Paul Gortmaker	: Separate out Tx timeout code from Tx path.
+  Paul Gortmaker	: Remove old unused single Tx buffer code.
+  Hayato Fujiwara	: Add m32r support.
+  Paul Gortmaker	: use skb_padto() instead of stack scratch area
+
+  Sources:
+  The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
+
+  */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#define NS8390_CORE
+#include "8390.h"
+
+#define BUG_83C690
+
+/* These are the operational function interfaces to board-specific
+   routines.
+	void reset_8390(struct net_device *dev)
+		Resets the board associated with DEV, including a hardware reset of
+		the 8390.  This is only called when there is a transmit timeout, and
+		it is always followed by 8390_init().
+	void block_output(struct net_device *dev, int count, const unsigned char *buf,
+					  int start_page)
+		Write the COUNT bytes of BUF to the packet buffer at START_PAGE.  The
+		"page" value uses the 8390's 256-byte pages.
+	void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
+		Read the 4 byte, page aligned 8390 header. *If* there is a
+		subsequent read, it will be of the rest of the packet.
+	void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+		Read COUNT bytes from the packet buffer into the skb data area. Start
+		reading from RING_OFFSET, the address as the 8390 sees it.  This will always
+		follow the read of the 8390 header.
+*/
+#define ei_reset_8390 (ei_local->reset_8390)
+#define ei_block_output (ei_local->block_output)
+#define ei_block_input (ei_local->block_input)
+#define ei_get_8390_hdr (ei_local->get_8390_hdr)
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef ei_debug
+int ei_debug = 1;
+#endif
+
+/* Index to functions. */
+static void ei_tx_intr(struct net_device *dev);
+static void ei_tx_err(struct net_device *dev);
+static void ei_tx_timeout(struct net_device *dev);
+static void ei_receive(struct net_device *dev);
+static void ei_rx_overrun(struct net_device *dev);
+
+/* Routines generic to NS8390-based boards. */
+static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
+								int start_page);
+static void set_multicast_list(struct net_device *dev);
+static void do_set_multicast_list(struct net_device *dev);
+static void __NS8390_init(struct net_device *dev, int startp);
+
+/*
+ *	SMP and the 8390 setup.
+ *
+ *	The 8390 isnt exactly designed to be multithreaded on RX/TX. There is
+ *	a page register that controls bank and packet buffer access. We guard
+ *	this with ei_local->page_lock. Nobody should assume or set the page other
+ *	than zero when the lock is not held. Lock holders must restore page 0
+ *	before unlocking. Even pure readers must take the lock to protect in
+ *	page 0.
+ *
+ *	To make life difficult the chip can also be very slow. We therefore can't
+ *	just use spinlocks. For the longer lockups we disable the irq the device
+ *	sits on and hold the lock. We must hold the lock because there is a dual
+ *	processor case other than interrupts (get stats/set multicast list in
+ *	parallel with each other and transmit).
+ *
+ *	Note: in theory we can just disable the irq on the card _but_ there is
+ *	a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs"
+ *	enter lock, take the queued irq. So we waddle instead of flying.
+ *
+ *	Finally by special arrangement for the purpose of being generally
+ *	annoying the transmit function is called bh atomic. That places
+ *	restrictions on the user context callers as disable_irq won't save
+ *	them.
+ */
+
+
+
+/**
+ * ei_open - Open/initialize the board.
+ * @dev: network device to initialize
+ *
+ * This routine goes all-out, setting everything
+ * up anew at each open, even though many of these registers should only
+ * need to be set once at boot.
+ */
+static int __ei_open(struct net_device *dev)
+{
+	unsigned long flags;
+	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+
+	/* The card I/O part of the driver (e.g. 3c503) can hook a Tx timeout
+	    wrapper that does e.g. media check & then calls ei_tx_timeout. */
+	if (dev->tx_timeout == NULL)
+		 dev->tx_timeout = ei_tx_timeout;
+	if (dev->watchdog_timeo <= 0)
+		 dev->watchdog_timeo = TX_TIMEOUT;
+
+	/*
+	 *	Grab the page lock so we own the register set, then call
+	 *	the init function.
+	 */
+
+      	spin_lock_irqsave(&ei_local->page_lock, flags);
+	__NS8390_init(dev, 1);
+	/* Set the flag before we drop the lock, That way the IRQ arrives
+	   after its set and we get no silly warnings */
+	netif_start_queue(dev);
+      	spin_unlock_irqrestore(&ei_local->page_lock, flags);
+	ei_local->irqlock = 0;
+	return 0;
+}
+
+/**
+ * ei_close - shut down network device
+ * @dev: network device to close
+ *
+ * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
+ */
+static int __ei_close(struct net_device *dev)
+{
+	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	unsigned long flags;
+
+	/*
+	 *	Hold the page lock during close
+	 */
+
+      	spin_lock_irqsave(&ei_local->page_lock, flags);
+	__NS8390_init(dev, 0);
+      	spin_unlock_irqrestore(&ei_local->page_lock, flags);
+	netif_stop_queue(dev);
+	return 0;
+}
+
+/**
+ * ei_tx_timeout - handle transmit time out condition
+ * @dev: network device which has apparently fallen asleep
+ *
+ * Called by kernel when device never acknowledges a transmit has
+ * completed (or failed) - i.e. never posted a Tx related interrupt.
+ */
+
+static void ei_tx_timeout(struct net_device *dev)
+{
+	unsigned long e8390_base = dev->base_addr;
+	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	int txsr, isr, tickssofar = jiffies - dev->trans_start;
+	unsigned long flags;
+
+#if defined(CONFIG_M32R) && defined(CONFIG_SMP)
+	unsigned long icucr;
+
+	local_irq_save(flags);
+	icucr = inl(M32R_ICU_CR1_PORTL);
+	icucr |= M32R_ICUCR_ISMOD11;
+	outl(icucr, M32R_ICU_CR1_PORTL);
+	local_irq_restore(flags);
+#endif
+	ei_local->stat.tx_errors++;
+
+	spin_lock_irqsave(&ei_local->page_lock, flags);
+	txsr = ei_inb(e8390_base+EN0_TSR);
+	isr = ei_inb(e8390_base+EN0_ISR);
+	spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+	printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+		dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
+		(isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
+
+	if (!isr && !ei_local->stat.tx_packets)
+	{
+		/* The 8390 probably hasn't gotten on the cable yet. */
+		ei_local->interface_num ^= 1;   /* Try a different xcvr.  */
+	}
+
+	/* Ugly but a reset can be slow, yet must be protected */
+
+	disable_irq_nosync_lockdep(dev->irq);
+	spin_lock(&ei_local->page_lock);
+
+	/* Try to restart the card.  Perhaps the user has fixed something. */
+	ei_reset_8390(dev);
+	__NS8390_init(dev, 1);
+
+	spin_unlock(&ei_local->page_lock);
+	enable_irq_lockdep(dev->irq);
+	netif_wake_queue(dev);
+}
+
+/**
+ * ei_start_xmit - begin packet transmission
+ * @skb: packet to be sent
+ * @dev: network device to which packet is sent
+ *
+ * Sends a packet to an 8390 network device.
+ */
+
+static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	unsigned long e8390_base = dev->base_addr;
+	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	int send_length = skb->len, output_page;
+	unsigned long flags;
+	char buf[ETH_ZLEN];
+	char *data = skb->data;
+
+	if (skb->len < ETH_ZLEN) {
+		memset(buf, 0, ETH_ZLEN);	/* more efficient than doing just the needed bits */
+		memcpy(buf, data, skb->len);
+		send_length = ETH_ZLEN;
+		data = buf;
+	}
+
+	/* Mask interrupts from the ethercard.
+	   SMP: We have to grab the lock here otherwise the IRQ handler
+	   on another CPU can flip window and race the IRQ mask set. We end
+	   up trashing the mcast filter not disabling irqs if we don't lock */
+
+	spin_lock_irqsave(&ei_local->page_lock, flags);
+	ei_outb_p(0x00, e8390_base + EN0_IMR);
+	spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+
+	/*
+	 *	Slow phase with lock held.
+	 */
+
+	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
+
+	spin_lock(&ei_local->page_lock);
+
+	ei_local->irqlock = 1;
+
+	/*
+	 * We have two Tx slots available for use. Find the first free
+	 * slot, and then perform some sanity checks. With two Tx bufs,
+	 * you get very close to transmitting back-to-back packets. With
+	 * only one Tx buf, the transmitter sits idle while you reload the
+	 * card, leaving a substantial gap between each transmitted packet.
+	 */
+
+	if (ei_local->tx1 == 0)
+	{
+		output_page = ei_local->tx_start_page;
+		ei_local->tx1 = send_length;
+		if (ei_debug  &&  ei_local->tx2 > 0)
+			printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
+				dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
+	}
+	else if (ei_local->tx2 == 0)
+	{
+		output_page = ei_local->tx_start_page + TX_PAGES/2;
+		ei_local->tx2 = send_length;
+		if (ei_debug  &&  ei_local->tx1 > 0)
+			printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
+				dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
+	}
+	else
+	{	/* We should never get here. */
+		if (ei_debug)
+			printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+				dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+		ei_local->irqlock = 0;
+		netif_stop_queue(dev);
+		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+		spin_unlock(&ei_local->page_lock);
+		enable_irq_lockdep_irqrestore(dev->irq, &flags);
+		ei_local->stat.tx_errors++;
+		return 1;
+	}
+
+	/*
+	 * Okay, now upload the packet and trigger a send if the transmitter
+	 * isn't already sending. If it is busy, the interrupt handler will
+	 * trigger the send later, upon receiving a Tx done interrupt.
+	 */
+
+	ei_block_output(dev, send_length, data, output_page);
+
+	if (! ei_local->txing)
+	{
+		ei_local->txing = 1;
+		NS8390_trigger_send(dev, send_length, output_page);
+		dev->trans_start = jiffies;
+		if (output_page == ei_local->tx_start_page)
+		{
+			ei_local->tx1 = -1;
+			ei_local->lasttx = -1;
+		}
+		else
+		{
+			ei_local->tx2 = -1;
+			ei_local->lasttx = -2;
+		}
+	}
+	else ei_local->txqueue++;
+
+	if (ei_local->tx1  &&  ei_local->tx2)
+		netif_stop_queue(dev);
+	else
+		netif_start_queue(dev);
+
+	/* Turn 8390 interrupts back on. */
+	ei_local->irqlock = 0;
+	ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+
+	spin_unlock(&ei_local->page_lock);
+	enable_irq_lockdep_irqrestore(dev->irq, &flags);
+
+	dev_kfree_skb (skb);
+	ei_local->stat.tx_bytes += send_length;
+
+	return 0;
+}
+
+/**
+ * ei_interrupt - handle the interrupts from an 8390
+ * @irq: interrupt number
+ * @dev_id: a pointer to the net_device
+ *
+ * Handle the ether interface interrupts. We pull packets from
+ * the 8390 via the card specific functions and fire them at the networking
+ * stack. We also handle transmit completions and wake the transmit path if
+ * necessary. We also update the counters and do other housekeeping as
+ * needed.
+ */
+
+static irqreturn_t __ei_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	unsigned long e8390_base = dev->base_addr;
+	int interrupts, nr_serviced = 0;
+	struct ei_device *ei_local = netdev_priv(dev);
+
+	/*
+	 *	Protect the irq test too.
+	 */
+
+	spin_lock(&ei_local->page_lock);
+
+	if (ei_local->irqlock)
+	{
+#if 1 /* This might just be an interrupt for a PCI device sharing this line */
+		/* The "irqlock" check is only for testing. */
+		printk(ei_local->irqlock
+			   ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
+			   : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
+			   dev->name, ei_inb_p(e8390_base + EN0_ISR),
+			   ei_inb_p(e8390_base + EN0_IMR));
+#endif
+		spin_unlock(&ei_local->page_lock);
+		return IRQ_NONE;
+	}
+
+	/* Change to page 0 and read the intr status reg. */
+	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
+	if (ei_debug > 3)
+		printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name,
+			   ei_inb_p(e8390_base + EN0_ISR));
+
+	/* !!Assumption!! -- we stay in page 0.	 Don't break this. */
+	while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0
+		   && ++nr_serviced < MAX_SERVICE)
+	{
+		if (!netif_running(dev)) {
+			printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
+			/* rmk - acknowledge the interrupts */
+			ei_outb_p(interrupts, e8390_base + EN0_ISR);
+			interrupts = 0;
+			break;
+		}
+		if (interrupts & ENISR_OVER)
+			ei_rx_overrun(dev);
+		else if (interrupts & (ENISR_RX+ENISR_RX_ERR))
+		{
+			/* Got a good (?) packet. */
+			ei_receive(dev);
+		}
+		/* Push the next to-transmit packet through. */
+		if (interrupts & ENISR_TX)
+			ei_tx_intr(dev);
+		else if (interrupts & ENISR_TX_ERR)
+			ei_tx_err(dev);
+
+		if (interrupts & ENISR_COUNTERS)
+		{
+			ei_local->stat.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
+			ei_local->stat.rx_crc_errors   += ei_inb_p(e8390_base + EN0_COUNTER1);
+			ei_local->stat.rx_missed_errors+= ei_inb_p(e8390_base + EN0_COUNTER2);
+			ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
+		}
+
+		/* Ignore any RDC interrupts that make it back to here. */
+		if (interrupts & ENISR_RDC)
+		{
+			ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);
+		}
+
+		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
+	}
+
+	if (interrupts && ei_debug)
+	{
+		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
+		if (nr_serviced >= MAX_SERVICE)
+		{
+			/* 0xFF is valid for a card removal */
+			if(interrupts!=0xFF)
+				printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n",
+				   dev->name, interrupts);
+			ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
+		} else {
+			printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts);
+			ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
+		}
+	}
+	spin_unlock(&ei_local->page_lock);
+	return IRQ_RETVAL(nr_serviced > 0);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void __ei_poll(struct net_device *dev)
+{
+	disable_irq_lockdep(dev->irq);
+	__ei_interrupt(dev->irq, dev);
+	enable_irq_lockdep(dev->irq);
+}
+#endif
+
+/**
+ * ei_tx_err - handle transmitter error
+ * @dev: network device which threw the exception
+ *
+ * A transmitter error has happened. Most likely excess collisions (which
+ * is a fairly normal condition). If the error is one where the Tx will
+ * have been aborted, we try and send another one right away, instead of
+ * letting the failed packet sit and collect dust in the Tx buffer. This
+ * is a much better solution as it avoids kernel based Tx timeouts, and
+ * an unnecessary card reset.
+ *
+ * Called with lock held.
+ */
+
+static void ei_tx_err(struct net_device *dev)
+{
+	unsigned long e8390_base = dev->base_addr;
+	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
+	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
+
+#ifdef VERBOSE_ERROR_DUMP
+	printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
+	if (txsr & ENTSR_ABT)
+		printk("excess-collisions ");
+	if (txsr & ENTSR_ND)
+		printk("non-deferral ");
+	if (txsr & ENTSR_CRS)
+		printk("lost-carrier ");
+	if (txsr & ENTSR_FU)
+		printk("FIFO-underrun ");
+	if (txsr & ENTSR_CDH)
+		printk("lost-heartbeat ");
+	printk("\n");
+#endif
+
+	ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */
+
+	if (tx_was_aborted)
+		ei_tx_intr(dev);
+	else
+	{
+		ei_local->stat.tx_errors++;
+		if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++;
+		if (txsr & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++;
+		if (txsr & ENTSR_OWC) ei_local->stat.tx_window_errors++;
+	}
+}
+
+/**
+ * ei_tx_intr - transmit interrupt handler
+ * @dev: network device for which tx intr is handled
+ *
+ * We have finished a transmit: check for errors and then trigger the next
+ * packet to be sent. Called with lock held.
+ */
+
+static void ei_tx_intr(struct net_device *dev)
+{
+	unsigned long e8390_base = dev->base_addr;
+	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	int status = ei_inb(e8390_base + EN0_TSR);
+
+	ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
+
+	/*
+	 * There are two Tx buffers, see which one finished, and trigger
+	 * the send of another one if it exists.
+	 */
+	ei_local->txqueue--;
+
+	if (ei_local->tx1 < 0)
+	{
+		if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
+			printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n",
+				ei_local->name, ei_local->lasttx, ei_local->tx1);
+		ei_local->tx1 = 0;
+		if (ei_local->tx2 > 0)
+		{
+			ei_local->txing = 1;
+			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
+			dev->trans_start = jiffies;
+			ei_local->tx2 = -1,
+			ei_local->lasttx = 2;
+		}
+		else ei_local->lasttx = 20, ei_local->txing = 0;
+	}
+	else if (ei_local->tx2 < 0)
+	{
+		if (ei_local->lasttx != 2  &&  ei_local->lasttx != -2)
+			printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
+				ei_local->name, ei_local->lasttx, ei_local->tx2);
+		ei_local->tx2 = 0;
+		if (ei_local->tx1 > 0)
+		{
+			ei_local->txing = 1;
+			NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
+			dev->trans_start = jiffies;
+			ei_local->tx1 = -1;
+			ei_local->lasttx = 1;
+		}
+		else
+			ei_local->lasttx = 10, ei_local->txing = 0;
+	}
+//	else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
+//			dev->name, ei_local->lasttx);
+
+	/* Minimize Tx latency: update the statistics after we restart TXing. */
+	if (status & ENTSR_COL)
+		ei_local->stat.collisions++;
+	if (status & ENTSR_PTX)
+		ei_local->stat.tx_packets++;
+	else
+	{
+		ei_local->stat.tx_errors++;
+		if (status & ENTSR_ABT)
+		{
+			ei_local->stat.tx_aborted_errors++;
+			ei_local->stat.collisions += 16;
+		}
+		if (status & ENTSR_CRS)
+			ei_local->stat.tx_carrier_errors++;
+		if (status & ENTSR_FU)
+			ei_local->stat.tx_fifo_errors++;
+		if (status & ENTSR_CDH)
+			ei_local->stat.tx_heartbeat_errors++;
+		if (status & ENTSR_OWC)
+			ei_local->stat.tx_window_errors++;
+	}
+	netif_wake_queue(dev);
+}
+
+/**
+ * ei_receive - receive some packets
+ * @dev: network device with which receive will be run
+ *
+ * We have a good packet(s), get it/them out of the buffers.
+ * Called with lock held.
+ */
+
+static void ei_receive(struct net_device *dev)
+{
+	unsigned long e8390_base = dev->base_addr;
+	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	unsigned char rxing_page, this_frame, next_frame;
+	unsigned short current_offset;
+	int rx_pkt_count = 0;
+	struct e8390_pkt_hdr rx_frame;
+	int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
+
+	while (++rx_pkt_count < 10)
+	{
+		int pkt_len, pkt_stat;
+
+		/* Get the rx page (incoming packet pointer). */
+		ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
+		rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
+		ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
+
+		/* Remove one frame from the ring.  Boundary is always a page behind. */
+		this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
+		if (this_frame >= ei_local->stop_page)
+			this_frame = ei_local->rx_start_page;
+
+		/* Someday we'll omit the previous, iff we never get this message.
+		   (There is at least one clone claimed to have a problem.)
+
+		   Keep quiet if it looks like a card removal. One problem here
+		   is that some clones crash in roughly the same way.
+		 */
+		if (ei_debug > 0  &&  this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
+			printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n",
+				   dev->name, this_frame, ei_local->current_page);
+
+		if (this_frame == rxing_page)	/* Read all the frames? */
+			break;				/* Done for now */
+
+		current_offset = this_frame << 8;
+		ei_get_8390_hdr(dev, &rx_frame, this_frame);
+
+		pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
+		pkt_stat = rx_frame.status;
+
+		next_frame = this_frame + 1 + ((pkt_len+4)>>8);
+
+		/* Check for bogosity warned by 3c503 book: the status byte is never
+		   written.  This happened a lot during testing! This code should be
+		   cleaned up someday. */
+		if (rx_frame.next != next_frame
+			&& rx_frame.next != next_frame + 1
+			&& rx_frame.next != next_frame - num_rx_pages
+			&& rx_frame.next != next_frame + 1 - num_rx_pages) {
+			ei_local->current_page = rxing_page;
+			ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
+			ei_local->stat.rx_errors++;
+			continue;
+		}
+
+		if (pkt_len < 60  ||  pkt_len > 1518)
+		{
+			if (ei_debug)
+				printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
+					   dev->name, rx_frame.count, rx_frame.status,
+					   rx_frame.next);
+			ei_local->stat.rx_errors++;
+			ei_local->stat.rx_length_errors++;
+		}
+		 else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
+		{
+			struct sk_buff *skb;
+
+			skb = dev_alloc_skb(pkt_len+2);
+			if (skb == NULL)
+			{
+				if (ei_debug > 1)
+					printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
+						   dev->name, pkt_len);
+				ei_local->stat.rx_dropped++;
+				break;
+			}
+			else
+			{
+				skb_reserve(skb,2);	/* IP headers on 16 byte boundaries */
+				skb->dev = dev;
+				skb_put(skb, pkt_len);	/* Make room */
+				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
+				skb->protocol=eth_type_trans(skb,dev);
+				netif_rx(skb);
+				dev->last_rx = jiffies;
+				ei_local->stat.rx_packets++;
+				ei_local->stat.rx_bytes += pkt_len;
+				if (pkt_stat & ENRSR_PHY)
+					ei_local->stat.multicast++;
+			}
+		}
+		else
+		{
+			if (ei_debug)
+				printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+					   dev->name, rx_frame.status, rx_frame.next,
+					   rx_frame.count);
+			ei_local->stat.rx_errors++;
+			/* NB: The NIC counts CRC, frame and missed errors. */
+			if (pkt_stat & ENRSR_FO)
+				ei_local->stat.rx_fifo_errors++;
+		}
+		next_frame = rx_frame.next;
+
+		/* This _should_ never happen: it's here for avoiding bad clones. */
+		if (next_frame >= ei_local->stop_page) {
+			printk("%s: next frame inconsistency, %#2x\n", dev->name,
+				   next_frame);
+			next_frame = ei_local->rx_start_page;
+		}
+		ei_local->current_page = next_frame;
+		ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
+	}
+
+	/* We used to also ack ENISR_OVER here, but that would sometimes mask
+	   a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
+	ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
+	return;
+}
+
+/**
+ * ei_rx_overrun - handle receiver overrun
+ * @dev: network device which threw exception
+ *
+ * We have a receiver overrun: we have to kick the 8390 to get it started
+ * again. Problem is that you have to kick it exactly as NS prescribes in
+ * the updated datasheets, or "the NIC may act in an unpredictable manner."
+ * This includes causing "the NIC to defer indefinitely when it is stopped
+ * on a busy network."  Ugh.
+ * Called with lock held. Don't call this with the interrupts off or your
+ * computer will hate you - it takes 10ms or so.
+ */
+
+static void ei_rx_overrun(struct net_device *dev)
+{
+	unsigned long e8390_base = dev->base_addr;
+	unsigned char was_txing, must_resend = 0;
+	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+
+	/*
+	 * Record whether a Tx was in progress and then issue the
+	 * stop command.
+	 */
+	was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
+	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+	if (ei_debug > 1)
+		printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
+	ei_local->stat.rx_over_errors++;
+
+	/*
+	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
+	 * Early datasheets said to poll the reset bit, but now they say that
+	 * it "is not a reliable indicator and subsequently should be ignored."
+	 * We wait at least 10ms.
+	 */
+
+	mdelay(10);
+
+	/*
+	 * Reset RBCR[01] back to zero as per magic incantation.
+	 */
+	ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
+	ei_outb_p(0x00, e8390_base+EN0_RCNTHI);
+
+	/*
+	 * See if any Tx was interrupted or not. According to NS, this
+	 * step is vital, and skipping it will cause no end of havoc.
+	 */
+
+	if (was_txing)
+	{
+		unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
+		if (!tx_completed)
+			must_resend = 1;
+	}
+
+	/*
+	 * Have to enter loopback mode and then restart the NIC before
+	 * you are allowed to slurp packets up off the ring.
+	 */
+	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
+	ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
+
+	/*
+	 * Clear the Rx ring of all the debris, and ack the interrupt.
+	 */
+	ei_receive(dev);
+	ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);
+
+	/*
+	 * Leave loopback mode, and resend any packet that got stopped.
+	 */
+	ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
+	if (must_resend)
+    		ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
+}
+
+/*
+ *	Collect the stats. This is called unlocked and from several contexts.
+ */
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+	unsigned long ioaddr = dev->base_addr;
+	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	unsigned long flags;
+
+	/* If the card is stopped, just return the present stats. */
+	if (!netif_running(dev))
+		return &ei_local->stat;
+
+	spin_lock_irqsave(&ei_local->page_lock,flags);
+	/* Read the counter registers, assuming we are in page 0. */
+	ei_local->stat.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
+	ei_local->stat.rx_crc_errors   += ei_inb_p(ioaddr + EN0_COUNTER1);
+	ei_local->stat.rx_missed_errors+= ei_inb_p(ioaddr + EN0_COUNTER2);
+	spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+	return &ei_local->stat;
+}
+
+/*
+ * Form the 64 bit 8390 multicast table from the linked list of addresses
+ * associated with this dev structure.
+ */
+
+static inline void make_mc_bits(u8 *bits, struct net_device *dev)
+{
+	struct dev_mc_list *dmi;
+
+	for (dmi=dev->mc_list; dmi; dmi=dmi->next)
+	{
+		u32 crc;
+		if (dmi->dmi_addrlen != ETH_ALEN)
+		{
+			printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
+			continue;
+		}
+		crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
+		/*
+		 * The 8390 uses the 6 most significant bits of the
+		 * CRC to index the multicast table.
+		 */
+		bits[crc>>29] |= (1<<((crc>>26)&7));
+	}
+}
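+
+/*
+ * Illustrative example (the CRC value is invented): if ether_crc() returned
+ * 0xD4000000 for some address, the top three bits (crc>>29 == 6) select
+ * byte 6 of the filter and the next three bits ((crc>>26)&7 == 5) select
+ * bit 5, i.e. bits[6] |= 0x20.  The eight filter bytes are later written
+ * to the EN1_MULT registers by do_set_multicast_list().
+ */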
+
+/**
+ * do_set_multicast_list - set/clear multicast filter
+ * @dev: net device for which multicast filter is adjusted
+ *
+ *	Set or clear the multicast filter for this adaptor. May be called
+ *	from a BH in 2.1.x. Must be called with lock held.
+ */
+
+static void do_set_multicast_list(struct net_device *dev)
+{
+	unsigned long e8390_base = dev->base_addr;
+	int i;
+	struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+
+	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
+	{
+		memset(ei_local->mcfilter, 0, 8);
+		if (dev->mc_list)
+			make_mc_bits(ei_local->mcfilter, dev);
+	}
+	else
+		memset(ei_local->mcfilter, 0xFF, 8);	/* mcast set to accept-all */
+
+	/*
+	 * DP8390 manuals don't specify any magic sequence for altering
+	 * the multicast regs on an already running card. To be safe, we
+	 * ensure multicast mode is off prior to loading up the new hash
+	 * table. If this proves to be not enough, we can always resort
+	 * to stopping the NIC, loading the table and then restarting.
+	 *
+	 * Bug Alert!  The MC regs on the SMC 83C690 (SMC Elite and SMC
+	 * Elite16) appear to be write-only. The NS 8390 data sheet lists
+	 * them as r/w so this is a bug.  The SMC 83C790 (SMC Ultra and
+	 * Ultra32 EISA) appears to have this bug fixed.
+	 */
+
+	if (netif_running(dev))
+		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
+	ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
+	for(i = 0; i < 8; i++)
+	{
+		ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
+#ifndef BUG_83C690
+		if(ei_inb_p(e8390_base + EN1_MULT_SHIFT(i))!=ei_local->mcfilter[i])
+			printk(KERN_ERR "Multicast filter read/write mismatch %d\n",i);
+#endif
+	}
+	ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
+
+  	if(dev->flags&IFF_PROMISC)
+  		ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
+	else if(dev->flags&IFF_ALLMULTI || dev->mc_list)
+  		ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
+  	else
+  		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
+ }
+
+/*
+ *	Called without lock held. This is invoked from user context and may
+ *	be parallel to just about everything else. It's also fairly quick and
+ *	not called too often. Must protect against both bh and irq users.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+	unsigned long flags;
+	struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+
+	spin_lock_irqsave(&ei_local->page_lock, flags);
+	do_set_multicast_list(dev);
+	spin_unlock_irqrestore(&ei_local->page_lock, flags);
+}
+
+/**
+ * ethdev_setup - init rest of 8390 device struct
+ * @dev: network device structure to init
+ *
+ * Initialize the rest of the 8390 device structure.  Do NOT __init
+ * this, as it is used by 8390 based modular drivers too.
+ */
+
+static void ethdev_setup(struct net_device *dev)
+{
+	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	if (ei_debug > 1)
+		printk(version);
+
+	dev->hard_start_xmit = &ei_start_xmit;
+	dev->get_stats	= get_stats;
+	dev->set_multicast_list = &set_multicast_list;
+
+	ether_setup(dev);
+
+	spin_lock_init(&ei_local->page_lock);
+}
+
+/**
+ * alloc_ei_netdev - alloc_etherdev counterpart for 8390
+ * @size: extra bytes to allocate
+ *
+ * Allocate 8390-specific net_device.
+ */
+static struct net_device *____alloc_ei_netdev(int size)
+{
+	return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
+				ethdev_setup);
+}
+
+
+
+
+/* This page of functions should be 8390 generic */
+/* Follow National Semi's recommendations for initializing the "NIC". */
+
+/**
+ * NS8390_init - initialize 8390 hardware
+ * @dev: network device to initialize
+ * @startp: boolean.  non-zero value to initiate chip processing
+ *
+ *	Must be called with lock held.
+ */
+
+static void __NS8390_init(struct net_device *dev, int startp)
+{
+	unsigned long e8390_base = dev->base_addr;
+	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	int i;
+	int endcfg = ei_local->word16
+	    ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
+	    : 0x48;
+
+	if(sizeof(struct e8390_pkt_hdr)!=4)
+    		panic("8390.c: header struct mispacked\n");
+	/* Follow National Semi's recommendations for initing the DP83902. */
+	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
+	ei_outb_p(endcfg, e8390_base + EN0_DCFG);	/* 0x48 or 0x49 */
+	/* Clear the remote byte count registers. */
+	ei_outb_p(0x00,  e8390_base + EN0_RCNTLO);
+	ei_outb_p(0x00,  e8390_base + EN0_RCNTHI);
+	/* Set to monitor and loopback mode -- this is vital! */
+	ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
+	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
+	/* Set the transmit page and receive ring. */
+	ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
+	ei_local->tx1 = ei_local->tx2 = 0;
+	ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
+	ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY);	/* 3c503 says 0x3f, NS says 0x26 */
+	ei_local->current_page = ei_local->rx_start_page;		/* assert boundary+1 */
+	ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
+	/* Clear the pending interrupts and mask. */
+	ei_outb_p(0xFF, e8390_base + EN0_ISR);
+	ei_outb_p(0x00,  e8390_base + EN0_IMR);
+
+	/* Copy the station address into the DS8390 registers. */
+
+	ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
+	for(i = 0; i < 6; i++)
+	{
+		ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
+		if (ei_debug > 1 && ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
+			printk(KERN_ERR "Hw. address read/write mismatch %d\n",i);
+	}
+
+	ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
+	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+	netif_start_queue(dev);
+	ei_local->tx1 = ei_local->tx2 = 0;
+	ei_local->txing = 0;
+
+	if (startp)
+	{
+		ei_outb_p(0xff,  e8390_base + EN0_ISR);
+		ei_outb_p(ENISR_ALL,  e8390_base + EN0_IMR);
+		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
+		ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
+		/* 3c503 TechMan says rxconfig only after the NIC is started. */
+		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on,  */
+		do_set_multicast_list(dev);	/* (re)load the mcast table */
+	}
+}
+
+/* Trigger a transmit start, assuming the length is valid.
+   Always called with the page lock held */
+
+static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
+								int start_page)
+{
+	unsigned long e8390_base = dev->base_addr;
+ 	struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);
+
+	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
+
+	if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS)
+	{
+		printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
+			dev->name);
+		return;
+	}
+	ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
+	ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
+	ei_outb_p(start_page, e8390_base + EN0_TPSR);
+	ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
+}
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index 5795ee1..0a08d0c 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -440,7 +440,7 @@
 	iounmap(ei_status.mem);
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	int this_dev;
 
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index ade6ff8..a12bb64 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -39,7 +39,16 @@
 #include <asm/hwtest.h>
 #include <asm/macints.h>
 
-#include "8390.h"
+static char version[] =
+	"mac8390.c: v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
+
+#define EI_SHIFT(x)	(ei_local->reg_offset[x])
+#define ei_inb(port)   in_8(port)
+#define ei_outb(val,port)  out_8(port,val)
+#define ei_inb_p(port)   in_8(port)
+#define ei_outb_p(val,port)  out_8(port,val)
+
+#include "lib8390.c"
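+
+/*
+ * Including lib8390.c here (instead of linking against the shared 8390
+ * module) gives this driver a private copy of the generic 8390 core built
+ * with the in_8()/out_8() accessors defined above; the __ei_open(),
+ * __ei_interrupt() and __NS8390_init() calls used later in this file come
+ * from that included core.
+ */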
 
 #define WD_START_PG			0x00	/* First page of TX buffer */
 #define CABLETRON_RX_START_PG		0x00    /* First page of RX buffer */
@@ -116,9 +125,6 @@
 	1, /* dayna-lc */
 };
 
-static char version[] __initdata =
-	"mac8390.c: v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
-
 extern enum mac8390_type mac8390_ident(struct nubus_dev * dev);
 extern int mac8390_memsize(unsigned long membase);
 extern int mac8390_memtest(struct net_device * dev);
@@ -237,7 +243,7 @@
 	if (!MACH_IS_MAC)
 		return ERR_PTR(-ENODEV);
 
-	dev = alloc_ei_netdev();
+	dev = ____alloc_ei_netdev(0);
 	if (!dev)
 		return ERR_PTR(-ENOMEM);
 
@@ -438,7 +444,7 @@
 	dev->open = &mac8390_open;
 	dev->stop = &mac8390_close;
 #ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = ei_poll;
+	dev->poll_controller = __ei_poll;
 #endif
 
 	/* GAR, ei_status is actually a macro even though it looks global */
@@ -510,7 +516,7 @@
 		return -ENODEV;
 	}
 
-	NS8390_init(dev, 0);
+	__NS8390_init(dev, 0);
 
 	/* Good, done, now spit out some messages */
 	printk(KERN_INFO "%s: %s in slot %X (type %s)\n",
@@ -532,8 +538,8 @@
 
 static int mac8390_open(struct net_device *dev)
 {
-	ei_open(dev);
-	if (request_irq(dev->irq, ei_interrupt, 0, "8390 Ethernet", dev)) {
+	__ei_open(dev);
+	if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) {
 		printk ("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
 		return -EAGAIN;
 	}
@@ -543,7 +549,7 @@
 static int mac8390_close(struct net_device *dev)
 {
 	free_irq(dev->irq, dev);
-	ei_close(dev);
+	__ei_close(dev);
 	return 0;
 }
 
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
new file mode 100644
index 0000000..bd0ce98
--- /dev/null
+++ b/drivers/net/macb.c
@@ -0,0 +1,1210 @@
+/*
+ * Atmel MACB Ethernet Controller driver
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/board.h>
+
+#include "macb.h"
+
+#define to_net_dev(class) container_of(class, struct net_device, class_dev)
+
+#define RX_BUFFER_SIZE		128
+#define RX_RING_SIZE		512
+#define RX_RING_BYTES		(sizeof(struct dma_desc) * RX_RING_SIZE)
+
+/* Make the IP header word-aligned (the ethernet header is 14 bytes) */
+#define RX_OFFSET		2
+
+#define TX_RING_SIZE		128
+#define DEF_TX_RING_PENDING	(TX_RING_SIZE - 1)
+#define TX_RING_BYTES		(sizeof(struct dma_desc) * TX_RING_SIZE)
+
+#define TX_RING_GAP(bp)						\
+	(TX_RING_SIZE - (bp)->tx_pending)
+#define TX_BUFFS_AVAIL(bp)					\
+	(((bp)->tx_tail <= (bp)->tx_head) ?			\
+	 (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head :	\
+	 (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
+#define NEXT_TX(n)		(((n) + 1) & (TX_RING_SIZE - 1))
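+
+/*
+ * Worked example (illustrative): with TX_RING_SIZE = 128 and tx_pending =
+ * DEF_TX_RING_PENDING = 127, TX_RING_GAP() is 1.  If tx_tail = 5 and
+ * tx_head = 10, TX_BUFFS_AVAIL() = 5 + 127 - 10 = 122; after a wrap, with
+ * tx_tail = 10 and tx_head = 5, it is 10 - 5 - 1 = 4 (127 pending minus
+ * the 123 descriptors still in flight).
+ */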
+
+#define NEXT_RX(n)		(((n) + 1) & (RX_RING_SIZE - 1))
+
+/* minimum number of free TX descriptors before waking up TX process */
+#define MACB_TX_WAKEUP_THRESH	(TX_RING_SIZE / 4)
+
+#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
+				 | MACB_BIT(ISR_ROVR))
+
+static void __macb_set_hwaddr(struct macb *bp)
+{
+	u32 bottom;
+	u16 top;
+
+	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
+	macb_writel(bp, SA1B, bottom);
+	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
+	macb_writel(bp, SA1T, top);
+}
+
+static void __init macb_get_hwaddr(struct macb *bp)
+{
+	u32 bottom;
+	u16 top;
+	u8 addr[6];
+
+	bottom = macb_readl(bp, SA1B);
+	top = macb_readl(bp, SA1T);
+
+	addr[0] = bottom & 0xff;
+	addr[1] = (bottom >> 8) & 0xff;
+	addr[2] = (bottom >> 16) & 0xff;
+	addr[3] = (bottom >> 24) & 0xff;
+	addr[4] = top & 0xff;
+	addr[5] = (top >> 8) & 0xff;
+
+	if (is_valid_ether_addr(addr))
+		memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+}
+
+static void macb_enable_mdio(struct macb *bp)
+{
+	unsigned long flags;
+	u32 reg;
+
+	spin_lock_irqsave(&bp->lock, flags);
+	reg = macb_readl(bp, NCR);
+	reg |= MACB_BIT(MPE);
+	macb_writel(bp, NCR, reg);
+	macb_writel(bp, IER, MACB_BIT(MFD));
+	spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static void macb_disable_mdio(struct macb *bp)
+{
+	unsigned long flags;
+	u32 reg;
+
+	spin_lock_irqsave(&bp->lock, flags);
+	reg = macb_readl(bp, NCR);
+	reg &= ~MACB_BIT(MPE);
+	macb_writel(bp, NCR, reg);
+	macb_writel(bp, IDR, MACB_BIT(MFD));
+	spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static int macb_mdio_read(struct net_device *dev, int phy_id, int location)
+{
+	struct macb *bp = netdev_priv(dev);
+	int value;
+
+	mutex_lock(&bp->mdio_mutex);
+
+	macb_enable_mdio(bp);
+	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
+			      | MACB_BF(RW, MACB_MAN_READ)
+			      | MACB_BF(PHYA, phy_id)
+			      | MACB_BF(REGA, location)
+			      | MACB_BF(CODE, MACB_MAN_CODE)));
+
+	wait_for_completion(&bp->mdio_complete);
+
+	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
+	macb_disable_mdio(bp);
+	mutex_unlock(&bp->mdio_mutex);
+
+	return value;
+}
+
+static void macb_mdio_write(struct net_device *dev, int phy_id,
+			    int location, int val)
+{
+	struct macb *bp = netdev_priv(dev);
+
+	dev_dbg(&bp->pdev->dev, "mdio_write %02x:%02x <- %04x\n",
+		phy_id, location, val);
+
+	mutex_lock(&bp->mdio_mutex);
+	macb_enable_mdio(bp);
+
+	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
+			      | MACB_BF(RW, MACB_MAN_WRITE)
+			      | MACB_BF(PHYA, phy_id)
+			      | MACB_BF(REGA, location)
+			      | MACB_BF(CODE, MACB_MAN_CODE)
+			      | MACB_BF(DATA, val)));
+
+	wait_for_completion(&bp->mdio_complete);
+
+	macb_disable_mdio(bp);
+	mutex_unlock(&bp->mdio_mutex);
+}
+
+static int macb_phy_probe(struct macb *bp)
+{
+	int phy_address;
+	u16 phyid1, phyid2;
+
+	for (phy_address = 0; phy_address < 32; phy_address++) {
+		phyid1 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID1);
+		phyid2 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID2);
+
+		if (phyid1 != 0xffff && phyid1 != 0x0000
+		    && phyid2 != 0xffff && phyid2 != 0x0000)
+			break;
+	}
+
+	if (phy_address == 32)
+		return -ENODEV;
+
+	dev_info(&bp->pdev->dev,
+		 "detected PHY at address %d (ID %04x:%04x)\n",
+		 phy_address, phyid1, phyid2);
+
+	bp->mii.phy_id = phy_address;
+	return 0;
+}
+
+static void macb_set_media(struct macb *bp, int media)
+{
+	u32 reg;
+
+	spin_lock_irq(&bp->lock);
+	reg = macb_readl(bp, NCFGR);
+	reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+	if (media & (ADVERTISE_100HALF | ADVERTISE_100FULL))
+		reg |= MACB_BIT(SPD);
+	if (media & ADVERTISE_FULL)
+		reg |= MACB_BIT(FD);
+	macb_writel(bp, NCFGR, reg);
+	spin_unlock_irq(&bp->lock);
+}
+
+static void macb_check_media(struct macb *bp, int ok_to_print, int init_media)
+{
+	struct mii_if_info *mii = &bp->mii;
+	unsigned int old_carrier, new_carrier;
+	int advertise, lpa, media, duplex;
+
+	/* if forced media, go no further */
+	if (mii->force_media)
+		return;
+
+	/* check current and old link status */
+	old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0;
+	new_carrier = (unsigned int) mii_link_ok(mii);
+
+	/* if carrier state did not change, assume nothing else did */
+	if (!init_media && old_carrier == new_carrier)
+		return;
+
+	/* no carrier, nothing much to do */
+	if (!new_carrier) {
+		netif_carrier_off(mii->dev);
+		printk(KERN_INFO "%s: link down\n", mii->dev->name);
+		return;
+	}
+
+	/*
+	 * we have carrier, see who's on the other end
+	 */
+	netif_carrier_on(mii->dev);
+
+	/* get MII advertise and LPA values */
+	if (!init_media && mii->advertising) {
+		advertise = mii->advertising;
+	} else {
+		advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
+		mii->advertising = advertise;
+	}
+	lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
+
+	/* figure out media and duplex from advertise and LPA values */
+	media = mii_nway_result(lpa & advertise);
+	duplex = (media & ADVERTISE_FULL) ? 1 : 0;
+
+	if (ok_to_print)
+		printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n",
+		       mii->dev->name,
+		       media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10",
+		       duplex ? "full" : "half", lpa);
+
+	mii->full_duplex = duplex;
+
+	/* Let the MAC know about the new link state */
+	macb_set_media(bp, media);
+}
+
+static void macb_update_stats(struct macb *bp)
+{
+	u32 __iomem *reg = bp->regs + MACB_PFR;
+	u32 *p = &bp->hw_stats.rx_pause_frames;
+	u32 *end = &bp->hw_stats.tx_pause_frames + 1;
+
+	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
+
+	for(; p < end; p++, reg++)
+		*p += readl(reg);
+}
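+
+/*
+ * Spelled out, the WARN_ON() above checks that struct macb_stats (21 u32
+ * fields, rx_pause_frames through tx_pause_frames) matches the register
+ * block from MACB_PFR (0x3c) to MACB_TPF (0x8c): end - p - 1 == 20 ==
+ * (0x8c - 0x3c) / 4.
+ */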
+
+static void macb_periodic_task(void *arg)
+{
+	struct macb *bp = arg;
+
+	macb_update_stats(bp);
+	macb_check_media(bp, 1, 0);
+
+	schedule_delayed_work(&bp->periodic_task, HZ);
+}
+
+static void macb_tx(struct macb *bp)
+{
+	unsigned int tail;
+	unsigned int head;
+	u32 status;
+
+	status = macb_readl(bp, TSR);
+	macb_writel(bp, TSR, status);
+
+	dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n",
+		(unsigned long)status);
+
+	if (status & MACB_BIT(UND)) {
+		printk(KERN_ERR "%s: TX underrun, resetting buffers\n",
+		       bp->dev->name);
+		bp->tx_head = bp->tx_tail = 0;
+	}
+
+	if (!(status & MACB_BIT(COMP)))
+		/*
+		 * This may happen when a buffer becomes complete
+		 * between reading the ISR and scanning the
+		 * descriptors.  Nothing to worry about.
+		 */
+		return;
+
+	head = bp->tx_head;
+	for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
+		struct ring_info *rp = &bp->tx_skb[tail];
+		struct sk_buff *skb = rp->skb;
+		u32 bufstat;
+
+		BUG_ON(skb == NULL);
+
+		rmb();
+		bufstat = bp->tx_ring[tail].ctrl;
+
+		if (!(bufstat & MACB_BIT(TX_USED)))
+			break;
+
+		dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n",
+			tail, skb->data);
+		dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
+				 DMA_TO_DEVICE);
+		bp->stats.tx_packets++;
+		bp->stats.tx_bytes += skb->len;
+		rp->skb = NULL;
+		dev_kfree_skb_irq(skb);
+	}
+
+	bp->tx_tail = tail;
+	if (netif_queue_stopped(bp->dev) &&
+	    TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH)
+		netif_wake_queue(bp->dev);
+}
+
+static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
+			 unsigned int last_frag)
+{
+	unsigned int len;
+	unsigned int frag;
+	unsigned int offset = 0;
+	struct sk_buff *skb;
+
+	len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);
+
+	dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n",
+		first_frag, last_frag, len);
+
+	skb = dev_alloc_skb(len + RX_OFFSET);
+	if (!skb) {
+		bp->stats.rx_dropped++;
+		for (frag = first_frag; ; frag = NEXT_RX(frag)) {
+			bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+			if (frag == last_frag)
+				break;
+		}
+		wmb();
+		return 1;
+	}
+
+	skb_reserve(skb, RX_OFFSET);
+	skb->dev = bp->dev;
+	skb->ip_summed = CHECKSUM_NONE;
+	skb_put(skb, len);
+
+	for (frag = first_frag; ; frag = NEXT_RX(frag)) {
+		unsigned int frag_len = RX_BUFFER_SIZE;
+
+		if (offset + frag_len > len) {
+			BUG_ON(frag != last_frag);
+			frag_len = len - offset;
+		}
+		memcpy(skb->data + offset,
+		       bp->rx_buffers + (RX_BUFFER_SIZE * frag),
+		       frag_len);
+		offset += RX_BUFFER_SIZE;
+		bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+		wmb();
+
+		if (frag == last_frag)
+			break;
+	}
+
+	skb->protocol = eth_type_trans(skb, bp->dev);
+
+	bp->stats.rx_packets++;
+	bp->stats.rx_bytes += len;
+	bp->dev->last_rx = jiffies;
+	dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n",
+		skb->len, skb->csum);
+	netif_receive_skb(skb);
+
+	return 0;
+}
+
+/* Mark DMA descriptors from begin up to and not including end as unused */
+static void discard_partial_frame(struct macb *bp, unsigned int begin,
+				  unsigned int end)
+{
+	unsigned int frag;
+
+	for (frag = begin; frag != end; frag = NEXT_RX(frag))
+		bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+	wmb();
+
+	/*
+	 * When this happens, the hardware stats registers for
+	 * whatever caused this are updated, so we don't have to record
+	 * anything.
+	 */
+}
+
+static int macb_rx(struct macb *bp, int budget)
+{
+	int received = 0;
+	unsigned int tail = bp->rx_tail;
+	int first_frag = -1;
+
+	for (; budget > 0; tail = NEXT_RX(tail)) {
+		u32 addr, ctrl;
+
+		rmb();
+		addr = bp->rx_ring[tail].addr;
+		ctrl = bp->rx_ring[tail].ctrl;
+
+		if (!(addr & MACB_BIT(RX_USED)))
+			break;
+
+		if (ctrl & MACB_BIT(RX_SOF)) {
+			if (first_frag != -1)
+				discard_partial_frame(bp, first_frag, tail);
+			first_frag = tail;
+		}
+
+		if (ctrl & MACB_BIT(RX_EOF)) {
+			int dropped;
+			BUG_ON(first_frag == -1);
+
+			dropped = macb_rx_frame(bp, first_frag, tail);
+			first_frag = -1;
+			if (!dropped) {
+				received++;
+				budget--;
+			}
+		}
+	}
+
+	if (first_frag != -1)
+		bp->rx_tail = first_frag;
+	else
+		bp->rx_tail = tail;
+
+	return received;
+}
+
+static int macb_poll(struct net_device *dev, int *budget)
+{
+	struct macb *bp = netdev_priv(dev);
+	int orig_budget, work_done, retval = 0;
+	u32 status;
+
+	status = macb_readl(bp, RSR);
+	macb_writel(bp, RSR, status);
+
+	if (!status) {
+		/*
+		 * This may happen if an interrupt was pending before
+		 * this function was called last time, and no packets
+		 * have been received since.
+		 */
+		netif_rx_complete(dev);
+		goto out;
+	}
+
+	dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n",
+		(unsigned long)status, *budget);
+
+	if (!(status & MACB_BIT(REC))) {
+		dev_warn(&bp->pdev->dev,
+			 "No RX buffers complete, status = %02lx\n",
+			 (unsigned long)status);
+		netif_rx_complete(dev);
+		goto out;
+	}
+
+	orig_budget = *budget;
+	if (orig_budget > dev->quota)
+		orig_budget = dev->quota;
+
+	work_done = macb_rx(bp, orig_budget);
+	if (work_done < orig_budget) {
+		netif_rx_complete(dev);
+		retval = 0;
+	} else {
+		retval = 1;
+	}
+
+	/*
+	 * We've done what we can to clean the buffers. Make sure we
+	 * get notified when new packets arrive.
+	 */
+out:
+	macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+
+	/* TODO: Handle errors */
+
+	return retval;
+}
+
+static irqreturn_t macb_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct macb *bp = netdev_priv(dev);
+	u32 status;
+
+	status = macb_readl(bp, ISR);
+
+	if (unlikely(!status))
+		return IRQ_NONE;
+
+	spin_lock(&bp->lock);
+
+	while (status) {
+		if (status & MACB_BIT(MFD))
+			complete(&bp->mdio_complete);
+
+		/* close possible race with dev_close */
+		if (unlikely(!netif_running(dev))) {
+			macb_writel(bp, IDR, ~0UL);
+			break;
+		}
+
+		if (status & MACB_RX_INT_FLAGS) {
+			if (netif_rx_schedule_prep(dev)) {
+				/*
+				 * There's no point taking any more interrupts
+				 * until we have processed the buffers
+				 */
+				macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+				dev_dbg(&bp->pdev->dev, "scheduling RX softirq\n");
+				__netif_rx_schedule(dev);
+			}
+		}
+
+		if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND)))
+			macb_tx(bp);
+
+		/*
+		 * Link change detection isn't possible with RMII, so we'll
+		 * add that if/when we get our hands on a full-blown MII PHY.
+		 */
+
+		if (status & MACB_BIT(HRESP)) {
+			/*
+			 * TODO: Reset the hardware, and maybe move the printk
+			 * to a lower-priority context as well (work queue?)
+			 */
+			printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n",
+			       dev->name);
+		}
+
+		status = macb_readl(bp, ISR);
+	}
+
+	spin_unlock(&bp->lock);
+
+	return IRQ_HANDLED;
+}
+
+static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct macb *bp = netdev_priv(dev);
+	dma_addr_t mapping;
+	unsigned int len, entry;
+	u32 ctrl;
+
+#ifdef DEBUG
+	int i;
+	dev_dbg(&bp->pdev->dev,
+		"start_xmit: len %u head %p data %p tail %p end %p\n",
+		skb->len, skb->head, skb->data, skb->tail, skb->end);
+	dev_dbg(&bp->pdev->dev,
+		"data:");
+	for (i = 0; i < 16; i++)
+		printk(" %02x", (unsigned int)skb->data[i]);
+	printk("\n");
+#endif
+
+	len = skb->len;
+	spin_lock_irq(&bp->lock);
+
+	/* This is a hard error, log it. */
+	if (TX_BUFFS_AVAIL(bp) < 1) {
+		netif_stop_queue(dev);
+		spin_unlock_irq(&bp->lock);
+		dev_err(&bp->pdev->dev,
+			"BUG! Tx Ring full when queue awake!\n");
+		dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
+			bp->tx_head, bp->tx_tail);
+		return 1;
+	}
+
+	entry = bp->tx_head;
+	dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 len, DMA_TO_DEVICE);
+	bp->tx_skb[entry].skb = skb;
+	bp->tx_skb[entry].mapping = mapping;
+	dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n",
+		skb->data, (unsigned long)mapping);
+
+	ctrl = MACB_BF(TX_FRMLEN, len);
+	ctrl |= MACB_BIT(TX_LAST);
+	if (entry == (TX_RING_SIZE - 1))
+		ctrl |= MACB_BIT(TX_WRAP);
+
+	bp->tx_ring[entry].addr = mapping;
+	bp->tx_ring[entry].ctrl = ctrl;
+	wmb();
+
+	entry = NEXT_TX(entry);
+	bp->tx_head = entry;
+
+	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+
+	if (TX_BUFFS_AVAIL(bp) < 1)
+		netif_stop_queue(dev);
+
+	spin_unlock_irq(&bp->lock);
+
+	dev->trans_start = jiffies;
+
+	return 0;
+}
+
+static void macb_free_consistent(struct macb *bp)
+{
+	if (bp->tx_skb) {
+		kfree(bp->tx_skb);
+		bp->tx_skb = NULL;
+	}
+	if (bp->rx_ring) {
+		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
+				  bp->rx_ring, bp->rx_ring_dma);
+		bp->rx_ring = NULL;
+	}
+	if (bp->tx_ring) {
+		dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
+				  bp->tx_ring, bp->tx_ring_dma);
+		bp->tx_ring = NULL;
+	}
+	if (bp->rx_buffers) {
+		dma_free_coherent(&bp->pdev->dev,
+				  RX_RING_SIZE * RX_BUFFER_SIZE,
+				  bp->rx_buffers, bp->rx_buffers_dma);
+		bp->rx_buffers = NULL;
+	}
+}
+
+static int macb_alloc_consistent(struct macb *bp)
+{
+	int size;
+
+	size = TX_RING_SIZE * sizeof(struct ring_info);
+	bp->tx_skb = kmalloc(size, GFP_KERNEL);
+	if (!bp->tx_skb)
+		goto out_err;
+
+	size = RX_RING_BYTES;
+	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+					 &bp->rx_ring_dma, GFP_KERNEL);
+	if (!bp->rx_ring)
+		goto out_err;
+	dev_dbg(&bp->pdev->dev,
+		"Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
+		size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
+
+	size = TX_RING_BYTES;
+	bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+					 &bp->tx_ring_dma, GFP_KERNEL);
+	if (!bp->tx_ring)
+		goto out_err;
+	dev_dbg(&bp->pdev->dev,
+		"Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
+		size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
+
+	size = RX_RING_SIZE * RX_BUFFER_SIZE;
+	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
+					    &bp->rx_buffers_dma, GFP_KERNEL);
+	if (!bp->rx_buffers)
+		goto out_err;
+	dev_dbg(&bp->pdev->dev,
+		"Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+		size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+
+	return 0;
+
+out_err:
+	macb_free_consistent(bp);
+	return -ENOMEM;
+}
+
+static void macb_init_rings(struct macb *bp)
+{
+	int i;
+	dma_addr_t addr;
+
+	addr = bp->rx_buffers_dma;
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		bp->rx_ring[i].addr = addr;
+		bp->rx_ring[i].ctrl = 0;
+		addr += RX_BUFFER_SIZE;
+	}
+	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		bp->tx_ring[i].addr = 0;
+		bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+	}
+	bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+
+	bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
+}
+
+static void macb_reset_hw(struct macb *bp)
+{
+	/* Make sure we have the write buffer for ourselves */
+	wmb();
+
+	/*
+	 * Disable RX and TX (XXX: Should we halt the transmission
+	 * more gracefully?)
+	 */
+	macb_writel(bp, NCR, 0);
+
+	/* Clear the stats registers (XXX: Update stats first?) */
+	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
+
+	/* Clear all status flags */
+	macb_writel(bp, TSR, ~0UL);
+	macb_writel(bp, RSR, ~0UL);
+
+	/* Disable all interrupts */
+	macb_writel(bp, IDR, ~0UL);
+	macb_readl(bp, ISR);
+}
+
+static void macb_init_hw(struct macb *bp)
+{
+	u32 config;
+
+	macb_reset_hw(bp);
+	__macb_set_hwaddr(bp);
+
+	config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
+	config |= MACB_BIT(PAE);		/* PAuse Enable */
+	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
+	if (bp->dev->flags & IFF_PROMISC)
+		config |= MACB_BIT(CAF);	/* Copy All Frames */
+	if (!(bp->dev->flags & IFF_BROADCAST))
+		config |= MACB_BIT(NBC);	/* No BroadCast */
+	macb_writel(bp, NCFGR, config);
+
+	/* Initialize TX and RX buffers */
+	macb_writel(bp, RBQP, bp->rx_ring_dma);
+	macb_writel(bp, TBQP, bp->tx_ring_dma);
+
+	/* Enable TX and RX */
+	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE));
+
+	/* Enable interrupts */
+	macb_writel(bp, IER, (MACB_BIT(RCOMP)
+			      | MACB_BIT(RXUBR)
+			      | MACB_BIT(ISR_TUND)
+			      | MACB_BIT(ISR_RLE)
+			      | MACB_BIT(TXERR)
+			      | MACB_BIT(TCOMP)
+			      | MACB_BIT(ISR_ROVR)
+			      | MACB_BIT(HRESP)));
+}
+
+static void macb_init_phy(struct net_device *dev)
+{
+	struct macb *bp = netdev_priv(dev);
+
+	/* Set some reasonable default settings */
+	macb_mdio_write(dev, bp->mii.phy_id, MII_ADVERTISE,
+			ADVERTISE_CSMA | ADVERTISE_ALL);
+	macb_mdio_write(dev, bp->mii.phy_id, MII_BMCR,
+			(BMCR_SPEED100 | BMCR_ANENABLE
+			 | BMCR_ANRESTART | BMCR_FULLDPLX));
+}
+
+static int macb_open(struct net_device *dev)
+{
+	struct macb *bp = netdev_priv(dev);
+	int err;
+
+	dev_dbg(&bp->pdev->dev, "open\n");
+
+	if (!is_valid_ether_addr(dev->dev_addr))
+		return -EADDRNOTAVAIL;
+
+	err = macb_alloc_consistent(bp);
+	if (err) {
+		printk(KERN_ERR
+		       "%s: Unable to allocate DMA memory (error %d)\n",
+		       dev->name, err);
+		return err;
+	}
+
+	macb_init_rings(bp);
+	macb_init_hw(bp);
+	macb_init_phy(dev);
+
+	macb_check_media(bp, 1, 1);
+	netif_start_queue(dev);
+
+	schedule_delayed_work(&bp->periodic_task, HZ);
+
+	return 0;
+}
+
+static int macb_close(struct net_device *dev)
+{
+	struct macb *bp = netdev_priv(dev);
+	unsigned long flags;
+
+	cancel_rearming_delayed_work(&bp->periodic_task);
+
+	netif_stop_queue(dev);
+
+	spin_lock_irqsave(&bp->lock, flags);
+	macb_reset_hw(bp);
+	netif_carrier_off(dev);
+	spin_unlock_irqrestore(&bp->lock, flags);
+
+	macb_free_consistent(bp);
+
+	return 0;
+}
+
+static struct net_device_stats *macb_get_stats(struct net_device *dev)
+{
+	struct macb *bp = netdev_priv(dev);
+	struct net_device_stats *nstat = &bp->stats;
+	struct macb_stats *hwstat = &bp->hw_stats;
+
+	/* Convert HW stats into netdevice stats */
+	nstat->rx_errors = (hwstat->rx_fcs_errors +
+			    hwstat->rx_align_errors +
+			    hwstat->rx_resource_errors +
+			    hwstat->rx_overruns +
+			    hwstat->rx_oversize_pkts +
+			    hwstat->rx_jabbers +
+			    hwstat->rx_undersize_pkts +
+			    hwstat->sqe_test_errors +
+			    hwstat->rx_length_mismatch);
+	nstat->tx_errors = (hwstat->tx_late_cols +
+			    hwstat->tx_excessive_cols +
+			    hwstat->tx_underruns +
+			    hwstat->tx_carrier_errors);
+	nstat->collisions = (hwstat->tx_single_cols +
+			     hwstat->tx_multiple_cols +
+			     hwstat->tx_excessive_cols);
+	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
+				   hwstat->rx_jabbers +
+				   hwstat->rx_undersize_pkts +
+				   hwstat->rx_length_mismatch);
+	nstat->rx_over_errors = hwstat->rx_resource_errors;
+	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
+	nstat->rx_frame_errors = hwstat->rx_align_errors;
+	nstat->rx_fifo_errors = hwstat->rx_overruns;
+	/* XXX: What does "missed" mean? */
+	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
+	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
+	nstat->tx_fifo_errors = hwstat->tx_underruns;
+	/* Don't know about heartbeat or window errors... */
+
+	return nstat;
+}
+
+static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct macb *bp = netdev_priv(dev);
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bp->lock, flags);
+	ret = mii_ethtool_gset(&bp->mii, cmd);
+	spin_unlock_irqrestore(&bp->lock, flags);
+
+	return ret;
+}
+
+static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct macb *bp = netdev_priv(dev);
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bp->lock, flags);
+	ret = mii_ethtool_sset(&bp->mii, cmd);
+	spin_unlock_irqrestore(&bp->lock, flags);
+
+	return ret;
+}
+
+static void macb_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	struct macb *bp = netdev_priv(dev);
+
+	strcpy(info->driver, bp->pdev->dev.driver->name);
+	strcpy(info->version, "$Revision: 1.14 $");
+	strcpy(info->bus_info, bp->pdev->dev.bus_id);
+}
+
+static int macb_nway_reset(struct net_device *dev)
+{
+	struct macb *bp = netdev_priv(dev);
+	return mii_nway_restart(&bp->mii);
+}
+
+static struct ethtool_ops macb_ethtool_ops = {
+	.get_settings		= macb_get_settings,
+	.set_settings		= macb_set_settings,
+	.get_drvinfo		= macb_get_drvinfo,
+	.nway_reset		= macb_nway_reset,
+	.get_link		= ethtool_op_get_link,
+};
+
+static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct macb *bp = netdev_priv(dev);
+	int ret;
+	unsigned long flags;
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	spin_lock_irqsave(&bp->lock, flags);
+	ret = generic_mii_ioctl(&bp->mii, if_mii(rq), cmd, NULL);
+	spin_unlock_irqrestore(&bp->lock, flags);
+
+	return ret;
+}
+
+static ssize_t macb_mii_show(const struct class_device *cd, char *buf,
+			unsigned long addr)
+{
+	struct net_device *dev = to_net_dev(cd);
+	struct macb *bp = netdev_priv(dev);
+	ssize_t ret = -EINVAL;
+
+	if (netif_running(dev)) {
+		int value;
+		value = macb_mdio_read(dev, bp->mii.phy_id, addr);
+		ret = sprintf(buf, "0x%04x\n", (uint16_t)value);
+	}
+
+	return ret;
+}
+
+#define MII_ENTRY(name, addr)					\
+static ssize_t show_##name(struct class_device *cd, char *buf)	\
+{								\
+	return macb_mii_show(cd, buf, addr);			\
+}								\
+static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
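+
+/*
+ * For example, MII_ENTRY(bmcr, MII_BMCR) below expands to a show_bmcr()
+ * helper and a read-only class_device_attr_bmcr, which is then listed in
+ * macb_mii_attrs[] and exported under the "mii" sysfs group.
+ */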
+
+MII_ENTRY(bmcr, MII_BMCR);
+MII_ENTRY(bmsr, MII_BMSR);
+MII_ENTRY(physid1, MII_PHYSID1);
+MII_ENTRY(physid2, MII_PHYSID2);
+MII_ENTRY(advertise, MII_ADVERTISE);
+MII_ENTRY(lpa, MII_LPA);
+MII_ENTRY(expansion, MII_EXPANSION);
+
+static struct attribute *macb_mii_attrs[] = {
+	&class_device_attr_bmcr.attr,
+	&class_device_attr_bmsr.attr,
+	&class_device_attr_physid1.attr,
+	&class_device_attr_physid2.attr,
+	&class_device_attr_advertise.attr,
+	&class_device_attr_lpa.attr,
+	&class_device_attr_expansion.attr,
+	NULL,
+};
+
+static struct attribute_group macb_mii_group = {
+	.name	= "mii",
+	.attrs	= macb_mii_attrs,
+};
+
+static void macb_unregister_sysfs(struct net_device *net)
+{
+	struct class_device *class_dev = &net->class_dev;
+
+	sysfs_remove_group(&class_dev->kobj, &macb_mii_group);
+}
+
+static int macb_register_sysfs(struct net_device *net)
+{
+	struct class_device *class_dev = &net->class_dev;
+	int ret;
+
+	ret = sysfs_create_group(&class_dev->kobj, &macb_mii_group);
+	if (ret)
+		printk(KERN_WARNING
+		       "%s: sysfs mii attribute registration failed: %d\n",
+		       net->name, ret);
+	return ret;
+}
+
+static int __devinit macb_probe(struct platform_device *pdev)
+{
+	struct eth_platform_data *pdata;
+	struct resource *regs;
+	struct net_device *dev;
+	struct macb *bp;
+	unsigned long pclk_hz;
+	u32 config;
+	int err = -ENXIO;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs) {
+		dev_err(&pdev->dev, "no mmio resource defined\n");
+		goto err_out;
+	}
+
+	err = -ENOMEM;
+	dev = alloc_etherdev(sizeof(*bp));
+	if (!dev) {
+		dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
+		goto err_out;
+	}
+
+	SET_MODULE_OWNER(dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	/* TODO: Actually, we have some interesting features... */
+	dev->features |= 0;
+
+	bp = netdev_priv(dev);
+	bp->pdev = pdev;
+	bp->dev = dev;
+
+	spin_lock_init(&bp->lock);
+
+	bp->pclk = clk_get(&pdev->dev, "pclk");
+	if (IS_ERR(bp->pclk)) {
+		dev_err(&pdev->dev, "failed to get pclk\n");
+		goto err_out_free_dev;
+	}
+	bp->hclk = clk_get(&pdev->dev, "hclk");
+	if (IS_ERR(bp->hclk)) {
+		dev_err(&pdev->dev, "failed to get hclk\n");
+		goto err_out_put_pclk;
+	}
+
+	clk_enable(bp->pclk);
+	clk_enable(bp->hclk);
+
+	bp->regs = ioremap(regs->start, regs->end - regs->start + 1);
+	if (!bp->regs) {
+		dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+		err = -ENOMEM;
+		goto err_out_disable_clocks;
+	}
+
+	dev->irq = platform_get_irq(pdev, 0);
+	err = request_irq(dev->irq, macb_interrupt, SA_SAMPLE_RANDOM,
+			  dev->name, dev);
+	if (err) {
+		printk(KERN_ERR
+		       "%s: Unable to request IRQ %d (error %d)\n",
+		       dev->name, dev->irq, err);
+		goto err_out_iounmap;
+	}
+
+	dev->open = macb_open;
+	dev->stop = macb_close;
+	dev->hard_start_xmit = macb_start_xmit;
+	dev->get_stats = macb_get_stats;
+	dev->do_ioctl = macb_ioctl;
+	dev->poll = macb_poll;
+	dev->weight = 64;
+	dev->ethtool_ops = &macb_ethtool_ops;
+
+	dev->base_addr = regs->start;
+
+	INIT_WORK(&bp->periodic_task, macb_periodic_task, bp);
+	mutex_init(&bp->mdio_mutex);
+	init_completion(&bp->mdio_complete);
+
+	/* Set MII management clock divider */
+	pclk_hz = clk_get_rate(bp->pclk);
+	if (pclk_hz <= 20000000)
+		config = MACB_BF(CLK, MACB_CLK_DIV8);
+	else if (pclk_hz <= 40000000)
+		config = MACB_BF(CLK, MACB_CLK_DIV16);
+	else if (pclk_hz <= 80000000)
+		config = MACB_BF(CLK, MACB_CLK_DIV32);
+	else
+		config = MACB_BF(CLK, MACB_CLK_DIV64);
+	macb_writel(bp, NCFGR, config);
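+	/*
+	 * Example: a 60 MHz pclk falls in the <= 80 MHz bracket, so
+	 * MACB_CLK_DIV32 is selected and MDC runs at about 1.875 MHz,
+	 * comfortably below the usual 2.5 MHz MDIO limit.
+	 */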
+
+	bp->mii.dev = dev;
+	bp->mii.mdio_read = macb_mdio_read;
+	bp->mii.mdio_write = macb_mdio_write;
+	bp->mii.phy_id_mask = 0x1f;
+	bp->mii.reg_num_mask = 0x1f;
+
+	macb_get_hwaddr(bp);
+	err = macb_phy_probe(bp);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to detect PHY, aborting.\n");
+		goto err_out_free_irq;
+	}
+
+	pdata = pdev->dev.platform_data;
+	if (pdata && pdata->is_rmii)
+		macb_writel(bp, USRIO, 0);
+	else
+		macb_writel(bp, USRIO, MACB_BIT(MII));
+
+	bp->tx_pending = DEF_TX_RING_PENDING;
+
+	err = register_netdev(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+		goto err_out_free_irq;
+	}
+
+	platform_set_drvdata(pdev, dev);
+
+	macb_register_sysfs(dev);
+
+	printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d "
+	       "(%02x:%02x:%02x:%02x:%02x:%02x)\n",
+	       dev->name, dev->base_addr, dev->irq,
+	       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+	return 0;
+
+err_out_free_irq:
+	free_irq(dev->irq, dev);
+err_out_iounmap:
+	iounmap(bp->regs);
+err_out_disable_clocks:
+	clk_disable(bp->hclk);
+	clk_disable(bp->pclk);
+	clk_put(bp->hclk);
+err_out_put_pclk:
+	clk_put(bp->pclk);
+err_out_free_dev:
+	free_netdev(dev);
+err_out:
+	platform_set_drvdata(pdev, NULL);
+	return err;
+}
+
+static int __devexit macb_remove(struct platform_device *pdev)
+{
+	struct net_device *dev;
+	struct macb *bp;
+
+	dev = platform_get_drvdata(pdev);
+
+	if (dev) {
+		bp = netdev_priv(dev);
+		macb_unregister_sysfs(dev);
+		unregister_netdev(dev);
+		free_irq(dev->irq, dev);
+		iounmap(bp->regs);
+		clk_disable(bp->hclk);
+		clk_disable(bp->pclk);
+		clk_put(bp->hclk);
+		clk_put(bp->pclk);
+		free_netdev(dev);
+		platform_set_drvdata(pdev, NULL);
+	}
+
+	return 0;
+}
+
+static struct platform_driver macb_driver = {
+	.probe		= macb_probe,
+	.remove		= __devexit_p(macb_remove),
+	.driver		= {
+		.name		= "macb",
+	},
+};
+
+static int __init macb_init(void)
+{
+	return platform_driver_register(&macb_driver);
+}
+
+static void __exit macb_exit(void)
+{
+	platform_driver_unregister(&macb_driver);
+}
+
+module_init(macb_init);
+module_exit(macb_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Atmel MACB Ethernet driver");
+MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
diff --git a/drivers/net/macb.h b/drivers/net/macb.h
new file mode 100644
index 0000000..8c253db
--- /dev/null
+++ b/drivers/net/macb.h
@@ -0,0 +1,387 @@
+/*
+ * Atmel MACB Ethernet Controller driver
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _MACB_H
+#define _MACB_H
+
+/* MACB register offsets */
+#define MACB_NCR				0x0000
+#define MACB_NCFGR				0x0004
+#define MACB_NSR				0x0008
+#define MACB_TSR				0x0014
+#define MACB_RBQP				0x0018
+#define MACB_TBQP				0x001c
+#define MACB_RSR				0x0020
+#define MACB_ISR				0x0024
+#define MACB_IER				0x0028
+#define MACB_IDR				0x002c
+#define MACB_IMR				0x0030
+#define MACB_MAN				0x0034
+#define MACB_PTR				0x0038
+#define MACB_PFR				0x003c
+#define MACB_FTO				0x0040
+#define MACB_SCF				0x0044
+#define MACB_MCF				0x0048
+#define MACB_FRO				0x004c
+#define MACB_FCSE				0x0050
+#define MACB_ALE				0x0054
+#define MACB_DTF				0x0058
+#define MACB_LCOL				0x005c
+#define MACB_EXCOL				0x0060
+#define MACB_TUND				0x0064
+#define MACB_CSE				0x0068
+#define MACB_RRE				0x006c
+#define MACB_ROVR				0x0070
+#define MACB_RSE				0x0074
+#define MACB_ELE				0x0078
+#define MACB_RJA				0x007c
+#define MACB_USF				0x0080
+#define MACB_STE				0x0084
+#define MACB_RLE				0x0088
+#define MACB_TPF				0x008c
+#define MACB_HRB				0x0090
+#define MACB_HRT				0x0094
+#define MACB_SA1B				0x0098
+#define MACB_SA1T				0x009c
+#define MACB_SA2B				0x00a0
+#define MACB_SA2T				0x00a4
+#define MACB_SA3B				0x00a8
+#define MACB_SA3T				0x00ac
+#define MACB_SA4B				0x00b0
+#define MACB_SA4T				0x00b4
+#define MACB_TID				0x00b8
+#define MACB_TPQ				0x00bc
+#define MACB_USRIO				0x00c0
+#define MACB_WOL				0x00c4
+
+/* Bitfields in NCR */
+#define MACB_LB_OFFSET				0
+#define MACB_LB_SIZE				1
+#define MACB_LLB_OFFSET				1
+#define MACB_LLB_SIZE				1
+#define MACB_RE_OFFSET				2
+#define MACB_RE_SIZE				1
+#define MACB_TE_OFFSET				3
+#define MACB_TE_SIZE				1
+#define MACB_MPE_OFFSET				4
+#define MACB_MPE_SIZE				1
+#define MACB_CLRSTAT_OFFSET			5
+#define MACB_CLRSTAT_SIZE			1
+#define MACB_INCSTAT_OFFSET			6
+#define MACB_INCSTAT_SIZE			1
+#define MACB_WESTAT_OFFSET			7
+#define MACB_WESTAT_SIZE			1
+#define MACB_BP_OFFSET				8
+#define MACB_BP_SIZE				1
+#define MACB_TSTART_OFFSET			9
+#define MACB_TSTART_SIZE			1
+#define MACB_THALT_OFFSET			10
+#define MACB_THALT_SIZE				1
+#define MACB_NCR_TPF_OFFSET			11
+#define MACB_NCR_TPF_SIZE			1
+#define MACB_TZQ_OFFSET				12
+#define MACB_TZQ_SIZE				1
+
+/* Bitfields in NCFGR */
+#define MACB_SPD_OFFSET				0
+#define MACB_SPD_SIZE				1
+#define MACB_FD_OFFSET				1
+#define MACB_FD_SIZE				1
+#define MACB_BIT_RATE_OFFSET			2
+#define MACB_BIT_RATE_SIZE			1
+#define MACB_JFRAME_OFFSET			3
+#define MACB_JFRAME_SIZE			1
+#define MACB_CAF_OFFSET				4
+#define MACB_CAF_SIZE				1
+#define MACB_NBC_OFFSET				5
+#define MACB_NBC_SIZE				1
+#define MACB_NCFGR_MTI_OFFSET			6
+#define MACB_NCFGR_MTI_SIZE			1
+#define MACB_UNI_OFFSET				7
+#define MACB_UNI_SIZE				1
+#define MACB_BIG_OFFSET				8
+#define MACB_BIG_SIZE				1
+#define MACB_EAE_OFFSET				9
+#define MACB_EAE_SIZE				1
+#define MACB_CLK_OFFSET				10
+#define MACB_CLK_SIZE				2
+#define MACB_RTY_OFFSET				12
+#define MACB_RTY_SIZE				1
+#define MACB_PAE_OFFSET				13
+#define MACB_PAE_SIZE				1
+#define MACB_RBOF_OFFSET			14
+#define MACB_RBOF_SIZE				2
+#define MACB_RLCE_OFFSET			16
+#define MACB_RLCE_SIZE				1
+#define MACB_DRFCS_OFFSET			17
+#define MACB_DRFCS_SIZE				1
+#define MACB_EFRHD_OFFSET			18
+#define MACB_EFRHD_SIZE				1
+#define MACB_IRXFCS_OFFSET			19
+#define MACB_IRXFCS_SIZE			1
+
+/* Bitfields in NSR */
+#define MACB_NSR_LINK_OFFSET			0
+#define MACB_NSR_LINK_SIZE			1
+#define MACB_MDIO_OFFSET			1
+#define MACB_MDIO_SIZE				1
+#define MACB_IDLE_OFFSET			2
+#define MACB_IDLE_SIZE				1
+
+/* Bitfields in TSR */
+#define MACB_UBR_OFFSET				0
+#define MACB_UBR_SIZE				1
+#define MACB_COL_OFFSET				1
+#define MACB_COL_SIZE				1
+#define MACB_TSR_RLE_OFFSET			2
+#define MACB_TSR_RLE_SIZE			1
+#define MACB_TGO_OFFSET				3
+#define MACB_TGO_SIZE				1
+#define MACB_BEX_OFFSET				4
+#define MACB_BEX_SIZE				1
+#define MACB_COMP_OFFSET			5
+#define MACB_COMP_SIZE				1
+#define MACB_UND_OFFSET				6
+#define MACB_UND_SIZE				1
+
+/* Bitfields in RSR */
+#define MACB_BNA_OFFSET				0
+#define MACB_BNA_SIZE				1
+#define MACB_REC_OFFSET				1
+#define MACB_REC_SIZE				1
+#define MACB_OVR_OFFSET				2
+#define MACB_OVR_SIZE				1
+
+/* Bitfields in ISR/IER/IDR/IMR */
+#define MACB_MFD_OFFSET				0
+#define MACB_MFD_SIZE				1
+#define MACB_RCOMP_OFFSET			1
+#define MACB_RCOMP_SIZE				1
+#define MACB_RXUBR_OFFSET			2
+#define MACB_RXUBR_SIZE				1
+#define MACB_TXUBR_OFFSET			3
+#define MACB_TXUBR_SIZE				1
+#define MACB_ISR_TUND_OFFSET			4
+#define MACB_ISR_TUND_SIZE			1
+#define MACB_ISR_RLE_OFFSET			5
+#define MACB_ISR_RLE_SIZE			1
+#define MACB_TXERR_OFFSET			6
+#define MACB_TXERR_SIZE				1
+#define MACB_TCOMP_OFFSET			7
+#define MACB_TCOMP_SIZE				1
+#define MACB_ISR_LINK_OFFSET			9
+#define MACB_ISR_LINK_SIZE			1
+#define MACB_ISR_ROVR_OFFSET			10
+#define MACB_ISR_ROVR_SIZE			1
+#define MACB_HRESP_OFFSET			11
+#define MACB_HRESP_SIZE				1
+#define MACB_PFR_OFFSET				12
+#define MACB_PFR_SIZE				1
+#define MACB_PTZ_OFFSET				13
+#define MACB_PTZ_SIZE				1
+
+/* Bitfields in MAN */
+#define MACB_DATA_OFFSET			0
+#define MACB_DATA_SIZE				16
+#define MACB_CODE_OFFSET			16
+#define MACB_CODE_SIZE				2
+#define MACB_REGA_OFFSET			18
+#define MACB_REGA_SIZE				5
+#define MACB_PHYA_OFFSET			23
+#define MACB_PHYA_SIZE				5
+#define MACB_RW_OFFSET				28
+#define MACB_RW_SIZE				2
+#define MACB_SOF_OFFSET				30
+#define MACB_SOF_SIZE				2
+
+/* Bitfields in USRIO */
+#define MACB_MII_OFFSET				0
+#define MACB_MII_SIZE				1
+#define MACB_EAM_OFFSET				1
+#define MACB_EAM_SIZE				1
+#define MACB_TX_PAUSE_OFFSET			2
+#define MACB_TX_PAUSE_SIZE			1
+#define MACB_TX_PAUSE_ZERO_OFFSET		3
+#define MACB_TX_PAUSE_ZERO_SIZE			1
+
+/* Bitfields in WOL */
+#define MACB_IP_OFFSET				0
+#define MACB_IP_SIZE				16
+#define MACB_MAG_OFFSET				16
+#define MACB_MAG_SIZE				1
+#define MACB_ARP_OFFSET				17
+#define MACB_ARP_SIZE				1
+#define MACB_SA1_OFFSET				18
+#define MACB_SA1_SIZE				1
+#define MACB_WOL_MTI_OFFSET			19
+#define MACB_WOL_MTI_SIZE			1
+
+/* Constants for CLK */
+#define MACB_CLK_DIV8				0
+#define MACB_CLK_DIV16				1
+#define MACB_CLK_DIV32				2
+#define MACB_CLK_DIV64				3
+
+/* Constants for MAN register */
+#define MACB_MAN_SOF				1
+#define MACB_MAN_WRITE				1
+#define MACB_MAN_READ				2
+#define MACB_MAN_CODE				2
+
+/* Bit manipulation macros */
+#define MACB_BIT(name)					\
+	(1 << MACB_##name##_OFFSET)
+#define MACB_BF(name,value)				\
+	(((value) & ((1 << MACB_##name##_SIZE) - 1))	\
+	 << MACB_##name##_OFFSET)
+#define MACB_BFEXT(name,value)\
+	(((value) >> MACB_##name##_OFFSET)		\
+	 & ((1 << MACB_##name##_SIZE) - 1))
+#define MACB_BFINS(name,value,old)			\
+	(((old) & ~(((1 << MACB_##name##_SIZE) - 1)	\
+		    << MACB_##name##_OFFSET))		\
+	 | MACB_BF(name,value))
+
+/* Register access macros */
+#define macb_readl(port,reg)				\
+	readl((port)->regs + MACB_##reg)
+#define macb_writel(port,reg,value)			\
+	writel((value), (port)->regs + MACB_##reg)
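+
+/*
+ * Usage sketch: MACB_BIT(TE) is 1 << 3, MACB_BF(CLK, MACB_CLK_DIV32) is
+ * (2 & 3) << 10 == 0x800, and macb_writel(bp, NCR, MACB_BIT(TE)) expands
+ * to writel(1 << 3, bp->regs + 0x0000).
+ */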
+
+struct dma_desc {
+	u32	addr;
+	u32	ctrl;
+};
+
+/* DMA descriptor bitfields */
+#define MACB_RX_USED_OFFSET			0
+#define MACB_RX_USED_SIZE			1
+#define MACB_RX_WRAP_OFFSET			1
+#define MACB_RX_WRAP_SIZE			1
+#define MACB_RX_WADDR_OFFSET			2
+#define MACB_RX_WADDR_SIZE			30
+
+#define MACB_RX_FRMLEN_OFFSET			0
+#define MACB_RX_FRMLEN_SIZE			12
+#define MACB_RX_OFFSET_OFFSET			12
+#define MACB_RX_OFFSET_SIZE			2
+#define MACB_RX_SOF_OFFSET			14
+#define MACB_RX_SOF_SIZE			1
+#define MACB_RX_EOF_OFFSET			15
+#define MACB_RX_EOF_SIZE			1
+#define MACB_RX_CFI_OFFSET			16
+#define MACB_RX_CFI_SIZE			1
+#define MACB_RX_VLAN_PRI_OFFSET			17
+#define MACB_RX_VLAN_PRI_SIZE			3
+#define MACB_RX_PRI_TAG_OFFSET			20
+#define MACB_RX_PRI_TAG_SIZE			1
+#define MACB_RX_VLAN_TAG_OFFSET			21
+#define MACB_RX_VLAN_TAG_SIZE			1
+#define MACB_RX_TYPEID_MATCH_OFFSET		22
+#define MACB_RX_TYPEID_MATCH_SIZE		1
+#define MACB_RX_SA4_MATCH_OFFSET		23
+#define MACB_RX_SA4_MATCH_SIZE			1
+#define MACB_RX_SA3_MATCH_OFFSET		24
+#define MACB_RX_SA3_MATCH_SIZE			1
+#define MACB_RX_SA2_MATCH_OFFSET		25
+#define MACB_RX_SA2_MATCH_SIZE			1
+#define MACB_RX_SA1_MATCH_OFFSET		26
+#define MACB_RX_SA1_MATCH_SIZE			1
+#define MACB_RX_EXT_MATCH_OFFSET		28
+#define MACB_RX_EXT_MATCH_SIZE			1
+#define MACB_RX_UHASH_MATCH_OFFSET		29
+#define MACB_RX_UHASH_MATCH_SIZE		1
+#define MACB_RX_MHASH_MATCH_OFFSET		30
+#define MACB_RX_MHASH_MATCH_SIZE		1
+#define MACB_RX_BROADCAST_OFFSET		31
+#define MACB_RX_BROADCAST_SIZE			1
+
+#define MACB_TX_FRMLEN_OFFSET			0
+#define MACB_TX_FRMLEN_SIZE			11
+#define MACB_TX_LAST_OFFSET			15
+#define MACB_TX_LAST_SIZE			1
+#define MACB_TX_NOCRC_OFFSET			16
+#define MACB_TX_NOCRC_SIZE			1
+#define MACB_TX_BUF_EXHAUSTED_OFFSET		27
+#define MACB_TX_BUF_EXHAUSTED_SIZE		1
+#define MACB_TX_UNDERRUN_OFFSET			28
+#define MACB_TX_UNDERRUN_SIZE			1
+#define MACB_TX_ERROR_OFFSET			29
+#define MACB_TX_ERROR_SIZE			1
+#define MACB_TX_WRAP_OFFSET			30
+#define MACB_TX_WRAP_SIZE			1
+#define MACB_TX_USED_OFFSET			31
+#define MACB_TX_USED_SIZE			1
+
+struct ring_info {
+	struct sk_buff		*skb;
+	dma_addr_t		mapping;
+};
+
+/*
+ * Hardware-collected statistics. Used when updating the network
+ * device stats by a periodic timer.
+ */
+struct macb_stats {
+	u32	rx_pause_frames;
+	u32	tx_ok;
+	u32	tx_single_cols;
+	u32	tx_multiple_cols;
+	u32	rx_ok;
+	u32	rx_fcs_errors;
+	u32	rx_align_errors;
+	u32	tx_deferred;
+	u32	tx_late_cols;
+	u32	tx_excessive_cols;
+	u32	tx_underruns;
+	u32	tx_carrier_errors;
+	u32	rx_resource_errors;
+	u32	rx_overruns;
+	u32	rx_symbol_errors;
+	u32	rx_oversize_pkts;
+	u32	rx_jabbers;
+	u32	rx_undersize_pkts;
+	u32	sqe_test_errors;
+	u32	rx_length_mismatch;
+	u32	tx_pause_frames;
+};
+
+struct macb {
+	void __iomem		*regs;
+
+	unsigned int		rx_tail;
+	struct dma_desc		*rx_ring;
+	void			*rx_buffers;
+
+	unsigned int		tx_head, tx_tail;
+	struct dma_desc		*tx_ring;
+	struct ring_info	*tx_skb;
+
+	spinlock_t		lock;
+	struct platform_device	*pdev;
+	struct clk		*pclk;
+	struct clk		*hclk;
+	struct net_device	*dev;
+	struct net_device_stats	stats;
+	struct macb_stats	hw_stats;
+
+	dma_addr_t		rx_ring_dma;
+	dma_addr_t		tx_ring_dma;
+	dma_addr_t		rx_buffers_dma;
+
+	unsigned int		rx_pending, tx_pending;
+
+	struct work_struct	periodic_task;
+
+	struct mutex		mdio_mutex;
+	struct completion	mdio_complete;
+	struct mii_if_info	mii;
+};
+
+#endif /* _MACB_H */
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index c1aa60b..e1d97cd 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -33,7 +33,6 @@
 #include <asm/ip32/ip32_ints.h>
 
 #include <asm/io.h>
-#include <asm/checksum.h>
 #include <asm/scatterlist.h>
 #include <linux/dma-mapping.h>
 
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 9997081..c41ae42 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -277,9 +277,11 @@
  *
  * Actual routine to reset the adapter when a timeout on Tx has occurred
  */
-static void mv643xx_eth_tx_timeout_task(struct net_device *dev)
+static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
 {
-	struct mv643xx_private *mp = netdev_priv(dev);
+	struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private,
+						  tx_timeout_task);
+	struct net_device *dev = mp->mii.dev; /* yuck */
 
 	if (!netif_running(dev))
 		return;
@@ -1098,7 +1100,7 @@
 					 ETH_TX_ENABLE_INTERRUPT;
 			mp->tx_skb[tx_index] = skb;
 		} else
-			mp->tx_skb[tx_index] = 0;
+			mp->tx_skb[tx_index] = NULL;
 
 		desc = &mp->p_tx_desc_area[tx_index];
 		desc->l4i_chk = 0;
@@ -1134,7 +1136,7 @@
 		eth_tx_fill_frag_descs(mp, skb);
 
 		length = skb_headlen(skb);
-		mp->tx_skb[tx_index] = 0;
+		mp->tx_skb[tx_index] = NULL;
 	} else {
 		cmd_sts |= ETH_ZERO_PADDING |
 			   ETH_TX_LAST_DESC |
@@ -1360,8 +1362,7 @@
 #endif
 
 	/* Configure the timeout task */
-	INIT_WORK(&mp->tx_timeout_task,
-			(void (*)(void *))mv643xx_eth_tx_timeout_task, dev);
+	INIT_WORK(&mp->tx_timeout_task, mv643xx_eth_tx_timeout_task);
 
 	spin_lock_init(&mp->lock);
 
diff --git a/drivers/net/mvme147.c b/drivers/net/mvme147.c
index 56a82d8..e246d00 100644
--- a/drivers/net/mvme147.c
+++ b/drivers/net/mvme147.c
@@ -184,7 +184,7 @@
 MODULE_LICENSE("GPL");
 
 static struct net_device *dev_mvme147_lance;
-int init_module(void)
+int __init init_module(void)
 {
 	dev_mvme147_lance = mvme147lance_probe(-1);
 	if (IS_ERR(dev_mvme147_lance))
@@ -192,7 +192,7 @@
 	return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	struct m147lance_private *lp = dev_mvme147_lance->priv;
 	unregister_netdev(dev_mvme147_lance);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 806081b..81f127a 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -89,7 +89,7 @@
 #define MYRI10GE_EEPROM_STRINGS_SIZE 256
 #define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
 
-#define MYRI10GE_NO_CONFIRM_DATA 0xffffffff
+#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
 #define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff
 
 struct myri10ge_rx_buffer_state {
@@ -156,8 +156,8 @@
 	int sram_size;
 	unsigned long board_span;
 	unsigned long iomem_base;
-	u32 __iomem *irq_claim;
-	u32 __iomem *irq_deassert;
+	__be32 __iomem *irq_claim;
+	__be32 __iomem *irq_deassert;
 	char *mac_addr_string;
 	struct mcp_cmd_response *cmd;
 	dma_addr_t cmd_bus;
@@ -165,10 +165,10 @@
 	dma_addr_t fw_stats_bus;
 	struct pci_dev *pdev;
 	int msi_enabled;
-	unsigned int link_state;
+	__be32 link_state;
 	unsigned int rdma_tags_available;
 	int intr_coal_delay;
-	u32 __iomem *intr_coal_delay_ptr;
+	__be32 __iomem *intr_coal_delay_ptr;
 	int mtrr;
 	int wake_queue;
 	int stop_queue;
@@ -273,6 +273,11 @@
 
 #define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
 
+static inline void put_be32(__be32 val, __be32 __iomem *p)
+{
+	__raw_writel((__force __u32)val, (__force void __iomem *)p);
+}
+
 static int
 myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
 		  struct myri10ge_cmd *data, int atomic)
@@ -296,7 +301,7 @@
 
 	buf->response_addr.low = htonl(dma_low);
 	buf->response_addr.high = htonl(dma_high);
-	response->result = MYRI10GE_NO_RESPONSE_RESULT;
+	response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT);
 	mb();
 	myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));
 
@@ -311,14 +316,14 @@
 		 * (1ms will be enough for those commands) */
 		for (sleep_total = 0;
 		     sleep_total < 1000
-		     && response->result == MYRI10GE_NO_RESPONSE_RESULT;
+		     && response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
 		     sleep_total += 10)
 			udelay(10);
 	} else {
 		/* use msleep for most command */
 		for (sleep_total = 0;
 		     sleep_total < 15
-		     && response->result == MYRI10GE_NO_RESPONSE_RESULT;
+		     && response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
 		     sleep_total++)
 			msleep(1);
 	}
@@ -393,7 +398,7 @@
 static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
 {
 	char __iomem *submit;
-	u32 buf[16];
+	__be32 buf[16];
 	u32 dma_low, dma_high;
 	int i;
 
@@ -410,7 +415,7 @@
 
 	buf[0] = htonl(dma_high);	/* confirm addr MSW */
 	buf[1] = htonl(dma_low);	/* confirm addr LSW */
-	buf[2] = htonl(MYRI10GE_NO_CONFIRM_DATA);	/* confirm data */
+	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */
 	buf[3] = htonl(dma_high);	/* dummy addr MSW */
 	buf[4] = htonl(dma_low);	/* dummy addr LSW */
 	buf[5] = htonl(enable);	/* enable? */
@@ -479,7 +484,7 @@
 	}
 
 	/* check id */
-	hdr_offset = ntohl(*(u32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
+	hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
 	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
 		dev_err(dev, "Bad firmware file\n");
 		status = -EINVAL;
@@ -550,7 +555,7 @@
 static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
 {
 	char __iomem *submit;
-	u32 buf[16];
+	__be32 buf[16];
 	u32 dma_low, dma_high, size;
 	int status, i;
 
@@ -600,7 +605,7 @@
 
 	buf[0] = htonl(dma_high);	/* confirm addr MSW */
 	buf[1] = htonl(dma_low);	/* confirm addr LSW */
-	buf[2] = htonl(MYRI10GE_NO_CONFIRM_DATA);	/* confirm data */
+	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */
 
 	/* FIX: All newest firmware should un-protect the bottom of
 	 * the sram before handoff. However, the very first interfaces
@@ -705,21 +710,21 @@
 
 	status |=
 	    myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
-	mgp->irq_claim = (__iomem u32 *) (mgp->sram + cmd.data0);
+	mgp->irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0);
 	if (!mgp->msi_enabled) {
 		status |= myri10ge_send_cmd
 		    (mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, &cmd, 0);
-		mgp->irq_deassert = (__iomem u32 *) (mgp->sram + cmd.data0);
+		mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);
 
 	}
 	status |= myri10ge_send_cmd
 	    (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
-	mgp->intr_coal_delay_ptr = (__iomem u32 *) (mgp->sram + cmd.data0);
+	mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0);
 	if (status != 0) {
 		dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
 		return status;
 	}
-	__raw_writel(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
+	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
 
 	/* Run a small DMA test.
 	 * The magic multipliers to the length tell the firmware
@@ -786,14 +791,16 @@
 myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
 		    struct mcp_kreq_ether_recv *src)
 {
-	u32 low;
+	__be32 low;
 
 	low = src->addr_low;
-	src->addr_low = DMA_32BIT_MASK;
-	myri10ge_pio_copy(dst, src, 8 * sizeof(*src));
+	src->addr_low = htonl(DMA_32BIT_MASK);
+	myri10ge_pio_copy(dst, src, 4 * sizeof(*src));
+	mb();
+	myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src));
 	mb();
 	src->addr_low = low;
-	__raw_writel(low, &dst->addr_low);
+	put_be32(low, &dst->addr_low);
 	mb();
 }
 
@@ -939,11 +946,11 @@
 	return retval;
 }
 
-static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, u16 hw_csum)
+static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
 {
 	struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
 
-	if ((skb->protocol == ntohs(ETH_P_8021Q)) &&
+	if ((skb->protocol == htons(ETH_P_8021Q)) &&
 	    (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
 	     vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
 		skb->csum = hw_csum;
@@ -953,7 +960,7 @@
 
 static inline unsigned long
 myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
-		 int bytes, int len, int csum)
+		 int bytes, int len, __wsum csum)
 {
 	dma_addr_t bus;
 	struct sk_buff *skb;
@@ -986,12 +993,12 @@
 
 	skb->protocol = eth_type_trans(skb, mgp->dev);
 	if (mgp->csum_flag) {
-		if ((skb->protocol == ntohs(ETH_P_IP)) ||
-		    (skb->protocol == ntohs(ETH_P_IPV6))) {
-			skb->csum = ntohs((u16) csum);
+		if ((skb->protocol == htons(ETH_P_IP)) ||
+		    (skb->protocol == htons(ETH_P_IPV6))) {
+			skb->csum = csum;
 			skb->ip_summed = CHECKSUM_COMPLETE;
 		} else
-			myri10ge_vlan_ip_csum(skb, ntohs((u16) csum));
+			myri10ge_vlan_ip_csum(skb, csum);
 	}
 
 	netif_receive_skb(skb);
@@ -1060,12 +1067,12 @@
 	int idx = rx_done->idx;
 	int cnt = rx_done->cnt;
 	u16 length;
-	u16 checksum;
+	__wsum checksum;
 
 	while (rx_done->entry[idx].length != 0 && *limit != 0) {
 		length = ntohs(rx_done->entry[idx].length);
 		rx_done->entry[idx].length = 0;
-		checksum = ntohs(rx_done->entry[idx].checksum);
+		checksum = csum_unfold(rx_done->entry[idx].checksum);
 		if (length <= mgp->small_bytes)
 			rx_ok = myri10ge_rx_done(mgp, &mgp->rx_small,
 						 mgp->small_bytes,
@@ -1142,7 +1149,7 @@
 
 	if (rx_done->entry[rx_done->idx].length == 0 || !netif_running(netdev)) {
 		netif_rx_complete(netdev);
-		__raw_writel(htonl(3), mgp->irq_claim);
+		put_be32(htonl(3), mgp->irq_claim);
 		return 0;
 	}
 	return 1;
@@ -1166,7 +1173,7 @@
 		netif_rx_schedule(mgp->dev);
 
 	if (!mgp->msi_enabled) {
-		__raw_writel(0, mgp->irq_deassert);
+		put_be32(0, mgp->irq_deassert);
 		if (!myri10ge_deassert_wait)
 			stats->valid = 0;
 		mb();
@@ -1195,7 +1202,7 @@
 
 	myri10ge_check_statblock(mgp);
 
-	__raw_writel(htonl(3), mgp->irq_claim + 1);
+	put_be32(htonl(3), mgp->irq_claim + 1);
 	return (IRQ_HANDLED);
 }
 
@@ -1233,7 +1240,7 @@
 	struct myri10ge_priv *mgp = netdev_priv(netdev);
 
 	mgp->intr_coal_delay = coal->rx_coalesce_usecs;
-	__raw_writel(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
+	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
 	return 0;
 }
 
@@ -1748,7 +1755,7 @@
 		goto abort_with_rings;
 	}
 
-	mgp->link_state = -1;
+	mgp->link_state = htonl(~0U);
 	mgp->rdma_tags_available = 15;
 
 	netif_poll_enable(mgp->dev);	/* must happen prior to any irq */
@@ -1876,7 +1883,7 @@
 
 	/* re-write the last 32-bits with the valid flags */
 	src->flags = last_flags;
-	__raw_writel(*((u32 *) src + 3), (u32 __iomem *) dst + 3);
+	put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3);
 	tx->req += cnt;
 	mb();
 }
@@ -1919,7 +1926,8 @@
 	struct myri10ge_tx_buf *tx = &mgp->tx;
 	struct skb_frag_struct *frag;
 	dma_addr_t bus;
-	u32 low, high_swapped;
+	u32 low;
+	__be32 high_swapped;
 	unsigned int len;
 	int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
 	u16 pseudo_hdr_offset, cksum_offset;
@@ -1955,7 +1963,7 @@
 	flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
 		cksum_offset = (skb->h.raw - skb->data);
-		pseudo_hdr_offset = (skb->h.raw + skb->csum) - skb->data;
+		pseudo_hdr_offset = cksum_offset + skb->csum_offset;
 		/* If the headers are excessively large, then we must
 		 * fall back to a software checksum */
 		if (unlikely(cksum_offset > 255 || pseudo_hdr_offset > 127)) {
@@ -1964,7 +1972,6 @@
 			cksum_offset = 0;
 			pseudo_hdr_offset = 0;
 		} else {
-			pseudo_hdr_offset = htons(pseudo_hdr_offset);
 			odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
 			flags |= MXGEFW_FLAGS_CKSUM;
 		}
@@ -1986,7 +1993,7 @@
 		/* for TSO, pseudo_hdr_offset holds mss.
 		 * The firmware figures out where to put
 		 * the checksum by parsing the header. */
-		pseudo_hdr_offset = htons(mss);
+		pseudo_hdr_offset = mss;
 	} else
 #endif				/*NETIF_F_TSO */
 		/* Mark small packets, and pad out tiny packets */
@@ -2086,7 +2093,7 @@
 #endif				/* NETIF_F_TSO */
 			req->addr_high = high_swapped;
 			req->addr_low = htonl(low);
-			req->pseudo_hdr_offset = pseudo_hdr_offset;
+			req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
 			req->pad = 0;	/* complete solid 16-byte block; does this matter? */
 			req->rdma_count = 1;
 			req->length = htons(seglen);
@@ -2199,6 +2206,7 @@
 	struct myri10ge_cmd cmd;
 	struct myri10ge_priv *mgp;
 	struct dev_mc_list *mc_list;
+	__be32 data[2] = {0, 0};
 	int err;
 
 	mgp = netdev_priv(dev);
@@ -2237,10 +2245,9 @@
 
 	/* Walk the multicast list, and add each address */
 	for (mc_list = dev->mc_list; mc_list != NULL; mc_list = mc_list->next) {
-		memcpy(&cmd.data0, &mc_list->dmi_addr, 4);
-		memcpy(&cmd.data1, ((char *)&mc_list->dmi_addr) + 4, 2);
-		cmd.data0 = htonl(cmd.data0);
-		cmd.data1 = htonl(cmd.data1);
+		memcpy(data, &mc_list->dmi_addr, 6);
+		cmd.data0 = ntohl(data[0]);
+		cmd.data1 = ntohl(data[1]);
 		err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
 					&cmd, 1);
 
@@ -2615,9 +2622,10 @@
  * This watchdog is used to check whether the board has suffered
  * from a parity error and needs to be recovered.
  */
-static void myri10ge_watchdog(void *arg)
+static void myri10ge_watchdog(struct work_struct *work)
 {
-	struct myri10ge_priv *mgp = arg;
+	struct myri10ge_priv *mgp =
+		container_of(work, struct myri10ge_priv, watchdog_work);
 	u32 reboot;
 	int status;
 	u16 cmd, vendor;
@@ -2887,7 +2895,7 @@
 		    (unsigned long)mgp);
 
 	SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
-	INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp);
+	INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
 	status = register_netdev(netdev);
 	if (status != 0) {
 		dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
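
The myri10ge changes above are an endianness (sparse) cleanup: values that are
big-endian in NIC-visible memory are typed __be32/__be16, conversions happen once
via htonl()/ntohl(), and the single unavoidable __force cast is confined to the
new put_be32() helper. A minimal sketch of the same pattern (names here are
illustrative):

	/* Write a host-order value to a big-endian MMIO word; keeping the
	 * __force cast in one helper lets sparse check every caller. */
	static inline void put_be32_example(__be32 val, __be32 __iomem *p)
	{
		__raw_writel((__force u32)val, (__force void __iomem *)p);
	}

	static void example_set_intr_coal(__be32 __iomem *reg, u32 usecs)
	{
		put_be32_example(htonl(usecs), reg);	/* htonl() yields __be32 */
	}
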
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h
index 9519ae7..29463b3 100644
--- a/drivers/net/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/myri10ge/myri10ge_mcp.h
@@ -6,23 +6,23 @@
 
 /* 8 Bytes */
 struct mcp_dma_addr {
-	u32 high;
-	u32 low;
+	__be32 high;
+	__be32 low;
 };
 
 /* 4 Bytes */
 struct mcp_slot {
-	u16 checksum;
-	u16 length;
+	__sum16 checksum;
+	__be16 length;
 };
 
 /* 64 Bytes */
 struct mcp_cmd {
-	u32 cmd;
-	u32 data0;		/* will be low portion if data > 32 bits */
+	__be32 cmd;
+	__be32 data0;		/* will be low portion if data > 32 bits */
 	/* 8 */
-	u32 data1;		/* will be high portion if data > 32 bits */
-	u32 data2;		/* currently unused.. */
+	__be32 data1;		/* will be high portion if data > 32 bits */
+	__be32 data2;		/* currently unused.. */
 	/* 16 */
 	struct mcp_dma_addr response_addr;
 	/* 24 */
@@ -31,8 +31,8 @@
 
 /* 8 Bytes */
 struct mcp_cmd_response {
-	u32 data;
-	u32 result;
+	__be32 data;
+	__be32 result;
 };
 
 /*
@@ -73,10 +73,10 @@
 
 /* 16 Bytes */
 struct mcp_kreq_ether_send {
-	u32 addr_high;
-	u32 addr_low;
-	u16 pseudo_hdr_offset;
-	u16 length;
+	__be32 addr_high;
+	__be32 addr_low;
+	__be16 pseudo_hdr_offset;
+	__be16 length;
 	u8 pad;
 	u8 rdma_count;
 	u8 cksum_offset;	/* where to start computing cksum */
@@ -85,8 +85,8 @@
 
 /* 8 Bytes */
 struct mcp_kreq_ether_recv {
-	u32 addr_high;
-	u32 addr_low;
+	__be32 addr_high;
+	__be32 addr_low;
 };
 
 /* Commands */
@@ -219,19 +219,19 @@
 
 struct mcp_irq_data {
 	/* add new counters at the beginning */
-	u32 future_use[5];
-	u32 dropped_multicast_filtered;
+	__be32 future_use[5];
+	__be32 dropped_multicast_filtered;
 	/* 40 Bytes */
-	u32 send_done_count;
+	__be32 send_done_count;
 
-	u32 link_up;
-	u32 dropped_link_overflow;
-	u32 dropped_link_error_or_filtered;
-	u32 dropped_runt;
-	u32 dropped_overrun;
-	u32 dropped_no_small_buffer;
-	u32 dropped_no_big_buffer;
-	u32 rdma_tags_available;
+	__be32 link_up;
+	__be32 dropped_link_overflow;
+	__be32 dropped_link_error_or_filtered;
+	__be32 dropped_runt;
+	__be32 dropped_overrun;
+	__be32 dropped_no_small_buffer;
+	__be32 dropped_no_big_buffer;
+	__be32 rdma_tags_available;
 
 	u8 tx_stopped;
 	u8 link_down;
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
index 487f779..16a810d 100644
--- a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
+++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
@@ -36,7 +36,7 @@
 struct mcp_gen_header {
 	/* the first 4 fields are filled at compile time */
 	unsigned header_length;
-	unsigned mcp_type;
+	__be32 mcp_type;
 	char version[128];
 	unsigned mcp_globals;	/* pointer to mcp-type specific structure */
 
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 7747bfd..ee26ef5 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -39,7 +39,6 @@
 #include <asm/auxio.h>
 #include <asm/pgtable.h>
 #include <asm/irq.h>
-#include <asm/checksum.h>
 
 #include "myri_sbus.h"
 #include "myri_code.h"
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index eb893d7..38fd525 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -33,6 +33,8 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 
+#define EI_SHIFT(x)	(ei_local->reg_offset[x])
+
 #include "8390.h"
 
 #define DRV_NAME "ne-h8300"
@@ -52,6 +54,11 @@
 
 /* ---- No user-serviceable parts below ---- */
 
+static const char version[] =
+    "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include "lib8390.c"
+
 #define NE_BASE	 (dev->base_addr)
 #define NE_CMD	 	0x00
 #define NE_DATAPORT	(ei_status.word16?0x20:0x10)	/* NatSemi-defined port window offset. */
@@ -162,7 +169,7 @@
 #ifndef MODULE
 struct net_device * __init ne_probe(int unit)
 {
-	struct net_device *dev = alloc_ei_netdev();
+	struct net_device *dev = ____alloc_ei_netdev(0);
 	int err;
 
 	if (!dev)
@@ -283,7 +290,7 @@
 
 	/* Snarf the interrupt now.  There's no point in waiting since we cannot
 	   share and the board will usually be enabled. */
-	ret = request_irq(dev->irq, ei_interrupt, 0, name, dev);
+	ret = request_irq(dev->irq, __ei_interrupt, 0, name, dev);
 	if (ret) {
 		printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
 		goto err_out;
@@ -318,9 +325,9 @@
 	dev->open = &ne_open;
 	dev->stop = &ne_close;
 #ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = ei_poll;
+	dev->poll_controller = __ei_poll;
 #endif
-	NS8390_init(dev, 0);
+	__NS8390_init(dev, 0);
 
 	ret = register_netdev(dev);
 	if (ret)
@@ -335,7 +342,7 @@
 
 static int ne_open(struct net_device *dev)
 {
-	ei_open(dev);
+	__ei_open(dev);
 	return 0;
 }
 
@@ -343,7 +350,7 @@
 {
 	if (ei_debug > 1)
 		printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
-	ei_close(dev);
+	__ei_close(dev);
 	return 0;
 }
 
@@ -584,7 +591,7 @@
 		if (time_after(jiffies, dma_start + 2*HZ/100)) {		/* 20ms */
 			printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
 			ne_reset_8390(dev);
-			NS8390_init(dev,1);
+			__NS8390_init(dev,1);
 			break;
 		}
 
@@ -620,7 +627,7 @@
 	int err;
 
 	for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
-		struct net_device *dev = alloc_ei_netdev();
+		struct net_device *dev = ____alloc_ei_netdev(0);
 		if (!dev)
 			break;
 		if (io[this_dev]) {
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 787aa42..a5c4199 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -867,7 +867,7 @@
 	release_region(dev->base_addr, NE_IO_EXTENT);
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	int this_dev;
 
diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c
index 5fccfea..089b5bb 100644
--- a/drivers/net/ne2.c
+++ b/drivers/net/ne2.c
@@ -813,7 +813,7 @@
 	release_region(dev->base_addr, NE_IO_EXTENT);
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	int this_dev;
 
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index d663289..1a6fed7 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -36,6 +36,7 @@
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/mm.h>
 
 #include <asm/io.h>
 #include <asm/system.h>
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index bf58db2..69233f6 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -60,7 +60,6 @@
 	.local_port = 6665,
 	.remote_port = 6666,
 	.remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
-	.drop = netpoll_queue,
 };
 static int configured = 0;
 
@@ -102,6 +101,8 @@
 
 static int init_netconsole(void)
 {
+	int err;
+
 	if(strlen(config))
 		option_setup(config);
 
@@ -110,8 +111,9 @@
 		return 0;
 	}
 
-	if(netpoll_setup(&np))
-		return -EINVAL;
+	err = netpoll_setup(&np);
+	if (err)
+		return err;
 
 	register_console(&netconsole);
 	printk(KERN_INFO "netconsole: network logging started\n");
diff --git a/drivers/net/netxen/Makefile b/drivers/net/netxen/Makefile
new file mode 100644
index 0000000..a07cdc6
--- /dev/null
+++ b/drivers/net/netxen/Makefile
@@ -0,0 +1,35 @@
+# Copyright (C) 2003 - 2006 NetXen, Inc.
+# All rights reserved.
+# 
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#                            
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#                                   
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+# MA  02111-1307, USA.
+# 
+# The full GNU General Public License is included in this distribution
+# in the file called LICENSE.
+# 
+# Contact Information:
+#    info@netxen.com
+# NetXen,
+# 3965 Freedom Circle, Fourth floor,
+# Santa Clara, CA 95054
+#
+# Makefile for the NetXen NIC Driver
+#
+
+
+obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o
+
+netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \
+	netxen_nic_isr.o netxen_nic_ethtool.o netxen_nic_niu.o
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
new file mode 100644
index 0000000..b5410be
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic.h
@@ -0,0 +1,1180 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ *    info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ */
+
+#ifndef _NETXEN_NIC_H_
+#define _NETXEN_NIC_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+
+#include <linux/mm.h>
+#include <linux/mman.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+#include "netxen_nic_hw.h"
+
+#define NETXEN_NIC_BUILD_NO     "1"
+#define _NETXEN_NIC_LINUX_MAJOR 3
+#define _NETXEN_NIC_LINUX_MINOR 3
+#define _NETXEN_NIC_LINUX_SUBVERSION 2
+#define NETXEN_NIC_LINUX_VERSIONID  "3.3.2" "-" NETXEN_NIC_BUILD_NO
+#define NETXEN_NIC_FW_VERSIONID "3.3.2"
+
+#define RCV_DESC_RINGSIZE	\
+	(sizeof(struct rcv_desc) * adapter->max_rx_desc_count)
+#define STATUS_DESC_RINGSIZE	\
+	(sizeof(struct status_desc)* adapter->max_rx_desc_count)
+#define LRO_DESC_RINGSIZE	\
+	(sizeof(rcvDesc_t) * adapter->max_lro_rx_desc_count)
+#define TX_RINGSIZE	\
+	(sizeof(struct netxen_cmd_buffer) * adapter->max_tx_desc_count)
+#define RCV_BUFFSIZE	\
+	(sizeof(struct netxen_rx_buffer) * rcv_desc->max_rx_desc_count)
+#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))
+
+#define NETXEN_NETDEV_STATUS		0x1
+#define NETXEN_RCV_PRODUCER_OFFSET	0
+#define NETXEN_RCV_PEG_DB_ID		2
+#define NETXEN_HOST_DUMMY_DMA_SIZE 1024
+
+#define ADDR_IN_WINDOW1(off)	\
+	((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
+/*
+ * In netxen_nic_down(), we must wait for any pending callback requests into
+ * netxen_watchdog_task() to complete; otherwise, e.g., the watchdog_timer could be
+ * re-enabled right after it is deleted in netxen_nic_down(). FLUSH_SCHEDULED_WORK()
+ * does this synchronization.
+ *
+ * Normally, schedule_work()/flush_scheduled_work() could have worked, but
+ * netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off()
+ * call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a
+ * subsequent call to flush_scheduled_work() in netxen_nic_down() would cause
+ * linkwatch_event() to be executed which also attempts to acquire the rtnl
+ * lock thus causing a deadlock.
+ */
+
+#define SCHEDULE_WORK(tp)	queue_work(netxen_workq, tp)
+#define FLUSH_SCHEDULED_WORK()	flush_workqueue(netxen_workq)
+extern struct workqueue_struct *netxen_workq;
+
+/* 
+ * normalize a 64MB crb address to 32MB PCI window 
+ * To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1
+ */
+#define NETXEN_CRB_NORMAL(reg)	\
+	((reg) - NETXEN_CRB_PCIX_HOST2 + NETXEN_CRB_PCIX_HOST)
+
+#define NETXEN_CRB_NORMALIZE(adapter, reg) \
+	pci_base_offset(adapter, NETXEN_CRB_NORMAL(reg))
+
+#define DB_NORMALIZE(adapter, off) \
+	(adapter->ahw.db_base + (off))
+
+#define NX_P2_C0		0x24
+#define NX_P2_C1		0x25
+
+#define FIRST_PAGE_GROUP_START	0
+#define FIRST_PAGE_GROUP_END	0x100000
+
+#define SECOND_PAGE_GROUP_START	0x4000000
+#define SECOND_PAGE_GROUP_END	0x66BC000
+
+#define THIRD_PAGE_GROUP_START	0x70E4000
+#define THIRD_PAGE_GROUP_END	0x8000000
+
+#define FIRST_PAGE_GROUP_SIZE  FIRST_PAGE_GROUP_END - FIRST_PAGE_GROUP_START
+#define SECOND_PAGE_GROUP_SIZE SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START
+#define THIRD_PAGE_GROUP_SIZE  THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START
+
+#define MAX_RX_BUFFER_LENGTH		1760
+#define MAX_RX_JUMBO_BUFFER_LENGTH 	9046
+#define MAX_RX_LRO_BUFFER_LENGTH	((48*1024)-512)
+#define RX_DMA_MAP_LEN			(MAX_RX_BUFFER_LENGTH - 2)
+#define RX_JUMBO_DMA_MAP_LEN	\
+	(MAX_RX_JUMBO_BUFFER_LENGTH - 2)
+#define RX_LRO_DMA_MAP_LEN		(MAX_RX_LRO_BUFFER_LENGTH - 2)
+#define NETXEN_ROM_ROUNDUP		0x80000000ULL
+
+/*
+ * Maximum number of ring contexts
+ */
+#define MAX_RING_CTX 1
+
+/* Opcodes to be used with the commands */
+enum {
+	TX_ETHER_PKT = 0x01,
+/* The following opcodes are for IP checksum	*/
+	TX_TCP_PKT,
+	TX_UDP_PKT,
+	TX_IP_PKT,
+	TX_TCP_LSO,
+	TX_IPSEC,
+	TX_IPSEC_CMD
+};
+
+/* The following opcodes are for internal consumption. */
+#define NETXEN_CONTROL_OP	0x10
+#define PEGNET_REQUEST		0x11
+
+#define	MAX_NUM_CARDS		4
+
+#define MAX_BUFFERS_PER_CMD	32
+
+/*
+ * Following are the states of the Phantom. Phantom will set them and
+ * Host will read to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_START		0xff00
+#define PHAN_INITIALIZE_FAILED		0xffff
+#define PHAN_INITIALIZE_COMPLETE	0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK	0xf00f
+
+#define NUM_RCV_DESC_RINGS	3	/* No of Rcv Descriptor contexts */
+
+/* descriptor types */
+#define RCV_DESC_NORMAL		0x01
+#define RCV_DESC_JUMBO		0x02
+#define RCV_DESC_LRO		0x04
+#define RCV_DESC_NORMAL_CTXID	0
+#define RCV_DESC_JUMBO_CTXID	1
+#define RCV_DESC_LRO_CTXID	2
+
+#define RCV_DESC_TYPE(ID) \
+	((ID == RCV_DESC_JUMBO_CTXID)	\
+		? RCV_DESC_JUMBO	\
+		: ((ID == RCV_DESC_LRO_CTXID)	\
+			? RCV_DESC_LRO :	\
+			(RCV_DESC_NORMAL)))
+
+#define MAX_CMD_DESCRIPTORS		1024
+#define MAX_RCV_DESCRIPTORS		32768
+#define MAX_JUMBO_RCV_DESCRIPTORS	4096
+#define MAX_LRO_RCV_DESCRIPTORS		2048
+#define MAX_RCVSTATUS_DESCRIPTORS	MAX_RCV_DESCRIPTORS
+#define MAX_JUMBO_RCV_DESC	MAX_JUMBO_RCV_DESCRIPTORS
+#define MAX_RCV_DESC		MAX_RCV_DESCRIPTORS
+#define MAX_RCVSTATUS_DESC	MAX_RCV_DESCRIPTORS
+#define MAX_EPG_DESCRIPTORS	(MAX_CMD_DESCRIPTORS * 8)
+#define NUM_RCV_DESC		(MAX_RCV_DESC + MAX_JUMBO_RCV_DESCRIPTORS + \
+				 MAX_LRO_RCV_DESCRIPTORS)
+#define MIN_TX_COUNT	4096
+#define MIN_RX_COUNT	4096
+#define NETXEN_CTX_SIGNATURE	0xdee0
+#define NETXEN_RCV_PRODUCER(ringid)	(ringid)
+#define MAX_FRAME_SIZE	0x10000	/* 64K MAX size for LSO */
+
+#define PHAN_PEG_RCV_INITIALIZED	0xff01
+#define PHAN_PEG_RCV_START_INITIALIZE	0xff00
+
+#define get_next_index(index, length)	\
+	(((index) + 1) & ((length) - 1))
+
+#define get_index_range(index,length,count)	\
+	(((index) + (count)) & ((length) - 1))
+
+#define MPORT_SINGLE_FUNCTION_MODE 0x1111
+
+extern unsigned long long netxen_dma_mask;
+
+/*
+ * NetXen host-peg signal message structure
+ *
+ *	Bit 0-1		: peg_id => 0x2 for tx and 0x1 for rx
+ *	Bit 2		: priv_id => must be 1
+ *	Bit 3-17	: count => for doorbell
+ *	Bit 18-27	: ctx_id => Context id
+ *	Bit 28-31	: opcode
+ */
+
+typedef u32 netxen_ctx_msg;
+
+#define _netxen_set_bits(config_word, start, bits, val)	{\
+	unsigned long long mask = (((1ULL << (bits)) - 1) << (start));	\
+	unsigned long long value = (val);	\
+	(config_word) &= ~mask;	\
+	(config_word) |= (((value) << (start)) & mask); \
+}
+
+#define netxen_set_msg_peg_id(config_word, val)	\
+	_netxen_set_bits(config_word, 0, 2, val)
+#define netxen_set_msg_privid(config_word)	\
+	set_bit(2, (unsigned long*)&config_word)
+#define netxen_set_msg_count(config_word, val)	\
+	_netxen_set_bits(config_word, 3, 15, val)
+#define netxen_set_msg_ctxid(config_word, val)	\
+	_netxen_set_bits(config_word, 18, 10, val)
+#define netxen_set_msg_opcode(config_word, val)	\
+	_netxen_set_bits(config_word, 28, 4, val)
+
+struct netxen_rcv_context {
+	u32 rcv_ring_addr_lo;
+	u32 rcv_ring_addr_hi;
+	u32 rcv_ring_size;
+	u32 rsrvd;
+};
+
+struct netxen_ring_ctx {
+
+	/* one command ring */
+	u64 cmd_consumer_offset;
+	u32 cmd_ring_addr_lo;
+	u32 cmd_ring_addr_hi;
+	u32 cmd_ring_size;
+	u32 rsrvd;
+
+	/* three receive rings */
+	struct netxen_rcv_context rcv_ctx[3];
+
+	/* one status ring */
+	u32 sts_ring_addr_lo;
+	u32 sts_ring_addr_hi;
+	u32 sts_ring_size;
+
+	u32 ctx_id;
+} __attribute__ ((aligned(64)));
+
+/*
+ * Following data structures describe the descriptors that will be used.
+ * Added fields tcpHdrSize and ipHdrSize; the driver needs to fill them only
+ * when we are doing LSO (packets above the standard 1500-byte size).
+ */
+
+/*
+ * The size of the reference handle has been changed to 16 bits to pass the
+ * MSS field for the LSO packet.
+ */
+
+#define FLAGS_CHECKSUM_ENABLED	0x01
+#define FLAGS_LSO_ENABLED	0x02
+#define FLAGS_IPSEC_SA_ADD	0x04
+#define FLAGS_IPSEC_SA_DELETE	0x08
+#define FLAGS_VLAN_TAGGED	0x10
+
+#define netxen_set_cmd_desc_port(cmd_desc, var)	\
+	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+
+#define netxen_set_cmd_desc_flags(cmd_desc, val)	\
+	_netxen_set_bits((cmd_desc)->flags_opcode, 0, 7, val)
+#define netxen_set_cmd_desc_opcode(cmd_desc, val)	\
+	_netxen_set_bits((cmd_desc)->flags_opcode, 7, 6, val)
+
+#define netxen_set_cmd_desc_num_of_buff(cmd_desc, val)	\
+	_netxen_set_bits((cmd_desc)->num_of_buffers_total_length, 0, 8, val);
+#define netxen_set_cmd_desc_totallength(cmd_desc, val)	\
+	_netxen_set_bits((cmd_desc)->num_of_buffers_total_length, 8, 24, val);
+
+#define netxen_get_cmd_desc_opcode(cmd_desc)	\
+	(((cmd_desc)->flags_opcode >> 7) & 0x003F)
+#define netxen_get_cmd_desc_totallength(cmd_desc)	\
+	(((cmd_desc)->num_of_buffers_total_length >> 8) & 0x0FFFFFF)
+
+struct cmd_desc_type0 {
+	u8 tcp_hdr_offset;	/* For LSO only */
+	u8 ip_hdr_offset;	/* For LSO only */
+	/* Bit pattern: 0-6 flags, 7-12 opcode, 13-15 unused */
+	u16 flags_opcode;
+	/* Bit pattern: 0-7 total number of segments,
+	   8-31 Total size of the packet */
+	u32 num_of_buffers_total_length;
+	union {
+		struct {
+			u32 addr_low_part2;
+			u32 addr_high_part2;
+		};
+		u64 addr_buffer2;
+	};
+
+	u16 reference_handle;	/* changed to u16 to add mss */
+	u16 mss;		/* passed by NDIS_PACKET for LSO */
+	/* Bit pattern 0-3 port, 0-3 ctx id */
+	u8 port_ctxid;
+	u8 total_hdr_length;	/* LSO only : MAC+IP+TCP Hdr size */
+	u16 conn_id;		/* IPSec offload only */
+
+	union {
+		struct {
+			u32 addr_low_part3;
+			u32 addr_high_part3;
+		};
+		u64 addr_buffer3;
+	};
+	union {
+		struct {
+			u32 addr_low_part1;
+			u32 addr_high_part1;
+		};
+		u64 addr_buffer1;
+	};
+
+	u16 buffer1_length;
+	u16 buffer2_length;
+	u16 buffer3_length;
+	u16 buffer4_length;
+
+	union {
+		struct {
+			u32 addr_low_part4;
+			u32 addr_high_part4;
+		};
+		u64 addr_buffer4;
+	};
+
+	u64 unused;
+
+} __attribute__ ((aligned(64)));
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+	u16 reference_handle;
+	u16 reserved;
+	u32 buffer_length;	/* allocated buffer length (usually 2K) */
+	u64 addr_buffer;
+};
+
+/* opcode field in status_desc */
+#define RCV_NIC_PKT	(0xA)
+#define STATUS_NIC_PKT	((RCV_NIC_PKT) << 12)
+
+/* for status field in status_desc */
+#define STATUS_NEED_CKSUM	(1)
+#define STATUS_CKSUM_OK		(2)
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST	(0x1)
+#define STATUS_OWNER_PHANTOM	(0x2)
+
+#define NETXEN_PROT_IP		(1)
+#define NETXEN_PROT_UNKNOWN	(0)
+
+/* Note: sizeof(status_desc) should always be a multiple of 2 */
+
+#define netxen_get_sts_desc_lro_cnt(status_desc)	\
+	((status_desc)->lro & 0x7F)
+#define netxen_get_sts_desc_lro_last_frag(status_desc)	\
+	(((status_desc)->lro & 0x80) >> 7)
+
+#define netxen_get_sts_port(status_desc)	\
+	((status_desc)->status_desc_data & 0x0F)
+#define netxen_get_sts_status(status_desc)	\
+	(((status_desc)->status_desc_data >> 4) & 0x0F)
+#define netxen_get_sts_type(status_desc)	\
+	(((status_desc)->status_desc_data >> 8) & 0x0F)
+#define netxen_get_sts_totallength(status_desc)	\
+	(((status_desc)->status_desc_data >> 12) & 0xFFFF)
+#define netxen_get_sts_refhandle(status_desc)	\
+	(((status_desc)->status_desc_data >> 28) & 0xFFFF)
+#define netxen_get_sts_prot(status_desc)	\
+	(((status_desc)->status_desc_data >> 44) & 0x0F)
+#define netxen_get_sts_owner(status_desc)	\
+	(((status_desc)->status_desc_data >> 56) & 0x03)
+#define netxen_get_sts_opcode(status_desc)	\
+	(((status_desc)->status_desc_data >> 58) & 0x03F)
+
+#define netxen_clear_sts_owner(status_desc)	\
+	((status_desc)->status_desc_data &=	\
+	~(((unsigned long long)3) << 56 ))
+#define netxen_set_sts_owner(status_desc, val)	\
+	((status_desc)->status_desc_data |=	\
+	(((unsigned long long)((val) & 0x3)) << 56 ))
+
+struct status_desc {
+	/* Bit pattern: 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+	   28-43 reference_handle, 44-47 protocol, 48-52 unused
+	   53-55 desc_cnt, 56-57 owner, 58-63 opcode
+	 */
+	u64 status_desc_data;
+	u32 hash_value;
+	u8 hash_type;
+	u8 msg_type;
+	u8 unused;
+	/* Bit pattern: 0-6 lro_count indicates frag sequence,
+	   7 last_frag indicates last frag */
+	u8 lro;
+} __attribute__ ((aligned(8)));
+
+enum {
+	NETXEN_RCV_PEG_0 = 0,
+	NETXEN_RCV_PEG_1
+};
+/* The version of the main data structure */
+#define	NETXEN_BDINFO_VERSION 1
+
+/* Magic number to let user know flash is programmed */
+#define	NETXEN_BDINFO_MAGIC 0x12345678
+
+/* Max number of Gig ports on a Phantom board */
+#define NETXEN_MAX_PORTS 4
+
+typedef enum {
+	NETXEN_BRDTYPE_P1_BD = 0x0000,
+	NETXEN_BRDTYPE_P1_SB = 0x0001,
+	NETXEN_BRDTYPE_P1_SMAX = 0x0002,
+	NETXEN_BRDTYPE_P1_SOCK = 0x0003,
+
+	NETXEN_BRDTYPE_P2_SOCK_31 = 0x0008,
+	NETXEN_BRDTYPE_P2_SOCK_35 = 0x0009,
+	NETXEN_BRDTYPE_P2_SB35_4G = 0x000a,
+	NETXEN_BRDTYPE_P2_SB31_10G = 0x000b,
+	NETXEN_BRDTYPE_P2_SB31_2G = 0x000c,
+
+	NETXEN_BRDTYPE_P2_SB31_10G_IMEZ = 0x000d,
+	NETXEN_BRDTYPE_P2_SB31_10G_HMEZ = 0x000e,
+	NETXEN_BRDTYPE_P2_SB31_10G_CX4 = 0x000f
+} netxen_brdtype_t;
+
+typedef enum {
+	NETXEN_BRDMFG_INVENTEC = 1
+} netxen_brdmfg;
+
+typedef enum {
+	MEM_ORG_128Mbx4 = 0x0,	/* DDR1 only */
+	MEM_ORG_128Mbx8 = 0x1,	/* DDR1 only */
+	MEM_ORG_128Mbx16 = 0x2,	/* DDR1 only */
+	MEM_ORG_256Mbx4 = 0x3,
+	MEM_ORG_256Mbx8 = 0x4,
+	MEM_ORG_256Mbx16 = 0x5,
+	MEM_ORG_512Mbx4 = 0x6,
+	MEM_ORG_512Mbx8 = 0x7,
+	MEM_ORG_512Mbx16 = 0x8,
+	MEM_ORG_1Gbx4 = 0x9,
+	MEM_ORG_1Gbx8 = 0xa,
+	MEM_ORG_1Gbx16 = 0xb,
+	MEM_ORG_2Gbx4 = 0xc,
+	MEM_ORG_2Gbx8 = 0xd,
+	MEM_ORG_2Gbx16 = 0xe,
+	MEM_ORG_128Mbx32 = 0x10002,	/* GDDR only */
+	MEM_ORG_256Mbx32 = 0x10005	/* GDDR only */
+} netxen_mn_mem_org_t;
+
+typedef enum {
+	MEM_ORG_512Kx36 = 0x0,
+	MEM_ORG_1Mx36 = 0x1,
+	MEM_ORG_2Mx36 = 0x2
+} netxen_sn_mem_org_t;
+
+typedef enum {
+	MEM_DEPTH_4MB = 0x1,
+	MEM_DEPTH_8MB = 0x2,
+	MEM_DEPTH_16MB = 0x3,
+	MEM_DEPTH_32MB = 0x4,
+	MEM_DEPTH_64MB = 0x5,
+	MEM_DEPTH_128MB = 0x6,
+	MEM_DEPTH_256MB = 0x7,
+	MEM_DEPTH_512MB = 0x8,
+	MEM_DEPTH_1GB = 0x9,
+	MEM_DEPTH_2GB = 0xa,
+	MEM_DEPTH_4GB = 0xb,
+	MEM_DEPTH_8GB = 0xc,
+	MEM_DEPTH_16GB = 0xd,
+	MEM_DEPTH_32GB = 0xe
+} netxen_mem_depth_t;
+
+struct netxen_board_info {
+	u32 header_version;
+
+	u32 board_mfg;
+	u32 board_type;
+	u32 board_num;
+	u32 chip_id;
+	u32 chip_minor;
+	u32 chip_major;
+	u32 chip_pkg;
+	u32 chip_lot;
+
+	u32 port_mask;		/* available niu ports */
+	u32 peg_mask;		/* available pegs */
+	u32 icache_ok;		/* can we run with icache? */
+	u32 dcache_ok;		/* can we run with dcache? */
+	u32 casper_ok;
+
+	u32 mac_addr_lo_0;
+	u32 mac_addr_lo_1;
+	u32 mac_addr_lo_2;
+	u32 mac_addr_lo_3;
+
+	/* MN-related config */
+	u32 mn_sync_mode;	/* enable/ sync shift cclk/ sync shift mclk */
+	u32 mn_sync_shift_cclk;
+	u32 mn_sync_shift_mclk;
+	u32 mn_wb_en;
+	u32 mn_crystal_freq;	/* in MHz */
+	u32 mn_speed;		/* in MHz */
+	u32 mn_org;
+	u32 mn_depth;
+	u32 mn_ranks_0;		/* ranks per slot */
+	u32 mn_ranks_1;		/* ranks per slot */
+	u32 mn_rd_latency_0;
+	u32 mn_rd_latency_1;
+	u32 mn_rd_latency_2;
+	u32 mn_rd_latency_3;
+	u32 mn_rd_latency_4;
+	u32 mn_rd_latency_5;
+	u32 mn_rd_latency_6;
+	u32 mn_rd_latency_7;
+	u32 mn_rd_latency_8;
+	u32 mn_dll_val[18];
+	u32 mn_mode_reg;	/* MIU DDR Mode Register */
+	u32 mn_ext_mode_reg;	/* MIU DDR Extended Mode Register */
+	u32 mn_timing_0;	/* MIU Memory Control Timing Register */
+	u32 mn_timing_1;	/* MIU Extended Memory Ctrl Timing Register */
+	u32 mn_timing_2;	/* MIU Extended Memory Ctrl Timing2 Register */
+
+	/* SN-related config */
+	u32 sn_sync_mode;	/* enable/ sync shift cclk / sync shift mclk */
+	u32 sn_pt_mode;		/* pass through mode */
+	u32 sn_ecc_en;
+	u32 sn_wb_en;
+	u32 sn_crystal_freq;
+	u32 sn_speed;
+	u32 sn_org;
+	u32 sn_depth;
+	u32 sn_dll_tap;
+	u32 sn_rd_latency;
+
+	u32 mac_addr_hi_0;
+	u32 mac_addr_hi_1;
+	u32 mac_addr_hi_2;
+	u32 mac_addr_hi_3;
+
+	u32 magic;		/* indicates flash has been initialized */
+
+	u32 mn_rdimm;
+	u32 mn_dll_override;
+
+};
+
+#define FLASH_NUM_PORTS		(4)
+
+struct netxen_flash_mac_addr {
+	u32 flash_addr[32];
+};
+
+struct netxen_user_old_info {
+	u8 flash_md5[16];
+	u8 crbinit_md5[16];
+	u8 brdcfg_md5[16];
+	/* bootloader */
+	u32 bootld_version;
+	u32 bootld_size;
+	u8 bootld_md5[16];
+	/* image */
+	u32 image_version;
+	u32 image_size;
+	u8 image_md5[16];
+	/* primary image status */
+	u32 primary_status;
+	u32 secondary_present;
+
+	/* MAC address , 4 ports */
+	struct netxen_flash_mac_addr mac_addr[FLASH_NUM_PORTS];
+};
+#define FLASH_NUM_MAC_PER_PORT	32
+struct netxen_user_info {
+	u8 flash_md5[16 * 64];
+	/* bootloader */
+	u32 bootld_version;
+	u32 bootld_size;
+	/* image */
+	u32 image_version;
+	u32 image_size;
+	/* primary image status */
+	u32 primary_status;
+	u32 secondary_present;
+
+	/* MAC address , 4 ports, 32 address per port */
+	u64 mac_addr[FLASH_NUM_PORTS * FLASH_NUM_MAC_PER_PORT];
+	u32 sub_sys_id;
+	u8 serial_num[32];
+
+	/* Any user defined data */
+};
+
+/*
+ * Flash Layout - new format.
+ */
+struct netxen_new_user_info {
+	u8 flash_md5[16 * 64];
+	/* bootloader */
+	u32 bootld_version;
+	u32 bootld_size;
+	/* image */
+	u32 image_version;
+	u32 image_size;
+	/* primary image status */
+	u32 primary_status;
+	u32 secondary_present;
+
+	/* MAC address , 4 ports, 32 address per port */
+	u64 mac_addr[FLASH_NUM_PORTS * FLASH_NUM_MAC_PER_PORT];
+	u32 sub_sys_id;
+	u8 serial_num[32];
+
+	/* Any user defined data */
+};
+
+#define SECONDARY_IMAGE_PRESENT 0xb3b4b5b6
+#define SECONDARY_IMAGE_ABSENT	0xffffffff
+#define PRIMARY_IMAGE_GOOD	0x5a5a5a5a
+#define PRIMARY_IMAGE_BAD	0xffffffff
+
+/* Flash memory map */
+typedef enum {
+	CRBINIT_START = 0,	/* Crbinit section */
+	BRDCFG_START = 0x4000,	/* board config */
+	INITCODE_START = 0x6000,	/* pegtune code */
+	BOOTLD_START = 0x10000,	/* bootld */
+	IMAGE_START = 0x43000,	/* compressed image */
+	SECONDARY_START = 0x200000,	/* backup images */
+	PXE_START = 0x3E0000,	/* user defined region */
+	USER_START = 0x3E8000,	/* User defined region for new boards */
+	FIXED_START = 0x3F0000	/* backup of crbinit */
+} netxen_flash_map_t;
+
+#define USER_START_OLD PXE_START	/* for backward compatibility */
+
+#define FLASH_START		(CRBINIT_START)
+#define INIT_SECTOR		(0)
+#define PRIMARY_START 		(BOOTLD_START)
+#define FLASH_CRBINIT_SIZE 	(0x4000)
+#define FLASH_BRDCFG_SIZE 	(sizeof(struct netxen_board_info))
+#define FLASH_USER_SIZE		(sizeof(struct netxen_user_info)/sizeof(u32))
+#define FLASH_SECONDARY_SIZE 	(USER_START-SECONDARY_START)
+#define NUM_PRIMARY_SECTORS	(0x20)
+#define NUM_CONFIG_SECTORS 	(1)
+#define PFX "NetXen: "
+extern char netxen_nic_driver_name[];
+
+/* Note: Make sure to not call this before adapter->port is valid */
+#if !defined(NETXEN_DEBUG)
+#define DPRINTK(klevel, fmt, args...)	do { \
+	} while (0)
+#else
+#define DPRINTK(klevel, fmt, args...)	do { \
+	printk(KERN_##klevel PFX "%s: %s: " fmt, __FUNCTION__,\
+		(adapter != NULL && \
+		adapter->port[0] != NULL && \
+		adapter->port[0]->netdev != NULL) ? \
+		adapter->port[0]->netdev->name : NULL, \
+		## args); } while(0)
+#endif
+
+/* Number of status descriptors to handle per interrupt */
+#define MAX_STATUS_HANDLE	(128)
+
+/*
+ * netxen_skb_frag{} is to contain mapping info for each SG list. This
+ * has to be freed when DMA is complete. This is part of netxen_tx_buffer{}.
+ */
+struct netxen_skb_frag {
+	u64 dma;
+	u32 length;
+};
+
+/*    Following defines are for the state of the buffers    */
+#define	NETXEN_BUFFER_FREE	0
+#define	NETXEN_BUFFER_BUSY	1
+
+/*
+ * There will be one netxen_buffer per skb packet.    These will be
+ * used to save the dma info for pci_unmap_page()
+ */
+struct netxen_cmd_buffer {
+	struct sk_buff *skb;
+	struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+	u32 total_length;
+	u32 mss;
+	u16 port;
+	u8 cmd;
+	u8 frag_count;
+	unsigned long time_stamp;
+	u32 state;
+};
+
+/* In rx_buffer, we do not need multiple fragments as it is a single buffer */
+struct netxen_rx_buffer {
+	struct sk_buff *skb;
+	u64 dma;
+	u16 ref_handle;
+	u16 state;
+	u32 lro_expected_frags;
+	u32 lro_current_frags;
+	u32 lro_length;
+};
+
+/* Board types */
+#define	NETXEN_NIC_GBE	0x01
+#define	NETXEN_NIC_XGBE	0x02
+
+/*
+ * One hardware_context{} per adapter
+ * contains interrupt info as well shared hardware info.
+ */
+struct netxen_hardware_context {
+	struct pci_dev *pdev;
+	void __iomem *pci_base0;
+	void __iomem *pci_base1;
+	void __iomem *pci_base2;
+	void __iomem *db_base;
+	unsigned long db_len;
+
+	u8 revision_id;
+	u16 board_type;
+	u16 max_ports;
+	struct netxen_board_info boardcfg;
+	u32 xg_linkup;
+	u32 qg_linksup;
+	/* Address of cmd ring in Phantom */
+	struct cmd_desc_type0 *cmd_desc_head;
+	struct pci_dev *cmd_desc_pdev;
+	dma_addr_t cmd_desc_phys_addr;
+	struct netxen_adapter *adapter;
+};
+
+#define RCV_RING_LRO	RCV_DESC_LRO
+
+#define MINIMUM_ETHERNET_FRAME_SIZE	64	/* With FCS */
+#define ETHERNET_FCS_SIZE		4
+
+struct netxen_adapter_stats {
+	u64 ints;
+	u64 hostints;
+	u64 otherints;
+	u64 process_rcv;
+	u64 process_xmit;
+	u64 noxmitdone;
+	u64 xmitcsummed;
+	u64 post_called;
+	u64 posted;
+	u64 lastposted;
+	u64 goodskbposts;
+};
+
+/*
+ * Rcv Descriptor Context. One such per Rcv Descriptor. There may
+ * be one Rcv Descriptor for normal packets, one for jumbo, and possibly others.
+ */
+struct netxen_rcv_desc_ctx {
+	u32 flags;
+	u32 producer;
+	u32 rcv_pending;	/* Num of bufs posted in phantom */
+	u32 rcv_free;		/* Num of bufs in free list */
+	dma_addr_t phys_addr;
+	struct pci_dev *phys_pdev;
+	struct rcv_desc *desc_head;	/* address of rx ring in Phantom */
+	u32 max_rx_desc_count;
+	u32 dma_size;
+	u32 skb_size;
+	struct netxen_rx_buffer *rx_buf_arr;	/* rx buffers for receive   */
+	int begin_alloc;
+};
+
+/*
+ * Receive context. There is one such structure per instance of the
+ * receive processing. Any state information that is relevant to
+ * the receive, and is must be in this structure. The global data may be
+ * present elsewhere.
+ */
+struct netxen_recv_context {
+	struct netxen_rcv_desc_ctx rcv_desc[NUM_RCV_DESC_RINGS];
+	u32 status_rx_producer;
+	u32 status_rx_consumer;
+	dma_addr_t rcv_status_desc_phys_addr;
+	struct pci_dev *rcv_status_desc_pdev;
+	struct status_desc *rcv_status_desc_head;
+};
+
+#define NETXEN_NIC_MSI_ENABLED 0x02
+#define NETXEN_DMA_MASK	0xfffffffe
+#define NETXEN_DB_MAPSIZE_BYTES    0x1000
+
+struct netxen_dummy_dma {
+	void *addr;
+	dma_addr_t phys_addr;
+};
+
+struct netxen_adapter {
+	struct netxen_hardware_context ahw;
+	int port_count;		/* Number of configured ports  */
+	int active_ports;	/* Number of open ports */
+	struct netxen_port *port[NETXEN_MAX_PORTS];	/* ptr to each port  */
+	spinlock_t tx_lock;
+	spinlock_t lock;
+	struct work_struct watchdog_task;
+	struct work_struct tx_timeout_task;
+	struct net_device *netdev;
+	struct timer_list watchdog_timer;
+
+	u32 curr_window;
+
+	u32 cmd_producer;
+	u32 *cmd_consumer;
+
+	u32 last_cmd_consumer;
+	u32 max_tx_desc_count;
+	u32 max_rx_desc_count;
+	u32 max_jumbo_rx_desc_count;
+	u32 max_lro_rx_desc_count;
+	/* Num of instances active on cmd buffer ring */
+	u32 proc_cmd_buf_counter;
+
+	u32 num_threads, total_threads;	/*Use to keep track of xmit threads */
+
+	u32 flags;
+	u32 irq;
+	int driver_mismatch;
+	u32 temp;
+
+	struct netxen_adapter_stats stats;
+
+	struct netxen_cmd_buffer *cmd_buf_arr;	/* Command buffers for xmit */
+
+	/*
+	 * Receive instances. These can be either one per port,
+	 * or one per peg, etc.
+	 */
+	struct netxen_recv_context recv_ctx[MAX_RCV_CTX];
+
+	int is_up;
+	int number;
+	struct netxen_dummy_dma dummy_dma;
+
+	/* Context interface shared between card and host */
+	struct netxen_ring_ctx *ctx_desc;
+	struct pci_dev *ctx_desc_pdev;
+	dma_addr_t ctx_desc_phys_addr;
+	int (*enable_phy_interrupts) (struct netxen_adapter *, int);
+	int (*disable_phy_interrupts) (struct netxen_adapter *, int);
+	void (*handle_phy_intr) (struct netxen_adapter *);
+	int (*macaddr_set) (struct netxen_port *, netxen_ethernet_macaddr_t);
+	int (*set_mtu) (struct netxen_port *, int);
+	int (*set_promisc) (struct netxen_adapter *, int,
+			    netxen_niu_prom_mode_t);
+	int (*unset_promisc) (struct netxen_adapter *, int,
+			      netxen_niu_prom_mode_t);
+	int (*phy_read) (struct netxen_adapter *, long phy, long reg, u32 *);
+	int (*phy_write) (struct netxen_adapter *, long phy, long reg, u32 val);
+	int (*init_port) (struct netxen_adapter *, int);
+	void (*init_niu) (struct netxen_adapter *);
+	int (*stop_port) (struct netxen_adapter *, int);
+};				/* netxen_adapter structure */
+
+/* Max number of xmit producer threads that can run simultaneously */
+#define	MAX_XMIT_PRODUCERS		16
+
+struct netxen_port_stats {
+	u64 rcvdbadskb;
+	u64 xmitcalled;
+	u64 xmitedframes;
+	u64 xmitfinished;
+	u64 badskblen;
+	u64 nocmddescriptor;
+	u64 polled;
+	u64 uphappy;
+	u64 updropped;
+	u64 uplcong;
+	u64 uphcong;
+	u64 upmcong;
+	u64 updunno;
+	u64 skbfreed;
+	u64 txdropped;
+	u64 txnullskb;
+	u64 csummed;
+	u64 no_rcv;
+	u64 rxbytes;
+	u64 txbytes;
+};
+
+struct netxen_port {
+	struct netxen_adapter *adapter;
+
+	u16 portnum;		/* GBE port number */
+	u16 link_speed;
+	u16 link_duplex;
+	u16 link_autoneg;
+
+	int flags;
+
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+	struct netxen_port_stats stats;
+};
+
+#define PCI_OFFSET_FIRST_RANGE(adapter, off)    \
+	((adapter)->ahw.pci_base0 + (off))
+#define PCI_OFFSET_SECOND_RANGE(adapter, off)   \
+	((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START)
+#define PCI_OFFSET_THIRD_RANGE(adapter, off)    \
+	((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START)
+
+static inline void __iomem *pci_base_offset(struct netxen_adapter *adapter,
+					    unsigned long off)
+{
+	if ((off < FIRST_PAGE_GROUP_END) && (off >= FIRST_PAGE_GROUP_START)) {
+		return (adapter->ahw.pci_base0 + off);
+	} else if ((off < SECOND_PAGE_GROUP_END) &&
+		   (off >= SECOND_PAGE_GROUP_START)) {
+		return (adapter->ahw.pci_base1 + off - SECOND_PAGE_GROUP_START);
+	} else if ((off < THIRD_PAGE_GROUP_END) &&
+		   (off >= THIRD_PAGE_GROUP_START)) {
+		return (adapter->ahw.pci_base2 + off - THIRD_PAGE_GROUP_START);
+	}
+	return NULL;
+}
+
+static inline void __iomem *pci_base(struct netxen_adapter *adapter,
+				     unsigned long off)
+{
+	if ((off < FIRST_PAGE_GROUP_END) && (off >= FIRST_PAGE_GROUP_START)) {
+		return adapter->ahw.pci_base0;
+	} else if ((off < SECOND_PAGE_GROUP_END) &&
+		   (off >= SECOND_PAGE_GROUP_START)) {
+		return adapter->ahw.pci_base1;
+	} else if ((off < THIRD_PAGE_GROUP_END) &&
+		   (off >= THIRD_PAGE_GROUP_START)) {
+		return adapter->ahw.pci_base2;
+	}
+	return NULL;
+}
+
+int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter,
+					  int port);
+int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter,
+					 int port);
+int netxen_niu_xgbe_disable_phy_interrupts(struct netxen_adapter *adapter,
+					   int port);
+int netxen_niu_gbe_disable_phy_interrupts(struct netxen_adapter *adapter,
+					  int port);
+int netxen_niu_xgbe_clear_phy_interrupts(struct netxen_adapter *adapter,
+					 int port);
+int netxen_niu_gbe_clear_phy_interrupts(struct netxen_adapter *adapter,
+					int port);
+void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter);
+void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter);
+void netxen_niu_gbe_set_mii_mode(struct netxen_adapter *adapter, int port,
+				 long enable);
+void netxen_niu_gbe_set_gmii_mode(struct netxen_adapter *adapter, int port,
+				  long enable);
+int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long phy, long reg,
+			    __le32 * readval);
+int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long phy,
+			     long reg, __le32 val);
+
+/* Functions available from netxen_nic_hw.c */
+int netxen_nic_set_mtu_xgb(struct netxen_port *port, int new_mtu);
+int netxen_nic_set_mtu_gb(struct netxen_port *port, int new_mtu);
+void netxen_nic_init_niu_gb(struct netxen_adapter *adapter);
+void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw);
+void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val);
+int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off);
+void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value);
+void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 * value);
+
+int netxen_nic_get_board_info(struct netxen_adapter *adapter);
+int netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data,
+			  int len);
+int netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data,
+			   int len);
+int netxen_nic_hw_read_ioctl(struct netxen_adapter *adapter, u64 off,
+			     void *data, int len);
+int netxen_nic_hw_write_ioctl(struct netxen_adapter *adapter, u64 off,
+			      void *data, int len);
+int netxen_nic_pci_mem_write_ioctl(struct netxen_adapter *adapter,
+				   u64 off, void *data, int size);
+int netxen_nic_pci_mem_read_ioctl(struct netxen_adapter *adapter,
+				  u64 off, void *data, int size);
+void netxen_crb_writelit_adapter(struct netxen_adapter *adapter,
+				 unsigned long off, int data);
+
+/* Functions from netxen_nic_init.c */
+void netxen_free_adapter_offload(struct netxen_adapter *adapter);
+int netxen_initialize_adapter_offload(struct netxen_adapter *adapter);
+void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
+void netxen_load_firmware(struct netxen_adapter *adapter);
+int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
+int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data);
+int netxen_rom_se(struct netxen_adapter *adapter, int addr);
+int netxen_do_rom_se(struct netxen_adapter *adapter, int addr);
+
+/* Functions from netxen_nic_isr.c */
+void netxen_nic_isr_other(struct netxen_adapter *adapter);
+void netxen_indicate_link_status(struct netxen_adapter *adapter, u32 port,
+				 u32 link);
+void netxen_handle_port_int(struct netxen_adapter *adapter, u32 port,
+			    u32 enable);
+void netxen_nic_stop_all_ports(struct netxen_adapter *adapter);
+void netxen_initialize_adapter_sw(struct netxen_adapter *adapter);
+void netxen_initialize_adapter_hw(struct netxen_adapter *adapter);
+void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr,
+		   struct pci_dev **used_dev);
+void netxen_initialize_adapter_ops(struct netxen_adapter *adapter);
+int netxen_init_firmware(struct netxen_adapter *adapter);
+void netxen_free_hw_resources(struct netxen_adapter *adapter);
+void netxen_tso_check(struct netxen_adapter *adapter,
+		      struct cmd_desc_type0 *desc, struct sk_buff *skb);
+int netxen_nic_hw_resources(struct netxen_adapter *adapter);
+void netxen_nic_clear_stats(struct netxen_adapter *adapter);
+int
+netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data,
+		    struct netxen_port *port);
+int netxen_nic_rx_has_work(struct netxen_adapter *adapter);
+int netxen_nic_tx_has_work(struct netxen_adapter *adapter);
+void netxen_watchdog_task(struct work_struct *work);
+void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx,
+			    u32 ringid);
+void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, u32 ctx,
+				 u32 ringid);
+int netxen_process_cmd_ring(unsigned long data);
+u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max);
+void netxen_nic_set_multi(struct net_device *netdev);
+int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
+int netxen_nic_set_mac(struct net_device *netdev, void *p);
+struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev);
+
+static inline void netxen_nic_disable_int(struct netxen_adapter *adapter)
+{
+	/*
+	 * ISR_INT_MASK: Can be read from window 0 or 1.
+	 */
+	writel(0x7ff, PCI_OFFSET_SECOND_RANGE(adapter, ISR_INT_MASK));
+
+}
+
+static inline void netxen_nic_enable_int(struct netxen_adapter *adapter)
+{
+	u32 mask;
+
+	switch (adapter->ahw.board_type) {
+	case NETXEN_NIC_GBE:
+		mask = 0x77b;
+		break;
+	case NETXEN_NIC_XGBE:
+		mask = 0x77f;
+		break;
+	default:
+		mask = 0x7ff;
+		break;
+	}
+
+	writel(mask, PCI_OFFSET_SECOND_RANGE(adapter, ISR_INT_MASK));
+
+	if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) {
+		mask = 0xbff;
+		writel(mask, PCI_OFFSET_SECOND_RANGE(adapter,
+						     ISR_INT_TARGET_MASK));
+	}
+}
+
+/*
+ * NetXen Board information
+ */
+
+#define NETXEN_MAX_SHORT_NAME 16
+struct netxen_brdinfo {
+	netxen_brdtype_t brdtype;	/* type of board */
+	long ports;		/* max no of physical ports */
+	char short_name[NETXEN_MAX_SHORT_NAME];
+};
+
+static const struct netxen_brdinfo netxen_boards[] = {
+	{NETXEN_BRDTYPE_P2_SB31_10G_CX4, 1, "XGb CX4"},
+	{NETXEN_BRDTYPE_P2_SB31_10G_HMEZ, 1, "XGb HMEZ"},
+	{NETXEN_BRDTYPE_P2_SB31_10G_IMEZ, 2, "XGb IMEZ"},
+	{NETXEN_BRDTYPE_P2_SB31_10G, 1, "XGb XFP"},
+	{NETXEN_BRDTYPE_P2_SB35_4G, 4, "Quad Gb"},
+	{NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"},
+};
+
+#define NUM_SUPPORTED_BOARDS (sizeof(netxen_boards)/sizeof(struct netxen_brdinfo))
+
+static inline void get_brd_port_by_type(u32 type, int *ports)
+{
+	int i, found = 0;
+	for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+		if (netxen_boards[i].brdtype == type) {
+			*ports = netxen_boards[i].ports;
+			found = 1;
+			break;
+		}
+	}
+	if (!found)
+		*ports = 0;
+}
+
+static inline void get_brd_name_by_type(u32 type, char *name)
+{
+	int i, found = 0;
+	for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+		if (netxen_boards[i].brdtype == type) {
+			strcpy(name, netxen_boards[i].short_name);
+			found = 1;
+			break;
+		}
+
+	}
+	if (!found)
+		name = "Unknown";
+}
+
+int netxen_is_flash_supported(struct netxen_adapter *adapter);
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 mac[]);
+extern void netxen_change_ringparam(struct netxen_adapter *adapter);
+extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
+				int *valp);
+
+extern struct ethtool_ops netxen_nic_ethtool_ops;
+
+#endif				/* __NETXEN_NIC_H_ */
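
The netxen_ctx_msg layout documented in the header above is only ever manipulated
through the netxen_set_msg_* accessors, so callers never open-code the shifts and
masks. A hedged sketch of how a doorbell message might be packed with them (which
peg, context and opcode to use is decided by the driver paths that own the ring;
the values below are purely illustrative):

	static netxen_ctx_msg example_build_doorbell(u32 ringid, u32 ctx_id,
						     u32 count)
	{
		netxen_ctx_msg msg = 0;

		netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
		netxen_set_msg_privid(msg);	/* priv bit must be set */
		netxen_set_msg_count(msg, count);
		netxen_set_msg_ctxid(msg, ctx_id);
		netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
		return msg;
	}
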
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
new file mode 100644
index 0000000..2ab4885
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -0,0 +1,742 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ *    info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ *
+ *
+ * ethtool support for netxen nic
+ *
+ */
+
+#include <linux/types.h>
+#include <asm/uaccess.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/version.h>
+
+#include "netxen_nic_hw.h"
+#include "netxen_nic.h"
+#include "netxen_nic_phan_reg.h"
+#include "netxen_nic_ioctl.h"
+
+struct netxen_nic_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
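+/* expands to the <size, offset> initializer pair for a netxen_port member */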
+#define NETXEN_NIC_STAT(m) sizeof(((struct netxen_port *)0)->m), \
+			offsetof(struct netxen_port, m)
+
+#define NETXEN_NIC_PORT_WINDOW 0x10000
+#define NETXEN_NIC_INVALID_DATA 0xDEADBEEF
+
+static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = {
+	{"rcvd_bad_skb", NETXEN_NIC_STAT(stats.rcvdbadskb)},
+	{"xmit_called", NETXEN_NIC_STAT(stats.xmitcalled)},
+	{"xmited_frames", NETXEN_NIC_STAT(stats.xmitedframes)},
+	{"xmit_finished", NETXEN_NIC_STAT(stats.xmitfinished)},
+	{"bad_skb_len", NETXEN_NIC_STAT(stats.badskblen)},
+	{"no_cmd_desc", NETXEN_NIC_STAT(stats.nocmddescriptor)},
+	{"polled", NETXEN_NIC_STAT(stats.polled)},
+	{"uphappy", NETXEN_NIC_STAT(stats.uphappy)},
+	{"updropped", NETXEN_NIC_STAT(stats.updropped)},
+	{"uplcong", NETXEN_NIC_STAT(stats.uplcong)},
+	{"uphcong", NETXEN_NIC_STAT(stats.uphcong)},
+	{"upmcong", NETXEN_NIC_STAT(stats.upmcong)},
+	{"updunno", NETXEN_NIC_STAT(stats.updunno)},
+	{"skb_freed", NETXEN_NIC_STAT(stats.skbfreed)},
+	{"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)},
+	{"tx_null_skb", NETXEN_NIC_STAT(stats.txnullskb)},
+	{"csummed", NETXEN_NIC_STAT(stats.csummed)},
+	{"no_rcv", NETXEN_NIC_STAT(stats.no_rcv)},
+	{"rx_bytes", NETXEN_NIC_STAT(stats.rxbytes)},
+	{"tx_bytes", NETXEN_NIC_STAT(stats.txbytes)},
+};
+
+#define NETXEN_NIC_STATS_LEN	\
+	(sizeof(netxen_nic_gstrings_stats) / sizeof(struct netxen_nic_stats))
+
+static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register_Test_offline", "EEPROM_Test_offline",
+	"Interrupt_Test_offline", "Loopback_Test_offline",
+	"Link_Test_on_offline"
+};
+
+#define NETXEN_NIC_TEST_LEN	\
+	(sizeof(netxen_nic_gstrings_test) / ETH_GSTRING_LEN)
+
+#define NETXEN_NIC_REGS_COUNT 42
+#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32))
+#define NETXEN_MAX_EEPROM_LEN   1024
+
+static int netxen_nic_get_eeprom_len(struct net_device *dev)
+{
+	struct netxen_port *port = netdev_priv(dev);
+	struct netxen_adapter *adapter = port->adapter;
+	int n;
+
+	if ((netxen_rom_fast_read(adapter, 0, &n) == 0)
+	    && (n & NETXEN_ROM_ROUNDUP)) {
+		n &= ~NETXEN_ROM_ROUNDUP;
+		if (n < NETXEN_MAX_EEPROM_LEN)
+			return n;
+	}
+	return 0;
+}
+
+static void
+netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+	struct netxen_port *port = netdev_priv(dev);
+	struct netxen_adapter *adapter = port->adapter;
+	u32 fw_major = 0;
+	u32 fw_minor = 0;
+	u32 fw_build = 0;
+
+	strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
+	strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
+	fw_major = readl(NETXEN_CRB_NORMALIZE(adapter,
+					      NETXEN_FW_VERSION_MAJOR));
+	fw_minor = readl(NETXEN_CRB_NORMALIZE(adapter,
+					      NETXEN_FW_VERSION_MINOR));
+	fw_build = readl(NETXEN_CRB_NORMALIZE(adapter, NETXEN_FW_VERSION_SUB));
+	sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+	strncpy(drvinfo->bus_info, pci_name(port->pdev), 32);
+	drvinfo->n_stats = NETXEN_NIC_STATS_LEN;
+	drvinfo->testinfo_len = NETXEN_NIC_TEST_LEN;
+	drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
+	drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
+}
+
+static int
+netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+	struct netxen_port *port = netdev_priv(dev);
+	struct netxen_adapter *adapter = port->adapter;
+	struct netxen_board_info *boardinfo = &adapter->ahw.boardcfg;
+
+	/* read which mode */
+	if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
+		ecmd->supported = (SUPPORTED_10baseT_Half |
+				   SUPPORTED_10baseT_Full |
+				   SUPPORTED_100baseT_Half |
+				   SUPPORTED_100baseT_Full |
+				   SUPPORTED_1000baseT_Half |
+				   SUPPORTED_1000baseT_Full);
+
+		ecmd->advertising = (ADVERTISED_100baseT_Half |
+				     ADVERTISED_100baseT_Full |
+				     ADVERTISED_1000baseT_Half |
+				     ADVERTISED_1000baseT_Full);
+
+		ecmd->port = PORT_TP;
+
+		if (netif_running(dev)) {
+			ecmd->speed = port->link_speed;
+			ecmd->duplex = port->link_duplex;
+		} else
+			return -EIO;	/* link absent */
+	} else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
+		ecmd->supported = (SUPPORTED_TP |
+				   SUPPORTED_1000baseT_Full |
+				   SUPPORTED_10000baseT_Full);
+		ecmd->advertising = (ADVERTISED_TP |
+				     ADVERTISED_1000baseT_Full |
+				     ADVERTISED_10000baseT_Full);
+		ecmd->port = PORT_TP;
+
+		ecmd->speed = SPEED_10000;
+		ecmd->duplex = DUPLEX_FULL;
+		ecmd->autoneg = AUTONEG_DISABLE;
+	} else
+		return -EIO;
+
+	ecmd->phy_address = port->portnum;
+	ecmd->transceiver = XCVR_EXTERNAL;
+
+	switch ((netxen_brdtype_t) boardinfo->board_type) {
+	case NETXEN_BRDTYPE_P2_SB35_4G:
+	case NETXEN_BRDTYPE_P2_SB31_2G:
+		ecmd->supported |= SUPPORTED_Autoneg;
+		ecmd->advertising |= ADVERTISED_Autoneg;
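+		/* fall through: the common TP settings below apply as well */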
+	case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+		ecmd->supported |= SUPPORTED_TP;
+		ecmd->advertising |= ADVERTISED_TP;
+		ecmd->port = PORT_TP;
+		ecmd->autoneg = (boardinfo->board_type ==
+				 NETXEN_BRDTYPE_P2_SB31_10G_CX4) ?
+		    (AUTONEG_DISABLE) : (port->link_autoneg);
+		break;
+	case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+	case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+		ecmd->supported |= SUPPORTED_MII;
+		ecmd->advertising |= ADVERTISED_MII;
+		ecmd->port = PORT_FIBRE;
+		ecmd->autoneg = AUTONEG_DISABLE;
+		break;
+	case NETXEN_BRDTYPE_P2_SB31_10G:
+		ecmd->supported |= SUPPORTED_FIBRE;
+		ecmd->advertising |= ADVERTISED_FIBRE;
+		ecmd->port = PORT_FIBRE;
+		ecmd->autoneg = AUTONEG_DISABLE;
+		break;
+	default:
+		printk(KERN_ERR "netxen-nic: Unsupported board model %d\n",
+		       (netxen_brdtype_t) boardinfo->board_type);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int
+netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+	struct netxen_port *port = netdev_priv(dev);
+	struct netxen_adapter *adapter = port->adapter;
+	__le32 status;
+
+	/* read which mode */
+	if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
+		/* autonegotiation */
+		if (adapter->phy_write
+		    && adapter->phy_write(adapter, port->portnum,
+					  NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+					  (__le32) ecmd->autoneg) != 0)
+			return -EIO;
+		else
+			port->link_autoneg = ecmd->autoneg;
+
+		if (adapter->phy_read
+		    && adapter->phy_read(adapter, port->portnum,
+					 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+					 &status) != 0)
+			return -EIO;
+
+		/* speed */
+		switch (ecmd->speed) {
+		case SPEED_10:
+			netxen_set_phy_speed(status, 0);
+			break;
+		case SPEED_100:
+			netxen_set_phy_speed(status, 1);
+			break;
+		case SPEED_1000:
+			netxen_set_phy_speed(status, 2);
+			break;
+		}
+		/* set duplex mode */
+		if (ecmd->duplex == DUPLEX_HALF)
+			netxen_clear_phy_duplex(status);
+		if (ecmd->duplex == DUPLEX_FULL)
+			netxen_set_phy_duplex(status);
+		if (adapter->phy_write
+		    && adapter->phy_write(adapter, port->portnum,
+					  NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+					  *((int *)&status)) != 0)
+			return -EIO;
+		else {
+			port->link_speed = ecmd->speed;
+			port->link_duplex = ecmd->duplex;
+		}
+	} else
+		return -EOPNOTSUPP;
+
+	if (netif_running(dev)) {
+		dev->stop(dev);
+		dev->open(dev);
+	}
+	return 0;
+}
+
+static int netxen_nic_get_regs_len(struct net_device *dev)
+{
+	return NETXEN_NIC_REGS_LEN;
+}
+
+struct netxen_niu_regs {
+	__le32 reg[NETXEN_NIC_REGS_COUNT];
+};
+
+static struct netxen_niu_regs niu_registers[] = {
+	{
+	 /* GB Mode */
+	 {
+	  NETXEN_NIU_GB_SERDES_RESET,
+	  NETXEN_NIU_GB0_MII_MODE,
+	  NETXEN_NIU_GB1_MII_MODE,
+	  NETXEN_NIU_GB2_MII_MODE,
+	  NETXEN_NIU_GB3_MII_MODE,
+	  NETXEN_NIU_GB0_GMII_MODE,
+	  NETXEN_NIU_GB1_GMII_MODE,
+	  NETXEN_NIU_GB2_GMII_MODE,
+	  NETXEN_NIU_GB3_GMII_MODE,
+	  NETXEN_NIU_REMOTE_LOOPBACK,
+	  NETXEN_NIU_GB0_HALF_DUPLEX,
+	  NETXEN_NIU_GB1_HALF_DUPLEX,
+	  NETXEN_NIU_RESET_SYS_FIFOS,
+	  NETXEN_NIU_GB_CRC_DROP,
+	  NETXEN_NIU_GB_DROP_WRONGADDR,
+	  NETXEN_NIU_TEST_MUX_CTL,
+
+	  NETXEN_NIU_GB_MAC_CONFIG_0(0),
+	  NETXEN_NIU_GB_MAC_CONFIG_1(0),
+	  NETXEN_NIU_GB_HALF_DUPLEX_CTRL(0),
+	  NETXEN_NIU_GB_MAX_FRAME_SIZE(0),
+	  NETXEN_NIU_GB_TEST_REG(0),
+	  NETXEN_NIU_GB_MII_MGMT_CONFIG(0),
+	  NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
+	  NETXEN_NIU_GB_MII_MGMT_ADDR(0),
+	  NETXEN_NIU_GB_MII_MGMT_CTRL(0),
+	  NETXEN_NIU_GB_MII_MGMT_STATUS(0),
+	  NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
+	  NETXEN_NIU_GB_INTERFACE_CTRL(0),
+	  NETXEN_NIU_GB_INTERFACE_STATUS(0),
+	  NETXEN_NIU_GB_STATION_ADDR_0(0),
+	  NETXEN_NIU_GB_STATION_ADDR_1(0),
+	  -1,
+	  }
+	 },
+	{
+	 /* XG Mode */
+	 {
+	  NETXEN_NIU_XG_SINGLE_TERM,
+	  NETXEN_NIU_XG_DRIVE_HI,
+	  NETXEN_NIU_XG_DRIVE_LO,
+	  NETXEN_NIU_XG_DTX,
+	  NETXEN_NIU_XG_DEQ,
+	  NETXEN_NIU_XG_WORD_ALIGN,
+	  NETXEN_NIU_XG_RESET,
+	  NETXEN_NIU_XG_POWER_DOWN,
+	  NETXEN_NIU_XG_RESET_PLL,
+	  NETXEN_NIU_XG_SERDES_LOOPBACK,
+	  NETXEN_NIU_XG_DO_BYTE_ALIGN,
+	  NETXEN_NIU_XG_TX_ENABLE,
+	  NETXEN_NIU_XG_RX_ENABLE,
+	  NETXEN_NIU_XG_STATUS,
+	  NETXEN_NIU_XG_PAUSE_THRESHOLD,
+	  NETXEN_NIU_XGE_CONFIG_0,
+	  NETXEN_NIU_XGE_CONFIG_1,
+	  NETXEN_NIU_XGE_IPG,
+	  NETXEN_NIU_XGE_STATION_ADDR_0_HI,
+	  NETXEN_NIU_XGE_STATION_ADDR_0_1,
+	  NETXEN_NIU_XGE_STATION_ADDR_1_LO,
+	  NETXEN_NIU_XGE_STATUS,
+	  NETXEN_NIU_XGE_MAX_FRAME_SIZE,
+	  NETXEN_NIU_XGE_PAUSE_FRAME_VALUE,
+	  NETXEN_NIU_XGE_TX_BYTE_CNT,
+	  NETXEN_NIU_XGE_TX_FRAME_CNT,
+	  NETXEN_NIU_XGE_RX_BYTE_CNT,
+	  NETXEN_NIU_XGE_RX_FRAME_CNT,
+	  NETXEN_NIU_XGE_AGGR_ERROR_CNT,
+	  NETXEN_NIU_XGE_MULTICAST_FRAME_CNT,
+	  NETXEN_NIU_XGE_UNICAST_FRAME_CNT,
+	  NETXEN_NIU_XGE_CRC_ERROR_CNT,
+	  NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR,
+	  NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR,
+	  NETXEN_NIU_XGE_LOCAL_ERROR_CNT,
+	  NETXEN_NIU_XGE_REMOTE_ERROR_CNT,
+	  NETXEN_NIU_XGE_CONTROL_CHAR_CNT,
+	  NETXEN_NIU_XGE_PAUSE_FRAME_CNT,
+	  -1,
+	  }
+	 }
+};
+
+static void
+netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+	struct netxen_port *port = netdev_priv(dev);
+	struct netxen_adapter *adapter = port->adapter;
+	__le32 mode, *regs_buff = p;
+	void __iomem *addr;
+	int i, window;
+
+	memset(p, 0, NETXEN_NIC_REGS_LEN);
+	regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
+	    (port->pdev)->device;
+	/* which mode */
+	NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_MODE, &regs_buff[0]);
+	mode = regs_buff[0];
+
+	/* Common registers to all the modes */
+	NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER,
+				   &regs_buff[2]);
+	/* GB/XGB Mode: map the NIU mode value to the niu_registers[] index
+	 * (0 = GB, 1 = XG)
+	 */
+	mode = (mode / 2) - 1;
+	window = 0;
+	if (mode <= 1) {
+		for (i = 3; niu_registers[mode].reg[i - 3] != -1; i++) {
+			/* GB: port specific registers */
+			if (mode == 0 && i >= 19)
+				window = port->portnum * NETXEN_NIC_PORT_WINDOW;
+
+			NETXEN_NIC_LOCKED_READ_REG(niu_registers[mode].
+						   reg[i - 3] + window,
+						   &regs_buff[i]);
+		}
+
+	}
+}
+
+static void
+netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
+	/* options can be added depending upon the mode */
+	wol->wolopts = 0;
+}
+
+static u32 netxen_nic_get_link(struct net_device *dev)
+{
+	struct netxen_port *port = netdev_priv(dev);
+	struct netxen_adapter *adapter = port->adapter;
+	__le32 status;
+
+	/* read which mode */
+	if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
+		if (adapter->phy_read
+		    && adapter->phy_read(adapter, port->portnum,
+					 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+					 &status) != 0)
+			return -EIO;
+		else
+			return (netxen_get_phy_link(status));
+	} else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
+		int val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE));
+		return val == XG_LINK_UP;
+	}
+	return -EIO;
+}
+
+static int
+netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+		      u8 * bytes)
+{
+	struct netxen_port *port = netdev_priv(dev);
+	struct netxen_adapter *adapter = port->adapter;
+	int offset;
+
+	if (eeprom->len == 0)
+		return -EINVAL;
+
+	eeprom->magic = (port->pdev)->vendor | ((port->pdev)->device << 16);
+	for (offset = 0; offset < eeprom->len; offset++)
+		if (netxen_rom_fast_read
+		    (adapter, (8 * offset) + 8, (int *)eeprom->data) == -1)
+			return -EIO;
+	return 0;
+}
+
+static void
+netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
+{
+	struct netxen_port *port = netdev_priv(dev);
+	struct netxen_adapter *adapter = port->adapter;
+	int i;
+
+	ring->rx_pending = 0;
+	ring->rx_jumbo_pending = 0;
+	for (i = 0; i < MAX_RCV_CTX; ++i) {
+		ring->rx_pending += adapter->recv_ctx[i].
+		    rcv_desc[RCV_DESC_NORMAL_CTXID].rcv_pending;
+		ring->rx_jumbo_pending += adapter->recv_ctx[i].
+		    rcv_desc[RCV_DESC_JUMBO_CTXID].rcv_pending;
+	}
+
+	ring->rx_max_pending = adapter->max_rx_desc_count;
+	ring->tx_max_pending = adapter->max_tx_desc_count;
+	ring->rx_jumbo_max_pending = adapter->max_jumbo_rx_desc_count;
+	ring->rx_mini_max_pending = 0;
+	ring->rx_mini_pending = 0;
+	ring->rx_jumbo_pending = 0;
+}
+
+static void
+netxen_nic_get_pauseparam(struct net_device *dev,
+			  struct ethtool_pauseparam *pause)
+{
+	struct netxen_port *port = netdev_priv(dev);
+	struct netxen_adapter *adapter = port->adapter;
+	__le32 val;
+
+	if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
+		/* get flow control settings */
+		netxen_nic_read_w0(adapter,
+				   NETXEN_NIU_GB_MAC_CONFIG_0(port->portnum),
+				   (u32 *) & val);
+		pause->rx_pause = netxen_gb_get_rx_flowctl(val);
+		pause->tx_pause = netxen_gb_get_tx_flowctl(val);
+		/* get autoneg settings */
+		pause->autoneg = port->link_autoneg;
+	}
+}
+
+static int
+netxen_nic_set_pauseparam(struct net_device *dev,
+			  struct ethtool_pauseparam *pause)
+{
+	struct netxen_port *port = netdev_priv(dev);
+	struct netxen_adapter *adapter = port->adapter;
+	__le32 val;
+	unsigned int autoneg;
+
+	/* read mode */
+	if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
+		/* set flow control */
+		netxen_nic_read_w0(adapter,
+				   NETXEN_NIU_GB_MAC_CONFIG_0(port->portnum),
+				   (u32 *) & val);
+		if (pause->tx_pause)
+			netxen_gb_tx_flowctl(val);
+		else
+			netxen_gb_unset_tx_flowctl(val);
+		if (pause->rx_pause)
+			netxen_gb_rx_flowctl(val);
+		else
+			netxen_gb_unset_rx_flowctl(val);
+
+		netxen_nic_write_w0(adapter,
+				    NETXEN_NIU_GB_MAC_CONFIG_0(port->portnum),
+				    *(u32 *) (&val));
+		/* set autoneg */
+		autoneg = pause->autoneg;
+		if (adapter->phy_write
+		    && adapter->phy_write(adapter, port->portnum,
+					  NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+					  (__le32) autoneg) != 0)
+			return -EIO;
+		else {
+			port->link_autoneg = pause->autoneg;
+			return 0;
+		}
+	} else
+		return -EOPNOTSUPP;
+}
+
+static int netxen_nic_reg_test(struct net_device *dev)
+{
+	struct netxen_port *port = netdev_priv(dev);
+	struct netxen_adapter *adapter = port->adapter;
+	u32 data_read, data_written, save;
+	__le32 mode;
+
+	/*
+	 * First read the NIU mode, then exercise the read-only registers:
+	 * write a test pattern and read it back.  If the write sticks the
+	 * register test has failed and 0 is returned.
+	 */
+	netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode);
+	if (netxen_get_niu_enable_ge(mode)) {	/* GB Mode */
+		netxen_nic_read_w0(adapter,
+				   NETXEN_NIU_GB_MII_MGMT_STATUS(port->portnum),
+				   &data_read);
+
+		save = data_read;
+		if (data_read)
+			data_written = data_read & NETXEN_NIC_INVALID_DATA;
+		else
+			data_written = NETXEN_NIC_INVALID_DATA;
+		netxen_nic_write_w0(adapter,
+				    NETXEN_NIU_GB_MII_MGMT_STATUS(port->
+								  portnum),
+				    data_written);
+		netxen_nic_read_w0(adapter,
+				   NETXEN_NIU_GB_MII_MGMT_STATUS(port->portnum),
+				   &data_read);
+
+		if (data_written == data_read) {
+			netxen_nic_write_w0(adapter,
+					    NETXEN_NIU_GB_MII_MGMT_STATUS(port->
+									  portnum),
+					    save);
+
+			return 0;
+		}
+
+		/* netxen_niu_gb_mii_mgmt_indicators is read only */
+		netxen_nic_read_w0(adapter,
+				   NETXEN_NIU_GB_MII_MGMT_INDICATE(port->
+								   portnum),
+				   &data_read);
+
+		save = data_read;
+		if (data_read)
+			data_written = data_read & NETXEN_NIC_INVALID_DATA;
+		else
+			data_written = NETXEN_NIC_INVALID_DATA;
+		netxen_nic_write_w0(adapter,
+				    NETXEN_NIU_GB_MII_MGMT_INDICATE(port->
+								    portnum),
+				    data_written);
+
+		netxen_nic_read_w0(adapter,
+				   NETXEN_NIU_GB_MII_MGMT_INDICATE(port->
+								   portnum),
+				   &data_read);
+
+		if (data_written == data_read) {
+			netxen_nic_write_w0(adapter,
+					    NETXEN_NIU_GB_MII_MGMT_INDICATE
+					    (port->portnum), save);
+			return 0;
+		}
+
+		/* netxen_niu_gb_interface_status is read only */
+		netxen_nic_read_w0(adapter,
+				   NETXEN_NIU_GB_INTERFACE_STATUS(port->
+								  portnum),
+				   &data_read);
+
+		save = data_read;
+		if (data_read)
+			data_written = data_read & NETXEN_NIC_INVALID_DATA;
+		else
+			data_written = NETXEN_NIC_INVALID_DATA;
+		netxen_nic_write_w0(adapter,
+				    NETXEN_NIU_GB_INTERFACE_STATUS(port->
+								   portnum),
+				    data_written);
+
+		netxen_nic_read_w0(adapter,
+				   NETXEN_NIU_GB_INTERFACE_STATUS(port->
+								  portnum),
+				   &data_read);
+
+		if (data_written == data_read) {
+			netxen_nic_write_w0(adapter,
+					    NETXEN_NIU_GB_INTERFACE_STATUS
+					    (port->portnum), save);
+
+			return 0;
+		}
+	}			/* GB Mode */
+	return 1;
+}
+
+static int netxen_nic_diag_test_count(struct net_device *dev)
+{
+	return NETXEN_NIC_TEST_LEN;
+}
+
+static void
+netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
+		     u64 * data)
+{
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {	/* offline tests */
+		/* link test */
+		if (!(data[4] = (u64) netxen_nic_get_link(dev)))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		if (netif_running(dev))
+			dev->stop(dev);
+
+		/* register tests */
+		if (!(data[0] = netxen_nic_reg_test(dev)))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+		/* other tests pass as of now */
+		data[1] = data[2] = data[3] = 1;
+		if (netif_running(dev))
+			dev->open(dev);
+	} else {		/* online tests */
+		/* link test */
+		if (!(data[4] = (u64) netxen_nic_get_link(dev)))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* other tests pass by default */
+		data[0] = data[1] = data[2] = data[3] = 1;
+	}
+}
+
+static void
+netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+	int index;
+
+	switch (stringset) {
+	case ETH_SS_TEST:
+		memcpy(data, *netxen_nic_gstrings_test,
+		       NETXEN_NIC_TEST_LEN * ETH_GSTRING_LEN);
+		break;
+	case ETH_SS_STATS:
+		for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) {
+			memcpy(data + index * ETH_GSTRING_LEN,
+			       netxen_nic_gstrings_stats[index].stat_string,
+			       ETH_GSTRING_LEN);
+		}
+		break;
+	}
+}
+
+static int netxen_nic_get_stats_count(struct net_device *dev)
+{
+	return NETXEN_NIC_STATS_LEN;
+}
+
+static void
+netxen_nic_get_ethtool_stats(struct net_device *dev,
+			     struct ethtool_stats *stats, u64 * data)
+{
+	struct netxen_port *port = netdev_priv(dev);
+	int index;
+
+	for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) {
+		char *p =
+		    (char *)port + netxen_nic_gstrings_stats[index].stat_offset;
+		data[index] =
+		    (netxen_nic_gstrings_stats[index].sizeof_stat ==
+		     sizeof(u64)) ? *(u64 *) p : *(u32 *) p;
+	}
+
+}
+
+struct ethtool_ops netxen_nic_ethtool_ops = {
+	.get_settings = netxen_nic_get_settings,
+	.set_settings = netxen_nic_set_settings,
+	.get_drvinfo = netxen_nic_get_drvinfo,
+	.get_regs_len = netxen_nic_get_regs_len,
+	.get_regs = netxen_nic_get_regs,
+	.get_wol = netxen_nic_get_wol,
+	.get_link = netxen_nic_get_link,
+	.get_eeprom_len = netxen_nic_get_eeprom_len,
+	.get_eeprom = netxen_nic_get_eeprom,
+	.get_ringparam = netxen_nic_get_ringparam,
+	.get_pauseparam = netxen_nic_get_pauseparam,
+	.set_pauseparam = netxen_nic_set_pauseparam,
+	.get_tx_csum = ethtool_op_get_tx_csum,
+	.set_tx_csum = ethtool_op_set_tx_csum,
+	.get_sg = ethtool_op_get_sg,
+	.set_sg = ethtool_op_set_sg,
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = ethtool_op_set_tso,
+	.self_test_count = netxen_nic_diag_test_count,
+	.self_test = netxen_nic_diag_test,
+	.get_strings = netxen_nic_get_strings,
+	.get_stats_count = netxen_nic_get_stats_count,
+	.get_ethtool_stats = netxen_nic_get_ethtool_stats,
+	.get_perm_addr = ethtool_op_get_perm_addr,
+};
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
new file mode 100644
index 0000000..fe8b675
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -0,0 +1,678 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ *    info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ */
+
+#ifndef __NETXEN_NIC_HDR_H_
+#define __NETXEN_NIC_HDR_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+
+#include <asm/semaphore.h>
+#include <linux/spinlock.h>
+#include <asm/irq.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <asm/uaccess.h>
+#include <asm/string.h>		/* for memset */
+
+/*
+ * The basic unit of access when reading/writing control registers.
+ */
+
+typedef __le32 netxen_crbword_t;	/* single word in CRB space */
+
+enum {
+	NETXEN_HW_H0_CH_HUB_ADR = 0x05,
+	NETXEN_HW_H1_CH_HUB_ADR = 0x0E,
+	NETXEN_HW_H2_CH_HUB_ADR = 0x03,
+	NETXEN_HW_H3_CH_HUB_ADR = 0x01,
+	NETXEN_HW_H4_CH_HUB_ADR = 0x06,
+	NETXEN_HW_H5_CH_HUB_ADR = 0x07,
+	NETXEN_HW_H6_CH_HUB_ADR = 0x08
+};
+
+/*  Hub 0 */
+enum {
+	NETXEN_HW_MN_CRB_AGT_ADR = 0x15,
+	NETXEN_HW_MS_CRB_AGT_ADR = 0x25
+};
+
+/*  Hub 1 */
+enum {
+	NETXEN_HW_PS_CRB_AGT_ADR = 0x73,
+	NETXEN_HW_SS_CRB_AGT_ADR = 0x20,
+	NETXEN_HW_RPMX3_CRB_AGT_ADR = 0x0b,
+	NETXEN_HW_QMS_CRB_AGT_ADR = 0x00,
+	NETXEN_HW_SQGS0_CRB_AGT_ADR = 0x01,
+	NETXEN_HW_SQGS1_CRB_AGT_ADR = 0x02,
+	NETXEN_HW_SQGS2_CRB_AGT_ADR = 0x03,
+	NETXEN_HW_SQGS3_CRB_AGT_ADR = 0x04,
+	NETXEN_HW_C2C0_CRB_AGT_ADR = 0x58,
+	NETXEN_HW_C2C1_CRB_AGT_ADR = 0x59,
+	NETXEN_HW_C2C2_CRB_AGT_ADR = 0x5a,
+	NETXEN_HW_RPMX2_CRB_AGT_ADR = 0x0a,
+	NETXEN_HW_RPMX4_CRB_AGT_ADR = 0x0c,
+	NETXEN_HW_RPMX7_CRB_AGT_ADR = 0x0f,
+	NETXEN_HW_RPMX9_CRB_AGT_ADR = 0x12,
+	NETXEN_HW_SMB_CRB_AGT_ADR = 0x18
+};
+
+/*  Hub 2 */
+enum {
+	NETXEN_HW_NIU_CRB_AGT_ADR = 0x31,
+	NETXEN_HW_I2C0_CRB_AGT_ADR = 0x19,
+	NETXEN_HW_I2C1_CRB_AGT_ADR = 0x29,
+
+	NETXEN_HW_SN_CRB_AGT_ADR = 0x10,
+	NETXEN_HW_I2Q_CRB_AGT_ADR = 0x20,
+	NETXEN_HW_LPC_CRB_AGT_ADR = 0x22,
+	NETXEN_HW_ROMUSB_CRB_AGT_ADR = 0x21,
+	NETXEN_HW_QM_CRB_AGT_ADR = 0x66,
+	NETXEN_HW_SQG0_CRB_AGT_ADR = 0x60,
+	NETXEN_HW_SQG1_CRB_AGT_ADR = 0x61,
+	NETXEN_HW_SQG2_CRB_AGT_ADR = 0x62,
+	NETXEN_HW_SQG3_CRB_AGT_ADR = 0x63,
+	NETXEN_HW_RPMX1_CRB_AGT_ADR = 0x09,
+	NETXEN_HW_RPMX5_CRB_AGT_ADR = 0x0d,
+	NETXEN_HW_RPMX6_CRB_AGT_ADR = 0x0e,
+	NETXEN_HW_RPMX8_CRB_AGT_ADR = 0x11
+};
+
+/*  Hub 3 */
+enum {
+	NETXEN_HW_PH_CRB_AGT_ADR = 0x1A,
+	NETXEN_HW_SRE_CRB_AGT_ADR = 0x50,
+	NETXEN_HW_EG_CRB_AGT_ADR = 0x51,
+	NETXEN_HW_RPMX0_CRB_AGT_ADR = 0x08
+};
+
+/*  Hub 4 */
+enum {
+	NETXEN_HW_PEGN0_CRB_AGT_ADR = 0x40,
+	NETXEN_HW_PEGN1_CRB_AGT_ADR,
+	NETXEN_HW_PEGN2_CRB_AGT_ADR,
+	NETXEN_HW_PEGN3_CRB_AGT_ADR,
+	NETXEN_HW_PEGNI_CRB_AGT_ADR,
+	NETXEN_HW_PEGND_CRB_AGT_ADR,
+	NETXEN_HW_PEGNC_CRB_AGT_ADR,
+	NETXEN_HW_PEGR0_CRB_AGT_ADR,
+	NETXEN_HW_PEGR1_CRB_AGT_ADR,
+	NETXEN_HW_PEGR2_CRB_AGT_ADR,
+	NETXEN_HW_PEGR3_CRB_AGT_ADR
+};
+
+/*  Hub 5 */
+enum {
+	NETXEN_HW_PEGS0_CRB_AGT_ADR = 0x40,
+	NETXEN_HW_PEGS1_CRB_AGT_ADR,
+	NETXEN_HW_PEGS2_CRB_AGT_ADR,
+	NETXEN_HW_PEGS3_CRB_AGT_ADR,
+	NETXEN_HW_PEGSI_CRB_AGT_ADR,
+	NETXEN_HW_PEGSD_CRB_AGT_ADR,
+	NETXEN_HW_PEGSC_CRB_AGT_ADR
+};
+
+/*  Hub 6 */
+enum {
+	NETXEN_HW_CAS0_CRB_AGT_ADR = 0x46,
+	NETXEN_HW_CAS1_CRB_AGT_ADR = 0x47,
+	NETXEN_HW_CAS2_CRB_AGT_ADR = 0x48,
+	NETXEN_HW_CAS3_CRB_AGT_ADR = 0x49,
+	NETXEN_HW_NCM_CRB_AGT_ADR = 0x16,
+	NETXEN_HW_TMR_CRB_AGT_ADR = 0x17,
+	NETXEN_HW_XDMA_CRB_AGT_ADR = 0x05,
+	NETXEN_HW_OCM0_CRB_AGT_ADR = 0x06,
+	NETXEN_HW_OCM1_CRB_AGT_ADR = 0x07
+};
+
+/*  Floaters - non existent modules */
+#define NETXEN_HW_EFC_RPMX0_CRB_AGT_ADR	0x67
+
+/*  This field defines PCI/X adr [25:20] of agents on the CRB */
+enum {
+	NETXEN_HW_PX_MAP_CRB_PH = 0,
+	NETXEN_HW_PX_MAP_CRB_PS,
+	NETXEN_HW_PX_MAP_CRB_MN,
+	NETXEN_HW_PX_MAP_CRB_MS,
+	NETXEN_HW_PX_MAP_CRB_PGR1,
+	NETXEN_HW_PX_MAP_CRB_SRE,
+	NETXEN_HW_PX_MAP_CRB_NIU,
+	NETXEN_HW_PX_MAP_CRB_QMN,
+	NETXEN_HW_PX_MAP_CRB_SQN0,
+	NETXEN_HW_PX_MAP_CRB_SQN1,
+	NETXEN_HW_PX_MAP_CRB_SQN2,
+	NETXEN_HW_PX_MAP_CRB_SQN3,
+	NETXEN_HW_PX_MAP_CRB_QMS,
+	NETXEN_HW_PX_MAP_CRB_SQS0,
+	NETXEN_HW_PX_MAP_CRB_SQS1,
+	NETXEN_HW_PX_MAP_CRB_SQS2,
+	NETXEN_HW_PX_MAP_CRB_SQS3,
+	NETXEN_HW_PX_MAP_CRB_PGN0,
+	NETXEN_HW_PX_MAP_CRB_PGN1,
+	NETXEN_HW_PX_MAP_CRB_PGN2,
+	NETXEN_HW_PX_MAP_CRB_PGN3,
+	NETXEN_HW_PX_MAP_CRB_PGND,
+	NETXEN_HW_PX_MAP_CRB_PGNI,
+	NETXEN_HW_PX_MAP_CRB_PGS0,
+	NETXEN_HW_PX_MAP_CRB_PGS1,
+	NETXEN_HW_PX_MAP_CRB_PGS2,
+	NETXEN_HW_PX_MAP_CRB_PGS3,
+	NETXEN_HW_PX_MAP_CRB_PGSD,
+	NETXEN_HW_PX_MAP_CRB_PGSI,
+	NETXEN_HW_PX_MAP_CRB_SN,
+	NETXEN_HW_PX_MAP_CRB_PGR2,
+	NETXEN_HW_PX_MAP_CRB_EG,
+	NETXEN_HW_PX_MAP_CRB_PH2,
+	NETXEN_HW_PX_MAP_CRB_PS2,
+	NETXEN_HW_PX_MAP_CRB_CAM,
+	NETXEN_HW_PX_MAP_CRB_CAS0,
+	NETXEN_HW_PX_MAP_CRB_CAS1,
+	NETXEN_HW_PX_MAP_CRB_CAS2,
+	NETXEN_HW_PX_MAP_CRB_C2C0,
+	NETXEN_HW_PX_MAP_CRB_C2C1,
+	NETXEN_HW_PX_MAP_CRB_TIMR,
+	NETXEN_HW_PX_MAP_CRB_PGR3,
+	NETXEN_HW_PX_MAP_CRB_RPMX1,
+	NETXEN_HW_PX_MAP_CRB_RPMX2,
+	NETXEN_HW_PX_MAP_CRB_RPMX3,
+	NETXEN_HW_PX_MAP_CRB_RPMX4,
+	NETXEN_HW_PX_MAP_CRB_RPMX5,
+	NETXEN_HW_PX_MAP_CRB_RPMX6,
+	NETXEN_HW_PX_MAP_CRB_RPMX7,
+	NETXEN_HW_PX_MAP_CRB_XDMA,
+	NETXEN_HW_PX_MAP_CRB_I2Q,
+	NETXEN_HW_PX_MAP_CRB_ROMUSB,
+	NETXEN_HW_PX_MAP_CRB_CAS3,
+	NETXEN_HW_PX_MAP_CRB_RPMX0,
+	NETXEN_HW_PX_MAP_CRB_RPMX8,
+	NETXEN_HW_PX_MAP_CRB_RPMX9,
+	NETXEN_HW_PX_MAP_CRB_OCM0,
+	NETXEN_HW_PX_MAP_CRB_OCM1,
+	NETXEN_HW_PX_MAP_CRB_SMB,
+	NETXEN_HW_PX_MAP_CRB_I2C0,
+	NETXEN_HW_PX_MAP_CRB_I2C1,
+	NETXEN_HW_PX_MAP_CRB_LPC,
+	NETXEN_HW_PX_MAP_CRB_PGNC,
+	NETXEN_HW_PX_MAP_CRB_PGR0
+};
+
+/*  This field defines CRB adr [31:20] of the agents */
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_MN	\
+	((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MN_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PH	\
+	((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_PH_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_MS	\
+	((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MS_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PS	\
+	((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_PS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SS	\
+	((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3	\
+	((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_QMS	\
+	((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_QMS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS0	\
+	((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS1	\
+	((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS2	\
+	((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS3	\
+	((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C0	\
+	((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C1	\
+	((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2	\
+	((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4	\
+	((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX4_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7	\
+	((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX7_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9	\
+	((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX9_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SMB	\
+	((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SMB_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_NIU	\
+	((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_NIU_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C0	\
+	((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C1	\
+	((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C1_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SRE	\
+	((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SRE_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_EG	\
+	((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_EG_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0	\
+	((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_QMN	\
+	((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_QM_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN0	\
+	((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN1	\
+	((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN2	\
+	((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN3	\
+	((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1	\
+	((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5	\
+	((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX5_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6	\
+	((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX6_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8	\
+	((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX8_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS0	\
+	((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS1	\
+	((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS2	\
+	((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS3	\
+	((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS3_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNI	\
+	((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNI_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGND	\
+	((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGND_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN0	\
+	((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN1	\
+	((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN2	\
+	((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN3	\
+	((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNC	\
+	((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNC_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR0	\
+	((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR1	\
+	((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR2	\
+	((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR3	\
+	((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR3_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSI	\
+	((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSI_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSD	\
+	((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSD_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS0	\
+	((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS1	\
+	((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS2	\
+	((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS3	\
+	((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSC	\
+	((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSC_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAM	\
+	((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_NCM_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_TIMR	\
+	((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_TMR_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_XDMA	\
+	((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_XDMA_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SN	\
+	((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_SN_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2Q	\
+	((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_I2Q_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB	\
+	((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_ROMUSB_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM0	\
+	((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM1	\
+	((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_LPC	\
+	((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR)
+
+/*
+ * MAX_RCV_CTX : The number of receive contexts that are available on
+ * the phantom.
+ */
+#define MAX_RCV_CTX			1
+
+#define NETXEN_SRE_INT_STATUS		(NETXEN_CRB_SRE + 0x00034)
+#define NETXEN_SRE_PBI_ACTIVE_STATUS	(NETXEN_CRB_SRE + 0x01014)
+#define NETXEN_SRE_L1RE_CTL		(NETXEN_CRB_SRE + 0x03000)
+#define NETXEN_SRE_L2RE_CTL		(NETXEN_CRB_SRE + 0x05000)
+#define NETXEN_SRE_BUF_CTL		(NETXEN_CRB_SRE + 0x01000)
+
+#define	NETXEN_DMA_BASE(U)	(NETXEN_CRB_PCIX_MD + 0x20000 + ((U)<<16))
+#define	NETXEN_DMA_COMMAND(U)	(NETXEN_DMA_BASE(U) + 0x00008)
+
+#define NETXEN_I2Q_CLR_PCI_HI	(NETXEN_CRB_I2Q + 0x00034)
+
+#define PEG_NETWORK_BASE(N)	(NETXEN_CRB_PEG_NET_0 + (((N)&3) << 20))
+#define CRB_REG_EX_PC		0x3c
+
+#define ROMUSB_GLB	(NETXEN_CRB_ROMUSB + 0x00000)
+#define ROMUSB_ROM	(NETXEN_CRB_ROMUSB + 0x10000)
+
+#define NETXEN_ROMUSB_GLB_STATUS	(ROMUSB_GLB + 0x0004)
+#define NETXEN_ROMUSB_GLB_SW_RESET	(ROMUSB_GLB + 0x0008)
+#define NETXEN_ROMUSB_GLB_PAD_GPIO_I	(ROMUSB_GLB + 0x000c)
+#define NETXEN_ROMUSB_GLB_CAS_RST	(ROMUSB_GLB + 0x0038)
+#define NETXEN_ROMUSB_GLB_TEST_MUX_SEL	(ROMUSB_GLB + 0x0044)
+#define NETXEN_ROMUSB_GLB_PEGTUNE_DONE	(ROMUSB_GLB + 0x005c)
+#define NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL	(ROMUSB_GLB + 0x00A8)
+
+#define NETXEN_ROMUSB_GPIO(n)		(ROMUSB_GLB + 0x60 + (4 * (n)))
+
+#define NETXEN_ROMUSB_ROM_INSTR_OPCODE	(ROMUSB_ROM + 0x0004)
+#define NETXEN_ROMUSB_ROM_ADDRESS	(ROMUSB_ROM + 0x0008)
+#define NETXEN_ROMUSB_ROM_WDATA		(ROMUSB_ROM + 0x000c)
+#define NETXEN_ROMUSB_ROM_ABYTE_CNT	(ROMUSB_ROM + 0x0010)
+#define NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define NETXEN_ROMUSB_ROM_RDATA		(ROMUSB_ROM + 0x0018)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER	0x0d417340
+
+/******************************************************************************
+*
+*    Definitions specific to M25P flash
+*
+*******************************************************************************
+*   Instructions
+*/
+#define M25P_INSTR_WREN		0x06
+#define M25P_INSTR_WRDI		0x04
+#define M25P_INSTR_RDID		0x9f
+#define M25P_INSTR_RDSR		0x05
+#define M25P_INSTR_WRSR		0x01
+#define M25P_INSTR_READ		0x03
+#define M25P_INSTR_FAST_READ	0x0b
+#define M25P_INSTR_PP		0x02
+#define M25P_INSTR_SE		0xd8
+#define M25P_INSTR_BE		0xc7
+#define M25P_INSTR_DP		0xb9
+#define M25P_INSTR_RES		0xab
+
+/* all are 1MB windows */
+
+#define NETXEN_PCI_CRB_WINDOWSIZE	0x00100000
+#define NETXEN_PCI_CRB_WINDOW(A)	\
+	(NETXEN_PCI_CRBSPACE + (A)*NETXEN_PCI_CRB_WINDOWSIZE)
+
+#define NETXEN_CRB_NIU		NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_NIU)
+#define NETXEN_CRB_SRE		NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SRE)
+#define NETXEN_CRB_ROMUSB	\
+	NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB)
+#define NETXEN_CRB_I2Q		NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q)
+#define NETXEN_CRB_MAX		NETXEN_PCI_CRB_WINDOW(64)
+
+#define NETXEN_CRB_PCIX_HOST	NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH)
+#define NETXEN_CRB_PCIX_HOST2	NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH2)
+#define NETXEN_CRB_PEG_NET_0	NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN0)
+#define NETXEN_CRB_PEG_NET_1	NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN1)
+#define NETXEN_CRB_PEG_NET_2	NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN2)
+#define NETXEN_CRB_PEG_NET_3	NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN3)
+#define NETXEN_CRB_PEG_NET_D	NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGND)
+#define NETXEN_CRB_PEG_NET_I	NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGNI)
+#define NETXEN_CRB_DDR_NET	NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_MN)
+
+#define NETXEN_CRB_PCIX_MD	NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PS)
+#define NETXEN_CRB_PCIE		NETXEN_CRB_PCIX_MD
+
+#define ISR_INT_VECTOR		(NETXEN_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK		(NETXEN_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_MASK_SLOW	(NETXEN_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_TARGET_STATUS	(NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_MASK	(NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK))
+
+#define NETXEN_PCI_MAPSIZE	128
+#define NETXEN_PCI_DDR_NET	(0x00000000UL)
+#define NETXEN_PCI_QDR_NET	(0x04000000UL)
+#define NETXEN_PCI_DIRECT_CRB	(0x04400000UL)
+#define NETXEN_PCI_CAMQM_MAX	(0x04ffffffUL)
+#define NETXEN_PCI_OCM0		(0x05000000UL)
+#define NETXEN_PCI_OCM0_MAX	(0x050fffffUL)
+#define NETXEN_PCI_OCM1		(0x05100000UL)
+#define NETXEN_PCI_OCM1_MAX	(0x051fffffUL)
+#define NETXEN_PCI_CRBSPACE	(0x06000000UL)
+
+#define NETXEN_CRB_CAM	NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_CAM)
+
+#define NETXEN_ADDR_DDR_NET	(0x0000000000000000ULL)
+#define NETXEN_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+#define NETXEN_ADDR_OCM0	(0x0000000200000000ULL)
+#define NETXEN_ADDR_OCM0_MAX	(0x00000002000fffffULL)
+#define NETXEN_ADDR_OCM1	(0x0000000200400000ULL)
+#define NETXEN_ADDR_OCM1_MAX	(0x00000002004fffffULL)
+#define NETXEN_ADDR_QDR_NET	(0x0000000300000000ULL)
+#define NETXEN_ADDR_QDR_NET_MAX (0x00000003003fffffULL)
+
+	/* 200ms delay in each loop */
+#define	NETXEN_NIU_PHY_WAITLEN		200000
+	/* 10 seconds before we give up */
+#define	NETXEN_NIU_PHY_WAITMAX		50
+#define	NETXEN_NIU_MAX_GBE_PORTS	4
+
+#define	NETXEN_NIU_MODE			(NETXEN_CRB_NIU + 0x00000)
+
+#define	NETXEN_NIU_XG_SINGLE_TERM	(NETXEN_CRB_NIU + 0x00004)
+#define	NETXEN_NIU_XG_DRIVE_HI		(NETXEN_CRB_NIU + 0x00008)
+#define	NETXEN_NIU_XG_DRIVE_LO		(NETXEN_CRB_NIU + 0x0000c)
+#define	NETXEN_NIU_XG_DTX		(NETXEN_CRB_NIU + 0x00010)
+#define	NETXEN_NIU_XG_DEQ		(NETXEN_CRB_NIU + 0x00014)
+#define	NETXEN_NIU_XG_WORD_ALIGN	(NETXEN_CRB_NIU + 0x00018)
+#define	NETXEN_NIU_XG_RESET		(NETXEN_CRB_NIU + 0x0001c)
+#define	NETXEN_NIU_XG_POWER_DOWN	(NETXEN_CRB_NIU + 0x00020)
+#define	NETXEN_NIU_XG_RESET_PLL		(NETXEN_CRB_NIU + 0x00024)
+#define	NETXEN_NIU_XG_SERDES_LOOPBACK	(NETXEN_CRB_NIU + 0x00028)
+#define	NETXEN_NIU_XG_DO_BYTE_ALIGN	(NETXEN_CRB_NIU + 0x0002c)
+#define	NETXEN_NIU_XG_TX_ENABLE		(NETXEN_CRB_NIU + 0x00030)
+#define	NETXEN_NIU_XG_RX_ENABLE		(NETXEN_CRB_NIU + 0x00034)
+#define	NETXEN_NIU_XG_STATUS		(NETXEN_CRB_NIU + 0x00038)
+#define	NETXEN_NIU_XG_PAUSE_THRESHOLD	(NETXEN_CRB_NIU + 0x0003c)
+#define	NETXEN_NIU_INT_MASK		(NETXEN_CRB_NIU + 0x00040)
+#define	NETXEN_NIU_ACTIVE_INT		(NETXEN_CRB_NIU + 0x00044)
+#define	NETXEN_NIU_MASKABLE_INT		(NETXEN_CRB_NIU + 0x00048)
+
+#define NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER	(NETXEN_CRB_NIU + 0x0004c)
+
+#define	NETXEN_NIU_GB_SERDES_RESET	(NETXEN_CRB_NIU + 0x00050)
+#define	NETXEN_NIU_GB0_GMII_MODE	(NETXEN_CRB_NIU + 0x00054)
+#define	NETXEN_NIU_GB0_MII_MODE		(NETXEN_CRB_NIU + 0x00058)
+#define	NETXEN_NIU_GB1_GMII_MODE	(NETXEN_CRB_NIU + 0x0005c)
+#define	NETXEN_NIU_GB1_MII_MODE		(NETXEN_CRB_NIU + 0x00060)
+#define	NETXEN_NIU_GB2_GMII_MODE	(NETXEN_CRB_NIU + 0x00064)
+#define	NETXEN_NIU_GB2_MII_MODE		(NETXEN_CRB_NIU + 0x00068)
+#define	NETXEN_NIU_GB3_GMII_MODE	(NETXEN_CRB_NIU + 0x0006c)
+#define	NETXEN_NIU_GB3_MII_MODE		(NETXEN_CRB_NIU + 0x00070)
+#define	NETXEN_NIU_REMOTE_LOOPBACK	(NETXEN_CRB_NIU + 0x00074)
+#define	NETXEN_NIU_GB0_HALF_DUPLEX	(NETXEN_CRB_NIU + 0x00078)
+#define	NETXEN_NIU_GB1_HALF_DUPLEX	(NETXEN_CRB_NIU + 0x0007c)
+#define	NETXEN_NIU_RESET_SYS_FIFOS	(NETXEN_CRB_NIU + 0x00088)
+#define	NETXEN_NIU_GB_CRC_DROP		(NETXEN_CRB_NIU + 0x0008c)
+#define	NETXEN_NIU_GB_DROP_WRONGADDR	(NETXEN_CRB_NIU + 0x00090)
+#define	NETXEN_NIU_TEST_MUX_CTL		(NETXEN_CRB_NIU + 0x00094)
+#define	NETXEN_NIU_XG_PAUSE_CTL		(NETXEN_CRB_NIU + 0x00098)
+#define	NETXEN_NIU_XG_PAUSE_LEVEL	(NETXEN_CRB_NIU + 0x000dc)
+#define	NETXEN_NIU_XG_SEL		(NETXEN_CRB_NIU + 0x00128)
+
+#define NETXEN_NIU_FULL_LEVEL_XG	(NETXEN_CRB_NIU + 0x00450)
+
+#define NETXEN_NIU_XG1_RESET	    	(NETXEN_CRB_NIU + 0x0011c)
+#define NETXEN_NIU_XG1_POWER_DOWN	(NETXEN_CRB_NIU + 0x00120)
+#define NETXEN_NIU_XG1_RESET_PLL	(NETXEN_CRB_NIU + 0x00124)
+
+#define NETXEN_MAC_ADDR_CNTL_REG	(NETXEN_CRB_NIU + 0x1000)
+
+#define	NETXEN_MULTICAST_ADDR_HI_0	(NETXEN_CRB_NIU + 0x1010)
+#define NETXEN_MULTICAST_ADDR_HI_1	(NETXEN_CRB_NIU + 0x1014)
+#define NETXEN_MULTICAST_ADDR_HI_2	(NETXEN_CRB_NIU + 0x1018)
+#define NETXEN_MULTICAST_ADDR_HI_3	(NETXEN_CRB_NIU + 0x101c)
+
+#define	NETXEN_NIU_GB_MAC_CONFIG_0(I)		\
+	(NETXEN_CRB_NIU + 0x30000 + (I)*0x10000)
+#define	NETXEN_NIU_GB_MAC_CONFIG_1(I)		\
+	(NETXEN_CRB_NIU + 0x30004 + (I)*0x10000)
+#define	NETXEN_NIU_GB_MAC_IPG_IFG(I)		\
+	(NETXEN_CRB_NIU + 0x30008 + (I)*0x10000)
+#define	NETXEN_NIU_GB_HALF_DUPLEX_CTRL(I)	\
+	(NETXEN_CRB_NIU + 0x3000c + (I)*0x10000)
+#define	NETXEN_NIU_GB_MAX_FRAME_SIZE(I)		\
+	(NETXEN_CRB_NIU + 0x30010 + (I)*0x10000)
+#define	NETXEN_NIU_GB_TEST_REG(I)		\
+	(NETXEN_CRB_NIU + 0x3001c + (I)*0x10000)
+#define	NETXEN_NIU_GB_MII_MGMT_CONFIG(I)	\
+	(NETXEN_CRB_NIU + 0x30020 + (I)*0x10000)
+#define	NETXEN_NIU_GB_MII_MGMT_COMMAND(I)	\
+	(NETXEN_CRB_NIU + 0x30024 + (I)*0x10000)
+#define	NETXEN_NIU_GB_MII_MGMT_ADDR(I)		\
+	(NETXEN_CRB_NIU + 0x30028 + (I)*0x10000)
+#define	NETXEN_NIU_GB_MII_MGMT_CTRL(I)		\
+	(NETXEN_CRB_NIU + 0x3002c + (I)*0x10000)
+#define	NETXEN_NIU_GB_MII_MGMT_STATUS(I)	\
+	(NETXEN_CRB_NIU + 0x30030 + (I)*0x10000)
+#define	NETXEN_NIU_GB_MII_MGMT_INDICATE(I)	\
+	(NETXEN_CRB_NIU + 0x30034 + (I)*0x10000)
+#define	NETXEN_NIU_GB_INTERFACE_CTRL(I)		\
+	(NETXEN_CRB_NIU + 0x30038 + (I)*0x10000)
+#define	NETXEN_NIU_GB_INTERFACE_STATUS(I)	\
+	(NETXEN_CRB_NIU + 0x3003c + (I)*0x10000)
+#define	NETXEN_NIU_GB_STATION_ADDR_0(I)		\
+	(NETXEN_CRB_NIU + 0x30040 + (I)*0x10000)
+#define	NETXEN_NIU_GB_STATION_ADDR_1(I)		\
+	(NETXEN_CRB_NIU + 0x30044 + (I)*0x10000)
+
+#define	NETXEN_NIU_XGE_CONFIG_0			(NETXEN_CRB_NIU + 0x70000)
+#define	NETXEN_NIU_XGE_CONFIG_1			(NETXEN_CRB_NIU + 0x70004)
+#define	NETXEN_NIU_XGE_IPG			(NETXEN_CRB_NIU + 0x70008)
+#define	NETXEN_NIU_XGE_STATION_ADDR_0_HI	(NETXEN_CRB_NIU + 0x7000c)
+#define	NETXEN_NIU_XGE_STATION_ADDR_0_1		(NETXEN_CRB_NIU + 0x70010)
+#define	NETXEN_NIU_XGE_STATION_ADDR_1_LO	(NETXEN_CRB_NIU + 0x70014)
+#define	NETXEN_NIU_XGE_STATUS			(NETXEN_CRB_NIU + 0x70018)
+#define	NETXEN_NIU_XGE_MAX_FRAME_SIZE		(NETXEN_CRB_NIU + 0x7001c)
+#define	NETXEN_NIU_XGE_PAUSE_FRAME_VALUE	(NETXEN_CRB_NIU + 0x70020)
+#define	NETXEN_NIU_XGE_TX_BYTE_CNT		(NETXEN_CRB_NIU + 0x70024)
+#define	NETXEN_NIU_XGE_TX_FRAME_CNT		(NETXEN_CRB_NIU + 0x70028)
+#define	NETXEN_NIU_XGE_RX_BYTE_CNT		(NETXEN_CRB_NIU + 0x7002c)
+#define	NETXEN_NIU_XGE_RX_FRAME_CNT		(NETXEN_CRB_NIU + 0x70030)
+#define	NETXEN_NIU_XGE_AGGR_ERROR_CNT		(NETXEN_CRB_NIU + 0x70034)
+#define	NETXEN_NIU_XGE_MULTICAST_FRAME_CNT 	(NETXEN_CRB_NIU + 0x70038)
+#define	NETXEN_NIU_XGE_UNICAST_FRAME_CNT	(NETXEN_CRB_NIU + 0x7003c)
+#define	NETXEN_NIU_XGE_CRC_ERROR_CNT		(NETXEN_CRB_NIU + 0x70040)
+#define	NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR	(NETXEN_CRB_NIU + 0x70044)
+#define	NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR	(NETXEN_CRB_NIU + 0x70048)
+#define	NETXEN_NIU_XGE_LOCAL_ERROR_CNT		(NETXEN_CRB_NIU + 0x7004c)
+#define	NETXEN_NIU_XGE_REMOTE_ERROR_CNT		(NETXEN_CRB_NIU + 0x70050)
+#define	NETXEN_NIU_XGE_CONTROL_CHAR_CNT		(NETXEN_CRB_NIU + 0x70054)
+#define	NETXEN_NIU_XGE_PAUSE_FRAME_CNT		(NETXEN_CRB_NIU + 0x70058)
+#define NETXEN_NIU_XG1_CONFIG_0			(NETXEN_CRB_NIU + 0x80000)
+#define NETXEN_NIU_XG1_CONFIG_1			(NETXEN_CRB_NIU + 0x80004)
+#define NETXEN_NIU_XG1_IPG			(NETXEN_CRB_NIU + 0x80008)
+#define NETXEN_NIU_XG1_STATION_ADDR_0_HI	(NETXEN_CRB_NIU + 0x8000c)
+#define NETXEN_NIU_XG1_STATION_ADDR_0_1		(NETXEN_CRB_NIU + 0x80010)
+#define NETXEN_NIU_XG1_STATION_ADDR_1_LO	(NETXEN_CRB_NIU + 0x80014)
+#define NETXEN_NIU_XG1_STATUS		    	(NETXEN_CRB_NIU + 0x80018)
+#define NETXEN_NIU_XG1_MAX_FRAME_SIZE	   	(NETXEN_CRB_NIU + 0x8001c)
+#define NETXEN_NIU_XG1_PAUSE_FRAME_VALUE	(NETXEN_CRB_NIU + 0x80020)
+#define NETXEN_NIU_XG1_TX_BYTE_CNT		(NETXEN_CRB_NIU + 0x80024)
+#define NETXEN_NIU_XG1_TX_FRAME_CNT	 	(NETXEN_CRB_NIU + 0x80028)
+#define NETXEN_NIU_XG1_RX_BYTE_CNT	  	(NETXEN_CRB_NIU + 0x8002c)
+#define NETXEN_NIU_XG1_RX_FRAME_CNT	 	(NETXEN_CRB_NIU + 0x80030)
+#define NETXEN_NIU_XG1_AGGR_ERROR_CNT	   	(NETXEN_CRB_NIU + 0x80034)
+#define NETXEN_NIU_XG1_MULTICAST_FRAME_CNT	(NETXEN_CRB_NIU + 0x80038)
+#define NETXEN_NIU_XG1_UNICAST_FRAME_CNT	(NETXEN_CRB_NIU + 0x8003c)
+#define NETXEN_NIU_XG1_CRC_ERROR_CNT		(NETXEN_CRB_NIU + 0x80040)
+#define NETXEN_NIU_XG1_OVERSIZE_FRAME_ERR	(NETXEN_CRB_NIU + 0x80044)
+#define NETXEN_NIU_XG1_UNDERSIZE_FRAME_ERR	(NETXEN_CRB_NIU + 0x80048)
+#define NETXEN_NIU_XG1_LOCAL_ERROR_CNT		(NETXEN_CRB_NIU + 0x8004c)
+#define NETXEN_NIU_XG1_REMOTE_ERROR_CNT		(NETXEN_CRB_NIU + 0x80050)
+#define NETXEN_NIU_XG1_CONTROL_CHAR_CNT		(NETXEN_CRB_NIU + 0x80054)
+#define NETXEN_NIU_XG1_PAUSE_FRAME_CNT		(NETXEN_CRB_NIU + 0x80058)
+
+/* XG Link status */
+#define XG_LINK_UP	0x10
+#define XG_LINK_DOWN	0x20
+
+#define NETXEN_CAM_RAM_BASE	(NETXEN_CRB_CAM + 0x02000)
+#define NETXEN_CAM_RAM(reg)	(NETXEN_CAM_RAM_BASE + (reg))
+#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150))
+#define NETXEN_FW_VERSION_MINOR (NETXEN_CAM_RAM(0x154))
+#define NETXEN_FW_VERSION_SUB	(NETXEN_CAM_RAM(0x158))
+#define NETXEN_ROM_LOCK_ID	(NETXEN_CAM_RAM(0x100))
+
+#define NETXEN_PHY_LOCK_ID	(NETXEN_CAM_RAM(0x120))
+
+/* Lock IDs for PHY lock */
+#define PHY_LOCK_DRIVER		0x44524956
+
+/* Used for PS PCI Memory access */
+#define PCIX_PS_OP_ADDR_LO	(0x10000)
+/*   via CRB  (PS side only)     */
+#define PCIX_PS_OP_ADDR_HI	(0x10004)
+
+#define PCIX_INT_VECTOR		(0x10100)
+#define PCIX_INT_MASK		(0x10104)
+
+#define PCIX_MN_WINDOW		(0x10200)
+#define PCIX_MS_WINDOW		(0x10204)
+#define PCIX_SN_WINDOW		(0x10208)
+#define PCIX_CRB_WINDOW		(0x10210)
+
+#define PCIX_TARGET_STATUS	(0x10118)
+#define PCIX_TARGET_MASK	(0x10128)
+
+#define PCIX_MSI_F0		(0x13000)
+
+#define PCIX_PS_MEM_SPACE	(0x90000)
+
+#define NETXEN_PCIX_PH_REG(reg)	(NETXEN_CRB_PCIE + (reg))
+#define NETXEN_PCIX_PS_REG(reg)	(NETXEN_CRB_PCIX_MD + (reg))
+
+#define NETXEN_PCIE_REG(reg)	(NETXEN_CRB_PCIE + (reg))
+
+#define PCIE_MAX_DMA_XFER_SIZE	(0x1404c)
+
+#define PCIE_DCR		0x00d8
+
+#define PCIE_SEM2_LOCK		(0x1c010)	/* Flash lock   */
+#define PCIE_SEM2_UNLOCK	(0x1c014)	/* Flash unlock */
+#define PCIE_SEM3_LOCK	  	(0x1c018)	/* Phy lock     */
+#define PCIE_SEM3_UNLOCK	(0x1c01c)	/* Phy unlock   */
+
+#define PCIE_TGT_SPLIT_CHICKEN	(0x12080)
+
+#define PCIE_MAX_MASTER_SPLIT	(0x14048)
+
+#endif				/* __NETXEN_NIC_HDR_H_ */
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
new file mode 100644
index 0000000..9147b60
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -0,0 +1,1293 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ *    info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ *
+ *
+ * Source file for NIC routines to access the Phantom hardware
+ *
+ */
+
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+#include "netxen_nic_phan_reg.h"
+
+/*  PCI Windowing for DDR regions.  */
+
+#define ADDR_IN_RANGE(addr, low, high)	\
+	(((addr) <= (high)) && ((addr) >= (low)))
+
+#define NETXEN_FLASH_BASE	(BOOTLD_START)
+#define NETXEN_PHANTOM_MEM_BASE	(NETXEN_FLASH_BASE)
+#define NETXEN_MAX_MTU		(8000 + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE)
+#define NETXEN_MIN_MTU		64
+#define NETXEN_ETH_FCS_SIZE     4
+#define NETXEN_ENET_HEADER_SIZE 14
+#define NETXEN_WINDOW_ONE 	0x2000000	/*CRB Window: bit 25 of CRB address */
+#define NETXEN_FIRMWARE_LEN 	((16 * 1024) / 4)
+#define NETXEN_NIU_HDRSIZE	(0x1 << 6)
+#define NETXEN_NIU_TLRSIZE	(0x1 << 5)
+
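+/* split a 64-bit bus address into the 32-bit halves programmed into the card */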
+#define lower32(x)		((u32)((x) & 0xffffffff))
+#define upper32(x)			\
+	((u32)(((unsigned long long)(x) >> 32) & 0xffffffff))
+
+#define NETXEN_NIC_ZERO_PAUSE_ADDR     0ULL
+#define NETXEN_NIC_UNIT_PAUSE_ADDR     0x200ULL
+#define NETXEN_NIC_EPG_PAUSE_ADDR1     0x2200010000c28001ULL
+#define NETXEN_NIC_EPG_PAUSE_ADDR2     0x0100088866554433ULL
+
+#define NETXEN_NIC_WINDOW_MARGIN 0x100000
+
+unsigned long netxen_nic_pci_set_window(struct netxen_adapter *adapter,
+					unsigned long long addr);
+void netxen_free_hw_resources(struct netxen_adapter *adapter);
+
+int netxen_nic_set_mac(struct net_device *netdev, void *p)
+{
+	struct netxen_port *port = netdev_priv(netdev);
+	struct netxen_adapter *adapter = port->adapter;
+	struct sockaddr *addr = p;
+
+	if (netif_running(netdev))
+		return -EBUSY;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	DPRINTK(INFO, "valid ether addr\n");
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+	if (adapter->macaddr_set)
+		adapter->macaddr_set(port, addr->sa_data);
+
+	return 0;
+}
+
+/*
+ * netxen_nic_set_multi - Multicast
+ */
+void netxen_nic_set_multi(struct net_device *netdev)
+{
+	struct netxen_port *port = netdev_priv(netdev);
+	struct netxen_adapter *adapter = port->adapter;
+	struct dev_mc_list *mc_ptr;
+	__le32 netxen_mac_addr_cntl_data = 0;
+
+	mc_ptr = netdev->mc_list;
+	if (netdev->flags & IFF_PROMISC) {
+		if (adapter->set_promisc)
+			adapter->set_promisc(adapter,
+					     port->portnum,
+					     NETXEN_NIU_PROMISC_MODE);
+	} else {
+		if (adapter->unset_promisc &&
+		    adapter->ahw.boardcfg.board_type
+		    != NETXEN_BRDTYPE_P2_SB31_10G_IMEZ)
+			adapter->unset_promisc(adapter,
+					       port->portnum,
+					       NETXEN_NIU_NON_PROMISC_MODE);
+	}
+	if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
+		netxen_nic_mcr_set_mode_select(netxen_mac_addr_cntl_data, 0x03);
+		netxen_nic_mcr_set_id_pool0(netxen_mac_addr_cntl_data, 0x00);
+		netxen_nic_mcr_set_id_pool1(netxen_mac_addr_cntl_data, 0x00);
+		netxen_nic_mcr_set_id_pool2(netxen_mac_addr_cntl_data, 0x00);
+		netxen_nic_mcr_set_id_pool3(netxen_mac_addr_cntl_data, 0x00);
+		netxen_nic_mcr_set_enable_xtnd0(netxen_mac_addr_cntl_data);
+		netxen_nic_mcr_set_enable_xtnd1(netxen_mac_addr_cntl_data);
+		netxen_nic_mcr_set_enable_xtnd2(netxen_mac_addr_cntl_data);
+		netxen_nic_mcr_set_enable_xtnd3(netxen_mac_addr_cntl_data);
+	} else {
+		netxen_nic_mcr_set_mode_select(netxen_mac_addr_cntl_data, 0x00);
+		netxen_nic_mcr_set_id_pool0(netxen_mac_addr_cntl_data, 0x00);
+		netxen_nic_mcr_set_id_pool1(netxen_mac_addr_cntl_data, 0x01);
+		netxen_nic_mcr_set_id_pool2(netxen_mac_addr_cntl_data, 0x02);
+		netxen_nic_mcr_set_id_pool3(netxen_mac_addr_cntl_data, 0x03);
+	}
+	writel(netxen_mac_addr_cntl_data,
+	       NETXEN_CRB_NORMALIZE(adapter, NETXEN_MAC_ADDR_CNTL_REG));
+	if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
+		writel(netxen_mac_addr_cntl_data,
+		       NETXEN_CRB_NORMALIZE(adapter,
+					    NETXEN_MULTICAST_ADDR_HI_0));
+	} else {
+		writel(netxen_mac_addr_cntl_data,
+		       NETXEN_CRB_NORMALIZE(adapter,
+					    NETXEN_MULTICAST_ADDR_HI_1));
+	}
+	netxen_mac_addr_cntl_data = 0;
+	writel(netxen_mac_addr_cntl_data,
+	       NETXEN_CRB_NORMALIZE(adapter, NETXEN_NIU_GB_DROP_WRONGADDR));
+}
+
+/*
+ * netxen_nic_change_mtu - Change the Maximum Transmission Unit (MTU)
+ * @returns 0 on success, negative on failure
+ */
+int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
+{
+	struct netxen_port *port = netdev_priv(netdev);
+	struct netxen_adapter *adapter = port->adapter;
+	int eff_mtu = mtu + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE;
+
+	if ((eff_mtu > NETXEN_MAX_MTU) || (eff_mtu < NETXEN_MIN_MTU)) {
+		printk(KERN_ERR "%s: %s: MTU %d is not supported.\n",
+		       netxen_nic_driver_name, netdev->name, mtu);
+		return -EINVAL;
+	}
+
+	if (adapter->set_mtu)
+		adapter->set_mtu(port, mtu);
+	netdev->mtu = mtu;
+
+	return 0;
+}
+
+/*
+ * Check that the firmware has been downloaded and is ready to run, and
+ * set up the descriptor addresses in the adapter.
+ */
+int netxen_nic_hw_resources(struct netxen_adapter *adapter)
+{
+	struct netxen_hardware_context *hw = &adapter->ahw;
+	u32 state = 0;
+	void *addr;
+	int loops = 0, err = 0;
+	int ctx, ring;
+	u32 card_cmdring = 0;
+	struct netxen_recv_context *recv_ctx;
+	struct netxen_rcv_desc_ctx *rcv_desc;
+
+	DPRINTK(INFO, "crb_base: %lx %x", NETXEN_PCI_CRBSPACE,
+		PCI_OFFSET_SECOND_RANGE(adapter, NETXEN_PCI_CRBSPACE));
+	DPRINTK(INFO, "cam base: %lx %x", NETXEN_CRB_CAM,
+		pci_base_offset(adapter, NETXEN_CRB_CAM));
+	DPRINTK(INFO, "cam RAM: %lx %x", NETXEN_CAM_RAM_BASE,
+		pci_base_offset(adapter, NETXEN_CAM_RAM_BASE));
+
+	/* Window 1 call */
+	card_cmdring = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_CMDRING));
+
+	DPRINTK(INFO, "Command Peg sends 0x%x for cmdring base\n",
+		card_cmdring);
+
+	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+		DPRINTK(INFO, "Command Peg ready..waiting for rcv peg\n");
+		loops = 0;
+		state = 0;
+		/* Window 1 call */
+		state = readl(NETXEN_CRB_NORMALIZE(adapter,
+						   recv_crb_registers[ctx].
+						   crb_rcvpeg_state));
+		while (state != PHAN_PEG_RCV_INITIALIZED && loops < 20) {
+			udelay(100);
+			/* Window 1 call */
+			state = readl(NETXEN_CRB_NORMALIZE(adapter,
+							   recv_crb_registers
+							   [ctx].
+							   crb_rcvpeg_state));
+			loops++;
+		}
+		if (loops >= 20) {
+			printk(KERN_ERR "Rcv Peg initialization not complete: "
+			       "0x%x.\n", state);
+			err = -EIO;
+			return err;
+		}
+	}
+	DPRINTK(INFO, "Receive Peg ready too. starting stuff\n");
+
+	addr = netxen_alloc(adapter->ahw.pdev,
+			    sizeof(struct netxen_ring_ctx) +
+			    sizeof(uint32_t),
+			    (dma_addr_t *) & adapter->ctx_desc_phys_addr,
+			    &adapter->ctx_desc_pdev);
+
+	printk("ctx_desc_phys_addr: 0x%llx\n",
+	       (u64) adapter->ctx_desc_phys_addr);
+	if (addr == NULL) {
+		DPRINTK(ERR, "bad return from pci_alloc_consistent\n");
+		err = -ENOMEM;
+		return err;
+	}
+	memset(addr, 0, sizeof(struct netxen_ring_ctx));
+	adapter->ctx_desc = (struct netxen_ring_ctx *)addr;
+	adapter->ctx_desc->cmd_consumer_offset = adapter->ctx_desc_phys_addr
+	    + sizeof(struct netxen_ring_ctx);
+	adapter->cmd_consumer = (uint32_t *) (((char *)addr) +
+					      sizeof(struct netxen_ring_ctx));
+
+	addr = pci_alloc_consistent(adapter->ahw.pdev,
+				    sizeof(struct cmd_desc_type0) *
+				    adapter->max_tx_desc_count,
+				    (dma_addr_t *) & hw->cmd_desc_phys_addr);
+	printk("cmd_desc_phys_addr: 0x%llx\n", (u64) hw->cmd_desc_phys_addr);
+
+	if (addr == NULL) {
+		DPRINTK(ERR, "bad return from pci_alloc_consistent\n");
+		netxen_free_hw_resources(adapter);
+		return -ENOMEM;
+	}
+
+	adapter->ctx_desc->cmd_ring_addr_lo =
+	    hw->cmd_desc_phys_addr & 0xffffffffUL;
+	adapter->ctx_desc->cmd_ring_addr_hi =
+	    ((u64) hw->cmd_desc_phys_addr >> 32);
+	adapter->ctx_desc->cmd_ring_size = adapter->max_tx_desc_count;
+
+	hw->cmd_desc_head = (struct cmd_desc_type0 *)addr;
+
+	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+		recv_ctx = &adapter->recv_ctx[ctx];
+
+		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
+			rcv_desc = &recv_ctx->rcv_desc[ring];
+			addr = netxen_alloc(adapter->ahw.pdev,
+					    RCV_DESC_RINGSIZE,
+					    &rcv_desc->phys_addr,
+					    &rcv_desc->phys_pdev);
+			if (addr == NULL) {
+				DPRINTK(ERR, "bad return from "
+					"pci_alloc_consistent\n");
+				netxen_free_hw_resources(adapter);
+				err = -ENOMEM;
+				return err;
+			}
+			rcv_desc->desc_head = (struct rcv_desc *)addr;
+			adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr_lo =
+			    rcv_desc->phys_addr & 0xffffffffUL;
+			adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr_hi =
+			    ((u64) rcv_desc->phys_addr >> 32);
+			adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size =
+			    rcv_desc->max_rx_desc_count;
+		}
+
+		addr = netxen_alloc(adapter->ahw.pdev, STATUS_DESC_RINGSIZE,
+				    &recv_ctx->rcv_status_desc_phys_addr,
+				    &recv_ctx->rcv_status_desc_pdev);
+		if (addr == NULL) {
+			DPRINTK(ERR, "bad return from"
+				" pci_alloc_consistent\n");
+			netxen_free_hw_resources(adapter);
+			err = -ENOMEM;
+			return err;
+		}
+		recv_ctx->rcv_status_desc_head = (struct status_desc *)addr;
+		adapter->ctx_desc->sts_ring_addr_lo =
+		    recv_ctx->rcv_status_desc_phys_addr & 0xffffffffUL;
+		adapter->ctx_desc->sts_ring_addr_hi =
+		    ((u64) recv_ctx->rcv_status_desc_phys_addr >> 32);
+		adapter->ctx_desc->sts_ring_size = adapter->max_rx_desc_count;
+
+	}
+	/* Window = 1 */
+
+	writel(lower32(adapter->ctx_desc_phys_addr),
+	       NETXEN_CRB_NORMALIZE(adapter, CRB_CTX_ADDR_REG_LO));
+	writel(upper32(adapter->ctx_desc_phys_addr),
+	       NETXEN_CRB_NORMALIZE(adapter, CRB_CTX_ADDR_REG_HI));
+	writel(NETXEN_CTX_SIGNATURE,
+	       NETXEN_CRB_NORMALIZE(adapter, CRB_CTX_SIGNATURE_REG));
+	return err;
+}
+
+void netxen_free_hw_resources(struct netxen_adapter *adapter)
+{
+	struct netxen_recv_context *recv_ctx;
+	struct netxen_rcv_desc_ctx *rcv_desc;
+	int ctx, ring;
+
+	if (adapter->ctx_desc != NULL) {
+		pci_free_consistent(adapter->ctx_desc_pdev,
+				    sizeof(struct netxen_ring_ctx) +
+				    sizeof(uint32_t),
+				    adapter->ctx_desc,
+				    adapter->ctx_desc_phys_addr);
+		adapter->ctx_desc = NULL;
+	}
+
+	if (adapter->ahw.cmd_desc_head != NULL) {
+		pci_free_consistent(adapter->ahw.cmd_desc_pdev,
+				    sizeof(struct cmd_desc_type0) *
+				    adapter->max_tx_desc_count,
+				    adapter->ahw.cmd_desc_head,
+				    adapter->ahw.cmd_desc_phys_addr);
+		adapter->ahw.cmd_desc_head = NULL;
+	}
+	/* Special handling: there are 2 ports on this board */
+	if (adapter->ahw.boardcfg.board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) {
+		adapter->ahw.max_ports = 2;
+	}
+
+	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+		recv_ctx = &adapter->recv_ctx[ctx];
+		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
+			rcv_desc = &recv_ctx->rcv_desc[ring];
+
+			if (rcv_desc->desc_head != NULL) {
+				pci_free_consistent(rcv_desc->phys_pdev,
+						    RCV_DESC_RINGSIZE,
+						    rcv_desc->desc_head,
+						    rcv_desc->phys_addr);
+				rcv_desc->desc_head = NULL;
+			}
+		}
+
+		if (recv_ctx->rcv_status_desc_head != NULL) {
+			pci_free_consistent(recv_ctx->rcv_status_desc_pdev,
+					    STATUS_DESC_RINGSIZE,
+					    recv_ctx->rcv_status_desc_head,
+					    recv_ctx->
+					    rcv_status_desc_phys_addr);
+			recv_ctx->rcv_status_desc_head = NULL;
+		}
+	}
+}
+
+void netxen_tso_check(struct netxen_adapter *adapter,
+		      struct cmd_desc_type0 *desc, struct sk_buff *skb)
+{
+	if (desc->mss) {
+		desc->total_hdr_length = sizeof(struct ethhdr) +
+		    ((skb->nh.iph)->ihl * sizeof(u32)) +
+		    ((skb->h.th)->doff * sizeof(u32));
+		netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO);
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		if (skb->nh.iph->protocol == IPPROTO_TCP) {
+			netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT);
+		} else if (skb->nh.iph->protocol == IPPROTO_UDP) {
+			netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT);
+		} else {
+			return;
+		}
+	}
+	adapter->stats.xmitcsummed++;
+	desc->tcp_hdr_offset = skb->h.raw - skb->data;
+	netxen_set_cmd_desc_totallength(desc,
+					cpu_to_le32
+					(netxen_get_cmd_desc_totallength
+					 (desc)));
+	desc->ip_hdr_offset = skb->nh.raw - skb->data;
+}
+
+int netxen_is_flash_supported(struct netxen_adapter *adapter)
+{
+	const int locs[] = { 0, 0x4, 0x100, 0x4000, 0x4128 };
+	int addr, val01, val02, i, j;
+
+	/* if the flash size is less than 4Mb, make a huge war cry and die */
+	for (j = 1; j < 4; j++) {
+		addr = j * NETXEN_NIC_WINDOW_MARGIN;
+		for (i = 0; i < (sizeof(locs) / sizeof(locs[0])); i++) {
+			if (netxen_rom_fast_read(adapter, locs[i], &val01) == 0
+			    && netxen_rom_fast_read(adapter, (addr + locs[i]),
+						    &val02) == 0) {
+				if (val01 == val02)
+					return -1;
+			} else
+				return -1;
+		}
+	}
+
+	return 0;
+}
+
+static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
+				  int size, u32 * buf)
+{
+	int i, addr;
+	u32 *ptr32;
+
+	addr = base;
+	ptr32 = buf;
+	for (i = 0; i < size / sizeof(u32); i++) {
+		if (netxen_rom_fast_read(adapter, addr, ptr32) == -1)
+			return -1;
+		ptr32++;
+		addr += sizeof(u32);
+	}
+	if ((char *)buf + size > (char *)ptr32) {
+		u32 local;
+
+		if (netxen_rom_fast_read(adapter, addr, &local) == -1)
+			return -1;
+		memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32);
+	}
+
+	return 0;
+}
+
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 mac[])
+{
+	u32 *pmac = (u32 *) & mac[0];
+
+	if (netxen_get_flash_block(adapter,
+				   USER_START +
+				   offsetof(struct netxen_new_user_info,
+					    mac_addr),
+				   FLASH_NUM_PORTS * sizeof(u64), pmac) == -1) {
+		return -1;
+	}
+	if (*mac == ~0ULL) {
+		if (netxen_get_flash_block(adapter,
+					   USER_START_OLD +
+					   offsetof(struct netxen_user_old_info,
+						    mac_addr),
+					   FLASH_NUM_PORTS * sizeof(u64),
+					   pmac) == -1)
+			return -1;
+		if (*mac == ~0ULL)
+			return -1;
+	}
+	return 0;
+}
+
+/*
+ * Changes the CRB window to the specified window.
+ */
+void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw)
+{
+	void __iomem *offset;
+	u32 tmp;
+	int count = 0;
+
+	if (adapter->curr_window == wndw)
+		return;
+
+	/*
+	 * Move the CRB window.
+	 * We need to write to the "direct access" region of PCI
+	 * to avoid a race condition where the window register has
+	 * not been successfully written across CRB before the target
+	 * register address is received by PCI. The direct region bypasses
+	 * the CRB bus.
+	 */
+	offset =
+	    PCI_OFFSET_SECOND_RANGE(adapter,
+				    NETXEN_PCIX_PH_REG(PCIX_CRB_WINDOW));
+
+	if (wndw & 0x1)
+		wndw = NETXEN_WINDOW_ONE;
+
+	writel(wndw, offset);
+
+	/* MUST make sure window is set before we forge on... */
+	while ((tmp = readl(offset)) != wndw) {
+		printk(KERN_WARNING "%s: %s WARNING: CRB window value not "
+		       "registered properly: 0x%08x.\n",
+		       netxen_nic_driver_name, __FUNCTION__, tmp);
+		mdelay(1);
+		if (count >= 10)
+			break;
+		count++;
+	}
+
+	adapter->curr_window = wndw;
+}
+
+void netxen_load_firmware(struct netxen_adapter *adapter)
+{
+	int i;
+	long data, size = 0;
+	long flashaddr = NETXEN_FLASH_BASE, memaddr = NETXEN_PHANTOM_MEM_BASE;
+	u64 off;
+	void __iomem *addr;
+
+	size = NETXEN_FIRMWARE_LEN;
+	writel(1, NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_CAS_RST));
+
+	for (i = 0; i < size; i++) {
+		if (netxen_rom_fast_read(adapter, flashaddr, (int *)&data) != 0) {
+			DPRINTK(ERR,
+				"Error in netxen_rom_fast_read(). Will skip"
+				"loading flash image\n");
+			return;
+		}
+		off = netxen_nic_pci_set_window(adapter, memaddr);
+		addr = pci_base_offset(adapter, off);
+		writel(data, addr);
+		flashaddr += 4;
+		memaddr += 4;
+	}
+	udelay(100);
+	/* make sure Casper is powered on */
+	writel(0x3fff,
+	       NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL));
+	writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_CAS_RST));
+
+	udelay(100);
+}
+
+int
+netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data,
+		       int len)
+{
+	void __iomem *addr;
+
+	if (ADDR_IN_WINDOW1(off)) {
+		addr = NETXEN_CRB_NORMALIZE(adapter, off);
+	} else {		/* Window 0 */
+		addr = pci_base_offset(adapter, off);
+		netxen_nic_pci_change_crbwindow(adapter, 0);
+	}
+
+	DPRINTK(INFO, "writing to base %lx offset %llx addr %p"
+		" data %llx len %d\n",
+		pci_base(adapter, off), off, addr,
+		*(unsigned long long *)data, len);
+	if (!addr) {
+		netxen_nic_pci_change_crbwindow(adapter, 1);
+		return 1;
+	}
+
+	switch (len) {
+	case 1:
+		writeb(*(u8 *) data, addr);
+		break;
+	case 2:
+		writew(*(u16 *) data, addr);
+		break;
+	case 4:
+		writel(*(u32 *) data, addr);
+		break;
+	case 8:
+		writeq(*(u64 *) data, addr);
+		break;
+	default:
+		DPRINTK(INFO,
+			"writing data %lx to offset %llx, num words=%d\n",
+			*(unsigned long *)data, off, (len >> 3));
+
+		netxen_nic_hw_block_write64((u64 __iomem *) data, addr,
+					    (len >> 3));
+		break;
+	}
+	if (!ADDR_IN_WINDOW1(off))
+		netxen_nic_pci_change_crbwindow(adapter, 1);
+
+	return 0;
+}
+
+int
+netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data,
+		      int len)
+{
+	void __iomem *addr;
+
+	if (ADDR_IN_WINDOW1(off)) {	/* Window 1 */
+		addr = NETXEN_CRB_NORMALIZE(adapter, off);
+	} else {		/* Window 0 */
+		addr = pci_base_offset(adapter, off);
+		netxen_nic_pci_change_crbwindow(adapter, 0);
+	}
+
+	DPRINTK(INFO, "reading from base %lx offset %llx addr %p\n",
+		pci_base(adapter, off), off, addr);
+	if (!addr) {
+		netxen_nic_pci_change_crbwindow(adapter, 1);
+		return 1;
+	}
+	switch (len) {
+	case 1:
+		*(u8 *) data = readb(addr);
+		break;
+	case 2:
+		*(u16 *) data = readw(addr);
+		break;
+	case 4:
+		*(u32 *) data = readl(addr);
+		break;
+	case 8:
+		*(u64 *) data = readq(addr);
+		break;
+	default:
+		netxen_nic_hw_block_read64((u64 __iomem *) data, addr,
+					   (len >> 3));
+		break;
+	}
+	DPRINTK(INFO, "read %lx\n", *(unsigned long *)data);
+
+	if (!ADDR_IN_WINDOW1(off))
+		netxen_nic_pci_change_crbwindow(adapter, 1);
+
+	return 0;
+}
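+
+/*
+ * Illustrative usage sketch (not part of the original patch): a 4-byte CRB
+ * register access through the accessors above, as a caller might issue it.
+ * The register offset CRB_CMDPEG_STATE (used elsewhere in this driver) and
+ * the value written are only examples.
+ *
+ *	u32 val = PHAN_INITIALIZE_ACK;
+ *
+ *	netxen_nic_hw_write_wx(adapter, CRB_CMDPEG_STATE, &val, 4);
+ *	netxen_nic_hw_read_wx(adapter, CRB_CMDPEG_STATE, &val, 4);
+ */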
+
+void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val)
+{				/* Only for window 1 */
+	void __iomem *addr;
+
+	addr = NETXEN_CRB_NORMALIZE(adapter, off);
+	DPRINTK(INFO, "writing to base %lx offset %llx addr %p data %x\n",
+		pci_base(adapter, off), off, addr, val);
+	writel(val, addr);
+
+}
+
+int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off)
+{				/* Only for window 1 */
+	void __iomem *addr;
+	int val;
+
+	addr = NETXEN_CRB_NORMALIZE(adapter, off);
+	DPRINTK(INFO, "reading from base %lx offset %llx addr %p\n",
+		pci_base(adapter, off), off, addr);
+	val = readl(addr);
+	writel(val, addr);
+
+	return val;
+}
+
+/* Change the window to 0, write and change back to window 1. */
+void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value)
+{
+	void __iomem *addr;
+
+	netxen_nic_pci_change_crbwindow(adapter, 0);
+	addr = pci_base_offset(adapter, index);
+	writel(value, addr);
+	netxen_nic_pci_change_crbwindow(adapter, 1);
+}
+
+/* Change the window to 0, read and change back to window 1. */
+void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 * value)
+{
+	void __iomem *addr;
+
+	addr = pci_base_offset(adapter, index);
+
+	netxen_nic_pci_change_crbwindow(adapter, 0);
+	*value = readl(addr);
+	netxen_nic_pci_change_crbwindow(adapter, 1);
+}
+
+int netxen_pci_set_window_warning_count = 0;
+
+unsigned long
+netxen_nic_pci_set_window(struct netxen_adapter *adapter,
+			  unsigned long long addr)
+{
+	static int ddr_mn_window = -1;
+	static int qdr_sn_window = -1;
+	int window;
+
+	if (ADDR_IN_RANGE(addr, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+		/* DDR network side */
+		addr -= NETXEN_ADDR_DDR_NET;
+		window = (addr >> 25) & 0x3ff;
+		if (ddr_mn_window != window) {
+			ddr_mn_window = window;
+			writel(window, PCI_OFFSET_SECOND_RANGE(adapter,
+							       NETXEN_PCIX_PH_REG
+							       (PCIX_MN_WINDOW)));
+			/* MUST make sure window is set before we forge on... */
+			readl(PCI_OFFSET_SECOND_RANGE(adapter,
+						      NETXEN_PCIX_PH_REG
+						      (PCIX_MN_WINDOW)));
+		}
+		addr -= (window * NETXEN_WINDOW_ONE);
+		addr += NETXEN_PCI_DDR_NET;
+	} else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
+		addr -= NETXEN_ADDR_OCM0;
+		addr += NETXEN_PCI_OCM0;
+	} else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+		addr -= NETXEN_ADDR_OCM1;
+		addr += NETXEN_PCI_OCM1;
+	} else
+	    if (ADDR_IN_RANGE
+		(addr, NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX)) {
+		/* QDR network side */
+		addr -= NETXEN_ADDR_QDR_NET;
+		window = (addr >> 22) & 0x3f;
+		if (qdr_sn_window != window) {
+			qdr_sn_window = window;
+			writel((window << 22),
+			       PCI_OFFSET_SECOND_RANGE(adapter,
+						       NETXEN_PCIX_PH_REG
+						       (PCIX_SN_WINDOW)));
+			/* MUST make sure window is set before we forge on... */
+			readl(PCI_OFFSET_SECOND_RANGE(adapter,
+						      NETXEN_PCIX_PH_REG
+						      (PCIX_SN_WINDOW)));
+		}
+		addr -= (window * 0x400000);
+		addr += NETXEN_PCI_QDR_NET;
+	} else {
+		/*
+		 * peg gdb frequently accesses memory that doesn't exist,
+		 * this limits the chit chat so debugging isn't slowed down.
+		 */
+		if ((netxen_pci_set_window_warning_count++ < 8)
+		    || (netxen_pci_set_window_warning_count % 64 == 0))
+			printk("%s: Warning:netxen_nic_pci_set_window()"
+			       " Unknown address range!\n",
+			       netxen_nic_driver_name);
+
+	}
+	return addr;
+}
+
+int netxen_nic_get_board_info(struct netxen_adapter *adapter)
+{
+	int rv = 0;
+	int addr = BRDCFG_START;
+	struct netxen_board_info *boardinfo;
+	int index;
+	u32 *ptr32;
+
+	boardinfo = &adapter->ahw.boardcfg;
+	ptr32 = (u32 *) boardinfo;
+
+	for (index = 0; index < sizeof(struct netxen_board_info) / sizeof(u32);
+	     index++) {
+		if (netxen_rom_fast_read(adapter, addr, ptr32) == -1) {
+			return -EIO;
+		}
+		ptr32++;
+		addr += sizeof(u32);
+	}
+	if (boardinfo->magic != NETXEN_BDINFO_MAGIC) {
+		printk("%s: ERROR reading %s board config."
+		       " Read %x, expected %x\n", netxen_nic_driver_name,
+		       netxen_nic_driver_name,
+		       boardinfo->magic, NETXEN_BDINFO_MAGIC);
+		rv = -1;
+	}
+	if (boardinfo->header_version != NETXEN_BDINFO_VERSION) {
+		printk("%s: Unknown board config version."
+		       " Read %x, expected %x\n", netxen_nic_driver_name,
+		       boardinfo->header_version, NETXEN_BDINFO_VERSION);
+		rv = -1;
+	}
+
+	DPRINTK(INFO, "Discovered board type:0x%x  ", boardinfo->board_type);
+	switch ((netxen_brdtype_t) boardinfo->board_type) {
+	case NETXEN_BRDTYPE_P2_SB35_4G:
+		adapter->ahw.board_type = NETXEN_NIC_GBE;
+		break;
+	case NETXEN_BRDTYPE_P2_SB31_10G:
+	case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+	case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+	case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+		adapter->ahw.board_type = NETXEN_NIC_XGBE;
+		break;
+	case NETXEN_BRDTYPE_P1_BD:
+	case NETXEN_BRDTYPE_P1_SB:
+	case NETXEN_BRDTYPE_P1_SMAX:
+	case NETXEN_BRDTYPE_P1_SOCK:
+		adapter->ahw.board_type = NETXEN_NIC_GBE;
+		break;
+	default:
+		printk("%s: Unknown(%x)\n", netxen_nic_driver_name,
+		       boardinfo->board_type);
+		break;
+	}
+
+	return rv;
+}
+
+/* NIU access sections */
+
+int netxen_nic_set_mtu_gb(struct netxen_port *port, int new_mtu)
+{
+	struct netxen_adapter *adapter = port->adapter;
+	netxen_nic_write_w0(adapter,
+			    NETXEN_NIU_GB_MAX_FRAME_SIZE(port->portnum),
+			    new_mtu);
+	return 0;
+}
+
+int netxen_nic_set_mtu_xgb(struct netxen_port *port, int new_mtu)
+{
+	struct netxen_adapter *adapter = port->adapter;
+	new_mtu += NETXEN_NIU_HDRSIZE + NETXEN_NIU_TLRSIZE;
+	netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu);
+	return 0;
+}
+
+void netxen_nic_init_niu_gb(struct netxen_adapter *adapter)
+{
+	int portno;
+	for (portno = 0; portno < NETXEN_NIU_MAX_GBE_PORTS; portno++)
+		netxen_niu_gbe_init_port(adapter, portno);
+}
+
+void netxen_nic_stop_all_ports(struct netxen_adapter *adapter)
+{
+	int port_nr;
+	struct netxen_port *port;
+
+	for (port_nr = 0; port_nr < adapter->ahw.max_ports; port_nr++) {
+		port = adapter->port[port_nr];
+		if (adapter->stop_port)
+			adapter->stop_port(adapter, port->portnum);
+	}
+}
+
+void
+netxen_crb_writelit_adapter(struct netxen_adapter *adapter, unsigned long off,
+			    int data)
+{
+	void __iomem *addr;
+
+	if (ADDR_IN_WINDOW1(off)) {
+		writel(data, NETXEN_CRB_NORMALIZE(adapter, off));
+	} else {
+		netxen_nic_pci_change_crbwindow(adapter, 0);
+		addr = pci_base_offset(adapter, off);
+		writel(data, addr);
+		netxen_nic_pci_change_crbwindow(adapter, 1);
+	}
+}
+
+void netxen_nic_set_link_parameters(struct netxen_port *port)
+{
+	struct netxen_adapter *adapter = port->adapter;
+	__le32 status;
+	__le32 autoneg;
+	__le32 mode;
+
+	netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode);
+	if (netxen_get_niu_enable_ge(mode)) {	/* Gb 10/100/1000 Mbps mode */
+		if (adapter->phy_read
+		    && adapter->
+		    phy_read(adapter, port->portnum,
+			     NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+			     &status) == 0) {
+			if (netxen_get_phy_link(status)) {
+				switch (netxen_get_phy_speed(status)) {
+				case 0:
+					port->link_speed = SPEED_10;
+					break;
+				case 1:
+					port->link_speed = SPEED_100;
+					break;
+				case 2:
+					port->link_speed = SPEED_1000;
+					break;
+				default:
+					port->link_speed = -1;
+					break;
+				}
+				switch (netxen_get_phy_duplex(status)) {
+				case 0:
+					port->link_duplex = DUPLEX_HALF;
+					break;
+				case 1:
+					port->link_duplex = DUPLEX_FULL;
+					break;
+				default:
+					port->link_duplex = -1;
+					break;
+				}
+				if (adapter->phy_read
+				    && adapter->
+				    phy_read(adapter, port->portnum,
+					     NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+					     &autoneg) == 0)
+					port->link_autoneg = autoneg;
+			} else
+				goto link_down;
+		} else {
+		      link_down:
+			port->link_speed = -1;
+			port->link_duplex = -1;
+		}
+	}
+}
+
+void netxen_nic_flash_print(struct netxen_adapter *adapter)
+{
+	int valid = 1;
+	u32 fw_major = 0;
+	u32 fw_minor = 0;
+	u32 fw_build = 0;
+	char brd_name[NETXEN_MAX_SHORT_NAME];
+	struct netxen_new_user_info user_info;
+	int i, addr = USER_START;
+	u32 *ptr32;
+
+	struct netxen_board_info *board_info = &(adapter->ahw.boardcfg);
+	if (board_info->magic != NETXEN_BDINFO_MAGIC) {
+		printk
+		    ("NetXen Unknown board config, Read 0x%x expected as 0x%x\n",
+		     board_info->magic, NETXEN_BDINFO_MAGIC);
+		valid = 0;
+	}
+	if (board_info->header_version != NETXEN_BDINFO_VERSION) {
+		printk("NetXen Unknown board config version."
+		       " Read %x, expected %x\n",
+		       board_info->header_version, NETXEN_BDINFO_VERSION);
+		valid = 0;
+	}
+	if (valid) {
+		ptr32 = (u32 *) & user_info;
+		for (i = 0;
+		     i < sizeof(struct netxen_new_user_info) / sizeof(u32);
+		     i++) {
+			if (netxen_rom_fast_read(adapter, addr, ptr32) == -1) {
+				printk("%s: ERROR reading %s board userarea.\n",
+				       netxen_nic_driver_name,
+				       netxen_nic_driver_name);
+				return;
+			}
+			ptr32++;
+			addr += sizeof(u32);
+		}
+		get_brd_name_by_type(board_info->board_type, brd_name);
+
+		printk("NetXen %s Board S/N %s  Chip id 0x%x\n",
+		       brd_name, user_info.serial_num, board_info->chip_id);
+
+		printk("NetXen %s Board #%d, Chip id 0x%x\n",
+		       board_info->board_type == 0x0b ? "XGB" : "GBE",
+		       board_info->board_num, board_info->chip_id);
+		fw_major = readl(NETXEN_CRB_NORMALIZE(adapter,
+						      NETXEN_FW_VERSION_MAJOR));
+		fw_minor = readl(NETXEN_CRB_NORMALIZE(adapter,
+						      NETXEN_FW_VERSION_MINOR));
+		fw_build =
+		    readl(NETXEN_CRB_NORMALIZE(adapter, NETXEN_FW_VERSION_SUB));
+
+		printk("NetXen Firmware version %d.%d.%d\n", fw_major, fw_minor,
+		       fw_build);
+	}
+	if (fw_major != _NETXEN_NIC_LINUX_MAJOR) {
+		printk(KERN_ERR "The mismatch in driver version and firmware "
+		       "version major number\n"
+		       "Driver version major number = %d \t"
+		       "Firmware version major number = %d \n",
+		       _NETXEN_NIC_LINUX_MAJOR, fw_major);
+		adapter->driver_mismatch = 1;
+	}
+	if (fw_minor != _NETXEN_NIC_LINUX_MINOR) {
+		printk(KERN_ERR "The mismatch in driver version and firmware "
+		       "version minor number\n"
+		       "Driver version minor number = %d \t"
+		       "Firmware version minor number = %d \n",
+		       _NETXEN_NIC_LINUX_MINOR, fw_minor);
+		adapter->driver_mismatch = 1;
+	}
+	if (adapter->driver_mismatch)
+		printk(KERN_INFO "Use the driver with version no %d.%d.xxx\n",
+		       fw_major, fw_minor);
+}
+
+int netxen_crb_read_val(struct netxen_adapter *adapter, unsigned long off)
+{
+	int data;
+	netxen_nic_hw_read_wx(adapter, off, &data, 4);
+	return data;
+}
+
+int netxen_nic_hw_write_ioctl(struct netxen_adapter *adapter, u64 off,
+			      void *data, int len)
+{
+	void *addr;
+	u64 offset = off;
+	u8 *mem_ptr = NULL;
+	unsigned long mem_base;
+	unsigned long mem_page;
+
+	if (ADDR_IN_WINDOW1(off)) {
+		addr = NETXEN_CRB_NORMALIZE(adapter, off);
+		if (!addr) {
+			mem_base = pci_resource_start(adapter->ahw.pdev, 0);
+			offset = NETXEN_CRB_NORMAL(off);
+			mem_page = offset & PAGE_MASK;
+			if (mem_page != ((offset + len - 1) & PAGE_MASK))
+				mem_ptr =
+				    ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+			else
+				mem_ptr =
+				    ioremap(mem_base + mem_page, PAGE_SIZE);
+			if (mem_ptr == 0UL) {
+				return 1;
+			}
+			addr = mem_ptr;
+			addr += offset & (PAGE_SIZE - 1);
+		}
+	} else {
+		addr = pci_base_offset(adapter, off);
+		if (!addr) {
+			mem_base = pci_resource_start(adapter->ahw.pdev, 0);
+			mem_page = off & PAGE_MASK;
+			if (mem_page != ((off + len - 1) & PAGE_MASK))
+				mem_ptr =
+				    ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+			else
+				mem_ptr =
+				    ioremap(mem_base + mem_page, PAGE_SIZE);
+			if (mem_ptr == 0UL) {
+				return 1;
+			}
+			addr = mem_ptr;
+			addr += off & (PAGE_SIZE - 1);
+		}
+		netxen_nic_pci_change_crbwindow(adapter, 0);
+	}
+	switch (len) {
+	case 1:
+		writeb(*(u8 *) data, addr);
+		break;
+	case 2:
+		writew(*(u16 *) data, addr);
+		break;
+	case 4:
+		writel(*(u32 *) data, addr);
+		break;
+	case 8:
+		writeq(*(u64 *) data, addr);
+		break;
+	default:
+		DPRINTK(INFO,
+			"writing data %lx to offset %llx, num words=%d\n",
+			*(unsigned long *)data, off, (len >> 3));
+
+		netxen_nic_hw_block_write64((u64 __iomem *) data, addr,
+					    (len >> 3));
+		break;
+	}
+
+	if (!ADDR_IN_WINDOW1(off))
+		netxen_nic_pci_change_crbwindow(adapter, 1);
+	if (mem_ptr)
+		iounmap(mem_ptr);
+	return 0;
+}
+
+int netxen_nic_hw_read_ioctl(struct netxen_adapter *adapter, u64 off,
+			     void *data, int len)
+{
+	void *addr;
+	u64 offset;
+	u8 *mem_ptr = NULL;
+	unsigned long mem_base;
+	unsigned long mem_page;
+
+	if (ADDR_IN_WINDOW1(off)) {
+		addr = NETXEN_CRB_NORMALIZE(adapter, off);
+		if (!addr) {
+			mem_base = pci_resource_start(adapter->ahw.pdev, 0);
+			offset = NETXEN_CRB_NORMAL(off);
+			mem_page = offset & PAGE_MASK;
+			if (mem_page != ((offset + len - 1) & PAGE_MASK))
+				mem_ptr =
+				    ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+			else
+				mem_ptr =
+				    ioremap(mem_base + mem_page, PAGE_SIZE);
+			if (mem_ptr == 0UL) {
+				*(u8 *) data = 0;
+				return 1;
+			}
+			addr = mem_ptr;
+			addr += offset & (PAGE_SIZE - 1);
+		}
+	} else {
+		addr = pci_base_offset(adapter, off);
+		if (!addr) {
+			mem_base = pci_resource_start(adapter->ahw.pdev, 0);
+			mem_page = off & PAGE_MASK;
+			if (mem_page != ((off + len - 1) & PAGE_MASK))
+				mem_ptr =
+				    ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+			else
+				mem_ptr =
+				    ioremap(mem_base + mem_page, PAGE_SIZE);
+			if (mem_ptr == 0UL)
+				return 1;
+			addr = mem_ptr;
+			addr += off & (PAGE_SIZE - 1);
+		}
+		netxen_nic_pci_change_crbwindow(adapter, 0);
+	}
+	switch (len) {
+	case 1:
+		*(u8 *) data = readb(addr);
+		break;
+	case 2:
+		*(u16 *) data = readw(addr);
+		break;
+	case 4:
+		*(u32 *) data = readl(addr);
+		break;
+	case 8:
+		*(u64 *) data = readq(addr);
+		break;
+	default:
+		netxen_nic_hw_block_read64((u64 __iomem *) data, addr,
+					   (len >> 3));
+		break;
+	}
+	if (!ADDR_IN_WINDOW1(off))
+		netxen_nic_pci_change_crbwindow(adapter, 1);
+	if (mem_ptr)
+		iounmap(mem_ptr);
+	return 0;
+}
+
+int netxen_nic_pci_mem_write_ioctl(struct netxen_adapter *adapter, u64 off,
+				   void *data, int size)
+{
+	void *addr;
+	int ret = 0;
+	u8 *mem_ptr = NULL;
+	unsigned long mem_base;
+	unsigned long mem_page;
+
+	if (data == NULL || off > (128 * 1024 * 1024)) {
+		printk(KERN_ERR "%s: data: %p off:%llx\n",
+		       netxen_nic_driver_name, data, off);
+		return 1;
+	}
+	off = netxen_nic_pci_set_window(adapter, off);
+	/* Corner case: a malicious user could try to break the driver by
+	   accessing the last few bytes of a valid range and then accessing
+	   addresses beyond its end.
+	 */
+	if (!pci_base(adapter, off + size - 1) && pci_base(adapter, off)) {
+		printk(KERN_ERR "%s: Invalid access to memory address range"
+		       " 0x%llx - 0x%llx\n", netxen_nic_driver_name, off,
+		       off + size);
+		return 1;
+	}
+	addr = pci_base_offset(adapter, off);
+	DPRINTK(INFO, "writing data %llx to offset %llx\n",
+		*(unsigned long long *)data, off);
+	if (!addr) {
+		mem_base = pci_resource_start(adapter->ahw.pdev, 0);
+		mem_page = off & PAGE_MASK;
+		/* Map two pages whenever the access spans two consecutive
+		   pages.
+		 */
+		if (mem_page != ((off + size - 1) & PAGE_MASK))
+			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+		else
+			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+		if (mem_ptr == 0UL) {
+			return 1;
+		}
+		addr = mem_ptr;
+		addr += off & (PAGE_SIZE - 1);
+	}
+	switch (size) {
+	case 1:
+		writeb(*(u8 *) data, addr);
+		break;
+	case 2:
+		writew(*(u16 *) data, addr);
+		break;
+	case 4:
+		writel(*(u32 *) data, addr);
+		break;
+	case 8:
+		writeq(*(u64 *) data, addr);
+		break;
+	default:
+		DPRINTK(INFO,
+			"writing data %lx to offset %llx, num words=%d\n",
+			*(unsigned long *)data, off, (size >> 3));
+
+		netxen_nic_hw_block_write64((u64 __iomem *) data, addr,
+					    (size >> 3));
+		break;
+	}
+
+	if (mem_ptr)
+		iounmap(mem_ptr);
+	DPRINTK(INFO, "wrote %llx\n", *(unsigned long long *)data);
+
+	return ret;
+}
+
+int netxen_nic_pci_mem_read_ioctl(struct netxen_adapter *adapter,
+				  u64 off, void *data, int size)
+{
+	void *addr;
+	int ret = 0;
+	u8 *mem_ptr = NULL;
+	unsigned long mem_base;
+	unsigned long mem_page;
+
+	if (data == NULL || off > (128 * 1024 * 1024)) {
+		printk(KERN_ERR "%s: data: %p off:%llx\n",
+		       netxen_nic_driver_name, data, off);
+		return 1;
+	}
+	off = netxen_nic_pci_set_window(adapter, off);
+	/* Corner case: a malicious user could try to break the driver by
+	   accessing the last few bytes of a valid range and then accessing
+	   addresses beyond its end.
+	 */
+	if (!pci_base(adapter, off + size - 1) && pci_base(adapter, off)) {
+		printk(KERN_ERR "%s: Invalid access to memory address range"
+		       " 0x%llx - 0x%llx\n", netxen_nic_driver_name, off,
+		       off + size);
+		return 1;
+	}
+	addr = pci_base_offset(adapter, off);
+	if (!addr) {
+		mem_base = pci_resource_start(adapter->ahw.pdev, 0);
+		mem_page = off & PAGE_MASK;
+		/* Map two pages whenever the access spans two consecutive
+		   pages.
+		 */
+		if (mem_page != ((off + size - 1) & PAGE_MASK))
+			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+		else
+			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+		if (mem_ptr == 0UL) {
+			*(u8 *) data = 0;
+			return 1;
+		}
+		addr = mem_ptr;
+		addr += off & (PAGE_SIZE - 1);
+	}
+	switch (size) {
+	case 1:
+		*(u8 *) data = readb(addr);
+		break;
+	case 2:
+		*(u16 *) data = readw(addr);
+		break;
+	case 4:
+		*(u32 *) data = readl(addr);
+		break;
+	case 8:
+		*(u64 *) data = readq(addr);
+		break;
+	default:
+		netxen_nic_hw_block_read64((u64 __iomem *) data, addr,
+					   (size >> 3));
+		break;
+	}
+
+	if (mem_ptr)
+		iounmap(mem_ptr);
+	DPRINTK(INFO, "read %llx\n", *(unsigned long long *)data);
+
+	return ret;
+}
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
new file mode 100644
index 0000000..0685633
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -0,0 +1,482 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *                            
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *                                   
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ *    info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ *
+ *
+ * Structures, enums, and macros for the MAC
+ *
+ */
+
+#ifndef __NETXEN_NIC_HW_H_
+#define __NETXEN_NIC_HW_H_
+
+#include "netxen_nic_hdr.h"
+
+/* Hardware memory size of 128 meg */
+#define NETXEN_MEMADDR_MAX (128 * 1024 * 1024)
+
+#ifndef readq
+static inline u64 readq(void __iomem * addr)
+{
+	return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem * addr)
+{
+	writel(((u32) (val)), (addr));
+	writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
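+
+/*
+ * Note: the readq()/writeq() fallbacks above split each access into two
+ * 32-bit MMIO operations, so a 64-bit access is not atomic with respect to
+ * the device.  The block helpers below copy num_words 64-bit words between
+ * a host buffer and device memory using these accessors.
+ */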
+
+static inline void netxen_nic_hw_block_write64(u64 __iomem * data_ptr,
+					       u64 __iomem * addr,
+					       int num_words)
+{
+	int num;
+	for (num = 0; num < num_words; num++) {
+		writeq(readq((void __iomem *)data_ptr), addr);
+		addr++;
+		data_ptr++;
+	}
+}
+
+static inline void netxen_nic_hw_block_read64(u64 __iomem * data_ptr,
+					      u64 __iomem * addr, int num_words)
+{
+	int num;
+	for (num = 0; num < num_words; num++) {
+		writeq(readq((void __iomem *)addr), data_ptr);
+		addr++;
+		data_ptr++;
+	}
+
+}
+
+struct netxen_adapter;
+
+#define NETXEN_PCI_MAPSIZE_BYTES  (NETXEN_PCI_MAPSIZE << 20)
+
+#define NETXEN_NIC_LOCKED_READ_REG(X, Y)	\
+	do {	\
+		addr = pci_base_offset(adapter, X);	\
+		*(u32 *)Y = readl((void __iomem *)addr);	\
+	} while (0)
+
+struct netxen_port;
+void netxen_nic_set_link_parameters(struct netxen_port *port);
+void netxen_nic_flash_print(struct netxen_adapter *adapter);
+int netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off,
+			   void *data, int len);
+void netxen_crb_writelit_adapter(struct netxen_adapter *adapter,
+				 unsigned long off, int data);
+int netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off,
+			  void *data, int len);
+
+typedef u8 netxen_ethernet_macaddr_t[6];
+
+/* Nibble or Byte mode for phy interface (GbE mode only) */
+typedef enum {
+	NETXEN_NIU_10_100_MB = 0,
+	NETXEN_NIU_1000_MB
+} netxen_niu_gbe_ifmode_t;
+
+#define _netxen_crb_get_bit(var, bit)  (((var) >> (bit)) & 0x1)
+
+/*
+ * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
+ *
+ *	Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
+ *	Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
+ *	Bit 2 : enable_rx => 1:enable frame recv, 0:disable
+ *	Bit 3 : rx_synced => R/O: recv enable synched to recv stream
+ *	Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
+ *	Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
+ *	Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
+ *	Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
+ *	Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
+ *	Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
+ *	Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
+ *	Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
+ */
+
+#define netxen_gb_enable_tx(config_word)	\
+	set_bit(0, (unsigned long*)(&config_word))
+#define netxen_gb_enable_rx(config_word)	\
+	set_bit(2, (unsigned long*)(&config_word))
+#define netxen_gb_tx_flowctl(config_word)	\
+	set_bit(4, (unsigned long*)(&config_word))
+#define netxen_gb_rx_flowctl(config_word)	\
+	set_bit(5, (unsigned long*)(&config_word))
+#define netxen_gb_tx_reset_pb(config_word)	\
+		set_bit(16, (unsigned long*)(&config_word))
+#define netxen_gb_rx_reset_pb(config_word)	\
+		set_bit(17, (unsigned long*)(&config_word))
+#define netxen_gb_tx_reset_mac(config_word)	\
+		set_bit(18, (unsigned long*)(&config_word))
+#define netxen_gb_rx_reset_mac(config_word)	\
+		set_bit(19, (unsigned long*)(&config_word))
+#define netxen_gb_soft_reset(config_word)	\
+		set_bit(31, (unsigned long*)(&config_word))
+
+#define netxen_gb_unset_tx_flowctl(config_word)	\
+		clear_bit(4, (unsigned long *)(&config_word))
+#define netxen_gb_unset_rx_flowctl(config_word)	\
+		clear_bit(5, (unsigned long*)(&config_word))
+
+#define netxen_gb_get_tx_synced(config_word)	\
+		_netxen_crb_get_bit((config_word), 1)
+#define netxen_gb_get_rx_synced(config_word)	\
+		_netxen_crb_get_bit((config_word), 3)
+#define netxen_gb_get_tx_flowctl(config_word)	\
+		_netxen_crb_get_bit((config_word), 4)
+#define netxen_gb_get_rx_flowctl(config_word)	\
+		_netxen_crb_get_bit((config_word), 5)
+#define netxen_gb_get_soft_reset(config_word)	\
+		_netxen_crb_get_bit((config_word), 31)
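+
+/*
+ * Illustrative sketch, not part of the original patch: composing a GB MAC
+ * config word that enables frame xmit/recv with pause-frame handling, using
+ * the accessor macros above.  The helper name is hypothetical.
+ */
+static inline u32 netxen_gb_example_mac_config(void)
+{
+	u32 mac_cfg = 0;
+
+	netxen_gb_enable_tx(mac_cfg);	/* bit 0: enable frame xmit */
+	netxen_gb_enable_rx(mac_cfg);	/* bit 2: enable frame recv */
+	netxen_gb_tx_flowctl(mac_cfg);	/* bit 4: generate pause frames */
+	netxen_gb_rx_flowctl(mac_cfg);	/* bit 5: act on received pause frames */
+
+	return mac_cfg;
+}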
+
+/*
+ * NIU GB MAC Config Register 1 (applies to GB0, GB1, GB2, GB3)
+ *
+ *	Bit 0	    : duplex => 1:full duplex mode, 0:half duplex
+ *	Bit 1	    : crc_enable => 1:append CRC to xmit frames, 0:dont append
+ *	Bit 2	    : padshort => 1:pad short frames and add CRC, 0:dont pad
+ *	Bit 4	    : checklength => 1:check framelen with actual,0:dont check
+ *	Bit 5	    : hugeframes => 1:allow oversize xmit frames, 0:dont allow
+ *	Bits 8-9    : intfmode => 01:nibble (10/100), 10:byte (1000)
+ *	Bits 12-15  : preamblelen => preamble field length in bytes, default 7
+ */
+
+#define netxen_gb_set_duplex(config_word)	\
+		set_bit(0, (unsigned long*)&config_word)
+#define netxen_gb_set_crc_enable(config_word)	\
+		set_bit(1, (unsigned long*)&config_word)
+#define netxen_gb_set_padshort(config_word)	\
+		set_bit(2, (unsigned long*)&config_word)
+#define netxen_gb_set_checklength(config_word)	\
+		set_bit(4, (unsigned long*)&config_word)
+#define netxen_gb_set_hugeframes(config_word)	\
+		set_bit(5, (unsigned long*)&config_word)
+#define netxen_gb_set_preamblelen(config_word, val)	\
+		((config_word) |= ((val) << 12) & 0xF000)
+#define netxen_gb_set_intfmode(config_word, val)		\
+		((config_word) |= ((val) << 8) & 0x300)
+
+#define netxen_gb_get_stationaddress_low(config_word) ((config_word) >> 16)
+
+#define netxen_gb_set_mii_mgmt_clockselect(config_word, val)	\
+		((config_word) |= ((val) & 0x07))
+#define netxen_gb_mii_mgmt_reset(config_word)	\
+		set_bit(31, (unsigned long*)&config_word)
+#define netxen_gb_mii_mgmt_unset(config_word)	\
+		clear_bit(31, (unsigned long*)&config_word)
+
+/*
+ * NIU GB MII Mgmt Command Register (applies to GB0, GB1, GB2, GB3)
+ * Bit 0 : read_cycle => 1:perform single read cycle, 0:no-op
+ * Bit 1 : scan_cycle => 1:perform continuous read cycles, 0:no-op
+ */
+
+#define netxen_gb_mii_mgmt_set_read_cycle(config_word)	\
+		set_bit(0, (unsigned long*)&config_word)
+#define netxen_gb_mii_mgmt_reg_addr(config_word, val)	\
+		((config_word) |= ((val) & 0x1F))
+#define netxen_gb_mii_mgmt_phy_addr(config_word, val)	\
+		((config_word) |= (((val) & 0x1F) << 8))
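+
+/*
+ * Illustrative sketch, not part of the original patch: building the MII
+ * management address word for a PHY register access with the macros above;
+ * a single read is then started by writing a command word with the
+ * read_cycle bit set.  The helper name and the PHY/register numbers are
+ * only examples.
+ */
+static inline u32 netxen_gb_example_mii_addr_word(void)
+{
+	u32 addr_word = 0;
+
+	netxen_gb_mii_mgmt_phy_addr(addr_word, 1);	/* PHY address 1 */
+	netxen_gb_mii_mgmt_reg_addr(addr_word, 17);	/* PHY status (reg 17) */
+
+	return addr_word;
+}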
+
+/*
+ * NIU GB MII Mgmt Indicators Register (applies to GB0, GB1, GB2, GB3)
+ * Read-only register.
+ * Bit 0 : busy => 1:performing an MII mgmt cycle, 0:idle
+ * Bit 1 : scanning => 1:scan operation in progress, 0:idle
+ * Bit 2 : notvalid => 1:mgmt result data not yet valid, 0:idle
+ */
+#define netxen_get_gb_mii_mgmt_busy(config_word)	\
+		_netxen_crb_get_bit(config_word, 0)
+#define netxen_get_gb_mii_mgmt_scanning(config_word)	\
+		_netxen_crb_get_bit(config_word, 1)
+#define netxen_get_gb_mii_mgmt_notvalid(config_word)	\
+		_netxen_crb_get_bit(config_word, 2)
+
+/*
+ * PHY-Specific MII control/status registers.
+ */
+typedef enum {
+	NETXEN_NIU_GB_MII_MGMT_ADDR_CONTROL = 0,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_STATUS = 1,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_0 = 2,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_1 = 3,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG = 4,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART = 5,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE = 6,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT = 7,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE = 8,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL = 9,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS = 10,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS = 15,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL = 16,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS = 17,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE = 18,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS = 19,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE = 20,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT = 21,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_LED_CONTROL = 24,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE = 25,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET = 26,
+	NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE = 27
+} netxen_niu_phy_register_t;
+
+/*
+ * PHY-Specific Status Register (reg 17).
+ *
+ * Bit 0      : jabber => 1:jabber detected, 0:not
+ * Bit 1      : polarity => 1:polarity reversed, 0:normal
+ * Bit 2      : recvpause => 1:receive pause enabled, 0:disabled
+ * Bit 3      : xmitpause => 1:transmit pause enabled, 0:disabled
+ * Bit 4      : energydetect => 1:sleep, 0:active
+ * Bit 5      : downshift => 1:downshift, 0:no downshift
+ * Bit 6      : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
+ * Bits 7-9   : cablelen => not valid in 10Mb/s mode
+ *			0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
+ * Bit 10     : link => 1:link up, 0:link down
+ * Bit 11     : resolved => 1:speed and duplex resolved, 0:not yet
+ * Bit 12     : pagercvd => 1:page received, 0:page not received
+ * Bit 13     : duplex => 1:full duplex, 0:half duplex
+ * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
+ */
+
+#define netxen_get_phy_cablelen(config_word) (((config_word) >> 7) & 0x07)
+#define netxen_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
+
+#define netxen_set_phy_speed(config_word, val)	\
+		((config_word) |= ((val & 0x03) << 14))
+#define netxen_set_phy_duplex(config_word)	\
+		set_bit(13, (unsigned long*)&config_word)
+#define netxen_clear_phy_duplex(config_word)	\
+		clear_bit(13, (unsigned long*)&config_word)
+
+#define netxen_get_phy_jabber(config_word)	\
+		_netxen_crb_get_bit(config_word, 0)
+#define netxen_get_phy_polarity(config_word)	\
+		_netxen_crb_get_bit(config_word, 1)
+#define netxen_get_phy_recvpause(config_word)	\
+		_netxen_crb_get_bit(config_word, 2)
+#define netxen_get_phy_xmitpause(config_word)	\
+		_netxen_crb_get_bit(config_word, 3)
+#define netxen_get_phy_energydetect(config_word) \
+		_netxen_crb_get_bit(config_word, 4)
+#define netxen_get_phy_downshift(config_word)	\
+		_netxen_crb_get_bit(config_word, 5)
+#define netxen_get_phy_crossover(config_word)	\
+		_netxen_crb_get_bit(config_word, 6)
+#define netxen_get_phy_link(config_word)	\
+		_netxen_crb_get_bit(config_word, 10)
+#define netxen_get_phy_resolved(config_word)	\
+		_netxen_crb_get_bit(config_word, 11)
+#define netxen_get_phy_pagercvd(config_word)	\
+		_netxen_crb_get_bit(config_word, 12)
+#define netxen_get_phy_duplex(config_word)	\
+		_netxen_crb_get_bit(config_word, 13)
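+
+/*
+ * Illustrative sketch, not part of the original patch: decoding the link
+ * state from a PHY-specific status word (register 17) with the macros
+ * above, much as netxen_nic_set_link_parameters() does.  The helper name
+ * is hypothetical; it returns the speed in Mb/s, or 0 if the link is down
+ * or the speed field is reserved.
+ */
+static inline int netxen_example_phy_speed_mbps(u32 status)
+{
+	static const int speeds[] = { 10, 100, 1000, 0 };
+
+	if (!netxen_get_phy_link(status))	/* bit 10: link up/down */
+		return 0;
+
+	return speeds[netxen_get_phy_speed(status)];	/* bits 14-15 */
+}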
+
+/*
+ * Interrupt Register definition
+ * This definition applies to registers 18 and 19 (int enable and int status).
+ * Bit 0 : jabber
+ * Bit 1 : polarity_changed
+ * Bit 4 : energy_detect
+ * Bit 5 : downshift
+ * Bit 6 : mdi_xover_changed
+ * Bit 7 : fifo_over_underflow
+ * Bit 8 : false_carrier
+ * Bit 9 : symbol_error
+ * Bit 10: link_status_changed
+ * Bit 11: autoneg_completed
+ * Bit 12: page_received
+ * Bit 13: duplex_changed
+ * Bit 14: speed_changed
+ * Bit 15: autoneg_error
+ */
+
+#define netxen_get_phy_int_jabber(config_word)	\
+		_netxen_crb_get_bit(config_word, 0)
+#define netxen_get_phy_int_polarity_changed(config_word)	\
+		_netxen_crb_get_bit(config_word, 1)
+#define netxen_get_phy_int_energy_detect(config_word)	\
+		_netxen_crb_get_bit(config_word, 4)
+#define netxen_get_phy_int_downshift(config_word)	\
+		_netxen_crb_get_bit(config_word, 5)
+#define netxen_get_phy_int_mdi_xover_changed(config_word)	\
+		_netxen_crb_get_bit(config_word, 6)
+#define netxen_get_phy_int_fifo_over_underflow(config_word)	\
+		_netxen_crb_get_bit(config_word, 7)
+#define netxen_get_phy_int_false_carrier(config_word)	\
+		_netxen_crb_get_bit(config_word, 8)
+#define netxen_get_phy_int_symbol_error(config_word)	\
+		_netxen_crb_get_bit(config_word, 9)
+#define netxen_get_phy_int_link_status_changed(config_word)	\
+		_netxen_crb_get_bit(config_word, 10)
+#define netxen_get_phy_int_autoneg_completed(config_word)	\
+		_netxen_crb_get_bit(config_word, 11)
+#define netxen_get_phy_int_page_received(config_word)	\
+		_netxen_crb_get_bit(config_word, 12)
+#define netxen_get_phy_int_duplex_changed(config_word)	\
+		_netxen_crb_get_bit(config_word, 13)
+#define netxen_get_phy_int_speed_changed(config_word)	\
+		_netxen_crb_get_bit(config_word, 14)
+#define netxen_get_phy_int_autoneg_error(config_word)	\
+		_netxen_crb_get_bit(config_word, 15)
+
+#define netxen_set_phy_int_link_status_changed(config_word)	\
+		set_bit(10, (unsigned long*)&config_word)
+#define netxen_set_phy_int_autoneg_completed(config_word)	\
+		set_bit(11, (unsigned long*)&config_word)
+#define netxen_set_phy_int_speed_changed(config_word)	\
+		set_bit(14, (unsigned long*)&config_word)
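+
+/*
+ * Illustrative sketch, not part of the original patch: an interrupt-enable
+ * word (register 18) requesting link-status, autoneg-complete and
+ * speed-change interrupts, built with the macros above.  The helper name
+ * is hypothetical.
+ */
+static inline u32 netxen_example_phy_int_enable(void)
+{
+	u32 enable = 0;
+
+	netxen_set_phy_int_link_status_changed(enable);	/* bit 10 */
+	netxen_set_phy_int_autoneg_completed(enable);	/* bit 11 */
+	netxen_set_phy_int_speed_changed(enable);	/* bit 14 */
+
+	return enable;
+}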
+
+/*
+ * NIU Mode Register.
+ * Bit 0 : enable FibreChannel
+ * Bit 1 : enable 10/100/1000 Ethernet
+ * Bit 2 : enable 10Gb Ethernet
+ */
+
+#define netxen_get_niu_enable_ge(config_word)	\
+		_netxen_crb_get_bit(config_word, 1)
+
+/* Promiscuous mode options (GbE mode only) */
+typedef enum {
+	NETXEN_NIU_PROMISC_MODE = 0,
+	NETXEN_NIU_NON_PROMISC_MODE
+} netxen_niu_prom_mode_t;
+
+/*
+ * NIU GB Drop CRC Register
+ * 
+ * Bit 0 : drop_gb0 => 1:drop pkts with bad CRCs, 0:pass them on
+ * Bit 1 : drop_gb1 => 1:drop pkts with bad CRCs, 0:pass them on
+ * Bit 2 : drop_gb2 => 1:drop pkts with bad CRCs, 0:pass them on
+ * Bit 3 : drop_gb3 => 1:drop pkts with bad CRCs, 0:pass them on
+ */
+
+#define netxen_set_gb_drop_gb0(config_word)	\
+		set_bit(0, (unsigned long*)&config_word)
+#define netxen_set_gb_drop_gb1(config_word)	\
+		set_bit(1, (unsigned long*)&config_word)
+#define netxen_set_gb_drop_gb2(config_word)	\
+		set_bit(2, (unsigned long*)&config_word)
+#define netxen_set_gb_drop_gb3(config_word)	\
+		set_bit(3, (unsigned long*)&config_word)
+
+#define netxen_clear_gb_drop_gb0(config_word)	\
+		clear_bit(0, (unsigned long*)&config_word)
+#define netxen_clear_gb_drop_gb1(config_word)	\
+		clear_bit(1, (unsigned long*)&config_word)
+#define netxen_clear_gb_drop_gb2(config_word)	\
+		clear_bit(2, (unsigned long*)&config_word)
+#define netxen_clear_gb_drop_gb3(config_word)	\
+		clear_bit(3, (unsigned long*)&config_word)
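+
+/*
+ * Illustrative sketch, not part of the original patch: a drop-CRC word that
+ * makes all four GbE ports drop frames with bad CRCs, built with the macros
+ * above.  The helper name is hypothetical.
+ */
+static inline u32 netxen_example_gb_drop_bad_crc(void)
+{
+	u32 drop = 0;
+
+	netxen_set_gb_drop_gb0(drop);
+	netxen_set_gb_drop_gb1(drop);
+	netxen_set_gb_drop_gb2(drop);
+	netxen_set_gb_drop_gb3(drop);
+
+	return drop;
+}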
+
+/*
+ * NIU XG MAC Config Register
+ *
+ * Bit 0 : tx_enable => 1:enable frame xmit, 0:disable
+ * Bit 2 : rx_enable => 1:enable frame recv, 0:disable
+ * Bit 4 : soft_reset => 1:reset the MAC , 0:no-op
+ * Bit 27: xaui_framer_reset
+ * Bit 28: xaui_rx_reset
+ * Bit 29: xaui_tx_reset
+ * Bit 30: xg_ingress_afifo_reset
+ * Bit 31: xg_egress_afifo_reset
+ */
+
+#define netxen_xg_soft_reset(config_word)	\
+		set_bit(4, (unsigned long*)&config_word)
+
+/*
+ * MAC Control Register
+ * 
+ * Bit 0-1   : id_pool0
+ * Bit 2     : enable_xtnd0
+ * Bit 4-5   : id_pool1
+ * Bit 6     : enable_xtnd1
+ * Bit 8-9   : id_pool2
+ * Bit 10    : enable_xtnd2
+ * Bit 12-13 : id_pool3
+ * Bit 14    : enable_xtnd3
+ * Bit 24-25 : mode_select
+ * Bit 28-31 : enable_pool
+ */
+
+#define netxen_nic_mcr_set_id_pool0(config, val)	\
+		((config) |= ((val) &0x03))
+#define netxen_nic_mcr_set_enable_xtnd0(config)	\
+		(set_bit(3, (unsigned long *)&(config)))
+#define netxen_nic_mcr_set_id_pool1(config, val)	\
+		((config) |= (((val) & 0x03) << 4))
+#define netxen_nic_mcr_set_enable_xtnd1(config)	\
+		(set_bit(6, (unsigned long *)&(config)))
+#define netxen_nic_mcr_set_id_pool2(config, val)	\
+		((config) |= (((val) & 0x03) << 8))
+#define netxen_nic_mcr_set_enable_xtnd2(config)	\
+		(set_bit(10, (unsigned long *)&(config)))
+#define netxen_nic_mcr_set_id_pool3(config, val)	\
+		((config) |= (((val) & 0x03) << 12))
+#define netxen_nic_mcr_set_enable_xtnd3(config)	\
+		(set_bit(14, (unsigned long *)&(config)))
+#define netxen_nic_mcr_set_mode_select(config, val)	\
+		((config) |= (((val) & 0x03) << 24))
+#define netxen_nic_mcr_set_enable_pool(config, val)	\
+		((config) |= (((val) & 0x0f) << 28))
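+
+/*
+ * Illustrative sketch, not part of the original patch: programming the MAC
+ * Control Register fields with the macros above.  The helper name and the
+ * chosen pool/mode values are only examples.
+ */
+static inline u32 netxen_example_mac_control(void)
+{
+	u32 mcr = 0;
+
+	netxen_nic_mcr_set_id_pool0(mcr, 0);		/* bits 0-1 */
+	netxen_nic_mcr_set_id_pool1(mcr, 1);		/* bits 4-5 */
+	netxen_nic_mcr_set_mode_select(mcr, 1);		/* bits 24-25 */
+	netxen_nic_mcr_set_enable_pool(mcr, 0xf);	/* bits 28-31 */
+
+	return mcr;
+}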
+
+/* Set promiscuous mode for a GbE interface */
+int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, int port,
+				    netxen_niu_prom_mode_t mode);
+int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
+				       int port, netxen_niu_prom_mode_t mode);
+
+/* get/set the MAC address for a given MAC */
+int netxen_niu_macaddr_get(struct netxen_adapter *adapter, int port,
+			   netxen_ethernet_macaddr_t * addr);
+int netxen_niu_macaddr_set(struct netxen_port *port,
+			   netxen_ethernet_macaddr_t addr);
+
+/* XG versions */
+int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter, int port,
+			      netxen_ethernet_macaddr_t * addr);
+int netxen_niu_xg_macaddr_set(struct netxen_port *port,
+			      netxen_ethernet_macaddr_t addr);
+
+/* Generic enable for GbE ports. Will detect the speed of the link. */
+int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port);
+
+int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port);
+
+/* Disable a GbE interface */
+int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter, int port);
+
+int netxen_niu_disable_xg_port(struct netxen_adapter *adapter, int port);
+
+#endif				/* __NETXEN_NIC_HW_H_ */
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
new file mode 100644
index 0000000..869725f
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -0,0 +1,1524 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ *    info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ *
+ *
+ * Source file for NIC routines to initialize the Phantom Hardware
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+#include "netxen_nic_ioctl.h"
+#include "netxen_nic_phan_reg.h"
+
+struct crb_addr_pair {
+	long addr;
+	long data;
+};
+
+#define NETXEN_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
+#define NETXEN_ADDR_ERROR ((unsigned long)0xffffffff)
+
+#define crb_addr_transform(name) \
+	crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
+	NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20
+
+#define NETXEN_NIC_XDMA_RESET 0x8000ff
+
+static inline void
+netxen_nic_locked_write_reg(struct netxen_adapter *adapter,
+			    unsigned long off, int *data)
+{
+	void __iomem *addr = pci_base_offset(adapter, off);
+	writel(*data, addr);
+}
+
+static void crb_addr_transform_setup(void)
+{
+	crb_addr_transform(XDMA);
+	crb_addr_transform(TIMR);
+	crb_addr_transform(SRE);
+	crb_addr_transform(SQN3);
+	crb_addr_transform(SQN2);
+	crb_addr_transform(SQN1);
+	crb_addr_transform(SQN0);
+	crb_addr_transform(SQS3);
+	crb_addr_transform(SQS2);
+	crb_addr_transform(SQS1);
+	crb_addr_transform(SQS0);
+	crb_addr_transform(RPMX7);
+	crb_addr_transform(RPMX6);
+	crb_addr_transform(RPMX5);
+	crb_addr_transform(RPMX4);
+	crb_addr_transform(RPMX3);
+	crb_addr_transform(RPMX2);
+	crb_addr_transform(RPMX1);
+	crb_addr_transform(RPMX0);
+	crb_addr_transform(ROMUSB);
+	crb_addr_transform(SN);
+	crb_addr_transform(QMN);
+	crb_addr_transform(QMS);
+	crb_addr_transform(PGNI);
+	crb_addr_transform(PGND);
+	crb_addr_transform(PGN3);
+	crb_addr_transform(PGN2);
+	crb_addr_transform(PGN1);
+	crb_addr_transform(PGN0);
+	crb_addr_transform(PGSI);
+	crb_addr_transform(PGSD);
+	crb_addr_transform(PGS3);
+	crb_addr_transform(PGS2);
+	crb_addr_transform(PGS1);
+	crb_addr_transform(PGS0);
+	crb_addr_transform(PS);
+	crb_addr_transform(PH);
+	crb_addr_transform(NIU);
+	crb_addr_transform(I2Q);
+	crb_addr_transform(EG);
+	crb_addr_transform(MN);
+	crb_addr_transform(MS);
+	crb_addr_transform(CAS2);
+	crb_addr_transform(CAS1);
+	crb_addr_transform(CAS0);
+	crb_addr_transform(CAM);
+	crb_addr_transform(C2C1);
+	crb_addr_transform(C2C0);
+}
+
+int netxen_init_firmware(struct netxen_adapter *adapter)
+{
+	u32 state = 0, loops = 0, err = 0;
+
+	/* Window 1 call */
+	state = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
+
+	if (state == PHAN_INITIALIZE_ACK)
+		return 0;
+
+	while (state != PHAN_INITIALIZE_COMPLETE && loops < 2000) {
+		udelay(100);
+		/* Window 1 call */
+		state = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
+
+		loops++;
+	}
+	if (loops >= 2000) {
+		printk(KERN_ERR "Cmd Peg initialization not complete:%x.\n",
+		       state);
+		err = -EIO;
+		return err;
+	}
+	/* Window 1 call */
+	writel(MPORT_SINGLE_FUNCTION_MODE,
+	       NETXEN_CRB_NORMALIZE(adapter, CRB_MPORT_MODE));
+	writel(PHAN_INITIALIZE_ACK,
+	       NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
+
+	return err;
+}
+
+#define NETXEN_ADDR_LIMIT 0xffffffffULL
+
+void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr,
+		   struct pci_dev **used_dev)
+{
+	void *addr;
+
+	addr = pci_alloc_consistent(pdev, sz, ptr);
+	if ((unsigned long long)(*ptr) < NETXEN_ADDR_LIMIT) {
+		*used_dev = pdev;
+		return addr;
+	}
+	pci_free_consistent(pdev, sz, addr, *ptr);
+	addr = pci_alloc_consistent(NULL, sz, ptr);
+	*used_dev = NULL;
+	return addr;
+}
+
+void netxen_initialize_adapter_sw(struct netxen_adapter *adapter)
+{
+	int ctxid, ring;
+	u32 i;
+	u32 num_rx_bufs = 0;
+	struct netxen_rcv_desc_ctx *rcv_desc;
+
+	DPRINTK(INFO, "initializing some queues: %p\n", adapter);
+	for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
+		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
+			struct netxen_rx_buffer *rx_buf;
+			rcv_desc = &adapter->recv_ctx[ctxid].rcv_desc[ring];
+			rcv_desc->rcv_free = rcv_desc->max_rx_desc_count;
+			rcv_desc->begin_alloc = 0;
+			rx_buf = rcv_desc->rx_buf_arr;
+			num_rx_bufs = rcv_desc->max_rx_desc_count;
+			/*
+			 * Now go through all of them, set reference handles
+			 * and put them in the queues.
+			 */
+			for (i = 0; i < num_rx_bufs; i++) {
+				rx_buf->ref_handle = i;
+				rx_buf->state = NETXEN_BUFFER_FREE;
+				DPRINTK(INFO, "Rx buf:ctx%d i(%d) rx_buf:"
+					"%p\n", ctxid, i, rx_buf);
+				rx_buf++;
+			}
+		}
+	}
+}
+
+void netxen_initialize_adapter_hw(struct netxen_adapter *adapter)
+{
+	int ports = 0;
+	struct netxen_board_info *board_info = &(adapter->ahw.boardcfg);
+
+	if (netxen_nic_get_board_info(adapter) != 0)
+		printk("%s: Error getting board config info.\n",
+		       netxen_nic_driver_name);
+	get_brd_port_by_type(board_info->board_type, &ports);
+	if (ports == 0)
+		printk(KERN_ERR "%s: Unknown board type\n",
+		       netxen_nic_driver_name);
+	adapter->ahw.max_ports = ports;
+}
+
+void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
+{
+	switch (adapter->ahw.board_type) {
+	case NETXEN_NIC_GBE:
+		adapter->enable_phy_interrupts =
+		    netxen_niu_gbe_enable_phy_interrupts;
+		adapter->disable_phy_interrupts =
+		    netxen_niu_gbe_disable_phy_interrupts;
+		adapter->handle_phy_intr = netxen_nic_gbe_handle_phy_intr;
+		adapter->macaddr_set = netxen_niu_macaddr_set;
+		adapter->set_mtu = netxen_nic_set_mtu_gb;
+		adapter->set_promisc = netxen_niu_set_promiscuous_mode;
+		adapter->unset_promisc = netxen_niu_set_promiscuous_mode;
+		adapter->phy_read = netxen_niu_gbe_phy_read;
+		adapter->phy_write = netxen_niu_gbe_phy_write;
+		adapter->init_port = netxen_niu_gbe_init_port;
+		adapter->init_niu = netxen_nic_init_niu_gb;
+		adapter->stop_port = netxen_niu_disable_gbe_port;
+		break;
+
+	case NETXEN_NIC_XGBE:
+		adapter->enable_phy_interrupts =
+		    netxen_niu_xgbe_enable_phy_interrupts;
+		adapter->disable_phy_interrupts =
+		    netxen_niu_xgbe_disable_phy_interrupts;
+		adapter->handle_phy_intr = netxen_nic_xgbe_handle_phy_intr;
+		adapter->macaddr_set = netxen_niu_xg_macaddr_set;
+		adapter->set_mtu = netxen_nic_set_mtu_xgb;
+		adapter->init_port = netxen_niu_xg_init_port;
+		adapter->set_promisc = netxen_niu_xg_set_promiscuous_mode;
+		adapter->unset_promisc = netxen_niu_xg_set_promiscuous_mode;
+		adapter->stop_port = netxen_niu_disable_xg_port;
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+ * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
+ * address to external PCI CRB address.
+ */
+unsigned long netxen_decode_crb_addr(unsigned long addr)
+{
+	int i;
+	unsigned long base_addr, offset, pci_base;
+
+	crb_addr_transform_setup();
+
+	pci_base = NETXEN_ADDR_ERROR;
+	base_addr = addr & 0xfff00000;
+	offset = addr & 0x000fffff;
+
+	for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
+		if (crb_addr_xform[i] == base_addr) {
+			pci_base = i << 20;
+			break;
+		}
+	}
+	if (pci_base == NETXEN_ADDR_ERROR)
+		return pci_base;
+	else
+		return (pci_base + offset);
+}
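+
+/*
+ * Worked example (illustrative, not from the original patch): if the top 12
+ * bits of a Phantom CRB address match crb_addr_xform[i] for some hub agent
+ * i, the decoded external PCI CRB address is (i << 20) plus the low 20
+ * offset bits of the original address; an address matching no agent decodes
+ * to NETXEN_ADDR_ERROR.
+ */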
+
+static long rom_max_timeout = 10000;
+static long rom_lock_timeout = 1000000;
+
+static inline int rom_lock(struct netxen_adapter *adapter)
+{
+	int iter;
+	u32 done = 0;
+	int timeout = 0;
+
+	while (!done) {
+		/* acquire semaphore2 from PCI HW block */
+		netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_LOCK),
+				   &done);
+		if (done == 1)
+			break;
+		if (timeout >= rom_lock_timeout)
+			return -EIO;
+
+		timeout++;
+		/*
+		 * Yield CPU
+		 */
+		if (!in_atomic())
+			schedule();
+		else {
+			for (iter = 0; iter < 20; iter++)
+				cpu_relax();	/* This is a nop instruction on i386 */
+		}
+	}
+	netxen_nic_reg_write(adapter, NETXEN_ROM_LOCK_ID, ROM_LOCK_DRIVER);
+	return 0;
+}
+
+int netxen_wait_rom_done(struct netxen_adapter *adapter)
+{
+	long timeout = 0;
+	long done = 0;
+
+	while (done == 0) {
+		done = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_GLB_STATUS);
+		done &= 2;
+		timeout++;
+		if (timeout >= rom_max_timeout) {
+			printk("Timeout reached  waiting for rom done");
+			return -EIO;
+		}
+	}
+	return 0;
+}
+
+static inline int netxen_rom_wren(struct netxen_adapter *adapter)
+{
+	/* Set write enable latch in ROM status register */
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE,
+			     M25P_INSTR_WREN);
+	if (netxen_wait_rom_done(adapter)) {
+		return -1;
+	}
+	return 0;
+}
+
+static inline unsigned int netxen_rdcrbreg(struct netxen_adapter *adapter,
+					   unsigned int addr)
+{
+	unsigned int data = 0xdeaddead;
+	data = netxen_nic_reg_read(adapter, addr);
+	return data;
+}
+
+static inline int netxen_do_rom_rdsr(struct netxen_adapter *adapter)
+{
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE,
+			     M25P_INSTR_RDSR);
+	if (netxen_wait_rom_done(adapter)) {
+		return -1;
+	}
+	return netxen_rdcrbreg(adapter, NETXEN_ROMUSB_ROM_RDATA);
+}
+
+static inline void netxen_rom_unlock(struct netxen_adapter *adapter)
+{
+	u32 val;
+
+	/* release semaphore2 */
+	netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_UNLOCK), &val);
+
+}
+
+int netxen_rom_wip_poll(struct netxen_adapter *adapter)
+{
+	long timeout = 0;
+	long wip = 1;
+	int val;
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
+	while (wip != 0) {
+		val = netxen_do_rom_rdsr(adapter);
+		wip = val & 1;
+		timeout++;
+		if (timeout > rom_max_timeout) {
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static inline int do_rom_fast_write(struct netxen_adapter *adapter, int addr,
+				    int data)
+{
+	if (netxen_rom_wren(adapter)) {
+		return -1;
+	}
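+	/* Write-enable is latched; load data and address, then issue the
+	 * page-program instruction. */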
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_WDATA, data);
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE,
+			     M25P_INSTR_PP);
+	if (netxen_wait_rom_done(adapter)) {
+		netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
+		return -1;
+	}
+
+	return netxen_rom_wip_poll(adapter);
+}
+
+static inline int
+do_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
+{
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
+	udelay(100);		/* prevent bursting on CRB */
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
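+	/* 0xb is the serial flash fast-read instruction. */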
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+	if (netxen_wait_rom_done(adapter)) {
+		printk("Error waiting for rom done\n");
+		return -EIO;
+	}
+	/* reset abyte_cnt and dummy_byte_cnt */
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
+	udelay(100);		/* prevent bursting on CRB */
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+
+	*valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA);
+	return 0;
+}
+
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
+{
+	int ret;
+
+	if (rom_lock(adapter) != 0)
+		return -EIO;
+
+	ret = do_rom_fast_read(adapter, addr, valp);
+	netxen_rom_unlock(adapter);
+	return ret;
+}
+
+int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data)
+{
+	int ret = 0;
+
+	if (rom_lock(adapter) != 0) {
+		return -1;
+	}
+	ret = do_rom_fast_write(adapter, addr, data);
+	netxen_rom_unlock(adapter);
+	return ret;
+}
+int netxen_do_rom_se(struct netxen_adapter *adapter, int addr)
+{
+	netxen_rom_wren(adapter);
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE,
+			     M25P_INSTR_SE);
+	if (netxen_wait_rom_done(adapter)) {
+		netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
+		return -1;
+	}
+	return netxen_rom_wip_poll(adapter);
+}
+
+int netxen_rom_se(struct netxen_adapter *adapter, int addr)
+{
+	int ret = 0;
+	if (rom_lock(adapter) != 0) {
+		return -1;
+	}
+	ret = netxen_do_rom_se(adapter, addr);
+	netxen_rom_unlock(adapter);
+	return ret;
+}
+
+#define NETXEN_BOARDTYPE		0x4008
+#define NETXEN_BOARDNUM 		0x400c
+#define NETXEN_CHIPNUM			0x4010
+#define NETXEN_ROMBUS_RESET		0xFFFFFFFF
+#define NETXEN_ROM_FIRST_BARRIER	0x800000000ULL
+#define NETXEN_ROM_FOUND_INIT		0x400
+
+int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
+{
+	int addr, val, status;
+	int n, i;
+	int init_delay = 0;
+	struct crb_addr_pair *buf;
+	unsigned long off;
+
+	/* resetall */
+	status = netxen_nic_get_board_info(adapter);
+	if (status)
+		printk("%s: netxen_pinit_from_rom: Error getting board info\n",
+		       netxen_nic_driver_name);
+
+	netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET,
+				    NETXEN_ROMBUS_RESET);
+
+	if (verbose) {
+		int val;
+		if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0)
+			printk("P2 ROM board type: 0x%08x\n", val);
+		else
+			printk("Could not read board type\n");
+		if (netxen_rom_fast_read(adapter, NETXEN_BOARDNUM, &val) == 0)
+			printk("P2 ROM board  num: 0x%08x\n", val);
+		else
+			printk("Could not read board number\n");
+		if (netxen_rom_fast_read(adapter, NETXEN_CHIPNUM, &val) == 0)
+			printk("P2 ROM chip   num: 0x%08x\n", val);
+		else
+			printk("Could not read chip number\n");
+	}
+
+	if (netxen_rom_fast_read(adapter, 0, &n) == 0
+	    && (n & NETXEN_ROM_FIRST_BARRIER)) {
+		n &= ~NETXEN_ROM_ROUNDUP;
+		if (n < NETXEN_ROM_FOUND_INIT) {
+			if (verbose)
+				printk("%s: %d CRB init values found"
+				       " in ROM.\n", netxen_nic_driver_name, n);
+		} else {
+			printk("%s: n=0x%x Error! NetXen card flash not"
+			       " initialized.\n", __FUNCTION__, n);
+			return -EIO;
+		}
+		buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+		if (buf == NULL) {
+			printk("%s: netxen_pinit_from_rom: Unable to allocate "
+			       "memory.\n", netxen_nic_driver_name);
+			return -ENOMEM;
+		}
+		for (i = 0; i < n; i++) {
+			if (netxen_rom_fast_read(adapter, 8 * i + 4, &val) != 0
+			    || netxen_rom_fast_read(adapter, 8 * i + 8,
+						    &addr) != 0)
+				return -EIO;
+
+			buf[i].addr = addr;
+			buf[i].data = val;
+
+			if (verbose)
+				printk("%s: PCI:     0x%08x == 0x%08x\n",
+				       netxen_nic_driver_name, (unsigned int)
+				       netxen_decode_crb_addr((unsigned long)
+							      addr), val);
+		}
+		for (i = 0; i < n; i++) {
+
+			off =
+			    netxen_decode_crb_addr((unsigned long)buf[i].addr) +
+			    NETXEN_PCI_CRBSPACE;
+			/* skipping cold reboot MAGIC */
+			if (off == NETXEN_CAM_RAM(0x1fc))
+				continue;
+
+			/* After writing this register, HW needs time for CRB */
+			/* to quiet down (else crb_window returns 0xffffffff) */
+			if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
+				init_delay = 1;
+				/* hold xdma in reset also */
+				buf[i].data = NETXEN_NIC_XDMA_RESET;
+			}
+
+			if (ADDR_IN_WINDOW1(off)) {
+				writel(buf[i].data,
+				       NETXEN_CRB_NORMALIZE(adapter, off));
+			} else {
+				netxen_nic_pci_change_crbwindow(adapter, 0);
+				writel(buf[i].data,
+				       pci_base_offset(adapter, off));
+
+				netxen_nic_pci_change_crbwindow(adapter, 1);
+			}
+			if (init_delay == 1) {
+				ssleep(1);
+				init_delay = 0;
+			}
+			msleep(1);
+		}
+		kfree(buf);
+
+		/* disable_peg_cache_all */
+
+		/* unreset_net_cache */
+		netxen_nic_hw_read_wx(adapter, NETXEN_ROMUSB_GLB_SW_RESET, &val,
+				      4);
+		netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET,
+					    (val & 0xffffff0f));
+		/* p2dn replyCount */
+		netxen_crb_writelit_adapter(adapter,
+					    NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
+		/* disable_peg_cache 0 */
+		netxen_crb_writelit_adapter(adapter,
+					    NETXEN_CRB_PEG_NET_D + 0x4c, 8);
+		/* disable_peg_cache 1 */
+		netxen_crb_writelit_adapter(adapter,
+					    NETXEN_CRB_PEG_NET_I + 0x4c, 8);
+
+		/* peg_clr_all */
+
+		/* peg_clr 0 */
+		netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x8,
+					    0);
+		netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0xc,
+					    0);
+		/* peg_clr 1 */
+		netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x8,
+					    0);
+		netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0xc,
+					    0);
+		/* peg_clr 2 */
+		netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x8,
+					    0);
+		netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0xc,
+					    0);
+		/* peg_clr 3 */
+		netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x8,
+					    0);
+		netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0xc,
+					    0);
+	}
+	return 0;
+}
+
+int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
+{
+	uint64_t addr;
+	uint32_t hi;
+	uint32_t lo;
+
+	adapter->dummy_dma.addr =
+	    pci_alloc_consistent(adapter->ahw.pdev,
+				 NETXEN_HOST_DUMMY_DMA_SIZE,
+				 &adapter->dummy_dma.phys_addr);
+	if (adapter->dummy_dma.addr == NULL) {
+		printk("%s: ERROR: Could not allocate dummy DMA memory\n",
+		       __FUNCTION__);
+		return -ENOMEM;
+	}
+
+	addr = (uint64_t) adapter->dummy_dma.phys_addr;
+	hi = (addr >> 32) & 0xffffffff;
+	lo = addr & 0xffffffff;
+
+	writel(hi, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI));
+	writel(lo, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO));
+
+	return 0;
+}
+
+void netxen_free_adapter_offload(struct netxen_adapter *adapter)
+{
+	if (adapter->dummy_dma.addr) {
+		pci_free_consistent(adapter->ahw.pdev,
+				    NETXEN_HOST_DUMMY_DMA_SIZE,
+				    adapter->dummy_dma.addr,
+				    adapter->dummy_dma.phys_addr);
+		adapter->dummy_dma.addr = NULL;
+	}
+}
+
+void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
+{
+	u32 val = 0;
+	int loops = 0;
+
+	if (!pegtune_val) {
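+		/* Poll the command peg state until the firmware reports
+		 * PHAN_INITIALIZE_COMPLETE, waiting roughly 20 seconds at most. */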
+		while (val != PHAN_INITIALIZE_COMPLETE && loops < 200000) {
+			udelay(100);
+			schedule();
+			val =
+			    readl(NETXEN_CRB_NORMALIZE
+				  (adapter, CRB_CMDPEG_STATE));
+			loops++;
+		}
+		if (val != PHAN_INITIALIZE_COMPLETE)
+			printk("WARNING: Initial boot wait loop failed...\n");
+	}
+}
+
+int netxen_nic_rx_has_work(struct netxen_adapter *adapter)
+{
+	int ctx;
+
+	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+		struct netxen_recv_context *recv_ctx =
+		    &(adapter->recv_ctx[ctx]);
+		u32 consumer;
+		struct status_desc *desc_head;
+		struct status_desc *desc;
+
+		consumer = recv_ctx->status_rx_consumer;
+		desc_head = recv_ctx->rcv_status_desc_head;
+		desc = &desc_head[consumer];
+
+		if (((le16_to_cpu(netxen_get_sts_owner(desc)))
+		     & STATUS_OWNER_HOST))
+			return 1;
+	}
+
+	return 0;
+}
+
+static inline int netxen_nic_check_temp(struct netxen_adapter *adapter)
+{
+	int port_num;
+	struct netxen_port *port;
+	struct net_device *netdev;
+	uint32_t temp, temp_state, temp_val;
+	int rv = 0;
+
+	temp = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_TEMP_STATE));
+
+	temp_state = nx_get_temp_state(temp);
+	temp_val = nx_get_temp_val(temp);
+
+	if (temp_state == NX_TEMP_PANIC) {
+		printk(KERN_ALERT
+		       "%s: Device temperature %d degrees C exceeds"
+		       " maximum allowed. Hardware has been shut down.\n",
+		       netxen_nic_driver_name, temp_val);
+		for (port_num = 0; port_num < adapter->ahw.max_ports;
+		     port_num++) {
+			port = adapter->port[port_num];
+			netdev = port->netdev;
+
+			netif_carrier_off(netdev);
+			netif_stop_queue(netdev);
+		}
+		rv = 1;
+	} else if (temp_state == NX_TEMP_WARN) {
+		if (adapter->temp == NX_TEMP_NORMAL) {
+			printk(KERN_ALERT
+			       "%s: Device temperature %d degrees C "
+			       "exceeds operating range."
+			       " Immediate action needed.\n",
+			       netxen_nic_driver_name, temp_val);
+		}
+	} else {
+		if (adapter->temp == NX_TEMP_WARN) {
+			printk(KERN_INFO
+			       "%s: Device temperature is now %d degrees C"
+			       " in normal range.\n", netxen_nic_driver_name,
+			       temp_val);
+		}
+	}
+	adapter->temp = temp_state;
+	return rv;
+}
+
+void netxen_watchdog_task(struct work_struct *work)
+{
+	int port_num;
+	struct netxen_port *port;
+	struct net_device *netdev;
+	struct netxen_adapter *adapter =
+		container_of(work, struct netxen_adapter, watchdog_task);
+
+	if (netxen_nic_check_temp(adapter))
+		return;
+
+	for (port_num = 0; port_num < adapter->ahw.max_ports; port_num++) {
+		port = adapter->port[port_num];
+		netdev = port->netdev;
+
+		if ((netif_running(netdev)) && !netif_carrier_ok(netdev)) {
+			printk(KERN_INFO "%s port %d, %s carrier is now ok\n",
+			       netxen_nic_driver_name, port_num, netdev->name);
+			netif_carrier_on(netdev);
+		}
+
+		if (netif_queue_stopped(netdev))
+			netif_wake_queue(netdev);
+	}
+
+	if (adapter->handle_phy_intr)
+		adapter->handle_phy_intr(adapter);
+	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+}
+
+/*
+ * netxen_process_rcv() sends the received packet to the protocol stack,
+ * and if the number of receives exceeds RX_BUFFERS_REFILL, then we
+ * invoke the routine to send more rx buffers to the Phantom...
+ */
+void
+netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
+		   struct status_desc *desc)
+{
+	struct netxen_port *port = adapter->port[netxen_get_sts_port(desc)];
+	struct pci_dev *pdev = port->pdev;
+	struct net_device *netdev = port->netdev;
+	int index = le16_to_cpu(netxen_get_sts_refhandle(desc));
+	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
+	struct netxen_rx_buffer *buffer;
+	struct sk_buff *skb;
+	u32 length = le16_to_cpu(netxen_get_sts_totallength(desc));
+	u32 desc_ctx;
+	struct netxen_rcv_desc_ctx *rcv_desc;
+	int ret;
+
+	desc_ctx = netxen_get_sts_type(desc);
+	if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) {
+		printk("%s: %s Bad Rcv descriptor ring\n",
+		       netxen_nic_driver_name, netdev->name);
+		return;
+	}
+
+	rcv_desc = &recv_ctx->rcv_desc[desc_ctx];
+	if (unlikely(index > rcv_desc->max_rx_desc_count)) {
+		DPRINTK(ERR, "Got a buffer index:%x Max is %x\n",
+			index, rcv_desc->max_rx_desc_count);
+		return;
+	}
+	buffer = &rcv_desc->rx_buf_arr[index];
+	if (desc_ctx == RCV_DESC_LRO_CTXID) {
+		buffer->lro_current_frags++;
+		if (netxen_get_sts_desc_lro_last_frag(desc)) {
+			buffer->lro_expected_frags =
+			    netxen_get_sts_desc_lro_cnt(desc);
+			buffer->lro_length = length;
+		}
+		if (buffer->lro_current_frags != buffer->lro_expected_frags) {
+			if (buffer->lro_expected_frags != 0) {
+				printk("LRO: (refhandle:%x) recv frag. "
+				       "wait for last. flags: %x expected:%d "
+				       "have:%d\n", index,
+				       netxen_get_sts_desc_lro_last_frag(desc),
+				       buffer->lro_expected_frags,
+				       buffer->lro_current_frags);
+			}
+			return;
+		}
+	}
+
+	pci_unmap_single(pdev, buffer->dma, rcv_desc->dma_size,
+			 PCI_DMA_FROMDEVICE);
+
+	skb = (struct sk_buff *)buffer->skb;
+
+	if (likely(netxen_get_sts_status(desc) == STATUS_CKSUM_OK)) {
+		port->stats.csummed++;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+	skb->dev = netdev;
+	if (desc_ctx == RCV_DESC_LRO_CTXID) {
+		/* True length was only available on the last pkt */
+		skb_put(skb, buffer->lro_length);
+	} else {
+		skb_put(skb, length);
+	}
+
+	skb->protocol = eth_type_trans(skb, netdev);
+
+	ret = netif_receive_skb(skb);
+
+	/*
+	 * RH: Do we need these stats on a regular basis? Can we get them from
+	 * the Linux stats?
+	 */
+	switch (ret) {
+	case NET_RX_SUCCESS:
+		port->stats.uphappy++;
+		break;
+
+	case NET_RX_CN_LOW:
+		port->stats.uplcong++;
+		break;
+
+	case NET_RX_CN_MOD:
+		port->stats.upmcong++;
+		break;
+
+	case NET_RX_CN_HIGH:
+		port->stats.uphcong++;
+		break;
+
+	case NET_RX_DROP:
+		port->stats.updropped++;
+		break;
+
+	default:
+		port->stats.updunno++;
+		break;
+	}
+
+	netdev->last_rx = jiffies;
+
+	rcv_desc->rcv_free++;
+	rcv_desc->rcv_pending--;
+
+	/*
+	 * We just consumed one buffer so post a buffer.
+	 */
+	adapter->stats.post_called++;
+	buffer->skb = NULL;
+	buffer->state = NETXEN_BUFFER_FREE;
+	buffer->lro_current_frags = 0;
+	buffer->lro_expected_frags = 0;
+
+	port->stats.no_rcv++;
+	port->stats.rxbytes += length;
+}
+
+/* Process Receive status ring */
+u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
+{
+	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
+	struct status_desc *desc_head = recv_ctx->rcv_status_desc_head;
+	struct status_desc *desc;	/* used to read status desc here */
+	u32 consumer = recv_ctx->status_rx_consumer;
+	u32 producer = 0;
+	int count = 0, ring;
+
+	DPRINTK(INFO, "processing receive\n");
+	/*
+	 * we assume in this case that there is only one port and that is
+	 * port #1...changes need to be done in firmware to indicate port
+	 * number as part of the descriptor. This way we will be able to get
+	 * the netdev which is associated with that device.
+	 */
+	while (count < max) {
+		desc = &desc_head[consumer];
+		if (!(le16_to_cpu(netxen_get_sts_owner(desc)) &
+		      STATUS_OWNER_HOST)) {
+			DPRINTK(ERR, "desc %p ownedby %x\n", desc,
+				netxen_get_sts_owner(desc));
+			break;
+		}
+		netxen_process_rcv(adapter, ctxid, desc);
+		netxen_clear_sts_owner(desc);
+		netxen_set_sts_owner(desc, STATUS_OWNER_PHANTOM);
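+		/* Advance and wrap the consumer index; the ring size is
+		 * assumed to be a power of two. */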
+		consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1);
+		count++;
+	}
+	if (count) {
+		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
+			netxen_post_rx_buffers_nodb(adapter, ctxid, ring);
+		}
+	}
+
+	/* update the consumer index in phantom */
+	if (count) {
+		adapter->stats.process_rcv++;
+		recv_ctx->status_rx_consumer = consumer;
+		recv_ctx->status_rx_producer = producer;
+
+		/* Window = 1 */
+		writel(consumer,
+		       NETXEN_CRB_NORMALIZE(adapter,
+					    recv_crb_registers[ctxid].
+					    crb_rcv_status_consumer));
+	}
+
+	return count;
+}
+
+/* Process Command status ring */
+int netxen_process_cmd_ring(unsigned long data)
+{
+	u32 last_consumer;
+	u32 consumer;
+	struct netxen_adapter *adapter = (struct netxen_adapter *)data;
+	int count1 = 0;
+	int count2 = 0;
+	struct netxen_cmd_buffer *buffer;
+	struct netxen_port *port;	/* port #1 */
+	struct netxen_port *nport;
+	struct pci_dev *pdev;
+	struct netxen_skb_frag *frag;
+	u32 i;
+	struct sk_buff *skb = NULL;
+	int p;
+	int done;
+
+	spin_lock(&adapter->tx_lock);
+	last_consumer = adapter->last_cmd_consumer;
+	DPRINTK(INFO, "processing xmit complete\n");
+	/* we assume in this case that there is only one port and that is
+	 * port #1...changes need to be done in firmware to indicate port
+	 * number as part of the descriptor. This way we will be able to get
+	 * the netdev which is associated with that device.
+	 */
+
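+	/* adapter->cmd_consumer tracks how far the command ring has been
+	 * consumed. */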
+	consumer = *(adapter->cmd_consumer);
+	if (last_consumer == consumer) {	/* Ring is empty    */
+		DPRINTK(INFO, "last_consumer %d == consumer %d\n",
+			last_consumer, consumer);
+		spin_unlock(&adapter->tx_lock);
+		return 1;
+	}
+
+	adapter->proc_cmd_buf_counter++;
+	adapter->stats.process_xmit++;
+	/*
+	 * Not needed - does not seem to be used anywhere.
+	 * adapter->cmd_consumer = consumer;
+	 */
+	spin_unlock(&adapter->tx_lock);
+
+	while ((last_consumer != consumer) && (count1 < MAX_STATUS_HANDLE)) {
+		buffer = &adapter->cmd_buf_arr[last_consumer];
+		port = adapter->port[buffer->port];
+		pdev = port->pdev;
+		frag = &buffer->frag_array[0];
+		skb = buffer->skb;
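+		/* cmpxchg claims the skb atomically so only one context
+		 * unmaps and frees it. */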
+		if (skb && (cmpxchg(&buffer->skb, skb, 0) == skb)) {
+			pci_unmap_single(pdev, frag->dma, frag->length,
+					 PCI_DMA_TODEVICE);
+			for (i = 1; i < buffer->frag_count; i++) {
+				DPRINTK(INFO, "getting fragment no %d\n", i);
+				frag++;	/* Get the next frag */
+				pci_unmap_page(pdev, frag->dma, frag->length,
+					       PCI_DMA_TODEVICE);
+			}
+
+			port->stats.skbfreed++;
+			dev_kfree_skb_any(skb);
+			skb = NULL;
+		} else if (adapter->proc_cmd_buf_counter == 1) {
+			port->stats.txnullskb++;
+		}
+		if (unlikely(netif_queue_stopped(port->netdev)
+			     && netif_carrier_ok(port->netdev))
+		    && ((jiffies - port->netdev->trans_start) >
+			port->netdev->watchdog_timeo)) {
+			SCHEDULE_WORK(&port->adapter->tx_timeout_task);
+		}
+
+		last_consumer = get_next_index(last_consumer,
+					       adapter->max_tx_desc_count);
+		count1++;
+	}
+	adapter->stats.noxmitdone += count1;
+
+	count2 = 0;
+	spin_lock(&adapter->tx_lock);
+	if ((--adapter->proc_cmd_buf_counter) == 0) {
+		adapter->last_cmd_consumer = last_consumer;
+		while ((adapter->last_cmd_consumer != consumer)
+		       && (count2 < MAX_STATUS_HANDLE)) {
+			buffer =
+			    &adapter->cmd_buf_arr[adapter->last_cmd_consumer];
+			count2++;
+			if (buffer->skb)
+				break;
+			else
+				adapter->last_cmd_consumer =
+				    get_next_index(adapter->last_cmd_consumer,
+						   adapter->max_tx_desc_count);
+		}
+	}
+	if (count1 || count2) {
+		for (p = 0; p < adapter->ahw.max_ports; p++) {
+			nport = adapter->port[p];
+			if (netif_queue_stopped(nport->netdev)
+			    && (nport->flags & NETXEN_NETDEV_STATUS)) {
+				netif_wake_queue(nport->netdev);
+				nport->flags &= ~NETXEN_NETDEV_STATUS;
+			}
+		}
+	}
+	/*
+	 * If everything is freed up to consumer then check if the ring is full
+	 * If the ring is full then check if more needs to be freed and
+	 * schedule the call back again.
+	 *
+	 * This happens when there are 2 CPUs. One could be freeing and the
+	 * other filling it. If the ring is full when we get out of here and
+	 * the card has already interrupted the host then the host can miss the
+	 * interrupt.
+	 *
+	 * There is still a possible race condition and the host could miss an
+	 * interrupt. The card has to take care of this.
+	 */
+	if (adapter->last_cmd_consumer == consumer &&
+	    (((adapter->cmd_producer + 1) %
+	      adapter->max_tx_desc_count) == adapter->last_cmd_consumer)) {
+		consumer = *(adapter->cmd_consumer);
+	}
+	done = (adapter->last_cmd_consumer == consumer);
+
+	spin_unlock(&adapter->tx_lock);
+	DPRINTK(INFO, "last consumer is %d in %s\n", last_consumer,
+		__FUNCTION__);
+	return (done);
+}
+
+/*
+ * netxen_post_rx_buffers() posts rx buffers into the Phantom memory
+ */
+void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
+{
+	struct pci_dev *pdev = adapter->ahw.pdev;
+	struct sk_buff *skb;
+	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
+	struct netxen_rcv_desc_ctx *rcv_desc = NULL;
+	uint producer;
+	struct rcv_desc *pdesc;
+	struct netxen_rx_buffer *buffer;
+	int count = 0;
+	int index = 0;
+	netxen_ctx_msg msg = 0;
+	dma_addr_t dma;
+
+	adapter->stats.post_called++;
+	rcv_desc = &recv_ctx->rcv_desc[ringid];
+
+	producer = rcv_desc->producer;
+	index = rcv_desc->begin_alloc;
+	buffer = &rcv_desc->rx_buf_arr[index];
+	/* We can start writing rx descriptors into the phantom memory. */
+	while (buffer->state == NETXEN_BUFFER_FREE) {
+		skb = dev_alloc_skb(rcv_desc->skb_size);
+		if (unlikely(!skb)) {
+			/*
+			 * TODO
+			 * We need to schedule the posting of buffers to the pegs.
+			 */
+			rcv_desc->begin_alloc = index;
+			DPRINTK(ERR, "netxen_post_rx_buffers: "
+				" allocated only %d buffers\n", count);
+			break;
+		}
+
+		count++;	/* now there should be no failure */
+		pdesc = &rcv_desc->desc_head[producer];
+
+#if defined(XGB_DEBUG)
+		*(unsigned long *)(skb->head) = 0xc0debabe;
+		if (skb_is_nonlinear(skb)) {
+			printk("Allocated SKB @%p is nonlinear\n", skb);
+		}
+#endif
+		skb_reserve(skb, 2);
+		/* This will be setup when we receive the
+		 * buffer after it has been filled  FSL  TBD TBD
+		 * skb->dev = netdev;
+		 */
+		dma = pci_map_single(pdev, skb->data, rcv_desc->dma_size,
+				     PCI_DMA_FROMDEVICE);
+		pdesc->addr_buffer = dma;
+		buffer->skb = skb;
+		buffer->state = NETXEN_BUFFER_BUSY;
+		buffer->dma = dma;
+		/* make a rcv descriptor  */
+		pdesc->reference_handle = buffer->ref_handle;
+		pdesc->buffer_length = rcv_desc->dma_size;
+		DPRINTK(INFO, "done writing descriptor\n");
+		producer =
+		    get_next_index(producer, rcv_desc->max_rx_desc_count);
+		index = get_next_index(index, rcv_desc->max_rx_desc_count);
+		buffer = &rcv_desc->rx_buf_arr[index];
+	}
+	/* if we did allocate buffers, then write the count to Phantom */
+	if (count) {
+		rcv_desc->begin_alloc = index;
+		rcv_desc->rcv_pending += count;
+		adapter->stats.lastposted = count;
+		adapter->stats.posted += count;
+		rcv_desc->producer = producer;
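+		/* Ring the doorbell only once at least 32 buffers have been
+		 * recycled, which batches the producer updates. */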
+		if (rcv_desc->rcv_free >= 32) {
+			rcv_desc->rcv_free = 0;
+			/* Window = 1 */
+			writel((producer - 1) &
+			       (rcv_desc->max_rx_desc_count - 1),
+			       NETXEN_CRB_NORMALIZE(adapter,
+						    recv_crb_registers[0].
+						    rcv_desc_crb[ringid].
+						    crb_rcv_producer_offset));
+			/*
+			 * Write a doorbell msg to tell phanmon of change in
+			 * receive ring producer
+			 */
+			netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
+			netxen_set_msg_privid(msg);
+			netxen_set_msg_count(msg,
+					     ((producer -
+					       1) & (rcv_desc->
+						     max_rx_desc_count - 1)));
+			netxen_set_msg_ctxid(msg, 0);
+			netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
+			writel(msg,
+			       DB_NORMALIZE(adapter,
+					    NETXEN_RCV_PRODUCER_OFFSET));
+		}
+	}
+}
+
+void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ctx,
+				 uint32_t ringid)
+{
+	struct pci_dev *pdev = adapter->ahw.pdev;
+	struct sk_buff *skb;
+	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
+	struct netxen_rcv_desc_ctx *rcv_desc = NULL;
+	u32 producer;
+	struct rcv_desc *pdesc;
+	struct netxen_rx_buffer *buffer;
+	int count = 0;
+	int index = 0;
+
+	adapter->stats.post_called++;
+	rcv_desc = &recv_ctx->rcv_desc[ringid];
+
+	producer = rcv_desc->producer;
+	index = rcv_desc->begin_alloc;
+	buffer = &rcv_desc->rx_buf_arr[index];
+	/* We can start writing rx descriptors into the phantom memory. */
+	while (buffer->state == NETXEN_BUFFER_FREE) {
+		skb = dev_alloc_skb(rcv_desc->skb_size);
+		if (unlikely(!skb)) {
+			/*
+			 * We need to schedule the posting of buffers to the pegs.
+			 */
+			rcv_desc->begin_alloc = index;
+			DPRINTK(ERR, "netxen_post_rx_buffers_nodb: "
+				" allocated only %d buffers\n", count);
+			break;
+		}
+		count++;	/* now there should be no failure */
+		pdesc = &rcv_desc->desc_head[producer];
+		skb_reserve(skb, 2);
+		/* 
+		 * This will be setup when we receive the
+		 * buffer after it has been filled
+		 * skb->dev = netdev;
+		 */
+		buffer->skb = skb;
+		buffer->state = NETXEN_BUFFER_BUSY;
+		buffer->dma = pci_map_single(pdev, skb->data,
+					     rcv_desc->dma_size,
+					     PCI_DMA_FROMDEVICE);
+
+		/* make a rcv descriptor  */
+		pdesc->reference_handle = le16_to_cpu(buffer->ref_handle);
+		pdesc->buffer_length = le16_to_cpu(rcv_desc->dma_size);
+		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+		DPRINTK(INFO, "done writing descriptor\n");
+		producer =
+		    get_next_index(producer, rcv_desc->max_rx_desc_count);
+		index = get_next_index(index, rcv_desc->max_rx_desc_count);
+		buffer = &rcv_desc->rx_buf_arr[index];
+	}
+
+	/* if we did allocate buffers, then write the count to Phantom */
+	if (count) {
+		rcv_desc->begin_alloc = index;
+		rcv_desc->rcv_pending += count;
+		adapter->stats.lastposted = count;
+		adapter->stats.posted += count;
+		rcv_desc->producer = producer;
+		if (rcv_desc->rcv_free >= 32) {
+			rcv_desc->rcv_free = 0;
+			/* Window = 1 */
+			writel((producer - 1) &
+			       (rcv_desc->max_rx_desc_count - 1),
+			       NETXEN_CRB_NORMALIZE(adapter,
+						    recv_crb_registers[0].
+						    rcv_desc_crb[ringid].
+						    crb_rcv_producer_offset));
+			wmb();
+		}
+	}
+}
+
+int netxen_nic_tx_has_work(struct netxen_adapter *adapter)
+{
+	if (find_diff_among(adapter->last_cmd_consumer,
+			    adapter->cmd_producer,
+			    adapter->max_tx_desc_count) > 0)
+		return 1;
+
+	return 0;
+}
+
+int
+netxen_nic_fill_statistics(struct netxen_adapter *adapter,
+			   struct netxen_port *port,
+			   struct netxen_statistics *netxen_stats)
+{
+	void __iomem *addr;
+
+	if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
+		netxen_nic_pci_change_crbwindow(adapter, 0);
+		NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_TX_BYTE_CNT,
+					   &(netxen_stats->tx_bytes));
+		NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_TX_FRAME_CNT,
+					   &(netxen_stats->tx_packets));
+		NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_RX_BYTE_CNT,
+					   &(netxen_stats->rx_bytes));
+		NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_RX_FRAME_CNT,
+					   &(netxen_stats->rx_packets));
+		NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_AGGR_ERROR_CNT,
+					   &(netxen_stats->rx_errors));
+		NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_CRC_ERROR_CNT,
+					   &(netxen_stats->rx_crc_errors));
+		NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR,
+					   &(netxen_stats->
+					     rx_long_length_error));
+		NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR,
+					   &(netxen_stats->
+					     rx_short_length_error));
+
+		netxen_nic_pci_change_crbwindow(adapter, 1);
+	} else {
+		spin_lock_bh(&adapter->tx_lock);
+		netxen_stats->tx_bytes = port->stats.txbytes;
+		netxen_stats->tx_packets = port->stats.xmitedframes +
+		    port->stats.xmitfinished;
+		netxen_stats->rx_bytes = port->stats.rxbytes;
+		netxen_stats->rx_packets = port->stats.no_rcv;
+		netxen_stats->rx_errors = port->stats.rcvdbadskb;
+		netxen_stats->tx_errors = port->stats.nocmddescriptor;
+		netxen_stats->rx_short_length_error = port->stats.uplcong;
+		netxen_stats->rx_long_length_error = port->stats.uphcong;
+		netxen_stats->rx_crc_errors = 0;
+		netxen_stats->rx_mac_errors = 0;
+		spin_unlock_bh(&adapter->tx_lock);
+	}
+	return 0;
+}
+
+void netxen_nic_clear_stats(struct netxen_adapter *adapter)
+{
+	struct netxen_port *port;
+	int port_num;
+
+	memset(&adapter->stats, 0, sizeof(adapter->stats));
+	for (port_num = 0; port_num < adapter->ahw.max_ports; port_num++) {
+		port = adapter->port[port_num];
+		memset(&port->stats, 0, sizeof(port->stats));
+	}
+}
+
+int
+netxen_nic_clear_statistics(struct netxen_adapter *adapter,
+			    struct netxen_port *port)
+{
+	int data = 0;
+
+	netxen_nic_pci_change_crbwindow(adapter, 0);
+
+	netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_TX_BYTE_CNT, &data);
+	netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_TX_FRAME_CNT,
+				    &data);
+	netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_RX_BYTE_CNT, &data);
+	netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_RX_FRAME_CNT,
+				    &data);
+	netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_AGGR_ERROR_CNT,
+				    &data);
+	netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_CRC_ERROR_CNT,
+				    &data);
+	netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR,
+				    &data);
+	netxen_nic_locked_write_reg(adapter, NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR,
+				    &data);
+
+	netxen_nic_pci_change_crbwindow(adapter, 1);
+	netxen_nic_clear_stats(adapter);
+	return 0;
+}
+
+int
+netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data,
+		    struct netxen_port *port)
+{
+	struct netxen_nic_ioctl_data data;
+	struct netxen_nic_ioctl_data *up_data;
+	int retval = 0;
+	struct netxen_statistics netxen_stats;
+
+	up_data = (void *)u_data;
+
+	DPRINTK(INFO, "doing ioctl for %p\n", adapter);
+	if (copy_from_user(&data, (void __user *)up_data, sizeof(data))) {
+		/* evil user tried to crash the kernel */
+		DPRINTK(ERR, "bad copy from userland: %d\n", (int)sizeof(data));
+		retval = -EFAULT;
+		goto error_out;
+	}
+
+	/* Shouldn't access beyond the legal limits of the "char u[64]" member */
+	if (!data.ptr && (data.size > sizeof(data.u))) {
+		/* evil user tried to crash the kernel */
+		DPRINTK(ERR, "bad size: %d\n", data.size);
+		retval = -EFAULT;
+		goto error_out;
+	}
+
+	switch (data.cmd) {
+	case netxen_nic_cmd_pci_read:
+		if ((retval = netxen_nic_hw_read_ioctl(adapter, data.off,
+						       &(data.u), data.size)))
+			goto error_out;
+		if (copy_to_user
+		    ((void __user *)&(up_data->u), &(data.u), data.size)) {
+			DPRINTK(ERR, "bad copy to userland: %d\n",
+				(int)sizeof(data));
+			retval = -EFAULT;
+			goto error_out;
+		}
+		data.rv = 0;
+		break;
+
+	case netxen_nic_cmd_pci_write:
+		if ((retval = netxen_nic_hw_write_ioctl(adapter, data.off,
+							&(data.u), data.size)))
+			goto error_out;
+		data.rv = 0;
+		break;
+
+	case netxen_nic_cmd_pci_mem_read:
+		if (netxen_nic_pci_mem_read_ioctl(adapter, data.off, &(data.u),
+						  data.size)) {
+			DPRINTK(ERR, "Failed to read the data.\n");
+			retval = -EFAULT;
+			goto error_out;
+		}
+		if (copy_to_user
+		    ((void __user *)&(up_data->u), &(data.u), data.size)) {
+			DPRINTK(ERR, "bad copy to userland: %d\n",
+				(int)sizeof(data));
+			retval = -EFAULT;
+			goto error_out;
+		}
+		data.rv = 0;
+		break;
+
+	case netxen_nic_cmd_pci_mem_write:
+		if ((retval = netxen_nic_pci_mem_write_ioctl(adapter, data.off,
+							     &(data.u),
+							     data.size)))
+			goto error_out;
+		data.rv = 0;
+		break;
+
+	case netxen_nic_cmd_pci_config_read:
+		switch (data.size) {
+		case 1:
+			data.rv = pci_read_config_byte(adapter->ahw.pdev,
+						       data.off,
+						       (char *)&(data.u));
+			break;
+		case 2:
+			data.rv = pci_read_config_word(adapter->ahw.pdev,
+						       data.off,
+						       (short *)&(data.u));
+			break;
+		case 4:
+			data.rv = pci_read_config_dword(adapter->ahw.pdev,
+							data.off,
+							(u32 *) & (data.u));
+			break;
+		}
+		if (copy_to_user
+		    ((void __user *)&(up_data->u), &(data.u), data.size)) {
+			DPRINTK(ERR, "bad copy to userland: %d\n",
+				(int)sizeof(data));
+			retval = -EFAULT;
+			goto error_out;
+		}
+		break;
+
+	case netxen_nic_cmd_pci_config_write:
+		switch (data.size) {
+		case 1:
+			data.rv = pci_write_config_byte(adapter->ahw.pdev,
+							data.off,
+							*(char *)&(data.u));
+			break;
+		case 2:
+			data.rv = pci_write_config_word(adapter->ahw.pdev,
+							data.off,
+							*(short *)&(data.u));
+			break;
+		case 4:
+			data.rv = pci_write_config_dword(adapter->ahw.pdev,
+							 data.off,
+							 *(u32 *) & (data.u));
+			break;
+		}
+		break;
+
+	case netxen_nic_cmd_get_stats:
+		data.rv =
+		    netxen_nic_fill_statistics(adapter, port, &netxen_stats);
+		if (copy_to_user
+		    ((void __user *)(up_data->ptr), (void *)&netxen_stats,
+		     sizeof(struct netxen_statistics))) {
+			DPRINTK(ERR, "bad copy to userland: %d\n",
+				(int)sizeof(netxen_stats));
+			retval = -EFAULT;
+			goto error_out;
+		}
+		up_data->rv = data.rv;
+		break;
+
+	case netxen_nic_cmd_clear_stats:
+		data.rv = netxen_nic_clear_statistics(adapter, port);
+		up_data->rv = data.rv;
+		break;
+
+	case netxen_nic_cmd_get_version:
+		if (copy_to_user
+		    ((void __user *)&(up_data->u), NETXEN_NIC_LINUX_VERSIONID,
+		     sizeof(NETXEN_NIC_LINUX_VERSIONID))) {
+			DPRINTK(ERR, "bad copy to userland: %d\n",
+				(int)sizeof(data));
+			retval = -EFAULT;
+			goto error_out;
+		}
+		break;
+
+	default:
+		DPRINTK(INFO, "bad command %d for %p\n", data.cmd, adapter);
+		retval = -EOPNOTSUPP;
+		goto error_out;
+	}
+	put_user(data.rv, (&(up_data->rv)));
+	DPRINTK(INFO, "done ioctl for %p well.\n", adapter);
+
+      error_out:
+	return retval;
+}
diff --git a/drivers/net/netxen/netxen_nic_ioctl.h b/drivers/net/netxen/netxen_nic_ioctl.h
new file mode 100644
index 0000000..1221fa5
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_ioctl.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ *    info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ */
+
+#ifndef __NETXEN_NIC_IOCTL_H__
+#define __NETXEN_NIC_IOCTL_H__
+
+#include <linux/sockios.h>
+
+#define NETXEN_CMD_START	SIOCDEVPRIVATE
+#define NETXEN_NIC_CMD		(NETXEN_CMD_START + 1)
+#define NETXEN_NIC_NAME		(NETXEN_CMD_START + 2)
+#define NETXEN_NIC_NAME_LEN	16
+#define NETXEN_NIC_NAME_RSP	"NETXEN-UNM"
+
+typedef enum {
+	netxen_nic_cmd_none = 0,
+	netxen_nic_cmd_pci_read,
+	netxen_nic_cmd_pci_write,
+	netxen_nic_cmd_pci_mem_read,
+	netxen_nic_cmd_pci_mem_write,
+	netxen_nic_cmd_pci_config_read,
+	netxen_nic_cmd_pci_config_write,
+	netxen_nic_cmd_get_stats,
+	netxen_nic_cmd_clear_stats,
+	netxen_nic_cmd_get_version
+} netxen_nic_ioctl_cmd_t;
+
+struct netxen_nic_ioctl_data {
+	u32 cmd;
+	u32 unused1;
+	u64 off;
+	u32 size;
+	u32 rv;
+	char u[64];
+	void *ptr;
+};
+
+struct netxen_statistics {
+	u64 rx_packets;
+	u64 tx_packets;
+	u64 rx_bytes;
+	u64 rx_errors;
+	u64 tx_bytes;
+	u64 tx_errors;
+	u64 rx_crc_errors;
+	u64 rx_short_length_error;
+	u64 rx_long_length_error;
+	u64 rx_mac_errors;
+};
+
+#endif				/* __NETXEN_NIC_IOCTL_H__ */
diff --git a/drivers/net/netxen/netxen_nic_isr.c b/drivers/net/netxen/netxen_nic_isr.c
new file mode 100644
index 0000000..1b45f50
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_isr.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *                            
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *                                   
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ *    info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ */
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+#include "netxen_nic_phan_reg.h"
+
+/*
+ * netxen_nic_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ */
+struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
+{
+	struct netxen_port *port = netdev_priv(netdev);
+	struct net_device_stats *stats = &port->net_stats;
+
+	memset(stats, 0, sizeof(*stats));
+
+	/* total packets received   */
+	stats->rx_packets = port->stats.no_rcv;
+	/* total packets transmitted    */
+	stats->tx_packets = port->stats.xmitedframes + port->stats.xmitfinished;
+	/* total bytes received     */
+	stats->rx_bytes = port->stats.rxbytes;
+	/* total bytes transmitted  */
+	stats->tx_bytes = port->stats.txbytes;
+	/* bad packets received     */
+	stats->rx_errors = port->stats.rcvdbadskb;
+	/* packet transmit problems */
+	stats->tx_errors = port->stats.nocmddescriptor;
+	/* no space in linux buffers    */
+	stats->rx_dropped = port->stats.updropped;
+	/* no space available in linux  */
+	stats->tx_dropped = port->stats.txdropped;
+
+	return stats;
+}
+
+void netxen_indicate_link_status(struct netxen_adapter *adapter, u32 portno,
+				 u32 link)
+{
+	struct net_device *netdev = (adapter->port[portno])->netdev;
+
+	if (link)
+		netif_carrier_on(netdev);
+	else
+		netif_carrier_off(netdev);
+}
+
+void netxen_handle_port_int(struct netxen_adapter *adapter, u32 portno,
+			    u32 enable)
+{
+	__le32 int_src;
+	struct netxen_port *port;
+
+	/*  This should clear the interrupt source */
+	if (adapter->phy_read)
+		adapter->phy_read(adapter, portno,
+				  NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS,
+				  &int_src);
+	if (int_src == 0) {
+		DPRINTK(INFO, "No phy interrupts for port #%d\n", portno);
+		return;
+	}
+	if (adapter->disable_phy_interrupts)
+		adapter->disable_phy_interrupts(adapter, portno);
+
+	port = adapter->port[portno];
+
+	if (netxen_get_phy_int_jabber(int_src))
+		DPRINTK(INFO, "Jabber interrupt \n");
+
+	if (netxen_get_phy_int_polarity_changed(int_src))
+		DPRINTK(INFO, "POLARITY CHANGED int \n");
+
+	if (netxen_get_phy_int_energy_detect(int_src))
+		DPRINTK(INFO, "ENERGY DETECT INT \n");
+
+	if (netxen_get_phy_int_downshift(int_src))
+		DPRINTK(INFO, "DOWNSHIFT INT \n");
+	/* write it down later.. */
+	if ((netxen_get_phy_int_speed_changed(int_src))
+	    || (netxen_get_phy_int_link_status_changed(int_src))) {
+		__le32 status;
+
+		DPRINTK(INFO, "SPEED CHANGED OR LINK STATUS CHANGED \n");
+
+		if (adapter->phy_read
+		    && adapter->phy_read(adapter, portno,
+					 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+					 &status) == 0) {
+			if (netxen_get_phy_int_link_status_changed(int_src)) {
+				if (netxen_get_phy_link(status)) {
+					netxen_niu_gbe_init_port(adapter,
+								 portno);
+					printk("%s: %s Link UP\n",
+					       netxen_nic_driver_name,
+					       port->netdev->name);
+
+				} else {
+					printk("%s: %s Link DOWN\n",
+					       netxen_nic_driver_name,
+					       port->netdev->name);
+				}
+				netxen_indicate_link_status(adapter, portno,
+							    netxen_get_phy_link
+							    (status));
+			}
+		}
+	}
+	if (adapter->enable_phy_interrupts)
+		adapter->enable_phy_interrupts(adapter, portno);
+}
+
+void netxen_nic_isr_other(struct netxen_adapter *adapter)
+{
+	u32 portno;
+	u32 val, linkup, qg_linksup;
+
+	/* verify the offset */
+	val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE));
+	if (val == adapter->ahw.qg_linksup)
+		return;
+
+	qg_linksup = adapter->ahw.qg_linksup;
+	adapter->ahw.qg_linksup = val;
+	DPRINTK(INFO, "link update 0x%08x\n", val);
+	for (portno = 0; portno < NETXEN_NIU_MAX_GBE_PORTS; portno++) {
+		linkup = val & 1;
+		if (linkup != (qg_linksup & 1)) {
+			printk(KERN_INFO "%s: PORT %d link %s\n",
+			       netxen_nic_driver_name, portno,
+			       ((linkup == 0) ? "down" : "up"));
+			netxen_indicate_link_status(adapter, portno, linkup);
+			if (linkup)
+				netxen_nic_set_link_parameters(adapter->
+							       port[portno]);
+
+		}
+		val = val >> 1;
+		qg_linksup = qg_linksup >> 1;
+	}
+
+	adapter->stats.otherints++;
+
+}
+
+void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter)
+{
+	netxen_nic_isr_other(adapter);
+}
+
+void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter)
+{
+	struct net_device *netdev = adapter->port[0]->netdev;
+	u32 val;
+
+	/* WINDOW = 1 */
+	val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE));
+
+	if (adapter->ahw.xg_linkup == 1 && val != XG_LINK_UP) {
+		printk(KERN_INFO "%s: %s NIC Link is down\n",
+		       netxen_nic_driver_name, netdev->name);
+		adapter->ahw.xg_linkup = 0;
+		/* read twice to clear sticky bits */
+		/* WINDOW = 0 */
+		netxen_nic_read_w0(adapter, NETXEN_NIU_XG_STATUS, &val);
+		netxen_nic_read_w0(adapter, NETXEN_NIU_XG_STATUS, &val);
+
+		if ((val & 0xffb) != 0xffb) {
+			printk(KERN_INFO "%s ISR: Sync/Align BAD: 0x%08x\n",
+			       netxen_nic_driver_name, val);
+		}
+	} else if (adapter->ahw.xg_linkup == 0 && val == XG_LINK_UP) {
+		printk(KERN_INFO "%s: %s NIC Link is up\n",
+		       netxen_nic_driver_name, netdev->name);
+		adapter->ahw.xg_linkup = 1;
+	}
+}
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
new file mode 100644
index 0000000..575b71b
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -0,0 +1,1210 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ *    info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ *
+ *
+ *  Main source file for NetXen NIC Driver on Linux
+ *
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include "netxen_nic_hw.h"
+
+#include "netxen_nic.h"
+#define DEFINE_GLOBAL_RECV_CRB
+#include "netxen_nic_phan_reg.h"
+#include "netxen_nic_ioctl.h"
+
+#include <linux/dma-mapping.h>
+
+#define PHAN_VENDOR_ID 0x4040
+
+MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
+
+char netxen_nic_driver_name[] = "netxen-nic";
+static char netxen_nic_driver_string[] = "NetXen Network Driver version "
+    NETXEN_NIC_LINUX_VERSIONID;
+
+struct netxen_adapter *g_adapter = NULL;
+
+#define NETXEN_NETDEV_WEIGHT 120
+#define NETXEN_ADAPTER_UP_MAGIC 777
+#define NETXEN_NIC_PEG_TUNE 0
+
+u8 nx_p2_id = NX_P2_C0;
+
+#define DMA_32BIT_MASK	0x00000000ffffffffULL
+#define DMA_35BIT_MASK	0x00000007ffffffffULL
+
+/* Local functions to NetXen NIC driver */
+static int __devinit netxen_nic_probe(struct pci_dev *pdev,
+				      const struct pci_device_id *ent);
+static void __devexit netxen_nic_remove(struct pci_dev *pdev);
+static int netxen_nic_open(struct net_device *netdev);
+static int netxen_nic_close(struct net_device *netdev);
+static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
+static void netxen_tx_timeout(struct net_device *netdev);
+static void netxen_tx_timeout_task(struct work_struct *work);
+static void netxen_watchdog(unsigned long);
+static int netxen_handle_int(struct netxen_adapter *, struct net_device *);
+static int netxen_nic_ioctl(struct net_device *netdev,
+			    struct ifreq *ifr, int cmd);
+static int netxen_nic_poll(struct net_device *dev, int *budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netxen_nic_poll_controller(struct net_device *netdev);
+#endif
+static irqreturn_t netxen_intr(int irq, void *data);
+
+/*  PCI Device ID Table  */
+static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
+	{PCI_DEVICE(0x4040, 0x0001)},
+	{PCI_DEVICE(0x4040, 0x0002)},
+	{PCI_DEVICE(0x4040, 0x0003)},
+	{PCI_DEVICE(0x4040, 0x0004)},
+	{PCI_DEVICE(0x4040, 0x0005)},
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
+
+struct workqueue_struct *netxen_workq;
+
+/*
+ * netxen_nic_probe()
+ *
+ * The Linux system will invoke this after identifying the vendor ID and
+ * device Id in the pci_tbl supported by this module.
+ *
+ * A quad port card has one operational PCI config space, (function 0),
+ * which is used to access all four ports.
+ *
+ * This routine will initialize the adapter, and setup the global parameters
+ * along with the port's specific structure.
+ */
+static int __devinit
+netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct net_device *netdev = NULL;
+	struct netxen_adapter *adapter = NULL;
+	struct netxen_port *port = NULL;
+	void __iomem *mem_ptr0 = NULL;
+	void __iomem *mem_ptr1 = NULL;
+	void __iomem *mem_ptr2 = NULL;
+
+	u8 *db_ptr = NULL;
+	unsigned long mem_base, mem_len, db_base, db_len;
+	int pci_using_dac, i, err;
+	int ring;
+	struct netxen_recv_context *recv_ctx = NULL;
+	struct netxen_rcv_desc_ctx *rcv_desc = NULL;
+	struct netxen_cmd_buffer *cmd_buf_arr = NULL;
+	u64 mac_addr[FLASH_NUM_PORTS + 1];
+	int valid_mac = 0;
+	static int netxen_cards_found = 0;
+
+	printk(KERN_INFO "%s\n", netxen_nic_driver_string);
+	/* In current scheme, we use only PCI function 0 */
+	if (PCI_FUNC(pdev->devfn) != 0) {
+		DPRINTK(ERR, "NetXen function %d will not be enabled.\n",
+			PCI_FUNC(pdev->devfn));
+		return -ENODEV;
+	}
+	if ((err = pci_enable_device(pdev)))
+		return err;
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		err = -ENODEV;
+		goto err_out_disable_pdev;
+	}
+
+	if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
+		goto err_out_disable_pdev;
+
+	pci_set_master(pdev);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &nx_p2_id);
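+	/* Only the C1 revision is given the 35-bit DMA mask; all other
+	 * revisions fall back to 32-bit DMA addressing. */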
+	if (nx_p2_id == NX_P2_C1 &&
+	    (pci_set_dma_mask(pdev, DMA_35BIT_MASK) == 0) &&
+	    (pci_set_consistent_dma_mask(pdev, DMA_35BIT_MASK) == 0)) {
+		pci_using_dac = 1;
+	} else {
+		if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
+		    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)))
+			goto err_out_free_res;
+
+		pci_using_dac = 0;
+	}
+
+	/* remap phys address */
+	mem_base = pci_resource_start(pdev, 0);	/* 0 is for BAR 0 */
+	mem_len = pci_resource_len(pdev, 0);
+
+	/* 128 Meg of memory */
+	mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
+	mem_ptr1 =
+	    ioremap(mem_base + SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_SIZE);
+	mem_ptr2 =
+	    ioremap(mem_base + THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
+
+	if ((mem_ptr0 == 0UL) || (mem_ptr1 == 0UL) || (mem_ptr2 == 0UL)) {
+		DPRINTK(ERR,
+			"Cannot remap adapter memory, aborting: "
+			"0 -> %p, 1 -> %p, 2 -> %p\n",
+			mem_ptr0, mem_ptr1, mem_ptr2);
+
+		err = -EIO;
+		goto err_out_iounmap;
+	}
+	db_base = pci_resource_start(pdev, 4);	/* doorbell is on bar 4 */
+	db_len = pci_resource_len(pdev, 4);
+
+	if (db_len == 0) {
+		printk(KERN_ERR "%s: doorbell is disabled\n",
+		       netxen_nic_driver_name);
+		err = -EIO;
+		goto err_out_iounmap;
+	}
+	DPRINTK(INFO, "doorbell ioremap from %lx, size %lx\n", db_base,
+		db_len);
+
+	db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
+	if (db_ptr == 0UL) {
+		printk(KERN_ERR "%s: Failed to allocate doorbell map.\n",
+		       netxen_nic_driver_name);
+		err = -EIO;
+		goto err_out_iounmap;
+	}
+	DPRINTK(INFO, "doorbell ioremapped at %p\n", db_ptr);
+
+/*
 *      Allocate an adapter structure which will manage all the initialization
 *      as well as the common resources for all ports...
 *      all the ports will have a pointer to this adapter, and the adapter
 *      will have pointers to all the port structures.
+ */
+
+	/* One adapter structure for all 4 ports....   */
+	adapter = kzalloc(sizeof(struct netxen_adapter), GFP_KERNEL);
+	if (adapter == NULL) {
+		printk(KERN_ERR "%s: Could not allocate adapter memory:%d\n",
+		       netxen_nic_driver_name,
+		       (int)sizeof(struct netxen_adapter));
+		err = -ENOMEM;
+		goto err_out_dbunmap;
+	}
+
+	if (netxen_cards_found == 0) {
+		g_adapter = adapter;
+	}
+	adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS;
+	adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS;
+	adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS;
+	adapter->max_lro_rx_desc_count = MAX_LRO_RCV_DESCRIPTORS;
+
+	pci_set_drvdata(pdev, adapter);
+
+	cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE);
+	if (cmd_buf_arr == NULL) {
+		printk(KERN_ERR
+		       "%s: Could not allocate cmd_buf_arr memory:%d\n",
+		       netxen_nic_driver_name, (int)TX_RINGSIZE);
+		err = -ENOMEM;
+		goto err_out_free_adapter;
+	}
+	memset(cmd_buf_arr, 0, TX_RINGSIZE);
+
+	for (i = 0; i < MAX_RCV_CTX; ++i) {
+		recv_ctx = &adapter->recv_ctx[i];
+		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
+			rcv_desc = &recv_ctx->rcv_desc[ring];
+			switch (RCV_DESC_TYPE(ring)) {
+			case RCV_DESC_NORMAL:
+				rcv_desc->max_rx_desc_count =
+				    adapter->max_rx_desc_count;
+				rcv_desc->flags = RCV_DESC_NORMAL;
+				rcv_desc->dma_size = RX_DMA_MAP_LEN;
+				rcv_desc->skb_size = MAX_RX_BUFFER_LENGTH;
+				break;
+
+			case RCV_DESC_JUMBO:
+				rcv_desc->max_rx_desc_count =
+				    adapter->max_jumbo_rx_desc_count;
+				rcv_desc->flags = RCV_DESC_JUMBO;
+				rcv_desc->dma_size = RX_JUMBO_DMA_MAP_LEN;
+				rcv_desc->skb_size = MAX_RX_JUMBO_BUFFER_LENGTH;
+				break;
+
+			case RCV_RING_LRO:
+				rcv_desc->max_rx_desc_count =
+				    adapter->max_lro_rx_desc_count;
+				rcv_desc->flags = RCV_DESC_LRO;
+				rcv_desc->dma_size = RX_LRO_DMA_MAP_LEN;
+				rcv_desc->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
+				break;
+
+			}
+			rcv_desc->rx_buf_arr = (struct netxen_rx_buffer *)
+			    vmalloc(RCV_BUFFSIZE);
+
+			if (rcv_desc->rx_buf_arr == NULL) {
+				printk(KERN_ERR "%s: Could not allocate "
+				       "rcv_desc->rx_buf_arr memory:%d\n",
+				       netxen_nic_driver_name,
+				       (int)RCV_BUFFSIZE);
+				err = -ENOMEM;
+				goto err_out_free_rx_buffer;
+			}
+			memset(rcv_desc->rx_buf_arr, 0, RCV_BUFFSIZE);
+		}
+
+	}
+
+	adapter->cmd_buf_arr = cmd_buf_arr;
+	adapter->ahw.pci_base0 = mem_ptr0;
+	adapter->ahw.pci_base1 = mem_ptr1;
+	adapter->ahw.pci_base2 = mem_ptr2;
+	adapter->ahw.db_base = db_ptr;
+	adapter->ahw.db_len = db_len;
+	spin_lock_init(&adapter->tx_lock);
+	spin_lock_init(&adapter->lock);
+	netxen_initialize_adapter_sw(adapter);	/* initialize the buffers in adapter */
+#ifdef CONFIG_IA64
+	netxen_pinit_from_rom(adapter, 0);
+	udelay(500);
+	netxen_load_firmware(adapter);
+#endif
+
+	/*
+	 * Set the CRB window to invalid. If any register in window 0 is
+	 * accessed it should set the window to 0 and then reset it to 1.
+	 */
+	adapter->curr_window = 255;
+	/*
+	 *  Adapter in our case is quad port so initialize it before
+	 *  initializing the ports
+	 */
+	netxen_initialize_adapter_hw(adapter);	/* initialize the adapter */
+
+	netxen_initialize_adapter_ops(adapter);
+
+	init_timer(&adapter->watchdog_timer);
+	adapter->ahw.xg_linkup = 0;
+	adapter->watchdog_timer.function = &netxen_watchdog;
+	adapter->watchdog_timer.data = (unsigned long)adapter;
+	INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
+	adapter->ahw.pdev = pdev;
+	adapter->proc_cmd_buf_counter = 0;
+	adapter->ahw.revision_id = nx_p2_id;
+
+	if (pci_enable_msi(pdev)) {
+		adapter->flags &= ~NETXEN_NIC_MSI_ENABLED;
+		printk(KERN_WARNING "%s: unable to allocate MSI"
+		       " interrupt\n", netxen_nic_driver_name);
+	} else
+		adapter->flags |= NETXEN_NIC_MSI_ENABLED;
+
+	if (netxen_is_flash_supported(adapter) == 0 &&
+	    netxen_get_flash_mac_addr(adapter, mac_addr) == 0)
+		valid_mac = 1;
+	else
+		valid_mac = 0;
+
+	/*
+	 * Initialize all the CRB registers here.
+	 */
+	writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_PRODUCER_OFFSET));
+	writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_CONSUMER_OFFSET));
+	writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_LO));
+
+	/* do this before waking up pegs so that we have valid dummy dma addr */
+	err = netxen_initialize_adapter_offload(adapter);
+	if (err) {
+		goto err_out_free_dev;
+	}
+
+	/* Unlock the HW, prompting the boot sequence */
+	writel(1,
+	       NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE));
+
+	/* Handshake with the card before we register the devices. */
+	netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
+
+	/* initialize the all the ports */
+	adapter->active_ports = 0;
+
+	for (i = 0; i < adapter->ahw.max_ports; i++) {
+		netdev = alloc_etherdev(sizeof(struct netxen_port));
+		if (!netdev) {
+			printk(KERN_ERR "%s: could not allocate netdev for port"
+			       " %d\n", netxen_nic_driver_name, i + 1);
+			goto err_out_free_dev;
+		}
+
+		SET_MODULE_OWNER(netdev);
+		SET_NETDEV_DEV(netdev, &pdev->dev);
+
+		port = netdev_priv(netdev);
+		port->netdev = netdev;
+		port->pdev = pdev;
+		port->adapter = adapter;
+		port->portnum = i;	/* Gigabit port number from 0-3 */
+
+		netdev->open = netxen_nic_open;
+		netdev->stop = netxen_nic_close;
+		netdev->hard_start_xmit = netxen_nic_xmit_frame;
+		netdev->get_stats = netxen_nic_get_stats;
+		netdev->set_multicast_list = netxen_nic_set_multi;
+		netdev->set_mac_address = netxen_nic_set_mac;
+		netdev->change_mtu = netxen_nic_change_mtu;
+		netdev->do_ioctl = netxen_nic_ioctl;
+		netdev->tx_timeout = netxen_tx_timeout;
+		netdev->watchdog_timeo = HZ;
+
+		SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
+		netdev->poll = netxen_nic_poll;
+		netdev->weight = NETXEN_NETDEV_WEIGHT;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+		netdev->poll_controller = netxen_nic_poll_controller;
+#endif
+		/* ScatterGather support */
+		netdev->features = NETIF_F_SG;
+		netdev->features |= NETIF_F_IP_CSUM;
+		netdev->features |= NETIF_F_TSO;
+
+		if (pci_using_dac)
+			netdev->features |= NETIF_F_HIGHDMA;
+
+		if (valid_mac) {
+			unsigned char *p = (unsigned char *)&mac_addr[i];
+			netdev->dev_addr[0] = *(p + 5);
+			netdev->dev_addr[1] = *(p + 4);
+			netdev->dev_addr[2] = *(p + 3);
+			netdev->dev_addr[3] = *(p + 2);
+			netdev->dev_addr[4] = *(p + 1);
+			netdev->dev_addr[5] = *(p + 0);
+
+			memcpy(netdev->perm_addr, netdev->dev_addr,
+			       netdev->addr_len);
+			if (!is_valid_ether_addr(netdev->perm_addr)) {
+				printk(KERN_ERR "%s: Bad MAC address "
+				       "%02x:%02x:%02x:%02x:%02x:%02x.\n",
+				       netxen_nic_driver_name,
+				       netdev->dev_addr[0],
+				       netdev->dev_addr[1],
+				       netdev->dev_addr[2],
+				       netdev->dev_addr[3],
+				       netdev->dev_addr[4],
+				       netdev->dev_addr[5]);
+			} else {
+				if (adapter->macaddr_set)
+					adapter->macaddr_set(port,
+							     netdev->dev_addr);
+			}
+		}
+		adapter->netdev = netdev;
+		INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
+		netif_carrier_off(netdev);
+		netif_stop_queue(netdev);
+
+		if ((err = register_netdev(netdev))) {
+			printk(KERN_ERR "%s: register_netdev failed port #%d"
+			       " aborting\n", netxen_nic_driver_name, i + 1);
+			err = -EIO;
+			free_netdev(netdev);
+			goto err_out_free_dev;
+		}
+		adapter->port_count++;
+		adapter->port[i] = port;
+	}
+
+	/*
+	 * delay a while to ensure that the Pegs are up & running.
+	 * Otherwise, we might see some flaky behaviour.
+	 */
+	udelay(100);
+
+	switch (adapter->ahw.board_type) {
+	case NETXEN_NIC_GBE:
+		printk("%s: QUAD GbE board initialized\n",
+		       netxen_nic_driver_name);
+		break;
+
+	case NETXEN_NIC_XGBE:
+		printk("%s: XGbE board initialized\n", netxen_nic_driver_name);
+		break;
+	}
+
+	adapter->number = netxen_cards_found;
+	adapter->driver_mismatch = 0;
+
+	return 0;
+
+      err_out_free_dev:
+	if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
+		pci_disable_msi(pdev);
+	for (i = 0; i < adapter->port_count; i++) {
+		port = adapter->port[i];
+		if ((port) && (port->netdev)) {
+			unregister_netdev(port->netdev);
+			free_netdev(port->netdev);
+		}
+	}
+
+	netxen_free_adapter_offload(adapter);
+
+      err_out_free_rx_buffer:
+	for (i = 0; i < MAX_RCV_CTX; ++i) {
+		recv_ctx = &adapter->recv_ctx[i];
+		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
+			rcv_desc = &recv_ctx->rcv_desc[ring];
+			if (rcv_desc->rx_buf_arr != NULL) {
+				vfree(rcv_desc->rx_buf_arr);
+				rcv_desc->rx_buf_arr = NULL;
+			}
+		}
+	}
+	vfree(cmd_buf_arr);
+
+      err_out_free_adapter:
+	pci_set_drvdata(pdev, NULL);
+	kfree(adapter);
+
+      err_out_dbunmap:
+	if (db_ptr)
+		iounmap(db_ptr);
+
+      err_out_iounmap:
+	if (mem_ptr0)
+		iounmap(mem_ptr0);
+	if (mem_ptr1)
+		iounmap(mem_ptr1);
+	if (mem_ptr2)
+		iounmap(mem_ptr2);
+
+      err_out_free_res:
+	pci_release_regions(pdev);
+      err_out_disable_pdev:
+	pci_disable_device(pdev);
+	return err;
+}
+
+static void __devexit netxen_nic_remove(struct pci_dev *pdev)
+{
+	struct netxen_adapter *adapter;
+	struct netxen_port *port;
+	struct netxen_rx_buffer *buffer;
+	struct netxen_recv_context *recv_ctx;
+	struct netxen_rcv_desc_ctx *rcv_desc;
+	int i;
+	int ctxid, ring;
+
+	adapter = pci_get_drvdata(pdev);
+	if (adapter == NULL)
+		return;
+
+	netxen_nic_stop_all_ports(adapter);
+	/* leave the hw in the same state as reboot */
+	netxen_load_firmware(adapter);
+	netxen_free_adapter_offload(adapter);
+
+	udelay(500);		/* Delay for a while to drain the DMA engines */
+	for (i = 0; i < adapter->port_count; i++) {
+		port = adapter->port[i];
+		if ((port) && (port->netdev)) {
+			unregister_netdev(port->netdev);
+			free_netdev(port->netdev);
+		}
+	}
+
+	if ((adapter->flags & NETXEN_NIC_MSI_ENABLED))
+		pci_disable_msi(pdev);
+	pci_set_drvdata(pdev, NULL);
+	if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
+		netxen_free_hw_resources(adapter);
+
+	iounmap(adapter->ahw.db_base);
+	iounmap(adapter->ahw.pci_base0);
+	iounmap(adapter->ahw.pci_base1);
+	iounmap(adapter->ahw.pci_base2);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+
+	for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
+		recv_ctx = &adapter->recv_ctx[ctxid];
+		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
+			rcv_desc = &recv_ctx->rcv_desc[ring];
+			for (i = 0; i < rcv_desc->max_rx_desc_count; ++i) {
+				buffer = &(rcv_desc->rx_buf_arr[i]);
+				if (buffer->state == NETXEN_BUFFER_FREE)
+					continue;
+				pci_unmap_single(pdev, buffer->dma,
+						 rcv_desc->dma_size,
+						 PCI_DMA_FROMDEVICE);
+				if (buffer->skb != NULL)
+					dev_kfree_skb_any(buffer->skb);
+			}
+			vfree(rcv_desc->rx_buf_arr);
+		}
+	}
+
+	vfree(adapter->cmd_buf_arr);
+	kfree(adapter);
+}
+
+/*
+ * Called when a network interface is made active
+ * @returns 0 on success, negative value on failure
+ */
+static int netxen_nic_open(struct net_device *netdev)
+{
+	struct netxen_port *port = netdev_priv(netdev);
+	struct netxen_adapter *adapter = port->adapter;
+	int err = 0;
+	int ctx, ring;
+
+	if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) {
+		err = netxen_init_firmware(adapter);
+		if (err != 0) {
+			printk(KERN_ERR "Failed to init firmware\n");
+			return -EIO;
+		}
+		netxen_nic_flash_print(adapter);
+		if (adapter->init_niu)
+			adapter->init_niu(adapter);
+
+		/* set up all the resources for the Phantom... */
+		/* this includes the descriptors for rcv, tx, and status */
+		netxen_nic_clear_stats(adapter);
+		err = netxen_nic_hw_resources(adapter);
+		if (err) {
+			printk(KERN_ERR "Error in setting hw resources:%d\n",
+			       err);
+			return err;
+		}
+		if (adapter->init_port
+		    && adapter->init_port(adapter, port->portnum) != 0) {
+			printk(KERN_ERR "%s: Failed to initialize port %d\n",
+			       netxen_nic_driver_name, port->portnum);
+			netxen_free_hw_resources(adapter);
+			return -EIO;
+		}
+		for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+			for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++)
+				netxen_post_rx_buffers(adapter, ctx, ring);
+		}
+		adapter->irq = adapter->ahw.pdev->irq;
+		err = request_irq(adapter->ahw.pdev->irq, &netxen_intr,
+				  SA_SHIRQ | SA_SAMPLE_RANDOM, netdev->name,
+				  adapter);
+		if (err) {
+			printk(KERN_ERR "request_irq failed with: %d\n", err);
+			netxen_free_hw_resources(adapter);
+			return err;
+		}
+
+		adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
+	}
+	adapter->active_ports++;
+	if (adapter->active_ports == 1) {
+		if (!adapter->driver_mismatch)
+			mod_timer(&adapter->watchdog_timer, jiffies);
+
+		netxen_nic_enable_int(adapter);
+	}
+
+	/* Set the MAC address here again so that even if the Phantom
+	 * firmware overwrote it, we end up with the right value */
+	if (adapter->macaddr_set)
+		adapter->macaddr_set(port, netdev->dev_addr);
+	netxen_nic_set_link_parameters(port);
+
+	netxen_nic_set_multi(netdev);
+	if (adapter->set_mtu)
+		adapter->set_mtu(port, netdev->mtu);
+
+	if (!adapter->driver_mismatch)
+		netif_start_queue(netdev);
+
+	return 0;
+}
+
+/*
+ * netxen_nic_close - Disables a network interface entry point
+ */
+static int netxen_nic_close(struct net_device *netdev)
+{
+	struct netxen_port *port = netdev_priv(netdev);
+	struct netxen_adapter *adapter = port->adapter;
+	int i, j;
+	struct netxen_cmd_buffer *cmd_buff;
+	struct netxen_skb_frag *buffrag;
+
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+
+	adapter->active_ports--;
+
+	if (!adapter->active_ports) {
+		netxen_nic_disable_int(adapter);
+		if (adapter->irq)
+			free_irq(adapter->irq, adapter);
+		cmd_buff = adapter->cmd_buf_arr;
+		for (i = 0; i < adapter->max_tx_desc_count; i++) {
+			buffrag = cmd_buff->frag_array;
+			if (buffrag->dma) {
+				pci_unmap_single(port->pdev, buffrag->dma,
+						 buffrag->length,
+						 PCI_DMA_TODEVICE);
+				buffrag->dma = (u64) NULL;
+			}
+			for (j = 0; j < cmd_buff->frag_count; j++) {
+				buffrag++;
+				if (buffrag->dma) {
+					pci_unmap_page(port->pdev,
+						       buffrag->dma,
+						       buffrag->length,
+						       PCI_DMA_TODEVICE);
+					buffrag->dma = (u64) NULL;
+				}
+			}
+			/* Free the skb we received in netxen_nic_xmit_frame */
+			if (cmd_buff->skb) {
+				dev_kfree_skb_any(cmd_buff->skb);
+				cmd_buff->skb = NULL;
+			}
+			cmd_buff++;
+		}
+		FLUSH_SCHEDULED_WORK();
+		del_timer_sync(&adapter->watchdog_timer);
+	}
+
+	return 0;
+}
+
+static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct netxen_port *port = netdev_priv(netdev);
+	struct netxen_adapter *adapter = port->adapter;
+	struct netxen_hardware_context *hw = &adapter->ahw;
+	unsigned int first_seg_len = skb->len - skb->data_len;
+	struct netxen_skb_frag *buffrag;
+	unsigned int i;
+
+	u32 producer = 0;
+	u32 saved_producer = 0;
+	struct cmd_desc_type0 *hwdesc;
+	int k;
+	struct netxen_cmd_buffer *pbuf = NULL;
+	static int dropped_packet = 0;
+	int frag_count;
+	u32 local_producer = 0;
+	u32 max_tx_desc_count = 0;
+	u32 last_cmd_consumer = 0;
+	int no_of_desc;
+
+	port->stats.xmitcalled++;
+	frag_count = skb_shinfo(skb)->nr_frags + 1;
+
+	if (unlikely(skb->len <= 0)) {
+		dev_kfree_skb_any(skb);
+		port->stats.badskblen++;
+		return NETDEV_TX_OK;
+	}
+
+	if (frag_count > MAX_BUFFERS_PER_CMD) {
+		printk("%s: %s netxen_nic_xmit_frame: frag_count (%d) "
+		       "too large, can handle only %d frags\n",
+		       netxen_nic_driver_name, netdev->name,
+		       frag_count, MAX_BUFFERS_PER_CMD);
+		port->stats.txdropped++;
+		if ((++dropped_packet & 0xff) == 0xff)
+			printk("%s: %s dropped packets = %d\n",
+			       netxen_nic_driver_name, netdev->name,
+			       dropped_packet);
+
+		return NETDEV_TX_OK;
+	}
+
+	/*
+	 * Everything is set up. Now, we just need to transmit it out.
+	 * Note that we have to copy the contents of the buffer over to the
+	 * right place. Later on, this can be optimized out by decoupling the
+	 * producer index from the buffer index.
+	 */
+      retry_getting_window:
+	spin_lock_bh(&adapter->tx_lock);
+	if (adapter->total_threads == MAX_XMIT_PRODUCERS) {
+		spin_unlock_bh(&adapter->tx_lock);
+		/*
+		 * Yield CPU
+		 */
+		if (!in_atomic())
+			schedule();
+		else {
+			for (i = 0; i < 20; i++)
+				cpu_relax();	/* This is a nop instruction on i386 */
+		}
+		goto retry_getting_window;
+	}
+	local_producer = adapter->cmd_producer;
+	/* There are 4 fragments per descriptor */
+	no_of_desc = (frag_count + 3) >> 2;
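+	/*
+	 * For TSO we need one extra descriptor, and a second extra one if
+	 * the MAC/IP/TCP headers do not fit in a single command descriptor.
+	 */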
+	if (netdev->features & NETIF_F_TSO) {
+		if (skb_shinfo(skb)->gso_size > 0) {
+
+			no_of_desc++;
+			if (((skb->nh.iph)->ihl * sizeof(u32)) +
+			    ((skb->h.th)->doff * sizeof(u32)) +
+			    sizeof(struct ethhdr) >
+			    (sizeof(struct cmd_desc_type0) - 2)) {
+				no_of_desc++;
+			}
+		}
+	}
+	k = adapter->cmd_producer;
+	max_tx_desc_count = adapter->max_tx_desc_count;
+	last_cmd_consumer = adapter->last_cmd_consumer;
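+	/*
+	 * Ring-full check: make sure no_of_desc command descriptors are free
+	 * between the producer and the last consumed index, accounting for
+	 * wrap-around of the circular command ring.
+	 */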
+	if ((k + no_of_desc) >=
+	    ((last_cmd_consumer <= k) ? last_cmd_consumer + max_tx_desc_count :
+	     last_cmd_consumer)) {
+		port->stats.nocmddescriptor++;
+		DPRINTK(ERR, "No command descriptors available,"
+			" producer = %d, consumer = %d count=%llu,"
+			" dropping packet\n", producer,
+			adapter->last_cmd_consumer,
+			port->stats.nocmddescriptor);
+
+		netif_stop_queue(netdev);
+		port->flags |= NETXEN_NETDEV_STATUS;
+		spin_unlock_bh(&adapter->tx_lock);
+		return NETDEV_TX_BUSY;
+	}
+	k = get_index_range(k, max_tx_desc_count, no_of_desc);
+	adapter->cmd_producer = k;
+	adapter->total_threads++;
+	adapter->num_threads++;
+
+	spin_unlock_bh(&adapter->tx_lock);
+	/* Copy the descriptors into the hardware */
+	producer = local_producer;
+	saved_producer = producer;
+	hwdesc = &hw->cmd_desc_head[producer];
+	memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
+	/* Take skb->data itself */
+	pbuf = &adapter->cmd_buf_arr[producer];
+	if ((netdev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size > 0) {
+		pbuf->mss = skb_shinfo(skb)->gso_size;
+		hwdesc->mss = skb_shinfo(skb)->gso_size;
+	} else {
+		pbuf->mss = 0;
+		hwdesc->mss = 0;
+	}
+	pbuf->total_length = skb->len;
+	pbuf->skb = skb;
+	pbuf->cmd = TX_ETHER_PKT;
+	pbuf->frag_count = frag_count;
+	pbuf->port = port->portnum;
+	buffrag = &pbuf->frag_array[0];
+	buffrag->dma = pci_map_single(port->pdev, skb->data, first_seg_len,
+				      PCI_DMA_TODEVICE);
+	buffrag->length = first_seg_len;
+	netxen_set_cmd_desc_totallength(hwdesc, skb->len);
+	netxen_set_cmd_desc_num_of_buff(hwdesc, frag_count);
+	netxen_set_cmd_desc_opcode(hwdesc, TX_ETHER_PKT);
+
+	netxen_set_cmd_desc_port(hwdesc, port->portnum);
+	hwdesc->buffer1_length = cpu_to_le16(first_seg_len);
+	hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+
+	for (i = 1, k = 1; i < frag_count; i++, k++) {
+		struct skb_frag_struct *frag;
+		int len, temp_len;
+		unsigned long offset;
+		dma_addr_t temp_dma;
+
+		/* move to the next descriptor if needed */
+		if ((i & 0x3) == 0) {
+			k = 0;
+			producer = get_next_index(producer,
+						  adapter->max_tx_desc_count);
+			hwdesc = &hw->cmd_desc_head[producer];
+			memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
+		}
+		frag = &skb_shinfo(skb)->frags[i - 1];
+		len = frag->size;
+		offset = frag->page_offset;
+
+		temp_len = len;
+		temp_dma = pci_map_page(port->pdev, frag->page, offset,
+					len, PCI_DMA_TODEVICE);
+
+		buffrag++;
+		buffrag->dma = temp_dma;
+		buffrag->length = temp_len;
+
+		DPRINTK(INFO, "for loop. i=%d k=%d\n", i, k);
+		switch (k) {
+		case 0:
+			hwdesc->buffer1_length = cpu_to_le16(temp_len);
+			hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
+			break;
+		case 1:
+			hwdesc->buffer2_length = cpu_to_le16(temp_len);
+			hwdesc->addr_buffer2 = cpu_to_le64(temp_dma);
+			break;
+		case 2:
+			hwdesc->buffer3_length = cpu_to_le16(temp_len);
+			hwdesc->addr_buffer3 = cpu_to_le64(temp_dma);
+			break;
+		case 3:
+			hwdesc->buffer4_length = temp_len;
+			hwdesc->addr_buffer4 = cpu_to_le64(temp_dma);
+			break;
+		}
+		frag++;
+	}
+	producer = get_next_index(producer, adapter->max_tx_desc_count);
+
+	/* might change opcode to TX_TCP_LSO */
+	netxen_tso_check(adapter, &hw->cmd_desc_head[saved_producer], skb);
+
+	/* For LSO, we need to copy the MAC/IP/TCP headers into
+	 * the descriptor ring
+	 */
+	if (netxen_get_cmd_desc_opcode(&hw->cmd_desc_head[saved_producer])
+	    == TX_TCP_LSO) {
+		int hdr_len, first_hdr_len, more_hdr;
+		hdr_len = hw->cmd_desc_head[saved_producer].total_hdr_length;
+		if (hdr_len > (sizeof(struct cmd_desc_type0) - 2)) {
+			first_hdr_len = sizeof(struct cmd_desc_type0) - 2;
+			more_hdr = 1;
+		} else {
+			first_hdr_len = hdr_len;
+			more_hdr = 0;
+		}
+		/* copy the MAC/IP/TCP headers to the cmd descriptor list */
+		hwdesc = &hw->cmd_desc_head[producer];
+
+		/* copy as much of the header as fits in the first descriptor */
+		memcpy(((void *)hwdesc) + 2,
+		       (void *)(skb->data), first_hdr_len);
+		producer = get_next_index(producer, max_tx_desc_count);
+
+		if (more_hdr) {
+			hwdesc = &hw->cmd_desc_head[producer];
+			/* copy the next 64 bytes - should be enough except
+			 * for pathological case
+			 */
+			memcpy((void *)hwdesc, (void *)(skb->data) +
+			       first_hdr_len, hdr_len - first_hdr_len);
+			producer = get_next_index(producer, max_tx_desc_count);
+		}
+	}
+	spin_lock_bh(&adapter->tx_lock);
+	port->stats.txbytes +=
+	    netxen_get_cmd_desc_totallength(&hw->cmd_desc_head[saved_producer]);
+	/* Update the adapter state, taking into account how many producer
+	   threads are currently working */
+	if ((--adapter->num_threads) == 0) {
+		/* This is the last thread */
+		u32 crb_producer = adapter->cmd_producer;
+		writel(crb_producer,
+		       NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_PRODUCER_OFFSET));
+		wmb();
+		adapter->total_threads = 0;
+	}
+
+	port->stats.xmitfinished++;
+	spin_unlock_bh(&adapter->tx_lock);
+
+	netdev->trans_start = jiffies;
+
+	DPRINTK(INFO, "wrote CMD producer %x to phantom\n", producer);
+
+	DPRINTK(INFO, "Done. Send\n");
+	return NETDEV_TX_OK;
+}
+
+static void netxen_watchdog(unsigned long v)
+{
+	struct netxen_adapter *adapter = (struct netxen_adapter *)v;
+	if (adapter != g_adapter) {
+		printk("%s: ***BUG*** adapter[%p] != g_adapter[%p]\n",
+		       __FUNCTION__, adapter, g_adapter);
+		return;
+	}
+
+	SCHEDULE_WORK(&adapter->watchdog_task);
+}
+
+static void netxen_tx_timeout(struct net_device *netdev)
+{
+	struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
+
+	SCHEDULE_WORK(&port->adapter->tx_timeout_task);
+}
+
+static void netxen_tx_timeout_task(struct work_struct *work)
+{
+	struct netxen_adapter *adapter =
+		container_of(work, struct netxen_adapter, tx_timeout_task);
+	struct net_device *netdev = adapter->netdev;
+	unsigned long flags;
+
+	printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
+	       netxen_nic_driver_name, netdev->name);
+
+	spin_lock_irqsave(&adapter->lock, flags);
+	netxen_nic_close(netdev);
+	netxen_nic_open(netdev);
+	spin_unlock_irqrestore(&adapter->lock, flags);
+	netdev->trans_start = jiffies;
+	netif_wake_queue(netdev);
+}
+
+static int
+netxen_handle_int(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+	u32 ret = 0;
+
+	DPRINTK(INFO, "Entered handle ISR\n");
+
+	adapter->stats.ints++;
+
+	if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) {
+		int count = 0;
+		u32 mask;
+		mask = readl(pci_base_offset(adapter, ISR_INT_VECTOR));
+		if ((mask & 0x80) == 0) {
+			/* not our interrupt */
+			return ret;
+		}
+		netxen_nic_disable_int(adapter);
+		/* Window = 0 or 1 */
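+		/*
+		 * Keep acknowledging the interrupt target status until the
+		 * vector bit (0x80) clears, giving up after 32 attempts.
+		 */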
+		do {
+			writel(0xffffffff, PCI_OFFSET_SECOND_RANGE(adapter,
+						ISR_INT_TARGET_STATUS));
+			mask = readl(pci_base_offset(adapter, ISR_INT_VECTOR));
+		} while (((mask & 0x80) != 0) && (++count < 32));
+		if ((mask & 0x80) != 0)
+			printk("Could not disable interrupt completely\n");
+
+	}
+	adapter->stats.hostints++;
+
+	if (netxen_nic_rx_has_work(adapter) || netxen_nic_tx_has_work(adapter)) {
+		if (netif_rx_schedule_prep(netdev)) {
+			/*
+			 * Interrupts are already disabled.
+			 */
+			__netif_rx_schedule(netdev);
+		} else {
+			static unsigned int intcount = 0;
+			if ((++intcount & 0xfff) == 0xfff)
+				printk(KERN_ERR
+				       "%s: %s interrupt %d while in poll\n",
+				       netxen_nic_driver_name, netdev->name,
+				       intcount);
+		}
+		ret = 1;
+	}
+
+	if (ret == 0) {
+		netxen_nic_enable_int(adapter);
+	}
+
+	return ret;
+}
+
+/*
+ * netxen_intr - Interrupt Handler
+ * @irq: interrupt number
+ * data points to adapter structure (which may be handling more than 1 port)
+ */
+irqreturn_t netxen_intr(int irq, void *data)
+{
+	struct netxen_adapter *adapter;
+	struct netxen_port *port;
+	struct net_device *netdev;
+	int i;
+
+	if (unlikely(!irq)) {
+		return IRQ_NONE;	/* Not our interrupt */
+	}
+
+	adapter = (struct netxen_adapter *)data;
+	for (i = 0; i < adapter->ahw.max_ports; i++) {
+		port = adapter->port[i];
+		netdev = port->netdev;
+
+		/* process our status queue (for all 4 ports) */
+		if (netif_running(netdev)) {
+			netxen_handle_int(adapter, netdev);
+			break;
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int netxen_nic_poll(struct net_device *netdev, int *budget)
+{
+	struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
+	struct netxen_adapter *adapter = port->adapter;
+	int work_to_do = min(*budget, netdev->quota);
+	int done = 1;
+	int ctx;
+	int this_work_done;
+	int work_done = 0;
+
+	DPRINTK(INFO, "polling for %d descriptors\n", *budget);
+	port->stats.polled++;
+
+	work_done = 0;
+	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+		/*
+		 * Fairness issue. This will give undue weight to the
+		 * receive context 0.
+		 */
+
+		/*
+		 * To avoid starvation, we give each of our receivers
+		 * a fraction of the quota. Sometimes, it might happen that we
+		 * have enough quota to process every packet, but since all the
+		 * packets are on one context, it gets only half of the quota,
+		 * and ends up not processing it.
+		 */
+		this_work_done = netxen_process_rcv_ring(adapter, ctx,
+							 work_to_do /
+							 MAX_RCV_CTX);
+		work_done += this_work_done;
+	}
+
+	netdev->quota -= work_done;
+	*budget -= work_done;
+
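+	/*
+	 * Stay on the poll list if we used up our quota and there is still
+	 * receive work pending, or if the command (Tx) ring has not been
+	 * fully processed yet.
+	 */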
+	if (work_done >= work_to_do && netxen_nic_rx_has_work(adapter) != 0)
+		done = 0;
+
+	if (netxen_process_cmd_ring((unsigned long)adapter) == 0)
+		done = 0;
+
+	DPRINTK(INFO, "new work_done: %d work_to_do: %d\n",
+		work_done, work_to_do);
+	if (done) {
+		netif_rx_complete(netdev);
+		netxen_nic_enable_int(adapter);
+	}
+
+	return !done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netxen_nic_poll_controller(struct net_device *netdev)
+{
+	struct netxen_port *port = netdev_priv(netdev);
+	struct netxen_adapter *adapter = port->adapter;
+	disable_irq(adapter->irq);
+	netxen_intr(adapter->irq, adapter);
+	enable_irq(adapter->irq);
+}
+#endif
+/*
+ * netxen_nic_ioctl() - provides the tcl/phanmon support through these
+ * ioctls.
+ */
+static int
+netxen_nic_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	int err = 0;
+	unsigned long nr_bytes = 0;
+	struct netxen_port *port = netdev_priv(netdev);
+	struct netxen_adapter *adapter = port->adapter;
+	char dev_name[NETXEN_NIC_NAME_LEN];
+
+	DPRINTK(INFO, "doing ioctl for %s\n", netdev->name);
+	switch (cmd) {
+	case NETXEN_NIC_CMD:
+		err = netxen_nic_do_ioctl(adapter, (void *)ifr->ifr_data, port);
+		break;
+
+	case NETXEN_NIC_NAME:
+		DPRINTK(INFO, "ioctl cmd for NetXen\n");
+		if (ifr->ifr_data) {
+			sprintf(dev_name, "%s-%d", NETXEN_NIC_NAME_RSP,
+				port->portnum);
+			nr_bytes =
+			    copy_to_user((char __user *)ifr->ifr_data, dev_name,
+					 NETXEN_NIC_NAME_LEN);
+			if (nr_bytes)
+				err = -EIO;
+
+		}
+		break;
+
+	default:
+		DPRINTK(INFO, "ioctl cmd %x not supported\n", cmd);
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static struct pci_driver netxen_driver = {
+	.name = netxen_nic_driver_name,
+	.id_table = netxen_pci_tbl,
+	.probe = netxen_nic_probe,
+	.remove = __devexit_p(netxen_nic_remove)
+};
+
+/* Driver registration for the NetXen card */
+
+static int __init netxen_init_module(void)
+{
+	if ((netxen_workq = create_singlethread_workqueue("netxen")) == 0)
+		return -ENOMEM;
+
+	return pci_module_init(&netxen_driver);
+}
+
+module_init(netxen_init_module);
+
+static void __exit netxen_exit_module(void)
+{
+	/*
+	 * Wait for some time to allow the dma to drain, if any.
+	 */
+	destroy_workqueue(netxen_workq);
+	pci_unregister_driver(&netxen_driver);
+}
+
+module_exit(netxen_exit_module);
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c
new file mode 100644
index 0000000..4987dc7
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_niu.c
@@ -0,0 +1,898 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ *    info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ *
+ *
+ * Provides access to the Network Interface Unit h/w block.
+ *
+ */
+
+#include "netxen_nic.h"
+
+#define NETXEN_GB_MAC_SOFT_RESET	0x80000000
+#define NETXEN_GB_MAC_RESET_PROT_BLK   0x000F0000
+#define NETXEN_GB_MAC_ENABLE_TX_RX     0x00000005
+#define NETXEN_GB_MAC_PAUSED_FRMS      0x00000020
+
+static long phy_lock_timeout = 100000000;
+
+static inline int phy_lock(struct netxen_adapter *adapter)
+{
+	int i;
+	int done = 0, timeout = 0;
+
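+	/*
+	 * Poll the PCIe SEM3 hardware semaphore until the read returns 1
+	 * (lock acquired) or phy_lock_timeout iterations have elapsed,
+	 * yielding the CPU between attempts when not in atomic context.
+	 */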
+	while (!done) {
+		done =
+		    readl(pci_base_offset
+			  (adapter, NETXEN_PCIE_REG(PCIE_SEM3_LOCK)));
+		if (done == 1)
+			break;
+		if (timeout >= phy_lock_timeout) {
+			return -1;
+		}
+		timeout++;
+		if (!in_atomic())
+			schedule();
+		else {
+			for (i = 0; i < 20; i++)
+				cpu_relax();
+		}
+	}
+
+	writel(PHY_LOCK_DRIVER,
+	       NETXEN_CRB_NORMALIZE(adapter, NETXEN_PHY_LOCK_ID));
+	return 0;
+}
+
+static inline int phy_unlock(struct netxen_adapter *adapter)
+{
+	readl(pci_base_offset(adapter, NETXEN_PCIE_REG(PCIE_SEM3_UNLOCK)));
+
+	return 0;
+}
+
+/* 
+ * netxen_niu_gbe_phy_read - read a register from the GbE PHY via
+ * mii management interface.
+ *
+ * Note: The MII management interface goes through port 0.
+ *	Individual phys are addressed as follows:
+ * @param phy  [15:8]  phy id
+ * @param reg  [7:0]   register number
+ *
+ * @returns  0 on success
+ *	  -1 on error
+ *
+ */
+int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long phy,
+			    long reg, __le32 * readval)
+{
+	long timeout = 0;
+	long result = 0;
+	long restore = 0;
+	__le32 address;
+	__le32 command;
+	__le32 status;
+	__le32 mac_cfg0;
+
+	if (phy_lock(adapter) != 0) {
+		return -1;
+	}
+
+	/*
+	 * MII mgmt all goes through port 0 MAC interface,
+	 * so it cannot be in reset
+	 */
+
+	if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0),
+				  &mac_cfg0, 4))
+		return -EIO;
+	if (netxen_gb_get_soft_reset(mac_cfg0)) {
+		__le32 temp;
+		temp = 0;
+		netxen_gb_tx_reset_pb(temp);
+		netxen_gb_rx_reset_pb(temp);
+		netxen_gb_tx_reset_mac(temp);
+		netxen_gb_rx_reset_mac(temp);
+		if (netxen_nic_hw_write_wx(adapter,
+					   NETXEN_NIU_GB_MAC_CONFIG_0(0),
+					   &temp, 4))
+			return -EIO;
+		restore = 1;
+	}
+
+	address = 0;
+	netxen_gb_mii_mgmt_reg_addr(address, reg);
+	netxen_gb_mii_mgmt_phy_addr(address, phy);
+	if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0),
+				   &address, 4))
+		return -EIO;
+	command = 0;		/* turn off any prior activity */
+	if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
+				   &command, 4))
+		return -EIO;
+	/* send read command */
+	netxen_gb_mii_mgmt_set_read_cycle(command);
+	if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
+				   &command, 4))
+		return -EIO;
+
+	status = 0;
+	do {
+		if (netxen_nic_hw_read_wx(adapter,
+					  NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
+					  &status, 4))
+			return -EIO;
+		timeout++;
+	} while ((netxen_get_gb_mii_mgmt_busy(status)
+		  || netxen_get_gb_mii_mgmt_notvalid(status))
+		 && (timeout++ < NETXEN_NIU_PHY_WAITMAX));
+
+	if (timeout < NETXEN_NIU_PHY_WAITMAX) {
+		if (netxen_nic_hw_read_wx(adapter,
+					  NETXEN_NIU_GB_MII_MGMT_STATUS(0),
+					  readval, 4))
+			return -EIO;
+		result = 0;
+	} else
+		result = -1;
+
+	if (restore)
+		if (netxen_nic_hw_write_wx(adapter,
+					   NETXEN_NIU_GB_MAC_CONFIG_0(0),
+					   &mac_cfg0, 4))
+			return -EIO;
+	phy_unlock(adapter);
+	return result;
+}
+
+/* 
+ * netxen_niu_gbe_phy_write - write a register to the GbE PHY via
+ * mii management interface.
+ *
+ * Note: The MII management interface goes through port 0.
+ *	Individual phys are addressed as follows:
+ * @param phy      [15:8]  phy id
+ * @param reg      [7:0]   register number
+ *
+ * @returns  0 on success
+ *	  -1 on error
+ *
+ */
+int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter,
+			     long phy, long reg, __le32 val)
+{
+	long timeout = 0;
+	long result = 0;
+	long restore = 0;
+	__le32 address;
+	__le32 command;
+	__le32 status;
+	__le32 mac_cfg0;
+
+	/*
+	 * MII mgmt all goes through port 0 MAC interface, so it
+	 * cannot be in reset
+	 */
+
+	if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0),
+				  &mac_cfg0, 4))
+		return -EIO;
+	if (netxen_gb_get_soft_reset(mac_cfg0)) {
+		__le32 temp;
+		temp = 0;
+		netxen_gb_tx_reset_pb(temp);
+		netxen_gb_rx_reset_pb(temp);
+		netxen_gb_tx_reset_mac(temp);
+		netxen_gb_rx_reset_mac(temp);
+
+		if (netxen_nic_hw_write_wx(adapter,
+					   NETXEN_NIU_GB_MAC_CONFIG_0(0),
+					   &temp, 4))
+			return -EIO;
+		restore = 1;
+	}
+
+	command = 0;		/* turn off any prior activity */
+	if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
+				   &command, 4))
+		return -EIO;
+
+	address = 0;
+	netxen_gb_mii_mgmt_reg_addr(address, reg);
+	netxen_gb_mii_mgmt_phy_addr(address, phy);
+	if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0),
+				   &address, 4))
+		return -EIO;
+
+	if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CTRL(0),
+				   &val, 4))
+		return -EIO;
+
+	status = 0;
+	do {
+		if (netxen_nic_hw_read_wx(adapter,
+					  NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
+					  &status, 4))
+			return -EIO;
+		timeout++;
+	} while ((netxen_get_gb_mii_mgmt_busy(status))
+		 && (timeout++ < NETXEN_NIU_PHY_WAITMAX));
+
+	if (timeout < NETXEN_NIU_PHY_WAITMAX)
+		result = 0;
+	else
+		result = -EIO;
+
+	/* restore the state of port 0 MAC in case we tampered with it */
+	if (restore)
+		if (netxen_nic_hw_write_wx(adapter,
+					   NETXEN_NIU_GB_MAC_CONFIG_0(0),
+					   &mac_cfg0, 4))
+			return -EIO;
+
+	return result;
+}
+
+int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter,
+					  int port)
+{
+	netxen_crb_writelit_adapter(adapter, NETXEN_NIU_INT_MASK, 0x3f);
+	return 0;
+}
+
+int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter,
+					 int port)
+{
+	int result = 0;
+	__le32 enable = 0;
+	netxen_set_phy_int_link_status_changed(enable);
+	netxen_set_phy_int_autoneg_completed(enable);
+	netxen_set_phy_int_speed_changed(enable);
+
+	if (0 !=
+	    netxen_niu_gbe_phy_write(adapter, port,
+				     NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE,
+				     enable))
+		result = -EIO;
+
+	return result;
+}
+
+int netxen_niu_xgbe_disable_phy_interrupts(struct netxen_adapter *adapter,
+					   int port)
+{
+	netxen_crb_writelit_adapter(adapter, NETXEN_NIU_INT_MASK, 0x7f);
+	return 0;
+}
+
+int netxen_niu_gbe_disable_phy_interrupts(struct netxen_adapter *adapter,
+					  int port)
+{
+	int result = 0;
+	if (0 !=
+	    netxen_niu_gbe_phy_write(adapter, port,
+				     NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE, 0))
+		result = -EIO;
+
+	return result;
+}
+
+int netxen_niu_xgbe_clear_phy_interrupts(struct netxen_adapter *adapter,
+					 int port)
+{
+	netxen_crb_writelit_adapter(adapter, NETXEN_NIU_ACTIVE_INT, -1);
+	return 0;
+}
+
+int netxen_niu_gbe_clear_phy_interrupts(struct netxen_adapter *adapter,
+					int port)
+{
+	int result = 0;
+	if (0 !=
+	    netxen_niu_gbe_phy_write(adapter, port,
+				     NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS,
+				     -EIO))
+		result = -EIO;
+
+	return result;
+}
+
+/* 
+ * netxen_niu_gbe_set_mii_mode - Set 10/100 Mbit Mode for GbE MAC
+ *
+ */
+void netxen_niu_gbe_set_mii_mode(struct netxen_adapter *adapter,
+				 int port, long enable)
+{
+	netxen_crb_writelit_adapter(adapter, NETXEN_NIU_MODE, 0x2);
+	netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+				    0x80000000);
+	netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+				    0x0000f0025);
+	netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_1(port),
+				    0xf1ff);
+	netxen_crb_writelit_adapter(adapter,
+				    NETXEN_NIU_GB0_GMII_MODE + (port << 3), 0);
+	netxen_crb_writelit_adapter(adapter,
+				    NETXEN_NIU_GB0_MII_MODE + (port << 3), 1);
+	netxen_crb_writelit_adapter(adapter,
+				    (NETXEN_NIU_GB0_HALF_DUPLEX + port * 4), 0);
+	netxen_crb_writelit_adapter(adapter,
+				    NETXEN_NIU_GB_MII_MGMT_CONFIG(port), 0x7);
+
+	if (enable) {
+		/* 
+		 * Do NOT enable flow control until a suitable solution for 
+		 *  shutting down pause frames is found. 
+		 */
+		netxen_crb_writelit_adapter(adapter,
+					    NETXEN_NIU_GB_MAC_CONFIG_0(port),
+					    0x5);
+	}
+
+	if (netxen_niu_gbe_enable_phy_interrupts(adapter, port))
+		printk(KERN_ERR PFX "ERROR enabling PHY interrupts\n");
+	if (netxen_niu_gbe_clear_phy_interrupts(adapter, port))
+		printk(KERN_ERR PFX "ERROR clearing PHY interrupts\n");
+}
+
+/* 
+ * netxen_niu_gbe_set_gmii_mode - Set GbE Mode for GbE MAC
+ */
+void netxen_niu_gbe_set_gmii_mode(struct netxen_adapter *adapter,
+				  int port, long enable)
+{
+	netxen_crb_writelit_adapter(adapter, NETXEN_NIU_MODE, 0x2);
+	netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+				    0x80000000);
+	netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+				    0x0000f0025);
+	netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB_MAC_CONFIG_1(port),
+				    0xf2ff);
+	netxen_crb_writelit_adapter(adapter,
+				    NETXEN_NIU_GB0_MII_MODE + (port << 3), 0);
+	netxen_crb_writelit_adapter(adapter,
+				    NETXEN_NIU_GB0_GMII_MODE + (port << 3), 1);
+	netxen_crb_writelit_adapter(adapter,
+				    (NETXEN_NIU_GB0_HALF_DUPLEX + port * 4), 0);
+	netxen_crb_writelit_adapter(adapter,
+				    NETXEN_NIU_GB_MII_MGMT_CONFIG(port), 0x7);
+
+	if (enable) {
+		/* 
+		 * Do NOT enable flow control until a suitable solution for 
+		 *  shutting down pause frames is found. 
+		 */
+		netxen_crb_writelit_adapter(adapter,
+					    NETXEN_NIU_GB_MAC_CONFIG_0(port),
+					    0x5);
+	}
+
+	if (netxen_niu_gbe_enable_phy_interrupts(adapter, port))
+		printk(KERN_ERR PFX "ERROR enabling PHY interrupts\n");
+	if (netxen_niu_gbe_clear_phy_interrupts(adapter, port))
+		printk(KERN_ERR PFX "ERROR clearing PHY interrupts\n");
+}
+
+int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port)
+{
+	int result = 0;
+	__le32 status;
+	if (adapter->disable_phy_interrupts)
+		adapter->disable_phy_interrupts(adapter, port);
+	mdelay(2);
+
+	if (0 ==
+	    netxen_niu_gbe_phy_read(adapter, port,
+				    NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+				    (__le32 *) & status)) {
+		if (netxen_get_phy_link(status)) {
+			if (netxen_get_phy_speed(status) == 2) {
+				netxen_niu_gbe_set_gmii_mode(adapter, port, 1);
+			} else if ((netxen_get_phy_speed(status) == 1)
+				   || (netxen_get_phy_speed(status) == 0)) {
+				netxen_niu_gbe_set_mii_mode(adapter, port, 1);
+			} else {
+				result = -1;
+			}
+
+		} else {
+			/*
+			 * We don't have link. Cable must be unconnected.
+			 * Enable phy interrupts so we take action when
+			 * plugged in.
+			 */
+
+			netxen_crb_writelit_adapter(adapter,
+						    NETXEN_NIU_GB_MAC_CONFIG_0
+						    (port),
+						    NETXEN_GB_MAC_SOFT_RESET);
+			netxen_crb_writelit_adapter(adapter,
+						    NETXEN_NIU_GB_MAC_CONFIG_0
+						    (port),
+						    NETXEN_GB_MAC_RESET_PROT_BLK
+						    | NETXEN_GB_MAC_ENABLE_TX_RX
+						    |
+						    NETXEN_GB_MAC_PAUSED_FRMS);
+			if (netxen_niu_gbe_clear_phy_interrupts(adapter, port))
+				printk(KERN_ERR PFX
+				       "ERROR clearing PHY interrupts\n");
+			if (netxen_niu_gbe_enable_phy_interrupts(adapter, port))
+				printk(KERN_ERR PFX
+				       "ERROR enabling PHY interrupts\n");
+			if (netxen_niu_gbe_clear_phy_interrupts(adapter, port))
+				printk(KERN_ERR PFX
+				       "ERROR clearing PHY interrupts\n");
+			result = -1;
+		}
+	} else {
+		result = -EIO;
+	}
+	return result;
+}
+
+int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
+{
+	long reg = 0, ret = 0;
+
+	if (adapter->ahw.boardcfg.board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) {
+		netxen_crb_writelit_adapter(adapter,
+					    NETXEN_NIU_XG1_CONFIG_0, 0x5);
+		/* XXX hack for Mez cards: both ports in promisc mode */
+		netxen_nic_hw_read_wx(adapter,
+				      NETXEN_NIU_XGE_CONFIG_1, &reg, 4);
+		reg = (reg | 0x2000UL);
+		netxen_crb_writelit_adapter(adapter,
+					    NETXEN_NIU_XGE_CONFIG_1, reg);
+		reg = 0;
+		netxen_nic_hw_read_wx(adapter,
+				      NETXEN_NIU_XG1_CONFIG_1, &reg, 4);
+		reg = (reg | 0x2000UL);
+		netxen_crb_writelit_adapter(adapter,
+					    NETXEN_NIU_XG1_CONFIG_1, reg);
+	}
+
+	return ret;
+}
+
+/* 
+ * netxen_niu_gbe_handle_phy_interrupt - Handles GbE PHY interrupts
+ * @param enable 0 means don't enable the port
+ *		 1 means enable (or re-enable) the port
+ */
+int netxen_niu_gbe_handle_phy_interrupt(struct netxen_adapter *adapter,
+					int port, long enable)
+{
+	int result = 0;
+	__le32 int_src;
+
+	printk(KERN_INFO PFX "NETXEN: Handling PHY interrupt on port %d"
+	       " (device enable = %d)\n", (int)port, (int)enable);
+
+	/*
+	 * The read of the PHY INT status will clear the pending
+	 * interrupt status
+	 */
+	if (netxen_niu_gbe_phy_read(adapter, port,
+				    NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS,
+				    &int_src) != 0)
+		result = -EINVAL;
+	else {
+		printk(KERN_INFO PFX "PHY Interrupt source = 0x%x \n", int_src);
+		if (netxen_get_phy_int_jabber(int_src))
+			printk(KERN_INFO PFX "jabber Interrupt ");
+		if (netxen_get_phy_int_polarity_changed(int_src))
+			printk(KERN_INFO PFX "polarity changed ");
+		if (netxen_get_phy_int_energy_detect(int_src))
+			printk(KERN_INFO PFX "energy detect \n");
+		if (netxen_get_phy_int_downshift(int_src))
+			printk(KERN_INFO PFX "downshift \n");
+		if (netxen_get_phy_int_mdi_xover_changed(int_src))
+			printk(KERN_INFO PFX "mdi_xover_changed ");
+		if (netxen_get_phy_int_fifo_over_underflow(int_src))
+			printk(KERN_INFO PFX "fifo_over_underflow ");
+		if (netxen_get_phy_int_false_carrier(int_src))
+			printk(KERN_INFO PFX "false_carrier ");
+		if (netxen_get_phy_int_symbol_error(int_src))
+			printk(KERN_INFO PFX "symbol_error ");
+		if (netxen_get_phy_int_autoneg_completed(int_src))
+			printk(KERN_INFO PFX "autoneg_completed ");
+		if (netxen_get_phy_int_page_received(int_src))
+			printk(KERN_INFO PFX "page_received ");
+		if (netxen_get_phy_int_duplex_changed(int_src))
+			printk(KERN_INFO PFX "duplex_changed ");
+		if (netxen_get_phy_int_autoneg_error(int_src))
+			printk(KERN_INFO PFX "autoneg_error ");
+		if ((netxen_get_phy_int_speed_changed(int_src))
+		    || (netxen_get_phy_int_link_status_changed(int_src))) {
+			__le32 status;
+
+			printk(KERN_INFO PFX
+			       "speed_changed or link status changed");
+			if (netxen_niu_gbe_phy_read
+			    (adapter, port,
+			     NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+			     &status) == 0) {
+				if (netxen_get_phy_speed(status) == 2) {
+					printk
+					    (KERN_INFO PFX "Link speed changed"
+					     " to 1000 Mbps\n");
+					netxen_niu_gbe_set_gmii_mode(adapter,
+								     port,
+								     enable);
+				} else if (netxen_get_phy_speed(status) == 1) {
+					printk
+					    (KERN_INFO PFX "Link speed changed"
+					     " to 100 Mbps\n");
+					netxen_niu_gbe_set_mii_mode(adapter,
+								    port,
+								    enable);
+				} else if (netxen_get_phy_speed(status) == 0) {
+					printk
+					    (KERN_INFO PFX "Link speed changed"
+					     " to 10 Mbps\n");
+					netxen_niu_gbe_set_mii_mode(adapter,
+								    port,
+								    enable);
+				} else {
+					printk(KERN_ERR PFX "ERROR reading "
+					       "PHY status. Illegal speed.\n");
+					result = -1;
+				}
+			} else {
+				printk(KERN_ERR PFX
+				       "ERROR reading PHY status.\n");
+				result = -1;
+			}
+
+		}
+		printk(KERN_INFO "\n");
+	}
+	return result;
+}
+
+/*
+ * Return the current station MAC address.
+ * Note that the returned value is in network byte order.
+ */
+int netxen_niu_macaddr_get(struct netxen_adapter *adapter,
+			   int phy, netxen_ethernet_macaddr_t * addr)
+{
+	u64 result = 0;
+	__le32 stationhigh;
+	__le32 stationlow;
+
+	if (addr == NULL)
+		return -EINVAL;
+	if ((phy < 0) || (phy > 3))
+		return -EINVAL;
+
+	if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy),
+				  &stationhigh, 4))
+		return -EIO;
+	if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy),
+				  &stationlow, 4))
+		return -EIO;
+
+	result = (u64) netxen_gb_get_stationaddress_low(stationlow);
+	result |= (u64) stationhigh << 16;
+	memcpy(*addr, &result, sizeof(netxen_ethernet_macaddr_t));
+
+	return 0;
+}
+
+/*
+ * Set the station MAC address.
+ * Note that the passed-in value must already be in network byte order.
+ */
+int netxen_niu_macaddr_set(struct netxen_port *port,
+			   netxen_ethernet_macaddr_t addr)
+{
+	__le32 temp = 0;
+	struct netxen_adapter *adapter = port->adapter;
+	int phy = port->portnum;
+	unsigned char mac_addr[6];
+	int i;
+
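+	/*
+	 * Program the station address registers and read the value back to
+	 * verify it; retry up to 10 times before reporting a failure.
+	 */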
+	for (i = 0; i < 10; i++) {
+		memcpy(&temp, addr, 2);
+		temp <<= 16;
+		if (netxen_nic_hw_write_wx
+		    (adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy), &temp, 4))
+			return -EIO;
+
+		temp = 0;
+
+		memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32));
+		if (netxen_nic_hw_write_wx
+		    (adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy), &temp, 4))
+			return -2;
+
+		netxen_niu_macaddr_get(adapter, phy,
+				       (netxen_ethernet_macaddr_t *) mac_addr);
+		if (memcmp(mac_addr, addr, 6) == 0)
+			break;
+	}
+
+	if (i == 10) {
+		printk(KERN_ERR "%s: cannot set MAC address for %s\n",
+		       netxen_nic_driver_name, port->netdev->name);
+		printk(KERN_ERR "MAC address set: "
+		       "%02x:%02x:%02x:%02x:%02x:%02x.\n",
+		       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+		printk(KERN_ERR "MAC address get: "
+		       "%02x:%02x:%02x:%02x:%02x:%02x.\n",
+		       mac_addr[0],
+		       mac_addr[1],
+		       mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);
+	}
+	return 0;
+}
+
+/* Enable a GbE interface */
+int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter,
+			       int port, netxen_niu_gbe_ifmode_t mode)
+{
+	__le32 mac_cfg0;
+	__le32 mac_cfg1;
+	__le32 mii_cfg;
+
+	if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+		return -EINVAL;
+
+	mac_cfg0 = 0;
+	netxen_gb_soft_reset(mac_cfg0);
+	if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+				   &mac_cfg0, 4))
+		return -EIO;
+	mac_cfg0 = 0;
+	netxen_gb_enable_tx(mac_cfg0);
+	netxen_gb_enable_rx(mac_cfg0);
+	netxen_gb_unset_rx_flowctl(mac_cfg0);
+	netxen_gb_tx_reset_pb(mac_cfg0);
+	netxen_gb_rx_reset_pb(mac_cfg0);
+	netxen_gb_tx_reset_mac(mac_cfg0);
+	netxen_gb_rx_reset_mac(mac_cfg0);
+
+	if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+				   &mac_cfg0, 4))
+		return -EIO;
+	mac_cfg1 = 0;
+	netxen_gb_set_preamblelen(mac_cfg1, 0xf);
+	netxen_gb_set_duplex(mac_cfg1);
+	netxen_gb_set_crc_enable(mac_cfg1);
+	netxen_gb_set_padshort(mac_cfg1);
+	netxen_gb_set_checklength(mac_cfg1);
+	netxen_gb_set_hugeframes(mac_cfg1);
+
+	if (mode == NETXEN_NIU_10_100_MB) {
+		netxen_gb_set_intfmode(mac_cfg1, 1);
+		if (netxen_nic_hw_write_wx(adapter,
+					   NETXEN_NIU_GB_MAC_CONFIG_1(port),
+					   &mac_cfg1, 4))
+			return -EIO;
+
+		/* set mii mode */
+		netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB0_GMII_MODE +
+					    (port << 3), 0);
+		netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB0_MII_MODE +
+					    (port << 3), 1);
+
+	} else if (mode == NETXEN_NIU_1000_MB) {
+		netxen_gb_set_intfmode(mac_cfg1, 2);
+		if (netxen_nic_hw_write_wx(adapter,
+					   NETXEN_NIU_GB_MAC_CONFIG_1(port),
+					   &mac_cfg1, 4))
+			return -EIO;
+		/* set gmii mode */
+		netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB0_MII_MODE +
+					    (port << 3), 0);
+		netxen_crb_writelit_adapter(adapter, NETXEN_NIU_GB0_GMII_MODE +
+					    (port << 3), 1);
+	}
+	mii_cfg = 0;
+	netxen_gb_set_mii_mgmt_clockselect(mii_cfg, 7);
+	if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CONFIG(port),
+				   &mii_cfg, 4))
+		return -EIO;
+	mac_cfg0 = 0;
+	netxen_gb_enable_tx(mac_cfg0);
+	netxen_gb_enable_rx(mac_cfg0);
+	netxen_gb_unset_rx_flowctl(mac_cfg0);
+	netxen_gb_unset_tx_flowctl(mac_cfg0);
+
+	if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+				   &mac_cfg0, 4))
+		return -EIO;
+	return 0;
+}
+
+/* Disable a GbE interface */
+int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter, int port)
+{
+	__le32 mac_cfg0;
+
+	if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+		return -EINVAL;
+
+	mac_cfg0 = 0;
+	netxen_gb_soft_reset(mac_cfg0);
+	if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+				   &mac_cfg0, 4))
+		return -EIO;
+	return 0;
+}
+
+/* Disable an XG interface */
+int netxen_niu_disable_xg_port(struct netxen_adapter *adapter, int port)
+{
+	__le32 mac_cfg;
+
+	if (port != 0)
+		return -EINVAL;
+
+	mac_cfg = 0;
+	netxen_xg_soft_reset(mac_cfg);
+	if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_CONFIG_0,
+				   &mac_cfg, 4))
+		return -EIO;
+	return 0;
+}
+
+/* Set promiscuous mode for a GbE interface */
+int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, int port,
+				    netxen_niu_prom_mode_t mode)
+{
+	__le32 reg;
+
+	if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+		return -EINVAL;
+
+	/* save previous contents */
+	if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR,
+				  &reg, 4))
+		return -EIO;
+	if (mode == NETXEN_NIU_PROMISC_MODE) {
+		switch (port) {
+		case 0:
+			netxen_clear_gb_drop_gb0(reg);
+			break;
+		case 1:
+			netxen_clear_gb_drop_gb1(reg);
+			break;
+		case 2:
+			netxen_clear_gb_drop_gb2(reg);
+			break;
+		case 3:
+			netxen_clear_gb_drop_gb3(reg);
+			break;
+		default:
+			return -EIO;
+		}
+	} else {
+		switch (port) {
+		case 0:
+			netxen_set_gb_drop_gb0(reg);
+			break;
+		case 1:
+			netxen_set_gb_drop_gb1(reg);
+			break;
+		case 2:
+			netxen_set_gb_drop_gb2(reg);
+			break;
+		case 3:
+			netxen_set_gb_drop_gb3(reg);
+			break;
+		default:
+			return -EIO;
+		}
+	}
+	if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR,
+				   &reg, 4))
+		return -EIO;
+	return 0;
+}
+
+/*
+ * Set the MAC address for an XG port
+ * Note that the passed-in value must already be in network byte order.
+ */
+int netxen_niu_xg_macaddr_set(struct netxen_port *port,
+			      netxen_ethernet_macaddr_t addr)
+{
+	__le32 temp = 0;
+	struct netxen_adapter *adapter = port->adapter;
+
+	memcpy(&temp, addr, 2);
+	temp = cpu_to_le32(temp);
+	temp <<= 16;
+	if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1,
+				   &temp, 4))
+		return -EIO;
+
+	temp = 0;
+
+	memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32));
+	temp = cpu_to_le32(temp);
+	if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI,
+				   &temp, 4))
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * Return the current station MAC address.
+ * Note that the returned value is in network byte order.
+ */
+int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter, int phy,
+			      netxen_ethernet_macaddr_t * addr)
+{
+	__le32 stationhigh;
+	__le32 stationlow;
+	u64 result;
+
+	if (addr == NULL)
+		return -EINVAL;
+	if (phy != 0)
+		return -EINVAL;
+
+	if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI,
+				  &stationhigh, 4))
+		return -EIO;
+	if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1,
+				  &stationlow, 4))
+		return -EIO;
+
+	result = ((u64) stationlow) >> 16;
+	result |= (u64) stationhigh << 16;
+	memcpy(*addr, &result, sizeof(netxen_ethernet_macaddr_t));
+
+	return 0;
+}
+
+int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
+				       int port, netxen_niu_prom_mode_t mode)
+{
+	__le32 reg;
+
+	if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+		return -EINVAL;
+
+	if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_CONFIG_1, &reg, 4))
+		return -EIO;
+	if (mode == NETXEN_NIU_PROMISC_MODE)
+		reg = (reg | 0x2000UL);
+	else
+		reg = (reg & ~0x2000UL);
+
+	netxen_crb_writelit_adapter(adapter, NETXEN_NIU_XGE_CONFIG_1, reg);
+
+	return 0;
+}
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h
new file mode 100644
index 0000000..7879f85
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_phan_reg.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2003 - 2006 NetXen, Inc.
+ * All rights reserved.
+ * 
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ * 
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ * 
+ * Contact Information:
+ *    info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ */
+
+#ifndef __NIC_PHAN_REG_H_
+#define __NIC_PHAN_REG_H_
+
+/* 
+ * CRB Registers or queue message done only at initialization time.
+ */
+#define NIC_CRB_BASE               NETXEN_CAM_RAM(0x200)
+#define NETXEN_NIC_REG(X)             (NIC_CRB_BASE+(X))
+
+#define CRB_PHAN_CNTRL_LO_OFFSET    NETXEN_NIC_REG(0x00)
+#define CRB_PHAN_CNTRL_HI_OFFSET    NETXEN_NIC_REG(0x04)
+#define CRB_CMD_PRODUCER_OFFSET     NETXEN_NIC_REG(0x08)
+#define CRB_CMD_CONSUMER_OFFSET     NETXEN_NIC_REG(0x0c)
+#define CRB_PAUSE_ADDR_LO           NETXEN_NIC_REG(0x10)	/* C0 EPG BUG  */
+#define CRB_PAUSE_ADDR_HI           NETXEN_NIC_REG(0x14)
+#define CRB_HOST_CMD_ADDR_HI        NETXEN_NIC_REG(0x18)	/* host add:cmd ring */
+#define CRB_HOST_CMD_ADDR_LO        NETXEN_NIC_REG(0x1c)
+#define CRB_CMD_INTR_LOOP           NETXEN_NIC_REG(0x20)	/* 4 regs for perf */
+#define CRB_CMD_DMA_LOOP            NETXEN_NIC_REG(0x24)
+#define CRB_RCV_INTR_LOOP           NETXEN_NIC_REG(0x28)
+#define CRB_RCV_DMA_LOOP            NETXEN_NIC_REG(0x2c)
+#define CRB_ENABLE_TX_INTR          NETXEN_NIC_REG(0x30)	/* phantom init status */
+#define CRB_MMAP_ADDR_3             NETXEN_NIC_REG(0x34)
+#define CRB_CMDPEG_CMDRING          NETXEN_NIC_REG(0x38)
+#define CRB_HOST_DUMMY_BUF_ADDR_HI  NETXEN_NIC_REG(0x3c)
+#define CRB_HOST_DUMMY_BUF_ADDR_LO  NETXEN_NIC_REG(0x40)
+#define CRB_MMAP_ADDR_0             NETXEN_NIC_REG(0x44)
+#define CRB_MMAP_ADDR_1             NETXEN_NIC_REG(0x48)
+#define CRB_MMAP_ADDR_2             NETXEN_NIC_REG(0x4c)
+#define CRB_CMDPEG_STATE            NETXEN_NIC_REG(0x50)
+#define CRB_MMAP_SIZE_0             NETXEN_NIC_REG(0x54)
+#define CRB_MMAP_SIZE_1             NETXEN_NIC_REG(0x58)
+#define CRB_MMAP_SIZE_2             NETXEN_NIC_REG(0x5c)
+#define CRB_MMAP_SIZE_3             NETXEN_NIC_REG(0x60)
+#define CRB_GLOBAL_INT_COAL         NETXEN_NIC_REG(0x64)	/* interrupt coalescing */
+#define CRB_INT_COAL_MODE           NETXEN_NIC_REG(0x68)
+#define CRB_MAX_RCV_BUFS            NETXEN_NIC_REG(0x6c)
+#define CRB_TX_INT_THRESHOLD        NETXEN_NIC_REG(0x70)
+#define CRB_RX_PKT_TIMER            NETXEN_NIC_REG(0x74)
+#define CRB_TX_PKT_TIMER            NETXEN_NIC_REG(0x78)
+#define CRB_RX_PKT_CNT              NETXEN_NIC_REG(0x7c)
+#define CRB_RX_TMR_CNT              NETXEN_NIC_REG(0x80)
+#define CRB_RX_LRO_TIMER            NETXEN_NIC_REG(0x84)
+#define CRB_RX_LRO_MID_TIMER        NETXEN_NIC_REG(0x88)
+#define CRB_DMA_MAX_RCV_BUFS        NETXEN_NIC_REG(0x8c)
+#define CRB_MAX_DMA_ENTRIES         NETXEN_NIC_REG(0x90)
+#define CRB_XG_STATE                NETXEN_NIC_REG(0x94)	/* XG Link status */
+#define CRB_AGENT_GO                NETXEN_NIC_REG(0x98)	/* NIC pkt gen agent */
+#define CRB_AGENT_TX_SIZE           NETXEN_NIC_REG(0x9c)
+#define CRB_AGENT_TX_TYPE           NETXEN_NIC_REG(0xa0)
+#define CRB_AGENT_TX_ADDR           NETXEN_NIC_REG(0xa4)
+#define CRB_AGENT_TX_MSS            NETXEN_NIC_REG(0xa8)
+#define CRB_TX_STATE                NETXEN_NIC_REG(0xac)	/* Debug -performance */
+#define CRB_TX_COUNT                NETXEN_NIC_REG(0xb0)
+#define CRB_RX_STATE                NETXEN_NIC_REG(0xb4)
+#define CRB_RX_PERF_DEBUG_1         NETXEN_NIC_REG(0xb8)
+#define CRB_RX_LRO_CONTROL          NETXEN_NIC_REG(0xbc)	/* LRO On/OFF */
+#define CRB_RX_LRO_START_NUM        NETXEN_NIC_REG(0xc0)
+#define CRB_MPORT_MODE              NETXEN_NIC_REG(0xc4)	/* Multiport Mode */
+#define CRB_CMD_RING_SIZE           NETXEN_NIC_REG(0xc8)
+#define CRB_INT_VECTOR              NETXEN_NIC_REG(0xd4)
+#define CRB_CTX_RESET               NETXEN_NIC_REG(0xd8)
+#define CRB_HOST_STS_PROD           NETXEN_NIC_REG(0xdc)
+#define CRB_HOST_STS_CONS           NETXEN_NIC_REG(0xe0)
+#define CRB_PEG_CMD_PROD            NETXEN_NIC_REG(0xe4)
+#define CRB_PEG_CMD_CONS            NETXEN_NIC_REG(0xe8)
+#define CRB_HOST_BUFFER_PROD        NETXEN_NIC_REG(0xec)
+#define CRB_HOST_BUFFER_CONS        NETXEN_NIC_REG(0xf0)
+#define CRB_JUMBO_BUFFER_PROD       NETXEN_NIC_REG(0xf4)
+#define CRB_JUMBO_BUFFER_CONS       NETXEN_NIC_REG(0xf8)
+
+#define CRB_CMD_PRODUCER_OFFSET_1   NETXEN_NIC_REG(0x1ac)
+#define CRB_CMD_CONSUMER_OFFSET_1   NETXEN_NIC_REG(0x1b0)
+#define CRB_TEMP_STATE              NETXEN_NIC_REG(0x1b4)
+
+/*
+ * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
+ * which can be read by the Phantom host to get producer/consumer indexes from
+ * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
+ * registers will be used for the addresses of the ring's shared memory
+ * on the Phantom.
+ */
+
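+/*
+ * The temperature register packs the reading in the upper 16 bits and the
+ * state code in the lower 16 bits.
+ */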
+#define nx_get_temp_val(x)		((x) >> 16)
+#define nx_get_temp_state(x)		((x) & 0xffff)
+#define nx_encode_temp(val, state)	(((val) << 16) | (state))
+
+/* CRB registers per Rcv Descriptor ring */
+struct netxen_rcv_desc_crb {
+	u32 crb_rcv_producer_offset __attribute__ ((aligned(512)));
+	u32 crb_rcv_consumer_offset;
+	u32 crb_globalrcv_ring;
+	u32 crb_rcv_ring_size;
+};
+
+/*
+ * CRB registers used by the receive peg logic.
+ */
+
+struct netxen_recv_crb {
+	struct netxen_rcv_desc_crb rcv_desc_crb[NUM_RCV_DESC_RINGS];
+	u32 crb_rcvstatus_ring;
+	u32 crb_rcv_status_producer;
+	u32 crb_rcv_status_consumer;
+	u32 crb_rcvpeg_state;
+	u32 crb_status_ring_size;
+};
+
+#if defined(DEFINE_GLOBAL_RECV_CRB)
+struct netxen_recv_crb recv_crb_registers[] = {
+	/*
+	 * Instance 0.
+	 */
+	{
+	 /* rcv_desc_crb: */
+	 {
+	  {
+	   /* crb_rcv_producer_offset: */
+	   NETXEN_NIC_REG(0x100),
+	   /* crb_rcv_consumer_offset: */
+	   NETXEN_NIC_REG(0x104),
+	   /* crb_globalrcv_ring: */
+	   NETXEN_NIC_REG(0x108),
+	   /* crb_rcv_ring_size */
+	   NETXEN_NIC_REG(0x10c),
+
+	   },
+	  /* Jumbo frames */
+	  {
+	   /* crb_rcv_producer_offset: */
+	   NETXEN_NIC_REG(0x110),
+	   /* crb_rcv_consumer_offset: */
+	   NETXEN_NIC_REG(0x114),
+	   /* crb_globalrcv_ring: */
+	   NETXEN_NIC_REG(0x118),
+	   /* crb_rcv_ring_size */
+	   NETXEN_NIC_REG(0x11c),
+	   },
+	  /* LRO */
+	  {
+	   /* crb_rcv_producer_offset: */
+	   NETXEN_NIC_REG(0x120),
+	   /* crb_rcv_consumer_offset: */
+	   NETXEN_NIC_REG(0x124),
+	   /* crb_gloablrcv_ring: */
+	   /* crb_globalrcv_ring: */
+	   /* crb_rcv_ring_size */
+	   NETXEN_NIC_REG(0x12c),
+	   }
+	  },
+	 /* crb_rcvstatus_ring: */
+	 NETXEN_NIC_REG(0x130),
+	 /* crb_rcv_status_producer: */
+	 NETXEN_NIC_REG(0x134),
+	 /* crb_rcv_status_consumer: */
+	 NETXEN_NIC_REG(0x138),
+	 /* crb_rcvpeg_state: */
+	 NETXEN_NIC_REG(0x13c),
+	 /* crb_status_ring_size */
+	 NETXEN_NIC_REG(0x140),
+
+	 },
+	/*
+	 * Instance 1.
+	 */
+	{
+	 /* rcv_desc_crb: */
+	 {
+	  {
+	   /* crb_rcv_producer_offset: */
+	   NETXEN_NIC_REG(0x144),
+	   /* crb_rcv_consumer_offset: */
+	   NETXEN_NIC_REG(0x148),
+	   /* crb_globalrcv_ring: */
+	   NETXEN_NIC_REG(0x14c),
+	   /* crb_rcv_ring_size */
+	   NETXEN_NIC_REG(0x150),
+
+	   },
+	  /* Jumbo frames */
+	  {
+	   /* crb_rcv_producer_offset: */
+	   NETXEN_NIC_REG(0x154),
+	   /* crb_rcv_consumer_offset: */
+	   NETXEN_NIC_REG(0x158),
+	   /* crb_globalrcv_ring: */
+	   NETXEN_NIC_REG(0x15c),
+	   /* crb_rcv_ring_size */
+	   NETXEN_NIC_REG(0x160),
+	   },
+	  /* LRO */
+	  {
+	   /* crb_rcv_producer_offset: */
+	   NETXEN_NIC_REG(0x164),
+	   /* crb_rcv_consumer_offset: */
+	   NETXEN_NIC_REG(0x168),
+	   /* crb_globalrcv_ring: */
+	   NETXEN_NIC_REG(0x16c),
+	   /* crb_rcv_ring_size */
+	   NETXEN_NIC_REG(0x170),
+	   }
+
+	  },
+	 /* crb_rcvstatus_ring: */
+	 NETXEN_NIC_REG(0x174),
+	 /* crb_rcv_status_producer: */
+	 NETXEN_NIC_REG(0x178),
+	 /* crb_rcv_status_consumer: */
+	 NETXEN_NIC_REG(0x17c),
+	 /* crb_rcvpeg_state: */
+	 NETXEN_NIC_REG(0x180),
+	 /* crb_status_ring_size */
+	 NETXEN_NIC_REG(0x184),
+
+	 },
+};
+
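+/*
+ * Per receive context: {context address low, signature, context address high}
+ * CRB registers.
+ */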
+u64 ctx_addr_sig_regs[][3] = {
+	{NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
+	{NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
+	{NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
+	{NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
+};
+
+#else
+extern struct netxen_recv_crb recv_crb_registers[];
+extern u64 ctx_addr_sig_regs[][3];
+#define CRB_CTX_ADDR_REG_LO            (ctx_addr_sig_regs[0][0])
+#define CRB_CTX_ADDR_REG_HI            (ctx_addr_sig_regs[0][2])
+#define CRB_CTX_SIGNATURE_REG       (ctx_addr_sig_regs[0][1])
+#endif				/* DEFINE_GLOBAL_RECV_CRB */
+
+/*
+ * Temperature control.
+ */
+enum {
+	NX_TEMP_NORMAL = 0x1,	/* Normal operating range */
+	NX_TEMP_WARN,		/* Sound alert, temperature getting high */
+	NX_TEMP_PANIC		/* Fatal error, hardware has shut down. */
+};
+
+#endif				/* __NIC_PHAN_REG_H_ */
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index 26e42f6..196993a 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -1335,7 +1335,7 @@
 	return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	unregister_netdev(dev_ni52);
 	release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE);
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 340ad0d..1578f4d 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -1259,7 +1259,7 @@
 	return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
  	unregister_netdev(dev_ni65);
  	cleanup_card(dev_ni65);
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index b0127c7..568daeb 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -414,10 +414,10 @@
 
 	struct sk_buff	*skbs[NR_RX_DESC];
 
-	u32		*next_rx_desc;
+	__le32		*next_rx_desc;
 	u16		next_rx, next_empty;
 
-	u32		*descs;
+	__le32		*descs;
 	dma_addr_t	phy_descs;
 };
 
@@ -427,6 +427,7 @@
 	u8			__iomem *base;
 
 	struct pci_dev		*pci_dev;
+	struct net_device	*ndev;
 
 #ifdef NS83820_VLAN_ACCEL_SUPPORT
 	struct vlan_group	*vlgrp;
@@ -459,7 +460,7 @@
 	struct sk_buff	*tx_skbs[NR_TX_DESC];
 
 	char		pad[16] __attribute__((aligned(16)));
-	u32		*tx_descs;
+	__le32		*tx_descs;
 	dma_addr_t	tx_phy_descs;
 
 	struct timer_list	tx_watchdog;
@@ -533,7 +534,7 @@
  * conditions, still route realtime traffic with as low jitter as
  * possible.
  */
-static inline void build_rx_desc(struct ns83820 *dev, u32 *desc, dma_addr_t link, dma_addr_t buf, u32 cmdsts, u32 extsts)
+static inline void build_rx_desc(struct ns83820 *dev, __le32 *desc, dma_addr_t link, dma_addr_t buf, u32 cmdsts, u32 extsts)
 {
 	desc_addr_set(desc + DESC_LINK, link);
 	desc_addr_set(desc + DESC_BUFPTR, buf);
@@ -547,7 +548,7 @@
 {
 	unsigned next_empty;
 	u32 cmdsts;
-	u32 *sg;
+	__le32 *sg;
 	dma_addr_t buf;
 
 	next_empty = dev->rx_info.next_empty;
@@ -631,10 +632,10 @@
 }
 
 /* REFILL */
-static inline void queue_refill(void *_dev)
+static inline void queue_refill(struct work_struct *work)
 {
-	struct net_device *ndev = _dev;
-	struct ns83820 *dev = PRIV(ndev);
+	struct ns83820 *dev = container_of(work, struct ns83820, tq_refill);
+	struct net_device *ndev = dev->ndev;
 
 	rx_refill(ndev, GFP_KERNEL);
 	if (dev->rx_info.up)
@@ -874,7 +875,8 @@
 	struct rx_info *info = &dev->rx_info;
 	unsigned next_rx;
 	int rx_rc, len;
-	u32 cmdsts, *desc;
+	u32 cmdsts;
+	__le32 *desc;
 	unsigned long flags;
 	int nr = 0;
 
@@ -1010,7 +1012,8 @@
 static void do_tx_done(struct net_device *ndev)
 {
 	struct ns83820 *dev = PRIV(ndev);
-	u32 cmdsts, tx_done_idx, *desc;
+	u32 cmdsts, tx_done_idx;
+	__le32 *desc;
 
 	dprintk("do_tx_done(%p)\n", ndev);
 	tx_done_idx = dev->tx_done_idx;
@@ -1077,7 +1080,7 @@
 		struct sk_buff *skb = dev->tx_skbs[i];
 		dev->tx_skbs[i] = NULL;
 		if (skb) {
-			u32 *desc = dev->tx_descs + (i * DESC_SIZE);
+			__le32 *desc = dev->tx_descs + (i * DESC_SIZE);
 			pci_unmap_single(dev->pci_dev,
 					desc_addr_get(desc + DESC_BUFPTR),
 					le32_to_cpu(desc[DESC_CMDSTS]) & CMDSTS_LEN_MASK,
@@ -1107,7 +1110,7 @@
 	skb_frag_t *frag;
 	int stopped = 0;
 	int do_intr = 0;
-	volatile u32 *first_desc;
+	volatile __le32 *first_desc;
 
 	dprintk("ns83820_hard_start_xmit\n");
 
@@ -1180,7 +1183,7 @@
 	first_desc = dev->tx_descs + (free_idx * DESC_SIZE);
 
 	for (;;) {
-		volatile u32 *desc = dev->tx_descs + (free_idx * DESC_SIZE);
+		volatile __le32 *desc = dev->tx_descs + (free_idx * DESC_SIZE);
 
 		dprintk("frag[%3u]: %4u @ 0x%08Lx\n", free_idx, len,
 			(unsigned long long)buf);
@@ -1455,7 +1458,8 @@
 static void ns83820_tx_timeout(struct net_device *ndev)
 {
 	struct ns83820 *dev = PRIV(ndev);
-        u32 tx_done_idx, *desc;
+        u32 tx_done_idx;
+	__le32 *desc;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->tx_lock, flags);
@@ -1841,6 +1845,7 @@
 
 	ndev = alloc_etherdev(sizeof(struct ns83820));
 	dev = PRIV(ndev);
+	dev->ndev = ndev;
 	err = -ENOMEM;
 	if (!dev)
 		goto out;
@@ -1853,7 +1858,7 @@
 	SET_MODULE_OWNER(ndev);
 	SET_NETDEV_DEV(ndev, &pci_dev->dev);
 
-	INIT_WORK(&dev->tq_refill, queue_refill, ndev);
+	INIT_WORK(&dev->tq_refill, queue_refill);
 	tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev);
 
 	err = pci_enable_device(pci_dev);
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 0460099..794cc61 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -338,7 +338,6 @@
 	struct net_device *dev = link->priv;
 	struct el3_private *lp = netdev_priv(dev);
 	tuple_t tuple;
-	cisparse_t parse;
 	unsigned short buf[32];
 	int last_fn, last_ret, i, j;
 	kio_addr_t ioaddr;
@@ -350,17 +349,6 @@
 
 	DEBUG(0, "3c574_config(0x%p)\n", link);
 
-	tuple.Attributes = 0;
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	tuple.TupleData = (cisdata_t *)buf;
-	tuple.TupleDataMax = 64;
-	tuple.TupleOffset = 0;
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present = parse.config.rmask[0];
-
 	link->io.IOAddrLines = 16;
 	for (i = j = 0; j < 0x400; j += 0x20) {
 		link->io.BasePort1 = j ^ 0x300;
@@ -382,6 +370,10 @@
 	/* The 3c574 normally uses an EEPROM for configuration info, including
 	   the hardware address.  The future products may include a modem chip
 	   and put the address in the CIS. */
+	tuple.Attributes = 0;
+	tuple.TupleData = (cisdata_t *)buf;
+	tuple.TupleDataMax = 64;
+	tuple.TupleOffset = 0;
 	tuple.DesiredTuple = 0x88;
 	if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) {
 		pcmcia_get_tuple_data(link, &tuple);
@@ -397,12 +389,9 @@
 			goto failed;
 		}
 	}
-	tuple.DesiredTuple = CISTPL_VERS_1;
-	if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS &&
-		pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS &&
-		pcmcia_parse_tuple(link, &tuple, &parse) == CS_SUCCESS) {
-		cardname = parse.version_1.str + parse.version_1.ofs[1];
-	} else
+	if (link->prod_id[1])
+		cardname = link->prod_id[1];
+	else
 		cardname = "3Com 3c574";
 
 	{
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 231fa2c..1e73ff7 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -253,7 +253,6 @@
     struct net_device *dev = link->priv;
     struct el3_private *lp = netdev_priv(dev);
     tuple_t tuple;
-    cisparse_t parse;
     u16 buf[32], *phys_addr;
     int last_fn, last_ret, i, j, multi = 0, fifo;
     kio_addr_t ioaddr;
@@ -263,26 +262,16 @@
 
     phys_addr = (u16 *)dev->dev_addr;
     tuple.Attributes = 0;
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
     tuple.TupleData = (cisdata_t *)buf;
     tuple.TupleDataMax = sizeof(buf);
     tuple.TupleOffset = 0;
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
-    
-    /* Is this a 3c562? */
-    tuple.DesiredTuple = CISTPL_MANFID;
     tuple.Attributes = TUPLE_RETURN_COMMON;
-    if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) &&
-	(pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS)) {
-	if (le16_to_cpu(buf[0]) != MANFID_3COM)
+
+    /* Is this a 3c562? */
+    if (link->manf_id != MANFID_3COM)
 	    printk(KERN_INFO "3c589_cs: hmmm, is this really a "
 		   "3Com card??\n");
-	multi = (le16_to_cpu(buf[1]) == PRODID_3COM_3C562);
-    }
+    multi = (link->card_id == PRODID_3COM_3C562);
 
     /* For the 3c562, the base address must be xx00-xx7f */
     link->io.IOAddrLines = 16;
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 5ddd574..6139048 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -299,11 +299,7 @@
     tuple.TupleData = (cisdata_t *)buf;
     tuple.TupleDataMax = sizeof(buf);
     tuple.TupleOffset = 0;
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
+
     /* don't trust the CIS on this; Linksys got it wrong */
     link->conf.Present = 0x63;
 
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 48434d7..91f65e9 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -249,12 +249,9 @@
 static int com20020_config(struct pcmcia_device *link)
 {
     struct arcnet_local *lp;
-    tuple_t tuple;
-    cisparse_t parse;
     com20020_dev_t *info;
     struct net_device *dev;
     int i, last_ret, last_fn;
-    u_char buf[64];
     int ioaddr;
 
     info = link->priv;
@@ -264,16 +261,6 @@
 
     DEBUG(0, "com20020_config(0x%p)\n", link);
 
-    tuple.Attributes = 0;
-    tuple.TupleData = buf;
-    tuple.TupleDataMax = 64;
-    tuple.TupleOffset = 0;
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-
     DEBUG(1,"arcnet: baseport1 is %Xh\n", link->io.BasePort1);
     i = !CS_SUCCESS;
     if (!link->io.BasePort1)
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 65f6fdf..0d7de61 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -342,7 +342,7 @@
     tuple_t tuple;
     cisparse_t parse;
     u_short buf[32];
-    int i, last_fn, last_ret, ret;
+    int i, last_fn = 0, last_ret = 0, ret;
     kio_addr_t ioaddr;
     cardtype_t cardtype;
     char *card_name = "unknown";
@@ -350,21 +350,9 @@
 
     DEBUG(0, "fmvj18x_config(0x%p)\n", link);
 
-    /*
-       This reads the card's CONFIG tuple to find its configuration
-       registers.
-    */
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
     tuple.TupleData = (u_char *)buf;
     tuple.TupleDataMax = 64;
     tuple.TupleOffset = 0;
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-
-    link->conf.ConfigBase = parse.config.base; 
-    link->conf.Present = parse.config.rmask[0];
-
     tuple.DesiredTuple = CISTPL_FUNCE;
     tuple.TupleOffset = 0;
     if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) {
@@ -374,17 +362,12 @@
 	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
 	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
 	link->conf.ConfigIndex = parse.cftable_entry.index;
-	tuple.DesiredTuple = CISTPL_MANFID;
-	if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS)
-	    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	else
-	    buf[0] = 0xffff;
-	switch (le16_to_cpu(buf[0])) {
+	switch (link->manf_id) {
 	case MANFID_TDK:
 	    cardtype = TDK;
-	    if (le16_to_cpu(buf[1]) == PRODID_TDK_GN3410
-			|| le16_to_cpu(buf[1]) == PRODID_TDK_NP9610
-			|| le16_to_cpu(buf[1]) == PRODID_TDK_MN3200) {
+	    if (link->card_id == PRODID_TDK_GN3410
+			|| link->card_id == PRODID_TDK_NP9610
+			|| link->card_id == PRODID_TDK_MN3200) {
 		/* MultiFunction Card */
 		link->conf.ConfigBase = 0x800;
 		link->conf.ConfigIndex = 0x47;
@@ -395,11 +378,11 @@
 	    cardtype = CONTEC;
 	    break;
 	case MANFID_FUJITSU:
-	    if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10302)
+	    if (link->card_id == PRODID_FUJITSU_MBH10302)
                 /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302),
                    but these are MBH10304 based card. */ 
 		cardtype = MBH10304;
-	    else if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10304)
+	    else if (link->card_id == PRODID_FUJITSU_MBH10304)
 		cardtype = MBH10304;
 	    else
 		cardtype = LA501;
@@ -409,14 +392,9 @@
 	}
     } else {
 	/* old type card */
-	tuple.DesiredTuple = CISTPL_MANFID;
-	if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS)
-	    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	else
-	    buf[0] = 0xffff;
-	switch (le16_to_cpu(buf[0])) {
+	switch (link->manf_id) {
 	case MANFID_FUJITSU:
-	    if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10304) {
+	    if (link->card_id == PRODID_FUJITSU_MBH10304) {
 		cardtype = XXX10304;    /* MBH10304 with buggy CIS */
 	        link->conf.ConfigIndex = 0x20;
 	    } else {
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index bc0ca41..a956a51 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -222,24 +222,12 @@
     ibmtr_dev_t *info = link->priv;
     struct net_device *dev = info->dev;
     struct tok_info *ti = netdev_priv(dev);
-    tuple_t tuple;
-    cisparse_t parse;
     win_req_t req;
     memreq_t mem;
     int i, last_ret, last_fn;
-    u_char buf[64];
 
     DEBUG(0, "ibmtr_config(0x%p)\n", link);
 
-    tuple.Attributes = 0;
-    tuple.TupleData = buf;
-    tuple.TupleDataMax = 64;
-    tuple.TupleOffset = 0;
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
     link->conf.ConfigIndex = 0x61;
 
     /* Determine if this is PRIMARY or ALTERNATE. */
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index e77110e..3b70774 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -656,23 +656,12 @@
   struct net_device *dev = link->priv;
   mace_private *lp = netdev_priv(dev);
   tuple_t tuple;
-  cisparse_t parse;
   u_char buf[64];
   int i, last_ret, last_fn;
   kio_addr_t ioaddr;
 
   DEBUG(0, "nmclan_config(0x%p)\n", link);
 
-  tuple.Attributes = 0;
-  tuple.TupleData = buf;
-  tuple.TupleDataMax = 64;
-  tuple.TupleOffset = 0;
-  tuple.DesiredTuple = CISTPL_CONFIG;
-  CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-  CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-  CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-  link->conf.ConfigBase = parse.config.base;
-
   CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
   CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
   CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
@@ -686,6 +675,7 @@
   tuple.TupleData = buf;
   tuple.TupleDataMax = 64;
   tuple.TupleOffset = 0;
+  tuple.Attributes = 0;
   CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
   CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
   memcpy(dev->dev_addr, tuple.TupleData, ETHER_ADDR_LEN);
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 0c00d18..2b1238e 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -519,31 +519,15 @@
     tuple_t tuple;
     cisparse_t parse;
     int i, last_ret, last_fn, start_pg, stop_pg, cm_offset;
-    int manfid = 0, prodid = 0, has_shmem = 0;
+    int has_shmem = 0;
     u_short buf[64];
     hw_info_t *hw_info;
 
     DEBUG(0, "pcnet_config(0x%p)\n", link);
 
-    tuple.Attributes = 0;
     tuple.TupleData = (cisdata_t *)buf;
     tuple.TupleDataMax = sizeof(buf);
     tuple.TupleOffset = 0;
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
-
-    tuple.DesiredTuple = CISTPL_MANFID;
-    tuple.Attributes = TUPLE_RETURN_COMMON;
-    if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) &&
- 	(pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS)) {
-	manfid = le16_to_cpu(buf[0]);
-	prodid = le16_to_cpu(buf[1]);
-    }
-
     tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
     tuple.Attributes = 0;
     CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
@@ -589,8 +573,8 @@
 	link->conf.Attributes |= CONF_ENABLE_SPKR;
 	link->conf.Status = CCSR_AUDIO_ENA;
     }
-    if ((manfid == MANFID_IBM) &&
-	(prodid == PRODID_IBM_HOME_AND_AWAY))
+    if ((link->manf_id == MANFID_IBM) &&
+	(link->card_id == PRODID_IBM_HOME_AND_AWAY))
 	link->conf.ConfigIndex |= 0x10;
 
     CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
@@ -624,10 +608,10 @@
     info->flags = hw_info->flags;
     /* Check for user overrides */
     info->flags |= (delay_output) ? DELAY_OUTPUT : 0;
-    if ((manfid == MANFID_SOCKET) &&
-	((prodid == PRODID_SOCKET_LPE) ||
-	 (prodid == PRODID_SOCKET_LPE_CF) ||
-	 (prodid == PRODID_SOCKET_EIO)))
+    if ((link->manf_id == MANFID_SOCKET) &&
+	((link->card_id == PRODID_SOCKET_LPE) ||
+	 (link->card_id == PRODID_SOCKET_LPE_CF) ||
+	 (link->card_id == PRODID_SOCKET_EIO)))
 	info->flags &= ~USE_BIG_BUF;
     if (!use_big_buf)
 	info->flags &= ~USE_BIG_BUF;
@@ -1096,7 +1080,6 @@
 
     /* Check for pending interrupt with expired latency timer: with
        this, we can limp along even if the interrupt is blocked */
-    outb_p(E8390_NODMA+E8390_PAGE0, nic_base + E8390_CMD);
     if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
 	if (!info->fast_poll)
 	    printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 20fcc35..530df88 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -560,16 +560,8 @@
 
     /* Read the station address from the CIS.  It is stored as the last
        (fourth) string in the Version 1 Version/ID tuple. */
-    tuple->DesiredTuple = CISTPL_VERS_1;
-    if (first_tuple(link, tuple, parse) != CS_SUCCESS) {
-	rc = -1;
-	goto free_cfg_mem;
-    }
-    /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */
-    if (next_tuple(link, tuple, parse) != CS_SUCCESS)
-	first_tuple(link, tuple, parse);
-    if (parse->version_1.ns > 3) {
-	station_addr = parse->version_1.str + parse->version_1.ofs[3];
+    if (link->prod_id[3]) {
+	station_addr = link->prod_id[3];
 	if (cvt_ascii_address(dev, station_addr) == 0) {
 		rc = 0;
 		goto free_cfg_mem;
@@ -744,15 +736,12 @@
 	}
     }
     /* Try the third string in the Version 1 Version/ID tuple. */
-    tuple->DesiredTuple = CISTPL_VERS_1;
-    if (first_tuple(link, tuple, parse) != CS_SUCCESS) {
-	rc = -1;
-	goto free_cfg_mem;
-    }
-    station_addr = parse->version_1.str + parse->version_1.ofs[2];
-    if (cvt_ascii_address(dev, station_addr) == 0) {
-	rc = 0;
-	goto free_cfg_mem;
+    if (link->prod_id[2]) {
+	station_addr = link->prod_id[2];
+	if (cvt_ascii_address(dev, station_addr) == 0) {
+		rc = 0;
+		goto free_cfg_mem;
+	}
     }
 
     rc = -1;
@@ -970,10 +959,6 @@
 {
     struct net_device *dev = link->priv;
     struct smc_private *smc = netdev_priv(dev);
-    struct smc_cfg_mem *cfg_mem;
-    tuple_t *tuple;
-    cisparse_t *parse;
-    u_char *buf;
     char *name;
     int i, j, rev;
     kio_addr_t ioaddr;
@@ -981,30 +966,8 @@
 
     DEBUG(0, "smc91c92_config(0x%p)\n", link);
 
-    cfg_mem = kmalloc(sizeof(struct smc_cfg_mem), GFP_KERNEL);
-    if (!cfg_mem)
-	goto config_failed;
-
-    tuple = &cfg_mem->tuple;
-    parse = &cfg_mem->parse;
-    buf = cfg_mem->buf;
-
-    tuple->Attributes = tuple->TupleOffset = 0;
-    tuple->TupleData = (cisdata_t *)buf;
-    tuple->TupleDataMax = 64;
-
-    tuple->DesiredTuple = CISTPL_CONFIG;
-    i = first_tuple(link, tuple, parse);
-    CS_EXIT_TEST(i, ParseTuple, config_failed);
-    link->conf.ConfigBase = parse->config.base;
-    link->conf.Present = parse->config.rmask[0];
-
-    tuple->DesiredTuple = CISTPL_MANFID;
-    tuple->Attributes = TUPLE_RETURN_COMMON;
-    if (first_tuple(link, tuple, parse) == CS_SUCCESS) {
-	smc->manfid = parse->manfid.manf;
-	smc->cardid = parse->manfid.card;
-    }
+    smc->manfid = link->manf_id;
+    smc->cardid = link->card_id;
 
     if ((smc->manfid == MANFID_OSITECH) &&
 	(smc->cardid != PRODID_OSITECH_SEVEN)) {
@@ -1134,14 +1097,12 @@
     	    printk(KERN_NOTICE "  No MII transceivers found!\n");
 	}
     }
-    kfree(cfg_mem);
     return 0;
 
 config_undo:
     unregister_netdev(dev);
 config_failed:			/* CS_EXIT_TEST() calls jump to here... */
     smc91c92_release(link);
-    kfree(cfg_mem);
     return -ENODEV;
 } /* smc91c92_config */
 
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index f3914f5..8478dca 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -332,6 +332,7 @@
  */
 
 typedef struct local_info_t {
+	struct net_device	*dev;
 	struct pcmcia_device	*p_dev;
     dev_node_t node;
     struct net_device_stats stats;
@@ -353,7 +354,7 @@
  */
 static int do_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void do_tx_timeout(struct net_device *dev);
-static void xirc2ps_tx_timeout_task(void *data);
+static void xirc2ps_tx_timeout_task(struct work_struct *work);
 static struct net_device_stats *do_get_stats(struct net_device *dev);
 static void set_addresses(struct net_device *dev);
 static void set_multicast_list(struct net_device *dev);
@@ -567,6 +568,7 @@
     if (!dev)
 	    return -ENOMEM;
     local = netdev_priv(dev);
+    local->dev = dev;
     local->p_dev = link;
     link->priv = dev;
 
@@ -591,7 +593,7 @@
 #ifdef HAVE_TX_TIMEOUT
     dev->tx_timeout = do_tx_timeout;
     dev->watchdog_timeo = TX_TIMEOUT;
-    INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev);
+    INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task);
 #endif
 
     return xirc2ps_config(link);
@@ -707,22 +709,11 @@
  * Returns: true if this is a CE2
  */
 static int
-has_ce2_string(struct pcmcia_device * link)
+has_ce2_string(struct pcmcia_device * p_dev)
 {
-    tuple_t tuple;
-    cisparse_t parse;
-    u_char buf[256];
-
-    tuple.Attributes = 0;
-    tuple.TupleData = buf;
-    tuple.TupleDataMax = 254;
-    tuple.TupleOffset = 0;
-    tuple.DesiredTuple = CISTPL_VERS_1;
-    if (!first_tuple(link, &tuple, &parse) && parse.version_1.ns > 2) {
-	if (strstr(parse.version_1.str + parse.version_1.ofs[2], "CE2"))
-	    return 1;
-    }
-    return 0;
+	if (p_dev->prod_id[2] && strstr(p_dev->prod_id[2], "CE2"))
+		return 1;
+	return 0;
 }
 
 /****************
@@ -792,13 +783,6 @@
 	goto failure;
     }
 
-    /* get configuration stuff */
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    if ((err=first_tuple(link, &tuple, &parse)))
-	goto cis_error;
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present =    parse.config.rmask[0];
-
     /* get the ethernet address from the CIS */
     tuple.DesiredTuple = CISTPL_FUNCE;
     for (err = first_tuple(link, &tuple, &parse); !err;
@@ -1062,8 +1046,6 @@
     xirc2ps_release(link);
     return -ENODEV;
 
-  cis_error:
-    printk(KNOT_XIRC "unable to parse CIS\n");
   failure:
     return -ENODEV;
 } /* xirc2ps_config */
@@ -1344,9 +1326,11 @@
 /*====================================================================*/
 
 static void
-xirc2ps_tx_timeout_task(void *data)
+xirc2ps_tx_timeout_task(struct work_struct *work)
 {
-    struct net_device *dev = data;
+	local_info_t *local =
+		container_of(work, local_info_t, tx_timeout_task);
+	struct net_device *dev = local->dev;
     /* reset the card */
     do_reset(dev,1);
     dev->trans_start = jiffies;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index b79ec0d..f994f12 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -56,13 +56,19 @@
 	---help---
 	  Currently supports the LAN83C185 PHY
 
+config BROADCOM_PHY
+	tristate "Drivers for Broadcom PHYs"
+	depends on PHYLIB
+	---help---
+	  Currently supports the BCM5411, BCM5421 and BCM5461 PHYs.
+
 config FIXED_PHY
 	tristate "Drivers for PHY emulation on fixed speed/link"
 	depends on PHYLIB
 	---help---
 	  Adds the driver to PHY layer to cover the boards that do not have any PHY bound,
-	  but with the ability to manipulate with speed/link in software. The relavant MII
-	  speed/duplex parameters could be effectively handled in user-specified  fuction.
+	  but with the ability to manipulate the speed/link in software. The relevant MII
+	  speed/duplex parameters could be effectively handled in a user-specified function.
 	  Currently tested with mpc866ads.
 
 config FIXED_MII_10_FDX
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 320f832..bcd1efb 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -10,4 +10,5 @@
 obj-$(CONFIG_QSEMI_PHY)		+= qsemi.o
 obj-$(CONFIG_SMSC_PHY)		+= smsc.o
 obj-$(CONFIG_VITESSE_PHY)	+= vitesse.o
+obj-$(CONFIG_BROADCOM_PHY)	+= broadcom.o
 obj-$(CONFIG_FIXED_PHY)		+= fixed.o
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
new file mode 100644
index 0000000..29666c8
--- /dev/null
+++ b/drivers/net/phy/broadcom.c
@@ -0,0 +1,175 @@
+/*
+ *	drivers/net/phy/broadcom.c
+ *
+ *	Broadcom BCM5411, BCM5421 and BCM5461 Gigabit Ethernet
+ *	transceivers.
+ *
+ *	Copyright (c) 2006  Maciej W. Rozycki
+ *
+ *	Inspired by code written by Amy Fong.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define MII_BCM54XX_ECR		0x10	/* BCM54xx extended control register */
+#define MII_BCM54XX_ECR_IM	0x1000	/* Interrupt mask */
+#define MII_BCM54XX_ECR_IF	0x0800	/* Interrupt force */
+
+#define MII_BCM54XX_ESR		0x11	/* BCM54xx extended status register */
+#define MII_BCM54XX_ESR_IS	0x1000	/* Interrupt status */
+
+#define MII_BCM54XX_ISR		0x1a	/* BCM54xx interrupt status register */
+#define MII_BCM54XX_IMR		0x1b	/* BCM54xx interrupt mask register */
+#define MII_BCM54XX_INT_CRCERR	0x0001	/* CRC error */
+#define MII_BCM54XX_INT_LINK	0x0002	/* Link status changed */
+#define MII_BCM54XX_INT_SPEED	0x0004	/* Link speed change */
+#define MII_BCM54XX_INT_DUPLEX	0x0008	/* Duplex mode changed */
+#define MII_BCM54XX_INT_LRS	0x0010	/* Local receiver status changed */
+#define MII_BCM54XX_INT_RRS	0x0020	/* Remote receiver status changed */
+#define MII_BCM54XX_INT_SSERR	0x0040	/* Scrambler synchronization error */
+#define MII_BCM54XX_INT_UHCD	0x0080	/* Unsupported HCD negotiated */
+#define MII_BCM54XX_INT_NHCD	0x0100	/* No HCD */
+#define MII_BCM54XX_INT_NHCDL	0x0200	/* No HCD link */
+#define MII_BCM54XX_INT_ANPR	0x0400	/* Auto-negotiation page received */
+#define MII_BCM54XX_INT_LC	0x0800	/* All counters below 128 */
+#define MII_BCM54XX_INT_HC	0x1000	/* Counter above 32768 */
+#define MII_BCM54XX_INT_MDIX	0x2000	/* MDIX status change */
+#define MII_BCM54XX_INT_PSERR	0x4000	/* Pair swap error */
+
+MODULE_DESCRIPTION("Broadcom PHY driver");
+MODULE_AUTHOR("Maciej W. Rozycki");
+MODULE_LICENSE("GPL");
+
+static int bcm54xx_config_init(struct phy_device *phydev)
+{
+	int reg, err;
+
+	reg = phy_read(phydev, MII_BCM54XX_ECR);
+	if (reg < 0)
+		return reg;
+
+	/* Mask interrupts globally.  */
+	reg |= MII_BCM54XX_ECR_IM;
+	err = phy_write(phydev, MII_BCM54XX_ECR, reg);
+	if (err < 0)
+		return err;
+
+	/* Unmask events we are interested in.  */
+	reg = ~(MII_BCM54XX_INT_DUPLEX |
+		MII_BCM54XX_INT_SPEED |
+		MII_BCM54XX_INT_LINK);
+	err = phy_write(phydev, MII_BCM54XX_IMR, reg);
+	if (err < 0)
+		return err;
+	return 0;
+}
+
+static int bcm54xx_ack_interrupt(struct phy_device *phydev)
+{
+	int reg;
+
+	/* Clear pending interrupts.  */
+	reg = phy_read(phydev, MII_BCM54XX_ISR);
+	if (reg < 0)
+		return reg;
+
+	return 0;
+}
+
+static int bcm54xx_config_intr(struct phy_device *phydev)
+{
+	int reg, err;
+
+	reg = phy_read(phydev, MII_BCM54XX_ECR);
+	if (reg < 0)
+		return reg;
+
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+		reg &= ~MII_BCM54XX_ECR_IM;
+	else
+		reg |= MII_BCM54XX_ECR_IM;
+
+	err = phy_write(phydev, MII_BCM54XX_ECR, reg);
+	return err;
+}
+
+static struct phy_driver bcm5411_driver = {
+	.phy_id		= 0x00206070,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Broadcom BCM5411",
+	.features	= PHY_GBIT_FEATURES,
+	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+	.config_init	= bcm54xx_config_init,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.ack_interrupt	= bcm54xx_ack_interrupt,
+	.config_intr	= bcm54xx_config_intr,
+	.driver 	= { .owner = THIS_MODULE },
+};
+
+static struct phy_driver bcm5421_driver = {
+	.phy_id		= 0x002060e0,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Broadcom BCM5421",
+	.features	= PHY_GBIT_FEATURES,
+	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+	.config_init	= bcm54xx_config_init,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.ack_interrupt	= bcm54xx_ack_interrupt,
+	.config_intr	= bcm54xx_config_intr,
+	.driver 	= { .owner = THIS_MODULE },
+};
+
+static struct phy_driver bcm5461_driver = {
+	.phy_id		= 0x002060c0,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Broadcom BCM5461",
+	.features	= PHY_GBIT_FEATURES,
+	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+	.config_init	= bcm54xx_config_init,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.ack_interrupt	= bcm54xx_ack_interrupt,
+	.config_intr	= bcm54xx_config_intr,
+	.driver 	= { .owner = THIS_MODULE },
+};
+
+static int __init broadcom_init(void)
+{
+	int ret;
+
+	ret = phy_driver_register(&bcm5411_driver);
+	if (ret)
+		goto out_5411;
+	ret = phy_driver_register(&bcm5421_driver);
+	if (ret)
+		goto out_5421;
+	ret = phy_driver_register(&bcm5461_driver);
+	if (ret)
+		goto out_5461;
+	return ret;
+
+out_5461:
+	phy_driver_unregister(&bcm5421_driver);
+out_5421:
+	phy_driver_unregister(&bcm5411_driver);
+out_5411:
+	return ret;
+}
+
+static void __exit broadcom_exit(void)
+{
+	phy_driver_unregister(&bcm5461_driver);
+	phy_driver_unregister(&bcm5421_driver);
+	phy_driver_unregister(&bcm5411_driver);
+}
+
+module_init(broadcom_init);
+module_exit(broadcom_exit);
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index f14e992..096d4a1 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -254,7 +254,7 @@
 		goto device_create_fail;
 	}
 
-	phydev->irq = -1;
+	phydev->irq = PHY_IGNORE_INTERRUPT;
 	phydev->dev.bus = &mdio_bus_type;
 
 	if(number)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3af9fcf..e175f39 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -7,6 +7,7 @@
  * Author: Andy Fleming
  *
  * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ * Copyright (c) 2006  Maciej W. Rozycki
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -32,6 +33,8 @@
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/phy.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -394,7 +397,7 @@
 EXPORT_SYMBOL(phy_start_aneg);
 
 
-static void phy_change(void *data);
+static void phy_change(struct work_struct *work);
 static void phy_timer(unsigned long data);
 
 /* phy_start_machine:
@@ -484,6 +487,9 @@
 {
 	struct phy_device *phydev = phy_dat;
 
+	if (PHY_HALTED == phydev->state)
+		return IRQ_NONE;		/* It can't be ours.  */
+
 	/* The MDIO bus is not allowed to be written in interrupt
 	 * context, so we need to disable the irq here.  A work
 	 * queue will write the PHY to disable and clear the
@@ -549,7 +555,7 @@
 {
 	int err = 0;
 
-	INIT_WORK(&phydev->phy_queue, phy_change, phydev);
+	INIT_WORK(&phydev->phy_queue, phy_change);
 
 	if (request_irq(phydev->irq, phy_interrupt,
 				IRQF_SHARED,
@@ -577,6 +583,12 @@
 	if (err)
 		phy_error(phydev);
 
+	/*
+	 * Finish any pending work; we might have been scheduled
+	 * to be called from keventd ourselves, though.
+	 */
+	run_scheduled_work(&phydev->phy_queue);
+
 	free_irq(phydev->irq, phydev);
 
 	return err;
@@ -585,10 +597,11 @@
 
 
 /* Scheduled by the phy_interrupt/timer to handle PHY changes */
-static void phy_change(void *data)
+static void phy_change(struct work_struct *work)
 {
 	int err;
-	struct phy_device *phydev = data;
+	struct phy_device *phydev =
+		container_of(work, struct phy_device, phy_queue);
 
 	err = phy_disable_interrupts(phydev);
 
@@ -603,7 +616,8 @@
 	enable_irq(phydev->irq);
 
 	/* Reenable interrupts */
-	err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
+	if (PHY_HALTED != phydev->state)
+		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
 
 	if (err)
 		goto irq_enable_err;
@@ -624,18 +638,24 @@
 	if (PHY_HALTED == phydev->state)
 		goto out_unlock;
 
-	if (phydev->irq != PHY_POLL) {
-		/* Clear any pending interrupts */
-		phy_clear_interrupt(phydev);
+	phydev->state = PHY_HALTED;
 
+	if (phydev->irq != PHY_POLL) {
 		/* Disable PHY Interrupts */
 		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
-	}
 
-	phydev->state = PHY_HALTED;
+		/* Clear any pending interrupts */
+		phy_clear_interrupt(phydev);
+	}
 
 out_unlock:
 	spin_unlock(&phydev->lock);
+
+	/*
+	 * Cannot call flush_scheduled_work() here as desired because
+	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
+	 * will not reenable interrupts.
+	 */
 }
 
 
@@ -693,60 +713,57 @@
 
 			break;
 		case PHY_AN:
+			err = phy_read_status(phydev);
+
+			if (err < 0)
+				break;
+
+			/* If the link is down, give up on
+			 * negotiation for now */
+			if (!phydev->link) {
+				phydev->state = PHY_NOLINK;
+				netif_carrier_off(phydev->attached_dev);
+				phydev->adjust_link(phydev->attached_dev);
+				break;
+			}
+
 			/* Check if negotiation is done.  Break
 			 * if there's an error */
 			err = phy_aneg_done(phydev);
 			if (err < 0)
 				break;
 
-			/* If auto-negotiation is done, we change to
-			 * either RUNNING, or NOLINK */
+			/* If AN is done, we're running */
 			if (err > 0) {
-				err = phy_read_status(phydev);
-
-				if (err)
-					break;
-
-				if (phydev->link) {
-					phydev->state = PHY_RUNNING;
-					netif_carrier_on(phydev->attached_dev);
-				} else {
-					phydev->state = PHY_NOLINK;
-					netif_carrier_off(phydev->attached_dev);
-				}
-
+				phydev->state = PHY_RUNNING;
+				netif_carrier_on(phydev->attached_dev);
 				phydev->adjust_link(phydev->attached_dev);
 
 			} else if (0 == phydev->link_timeout--) {
-				/* The counter expired, so either we
-				 * switch to forced mode, or the
-				 * magic_aneg bit exists, and we try aneg
-				 * again */
-				if (!(phydev->drv->flags & PHY_HAS_MAGICANEG)) {
-					int idx;
-
-					/* We'll start from the
-					 * fastest speed, and work
-					 * our way down */
-					idx = phy_find_valid(0,
-							phydev->supported);
-
-					phydev->speed = settings[idx].speed;
-					phydev->duplex = settings[idx].duplex;
-					
-					phydev->autoneg = AUTONEG_DISABLE;
-					phydev->state = PHY_FORCING;
-					phydev->link_timeout =
-						PHY_FORCE_TIMEOUT;
-
-					pr_info("Trying %d/%s\n",
-							phydev->speed,
-							DUPLEX_FULL ==
-							phydev->duplex ?
-							"FULL" : "HALF");
-				}
+				int idx;
 
 				needs_aneg = 1;
+				/* If we have the magic_aneg bit,
+				 * we try again */
+				if (phydev->drv->flags & PHY_HAS_MAGICANEG)
+					break;
+
+				/* The timer expired, and we still
+				 * don't have a setting, so we try
+				 * forcing it until we find one that
+				 * works, starting from the fastest speed,
+				 * and working our way down */
+				idx = phy_find_valid(0, phydev->supported);
+
+				phydev->speed = settings[idx].speed;
+				phydev->duplex = settings[idx].duplex;
+
+				phydev->autoneg = AUTONEG_DISABLE;
+
+				pr_info("Trying %d/%s\n", phydev->speed,
+						DUPLEX_FULL ==
+						phydev->duplex ?
+						"FULL" : "HALF");
 			}
 			break;
 		case PHY_NOLINK:
@@ -762,7 +779,7 @@
 			}
 			break;
 		case PHY_FORCING:
-			err = phy_read_status(phydev);
+			err = genphy_update_link(phydev);
 
 			if (err)
 				break;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 3bbd5e7..b01fc70 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -59,6 +59,7 @@
 	dev->duplex = -1;
 	dev->pause = dev->asym_pause = 0;
 	dev->link = 1;
+	dev->interface = PHY_INTERFACE_MODE_GMII;
 
 	dev->autoneg = AUTONEG_ENABLE;
 
@@ -137,11 +138,12 @@
  *   the desired functionality.
  */
 struct phy_device * phy_connect(struct net_device *dev, const char *phy_id,
-		void (*handler)(struct net_device *), u32 flags)
+		void (*handler)(struct net_device *), u32 flags,
+		u32 interface)
 {
 	struct phy_device *phydev;
 
-	phydev = phy_attach(dev, phy_id, flags);
+	phydev = phy_attach(dev, phy_id, flags, interface);
 
 	if (IS_ERR(phydev))
 		return phydev;
@@ -186,7 +188,7 @@
 }
 
 struct phy_device *phy_attach(struct net_device *dev,
-		const char *phy_id, u32 flags)
+		const char *phy_id, u32 flags, u32 interface)
 {
 	struct bus_type *bus = &mdio_bus_type;
 	struct phy_device *phydev;
@@ -231,6 +233,20 @@
 
 	phydev->dev_flags = flags;
 
+	phydev->interface = interface;
+
+	/* Do initial configuration here, now that
+	 * we have certain key parameters
+	 * (dev_flags and interface) */
+	if (phydev->drv->config_init) {
+		int err;
+
+		err = phydev->drv->config_init(phydev);
+
+		if (err < 0)
+			return ERR_PTR(err);
+	}
+
 	return phydev;
 }
 EXPORT_SYMBOL(phy_attach);
@@ -427,6 +443,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(genphy_update_link);
 
 /* genphy_read_status
  *
@@ -611,13 +628,8 @@
 
 	spin_unlock(&phydev->lock);
 
-	if (err < 0)
-		return err;
-
-	if (phydev->drv->config_init)
-		err = phydev->drv->config_init(phydev);
-
 	return err;
+
 }
 
 static int phy_remove(struct device *dev)
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 71afb27..6bb085f 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -138,9 +138,9 @@
 #define PLIP_NIBBLE_WAIT        3000
 
 /* Bottom halves */
-static void plip_kick_bh(struct net_device *dev);
-static void plip_bh(struct net_device *dev);
-static void plip_timer_bh(struct net_device *dev);
+static void plip_kick_bh(struct work_struct *work);
+static void plip_bh(struct work_struct *work);
+static void plip_timer_bh(struct work_struct *work);
 
 /* Interrupt handler */
 static void plip_interrupt(int irq, void *dev_id);
@@ -207,9 +207,10 @@
 
 struct net_local {
 	struct net_device_stats enet_stats;
+	struct net_device *dev;
 	struct work_struct immediate;
-	struct work_struct deferred;
-	struct work_struct timer;
+	struct delayed_work deferred;
+	struct delayed_work timer;
 	struct plip_local snd_data;
 	struct plip_local rcv_data;
 	struct pardevice *pardev;
@@ -306,11 +307,11 @@
 	nl->nibble	= PLIP_NIBBLE_WAIT;
 
 	/* Initialize task queue structures */
-	INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev);
-	INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev);
+	INIT_WORK(&nl->immediate, plip_bh);
+	INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
 
 	if (dev->irq == -1)
-		INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev);
+		INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
 
 	spin_lock_init(&nl->lock);
 }
@@ -319,9 +320,10 @@
    This routine is kicked by do_timer().
    Request `plip_bh' to be invoked. */
 static void
-plip_kick_bh(struct net_device *dev)
+plip_kick_bh(struct work_struct *work)
 {
-	struct net_local *nl = netdev_priv(dev);
+	struct net_local *nl =
+		container_of(work, struct net_local, deferred.work);
 
 	if (nl->is_deferred)
 		schedule_work(&nl->immediate);
@@ -362,9 +364,9 @@
 
 /* Bottom half handler of PLIP. */
 static void
-plip_bh(struct net_device *dev)
+plip_bh(struct work_struct *work)
 {
-	struct net_local *nl = netdev_priv(dev);
+	struct net_local *nl = container_of(work, struct net_local, immediate);
 	struct plip_local *snd = &nl->snd_data;
 	struct plip_local *rcv = &nl->rcv_data;
 	plip_func f;
@@ -372,20 +374,21 @@
 
 	nl->is_deferred = 0;
 	f = connection_state_table[nl->connection];
-	if ((r = (*f)(dev, nl, snd, rcv)) != OK
-	    && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) {
+	if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK
+	    && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
 		nl->is_deferred = 1;
 		schedule_delayed_work(&nl->deferred, 1);
 	}
 }
 
 static void
-plip_timer_bh(struct net_device *dev)
+plip_timer_bh(struct work_struct *work)
 {
-	struct net_local *nl = netdev_priv(dev);
+	struct net_local *nl =
+		container_of(work, struct net_local, timer.work);
 
 	if (!(atomic_read (&nl->kill_timer))) {
-		plip_interrupt (-1, dev);
+		plip_interrupt (-1, nl->dev);
 
 		schedule_delayed_work(&nl->timer, 1);
 	}
@@ -1284,6 +1287,7 @@
 		}
 
 		nl = netdev_priv(dev);
+		nl->dev = dev;
 		nl->pardev = parport_register_device(port, name, plip_preempt,
 						 plip_wakeup, plip_interrupt,
 						 0, dev);
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index f5802e7..c6de566 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -860,7 +860,7 @@
 			err = PTR_ERR(ppp_class);
 			goto out_chrdev;
 		}
-		class_device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
+		device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), "ppp");
 	}
 
 out:
@@ -2675,7 +2675,7 @@
 	cardmap_destroy(&all_ppp_units);
 	if (unregister_chrdev(PPP_MAJOR, "ppp") != 0)
 		printk(KERN_ERR "PPP: failed to unregister PPP device\n");
-	class_device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
+	device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
 	class_destroy(ppp_class);
 }
 
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 0adee73..315d5c3 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -393,7 +393,7 @@
 
 	po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source);
 	if (po != NULL)
-		return sk_receive_skb(sk_pppox(po), skb);
+		return sk_receive_skb(sk_pppox(po), skb, 0);
 drop:
 	kfree_skb(skb);
 out:
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index ec640f6..d79d141 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2008,7 +2008,7 @@
 			       "%s: Another function issued a reset to the "
 			       "chip. ISR value = %x.\n", ndev->name, value);
 		}
-		queue_work(qdev->workqueue, &qdev->reset_work);
+		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
 		spin_unlock(&qdev->adapter_lock);
 	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
 		ql_disable_interrupts(qdev);
@@ -3182,11 +3182,13 @@
 	/*
 	 * Wake up the worker to process this event.
 	 */
-	queue_work(qdev->workqueue, &qdev->tx_timeout_work);
+	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
 }
 
-static void ql_reset_work(struct ql3_adapter *qdev)
+static void ql_reset_work(struct work_struct *work)
 {
+	struct ql3_adapter *qdev =
+		container_of(work, struct ql3_adapter, reset_work.work);
 	struct net_device *ndev = qdev->ndev;
 	u32 value;
 	struct ql_tx_buf_cb *tx_cb;
@@ -3278,9 +3280,12 @@
 	}
 }
 
-static void ql_tx_timeout_work(struct ql3_adapter *qdev)
+static void ql_tx_timeout_work(struct work_struct *work)
 {
-	ql_cycle_adapter(qdev,QL_DO_RESET);
+	struct ql3_adapter *qdev =
+		container_of(work, struct ql3_adapter, tx_timeout_work.work);
+
+	ql_cycle_adapter(qdev, QL_DO_RESET);
 }
 
 static void ql_get_board_info(struct ql3_adapter *qdev)
@@ -3459,9 +3464,8 @@
 	netif_stop_queue(ndev);
 
 	qdev->workqueue = create_singlethread_workqueue(ndev->name);
-	INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev);
-	INIT_WORK(&qdev->tx_timeout_work,
-		  (void (*)(void *))ql_tx_timeout_work, qdev);
+	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
+	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
 
 	init_timer(&qdev->adapter_timer);
 	qdev->adapter_timer.function = ql3xxx_timer;
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 65da2c0..ea94de7 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -1186,8 +1186,8 @@
 	u32 numPorts;
 	struct net_device_stats stats;
 	struct workqueue_struct *workqueue;
-	struct work_struct reset_work;
-	struct work_struct tx_timeout_work;
+	struct delayed_work reset_work;
+	struct delayed_work tx_timeout_work;
 	u32 max_frame_size;
 };
 
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index b977ed8..f83b41d 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -225,6 +225,7 @@
 
 static int rx_copybreak = 200;
 static int use_dac;
+static int ignore_parity_err;
 static struct {
 	u32 msg_enable;
 } debug = { -1 };
@@ -424,6 +425,7 @@
 struct rtl8169_private {
 	void __iomem *mmio_addr;	/* memory map physical address */
 	struct pci_dev *pci_dev;	/* Index of PCI device */
+	struct net_device *dev;
 	struct net_device_stats stats;	/* statistics of net device */
 	spinlock_t lock;		/* spin lock flag */
 	u32 msg_enable;
@@ -455,7 +457,7 @@
 	void (*phy_reset_enable)(void __iomem *);
 	unsigned int (*phy_reset_pending)(void __iomem *);
 	unsigned int (*link_ok)(void __iomem *);
-	struct work_struct task;
+	struct delayed_work task;
 	unsigned wol_enabled : 1;
 };
 
@@ -469,6 +471,8 @@
 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
 module_param_named(debug, debug.msg_enable, int, 0);
 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
+module_param_named(ignore_parity_err, ignore_parity_err, bool, 0);
+MODULE_PARM_DESC(ignore_parity_err, "Ignore PCI parity error as target. Default: false");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(RTL8169_VERSION);
 
@@ -571,8 +575,8 @@
 {
 	unsigned int val;
 
-	val = (mdio_read(ioaddr, MII_BMCR) | BMCR_RESET) & 0xffff;
-	mdio_write(ioaddr, MII_BMCR, val);
+	mdio_write(ioaddr, MII_BMCR, BMCR_RESET);
+	val = mdio_read(ioaddr, MII_BMCR);
 }
 
 static void rtl8169_check_link_status(struct net_device *dev,
@@ -1283,11 +1287,6 @@
 	/* Shazam ! */
 
 	if (tp->mac_version == RTL_GIGA_MAC_VER_04) {
-		mdio_write(ioaddr, 31, 0x0001);
-		mdio_write(ioaddr,  9, 0x273a);
-		mdio_write(ioaddr, 14, 0x7bfb);
-		mdio_write(ioaddr, 27, 0x841e);
-
 		mdio_write(ioaddr, 31, 0x0002);
 		mdio_write(ioaddr,  1, 0x90d0);
 		mdio_write(ioaddr, 31, 0x0000);
@@ -1406,6 +1405,22 @@
 	free_netdev(dev);
 }
 
+static void rtl8169_phy_reset(struct net_device *dev,
+			      struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+	int i;
+
+	tp->phy_reset_enable(ioaddr);
+	for (i = 0; i < 100; i++) {
+		if (!tp->phy_reset_pending(ioaddr))
+			return;
+		msleep(1);
+	}
+	if (netif_msg_link(tp))
+		printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
+}
+
 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
@@ -1434,6 +1449,8 @@
 
 	rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);
 
+	rtl8169_phy_reset(dev, tp);
+
 	rtl8169_set_speed(dev, autoneg, speed, duplex);
 
 	if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
@@ -1492,6 +1509,7 @@
 	SET_MODULE_OWNER(dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	tp = netdev_priv(dev);
+	tp->dev = dev;
 	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
 
 	/* enable device (incl. PCI PM wakeup and hotplug setup) */
@@ -1764,7 +1782,7 @@
 	if (retval < 0)
 		goto err_free_rx;
 
-	INIT_WORK(&tp->task, NULL, dev);
+	INIT_DELAYED_WORK(&tp->task, NULL);
 
 	rtl8169_hw_start(dev);
 
@@ -1797,12 +1815,25 @@
 	RTL_R8(ChipCmd);
 }
 
-static void
-rtl8169_hw_start(struct net_device *dev)
+static void rtl8169_set_rx_tx_config_registers(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+	u32 cfg = rtl8169_rx_config;
+
+	cfg |= (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+	RTL_W32(RxConfig, cfg);
+
+	/* Set DMA burst size and Interframe Gap Time */
+	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
+		(InterFrameGap << TxInterFrameGapShift));
+}
+
+static void rtl8169_hw_start(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
 	struct pci_dev *pdev = tp->pci_dev;
+	u16 cmd;
 	u32 i;
 
 	/* Soft reset the chip. */
@@ -1815,6 +1846,11 @@
 		msleep_interruptible(1);
 	}
 
+	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
+		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
+		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
+	}
+
 	if (tp->mac_version == RTL_GIGA_MAC_VER_13) {
 		pci_write_config_word(pdev, 0x68, 0x00);
 		pci_write_config_word(pdev, 0x69, 0x08);
@@ -1822,8 +1858,6 @@
 
 	/* Undocumented stuff. */
 	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
-		u16 cmd;
-
 		/* Realtek's r1000_n.c driver uses '&& 0x01' here. Well... */
 		if ((RTL_R8(Config2) & 0x07) & 0x01)
 			RTL_W32(0x7c, 0x0007ffff);
@@ -1835,23 +1869,29 @@
 		pci_write_config_word(pdev, PCI_COMMAND, cmd);
 	}
 
-
 	RTL_W8(Cfg9346, Cfg9346_Unlock);
+	if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
+	    (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
+	    (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
+	    (tp->mac_version == RTL_GIGA_MAC_VER_04))
+		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+
 	RTL_W8(EarlyTxThres, EarlyTxThld);
 
 	/* Low hurts. Let's disable the filtering. */
 	RTL_W16(RxMaxSize, 16383);
 
-	/* Set Rx Config register */
-	i = rtl8169_rx_config |
-		(RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
-	RTL_W32(RxConfig, i);
+	if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
+	    (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
+	    (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
+	    (tp->mac_version == RTL_GIGA_MAC_VER_04))
+		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+		rtl8169_set_rx_tx_config_registers(tp);
 
-	/* Set DMA burst size and Interframe Gap Time */
-	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
-		(InterFrameGap << TxInterFrameGapShift));
+	cmd = RTL_R16(CPlusCmd);
+	RTL_W16(CPlusCmd, cmd);
 
-	tp->cp_cmd |= RTL_R16(CPlusCmd) | PCIMulRW;
+	tp->cp_cmd |= cmd | PCIMulRW;
 
 	if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
 	    (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
@@ -1877,7 +1917,15 @@
 	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK));
 	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr >> 32));
 	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK));
-	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+
+	if ((tp->mac_version != RTL_GIGA_MAC_VER_01) &&
+	    (tp->mac_version != RTL_GIGA_MAC_VER_02) &&
+	    (tp->mac_version != RTL_GIGA_MAC_VER_03) &&
+	    (tp->mac_version != RTL_GIGA_MAC_VER_04)) {
+		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+		rtl8169_set_rx_tx_config_registers(tp);
+	}
+
 	RTL_W8(Cfg9346, Cfg9346_Lock);
 
 	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
@@ -1972,7 +2020,7 @@
 	if (!skb)
 		goto err_out;
 
-	skb_reserve(skb, align);
+	skb_reserve(skb, (align - 1) & (u32)skb->data);
 	*sk_buff = skb;
 
 	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
@@ -2087,11 +2135,11 @@
 	tp->cur_tx = tp->dirty_tx = 0;
 }
 
-static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *))
+static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 
-	PREPARE_WORK(&tp->task, task, dev);
+	PREPARE_DELAYED_WORK(&tp->task, task);
 	schedule_delayed_work(&tp->task, 4);
 }
 
@@ -2110,9 +2158,11 @@
 	netif_poll_enable(dev);
 }
 
-static void rtl8169_reinit_task(void *_data)
+static void rtl8169_reinit_task(struct work_struct *work)
 {
-	struct net_device *dev = _data;
+	struct rtl8169_private *tp =
+		container_of(work, struct rtl8169_private, task.work);
+	struct net_device *dev = tp->dev;
 	int ret;
 
 	if (netif_running(dev)) {
@@ -2135,10 +2185,11 @@
 	}
 }
 
-static void rtl8169_reset_task(void *_data)
+static void rtl8169_reset_task(struct work_struct *work)
 {
-	struct net_device *dev = _data;
-	struct rtl8169_private *tp = netdev_priv(dev);
+	struct rtl8169_private *tp =
+		container_of(work, struct rtl8169_private, task.work);
+	struct net_device *dev = tp->dev;
 
 	if (!netif_running(dev))
 		return;
@@ -2332,12 +2383,17 @@
 	/*
 	 * The recovery sequence below admits a very elaborated explanation:
 	 * - it seems to work;
-	 * - I did not see what else could be done.
+	 * - I did not see what else could be done;
+	 * - it makes iop3xx happy.
 	 *
 	 * Feel free to adjust to your needs.
 	 */
-	pci_write_config_word(pdev, PCI_COMMAND,
-			      pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+	if (ignore_parity_err)
+		pci_cmd &= ~PCI_COMMAND_PARITY;
+	else
+		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
+
+	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
 
 	pci_write_config_word(pdev, PCI_STATUS,
 		pci_status & (PCI_STATUS_DETECTED_PARITY |
@@ -2351,10 +2407,11 @@
 		tp->cp_cmd &= ~PCIDAC;
 		RTL_W16(CPlusCmd, tp->cp_cmd);
 		dev->features &= ~NETIF_F_HIGHDMA;
-		rtl8169_schedule_work(dev, rtl8169_reinit_task);
 	}
 
 	rtl8169_hw_reset(ioaddr);
+
+	rtl8169_schedule_work(dev, rtl8169_reinit_task);
 }
 
 static void
@@ -2434,7 +2491,7 @@
 
 		skb = dev_alloc_skb(pkt_size + align);
 		if (skb) {
-			skb_reserve(skb, align);
+			skb_reserve(skb, (align - 1) & (u32)skb->data);
 			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
 			*sk_buff = skb;
 			rtl8169_mark_to_asic(desc, rx_buf_sz);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 33569ec..250cdbe 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -5872,9 +5872,9 @@
  * Description: Sets the link status for the adapter
  */
 
-static void s2io_set_link(unsigned long data)
+static void s2io_set_link(struct work_struct *work)
 {
-	nic_t *nic = (nic_t *) data;
+	nic_t *nic = container_of(work, nic_t, set_link_task);
 	struct net_device *dev = nic->dev;
 	XENA_dev_config_t __iomem *bar0 = nic->bar0;
 	register u64 val64;
@@ -6379,10 +6379,10 @@
  * spin lock.
  */
 
-static void s2io_restart_nic(unsigned long data)
+static void s2io_restart_nic(struct work_struct *work)
 {
-	struct net_device *dev = (struct net_device *) data;
-	nic_t *sp = dev->priv;
+	nic_t *sp = container_of(work, nic_t, rst_timer_task);
+	struct net_device *dev = sp->dev;
 
 	s2io_card_down(sp);
 	if (s2io_card_up(sp)) {
@@ -6992,10 +6992,8 @@
 
 	dev->tx_timeout = &s2io_tx_watchdog;
 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
-	INIT_WORK(&sp->rst_timer_task,
-		  (void (*)(void *)) s2io_restart_nic, dev);
-	INIT_WORK(&sp->set_link_task,
-		  (void (*)(void *)) s2io_set_link, sp);
+	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
+	INIT_WORK(&sp->set_link_task, s2io_set_link);
 
 	pci_save_state(sp->pdev);
 
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 12b719f..3b0bafd 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -1000,7 +1000,7 @@
 static irqreturn_t s2io_isr(int irq, void *dev_id);
 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
 static const struct ethtool_ops netdev_ethtool_ops;
-static void s2io_set_link(unsigned long data);
+static void s2io_set_link(struct work_struct *work);
 static int s2io_set_swapper(nic_t * sp);
 static void s2io_card_down(nic_t *nic);
 static int s2io_card_up(nic_t *nic);
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index d9d0a3a..0d6c95c 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -750,7 +750,7 @@
 	return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	unregister_netdev(dev_seeq);
 	release_region(dev_seeq->base_addr, SEEQ8005_IO_EXTENT);
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index aaba458..b70ed79 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -280,6 +280,7 @@
 struct sis190_private {
 	void __iomem *mmio_addr;
 	struct pci_dev *pci_dev;
+	struct net_device *dev;
 	struct net_device_stats stats;
 	spinlock_t lock;
 	u32 rx_buf_sz;
@@ -897,10 +898,11 @@
 	netif_start_queue(dev);
 }
 
-static void sis190_phy_task(void * data)
+static void sis190_phy_task(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	struct sis190_private *tp = netdev_priv(dev);
+	struct sis190_private *tp =
+		container_of(work, struct sis190_private, phy_task);
+	struct net_device *dev = tp->dev;
 	void __iomem *ioaddr = tp->mmio_addr;
 	int phy_id = tp->mii_if.phy_id;
 	u16 val;
@@ -1047,7 +1049,7 @@
 	if (rc < 0)
 		goto err_free_rx_1;
 
-	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+	INIT_WORK(&tp->phy_task, sis190_phy_task);
 
 	sis190_request_timer(dev);
 
@@ -1436,6 +1438,7 @@
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	tp = netdev_priv(dev);
+	tp->dev = dev;
 	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
 
 	rc = pci_enable_device(pdev);
@@ -1798,7 +1801,7 @@
 
 	sis190_init_rxfilter(dev);
 
-	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+	INIT_WORK(&tp->phy_task, sis190_phy_task);
 
 	dev->open = sis190_open;
 	dev->stop = sis190_close;
diff --git a/drivers/net/sk98lin/h/skdrv2nd.h b/drivers/net/sk98lin/h/skdrv2nd.h
index 778d9e6..3fa6717 100644
--- a/drivers/net/sk98lin/h/skdrv2nd.h
+++ b/drivers/net/sk98lin/h/skdrv2nd.h
@@ -160,7 +160,7 @@
 
 /*
 ** Interim definition of SK_DRV_TIMER placed in this file until 
-** common modules have boon finallized
+** common modules have been finalized
 */
 #define SK_DRV_TIMER			11 
 #define	SK_DRV_MODERATION_TIMER		1
diff --git a/drivers/net/sk98lin/skdim.c b/drivers/net/sk98lin/skdim.c
index 07c1b4c..37ce03f 100644
--- a/drivers/net/sk98lin/skdim.c
+++ b/drivers/net/sk98lin/skdim.c
@@ -252,7 +252,7 @@
 
 /*******************************************************************************
 ** Function     : SkDimDisplayModerationSettings
-** Description  : Displays the current settings regaring interrupt moderation
+** Description  : Displays the current settings regarding interrupt moderation
 ** Programmer   : Ralph Roesler
 ** Last Modified: 22-mar-03
 ** Returns      : void (!)
@@ -510,7 +510,7 @@
 
 /*******************************************************************************
 ** Function     : DisableIntMod()
-** Description  : Disbles the interrupt moderation independent of what inter-
+** Description  : Disables the interrupt moderation independent of what inter-
 **                rupts are running or not
 ** Programmer   : Ralph Roesler
 ** Last Modified: 23-mar-03
diff --git a/drivers/net/sk98lin/skethtool.c b/drivers/net/sk98lin/skethtool.c
index e5cb5b5..3646069 100644
--- a/drivers/net/sk98lin/skethtool.c
+++ b/drivers/net/sk98lin/skethtool.c
@@ -581,6 +581,30 @@
 	return 0;
 }
 
+static int getRegsLen(struct net_device *dev)
+{
+	return 0x4000;
+}
+
+/*
+ * Returns copy of whole control register region
+ * Note: skip RAM address register because accessing it will
+ * 	 cause bus hangs!
+ */
+static void getRegs(struct net_device *dev, struct ethtool_regs *regs,
+			  void *p)
+{
+	DEV_NET *pNet = netdev_priv(dev);
+	const void __iomem *io = pNet->pAC->IoBase;
+
+	regs->version = 1;
+	memset(p, 0, regs->len);
+	memcpy_fromio(p, io, B3_RAM_ADDR);
+
+	memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+		      regs->len - B3_RI_WTO_R1);
+}
+
 const struct ethtool_ops SkGeEthtoolOps = {
 	.get_settings		= getSettings,
 	.set_settings		= setSettings,
@@ -599,4 +623,6 @@
 	.set_tx_csum		= setTxCsum,
 	.get_rx_csum		= getRxCsum,
 	.set_rx_csum		= setRxCsum,
+	.get_regs		= getRegs,
+	.get_regs_len		= getRegsLen,
 };
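
The new getRegsLen()/getRegs() pair exposes the sk98lin register window through
the standard ethtool interface, so userspace can dump it with "ethtool -d
<iface>"; the copy is done in two pieces to skip the RAM address register that
must not be read. A rough sketch of the same hook for a hypothetical driver
(FOO_HOLE and the 0x4000 window size are placeholders, not sk98lin values):

	static int foo_get_regs_len(struct net_device *dev)
	{
		return 0x4000;			/* size of the exposed MMIO window */
	}

	static void foo_get_regs(struct net_device *dev,
				 struct ethtool_regs *regs, void *p)
	{
		struct foo_private *fp = netdev_priv(dev);

		regs->version = 1;
		memset(p, 0, regs->len);
		memcpy_fromio(p, fp->ioaddr, FOO_HOLE);	  /* up to the hole */
		memcpy_fromio(p + FOO_HOLE + 4, fp->ioaddr + FOO_HOLE + 4,
			      regs->len - FOO_HOLE - 4);  /* and past it */
	}
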
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index d4913c3..92d11b9 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -113,6 +113,8 @@
 #include	<linux/init.h>
 #include	<linux/dma-mapping.h>
 #include	<linux/ip.h>
+#include	<linux/mii.h>
+#include	<linux/mm.h>
 
 #include	"h/skdrv1st.h"
 #include	"h/skdrv2nd.h"
@@ -1561,7 +1563,7 @@
 
 	if (pMessage->ip_summed == CHECKSUM_PARTIAL) {
 		u16 hdrlen = pMessage->h.raw - pMessage->data;
-		u16 offset = hdrlen + pMessage->csum;
+		u16 offset = hdrlen + pMessage->csum_offset;
 
 		if ((pMessage->h.ipiph->protocol == IPPROTO_UDP ) &&
 			(pAC->GIni.GIChipRev == 0) &&
@@ -1680,7 +1682,7 @@
 	*/
 	if (pMessage->ip_summed == CHECKSUM_PARTIAL) {
 		u16 hdrlen = pMessage->h.raw - pMessage->data;
-		u16 offset = hdrlen + pMessage->csum;
+		u16 offset = hdrlen + pMessage->csum_offset;
 
 		Control = BMU_STFWD;
 
@@ -2843,6 +2845,56 @@
 	return(&pAC->stats);
 } /* SkGeStats */
 
+/*
+ * Basic MII register access
+ */
+static int SkGeMiiIoctl(struct net_device *dev,
+			struct mii_ioctl_data *data, int cmd)
+{
+	DEV_NET *pNet = netdev_priv(dev);
+	SK_AC *pAC = pNet->pAC;
+	SK_IOC IoC = pAC->IoBase;
+	int Port = pNet->PortNr;
+	SK_GEPORT *pPrt = &pAC->GIni.GP[Port];
+	unsigned long Flags;
+	int err = 0;
+	int reg = data->reg_num & 0x1f;
+	SK_U16 val = data->val_in;
+
+	if (!netif_running(dev))
+		return -ENODEV;	/* Phy still in reset */
+
+	spin_lock_irqsave(&pAC->SlowPathLock, Flags);
+	switch(cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = pPrt->PhyAddr;
+
+		/* fallthru */
+	case SIOCGMIIREG:
+		if (pAC->GIni.GIGenesis)
+			SkXmPhyRead(pAC, IoC, Port, reg, &val);
+		else
+			SkGmPhyRead(pAC, IoC, Port, reg, &val);
+
+		data->val_out = val;
+		break;
+
+	case SIOCSMIIREG:
+		if (!capable(CAP_NET_ADMIN))
+			err = -EPERM;
+
+		else if (pAC->GIni.GIGenesis)
+			SkXmPhyWrite(pAC, IoC, Port, reg, val);
+		else
+			SkGmPhyWrite(pAC, IoC, Port, reg, val);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+	}
+	spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+	return err;
+}
+
 
 /*****************************************************************************
  *
@@ -2876,6 +2928,9 @@
 	pNet = netdev_priv(dev);
 	pAC = pNet->pAC;
 	
+	if (cmd == SIOCGMIIPHY || cmd == SIOCSMIIREG || cmd == SIOCGMIIREG)
+	    return SkGeMiiIoctl(dev, if_mii(rq), cmd);
+
 	if(copy_from_user(&Ioctl, rq->ifr_data, sizeof(SK_GE_IOCTL))) {
 		return -EFAULT;
 	}
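
SkGeMiiIoctl() above gives sk98lin the standard MII ioctl behaviour:
SIOCGMIIPHY reports the PHY address and falls through to a register read,
SIOCGMIIREG reads a PHY register, and SIOCSMIIREG writes one after a
CAP_NET_ADMIN check, all serialized under the driver's slow-path lock. A
hedged sketch of the same dispatch for a generic driver whose PHY accessors
are the hypothetical foo_phy_read()/foo_phy_write():

	static int foo_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
	{
		struct mii_ioctl_data *data = if_mii(rq);
		struct foo_private *fp = netdev_priv(dev);

		switch (cmd) {
		case SIOCGMIIPHY:
			data->phy_id = fp->phy_addr;
			/* fall through */
		case SIOCGMIIREG:
			data->val_out = foo_phy_read(fp, data->reg_num & 0x1f);
			return 0;
		case SIOCSMIIREG:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			foo_phy_write(fp, data->reg_num & 0x1f, data->val_in);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}
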
diff --git a/drivers/net/sk98lin/skgesirq.c b/drivers/net/sk98lin/skgesirq.c
index ab66d80..3e7aa49 100644
--- a/drivers/net/sk98lin/skgesirq.c
+++ b/drivers/net/sk98lin/skgesirq.c
@@ -1319,7 +1319,7 @@
 	SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_STAT, &Isrc);
 
 #ifdef xDEBUG
-	if ((Isrc & ~(PHY_B_IS_HCT | PHY_B_IS_LCT) ==
+	if ((Isrc & ~(PHY_B_IS_HCT | PHY_B_IS_LCT)) ==
 		(PHY_B_IS_SCR_S_ER | PHY_B_IS_RRS_CHANGE | PHY_B_IS_LRS_CHANGE)) {
 
 		SK_U32	Stat1, Stat2, Stat3;
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index b294903..b60f045 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -1327,10 +1327,11 @@
  * Since internal PHY is wired to a level triggered pin, can't
  * get an interrupt when carrier is detected.
  */
-static void xm_link_timer(void *arg)
+static void xm_link_timer(struct work_struct *work)
 {
-	struct net_device *dev = arg;
-	struct skge_port *skge = netdev_priv(arg);
+	struct skge_port *skge =
+		container_of(work, struct skge_port, link_thread.work);
+	struct net_device *dev = skge->netdev;
  	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
 
@@ -2154,8 +2155,6 @@
 	int port = skge->port;
 	u16 ctrl;
 
-	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
-
 	ctrl = gma_read16(hw, port, GM_GP_CTRL);
 	ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
 	gma_write16(hw, port, GM_GP_CTRL, ctrl);
@@ -2167,7 +2166,6 @@
 		gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
 	}
 
-	yukon_reset(hw, port);
 	skge_link_down(skge);
 
 	yukon_init(hw, port);
@@ -2255,6 +2253,7 @@
 {
 	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
+	struct net_device *dev = hw->dev[port];
 
 	netif_stop_queue(skge->netdev);
 	netif_carrier_off(skge->netdev);
@@ -2268,6 +2267,8 @@
 		yukon_init(hw, port);
 	}
 	mutex_unlock(&hw->phy_mutex);
+
+	dev->set_multicast_list(dev);
 }
 
 /* Basic MII support */
@@ -2565,7 +2566,7 @@
 
 		td->csum_offs = 0;
 		td->csum_start = offset;
-		td->csum_write = offset + skb->csum;
+		td->csum_write = offset + skb->csum_offset;
 	} else
 		control = BMU_CHECK;
 
@@ -3072,9 +3073,9 @@
  * because accessing phy registers requires spin wait which might
  * cause excess interrupt latency.
  */
-static void skge_extirq(void *arg)
+static void skge_extirq(struct work_struct *work)
 {
-	struct skge_hw *hw = arg;
+	struct skge_hw *hw = container_of(work, struct skge_hw, phy_work);
 	int port;
 
 	mutex_lock(&hw->phy_mutex);
@@ -3456,7 +3457,7 @@
 	skge->port = port;
 
 	/* Only used for Genesis XMAC */
-	INIT_WORK(&skge->link_thread, xm_link_timer, dev);
+	INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer);
 
 	if (hw->chip_id != CHIP_ID_GENESIS) {
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
@@ -3543,7 +3544,7 @@
 
 	hw->pdev = pdev;
 	mutex_init(&hw->phy_mutex);
-	INIT_WORK(&hw->phy_work, skge_extirq, hw);
+	INIT_WORK(&hw->phy_work, skge_extirq);
 	spin_lock_init(&hw->hw_lock);
 
 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
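
Several hunks in this series (sk98lin, skge, sky2, sungem, sunhme) switch the
checksum-offload setup from skb->csum to the new skb->csum_offset. With
CHECKSUM_PARTIAL the stack asks the NIC to sum from the transport header
onward and to store the result csum_offset bytes past that starting point;
the old code reused skb->csum to carry the same offset. A small sketch of the
arithmetic, assuming a descriptor with separate start/write fields (the field
names are illustrative):

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* offset of the transport header within the frame */
		u16 start = skb->h.raw - skb->data;

		desc->csum_start = start;			/* begin summing here    */
		desc->csum_write = start + skb->csum_offset;	/* store the result here */
	}
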
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 537c0aa..f6223c5 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -389,10 +389,10 @@
 /* Packet Arbiter Registers */
 /*	B3_PA_CTRL		16 bit	Packet Arbiter Ctrl Register */
 enum {
-	PA_CLR_TO_TX2	= 1<<13,	/* Clear IRQ Packet Timeout TX2 */
-	PA_CLR_TO_TX1	= 1<<12,	/* Clear IRQ Packet Timeout TX1 */
-	PA_CLR_TO_RX2	= 1<<11,	/* Clear IRQ Packet Timeout RX2 */
-	PA_CLR_TO_RX1	= 1<<10,	/* Clear IRQ Packet Timeout RX1 */
+	PA_CLR_TO_TX2	= 1<<13,/* Clear IRQ Packet Timeout TX2 */
+	PA_CLR_TO_TX1	= 1<<12,/* Clear IRQ Packet Timeout TX1 */
+	PA_CLR_TO_RX2	= 1<<11,/* Clear IRQ Packet Timeout RX2 */
+	PA_CLR_TO_RX1	= 1<<10,/* Clear IRQ Packet Timeout RX1 */
 	PA_ENA_TO_TX2	= 1<<9,	/* Enable  Timeout Timer TX2 */
 	PA_DIS_TO_TX2	= 1<<8,	/* Disable Timeout Timer TX2 */
 	PA_ENA_TO_TX1	= 1<<7,	/* Enable  Timeout Timer TX1 */
@@ -481,14 +481,14 @@
 /* RAM Buffer Register Offsets */
 enum {
 
-	RB_START	= 0x00,/* 32 bit	RAM Buffer Start Address */
+	RB_START= 0x00,/* 32 bit	RAM Buffer Start Address */
 	RB_END	= 0x04,/* 32 bit	RAM Buffer End Address */
 	RB_WP	= 0x08,/* 32 bit	RAM Buffer Write Pointer */
 	RB_RP	= 0x0c,/* 32 bit	RAM Buffer Read Pointer */
-	RB_RX_UTPP	= 0x10,/* 32 bit	Rx Upper Threshold, Pause Packet */
-	RB_RX_LTPP	= 0x14,/* 32 bit	Rx Lower Threshold, Pause Packet */
-	RB_RX_UTHP	= 0x18,/* 32 bit	Rx Upper Threshold, High Prio */
-	RB_RX_LTHP	= 0x1c,/* 32 bit	Rx Lower Threshold, High Prio */
+	RB_RX_UTPP= 0x10,/* 32 bit	Rx Upper Threshold, Pause Packet */
+	RB_RX_LTPP= 0x14,/* 32 bit	Rx Lower Threshold, Pause Packet */
+	RB_RX_UTHP= 0x18,/* 32 bit	Rx Upper Threshold, High Prio */
+	RB_RX_LTHP= 0x1c,/* 32 bit	Rx Lower Threshold, High Prio */
 	/* 0x10 - 0x1f:	reserved at Tx RAM Buffer Registers */
 	RB_PC	= 0x20,/* 32 bit	RAM Buffer Packet Counter */
 	RB_LEV	= 0x24,/* 32 bit	RAM Buffer Level Register */
@@ -532,7 +532,7 @@
 	PHY_ADDR_MARV	= 0,
 };
 
-#define RB_ADDR(offs, queue) (B16_RAM_REGS + (queue) + (offs))
+#define RB_ADDR(offs, queue) ((u16)B16_RAM_REGS + (u16)(queue) + (offs))
 
 /* Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only) */
 enum {
@@ -578,15 +578,15 @@
 	MFF_DIS_TIST	= 1<<2,	/* Disable Time Stamp Gener */
 	MFF_CLR_INTIST	= 1<<1,	/* Clear IRQ No Time Stamp */
 	MFF_CLR_INSTAT	= 1<<0,	/* Clear IRQ No Status */
-#define MFF_RX_CTRL_DEF MFF_ENA_TIM_PAT
+	MFF_RX_CTRL_DEF = MFF_ENA_TIM_PAT,
 };
 
 /*	TX_MFF_CTRL1	16 bit	Transmit MAC FIFO Control Reg 1 */
 enum {
-	MFF_CLR_PERR	= 1<<15,	/* Clear Parity Error IRQ */
-								/* Bit 14:	reserved */
-	MFF_ENA_PKT_REC	= 1<<13,	/* Enable  Packet Recovery */
-	MFF_DIS_PKT_REC	= 1<<12,	/* Disable Packet Recovery */
+	MFF_CLR_PERR	= 1<<15, /* Clear Parity Error IRQ */
+
+	MFF_ENA_PKT_REC	= 1<<13, /* Enable  Packet Recovery */
+	MFF_DIS_PKT_REC	= 1<<12, /* Disable Packet Recovery */
 
 	MFF_ENA_W4E	= 1<<7,	/* Enable  Wait for Empty */
 	MFF_DIS_W4E	= 1<<6,	/* Disable Wait for Empty */
@@ -595,9 +595,10 @@
 	MFF_DIS_LOOPB	= 1<<2,	/* Disable Loopback */
 	MFF_CLR_MAC_RST	= 1<<1,	/* Clear XMAC Reset */
 	MFF_SET_MAC_RST	= 1<<0,	/* Set   XMAC Reset */
+
+	MFF_TX_CTRL_DEF	 = MFF_ENA_PKT_REC | (u16) MFF_ENA_TIM_PAT | MFF_ENA_FLUSH,
 };
 
-#define MFF_TX_CTRL_DEF	(MFF_ENA_PKT_REC | MFF_ENA_TIM_PAT | MFF_ENA_FLUSH)
 
 /*	RX_MFF_TST2	 	 8 bit	Receive MAC FIFO Test Register 2 */
 /*	TX_MFF_TST2	 	 8 bit	Transmit MAC FIFO Test Register 2 */
@@ -1304,8 +1305,8 @@
 
 /* special defines for FIBER (88E1011S only) */
 enum {
-	PHY_M_AN_ASP_X	= 1<<8, /* Asymmetric Pause */
-	PHY_M_AN_PC_X	= 1<<7, /* MAC Pause implemented */
+	PHY_M_AN_ASP_X		= 1<<8, /* Asymmetric Pause */
+	PHY_M_AN_PC_X		= 1<<7, /* MAC Pause implemented */
 	PHY_M_AN_1000X_AHD	= 1<<6, /* Advertise 10000Base-X Half Duplex */
 	PHY_M_AN_1000X_AFD	= 1<<5, /* Advertise 10000Base-X Full Duplex */
 };
@@ -1320,7 +1321,7 @@
 
 /*****  PHY_MARV_1000T_CTRL	16 bit r/w	1000Base-T Control Reg *****/
 enum {
-	PHY_M_1000C_TEST	= 7<<13,/* Bit 15..13:	Test Modes */
+	PHY_M_1000C_TEST= 7<<13,/* Bit 15..13:	Test Modes */
 	PHY_M_1000C_MSE	= 1<<12, /* Manual Master/Slave Enable */
 	PHY_M_1000C_MSC	= 1<<11, /* M/S Configuration (1=Master) */
 	PHY_M_1000C_MPD	= 1<<10, /* Multi-Port Device */
@@ -1349,7 +1350,7 @@
 	PHY_M_PC_EN_DET_PLUS	= 3<<8, /* Energy Detect Plus (Mode 2) */
 };
 
-#define PHY_M_PC_MDI_XMODE(x)	(((x)<<5) & PHY_M_PC_MDIX_MSK)
+#define PHY_M_PC_MDI_XMODE(x)	(((u16)(x)<<5) & PHY_M_PC_MDIX_MSK)
 
 enum {
 	PHY_M_PC_MAN_MDI	= 0, /* 00 = Manual MDI configuration */
@@ -1432,24 +1433,24 @@
 	PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
 	PHY_M_EC_M_DSC_MSK  = 3<<10, /* Bit 11..10:	Master Downshift Counter */
 					/* (88E1011 only) */
-	PHY_M_EC_S_DSC_MSK	= 3<<8,/* Bit  9.. 8:	Slave  Downshift Counter */
+	PHY_M_EC_S_DSC_MSK  = 3<<8,  /* Bit  9.. 8:	Slave  Downshift Counter */
 				       /* (88E1011 only) */
-	PHY_M_EC_M_DSC_MSK2	= 7<<9,/* Bit 11.. 9:	Master Downshift Counter */
+	PHY_M_EC_M_DSC_MSK2  = 7<<9, /* Bit 11.. 9:	Master Downshift Counter */
 					/* (88E1111 only) */
-	PHY_M_EC_DOWN_S_ENA	= 1<<8, /* Downshift Enable (88E1111 only) */
+	PHY_M_EC_DOWN_S_ENA  = 1<<8, /* Downshift Enable (88E1111 only) */
 					/* !!! Errata in spec. (1 = disable) */
-	PHY_M_EC_RX_TIM_CT	= 1<<7, /* RGMII Rx Timing Control*/
-	PHY_M_EC_MAC_S_MSK	= 7<<4,/* Bit  6.. 4:	Def. MAC interface speed */
-	PHY_M_EC_FIB_AN_ENA	= 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
-	PHY_M_EC_DTE_D_ENA	= 1<<2, /* DTE Detect Enable (88E1111 only) */
-	PHY_M_EC_TX_TIM_CT	= 1<<1, /* RGMII Tx Timing Control */
-	PHY_M_EC_TRANS_DIS	= 1<<0, /* Transmitter Disable (88E1111 only) */};
+	PHY_M_EC_RX_TIM_CT   = 1<<7, /* RGMII Rx Timing Control*/
+	PHY_M_EC_MAC_S_MSK   = 7<<4, /* Bit  6.. 4:	Def. MAC interface speed */
+	PHY_M_EC_FIB_AN_ENA  = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
+	PHY_M_EC_DTE_D_ENA   = 1<<2, /* DTE Detect Enable (88E1111 only) */
+	PHY_M_EC_TX_TIM_CT   = 1<<1, /* RGMII Tx Timing Control */
+	PHY_M_EC_TRANS_DIS   = 1<<0, /* Transmitter Disable (88E1111 only) */};
 
-#define PHY_M_EC_M_DSC(x)	((x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */
-#define PHY_M_EC_S_DSC(x)	((x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */
-#define PHY_M_EC_MAC_S(x)	((x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */
+#define PHY_M_EC_M_DSC(x)	((u16)(x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */
+#define PHY_M_EC_S_DSC(x)	((u16)(x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */
+#define PHY_M_EC_MAC_S(x)	((u16)(x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */
 
-#define PHY_M_EC_M_DSC_2(x)	((x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */
+#define PHY_M_EC_M_DSC_2(x)	((u16)(x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */
 											/* 100=5x; 101=6x; 110=7x; 111=8x */
 enum {
 	MAC_TX_CLK_0_MHZ	= 2,
@@ -1468,10 +1469,12 @@
 	PHY_M_LEDC_LK_C_MSK	= 7<<3,/* Bit  5.. 3: Link Control Mask */
 					/* (88E1111 only) */
 };
+#define PHY_M_LED_PULS_DUR(x)	(((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK)
+#define PHY_M_LED_BLINK_RT(x)	(((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK)
 
 enum {
-	PHY_M_LEDC_LINK_MSK	= 3<<3,/* Bit  4.. 3: Link Control Mask */
-									/* (88E1011 only) */
+	PHY_M_LEDC_LINK_MSK	= 3<<3, /* Bit  4.. 3: Link Control Mask */
+					/* (88E1011 only) */
 	PHY_M_LEDC_DP_CTRL	= 1<<2, /* Duplex Control */
 	PHY_M_LEDC_DP_C_MSB	= 1<<2, /* Duplex Control (MSB, 88E1111 only) */
 	PHY_M_LEDC_RX_CTRL	= 1<<1, /* Rx Activity / Link */
@@ -1479,27 +1482,24 @@
 	PHY_M_LEDC_TX_C_MSB	= 1<<0, /* Tx Control (MSB, 88E1111 only) */
 };
 
-#define PHY_M_LED_PULS_DUR(x)	(((x)<<12) & PHY_M_LEDC_PULS_MSK)
-
 enum {
-	PULS_NO_STR	= 0,/* no pulse stretching */
-	PULS_21MS	= 1,/* 21 ms to 42 ms */
-	PULS_42MS	= 2,/* 42 ms to 84 ms */
-	PULS_84MS	= 3,/* 84 ms to 170 ms */
-	PULS_170MS	= 4,/* 170 ms to 340 ms */
-	PULS_340MS	= 5,/* 340 ms to 670 ms */
-	PULS_670MS	= 6,/* 670 ms to 1.3 s */
-	PULS_1300MS	= 7,/* 1.3 s to 2.7 s */
+	PULS_NO_STR	= 0, /* no pulse stretching */
+	PULS_21MS	= 1, /* 21 ms to 42 ms */
+	PULS_42MS	= 2, /* 42 ms to 84 ms */
+	PULS_84MS	= 3, /* 84 ms to 170 ms */
+	PULS_170MS	= 4, /* 170 ms to 340 ms */
+	PULS_340MS	= 5, /* 340 ms to 670 ms */
+	PULS_670MS	= 6, /* 670 ms to 1.3 s */
+	PULS_1300MS	= 7, /* 1.3 s to 2.7 s */
 };
 
-#define PHY_M_LED_BLINK_RT(x)	(((x)<<8) & PHY_M_LEDC_BL_R_MSK)
 
 enum {
-	BLINK_42MS	= 0,/* 42 ms */
-	BLINK_84MS	= 1,/* 84 ms */
-	BLINK_170MS	= 2,/* 170 ms */
-	BLINK_340MS	= 3,/* 340 ms */
-	BLINK_670MS	= 4,/* 670 ms */
+	BLINK_42MS	= 0, /* 42 ms */
+	BLINK_84MS	= 1, /* 84 ms */
+	BLINK_170MS	= 2, /* 170 ms */
+	BLINK_340MS	= 3, /* 340 ms */
+	BLINK_670MS	= 4, /* 670 ms */
 };
 
 /*****  PHY_MARV_LED_OVER	16 bit r/w	Manual LED Override Reg *****/
@@ -1525,7 +1525,7 @@
 	PHY_M_EC2_FO_IMPED	= 1<<5, /* Fiber Output Impedance */
 	PHY_M_EC2_FO_M_CLK	= 1<<4, /* Fiber Mode Clock Enable */
 	PHY_M_EC2_FO_BOOST	= 1<<3, /* Fiber Output Boost */
-	PHY_M_EC2_FO_AM_MSK	= 7,/* Bit  2.. 0:	Fiber Output Amplitude */
+	PHY_M_EC2_FO_AM_MSK	= 7, /* Bit  2.. 0:	Fiber Output Amplitude */
 };
 
 /*****  PHY_MARV_EXT_P_STAT 16 bit r/w	Ext. PHY Specific Status *****/
@@ -1550,7 +1550,7 @@
 	PHY_M_CABD_DIS_WAIT	= 1<<15, /* Disable Waiting Period (Page 1) */
 					/* (88E1111 only) */
 	PHY_M_CABD_STAT_MSK	= 3<<13, /* Bit 14..13: Status Mask */
-	PHY_M_CABD_AMPL_MSK	= 0x1f<<8,/* Bit 12.. 8: Amplitude Mask */
+	PHY_M_CABD_AMPL_MSK	= 0x1f<<8, /* Bit 12.. 8: Amplitude Mask */
 					/* (88E1111 only) */
 	PHY_M_CABD_DIST_MSK	= 0xff, /* Bit  7.. 0: Distance Mask */
 };
@@ -1605,9 +1605,9 @@
 
 /*****  PHY_MARV_PHY_CTRL (page 3)		16 bit r/w	LED Control Reg. *****/
 enum {
-	PHY_M_LEDC_LOS_MSK	= 0xf<<12,/* Bit 15..12: LOS LED Ctrl. Mask */
+	PHY_M_LEDC_LOS_MSK	= 0xf<<12, /* Bit 15..12: LOS LED Ctrl. Mask */
 	PHY_M_LEDC_INIT_MSK	= 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
-	PHY_M_LEDC_STA1_MSK	= 0xf<<4,/* Bit  7.. 4: STAT1 LED Ctrl. Mask */
+	PHY_M_LEDC_STA1_MSK	= 0xf<<4, /* Bit  7.. 4: STAT1 LED Ctrl. Mask */
 	PHY_M_LEDC_STA0_MSK	= 0xf, /* Bit  3.. 0: STAT0 LED Ctrl. Mask */
 };
 
@@ -1804,8 +1804,8 @@
 
 /*	GM_SMI_CTRL			16 bit r/w	SMI Control Register */
 enum {
-	GM_SMI_CT_PHY_A_MSK	= 0x1f<<11,/* Bit 15..11:	PHY Device Address */
-	GM_SMI_CT_REG_A_MSK	= 0x1f<<6,/* Bit 10.. 6:	PHY Register Address */
+	GM_SMI_CT_PHY_A_MSK	= 0x1f<<11, /* Bit 15..11:	PHY Device Address */
+	GM_SMI_CT_REG_A_MSK	= 0x1f<<6, /* Bit 10.. 6:	PHY Register Address */
 	GM_SMI_CT_OP_RD		= 1<<5,	/* Bit  5:	OpCode Read (0=Write)*/
 	GM_SMI_CT_RD_VAL	= 1<<4,	/* Bit  4:	Read Valid (Read completed) */
 	GM_SMI_CT_BUSY		= 1<<3,	/* Bit  3:	Busy (Operation in progress) */
@@ -1875,9 +1875,9 @@
 
 /*	TX_GMF_CTRL_T	32 bit	Tx GMAC FIFO Control/Test */
 enum {
-	GMF_WSP_TST_ON	= 1<<18,/* Write Shadow Pointer Test On */
-	GMF_WSP_TST_OFF	= 1<<17,/* Write Shadow Pointer Test Off */
-	GMF_WSP_STEP	= 1<<16,/* Write Shadow Pointer Step/Increment */
+	GMF_WSP_TST_ON	= 1<<18, /* Write Shadow Pointer Test On */
+	GMF_WSP_TST_OFF	= 1<<17, /* Write Shadow Pointer Test Off */
+	GMF_WSP_STEP	= 1<<16, /* Write Shadow Pointer Step/Increment */
 
 	GMF_CLI_TX_FU	= 1<<6,	/* Clear IRQ Tx FIFO Underrun */
 	GMF_CLI_TX_FC	= 1<<5,	/* Clear IRQ Tx Frame Complete */
@@ -2111,18 +2111,18 @@
 
 /*	XM_MMU_CMD	16 bit r/w	MMU Command Register */
 enum {
-	XM_MMU_PHY_RDY	= 1<<12,/* Bit 12:	PHY Read Ready */
-	XM_MMU_PHY_BUSY	= 1<<11,/* Bit 11:	PHY Busy */
-	XM_MMU_IGN_PF	= 1<<10,/* Bit 10:	Ignore Pause Frame */
-	XM_MMU_MAC_LB	= 1<<9,	/* Bit  9:	Enable MAC Loopback */
-	XM_MMU_FRC_COL	= 1<<7,	/* Bit  7:	Force Collision */
-	XM_MMU_SIM_COL	= 1<<6,	/* Bit  6:	Simulate Collision */
-	XM_MMU_NO_PRE	= 1<<5,	/* Bit  5:	No MDIO Preamble */
-	XM_MMU_GMII_FD	= 1<<4,	/* Bit  4:	GMII uses Full Duplex */
-	XM_MMU_RAT_CTRL	= 1<<3,	/* Bit  3:	Enable Rate Control */
-	XM_MMU_GMII_LOOP= 1<<2,	/* Bit  2:	PHY is in Loopback Mode */
-	XM_MMU_ENA_RX	= 1<<1,	/* Bit  1:	Enable Receiver */
-	XM_MMU_ENA_TX	= 1<<0,	/* Bit  0:	Enable Transmitter */
+	XM_MMU_PHY_RDY	= 1<<12, /* Bit 12:	PHY Read Ready */
+	XM_MMU_PHY_BUSY	= 1<<11, /* Bit 11:	PHY Busy */
+	XM_MMU_IGN_PF	= 1<<10, /* Bit 10:	Ignore Pause Frame */
+	XM_MMU_MAC_LB	= 1<<9,	 /* Bit  9:	Enable MAC Loopback */
+	XM_MMU_FRC_COL	= 1<<7,	 /* Bit  7:	Force Collision */
+	XM_MMU_SIM_COL	= 1<<6,	 /* Bit  6:	Simulate Collision */
+	XM_MMU_NO_PRE	= 1<<5,	 /* Bit  5:	No MDIO Preamble */
+	XM_MMU_GMII_FD	= 1<<4,	 /* Bit  4:	GMII uses Full Duplex */
+	XM_MMU_RAT_CTRL	= 1<<3,	 /* Bit  3:	Enable Rate Control */
+	XM_MMU_GMII_LOOP= 1<<2,	 /* Bit  2:	PHY is in Loopback Mode */
+	XM_MMU_ENA_RX	= 1<<1,	 /* Bit  1:	Enable Receiver */
+	XM_MMU_ENA_TX	= 1<<0,	 /* Bit  0:	Enable Transmitter */
 };
 
 
@@ -2456,7 +2456,7 @@
 
 	struct net_device_stats net_stats;
 
-	struct work_struct   link_thread;
+	struct delayed_work  link_thread;
 	enum pause_control   flow_control;
 	enum pause_status    flow_status;
 	u8		     rx_csum;
@@ -2506,7 +2506,7 @@
 }
 
 /* MAC Related Registers inside the device. */
-#define SK_REG(port,reg)	(((port)<<7)+(reg))
+#define SK_REG(port,reg)	(((port)<<7)+(u16)(reg))
 #define SK_XMAC_REG(port, reg) \
 	((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1)
 
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 16616f5..fb1d2c3 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -100,32 +100,32 @@
 MODULE_PARM_DESC(idle_timeout, "Watchdog timer for lost interrupts (ms)");
 
 static const struct pci_device_id sky2_id_table[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, 	/* DGE-550SX */
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) },	/* DGE-560SX */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
 	{ 0 }
 };
 
@@ -521,7 +521,7 @@
 		/* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
 		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
 		/* turn off the Rx LED (LED_RX) */
-		ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
+		ledover &= ~PHY_M_LED_MO_RX;
 	}
 
 	if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_A1) {
@@ -544,7 +544,7 @@
 
 		if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
 			/* turn on 100 Mbps LED (LED_LINK100) */
-			ledover |= PHY_M_LED_MO_100(MO_LED_ON);
+			ledover |= PHY_M_LED_MO_100;
 		}
 
 		if (ledover)
@@ -676,17 +676,15 @@
 	/* Flush Rx MAC FIFO on any flow control or error */
 	sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
 
-	/* Set threshold to 0xa (64 bytes)
-	 *  ASF disabled so no need to do WA dev #4.30
-	 */
-	sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
+	/* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug  */
+	sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
 
 	/* Configure Tx MAC FIFO */
 	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
 	sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
 
 	if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
-		sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 512/8);
+		sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
 		sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
 		if (hw->dev[port]->mtu > ETH_DATA_LEN) {
 			/* set Tx GMAC FIFO Almost Empty Threshold */
@@ -698,10 +696,15 @@
 
 }
 
-/* Assign Ram Buffer allocation in units of 64bit (8 bytes) */
-static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 end)
+/* Assign Ram Buffer allocation to queue */
+static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
 {
-	pr_debug(PFX "q %d %#x %#x\n", q, start, end);
+	u32 end;
+
+	/* convert from K bytes to qwords used for hw register */
+	start *= 1024/8;
+	space *= 1024/8;
+	end = start + space - 1;
 
 	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
 	sky2_write32(hw, RB_ADDR(q, RB_START), start);
@@ -710,7 +713,6 @@
 	sky2_write32(hw, RB_ADDR(q, RB_RP), start);
 
 	if (q == Q_R1 || q == Q_R2) {
-		u32 space = end - start + 1;
 		u32 tp = space - space/4;
 
 		/* On receive queue's set the thresholds
@@ -1060,10 +1062,16 @@
 	sky2->rx_put = sky2->rx_next = 0;
 	sky2_qset(hw, rxq);
 
-	if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
-		/* MAC Rx RAM Read is controlled by hardware */
+	/* On PCI express lowering the watermark gives better performance */
+	if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
+		sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
+
+	/* These chips have no ram buffer?
+	 * MAC Rx RAM Read is controlled by hardware */
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+	    (hw->chip_rev == CHIP_REV_YU_EC_U_A1
+	     || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
 		sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
-	}
 
 	sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
 
@@ -1139,7 +1147,7 @@
 	struct sky2_port *sky2 = netdev_priv(dev);
 	struct sky2_hw *hw = sky2->hw;
 	unsigned port = sky2->port;
-	u32 ramsize, rxspace, imask;
+	u32 ramsize, imask;
 	int cap, err = -ENOMEM;
 	struct net_device *otherdev = hw->dev[sky2->port^1];
 
@@ -1192,20 +1200,25 @@
 
 	sky2_mac_init(hw, port);
 
-	/* Determine available ram buffer space in qwords.  */
-	ramsize = sky2_read8(hw, B2_E_0) * 4096/8;
+	/* Register is number of 4K blocks on internal RAM buffer. */
+	ramsize = sky2_read8(hw, B2_E_0) * 4;
+	printk(KERN_INFO PFX "%s: ram buffer %dK\n", dev->name, ramsize);
 
-	if (ramsize > 6*1024/8)
-		rxspace = ramsize - (ramsize + 2) / 3;
-	else
-		rxspace = ramsize / 2;
+	if (ramsize > 0) {
+		u32 rxspace;
 
-	sky2_ramset(hw, rxqaddr[port], 0, rxspace-1);
-	sky2_ramset(hw, txqaddr[port], rxspace, ramsize-1);
+		if (ramsize < 16)
+			rxspace = ramsize / 2;
+		else
+			rxspace = 8 + (2*(ramsize - 16))/3;
 
-	/* Make sure SyncQ is disabled */
-	sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
-		    RB_RST_SET);
+		sky2_ramset(hw, rxqaddr[port], 0, rxspace);
+		sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
+
+		/* Make sure SyncQ is disabled */
+		sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
+			    RB_RST_SET);
+	}
 
 	sky2_qset(hw, txqaddr[port]);
 
@@ -1350,7 +1363,7 @@
 		u32 tcpsum;
 
 		tcpsum = offset << 16;		/* sum start */
-		tcpsum |= offset + skb->csum;	/* sum write */
+		tcpsum |= offset + skb->csum_offset;	/* sum write */
 
 		ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
 		if (skb->nh.iph->protocol == IPPROTO_UDP)
@@ -1453,7 +1466,7 @@
 			if (unlikely(netif_msg_tx_done(sky2)))
 				printk(KERN_DEBUG "%s: tx done %u\n",
 				       dev->name, idx);
-			dev_kfree_skb(re->skb);
+			dev_kfree_skb_any(re->skb);
 		}
 
 		le->opcode = 0;	/* paranoia */
@@ -1509,7 +1522,7 @@
 
 	/* WA for dev. #4.209 */
 	if (hw->chip_id == CHIP_ID_YUKON_EC_U
-	    && hw->chip_rev == CHIP_REV_YU_EC_U_A1)
+	    && (hw->chip_rev == CHIP_REV_YU_EC_U_A1 || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
 		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
 			     sky2->speed != SPEED_1000 ?
 			     TX_STFW_ENA : TX_STFW_DIS);
@@ -2065,7 +2078,7 @@
 		case OP_RXSTAT:
 			skb = sky2_receive(dev, length, status);
 			if (!skb)
-				break;
+				goto force_update;
 
 			skb->protocol = eth_type_trans(skb, dev);
 			dev->last_rx = jiffies;
@@ -2081,8 +2094,8 @@
 
 			/* Update receiver after 16 frames */
 			if (++buf_write[le->link] == RX_BUF_WRITE) {
-				sky2_put_idx(hw, rxqaddr[le->link],
-					     sky2->rx_put);
+force_update:
+				sky2_put_idx(hw, rxqaddr[le->link], sky2->rx_put);
 				buf_write[le->link] = 0;
 			}
 
@@ -2917,18 +2930,8 @@
 
 	default:
 		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
-		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
-			     on ? PHY_M_LED_MO_DUP(MO_LED_ON) |
-			     PHY_M_LED_MO_10(MO_LED_ON) |
-			     PHY_M_LED_MO_100(MO_LED_ON) |
-			     PHY_M_LED_MO_1000(MO_LED_ON) |
-			     PHY_M_LED_MO_RX(MO_LED_ON)
-			     : PHY_M_LED_MO_DUP(MO_LED_OFF) |
-			     PHY_M_LED_MO_10(MO_LED_OFF) |
-			     PHY_M_LED_MO_100(MO_LED_OFF) |
-			     PHY_M_LED_MO_1000(MO_LED_OFF) |
-			     PHY_M_LED_MO_RX(MO_LED_OFF));
-
+		gm_phy_write(hw, port, PHY_MARV_LED_OVER, 
+			     on ? PHY_M_LED_ALL : 0);
 	}
 }
 
@@ -3311,7 +3314,7 @@
 		return IRQ_NONE;
 
 	if (status & Y2_IS_IRQ_SW) {
-		hw->msi_detected = 1;
+		hw->msi = 1;
 		wake_up(&hw->msi_wait);
 		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
 	}
@@ -3330,7 +3333,7 @@
 
 	sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
 
-	err = request_irq(pdev->irq, sky2_test_intr, IRQF_SHARED, DRV_NAME, hw);
+	err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
 	if (err) {
 		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
 		       pci_name(pdev), pdev->irq);
@@ -3340,9 +3343,9 @@
 	sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
 	sky2_read8(hw, B0_CTST);
 
-	wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10);
+	wait_event_timeout(hw->msi_wait, hw->msi, HZ/10);
 
-	if (!hw->msi_detected) {
+	if (!hw->msi) {
 		/* MSI test failed, go back to INTx mode */
 		printk(KERN_INFO PFX "%s: No interrupt generated using MSI, "
 		       "switching to INTx mode.\n",
@@ -3475,7 +3478,8 @@
 		goto err_out_free_netdev;
 	}
 
-	err = request_irq(pdev->irq,  sky2_intr, IRQF_SHARED, dev->name, hw);
+	err = request_irq(pdev->irq,  sky2_intr, hw->msi ? 0 : IRQF_SHARED,
+			  dev->name, hw);
 	if (err) {
 		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
 		       pci_name(pdev), pdev->irq);
@@ -3505,7 +3509,8 @@
 	return 0;
 
 err_out_unregister:
-	pci_disable_msi(pdev);
+	if (hw->msi)
+		pci_disable_msi(pdev);
 	unregister_netdev(dev);
 err_out_free_netdev:
 	free_netdev(dev);
@@ -3548,7 +3553,8 @@
 	sky2_read8(hw, B0_CTST);
 
 	free_irq(pdev->irq, hw);
-	pci_disable_msi(pdev);
+	if (hw->msi)
+		pci_disable_msi(pdev);
 	pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
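
sky2_ramset() now takes its start and space arguments in kilobytes and does
the conversion to the 8-byte (qword) units the registers expect internally,
while sky2_up() reads the RAM size as a count of 4K blocks and splits it
between the receive and transmit queues. A worked example of the arithmetic,
assuming a chip whose B2_E_0 register reads 12; the numbers are purely
illustrative:

	u32 ramsize, rxspace, txspace, start, space, end;

	ramsize = 12 * 4;			/* 48K of packet RAM             */
	rxspace = 8 + (2 * (48 - 16)) / 3;	/* = 8 + 64/3 = 8 + 21 = 29K     */
	txspace = 48 - 29;			/* = 19K left for transmit       */

	/* inside sky2_ramset(): kilobytes to qwords for the RAM registers */
	start = 0 * 1024 / 8;			/* Rx queue starts at qword 0    */
	space = 29 * 1024 / 8;			/* = 3712 qwords                 */
	end   = start + space - 1;		/* = qword 3711                  */
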
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 6d2a23f..6ed1d47 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -383,8 +383,13 @@
 	CHIP_REV_YU_EC_A2    = 1,  /* Chip Rev. for Yukon-EC A2 */
 	CHIP_REV_YU_EC_A3    = 2,  /* Chip Rev. for Yukon-EC A3 */
 
-	CHIP_REV_YU_EC_U_A0  = 0,
-	CHIP_REV_YU_EC_U_A1  = 1,
+	CHIP_REV_YU_EC_U_A0  = 1,
+	CHIP_REV_YU_EC_U_A1  = 2,
+	CHIP_REV_YU_EC_U_B0  = 3,
+
+	CHIP_REV_YU_FE_A1    = 1,
+	CHIP_REV_YU_FE_A2    = 2,
+
 };
 
 /*	B2_Y2_CLK_GATE	 8 bit	Clock Gating (Yukon-2 only) */
@@ -603,7 +608,7 @@
 	PHY_ADDR_MARV	= 0,
 };
 
-#define RB_ADDR(offs, queue) (B16_RAM_REGS + (queue) + (offs))
+#define RB_ADDR(offs, queue) ((u16) B16_RAM_REGS + (queue) + (offs))
 
 
 enum {
@@ -675,6 +680,7 @@
 			  BMU_FIFO_ENA | BMU_OP_ON,
 
 	BMU_WM_DEFAULT = 0x600,
+	BMU_WM_PEX     = 0x80,
 };
 
 /* Tx BMU Control / Status Registers (Yukon-2) */
@@ -1055,7 +1061,7 @@
 	PHY_M_PC_EN_DET_PLUS	= 3<<8, /* Energy Detect Plus (Mode 2) */
 };
 
-#define PHY_M_PC_MDI_XMODE(x)	(((x)<<5) & PHY_M_PC_MDIX_MSK)
+#define PHY_M_PC_MDI_XMODE(x)	(((u16)(x)<<5) & PHY_M_PC_MDIX_MSK)
 
 enum {
 	PHY_M_PC_MAN_MDI	= 0, /* 00 = Manual MDI configuration */
@@ -1151,13 +1157,13 @@
 	PHY_M_EC_TX_TIM_CT  = 1<<1, /* RGMII Tx Timing Control */
 	PHY_M_EC_TRANS_DIS  = 1<<0, /* Transmitter Disable (88E1111 only) */};
 
-#define PHY_M_EC_M_DSC(x)	((x)<<10 & PHY_M_EC_M_DSC_MSK)
+#define PHY_M_EC_M_DSC(x)	((u16)(x)<<10 & PHY_M_EC_M_DSC_MSK)
 					/* 00=1x; 01=2x; 10=3x; 11=4x */
-#define PHY_M_EC_S_DSC(x)	((x)<<8 & PHY_M_EC_S_DSC_MSK)
+#define PHY_M_EC_S_DSC(x)	((u16)(x)<<8 & PHY_M_EC_S_DSC_MSK)
 					/* 00=dis; 01=1x; 10=2x; 11=3x */
-#define PHY_M_EC_DSC_2(x)	((x)<<9 & PHY_M_EC_M_DSC_MSK2)
+#define PHY_M_EC_DSC_2(x)	((u16)(x)<<9 & PHY_M_EC_M_DSC_MSK2)
 					/* 000=1x; 001=2x; 010=3x; 011=4x */
-#define PHY_M_EC_MAC_S(x)	((x)<<4 & PHY_M_EC_MAC_S_MSK)
+#define PHY_M_EC_MAC_S(x)	((u16)(x)<<4 & PHY_M_EC_MAC_S_MSK)
 					/* 01X=0; 110=2.5; 111=25 (MHz) */
 
 /* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
@@ -1168,7 +1174,7 @@
 };
 /* !!! Errata in spec. (1 = disable) */
 
-#define PHY_M_PC_DSC(x)			(((x)<<12) & PHY_M_PC_DSC_MSK)
+#define PHY_M_PC_DSC(x)			(((u16)(x)<<12) & PHY_M_PC_DSC_MSK)
 											/* 100=5x; 101=6x; 110=7x; 111=8x */
 enum {
 	MAC_TX_CLK_0_MHZ	= 2,
@@ -1198,7 +1204,7 @@
 	PHY_M_LEDC_TX_C_MSB	= 1<<0, /* Tx Control (MSB, 88E1111 only) */
 };
 
-#define PHY_M_LED_PULS_DUR(x)	(((x)<<12) & PHY_M_LEDC_PULS_MSK)
+#define PHY_M_LED_PULS_DUR(x)	(((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK)
 
 /*****  PHY_MARV_PHY_STAT (page 3)16 bit r/w	Polarity Control Reg. *****/
 enum {
@@ -1228,7 +1234,7 @@
 	PULS_1300MS	= 7,/* 1.3 s to 2.7 s */
 };
 
-#define PHY_M_LED_BLINK_RT(x)	(((x)<<8) & PHY_M_LEDC_BL_R_MSK)
+#define PHY_M_LED_BLINK_RT(x)	(((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK)
 
 enum {
 	BLINK_42MS	= 0,/* 42 ms */
@@ -1238,21 +1244,18 @@
 	BLINK_670MS	= 4,/* 670 ms */
 };
 
-/*****  PHY_MARV_LED_OVER	16 bit r/w	Manual LED Override Reg *****/
-#define PHY_M_LED_MO_SGMII(x)	((x)<<14) /* Bit 15..14:  SGMII AN Timer */
-										/* Bit 13..12:	reserved */
-#define PHY_M_LED_MO_DUP(x)	((x)<<10) /* Bit 11..10:  Duplex */
-#define PHY_M_LED_MO_10(x)	((x)<<8) /* Bit  9.. 8:  Link 10 */
-#define PHY_M_LED_MO_100(x)	((x)<<6) /* Bit  7.. 6:  Link 100 */
-#define PHY_M_LED_MO_1000(x)	((x)<<4) /* Bit  5.. 4:  Link 1000 */
-#define PHY_M_LED_MO_RX(x)	((x)<<2) /* Bit  3.. 2:  Rx */
-#define PHY_M_LED_MO_TX(x)	((x)<<0) /* Bit  1.. 0:  Tx */
-
+/**** PHY_MARV_LED_OVER    16 bit r/w LED control */
 enum {
-	MO_LED_NORM	= 0,
-	MO_LED_BLINK	= 1,
-	MO_LED_OFF	= 2,
-	MO_LED_ON	= 3,
+	PHY_M_LED_MO_DUP  = 3<<10,/* Bit 11..10:  Duplex */
+	PHY_M_LED_MO_10	  = 3<<8, /* Bit  9.. 8:  Link 10 */
+	PHY_M_LED_MO_100  = 3<<6, /* Bit  7.. 6:  Link 100 */
+	PHY_M_LED_MO_1000 = 3<<4, /* Bit  5.. 4:  Link 1000 */
+	PHY_M_LED_MO_RX	  = 3<<2, /* Bit  3.. 2:  Rx */
+	PHY_M_LED_MO_TX	  = 3<<0, /* Bit  1.. 0:  Tx */
+
+	PHY_M_LED_ALL	  = PHY_M_LED_MO_DUP | PHY_M_LED_MO_10 
+			    | PHY_M_LED_MO_100 | PHY_M_LED_MO_1000
+			    | PHY_M_LED_MO_RX,
 };
 
 /*****  PHY_MARV_EXT_CTRL_2	16 bit r/w	Ext. PHY Specific Ctrl 2 *****/
@@ -1289,9 +1292,9 @@
 	PHY_M_FELP_LED0_MSK = 0xf, /* Bit  3.. 0: LED0 Mask (SPEED) */
 };
 
-#define PHY_M_FELP_LED2_CTRL(x)	(((x)<<8) & PHY_M_FELP_LED2_MSK)
-#define PHY_M_FELP_LED1_CTRL(x)	(((x)<<4) & PHY_M_FELP_LED1_MSK)
-#define PHY_M_FELP_LED0_CTRL(x)	(((x)<<0) & PHY_M_FELP_LED0_MSK)
+#define PHY_M_FELP_LED2_CTRL(x)	(((u16)(x)<<8) & PHY_M_FELP_LED2_MSK)
+#define PHY_M_FELP_LED1_CTRL(x)	(((u16)(x)<<4) & PHY_M_FELP_LED1_MSK)
+#define PHY_M_FELP_LED0_CTRL(x)	(((u16)(x)<<0) & PHY_M_FELP_LED0_MSK)
 
 enum {
 	LED_PAR_CTRL_COLX	= 0x00,
@@ -1547,8 +1550,8 @@
 	GM_SMI_CT_BUSY		= 1<<3,	/* Bit  3:	Busy (Operation in progress) */
 };
 
-#define GM_SMI_CT_PHY_AD(x)	(((x)<<11) & GM_SMI_CT_PHY_A_MSK)
-#define GM_SMI_CT_REG_AD(x)	(((x)<<6) & GM_SMI_CT_REG_A_MSK)
+#define GM_SMI_CT_PHY_AD(x)	(((u16)(x)<<11) & GM_SMI_CT_PHY_A_MSK)
+#define GM_SMI_CT_REG_AD(x)	(((u16)(x)<<6) & GM_SMI_CT_REG_A_MSK)
 
 /*	GM_PHY_ADDR				16 bit r/w	GPHY Address Register */
 enum {
@@ -1895,7 +1898,7 @@
 	dma_addr_t   	     st_dma;
 
 	struct timer_list    idle_timer;
-	int		     msi_detected;
+	int		     msi;
 	wait_queue_head_t    msi_wait;
 };
 
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index 889ef0d..d70bc97 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -593,7 +593,7 @@
 	iounmap(ei_status.mem);
 }
 
-void
+void __exit
 cleanup_module(void)
 {
 	int this_dev;
diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c
index e10755e..2c5319c 100644
--- a/drivers/net/smc-ultra32.c
+++ b/drivers/net/smc-ultra32.c
@@ -437,7 +437,7 @@
 	return -ENXIO;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	int this_dev;
 
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index c0d13d6..bd6e845 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -1616,7 +1616,7 @@
 	return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	unregister_netdev(devSMC9194);
 	free_irq(devSMC9194->irq, devSMC9194);
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 95b6478..e62a958 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -210,6 +210,7 @@
 
 	/* work queue */
 	struct work_struct phy_configure;
+	struct net_device *dev;
 	int	work_pending;
 
 	spinlock_t lock;
@@ -1114,10 +1115,11 @@
  * of autonegotiation.)  If the RPC ANEG bit is cleared, the selection
  * is controlled by the RPC SPEED and RPC DPLX bits.
  */
-static void smc_phy_configure(void *data)
+static void smc_phy_configure(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	struct smc_local *lp = netdev_priv(dev);
+	struct smc_local *lp =
+		container_of(work, struct smc_local, phy_configure);
+	struct net_device *dev = lp->dev;
 	void __iomem *ioaddr = lp->base;
 	int phyaddr = lp->mii.phy_id;
 	int my_phy_caps; /* My PHY capabilities */
@@ -1592,7 +1594,7 @@
 
 	/* Configure the PHY, initialize the link state */
 	if (lp->phy_type != 0)
-		smc_phy_configure(dev);
+		smc_phy_configure(&lp->phy_configure);
 	else {
 		spin_lock_irq(&lp->lock);
 		smc_10bt_check_media(dev, 1);
@@ -1972,7 +1974,8 @@
 #endif
 
 	tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev);
-	INIT_WORK(&lp->phy_configure, smc_phy_configure, dev);
+	INIT_WORK(&lp->phy_configure, smc_phy_configure);
+	lp->dev = dev;
 	lp->mii.phy_id_mask = 0x1f;
 	lp->mii.reg_num_mask = 0x1f;
 	lp->mii.force_media = 0;
@@ -2322,7 +2325,7 @@
 			smc_reset(ndev);
 			smc_enable(ndev);
 			if (lp->phy_type != 0)
-				smc_phy_configure(ndev);
+				smc_phy_configure(&lp->phy_configure);
 			netif_device_attach(ndev);
 		}
 	}
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index a864016..9367c57 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -238,7 +238,7 @@
 #define SMC_CAN_USE_16BIT	1
 #define SMC_CAN_USE_32BIT	0
 
-#define SMC_inb(a, r)		inb((u32)a) + (r))
+#define SMC_inb(a, r)		inb(((u32)a) + (r))
 #define SMC_inw(a, r)		inw(((u32)a) + (r))
 #define SMC_outb(v, a, r)	outb(v, ((u32)a) + (r))
 #define SMC_outw(v, a, r)	outw(v, ((u32)a) + (r))
@@ -434,6 +434,24 @@
 
 #define SMC_IRQ_FLAGS		(0)
 
+#elif	defined(CONFIG_ARCH_VERSATILE)
+
+#define SMC_CAN_USE_8BIT	1
+#define SMC_CAN_USE_16BIT	1
+#define SMC_CAN_USE_32BIT	1
+#define SMC_NOWAIT		1
+
+#define SMC_inb(a, r)		readb((a) + (r))
+#define SMC_inw(a, r)		readw((a) + (r))
+#define SMC_inl(a, r)		readl((a) + (r))
+#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
+#define SMC_outw(v, a, r)	writew(v, (a) + (r))
+#define SMC_outl(v, a, r)	writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
+
+#define SMC_IRQ_FLAGS		(0)
+
 #else
 
 #define SMC_CAN_USE_8BIT	1
@@ -1216,7 +1234,7 @@
 		if (SMC_CAN_USE_32BIT) {				\
 			void *__ptr = (p);				\
 			int __len = (l);				\
-			void *__ioaddr = ioaddr;			\
+			void __iomem *__ioaddr = ioaddr;		\
 			if (__len >= 2 && (unsigned long)__ptr & 2) {	\
 				__len -= 2;				\
 				SMC_outw(*(u16 *)__ptr, ioaddr, DATA_REG); \
@@ -1240,7 +1258,7 @@
 		if (SMC_CAN_USE_32BIT) {				\
 			void *__ptr = (p);				\
 			int __len = (l);				\
-			void *__ioaddr = ioaddr;			\
+			void __iomem *__ioaddr = ioaddr;		\
 			if ((unsigned long)__ptr & 2) {			\
 				/*					\
 				 * We want 32bit alignment here.	\
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 418138d..ebb6aa3 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -88,12 +88,11 @@
 static inline u32
 spider_net_read_reg(struct spider_net_card *card, u32 reg)
 {
-	u32 value;
-
-	value = readl(card->regs + reg);
-	value = le32_to_cpu(value);
-
-	return value;
+	/* We use the powerpc specific variants instead of readl_be() because
+	 * we know spidernet is not a real PCI device and we can thus avoid the
+	 * performance hit caused by the PCI workarounds.
+	 */
+	return in_be32(card->regs + reg);
 }
 
 /**
@@ -105,8 +104,11 @@
 static inline void
 spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
 {
-	value = cpu_to_le32(value);
-	writel(value, card->regs + reg);
+	/* We use the powerpc specific variants instead of writel_be() because
+	 * we know spidernet is not a real PCI device and we can thus avoid the
+	 * performance hit caused by the PCI workarounds.
+	 */
+	out_be32(card->regs + reg, value);
 }
 
 /** spider_net_write_phy - write to phy register
@@ -644,20 +646,12 @@
 	struct spider_net_descr *descr;
 	dma_addr_t buf;
 	unsigned long flags;
-	int length;
 
-	length = skb->len;
-	if (length < ETH_ZLEN) {
-		if (skb_pad(skb, ETH_ZLEN-length))
-			return 0;
-		length = ETH_ZLEN;
-	}
-
-	buf = pci_map_single(card->pdev, skb->data, length, PCI_DMA_TODEVICE);
+	buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
 	if (pci_dma_mapping_error(buf)) {
 		if (netif_msg_tx_err(card) && net_ratelimit())
 			pr_err("could not iommu-map packet (%p, %i). "
-				  "Dropping packet\n", skb->data, length);
+				  "Dropping packet\n", skb->data, skb->len);
 		card->spider_stats.tx_iommu_map_error++;
 		return -ENOMEM;
 	}
@@ -667,7 +661,7 @@
 	card->tx_chain.head = descr->next;
 
 	descr->buf_addr = buf;
-	descr->buf_size = length;
+	descr->buf_size = skb->len;
 	descr->next_descr_addr = 0;
 	descr->skb = skb;
 	descr->data_status = 0;
@@ -802,8 +796,8 @@
 
 		/* unmap the skb */
 		if (skb) {
-			int len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
-			pci_unmap_single(card->pdev, buf_addr, len, PCI_DMA_TODEVICE);
+			pci_unmap_single(card->pdev, buf_addr, skb->len,
+					PCI_DMA_TODEVICE);
 			dev_kfree_skb(skb);
 		}
 	}
@@ -1641,7 +1635,7 @@
 			     SPIDER_NET_INT2_MASK_VALUE);
 
 	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
-			     SPIDER_NET_GDTBSTA | SPIDER_NET_GDTDCEIDIS);
+			     SPIDER_NET_GDTBSTA);
 }
 
 /**
@@ -1945,10 +1939,11 @@
  * called as task when tx hangs, resets interface (if interface is up)
  */
 static void
-spider_net_tx_timeout_task(void *data)
+spider_net_tx_timeout_task(struct work_struct *work)
 {
-	struct net_device *netdev = data;
-	struct spider_net_card *card = netdev_priv(netdev);
+	struct spider_net_card *card =
+		container_of(work, struct spider_net_card, tx_timeout_task);
+	struct net_device *netdev = card->netdev;
 
 	if (!(netdev->flags & IFF_UP))
 		goto out;
@@ -2122,7 +2117,7 @@
 	card = netdev_priv(netdev);
 	card->netdev = netdev;
 	card->msg_enable = SPIDER_NET_DEFAULT_MSG;
-	INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task, netdev);
+	INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
 	init_waitqueue_head(&card->waitq);
 	atomic_set(&card->tx_timeout_task_counter, 0);
 
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index b3b4611..3e196df 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -24,7 +24,7 @@
 #ifndef _SPIDER_NET_H
 #define _SPIDER_NET_H
 
-#define VERSION "1.1 A"
+#define VERSION "1.6 A"
 
 #include "sungem_phy.h"
 
@@ -217,8 +217,7 @@
 #define SPIDER_NET_GDTBSTA             0x00000300
 #define SPIDER_NET_GDTDCEIDIS          0x00000002
 #define SPIDER_NET_DMA_TX_VALUE        SPIDER_NET_TX_DMA_EN | \
-                                       SPIDER_NET_GDTBSTA | \
-                                       SPIDER_NET_GDTDCEIDIS
+                                       SPIDER_NET_GDTBSTA
 
 #define SPIDER_NET_DMA_TX_FEND_VALUE	0x00030003
 
@@ -328,7 +327,8 @@
 	SPIDER_NET_GRISPDNGINT
 };
 
-#define SPIDER_NET_TXINT	( (1 << SPIDER_NET_GDTFDCINT) )
+#define SPIDER_NET_TXINT	( (1 << SPIDER_NET_GDTFDCINT) | \
+                             (1 << SPIDER_NET_GDTDCEINT) )
 
 /* We rely on flagged descriptor interrupts */
 #define SPIDER_NET_RXINT	( (1 << SPIDER_NET_GDAFDCINT) )
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 7a0aee6..bf873ea 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -41,6 +41,7 @@
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/if_vlan.h>
+#include <linux/mm.h>
 #include <asm/processor.h>		/* Processor type for cache alignment. */
 #include <asm/uaccess.h>
 #include <asm/io.h>
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index b865db3..c62e85d 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -38,6 +38,7 @@
 #include <linux/skbuff.h>
 #include <linux/bitops.h>
 
+#include <asm/cacheflush.h>
 #include <asm/setup.h>
 #include <asm/irq.h>
 #include <asm/io.h>
@@ -944,7 +945,7 @@
 
 static struct net_device *sun3lance_dev;
 
-int init_module(void)
+int __init init_module(void)
 {
 	sun3lance_dev = sun3lance_probe(-1);
 	if (IS_ERR(sun3lance_dev))
@@ -952,7 +953,7 @@
 	return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	unregister_netdev(sun3lance_dev);
 #ifdef CONFIG_SUN3
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 41c503d..c06ecc8 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -264,8 +264,6 @@
 	ASICCtrl = 0x30,
 	EEData = 0x34,
 	EECtrl = 0x36,
-	TxStartThresh = 0x3c,
-	RxEarlyThresh = 0x3e,
 	FlashAddr = 0x40,
 	FlashData = 0x44,
 	TxStatus = 0x46,
@@ -790,6 +788,7 @@
 {
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem *ioaddr = np->base;
+	unsigned long flags;
 	int i;
 
 	/* Do we need to reset the chip??? */
@@ -834,6 +833,10 @@
 		iowrite8(0x01, ioaddr + DebugCtrl1);
 	netif_start_queue(dev);
 
+	spin_lock_irqsave(&np->lock, flags);
+	reset_tx(dev);
+	spin_unlock_irqrestore(&np->lock, flags);
+
 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
 
 	if (netif_msg_ifup(np))
@@ -1081,6 +1084,8 @@
 
 	/* free all tx skbuff */
 	for (i = 0; i < TX_RING_SIZE; i++) {
+		np->tx_ring[i].next_desc = 0;
+
 		skb = np->tx_skbuff[i];
 		if (skb) {
 			pci_unmap_single(np->pci_dev,
@@ -1096,6 +1101,10 @@
 	}
 	np->cur_tx = np->dirty_tx = 0;
 	np->cur_task = 0;
+
+	np->last_tx = NULL;
+	iowrite8(127, ioaddr + TxDMAPollPeriod);
+
 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
 	return 0;
 }
@@ -1111,6 +1120,7 @@
 	int tx_cnt;
 	int tx_status;
 	int handled = 0;
+	int i;
 
 
 	do {
@@ -1153,21 +1163,24 @@
 						np->stats.tx_fifo_errors++;
 					if (tx_status & 0x02)
 						np->stats.tx_window_errors++;
+
 					/*
 					** This reset has been verified on
 					** DFE-580TX boards ! phdm@macqel.be.
 					*/
 					if (tx_status & 0x10) {	/* TxUnderrun */
-						unsigned short txthreshold;
-
-						txthreshold = ioread16 (ioaddr + TxStartThresh);
 						/* Restart Tx FIFO and transmitter */
 						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
-						iowrite16 (txthreshold, ioaddr + TxStartThresh);
 						/* No need to reset the Tx pointer here */
 					}
-					/* Restart the Tx. */
-					iowrite16 (TxEnable, ioaddr + MACCtrl1);
+					/* Restart the Tx. Need to make sure tx enabled */
+					i = 10;
+					do {
+						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
+						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
+							break;
+						mdelay(1);
+					} while (--i);
 				}
 				/* Yup, this is a documentation bug.  It cost me *hours*. */
 				iowrite16 (0, ioaddr + TxStatus);
@@ -1629,6 +1642,14 @@
 	struct sk_buff *skb;
 	int i;
 
+	/* Wait and kill tasklet */
+	tasklet_kill(&np->rx_tasklet);
+	tasklet_kill(&np->tx_tasklet);
+	np->cur_tx = 0;
+	np->dirty_tx = 0;
+	np->cur_task = 0;
+	np->last_tx = NULL;
+
 	netif_stop_queue(dev);
 
 	if (netif_msg_ifdown(np)) {
@@ -1643,12 +1664,26 @@
 	/* Disable interrupts by clearing the interrupt mask. */
 	iowrite16(0x0000, ioaddr + IntrEnable);
 
+	/* Disable Rx and Tx DMA for safely release resource */
+	iowrite32(0x500, ioaddr + DMACtrl);
+
 	/* Stop the chip's Tx and Rx processes. */
 	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
 
-	/* Wait and kill tasklet */
-	tasklet_kill(&np->rx_tasklet);
-	tasklet_kill(&np->tx_tasklet);
+	for (i = 2000; i > 0; i--) {
+		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
+			break;
+		mdelay(1);
+	}
+
+	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
+		  ioaddr + ASICCtrl + 2);
+
+	for (i = 2000; i > 0; i--) {
+		if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+			break;
+		mdelay(1);
+	}
 
 #ifdef __i386__
 	if (netif_msg_hw(np)) {
@@ -1686,6 +1721,7 @@
 		}
 	}
 	for (i = 0; i < TX_RING_SIZE; i++) {
+		np->tx_ring[i].next_desc = 0;
 		skb = np->tx_skbuff[i];
 		if (skb) {
 			pci_unmap_single(np->pci_dev,
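
The sundance changes above repeatedly use the same bounded-poll idiom: kick
the hardware, then wait for a status bit with mdelay(1) in a loop that gives
up after a fixed number of iterations, instead of assuming the operation
completed instantly. A generic sketch of that idiom (FOO_CTRL, FOO_RESET and
FOO_RESET_BUSY are placeholder names, not sundance registers):

	int i;

	/* request a reset, then wait up to two seconds for the busy bit */
	iowrite16(FOO_RESET, ioaddr + FOO_CTRL);
	for (i = 2000; i > 0; i--) {
		if (!(ioread16(ioaddr + FOO_CTRL) & FOO_RESET_BUSY))
			break;
		mdelay(1);
	}
	if (i == 0)
		printk(KERN_WARNING "foo: reset did not complete\n");
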
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 253e96e..785e4a5 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -56,6 +56,7 @@
 #include <linux/if_vlan.h>
 #include <linux/bitops.h>
 #include <linux/mutex.h>
+#include <linux/mm.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -1030,7 +1031,7 @@
 		u64 csum_start_off, csum_stuff_off;
 
 		csum_start_off = (u64) (skb->h.raw - skb->data);
-		csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
+		csum_stuff_off = csum_start_off + skb->csum_offset;
 
 		ctrl = (TXDCTRL_CENAB |
 			(csum_start_off << 15) |
@@ -2281,9 +2282,9 @@
 	}
 }
 
-static void gem_reset_task(void *data)
+static void gem_reset_task(struct work_struct *work)
 {
-	struct gem *gp = (struct gem *) data;
+	struct gem *gp = container_of(work, struct gem, reset_task);
 
 	mutex_lock(&gp->pm_mutex);
 
@@ -3043,7 +3044,7 @@
 	gp->link_timer.function = gem_link_timer;
 	gp->link_timer.data = (unsigned long) gp;
 
-	INIT_WORK(&gp->reset_task, gem_reset_task, gp);
+	INIT_WORK(&gp->reset_task, gem_reset_task);
 
 	gp->lstate = link_down;
 	gp->timer_ticks = 0;
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 9d7cd13..ef67173 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -32,6 +32,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
+#include <linux/mm.h>
 #include <linux/bitops.h>
 
 #include <asm/system.h>
@@ -2272,7 +2273,7 @@
 		u32 csum_start_off, csum_stuff_off;
 
 		csum_start_off = (u32) (skb->h.raw - skb->data);
-		csum_stuff_off = (u32) ((skb->h.raw + skb->csum) - skb->data);
+		csum_stuff_off = csum_start_off + skb->csum_offset;
 
 		tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
 			    ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
@@ -3012,6 +3013,11 @@
 #endif
 
 	err = -ENODEV;
+
+	if (pci_enable_device(pdev))
+		goto err_out;
+	pci_set_master(pdev);
+
 	if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
 		qp = quattro_pci_find(pdev);
 		if (qp == NULL)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index c20bb99..571320ae 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.69"
-#define DRV_MODULE_RELDATE	"November 15, 2006"
+#define DRV_MODULE_VERSION	"3.70"
+#define DRV_MODULE_RELDATE	"December 1, 2006"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -192,6 +192,7 @@
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
@@ -1061,7 +1062,7 @@
 {
 	struct tg3 *tp_peer = tp;
 
-	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
+	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
 		return;
 
 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
@@ -1212,8 +1213,8 @@
 				      power_control);
 		udelay(100);	/* Delay after power state change */
 
-		/* Switch out of Vaux if it is not a LOM */
-		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
+		/* Switch out of Vaux if it is a NIC */
+		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
 
 		return 0;
@@ -1401,8 +1402,10 @@
 static void tg3_link_report(struct tg3 *tp)
 {
 	if (!netif_carrier_ok(tp->dev)) {
-		printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
-	} else {
+		if (netif_msg_link(tp))
+			printk(KERN_INFO PFX "%s: Link is down.\n",
+			       tp->dev->name);
+	} else if (netif_msg_link(tp)) {
 		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
 		       tp->dev->name,
 		       (tp->link_config.active_speed == SPEED_1000 ?
@@ -1557,12 +1560,6 @@
 
 		tg3_writephy(tp, MII_ADVERTISE, new_adv);
 	} else if (tp->link_config.speed == SPEED_INVALID) {
-		tp->link_config.advertising =
-			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
-			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
-			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
-			 ADVERTISED_Autoneg | ADVERTISED_MII);
-
 		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
 			tp->link_config.advertising &=
 				~(ADVERTISED_1000baseT_Half |
@@ -1706,25 +1703,36 @@
 	return err;
 }
 
-static int tg3_copper_is_advertising_all(struct tg3 *tp)
+static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
 {
-	u32 adv_reg, all_mask;
+	u32 adv_reg, all_mask = 0;
+
+	if (mask & ADVERTISED_10baseT_Half)
+		all_mask |= ADVERTISE_10HALF;
+	if (mask & ADVERTISED_10baseT_Full)
+		all_mask |= ADVERTISE_10FULL;
+	if (mask & ADVERTISED_100baseT_Half)
+		all_mask |= ADVERTISE_100HALF;
+	if (mask & ADVERTISED_100baseT_Full)
+		all_mask |= ADVERTISE_100FULL;
 
 	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
 		return 0;
 
-	all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
-		    ADVERTISE_100HALF | ADVERTISE_100FULL);
 	if ((adv_reg & all_mask) != all_mask)
 		return 0;
 	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
 		u32 tg3_ctrl;
 
+		all_mask = 0;
+		if (mask & ADVERTISED_1000baseT_Half)
+			all_mask |= ADVERTISE_1000HALF;
+		if (mask & ADVERTISED_1000baseT_Full)
+			all_mask |= ADVERTISE_1000FULL;
+
 		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
 			return 0;
 
-		all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
-			    MII_TG3_CTRL_ADV_1000_FULL);
 		if ((tg3_ctrl & all_mask) != all_mask)
 			return 0;
 	}
@@ -1884,7 +1892,8 @@
 				/* Force autoneg restart if we are exiting
 				 * low power mode.
 				 */
-				if (!tg3_copper_is_advertising_all(tp))
+				if (!tg3_copper_is_advertising_all(tp,
+						tp->link_config.advertising))
 					current_link_up = 0;
 			} else {
 				current_link_up = 0;
@@ -3654,9 +3663,9 @@
 }
 #endif
 
-static void tg3_reset_task(void *_data)
+static void tg3_reset_task(struct work_struct *work)
 {
-	struct tg3 *tp = _data;
+	struct tg3 *tp = container_of(work, struct tg3, reset_task);
 	unsigned int restart_timer;
 
 	tg3_full_lock(tp, 0);
@@ -3703,8 +3712,9 @@
 {
 	struct tg3 *tp = netdev_priv(dev);
 
-	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
-	       dev->name);
+	if (netif_msg_tx_err(tp))
+		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
+		       dev->name);
 
 	schedule_work(&tp->reset_task);
 }
@@ -6396,16 +6406,17 @@
 	udelay(40);
 
 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
-	 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
+	 * If TG3_FLG2_IS_NIC is zero, we should read the
 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
 	 * whether used as inputs or outputs, are set by boot code after
 	 * reset.
 	 */
-	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
+	if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
 		u32 gpio_mask;
 
-		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
-			    GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
+		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
+			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
+			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
 
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
@@ -6417,8 +6428,9 @@
 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
 
 		/* GPIO1 must be driven high for eeprom write protect */
-		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
-				       GRC_LCLCTRL_GPIO_OUTPUT1);
+		if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
+			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
+					       GRC_LCLCTRL_GPIO_OUTPUT1);
 	}
 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
 	udelay(100);
@@ -8656,7 +8668,9 @@
 	return 0;
 
 out:
-	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
+	if (netif_msg_hw(tp))
+		printk(KERN_ERR PFX "Register test failed at offset %x\n",
+		       offset);
 	tw32(offset, save_val);
 	return -EIO;
 }
@@ -8781,17 +8795,20 @@
 					tg3_writephy(tp, 0x10, phy & ~0x4000);
 				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
 			}
-		}
-		val = BMCR_LOOPBACK | BMCR_FULLDPLX;
-		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
-			val |= BMCR_SPEED100;
-		else
-			val |= BMCR_SPEED1000;
+			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
+		} else
+			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
 
 		tg3_writephy(tp, MII_BMCR, val);
 		udelay(40);
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+
+		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
+			   MAC_MODE_LINK_POLARITY;
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
+			mac_mode |= MAC_MODE_PORT_MODE_MII;
+		} else
+			mac_mode |= MAC_MODE_PORT_MODE_GMII;
 
 		/* reset to prevent losing 1st rx packet intermittently */
 		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
@@ -8799,12 +8816,6 @@
 			udelay(10);
 			tw32_f(MAC_RX_MODE, tp->rx_mode);
 		}
-		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
-			   MAC_MODE_LINK_POLARITY;
-		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
-			mac_mode |= MAC_MODE_PORT_MODE_MII;
-		else
-			mac_mode |= MAC_MODE_PORT_MODE_GMII;
 		if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
@@ -9456,16 +9467,12 @@
 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
 static void __devinit tg3_nvram_init(struct tg3 *tp)
 {
-	int j;
-
 	tw32_f(GRC_EEPROM_ADDR,
 	     (EEPROM_ADDR_FSM_RESET |
 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
 	       EEPROM_ADDR_CLKPERD_SHIFT)));
 
-	/* XXX schedule_timeout() ... */
-	for (j = 0; j < 100; j++)
-		udelay(10);
+	msleep(1);
 
 	/* Enable seeprom accesses. */
 	tw32_f(GRC_LOCAL_CTRL,
@@ -9526,12 +9533,12 @@
 	      EEPROM_ADDR_ADDR_MASK) |
 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
 
-	for (i = 0; i < 10000; i++) {
+	for (i = 0; i < 1000; i++) {
 		tmp = tr32(GRC_EEPROM_ADDR);
 
 		if (tmp & EEPROM_ADDR_COMPLETE)
 			break;
-		udelay(100);
+		msleep(1);
 	}
 	if (!(tmp & EEPROM_ADDR_COMPLETE))
 		return -EBUSY;
@@ -9656,12 +9663,12 @@
 			EEPROM_ADDR_START |
 			EEPROM_ADDR_WRITE);
 
-		for (j = 0; j < 10000; j++) {
+		for (j = 0; j < 1000; j++) {
 			val = tr32(GRC_EEPROM_ADDR);
 
 			if (val & EEPROM_ADDR_COMPLETE)
 				break;
-			udelay(100);
+			msleep(1);
 		}
 		if (!(val & EEPROM_ADDR_COMPLETE)) {
 			rc = -EBUSY;
@@ -9965,8 +9972,10 @@
 	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
-		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM))
+		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
 			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
+			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
+		}
 		return;
 	}
 
@@ -10066,10 +10075,17 @@
 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
 
-		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
+		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
 			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
-		else
+			if ((tp->pdev->subsystem_vendor ==
+			     PCI_VENDOR_ID_ARIMA) &&
+			    (tp->pdev->subsystem_device == 0x205a ||
+			     tp->pdev->subsystem_device == 0x2063))
+				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
+		} else {
 			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
+			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
+		}
 
 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
 			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
@@ -10147,7 +10163,7 @@
 
 	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
 	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
-		u32 bmsr, adv_reg, tg3_ctrl;
+		u32 bmsr, adv_reg, tg3_ctrl, mask;
 
 		tg3_readphy(tp, MII_BMSR, &bmsr);
 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
@@ -10171,7 +10187,10 @@
 					     MII_TG3_CTRL_ENABLE_AS_MASTER);
 		}
 
-		if (!tg3_copper_is_advertising_all(tp)) {
+		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
+		if (!tg3_copper_is_advertising_all(tp, mask)) {
 			tg3_writephy(tp, MII_ADVERTISE, adv_reg);
 
 			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
@@ -10695,7 +10714,7 @@
 		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
 
 	/* Get eeprom hw config before calling tg3_set_power_state().
-	 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
+	 * In particular, the TG3_FLG2_IS_NIC flag must be
 	 * determined before calling tg3_set_power_state() so that
 	 * we know whether or not to switch out of Vaux power.
 	 * When the flag is set, it means that GPIO1 is used for eeprom
@@ -10862,7 +10881,8 @@
 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
 	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
-	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)) ||
+	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
+	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 		tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
 
@@ -11734,7 +11754,7 @@
 #endif
 	spin_lock_init(&tp->lock);
 	spin_lock_init(&tp->indirect_lock);
-	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
+	INIT_WORK(&tp->reset_task, tg3_reset_task);
 
 	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
 	if (tp->regs == 0UL) {
@@ -11912,13 +11932,15 @@
 
 	pci_set_drvdata(pdev, dev);
 
-	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
+	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
 	       dev->name,
 	       tp->board_part_number,
 	       tp->pci_chip_rev_id,
 	       tg3_phy_string(tp),
 	       tg3_bus_string(tp, str),
-	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
+	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
+		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
+		 "10/100/1000Base-T")));
 
 	for (i = 0; i < 6; i++)
 		printk("%2.2x%c", dev->dev_addr[i],
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 92f5300..dfaf4ed 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2233,6 +2233,7 @@
 #define TG3_FLG2_PCI_EXPRESS		0x00000200
 #define TG3_FLG2_ASF_NEW_HANDSHAKE	0x00000400
 #define TG3_FLG2_HW_AUTONEG		0x00000800
+#define TG3_FLG2_IS_NIC			0x00001000
 #define TG3_FLG2_PHY_SERDES		0x00002000
 #define TG3_FLG2_CAPACITIVE_COUPLING	0x00004000
 #define TG3_FLG2_FLASH			0x00008000
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index e14f5a0..f85f002 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -296,6 +296,7 @@
 static int	TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
 static int      TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent);
 static void	TLan_tx_timeout( struct net_device *dev);
+static void	TLan_tx_timeout_work(struct work_struct *work);
 static int 	tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
 
 static u32	TLan_HandleInvalid( struct net_device *, u16 );
@@ -562,6 +563,7 @@
 	priv = netdev_priv(dev);
 
 	priv->pciDev = pdev;
+	priv->dev = dev;
 
 	/* Is this a PCI device? */
 	if (pdev) {
@@ -634,7 +636,7 @@
 
 	/* This will be used when we get an adapter error from
 	 * within our irq handler */
-	INIT_WORK(&priv->tlan_tqueue, (void *)(void*)TLan_tx_timeout, dev);
+	INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work);
 
 	spin_lock_init(&priv->lock);
 
@@ -1040,6 +1042,25 @@
 }
 
 
+	/***************************************************************
+	 * 	TLan_tx_timeout_work
+	 *
+	 * 	Returns: nothing
+	 *
+	 * 	Params:
+	 * 		work	work item of device which timed out
+	 *
+	 **************************************************************/
+
+static void TLan_tx_timeout_work(struct work_struct *work)
+{
+	TLanPrivateInfo	*priv =
+		container_of(work, TLanPrivateInfo, tlan_tqueue);
+
+	TLan_tx_timeout(priv->dev);
+}
+
+
 
 	/***************************************************************
 	 *	TLan_StartTx
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index a44e2f2..41ce0b6 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -170,6 +170,7 @@
 typedef struct tlan_private_tag {
 	struct net_device       *nextDevice;
 	struct pci_dev		*pciDev;
+	struct net_device       *dev;
 	void			*dmaStorage;
 	dma_addr_t		dmaStorageDMA;
 	unsigned int		dmaSize;
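The tg3 and tlan hunks above both convert to the newer workqueue API, where INIT_WORK() takes only a handler and the handler receives the work_struct itself; the private structure is recovered with container_of(), which is why tlan_private_tag grows a dev back-pointer. A hedged sketch of the idiom ("struct bar" and bar_reset_work() are hypothetical names, not part of the patch):

#include <linux/workqueue.h>

struct bar {
	struct net_device *dev;		/* back-pointer, like the new tlan field */
	struct work_struct reset_task;
};

static void bar_reset_work(struct work_struct *work)
{
	/* Only the work_struct is passed in; recover the enclosing struct. */
	struct bar *bp = container_of(work, struct bar, reset_task);

	printk(KERN_DEBUG "%s: deferred reset running\n", bp->dev->name);
}

/* Setup:   INIT_WORK(&bp->reset_task, bar_reset_work);
 * Trigger: schedule_work(&bp->reset_task);              */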
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index bfe5986..0d97e10 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -1826,7 +1826,7 @@
 	skb->protocol = tr_type_trans(skb, dev);
 	if (IPv4_p) {
 		skb->csum = chksum;
-		skb->ip_summed = 1;
+		skb->ip_summed = CHECKSUM_COMPLETE;
 	}
 	netif_rx(skb);
 	dev->last_rx = jiffies;
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index cd142d0..8f4ecc1 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -1771,7 +1771,7 @@
 
 static int __init olympic_pci_init(void) 
 {
-	return pci_module_init (&olympic_driver) ; 
+	return pci_register_driver(&olympic_driver) ;
 }
 
 static void __exit olympic_pci_cleanup(void)
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index 46dabdb..cec282a 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -5706,7 +5706,7 @@
         return found ? 0 : -ENODEV;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
         int i;
 
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
new file mode 100644
index 0000000..893808a
--- /dev/null
+++ b/drivers/net/tsi108_eth.c
@@ -0,0 +1,1708 @@
+/*******************************************************************************
+
+  Copyright(c) 2006 Tundra Semiconductor Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of the GNU General Public License as published by the Free
+  Software Foundation; either version 2 of the License, or (at your option)
+  any later version.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+*******************************************************************************/
+
+/* This driver is based on the driver code originally developed
+ * for the Intel IOC80314 (ForestLake) Gigabit Ethernet by
+ * scott.wood@timesys.com  * Copyright (C) 2003 TimeSys Corporation
+ *
+ * Currently changes from original version are:
+ * - porting to Tsi108-based platform and kernel 2.6 (kong.lai@tundra.com)
+ * - modifications to handle two ports independently and support for
+ *   additional PHY devices (alexandre.bounine@tundra.com)
+ * - Get hardware information from platform device. (tie-fei.zang@freescale.com)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/tsi108.h>
+
+#include "tsi108_eth.h"
+
+#define MII_READ_DELAY 10000	/* max link wait time in msec */
+
+#define TSI108_RXRING_LEN     256
+
+/* NOTE: The driver currently does not support receiving packets
+ * larger than the buffer size, so don't decrease this (unless you
+ * want to add such support).
+ */
+#define TSI108_RXBUF_SIZE     1536
+
+#define TSI108_TXRING_LEN     256
+
+#define TSI108_TX_INT_FREQ    64
+
+/* Check the phy status every half a second. */
+#define CHECK_PHY_INTERVAL (HZ/2)
+
+static int tsi108_init_one(struct platform_device *pdev);
+static int tsi108_ether_remove(struct platform_device *pdev);
+
+struct tsi108_prv_data {
+	void  __iomem *regs;	/* Base of normal regs */
+	void  __iomem *phyregs;	/* Base of register bank used for PHY access */
+
+	unsigned int phy;		/* Index of PHY for this interface */
+	unsigned int irq_num;
+	unsigned int id;
+
+	struct timer_list timer;/* Timer that triggers the check phy function */
+	unsigned int rxtail;	/* Next entry in rxring to read */
+	unsigned int rxhead;	/* Next entry in rxring to give a new buffer */
+	unsigned int rxfree;	/* Number of free, allocated RX buffers */
+
+	unsigned int rxpending;	/* Non-zero if there are still descriptors
+				 * to be processed from a previous descriptor
+				 * interrupt condition that has been cleared */
+
+	unsigned int txtail;	/* Next TX descriptor to check status on */
+	unsigned int txhead;	/* Next TX descriptor to use */
+
+	/* Number of free TX descriptors.  This could be calculated from
+	 * rxhead and rxtail if one descriptor were left unused to disambiguate
+	 * full and empty conditions, but it's simpler to just keep track
+	 * explicitly. */
+
+	unsigned int txfree;
+
+	unsigned int phy_ok;		/* The PHY is currently powered on. */
+
+	/* PHY status (duplex is 1 for half, 2 for full,
+	 * so that the default 0 indicates that neither has
+	 * yet been configured). */
+
+	unsigned int link_up;
+	unsigned int speed;
+	unsigned int duplex;
+
+	tx_desc *txring;
+	rx_desc *rxring;
+	struct sk_buff *txskbs[TSI108_TXRING_LEN];
+	struct sk_buff *rxskbs[TSI108_RXRING_LEN];
+
+	dma_addr_t txdma, rxdma;
+
+	/* txlock nests in misclock and phy_lock */
+
+	spinlock_t txlock, misclock;
+
+	/* stats is used to hold the upper bits of each hardware counter,
+	 * and tmpstats is used to hold the full values for returning
+	 * to the caller of get_stats().  They must be separate in case
+	 * an overflow interrupt occurs before the stats are consumed.
+	 */
+
+	struct net_device_stats stats;
+	struct net_device_stats tmpstats;
+
+	/* These stats are kept separate in hardware, thus require individual
+	 * fields for handling carry.  They are combined in get_stats.
+	 */
+
+	unsigned long rx_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_short_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_long_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_underruns;	/* Add to rx_length_errors */
+	unsigned long rx_overruns;	/* Add to rx_length_errors */
+
+	unsigned long tx_coll_abort;	/* Add to tx_aborted_errors/collisions */
+	unsigned long tx_pause_drop;	/* Add to tx_aborted_errors */
+
+	unsigned long mc_hash[16];
+	u32 msg_enable;			/* debug message level */
+	struct mii_if_info mii_if;
+	unsigned int init_media;
+};
+
+/* Structure for a device driver */
+
+static struct platform_driver tsi_eth_driver = {
+	.probe = tsi108_init_one,
+	.remove = tsi108_ether_remove,
+	.driver	= {
+		.name = "tsi-ethernet",
+	},
+};
+
+static void tsi108_timed_checker(unsigned long dev_ptr);
+
+static void dump_eth_one(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	printk("Dumping %s...\n", dev->name);
+	printk("intstat %x intmask %x phy_ok %d"
+	       " link %d speed %d duplex %d\n",
+	       TSI_READ(TSI108_EC_INTSTAT),
+	       TSI_READ(TSI108_EC_INTMASK), data->phy_ok,
+	       data->link_up, data->speed, data->duplex);
+
+	printk("TX: head %d, tail %d, free %d, stat %x, estat %x, err %x\n",
+	       data->txhead, data->txtail, data->txfree,
+	       TSI_READ(TSI108_EC_TXSTAT),
+	       TSI_READ(TSI108_EC_TXESTAT),
+	       TSI_READ(TSI108_EC_TXERR));
+
+	printk("RX: head %d, tail %d, free %d, stat %x,"
+	       " estat %x, err %x, pending %d\n\n",
+	       data->rxhead, data->rxtail, data->rxfree,
+	       TSI_READ(TSI108_EC_RXSTAT),
+	       TSI_READ(TSI108_EC_RXESTAT),
+	       TSI_READ(TSI108_EC_RXERR), data->rxpending);
+}
+
+/* Synchronization is needed between the thread and up/down events.
+ * Note that the PHY is accessed through the same registers for both
+ * interfaces, so this can't be made interface-specific.
+ */
+
+static DEFINE_SPINLOCK(phy_lock);
+
+static int tsi108_read_mii(struct tsi108_prv_data *data, int reg)
+{
+	unsigned i;
+
+	TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
+				(data->phy << TSI108_MAC_MII_ADDR_PHY) |
+				(reg << TSI108_MAC_MII_ADDR_REG));
+	TSI_WRITE_PHY(TSI108_MAC_MII_CMD, 0);
+	TSI_WRITE_PHY(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ);
+	for (i = 0; i < 100; i++) {
+		if (!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
+		      (TSI108_MAC_MII_IND_NOTVALID | TSI108_MAC_MII_IND_BUSY)))
+			break;
+		udelay(10);
+	}
+
+	if (i == 100)
+		return 0xffff;
+	else
+		return (TSI_READ_PHY(TSI108_MAC_MII_DATAIN));
+}
+
+static void tsi108_write_mii(struct tsi108_prv_data *data,
+				int reg, u16 val)
+{
+	unsigned i = 100;
+	TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
+				(data->phy << TSI108_MAC_MII_ADDR_PHY) |
+				(reg << TSI108_MAC_MII_ADDR_REG));
+	TSI_WRITE_PHY(TSI108_MAC_MII_DATAOUT, val);
+	while (i--) {
+		if(!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
+			TSI108_MAC_MII_IND_BUSY))
+			break;
+		udelay(10);
+	}
+}
+
+static int tsi108_mdio_read(struct net_device *dev, int addr, int reg)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	return tsi108_read_mii(data, reg);
+}
+
+static void tsi108_mdio_write(struct net_device *dev, int addr, int reg, int val)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	tsi108_write_mii(data, reg, val);
+}
+
+static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
+					int reg, u16 val)
+{
+	unsigned i = 1000;
+	TSI_WRITE(TSI108_MAC_MII_ADDR,
+			     (0x1e << TSI108_MAC_MII_ADDR_PHY)
+			     | (reg << TSI108_MAC_MII_ADDR_REG));
+	TSI_WRITE(TSI108_MAC_MII_DATAOUT, val);
+	while(i--) {
+		if(!(TSI_READ(TSI108_MAC_MII_IND) & TSI108_MAC_MII_IND_BUSY))
+			return;
+		udelay(10);
+	}
+	printk(KERN_ERR "%s function time out \n", __FUNCTION__);
+}
+
+static int mii_speed(struct mii_if_info *mii)
+{
+	int advert, lpa, val, media;
+	int lpa2 = 0;
+	int speed;
+
+	if (!mii_link_ok(mii))
+		return 0;
+
+	val = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_BMSR);
+	if ((val & BMSR_ANEGCOMPLETE) == 0)
+		return 0;
+
+	advert = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_ADVERTISE);
+	lpa = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_LPA);
+	media = mii_nway_result(advert & lpa);
+
+	if (mii->supports_gmii)
+		lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000);
+
+	speed = lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
+			(media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? 100 : 10);
+	return speed;
+}
+
+static void tsi108_check_phy(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 mac_cfg2_reg, portctrl_reg;
+	u32 duplex;
+	u32 speed;
+	unsigned long flags;
+
+	/* Do a dummy read, as for some reason the first read
+	 * after a link becomes up returns link down, even if
+	 * it's been a while since the link came up.
+	 */
+
+	spin_lock_irqsave(&phy_lock, flags);
+
+	if (!data->phy_ok)
+		goto out;
+
+	tsi108_read_mii(data, MII_BMSR);
+
+	duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media);
+	data->init_media = 0;
+
+	if (netif_carrier_ok(dev)) {
+
+		speed = mii_speed(&data->mii_if);
+
+		if ((speed != data->speed) || duplex) {
+
+			mac_cfg2_reg = TSI_READ(TSI108_MAC_CFG2);
+			portctrl_reg = TSI_READ(TSI108_EC_PORTCTRL);
+
+			mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
+
+			if (speed == 1000) {
+				mac_cfg2_reg |= TSI108_MAC_CFG2_GIG;
+				portctrl_reg &= ~TSI108_EC_PORTCTRL_NOGIG;
+			} else {
+				mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG;
+				portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG;
+			}
+
+			data->speed = speed;
+
+			if (data->mii_if.full_duplex) {
+				mac_cfg2_reg |= TSI108_MAC_CFG2_FULLDUPLEX;
+				portctrl_reg &= ~TSI108_EC_PORTCTRL_HALFDUPLEX;
+				data->duplex = 2;
+			} else {
+				mac_cfg2_reg &= ~TSI108_MAC_CFG2_FULLDUPLEX;
+				portctrl_reg |= TSI108_EC_PORTCTRL_HALFDUPLEX;
+				data->duplex = 1;
+			}
+
+			TSI_WRITE(TSI108_MAC_CFG2, mac_cfg2_reg);
+			TSI_WRITE(TSI108_EC_PORTCTRL, portctrl_reg);
+
+			if (data->link_up == 0) {
+				/* The manual says it can take 3-4 usecs for the speed change
+				 * to take effect.
+				 */
+				udelay(5);
+
+				spin_lock(&data->txlock);
+				if (is_valid_ether_addr(dev->dev_addr) && data->txfree)
+					netif_wake_queue(dev);
+
+				data->link_up = 1;
+				spin_unlock(&data->txlock);
+			}
+		}
+
+	} else {
+		if (data->link_up == 1) {
+			netif_stop_queue(dev);
+			data->link_up = 0;
+			printk(KERN_NOTICE "%s : link is down\n", dev->name);
+		}
+
+		goto out;
+	}
+
+
+out:
+	spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+static inline void
+tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
+		      unsigned long *upper)
+{
+	if (carry & carry_bit)
+		*upper += carry_shift;
+}
+
+static void tsi108_stat_carry(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 carry1, carry2;
+
+	spin_lock_irq(&data->misclock);
+
+	carry1 = TSI_READ(TSI108_STAT_CARRY1);
+	carry2 = TSI_READ(TSI108_STAT_CARRY2);
+
+	TSI_WRITE(TSI108_STAT_CARRY1, carry1);
+	TSI_WRITE(TSI108_STAT_CARRY2, carry2);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXBYTES,
+			      TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXPKTS,
+			      TSI108_STAT_RXPKTS_CARRY,
+			      &data->stats.rx_packets);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFCS,
+			      TSI108_STAT_RXFCS_CARRY, &data->rx_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXMCAST,
+			      TSI108_STAT_RXMCAST_CARRY,
+			      &data->stats.multicast);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXALIGN,
+			      TSI108_STAT_RXALIGN_CARRY,
+			      &data->stats.rx_frame_errors);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXLENGTH,
+			      TSI108_STAT_RXLENGTH_CARRY,
+			      &data->stats.rx_length_errors);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXRUNT,
+			      TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJUMBO,
+			      TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFRAG,
+			      TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJABBER,
+			      TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXDROP,
+			      TSI108_STAT_RXDROP_CARRY,
+			      &data->stats.rx_missed_errors);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXBYTES,
+			      TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPKTS,
+			      TSI108_STAT_TXPKTS_CARRY,
+			      &data->stats.tx_packets);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXDEF,
+			      TSI108_STAT_TXEXDEF_CARRY,
+			      &data->stats.tx_aborted_errors);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXCOL,
+			      TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXTCOL,
+			      TSI108_STAT_TXTCOL_CARRY,
+			      &data->stats.collisions);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPAUSE,
+			      TSI108_STAT_TXPAUSEDROP_CARRY,
+			      &data->tx_pause_drop);
+
+	spin_unlock_irq(&data->misclock);
+}
+
+/* Read a stat counter atomically with respect to carries.
+ * data->misclock must be held.
+ */
+static inline unsigned long
+tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit,
+		 int carry_shift, unsigned long *upper)
+{
+	int carryreg;
+	unsigned long val;
+
+	if (reg < 0xb0)
+		carryreg = TSI108_STAT_CARRY1;
+	else
+		carryreg = TSI108_STAT_CARRY2;
+
+      again:
+	val = TSI_READ(reg) | *upper;
+
+	/* Check to see if it overflowed, but the interrupt hasn't
+	 * been serviced yet.  If so, handle the carry here, and
+	 * try again.
+	 */
+
+	if (unlikely(TSI_READ(carryreg) & carry_bit)) {
+		*upper += carry_shift;
+		TSI_WRITE(carryreg, carry_bit);
+		goto again;
+	}
+
+	return val;
+}
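
tsi108_read_stat() above implements a read-retry pattern for counters that are narrower than the values reported to the stack: the hardware sets a carry bit on wrap, software accumulates the wrapped-around amount in *upper, and a wrap that happens between reading the counter and the carry interrupt being serviced is folded in on the spot. A sketch of the same pattern, with hypothetical hw_read_counter()/hw_carry_pending()/hw_ack_carry() accessors standing in for the TSI_READ/TSI_WRITE register operations (not part of the patch):

extern unsigned long hw_read_counter(void);	/* hypothetical */
extern int hw_carry_pending(void);		/* hypothetical */
extern void hw_ack_carry(void);			/* hypothetical */
#define HW_COUNTER_WRAP (1UL << 16)		/* hypothetical wrap amount */

static unsigned long read_extended_counter(unsigned long *upper)
{
	unsigned long val;

again:
	/* Low bits from hardware, high bits accumulated by software. */
	val = hw_read_counter() | *upper;

	/* If the counter wrapped after the read above but before the carry
	 * interrupt ran, account for it here and re-read. */
	if (hw_carry_pending()) {
		*upper += HW_COUNTER_WRAP;
		hw_ack_carry();
		goto again;
	}
	return val;
}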
+
+static struct net_device_stats *tsi108_get_stats(struct net_device *dev)
+{
+	unsigned long excol;
+
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	spin_lock_irq(&data->misclock);
+
+	data->tmpstats.rx_packets =
+	    tsi108_read_stat(data, TSI108_STAT_RXPKTS,
+			     TSI108_STAT_CARRY1_RXPKTS,
+			     TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets);
+
+	data->tmpstats.tx_packets =
+	    tsi108_read_stat(data, TSI108_STAT_TXPKTS,
+			     TSI108_STAT_CARRY2_TXPKTS,
+			     TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets);
+
+	data->tmpstats.rx_bytes =
+	    tsi108_read_stat(data, TSI108_STAT_RXBYTES,
+			     TSI108_STAT_CARRY1_RXBYTES,
+			     TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
+
+	data->tmpstats.tx_bytes =
+	    tsi108_read_stat(data, TSI108_STAT_TXBYTES,
+			     TSI108_STAT_CARRY2_TXBYTES,
+			     TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
+
+	data->tmpstats.multicast =
+	    tsi108_read_stat(data, TSI108_STAT_RXMCAST,
+			     TSI108_STAT_CARRY1_RXMCAST,
+			     TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast);
+
+	excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL,
+				 TSI108_STAT_CARRY2_TXEXCOL,
+				 TSI108_STAT_TXEXCOL_CARRY,
+				 &data->tx_coll_abort);
+
+	data->tmpstats.collisions =
+	    tsi108_read_stat(data, TSI108_STAT_TXTCOL,
+			     TSI108_STAT_CARRY2_TXTCOL,
+			     TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions);
+
+	data->tmpstats.collisions += excol;
+
+	data->tmpstats.rx_length_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXLENGTH,
+			     TSI108_STAT_CARRY1_RXLENGTH,
+			     TSI108_STAT_RXLENGTH_CARRY,
+			     &data->stats.rx_length_errors);
+
+	data->tmpstats.rx_length_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXRUNT,
+			     TSI108_STAT_CARRY1_RXRUNT,
+			     TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
+
+	data->tmpstats.rx_length_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXJUMBO,
+			     TSI108_STAT_CARRY1_RXJUMBO,
+			     TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
+
+	data->tmpstats.rx_frame_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXALIGN,
+			     TSI108_STAT_CARRY1_RXALIGN,
+			     TSI108_STAT_RXALIGN_CARRY,
+			     &data->stats.rx_frame_errors);
+
+	data->tmpstats.rx_frame_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXFCS,
+			     TSI108_STAT_CARRY1_RXFCS, TSI108_STAT_RXFCS_CARRY,
+			     &data->rx_fcs);
+
+	data->tmpstats.rx_frame_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXFRAG,
+			     TSI108_STAT_CARRY1_RXFRAG,
+			     TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
+
+	data->tmpstats.rx_missed_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXDROP,
+			     TSI108_STAT_CARRY1_RXDROP,
+			     TSI108_STAT_RXDROP_CARRY,
+			     &data->stats.rx_missed_errors);
+
+	/* These three are maintained by software. */
+	data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors;
+	data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors;
+
+	data->tmpstats.tx_aborted_errors =
+	    tsi108_read_stat(data, TSI108_STAT_TXEXDEF,
+			     TSI108_STAT_CARRY2_TXEXDEF,
+			     TSI108_STAT_TXEXDEF_CARRY,
+			     &data->stats.tx_aborted_errors);
+
+	data->tmpstats.tx_aborted_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP,
+			     TSI108_STAT_CARRY2_TXPAUSE,
+			     TSI108_STAT_TXPAUSEDROP_CARRY,
+			     &data->tx_pause_drop);
+
+	data->tmpstats.tx_aborted_errors += excol;
+
+	data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors;
+	data->tmpstats.rx_errors = data->tmpstats.rx_length_errors +
+	    data->tmpstats.rx_crc_errors +
+	    data->tmpstats.rx_frame_errors +
+	    data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors;
+
+	spin_unlock_irq(&data->misclock);
+	return &data->tmpstats;
+}
+
+static void tsi108_restart_rx(struct tsi108_prv_data * data, struct net_device *dev)
+{
+	TSI_WRITE(TSI108_EC_RXQ_PTRHIGH,
+			     TSI108_EC_RXQ_PTRHIGH_VALID);
+
+	TSI_WRITE(TSI108_EC_RXCTRL, TSI108_EC_RXCTRL_GO
+			     | TSI108_EC_RXCTRL_QUEUE0);
+}
+
+static void tsi108_restart_tx(struct tsi108_prv_data * data)
+{
+	TSI_WRITE(TSI108_EC_TXQ_PTRHIGH,
+			     TSI108_EC_TXQ_PTRHIGH_VALID);
+
+	TSI_WRITE(TSI108_EC_TXCTRL, TSI108_EC_TXCTRL_IDLEINT |
+			     TSI108_EC_TXCTRL_GO | TSI108_EC_TXCTRL_QUEUE0);
+}
+
+/* txlock must be held by caller, with IRQs disabled, and
+ * with permission to re-enable them when the lock is dropped.
+ */
+static void tsi108_complete_tx(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int tx;
+	struct sk_buff *skb;
+	int release = 0;
+
+	while (!data->txfree || data->txhead != data->txtail) {
+		tx = data->txtail;
+
+		if (data->txring[tx].misc & TSI108_TX_OWN)
+			break;
+
+		skb = data->txskbs[tx];
+
+		if (!(data->txring[tx].misc & TSI108_TX_OK))
+			printk("%s: bad tx packet, misc %x\n",
+			       dev->name, data->txring[tx].misc);
+
+		data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
+		data->txfree++;
+
+		if (data->txring[tx].misc & TSI108_TX_EOF) {
+			dev_kfree_skb_any(skb);
+			release++;
+		}
+	}
+
+	if (release) {
+		if (is_valid_ether_addr(dev->dev_addr) && data->link_up)
+			netif_wake_queue(dev);
+	}
+}
+
+static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int frags = skb_shinfo(skb)->nr_frags + 1;
+	int i;
+
+	if (!data->phy_ok && net_ratelimit())
+		printk(KERN_ERR "%s: Transmit while PHY is down!\n", dev->name);
+
+	if (!data->link_up) {
+		printk(KERN_ERR "%s: Transmit while link is down!\n",
+		       dev->name);
+		netif_stop_queue(dev);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (data->txfree < MAX_SKB_FRAGS + 1) {
+		netif_stop_queue(dev);
+
+		if (net_ratelimit())
+			printk(KERN_ERR "%s: Transmit with full tx ring!\n",
+			       dev->name);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (data->txfree - frags < MAX_SKB_FRAGS + 1) {
+		netif_stop_queue(dev);
+	}
+
+	spin_lock_irq(&data->txlock);
+
+	for (i = 0; i < frags; i++) {
+		int misc = 0;
+		int tx = data->txhead;
+
+		/* This is done to mark every TSI108_TX_INT_FREQ tx buffers with
+		 * the interrupt bit.  TX descriptor-complete interrupts are
+		 * enabled when the queue fills up, and masked when there is
+		 * still free space.  This way, when saturating the outbound
+		 * link, the tx interrupts are kept to a reasonable level.
+		 * When the queue is not full, reclamation of skbs still occurs
+		 * as new packets are transmitted, or on a queue-empty
+		 * interrupt.
+		 */
+
+		if ((tx % TSI108_TX_INT_FREQ == 0) &&
+		    ((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ))
+			misc = TSI108_TX_INT;
+
+		data->txskbs[tx] = skb;
+
+		if (i == 0) {
+			data->txring[tx].buf0 = dma_map_single(NULL, skb->data,
+					skb->len - skb->data_len, DMA_TO_DEVICE);
+			data->txring[tx].len = skb->len - skb->data_len;
+			misc |= TSI108_TX_SOF;
+		} else {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+
+			data->txring[tx].buf0 =
+			    dma_map_page(NULL, frag->page, frag->page_offset,
+					    frag->size, DMA_TO_DEVICE);
+			data->txring[tx].len = frag->size;
+		}
+
+		if (i == frags - 1)
+			misc |= TSI108_TX_EOF;
+
+		if (netif_msg_pktdata(data)) {
+			int i;
+			printk("%s: Tx Frame contents (%d)\n", dev->name,
+			       skb->len);
+			for (i = 0; i < skb->len; i++)
+				printk(" %2.2x", skb->data[i]);
+			printk(".\n");
+		}
+		data->txring[tx].misc = misc | TSI108_TX_OWN;
+
+		data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN;
+		data->txfree--;
+	}
+
+	tsi108_complete_tx(dev);
+
+	/* This must be done after the check for completed tx descriptors,
+	 * so that the tail pointer is correct.
+	 */
+
+	if (!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_QUEUE0))
+		tsi108_restart_tx(data);
+
+	spin_unlock_irq(&data->txlock);
+	return NETDEV_TX_OK;
+}
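
The misc-flag logic in tsi108_send_packet() above throttles TX completion interrupts: a descriptor only requests an interrupt when its slot index is a multiple of TSI108_TX_INT_FREQ and at least that many descriptors are outstanding. With the constants defined in this file (ring of 256, interval of 64) a saturated link generates at most four completion interrupts per trip around the ring, while a lightly loaded queue relies on reclamation at transmit time and on the idle interrupt. The predicate, restated as a standalone sketch (tx_slot_wants_irq() is a made-up helper, not part of the patch):

#define SKETCH_TXRING_LEN   256		/* mirrors TSI108_TXRING_LEN */
#define SKETCH_TX_INT_FREQ   64		/* mirrors TSI108_TX_INT_FREQ */

static int tx_slot_wants_irq(int slot, int txfree)
{
	int outstanding = SKETCH_TXRING_LEN - txfree;

	/* Interrupt bit only on every 64th slot, and only once at least
	 * one interrupt-interval of descriptors is queued. */
	return (slot % SKETCH_TX_INT_FREQ == 0) &&
	       (outstanding >= SKETCH_TX_INT_FREQ);
}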
+
+static int tsi108_complete_rx(struct net_device *dev, int budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int done = 0;
+
+	while (data->rxfree && done != budget) {
+		int rx = data->rxtail;
+		struct sk_buff *skb;
+
+		if (data->rxring[rx].misc & TSI108_RX_OWN)
+			break;
+
+		skb = data->rxskbs[rx];
+		data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
+		data->rxfree--;
+		done++;
+
+		if (data->rxring[rx].misc & TSI108_RX_BAD) {
+			spin_lock_irq(&data->misclock);
+
+			if (data->rxring[rx].misc & TSI108_RX_CRC)
+				data->stats.rx_crc_errors++;
+			if (data->rxring[rx].misc & TSI108_RX_OVER)
+				data->stats.rx_fifo_errors++;
+
+			spin_unlock_irq(&data->misclock);
+
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+		if (netif_msg_pktdata(data)) {
+			int i;
+			printk("%s: Rx Frame contents (%d)\n",
+			       dev->name, data->rxring[rx].len);
+			for (i = 0; i < data->rxring[rx].len; i++)
+				printk(" %2.2x", skb->data[i]);
+			printk(".\n");
+		}
+
+		skb->dev = dev;
+		skb_put(skb, data->rxring[rx].len);
+		skb->protocol = eth_type_trans(skb, dev);
+		netif_receive_skb(skb);
+		dev->last_rx = jiffies;
+	}
+
+	return done;
+}
+
+static int tsi108_refill_rx(struct net_device *dev, int budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int done = 0;
+
+	while (data->rxfree != TSI108_RXRING_LEN && done != budget) {
+		int rx = data->rxhead;
+		struct sk_buff *skb;
+
+		data->rxskbs[rx] = skb = dev_alloc_skb(TSI108_RXBUF_SIZE + 2);
+		if (!skb)
+			break;
+
+		skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */
+
+		data->rxring[rx].buf0 = dma_map_single(NULL, skb->data,
+							TSI108_RX_SKB_SIZE,
+							DMA_FROM_DEVICE);
+
+		/* Sometimes the hardware sets blen to zero after packet
+		 * reception, even though the manual says that it's only ever
+		 * modified by the driver.
+		 */
+
+		data->rxring[rx].blen = TSI108_RX_SKB_SIZE;
+		data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT;
+
+		data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN;
+		data->rxfree++;
+		done++;
+	}
+
+	if (done != 0 && !(TSI_READ(TSI108_EC_RXSTAT) &
+			   TSI108_EC_RXSTAT_QUEUE0))
+		tsi108_restart_rx(data, dev);
+
+	return done;
+}
+
+static int tsi108_poll(struct net_device *dev, int *budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 estat = TSI_READ(TSI108_EC_RXESTAT);
+	u32 intstat = TSI_READ(TSI108_EC_INTSTAT);
+	int total_budget = min(*budget, dev->quota);
+	int num_received = 0, num_filled = 0, budget_used;
+
+	intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
+	    TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT;
+
+	TSI_WRITE(TSI108_EC_RXESTAT, estat);
+	TSI_WRITE(TSI108_EC_INTSTAT, intstat);
+
+	if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT))
+		num_received = tsi108_complete_rx(dev, total_budget);
+
+	/* This should normally fill no more slots than the number of
+	 * packets received in tsi108_complete_rx().  The exception
+	 * is when we previously ran out of memory for RX SKBs.  In that
+	 * case, it's helpful to obey the budget, not only so that the
+	 * CPU isn't hogged, but so that memory (which may still be low)
+	 * is not hogged by one device.
+	 *
+	 * A work unit is considered to be two SKBs to allow us to catch
+	 * up when the ring has shrunk due to out-of-memory but we're
+	 * still removing the full budget's worth of packets each time.
+	 */
+
+	if (data->rxfree < TSI108_RXRING_LEN)
+		num_filled = tsi108_refill_rx(dev, total_budget * 2);
+
+	if (intstat & TSI108_INT_RXERROR) {
+		u32 err = TSI_READ(TSI108_EC_RXERR);
+		TSI_WRITE(TSI108_EC_RXERR, err);
+
+		if (err) {
+			if (net_ratelimit())
+				printk(KERN_DEBUG "%s: RX error %x\n",
+				       dev->name, err);
+
+			if (!(TSI_READ(TSI108_EC_RXSTAT) &
+			      TSI108_EC_RXSTAT_QUEUE0))
+				tsi108_restart_rx(data, dev);
+		}
+	}
+
+	if (intstat & TSI108_INT_RXOVERRUN) {
+		spin_lock_irq(&data->misclock);
+		data->stats.rx_fifo_errors++;
+		spin_unlock_irq(&data->misclock);
+	}
+
+	budget_used = max(num_received, num_filled / 2);
+
+	*budget -= budget_used;
+	dev->quota -= budget_used;
+
+	if (budget_used != total_budget) {
+		data->rxpending = 0;
+		netif_rx_complete(dev);
+
+		TSI_WRITE(TSI108_EC_INTMASK,
+				     TSI_READ(TSI108_EC_INTMASK)
+				     & ~(TSI108_INT_RXQUEUE0
+					 | TSI108_INT_RXTHRESH |
+					 TSI108_INT_RXOVERRUN |
+					 TSI108_INT_RXERROR |
+					 TSI108_INT_RXWAIT));
+
+		/* IRQs are level-triggered, so no need to re-check */
+		return 0;
+	} else {
+		data->rxpending = 1;
+	}
+
+	return 1;
+}
+
+static void tsi108_rx_int(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	/* A race could cause dev to already be scheduled, so it's not an
+	 * error if that happens (and interrupts shouldn't be re-masked,
+	 * because that can cause harmful races, if poll has already
+	 * unmasked them but not cleared LINK_STATE_SCHED).
+	 *
+	 * This can happen if this code races with tsi108_poll(), which masks
+	 * the interrupts after tsi108_irq_one() read the mask, but before
+	 * netif_rx_schedule is called.  It could also happen due to calls
+	 * from tsi108_check_rxring().
+	 */
+
+	if (netif_rx_schedule_prep(dev)) {
+		/* Mask, rather than ack, the receive interrupts.  The ack
+		 * will happen in tsi108_poll().
+		 */
+
+		TSI_WRITE(TSI108_EC_INTMASK,
+				     TSI_READ(TSI108_EC_INTMASK) |
+				     TSI108_INT_RXQUEUE0
+				     | TSI108_INT_RXTHRESH |
+				     TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
+				     TSI108_INT_RXWAIT);
+		__netif_rx_schedule(dev);
+	} else {
+		if (!netif_running(dev)) {
+			/* This can happen if an interrupt occurs while the
+			 * interface is being brought down, as the START
+			 * bit is cleared before the stop function is called.
+			 *
+			 * In this case, the interrupts must be masked, or
+			 * they will continue indefinitely.
+			 *
+			 * There's a race here if the interface is brought down
+			 * and then up in rapid succession, as the device could
+			 * be made running after the above check and before
+			 * the masking below.  This will only happen if the IRQ
+			 * thread has a lower priority than the task bringing
+			 * up the interface.  Fixing this race would likely
+			 * require changes in generic code.
+			 */
+
+			TSI_WRITE(TSI108_EC_INTMASK,
+					     TSI_READ
+					     (TSI108_EC_INTMASK) |
+					     TSI108_INT_RXQUEUE0 |
+					     TSI108_INT_RXTHRESH |
+					     TSI108_INT_RXOVERRUN |
+					     TSI108_INT_RXERROR |
+					     TSI108_INT_RXWAIT);
+		}
+	}
+}
+
+/* If the RX ring has run out of memory, try periodically
+ * to allocate some more, as otherwise poll would never
+ * get called (apart from the initial end-of-queue condition).
+ *
+ * This is called once per second (by default) from the thread.
+ */
+
+static void tsi108_check_rxring(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	/* A poll is scheduled, as opposed to calling tsi108_refill_rx
+	 * directly, so as to keep the receive path single-threaded
+	 * (and thus not needing a lock).
+	 */
+
+	if (netif_running(dev) && data->rxfree < TSI108_RXRING_LEN / 4)
+		tsi108_rx_int(dev);
+}
+
+static void tsi108_tx_int(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 estat = TSI_READ(TSI108_EC_TXESTAT);
+
+	TSI_WRITE(TSI108_EC_TXESTAT, estat);
+	TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_TXQUEUE0 |
+			     TSI108_INT_TXIDLE | TSI108_INT_TXERROR);
+	if (estat & TSI108_EC_TXESTAT_Q0_ERR) {
+		u32 err = TSI_READ(TSI108_EC_TXERR);
+		TSI_WRITE(TSI108_EC_TXERR, err);
+
+		if (err && net_ratelimit())
+			printk(KERN_ERR "%s: TX error %x\n", dev->name, err);
+	}
+
+	if (estat & (TSI108_EC_TXESTAT_Q0_DESCINT | TSI108_EC_TXESTAT_Q0_EOQ)) {
+		spin_lock(&data->txlock);
+		tsi108_complete_tx(dev);
+		spin_unlock(&data->txlock);
+	}
+}
+
+
+static irqreturn_t tsi108_irq(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 stat = TSI_READ(TSI108_EC_INTSTAT);
+
+	if (!(stat & TSI108_INT_ANY))
+		return IRQ_NONE;	/* Not our interrupt */
+
+	stat &= ~TSI_READ(TSI108_EC_INTMASK);
+
+	if (stat & (TSI108_INT_TXQUEUE0 | TSI108_INT_TXIDLE |
+		    TSI108_INT_TXERROR))
+		tsi108_tx_int(dev);
+	if (stat & (TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
+		    TSI108_INT_RXWAIT | TSI108_INT_RXOVERRUN |
+		    TSI108_INT_RXERROR))
+		tsi108_rx_int(dev);
+
+	if (stat & TSI108_INT_SFN) {
+		if (net_ratelimit())
+			printk(KERN_DEBUG "%s: SFN error\n", dev->name);
+		TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_SFN);
+	}
+
+	if (stat & TSI108_INT_STATCARRY) {
+		tsi108_stat_carry(dev);
+		TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void tsi108_stop_ethernet(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int i = 1000;
+	/* Disable all TX and RX queues ... */
+	TSI_WRITE(TSI108_EC_TXCTRL, 0);
+	TSI_WRITE(TSI108_EC_RXCTRL, 0);
+
+	/* ...and wait for them to become idle */
+	while(i--) {
+		if(!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_ACTIVE))
+			break;
+		udelay(10);
+	}
+	i = 1000;
+	while(i--){
+		if(!(TSI_READ(TSI108_EC_RXSTAT) & TSI108_EC_RXSTAT_ACTIVE))
+			return;
+		udelay(10);
+	}
+	printk(KERN_ERR "%s function time out \n", __FUNCTION__);
+}
+
+static void tsi108_reset_ether(struct tsi108_prv_data * data)
+{
+	TSI_WRITE(TSI108_MAC_CFG1, TSI108_MAC_CFG1_SOFTRST);
+	udelay(100);
+	TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+	TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATRST);
+	udelay(100);
+	TSI_WRITE(TSI108_EC_PORTCTRL,
+			     TSI_READ(TSI108_EC_PORTCTRL) &
+			     ~TSI108_EC_PORTCTRL_STATRST);
+
+	TSI_WRITE(TSI108_EC_TXCFG, TSI108_EC_TXCFG_RST);
+	udelay(100);
+	TSI_WRITE(TSI108_EC_TXCFG,
+			     TSI_READ(TSI108_EC_TXCFG) &
+			     ~TSI108_EC_TXCFG_RST);
+
+	TSI_WRITE(TSI108_EC_RXCFG, TSI108_EC_RXCFG_RST);
+	udelay(100);
+	TSI_WRITE(TSI108_EC_RXCFG,
+			     TSI_READ(TSI108_EC_RXCFG) &
+			     ~TSI108_EC_RXCFG_RST);
+
+	TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
+			     TSI_READ(TSI108_MAC_MII_MGMT_CFG) |
+			     TSI108_MAC_MII_MGMT_RST);
+	udelay(100);
+	TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
+			     (TSI_READ(TSI108_MAC_MII_MGMT_CFG) &
+			     ~(TSI108_MAC_MII_MGMT_RST |
+			       TSI108_MAC_MII_MGMT_CLK)) | 0x07);
+}
+
+static int tsi108_get_mac(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 word1 = TSI_READ(TSI108_MAC_ADDR1);
+	u32 word2 = TSI_READ(TSI108_MAC_ADDR2);
+
+	/* Note that the octets are reversed from what the manual says,
+	 * producing an even weirder ordering...
+	 */
+	if (word2 == 0 && word1 == 0) {
+		dev->dev_addr[0] = 0x00;
+		dev->dev_addr[1] = 0x06;
+		dev->dev_addr[2] = 0xd2;
+		dev->dev_addr[3] = 0x00;
+		dev->dev_addr[4] = 0x00;
+		if (0x8 == data->phy)
+			dev->dev_addr[5] = 0x01;
+		else
+			dev->dev_addr[5] = 0x02;
+
+		word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
+
+		word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
+		    (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
+
+		TSI_WRITE(TSI108_MAC_ADDR1, word1);
+		TSI_WRITE(TSI108_MAC_ADDR2, word2);
+	} else {
+		dev->dev_addr[0] = (word2 >> 16) & 0xff;
+		dev->dev_addr[1] = (word2 >> 24) & 0xff;
+		dev->dev_addr[2] = (word1 >> 0) & 0xff;
+		dev->dev_addr[3] = (word1 >> 8) & 0xff;
+		dev->dev_addr[4] = (word1 >> 16) & 0xff;
+		dev->dev_addr[5] = (word1 >> 24) & 0xff;
+	}
+
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		printk(KERN_ERR "word1: %08x, word2: %08x\n", word1, word2);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int tsi108_set_mac(struct net_device *dev, void *addr)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 word1, word2;
+	int i;
+
+	if (!is_valid_ether_addr(addr))
+		return -EINVAL;
+
+	for (i = 0; i < 6; i++)
+		/* +2 is for the offset of the HW addr type */
+		dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
+
+	word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
+
+	word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
+	    (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
+
+	spin_lock_irq(&data->misclock);
+	TSI_WRITE(TSI108_MAC_ADDR1, word1);
+	TSI_WRITE(TSI108_MAC_ADDR2, word2);
+	spin_lock(&data->txlock);
+
+	if (data->txfree && data->link_up)
+		netif_wake_queue(dev);
+
+	spin_unlock(&data->txlock);
+	spin_unlock_irq(&data->misclock);
+	return 0;
+}
+
+/* Protected by dev->xmit_lock. */
+static void tsi108_set_rx_mode(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 rxcfg = TSI_READ(TSI108_EC_RXCFG);
+
+	if (dev->flags & IFF_PROMISC) {
+		rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH);
+		rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE;
+		goto out;
+	}
+
+	rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
+
+	if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+		int i;
+		struct dev_mc_list *mc = dev->mc_list;
+		rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
+
+		memset(data->mc_hash, 0, sizeof(data->mc_hash));
+
+		while (mc) {
+			u32 hash, crc;
+
+			if (mc->dmi_addrlen == 6) {
+				crc = ether_crc(6, mc->dmi_addr);
+				hash = crc >> 23;
+
+				__set_bit(hash, &data->mc_hash[0]);
+			} else {
+				printk(KERN_ERR
+				       "%s: got multicast address of length %d "
+				       "instead of 6.\n", dev->name,
+				       mc->dmi_addrlen);
+			}
+
+			mc = mc->next;
+		}
+
+		TSI_WRITE(TSI108_EC_HASHADDR,
+				     TSI108_EC_HASHADDR_AUTOINC |
+				     TSI108_EC_HASHADDR_MCAST);
+
+		for (i = 0; i < 16; i++) {
+			/* The manual says that the hardware may drop
+			 * back-to-back writes to the data register.
+			 */
+			udelay(1);
+			TSI_WRITE(TSI108_EC_HASHDATA,
+					     data->mc_hash[i]);
+		}
+	}
+
+      out:
+	TSI_WRITE(TSI108_EC_RXCFG, rxcfg);
+}
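
tsi108_set_rx_mode() above hashes each multicast address by taking the top nine bits of its Ethernet CRC-32, selecting one of the 512 filter bits that the sixteen 32-bit hash-data registers provide. A one-line sketch of the index computation (mcast_hash_bit() is a made-up name; ether_crc() comes from <linux/crc32.h>, which the driver already includes):

#include <linux/crc32.h>	/* ether_crc() */

/* Hypothetical helper mirroring the driver's hash computation. */
static inline unsigned int mcast_hash_bit(const unsigned char *addr)
{
	return ether_crc(6, addr) >> 23;	/* top 9 bits of CRC-32: 0..511 */
}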
+
+static void tsi108_init_phy(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 i = 0;
+	u16 phyval = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&phy_lock, flags);
+
+	tsi108_write_mii(data, MII_BMCR, BMCR_RESET);
+	for (i = 0; i < 100; i++) {
+		if (!(tsi108_read_mii(data, MII_BMCR) & BMCR_RESET))
+			break;
+		udelay(10);
+	}
+	if (i == 100)
+		printk(KERN_ERR "%s function time out \n", __FUNCTION__);
+
+#if (TSI108_PHY_TYPE == PHY_BCM54XX)	/* Broadcom BCM54xx PHY */
+	tsi108_write_mii(data, 0x09, 0x0300);
+	tsi108_write_mii(data, 0x10, 0x1020);
+	tsi108_write_mii(data, 0x1c, 0x8c00);
+#endif
+
+	tsi108_write_mii(data,
+			 MII_BMCR,
+			 BMCR_ANENABLE | BMCR_ANRESTART);
+	while (tsi108_read_mii(data, MII_BMCR) & BMCR_ANRESTART)
+		cpu_relax();
+
+	/* Set G/MII mode and receive clock select in TBI control #2.  The
+	 * second port won't work if this isn't done, even though we don't
+	 * use TBI mode.
+	 */
+
+	tsi108_write_tbi(data, 0x11, 0x30);
+
+	/* FIXME: It seems to take more than 2 back-to-back reads to the
+	 * PHY_STAT register before the link up status bit is set.
+	 */
+
+	data->link_up = 1;
+
+	while (!((phyval = tsi108_read_mii(data, MII_BMSR)) &
+		 BMSR_LSTATUS)) {
+		if (i++ > (MII_READ_DELAY / 10)) {
+			data->link_up = 0;
+			break;
+		}
+		spin_unlock_irqrestore(&phy_lock, flags);
+		msleep(10);
+		spin_lock_irqsave(&phy_lock, flags);
+	}
+
+	printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyval);
+	data->phy_ok = 1;
+	data->init_media = 1;
+	spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+static void tsi108_kill_phy(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&phy_lock, flags);
+	tsi108_write_mii(data, MII_BMCR, BMCR_PDOWN);
+	data->phy_ok = 0;
+	spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+static int tsi108_open(struct net_device *dev)
+{
+	int i;
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	unsigned int rxring_size = TSI108_RXRING_LEN * sizeof(rx_desc);
+	unsigned int txring_size = TSI108_TXRING_LEN * sizeof(tx_desc);
+
+	i = request_irq(data->irq_num, tsi108_irq, 0, dev->name, dev);
+	if (i != 0) {
+		printk(KERN_ERR "tsi108_eth%d: Could not allocate IRQ%d.\n",
+		       data->id, data->irq_num);
+		return i;
+	} else {
+		dev->irq = data->irq_num;
+		printk(KERN_NOTICE
+		       "tsi108_open : Port %d Assigned IRQ %d to %s\n",
+		       data->id, dev->irq, dev->name);
+	}
+
+	data->rxring = dma_alloc_coherent(NULL, rxring_size,
+			&data->rxdma, GFP_KERNEL);
+
+	if (!data->rxring) {
+		printk(KERN_DEBUG
+		       "TSI108_ETH: failed to allocate memory for rxring!\n");
+		return -ENOMEM;
+	} else {
+		memset(data->rxring, 0, rxring_size);
+	}
+
+	data->txring = dma_alloc_coherent(NULL, txring_size,
+			&data->txdma, GFP_KERNEL);
+
+	if (!data->txring) {
+		printk(KERN_DEBUG
+		       "TSI108_ETH: failed to allocate memory for txring!\n");
+		dma_free_coherent(NULL, rxring_size, data->rxring, data->rxdma);
+		return -ENOMEM;
+	} else {
+		memset(data->txring, 0, txring_size);
+	}
+
+	for (i = 0; i < TSI108_RXRING_LEN; i++) {
+		data->rxring[i].next0 = data->rxdma + (i + 1) * sizeof(rx_desc);
+		data->rxring[i].blen = TSI108_RXBUF_SIZE;
+		data->rxring[i].vlan = 0;
+	}
+
+	data->rxring[TSI108_RXRING_LEN - 1].next0 = data->rxdma;
+
+	data->rxtail = 0;
+	data->rxhead = 0;
+
+	for (i = 0; i < TSI108_RXRING_LEN; i++) {
+		struct sk_buff *skb = dev_alloc_skb(TSI108_RXBUF_SIZE + NET_IP_ALIGN);
+
+		if (!skb) {
+			/* Bah.  No memory for now, but maybe we'll get
+			 * some more later.
+			 * For now, we'll live with the smaller ring.
+			 */
+			printk(KERN_WARNING
+			       "%s: Could only allocate %d receive skb(s).\n",
+			       dev->name, i);
+			data->rxhead = i;
+			break;
+		}
+
+		data->rxskbs[i] = skb;
+		/* Align the payload on a 4-byte boundary */
+		skb_reserve(skb, 2);
+		data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
+		data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
+	}
+
+	data->rxfree = i;
+	TSI_WRITE(TSI108_EC_RXQ_PTRLOW, data->rxdma);
+
+	for (i = 0; i < TSI108_TXRING_LEN; i++) {
+		data->txring[i].next0 = data->txdma + (i + 1) * sizeof(tx_desc);
+		data->txring[i].misc = 0;
+	}
+
+	data->txring[TSI108_TXRING_LEN - 1].next0 = data->txdma;
+	data->txtail = 0;
+	data->txhead = 0;
+	data->txfree = TSI108_TXRING_LEN;
+	TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma);
+	tsi108_init_phy(dev);
+
+	setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev);
+	mod_timer(&data->timer, jiffies + 1);
+
+	tsi108_restart_rx(data, dev);
+
+	TSI_WRITE(TSI108_EC_INTSTAT, ~0);
+
+	TSI_WRITE(TSI108_EC_INTMASK,
+			     ~(TSI108_INT_TXQUEUE0 | TSI108_INT_RXERROR |
+			       TSI108_INT_RXTHRESH | TSI108_INT_RXQUEUE0 |
+			       TSI108_INT_RXOVERRUN | TSI108_INT_RXWAIT |
+			       TSI108_INT_SFN | TSI108_INT_STATCARRY));
+
+	TSI_WRITE(TSI108_MAC_CFG1,
+			     TSI108_MAC_CFG1_RXEN | TSI108_MAC_CFG1_TXEN);
+	netif_start_queue(dev);
+	return 0;
+}
+
+static int tsi108_close(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+
+	del_timer_sync(&data->timer);
+
+	tsi108_stop_ethernet(dev);
+	tsi108_kill_phy(dev);
+	TSI_WRITE(TSI108_EC_INTMASK, ~0);
+	TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+	/* Check for any pending TX packets, and drop them. */
+
+	while (!data->txfree || data->txhead != data->txtail) {
+		int tx = data->txtail;
+		struct sk_buff *skb;
+		skb = data->txskbs[tx];
+		data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
+		data->txfree++;
+		dev_kfree_skb(skb);
+	}
+
+	synchronize_irq(data->irq_num);
+	free_irq(data->irq_num, dev);
+
+	/* Discard the RX ring. */
+
+	while (data->rxfree) {
+		int rx = data->rxtail;
+		struct sk_buff *skb;
+
+		skb = data->rxskbs[rx];
+		data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
+		data->rxfree--;
+		dev_kfree_skb(skb);
+	}
+
+	dma_free_coherent(0,
+			    TSI108_RXRING_LEN * sizeof(rx_desc),
+			    data->rxring, data->rxdma);
+	dma_free_coherent(0,
+			    TSI108_TXRING_LEN * sizeof(tx_desc),
+			    data->txring, data->txdma);
+
+	return 0;
+}
+
+static void tsi108_init_mac(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	TSI_WRITE(TSI108_MAC_CFG2, TSI108_MAC_CFG2_DFLT_PREAMBLE |
+			     TSI108_MAC_CFG2_PADCRC);
+
+	TSI_WRITE(TSI108_EC_TXTHRESH,
+			     (192 << TSI108_EC_TXTHRESH_STARTFILL) |
+			     (192 << TSI108_EC_TXTHRESH_STOPFILL));
+
+	TSI_WRITE(TSI108_STAT_CARRYMASK1,
+			     ~(TSI108_STAT_CARRY1_RXBYTES |
+			       TSI108_STAT_CARRY1_RXPKTS |
+			       TSI108_STAT_CARRY1_RXFCS |
+			       TSI108_STAT_CARRY1_RXMCAST |
+			       TSI108_STAT_CARRY1_RXALIGN |
+			       TSI108_STAT_CARRY1_RXLENGTH |
+			       TSI108_STAT_CARRY1_RXRUNT |
+			       TSI108_STAT_CARRY1_RXJUMBO |
+			       TSI108_STAT_CARRY1_RXFRAG |
+			       TSI108_STAT_CARRY1_RXJABBER |
+			       TSI108_STAT_CARRY1_RXDROP));
+
+	TSI_WRITE(TSI108_STAT_CARRYMASK2,
+			     ~(TSI108_STAT_CARRY2_TXBYTES |
+			       TSI108_STAT_CARRY2_TXPKTS |
+			       TSI108_STAT_CARRY2_TXEXDEF |
+			       TSI108_STAT_CARRY2_TXEXCOL |
+			       TSI108_STAT_CARRY2_TXTCOL |
+			       TSI108_STAT_CARRY2_TXPAUSE));
+
+	TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATEN);
+	TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+	TSI_WRITE(TSI108_EC_RXCFG,
+			     TSI108_EC_RXCFG_SE | TSI108_EC_RXCFG_BFE);
+
+	TSI_WRITE(TSI108_EC_TXQ_CFG, TSI108_EC_TXQ_CFG_DESC_INT |
+			     TSI108_EC_TXQ_CFG_EOQ_OWN_INT |
+			     TSI108_EC_TXQ_CFG_WSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_TXQ_CFG_SFNPORT));
+
+	TSI_WRITE(TSI108_EC_RXQ_CFG, TSI108_EC_RXQ_CFG_DESC_INT |
+			     TSI108_EC_RXQ_CFG_EOQ_OWN_INT |
+			     TSI108_EC_RXQ_CFG_WSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_RXQ_CFG_SFNPORT));
+
+	TSI_WRITE(TSI108_EC_TXQ_BUFCFG,
+			     TSI108_EC_TXQ_BUFCFG_BURST256 |
+			     TSI108_EC_TXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_TXQ_BUFCFG_SFNPORT));
+
+	TSI_WRITE(TSI108_EC_RXQ_BUFCFG,
+			     TSI108_EC_RXQ_BUFCFG_BURST256 |
+			     TSI108_EC_RXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_RXQ_BUFCFG_SFNPORT));
+
+	TSI_WRITE(TSI108_EC_INTMASK, ~0);
+}
+
+static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL);
+}
+
+static int
+tsi108_init_one(struct platform_device *pdev)
+{
+	struct net_device *dev = NULL;
+	struct tsi108_prv_data *data = NULL;
+	hw_info *einfo;
+	int err = 0;
+
+	einfo = pdev->dev.platform_data;
+
+	if (NULL == einfo) {
+		printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
+		       pdev->id);
+		return -ENODEV;
+	}
+
+	/* Create an ethernet device instance */
+
+	dev = alloc_etherdev(sizeof(struct tsi108_prv_data));
+	if (!dev) {
+		printk("tsi108_eth: Could not allocate a device structure\n");
+		return -ENOMEM;
+	}
+
+	printk("tsi108_eth%d: probe...\n", pdev->id);
+	data = netdev_priv(dev);
+
+	pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n",
+			pdev->id, einfo->regs, einfo->phyregs,
+			einfo->phy, einfo->irq_num);
+
+	data->regs = ioremap(einfo->regs, 0x400);
+	if (NULL == data->regs) {
+		err = -ENOMEM;
+		goto regs_fail;
+	}
+
+	data->phyregs = ioremap(einfo->phyregs, 0x400);
+	if (NULL == data->phyregs) {
+		err = -ENOMEM;
+		goto regs_fail;
+	}
+	/* MII setup */
+	data->mii_if.dev = dev;
+	data->mii_if.mdio_read = tsi108_mdio_read;
+	data->mii_if.mdio_write = tsi108_mdio_write;
+	data->mii_if.phy_id = einfo->phy;
+	data->mii_if.phy_id_mask = 0x1f;
+	data->mii_if.reg_num_mask = 0x1f;
+	data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
+
+	data->phy = einfo->phy;
+	data->irq_num = einfo->irq_num;
+	data->id = pdev->id;
+	dev->open = tsi108_open;
+	dev->stop = tsi108_close;
+	dev->hard_start_xmit = tsi108_send_packet;
+	dev->set_mac_address = tsi108_set_mac;
+	dev->set_multicast_list = tsi108_set_rx_mode;
+	dev->get_stats = tsi108_get_stats;
+	dev->poll = tsi108_poll;
+	dev->do_ioctl = tsi108_do_ioctl;
+	dev->weight = 64;  /* 64 is more suitable for GigE interface - klai */
+
+	/* Apparently, the Linux networking code won't use scatter-gather
+	 * if the hardware doesn't do checksums.  However, it's faster
+	 * to checksum in place and use SG, as (among other reasons)
+	 * the cache won't be dirtied (which then has to be flushed
+	 * before DMA).  The checksumming is done by the driver (via
+	 * a new function skb_csum_dev() in net/core/skbuff.c).
+	 */
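+	/* (Illustrative note, an assumption rather than merged code: if that
+	 *  path were wired up, the driver would also advertise something
+	 *  like NETIF_F_SG | NETIF_F_HW_CSUM here; as it stands it only
+	 *  sets NETIF_F_HIGHDMA below.)
+	 */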
+
+	dev->features = NETIF_F_HIGHDMA;
+	SET_MODULE_OWNER(dev);
+
+	spin_lock_init(&data->txlock);
+	spin_lock_init(&data->misclock);
+
+	tsi108_reset_ether(data);
+	tsi108_kill_phy(dev);
+
+	if ((err = tsi108_get_mac(dev)) != 0) {
+		printk(KERN_ERR "%s: Invalid MAC address.  Please correct.\n",
+		       dev->name);
+		goto register_fail;
+	}
+
+	tsi108_init_mac(dev);
+	err = register_netdev(dev);
+	if (err) {
+		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
+				dev->name);
+		goto register_fail;
+	}
+
+	printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: "
+	       "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
+	       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+#ifdef DEBUG
+	data->msg_enable = DEBUG;
+	dump_eth_one(dev);
+#endif
+
+	return 0;
+
+register_fail:
+	iounmap(data->regs);
+	iounmap(data->phyregs);
+
+regs_fail:
+	free_netdev(dev);
+	return err;
+}
+
+/* There's no way to either get interrupts from the PHY when
+ * something changes, or to have the Tsi108 automatically communicate
+ * with the PHY to reconfigure itself.
+ *
+ * Thus, we have to do it using a timer.
+ */
+
+static void tsi108_timed_checker(unsigned long dev_ptr)
+{
+	struct net_device *dev = (struct net_device *)dev_ptr;
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	tsi108_check_phy(dev);
+	tsi108_check_rxring(dev);
+	mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
+}
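+/*
+ * Illustrative sketch, not part of the driver as merged: with the polling
+ * scheme described above, the open path would be expected to arm the timer
+ * roughly as follows (the exact placement in tsi108_open() is an assumption
+ * here), while tsi108_close() tears it down with del_timer_sync() as shown
+ * earlier:
+ *
+ *	init_timer(&data->timer);
+ *	data->timer.data = (unsigned long)dev;
+ *	data->timer.function = tsi108_timed_checker;
+ *	mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
+ */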
+
+static int tsi108_ether_init(void)
+{
+	int ret;
+	ret = platform_driver_register (&tsi_eth_driver);
+	if (ret < 0){
+		printk("tsi108_ether_init: error initializing ethernet "
+		       "device\n");
+		return ret;
+	}
+	return 0;
+}
+
+static int tsi108_ether_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct tsi108_prv_data *priv = netdev_priv(dev);
+
+	unregister_netdev(dev);
+	tsi108_stop_ethernet(dev);
+	platform_set_drvdata(pdev, NULL);
+	iounmap(priv->regs);
+	iounmap(priv->phyregs);
+	free_netdev(dev);
+
+	return 0;
+}
+static void tsi108_ether_exit(void)
+{
+	platform_driver_unregister(&tsi_eth_driver);
+}
+
+module_init(tsi108_ether_init);
+module_exit(tsi108_ether_exit);
+
+MODULE_AUTHOR("Tundra Semiconductor Corporation");
+MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/tsi108_eth.h b/drivers/net/tsi108_eth.h
new file mode 100644
index 0000000..77a769d
--- /dev/null
+++ b/drivers/net/tsi108_eth.h
@@ -0,0 +1,365 @@
+/*
+ * (C) Copyright 2005 Tundra Semiconductor Corp.
+ * Kong Lai, <kong.lai@tundra.com).
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * drivers/net/tsi108_eth.h - definitions for Tsi108 GIGE network controller.
+ */
+
+#ifndef __TSI108_ETH_H
+#define __TSI108_ETH_H
+
+#include <linux/types.h>
+
+#define TSI_WRITE(offset, val) \
+	out_be32((data->regs + (offset)), val)
+
+#define TSI_READ(offset) \
+	in_be32((data->regs + (offset)))
+
+#define TSI_WRITE_PHY(offset, val) \
+	out_be32((data->phyregs + (offset)), val)
+
+#define TSI_READ_PHY(offset) \
+	in_be32((data->phyregs + (offset)))
+
+/*
+ * PHY Configuration Options
+ *
+ * NOTE: Enable the set of definitions corresponding to your board type
+ */
+#define PHY_MV88E	1	/* Marvell 88Exxxx PHY */
+#define PHY_BCM54XX	2	/* Broadcom BCM54xx PHY */
+#define TSI108_PHY_TYPE	PHY_MV88E
+
+/*
+ * TSI108 GIGE port registers
+ */
+
+#define TSI108_ETH_PORT_NUM		2
+#define TSI108_PBM_PORT			2
+#define TSI108_SDRAM_PORT		4
+
+#define TSI108_MAC_CFG1			(0x000)
+#define TSI108_MAC_CFG1_SOFTRST		(1 << 31)
+#define TSI108_MAC_CFG1_LOOPBACK	(1 << 8)
+#define TSI108_MAC_CFG1_RXEN		(1 << 2)
+#define TSI108_MAC_CFG1_TXEN		(1 << 0)
+
+#define TSI108_MAC_CFG2			(0x004)
+#define TSI108_MAC_CFG2_DFLT_PREAMBLE	(7 << 12)
+#define TSI108_MAC_CFG2_IFACE_MASK	(3 << 8)
+#define TSI108_MAC_CFG2_NOGIG		(1 << 8)
+#define TSI108_MAC_CFG2_GIG		(2 << 8)
+#define TSI108_MAC_CFG2_PADCRC		(1 << 2)
+#define TSI108_MAC_CFG2_FULLDUPLEX	(1 << 0)
+
+#define TSI108_MAC_MII_MGMT_CFG		(0x020)
+#define TSI108_MAC_MII_MGMT_CLK		(7 << 0)
+#define TSI108_MAC_MII_MGMT_RST		(1 << 31)
+
+#define TSI108_MAC_MII_CMD		(0x024)
+#define TSI108_MAC_MII_CMD_READ		(1 << 0)
+
+#define TSI108_MAC_MII_ADDR		(0x028)
+#define TSI108_MAC_MII_ADDR_REG		0
+#define TSI108_MAC_MII_ADDR_PHY		8
+
+#define TSI108_MAC_MII_DATAOUT		(0x02c)
+#define TSI108_MAC_MII_DATAIN		(0x030)
+
+#define TSI108_MAC_MII_IND		(0x034)
+#define TSI108_MAC_MII_IND_NOTVALID	(1 << 2)
+#define TSI108_MAC_MII_IND_SCANNING	(1 << 1)
+#define TSI108_MAC_MII_IND_BUSY		(1 << 0)
+
+#define TSI108_MAC_IFCTRL		(0x038)
+#define TSI108_MAC_IFCTRL_PHYMODE	(1 << 24)
+
+#define TSI108_MAC_ADDR1		(0x040)
+#define TSI108_MAC_ADDR2		(0x044)
+
+#define TSI108_STAT_RXBYTES		(0x06c)
+#define TSI108_STAT_RXBYTES_CARRY	(1 << 24)
+
+#define TSI108_STAT_RXPKTS		(0x070)
+#define TSI108_STAT_RXPKTS_CARRY	(1 << 18)
+
+#define TSI108_STAT_RXFCS		(0x074)
+#define TSI108_STAT_RXFCS_CARRY		(1 << 12)
+
+#define TSI108_STAT_RXMCAST		(0x078)
+#define TSI108_STAT_RXMCAST_CARRY	(1 << 18)
+
+#define TSI108_STAT_RXALIGN		(0x08c)
+#define TSI108_STAT_RXALIGN_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXLENGTH		(0x090)
+#define TSI108_STAT_RXLENGTH_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXRUNT		(0x09c)
+#define TSI108_STAT_RXRUNT_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXJUMBO		(0x0a0)
+#define TSI108_STAT_RXJUMBO_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXFRAG		(0x0a4)
+#define TSI108_STAT_RXFRAG_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXJABBER		(0x0a8)
+#define TSI108_STAT_RXJABBER_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXDROP		(0x0ac)
+#define TSI108_STAT_RXDROP_CARRY	(1 << 12)
+
+#define TSI108_STAT_TXBYTES		(0x0b0)
+#define TSI108_STAT_TXBYTES_CARRY	(1 << 24)
+
+#define TSI108_STAT_TXPKTS		(0x0b4)
+#define TSI108_STAT_TXPKTS_CARRY	(1 << 18)
+
+#define TSI108_STAT_TXEXDEF		(0x0c8)
+#define TSI108_STAT_TXEXDEF_CARRY	(1 << 12)
+
+#define TSI108_STAT_TXEXCOL		(0x0d8)
+#define TSI108_STAT_TXEXCOL_CARRY	(1 << 12)
+
+#define TSI108_STAT_TXTCOL		(0x0dc)
+#define TSI108_STAT_TXTCOL_CARRY	(1 << 13)
+
+#define TSI108_STAT_TXPAUSEDROP		(0x0e4)
+#define TSI108_STAT_TXPAUSEDROP_CARRY	(1 << 12)
+
+#define TSI108_STAT_CARRY1		(0x100)
+#define TSI108_STAT_CARRY1_RXBYTES	(1 << 16)
+#define TSI108_STAT_CARRY1_RXPKTS	(1 << 15)
+#define TSI108_STAT_CARRY1_RXFCS	(1 << 14)
+#define TSI108_STAT_CARRY1_RXMCAST	(1 << 13)
+#define TSI108_STAT_CARRY1_RXALIGN	(1 << 8)
+#define TSI108_STAT_CARRY1_RXLENGTH	(1 << 7)
+#define TSI108_STAT_CARRY1_RXRUNT	(1 << 4)
+#define TSI108_STAT_CARRY1_RXJUMBO	(1 << 3)
+#define TSI108_STAT_CARRY1_RXFRAG	(1 << 2)
+#define TSI108_STAT_CARRY1_RXJABBER	(1 << 1)
+#define TSI108_STAT_CARRY1_RXDROP	(1 << 0)
+
+#define TSI108_STAT_CARRY2		(0x104)
+#define TSI108_STAT_CARRY2_TXBYTES	(1 << 13)
+#define TSI108_STAT_CARRY2_TXPKTS	(1 << 12)
+#define TSI108_STAT_CARRY2_TXEXDEF	(1 << 7)
+#define TSI108_STAT_CARRY2_TXEXCOL	(1 << 3)
+#define TSI108_STAT_CARRY2_TXTCOL	(1 << 2)
+#define TSI108_STAT_CARRY2_TXPAUSE	(1 << 0)
+
+#define TSI108_STAT_CARRYMASK1		(0x108)
+#define TSI108_STAT_CARRYMASK2		(0x10c)
+
+#define TSI108_EC_PORTCTRL		(0x200)
+#define TSI108_EC_PORTCTRL_STATRST	(1 << 31)
+#define TSI108_EC_PORTCTRL_STATEN	(1 << 28)
+#define TSI108_EC_PORTCTRL_NOGIG	(1 << 18)
+#define TSI108_EC_PORTCTRL_HALFDUPLEX	(1 << 16)
+
+#define TSI108_EC_INTSTAT		(0x204)
+#define TSI108_EC_INTMASK		(0x208)
+
+#define TSI108_INT_ANY			(1 << 31)
+#define TSI108_INT_SFN			(1 << 30)
+#define TSI108_INT_RXIDLE		(1 << 29)
+#define TSI108_INT_RXABORT		(1 << 28)
+#define TSI108_INT_RXERROR		(1 << 27)
+#define TSI108_INT_RXOVERRUN		(1 << 26)
+#define TSI108_INT_RXTHRESH		(1 << 25)
+#define TSI108_INT_RXWAIT		(1 << 24)
+#define TSI108_INT_RXQUEUE0		(1 << 16)
+#define TSI108_INT_STATCARRY		(1 << 15)
+#define TSI108_INT_TXIDLE		(1 << 13)
+#define TSI108_INT_TXABORT		(1 << 12)
+#define TSI108_INT_TXERROR		(1 << 11)
+#define TSI108_INT_TXUNDERRUN		(1 << 10)
+#define TSI108_INT_TXTHRESH		(1 <<  9)
+#define TSI108_INT_TXWAIT		(1 <<  8)
+#define TSI108_INT_TXQUEUE0		(1 <<  0)
+
+#define TSI108_EC_TXCFG			(0x220)
+#define TSI108_EC_TXCFG_RST		(1 << 31)
+
+#define TSI108_EC_TXCTRL		(0x224)
+#define TSI108_EC_TXCTRL_IDLEINT	(1 << 31)
+#define TSI108_EC_TXCTRL_ABORT		(1 << 30)
+#define TSI108_EC_TXCTRL_GO		(1 << 15)
+#define TSI108_EC_TXCTRL_QUEUE0		(1 <<  0)
+
+#define TSI108_EC_TXSTAT		(0x228)
+#define TSI108_EC_TXSTAT_ACTIVE		(1 << 15)
+#define TSI108_EC_TXSTAT_QUEUE0		(1 << 0)
+
+#define TSI108_EC_TXESTAT		(0x22c)
+#define TSI108_EC_TXESTAT_Q0_ERR	(1 << 24)
+#define TSI108_EC_TXESTAT_Q0_DESCINT	(1 << 16)
+#define TSI108_EC_TXESTAT_Q0_EOF	(1 <<  8)
+#define TSI108_EC_TXESTAT_Q0_EOQ	(1 <<  0)
+
+#define TSI108_EC_TXERR			(0x278)
+
+#define TSI108_EC_TXQ_CFG		(0x280)
+#define TSI108_EC_TXQ_CFG_DESC_INT	(1 << 20)
+#define TSI108_EC_TXQ_CFG_EOQ_OWN_INT	(1 << 19)
+#define TSI108_EC_TXQ_CFG_WSWP		(1 << 11)
+#define TSI108_EC_TXQ_CFG_BSWP		(1 << 10)
+#define TSI108_EC_TXQ_CFG_SFNPORT	0
+
+#define TSI108_EC_TXQ_BUFCFG		(0x284)
+#define TSI108_EC_TXQ_BUFCFG_BURST8	(0 << 8)
+#define TSI108_EC_TXQ_BUFCFG_BURST32	(1 << 8)
+#define TSI108_EC_TXQ_BUFCFG_BURST128	(2 << 8)
+#define TSI108_EC_TXQ_BUFCFG_BURST256	(3 << 8)
+#define TSI108_EC_TXQ_BUFCFG_WSWP	(1 << 11)
+#define TSI108_EC_TXQ_BUFCFG_BSWP	(1 << 10)
+#define TSI108_EC_TXQ_BUFCFG_SFNPORT	0
+
+#define TSI108_EC_TXQ_PTRLOW		(0x288)
+
+#define TSI108_EC_TXQ_PTRHIGH		(0x28c)
+#define TSI108_EC_TXQ_PTRHIGH_VALID	(1 << 31)
+
+#define TSI108_EC_TXTHRESH		(0x230)
+#define TSI108_EC_TXTHRESH_STARTFILL	0
+#define TSI108_EC_TXTHRESH_STOPFILL	16
+
+#define TSI108_EC_RXCFG			(0x320)
+#define TSI108_EC_RXCFG_RST		(1 << 31)
+
+#define TSI108_EC_RXSTAT		(0x328)
+#define TSI108_EC_RXSTAT_ACTIVE		(1 << 15)
+#define TSI108_EC_RXSTAT_QUEUE0		(1 << 0)
+
+#define TSI108_EC_RXESTAT		(0x32c)
+#define TSI108_EC_RXESTAT_Q0_ERR	(1 << 24)
+#define TSI108_EC_RXESTAT_Q0_DESCINT	(1 << 16)
+#define TSI108_EC_RXESTAT_Q0_EOF	(1 <<  8)
+#define TSI108_EC_RXESTAT_Q0_EOQ	(1 <<  0)
+
+#define TSI108_EC_HASHADDR		(0x360)
+#define TSI108_EC_HASHADDR_AUTOINC	(1 << 31)
+#define TSI108_EC_HASHADDR_DO1STREAD	(1 << 30)
+#define TSI108_EC_HASHADDR_UNICAST	(0 <<  4)
+#define TSI108_EC_HASHADDR_MCAST	(1 <<  4)
+
+#define TSI108_EC_HASHDATA		(0x364)
+
+#define TSI108_EC_RXQ_PTRLOW		(0x388)
+
+#define TSI108_EC_RXQ_PTRHIGH		(0x38c)
+#define TSI108_EC_RXQ_PTRHIGH_VALID	(1 << 31)
+
+/* Station Enable -- accept packets destined for us */
+#define TSI108_EC_RXCFG_SE		(1 << 13)
+/* Unicast Frame Enable -- for packets not destined for us */
+#define TSI108_EC_RXCFG_UFE		(1 << 12)
+/* Multicast Frame Enable */
+#define TSI108_EC_RXCFG_MFE		(1 << 11)
+/* Broadcast Frame Enable */
+#define TSI108_EC_RXCFG_BFE		(1 << 10)
+#define TSI108_EC_RXCFG_UC_HASH		(1 <<  9)
+#define TSI108_EC_RXCFG_MC_HASH		(1 <<  8)
+
+#define TSI108_EC_RXQ_CFG		(0x380)
+#define TSI108_EC_RXQ_CFG_DESC_INT	(1 << 20)
+#define TSI108_EC_RXQ_CFG_EOQ_OWN_INT	(1 << 19)
+#define TSI108_EC_RXQ_CFG_WSWP		(1 << 11)
+#define TSI108_EC_RXQ_CFG_BSWP		(1 << 10)
+#define TSI108_EC_RXQ_CFG_SFNPORT	0
+
+#define TSI108_EC_RXQ_BUFCFG		(0x384)
+#define TSI108_EC_RXQ_BUFCFG_BURST8	(0 << 8)
+#define TSI108_EC_RXQ_BUFCFG_BURST32	(1 << 8)
+#define TSI108_EC_RXQ_BUFCFG_BURST128	(2 << 8)
+#define TSI108_EC_RXQ_BUFCFG_BURST256	(3 << 8)
+#define TSI108_EC_RXQ_BUFCFG_WSWP	(1 << 11)
+#define TSI108_EC_RXQ_BUFCFG_BSWP	(1 << 10)
+#define TSI108_EC_RXQ_BUFCFG_SFNPORT	0
+
+#define TSI108_EC_RXCTRL		(0x324)
+#define TSI108_EC_RXCTRL_ABORT		(1 << 30)
+#define TSI108_EC_RXCTRL_GO		(1 << 15)
+#define TSI108_EC_RXCTRL_QUEUE0		(1 << 0)
+
+#define TSI108_EC_RXERR			(0x378)
+
+#define TSI108_TX_EOF	(1 << 0)	/* End of frame; last fragment of packet */
+#define TSI108_TX_SOF	(1 << 1)	/* Start of frame; first frag. of packet */
+#define TSI108_TX_VLAN	(1 << 2)	/* Per-frame VLAN: enables VLAN override */
+#define TSI108_TX_HUGE	(1 << 3)	/* Huge frame enable */
+#define TSI108_TX_PAD	(1 << 4)	/* Pad the packet if too short */
+#define TSI108_TX_CRC	(1 << 5)	/* Generate CRC for this packet */
+#define TSI108_TX_INT	(1 << 14)	/* Generate an IRQ after frag. processed */
+#define TSI108_TX_RETRY	(0xf << 16)	/* 4 bit field indicating num. of retries */
+#define TSI108_TX_COL	(1 << 20)	/* Set if a collision occured */
+#define TSI108_TX_LCOL	(1 << 24)	/* Set if a late collision occured */
+#define TSI108_TX_UNDER	(1 << 25)	/* Set if a FIFO underrun occured */
+#define TSI108_TX_RLIM	(1 << 26)	/* Set if the retry limit was reached */
+#define TSI108_TX_OK	(1 << 30)	/* Set if the frame TX was successful */
+#define TSI108_TX_OWN	(1 << 31)	/* Set if the device owns the descriptor */
+
+/* Note: the descriptor layouts assume big-endian byte order. */
+typedef struct {
+	u32 buf0;
+	u32 buf1;		/* Base address of buffer */
+	u32 next0;		/* Address of next descriptor, if any */
+	u32 next1;
+	u16 vlan;		/* VLAN, if override enabled for this packet */
+	u16 len;		/* Length of buffer in bytes */
+	u32 misc;		/* See TSI108_TX_* above */
+	u32 reserved0;		/* reserved0 and reserved1 are added to make the desc */
+	u32 reserved1;		/* 32-byte aligned */
+} __attribute__ ((aligned(32))) tx_desc;
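+/*
+ * Illustrative sketch (an assumption, not code lifted from the driver):
+ * a single-fragment transmit would mark its descriptor SOF+EOF, request
+ * padding, CRC generation and a completion interrupt, and hand ownership
+ * to the hardware only once the rest of the descriptor is in place:
+ *
+ *	txd->len  = skb->len;
+ *	txd->misc = TSI108_TX_SOF | TSI108_TX_EOF | TSI108_TX_PAD |
+ *		    TSI108_TX_CRC | TSI108_TX_INT;
+ *	wmb();
+ *	txd->misc |= TSI108_TX_OWN;
+ */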
+
+#define TSI108_RX_EOF	(1 << 0)	/* End of frame; last fragment of packet */
+#define TSI108_RX_SOF	(1 << 1)	/* Start of frame; first frag. of packet */
+#define TSI108_RX_VLAN	(1 << 2)	/* Set on SOF if packet has a VLAN */
+#define TSI108_RX_FTYPE	(1 << 3)	/* Length/Type field is type, not length */
+#define TSI108_RX_RUNT	(1 << 4)/* Packet is less than minimum size */
+#define TSI108_RX_HASH	(1 << 7)/* Hash table match */
+#define TSI108_RX_BAD	(1 << 8)	/* Bad frame */
+#define TSI108_RX_OVER	(1 << 9)	/* FIFO overrun occured */
+#define TSI108_RX_TRUNC	(1 << 11)	/* Packet truncated due to excess length */
+#define TSI108_RX_CRC	(1 << 12)	/* Packet had a CRC error */
+#define TSI108_RX_INT	(1 << 13)	/* Generate an IRQ after frag. processed */
+#define TSI108_RX_OWN	(1 << 15)	/* Set if the device owns the descriptor */
+
+#define TSI108_RX_SKB_SIZE 1536		/* The RX skb length */
+
+typedef struct {
+	u32 buf0;		/* Base address of buffer */
+	u32 buf1;		/* Base address of buffer */
+	u32 next0;		/* Address of next descriptor, if any */
+	u32 next1;		/* Address of next descriptor, if any */
+	u16 vlan;		/* VLAN of received packet, first frag only */
+	u16 len;		/* Length of received fragment in bytes */
+	u16 blen;		/* Length of buffer in bytes */
+	u16 misc;		/* See TSI108_RX_* above */
+	u32 reserved0;		/* reserved0 and reserved1 are added to make the desc */
+	u32 reserved1;		/* 32-byte aligned */
+} __attribute__ ((aligned(32))) rx_desc;
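+/*
+ * Illustrative sketch (assumption): receive completion walks the ring until
+ * it reaches a descriptor the hardware still owns, using the flags above:
+ *
+ *	while (!(rxd->misc & TSI108_RX_OWN)) {
+ *		if (rxd->misc & TSI108_RX_BAD)
+ *			...count the error and recycle the buffer...
+ *		else
+ *			...hand rxd->len bytes up the stack...
+ *		rxd = next descriptor in the ring;
+ *	}
+ */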
+
+#endif				/* __TSI108_ETH_H */
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index fa3a2bb..942b839 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -26,10 +26,11 @@
 
 /* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
    of available transceivers.  */
-void t21142_media_task(void *data)
+void t21142_media_task(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	struct tulip_private *tp = netdev_priv(dev);
+	struct tulip_private *tp =
+		container_of(work, struct tulip_private, media_work);
+	struct net_device *dev = tp->dev;
 	void __iomem *ioaddr = tp->base_addr;
 	int csr12 = ioread32(ioaddr + CSR12);
 	int next_tick = 60*HZ;
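The tulip changes in this series all follow the same work_struct conversion:
the work handler now takes the struct work_struct itself and recovers its
private data via container_of(), INIT_WORK() loses its extra data argument,
and a struct net_device back-pointer is added to the private structure for
the handler's use.  A minimal sketch of the pattern, with hypothetical names,
assuming the reworked workqueue API this kernel introduces:

	struct foo_private {
		struct net_device *dev;
		struct work_struct media_work;
	};

	static void foo_media_task(struct work_struct *work)
	{
		struct foo_private *tp =
			container_of(work, struct foo_private, media_work);
		struct net_device *dev = tp->dev;

		/* reconfigure the media using dev and tp */
	}

	/* at probe time */
	tp->dev = dev;
	INIT_WORK(&tp->media_work, foo_media_task);
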
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index f6b3a94..9d67f11 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1906,9 +1906,7 @@
 			de->media[i].csr15 = t21041_csr15[i];
 	}
 
-	de->ee_data = kmalloc(DE_EEPROM_SIZE, GFP_KERNEL);
-	if (de->ee_data)
-		memcpy(de->ee_data, &ee_data[0], DE_EEPROM_SIZE);
+	de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);
 
 	return;
 
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 3f4b640..4b3cd3d 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -473,9 +473,9 @@
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
 #include <asm/uaccess.h>
-#ifdef CONFIG_PPC_MULTIPLATFORM
+#ifdef CONFIG_PPC_PMAC
 #include <asm/machdep.h>
-#endif /* CONFIG_PPC_MULTIPLATFORM */
+#endif /* CONFIG_PPC_PMAC */
 
 #include "de4x5.h"
 
@@ -4151,7 +4151,7 @@
     /* If possible, try to fix a broken card - SMC only so far */
     srom_repair(dev, broken);
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
+#ifdef CONFIG_PPC_PMAC
     /*
     ** If the address starts with 00 a0, we have to bit-reverse
     ** each byte of the address.
@@ -4168,7 +4168,7 @@
 		    dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
 	    }
     }
-#endif /* CONFIG_PPC_MULTIPLATFORM */
+#endif /* CONFIG_PPC_PMAC */
 
     /* Test for a bad enet address */
     status = test_bad_enet(dev, status);
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 4dd8a0b..7f59a3d 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -187,7 +187,7 @@
 struct dmfe_board_info {
 	u32 chip_id;			/* Chip vendor/Device ID */
 	u32 chip_revision;		/* Chip revision */
-	struct DEVICE *next_dev;	/* next device */
+	struct DEVICE *dev;		/* net device */
 	struct pci_dev *pdev;		/* PCI device */
 	spinlock_t lock;
 
@@ -399,6 +399,8 @@
 	/* Init system & device */
 	db = netdev_priv(dev);
 
+	db->dev = dev;
+
 	/* Allocate Tx/Rx descriptor memory */
 	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
 	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
@@ -426,6 +428,7 @@
 	dev->poll_controller = &poll_dmfe;
 #endif
 	dev->ethtool_ops = &netdev_ethtool_ops;
+	netif_carrier_off(db->dev);
 	spin_lock_init(&db->lock);
 
 	pci_read_config_dword(pdev, 0x50, &pci_pmr);
@@ -1050,6 +1053,7 @@
 
 static const struct ethtool_ops netdev_ethtool_ops = {
 	.get_drvinfo		= netdev_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
 };
 
 /*
@@ -1144,6 +1148,7 @@
 		/* Link Failed */
 		DMFE_DBUG(0, "Link Failed", tmp_cr12);
 		db->link_failed = 1;
+		netif_carrier_off(db->dev);
 
 		/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
 		/* AUTO or force 1M Homerun/Longrun don't need */
@@ -1166,6 +1171,8 @@
 			if ( (db->media_mode & DMFE_AUTO) &&
 				dmfe_sense_speed(db) )
 				db->link_failed = 1;
+			else
+				netif_carrier_on(db->dev);
 			dmfe_process_mode(db);
 			/* SHOW_MEDIA_TYPE(db->op_mode); */
 		}
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index 066e5d6..df326fe 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -18,10 +18,11 @@
 #include "tulip.h"
 
 
-void tulip_media_task(void *data)
+void tulip_media_task(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	struct tulip_private *tp = netdev_priv(dev);
+	struct tulip_private *tp =
+		container_of(work, struct tulip_private, media_work);
+	struct net_device *dev = tp->dev;
 	void __iomem *ioaddr = tp->base_addr;
 	u32 csr12 = ioread32(ioaddr + CSR12);
 	int next_tick = 2*HZ;
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index ad107f4..25f25da 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -44,7 +44,7 @@
 	int valid_intrs;	/* CSR7 interrupt enable settings */
 	int flags;
 	void (*media_timer) (unsigned long);
-	void (*media_task) (void *);
+	work_func_t media_task;
 };
 
 
@@ -392,6 +392,7 @@
 	int csr12_shadow;
 	int pad0;		/* Used for 8-byte alignment */
 	struct work_struct media_work;
+	struct net_device *dev;
 };
 
 
@@ -406,7 +407,7 @@
 
 /* 21142.c */
 extern u16 t21142_csr14[];
-void t21142_media_task(void *data);
+void t21142_media_task(struct work_struct *work);
 void t21142_start_nway(struct net_device *dev);
 void t21142_lnk_change(struct net_device *dev, int csr5);
 
@@ -444,7 +445,7 @@
 void pnic_timer(unsigned long data);
 
 /* timer.c */
-void tulip_media_task(void *data);
+void tulip_media_task(struct work_struct *work);
 void mxic_timer(unsigned long data);
 void comet_timer(unsigned long data);
 
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 0aee618..5a35354 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1367,6 +1367,7 @@
 	 * it is zeroed and aligned in alloc_etherdev
 	 */
 	tp = netdev_priv(dev);
+	tp->dev = dev;
 
 	tp->rx_ring = pci_alloc_consistent(pdev,
 					   sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
@@ -1389,7 +1390,7 @@
 	tp->timer.data = (unsigned long)dev;
 	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
 
-	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task, dev);
+	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
 
 	dev->base_addr = (unsigned long)ioaddr;
 
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 3bf9e63..9781b16 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -117,6 +117,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
+#include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/ethtool.h>
@@ -127,7 +128,6 @@
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <linux/in6.h>
-#include <asm/checksum.h>
 #include <linux/version.h>
 #include <linux/dma-mapping.h>
 
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index b378880..1f05511 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -30,7 +30,7 @@
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 
-#include <asm/of_device.h>
+#include <asm/of_platform.h>
 #include <asm/uaccess.h>
 #include <asm/irq.h>
 #include <asm/io.h>
@@ -4301,12 +4301,12 @@
 		memcpy(&(ugeth_info[i]), &ugeth_primary_info,
 		       sizeof(ugeth_primary_info));
 
-	return of_register_driver(&ucc_geth_driver);
+	return of_register_platform_driver(&ucc_geth_driver);
 }
 
 static void __exit ucc_geth_exit(void)
 {
-	of_unregister_driver(&ucc_geth_driver);
+	of_unregister_platform_driver(&ucc_geth_driver);
 }
 
 module_init(ucc_geth_init);
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index b5d0d7f..d5ab9cf 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -57,44 +57,6 @@
 	  The driver will be compiled as a module: the
 	  module will be called cosa.
 
-config DSCC4
-	tristate "Etinc PCISYNC serial board support"
-	depends on WAN && PCI && m
-	help
-	  Driver for Etinc PCISYNC boards based on the Infineon (ex. Siemens)
-	  DSCC4 chipset.
-
-	  This is supposed to work with the four port card. Take a look at
-	  <http://www.cogenit.fr/dscc4/> for further information about the
-	  driver.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called dscc4.
-
-config DSCC4_PCISYNC
-	bool "Etinc PCISYNC features"
-	depends on DSCC4
-	help
-	  Due to Etinc's design choice for its PCISYNC cards, some operations
-	  are only allowed on specific ports of the DSCC4. This option is the
-	  only way for the driver to know that it shouldn't return a success
-	  code for these operations.
-
-	  Please say Y if your card is an Etinc's PCISYNC.
-
-config DSCC4_PCI_RST
-	bool "Hard reset support"
-	depends on DSCC4
-	help
-	  Various DSCC4 bugs forbid any reliable software reset of the ASIC.
-	  As a replacement, some vendors provide a way to assert the PCI #RST
-	  pin of DSCC4 through the GPIO port of the card. If you choose Y,
-	  the driver will make use of this feature before module removal
-	  (i.e. rmmod). The feature is known to be available on Commtech's
-	  cards. Contact your manufacturer for details.
-
-	  Say Y if your card supports this feature.
-
 #
 # Lan Media's board. Currently 1000, 1200, 5200, 5245
 #
@@ -323,6 +285,44 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called farsync.
 
+config DSCC4
+	tristate "Etinc PCISYNC serial board support"
+	depends on HDLC && PCI && m
+	help
+	  Driver for Etinc PCISYNC boards based on the Infineon (ex. Siemens)
+	  DSCC4 chipset.
+
+	  This is supposed to work with the four port card. Take a look at
+	  <http://www.cogenit.fr/dscc4/> for further information about the
+	  driver.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called dscc4.
+
+config DSCC4_PCISYNC
+	bool "Etinc PCISYNC features"
+	depends on DSCC4
+	help
+	  Due to Etinc's design choice for its PCISYNC cards, some operations
+	  are only allowed on specific ports of the DSCC4. This option is the
+	  only way for the driver to know that it shouldn't return a success
+	  code for these operations.
+
+	  Please say Y if your card is an Etinc PCISYNC.
+
+config DSCC4_PCI_RST
+	bool "Hard reset support"
+	depends on DSCC4
+	help
+	  Various DSCC4 bugs forbid any reliable software reset of the ASIC.
+	  As a replacement, some vendors provide a way to assert the PCI #RST
+	  pin of DSCC4 through the GPIO port of the card. If you choose Y,
+	  the driver will make use of this feature before module removal
+	  (i.e. rmmod). The feature is known to be available on Commtech's
+	  cards. Contact your manufacturer for details.
+
+	  Say Y if your card supports this feature.
+
 config DLCI
 	tristate "Frame Relay DLCI support"
 	depends on WAN
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 931cbdf..b2a23ae 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -125,8 +125,8 @@
 static int cpc_tty_chars_in_buffer(struct tty_struct *tty);
 static void cpc_tty_flush_buffer(struct tty_struct *tty);
 static void cpc_tty_hangup(struct tty_struct *tty);
-static void cpc_tty_rx_work(void *data);
-static void cpc_tty_tx_work(void *data);
+static void cpc_tty_rx_work(struct work_struct *work);
+static void cpc_tty_tx_work(struct work_struct *work);
 static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len);
 static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx);
 static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char);
@@ -261,8 +261,8 @@
 	cpc_tty->tty_minor = port + CPC_TTY_MINOR_START;
 	cpc_tty->pc300dev = pc300dev; 
 
-	INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work, (void *)cpc_tty);
-	INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work, (void *)port);
+	INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work);
+	INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work);
 	
 	cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL;
 
@@ -659,21 +659,23 @@
  * o call the line disc. read
  * o free memory
  */
-static void cpc_tty_rx_work(void * data)
+static void cpc_tty_rx_work(struct work_struct *work)
 {
+	st_cpc_tty_area *cpc_tty;
 	unsigned long port;
 	int i, j;
-	st_cpc_tty_area *cpc_tty; 
 	volatile st_cpc_rx_buf *buf;
 	char flags=0,flg_rx=1; 
 	struct tty_ldisc *ld;
 
 	if (cpc_tty_cnt == 0) return;
-
 	
 	for (i=0; (i < 4) && flg_rx ; i++) {
 		flg_rx = 0;
-		port = (unsigned long)data;
+
+		cpc_tty = container_of(work, st_cpc_tty_area, tty_rx_work);
+		port = cpc_tty - cpc_tty_area;
+
 		for (j=0; j < CPC_TTY_NPORTS; j++) {
 			cpc_tty = &cpc_tty_area[port];
 		
@@ -882,9 +884,10 @@
  * o if need call line discipline wakeup
  * o call wake_up_interruptible
  */
-static void cpc_tty_tx_work(void *data)
+static void cpc_tty_tx_work(struct work_struct *work)
 {
-	st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) data; 
+	st_cpc_tty_area *cpc_tty =
+		container_of(work, st_cpc_tty_area, tty_tx_work);
 	struct tty_struct *tty; 
 
 	CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name);
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index 41f1d67..7f38012 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -538,7 +538,7 @@
 	iounmap(ei_status.mem);
 }
 
-void
+void __exit
 cleanup_module(void)
 {
 	int this_dev;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index efcdaf1..44a2270 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -49,6 +49,7 @@
 #include <asm/uaccess.h>
 #include <net/ieee80211.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 #include "airo.h"
 
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index ac9437d..f123553 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -219,21 +219,6 @@
 	dev = link->priv;
 
 	DEBUG(0, "airo_config(0x%p)\n", link);
-	
-	/*
-	  This reads the card's CONFIG tuple to find its configuration
-	  registers.
-	*/
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	tuple.Attributes = 0;
-	tuple.TupleData = buf;
-	tuple.TupleDataMax = sizeof(buf);
-	tuple.TupleOffset = 0;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present = parse.config.rmask[0];
 
 	/*
 	  In this loop, we scan the CIS for configuration table entries,
@@ -247,6 +232,10 @@
 	  these things without consulting the CIS, and most client drivers
 	  will only use the CIS to fill in implementation-defined details.
 	*/
+	tuple.Attributes = 0;
+	tuple.TupleData = buf;
+	tuple.TupleDataMax = sizeof(buf);
+	tuple.TupleOffset = 0;
 	tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
 	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
 	while (1) {
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 0c07b8b..10bcb48 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -595,7 +595,7 @@
 static void atmel_smooth_qual(struct atmel_private *priv);
 static void atmel_writeAR(struct net_device *dev, u16 data);
 static int probe_atmel_card(struct net_device *dev);
-static int reset_atmel_card(struct net_device *dev );
+static int reset_atmel_card(struct net_device *dev);
 static void atmel_enter_state(struct atmel_private *priv, int new_state);
 int atmel_open (struct net_device *dev);
 
@@ -784,11 +784,11 @@
 
 static int start_tx(struct sk_buff *skb, struct net_device *dev)
 {
+	static const u8 SNAP_RFC1024[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
 	struct atmel_private *priv = netdev_priv(dev);
 	struct ieee80211_hdr_4addr header;
 	unsigned long flags;
 	u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
-	u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
 
 	if (priv->card && priv->present_callback &&
 	    !(*priv->present_callback)(priv->card)) {
@@ -1193,7 +1193,7 @@
 
 		atmel_set_gcr(dev, GCR_ACKINT); /* acknowledge interrupt */
 
-		for (i = 0; i < sizeof(irq_order)/sizeof(u8); i++)
+		for (i = 0; i < ARRAY_SIZE(irq_order); i++)
 			if (isr & irq_order[i])
 				break;
 
@@ -1345,10 +1345,10 @@
 		atmel_set_mib8(priv, Phy_Mib_Type, PHY_MIB_REG_DOMAIN_POS, priv->reg_domain);
 	} else {
 		priv->reg_domain = atmel_get_mib8(priv, Phy_Mib_Type, PHY_MIB_REG_DOMAIN_POS);
-		for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++)
+		for (i = 0; i < ARRAY_SIZE(channel_table); i++)
 			if (priv->reg_domain == channel_table[i].reg_domain)
 				break;
-		if (i == sizeof(channel_table)/sizeof(channel_table[0])) {
+		if (i == ARRAY_SIZE(channel_table)) {
 			priv->reg_domain = REG_DOMAIN_MKK1;
 			printk(KERN_ALERT "%s: failed to get regulatory domain: assuming MKK1.\n", dev->name);
 		}
@@ -1393,7 +1393,7 @@
 	   else return suitable default channel */
 	int i;
 
-	for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++)
+	for (i = 0; i < ARRAY_SIZE(channel_table); i++)
 		if (priv->reg_domain == channel_table[i].reg_domain) {
 			if (channel >= channel_table[i].min &&
 			    channel <= channel_table[i].max)
@@ -1437,7 +1437,7 @@
 		}
 
 		r = "<unknown>";
-		for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++)
+		for (i = 0; i < ARRAY_SIZE(channel_table); i++)
 			if (priv->reg_domain == channel_table[i].reg_domain)
 				r = channel_table[i].name;
 
@@ -1736,7 +1736,7 @@
 				/* Disable the key */
 				priv->wep_key_len[index] = 0;
 		/* Check if the key is not marked as invalid */
-		if(!(dwrq->flags & IW_ENCODE_NOKEY)) {
+		if (!(dwrq->flags & IW_ENCODE_NOKEY)) {
 			/* Cleanup */
 			memset(priv->wep_keys[index], 0, 13);
 			/* Copy the key in the driver */
@@ -1907,7 +1907,7 @@
 
 	encoding->flags = idx + 1;
 	memset(ext, 0, sizeof(*ext));
-	
+
 	if (!priv->wep_is_on) {
 		ext->alg = IW_ENCODE_ALG_NONE;
 		ext->key_len = 0;
@@ -2343,6 +2343,14 @@
 		iwe.u.freq.e = 0;
 		current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_FREQ_LEN);
 
+		/* Add quality statistics */
+		iwe.cmd = IWEVQUAL;
+		iwe.u.qual.level = priv->BSSinfo[i].RSSI;
+		iwe.u.qual.qual  = iwe.u.qual.level;
+		/* iwe.u.qual.noise  = SOMETHING */
+		current_ev = iwe_stream_add_event(current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_QUAL_LEN);
+
+
 		iwe.cmd = SIOCGIWENCODE;
 		if (priv->BSSinfo[i].UsingWEP)
 			iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
@@ -2373,7 +2381,7 @@
 	range->min_nwid = 0x0000;
 	range->max_nwid = 0x0000;
 	range->num_channels = 0;
-	for (j = 0; j < sizeof(channel_table)/sizeof(channel_table[0]); j++)
+	for (j = 0; j < ARRAY_SIZE(channel_table); j++)
 		if (priv->reg_domain == channel_table[j].reg_domain) {
 			range->num_channels = channel_table[j].max - channel_table[j].min + 1;
 			break;
@@ -2579,9 +2587,9 @@
 
 static const struct iw_handler_def atmel_handler_def =
 {
-	.num_standard	= sizeof(atmel_handler)/sizeof(iw_handler),
-	.num_private	= sizeof(atmel_private_handler)/sizeof(iw_handler),
-	.num_private_args = sizeof(atmel_private_args)/sizeof(struct iw_priv_args),
+	.num_standard	= ARRAY_SIZE(atmel_handler),
+	.num_private	= ARRAY_SIZE(atmel_private_handler),
+	.num_private_args = ARRAY_SIZE(atmel_private_args),
 	.standard	= (iw_handler *) atmel_handler,
 	.private	= (iw_handler *) atmel_private_handler,
 	.private_args	= (struct iw_priv_args *) atmel_private_args,
@@ -2645,7 +2653,7 @@
 
 		domain[REGDOMAINSZ] = 0;
 		rc = -EINVAL;
-		for (i = 0; i < sizeof(channel_table)/sizeof(channel_table[0]); i++) {
+		for (i = 0; i < ARRAY_SIZE(channel_table); i++) {
 			/* strcasecmp doesn't exist in the library */
 			char *a = channel_table[i].name;
 			char *b = domain;
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index 7856640..12617cd 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -5,12 +5,12 @@
         Copyright 2000-2001 ATMEL Corporation.
         Copyright 2003 Simon Kelley.
 
-    This code was developed from version 2.1.1 of the Atmel drivers, 
-    released by Atmel corp. under the GPL in December 2002. It also 
-    includes code from the Linux aironet drivers (C) Benjamin Reed, 
-    and the Linux PCMCIA package, (C) David Hinds. 
+    This code was developed from version 2.1.1 of the Atmel drivers,
+    released by Atmel corp. under the GPL in December 2002. It also
+    includes code from the Linux aironet drivers (C) Benjamin Reed,
+    and the Linux PCMCIA package, (C) David Hinds.
 
-    For all queries about this code, please contact the current author, 
+    For all queries about this code, please contact the current author,
     Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation.
 
     This program is free software; you can redistribute it and/or modify
@@ -87,7 +87,7 @@
    event is received.  The config() and release() entry points are
    used to configure or release a socket, in response to card
    insertion and ejection events.  They are invoked from the atmel_cs
-   event handler. 
+   event handler.
 */
 
 static int atmel_config(struct pcmcia_device *link);
@@ -133,22 +133,22 @@
    device IO routines can use a flag like this to throttle IO to a
    card that is not ready to accept it.
 */
-   
+
 typedef struct local_info_t {
 	dev_node_t	node;
 	struct net_device *eth_dev;
 } local_info_t;
 
 /*======================================================================
-  
+
   atmel_attach() creates an "instance" of the driver, allocating
   local data structures for one device.  The device is registered
   with Card Services.
-  
+
   The dev_link structure is initialized, but we don't actually
   configure the card at this point -- we wait until we receive a
   card insertion event.
-  
+
   ======================================================================*/
 
 static int atmel_probe(struct pcmcia_device *p_dev)
@@ -184,12 +184,12 @@
 } /* atmel_attach */
 
 /*======================================================================
-  
+
   This deletes a driver "instance".  The device is de-registered
   with Card Services.  If it has been released, all local data
   structures are freed.  Otherwise, the structures will be freed
   when the device is released.
-  
+
   ======================================================================*/
 
 static void atmel_detach(struct pcmcia_device *link)
@@ -202,11 +202,11 @@
 }
 
 /*======================================================================
-  
+
   atmel_config() is scheduled to run after a CARD_INSERTION event
   is received, to configure the PCMCIA socket, and to make the
   device available to the system.
-  
+
   ======================================================================*/
 
 #define CS_CHECK(fn, ret) \
@@ -237,28 +237,17 @@
 	did = handle_to_dev(link).driver_data;
 
 	DEBUG(0, "atmel_config(0x%p)\n", link);
-	
+
 	tuple.Attributes = 0;
 	tuple.TupleData = buf;
 	tuple.TupleDataMax = sizeof(buf);
 	tuple.TupleOffset = 0;
-	
-	/*
-	  This reads the card's CONFIG tuple to find its configuration
-	  registers.
-	*/
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present = parse.config.rmask[0];
 
 	/*
 	  In this loop, we scan the CIS for configuration table entries,
 	  each of which describes a valid card configuration, including
 	  voltage, IO window, memory window, and interrupt settings.
-	  
+
 	  We make no assumptions about the card to be configured: we use
 	  just the information available in the CIS.  In an ideal world,
 	  this would work for any PCMCIA card, but it requires a complete
@@ -274,17 +263,17 @@
 		if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
 				pcmcia_parse_tuple(link, &tuple, &parse) != 0)
 			goto next_entry;
-		
+
 		if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
 		if (cfg->index == 0) goto next_entry;
 		link->conf.ConfigIndex = cfg->index;
-		
+
 		/* Does this card need audio output? */
 		if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
 			link->conf.Attributes |= CONF_ENABLE_SPKR;
 			link->conf.Status = CCSR_AUDIO_ENA;
 		}
-		
+
 		/* Use power settings for Vcc and Vpp if present */
 		/*  Note that the CIS values need to be rescaled */
 		if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM))
@@ -293,11 +282,11 @@
 		else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM))
 			link->conf.Vpp =
 				dflt.vpp1.param[CISTPL_POWER_VNOM]/10000;
-		
+
 		/* Do we need to allocate an interrupt? */
 		if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1)
 			link->conf.Attributes |= CONF_ENABLE_IRQ;
-		
+
 		/* IO window settings */
 		link->io.NumPorts1 = link->io.NumPorts2 = 0;
 		if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
@@ -315,18 +304,18 @@
 				link->io.NumPorts2 = io->win[1].len;
 			}
 		}
-		
+
 		/* This reserves IO space but doesn't actually enable it */
 		if (pcmcia_request_io(link, &link->io) != 0)
 			goto next_entry;
 
 		/* If we got this far, we're cool! */
 		break;
-		
+
 	next_entry:
 		CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple));
 	}
-	
+
 	/*
 	  Allocate an interrupt line.  Note that this does not assign a
 	  handler to the interrupt, unless the 'Handler' member of the
@@ -334,31 +323,31 @@
 	*/
 	if (link->conf.Attributes & CONF_ENABLE_IRQ)
 		CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
-	
+
 	/*
 	  This actually configures the PCMCIA socket -- setting up
 	  the I/O windows and the interrupt mapping, and putting the
 	  card and host interface into "Memory and IO" mode.
 	*/
 	CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
-	
+
 	if (link->irq.AssignedIRQ == 0) {
-		printk(KERN_ALERT 
+		printk(KERN_ALERT
 		       "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.");
 		goto cs_failed;
 	}
-       
-	((local_info_t*)link->priv)->eth_dev = 
+
+	((local_info_t*)link->priv)->eth_dev =
 		init_atmel_card(link->irq.AssignedIRQ,
 				link->io.BasePort1,
 				did ? did->driver_info : ATMEL_FW_TYPE_NONE,
 				&handle_to_dev(link),
-				card_present, 
+				card_present,
 				link);
-	if (!((local_info_t*)link->priv)->eth_dev) 
+	if (!((local_info_t*)link->priv)->eth_dev)
 			goto cs_failed;
-	
-	
+
+
 	/*
 	  At this point, the dev_node_t structure(s) need to be
 	  initialized and arranged in a linked list at link->dev_node.
@@ -376,11 +365,11 @@
 }
 
 /*======================================================================
-  
+
   After a card is removed, atmel_release() will unregister the
   device, and release the PCMCIA configuration.  If the device is
   still open, this will be postponed until it is closed.
-  
+
   ======================================================================*/
 
 static void atmel_release(struct pcmcia_device *link)
@@ -517,7 +506,7 @@
     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
     STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
     IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-    POSSIBILITY OF SUCH DAMAGE.    
+    POSSIBILITY OF SUCH DAMAGE.
 */
 
 module_init(atmel_cs_init);
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index 3bfa791..92f87fb 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -53,18 +53,18 @@
 				     const struct pci_device_id *pent)
 {
 	struct net_device *dev;
-		
+
 	if (pci_enable_device(pdev))
 		return -ENODEV;
-	
+
 	pci_set_master(pdev);
-	
-	dev = init_atmel_card(pdev->irq, pdev->resource[1].start, 
+
+	dev = init_atmel_card(pdev->irq, pdev->resource[1].start,
 			      ATMEL_FW_TYPE_506,
 			      &pdev->dev, NULL, NULL);
 	if (!dev)
 		return -ENODEV;
-	
+
 	pci_set_drvdata(pdev, dev);
 	return 0;
 }
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index d6a8bf0..8286678 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -159,6 +159,7 @@
 
 /* Chipcommon registers. */
 #define BCM43xx_CHIPCOMMON_CAPABILITIES 	0x04
+#define BCM43xx_CHIPCOMMON_CTL			0x28
 #define BCM43xx_CHIPCOMMON_PLLONDELAY		0xB0
 #define BCM43xx_CHIPCOMMON_FREFSELDELAY		0xB4
 #define BCM43xx_CHIPCOMMON_SLOWCLKCTL		0xB8
@@ -172,6 +173,33 @@
 /* SBTOPCI2 values. */
 #define BCM43xx_SBTOPCI2_PREFETCH	0x4
 #define BCM43xx_SBTOPCI2_BURST		0x8
+#define BCM43xx_SBTOPCI2_MEMREAD_MULTI	0x20
+
+/* PCI-E core registers. */
+#define BCM43xx_PCIECORE_REG_ADDR      0x0130
+#define BCM43xx_PCIECORE_REG_DATA      0x0134
+#define BCM43xx_PCIECORE_MDIO_CTL      0x0128
+#define BCM43xx_PCIECORE_MDIO_DATA     0x012C
+
+/* PCI-E registers. */
+#define BCM43xx_PCIE_TLP_WORKAROUND    0x0004
+#define BCM43xx_PCIE_DLLP_LINKCTL      0x0100
+
+/* PCI-E MDIO bits. */
+#define BCM43xx_PCIE_MDIO_ST   0x40000000
+#define BCM43xx_PCIE_MDIO_WT   0x10000000
+#define BCM43xx_PCIE_MDIO_DEV  22
+#define BCM43xx_PCIE_MDIO_REG  18
+#define BCM43xx_PCIE_MDIO_TA   0x00020000
+#define BCM43xx_PCIE_MDIO_TC   0x0100
+
+/* MDIO devices. */
+#define BCM43xx_MDIO_SERDES_RX	0x1F
+
+/* SERDES RX registers. */
+#define BCM43xx_SERDES_RXTIMER	0x2
+#define BCM43xx_SERDES_CDR	0x6
+#define BCM43xx_SERDES_CDR_BW	0x7
 
 /* Chipcommon capabilities. */
 #define BCM43xx_CAPABILITIES_PCTL		0x00040000
@@ -221,6 +249,7 @@
 #define BCM43xx_COREID_USB20_HOST       0x819
 #define BCM43xx_COREID_USB20_DEV        0x81a
 #define BCM43xx_COREID_SDIO_HOST        0x81b
+#define BCM43xx_COREID_PCIE		0x820
 
 /* Core Information Registers */
 #define BCM43xx_CIR_BASE		0xf00
@@ -365,6 +394,9 @@
 #define BCM43xx_DEFAULT_SHORT_RETRY_LIMIT	7
 #define BCM43xx_DEFAULT_LONG_RETRY_LIMIT	4
 
+/* FIXME: the next line is a guess as to what the maximum RSSI value might be */
+#define RX_RSSI_MAX				60
+
 /* Max size of a security key */
 #define BCM43xx_SEC_KEYSIZE			16
 /* Security algorithms. */
@@ -787,7 +819,7 @@
 	struct tasklet_struct isr_tasklet;
 
 	/* Periodic tasks */
-	struct work_struct periodic_work;
+	struct delayed_work periodic_work;
 	unsigned int periodic_state;
 
 	struct work_struct restart_work;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index a1b7838..2ec2e5a 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -130,6 +130,10 @@
 	{ PCI_VENDOR_ID_BROADCOM, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 	/* Broadcom 4307 802.11b */
 	{ PCI_VENDOR_ID_BROADCOM, 0x4307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	/* Broadcom 4311 802.11(a)/b/g */
+	{ PCI_VENDOR_ID_BROADCOM, 0x4311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	/* Broadcom 4312 802.11a/b/g */
+	{ PCI_VENDOR_ID_BROADCOM, 0x4312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 	/* Broadcom 4318 802.11b/g */
 	{ PCI_VENDOR_ID_BROADCOM, 0x4318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 	/* Broadcom 4319 802.11a/b/g */
@@ -2600,8 +2604,9 @@
 	/* fetch sb_id_hi from core information registers */
 	sb_id_hi = bcm43xx_read32(bcm, BCM43xx_CIR_SB_ID_HI);
 
-	core_id = (sb_id_hi & 0xFFF0) >> 4;
-	core_rev = (sb_id_hi & 0xF);
+	core_id = (sb_id_hi & 0x8FF0) >> 4;
+	core_rev = (sb_id_hi & 0x7000) >> 8;
+	core_rev |= (sb_id_hi & 0xF);
 	core_vendor = (sb_id_hi & 0xFFFF0000) >> 16;
 
 	/* if present, chipcommon is always core 0; read the chipid from it */
@@ -2679,14 +2684,10 @@
 		bcm->chip_id, bcm->chip_rev);
 	dprintk(KERN_INFO PFX "Number of cores: %d\n", core_count);
 	if (bcm->core_chipcommon.available) {
-		dprintk(KERN_INFO PFX "Core 0: ID 0x%x, rev 0x%x, vendor 0x%x, %s\n",
-			core_id, core_rev, core_vendor,
-			bcm43xx_core_enabled(bcm) ? "enabled" : "disabled");
-	}
-
-	if (bcm->core_chipcommon.available)
+		dprintk(KERN_INFO PFX "Core 0: ID 0x%x, rev 0x%x, vendor 0x%x\n",
+			core_id, core_rev, core_vendor);
 		current_core = 1;
-	else
+	} else
 		current_core = 0;
 	for ( ; current_core < core_count; current_core++) {
 		struct bcm43xx_coreinfo *core;
@@ -2704,13 +2705,13 @@
 		core_rev = (sb_id_hi & 0xF);
 		core_vendor = (sb_id_hi & 0xFFFF0000) >> 16;
 
-		dprintk(KERN_INFO PFX "Core %d: ID 0x%x, rev 0x%x, vendor 0x%x, %s\n",
-			current_core, core_id, core_rev, core_vendor,
-			bcm43xx_core_enabled(bcm) ? "enabled" : "disabled" );
+		dprintk(KERN_INFO PFX "Core %d: ID 0x%x, rev 0x%x, vendor 0x%x\n",
+			current_core, core_id, core_rev, core_vendor);
 
 		core = NULL;
 		switch (core_id) {
 		case BCM43xx_COREID_PCI:
+		case BCM43xx_COREID_PCIE:
 			core = &bcm->core_pci;
 			if (core->available) {
 				printk(KERN_WARNING PFX "Multiple PCI cores found.\n");
@@ -2749,12 +2750,12 @@
 			case 6:
 			case 7:
 			case 9:
+			case 10:
 				break;
 			default:
-				printk(KERN_ERR PFX "Error: Unsupported 80211 core revision %u\n",
+				printk(KERN_WARNING PFX
+				       "Unsupported 80211 core revision %u\n",
 				       core_rev);
-				err = -ENODEV;
-				goto out;
 			}
 			bcm->nr_80211_available++;
 			core->priv = ext_80211;
@@ -2868,16 +2869,11 @@
 	u32 sbimconfiglow;
 	u8 limit;
 
-	if (bcm->chip_rev < 5) {
+	if (bcm->core_pci.rev <= 5 && bcm->core_pci.id != BCM43xx_COREID_PCIE) {
 		sbimconfiglow = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW);
 		sbimconfiglow &= ~ BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK;
 		sbimconfiglow &= ~ BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK;
-		if (bcm->bustype == BCM43xx_BUSTYPE_PCI)
-			sbimconfiglow |= 0x32;
-		else if (bcm->bustype == BCM43xx_BUSTYPE_SB)
-			sbimconfiglow |= 0x53;
-		else
-			assert(0);
+		sbimconfiglow |= 0x32;
 		bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, sbimconfiglow);
 	}
 
@@ -3004,22 +3000,64 @@
 
 static int bcm43xx_pcicore_commit_settings(struct bcm43xx_private *bcm)
 {
-	int err;
-	struct bcm43xx_coreinfo *old_core;
+	int err = 0;
 
-	old_core = bcm->current_core;
-	err = bcm43xx_switch_core(bcm, &bcm->core_pci);
-	if (err)
-		goto out;
+	bcm->irq_savedstate = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
 
-	bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000);
+	if (bcm->core_chipcommon.available) {
+		err = bcm43xx_switch_core(bcm, &bcm->core_chipcommon);
+		if (err)
+			goto out;
 
-	bcm43xx_switch_core(bcm, old_core);
-	assert(err == 0);
+		bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000);
+
+		/* this function is always called when a PCI core is mapped */
+		err = bcm43xx_switch_core(bcm, &bcm->core_pci);
+		if (err)
+			goto out;
+	} else
+		bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000);
+
+	bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate);
+
 out:
 	return err;
 }
 
+static u32 bcm43xx_pcie_reg_read(struct bcm43xx_private *bcm, u32 address)
+{
+	bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_ADDR, address);
+	return bcm43xx_read32(bcm, BCM43xx_PCIECORE_REG_DATA);
+}
+
+static void bcm43xx_pcie_reg_write(struct bcm43xx_private *bcm, u32 address,
+				    u32 data)
+{
+	bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_ADDR, address);
+	bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_DATA, data);
+}
+
+static void bcm43xx_pcie_mdio_write(struct bcm43xx_private *bcm, u8 dev, u8 reg,
+				    u16 data)
+{
+	int i;
+
+	bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_CTL, 0x0082);
+	bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_DATA, BCM43xx_PCIE_MDIO_ST |
+			BCM43xx_PCIE_MDIO_WT | (dev << BCM43xx_PCIE_MDIO_DEV) |
+			(reg << BCM43xx_PCIE_MDIO_REG) | BCM43xx_PCIE_MDIO_TA |
+			data);
+	udelay(10);
+
+	for (i = 0; i < 10; i++) {
+		if (bcm43xx_read32(bcm, BCM43xx_PCIECORE_MDIO_CTL) &
+		    BCM43xx_PCIE_MDIO_TC)
+			break;
+		msleep(1);
+	}
+	bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_CTL, 0);
+}
+
 /* Make an I/O Core usable. "core_mask" is the bitmask of the cores to enable.
  * To enable core 0, pass a core_mask of 1<<0
  */
@@ -3039,7 +3077,8 @@
 	if (err)
 		goto out;
 
-	if (bcm->core_pci.rev < 6) {
+	if (bcm->current_core->rev < 6 ||
+		bcm->current_core->id == BCM43xx_COREID_PCI) {
 		value = bcm43xx_read32(bcm, BCM43xx_CIR_SBINTVEC);
 		value |= (1 << backplane_flag_nr);
 		bcm43xx_write32(bcm, BCM43xx_CIR_SBINTVEC, value);
@@ -3057,21 +3096,46 @@
 		}
 	}
 
-	value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2);
-	value |= BCM43xx_SBTOPCI2_PREFETCH | BCM43xx_SBTOPCI2_BURST;
-	bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value);
+	if (bcm->current_core->id == BCM43xx_COREID_PCI) {
+		value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2);
+		value |= BCM43xx_SBTOPCI2_PREFETCH | BCM43xx_SBTOPCI2_BURST;
+		bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value);
 
-	if (bcm->core_pci.rev < 5) {
-		value = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW);
-		value |= (2 << BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_SHIFT)
-			 & BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK;
-		value |= (3 << BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_SHIFT)
-			 & BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK;
-		bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, value);
-		err = bcm43xx_pcicore_commit_settings(bcm);
-		assert(err == 0);
+		if (bcm->current_core->rev < 5) {
+			value = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW);
+			value |= (2 << BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_SHIFT)
+				 & BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK;
+			value |= (3 << BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_SHIFT)
+				 & BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK;
+			bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, value);
+			err = bcm43xx_pcicore_commit_settings(bcm);
+			assert(err == 0);
+		} else if (bcm->current_core->rev >= 11) {
+			value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2);
+			value |= BCM43xx_SBTOPCI2_MEMREAD_MULTI;
+			bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value);
+		}
+	} else {
+		if (bcm->current_core->rev == 0 || bcm->current_core->rev == 1) {
+			value = bcm43xx_pcie_reg_read(bcm, BCM43xx_PCIE_TLP_WORKAROUND);
+			value |= 0x8;
+			bcm43xx_pcie_reg_write(bcm, BCM43xx_PCIE_TLP_WORKAROUND,
+					       value);
+		}
+		if (bcm->current_core->rev == 0) {
+			bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX,
+						BCM43xx_SERDES_RXTIMER, 0x8128);
+			bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX,
+						BCM43xx_SERDES_CDR, 0x0100);
+			bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX,
+						BCM43xx_SERDES_CDR_BW, 0x1466);
+		} else if (bcm->current_core->rev == 1) {
+			value = bcm43xx_pcie_reg_read(bcm, BCM43xx_PCIE_DLLP_LINKCTL);
+			value |= 0x40;
+			bcm43xx_pcie_reg_write(bcm, BCM43xx_PCIE_DLLP_LINKCTL,
+					       value);
+		}
 	}
-
 out_switch_back:
 	err = bcm43xx_switch_core(bcm, old_core);
 out:
@@ -3140,55 +3204,28 @@
 
 static void do_periodic_work(struct bcm43xx_private *bcm)
 {
-	unsigned int state;
-
-	state = bcm->periodic_state;
-	if (state % 8 == 0)
+	if (bcm->periodic_state % 8 == 0)
 		bcm43xx_periodic_every120sec(bcm);
-	if (state % 4 == 0)
+	if (bcm->periodic_state % 4 == 0)
 		bcm43xx_periodic_every60sec(bcm);
-	if (state % 2 == 0)
+	if (bcm->periodic_state % 2 == 0)
 		bcm43xx_periodic_every30sec(bcm);
-	if (state % 1 == 0)
-		bcm43xx_periodic_every15sec(bcm);
-	bcm->periodic_state = state + 1;
+	bcm43xx_periodic_every15sec(bcm);
 
 	schedule_delayed_work(&bcm->periodic_work, HZ * 15);
 }
 
-/* Estimate a "Badness" value based on the periodic work
- * state-machine state. "Badness" is worse (bigger), if the
- * periodic work will take longer.
- */
-static int estimate_periodic_work_badness(unsigned int state)
+static void bcm43xx_periodic_work_handler(struct work_struct *work)
 {
-	int badness = 0;
-
-	if (state % 8 == 0) /* every 120 sec */
-		badness += 10;
-	if (state % 4 == 0) /* every 60 sec */
-		badness += 5;
-	if (state % 2 == 0) /* every 30 sec */
-		badness += 1;
-	if (state % 1 == 0) /* every 15 sec */
-		badness += 1;
-
-#define BADNESS_LIMIT	4
-	return badness;
-}
-
-static void bcm43xx_periodic_work_handler(void *d)
-{
-	struct bcm43xx_private *bcm = d;
+	struct bcm43xx_private *bcm =
+		container_of(work, struct bcm43xx_private, periodic_work.work);
 	struct net_device *net_dev = bcm->net_dev;
 	unsigned long flags;
 	u32 savedirqs = 0;
-	int badness;
 	unsigned long orig_trans_start = 0;
 
 	mutex_lock(&bcm->mutex);
-	badness = estimate_periodic_work_badness(bcm->periodic_state);
-	if (badness > BADNESS_LIMIT) {
+	if (unlikely(bcm->periodic_state % 4 == 0)) {
 		/* Periodic work will take a long time, so we want it to
 		 * be preemptible.
 		 */
@@ -3220,7 +3257,7 @@
 
 	do_periodic_work(bcm);
 
-	if (badness > BADNESS_LIMIT) {
+	if (unlikely(bcm->periodic_state % 4 == 0)) {
 		spin_lock_irqsave(&bcm->irq_lock, flags);
 		tasklet_enable(&bcm->isr_tasklet);
 		bcm43xx_interrupt_enable(bcm, savedirqs);
@@ -3231,6 +3268,7 @@
 		net_dev->trans_start = orig_trans_start;
 	}
 	mmiowb();
+	bcm->periodic_state++;
 	spin_unlock_irqrestore(&bcm->irq_lock, flags);
 	mutex_unlock(&bcm->mutex);
 }
@@ -3242,11 +3280,11 @@
 
 void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm)
 {
-	struct work_struct *work = &(bcm->periodic_work);
+	struct delayed_work *work = &bcm->periodic_work;
 
 	assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
-	INIT_WORK(work, bcm43xx_periodic_work_handler, bcm);
-	schedule_work(work);
+	INIT_DELAYED_WORK(work, bcm43xx_periodic_work_handler);
+	schedule_delayed_work(work, 0);
 }
 
 static void bcm43xx_security_init(struct bcm43xx_private *bcm)
@@ -3598,7 +3636,7 @@
 	bcm43xx_periodic_tasks_setup(bcm);
 
 	/*FIXME: This should be handled by softmac instead. */
-	schedule_work(&bcm->softmac->associnfo.work);
+	schedule_delayed_work(&bcm->softmac->associnfo.work, 0);
 
 out:
 	mutex_unlock(&(bcm)->mutex);
@@ -3676,7 +3714,7 @@
 		bcm->ieee->freq_band = IEEE80211_24GHZ_BAND;
 		break;
 	case BCM43xx_PHYTYPE_G:
-		if (phy_rev > 7)
+		if (phy_rev > 8)
 			phy_rev_ok = 0;
 		bcm->ieee->modulation = IEEE80211_OFDM_MODULATION |
 					IEEE80211_CCK_MODULATION;
@@ -3688,6 +3726,8 @@
 		       phy_type);
 		return -ENODEV;
 	};
+	bcm->ieee->perfect_rssi = RX_RSSI_MAX;
+	bcm->ieee->worst_rssi = 0;
 	if (!phy_rev_ok) {
 		printk(KERN_WARNING PFX "Invalid PHY Revision %x\n",
 		       phy_rev);
@@ -3974,11 +4014,6 @@
 	return NETDEV_TX_OK;
 }
 
-static struct net_device_stats * bcm43xx_net_get_stats(struct net_device *net_dev)
-{
-	return &(bcm43xx_priv(net_dev)->ieee->stats);
-}
-
 static void bcm43xx_net_tx_timeout(struct net_device *net_dev)
 {
 	struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
@@ -4092,7 +4127,6 @@
 
 	net_dev->open = bcm43xx_net_open;
 	net_dev->stop = bcm43xx_net_stop;
-	net_dev->get_stats = bcm43xx_net_get_stats;
 	net_dev->tx_timeout = bcm43xx_net_tx_timeout;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	net_dev->poll_controller = bcm43xx_net_poll_controller;
@@ -4149,9 +4183,10 @@
 /* Hard-reset the chip. Do not call this directly.
  * Use bcm43xx_controller_restart()
  */
-static void bcm43xx_chip_reset(void *_bcm)
+static void bcm43xx_chip_reset(struct work_struct *work)
 {
-	struct bcm43xx_private *bcm = _bcm;
+	struct bcm43xx_private *bcm =
+		container_of(work, struct bcm43xx_private, restart_work);
 	struct bcm43xx_phyinfo *phy;
 	int err = -ENODEV;
 
@@ -4178,7 +4213,7 @@
 	if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
 		return;
 	printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason);
-	INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm);
+	INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset);
 	schedule_work(&bcm->restart_work);
 }
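
The conversions in this file follow the 2.6.20 workqueue API: handlers take a
struct work_struct * and recover their private data with container_of() instead
of a void * argument, and work that re-arms itself periodically becomes a
struct delayed_work. A minimal sketch of the new-style pattern, using
hypothetical foo_* names rather than the driver's own:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct foo_priv {
	struct delayed_work periodic_work;
	/* ... driver state ... */
};

static void foo_periodic_work(struct work_struct *work)
{
	/* For delayed work, container_of() goes through the embedded
	 * .work member of struct delayed_work. */
	struct foo_priv *priv =
		container_of(work, struct foo_priv, periodic_work.work);

	/* ... periodic housekeeping on priv ... */

	/* Re-arm; the delay argument replaces a hand-rolled timer. */
	schedule_delayed_work(&priv->periodic_work, 15 * HZ);
}

static void foo_periodic_start(struct foo_priv *priv)
{
	INIT_DELAYED_WORK(&priv->periodic_work, foo_periodic_work);
	schedule_delayed_work(&priv->periodic_work, 0);
}

The same container_of() idiom applies to plain struct work_struct members,
just without the trailing .work.
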
 
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_power.c b/drivers/net/wireless/bcm43xx/bcm43xx_power.c
index 6569da3..7e774f4 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_power.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_power.c
@@ -153,8 +153,6 @@
 	int err, maxfreq;
 	struct bcm43xx_coreinfo *old_core;
 
-	if (!(bcm->chipcommon_capabilities & BCM43xx_CAPABILITIES_PCTL))
-		return 0;
 	old_core = bcm->current_core;
 	err = bcm43xx_switch_core(bcm, &bcm->core_chipcommon);
 	if (err == -ENODEV)
@@ -162,11 +160,27 @@
 	if (err)
 		goto out;
 
-	maxfreq = bcm43xx_pctl_clockfreqlimit(bcm, 1);
-	bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_PLLONDELAY,
-			(maxfreq * 150 + 999999) / 1000000);
-	bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_FREFSELDELAY,
-			(maxfreq * 15 + 999999) / 1000000);
+	if (bcm->chip_id == 0x4321) {
+		if (bcm->chip_rev == 0)
+			bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_CTL, 0x03A4);
+		if (bcm->chip_rev == 1)
+			bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_CTL, 0x00A4);
+	}
+
+	if (bcm->chipcommon_capabilities & BCM43xx_CAPABILITIES_PCTL) {
+		if (bcm->current_core->rev >= 10) {
+			/* Set Idle Power clock rate to 1 MHz */
+			bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_SYSCLKCTL,
+				       (bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_SYSCLKCTL)
+				       & 0x0000FFFF) | 0x40000);
+		} else {
+			maxfreq = bcm43xx_pctl_clockfreqlimit(bcm, 1);
+			bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_PLLONDELAY,
+				       (maxfreq * 150 + 999999) / 1000000);
+			bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_FREFSELDELAY,
+				       (maxfreq * 15 + 999999) / 1000000);
+		}
+	}
 
 	err = bcm43xx_switch_core(bcm, old_core);
 	assert(err == 0);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
index d27016f..a659442 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
@@ -47,9 +47,6 @@
 #define BCM43xx_WX_VERSION	18
 
 #define MAX_WX_STRING		80
-/* FIXME: the next line is a guess as to what the maximum RSSI value might be */
-#define RX_RSSI_MAX		60
-
 
 static int bcm43xx_wx_get_name(struct net_device *net_dev,
                                struct iw_request_info *info,
@@ -693,6 +690,7 @@
 	bcm->ieee->host_encrypt = !!on;
 	bcm->ieee->host_decrypt = !!on;
 	bcm->ieee->host_build_iv = !on;
+	bcm->ieee->host_strip_iv_icv = !on;
 	spin_unlock_irqrestore(&bcm->irq_lock, flags);
 	mutex_unlock(&bcm->mutex);
 
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c b/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
index 0159e4e..3e24626 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
@@ -544,24 +544,6 @@
 	}
 
 	frame_ctl = le16_to_cpu(wlhdr->frame_ctl);
-	if ((frame_ctl & IEEE80211_FCTL_PROTECTED) && !bcm->ieee->host_decrypt) {
-		frame_ctl &= ~IEEE80211_FCTL_PROTECTED;
-		wlhdr->frame_ctl = cpu_to_le16(frame_ctl);		
-		/* trim IV and ICV */
-		/* FIXME: this must be done only for WEP encrypted packets */
-		if (skb->len < 32) {
-			dprintkl(KERN_ERR PFX "RX packet dropped (PROTECTED flag "
-					      "set and length < 32)\n");
-			return -EINVAL;
-		} else {		
-			memmove(skb->data + 4, skb->data, 24);
-			skb_pull(skb, 4);
-			skb_trim(skb, skb->len - 4);
-			stats.len -= 8;
-		}
-		wlhdr = (struct ieee80211_hdr_4addr *)(skb->data);
-	}
-	
 	switch (WLAN_FC_GET_TYPE(frame_ctl)) {
 	case IEEE80211_FTYPE_MGMT:
 		ieee80211_rx_mgt(bcm->ieee, wlhdr, &stats);
diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/hostap/hostap.h
index e663518..e89c890 100644
--- a/drivers/net/wireless/hostap/hostap.h
+++ b/drivers/net/wireless/hostap/hostap.h
@@ -35,7 +35,7 @@
 struct net_device_stats *hostap_get_stats(struct net_device *dev);
 void hostap_setup_dev(struct net_device *dev, local_info_t *local,
 		      int main_dev);
-void hostap_set_multicast_list_queue(void *data);
+void hostap_set_multicast_list_queue(struct work_struct *work);
 int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked);
 int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked);
 void hostap_cleanup(local_info_t *local);
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index ba13125..974a8e5 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -49,10 +49,10 @@
 static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta);
 static void hostap_event_expired_sta(struct net_device *dev,
 				     struct sta_info *sta);
-static void handle_add_proc_queue(void *data);
+static void handle_add_proc_queue(struct work_struct *work);
 
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static void handle_wds_oper_queue(void *data);
+static void handle_wds_oper_queue(struct work_struct *work);
 static void prism2_send_mgmt(struct net_device *dev,
 			     u16 type_subtype, char *body,
 			     int body_len, u8 *addr, u16 tx_cb_idx);
@@ -807,7 +807,7 @@
 	INIT_LIST_HEAD(&ap->sta_list);
 
 	/* Initialize task queue structure for AP management */
-	INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue, ap);
+	INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue);
 
 	ap->tx_callback_idx =
 		hostap_tx_callback_register(local, hostap_ap_tx_cb, ap);
@@ -815,7 +815,7 @@
 		printk(KERN_WARNING "%s: failed to register TX callback for "
 		       "AP\n", local->dev->name);
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-	INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue, local);
+	INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue);
 
 	ap->tx_callback_auth =
 		hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap);
@@ -1062,9 +1062,10 @@
 }
 
 
-static void handle_add_proc_queue(void *data)
+static void handle_add_proc_queue(struct work_struct *work)
 {
-	struct ap_data *ap = (struct ap_data *) data;
+	struct ap_data *ap = container_of(work, struct ap_data,
+					  add_sta_proc_queue);
 	struct sta_info *sta;
 	char name[20];
 	struct add_sta_proc_data *entry, *prev;
@@ -1099,15 +1100,13 @@
 {
 	struct sta_info *sta;
 
-	sta = (struct sta_info *)
-		kmalloc(sizeof(struct sta_info), GFP_ATOMIC);
+	sta = kzalloc(sizeof(struct sta_info), GFP_ATOMIC);
 	if (sta == NULL) {
 		PDEBUG(DEBUG_AP, "AP: kmalloc failed\n");
 		return NULL;
 	}
 
 	/* initialize STA info data */
-	memset(sta, 0, sizeof(struct sta_info));
 	sta->local = ap->local;
 	skb_queue_head_init(&sta->tx_buf);
 	memcpy(sta->addr, addr, ETH_ALEN);
@@ -1952,9 +1951,11 @@
 
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
 
-static void handle_wds_oper_queue(void *data)
+static void handle_wds_oper_queue(struct work_struct *work)
 {
-	local_info_t *local = data;
+	struct ap_data *ap = container_of(work, struct ap_data,
+					  wds_oper_queue);
+	local_info_t *local = ap->local;
 	struct wds_oper_data *entry, *prev;
 
 	spin_lock_bh(&local->lock);
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index f63909e..8d8f4b9 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -293,15 +293,12 @@
 		goto done;
 	}
 
-	tuple.DesiredTuple = CISTPL_MANFID;
 	tuple.Attributes = TUPLE_RETURN_COMMON;
 	tuple.TupleData = buf;
 	tuple.TupleDataMax = sizeof(buf);
 	tuple.TupleOffset = 0;
-	if (pcmcia_get_first_tuple(hw_priv->link, &tuple) ||
-	    pcmcia_get_tuple_data(hw_priv->link, &tuple) ||
-	    pcmcia_parse_tuple(hw_priv->link, &tuple, parse) ||
-	    parse->manfid.manf != 0xd601 || parse->manfid.card != 0x0101) {
+
+	if (hw_priv->link->manf_id != 0xd601 || hw_priv->link->card_id != 0x0101) {
 		/* No SanDisk manfid found */
 		ret = -ENODEV;
 		goto done;
@@ -566,23 +563,16 @@
 	PDEBUG(DEBUG_FLOW, "prism2_config()\n");
 
 	parse = kmalloc(sizeof(cisparse_t), GFP_KERNEL);
-	hw_priv = kmalloc(sizeof(*hw_priv), GFP_KERNEL);
+	hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL);
 	if (parse == NULL || hw_priv == NULL) {
 		ret = -ENOMEM;
 		goto failed;
 	}
-	memset(hw_priv, 0, sizeof(*hw_priv));
 
-	tuple.DesiredTuple = CISTPL_CONFIG;
 	tuple.Attributes = 0;
 	tuple.TupleData = buf;
 	tuple.TupleDataMax = sizeof(buf);
 	tuple.TupleOffset = 0;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
-	link->conf.ConfigBase = parse->config.base;
-	link->conf.Present = parse->config.rmask[0];
 
 	CS_CHECK(GetConfigurationInfo,
 		 pcmcia_get_configuration_info(link, &conf));
diff --git a/drivers/net/wireless/hostap/hostap_download.c b/drivers/net/wireless/hostap/hostap_download.c
index ab26b52..24fc387 100644
--- a/drivers/net/wireless/hostap/hostap_download.c
+++ b/drivers/net/wireless/hostap/hostap_download.c
@@ -685,14 +685,12 @@
 		goto out;
 	}
 
-	dl = kmalloc(sizeof(*dl) + param->num_areas *
+	dl = kzalloc(sizeof(*dl) + param->num_areas *
 		     sizeof(struct prism2_download_data_area), GFP_KERNEL);
 	if (dl == NULL) {
 		ret = -ENOMEM;
 		goto out;
 	}
-	memset(dl, 0, sizeof(*dl) + param->num_areas *
-	       sizeof(struct prism2_download_data_area));
 	dl->dl_cmd = param->dl_cmd;
 	dl->start_addr = param->start_addr;
 	dl->num_areas = param->num_areas;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index ed00ebb..a394a23 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -347,14 +347,12 @@
 	if (signal_pending(current))
 		return -EINTR;
 
-	entry = (struct hostap_cmd_queue *)
-		kmalloc(sizeof(*entry), GFP_ATOMIC);
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 	if (entry == NULL) {
 		printk(KERN_DEBUG "%s: hfa384x_cmd - kmalloc failed\n",
 		       dev->name);
 		return -ENOMEM;
 	}
-	memset(entry, 0, sizeof(*entry));
 	atomic_set(&entry->usecnt, 1);
 	entry->type = CMD_SLEEP;
 	entry->cmd = cmd;
@@ -517,14 +515,12 @@
 		return -1;
 	}
 
-	entry = (struct hostap_cmd_queue *)
-		kmalloc(sizeof(*entry), GFP_ATOMIC);
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 	if (entry == NULL) {
 		printk(KERN_DEBUG "%s: hfa384x_cmd_callback - kmalloc "
 		       "failed\n", dev->name);
 		return -ENOMEM;
 	}
-	memset(entry, 0, sizeof(*entry));
 	atomic_set(&entry->usecnt, 1);
 	entry->type = CMD_CALLBACK;
 	entry->cmd = cmd;
@@ -1645,9 +1641,9 @@
 
 /* Called only as scheduled task after noticing card timeout in interrupt
  * context */
-static void handle_reset_queue(void *data)
+static void handle_reset_queue(struct work_struct *work)
 {
-	local_info_t *local = (local_info_t *) data;
+	local_info_t *local = container_of(work, local_info_t, reset_queue);
 
 	printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name);
 	prism2_hw_reset(local->dev);
@@ -2896,9 +2892,10 @@
 
 /* Called only as a scheduled task when communications quality values should
  * be updated. */
-static void handle_comms_qual_update(void *data)
+static void handle_comms_qual_update(struct work_struct *work)
 {
-	local_info_t *local = data;
+	local_info_t *local =
+		container_of(work, local_info_t, comms_qual_update);
 	prism2_update_comms_qual(local->dev);
 }
 
@@ -3015,14 +3012,12 @@
 	iface = netdev_priv(dev);
 	local = iface->local;
 
-	new_entry = (struct set_tim_data *)
-		kmalloc(sizeof(*new_entry), GFP_ATOMIC);
+	new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
 	if (new_entry == NULL) {
 		printk(KERN_DEBUG "%s: prism2_set_tim: kmalloc failed\n",
 		       local->dev->name);
 		return -ENOMEM;
 	}
-	memset(new_entry, 0, sizeof(*new_entry));
 	new_entry->aid = aid;
 	new_entry->set = set;
 
@@ -3050,9 +3045,9 @@
 }
 
 
-static void handle_set_tim_queue(void *data)
+static void handle_set_tim_queue(struct work_struct *work)
 {
-	local_info_t *local = (local_info_t *) data;
+	local_info_t *local = container_of(work, local_info_t, set_tim_queue);
 	struct set_tim_data *entry;
 	u16 val;
 
@@ -3209,15 +3204,15 @@
 	local->scan_channel_mask = 0xffff;
 
 	/* Initialize task queue structures */
-	INIT_WORK(&local->reset_queue, handle_reset_queue, local);
+	INIT_WORK(&local->reset_queue, handle_reset_queue);
 	INIT_WORK(&local->set_multicast_list_queue,
-		  hostap_set_multicast_list_queue, local->dev);
+		  hostap_set_multicast_list_queue);
 
-	INIT_WORK(&local->set_tim_queue, handle_set_tim_queue, local);
+	INIT_WORK(&local->set_tim_queue, handle_set_tim_queue);
 	INIT_LIST_HEAD(&local->set_tim_list);
 	spin_lock_init(&local->set_tim_lock);
 
-	INIT_WORK(&local->comms_qual_update, handle_comms_qual_update, local);
+	INIT_WORK(&local->comms_qual_update, handle_comms_qual_update);
 
 	/* Initialize tasklets for handling hardware IRQ related operations
 	 * outside hw IRQ handler */
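
Alongside the handler conversions, the hostap hunks fold kmalloc() followed by
memset(..., 0, ...) into kzalloc(), and array allocations into kcalloc(), which
also checks the element-count multiplication for overflow. A short sketch under
the same assumptions (the entry type is made up):

#include <linux/slab.h>
#include <linux/types.h>

struct foo_cmd_entry {
	int type;
	int cmd;
};

static struct foo_cmd_entry *foo_alloc_entry(void)
{
	/* One zeroed object: kzalloc() replaces kmalloc() + memset(). */
	return kzalloc(sizeof(struct foo_cmd_entry), GFP_ATOMIC);
}

static struct foo_cmd_entry *foo_alloc_entries(size_t n)
{
	/* A zeroed array: kcalloc() additionally guards n * size
	 * against overflow. */
	return kcalloc(n, sizeof(struct foo_cmd_entry), GFP_ATOMIC);
}
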
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 50f72d8..b6a02a0 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -327,11 +327,10 @@
 	ptr = (u8 *) pos;
 
 	new_count = left / result_size;
-	results = kmalloc(new_count * sizeof(struct hfa384x_hostscan_result),
+	results = kcalloc(new_count, sizeof(struct hfa384x_hostscan_result),
 			  GFP_ATOMIC);
 	if (results == NULL)
 		return;
-	memset(results, 0, new_count * sizeof(struct hfa384x_hostscan_result));
 
 	for (i = 0; i < new_count; i++) {
 		memcpy(&results[i], ptr, copy_len);
@@ -474,9 +473,9 @@
 
 /* Called only as scheduled task after receiving info frames (used to avoid
  * spending too much time in HW IRQ handler). */
-static void handle_info_queue(void *data)
+static void handle_info_queue(struct work_struct *work)
 {
-	local_info_t *local = (local_info_t *) data;
+	local_info_t *local = container_of(work, local_info_t, info_queue);
 
 	if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS,
 			       &local->pending_info))
@@ -493,7 +492,7 @@
 {
 	skb_queue_head_init(&local->info_list);
 #ifndef PRISM2_NO_STATION_MODES
-	INIT_WORK(&local->info_queue, handle_info_queue, local);
+	INIT_WORK(&local->info_queue, handle_info_queue);
 #endif /* PRISM2_NO_STATION_MODES */
 }
 
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index d061fb3..3b7b806 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -181,12 +181,10 @@
 		struct ieee80211_crypt_data *new_crypt;
 
 		/* take WEP into use */
-		new_crypt = (struct ieee80211_crypt_data *)
-			kmalloc(sizeof(struct ieee80211_crypt_data),
+		new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
 				GFP_KERNEL);
 		if (new_crypt == NULL)
 			return -ENOMEM;
-		memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
 		new_crypt->ops = ieee80211_get_crypto_ops("WEP");
 		if (!new_crypt->ops) {
 			request_module("ieee80211_crypt_wep");
@@ -3320,14 +3318,12 @@
 
 		prism2_crypt_delayed_deinit(local, crypt);
 
-		new_crypt = (struct ieee80211_crypt_data *)
-			kmalloc(sizeof(struct ieee80211_crypt_data),
+		new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
 				GFP_KERNEL);
 		if (new_crypt == NULL) {
 			ret = -ENOMEM;
 			goto done;
 		}
-		memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
 		new_crypt->ops = ops;
 		new_crypt->priv = new_crypt->ops->init(i);
 		if (new_crypt->priv == NULL) {
@@ -3538,14 +3534,12 @@
 
 		prism2_crypt_delayed_deinit(local, crypt);
 
-		new_crypt = (struct ieee80211_crypt_data *)
-			kmalloc(sizeof(struct ieee80211_crypt_data),
+		new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
 				GFP_KERNEL);
 		if (new_crypt == NULL) {
 			ret = -ENOMEM;
 			goto done;
 		}
-		memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
 		new_crypt->ops = ops;
 		new_crypt->priv = new_crypt->ops->init(param->u.crypt.idx);
 		if (new_crypt->priv == NULL) {
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 53374fc..0796be9 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -767,14 +767,14 @@
 
 /* TODO: to be further implemented as soon as Prism2 fully supports
  *       GroupAddresses and correct documentation is available */
-void hostap_set_multicast_list_queue(void *data)
+void hostap_set_multicast_list_queue(struct work_struct *work)
 {
-	struct net_device *dev = (struct net_device *) data;
+	local_info_t *local =
+		container_of(work, local_info_t, set_multicast_list_queue);
+	struct net_device *dev = local->dev;
 	struct hostap_interface *iface;
-	local_info_t *local;
 
 	iface = netdev_priv(dev);
-	local = iface->local;
 	if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
 			    local->is_promisc)) {
 		printk(KERN_INFO "%s: %sabling promiscuous mode failed\n",
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index c2fa011..c4f6020 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -300,10 +300,9 @@
 	struct hostap_interface *iface;
 	struct hostap_pci_priv *hw_priv;
 
-	hw_priv = kmalloc(sizeof(*hw_priv), GFP_KERNEL);
+	hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL);
 	if (hw_priv == NULL)
 		return -ENOMEM;
-	memset(hw_priv, 0, sizeof(*hw_priv));
 
 	if (pci_enable_device(pdev))
 		goto err_out_free;
@@ -425,8 +424,14 @@
 static int prism2_pci_resume(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
+	int err;
 
-	pci_enable_device(pdev);
+	err = pci_enable_device(pdev);
+	if (err) {
+		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+		       dev->name);
+		return err;
+	}
 	pci_restore_state(pdev);
 	prism2_hw_config(dev, 0);
 	if (netif_running(dev)) {
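
pci_enable_device() can fail on resume and is marked __must_check in current
kernels, so the resume paths in this series propagate the error instead of
ignoring it. The shape of the fix, sketched with a hypothetical
foo_pci_resume():

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

static int foo_pci_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
		       dev->name);
		return err;
	}
	pci_restore_state(pdev);
	/* ... reprogram the hardware and restart the queue ... */
	return 0;
}
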
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index bc81b13..e235e06 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -447,10 +447,9 @@
 	int tmd7160;
 	struct hostap_plx_priv *hw_priv;
 
-	hw_priv = kmalloc(sizeof(*hw_priv), GFP_KERNEL);
+	hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL);
 	if (hw_priv == NULL)
 		return -ENOMEM;
-	memset(hw_priv, 0, sizeof(*hw_priv));
 
 	if (pci_enable_device(pdev))
 		goto err_out_free;
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 4e4eaa2..dd9ba4a 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -316,7 +316,7 @@
 				     struct ipw2100_fw *fw);
 static int ipw2100_ucode_download(struct ipw2100_priv *priv,
 				  struct ipw2100_fw *fw);
-static void ipw2100_wx_event_work(struct ipw2100_priv *priv);
+static void ipw2100_wx_event_work(struct work_struct *work);
 static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev);
 static struct iw_handler_def ipw2100_wx_handler_def;
 
@@ -679,7 +679,8 @@
 			queue_delayed_work(priv->workqueue, &priv->reset_work,
 					   priv->reset_backoff * HZ);
 		else
-			queue_work(priv->workqueue, &priv->reset_work);
+			queue_delayed_work(priv->workqueue, &priv->reset_work,
+					   0);
 
 		if (priv->reset_backoff < MAX_RESET_BACKOFF)
 			priv->reset_backoff++;
@@ -1873,8 +1874,10 @@
 	netif_stop_queue(priv->net_dev);
 }
 
-static void ipw2100_reset_adapter(struct ipw2100_priv *priv)
+static void ipw2100_reset_adapter(struct work_struct *work)
 {
+	struct ipw2100_priv *priv =
+		container_of(work, struct ipw2100_priv, reset_work.work);
 	unsigned long flags;
 	union iwreq_data wrqu = {
 		.ap_addr = {
@@ -2071,9 +2074,9 @@
 		return;
 
 	if (priv->status & STATUS_SECURITY_UPDATED)
-		queue_work(priv->workqueue, &priv->security_work);
+		queue_delayed_work(priv->workqueue, &priv->security_work, 0);
 
-	queue_work(priv->workqueue, &priv->wx_event_work);
+	queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0);
 }
 
 static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
@@ -5524,8 +5527,11 @@
 	return err;
 }
 
-static void ipw2100_security_work(struct ipw2100_priv *priv)
+static void ipw2100_security_work(struct work_struct *work)
 {
+	struct ipw2100_priv *priv =
+		container_of(work, struct ipw2100_priv, security_work.work);
+
 	/* If we happen to have reconnected before we get a chance to
 	 * process this, then update the security settings--which causes
 	 * a disassociation to occur */
@@ -5748,7 +5754,7 @@
 
 	priv->reset_backoff = 0;
 	mutex_unlock(&priv->action_mutex);
-	ipw2100_reset_adapter(priv);
+	ipw2100_reset_adapter(&priv->reset_work.work);
 	return 0;
 
       done:
@@ -5827,19 +5833,6 @@
 	schedule_reset(priv);
 }
 
-/*
- * TODO: reimplement it so that it reads statistics
- *       from the adapter using ordinal tables
- *       instead of/in addition to collecting them
- *       in the driver
- */
-static struct net_device_stats *ipw2100_stats(struct net_device *dev)
-{
-	struct ipw2100_priv *priv = ieee80211_priv(dev);
-
-	return &priv->ieee->stats;
-}
-
 static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
 {
 	/* This is called when wpa_supplicant loads and closes the driver
@@ -5923,9 +5916,10 @@
 	.get_drvinfo = ipw_ethtool_get_drvinfo,
 };
 
-static void ipw2100_hang_check(void *adapter)
+static void ipw2100_hang_check(struct work_struct *work)
 {
-	struct ipw2100_priv *priv = adapter;
+	struct ipw2100_priv *priv =
+		container_of(work, struct ipw2100_priv, hang_check.work);
 	unsigned long flags;
 	u32 rtc = 0xa5a5a5a5;
 	u32 len = sizeof(rtc);
@@ -5965,9 +5959,10 @@
 	spin_unlock_irqrestore(&priv->low_lock, flags);
 }
 
-static void ipw2100_rf_kill(void *adapter)
+static void ipw2100_rf_kill(struct work_struct *work)
 {
-	struct ipw2100_priv *priv = adapter;
+	struct ipw2100_priv *priv =
+		container_of(work, struct ipw2100_priv, rf_kill.work);
 	unsigned long flags;
 
 	spin_lock_irqsave(&priv->low_lock, flags);
@@ -6022,7 +6017,6 @@
 	dev->open = ipw2100_open;
 	dev->stop = ipw2100_close;
 	dev->init = ipw2100_net_init;
-	dev->get_stats = ipw2100_stats;
 	dev->ethtool_ops = &ipw2100_ethtool_ops;
 	dev->tx_timeout = ipw2100_tx_timeout;
 	dev->wireless_handlers = &ipw2100_wx_handler_def;
@@ -6117,14 +6111,11 @@
 
 	priv->workqueue = create_workqueue(DRV_NAME);
 
-	INIT_WORK(&priv->reset_work,
-		  (void (*)(void *))ipw2100_reset_adapter, priv);
-	INIT_WORK(&priv->security_work,
-		  (void (*)(void *))ipw2100_security_work, priv);
-	INIT_WORK(&priv->wx_event_work,
-		  (void (*)(void *))ipw2100_wx_event_work, priv);
-	INIT_WORK(&priv->hang_check, ipw2100_hang_check, priv);
-	INIT_WORK(&priv->rf_kill, ipw2100_rf_kill, priv);
+	INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
+	INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
+	INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
+	INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check);
+	INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
 
 	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
 		     ipw2100_irq_tasklet, (unsigned long)priv);
@@ -6229,7 +6220,7 @@
 	/* Allocate and initialize the Tx/Rx queues and lists */
 	if (ipw2100_queues_allocate(priv)) {
 		printk(KERN_WARNING DRV_NAME
-		       "Error calilng ipw2100_queues_allocate.\n");
+		       "Error calling ipw2100_queues_allocate.\n");
 		err = -ENOMEM;
 		goto fail;
 	}
@@ -6423,6 +6414,7 @@
 {
 	struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
 	struct net_device *dev = priv->net_dev;
+	int err;
 	u32 val;
 
 	if (IPW2100_PM_DISABLED)
@@ -6433,7 +6425,12 @@
 	IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name);
 
 	pci_set_power_state(pci_dev, PCI_D0);
-	pci_enable_device(pci_dev);
+	err = pci_enable_device(pci_dev);
+	if (err) {
+		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+		       dev->name);
+		return err;
+	}
 	pci_restore_state(pci_dev);
 
 	/*
@@ -7568,11 +7565,10 @@
 		return -EINVAL;
 
 	if (wrqu->data.length) {
-		buf = kmalloc(wrqu->data.length, GFP_KERNEL);
+		buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
 		if (buf == NULL)
 			return -ENOMEM;
 
-		memcpy(buf, extra, wrqu->data.length);
 		kfree(ieee->wpa_ie);
 		ieee->wpa_ie = buf;
 		ieee->wpa_ie_len = wrqu->data.length;
@@ -8290,8 +8286,10 @@
 	.get_wireless_stats = ipw2100_wx_wireless_stats,
 };
 
-static void ipw2100_wx_event_work(struct ipw2100_priv *priv)
+static void ipw2100_wx_event_work(struct work_struct *work)
 {
+	struct ipw2100_priv *priv =
+		container_of(work, struct ipw2100_priv, wx_event_work.work);
 	union iwreq_data wrqu;
 	int len = ETH_ALEN;
 
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h
index 55b7227..de7d384 100644
--- a/drivers/net/wireless/ipw2100.h
+++ b/drivers/net/wireless/ipw2100.h
@@ -583,11 +583,11 @@
 	struct tasklet_struct irq_tasklet;
 
 	struct workqueue_struct *workqueue;
-	struct work_struct reset_work;
-	struct work_struct security_work;
-	struct work_struct wx_event_work;
-	struct work_struct hang_check;
-	struct work_struct rf_kill;
+	struct delayed_work reset_work;
+	struct delayed_work security_work;
+	struct delayed_work wx_event_work;
+	struct delayed_work hang_check;
+	struct delayed_work rf_kill;
 
 	u32 interrupts;
 	int tx_interrupts;
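
Because these ipw2100 handlers are sometimes queued with a delay (the reset
back-off, for example), their work items have to be struct delayed_work
everywhere: an immediate run becomes queue_delayed_work(..., 0), and a direct
synchronous call passes the embedded .work member, as
ipw2100_reset_adapter(&priv->reset_work.work) does above. A sketch with
hypothetical names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct foo_priv {
	struct workqueue_struct *wq;
	struct delayed_work reset_work;
	unsigned int reset_backoff;
};

static void foo_reset(struct work_struct *work)
{
	struct foo_priv *priv =
		container_of(work, struct foo_priv, reset_work.work);

	/* ... reset the adapter ... */
	priv->reset_backoff = 0;
}

static void foo_schedule_reset(struct foo_priv *priv)
{
	if (priv->reset_backoff)
		queue_delayed_work(priv->wq, &priv->reset_work,
				   priv->reset_backoff * HZ);
	else
		queue_delayed_work(priv->wq, &priv->reset_work, 0);
}

static void foo_force_reset(struct foo_priv *priv)
{
	/* Synchronous call: hand the handler its embedded work member. */
	foo_reset(&priv->reset_work.work);
}
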
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 1f74281..22cb3fb 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -70,7 +70,7 @@
 #define VQ
 #endif
 
-#define IPW2200_VERSION "1.1.4" VK VD VM VP VR VQ
+#define IPW2200_VERSION "1.2.0" VK VD VM VP VR VQ
 #define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
 #define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
 #define DRV_VERSION     IPW2200_VERSION
@@ -187,9 +187,9 @@
 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
 static void ipw_rx_queue_replenish(void *);
 static int ipw_up(struct ipw_priv *);
-static void ipw_bg_up(void *);
+static void ipw_bg_up(struct work_struct *work);
 static void ipw_down(struct ipw_priv *);
-static void ipw_bg_down(void *);
+static void ipw_bg_down(struct work_struct *work);
 static int ipw_config(struct ipw_priv *);
 static int init_supported_rates(struct ipw_priv *priv,
 				struct ipw_supported_rates *prates);
@@ -862,11 +862,12 @@
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipw_bg_led_link_on(void *data)
+static void ipw_bg_led_link_on(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, led_link_on.work);
 	mutex_lock(&priv->mutex);
-	ipw_led_link_on(data);
+	ipw_led_link_on(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -906,11 +907,12 @@
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipw_bg_led_link_off(void *data)
+static void ipw_bg_led_link_off(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, led_link_off.work);
 	mutex_lock(&priv->mutex);
-	ipw_led_link_off(data);
+	ipw_led_link_off(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -985,11 +987,12 @@
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipw_bg_led_activity_off(void *data)
+static void ipw_bg_led_activity_off(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, led_act_off.work);
 	mutex_lock(&priv->mutex);
-	ipw_led_activity_off(data);
+	ipw_led_activity_off(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -2228,11 +2231,12 @@
 	}
 }
 
-static void ipw_bg_adapter_restart(void *data)
+static void ipw_bg_adapter_restart(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, adapter_restart);
 	mutex_lock(&priv->mutex);
-	ipw_adapter_restart(data);
+	ipw_adapter_restart(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -2249,11 +2253,12 @@
 	}
 }
 
-static void ipw_bg_scan_check(void *data)
+static void ipw_bg_scan_check(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, scan_check.work);
 	mutex_lock(&priv->mutex);
-	ipw_scan_check(data);
+	ipw_scan_check(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -3831,17 +3836,19 @@
 	return 1;
 }
 
-static void ipw_bg_disassociate(void *data)
+static void ipw_bg_disassociate(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, disassociate);
 	mutex_lock(&priv->mutex);
-	ipw_disassociate(data);
+	ipw_disassociate(priv);
 	mutex_unlock(&priv->mutex);
 }
 
-static void ipw_system_config(void *data)
+static void ipw_system_config(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, system_config);
 
 #ifdef CONFIG_IPW2200_PROMISCUOUS
 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
@@ -4208,11 +4215,12 @@
 			   IPW_STATS_INTERVAL);
 }
 
-static void ipw_bg_gather_stats(void *data)
+static void ipw_bg_gather_stats(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, gather_stats.work);
 	mutex_lock(&priv->mutex);
-	ipw_gather_stats(data);
+	ipw_gather_stats(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -4268,8 +4276,8 @@
 		if (!(priv->status & STATUS_ROAMING)) {
 			priv->status |= STATUS_ROAMING;
 			if (!(priv->status & STATUS_SCANNING))
-				queue_work(priv->workqueue,
-					   &priv->request_scan);
+				queue_delayed_work(priv->workqueue,
+						   &priv->request_scan, 0);
 		}
 		return;
 	}
@@ -4607,8 +4615,8 @@
 #ifdef CONFIG_IPW2200_MONITOR
 			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
 				priv->status |= STATUS_SCAN_FORCED;
-				queue_work(priv->workqueue,
-					   &priv->request_scan);
+				queue_delayed_work(priv->workqueue,
+						   &priv->request_scan, 0);
 				break;
 			}
 			priv->status &= ~STATUS_SCAN_FORCED;
@@ -4631,8 +4639,8 @@
 					/* Don't schedule if we aborted the scan */
 					priv->status &= ~STATUS_ROAMING;
 			} else if (priv->status & STATUS_SCAN_PENDING)
-				queue_work(priv->workqueue,
-					   &priv->request_scan);
+				queue_delayed_work(priv->workqueue,
+						   &priv->request_scan, 0);
 			else if (priv->config & CFG_BACKGROUND_SCAN
 				 && priv->status & STATUS_ASSOCIATED)
 				queue_delayed_work(priv->workqueue,
@@ -5055,11 +5063,12 @@
 	ipw_rx_queue_restock(priv);
 }
 
-static void ipw_bg_rx_queue_replenish(void *data)
+static void ipw_bg_rx_queue_replenish(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, rx_replenish);
 	mutex_lock(&priv->mutex);
-	ipw_rx_queue_replenish(data);
+	ipw_rx_queue_replenish(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -5489,9 +5498,10 @@
 	return 1;
 }
 
-static void ipw_merge_adhoc_network(void *data)
+static void ipw_merge_adhoc_network(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, merge_networks);
 	struct ieee80211_network *network = NULL;
 	struct ipw_network_match match = {
 		.network = priv->assoc_network
@@ -5948,11 +5958,12 @@
 			   priv->assoc_request.beacon_interval);
 }
 
-static void ipw_bg_adhoc_check(void *data)
+static void ipw_bg_adhoc_check(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, adhoc_check.work);
 	mutex_lock(&priv->mutex);
-	ipw_adhoc_check(data);
+	ipw_adhoc_check(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -6299,19 +6310,26 @@
 	return err;
 }
 
-static int ipw_request_passive_scan(struct ipw_priv *priv) {
-  	return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
-}
-
-static int ipw_request_scan(struct ipw_priv *priv) {
-	return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
-}
-
-static void ipw_bg_abort_scan(void *data)
+static void ipw_request_passive_scan(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, request_passive_scan);
+  	ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
+}
+
+static void ipw_request_scan(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, request_scan.work);
+	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
+}
+
+static void ipw_bg_abort_scan(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, abort_scan);
 	mutex_lock(&priv->mutex);
-	ipw_abort_scan(data);
+	ipw_abort_scan(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -6920,8 +6938,8 @@
 }
 
 /*
-* handling the beaconing responces. if we get different QoS setting
-* of the network from the the associated setting adjust the QoS
+* handling the beaconing responses. if we get different QoS setting
+* off the network from the associated setting, adjust the QoS
 * setting
 */
 static int ipw_qos_association_resp(struct ipw_priv *priv,
@@ -7084,9 +7102,10 @@
 /*
 * background support to run QoS activate functionality
 */
-static void ipw_bg_qos_activate(void *data)
+static void ipw_bg_qos_activate(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, qos_activate);
 
 	if (priv == NULL)
 		return;
@@ -7394,11 +7413,12 @@
 	priv->status &= ~STATUS_ROAMING;
 }
 
-static void ipw_bg_roam(void *data)
+static void ipw_bg_roam(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, roam);
 	mutex_lock(&priv->mutex);
-	ipw_roam(data);
+	ipw_roam(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -7479,8 +7499,8 @@
 						   &priv->request_scan,
 						   SCAN_INTERVAL);
 			else
-				queue_work(priv->workqueue,
-					   &priv->request_scan);
+				queue_delayed_work(priv->workqueue,
+						   &priv->request_scan, 0);
 		}
 
 		return 0;
@@ -7491,11 +7511,12 @@
 	return 1;
 }
 
-static void ipw_bg_associate(void *data)
+static void ipw_bg_associate(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, associate);
 	mutex_lock(&priv->mutex);
-	ipw_associate(data);
+	ipw_associate(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -7656,7 +7677,8 @@
 
 	/* Big bitfield of all the fields we provide in radiotap */
 	ipw_rt->rt_hdr.it_present =
-	    ((1 << IEEE80211_RADIOTAP_FLAGS) |
+	    ((1 << IEEE80211_RADIOTAP_TSFT) |
+	     (1 << IEEE80211_RADIOTAP_FLAGS) |
 	     (1 << IEEE80211_RADIOTAP_RATE) |
 	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
 	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
@@ -7665,10 +7687,14 @@
 
 	/* Zero the flags, we'll add to them as we go */
 	ipw_rt->rt_flags = 0;
-	ipw_rt->rt_tsf = 0ULL;
+	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
+			       frame->parent_tsf[2] << 16 |
+			       frame->parent_tsf[1] << 8  |
+			       frame->parent_tsf[0]);
 
 	/* Convert signal to DBM */
 	ipw_rt->rt_dbmsignal = antsignal;
+	ipw_rt->rt_dbmnoise = frame->noise;
 
 	/* Convert the channel data and set the flags */
 	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
@@ -7868,7 +7894,8 @@
 
 	/* Big bitfield of all the fields we provide in radiotap */
 	ipw_rt->rt_hdr.it_present =
-	    ((1 << IEEE80211_RADIOTAP_FLAGS) |
+	    ((1 << IEEE80211_RADIOTAP_TSFT) |
+	     (1 << IEEE80211_RADIOTAP_FLAGS) |
 	     (1 << IEEE80211_RADIOTAP_RATE) |
 	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
 	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
@@ -7877,7 +7904,10 @@
 
 	/* Zero the flags, we'll add to them as we go */
 	ipw_rt->rt_flags = 0;
-	ipw_rt->rt_tsf = 0ULL;
+	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
+			       frame->parent_tsf[2] << 16 |
+			       frame->parent_tsf[1] << 8  |
+			       frame->parent_tsf[0]);
 
 	/* Convert to DBM */
 	ipw_rt->rt_dbmsignal = signal;
@@ -8276,7 +8306,7 @@
 				    ("Notification: subtype=%02X flags=%02X size=%d\n",
 				     pkt->u.notification.subtype,
 				     pkt->u.notification.flags,
-				     pkt->u.notification.size);
+				     le16_to_cpu(pkt->u.notification.size));
 				ipw_rx_notification(priv, &pkt->u.notification);
 				break;
 			}
@@ -9410,7 +9440,7 @@
 
 	IPW_DEBUG_WX("Start scan\n");
 
-	queue_work(priv->workqueue, &priv->request_scan);
+	queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
 
 	return 0;
 }
@@ -10547,11 +10577,12 @@
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipw_bg_rf_kill(void *data)
+static void ipw_bg_rf_kill(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, rf_kill.work);
 	mutex_lock(&priv->mutex);
-	ipw_rf_kill(data);
+	ipw_rf_kill(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -10582,11 +10613,12 @@
 		queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
 }
 
-static void ipw_bg_link_up(void *data)
+static void ipw_bg_link_up(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, link_up);
 	mutex_lock(&priv->mutex);
-	ipw_link_up(data);
+	ipw_link_up(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -10606,15 +10638,16 @@
 
 	if (!(priv->status & STATUS_EXIT_PENDING)) {
 		/* Queue up another scan... */
-		queue_work(priv->workqueue, &priv->request_scan);
+		queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
 	}
 }
 
-static void ipw_bg_link_down(void *data)
+static void ipw_bg_link_down(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, link_down);
 	mutex_lock(&priv->mutex);
-	ipw_link_down(data);
+	ipw_link_down(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -10626,38 +10659,30 @@
 	init_waitqueue_head(&priv->wait_command_queue);
 	init_waitqueue_head(&priv->wait_state);
 
-	INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
-	INIT_WORK(&priv->associate, ipw_bg_associate, priv);
-	INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
-	INIT_WORK(&priv->system_config, ipw_system_config, priv);
-	INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
-	INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
-	INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
-	INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
-	INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
-	INIT_WORK(&priv->request_scan,
-		  (void (*)(void *))ipw_request_scan, priv);
-	INIT_WORK(&priv->request_passive_scan,
-		  (void (*)(void *))ipw_request_passive_scan, priv);
-	INIT_WORK(&priv->gather_stats,
-		  (void (*)(void *))ipw_bg_gather_stats, priv);
-	INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
-	INIT_WORK(&priv->roam, ipw_bg_roam, priv);
-	INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
-	INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
-	INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
-	INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
-		  priv);
-	INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
-		  priv);
-	INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
-		  priv);
-	INIT_WORK(&priv->merge_networks,
-		  (void (*)(void *))ipw_merge_adhoc_network, priv);
+	INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
+	INIT_WORK(&priv->associate, ipw_bg_associate);
+	INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
+	INIT_WORK(&priv->system_config, ipw_system_config);
+	INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
+	INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
+	INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
+	INIT_WORK(&priv->up, ipw_bg_up);
+	INIT_WORK(&priv->down, ipw_bg_down);
+	INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
+	INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
+	INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
+	INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
+	INIT_WORK(&priv->roam, ipw_bg_roam);
+	INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
+	INIT_WORK(&priv->link_up, ipw_bg_link_up);
+	INIT_WORK(&priv->link_down, ipw_bg_link_down);
+	INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
+	INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
+	INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
+	INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
 
 #ifdef CONFIG_IPW2200_QOS
-	INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
-		  priv);
+	INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
 #endif				/* CONFIG_IPW2200_QOS */
 
 	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
@@ -11129,14 +11154,13 @@
 		return -EIO;
 
 	if (cmdlog && !priv->cmdlog) {
-		priv->cmdlog = kmalloc(sizeof(*priv->cmdlog) * cmdlog,
+		priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
 				       GFP_KERNEL);
 		if (priv->cmdlog == NULL) {
 			IPW_ERROR("Error allocating %d command log entries.\n",
 				  cmdlog);
 			return -ENOMEM;
 		} else {
-			memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog);
 			priv->cmdlog_len = cmdlog;
 		}
 	}
@@ -11190,7 +11214,8 @@
 
 			/* If configure to try and auto-associate, kick
 			 * off a scan. */
-			queue_work(priv->workqueue, &priv->request_scan);
+			queue_delayed_work(priv->workqueue,
+					   &priv->request_scan, 0);
 
 			return 0;
 		}
@@ -11211,11 +11236,12 @@
 	return -EIO;
 }
 
-static void ipw_bg_up(void *data)
+static void ipw_bg_up(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, up);
 	mutex_lock(&priv->mutex);
-	ipw_up(data);
+	ipw_up(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -11282,11 +11308,12 @@
 	ipw_led_radio_off(priv);
 }
 
-static void ipw_bg_down(void *data)
+static void ipw_bg_down(struct work_struct *work)
 {
-	struct ipw_priv *priv = data;
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, down);
 	mutex_lock(&priv->mutex);
-	ipw_down(data);
+	ipw_down(priv);
 	mutex_unlock(&priv->mutex);
 }
 
@@ -11727,12 +11754,18 @@
 {
 	struct ipw_priv *priv = pci_get_drvdata(pdev);
 	struct net_device *dev = priv->net_dev;
+	int err;
 	u32 val;
 
 	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
 
 	pci_set_power_state(pdev, PCI_D0);
-	pci_enable_device(pdev);
+	err = pci_enable_device(pdev);
+	if (err) {
+		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+		       dev->name);
+		return err;
+	}
 	pci_restore_state(pdev);
 
 	/*
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index dad5eed..626a240 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -1290,21 +1290,21 @@
 
 	struct workqueue_struct *workqueue;
 
-	struct work_struct adhoc_check;
+	struct delayed_work adhoc_check;
 	struct work_struct associate;
 	struct work_struct disassociate;
 	struct work_struct system_config;
 	struct work_struct rx_replenish;
-	struct work_struct request_scan;
+	struct delayed_work request_scan;
   	struct work_struct request_passive_scan;
 	struct work_struct adapter_restart;
-	struct work_struct rf_kill;
+	struct delayed_work rf_kill;
 	struct work_struct up;
 	struct work_struct down;
-	struct work_struct gather_stats;
+	struct delayed_work gather_stats;
 	struct work_struct abort_scan;
 	struct work_struct roam;
-	struct work_struct scan_check;
+	struct delayed_work scan_check;
 	struct work_struct link_up;
 	struct work_struct link_down;
 
@@ -1319,9 +1319,9 @@
 	u32 led_ofdm_on;
 	u32 led_ofdm_off;
 
-	struct work_struct led_link_on;
-	struct work_struct led_link_off;
-	struct work_struct led_act_off;
+	struct delayed_work led_link_on;
+	struct delayed_work led_link_off;
+	struct delayed_work led_act_off;
 	struct work_struct merge_networks;
 
 	struct ipw_cmd_log *cmdlog;
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index 6714e0d..644b474 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -735,10 +735,7 @@
 static int netwave_pcmcia_config(struct pcmcia_device *link) {
     struct net_device *dev = link->priv;
     netwave_private *priv = netdev_priv(dev);
-    tuple_t tuple;
-    cisparse_t parse;
     int i, j, last_ret, last_fn;
-    u_char buf[64];
     win_req_t req;
     memreq_t mem;
     u_char __iomem *ramBase = NULL;
@@ -746,21 +743,6 @@
     DEBUG(0, "netwave_pcmcia_config(0x%p)\n", link);
 
     /*
-      This reads the card's CONFIG tuple to find its configuration
-      registers.
-    */
-    tuple.Attributes = 0;
-    tuple.TupleData = (cisdata_t *) buf;
-    tuple.TupleDataMax = 64;
-    tuple.TupleOffset = 0;
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
-
-    /*
      *  Try allocating IO ports.  This tries a few fixed addresses.
      *  If you want, you can also read the card's config table to
      *  pick addresses -- see the serial driver for an example.
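
The PCMCIA hunks in this series stop walking the CIS by hand: the core now
parses CISTPL_CONFIG itself and exposes the manufacturer and card IDs on
struct pcmcia_device, so a check like hostap_cs's SanDisk detection becomes a
plain comparison. Minimal sketch, assuming a struct pcmcia_device *link as in
the drivers above:

#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

static int foo_is_sandisk(struct pcmcia_device *link)
{
	/* No pcmcia_get_first_tuple()/parse round-trip needed; the
	 * core filled these in from the CIS at probe time. */
	return link->manf_id == 0xd601 && link->card_id == 0x0101;
}
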
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 336caba..936c888 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -980,9 +980,11 @@
 }
 
 /* Search scan results for requested BSSID, join it if found */
-static void orinoco_join_ap(struct net_device *dev)
+static void orinoco_join_ap(struct work_struct *work)
 {
-	struct orinoco_private *priv = netdev_priv(dev);
+	struct orinoco_private *priv =
+		container_of(work, struct orinoco_private, join_work);
+	struct net_device *dev = priv->ndev;
 	struct hermes *hw = &priv->hw;
 	int err;
 	unsigned long flags;
@@ -1055,9 +1057,11 @@
 }
 
 /* Send new BSSID to userspace */
-static void orinoco_send_wevents(struct net_device *dev)
+static void orinoco_send_wevents(struct work_struct *work)
 {
-	struct orinoco_private *priv = netdev_priv(dev);
+	struct orinoco_private *priv =
+		container_of(work, struct orinoco_private, wevent_work);
+	struct net_device *dev = priv->ndev;
 	struct hermes *hw = &priv->hw;
 	union iwreq_data wrqu;
 	int err;
@@ -1864,9 +1868,11 @@
 
 /* This must be called from user context, without locks held - use
  * schedule_work() */
-static void orinoco_reset(struct net_device *dev)
+static void orinoco_reset(struct work_struct *work)
 {
-	struct orinoco_private *priv = netdev_priv(dev);
+	struct orinoco_private *priv =
+		container_of(work, struct orinoco_private, reset_work);
+	struct net_device *dev = priv->ndev;
 	struct hermes *hw = &priv->hw;
 	int err;
 	unsigned long flags;
@@ -2434,9 +2440,9 @@
 	priv->hw_unavailable = 1; /* orinoco_init() must clear this
 				   * before anything else touches the
 				   * hardware */
-	INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev);
-	INIT_WORK(&priv->join_work, (void (*)(void *))orinoco_join_ap, dev);
-	INIT_WORK(&priv->wevent_work, (void (*)(void *))orinoco_send_wevents, dev);
+	INIT_WORK(&priv->reset_work, orinoco_reset);
+	INIT_WORK(&priv->join_work, orinoco_join_ap);
+	INIT_WORK(&priv->wevent_work, orinoco_send_wevents);
 
 	netif_carrier_off(dev);
 	priv->last_linkstatus = 0xffff;
@@ -3608,7 +3614,7 @@
 		printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name);
 
 		/* Firmware reset */
-		orinoco_reset(dev);
+		orinoco_reset(&priv->reset_work);
 	} else {
 		printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name);
 
@@ -4154,7 +4160,7 @@
 		return 0;
 
 	if (priv->broken_disableport) {
-		orinoco_reset(dev);
+		orinoco_reset(&priv->reset_work);
 		return 0;
 	}
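
orinoco's handlers used to receive the net_device directly; after the
conversion they receive the work_struct, so the private structure keeps a
back-pointer (priv->ndev) to find the device again. A minimal sketch, assuming
a similar layout:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct net_device *ndev;	/* back-pointer for work handlers */
	struct work_struct reset_work;
};

static void foo_reset(struct work_struct *work)
{
	struct foo_priv *priv =
		container_of(work, struct foo_priv, reset_work);
	struct net_device *dev = priv->ndev;

	printk(KERN_DEBUG "%s: scheduled reset\n", dev->name);
	/* ... reset the firmware/hardware for dev ... */
}

static void foo_init_work(struct foo_priv *priv, struct net_device *dev)
{
	priv->ndev = dev;
	/* INIT_WORK() no longer takes a data argument. */
	INIT_WORK(&priv->reset_work, foo_reset);
}
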
 
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index bc14689..d08ae8d 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -178,21 +178,6 @@
 	cisparse_t parse;
 	void __iomem *mem;
 
-	/*
-	 * This reads the card's CONFIG tuple to find its
-	 * configuration registers.
-	 */
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	tuple.Attributes = 0;
-	tuple.TupleData = buf;
-	tuple.TupleDataMax = sizeof(buf);
-	tuple.TupleOffset = 0;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present = parse.config.rmask[0];
-
 	/* Look up the current Vcc */
 	CS_CHECK(GetConfigurationInfo,
 		 pcmcia_get_configuration_info(link, &conf));
@@ -211,6 +196,10 @@
 	 * and most client drivers will only use the CIS to fill in
 	 * implementation-defined details.
 	 */
+	tuple.Attributes = 0;
+	tuple.TupleData = buf;
+	tuple.TupleDataMax = sizeof(buf);
+	tuple.TupleOffset = 0;
 	tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
 	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
 	while (1) {
diff --git a/drivers/net/wireless/orinoco_pci.h b/drivers/net/wireless/orinoco_pci.h
index be1abea..f4e5e06 100644
--- a/drivers/net/wireless/orinoco_pci.h
+++ b/drivers/net/wireless/orinoco_pci.h
@@ -60,7 +60,12 @@
 	int err;
 
 	pci_set_power_state(pdev, 0);
-	pci_enable_device(pdev);
+	err = pci_enable_device(pdev);
+	if (err) {
+		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+		       dev->name);
+		return err;
+	}
 	pci_restore_state(pdev);
 
 	err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED,
diff --git a/drivers/net/wireless/prism54/isl_38xx.c b/drivers/net/wireless/prism54/isl_38xx.c
index 23deee6..02fc67b 100644
--- a/drivers/net/wireless/prism54/isl_38xx.c
+++ b/drivers/net/wireless/prism54/isl_38xx.c
@@ -1,5 +1,4 @@
 /*
- *  
  *  Copyright (C) 2002 Intersil Americas Inc.
  *  Copyright (C) 2003-2004 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>_
  *
@@ -38,7 +37,7 @@
  * isl38xx_disable_interrupts - disable all interrupts
  * @device: pci memory base address
  *
- *  Instructs the device to disable all interrupt reporting by asserting 
+ *  Instructs the device to disable all interrupt reporting by asserting
  *  the IRQ line. New events may still show up in the interrupt identification
  *  register located at offset %ISL38XX_INT_IDENT_REG.
  */
@@ -204,17 +203,19 @@
 	/* enable the interrupt for detecting initialization */
 
 	/* Note: Do not enable other interrupts here. We want the
-	 * device to have come up first 100% before allowing any other 
+	 * device to have come up first 100% before allowing any other
 	 * interrupts. */
 	isl38xx_w32_flush(device_base, ISL38XX_INT_IDENT_INIT, ISL38XX_INT_EN_REG);
 	udelay(ISL38XX_WRITEIO_DELAY);  /* allow complete full reset */
 }
 
 void
-isl38xx_enable_common_interrupts(void __iomem *device_base) {
+isl38xx_enable_common_interrupts(void __iomem *device_base)
+{
 	u32 reg;
-	reg = ( ISL38XX_INT_IDENT_UPDATE | 
-			ISL38XX_INT_IDENT_SLEEP | ISL38XX_INT_IDENT_WAKEUP);
+
+	reg = ISL38XX_INT_IDENT_UPDATE | ISL38XX_INT_IDENT_SLEEP |
+	      ISL38XX_INT_IDENT_WAKEUP;
 	isl38xx_w32_flush(device_base, reg, ISL38XX_INT_EN_REG);
 	udelay(ISL38XX_WRITEIO_DELAY);
 }
@@ -234,23 +235,21 @@
 		/* send queues */
 	case ISL38XX_CB_TX_MGMTQ:
 		BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE);
+
 	case ISL38XX_CB_TX_DATA_LQ:
 	case ISL38XX_CB_TX_DATA_HQ:
 		BUG_ON(delta > ISL38XX_CB_TX_QSIZE);
 		return delta;
-		break;
 
 		/* receive queues */
 	case ISL38XX_CB_RX_MGMTQ:
 		BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE);
 		return ISL38XX_CB_MGMT_QSIZE - delta;
-		break;
 
 	case ISL38XX_CB_RX_DATA_LQ:
 	case ISL38XX_CB_RX_DATA_HQ:
 		BUG_ON(delta > ISL38XX_CB_RX_QSIZE);
 		return ISL38XX_CB_RX_QSIZE - delta;
-		break;
 	}
 	BUG();
 	return 0;
diff --git a/drivers/net/wireless/prism54/isl_38xx.h b/drivers/net/wireless/prism54/isl_38xx.h
index 8af2098..3fadcb6 100644
--- a/drivers/net/wireless/prism54/isl_38xx.h
+++ b/drivers/net/wireless/prism54/isl_38xx.h
@@ -1,5 +1,4 @@
 /*
- *  
  *  Copyright (C) 2002 Intersil Americas Inc.
  *
  *  This program is free software; you can redistribute it and/or modify
@@ -67,10 +66,10 @@
  * @base: (host) memory base address of the device
  * @val: 32bit value (host order) to write
  * @offset: byte offset into @base to write value to
- * 
+ *
  *  This helper takes care of writing a 32bit datum to the
- *  specified offset into the device's pci memory space, and making sure 
- *  the pci memory buffers get flushed by performing one harmless read 
+ *  specified offset into the device's pci memory space, and making sure
+ *  the pci memory buffers get flushed by performing one harmless read
  *  from the %ISL38XX_PCI_POSTING_FLUSH offset.
  */
 static inline void
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 286325c..96606ed 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -1,5 +1,4 @@
 /*
- *  
  *  Copyright (C) 2002 Intersil Americas Inc.
  *            (C) 2003,2004 Aurelien Alleaume <slts@free.fr>
  *            (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
@@ -55,12 +54,12 @@
  * prism54_mib_mode_helper - MIB change mode helper function
  * @mib: the &struct islpci_mib object to modify
  * @iw_mode: new mode (%IW_MODE_*)
- * 
+ *
  *  This is a helper function, hence it does not lock. Make sure
- *  caller deals with locking *if* necessary. This function sets the 
- *  mode-dependent mib values and does the mapping of the Linux 
- *  Wireless API modes to Device firmware modes. It also checks for 
- *  correct valid Linux wireless modes. 
+ *  caller deals with locking *if* necessary. This function sets the
+ *  mode-dependent mib values and does the mapping of the Linux
+ *  Wireless API modes to Device firmware modes. It also checks for
+ *  correct valid Linux wireless modes.
  */
 static int
 prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode)
@@ -118,7 +117,7 @@
  *
  *  this function initializes the struct given as @mib with defaults,
  *  of which many are retrieved from the global module parameter
- *  variables.  
+ *  variables.
  */
 
 void
@@ -134,7 +133,7 @@
 	authen = CARD_DEFAULT_AUTHEN;
 	wep = CARD_DEFAULT_WEP;
 	filter = CARD_DEFAULT_FILTER; /* (0) Do not filter un-encrypted data */
-	dot1x = CARD_DEFAULT_DOT1X; 
+	dot1x = CARD_DEFAULT_DOT1X;
 	mlme = CARD_DEFAULT_MLME_MODE;
 	conformance = CARD_DEFAULT_CONFORMANCE;
 	power = 127;
@@ -158,8 +157,9 @@
  * schedule_work(), thus we can as well use sleeping semaphore
  * locking */
 void
-prism54_update_stats(islpci_private *priv)
+prism54_update_stats(struct work_struct *work)
 {
+	islpci_private *priv = container_of(work, islpci_private, stats_work);
 	char *data;
 	int j;
 	struct obj_bss bss, *bss2;
@@ -228,7 +228,7 @@
 	} else
 		priv->iwstatistics.qual.updated = 0;
 
-	/* Update our wireless stats, but do not schedule to often 
+	/* Update our wireless stats, but do not schedule too often
 	 * (max 1 HZ) */
 	if ((priv->stats_timestamp == 0) ||
 	    time_after(jiffies, priv->stats_timestamp + 1 * HZ)) {
@@ -705,7 +705,7 @@
 	* Starting with WE-17, the buffer can be as big as needed.
 	* But the device won't repport anything if you change the value
 	* of IWMAX_BSS=24. */
-	
+
 	rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r);
 	bsslist = r.ptr;
 
@@ -785,7 +785,7 @@
 	return rvalue;
 }
 
-/* Provides no functionality, just completes the ioctl. In essence this is a 
+/* Provides no functionality, just completes the ioctl. In essence this is a
  * just a cosmetic ioctl.
  */
 static int
@@ -1104,7 +1104,7 @@
 					    &key);
 		}
 		/*
-		 * If a valid key is set, encryption should be enabled 
+		 * If a valid key is set, encryption should be enabled
 		 * (user may turn it off later).
 		 * This is also how "iwconfig ethX key on" works
 		 */
@@ -1126,7 +1126,7 @@
 	}
 	/* now read the flags */
 	if (dwrq->flags & IW_ENCODE_DISABLED) {
-		/* Encoding disabled, 
+		/* Encoding disabled,
 		 * authen = DOT11_AUTH_OS;
 		 * invoke = 0;
 		 * exunencrypt = 0; */
@@ -1214,7 +1214,7 @@
 	vwrq->value = (s32) r.u / 4;
 	vwrq->fixed = 1;
 	/* radio is not turned of
-	 * btw: how is possible to turn off only the radio 
+	 * btw: how is it possible to turn off only the radio
 	 */
 	vwrq->disabled = 0;
 
@@ -2141,11 +2141,9 @@
 					 struct islpci_bss_wpa_ie, list);
 			list_del(&bss->list);
 		} else {
-			bss = kmalloc(sizeof (*bss), GFP_ATOMIC);
-			if (bss != NULL) {
+			bss = kzalloc(sizeof (*bss), GFP_ATOMIC);
+			if (bss != NULL)
 				priv->num_bss_wpa++;
-				memset(bss, 0, sizeof (*bss));
-			}
 		}
 		if (bss != NULL) {
 			memcpy(bss->bssid, bssid, ETH_ALEN);
@@ -2354,17 +2352,17 @@
 		handle_request(priv, mlme, oid);
 		send_formatted_event(priv, "Authenticate request (ex)", mlme, 1);
 
-		if (priv->iw_mode != IW_MODE_MASTER 
+		if (priv->iw_mode != IW_MODE_MASTER
 				&& mlmeex->state != DOT11_STATE_AUTHING)
 			break;
 
 		confirm = kmalloc(sizeof(struct obj_mlmeex) + 6, GFP_ATOMIC);
 
-		if (!confirm) 
+		if (!confirm)
 			break;
 
 		memcpy(&confirm->address, mlmeex->address, ETH_ALEN);
-		printk(KERN_DEBUG "Authenticate from: address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", 
+		printk(KERN_DEBUG "Authenticate from: address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
 				mlmeex->address[0],
 				mlmeex->address[1],
 				mlmeex->address[2],
@@ -2398,10 +2396,10 @@
 		handle_request(priv, mlme, oid);
 		send_formatted_event(priv, "Associate request (ex)", mlme, 1);
 
-		if (priv->iw_mode != IW_MODE_MASTER 
+		if (priv->iw_mode != IW_MODE_MASTER
 				&& mlmeex->state != DOT11_STATE_ASSOCING)
 			break;
-		
+
 		confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC);
 
 		if (!confirm)
@@ -2417,7 +2415,7 @@
 
 		if (!wpa_ie_len) {
 			printk(KERN_DEBUG "No WPA IE found from "
-					"address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", 
+					"address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
 				mlmeex->address[0],
 				mlmeex->address[1],
 				mlmeex->address[2],
@@ -2435,14 +2433,14 @@
 		mgt_set_varlen(priv, oid, confirm, wpa_ie_len);
 
 		kfree(confirm);
-		
+
 		break;
 
 	case DOT11_OID_REASSOCIATEEX:
 		handle_request(priv, mlme, oid);
 		send_formatted_event(priv, "Reassociate request (ex)", mlme, 1);
 
-		if (priv->iw_mode != IW_MODE_MASTER 
+		if (priv->iw_mode != IW_MODE_MASTER
 				&& mlmeex->state != DOT11_STATE_ASSOCING)
 			break;
 
@@ -2461,7 +2459,7 @@
 
 		if (!wpa_ie_len) {
 			printk(KERN_DEBUG "No WPA IE found from "
-					"address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", 
+					"address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
 				mlmeex->address[0],
 				mlmeex->address[1],
 				mlmeex->address[2],
@@ -2473,13 +2471,13 @@
 			break;
 		}
 
-		confirm->size = wpa_ie_len; 
+		confirm->size = wpa_ie_len;
 		memcpy(&confirm->data, wpa_ie, wpa_ie_len);
 
 		mgt_set_varlen(priv, oid, confirm, wpa_ie_len);
 
 		kfree(confirm);
-		
+
 		break;
 
 	default:
@@ -2494,9 +2492,10 @@
  * interrupt context, no locks held.
  */
 void
-prism54_process_trap(void *data)
+prism54_process_trap(struct work_struct *work)
 {
-	struct islpci_mgmtframe *frame = data;
+	struct islpci_mgmtframe *frame =
+		container_of(work, struct islpci_mgmtframe, ws);
 	struct net_device *ndev = frame->ndev;
 	enum oid_num_t n = mgt_oidtonum(frame->header->oid);
 
@@ -2545,10 +2544,10 @@
 #define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
 ((int) (&((struct prism2_hostapd_param *) 0)->u.generic_elem.data))
 
-/* Maximum length for algorithm names (-1 for nul termination) 
+/* Maximum length for algorithm names (-1 for nul termination)
  * used in ioctl() */
 #define HOSTAP_CRYPT_ALG_NAME_LEN 16
-	
+
 struct prism2_hostapd_param {
 	u32 cmd;
 	u8 sta_addr[ETH_ALEN];
@@ -2621,7 +2620,7 @@
 					    &key);
 		}
 		/*
-		 * If a valid key is set, encryption should be enabled 
+		 * If a valid key is set, encryption should be enabled
 		 * (user may turn it off later).
 		 * This is also how "iwconfig ethX key on" works
 		 */
@@ -2643,7 +2642,7 @@
 	}
 	/* now read the flags */
 	if (param->u.crypt.flags & IW_ENCODE_DISABLED) {
-		/* Encoding disabled, 
+		/* Encoding disabled,
 		 * authen = DOT11_AUTH_OS;
 		 * invoke = 0;
 		 * exunencrypt = 0; */
@@ -2685,11 +2684,10 @@
                return -EINVAL;
 
        alen = sizeof(*attach) + len;
-       attach = kmalloc(alen, GFP_KERNEL);
+       attach = kzalloc(alen, GFP_KERNEL);
        if (attach == NULL)
                return -ENOMEM;
 
-       memset(attach, 0, alen);
 #define WLAN_FC_TYPE_MGMT 0
 #define WLAN_FC_STYPE_ASSOC_REQ 0
 #define WLAN_FC_STYPE_REASSOC_REQ 2
@@ -2710,7 +2708,7 @@
 
 	       ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
 
-	       if (ret == 0) 
+	       if (ret == 0)
 		       printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
 				       ndev->name);
        }
@@ -2870,7 +2868,7 @@
 			mlme = DOT11_MLME_AUTO;
 			printk("%s: Disabling WPA\n", ndev->name);
 			break;
-		case 2: 
+		case 2:
 		case 1: /* WPA */
 			printk("%s: Enabling WPA\n", ndev->name);
 			break;
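
The prism54_update_stats() and prism54_process_trap() changes above follow the new workqueue API: a handler receives the work_struct itself and recovers its private data with container_of() instead of a void * cookie. A self-contained sketch of the pattern, with placeholder names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_priv {
	struct work_struct stats_work;
	unsigned long stats_timestamp;
};

static void example_update_stats(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(work, struct example_priv, stats_work);

	priv->stats_timestamp = jiffies;	/* stand-in for the real update */
}

static void example_init(struct example_priv *priv)
{
	INIT_WORK(&priv->stats_work, example_update_stats);
	schedule_work(&priv->stats_work);
}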
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index 65f33ac..bcfbfb9 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -1,5 +1,4 @@
 /*
- *  
  *  Copyright (C) 2002 Intersil Americas Inc.
  *            (C) 2003 Aurelien Alleaume <slts@free.fr>
  *            (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
@@ -32,12 +31,12 @@
 void prism54_mib_init(islpci_private *);
 
 struct iw_statistics *prism54_get_wireless_stats(struct net_device *);
-void prism54_update_stats(islpci_private *);
+void prism54_update_stats(struct work_struct *);
 
 void prism54_acl_init(struct islpci_acl *);
 void prism54_acl_clean(struct islpci_acl *);
 
-void prism54_process_trap(void *);
+void prism54_process_trap(struct work_struct *);
 
 void prism54_wpa_bss_ie_init(islpci_private *priv);
 void prism54_wpa_bss_ie_clean(islpci_private *priv);
diff --git a/drivers/net/wireless/prism54/isl_oid.h b/drivers/net/wireless/prism54/isl_oid.h
index 419edf7..b7534c2 100644
--- a/drivers/net/wireless/prism54/isl_oid.h
+++ b/drivers/net/wireless/prism54/isl_oid.h
@@ -1,6 +1,4 @@
 /*
- *
- *  
  *  Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
  *  Copyright (C) 2004 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
  *  Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
@@ -23,7 +21,7 @@
 #if !defined(_ISL_OID_H)
 #define _ISL_OID_H
 
-/* 
+/*
  * MIB related constant and structure definitions for communicating
  * with the device firmware
  */
@@ -99,21 +97,21 @@
 	char data[0];
 } __attribute__((packed));
 
-/* 
+/*
  * in case everything's ok, the inlined function below will be
  * optimized away by the compiler...
  */
 static inline void
 __bug_on_wrong_struct_sizes(void)
 {
-	BUG_ON(sizeof (struct obj_ssid) != 34);
-	BUG_ON(sizeof (struct obj_key) != 34);
-	BUG_ON(sizeof (struct obj_mlme) != 12);
-	BUG_ON(sizeof (struct obj_mlmeex) != 14);
-	BUG_ON(sizeof (struct obj_buffer) != 8);
-	BUG_ON(sizeof (struct obj_bss) != 60);
-	BUG_ON(sizeof (struct obj_bsslist) != 4);
-	BUG_ON(sizeof (struct obj_frequencies) != 2);
+	BUILD_BUG_ON(sizeof (struct obj_ssid) != 34);
+	BUILD_BUG_ON(sizeof (struct obj_key) != 34);
+	BUILD_BUG_ON(sizeof (struct obj_mlme) != 12);
+	BUILD_BUG_ON(sizeof (struct obj_mlmeex) != 14);
+	BUILD_BUG_ON(sizeof (struct obj_buffer) != 8);
+	BUILD_BUG_ON(sizeof (struct obj_bss) != 60);
+	BUILD_BUG_ON(sizeof (struct obj_bsslist) != 4);
+	BUILD_BUG_ON(sizeof (struct obj_frequencies) != 2);
 }
 
 enum dot11_state_t {
@@ -154,13 +152,13 @@
 
 /* Prism "Nitro" / Frameburst / "Packet Frame Grouping"
  * Value is in microseconds. Represents the # microseconds
- * the firmware will take to group frames before sending out then out 
+ * the firmware will take to group frames before sending them out
  * together with a CSMA contention. Without this all frames are
- * sent with a CSMA contention. 
- * Bibliography: 
+ * sent with a CSMA contention.
+ * Bibliography:
  * http://www.hpl.hp.com/personal/Jean_Tourrilhes/Papers/Packet.Frame.Grouping.html
  */
-enum dot11_maxframeburst_t { 
+enum dot11_maxframeburst_t {
 	/* Values for DOT11_OID_MAXFRAMEBURST */
 	DOT11_MAXFRAMEBURST_OFF = 0, /* Card firmware default */
 	DOT11_MAXFRAMEBURST_MIXED_SAFE = 650, /* 802.11 a,b,g safe */
@@ -176,9 +174,9 @@
 /* Support for 802.11 long and short frame preambles.
  * Long	 preamble uses 128-bit sync field, 8-bit  CRC
  * Short preamble uses 56-bit  sync field, 16-bit CRC
- * 
+ *
  * 802.11a -- not sure, both optionally ?
- * 802.11b supports long and optionally short 
+ * 802.11b supports long and optionally short
  * 802.11g supports both */
 enum dot11_preamblesettings_t {
 	DOT11_PREAMBLESETTING_LONG = 0,
@@ -194,7 +192,7 @@
  * Long uses 802.11a slot timing  (9 usec ?)
  * Short uses 802.11b slot timing (20 use ?) */
 enum dot11_slotsettings_t {
-	DOT11_SLOTSETTINGS_LONG = 0, 
+	DOT11_SLOTSETTINGS_LONG = 0,
 		/* Allows *only* long 802.11b slot timing */
 	DOT11_SLOTSETTINGS_SHORT = 1,
 		/* Allows *only* long 802.11a slot timing */
@@ -203,7 +201,7 @@
 };
 
 /* All you need to know, ERP is "Extended Rate PHY".
- * An Extended Rate PHY (ERP) STA or AP shall support three different 
+ * An Extended Rate PHY (ERP) STA or AP shall support three different
  * preamble and header formats:
  * Long  preamble (refer to above)
  * Short preamble (refer to above)
@@ -221,7 +219,7 @@
 /* (ERP is "Extended Rate PHY") Way to read NONERP is NON-ERP-*
  * The key here is DOT11 NON ERP NEVER protects against
  * NON ERP STA's. You *don't* want this unless
- * you know what you are doing. It means you will only 
+ * you know what you are doing. It means you will only
  * get Extended Rate capabilities */
 enum dot11_nonerpprotection_t {
 	DOT11_NONERP_NEVER = 0,
@@ -229,13 +227,13 @@
 	DOT11_NONERP_DYNAMIC = 2
 };
 
-/* Preset OID configuration for 802.11 modes 
- * Note: DOT11_OID_CW[MIN|MAX] hold the values of the 
+/* Preset OID configuration for 802.11 modes
+ * Note: DOT11_OID_CW[MIN|MAX] hold the values of the
  * DCS MIN|MAX backoff used */
 enum dot11_profile_t { /* And set/allowed values */
 	/* Allowed values for DOT11_OID_PROFILES */
 	DOT11_PROFILE_B_ONLY = 0,
-		/* DOT11_OID_RATES: 1, 2, 5.5, 11Mbps 
+		/* DOT11_OID_RATES: 1, 2, 5.5, 11Mbps
 		 * DOT11_OID_PREAMBLESETTINGS: DOT11_PREAMBLESETTING_DYNAMIC
 		 * DOT11_OID_CWMIN: 31
 		 * DOT11_OID_NONEPROTECTION: DOT11_NOERP_DYNAMIC
@@ -275,7 +273,7 @@
 	OID_INL_CONFORMANCE_NONE = 0,	/* Perform active scanning */
 	OID_INL_CONFORMANCE_STRICT = 1,	/* Strictly adhere to 802.11d */
 	OID_INL_CONFORMANCE_FLEXIBLE = 2,	/* Use passed 802.11d info to
-		* determine channel AND/OR just make assumption that active 
+		* determine channel AND/OR just make assumption that active
 		* channels are valid  channels */
 };
 
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index ec1c00f..f057fd9 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -1,5 +1,4 @@
 /*
- *  
  *  Copyright (C) 2002 Intersil Americas Inc.
  *  Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
  *  Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
@@ -413,7 +412,7 @@
 	islpci_set_state(priv, PRV_STATE_PREBOOT);
 
 	/* disable all device interrupts in case they weren't */
-	isl38xx_disable_interrupts(priv->device_base);  
+	isl38xx_disable_interrupts(priv->device_base);
 
 	/* For safety reasons, we may want to ensure that no DMA transfer is
 	 * currently in progress by emptying the TX and RX queues. */
@@ -480,7 +479,7 @@
 
 	DEFINE_WAIT(wait);
 	prepare_to_wait(&priv->reset_done, &wait, TASK_UNINTERRUPTIBLE);
-	
+
 	/* now the last step is to reset the interface */
 	isl38xx_interface_reset(priv->device_base, priv->device_host_address);
 	islpci_set_state(priv, PRV_STATE_PREINIT);
@@ -488,7 +487,7 @@
         for(count = 0; count < 2 && result; count++) {
 		/* The software reset acknowledge needs about 220 msec here.
 		 * Be conservative and wait for up to one second. */
-	
+
 		remaining = schedule_timeout_uninterruptible(HZ);
 
 		if(remaining > 0) {
@@ -496,7 +495,7 @@
 			break;
 		}
 
-		/* If we're here it's because our IRQ hasn't yet gone through. 
+		/* If we're here it's because our IRQ hasn't yet gone through.
 		 * Retry a bit more...
 		 */
 		printk(KERN_ERR "%s: no 'reset complete' IRQ seen - retrying\n",
@@ -514,7 +513,7 @@
 
 	/* Now that the device is 100% up, let's allow
 	 * for the other interrupts --
-	 * NOTE: this is not *yet* true since we've only allowed the 
+	 * NOTE: this is not *yet* true since we've only allowed the
 	 * INIT interrupt on the IRQ line. We can perhaps poll
 	 * the IRQ line until we know for sure the reset went through */
 	isl38xx_enable_common_interrupts(priv->device_base);
@@ -716,7 +715,7 @@
 
 	prism54_acl_init(&priv->acl);
 	prism54_wpa_bss_ie_init(priv);
-	if (mgt_init(priv)) 
+	if (mgt_init(priv))
 		goto out_free;
 
 	return 0;
@@ -861,11 +860,10 @@
 	priv->state_off = 1;
 
 	/* initialize workqueue's */
-	INIT_WORK(&priv->stats_work,
-		  (void (*)(void *)) prism54_update_stats, priv);
+	INIT_WORK(&priv->stats_work, prism54_update_stats);
 	priv->stats_timestamp = 0;
 
-	INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake, priv);
+	INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake);
 	priv->reset_task_pending = 0;
 
 	/* allocate various memory areas */
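
The reset path shown in context above parks the caller on a wait queue and retries the one-second timeout when the 'reset complete' IRQ is late. A rough, hypothetical sketch of that bounded wait (the wait queue and return conventions are assumptions, not the driver's exact code):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

static int example_wait_for_reset(wait_queue_head_t *wq)
{
	int tries, woken = 0;
	DEFINE_WAIT(wait);

	prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
	for (tries = 0; tries < 2; tries++) {
		/* up to one second per attempt; > 0 means the IRQ woke us */
		if (schedule_timeout_uninterruptible(HZ) > 0) {
			woken = 1;
			break;
		}
	}
	finish_wait(wq, &wait);
	return woken ? 0 : -ETIMEDOUT;
}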
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h
index 2f7e525..a9aa166 100644
--- a/drivers/net/wireless/prism54/islpci_dev.h
+++ b/drivers/net/wireless/prism54/islpci_dev.h
@@ -1,6 +1,5 @@
 /*
- *  
- *  Copyright (C) 2002 Intersil Americas Inc. 
+ *  Copyright (C) 2002 Intersil Americas Inc.
  *  Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
  *  Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
  *  Copyright (C) 2003 Aurelien Alleaume <slts@free.fr>
@@ -72,12 +71,12 @@
 	u8 bssid[ETH_ALEN];
 	u8 wpa_ie[MAX_WPA_IE_LEN];
 	size_t wpa_ie_len;
-	
+
 };
 
 typedef struct {
 	spinlock_t slock;	/* generic spinlock; */
-	
+
 	u32 priv_oid;
 
 	/* our mib cache */
@@ -85,7 +84,7 @@
         struct rw_semaphore mib_sem;
 	void **mib;
 	char nickname[IW_ESSID_MAX_SIZE+1];
-	
+
 	/* Take care of the wireless stats */
 	struct work_struct stats_work;
 	struct semaphore stats_sem;
@@ -120,7 +119,7 @@
 	struct net_device *ndev;
 
 	/* device queue interface members */
-	struct isl38xx_cb *control_block;	/* device control block 
+	struct isl38xx_cb *control_block;	/* device control block
 							   (== driver_mem_address!) */
 
 	/* Each queue has three indexes:
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index a8261d8..b112291 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -1,5 +1,4 @@
 /*
- *  
  *  Copyright (C) 2002 Intersil Americas Inc.
  *  Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
  *  This program is free software; you can redistribute it and/or modify
@@ -48,7 +47,7 @@
 		/* read the index of the first fragment to be freed */
 		index = priv->free_data_tx % ISL38XX_CB_TX_QSIZE;
 
-		/* check for holes in the arrays caused by multi fragment frames 
+		/* check for holes in the arrays caused by multi fragment frames
 		 * searching for the last fragment of a frame */
 		if (priv->pci_map_tx_address[index] != (dma_addr_t) NULL) {
 			/* entry is the last fragment of a frame
@@ -253,6 +252,7 @@
 	 * header and without the FCS. But there a is a bit that
 	 * indicates if the packet is corrupted :-) */
 	struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data;
+
 	if (hdr->flags & 0x01)
 		/* This one is bad. Drop it ! */
 		return -1;
@@ -284,7 +284,7 @@
 		    (struct avs_80211_1_header *) skb_push(*skb,
 							   sizeof (struct
 								   avs_80211_1_header));
-		
+
 		avs->version = cpu_to_be32(P80211CAPTURE_VERSION);
 		avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header));
 		avs->mactime = cpu_to_be64(le64_to_cpu(clock));
@@ -390,7 +390,7 @@
 			struct rx_annex_header *annex =
 			    (struct rx_annex_header *) skb->data;
 			wstats.level = annex->rfmon.rssi;
-			/* The noise value can be a bit outdated if nobody's 
+			/* The noise value can be a bit outdated if nobody's
 			 * reading wireless stats... */
 			wstats.noise = priv->local_iwstatistics.qual.noise;
 			wstats.qual = wstats.level - wstats.noise;
@@ -464,10 +464,8 @@
 			break;
 		}
 		/* update the fragment address */
-		control_block->rx_data_low[index].address = cpu_to_le32((u32)
-									priv->
-									pci_map_rx_address
-									[index]);
+		control_block->rx_data_low[index].address =
+			cpu_to_le32((u32)priv->pci_map_rx_address[index]);
 		wmb();
 
 		/* increment the driver read pointer */
@@ -482,12 +480,14 @@
 }
 
 void
-islpci_do_reset_and_wake(void *data)
+islpci_do_reset_and_wake(struct work_struct *work)
 {
-	islpci_private *priv = (islpci_private *) data;
+	islpci_private *priv = container_of(work, islpci_private, reset_task);
+
 	islpci_reset(priv, 1);
-	netif_wake_queue(priv->ndev);
 	priv->reset_task_pending = 0;
+	smp_wmb();
+	netif_wake_queue(priv->ndev);
 }
 
 void
@@ -499,12 +499,14 @@
 	/* increment the transmit error counter */
 	statistics->tx_errors++;
 
-	printk(KERN_WARNING "%s: tx_timeout", ndev->name);
 	if (!priv->reset_task_pending) {
-		priv->reset_task_pending = 1;
-		printk(", scheduling a reset");
+		printk(KERN_WARNING
+			"%s: tx_timeout, scheduling reset\n", ndev->name);
 		netif_stop_queue(ndev);
+		priv->reset_task_pending = 1;
 		schedule_work(&priv->reset_task);
+	} else {
+		printk(KERN_WARNING
+			"%s: tx_timeout, waiting for reset\n", ndev->name);
 	}
-	printk("\n");
 }
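
The reordering in islpci_do_reset_and_wake() and the tx_timeout handler above keeps the reset_task_pending flag and the queue state consistent: the flag is set before the work is scheduled, and cleared before the queue is woken, with smp_wmb() publishing the clear first. A minimal sketch, assuming a priv layout like this:

#include <linux/netdevice.h>

struct example_priv {
	struct net_device *ndev;
	int reset_task_pending;
};

static void example_reset_and_wake(struct example_priv *priv)
{
	/* ... hardware reset would go here ... */
	priv->reset_task_pending = 0;
	smp_wmb();			/* publish the flag before waking TX */
	netif_wake_queue(priv->ndev);
}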
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h
index bc9d7a6..5bf820d 100644
--- a/drivers/net/wireless/prism54/islpci_eth.h
+++ b/drivers/net/wireless/prism54/islpci_eth.h
@@ -1,5 +1,4 @@
 /*
- *  
  *  Copyright (C) 2002 Intersil Americas Inc.
  *
  *  This program is free software; you can redistribute it and/or modify
@@ -68,6 +67,6 @@
 int islpci_eth_transmit(struct sk_buff *, struct net_device *);
 int islpci_eth_receive(islpci_private *);
 void islpci_eth_tx_timeout(struct net_device *);
-void islpci_do_reset_and_wake(void *data);
+void islpci_do_reset_and_wake(struct work_struct *);
 
 #endif				/* _ISL_GEN_H */
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index f692dcc..58257b4 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -1,5 +1,4 @@
 /*
- *  
  *  Copyright (C) 2002 Intersil Americas Inc.
  *  Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
  *
@@ -40,8 +39,8 @@
 module_param(init_pcitm, int, 0);
 
 /* In this order: vendor, device, subvendor, subdevice, class, class_mask,
- * driver_data 
- * If you have an update for this please contact prism54-devel@prism54.org 
+ * driver_data
+ * If you have an update for this please contact prism54-devel@prism54.org
  * The latest list can be found at http://prism54.org/supported_cards.php */
 static const struct pci_device_id prism54_id_tbl[] = {
 	/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
@@ -132,15 +131,15 @@
 
 	/* 0x40 is the programmable timer to configure the response timeout (TRDY_TIMEOUT)
 	 * 0x41 is the programmable timer to configure the retry timeout (RETRY_TIMEOUT)
-	 * 	The RETRY_TIMEOUT is used to set the number of retries that the core, as a
-	 * 	Master, will perform before abandoning a cycle. The default value for
-	 * 	RETRY_TIMEOUT is 0x80, which far exceeds the PCI 2.1 requirement for new
-	 * 	devices. A write of zero to the RETRY_TIMEOUT register disables this
-	 * 	function to allow use with any non-compliant legacy devices that may
-	 * 	execute more retries.
+	 *	The RETRY_TIMEOUT is used to set the number of retries that the core, as a
+	 *	Master, will perform before abandoning a cycle. The default value for
+	 *	RETRY_TIMEOUT is 0x80, which far exceeds the PCI 2.1 requirement for new
+	 *	devices. A write of zero to the RETRY_TIMEOUT register disables this
+	 *	function to allow use with any non-compliant legacy devices that may
+	 *	execute more retries.
 	 *
-	 * 	Writing zero to both these two registers will disable both timeouts and
-	 * 	*can* solve problems caused by devices that are slow to respond.
+	 *	Writing zero to both these two registers will disable both timeouts and
+	 *	*can* solve problems caused by devices that are slow to respond.
 	 *	Make this configurable - MSW
 	 */
 	if ( init_pcitm >= 0 ) {
@@ -171,14 +170,15 @@
 	pci_set_master(pdev);
 
 	/* enable MWI */
-	pci_set_mwi(pdev);
+	if (!pci_set_mwi(pdev))
+		printk(KERN_INFO "%s: pci_set_mwi(pdev) succeeded\n", DRV_NAME);
 
 	/* setup the network device interface and its structure */
 	if (!(ndev = islpci_setup(pdev))) {
 		/* error configuring the driver as a network device */
 		printk(KERN_ERR "%s: could not configure network device\n",
 		       DRV_NAME);
-		goto do_pci_release_regions;
+		goto do_pci_clear_mwi;
 	}
 
 	priv = netdev_priv(ndev);
@@ -208,6 +208,8 @@
 	pci_set_drvdata(pdev, NULL);
 	free_netdev(ndev);
 	priv = NULL;
+      do_pci_clear_mwi:
+	pci_clear_mwi(pdev);
       do_pci_release_regions:
 	pci_release_regions(pdev);
       do_pci_disable_device:
@@ -241,7 +243,7 @@
 		isl38xx_disable_interrupts(priv->device_base);
 		islpci_set_state(priv, PRV_STATE_OFF);
 		/* This bellow causes a lockup at rmmod time. It might be
-		 * because some interrupts still linger after rmmod time, 
+		 * because some interrupts still linger after rmmod time,
 		 * see bug #17 */
 		/* pci_set_power_state(pdev, 3);*/	/* try to power-off */
 	}
@@ -255,6 +257,8 @@
 	free_netdev(ndev);
 	priv = NULL;
 
+	pci_clear_mwi(pdev);
+
 	pci_release_regions(pdev);
 
 	pci_disable_device(pdev);
@@ -288,12 +292,19 @@
 {
 	struct net_device *ndev = pci_get_drvdata(pdev);
 	islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
+	int err;
+
 	BUG_ON(!priv);
 
-	pci_enable_device(pdev);
-
 	printk(KERN_NOTICE "%s: got resume request\n", ndev->name);
 
+	err = pci_enable_device(pdev);
+	if (err) {
+		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+		       ndev->name);
+		return err;
+	}
+
 	pci_restore_state(pdev);
 
 	/* alright let's go into the PREBOOT state */
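
Two of the hunks above add error handling the PCI core expects: pci_set_mwi() now pairs with pci_clear_mwi() on the unwind paths, and the resume handler checks pci_enable_device(), which is __must_check. A hedged sketch of the resume half (the function body is illustrative only):

#include <linux/pci.h>

static int example_resume(struct pci_dev *pdev)
{
	int err = pci_enable_device(pdev);

	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed on resume\n");
		return err;
	}
	pci_restore_state(pdev);
	return 0;
}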
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index 2e061a8..2246f79 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -1,5 +1,4 @@
 /*
- *  
  *  Copyright (C) 2002 Intersil Americas Inc.
  *  Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net>
  *
@@ -387,7 +386,7 @@
 
 			/* Create work to handle trap out of interrupt
 			 * context. */
-			INIT_WORK(&frame->ws, prism54_process_trap, frame);
+			INIT_WORK(&frame->ws, prism54_process_trap);
 			schedule_work(&frame->ws);
 
 		} else {
@@ -502,7 +501,7 @@
 	printk(KERN_WARNING "%s: timeout waiting for mgmt response\n",
 	       ndev->name);
 
-	/* TODO: we should reset the device here */     
+	/* TODO: we should reset the device here */
  out:
 	finish_wait(&priv->mgmt_wqueue, &wait);
 	up(&priv->mgmt_sem);
diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/prism54/islpci_mgt.h
index 2982be3..fc53b58 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.h
+++ b/drivers/net/wireless/prism54/islpci_mgt.h
@@ -1,5 +1,4 @@
 /*
- *  
  *  Copyright (C) 2002 Intersil Americas Inc.
  *  Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
  *
@@ -36,8 +35,8 @@
 
 
 /* General driver definitions */
-#define PCIDEVICE_LATENCY_TIMER_MIN 		0x40
-#define PCIDEVICE_LATENCY_TIMER_VAL 		0x50
+#define PCIDEVICE_LATENCY_TIMER_MIN		0x40
+#define PCIDEVICE_LATENCY_TIMER_VAL		0x50
 
 /* Debugging verbose definitions */
 #define SHOW_NOTHING                            0x00	/* overrules everything */
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index ebb2387..e6cf9df 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -1,4 +1,4 @@
-/*   
+/*
  *  Copyright (C) 2003,2004 Aurelien Alleaume <slts@free.fr>
  *
  *  This program is free software; you can redistribute it and/or modify
@@ -235,12 +235,10 @@
 {
 	int i;
 
-	priv->mib = kmalloc(OID_NUM_LAST * sizeof (void *), GFP_KERNEL);
+	priv->mib = kcalloc(OID_NUM_LAST, sizeof (void *), GFP_KERNEL);
 	if (!priv->mib)
 		return -ENOMEM;
 
-	memset(priv->mib, 0, OID_NUM_LAST * sizeof (void *));
-
 	/* Alloc the cache */
 	for (i = 0; i < OID_NUM_LAST; i++) {
 		if (isl_oid[i].flags & OID_FLAG_CACHED) {
@@ -503,7 +501,7 @@
 		}
 		if (ret || response_op == PIMFOR_OP_ERROR)
 			ret = -EIO;
-	} else 
+	} else
 		ret = -EIO;
 
 	/* re-set given data to what it was */
@@ -727,7 +725,7 @@
  * MEDIUMLIMIT,BEACONPERIOD,DTIMPERIOD,ATIMWINDOW,LISTENINTERVAL
  * FREQUENCY,EXTENDEDRATES.
  *
- * The way to do this is to set ESSID. Note though that they may get 
+ * The way to do this is to set ESSID. Note though that they may get
  * unlatch before though by setting another OID. */
 #if 0
 void
diff --git a/drivers/net/wireless/prism54/prismcompat.h b/drivers/net/wireless/prism54/prismcompat.h
index d71eca5..aa1d174 100644
--- a/drivers/net/wireless/prism54/prismcompat.h
+++ b/drivers/net/wireless/prism54/prismcompat.h
@@ -1,4 +1,4 @@
-/*  
+/*
  *  (C) 2004 Margit Schubert-While <margitsw@t-online.de>
  *
  *  This program is free software; you can redistribute it and/or modify
@@ -16,7 +16,7 @@
  *
  */
 
-/*  
+/*
  *	Compatibility header file to aid support of different kernel versions
  */
 
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 7fbfc9e..88e10c9 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -408,11 +408,8 @@
 #define MAX_TUPLE_SIZE 128
 static int ray_config(struct pcmcia_device *link)
 {
-    tuple_t tuple;
-    cisparse_t parse;
     int last_fn = 0, last_ret = 0;
     int i;
-    u_char buf[MAX_TUPLE_SIZE];
     win_req_t req;
     memreq_t mem;
     struct net_device *dev = (struct net_device *)link->priv;
@@ -420,29 +417,12 @@
 
     DEBUG(1, "ray_config(0x%p)\n", link);
 
-    /* This reads the card's CONFIG tuple to find its configuration regs */
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    tuple.TupleData = buf;
-    tuple.TupleDataMax = MAX_TUPLE_SIZE;
-    tuple.TupleOffset = 0;
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
-
     /* Determine card type and firmware version */
-    buf[0] = buf[MAX_TUPLE_SIZE - 1] = 0;
-    tuple.DesiredTuple = CISTPL_VERS_1;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    tuple.TupleData = buf;
-    tuple.TupleDataMax = MAX_TUPLE_SIZE;
-    tuple.TupleOffset = 2;
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-
-    for (i=0; i<tuple.TupleDataLen - 4; i++) 
-        if (buf[i] == 0) buf[i] = ' ';
-    printk(KERN_INFO "ray_cs Detected: %s\n",buf);
+    printk(KERN_INFO "ray_cs Detected: %s%s%s%s\n",
+	   link->prod_id[0] ? link->prod_id[0] : " ",
+	   link->prod_id[1] ? link->prod_id[1] : " ",
+	   link->prod_id[2] ? link->prod_id[2] : " ",
+	   link->prod_id[3] ? link->prod_id[3] : " ");
 
     /* Now allocate an interrupt line.  Note that this does not
        actually assign a handler to the interrupt.
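
ray_config() no longer walks CISTPL_CONFIG and CISTPL_VERS_1 by hand because the PCMCIA core now parses the CONFIG tuple before probe and exposes the version strings via link->prod_id[]. A small, hypothetical helper showing that usage:

#include <linux/kernel.h>
#include <pcmcia/ds.h>

static void example_print_prod_id(struct pcmcia_device *link)
{
	int i;

	for (i = 0; i < 4; i++)
		printk(KERN_INFO "prod_id[%d]: %s\n", i,
		       link->prod_id[i] ? link->prod_id[i] : "(none)");
}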
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index bcc7038..cf2d148 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -647,21 +647,6 @@
 	cisparse_t parse;
 	void __iomem *mem;
 
-	/*
-	 * This reads the card's CONFIG tuple to find its
-	 * configuration registers.
-	 */
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	tuple.Attributes = 0;
-	tuple.TupleData = buf;
-	tuple.TupleDataMax = sizeof(buf);
-	tuple.TupleOffset = 0;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present = parse.config.rmask[0];
-
 	/* Look up the current Vcc */
 	CS_CHECK(GetConfigurationInfo,
 		 pcmcia_get_configuration_info(link, &conf));
@@ -681,6 +666,10 @@
 	 * implementation-defined details.
 	 */
 	tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+	tuple.Attributes = 0;
+	tuple.TupleData = buf;
+	tuple.TupleDataMax = sizeof(buf);
+	tuple.TupleOffset = 0;
 	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
 	while (1) {
 		cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index aafb301..233d906 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -3939,11 +3939,8 @@
 static inline int
 wv_pcmcia_config(struct pcmcia_device *	link)
 {
-  tuple_t		tuple;
-  cisparse_t		parse;
   struct net_device *	dev = (struct net_device *) link->priv;
   int			i;
-  u_char		buf[64];
   win_req_t		req;
   memreq_t		mem;
   net_local *		lp = netdev_priv(dev);
@@ -3953,36 +3950,6 @@
   printk(KERN_DEBUG "->wv_pcmcia_config(0x%p)\n", link);
 #endif
 
-  /*
-   * This reads the card's CONFIG tuple to find its configuration
-   * registers.
-   */
-  do
-    {
-      tuple.Attributes = 0;
-      tuple.DesiredTuple = CISTPL_CONFIG;
-      i = pcmcia_get_first_tuple(link, &tuple);
-      if(i != CS_SUCCESS)
-	break;
-      tuple.TupleData = (cisdata_t *)buf;
-      tuple.TupleDataMax = 64;
-      tuple.TupleOffset = 0;
-      i = pcmcia_get_tuple_data(link, &tuple);
-      if(i != CS_SUCCESS)
-	break;
-      i = pcmcia_parse_tuple(link, &tuple, &parse);
-      if(i != CS_SUCCESS)
-	break;
-      link->conf.ConfigBase = parse.config.base;
-      link->conf.Present = parse.config.rmask[0];
-    }
-  while(0);
-  if(i != CS_SUCCESS)
-    {
-      cs_error(link, ParseTuple, i);
-      return FALSE;
-    }
-
   do
     {
       i = pcmcia_request_io(link, &link->io);
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 5b98a78..583e0d6 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1966,25 +1966,10 @@
  */
 static int wl3501_config(struct pcmcia_device *link)
 {
-	tuple_t tuple;
-	cisparse_t parse;
 	struct net_device *dev = link->priv;
 	int i = 0, j, last_fn, last_ret;
-	unsigned char bf[64];
 	struct wl3501_card *this;
 
-	/* This reads the card's CONFIG tuple to find its config registers. */
-	tuple.Attributes	= 0;
-	tuple.DesiredTuple	= CISTPL_CONFIG;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	tuple.TupleData		= bf;
-	tuple.TupleDataMax	= sizeof(bf);
-	tuple.TupleOffset	= 0;
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-	link->conf.ConfigBase	= parse.config.base;
-	link->conf.Present	= parse.config.rmask[0];
-
 	/* Try allocating IO ports.  This tries a few fixed addresses.  If you
 	 * want, you can also read the card's config table to pick addresses --
 	 * see the serial driver for an example. */
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 36b29ff..6cb66a3 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -1828,10 +1828,8 @@
 	/* Leave the device in reset state */
 	zd1201_docmd(zd, ZD1201_CMDCODE_INIT, 0, 0, 0);
 err_zd:
-	if (zd->tx_urb)
-		usb_free_urb(zd->tx_urb);
-	if (zd->rx_urb)
-		usb_free_urb(zd->rx_urb);
+	usb_free_urb(zd->tx_urb);
+	usb_free_urb(zd->rx_urb);
 	kfree(zd);
 	return err;
 }
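
The zd1201 error path can free unconditionally because usb_free_urb() accepts a NULL pointer, much like kfree(). A tiny sketch of the simplified unwind (names are placeholders):

#include <linux/usb.h>

static void example_free_urbs(struct urb *tx_urb, struct urb *rx_urb)
{
	usb_free_urb(tx_urb);	/* NULL-safe, no need to test first */
	usb_free_urb(rx_urb);
}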
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index aa661b2..77e11dd 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -1076,6 +1076,31 @@
 	return zd_iowrite32_locked(chip, rates, CR_MANDATORY_RATE_TBL);
 }
 
+int zd_chip_set_rts_cts_rate_locked(struct zd_chip *chip,
+	u8 rts_rate, int preamble)
+{
+	int rts_mod = ZD_RX_CCK;
+	u32 value = 0;
+
+	/* Modulation bit */
+	if (ZD_CS_TYPE(rts_rate) == ZD_CS_OFDM)
+		rts_mod = ZD_RX_OFDM;
+
+	dev_dbg_f(zd_chip_dev(chip), "rts_rate=%x preamble=%x\n",
+		rts_rate, preamble);
+
+	value |= rts_rate << RTSCTS_SH_RTS_RATE;
+	value |= rts_mod << RTSCTS_SH_RTS_MOD_TYPE;
+	value |= preamble << RTSCTS_SH_RTS_PMB_TYPE;
+	value |= preamble << RTSCTS_SH_CTS_PMB_TYPE;
+
+	/* We always send 11M self-CTS messages, like the vendor driver. */
+	value |= ZD_CCK_RATE_11M << RTSCTS_SH_CTS_RATE;
+	value |= ZD_RX_CCK << RTSCTS_SH_CTS_MOD_TYPE;
+
+	return zd_iowrite32_locked(chip, value, CR_RTS_CTS_RATE);
+}
+
 int zd_chip_enable_hwint(struct zd_chip *chip)
 {
 	int r;
@@ -1355,17 +1380,12 @@
 	return r;
 }
 
-int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates)
+int zd_chip_set_basic_rates_locked(struct zd_chip *chip, u16 cr_rates)
 {
-	int r;
+	ZD_ASSERT((cr_rates & ~(CR_RATES_80211B | CR_RATES_80211G)) == 0);
+	dev_dbg_f(zd_chip_dev(chip), "%x\n", cr_rates);
 
-	if (cr_rates & ~(CR_RATES_80211B|CR_RATES_80211G))
-		return -EINVAL;
-
-	mutex_lock(&chip->mutex);
-	r = zd_iowrite32_locked(chip, cr_rates, CR_BASIC_RATE_TBL);
-	mutex_unlock(&chip->mutex);
-	return r;
+	return zd_iowrite32_locked(chip, cr_rates, CR_BASIC_RATE_TBL);
 }
 
 static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size)
@@ -1653,3 +1673,16 @@
 
 	return 0;
 }
+
+int zd_chip_set_multicast_hash(struct zd_chip *chip,
+	                       struct zd_mc_hash *hash)
+{
+	struct zd_ioreq32 ioreqs[] = {
+		{ CR_GROUP_HASH_P1, hash->low },
+		{ CR_GROUP_HASH_P2, hash->high },
+	};
+
+	dev_dbg_f(zd_chip_dev(chip), "hash l 0x%08x h 0x%08x\n",
+		ioreqs[0].value, ioreqs[1].value);
+	return zd_iowrite32a(chip, ioreqs, ARRAY_SIZE(ioreqs));
+}
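
zd_chip_set_basic_rates() becomes a *_locked() helper with an inline wrapper (added further down in zd_chip.h); the convention is that *_locked() callers already hold chip->mutex. A sketch of that split with placeholder names and a stand-in for the register write:

#include <linux/mutex.h>
#include <linux/types.h>

struct example_chip {
	struct mutex mutex;
	u16 basic_rates;
};

/* Caller must hold chip->mutex. */
static int example_set_rates_locked(struct example_chip *chip, u16 rates)
{
	chip->basic_rates = rates;	/* stands in for the register write */
	return 0;
}

static inline int example_set_rates(struct example_chip *chip, u16 rates)
{
	int r;

	mutex_lock(&chip->mutex);
	r = example_set_rates_locked(chip, rates);
	mutex_unlock(&chip->mutex);
	return r;
}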
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index ae59597..a4e3cee 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -337,24 +337,24 @@
 #define CR_MAC_PS_STATE			CTL_REG(0x050C)
 
 #define CR_INTERRUPT			CTL_REG(0x0510)
-#define INT_TX_COMPLETE			0x00000001
-#define INT_RX_COMPLETE			0x00000002
-#define INT_RETRY_FAIL			0x00000004
-#define INT_WAKEUP			0x00000008
-#define INT_DTIM_NOTIFY			0x00000020
-#define INT_CFG_NEXT_BCN		0x00000040
-#define INT_BUS_ABORT			0x00000080
-#define INT_TX_FIFO_READY		0x00000100
-#define INT_UART			0x00000200
-#define INT_TX_COMPLETE_EN		0x00010000
-#define INT_RX_COMPLETE_EN		0x00020000
-#define INT_RETRY_FAIL_EN		0x00040000
-#define INT_WAKEUP_EN			0x00080000
-#define INT_DTIM_NOTIFY_EN		0x00200000
-#define INT_CFG_NEXT_BCN_EN		0x00400000
-#define INT_BUS_ABORT_EN		0x00800000
-#define INT_TX_FIFO_READY_EN		0x01000000
-#define INT_UART_EN			0x02000000
+#define INT_TX_COMPLETE			(1 <<  0)
+#define INT_RX_COMPLETE			(1 <<  1)
+#define INT_RETRY_FAIL			(1 <<  2)
+#define INT_WAKEUP			(1 <<  3)
+#define INT_DTIM_NOTIFY			(1 <<  5)
+#define INT_CFG_NEXT_BCN		(1 <<  6)
+#define INT_BUS_ABORT			(1 <<  7)
+#define INT_TX_FIFO_READY		(1 <<  8)
+#define INT_UART			(1 <<  9)
+#define INT_TX_COMPLETE_EN		(1 << 16)
+#define INT_RX_COMPLETE_EN		(1 << 17)
+#define INT_RETRY_FAIL_EN		(1 << 18)
+#define INT_WAKEUP_EN			(1 << 19)
+#define INT_DTIM_NOTIFY_EN		(1 << 21)
+#define INT_CFG_NEXT_BCN_EN		(1 << 22)
+#define INT_BUS_ABORT_EN		(1 << 23)
+#define INT_TX_FIFO_READY_EN		(1 << 24)
+#define INT_UART_EN			(1 << 25)
 
 #define CR_TSF_LOW_PART			CTL_REG(0x0514)
 #define CR_TSF_HIGH_PART		CTL_REG(0x0518)
@@ -390,26 +390,35 @@
 #define CR_BSSID_P1			CTL_REG(0x0618)
 #define CR_BSSID_P2			CTL_REG(0x061C)
 #define CR_BCN_PLCP_CFG			CTL_REG(0x0620)
+
+/* Group hash table for filtering incoming packets.
+ *
+ * The group hash table is 64 bits wide and split into two halves; the first
+ * half is the lower one. The upper 6 bits of the last byte of the target
+ * address are used as the index. Packets are received if the corresponding
+ * hash bit is set. This is used for multicast handling, but for broadcasts
+ * (address ff:ff:ff:ff:ff:ff) the highest bit in the upper half must also be set.
+ */
 #define CR_GROUP_HASH_P1		CTL_REG(0x0624)
 #define CR_GROUP_HASH_P2		CTL_REG(0x0628)
-#define CR_RX_TIMEOUT			CTL_REG(0x062C)
 
+#define CR_RX_TIMEOUT			CTL_REG(0x062C)
 /* Basic rates supported by the BSS. When producing ACK or CTS messages, the
  * device will use a rate in this table that is less than or equal to the rate
  * of the incoming frame which prompted the response */
 #define CR_BASIC_RATE_TBL		CTL_REG(0x0630)
-#define CR_RATE_1M	0x0001	/* 802.11b */
-#define CR_RATE_2M	0x0002	/* 802.11b */
-#define CR_RATE_5_5M	0x0004	/* 802.11b */
-#define CR_RATE_11M	0x0008	/* 802.11b */
-#define CR_RATE_6M      0x0100	/* 802.11g */
-#define CR_RATE_9M      0x0200	/* 802.11g */
-#define CR_RATE_12M	0x0400	/* 802.11g */
-#define CR_RATE_18M	0x0800	/* 802.11g */
-#define CR_RATE_24M     0x1000	/* 802.11g */
-#define CR_RATE_36M     0x2000	/* 802.11g */
-#define CR_RATE_48M     0x4000	/* 802.11g */
-#define CR_RATE_54M     0x8000	/* 802.11g */
+#define CR_RATE_1M	(1 <<  0)	/* 802.11b */
+#define CR_RATE_2M	(1 <<  1)	/* 802.11b */
+#define CR_RATE_5_5M	(1 <<  2)	/* 802.11b */
+#define CR_RATE_11M	(1 <<  3)	/* 802.11b */
+#define CR_RATE_6M      (1 <<  8)	/* 802.11g */
+#define CR_RATE_9M      (1 <<  9)	/* 802.11g */
+#define CR_RATE_12M	(1 << 10)	/* 802.11g */
+#define CR_RATE_18M	(1 << 11)	/* 802.11g */
+#define CR_RATE_24M     (1 << 12)	/* 802.11g */
+#define CR_RATE_36M     (1 << 13)	/* 802.11g */
+#define CR_RATE_48M     (1 << 14)	/* 802.11g */
+#define CR_RATE_54M     (1 << 15)	/* 802.11g */
 #define CR_RATES_80211G	0xff00
 #define CR_RATES_80211B	0x000f
 
@@ -420,15 +429,24 @@
 #define CR_MANDATORY_RATE_TBL		CTL_REG(0x0634)
 #define CR_RTS_CTS_RATE			CTL_REG(0x0638)
 
+/* These are all bit indexes in CR_RTS_CTS_RATE, so remember to shift. */
+#define RTSCTS_SH_RTS_RATE		0
+#define RTSCTS_SH_EXP_CTS_RATE		4
+#define RTSCTS_SH_RTS_MOD_TYPE		8
+#define RTSCTS_SH_RTS_PMB_TYPE		9
+#define RTSCTS_SH_CTS_RATE		16
+#define RTSCTS_SH_CTS_MOD_TYPE		24
+#define RTSCTS_SH_CTS_PMB_TYPE		25
+
 #define CR_WEP_PROTECT			CTL_REG(0x063C)
 #define CR_RX_THRESHOLD			CTL_REG(0x0640)
 
 /* register for controlling the LEDS */
 #define CR_LED				CTL_REG(0x0644)
 /* masks for controlling LEDs */
-#define LED1				0x0100
-#define LED2				0x0200
-#define LED_SW				0x0400
+#define LED1				(1 <<  8)
+#define LED2				(1 <<  9)
+#define LED_SW				(1 << 10)
 
 /* Seems to indicate that the configuration is over.
  */
@@ -455,18 +473,18 @@
  * registers, so one could argue it is a LOCK bit. But calling it
  * LOCK_PHY_REGS makes it confusing.
  */
-#define UNLOCK_PHY_REGS			0x0080
+#define UNLOCK_PHY_REGS			(1 << 7)
 
 #define CR_DEVICE_STATE			CTL_REG(0x0684)
 #define CR_UNDERRUN_CNT			CTL_REG(0x0688)
 
 #define CR_RX_FILTER			CTL_REG(0x068c)
-#define RX_FILTER_ASSOC_RESPONSE	0x0002
-#define RX_FILTER_REASSOC_RESPONSE	0x0008
-#define RX_FILTER_PROBE_RESPONSE	0x0020
-#define RX_FILTER_BEACON		0x0100
-#define RX_FILTER_DISASSOC		0x0400
-#define RX_FILTER_AUTH			0x0800
+#define RX_FILTER_ASSOC_RESPONSE	(1 <<  1)
+#define RX_FILTER_REASSOC_RESPONSE	(1 <<  3)
+#define RX_FILTER_PROBE_RESPONSE	(1 <<  5)
+#define RX_FILTER_BEACON		(1 <<  8)
+#define RX_FILTER_DISASSOC		(1 << 10)
+#define RX_FILTER_AUTH			(1 << 11)
 #define AP_RX_FILTER			0x0400feff
 #define STA_RX_FILTER			0x0000ffff
 
@@ -794,6 +812,9 @@
 int zd_chip_enable_hwint(struct zd_chip *chip);
 int zd_chip_disable_hwint(struct zd_chip *chip);
 
+int zd_chip_set_rts_cts_rate_locked(struct zd_chip *chip,
+	u8 rts_rate, int preamble);
+
 static inline int zd_get_encryption_type(struct zd_chip *chip, u32 *type)
 {
 	return zd_ioread32(chip, CR_ENCRYPTION_TYPE, type);
@@ -809,7 +830,17 @@
 	return zd_ioread16(chip, CR_BASIC_RATE_TBL, cr_rates);
 }
 
-int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates);
+int zd_chip_set_basic_rates_locked(struct zd_chip *chip, u16 cr_rates);
+
+static inline int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates)
+{
+	int r;
+
+	mutex_lock(&chip->mutex);
+	r = zd_chip_set_basic_rates_locked(chip, cr_rates);
+	mutex_unlock(&chip->mutex);
+	return r;
+}
 
 static inline int zd_chip_set_rx_filter(struct zd_chip *chip, u32 filter)
 {
@@ -842,4 +873,36 @@
 
 u16 zd_rx_rate(const void *rx_frame, const struct rx_status *status);
 
+struct zd_mc_hash {
+	u32 low;
+	u32 high;
+};
+
+static inline void zd_mc_clear(struct zd_mc_hash *hash)
+{
+	hash->low = 0;
+	/* The interface must always receive broadcasts.
+	 * The hash of the broadcast address ff:ff:ff:ff:ff:ff is 63.
+	 */
+	hash->high = 0x80000000;
+}
+
+static inline void zd_mc_add_all(struct zd_mc_hash *hash)
+{
+	hash->low = hash->high = 0xffffffff;
+}
+
+static inline void zd_mc_add_addr(struct zd_mc_hash *hash, u8 *addr)
+{
+	unsigned int i = addr[5] >> 2;
+	if (i < 32) {
+		hash->low |= 1 << i;
+	} else {
+		hash->high |= 1 << (i-32);
+	}
+}
+
+int zd_chip_set_multicast_hash(struct zd_chip *chip,
+	                       struct zd_mc_hash *hash);
+
 #endif /* _ZD_CHIP_H */
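
A usage sketch for the group hash helpers added above (assuming zd_chip.h is included): clear the hash, which keeps bit 63 set so broadcasts always pass, fold in each multicast address via the top six bits of its last byte, then program both 32-bit halves with zd_chip_set_multicast_hash(). The address array here is illustrative:

static void example_build_hash(struct zd_mc_hash *hash,
			       u8 addrs[][6], int n)
{
	int i;

	zd_mc_clear(hash);		/* keeps the broadcast bit (63) set */
	for (i = 0; i < n; i++)
		zd_mc_add_addr(hash, addrs[i]);
	/* zd_chip_set_multicast_hash(chip, hash) would then program
	 * CR_GROUP_HASH_P1 and CR_GROUP_HASH_P2 */
}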
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
index a13ec72..fb22f62 100644
--- a/drivers/net/wireless/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -39,6 +39,7 @@
 	if (!(x)) { \
 		pr_debug("%s:%d ASSERT %s VIOLATED!\n", \
 			__FILE__, __LINE__, __stringify(x)); \
+		dump_stack(); \
 	} \
 } while (0)
 #else
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.c b/drivers/net/wireless/zd1211rw/zd_ieee80211.c
index 66905f7..189160e 100644
--- a/drivers/net/wireless/zd1211rw/zd_ieee80211.c
+++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.c
@@ -37,7 +37,12 @@
 	[ZD_REGDOMAIN_JAPAN]	 = { 1, 14},
 	[ZD_REGDOMAIN_SPAIN]	 = { 1, 14},
 	[ZD_REGDOMAIN_FRANCE]	 = { 1, 14},
-	[ZD_REGDOMAIN_JAPAN_ADD] = {14, 15},
+
+	/* Japan originally only had channel 14 available (see CHNL_ID 0x40 in
+	 * 802.11). However, in 2001 the range was extended to include channels
+	 * 1-13. The ZyDAS devices still use the old region code but are
+	 * designed to allow the extra channel access in Japan. */
+	[ZD_REGDOMAIN_JAPAN_ADD] = { 1, 15},
 };
 
 const struct channel_range *zd_channel_range(u8 regdomain)
@@ -133,9 +138,6 @@
 	int i, r;
 	u32 mhz;
 
-	if (!(freq->flags & IW_FREQ_FIXED))
-		return 0;
-
 	if (freq->m < 1000) {
 		if (freq->m  > NUM_CHANNELS || freq->m == 0)
 			return -EINVAL;
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.h b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
index f63245b..26b8298 100644
--- a/drivers/net/wireless/zd1211rw/zd_ieee80211.h
+++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
@@ -50,6 +50,7 @@
 	return header->prefix[0] & 0xf;
 }
 
+/* These are referred to as zd_rates */
 #define ZD_OFDM_RATE_6M		0xb
 #define ZD_OFDM_RATE_9M		0xf
 #define ZD_OFDM_RATE_12M	0xa
@@ -64,7 +65,7 @@
 	u8 service;
 	__le16 length;
 	__le16 crc16;
-};
+} __attribute__((packed));
 
 static inline u8 zd_cck_plcp_header_rate(const struct cck_plcp_header *header)
 {
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index a7d29bd..00ca704 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -32,11 +32,15 @@
 
 static void ieee_init(struct ieee80211_device *ieee);
 static void softmac_init(struct ieee80211softmac_device *sm);
+static void set_rts_cts_work(struct work_struct *work);
+static void set_basic_rates_work(struct work_struct *work);
 
 static void housekeeping_init(struct zd_mac *mac);
 static void housekeeping_enable(struct zd_mac *mac);
 static void housekeeping_disable(struct zd_mac *mac);
 
+static void set_multicast_hash_handler(struct work_struct *work);
+
 int zd_mac_init(struct zd_mac *mac,
 	        struct net_device *netdev,
 	        struct usb_interface *intf)
@@ -46,11 +50,14 @@
 	memset(mac, 0, sizeof(*mac));
 	spin_lock_init(&mac->lock);
 	mac->netdev = netdev;
+	INIT_DELAYED_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
+	INIT_DELAYED_WORK(&mac->set_basic_rates_work, set_basic_rates_work);
 
 	ieee_init(ieee);
 	softmac_init(ieee80211_priv(netdev));
 	zd_chip_init(&mac->chip, netdev, intf);
 	housekeeping_init(mac);
+	INIT_WORK(&mac->set_multicast_hash_work, set_multicast_hash_handler);
 	return 0;
 }
 
@@ -132,6 +139,7 @@
 
 void zd_mac_clear(struct zd_mac *mac)
 {
+	flush_workqueue(zd_workqueue);
 	zd_chip_clear(&mac->chip);
 	ZD_ASSERT(!spin_is_locked(&mac->lock));
 	ZD_MEMCLEAR(mac, sizeof(struct zd_mac));
@@ -213,6 +221,13 @@
 	housekeeping_disable(mac);
 	ieee80211softmac_stop(netdev);
 
+	/* Ensure no work items are running or queued from this point */
+	cancel_delayed_work(&mac->set_rts_cts_work);
+	cancel_delayed_work(&mac->set_basic_rates_work);
+	flush_workqueue(zd_workqueue);
+	mac->updating_rts_rate = 0;
+	mac->updating_basic_rates = 0;
+
 	zd_chip_disable_hwint(chip);
 	zd_chip_switch_radio_off(chip);
 	zd_chip_disable_int(chip);
@@ -245,6 +260,43 @@
 	return 0;
 }
 
+static void set_multicast_hash_handler(struct work_struct *work)
+{
+	struct zd_mac *mac = container_of(work, struct zd_mac,
+					  set_multicast_hash_work);
+	struct zd_mc_hash hash;
+
+	spin_lock_irq(&mac->lock);
+	hash = mac->multicast_hash;
+	spin_unlock_irq(&mac->lock);
+
+	zd_chip_set_multicast_hash(&mac->chip, &hash);
+}
+
+void zd_mac_set_multicast_list(struct net_device *dev)
+{
+	struct zd_mc_hash hash;
+	struct zd_mac *mac = zd_netdev_mac(dev);
+	struct dev_mc_list *mc;
+	unsigned long flags;
+
+	if (dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) {
+		zd_mc_add_all(&hash);
+	} else {
+		zd_mc_clear(&hash);
+		for (mc = dev->mc_list; mc; mc = mc->next) {
+			dev_dbg_f(zd_mac_dev(mac), "mc addr " MAC_FMT "\n",
+				  MAC_ARG(mc->dmi_addr));
+			zd_mc_add_addr(&hash, mc->dmi_addr);
+		}
+	}
+
+	spin_lock_irqsave(&mac->lock, flags);
+	mac->multicast_hash = hash;
+	spin_unlock_irqrestore(&mac->lock, flags);
+	queue_work(zd_workqueue, &mac->set_multicast_hash_work);
+}
+
 int zd_mac_set_regdomain(struct zd_mac *mac, u8 regdomain)
 {
 	int r;
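
zd_mac_set_multicast_list() above runs in atomic context, so it only captures the hash under the spinlock and defers the (sleeping) USB register writes to a work item on zd_workqueue. A stripped-down sketch of that hand-off, with placeholder structure and field names:

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_mac {
	spinlock_t lock;
	u32 hash_lo, hash_hi;
	struct work_struct hash_work;
};

static void example_hash_work(struct work_struct *work)
{
	struct example_mac *mac =
		container_of(work, struct example_mac, hash_work);
	u32 lo, hi;

	spin_lock_irq(&mac->lock);
	lo = mac->hash_lo;
	hi = mac->hash_hi;
	spin_unlock_irq(&mac->lock);

	/* sleeping hardware access happens here, outside the lock */
	(void) lo;
	(void) hi;
}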
@@ -286,6 +338,189 @@
 	return regdomain;
 }
 
+/* Fallback to lowest rate, if rate is unknown. */
+static u8 rate_to_zd_rate(u8 rate)
+{
+	switch (rate) {
+	case IEEE80211_CCK_RATE_2MB:
+		return ZD_CCK_RATE_2M;
+	case IEEE80211_CCK_RATE_5MB:
+		return ZD_CCK_RATE_5_5M;
+	case IEEE80211_CCK_RATE_11MB:
+		return ZD_CCK_RATE_11M;
+	case IEEE80211_OFDM_RATE_6MB:
+		return ZD_OFDM_RATE_6M;
+	case IEEE80211_OFDM_RATE_9MB:
+		return ZD_OFDM_RATE_9M;
+	case IEEE80211_OFDM_RATE_12MB:
+		return ZD_OFDM_RATE_12M;
+	case IEEE80211_OFDM_RATE_18MB:
+		return ZD_OFDM_RATE_18M;
+	case IEEE80211_OFDM_RATE_24MB:
+		return ZD_OFDM_RATE_24M;
+	case IEEE80211_OFDM_RATE_36MB:
+		return ZD_OFDM_RATE_36M;
+	case IEEE80211_OFDM_RATE_48MB:
+		return ZD_OFDM_RATE_48M;
+	case IEEE80211_OFDM_RATE_54MB:
+		return ZD_OFDM_RATE_54M;
+	}
+	return ZD_CCK_RATE_1M;
+}
+
+static u16 rate_to_cr_rate(u8 rate)
+{
+	switch (rate) {
+	case IEEE80211_CCK_RATE_2MB:
+		return CR_RATE_1M;
+	case IEEE80211_CCK_RATE_5MB:
+		return CR_RATE_5_5M;
+	case IEEE80211_CCK_RATE_11MB:
+		return CR_RATE_11M;
+	case IEEE80211_OFDM_RATE_6MB:
+		return CR_RATE_6M;
+	case IEEE80211_OFDM_RATE_9MB:
+		return CR_RATE_9M;
+	case IEEE80211_OFDM_RATE_12MB:
+		return CR_RATE_12M;
+	case IEEE80211_OFDM_RATE_18MB:
+		return CR_RATE_18M;
+	case IEEE80211_OFDM_RATE_24MB:
+		return CR_RATE_24M;
+	case IEEE80211_OFDM_RATE_36MB:
+		return CR_RATE_36M;
+	case IEEE80211_OFDM_RATE_48MB:
+		return CR_RATE_48M;
+	case IEEE80211_OFDM_RATE_54MB:
+		return CR_RATE_54M;
+	}
+	return CR_RATE_1M;
+}
+
+static void try_enable_tx(struct zd_mac *mac)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&mac->lock, flags);
+	if (mac->updating_rts_rate == 0 && mac->updating_basic_rates == 0)
+		netif_wake_queue(mac->netdev);
+	spin_unlock_irqrestore(&mac->lock, flags);
+}
+
+static void set_rts_cts_work(struct work_struct *work)
+{
+	struct zd_mac *mac =
+		container_of(work, struct zd_mac, set_rts_cts_work.work);
+	unsigned long flags;
+	u8 rts_rate;
+	unsigned int short_preamble;
+
+	mutex_lock(&mac->chip.mutex);
+
+	spin_lock_irqsave(&mac->lock, flags);
+	mac->updating_rts_rate = 0;
+	rts_rate = mac->rts_rate;
+	short_preamble = mac->short_preamble;
+	spin_unlock_irqrestore(&mac->lock, flags);
+
+	zd_chip_set_rts_cts_rate_locked(&mac->chip, rts_rate, short_preamble);
+	mutex_unlock(&mac->chip.mutex);
+
+	try_enable_tx(mac);
+}
+
+static void set_basic_rates_work(struct work_struct *work)
+{
+	struct zd_mac *mac =
+		container_of(work, struct zd_mac, set_basic_rates_work.work);
+	unsigned long flags;
+	u16 basic_rates;
+
+	mutex_lock(&mac->chip.mutex);
+
+	spin_lock_irqsave(&mac->lock, flags);
+	mac->updating_basic_rates = 0;
+	basic_rates = mac->basic_rates;
+	spin_unlock_irqrestore(&mac->lock, flags);
+
+	zd_chip_set_basic_rates_locked(&mac->chip, basic_rates);
+	mutex_unlock(&mac->chip.mutex);
+
+	try_enable_tx(mac);
+}
+
+static void bssinfo_change(struct net_device *netdev, u32 changes)
+{
+	struct zd_mac *mac = zd_netdev_mac(netdev);
+	struct ieee80211softmac_device *softmac = ieee80211_priv(netdev);
+	struct ieee80211softmac_bss_info *bssinfo = &softmac->bssinfo;
+	int need_set_rts_cts = 0;
+	int need_set_rates = 0;
+	u16 basic_rates;
+	unsigned long flags;
+
+	dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes);
+
+	if (changes & IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE) {
+		spin_lock_irqsave(&mac->lock, flags);
+		mac->short_preamble = bssinfo->short_preamble;
+		spin_unlock_irqrestore(&mac->lock, flags);
+		need_set_rts_cts = 1;
+	}
+
+	if (changes & IEEE80211SOFTMAC_BSSINFOCHG_RATES) {
+		/* Set RTS rate to highest available basic rate */
+		u8 rate = ieee80211softmac_highest_supported_rate(softmac,
+			&bssinfo->supported_rates, 1);
+		rate = rate_to_zd_rate(rate);
+
+		spin_lock_irqsave(&mac->lock, flags);
+		if (rate != mac->rts_rate) {
+			mac->rts_rate = rate;
+			need_set_rts_cts = 1;
+		}
+		spin_unlock_irqrestore(&mac->lock, flags);
+
+		/* Set basic rates */
+		need_set_rates = 1;
+		if (bssinfo->supported_rates.count == 0) {
+			/* Allow the device to be flexible */
+			basic_rates = CR_RATES_80211B | CR_RATES_80211G;
+		} else {
+			int i = 0;
+			basic_rates = 0;
+
+			for (i = 0; i < bssinfo->supported_rates.count; i++) {
+				u16 rate = bssinfo->supported_rates.rates[i];
+				if ((rate & IEEE80211_BASIC_RATE_MASK) == 0)
+					continue;
+
+				rate &= ~IEEE80211_BASIC_RATE_MASK;
+				basic_rates |= rate_to_cr_rate(rate);
+			}
+		}
+		spin_lock_irqsave(&mac->lock, flags);
+		mac->basic_rates = basic_rates;
+		spin_unlock_irqrestore(&mac->lock, flags);
+	}
+
+	/* Schedule any changes we made above */
+
+	spin_lock_irqsave(&mac->lock, flags);
+	if (need_set_rts_cts && !mac->updating_rts_rate) {
+		mac->updating_rts_rate = 1;
+		netif_stop_queue(mac->netdev);
+		queue_delayed_work(zd_workqueue, &mac->set_rts_cts_work, 0);
+	}
+	if (need_set_rates && !mac->updating_basic_rates) {
+		mac->updating_basic_rates = 1;
+		netif_stop_queue(mac->netdev);
+		queue_delayed_work(zd_workqueue, &mac->set_basic_rates_work,
+				   0);
+	}
+	spin_unlock_irqrestore(&mac->lock, flags);
+}
+
 static void set_channel(struct net_device *netdev, u8 channel)
 {
 	struct zd_mac *mac = zd_netdev_mac(netdev);
@@ -295,7 +530,6 @@
 	zd_chip_set_channel(&mac->chip, channel);
 }
 
-/* TODO: Should not work in Managed mode. */
 int zd_mac_request_channel(struct zd_mac *mac, u8 channel)
 {
 	unsigned long lock_flags;
@@ -317,31 +551,22 @@
 		return 0;
 }
 
-int zd_mac_get_channel(struct zd_mac *mac, u8 *channel, u8 *flags)
+u8 zd_mac_get_channel(struct zd_mac *mac)
 {
-	struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
+	u8 channel = zd_chip_get_channel(&mac->chip);
 
-	*channel = zd_chip_get_channel(&mac->chip);
-	if (ieee->iw_mode != IW_MODE_INFRA) {
-		spin_lock_irq(&mac->lock);
-		*flags = *channel == mac->requested_channel ?
-			MAC_FIXED_CHANNEL : 0;
-		spin_unlock(&mac->lock);
-	} else {
-		*flags = 0;
-	}
-	dev_dbg_f(zd_mac_dev(mac), "channel %u flags %u\n", *channel, *flags);
-	return 0;
+	dev_dbg_f(zd_mac_dev(mac), "channel %u\n", channel);
+	return channel;
 }
 
 /* If wrong rate is given, we are falling back to the slowest rate: 1MBit/s */
-static u8 cs_typed_rate(u8 cs_rate)
+static u8 zd_rate_typed(u8 zd_rate)
 {
 	static const u8 typed_rates[16] = {
-		[ZD_CS_CCK_RATE_1M]	= ZD_CS_CCK|ZD_CS_CCK_RATE_1M,
-		[ZD_CS_CCK_RATE_2M]	= ZD_CS_CCK|ZD_CS_CCK_RATE_2M,
-		[ZD_CS_CCK_RATE_5_5M]	= ZD_CS_CCK|ZD_CS_CCK_RATE_5_5M,
-		[ZD_CS_CCK_RATE_11M]	= ZD_CS_CCK|ZD_CS_CCK_RATE_11M,
+		[ZD_CCK_RATE_1M]	= ZD_CS_CCK|ZD_CCK_RATE_1M,
+		[ZD_CCK_RATE_2M]	= ZD_CS_CCK|ZD_CCK_RATE_2M,
+		[ZD_CCK_RATE_5_5M]	= ZD_CS_CCK|ZD_CCK_RATE_5_5M,
+		[ZD_CCK_RATE_11M]	= ZD_CS_CCK|ZD_CCK_RATE_11M,
 		[ZD_OFDM_RATE_6M]	= ZD_CS_OFDM|ZD_OFDM_RATE_6M,
 		[ZD_OFDM_RATE_9M]	= ZD_CS_OFDM|ZD_OFDM_RATE_9M,
 		[ZD_OFDM_RATE_12M]	= ZD_CS_OFDM|ZD_OFDM_RATE_12M,
@@ -353,37 +578,7 @@
 	};
 
 	ZD_ASSERT(ZD_CS_RATE_MASK == 0x0f);
-	return typed_rates[cs_rate & ZD_CS_RATE_MASK];
-}
-
-/* Fallback to lowest rate, if rate is unknown. */
-static u8 rate_to_cs_rate(u8 rate)
-{
-	switch (rate) {
-	case IEEE80211_CCK_RATE_2MB:
-		return ZD_CS_CCK_RATE_2M;
-	case IEEE80211_CCK_RATE_5MB:
-		return ZD_CS_CCK_RATE_5_5M;
-	case IEEE80211_CCK_RATE_11MB:
-		return ZD_CS_CCK_RATE_11M;
-	case IEEE80211_OFDM_RATE_6MB:
-		return ZD_OFDM_RATE_6M;
-	case IEEE80211_OFDM_RATE_9MB:
-		return ZD_OFDM_RATE_9M;
-	case IEEE80211_OFDM_RATE_12MB:
-		return ZD_OFDM_RATE_12M;
-	case IEEE80211_OFDM_RATE_18MB:
-		return ZD_OFDM_RATE_18M;
-	case IEEE80211_OFDM_RATE_24MB:
-		return ZD_OFDM_RATE_24M;
-	case IEEE80211_OFDM_RATE_36MB:
-		return ZD_OFDM_RATE_36M;
-	case IEEE80211_OFDM_RATE_48MB:
-		return ZD_OFDM_RATE_48M;
-	case IEEE80211_OFDM_RATE_54MB:
-		return ZD_OFDM_RATE_54M;
-	}
-	return ZD_CS_CCK_RATE_1M;
+	return typed_rates[zd_rate & ZD_CS_RATE_MASK];
 }
 
 int zd_mac_set_mode(struct zd_mac *mac, u32 mode)
@@ -464,6 +659,9 @@
 	range->we_version_compiled = WIRELESS_EXT;
 	range->we_version_source = 20;
 
+	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
+			  IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
+
 	ZD_ASSERT(!irqs_disabled());
 	spin_lock_irq(&mac->lock);
 	regdomain = mac->regdomain;
@@ -484,13 +682,13 @@
 	return 0;
 }
 
-static int zd_calc_tx_length_us(u8 *service, u8 cs_rate, u16 tx_length)
+static int zd_calc_tx_length_us(u8 *service, u8 zd_rate, u16 tx_length)
 {
 	static const u8 rate_divisor[] = {
-		[ZD_CS_CCK_RATE_1M]	=  1,
-		[ZD_CS_CCK_RATE_2M]	=  2,
-		[ZD_CS_CCK_RATE_5_5M]	= 11, /* bits must be doubled */
-		[ZD_CS_CCK_RATE_11M]	= 11,
+		[ZD_CCK_RATE_1M]	=  1,
+		[ZD_CCK_RATE_2M]	=  2,
+		[ZD_CCK_RATE_5_5M]	= 11, /* bits must be doubled */
+		[ZD_CCK_RATE_11M]	= 11,
 		[ZD_OFDM_RATE_6M]	=  6,
 		[ZD_OFDM_RATE_9M]	=  9,
 		[ZD_OFDM_RATE_12M]	= 12,
@@ -504,15 +702,15 @@
 	u32 bits = (u32)tx_length * 8;
 	u32 divisor;
 
-	divisor = rate_divisor[cs_rate];
+	divisor = rate_divisor[zd_rate];
 	if (divisor == 0)
 		return -EINVAL;
 
-	switch (cs_rate) {
-	case ZD_CS_CCK_RATE_5_5M:
+	switch (zd_rate) {
+	case ZD_CCK_RATE_5_5M:
 		bits = (2*bits) + 10; /* round up to the next integer */
 		break;
-	case ZD_CS_CCK_RATE_11M:
+	case ZD_CCK_RATE_11M:
 		if (service) {
 			u32 t = bits % 11;
 			*service &= ~ZD_PLCP_SERVICE_LENGTH_EXTENSION;
@@ -532,16 +730,16 @@
 	R2M_11A		   = 0x02,
 };
 
-static u8 cs_rate_to_modulation(u8 cs_rate, int flags)
+static u8 zd_rate_to_modulation(u8 zd_rate, int flags)
 {
 	u8 modulation;
 
-	modulation = cs_typed_rate(cs_rate);
+	modulation = zd_rate_typed(zd_rate);
 	if (flags & R2M_SHORT_PREAMBLE) {
 		switch (ZD_CS_RATE(modulation)) {
-		case ZD_CS_CCK_RATE_2M:
-		case ZD_CS_CCK_RATE_5_5M:
-		case ZD_CS_CCK_RATE_11M:
+		case ZD_CCK_RATE_2M:
+		case ZD_CCK_RATE_5_5M:
+		case ZD_CCK_RATE_11M:
 			modulation |= ZD_CS_CCK_PREA_SHORT;
 			return modulation;
 		}
@@ -558,39 +756,36 @@
 {
 	struct ieee80211softmac_device *softmac = ieee80211_priv(mac->netdev);
 	u16 ftype = WLAN_FC_GET_TYPE(le16_to_cpu(hdr->frame_ctl));
-	u8 rate, cs_rate;
+	u8 rate, zd_rate;
 	int is_mgt = (ftype == IEEE80211_FTYPE_MGMT) != 0;
+	int is_multicast = is_multicast_ether_addr(hdr->addr1);
+	int short_preamble = ieee80211softmac_short_preamble_ok(softmac,
+		is_multicast, is_mgt);
+	int flags = 0;
 
-	/* FIXME: 802.11a? short preamble? */
-	rate = ieee80211softmac_suggest_txrate(softmac,
-		is_multicast_ether_addr(hdr->addr1), is_mgt);
+	/* FIXME: 802.11a? */
+	rate = ieee80211softmac_suggest_txrate(softmac, is_multicast, is_mgt);
 
-	cs_rate = rate_to_cs_rate(rate);
-	cs->modulation = cs_rate_to_modulation(cs_rate, 0);
+	if (short_preamble)
+		flags |= R2M_SHORT_PREAMBLE;
+
+	zd_rate = rate_to_zd_rate(rate);
+	cs->modulation = zd_rate_to_modulation(zd_rate, flags);
 }
 
 static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
 	                   struct ieee80211_hdr_4addr *header)
 {
+	struct ieee80211softmac_device *softmac = ieee80211_priv(mac->netdev);
 	unsigned int tx_length = le16_to_cpu(cs->tx_length);
 	u16 fctl = le16_to_cpu(header->frame_ctl);
 	u16 ftype = WLAN_FC_GET_TYPE(fctl);
 	u16 stype = WLAN_FC_GET_STYPE(fctl);
 
 	/*
-	 * CONTROL:
-	 * - start at 0x00
-	 * - if fragment 0, enable bit 0
+	 * CONTROL TODO:
 	 * - if backoff needed, enable bit 0
 	 * - if burst (backoff not needed) disable bit 0
-	 * - if multicast, enable bit 1
-	 * - if PS-POLL frame, enable bit 2
-	 * - if in INDEPENDENT_BSS mode and zd1205_DestPowerSave, then enable
-	 *   bit 4 (FIXME: wtf)
-	 * - if frag_len > RTS threshold, set bit 5 as long if it isnt
-	 *   multicast or mgt
-	 * - if bit 5 is set, and we are in OFDM mode, unset bit 5 and set bit
-	 *   7
 	 */
 
 	cs->control = 0;
@@ -607,17 +802,18 @@
 	if (stype == IEEE80211_STYPE_PSPOLL)
 		cs->control |= ZD_CS_PS_POLL_FRAME;
 
+	/* Unicast data frames over the threshold should have RTS */
 	if (!is_multicast_ether_addr(header->addr1) &&
-	    ftype != IEEE80211_FTYPE_MGMT &&
-	    tx_length > zd_netdev_ieee80211(mac->netdev)->rts)
-	{
-		/* FIXME: check the logic */
-		if (ZD_CS_TYPE(cs->modulation) == ZD_CS_OFDM) {
-			/* 802.11g */
-			cs->control |= ZD_CS_SELF_CTS;
-		} else { /* 802.11b */
-			cs->control |= ZD_CS_RTS;
-		}
+	    ftype != IEEE80211_FTYPE_MGMT &&
+	    tx_length > zd_netdev_ieee80211(mac->netdev)->rts)
+		cs->control |= ZD_CS_RTS;
+
+	/* Use CTS-to-self protection if required */
+	if (ZD_CS_TYPE(cs->modulation) == ZD_CS_OFDM &&
+			ieee80211softmac_protection_needed(softmac)) {
+		/* FIXME: avoid sending RTS *and* self-CTS, is that correct? */
+		cs->control &= ~ZD_CS_RTS;
+		cs->control |= ZD_CS_SELF_CTS;
 	}
 
 	/* FIXME: Management frame? */
@@ -721,7 +917,7 @@
 	u8  rt_rate;
 	u16 rt_channel;
 	u16 rt_chbitmask;
-};
+} __attribute__((packed));
 
 static void fill_rt_header(void *buffer, struct zd_mac *mac,
 	                   const struct ieee80211_rx_stats *stats,
@@ -778,13 +974,16 @@
 	}
 
 	return memcmp(hdr->addr1, netdev->dev_addr, ETH_ALEN) == 0 ||
-	       is_multicast_ether_addr(hdr->addr1) ||
+	       (is_multicast_ether_addr(hdr->addr1) &&
+		memcmp(hdr->addr3, netdev->dev_addr, ETH_ALEN) != 0) ||
 	       (netdev->flags & IFF_PROMISC);
 }
 
-/* Filters receiving packets. If it returns 1 send it to ieee80211_rx, if 0
- * return. If an error is detected -EINVAL is returned. ieee80211_rx_mgt() is
- * called here.
+/* Filters received packets. The function returns 1 if the packet should be
+ * forwarded to ieee80211_rx(). If the packet should be ignored the function
+ * returns 0. If an invalid packet is found the function returns -EINVAL.
+ *
+ * The function calls ieee80211_rx_mgt() directly.
  *
  * It has been based on ieee80211_rx_any.
  */
@@ -810,9 +1009,9 @@
 		ieee80211_rx_mgt(ieee, hdr, stats);
 		return 0;
 	case IEEE80211_FTYPE_CTL:
-		/* Ignore invalid short buffers */
 		return 0;
 	case IEEE80211_FTYPE_DATA:
+		/* Ignore invalid short buffers */
 		if (length < sizeof(struct ieee80211_hdr_3addr))
 			return -EINVAL;
 		return is_data_packet_for_us(ieee, hdr);
@@ -908,10 +1107,8 @@
 	memcpy(skb_put(skb, length), buffer, length);
 
 	r = ieee80211_rx(ieee, skb, &stats);
-	if (!r) {
-		ZD_ASSERT(in_irq());
-		dev_kfree_skb_irq(skb);
-	}
+	if (!r)
+		dev_kfree_skb_any(skb);
 	return 0;
 }
 
@@ -993,6 +1190,7 @@
 static void softmac_init(struct ieee80211softmac_device *sm)
 {
 	sm->set_channel = set_channel;
+	sm->bssinfo_change = bssinfo_change;
 }
 
 struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev)
@@ -1028,71 +1226,12 @@
 	return iw_stats;
 }
 
-#ifdef DEBUG
-static const char* decryption_types[] = {
-	[ZD_RX_NO_WEP] = "none",
-	[ZD_RX_WEP64] = "WEP64",
-	[ZD_RX_TKIP] = "TKIP",
-	[ZD_RX_AES] = "AES",
-	[ZD_RX_WEP128] = "WEP128",
-	[ZD_RX_WEP256] = "WEP256",
-};
-
-static const char *decryption_type_string(u8 type)
-{
-	const char *s;
-
-	if (type < ARRAY_SIZE(decryption_types)) {
-		s = decryption_types[type];
-	} else {
-		s = NULL;
-	}
-	return s ? s : "unknown";
-}
-
-static int is_ofdm(u8 frame_status)
-{
-	return (frame_status & ZD_RX_OFDM);
-}
-
-void zd_dump_rx_status(const struct rx_status *status)
-{
-	const char* modulation;
-	u8 quality;
-
-	if (is_ofdm(status->frame_status)) {
-		modulation = "ofdm";
-		quality = status->signal_quality_ofdm;
-	} else {
-		modulation = "cck";
-		quality = status->signal_quality_cck;
-	}
-	pr_debug("rx status %s strength %#04x qual %#04x decryption %s\n",
-		modulation, status->signal_strength, quality,
-		decryption_type_string(status->decryption_type));
-	if (status->frame_status & ZD_RX_ERROR) {
-		pr_debug("rx error %s%s%s%s%s%s\n",
-			(status->frame_status & ZD_RX_TIMEOUT_ERROR) ?
-				"timeout " : "",
-			(status->frame_status & ZD_RX_FIFO_OVERRUN_ERROR) ?
-				"fifo " : "",
-			(status->frame_status & ZD_RX_DECRYPTION_ERROR) ?
-				"decryption " : "",
-			(status->frame_status & ZD_RX_CRC32_ERROR) ?
-				"crc32 " : "",
-			(status->frame_status & ZD_RX_NO_ADDR1_MATCH_ERROR) ?
-				"addr1 " : "",
-			(status->frame_status & ZD_RX_CRC16_ERROR) ?
-				"crc16" : "");
-	}
-}
-#endif /* DEBUG */
-
 #define LINK_LED_WORK_DELAY HZ
 
-static void link_led_handler(void *p)
+static void link_led_handler(struct work_struct *work)
 {
-	struct zd_mac *mac = p;
+	struct zd_mac *mac =
+		container_of(work, struct zd_mac, housekeeping.link_led_work.work);
 	struct zd_chip *chip = &mac->chip;
 	struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev);
 	int is_associated;
@@ -1113,7 +1252,7 @@
 
 static void housekeeping_init(struct zd_mac *mac)
 {
-	INIT_WORK(&mac->housekeeping.link_led_work, link_led_handler, mac);
+	INIT_DELAYED_WORK(&mac->housekeeping.link_led_work, link_led_handler);
 }
 
 static void housekeeping_enable(struct zd_mac *mac)
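Note: the housekeeping change above is one instance of the tree-wide 2.6.20 workqueue
conversion seen throughout this merge: handlers now take a struct work_struct and recover
their private data with container_of() instead of a void pointer. A minimal sketch of the
pattern, using a hypothetical struct foo rather than code from any driver touched here:

	#include <linux/workqueue.h>

	struct foo {
		struct delayed_work work;	/* embedded instead of passing a void * */
	};

	static void foo_handler(struct work_struct *work)
	{
		/* delayed_work embeds a work_struct as .work, hence work.work here */
		struct foo *f = container_of(work, struct foo, work.work);

		/* ... periodic housekeeping on f ... */
		schedule_delayed_work(&f->work, HZ);	/* re-arm one second later */
	}

	static void foo_init(struct foo *f)
	{
		INIT_DELAYED_WORK(&f->work, foo_handler);
		schedule_delayed_work(&f->work, HZ);
	}
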
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index b8ea3de..f0cf05d 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -20,6 +20,7 @@
 
 #include <linux/wireless.h>
 #include <linux/kernel.h>
+#include <linux/workqueue.h>
 #include <net/ieee80211.h>
 #include <net/ieee80211softmac.h>
 
@@ -48,10 +49,11 @@
 #define ZD_CS_CCK		0x00
 #define ZD_CS_OFDM		0x10
 
-#define ZD_CS_CCK_RATE_1M	0x00
-#define ZD_CS_CCK_RATE_2M	0x01
-#define ZD_CS_CCK_RATE_5_5M	0x02
-#define ZD_CS_CCK_RATE_11M	0x03
+/* These are referred to as zd_rates */
+#define ZD_CCK_RATE_1M	0x00
+#define ZD_CCK_RATE_2M	0x01
+#define ZD_CCK_RATE_5_5M	0x02
+#define ZD_CCK_RATE_11M	0x03
 /* The rates for OFDM are encoded as in the PLCP header. Use ZD_OFDM_RATE_*.
  */
 
@@ -82,7 +84,7 @@
 struct rx_length_info {
 	__le16 length[3];
 	__le16 tag;
-};
+} __attribute__((packed));
 
 #define RX_LENGTH_INFO_TAG		0x697e
 
@@ -93,7 +95,7 @@
 	u8 signal_quality_ofdm;
 	u8 decryption_type;
 	u8 frame_status;
-};
+} __attribute__((packed));
 
 /* rx_status field decryption_type */
 #define ZD_RX_NO_WEP	0
@@ -116,12 +118,8 @@
 #define ZD_RX_CRC16_ERROR		0x40
 #define ZD_RX_ERROR			0x80
 
-enum mac_flags {
-	MAC_FIXED_CHANNEL = 0x01,
-};
-
 struct housekeeping {
-	struct work_struct link_led_work;
+	struct delayed_work link_led_work;
 };
 
 #define ZD_MAC_STATS_BUFFER_SIZE 16
@@ -130,15 +128,35 @@
 	struct zd_chip chip;
 	spinlock_t lock;
 	struct net_device *netdev;
+
 	/* Unlocked reading possible */
 	struct iw_statistics iw_stats;
+
 	struct housekeeping housekeeping;
+	struct work_struct set_multicast_hash_work;
+	struct zd_mc_hash multicast_hash;
+	struct delayed_work set_rts_cts_work;
+	struct delayed_work set_basic_rates_work;
+
 	unsigned int stats_count;
 	u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE];
 	u8 rssi_buffer[ZD_MAC_STATS_BUFFER_SIZE];
 	u8 regdomain;
 	u8 default_regdomain;
 	u8 requested_channel;
+
+	/* A bitpattern of cr_rates */
+	u16 basic_rates;
+
+	/* A zd_rate */
+	u8 rts_rate;
+
+	/* Short preamble (used for RTS/CTS) */
+	unsigned int short_preamble:1;
+
+	/* flags to indicate update in progress */
+	unsigned int updating_rts_rate:1;
+	unsigned int updating_basic_rates:1;
 };
 
 static inline struct ieee80211_device *zd_mac_to_ieee80211(struct zd_mac *mac)
@@ -173,6 +191,7 @@
 int zd_mac_open(struct net_device *netdev);
 int zd_mac_stop(struct net_device *netdev);
 int zd_mac_set_mac_address(struct net_device *dev, void *p);
+void zd_mac_set_multicast_list(struct net_device *netdev);
 
 int zd_mac_rx(struct zd_mac *mac, const u8 *buffer, unsigned int length);
 
@@ -180,7 +199,7 @@
 u8 zd_mac_get_regdomain(struct zd_mac *zd_mac);
 
 int zd_mac_request_channel(struct zd_mac *mac, u8 channel);
-int zd_mac_get_channel(struct zd_mac *mac, u8 *channel, u8 *flags);
+u8 zd_mac_get_channel(struct zd_mac *mac);
 
 int zd_mac_set_mode(struct zd_mac *mac, u32 mode);
 int zd_mac_get_mode(struct zd_mac *mac, u32 *mode);
diff --git a/drivers/net/wireless/zd1211rw/zd_netdev.c b/drivers/net/wireless/zd1211rw/zd_netdev.c
index af3a7b3..8bda48d 100644
--- a/drivers/net/wireless/zd1211rw/zd_netdev.c
+++ b/drivers/net/wireless/zd1211rw/zd_netdev.c
@@ -107,21 +107,10 @@
 	           struct iw_request_info *info,
 		   union iwreq_data *req, char *extra)
 {
-	int r;
 	struct zd_mac *mac = zd_netdev_mac(netdev);
 	struct iw_freq *freq = &req->freq;
-	u8 channel;
-	u8 flags;
 
-	r = zd_mac_get_channel(mac, &channel, &flags);
-	if (r)
-		return r;
-
-	freq->flags = (flags & MAC_FIXED_CHANNEL) ?
-		      IW_FREQ_FIXED : IW_FREQ_AUTO;
-	dev_dbg_f(zd_mac_dev(mac), "channel %s\n",
-		  (flags & MAC_FIXED_CHANNEL) ? "fixed" : "auto");
-	return zd_channel_to_freq(freq, channel);
+	return zd_channel_to_freq(freq, zd_mac_get_channel(mac));
 }
 
 static int iw_set_mode(struct net_device *netdev,
@@ -253,7 +242,7 @@
 	netdev->open = zd_mac_open;
 	netdev->stop = zd_mac_stop;
 	/* netdev->get_stats = */
-	/* netdev->set_multicast_list = */
+	netdev->set_multicast_list = zd_mac_set_multicast_list;
 	netdev->set_mac_address = zd_mac_set_mac_address;
 	netdev->wireless_handlers = &iw_handler_def;
 	/* netdev->ethtool_ops = */
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 3faaeb2..aa782e8 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -47,11 +47,17 @@
 	{ USB_DEVICE(0x0586, 0x3402), .driver_info = DEVICE_ZD1211 },
 	{ USB_DEVICE(0x0b3b, 0x5630), .driver_info = DEVICE_ZD1211 },
 	{ USB_DEVICE(0x0b05, 0x170c), .driver_info = DEVICE_ZD1211 },
+	{ USB_DEVICE(0x1435, 0x0711), .driver_info = DEVICE_ZD1211 },
+	{ USB_DEVICE(0x0586, 0x3409), .driver_info = DEVICE_ZD1211 },
+	{ USB_DEVICE(0x0b3b, 0x1630), .driver_info = DEVICE_ZD1211 },
+	{ USB_DEVICE(0x0586, 0x3401), .driver_info = DEVICE_ZD1211 },
+	{ USB_DEVICE(0x14ea, 0xab13), .driver_info = DEVICE_ZD1211 },
 	/* ZD1211B */
 	{ USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
+	{ USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B },
 	/* "Driverless" devices that need ejecting */
 	{ USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
 	{}
@@ -366,15 +372,6 @@
 	return r;
 }
 
-static void disable_read_regs_int(struct zd_usb *usb)
-{
-	struct zd_usb_interrupt *intr = &usb->intr;
-
-	spin_lock(&intr->lock);
-	intr->read_regs_enabled = 0;
-	spin_unlock(&intr->lock);
-}
-
 #define urb_dev(urb) (&(urb)->dev->dev)
 
 static inline void handle_regs_int(struct urb *urb)
@@ -596,6 +593,8 @@
 		unsigned int l, k, n;
 		for (i = 0, l = 0;; i++) {
 			k = le16_to_cpu(get_unaligned(&length_info->length[i]));
+			if (k == 0)
+				return;
 			n = l+k;
 			if (n > length)
 				return;
@@ -1119,27 +1118,28 @@
 {
 	int r;
 
-	pr_debug("usb_init()\n");
+	pr_debug("%s usb_init()\n", driver.name);
 
 	zd_workqueue = create_singlethread_workqueue(driver.name);
 	if (zd_workqueue == NULL) {
-		printk(KERN_ERR "%s: couldn't create workqueue\n", driver.name);
+		printk(KERN_ERR "%s couldn't create workqueue\n", driver.name);
 		return -ENOMEM;
 	}
 
 	r = usb_register(&driver);
 	if (r) {
-		printk(KERN_ERR "usb_register() failed. Error number %d\n", r);
+		printk(KERN_ERR "%s usb_register() failed. Error number %d\n",
+		       driver.name, r);
 		return r;
 	}
 
-	pr_debug("zd1211rw initialized\n");
+	pr_debug("%s initialized\n", driver.name);
 	return 0;
 }
 
 static void __exit usb_exit(void)
 {
-	pr_debug("usb_exit()\n");
+	pr_debug("%s usb_exit()\n", driver.name);
 	usb_deregister(&driver);
 	destroy_workqueue(zd_workqueue);
 }
@@ -1156,10 +1156,19 @@
 {
 	struct zd_usb_interrupt *intr = &usb->intr;
 
-	spin_lock(&intr->lock);
+	spin_lock_irq(&intr->lock);
 	intr->read_regs_enabled = 1;
 	INIT_COMPLETION(intr->read_regs.completion);
-	spin_unlock(&intr->lock);
+	spin_unlock_irq(&intr->lock);
+}
+
+static void disable_read_regs_int(struct zd_usb *usb)
+{
+	struct zd_usb_interrupt *intr = &usb->intr;
+
+	spin_lock_irq(&intr->lock);
+	intr->read_regs_enabled = 0;
+	spin_unlock_irq(&intr->lock);
 }
 
 static int get_results(struct zd_usb *usb, u16 *values,
@@ -1171,7 +1180,7 @@
 	struct read_regs_int *rr = &intr->read_regs;
 	struct usb_int_regs *regs = (struct usb_int_regs *)rr->buffer;
 
-	spin_lock(&intr->lock);
+	spin_lock_irq(&intr->lock);
 
 	r = -EIO;
 	/* The created block size seems to be larger than expected.
@@ -1204,7 +1213,7 @@
 
 	r = 0;
 error_unlock:
-	spin_unlock(&intr->lock);
+	spin_unlock_irq(&intr->lock);
 	return r;
 }
 
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index e81a2d3..317d37c 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -74,17 +74,17 @@
 struct usb_req_read_regs {
 	__le16 id;
 	__le16 addr[0];
-};
+} __attribute__((packed));
 
 struct reg_data {
 	__le16 addr;
 	__le16 value;
-};
+} __attribute__((packed));
 
 struct usb_req_write_regs {
 	__le16 id;
 	struct reg_data reg_writes[0];
-};
+} __attribute__((packed));
 
 enum {
 	RF_IF_LE = 0x02,
@@ -101,7 +101,7 @@
 	/* RF2595: 24 */
 	__le16 bit_values[0];
 	/* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */
-};
+} __attribute__((packed));
 
 /* USB interrupt */
 
@@ -118,12 +118,12 @@
 struct usb_int_header {
 	u8 type;	/* must always be 1 */
 	u8 id;
-};
+} __attribute__((packed));
 
 struct usb_int_regs {
 	struct usb_int_header hdr;
 	struct reg_data regs[0];
-};
+} __attribute__((packed));
 
 struct usb_int_retry_fail {
 	struct usb_int_header hdr;
@@ -131,7 +131,7 @@
 	u8 _dummy;
 	u8 addr[ETH_ALEN];
 	u8 ibss_wakeup_dest;
-};
+} __attribute__((packed));
 
 struct read_regs_int {
 	struct completion completion;
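Note: the zd_usb.h hunks above add __attribute__((packed)) to every structure that mirrors
an on-the-wire USB message. As a rough illustration of why (these example structures are
hypothetical, not from the driver): without the attribute the compiler may insert alignment
padding, so a buffer received from the device would no longer line up with the struct fields.

	#include <linux/types.h>

	struct msg_padded {		/* typically 4 bytes: padding inserted after type */
		u8 type;
		__le16 value;
	};

	struct msg_packed {		/* exactly 3 bytes, matching the wire format */
		u8 type;
		__le16 value;
	} __attribute__((packed));
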
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c
index df04e05..d85e2ea 100644
--- a/drivers/net/zorro8390.c
+++ b/drivers/net/zorro8390.c
@@ -34,8 +34,16 @@
 #include <asm/amigaints.h>
 #include <asm/amigahw.h>
 
-#include "8390.h"
+#define EI_SHIFT(x)	(ei_local->reg_offset[x])
+#define ei_inb(port)   in_8(port)
+#define ei_outb(val,port)  out_8(port,val)
+#define ei_inb_p(port)   in_8(port)
+#define ei_outb_p(val,port)  out_8(port,val)
 
+static const char version[] =
+    "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include "lib8390.c"
 
 #define DRV_NAME	"zorro8390"
 
@@ -114,7 +122,7 @@
 	    break;
     board = z->resource.start;
     ioaddr = board+cards[i].offset;
-    dev = alloc_ei_netdev();
+    dev = ____alloc_ei_netdev(0);
     if (!dev)
 	return -ENOMEM;
     SET_MODULE_OWNER(dev);
@@ -201,7 +209,7 @@
     dev->irq = IRQ_AMIGA_PORTS;
 
     /* Install the Interrupt handler */
-    i = request_irq(IRQ_AMIGA_PORTS, ei_interrupt, IRQF_SHARED, DRV_NAME, dev);
+    i = request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, IRQF_SHARED, DRV_NAME, dev);
     if (i) return i;
 
     for(i = 0; i < ETHER_ADDR_LEN; i++) {
@@ -226,10 +234,10 @@
     dev->open = &zorro8390_open;
     dev->stop = &zorro8390_close;
 #ifdef CONFIG_NET_POLL_CONTROLLER
-    dev->poll_controller = ei_poll;
+    dev->poll_controller = __ei_poll;
 #endif
 
-    NS8390_init(dev, 0);
+    __NS8390_init(dev, 0);
     err = register_netdev(dev);
     if (err) {
 	free_irq(IRQ_AMIGA_PORTS, dev);
@@ -246,7 +254,7 @@
 
 static int zorro8390_open(struct net_device *dev)
 {
-    ei_open(dev);
+    __ei_open(dev);
     return 0;
 }
 
@@ -254,7 +262,7 @@
 {
     if (ei_debug > 1)
 	printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
-    ei_close(dev);
+    __ei_close(dev);
     return 0;
 }
 
@@ -405,7 +413,7 @@
 		printk(KERN_ERR "%s: timeout waiting for Tx RDC.\n",
 		       dev->name);
 		zorro8390_reset_8390(dev);
-		NS8390_init(dev,1);
+		__NS8390_init(dev,1);
 		break;
 	}
 
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index fc4bc9b..a83c3db 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -29,7 +29,7 @@
 
 struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
 
-static void wq_sync_buffer(void *);
+static void wq_sync_buffer(struct work_struct *work);
 
 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
 static int work_enabled;
@@ -65,7 +65,7 @@
 		b->sample_received = 0;
 		b->sample_lost_overflow = 0;
 		b->cpu = i;
-		INIT_WORK(&b->work, wq_sync_buffer, b);
+		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
 	}
 	return 0;
 
@@ -282,9 +282,10 @@
  * By using schedule_delayed_work_on and then schedule_delayed_work
  * we guarantee this will stay on the correct cpu
  */
-static void wq_sync_buffer(void * data)
+static void wq_sync_buffer(struct work_struct *work)
 {
-	struct oprofile_cpu_buffer * b = data;
+	struct oprofile_cpu_buffer * b =
+		container_of(work, struct oprofile_cpu_buffer, work.work);
 	if (b->cpu != smp_processor_id()) {
 		printk("WQ on CPU%d, prefer CPU%d\n",
 		       smp_processor_id(), b->cpu);
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 09abb80..49900d9 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -43,7 +43,7 @@
 	unsigned long sample_lost_overflow;
 	unsigned long backtrace_aborted;
 	int cpu;
-	struct work_struct work;
+	struct delayed_work work;
 } ____cacheline_aligned;
 
 extern struct oprofile_cpu_buffer cpu_buffer[];
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 68cb3a0..fe3f5f5 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -486,7 +486,7 @@
 **   This bit tells U2 to do R/M/W for partial cachelines. "Streaming"
 **   data can avoid this if the mapping covers full cache lines.
 ** o STOP_MOST is needed for atomicity across cachelines.
-**   Apperently only "some EISA devices" need this.
+**   Apparently only "some EISA devices" need this.
 **   Using CONFIG_ISA is hack. Only the IOA with EISA under it needs
 **   to use this hint iff the EISA devices needs this feature.
 **   According to the U2 ERS, STOP_MOST enabled pages hurt performance.
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index c2949b4..12bab64 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -50,12 +50,12 @@
 **
 ** PA Firmware
 ** -----------
-** PA-RISC platforms have two fundementally different types of firmware.
+** PA-RISC platforms have two fundamentally different types of firmware.
 ** For PCI devices, "Legacy" PDC initializes the "INTERRUPT_LINE" register
 ** and BARs similar to a traditional PC BIOS.
 ** The newer "PAT" firmware supports PDC calls which return tables.
-** PAT firmware only initializes PCI Console and Boot interface.
-** With these tables, the OS can progam all other PCI devices.
+** PAT firmware only initializes the PCI Console and Boot interface.
+** With these tables, the OS can program all other PCI devices.
 **
 ** One such PAT PDC call returns the "Interrupt Routing Table" (IRT).
 ** The IRT maps each PCI slot's INTA-D "output" line to an I/O SAPIC
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index b953d59..e60b4bf 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -166,14 +166,6 @@
     
     tuple.TupleData = (cisdata_t *)buf;
     tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
-    tuple.Attributes = 0;
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
-
     tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
     tuple.Attributes = 0;
     CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
@@ -263,6 +255,7 @@
 
 static struct pcmcia_device_id parport_ids[] = {
 	PCMCIA_DEVICE_FUNC_ID(3),
+	PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc),
 	PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0003),
 	PCMCIA_DEVICE_NULL
 };
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 39c9664..b61c17b 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1975,7 +1975,7 @@
 /* --- IRQ detection -------------------------------------- */
 
 /* Only if supports ECP mode */
-static int __devinit programmable_irq_support(struct parport *pb)
+static int programmable_irq_support(struct parport *pb)
 {
 	int irq, intrLine;
 	unsigned char oecr = inb (ECONTROL (pb));
@@ -1992,7 +1992,7 @@
 	return irq;
 }
 
-static int __devinit irq_probe_ECP(struct parport *pb)
+static int irq_probe_ECP(struct parport *pb)
 {
 	int i;
 	unsigned long irqs;
@@ -2020,7 +2020,7 @@
  * This detection seems that only works in National Semiconductors
  * This doesn't work in SMC, LGS, and Winbond 
  */
-static int __devinit irq_probe_EPP(struct parport *pb)
+static int irq_probe_EPP(struct parport *pb)
 {
 #ifndef ADVANCED_DETECT
 	return PARPORT_IRQ_NONE;
@@ -2059,7 +2059,7 @@
 #endif /* Advanced detection */
 }
 
-static int __devinit irq_probe_SPP(struct parport *pb)
+static int irq_probe_SPP(struct parport *pb)
 {
 	/* Don't even try to do this. */
 	return PARPORT_IRQ_NONE;
@@ -2747,6 +2747,7 @@
 	titan_1284p2,
 	avlab_1p,
 	avlab_2p,
+	oxsemi_952,
 	oxsemi_954,
 	oxsemi_840,
 	aks_0100,
@@ -2822,6 +2823,7 @@
 	/* avlab_2p		*/	{ 2, { { 0, 1}, { 2, 3 },} },
 	/* The Oxford Semi cards are unusual: 954 doesn't support ECP,
 	 * and 840 locks up if you write 1 to bit 2! */
+	/* oxsemi_952 */		{ 1, { { 0, 1 }, } },
 	/* oxsemi_954 */		{ 1, { { 0, -1 }, } },
 	/* oxsemi_840 */		{ 1, { { 0, -1 }, } },
 	/* aks_0100 */                  { 1, { { 0, -1 }, } },
@@ -2895,6 +2897,8 @@
 	/* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
 	{ 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p}, /* AFAVLAB_TK9902 */
 	{ 0x14db, 0x2121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2p},
+	{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952PP,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_952 },
 	{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954PP,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_954 },
 	{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_12PCI840,
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 5f1b9f5..f1dd81a 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -19,7 +19,7 @@
 
 config PCI_MULTITHREAD_PROBE
 	bool "PCI Multi-threaded probe (EXPERIMENTAL)"
-	depends on PCI && EXPERIMENTAL && BROKEN
+	depends on PCI && EXPERIMENTAL
 	help
 	  Say Y here if you want the PCI core to spawn a new thread for
 	  every PCI device that is probed.  This can cause a huge
@@ -27,14 +27,14 @@
 	  smaller speedup on single processor machines.
 
 	  But it can also cause lots of bad things to happen.  A number
-	  of PCI drivers can not properly handle running in this way,
+	  of PCI drivers cannot properly handle running in this way,
 	  some will just not work properly at all, while others might
 	  decide to blow up power supplies with a huge load all at once,
 	  so use this option at your own risk.
 
 	  It is very unwise to use this option if you are not using a
 	  boot process that can handle devices being created in any
-	  order.  A program that can create persistant block and network
+	  order.  A program that can create persistent block and network
 	  device names (like udev) is a good idea if you wish to use
 	  this option.
 
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index ea16805..fc405f0 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -1,6 +1,8 @@
 #include <linux/pci.h>
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/ioport.h>
+#include <linux/wait.h>
 
 #include "pci.h"
 
@@ -63,30 +65,42 @@
 EXPORT_SYMBOL(pci_bus_write_config_word);
 EXPORT_SYMBOL(pci_bus_write_config_dword);
 
-static u32 pci_user_cached_config(struct pci_dev *dev, int pos)
-{
-	u32 data;
+/*
+ * The following routines are to prevent the user from accessing PCI config
+ * space when it's unsafe to do so.  Some devices require this during BIST and
+ * we're required to prevent it during D-state transitions.
+ *
+ * We have a bit per device to indicate it's blocked and a global wait queue
+ * for callers to sleep on until devices are unblocked.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(pci_ucfg_wait);
 
-	data = dev->saved_config_space[pos/sizeof(dev->saved_config_space[0])];
-	data >>= (pos % sizeof(dev->saved_config_space[0])) * 8;
-	return data;
+static noinline void pci_wait_ucfg(struct pci_dev *dev)
+{
+	DECLARE_WAITQUEUE(wait, current);
+
+	__add_wait_queue(&pci_ucfg_wait, &wait);
+	do {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		spin_unlock_irq(&pci_lock);
+		schedule();
+		spin_lock_irq(&pci_lock);
+	} while (dev->block_ucfg_access);
+	__remove_wait_queue(&pci_ucfg_wait, &wait);
 }
 
 #define PCI_USER_READ_CONFIG(size,type)					\
 int pci_user_read_config_##size						\
 	(struct pci_dev *dev, int pos, type *val)			\
 {									\
-	unsigned long flags;						\
 	int ret = 0;							\
 	u32 data = -1;							\
 	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
-	spin_lock_irqsave(&pci_lock, flags);				\
-	if (likely(!dev->block_ucfg_access))				\
-		ret = dev->bus->ops->read(dev->bus, dev->devfn,		\
+	spin_lock_irq(&pci_lock);					\
+	if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev);	\
+	ret = dev->bus->ops->read(dev->bus, dev->devfn,			\
 					pos, sizeof(type), &data);	\
-	else if (pos < sizeof(dev->saved_config_space))			\
-		data = pci_user_cached_config(dev, pos); 		\
-	spin_unlock_irqrestore(&pci_lock, flags);			\
+	spin_unlock_irq(&pci_lock);					\
 	*val = (type)data;						\
 	return ret;							\
 }
@@ -95,14 +109,13 @@
 int pci_user_write_config_##size					\
 	(struct pci_dev *dev, int pos, type val)			\
 {									\
-	unsigned long flags;						\
 	int ret = -EIO;							\
 	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
-	spin_lock_irqsave(&pci_lock, flags);				\
-	if (likely(!dev->block_ucfg_access))				\
-		ret = dev->bus->ops->write(dev->bus, dev->devfn,	\
+	spin_lock_irq(&pci_lock);					\
+	if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev);	\
+	ret = dev->bus->ops->write(dev->bus, dev->devfn,		\
 					pos, sizeof(type), val);	\
-	spin_unlock_irqrestore(&pci_lock, flags);			\
+	spin_unlock_irq(&pci_lock);					\
 	return ret;							\
 }
 
@@ -117,21 +130,23 @@
  * pci_block_user_cfg_access - Block userspace PCI config reads/writes
  * @dev:	pci device struct
  *
- * This function blocks any userspace PCI config accesses from occurring.
- * When blocked, any writes will be bit bucketed and reads will return the
- * data saved using pci_save_state for the first 64 bytes of config
- * space and return 0xff for all other config reads.
- **/
+ * When user access is blocked, any reads or writes to config space will
+ * sleep until access is unblocked again.  We don't allow nesting of
+ * block/unblock calls.
+ */
 void pci_block_user_cfg_access(struct pci_dev *dev)
 {
 	unsigned long flags;
+	int was_blocked;
 
-	pci_save_state(dev);
-
-	/* spinlock to synchronize with anyone reading config space now */
 	spin_lock_irqsave(&pci_lock, flags);
+	was_blocked = dev->block_ucfg_access;
 	dev->block_ucfg_access = 1;
 	spin_unlock_irqrestore(&pci_lock, flags);
+
+	/* If we BUG() inside the pci_lock, we're guaranteed to hose
+	 * the machine */
+	BUG_ON(was_blocked);
 }
 EXPORT_SYMBOL_GPL(pci_block_user_cfg_access);
 
@@ -140,14 +155,19 @@
  * @dev:	pci device struct
  *
  * This function allows userspace PCI config accesses to resume.
- **/
+ */
 void pci_unblock_user_cfg_access(struct pci_dev *dev)
 {
 	unsigned long flags;
 
-	/* spinlock to synchronize with anyone reading saved config space */
 	spin_lock_irqsave(&pci_lock, flags);
+
+	/* This indicates a problem in the caller, but we don't need
+	 * to kill them, unlike a double-block above. */
+	WARN_ON(!dev->block_ucfg_access);
+
 	dev->block_ucfg_access = 0;
+	wake_up_all(&pci_ucfg_wait);
 	spin_unlock_irqrestore(&pci_lock, flags);
 }
 EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);
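Note: the access.c rework above replaces the old saved-config shadow copy with a sleeping
scheme: while a device has block_ucfg_access set, user-space config reads and writes wait on
pci_ucfg_wait until pci_unblock_user_cfg_access() is called. A hedged sketch of how a caller
is expected to use the pair, e.g. around a self-test (the run_bist() wrapper is illustrative,
not part of this patch):

	#include <linux/pci.h>

	static void run_bist(struct pci_dev *dev)
	{
		pci_block_user_cfg_access(dev);		/* user config accesses now sleep */

		/* ... start BIST and poll PCI_BIST until the device is sane again ... */

		pci_unblock_user_cfg_access(dev);	/* wakes everyone on pci_ucfg_wait */
	}
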
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 59c5b24..ddbadd95 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -62,10 +62,10 @@
 struct slot {
 	struct hotplug_slot	*hotplug_slot;
 	struct acpiphp_slot	*acpi_slot;
+	struct hotplug_slot_info info;
+	char name[SLOT_NAME_SIZE];
 };
 
-
-
 /**
  * struct acpiphp_bridge - PCI bridge information
  *
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index c57d9d5..40c79b0 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -303,25 +303,15 @@
 	/* read initial number of slots */
 	if (!retval) {
 		num_slots = acpiphp_get_num_slots();
-		if (num_slots == 0)
+		if (num_slots == 0) {
+			acpiphp_glue_exit();
 			retval = -ENODEV;
+		}
 	}
 
 	return retval;
 }
 
-
-/**
- * make_slot_name - make a slot name that appears in pcihpfs
- * @slot: slot to name
- *
- */
-static void make_slot_name(struct slot *slot)
-{
-	snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%u",
-		 slot->acpi_slot->sun);
-}
-
 /**
  * release_slot - free up the memory used by a slot
  * @hotplug_slot: slot to free
@@ -332,8 +322,6 @@
 
 	dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name);
 
-	kfree(slot->hotplug_slot->info);
-	kfree(slot->hotplug_slot->name);
 	kfree(slot->hotplug_slot);
 	kfree(slot);
 }
@@ -342,26 +330,19 @@
 int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
 {
 	struct slot *slot;
-	struct hotplug_slot *hotplug_slot;
-	struct hotplug_slot_info *hotplug_slot_info;
 	int retval = -ENOMEM;
 
 	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
 	if (!slot)
 		goto error;
 
-	slot->hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
+	slot->hotplug_slot = kzalloc(sizeof(*slot->hotplug_slot), GFP_KERNEL);
 	if (!slot->hotplug_slot)
 		goto error_slot;
 
-	slot->hotplug_slot->info = kzalloc(sizeof(*hotplug_slot_info),
-					   GFP_KERNEL);
-	if (!slot->hotplug_slot->info)
-		goto error_hpslot;
+	slot->hotplug_slot->info = &slot->info;
 
-	slot->hotplug_slot->name = kzalloc(SLOT_NAME_SIZE, GFP_KERNEL);
-	if (!slot->hotplug_slot->name)
-		goto error_info;
+	slot->hotplug_slot->name = slot->name;
 
 	slot->hotplug_slot->private = slot;
 	slot->hotplug_slot->release = &release_slot;
@@ -376,21 +357,17 @@
 	slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
 
 	acpiphp_slot->slot = slot;
-	make_slot_name(slot);
+	snprintf(slot->name, sizeof(slot->name), "%u", slot->acpi_slot->sun);
 
 	retval = pci_hp_register(slot->hotplug_slot);
 	if (retval) {
 		err("pci_hp_register failed with error %d\n", retval);
-		goto error_name;
+		goto error_hpslot;
  	}
 
 	info("Slot [%s] registered\n", slot->hotplug_slot->name);
 
 	return 0;
-error_name:
-	kfree(slot->hotplug_slot->name);
-error_info:
-	kfree(slot->hotplug_slot->info);
 error_hpslot:
 	kfree(slot->hotplug_slot);
 error_slot:
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 16167b0..0b9d0db 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -1693,14 +1693,10 @@
  */
 int __init acpiphp_get_num_slots(void)
 {
-	struct list_head *node;
 	struct acpiphp_bridge *bridge;
-	int num_slots;
+	int num_slots = 0;
 
-	num_slots = 0;
-
-	list_for_each (node, &bridge_list) {
-		bridge = (struct acpiphp_bridge *)node;
+	list_for_each_entry (bridge, &bridge_list, list) {
 		dbg("Bus %04x:%02x has %d slot%s\n",
 				pci_domain_nr(bridge->pci_bus),
 				bridge->pci_bus->number, bridge->nr_slots,
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index c3ac98a..f55ac38 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -531,7 +531,7 @@
 *
 * Action:  issue a READ command to HPC
 *
-* Input:   pslot   - can not be NULL for READ_ALLSTAT
+* Input:   pslot   - cannot be NULL for READ_ALLSTAT
 *          pstatus - can be NULL for READ_ALLSTAT
 *
 * Return   0 or error codes
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index d87a9e3..d8f05d7 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -1371,12 +1371,12 @@
 	}
 
 	bus = ibmphp_find_res_bus (sec_number);
-	debug ("bus->busno is %x\n", bus->busno);
-	debug ("sec_number is %x\n", sec_number);
 	if (!bus) {
 		err ("cannot find Bus structure for the bridged device\n");
 		return -EINVAL;
 	}
+	debug("bus->busno is %x\n", bus->busno);
+	debug("sec_number is %x\n", sec_number);
 
 	ibmphp_remove_bus (bus, busno);
 
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index f93e81e..f13f313 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -521,14 +521,9 @@
 
 }
 
-static int hpdriver_context = 0;
-
 static void pciehp_remove (struct pcie_device *device)
 {
-	printk("%s ENTRY\n", __FUNCTION__);	
-	printk("%s -> Call free_irq for irq = %d\n",  
-		__FUNCTION__, device->irq);
-	free_irq(device->irq, &hpdriver_context);
+	/* XXX - Needs to be adapted to device driver model */
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 1c551c6..6d3f580 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -718,8 +718,6 @@
 		if (php_ctlr->irq) {
 			free_irq(php_ctlr->irq, ctrl);
 			php_ctlr->irq = 0;
-			if (!pcie_mch_quirk) 
-				pci_disable_msi(php_ctlr->pci_dev);
 		}
 	}
 	if (php_ctlr->pci_dev) 
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 46825fe..7238346 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -63,7 +63,7 @@
 	char *type;
 	int rc;
 
-	while ((np = of_find_node_by_type(np, "pci"))) {
+	while ((np = of_find_node_by_name(np, "pci"))) {
 		rc = rpaphp_get_drc_props(np, NULL, &name, &type, NULL);
 		if (rc == 0)
 			if (!strcmp(drc_name, name) && !strcmp(drc_type, type))
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 141486d..71a2cb8 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -356,7 +356,7 @@
 	info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
 	init_MUTEX(&rpaphp_sem);
 
-	while ((dn = of_find_node_by_type(dn, "pci")))
+	while ((dn = of_find_node_by_name(dn, "pci")))
 		rpaphp_add_slot(dn);
 
 	return 0;
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index b62ad31..5d188c5 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -205,21 +205,6 @@
 	return bss_hotplug_slot;
 }
 
-static void sn_bus_alloc_data(struct pci_dev *dev)
-{
-	struct pci_bus *subordinate_bus;
-	struct pci_dev *child;
-
-	sn_pci_fixup_slot(dev);
-
-	/* Recursively sets up the sn_irq_info structs */
-	if (dev->subordinate) {
-		subordinate_bus = dev->subordinate;
-		list_for_each_entry(child, &subordinate_bus->devices, bus_list)
-			sn_bus_alloc_data(child);
-	}
-}
-
 static void sn_bus_free_data(struct pci_dev *dev)
 {
 	struct pci_bus *subordinate_bus;
@@ -337,6 +322,11 @@
 	return rc;
 }
 
+/*
+ * Power up and configure the slot via a SAL call to PROM.
+ * Scan slot (and any children), do any platform specific fixup,
+ * and find device driver.
+ */
 static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
 {
 	struct slot *slot = bss_hotplug_slot->private;
@@ -345,6 +335,7 @@
 	int func, num_funcs;
 	int new_ppb = 0;
 	int rc;
+	void pcibios_fixup_device_resources(struct pci_dev *);
 
 	/* Serialize the Linux PCI infrastructure */
 	mutex_lock(&sn_hotplug_mutex);
@@ -367,9 +358,6 @@
 		return -ENODEV;
 	}
 
-	sn_pci_controller_fixup(pci_domain_nr(slot->pci_bus),
-				slot->pci_bus->number,
-				slot->pci_bus);
 	/*
 	 * Map SN resources for all functions on the card
 	 * to the Linux PCI interface and tell the drivers
@@ -380,6 +368,13 @@
 				   PCI_DEVFN(slot->device_num + 1,
 					     PCI_FUNC(func)));
 		if (dev) {
+			/* Need to do slot fixup on PPB before fixup of children
+			 * (PPB's pcidev_info needs to be in pcidev_info list
+			 * before child's SN_PCIDEV_INFO() call to setup
+			 * pdi_host_pcidev_info).
+			 */
+			pcibios_fixup_device_resources(dev);
+			sn_pci_fixup_slot(dev);
 			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
 				unsigned char sec_bus;
 				pci_read_config_byte(dev, PCI_SECONDARY_BUS,
@@ -387,12 +382,8 @@
 				new_bus = pci_add_new_bus(dev->bus, dev,
 							  sec_bus);
 				pci_scan_child_bus(new_bus);
-				sn_pci_controller_fixup(pci_domain_nr(new_bus),
-							new_bus->number,
-							new_bus);
 				new_ppb = 1;
 			}
-			sn_bus_alloc_data(dev);
 			pci_dev_put(dev);
 		}
 	}
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index ea2087c..5075769 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -70,7 +70,7 @@
 	struct hotplug_slot *hotplug_slot;
 	struct list_head	slot_list;
 	char name[SLOT_NAME_SIZE];
-	struct work_struct work;	/* work for button event */
+	struct delayed_work work;	/* work for button event */
 	struct mutex lock;
 };
 
@@ -187,7 +187,7 @@
 extern int	shpchp_unconfigure_device(struct slot *p_slot);
 extern void	shpchp_remove_ctrl_files(struct controller *ctrl);
 extern void	cleanup_slots(struct controller *ctrl);
-extern void	queue_pushbutton_work(void *data);
+extern void	queue_pushbutton_work(struct work_struct *work);
 
 
 #ifdef CONFIG_ACPI
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 235c18a..4eac85b 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -159,7 +159,7 @@
 			goto error_info;
 
 		slot->number = sun;
-		INIT_WORK(&slot->work, queue_pushbutton_work, slot);
+		INIT_DELAYED_WORK(&slot->work, queue_pushbutton_work);
 
 		/* register this slot with the hotplug pci core */
 		hotplug_slot->private = slot;
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index c39901d..158ac78 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -36,7 +36,7 @@
 #include "../pci.h"
 #include "shpchp.h"
 
-static void interrupt_event_handler(void *data);
+static void interrupt_event_handler(struct work_struct *work);
 static int shpchp_enable_slot(struct slot *p_slot);
 static int shpchp_disable_slot(struct slot *p_slot);
 
@@ -50,7 +50,7 @@
 
 	info->event_type = event_type;
 	info->p_slot = p_slot;
-	INIT_WORK(&info->work, interrupt_event_handler, info);
+	INIT_WORK(&info->work, interrupt_event_handler);
 
 	schedule_work(&info->work);
 
@@ -408,9 +408,10 @@
  * Handles all pending events and exits.
  *
  */
-static void shpchp_pushbutton_thread(void *data)
+static void shpchp_pushbutton_thread(struct work_struct *work)
 {
-	struct pushbutton_work_info *info = data;
+	struct pushbutton_work_info *info =
+		container_of(work, struct pushbutton_work_info, work);
 	struct slot *p_slot = info->p_slot;
 
 	mutex_lock(&p_slot->lock);
@@ -436,9 +437,9 @@
 	kfree(info);
 }
 
-void queue_pushbutton_work(void *data)
+void queue_pushbutton_work(struct work_struct *work)
 {
-	struct slot *p_slot = data;
+	struct slot *p_slot = container_of(work, struct slot, work.work);
 	struct pushbutton_work_info *info;
 
 	info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -447,7 +448,7 @@
 		return;
 	}
 	info->p_slot = p_slot;
-	INIT_WORK(&info->work, shpchp_pushbutton_thread, info);
+	INIT_WORK(&info->work, shpchp_pushbutton_thread);
 
 	mutex_lock(&p_slot->lock);
 	switch (p_slot->state) {
@@ -541,9 +542,9 @@
 	}
 }
 
-static void interrupt_event_handler(void *data)
+static void interrupt_event_handler(struct work_struct *work)
 {
-	struct event_info *info = data;
+	struct event_info *info = container_of(work, struct event_info, work);
 	struct slot *p_slot = info->p_slot;
 
 	mutex_lock(&p_slot->lock);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 9fc9a34..ed3f7e1 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -26,7 +26,7 @@
 
 static DEFINE_SPINLOCK(msi_lock);
 static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
-static kmem_cache_t* msi_cachep;
+static struct kmem_cache* msi_cachep;
 
 static int pci_msi_enable = 1;
 
@@ -255,10 +255,8 @@
 		pci_write_config_word(dev, msi_control_reg(pos), control);
 		dev->msix_enabled = 1;
 	}
-    	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
-		/* PCI Express Endpoint device detected */
-		pci_intx(dev, 0);  /* disable intx */
-	}
+
+	pci_intx(dev, 0);  /* disable intx */
 }
 
 void disable_msi_mode(struct pci_dev *dev, int pos, int type)
@@ -276,10 +274,8 @@
 		pci_write_config_word(dev, msi_control_reg(pos), control);
 		dev->msix_enabled = 0;
 	}
-    	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
-		/* PCI Express Endpoint device detected */
-		pci_intx(dev, 1);  /* enable intx */
-	}
+
+	pci_intx(dev, 1);  /* enable intx */
 }
 
 static int msi_lookup_irq(struct pci_dev *dev, int type)
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index f0cca17..3898f52 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -6,14 +6,6 @@
 #ifndef MSI_H
 #define MSI_H
 
-/*
- * MSI-X Address Register
- */
-#define PCI_MSIX_FLAGS_QSIZE		0x7FF
-#define PCI_MSIX_FLAGS_ENABLE		(1 << 15)
-#define PCI_MSIX_FLAGS_BIRMASK		(7 << 0)
-#define PCI_MSIX_FLAGS_BITMASK		(1 << 0)
-
 #define PCI_MSIX_ENTRY_SIZE			16
 #define  PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET	0
 #define  PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET	4
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index bb7456c..a064f36 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -36,6 +36,7 @@
 	struct acpi_buffer	output = {ACPI_ALLOCATE_BUFFER, NULL};
 	union acpi_object 	*out_obj;
 	u32			osc_dw0;
+	acpi_status *ret_status = (acpi_status *)retval;
 
 	
 	/* Setting up input parameters */
@@ -56,6 +57,7 @@
 	if (ACPI_FAILURE (status)) {
 		printk(KERN_DEBUG  
 			"Evaluate _OSC Set fails. Status = 0x%04x\n", status);
+		*ret_status = status;
 		return status;
 	}
 	out_obj = output.pointer;
@@ -90,6 +92,7 @@
 
 query_osc_out:
 	kfree(output.pointer);
+	*ret_status = status;
 	return status;
 }
 
@@ -166,6 +169,7 @@
 acpi_status pci_osc_support_set(u32 flags)
 {
 	u32 temp;
+	acpi_status retval;
 
 	if (!(flags & OSC_SUPPORT_MASKS)) {
 		return AE_TYPE;
@@ -179,9 +183,13 @@
 	acpi_get_devices ( PCI_ROOT_HID_STRING,
 			acpi_query_osc,
 			ctrlset_buf,
-			NULL );
+			(void **) &retval );
 	ctrlset_buf[OSC_QUERY_TYPE] = !OSC_QUERY_ENABLE;
 	ctrlset_buf[OSC_CONTROL_TYPE] = temp;
+	if (ACPI_FAILURE(retval)) {
+		/* no osc support at all */
+		ctrlset_buf[OSC_SUPPORT_TYPE] = 0;
+	}
 	return AE_OK;
 }
 EXPORT_SYMBOL(pci_osc_support_set);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 194f1d2..e5ae3a0 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -329,8 +329,8 @@
 	/* restore the PCI config space */
 	pci_restore_state(pci_dev);
 	/* if the device was enabled before suspend, reenable */
-	if (pci_dev->is_enabled)
-		retval = pci_enable_device(pci_dev);
+	if (atomic_read(&pci_dev->enable_cnt))
+		retval = __pci_enable_device(pci_dev);
 	/* if the device was busmaster before the suspend, make it busmaster again */
 	if (pci_dev->is_busmaster)
 		pci_set_master(pci_dev);
@@ -445,9 +445,12 @@
 
 	/* register with core */
 	error = driver_register(&drv->driver);
+	if (error)
+		return error;
 
-	if (!error)
-		error = pci_create_newid_file(drv);
+	error = pci_create_newid_file(drv);
+	if (error)
+		driver_unregister(&drv->driver);
 
 	return error;
 }
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index f952bfe..7a94076 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -42,7 +42,6 @@
 pci_config_attr(subsystem_device, "0x%04x\n");
 pci_config_attr(class, "0x%06x\n");
 pci_config_attr(irq, "%u\n");
-pci_config_attr(is_enabled, "%u\n");
 
 static ssize_t broken_parity_status_show(struct device *dev,
 					 struct device_attribute *attr,
@@ -112,26 +111,36 @@
 		       (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
 		       (u8)(pci_dev->class));
 }
-static ssize_t
-is_enabled_store(struct device *dev, struct device_attribute *attr,
-		const char *buf, size_t count)
+
+static ssize_t is_enabled_store(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
 {
+	ssize_t result = -EINVAL;
 	struct pci_dev *pdev = to_pci_dev(dev);
-	int retval = 0;
 
 	/* this can crash the machine when done on the "wrong" device */
 	if (!capable(CAP_SYS_ADMIN))
 		return count;
 
-	if (*buf == '0')
-		pci_disable_device(pdev);
+	if (*buf == '0') {
+		if (atomic_read(&pdev->enable_cnt) != 0)
+			pci_disable_device(pdev);
+		else
+			result = -EIO;
+	} else if (*buf == '1')
+		result = pci_enable_device(pdev);
 
-	if (*buf == '1')
-		retval = pci_enable_device(pdev);
+	return result < 0 ? result : count;
+}
 
-	if (retval)
-		return retval;
-	return count;
+static ssize_t is_enabled_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct pci_dev *pdev;
+
+	pdev = to_pci_dev (dev);
+	return sprintf (buf, "%u\n", atomic_read(&pdev->enable_cnt));
 }
 
 static ssize_t
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a544997..5a14b73 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -490,6 +490,47 @@
 	kfree(save_state);
 }
 
+
+static int pci_save_pcix_state(struct pci_dev *dev)
+{
+	int pos, i = 0;
+	struct pci_cap_saved_state *save_state;
+	u16 *cap;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+	if (pos <= 0)
+		return 0;
+
+	save_state = kzalloc(sizeof(*save_state) + sizeof(u16), GFP_KERNEL);
+	if (!save_state) {
+		dev_err(&dev->dev, "Out of memory in pci_save_pcix_state\n");

+		return -ENOMEM;
+	}
+	cap = (u16 *)&save_state->data[0];
+
+	pci_read_config_word(dev, pos + PCI_X_CMD, &cap[i++]);
+	pci_add_saved_cap(dev, save_state);
+	return 0;
+}
+
+static void pci_restore_pcix_state(struct pci_dev *dev)
+{
+	int i = 0, pos;
+	struct pci_cap_saved_state *save_state;
+	u16 *cap;
+
+	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
+	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+	if (!save_state || pos <= 0)
+		return;
+	cap = (u16 *)&save_state->data[0];
+
+	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
+	pci_remove_saved_cap(save_state);
+	kfree(save_state);
+}
+
+
 /**
  * pci_save_state - save the PCI configuration space of a device before suspending
  * @dev: - PCI device that we're dealing with
@@ -507,6 +548,8 @@
 		return i;
 	if ((i = pci_save_pcie_state(dev)) != 0)
 		return i;
+	if ((i = pci_save_pcix_state(dev)) != 0)
+		return i;
 	return 0;
 }
 
@@ -538,6 +581,7 @@
 				dev->saved_config_space[i]);
 		}
 	}
+	pci_restore_pcix_state(dev);
 	pci_restore_msi_state(dev);
 	pci_restore_msix_state(dev);
 	return 0;
@@ -568,27 +612,48 @@
 }
 
 /**
+ * __pci_enable_device - Initialize device before it's used by a driver.
+ * @dev: PCI device to be initialized
+ *
+ *  Initialize device before it's used by a driver. Ask low-level code
+ *  to enable I/O and memory. Wake up the device if it was suspended.
+ *  Beware, this function can fail.
+ *
+ * Note this function is a backend and is not supposed to be called by
+ * normal code, use pci_enable_device() instead.
+ */
+int
+__pci_enable_device(struct pci_dev *dev)
+{
+	int err;
+
+	err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
+	if (err)
+		return err;
+	pci_fixup_device(pci_fixup_enable, dev);
+	return 0;
+}
+
+/**
  * pci_enable_device - Initialize device before it's used by a driver.
  * @dev: PCI device to be initialized
  *
  *  Initialize device before it's used by a driver. Ask low-level code
  *  to enable I/O and memory. Wake up the device if it was suspended.
  *  Beware, this function can fail.
+ *
+ *  Note we don't actually enable the device many times if we call
+ *  this function repeatedly (we just increment the count).
  */
-int
-pci_enable_device(struct pci_dev *dev)
+int pci_enable_device(struct pci_dev *dev)
 {
-	int err;
-
-	if (dev->is_enabled)
-		return 0;
-
-	err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
-	if (err)
-		return err;
-	pci_fixup_device(pci_fixup_enable, dev);
-	dev->is_enabled = 1;
-	return 0;
+	int result;
+	if (atomic_add_return(1, &dev->enable_cnt) > 1)
+		return 0;		/* already enabled */
+	result = __pci_enable_device(dev);
+	if (result < 0)
+		atomic_dec(&dev->enable_cnt);
+	return result;
 }
 
 /**
@@ -607,12 +672,18 @@
  *
  * Signal to the system that the PCI device is not in use by the system
  * anymore.  This only involves disabling PCI bus-mastering, if active.
+ *
+ * Note we don't actually disable the device until all callers of
+ * pci_enable_device() have called pci_disable_device().
  */
 void
 pci_disable_device(struct pci_dev *dev)
 {
 	u16 pci_command;
 
+	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
+		return;
+
 	if (dev->msi_enabled)
 		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
 			PCI_CAP_ID_MSI);
@@ -628,7 +699,6 @@
 	dev->is_busmaster = 0;
 
 	pcibios_disable_device(dev);
-	dev->is_enabled = 0;
 }
 
 /**
@@ -831,22 +901,38 @@
 	pcibios_set_master(dev);
 }
 
-#ifndef HAVE_ARCH_PCI_MWI
+#ifdef PCI_DISABLE_MWI
+int pci_set_mwi(struct pci_dev *dev)
+{
+	return 0;
+}
+
+void pci_clear_mwi(struct pci_dev *dev)
+{
+}
+
+#else
+
+#ifndef PCI_CACHE_LINE_BYTES
+#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
+#endif
+
 /* This can be overridden by arch code. */
-u8 pci_cache_line_size = L1_CACHE_BYTES >> 2;
+/* Don't forget this is measured in 32-bit words, not bytes */
+u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
 
 /**
- * pci_generic_prep_mwi - helper function for pci_set_mwi
- * @dev: the PCI device for which MWI is enabled
+ * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
+ * @dev: the PCI device for which MWI is to be enabled
  *
- * Helper function for generic implementation of pcibios_prep_mwi
- * function.  Originally copied from drivers/net/acenic.c.
+ * Helper function for pci_set_mwi.
+ * Originally copied from drivers/net/acenic.c.
  * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
  *
  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
  */
 static int
-pci_generic_prep_mwi(struct pci_dev *dev)
+pci_set_cacheline_size(struct pci_dev *dev)
 {
 	u8 cacheline_size;
 
@@ -872,7 +958,6 @@
 
 	return -EINVAL;
 }
-#endif /* !HAVE_ARCH_PCI_MWI */
 
 /**
  * pci_set_mwi - enables memory-write-invalidate PCI transaction
@@ -890,12 +975,7 @@
 	int rc;
 	u16 cmd;
 
-#ifdef HAVE_ARCH_PCI_MWI
-	rc = pcibios_prep_mwi(dev);
-#else
-	rc = pci_generic_prep_mwi(dev);
-#endif
-
+	rc = pci_set_cacheline_size(dev);
 	if (rc)
 		return rc;
 
@@ -926,6 +1006,7 @@
 		pci_write_config_word(dev, PCI_COMMAND, cmd);
 	}
 }
+#endif /* ! PCI_DISABLE_MWI */
 
 /**
  * pci_intx - enables/disables PCI INTx for device dev
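Note: with the reference-counted enable introduced above, pci_enable_device() and
pci_disable_device() can safely nest: the hardware is enabled on the 0->1 transition and
disabled again only when the count returns to zero. A small illustration (the function below
is hypothetical, not part of this patch):

	static int example_nested_enable(struct pci_dev *dev)
	{
		int err;

		err = pci_enable_device(dev);	/* enable_cnt 0 -> 1: device enabled */
		if (err)
			return err;

		err = pci_enable_device(dev);	/* enable_cnt 1 -> 2: count only */
		if (err) {
			pci_disable_device(dev);
			return err;
		}

		pci_disable_device(dev);	/* enable_cnt 2 -> 1: still enabled */
		pci_disable_device(dev);	/* enable_cnt 1 -> 0: actually disabled */
		return 0;
	}
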
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 6bf327d..398852f 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,5 +1,6 @@
 /* Functions internal to the PCI core code */
 
+extern int __must_check __pci_enable_device(struct pci_dev *);
 extern int pci_uevent(struct device *dev, char **envp, int num_envp,
 		      char *buffer, int buffer_size);
 extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 04c43ef..55866b6 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -160,7 +160,7 @@
 	rpc->e_lock = SPIN_LOCK_UNLOCKED;
 
 	rpc->rpd = dev;
-	INIT_WORK(&rpc->dpc_handler, aer_isr, (void *)dev);
+	INIT_WORK(&rpc->dpc_handler, aer_isr);
 	rpc->prod_idx = rpc->cons_idx = 0;
 	mutex_init(&rpc->rpc_mutex);
 	init_waitqueue_head(&rpc->wait_release);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index daf0cad..3c0a58f 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -118,7 +118,7 @@
 extern void aer_enable_rootport(struct aer_rpc *rpc);
 extern void aer_delete_rootport(struct aer_rpc *rpc);
 extern int aer_init(struct pcie_device *dev);
-extern void aer_isr(void *context);
+extern void aer_isr(struct work_struct *work);
 extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
 extern int aer_osc_setup(struct pci_dev *dev);
 
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 1c7e660..08e1303 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -690,14 +690,14 @@
 
 /**
  * aer_isr - consume errors detected by root port
- * @context: pointer to a private data of pcie device
+ * @work: the dpc_handler work item embedded in struct aer_rpc
  *
  * Invoked, as DPC, when root port records new detected error
  **/
-void aer_isr(void *context)
+void aer_isr(struct work_struct *work)
 {
-	struct pcie_device *p_device = (struct pcie_device *) context;
-	struct aer_rpc *rpc = get_service_data(p_device);
+	struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
+	struct pcie_device *p_device = rpc->rpd;
 	struct aer_err_source *e_src;
 
 	mutex_lock(&rpc->rpc_mutex);
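
The aer_isr() conversion follows the standard work_struct pattern: the callback receives the embedded work item and recovers the containing aer_rpc with container_of() instead of being handed a void * context. A self-contained illustration of that recovery step (userspace C using offsetof; the struct names are made up for the example):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item { int pending; };

/* Example-only container playing the role of struct aer_rpc. */
struct rpc_ctx {
	int id;
	struct work_item dpc_handler;	/* embedded work item */
};

/* The callback gets only the member pointer and climbs back to its owner. */
static void isr(struct work_item *work)
{
	struct rpc_ctx *rpc = container_of(work, struct rpc_ctx, dpc_handler);
	printf("servicing rpc %d\n", rpc->id);
}

int main(void)
{
	struct rpc_ctx rpc = { .id = 7 };
	isr(&rpc.dpc_handler);
	return 0;
}
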
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e159d66..6a3c1e7 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -679,6 +679,33 @@
 		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
 		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
 		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
+
+		/*
+		 *	Do the ugly legacy mode stuff here rather than broken chip
+		 *	quirk code. Legacy mode ATA controllers have fixed
+		 *	addresses. These are not always echoed in BAR0-3, and
+		 *	BAR0-3 in a few cases contain junk!
+		 */
+		if (class == PCI_CLASS_STORAGE_IDE) {
+			u8 progif;
+			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
+			if ((progif & 1) == 0) {
+				dev->resource[0].start = 0x1F0;
+				dev->resource[0].end = 0x1F7;
+				dev->resource[0].flags = IORESOURCE_IO;
+				dev->resource[1].start = 0x3F6;
+				dev->resource[1].end = 0x3F6;
+				dev->resource[1].flags = IORESOURCE_IO;
+			}
+			if ((progif & 4) == 0) {
+				dev->resource[2].start = 0x170;
+				dev->resource[2].end = 0x177;
+				dev->resource[2].flags = IORESOURCE_IO;
+				dev->resource[3].start = 0x376;
+				dev->resource[3].end = 0x376;
+				dev->resource[3].flags = IORESOURCE_IO;
+			}
+		}
 		break;
 
 	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
@@ -846,6 +873,7 @@
 	dev->dev.release = pci_release_dev;
 	pci_dev_get(dev);
 
+	set_dev_node(&dev->dev, pcibus_to_node(bus));
 	dev->dev.dma_mask = &dev->dma_mask;
 	dev->dev.coherent_dma_mask = 0xffffffffull;
 
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 5b44838..9ca9b9b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -797,56 +797,6 @@
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX,	PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master );
 
 /*
- * As per PCI spec, ignore base address registers 0-3 of the IDE controllers
- * running in Compatible mode (bits 0 and 2 in the ProgIf for primary and
- * secondary channels respectively). If the device reports Compatible mode
- * but does use BAR0-3 for address decoding, we assume that firmware has
- * programmed these BARs with standard values (0x1f0,0x3f4 and 0x170,0x374).
- * Exceptions (if they exist) must be handled in chip/architecture specific
- * fixups.
- *
- * Note: for non x86 people. You may need an arch specific quirk to handle
- * moving IDE devices to native mode as well. Some plug in card devices power
- * up in compatible mode and assume the BIOS will adjust them.
- *
- * Q: should we load the 0x1f0,0x3f4 into the registers or zap them as
- * we do now ? We don't want is pci_enable_device to come along
- * and assign new resources. Both approaches work for that.
- */ 
-static void __devinit quirk_ide_bases(struct pci_dev *dev)
-{
-       struct resource *res;
-       int first_bar = 2, last_bar = 0;
-
-       if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
-               return;
-
-       res = &dev->resource[0];
-
-       /* primary channel: ProgIf bit 0, BAR0, BAR1 */
-       if (!(dev->class & 1) && (res[0].flags || res[1].flags)) { 
-               res[0].start = res[0].end = res[0].flags = 0;
-               res[1].start = res[1].end = res[1].flags = 0;
-               first_bar = 0;
-               last_bar = 1;
-       }
-
-       /* secondary channel: ProgIf bit 2, BAR2, BAR3 */
-       if (!(dev->class & 4) && (res[2].flags || res[3].flags)) { 
-               res[2].start = res[2].end = res[2].flags = 0;
-               res[3].start = res[3].end = res[3].flags = 0;
-               last_bar = 3;
-       }
-
-       if (!last_bar)
-               return;
-
-       printk(KERN_INFO "PCI: Ignoring BAR%d-%d of IDE controller %s\n",
-              first_bar, last_bar, pci_name(dev));
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_ide_bases);
-
-/*
  *	Ensure C0 rev restreaming is off. This is normally done by
  *	the BIOS but in the odd case it is not the results are corruption
  *	hence the presence of a Linux check
@@ -880,11 +830,10 @@
 		prog &= ~5;
 		pdev->class &= ~5;
 		pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
-		/* need to re-assign BARs for compat mode */
-		quirk_ide_bases(pdev);
+		/* PCI layer will sort out resources */
 	}
 }
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide );
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide );
 
 /*
  *	Intel 82801CAM ICH3-M datasheet says IDE modes must be the same
@@ -900,11 +849,9 @@
 		prog &= ~5;
 		pdev->class &= ~5;
 		pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
-		/* need to re-assign BARs for compat mode */
-		quirk_ide_bases(pdev);
 	}
 }
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
 
 /* This was originally an Alpha specific thing, but it really fits here.
  * The i82375 PCI/EISA bridge appears as non-classified. Fix that.
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index e1dcefc..d087e08 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -81,7 +81,8 @@
 		start = (loff_t)0xC0000;
 		*size = 0x20000; /* cover C000:0 through E000:0 */
 	} else {
-		if (res->flags & IORESOURCE_ROM_COPY) {
+		if (res->flags &
+			(IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
 			*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
 			return (void __iomem *)(unsigned long)
 				pci_resource_start(pdev, PCI_ROM_RESOURCE);
@@ -165,7 +166,8 @@
 	if (!rom)
 		return NULL;
 
-	if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_SHADOW))
+	if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_SHADOW |
+			  IORESOURCE_ROM_BIOS_COPY))
 		return rom;
 
 	res->start = (unsigned long)kmalloc(*size, GFP_KERNEL);
@@ -191,7 +193,7 @@
 {
 	struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
 
-	if (res->flags & IORESOURCE_ROM_COPY)
+	if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY))
 		return;
 
 	iounmap(rom);
@@ -215,6 +217,7 @@
 		sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
 	if (!(res->flags & (IORESOURCE_ROM_ENABLE |
 			    IORESOURCE_ROM_SHADOW |
+			    IORESOURCE_ROM_BIOS_COPY |
 			    IORESOURCE_ROM_COPY)))
 		pci_disable_rom(pdev);
 }
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 6979667..52d4a38 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -32,10 +32,11 @@
  * A0..A10 work in each range; A23 indicates I/O space;  A25 is CFRNW;
  * some other bit in {A24,A22..A11} is nREG to flag memory access
  * (vs attributes).  So more than 2KB/region would just be waste.
+ * Note: These are offsets from the physical base address.
  */
-#define	CF_ATTR_PHYS	(AT91_CF_BASE)
-#define	CF_IO_PHYS	(AT91_CF_BASE  + (1 << 23))
-#define	CF_MEM_PHYS	(AT91_CF_BASE  + 0x017ff800)
+#define	CF_ATTR_PHYS	(0)
+#define	CF_IO_PHYS	(1 << 23)
+#define	CF_MEM_PHYS	(0x017ff800)
 
 /*--------------------------------------------------------------------------*/
 
@@ -48,6 +49,8 @@
 
 	struct platform_device	*pdev;
 	struct at91_cf_data	*board;
+
+	unsigned long		phys_baseaddr;
 };
 
 #define	SZ_2K			(2 * SZ_1K)
@@ -154,9 +157,8 @@
 
 	/*
 	 * Use 16 bit accesses unless/until we need 8-bit i/o space.
-	 * Always set CSR4 ... PCMCIA won't always unmap things.
 	 */
-	csr = at91_sys_read(AT91_SMC_CSR(4)) & ~AT91_SMC_DBW;
+	csr = at91_sys_read(AT91_SMC_CSR(cf->board->chipselect)) & ~AT91_SMC_DBW;
 
 	/*
 	 * NOTE: this CF controller ignores IOIS16, so we can't really do
@@ -168,14 +170,14 @@
 	 * some cards only like that way to get at the odd byte, despite
 	 * CF 3.0 spec table 35 also giving the D8-D15 option.
 	 */
-	if (!(io->flags & (MAP_16BIT|MAP_AUTOSZ))) {
+	if (!(io->flags & (MAP_16BIT | MAP_AUTOSZ))) {
 		csr |= AT91_SMC_DBW_8;
 		pr_debug("%s: 8bit i/o bus\n", driver_name);
 	} else {
 		csr |= AT91_SMC_DBW_16;
 		pr_debug("%s: 16bit i/o bus\n", driver_name);
 	}
-	at91_sys_write(AT91_SMC_CSR(4), csr);
+	at91_sys_write(AT91_SMC_CSR(cf->board->chipselect), csr);
 
 	io->start = cf->socket.io_offset;
 	io->stop = io->start + SZ_2K - 1;
@@ -194,11 +196,11 @@
 
 	cf = container_of(s, struct at91_cf_socket, socket);
 
-	map->flags &= MAP_ACTIVE|MAP_ATTRIB|MAP_16BIT;
+	map->flags &= (MAP_ACTIVE | MAP_ATTRIB | MAP_16BIT);
 	if (map->flags & MAP_ATTRIB)
-		map->static_start = CF_ATTR_PHYS;
+		map->static_start = cf->phys_baseaddr + CF_ATTR_PHYS;
 	else
-		map->static_start = CF_MEM_PHYS;
+		map->static_start = cf->phys_baseaddr + CF_MEM_PHYS;
 
 	return 0;
 }
@@ -219,7 +221,6 @@
 	struct at91_cf_socket	*cf;
 	struct at91_cf_data	*board = pdev->dev.platform_data;
 	struct resource		*io;
-	unsigned int		csa;
 	int			status;
 
 	if (!board || !board->det_pin || !board->rst_pin)
@@ -235,33 +236,11 @@
 
 	cf->board = board;
 	cf->pdev = pdev;
+	cf->phys_baseaddr = io->start;
 	platform_set_drvdata(pdev, cf);
 
-	/* CF takes over CS4, CS5, CS6 */
-	csa = at91_sys_read(AT91_EBI_CSA);
-	at91_sys_write(AT91_EBI_CSA, csa | AT91_EBI_CS4A_SMC_COMPACTFLASH);
-
-	/* nWAIT is _not_ a default setting */
-	(void) at91_set_A_periph(AT91_PIN_PC6, 1);	/*  nWAIT */
-
-	/*
-	 * Static memory controller timing adjustments.
-	 * REVISIT:  these timings are in terms of MCK cycles, so
-	 * when MCK changes (cpufreq etc) so must these values...
-	 */
-	at91_sys_write(AT91_SMC_CSR(4),
-				  AT91_SMC_ACSS_STD
-				| AT91_SMC_DBW_16
-				| AT91_SMC_BAT
-				| AT91_SMC_WSEN
-				| AT91_SMC_NWS_(32)	/* wait states */
-				| AT91_SMC_RWSETUP_(6)	/* setup time */
-				| AT91_SMC_RWHOLD_(4)	/* hold time */
-	);
-
 	/* must be a GPIO; ergo must trigger on both edges */
-	status = request_irq(board->det_pin, at91_cf_irq,
-			IRQF_SAMPLE_RANDOM, driver_name, cf);
+	status = request_irq(board->det_pin, at91_cf_irq, 0, driver_name, cf);
 	if (status < 0)
 		goto fail0;
 	device_init_wakeup(&pdev->dev, 1);
@@ -282,14 +261,18 @@
 		cf->socket.pci_irq = NR_IRQS + 1;
 
 	/* pcmcia layer only remaps "real" memory not iospace */
-	cf->socket.io_offset = (unsigned long) ioremap(CF_IO_PHYS, SZ_2K);
-	if (!cf->socket.io_offset)
+	cf->socket.io_offset = (unsigned long) ioremap(cf->phys_baseaddr + CF_IO_PHYS, SZ_2K);
+	if (!cf->socket.io_offset) {
+		status = -ENXIO;
 		goto fail1;
+	}
 
-	/* reserve CS4, CS5, and CS6 regions; but use just CS4 */
+	/* reserve chip-select regions */
 	if (!request_mem_region(io->start, io->end + 1 - io->start,
-				driver_name))
+				driver_name)) {
+		status = -ENXIO;
 		goto fail1;
+	}
 
 	pr_info("%s: irqs det #%d, io #%d\n", driver_name,
 		board->det_pin, board->irq_pin);
@@ -319,9 +302,7 @@
 fail0a:
 	device_init_wakeup(&pdev->dev, 0);
 	free_irq(board->det_pin, cf);
-	device_init_wakeup(&pdev->dev, 0);
 fail0:
-	at91_sys_write(AT91_EBI_CSA, csa);
 	kfree(cf);
 	return status;
 }
@@ -331,19 +312,15 @@
 	struct at91_cf_socket	*cf = platform_get_drvdata(pdev);
 	struct at91_cf_data	*board = cf->board;
 	struct resource		*io = cf->socket.io[0].res;
-	unsigned int		csa;
 
 	pcmcia_unregister_socket(&cf->socket);
 	if (board->irq_pin)
 		free_irq(board->irq_pin, cf);
-	free_irq(board->det_pin, cf);
 	device_init_wakeup(&pdev->dev, 0);
+	free_irq(board->det_pin, cf);
 	iounmap((void __iomem *) cf->socket.io_offset);
 	release_mem_region(io->start, io->end + 1 - io->start);
 
-	csa = at91_sys_read(AT91_EBI_CSA);
-	at91_sys_write(AT91_EBI_CSA, csa & ~AT91_EBI_CS4A);
-
 	kfree(cf);
 	return 0;
 }
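
Since CF_ATTR_PHYS, CF_IO_PHYS and CF_MEM_PHYS are now offsets, the actual windows are computed from whatever base address the platform resource supplies (cf->phys_baseaddr above). A short arithmetic check (the 0x10000000 base is purely an example):

#include <stdio.h>

#define CF_ATTR_PHYS	(0)
#define CF_IO_PHYS	(1 << 23)
#define CF_MEM_PHYS	(0x017ff800)

int main(void)
{
	unsigned long base = 0x10000000;	/* example chip-select base address */

	printf("attr 0x%08lx  io 0x%08lx  mem 0x%08lx\n",
	       base + CF_ATTR_PHYS, base + CF_IO_PHYS, base + CF_MEM_PHYS);
	return 0;
}
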
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index f9cd831..606a467 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -29,6 +29,7 @@
 #include <linux/pci.h>
 #include <linux/device.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 #include <asm/system.h>
 #include <asm/irq.h>
 
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index d6164cd..f573ea0 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -135,7 +135,7 @@
 struct pcmcia_callback{
 	struct module	*owner;
 	int		(*event) (struct pcmcia_socket *s, event_t event, int priority);
-	void		(*requery) (struct pcmcia_socket *s);
+	void		(*requery) (struct pcmcia_socket *s, int new_cis);
 	int		(*suspend) (struct pcmcia_socket *s);
 	int		(*resume) (struct pcmcia_socket *s);
 };
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 21d83a8..7355eb4 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -231,65 +231,6 @@
 }
 
 
-#ifdef CONFIG_PCMCIA_LOAD_CIS
-
-/**
- * pcmcia_load_firmware - load CIS from userspace if device-provided is broken
- * @dev - the pcmcia device which needs a CIS override
- * @filename - requested filename in /lib/firmware/
- *
- * This uses the in-kernel firmware loading mechanism to use a "fake CIS" if
- * the one provided by the card is broken. The firmware files reside in
- * /lib/firmware/ in userspace.
- */
-static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
-{
-	struct pcmcia_socket *s = dev->socket;
-	const struct firmware *fw;
-	char path[20];
-	int ret=-ENOMEM;
-	cisdump_t *cis;
-
-	if (!filename)
-		return -EINVAL;
-
-	ds_dbg(1, "trying to load firmware %s\n", filename);
-
-	if (strlen(filename) > 14)
-		return -EINVAL;
-
-	snprintf(path, 20, "%s", filename);
-
-	if (request_firmware(&fw, path, &dev->dev) == 0) {
-		if (fw->size >= CISTPL_MAX_CIS_SIZE)
-			goto release;
-
-		cis = kzalloc(sizeof(cisdump_t), GFP_KERNEL);
-		if (!cis)
-			goto release;
-
-		cis->Length = fw->size + 1;
-		memcpy(cis->Data, fw->data, fw->size);
-
-		if (!pcmcia_replace_cis(s, cis))
-			ret = 0;
-	}
- release:
-	release_firmware(fw);
-
-	return (ret);
-}
-
-#else /* !CONFIG_PCMCIA_LOAD_CIS */
-
-static inline int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
-{
-	return -ENODEV;
-}
-
-#endif
-
-
 /*======================================================================*/
 
 
@@ -309,6 +250,8 @@
 	driver->drv.bus = &pcmcia_bus_type;
 	driver->drv.owner = driver->owner;
 
+	ds_dbg(3, "registering driver %s\n", driver->drv.name);
+
 	return driver_register(&driver->drv);
 }
 EXPORT_SYMBOL(pcmcia_register_driver);
@@ -318,6 +261,7 @@
  */
 void pcmcia_unregister_driver(struct pcmcia_driver *driver)
 {
+	ds_dbg(3, "unregistering driver %s\n", driver->drv.name);
 	driver_unregister(&driver->drv);
 }
 EXPORT_SYMBOL(pcmcia_unregister_driver);
@@ -343,23 +287,27 @@
 static void pcmcia_release_function(struct kref *ref)
 {
 	struct config_t *c = container_of(ref, struct config_t, ref);
+	ds_dbg(1, "releasing config_t\n");
 	kfree(c);
 }
 
 static void pcmcia_release_dev(struct device *dev)
 {
 	struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
-	ds_dbg(1, "releasing dev %p\n", p_dev);
+	ds_dbg(1, "releasing device %s\n", p_dev->dev.bus_id);
 	pcmcia_put_socket(p_dev->socket);
 	kfree(p_dev->devname);
 	kref_put(&p_dev->function_config->ref, pcmcia_release_function);
 	kfree(p_dev);
 }
 
-static void pcmcia_add_pseudo_device(struct pcmcia_socket *s)
+static void pcmcia_add_device_later(struct pcmcia_socket *s, int mfc)
 {
 	if (!s->pcmcia_state.device_add_pending) {
+		ds_dbg(1, "scheduling to add %s secondary"
+		       " device to %d\n", mfc ? "mfc" : "pfc", s->sock);
 		s->pcmcia_state.device_add_pending = 1;
+		s->pcmcia_state.mfc_pfc = mfc;
 		schedule_work(&s->device_add);
 	}
 	return;
@@ -371,6 +319,7 @@
 	struct pcmcia_driver *p_drv;
 	struct pcmcia_device_id *did;
 	struct pcmcia_socket *s;
+	cistpl_config_t cis_config;
 	int ret = 0;
 
 	dev = get_device(dev);
@@ -381,15 +330,33 @@
 	p_drv = to_pcmcia_drv(dev->driver);
 	s = p_dev->socket;
 
+	ds_dbg(1, "trying to bind %s to %s\n", p_dev->dev.bus_id,
+	       p_drv->drv.name);
+
 	if ((!p_drv->probe) || (!p_dev->function_config) ||
 	    (!try_module_get(p_drv->owner))) {
 		ret = -EINVAL;
 		goto put_dev;
 	}
 
+	/* set up some more device information */
+	ret = pccard_read_tuple(p_dev->socket, p_dev->func, CISTPL_CONFIG,
+				&cis_config);
+	if (!ret) {
+		p_dev->conf.ConfigBase = cis_config.base;
+		p_dev->conf.Present = cis_config.rmask[0];
+	} else {
+		printk(KERN_INFO "pcmcia: could not parse base and rmask0 of CIS\n");
+		p_dev->conf.ConfigBase = 0;
+		p_dev->conf.Present = 0;
+	}
+
 	ret = p_drv->probe(p_dev);
-	if (ret)
+	if (ret) {
+		ds_dbg(1, "binding %s to %s failed with %d\n",
+		       p_dev->dev.bus_id, p_drv->drv.name, ret);
 		goto put_module;
+	}
 
 	/* handle pseudo multifunction devices:
 	 * there are at most two pseudo multifunction devices.
@@ -400,7 +367,7 @@
 	did = p_dev->dev.driver_data;
 	if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) &&
 	    (p_dev->socket->device_count == 1) && (p_dev->device_no == 0))
-		pcmcia_add_pseudo_device(p_dev->socket);
+		pcmcia_add_device_later(p_dev->socket, 0);
 
  put_module:
 	if (ret)
@@ -421,8 +388,8 @@
 	struct pcmcia_device	*tmp;
 	unsigned long		flags;
 
-	ds_dbg(2, "unbind_request(%d)\n", s->sock);
-
+	ds_dbg(2, "pcmcia_card_remove(%d) %s\n", s->sock,
+	       leftover ? leftover->devname : "");
 
 	if (!leftover)
 		s->device_count = 0;
@@ -439,6 +406,7 @@
 		p_dev->_removed=1;
 		spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
 
+		ds_dbg(2, "unregistering device %s\n", p_dev->dev.bus_id);
 		device_unregister(&p_dev->dev);
 	}
 
@@ -455,6 +423,8 @@
 	p_dev = to_pcmcia_dev(dev);
 	p_drv = to_pcmcia_drv(dev->driver);
 
+	ds_dbg(1, "removing device %s\n", p_dev->dev.bus_id);
+
 	/* If we're removing the primary module driving a
 	 * pseudo multi-function card, we need to unbind
 	 * all devices
@@ -587,8 +557,10 @@
 
 	mutex_lock(&device_add_lock);
 
-	/* max of 2 devices per card */
-	if (s->device_count == 2)
+	ds_dbg(3, "adding device to %d, function %d\n", s->sock, function);
+
+	/* max of 4 devices per card */
+	if (s->device_count == 4)
 		goto err_put;
 
 	p_dev = kzalloc(sizeof(struct pcmcia_device), GFP_KERNEL);
@@ -598,8 +570,6 @@
 	p_dev->socket = s;
 	p_dev->device_no = (s->device_count++);
 	p_dev->func   = function;
-	if (s->functions <= function)
-		s->functions = function + 1;
 
 	p_dev->dev.bus = &pcmcia_bus_type;
 	p_dev->dev.parent = s->dev.dev;
@@ -610,8 +580,8 @@
 	if (!p_dev->devname)
 		goto err_free;
 	sprintf (p_dev->devname, "pcmcia%s", p_dev->dev.bus_id);
+	ds_dbg(3, "devname is %s\n", p_dev->devname);
 
-	/* compat */
 	spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
 
 	/*
@@ -631,6 +601,7 @@
 	spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
 
 	if (!p_dev->function_config) {
+		ds_dbg(3, "creating config_t for %s\n", p_dev->dev.bus_id);
 		p_dev->function_config = kzalloc(sizeof(struct config_t),
 						 GFP_KERNEL);
 		if (!p_dev->function_config)
@@ -674,11 +645,16 @@
 	unsigned int no_funcs, i;
 	int ret = 0;
 
-	if (!(s->resource_setup_done))
+	if (!(s->resource_setup_done)) {
+		ds_dbg(3, "no resources available, delaying card_add\n");
 		return -EAGAIN; /* try again, but later... */
+	}
 
-	if (pcmcia_validate_mem(s))
+	if (pcmcia_validate_mem(s)) {
+		ds_dbg(3, "validating mem resources failed, "
+		       "delaying card_add\n");
 		return -EAGAIN; /* try again, but later... */
+	}
 
 	ret = pccard_validate_cis(s, BIND_FN_ALL, &cisinfo);
 	if (ret || !cisinfo.Chains) {
@@ -690,6 +666,7 @@
 		no_funcs = mfc.nfn;
 	else
 		no_funcs = 1;
+	s->functions = no_funcs;
 
 	for (i=0; i < no_funcs; i++)
 		pcmcia_device_add(s, i);
@@ -698,38 +675,50 @@
 }
 
 
-static void pcmcia_delayed_add_pseudo_device(void *data)
+static void pcmcia_delayed_add_device(struct work_struct *work)
 {
-	struct pcmcia_socket *s = data;
-	pcmcia_device_add(s, 0);
+	struct pcmcia_socket *s =
+		container_of(work, struct pcmcia_socket, device_add);
+	ds_dbg(1, "adding additional device to %d\n", s->sock);
+	pcmcia_device_add(s, s->pcmcia_state.mfc_pfc);
 	s->pcmcia_state.device_add_pending = 0;
+	s->pcmcia_state.mfc_pfc = 0;
 }
 
 static int pcmcia_requery(struct device *dev, void * _data)
 {
 	struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
-	if (!p_dev->dev.driver)
+	if (!p_dev->dev.driver) {
+		ds_dbg(1, "update device information for %s\n",
+		       p_dev->dev.bus_id);
 		pcmcia_device_query(p_dev);
+	}
 
 	return 0;
 }
 
-static void pcmcia_bus_rescan(struct pcmcia_socket *skt)
+static void pcmcia_bus_rescan(struct pcmcia_socket *skt, int new_cis)
 {
-	int no_devices=0;
+	int no_devices = 0;
 	int ret = 0;
 	unsigned long flags;
 
 	/* must be called with skt_mutex held */
+	ds_dbg(0, "re-scanning socket %d\n", skt->sock);
+
 	spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
 	if (list_empty(&skt->devices_list))
-		no_devices=1;
+		no_devices = 1;
 	spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
 
+	/* If this is because of a CIS override, start over */
+	if (new_cis && !no_devices)
+		pcmcia_card_remove(skt, NULL);
+
 	/* if no devices were added for this socket yet because of
 	 * missing resource information or other trouble, we need to
 	 * do this now. */
-	if (no_devices) {
+	if (no_devices || new_cis) {
 		ret = pcmcia_card_add(skt);
 		if (ret)
 			return;
@@ -747,6 +736,97 @@
 		printk(KERN_INFO "pcmcia: bus_rescan_devices failed\n");
 }
 
+#ifdef CONFIG_PCMCIA_LOAD_CIS
+
+/**
+ * pcmcia_load_firmware - load CIS from userspace if device-provided is broken
+ * @dev: the pcmcia device which needs a CIS override
+ * @filename: requested filename in /lib/firmware/
+ *
+ * This uses the in-kernel firmware loading mechanism to apply a "fake CIS"
+ * when the one provided by the card is broken. The firmware files reside in
+ * /lib/firmware/ in userspace.
+ */
+static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
+{
+	struct pcmcia_socket *s = dev->socket;
+	const struct firmware *fw;
+	char path[20];
+	int ret = -ENOMEM;
+	int no_funcs;
+	int old_funcs;
+	cisdump_t *cis;
+	cistpl_longlink_mfc_t mfc;
+
+	if (!filename)
+		return -EINVAL;
+
+	ds_dbg(1, "trying to load CIS file %s\n", filename);
+
+	if (strlen(filename) > 14) {
+		printk(KERN_WARNING "pcmcia: CIS filename is too long\n");
+		return -EINVAL;
+	}
+
+	snprintf(path, 20, "%s", filename);
+
+	if (request_firmware(&fw, path, &dev->dev) == 0) {
+		if (fw->size >= CISTPL_MAX_CIS_SIZE) {
+			ret = -EINVAL;
+			printk(KERN_ERR "pcmcia: CIS override is too big\n");
+			goto release;
+		}
+
+		cis = kzalloc(sizeof(cisdump_t), GFP_KERNEL);
+		if (!cis) {
+			ret = -ENOMEM;
+			goto release;
+		}
+
+		cis->Length = fw->size + 1;
+		memcpy(cis->Data, fw->data, fw->size);
+
+		if (!pcmcia_replace_cis(s, cis))
+			ret = 0;
+		else {
+			printk(KERN_ERR "pcmcia: CIS override failed\n");
+			goto release;
+		}
+
+
+		/* update information */
+		pcmcia_device_query(dev);
+
+		/* does this cis override add or remove functions? */
+		old_funcs = s->functions;
+
+		if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC, &mfc))
+			no_funcs = mfc.nfn;
+		else
+			no_funcs = 1;
+		s->functions = no_funcs;
+
+		if (old_funcs > no_funcs)
+			pcmcia_card_remove(s, dev);
+		else if (no_funcs > old_funcs)
+			pcmcia_add_device_later(s, 1);
+	}
+ release:
+	release_firmware(fw);
+
+	return (ret);
+}
+
+#else /* !CONFIG_PCMCIA_LOAD_CIS */
+
+static inline int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
+{
+	return -ENODEV;
+}
+
+#endif
+
+
 static inline int pcmcia_devmatch(struct pcmcia_device *dev,
 				  struct pcmcia_device_id *did)
 {
@@ -813,11 +893,14 @@
 		 * after it has re-checked that there is no possible module
 		 * with a prod_id/manf_id/card_id match.
 		 */
+		ds_dbg(0, "skipping FUNC_ID match for %s until userspace "
+		       "interaction\n", dev->dev.bus_id);
 		if (!dev->allow_func_id_match)
 			return 0;
 	}
 
 	if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) {
+		ds_dbg(0, "device %s needs a fake CIS\n", dev->dev.bus_id);
 		if (!dev->socket->fake_cis)
 			pcmcia_load_firmware(dev, did->cisfile);
 
@@ -847,13 +930,21 @@
 
 #ifdef CONFIG_PCMCIA_IOCTL
 	/* matching by cardmgr */
-	if (p_dev->cardmgr == p_drv)
+	if (p_dev->cardmgr == p_drv) {
+		ds_dbg(0, "cardmgr matched %s to %s\n", dev->bus_id,
+		       drv->name);
 		return 1;
+	}
 #endif
 
 	while (did && did->match_flags) {
-		if (pcmcia_devmatch(p_dev, did))
+		ds_dbg(3, "trying to match %s to %s\n", dev->bus_id,
+		       drv->name);
+		if (pcmcia_devmatch(p_dev, did)) {
+			ds_dbg(0, "matched %s to %s\n", dev->bus_id,
+			       drv->name);
 			return 1;
+		}
 		did++;
 	}
 
@@ -1044,6 +1135,8 @@
 	struct pcmcia_driver *p_drv = NULL;
 	int ret = 0;
 
+	ds_dbg(2, "suspending %s\n", dev->bus_id);
+
 	if (dev->driver)
 		p_drv = to_pcmcia_drv(dev->driver);
 
@@ -1052,12 +1145,18 @@
 
 	if (p_drv->suspend) {
 		ret = p_drv->suspend(p_dev);
-		if (ret)
+		if (ret) {
+			printk(KERN_ERR "pcmcia: device %s (driver %s) did "
+			       "not want to go to sleep (%d)\n",
+			       p_dev->devname, p_drv->drv.name, ret);
 			goto out;
+		}
 	}
 
-	if (p_dev->device_no == p_dev->func)
+	if (p_dev->device_no == p_dev->func) {
+		ds_dbg(2, "releasing configuration for %s\n", dev->bus_id);
 		pcmcia_release_configuration(p_dev);
+	}
 
  out:
 	if (!ret)
@@ -1072,6 +1171,8 @@
         struct pcmcia_driver *p_drv = NULL;
 	int ret = 0;
 
+	ds_dbg(2, "resuming %s\n", dev->bus_id);
+
 	if (dev->driver)
 		p_drv = to_pcmcia_drv(dev->driver);
 
@@ -1079,6 +1180,7 @@
 		goto out;
 
 	if (p_dev->device_no == p_dev->func) {
+		ds_dbg(2, "requesting configuration for %s\n", dev->bus_id);
 		ret = pcmcia_request_configuration(p_dev, &p_dev->conf);
 		if (ret)
 			goto out;
@@ -1120,12 +1222,14 @@
 
 static int pcmcia_bus_resume(struct pcmcia_socket *skt)
 {
+	ds_dbg(2, "resuming socket %d\n", skt->sock);
 	bus_for_each_dev(&pcmcia_bus_type, NULL, skt, pcmcia_bus_resume_callback);
 	return 0;
 }
 
 static int pcmcia_bus_suspend(struct pcmcia_socket *skt)
 {
+	ds_dbg(2, "suspending socket %d\n", skt->sock);
 	if (bus_for_each_dev(&pcmcia_bus_type, NULL, skt,
 			     pcmcia_bus_suspend_callback)) {
 		pcmcia_bus_resume(skt);
@@ -1246,7 +1350,7 @@
 	init_waitqueue_head(&socket->queue);
 #endif
 	INIT_LIST_HEAD(&socket->devices_list);
-	INIT_WORK(&socket->device_add, pcmcia_delayed_add_pseudo_device, socket);
+	INIT_WORK(&socket->device_add, pcmcia_delayed_add_device);
 	memset(&socket->pcmcia_state, 0, sizeof(u8));
 	socket->device_count = 0;
 
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 36fdaa5..3c22ac4 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -398,7 +398,7 @@
 static void pcc_interrupt_wrapper(u_long data)
 {
 	debug(3, "m32r_cfc: pcc_interrupt_wrapper:\n");
-	pcc_interrupt(0, NULL, NULL);
+	pcc_interrupt(0, NULL);
 	init_timer(&poll_timer);
 	poll_timer.expires = jiffies + poll_interval;
 	add_timer(&poll_timer);
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 310ede5..d077870 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -594,7 +594,12 @@
 
     err = ret = 0;
 
-    if (cmd & IOC_IN) __copy_from_user((char *)buf, uarg, size);
+    if (cmd & IOC_IN) {
+	if (__copy_from_user((char *)buf, uarg, size)) {
+	    err = -EFAULT;
+	    goto free_out;
+	}
+    }
 
     switch (cmd) {
     case DS_ADJUST_RESOURCE_INFO:
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index a70f97f..360c248 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -581,10 +581,10 @@
 	return IRQ_HANDLED;
 }
 
-static int pd6729_check_irq(int irq, int flags)
+static int pd6729_check_irq(int irq)
 {
-	if (request_irq(irq, pd6729_test, flags, "x", pd6729_test) != 0)
-		return -1;
+	if (request_irq(irq, pd6729_test, IRQF_PROBE_SHARED, "x", pd6729_test))
+		return -1;
 	free_irq(irq, pd6729_test);
 	return 0;
 }
@@ -610,7 +610,7 @@
 
 	/* just find interrupts that aren't in use */
 	for (i = 0; i < 16; i++)
-		if ((mask0 & (1 << i)) && (pd6729_check_irq(i, 0) == 0))
+		if ((mask0 & (1 << i)) && (pd6729_check_irq(i) == 0))
 			mask |= (1 << i);
 
 	printk(KERN_INFO "pd6729: ISA irqs = ");
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index 933cd86..b005602 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -188,7 +188,7 @@
 	    (s->state & SOCKET_PRESENT) &&
 	    !(s->state & SOCKET_CARDBUS)) {
 		if (try_module_get(s->callback->owner)) {
-			s->callback->requery(s);
+			s->callback->requery(s, 0);
 			module_put(s->callback->owner);
 		}
 	}
@@ -325,7 +325,7 @@
 	if ((s->callback) && (s->state & SOCKET_PRESENT) &&
 	    !(s->state & SOCKET_CARDBUS)) {
 		if (try_module_get(s->callback->owner)) {
-			s->callback->requery(s);
+			s->callback->requery(s, 1);
 			module_put(s->callback->owner);
 		}
 	}
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index 227600c..91c047a 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -164,9 +164,17 @@
 
 static int pnp_interface_attach_card(struct pnp_card *card)
 {
-	device_create_file(&card->dev,&dev_attr_name);
-	device_create_file(&card->dev,&dev_attr_card_id);
+	int rc = device_create_file(&card->dev,&dev_attr_name);
+	if (rc) return rc;
+
+	rc = device_create_file(&card->dev,&dev_attr_card_id);
+	if (rc) goto err_name;
+
 	return 0;
+
+err_name:
+	device_remove_file(&card->dev,&dev_attr_name);
+	return rc;
 }
 
 /**
@@ -306,16 +314,20 @@
 	down_write(&dev->dev.bus->subsys.rwsem);
 	dev->card_link = clink;
 	dev->dev.driver = &drv->link.driver;
-	if (pnp_bus_type.probe(&dev->dev)) {
-		dev->dev.driver = NULL;
-		dev->card_link = NULL;
-		up_write(&dev->dev.bus->subsys.rwsem);
-		return NULL;
-	}
-	device_bind_driver(&dev->dev);
+	if (pnp_bus_type.probe(&dev->dev))
+		goto err_out;
+	if (device_bind_driver(&dev->dev))
+		goto err_out;
+
 	up_write(&dev->dev.bus->subsys.rwsem);
 
 	return dev;
+
+err_out:
+	dev->dev.driver = NULL;
+	dev->card_link = NULL;
+	up_write(&dev->dev.bus->subsys.rwsem);
+	return NULL;
 }
 
 /**
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 9d8b415..ac9fcd4 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -461,8 +461,19 @@
 
 int pnp_interface_attach_device(struct pnp_dev *dev)
 {
-	device_create_file(&dev->dev,&dev_attr_options);
-	device_create_file(&dev->dev,&dev_attr_resources);
-	device_create_file(&dev->dev,&dev_attr_id);
+	int rc = device_create_file(&dev->dev,&dev_attr_options);
+	if (rc) goto err;
+	rc = device_create_file(&dev->dev,&dev_attr_resources);
+	if (rc) goto err_opt;
+	rc = device_create_file(&dev->dev,&dev_attr_id);
+	if (rc) goto err_res;
+
 	return 0;
+
+err_res:
+	device_remove_file(&dev->dev,&dev_attr_resources);
+err_opt:
+	device_remove_file(&dev->dev,&dev_attr_options);
+err:
+	return rc;
 }
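
Both pnp hunks above follow the same shape: check every device_create_file() return value and remove the attributes that were already created, in reverse order, before propagating the error. A compact sketch of that create-then-unwind pattern (the attribute names are placeholders):

#include <stdio.h>

/* Hypothetical attribute-creation steps; non-zero means failure. */
static int create_options(void)	{ return 0; }
static int create_resources(void)	{ return 0; }
static int create_id(void)		{ return -1; }	/* simulate a failure */
static void remove_options(void)	{ puts("rolled back: options"); }
static void remove_resources(void)	{ puts("rolled back: resources"); }

/* Create in order; on failure undo whatever already succeeded, in reverse. */
static int attach(void)
{
	int rc = create_options();
	if (rc)
		return rc;
	rc = create_resources();
	if (rc)
		goto err_options;
	rc = create_id();
	if (rc)
		goto err_resources;
	return 0;

err_resources:
	remove_resources();
err_options:
	remove_options();
	return rc;
}

int main(void)
{
	printf("attach: %d\n", attach());
	return 0;
}
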
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 81a6c83..33adeba 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -61,6 +61,7 @@
 #include <linux/dmi.h>
 #include <linux/delay.h>
 #include <linux/acpi.h>
+#include <linux/freezer.h>
 
 #include <asm/page.h>
 #include <asm/desc.h>
@@ -530,7 +531,8 @@
 	if (check_legacy_ioport(PNPBIOS_BASE))
 		return -ENODEV;
 #endif
-	if (pnpbios_disabled || dmi_check_system(pnpbios_dmi_table)) {
+	if (pnpbios_disabled || dmi_check_system(pnpbios_dmi_table) ||
+	    paravirt_enabled()) {
 		printk(KERN_INFO "PnPBIOS: Disabled\n");
 		return -ENODEV;
 	}
diff --git a/drivers/ps3/Makefile b/drivers/ps3/Makefile
new file mode 100644
index 0000000..b52d547
--- /dev/null
+++ b/drivers/ps3/Makefile
@@ -0,0 +1 @@
+obj-y += system-bus.o
diff --git a/drivers/ps3/system-bus.c b/drivers/ps3/system-bus.c
new file mode 100644
index 0000000..d79f949
--- /dev/null
+++ b/drivers/ps3/system-bus.c
@@ -0,0 +1,362 @@
+/*
+ *  PS3 system bus driver.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+
+#include <asm/udbg.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+#include <asm/firmware.h>
+
+#define dump_mmio_region(_a) _dump_mmio_region(_a, __func__, __LINE__)
+static void _dump_mmio_region(const struct ps3_mmio_region* r,
+	const char* func, int line)
+{
+	pr_debug("%s:%d: dev       %u:%u\n", func, line, r->did.bus_id,
+		r->did.dev_id);
+	pr_debug("%s:%d: bus_addr  %lxh\n", func, line, r->bus_addr);
+	pr_debug("%s:%d: len       %lxh\n", func, line, r->len);
+	pr_debug("%s:%d: lpar_addr %lxh\n", func, line, r->lpar_addr);
+}
+
+int ps3_mmio_region_create(struct ps3_mmio_region *r)
+{
+	int result;
+
+	result = lv1_map_device_mmio_region(r->did.bus_id, r->did.dev_id,
+		r->bus_addr, r->len, r->page_size, &r->lpar_addr);
+
+	if (result) {
+		pr_debug("%s:%d: lv1_map_device_mmio_region failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+		r->lpar_addr = r->len = r->bus_addr = 0;
+	}
+
+	dump_mmio_region(r);
+	return result;
+}
+
+int ps3_free_mmio_region(struct ps3_mmio_region *r)
+{
+	int result;
+
+	result = lv1_unmap_device_mmio_region(r->did.bus_id, r->did.dev_id,
+		r->bus_addr);
+
+	if (result)
+		pr_debug("%s:%d: lv1_unmap_device_mmio_region failed: %s\n",
+			__func__, __LINE__, ps3_result(result));
+
+	r->lpar_addr = r->len = r->bus_addr = 0;
+	return result;
+}
+
+static int ps3_system_bus_match(struct device *_dev,
+	struct device_driver *_drv)
+{
+	int result;
+	struct ps3_system_bus_driver *drv = to_ps3_system_bus_driver(_drv);
+	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+
+	result = dev->match_id == drv->match_id;
+
+	pr_info("%s:%d: dev=%u(%s), drv=%u(%s): %s\n", __func__, __LINE__,
+		dev->match_id, dev->core.bus_id, drv->match_id, drv->core.name,
+		(result ? "match" : "miss"));
+	return result;
+}
+
+static int ps3_system_bus_probe(struct device *_dev)
+{
+	int result;
+	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+	struct ps3_system_bus_driver *drv =
+		to_ps3_system_bus_driver(_dev->driver);
+
+	result = lv1_open_device(dev->did.bus_id, dev->did.dev_id, 0);
+
+	if (result) {
+		pr_debug("%s:%d: lv1_open_device failed (%d)\n",
+			__func__, __LINE__, result);
+		result = -EACCES;
+		goto clean_none;
+	}
+
+	if (dev->d_region->did.bus_id) {
+		result = ps3_dma_region_create(dev->d_region);
+
+		if (result) {
+			pr_debug("%s:%d: ps3_dma_region_create failed (%d)\n",
+				__func__, __LINE__, result);
+			BUG_ON("check region type");
+			result = -EINVAL;
+			goto clean_device;
+		}
+	}
+
+	BUG_ON(!drv);
+
+	if (drv->probe)
+		result = drv->probe(dev);
+	else
+		pr_info("%s:%d: %s no probe method\n", __func__, __LINE__,
+			dev->core.bus_id);
+
+	if (result) {
+		pr_debug("%s:%d: drv->probe failed\n", __func__, __LINE__);
+		goto clean_dma;
+	}
+
+	return result;
+
+clean_dma:
+	ps3_dma_region_free(dev->d_region);
+clean_device:
+	lv1_close_device(dev->did.bus_id, dev->did.dev_id);
+clean_none:
+	return result;
+}
+
+static int ps3_system_bus_remove(struct device *_dev)
+{
+	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+	struct ps3_system_bus_driver *drv =
+		to_ps3_system_bus_driver(_dev->driver);
+
+	if (drv->remove)
+		drv->remove(dev);
+	else
+		pr_info("%s:%d: %s no remove method\n", __func__, __LINE__,
+			dev->core.bus_id);
+
+	ps3_dma_region_free(dev->d_region);
+	ps3_free_mmio_region(dev->m_region);
+	lv1_close_device(dev->did.bus_id, dev->did.dev_id);
+
+	return 0;
+}
+
+struct bus_type ps3_system_bus_type = {
+        .name = "ps3_system_bus",
+	.match = ps3_system_bus_match,
+	.probe = ps3_system_bus_probe,
+	.remove = ps3_system_bus_remove,
+};
+
+int __init ps3_system_bus_init(void)
+{
+	int result;
+
+	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+		return 0;
+
+	result = bus_register(&ps3_system_bus_type);
+	BUG_ON(result);
+	return result;
+}
+
+core_initcall(ps3_system_bus_init);
+
+/* Allocates a contiguous real buffer and creates mappings over it.
+ * Returns the virtual address of the buffer and sets dma_handle
+ * to the dma address (mapping) of the first page.
+ */
+
+static void * ps3_alloc_coherent(struct device *_dev, size_t size,
+	dma_addr_t *dma_handle, gfp_t flag)
+{
+	int result;
+	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+	unsigned long virt_addr;
+
+	BUG_ON(!dev->d_region->bus_addr);
+
+	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
+	flag |= __GFP_ZERO;
+
+	virt_addr = __get_free_pages(flag, get_order(size));
+
+	if (!virt_addr) {
+		pr_debug("%s:%d: get_free_pages failed\n", __func__, __LINE__);
+		goto clean_none;
+	}
+
+	result = ps3_dma_map(dev->d_region, virt_addr, size, dma_handle);
+
+	if (result) {
+		pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
+			__func__, __LINE__, result);
+		BUG_ON("check region type");
+		goto clean_alloc;
+	}
+
+	return (void*)virt_addr;
+
+clean_alloc:
+	free_pages(virt_addr, get_order(size));
+clean_none:
+	*dma_handle = 0;
+	return NULL;
+}
+
+static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
+	dma_addr_t dma_handle)
+{
+	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+
+	ps3_dma_unmap(dev->d_region, dma_handle, size);
+	free_pages((unsigned long)vaddr, get_order(size));
+}
+
+/* Creates TCEs for a user provided buffer.  The user buffer must be
+ * contiguous real kernel storage (not vmalloc).  The address of the buffer
+ * passed here is the kernel (virtual) address of the buffer.  The buffer
+ * need not be page aligned, the dma_addr_t returned will point to the same
+ * byte within the page as vaddr.
+ */
+
+static dma_addr_t ps3_map_single(struct device *_dev, void *ptr, size_t size,
+	enum dma_data_direction direction)
+{
+	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+	int result;
+	unsigned long bus_addr;
+
+	result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
+		&bus_addr);
+
+	if (result) {
+		pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
+			__func__, __LINE__, result);
+	}
+
+	return bus_addr;
+}
+
+static void ps3_unmap_single(struct device *_dev, dma_addr_t dma_addr,
+	size_t size, enum dma_data_direction direction)
+{
+	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+	int result;
+
+	result = ps3_dma_unmap(dev->d_region, dma_addr, size);
+
+	if (result) {
+		pr_debug("%s:%d: ps3_dma_unmap failed (%d)\n",
+			__func__, __LINE__, result);
+	}
+}
+
+static int ps3_map_sg(struct device *_dev, struct scatterlist *sg, int nents,
+	enum dma_data_direction direction)
+{
+#if defined(CONFIG_PS3_DYNAMIC_DMA)
+	BUG_ON("do");
+#endif
+	return 0;
+}
+
+static void ps3_unmap_sg(struct device *_dev, struct scatterlist *sg,
+	int nents, enum dma_data_direction direction)
+{
+#if defined(CONFIG_PS3_DYNAMIC_DMA)
+	BUG_ON("do");
+#endif
+}
+
+static int ps3_dma_supported(struct device *_dev, u64 mask)
+{
+	return 1;
+}
+
+static struct dma_mapping_ops ps3_dma_ops = {
+	.alloc_coherent = ps3_alloc_coherent,
+	.free_coherent = ps3_free_coherent,
+	.map_single = ps3_map_single,
+	.unmap_single = ps3_unmap_single,
+	.map_sg = ps3_map_sg,
+	.unmap_sg = ps3_unmap_sg,
+	.dma_supported = ps3_dma_supported
+};
+
+/**
+ * ps3_system_bus_release_device - remove a device from the system bus
+ */
+
+static void ps3_system_bus_release_device(struct device *_dev)
+{
+	struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+	kfree(dev);
+}
+
+/**
+ * ps3_system_bus_device_register - add a device to the system bus
+ *
+ * ps3_system_bus_device_register() expects the dev object to be allocated
+ * dynamically by the caller.  The system bus takes ownership of the dev
+ * object and frees the object in ps3_system_bus_release_device().
+ */
+
+int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
+{
+	int result;
+	static unsigned int dev_count = 1;
+
+	dev->core.parent = NULL;
+	dev->core.bus = &ps3_system_bus_type;
+	dev->core.release = ps3_system_bus_release_device;
+
+	dev->core.archdata.of_node = NULL;
+	dev->core.archdata.dma_ops = &ps3_dma_ops;
+	dev->core.archdata.numa_node = 0;
+
+	snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), "sb_%02x",
+		dev_count++);
+
+	pr_debug("%s:%d add %s\n", __func__, __LINE__, dev->core.bus_id);
+
+	result = device_register(&dev->core);
+	return result;
+}
+
+EXPORT_SYMBOL_GPL(ps3_system_bus_device_register);
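
As the kernel-doc above states, registration hands ownership of the dynamically allocated device to the bus, which frees it from its release callback; the caller may only free the object itself if registration never succeeded. A minimal userspace sketch of that ownership rule (generic names, not the PS3 API):

#include <stdio.h>
#include <stdlib.h>

struct bus_dev {
	int id;
	void (*release)(struct bus_dev *);
};

static void bus_release(struct bus_dev *dev)
{
	printf("bus freeing device %d\n", dev->id);
	free(dev);			/* the bus, not the caller, frees it */
}

/* Registration takes ownership: from here on only the bus frees the object. */
static int bus_register_dev(struct bus_dev *dev)
{
	dev->release = bus_release;
	printf("registered device %d\n", dev->id);
	return 0;
}

int main(void)
{
	struct bus_dev *dev = calloc(1, sizeof(*dev));
	if (!dev)
		return 1;
	dev->id = 1;
	if (bus_register_dev(dev)) {
		free(dev);		/* caller frees only on registration failure */
		return 1;
	}
	dev->release(dev);		/* normally triggered by unregistering */
	return 0;
}
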
+
+int ps3_system_bus_driver_register(struct ps3_system_bus_driver *drv)
+{
+	int result;
+
+	drv->core.bus = &ps3_system_bus_type;
+
+	result = driver_register(&drv->core);
+	return result;
+}
+
+EXPORT_SYMBOL_GPL(ps3_system_bus_driver_register);
+
+void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv)
+{
+	driver_unregister(&drv->core);
+}
+
+EXPORT_SYMBOL_GPL(ps3_system_bus_driver_unregister);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index fc766a7..2a63ab2 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -154,15 +154,23 @@
 	  will be called rtc-ds1672.
 
 config RTC_DRV_DS1742
-	tristate "Dallas DS1742"
+	tristate "Dallas DS1742/1743"
 	depends on RTC_CLASS
 	help
 	  If you say yes here you get support for the
-	  Dallas DS1742 timekeeping chip.
+	  Dallas DS1742/1743 timekeeping chip.
 
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-ds1742.
 
+config RTC_DRV_OMAP
+	tristate "TI OMAP1"
+	depends on RTC_CLASS && ( \
+		ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 )
+	help
+	  Say "yes" here to support the real time clock on TI OMAP1 chips.
+	  This driver can also be built as a module called rtc-omap.
+
 config RTC_DRV_PCF8563
 	tristate "Philips PCF8563/Epson RTC8564"
 	depends on RTC_CLASS && I2C
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 3ba5ff6..bd4c45d 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -21,6 +21,7 @@
 obj-$(CONFIG_RTC_DRV_DS1307)	+= rtc-ds1307.o
 obj-$(CONFIG_RTC_DRV_DS1672)	+= rtc-ds1672.o
 obj-$(CONFIG_RTC_DRV_DS1742)	+= rtc-ds1742.o
+obj-$(CONFIG_RTC_DRV_OMAP)	+= rtc-omap.o
 obj-$(CONFIG_RTC_DRV_PCF8563)	+= rtc-pcf8563.o
 obj-$(CONFIG_RTC_DRV_PCF8583)	+= rtc-pcf8583.o
 obj-$(CONFIG_RTC_DRV_RS5C372)	+= rtc-rs5c372.o
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 814b9e1..828b329 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -53,9 +53,10 @@
  * Routine to poll RTC seconds field for change as often as possible,
  * after first RTC_UIE use timer to reduce polling
  */
-static void rtc_uie_task(void *data)
+static void rtc_uie_task(struct work_struct *work)
 {
-	struct rtc_device *rtc = data;
+	struct rtc_device *rtc =
+		container_of(work, struct rtc_device, uie_task);
 	struct rtc_time tm;
 	int num = 0;
 	int err;
@@ -411,7 +412,7 @@
 	spin_lock_init(&rtc->irq_lock);
 	init_waitqueue_head(&rtc->irq_queue);
 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-	INIT_WORK(&rtc->uie_task, rtc_uie_task, rtc);
+	INIT_WORK(&rtc->uie_task, rtc_uie_task);
 	setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
 #endif
 
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 67e816a..dfef163 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -237,17 +237,22 @@
 	/* read control register */
 	err = ds1672_get_control(client, &control);
 	if (err)
-		goto exit_detach;
+		goto exit_devreg;
 
 	if (control & DS1672_REG_CONTROL_EOSC)
 		dev_warn(&client->dev, "Oscillator not enabled. "
 					"Set time to enable.\n");
 
 	/* Register sysfs hooks */
-	device_create_file(&client->dev, &dev_attr_control);
+	err = device_create_file(&client->dev, &dev_attr_control);
+	if (err)
+		goto exit_devreg;
 
 	return 0;
 
+exit_devreg:
+	rtc_device_unregister(rtc);
+
 exit_detach:
 	i2c_detach_client(client);
 
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 6273a3d..17633bf 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -6,6 +6,10 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2006 Torsten Ertbjerg Rasmussen <tr@newtec.dk>
+ *  - nvram size determined from resource
+ *  - this ds1742 driver now supports ds1743.
  */
 
 #include <linux/bcd.h>
@@ -17,20 +21,19 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 
-#define DRV_VERSION "0.2"
+#define DRV_VERSION "0.3"
 
-#define RTC_REG_SIZE		0x800
-#define RTC_OFFSET		0x7f8
+#define RTC_SIZE		8
 
-#define RTC_CONTROL		(RTC_OFFSET + 0)
-#define RTC_CENTURY		(RTC_OFFSET + 0)
-#define RTC_SECONDS		(RTC_OFFSET + 1)
-#define RTC_MINUTES		(RTC_OFFSET + 2)
-#define RTC_HOURS		(RTC_OFFSET + 3)
-#define RTC_DAY			(RTC_OFFSET + 4)
-#define RTC_DATE		(RTC_OFFSET + 5)
-#define RTC_MONTH		(RTC_OFFSET + 6)
-#define RTC_YEAR		(RTC_OFFSET + 7)
+#define RTC_CONTROL		0
+#define RTC_CENTURY		0
+#define RTC_SECONDS		1
+#define RTC_MINUTES		2
+#define RTC_HOURS		3
+#define RTC_DAY			4
+#define RTC_DATE		5
+#define RTC_MONTH		6
+#define RTC_YEAR		7
 
 #define RTC_CENTURY_MASK	0x3f
 #define RTC_SECONDS_MASK	0x7f
@@ -48,7 +51,10 @@
 
 struct rtc_plat_data {
 	struct rtc_device *rtc;
-	void __iomem *ioaddr;
+	void __iomem *ioaddr_nvram;
+	void __iomem *ioaddr_rtc;
+	size_t size_nvram;
+	size_t size;
 	unsigned long baseaddr;
 	unsigned long last_jiffies;
 };
@@ -57,7 +63,7 @@
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-	void __iomem *ioaddr = pdata->ioaddr;
+	void __iomem *ioaddr = pdata->ioaddr_rtc;
 	u8 century;
 
 	century = BIN2BCD((tm->tm_year + 1900) / 100);
@@ -82,7 +88,7 @@
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-	void __iomem *ioaddr = pdata->ioaddr;
+	void __iomem *ioaddr = pdata->ioaddr_rtc;
 	unsigned int year, month, day, hour, minute, second, week;
 	unsigned int century;
 
@@ -127,10 +133,10 @@
 	struct platform_device *pdev =
 		to_platform_device(container_of(kobj, struct device, kobj));
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-	void __iomem *ioaddr = pdata->ioaddr;
+	void __iomem *ioaddr = pdata->ioaddr_nvram;
 	ssize_t count;
 
-	for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
+	for (count = 0; size > 0 && pos < pdata->size_nvram; count++, size--)
 		*buf++ = readb(ioaddr + pos++);
 	return count;
 }
@@ -141,10 +147,10 @@
 	struct platform_device *pdev =
 		to_platform_device(container_of(kobj, struct device, kobj));
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-	void __iomem *ioaddr = pdata->ioaddr;
+	void __iomem *ioaddr = pdata->ioaddr_nvram;
 	ssize_t count;
 
-	for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
+	for (count = 0; size > 0 && pos < pdata->size_nvram; count++, size--)
 		writeb(*buf++, ioaddr + pos++);
 	return count;
 }
@@ -155,7 +161,6 @@
 		.mode = S_IRUGO | S_IWUGO,
 		.owner = THIS_MODULE,
 	},
-	.size = RTC_OFFSET,
 	.read = ds1742_nvram_read,
 	.write = ds1742_nvram_write,
 };
@@ -175,19 +180,23 @@
 	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
-	if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) {
+	pdata->size = res->end - res->start + 1;
+	if (!request_mem_region(res->start, pdata->size, pdev->name)) {
 		ret = -EBUSY;
 		goto out;
 	}
 	pdata->baseaddr = res->start;
-	ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE);
+	ioaddr = ioremap(pdata->baseaddr, pdata->size);
 	if (!ioaddr) {
 		ret = -ENOMEM;
 		goto out;
 	}
-	pdata->ioaddr = ioaddr;
+	pdata->ioaddr_nvram = ioaddr;
+	pdata->size_nvram = pdata->size - RTC_SIZE;
+	pdata->ioaddr_rtc = ioaddr + pdata->size_nvram;
 
 	/* turn RTC on if it was not on */
+	ioaddr = pdata->ioaddr_rtc;
 	sec = readb(ioaddr + RTC_SECONDS);
 	if (sec & RTC_STOP) {
 		sec &= RTC_SECONDS_MASK;
@@ -208,6 +217,8 @@
 	pdata->rtc = rtc;
 	pdata->last_jiffies = jiffies;
 	platform_set_drvdata(pdev, pdata);
+	ds1742_nvram_attr.size = max(ds1742_nvram_attr.size,
+				     pdata->size_nvram);
 	ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1742_nvram_attr);
 	if (ret)
 		goto out;
@@ -215,10 +226,10 @@
  out:
 	if (pdata->rtc)
 		rtc_device_unregister(pdata->rtc);
-	if (ioaddr)
-		iounmap(ioaddr);
+	if (pdata->ioaddr_nvram)
+		iounmap(pdata->ioaddr_nvram);
 	if (pdata->baseaddr)
-		release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
+		release_mem_region(pdata->baseaddr, pdata->size);
 	kfree(pdata);
 	return ret;
 }
@@ -229,8 +240,8 @@
 
 	sysfs_remove_bin_file(&pdev->dev.kobj, &ds1742_nvram_attr);
 	rtc_device_unregister(pdata->rtc);
-	iounmap(pdata->ioaddr);
-	release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
+	iounmap(pdata->ioaddr_nvram);
+	release_mem_region(pdata->baseaddr, pdata->size);
 	kfree(pdata);
 	return 0;
 }
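
With the resource-sized mapping above, the eight DS1742/DS1743 clock registers always sit at the top of the window and everything below them is battery-backed NVRAM, so size_nvram is simply the resource size minus RTC_SIZE. A small arithmetic sketch of that split (the second window size is just an example of a larger part):

#include <stdio.h>

#define RTC_SIZE 8	/* clock/control registers at the top of the window */

static void show_split(unsigned long res_size)
{
	unsigned long size_nvram = res_size - RTC_SIZE;

	printf("window %#lx: nvram 0x0..%#lx, rtc regs %#lx..%#lx\n",
	       res_size, size_nvram - 1, size_nvram, res_size - 1);
}

int main(void)
{
	show_split(0x800);	/* DS1742: 2 KiB window, 0x7f8 bytes of NVRAM */
	show_split(0x2000);	/* hypothetical larger part, same layout */
	return 0;
}
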
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
new file mode 100644
index 0000000..eac5fb1
--- /dev/null
+++ b/drivers/rtc/rtc-omap.c
@@ -0,0 +1,572 @@
+/*
+ * TI OMAP1 Real Time Clock interface for Linux
+ *
+ * Copyright (C) 2003 MontaVista Software, Inc.
+ * Author: George G. Davis <gdavis@mvista.com> or <source@mvista.com>
+ *
+ * Copyright (C) 2006 David Brownell (new RTC framework)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include <asm/mach/time.h>
+
+
+/* The OMAP1 RTC is a year/month/day/hours/minutes/seconds BCD clock
+ * with century-range alarm matching, driven by the 32kHz clock.
+ *
+ * The main user-visible ways it differs from PC RTCs are by omitting
+ * "don't care" alarm fields and sub-second periodic IRQs, and having
+ * an autoadjust mechanism to calibrate to the true oscillator rate.
+ *
+ * Board-specific wiring options include using split power mode with
+ * RTC_OFF_NOFF as the reset signal (so the RTC won't be reset),
+ * and wiring RTC_WAKE_INT (so the RTC alarm can wake the system from
+ * low power modes).  See the BOARD-SPECIFIC CUSTOMIZATION comment.
+ */
+
+#define OMAP_RTC_BASE			0xfffb4800
+
+/* RTC registers */
+#define OMAP_RTC_SECONDS_REG		0x00
+#define OMAP_RTC_MINUTES_REG		0x04
+#define OMAP_RTC_HOURS_REG		0x08
+#define OMAP_RTC_DAYS_REG		0x0C
+#define OMAP_RTC_MONTHS_REG		0x10
+#define OMAP_RTC_YEARS_REG		0x14
+#define OMAP_RTC_WEEKS_REG		0x18
+
+#define OMAP_RTC_ALARM_SECONDS_REG	0x20
+#define OMAP_RTC_ALARM_MINUTES_REG	0x24
+#define OMAP_RTC_ALARM_HOURS_REG	0x28
+#define OMAP_RTC_ALARM_DAYS_REG		0x2c
+#define OMAP_RTC_ALARM_MONTHS_REG	0x30
+#define OMAP_RTC_ALARM_YEARS_REG	0x34
+
+#define OMAP_RTC_CTRL_REG		0x40
+#define OMAP_RTC_STATUS_REG		0x44
+#define OMAP_RTC_INTERRUPTS_REG		0x48
+
+#define OMAP_RTC_COMP_LSB_REG		0x4c
+#define OMAP_RTC_COMP_MSB_REG		0x50
+#define OMAP_RTC_OSC_REG		0x54
+
+/* OMAP_RTC_CTRL_REG bit fields: */
+#define OMAP_RTC_CTRL_SPLIT		(1<<7)
+#define OMAP_RTC_CTRL_DISABLE		(1<<6)
+#define OMAP_RTC_CTRL_SET_32_COUNTER	(1<<5)
+#define OMAP_RTC_CTRL_TEST		(1<<4)
+#define OMAP_RTC_CTRL_MODE_12_24	(1<<3)
+#define OMAP_RTC_CTRL_AUTO_COMP		(1<<2)
+#define OMAP_RTC_CTRL_ROUND_30S		(1<<1)
+#define OMAP_RTC_CTRL_STOP		(1<<0)
+
+/* OMAP_RTC_STATUS_REG bit fields: */
+#define OMAP_RTC_STATUS_POWER_UP        (1<<7)
+#define OMAP_RTC_STATUS_ALARM           (1<<6)
+#define OMAP_RTC_STATUS_1D_EVENT        (1<<5)
+#define OMAP_RTC_STATUS_1H_EVENT        (1<<4)
+#define OMAP_RTC_STATUS_1M_EVENT        (1<<3)
+#define OMAP_RTC_STATUS_1S_EVENT        (1<<2)
+#define OMAP_RTC_STATUS_RUN             (1<<1)
+#define OMAP_RTC_STATUS_BUSY            (1<<0)
+
+/* OMAP_RTC_INTERRUPTS_REG bit fields: */
+#define OMAP_RTC_INTERRUPTS_IT_ALARM    (1<<3)
+#define OMAP_RTC_INTERRUPTS_IT_TIMER    (1<<2)
+
+
+#define rtc_read(addr)		omap_readb(OMAP_RTC_BASE + (addr))
+#define rtc_write(val, addr)	omap_writeb(val, OMAP_RTC_BASE + (addr))
+
+
+/* platform_bus isn't hotpluggable, so for static linkage it'd be safe
+ * to get rid of probe() and remove() code ... too bad the driver struct
+ * remembers probe(), that's about 25% of the runtime footprint!!
+ */
+#ifndef	MODULE
+#undef	__devexit
+#undef	__devexit_p
+#define	__devexit	__exit
+#define	__devexit_p	__exit_p
+#endif
+
+
+/* we rely on the rtc framework to handle locking (rtc->ops_lock),
+ * so the only other requirement is that register accesses which
+ * require BUSY to be clear are made with IRQs locally disabled
+ */
+static void rtc_wait_not_busy(void)
+{
+	int	count = 0;
+	u8	status;
+
+	/* BUSY may stay active for 1/32768 second (~30 usec) */
+	for (count = 0; count < 50; count++) {
+		status = rtc_read(OMAP_RTC_STATUS_REG);
+		if ((status & (u8)OMAP_RTC_STATUS_BUSY) == 0)
+			break;
+		udelay(1);
+	}
+	/* now we have ~15 usec to read/write various registers */
+}
+
+static irqreturn_t rtc_irq(int irq, void *class_dev)
+{
+	unsigned long		events = 0;
+	u8			irq_data;
+
+	irq_data = rtc_read(OMAP_RTC_STATUS_REG);
+
+	/* alarm irq? */
+	if (irq_data & OMAP_RTC_STATUS_ALARM) {
+		rtc_write(OMAP_RTC_STATUS_ALARM, OMAP_RTC_STATUS_REG);
+		events |= RTC_IRQF | RTC_AF;
+	}
+
+	/* 1/sec periodic/update irq? */
+	if (irq_data & OMAP_RTC_STATUS_1S_EVENT)
+		events |= RTC_IRQF | RTC_UF;
+
+	rtc_update_irq(class_dev, 1, events);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef	CONFIG_RTC_INTF_DEV
+
+static int
+omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+	u8 reg;
+
+	switch (cmd) {
+	case RTC_AIE_OFF:
+	case RTC_AIE_ON:
+	case RTC_UIE_OFF:
+	case RTC_UIE_ON:
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
+
+	local_irq_disable();
+	rtc_wait_not_busy();
+	reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
+	switch (cmd) {
+	/* AIE = Alarm Interrupt Enable */
+	case RTC_AIE_OFF:
+		reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
+		break;
+	case RTC_AIE_ON:
+		reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
+		break;
+	/* UIE = Update Interrupt Enable (1/second) */
+	case RTC_UIE_OFF:
+		reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER;
+		break;
+	case RTC_UIE_ON:
+		reg |= OMAP_RTC_INTERRUPTS_IT_TIMER;
+		break;
+	}
+	rtc_wait_not_busy();
+	rtc_write(reg, OMAP_RTC_INTERRUPTS_REG);
+	local_irq_enable();
+
+	return 0;
+}
+
+#else
+#define	omap_rtc_ioctl	NULL
+#endif
+
+/* this hardware doesn't support "don't care" alarm fields */
+static int tm2bcd(struct rtc_time *tm)
+{
+	if (rtc_valid_tm(tm) != 0)
+		return -EINVAL;
+
+	tm->tm_sec = BIN2BCD(tm->tm_sec);
+	tm->tm_min = BIN2BCD(tm->tm_min);
+	tm->tm_hour = BIN2BCD(tm->tm_hour);
+	tm->tm_mday = BIN2BCD(tm->tm_mday);
+
+	tm->tm_mon = BIN2BCD(tm->tm_mon + 1);
+
+	/* epoch == 1900 */
+	if (tm->tm_year < 100 || tm->tm_year > 199)
+		return -EINVAL;
+	tm->tm_year = BIN2BCD(tm->tm_year - 100);
+
+	return 0;
+}
+
+static void bcd2tm(struct rtc_time *tm)
+{
+	tm->tm_sec = BCD2BIN(tm->tm_sec);
+	tm->tm_min = BCD2BIN(tm->tm_min);
+	tm->tm_hour = BCD2BIN(tm->tm_hour);
+	tm->tm_mday = BCD2BIN(tm->tm_mday);
+	tm->tm_mon = BCD2BIN(tm->tm_mon) - 1;
+	/* epoch == 1900 */
+	tm->tm_year = BCD2BIN(tm->tm_year) + 100;
+}
+
+
+static int omap_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	/* we don't report wday/yday/isdst ... */
+	local_irq_disable();
+	rtc_wait_not_busy();
+
+	tm->tm_sec = rtc_read(OMAP_RTC_SECONDS_REG);
+	tm->tm_min = rtc_read(OMAP_RTC_MINUTES_REG);
+	tm->tm_hour = rtc_read(OMAP_RTC_HOURS_REG);
+	tm->tm_mday = rtc_read(OMAP_RTC_DAYS_REG);
+	tm->tm_mon = rtc_read(OMAP_RTC_MONTHS_REG);
+	tm->tm_year = rtc_read(OMAP_RTC_YEARS_REG);
+
+	local_irq_enable();
+
+	bcd2tm(tm);
+	return 0;
+}
+
+static int omap_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	if (tm2bcd(tm) < 0)
+		return -EINVAL;
+	local_irq_disable();
+	rtc_wait_not_busy();
+
+	rtc_write(tm->tm_year, OMAP_RTC_YEARS_REG);
+	rtc_write(tm->tm_mon, OMAP_RTC_MONTHS_REG);
+	rtc_write(tm->tm_mday, OMAP_RTC_DAYS_REG);
+	rtc_write(tm->tm_hour, OMAP_RTC_HOURS_REG);
+	rtc_write(tm->tm_min, OMAP_RTC_MINUTES_REG);
+	rtc_write(tm->tm_sec, OMAP_RTC_SECONDS_REG);
+
+	local_irq_enable();
+
+	return 0;
+}
+
+static int omap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+	local_irq_disable();
+	rtc_wait_not_busy();
+
+	alm->time.tm_sec = rtc_read(OMAP_RTC_ALARM_SECONDS_REG);
+	alm->time.tm_min = rtc_read(OMAP_RTC_ALARM_MINUTES_REG);
+	alm->time.tm_hour = rtc_read(OMAP_RTC_ALARM_HOURS_REG);
+	alm->time.tm_mday = rtc_read(OMAP_RTC_ALARM_DAYS_REG);
+	alm->time.tm_mon = rtc_read(OMAP_RTC_ALARM_MONTHS_REG);
+	alm->time.tm_year = rtc_read(OMAP_RTC_ALARM_YEARS_REG);
+
+	local_irq_enable();
+
+	bcd2tm(&alm->time);
+	alm->pending = !!(rtc_read(OMAP_RTC_INTERRUPTS_REG)
+			& OMAP_RTC_INTERRUPTS_IT_ALARM);
+	alm->enabled = alm->pending && device_may_wakeup(dev);
+
+	return 0;
+}
+
+static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+	u8 reg;
+
+	/* Much userspace code uses RTC_ALM_SET, thus "don't care" for
+	 * day/month/year specifies alarms up to 24 hours in the future.
+	 * So we need to handle that ... but let's ignore the "don't care"
+	 * values for hours/minutes/seconds.
+	 */
+	if (alm->time.tm_mday <= 0
+			&& alm->time.tm_mon < 0
+			&& alm->time.tm_year < 0) {
+		struct rtc_time tm;
+		unsigned long now, then;
+
+		omap_rtc_read_time(dev, &tm);
+		rtc_tm_to_time(&tm, &now);
+
+		alm->time.tm_mday = tm.tm_mday;
+		alm->time.tm_mon = tm.tm_mon;
+		alm->time.tm_year = tm.tm_year;
+		rtc_tm_to_time(&alm->time, &then);
+
+		/* sometimes the alarm wraps into tomorrow */
+		if (then < now) {
+			rtc_time_to_tm(now + 24 * 60 * 60, &tm);
+			alm->time.tm_mday = tm.tm_mday;
+			alm->time.tm_mon = tm.tm_mon;
+			alm->time.tm_year = tm.tm_year;
+		}
+	}
+
+	if (tm2bcd(&alm->time) < 0)
+		return -EINVAL;
+
+	local_irq_disable();
+	rtc_wait_not_busy();
+
+	rtc_write(alm->time.tm_year, OMAP_RTC_ALARM_YEARS_REG);
+	rtc_write(alm->time.tm_mon, OMAP_RTC_ALARM_MONTHS_REG);
+	rtc_write(alm->time.tm_mday, OMAP_RTC_ALARM_DAYS_REG);
+	rtc_write(alm->time.tm_hour, OMAP_RTC_ALARM_HOURS_REG);
+	rtc_write(alm->time.tm_min, OMAP_RTC_ALARM_MINUTES_REG);
+	rtc_write(alm->time.tm_sec, OMAP_RTC_ALARM_SECONDS_REG);
+
+	reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
+	if (alm->enabled)
+		reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
+	else
+		reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
+	rtc_write(reg, OMAP_RTC_INTERRUPTS_REG);
+
+	local_irq_enable();
+
+	return 0;
+}
+
+static struct rtc_class_ops omap_rtc_ops = {
+	.ioctl		= omap_rtc_ioctl,
+	.read_time	= omap_rtc_read_time,
+	.set_time	= omap_rtc_set_time,
+	.read_alarm	= omap_rtc_read_alarm,
+	.set_alarm	= omap_rtc_set_alarm,
+};
+
+static int omap_rtc_alarm;
+static int omap_rtc_timer;
+
+static int __devinit omap_rtc_probe(struct platform_device *pdev)
+{
+	struct resource		*res, *mem;
+	struct rtc_device	*rtc;
+	u8			reg, new_ctrl;
+
+	omap_rtc_timer = platform_get_irq(pdev, 0);
+	if (omap_rtc_timer <= 0) {
+		pr_debug("%s: no update irq?\n", pdev->name);
+		return -ENOENT;
+	}
+
+	omap_rtc_alarm = platform_get_irq(pdev, 1);
+	if (omap_rtc_alarm <= 0) {
+		pr_debug("%s: no alarm irq?\n", pdev->name);
+		return -ENOENT;
+	}
+
+	/* NOTE:  using static mapping for RTC registers */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res && res->start != OMAP_RTC_BASE) {
+		pr_debug("%s: RTC registers at %08x, expected %08x\n",
+			pdev->name, (unsigned) res->start, OMAP_RTC_BASE);
+		return -ENOENT;
+	}
+
+	if (res)
+		mem = request_mem_region(res->start,
+				res->end - res->start + 1,
+				pdev->name);
+	else
+		mem = NULL;
+	if (!mem) {
+		pr_debug("%s: RTC registers at %08x are not free\n",
+			pdev->name, OMAP_RTC_BASE);
+		return -EBUSY;
+	}
+
+	rtc = rtc_device_register(pdev->name, &pdev->dev,
+			&omap_rtc_ops, THIS_MODULE);
+	if (IS_ERR(rtc)) {
+		pr_debug("%s: can't register RTC device, err %ld\n",
+			pdev->name, PTR_ERR(rtc));
+		goto fail;
+	}
+	platform_set_drvdata(pdev, rtc);
+	class_set_devdata(&rtc->class_dev, mem);
+
+	/* clear pending irqs, and set 1/second periodic,
+	 * which we'll use instead of update irqs
+	 */
+	rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+
+	/* clear old status */
+	reg = rtc_read(OMAP_RTC_STATUS_REG);
+	if (reg & (u8) OMAP_RTC_STATUS_POWER_UP) {
+		pr_info("%s: RTC power up reset detected\n",
+			pdev->name);
+		rtc_write(OMAP_RTC_STATUS_POWER_UP, OMAP_RTC_STATUS_REG);
+	}
+	if (reg & (u8) OMAP_RTC_STATUS_ALARM)
+		rtc_write(OMAP_RTC_STATUS_ALARM, OMAP_RTC_STATUS_REG);
+
+	/* handle periodic and alarm irqs */
+	if (request_irq(omap_rtc_timer, rtc_irq, SA_INTERRUPT,
+			rtc->class_dev.class_id, &rtc->class_dev)) {
+		pr_debug("%s: RTC timer interrupt IRQ%d already claimed\n",
+			pdev->name, omap_rtc_timer);
+		goto fail0;
+	}
+	if (request_irq(omap_rtc_alarm, rtc_irq, SA_INTERRUPT,
+			rtc->class_dev.class_id, &rtc->class_dev)) {
+		pr_debug("%s: RTC alarm interrupt IRQ%d already claimed\n",
+			pdev->name, omap_rtc_alarm);
+		goto fail1;
+	}
+
+	/* On boards with split power, RTC_ON_nOFF won't reset the RTC */
+	reg = rtc_read(OMAP_RTC_CTRL_REG);
+	if (reg & (u8) OMAP_RTC_CTRL_STOP)
+		pr_info("%s: already running\n", pdev->name);
+
+	/* force to 24 hour mode */
+	new_ctrl = reg & ~(OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP);
+	new_ctrl |= OMAP_RTC_CTRL_STOP;
+
+	/* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE:
+	 *
+	 *  - Boards wired so that RTC_WAKE_INT does something, and muxed
+	 *    right (W13_1610_RTC_WAKE_INT is the default after chip reset),
+	 *    should initialize the device wakeup flag appropriately.
+	 *
+	 *  - Boards wired so RTC_ON_nOFF is used as the reset signal,
+	 *    rather than nPWRON_RESET, should forcibly enable split
+	 *    power mode.  (Some chip errata report that RTC_CTRL_SPLIT
+	 *    is write-only, and always reads as zero...)
+	 */
+	device_init_wakeup(&pdev->dev, 0);
+
+	if (new_ctrl & (u8) OMAP_RTC_CTRL_SPLIT)
+		pr_info("%s: split power mode\n", pdev->name);
+
+	if (reg != new_ctrl)
+		rtc_write(new_ctrl, OMAP_RTC_CTRL_REG);
+
+	return 0;
+
+fail1:
+	free_irq(omap_rtc_timer, NULL);
+fail0:
+	rtc_device_unregister(rtc);
+fail:
+	release_resource(mem);
+	return -EIO;
+}
+
+static int __devexit omap_rtc_remove(struct platform_device *pdev)
+{
+	struct rtc_device	*rtc = platform_get_drvdata(pdev);
+
+	device_init_wakeup(&pdev->dev, 0);
+
+	/* leave rtc running, but disable irqs */
+	rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+
+	free_irq(omap_rtc_timer, rtc);
+	free_irq(omap_rtc_alarm, rtc);
+
+	release_resource(class_get_devdata(&rtc->class_dev));
+	rtc_device_unregister(rtc);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+
+static struct timespec rtc_delta;
+static u8 irqstat;
+
+static int omap_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct rtc_time rtc_tm;
+	struct timespec time;
+
+	time.tv_nsec = 0;
+	omap_rtc_read_time(NULL, &rtc_tm);
+	rtc_tm_to_time(&rtc_tm, &time.tv_sec);
+
+	save_time_delta(&rtc_delta, &time);
+	irqstat = rtc_read(OMAP_RTC_INTERRUPTS_REG);
+
+	/* FIXME the RTC alarm is not currently acting as a wakeup event
+	 * source, and in fact this enable() call is just saving a flag
+	 * that's never used...
+	 */
+	if (device_may_wakeup(&pdev->dev))
+		enable_irq_wake(omap_rtc_alarm);
+	else
+		rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+
+	return 0;
+}
+
+static int omap_rtc_resume(struct platform_device *pdev)
+{
+	struct rtc_time rtc_tm;
+	struct timespec time;
+
+	time.tv_nsec = 0;
+	omap_rtc_read_time(NULL, &rtc_tm);
+	rtc_tm_to_time(&rtc_tm, &time.tv_sec);
+
+	restore_time_delta(&rtc_delta, &time);
+	if (device_may_wakeup(&pdev->dev))
+		disable_irq_wake(omap_rtc_alarm);
+	else
+		rtc_write(irqstat, OMAP_RTC_INTERRUPTS_REG);
+	return 0;
+}
+
+#else
+#define omap_rtc_suspend NULL
+#define omap_rtc_resume  NULL
+#endif
+
+static void omap_rtc_shutdown(struct platform_device *pdev)
+{
+	rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+}
+
+MODULE_ALIAS("omap_rtc");
+static struct platform_driver omap_rtc_driver = {
+	.probe		= omap_rtc_probe,
+	.remove		= __devexit_p(omap_rtc_remove),
+	.suspend	= omap_rtc_suspend,
+	.resume		= omap_rtc_resume,
+	.shutdown	= omap_rtc_shutdown,
+	.driver		= {
+		.name	= "omap_rtc",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init rtc_init(void)
+{
+	return platform_driver_register(&omap_rtc_driver);
+}
+module_init(rtc_init);
+
+static void __exit rtc_exit(void)
+{
+	platform_driver_unregister(&omap_rtc_driver);
+}
+module_exit(rtc_exit);
+
+MODULE_AUTHOR("George G. Davis (and others)");
+MODULE_LICENSE("GPL");
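
Note (not part of the patch): omap_rtc_set_alarm() above fills in "don't care" day/month/year fields from the current date and pushes the date one day forward when the requested time of day has already passed. The same calculation as a standalone sketch in plain C, where mktime()/localtime() and the helper complete_alarm_date() merely stand in for the kernel's rtc_tm_to_time()/rtc_time_to_tm() and are not kernel interfaces:

#include <stdio.h>
#include <time.h>

/*
 * Fill in a "today or tomorrow" date for a wall-clock alarm, mirroring
 * what omap_rtc_set_alarm() does for RTC_ALM_SET: keep the requested
 * hh:mm:ss, take the date from "now", and move one day ahead when that
 * time of day has already gone by.
 */
static void complete_alarm_date(struct tm *alarm, time_t now)
{
	struct tm today = *localtime(&now);
	time_t then;

	alarm->tm_mday = today.tm_mday;
	alarm->tm_mon  = today.tm_mon;
	alarm->tm_year = today.tm_year;
	alarm->tm_isdst = -1;

	then = mktime(alarm);
	if (then < now) {
		/* the alarm wraps into tomorrow */
		time_t tomorrow = now + 24 * 60 * 60;
		struct tm t = *localtime(&tomorrow);

		alarm->tm_mday = t.tm_mday;
		alarm->tm_mon  = t.tm_mon;
		alarm->tm_year = t.tm_year;
	}
}

int main(void)
{
	struct tm alarm = { .tm_hour = 6, .tm_min = 30, .tm_sec = 0 };

	complete_alarm_date(&alarm, time(NULL));
	printf("alarm fires %04d-%02d-%02d %02d:%02d\n",
	       alarm.tm_year + 1900, alarm.tm_mon + 1, alarm.tm_mday,
	       alarm.tm_hour, alarm.tm_min);
	return 0;
}
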
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index a44fe4e..e2c7698 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -13,7 +13,7 @@
 #include <linux/rtc.h>
 #include <linux/bcd.h>
 
-#define DRV_VERSION "0.2"
+#define DRV_VERSION "0.3"
 
 /* Addresses to scan */
 static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END };
@@ -39,6 +39,14 @@
 static int rs5c372_detach(struct i2c_client *client);
 static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind);
 
+struct rs5c372 {
+	u8 reg_addr;
+	u8 regs[17];
+	struct i2c_msg msg[1];
+	struct i2c_client client;
+	struct rtc_device *rtc;
+};
+
 static struct i2c_driver rs5c372_driver = {
 	.driver		= {
 		.name	= "rs5c372",
@@ -49,18 +57,16 @@
 
 static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm)
 {
-	unsigned char buf[7] = { RS5C372_REG_BASE };
 
-	/* this implements the 1st reading method, according
-	 * to the datasheet. buf[0] is initialized with
-	 * address ptr and transmission format register.
+	struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
+	u8 *buf = &(rs5c372->regs[1]);
+
+	/* this implements the 3rd reading method, according
+	 * to the datasheet. rs5c372 defaults to internal
+	 * address 0xF, so 0x0 is in regs[1]
 	 */
-	struct i2c_msg msgs[] = {
-		{ client->addr, 0, 1, buf },
-		{ client->addr, I2C_M_RD, 7, buf },
-	};
 
-	if ((i2c_transfer(client->adapter, msgs, 2)) != 2) {
+	if ((i2c_transfer(client->adapter, rs5c372->msg, 1)) != 1) {
 		dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
 		return -EIO;
 	}
@@ -114,23 +120,14 @@
 
 static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim)
 {
-	unsigned char buf = RS5C372_REG_TRIM;
-
-	struct i2c_msg msgs[] = {
-		{ client->addr, 0, 1, &buf },
-		{ client->addr, I2C_M_RD, 1, &buf },
-	};
-
-	if ((i2c_transfer(client->adapter, msgs, 2)) != 2) {
-		dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
-		return -EIO;
-	}
+	struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
+	u8 tmp = rs5c372->regs[RS5C372_REG_TRIM + 1];
 
 	if (osc)
-		*osc = (buf & RS5C372_TRIM_XSL) ? 32000 : 32768;
+		*osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768;
 
 	if (trim) {
-		*trim = buf & RS5C372_TRIM_MASK;
+		*trim = tmp & RS5C372_TRIM_MASK;
 		dev_dbg(&client->dev, "%s: raw trim=%x\n", __FUNCTION__, *trim);
 	}
 
@@ -201,7 +198,7 @@
 {
 	int err = 0;
 	struct i2c_client *client;
-	struct rtc_device *rtc;
+	struct rs5c372 *rs5c372;
 
 	dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
 
@@ -210,10 +207,11 @@
 		goto exit;
 	}
 
-	if (!(client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) {
+	if (!(rs5c372 = kzalloc(sizeof(struct rs5c372), GFP_KERNEL))) {
 		err = -ENOMEM;
 		goto exit;
 	}
+	client = &rs5c372->client;
 
 	/* I2C client */
 	client->addr = address;
@@ -222,32 +220,47 @@
 
 	strlcpy(client->name, rs5c372_driver.driver.name, I2C_NAME_SIZE);
 
+	i2c_set_clientdata(client, rs5c372);
+
+	rs5c372->msg[0].addr = address;
+	rs5c372->msg[0].flags = I2C_M_RD;
+	rs5c372->msg[0].len = sizeof(rs5c372->regs);
+	rs5c372->msg[0].buf = rs5c372->regs;
+
 	/* Inform the i2c layer */
 	if ((err = i2c_attach_client(client)))
 		goto exit_kfree;
 
 	dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
 
-	rtc = rtc_device_register(rs5c372_driver.driver.name, &client->dev,
-				&rs5c372_rtc_ops, THIS_MODULE);
+	rs5c372->rtc = rtc_device_register(rs5c372_driver.driver.name,
+				&client->dev, &rs5c372_rtc_ops, THIS_MODULE);
 
-	if (IS_ERR(rtc)) {
-		err = PTR_ERR(rtc);
+	if (IS_ERR(rs5c372->rtc)) {
+		err = PTR_ERR(rs5c372->rtc);
 		goto exit_detach;
 	}
 
-	i2c_set_clientdata(client, rtc);
-
-	device_create_file(&client->dev, &dev_attr_trim);
-	device_create_file(&client->dev, &dev_attr_osc);
+	err = device_create_file(&client->dev, &dev_attr_trim);
+	if (err)
+		goto exit_devreg;
+	err = device_create_file(&client->dev, &dev_attr_osc);
+	if (err)
+		goto exit_trim;
 
 	return 0;
 
+exit_trim:
+	device_remove_file(&client->dev, &dev_attr_trim);
+
+exit_devreg:
+	rtc_device_unregister(rs5c372->rtc);
+
 exit_detach:
 	i2c_detach_client(client);
 
 exit_kfree:
-	kfree(client);
+	kfree(rs5c372);
 
 exit:
 	return err;
@@ -256,16 +269,15 @@
 static int rs5c372_detach(struct i2c_client *client)
 {
 	int err;
-	struct rtc_device *rtc = i2c_get_clientdata(client);
+	struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
 
-	if (rtc)
-		rtc_device_unregister(rtc);
+	if (rs5c372->rtc)
+		rtc_device_unregister(rs5c372->rtc);
 
 	if ((err = i2c_detach_client(client)))
 		return err;
 
-	kfree(client);
-
+	kfree(rs5c372);
 	return 0;
 }
 
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 6ef9c62..f50a1b8 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -123,11 +123,18 @@
 		err = PTR_ERR(rtc);
 		return err;
 	}
-	device_create_file(&plat_dev->dev, &dev_attr_irq);
+
+	err = device_create_file(&plat_dev->dev, &dev_attr_irq);
+	if (err)
+		goto err;
 
 	platform_set_drvdata(plat_dev, rtc);
 
 	return 0;
+
+err:
+	rtc_device_unregister(rtc);
+	return err;
 }
 
 static int __devexit test_remove(struct platform_device *plat_dev)
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index 522c697..9a67487 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -562,11 +562,19 @@
 	else
 		dev_err(&client->dev, "couldn't read status\n");
 
-	device_create_file(&client->dev, &dev_attr_atrim);
-	device_create_file(&client->dev, &dev_attr_dtrim);
+	err = device_create_file(&client->dev, &dev_attr_atrim);
+	if (err) goto exit_devreg;
+	err = device_create_file(&client->dev, &dev_attr_dtrim);
+	if (err) goto exit_atrim;
 
 	return 0;
 
+exit_atrim:
+	device_remove_file(&client->dev, &dev_attr_atrim);
+
+exit_devreg:
+	rtc_device_unregister(rtc);
+
 exit_detach:
 	i2c_detach_client(client);
 
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 79ffef6..2af2d9b 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -54,7 +54,7 @@
 static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
 static int dasd_flush_ccw_queue(struct dasd_device *, int);
 static void dasd_tasklet(struct dasd_device *);
-static void do_kick_device(void *data);
+static void do_kick_device(struct work_struct *);
 
 /*
  * SECTION: Operations on the device structure.
@@ -100,7 +100,7 @@
 		     (unsigned long) device);
 	INIT_LIST_HEAD(&device->ccw_queue);
 	init_timer(&device->timer);
-	INIT_WORK(&device->kick_work, do_kick_device, device);
+	INIT_WORK(&device->kick_work, do_kick_device);
 	device->state = DASD_STATE_NEW;
 	device->target = DASD_STATE_NEW;
 
@@ -407,11 +407,9 @@
  * event daemon.
  */
 static void
-do_kick_device(void *data)
+do_kick_device(struct work_struct *work)
 {
-	struct dasd_device *device;
-
-	device = (struct dasd_device *) data;
+	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
 	dasd_change_state(device);
 	dasd_schedule_bh(device);
 	dasd_put_device(device);
@@ -1264,15 +1262,21 @@
 	if (list_empty(&device->ccw_queue))
 		return;
 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
-	if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
-		if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
+	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
+	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
+		if (device->discipline->term_IO(cqr) != 0) {
+			/* Hmpf, try again in 5 sec */
+			dasd_set_timer(device, 5*HZ);
+			DEV_MESSAGE(KERN_ERR, device,
+				    "internal error - timeout (%is) expired "
+				    "for cqr %p, termination failed, "
+				    "retrying in 5s",
+				    (cqr->expires/HZ), cqr);
+		} else {
 			DEV_MESSAGE(KERN_ERR, device,
 				    "internal error - timeout (%is) expired "
 				    "for cqr %p (%i retries left)",
 				    (cqr->expires/HZ), cqr, cqr->retries);
-			if (device->discipline->term_IO(cqr) != 0)
-				/* Hmpf, try again in 1/10 sec */
-				dasd_set_timer(device, 10);
 		}
 	}
 }
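
Note (not part of the patch): the dasd.c hunks above follow the reworked workqueue convention, where the work function receives the struct work_struct pointer itself and recovers the enclosing device with container_of(), instead of being handed a void * at INIT_WORK() time. A minimal, self-contained sketch of that pattern; this is a userspace illustration and the struct/function names here are made up:

#include <stdio.h>
#include <stddef.h>

/* same idea as the kernel's container_of(): recover the enclosing
 * object from a pointer to one of its members */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {			/* stands in for struct work_struct */
	void (*fn)(struct work_item *);
};

struct fake_device {			/* stands in for struct dasd_device */
	int devno;
	struct work_item kick_work;	/* embedded, not a pointer */
};

static void do_kick_device(struct work_item *work)
{
	/* the work function gets only the work item and digs out its owner */
	struct fake_device *device =
		container_of(work, struct fake_device, kick_work);

	printf("kicking device %04x\n", device->devno);
}

int main(void)
{
	struct fake_device device = {
		.devno = 0x0150,
		.kick_work = { .fn = do_kick_device },
	};

	/* a workqueue would make this call when it runs the queued item */
	device.kick_work.fn(&device.kick_work);
	return 0;
}
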
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 91cf971..cf28ccc 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -25,7 +25,7 @@
 
 #include "dasd_int.h"
 
-kmem_cache_t *dasd_page_cache;
+struct kmem_cache *dasd_page_cache;
 EXPORT_SYMBOL_GPL(dasd_page_cache);
 
 /*
@@ -684,21 +684,26 @@
 	      const char *buf, size_t count)
 {
 	struct dasd_devmap *devmap;
-	int ro_flag;
+	int val;
+	char *endp;
 
 	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
 	if (IS_ERR(devmap))
 		return PTR_ERR(devmap);
-	ro_flag = buf[0] == '1';
+
+	val = simple_strtoul(buf, &endp, 0);
+	if (((endp + 1) < (buf + count)) || (val > 1))
+		return -EINVAL;
+
 	spin_lock(&dasd_devmap_lock);
-	if (ro_flag)
+	if (val)
 		devmap->features |= DASD_FEATURE_READONLY;
 	else
 		devmap->features &= ~DASD_FEATURE_READONLY;
 	if (devmap->device)
 		devmap->device->features = devmap->features;
 	if (devmap->device && devmap->device->gdp)
-		set_disk_ro(devmap->device->gdp, ro_flag);
+		set_disk_ro(devmap->device->gdp, val);
 	spin_unlock(&dasd_devmap_lock);
 	return count;
 }
@@ -729,17 +734,22 @@
 {
 	struct dasd_devmap *devmap;
 	ssize_t rc;
-	int use_diag;
+	int val;
+	char *endp;
 
 	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
 	if (IS_ERR(devmap))
 		return PTR_ERR(devmap);
-	use_diag = buf[0] == '1';
+
+	val = simple_strtoul(buf, &endp, 0);
+	if (((endp + 1) < (buf + count)) || (val > 1))
+		return -EINVAL;
+
 	spin_lock(&dasd_devmap_lock);
 	/* Changing diag discipline flag is only allowed in offline state. */
 	rc = count;
 	if (!devmap->device) {
-		if (use_diag)
+		if (val)
 			devmap->features |= DASD_FEATURE_USEDIAG;
 		else
 			devmap->features &= ~DASD_FEATURE_USEDIAG;
@@ -854,14 +864,20 @@
 	       const char *buf, size_t count)
 {
 	struct dasd_devmap *devmap;
-	int rc;
+	int val, rc;
+	char *endp;
 
 	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
 	if (IS_ERR(devmap))
 		return PTR_ERR(devmap);
 	if (!devmap->device)
-		return count;
-	if (buf[0] == '1') {
+		return -ENODEV;
+
+	val = simple_strtoul(buf, &endp, 0);
+	if (((endp + 1) < (buf + count)) || (val > 1))
+		return -EINVAL;
+
+	if (val) {
 		rc = dasd_eer_enable(devmap->device);
 		if (rc)
 			return rc;
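
Note (not part of the patch): the dasd_devmap.c attribute stores above no longer accept any buffer whose first byte happens to be '1'; they parse the value with simple_strtoul() and reject anything other than 0 or 1, tolerating at most one trailing character (normally the newline that echo appends). Roughly the same validation as a standalone helper; this is a userspace sketch using strtoul(), and parse_bool_flag() is a hypothetical name, not a kernel function:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Parse a 0/1 flag from a NUL-terminated, sysfs-style buffer of length
 * count, allowing at most one character (typically '\n') after the digits.
 */
static int parse_bool_flag(const char *buf, size_t count, int *val)
{
	char *endp;
	unsigned long v;

	v = strtoul(buf, &endp, 0);
	if (endp + 1 < buf + count || v > 1)
		return -EINVAL;
	*val = (int) v;
	return 0;
}

int main(void)
{
	int val;

	/* "1\n" is accepted; "10\n" and "on\n" are rejected */
	printf("%d\n", parse_bool_flag("1\n", 2, &val) ? -1 : val);
	printf("%d\n", parse_bool_flag("10\n", 3, &val));
	return 0;
}
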
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 5ecea3e..fdaa471 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1215,7 +1215,7 @@
 		dst = page_address(bv->bv_page) + bv->bv_offset;
 		if (dasd_page_cache) {
 			char *copy = kmem_cache_alloc(dasd_page_cache,
-						      SLAB_DMA | __GFP_NOWARN);
+						      GFP_DMA | __GFP_NOWARN);
 			if (copy && rq_data_dir(req) == WRITE)
 				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
 			if (copy)
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 80926c5..b857fd5 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -308,7 +308,7 @@
 		dst = page_address(bv->bv_page) + bv->bv_offset;
 		if (dasd_page_cache) {
 			char *copy = kmem_cache_alloc(dasd_page_cache,
-						      SLAB_DMA | __GFP_NOWARN);
+						      GFP_DMA | __GFP_NOWARN);
 			if (copy && rq_data_dir(req) == WRITE)
 				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
 			if (copy)
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 9f52004..dc5dd50 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -474,7 +474,7 @@
 extern unsigned int dasd_profile_level;
 extern struct block_device_operations dasd_device_operations;
 
-extern kmem_cache_t *dasd_page_cache;
+extern struct kmem_cache *dasd_page_cache;
 
 struct dasd_ccw_req *
 dasd_kmalloc_request(char *, int, int, struct dasd_device *);
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index d7de175d..c9321b9 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -299,14 +299,14 @@
 	struct raw3215_info *raw = (struct raw3215_info *) __data;
 	unsigned long flags;
 
-	spin_lock_irqsave(raw->lock, flags);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
 	if (raw->flags & RAW3215_TIMER_RUNS) {
 		del_timer(&raw->timer);
 		raw->flags &= ~RAW3215_TIMER_RUNS;
 		raw3215_mk_write_req(raw);
 		raw3215_start_io(raw);
 	}
-	spin_unlock_irqrestore(raw->lock, flags);
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
 }
 
 /*
@@ -355,10 +355,10 @@
 	unsigned long flags;
 
 	raw = (struct raw3215_info *) data;
-	spin_lock_irqsave(raw->lock, flags);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
 	raw3215_mk_write_req(raw);
 	raw3215_try_io(raw);
-	spin_unlock_irqrestore(raw->lock, flags);
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
 	/* Check for pending message from raw3215_irq */
 	if (raw->message != NULL) {
 		printk(raw->message, raw->msg_dstat, raw->msg_cstat);
@@ -512,9 +512,9 @@
 		if (RAW3215_BUFFER_SIZE - raw->count >= length)
 			break;
 		/* there might be another cpu waiting for the lock */
-		spin_unlock(raw->lock);
+		spin_unlock(get_ccwdev_lock(raw->cdev));
 		udelay(100);
-		spin_lock(raw->lock);
+		spin_lock(get_ccwdev_lock(raw->cdev));
 	}
 }
 
@@ -528,7 +528,7 @@
 	int c, count;
 
 	while (length > 0) {
-		spin_lock_irqsave(raw->lock, flags);
+		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
 		count = (length > RAW3215_BUFFER_SIZE) ?
 					     RAW3215_BUFFER_SIZE : length;
 		length -= count;
@@ -555,7 +555,7 @@
 			/* start or queue request */
 			raw3215_try_io(raw);
 		}
-		spin_unlock_irqrestore(raw->lock, flags);
+		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
 	}
 }
 
@@ -568,7 +568,7 @@
 	unsigned long flags;
 	unsigned int length, i;
 
-	spin_lock_irqsave(raw->lock, flags);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
 	if (ch == '\t') {
 		length = TAB_STOP_SIZE - (raw->line_pos%TAB_STOP_SIZE);
 		raw->line_pos += length;
@@ -592,7 +592,7 @@
 		/* start or queue request */
 		raw3215_try_io(raw);
 	}
-	spin_unlock_irqrestore(raw->lock, flags);
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
 }
 
 /*
@@ -604,13 +604,13 @@
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(raw->lock, flags);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
 	if (raw->count > 0) {
 		raw->flags |= RAW3215_FLUSHING;
 		raw3215_try_io(raw);
 		raw->flags &= ~RAW3215_FLUSHING;
 	}
-	spin_unlock_irqrestore(raw->lock, flags);
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
 }
 
 /*
@@ -625,9 +625,9 @@
 		return 0;
 	raw->line_pos = 0;
 	raw->flags |= RAW3215_ACTIVE;
-	spin_lock_irqsave(raw->lock, flags);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
 	raw3215_try_io(raw);
-	spin_unlock_irqrestore(raw->lock, flags);
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
 
 	return 0;
 }
@@ -644,21 +644,21 @@
 	if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FIXED))
 		return;
 	/* Wait for outstanding requests, then free irq */
-	spin_lock_irqsave(raw->lock, flags);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
 	if ((raw->flags & RAW3215_WORKING) ||
 	    raw->queued_write != NULL ||
 	    raw->queued_read != NULL) {
 		raw->flags |= RAW3215_CLOSING;
 		add_wait_queue(&raw->empty_wait, &wait);
 		set_current_state(TASK_INTERRUPTIBLE);
-		spin_unlock_irqrestore(raw->lock, flags);
+		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
 		schedule();
-		spin_lock_irqsave(raw->lock, flags);
+		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
 		remove_wait_queue(&raw->empty_wait, &wait);
 		set_current_state(TASK_RUNNING);
 		raw->flags &= ~(RAW3215_ACTIVE | RAW3215_CLOSING);
 	}
-	spin_unlock_irqrestore(raw->lock, flags);
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
 }
 
 static int
@@ -686,7 +686,6 @@
 	}
 
 	raw->cdev = cdev;
-	raw->lock = get_ccwdev_lock(cdev);
 	raw->inbuf = (char *) raw + sizeof(struct raw3215_info);
 	memset(raw, 0, sizeof(struct raw3215_info));
 	raw->buffer = (char *) kmalloc(RAW3215_BUFFER_SIZE,
@@ -809,9 +808,9 @@
 	unsigned long flags;
 
 	raw = raw3215[0];  /* console 3215 is the first one */
-	spin_lock_irqsave(raw->lock, flags);
+	spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
 	raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
-	spin_unlock_irqrestore(raw->lock, flags);
+	spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
 }
 
 static int __init 
@@ -873,7 +872,6 @@
 	raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
 	raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
 	raw->cdev = cdev;
-	raw->lock = get_ccwdev_lock(cdev);
 	cdev->dev.driver_data = raw;
 	cdev->handler = raw3215_irq;
 
@@ -1066,10 +1064,10 @@
 
 	raw = (struct raw3215_info *) tty->driver_data;
 	if (raw->flags & RAW3215_THROTTLED) {
-		spin_lock_irqsave(raw->lock, flags);
+		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
 		raw->flags &= ~RAW3215_THROTTLED;
 		raw3215_try_io(raw);
-		spin_unlock_irqrestore(raw->lock, flags);
+		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
 	}
 }
 
@@ -1096,10 +1094,10 @@
 
 	raw = (struct raw3215_info *) tty->driver_data;
 	if (raw->flags & RAW3215_STOPPED) {
-		spin_lock_irqsave(raw->lock, flags);
+		spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
 		raw->flags &= ~RAW3215_STOPPED;
 		raw3215_try_io(raw);
-		spin_unlock_irqrestore(raw->lock, flags);
+		spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
 	}
 }
 
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 32004aa..ffa9282 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -19,52 +19,17 @@
 
 #include "sclp.h"
 
-
-#ifdef CONFIG_SMP
-/* Signal completion of shutdown process. All CPUs except the first to enter
- * this function: go to stopped state. First CPU: wait until all other
- * CPUs are in stopped or check stop state. Afterwards, load special PSW
- * to indicate completion. */
-static void
-do_load_quiesce_psw(void * __unused)
-{
-	static atomic_t cpuid = ATOMIC_INIT(-1);
-	psw_t quiesce_psw;
-	int cpu;
-
-	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
-		signal_processor(smp_processor_id(), sigp_stop);
-	/* Wait for all other cpus to enter stopped state */
-	for_each_online_cpu(cpu) {
-		if (cpu == smp_processor_id())
-			continue;
-		while(!smp_cpu_not_running(cpu))
-			cpu_relax();
-	}
-	/* Quiesce the last cpu with the special psw */
-	quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
-	quiesce_psw.addr = 0xfff;
-	__load_psw(quiesce_psw);
-}
-
-/* Shutdown handler. Perform shutdown function on all CPUs. */
-static void
-do_machine_quiesce(void)
-{
-	on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
-}
-#else
 /* Shutdown handler. Signal completion of shutdown by loading special PSW. */
 static void
 do_machine_quiesce(void)
 {
 	psw_t quiesce_psw;
 
+	smp_send_stop();
 	quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
 	quiesce_psw.addr = 0xfff;
 	__load_psw(quiesce_psw);
 }
-#endif
 
 /* Handler for quiesce event. Start shutdown procedure. */
 static void
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 2d78f0f..dbfb77b 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -251,6 +251,8 @@
 		cc = cio_clear(sch);
 		if (cc == -ENODEV)
 			goto out_unreg;
+		/* Request retry of internal operation. */
+		device_set_intretry(sch);
 		/* Call handler. */
 		if (sch->driver && sch->driver->termination)
 			sch->driver->termination(&sch->dev);
@@ -711,9 +713,6 @@
 {
 	int cc;
 
-	if (!device_is_online(sch))
-		/* cio could be doing I/O. */
-		return 0;
 	cc = stsch(sch->schid, &sch->schib);
 	if (cc)
 		return 0;
@@ -722,6 +721,26 @@
 	return 0;
 }
 
+static void terminate_internal_io(struct subchannel *sch)
+{
+	if (cio_clear(sch)) {
+		/* Recheck device in case clear failed. */
+		sch->lpm = 0;
+		if (device_trigger_verify(sch) != 0) {
+			if(css_enqueue_subchannel_slow(sch->schid)) {
+				css_clear_subchannel_slow_list();
+				need_rescan = 1;
+			}
+		}
+		return;
+	}
+	/* Request retry of internal operation. */
+	device_set_intretry(sch);
+	/* Call handler. */
+	if (sch->driver && sch->driver->termination)
+		sch->driver->termination(&sch->dev);
+}
+
 static inline void
 __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
 {
@@ -744,20 +763,26 @@
 				device_trigger_reprobe(sch);
 			else if (sch->driver && sch->driver->verify)
 				sch->driver->verify(&sch->dev);
-		} else {
-			sch->opm &= ~(0x80 >> chp);
-			sch->lpm &= ~(0x80 >> chp);
-			if (check_for_io_on_path(sch, chp))
+			break;
+		}
+		sch->opm &= ~(0x80 >> chp);
+		sch->lpm &= ~(0x80 >> chp);
+		if (check_for_io_on_path(sch, chp)) {
+			if (device_is_online(sch))
 				/* Path verification is done after killing. */
 				device_kill_io(sch);
-			else if (!sch->lpm) {
+			else
+				/* Kill and retry internal I/O. */
+				terminate_internal_io(sch);
+		} else if (!sch->lpm) {
+			if (device_trigger_verify(sch) != 0) {
 				if (css_enqueue_subchannel_slow(sch->schid)) {
 					css_clear_subchannel_slow_list();
 					need_rescan = 1;
 				}
-			} else if (sch->driver && sch->driver->verify)
-				sch->driver->verify(&sch->dev);
-		}
+			}
+		} else if (sch->driver && sch->driver->verify)
+			sch->driver->verify(&sch->dev);
 		break;
 	}
 	spin_unlock_irqrestore(&sch->lock, flags);
@@ -1465,41 +1490,6 @@
 	return desc;
 }
 
-static int reset_channel_path(struct channel_path *chp)
-{
-	int cc;
-
-	cc = rchp(chp->id);
-	switch (cc) {
-	case 0:
-		return 0;
-	case 2:
-		return -EBUSY;
-	default:
-		return -ENODEV;
-	}
-}
-
-static void reset_channel_paths_css(struct channel_subsystem *css)
-{
-	int i;
-
-	for (i = 0; i <= __MAX_CHPID; i++) {
-		if (css->chps[i])
-			reset_channel_path(css->chps[i]);
-	}
-}
-
-void cio_reset_channel_paths(void)
-{
-	int i;
-
-	for (i = 0; i <= __MAX_CSSID; i++) {
-		if (css[i] && css[i]->valid)
-			reset_channel_paths_css(css[i]);
-	}
-}
-
 static int __init
 chsc_alloc_sei_area(void)
 {
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 8936e46..20aee27 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -21,6 +21,7 @@
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
 #include <asm/setup.h>
+#include <asm/reset.h>
 #include "airq.h"
 #include "cio.h"
 #include "css.h"
@@ -28,6 +29,7 @@
 #include "ioasm.h"
 #include "blacklist.h"
 #include "cio_debug.h"
+#include "../s390mach.h"
 
 debug_info_t *cio_debug_msg_id;
 debug_info_t *cio_debug_trace_id;
@@ -841,26 +843,12 @@
 	return -EBUSY;
 }
 
-struct sch_match_id {
-	struct subchannel_id schid;
-	struct ccw_dev_id devid;
-	int rc;
-};
-
-static int __shutdown_subchannel_easy_and_match(struct subchannel_id schid,
-	void *data)
+static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
 {
 	struct schib schib;
-	struct sch_match_id *match_id = data;
 
 	if (stsch_err(schid, &schib))
 		return -ENXIO;
-	if (match_id && schib.pmcw.dnv &&
-		(schib.pmcw.dev == match_id->devid.devno) &&
-		(schid.ssid == match_id->devid.ssid)) {
-		match_id->schid = schid;
-		match_id->rc = 0;
-	}
 	if (!schib.pmcw.ena)
 		return 0;
 	switch(__disable_subchannel_easy(schid, &schib)) {
@@ -876,27 +864,111 @@
 	return 0;
 }
 
-static int clear_all_subchannels_and_match(struct ccw_dev_id *devid,
-	struct subchannel_id *schid)
+static atomic_t chpid_reset_count;
+
+static void s390_reset_chpids_mcck_handler(void)
+{
+	struct crw crw;
+	struct mci *mci;
+
+	/* Check for pending channel report word. */
+	mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
+	if (!mci->cp)
+		return;
+	/* Process channel report words. */
+	while (stcrw(&crw) == 0) {
+		/* Check for responses to RCHP. */
+		if (crw.slct && crw.rsc == CRW_RSC_CPATH)
+			atomic_dec(&chpid_reset_count);
+	}
+}
+
+#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
+static void css_reset(void)
+{
+	int i, ret;
+	unsigned long long timeout;
+
+	/* Reset subchannels. */
+	for_each_subchannel(__shutdown_subchannel_easy,  NULL);
+	/* Reset channel paths. */
+	s390_reset_mcck_handler = s390_reset_chpids_mcck_handler;
+	/* Enable channel report machine checks. */
+	__ctl_set_bit(14, 28);
+	/* Temporarily reenable machine checks. */
+	local_mcck_enable();
+	for (i = 0; i <= __MAX_CHPID; i++) {
+		ret = rchp(i);
+		if ((ret == 0) || (ret == 2))
+			/*
+			 * rchp either succeeded, or another rchp is already
+			 * in progress. In either case, we'll get a crw.
+			 */
+			atomic_inc(&chpid_reset_count);
+	}
+	/* Wait for machine check for all channel paths. */
+	timeout = get_clock() + (RCHP_TIMEOUT << 12);
+	while (atomic_read(&chpid_reset_count) != 0) {
+		if (get_clock() > timeout)
+			break;
+		cpu_relax();
+	}
+	/* Disable machine checks again. */
+	local_mcck_disable();
+	/* Disable channel report machine checks. */
+	__ctl_clear_bit(14, 28);
+	s390_reset_mcck_handler = NULL;
+}
+
+static struct reset_call css_reset_call = {
+	.fn = css_reset,
+};
+
+static int __init init_css_reset_call(void)
+{
+	atomic_set(&chpid_reset_count, 0);
+	register_reset_call(&css_reset_call);
+	return 0;
+}
+
+arch_initcall(init_css_reset_call);
+
+struct sch_match_id {
+	struct subchannel_id schid;
+	struct ccw_dev_id devid;
+	int rc;
+};
+
+static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
+{
+	struct schib schib;
+	struct sch_match_id *match_id = data;
+
+	if (stsch_err(schid, &schib))
+		return -ENXIO;
+	if (schib.pmcw.dnv &&
+	    (schib.pmcw.dev == match_id->devid.devno) &&
+	    (schid.ssid == match_id->devid.ssid)) {
+		match_id->schid = schid;
+		match_id->rc = 0;
+		return 1;
+	}
+	return 0;
+}
+
+static int reipl_find_schid(struct ccw_dev_id *devid,
+			    struct subchannel_id *schid)
 {
 	struct sch_match_id match_id;
 
 	match_id.devid = *devid;
 	match_id.rc = -ENODEV;
-	local_irq_disable();
-	for_each_subchannel(__shutdown_subchannel_easy_and_match, &match_id);
+	for_each_subchannel(__reipl_subchannel_match, &match_id);
 	if (match_id.rc == 0)
 		*schid = match_id.schid;
 	return match_id.rc;
 }
 
-
-void clear_all_subchannels(void)
-{
-	local_irq_disable();
-	for_each_subchannel(__shutdown_subchannel_easy_and_match, NULL);
-}
-
 extern void do_reipl_asm(__u32 schid);
 
 /* Make sure all subchannels are quiet before we re-ipl an lpar. */
@@ -904,9 +976,9 @@
 {
 	struct subchannel_id schid;
 
-	if (clear_all_subchannels_and_match(devid, &schid))
+	s390_reset_system();
+	if (reipl_find_schid(devid, &schid) != 0)
 		panic("IPL Device not found\n");
-	cio_reset_channel_paths();
 	do_reipl_asm(*((__u32*)&schid));
 }
 
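Note (not part of the patch): on the timeout arithmetic in css_reset() above, get_clock() returns the s390 TOD clock, in which bit 51 corresponds to one microsecond, so a microsecond value is converted to TOD units by shifting left by 12 (multiplying by 4096). A trivial standalone check of that conversion, for illustration only:

#include <assert.h>
#include <stdio.h>

#define USEC_PER_SEC	1000000ULL
#define RCHP_TIMEOUT	(30 * USEC_PER_SEC)	/* as in css_reset() */

int main(void)
{
	/* 1 usec == 1 << 12 TOD units, since TOD bit 51 is the usec bit */
	unsigned long long tod_units = RCHP_TIMEOUT << 12;

	assert(tod_units == RCHP_TIMEOUT * 4096);
	printf("30s timeout = %llu TOD clock units\n", tod_units);
	return 0;
}
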
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index ad7f7e1..26cf2f5 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -334,7 +334,7 @@
 static DEFINE_SPINLOCK(slow_subchannel_lock);
 
 static void
-css_trigger_slow_path(void)
+css_trigger_slow_path(struct work_struct *unused)
 {
 	CIO_TRACE_EVENT(4, "slowpath");
 
@@ -359,8 +359,7 @@
 	spin_unlock_irq(&slow_subchannel_lock);
 }
 
-typedef void (*workfunc)(void *);
-DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
+DECLARE_WORK(slow_path_work, css_trigger_slow_path);
 struct workqueue_struct *slow_path_wq;
 
 /* Reprobe subchannel if unregistered. */
@@ -397,7 +396,7 @@
 }
 
 /* Work function used to reprobe all unregistered subchannels. */
-static void reprobe_all(void *data)
+static void reprobe_all(struct work_struct *unused)
 {
 	int ret;
 
@@ -413,7 +412,7 @@
 		      need_reprobe);
 }
 
-DECLARE_WORK(css_reprobe_work, reprobe_all, NULL);
+DECLARE_WORK(css_reprobe_work, reprobe_all);
 
 /* Schedule reprobing of all unregistered subchannels. */
 void css_schedule_reprobe(void)
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 4c2ff83..9ff064e 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -94,6 +94,7 @@
 		unsigned int donotify:1;    /* call notify function */
 		unsigned int recog_done:1;  /* dev. recog. complete */
 		unsigned int fake_irb:1;    /* deliver faked irb */
+		unsigned int intretry:1;    /* retry internal operation */
 	} __attribute__((packed)) flags;
 	unsigned long intparm;	/* user interruption parameter */
 	struct qdio_irq *qdio_data;
@@ -171,6 +172,8 @@
 /* Helper functions for vary on/off. */
 int device_is_online(struct subchannel *);
 void device_kill_io(struct subchannel *);
+void device_set_intretry(struct subchannel *sch);
+int device_trigger_verify(struct subchannel *sch);
 
 /* Machine check helper function. */
 void device_kill_pending_timer(struct subchannel *);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 39c98f9..d3d3716 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -687,8 +687,20 @@
 	cdev = data;
 	sch = to_subchannel(cdev->dev.parent);
 
+	/*
+	 * io_subchannel_register() will also be called after device
+	 * recognition has been done for a boxed device (which will already
+	 * be registered). We need to reprobe since we may now have sense id
+	 * information.
+	 */
 	if (klist_node_attached(&cdev->dev.knode_parent)) {
-		bus_rescan_devices(&ccw_bus_type);
+		if (!cdev->drv) {
+			ret = device_reprobe(&cdev->dev);
+			if (ret)
+				/* We can't do much here. */
+				dev_info(&cdev->dev, "device_reprobe() returned"
+					 " %d\n", ret);
+		}
 		goto out;
 	}
 	/* make it known to the system */
@@ -948,6 +960,9 @@
 	cdev = dev->driver_data;
 	if (!cdev)
 		return;
+	/* Internal I/O will be retried by the interrupt handler. */
+	if (cdev->private->flags.intretry)
+		return;
 	cdev->private->state = DEV_STATE_CLEAR_VERIFY;
 	if (cdev->handler)
 		cdev->handler(cdev, cdev->private->intparm,
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index de3d085..09c7672 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -59,6 +59,27 @@
 	cdev->private->state = DEV_STATE_DISCONNECTED;
 }
 
+void device_set_intretry(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	cdev = sch->dev.driver_data;
+	if (!cdev)
+		return;
+	cdev->private->flags.intretry = 1;
+}
+
+int device_trigger_verify(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	cdev = sch->dev.driver_data;
+	if (!cdev || !cdev->online)
+		return -EINVAL;
+	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+	return 0;
+}
+
 /*
  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
  */
@@ -893,6 +914,12 @@
 	 * had killed the original request.
 	 */
 	if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
+		/* Retry Basic Sense if requested. */
+		if (cdev->private->flags.intretry) {
+			cdev->private->flags.intretry = 0;
+			ccw_device_do_sense(cdev, irb);
+			return;
+		}
 		cdev->private->flags.dosense = 0;
 		memset(&cdev->private->irb, 0, sizeof(struct irb));
 		ccw_device_accumulate_irb(cdev, irb);
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index a74785b..f172759 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -191,6 +191,8 @@
 		if ((sch->opm & cdev->private->imask) != 0 &&
 		    cdev->private->iretry > 0) {
 			cdev->private->iretry--;
+			/* Reset internal retry indication. */
+			cdev->private->flags.intretry = 0;
 			ret = cio_start (sch, cdev->private->iccws,
 					 cdev->private->imask);
 			/* ret is 0, -EBUSY, -EACCES or -ENODEV */
@@ -237,8 +239,14 @@
 		return 0; /* Success */
 	}
 	/* Check the error cases. */
-	if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+	if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+		/* Retry Sense ID if requested. */
+		if (cdev->private->flags.intretry) {
+			cdev->private->flags.intretry = 0;
+			return -EAGAIN;
+		}
 		return -ETIME;
+	}
 	if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) {
 		/*
 		 * if the device doesn't support the SenseID
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 2975ce8..cb1879a 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -71,6 +71,8 @@
 		ccw->cda = (__u32) __pa (&cdev->private->pgid[i]);
 		if (cdev->private->iretry > 0) {
 			cdev->private->iretry--;
+			/* Reset internal retry indication. */
+			cdev->private->flags.intretry = 0;
 			ret = cio_start (sch, cdev->private->iccws, 
 					 cdev->private->imask);
 			/* ret is 0, -EBUSY, -EACCES or -ENODEV */
@@ -122,8 +124,14 @@
 
 	sch = to_subchannel(cdev->dev.parent);
 	irb = &cdev->private->irb;
-	if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+	if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+		/* Retry Sense PGID if requested. */
+		if (cdev->private->flags.intretry) {
+			cdev->private->flags.intretry = 0;
+			return -EAGAIN;
+		}
 		return -ETIME;
+	}
 	if (irb->esw.esw0.erw.cons &&
 	    (irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) {
 		/*
@@ -253,6 +261,8 @@
 	ret = -EACCES;
 	if (cdev->private->iretry > 0) {
 		cdev->private->iretry--;
+		/* Reset internal retry indication. */
+		cdev->private->flags.intretry = 0;
 		ret = cio_start (sch, cdev->private->iccws,
 				 cdev->private->imask);
 		/* We expect an interrupt in case of success or busy
@@ -293,6 +303,8 @@
 	ret = -EACCES;
 	if (cdev->private->iretry > 0) {
 		cdev->private->iretry--;
+		/* Reset internal retry indication. */
+		cdev->private->flags.intretry = 0;
 		ret = cio_start (sch, cdev->private->iccws,
 				 cdev->private->imask);
 		/* We expect an interrupt in case of success or busy
@@ -321,8 +333,14 @@
 
 	sch = to_subchannel(cdev->dev.parent);
 	irb = &cdev->private->irb;
-	if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+	if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+		/* Retry Set PGID if requested. */
+		if (cdev->private->flags.intretry) {
+			cdev->private->flags.intretry = 0;
+			return -EAGAIN;
+		}
 		return -ETIME;
+	}
 	if (irb->esw.esw0.erw.cons) {
 		if (irb->ecw[0] & SNS0_CMD_REJECT)
 			return -EOPNOTSUPP;
@@ -360,8 +378,14 @@
 
 	sch = to_subchannel(cdev->dev.parent);
 	irb = &cdev->private->irb;
-	if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+	if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+		/* Retry NOP if requested. */
+		if (cdev->private->flags.intretry) {
+			cdev->private->flags.intretry = 0;
+			return -EAGAIN;
+		}
 		return -ETIME;
+	}
 	if (irb->scsw.cc == 3) {
 		CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel 0.%x.%04x,"
 			      " lpm %02X, became 'not operational'\n",
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 3f7cbce..bdcf930 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -319,6 +319,9 @@
 	sch->sense_ccw.count = SENSE_MAX_COUNT;
 	sch->sense_ccw.flags = CCW_FLAG_SLI;
 
+	/* Reset internal retry indication. */
+	cdev->private->flags.intretry = 0;
+
 	return cio_start (sch, &sch->sense_ccw, 0xff);
 }
 
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 476aa1d..8d5fa1b 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -481,7 +481,7 @@
        unsigned char state = 0;
        struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
 
-	if (!atomic_swap(&q->polling,0)) 
+	if (!atomic_xchg(&q->polling,0))
 		return 1;
 
 	QDIO_DBF_TEXT4(0,trace,"stoppoll");
@@ -1964,8 +1964,8 @@
 		QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
 
 		QDIO_PRINT_WARN("sense data available on qdio channel.\n");
-		HEXDUMP16(WARN,"irb: ",irb);
-		HEXDUMP16(WARN,"sense data: ",irb->ecw);
+		QDIO_HEXDUMP16(WARN,"irb: ",irb);
+		QDIO_HEXDUMP16(WARN,"sense data: ",irb->ecw);
 	}
 		
 }
@@ -3425,7 +3425,7 @@
 	
 	if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
 	    (callflags&QDIO_FLAG_UNDER_INTERRUPT))
-		atomic_swap(&q->polling,0);
+		atomic_xchg(&q->polling,0);
 	
 	if (used_elements) 
 		return;
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 49bb9e3..42927c1 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -236,7 +236,7 @@
 #define QDIO_PRINT_EMERG(x...) do { } while (0)
 #endif
 
-#define HEXDUMP16(importance,header,ptr) \
+#define QDIO_HEXDUMP16(importance,header,ptr) \
 QDIO_PRINT_##importance(header "%02x %02x %02x %02x  " \
 			"%02x %02x %02x %02x  %02x %02x %02x %02x  " \
 			"%02x %02x %02x %02x\n",*(((char*)ptr)), \
@@ -429,8 +429,6 @@
 };
 #endif /* QDIO_PERFORMANCE_STATS */
 
-#define atomic_swap(a,b) xchg((int*)a.counter,b)
-
 /* unlikely as the later the better */
 #define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q)
 #define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 79d89c3..e4dc947 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -37,7 +37,7 @@
 #include "ap_bus.h"
 
 /* Some prototypes. */
-static void ap_scan_bus(void *);
+static void ap_scan_bus(struct work_struct *);
 static void ap_poll_all(unsigned long);
 static void ap_poll_timeout(unsigned long);
 static int ap_poll_thread_start(void);
@@ -71,7 +71,7 @@
 static struct workqueue_struct *ap_work_queue;
 static struct timer_list ap_config_timer;
 static int ap_config_time = AP_CONFIG_TIME;
-static DECLARE_WORK(ap_config_work, ap_scan_bus, NULL);
+static DECLARE_WORK(ap_config_work, ap_scan_bus);
 
 /**
  * Tasklet & timer for AP request polling.
@@ -431,7 +431,15 @@
 			   ap_dev->device_type);
 	if (buffer_size - length <= 0)
 		return -ENOMEM;
-	envp[1] = 0;
+	buffer += length;
+	buffer_size -= length;
+	/* Add MODALIAS= */
+	envp[1] = buffer;
+	length = scnprintf(buffer, buffer_size, "MODALIAS=ap:t%02X",
+			   ap_dev->device_type);
+	if (buffer_size - length <= 0)
+		return -ENOMEM;
+	envp[2] = NULL;
 	return 0;
 }
 
@@ -724,7 +732,7 @@
 	kfree(ap_dev);
 }
 
-static void ap_scan_bus(void *data)
+static void ap_scan_bus(struct work_struct *unused)
 {
 	struct ap_device *ap_dev;
 	struct device *dev;
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 969be46..1ee9a6f 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -29,7 +29,7 @@
 #define CLAW_COMPLETE           0xff   /* flag to indicate i/o completed */
 
 /*-----------------------------------------------------*
-*     CLAW control comand code                         *
+*     CLAW control command code                        *
 *------------------------------------------------------*/
 
 #define SYSTEM_VALIDATE_REQUEST   0x01  /* System Validate request */
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 16ac68c..e5665b6 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -54,6 +54,8 @@
 #error Cannot compile lcs.c without some net devices switched on.
 #endif
 
+#define PRINTK_HEADER		" lcs: "
+
 /**
  * initialization string for output
  */
@@ -65,7 +67,7 @@
  * Some prototypes.
  */
 static void lcs_tasklet(unsigned long);
-static void lcs_start_kernel_thread(struct lcs_card *card);
+static void lcs_start_kernel_thread(struct work_struct *);
 static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
 static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
 static int lcs_recovery(void *ptr);
@@ -120,7 +122,7 @@
 			kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
 		if (channel->iob[cnt].data == NULL)
 			break;
-		channel->iob[cnt].state = BUF_STATE_EMPTY;
+		channel->iob[cnt].state = LCS_BUF_STATE_EMPTY;
 	}
 	if (cnt < LCS_NUM_BUFFS) {
 		/* Not all io buffers could be allocated. */
@@ -236,7 +238,7 @@
 		((struct lcs_header *)
 		 card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
 		card->read.iob[cnt].callback = lcs_get_frames_cb;
-		card->read.iob[cnt].state = BUF_STATE_READY;
+		card->read.iob[cnt].state = LCS_BUF_STATE_READY;
 		card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
 	}
 	card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
@@ -247,7 +249,7 @@
 	card->read.ccws[LCS_NUM_BUFFS].cda =
 		(__u32) __pa(card->read.ccws);
 	/* Set initial state of the read channel. */
-	card->read.state = CH_STATE_INIT;
+	card->read.state = LCS_CH_STATE_INIT;
 
 	card->read.io_idx = 0;
 	card->read.buf_idx = 0;
@@ -294,7 +296,7 @@
 	card->write.ccws[LCS_NUM_BUFFS].cda =
 		(__u32) __pa(card->write.ccws);
 	/* Set initial state of the write channel. */
-	card->read.state = CH_STATE_INIT;
+	card->read.state = LCS_CH_STATE_INIT;
 
 	card->write.io_idx = 0;
 	card->write.buf_idx = 0;
@@ -496,7 +498,7 @@
 			      channel->ccws + channel->io_idx, 0, 0,
 			      DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
 	if (rc == 0)
-		channel->state = CH_STATE_RUNNING;
+		channel->state = LCS_CH_STATE_RUNNING;
 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
 	if (rc) {
 		LCS_DBF_TEXT_(4,trace,"essh%s", channel->ccwdev->dev.bus_id);
@@ -520,8 +522,8 @@
 		LCS_DBF_TEXT_(4,trace,"ecsc%s", channel->ccwdev->dev.bus_id);
 		return rc;
 	}
-	wait_event(channel->wait_q, (channel->state == CH_STATE_CLEARED));
-	channel->state = CH_STATE_STOPPED;
+	wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED));
+	channel->state = LCS_CH_STATE_STOPPED;
 	return rc;
 }
 
@@ -535,11 +537,11 @@
 	unsigned long flags;
 	int rc;
 
-	if (channel->state == CH_STATE_STOPPED)
+	if (channel->state == LCS_CH_STATE_STOPPED)
 		return 0;
 	LCS_DBF_TEXT(4,trace,"haltsch");
 	LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id);
-	channel->state = CH_STATE_INIT;
+	channel->state = LCS_CH_STATE_INIT;
 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
 	rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -548,7 +550,7 @@
 		return rc;
 	}
 	/* Asynchronous halt initiated. Wait for its completion. */
-	wait_event(channel->wait_q, (channel->state == CH_STATE_HALTED));
+	wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED));
 	lcs_clear_channel(channel);
 	return 0;
 }
@@ -596,8 +598,8 @@
 	LCS_DBF_TEXT(5, trace, "_getbuff");
 	index = channel->io_idx;
 	do {
-		if (channel->iob[index].state == BUF_STATE_EMPTY) {
-			channel->iob[index].state = BUF_STATE_LOCKED;
+		if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) {
+			channel->iob[index].state = LCS_BUF_STATE_LOCKED;
 			return channel->iob + index;
 		}
 		index = (index + 1) & (LCS_NUM_BUFFS - 1);
@@ -626,7 +628,7 @@
 {
 	int rc;
 
-	if (channel->state != CH_STATE_SUSPENDED)
+	if (channel->state != LCS_CH_STATE_SUSPENDED)
 		return 0;
 	if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
 		return 0;
@@ -636,7 +638,7 @@
 		LCS_DBF_TEXT_(4, trace, "ersc%s", channel->ccwdev->dev.bus_id);
 		PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc);
 	} else
-		channel->state = CH_STATE_RUNNING;
+		channel->state = LCS_CH_STATE_RUNNING;
 	return rc;
 
 }
@@ -670,10 +672,10 @@
 	int index, rc;
 
 	LCS_DBF_TEXT(5, trace, "rdybuff");
-	BUG_ON(buffer->state != BUF_STATE_LOCKED &&
-	       buffer->state != BUF_STATE_PROCESSED);
+	BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
+	       buffer->state != LCS_BUF_STATE_PROCESSED);
 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
-	buffer->state = BUF_STATE_READY;
+	buffer->state = LCS_BUF_STATE_READY;
 	index = buffer - channel->iob;
 	/* Set length. */
 	channel->ccws[index].count = buffer->count;
@@ -695,8 +697,8 @@
 	int index, prev, next;
 
 	LCS_DBF_TEXT(5, trace, "prcsbuff");
-	BUG_ON(buffer->state != BUF_STATE_READY);
-	buffer->state = BUF_STATE_PROCESSED;
+	BUG_ON(buffer->state != LCS_BUF_STATE_READY);
+	buffer->state = LCS_BUF_STATE_PROCESSED;
 	index = buffer - channel->iob;
 	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
 	next = (index + 1) & (LCS_NUM_BUFFS - 1);
@@ -704,7 +706,7 @@
 	channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
 	channel->ccws[index].flags &= ~CCW_FLAG_PCI;
 	/* Check the suspend bit of the previous buffer. */
-	if (channel->iob[prev].state == BUF_STATE_READY) {
+	if (channel->iob[prev].state == LCS_BUF_STATE_READY) {
 		/*
 		 * Previous buffer is in state ready. It might have
 		 * happened in lcs_ready_buffer that the suspend bit
@@ -727,10 +729,10 @@
 	unsigned long flags;
 
 	LCS_DBF_TEXT(5, trace, "relbuff");
-	BUG_ON(buffer->state != BUF_STATE_LOCKED &&
-	       buffer->state != BUF_STATE_PROCESSED);
+	BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
+	       buffer->state != LCS_BUF_STATE_PROCESSED);
 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
-	buffer->state = BUF_STATE_EMPTY;
+	buffer->state = LCS_BUF_STATE_EMPTY;
 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
 }
 
@@ -1147,7 +1149,7 @@
  * get mac address for the relevant Multicast address
  */
 static void
-lcs_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
+lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
 {
 	LCS_DBF_TEXT(4,trace, "getmac");
 	if (dev->type == ARPHRD_IEEE802_TR)
@@ -1264,7 +1266,7 @@
 	netif_carrier_off(card->dev);
 	netif_tx_disable(card->dev);
 	wait_event(card->write.wait_q,
-			(card->write.state != CH_STATE_RUNNING));
+			(card->write.state != LCS_CH_STATE_RUNNING));
 	lcs_fix_multicast_list(card);
 	if (card->state == DEV_STATE_UP) {
 		netif_carrier_on(card->dev);
@@ -1404,7 +1406,7 @@
 		}
 	}
 	/* How far in the ccw chain have we processed? */
-	if ((channel->state != CH_STATE_INIT) &&
+	if ((channel->state != LCS_CH_STATE_INIT) &&
 	    (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
 		index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa)
 			- channel->ccws;
@@ -1424,20 +1426,20 @@
 	    (irb->scsw.dstat & DEV_STAT_CHN_END) ||
 	    (irb->scsw.dstat & DEV_STAT_UNIT_CHECK))
 		/* Mark channel as stopped. */
-		channel->state = CH_STATE_STOPPED;
+		channel->state = LCS_CH_STATE_STOPPED;
 	else if (irb->scsw.actl & SCSW_ACTL_SUSPENDED)
 		/* CCW execution stopped on a suspend bit. */
-		channel->state = CH_STATE_SUSPENDED;
+		channel->state = LCS_CH_STATE_SUSPENDED;
 	if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) {
 		if (irb->scsw.cc != 0) {
 			ccw_device_halt(channel->ccwdev, (addr_t) channel);
 			return;
 		}
 		/* The channel has been stopped by halt_IO. */
-		channel->state = CH_STATE_HALTED;
+		channel->state = LCS_CH_STATE_HALTED;
 	}
 	if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
-		channel->state = CH_STATE_CLEARED;
+		channel->state = LCS_CH_STATE_CLEARED;
 	}
 	/* Do the rest in the tasklet. */
 	tasklet_schedule(&channel->irq_tasklet);
@@ -1461,7 +1463,7 @@
 	/* Check for processed buffers. */
 	iob = channel->iob;
 	buf_idx = channel->buf_idx;
-	while (iob[buf_idx].state == BUF_STATE_PROCESSED) {
+	while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) {
 		/* Do the callback thing. */
 		if (iob[buf_idx].callback != NULL)
 			iob[buf_idx].callback(channel, iob + buf_idx);
@@ -1469,12 +1471,12 @@
 	}
 	channel->buf_idx = buf_idx;
 
-	if (channel->state == CH_STATE_STOPPED)
+	if (channel->state == LCS_CH_STATE_STOPPED)
 		// FIXME: what if rc != 0 ??
 		rc = lcs_start_channel(channel);
 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
-	if (channel->state == CH_STATE_SUSPENDED &&
-	    channel->iob[channel->io_idx].state == BUF_STATE_READY) {
+	if (channel->state == LCS_CH_STATE_SUSPENDED &&
+	    channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY) {
 		// FIXME: what if rc != 0 ??
 		rc = __lcs_resume_channel(channel);
 	}
@@ -1689,8 +1691,8 @@
 		card->state = DEV_STATE_UP;
 	} else {
 		card->state = DEV_STATE_DOWN;
-		card->write.state = CH_STATE_INIT;
-		card->read.state =  CH_STATE_INIT;
+		card->write.state = LCS_CH_STATE_INIT;
+		card->read.state =  LCS_CH_STATE_INIT;
 	}
 	return rc;
 }
@@ -1705,8 +1707,8 @@
 
 	LCS_DBF_TEXT(3, setup, "stopcard");
 
-	if (card->read.state != CH_STATE_STOPPED &&
-	    card->write.state != CH_STATE_STOPPED &&
+	if (card->read.state != LCS_CH_STATE_STOPPED &&
+	    card->write.state != LCS_CH_STATE_STOPPED &&
 	    card->state == DEV_STATE_UP) {
 		lcs_clear_multicast_list(card);
 		rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP);
@@ -1722,8 +1724,9 @@
  * Kernel Thread helper functions for LGW initiated commands
  */
 static void
-lcs_start_kernel_thread(struct lcs_card *card)
+lcs_start_kernel_thread(struct work_struct *work)
 {
+	struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter);
 	LCS_DBF_TEXT(5, trace, "krnthrd");
 	if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD))
 		kernel_thread(lcs_recovery, (void *) card, SIGCHLD);
@@ -1871,7 +1874,7 @@
 	netif_tx_disable(dev);
 	dev->flags &= ~IFF_UP;
 	wait_event(card->write.wait_q,
-		(card->write.state != CH_STATE_RUNNING));
+		(card->write.state != LCS_CH_STATE_RUNNING));
 	rc = lcs_stopcard(card);
 	if (rc)
 		PRINT_ERR("Try it again!\n ");
@@ -2051,8 +2054,7 @@
 	ccwgdev->cdev[0]->handler = lcs_irq;
 	ccwgdev->cdev[1]->handler = lcs_irq;
 	card->gdev = ccwgdev;
-	INIT_WORK(&card->kernel_thread_starter,
-		  (void *) lcs_start_kernel_thread, card);
+	INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread);
 	card->thread_start_mask = 0;
 	card->thread_allowed_mask = 0;
 	card->thread_running_mask = 0;
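
Note: the lcs (and, below, qeth) hunks follow the reworked workqueue API: INIT_WORK() no longer carries a data pointer, the handler receives the struct work_struct itself, and the driver recovers its context with container_of(). A minimal sketch of the pattern, with illustrative names not taken from the patch:

	#include <linux/workqueue.h>

	struct sketch_card {
		struct work_struct starter;	/* embedded work item */
		int id;
	};

	static void sketch_starter(struct work_struct *work)
	{
		/* recover the containing structure from the work pointer */
		struct sketch_card *card =
			container_of(work, struct sketch_card, starter);

		pr_debug("starting threads for card %d\n", card->id);
	}

	static void sketch_setup(struct sketch_card *card)
	{
		/* old API: INIT_WORK(&card->starter, (void *)sketch_starter, card); */
		INIT_WORK(&card->starter, sketch_starter);
		schedule_work(&card->starter);
	}
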
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 9314393..0e1e4a0 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -23,11 +23,6 @@
 } while (0)
 
 /**
- * some more definitions for debug or output stuff
- */
-#define PRINTK_HEADER		" lcs: "
-
-/**
  *	sysfs related stuff
  */
 #define CARD_FROM_DEV(cdev) \
@@ -127,22 +122,22 @@
  * LCS Buffer states
  */
 enum lcs_buffer_states {
-	BUF_STATE_EMPTY,	/* buffer is empty */
-	BUF_STATE_LOCKED,	/* buffer is locked, don't touch */
-	BUF_STATE_READY,	/* buffer is ready for read/write */
-	BUF_STATE_PROCESSED,
+	LCS_BUF_STATE_EMPTY,	/* buffer is empty */
+	LCS_BUF_STATE_LOCKED,	/* buffer is locked, don't touch */
+	LCS_BUF_STATE_READY,	/* buffer is ready for read/write */
+	LCS_BUF_STATE_PROCESSED,
 };
 
 /**
  * LCS Channel State Machine declarations
  */
 enum lcs_channel_states {
-	CH_STATE_INIT,
-	CH_STATE_HALTED,
-	CH_STATE_STOPPED,
-	CH_STATE_RUNNING,
-	CH_STATE_SUSPENDED,
-	CH_STATE_CLEARED,
+	LCS_CH_STATE_INIT,
+	LCS_CH_STATE_HALTED,
+	LCS_CH_STATE_STOPPED,
+	LCS_CH_STATE_RUNNING,
+	LCS_CH_STATE_SUSPENDED,
+	LCS_CH_STATE_CLEARED,
 };
 
 /**
@@ -169,7 +164,7 @@
 }  __attribute__ ((packed));
 
 struct lcs_ip_mac_pair {
-	__u32  ip_addr;
+	__be32  ip_addr;
 	__u8   mac_addr[LCS_MAC_LENGTH];
 	__u8   reserved[2];
 }  __attribute__ ((packed));
@@ -287,7 +282,7 @@
 	enum lcs_dev_states state;
 	struct net_device *dev;
 	struct net_device_stats stats;
-	unsigned short (*lan_type_trans)(struct sk_buff *skb,
+	__be16 (*lan_type_trans)(struct sk_buff *skb,
 					 struct net_device *dev);
 	struct ccwgroup_device *gdev;
 	struct lcs_channel read;
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 821383d..53c358c 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -151,8 +151,6 @@
 #define SENSE_RESETTING_EVENT_BYTE 1
 #define SENSE_RESETTING_EVENT_FLAG 0x80
 
-#define atomic_swap(a,b) xchg((int *)a.counter, b)
-
 /*
  * Common IO related definitions
  */
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index a363721..6bb558a 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -258,7 +258,7 @@
 
 static inline void
 qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
-			u32 *hcsum)
+			__wsum *hcsum)
 {
 	struct skb_frag_struct *frag;
 	int left_in_frag;
@@ -305,7 +305,7 @@
 static inline void
 qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
 				  struct qeth_eddp_data *eddp, int data_len,
-				  u32 hcsum)
+				  __wsum hcsum)
 {
 	u8 *page;
 	int page_remainder;
@@ -349,10 +349,10 @@
 	((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
 }
 
-static inline u32
+static inline __wsum
 qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
 {
-	u32 phcsum; /* pseudo header checksum */
+	__wsum phcsum; /* pseudo header checksum */
 
 	QETH_DBF_TEXT(trace, 5, "eddpckt4");
 	eddp->th.tcp.h.check = 0;
@@ -363,11 +363,11 @@
 	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
 }
 
-static inline u32
+static inline __wsum
 qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
 {
-	u32 proto;
-	u32 phcsum; /* pseudo header checksum */
+	__be32 proto;
+	__wsum phcsum; /* pseudo header checksum */
 
 	QETH_DBF_TEXT(trace, 5, "eddpckt6");
 	eddp->th.tcp.h.check = 0;
@@ -405,7 +405,7 @@
 {
 	struct tcphdr *tcph;
 	int data_len;
-	u32 hcsum;
+	__wsum hcsum;
 
 	QETH_DBF_TEXT(trace, 5, "eddpftcp");
 	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
@@ -433,22 +433,22 @@
 			eddp->qh.hdr.l3.length = data_len + eddp->nhl +
 						 eddp->thl;
 		/* prepare ip hdr */
-		if (eddp->skb->protocol == ETH_P_IP){
-			eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
-						 eddp->thl;
+		if (eddp->skb->protocol == htons(ETH_P_IP)){
+			eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl +
+						 eddp->thl);
 			eddp->nh.ip4.h.check = 0;
 			eddp->nh.ip4.h.check =
 				ip_fast_csum((u8 *)&eddp->nh.ip4.h,
 						eddp->nh.ip4.h.ihl);
 		} else
-			eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
+			eddp->nh.ip6.h.payload_len = htons(data_len + eddp->thl);
 		/* prepare tcp hdr */
 		if (data_len == (eddp->skb->len - eddp->skb_offset)){
 			/* last segment -> set FIN and PSH flags */
 			eddp->th.tcp.h.fin = tcph->fin;
 			eddp->th.tcp.h.psh = tcph->psh;
 		}
-		if (eddp->skb->protocol == ETH_P_IP)
+		if (eddp->skb->protocol == htons(ETH_P_IP))
 			hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
 		else
 			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
@@ -458,9 +458,9 @@
 		if (eddp->skb_offset >= eddp->skb->len)
 			break;
 		/* prepare headers for next round */
-		if (eddp->skb->protocol == ETH_P_IP)
-			eddp->nh.ip4.h.id++;
-		eddp->th.tcp.h.seq += data_len;
+		if (eddp->skb->protocol == htons(ETH_P_IP))
+			eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1);
+		eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) + data_len);
 	}
 }
 
@@ -472,7 +472,7 @@
 
 	QETH_DBF_TEXT(trace, 5, "eddpficx");
 	/* create our segmentation headers and copy original headers */
-	if (skb->protocol == ETH_P_IP)
+	if (skb->protocol == htons(ETH_P_IP))
 		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
 				skb->nh.iph->ihl*4,
 				(u8 *)skb->h.th, skb->h.th->doff*4);
@@ -490,7 +490,7 @@
 		memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
 #ifdef CONFIG_QETH_VLAN
 		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
-			eddp->vlan[0] = __constant_htons(skb->protocol);
+			eddp->vlan[0] = skb->protocol;
 			eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
 		}
 #endif /* CONFIG_QETH_VLAN */
@@ -588,11 +588,11 @@
 	struct qeth_eddp_context *ctx = NULL;
 
 	QETH_DBF_TEXT(trace, 5, "creddpct");
-	if (skb->protocol == ETH_P_IP)
+	if (skb->protocol == htons(ETH_P_IP))
 		ctx = qeth_eddp_create_context_generic(card, skb,
 			sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
 			skb->h.th->doff*4);
-	else if (skb->protocol == ETH_P_IPV6)
+	else if (skb->protocol == htons(ETH_P_IPV6))
 		ctx = qeth_eddp_create_context_generic(card, skb,
 			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
 			skb->h.th->doff*4);
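
Note: the qeth_eddp changes annotate on-wire fields as __be16/__be32/__wsum and compare skb->protocol against htons() constants rather than raw host-order values, so sparse can check endianness. A minimal sketch of the idiom, using an illustrative helper that is not part of the driver:

	#include <linux/skbuff.h>
	#include <linux/ip.h>
	#include <linux/if_ether.h>

	/* sketch: advance the IPv4 header for the next segment, keeping the
	 * fields in network byte order as the annotated types require */
	static void sketch_next_segment(struct sk_buff *skb, struct iphdr *iph,
					int data_len)
	{
		if (skb->protocol == htons(ETH_P_IP)) {	/* __be16 vs __be16 */
			iph->tot_len = htons(data_len + iph->ihl * 4);
			iph->id = htons(ntohs(iph->id) + 1);
		}
	}
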
diff --git a/drivers/s390/net/qeth_eddp.h b/drivers/s390/net/qeth_eddp.h
index cae9ba2..103768d 100644
--- a/drivers/s390/net/qeth_eddp.h
+++ b/drivers/s390/net/qeth_eddp.h
@@ -54,7 +54,7 @@
 struct qeth_eddp_data {
 	struct qeth_hdr qh;
 	struct ethhdr mac;
-	u16 vlan[2];
+	__be16 vlan[2];
 	union {
 		struct {
 			struct iphdr h;
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 8364d54..2bde4f1 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -1039,8 +1039,9 @@
 }
 
 static void
-qeth_start_kernel_thread(struct qeth_card *card)
+qeth_start_kernel_thread(struct work_struct *work)
 {
+	struct qeth_card *card = container_of(work, struct qeth_card, kernel_thread_starter);
 	QETH_DBF_TEXT(trace , 2, "strthrd");
 
 	if (card->read.state != CH_STATE_UP &&
@@ -1103,8 +1104,7 @@
 	card->thread_start_mask = 0;
 	card->thread_allowed_mask = 0;
 	card->thread_running_mask = 0;
-	INIT_WORK(&card->kernel_thread_starter,
-		  (void *)qeth_start_kernel_thread,card);
+	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
 	INIT_LIST_HEAD(&card->ip_list);
 	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
 	if (!card->ip_tbd_list) {
@@ -2982,7 +2982,7 @@
 	 */
 	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
 	    !atomic_read(&queue->set_pci_flags_count)){
-		if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
+		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
 				QETH_OUT_Q_UNLOCKED) {
 			/*
 			 * If we get in here, there was no action in
@@ -3245,7 +3245,7 @@
 	int i, j;
 
 	QETH_DBF_TEXT(trace, 2, "freeqdbf");
-	if (atomic_swap(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
+	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
 		QETH_QDIO_UNINITIALIZED)
 		return;
 	kfree(card->qdio.in_q);
@@ -4366,7 +4366,7 @@
 	if (flush_count)
 		qeth_flush_buffers(queue, 0, start_index, flush_count);
 	else if (!atomic_read(&queue->set_pci_flags_count))
-		atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
+		atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
 	/*
 	 * queue->state will go from LOCKED -> UNLOCKED or from
 	 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
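
Note: the old atomic_swap() macro cast &counter to int * and called xchg() directly; the qeth hunks use the generic atomic_xchg() helper instead, which returns the previous value atomically. A minimal sketch with hypothetical state values, not the qeth constants:

	#include <asm/atomic.h>

	#define SKETCH_Q_UNLOCKED	0
	#define SKETCH_Q_LOCKED_FLUSH	2	/* illustrative values */

	static int sketch_try_flush(atomic_t *state)
	{
		/* set the new state and test what it was beforehand */
		return atomic_xchg(state, SKETCH_Q_LOCKED_FLUSH) == SKETCH_Q_UNLOCKED;
	}
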
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 74c0eac..32933ed 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -1032,9 +1032,9 @@
 	wwn_t                   init_wwpn;
 	fcp_lun_t               init_fcp_lun;
 	char 			*driver_version;
-	kmem_cache_t		*fsf_req_qtcb_cache;
-	kmem_cache_t		*sr_buffer_cache;
-	kmem_cache_t		*gid_pn_cache;
+	struct kmem_cache		*fsf_req_qtcb_cache;
+	struct kmem_cache		*sr_buffer_cache;
+	struct kmem_cache		*gid_pn_cache;
 };
 
 /**
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 277826c..067f151 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -109,7 +109,7 @@
 			ptr = kmalloc(size, GFP_ATOMIC);
 		else
 			ptr = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
-					       SLAB_ATOMIC);
+					       GFP_ATOMIC);
 	}
 
 	if (unlikely(!ptr))
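
Note: kmem_cache_alloc() takes gfp_t flags, and the legacy SLAB_* aliases are going away, so the zfcp hunk passes GFP_ATOMIC directly. A minimal sketch with a hypothetical cache pointer:

	#include <linux/slab.h>

	/* sketch: allocate from a slab cache in atomic (non-sleeping) context */
	static void *sketch_alloc(struct kmem_cache *cache)
	{
		return kmem_cache_alloc(cache, GFP_ATOMIC);
	}
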
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 562432d..68103e5 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -313,7 +313,7 @@
 	hostdata->status = memory + STATUS_OFFSET;
 	/* all of these offsets are L1_CACHE_BYTES separated.  It is fatal
 	 * if this isn't sufficient separation to avoid dma flushing issues */
-	BUG_ON(!dma_is_consistent(pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
+	BUG_ON(!dma_is_consistent(hostdata->dev, pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
 	hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
 	hostdata->dev = dev;
 
@@ -362,11 +362,11 @@
 	for (j = 0; j < PATCHES; j++)
 		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
 	/* now patch up fixed addresses. */
-	script_patch_32(script, MessageLocation,
+	script_patch_32(hostdata->dev, script, MessageLocation,
 			pScript + MSGOUT_OFFSET);
-	script_patch_32(script, StatusAddress,
+	script_patch_32(hostdata->dev, script, StatusAddress,
 			pScript + STATUS_OFFSET);
-	script_patch_32(script, ReceiveMsgAddress,
+	script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
 			pScript + MSGIN_OFFSET);
 
 	hostdata->script = script;
@@ -622,8 +622,10 @@
 			dma_unmap_single(hostdata->dev, slot->dma_handle, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
 			/* restore the old result if the request sense was
 			 * successful */
-			if(result == 0)
+			if (result == 0)
 				result = cmnd[7];
+			/* restore the original length */
+			SCp->cmd_len = cmnd[8];
 		} else
 			NCR_700_unmap(hostdata, SCp, slot);
 
@@ -819,8 +821,9 @@
 			shost_printk(KERN_WARNING, host,
 				"Unexpected SDTR msg\n");
 			hostdata->msgout[0] = A_REJECT_MSG;
-			dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-			script_patch_16(hostdata->script, MessageCount, 1);
+			dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+			script_patch_16(hostdata->dev, hostdata->script,
+			                MessageCount, 1);
 			/* SendMsgOut returns, so set up the return
 			 * address */
 			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -831,8 +834,9 @@
 		printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
 		       host->host_no, pun, lun);
 		hostdata->msgout[0] = A_REJECT_MSG;
-		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-		script_patch_16(hostdata->script, MessageCount, 1);
+		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+		                1);
 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 
 		break;
@@ -845,8 +849,9 @@
 		printk("\n");
 		/* just reject it */
 		hostdata->msgout[0] = A_REJECT_MSG;
-		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-		script_patch_16(hostdata->script, MessageCount, 1);
+		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+		                1);
 		/* SendMsgOut returns, so set up the return
 		 * address */
 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -927,8 +932,9 @@
 		printk("\n");
 		/* just reject it */
 		hostdata->msgout[0] = A_REJECT_MSG;
-		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-		script_patch_16(hostdata->script, MessageCount, 1);
+		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+		                1);
 		/* SendMsgOut returns, so set up the return
 		 * address */
 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -937,7 +943,7 @@
 	}
 	NCR_700_writel(temp, host, TEMP_REG);
 	/* set us up to receive another message */
-	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
+	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
 	return resume_offset;
 }
 
@@ -1007,6 +1013,9 @@
 				 * of the command */
 				cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
 				cmnd[7] = hostdata->status[0];
+				cmnd[8] = SCp->cmd_len;
+				SCp->cmd_len = 6; /* command length for
+						   * REQUEST_SENSE */
 				slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
 				slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
 				slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
@@ -1014,9 +1023,9 @@
 				slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
 				slot->SG[1].pAddr = 0;
 				slot->resume_offset = hostdata->pScript;
-				dma_cache_sync(slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
-				dma_cache_sync(SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
-				
+				dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
+				dma_cache_sync(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
+
 				/* queue the command for reissue */
 				slot->state = NCR_700_SLOT_QUEUED;
 				slot->flags = NCR_700_FLAG_AUTOSENSE;
@@ -1131,11 +1140,12 @@
 			hostdata->cmd = slot->cmnd;
 
 			/* re-patch for this command */
-			script_patch_32_abs(hostdata->script, CommandAddress, 
-					    slot->pCmd);
-			script_patch_16(hostdata->script,
+			script_patch_32_abs(hostdata->dev, hostdata->script,
+			                    CommandAddress, slot->pCmd);
+			script_patch_16(hostdata->dev, hostdata->script,
 					CommandCount, slot->cmnd->cmd_len);
-			script_patch_32_abs(hostdata->script, SGScriptStartAddress,
+			script_patch_32_abs(hostdata->dev, hostdata->script,
+			                    SGScriptStartAddress,
 					    to32bit(&slot->pSG[0].ins));
 
 			/* Note: setting SXFER only works if we're
@@ -1145,13 +1155,13 @@
 			 * should therefore always clear ACK */
 			NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
 				       host, SXFER_REG);
-			dma_cache_sync(hostdata->msgin,
+			dma_cache_sync(hostdata->dev, hostdata->msgin,
 				       MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
-			dma_cache_sync(hostdata->msgout,
+			dma_cache_sync(hostdata->dev, hostdata->msgout,
 				       MSG_ARRAY_SIZE, DMA_TO_DEVICE);
 			/* I'm just being paranoid here, the command should
 			 * already have been flushed from the cache */
-			dma_cache_sync(slot->cmnd->cmnd,
+			dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
 				       slot->cmnd->cmd_len, DMA_TO_DEVICE);
 
 
@@ -1215,7 +1225,7 @@
 		hostdata->reselection_id = reselection_id;
 		/* just in case we have a stale simple tag message, clear it */
 		hostdata->msgin[1] = 0;
-		dma_cache_sync(hostdata->msgin,
+		dma_cache_sync(hostdata->dev, hostdata->msgin,
 			       MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
 		if(hostdata->tag_negotiated & (1<<reselection_id)) {
 			resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
@@ -1331,7 +1341,7 @@
 	hostdata->cmd = NULL;
 	/* clear any stale simple tag message */
 	hostdata->msgin[1] = 0;
-	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
+	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
 		       DMA_BIDIRECTIONAL);
 
 	if(id == 0xff) {
@@ -1428,29 +1438,30 @@
 		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
 	}
 
-	script_patch_16(hostdata->script, MessageCount, count);
+	script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
 
 
-	script_patch_ID(hostdata->script,
+	script_patch_ID(hostdata->dev, hostdata->script,
 			Device_ID, 1<<scmd_id(SCp));
 
-	script_patch_32_abs(hostdata->script, CommandAddress, 
+	script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
 			    slot->pCmd);
-	script_patch_16(hostdata->script, CommandCount, SCp->cmd_len);
+	script_patch_16(hostdata->dev, hostdata->script, CommandCount,
+	                SCp->cmd_len);
 	/* finally plumb the beginning of the SG list into the script
 	 * */
-	script_patch_32_abs(hostdata->script, SGScriptStartAddress,
-			    to32bit(&slot->pSG[0].ins));
+	script_patch_32_abs(hostdata->dev, hostdata->script,
+	                    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
 	NCR_700_clear_fifo(SCp->device->host);
 
 	if(slot->resume_offset == 0)
 		slot->resume_offset = hostdata->pScript;
 	/* now perform all the writebacks and invalidates */
-	dma_cache_sync(hostdata->msgout, count, DMA_TO_DEVICE);
-	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
+	dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
+	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
 		       DMA_FROM_DEVICE);
-	dma_cache_sync(SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
-	dma_cache_sync(hostdata->status, 1, DMA_FROM_DEVICE);
+	dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
+	dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
 
 	/* set the synchronous period/offset */
 	NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
@@ -1626,7 +1637,7 @@
 					slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
 					slot->SG[i].pAddr = 0;
 				}
-				dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+				dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
 				/* and pretend we disconnected after
 				 * the command phase */
 				resume_offset = hostdata->pScript + Ent_MsgInDuringData;
@@ -1892,9 +1903,9 @@
 		}
 		slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
 		slot->SG[i].pAddr = 0;
-		dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+		dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
 		DEBUG((" SETTING %08lx to %x\n",
-		       (&slot->pSG[i].ins), 
+		       (&slot->pSG[i].ins),
 		       slot->SG[i].ins));
 	}
 	slot->resume_offset = 0;
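
Note: for the autosense path the 53c700 driver now stashes the original CDB length in byte 8 of the scratch command, forces cmd_len to 6 for the internal REQUEST SENSE, and restores it when the sense data comes back. A simplified sketch of the idea, with hypothetical helper names:

	#include <scsi/scsi_cmnd.h>

	#define SKETCH_SENSE_CMD_LEN	6	/* REQUEST SENSE is a 6-byte CDB */

	static void sketch_issue_autosense(unsigned char *cmnd, struct scsi_cmnd *SCp)
	{
		cmnd[8] = SCp->cmd_len;			/* remember the original length */
		SCp->cmd_len = SKETCH_SENSE_CMD_LEN;
	}

	static void sketch_complete_autosense(unsigned char *cmnd, struct scsi_cmnd *SCp)
	{
		SCp->cmd_len = cmnd[8];			/* put the original length back */
	}
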
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index f5c3caf..f38822d 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -415,31 +415,31 @@
 #define NCR_710_MIN_XFERP	0
 #define NCR_700_MIN_PERIOD	25 /* for SDTR message, 100ns */
 
-#define script_patch_32(script, symbol, value) \
+#define script_patch_32(dev, script, symbol, value) \
 { \
 	int i; \
 	for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
 		__u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + value; \
 		(script)[A_##symbol##_used[i]] = bS_to_host(val); \
-		dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+		dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
 		DEBUG((" script, patching %s at %d to 0x%lx\n", \
 		       #symbol, A_##symbol##_used[i], (value))); \
 	} \
 }
 
-#define script_patch_32_abs(script, symbol, value) \
+#define script_patch_32_abs(dev, script, symbol, value) \
 { \
 	int i; \
 	for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
 		(script)[A_##symbol##_used[i]] = bS_to_host(value); \
-		dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+		dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
 		DEBUG((" script, patching %s at %d to 0x%lx\n", \
 		       #symbol, A_##symbol##_used[i], (value))); \
 	} \
 }
 
 /* Used for patching the SCSI ID in the SELECT instruction */
-#define script_patch_ID(script, symbol, value) \
+#define script_patch_ID(dev, script, symbol, value) \
 { \
 	int i; \
 	for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
@@ -447,13 +447,13 @@
 		val &= 0xff00ffff; \
 		val |= ((value) & 0xff) << 16; \
 		(script)[A_##symbol##_used[i]] = bS_to_host(val); \
-		dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+		dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
 		DEBUG((" script, patching ID field %s at %d to 0x%x\n", \
 		       #symbol, A_##symbol##_used[i], val)); \
 	} \
 }
 
-#define script_patch_16(script, symbol, value) \
+#define script_patch_16(dev, script, symbol, value) \
 { \
 	int i; \
 	for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
@@ -461,7 +461,7 @@
 		val &= 0xffff0000; \
 		val |= ((value) & 0xffff); \
 		(script)[A_##symbol##_used[i]] = bS_to_host(val); \
-		dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+		dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
 		DEBUG((" script, patching short field %s at %d to 0x%x\n", \
 		       #symbol, A_##symbol##_used[i], val)); \
 	} \
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index cdd0337..3075204 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2186,21 +2186,21 @@
 
 	if (BusLogic_ProbeOptions.NoProbe)
 		return -ENODEV;
-	BusLogic_ProbeInfoList = (struct BusLogic_ProbeInfo *)
-	    kmalloc(BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo), GFP_ATOMIC);
+	BusLogic_ProbeInfoList =
+	    kzalloc(BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo), GFP_KERNEL);
 	if (BusLogic_ProbeInfoList == NULL) {
 		BusLogic_Error("BusLogic: Unable to allocate Probe Info List\n", NULL);
 		return -ENOMEM;
 	}
-	memset(BusLogic_ProbeInfoList, 0, BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo));
-	PrototypeHostAdapter = (struct BusLogic_HostAdapter *)
-	    kmalloc(sizeof(struct BusLogic_HostAdapter), GFP_ATOMIC);
+
+	PrototypeHostAdapter =
+	    kzalloc(sizeof(struct BusLogic_HostAdapter), GFP_KERNEL);
 	if (PrototypeHostAdapter == NULL) {
 		kfree(BusLogic_ProbeInfoList);
 		BusLogic_Error("BusLogic: Unable to allocate Prototype " "Host Adapter\n", NULL);
 		return -ENOMEM;
 	}
-	memset(PrototypeHostAdapter, 0, sizeof(struct BusLogic_HostAdapter));
+
 #ifdef MODULE
 	if (BusLogic != NULL)
 		BusLogic_Setup(BusLogic);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9540eb8..6956909 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -29,6 +29,13 @@
 	  However, do not compile this as a module if your root file system
 	  (the one containing the directory /) is located on a SCSI device.
 
+config SCSI_TGT
+	tristate "SCSI target support"
+	depends on SCSI && EXPERIMENTAL
+	---help---
+	  If you want to use SCSI target mode drivers enable this option.
+	  If you choose M, the module will be called scsi_tgt.
+
 config SCSI_NETLINK
 	bool
 	default	n
@@ -216,6 +223,23 @@
 	  there should be no noticeable performance impact as long as you have
 	  logging turned off.
 
+config SCSI_SCAN_ASYNC
+	bool "Asynchronous SCSI scanning"
+	depends on SCSI
+	help
+	  The SCSI subsystem can probe for devices while the rest of the
+	  system continues booting, and even probe devices on different
+	  busses in parallel, leading to a significant speed-up.
+	  If you have built SCSI as modules, enabling this option can
+	  be a problem as the devices may not have been found by the
+	  time your system expects them to have been.  You can load the
+	  scsi_wait_scan module to ensure that all scans have completed.
+	  If you build your SCSI drivers into the kernel, then everything
+	  will work fine if you say Y here.
+
+	  You can override this choice by specifying scsi_mod.scan="sync"
+	  or "async" on the kernel's command line.
+
 menu "SCSI Transports"
 	depends on SCSI
 
@@ -797,6 +821,20 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called ibmvscsic.
 
+config SCSI_IBMVSCSIS
+	tristate "IBM Virtual SCSI Server support"
+	depends on PPC_PSERIES && SCSI_TGT && SCSI_SRP
+	help
+	  This is the SRP target driver for IBM pSeries virtual environments.
+
+	  The userspace component needed to initialize the driver, along
+	  with its documentation, can be found at:
+
+	  http://stgt.berlios.de/
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ibmvstgt.
+
 config SCSI_INITIO
 	tristate "Initio 9100U(W) support"
 	depends on PCI && SCSI
@@ -944,8 +982,13 @@
 	tristate "Promise SuperTrak EX Series support"
 	depends on PCI && SCSI
 	---help---
-	  This driver supports Promise SuperTrak EX8350/8300/16350/16300
-	  Storage controllers.
+	  This driver supports Promise SuperTrak EX series storage controllers.
+
+	  Promise provides a Linux RAID configuration utility for these
+	  controllers. Please visit <http://www.promise.com> to download it.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called stex.
 
 config SCSI_SYM53C8XX_2
 	tristate "SYM53C8XX Version 2 SCSI support"
@@ -1026,6 +1069,7 @@
 config SCSI_IPR_TRACE
 	bool "enable driver internal trace"
 	depends on SCSI_IPR
+	default y
 	help
 	  If you say Y here, the driver will trace all commands issued
 	  to the adapter. Performance impact is minimal. Trace can be
@@ -1034,6 +1078,7 @@
 config SCSI_IPR_DUMP
 	bool "enable adapter dump support"
 	depends on SCSI_IPR
+	default y
 	help
 	  If you say Y here, the driver will support adapter crash dump.
 	  If you enable this support, the iprdump daemon can be used
@@ -1734,6 +1779,16 @@
           called zfcp. If you want to compile it as a module, say M here
           and read <file:Documentation/modules.txt>.
 
+config SCSI_SRP
+	tristate "SCSI RDMA Protocol helper library"
+	depends on SCSI && PCI
+	select SCSI_TGT
+	help
+	  If you wish to use SRP target drivers, say Y.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called libsrp.
+
 endmenu
 
 source "drivers/scsi/pcmcia/Kconfig"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index bcca39c..bd7c988 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -21,6 +21,7 @@
 subdir-$(CONFIG_PCMCIA)		+= pcmcia
 
 obj-$(CONFIG_SCSI)		+= scsi_mod.o
+obj-$(CONFIG_SCSI_TGT)		+= scsi_tgt.o
 
 obj-$(CONFIG_RAID_ATTRS)	+= raid_class.o
 
@@ -125,7 +126,9 @@
 obj-$(CONFIG_SCSI_LASI700)	+= 53c700.o lasi700.o
 obj-$(CONFIG_SCSI_NSP32)	+= nsp32.o
 obj-$(CONFIG_SCSI_IPR)		+= ipr.o
+obj-$(CONFIG_SCSI_SRP)		+= libsrp.o
 obj-$(CONFIG_SCSI_IBMVSCSI)	+= ibmvscsi/
+obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvscsi/
 obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
 obj-$(CONFIG_SCSI_STEX)		+= stex.o
 
@@ -141,6 +144,8 @@
 # This goes last, so that "real" scsi devices probe earlier
 obj-$(CONFIG_SCSI_DEBUG)	+= scsi_debug.o
 
+obj-$(CONFIG_SCSI)		+= scsi_wait_scan.o
+
 scsi_mod-y			+= scsi.o hosts.o scsi_ioctl.o constants.o \
 				   scsicam.o scsi_error.o scsi_lib.o \
 				   scsi_scan.o scsi_sysfs.o \
@@ -149,6 +154,8 @@
 scsi_mod-$(CONFIG_SYSCTL)	+= scsi_sysctl.o
 scsi_mod-$(CONFIG_SCSI_PROC_FS)	+= scsi_proc.o
 
+scsi_tgt-y			+= scsi_tgt_lib.o scsi_tgt_if.o
+
 sd_mod-objs	:= sd.o
 sr_mod-objs	:= sr.o sr_ioctl.o sr_vendor.o
 ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index a6aa910..bb3cb33 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -849,7 +849,7 @@
 	hostdata->issue_queue = NULL;
 	hostdata->disconnected_queue = NULL;
 	
-	INIT_WORK(&hostdata->coroutine, NCR5380_main, hostdata);
+	INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main);
 	
 #ifdef NCR5380_STATS
 	for (i = 0; i < 8; ++i) {
@@ -1016,7 +1016,7 @@
 
 	/* Run the coroutine if it isn't already running. */
 	/* Kick off command processing */
-	schedule_work(&hostdata->coroutine);
+	schedule_delayed_work(&hostdata->coroutine, 0);
 	return 0;
 }
 
@@ -1033,9 +1033,10 @@
  *	host lock and called routines may take the isa dma lock.
  */
 
-static void NCR5380_main(void *p)
+static void NCR5380_main(struct work_struct *work)
 {
-	struct NCR5380_hostdata *hostdata = p;
+	struct NCR5380_hostdata *hostdata =
+		container_of(work, struct NCR5380_hostdata, coroutine.work);
 	struct Scsi_Host *instance = hostdata->host;
 	Scsi_Cmnd *tmp, *prev;
 	int done;
@@ -1221,7 +1222,7 @@
 		}	/* if BASR_IRQ */
 		spin_unlock_irqrestore(instance->host_lock, flags);
 		if(!done)
-			schedule_work(&hostdata->coroutine);
+			schedule_delayed_work(&hostdata->coroutine, 0);
 	} while (!done);
 	return IRQ_HANDLED;
 }
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 1bc73de..713a108 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -271,7 +271,7 @@
 	unsigned long time_expires;		/* in jiffies, set prior to sleeping */
 	int select_time;			/* timer in select for target response */
 	volatile Scsi_Cmnd *selecting;
-	struct work_struct coroutine;		/* our co-routine */
+	struct delayed_work coroutine;		/* our co-routine */
 #ifdef NCR5380_STATS
 	unsigned timebase;			/* Base for time calcs */
 	long time_read[8];			/* time to do reads */
@@ -298,7 +298,7 @@
 #ifndef DONT_USE_INTR
 static irqreturn_t NCR5380_intr(int irq, void *dev_id);
 #endif
-static void NCR5380_main(void *ptr);
+static void NCR5380_main(struct work_struct *work);
 static void NCR5380_print_options(struct Scsi_Host *instance);
 #ifdef NDEBUG
 static void NCR5380_print_phase(struct Scsi_Host *instance);
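
Note: the NCR5380 coroutine becomes a struct delayed_work; it is kicked with schedule_delayed_work(..., 0) and the handler recovers the hostdata through the embedded work member. A minimal sketch of the pattern, with illustrative names:

	#include <linux/workqueue.h>

	struct sketch_hostdata {
		struct delayed_work coroutine;
	};

	static void sketch_main(struct work_struct *work)
	{
		/* delayed_work embeds a work_struct named 'work' */
		struct sketch_hostdata *hostdata =
			container_of(work, struct sketch_hostdata, coroutine.work);

		(void)hostdata;		/* process the command queues here */
	}

	static void sketch_init(struct sketch_hostdata *hostdata)
	{
		INIT_DELAYED_WORK(&hostdata->coroutine, sketch_main);
	}

	static void sketch_kick(struct sketch_hostdata *hostdata)
	{
		/* run as soon as the workqueue gets to it */
		schedule_delayed_work(&hostdata->coroutine, 0);
	}
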
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index d461381..8578555 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -220,9 +220,11 @@
 static unsigned short ports[] = { 0x230, 0x330, 0x280, 0x290, 0x330, 0x340, 0x300, 0x310, 0x348, 0x350 };
 #define PORT_COUNT ARRAY_SIZE(ports)
 
+#ifndef MODULE
 /* possible interrupt channels */
 static unsigned short intrs[] = { 10, 11, 12, 15 };
 #define INTR_COUNT ARRAY_SIZE(intrs)
+#endif /* !MODULE */
 
 /* signatures for NCR 53c406a based controllers */
 #if USE_BIOS
@@ -605,6 +607,7 @@
 	return 0;
 }
 
+#ifndef MODULE
 /* called from init/main.c */
 static int __init NCR53c406a_setup(char *str)
 {
@@ -661,6 +664,8 @@
 
 __setup("ncr53c406a=", NCR53c406a_setup);
 
+#endif /* !MODULE */
+
 static const char *NCR53c406a_info(struct Scsi_Host *SChost)
 {
 	DEB(printk("NCR53c406a_info called\n"));
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index eb3ed91..4f8b4c5 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -11,8 +11,8 @@
  *----------------------------------------------------------------------------*/
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 2409
-# define AAC_DRIVER_BRANCH "-mh2"
+# define AAC_DRIVER_BUILD 2423
+# define AAC_DRIVER_BRANCH "-mh3"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
 
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 19e42ac..4893a6d 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -518,6 +518,7 @@
 			 */
 			unsigned long count = 36000000L; /* 3 minutes */
 			while (down_trylock(&fibptr->event_wait)) {
+				int blink;
 				if (--count == 0) {
 					spin_lock_irqsave(q->lock, qflags);
 					q->numpending--;
@@ -530,6 +531,14 @@
 					}
 					return -ETIMEDOUT;
 				}
+				if ((blink = aac_adapter_check_health(dev)) > 0) {
+					if (wait == -1) {
+	        				printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
+						  "Usually a result of a serious unrecoverable hardware problem\n",
+						  blink);
+					}
+					return -EFAULT;
+				}
 				udelay(5);
 			}
 		} else if (down_interruptible(&fibptr->event_wait)) {
@@ -1093,6 +1102,20 @@
 		goto out;
 	}
 
+	/*
+	 *	Loop through the fibs, close the synchronous FIBS
+	 */
+	for (index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
+		struct fib *fib = &aac->fibs[index];
+		if (!(fib->hw_fib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
+		  (fib->hw_fib->header.XferState & cpu_to_le32(ResponseExpected))) {
+			unsigned long flagv;
+			spin_lock_irqsave(&fib->event_lock, flagv);
+			up(&fib->event_wait);
+			spin_unlock_irqrestore(&fib->event_lock, flagv);
+			schedule();
+		}
+	}
 	index = aac->cardtype;
 
 	/*
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 306f46b..0cec742 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1443,7 +1443,7 @@
  * Run service completions on the card with interrupts enabled.
  *
  */
-static void run(void)
+static void run(struct work_struct *work)
 {
 	struct aha152x_hostdata *hd;
 
@@ -1499,7 +1499,7 @@
 		HOSTDATA(shpnt)->service=1;
 
 		/* Poke the BH handler */
-		INIT_WORK(&aha152x_tq, (void *) run, NULL);
+		INIT_WORK(&aha152x_tq, run);
 		schedule_work(&aha152x_tq);
 	}
 	DO_UNLOCK(flags);
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index c3c38a7..d7af9c6 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -586,7 +586,7 @@
 
 static int aha1740_probe (struct device *dev)
 {
-	int slotbase;
+	int slotbase, rc;
 	unsigned int irq_level, irq_type, translation;
 	struct Scsi_Host *shpnt;
 	struct aha1740_hostdata *host;
@@ -641,10 +641,16 @@
 	}
 
 	eisa_set_drvdata (edev, shpnt);
-	scsi_add_host (shpnt, dev); /* XXX handle failure */
+
+	rc = scsi_add_host (shpnt, dev);
+	if (rc)
+		goto err_irq;
+
 	scsi_scan_host (shpnt);
 	return 0;
 
+ err_irq:
+ 	free_irq(irq_level, shpnt);
  err_unmap:
 	dma_unmap_single (&edev->dev, host->ecb_dma_addr,
 			  sizeof (host->ecb), DMA_BIDIRECTIONAL);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 2001fe8..1a3ab6a 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -62,6 +62,7 @@
 	/* aic7901 based controllers */
 	ID(ID_AHA_29320A),
 	ID(ID_AHA_29320ALP),
+	ID(ID_AHA_29320LPE),
 	/* aic7902 based controllers */
 	ID(ID_AHA_29320),
 	ID(ID_AHA_29320B),
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index c077358..2cf7bb3 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -109,7 +109,13 @@
 	{
 		ID_AHA_29320ALP,
 		ID_ALL_MASK,
-		"Adaptec 29320ALP Ultra320 SCSI adapter",
+		"Adaptec 29320ALP PCIx Ultra320 SCSI adapter",
+		ahd_aic7901_setup
+	},
+	{
+		ID_AHA_29320LPE,
+		ID_ALL_MASK,
+		"Adaptec 29320LPE PCIe Ultra320 SCSI adapter",
 		ahd_aic7901_setup
 	},
 	/* aic7901A based controllers */
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.h b/drivers/scsi/aic7xxx/aic79xx_pci.h
index da45153..16b7c70 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.h
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.h
@@ -51,6 +51,7 @@
 #define ID_AIC7901			0x800F9005FFFF9005ull
 #define ID_AHA_29320A			0x8000900500609005ull
 #define ID_AHA_29320ALP			0x8017900500449005ull
+#define ID_AHA_29320LPE			0x8017900500459005ull
 
 #define ID_AIC7901A			0x801E9005FFFF9005ull
 #define ID_AHA_29320LP			0x8014900500449005ull
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 71a031d..32f513b 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -56,8 +56,8 @@
 /* 2*ITNL timeout + 1 second */
 #define AIC94XX_SCB_TIMEOUT  (5*HZ)
 
-extern kmem_cache_t *asd_dma_token_cache;
-extern kmem_cache_t *asd_ascb_cache;
+extern struct kmem_cache *asd_dma_token_cache;
+extern struct kmem_cache *asd_ascb_cache;
 extern char sas_addr_str[2*SAS_ADDR_SIZE + 1];
 
 static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr)
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index af7e011..da94e12 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -1047,7 +1047,7 @@
 static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
 					      gfp_t gfp_flags)
 {
-	extern kmem_cache_t *asd_ascb_cache;
+	extern struct kmem_cache *asd_ascb_cache;
 	struct asd_seq_data *seq = &asd_ha->seq;
 	struct asd_ascb *ascb;
 	unsigned long flags;
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 57c5ba4..fbc82b0 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -450,8 +450,8 @@
 	asd_ha->scb_pool = NULL;
 }
 
-kmem_cache_t *asd_dma_token_cache;
-kmem_cache_t *asd_ascb_cache;
+struct kmem_cache *asd_dma_token_cache;
+struct kmem_cache *asd_ascb_cache;
 
 static int asd_create_global_caches(void)
 {
@@ -724,6 +724,15 @@
 
 	list_for_each_safe(pos, n, &pending) {
 		struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list);
+		/*
+		 * Delete unexpired ascb timers.  This may happen if we issue
+		 * a CONTROL PHY scb to an adapter and rmmod before the scb
+		 * times out.  Apparently we don't wait for the CONTROL PHY
+		 * to complete, so it doesn't matter if we kill the timer.
+		 */
+		del_timer_sync(&ascb->timer);
+		WARN_ON(ascb->scb->header.opcode != CONTROL_PHY);
+
 		list_del_init(pos);
 		ASD_DPRINTK("freeing from pending\n");
 		asd_ascb_free(ascb);
diff --git a/drivers/scsi/aic94xx/aic94xx_reg_def.h b/drivers/scsi/aic94xx/aic94xx_reg_def.h
index b79f45f..a11f4e6 100644
--- a/drivers/scsi/aic94xx/aic94xx_reg_def.h
+++ b/drivers/scsi/aic94xx/aic94xx_reg_def.h
@@ -2000,7 +2000,7 @@
  * The host accesses this scratch in a different manner from the
  * central sequencer. The sequencer has to use CSEQ registers CSCRPAGE
  * and CMnSCRPAGE to access the scratch memory. A flat mapping of the
- * scratch memory is avaliable for software convenience and to prevent
+ * scratch memory is available for software convenience and to prevent
  * corruption while the sequencer is running. This memory is mapped
  * onto addresses 800h - BFFh, total of 400h bytes.
  *
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index b15caf1..75ed6b0 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -25,6 +25,7 @@
  */
 
 #include <linux/pci.h>
+#include <scsi/scsi_host.h>
 
 #include "aic94xx.h"
 #include "aic94xx_reg.h"
@@ -412,6 +413,40 @@
 	}
 }
 
+/* hard reset a phy later */
+static void do_phy_reset_later(struct work_struct *work)
+{
+	struct sas_phy *sas_phy =
+		container_of(work, struct sas_phy, reset_work);
+	int error;
+
+	ASD_DPRINTK("%s: About to hard reset phy %d\n", __FUNCTION__,
+		    sas_phy->identify.phy_identifier);
+	/* Reset device port */
+	error = sas_phy_reset(sas_phy, 1);
+	if (error)
+		ASD_DPRINTK("%s: Hard reset of phy %d failed (%d).\n",
+			    __FUNCTION__, sas_phy->identify.phy_identifier, error);
+}
+
+static void phy_reset_later(struct sas_phy *sas_phy, struct Scsi_Host *shost)
+{
+	INIT_WORK(&sas_phy->reset_work, do_phy_reset_later);
+	queue_work(shost->work_q, &sas_phy->reset_work);
+}
+
+/* start up the ABORT TASK tmf... */
+static void task_kill_later(struct asd_ascb *ascb)
+{
+	struct asd_ha_struct *asd_ha = ascb->ha;
+	struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
+	struct Scsi_Host *shost = sas_ha->core.shost;
+	struct sas_task *task = ascb->uldd_task;
+
+	INIT_WORK(&task->abort_work, sas_task_abort);
+	queue_work(shost->work_q, &task->abort_work);
+}
+
 static void escb_tasklet_complete(struct asd_ascb *ascb,
 				  struct done_list_struct *dl)
 {
@@ -439,6 +474,74 @@
 			    ascb->scb->header.opcode);
 	}
 
+	/* Catch these before we mask off the sb_opcode bits */
+	switch (sb_opcode) {
+	case REQ_TASK_ABORT: {
+		struct asd_ascb *a, *b;
+		u16 tc_abort;
+
+		tc_abort = *((u16*)(&dl->status_block[1]));
+		tc_abort = le16_to_cpu(tc_abort);
+
+		ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
+			    __FUNCTION__, dl->status_block[3]);
+
+		/* Find the pending task and abort it. */
+		list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list)
+			if (a->tc_index == tc_abort) {
+				task_kill_later(a);
+				break;
+			}
+		goto out;
+	}
+	case REQ_DEVICE_RESET: {
+		struct Scsi_Host *shost = sas_ha->core.shost;
+		struct sas_phy *dev_phy;
+		struct asd_ascb *a;
+		u16 conn_handle;
+
+		conn_handle = *((u16*)(&dl->status_block[1]));
+		conn_handle = le16_to_cpu(conn_handle);
+
+		ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__,
+			    dl->status_block[3]);
+
+		/* Kill all pending tasks and reset the device */
+		dev_phy = NULL;
+		list_for_each_entry(a, &asd_ha->seq.pend_q, list) {
+			struct sas_task *task;
+			struct domain_device *dev;
+			u16 x;
+
+			task = a->uldd_task;
+			if (!task)
+				continue;
+			dev = task->dev;
+
+			x = (unsigned long)dev->lldd_dev;
+			if (x == conn_handle) {
+				dev_phy = dev->port->phy;
+				task_kill_later(a);
+			}
+		}
+
+		/* Reset device port */
+		if (!dev_phy) {
+			ASD_DPRINTK("%s: No pending commands; can't reset.\n",
+				    __FUNCTION__);
+			goto out;
+		}
+		phy_reset_later(dev_phy, shost);
+		goto out;
+	}
+	case SIGNAL_NCQ_ERROR:
+		ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __FUNCTION__);
+		goto out;
+	case CLEAR_NCQ_ERROR:
+		ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __FUNCTION__);
+		goto out;
+	}
+
 	sb_opcode &= ~DL_PHY_MASK;
 
 	switch (sb_opcode) {
@@ -469,22 +572,6 @@
 		asd_deform_port(asd_ha, phy);
 		sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
 		break;
-	case REQ_TASK_ABORT:
-		ASD_DPRINTK("%s: phy%d: REQ_TASK_ABORT\n", __FUNCTION__,
-			    phy_id);
-		break;
-	case REQ_DEVICE_RESET:
-		ASD_DPRINTK("%s: phy%d: REQ_DEVICE_RESET\n", __FUNCTION__,
-			    phy_id);
-		break;
-	case SIGNAL_NCQ_ERROR:
-		ASD_DPRINTK("%s: phy%d: SIGNAL_NCQ_ERROR\n", __FUNCTION__,
-			    phy_id);
-		break;
-	case CLEAR_NCQ_ERROR:
-		ASD_DPRINTK("%s: phy%d: CLEAR_NCQ_ERROR\n", __FUNCTION__,
-			    phy_id);
-		break;
 	default:
 		ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__,
 			    phy_id, sb_opcode);
@@ -504,7 +591,7 @@
 
 		break;
 	}
-
+out:
 	asd_invalidate_edb(ascb, edb);
 }
 
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index de7c04d..e5a0ec3 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -64,7 +64,7 @@
 
 #define OCM_INIT_DIR_ENTRIES	5
 /***************************************************************************
-*  OCM dircetory default
+*  OCM directory default
 ***************************************************************************/
 static struct asd_ocm_dir OCMDirInit =
 {
@@ -73,7 +73,7 @@
 };
 
 /***************************************************************************
-*  OCM dircetory Entries default
+*  OCM directory Entries default
 ***************************************************************************/
 static struct asd_ocm_dir_ent OCMDirEntriesInit[OCM_INIT_DIR_ENTRIES] =
 {
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index ef8285c..668569e 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -294,6 +294,7 @@
 static int user_fifo_count = 0;
 static int user_fifo_size = 0;
 
+#ifndef MODULE
 static int __init fd_mcs_setup(char *str)
 {
 	static int done_setup = 0;
@@ -311,6 +312,7 @@
 }
 
 __setup("fd_mcs=", fd_mcs_setup);
+#endif /* !MODULE */
 
 static void print_banner(struct Scsi_Host *shpnt)
 {
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 68ef163..38c3a29 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -263,6 +263,10 @@
 		kthread_stop(shost->ehandler);
 	if (shost->work_q)
 		destroy_workqueue(shost->work_q);
+	if (shost->uspace_req_q) {
+		kfree(shost->uspace_req_q->queuedata);
+		scsi_free_queue(shost->uspace_req_q);
+	}
 
 	scsi_destroy_command_freelist(shost);
 	if (shost->bqt)
@@ -301,8 +305,8 @@
 	if (!shost)
 		return NULL;
 
-	spin_lock_init(&shost->default_lock);
-	scsi_assign_lock(shost, &shost->default_lock);
+	shost->host_lock = &shost->default_lock;
+	spin_lock_init(shost->host_lock);
 	shost->shost_state = SHOST_CREATED;
 	INIT_LIST_HEAD(&shost->__devices);
 	INIT_LIST_HEAD(&shost->__targets);
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index 4e247b6..6ac0633 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -3,3 +3,5 @@
 ibmvscsic-y			+= ibmvscsi.o
 ibmvscsic-$(CONFIG_PPC_ISERIES)	+= iseries_vscsi.o 
 ibmvscsic-$(CONFIG_PPC_PSERIES)	+= rpa_vscsi.o 
+
+obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvstgt.o
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
new file mode 100644
index 0000000..e28260f
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -0,0 +1,960 @@
+/*
+ * IBM eServer i/pSeries Virtual SCSI Target Driver
+ * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
+ *			   Santiago Leon (santil@us.ibm.com) IBM Corp.
+ *			   Linda Xie (lxie@us.ibm.com) IBM Corp.
+ *
+ * Copyright (C) 2005-2006 FUJITA Tomonori <tomof@acm.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tgt.h>
+#include <scsi/libsrp.h>
+#include <asm/hvcall.h>
+#include <asm/iommu.h>
+#include <asm/prom.h>
+#include <asm/vio.h>
+
+#include "ibmvscsi.h"
+
+#define	INITIAL_SRP_LIMIT	16
+#define	DEFAULT_MAX_SECTORS	512
+
+#define	TGT_NAME	"ibmvstgt"
+
+/*
+ * Hypervisor calls.
+ */
+#define h_copy_rdma(l, sa, sb, da, db) \
+			plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
+#define h_send_crq(ua, l, h) \
+			plpar_hcall_norets(H_SEND_CRQ, ua, l, h)
+#define h_reg_crq(ua, tok, sz)\
+			plpar_hcall_norets(H_REG_CRQ, ua, tok, sz);
+#define h_free_crq(ua) \
+			plpar_hcall_norets(H_FREE_CRQ, ua);
+
+/* tmp - will replace with SCSI logging stuff */
+#define eprintk(fmt, args...)					\
+do {								\
+	printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args);	\
+} while (0)
+/* #define dprintk eprintk */
+#define dprintk(fmt, args...)
+
+struct vio_port {
+	struct vio_dev *dma_dev;
+
+	struct crq_queue crq_queue;
+	struct work_struct crq_work;
+
+	unsigned long liobn;
+	unsigned long riobn;
+	struct srp_target *target;
+};
+
+static struct workqueue_struct *vtgtd;
+
+/*
+ * These are fixed for the system and come from the Open Firmware device tree.
+ * We just store them here to save getting them every time.
+ */
+static char system_id[64] = "";
+static char partition_name[97] = "UNKNOWN";
+static unsigned int partition_number = -1;
+
+static struct vio_port *target_to_port(struct srp_target *target)
+{
+	return (struct vio_port *) target->ldata;
+}
+
+static inline union viosrp_iu *vio_iu(struct iu_entry *iue)
+{
+	return (union viosrp_iu *) (iue->sbuf->buf);
+}
+
+static int send_iu(struct iu_entry *iue, uint64_t length, uint8_t format)
+{
+	struct srp_target *target = iue->target;
+	struct vio_port *vport = target_to_port(target);
+	long rc, rc1;
+	union {
+		struct viosrp_crq cooked;
+		uint64_t raw[2];
+	} crq;
+
+	/* First copy the SRP */
+	rc = h_copy_rdma(length, vport->liobn, iue->sbuf->dma,
+			 vport->riobn, iue->remote_token);
+
+	if (rc)
+		eprintk("Error %ld transferring data\n", rc);
+
+	crq.cooked.valid = 0x80;
+	crq.cooked.format = format;
+	crq.cooked.reserved = 0x00;
+	crq.cooked.timeout = 0x00;
+	crq.cooked.IU_length = length;
+	crq.cooked.IU_data_ptr = vio_iu(iue)->srp.rsp.tag;
+
+	if (rc == 0)
+		crq.cooked.status = 0x99;	/* Just needs to be non-zero */
+	else
+		crq.cooked.status = 0x00;
+
+	rc1 = h_send_crq(vport->dma_dev->unit_address, crq.raw[0], crq.raw[1]);
+
+	if (rc1) {
+		eprintk("%ld sending response\n", rc1);
+		return rc1;
+	}
+
+	return rc;
+}
+
+#define SRP_RSP_SENSE_DATA_LEN	18
+
+static int send_rsp(struct iu_entry *iue, struct scsi_cmnd *sc,
+		    unsigned char status, unsigned char asc)
+{
+	union viosrp_iu *iu = vio_iu(iue);
+	uint64_t tag = iu->srp.rsp.tag;
+
+	/* If the linked bit is on and status is good */
+	if (test_bit(V_LINKED, &iue->flags) && (status == NO_SENSE))
+		status = 0x10;
+
+	memset(iu, 0, sizeof(struct srp_rsp));
+	iu->srp.rsp.opcode = SRP_RSP;
+	iu->srp.rsp.req_lim_delta = 1;
+	iu->srp.rsp.tag = tag;
+
+	if (test_bit(V_DIOVER, &iue->flags))
+		iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;
+
+	iu->srp.rsp.data_in_res_cnt = 0;
+	iu->srp.rsp.data_out_res_cnt = 0;
+
+	iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID;
+
+	iu->srp.rsp.resp_data_len = 0;
+	iu->srp.rsp.status = status;
+	if (status) {
+		uint8_t *sense = iu->srp.rsp.data;
+
+		if (sc) {
+			iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
+			iu->srp.rsp.sense_data_len = SCSI_SENSE_BUFFERSIZE;
+			memcpy(sense, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE);
+		} else {
+			iu->srp.rsp.status = SAM_STAT_CHECK_CONDITION;
+			iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
+			iu->srp.rsp.sense_data_len = SRP_RSP_SENSE_DATA_LEN;
+
+			/* Valid bit and 'current errors' */
+			sense[0] = (0x1 << 7 | 0x70);
+			/* Sense key */
+			sense[2] = status;
+			/* Additional sense length */
+			sense[7] = 0xa;	/* 10 bytes */
+			/* Additional sense code */
+			sense[12] = asc;
+		}
+	}
+
+	send_iu(iue, sizeof(iu->srp.rsp) + SRP_RSP_SENSE_DATA_LEN,
+		VIOSRP_SRP_FORMAT);
+
+	return 0;
+}
+
+static void handle_cmd_queue(struct srp_target *target)
+{
+	struct Scsi_Host *shost = target->shost;
+	struct iu_entry *iue;
+	struct srp_cmd *cmd;
+	unsigned long flags;
+	int err;
+
+retry:
+	spin_lock_irqsave(&target->lock, flags);
+
+	list_for_each_entry(iue, &target->cmd_queue, ilist) {
+		if (!test_and_set_bit(V_FLYING, &iue->flags)) {
+			spin_unlock_irqrestore(&target->lock, flags);
+			cmd = iue->sbuf->buf;
+			err = srp_cmd_queue(shost, cmd, iue, 0);
+			if (err) {
+				eprintk("cannot queue cmd %p %d\n", cmd, err);
+				srp_iu_put(iue);
+			}
+			goto retry;
+		}
+	}
+
+	spin_unlock_irqrestore(&target->lock, flags);
+}
+
+static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
+			 struct srp_direct_buf *md, int nmd,
+			 enum dma_data_direction dir, unsigned int rest)
+{
+	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
+	struct srp_target *target = iue->target;
+	struct vio_port *vport = target_to_port(target);
+	dma_addr_t token;
+	long err;
+	unsigned int done = 0;
+	int i, sidx, soff;
+
+	sidx = soff = 0;
+	token = sg_dma_address(sg + sidx);
+
+	for (i = 0; i < nmd && rest; i++) {
+		unsigned int mdone, mlen;
+
+		mlen = min(rest, md[i].len);
+		for (mdone = 0; mlen;) {
+			int slen = min(sg_dma_len(sg + sidx) - soff, mlen);
+
+			if (dir == DMA_TO_DEVICE)
+				err = h_copy_rdma(slen,
+						  vport->riobn,
+						  md[i].va + mdone,
+						  vport->liobn,
+						  token + soff);
+			else
+				err = h_copy_rdma(slen,
+						  vport->liobn,
+						  token + soff,
+						  vport->riobn,
+						  md[i].va + mdone);
+
+			if (err != H_SUCCESS) {
+				eprintk("rdma error %d %d\n", dir, slen);
+				goto out;
+			}
+
+			mlen -= slen;
+			mdone += slen;
+			soff += slen;
+			done += slen;
+
+			if (soff == sg_dma_len(sg + sidx)) {
+				sidx++;
+				soff = 0;
+				token = sg_dma_address(sg + sidx);
+
+				if (sidx > nsg) {
+					eprintk("out of sg %p %d %d\n",
+						iue, sidx, nsg);
+					goto out;
+				}
+			}
+		};
+
+		rest -= mlen;
+	}
+out:
+
+	return 0;
+}
+
+static int ibmvstgt_transfer_data(struct scsi_cmnd *sc,
+				  void (*done)(struct scsi_cmnd *))
+{
+	struct iu_entry	*iue = (struct iu_entry *) sc->SCp.ptr;
+	int err;
+
+	err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
+
+	done(sc);
+
+	return err;
+}
+
+static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
+			     void (*done)(struct scsi_cmnd *))
+{
+	unsigned long flags;
+	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
+	struct srp_target *target = iue->target;
+
+	dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
+
+	spin_lock_irqsave(&target->lock, flags);
+	list_del(&iue->ilist);
+	spin_unlock_irqrestore(&target->lock, flags);
+
+	if (sc->result != SAM_STAT_GOOD) {
+		eprintk("operation failed %p %d %x\n",
+			iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
+		send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
+	} else
+		send_rsp(iue, sc, NO_SENSE, 0x00);
+
+	done(sc);
+	srp_iu_put(iue);
+	return 0;
+}
+
+int send_adapter_info(struct iu_entry *iue,
+		      dma_addr_t remote_buffer, uint16_t length)
+{
+	struct srp_target *target = iue->target;
+	struct vio_port *vport = target_to_port(target);
+	struct Scsi_Host *shost = target->shost;
+	dma_addr_t data_token;
+	struct mad_adapter_info_data *info;
+	int err;
+
+	info = dma_alloc_coherent(target->dev, sizeof(*info), &data_token,
+				  GFP_KERNEL);
+	if (!info) {
+		eprintk("bad dma_alloc_coherent %p\n", target);
+		return 1;
+	}
+
+	/* Get remote info */
+	err = h_copy_rdma(sizeof(*info), vport->riobn, remote_buffer,
+			  vport->liobn, data_token);
+	if (err == H_SUCCESS) {
+		dprintk("Client connect: %s (%d)\n",
+			info->partition_name, info->partition_number);
+	}
+
+	memset(info, 0, sizeof(*info));
+
+	strcpy(info->srp_version, "16.a");
+	strncpy(info->partition_name, partition_name,
+		sizeof(info->partition_name));
+	info->partition_number = partition_number;
+	info->mad_version = 1;
+	info->os_type = 2;
+	info->port_max_txu[0] = shost->hostt->max_sectors << 9;
+
+	/* Send our info to remote */
+	err = h_copy_rdma(sizeof(*info), vport->liobn, data_token,
+			  vport->riobn, remote_buffer);
+
+	dma_free_coherent(target->dev, sizeof(*info), info, data_token);
+
+	if (err != H_SUCCESS) {
+		eprintk("Error sending adapter info %d\n", err);
+		return 1;
+	}
+
+	return 0;
+}
+
+static void process_login(struct iu_entry *iue)
+{
+	union viosrp_iu *iu = vio_iu(iue);
+	struct srp_login_rsp *rsp = &iu->srp.login_rsp;
+	uint64_t tag = iu->srp.rsp.tag;
+
+	/* TODO handle case that requested size is wrong and
+	 * buffer format is wrong
+	 */
+	memset(iu, 0, sizeof(struct srp_login_rsp));
+	rsp->opcode = SRP_LOGIN_RSP;
+	rsp->req_lim_delta = INITIAL_SRP_LIMIT;
+	rsp->tag = tag;
+	rsp->max_it_iu_len = sizeof(union srp_iu);
+	rsp->max_ti_iu_len = sizeof(union srp_iu);
+	/* direct and indirect */
+	rsp->buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
+
+	send_iu(iue, sizeof(*rsp), VIOSRP_SRP_FORMAT);
+}
+
+static inline void queue_cmd(struct iu_entry *iue)
+{
+	struct srp_target *target = iue->target;
+	unsigned long flags;
+
+	spin_lock_irqsave(&target->lock, flags);
+	list_add_tail(&iue->ilist, &target->cmd_queue);
+	spin_unlock_irqrestore(&target->lock, flags);
+}
+
+static int process_tsk_mgmt(struct iu_entry *iue)
+{
+	union viosrp_iu *iu = vio_iu(iue);
+	int fn;
+
+	dprintk("%p %u\n", iue, iu->srp.tsk_mgmt.tsk_mgmt_func);
+
+	switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
+	case SRP_TSK_ABORT_TASK:
+		fn = ABORT_TASK;
+		break;
+	case SRP_TSK_ABORT_TASK_SET:
+		fn = ABORT_TASK_SET;
+		break;
+	case SRP_TSK_CLEAR_TASK_SET:
+		fn = CLEAR_TASK_SET;
+		break;
+	case SRP_TSK_LUN_RESET:
+		fn = LOGICAL_UNIT_RESET;
+		break;
+	case SRP_TSK_CLEAR_ACA:
+		fn = CLEAR_ACA;
+		break;
+	default:
+		fn = 0;
+	}
+	if (fn)
+		scsi_tgt_tsk_mgmt_request(iue->target->shost, fn,
+					  iu->srp.tsk_mgmt.task_tag,
+					  (struct scsi_lun *) &iu->srp.tsk_mgmt.lun,
+					  iue);
+	else
+		send_rsp(iue, NULL, ILLEGAL_REQUEST, 0x20);
+
+	return !fn;
+}
+
+static int process_mad_iu(struct iu_entry *iue)
+{
+	union viosrp_iu *iu = vio_iu(iue);
+	struct viosrp_adapter_info *info;
+	struct viosrp_host_config *conf;
+
+	switch (iu->mad.empty_iu.common.type) {
+	case VIOSRP_EMPTY_IU_TYPE:
+		eprintk("%s\n", "Unsupported EMPTY MAD IU");
+		break;
+	case VIOSRP_ERROR_LOG_TYPE:
+		eprintk("%s\n", "Unsupported ERROR LOG MAD IU");
+		iu->mad.error_log.common.status = 1;
+		send_iu(iue, sizeof(iu->mad.error_log),	VIOSRP_MAD_FORMAT);
+		break;
+	case VIOSRP_ADAPTER_INFO_TYPE:
+		info = &iu->mad.adapter_info;
+		info->common.status = send_adapter_info(iue, info->buffer,
+							info->common.length);
+		send_iu(iue, sizeof(*info), VIOSRP_MAD_FORMAT);
+		break;
+	case VIOSRP_HOST_CONFIG_TYPE:
+		conf = &iu->mad.host_config;
+		conf->common.status = 1;
+		send_iu(iue, sizeof(*conf), VIOSRP_MAD_FORMAT);
+		break;
+	default:
+		eprintk("Unknown type %u\n", iu->mad.empty_iu.common.type);
+	}
+
+	return 1;
+}
+
+static int process_srp_iu(struct iu_entry *iue)
+{
+	union viosrp_iu *iu = vio_iu(iue);
+	int done = 1;
+	u8 opcode = iu->srp.rsp.opcode;
+
+	switch (opcode) {
+	case SRP_LOGIN_REQ:
+		process_login(iue);
+		break;
+	case SRP_TSK_MGMT:
+		done = process_tsk_mgmt(iue);
+		break;
+	case SRP_CMD:
+		queue_cmd(iue);
+		done = 0;
+		break;
+	case SRP_LOGIN_RSP:
+	case SRP_I_LOGOUT:
+	case SRP_T_LOGOUT:
+	case SRP_RSP:
+	case SRP_CRED_REQ:
+	case SRP_CRED_RSP:
+	case SRP_AER_REQ:
+	case SRP_AER_RSP:
+		eprintk("Unsupported type %u\n", opcode);
+		break;
+	default:
+		eprintk("Unknown type %u\n", opcode);
+	}
+
+	return done;
+}
+
+static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
+{
+	struct vio_port *vport = target_to_port(target);
+	struct iu_entry *iue;
+	long err, done;
+
+	iue = srp_iu_get(target);
+	if (!iue) {
+		eprintk("Error getting IU from pool, %p\n", target);
+		return;
+	}
+
+	iue->remote_token = crq->IU_data_ptr;
+
+	err = h_copy_rdma(crq->IU_length, vport->riobn,
+			  iue->remote_token, vport->liobn, iue->sbuf->dma);
+
+	if (err != H_SUCCESS) {
+		eprintk("%ld transferring data error %p\n", err, iue);
+		done = 1;
+		goto out;
+	}
+
+	if (crq->format == VIOSRP_MAD_FORMAT)
+		done = process_mad_iu(iue);
+	else
+		done = process_srp_iu(iue);
+out:
+	if (done)
+		srp_iu_put(iue);
+}
+
+static irqreturn_t ibmvstgt_interrupt(int irq, void *data)
+{
+	struct srp_target *target = (struct srp_target *) data;
+	struct vio_port *vport = target_to_port(target);
+
+	vio_disable_interrupts(vport->dma_dev);
+	queue_work(vtgtd, &vport->crq_work);
+
+	return IRQ_HANDLED;
+}
+
+static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
+{
+	int err;
+	struct vio_port *vport = target_to_port(target);
+
+	queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL);
+	if (!queue->msgs)
+		goto malloc_failed;
+	queue->size = PAGE_SIZE / sizeof(*queue->msgs);
+
+	queue->msg_token = dma_map_single(target->dev, queue->msgs,
+					  queue->size * sizeof(*queue->msgs),
+					  DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(queue->msg_token))
+		goto map_failed;
+
+	err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
+			PAGE_SIZE);
+
+	/* If the adapter was left active for some reason (like kexec)
+	 * try freeing and re-registering
+	 */
+	if (err == H_RESOURCE) {
+		do {
+			err = h_free_crq(vport->dma_dev->unit_address);
+		} while (err == H_BUSY || H_IS_LONG_BUSY(err));
+
+		err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
+				PAGE_SIZE);
+	}
+
+	if (err != H_SUCCESS && err != 2) {
+		eprintk("Error 0x%x opening virtual adapter\n", err);
+		goto reg_crq_failed;
+	}
+
+	err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
+			  SA_INTERRUPT, "ibmvstgt", target);
+	if (err)
+		goto req_irq_failed;
+
+	vio_enable_interrupts(vport->dma_dev);
+
+	h_send_crq(vport->dma_dev->unit_address, 0xC001000000000000, 0);
+
+	queue->cur = 0;
+	spin_lock_init(&queue->lock);
+
+	return 0;
+
+req_irq_failed:
+	do {
+		err = h_free_crq(vport->dma_dev->unit_address);
+	} while (err == H_BUSY || H_IS_LONG_BUSY(err));
+
+reg_crq_failed:
+	dma_unmap_single(target->dev, queue->msg_token,
+			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
+map_failed:
+	free_page((unsigned long) queue->msgs);
+
+malloc_failed:
+	return -ENOMEM;
+}
+
+static void crq_queue_destroy(struct srp_target *target)
+{
+	struct vio_port *vport = target_to_port(target);
+	struct crq_queue *queue = &vport->crq_queue;
+	int err;
+
+	free_irq(vport->dma_dev->irq, target);
+	do {
+		err = h_free_crq(vport->dma_dev->unit_address);
+	} while (err == H_BUSY || H_IS_LONG_BUSY(err));
+
+	dma_unmap_single(target->dev, queue->msg_token,
+			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
+
+	free_page((unsigned long) queue->msgs);
+}
+
+static void process_crq(struct viosrp_crq *crq,	struct srp_target *target)
+{
+	struct vio_port *vport = target_to_port(target);
+	dprintk("%x %x\n", crq->valid, crq->format);
+
+	switch (crq->valid) {
+	case 0xC0:
+		/* initialization */
+		switch (crq->format) {
+		case 0x01:
+			h_send_crq(vport->dma_dev->unit_address,
+				   0xC002000000000000, 0);
+			break;
+		case 0x02:
+			break;
+		default:
+			eprintk("Unknown format %u\n", crq->format);
+		}
+		break;
+	case 0xFF:
+		/* transport event */
+		break;
+	case 0x80:
+		/* real payload */
+		switch (crq->format) {
+		case VIOSRP_SRP_FORMAT:
+		case VIOSRP_MAD_FORMAT:
+			process_iu(crq, target);
+			break;
+		case VIOSRP_OS400_FORMAT:
+		case VIOSRP_AIX_FORMAT:
+		case VIOSRP_LINUX_FORMAT:
+		case VIOSRP_INLINE_FORMAT:
+			eprintk("Unsupported format %u\n", crq->format);
+			break;
+		default:
+			eprintk("Unknown format %u\n", crq->format);
+		}
+		break;
+	default:
+		eprintk("unknown message type 0x%02x!?\n", crq->valid);
+	}
+}
+
+static inline struct viosrp_crq *next_crq(struct crq_queue *queue)
+{
+	struct viosrp_crq *crq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->lock, flags);
+	crq = &queue->msgs[queue->cur];
+	if (crq->valid & 0x80) {
+		if (++queue->cur == queue->size)
+			queue->cur = 0;
+	} else
+		crq = NULL;
+	spin_unlock_irqrestore(&queue->lock, flags);
+
+	return crq;
+}
+
+static void handle_crq(struct work_struct *work)
+{
+	struct vio_port *vport = container_of(work, struct vio_port, crq_work);
+	struct srp_target *target = vport->target;
+	struct viosrp_crq *crq;
+	int done = 0;
+
+	while (!done) {
+		while ((crq = next_crq(&vport->crq_queue)) != NULL) {
+			process_crq(crq, target);
+			crq->valid = 0x00;
+		}
+
+		vio_enable_interrupts(vport->dma_dev);
+
+		crq = next_crq(&vport->crq_queue);
+		if (crq) {
+			vio_disable_interrupts(vport->dma_dev);
+			process_crq(crq, target);
+			crq->valid = 0x00;
+		} else
+			done = 1;
+	}
+
+	handle_cmd_queue(target);
+}
+
+
+static int ibmvstgt_eh_abort_handler(struct scsi_cmnd *sc)
+{
+	unsigned long flags;
+	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
+	struct srp_target *target = iue->target;
+
+	dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
+
+	spin_lock_irqsave(&target->lock, flags);
+	list_del(&iue->ilist);
+	spin_unlock_irqrestore(&target->lock, flags);
+
+	srp_iu_put(iue);
+
+	return 0;
+}
+
+static int ibmvstgt_tsk_mgmt_response(u64 mid, int result)
+{
+	struct iu_entry *iue = (struct iu_entry *) ((void *) mid);
+	union viosrp_iu *iu = vio_iu(iue);
+	unsigned char status, asc;
+
+	eprintk("%p %d\n", iue, result);
+	status = NO_SENSE;
+	asc = 0;
+
+	switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
+	case SRP_TSK_ABORT_TASK:
+		asc = 0x14;
+		if (result)
+			status = ABORTED_COMMAND;
+		break;
+	default:
+		break;
+	}
+
+	send_rsp(iue, NULL, status, asc);
+	srp_iu_put(iue);
+
+	return 0;
+}
+
+static ssize_t system_id_show(struct class_device *cdev, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
+}
+
+static ssize_t partition_number_show(struct class_device *cdev, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
+}
+
+static ssize_t unit_address_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct srp_target *target = host_to_srp_target(shost);
+	struct vio_port *vport = target_to_port(target);
+	return snprintf(buf, PAGE_SIZE, "%x\n", vport->dma_dev->unit_address);
+}
+
+static CLASS_DEVICE_ATTR(system_id, S_IRUGO, system_id_show, NULL);
+static CLASS_DEVICE_ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
+static CLASS_DEVICE_ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
+
+static struct class_device_attribute *ibmvstgt_attrs[] = {
+	&class_device_attr_system_id,
+	&class_device_attr_partition_number,
+	&class_device_attr_unit_address,
+	NULL,
+};
+
+static struct scsi_host_template ibmvstgt_sht = {
+	.name			= TGT_NAME,
+	.module			= THIS_MODULE,
+	.can_queue		= INITIAL_SRP_LIMIT,
+	.sg_tablesize		= SG_ALL,
+	.use_clustering		= DISABLE_CLUSTERING,
+	.max_sectors		= DEFAULT_MAX_SECTORS,
+	.transfer_response	= ibmvstgt_cmd_done,
+	.transfer_data		= ibmvstgt_transfer_data,
+	.eh_abort_handler	= ibmvstgt_eh_abort_handler,
+	.tsk_mgmt_response	= ibmvstgt_tsk_mgmt_response,
+	.shost_attrs		= ibmvstgt_attrs,
+	.proc_name		= TGT_NAME,
+};
+
+static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
+{
+	struct Scsi_Host *shost;
+	struct srp_target *target;
+	struct vio_port *vport;
+	unsigned int *dma, dma_size;
+	int err = -ENOMEM;
+
+	vport = kzalloc(sizeof(struct vio_port), GFP_KERNEL);
+	if (!vport)
+		return err;
+	shost = scsi_host_alloc(&ibmvstgt_sht, sizeof(struct srp_target));
+	if (!shost)
+		goto free_vport;
+	err = scsi_tgt_alloc_queue(shost);
+	if (err)
+		goto put_host;
+
+	target = host_to_srp_target(shost);
+	target->shost = shost;
+	vport->dma_dev = dev;
+	target->ldata = vport;
+	vport->target = target;
+	err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT,
+			       SRP_MAX_IU_LEN);
+	if (err)
+		goto put_host;
+
+	dma = (unsigned int *) vio_get_attribute(dev, "ibm,my-dma-window",
+						 &dma_size);
+	if (!dma || dma_size != 40) {
+		eprintk("Couldn't get window property %d\n", dma_size);
+		err = -EIO;
+		goto free_srp_target;
+	}
+	vport->liobn = dma[0];
+	vport->riobn = dma[5];
+
+	INIT_WORK(&vport->crq_work, handle_crq);
+
+	err = crq_queue_create(&vport->crq_queue, target);
+	if (err)
+		goto free_srp_target;
+
+	err = scsi_add_host(shost, target->dev);
+	if (err)
+		goto destroy_queue;
+	return 0;
+
+destroy_queue:
+	crq_queue_destroy(target);
+free_srp_target:
+	srp_target_free(target);
+put_host:
+	scsi_host_put(shost);
+free_vport:
+	kfree(vport);
+	return err;
+}
+
+static int ibmvstgt_remove(struct vio_dev *dev)
+{
+	struct srp_target *target = (struct srp_target *) dev->dev.driver_data;
+	struct Scsi_Host *shost = target->shost;
+	struct vio_port *vport = target->ldata;
+
+	crq_queue_destroy(target);
+	scsi_remove_host(shost);
+	scsi_tgt_free_queue(shost);
+	srp_target_free(target);
+	kfree(vport);
+	scsi_host_put(shost);
+	return 0;
+}
+
+static struct vio_device_id ibmvstgt_device_table[] __devinitdata = {
+	{"v-scsi-host", "IBM,v-scsi-host"},
+	{"",""}
+};
+
+MODULE_DEVICE_TABLE(vio, ibmvstgt_device_table);
+
+static struct vio_driver ibmvstgt_driver = {
+	.id_table = ibmvstgt_device_table,
+	.probe = ibmvstgt_probe,
+	.remove = ibmvstgt_remove,
+	.driver = {
+		.name = "ibmvscsis",
+		.owner = THIS_MODULE,
+	}
+};
+
+static int get_system_info(void)
+{
+	struct device_node *rootdn;
+	const char *id, *model, *name;
+	unsigned int *num;
+
+	rootdn = find_path_device("/");
+	if (!rootdn)
+		return -ENOENT;
+
+	model = get_property(rootdn, "model", NULL);
+	id = get_property(rootdn, "system-id", NULL);
+	if (model && id)
+		snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
+
+	name = get_property(rootdn, "ibm,partition-name", NULL);
+	if (name)
+		strncpy(partition_name, name, sizeof(partition_name));
+
+	num = (unsigned int *) get_property(rootdn, "ibm,partition-no", NULL);
+	if (num)
+		partition_number = *num;
+
+	return 0;
+}
+
+static int ibmvstgt_init(void)
+{
+	int err = -ENOMEM;
+
+	printk("IBM eServer i/pSeries Virtual SCSI Target Driver\n");
+
+	vtgtd = create_workqueue("ibmvtgtd");
+	if (!vtgtd)
+		return err;
+
+	err = get_system_info();
+	if (err)
+		goto destroy_wq;
+
+	err = vio_register_driver(&ibmvstgt_driver);
+	if (err)
+		goto destroy_wq;
+
+	return 0;
+
+destroy_wq:
+	destroy_workqueue(vtgtd);
+	return err;
+}
+
+static void ibmvstgt_exit(void)
+{
+	printk("Unregister IBM virtual SCSI driver\n");
+
+	destroy_workqueue(vtgtd);
+	vio_unregister_driver(&ibmvstgt_driver);
+}
+
+MODULE_DESCRIPTION("IBM Virtual SCSI Target");
+MODULE_AUTHOR("Santiago Leon");
+MODULE_LICENSE("GPL");
+
+module_init(ibmvstgt_init);
+module_exit(ibmvstgt_exit);
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 1427a41..8f6b5bf 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -110,6 +110,7 @@
 } idescsi_scsi_t;
 
 static DEFINE_MUTEX(idescsi_ref_mutex);
+static int idescsi_nocd;			/* Set by module parameter to skip CD-ROM drives */
 
 #define ide_scsi_g(disk) \
 	container_of((disk)->private_data, struct ide_scsi_obj, driver)
@@ -1127,6 +1128,9 @@
 		warned = 1;
 	}
 
+	if (idescsi_nocd && drive->media == ide_cdrom)
+		return -ENODEV;
+
 	if (!strstr("ide-scsi", drive->driver_req) ||
 	    !drive->present ||
 	    drive->media == ide_disk ||
@@ -1187,6 +1191,8 @@
 	driver_unregister(&idescsi_driver.gen_driver);
 }
 
+module_param(idescsi_nocd, int, 0600);
+MODULE_PARM_DESC(idescsi_nocd, "Disable handling of CD-ROMs so they may be driven by ide-cd");
 module_init(init_idescsi_module);
 module_exit(exit_idescsi_module);
 MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index e31f612..0464c18 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -36,7 +36,7 @@
 	int base_hi;		/* Hi Base address for ECP-ISA chipset */
 	int mode;		/* Transfer mode                */
 	struct scsi_cmnd *cur_cmd;	/* Current queued command       */
-	struct work_struct imm_tq;	/* Polling interrupt stuff       */
+	struct delayed_work imm_tq;	/* Polling interrupt stuff       */
 	unsigned long jstart;	/* Jiffies at start             */
 	unsigned failed:1;	/* Failure flag                 */
 	unsigned dp:1;		/* Data phase present           */
@@ -733,9 +733,9 @@
  * the scheduler's task queue to generate a stream of call-backs and
  * complete the request when the drive is ready.
  */
-static void imm_interrupt(void *data)
+static void imm_interrupt(struct work_struct *work)
 {
-	imm_struct *dev = (imm_struct *) data;
+	imm_struct *dev = container_of(work, imm_struct, imm_tq.work);
 	struct scsi_cmnd *cmd = dev->cur_cmd;
 	struct Scsi_Host *host = cmd->device->host;
 	unsigned long flags;
@@ -745,7 +745,6 @@
 		return;
 	}
 	if (imm_engine(dev, cmd)) {
-		INIT_WORK(&dev->imm_tq, imm_interrupt, (void *) dev);
 		schedule_delayed_work(&dev->imm_tq, 1);
 		return;
 	}
@@ -953,8 +952,7 @@
 	cmd->result = DID_ERROR << 16;	/* default return code */
 	cmd->SCp.phase = 0;	/* bus free */
 
-	INIT_WORK(&dev->imm_tq, imm_interrupt, dev);
-	schedule_work(&dev->imm_tq);
+	schedule_delayed_work(&dev->imm_tq, 0);
 
 	imm_pb_claim(dev);
 
@@ -1225,7 +1223,7 @@
 	else
 		ports = 8;
 
-	INIT_WORK(&dev->imm_tq, imm_interrupt, dev);
+	INIT_DELAYED_WORK(&dev->imm_tq, imm_interrupt);
 
 	err = -ENOMEM;
 	host = scsi_host_alloc(&imm_template, sizeof(imm_struct *));
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index afed293..f160357 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -170,7 +170,7 @@
 static void i91uSCBPost(BYTE * pHcb, BYTE * pScb);
 
 /* PCI Devices supported by this driver */
-static struct pci_device_id i91u_pci_devices[] __devinitdata = {
+static struct pci_device_id i91u_pci_devices[] = {
 	{ PCI_VENDOR_ID_INIT,  I950_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{ PCI_VENDOR_ID_INIT,  I940_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{ PCI_VENDOR_ID_INIT,  I935_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2dde821..b318500 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -79,7 +79,6 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_transport.h>
 #include "ipr.h"
 
 /*
@@ -98,7 +97,7 @@
 
 /* This table describes the differences between DMA controller chips */
 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
-	{ /* Gemstone, Citrine, and Obsidian */
+	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
 		.mailbox = 0x0042C,
 		.cache_line_size = 0x20,
 		{
@@ -135,6 +134,7 @@
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
 };
@@ -1249,19 +1249,23 @@
 
 /**
  * ipr_log_hex_data - Log additional hex IOA error data.
+ * @ioa_cfg:	ioa config struct
  * @data:		IOA error data
  * @len:		data length
  *
  * Return value:
  * 	none
  **/
-static void ipr_log_hex_data(u32 *data, int len)
+static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
 {
 	int i;
 
 	if (len == 0)
 		return;
 
+	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
+		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
+
 	for (i = 0; i < len / 4; i += 4) {
 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
 			be32_to_cpu(data[i]),
@@ -1290,7 +1294,7 @@
 	ipr_err("%s\n", error->failure_reason);
 	ipr_err("Remote Adapter VPD:\n");
 	ipr_log_ext_vpd(&error->vpd);
-	ipr_log_hex_data(error->data,
+	ipr_log_hex_data(ioa_cfg, error->data,
 			 be32_to_cpu(hostrcb->hcam.length) -
 			 (offsetof(struct ipr_hostrcb_error, u) +
 			  offsetof(struct ipr_hostrcb_type_17_error, data)));
@@ -1315,12 +1319,225 @@
 	ipr_err("%s\n", error->failure_reason);
 	ipr_err("Remote Adapter VPD:\n");
 	ipr_log_vpd(&error->vpd);
-	ipr_log_hex_data(error->data,
+	ipr_log_hex_data(ioa_cfg, error->data,
 			 be32_to_cpu(hostrcb->hcam.length) -
 			 (offsetof(struct ipr_hostrcb_error, u) +
 			  offsetof(struct ipr_hostrcb_type_07_error, data)));
 }
 
+static const struct {
+	u8 active;
+	char *desc;
+} path_active_desc[] = {
+	{ IPR_PATH_NO_INFO, "Path" },
+	{ IPR_PATH_ACTIVE, "Active path" },
+	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
+};
+
+static const struct {
+	u8 state;
+	char *desc;
+} path_state_desc[] = {
+	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
+	{ IPR_PATH_HEALTHY, "is healthy" },
+	{ IPR_PATH_DEGRADED, "is degraded" },
+	{ IPR_PATH_FAILED, "is failed" }
+};
+
+/**
+ * ipr_log_fabric_path - Log a fabric path error
+ * @hostrcb:	hostrcb struct
+ * @fabric:		fabric descriptor
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
+				struct ipr_hostrcb_fabric_desc *fabric)
+{
+	int i, j;
+	u8 path_state = fabric->path_state;
+	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
+	u8 state = path_state & IPR_PATH_STATE_MASK;
+
+	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
+		if (path_active_desc[i].active != active)
+			continue;
+
+		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
+			if (path_state_desc[j].state != state)
+				continue;
+
+			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
+				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
+					     path_active_desc[i].desc, path_state_desc[j].desc,
+					     fabric->ioa_port);
+			} else if (fabric->cascaded_expander == 0xff) {
+				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
+					     path_active_desc[i].desc, path_state_desc[j].desc,
+					     fabric->ioa_port, fabric->phy);
+			} else if (fabric->phy == 0xff) {
+				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
+					     path_active_desc[i].desc, path_state_desc[j].desc,
+					     fabric->ioa_port, fabric->cascaded_expander);
+			} else {
+				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
+					     path_active_desc[i].desc, path_state_desc[j].desc,
+					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
+			}
+			return;
+		}
+	}
+
+	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
+		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
+}
+
+static const struct {
+	u8 type;
+	char *desc;
+} path_type_desc[] = {
+	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
+	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
+	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
+	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
+};
+
+static const struct {
+	u8 status;
+	char *desc;
+} path_status_desc[] = {
+	{ IPR_PATH_CFG_NO_PROB, "Functional" },
+	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
+	{ IPR_PATH_CFG_FAILED, "Failed" },
+	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
+	{ IPR_PATH_NOT_DETECTED, "Missing" },
+	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
+};
+
+static const char *link_rate[] = {
+	"unknown",
+	"disabled",
+	"phy reset problem",
+	"spinup hold",
+	"port selector",
+	"unknown",
+	"unknown",
+	"unknown",
+	"1.5Gbps",
+	"3.0Gbps",
+	"unknown",
+	"unknown",
+	"unknown",
+	"unknown",
+	"unknown",
+	"unknown"
+};
+
+/**
+ * ipr_log_path_elem - Log a fabric path element.
+ * @hostrcb:	hostrcb struct
+ * @cfg:		fabric path element struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
+			      struct ipr_hostrcb_config_element *cfg)
+{
+	int i, j;
+	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
+	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
+
+	if (type == IPR_PATH_CFG_NOT_EXIST)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
+		if (path_type_desc[i].type != type)
+			continue;
+
+		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
+			if (path_status_desc[j].status != status)
+				continue;
+
+			if (type == IPR_PATH_CFG_IOA_PORT) {
+				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
+					     path_status_desc[j].desc, path_type_desc[i].desc,
+					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+			} else {
+				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
+					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
+						     path_status_desc[j].desc, path_type_desc[i].desc,
+						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+				} else if (cfg->cascaded_expander == 0xff) {
+					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
+						     "WWN=%08X%08X\n", path_status_desc[j].desc,
+						     path_type_desc[i].desc, cfg->phy,
+						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+				} else if (cfg->phy == 0xff) {
+					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
+						     "WWN=%08X%08X\n", path_status_desc[j].desc,
+						     path_type_desc[i].desc, cfg->cascaded_expander,
+						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+				} else {
+					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
+						     "WWN=%08X%08X\n", path_status_desc[j].desc,
+						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
+						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+				}
+			}
+			return;
+		}
+	}
+
+	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
+		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
+		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+}
+
+/**
+ * ipr_log_fabric_error - Log a fabric error.
+ * @ioa_cfg:	ioa config struct
+ * @hostrcb:	hostrcb struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
+				 struct ipr_hostrcb *hostrcb)
+{
+	struct ipr_hostrcb_type_20_error *error;
+	struct ipr_hostrcb_fabric_desc *fabric;
+	struct ipr_hostrcb_config_element *cfg;
+	int i, add_len;
+
+	error = &hostrcb->hcam.u.error.u.type_20_error;
+	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
+
+	add_len = be32_to_cpu(hostrcb->hcam.length) -
+		(offsetof(struct ipr_hostrcb_error, u) +
+		 offsetof(struct ipr_hostrcb_type_20_error, desc));
+
+	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
+		ipr_log_fabric_path(hostrcb, fabric);
+		for_each_fabric_cfg(fabric, cfg)
+			ipr_log_path_elem(hostrcb, cfg);
+
+		add_len -= be16_to_cpu(fabric->length);
+		fabric = (struct ipr_hostrcb_fabric_desc *)
+			((unsigned long)fabric + be16_to_cpu(fabric->length));
+	}
+
+	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
+}
+
 /**
  * ipr_log_generic_error - Log an adapter error.
  * @ioa_cfg:	ioa config struct
@@ -1332,7 +1549,7 @@
 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
 				  struct ipr_hostrcb *hostrcb)
 {
-	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
+	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
 			 be32_to_cpu(hostrcb->hcam.length));
 }
 
@@ -1394,13 +1611,7 @@
 	if (!ipr_error_table[error_index].log_hcam)
 		return;
 
-	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
-		ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
-			   "%s\n", ipr_error_table[error_index].error);
-	} else {
-		dev_err(&ioa_cfg->pdev->dev, "%s\n",
-			ipr_error_table[error_index].error);
-	}
+	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 
 	/* Set indication we have logged an error */
 	ioa_cfg->errors_logged++;
@@ -1437,6 +1648,9 @@
 	case IPR_HOST_RCB_OVERLAY_ID_17:
 		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
 		break;
+	case IPR_HOST_RCB_OVERLAY_ID_20:
+		ipr_log_fabric_error(ioa_cfg, hostrcb);
+		break;
 	case IPR_HOST_RCB_OVERLAY_ID_1:
 	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
 	default:
@@ -2093,7 +2307,7 @@
 
 /**
  * ipr_worker_thread - Worker thread
- * @data:		ioa config struct
+ * @work:		ioa config struct
  *
  * Called at task level from a work thread. This function takes care
  * of adding and removing device from the mid-layer as configuration
@@ -2102,13 +2316,14 @@
  * Return value:
  * 	nothing
  **/
-static void ipr_worker_thread(void *data)
+static void ipr_worker_thread(struct work_struct *work)
 {
 	unsigned long lock_flags;
 	struct ipr_resource_entry *res;
 	struct scsi_device *sdev;
 	struct ipr_dump *dump;
-	struct ipr_ioa_cfg *ioa_cfg = data;
+	struct ipr_ioa_cfg *ioa_cfg =
+		container_of(work, struct ipr_ioa_cfg, work_q);
 	u8 bus, target, lun;
 	int did_work;
 
@@ -2969,7 +3184,6 @@
 	struct ipr_dump *dump;
 	unsigned long lock_flags = 0;
 
-	ENTER;
 	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
 
 	if (!dump) {
@@ -2996,7 +3210,6 @@
 	}
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
-	LEAVE;
 	return 0;
 }
 
@@ -3573,6 +3786,12 @@
 
 	ENTER;
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	while (ioa_cfg->in_reset_reload) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	}
+
 	res = sata_port->res;
 	if (res) {
 		rc = ipr_device_reset(ioa_cfg, res);
@@ -3636,6 +3855,10 @@
 		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
 			if (ipr_cmd->scsi_cmd)
 				ipr_cmd->done = ipr_scsi_eh_done;
+			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
+				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
+				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+			}
 		}
 	}
 
@@ -3770,7 +3993,7 @@
 	 */
 	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
 		return FAILED;
-	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
+	if (!res || !ipr_is_gscsi(res))
 		return FAILED;
 
 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
@@ -4615,7 +4838,7 @@
  * Return value:
  * 	0 on success / other on failure
  **/
-int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 {
 	struct ipr_resource_entry *res;
 
@@ -4648,40 +4871,6 @@
 	return buffer;
 }
 
-/**
- * ipr_scsi_timed_out - Handle scsi command timeout
- * @scsi_cmd:	scsi command struct
- *
- * Return value:
- * 	EH_NOT_HANDLED
- **/
-enum scsi_eh_timer_return ipr_scsi_timed_out(struct scsi_cmnd *scsi_cmd)
-{
-	struct ipr_ioa_cfg *ioa_cfg;
-	struct ipr_cmnd *ipr_cmd;
-	unsigned long flags;
-
-	ENTER;
-	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
-	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
-
-	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
-		if (ipr_cmd->qc && ipr_cmd->qc->scsicmd == scsi_cmd) {
-			ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
-			ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
-			break;
-		}
-	}
-
-	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
-	LEAVE;
-	return EH_NOT_HANDLED;
-}
-
-static struct scsi_transport_template ipr_transport_template = {
-	.eh_timed_out = ipr_scsi_timed_out
-};
-
 static struct scsi_host_template driver_template = {
 	.module = THIS_MODULE,
 	.name = "IPR",
@@ -4776,6 +4965,12 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+	while (ioa_cfg->in_reset_reload) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+	}
+
 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
 		if (ipr_cmd->qc == qc) {
 			ipr_device_reset(ioa_cfg, sata_port->res);
@@ -6745,7 +6940,7 @@
 		return -ENOMEM;
 
 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
-		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
+		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
 
 		if (!ipr_cmd) {
 			ipr_free_cmd_blks(ioa_cfg);
@@ -6832,6 +7027,7 @@
 
 		ioa_cfg->hostrcb[i]->hostrcb_dma =
 			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
+		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
 	}
 
@@ -6926,7 +7122,7 @@
 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
-	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
+	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
 	ioa_cfg->sdt_state = INACTIVE;
 	if (ipr_enable_cache)
@@ -7017,7 +7213,6 @@
 
 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
-	host->transportt = &ipr_transport_template;
 	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
 		      sata_port_info.flags, &ipr_sata_ops);
 
@@ -7351,12 +7546,24 @@
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
+	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
+	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
 	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
+	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
+	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
+	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B8,
+	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
+	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7,
+	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
@@ -7366,6 +7573,9 @@
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
 		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F,
+		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 6d03528..9f62a1d 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -37,8 +37,8 @@
 /*
  * Literals
  */
-#define IPR_DRIVER_VERSION "2.2.0"
-#define IPR_DRIVER_DATE "(September 25, 2006)"
+#define IPR_DRIVER_VERSION "2.3.0"
+#define IPR_DRIVER_DATE "(November 8, 2006)"
 
 /*
  * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -54,6 +54,8 @@
  */
 #define IPR_NUM_BASE_CMD_BLKS				100
 
+#define PCI_DEVICE_ID_IBM_OBSIDIAN_E	0x0339
+
 #define IPR_SUBS_DEV_ID_2780	0x0264
 #define IPR_SUBS_DEV_ID_5702	0x0266
 #define IPR_SUBS_DEV_ID_5703	0x0278
@@ -66,7 +68,11 @@
 #define IPR_SUBS_DEV_ID_571F	0x02D5
 #define IPR_SUBS_DEV_ID_572A	0x02C1
 #define IPR_SUBS_DEV_ID_572B	0x02C2
+#define IPR_SUBS_DEV_ID_572F	0x02C3
 #define IPR_SUBS_DEV_ID_575B	0x030D
+#define IPR_SUBS_DEV_ID_575C	0x0338
+#define IPR_SUBS_DEV_ID_57B7	0x0360
+#define IPR_SUBS_DEV_ID_57B8	0x02C2
 
 #define IPR_NAME				"ipr"
 
@@ -98,6 +104,7 @@
 #define IPR_IOASC_IOA_WAS_RESET			0x10000001
 #define IPR_IOASC_PCI_ACCESS_ERROR			0x10000002
 
+#define IPR_DEFAULT_MAX_ERROR_DUMP			984
 #define IPR_NUM_LOG_HCAMS				2
 #define IPR_NUM_CFG_CHG_HCAMS				2
 #define IPR_NUM_HCAMS	(IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
@@ -731,6 +738,64 @@
 	u32 data[476];
 }__attribute__((packed, aligned (4)));
 
+struct ipr_hostrcb_config_element {
+	u8 type_status;
+#define IPR_PATH_CFG_TYPE_MASK	0xF0
+#define IPR_PATH_CFG_NOT_EXIST	0x00
+#define IPR_PATH_CFG_IOA_PORT		0x10
+#define IPR_PATH_CFG_EXP_PORT		0x20
+#define IPR_PATH_CFG_DEVICE_PORT	0x30
+#define IPR_PATH_CFG_DEVICE_LUN	0x40
+
+#define IPR_PATH_CFG_STATUS_MASK	0x0F
+#define IPR_PATH_CFG_NO_PROB		0x00
+#define IPR_PATH_CFG_DEGRADED		0x01
+#define IPR_PATH_CFG_FAILED		0x02
+#define IPR_PATH_CFG_SUSPECT		0x03
+#define IPR_PATH_NOT_DETECTED		0x04
+#define IPR_PATH_INCORRECT_CONN	0x05
+
+	u8 cascaded_expander;
+	u8 phy;
+	u8 link_rate;
+#define IPR_PHY_LINK_RATE_MASK	0x0F
+
+	__be32 wwid[2];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_fabric_desc {
+	__be16 length;
+	u8 ioa_port;
+	u8 cascaded_expander;
+	u8 phy;
+	u8 path_state;
+#define IPR_PATH_ACTIVE_MASK		0xC0
+#define IPR_PATH_NO_INFO		0x00
+#define IPR_PATH_ACTIVE			0x40
+#define IPR_PATH_NOT_ACTIVE		0x80
+
+#define IPR_PATH_STATE_MASK		0x0F
+#define IPR_PATH_STATE_NO_INFO	0x00
+#define IPR_PATH_HEALTHY		0x01
+#define IPR_PATH_DEGRADED		0x02
+#define IPR_PATH_FAILED			0x03
+
+	__be16 num_entries;
+	struct ipr_hostrcb_config_element elem[1];
+}__attribute__((packed, aligned (4)));
+
+#define for_each_fabric_cfg(fabric, cfg) \
+		for (cfg = (fabric)->elem; \
+			cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
+			cfg++)
+
+struct ipr_hostrcb_type_20_error {
+	u8 failure_reason[64];
+	u8 reserved[3];
+	u8 num_entries;
+	struct ipr_hostrcb_fabric_desc desc[1];
+}__attribute__((packed, aligned (4)));
+
 struct ipr_hostrcb_error {
 	__be32 failing_dev_ioasc;
 	struct ipr_res_addr failing_dev_res_addr;
@@ -747,6 +812,7 @@
 		struct ipr_hostrcb_type_13_error type_13_error;
 		struct ipr_hostrcb_type_14_error type_14_error;
 		struct ipr_hostrcb_type_17_error type_17_error;
+		struct ipr_hostrcb_type_20_error type_20_error;
 	} u;
 }__attribute__((packed, aligned (4)));
 
@@ -786,6 +852,7 @@
 #define IPR_HOST_RCB_OVERLAY_ID_14				0x14
 #define IPR_HOST_RCB_OVERLAY_ID_16				0x16
 #define IPR_HOST_RCB_OVERLAY_ID_17				0x17
+#define IPR_HOST_RCB_OVERLAY_ID_20				0x20
 #define IPR_HOST_RCB_OVERLAY_ID_DEFAULT			0xFF
 
 	u8 reserved1[3];
@@ -805,6 +872,7 @@
 	struct ipr_hcam hcam;
 	dma_addr_t hostrcb_dma;
 	struct list_head queue;
+	struct ipr_ioa_cfg *ioa_cfg;
 };
 
 /* IPR smart dump table structures */
@@ -1283,6 +1351,17 @@
 	}								\
 }
 
+#define ipr_hcam_err(hostrcb, fmt, ...)					\
+{													\
+	if (ipr_is_device(&(hostrcb)->hcam.u.error.failing_dev_res_addr)) {		\
+		ipr_ra_err((hostrcb)->ioa_cfg,							\
+				(hostrcb)->hcam.u.error.failing_dev_res_addr,			\
+				fmt, ##__VA_ARGS__);							\
+	} else {											\
+		dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, ##__VA_ARGS__);		\
+	}												\
+}
+
 #define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
 	__FILE__, __FUNCTION__, __LINE__)
 
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index f06a06a..8b704f7 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -5001,7 +5001,7 @@
 				break;
 
 			/* Delay for 1 Second */
-			msleep(IPS_ONE_SEC);
+			MDELAY(IPS_ONE_SEC);
 		}
 
 		if (j >= 45)
@@ -5027,7 +5027,7 @@
 				break;
 
 			/* Delay for 1 Second */
-			msleep(IPS_ONE_SEC);
+			MDELAY(IPS_ONE_SEC);
 		}
 
 		if (j >= 240)
@@ -5045,7 +5045,7 @@
 			break;
 
 		/* Delay for 1 Second */
-		msleep(IPS_ONE_SEC);
+		MDELAY(IPS_ONE_SEC);
 	}
 
 	if (i >= 240)
@@ -5095,7 +5095,7 @@
 				break;
 
 			/* Delay for 1 Second */
-			msleep(IPS_ONE_SEC);
+			MDELAY(IPS_ONE_SEC);
 		}
 
 		if (j >= 45)
@@ -5121,7 +5121,7 @@
 				break;
 
 			/* Delay for 1 Second */
-			msleep(IPS_ONE_SEC);
+			MDELAY(IPS_ONE_SEC);
 		}
 
 		if (j >= 240)
@@ -5139,7 +5139,7 @@
 			break;
 
 		/* Delay for 1 Second */
-		msleep(IPS_ONE_SEC);
+		MDELAY(IPS_ONE_SEC);
 	}
 
 	if (i >= 240)
@@ -5191,7 +5191,7 @@
 			break;
 
 		/* Delay for 1 Second */
-		msleep(IPS_ONE_SEC);
+		MDELAY(IPS_ONE_SEC);
 	}
 
 	if (i >= 45) {
@@ -5217,7 +5217,7 @@
 			if (Post != 0x4F00)
 				break;
 			/* Delay for 1 Second */
-			msleep(IPS_ONE_SEC);
+			MDELAY(IPS_ONE_SEC);
 		}
 
 		if (i >= 120) {
@@ -5247,7 +5247,7 @@
 			break;
 
 		/* Delay for 1 Second */
-		msleep(IPS_ONE_SEC);
+		MDELAY(IPS_ONE_SEC);
 	}
 
 	if (i >= 240) {
@@ -5307,12 +5307,12 @@
 		outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR);
 
 		/* Delay for 1 Second */
-		msleep(IPS_ONE_SEC);
+		MDELAY(IPS_ONE_SEC);
 
 		outb(0, ha->io_addr + IPS_REG_SCPR);
 
 		/* Delay for 1 Second */
-		msleep(IPS_ONE_SEC);
+		MDELAY(IPS_ONE_SEC);
 
 		if ((*ha->func.init) (ha))
 			break;
@@ -5352,12 +5352,12 @@
 		writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR);
 
 		/* Delay for 1 Second */
-		msleep(IPS_ONE_SEC);
+		MDELAY(IPS_ONE_SEC);
 
 		writeb(0, ha->mem_ptr + IPS_REG_SCPR);
 
 		/* Delay for 1 Second */
-		msleep(IPS_ONE_SEC);
+		MDELAY(IPS_ONE_SEC);
 
 		if ((*ha->func.init) (ha))
 			break;
@@ -5398,7 +5398,7 @@
 		writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR);
 
 		/* Delay for 5 Seconds */
-		msleep(5 * IPS_ONE_SEC);
+		MDELAY(5 * IPS_ONE_SEC);
 
 		/* Do a PCI config read to wait for adapter */
 		pci_read_config_byte(ha->pcidev, 4, &junk);
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 34680f3..b726dcc 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -51,6 +51,7 @@
    #define _IPS_H_
 
 #include <linux/version.h>
+#include <linux/nmi.h>
    #include <asm/uaccess.h>
    #include <asm/io.h>
 
@@ -116,9 +117,11 @@
             dev_printk(level , &((pcidev)->dev) , format , ## arg)
    #endif
 
-   #ifndef MDELAY
-      #define MDELAY mdelay
-   #endif
+   #define MDELAY(n)			\
+	do {				\
+		mdelay(n);		\
+		touch_nmi_watchdog();	\
+	} while (0)
 
    #ifndef min
       #define min(x,y) ((x) < (y) ? x : y)
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 5d88621..e11b23c 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -719,9 +719,10 @@
 	return rc;
 }
 
-static void iscsi_xmitworker(void *data)
+static void iscsi_xmitworker(struct work_struct *work)
 {
-	struct iscsi_conn *conn = data;
+	struct iscsi_conn *conn =
+		container_of(work, struct iscsi_conn, xmitwork);
 	int rc;
 	/*
 	 * serialize Xmit worker on a per-connection basis.
@@ -1512,7 +1513,7 @@
 	if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
 		goto mgmtqueue_alloc_fail;
 
-	INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn);
+	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
 	/* allocate login_mtask used for the login/text sequences */
 	spin_lock_bh(&session->lock);
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index d977bd4..fb7df7b 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -647,10 +647,12 @@
  * Discover process only interrogates devices in order to discover the
  * domain.
  */
-static void sas_discover_domain(void *data)
+static void sas_discover_domain(struct work_struct *work)
 {
 	int error = 0;
-	struct asd_sas_port *port = data;
+	struct sas_discovery_event *ev =
+		container_of(work, struct sas_discovery_event, work);
+	struct asd_sas_port *port = ev->port;
 
 	sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock,
 			&port->disc.pending);
@@ -692,10 +694,12 @@
 		    current->pid, error);
 }
 
-static void sas_revalidate_domain(void *data)
+static void sas_revalidate_domain(struct work_struct *work)
 {
 	int res = 0;
-	struct asd_sas_port *port = data;
+	struct sas_discovery_event *ev =
+		container_of(work, struct sas_discovery_event, work);
+	struct asd_sas_port *port = ev->port;
 
 	sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock,
 			&port->disc.pending);
@@ -722,7 +726,7 @@
 	BUG_ON(ev >= DISC_NUM_EVENTS);
 
 	sas_queue_event(ev, &disc->disc_event_lock, &disc->pending,
-			&disc->disc_work[ev], port->ha->core.shost);
+			&disc->disc_work[ev].work, port->ha->core.shost);
 
 	return 0;
 }
@@ -737,13 +741,15 @@
 {
 	int i;
 
-	static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = {
+	static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
 		[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
 		[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
 	};
 
 	spin_lock_init(&disc->disc_event_lock);
 	disc->pending = 0;
-	for (i = 0; i < DISC_NUM_EVENTS; i++)
-		INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port);
+	for (i = 0; i < DISC_NUM_EVENTS; i++) {
+		INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
+		disc->disc_work[i].port = port;
+	}
 }
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 19110ed..d83392e 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -31,7 +31,7 @@
 	BUG_ON(event >= HA_NUM_EVENTS);
 
 	sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending,
-			&sas_ha->ha_events[event], sas_ha->core.shost);
+			&sas_ha->ha_events[event].work, sas_ha->core.shost);
 }
 
 static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
@@ -41,7 +41,7 @@
 	BUG_ON(event >= PORT_NUM_EVENTS);
 
 	sas_queue_event(event, &ha->event_lock, &phy->port_events_pending,
-			&phy->port_events[event], ha->core.shost);
+			&phy->port_events[event].work, ha->core.shost);
 }
 
 static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
@@ -51,12 +51,12 @@
 	BUG_ON(event >= PHY_NUM_EVENTS);
 
 	sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending,
-			&phy->phy_events[event], ha->core.shost);
+			&phy->phy_events[event].work, ha->core.shost);
 }
 
 int sas_init_events(struct sas_ha_struct *sas_ha)
 {
-	static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = {
+	static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = {
 		[HAE_RESET] = sas_hae_reset,
 	};
 
@@ -64,8 +64,10 @@
 
 	spin_lock_init(&sas_ha->event_lock);
 
-	for (i = 0; i < HA_NUM_EVENTS; i++)
-		INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha);
+	for (i = 0; i < HA_NUM_EVENTS; i++) {
+		INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
+		sas_ha->ha_events[i].ha = sas_ha;
+	}
 
 	sas_ha->notify_ha_event = notify_ha_event;
 	sas_ha->notify_port_event = notify_port_event;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index e34a934..d31e6fa 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -597,10 +597,15 @@
 	child->iproto = phy->attached_iproto;
 	memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
 	sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
-	phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
-	BUG_ON(!phy->port);
-	/* FIXME: better error handling*/
-	BUG_ON(sas_port_add(phy->port) != 0);
+	if (!phy->port) {
+		phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
+		if (unlikely(!phy->port))
+			goto out_err;
+		if (unlikely(sas_port_add(phy->port) != 0)) {
+			sas_port_free(phy->port);
+			goto out_err;
+		}
+	}
 	sas_ex_get_linkrate(parent, child, phy);
 
 	if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) {
@@ -615,8 +620,7 @@
 			SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
 				    "0x%x\n", SAS_ADDR(parent->sas_addr),
 				    phy_id, res);
-			kfree(child);
-			return NULL;
+			goto out_free;
 		}
 		memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis,
 		       sizeof(struct dev_to_host_fis));
@@ -627,14 +631,14 @@
 				    "%016llx:0x%x returned 0x%x\n",
 				    SAS_ADDR(child->sas_addr),
 				    SAS_ADDR(parent->sas_addr), phy_id, res);
-			kfree(child);
-			return NULL;
+			goto out_free;
 		}
 	} else if (phy->attached_tproto & SAS_PROTO_SSP) {
 		child->dev_type = SAS_END_DEV;
 		rphy = sas_end_device_alloc(phy->port);
 		/* FIXME: error handling */
-		BUG_ON(!rphy);
+		if (unlikely(!rphy))
+			goto out_free;
 		child->tproto = phy->attached_tproto;
 		sas_init_dev(child);
 
@@ -651,9 +655,7 @@
 				    "at %016llx:0x%x returned 0x%x\n",
 				    SAS_ADDR(child->sas_addr),
 				    SAS_ADDR(parent->sas_addr), phy_id, res);
-			/* FIXME: this kfrees list elements without removing them */
-			//kfree(child);
-			return NULL;
+			goto out_list_del;
 		}
 	} else {
 		SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n",
@@ -663,6 +665,16 @@
 
 	list_add_tail(&child->siblings, &parent_ex->children);
 	return child;
+
+ out_list_del:
+	list_del(&child->dev_list_node);
+	sas_rphy_free(rphy);
+ out_free:
+	sas_port_delete(phy->port);
+ out_err:
+	phy->port = NULL;
+	kfree(child);
+	return NULL;
 }
 
 static struct domain_device *sas_ex_discover_expander(
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index c836a23..2f0c07f 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -36,7 +36,7 @@
 
 #include "../scsi_sas_internal.h"
 
-kmem_cache_t *sas_task_cache;
+struct kmem_cache *sas_task_cache;
 
 /*------------ SAS addr hash -----------*/
 void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
@@ -65,9 +65,11 @@
 
 /* ---------- HA events ---------- */
 
-void sas_hae_reset(void *data)
+void sas_hae_reset(struct work_struct *work)
 {
-	struct sas_ha_struct *ha = data;
+	struct sas_ha_event *ev =
+		container_of(work, struct sas_ha_event, work);
+	struct sas_ha_struct *ha = ev->ha;
 
 	sas_begin_event(HAE_RESET, &ha->event_lock,
 			&ha->pending);
@@ -112,6 +114,8 @@
 		}
 	}
 
+	INIT_LIST_HEAD(&sas_ha->eh_done_q);
+
 	return 0;
 
 Undo_ports:
@@ -142,7 +146,7 @@
 	return sas_smp_get_phy_events(phy);
 }
 
-static int sas_phy_reset(struct sas_phy *phy, int hard_reset)
+int sas_phy_reset(struct sas_phy *phy, int hard_reset)
 {
 	int ret;
 	enum phy_func reset_type;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index bffcee4..137d7e4 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -60,11 +60,11 @@
 
 void sas_deform_port(struct asd_sas_phy *phy);
 
-void sas_porte_bytes_dmaed(void *);
-void sas_porte_broadcast_rcvd(void *);
-void sas_porte_link_reset_err(void *);
-void sas_porte_timer_event(void *);
-void sas_porte_hard_reset(void *);
+void sas_porte_bytes_dmaed(struct work_struct *work);
+void sas_porte_broadcast_rcvd(struct work_struct *work);
+void sas_porte_link_reset_err(struct work_struct *work);
+void sas_porte_timer_event(struct work_struct *work);
+void sas_porte_hard_reset(struct work_struct *work);
 
 int sas_notify_lldd_dev_found(struct domain_device *);
 void sas_notify_lldd_dev_gone(struct domain_device *);
@@ -75,7 +75,7 @@
 
 struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
 
-void sas_hae_reset(void *);
+void sas_hae_reset(struct work_struct *work);
 
 static inline void sas_queue_event(int event, spinlock_t *lock,
 				   unsigned long *pending,
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index 9340cdb..b459c4b 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -30,9 +30,11 @@
 
 /* ---------- Phy events ---------- */
 
-static void sas_phye_loss_of_signal(void *data)
+static void sas_phye_loss_of_signal(struct work_struct *work)
 {
-	struct asd_sas_phy *phy = data;
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 
 	sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
 			&phy->phy_events_pending);
@@ -40,18 +42,22 @@
 	sas_deform_port(phy);
 }
 
-static void sas_phye_oob_done(void *data)
+static void sas_phye_oob_done(struct work_struct *work)
 {
-	struct asd_sas_phy *phy = data;
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 
 	sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock,
 			&phy->phy_events_pending);
 	phy->error = 0;
 }
 
-static void sas_phye_oob_error(void *data)
+static void sas_phye_oob_error(struct work_struct *work)
 {
-	struct asd_sas_phy *phy = data;
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 	struct sas_ha_struct *sas_ha = phy->ha;
 	struct asd_sas_port *port = phy->port;
 	struct sas_internal *i =
@@ -80,9 +86,11 @@
 	}
 }
 
-static void sas_phye_spinup_hold(void *data)
+static void sas_phye_spinup_hold(struct work_struct *work)
 {
-	struct asd_sas_phy *phy = data;
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 	struct sas_ha_struct *sas_ha = phy->ha;
 	struct sas_internal *i =
 		to_sas_internal(sas_ha->core.shost->transportt);
@@ -100,14 +108,14 @@
 {
 	int i;
 
-	static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = {
+	static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
 		[PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
 		[PHYE_OOB_DONE] = sas_phye_oob_done,
 		[PHYE_OOB_ERROR] = sas_phye_oob_error,
 		[PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
 	};
 
-	static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = {
+	static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
 		[PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
 		[PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
 		[PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
@@ -122,13 +130,18 @@
 
 		phy->error = 0;
 		INIT_LIST_HEAD(&phy->port_phy_el);
-		for (k = 0; k < PORT_NUM_EVENTS; k++)
-			INIT_WORK(&phy->port_events[k], sas_port_event_fns[k],
-				  phy);
+		for (k = 0; k < PORT_NUM_EVENTS; k++) {
+			INIT_WORK(&phy->port_events[k].work,
+				  sas_port_event_fns[k]);
+			phy->port_events[k].phy = phy;
+		}
 
-		for (k = 0; k < PHY_NUM_EVENTS; k++)
-			INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k],
-				  phy);
+		for (k = 0; k < PHY_NUM_EVENTS; k++) {
+			INIT_WORK(&phy->phy_events[k].work,
+				  sas_phy_event_fns[k]);
+			phy->phy_events[k].phy = phy;
+		}
+
 		phy->port = NULL;
 		phy->ha = sas_ha;
 		spin_lock_init(&phy->frame_rcvd_lock);
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 253cdcf..971c37c 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -181,9 +181,11 @@
 
 /* ---------- SAS port events ---------- */
 
-void sas_porte_bytes_dmaed(void *data)
+void sas_porte_bytes_dmaed(struct work_struct *work)
 {
-	struct asd_sas_phy *phy = data;
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 
 	sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock,
 			&phy->port_events_pending);
@@ -191,11 +193,13 @@
 	sas_form_port(phy);
 }
 
-void sas_porte_broadcast_rcvd(void *data)
+void sas_porte_broadcast_rcvd(struct work_struct *work)
 {
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 	unsigned long flags;
 	u32 prim;
-	struct asd_sas_phy *phy = data;
 
 	sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock,
 			&phy->port_events_pending);
@@ -208,9 +212,11 @@
 	sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
 }
 
-void sas_porte_link_reset_err(void *data)
+void sas_porte_link_reset_err(struct work_struct *work)
 {
-	struct asd_sas_phy *phy = data;
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 
 	sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
 			&phy->port_events_pending);
@@ -218,9 +224,11 @@
 	sas_deform_port(phy);
 }
 
-void sas_porte_timer_event(void *data)
+void sas_porte_timer_event(struct work_struct *work)
 {
-	struct asd_sas_phy *phy = data;
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 
 	sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
 			&phy->port_events_pending);
@@ -228,9 +236,11 @@
 	sas_deform_port(phy);
 }
 
-void sas_porte_hard_reset(void *data)
+void sas_porte_hard_reset(struct work_struct *work)
 {
-	struct asd_sas_phy *phy = data;
+	struct asd_sas_event *ev =
+		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_phy *phy = ev->phy;
 
 	sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
 			&phy->port_events_pending);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index e46e793..22672d5 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -29,9 +29,11 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
 #include "../scsi_sas_internal.h"
+#include "../scsi_transport_api.h"
 
 #include <linux/err.h>
 #include <linux/blkdev.h>
@@ -46,6 +48,7 @@
 {
 	struct task_status_struct *ts = &task->task_status;
 	struct scsi_cmnd *sc = task->uldd_task;
+	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(sc->device->host);
 	unsigned ts_flags = task->task_state_flags;
 	int hs = 0, stat = 0;
 
@@ -116,7 +119,7 @@
 	sas_free_task(task);
 	/* This is very ugly but this is how SCSI Core works. */
 	if (ts_flags & SAS_TASK_STATE_ABORTED)
-		scsi_finish_command(sc);
+		scsi_eh_finish_cmd(sc, &sas_ha->eh_done_q);
 	else
 		sc->scsi_done(sc);
 }
@@ -307,6 +310,15 @@
 		spin_unlock_irqrestore(&core->task_queue_lock, flags);
 	}
 
+	spin_lock_irqsave(&task->task_state_lock, flags);
+	if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) {
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
+		SAS_DPRINTK("%s: task 0x%p already aborted\n",
+			    __FUNCTION__, task);
+		return TASK_IS_ABORTED;
+	}
+	spin_unlock_irqrestore(&task->task_state_lock, flags);
+
 	for (i = 0; i < 5; i++) {
 		SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task);
 		res = si->dft->lldd_abort_task(task);
@@ -409,13 +421,16 @@
 	SAS_DPRINTK("going over list...\n");
 	list_for_each_entry_safe(cmd, n, &error_q, eh_entry) {
 		struct sas_task *task = TO_SAS_TASK(cmd);
-
-		SAS_DPRINTK("trying to find task 0x%p\n", task);
 		list_del_init(&cmd->eh_entry);
+
+		if (!task) {
+			SAS_DPRINTK("%s: taskless cmd?!\n", __FUNCTION__);
+			continue;
+		}
+		SAS_DPRINTK("trying to find task 0x%p\n", task);
 		res = sas_scsi_find_task(task);
 
 		cmd->eh_eflags = 0;
-		shost->host_failed--;
 
 		switch (res) {
 		case TASK_IS_DONE:
@@ -491,6 +506,7 @@
 		}
 	}
 out:
+	scsi_eh_flush_done_q(&ha->eh_done_q);
 	SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
 	return;
 clear_q:
@@ -508,12 +524,18 @@
 	unsigned long flags;
 
 	if (!task) {
-		SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
+		SAS_DPRINTK("command 0x%p, task 0x%p, gone: EH_HANDLED\n",
 			    cmd, task);
 		return EH_HANDLED;
 	}
 
 	spin_lock_irqsave(&task->task_state_lock, flags);
+	if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) {
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
+		SAS_DPRINTK("command 0x%p, task 0x%p, aborted by initiator: "
+			    "EH_NOT_HANDLED\n", cmd, task);
+		return EH_NOT_HANDLED;
+	}
 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
 		SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
@@ -777,6 +799,66 @@
 	spin_unlock_irqrestore(&core->task_queue_lock, flags);
 }
 
+static int do_sas_task_abort(struct sas_task *task)
+{
+	struct scsi_cmnd *sc = task->uldd_task;
+	struct sas_internal *si =
+		to_sas_internal(task->dev->port->ha->core.shost->transportt);
+	unsigned long flags;
+	int res;
+
+	spin_lock_irqsave(&task->task_state_lock, flags);
+	if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
+		SAS_DPRINTK("%s: Task %p already aborted.\n", __FUNCTION__,
+			    task);
+		return 0;
+	}
+
+	task->task_state_flags |= SAS_TASK_INITIATOR_ABORTED;
+	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+	spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+	if (!si->dft->lldd_abort_task)
+		return -ENODEV;
+
+	res = si->dft->lldd_abort_task(task);
+	if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
+	    (res == TMF_RESP_FUNC_COMPLETE))
+	{
+		/* SMP commands don't have scsi_cmds(?) */
+		if (!sc) {
+			task->task_done(task);
+			return 0;
+		}
+		scsi_req_abort_cmd(sc);
+		scsi_schedule_eh(sc->device->host);
+		return 0;
+	}
+
+	spin_lock_irqsave(&task->task_state_lock, flags);
+	task->task_state_flags &= ~SAS_TASK_INITIATOR_ABORTED;
+	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+		task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
+	spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+	return -EAGAIN;
+}
+
+void sas_task_abort(struct work_struct *work)
+{
+	struct sas_task *task =
+		container_of(work, struct sas_task, abort_work);
+	int i;
+
+	for (i = 0; i < 5; i++)
+		if (!do_sas_task_abort(task))
+			return;
+
+	SAS_DPRINTK("%s: Could not kill task!\n", __FUNCTION__);
+}
+
 EXPORT_SYMBOL_GPL(sas_queuecommand);
 EXPORT_SYMBOL_GPL(sas_target_alloc);
 EXPORT_SYMBOL_GPL(sas_slave_configure);
@@ -784,3 +866,5 @@
 EXPORT_SYMBOL_GPL(sas_change_queue_depth);
 EXPORT_SYMBOL_GPL(sas_change_queue_type);
 EXPORT_SYMBOL_GPL(sas_bios_param);
+EXPORT_SYMBOL_GPL(sas_task_abort);
+EXPORT_SYMBOL_GPL(sas_phy_reset);
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
new file mode 100644
index 0000000..89403b0
--- /dev/null
+++ b/drivers/scsi/libsrp.c
@@ -0,0 +1,441 @@
+/*
+ * SCSI RDMA Protocol lib functions
+ *
+ * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/err.h>
+#include <linux/kfifo.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_tgt.h>
+#include <scsi/srp.h>
+#include <scsi/libsrp.h>
+
+enum srp_task_attributes {
+	SRP_SIMPLE_TASK = 0,
+	SRP_HEAD_TASK = 1,
+	SRP_ORDERED_TASK = 2,
+	SRP_ACA_TASK = 4
+};
+
+/* tmp - will replace with SCSI logging stuff */
+#define eprintk(fmt, args...)					\
+do {								\
+	printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args);	\
+} while (0)
+/* #define dprintk eprintk */
+#define dprintk(fmt, args...)
+
+static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
+			     struct srp_buf **ring)
+{
+	int i;
+	struct iu_entry *iue;
+
+	q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
+	if (!q->pool)
+		return -ENOMEM;
+	q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
+	if (!q->items)
+		goto free_pool;
+
+	spin_lock_init(&q->lock);
+	q->queue = kfifo_init((void *) q->pool, max * sizeof(void *),
+			      GFP_KERNEL, &q->lock);
+	if (IS_ERR(q->queue))
+		goto free_item;
+
+	for (i = 0, iue = q->items; i < max; i++) {
+		__kfifo_put(q->queue, (void *) &iue, sizeof(void *));
+		iue->sbuf = ring[i];
+		iue++;
+	}
+	return 0;
+
+free_item:
+	kfree(q->items);
+free_pool:
+	kfree(q->pool);
+	return -ENOMEM;
+}
+
+static void srp_iu_pool_free(struct srp_queue *q)
+{
+	kfree(q->items);
+	kfree(q->pool);
+}
+
+static struct srp_buf **srp_ring_alloc(struct device *dev,
+				       size_t max, size_t size)
+{
+	int i;
+	struct srp_buf **ring;
+
+	ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
+	if (!ring)
+		return NULL;
+
+	for (i = 0; i < max; i++) {
+		ring[i] = kzalloc(sizeof(struct srp_buf), GFP_KERNEL);
+		if (!ring[i])
+			goto out;
+		ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
+						  GFP_KERNEL);
+		if (!ring[i]->buf)
+			goto out;
+	}
+	return ring;
+
+out:
+	for (i = 0; i < max && ring[i]; i++) {
+		if (ring[i]->buf)
+			dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
+		kfree(ring[i]);
+	}
+	kfree(ring);
+
+	return NULL;
+}
+
+static void srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max,
+			  size_t size)
+{
+	int i;
+
+	for (i = 0; i < max; i++) {
+		dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
+		kfree(ring[i]);
+	}
+}
+
+int srp_target_alloc(struct srp_target *target, struct device *dev,
+		     size_t nr, size_t iu_size)
+{
+	int err;
+
+	spin_lock_init(&target->lock);
+	INIT_LIST_HEAD(&target->cmd_queue);
+
+	target->dev = dev;
+	target->dev->driver_data = target;
+
+	target->srp_iu_size = iu_size;
+	target->rx_ring_size = nr;
+	target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
+	if (!target->rx_ring)
+		return -ENOMEM;
+	err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
+	if (err)
+		goto free_ring;
+
+	return 0;
+
+free_ring:
+	srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(srp_target_alloc);
+
+void srp_target_free(struct srp_target *target)
+{
+	srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
+		      target->srp_iu_size);
+	srp_iu_pool_free(&target->iu_queue);
+}
+EXPORT_SYMBOL_GPL(srp_target_free);
+
+struct iu_entry *srp_iu_get(struct srp_target *target)
+{
+	struct iu_entry *iue = NULL;
+
+	kfifo_get(target->iu_queue.queue, (void *) &iue, sizeof(void *));
+	if (!iue)
+		return iue;
+	iue->target = target;
+	INIT_LIST_HEAD(&iue->ilist);
+	iue->flags = 0;
+	return iue;
+}
+EXPORT_SYMBOL_GPL(srp_iu_get);
+
+void srp_iu_put(struct iu_entry *iue)
+{
+	kfifo_put(iue->target->iu_queue.queue, (void *) &iue, sizeof(void *));
+}
+EXPORT_SYMBOL_GPL(srp_iu_put);
+
+static int srp_direct_data(struct scsi_cmnd *sc, struct srp_direct_buf *md,
+			   enum dma_data_direction dir, srp_rdma_t rdma_io,
+			   int dma_map, int ext_desc)
+{
+	struct iu_entry *iue = NULL;
+	struct scatterlist *sg = NULL;
+	int err, nsg = 0, len;
+
+	if (dma_map) {
+		iue = (struct iu_entry *) sc->SCp.ptr;
+		sg = sc->request_buffer;
+
+		dprintk("%p %u %u %d\n", iue, sc->request_bufflen,
+			md->len, sc->use_sg);
+
+		nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg,
+				 DMA_BIDIRECTIONAL);
+		if (!nsg) {
+			printk("failed to map %p %d\n", iue, sc->use_sg);
+			return 0;
+		}
+		len = min(sc->request_bufflen, md->len);
+	} else
+		len = md->len;
+
+	err = rdma_io(sc, sg, nsg, md, 1, dir, len);
+
+	if (dma_map)
+		dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
+
+	return err;
+}
+
+static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
+			     struct srp_indirect_buf *id,
+			     enum dma_data_direction dir, srp_rdma_t rdma_io,
+			     int dma_map, int ext_desc)
+{
+	struct iu_entry *iue = NULL;
+	struct srp_direct_buf *md = NULL;
+	struct scatterlist dummy, *sg = NULL;
+	dma_addr_t token = 0;
+	long err;
+	unsigned int done = 0;
+	int nmd, nsg = 0, len;
+
+	if (dma_map || ext_desc) {
+		iue = (struct iu_entry *) sc->SCp.ptr;
+		sg = sc->request_buffer;
+
+		dprintk("%p %u %u %d %d\n",
+			iue, sc->request_bufflen, id->len,
+			cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
+	}
+
+	nmd = id->table_desc.len / sizeof(struct srp_direct_buf);
+
+	if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
+	    (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
+		md = &id->desc_list[0];
+		goto rdma;
+	}
+
+	if (ext_desc && dma_map) {
+		md = dma_alloc_coherent(iue->target->dev, id->table_desc.len,
+				&token, GFP_KERNEL);
+		if (!md) {
+			eprintk("Can't get dma memory %u\n", id->table_desc.len);
+			return -ENOMEM;
+		}
+
+		sg_init_one(&dummy, md, id->table_desc.len);
+		sg_dma_address(&dummy) = token;
+		err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
+			      id->table_desc.len);
+		if (err < 0) {
+			eprintk("Error copying indirect table %ld\n", err);
+			goto free_mem;
+		}
+	} else {
+		eprintk("This command uses an external indirect buffer\n");
+		return -EINVAL;
+	}
+
+rdma:
+	if (dma_map) {
+		nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg, DMA_BIDIRECTIONAL);
+		if (!nsg) {
+			eprintk("failed to map %p %d\n", iue, sc->use_sg);
+			goto free_mem;
+		}
+		len = min(sc->request_bufflen, id->len);
+	} else
+		len = id->len;
+
+	err = rdma_io(sc, sg, nsg, md, nmd, dir, len);
+
+	if (dma_map)
+		dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
+
+free_mem:
+	if (token && dma_map)
+		dma_free_coherent(iue->target->dev, id->table_desc.len, md, token);
+
+	return done;
+}
+
+static int data_out_desc_size(struct srp_cmd *cmd)
+{
+	int size = 0;
+	u8 fmt = cmd->buf_fmt >> 4;
+
+	switch (fmt) {
+	case SRP_NO_DATA_DESC:
+		break;
+	case SRP_DATA_DESC_DIRECT:
+		size = sizeof(struct srp_direct_buf);
+		break;
+	case SRP_DATA_DESC_INDIRECT:
+		size = sizeof(struct srp_indirect_buf) +
+			sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
+		break;
+	default:
+		eprintk("client error. Invalid data_out_format %x\n", fmt);
+		break;
+	}
+	return size;
+}
+
+/*
+ * TODO: this can be called multiple times for a single command if it
+ * has very long data.
+ */
+int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
+		      srp_rdma_t rdma_io, int dma_map, int ext_desc)
+{
+	struct srp_direct_buf *md;
+	struct srp_indirect_buf *id;
+	enum dma_data_direction dir;
+	int offset, err = 0;
+	u8 format;
+
+	offset = cmd->add_cdb_len * 4;
+
+	dir = srp_cmd_direction(cmd);
+	if (dir == DMA_FROM_DEVICE)
+		offset += data_out_desc_size(cmd);
+
+	if (dir == DMA_TO_DEVICE)
+		format = cmd->buf_fmt >> 4;
+	else
+		format = cmd->buf_fmt & ((1U << 4) - 1);
+
+	switch (format) {
+	case SRP_NO_DATA_DESC:
+		break;
+	case SRP_DATA_DESC_DIRECT:
+		md = (struct srp_direct_buf *)
+			(cmd->add_data + offset);
+		err = srp_direct_data(sc, md, dir, rdma_io, dma_map, ext_desc);
+		break;
+	case SRP_DATA_DESC_INDIRECT:
+		id = (struct srp_indirect_buf *)
+			(cmd->add_data + offset);
+		err = srp_indirect_data(sc, cmd, id, dir, rdma_io, dma_map,
+					ext_desc);
+		break;
+	default:
+		eprintk("Unknown format %d %x\n", dir, format);
+		break;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(srp_transfer_data);
+
+static int vscsis_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
+{
+	struct srp_direct_buf *md;
+	struct srp_indirect_buf *id;
+	int len = 0, offset = cmd->add_cdb_len * 4;
+	u8 fmt;
+
+	if (dir == DMA_TO_DEVICE)
+		fmt = cmd->buf_fmt >> 4;
+	else {
+		fmt = cmd->buf_fmt & ((1U << 4) - 1);
+		offset += data_out_desc_size(cmd);
+	}
+
+	switch (fmt) {
+	case SRP_NO_DATA_DESC:
+		break;
+	case SRP_DATA_DESC_DIRECT:
+		md = (struct srp_direct_buf *) (cmd->add_data + offset);
+		len = md->len;
+		break;
+	case SRP_DATA_DESC_INDIRECT:
+		id = (struct srp_indirect_buf *) (cmd->add_data + offset);
+		len = id->len;
+		break;
+	default:
+		eprintk("invalid data format %x\n", fmt);
+		break;
+	}
+	return len;
+}
+
+int srp_cmd_queue(struct Scsi_Host *shost, struct srp_cmd *cmd, void *info,
+		  u64 addr)
+{
+	enum dma_data_direction dir;
+	struct scsi_cmnd *sc;
+	int tag, len, err;
+
+	switch (cmd->task_attr) {
+	case SRP_SIMPLE_TASK:
+		tag = MSG_SIMPLE_TAG;
+		break;
+	case SRP_ORDERED_TASK:
+		tag = MSG_ORDERED_TAG;
+		break;
+	case SRP_HEAD_TASK:
+		tag = MSG_HEAD_TAG;
+		break;
+	default:
+		eprintk("Task attribute %d not supported\n", cmd->task_attr);
+		tag = MSG_ORDERED_TAG;
+	}
+
+	dir = srp_cmd_direction(cmd);
+	len = vscsis_data_length(cmd, dir);
+
+	dprintk("%p %x %lx %d %d %d %llx\n", info, cmd->cdb[0],
+		cmd->lun, dir, len, tag, (unsigned long long) cmd->tag);
+
+	sc = scsi_host_get_command(shost, dir, GFP_KERNEL);
+	if (!sc)
+		return -ENOMEM;
+
+	sc->SCp.ptr = info;
+	memcpy(sc->cmnd, cmd->cdb, MAX_COMMAND_SIZE);
+	sc->request_bufflen = len;
+	sc->request_buffer = (void *) (unsigned long) addr;
+	sc->tag = tag;
+	err = scsi_tgt_queue_command(sc, (struct scsi_lun *) &cmd->lun, cmd->tag);
+	if (err)
+		scsi_host_put_command(shost, sc);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(srp_cmd_queue);
+
+MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
+MODULE_AUTHOR("FUJITA Tomonori");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 3f7f5f8..a7de0bc 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -296,13 +296,17 @@
 	uint32_t cfg_cr_delay;
 	uint32_t cfg_cr_count;
 	uint32_t cfg_multi_ring_support;
+	uint32_t cfg_multi_ring_rctl;
+	uint32_t cfg_multi_ring_type;
 	uint32_t cfg_fdmi_on;
 	uint32_t cfg_discovery_threads;
 	uint32_t cfg_max_luns;
 	uint32_t cfg_poll;
 	uint32_t cfg_poll_tmo;
+	uint32_t cfg_use_msi;
 	uint32_t cfg_sg_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
+	uint64_t cfg_soft_wwnn;
 	uint64_t cfg_soft_wwpn;
 
 	uint32_t dev_loss_tmo_changed;
@@ -355,7 +359,7 @@
 #define VPD_PORT            0x8         /* valid vpd port data */
 #define VPD_MASK            0xf         /* mask for any vpd data */
 
-	uint8_t soft_wwpn_enable;
+	uint8_t soft_wwn_enable;
 
 	struct timer_list fcp_poll_timer;
 	struct timer_list els_tmofunc;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 2a4e02e..f247e78 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -552,10 +552,10 @@
 static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
 
 
-static char *lpfc_soft_wwpn_key = "C99G71SL8032A";
+static char *lpfc_soft_wwn_key = "C99G71SL8032A";
 
 static ssize_t
-lpfc_soft_wwpn_enable_store(struct class_device *cdev, const char *buf,
+lpfc_soft_wwn_enable_store(struct class_device *cdev, const char *buf,
 				size_t count)
 {
 	struct Scsi_Host *host = class_to_shost(cdev);
@@ -579,15 +579,15 @@
 	if (buf[cnt-1] == '\n')
 		cnt--;
 
-	if ((cnt != strlen(lpfc_soft_wwpn_key)) ||
-	    (strncmp(buf, lpfc_soft_wwpn_key, strlen(lpfc_soft_wwpn_key)) != 0))
+	if ((cnt != strlen(lpfc_soft_wwn_key)) ||
+	    (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
 		return -EINVAL;
 
-	phba->soft_wwpn_enable = 1;
+	phba->soft_wwn_enable = 1;
 	return count;
 }
-static CLASS_DEVICE_ATTR(lpfc_soft_wwpn_enable, S_IWUSR, NULL,
-				lpfc_soft_wwpn_enable_store);
+static CLASS_DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
+				lpfc_soft_wwn_enable_store);
 
 static ssize_t
 lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
@@ -613,12 +613,12 @@
 	if (buf[cnt-1] == '\n')
 		cnt--;
 
-	if (!phba->soft_wwpn_enable || (cnt < 16) || (cnt > 18) ||
+	if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
 	    ((cnt == 17) && (*buf++ != 'x')) ||
 	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
 		return -EINVAL;
 
-	phba->soft_wwpn_enable = 0;
+	phba->soft_wwn_enable = 0;
 
 	memset(wwpn, 0, sizeof(wwpn));
 
@@ -639,6 +639,8 @@
 	}
 	phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
 	fc_host_port_name(host) = phba->cfg_soft_wwpn;
+	if (phba->cfg_soft_wwnn)
+		fc_host_node_name(host) = phba->cfg_soft_wwnn;
 
 	dev_printk(KERN_NOTICE, &phba->pcidev->dev,
 		   "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
@@ -664,6 +666,66 @@
 static CLASS_DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
 			 lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
 
+static ssize_t
+lpfc_soft_wwnn_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+			(unsigned long long)phba->cfg_soft_wwnn);
+}
+
+
+static ssize_t
+lpfc_soft_wwnn_store(struct class_device *cdev, const char *buf, size_t count)
+{
+	struct Scsi_Host *host = class_to_shost(cdev);
+	struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+	unsigned int i, j, cnt=count;
+	u8 wwnn[8];
+
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
+	    ((cnt == 17) && (*buf++ != 'x')) ||
+	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+		return -EINVAL;
+
+	/*
+	 * Allow wwnn to be set many times, as long as the enable is set.
+	 * However, once the wwpn is set, everything locks.
+	 */
+
+	memset(wwnn, 0, sizeof(wwnn));
+
+	/* Validate and store the new name */
+	for (i=0, j=0; i < 16; i++) {
+		if ((*buf >= 'a') && (*buf <= 'f'))
+			j = ((j << 4) | ((*buf++ -'a') + 10));
+		else if ((*buf >= 'A') && (*buf <= 'F'))
+			j = ((j << 4) | ((*buf++ -'A') + 10));
+		else if ((*buf >= '0') && (*buf <= '9'))
+			j = ((j << 4) | (*buf++ -'0'));
+		else
+			return -EINVAL;
+		if (i % 2) {
+			wwnn[i/2] = j & 0xff;
+			j = 0;
+		}
+	}
+	phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
+
+	dev_printk(KERN_NOTICE, &phba->pcidev->dev,
+		   "lpfc%d: soft_wwnn set. Value will take effect upon "
+		   "setting of the soft_wwpn\n", phba->brd_no);
+
+	return count;
+}
+static CLASS_DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
+			 lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
+
 
 static int lpfc_poll = 0;
 module_param(lpfc_poll, int, 0);
@@ -802,12 +864,11 @@
 # LOG_MBOX                      0x4        Mailbox events
 # LOG_INIT                      0x8        Initialization events
 # LOG_LINK_EVENT                0x10       Link events
-# LOG_IP                        0x20       IP traffic history
 # LOG_FCP                       0x40       FCP traffic history
 # LOG_NODE                      0x80       Node table events
 # LOG_MISC                      0x400      Miscellaneous events
 # LOG_SLI                       0x800      SLI events
-# LOG_CHK_COND                  0x1000     FCP Check condition flag
+# LOG_FCP_ERROR                 0x1000     Only log FCP errors
 # LOG_LIBDFC                    0x2000     LIBDFC events
 # LOG_ALL_MSG                   0xffff     LOG all messages
 */
@@ -916,6 +977,22 @@
 		"SLI rings to spread IOCB entries across");
 
 /*
+# lpfc_multi_ring_rctl:  If lpfc_multi_ring_support is enabled, this
+# identifies what rctl value to configure the additional ring for.
+# Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
+*/
+LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1,
+	     255, "Identifies RCTL for additional ring configuration");
+
+/*
+# lpfc_multi_ring_type:  If lpfc_multi_ring_support is enabled, this
+# identifies what type value to configure the additional ring for.
+# Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
+*/
+LPFC_ATTR_R(multi_ring_type, FC_LLC_SNAP, 1,
+	     255, "Identifies TYPE for additional ring configuration");
+
+/*
 # lpfc_fdmi_on: controls FDMI support.
 #       0 = no FDMI support
 #       1 = support FDMI without attribute of hostname
@@ -946,6 +1023,15 @@
 LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
 	     "Milliseconds driver will wait between polling FCP ring");
 
+/*
+# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
+#		support this feature
+#       0  = MSI disabled (default)
+#       1  = MSI enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible");
+
 
 struct class_device_attribute *lpfc_host_attrs[] = {
 	&class_device_attr_info,
@@ -974,6 +1060,8 @@
 	&class_device_attr_lpfc_cr_delay,
 	&class_device_attr_lpfc_cr_count,
 	&class_device_attr_lpfc_multi_ring_support,
+	&class_device_attr_lpfc_multi_ring_rctl,
+	&class_device_attr_lpfc_multi_ring_type,
 	&class_device_attr_lpfc_fdmi_on,
 	&class_device_attr_lpfc_max_luns,
 	&class_device_attr_nport_evt_cnt,
@@ -982,8 +1070,10 @@
 	&class_device_attr_issue_reset,
 	&class_device_attr_lpfc_poll,
 	&class_device_attr_lpfc_poll_tmo,
+	&class_device_attr_lpfc_use_msi,
+	&class_device_attr_lpfc_soft_wwnn,
 	&class_device_attr_lpfc_soft_wwpn,
-	&class_device_attr_lpfc_soft_wwpn_enable,
+	&class_device_attr_lpfc_soft_wwn_enable,
 	NULL,
 };
 
@@ -1771,6 +1861,8 @@
 	lpfc_cr_delay_init(phba, lpfc_cr_delay);
 	lpfc_cr_count_init(phba, lpfc_cr_count);
 	lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
+	lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
+	lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
 	lpfc_lun_queue_depth_init(phba, lpfc_lun_queue_depth);
 	lpfc_fcp_class_init(phba, lpfc_fcp_class);
 	lpfc_use_adisc_init(phba, lpfc_use_adisc);
@@ -1782,9 +1874,11 @@
 	lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
 	lpfc_max_luns_init(phba, lpfc_max_luns);
 	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
+	lpfc_use_msi_init(phba, lpfc_use_msi);
 	lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
 	lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
 	phba->cfg_poll = lpfc_poll;
+	phba->cfg_soft_wwnn = 0L;
 	phba->cfg_soft_wwpn = 0L;
 
 	/*
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 3add7c2..a51a41b 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -558,6 +558,14 @@
 	return;
 }
 
+static void
+lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+			 struct lpfc_iocbq * rspiocb)
+{
+	lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
+	return;
+}
+
 void
 lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
 {
@@ -629,6 +637,8 @@
 		bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
 	else if (cmdcode == SLI_CTNS_RSNN_NN)
 		bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_RFF_ID)
+		bpl->tus.f.bdeSize = RFF_REQUEST_SZ;
 	else
 		bpl->tus.f.bdeSize = 0;
 	bpl->tus.w = le32_to_cpu(bpl->tus.w);
@@ -660,6 +670,17 @@
 		cmpl = lpfc_cmpl_ct_cmd_rft_id;
 		break;
 
+	case SLI_CTNS_RFF_ID:
+		CtReq->CommandResponse.bits.CmdRsp =
+			be16_to_cpu(SLI_CTNS_RFF_ID);
+		CtReq->un.rff.PortId = be32_to_cpu(phba->fc_myDID);
+		CtReq->un.rff.feature_res = 0;
+		CtReq->un.rff.feature_tgt = 0;
+		CtReq->un.rff.type_code = FC_FCP_DATA;
+		CtReq->un.rff.feature_init = 1;
+		cmpl = lpfc_cmpl_ct_cmd_rff_id;
+		break;
+
 	case SLI_CTNS_RNN_ID:
 		CtReq->CommandResponse.bits.CmdRsp =
 		    be16_to_cpu(SLI_CTNS_RNN_ID);
@@ -934,7 +955,8 @@
 			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
 			ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION);
 			sprintf(ae->un.OsNameVersion, "%s %s %s",
-				init_utsname()->sysname, init_utsname()->release,
+				init_utsname()->sysname,
+				init_utsname()->release,
 				init_utsname()->version);
 			len = strlen(ae->un.OsNameVersion);
 			len += (len & 3) ? (4 - (len & 3)) : 4;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 71864cd..a5f33a0 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -243,6 +243,7 @@
 		struct serv_parm *sp, IOCB_t *irsp)
 {
 	LPFC_MBOXQ_t *mbox;
+	struct lpfc_dmabuf *mp;
 	int rc;
 
 	spin_lock_irq(phba->host->host_lock);
@@ -307,10 +308,14 @@
 
 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
 	if (rc == MBX_NOT_FINISHED)
-		goto fail_free_mbox;
+		goto fail_issue_reg_login;
 
 	return 0;
 
+ fail_issue_reg_login:
+	mp = (struct lpfc_dmabuf *) mbox->context1;
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
  fail_free_mbox:
 	mempool_free(mbox, phba->mbox_mem_pool);
  fail:
@@ -657,6 +662,12 @@
 	uint8_t name[sizeof (struct lpfc_name)];
 	uint32_t rc;
 
+	/* Fabric nodes can have the same WWPN so we don't bother searching
+	 * by WWPN.  Just return the ndlp that was given to us.
+	 */
+	if (ndlp->nlp_type & NLP_FABRIC)
+		return ndlp;
+
 	lp = (uint32_t *) prsp->virt;
 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
 	memset(name, 0, sizeof (struct lpfc_name));
@@ -1122,7 +1133,7 @@
 						mempool_free(mbox,
 						     phba->mbox_mem_pool);
 						lpfc_disc_flush_list(phba);
-						psli->ring[(psli->ip_ring)].
+						psli->ring[(psli->extra_ring)].
 						    flag &=
 						    ~LPFC_STOP_IOCB_EVENT;
 						psli->ring[(psli->fcp_ring)].
@@ -1851,6 +1862,7 @@
 	IOCB_t *irsp;
 	struct lpfc_nodelist *ndlp;
 	LPFC_MBOXQ_t *mbox = NULL;
+	struct lpfc_dmabuf *mp;
 
 	irsp = &rspiocb->iocb;
 
@@ -1862,6 +1874,11 @@
 	/* Check to see if link went down during discovery */
 	if ((lpfc_els_chk_latt(phba)) || !ndlp) {
 		if (mbox) {
+			mp = (struct lpfc_dmabuf *) mbox->context1;
+			if (mp) {
+				lpfc_mbuf_free(phba, mp->virt, mp->phys);
+				kfree(mp);
+			}
 			mempool_free( mbox, phba->mbox_mem_pool);
 		}
 		goto out;
@@ -1893,9 +1910,7 @@
 			}
 			/* NOTE: we should have messages for unsuccessful
 			   reglogin */
-			mempool_free( mbox, phba->mbox_mem_pool);
 		} else {
-			mempool_free( mbox, phba->mbox_mem_pool);
 			/* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */
 			if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
 			      ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
@@ -1907,6 +1922,12 @@
 				}
 			}
 		}
+		mp = (struct lpfc_dmabuf *) mbox->context1;
+		if (mp) {
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+		}
+		mempool_free(mbox, phba->mbox_mem_pool);
 	}
 out:
 	if (ndlp) {
@@ -2644,6 +2665,7 @@
 			ndlp->nlp_type |= NLP_FABRIC;
 			ndlp->nlp_prev_state = ndlp->nlp_state;
 			ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+			lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
 			lpfc_issue_els_plogi(phba, NameServer_DID, 0);
 			/* Wait for NameServer login cmpl before we can
 			   continue */
@@ -3039,7 +3061,7 @@
 	/* FARP-REQ received from DID <did> */
 	lpfc_printf_log(phba,
 			 KERN_INFO,
-			 LOG_IP,
+			 LOG_ELS,
 			 "%d:0601 FARP-REQ received from DID x%x\n",
 			 phba->brd_no, did);
 
@@ -3101,7 +3123,7 @@
 	/* FARP-RSP received from DID <did> */
 	lpfc_printf_log(phba,
 			 KERN_INFO,
-			 LOG_IP,
+			 LOG_ELS,
 			 "%d:0600 FARP-RSP received from DID x%x\n",
 			 phba->brd_no, did);
 
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 19c79a0..c39564e 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -525,7 +525,7 @@
 	psli = &phba->sli;
 	mb = &pmb->mb;
 	/* Since we don't do discovery right now, turn these off here */
-	psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
 	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
 	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
 
@@ -641,7 +641,7 @@
 	if (rc == MBX_NOT_FINISHED) {
 		mempool_free(pmb, phba->mbox_mem_pool);
 		lpfc_disc_flush_list(phba);
-		psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
 		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
 		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
 		phba->hba_state = LPFC_HBA_READY;
@@ -672,6 +672,8 @@
 
 	memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
 	       sizeof (struct serv_parm));
+	if (phba->cfg_soft_wwnn)
+		u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
 	if (phba->cfg_soft_wwpn)
 		u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
 	memcpy((uint8_t *) & phba->fc_nodename,
@@ -696,7 +698,7 @@
 		    == MBX_NOT_FINISHED) {
 			mempool_free( pmb, phba->mbox_mem_pool);
 			lpfc_disc_flush_list(phba);
-			psli->ring[(psli->ip_ring)].flag &=
+			psli->ring[(psli->extra_ring)].flag &=
 			    ~LPFC_STOP_IOCB_EVENT;
 			psli->ring[(psli->fcp_ring)].flag &=
 			    ~LPFC_STOP_IOCB_EVENT;
@@ -715,6 +717,9 @@
 {
 	int i;
 	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
+	struct lpfc_dmabuf *mp;
+	int rc;
+
 	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 
@@ -793,16 +798,27 @@
 	if (sparam_mbox) {
 		lpfc_read_sparam(phba, sparam_mbox);
 		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
-		lpfc_sli_issue_mbox(phba, sparam_mbox,
+		rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
 						(MBX_NOWAIT | MBX_STOP_IOCB));
+		if (rc == MBX_NOT_FINISHED) {
+			mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+			mempool_free(sparam_mbox, phba->mbox_mem_pool);
+			if (cfglink_mbox)
+				mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+			return;
+		}
 	}
 
 	if (cfglink_mbox) {
 		phba->hba_state = LPFC_LOCAL_CFG_LINK;
 		lpfc_config_link(phba, cfglink_mbox);
 		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
-		lpfc_sli_issue_mbox(phba, cfglink_mbox,
+		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
 						(MBX_NOWAIT | MBX_STOP_IOCB));
+		if (rc == MBX_NOT_FINISHED)
+			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
 	}
 }
 
@@ -1067,6 +1083,7 @@
 		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
 		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
 		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
+		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID);
 	}
 
 	phba->fc_ns_retry = 0;
@@ -1423,7 +1440,7 @@
 			if (iocb->context1 == (uint8_t *) ndlp)
 				return 1;
 		}
-	} else if (pring->ringno == psli->ip_ring) {
+	} else if (pring->ringno == psli->extra_ring) {
 
 	} else if (pring->ringno == psli->fcp_ring) {
 		/* Skip match check if waiting to relogin to FCP target */
@@ -1680,21 +1697,38 @@
 struct lpfc_nodelist *
 lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
 {
-	struct lpfc_nodelist *ndlp, *next_ndlp;
+	struct lpfc_nodelist *ndlp;
+	struct list_head *lists[] = {&phba->fc_nlpunmap_list,
+				   &phba->fc_nlpmap_list,
+				   &phba->fc_plogi_list,
+				   &phba->fc_adisc_list,
+				   &phba->fc_reglogin_list,
+				   &phba->fc_prli_list,
+				   &phba->fc_npr_list,
+				   &phba->fc_unused_list};
+	uint32_t search[] = {NLP_SEARCH_UNMAPPED,
+			   NLP_SEARCH_MAPPED,
+			   NLP_SEARCH_PLOGI,
+			   NLP_SEARCH_ADISC,
+			   NLP_SEARCH_REGLOGIN,
+			   NLP_SEARCH_PRLI,
+			   NLP_SEARCH_NPR,
+			   NLP_SEARCH_UNUSED};
+	int i;
 	uint32_t data1;
 
 	spin_lock_irq(phba->host->host_lock);
-	if (order & NLP_SEARCH_UNMAPPED) {
-		list_for_each_entry_safe(ndlp, next_ndlp,
-					 &phba->fc_nlpunmap_list, nlp_listp) {
+	for (i = 0; i < ARRAY_SIZE(lists); i++) {
+		if (!(order & search[i]))
+			continue;
+		list_for_each_entry(ndlp, lists[i], nlp_listp) {
 			if (lpfc_matchdid(phba, ndlp, did)) {
 				data1 = (((uint32_t) ndlp->nlp_state << 24) |
 					 ((uint32_t) ndlp->nlp_xri << 16) |
 					 ((uint32_t) ndlp->nlp_type << 8) |
 					 ((uint32_t) ndlp->nlp_rpi & 0xff));
-				/* FIND node DID unmapped */
 				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-						"%d:0929 FIND node DID unmapped"
+						"%d:0929 FIND node DID "
 						" Data: x%p x%x x%x x%x\n",
 						phba->brd_no,
 						ndlp, ndlp->nlp_DID,
@@ -1704,177 +1738,12 @@
 			}
 		}
 	}
-
-	if (order & NLP_SEARCH_MAPPED) {
-		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
-					nlp_listp) {
-			if (lpfc_matchdid(phba, ndlp, did)) {
-
-				data1 = (((uint32_t) ndlp->nlp_state << 24) |
-					 ((uint32_t) ndlp->nlp_xri << 16) |
-					 ((uint32_t) ndlp->nlp_type << 8) |
-					 ((uint32_t) ndlp->nlp_rpi & 0xff));
-				/* FIND node DID mapped */
-				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-						"%d:0930 FIND node DID mapped "
-						"Data: x%p x%x x%x x%x\n",
-						phba->brd_no,
-						ndlp, ndlp->nlp_DID,
-						ndlp->nlp_flag, data1);
-				spin_unlock_irq(phba->host->host_lock);
-				return ndlp;
-			}
-		}
-	}
-
-	if (order & NLP_SEARCH_PLOGI) {
-		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
-					nlp_listp) {
-			if (lpfc_matchdid(phba, ndlp, did)) {
-
-				data1 = (((uint32_t) ndlp->nlp_state << 24) |
-					 ((uint32_t) ndlp->nlp_xri << 16) |
-					 ((uint32_t) ndlp->nlp_type << 8) |
-					 ((uint32_t) ndlp->nlp_rpi & 0xff));
-				/* LOG change to PLOGI */
-				/* FIND node DID plogi */
-				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-						"%d:0908 FIND node DID plogi "
-						"Data: x%p x%x x%x x%x\n",
-						phba->brd_no,
-						ndlp, ndlp->nlp_DID,
-						ndlp->nlp_flag, data1);
-				spin_unlock_irq(phba->host->host_lock);
-				return ndlp;
-			}
-		}
-	}
-
-	if (order & NLP_SEARCH_ADISC) {
-		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
-					nlp_listp) {
-			if (lpfc_matchdid(phba, ndlp, did)) {
-
-				data1 = (((uint32_t) ndlp->nlp_state << 24) |
-					 ((uint32_t) ndlp->nlp_xri << 16) |
-					 ((uint32_t) ndlp->nlp_type << 8) |
-					 ((uint32_t) ndlp->nlp_rpi & 0xff));
-				/* LOG change to ADISC */
-				/* FIND node DID adisc */
-				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-						"%d:0931 FIND node DID adisc "
-						"Data: x%p x%x x%x x%x\n",
-						phba->brd_no,
-						ndlp, ndlp->nlp_DID,
-						ndlp->nlp_flag, data1);
-				spin_unlock_irq(phba->host->host_lock);
-				return ndlp;
-			}
-		}
-	}
-
-	if (order & NLP_SEARCH_REGLOGIN) {
-		list_for_each_entry_safe(ndlp, next_ndlp,
-					 &phba->fc_reglogin_list, nlp_listp) {
-			if (lpfc_matchdid(phba, ndlp, did)) {
-
-				data1 = (((uint32_t) ndlp->nlp_state << 24) |
-					 ((uint32_t) ndlp->nlp_xri << 16) |
-					 ((uint32_t) ndlp->nlp_type << 8) |
-					 ((uint32_t) ndlp->nlp_rpi & 0xff));
-				/* LOG change to REGLOGIN */
-				/* FIND node DID reglogin */
-				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-						"%d:0901 FIND node DID reglogin"
-						" Data: x%p x%x x%x x%x\n",
-						phba->brd_no,
-						ndlp, ndlp->nlp_DID,
-						ndlp->nlp_flag, data1);
-				spin_unlock_irq(phba->host->host_lock);
-				return ndlp;
-			}
-		}
-	}
-
-	if (order & NLP_SEARCH_PRLI) {
-		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
-					nlp_listp) {
-			if (lpfc_matchdid(phba, ndlp, did)) {
-
-				data1 = (((uint32_t) ndlp->nlp_state << 24) |
-					 ((uint32_t) ndlp->nlp_xri << 16) |
-					 ((uint32_t) ndlp->nlp_type << 8) |
-					 ((uint32_t) ndlp->nlp_rpi & 0xff));
-				/* LOG change to PRLI */
-				/* FIND node DID prli */
-				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-						"%d:0902 FIND node DID prli "
-						"Data: x%p x%x x%x x%x\n",
-						phba->brd_no,
-						ndlp, ndlp->nlp_DID,
-						ndlp->nlp_flag, data1);
-				spin_unlock_irq(phba->host->host_lock);
-				return ndlp;
-			}
-		}
-	}
-
-	if (order & NLP_SEARCH_NPR) {
-		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
-					nlp_listp) {
-			if (lpfc_matchdid(phba, ndlp, did)) {
-
-				data1 = (((uint32_t) ndlp->nlp_state << 24) |
-					 ((uint32_t) ndlp->nlp_xri << 16) |
-					 ((uint32_t) ndlp->nlp_type << 8) |
-					 ((uint32_t) ndlp->nlp_rpi & 0xff));
-				/* LOG change to NPR */
-				/* FIND node DID npr */
-				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-						"%d:0903 FIND node DID npr "
-						"Data: x%p x%x x%x x%x\n",
-						phba->brd_no,
-						ndlp, ndlp->nlp_DID,
-						ndlp->nlp_flag, data1);
-				spin_unlock_irq(phba->host->host_lock);
-				return ndlp;
-			}
-		}
-	}
-
-	if (order & NLP_SEARCH_UNUSED) {
-		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
-					nlp_listp) {
-			if (lpfc_matchdid(phba, ndlp, did)) {
-
-				data1 = (((uint32_t) ndlp->nlp_state << 24) |
-					 ((uint32_t) ndlp->nlp_xri << 16) |
-					 ((uint32_t) ndlp->nlp_type << 8) |
-					 ((uint32_t) ndlp->nlp_rpi & 0xff));
-				/* LOG change to UNUSED */
-				/* FIND node DID unused */
-				lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-						"%d:0905 FIND node DID unused "
-						"Data: x%p x%x x%x x%x\n",
-						phba->brd_no,
-						ndlp, ndlp->nlp_DID,
-						ndlp->nlp_flag, data1);
-				spin_unlock_irq(phba->host->host_lock);
-				return ndlp;
-			}
-		}
-	}
-
 	spin_unlock_irq(phba->host->host_lock);
 
 	/* FIND node did <did> NOT FOUND */
-	lpfc_printf_log(phba,
-			KERN_INFO,
-			LOG_NODE,
+	lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
 			"%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
 			phba->brd_no, did, order);
-
-	/* no match found */
 	return NULL;
 }
 
@@ -2036,7 +1905,7 @@
 			if (rc == MBX_NOT_FINISHED) {
 				mempool_free( mbox, phba->mbox_mem_pool);
 				lpfc_disc_flush_list(phba);
-				psli->ring[(psli->ip_ring)].flag &=
+				psli->ring[(psli->extra_ring)].flag &=
 					~LPFC_STOP_IOCB_EVENT;
 				psli->ring[(psli->fcp_ring)].flag &=
 					~LPFC_STOP_IOCB_EVENT;
@@ -2415,7 +2284,7 @@
 
 	if (clrlaerr) {
 		lpfc_disc_flush_list(phba);
-		psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
 		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
 		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
 		phba->hba_state = LPFC_HBA_READY;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index eedf988..f79cb61 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -42,14 +42,14 @@
 #define FCELSSIZE             1024	/* maximum ELS transfer size */
 
 #define LPFC_FCP_RING            0	/* ring 0 for FCP initiator commands */
-#define LPFC_IP_RING             1	/* ring 1 for IP commands */
+#define LPFC_EXTRA_RING          1	/* ring 1 for other protocols */
 #define LPFC_ELS_RING            2	/* ring 2 for ELS commands */
 #define LPFC_FCP_NEXT_RING       3
 
 #define SLI2_IOCB_CMD_R0_ENTRIES    172	/* SLI-2 FCP command ring entries */
 #define SLI2_IOCB_RSP_R0_ENTRIES    134	/* SLI-2 FCP response ring entries */
-#define SLI2_IOCB_CMD_R1_ENTRIES      4	/* SLI-2 IP command ring entries */
-#define SLI2_IOCB_RSP_R1_ENTRIES      4	/* SLI-2 IP response ring entries */
+#define SLI2_IOCB_CMD_R1_ENTRIES      4	/* SLI-2 extra command ring entries */
+#define SLI2_IOCB_RSP_R1_ENTRIES      4	/* SLI-2 extra response ring entries */
 #define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36	/* SLI-2 extra FCP cmd ring entries */
 #define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52	/* SLI-2 extra FCP rsp ring entries */
 #define SLI2_IOCB_CMD_R2_ENTRIES     20	/* SLI-2 ELS command ring entries */
@@ -121,6 +121,20 @@
 
 			uint32_t rsvd[7];
 		} rft;
+		struct rff {
+			uint32_t PortId;
+			uint8_t reserved[2];
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint8_t feature_res:6;
+			uint8_t feature_init:1;
+			uint8_t feature_tgt:1;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+			uint8_t feature_tgt:1;
+			uint8_t feature_init:1;
+			uint8_t feature_res:6;
+#endif
+			uint8_t type_code;     /* type=8 for FCP */
+		} rff;
 		struct rnn {
 			uint32_t PortId;	/* For RNN_ID requests */
 			uint8_t wwnn[8];
@@ -136,6 +150,7 @@
 #define  SLI_CT_REVISION        1
 #define  GID_REQUEST_SZ         (sizeof(struct lpfc_sli_ct_request) - 260)
 #define  RFT_REQUEST_SZ         (sizeof(struct lpfc_sli_ct_request) - 228)
+#define  RFF_REQUEST_SZ         (sizeof(struct lpfc_sli_ct_request) - 235)
 #define  RNN_REQUEST_SZ         (sizeof(struct lpfc_sli_ct_request) - 252)
 #define  RSNN_REQUEST_SZ        (sizeof(struct lpfc_sli_ct_request))
 
@@ -225,6 +240,7 @@
 #define  SLI_CTNS_RNN_ID      0x0213
 #define  SLI_CTNS_RCS_ID      0x0214
 #define  SLI_CTNS_RFT_ID      0x0217
+#define  SLI_CTNS_RFF_ID      0x021F
 #define  SLI_CTNS_RSPN_ID     0x0218
 #define  SLI_CTNS_RPT_ID      0x021A
 #define  SLI_CTNS_RIP_NN      0x0235
@@ -1089,12 +1105,6 @@
 #define PCI_DEVICE_ID_ZEPHYR_SCSP   0xfe11
 #define PCI_DEVICE_ID_ZEPHYR_DCSP   0xfe12
 
-#define PCI_SUBSYSTEM_ID_LP11000S      0xfc11
-#define PCI_SUBSYSTEM_ID_LP11002S      0xfc12
-#define PCI_SUBSYSTEM_ID_LPE11000S     0xfc21
-#define PCI_SUBSYSTEM_ID_LPE11002S     0xfc22
-#define PCI_SUBSYSTEM_ID_LPE11010S     0xfc2A
-
 #define JEDEC_ID_ADDRESS            0x0080001c
 #define FIREFLY_JEDEC_ID            0x1ACC
 #define SUPERFLY_JEDEC_ID           0x0020
@@ -1284,6 +1294,10 @@
 #define CMD_FCP_IREAD_CX        0x1B
 #define CMD_FCP_ICMND_CR        0x1C
 #define CMD_FCP_ICMND_CX        0x1D
+#define CMD_FCP_TSEND_CX        0x1F
+#define CMD_FCP_TRECEIVE_CX     0x21
+#define CMD_FCP_TRSP_CX	        0x23
+#define CMD_FCP_AUTO_TRSP_CX    0x29
 
 #define CMD_ADAPTER_MSG         0x20
 #define CMD_ADAPTER_DUMP        0x22
@@ -1310,6 +1324,9 @@
 #define CMD_FCP_IREAD64_CX      0x9B
 #define CMD_FCP_ICMND64_CR      0x9C
 #define CMD_FCP_ICMND64_CX      0x9D
+#define CMD_FCP_TSEND64_CX      0x9F
+#define CMD_FCP_TRECEIVE64_CX   0xA1
+#define CMD_FCP_TRSP64_CX       0xA3
 
 #define CMD_GEN_REQUEST64_CR    0xC2
 #define CMD_GEN_REQUEST64_CX    0xC3
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a5723ad..afca45c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -268,6 +268,8 @@
 	kfree(mp);
 	pmb->context1 = NULL;
 
+	if (phba->cfg_soft_wwnn)
+		u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
 	if (phba->cfg_soft_wwpn)
 		u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
 	memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
@@ -349,8 +351,8 @@
 	phba->hba_state = LPFC_LINK_DOWN;
 
 	/* Only process IOCBs on ring 0 till hba_state is READY */
-	if (psli->ring[psli->ip_ring].cmdringaddr)
-		psli->ring[psli->ip_ring].flag |= LPFC_STOP_IOCB_EVENT;
+	if (psli->ring[psli->extra_ring].cmdringaddr)
+		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
 	if (psli->ring[psli->fcp_ring].cmdringaddr)
 		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
 	if (psli->ring[psli->next_ring].cmdringaddr)
@@ -517,7 +519,8 @@
 	struct lpfc_sli_ring  *pring;
 	uint32_t event_data;
 
-	if (phba->work_hs & HS_FFER6) {
+	if (phba->work_hs & HS_FFER6 ||
+	    phba->work_hs & HS_FFER5) {
 		/* Re-establishing Link */
 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
 				"%d:1301 Re-establishing Link "
@@ -611,7 +614,7 @@
 	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
 	rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
 	if (rc == MBX_NOT_FINISHED)
-		goto lpfc_handle_latt_free_mp;
+		goto lpfc_handle_latt_free_mbuf;
 
 	/* Clear Link Attention in HA REG */
 	spin_lock_irq(phba->host->host_lock);
@@ -621,6 +624,8 @@
 
 	return;
 
+lpfc_handle_latt_free_mbuf:
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 lpfc_handle_latt_free_mp:
 	kfree(mp);
 lpfc_handle_latt_free_pmb:
@@ -802,19 +807,13 @@
 {
 	lpfc_vpd_t *vp;
 	uint16_t dev_id = phba->pcidev->device;
-	uint16_t dev_subid = phba->pcidev->subsystem_device;
-	uint8_t hdrtype;
 	int max_speed;
-	char * ports;
 	struct {
 		char * name;
 		int    max_speed;
-		char * ports;
 		char * bus;
-	} m = {"<Unknown>", 0, "", ""};
+	} m = {"<Unknown>", 0, ""};
 
-	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
-	ports = (hdrtype == 0x80) ? "2-port " : "";
 	if (mdp && mdp[0] != '\0'
 		&& descp && descp[0] != '\0')
 		return;
@@ -834,130 +833,93 @@
 
 	switch (dev_id) {
 	case PCI_DEVICE_ID_FIREFLY:
-		m = (typeof(m)){"LP6000", max_speed, "", "PCI"};
+		m = (typeof(m)){"LP6000", max_speed, "PCI"};
 		break;
 	case PCI_DEVICE_ID_SUPERFLY:
 		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
-			m = (typeof(m)){"LP7000", max_speed, "", "PCI"};
+			m = (typeof(m)){"LP7000", max_speed, "PCI"};
 		else
-			m = (typeof(m)){"LP7000E", max_speed, "", "PCI"};
+			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
 		break;
 	case PCI_DEVICE_ID_DRAGONFLY:
-		m = (typeof(m)){"LP8000", max_speed, "", "PCI"};
+		m = (typeof(m)){"LP8000", max_speed, "PCI"};
 		break;
 	case PCI_DEVICE_ID_CENTAUR:
 		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
-			m = (typeof(m)){"LP9002", max_speed, "", "PCI"};
+			m = (typeof(m)){"LP9002", max_speed, "PCI"};
 		else
-			m = (typeof(m)){"LP9000", max_speed, "", "PCI"};
+			m = (typeof(m)){"LP9000", max_speed, "PCI"};
 		break;
 	case PCI_DEVICE_ID_RFLY:
-		m = (typeof(m)){"LP952", max_speed, "", "PCI"};
+		m = (typeof(m)){"LP952", max_speed, "PCI"};
 		break;
 	case PCI_DEVICE_ID_PEGASUS:
-		m = (typeof(m)){"LP9802", max_speed, "", "PCI-X"};
+		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
 		break;
 	case PCI_DEVICE_ID_THOR:
-		if (hdrtype == 0x80)
-			m = (typeof(m)){"LP10000DC",
-					max_speed, ports, "PCI-X"};
-		else
-			m = (typeof(m)){"LP10000",
-					max_speed, ports, "PCI-X"};
+		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
 		break;
 	case PCI_DEVICE_ID_VIPER:
-		m = (typeof(m)){"LPX1000", max_speed, "", "PCI-X"};
+		m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
 		break;
 	case PCI_DEVICE_ID_PFLY:
-		m = (typeof(m)){"LP982", max_speed, "", "PCI-X"};
+		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
 		break;
 	case PCI_DEVICE_ID_TFLY:
-		if (hdrtype == 0x80)
-			m = (typeof(m)){"LP1050DC", max_speed, ports, "PCI-X"};
-		else
-			m = (typeof(m)){"LP1050", max_speed, ports, "PCI-X"};
+		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
 		break;
 	case PCI_DEVICE_ID_HELIOS:
-		if (hdrtype == 0x80)
-			m = (typeof(m)){"LP11002", max_speed, ports, "PCI-X2"};
-		else
-			m = (typeof(m)){"LP11000", max_speed, ports, "PCI-X2"};
+		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
 		break;
 	case PCI_DEVICE_ID_HELIOS_SCSP:
-		m = (typeof(m)){"LP11000-SP", max_speed, ports, "PCI-X2"};
+		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
 		break;
 	case PCI_DEVICE_ID_HELIOS_DCSP:
-		m = (typeof(m)){"LP11002-SP", max_speed, ports, "PCI-X2"};
+		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
 		break;
 	case PCI_DEVICE_ID_NEPTUNE:
-		if (hdrtype == 0x80)
-			m = (typeof(m)){"LPe1002", max_speed, ports, "PCIe"};
-		else
-			m = (typeof(m)){"LPe1000", max_speed, ports, "PCIe"};
+		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
 		break;
 	case PCI_DEVICE_ID_NEPTUNE_SCSP:
-		m = (typeof(m)){"LPe1000-SP", max_speed, ports, "PCIe"};
+		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
 		break;
 	case PCI_DEVICE_ID_NEPTUNE_DCSP:
-		m = (typeof(m)){"LPe1002-SP", max_speed, ports, "PCIe"};
+		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
 		break;
 	case PCI_DEVICE_ID_BMID:
-		m = (typeof(m)){"LP1150", max_speed, ports, "PCI-X2"};
+		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
 		break;
 	case PCI_DEVICE_ID_BSMB:
-		m = (typeof(m)){"LP111", max_speed, ports, "PCI-X2"};
+		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
 		break;
 	case PCI_DEVICE_ID_ZEPHYR:
-		if (hdrtype == 0x80)
-			m = (typeof(m)){"LPe11002", max_speed, ports, "PCIe"};
-		else
-			m = (typeof(m)){"LPe11000", max_speed, ports, "PCIe"};
+		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
 		break;
 	case PCI_DEVICE_ID_ZEPHYR_SCSP:
-		m = (typeof(m)){"LPe11000", max_speed, ports, "PCIe"};
+		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
 		break;
 	case PCI_DEVICE_ID_ZEPHYR_DCSP:
-		m = (typeof(m)){"LPe11002-SP", max_speed, ports, "PCIe"};
+		m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"};
 		break;
 	case PCI_DEVICE_ID_ZMID:
-		m = (typeof(m)){"LPe1150", max_speed, ports, "PCIe"};
+		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
 		break;
 	case PCI_DEVICE_ID_ZSMB:
-		m = (typeof(m)){"LPe111", max_speed, ports, "PCIe"};
+		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
 		break;
 	case PCI_DEVICE_ID_LP101:
-		m = (typeof(m)){"LP101", max_speed, ports, "PCI-X"};
+		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
 		break;
 	case PCI_DEVICE_ID_LP10000S:
-		m = (typeof(m)){"LP10000-S", max_speed, ports, "PCI"};
+		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
 		break;
 	case PCI_DEVICE_ID_LP11000S:
+		m = (typeof(m)){"LP11000-S", max_speed,
+			"PCI-X2"};
+		break;
 	case PCI_DEVICE_ID_LPE11000S:
-		switch (dev_subid) {
-		case PCI_SUBSYSTEM_ID_LP11000S:
-			m = (typeof(m)){"LP11000-S", max_speed,
-					ports, "PCI-X2"};
-			break;
-		case PCI_SUBSYSTEM_ID_LP11002S:
-			m = (typeof(m)){"LP11002-S", max_speed,
-					ports, "PCI-X2"};
-			break;
-		case PCI_SUBSYSTEM_ID_LPE11000S:
-			m = (typeof(m)){"LPe11000-S", max_speed,
-					ports, "PCIe"};
-			break;
-		case PCI_SUBSYSTEM_ID_LPE11002S:
-			m = (typeof(m)){"LPe11002-S", max_speed,
-					ports, "PCIe"};
-			break;
-		case PCI_SUBSYSTEM_ID_LPE11010S:
-			m = (typeof(m)){"LPe11010-S", max_speed,
-					"10-port ", "PCIe"};
-			break;
-		default:
-			m = (typeof(m)){ NULL };
-			break;
-		}
+		m = (typeof(m)){"LPe11000-S", max_speed,
+			"PCIe"};
 		break;
 	default:
 		m = (typeof(m)){ NULL };
@@ -968,8 +930,8 @@
 		snprintf(mdp, 79,"%s", m.name);
 	if (descp && descp[0] == '\0')
 		snprintf(descp, 255,
-			 "Emulex %s %dGb %s%s Fibre Channel Adapter",
-			 m.name, m.max_speed, m.ports, m.bus);
+			 "Emulex %s %dGb %s Fibre Channel Adapter",
+			 m.name, m.max_speed, m.bus);
 }
 
 /**************************************************/
@@ -1651,6 +1613,14 @@
 	if (error)
 		goto out_remove_host;
 
+	if (phba->cfg_use_msi) {
+		error = pci_enable_msi(phba->pcidev);
+		if (error)
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "%d:0452 "
+					"Enable MSI failed, continuing with "
+					"IRQ\n", phba->brd_no);
+	}
+
 	error =	request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
 							LPFC_DRIVER_NAME, phba);
 	if (error) {
@@ -1730,6 +1700,7 @@
 	lpfc_stop_timer(phba);
 	phba->work_hba_events = 0;
 	free_irq(phba->pcidev->irq, phba);
+	pci_disable_msi(phba->pcidev);
 out_free_sysfs_attr:
 	lpfc_free_sysfs_attr(phba);
 out_remove_host:
@@ -1796,6 +1767,7 @@
 
 	/* Release the irq reservation */
 	free_irq(phba->pcidev->irq, phba);
+	pci_disable_msi(phba->pcidev);
 
 	lpfc_cleanup(phba, 0);
 	lpfc_stop_timer(phba);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 62c8ca8..438cbcd 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -28,7 +28,7 @@
 #define LOG_NODE                      0x80	/* Node table events */
 #define LOG_MISC                      0x400	/* Miscellaneous events */
 #define LOG_SLI                       0x800	/* SLI events */
-#define LOG_CHK_COND                  0x1000	/* FCP Check condition flag */
+#define LOG_FCP_ERROR                 0x1000	/* log errors, not underruns */
 #define LOG_LIBDFC                    0x2000	/* Libdfc events */
 #define LOG_ALL_MSG                   0xffff	/* LOG all messages */
 
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d5f4150..0c7e731 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -739,7 +739,7 @@
 			    uint32_t evt)
 {
 	struct lpfc_iocbq *cmdiocb, *rspiocb;
-	struct lpfc_dmabuf *pcmd, *prsp;
+	struct lpfc_dmabuf *pcmd, *prsp, *mp;
 	uint32_t *lp;
 	IOCB_t *irsp;
 	struct serv_parm *sp;
@@ -829,6 +829,9 @@
 				      NLP_REGLOGIN_LIST);
 			return ndlp->nlp_state;
 		}
+		mp = (struct lpfc_dmabuf *)mbox->context1;
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
 		mempool_free(mbox, phba->mbox_mem_pool);
 	} else {
 		mempool_free(mbox, phba->mbox_mem_pool);
@@ -1620,8 +1623,8 @@
 	 * or discovery in progress for this node. Starting discovery
 	 * here will affect the counting of discovery threads.
 	 */
-	if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) &&
-		(ndlp->nlp_flag & NLP_NPR_2B_DISC)){
+	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+		!(ndlp->nlp_flag & NLP_NPR_2B_DISC)){
 		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
 			ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 97ae98d..c3e68e0 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -297,8 +297,10 @@
 	uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
 	uint32_t resp_info = fcprsp->rspStatus2;
 	uint32_t scsi_status = fcprsp->rspStatus3;
+	uint32_t *lp;
 	uint32_t host_status = DID_OK;
 	uint32_t rsplen = 0;
+	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
 
 	/*
 	 *  If this is a task management command, there is no
@@ -310,10 +312,25 @@
 		goto out;
 	}
 
-	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-			"%d:0730 FCP command failed: RSP "
-			"Data: x%x x%x x%x x%x x%x x%x\n",
-			phba->brd_no, resp_info, scsi_status,
+	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
+		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
+		if (snslen > SCSI_SENSE_BUFFERSIZE)
+			snslen = SCSI_SENSE_BUFFERSIZE;
+
+		if (resp_info & RSP_LEN_VALID)
+			rsplen = be32_to_cpu(fcprsp->rspRspLen);
+		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
+	}
+	lp = (uint32_t *)cmnd->sense_buffer;
+
+	if (!scsi_status && (resp_info & RESID_UNDER))
+		logit = LOG_FCP;
+
+	lpfc_printf_log(phba, KERN_WARNING, logit,
+			"%d:0730 FCP command x%x failed: x%x SNS x%x x%x "
+			"Data: x%x x%x x%x x%x x%x\n",
+			phba->brd_no, cmnd->cmnd[0], scsi_status,
+			be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
 			be32_to_cpu(fcprsp->rspResId),
 			be32_to_cpu(fcprsp->rspSnsLen),
 			be32_to_cpu(fcprsp->rspRspLen),
@@ -328,14 +345,6 @@
 		}
 	}
 
-	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
-		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
-		if (snslen > SCSI_SENSE_BUFFERSIZE)
-			snslen = SCSI_SENSE_BUFFERSIZE;
-
-		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
-	}
-
 	cmnd->resid = 0;
 	if (resp_info & RESID_UNDER) {
 		cmnd->resid = be32_to_cpu(fcprsp->rspResId);
@@ -378,7 +387,7 @@
 	 */
 	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
 			(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
 			"%d:0734 FCP Read Check Error Data: "
 			"x%x x%x x%x x%x\n", phba->brd_no,
 			be32_to_cpu(fcpcmd->fcpDl),
@@ -670,6 +679,9 @@
 	struct lpfc_iocbq *iocbqrsp;
 	int ret;
 
+	if (!rdata->pnode)
+		return FAILED;
+
 	lpfc_cmd->rdata = rdata;
 	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
 					   FCP_TARGET_RESET);
@@ -976,20 +988,34 @@
 
 	lpfc_block_error_handler(cmnd);
 	spin_lock_irq(shost->host_lock);
+	loopcnt = 0;
 	/*
 	 * If target is not in a MAPPED state, delay the reset until
 	 * target is rediscovered or devloss timeout expires.
 	 */
 	while ( 1 ) {
 		if (!pnode)
-			break;
+			return FAILED;
 
 		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
 			spin_unlock_irq(phba->host->host_lock);
 			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
 			spin_lock_irq(phba->host->host_lock);
+			loopcnt++;
+			rdata = cmnd->device->hostdata;
+			if (!rdata ||
+				(loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+					"%d:0721 LUN Reset rport failure:"
+					" cnt x%x rdata x%p\n",
+					phba->brd_no, loopcnt, rdata);
+				goto out;
+			}
+			pnode = rdata->pnode;
+			if (!pnode)
+				return FAILED;
 		}
-		if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
+		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
 			break;
 	}
 
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 582f5ea..a4128e1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -117,6 +117,10 @@
 	case CMD_FCP_IREAD_CX:
 	case CMD_FCP_ICMND_CR:
 	case CMD_FCP_ICMND_CX:
+	case CMD_FCP_TSEND_CX:
+	case CMD_FCP_TRSP_CX:
+	case CMD_FCP_TRECEIVE_CX:
+	case CMD_FCP_AUTO_TRSP_CX:
 	case CMD_ADAPTER_MSG:
 	case CMD_ADAPTER_DUMP:
 	case CMD_XMIT_SEQUENCE64_CR:
@@ -131,6 +135,9 @@
 	case CMD_FCP_IREAD64_CX:
 	case CMD_FCP_ICMND64_CR:
 	case CMD_FCP_ICMND64_CX:
+	case CMD_FCP_TSEND64_CX:
+	case CMD_FCP_TRSP64_CX:
+	case CMD_FCP_TRECEIVE64_CX:
 	case CMD_GEN_REQUEST64_CR:
 	case CMD_GEN_REQUEST64_CX:
 	case CMD_XMIT_ELS_RSP64_CX:
@@ -1098,6 +1105,7 @@
 		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
 				      (uint32_t *) &rspiocbq.iocb,
 				      sizeof (IOCB_t));
+		INIT_LIST_HEAD(&(rspiocbq.list));
 		irsp = &rspiocbq.iocb;
 
 		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
@@ -1149,6 +1157,11 @@
 				}
 			}
 			break;
+		case LPFC_UNSOL_IOCB:
+			spin_unlock_irqrestore(phba->host->host_lock, iflag);
+			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
+			spin_lock_irqsave(phba->host->host_lock, iflag);
+			break;
 		default:
 			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
 				char adaptermsg[LPFC_MAX_ADPTMSG];
@@ -2472,13 +2485,17 @@
 	psli = &phba->sli;
 
 	/* Adjust cmd/rsp ring iocb entries more evenly */
+
+	/* Take some away from the FCP ring */
 	pring = &psli->ring[psli->fcp_ring];
 	pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
 	pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
 	pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
 	pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
 
-	pring = &psli->ring[1];
+	/* and give them to the extra ring */
+	pring = &psli->ring[psli->extra_ring];
+
 	pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
 	pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
 	pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
@@ -2488,8 +2505,8 @@
 	pring->iotag_max = 4096;
 	pring->num_mask = 1;
 	pring->prt[0].profile = 0;      /* Mask 0 */
-	pring->prt[0].rctl = FC_UNSOL_DATA;
-	pring->prt[0].type = 5;
+	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
+	pring->prt[0].type = phba->cfg_multi_ring_type;
 	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
 	return 0;
 }
@@ -2505,7 +2522,7 @@
 	psli->sli_flag = 0;
 	psli->fcp_ring = LPFC_FCP_RING;
 	psli->next_ring = LPFC_FCP_NEXT_RING;
-	psli->ip_ring = LPFC_IP_RING;
+	psli->extra_ring = LPFC_EXTRA_RING;
 
 	psli->iocbq_lookup = NULL;
 	psli->iocbq_lookup_len = 0;
@@ -2528,7 +2545,7 @@
 			pring->fast_iotag = pring->iotag_max;
 			pring->num_mask = 0;
 			break;
-		case LPFC_IP_RING:	/* ring 1 - IP */
+		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
 			/* numCiocb and numRiocb are used in config_port */
 			pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
 			pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
@@ -3238,6 +3255,21 @@
 		lpfc_sli_handle_fast_ring_event(phba,
 						&phba->sli.ring[LPFC_FCP_RING],
 						status);
+
+	if (phba->cfg_multi_ring_support == 2) {
+		/*
+		 * Process all events on extra ring.  Take the optimized path
+		 * for extra ring IO.  Any other IO is slow path and is handled
+		 * by the worker thread.
+		 */
+		status = (ha_copy & (HA_RXMASK  << (4*LPFC_EXTRA_RING)));
+		status >>= (4*LPFC_EXTRA_RING);
+		if (status & HA_RXATT) {
+			lpfc_sli_handle_fast_ring_event(phba,
+					&phba->sli.ring[LPFC_EXTRA_RING],
+					status);
+		}
+	}
 	return IRQ_HANDLED;
 
 } /* lpfc_intr_handler */
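
The interrupt-handler hunk above derives a per-ring status nibble from the host-attention word: each ring owns four bits, so the handler masks ha_copy with (HA_RXMASK << (4 * ring)), shifts the result back down, and tests the attention bit. A minimal standalone sketch of that decode follows; the mask values are illustrative assumptions, the driver's real HA_* definitions live in its headers.

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative values only; not the lpfc driver's actual HA_* masks. */
	#define EX_RXMASK	0x000f	/* four attention bits per ring */
	#define EX_RXATT	0x0008	/* response-ring attention bit within the nibble */

	static uint32_t ring_status(uint32_t ha_copy, int ring)
	{
		uint32_t status = ha_copy & (EX_RXMASK << (4 * ring));
		return status >> (4 * ring);	/* right-align this ring's nibble */
	}

	int main(void)
	{
		uint32_t ha_copy = 0x00000890;	/* attention pending on rings 1 and 2 */
		int ring;

		for (ring = 0; ring < 4; ring++)
			printf("ring %d: status 0x%x RXATT=%d\n", ring,
			       ring_status(ha_copy, ring),
			       (ring_status(ha_copy, ring) & EX_RXATT) ? 1 : 0);
		return 0;
	}
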
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index e26de6809..a435499 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -198,7 +198,7 @@
 	int fcp_ring;		/* ring used for FCP initiator commands */
 	int next_ring;
 
-	int ip_ring;		/* ring used for IP network drv cmds */
+	int extra_ring;		/* extra ring used for other protocols */
 
 	struct lpfc_sli_stat slistat;	/* SLI statistical info */
 	struct list_head mboxq;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ac41790..a61ef3d1 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.1.10"
+#define LPFC_DRIVER_VERSION "8.1.11"
 
 #define LPFC_DRIVER_NAME "lpfc"
 
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 86099fd..77d9d38 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -73,10 +73,10 @@
 module_param(max_mbox_busy_wait, ushort, 0);
 MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");
 
-#define RDINDOOR(adapter)		readl((adapter)->base + 0x20)
-#define RDOUTDOOR(adapter)		readl((adapter)->base + 0x2C)
-#define WRINDOOR(adapter,value)		writel(value, (adapter)->base + 0x20)
-#define WROUTDOOR(adapter,value)	writel(value, (adapter)->base + 0x2C)
+#define RDINDOOR(adapter)	readl((adapter)->mmio_base + 0x20)
+#define RDOUTDOOR(adapter)	readl((adapter)->mmio_base + 0x2C)
+#define WRINDOOR(adapter,value)	 writel(value, (adapter)->mmio_base + 0x20)
+#define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)
 
 /*
  * Global variables
@@ -1386,7 +1386,8 @@
 
 		handled = 1;
 
-		while( RDINDOOR(adapter) & 0x02 ) cpu_relax();
+		while( RDINDOOR(adapter) & 0x02 )
+			cpu_relax();
 
 		mega_cmd_done(adapter, completed, nstatus, status);
 
@@ -4668,6 +4669,8 @@
 		host->host_no, mega_baseport, irq);
 
 	adapter->base = mega_baseport;
+	if (flag & BOARD_MEMMAP)
+		adapter->mmio_base = (void __iomem *) mega_baseport;
 
 	INIT_LIST_HEAD(&adapter->free_list);
 	INIT_LIST_HEAD(&adapter->pending_list);
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 66529f1..c6e7464 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -801,7 +801,8 @@
 				   clustering is available */
 	u32	flag;
 
-	unsigned long	base;
+	unsigned long		base;
+	void __iomem		*mmio_base;
 
 	/* mbox64 with mbox not aligned on 16-byte boundry */
 	mbox64_t	*una_mbox64;
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 7e4262f..046223b 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -517,7 +517,7 @@
  * Returns the number of frames required for the number of sge's (sge_count)
  */
 
-u32 megasas_get_frame_count(u8 sge_count)
+static u32 megasas_get_frame_count(u8 sge_count)
 {
 	int num_cnt;
 	int sge_bytes;
@@ -1733,7 +1733,7 @@
  *
  * Tasklet to complete cmds
  */
-void megasas_complete_cmd_dpc(unsigned long instance_addr)
+static void megasas_complete_cmd_dpc(unsigned long instance_addr)
 {
 	u32 producer;
 	u32 consumer;
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 6cc2bc2..bbf521c 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -185,7 +185,7 @@
 **	power of 2 cache line size.
 **	Enhanced in linux-2.3.44 to provide a memory pool 
 **	per pcidev to support dynamic dma mapping. (I would 
-**	have preferred a real bus astraction, btw).
+**	have preferred a real bus abstraction, btw).
 **
 **==========================================================
 */
@@ -589,10 +589,12 @@
 static struct ncr_driver_setup
 	driver_setup			= SCSI_NCR_DRIVER_SETUP;
 
+#ifndef MODULE
 #ifdef	SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
 static struct ncr_driver_setup
 	driver_safe_setup __initdata	= SCSI_NCR_DRIVER_SAFE_SETUP;
 #endif
+#endif /* !MODULE */
 
 #define initverbose (driver_setup.verbose)
 #define bootverbose (np->verbose)
@@ -641,6 +643,13 @@
 #define OPT_IARB		26
 #endif
 
+#ifdef MODULE
+#define	ARG_SEP	' '
+#else
+#define	ARG_SEP	','
+#endif
+
+#ifndef MODULE
 static char setup_token[] __initdata = 
 	"tags:"   "mpar:"
 	"spar:"   "disc:"
@@ -660,12 +669,6 @@
 #endif
 	;	/* DONNOT REMOVE THIS ';' */
 
-#ifdef MODULE
-#define	ARG_SEP	' '
-#else
-#define	ARG_SEP	','
-#endif
-
 static int __init get_setup_token(char *p)
 {
 	char *cur = setup_token;
@@ -682,7 +685,6 @@
 	return 0;
 }
 
-
 static int __init sym53c8xx__setup(char *str)
 {
 #ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
@@ -804,6 +806,7 @@
 #endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
 	return 1;
 }
+#endif /* !MODULE */
 
 /*===================================================================
 **
@@ -1438,7 +1441,7 @@
 **	The first four bytes (scr_st[4]) are used inside the script by 
 **	"COPY" commands.
 **	Because source and destination must have the same alignment
-**	in a DWORD, the fields HAVE to be at the choosen offsets.
+**	in a DWORD, the fields HAVE to be at the chosen offsets.
 **		xerr_st		0	(0x34)	scratcha
 **		sync_st		1	(0x05)	sxfer
 **		wide_st		3	(0x03)	scntl3
@@ -1498,7 +1501,7 @@
 **	the DSA (data structure address) register points
 **	to this substructure of the ccb.
 **	This substructure contains the header with
-**	the script-processor-changable data and
+**	the script-processor-changeable data and
 **	data blocks for the indirect move commands.
 **
 **----------------------------------------------------------
@@ -5107,7 +5110,7 @@
 
 /*
 **	This CCB has been skipped by the NCR.
-**	Queue it in the correponding unit queue.
+**	Queue it in the corresponding unit queue.
 */
 static void ncr_ccb_skipped(struct ncb *np, struct ccb *cp)
 {
@@ -5896,8 +5899,8 @@
 **
 **	In normal cases, interrupt conditions occur one at a 
 **	time. The ncr is able to stack in some extra registers 
-**	other interrupts that will occurs after the first one.
-**	But severall interrupts may occur at the same time.
+**	other interrupts that will occur after the first one.
+**	But, several interrupts may occur at the same time.
 **
 **	We probably should only try to deal with the normal 
 **	case, but it seems that multiple interrupts occur in 
@@ -6796,7 +6799,7 @@
 **	The host status field is set to HS_NEGOTIATE to mark this
 **	situation.
 **
-**	If the target doesn't answer this message immidiately
+**	If the target doesn't answer this message immediately
 **	(as required by the standard), the SIR_NEGO_FAIL interrupt
 **	will be raised eventually.
 **	The handler removes the HS_NEGOTIATE status, and sets the
@@ -8321,12 +8324,12 @@
 module_param(ncr53c8xx, charp, 0);
 #endif
 
+#ifndef MODULE
 static int __init ncr53c8xx_setup(char *str)
 {
 	return sym53c8xx__setup(str);
 }
 
-#ifndef MODULE
 __setup("ncr53c8xx=", ncr53c8xx_setup);
 #endif
 
diff --git a/drivers/scsi/ncr53c8xx.h b/drivers/scsi/ncr53c8xx.h
index cb8b770..b39357d 100644
--- a/drivers/scsi/ncr53c8xx.h
+++ b/drivers/scsi/ncr53c8xx.h
@@ -218,7 +218,7 @@
 **	Same as option 1, but also deal with 
 **	misconfigured interrupts.
 **
-**	- Edge triggerred instead of level sensitive.
+**	- Edge triggered instead of level sensitive.
 **	- No interrupt line connected.
 **	- IRQ number misconfigured.
 **	
@@ -549,7 +549,7 @@
 
 /*
 **	Initial setup.
-**	Can be overriden at startup by a command line.
+**	Can be overridden at startup by a command line.
 */
 #define SCSI_NCR_DRIVER_SETUP			\
 {						\
@@ -1093,7 +1093,7 @@
 **-----------------------------------------------------------
 **	On 810A, 860, 825A, 875, 895 and 896 chips the content 
 **	of SFBR register can be used as data (SCR_SFBR_DATA).
-**	The 896 has additionnal IO registers starting at 
+**	The 896 has additional IO registers starting at 
 **	offset 0x80. Bit 7 of register offset is stored in 
 **	bit 7 of the SCRIPTS instruction first DWORD.
 **-----------------------------------------------------------
diff --git a/drivers/scsi/oktagon_esp.c b/drivers/scsi/oktagon_esp.c
index dd67a68..c116a6a 100644
--- a/drivers/scsi/oktagon_esp.c
+++ b/drivers/scsi/oktagon_esp.c
@@ -72,12 +72,12 @@
 static int  oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x);
 
 #ifdef USE_BOTTOM_HALF
-static void dma_commit(void *opaque);
+static void dma_commit(struct work_struct *unused);
 
 long oktag_to_io(long *paddr, long *addr, long len);
 long oktag_from_io(long *addr, long *paddr, long len);
 
-static DECLARE_WORK(tq_fake_dma, dma_commit, NULL);
+static DECLARE_WORK(tq_fake_dma, dma_commit);
 
 #define DMA_MAXTRANSFER 0x8000
 
@@ -266,7 +266,7 @@
  */
  
  
-static void dma_commit(void *opaque)
+static void dma_commit(struct work_struct *unused)
 {
     long wait,len2,pos;
     struct NCR_ESP *esp;
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index ee449b2..aad362b 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -154,16 +154,11 @@
     
     DEBUG(0, "aha152x_config(0x%p)\n", link);
 
-    tuple.DesiredTuple = CISTPL_CONFIG;
     tuple.TupleData = tuple_data;
     tuple.TupleDataMax = 64;
     tuple.TupleOffset = 0;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-
     tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+    tuple.Attributes = 0;
     CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
     while (1) {
 	if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index 85f7ffa..a1c5f26 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -136,14 +136,9 @@
 
     DEBUG(0, "fdomain_config(0x%p)\n", link);
 
-    tuple.DesiredTuple = CISTPL_CONFIG;
     tuple.TupleData = tuple_data;
     tuple.TupleDataMax = 64;
     tuple.TupleOffset = 0;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
 
     tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
     CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index f2d79c3..d72df5d 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1685,16 +1685,10 @@
 
 	nsp_dbg(NSP_DEBUG_INIT, "in");
 
-	tuple.DesiredTuple    = CISTPL_CONFIG;
 	tuple.Attributes      = 0;
 	tuple.TupleData	      = tuple_data;
 	tuple.TupleDataMax    = sizeof(tuple_data);
 	tuple.TupleOffset     = 0;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	CS_CHECK(GetTupleData,	pcmcia_get_tuple_data(link, &tuple));
-	CS_CHECK(ParseTuple,	pcmcia_parse_tuple(link, &tuple, &parse));
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present    = parse.config.rmask[0];
 
 	/* Look up the current Vcc */
 	CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &conf));
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index 86c2ac6..9d431fe 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -208,18 +208,11 @@
 
 	DEBUG(0, "qlogic_config(0x%p)\n", link);
 
+	info->manf_id = link->manf_id;
+
 	tuple.TupleData = (cisdata_t *) tuple_data;
 	tuple.TupleDataMax = 64;
 	tuple.TupleOffset = 0;
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-	link->conf.ConfigBase = parse.config.base;
-
-	tuple.DesiredTuple = CISTPL_MANFID;
-	if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) && (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS))
-		info->manf_id = le16_to_cpu(tuple.TupleData[0]);
 
 	tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
 	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 72fe5d0..fb7acea 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -722,19 +722,11 @@
 
 	DEBUG(0, "SYM53C500_config(0x%p)\n", link);
 
+	info->manf_id = link->manf_id;
+
 	tuple.TupleData = (cisdata_t *)tuple_data;
 	tuple.TupleDataMax = 64;
 	tuple.TupleOffset = 0;
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-	link->conf.ConfigBase = parse.config.base;
-
-	tuple.DesiredTuple = CISTPL_MANFID;
-	if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) &&
-	    (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS))
-		info->manf_id = le16_to_cpu(tuple.TupleData[0]);
 
 	tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
 	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 89a2a9f..584ba4d 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -31,7 +31,7 @@
 	int base;		/* Actual port address          */
 	int mode;		/* Transfer mode                */
 	struct scsi_cmnd *cur_cmd;	/* Current queued command       */
-	struct work_struct ppa_tq;	/* Polling interrupt stuff       */
+	struct delayed_work ppa_tq;	/* Polling interrupt stuff       */
 	unsigned long jstart;	/* Jiffies at start             */
 	unsigned long recon_tmo;	/* How many usecs to wait for reconnection (6th bit) */
 	unsigned int failed:1;	/* Failure flag                 */
@@ -627,9 +627,9 @@
  * the scheduler's task queue to generate a stream of call-backs and
  * complete the request when the drive is ready.
  */
-static void ppa_interrupt(void *data)
+static void ppa_interrupt(struct work_struct *work)
 {
-	ppa_struct *dev = (ppa_struct *) data;
+	ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work);
 	struct scsi_cmnd *cmd = dev->cur_cmd;
 
 	if (!cmd) {
@@ -637,7 +637,6 @@
 		return;
 	}
 	if (ppa_engine(dev, cmd)) {
-		dev->ppa_tq.data = (void *) dev;
 		schedule_delayed_work(&dev->ppa_tq, 1);
 		return;
 	}
@@ -822,8 +821,7 @@
 	cmd->result = DID_ERROR << 16;	/* default return code */
 	cmd->SCp.phase = 0;	/* bus free */
 
-	dev->ppa_tq.data = dev;
-	schedule_work(&dev->ppa_tq);
+	schedule_delayed_work(&dev->ppa_tq, 0);
 
 	ppa_pb_claim(dev);
 
@@ -1086,7 +1084,7 @@
 	else
 		ports = 8;
 
-	INIT_WORK(&dev->ppa_tq, ppa_interrupt, dev);
+	INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt);
 
 	err = -ENOMEM;
 	host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *));
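
The ppa conversion above is the standard recipe for the reworked workqueue API: the polling routine moves from a work_struct that carried a data cookie to a delayed_work whose handler recovers its owner with container_of(), and one-shot submissions become schedule_delayed_work(..., 0). A minimal sketch of the same pattern with hypothetical names (my_dev, my_poll):

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	struct my_dev {
		int busy;
		struct delayed_work poll_work;	/* replaces work_struct + ->data */
	};

	/* The handler receives the work pointer and recovers the owning structure. */
	static void my_poll(struct work_struct *work)
	{
		struct my_dev *dev = container_of(work, struct my_dev, poll_work.work);

		if (dev->busy)
			schedule_delayed_work(&dev->poll_work, 1);	/* poll again next jiffy */
	}

	static struct my_dev *my_dev_alloc(void)
	{
		struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

		if (dev)
			INIT_DELAYED_WORK(&dev->poll_work, my_poll);	/* no data cookie needed */
		return dev;
	}
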
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 285c8e8..7b18a6c 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -390,7 +390,7 @@
 	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
 	{ "vpd", &sysfs_vpd_attr, 1 },
 	{ "sfp", &sysfs_sfp_attr, 1 },
-	{ 0 },
+	{ NULL },
 };
 
 void
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 08cb5e3..a823f0b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -59,9 +59,6 @@
 qla2x00_initialize_adapter(scsi_qla_host_t *ha)
 {
 	int	rval;
-	uint8_t	restart_risc = 0;
-	uint8_t	retry;
-	uint32_t wait_time;
 
 	/* Clear adapter flags. */
 	ha->flags.online = 0;
@@ -104,87 +101,15 @@
 
 	qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
 
-	retry = 10;
-	/*
-	 * Try to configure the loop.
-	 */
-	do {
-		restart_risc = 0;
-
-		/* If firmware needs to be loaded */
-		if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
-			if ((rval = ha->isp_ops.chip_diag(ha)) == QLA_SUCCESS) {
-				rval = qla2x00_setup_chip(ha);
-			}
-		}
-
-		if (rval == QLA_SUCCESS &&
-		    (rval = qla2x00_init_rings(ha)) == QLA_SUCCESS) {
-check_fw_ready_again:
-			/*
-			 * Wait for a successful LIP up to a maximum
-			 * of (in seconds): RISC login timeout value,
-			 * RISC retry count value, and port down retry
-			 * value OR a minimum of 4 seconds OR If no
-			 * cable, only 5 seconds.
-			 */
-			rval = qla2x00_fw_ready(ha);
-			if (rval == QLA_SUCCESS) {
-				clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
-
-				/* Issue a marker after FW becomes ready. */
-				qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
-
-				/*
-				 * Wait at most MAX_TARGET RSCNs for a stable
-				 * link.
-				 */
-				wait_time = 256;
-				do {
-					clear_bit(LOOP_RESYNC_NEEDED,
-					    &ha->dpc_flags);
-					rval = qla2x00_configure_loop(ha);
-
-					if (test_and_clear_bit(ISP_ABORT_NEEDED,
-					    &ha->dpc_flags)) {
-						restart_risc = 1;
-						break;
-					}
-
-					/*
-					 * If loop state change while we were
-					 * discoverying devices then wait for
-					 * LIP to complete
-					 */
-
-					if (atomic_read(&ha->loop_state) !=
-					    LOOP_READY && retry--) {
-						goto check_fw_ready_again;
-					}
-					wait_time--;
-				} while (!atomic_read(&ha->loop_down_timer) &&
-				    retry &&
-				    wait_time &&
-				    (test_bit(LOOP_RESYNC_NEEDED,
-					&ha->dpc_flags)));
-
-				if (wait_time == 0)
-					rval = QLA_FUNCTION_FAILED;
-			} else if (ha->device_flags & DFLG_NO_CABLE)
-				/* If no cable, then all is good. */
-				rval = QLA_SUCCESS;
-		}
-	} while (restart_risc && retry--);
-
-	if (rval == QLA_SUCCESS) {
-		clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
-		qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
-		ha->marker_needed = 0;
-
-		ha->flags.online = 1;
-	} else {
-		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
+	if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
+		rval = ha->isp_ops.chip_diag(ha);
+		if (rval)
+			return (rval);
+		rval = qla2x00_setup_chip(ha);
+		if (rval)
+			return (rval);
 	}
+	rval = qla2x00_init_rings(ha);
 
 	return (rval);
 }
@@ -2208,8 +2133,7 @@
 
 	atomic_set(&fcport->state, FCS_ONLINE);
 
-	if (ha->flags.init_done)
-		qla2x00_reg_remote_port(ha, fcport);
+	qla2x00_reg_remote_port(ha, fcport);
 }
 
 void
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 208607b..d03523d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -24,7 +24,7 @@
 /*
  * SRB allocation cache
  */
-static kmem_cache_t *srb_cachep;
+static struct kmem_cache *srb_cachep;
 
 /*
  * Ioctl related information.
@@ -95,6 +95,8 @@
  */
 static int qla2xxx_slave_configure(struct scsi_device * device);
 static int qla2xxx_slave_alloc(struct scsi_device *);
+static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
+static void qla2xxx_scan_start(struct Scsi_Host *);
 static void qla2xxx_slave_destroy(struct scsi_device *);
 static int qla2x00_queuecommand(struct scsi_cmnd *cmd,
 		void (*fn)(struct scsi_cmnd *));
@@ -124,6 +126,8 @@
 
 	.slave_alloc		= qla2xxx_slave_alloc,
 	.slave_destroy		= qla2xxx_slave_destroy,
+	.scan_finished		= qla2xxx_scan_finished,
+	.scan_start		= qla2xxx_scan_start,
 	.change_queue_depth	= qla2x00_change_queue_depth,
 	.change_queue_type	= qla2x00_change_queue_type,
 	.this_id		= -1,
@@ -287,7 +291,7 @@
 	return str;
 }
 
-char *
+static char *
 qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
 {
 	char un_str[10];
@@ -325,7 +329,7 @@
 	return (str);
 }
 
-char *
+static char *
 qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
 {
 	sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
@@ -634,7 +638,7 @@
 * Note:
 *    Only return FAILED if command not returned by firmware.
 **************************************************************************/
-int
+static int
 qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 {
 	scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@ -771,7 +775,7 @@
 *    SUCCESS/FAILURE (defined as macro in scsi.h).
 *
 **************************************************************************/
-int
+static int
 qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
 {
 	scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@ -902,7 +906,7 @@
 *    SUCCESS/FAILURE (defined as macro in scsi.h).
 *
 **************************************************************************/
-int
+static int
 qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 {
 	scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@ -963,7 +967,7 @@
 *
 * Note:
 **************************************************************************/
-int
+static int
 qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
 {
 	scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@ -1366,6 +1370,29 @@
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
+static void
+qla2xxx_scan_start(struct Scsi_Host *shost)
+{
+	scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata;
+
+	set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
+	set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
+	set_bit(RSCN_UPDATE, &ha->dpc_flags);
+}
+
+static int
+qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+	scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata;
+
+	if (!ha->host)
+		return 1;
+	if (time > ha->loop_reset_delay * HZ)
+		return 1;
+
+	return atomic_read(&ha->loop_state) == LOOP_READY;
+}
+
 /*
  * PCI driver interface
  */
@@ -1377,10 +1404,8 @@
 	struct Scsi_Host *host;
 	scsi_qla_host_t *ha;
 	unsigned long	flags = 0;
-	unsigned long	wait_switch = 0;
 	char pci_info[20];
 	char fw_str[30];
-	fc_port_t *fcport;
 	struct scsi_host_template *sht;
 
 	if (pci_enable_device(pdev))
@@ -1631,30 +1656,19 @@
 
 	ha->isp_ops.enable_intrs(ha);
 
-	/* v2.19.5b6 */
-	/*
-	 * Wait around max loop_reset_delay secs for the devices to come
-	 * on-line. We don't want Linux scanning before we are ready.
-	 *
-	 */
-	for (wait_switch = jiffies + (ha->loop_reset_delay * HZ);
-	    time_before(jiffies,wait_switch) &&
-	     !(ha->device_flags & (DFLG_NO_CABLE | DFLG_FABRIC_DEVICES))
-	     && (ha->device_flags & SWITCH_FOUND) ;) {
-
-		qla2x00_check_fabric_devices(ha);
-
-		msleep(10);
-	}
-
 	pci_set_drvdata(pdev, ha);
+
 	ha->flags.init_done = 1;
+	ha->flags.online = 1;
+
 	num_hosts++;
 
 	ret = scsi_add_host(host, &pdev->dev);
 	if (ret)
 		goto probe_failed;
 
+	scsi_scan_host(host);
+
 	qla2x00_alloc_sysfs_attr(ha);
 
 	qla2x00_init_host_attr(ha);
@@ -1669,10 +1683,6 @@
 	    ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no,
 	    ha->isp_ops.fw_version_str(ha, fw_str));
 
-	/* Go with fc_rport registration. */
-	list_for_each_entry(fcport, &ha->fcports, list)
-		qla2x00_reg_remote_port(ha, fcport);
-
 	return 0;
 
 probe_failed:
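
The scan_start/scan_finished hooks added above tie qla2xxx into the midlayer's asynchronous scan: scsi_scan_host() invokes scan_start() once and then polls scan_finished() with the elapsed time until it returns nonzero, which is why the driver can drop its own busy-wait loop from probe. A simplified sketch of that polling contract (an assumption about the loop shape, not the midlayer's actual code):

	#include <linux/jiffies.h>
	#include <linux/delay.h>
	#include <scsi/scsi_host.h>

	static void wait_for_scan(struct Scsi_Host *shost)
	{
		unsigned long start = jiffies;

		if (shost->hostt->scan_start)
			shost->hostt->scan_start(shost);	/* kick off discovery */

		while (shost->hostt->scan_finished &&
		       !shost->hostt->scan_finished(shost, jiffies - start))
			msleep(10);	/* poll until the driver reports the loop is ready */
	}
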
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index c71dbd5..15390ad 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -449,7 +449,7 @@
 	return FARX_ACCESS_NVRAM_DATA | naddr;
 }
 
-uint32_t
+static uint32_t
 qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr)
 {
 	int rval;
@@ -490,7 +490,7 @@
 	return dwptr;
 }
 
-int
+static int
 qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
 {
 	int rval;
@@ -512,7 +512,7 @@
 	return rval;
 }
 
-void
+static void
 qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
     uint8_t *flash_id)
 {
@@ -537,7 +537,7 @@
 	}
 }
 
-int
+static int
 qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
     uint32_t dwords)
 {
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index 752031f..7b4e077 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -71,7 +71,7 @@
 		       readw(&ha->reg->u1.isp4010.nvram));
 	}
 
-	else if (is_qla4022(ha)) {
+	else if (is_qla4022(ha) | is_qla4032(ha)) {
 		printk(KERN_INFO "0x%02X intr_mask	 = 0x%08X\n",
 		       (uint8_t) offsetof(struct isp_reg,
 					  u1.isp4022.intr_mask),
@@ -119,7 +119,7 @@
 		       readw(&ha->reg->u2.isp4010.port_err_status));
 	}
 
-	else if (is_qla4022(ha)) {
+	else if (is_qla4022(ha) | is_qla4032(ha)) {
 		printk(KERN_INFO "Page 0 Registers:\n");
 		printk(KERN_INFO "0x%02X ext_hw_conf	 = 0x%08X\n",
 		       (uint8_t) offsetof(struct isp_reg,
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index a7f6c7b..4249e52 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -40,7 +40,11 @@
 
 #ifndef PCI_DEVICE_ID_QLOGIC_ISP4022
 #define PCI_DEVICE_ID_QLOGIC_ISP4022	0x4022
-#endif				/*  */
+#endif
+
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP4032
+#define PCI_DEVICE_ID_QLOGIC_ISP4032	0x4032
+#endif
 
 #define QLA_SUCCESS			0
 #define QLA_ERROR			1
@@ -277,7 +281,6 @@
 #define AF_INTERRUPTS_ON	      6 /* 0x00000040 Not Used */
 #define AF_GET_CRASH_RECORD	      7 /* 0x00000080 */
 #define AF_LINK_UP		      8 /* 0x00000100 */
-#define AF_TOPCAT_CHIP_PRESENT	      9 /* 0x00000200 */
 #define AF_IRQ_ATTACHED		     10 /* 0x00000400 */
 #define AF_ISNS_CMD_IN_PROCESS	     12 /* 0x00001000 */
 #define AF_ISNS_CMD_DONE	     13 /* 0x00002000 */
@@ -317,16 +320,17 @@
 	/* NVRAM registers */
 	struct eeprom_data *nvram;
 	spinlock_t hardware_lock ____cacheline_aligned;
-	spinlock_t list_lock;
 	uint32_t   eeprom_cmd_data;
 
 	/* Counters for general statistics */
+	uint64_t isr_count;
 	uint64_t adapter_error_count;
 	uint64_t device_error_count;
 	uint64_t total_io_count;
 	uint64_t total_mbytes_xferred;
 	uint64_t link_failure_count;
 	uint64_t invalid_crc_count;
+	uint32_t bytes_xfered;
 	uint32_t spurious_int_count;
 	uint32_t aborted_io_count;
 	uint32_t io_timeout_count;
@@ -438,6 +442,11 @@
 	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022;
 }
 
+static inline int is_qla4032(struct scsi_qla_host *ha)
+{
+	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032;
+}
+
 static inline int adapter_up(struct scsi_qla_host *ha)
 {
 	return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
@@ -451,58 +460,58 @@
 
 static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha)
 {
-	return (is_qla4022(ha) ?
-		&ha->reg->u1.isp4022.semaphore :
-		&ha->reg->u1.isp4010.nvram);
+	return (is_qla4010(ha) ?
+		&ha->reg->u1.isp4010.nvram :
+		&ha->reg->u1.isp4022.semaphore);
 }
 
 static inline void __iomem* isp_nvram(struct scsi_qla_host *ha)
 {
-	return (is_qla4022(ha) ?
-		&ha->reg->u1.isp4022.nvram :
-		&ha->reg->u1.isp4010.nvram);
+	return (is_qla4010(ha) ?
+		&ha->reg->u1.isp4010.nvram :
+		&ha->reg->u1.isp4022.nvram);
 }
 
 static inline void __iomem* isp_ext_hw_conf(struct scsi_qla_host *ha)
 {
-	return (is_qla4022(ha) ?
-		&ha->reg->u2.isp4022.p0.ext_hw_conf :
-		&ha->reg->u2.isp4010.ext_hw_conf);
+	return (is_qla4010(ha) ?
+		&ha->reg->u2.isp4010.ext_hw_conf :
+		&ha->reg->u2.isp4022.p0.ext_hw_conf);
 }
 
 static inline void __iomem* isp_port_status(struct scsi_qla_host *ha)
 {
-	return (is_qla4022(ha) ?
-		&ha->reg->u2.isp4022.p0.port_status :
-		&ha->reg->u2.isp4010.port_status);
+	return (is_qla4010(ha) ?
+		&ha->reg->u2.isp4010.port_status :
+		&ha->reg->u2.isp4022.p0.port_status);
 }
 
 static inline void __iomem* isp_port_ctrl(struct scsi_qla_host *ha)
 {
-	return (is_qla4022(ha) ?
-		&ha->reg->u2.isp4022.p0.port_ctrl :
-		&ha->reg->u2.isp4010.port_ctrl);
+	return (is_qla4010(ha) ?
+		&ha->reg->u2.isp4010.port_ctrl :
+		&ha->reg->u2.isp4022.p0.port_ctrl);
 }
 
 static inline void __iomem* isp_port_error_status(struct scsi_qla_host *ha)
 {
-	return (is_qla4022(ha) ?
-		&ha->reg->u2.isp4022.p0.port_err_status :
-		&ha->reg->u2.isp4010.port_err_status);
+	return (is_qla4010(ha) ?
+		&ha->reg->u2.isp4010.port_err_status :
+		&ha->reg->u2.isp4022.p0.port_err_status);
 }
 
 static inline void __iomem * isp_gp_out(struct scsi_qla_host *ha)
 {
-	return (is_qla4022(ha) ?
-		&ha->reg->u2.isp4022.p0.gp_out :
-		&ha->reg->u2.isp4010.gp_out);
+	return (is_qla4010(ha) ?
+		&ha->reg->u2.isp4010.gp_out :
+		&ha->reg->u2.isp4022.p0.gp_out);
 }
 
 static inline int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha)
 {
-	return (is_qla4022(ha) ?
-		offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2 :
-		offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2);
+	return (is_qla4010(ha) ?
+		offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2 :
+		offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2);
 }
 
 int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
@@ -511,59 +520,59 @@
 
 static inline int ql4xxx_lock_flash(struct scsi_qla_host *a)
 {
-	if (is_qla4022(a))
+	if (is_qla4010(a))
+		return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
+					   QL4010_FLASH_SEM_BITS);
+	else
 		return ql4xxx_sem_spinlock(a, QL4022_FLASH_SEM_MASK,
 					   (QL4022_RESOURCE_BITS_BASE_CODE |
 					    (a->mac_index)) << 13);
-	else
-		return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
-					   QL4010_FLASH_SEM_BITS);
 }
 
 static inline void ql4xxx_unlock_flash(struct scsi_qla_host *a)
 {
-	if (is_qla4022(a))
-		ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
-	else
+	if (is_qla4010(a))
 		ql4xxx_sem_unlock(a, QL4010_FLASH_SEM_MASK);
+	else
+		ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
 }
 
 static inline int ql4xxx_lock_nvram(struct scsi_qla_host *a)
 {
-	if (is_qla4022(a))
+	if (is_qla4010(a))
+		return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
+					   QL4010_NVRAM_SEM_BITS);
+	else
 		return ql4xxx_sem_spinlock(a, QL4022_NVRAM_SEM_MASK,
 					   (QL4022_RESOURCE_BITS_BASE_CODE |
 					    (a->mac_index)) << 10);
-	else
-		return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
-					   QL4010_NVRAM_SEM_BITS);
 }
 
 static inline void ql4xxx_unlock_nvram(struct scsi_qla_host *a)
 {
-	if (is_qla4022(a))
-		ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
-	else
+	if (is_qla4010(a))
 		ql4xxx_sem_unlock(a, QL4010_NVRAM_SEM_MASK);
+	else
+		ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
 }
 
 static inline int ql4xxx_lock_drvr(struct scsi_qla_host *a)
 {
-	if (is_qla4022(a))
+	if (is_qla4010(a))
+		return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
+				       QL4010_DRVR_SEM_BITS);
+	else
 		return ql4xxx_sem_lock(a, QL4022_DRVR_SEM_MASK,
 				       (QL4022_RESOURCE_BITS_BASE_CODE |
 					(a->mac_index)) << 1);
-	else
-		return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
-				       QL4010_DRVR_SEM_BITS);
 }
 
 static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
 {
-	if (is_qla4022(a))
-		ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
-	else
+	if (is_qla4010(a))
 		ql4xxx_sem_unlock(a, QL4010_DRVR_SEM_MASK);
+	else
+		ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
 }
 
 /*---------------------------------------------------------------------------*/
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 427489d..4eea8c5 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -296,7 +296,6 @@
 /*  ISP Semaphore definitions */
 
 /*  ISP General Purpose Output definitions */
-#define GPOR_TOPCAT_RESET			0x00000004
 
 /*  shadow registers (DMA'd from HA to system memory.  read only) */
 struct shadow_regs {
@@ -339,10 +338,13 @@
 /*  Mailbox command definitions */
 #define MBOX_CMD_ABOUT_FW			0x0009
 #define MBOX_CMD_LUN_RESET			0x0016
+#define MBOX_CMD_GET_MANAGEMENT_DATA		0x001E
 #define MBOX_CMD_GET_FW_STATUS			0x001F
 #define MBOX_CMD_SET_ISNS_SERVICE		0x0021
 #define ISNS_DISABLE				0
 #define ISNS_ENABLE				1
+#define MBOX_CMD_COPY_FLASH			0x0024
+#define MBOX_CMD_WRITE_FLASH			0x0025
 #define MBOX_CMD_READ_FLASH			0x0026
 #define MBOX_CMD_CLEAR_DATABASE_ENTRY		0x0031
 #define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT		0x0056
@@ -360,10 +362,13 @@
 #define DDB_DS_SESSION_FAILED			0x06
 #define DDB_DS_LOGIN_IN_PROCESS			0x07
 #define MBOX_CMD_GET_FW_STATE			0x0069
+#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A
+#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS	0x0087
 
 /* Mailbox 1 */
 #define FW_STATE_READY				0x0000
 #define FW_STATE_CONFIG_WAIT			0x0001
+#define FW_STATE_WAIT_LOGIN			0x0002
 #define FW_STATE_ERROR				0x0004
 #define FW_STATE_DHCP_IN_PROGRESS		0x0008
 
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 1b221ff..2122967 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -8,6 +8,7 @@
 #ifndef __QLA4x_GBL_H
 #define	__QLA4x_GBL_H
 
+int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
 int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
 int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb);
 int qla4xxx_initialize_adapter(struct scsi_qla_host * ha,
@@ -75,4 +76,4 @@
 extern int ql4xextended_error_logging;
 extern int ql4xdiscoverywait;
 extern int ql4xdontresethba;
-#endif				/* _QLA4x_GBL_H */
+#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index bb3a1c1..cc210f2 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -259,10 +259,16 @@
 			      "seconds expired= %d\n", ha->host_no, __func__,
 			      ha->firmware_state, ha->addl_fw_state,
 			      timeout_count));
+		if (is_qla4032(ha) &&
+			!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP) &&
+			(timeout_count < ADAPTER_INIT_TOV - 5)) {
+			break;
+		}
+
 		msleep(1000);
 	}			/* end of for */
 
-	if (timeout_count <= 0)
+	if (timeout_count == 0)
 		DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n",
 			      ha->host_no, __func__));
 
@@ -806,32 +812,6 @@
 	return QLA_SUCCESS;
 }
 
-/**
- * qla4010_get_topcat_presence - check if it is QLA4040 TopCat Chip
- * @ha: Pointer to host adapter structure.
- *
- **/
-static int qla4010_get_topcat_presence(struct scsi_qla_host *ha)
-{
-	unsigned long flags;
-	uint16_t topcat;
-
-	if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS)
-		return QLA_ERROR;
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	topcat = rd_nvram_word(ha, offsetof(struct eeprom_data,
-					    isp4010.topcat));
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-	if ((topcat & TOPCAT_MASK) == TOPCAT_PRESENT)
-		set_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags);
-	else
-		clear_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags);
-	ql4xxx_unlock_nvram(ha);
-	return QLA_SUCCESS;
-}
-
-
 static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
 {
 	unsigned long flags;
@@ -866,7 +846,7 @@
 		/* set defaults */
 		if (is_qla4010(ha))
 			extHwConfig.Asuint32_t = 0x1912;
-		else if (is_qla4022(ha))
+		else if (is_qla4022(ha) | is_qla4032(ha))
 			extHwConfig.Asuint32_t = 0x0023;
 	}
 	DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
@@ -927,7 +907,7 @@
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	writel(jiffies, &ha->reg->mailbox[7]);
-	if (is_qla4022(ha))
+	if (is_qla4022(ha) | is_qla4032(ha))
 		writel(set_rmask(NVR_WRITE_ENABLE),
 		       &ha->reg->u1.isp4022.nvram);
 
@@ -978,7 +958,7 @@
 	return status;
 }
 
-static int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
+int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
 {
 #define QL4_LOCK_DRVR_WAIT	300
 #define QL4_LOCK_DRVR_SLEEP	100
@@ -1018,12 +998,7 @@
 	int soft_reset = 1;
 	int config_chip = 0;
 
-	if (is_qla4010(ha)){
-		if (qla4010_get_topcat_presence(ha) != QLA_SUCCESS)
-			return QLA_ERROR;
-	}
-
-	if (is_qla4022(ha))
+	if (is_qla4022(ha) | is_qla4032(ha))
 		ql4xxx_set_mac_number(ha);
 
 	if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
index 0d61797..6375eb0 100644
--- a/drivers/scsi/qla4xxx/ql4_inline.h
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -38,7 +38,7 @@
 static inline void
 __qla4xxx_enable_intrs(struct scsi_qla_host *ha)
 {
-	if (is_qla4022(ha)) {
+	if (is_qla4022(ha) | is_qla4032(ha)) {
 		writel(set_rmask(IMR_SCSI_INTR_ENABLE),
 		       &ha->reg->u1.isp4022.intr_mask);
 		readl(&ha->reg->u1.isp4022.intr_mask);
@@ -52,7 +52,7 @@
 static inline void
 __qla4xxx_disable_intrs(struct scsi_qla_host *ha)
 {
-	if (is_qla4022(ha)) {
+	if (is_qla4022(ha) | is_qla4032(ha)) {
 		writel(clr_rmask(IMR_SCSI_INTR_ENABLE),
 		       &ha->reg->u1.isp4022.intr_mask);
 		readl(&ha->reg->u1.isp4022.intr_mask);
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index c0a254b..d41ce38 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -294,6 +294,12 @@
 			cmd_entry->control_flags = CF_WRITE;
 		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
 			cmd_entry->control_flags = CF_READ;
+
+		ha->bytes_xfered += cmd->request_bufflen;
+		if (ha->bytes_xfered & ~0xFFFFF){
+			ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
+			ha->bytes_xfered &= 0xFFFFF;
+		}
 	}
 
 	/* Set tagged queueing control flags */
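
The ql4_iocb hunk above keeps a running byte count per adapter and folds whole mebibytes into total_mbytes_xferred whenever bits above 2^20 - 1 appear, leaving only the sub-MiB remainder in bytes_xfered. A small standalone example of that carry arithmetic:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t total_mbytes = 0;
		uint32_t bytes = 0;
		uint32_t xfers[] = { 524288, 1500000, 300000 };	/* sample request lengths */
		int i;

		for (i = 0; i < 3; i++) {
			bytes += xfers[i];
			if (bytes & ~0xFFFFF) {			/* at least 1 MiB accumulated */
				total_mbytes += bytes >> 20;	/* move whole MiB to the total */
				bytes &= 0xFFFFF;		/* keep the remainder in bytes */
			}
			printf("after %u: %llu MiB + %u bytes\n", xfers[i],
			       (unsigned long long)total_mbytes, bytes);
		}
		return 0;
	}
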
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 1e28332..ef975e0 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -627,6 +627,7 @@
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
+	ha->isr_count++;
 	/*
 	 * Repeatedly service interrupts up to a maximum of
 	 * MAX_REQS_SERVICED_PER_INTR
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index e3957ca..58afd13 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -7,15 +7,22 @@
 
 #include "ql4_def.h"
 
+static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha)
+{
+	writel(cmd, isp_nvram(ha));
+	readl(isp_nvram(ha));
+	udelay(1);
+}
+
 static inline int eeprom_size(struct scsi_qla_host *ha)
 {
-	return is_qla4022(ha) ? FM93C86A_SIZE_16 : FM93C66A_SIZE_16;
+	return is_qla4010(ha) ? FM93C66A_SIZE_16 : FM93C86A_SIZE_16;
 }
 
 static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha)
 {
-	return is_qla4022(ha) ? FM93C86A_NO_ADDR_BITS_16 :
-		FM93C56A_NO_ADDR_BITS_16;
+	return is_qla4010(ha) ? FM93C56A_NO_ADDR_BITS_16 :
+		FM93C86A_NO_ADDR_BITS_16 ;
 }
 
 static inline int eeprom_no_data_bits(struct scsi_qla_host *ha)
@@ -28,8 +35,7 @@
 	DEBUG5(printk(KERN_ERR "fm93c56a_select:\n"));
 
 	ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000;
-	writel(ha->eeprom_cmd_data, isp_nvram(ha));
-	readl(isp_nvram(ha));
+	eeprom_cmd(ha->eeprom_cmd_data, ha);
 	return 1;
 }
 
@@ -41,12 +47,13 @@
 	int previousBit;
 
 	/* Clock in a zero, then do the start bit. */
-	writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, isp_nvram(ha));
-	writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
-	       AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
-	writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
-	       AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
-	readl(isp_nvram(ha));
+	eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, ha);
+
+	eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
+	       AUBURN_EEPROM_CLK_RISE, ha);
+	eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
+	       AUBURN_EEPROM_CLK_FALL, ha);
+
 	mask = 1 << (FM93C56A_CMD_BITS - 1);
 
 	/* Force the previous data bit to be different. */
@@ -60,14 +67,14 @@
 			 * If the bit changed, then change the DO state to
 			 * match.
 			 */
-			writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha));
+			eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha);
 			previousBit = dataBit;
 		}
-		writel(ha->eeprom_cmd_data | dataBit |
-		       AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
-		writel(ha->eeprom_cmd_data | dataBit |
-		       AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
-		readl(isp_nvram(ha));
+		eeprom_cmd(ha->eeprom_cmd_data | dataBit |
+		       AUBURN_EEPROM_CLK_RISE, ha);
+		eeprom_cmd(ha->eeprom_cmd_data | dataBit |
+		       AUBURN_EEPROM_CLK_FALL, ha);
+
 		cmd = cmd << 1;
 	}
 	mask = 1 << (eeprom_no_addr_bits(ha) - 1);
@@ -82,14 +89,15 @@
 			 * If the bit changed, then change the DO state to
 			 * match.
 			 */
-			writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha));
+			eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha);
+
 			previousBit = dataBit;
 		}
-		writel(ha->eeprom_cmd_data | dataBit |
-		       AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
-		writel(ha->eeprom_cmd_data | dataBit |
-		       AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
-		readl(isp_nvram(ha));
+		eeprom_cmd(ha->eeprom_cmd_data | dataBit |
+		       AUBURN_EEPROM_CLK_RISE, ha);
+		eeprom_cmd(ha->eeprom_cmd_data | dataBit |
+		       AUBURN_EEPROM_CLK_FALL, ha);
+
 		addr = addr << 1;
 	}
 	return 1;
@@ -98,8 +106,7 @@
 static int fm93c56a_deselect(struct scsi_qla_host * ha)
 {
 	ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000;
-	writel(ha->eeprom_cmd_data, isp_nvram(ha));
-	readl(isp_nvram(ha));
+	eeprom_cmd(ha->eeprom_cmd_data, ha);
 	return 1;
 }
 
@@ -112,12 +119,13 @@
 	/* Read the data bits
 	 * The first bit is a dummy.  Clock right over it. */
 	for (i = 0; i < eeprom_no_data_bits(ha); i++) {
-		writel(ha->eeprom_cmd_data |
-		       AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
-		writel(ha->eeprom_cmd_data |
-		       AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
-		dataBit =
-			(readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0;
+		eeprom_cmd(ha->eeprom_cmd_data |
+		       AUBURN_EEPROM_CLK_RISE, ha);
+		eeprom_cmd(ha->eeprom_cmd_data |
+		       AUBURN_EEPROM_CLK_FALL, ha);
+
+		dataBit = (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0;
+
 		data = (data << 1) | dataBit;
 	}
 
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
index 08e2aed..b47b4fc 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.h
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -134,9 +134,7 @@
 			u16 phyConfig;	/* x36 */
 #define	 PHY_CONFIG_PHY_ADDR_MASK	      0x1f
 #define	 PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20
-			u16 topcat;	/* x38 */
-#define TOPCAT_PRESENT		0x0100
-#define TOPCAT_MASK		0xFF00
+			u16 reserved_56;	/* x38 */
 
 #define EEPROM_UNUSED_1_SIZE   2
 			u8 unused_1[EEPROM_UNUSED_1_SIZE]; /* x3A */
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 5b8db61..9ef693c 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -19,7 +19,7 @@
 /*
  * SRB allocation cache
  */
-static kmem_cache_t *srb_cachep;
+static struct kmem_cache *srb_cachep;
 
 /*
  * Module parameter information and variables
@@ -708,10 +708,10 @@
 }
 
 /**
- * qla4010_soft_reset - performs soft reset.
+ * qla4xxx_soft_reset - performs soft reset.
  * @ha: Pointer to host adapter structure.
  **/
-static int qla4010_soft_reset(struct scsi_qla_host *ha)
+int qla4xxx_soft_reset(struct scsi_qla_host *ha)
 {
 	uint32_t max_wait_time;
 	unsigned long flags = 0;
@@ -817,29 +817,6 @@
 }
 
 /**
- * qla4xxx_topcat_reset - performs hard reset of TopCat Chip.
- * @ha: Pointer to host adapter structure.
- **/
-static int qla4xxx_topcat_reset(struct scsi_qla_host *ha)
-{
-	unsigned long flags;
-
-	ql4xxx_lock_nvram(ha);
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	writel(set_rmask(GPOR_TOPCAT_RESET), isp_gp_out(ha));
-	readl(isp_gp_out(ha));
-	mdelay(1);
-
-	writel(clr_rmask(GPOR_TOPCAT_RESET), isp_gp_out(ha));
-	readl(isp_gp_out(ha));
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-	mdelay(2523);
-
-	ql4xxx_unlock_nvram(ha);
-	return QLA_SUCCESS;
-}
-
-/**
  * qla4xxx_flush_active_srbs - returns all outstanding i/o requests to O.S.
  * @ha: Pointer to host adapter structure.
  *
@@ -867,26 +844,6 @@
 }
 
 /**
- * qla4xxx_hard_reset - performs HBA Hard Reset
- * @ha: Pointer to host adapter structure.
- **/
-static int qla4xxx_hard_reset(struct scsi_qla_host *ha)
-{
-	/* The QLA4010 really doesn't have an equivalent to a hard reset */
-	qla4xxx_flush_active_srbs(ha);
-	if (test_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags)) {
-		int status = QLA_ERROR;
-
-		if ((qla4010_soft_reset(ha) == QLA_SUCCESS) &&
-		    (qla4xxx_topcat_reset(ha) == QLA_SUCCESS) &&
-		    (qla4010_soft_reset(ha) == QLA_SUCCESS))
-			status = QLA_SUCCESS;
-		return status;
-	} else
-		return qla4010_soft_reset(ha);
-}
-
-/**
  * qla4xxx_recover_adapter - recovers adapter after a fatal error
  * @ha: Pointer to host adapter structure.
  * @renew_ddb_list: Indicates what to do with the adapter's ddb list
@@ -919,18 +876,11 @@
 	if (status == QLA_SUCCESS) {
 		DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n",
 			      ha->host_no, __func__));
-		status = qla4xxx_soft_reset(ha);
-	}
-	/* FIXMEkaren: Do we want to keep interrupts enabled and process
-	   AENs after soft reset */
-
-	/* If firmware (SOFT) reset failed, or if all outstanding
-	 * commands have not returned, then do a HARD reset.
-	 */
-	if (status == QLA_ERROR) {
-		DEBUG2(printk("scsi%ld: %s - Performing hard reset..\n",
-			      ha->host_no, __func__));
-		status = qla4xxx_hard_reset(ha);
+		qla4xxx_flush_active_srbs(ha);
+		if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS)
+			status = qla4xxx_soft_reset(ha);
+		else
+			status = QLA_ERROR;
 	}
 
 	/* Flush any pending ddb changed AENs */
@@ -1011,18 +961,15 @@
  * the mid-level tries to sleep when it reaches the driver threshold
  * "host->can_queue". This can cause a panic if we were in our interrupt code.
  **/
-static void qla4xxx_do_dpc(void *data)
+static void qla4xxx_do_dpc(struct work_struct *work)
 {
-	struct scsi_qla_host *ha = (struct scsi_qla_host *) data;
+	struct scsi_qla_host *ha =
+		container_of(work, struct scsi_qla_host, dpc_work);
 	struct ddb_entry *ddb_entry, *dtemp;
 
-	DEBUG2(printk("scsi%ld: %s: DPC handler waking up.\n",
-		      ha->host_no, __func__));
-
-	DEBUG2(printk("scsi%ld: %s: ha->flags = 0x%08lx\n",
-		      ha->host_no, __func__, ha->flags));
-	DEBUG2(printk("scsi%ld: %s: ha->dpc_flags = 0x%08lx\n",
-		      ha->host_no, __func__, ha->dpc_flags));
+	DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
+		"flags = 0x%08lx, dpc_flags = 0x%08lx\n",
+		ha->host_no, __func__, ha->flags, ha->dpc_flags));
 
 	/* Initialization not yet finished. Don't do anything yet. */
 	if (!test_bit(AF_INIT_DONE, &ha->flags))
@@ -1032,16 +979,8 @@
 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
 	    test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
 	    test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags)) {
-		if (test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags))
-			/*
-			 * dg 09/23 Never initialize ddb list
-			 * once we up and running
-			 * qla4xxx_recover_adapter(ha,
-			 *    REBUILD_DDB_LIST);
-			 */
-			qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
-
-		if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
+		if (test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) ||
+			test_bit(DPC_RESET_HA, &ha->dpc_flags))
 			qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
 
 		if (test_and_clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
@@ -1122,7 +1061,8 @@
 		destroy_workqueue(ha->dpc_thread);
 
 	/* Issue Soft Reset to put firmware in unknown state */
-	qla4xxx_soft_reset(ha);
+	if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS)
+		qla4xxx_soft_reset(ha);
 
 	/* Remove timer thread, if present */
 	if (ha->timer_active)
@@ -1261,7 +1201,6 @@
 	init_waitqueue_head(&ha->mailbox_wait_queue);
 
 	spin_lock_init(&ha->hardware_lock);
-	spin_lock_init(&ha->list_lock);
 
 	/* Allocate dma buffers */
 	if (qla4xxx_mem_alloc(ha)) {
@@ -1315,7 +1254,7 @@
 		ret = -ENODEV;
 		goto probe_failed;
 	}
-	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc, ha);
+	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
 
 	ret = request_irq(pdev->irq, qla4xxx_intr_handler,
 			  SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha);
@@ -1468,27 +1407,6 @@
 }
 
 /**
- * qla4xxx_soft_reset - performs a SOFT RESET of hba.
- * @ha: Pointer to host adapter structure.
- **/
-int qla4xxx_soft_reset(struct scsi_qla_host *ha)
-{
-
-	DEBUG2(printk(KERN_WARNING "scsi%ld: %s: chip reset!\n", ha->host_no,
-		      __func__));
-	if (test_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags)) {
-		int status = QLA_ERROR;
-
-		if ((qla4010_soft_reset(ha) == QLA_SUCCESS) &&
-		    (qla4xxx_topcat_reset(ha) == QLA_SUCCESS) &&
-		    (qla4010_soft_reset(ha) == QLA_SUCCESS) )
-			status = QLA_SUCCESS;
-		return status;
-	} else
-		return qla4010_soft_reset(ha);
-}
-
-/**
  * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
  * @ha: actual ha whose done queue will contain the command returned by firmware.
  * @cmd: Scsi Command to wait on.
@@ -1686,6 +1604,12 @@
 		.subvendor	= PCI_ANY_ID,
 		.subdevice	= PCI_ANY_ID,
 	},
+	{
+		.vendor		= PCI_VENDOR_ID_QLOGIC,
+		.device		= PCI_DEVICE_ID_QLOGIC_ISP4032,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
 	{0, 0},
 };
 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index b3fe7e6..454e19c 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,9 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION	"5.00.05b9-k"
-
-#define QL4_DRIVER_MAJOR_VER	5
-#define QL4_DRIVER_MINOR_VER	0
-#define QL4_DRIVER_PATCH_VER	5
-#define QL4_DRIVER_BETA_VER	9
+#define QLA4XXX_DRIVER_VERSION	"5.00.07-k"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index c59f315..24cffd9 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -136,7 +136,7 @@
 EXPORT_SYMBOL(scsi_device_type);
 
 struct scsi_host_cmd_pool {
-	kmem_cache_t	*slab;
+	struct kmem_cache	*slab;
 	unsigned int	users;
 	char		*name;
 	unsigned int	slab_flags;
@@ -156,8 +156,7 @@
 
 static DEFINE_MUTEX(host_cmd_pool_mutex);
 
-static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
-					    gfp_t gfp_mask)
+struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
 {
 	struct scsi_cmnd *cmd;
 
@@ -178,6 +177,7 @@
 
 	return cmd;
 }
+EXPORT_SYMBOL_GPL(__scsi_get_command);
 
 /*
  * Function:	scsi_get_command()
@@ -214,9 +214,29 @@
 		put_device(&dev->sdev_gendev);
 
 	return cmd;
-}				
+}
 EXPORT_SYMBOL(scsi_get_command);
 
+void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
+			struct device *dev)
+{
+	unsigned long flags;
+
+	/* changing locks here, don't need to restore the irq state */
+	spin_lock_irqsave(&shost->free_list_lock, flags);
+	if (unlikely(list_empty(&shost->free_list))) {
+		list_add(&cmd->list, &shost->free_list);
+		cmd = NULL;
+	}
+	spin_unlock_irqrestore(&shost->free_list_lock, flags);
+
+	if (likely(cmd != NULL))
+		kmem_cache_free(shost->cmd_pool->slab, cmd);
+
+	put_device(dev);
+}
+EXPORT_SYMBOL(__scsi_put_command);
+
 /*
  * Function:	scsi_put_command()
  *
@@ -231,26 +251,15 @@
 void scsi_put_command(struct scsi_cmnd *cmd)
 {
 	struct scsi_device *sdev = cmd->device;
-	struct Scsi_Host *shost = sdev->host;
 	unsigned long flags;
-	
+
 	/* serious error if the command hasn't come from a device list */
 	spin_lock_irqsave(&cmd->device->list_lock, flags);
 	BUG_ON(list_empty(&cmd->list));
 	list_del_init(&cmd->list);
-	spin_unlock(&cmd->device->list_lock);
-	/* changing locks here, don't need to restore the irq state */
-	spin_lock(&shost->free_list_lock);
-	if (unlikely(list_empty(&shost->free_list))) {
-		list_add(&cmd->list, &shost->free_list);
-		cmd = NULL;
-	}
-	spin_unlock_irqrestore(&shost->free_list_lock, flags);
+	spin_unlock_irqrestore(&cmd->device->list_lock, flags);
 
-	if (likely(cmd != NULL))
-		kmem_cache_free(shost->cmd_pool->slab, cmd);
-
-	put_device(&sdev->sdev_gendev);
+	__scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
 }
 EXPORT_SYMBOL(scsi_put_command);
 
@@ -871,9 +880,9 @@
  */
 void scsi_device_put(struct scsi_device *sdev)
 {
+#ifdef CONFIG_MODULE_UNLOAD
 	struct module *module = sdev->host->hostt->module;
 
-#ifdef CONFIG_MODULE_UNLOAD
 	/* The module refcount will be zero if scsi_device_get()
 	 * was called from a module removal routine */
 	if (module && module_refcount(module) != 0)
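
scsi.c now exports __scsi_get_command()/__scsi_put_command() so code outside the normal queuecommand path can borrow a command from a host's pool without going through a scsi_device; the caller passes in whatever struct device reference should be dropped on release. A hedged sketch of such a user follows; the transport_cmd_* helpers are hypothetical, only the two exported calls come from the patch.

	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_host.h>

	/* Hypothetical helpers for code that manages its own command lifetimes. */
	static struct scsi_cmnd *transport_cmd_get(struct Scsi_Host *shost,
						   struct device *dev)
	{
		struct scsi_cmnd *cmd = __scsi_get_command(shost, GFP_KERNEL);

		if (cmd)
			get_device(dev);	/* reference dropped by __scsi_put_command() */
		return cmd;
	}

	static void transport_cmd_put(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
				      struct device *dev)
	{
		__scsi_put_command(shost, cmd, dev);	/* return cmd to pool, put dev */
	}
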
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index aff1b0c..2ecb6ff 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -453,9 +453,18 @@
 }
 
 /**
- * scsi_send_eh_cmnd  - send a cmd to a device as part of error recovery.
- * @scmd:	SCSI Cmd to send.
- * @timeout:	Timeout for cmd.
+ * scsi_send_eh_cmnd  - submit a scsi command as part of error recovery
+ * @scmd:       SCSI command structure to hijack
+ * @cmnd:       CDB to send
+ * @cmnd_size:  size in bytes of @cmnd
+ * @timeout:    timeout for this request
+ * @copy_sense: request sense data if set to 1
+ *
+ * This function is used to send a scsi command down to a target device
+ * as part of the error recovery process.  If @copy_sense is 0 the command
+ * sent must be one that does not transfer any data.  If @copy_sense is 1
+ * the command must be REQUEST_SENSE and this function copies out the
+ * sense buffer it got into @scmd->sense_buffer.
  *
  * Return value:
  *    SUCCESS or FAILED or NEEDS_RETRY
@@ -469,6 +478,7 @@
 	DECLARE_COMPLETION_ONSTACK(done);
 	unsigned long timeleft;
 	unsigned long flags;
+	struct scatterlist sgl;
 	unsigned char old_cmnd[MAX_COMMAND_SIZE];
 	enum dma_data_direction old_data_direction;
 	unsigned short old_use_sg;
@@ -500,19 +510,24 @@
 		if (shost->hostt->unchecked_isa_dma)
 			gfp_mask |= __GFP_DMA;
 
-		scmd->sc_data_direction = DMA_FROM_DEVICE;
-		scmd->request_bufflen = 252;
-		scmd->request_buffer = kzalloc(scmd->request_bufflen, gfp_mask);
-		if (!scmd->request_buffer)
+		sgl.page = alloc_page(gfp_mask);
+		if (!sgl.page)
 			return FAILED;
+		sgl.offset = 0;
+		sgl.length = 252;
+
+		scmd->sc_data_direction = DMA_FROM_DEVICE;
+		scmd->request_bufflen = sgl.length;
+		scmd->request_buffer = &sgl;
+		scmd->use_sg = 1;
 	} else {
 		scmd->request_buffer = NULL;
 		scmd->request_bufflen = 0;
 		scmd->sc_data_direction = DMA_NONE;
+		scmd->use_sg = 0;
 	}
 
 	scmd->underflow = 0;
-	scmd->use_sg = 0;
 	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
 
 	if (sdev->scsi_level <= SCSI_2)
@@ -583,7 +598,7 @@
 			memcpy(scmd->sense_buffer, scmd->request_buffer,
 			       sizeof(scmd->sense_buffer));
 		}
-		kfree(scmd->request_buffer);
+		__free_page(sgl.page);
 	}
 
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 3ac4890..1748e27 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -36,7 +36,7 @@
 struct scsi_host_sg_pool {
 	size_t		size;
 	char		*name; 
-	kmem_cache_t	*slab;
+	struct kmem_cache	*slab;
 	mempool_t	*pool;
 };
 
@@ -241,7 +241,7 @@
 	char sense[SCSI_SENSE_BUFFERSIZE];
 };
 
-static kmem_cache_t *scsi_io_context_cache;
+static struct kmem_cache *scsi_io_context_cache;
 
 static void scsi_end_async(struct request *req, int uptodate)
 {
@@ -704,7 +704,7 @@
 	return NULL;
 }
 
-static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 {
 	struct scsi_host_sg_pool *sgp;
 	struct scatterlist *sgl;
@@ -745,7 +745,9 @@
 	return sgl;
 }
 
-static void scsi_free_sgtable(struct scatterlist *sgl, int index)
+EXPORT_SYMBOL(scsi_alloc_sgtable);
+
+void scsi_free_sgtable(struct scatterlist *sgl, int index)
 {
 	struct scsi_host_sg_pool *sgp;
 
@@ -755,6 +757,8 @@
 	mempool_free(sgl, sgp->pool);
 }
 
+EXPORT_SYMBOL(scsi_free_sgtable);
+
 /*
  * Function:    scsi_release_buffers()
  *
@@ -996,25 +1000,14 @@
 	int		   count;
 
 	/*
-	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
-	 */
-	if (blk_pc_request(req) && !req->bio) {
-		cmd->request_bufflen = req->data_len;
-		cmd->request_buffer = req->data;
-		req->buffer = req->data;
-		cmd->use_sg = 0;
-		return 0;
-	}
-
-	/*
-	 * we used to not use scatter-gather for single segment request,
+	 * We used to not use scatter-gather for single segment request,
 	 * but now we do (it makes highmem I/O easier to support without
 	 * kmapping pages)
 	 */
 	cmd->use_sg = req->nr_phys_segments;
 
 	/*
-	 * if sg table allocation fails, requeue request later.
+	 * If sg table allocation fails, requeue request later.
 	 */
 	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
 	if (unlikely(!sgpnt)) {
@@ -1022,24 +1015,21 @@
 		return BLKPREP_DEFER;
 	}
 
+	req->buffer = NULL;
 	cmd->request_buffer = (char *) sgpnt;
-	cmd->request_bufflen = req->nr_sectors << 9;
 	if (blk_pc_request(req))
 		cmd->request_bufflen = req->data_len;
-	req->buffer = NULL;
+	else
+		cmd->request_bufflen = req->nr_sectors << 9;
 
 	/* 
 	 * Next, walk the list, and fill in the addresses and sizes of
 	 * each segment.
 	 */
 	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
-
-	/*
-	 * mapped well, send it off
-	 */
 	if (likely(count <= cmd->use_sg)) {
 		cmd->use_sg = count;
-		return 0;
+		return BLKPREP_OK;
 	}
 
 	printk(KERN_ERR "Incorrect number of segments after building list\n");
@@ -1069,6 +1059,27 @@
 	return -EOPNOTSUPP;
 }
 
+static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
+		struct request *req)
+{
+	struct scsi_cmnd *cmd;
+
+	if (!req->special) {
+		cmd = scsi_get_command(sdev, GFP_ATOMIC);
+		if (unlikely(!cmd))
+			return NULL;
+		req->special = cmd;
+	} else {
+		cmd = req->special;
+	}
+
+	/* pull a tag out of the request if we have one */
+	cmd->tag = req->tag;
+	cmd->request = req;
+
+	return cmd;
+}
+
 static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
 {
 	BUG_ON(!blk_pc_request(cmd->request));
@@ -1081,9 +1092,37 @@
 	scsi_io_completion(cmd, cmd->request_bufflen);
 }
 
-static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
+static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct request *req = cmd->request;
+	struct scsi_cmnd *cmd;
+
+	cmd = scsi_get_cmd_from_req(sdev, req);
+	if (unlikely(!cmd))
+		return BLKPREP_DEFER;
+
+	/*
+	 * BLOCK_PC requests may transfer data, in which case they must
+	 * have a bio attached to them.  Or they might contain a SCSI command
+	 * that does not transfer data, in which case they may optionally
+	 * submit a request without an attached bio.
+	 */
+	if (req->bio) {
+		int ret;
+
+		BUG_ON(!req->nr_phys_segments);
+
+		ret = scsi_init_io(cmd);
+		if (unlikely(ret))
+			return ret;
+	} else {
+		BUG_ON(req->data_len);
+		BUG_ON(req->data);
+
+		cmd->request_bufflen = 0;
+		cmd->request_buffer = NULL;
+		cmd->use_sg = 0;
+		req->buffer = NULL;
+	}
 
 	BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
 	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
@@ -1099,154 +1138,138 @@
 	cmd->allowed = req->retries;
 	cmd->timeout_per_command = req->timeout;
 	cmd->done = scsi_blk_pc_done;
+	return BLKPREP_OK;
+}
+
+/*
+ * Setup a REQ_TYPE_FS command.  These are simple read/write requests
+ * from filesystems that still need to be translated to SCSI CDBs by
+ * the ULD.
+ */
+static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
+{
+	struct scsi_cmnd *cmd;
+	struct scsi_driver *drv;
+	int ret;
+
+	/*
+	 * Filesystem requests must transfer data.
+	 */
+	BUG_ON(!req->nr_phys_segments);
+
+	cmd = scsi_get_cmd_from_req(sdev, req);
+	if (unlikely(!cmd))
+		return BLKPREP_DEFER;
+
+	ret = scsi_init_io(cmd);
+	if (unlikely(ret))
+		return ret;
+
+	/*
+	 * Initialize the actual SCSI command for this request.
+	 */
+	drv = *(struct scsi_driver **)req->rq_disk->private_data;
+	if (unlikely(!drv->init_command(cmd))) {
+		scsi_release_buffers(cmd);
+		scsi_put_command(cmd);
+		return BLKPREP_KILL;
+	}
+
+	return BLKPREP_OK;
 }
 
 static int scsi_prep_fn(struct request_queue *q, struct request *req)
 {
 	struct scsi_device *sdev = q->queuedata;
-	struct scsi_cmnd *cmd;
-	int specials_only = 0;
+	int ret = BLKPREP_OK;
 
 	/*
-	 * Just check to see if the device is online.  If it isn't, we
-	 * refuse to process any commands.  The device must be brought
-	 * online before trying any recovery commands
+	 * If the device is not in running state we will reject some
+	 * or all commands.
 	 */
-	if (unlikely(!scsi_device_online(sdev))) {
-		sdev_printk(KERN_ERR, sdev,
-			    "rejecting I/O to offline device\n");
-		goto kill;
-	}
 	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
-		/* OK, we're not in a running state don't prep
-		 * user commands */
-		if (sdev->sdev_state == SDEV_DEL) {
-			/* Device is fully deleted, no commands
-			 * at all allowed down */
+		switch (sdev->sdev_state) {
+		case SDEV_OFFLINE:
+			/*
+			 * If the device is offline we refuse to process any
+			 * commands.  The device must be brought online
+			 * before trying any recovery commands.
+			 */
+			sdev_printk(KERN_ERR, sdev,
+				    "rejecting I/O to offline device\n");
+			ret = BLKPREP_KILL;
+			break;
+		case SDEV_DEL:
+			/*
+			 * If the device is fully deleted, we refuse to
+			 * process any commands as well.
+			 */
 			sdev_printk(KERN_ERR, sdev,
 				    "rejecting I/O to dead device\n");
-			goto kill;
+			ret = BLKPREP_KILL;
+			break;
+		case SDEV_QUIESCE:
+		case SDEV_BLOCK:
+			/*
+			 * If the device is blocked we defer normal commands.
+			 */
+			if (!(req->cmd_flags & REQ_PREEMPT))
+				ret = BLKPREP_DEFER;
+			break;
+		default:
+			/*
+			 * For any other not fully online state we only allow
+			 * special commands.  In particular any user initiated
+			 * command is not allowed.
+			 */
+			if (!(req->cmd_flags & REQ_PREEMPT))
+				ret = BLKPREP_KILL;
+			break;
 		}
-		/* OK, we only allow special commands (i.e. not
-		 * user initiated ones */
-		specials_only = sdev->sdev_state;
+
+		if (ret != BLKPREP_OK)
+			goto out;
 	}
 
-	/*
-	 * Find the actual device driver associated with this command.
-	 * The SPECIAL requests are things like character device or
-	 * ioctls, which did not originate from ll_rw_blk.  Note that
-	 * the special field is also used to indicate the cmd for
-	 * the remainder of a partially fulfilled request that can 
-	 * come up when there is a medium error.  We have to treat
-	 * these two cases differently.  We differentiate by looking
-	 * at request->cmd, as this tells us the real story.
-	 */
-	if (blk_special_request(req) && req->special)
-		cmd = req->special;
-	else if (blk_pc_request(req) || blk_fs_request(req)) {
-		if (unlikely(specials_only) && !(req->cmd_flags & REQ_PREEMPT)){
-			if (specials_only == SDEV_QUIESCE ||
-			    specials_only == SDEV_BLOCK)
-				goto defer;
-			
-			sdev_printk(KERN_ERR, sdev,
-				    "rejecting I/O to device being removed\n");
-			goto kill;
-		}
-			
+	switch (req->cmd_type) {
+	case REQ_TYPE_BLOCK_PC:
+		ret = scsi_setup_blk_pc_cmnd(sdev, req);
+		break;
+	case REQ_TYPE_FS:
+		ret = scsi_setup_fs_cmnd(sdev, req);
+		break;
+	default:
 		/*
-		 * Now try and find a command block that we can use.
-		 */
-		if (!req->special) {
-			cmd = scsi_get_command(sdev, GFP_ATOMIC);
-			if (unlikely(!cmd))
-				goto defer;
-		} else
-			cmd = req->special;
-		
-		/* pull a tag out of the request if we have one */
-		cmd->tag = req->tag;
-	} else {
-		blk_dump_rq_flags(req, "SCSI bad req");
-		goto kill;
-	}
-	
-	/* note the overloading of req->special.  When the tag
-	 * is active it always means cmd.  If the tag goes
-	 * back for re-queueing, it may be reset */
-	req->special = cmd;
-	cmd->request = req;
-	
-	/*
-	 * FIXME: drop the lock here because the functions below
-	 * expect to be called without the queue lock held.  Also,
-	 * previously, we dequeued the request before dropping the
-	 * lock.  We hope REQ_STARTED prevents anything untoward from
-	 * happening now.
-	 */
-	if (blk_fs_request(req) || blk_pc_request(req)) {
-		int ret;
-
-		/*
-		 * This will do a couple of things:
-		 *  1) Fill in the actual SCSI command.
-		 *  2) Fill in any other upper-level specific fields
-		 * (timeout).
+		 * All other command types are not supported.
 		 *
-		 * If this returns 0, it means that the request failed
-		 * (reading past end of disk, reading offline device,
-		 * etc).   This won't actually talk to the device, but
-		 * some kinds of consistency checking may cause the	
-		 * request to be rejected immediately.
+		 * Note that these days the SCSI subsystem does not use
+		 * REQ_TYPE_SPECIAL requests anymore.  These are only used
+		 * (directly or via blk_insert_request) by non-SCSI drivers.
 		 */
-
-		/* 
-		 * This sets up the scatter-gather table (allocating if
-		 * required).
-		 */
-		ret = scsi_init_io(cmd);
-		switch(ret) {
-			/* For BLKPREP_KILL/DEFER the cmd was released */
-		case BLKPREP_KILL:
-			goto kill;
-		case BLKPREP_DEFER:
-			goto defer;
-		}
-		
-		/*
-		 * Initialize the actual SCSI command for this request.
-		 */
-		if (blk_pc_request(req)) {
-			scsi_setup_blk_pc_cmnd(cmd);
-		} else if (req->rq_disk) {
-			struct scsi_driver *drv;
-
-			drv = *(struct scsi_driver **)req->rq_disk->private_data;
-			if (unlikely(!drv->init_command(cmd))) {
-				scsi_release_buffers(cmd);
-				scsi_put_command(cmd);
-				goto kill;
-			}
-		}
+		blk_dump_rq_flags(req, "SCSI bad req");
+		ret = BLKPREP_KILL;
+		break;
 	}
 
-	/*
-	 * The request is now prepped, no need to come back here
-	 */
-	req->cmd_flags |= REQ_DONTPREP;
-	return BLKPREP_OK;
+ out:
+	switch (ret) {
+	case BLKPREP_KILL:
+		req->errors = DID_NO_CONNECT << 16;
+		break;
+	case BLKPREP_DEFER:
+		/*
+		 * If we defer, elv_next_request() returns NULL, but the
+		 * queue must be restarted, so we plug here if no returning
+		 * command will automatically do that.
+		 */
+		if (sdev->device_busy == 0)
+			blk_plug_device(q);
+		break;
+	default:
+		req->cmd_flags |= REQ_DONTPREP;
+	}
 
- defer:
-	/* If we defer, the elv_next_request() returns NULL, but the
-	 * queue must be restarted, so we plug here if no returning
-	 * command will automatically do that. */
-	if (sdev->device_busy == 0)
-		blk_plug_device(q);
-	return BLKPREP_DEFER;
- kill:
-	req->errors = DID_NO_CONNECT << 16;
-	return BLKPREP_KILL;
+	return ret;
 }
 
 /*
@@ -1548,29 +1571,40 @@
 }
 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
 
-struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
+					 request_fn_proc *request_fn)
 {
-	struct Scsi_Host *shost = sdev->host;
 	struct request_queue *q;
 
-	q = blk_init_queue(scsi_request_fn, NULL);
+	q = blk_init_queue(request_fn, NULL);
 	if (!q)
 		return NULL;
 
-	blk_queue_prep_rq(q, scsi_prep_fn);
-
 	blk_queue_max_hw_segments(q, shost->sg_tablesize);
 	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
 	blk_queue_max_sectors(q, shost->max_sectors);
 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
 	blk_queue_segment_boundary(q, shost->dma_boundary);
-	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
-	blk_queue_softirq_done(q, scsi_softirq_done);
 
 	if (!shost->use_clustering)
 		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	return q;
 }
+EXPORT_SYMBOL(__scsi_alloc_queue);
+
+struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+{
+	struct request_queue *q;
+
+	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
+	if (!q)
+		return NULL;
+
+	blk_queue_prep_rq(q, scsi_prep_fn);
+	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
+	blk_queue_softirq_done(q, scsi_softirq_done);
+	return q;
+}
 
 void scsi_free_queue(struct request_queue *q)
 {
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 5d023d4..f458c2f 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -39,6 +39,9 @@
 	{ };
 #endif
 
+/* scsi_scan.c */
+int scsi_complete_async_scans(void);
+
 /* scsi_devinfo.c */
 extern int scsi_get_device_flags(struct scsi_device *sdev,
 				 const unsigned char *vendor,
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 94a2746..14e635a 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -29,7 +29,9 @@
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/blkdev.h>
-#include <asm/semaphore.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/spinlock.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -87,6 +89,17 @@
 MODULE_PARM_DESC(max_luns,
 		 "last scsi LUN (should be between 1 and 2^32-1)");
 
+#ifdef CONFIG_SCSI_SCAN_ASYNC
+#define SCSI_SCAN_TYPE_DEFAULT "async"
+#else
+#define SCSI_SCAN_TYPE_DEFAULT "sync"
+#endif
+
+static char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
+
+module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
+MODULE_PARM_DESC(scan, "sync, async or none");
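
For example, the default chosen by CONFIG_SCSI_SCAN_ASYNC can be overridden when
the SCSI core is loaded: with a built-in core on the kernel command line, with a
modular core as an ordinary module option:

	scsi_mod.scan=sync		# kernel command line, built-in SCSI core
	modprobe scsi_mod scan=sync	# modular SCSI core
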
+
 /*
  * max_scsi_report_luns: the maximum number of LUNS that will be
  * returned from the REPORT LUNS command. 8 times this value must
@@ -108,6 +121,68 @@
 		 "Timeout (in seconds) waiting for devices to answer INQUIRY."
 		 " Default is 5. Some non-compliant devices need more.");
 
+static DEFINE_SPINLOCK(async_scan_lock);
+static LIST_HEAD(scanning_hosts);
+
+struct async_scan_data {
+	struct list_head list;
+	struct Scsi_Host *shost;
+	struct completion prev_finished;
+};
+
+/**
+ * scsi_complete_async_scans - Wait for asynchronous scans to complete
+ *
+ * Asynchronous scans add themselves to the scanning_hosts list.  Once
+ * that list is empty, we know that the scans are complete.  Rather than
+ * waking up periodically to check the state of the list, we pretend to be
+ * a scanning task by adding ourselves at the end of the list and going to
+ * sleep.  When the task before us wakes us up, we take ourselves off the
+ * list and return.
+ */
+int scsi_complete_async_scans(void)
+{
+	struct async_scan_data *data;
+
+	do {
+		if (list_empty(&scanning_hosts))
+			return 0;
+		/* If we can't get memory immediately, that's OK.  Just
+		 * sleep a little.  Even if we never get memory, the async
+		 * scans will finish eventually.
+		 */
+		data = kmalloc(sizeof(*data), GFP_KERNEL);
+		if (!data)
+			msleep(1);
+	} while (!data);
+
+	data->shost = NULL;
+	init_completion(&data->prev_finished);
+
+	spin_lock(&async_scan_lock);
+	/* Check that there's still somebody else on the list */
+	if (list_empty(&scanning_hosts))
+		goto done;
+	list_add_tail(&data->list, &scanning_hosts);
+	spin_unlock(&async_scan_lock);
+
+	printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
+	wait_for_completion(&data->prev_finished);
+
+	spin_lock(&async_scan_lock);
+	list_del(&data->list);
+ done:
+	spin_unlock(&async_scan_lock);
+
+	kfree(data);
+	return 0;
+}
+
+#ifdef MODULE
+/* Only exported for the benefit of scsi_wait_scan */
+EXPORT_SYMBOL_GPL(scsi_complete_async_scans);
+#endif
+
 /**
  * scsi_unlock_floptical - unlock device via a special MODE SENSE command
  * @sdev:	scsi device to send command to
@@ -362,9 +437,10 @@
 	goto retry;
 }
 
-static void scsi_target_reap_usercontext(void *data)
+static void scsi_target_reap_usercontext(struct work_struct *work)
 {
-	struct scsi_target *starget = data;
+	struct scsi_target *starget =
+		container_of(work, struct scsi_target, ew.work);
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 	unsigned long flags;
 
@@ -400,7 +476,7 @@
 		starget->state = STARGET_DEL;
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		execute_in_process_context(scsi_target_reap_usercontext,
-					   starget, &starget->ew);
+					   &starget->ew);
 		return;
 
 	}
@@ -619,7 +695,7 @@
  *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
  **/
 static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
-		int *bflags)
+		int *bflags, int async)
 {
 	/*
 	 * XXX do not save the inquiry, since it can change underneath us,
@@ -805,7 +881,7 @@
 	 * register it and tell the rest of the kernel
 	 * about it.
 	 */
-	if (scsi_sysfs_add_sdev(sdev) != 0)
+	if (!async && scsi_sysfs_add_sdev(sdev) != 0)
 		return SCSI_SCAN_NO_RESPONSE;
 
 	return SCSI_SCAN_LUN_PRESENT;
@@ -974,7 +1050,7 @@
 		goto out_free_result;
 	}
 
-	res = scsi_add_lun(sdev, result, &bflags);
+	res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
 	if (res == SCSI_SCAN_LUN_PRESENT) {
 		if (bflags & BLIST_KEY) {
 			sdev->lockable = 0;
@@ -1474,6 +1550,12 @@
 {
 	struct Scsi_Host *shost = dev_to_shost(parent);
 
+	if (strncmp(scsi_scan_type, "none", 4) == 0)
+		return;
+
+	if (!shost->async_scan)
+		scsi_complete_async_scans();
+
 	mutex_lock(&shost->scan_mutex);
 	if (scsi_host_scan_allowed(shost))
 		__scsi_scan_target(parent, channel, id, lun, rescan);
@@ -1519,6 +1601,9 @@
 		"%s: <%u:%u:%u>\n",
 		__FUNCTION__, channel, id, lun));
 
+	if (!shost->async_scan)
+		scsi_complete_async_scans();
+
 	if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
 	    ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
 	    ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
@@ -1539,14 +1624,143 @@
 	return 0;
 }
 
+static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
+{
+	struct scsi_device *sdev;
+	shost_for_each_device(sdev, shost) {
+		if (scsi_sysfs_add_sdev(sdev) != 0)
+			scsi_destroy_sdev(sdev);
+	}
+}
+
+/**
+ * scsi_prep_async_scan - prepare for an async scan
+ * @shost: the host which will be scanned
+ * Returns: a cookie to be passed to scsi_finish_async_scan()
+ *
+ * Tells the midlayer this host is going to do an asynchronous scan.
+ * It reserves the host's position in the scanning list and ensures
+ * that other asynchronous scans started after this one won't affect the
+ * ordering of the discovered devices.
+ */
+static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
+{
+	struct async_scan_data *data;
+
+	if (strncmp(scsi_scan_type, "sync", 4) == 0)
+		return NULL;
+
+	if (shost->async_scan) {
+		printk(KERN_ERR "%s called twice for host %d\n", __FUNCTION__,
+				shost->host_no);
+		dump_stack();
+		return NULL;
+	}
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		goto err;
+	data->shost = scsi_host_get(shost);
+	if (!data->shost)
+		goto err;
+	init_completion(&data->prev_finished);
+
+	spin_lock(&async_scan_lock);
+	shost->async_scan = 1;
+	if (list_empty(&scanning_hosts))
+		complete(&data->prev_finished);
+	list_add_tail(&data->list, &scanning_hosts);
+	spin_unlock(&async_scan_lock);
+
+	return data;
+
+ err:
+	kfree(data);
+	return NULL;
+}
+
+/**
+ * scsi_finish_async_scan - asynchronous scan has finished
+ * @data: cookie returned from earlier call to scsi_prep_async_scan()
+ *
+ * All the devices currently attached to this host have been found.
+ * This function announces all the devices it has found to the rest
+ * of the system.
+ */
+static void scsi_finish_async_scan(struct async_scan_data *data)
+{
+	struct Scsi_Host *shost;
+
+	if (!data)
+		return;
+
+	shost = data->shost;
+	if (!shost->async_scan) {
+		printk(KERN_ERR "%s called twice for host %d\n", __FUNCTION__,
+				shost->host_no);
+		dump_stack();
+		return;
+	}
+
+	wait_for_completion(&data->prev_finished);
+
+	scsi_sysfs_add_devices(shost);
+
+	spin_lock(&async_scan_lock);
+	shost->async_scan = 0;
+	list_del(&data->list);
+	if (!list_empty(&scanning_hosts)) {
+		struct async_scan_data *next = list_entry(scanning_hosts.next,
+				struct async_scan_data, list);
+		complete(&next->prev_finished);
+	}
+	spin_unlock(&async_scan_lock);
+
+	scsi_host_put(shost);
+	kfree(data);
+}
+
+static void do_scsi_scan_host(struct Scsi_Host *shost)
+{
+	if (shost->hostt->scan_finished) {
+		unsigned long start = jiffies;
+		if (shost->hostt->scan_start)
+			shost->hostt->scan_start(shost);
+
+		while (!shost->hostt->scan_finished(shost, jiffies - start))
+			msleep(10);
+	} else {
+		scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
+				SCAN_WILD_CARD, 0);
+	}
+}
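
As an illustration of how the scan_start()/scan_finished() template hooks polled
above are meant to be implemented, here is a minimal hypothetical LLD sketch; the
example_* names and the ten-second cut-off are invented, only the hook signatures
are taken from the calls above:

	static void example_scan_start(struct Scsi_Host *shost)
	{
		struct example_hba *hba = (struct example_hba *)shost->hostdata;

		/* kick off asynchronous target discovery in the hardware */
		example_hw_start_discovery(hba);
	}

	static int example_scan_finished(struct Scsi_Host *shost, unsigned long elapsed)
	{
		struct example_hba *hba = (struct example_hba *)shost->hostdata;

		/* stop waiting after ten seconds so the scan can never hang */
		if (elapsed > 10 * HZ)
			return 1;
		return example_hw_discovery_done(hba);
	}

	static struct scsi_host_template example_template = {
		/* ... */
		.scan_start	= example_scan_start,
		.scan_finished	= example_scan_finished,
	};
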
+
+static int do_scan_async(void *_data)
+{
+	struct async_scan_data *data = _data;
+	do_scsi_scan_host(data->shost);
+	scsi_finish_async_scan(data);
+	return 0;
+}
+
 /**
  * scsi_scan_host - scan the given adapter
  * @shost:	adapter to scan
  **/
 void scsi_scan_host(struct Scsi_Host *shost)
 {
-	scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
-				SCAN_WILD_CARD, 0);
+	struct async_scan_data *data;
+
+	if (strncmp(scsi_scan_type, "none", 4) == 0)
+		return;
+
+	data = scsi_prep_async_scan(shost);
+	if (!data) {
+		do_scsi_scan_host(shost);
+		return;
+	}
+
+	kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
 }
 EXPORT_SYMBOL(scsi_scan_host);
 
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index e1a9166..259c90c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -218,16 +218,16 @@
 	put_device(&sdev->sdev_gendev);
 }
 
-static void scsi_device_dev_release_usercontext(void *data)
+static void scsi_device_dev_release_usercontext(struct work_struct *work)
 {
-	struct device *dev = data;
 	struct scsi_device *sdev;
 	struct device *parent;
 	struct scsi_target *starget;
 	unsigned long flags;
 
-	parent = dev->parent;
-	sdev = to_scsi_device(dev);
+	sdev = container_of(work, struct scsi_device, ew.work);
+
+	parent = sdev->sdev_gendev.parent;
 	starget = to_scsi_target(parent);
 
 	spin_lock_irqsave(sdev->host->host_lock, flags);
@@ -258,7 +258,7 @@
 static void scsi_device_dev_release(struct device *dev)
 {
 	struct scsi_device *sdp = to_scsi_device(dev);
-	execute_in_process_context(scsi_device_dev_release_usercontext, dev,
+	execute_in_process_context(scsi_device_dev_release_usercontext,
 				   &sdp->ew);
 }
 
diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c
new file mode 100644
index 0000000..37bbfbd
--- /dev/null
+++ b/drivers/scsi/scsi_tgt_if.c
@@ -0,0 +1,352 @@
+/*
+ * SCSI target kernel/user interface functions
+ *
+ * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
+ * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/miscdevice.h>
+#include <linux/file.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tgt.h>
+#include <scsi/scsi_tgt_if.h>
+
+#include <asm/cacheflush.h>
+
+#include "scsi_tgt_priv.h"
+
+struct tgt_ring {
+	u32 tr_idx;
+	unsigned long tr_pages[TGT_RING_PAGES];
+	spinlock_t tr_lock;
+};
+
+/* tx_ring : kernel->user, rx_ring : user->kernel */
+static struct tgt_ring tx_ring, rx_ring;
+static DECLARE_WAIT_QUEUE_HEAD(tgt_poll_wait);
+
+static inline void tgt_ring_idx_inc(struct tgt_ring *ring)
+{
+	if (ring->tr_idx == TGT_MAX_EVENTS - 1)
+		ring->tr_idx = 0;
+	else
+		ring->tr_idx++;
+}
+
+static struct tgt_event *tgt_head_event(struct tgt_ring *ring, u32 idx)
+{
+	u32 pidx, off;
+
+	pidx = idx / TGT_EVENT_PER_PAGE;
+	off = idx % TGT_EVENT_PER_PAGE;
+
+	return (struct tgt_event *)
+		(ring->tr_pages[pidx] + sizeof(struct tgt_event) * off);
+}
+
+static int tgt_uspace_send_event(u32 type, struct tgt_event *p)
+{
+	struct tgt_event *ev;
+	struct tgt_ring *ring = &tx_ring;
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&ring->tr_lock, flags);
+
+	ev = tgt_head_event(ring, ring->tr_idx);
+	if (!ev->hdr.status)
+		tgt_ring_idx_inc(ring);
+	else
+		err = -EBUSY;
+
+	spin_unlock_irqrestore(&ring->tr_lock, flags);
+
+	if (err)
+		return err;
+
+	memcpy(ev, p, sizeof(*ev));
+	ev->hdr.type = type;
+	mb();
+	ev->hdr.status = 1;
+
+	flush_dcache_page(virt_to_page(ev));
+
+	wake_up_interruptible(&tgt_poll_wait);
+
+	return 0;
+}
+
+int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun, u64 tag)
+{
+	struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
+	struct tgt_event ev;
+	int err;
+
+	memset(&ev, 0, sizeof(ev));
+	ev.p.cmd_req.host_no = shost->host_no;
+	ev.p.cmd_req.data_len = cmd->request_bufflen;
+	memcpy(ev.p.cmd_req.scb, cmd->cmnd, sizeof(ev.p.cmd_req.scb));
+	memcpy(ev.p.cmd_req.lun, lun, sizeof(ev.p.cmd_req.lun));
+	ev.p.cmd_req.attribute = cmd->tag;
+	ev.p.cmd_req.tag = tag;
+
+	dprintk("%p %d %u %x %llx\n", cmd, shost->host_no,
+		ev.p.cmd_req.data_len, cmd->tag,
+		(unsigned long long) ev.p.cmd_req.tag);
+
+	err = tgt_uspace_send_event(TGT_KEVENT_CMD_REQ, &ev);
+	if (err)
+		eprintk("tx buf is full, could not send\n");
+
+	return err;
+}
+
+int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag)
+{
+	struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
+	struct tgt_event ev;
+	int err;
+
+	memset(&ev, 0, sizeof(ev));
+	ev.p.cmd_done.host_no = shost->host_no;
+	ev.p.cmd_done.tag = tag;
+	ev.p.cmd_done.result = cmd->result;
+
+	dprintk("%p %d %llu %u %x\n", cmd, shost->host_no,
+		(unsigned long long) ev.p.cmd_req.tag,
+		ev.p.cmd_req.data_len, cmd->tag);
+
+	err = tgt_uspace_send_event(TGT_KEVENT_CMD_DONE, &ev);
+	if (err)
+		eprintk("tx buf is full, could not send\n");
+
+	return err;
+}
+
+int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
+				  struct scsi_lun *scsilun, void *data)
+{
+	struct tgt_event ev;
+	int err;
+
+	memset(&ev, 0, sizeof(ev));
+	ev.p.tsk_mgmt_req.host_no = host_no;
+	ev.p.tsk_mgmt_req.function = function;
+	ev.p.tsk_mgmt_req.tag = tag;
+	memcpy(ev.p.tsk_mgmt_req.lun, scsilun, sizeof(ev.p.tsk_mgmt_req.lun));
+	ev.p.tsk_mgmt_req.mid = (u64) (unsigned long) data;
+
+	dprintk("%d %x %llx %llx\n", host_no, function, (unsigned long long) tag,
+		(unsigned long long) ev.p.tsk_mgmt_req.mid);
+
+	err = tgt_uspace_send_event(TGT_KEVENT_TSK_MGMT_REQ, &ev);
+	if (err)
+		eprintk("tx buf is full, could not send\n");
+
+	return err;
+}
+
+static int event_recv_msg(struct tgt_event *ev)
+{
+	int err = 0;
+
+	switch (ev->hdr.type) {
+	case TGT_UEVENT_CMD_RSP:
+		err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no,
+					   ev->p.cmd_rsp.tag,
+					   ev->p.cmd_rsp.result,
+					   ev->p.cmd_rsp.len,
+					   ev->p.cmd_rsp.uaddr,
+					   ev->p.cmd_rsp.rw);
+		break;
+	case TGT_UEVENT_TSK_MGMT_RSP:
+		err = scsi_tgt_kspace_tsk_mgmt(ev->p.tsk_mgmt_rsp.host_no,
+					       ev->p.tsk_mgmt_rsp.mid,
+					       ev->p.tsk_mgmt_rsp.result);
+		break;
+	default:
+		eprintk("unknown type %d\n", ev->hdr.type);
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+static ssize_t tgt_write(struct file *file, const char __user * buffer,
+			 size_t count, loff_t * ppos)
+{
+	struct tgt_event *ev;
+	struct tgt_ring *ring = &rx_ring;
+
+	while (1) {
+		ev = tgt_head_event(ring, ring->tr_idx);
+		/* do we need this? */
+		flush_dcache_page(virt_to_page(ev));
+
+		if (!ev->hdr.status)
+			break;
+
+		tgt_ring_idx_inc(ring);
+		event_recv_msg(ev);
+		ev->hdr.status = 0;
+	}
+
+	return count;
+}
+
+static unsigned int tgt_poll(struct file * file, struct poll_table_struct *wait)
+{
+	struct tgt_event *ev;
+	struct tgt_ring *ring = &tx_ring;
+	unsigned long flags;
+	unsigned int mask = 0;
+	u32 idx;
+
+	poll_wait(file, &tgt_poll_wait, wait);
+
+	spin_lock_irqsave(&ring->tr_lock, flags);
+
+	idx = ring->tr_idx ? ring->tr_idx - 1 : TGT_MAX_EVENTS - 1;
+	ev = tgt_head_event(ring, idx);
+	if (ev->hdr.status)
+		mask |= POLLIN | POLLRDNORM;
+
+	spin_unlock_irqrestore(&ring->tr_lock, flags);
+
+	return mask;
+}
+
+static int uspace_ring_map(struct vm_area_struct *vma, unsigned long addr,
+			   struct tgt_ring *ring)
+{
+	int i, err;
+
+	for (i = 0; i < TGT_RING_PAGES; i++) {
+		struct page *page = virt_to_page(ring->tr_pages[i]);
+		err = vm_insert_page(vma, addr, page);
+		if (err)
+			return err;
+		addr += PAGE_SIZE;
+	}
+
+	return 0;
+}
+
+static int tgt_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	unsigned long addr;
+	int err;
+
+	if (vma->vm_pgoff)
+		return -EINVAL;
+
+	if (vma->vm_end - vma->vm_start != TGT_RING_SIZE * 2) {
+		eprintk("mmap size must be %lu, not %lu\n",
+			TGT_RING_SIZE * 2, vma->vm_end - vma->vm_start);
+		return -EINVAL;
+	}
+
+	addr = vma->vm_start;
+	err = uspace_ring_map(vma, addr, &tx_ring);
+	if (err)
+		return err;
+	err = uspace_ring_map(vma, addr + TGT_RING_SIZE, &rx_ring);
+
+	return err;
+}
+
+static int tgt_open(struct inode *inode, struct file *file)
+{
+	tx_ring.tr_idx = rx_ring.tr_idx = 0;
+
+	return 0;
+}
+
+static struct file_operations tgt_fops = {
+	.owner		= THIS_MODULE,
+	.open		= tgt_open,
+	.poll		= tgt_poll,
+	.write		= tgt_write,
+	.mmap		= tgt_mmap,
+};
+
+static struct miscdevice tgt_miscdev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "tgt",
+	.fops = &tgt_fops,
+};
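
A rough sketch of the matching userspace side, for illustration only; TGT_RING_SIZE
and struct tgt_event live in <scsi/scsi_tgt_if.h>, which is not shown here, and
error handling is omitted:

	int fd = open("/dev/tgt", O_RDWR);
	char *tx = mmap(NULL, TGT_RING_SIZE * 2, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);
	char *rx = tx + TGT_RING_SIZE;	/* user->kernel ring follows tx */
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	poll(&pfd, 1, -1);	/* wait for the kernel to post a tx event */
	/* consume tgt_event entries from tx, clearing hdr.status as we go,
	 * then fill responses into rx and set their hdr.status ... */
	write(fd, NULL, 0);	/* tgt_write() above then walks the rx ring */
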
+
+static void tgt_ring_exit(struct tgt_ring *ring)
+{
+	int i;
+
+	for (i = 0; i < TGT_RING_PAGES; i++)
+		free_page(ring->tr_pages[i]);
+}
+
+static int tgt_ring_init(struct tgt_ring *ring)
+{
+	int i;
+
+	spin_lock_init(&ring->tr_lock);
+
+	for (i = 0; i < TGT_RING_PAGES; i++) {
+		ring->tr_pages[i] = get_zeroed_page(GFP_KERNEL);
+		if (!ring->tr_pages[i]) {
+			eprintk("out of memory\n");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+void scsi_tgt_if_exit(void)
+{
+	tgt_ring_exit(&tx_ring);
+	tgt_ring_exit(&rx_ring);
+	misc_deregister(&tgt_miscdev);
+}
+
+int scsi_tgt_if_init(void)
+{
+	int err;
+
+	err = tgt_ring_init(&tx_ring);
+	if (err)
+		return err;
+
+	err = tgt_ring_init(&rx_ring);
+	if (err)
+		goto free_tx_ring;
+
+	err = misc_register(&tgt_miscdev);
+	if (err)
+		goto free_rx_ring;
+
+	return 0;
+free_rx_ring:
+	tgt_ring_exit(&rx_ring);
+free_tx_ring:
+	tgt_ring_exit(&tx_ring);
+
+	return err;
+}
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
new file mode 100644
index 0000000..d402aff
--- /dev/null
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -0,0 +1,745 @@
+/*
+ * SCSI target lib functions
+ *
+ * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
+ * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/blkdev.h>
+#include <linux/hash.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tgt.h>
+#include <../drivers/md/dm-bio-list.h>
+
+#include "scsi_tgt_priv.h"
+
+static struct workqueue_struct *scsi_tgtd;
+static struct kmem_cache *scsi_tgt_cmd_cache;
+
+/*
+ * TODO: this struct will be killed when the block layer supports large bios
+ * and James's work struct code is in
+ */
+struct scsi_tgt_cmd {
+	/* TODO replace work with James b's code */
+	struct work_struct work;
+	/* TODO replace the lists with a large bio */
+	struct bio_list xfer_done_list;
+	struct bio_list xfer_list;
+
+	struct list_head hash_list;
+	struct request *rq;
+	u64 tag;
+
+	void *buffer;
+	unsigned bufflen;
+};
+
+#define TGT_HASH_ORDER	4
+#define cmd_hashfn(tag)	hash_long((unsigned long) (tag), TGT_HASH_ORDER)
+
+struct scsi_tgt_queuedata {
+	struct Scsi_Host *shost;
+	struct list_head cmd_hash[1 << TGT_HASH_ORDER];
+	spinlock_t cmd_hash_lock;
+};
+
+/*
+ * Function:	scsi_host_get_command()
+ *
+ * Purpose:	Allocate and setup a scsi command block and blk request
+ *
+ * Arguments:	shost	- scsi host
+ *		data_dir - dma data dir
+ *		gfp_mask- allocator flags
+ *
+ * Returns:	The allocated scsi command structure.
+ *
+ * This should be called by target LLDs to get a command.
+ */
+struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
+					enum dma_data_direction data_dir,
+					gfp_t gfp_mask)
+{
+	int write = (data_dir == DMA_TO_DEVICE);
+	struct request *rq;
+	struct scsi_cmnd *cmd;
+	struct scsi_tgt_cmd *tcmd;
+
+	/* Bail if we can't get a reference to the device */
+	if (!get_device(&shost->shost_gendev))
+		return NULL;
+
+	tcmd = kmem_cache_alloc(scsi_tgt_cmd_cache, GFP_ATOMIC);
+	if (!tcmd)
+		goto put_dev;
+
+	rq = blk_get_request(shost->uspace_req_q, write, gfp_mask);
+	if (!rq)
+		goto free_tcmd;
+
+	cmd = __scsi_get_command(shost, gfp_mask);
+	if (!cmd)
+		goto release_rq;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->sc_data_direction = data_dir;
+	cmd->jiffies_at_alloc = jiffies;
+	cmd->request = rq;
+
+	rq->special = cmd;
+	rq->cmd_type = REQ_TYPE_SPECIAL;
+	rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
+	rq->end_io_data = tcmd;
+
+	bio_list_init(&tcmd->xfer_list);
+	bio_list_init(&tcmd->xfer_done_list);
+	tcmd->rq = rq;
+
+	return cmd;
+
+release_rq:
+	blk_put_request(rq);
+free_tcmd:
+	kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
+put_dev:
+	put_device(&shost->shost_gendev);
+	return NULL;
+
+}
+EXPORT_SYMBOL_GPL(scsi_host_get_command);
+
+/*
+ * Function:	scsi_host_put_command()
+ *
+ * Purpose:	Free a scsi command block
+ *
+ * Arguments:	shost	- scsi host
+ * 		cmd	- command block to free
+ *
+ * Returns:	Nothing.
+ *
+ * Notes:	The command must not belong to any lists.
+ */
+void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+	struct request_queue *q = shost->uspace_req_q;
+	struct request *rq = cmd->request;
+	struct scsi_tgt_cmd *tcmd = rq->end_io_data;
+	unsigned long flags;
+
+	kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__blk_put_request(q, rq);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	__scsi_put_command(shost, cmd, &shost->shost_gendev);
+}
+EXPORT_SYMBOL_GPL(scsi_host_put_command);
+
+static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
+{
+	struct bio *bio;
+
+	/* must call bio_endio in case bio was bounced */
+	while ((bio = bio_list_pop(&tcmd->xfer_done_list))) {
+		bio_endio(bio, bio->bi_size, 0);
+		bio_unmap_user(bio);
+	}
+
+	while ((bio = bio_list_pop(&tcmd->xfer_list))) {
+		bio_endio(bio, bio->bi_size, 0);
+		bio_unmap_user(bio);
+	}
+}
+
+static void cmd_hashlist_del(struct scsi_cmnd *cmd)
+{
+	struct request_queue *q = cmd->request->q;
+	struct scsi_tgt_queuedata *qdata = q->queuedata;
+	unsigned long flags;
+	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
+
+	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
+	list_del(&tcmd->hash_list);
+	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
+}
+
+static void scsi_tgt_cmd_destroy(struct work_struct *work)
+{
+	struct scsi_tgt_cmd *tcmd =
+		container_of(work, struct scsi_tgt_cmd, work);
+	struct scsi_cmnd *cmd = tcmd->rq->special;
+
+	dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
+		rq_data_dir(cmd->request));
+	/*
+	 * We fix rq->cmd_flags here since when we told bio_map_user
+	 * to write to the VM for WRITE commands, blk_rq_bio_prep set
+	 * the flags (and thus rq_data_dir) to READ.
+	 */
+	if (cmd->sc_data_direction == DMA_TO_DEVICE)
+		cmd->request->cmd_flags |= REQ_RW;
+	else
+		cmd->request->cmd_flags &= ~REQ_RW;
+
+	scsi_unmap_user_pages(tcmd);
+	scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
+}
+
+static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd,
+			      u64 tag)
+{
+	struct scsi_tgt_queuedata *qdata = rq->q->queuedata;
+	unsigned long flags;
+	struct list_head *head;
+
+	tcmd->tag = tag;
+	INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy);
+	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
+	head = &qdata->cmd_hash[cmd_hashfn(tag)];
+	list_add(&tcmd->hash_list, head);
+	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
+}
+
+/*
+ * scsi_tgt_alloc_queue - setup queue used for message passing
+ * shost: scsi host
+ *
+ * This should be called by the LLD after host allocation.
+ * And will be released when the host is released.
+ */
+int scsi_tgt_alloc_queue(struct Scsi_Host *shost)
+{
+	struct scsi_tgt_queuedata *queuedata;
+	struct request_queue *q;
+	int err, i;
+
+	/*
+	 * Do we need to send a netlink event or should uspace
+	 * just respond to the hotplug event?
+	 */
+	q = __scsi_alloc_queue(shost, NULL);
+	if (!q)
+		return -ENOMEM;
+
+	queuedata = kzalloc(sizeof(*queuedata), GFP_KERNEL);
+	if (!queuedata) {
+		err = -ENOMEM;
+		goto cleanup_queue;
+	}
+	queuedata->shost = shost;
+	q->queuedata = queuedata;
+
+	/*
+	 * This is a silly hack.  We should probably just queue as many
+	 * commands as are received to userspace.  Userspace can then make
+	 * sure we do not overload the HBA.
+	 */
+	q->nr_requests = shost->hostt->can_queue;
+	/*
+	 * We currently only support software LLDs so this does
+	 * not matter for now. Do we need this for the cards we support?
+	 * If so we should make it a host template value.
+	 */
+	blk_queue_dma_alignment(q, 0);
+	shost->uspace_req_q = q;
+
+	for (i = 0; i < ARRAY_SIZE(queuedata->cmd_hash); i++)
+		INIT_LIST_HEAD(&queuedata->cmd_hash[i]);
+	spin_lock_init(&queuedata->cmd_hash_lock);
+
+	return 0;
+
+cleanup_queue:
+	blk_cleanup_queue(q);
+	return err;
+}
+EXPORT_SYMBOL_GPL(scsi_tgt_alloc_queue);
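
A hypothetical probe path in a target-mode LLD might use it as follows; the
example_* names are invented and error handling is abbreviated:

	static int example_tgt_probe(struct device *dev)
	{
		struct Scsi_Host *shost;
		int err;

		shost = scsi_host_alloc(&example_tgt_template,
					sizeof(struct example_tgt_hba));
		if (!shost)
			return -ENOMEM;

		/* set up the message-passing queue before the host goes live */
		err = scsi_tgt_alloc_queue(shost);
		if (err)
			goto put_host;

		err = scsi_add_host(shost, dev);
		if (err)
			goto put_host;
		return 0;

	put_host:
		scsi_host_put(shost);
		return err;
	}
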
+
+void scsi_tgt_free_queue(struct Scsi_Host *shost)
+{
+	int i;
+	unsigned long flags;
+	struct request_queue *q = shost->uspace_req_q;
+	struct scsi_cmnd *cmd;
+	struct scsi_tgt_queuedata *qdata = q->queuedata;
+	struct scsi_tgt_cmd *tcmd, *n;
+	LIST_HEAD(cmds);
+
+	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
+
+	for (i = 0; i < ARRAY_SIZE(qdata->cmd_hash); i++) {
+		list_for_each_entry_safe(tcmd, n, &qdata->cmd_hash[i],
+					 hash_list) {
+			list_del(&tcmd->hash_list);
+			list_add(&tcmd->hash_list, &cmds);
+		}
+	}
+
+	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
+
+	while (!list_empty(&cmds)) {
+		tcmd = list_entry(cmds.next, struct scsi_tgt_cmd, hash_list);
+		list_del(&tcmd->hash_list);
+		cmd = tcmd->rq->special;
+
+		shost->hostt->eh_abort_handler(cmd);
+		scsi_tgt_cmd_destroy(&tcmd->work);
+	}
+}
+EXPORT_SYMBOL_GPL(scsi_tgt_free_queue);
+
+struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *cmd)
+{
+	struct scsi_tgt_queuedata *queue = cmd->request->q->queuedata;
+	return queue->shost;
+}
+EXPORT_SYMBOL_GPL(scsi_tgt_cmd_to_host);
+
+/*
+ * scsi_tgt_queue_command - queue command for userspace processing
+ * @cmd:	scsi command
+ * @scsilun:	scsi lun
+ * @tag:	unique value to identify this command for tmf
+ */
+int scsi_tgt_queue_command(struct scsi_cmnd *cmd, struct scsi_lun *scsilun,
+			   u64 tag)
+{
+	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
+	int err;
+
+	init_scsi_tgt_cmd(cmd->request, tcmd, tag);
+	err = scsi_tgt_uspace_send_cmd(cmd, scsilun, tag);
+	if (err)
+		cmd_hashlist_del(cmd);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(scsi_tgt_queue_command);
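
Taken together with scsi_host_get_command() above, the expected command-arrival
path in a target LLD would look roughly like this hypothetical sketch (the
example_* name is invented; transport and locking details omitted):

	static int example_recv_cdb(struct Scsi_Host *shost, u8 *cdb,
				    struct scsi_lun *lun, u64 tag, u32 data_len,
				    enum dma_data_direction dir)
	{
		struct scsi_cmnd *cmd;
		int err;

		cmd = scsi_host_get_command(shost, dir, GFP_ATOMIC);
		if (!cmd)
			return -ENOMEM;

		memcpy(cmd->cmnd, cdb, MAX_COMMAND_SIZE);
		cmd->request_bufflen = data_len;

		/* hand the command to userspace; on failure give it back */
		err = scsi_tgt_queue_command(cmd, lun, tag);
		if (err)
			scsi_host_put_command(shost, cmd);
		return err;
	}
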
+
+/*
+ * This is normally run from an interrupt handler, and the unmap
+ * needs process context, so we must queue the work.
+ */
+static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
+{
+	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
+
+	dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
+
+	scsi_tgt_uspace_send_status(cmd, tcmd->tag);
+	queue_work(scsi_tgtd, &tcmd->work);
+}
+
+static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
+	int err;
+
+	dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
+
+	err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done);
+	switch (err) {
+	case SCSI_MLQUEUE_HOST_BUSY:
+	case SCSI_MLQUEUE_DEVICE_BUSY:
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static void scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
+{
+	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
+	int err;
+
+	err = __scsi_tgt_transfer_response(cmd);
+	if (!err)
+		return;
+
+	cmd->result = DID_BUS_BUSY << 16;
+	err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
+	if (err <= 0)
+		/* the eh will have to pick this up */
+		printk(KERN_ERR "Could not send cmd %p status\n", cmd);
+}
+
+static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+{
+	struct request *rq = cmd->request;
+	struct scsi_tgt_cmd *tcmd = rq->end_io_data;
+	int count;
+
+	cmd->use_sg = rq->nr_phys_segments;
+	cmd->request_buffer = scsi_alloc_sgtable(cmd, gfp_mask);
+	if (!cmd->request_buffer)
+		return -ENOMEM;
+
+	cmd->request_bufflen = rq->data_len;
+
+	dprintk("cmd %p addr %p cnt %d %lu\n", cmd, tcmd->buffer, cmd->use_sg,
+		rq_data_dir(rq));
+	count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer);
+	if (likely(count <= cmd->use_sg)) {
+		cmd->use_sg = count;
+		return 0;
+	}
+
+	eprintk("cmd %p addr %p cnt %d\n", cmd, tcmd->buffer, cmd->use_sg);
+	scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
+	return -EINVAL;
+}
+
+/* TODO: test this crap and replace bio_map_user with new interface maybe */
+static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
+			       int rw)
+{
+	struct request_queue *q = cmd->request->q;
+	struct request *rq = cmd->request;
+	void *uaddr = tcmd->buffer;
+	unsigned int len = tcmd->bufflen;
+	struct bio *bio;
+	int err;
+
+	while (len > 0) {
+		dprintk("%lx %u\n", (unsigned long) uaddr, len);
+		bio = bio_map_user(q, NULL, (unsigned long) uaddr, len, rw);
+		if (IS_ERR(bio)) {
+			err = PTR_ERR(bio);
+			dprintk("fail to map %lx %u %d %x\n",
+				(unsigned long) uaddr, len, err, cmd->cmnd[0]);
+			goto unmap_bios;
+		}
+
+		uaddr += bio->bi_size;
+		len -= bio->bi_size;
+
+		/*
+		 * The first bio is added and merged. We could probably
+		 * try to add others using scsi_merge_bio() but for now
+		 * we keep it simple. The first bio should be pretty large
+		 * (either hitting the 1 MB bio pages limit or a queue limit)
+		 * already but for really large IO we may want to try and
+		 * merge these.
+		 */
+		if (!rq->bio) {
+			blk_rq_bio_prep(q, rq, bio);
+			rq->data_len = bio->bi_size;
+		} else
+			/* put list of bios to transfer in next go around */
+			bio_list_add(&tcmd->xfer_list, bio);
+	}
+
+	cmd->offset = 0;
+	err = scsi_tgt_init_cmd(cmd, GFP_KERNEL);
+	if (err)
+		goto unmap_bios;
+
+	return 0;
+
+unmap_bios:
+	if (rq->bio) {
+		bio_unmap_user(rq->bio);
+		while ((bio = bio_list_pop(&tcmd->xfer_list)))
+			bio_unmap_user(bio);
+	}
+
+	return err;
+}
+
+static int scsi_tgt_transfer_data(struct scsi_cmnd *);
+
+static void scsi_tgt_data_transfer_done(struct scsi_cmnd *cmd)
+{
+	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
+	struct bio *bio;
+	int err;
+
+	/* should we free resources here on error ? */
+	if (cmd->result) {
+send_uspace_err:
+		err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
+		if (err <= 0)
+			/* the tgt uspace eh will have to pick this up */
+			printk(KERN_ERR "Could not send cmd %p status\n", cmd);
+		return;
+	}
+
+	dprintk("cmd %p request_bufflen %u bufflen %u\n",
+		cmd, cmd->request_bufflen, tcmd->bufflen);
+
+	scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
+	bio_list_add(&tcmd->xfer_done_list, cmd->request->bio);
+
+	tcmd->buffer += cmd->request_bufflen;
+	cmd->offset += cmd->request_bufflen;
+
+	if (!tcmd->xfer_list.head) {
+		scsi_tgt_transfer_response(cmd);
+		return;
+	}
+
+	dprintk("cmd2 %p request_bufflen %u bufflen %u\n",
+		cmd, cmd->request_bufflen, tcmd->bufflen);
+
+	bio = bio_list_pop(&tcmd->xfer_list);
+	BUG_ON(!bio);
+
+	blk_rq_bio_prep(cmd->request->q, cmd->request, bio);
+	cmd->request->data_len = bio->bi_size;
+	err = scsi_tgt_init_cmd(cmd, GFP_ATOMIC);
+	if (err) {
+		cmd->result = DID_ERROR << 16;
+		goto send_uspace_err;
+	}
+
+	if (scsi_tgt_transfer_data(cmd)) {
+		cmd->result = DID_NO_CONNECT << 16;
+		goto send_uspace_err;
+	}
+}
+
+static int scsi_tgt_transfer_data(struct scsi_cmnd *cmd)
+{
+	int err;
+	struct Scsi_Host *host = scsi_tgt_cmd_to_host(cmd);
+
+	err = host->hostt->transfer_data(cmd, scsi_tgt_data_transfer_done);
+	switch (err) {
+	case SCSI_MLQUEUE_HOST_BUSY:
+	case SCSI_MLQUEUE_DEVICE_BUSY:
+		return -EAGAIN;
+	default:
+		return 0;
+	}
+}
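
The hooks reached through shost->hostt in this file (transfer_data and
transfer_response here, eh_abort_handler and tsk_mgmt_response elsewhere) are
supplied by the target LLD.  A minimal hypothetical sketch, with signatures
inferred only from the call sites above:

	static int example_transfer_data(struct scsi_cmnd *cmd,
					 void (*done)(struct scsi_cmnd *))
	{
		/* move cmd->request_buffer to/from the wire, then complete */
		done(cmd);
		return 0;
	}

	static int example_transfer_response(struct scsi_cmnd *cmd,
					     void (*done)(struct scsi_cmnd *))
	{
		/* send status (and sense, if any) back to the initiator */
		done(cmd);
		return 0;
	}

	static int example_eh_abort(struct scsi_cmnd *cmd)
	{
		return SUCCESS;
	}

	static int example_tsk_mgmt_response(u64 mid, int result)
	{
		return 0;
	}

	static struct scsi_host_template example_tgt_template = {
		.name			= "example_tgt",
		.can_queue		= 64,
		.transfer_data		= example_transfer_data,
		.transfer_response	= example_transfer_response,
		.eh_abort_handler	= example_eh_abort,
		.tsk_mgmt_response	= example_tsk_mgmt_response,
	};
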
+
+static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr,
+				unsigned len)
+{
+	char __user *p = (char __user *) uaddr;
+
+	if (copy_from_user(cmd->sense_buffer, p,
+			   min_t(unsigned, SCSI_SENSE_BUFFERSIZE, len))) {
+		printk(KERN_ERR "Could not copy the sense buffer\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+	struct scsi_tgt_cmd *tcmd;
+	int err;
+
+	err = shost->hostt->eh_abort_handler(cmd);
+	if (err)
+		eprintk("failed to abort %p\n", cmd);
+
+	tcmd = cmd->request->end_io_data;
+	scsi_tgt_cmd_destroy(&tcmd->work);
+	return err;
+}
+
+static struct request *tgt_cmd_hash_lookup(struct request_queue *q, u64 tag)
+{
+	struct scsi_tgt_queuedata *qdata = q->queuedata;
+	struct request *rq = NULL;
+	struct list_head *head;
+	struct scsi_tgt_cmd *tcmd;
+	unsigned long flags;
+
+	head = &qdata->cmd_hash[cmd_hashfn(tag)];
+	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
+	list_for_each_entry(tcmd, head, hash_list) {
+		if (tcmd->tag == tag) {
+			rq = tcmd->rq;
+			list_del(&tcmd->hash_list);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
+
+	return rq;
+}
+
+int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
+			 unsigned long uaddr, u8 rw)
+{
+	struct Scsi_Host *shost;
+	struct scsi_cmnd *cmd;
+	struct request *rq;
+	struct scsi_tgt_cmd *tcmd;
+	int err = 0;
+
+	dprintk("%d %llu %d %u %lx %u\n", host_no, (unsigned long long) tag,
+		result, len, uaddr, rw);
+
+	/* TODO: replace with a O(1) alg */
+	shost = scsi_host_lookup(host_no);
+	if (IS_ERR(shost)) {
+		printk(KERN_ERR "Could not find host no %d\n", host_no);
+		return -EINVAL;
+	}
+
+	if (!shost->uspace_req_q) {
+		printk(KERN_ERR "Not a target scsi host %d\n", host_no);
+		goto done;
+	}
+
+	rq = tgt_cmd_hash_lookup(shost->uspace_req_q, tag);
+	if (!rq) {
+		printk(KERN_ERR "Could not find tag %llu\n",
+		       (unsigned long long) tag);
+		err = -EINVAL;
+		goto done;
+	}
+	cmd = rq->special;
+
+	dprintk("cmd %p result %d len %d bufflen %u %lu %x\n", cmd,
+		result, len, cmd->request_bufflen, rq_data_dir(rq), cmd->cmnd[0]);
+
+	if (result == TASK_ABORTED) {
+		scsi_tgt_abort_cmd(shost, cmd);
+		goto done;
+	}
+	/*
+	 * Store the userspace values here; the working values are
+	 * in the request_* fields.
+	 */
+	tcmd = cmd->request->end_io_data;
+	tcmd->buffer = (void *)uaddr;
+	tcmd->bufflen = len;
+	cmd->result = result;
+
+	if (!tcmd->bufflen || cmd->request_buffer) {
+		err = __scsi_tgt_transfer_response(cmd);
+		goto done;
+	}
+
+	/*
+	 * TODO: Do we need to handle the case where the request does
+	 * not align with the LLD?
+	 */
+	err = scsi_map_user_pages(rq->end_io_data, cmd, rw);
+	if (err) {
+		eprintk("%p %d\n", cmd, err);
+		err = -EAGAIN;
+		goto done;
+	}
+
+	/* userspace failure */
+	if (cmd->result) {
+		if (status_byte(cmd->result) == CHECK_CONDITION)
+			scsi_tgt_copy_sense(cmd, uaddr, len);
+		err = __scsi_tgt_transfer_response(cmd);
+		goto done;
+	}
+	/* ask the target LLD to transfer the data to the buffer */
+	err = scsi_tgt_transfer_data(cmd);
+
+done:
+	scsi_host_put(shost);
+	return err;
+}
+
+int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *shost, int function, u64 tag,
+			      struct scsi_lun *scsilun, void *data)
+{
+	int err;
+
+	/* TODO: need to retry if this fails. */
+	err = scsi_tgt_uspace_send_tsk_mgmt(shost->host_no, function,
+					    tag, scsilun, data);
+	if (err < 0)
+		eprintk("The task management request was lost!\n");
+	return err;
+}
+EXPORT_SYMBOL_GPL(scsi_tgt_tsk_mgmt_request);
+
+int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result)
+{
+	struct Scsi_Host *shost;
+	int err = -EINVAL;
+
+	dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);
+
+	shost = scsi_host_lookup(host_no);
+	if (IS_ERR(shost)) {
+		printk(KERN_ERR "Could not find host no %d\n", host_no);
+		return err;
+	}
+
+	if (!shost->uspace_req_q) {
+		printk(KERN_ERR "Not a target scsi host %d\n", host_no);
+		goto done;
+	}
+
+	err = shost->hostt->tsk_mgmt_response(mid, result);
+done:
+	scsi_host_put(shost);
+	return err;
+}
+
+static int __init scsi_tgt_init(void)
+{
+	int err;
+
+	scsi_tgt_cmd_cache = kmem_cache_create("scsi_tgt_cmd",
+					       sizeof(struct scsi_tgt_cmd),
+					       0, 0, NULL, NULL);
+	if (!scsi_tgt_cmd_cache)
+		return -ENOMEM;
+
+	scsi_tgtd = create_workqueue("scsi_tgtd");
+	if (!scsi_tgtd) {
+		err = -ENOMEM;
+		goto free_kmemcache;
+	}
+
+	err = scsi_tgt_if_init();
+	if (err)
+		goto destroy_wq;
+
+	return 0;
+
+destroy_wq:
+	destroy_workqueue(scsi_tgtd);
+free_kmemcache:
+	kmem_cache_destroy(scsi_tgt_cmd_cache);
+	return err;
+}
+
+static void __exit scsi_tgt_exit(void)
+{
+	destroy_workqueue(scsi_tgtd);
+	scsi_tgt_if_exit();
+	kmem_cache_destroy(scsi_tgt_cmd_cache);
+}
+
+module_init(scsi_tgt_init);
+module_exit(scsi_tgt_exit);
+
+MODULE_DESCRIPTION("SCSI target core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/scsi_tgt_priv.h b/drivers/scsi/scsi_tgt_priv.h
new file mode 100644
index 0000000..84488c5
--- /dev/null
+++ b/drivers/scsi/scsi_tgt_priv.h
@@ -0,0 +1,25 @@
+struct scsi_cmnd;
+struct scsi_lun;
+struct Scsi_Host;
+struct task_struct;
+
+/* tmp - will replace with SCSI logging stuff */
+#define eprintk(fmt, args...)					\
+do {								\
+	printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args);	\
+} while (0)
+
+#define dprintk(fmt, args...)
+/* #define dprintk eprintk */
+
+extern void scsi_tgt_if_exit(void);
+extern int scsi_tgt_if_init(void);
+
+extern int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun,
+				    u64 tag);
+extern int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag);
+extern int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
+				unsigned long uaddr, u8 rw);
+extern int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
+					 struct scsi_lun *scsilun, void *data);
+extern int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 38c215a..3571ce8 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -241,9 +241,9 @@
 #define FC_MGMTSRVR_PORTID		0x00000a
 
 
-static void fc_timeout_deleted_rport(void *data);
-static void fc_timeout_fail_rport_io(void *data);
-static void fc_scsi_scan_rport(void *data);
+static void fc_timeout_deleted_rport(struct work_struct *work);
+static void fc_timeout_fail_rport_io(struct work_struct *work);
+static void fc_scsi_scan_rport(struct work_struct *work);
 
 /*
  * Attribute counts pre object type...
@@ -1613,7 +1613,7 @@
  * 	1 on success / 0 already queued / < 0 for error
  **/
 static int
-fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work,
+fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
 				unsigned long delay)
 {
 	if (unlikely(!fc_host_devloss_work_q(shost))) {
@@ -1625,9 +1625,6 @@
 		return -EINVAL;
 	}
 
-	if (delay == 0)
-		return queue_work(fc_host_devloss_work_q(shost), work);
-
 	return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
 }
 
@@ -1712,12 +1709,13 @@
  * fc_starget_delete - called to delete the scsi decendents of an rport
  *                  (target and all sdevs)
  *
- * @data:	remote port to be operated on.
+ * @work:	remote port to be operated on.
  **/
 static void
-fc_starget_delete(void *data)
+fc_starget_delete(struct work_struct *work)
 {
-	struct fc_rport *rport = (struct fc_rport *)data;
+	struct fc_rport *rport =
+		container_of(work, struct fc_rport, stgt_delete_work);
 	struct Scsi_Host *shost = rport_to_shost(rport);
 	unsigned long flags;
 	struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -1751,12 +1749,13 @@
 /**
  * fc_rport_final_delete - finish rport termination and delete it.
  *
- * @data:	remote port to be deleted.
+ * @work:	remote port to be deleted.
  **/
 static void
-fc_rport_final_delete(void *data)
+fc_rport_final_delete(struct work_struct *work)
 {
-	struct fc_rport *rport = (struct fc_rport *)data;
+	struct fc_rport *rport =
+		container_of(work, struct fc_rport, rport_delete_work);
 	struct device *dev = &rport->dev;
 	struct Scsi_Host *shost = rport_to_shost(rport);
 	struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -1770,7 +1769,7 @@
 
 	/* Delete SCSI target and sdevs */
 	if (rport->scsi_target_id != -1)
-		fc_starget_delete(data);
+		fc_starget_delete(&rport->stgt_delete_work);
 	else if (i->f->dev_loss_tmo_callbk)
 		i->f->dev_loss_tmo_callbk(rport);
 	else if (i->f->terminate_rport_io)
@@ -1829,11 +1828,11 @@
 	rport->channel = channel;
 	rport->fast_io_fail_tmo = -1;
 
-	INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport);
-	INIT_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io, rport);
-	INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport);
-	INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport);
-	INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport);
+	INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
+	INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
+	INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
+	INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
+	INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);
 
 	spin_lock_irqsave(shost->host_lock, flags);
 
@@ -1963,7 +1962,7 @@
 			}
 
 			if (match) {
-				struct work_struct *work = 
+				struct delayed_work *work =
 							&rport->dev_loss_work;
 
 				memcpy(&rport->node_name, &ids->node_name,
@@ -2267,12 +2266,13 @@
  *                       was a SCSI target (thus was blocked), and failed
  *                       to return in the alloted time.
  * 
- * @data:	rport target that failed to reappear in the alloted time.
+ * @work:	rport target that failed to reappear in the allotted time.
  **/
 static void
-fc_timeout_deleted_rport(void  *data)
+fc_timeout_deleted_rport(struct work_struct *work)
 {
-	struct fc_rport *rport = (struct fc_rport *)data;
+	struct fc_rport *rport =
+		container_of(work, struct fc_rport, dev_loss_work.work);
 	struct Scsi_Host *shost = rport_to_shost(rport);
 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
 	unsigned long flags;
@@ -2366,15 +2366,16 @@
  * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a
  *                       disconnected SCSI target.
  *
- * @data:	rport to terminate io on.
+ * @work:	rport to terminate io on.
  *
  * Notes: Only requests the failure of the io, not that all are flushed
  *    prior to returning.
  **/
 static void
-fc_timeout_fail_rport_io(void  *data)
+fc_timeout_fail_rport_io(struct work_struct *work)
 {
-	struct fc_rport *rport = (struct fc_rport *)data;
+	struct fc_rport *rport =
+		container_of(work, struct fc_rport, fail_io_work.work);
 	struct Scsi_Host *shost = rport_to_shost(rport);
 	struct fc_internal *i = to_fc_internal(shost->transportt);
 
@@ -2387,12 +2388,13 @@
 /**
  * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
  *
- * @data:	remote port to be scanned.
+ * @work:	remote port to be scanned.
  **/
 static void
-fc_scsi_scan_rport(void *data)
+fc_scsi_scan_rport(struct work_struct *work)
 {
-	struct fc_rport *rport = (struct fc_rport *)data;
+	struct fc_rport *rport =
+		container_of(work, struct fc_rport, scan_work);
 	struct Scsi_Host *shost = rport_to_shost(rport);
 	unsigned long flags;
 
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 9b25124..9c22f13 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -234,9 +234,11 @@
 	return 0;
 }
 
-static void session_recovery_timedout(void *data)
+static void session_recovery_timedout(struct work_struct *work)
 {
-	struct iscsi_cls_session *session = data;
+	struct iscsi_cls_session *session =
+		container_of(work, struct iscsi_cls_session,
+			     recovery_work.work);
 
 	dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed "
 		  "out after %d secs\n", session->recovery_tmo);
@@ -276,7 +278,7 @@
 
 	session->transport = transport;
 	session->recovery_tmo = 120;
-	INIT_WORK(&session->recovery_work, session_recovery_timedout, session);
+	INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
 	INIT_LIST_HEAD(&session->host_list);
 	INIT_LIST_HEAD(&session->sess_list);
 
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index b5b0c2c..5c0b75b 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -25,6 +25,7 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/jiffies.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/string.h>
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 9f070f0..3fded48 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -964,9 +964,10 @@
 };
 
 static void
-spi_dv_device_work_wrapper(void *data)
+spi_dv_device_work_wrapper(struct work_struct *work)
 {
-	struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+	struct work_queue_wrapper *wqw =
+		container_of(work, struct work_queue_wrapper, work);
 	struct scsi_device *sdev = wqw->sdev;
 
 	kfree(wqw);
@@ -1006,7 +1007,7 @@
 		return;
 	}
 
-	INIT_WORK(&wqw->work, spi_dv_device_work_wrapper, wqw);
+	INIT_WORK(&wqw->work, spi_dv_device_work_wrapper);
 	wqw->sdev = sdev;
 
 	schedule_work(&wqw->work);
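The scsi_transport_spi hunk shows the same conversion for a one-shot, heap-allocated
work item: the extra context (here the scsi_device) travels in a small wrapper around
the work_struct, is recovered with container_of() in the handler, and the wrapper is
freed there once the context has been copied out.  A sketch of that shape, with
made-up names:

	struct my_wrapper {
		struct work_struct work;
		void *ctx;			/* extra data for the handler */
	};

	static void my_handler(struct work_struct *work)
	{
		struct my_wrapper *w = container_of(work, struct my_wrapper, work);
		void *ctx = w->ctx;		/* copy out before freeing the wrapper */

		kfree(w);
		/* ... use ctx ... */
	}

	/* submission side */
	struct my_wrapper *w = kmalloc(sizeof(*w), GFP_KERNEL);

	if (w) {
		INIT_WORK(&w->work, my_handler);
		w->ctx = some_context;		/* illustrative */
		schedule_work(&w->work);
	}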
diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
new file mode 100644
index 0000000..8a63610
--- /dev/null
+++ b/drivers/scsi/scsi_wait_scan.c
@@ -0,0 +1,31 @@
+/*
+ * scsi_wait_scan.c
+ *
+ * Copyright (C) 2006 James Bottomley <James.Bottomley@SteelEye.com>
+ *
+ * This is a simple module to wait until all the async scans are
+ * complete.  The idea is to use it in initrd/initramfs scripts.  You
+ * modprobe it after all the modprobes of the root SCSI drivers and it
+ * will wait until they have all finished scanning their busses before
+ * allowing the boot to proceed
+ */
+
+#include <linux/module.h>
+#include "scsi_priv.h"
+
+static int __init wait_scan_init(void)
+{
+	scsi_complete_async_scans();
+	return 0;
+}
+
+static void __exit wait_scan_exit(void)
+{
+}
+
+MODULE_DESCRIPTION("SCSI wait for scans");
+MODULE_AUTHOR("James Bottomley");
+MODULE_LICENSE("GPL");
+
+late_initcall(wait_scan_init);
+module_exit(wait_scan_exit);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 84ff203..f6a4528 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1051,6 +1051,14 @@
 						      &sshdr, SD_TIMEOUT,
 						      SD_MAX_RETRIES);
 
+			/*
+			 * If the drive has indicated to us that it
+			 * doesn't have any media in it, don't bother
+			 * with any more polling.
+			 */
+			if (media_not_present(sdkp, &sshdr))
+				return;
+
 			if (the_result)
 				sense_valid = scsi_sense_valid(&sshdr);
 			retries++;
@@ -1059,14 +1067,6 @@
 			  ((driver_byte(the_result) & DRIVER_SENSE) &&
 			  sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
 
-		/*
-		 * If the drive has indicated to us that it doesn't have
-		 * any media in it, don't bother with any of the rest of
-		 * this crap.
-		 */
-		if (media_not_present(sdkp, &sshdr))
-			return;
-
 		if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
 			/* no sense, TUR either succeeded or failed
 			 * with a status error */
@@ -1467,7 +1467,6 @@
 	res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
 
 	if (scsi_status_is_good(res)) {
-		int ct = 0;
 		int offset = data.header_length + data.block_descriptor_length;
 
 		if (offset >= SD_BUF_SIZE - 2) {
@@ -1496,11 +1495,13 @@
 			sdkp->DPOFUA = 0;
 		}
 
-		ct =  sdkp->RCD + 2*sdkp->WCE;
-
-		printk(KERN_NOTICE "SCSI device %s: drive cache: %s%s\n",
-		       diskname, sd_cache_types[ct],
-		       sdkp->DPOFUA ? " w/ FUA" : "");
+		printk(KERN_NOTICE "SCSI device %s: "
+		       "write cache: %s, read cache: %s, %s\n",
+		       diskname,
+		       sdkp->WCE ? "enabled" : "disabled",
+		       sdkp->RCD ? "disabled" : "enabled",
+		       sdkp->DPOFUA ? "supports DPO and FUA"
+		       : "doesn't support DPO or FUA");
 
 		return;
 	}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e1a52c5..587274d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -9,7 +9,7 @@
    Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky,
    Michael Schaefer, J"org Weule, and Eric Youngdale.
 
-   Copyright 1992 - 2005 Kai Makisara
+   Copyright 1992 - 2006 Kai Makisara
    email Kai.Makisara@kolumbus.fi
 
    Some small formal changes - aeb, 950809
@@ -17,7 +17,7 @@
    Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
  */
 
-static const char *verstr = "20050830";
+static const char *verstr = "20061107";
 
 #include <linux/module.h>
 
@@ -999,7 +999,7 @@
 			STp->min_block = ((STp->buffer)->b_data[4] << 8) |
 			    (STp->buffer)->b_data[5];
 			if ( DEB( debugging || ) !STp->inited)
-				printk(KERN_WARNING
+				printk(KERN_INFO
                                        "%s: Block limits %d - %d bytes.\n", name,
                                        STp->min_block, STp->max_block);
 		} else {
@@ -1224,7 +1224,7 @@
 	}
 
 	DEBC( if (STp->nbr_requests)
-		printk(KERN_WARNING "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n",
+		printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n",
 		       name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages, STp->nbr_combinable));
 
 	if (STps->rw == ST_WRITING && !STp->pos_unknown) {
@@ -4056,11 +4056,11 @@
 			goto out_free_tape;
 	}
 
-	sdev_printk(KERN_WARNING, SDp,
+	sdev_printk(KERN_NOTICE, SDp,
 		    "Attached scsi tape %s\n", tape_name(tpnt));
-	printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B)\n",
-	       tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
-	       queue_dma_alignment(SDp->request_queue) + 1);
+	sdev_printk(KERN_INFO, SDp, "%s: try direct i/o: %s (alignment %d B)\n",
+		    tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
+		    queue_dma_alignment(SDp->request_queue) + 1);
 
 	return 0;
 
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 185c270..ba6bcda 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -11,8 +11,6 @@
  *	Written By:
  *		Ed Lin <promise_linux@promise.com>
  *
- *	Version: 3.0.0.1
- *
  */
 
 #include <linux/init.h>
@@ -37,9 +35,9 @@
 #include <scsi/scsi_tcq.h>
 
 #define DRV_NAME "stex"
-#define ST_DRIVER_VERSION "3.0.0.1"
+#define ST_DRIVER_VERSION "3.1.0.1"
 #define ST_VER_MAJOR 		3
-#define ST_VER_MINOR 		0
+#define ST_VER_MINOR 		1
 #define ST_OEM 			0
 #define ST_BUILD_VER 		1
 
@@ -76,8 +74,10 @@
 	MU_STATE_STARTED			= 4,
 	MU_STATE_RESETTING			= 5,
 
-	MU_MAX_DELAY_TIME			= 240000,
+	MU_MAX_DELAY				= 120,
 	MU_HANDSHAKE_SIGNATURE			= 0x55aaaa55,
+	MU_HANDSHAKE_SIGNATURE_HALF		= 0x5a5a0000,
+	MU_HARD_RESET_WAIT			= 30000,
 	HMU_PARTNER_TYPE			= 2,
 
 	/* firmware returned values */
@@ -120,7 +120,8 @@
 
 	st_shasta				= 0,
 	st_vsc					= 1,
-	st_yosemite				= 2,
+	st_vsc1					= 2,
+	st_yosemite				= 3,
 
 	PASSTHRU_REQ_TYPE			= 0x00000001,
 	PASSTHRU_REQ_NO_WAKEUP			= 0x00000100,
@@ -150,6 +151,8 @@
 	MGT_CMD_SIGNATURE			= 0xba,
 
 	INQUIRY_EVPD				= 0x01,
+
+	ST_ADDITIONAL_MEM			= 0x200000,
 };
 
 /* SCSI inquiry data */
@@ -211,7 +214,9 @@
 	__le32 partner_ver_minor;
 	__le32 partner_ver_oem;
 	__le32 partner_ver_build;
-	u32 reserved1[4];
+	__le32 extra_offset;	/* NEW */
+	__le32 extra_size;	/* NEW */
+	u32 reserved1[2];
 };
 
 struct req_msg {
@@ -302,6 +307,7 @@
 	void __iomem *mmio_base;	/* iomapped PCI memory space */
 	void *dma_mem;
 	dma_addr_t dma_handle;
+	size_t dma_size;
 
 	struct Scsi_Host *host;
 	struct pci_dev *pdev;
@@ -507,6 +513,7 @@
 	size_t count = sizeof(struct st_frame);
 
 	p = hba->copy_buffer;
+	stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_FROM_CMD);
 	memset(p->base, 0, sizeof(u32)*6);
 	*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
 	p->rom_addr = 0;
@@ -901,27 +908,34 @@
 	void __iomem *base = hba->mmio_base;
 	struct handshake_frame *h;
 	dma_addr_t status_phys;
-	int i;
+	u32 data;
+	unsigned long before;
 
 	if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
 		writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
 		readl(base + IDBL);
-		for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE
-			&& i < MU_MAX_DELAY_TIME; i++) {
+		before = jiffies;
+		while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
+			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
+				printk(KERN_ERR DRV_NAME
+					"(%s): no handshake signature\n",
+					pci_name(hba->pdev));
+				return -1;
+			}
 			rmb();
 			msleep(1);
 		}
-
-		if (i == MU_MAX_DELAY_TIME) {
-			printk(KERN_ERR DRV_NAME
-				"(%s): no handshake signature\n",
-				pci_name(hba->pdev));
-			return -1;
-		}
 	}
 
 	udelay(10);
 
+	data = readl(base + OMR1);
+	if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
+		data &= 0x0000ffff;
+		if (hba->host->can_queue > data)
+			hba->host->can_queue = data;
+	}
+
 	h = (struct handshake_frame *)(hba->dma_mem + MU_REQ_BUFFER_SIZE);
 	h->rb_phy = cpu_to_le32(hba->dma_handle);
 	h->rb_phy_hi = cpu_to_le32((hba->dma_handle >> 16) >> 16);
@@ -931,6 +945,11 @@
 	h->status_cnt = cpu_to_le16(MU_STATUS_COUNT);
 	stex_gettime(&h->hosttime);
 	h->partner_type = HMU_PARTNER_TYPE;
+	if (hba->dma_size > STEX_BUFFER_SIZE) {
+		h->extra_offset = cpu_to_le32(STEX_BUFFER_SIZE);
+		h->extra_size = cpu_to_le32(ST_ADDITIONAL_MEM);
+	} else
+		h->extra_offset = h->extra_size = 0;
 
 	status_phys = hba->dma_handle + MU_REQ_BUFFER_SIZE;
 	writel(status_phys, base + IMR0);
@@ -944,19 +963,18 @@
 	readl(base + IDBL); /* flush */
 
 	udelay(10);
-	for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE
-		&& i < MU_MAX_DELAY_TIME; i++) {
+	before = jiffies;
+	while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
+		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
+			printk(KERN_ERR DRV_NAME
+				"(%s): no signature after handshake frame\n",
+				pci_name(hba->pdev));
+			return -1;
+		}
 		rmb();
 		msleep(1);
 	}
 
-	if (i == MU_MAX_DELAY_TIME) {
-		printk(KERN_ERR DRV_NAME
-			"(%s): no signature after handshake frame\n",
-			pci_name(hba->pdev));
-		return -1;
-	}
-
 	writel(0, base + IMR0);
 	readl(base + IMR0);
 	writel(0, base + OMR0);
@@ -1038,9 +1056,9 @@
 	pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
 	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
 
-	for (i = 0; i < MU_MAX_DELAY_TIME; i++) {
+	for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
 		pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
-		if (pci_cmd & PCI_COMMAND_MASTER)
+		if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))
 			break;
 		msleep(1);
 	}
@@ -1100,18 +1118,18 @@
 static int stex_biosparam(struct scsi_device *sdev,
 	struct block_device *bdev, sector_t capacity, int geom[])
 {
-	int heads = 255, sectors = 63, cylinders;
+	int heads = 255, sectors = 63;
 
 	if (capacity < 0x200000) {
 		heads = 64;
 		sectors = 32;
 	}
 
-	cylinders = sector_div(capacity, heads * sectors);
+	sector_div(capacity, heads * sectors);
 
 	geom[0] = heads;
 	geom[1] = sectors;
-	geom[2] = cylinders;
+	geom[2] = capacity;
 
 	return 0;
 }
@@ -1193,8 +1211,13 @@
 		goto out_iounmap;
 	}
 
+	hba->cardtype = (unsigned int) id->driver_data;
+	if (hba->cardtype == st_vsc && (pdev->subsystem_device & 0xf) == 0x1)
+		hba->cardtype = st_vsc1;
+	hba->dma_size = (hba->cardtype == st_vsc1) ?
+		(STEX_BUFFER_SIZE + ST_ADDITIONAL_MEM) : (STEX_BUFFER_SIZE);
 	hba->dma_mem = dma_alloc_coherent(&pdev->dev,
-		STEX_BUFFER_SIZE, &hba->dma_handle, GFP_KERNEL);
+		hba->dma_size, &hba->dma_handle, GFP_KERNEL);
 	if (!hba->dma_mem) {
 		err = -ENOMEM;
 		printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
@@ -1207,8 +1230,6 @@
 	hba->copy_buffer = hba->dma_mem + MU_BUFFER_SIZE;
 	hba->mu_status = MU_STATE_STARTING;
 
-	hba->cardtype = (unsigned int) id->driver_data;
-
 	/* firmware uses id/lun pair for a logical drive, but lun would be
 	   always 0 if CONFIG_SCSI_MULTI_LUN not configured, so we use
 	   channel to map lun here */
@@ -1233,7 +1254,7 @@
 	if (err)
 		goto out_free_irq;
 
-	err = scsi_init_shared_tag_map(host, ST_CAN_QUEUE);
+	err = scsi_init_shared_tag_map(host, host->can_queue);
 	if (err) {
 		printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n",
 			pci_name(pdev));
@@ -1256,7 +1277,7 @@
 out_free_irq:
 	free_irq(pdev->irq, hba);
 out_pci_free:
-	dma_free_coherent(&pdev->dev, STEX_BUFFER_SIZE,
+	dma_free_coherent(&pdev->dev, hba->dma_size,
 			  hba->dma_mem, hba->dma_handle);
 out_iounmap:
 	iounmap(hba->mmio_base);
@@ -1317,7 +1338,7 @@
 
 	pci_release_regions(hba->pdev);
 
-	dma_free_coherent(&hba->pdev->dev, STEX_BUFFER_SIZE,
+	dma_free_coherent(&hba->pdev->dev, hba->dma_size,
 			  hba->dma_mem, hba->dma_handle);
 }
 
@@ -1346,15 +1367,32 @@
 }
 
 static struct pci_device_id stex_pci_tbl[] = {
-	{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
-	{ 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
-	{ 0x105a, 0xf350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
-	{ 0x105a, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
-	{ 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
-	{ 0x105a, 0x8301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
-	{ 0x105a, 0x8302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
-	{ 0x1725, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
-	{ 0x105a, 0x8650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yosemite },
+	/* st_shasta */
+	{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
+	{ 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		st_shasta }, /* SuperTrak EX12350 */
+	{ 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		st_shasta }, /* SuperTrak EX4350 */
+	{ 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		st_shasta }, /* SuperTrak EX24350 */
+
+	/* st_vsc */
+	{ 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
+
+	/* st_yosemite */
+	{ 0x105a, 0x8650, PCI_ANY_ID, 0x4600, 0, 0,
+		st_yosemite }, /* SuperTrak EX4650 */
+	{ 0x105a, 0x8650, PCI_ANY_ID, 0x4610, 0, 0,
+		st_yosemite }, /* SuperTrak EX4650o */
+	{ 0x105a, 0x8650, PCI_ANY_ID, 0x8600, 0, 0,
+		st_yosemite }, /* SuperTrak EX8650EL */
+	{ 0x105a, 0x8650, PCI_ANY_ID, 0x8601, 0, 0,
+		st_yosemite }, /* SuperTrak EX8650 */
+	{ 0x105a, 0x8650, PCI_ANY_ID, 0x8602, 0, 0,
+		st_yosemite }, /* SuperTrak EX8654 */
+	{ 0x105a, 0x8650, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		st_yosemite }, /* generic st_yosemite */
 	{ }	/* terminate list */
 };
 MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
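The stex handshake rework above replaces loops bounded by an iteration count
(MU_MAX_DELAY_TIME passes of msleep(1)) with wall-clock timeouts: the deadline is
expressed in jiffies and checked with time_after(), which stays correct if msleep()
oversleeps and also copes with jiffies wrap-around.  The general shape, as a sketch
with an illustrative condition and timeout:

	unsigned long before = jiffies;

	while (!condition_met()) {		/* e.g. readl(reg) != EXPECTED */
		if (time_after(jiffies, before + TIMEOUT_SECS * HZ)) {
			/* timed out: report failure to the caller */
			return -1;
		}
		rmb();
		msleep(1);
	}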
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h
index 646e840..76a069b 100644
--- a/drivers/scsi/t128.h
+++ b/drivers/scsi/t128.h
@@ -8,20 +8,20 @@
  *	drew@colorado.edu
  *      +1 (303) 440-4894
  *
- * DISTRIBUTION RELEASE 3. 
+ * DISTRIBUTION RELEASE 3.
  *
- * For more information, please consult 
+ * For more information, please consult
  *
  * Trantor Systems, Ltd.
  * T128/T128F/T228 SCSI Host Adapter
  * Hardware Specifications
- * 
- * Trantor Systems, Ltd. 
+ *
+ * Trantor Systems, Ltd.
  * 5415 Randall Place
  * Fremont, CA 94538
  * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
- * 
- * and 
+ *
+ * and
  *
  * NCR 5380 Family
  * SCSI Protocol Controller
@@ -48,15 +48,15 @@
 #define TDEBUG_TRANSFER 0x2
 
 /*
- * The trantor boards are memory mapped. They use an NCR5380 or 
+ * The trantor boards are memory mapped. They use an NCR5380 or
  * equivalent (my sample board had part second sourced from ZILOG).
- * NCR's recommended "Pseudo-DMA" architecture is used, where 
+ * NCR's recommended "Pseudo-DMA" architecture is used, where
  * a PAL drives the DMA signals on the 5380 allowing fast, blind
- * transfers with proper handshaking. 
+ * transfers with proper handshaking.
  */
 
 /*
- * Note : a boot switch is provided for the purpose of informing the 
+ * Note : a boot switch is provided for the purpose of informing the
  * firmware to boot or not boot from attached SCSI devices.  So, I imagine
  * there are fewer people who've yanked the ROM like they do on the Seagate
  * to make bootup faster, and I'll probably use this for autodetection.
@@ -92,19 +92,20 @@
 #define T_DATA_REG_OFFSET	0x1e00	/* rw 512 bytes long */
 
 #ifndef ASM
-static int t128_abort(Scsi_Cmnd *);
+static int t128_abort(struct scsi_cmnd *);
 static int t128_biosparam(struct scsi_device *, struct block_device *,
 			  sector_t, int*);
 static int t128_detect(struct scsi_host_template *);
-static int t128_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
-static int t128_bus_reset(Scsi_Cmnd *);
+static int t128_queue_command(struct scsi_cmnd *,
+			      void (*done)(struct scsi_cmnd *));
+static int t128_bus_reset(struct scsi_cmnd *);
 
 #ifndef CMD_PER_LUN
 #define CMD_PER_LUN 2
 #endif
 
 #ifndef CAN_QUEUE
-#define CAN_QUEUE 32 
+#define CAN_QUEUE 32
 #endif
 
 #ifndef HOSTS_C
@@ -120,7 +121,7 @@
 
 #define T128_address(reg) (base + T_5380_OFFSET + ((reg) * 0x20))
 
-#if !(TDEBUG & TDEBUG_TRANSFER) 
+#if !(TDEBUG & TDEBUG_TRANSFER)
 #define NCR5380_read(reg) readb(T128_address(reg))
 #define NCR5380_write(reg, value) writeb((value),(T128_address(reg)))
 #else
@@ -129,7 +130,7 @@
     , instance->hostno, (reg), T128_address(reg))), readb(T128_address(reg)))
 
 #define NCR5380_write(reg, value) {					\
-    printk("scsi%d : write %02x to register %d at address %08x\n", 	\
+    printk("scsi%d : write %02x to register %d at address %08x\n",	\
 	    instance->hostno, (value), (reg), T128_address(reg));	\
     writeb((value), (T128_address(reg)));				\
 }
@@ -142,10 +143,10 @@
 #define NCR5380_bus_reset t128_bus_reset
 #define NCR5380_proc_info t128_proc_info
 
-/* 15 14 12 10 7 5 3 
+/* 15 14 12 10 7 5 3
    1101 0100 1010 1000 */
-   
-#define T128_IRQS 0xc4a8 
+
+#define T128_IRQS 0xc4a8
 
 #endif /* else def HOSTS_C */
 #endif /* ndef ASM */
diff --git a/drivers/serial/8250_exar_st16c554.c b/drivers/serial/8250_exar_st16c554.c
new file mode 100644
index 0000000..567143a
--- /dev/null
+++ b/drivers/serial/8250_exar_st16c554.c
@@ -0,0 +1,52 @@
+/*
+ *  linux/drivers/serial/8250_exar.c
+ *
+ *  Written by Paul B Schroeder < pschroeder "at" uplogix "dot" com >
+ *  Based on 8250_boca.
+ *
+ *  Copyright (C) 2005 Russell King.
+ *  Data taken from include/asm-i386/serial.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+
+#define PORT(_base,_irq)				\
+	{						\
+		.iobase		= _base,		\
+		.irq		= _irq,			\
+		.uartclk	= 1843200,		\
+		.iotype		= UPIO_PORT,		\
+		.flags		= UPF_BOOT_AUTOCONF,	\
+	}
+
+static struct plat_serial8250_port exar_data[] = {
+	PORT(0x100, 5),
+	PORT(0x108, 5),
+	PORT(0x110, 5),
+	PORT(0x118, 5),
+	{ },
+};
+
+static struct platform_device exar_device = {
+	.name			= "serial8250",
+	.id			= PLAT8250_DEV_EXAR_ST16C554,
+	.dev			= {
+		.platform_data	= exar_data,
+	},
+};
+
+static int __init exar_init(void)
+{
+	return platform_device_register(&exar_device);
+}
+
+module_init(exar_init);
+
+MODULE_AUTHOR("Paul B Schroeder");
+MODULE_DESCRIPTION("8250 serial probe module for Exar cards");
+MODULE_LICENSE("GPL");
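For reference, each PORT() entry in the table above expands to a fully initialised
plat_serial8250_port, so PORT(0x100, 5) is equivalent to:

	{
		.iobase		= 0x100,
		.irq		= 5,
		.uartclk	= 1843200,
		.iotype		= UPIO_PORT,
		.flags		= UPF_BOOT_AUTOCONF,
	},

The module then registers a single "serial8250" platform device whose platform_data
is this zero-terminated array, and the 8250 core probes the four ports at I/O
0x100-0x118 on IRQ 5.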
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index 71d907c..d3d6b82 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -464,11 +464,38 @@
 		serial8250_unregister_port(line - 1);
 }
 
+#ifdef CONFIG_PM
+static int serial_pnp_suspend(struct pnp_dev *dev, pm_message_t state)
+{
+	long line = (long)pnp_get_drvdata(dev);
+
+	if (!line)
+		return -ENODEV;
+	serial8250_suspend_port(line - 1);
+	return 0;
+}
+
+static int serial_pnp_resume(struct pnp_dev *dev)
+{
+	long line = (long)pnp_get_drvdata(dev);
+
+	if (!line)
+		return -ENODEV;
+	serial8250_resume_port(line - 1);
+	return 0;
+}
+#else
+#define serial_pnp_suspend NULL
+#define serial_pnp_resume NULL
+#endif /* CONFIG_PM */
+
 static struct pnp_driver serial_pnp_driver = {
 	.name		= "serial",
-	.id_table	= pnp_dev_table,
 	.probe		= serial_pnp_probe,
 	.remove		= __devexit_p(serial_pnp_remove),
+	.suspend	= serial_pnp_suspend,
+	.resume		= serial_pnp_resume,
+	.id_table	= pnp_dev_table,
 };
 
 static int __init serial8250_pnp_init(void)
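The new suspend/resume hooks rely on the driver's existing convention (not visible in
this hunk) that pnp_get_drvdata() holds the 8250 line number offset by one, so a
drvdata of 0 means no port was bound; hence the !line check and the line - 1 passed to
the 8250 core.  A sketch of the assumed probe-side counterpart, not the file's actual
probe body:

	static int serial_pnp_probe(struct pnp_dev *dev,
				    const struct pnp_device_id *dev_id)
	{
		struct uart_port port;
		int line;

		/* ... fill in port from the PNP resources ... */
		line = serial8250_register_port(&port);
		if (line < 0)
			return -ENODEV;

		pnp_set_drvdata(dev, (void *)((long)line + 1));	/* 0 == unbound */
		return 0;
	}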
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 0b71e7d..fc12d5d 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -210,6 +210,17 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called 8250_boca.
 
+config SERIAL_8250_EXAR_ST16C554
+	tristate "Support Exar ST16C554/554D Quad UART"
+	depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+	help
+	  The Uplogix Envoy TU301 uses this Exar Quad UART.  If you are
+	  tinkering with your Envoy TU301, or have a machine with this UART,
+	  say Y here.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called 8250_exar_st16c554.
+
 config SERIAL_8250_HUB6
 	tristate "Support Hub6 cards"
 	depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
@@ -511,6 +522,25 @@
 	  your boot loader (lilo or loadlin) about how to pass options to the
 	  kernel at boot time.)
 
+config SERIAL_UARTLITE
+	tristate "Xilinx uartlite serial port support"
+	depends on PPC32
+	select SERIAL_CORE
+	help
+	  Say Y here if you want to use the Xilinx uartlite serial controller.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called uartlite.ko.
+
+config SERIAL_UARTLITE_CONSOLE
+	bool "Support for console on Xilinx uartlite serial port"
+	depends on SERIAL_UARTLITE=y
+	select SERIAL_CORE_CONSOLE
+	help
+	  Say Y here if you wish to use a Xilinx uartlite as the system
+	  console (the system console is the device which receives all kernel
+	  messages and warnings and which allows logins in single user mode).
+
 config SERIAL_SUNCORE
 	bool
 	depends on SPARC
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index b4d8a7c..df3632c 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -17,6 +17,7 @@
 obj-$(CONFIG_SERIAL_8250_FOURPORT) += 8250_fourport.o
 obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o
 obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o
+obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o
 obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
 obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o
 obj-$(CONFIG_SERIAL_8250_AU1X00) += 8250_au1x00.o
@@ -55,4 +56,5 @@
 obj-$(CONFIG_SERIAL_SGI_IOC4) += ioc4_serial.o
 obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o
 obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
+obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
 obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index 4213fab..4d3626e 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -129,6 +129,8 @@
 		 */
 		rsr = readb(port->membase + UART01x_RSR) | UART_DUMMY_RSR_RX;
 		if (unlikely(rsr & UART01x_RSR_ANY)) {
+			writel(0, port->membase + UART01x_ECR);
+
 			if (rsr & UART01x_RSR_BE) {
 				rsr &= ~(UART01x_RSR_FE | UART01x_RSR_PE);
 				port->icount.brk++;
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
index b691d3e..787a8f1 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
@@ -282,7 +282,7 @@
 }
 
 /* Setup any dynamic params in the uart desc */
-int cpm_uart_init_portdesc(void)
+int __init cpm_uart_init_portdesc(void)
 {
 #if defined(CONFIG_SERIAL_CPM_SMC1) || defined(CONFIG_SERIAL_CPM_SMC2)
 	u32 addr;
diff --git a/drivers/serial/dz.c b/drivers/serial/dz.c
index 53662b3..af1544f 100644
--- a/drivers/serial/dz.c
+++ b/drivers/serial/dz.c
@@ -1,11 +1,13 @@
 /*
- * dz.c: Serial port driver for DECStations equiped
+ * dz.c: Serial port driver for DECstations equipped
  *       with the DZ chipset.
  *
  * Copyright (C) 1998 Olivier A. D. Lebaillif
  *
  * Email: olivier.lebaillif@ifrsys.com
  *
+ * Copyright (C) 2004, 2006  Maciej W. Rozycki
+ *
  * [31-AUG-98] triemer
  * Changed IRQ to use Harald's dec internals interrupts.h
  * removed base_addr code - moving address assignment to setup.c
@@ -26,10 +28,16 @@
 
 #undef DEBUG_DZ
 
+#if defined(CONFIG_SERIAL_DZ_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/console.h>
+#include <linux/sysrq.h>
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
@@ -45,14 +53,10 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
-#define CONSOLE_LINE (3)	/* for definition of struct console */
-
 #include "dz.h"
 
-#define DZ_INTR_DEBUG 1
-
 static char *dz_name = "DECstation DZ serial driver version ";
-static char *dz_version = "1.02";
+static char *dz_version = "1.03";
 
 struct dz_port {
 	struct uart_port	port;
@@ -61,22 +65,6 @@
 
 static struct dz_port dz_ports[DZ_NB_PORT];
 
-#ifdef DEBUG_DZ
-/*
- * debugging code to send out chars via prom
- */
-static void debug_console(const char *s, int count)
-{
-	unsigned i;
-
-	for (i = 0; i < count; i++) {
-		if (*s == 10)
-			prom_printf("%c", 13);
-		prom_printf("%c", *s++);
-	}
-}
-#endif
-
 /*
  * ------------------------------------------------------------
  * dz_in () and dz_out ()
@@ -90,6 +78,7 @@
 {
 	volatile unsigned short *addr =
 		(volatile unsigned short *) (dport->port.membase + offset);
+
 	return *addr;
 }
 
@@ -98,6 +87,7 @@
 {
 	volatile unsigned short *addr =
 		(volatile unsigned short *) (dport->port.membase + offset);
+
 	*addr = value;
 }
 
@@ -144,7 +134,7 @@
 
 	spin_lock_irqsave(&dport->port.lock, flags);
 	dport->cflag &= ~DZ_CREAD;
-	dz_out(dport, DZ_LPR, dport->cflag);
+	dz_out(dport, DZ_LPR, dport->cflag | dport->port.line);
 	spin_unlock_irqrestore(&dport->port.lock, flags);
 }
 
@@ -155,14 +145,14 @@
 
 /*
  * ------------------------------------------------------------
- * Here starts the interrupt handling routines.  All of the
- * following subroutines are declared as inline and are folded
- * into dz_interrupt.  They were separated out for readability's
- * sake.
  *
- * Note: rs_interrupt() is a "fast" interrupt, which means that it
+ * Here start the interrupt handling routines.  All of the following
+ * subroutines are declared as inline and are folded into
+ * dz_interrupt.  They were separated out for readability's sake.
+ *
+ * Note: dz_interrupt() is a "fast" interrupt, which means that it
  * runs with interrupts turned off.  People who may want to modify
- * rs_interrupt() should try to keep the interrupt handler as fast as
+ * dz_interrupt() should try to keep the interrupt handler as fast as
  * possible.  After you are done making modifications, it is not a bad
  * idea to do:
  *
@@ -180,92 +170,74 @@
  * This routine deals with inputs from any lines.
  * ------------------------------------------------------------
  */
-static inline void dz_receive_chars(struct dz_port *dport)
+static inline void dz_receive_chars(struct dz_port *dport_in,
+				    struct pt_regs *regs)
 {
+	struct dz_port *dport;
 	struct tty_struct *tty = NULL;
 	struct uart_icount *icount;
-	int ignore = 0;
-	unsigned short status, tmp;
+	int lines_rx[DZ_NB_PORT] = { [0 ... DZ_NB_PORT - 1] = 0 };
+	unsigned short status;
 	unsigned char ch, flag;
+	int i;
 
-	/* this code is going to be a problem...
-	   the call to tty_flip_buffer is going to need
-	   to be rethought...
-	 */
-	do {
-		status = dz_in(dport, DZ_RBUF);
+	while ((status = dz_in(dport_in, DZ_RBUF)) & DZ_DVAL) {
+		dport = &dz_ports[LINE(status)];
+		tty = dport->port.info->tty;	/* point to the proper dev */
 
-		/* punt so we don't get duplicate characters */
-		if (!(status & DZ_DVAL))
-			goto ignore_char;
+		ch = UCHAR(status);		/* grab the char */
 
-
-		ch = UCHAR(status);	/* grab the char */
-		flag = TTY_NORMAL;
-
-#if 0
-		if (info->is_console) {
-			if (ch == 0)
-				return;		/* it's a break ... */
-		}
-#endif
-
-		tty = dport->port.info->tty;/* now tty points to the proper dev */
 		icount = &dport->port.icount;
-
-		if (!tty)
-			break;
-
 		icount->rx++;
 
-		/* keep track of the statistics */
-		if (status & (DZ_OERR | DZ_FERR | DZ_PERR)) {
-			if (status & DZ_PERR)	/* parity error */
-				icount->parity++;
-			else if (status & DZ_FERR)	/* frame error */
-				icount->frame++;
-			if (status & DZ_OERR)	/* overrun error */
-				icount->overrun++;
-
-			/*  check to see if we should ignore the character
-			   and mask off conditions that should be ignored
+		flag = TTY_NORMAL;
+		if (status & DZ_FERR) {		/* frame error */
+			/*
+			 * There is no separate BREAK status bit, so
+			 * treat framing errors as BREAKs for Magic SysRq
+			 * and SAK; normally, otherwise.
 			 */
-
-			if (status & dport->port.ignore_status_mask) {
-				if (++ignore > 100)
-					break;
-				goto ignore_char;
-			}
-			/* mask off the error conditions we want to ignore */
-			tmp = status & dport->port.read_status_mask;
-
-			if (tmp & DZ_PERR) {
-				flag = TTY_PARITY;
-#ifdef DEBUG_DZ
-				debug_console("PERR\n", 5);
-#endif
-			} else if (tmp & DZ_FERR) {
+			if (uart_handle_break(&dport->port))
+				continue;
+			if (dport->port.flags & UPF_SAK)
+				flag = TTY_BREAK;
+			else
 				flag = TTY_FRAME;
-#ifdef DEBUG_DZ
-				debug_console("FERR\n", 5);
-#endif
-			}
-			if (tmp & DZ_OERR) {
-#ifdef DEBUG_DZ
-				debug_console("OERR\n", 5);
-#endif
-				tty_insert_flip_char(tty, ch, flag);
-				ch = 0;
-				flag = TTY_OVERRUN;
-			}
-		}
-		tty_insert_flip_char(tty, ch, flag);
-	      ignore_char:
-			;
-	} while (status & DZ_DVAL);
+		} else if (status & DZ_OERR)	/* overrun error */
+			flag = TTY_OVERRUN;
+		else if (status & DZ_PERR)	/* parity error */
+			flag = TTY_PARITY;
 
-	if (tty)
-		tty_flip_buffer_push(tty);
+		/* keep track of the statistics */
+		switch (flag) {
+		case TTY_FRAME:
+			icount->frame++;
+			break;
+		case TTY_PARITY:
+			icount->parity++;
+			break;
+		case TTY_OVERRUN:
+			icount->overrun++;
+			break;
+		case TTY_BREAK:
+			icount->brk++;
+			break;
+		default:
+			break;
+		}
+
+		if (uart_handle_sysrq_char(&dport->port, ch, regs))
+			continue;
+
+		if ((status & dport->port.ignore_status_mask) == 0) {
+			uart_insert_char(&dport->port,
+					 status, DZ_OERR, ch, flag);
+			lines_rx[LINE(status)] = 1;
+		}
+	}
+	for (i = 0; i < DZ_NB_PORT; i++)
+		if (lines_rx[i])
+			tty_flip_buffer_push(dz_ports[i].port.info->tty);
 }
 
 /*
@@ -275,26 +247,32 @@
  * This routine deals with outputs to any lines.
  * ------------------------------------------------------------
  */
-static inline void dz_transmit_chars(struct dz_port *dport)
+static inline void dz_transmit_chars(struct dz_port *dport_in)
 {
-	struct circ_buf *xmit = &dport->port.info->xmit;
+	struct dz_port *dport;
+	struct circ_buf *xmit;
+	unsigned short status;
 	unsigned char tmp;
 
-	if (dport->port.x_char) {	/* XON/XOFF chars */
+	status = dz_in(dport_in, DZ_CSR);
+	dport = &dz_ports[LINE(status)];
+	xmit = &dport->port.info->xmit;
+
+	if (dport->port.x_char) {		/* XON/XOFF chars */
 		dz_out(dport, DZ_TDR, dport->port.x_char);
 		dport->port.icount.tx++;
 		dport->port.x_char = 0;
 		return;
 	}
-	/* if nothing to do or stopped or hardware stopped */
+	/* If nothing to do or stopped or hardware stopped. */
 	if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) {
 		dz_stop_tx(&dport->port);
 		return;
 	}
 
 	/*
-	 * if something to do ... (rember the dz has no output fifo so we go
-	 * one char at a time :-<
+	 * If something to do... (remember the dz has no output fifo,
+	 * so we go one char at a time) :-<
 	 */
 	tmp = xmit->buf[xmit->tail];
 	xmit->tail = (xmit->tail + 1) & (DZ_XMIT_SIZE - 1);
@@ -304,23 +282,29 @@
 	if (uart_circ_chars_pending(xmit) < DZ_WAKEUP_CHARS)
 		uart_write_wakeup(&dport->port);
 
-	/* Are we done */
+	/* Are we done? */
 	if (uart_circ_empty(xmit))
 		dz_stop_tx(&dport->port);
 }
 
 /*
  * ------------------------------------------------------------
- * check_modem_status ()
+ * check_modem_status()
  *
- * Only valid for the MODEM line duh !
+ * DS 3100 & 5100: Only valid for the MODEM line, duh!
+ * DS 5000/200: Valid for the MODEM and PRINTER line.
  * ------------------------------------------------------------
  */
 static inline void check_modem_status(struct dz_port *dport)
 {
+	/*
+	 * FIXME:
+	 * 1. No status change interrupt; use a timer.
+	 * 2. Handle the 3100/5000 as appropriate. --macro
+	 */
 	unsigned short status;
 
-	/* if not ne modem line just return */
+	/* If not the modem line just return.  */
 	if (dport->port.line != DZ_MODEM)
 		return;
 
@@ -341,21 +325,18 @@
  */
 static irqreturn_t dz_interrupt(int irq, void *dev)
 {
-	struct dz_port *dport;
+	struct dz_port *dport = (struct dz_port *)dev;
 	unsigned short status;
 
 	/* get the reason why we just got an irq */
-	status = dz_in((struct dz_port *)dev, DZ_CSR);
-	dport = &dz_ports[LINE(status)];
+	status = dz_in(dport, DZ_CSR);
 
-	if (status & DZ_RDONE)
-		dz_receive_chars(dport);
+	if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE))
+		dz_receive_chars(dport, regs);
 
-	if (status & DZ_TRDY)
+	if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE))
 		dz_transmit_chars(dport);
 
-	/* FIXME: what about check modem status??? --rmk */
-
 	return IRQ_HANDLED;
 }
 
@@ -367,13 +348,13 @@
 
 static unsigned int dz_get_mctrl(struct uart_port *uport)
 {
+	/*
+	 * FIXME: Handle the 3100/5000 as appropriate. --macro
+	 */
 	struct dz_port *dport = (struct dz_port *)uport;
 	unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
 
 	if (dport->port.line == DZ_MODEM) {
-		/*
-		 * CHECKME: This is a guess from the other code... --rmk
-		 */
 		if (dz_in(dport, DZ_MSR) & DZ_MODEM_DSR)
 			mctrl &= ~TIOCM_DSR;
 	}
@@ -383,6 +364,9 @@
 
 static void dz_set_mctrl(struct uart_port *uport, unsigned int mctrl)
 {
+	/*
+	 * FIXME: Handle the 3100/5000 as appropriate. --macro
+	 */
 	struct dz_port *dport = (struct dz_port *)uport;
 	unsigned short tmp;
 
@@ -409,13 +393,6 @@
 	unsigned long flags;
 	unsigned short tmp;
 
-	/* The dz lines for the mouse/keyboard must be
-	 * opened using their respective drivers.
-	 */
-	if ((dport->port.line == DZ_KEYBOARD) ||
-	    (dport->port.line == DZ_MOUSE))
-		return -ENODEV;
-
 	spin_lock_irqsave(&dport->port.lock, flags);
 
 	/* enable the interrupt and the scanning */
@@ -442,7 +419,8 @@
 }
 
 /*
- * get_lsr_info - get line status register info
+ * -------------------------------------------------------------------
+ * dz_tx_empty() -- get the transmitter empty status
  *
  * Purpose: Let user call ioctl() to get info when the UART physically
  *          is emptied.  On bus types like RS485, the transmitter must
@@ -450,21 +428,28 @@
  *          the transmit shift register is empty, not be done when the
  *          transmit holding register is empty.  This functionality
  *          allows an RS485 driver to be written in user space.
+ * -------------------------------------------------------------------
  */
 static unsigned int dz_tx_empty(struct uart_port *uport)
 {
 	struct dz_port *dport = (struct dz_port *)uport;
-	unsigned short status = dz_in(dport, DZ_LPR);
+	unsigned short tmp, mask = 1 << dport->port.line;
 
-	/* FIXME: this appears to be obviously broken --rmk. */
-	return status ? TIOCSER_TEMT : 0;
+	tmp = dz_in(dport, DZ_TCR);
+	tmp &= mask;
+
+	return tmp ? 0 : TIOCSER_TEMT;
 }
 
 static void dz_break_ctl(struct uart_port *uport, int break_state)
 {
+	/*
+	 * FIXME: Can't access BREAK bits in TDR easily;
+	 * reuse the code for polled TX. --macro
+	 */
 	struct dz_port *dport = (struct dz_port *)uport;
 	unsigned long flags;
-	unsigned short tmp, mask = 1 << uport->line;
+	unsigned short tmp, mask = 1 << dport->port.line;
 
 	spin_lock_irqsave(&uport->lock, flags);
 	tmp = dz_in(dport, DZ_TCR);
@@ -561,7 +546,7 @@
 
 	spin_lock_irqsave(&dport->port.lock, flags);
 
-	dz_out(dport, DZ_LPR, cflag);
+	dz_out(dport, DZ_LPR, cflag | dport->port.line);
 	dport->cflag = cflag;
 
 	/* setup accept flag */
@@ -650,7 +635,7 @@
 	for (i = 0, dport = dz_ports; i < DZ_NB_PORT; i++, dport++) {
 		spin_lock_init(&dport->port.lock);
 		dport->port.membase	= (char *) base;
-		dport->port.iotype	= UPIO_PORT;
+		dport->port.iotype	= UPIO_MEM;
 		dport->port.irq		= dec_interrupt[DEC_IRQ_DZ11];
 		dport->port.line	= i;
 		dport->port.fifosize	= 1;
@@ -662,10 +647,7 @@
 static void dz_reset(struct dz_port *dport)
 {
 	dz_out(dport, DZ_CSR, DZ_CLR);
-
 	while (dz_in(dport, DZ_CSR) & DZ_CLR);
-		/* FIXME: cpu_relax? */
-
 	iob();
 
 	/* enable scanning */
@@ -673,26 +655,55 @@
 }
 
 #ifdef CONFIG_SERIAL_DZ_CONSOLE
+/*
+ * -------------------------------------------------------------------
+ * dz_console_putchar() -- transmit a character
+ *
+ * Polled transmission.  This is tricky.  We need to mask transmit
+ * interrupts so that they do not interfere, enable the transmitter
+ * for the line requested and then wait till the transmit scanner
+ * requests data for this line.  But it may request data for another
+ * line first, in which case we have to disable its transmitter and
+ * repeat waiting till our line pops up.  Only then the character may
+ * be transmitted.  Finally, the state of the transmitter mask is
+ * restored.  Welcome to the world of PDP-11!
+ * -------------------------------------------------------------------
+ */
 static void dz_console_putchar(struct uart_port *uport, int ch)
 {
 	struct dz_port *dport = (struct dz_port *)uport;
 	unsigned long flags;
-	int loops = 2500;
-	unsigned short tmp = (unsigned char)ch;
-	/* this code sends stuff out to serial device - spinning its
-	   wheels and waiting. */
+	unsigned short csr, tcr, trdy, mask;
+	int loops = 10000;
 
 	spin_lock_irqsave(&dport->port.lock, flags);
-
-	/* spin our wheels */
-	while (((dz_in(dport, DZ_CSR) & DZ_TRDY) != DZ_TRDY) && loops--)
-		/* FIXME: cpu_relax, udelay? --rmk */
-		;
-
-	/* Actually transmit the character. */
-	dz_out(dport, DZ_TDR, tmp);
-
+	csr = dz_in(dport, DZ_CSR);
+	dz_out(dport, DZ_CSR, csr & ~DZ_TIE);
+	tcr = dz_in(dport, DZ_TCR);
+	tcr |= 1 << dport->port.line;
+	mask = tcr;
+	dz_out(dport, DZ_TCR, mask);
+	iob();
 	spin_unlock_irqrestore(&dport->port.lock, flags);
+
+	while (loops--) {
+		trdy = dz_in(dport, DZ_CSR);
+		if (!(trdy & DZ_TRDY))
+			continue;
+		trdy = (trdy & DZ_TLINE) >> 8;
+		if (trdy == dport->port.line)
+			break;
+		mask &= ~(1 << trdy);
+		dz_out(dport, DZ_TCR, mask);
+		iob();
+		udelay(2);
+	}
+
+	if (loops)				/* Cannot send otherwise. */
+		dz_out(dport, DZ_TDR, ch);
+
+	dz_out(dport, DZ_TCR, tcr);
+	dz_out(dport, DZ_CSR, csr);
 }
 
 /*
@@ -703,11 +714,11 @@
  * The console must be locked when we get here.
  * -------------------------------------------------------------------
  */
-static void dz_console_print(struct console *cons,
+static void dz_console_print(struct console *co,
 			     const char *str,
 			     unsigned int count)
 {
-	struct dz_port *dport = &dz_ports[CONSOLE_LINE];
+	struct dz_port *dport = &dz_ports[co->index];
 #ifdef DEBUG_DZ
 	prom_printf((char *) str);
 #endif
@@ -716,49 +727,43 @@
 
 static int __init dz_console_setup(struct console *co, char *options)
 {
-	struct dz_port *dport = &dz_ports[CONSOLE_LINE];
+	struct dz_port *dport = &dz_ports[co->index];
 	int baud = 9600;
 	int bits = 8;
 	int parity = 'n';
 	int flow = 'n';
-	int ret;
-	unsigned short mask, tmp;
 
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
 
 	dz_reset(dport);
 
-	ret = uart_set_options(&dport->port, co, baud, parity, bits, flow);
-	if (ret == 0) {
-		mask = 1 << dport->port.line;
-		tmp = dz_in(dport, DZ_TCR);	/* read the TX flag */
-		if (!(tmp & mask)) {
-			tmp |= mask;		/* set the TX flag */
-			dz_out(dport, DZ_TCR, tmp);
-		}
-	}
-
-	return ret;
+	return uart_set_options(&dport->port, co, baud, parity, bits, flow);
 }
 
-static struct console dz_sercons =
-{
+static struct uart_driver dz_reg;
+static struct console dz_sercons = {
 	.name	= "ttyS",
 	.write	= dz_console_print,
 	.device	= uart_console_device,
 	.setup	= dz_console_setup,
-	.flags	= CON_CONSDEV | CON_PRINTBUFFER,
-	.index	= CONSOLE_LINE,
+	.flags	= CON_PRINTBUFFER,
+	.index	= -1,
+	.data	= &dz_reg,
 };
 
-void __init dz_serial_console_init(void)
+static int __init dz_serial_console_init(void)
 {
-	dz_init_ports();
-
-	register_console(&dz_sercons);
+	if (!IOASIC) {
+		dz_init_ports();
+		register_console(&dz_sercons);
+		return 0;
+	} else
+		return -ENXIO;
 }
 
+console_initcall(dz_serial_console_init);
+
 #define SERIAL_DZ_CONSOLE	&dz_sercons
 #else
 #define SERIAL_DZ_CONSOLE	NULL
@@ -767,35 +772,29 @@
 static struct uart_driver dz_reg = {
 	.owner			= THIS_MODULE,
 	.driver_name		= "serial",
-	.dev_name		= "ttyS%d",
+	.dev_name		= "ttyS",
 	.major			= TTY_MAJOR,
 	.minor			= 64,
 	.nr			= DZ_NB_PORT,
 	.cons			= SERIAL_DZ_CONSOLE,
 };
 
-int __init dz_init(void)
+static int __init dz_init(void)
 {
-	unsigned long flags;
 	int ret, i;
 
+	if (IOASIC)
+		return -ENXIO;
+
 	printk("%s%s\n", dz_name, dz_version);
 
 	dz_init_ports();
 
-	save_flags(flags);
-	cli();
-
 #ifndef CONFIG_SERIAL_DZ_CONSOLE
 	/* reset the chip */
 	dz_reset(&dz_ports[0]);
 #endif
 
-	/* order matters here... the trick is that flags
-	   is updated... in request_irq - to immediatedly obliterate
-	   it is unwise. */
-	restore_flags(flags);
-
 	if (request_irq(dz_ports[0].port.irq, dz_interrupt,
 			IRQF_DISABLED, "DZ", &dz_ports[0]))
 		panic("Unable to register DZ interrupt");
@@ -810,5 +809,7 @@
 	return ret;
 }
 
+module_init(dz_init);
+
 MODULE_DESCRIPTION("DECstation DZ serial driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/serial/dz.h b/drivers/serial/dz.h
index 86ef417..9674d4e 100644
--- a/drivers/serial/dz.h
+++ b/drivers/serial/dz.h
@@ -1,20 +1,22 @@
 /*
- * dz.h: Serial port driver for DECStations equiped 
+ * dz.h: Serial port driver for DECstations equipped
  *       with the DZ chipset.
  *
  * Copyright (C) 1998 Olivier A. D. Lebaillif 
  *             
  * Email: olivier.lebaillif@ifrsys.com
  *
+ * Copyright (C) 2004, 2006  Maciej W. Rozycki
  */
 #ifndef DZ_SERIAL_H
 #define DZ_SERIAL_H
 
 /*
- * Definitions for the Control and Status Received.
+ * Definitions for the Control and Status Register.
  */
 #define DZ_TRDY        0x8000                 /* Transmitter empty */
-#define DZ_TIE         0x4000                 /* Transmitter Interrupt Enable */
+#define DZ_TIE         0x4000                 /* Transmitter Interrupt Enbl */
+#define DZ_TLINE       0x0300                 /* Transmitter Line Number */
 #define DZ_RDONE       0x0080                 /* Receiver data ready */
 #define DZ_RIE         0x0040                 /* Receive Interrupt Enable */
 #define DZ_MSE         0x0020                 /* Master Scan Enable */
@@ -22,32 +24,44 @@
 #define DZ_MAINT       0x0008                 /* Loop Back Mode */
 
 /*
- * Definitions for the Received buffer. 
+ * Definitions for the Receiver Buffer Register.
  */
-#define DZ_RBUF_MASK   0x00FF                 /* Data Mask in the Receive Buffer */
-#define DZ_LINE_MASK   0x0300                 /* Line Mask in the Receive Buffer */
+#define DZ_RBUF_MASK   0x00FF                 /* Data Mask */
+#define DZ_LINE_MASK   0x0300                 /* Line Mask */
 #define DZ_DVAL        0x8000                 /* Valid Data indicator */
 #define DZ_OERR        0x4000                 /* Overrun error indicator */
 #define DZ_FERR        0x2000                 /* Frame error indicator */
 #define DZ_PERR        0x1000                 /* Parity error indicator */
 
-#define LINE(x) (x & DZ_LINE_MASK) >> 8       /* Get the line number from the input buffer */
-#define UCHAR(x) (unsigned char)(x & DZ_RBUF_MASK)
+#define LINE(x) ((x & DZ_LINE_MASK) >> 8)     /* Get the line number
+                                                 from the input buffer */
+#define UCHAR(x) ((unsigned char)(x & DZ_RBUF_MASK))
 
 /*
- * Definitions for the Transmit Register.
+ * Definitions for the Transmit Control Register.
  */
 #define DZ_LINE_KEYBOARD 0x0001
 #define DZ_LINE_MOUSE    0x0002
 #define DZ_LINE_MODEM    0x0004
 #define DZ_LINE_PRINTER  0x0008
 
+#define DZ_MODEM_RTS     0x0800               /* RTS for the modem line (2) */
 #define DZ_MODEM_DTR     0x0400               /* DTR for the modem line (2) */
+#define DZ_PRINT_RTS     0x0200               /* RTS for the prntr line (3) */
+#define DZ_PRINT_DTR     0x0100               /* DTR for the prntr line (3) */
+#define DZ_LNENB         0x000f               /* Transmitter Line Enable */
 
 /*
  * Definitions for the Modem Status Register.
  */
+#define DZ_MODEM_RI      0x0800               /* RI for the modem line (2) */
+#define DZ_MODEM_CD      0x0400               /* CD for the modem line (2) */
 #define DZ_MODEM_DSR     0x0200               /* DSR for the modem line (2) */
+#define DZ_MODEM_CTS     0x0100               /* CTS for the modem line (2) */
+#define DZ_PRINT_RI      0x0008               /* RI for the printer line (3) */
+#define DZ_PRINT_CD      0x0004               /* CD for the printer line (3) */
+#define DZ_PRINT_DSR     0x0002               /* DSR for the prntr line (3) */
+#define DZ_PRINT_CTS     0x0001               /* CTS for the prntr line (3) */
 
 /*
  * Definitions for the Transmit Data Register.
diff --git a/drivers/serial/mcfserial.c b/drivers/serial/mcfserial.c
index aee1b31..3db206d 100644
--- a/drivers/serial/mcfserial.c
+++ b/drivers/serial/mcfserial.c
@@ -60,7 +60,8 @@
 #if defined(CONFIG_HW_FEITH)
 #define	CONSOLE_BAUD_RATE	38400
 #define	DEFAULT_CBAUD		B38400
-#elif defined(CONFIG_MOD5272) || defined(CONFIG_M5208EVB) || defined(CONFIG_M5329EVB)
+#elif defined(CONFIG_MOD5272) || defined(CONFIG_M5208EVB) || \
+      defined(CONFIG_M5329EVB) || defined(CONFIG_GILBARCO)
 #define CONSOLE_BAUD_RATE 	115200
 #define DEFAULT_CBAUD		B115200
 #elif defined(CONFIG_ARNEWSH) || defined(CONFIG_FREESCALE) || \
@@ -109,12 +110,30 @@
 		.irq = IRQBASE,
 		.flags = ASYNC_BOOT_AUTOCONF,
 	},
+#ifdef MCFUART_BASE2
 	{  /* ttyS1 */
 		.magic = 0,
 		.addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE2),
 		.irq = IRQBASE+1,
 		.flags = ASYNC_BOOT_AUTOCONF,
 	},
+#endif
+#ifdef MCFUART_BASE3
+	{  /* ttyS2 */
+		.magic = 0,
+		.addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE3),
+		.irq = IRQBASE+2,
+		.flags = ASYNC_BOOT_AUTOCONF,
+	},
+#endif
+#ifdef MCFUART_BASE4
+	{  /* ttyS3 */
+		.magic = 0,
+		.addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE4),
+		.irq = IRQBASE+3,
+		.flags = ASYNC_BOOT_AUTOCONF,
+	},
+#endif
 };
 
 
@@ -1516,6 +1535,22 @@
 	imrp = (volatile unsigned long *) (MCF_MBAR + MCFICM_INTC0 +
 		MCFINTC_IMRL);
 	*imrp &= ~((1 << (info->irq - MCFINT_VECBASE)) | 1);
+#if defined(CONFIG_M527x)
+	{
+		/*
+		 * External Pin Mask Setting & Enable External Pin for Interface
+		 * mrcbis@aliceposta.it
+        	 */
+		unsigned short *serpin_enable_mask;
+		serpin_enable_mask = (MCF_IPSBAR + MCF_GPIO_PAR_UART);
+		if (info->line == 0)
+			*serpin_enable_mask |= UART0_ENABLE_MASK;
+		else if (info->line == 1)
+			*serpin_enable_mask |= UART1_ENABLE_MASK;
+		else if (info->line == 2)
+			*serpin_enable_mask |= UART2_ENABLE_MASK;
+	}
+#endif
 #elif defined(CONFIG_M520x)
 	volatile unsigned char *icrp, *uartp;
 	volatile unsigned long *imrp;
@@ -1713,7 +1748,7 @@
 	/* Initialize the tty_driver structure */
 	mcfrs_serial_driver->owner = THIS_MODULE;
 	mcfrs_serial_driver->name = "ttyS";
-	mcfrs_serial_driver->driver_name = "serial";
+	mcfrs_serial_driver->driver_name = "mcfserial";
 	mcfrs_serial_driver->major = TTY_MAJOR;
 	mcfrs_serial_driver->minor_start = 64;
 	mcfrs_serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1797,10 +1832,23 @@
 	uartp[MCFUART_UMR] = MCFUART_MR1_PARITYNONE | MCFUART_MR1_CS8;
 	uartp[MCFUART_UMR] = MCFUART_MR2_STOP1;
 
+#ifdef	CONFIG_M5272
+{
+	/*
+	 * For the MCF5272, also compute the baudrate fraction.
+	 */
+	int fraction = MCF_BUSCLK - (clk * 32 * mcfrs_console_baud);
+	fraction *= 16;
+	fraction /= (32 * mcfrs_console_baud);
+	uartp[MCFUART_UFPD] = (fraction & 0xf);		/* set fraction */
+	clk = (MCF_BUSCLK / mcfrs_console_baud) / 32;
+}
+#else
 	clk = ((MCF_BUSCLK / mcfrs_console_baud) + 16) / 32; /* set baud */
+#endif
+
 	uartp[MCFUART_UBG1] = (clk & 0xff00) >> 8;  /* set msb baud */
 	uartp[MCFUART_UBG2] = (clk & 0xff);  /* set lsb baud */
-
 	uartp[MCFUART_UCSR] = MCFUART_UCSR_RXCLKTIMER | MCFUART_UCSR_TXCLKTIMER;
 	uartp[MCFUART_UCR] = MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE;
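The CONFIG_M5272 block above programs the extra UFPD register with a 1/16-step
fractional part of the baud divider instead of rounding the integer divider as the
#else branch does.  As a purely illustrative worked example (the 66 MHz bus clock and
115200 baud figures are assumptions, not values taken from this patch), the
divider/fraction pair works out to:

	divider  = (66000000 / 115200) / 32             = 17   -> UBG1:UBG2 = 0x00:0x11
	fraction = ((66000000 - 17 * 32 * 115200) * 16)
	           / (32 * 115200)                      = 14   -> UFPD = 0xe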
 
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 4f80c5b..6dd579e 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -1,6 +1,4 @@
 /*
- * drivers/serial/mpc52xx_uart.c
- *
  * Driver for the PSC of the Freescale MPC52xx PSCs configured as UARTs.
  *
  * FIXME According to the usermanual the status bits in the status register
@@ -14,18 +12,20 @@
  *
  *
  * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- * 
+ *
  * Some of the code has been inspired/copied from the 2.4 code written
  * by Dale Farnsworth <dfarnsworth@mvista.com>.
- * 
- * Copyright (C) 2004-2005 Sylvain Munaut <tnt@246tNt.com>
+ *
+ * Copyright (C) 2006 Secret Lab Technologies Ltd.
+ *                    Grant Likely <grant.likely@secretlab.ca>
+ * Copyright (C) 2004-2006 Sylvain Munaut <tnt@246tNt.com>
  * Copyright (C) 2003 MontaVista, Software, Inc.
- * 
+ *
  * This file is licensed under the terms of the GNU General Public License
  * version 2. This program is licensed "as is" without any warranty of any
  * kind, whether express or implied.
  */
- 
+
 /* Platform device Usage :
  *
  * Since PSCs can have multiple function, the correct driver for each one
@@ -44,7 +44,24 @@
  * will be mapped to.
  */
 
-#include <linux/platform_device.h>
+/* OF Platform device Usage :
+ *
+ * This driver is only used for PSCs configured in uart mode.  The device
+ * tree will have a node for each PSC in uart mode w/ device_type = "serial"
+ * and "mpc52xx-psc-uart" in the compatible string
+ *
+ * By default, PSC devices are enumerated in the order they are found.  However
+ * a particular PSC number can be forced by adding 'device_no = <port#>'
+ * to the device node.
+ *
+ * The driver inits all necessary registers to place the PSC in uart mode without
+ * DCD. However, the pin multiplexing isn't changed and should be set either
+ * by the bootloader or in the platform init code.
+ */
+
+#undef DEBUG
+
+#include <linux/device.h>
 #include <linux/module.h>
 #include <linux/tty.h>
 #include <linux/serial.h>
@@ -54,6 +71,12 @@
 #include <asm/delay.h>
 #include <asm/io.h>
 
+#if defined(CONFIG_PPC_MERGE)
+#include <asm/of_platform.h>
+#else
+#include <linux/platform_device.h>
+#endif
+
 #include <asm/mpc52xx.h>
 #include <asm/mpc52xx_psc.h>
 
@@ -80,6 +103,12 @@
 	 *        it's cleared, then a memset(...,0,...) should be added to
 	 *        the console_init
 	 */
+#if defined(CONFIG_PPC_MERGE)
+/* lookup table for matching device nodes to index numbers */
+static struct device_node *mpc52xx_uart_nodes[MPC52xx_PSC_MAXNUM];
+
+static void mpc52xx_uart_of_enumerate(void);
+#endif
 
 #define PSC(port) ((struct mpc52xx_psc __iomem *)((port)->membase))
 
@@ -96,32 +125,40 @@
 #define uart_console(port)	(0)
 #endif
 
+#if defined(CONFIG_PPC_MERGE)
+static struct of_device_id mpc52xx_uart_of_match[] = {
+	{ .type = "serial", .compatible = "mpc52xx-psc-uart", },
+	{ .type = "serial", .compatible = "mpc5200-psc", }, /* Efika only! */
+	{},
+};
+#endif
+
 
 /* ======================================================================== */
 /* UART operations                                                          */
 /* ======================================================================== */
 
-static unsigned int 
+static unsigned int
 mpc52xx_uart_tx_empty(struct uart_port *port)
 {
 	int status = in_be16(&PSC(port)->mpc52xx_psc_status);
 	return (status & MPC52xx_PSC_SR_TXEMP) ? TIOCSER_TEMT : 0;
 }
 
-static void 
+static void
 mpc52xx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
 {
 	/* Not implemented */
 }
 
-static unsigned int 
+static unsigned int
 mpc52xx_uart_get_mctrl(struct uart_port *port)
 {
 	/* Not implemented */
 	return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
 }
 
-static void 
+static void
 mpc52xx_uart_stop_tx(struct uart_port *port)
 {
 	/* port->lock taken by caller */
@@ -129,7 +166,7 @@
 	out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
 }
 
-static void 
+static void
 mpc52xx_uart_start_tx(struct uart_port *port)
 {
 	/* port->lock taken by caller */
@@ -137,12 +174,12 @@
 	out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
 }
 
-static void 
+static void
 mpc52xx_uart_send_xchar(struct uart_port *port, char ch)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&port->lock, flags);
-	
+
 	port->x_char = ch;
 	if (ch) {
 		/* Make sure tx interrupts are on */
@@ -150,7 +187,7 @@
 		port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
 		out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
 	}
-	
+
 	spin_unlock_irqrestore(&port->lock, flags);
 }
 
@@ -178,7 +215,7 @@
 		out_8(&PSC(port)->command,MPC52xx_PSC_START_BRK);
 	else
 		out_8(&PSC(port)->command,MPC52xx_PSC_STOP_BRK);
-	
+
 	spin_unlock_irqrestore(&port->lock, flags);
 }
 
@@ -197,11 +234,11 @@
 	/* Reset/activate the port, clear and enable interrupts */
 	out_8(&psc->command,MPC52xx_PSC_RST_RX);
 	out_8(&psc->command,MPC52xx_PSC_RST_TX);
-	
+
 	out_be32(&psc->sicr,0);	/* UART mode DCD ignored */
 
 	out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00); /* /16 prescaler on */
-	
+
 	out_8(&psc->rfcntl, 0x00);
 	out_be16(&psc->rfalarm, 0x1ff);
 	out_8(&psc->tfcntl, 0x07);
@@ -209,10 +246,10 @@
 
 	port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY;
 	out_be16(&psc->mpc52xx_psc_imr,port->read_status_mask);
-	
+
 	out_8(&psc->command,MPC52xx_PSC_TX_ENABLE);
 	out_8(&psc->command,MPC52xx_PSC_RX_ENABLE);
-		
+
 	return 0;
 }
 
@@ -220,19 +257,19 @@
 mpc52xx_uart_shutdown(struct uart_port *port)
 {
 	struct mpc52xx_psc __iomem *psc = PSC(port);
-	
+
 	/* Shut down the port, interrupt and all */
 	out_8(&psc->command,MPC52xx_PSC_RST_RX);
 	out_8(&psc->command,MPC52xx_PSC_RST_TX);
-	
-	port->read_status_mask = 0; 
+
+	port->read_status_mask = 0;
 	out_be16(&psc->mpc52xx_psc_imr,port->read_status_mask);
 
 	/* Release interrupt */
 	free_irq(port->irq, port);
 }
 
-static void 
+static void
 mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new,
                          struct termios *old)
 {
@@ -241,10 +278,10 @@
 	unsigned char mr1, mr2;
 	unsigned short ctr;
 	unsigned int j, baud, quot;
-	
+
 	/* Prepare what we're gonna write */
 	mr1 = 0;
-	
+
 	switch (new->c_cflag & CSIZE) {
 		case CS5:	mr1 |= MPC52xx_PSC_MODE_5_BITS;
 				break;
@@ -261,8 +298,8 @@
 			MPC52xx_PSC_MODE_PARODD : MPC52xx_PSC_MODE_PAREVEN;
 	} else
 		mr1 |= MPC52xx_PSC_MODE_PARNONE;
-	
-	
+
+
 	mr2 = 0;
 
 	if (new->c_cflag & CSTOPB)
@@ -276,7 +313,7 @@
 	baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16);
 	quot = uart_get_divisor(port, baud);
 	ctr = quot & 0xffff;
-	
+
 	/* Get the lock */
 	spin_lock_irqsave(&port->lock, flags);
 
@@ -290,14 +327,14 @@
 	 * boot for the console, all stuff is not yet ready to receive at that
 	 * time and that just makes the kernel oops */
 	/* while (j-- && mpc52xx_uart_int_rx_chars(port)); */
-	while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) && 
+	while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) &&
 	       --j)
 		udelay(1);
 
 	if (!j)
 		printk(	KERN_ERR "mpc52xx_uart.c: "
 			"Unable to flush RX & TX fifos in-time in set_termios."
-			"Some chars may have been lost.\n" ); 
+			"Some chars may have been lost.\n" );
 
 	/* Reset the TX & RX */
 	out_8(&psc->command,MPC52xx_PSC_RST_RX);
@@ -309,7 +346,7 @@
 	out_8(&psc->mode,mr2);
 	out_8(&psc->ctur,ctr >> 8);
 	out_8(&psc->ctlr,ctr & 0xff);
-	
+
 	/* Reenable TX & RX */
 	out_8(&psc->command,MPC52xx_PSC_TX_ENABLE);
 	out_8(&psc->command,MPC52xx_PSC_RX_ENABLE);
@@ -332,7 +369,7 @@
 		port->membase = NULL;
 	}
 
-	release_mem_region(port->mapbase, MPC52xx_PSC_SIZE);
+	release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc));
 }
 
 static int
@@ -341,12 +378,13 @@
 	int err;
 
 	if (port->flags & UPF_IOREMAP) /* Need to remap ? */
-		port->membase = ioremap(port->mapbase, MPC52xx_PSC_SIZE);
+		port->membase = ioremap(port->mapbase,
+		                        sizeof(struct mpc52xx_psc));
 
 	if (!port->membase)
 		return -EINVAL;
 
-	err = request_mem_region(port->mapbase, MPC52xx_PSC_SIZE,
+	err = request_mem_region(port->mapbase, sizeof(struct mpc52xx_psc),
 			"mpc52xx_psc_uart") != NULL ? 0 : -EBUSY;
 
 	if (err && (port->flags & UPF_IOREMAP)) {
@@ -373,7 +411,7 @@
 
 	if ( (ser->irq != port->irq) ||
 	     (ser->io_type != SERIAL_IO_MEM) ||
-	     (ser->baud_base != port->uartclk)  || 
+	     (ser->baud_base != port->uartclk)  ||
 	     (ser->iomem_base != (void*)port->mapbase) ||
 	     (ser->hub6 != 0 ) )
 		return -EINVAL;
@@ -404,11 +442,11 @@
 	.verify_port	= mpc52xx_uart_verify_port
 };
 
-	
+
 /* ======================================================================== */
 /* Interrupt handling                                                       */
 /* ======================================================================== */
-	
+
 static inline int
 mpc52xx_uart_int_rx_chars(struct uart_port *port)
 {
@@ -435,11 +473,11 @@
 
 		flag = TTY_NORMAL;
 		port->icount.rx++;
-	
+
 		if ( status & (MPC52xx_PSC_SR_PE |
 		               MPC52xx_PSC_SR_FE |
 		               MPC52xx_PSC_SR_RB) ) {
-			
+
 			if (status & MPC52xx_PSC_SR_RB) {
 				flag = TTY_BREAK;
 				uart_handle_break(port);
@@ -464,7 +502,7 @@
 	}
 
 	tty_flip_buffer_push(tty);
-	
+
 	return in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY;
 }
 
@@ -509,25 +547,25 @@
 	return 1;
 }
 
-static irqreturn_t 
+static irqreturn_t
 mpc52xx_uart_int(int irq, void *dev_id)
 {
 	struct uart_port *port = dev_id;
 	unsigned long pass = ISR_PASS_LIMIT;
 	unsigned int keepgoing;
 	unsigned short status;
-	
+
 	spin_lock(&port->lock);
-	
+
 	/* While we have stuff to do, we continue */
 	do {
 		/* If we don't find anything to do, we stop */
-		keepgoing = 0; 
-		
+		keepgoing = 0;
+
 		/* Read status */
 		status = in_be16(&PSC(port)->mpc52xx_psc_isr);
 		status &= port->read_status_mask;
-			
+
 		/* Do we need to receive chars ? */
 		/* For this RX interrupts must be on and some chars waiting */
 		if ( status & MPC52xx_PSC_IMR_RXRDY )
@@ -537,15 +575,15 @@
 		/* For this, TX must be ready and TX interrupt enabled */
 		if ( status & MPC52xx_PSC_IMR_TXRDY )
 			keepgoing |= mpc52xx_uart_int_tx_chars(port);
-		
+
 		/* Limit number of iteration */
 		if ( !(--pass) )
 			keepgoing = 0;
 
 	} while (keepgoing);
-	
+
 	spin_unlock(&port->lock);
-	
+
 	return IRQ_HANDLED;
 }
 
@@ -563,13 +601,18 @@
 	struct mpc52xx_psc __iomem *psc = PSC(port);
 	unsigned char mr1;
 
+	pr_debug("mpc52xx_console_get_options(port=%p)\n", port);
+
 	/* Read the mode registers */
 	out_8(&psc->command,MPC52xx_PSC_SEL_MODE_REG_1);
 	mr1 = in_8(&psc->mode);
-	
+
 	/* CT{U,L}R are write-only ! */
-	*baud = __res.bi_baudrate ?
-		__res.bi_baudrate : CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD;
+	*baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD;
+#if !defined(CONFIG_PPC_MERGE)
+	if (__res.bi_baudrate)
+		*baud = __res.bi_baudrate;
+#endif
 
 	/* Parse them */
 	switch (mr1 & MPC52xx_PSC_MODE_BITS_MASK) {
@@ -579,26 +622,26 @@
 		case MPC52xx_PSC_MODE_8_BITS:
 		default:			*bits = 8;
 	}
-	
+
 	if (mr1 & MPC52xx_PSC_MODE_PARNONE)
 		*parity = 'n';
 	else
 		*parity = mr1 & MPC52xx_PSC_MODE_PARODD ? 'o' : 'e';
 }
 
-static void  
+static void
 mpc52xx_console_write(struct console *co, const char *s, unsigned int count)
 {
 	struct uart_port *port = &mpc52xx_uart_ports[co->index];
 	struct mpc52xx_psc __iomem *psc = PSC(port);
 	unsigned int i, j;
-	
+
 	/* Disable interrupts */
 	out_be16(&psc->mpc52xx_psc_imr, 0);
 
 	/* Wait the TX buffer to be empty */
-	j = 5000000;	/* Maximum wait */	
-	while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) && 
+	j = 5000000;	/* Maximum wait */
+	while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) &&
 	       --j)
 		udelay(1);
 
@@ -607,13 +650,13 @@
 		/* Line return handling */
 		if (*s == '\n')
 			out_8(&psc->mpc52xx_psc_buffer_8, '\r');
-		
+
 		/* Send the char */
 		out_8(&psc->mpc52xx_psc_buffer_8, *s);
 
 		/* Wait the TX buffer to be empty */
-		j = 20000;	/* Maximum wait */	
-		while (!(in_be16(&psc->mpc52xx_psc_status) & 
+		j = 20000;	/* Maximum wait */
+		while (!(in_be16(&psc->mpc52xx_psc_status) &
 		         MPC52xx_PSC_SR_TXEMP) && --j)
 			udelay(1);
 	}
@@ -622,6 +665,7 @@
 	out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
 }
 
+#if !defined(CONFIG_PPC_MERGE)
 static int __init
 mpc52xx_console_setup(struct console *co, char *options)
 {
@@ -634,7 +678,7 @@
 
 	if (co->index < 0 || co->index >= MPC52xx_PSC_MAXNUM)
 		return -EINVAL;
-	
+
 	/* Basic port init. Needed since we use some uart_??? func before
 	 * real init for early access */
 	spin_lock_init(&port->lock);
@@ -656,6 +700,78 @@
 	return uart_set_options(port, co, baud, parity, bits, flow);
 }
 
+#else
+
+static int __init
+mpc52xx_console_setup(struct console *co, char *options)
+{
+	struct uart_port *port = &mpc52xx_uart_ports[co->index];
+	struct device_node *np = mpc52xx_uart_nodes[co->index];
+	unsigned int ipb_freq;
+	struct resource res;
+	int ret;
+
+	int baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+
+	pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n",
+		 co, co->index, options);
+
+	if ((co->index < 0) || (co->index >= MPC52xx_PSC_MAXNUM)) {
+		pr_debug("PSC%x out of range\n", co->index);
+		return -EINVAL;
+	}
+
+	if (!np) {
+		pr_debug("PSC%x not found in device tree\n", co->index);
+		return -EINVAL;
+	}
+
+	pr_debug("Console on ttyPSC%x is %s\n",
+	         co->index, mpc52xx_uart_nodes[co->index]->full_name);
+
+	/* Fetch register locations */
+	if ((ret = of_address_to_resource(np, 0, &res)) != 0) {
+		pr_debug("Could not get resources for PSC%x\n", co->index);
+		return ret;
+	}
+
+	/* Search for bus-frequency property in this node or a parent */
+	if ((ipb_freq = mpc52xx_find_ipb_freq(np)) == 0) {
+		pr_debug("Could not find IPB bus frequency!\n");
+		return -EINVAL;
+	}
+
+	/* Basic port init. Needed since we use some uart_??? func before
+	 * real init for early access */
+	spin_lock_init(&port->lock);
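+	/* PSC input clock is IPB bus clock / 2 (see CTUR/CTLR docs) */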
+	port->uartclk	= ipb_freq / 2;
+	port->ops	= &mpc52xx_uart_ops;
+	port->mapbase = res.start;
+	port->membase = ioremap(res.start, sizeof(struct mpc52xx_psc));
+	port->irq = irq_of_parse_and_map(np, 0);
+
+	if (port->membase == NULL)
+		return -EINVAL;
+
+	pr_debug("mpc52xx-psc uart at %lx, mapped to %p, irq=%x, freq=%i\n",
+	         port->mapbase, port->membase, port->irq, port->uartclk);
+
+	/* Set up the port parameters according to options */
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+	else
+		mpc52xx_console_get_options(port, &baud, &parity, &bits, &flow);
+
+	pr_debug("Setting console parameters: %i %i%c1 flow=%c\n",
+	         baud, bits, parity, flow);
+
+	return uart_set_options(port, co, baud, parity, bits, flow);
+}
+#endif /* defined(CONFIG_PPC_MERGE) */
+
 
 static struct uart_driver mpc52xx_uart_driver;
 
@@ -669,10 +785,11 @@
 	.data	= &mpc52xx_uart_driver,
 };
 
-	
-static int __init 
+
+static int __init
 mpc52xx_console_init(void)
 {
+	mpc52xx_uart_of_enumerate();
 	register_console(&mpc52xx_console);
 	return 0;
 }
@@ -700,6 +817,7 @@
 };
 
 
+#if !defined(CONFIG_PPC_MERGE)
 /* ======================================================================== */
 /* Platform Driver                                                          */
 /* ======================================================================== */
@@ -723,8 +841,6 @@
 	/* Init the port structure */
 	port = &mpc52xx_uart_ports[idx];
 
-	memset(port, 0x00, sizeof(struct uart_port));
-
 	spin_lock_init(&port->lock);
 	port->uartclk	= __res.bi_ipbfreq / 2; /* Look at CTLR doc */
 	port->fifosize	= 512;
@@ -733,6 +849,7 @@
 			  ( uart_console(port) ? 0 : UPF_IOREMAP );
 	port->line	= idx;
 	port->ops	= &mpc52xx_uart_ops;
+	port->dev	= &dev->dev;
 
 	/* Search for IRQ and mapbase */
 	for (i=0 ; i<dev->num_resources ; i++, res++) {
@@ -771,7 +888,7 @@
 {
 	struct uart_port *port = (struct uart_port *) platform_get_drvdata(dev);
 
-	if (sport)
+	if (port)
 		uart_suspend_port(&mpc52xx_uart_driver, port);
 
 	return 0;
@@ -789,6 +906,7 @@
 }
 #endif
 
+
 static struct platform_driver mpc52xx_uart_platform_driver = {
 	.probe		= mpc52xx_uart_probe,
 	.remove		= mpc52xx_uart_remove,
@@ -800,6 +918,184 @@
 		.name	= "mpc52xx-psc",
 	},
 };
+#endif /* !defined(CONFIG_PPC_MERGE) */
+
+
+#if defined(CONFIG_PPC_MERGE)
+/* ======================================================================== */
+/* OF Platform Driver                                                       */
+/* ======================================================================== */
+
+static int __devinit
+mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
+{
+	int idx = -1;
+	unsigned int ipb_freq;
+	struct uart_port *port = NULL;
+	struct resource res;
+	int ret;
+
+	dev_dbg(&op->dev, "mpc52xx_uart_probe(op=%p, match=%p)\n", op, match);
+
+	/* Check validity & presence */
+	for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++)
+		if (mpc52xx_uart_nodes[idx] == op->node)
+			break;
+	if (idx >= MPC52xx_PSC_MAXNUM)
+		return -EINVAL;
+	pr_debug("Found %s assigned to ttyPSC%x\n",
+	         mpc52xx_uart_nodes[idx]->full_name, idx);
+
+	/* Search for bus-frequency property in this node or a parent */
+	if ((ipb_freq = mpc52xx_find_ipb_freq(op->node)) == 0) {
+		dev_dbg(&op->dev, "Could not find IPB bus frequency!\n");
+		return -EINVAL;
+	}
+
+	/* Init the port structure */
+	port = &mpc52xx_uart_ports[idx];
+
+	spin_lock_init(&port->lock);
+	port->uartclk	= ipb_freq / 2;
+	port->fifosize	= 512;
+	port->iotype	= UPIO_MEM;
+	port->flags	= UPF_BOOT_AUTOCONF |
+			  ( uart_console(port) ? 0 : UPF_IOREMAP );
+	port->line	= idx;
+	port->ops	= &mpc52xx_uart_ops;
+	port->dev	= &op->dev;
+
+	/* Search for IRQ and mapbase */
+	if ((ret = of_address_to_resource(op->node, 0, &res)) != 0)
+		return ret;
+
+	port->mapbase = res.start;
+	port->irq = irq_of_parse_and_map(op->node, 0);
+
+	dev_dbg(&op->dev, "mpc52xx-psc uart at %lx, irq=%x, freq=%i\n",
+	        port->mapbase, port->irq, port->uartclk);
+
+	if ((port->irq == NO_IRQ) || !port->mapbase) {
+		printk(KERN_ERR "Could not allocate resources for PSC\n");
+		return -EINVAL;
+	}
+
+	/* Add the port to the uart sub-system */
+	ret = uart_add_one_port(&mpc52xx_uart_driver, port);
+	if (!ret)
+		dev_set_drvdata(&op->dev, (void*)port);
+
+	return ret;
+}
+
+static int
+mpc52xx_uart_of_remove(struct of_device *op)
+{
+	struct uart_port *port = dev_get_drvdata(&op->dev);
+	dev_set_drvdata(&op->dev, NULL);
+
+	if (port)
+		uart_remove_one_port(&mpc52xx_uart_driver, port);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int
+mpc52xx_uart_of_suspend(struct of_device *op, pm_message_t state)
+{
+	struct uart_port *port = (struct uart_port *) dev_get_drvdata(&op->dev);
+
+	if (port)
+		uart_suspend_port(&mpc52xx_uart_driver, port);
+
+	return 0;
+}
+
+static int
+mpc52xx_uart_of_resume(struct of_device *op)
+{
+	struct uart_port *port = (struct uart_port *) dev_get_drvdata(&op->dev);
+
+	if (port)
+		uart_resume_port(&mpc52xx_uart_driver, port);
+
+	return 0;
+}
+#endif
+
+static void
+mpc52xx_uart_of_assign(struct device_node *np, int idx)
+{
+	int free_idx = -1;
+	int i;
+
+	/* Find the first free node */
+	for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) {
+		if (mpc52xx_uart_nodes[i] == NULL) {
+			free_idx = i;
+			break;
+		}
+	}
+
+	if ((idx < 0) || (idx >= MPC52xx_PSC_MAXNUM))
+		idx = free_idx;
+
+	if (idx < 0)
+		return; /* No free slot; abort */
+
+	/* If the slot is already occupied, then swap slots */
+	if (mpc52xx_uart_nodes[idx] && (free_idx != -1))
+		mpc52xx_uart_nodes[free_idx] = mpc52xx_uart_nodes[idx];
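+	/* Finally, record this node in its slot */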
+	mpc52xx_uart_nodes[idx] = np;
+}
+
+static void
+mpc52xx_uart_of_enumerate(void)
+{
+	static int enum_done = 0;
+	struct device_node *np;
+	const unsigned int *devno;
+	int i;
+
+	if (enum_done)
+		return;
+
+	for_each_node_by_type(np, "serial") {
+		if (!of_match_node(mpc52xx_uart_of_match, np))
+			continue;
+
+		/* Is a particular device number requested? */
+		devno = get_property(np, "device_no", NULL);
+		mpc52xx_uart_of_assign(of_node_get(np), devno ? *devno : -1);
+	}
+
+	enum_done = 1;
+
+	for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) {
+		if (mpc52xx_uart_nodes[i])
+			pr_debug("%s assigned to ttyPSC%x\n",
+			         mpc52xx_uart_nodes[i]->full_name, i);
+	}
+}
+
+MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match);
+
+static struct of_platform_driver mpc52xx_uart_of_driver = {
+	.owner		= THIS_MODULE,
+	.name		= "mpc52xx-psc-uart",
+	.match_table	= mpc52xx_uart_of_match,
+	.probe		= mpc52xx_uart_of_probe,
+	.remove		= mpc52xx_uart_of_remove,
+#ifdef CONFIG_PM
+	.suspend	= mpc52xx_uart_of_suspend,
+	.resume		= mpc52xx_uart_of_resume,
+#endif
+	.driver		= {
+		.name	= "mpc52xx-psc-uart",
+	},
+};
+#endif /* defined(CONFIG_PPC_MERGE) */
 
 
 /* ======================================================================== */
@@ -811,22 +1107,45 @@
 {
 	int ret;
 
-	printk(KERN_INFO "Serial: MPC52xx PSC driver\n");
+	printk(KERN_INFO "Serial: MPC52xx PSC UART driver\n");
 
-	ret = uart_register_driver(&mpc52xx_uart_driver);
-	if (ret == 0) {
-		ret = platform_driver_register(&mpc52xx_uart_platform_driver);
-		if (ret)
-			uart_unregister_driver(&mpc52xx_uart_driver);
+	if ((ret = uart_register_driver(&mpc52xx_uart_driver)) != 0) {
+		printk(KERN_ERR "%s: uart_register_driver failed (%i)\n",
+		       __FILE__, ret);
+		return ret;
 	}
 
-	return ret;
+#if defined(CONFIG_PPC_MERGE)
+	mpc52xx_uart_of_enumerate();
+
+	ret = of_register_platform_driver(&mpc52xx_uart_of_driver);
+	if (ret) {
+		printk(KERN_ERR "%s: of_register_platform_driver failed (%i)\n",
+		       __FILE__, ret);
+		uart_unregister_driver(&mpc52xx_uart_driver);
+		return ret;
+	}
+#else
+	ret = platform_driver_register(&mpc52xx_uart_platform_driver);
+	if (ret) {
+		printk(KERN_ERR "%s: platform_driver_register failed (%i)\n",
+		       __FILE__, ret);
+		uart_unregister_driver(&mpc52xx_uart_driver);
+		return ret;
+	}
+#endif
+
+	return 0;
 }
 
 static void __exit
 mpc52xx_uart_exit(void)
 {
+#if defined(CONFIG_PPC_MERGE)
+	of_unregister_platform_driver(&mpc52xx_uart_of_driver);
+#else
 	platform_driver_unregister(&mpc52xx_uart_platform_driver);
+#endif
 	uart_unregister_driver(&mpc52xx_uart_driver);
 }
 
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c
index 8eea69f..29823bd 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/serial/mpsc.c
@@ -555,7 +555,7 @@
 	if (!mpsc_sdma_tx_active(pi)) {
 		txre = (struct mpsc_tx_desc *)(pi->txr +
 			(pi->txr_tail * MPSC_TXRE_SIZE));
-		dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
+		dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			invalidate_dcache_range((ulong)txre,
@@ -931,7 +931,7 @@
 	}
 	txre->link = cpu_to_be32(pi->txr_p);	/* Wrap last back to first */
 
-	dma_cache_sync((void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE,
+	dma_cache_sync(pi->port.dev, (void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE,
 		DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
@@ -1005,7 +1005,7 @@
 
 	rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));
 
-	dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+	dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 		invalidate_dcache_range((ulong)rxre,
@@ -1029,7 +1029,7 @@
 		}
 
 		bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
-		dma_cache_sync((void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
+		dma_cache_sync(pi->port.dev, (void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			invalidate_dcache_range((ulong)bp,
@@ -1098,7 +1098,7 @@
 					    SDMA_DESC_CMDSTAT_F |
 					    SDMA_DESC_CMDSTAT_L);
 		wmb();
-		dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
+		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			flush_dcache_range((ulong)rxre,
@@ -1109,7 +1109,7 @@
 		pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
 		rxre = (struct mpsc_rx_desc *)(pi->rxr +
 			(pi->rxr_posn * MPSC_RXRE_SIZE));
-		dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			invalidate_dcache_range((ulong)rxre,
@@ -1143,7 +1143,7 @@
 							   SDMA_DESC_CMDSTAT_EI
 							   : 0));
 	wmb();
-	dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
+	dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 		flush_dcache_range((ulong)txre,
@@ -1192,7 +1192,7 @@
 		else /* All tx data copied into ring bufs */
 			return;
 
-		dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
+		dma_cache_sync(pi->port.dev, (void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			flush_dcache_range((ulong)bp,
@@ -1217,7 +1217,7 @@
 		txre = (struct mpsc_tx_desc *)(pi->txr +
 			(pi->txr_tail * MPSC_TXRE_SIZE));
 
-		dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
+		dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			invalidate_dcache_range((ulong)txre,
@@ -1235,7 +1235,7 @@
 
 			txre = (struct mpsc_tx_desc *)(pi->txr +
 				(pi->txr_tail * MPSC_TXRE_SIZE));
-			dma_cache_sync((void *) txre, MPSC_TXRE_SIZE,
+			dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE,
 				DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
@@ -1652,7 +1652,7 @@
 			count--;
 		}
 
-		dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
+		dma_cache_sync(pi->port.dev, (void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			flush_dcache_range((ulong)bp,
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 00f9ffd..431433f 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -723,7 +723,7 @@
 	u_char *buf;
 	cisparse_t *parse;
 	cistpl_cftable_entry_t *cf;
-	int i, last_ret, last_fn;
+	int i;
 
 	DEBUG(0, "serial_config(0x%p)\n", link);
 
@@ -740,15 +740,6 @@
 	tuple->TupleOffset = 0;
 	tuple->TupleDataMax = 255;
 	tuple->Attributes = 0;
-	/* Get configuration register information */
-	tuple->DesiredTuple = CISTPL_CONFIG;
-	last_ret = first_tuple(link, tuple, parse);
-	if (last_ret != CS_SUCCESS) {
-		last_fn = ParseTuple;
-		goto cs_failed;
-	}
-	link->conf.ConfigBase = parse->config.base;
-	link->conf.Present = parse->config.rmask[0];
 
 	/* Is this a compliant multifunction card? */
 	tuple->DesiredTuple = CISTPL_LONGLINK_MFC;
@@ -757,27 +748,25 @@
 
 	/* Is this a multiport card? */
 	tuple->DesiredTuple = CISTPL_MANFID;
-	if (first_tuple(link, tuple, parse) == CS_SUCCESS) {
-		info->manfid = parse->manfid.manf;
-		info->prodid = parse->manfid.card;
+	info->manfid = link->manf_id;
+	info->prodid = link->card_id;
 
-		for (i = 0; i < ARRAY_SIZE(quirks); i++)
-			if ((quirks[i].manfid == ~0 ||
-			     quirks[i].manfid == info->manfid) &&
-			    (quirks[i].prodid == ~0 ||
-			     quirks[i].prodid == info->prodid)) {
-				info->quirk = &quirks[i];
-				break;
-			}
-	}
+	for (i = 0; i < ARRAY_SIZE(quirks); i++)
+		if ((quirks[i].manfid == ~0 ||
+		     quirks[i].manfid == info->manfid) &&
+		    (quirks[i].prodid == ~0 ||
+		     quirks[i].prodid == info->prodid)) {
+			info->quirk = &quirks[i];
+			break;
+		}
 
 	/* Another check for dual-serial cards: look for either serial or
 	   multifunction cards that ask for appropriate IO port ranges */
 	tuple->DesiredTuple = CISTPL_FUNCID;
 	if ((info->multi == 0) &&
-	    ((first_tuple(link, tuple, parse) != CS_SUCCESS) ||
-	     (parse->funcid.func == CISTPL_FUNCID_MULTI) ||
-	     (parse->funcid.func == CISTPL_FUNCID_SERIAL))) {
+	    (link->has_func_id) &&
+	    ((link->func_id == CISTPL_FUNCID_MULTI) ||
+	     (link->func_id == CISTPL_FUNCID_SERIAL))) {
 		tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY;
 		if (first_tuple(link, tuple, parse) == CS_SUCCESS) {
 			if ((cf->io.nwin == 1) && (cf->io.win[0].len % 8 == 0))
@@ -814,8 +803,6 @@
 	kfree(cfg_mem);
 	return 0;
 
- cs_failed:
-	cs_error(link, last_fn, last_ret);
  failed:
 	serial_remove(link);
 	kfree(cfg_mem);
@@ -925,6 +912,30 @@
 	PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"),
 	PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "RS-COM-2P.cis"),
 	PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"),
+	PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100  1.00.",0x19ca78af,0xf964f42b),
+	PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100",0x19ca78af,0x71d98e83),
+	PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232  1.00.",0x19ca78af,0x69fb7490),
+	PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232",0x19ca78af,0xb6bc0235),
+	PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.","SERIAL CARD: CF232",0x63f2e0bd,0xb9e175d3),
+	PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.","SERIAL CARD: CF232-5",0x63f2e0bd,0xfce33442),
+	PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF232",0x3beb8cf2,0x171e7190),
+	PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF232-5",0x3beb8cf2,0x20da4262),
+	PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF428",0x3beb8cf2,0xea5dd57d),
+	PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF500",0x3beb8cf2,0xd77255fa),
+	PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: IC232",0x3beb8cf2,0x6a709903),
+	PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: SL232",0x3beb8cf2,0x18430676),
+	PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: XL232",0x3beb8cf2,0x6f933767),
+	PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: CF332",0x3beb8cf2,0x16dc1ba7),
+	PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL332",0x3beb8cf2,0x19816c41),
+	PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL385",0x3beb8cf2,0x64112029),
+	PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
+	PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc),
+	PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: CF332",0x3beb8cf2,0x16dc1ba7),
+	PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL332",0x3beb8cf2,0x19816c41),
+	PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL385",0x3beb8cf2,0x64112029),
+	PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
+	PCMCIA_MFC_DEVICE_PROD_ID12(2,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
+	PCMCIA_MFC_DEVICE_PROD_ID12(3,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
 	/* too generic */
 	/* PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0160, 0x0002), */
 	/* PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0160, 0x0002), */
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index cfcc3ca..3b5f19e 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -775,7 +775,7 @@
 			 *
 			 * Clean this up later..
 			 */
-			clk = clk_get("module_clk");
+			clk = clk_get(NULL, "module_clk");
 			port->uartclk = clk_get_rate(clk) * 16;
 			clk_put(clk);
 		}
@@ -960,7 +960,7 @@
 		default:
 		{
 #if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64)
-			struct clk *clk = clk_get("module_clk");
+			struct clk *clk = clk_get(NULL, "module_clk");
 			t = SCBRR_VALUE(baud, clk_get_rate(clk));
 			clk_put(clk);
 #else
@@ -1128,7 +1128,7 @@
 		 * XXX: We should use a proper SCI/SCIF clock
 		 */
 		{
-			struct clk *clk = clk_get("module_clk");
+			struct clk *clk = clk_get(NULL, "module_clk");
 			sci_ports[i].port.uartclk = clk_get_rate(clk) * 16;
 			clk_put(clk);
 		}
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index 7ee9921..e4557cc 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -133,6 +133,20 @@
 # define SCIF_ORER	0x0001		/* Overrun error bit */
 # define SCSCR_INIT(port)	0x3a	/* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 # define SCIF_ONLY
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+# define SCSPTR0 0xfffe8020 /* 16 bit SCIF */
+# define SCSPTR1 0xfffe8820 /* 16 bit SCIF */
+# define SCSPTR2 0xfffe9020 /* 16 bit SCIF */
+# define SCSPTR3 0xfffe9820 /* 16 bit SCIF */
+# define SCSCR_INIT(port)	0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
+# define SCIF_ONLY
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+# define SCSPTR0 0xf8400020 /* 16 bit SCIF */
+# define SCSPTR1 0xf8410020 /* 16 bit SCIF */
+# define SCSPTR2 0xf8420020 /* 16 bit SCIF */
+# define SCIF_ORER 0x0001  /* overrun error bit */
+# define SCSCR_INIT(port)	0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
+# define SCIF_ONLY
 #else
 # error CPU subtype not defined
 #endif
@@ -365,6 +379,7 @@
 SCIx_FNS(SCxRDR, 0x0a,  8, 0x14,  8, 0x0A,  8, 0x14,  8, 0x05,  8)
 SCIF_FNS(SCFCR,                      0x0c,  8, 0x18, 16)
 #if defined(CONFIG_CPU_SUBTYPE_SH7760) || defined(CONFIG_CPU_SUBTYPE_SH7780)
+SCIF_FNS(SCFDR,			     0x0e, 16, 0x1C, 16)
 SCIF_FNS(SCTFDR,		     0x0e, 16, 0x1C, 16)
 SCIF_FNS(SCRFDR,		     0x0e, 16, 0x20, 16)
 SCIF_FNS(SCSPTR,			0,  0, 0x24, 16)
@@ -544,6 +559,28 @@
 	if (port->mapbase == 0xffe10000)
 		return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
 }
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+static inline int sci_rxd_in(struct uart_port *port)
+{
+	if (port->mapbase == 0xfffe8000)
+		return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+	if (port->mapbase == 0xfffe8800)
+		return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+	if (port->mapbase == 0xfffe9000)
+		return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
+	if (port->mapbase == 0xfffe9800)
+		return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
+}
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+static inline int sci_rxd_in(struct uart_port *port)
+{
+	if (port->mapbase == 0xf8400000)
+		return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+	if (port->mapbase == 0xf8410000)
+		return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+	if (port->mapbase == 0xf8420000)
+		return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
+}
 #endif
 
 /*
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
new file mode 100644
index 0000000..8369065
--- /dev/null
+++ b/drivers/serial/uartlite.c
@@ -0,0 +1,505 @@
+/*
+ * uartlite.c: Serial driver for Xilinx uartlite serial controller
+ *
+ * Peter Korsgaard <jacmet@sunsite.dk>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+
+#define ULITE_MAJOR		204
+#define ULITE_MINOR		187
+#define ULITE_NR_UARTS		4
+
+/* For register details see datasheet:
+   http://www.xilinx.com/bvdocs/ipcenter/data_sheet/opb_uartlite.pdf
+*/
+#define ULITE_RX		0x00
+#define ULITE_TX		0x04
+#define ULITE_STATUS		0x08
+#define ULITE_CONTROL		0x0c
+
+#define ULITE_REGION		16
+
+#define ULITE_STATUS_RXVALID	0x01
+#define ULITE_STATUS_RXFULL	0x02
+#define ULITE_STATUS_TXEMPTY	0x04
+#define ULITE_STATUS_TXFULL	0x08
+#define ULITE_STATUS_IE		0x10
+#define ULITE_STATUS_OVERRUN	0x20
+#define ULITE_STATUS_FRAME	0x40
+#define ULITE_STATUS_PARITY	0x80
+
+#define ULITE_CONTROL_RST_TX	0x01
+#define ULITE_CONTROL_RST_RX	0x02
+#define ULITE_CONTROL_IE	0x10
+
+
+static struct uart_port ports[ULITE_NR_UARTS];
+
+static int ulite_receive(struct uart_port *port, int stat)
+{
+	struct tty_struct *tty = port->info->tty;
+	unsigned char ch = 0;
+	char flag = TTY_NORMAL;
+
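+	/* Nothing received and no error pending? Then there is nothing to do */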
+	if ((stat & (ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
+		     | ULITE_STATUS_FRAME)) == 0)
+		return 0;
+
+	/* stats */
+	if (stat & ULITE_STATUS_RXVALID) {
+		port->icount.rx++;
+		ch = readb(port->membase + ULITE_RX);
+
+		if (stat & ULITE_STATUS_PARITY)
+			port->icount.parity++;
+	}
+
+	if (stat & ULITE_STATUS_OVERRUN)
+		port->icount.overrun++;
+
+	if (stat & ULITE_STATUS_FRAME)
+		port->icount.frame++;
+
+
+	/* drop byte with parity error if IGNPAR specified */
+	if (stat & port->ignore_status_mask & ULITE_STATUS_PARITY)
+		stat &= ~ULITE_STATUS_RXVALID;
+
+	stat &= port->read_status_mask;
+
+	if (stat & ULITE_STATUS_PARITY)
+		flag = TTY_PARITY;
+
+
+	stat &= ~port->ignore_status_mask;
+
+	if (stat & ULITE_STATUS_RXVALID)
+		tty_insert_flip_char(tty, ch, flag);
+
+	if (stat & ULITE_STATUS_FRAME)
+		tty_insert_flip_char(tty, 0, TTY_FRAME);
+
+	if (stat & ULITE_STATUS_OVERRUN)
+		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+
+	return 1;
+}
+
+static int ulite_transmit(struct uart_port *port, int stat)
+{
+	struct circ_buf *xmit  = &port->info->xmit;
+
+	if (stat & ULITE_STATUS_TXFULL)
+		return 0;
+
+	if (port->x_char) {
+		writeb(port->x_char, port->membase + ULITE_TX);
+		port->x_char = 0;
+		port->icount.tx++;
+		return 1;
+	}
+
+	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+		return 0;
+
+	writeb(xmit->buf[xmit->tail], port->membase + ULITE_TX);
+	xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
+	port->icount.tx++;
+
+	/* wake up */
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(port);
+
+	return 1;
+}
+
+static irqreturn_t ulite_isr(int irq, void *dev_id)
+{
+	struct uart_port *port = (struct uart_port *)dev_id;
+	int busy;
+
+	do {
+		int stat = readb(port->membase + ULITE_STATUS);
+		busy  = ulite_receive(port, stat);
+		busy |= ulite_transmit(port, stat);
+	} while (busy);
+
+	tty_flip_buffer_push(port->info->tty);
+
+	return IRQ_HANDLED;
+}
+
+static unsigned int ulite_tx_empty(struct uart_port *port)
+{
+	unsigned long flags;
+	unsigned int ret;
+
+	spin_lock_irqsave(&port->lock, flags);
+	ret = readb(port->membase + ULITE_STATUS);
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int ulite_get_mctrl(struct uart_port *port)
+{
+	return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+}
+
+static void ulite_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	/* N/A */
+}
+
+static void ulite_stop_tx(struct uart_port *port)
+{
+	/* N/A */
+}
+
+static void ulite_start_tx(struct uart_port *port)
+{
+	ulite_transmit(port, readb(port->membase + ULITE_STATUS));
+}
+
+static void ulite_stop_rx(struct uart_port *port)
+{
+	/* don't forward any more data (like !CREAD) */
+	port->ignore_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_PARITY
+		| ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;
+}
+
+static void ulite_enable_ms(struct uart_port *port)
+{
+	/* N/A */
+}
+
+static void ulite_break_ctl(struct uart_port *port, int ctl)
+{
+	/* N/A */
+}
+
+static int ulite_startup(struct uart_port *port)
+{
+	int ret;
+
+	ret = request_irq(port->irq, ulite_isr,
+			  IRQF_DISABLED | IRQF_SAMPLE_RANDOM, "uartlite", port);
+	if (ret)
+		return ret;
+
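+	/* Reset the RX and TX FIFOs, then enable interrupts */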
+	writeb(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX,
+	       port->membase + ULITE_CONTROL);
+	writeb(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+
+	return 0;
+}
+
+static void ulite_shutdown(struct uart_port *port)
+{
+	writeb(0, port->membase + ULITE_CONTROL);
+	readb(port->membase + ULITE_CONTROL); /* dummy */
+	free_irq(port->irq, port);
+}
+
+static void ulite_set_termios(struct uart_port *port, struct termios *termios,
+			      struct termios *old)
+{
+	unsigned long flags;
+	unsigned int baud;
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	port->read_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
+		| ULITE_STATUS_TXFULL;
+
+	if (termios->c_iflag & INPCK)
+		port->read_status_mask |=
+			ULITE_STATUS_PARITY | ULITE_STATUS_FRAME;
+
+	port->ignore_status_mask = 0;
+	if (termios->c_iflag & IGNPAR)
+		port->ignore_status_mask |= ULITE_STATUS_PARITY
+			| ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;
+
+	/* ignore all characters if CREAD is not set */
+	if ((termios->c_cflag & CREAD) == 0)
+		port->ignore_status_mask |=
+			ULITE_STATUS_RXVALID | ULITE_STATUS_PARITY
+			| ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;
+
+	/* update timeout */
+	baud = uart_get_baud_rate(port, termios, old, 0, 460800);
+	uart_update_timeout(port, termios->c_cflag, baud);
+
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *ulite_type(struct uart_port *port)
+{
+	return port->type == PORT_UARTLITE ? "uartlite" : NULL;
+}
+
+static void ulite_release_port(struct uart_port *port)
+{
+	release_mem_region(port->mapbase, ULITE_REGION);
+	iounmap(port->membase);
+	port->membase = 0;
+}
+
+static int ulite_request_port(struct uart_port *port)
+{
+	if (!request_mem_region(port->mapbase, ULITE_REGION, "uartlite")) {
+		dev_err(port->dev, "Memory region busy\n");
+		return -EBUSY;
+	}
+
+	port->membase = ioremap(port->mapbase, ULITE_REGION);
+	if (!port->membase) {
+		dev_err(port->dev, "Unable to map registers\n");
+		release_mem_region(port->mapbase, ULITE_REGION);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void ulite_config_port(struct uart_port *port, int flags)
+{
+	ulite_request_port(port);
+	port->type = PORT_UARTLITE;
+}
+
+static int ulite_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+	/* we don't want the core code to modify any port params */
+	return -EINVAL;
+}
+
+static struct uart_ops ulite_ops = {
+	.tx_empty	= ulite_tx_empty,
+	.set_mctrl	= ulite_set_mctrl,
+	.get_mctrl	= ulite_get_mctrl,
+	.stop_tx	= ulite_stop_tx,
+	.start_tx	= ulite_start_tx,
+	.stop_rx	= ulite_stop_rx,
+	.enable_ms	= ulite_enable_ms,
+	.break_ctl	= ulite_break_ctl,
+	.startup	= ulite_startup,
+	.shutdown	= ulite_shutdown,
+	.set_termios	= ulite_set_termios,
+	.type		= ulite_type,
+	.release_port	= ulite_release_port,
+	.request_port	= ulite_request_port,
+	.config_port	= ulite_config_port,
+	.verify_port	= ulite_verify_port
+};
+
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+static void ulite_console_wait_tx(struct uart_port *port)
+{
+	int i;
+
+	/* wait up to 10ms for the character(s) to be sent */
+	for (i = 0; i < 10000; i++) {
+		if (readb(port->membase + ULITE_STATUS) & ULITE_STATUS_TXEMPTY)
+			break;
+		udelay(1);
+	}
+}
+
+static void ulite_console_putchar(struct uart_port *port, int ch)
+{
+	ulite_console_wait_tx(port);
+	writeb(ch, port->membase + ULITE_TX);
+}
+
+static void ulite_console_write(struct console *co, const char *s,
+				unsigned int count)
+{
+	struct uart_port *port = &ports[co->index];
+	unsigned long flags;
+	unsigned int ier;
+	int locked = 1;
+
+	if (oops_in_progress) {
+		locked = spin_trylock_irqsave(&port->lock, flags);
+	} else
+		spin_lock_irqsave(&port->lock, flags);
+
+	/* save and disable interrupt */
+	ier = readb(port->membase + ULITE_STATUS) & ULITE_STATUS_IE;
+	writeb(0, port->membase + ULITE_CONTROL);
+
+	uart_console_write(port, s, count, ulite_console_putchar);
+
+	ulite_console_wait_tx(port);
+
+	/* restore interrupt state */
+	if (ier)
+		writeb(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+
+	if (locked)
+		spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int __init ulite_console_setup(struct console *co, char *options)
+{
+	struct uart_port *port;
+	int baud = 9600;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+
+	if (co->index < 0 || co->index >= ULITE_NR_UARTS)
+		return -EINVAL;
+
+	port = &ports[co->index];
+
+	/* not initialized yet? */
+	if (!port->membase)
+		return -ENODEV;
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+	return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver ulite_uart_driver;
+
+static struct console ulite_console = {
+	.name	= "ttyUL",
+	.write	= ulite_console_write,
+	.device	= uart_console_device,
+	.setup	= ulite_console_setup,
+	.flags	= CON_PRINTBUFFER,
+	.index	= -1, /* Specified on the cmdline (e.g. console=ttyUL0 ) */
+	.data	= &ulite_uart_driver,
+};
+
+static int __init ulite_console_init(void)
+{
+	register_console(&ulite_console);
+	return 0;
+}
+
+console_initcall(ulite_console_init);
+
+#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */
+
+static struct uart_driver ulite_uart_driver = {
+	.owner		= THIS_MODULE,
+	.driver_name	= "uartlite",
+	.dev_name	= "ttyUL",
+	.major		= ULITE_MAJOR,
+	.minor		= ULITE_MINOR,
+	.nr		= ULITE_NR_UARTS,
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+	.cons		= &ulite_console,
+#endif
+};
+
+static int __devinit ulite_probe(struct platform_device *pdev)
+{
+	struct resource *res, *res2;
+	struct uart_port *port;
+
+	if (pdev->id < 0 || pdev->id >= ULITE_NR_UARTS)
+		return -EINVAL;
+
+	if (ports[pdev->id].membase)
+		return -EBUSY;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res2)
+		return -ENODEV;
+
+	port = &ports[pdev->id];
+
+	port->fifosize	= 16;
+	port->regshift	= 2;
+	port->iotype	= UPIO_MEM;
+	port->iobase	= 1; /* mark port in use */
+	port->mapbase	= res->start;
+	port->membase	= 0;
+	port->ops	= &ulite_ops;
+	port->irq	= res2->start;
+	port->flags	= UPF_BOOT_AUTOCONF;
+	port->dev	= &pdev->dev;
+	port->type	= PORT_UNKNOWN;
+	port->line	= pdev->id;
+
+	uart_add_one_port(&ulite_uart_driver, port);
+	platform_set_drvdata(pdev, port);
+
+	return 0;
+}
+
+static int ulite_remove(struct platform_device *pdev)
+{
+	struct uart_port *port = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	if (port) {
+		uart_remove_one_port(&ulite_uart_driver, port);
+
+		/* mark port as free */
+		port->membase = 0;
+	}
+
+	return 0;
+}
+
+static struct platform_driver ulite_platform_driver = {
+	.probe	= ulite_probe,
+	.remove	= ulite_remove,
+	.driver	= {
+		   .owner = THIS_MODULE,
+		   .name  = "uartlite",
+		   },
+};
+
+int __init ulite_init(void)
+{
+	int ret;
+
+	ret = uart_register_driver(&ulite_uart_driver);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&ulite_platform_driver);
+	if (ret)
+		uart_unregister_driver(&ulite_uart_driver);
+
+	return ret;
+}
+
+void __exit ulite_exit(void)
+{
+	platform_driver_unregister(&ulite_platform_driver);
+	uart_unregister_driver(&ulite_uart_driver);
+}
+
+module_init(ulite_init);
+module_exit(ulite_exit);
+
+MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
+MODULE_DESCRIPTION("Xilinx uartlite serial driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 23334c8..d895a1a 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -16,7 +16,7 @@
 	  controller and a chipselect.  Most SPI slaves don't support
 	  dynamic device discovery; some are even write-only or read-only.
 
-	  SPI is widely used by microcontollers to talk with sensors,
+	  SPI is widely used by microcontrollers to talk with sensors,
 	  eeprom and flash memory, codecs and various other controller
 	  chips, analog to digital (and d-to-a) converters, and more.
 	  MMC and SD cards can be accessed using SPI protocol; and for
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 72025df..494d9b8 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -148,7 +148,7 @@
 	void (*cs_control)(u32 command);
 };
 
-static void pump_messages(void *data);
+static void pump_messages(struct work_struct *work);
 
 static int flush(struct driver_data *drv_data)
 {
@@ -884,9 +884,10 @@
 	}
 }
 
-static void pump_messages(void *data)
+static void pump_messages(struct work_struct *work)
 {
-	struct driver_data *drv_data = data;
+	struct driver_data *drv_data =
+		container_of(work, struct driver_data, pump_messages);
 	unsigned long flags;
 
 	/* Lock queue and check for queue work */
@@ -1098,7 +1099,7 @@
 	tasklet_init(&drv_data->pump_transfers,
 			pump_transfers,	(unsigned long)drv_data);
 
-	INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data);
+	INIT_WORK(&drv_data->pump_messages, pump_messages);
 	drv_data->workqueue = create_singlethread_workqueue(
 					drv_data->master->cdev.dev->bus_id);
 	if (drv_data->workqueue == NULL)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c3c0626..270e621 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -360,12 +360,13 @@
 	if (!dev)
 		return NULL;
 
-	master = kzalloc(size + sizeof *master, SLAB_KERNEL);
+	master = kzalloc(size + sizeof *master, GFP_KERNEL);
 	if (!master)
 		return NULL;
 
 	class_device_initialize(&master->cdev);
 	master->cdev.class = &spi_master_class;
+	kobj_set_kset_s(&master->cdev, spi_master_class.subsys);
 	master->cdev.dev = get_device(dev);
 	spi_master_set_devdata(master, &master[1]);
 
@@ -447,7 +448,9 @@
  */
 void spi_unregister_master(struct spi_master *master)
 {
-	(void) device_for_each_child(master->cdev.dev, NULL, __unregister);
+	int dummy;
+
+	dummy = device_for_each_child(master->cdev.dev, NULL, __unregister);
 	class_device_unregister(&master->cdev);
 }
 EXPORT_SYMBOL_GPL(spi_unregister_master);
@@ -463,15 +466,13 @@
  */
 struct spi_master *spi_busnum_to_master(u16 bus_num)
 {
-	if (bus_num) {
-		char			name[8];
-		struct kobject		*bus;
+	char			name[9];
+	struct kobject		*bus;
 
-		snprintf(name, sizeof name, "spi%u", bus_num);
-		bus = kset_find_obj(&spi_master_class.subsys.kset, name);
-		if (bus)
-			return container_of(bus, struct spi_master, cdev.kobj);
-	}
+	snprintf(name, sizeof name, "spi%u", bus_num);
+	bus = kset_find_obj(&spi_master_class.subsys.kset, name);
+	if (bus)
+		return container_of(bus, struct spi_master, cdev.kobj);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
@@ -607,7 +608,7 @@
 {
 	int	status;
 
-	buf = kmalloc(SPI_BUFSIZ, SLAB_KERNEL);
+	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
 	if (!buf) {
 		status = -ENOMEM;
 		goto err0;
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index a23862e..57289b6 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -196,7 +196,7 @@
 		return -EINVAL;
 
 	if (!cs) {
-		cs = kzalloc(sizeof *cs, SLAB_KERNEL);
+		cs = kzalloc(sizeof *cs, GFP_KERNEL);
 		if (!cs)
 			return -ENOMEM;
 		spi->controller_state = cs;
@@ -265,9 +265,10 @@
  * Drivers can provide word-at-a-time i/o primitives, or provide
  * transfer-at-a-time ones to leverage dma or fifo hardware.
  */
-static void bitbang_work(void *_bitbang)
+static void bitbang_work(struct work_struct *work)
 {
-	struct spi_bitbang	*bitbang = _bitbang;
+	struct spi_bitbang	*bitbang =
+		container_of(work, struct spi_bitbang, work);
 	unsigned long		flags;
 
 	spin_lock_irqsave(&bitbang->lock, flags);
@@ -456,7 +457,7 @@
 	if (!bitbang->master || !bitbang->chipselect)
 		return -EINVAL;
 
-	INIT_WORK(&bitbang->work, bitbang_work, bitbang);
+	INIT_WORK(&bitbang->work, bitbang_work);
 	spin_lock_init(&bitbang->lock);
 	INIT_LIST_HEAD(&bitbang->queue);
 
diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi_butterfly.c
index 39d9b20..312987a 100644
--- a/drivers/spi/spi_butterfly.c
+++ b/drivers/spi/spi_butterfly.c
@@ -23,6 +23,7 @@
 #include <linux/platform_device.h>
 #include <linux/parport.h>
 
+#include <linux/sched.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/spi_bitbang.h>
 #include <linux/spi/flash.h>
@@ -250,6 +251,8 @@
 	 * setting up a platform device like this is an ugly kluge...
 	 */
 	pdev = platform_device_register_simple("butterfly", -1, NULL, 0);
+	if (IS_ERR(pdev))
+		return;
 
 	master = spi_alloc_master(&pdev->dev, sizeof *pp);
 	if (!master) {
diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/telephony/ixj_pcmcia.c
index dda0ca4..164a5dc 100644
--- a/drivers/telephony/ixj_pcmcia.c
+++ b/drivers/telephony/ixj_pcmcia.c
@@ -69,25 +69,21 @@
 
 static void ixj_get_serial(struct pcmcia_device * link, IXJ * j)
 {
-	tuple_t tuple;
-	u_short buf[128];
 	char *str;
-	int last_ret, last_fn, i, place;
+	int i, place;
 	DEBUG(0, "ixj_get_serial(0x%p)\n", link);
-	tuple.TupleData = (cisdata_t *) buf;
-	tuple.TupleOffset = 0;
-	tuple.TupleDataMax = 80;
-	tuple.Attributes = 0;
-	tuple.DesiredTuple = CISTPL_VERS_1;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	str = (char *) buf;
-	printk("PCMCIA Version %d.%d\n", str[0], str[1]);
-	str += 2;
+
+	str = link->prod_id[0];
+	if (!str)
+		goto cs_failed;
 	printk("%s", str);
-	str = str + strlen(str) + 1;
+	str = link->prod_id[1];
+	if (!str)
+		goto cs_failed;
 	printk(" %s", str);
-	str = str + strlen(str) + 1;
+	str = link->prod_id[2];
+	if (!str)
+		goto cs_failed;
 	place = 1;
 	for (i = strlen(str) - 1; i >= 0; i--) {
 		switch (str[i]) {
@@ -122,7 +118,9 @@
 		}
 		place = place * 0x10;
 	}
-	str = str + strlen(str) + 1;
+	str = link->prod_id[3];
+	if (!str)
+		goto cs_failed;
 	printk(" version %s\n", str);
       cs_failed:
 	return;
@@ -146,13 +144,6 @@
 	tuple.TupleData = (cisdata_t *) buf;
 	tuple.TupleOffset = 0;
 	tuple.TupleDataMax = 255;
-	tuple.Attributes = 0;
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present = parse.config.rmask[0];
 	tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
 	tuple.Attributes = 0;
 	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index e656563..3dfa3e4 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -158,7 +158,7 @@
 	const struct cxacru_modem_type *modem_type;
 
 	int line_status;
-	struct work_struct poll_work;
+	struct delayed_work poll_work;
 
 	/* contol handles */
 	struct mutex cm_serialize;
@@ -347,7 +347,7 @@
 	return 0;
 }
 
-static void cxacru_poll_status(struct cxacru_data *instance);
+static void cxacru_poll_status(struct work_struct *work);
 
 static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
 		struct atm_dev *atm_dev)
@@ -376,12 +376,14 @@
 	}
 
 	/* Start status polling */
-	cxacru_poll_status(instance);
+	cxacru_poll_status(&instance->poll_work.work);
 	return 0;
 }
 
-static void cxacru_poll_status(struct cxacru_data *instance)
+static void cxacru_poll_status(struct work_struct *work)
 {
+	struct cxacru_data *instance =
+		container_of(work, struct cxacru_data, poll_work.work);
 	u32 buf[CXINF_MAX] = {};
 	struct usbatm_data *usbatm = instance->usbatm;
 	struct atm_dev *atm_dev = usbatm->atm_dev;
@@ -720,7 +722,7 @@
 
 	mutex_init(&instance->cm_serialize);
 
-	INIT_WORK(&instance->poll_work, (void *)cxacru_poll_status, instance);
+	INIT_DELAYED_WORK(&instance->poll_work, cxacru_poll_status);
 
 	usbatm_instance->driver_data = instance;
 
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index c870c80..8ed6c75 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -142,7 +142,7 @@
 
 	struct speedtch_params params; /* set in probe, constant afterwards */
 
-	struct work_struct status_checker;
+	struct delayed_work status_checker;
 
 	unsigned char last_status;
 
@@ -498,8 +498,11 @@
 	return ret;
 }
 
-static void speedtch_check_status(struct speedtch_instance_data *instance)
+static void speedtch_check_status(struct work_struct *work)
 {
+	struct speedtch_instance_data *instance =
+		container_of(work, struct speedtch_instance_data,
+			     status_checker.work);
 	struct usbatm_data *usbatm = instance->usbatm;
 	struct atm_dev *atm_dev = usbatm->atm_dev;
 	unsigned char *buf = instance->scratch_buffer;
@@ -576,7 +579,7 @@
 {
 	struct speedtch_instance_data *instance = (void *)data;
 
-	schedule_work(&instance->status_checker);
+	schedule_delayed_work(&instance->status_checker, 0);
 
 	/* The following check is racy, but the race is harmless */
 	if (instance->poll_delay < MAX_POLL_DELAY)
@@ -596,7 +599,7 @@
 	if (int_urb) {
 		ret = usb_submit_urb(int_urb, GFP_ATOMIC);
 		if (!ret)
-			schedule_work(&instance->status_checker);
+			schedule_delayed_work(&instance->status_checker, 0);
 		else {
 			atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
 			mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
@@ -640,7 +643,7 @@
 
 	if ((int_urb = instance->int_urb)) {
 		ret = usb_submit_urb(int_urb, GFP_ATOMIC);
-		schedule_work(&instance->status_checker);
+		schedule_delayed_work(&instance->status_checker, 0);
 		if (ret < 0) {
 			atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
 			goto fail;
@@ -834,8 +837,8 @@
 			const struct usb_endpoint_descriptor *endpoint_desc = &desc->endpoint[i].desc;
 
 			if ((endpoint_desc->bEndpointAddress == target_address)) {
-				use_isoc = (endpoint_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
-					USB_ENDPOINT_XFER_ISOC;
+				use_isoc =
+					usb_endpoint_xfer_isoc(endpoint_desc);
 				break;
 			}
 		}
@@ -855,7 +858,7 @@
 
 	usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
 
-	INIT_WORK(&instance->status_checker, (void *)speedtch_check_status, instance);
+	INIT_DELAYED_WORK(&instance->status_checker, speedtch_check_status);
 
 	instance->status_checker.timer.function = speedtch_status_poll;
 	instance->status_checker.timer.data = (unsigned long)instance;
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index f6b9f7e..dae4ef1 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -64,6 +64,8 @@
 #include <linux/kthread.h>
 #include <linux/version.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
+
 #include <asm/unaligned.h>
 
 #include "usbatm.h"
@@ -401,9 +403,8 @@
 	int ret = -ENOMEM;
 	u8 *xfer_buff;
 
-	xfer_buff = kmalloc(size, GFP_KERNEL);
+	xfer_buff = kmemdup(buff, size, GFP_KERNEL);
 	if (xfer_buff) {
-		memcpy(xfer_buff, buff, size);
 		ret = usb_control_msg(usb,
 				      usb_sndctrlpipe(usb, 0),
 				      LOAD_INTERNAL,
@@ -595,14 +596,12 @@
 	u8 *xfer_buff;
 	int bytes_read;
 
-	xfer_buff = kmalloc(size, GFP_KERNEL);
+	xfer_buff = kmemdup(data, size, GFP_KERNEL);
 	if (!xfer_buff) {
 		uea_err(INS_TO_USBDEV(sc), "can't allocate xfer_buff\n");
 		return ret;
 	}
 
-	memcpy(xfer_buff, data, size);
-
 	ret = usb_bulk_msg(sc->usb_dev,
 			 usb_sndbulkpipe(sc->usb_dev, UEA_IDMA_PIPE),
 			 xfer_buff, size, &bytes_read, BULK_TIMEOUT);
@@ -658,9 +657,9 @@
 /*
  * The uea_load_page() function must be called within a process context
  */
-static void uea_load_page(void *xsc)
+static void uea_load_page(struct work_struct *work)
 {
-	struct uea_softc *sc = xsc;
+	struct uea_softc *sc = container_of(work, struct uea_softc, task);
 	u16 pageno = sc->pageno;
 	u16 ovl = sc->ovl;
 	struct block_info bi;
@@ -765,12 +764,11 @@
 	u8 *xfer_buff;
 	int ret = -ENOMEM;
 
-	xfer_buff = kmalloc(size, GFP_KERNEL);
+	xfer_buff = kmemdup(data, size, GFP_KERNEL);
 	if (!xfer_buff) {
 		uea_err(INS_TO_USBDEV(sc), "can't allocate xfer_buff\n");
 		return ret;
 	}
-	memcpy(xfer_buff, data, size);
 
 	ret = usb_control_msg(sc->usb_dev, usb_sndctrlpipe(sc->usb_dev, 0),
 			      UCDC_SEND_ENCAPSULATED_COMMAND,
@@ -1352,7 +1350,7 @@
 
 	uea_enters(INS_TO_USBDEV(sc));
 
-	INIT_WORK(&sc->task, uea_load_page, sc);
+	INIT_WORK(&sc->task, uea_load_page);
 	init_waitqueue_head(&sc->sync_q);
 	init_waitqueue_head(&sc->cmv_ack_wait);
 
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 9a9012f..7f1fa95 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -421,9 +421,9 @@
 		schedule_work(&acm->work);
 }
 
-static void acm_softint(void *private)
+static void acm_softint(struct work_struct *work)
 {
-	struct acm *acm = private;
+	struct acm *acm = container_of(work, struct acm, work);
 	dbg("Entering acm_softint.");
 	
 	if (!ACM_READY(acm))
@@ -892,7 +892,7 @@
 
 
 	/* workaround for switched endpoints */
-	if ((epread->bEndpointAddress & USB_DIR_IN) != USB_DIR_IN) {
+	if (!usb_endpoint_dir_in(epread)) {
 		/* descriptors are swapped */
 		struct usb_endpoint_descriptor *t;
 		dev_dbg(&intf->dev,"The data interface has switched endpoints");
@@ -927,7 +927,7 @@
 	acm->rx_buflimit = num_rx_buf;
 	acm->urb_task.func = acm_rx_tasklet;
 	acm->urb_task.data = (unsigned long) acm;
-	INIT_WORK(&acm->work, acm_softint, acm);
+	INIT_WORK(&acm->work, acm_softint);
 	spin_lock_init(&acm->throttle_lock);
 	spin_lock_init(&acm->write_lock);
 	spin_lock_init(&acm->read_lock);
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 6e3b535..f8324d8 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -72,6 +72,21 @@
 
 	  If you are unsure about this, say N here.
 
+config USB_MULTITHREAD_PROBE
+	bool "USB Multi-threaded probe (EXPERIMENTAL)"
+	depends on USB && EXPERIMENTAL
+	default n
+	help
+	  Say Y here if you want the USB core to spawn a new thread for
+	  every USB device that is probed.  This can cause a small speedup
+	  in boot times on systems with a lot of different USB devices.
+
+	  This option should be safe to enable, but if any odd probing
+	  problems are found, please disable it, or dynamically turn it
+	  off in the /sys/module/usbcore/parameters/multithread_probe
+	  file.
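+
+	  For example, "echo 0 > /sys/module/usbcore/parameters/multithread_probe"
+	  (as root) disables it at run time.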
+
+	  When in doubt, say N.
 
 config USB_OTG
 	bool
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 840442a..c3915dc 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -93,7 +93,7 @@
 }
 
 
-/* sometimes alloc/free could use kmalloc with SLAB_DMA, for
+/* sometimes alloc/free could use kmalloc with GFP_DMA, for
  * better sharing and to leverage mm/slab.c intelligence.
  */
 
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 3538c2f..ea398e5 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -175,12 +175,13 @@
 )
 {
 	char dir, unit, *type;
-	unsigned interval, in, bandwidth = 1;
+	unsigned interval, bandwidth = 1;
 
 	if (start > end)
 		return start;
-	in = (desc->bEndpointAddress & USB_DIR_IN);
-	dir = in ? 'I' : 'O';
+
+	dir = usb_endpoint_dir_in(desc) ? 'I' : 'O';
+
 	if (speed == USB_SPEED_HIGH) {
 		switch (le16_to_cpu(desc->wMaxPacketSize) & (0x03 << 11)) {
 		case 1 << 11:	bandwidth = 2; break;
@@ -204,7 +205,7 @@
 		break;
 	case USB_ENDPOINT_XFER_BULK:
 		type = "Bulk";
-		if (speed == USB_SPEED_HIGH && !in)	/* uframes per NAK */
+		if (speed == USB_SPEED_HIGH && dir == 'O') /* uframes per NAK */
 			interval = desc->bInterval;
 		else
 			interval = 0;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index fed92be..3ed4cb2 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -561,7 +561,7 @@
 		dev = inode->i_private;
 	if (!dev)
 		goto out;
-	ret = usb_autoresume_device(dev, 1);
+	ret = usb_autoresume_device(dev);
 	if (ret)
 		goto out;
 
@@ -609,7 +609,7 @@
 			releaseintf(ps, ifnum);
 	}
 	destroy_all_async(ps);
-	usb_autosuspend_device(dev, 1);
+	usb_autosuspend_device(dev);
 	usb_unlock_device(dev);
 	usb_put_dev(dev);
 	put_pid(ps->disc_pid);
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 113e484..d6eb5ce 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -205,7 +205,7 @@
 	if (id) {
 		dev_dbg(dev, "%s - got id\n", __FUNCTION__);
 
-		error = usb_autoresume_device(udev, 1);
+		error = usb_autoresume_device(udev);
 		if (error)
 			return error;
 
@@ -229,7 +229,7 @@
 		} else
 			intf->condition = USB_INTERFACE_BOUND;
 
-		usb_autosuspend_device(udev, 1);
+		usb_autosuspend_device(udev);
 	}
 
 	return error;
@@ -247,7 +247,7 @@
 
 	/* Autoresume for set_interface call below */
 	udev = interface_to_usbdev(intf);
-	error = usb_autoresume_device(udev, 1);
+	error = usb_autoresume_device(udev);
 
 	/* release all urbs for this interface */
 	usb_disable_interface(interface_to_usbdev(intf), intf);
@@ -265,7 +265,7 @@
 	intf->needs_remote_wakeup = 0;
 
 	if (!error)
-		usb_autosuspend_device(udev, 1);
+		usb_autosuspend_device(udev);
 
 	return 0;
 }
@@ -408,6 +408,16 @@
 	    (id->bDeviceProtocol != dev->descriptor.bDeviceProtocol))
 		return 0;
 
+	/* The interface class, subclass, and protocol should never be
+	 * checked for a match if the device class is Vendor Specific,
+	 * unless the match record specifies the Vendor ID. */
+	if (dev->descriptor.bDeviceClass == USB_CLASS_VENDOR_SPEC &&
+			!(id->match_flags & USB_DEVICE_ID_MATCH_VENDOR) &&
+			(id->match_flags & (USB_DEVICE_ID_MATCH_INT_CLASS |
+				USB_DEVICE_ID_MATCH_INT_SUBCLASS |
+				USB_DEVICE_ID_MATCH_INT_PROTOCOL)))
+		return 0;
+
 	if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_CLASS) &&
 	    (id->bInterfaceClass != intf->desc.bInterfaceClass))
 		return 0;
@@ -476,7 +486,17 @@
  * most general; they let drivers bind to any interface on a
  * multiple-function device.  Use the USB_INTERFACE_INFO
  * macro, or its siblings, to match class-per-interface style
- * devices (as recorded in bDeviceClass).
+ * devices (as recorded in bInterfaceClass).
+ *
+ * Note that an entry created by USB_INTERFACE_INFO won't match
+ * any interface if the device class is set to Vendor-Specific.
+ * This is deliberate; according to the USB spec the meanings of
+ * the interface class/subclass/protocol for these devices are also
+ * vendor-specific, and hence matching against a standard product
+ * class wouldn't work anyway.  If you really want to use an
+ * interface-based match for such a device, create a match record
+ * that also specifies the vendor ID.  (Unfortunately there isn't a
+ * standard macro for creating records like this.)
  *
  * Within those groups, remember that not all combinations are
  * meaningful.  For example, don't give a product version range
@@ -505,7 +525,7 @@
 }
 EXPORT_SYMBOL_GPL_FUTURE(usb_match_id);
 
-int usb_device_match(struct device *dev, struct device_driver *drv)
+static int usb_device_match(struct device *dev, struct device_driver *drv)
 {
 	/* devices and interfaces are handled separately */
 	if (is_usb_device(dev)) {
@@ -790,7 +810,7 @@
 #ifdef CONFIG_PM
 
 /* Caller has locked udev's pm_mutex */
-static int suspend_device(struct usb_device *udev, pm_message_t msg)
+static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
 {
 	struct usb_device_driver	*udriver;
 	int				status = 0;
@@ -817,7 +837,7 @@
 }
 
 /* Caller has locked udev's pm_mutex */
-static int resume_device(struct usb_device *udev)
+static int usb_resume_device(struct usb_device *udev)
 {
 	struct usb_device_driver	*udriver;
 	int				status = 0;
@@ -843,7 +863,7 @@
 }
 
 /* Caller has locked intf's usb_device's pm mutex */
-static int suspend_interface(struct usb_interface *intf, pm_message_t msg)
+static int usb_suspend_interface(struct usb_interface *intf, pm_message_t msg)
 {
 	struct usb_driver	*driver;
 	int			status = 0;
@@ -880,7 +900,7 @@
 }
 
 /* Caller has locked intf's usb_device's pm_mutex */
-static int resume_interface(struct usb_interface *intf)
+static int usb_resume_interface(struct usb_interface *intf)
 {
 	struct usb_driver	*driver;
 	int			status = 0;
@@ -920,6 +940,44 @@
 	return status;
 }
 
+#ifdef	CONFIG_USB_SUSPEND
+
+/* Internal routine to check whether we may autosuspend a device. */
+static int autosuspend_check(struct usb_device *udev)
+{
+	int			i;
+	struct usb_interface	*intf;
+
+	/* For autosuspend, fail fast if anything is in use.
+	 * Also fail if any interfaces require remote wakeup but it
+	 * isn't available. */
+	udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
+	if (udev->pm_usage_cnt > 0)
+		return -EBUSY;
+	if (udev->actconfig) {
+		for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
+			intf = udev->actconfig->interface[i];
+			if (!is_active(intf))
+				continue;
+			if (intf->pm_usage_cnt > 0)
+				return -EBUSY;
+			if (intf->needs_remote_wakeup &&
+					!udev->do_remote_wakeup) {
+				dev_dbg(&udev->dev, "remote wakeup needed "
+						"for autosuspend\n");
+				return -EOPNOTSUPP;
+			}
+		}
+	}
+	return 0;
+}
+
+#else
+
+#define autosuspend_check(udev)		0
+
+#endif
+
 /**
  * usb_suspend_both - suspend a USB device and its interfaces
  * @udev: the usb_device to suspend
@@ -971,52 +1029,34 @@
 
 	udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
 
-	/* For autosuspend, fail fast if anything is in use.
-	 * Also fail if any interfaces require remote wakeup but it
-	 * isn't available. */
 	if (udev->auto_pm) {
-		if (udev->pm_usage_cnt > 0)
-			return -EBUSY;
-		if (udev->actconfig) {
-			for (; i < udev->actconfig->desc.bNumInterfaces; i++) {
-				intf = udev->actconfig->interface[i];
-				if (!is_active(intf))
-					continue;
-				if (intf->pm_usage_cnt > 0)
-					return -EBUSY;
-				if (intf->needs_remote_wakeup &&
-						!udev->do_remote_wakeup) {
-					dev_dbg(&udev->dev,
-	"remote wakeup needed for autosuspend\n");
-					return -EOPNOTSUPP;
-				}
-			}
-			i = 0;
-		}
+		status = autosuspend_check(udev);
+		if (status < 0)
+			return status;
 	}
 
 	/* Suspend all the interfaces and then udev itself */
 	if (udev->actconfig) {
 		for (; i < udev->actconfig->desc.bNumInterfaces; i++) {
 			intf = udev->actconfig->interface[i];
-			status = suspend_interface(intf, msg);
+			status = usb_suspend_interface(intf, msg);
 			if (status != 0)
 				break;
 		}
 	}
 	if (status == 0)
-		status = suspend_device(udev, msg);
+		status = usb_suspend_device(udev, msg);
 
 	/* If the suspend failed, resume interfaces that did get suspended */
 	if (status != 0) {
 		while (--i >= 0) {
 			intf = udev->actconfig->interface[i];
-			resume_interface(intf);
+			usb_resume_interface(intf);
 		}
 
 	/* If the suspend succeeded, propagate it up the tree */
 	} else if (parent)
-		usb_autosuspend_device(parent, 0);
+		usb_autosuspend_device(parent);
 
 	// dev_dbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status);
 	return status;
@@ -1064,9 +1104,25 @@
 	/* Propagate the resume up the tree, if necessary */
 	if (udev->state == USB_STATE_SUSPENDED) {
 		if (parent) {
-			usb_pm_lock(parent);
-			parent->auto_pm = 1;
-			status = usb_resume_both(parent);
+			status = usb_autoresume_device(parent);
+			if (status == 0) {
+				status = usb_resume_device(udev);
+				if (status) {
+					usb_autosuspend_device(parent);
+
+					/* It's possible usb_resume_device()
+					 * failed after the port was
+					 * unsuspended, causing udev to be
+					 * logically disconnected.  We don't
+					 * want usb_disconnect() to autosuspend
+					 * the parent again, so tell it that
+					 * udev disconnected while still
+					 * suspended. */
+					if (udev->state ==
+							USB_STATE_NOTATTACHED)
+						udev->discon_suspended = 1;
+				}
+			}
 		} else {
 
 			/* We can't progagate beyond the USB subsystem,
@@ -1075,24 +1131,20 @@
 			if (udev->dev.parent->power.power_state.event !=
 					PM_EVENT_ON)
 				status = -EHOSTUNREACH;
-		}
-		if (status == 0)
-			status = resume_device(udev);
-		if (parent)
-			usb_pm_unlock(parent);
+			else
+				status = usb_resume_device(udev);
+		}
 	} else {
 
 		/* Needed only for setting udev->dev.power.power_state.event
 		 * and for possible debugging message. */
-		status = resume_device(udev);
+		status = usb_resume_device(udev);
 	}
 
-	/* Now the parent won't suspend until we are finished */
-
 	if (status == 0 && udev->actconfig) {
 		for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
 			intf = udev->actconfig->interface[i];
-			resume_interface(intf);
+			usb_resume_interface(intf);
 		}
 	}
 
@@ -1102,39 +1154,53 @@
 
 #ifdef CONFIG_USB_SUSPEND
 
+/* Internal routine to adjust a device's usage counter and change
+ * its autosuspend state.
+ */
+static int usb_autopm_do_device(struct usb_device *udev, int inc_usage_cnt)
+{
+	int	status = 0;
+
+	usb_pm_lock(udev);
+	udev->pm_usage_cnt += inc_usage_cnt;
+	WARN_ON(udev->pm_usage_cnt < 0);
+	if (inc_usage_cnt >= 0 && udev->pm_usage_cnt > 0) {
+		udev->auto_pm = 1;
+		status = usb_resume_both(udev);
+		if (status != 0)
+			udev->pm_usage_cnt -= inc_usage_cnt;
+	} else if (inc_usage_cnt <= 0 && autosuspend_check(udev) == 0)
+		queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
+				USB_AUTOSUSPEND_DELAY);
+	usb_pm_unlock(udev);
+	return status;
+}
+
 /**
  * usb_autosuspend_device - delayed autosuspend of a USB device and its interfaces
  * @udev: the usb_device to autosuspend
- * @dec_usage_cnt: flag to decrement @udev's PM-usage counter
  *
  * This routine should be called when a core subsystem is finished using
  * @udev and wants to allow it to autosuspend.  Examples would be when
  * @udev's device file in usbfs is closed or after a configuration change.
  *
- * @dec_usage_cnt should be 1 if the subsystem previously incremented
- * @udev's usage counter (such as by passing 1 to usb_autoresume_device);
- * otherwise it should be 0.
- *
- * If the usage counter for @udev or any of its active interfaces is greater
- * than 0, the autosuspend request will not be queued.  (If an interface
- * driver does not support autosuspend then its usage counter is permanently
- * positive.)  Likewise, if an interface driver requires remote-wakeup
- * capability during autosuspend but remote wakeup is disabled, the
- * autosuspend will fail.
+ * @udev's usage counter is decremented.  If it or any of the usage counters
+ * for an active interface is greater than 0, no autosuspend request will be
+ * queued.  (If an interface driver does not support autosuspend then its
+ * usage counter is permanently positive.)  Furthermore, if an interface
+ * driver requires remote-wakeup capability during autosuspend but remote
+ * wakeup is disabled, the autosuspend will fail.
  *
  * Often the caller will hold @udev's device lock, but this is not
  * necessary.
  *
  * This routine can run only in process context.
  */
-void usb_autosuspend_device(struct usb_device *udev, int dec_usage_cnt)
+void usb_autosuspend_device(struct usb_device *udev)
 {
-	usb_pm_lock(udev);
-	udev->pm_usage_cnt -= dec_usage_cnt;
-	if (udev->pm_usage_cnt <= 0)
-		queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
-				USB_AUTOSUSPEND_DELAY);
-	usb_pm_unlock(udev);
+	int	status;
+
+	status = usb_autopm_do_device(udev, -1);
 	// dev_dbg(&udev->dev, "%s: cnt %d\n",
 	//		__FUNCTION__, udev->pm_usage_cnt);
 }
@@ -1142,44 +1208,59 @@
 /**
  * usb_autoresume_device - immediately autoresume a USB device and its interfaces
  * @udev: the usb_device to autoresume
- * @inc_usage_cnt: flag to increment @udev's PM-usage counter
  *
  * This routine should be called when a core subsystem wants to use @udev
- * and needs to guarantee that it is not suspended.  In addition, the
- * caller can prevent @udev from being autosuspended subsequently.  (Note
- * that this will not prevent suspend events originating in the PM core.)
- * Examples would be when @udev's device file in usbfs is opened (autosuspend
- * should be prevented until the file is closed) or when a remote-wakeup
- * request is received (later autosuspends should not be prevented).
+ * and needs to guarantee that it is not suspended.  No autosuspend will
+ * occur until usb_autosuspend_device is called.  (Note that this will not
+ * prevent suspend events originating in the PM core.)  Examples would be
+ * when @udev's device file in usbfs is opened or when a remote-wakeup
+ * request is received.
  *
- * @inc_usage_cnt should be 1 to increment @udev's usage counter and prevent
- * autosuspends.  This prevention will persist until the usage counter is
- * decremented again (such as by passing 1 to usb_autosuspend_device).
- * Otherwise @inc_usage_cnt should be 0 to leave the usage counter unchanged.
- * Regardless, if the autoresume fails then the usage counter is not
- * incremented.
+ * @udev's usage counter is incremented to prevent subsequent autosuspends.
+ * However if the autoresume fails then the usage counter is re-decremented.
  *
  * Often the caller will hold @udev's device lock, but this is not
  * necessary (and attempting it might cause deadlock).
  *
  * This routine can run only in process context.
  */
-int usb_autoresume_device(struct usb_device *udev, int inc_usage_cnt)
+int usb_autoresume_device(struct usb_device *udev)
 {
 	int	status;
 
-	usb_pm_lock(udev);
-	udev->pm_usage_cnt += inc_usage_cnt;
-	udev->auto_pm = 1;
-	status = usb_resume_both(udev);
-	if (status != 0)
-		udev->pm_usage_cnt -= inc_usage_cnt;
-	usb_pm_unlock(udev);
+	status = usb_autopm_do_device(udev, 1);
 	// dev_dbg(&udev->dev, "%s: status %d cnt %d\n",
 	//		__FUNCTION__, status, udev->pm_usage_cnt);
 	return status;
 }
 
+/* Internal routine to adjust an interface's usage counter and change
+ * its device's autosuspend state.
+ */
+static int usb_autopm_do_interface(struct usb_interface *intf,
+		int inc_usage_cnt)
+{
+	struct usb_device	*udev = interface_to_usbdev(intf);
+	int			status = 0;
+
+	usb_pm_lock(udev);
+	if (intf->condition == USB_INTERFACE_UNBOUND)
+		status = -ENODEV;
+	else {
+		intf->pm_usage_cnt += inc_usage_cnt;
+		if (inc_usage_cnt >= 0 && intf->pm_usage_cnt > 0) {
+			udev->auto_pm = 1;
+			status = usb_resume_both(udev);
+			if (status != 0)
+				intf->pm_usage_cnt -= inc_usage_cnt;
+		} else if (inc_usage_cnt <= 0 && autosuspend_check(udev) == 0)
+			queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
+					USB_AUTOSUSPEND_DELAY);
+	}
+	usb_pm_unlock(udev);
+	return status;
+}
+
 /**
  * usb_autopm_put_interface - decrement a USB interface's PM-usage counter
  * @intf: the usb_interface whose counter should be decremented
@@ -1213,17 +1294,11 @@
  */
 void usb_autopm_put_interface(struct usb_interface *intf)
 {
-	struct usb_device	*udev = interface_to_usbdev(intf);
+	int	status;
 
-	usb_pm_lock(udev);
-	if (intf->condition != USB_INTERFACE_UNBOUND &&
-			--intf->pm_usage_cnt <= 0) {
-		queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
-				USB_AUTOSUSPEND_DELAY);
-	}
-	usb_pm_unlock(udev);
-	// dev_dbg(&intf->dev, "%s: cnt %d\n",
-	//		__FUNCTION__, intf->pm_usage_cnt);
+	status = usb_autopm_do_interface(intf, -1);
+	// dev_dbg(&intf->dev, "%s: status %d cnt %d\n",
+	//		__FUNCTION__, status, intf->pm_usage_cnt);
 }
 EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
 
@@ -1260,26 +1335,37 @@
  */
 int usb_autopm_get_interface(struct usb_interface *intf)
 {
-	struct usb_device	*udev = interface_to_usbdev(intf);
-	int			status;
+	int	status;
 
-	usb_pm_lock(udev);
-	if (intf->condition == USB_INTERFACE_UNBOUND)
-		status = -ENODEV;
-	else {
-		++intf->pm_usage_cnt;
-		udev->auto_pm = 1;
-		status = usb_resume_both(udev);
-		if (status != 0)
-			--intf->pm_usage_cnt;
-	}
-	usb_pm_unlock(udev);
+	status = usb_autopm_do_interface(intf, 1);
 	// dev_dbg(&intf->dev, "%s: status %d cnt %d\n",
 	//		__FUNCTION__, status, intf->pm_usage_cnt);
 	return status;
 }
 EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
 
+/**
+ * usb_autopm_set_interface - set a USB interface's autosuspend state
+ * @intf: the usb_interface whose state should be set
+ *
+ * This routine sets the autosuspend state of @intf's device according
+ * to @intf's usage counter, which the caller must have set previously.
+ * If the counter is <= 0, the device is autosuspended (if it isn't
+ * already suspended and if nothing else prevents the autosuspend).  If
+ * the counter is > 0, the device is autoresumed (if it isn't already
+ * awake).
+ */
+int usb_autopm_set_interface(struct usb_interface *intf)
+{
+	int	status;
+
+	status = usb_autopm_do_interface(intf, 0);
+	// dev_dbg(&intf->dev, "%s: status %d cnt %d\n",
+	//		__FUNCTION__, status, intf->pm_usage_cnt);
+	return status;
+}
+EXPORT_SYMBOL_GPL(usb_autopm_set_interface);
+
 #endif /* CONFIG_USB_SUSPEND */
 
 static int usb_suspend(struct device *dev, pm_message_t message)
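A minimal sketch (not part of the patch) of how an interface driver that sets .supports_autosuspend would use the usb_autopm_* calls reworked above: take a usage reference before I/O so the device is resumed and stays awake, then drop it so an idle device may autosuspend again.  struct foo_dev and foo_submit_io() are hypothetical.

	static int foo_do_io(struct foo_dev *foo)
	{
		int rc;

		/* Resume the device if needed; blocks autosuspend. */
		rc = usb_autopm_get_interface(foo->intf);
		if (rc)
			return rc;

		rc = foo_submit_io(foo);	/* hypothetical I/O */

		/* Drop the usage count; the device may suspend again. */
		usb_autopm_put_interface(foo->intf);
		return rc;
	}
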
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 3b2d137..c505b76 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -10,15 +10,20 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
 #include <linux/usb.h>
 #include "usb.h"
 
-/* endpoint stuff */
+#define MAX_ENDPOINT_MINORS (64*128*32)
+static int usb_endpoint_major;
+static DEFINE_IDR(endpoint_idr);
 
 struct ep_device {
 	struct usb_endpoint_descriptor *desc;
 	struct usb_device *udev;
 	struct device dev;
+	int minor;
 };
 #define to_ep_device(_dev) \
 	container_of(_dev, struct ep_device, dev)
@@ -152,6 +157,55 @@
 	.attrs = ep_dev_attrs,
 };
 
+static int usb_endpoint_major_init(void)
+{
+	dev_t dev;
+	int error;
+
+	error = alloc_chrdev_region(&dev, 0, MAX_ENDPOINT_MINORS,
+				    "usb_endpoint");
+	if (error) {
+		err("unable to get a dynamic major for usb endpoints");
+		return error;
+	}
+	usb_endpoint_major = MAJOR(dev);
+
+	return error;
+}
+
+static void usb_endpoint_major_cleanup(void)
+{
+	unregister_chrdev_region(MKDEV(usb_endpoint_major, 0),
+				 MAX_ENDPOINT_MINORS);
+}
+
+static int endpoint_get_minor(struct ep_device *ep_dev)
+{
+	static DEFINE_MUTEX(minor_lock);
+	int retval = -ENOMEM;
+	int id;
+
+	mutex_lock(&minor_lock);
+	if (idr_pre_get(&endpoint_idr, GFP_KERNEL) == 0)
+		goto exit;
+
+	retval = idr_get_new(&endpoint_idr, ep_dev, &id);
+	if (retval < 0) {
+		if (retval == -EAGAIN)
+			retval = -ENOMEM;
+		goto exit;
+	}
+	ep_dev->minor = id & MAX_ID_MASK;
+exit:
+	mutex_unlock(&minor_lock);
+	return retval;
+}
+
+static void endpoint_free_minor(struct ep_device *ep_dev)
+{
+	idr_remove(&endpoint_idr, ep_dev->minor);
+}
+
 static struct endpoint_class {
 	struct kref kref;
 	struct class *class;
@@ -176,11 +230,20 @@
 	ep_class->class = class_create(THIS_MODULE, "usb_endpoint");
 	if (IS_ERR(ep_class->class)) {
 		result = IS_ERR(ep_class->class);
-		kfree(ep_class);
-		ep_class = NULL;
-		goto exit;
+		goto class_create_error;
 	}
 
+	result = usb_endpoint_major_init();
+	if (result)
+		goto endpoint_major_error;
+
+	goto exit;
+
+endpoint_major_error:
+	class_destroy(ep_class->class);
+class_create_error:
+	kfree(ep_class);
+	ep_class = NULL;
 exit:
 	return result;
 }
@@ -191,6 +254,7 @@
 	class_destroy(ep_class->class);
 	kfree(ep_class);
 	ep_class = NULL;
+	usb_endpoint_major_cleanup();
 }
 
 static void destroy_endpoint_class(void)
@@ -213,7 +277,6 @@
 {
 	char name[8];
 	struct ep_device *ep_dev;
-	int minor;
 	int retval;
 
 	retval = init_endpoint_class();
@@ -226,12 +289,16 @@
 		goto error_alloc;
 	}
 
-	/* fun calculation to determine the minor of this endpoint */
-	minor = (((udev->bus->busnum - 1) * 128) * 16) + (udev->devnum - 1);
+	retval = endpoint_get_minor(ep_dev);
+	if (retval) {
+		dev_err(parent, "cannot allocate minor number for %s\n",
+			ep_dev->dev.bus_id);
+		goto error_register;
+	}
 
 	ep_dev->desc = &endpoint->desc;
 	ep_dev->udev = udev;
-	ep_dev->dev.devt = MKDEV(442, minor);	// FIXME fake number...
+	ep_dev->dev.devt = MKDEV(usb_endpoint_major, ep_dev->minor);
 	ep_dev->dev.class = ep_class->class;
 	ep_dev->dev.parent = parent;
 	ep_dev->dev.release = ep_device_release;
@@ -241,7 +308,7 @@
 
 	retval = device_register(&ep_dev->dev);
 	if (retval)
-		goto error_register;
+		goto error_chrdev;
 	retval = sysfs_create_group(&ep_dev->dev.kobj, &ep_dev_attr_grp);
 	if (retval)
 		goto error_group;
@@ -261,6 +328,9 @@
 	destroy_endpoint_class();
 	return retval;
 
+error_chrdev:
+	endpoint_free_minor(ep_dev);
+
 error_register:
 	kfree(ep_dev);
 error_alloc:
@@ -271,14 +341,16 @@
 
 void usb_remove_ep_files(struct usb_host_endpoint *endpoint)
 {
+	struct ep_device *ep_dev = endpoint->ep_dev;
 
-	if (endpoint->ep_dev) {
+	if (ep_dev) {
 		char name[8];
 
 		sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress);
-		sysfs_remove_link(&endpoint->ep_dev->dev.parent->kobj, name);
-		sysfs_remove_group(&endpoint->ep_dev->dev.kobj, &ep_dev_attr_grp);
-		device_unregister(&endpoint->ep_dev->dev);
+		sysfs_remove_link(&ep_dev->dev.parent->kobj, name);
+		sysfs_remove_group(&ep_dev->dev.kobj, &ep_dev_attr_grp);
+		endpoint_free_minor(ep_dev);
+		device_unregister(&ep_dev->dev);
 		endpoint->ep_dev = NULL;
 		destroy_endpoint_class();
 	}
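A minimal sketch (not part of the patch) of the two-step idr allocation used above for endpoint minor numbers, shown on a hypothetical "struct bar" registry.

	static DEFINE_IDR(bar_idr);
	static DEFINE_MUTEX(bar_lock);

	static int bar_get_id(struct bar *b)
	{
		int id, err = -ENOMEM;

		mutex_lock(&bar_lock);
		/* Preallocate idr nodes; this step may sleep. */
		if (idr_pre_get(&bar_idr, GFP_KERNEL) == 0)
			goto out;

		err = idr_get_new(&bar_idr, b, &id);
		if (err == 0)
			b->id = id;
	out:
		mutex_unlock(&bar_lock);
		return err;
	}
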
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index afa2dd2..10064af 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -256,7 +256,9 @@
 	0x05,       /*  __u8  ep_bDescriptorType; Endpoint */
 	0x81,       /*  __u8  ep_bEndpointAddress; IN Endpoint 1 */
  	0x03,       /*  __u8  ep_bmAttributes; Interrupt */
- 	0x02, 0x00, /*  __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8) */
+		    /* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8)
+		     * see hub.c:hub_configure() for details. */
+	(USB_MAXCHILDREN + 1 + 7) / 8, 0x00,
 	0x0c        /*  __u8  ep_bInterval; (256ms -- usb 2.0 spec) */
 };
 
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index ba165af..2651c2e 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -22,6 +22,7 @@
 #include <linux/usbdevice_fs.h>
 #include <linux/kthread.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 #include <asm/semaphore.h>
 #include <asm/uaccess.h>
@@ -31,6 +32,47 @@
 #include "hcd.h"
 #include "hub.h"
 
+struct usb_hub {
+	struct device		*intfdev;	/* the "interface" device */
+	struct usb_device	*hdev;
+	struct urb		*urb;		/* for interrupt polling pipe */
+
+	/* buffer for urb ... with extra space in case of babble */
+	char			(*buffer)[8];
+	dma_addr_t		buffer_dma;	/* DMA address for buffer */
+	union {
+		struct usb_hub_status	hub;
+		struct usb_port_status	port;
+	}			*status;	/* buffer for status reports */
+
+	int			error;		/* last reported error */
+	int			nerrors;	/* track consecutive errors */
+
+	struct list_head	event_list;	/* hubs w/data or errs ready */
+	unsigned long		event_bits[1];	/* status change bitmask */
+	unsigned long		change_bits[1];	/* ports with logical connect
+							status change */
+	unsigned long		busy_bits[1];	/* ports being reset or
+							resumed */
+#if USB_MAXCHILDREN > 31 /* 8*sizeof(unsigned long) - 1 */
+#error event_bits[] is too short!
+#endif
+
+	struct usb_hub_descriptor *descriptor;	/* class descriptor */
+	struct usb_tt		tt;		/* Transaction Translator */
+
+	unsigned		mA_per_port;	/* current for each child */
+
+	unsigned		limited_power:1;
+	unsigned		quiescing:1;
+	unsigned		activating:1;
+
+	unsigned		has_indicators:1;
+	u8			indicator[USB_MAXCHILDREN];
+	struct delayed_work	leds;
+};
+
+
 /* Protect struct usb_device->state and ->children members
  * Note: Both are also protected by ->dev.sem, except that ->state can
  * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
@@ -45,6 +87,16 @@
 
 static struct task_struct *khubd_task;
 
+/* multithreaded probe logic */
+static int multithread_probe =
+#ifdef CONFIG_USB_MULTITHREAD_PROBE
+	1;
+#else
+	0;
+#endif
+module_param(multithread_probe, bool, S_IRUGO);
+MODULE_PARM_DESC(multithread_probe, "Run each USB device probe in a new thread");
+
 /* cycle leds on hubs that aren't blinking for attention */
 static int blinkenlights = 0;
 module_param (blinkenlights, bool, S_IRUGO);
@@ -167,9 +219,10 @@
 
 #define	LED_CYCLE_PERIOD	((2*HZ)/3)
 
-static void led_work (void *__hub)
+static void led_work (struct work_struct *work)
 {
-	struct usb_hub		*hub = __hub;
+	struct usb_hub		*hub =
+		container_of(work, struct usb_hub, leds.work);
 	struct usb_device	*hdev = hub->hdev;
 	unsigned		i;
 	unsigned		changed = 0;
@@ -276,6 +329,9 @@
 {
 	unsigned long	flags;
 
+	/* Suppress autosuspend until khubd runs */
+	to_usb_interface(hub->intfdev)->pm_usage_cnt = 1;
+
 	spin_lock_irqsave(&hub_event_lock, flags);
 	if (list_empty(&hub->event_list)) {
 		list_add_tail(&hub->event_list, &hub_event_list);
@@ -351,9 +407,10 @@
  * talking to TTs must queue control transfers (not just bulk and iso), so
  * both can talk to the same hub concurrently.
  */
-static void hub_tt_kevent (void *arg)
+static void hub_tt_kevent (struct work_struct *work)
 {
-	struct usb_hub		*hub = arg;
+	struct usb_hub		*hub =
+		container_of(work, struct usb_hub, tt.kevent);
 	unsigned long		flags;
 
 	spin_lock_irqsave (&hub->tt.lock, flags);
@@ -404,7 +461,7 @@
 	 * since each TT has "at least two" buffers that can need it (and
 	 * there can be many TTs per hub).  even if they're uncommon.
 	 */
-	if ((clear = kmalloc (sizeof *clear, SLAB_ATOMIC)) == NULL) {
+	if ((clear = kmalloc (sizeof *clear, GFP_ATOMIC)) == NULL) {
 		dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n");
 		/* FIXME recover somehow ... RESET_TT? */
 		return;
@@ -457,7 +514,6 @@
 	/* (nonblocking) khubd and related activity won't re-trigger */
 	hub->quiescing = 1;
 	hub->activating = 0;
-	hub->resume_root_hub = 0;
 
 	/* (blocking) stop khubd and related activity */
 	usb_kill_urb(hub->urb);
@@ -473,7 +529,7 @@
 
 	hub->quiescing = 0;
 	hub->activating = 1;
-	hub->resume_root_hub = 0;
+
 	status = usb_submit_urb(hub->urb, GFP_NOIO);
 	if (status < 0)
 		dev_err(hub->intfdev, "activate --> %d\n", status);
@@ -641,7 +697,7 @@
 
 	spin_lock_init (&hub->tt.lock);
 	INIT_LIST_HEAD (&hub->tt.clear_list);
-	INIT_WORK (&hub->tt.kevent, hub_tt_kevent, hub);
+	INIT_WORK (&hub->tt.kevent, hub_tt_kevent);
 	switch (hdev->descriptor.bDeviceProtocol) {
 		case 0:
 			break;
@@ -759,7 +815,12 @@
 		dev_dbg(hub_dev, "%sover-current condition exists\n",
 			(hubstatus & HUB_STATUS_OVERCURRENT) ? "" : "no ");
 
-	/* set up the interrupt endpoint */
+	/* set up the interrupt endpoint
+	 * We use the EP's maxpacket size instead of (PORTS+1+7)/8
+	 * bytes as USB2.0[11.12.3] says because some hubs are known
+	 * to send more data (and thus cause overflow). For root hubs,
+	 * maxpktsize is defined in hcd.c's fake endpoint descriptors
+	 * to be big enough for at least USB_MAXCHILDREN ports. */
 	pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
 	maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe));
 
@@ -880,9 +941,10 @@
 	INIT_LIST_HEAD(&hub->event_list);
 	hub->intfdev = &intf->dev;
 	hub->hdev = hdev;
-	INIT_WORK(&hub->leds, led_work, hub);
+	INIT_DELAYED_WORK(&hub->leds, led_work);
 
 	usb_set_intfdata (intf, hub);
+	intf->needs_remote_wakeup = 1;
 
 	if (hdev->speed == USB_SPEED_HIGH)
 		highspeed_hubs++;
@@ -980,6 +1042,8 @@
 		if (udev->children[i])
 			recursively_mark_NOTATTACHED(udev->children[i]);
 	}
+	if (udev->state == USB_STATE_SUSPENDED)
+		udev->discon_suspended = 1;
 	udev->state = USB_STATE_NOTATTACHED;
 }
 
@@ -1169,6 +1233,14 @@
 	*pdev = NULL;
 	spin_unlock_irq(&device_state_lock);
 
+	/* Decrement the parent's count of unsuspended children */
+	if (udev->parent) {
+		usb_pm_lock(udev);
+		if (!udev->discon_suspended)
+			usb_autosuspend_device(udev->parent);
+		usb_pm_unlock(udev);
+	}
+
 	put_device(&udev->dev);
 }
 
@@ -1191,29 +1263,17 @@
 static int __usb_port_suspend(struct usb_device *, int port1);
 #endif
 
-/**
- * usb_new_device - perform initial device setup (usbcore-internal)
- * @udev: newly addressed device (in ADDRESS state)
- *
- * This is called with devices which have been enumerated, but not yet
- * configured.  The device descriptor is available, but not descriptors
- * for any device configuration.  The caller must have locked either
- * the parent hub (if udev is a normal device) or else the
- * usb_bus_list_lock (if udev is a root hub).  The parent's pointer to
- * udev has already been installed, but udev is not yet visible through
- * sysfs or other filesystem code.
- *
- * Returns 0 for success (device is configured and listed, with its
- * interfaces, in sysfs); else a negative errno value.
- *
- * This call is synchronous, and may not be used in an interrupt context.
- *
- * Only the hub driver or root-hub registrar should ever call this.
- */
-int usb_new_device(struct usb_device *udev)
+static int __usb_new_device(void *void_data)
 {
+	struct usb_device *udev = void_data;
 	int err;
 
+	/* Take a reference on this module so that a probe sequence
+	 * sleeping in a new thread cannot allow usbcore to be unloaded.
+	 */
+	if (!try_module_get(THIS_MODULE))
+		return -EINVAL;
+
 	err = usb_get_configuration(udev);
 	if (err < 0) {
 		dev_err(&udev->dev, "can't read configurations, error %d\n",
@@ -1309,13 +1369,56 @@
 		goto fail;
 	}
 
-	return 0;
+	/* Increment the parent's count of unsuspended children */
+	if (udev->parent)
+		usb_autoresume_device(udev->parent);
+
+exit:
+	module_put(THIS_MODULE);
+	return err;
 
 fail:
 	usb_set_device_state(udev, USB_STATE_NOTATTACHED);
-	return err;
+	goto exit;
 }
 
+/**
+ * usb_new_device - perform initial device setup (usbcore-internal)
+ * @udev: newly addressed device (in ADDRESS state)
+ *
+ * This is called with devices which have been enumerated, but not yet
+ * configured.  The device descriptor is available, but not descriptors
+ * for any device configuration.  The caller must have locked either
+ * the parent hub (if udev is a normal device) or else the
+ * usb_bus_list_lock (if udev is a root hub).  The parent's pointer to
+ * udev has already been installed, but udev is not yet visible through
+ * sysfs or other filesystem code.
+ *
+ * The return value depends on whether multithread_probe is set.  If it
+ * is set, this returns whether the probe thread was created successfully.
+ * If it is not set, this returns whether the device was configured
+ * properly: 0 for success (device configured and listed, with its
+ * interfaces, in sysfs); else a negative errno value.
+ *
+ * This call is synchronous, and may not be used in an interrupt context.
+ *
+ * Only the hub driver or root-hub registrar should ever call this.
+ */
+int usb_new_device(struct usb_device *udev)
+{
+	struct task_struct *probe_task;
+	int ret = 0;
+
+	if (multithread_probe) {
+		probe_task = kthread_run(__usb_new_device, udev,
+					 "usb-probe-%d", udev->devnum);
+		if (IS_ERR(probe_task))
+			ret = PTR_ERR(probe_task);
+	} else
+		ret = __usb_new_device(udev);
+
+	return ret;
+}
 
 static int hub_port_status(struct usb_hub *hub, int port1,
 			       u16 *status, u16 *change)
@@ -1323,10 +1426,12 @@
 	int ret;
 
 	ret = get_port_status(hub->hdev, port1, &hub->status->port);
-	if (ret < 0)
+	if (ret < 4) {
 		dev_err (hub->intfdev,
 			"%s failed (err = %d)\n", __FUNCTION__, ret);
-	else {
+		if (ret >= 0)
+			ret = -EIO;
+	} else {
 		*status = le16_to_cpu(hub->status->port.wPortStatus);
 		*change = le16_to_cpu(hub->status->port.wPortChange); 
 		ret = 0;
@@ -1674,6 +1779,12 @@
 hub_port_resume(struct usb_hub *hub, int port1, struct usb_device *udev)
 {
 	int	status;
+	u16	portchange, portstatus;
+
+	/* Skip the initial Clear-Suspend step for a remote wakeup */
+	status = hub_port_status(hub, port1, &portstatus, &portchange);
+	if (status == 0 && !(portstatus & USB_PORT_STAT_SUSPEND))
+		goto SuspendCleared;
 
 	// dev_dbg(hub->intfdev, "resume port %d\n", port1);
 
@@ -1687,9 +1798,6 @@
 			"can't resume port %d, status %d\n",
 			port1, status);
 	} else {
-		u16		devstatus;
-		u16		portchange;
-
 		/* drive resume for at least 20 msec */
 		if (udev)
 			dev_dbg(&udev->dev, "usb %sresume\n",
@@ -1704,16 +1812,15 @@
 		 * stop resume signaling.  Then finish the resume
 		 * sequence.
 		 */
-		devstatus = portchange = 0;
-		status = hub_port_status(hub, port1,
-				&devstatus, &portchange);
+		status = hub_port_status(hub, port1, &portstatus, &portchange);
+SuspendCleared:
 		if (status < 0
-				|| (devstatus & LIVE_FLAGS) != LIVE_FLAGS
-				|| (devstatus & USB_PORT_STAT_SUSPEND) != 0
+				|| (portstatus & LIVE_FLAGS) != LIVE_FLAGS
+				|| (portstatus & USB_PORT_STAT_SUSPEND) != 0
 				) {
 			dev_dbg(hub->intfdev,
 				"port %d status %04x.%04x after resume, %d\n",
-				port1, portchange, devstatus, status);
+				port1, portchange, portstatus, status);
 			if (status >= 0)
 				status = -ENODEV;
 		} else {
@@ -1774,23 +1881,16 @@
 {
 	int	status = 0;
 
-	/* All this just to avoid sending a port-resume message
-	 * to the parent hub! */
-
 	usb_lock_device(udev);
-	usb_pm_lock(udev);
 	if (udev->state == USB_STATE_SUSPENDED) {
 		dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
-		/* TRSMRCY = 10 msec */
-		msleep(10);
-		status = finish_port_resume(udev);
-		if (status == 0)
-			udev->dev.power.power_state.event = PM_EVENT_ON;
-	}
-	usb_pm_unlock(udev);
+		status = usb_autoresume_device(udev);
 
-	if (status == 0)
-		usb_autoresume_device(udev, 0);
+		/* Give the interface drivers a chance to do something,
+		 * then autosuspend the device again. */
+		if (status == 0)
+			usb_autosuspend_device(udev);
+	}
 	usb_unlock_device(udev);
 	return status;
 }
@@ -1854,6 +1954,8 @@
 		}
 	}
 
+	dev_dbg(&intf->dev, "%s\n", __FUNCTION__);
+
 	/* "global suspend" of the downstream HC-to-USB interface */
 	if (!hdev->parent) {
 		struct usb_bus	*bus = hdev->bus;
@@ -1876,10 +1978,12 @@
 
 static int hub_resume(struct usb_interface *intf)
 {
-	struct usb_device	*hdev = interface_to_usbdev(intf);
 	struct usb_hub		*hub = usb_get_intfdata (intf);
+	struct usb_device	*hdev = hub->hdev;
 	int			status;
 
+	dev_dbg(&intf->dev, "%s\n", __FUNCTION__);
+
 	/* "global resume" of the downstream HC-to-USB interface */
 	if (!hdev->parent) {
 		struct usb_bus	*bus = hdev->bus;
@@ -1918,7 +2022,6 @@
 {
 	struct usb_hub *hub = hdev_to_hub(hdev);
 
-	hub->resume_root_hub = 1;
 	kick_khubd(hub);
 }
 
@@ -2269,7 +2372,7 @@
 	struct usb_qualifier_descriptor	*qual;
 	int				status;
 
-	qual = kmalloc (sizeof *qual, SLAB_KERNEL);
+	qual = kmalloc (sizeof *qual, GFP_KERNEL);
 	if (qual == NULL)
 		return;
 
@@ -2281,7 +2384,7 @@
 		/* hub LEDs are probably harder to miss than syslog */
 		if (hub->has_indicators) {
 			hub->indicator[port1-1] = INDICATOR_GREEN_BLINK;
-			schedule_work (&hub->leds);
+			schedule_delayed_work (&hub->leds, 0);
 		}
 	}
 	kfree(qual);
@@ -2455,7 +2558,7 @@
 				if (hub->has_indicators) {
 					hub->indicator[port1-1] =
 						INDICATOR_AMBER_BLINK;
-					schedule_work (&hub->leds);
+					schedule_delayed_work (&hub->leds, 0);
 				}
 				status = -ENOTCONN;	/* Don't retry */
 				goto loop_disable;
@@ -2555,16 +2658,13 @@
 		intf = to_usb_interface(hub->intfdev);
 		hub_dev = &intf->dev;
 
-		i = hub->resume_root_hub;
-
-		dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x%s\n",
+		dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
 				hdev->state, hub->descriptor
 					? hub->descriptor->bNbrPorts
 					: 0,
 				/* NOTE: expects max 15 ports... */
 				(u16) hub->change_bits[0],
-				(u16) hub->event_bits[0],
-				i ? ", resume root" : "");
+				(u16) hub->event_bits[0]);
 
 		usb_get_intf(intf);
 		spin_unlock_irq(&hub_event_lock);
@@ -2585,16 +2685,16 @@
 			goto loop;
 		}
 
-		/* Is this is a root hub wanting to reactivate the downstream
-		 * ports?  If so, be sure the interface resumes even if its
-		 * stub "device" node was never suspended.
-		 */
-		if (i)
-			usb_autoresume_device(hdev, 0);
-
-		/* If this is an inactive or suspended hub, do nothing */
-		if (hub->quiescing)
+		/* Autoresume */
+		ret = usb_autopm_get_interface(intf);
+		if (ret) {
+			dev_dbg(hub_dev, "Can't autoresume: %d\n", ret);
 			goto loop;
+		}
+
+		/* If this is an inactive hub, do nothing */
+		if (hub->quiescing)
+			goto loop_autopm;
 
 		if (hub->error) {
 			dev_dbg (hub_dev, "resetting for error %d\n",
@@ -2604,7 +2704,7 @@
 			if (ret) {
 				dev_dbg (hub_dev,
 					"error resetting hub: %d\n", ret);
-				goto loop;
+				goto loop_autopm;
 			}
 
 			hub->nerrors = 0;
@@ -2732,6 +2832,10 @@
 		if (!hdev->parent && !hub->busy_bits[0])
 			usb_enable_root_hub_irq(hdev->bus);
 
+loop_autopm:
+		/* Allow autosuspend if we're not going to run again */
+		if (list_empty(&hub->event_list))
+			usb_autopm_enable(intf);
 loop:
 		usb_unlock_device(hdev);
 		usb_put_intf(intf);
@@ -2773,6 +2877,7 @@
 	.post_reset =	hub_post_reset,
 	.ioctl =	hub_ioctl,
 	.id_table =	hub_id_table,
+	.supports_autosuspend =	1,
 };
 
 int usb_hub_init(void)
@@ -2818,7 +2923,7 @@
 		if (len < le16_to_cpu(udev->config[index].desc.wTotalLength))
 			len = le16_to_cpu(udev->config[index].desc.wTotalLength);
 	}
-	buf = kmalloc (len, SLAB_KERNEL);
+	buf = kmalloc (len, GFP_KERNEL);
 	if (buf == NULL) {
 		dev_err(&udev->dev, "no mem to re-read configs after reset\n");
 		/* assume the worst */
@@ -2997,7 +3102,7 @@
 	}
 
 	/* Prevent autosuspend during the reset */
-	usb_autoresume_device(udev, 1);
+	usb_autoresume_device(udev);
 
 	if (iface && iface->condition != USB_INTERFACE_BINDING)
 		iface = NULL;
@@ -3040,7 +3145,7 @@
 		}
 	}
 
-	usb_autosuspend_device(udev, 1);
+	usb_autosuspend_device(udev);
 	return ret;
 }
 EXPORT_SYMBOL(usb_reset_composite_device);
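A minimal sketch (not part of the patch) of the struct delayed_work pattern that hub->leds now uses above: INIT_DELAYED_WORK() plus schedule_delayed_work(), with the handler recovering its container through the embedded .work member.  "struct baz" is hypothetical.

	struct baz {
		struct delayed_work update;
	};

	static void baz_update(struct work_struct *work)
	{
		struct baz *z = container_of(work, struct baz, update.work);

		/* ... do the periodic work, then re-arm ... */
		schedule_delayed_work(&z->update, HZ);
	}

	static void baz_start(struct baz *z)
	{
		INIT_DELAYED_WORK(&z->update, baz_update);
		schedule_delayed_work(&z->update, 0);
	}
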
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 0f8e82a..cf9559c 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -192,45 +192,4 @@
 
 extern void usb_hub_tt_clear_buffer (struct usb_device *dev, int pipe);
 
-struct usb_hub {
-	struct device		*intfdev;	/* the "interface" device */
-	struct usb_device	*hdev;
-	struct urb		*urb;		/* for interrupt polling pipe */
-
-	/* buffer for urb ... with extra space in case of babble */
-	char			(*buffer)[8];
-	dma_addr_t		buffer_dma;	/* DMA address for buffer */
-	union {
-		struct usb_hub_status	hub;
-		struct usb_port_status	port;
-	}			*status;	/* buffer for status reports */
-
-	int			error;		/* last reported error */
-	int			nerrors;	/* track consecutive errors */
-
-	struct list_head	event_list;	/* hubs w/data or errs ready */
-	unsigned long		event_bits[1];	/* status change bitmask */
-	unsigned long		change_bits[1];	/* ports with logical connect
-							status change */
-	unsigned long		busy_bits[1];	/* ports being reset or
-							resumed */
-#if USB_MAXCHILDREN > 31 /* 8*sizeof(unsigned long) - 1 */
-#error event_bits[] is too short!
-#endif
-
-	struct usb_hub_descriptor *descriptor;	/* class descriptor */
-	struct usb_tt		tt;		/* Transaction Translator */
-
-	unsigned		mA_per_port;	/* current for each child */
-
-	unsigned		limited_power:1;
-	unsigned		quiescing:1;
-	unsigned		activating:1;
-	unsigned		resume_root_hub:1;
-
-	unsigned		has_indicators:1;
-	enum hub_led_mode	indicator[USB_MAXCHILDREN];
-	struct work_struct	leds;
-};
-
 #endif /* __LINUX_HUB_H */
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 7729c07..149aa8b 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -488,7 +488,7 @@
 		int	retval;
 
 		io->urbs [i]->dev = io->dev;
-		retval = usb_submit_urb (io->urbs [i], SLAB_ATOMIC);
+		retval = usb_submit_urb (io->urbs [i], GFP_ATOMIC);
 
 		/* after we submit, let completions or cancelations fire;
 		 * we handshake using io->status.
@@ -764,7 +764,7 @@
 			err = -EINVAL;
 			goto errout;
 		} else {
-			dev->have_langid = -1;
+			dev->have_langid = 1;
 			dev->string_langid = tbuf[2] | (tbuf[3]<< 8);
 				/* always use the first langid listed */
 			dev_dbg (&dev->dev, "default language 0x%04x\n",
@@ -1398,7 +1398,7 @@
 	}
 
 	/* Wake up the device so we can send it the Set-Config request */
-	ret = usb_autoresume_device(dev, 1);
+	ret = usb_autoresume_device(dev);
 	if (ret)
 		goto free_interfaces;
 
@@ -1421,7 +1421,7 @@
 	dev->actconfig = cp;
 	if (!cp) {
 		usb_set_device_state(dev, USB_STATE_ADDRESS);
-		usb_autosuspend_device(dev, 1);
+		usb_autosuspend_device(dev);
 		goto free_interfaces;
 	}
 	usb_set_device_state(dev, USB_STATE_CONFIGURED);
@@ -1490,7 +1490,7 @@
 		usb_create_sysfs_intf_files (intf);
 	}
 
-	usb_autosuspend_device(dev, 1);
+	usb_autosuspend_device(dev);
 	return 0;
 }
 
@@ -1501,9 +1501,10 @@
 };
 
 /* Worker routine for usb_driver_set_configuration() */
-static void driver_set_config_work(void *_req)
+static void driver_set_config_work(struct work_struct *work)
 {
-	struct set_config_request *req = _req;
+	struct set_config_request *req =
+		container_of(work, struct set_config_request, work);
 
 	usb_lock_device(req->udev);
 	usb_set_configuration(req->udev, req->config);
@@ -1541,7 +1542,7 @@
 		return -ENOMEM;
 	req->udev = udev;
 	req->config = config;
-	INIT_WORK(&req->work, driver_set_config_work, req);
+	INIT_WORK(&req->work, driver_set_config_work);
 
 	usb_get_dev(udev);
 	if (!schedule_work(&req->work)) {
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 467cb02..02426d0 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -200,19 +200,13 @@
 	destroy_workqueue(ksuspend_usb_wq);
 }
 
-#else
-
-#define ksuspend_usb_init()	0
-#define ksuspend_usb_cleanup()	do {} while (0)
-
-#endif
-
 #ifdef	CONFIG_USB_SUSPEND
 
 /* usb_autosuspend_work - callback routine to autosuspend a USB device */
-static void usb_autosuspend_work(void *_udev)
+static void usb_autosuspend_work(struct work_struct *work)
 {
-	struct usb_device	*udev = _udev;
+	struct usb_device *udev =
+		container_of(work, struct usb_device, autosuspend.work);
 
 	usb_pm_lock(udev);
 	udev->auto_pm = 1;
@@ -222,10 +216,17 @@
 
 #else
 
-static void usb_autosuspend_work(void *_udev)
+static void usb_autosuspend_work(struct work_struct *work)
 {}
 
-#endif
+#endif	/* CONFIG_USB_SUSPEND */
+
+#else
+
+#define ksuspend_usb_init()	0
+#define ksuspend_usb_cleanup()	do {} while (0)
+
+#endif	/* CONFIG_PM */
 
 /**
  * usb_alloc_dev - usb device constructor (usbcore-internal)
@@ -304,7 +305,7 @@
 
 #ifdef	CONFIG_PM
 	mutex_init(&dev->pm_mutex);
-	INIT_WORK(&dev->autosuspend, usb_autosuspend_work, dev);
+	INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
 #endif
 	return dev;
 }
@@ -537,138 +538,6 @@
 	return usb_hcd_get_frame_number (dev);
 }
 
-/**
- * usb_endpoint_dir_in - check if the endpoint has IN direction
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint is of type IN, otherwise it returns false.
- */
-int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
-{
-	return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN);
-}
-
-/**
- * usb_endpoint_dir_out - check if the endpoint has OUT direction
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint is of type OUT, otherwise it returns false.
- */
-int usb_endpoint_dir_out(const struct usb_endpoint_descriptor *epd)
-{
-	return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
-}
-
-/**
- * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint is of type bulk, otherwise it returns false.
- */
-int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd)
-{
-	return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
-		USB_ENDPOINT_XFER_BULK);
-}
-
-/**
- * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint is of type interrupt, otherwise it returns
- * false.
- */
-int usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd)
-{
-	return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
-		USB_ENDPOINT_XFER_INT);
-}
-
-/**
- * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint is of type isochronous, otherwise it returns
- * false.
- */
-int usb_endpoint_xfer_isoc(const struct usb_endpoint_descriptor *epd)
-{
-	return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
-		USB_ENDPOINT_XFER_ISOC);
-}
-
-/**
- * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has bulk transfer type and IN direction,
- * otherwise it returns false.
- */
-int usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd)
-{
-	return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd));
-}
-
-/**
- * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has bulk transfer type and OUT direction,
- * otherwise it returns false.
- */
-int usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd)
-{
-	return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd));
-}
-
-/**
- * usb_endpoint_is_int_in - check if the endpoint is interrupt IN
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has interrupt transfer type and IN direction,
- * otherwise it returns false.
- */
-int usb_endpoint_is_int_in(const struct usb_endpoint_descriptor *epd)
-{
-	return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd));
-}
-
-/**
- * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has interrupt transfer type and OUT direction,
- * otherwise it returns false.
- */
-int usb_endpoint_is_int_out(const struct usb_endpoint_descriptor *epd)
-{
-	return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_out(epd));
-}
-
-/**
- * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has isochronous transfer type and IN direction,
- * otherwise it returns false.
- */
-int usb_endpoint_is_isoc_in(const struct usb_endpoint_descriptor *epd)
-{
-	return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_in(epd));
-}
-
-/**
- * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has isochronous transfer type and OUT direction,
- * otherwise it returns false.
- */
-int usb_endpoint_is_isoc_out(const struct usb_endpoint_descriptor *epd)
-{
-	return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd));
-}
-
 /*-------------------------------------------------------------------*/
 /*
  * __usb_get_extra_descriptor() finds a descriptor of specific type in the
@@ -1102,18 +971,6 @@
 EXPORT_SYMBOL(usb_find_device);
 EXPORT_SYMBOL(usb_get_current_frame_number);
 
-EXPORT_SYMBOL_GPL(usb_endpoint_dir_in);
-EXPORT_SYMBOL_GPL(usb_endpoint_dir_out);
-EXPORT_SYMBOL_GPL(usb_endpoint_xfer_bulk);
-EXPORT_SYMBOL_GPL(usb_endpoint_xfer_int);
-EXPORT_SYMBOL_GPL(usb_endpoint_xfer_isoc);
-EXPORT_SYMBOL_GPL(usb_endpoint_is_bulk_in);
-EXPORT_SYMBOL_GPL(usb_endpoint_is_bulk_out);
-EXPORT_SYMBOL_GPL(usb_endpoint_is_int_in);
-EXPORT_SYMBOL_GPL(usb_endpoint_is_int_out);
-EXPORT_SYMBOL_GPL(usb_endpoint_is_isoc_in);
-EXPORT_SYMBOL_GPL(usb_endpoint_is_isoc_out);
-
 EXPORT_SYMBOL (usb_buffer_alloc);
 EXPORT_SYMBOL (usb_buffer_free);
 
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 13322e3..17830a8 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -64,14 +64,13 @@
 
 #define USB_AUTOSUSPEND_DELAY	(HZ*2)
 
-extern void usb_autosuspend_device(struct usb_device *udev, int dec_busy_cnt);
-extern int usb_autoresume_device(struct usb_device *udev, int inc_busy_cnt);
+extern void usb_autosuspend_device(struct usb_device *udev);
+extern int usb_autoresume_device(struct usb_device *udev);
 
 #else
 
-#define usb_autosuspend_device(udev, dec_busy_cnt)	do {} while (0)
-static inline int usb_autoresume_device(struct usb_device *udev,
-		int inc_busy_cnt)
+#define usb_autosuspend_device(udev)	do {} while (0)
+static inline int usb_autoresume_device(struct usb_device *udev)
 {
 	return 0;
 }
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 1c17d26..d15bf22 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -1833,9 +1833,9 @@
 	spin_unlock_irqrestore(&dev->req_lock, flags);
 }
 
-static void eth_work (void *_dev)
+static void eth_work (struct work_struct *work)
 {
-	struct eth_dev		*dev = _dev;
+	struct eth_dev	*dev = container_of(work, struct eth_dev, work);
 
 	if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) {
 		if (netif_running (dev->net))
@@ -1894,13 +1894,13 @@
 	if (!eth_is_promisc (dev)) {
 		u8		*dest = skb->data;
 
-		if (dest [0] & 0x01) {
+		if (is_multicast_ether_addr(dest)) {
 			u16	type;
 
 			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
 			 * SET_ETHERNET_MULTICAST_FILTERS requests
 			 */
-			if (memcmp (dest, net->broadcast, ETH_ALEN) == 0)
+			if (is_broadcast_ether_addr(dest))
 				type = USB_CDC_PACKET_TYPE_BROADCAST;
 			else
 				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
@@ -2398,7 +2398,7 @@
 	dev = netdev_priv(net);
 	spin_lock_init (&dev->lock);
 	spin_lock_init (&dev->req_lock);
-	INIT_WORK (&dev->work, eth_work, dev);
+	INIT_WORK (&dev->work, eth_work);
 	INIT_LIST_HEAD (&dev->tx_reqs);
 	INIT_LIST_HEAD (&dev->rx_reqs);
 
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 8b975d1..c98316c 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -250,7 +250,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/utsname.h>
 
 #include <linux/usb_ch9.h>
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 64554ac..3135182 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -1236,7 +1236,7 @@
 
 
 	/* ok, we made sense of the hardware ... */
-	dev = kzalloc(sizeof(*dev), SLAB_KERNEL);
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev) {
 		return -ENOMEM;
 	}
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index a3076da..805a982 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1864,7 +1864,7 @@
 	}
 
 	/* alloc, and start init */
-	dev = kmalloc (sizeof *dev, SLAB_KERNEL);
+	dev = kmalloc (sizeof *dev, GFP_KERNEL);
 	if (dev == NULL){
 		pr_debug("enomem %s\n", pci_name(pdev));
 		retval = -ENOMEM;
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 86924f9..3fb1044a 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -412,7 +412,7 @@
 	/* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
 
 	value = -ENOMEM;
-	kbuf = kmalloc (len, SLAB_KERNEL);
+	kbuf = kmalloc (len, GFP_KERNEL);
 	if (unlikely (!kbuf))
 		goto free1;
 
@@ -456,7 +456,7 @@
 	/* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
 
 	value = -ENOMEM;
-	kbuf = kmalloc (len, SLAB_KERNEL);
+	kbuf = kmalloc (len, GFP_KERNEL);
 	if (!kbuf)
 		goto free1;
 	if (copy_from_user (kbuf, buf, len)) {
@@ -1898,7 +1898,7 @@
 	buf += 4;
 	length -= 4;
 
-	kbuf = kmalloc (length, SLAB_KERNEL);
+	kbuf = kmalloc (length, GFP_KERNEL);
 	if (!kbuf)
 		return -ENOMEM;
 	if (copy_from_user (kbuf, buf, length)) {
diff --git a/drivers/usb/gadget/lh7a40x_udc.c b/drivers/usb/gadget/lh7a40x_udc.c
index 1792596..4a99156 100644
--- a/drivers/usb/gadget/lh7a40x_udc.c
+++ b/drivers/usb/gadget/lh7a40x_udc.c
@@ -83,7 +83,6 @@
 static int lh7a40x_dequeue(struct usb_ep *ep, struct usb_request *);
 static int lh7a40x_set_halt(struct usb_ep *ep, int);
 static int lh7a40x_fifo_status(struct usb_ep *ep);
-static int lh7a40x_fifo_status(struct usb_ep *ep);
 static void lh7a40x_fifo_flush(struct usb_ep *ep);
 static void lh7a40x_ep0_kick(struct lh7a40x_udc *dev, struct lh7a40x_ep *ep);
 static void lh7a40x_handle_ep0(struct lh7a40x_udc *dev, u32 intr);
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 3acc896..3024c67 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -1040,6 +1040,7 @@
 
 	} /* else the irq handler advances the queue. */
 
+	ep->responded = 1;
 	if (req)
 		list_add_tail (&req->queue, &ep->queue);
 done:
@@ -2188,7 +2189,8 @@
 					ep->stopped = 1;
 					set_halt (ep);
 					mode = 2;
-				} else if (!req && !ep->stopped)
+				} else if (ep->responded &&
+						!req && !ep->stopped)
 					write_fifo (ep, NULL);
 			}
 		} else {
@@ -2203,7 +2205,7 @@
 			} else if (((t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT))
 					&& req
 					&& req->req.actual == req->req.length)
-					|| !req) {
+					|| (ep->responded && !req)) {
 				ep->dev->protocol_stall = 1;
 				set_halt (ep);
 				ep->stopped = 1;
@@ -2469,6 +2471,7 @@
 		/* we made the hardware handle most lowlevel requests;
 		 * everything else goes uplevel to the gadget code.
 		 */
+		ep->responded = 1;
 		switch (u.r.bRequest) {
 		case USB_REQ_GET_STATUS: {
 			struct net2280_ep	*e;
@@ -2537,6 +2540,7 @@
 				u.r.bRequestType, u.r.bRequest,
 				w_value, w_index, w_length,
 				readl (&ep->regs->ep_cfg));
+			ep->responded = 0;
 			spin_unlock (&dev->lock);
 			tmp = dev->driver->setup (&dev->gadget, &u.r);
 			spin_lock (&dev->lock);
@@ -2857,7 +2861,7 @@
 	}
 
 	/* alloc, and start init */
-	dev = kzalloc (sizeof *dev, SLAB_KERNEL);
+	dev = kzalloc (sizeof *dev, GFP_KERNEL);
 	if (dev == NULL){
 		retval = -ENOMEM;
 		goto done;
diff --git a/drivers/usb/gadget/net2280.h b/drivers/usb/gadget/net2280.h
index 957d6df..44ca139 100644
--- a/drivers/usb/gadget/net2280.h
+++ b/drivers/usb/gadget/net2280.h
@@ -110,7 +110,8 @@
 						out_overflow : 1,
 						stopped : 1,
 						is_in : 1,
-						is_iso : 1;
+						is_iso : 1,
+						responded : 1;
 };
 
 static inline void allow_status (struct net2280_ep *ep)
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 48a09fd..030d87c 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -2581,7 +2581,7 @@
 	/* UDC_PULLUP_EN gates the chip clock */
 	// OTG_SYSCON_1_REG |= DEV_IDLE_EN;
 
-	udc = kzalloc(sizeof(*udc), SLAB_KERNEL);
+	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
 	if (!udc)
 		return -ENOMEM;
 
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c
index 671c24b..1ed506e 100644
--- a/drivers/usb/gadget/pxa2xx_udc.c
+++ b/drivers/usb/gadget/pxa2xx_udc.c
@@ -2472,6 +2472,7 @@
 #define PXA210_B1		0x00000123
 #define PXA210_B0		0x00000122
 #define IXP425_A0		0x000001c1
+#define IXP425_B0		0x000001f1
 #define IXP465_AD		0x00000200
 
 /*
@@ -2509,6 +2510,7 @@
 		break;
 #elif	defined(CONFIG_ARCH_IXP4XX)
 	case IXP425_A0:
+	case IXP425_B0:
 	case IXP465_AD:
 		dev->has_cfr = 1;
 		out_dma = 0;
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 0f809dd..40710ea 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -1190,7 +1190,7 @@
 
 
 	/* ok, we made sense of the hardware ... */
-	dev = kzalloc(sizeof(*dev), SLAB_KERNEL);
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev)
 		return -ENOMEM;
 	spin_lock_init (&dev->lock);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index cf10cbc..cc60759 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -153,7 +153,7 @@
 	  adapter will *NOT* work with PC cards that do not contain an OHCI
 	  controller.
 
-	  For those PC cards that contain multiple OHCI controllers only ther
+	  For those PC cards that contain multiple OHCI controllers only the
 	  first one is used.
 
 	  The driver consists of two modules, the "ftdi-elan" module is a
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 34b7a31..56349d2 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -492,7 +492,7 @@
 	unsigned		i;
 	__le32			tag;
 
-	if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, SLAB_ATOMIC)))
+	if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC)))
 		return 0;
 	seen_count = 0;
 
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 9030994..025d333 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -126,6 +126,11 @@
 module_param (park, uint, S_IRUGO);
 MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
 
+/* for flakey hardware, ignore overcurrent indicators */
+static int ignore_oc = 0;
+module_param (ignore_oc, bool, S_IRUGO);
+MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
+
 #define	INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
 
 /*-------------------------------------------------------------------------*/
@@ -541,9 +546,10 @@
 
 	temp = HC_VERSION(readl (&ehci->caps->hc_capbase));
 	ehci_info (ehci,
-		"USB %x.%x started, EHCI %x.%02x, driver %s\n",
+		"USB %x.%x started, EHCI %x.%02x, driver %s%s\n",
 		((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
-		temp >> 8, temp & 0xff, DRIVER_VERSION);
+		temp >> 8, temp & 0xff, DRIVER_VERSION,
+		ignore_oc ? ", overcurrent ignored" : "");
 
 	writel (INTR_MASK, &ehci->regs->intr_enable); /* Turn On Interrupts */
 
@@ -613,9 +619,8 @@
 		unsigned	i = HCS_N_PORTS (ehci->hcs_params);
 
 		/* resume root hub? */
-		status = readl (&ehci->regs->command);
-		if (!(status & CMD_RUN))
-			writel (status | CMD_RUN, &ehci->regs->command);
+		if (!(readl(&ehci->regs->command) & CMD_RUN))
+			usb_hcd_resume_root_hub(hcd);
 
 		while (i--) {
 			int pstatus = readl (&ehci->regs->port_status [i]);
@@ -632,7 +637,6 @@
 			 */
 			ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
 			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
-			usb_hcd_resume_root_hub(hcd);
 		}
 	}
 
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 1b20722..bfe5f30 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -34,6 +34,7 @@
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
 	int			port;
+	int			mask;
 
 	if (time_before (jiffies, ehci->next_statechange))
 		msleep(5);
@@ -51,14 +52,25 @@
 		ehci->reclaim_ready = 1;
 	ehci_work(ehci);
 
-	/* suspend any active/unsuspended ports, maybe allow wakeup */
+	/* Unlike other USB host controller types, EHCI doesn't have
+	 * any notion of "global" or bus-wide suspend.  The driver has
+	 * to manually suspend all the active unsuspended ports, and
+	 * then manually resume them in the bus_resume() routine.
+	 */
+	ehci->bus_suspended = 0;
 	while (port--) {
 		u32 __iomem	*reg = &ehci->regs->port_status [port];
 		u32		t1 = readl (reg) & ~PORT_RWC_BITS;
 		u32		t2 = t1;
 
-		if ((t1 & PORT_PE) && !(t1 & PORT_OWNER))
+		/* keep track of which ports we suspend */
+		if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) &&
+				!(t1 & PORT_SUSPEND)) {
 			t2 |= PORT_SUSPEND;
+			set_bit(port, &ehci->bus_suspended);
+		}
+
+		/* enable remote wakeup on all ports */
 		if (device_may_wakeup(&hcd->self.root_hub->dev))
 			t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E;
 		else
@@ -76,6 +88,13 @@
 	ehci_halt (ehci);
 	hcd->state = HC_STATE_SUSPENDED;
 
+	/* allow remote wakeup */
+	mask = INTR_MASK;
+	if (!device_may_wakeup(&hcd->self.root_hub->dev))
+		mask &= ~STS_PCD;
+	writel(mask, &ehci->regs->intr_enable);
+	readl(&ehci->regs->intr_enable);
+
 	ehci->next_statechange = jiffies + msecs_to_jiffies(10);
 	spin_unlock_irq (&ehci->lock);
 	return 0;
@@ -88,7 +107,6 @@
 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
 	u32			temp;
 	int			i;
-	int			intr_enable;
 
 	if (time_before (jiffies, ehci->next_statechange))
 		msleep(5);
@@ -100,31 +118,30 @@
 	 * the last user of the controller, not reset/pm hardware keeping
 	 * state we gave to it.
 	 */
+	temp = readl(&ehci->regs->intr_enable);
+	ehci_dbg(ehci, "resume root hub%s\n", temp ? "" : " after power loss");
 
-	/* re-init operational registers in case we lost power */
-	if (readl (&ehci->regs->intr_enable) == 0) {
-		/* at least some APM implementations will try to deliver
-		 * IRQs right away, so delay them until we're ready.
-		 */
-		intr_enable = 1;
-		writel (0, &ehci->regs->segment);
-		writel (ehci->periodic_dma, &ehci->regs->frame_list);
-		writel ((u32)ehci->async->qh_dma, &ehci->regs->async_next);
-	} else
-		intr_enable = 0;
-	ehci_dbg(ehci, "resume root hub%s\n",
-			intr_enable ? " after power loss" : "");
+	/* at least some APM implementations will try to deliver
+	 * IRQs right away, so delay them until we're ready.
+	 */
+	writel(0, &ehci->regs->intr_enable);
+
+	/* re-init operational registers */
+	writel(0, &ehci->regs->segment);
+	writel(ehci->periodic_dma, &ehci->regs->frame_list);
+	writel((u32) ehci->async->qh_dma, &ehci->regs->async_next);
 
 	/* restore CMD_RUN, framelist size, and irq threshold */
 	writel (ehci->command, &ehci->regs->command);
 
-	/* take ports out of suspend */
+	/* manually resume the ports we suspended during bus_suspend() */
 	i = HCS_N_PORTS (ehci->hcs_params);
 	while (i--) {
 		temp = readl (&ehci->regs->port_status [i]);
 		temp &= ~(PORT_RWC_BITS
 			| PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E);
-		if (temp & PORT_SUSPEND) {
+		if (test_bit(i, &ehci->bus_suspended) &&
+				(temp & PORT_SUSPEND)) {
 			ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
 			temp |= PORT_RESUME;
 		}
@@ -134,11 +151,12 @@
 	mdelay (20);
 	while (i--) {
 		temp = readl (&ehci->regs->port_status [i]);
-		if ((temp & PORT_SUSPEND) == 0)
-			continue;
-		temp &= ~(PORT_RWC_BITS | PORT_RESUME);
-		writel (temp, &ehci->regs->port_status [i]);
-		ehci_vdbg (ehci, "resumed port %d\n", i + 1);
+		if (test_bit(i, &ehci->bus_suspended) &&
+				(temp & PORT_SUSPEND)) {
+			temp &= ~(PORT_RWC_BITS | PORT_RESUME);
+			writel (temp, &ehci->regs->port_status [i]);
+			ehci_vdbg (ehci, "resumed port %d\n", i + 1);
+		}
 	}
 	(void) readl (&ehci->regs->command);
 
@@ -157,8 +175,7 @@
 	hcd->state = HC_STATE_RUNNING;
 
 	/* Now we can safely re-enable irqs */
-	if (intr_enable)
-		writel (INTR_MASK, &ehci->regs->intr_enable);
+	writel(INTR_MASK, &ehci->regs->intr_enable);
 
 	spin_unlock_irq (&ehci->lock);
 	return 0;
@@ -218,6 +235,7 @@
 {
 	struct ehci_hcd	*ehci = hcd_to_ehci (hcd);
 	u32		temp, status = 0;
+	u32		mask;
 	int		ports, i, retval = 1;
 	unsigned long	flags;
 
@@ -233,6 +251,18 @@
 		retval++;
 	}
 
+	/* Some boards (mostly VIA?) report bogus overcurrent indications,
+	 * causing massive log spam unless we completely ignore them.  It
+	 * may be relevant that VIA VT8235 controllers, where PORT_POWER is
+	 * always set, seem to clear PORT_OCC and PORT_CSC when writing to
+	 * PORT_POWER; that's surprising, but maybe within-spec.
+	 */
+	if (!ignore_oc)
+		mask = PORT_CSC | PORT_PEC | PORT_OCC;
+	else
+		mask = PORT_CSC | PORT_PEC;
+	// PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND
+
 	/* no hub change reports (bit 0) for now (power, ...) */
 
 	/* port N changes (bit N)? */
@@ -250,8 +280,7 @@
 		}
 		if (!(temp & PORT_CONNECT))
 			ehci->reset_done [i] = 0;
-		if ((temp & (PORT_CSC | PORT_PEC | PORT_OCC)) != 0
-				// PORT_STAT_C_SUSPEND?
+		if ((temp & mask) != 0
 				|| ((temp & PORT_RESUME) != 0
 					&& time_after (jiffies,
 						ehci->reset_done [i]))) {
@@ -319,6 +348,7 @@
 	u32		temp, status;
 	unsigned long	flags;
 	int		retval = 0;
+	unsigned	selector;
 
 	/*
 	 * FIXME:  support SetPortFeatures USB_PORT_FEAT_INDICATOR.
@@ -417,7 +447,7 @@
 			status |= 1 << USB_PORT_FEAT_C_CONNECTION;
 		if (temp & PORT_PEC)
 			status |= 1 << USB_PORT_FEAT_C_ENABLE;
-		if (temp & PORT_OCC)
+		if ((temp & PORT_OCC) && !ignore_oc)
 			status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
 
 		/* whoever resumes must GetPortStatus to complete it!! */
@@ -506,6 +536,8 @@
 		}
 		break;
 	case SetPortFeature:
+		selector = wIndex >> 8;
+		wIndex &= 0xff;
 		if (!wIndex || wIndex > ports)
 			goto error;
 		wIndex--;
@@ -559,6 +591,22 @@
 			}
 			writel (temp, &ehci->regs->port_status [wIndex]);
 			break;
+
+		/* For downstream facing ports (these):  one hub port is put
+		 * into test mode according to USB2 11.24.2.13, then the hub
+		 * must be reset (which for root hub now means rmmod+modprobe,
+		 * or else system reboot).  See EHCI 2.3.9 and 4.14 for info
+		 * about the EHCI-specific stuff.
+		 */
+		case USB_PORT_FEAT_TEST:
+			if (!selector || selector > 5)
+				goto error;
+			ehci_quiesce(ehci);
+			ehci_halt(ehci);
+			temp |= selector << 16;
+			writel (temp, &ehci->regs->port_status [wIndex]);
+			break;
+
 		default:
 			goto error;
 		}
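
The bus_suspend()/bus_resume() rework earlier in this file hangs off the new ehci->bus_suspended bitmap: suspend records exactly which ports the driver put to sleep, and resume only touches those, leaving companion-owned or already-suspended ports alone.  Stripped of the register access, the bookkeeping is plain bitmap work; a rough sketch with made-up names:

	#include <linux/bitops.h>

	#define MY_MAX_PORTS	15
	#define MY_PORT_PE	0x1	/* port enabled (illustrative) */
	#define MY_PORT_SUSP	0x2	/* port suspended (illustrative) */

	static unsigned long my_bus_suspended;		/* one bit per root-hub port */
	static unsigned int my_port_status[MY_MAX_PORTS];

	static void my_suspend_ports(int nports)
	{
		int port;

		my_bus_suspended = 0;
		for (port = 0; port < nports; port++) {
			unsigned int st = my_port_status[port];

			/* remember only the ports this driver suspends itself */
			if ((st & MY_PORT_PE) && !(st & MY_PORT_SUSP)) {
				my_port_status[port] = st | MY_PORT_SUSP;
				set_bit(port, &my_bus_suspended);
			}
		}
	}

	static void my_resume_ports(int nports)
	{
		int port;

		for (port = 0; port < nports; port++)
			if (test_bit(port, &my_bus_suspended))
				my_port_status[port] &= ~MY_PORT_SUSP;
	}
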
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index e51c1ed8..4bc7970 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -257,9 +257,7 @@
 static int ehci_pci_resume(struct usb_hcd *hcd)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
-	unsigned		port;
 	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
-	int			retval = -EINVAL;
 
 	// maybe restore FLADJ
 
@@ -269,27 +267,19 @@
 	/* Mark hardware accessible again as we are out of D3 state by now */
 	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
 
-	/* If CF is clear, we lost PCI Vaux power and need to restart.  */
-	if (readl(&ehci->regs->configured_flag) != FLAG_CF)
-		goto restart;
-
-	/* If any port is suspended (or owned by the companion),
-	 * we know we can/must resume the HC (and mustn't reset it).
-	 * We just defer that to the root hub code.
+	/* If CF is still set, we maintained PCI Vaux power.
+	 * Just undo the effect of ehci_pci_suspend().
 	 */
-	for (port = HCS_N_PORTS(ehci->hcs_params); port > 0; ) {
-		u32	status;
-		port--;
-		status = readl(&ehci->regs->port_status [port]);
-		if (!(status & PORT_POWER))
-			continue;
-		if (status & (PORT_SUSPEND | PORT_RESUME | PORT_OWNER)) {
-			usb_hcd_resume_root_hub(hcd);
-			return 0;
-		}
+	if (readl(&ehci->regs->configured_flag) == FLAG_CF) {
+		int	mask = INTR_MASK;
+
+		if (!device_may_wakeup(&hcd->self.root_hub->dev))
+			mask &= ~STS_PCD;
+		writel(mask, &ehci->regs->intr_enable);
+		readl(&ehci->regs->intr_enable);
+		return 0;
 	}
 
-restart:
 	ehci_dbg(ehci, "lost power, restarting\n");
 	usb_root_hub_lost_power(hcd->self.root_hub);
 
@@ -307,13 +297,15 @@
 	ehci_work(ehci);
 	spin_unlock_irq(&ehci->lock);
 
-	/* restart; khubd will disconnect devices */
-	retval = ehci_run(hcd);
-
 	/* here we "know" root ports should always stay powered */
 	ehci_port_power(ehci, 1);
 
-	return retval;
+	writel(ehci->command, &ehci->regs->command);
+	writel(FLAG_CF, &ehci->regs->configured_flag);
+	readl(&ehci->regs->command);	/* unblock posted writes */
+
+	hcd->state = HC_STATE_SUSPENDED;
+	return 0;
 }
 #endif
 
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index bbc3082..74dbc6c 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -74,6 +74,7 @@
 
 	/* per root hub port */
 	unsigned long		reset_done [EHCI_MAX_ROOT_PORTS];
+	unsigned long		bus_suspended;
 
 	/* per-HC memory pools (could be per-bus, but ...) */
 	struct dma_pool		*qh_pool;	/* qh per active urb */
diff --git a/drivers/usb/host/hc_crisv10.c b/drivers/usb/host/hc_crisv10.c
index 87eca6a..9325e46 100644
--- a/drivers/usb/host/hc_crisv10.c
+++ b/drivers/usb/host/hc_crisv10.c
@@ -188,7 +188,7 @@
 #define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
 {panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}
 
-#define SLAB_FLAG     (in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL)
+#define SLAB_FLAG     (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
 #define KMALLOC_FLAG  (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
 
 /* Most helpful debugging aid */
@@ -275,13 +275,13 @@
 static int zout_buffer[4] __attribute__ ((aligned (4)));
 
 /* Cache for allocating new EP and SB descriptors. */
-static kmem_cache_t *usb_desc_cache;
+static struct kmem_cache *usb_desc_cache;
 
 /* Cache for the registers allocated in the top half. */
-static kmem_cache_t *top_half_reg_cache;
+static struct kmem_cache *top_half_reg_cache;
 
 /* Cache for the data allocated in the isoc descr top half. */
-static kmem_cache_t *isoc_compl_cache;
+static struct kmem_cache *isoc_compl_cache;
 
 static struct usb_bus *etrax_usb_bus;
 
@@ -1743,7 +1743,7 @@
 
 		*R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do);
 
-		comp_data = (usb_isoc_complete_data_t*)kmem_cache_alloc(isoc_compl_cache, SLAB_ATOMIC);
+		comp_data = (usb_isoc_complete_data_t*)kmem_cache_alloc(isoc_compl_cache, GFP_ATOMIC);
 		assert(comp_data != NULL);
 
                 INIT_WORK(&comp_data->usb_bh, etrax_usb_isoc_descr_interrupt_bottom_half, comp_data);
@@ -3010,7 +3010,7 @@
 			if (!urb->iso_frame_desc[i].length)
 				continue;
 
-			next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_ATOMIC);
+			next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, GFP_ATOMIC);
 			assert(next_sb_desc != NULL);
 
 			if (urb->iso_frame_desc[i].length > 0) {
@@ -3063,7 +3063,7 @@
 		if (TxIsocEPList[epid].sub == 0) {
 			dbg_isoc("Isoc traffic not already running, allocating SB");
 
-			next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_ATOMIC);
+			next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, GFP_ATOMIC);
 			assert(next_sb_desc != NULL);
 
 			next_sb_desc->command = (IO_STATE(USB_SB_command, tt, in) |
@@ -3317,7 +3317,7 @@
 
 	restore_flags(flags);
 
-	reg = (usb_interrupt_registers_t *)kmem_cache_alloc(top_half_reg_cache, SLAB_ATOMIC);
+	reg = (usb_interrupt_registers_t *)kmem_cache_alloc(top_half_reg_cache, GFP_ATOMIC);
 
 	assert(reg != NULL);
 
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index 8293c1d..0f47a57 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -505,7 +505,7 @@
 	char			*next;
 	unsigned		i;
 
-	if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, SLAB_ATOMIC)))
+	if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC)))
 		return 0;
 	seen_count = 0;
 
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 4776b3b..b28a9b6 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -729,6 +729,16 @@
 		ohci->next_statechange = jiffies + STATECHANGE_DELAY;
 		ohci_writel(ohci, OHCI_INTR_RD | OHCI_INTR_RHSC,
 				&regs->intrstatus);
+
+		/* NOTE: Vendors didn't always make the same implementation
+		 * choices for RHSC.  Many followed the spec; RHSC triggers
+		 * on an edge, like setting and maybe clearing a port status
+		 * change bit.  With others it's level-triggered, active
+		 * until khubd clears all the port status change bits.  We'll
+		 * always disable it here and rely on polling until khubd
+		 * re-enables it.
+		 */
+		ohci_writel(ohci, OHCI_INTR_RHSC, &regs->intrdisable);
 		usb_hcd_poll_rh_status(hcd);
 	}
 
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 6995ea3..2441642 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -41,7 +41,11 @@
 {
 	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
 
-	ohci_writel (ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable);
+	spin_lock_irq(&ohci->lock);
+	if (!ohci->autostop)
+		del_timer(&hcd->rh_timer);	/* Prevent next poll */
+	ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable);
+	spin_unlock_irq(&ohci->lock);
 }
 
 #define OHCI_SCHED_ENABLES \
@@ -50,6 +54,9 @@
 static void dl_done_list (struct ohci_hcd *);
 static void finish_unlinks (struct ohci_hcd *, u16);
 
+#ifdef	CONFIG_PM
+static int ohci_restart(struct ohci_hcd *ohci);
+
 static int ohci_rh_suspend (struct ohci_hcd *ohci, int autostop)
 __releases(ohci->lock)
 __acquires(ohci->lock)
@@ -132,8 +139,6 @@
 	return ed;
 }
 
-static int ohci_restart (struct ohci_hcd *ohci);
-
 /* caller has locked the root hub */
 static int ohci_rh_resume (struct ohci_hcd *ohci)
 __releases(ohci->lock)
@@ -169,7 +174,7 @@
 		break;
 	case OHCI_USB_RESUME:
 		/* HCFS changes sometime after INTR_RD */
-		ohci_info(ohci, "%swakeup\n",
+		ohci_dbg(ohci, "%swakeup root hub\n",
 				autostopped ? "auto-" : "");
 		break;
 	case OHCI_USB_OPER:
@@ -181,7 +186,6 @@
 		ohci_dbg (ohci, "lost power\n");
 		status = -EBUSY;
 	}
-#ifdef	CONFIG_PM
 	if (status == -EBUSY) {
 		if (!autostopped) {
 			spin_unlock_irq (&ohci->lock);
@@ -191,25 +195,12 @@
 		}
 		return status;
 	}
-#endif
 	if (status != -EINPROGRESS)
 		return status;
 	if (autostopped)
 		goto skip_resume;
 	spin_unlock_irq (&ohci->lock);
 
-	temp = ohci->num_ports;
-	while (temp--) {
-		u32 stat = ohci_readl (ohci,
-				       &ohci->regs->roothub.portstatus [temp]);
-
-		/* force global, not selective, resume */
-		if (!(stat & RH_PS_PSS))
-			continue;
-		ohci_writel (ohci, RH_PS_POCI,
-				&ohci->regs->roothub.portstatus [temp]);
-	}
-
 	/* Some controllers (lucent erratum) need extra-long delays */
 	msleep (20 /* usb 11.5.1.10 */ + 12 /* 32 msec counter */ + 1);
 
@@ -217,6 +208,7 @@
 	temp &= OHCI_CTRL_HCFS;
 	if (temp != OHCI_USB_RESUME) {
 		ohci_err (ohci, "controller won't resume\n");
+		spin_lock_irq(&ohci->lock);
 		return -EBUSY;
 	}
 
@@ -296,8 +288,6 @@
 	return 0;
 }
 
-#ifdef	CONFIG_PM
-
 static int ohci_bus_suspend (struct usb_hcd *hcd)
 {
 	struct ohci_hcd		*ohci = hcd_to_ohci (hcd);
@@ -335,6 +325,83 @@
 	return rc;
 }
 
+/* Carry out polling-, autostop-, and autoresume-related state changes */
+static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
+		int any_connected)
+{
+	int	poll_rh = 1;
+
+	switch (ohci->hc_control & OHCI_CTRL_HCFS) {
+
+	case OHCI_USB_OPER:
+		/* keep on polling until we know a device is connected
+		 * and RHSC is enabled */
+		if (!ohci->autostop) {
+			if (any_connected ||
+					!device_may_wakeup(&ohci_to_hcd(ohci)
+						->self.root_hub->dev)) {
+				if (ohci_readl(ohci, &ohci->regs->intrenable) &
+						OHCI_INTR_RHSC)
+					poll_rh = 0;
+			} else {
+				ohci->autostop = 1;
+				ohci->next_statechange = jiffies + HZ;
+			}
+
+		/* if no devices have been attached for one second, autostop */
+		} else {
+			if (changed || any_connected) {
+				ohci->autostop = 0;
+				ohci->next_statechange = jiffies +
+						STATECHANGE_DELAY;
+			} else if (time_after_eq(jiffies,
+						ohci->next_statechange)
+					&& !ohci->ed_rm_list
+					&& !(ohci->hc_control &
+						OHCI_SCHED_ENABLES)) {
+				ohci_rh_suspend(ohci, 1);
+			}
+		}
+		break;
+
+	/* if there is a port change, autostart or ask to be resumed */
+	case OHCI_USB_SUSPEND:
+	case OHCI_USB_RESUME:
+		if (changed) {
+			if (ohci->autostop)
+				ohci_rh_resume(ohci);
+			else
+				usb_hcd_resume_root_hub(ohci_to_hcd(ohci));
+		} else {
+			/* everything is idle, no need for polling */
+			poll_rh = 0;
+		}
+		break;
+	}
+	return poll_rh;
+}
+
+#else	/* CONFIG_PM */
+
+static inline int ohci_rh_resume(struct ohci_hcd *ohci)
+{
+	return 0;
+}
+
+/* Carry out polling-related state changes.
+ * autostop isn't used when CONFIG_PM is turned off.
+ */
+static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
+		int any_connected)
+{
+	int	poll_rh = 1;
+
+	/* keep on polling until RHSC is enabled */
+	if (ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC)
+		poll_rh = 0;
+	return poll_rh;
+}
+
 #endif	/* CONFIG_PM */
 
 /*-------------------------------------------------------------------------*/
@@ -346,7 +413,7 @@
 {
 	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
 	int		i, changed = 0, length = 1;
-	int		any_connected = 0, rhsc_enabled = 1;
+	int		any_connected = 0;
 	unsigned long	flags;
 
 	spin_lock_irqsave (&ohci->lock, flags);
@@ -387,67 +454,8 @@
 		}
 	}
 
-	/* NOTE:  vendors didn't always make the same implementation
-	 * choices for RHSC.  Sometimes it triggers on an edge (like
-	 * setting and maybe clearing a port status change bit); and
-	 * it's level-triggered on other silicon, active until khubd
-	 * clears all active port status change bits.  If it's still
-	 * set (level-triggered) we must disable it and rely on
-	 * polling until khubd re-enables it.
-	 */
-	if (ohci_readl (ohci, &ohci->regs->intrstatus) & OHCI_INTR_RHSC) {
-		ohci_writel (ohci, OHCI_INTR_RHSC, &ohci->regs->intrdisable);
-		(void) ohci_readl (ohci, &ohci->regs->intrdisable);
-		rhsc_enabled = 0;
-	}
-	hcd->poll_rh = 1;
-
-	/* carry out appropriate state changes */
-	switch (ohci->hc_control & OHCI_CTRL_HCFS) {
-
-	case OHCI_USB_OPER:
-		/* keep on polling until we know a device is connected
-		 * and RHSC is enabled */
-		if (!ohci->autostop) {
-			if (any_connected) {
-				if (rhsc_enabled)
-					hcd->poll_rh = 0;
-			} else {
-				ohci->autostop = 1;
-				ohci->next_statechange = jiffies + HZ;
-			}
-
-		/* if no devices have been attached for one second, autostop */
-		} else {
-			if (changed || any_connected) {
-				ohci->autostop = 0;
-				ohci->next_statechange = jiffies +
-						STATECHANGE_DELAY;
-			} else if (device_may_wakeup(&hcd->self.root_hub->dev)
-					&& time_after_eq(jiffies,
-						ohci->next_statechange)
-					&& !ohci->ed_rm_list
-					&& !(ohci->hc_control &
-						OHCI_SCHED_ENABLES)) {
-				ohci_rh_suspend (ohci, 1);
-			}
-		}
-		break;
-
-	/* if there is a port change, autostart or ask to be resumed */
-	case OHCI_USB_SUSPEND:
-	case OHCI_USB_RESUME:
-		if (changed) {
-			if (ohci->autostop)
-				ohci_rh_resume (ohci);
-			else
-				usb_hcd_resume_root_hub (hcd);
-		} else {
-			/* everything is idle, no need for polling */
-			hcd->poll_rh = 0;
-		}
-		break;
-	}
+	hcd->poll_rh = ohci_root_hub_state_changes(ohci, changed,
+			any_connected);
 
 done:
 	spin_unlock_irqrestore (&ohci->lock, flags);
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 2dbb774..7f26f9b 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -134,7 +134,7 @@
 {
 	struct i2c_client *c;
 
-	c = (struct i2c_client *)kzalloc(sizeof(*c), SLAB_KERNEL);
+	c = (struct i2c_client *)kzalloc(sizeof(*c), GFP_KERNEL);
 
 	if (!c)
 		return -ENOMEM;
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 54f554e..ac9f11d 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -169,21 +169,14 @@
 
 	DBG(0, "sl811_cs_config(0x%p)\n", link);
 
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	tuple.Attributes = 0;
-	tuple.TupleData = buf;
-	tuple.TupleDataMax = sizeof(buf);
-	tuple.TupleOffset = 0;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-	link->conf.ConfigBase = parse.config.base;
-	link->conf.Present = parse.config.rmask[0];
-
 	/* Look up the current Vcc */
 	CS_CHECK(GetConfigurationInfo,
 			pcmcia_get_configuration_info(link, &conf));
 
+	tuple.Attributes = 0;
+	tuple.TupleData = buf;
+	tuple.TupleDataMax = sizeof(buf);
+	tuple.TupleOffset = 0;
 	tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
 	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
 	while (1) {
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 32c635e..a9d7119 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -71,7 +71,7 @@
 module_param(distrust_firmware, bool, 0);
 MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren"
         "t setup");
-DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
+static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
 /*
 * u132_module_lock exists to protect access to global variables
 *
@@ -163,7 +163,7 @@
         u16 queue_next;
         struct urb *urb_list[ENDP_QUEUE_SIZE];
         struct list_head urb_more;
-        struct work_struct scheduler;
+        struct delayed_work scheduler;
 };
 struct u132_ring {
         unsigned in_use:1;
@@ -171,7 +171,7 @@
         u8 number;
         struct u132 *u132;
         struct u132_endp *curr_endp;
-        struct work_struct scheduler;
+        struct delayed_work scheduler;
 };
 #define OHCI_QUIRK_AMD756 0x01
 #define OHCI_QUIRK_SUPERIO 0x02
@@ -198,20 +198,16 @@
         u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
         int flags;
         unsigned long next_statechange;
-        struct work_struct monitor;
+        struct delayed_work monitor;
         int num_endpoints;
         struct u132_addr addr[MAX_U132_ADDRS];
         struct u132_udev udev[MAX_U132_UDEVS];
         struct u132_port port[MAX_U132_PORTS];
         struct u132_endp *endp[MAX_U132_ENDPS];
 };
-int usb_ftdi_elan_read_reg(struct platform_device *pdev, u32 *data);
-int usb_ftdi_elan_read_pcimem(struct platform_device *pdev, u8 addressofs,
-        u8 width, u32 *data);
-int usb_ftdi_elan_write_pcimem(struct platform_device *pdev, u8 addressofs,
-        u8 width, u32 data);
+
 /*
-* these can not be inlines because we need the structure offset!!
+* these cannot be inlines because we need the structure offset!!
 * Does anyone have a better way?????
 */
 #define u132_read_pcimem(u132, member, data) \
@@ -314,7 +310,7 @@
         if (delta > 0) {
                 if (queue_delayed_work(workqueue, &ring->scheduler, delta))
                         return;
-        } else if (queue_work(workqueue, &ring->scheduler))
+        } else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
                 return;
         kref_put(&u132->kref, u132_hcd_delete);
         return;
@@ -393,12 +389,8 @@
 static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
         unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &endp->scheduler, delta))
-                        kref_get(&endp->kref);
-        } else if (queue_work(workqueue, &endp->scheduler))
-                kref_get(&endp->kref);
-        return;
+	if (queue_delayed_work(workqueue, &endp->scheduler, delta))
+		kref_get(&endp->kref);
 }
 
 static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
@@ -414,24 +406,14 @@
 
 static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &u132->monitor, delta)) {
-                        kref_get(&u132->kref);
-                }
-        } else if (queue_work(workqueue, &u132->monitor))
-                kref_get(&u132->kref);
-        return;
+	if (queue_delayed_work(workqueue, &u132->monitor, delta))
+		kref_get(&u132->kref);
 }
 
 static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &u132->monitor, delta))
-                        return;
-        } else if (queue_work(workqueue, &u132->monitor))
-                return;
-        kref_put(&u132->kref, u132_hcd_delete);
-        return;
+	if (!queue_delayed_work(workqueue, &u132->monitor, delta))
+		kref_put(&u132->kref, u132_hcd_delete);
 }
 
 static void u132_monitor_cancel_work(struct u132 *u132)
@@ -493,9 +475,9 @@
         return 0;
 }
 
-static void u132_hcd_monitor_work(void *data)
+static void u132_hcd_monitor_work(struct work_struct *work)
 {
-        struct u132 *u132 = data;
+        struct u132 *u132 = container_of(work, struct u132, monitor.work);
         if (u132->going > 1) {
                 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
                         , u132->going);
@@ -1319,15 +1301,14 @@
         }
 }
 
-static void u132_hcd_ring_work_scheduler(void *data);
-static void u132_hcd_endp_work_scheduler(void *data);
 /*
 * this work function is only executed from the work queue
 *
 */
-static void u132_hcd_ring_work_scheduler(void *data)
+static void u132_hcd_ring_work_scheduler(struct work_struct *work)
 {
-        struct u132_ring *ring = data;
+        struct u132_ring *ring =
+		container_of(work, struct u132_ring, scheduler.work);
         struct u132 *u132 = ring->u132;
         down(&u132->scheduler_lock);
         if (ring->in_use) {
@@ -1386,10 +1367,11 @@
         }
 }
 
-static void u132_hcd_endp_work_scheduler(void *data)
+static void u132_hcd_endp_work_scheduler(struct work_struct *work)
 {
         struct u132_ring *ring;
-        struct u132_endp *endp = data;
+        struct u132_endp *endp =
+		container_of(work, struct u132_endp, scheduler.work);
         struct u132 *u132 = endp->u132;
         down(&u132->scheduler_lock);
         ring = endp->ring;
@@ -1947,7 +1929,7 @@
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         ring = endp->ring = &u132->ring[0];
@@ -2036,7 +2018,7 @@
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         endp->dequeueing = 0;
@@ -2121,7 +2103,7 @@
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         ring = endp->ring = &u132->ring[0];
@@ -3045,7 +3027,7 @@
 * This function may be called by the USB core whilst the "usb_all_devices_rwsem"
 * is held for writing, thus this module must not call usb_remove_hcd()
 * synchronously - but instead should immediately stop activity to the
-* device and ansynchronously call usb_remove_hcd()
+* device and asynchronously call usb_remove_hcd()
 */
 static int __devexit u132_remove(struct platform_device *pdev)
 {
@@ -3100,10 +3082,10 @@
                 ring->number = rings + 1;
                 ring->length = 0;
                 ring->curr_endp = NULL;
-                INIT_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler,
-                        (void *)ring);
+                INIT_DELAYED_WORK(&ring->scheduler,
+				  u132_hcd_ring_work_scheduler);
         } down(&u132->sw_lock);
-        INIT_WORK(&u132->monitor, u132_hcd_monitor_work, (void *)u132);
+        INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
         while (ports-- > 0) {
                 struct u132_port *port = &u132->port[ports];
                 port->u132 = u132;
@@ -3241,7 +3223,7 @@
 #define u132_resume NULL
 #endif
 /*
-* this driver is loaded explicitely by ftdi_u132
+* this driver is loaded explicitly by ftdi_u132
 *
 * the platform_driver struct is static because it is per type of module
 */
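
The u132-hcd churn above is mostly the workqueue API transition: work items that need a delay become struct delayed_work, queue_work() collapses into queue_delayed_work(wq, work, 0), and handlers take a struct work_struct * and recover their container with container_of() instead of a void *data cookie.  A hedged sketch of the pattern (structure and field names are illustrative):

	#include <linux/workqueue.h>

	struct my_dev {
		struct delayed_work monitor;	/* was struct work_struct */
	};

	static void my_monitor_work(struct work_struct *work)
	{
		/* no more void *data argument; recover the owner instead */
		struct my_dev *dev = container_of(work, struct my_dev, monitor.work);

		/* ... poll the hardware ..., then run again in one jiffy */
		schedule_delayed_work(&dev->monitor, 1);
	}

	static void my_dev_start(struct my_dev *dev)
	{
		INIT_DELAYED_WORK(&dev->monitor, my_monitor_work);
		/* a delay of 0 queues immediately, so no separate queue_work() path */
		schedule_delayed_work(&dev->monitor, 0);
	}
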
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 226bf3d..e87692c 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -81,7 +81,7 @@
 static char *errbuf;
 #define ERRBUF_LEN    (32 * 1024)
 
-static kmem_cache_t *uhci_up_cachep;	/* urb_priv */
+static struct kmem_cache *uhci_up_cachep;	/* urb_priv */
 
 static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state);
 static void wakeup_rh(struct uhci_hcd *uhci);
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 06115f2..30b8845 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -498,7 +498,7 @@
 {
 	struct urb_priv *urbp;
 
-	urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
+	urbp = kmem_cache_alloc(uhci_up_cachep, GFP_ATOMIC);
 	if (!urbp)
 		return NULL;
 
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 3038ed0..8ccddf7 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -796,7 +796,7 @@
 
 	new_desc->context.scsi_status = kmalloc(1, GFP_KERNEL);
 	if (!new_desc->context.scsi_status)
-		goto out_kfree2;
+		goto out_free_urb;
 
 	new_desc->usb_dev = dev;
 	new_desc->usb_intf = intf;
@@ -822,18 +822,20 @@
 	new_desc->host = scsi_host_alloc(&mts_scsi_host_template,
 			sizeof(new_desc));
 	if (!new_desc->host)
-		goto out_free_urb;
+		goto out_kfree2;
 
 	new_desc->host->hostdata[0] = (unsigned long)new_desc;
 	if (scsi_add_host(new_desc->host, NULL)) {
 		err_retval = -EIO;
-		goto out_free_urb;
+		goto out_host_put;
 	}
 	scsi_scan_host(new_desc->host);
 
 	usb_set_intfdata(intf, new_desc);
 	return 0;
 
+ out_host_put:
+	scsi_host_put(new_desc->host);
  out_kfree2:
 	kfree(new_desc->context.scsi_status);
  out_free_urb:
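
The microtek error-path shuffle above restores the usual probe unwinding rule: each goto label undoes exactly what was acquired before the failing step, in reverse order, so the allocated SCSI host is now released via scsi_host_put() instead of falling through to the URB cleanup.  A generic sketch of the idiom (names invented for illustration):

	#include <linux/slab.h>

	static int my_probe(void)
	{
		void *a, *b;
		int err = -ENOMEM;

		a = kmalloc(16, GFP_KERNEL);
		if (!a)
			return -ENOMEM;

		b = kmalloc(32, GFP_KERNEL);
		if (!b)
			goto out_free_a;	/* undo only what already succeeded */

		/* on success, a and b would be stashed in the device struct */
		return 0;

	out_free_a:
		kfree(a);
		return err;
	}
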
diff --git a/drivers/usb/input/Kconfig b/drivers/usb/input/Kconfig
index 20db364..661af7a 100644
--- a/drivers/usb/input/Kconfig
+++ b/drivers/usb/input/Kconfig
@@ -221,6 +221,7 @@
 	  - ITM
 	  - some other eTurboTouch
 	  - Gunze AHL61
+	  - DMC TSC-10/25
 
 	  Have a look at <http://linux.chapter7.ch/touchkit/> for
 	  a usage description and the required user-space stuff.
@@ -258,6 +259,11 @@
 	bool "Gunze AHL61 device support" if EMBEDDED
 	depends on USB_TOUCHSCREEN
 
+config USB_TOUCHSCREEN_DMC_TSC10
+	default y
+	bool "DMC TSC-10/25 device support" if EMBEDDED
+	depends on USB_TOUCHSCREEN
+
 config USB_YEALINK
 	tristate "Yealink usb-p1k voip phone"
 	depends on USB && INPUT && EXPERIMENTAL
diff --git a/drivers/usb/input/acecad.c b/drivers/usb/input/acecad.c
index 0096373..909138e 100644
--- a/drivers/usb/input/acecad.c
+++ b/drivers/usb/input/acecad.c
@@ -152,7 +152,7 @@
 	if (!acecad || !input_dev)
 		goto fail1;
 
-	acecad->data = usb_buffer_alloc(dev, 8, SLAB_KERNEL, &acecad->data_dma);
+	acecad->data = usb_buffer_alloc(dev, 8, GFP_KERNEL, &acecad->data_dma);
 	if (!acecad->data)
 		goto fail1;
 
diff --git a/drivers/usb/input/aiptek.c b/drivers/usb/input/aiptek.c
index bf42818..9f52429 100644
--- a/drivers/usb/input/aiptek.c
+++ b/drivers/usb/input/aiptek.c
@@ -1988,7 +1988,7 @@
 		goto fail1;
 
 	aiptek->data = usb_buffer_alloc(usbdev, AIPTEK_PACKET_LENGTH,
-					SLAB_ATOMIC, &aiptek->data_dma);
+					GFP_ATOMIC, &aiptek->data_dma);
 	if (!aiptek->data)
 		goto fail1;
 
diff --git a/drivers/usb/input/ati_remote.c b/drivers/usb/input/ati_remote.c
index 787b847..b724e36 100644
--- a/drivers/usb/input/ati_remote.c
+++ b/drivers/usb/input/ati_remote.c
@@ -592,7 +592,7 @@
 			__FUNCTION__, urb->status);
 	}
 
-	retval = usb_submit_urb(urb, SLAB_ATOMIC);
+	retval = usb_submit_urb(urb, GFP_ATOMIC);
 	if (retval)
 		dev_err(&ati_remote->interface->dev, "%s: usb_submit_urb()=%d\n",
 			__FUNCTION__, retval);
@@ -604,12 +604,12 @@
 static int ati_remote_alloc_buffers(struct usb_device *udev,
 				    struct ati_remote *ati_remote)
 {
-	ati_remote->inbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, SLAB_ATOMIC,
+	ati_remote->inbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, GFP_ATOMIC,
 					     &ati_remote->inbuf_dma);
 	if (!ati_remote->inbuf)
 		return -1;
 
-	ati_remote->outbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, SLAB_ATOMIC,
+	ati_remote->outbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, GFP_ATOMIC,
 					      &ati_remote->outbuf_dma);
 	if (!ati_remote->outbuf)
 		return -1;
@@ -630,11 +630,8 @@
  */
 static void ati_remote_free_buffers(struct ati_remote *ati_remote)
 {
-	if (ati_remote->irq_urb)
-		usb_free_urb(ati_remote->irq_urb);
-
-	if (ati_remote->out_urb)
-		usb_free_urb(ati_remote->out_urb);
+	usb_free_urb(ati_remote->irq_urb);
+	usb_free_urb(ati_remote->out_urb);
 
 	usb_buffer_free(ati_remote->udev, DATA_BUFSIZE,
 		ati_remote->inbuf, ati_remote->inbuf_dma);
diff --git a/drivers/usb/input/ati_remote2.c b/drivers/usb/input/ati_remote2.c
index f982a2b..83f1f79 100644
--- a/drivers/usb/input/ati_remote2.c
+++ b/drivers/usb/input/ati_remote2.c
@@ -372,8 +372,7 @@
 	int i;
 
 	for (i = 0; i < 2; i++) {
-		if (ar2->urb[i])
-			usb_free_urb(ar2->urb[i]);
+		usb_free_urb(ar2->urb[i]);
 
 		if (ar2->buf[i])
 			usb_buffer_free(ar2->udev, 4, ar2->buf[i], ar2->buf_dma[i]);
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index 6d08a3b..f1d0e1d 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -968,20 +968,30 @@
 		hid_io_error(hid);
 }
 
-/* Workqueue routine to reset the device */
-static void hid_reset(void *_hid)
+/* Workqueue routine to reset the device or clear a halt */
+static void hid_reset(struct work_struct *work)
 {
-	struct hid_device *hid = (struct hid_device *) _hid;
-	int rc_lock, rc;
+	struct hid_device *hid =
+		container_of(work, struct hid_device, reset_work);
+	int rc_lock, rc = 0;
 
-	dev_dbg(&hid->intf->dev, "resetting device\n");
-	rc = rc_lock = usb_lock_device_for_reset(hid->dev, hid->intf);
-	if (rc_lock >= 0) {
-		rc = usb_reset_composite_device(hid->dev, hid->intf);
-		if (rc_lock)
-			usb_unlock_device(hid->dev);
+	if (test_bit(HID_CLEAR_HALT, &hid->iofl)) {
+		dev_dbg(&hid->intf->dev, "clear halt\n");
+		rc = usb_clear_halt(hid->dev, hid->urbin->pipe);
+		clear_bit(HID_CLEAR_HALT, &hid->iofl);
+		hid_start_in(hid);
 	}
-	clear_bit(HID_RESET_PENDING, &hid->iofl);
+
+	else if (test_bit(HID_RESET_PENDING, &hid->iofl)) {
+		dev_dbg(&hid->intf->dev, "resetting device\n");
+		rc = rc_lock = usb_lock_device_for_reset(hid->dev, hid->intf);
+		if (rc_lock >= 0) {
+			rc = usb_reset_composite_device(hid->dev, hid->intf);
+			if (rc_lock)
+				usb_unlock_device(hid->dev);
+		}
+		clear_bit(HID_RESET_PENDING, &hid->iofl);
+	}
 
 	switch (rc) {
 	case 0:
@@ -1023,9 +1033,8 @@
 
 		/* Retries failed, so do a port reset */
 		if (!test_and_set_bit(HID_RESET_PENDING, &hid->iofl)) {
-			if (schedule_work(&hid->reset_work))
-				goto done;
-			clear_bit(HID_RESET_PENDING, &hid->iofl);
+			schedule_work(&hid->reset_work);
+			goto done;
 		}
 	}
 
@@ -1049,6 +1058,11 @@
 			hid->retry_delay = 0;
 			hid_input_report(HID_INPUT_REPORT, urb, 1);
 			break;
+		case -EPIPE:		/* stall */
+			clear_bit(HID_IN_RUNNING, &hid->iofl);
+			set_bit(HID_CLEAR_HALT, &hid->iofl);
+			schedule_work(&hid->reset_work);
+			return;
 		case -ECONNRESET:	/* unlink */
 		case -ENOENT:
 		case -ESHUTDOWN:	/* unplug */
@@ -1065,7 +1079,7 @@
 			warn("input irq status %d received", urb->status);
 	}
 
-	status = usb_submit_urb(urb, SLAB_ATOMIC);
+	status = usb_submit_urb(urb, GFP_ATOMIC);
 	if (status) {
 		clear_bit(HID_IN_RUNNING, &hid->iofl);
 		if (status != -EPERM) {
@@ -1627,6 +1641,19 @@
 
 #define USB_VENDOR_ID_APPLE		0x05ac
 #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE	0x0304
+#define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI	0x020e
+#define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO	0x020f
+#define USB_DEVICE_ID_APPLE_GEYSER_ANSI	0x0214
+#define USB_DEVICE_ID_APPLE_GEYSER_ISO	0x0215
+#define USB_DEVICE_ID_APPLE_GEYSER_JIS	0x0216
+#define USB_DEVICE_ID_APPLE_GEYSER3_ANSI	0x0217
+#define USB_DEVICE_ID_APPLE_GEYSER3_ISO	0x0218
+#define USB_DEVICE_ID_APPLE_GEYSER3_JIS	0x0219
+#define USB_DEVICE_ID_APPLE_GEYSER4_ANSI	0x021a
+#define USB_DEVICE_ID_APPLE_GEYSER4_ISO	0x021b
+#define USB_DEVICE_ID_APPLE_GEYSER4_JIS	0x021c
+#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY	0x030a
+#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY	0x030b
 
 #define USB_VENDOR_ID_CHERRY		0x046a
 #define USB_DEVICE_ID_CHERRY_CYMOTION	0x0023
@@ -1794,17 +1821,19 @@
 
 	{ USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION, HID_QUIRK_CYMOTION },
 
-	{ USB_VENDOR_ID_APPLE, 0x020E, HID_QUIRK_POWERBOOK_HAS_FN },
-	{ USB_VENDOR_ID_APPLE, 0x020F, HID_QUIRK_POWERBOOK_HAS_FN },
-	{ USB_VENDOR_ID_APPLE, 0x0214, HID_QUIRK_POWERBOOK_HAS_FN },
-	{ USB_VENDOR_ID_APPLE, 0x0215, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
-	{ USB_VENDOR_ID_APPLE, 0x0216, HID_QUIRK_POWERBOOK_HAS_FN },
-	{ USB_VENDOR_ID_APPLE, 0x0217, HID_QUIRK_POWERBOOK_HAS_FN },
-	{ USB_VENDOR_ID_APPLE, 0x0218, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
-	{ USB_VENDOR_ID_APPLE, 0x0219, HID_QUIRK_POWERBOOK_HAS_FN },
-	{ USB_VENDOR_ID_APPLE, 0x021B, HID_QUIRK_POWERBOOK_HAS_FN },
-	{ USB_VENDOR_ID_APPLE, 0x030A, HID_QUIRK_POWERBOOK_HAS_FN },
-	{ USB_VENDOR_ID_APPLE, 0x030B, HID_QUIRK_POWERBOOK_HAS_FN },
+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI, HID_QUIRK_POWERBOOK_HAS_FN },
+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO, HID_QUIRK_POWERBOOK_HAS_FN },
+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI, HID_QUIRK_POWERBOOK_HAS_FN },
+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS, HID_QUIRK_POWERBOOK_HAS_FN },
+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI, HID_QUIRK_POWERBOOK_HAS_FN },
+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_POWERBOOK_HAS_FN },
+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_POWERBOOK_HAS_FN },
+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_POWERBOOK_HAS_FN },
+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_POWERBOOK_HAS_FN },
+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN },
+	{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN },
 
 	{ USB_VENDOR_ID_PANJIT, 0x0001, HID_QUIRK_IGNORE },
 	{ USB_VENDOR_ID_PANJIT, 0x0002, HID_QUIRK_IGNORE },
@@ -1835,13 +1864,13 @@
 
 static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid)
 {
-	if (!(hid->inbuf = usb_buffer_alloc(dev, hid->bufsize, SLAB_ATOMIC, &hid->inbuf_dma)))
+	if (!(hid->inbuf = usb_buffer_alloc(dev, hid->bufsize, GFP_ATOMIC, &hid->inbuf_dma)))
 		return -1;
-	if (!(hid->outbuf = usb_buffer_alloc(dev, hid->bufsize, SLAB_ATOMIC, &hid->outbuf_dma)))
+	if (!(hid->outbuf = usb_buffer_alloc(dev, hid->bufsize, GFP_ATOMIC, &hid->outbuf_dma)))
 		return -1;
-	if (!(hid->cr = usb_buffer_alloc(dev, sizeof(*(hid->cr)), SLAB_ATOMIC, &hid->cr_dma)))
+	if (!(hid->cr = usb_buffer_alloc(dev, sizeof(*(hid->cr)), GFP_ATOMIC, &hid->cr_dma)))
 		return -1;
-	if (!(hid->ctrlbuf = usb_buffer_alloc(dev, hid->bufsize, SLAB_ATOMIC, &hid->ctrlbuf_dma)))
+	if (!(hid->ctrlbuf = usb_buffer_alloc(dev, hid->bufsize, GFP_ATOMIC, &hid->ctrlbuf_dma)))
 		return -1;
 
 	return 0;
@@ -1985,7 +2014,7 @@
 		if (hid->collection->usage == HID_GD_MOUSE && hid_mousepoll_interval > 0)
 			interval = hid_mousepoll_interval;
 
-		if (endpoint->bEndpointAddress & USB_DIR_IN) {
+		if (usb_endpoint_dir_in(endpoint)) {
 			if (hid->urbin)
 				continue;
 			if (!(hid->urbin = usb_alloc_urb(0, GFP_KERNEL)))
@@ -2015,7 +2044,7 @@
 
 	init_waitqueue_head(&hid->wait);
 
-	INIT_WORK(&hid->reset_work, hid_reset, hid);
+	INIT_WORK(&hid->reset_work, hid_reset);
 	setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid);
 
 	spin_lock_init(&hid->inlock);
@@ -2067,13 +2096,9 @@
 	return hid;
 
 fail:
-
-	if (hid->urbin)
-		usb_free_urb(hid->urbin);
-	if (hid->urbout)
-		usb_free_urb(hid->urbout);
-	if (hid->urbctrl)
-		usb_free_urb(hid->urbctrl);
+	usb_free_urb(hid->urbin);
+	usb_free_urb(hid->urbout);
+	usb_free_urb(hid->urbctrl);
 	hid_free_buffers(dev, hid);
 	hid_free_device(hid);
 
@@ -2104,8 +2129,7 @@
 
 	usb_free_urb(hid->urbin);
 	usb_free_urb(hid->urbctrl);
-	if (hid->urbout)
-		usb_free_urb(hid->urbout);
+	usb_free_urb(hid->urbout);
 
 	hid_free_buffers(hid->dev, hid);
 	hid_free_device(hid);
diff --git a/drivers/usb/input/hid.h b/drivers/usb/input/hid.h
index 0e76e6d..2a9bf07 100644
--- a/drivers/usb/input/hid.h
+++ b/drivers/usb/input/hid.h
@@ -385,6 +385,7 @@
 #define HID_IN_RUNNING		3
 #define HID_RESET_PENDING	4
 #define HID_SUSPENDED		5
+#define HID_CLEAR_HALT		6
 
 struct hid_input {
 	struct list_head list;
diff --git a/drivers/usb/input/keyspan_remote.c b/drivers/usb/input/keyspan_remote.c
index 50aa810..98bd323 100644
--- a/drivers/usb/input/keyspan_remote.c
+++ b/drivers/usb/input/keyspan_remote.c
@@ -456,7 +456,7 @@
 	remote->in_endpoint = endpoint;
 	remote->toggle = -1;	/* Set to -1 so we will always not match the toggle from the first remote message. */
 
-	remote->in_buffer = usb_buffer_alloc(udev, RECV_SIZE, SLAB_ATOMIC, &remote->in_dma);
+	remote->in_buffer = usb_buffer_alloc(udev, RECV_SIZE, GFP_ATOMIC, &remote->in_dma);
 	if (!remote->in_buffer) {
 		retval = -ENOMEM;
 		goto fail1;
diff --git a/drivers/usb/input/mtouchusb.c b/drivers/usb/input/mtouchusb.c
index 79a85d4..92c4e07 100644
--- a/drivers/usb/input/mtouchusb.c
+++ b/drivers/usb/input/mtouchusb.c
@@ -164,7 +164,7 @@
 	dbg("%s - called", __FUNCTION__);
 
 	mtouch->data = usb_buffer_alloc(udev, MTOUCHUSB_REPORT_DATA_SIZE,
-					SLAB_ATOMIC, &mtouch->data_dma);
+					GFP_ATOMIC, &mtouch->data_dma);
 
 	if (!mtouch->data)
 		return -1;
diff --git a/drivers/usb/input/powermate.c b/drivers/usb/input/powermate.c
index 0bf9177..fea97e5 100644
--- a/drivers/usb/input/powermate.c
+++ b/drivers/usb/input/powermate.c
@@ -277,12 +277,12 @@
 static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_device *pm)
 {
 	pm->data = usb_buffer_alloc(udev, POWERMATE_PAYLOAD_SIZE_MAX,
-				    SLAB_ATOMIC, &pm->data_dma);
+				    GFP_ATOMIC, &pm->data_dma);
 	if (!pm->data)
 		return -1;
 
 	pm->configcr = usb_buffer_alloc(udev, sizeof(*(pm->configcr)),
-					SLAB_ATOMIC, &pm->configcr_dma);
+					GFP_ATOMIC, &pm->configcr_dma);
 	if (!pm->configcr)
 		return -1;
 
diff --git a/drivers/usb/input/touchkitusb.c b/drivers/usb/input/touchkitusb.c
index 05c0d1c..2a314b0 100644
--- a/drivers/usb/input/touchkitusb.c
+++ b/drivers/usb/input/touchkitusb.c
@@ -248,7 +248,7 @@
 				  struct touchkit_usb *touchkit)
 {
 	touchkit->data = usb_buffer_alloc(udev, TOUCHKIT_REPORT_DATA_SIZE,
-	                                  SLAB_ATOMIC, &touchkit->data_dma);
+	                                  GFP_ATOMIC, &touchkit->data_dma);
 
 	if (!touchkit->data)
 		return -1;
diff --git a/drivers/usb/input/usbkbd.c b/drivers/usb/input/usbkbd.c
index c73285c..8505824 100644
--- a/drivers/usb/input/usbkbd.c
+++ b/drivers/usb/input/usbkbd.c
@@ -122,7 +122,7 @@
 	memcpy(kbd->old, kbd->new, 8);
 
 resubmit:
-	i = usb_submit_urb (urb, SLAB_ATOMIC);
+	i = usb_submit_urb (urb, GFP_ATOMIC);
 	if (i)
 		err ("can't resubmit intr, %s-%s/input0, status %d",
 				kbd->usbdev->bus->bus_name,
@@ -196,11 +196,11 @@
 		return -1;
 	if (!(kbd->led = usb_alloc_urb(0, GFP_KERNEL)))
 		return -1;
-	if (!(kbd->new = usb_buffer_alloc(dev, 8, SLAB_ATOMIC, &kbd->new_dma)))
+	if (!(kbd->new = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &kbd->new_dma)))
 		return -1;
-	if (!(kbd->cr = usb_buffer_alloc(dev, sizeof(struct usb_ctrlrequest), SLAB_ATOMIC, &kbd->cr_dma)))
+	if (!(kbd->cr = usb_buffer_alloc(dev, sizeof(struct usb_ctrlrequest), GFP_ATOMIC, &kbd->cr_dma)))
 		return -1;
-	if (!(kbd->leds = usb_buffer_alloc(dev, 1, SLAB_ATOMIC, &kbd->leds_dma)))
+	if (!(kbd->leds = usb_buffer_alloc(dev, 1, GFP_ATOMIC, &kbd->leds_dma)))
 		return -1;
 
 	return 0;
@@ -208,10 +208,8 @@
 
 static void usb_kbd_free_mem(struct usb_device *dev, struct usb_kbd *kbd)
 {
-	if (kbd->irq)
-		usb_free_urb(kbd->irq);
-	if (kbd->led)
-		usb_free_urb(kbd->led);
+	usb_free_urb(kbd->irq);
+	usb_free_urb(kbd->led);
 	if (kbd->new)
 		usb_buffer_free(dev, 8, kbd->new, kbd->new_dma);
 	if (kbd->cr)
@@ -236,9 +234,7 @@
 		return -ENODEV;
 
 	endpoint = &interface->endpoint[0].desc;
-	if (!(endpoint->bEndpointAddress & USB_DIR_IN))
-		return -ENODEV;
-	if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_INT)
+	if (!usb_endpoint_is_int_in(endpoint))
 		return -ENODEV;
 
 	pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
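
usbkbd (above), usbmouse, and yealink (below) all drop their open-coded endpoint checks in favour of the usb_endpoint_is_int_in() helper from <linux/usb.h>.  Roughly, the helper just bundles the two tests the old code did by hand; an equivalent re-statement, not the kernel's exact implementation:

	#include <linux/usb.h>

	static int my_is_int_in(const struct usb_endpoint_descriptor *epd)
	{
		/* direction IN and transfer type interrupt, same as the removed lines */
		return (epd->bEndpointAddress & USB_DIR_IN) &&
		       ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
			USB_ENDPOINT_XFER_INT);
	}
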
diff --git a/drivers/usb/input/usbmouse.c b/drivers/usb/input/usbmouse.c
index cbbbea3..64a33e4 100644
--- a/drivers/usb/input/usbmouse.c
+++ b/drivers/usb/input/usbmouse.c
@@ -86,7 +86,7 @@
 
 	input_sync(dev);
 resubmit:
-	status = usb_submit_urb (urb, SLAB_ATOMIC);
+	status = usb_submit_urb (urb, GFP_ATOMIC);
 	if (status)
 		err ("can't resubmit intr, %s-%s/input0, status %d",
 				mouse->usbdev->bus->bus_name,
@@ -126,9 +126,7 @@
 		return -ENODEV;
 
 	endpoint = &interface->endpoint[0].desc;
-	if (!(endpoint->bEndpointAddress & USB_DIR_IN))
-		return -ENODEV;
-	if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_INT)
+	if (!usb_endpoint_is_int_in(endpoint))
 		return -ENODEV;
 
 	pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
@@ -139,7 +137,7 @@
 	if (!mouse || !input_dev)
 		goto fail1;
 
-	mouse->data = usb_buffer_alloc(dev, 8, SLAB_ATOMIC, &mouse->data_dma);
+	mouse->data = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &mouse->data_dma);
 	if (!mouse->data)
 		goto fail1;
 
diff --git a/drivers/usb/input/usbtouchscreen.c b/drivers/usb/input/usbtouchscreen.c
index 933cedd..7f3c57d 100644
--- a/drivers/usb/input/usbtouchscreen.c
+++ b/drivers/usb/input/usbtouchscreen.c
@@ -8,6 +8,7 @@
  *  - PanJit TouchSet
  *  - eTurboTouch
  *  - Gunze AHL61
+ *  - DMC TSC-10/25
  *
  * Copyright (C) 2004-2006 by Daniel Ritz <daniel.ritz@gmx.ch>
  * Copyright (C) by Todd E. Johnson (mtouchusb.c)
@@ -30,6 +31,8 @@
  * - ITM parts are from itmtouch.c
  * - 3M parts are from mtouchusb.c
  * - PanJit parts are from an unmerged driver by Lanslott Gish
+ * - DMC TSC 10/25 are from Holger Schurig, with ideas from an unmerged
+ *   driver from Marius Vollmer
  *
  *****************************************************************************/
 
@@ -44,7 +47,7 @@
 #include <linux/usb/input.h>
 
 
-#define DRIVER_VERSION		"v0.4"
+#define DRIVER_VERSION		"v0.5"
 #define DRIVER_AUTHOR		"Daniel Ritz <daniel.ritz@gmx.ch>"
 #define DRIVER_DESC		"USB Touchscreen Driver"
 
@@ -103,6 +106,7 @@
 	DEVTYPE_ITM,
 	DEVTYPE_ETURBO,
 	DEVTYPE_GUNZE,
+	DEVTYPE_DMC_TSC10,
 };
 
 static struct usb_device_id usbtouch_devices[] = {
@@ -139,6 +143,10 @@
 	{USB_DEVICE(0x0637, 0x0001), .driver_info = DEVTYPE_GUNZE},
 #endif
 
+#ifdef CONFIG_USB_TOUCHSCREEN_DMC_TSC10
+	{USB_DEVICE(0x0afa, 0x03e8), .driver_info = DEVTYPE_DMC_TSC10},
+#endif
+
 	{}
 };
 
@@ -313,6 +321,80 @@
 #endif
 
 /*****************************************************************************
+ * DMC TSC-10/25 Part
+ *
+ * Documentation about the controller and its protocol can be found at
+ *   http://www.dmccoltd.com/files/controler/tsc10usb_pi_e.pdf
+ *   http://www.dmccoltd.com/files/controler/tsc25_usb_e.pdf
+ */
+#ifdef CONFIG_USB_TOUCHSCREEN_DMC_TSC10
+
+/* supported data rates. currently using 130 */
+#define TSC10_RATE_POINT	0x50
+#define TSC10_RATE_30		0x40
+#define TSC10_RATE_50		0x41
+#define TSC10_RATE_80		0x42
+#define TSC10_RATE_100		0x43
+#define TSC10_RATE_130		0x44
+#define TSC10_RATE_150		0x45
+
+/* commands */
+#define TSC10_CMD_RESET		0x55
+#define TSC10_CMD_RATE		0x05
+#define TSC10_CMD_DATA1		0x01
+
+static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
+{
+	struct usb_device *dev = usbtouch->udev;
+	int ret;
+	unsigned char buf[2];
+
+	/* reset */
+	buf[0] = buf[1] = 0xFF;
+	ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
+	                      TSC10_CMD_RESET,
+	                      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+	                      0, 0, buf, 2, USB_CTRL_SET_TIMEOUT);
+	if (ret < 0)
+		return ret;
+	if (buf[0] != 0x06 || buf[1] != 0x00)
+		return -ENODEV;
+
+	/* set coordinate output rate */
+	buf[0] = buf[1] = 0xFF;
+	ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
+	                      TSC10_CMD_RATE,
+	                      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+	                      TSC10_RATE_150, 0, buf, 2, USB_CTRL_SET_TIMEOUT);
+	if (ret < 0)
+		return ret;
+	if (buf[0] != 0x06 || buf[1] != 0x00)
+		return -ENODEV;
+
+	/* start sending data */
+	ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
+	                      TSC10_CMD_DATA1,
+	                      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+	                      0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+
+static int dmc_tsc10_read_data(unsigned char *pkt, int *x, int *y, int *touch, int *press)
+{
+	*x = ((pkt[2] & 0x03) << 8) | pkt[1];
+	*y = ((pkt[4] & 0x03) << 8) | pkt[3];
+	*touch = pkt[0] & 0x01;
+
+	return 1;
+}
+#endif
+
+
+/*****************************************************************************
  * the different device descriptors
  */
 static struct usbtouch_device_info usbtouch_dev_info[] = {
@@ -389,6 +471,18 @@
 		.read_data	= gunze_read_data,
 	},
 #endif
+
+#ifdef CONFIG_USB_TOUCHSCREEN_DMC_TSC10
+	[DEVTYPE_DMC_TSC10] = {
+		.min_xc		= 0x0,
+		.max_xc		= 0x03ff,
+		.min_yc		= 0x0,
+		.max_yc		= 0x03ff,
+		.rept_size	= 5,
+		.init		= dmc_tsc10_init,
+		.read_data	= dmc_tsc10_read_data,
+	},
+#endif
 };
 
 
@@ -586,7 +680,7 @@
 		type->process_pkt = usbtouch_process_pkt;
 
 	usbtouch->data = usb_buffer_alloc(udev, type->rept_size,
-	                                  SLAB_KERNEL, &usbtouch->data_dma);
+	                                  GFP_KERNEL, &usbtouch->data_dma);
 	if (!usbtouch->data)
 		goto out_free;
 
diff --git a/drivers/usb/input/wacom.h b/drivers/usb/input/wacom.h
index 1cf08f0..d85abfc 100644
--- a/drivers/usb/input/wacom.h
+++ b/drivers/usb/input/wacom.h
@@ -110,7 +110,6 @@
 };
 
 extern int wacom_wac_irq(struct wacom_wac * wacom_wac, void * wcombo);
-extern void wacom_sys_irq(struct urb *urb);
 extern void wacom_report_abs(void *wcombo, unsigned int abs_type, int abs_data);
 extern void wacom_report_rel(void *wcombo, unsigned int rel_type, int rel_data);
 extern void wacom_report_key(void *wcombo, unsigned int key_type, int key_data);
diff --git a/drivers/usb/input/wacom_sys.c b/drivers/usb/input/wacom_sys.c
index 3498b89..e7cc20a 100644
--- a/drivers/usb/input/wacom_sys.c
+++ b/drivers/usb/input/wacom_sys.c
@@ -42,7 +42,7 @@
 	return wcombo->wacom->dev;
 }
 
-void wacom_sys_irq(struct urb *urb)
+static void wacom_sys_irq(struct urb *urb)
 {
 	struct wacom *wacom = urb->context;
 	struct wacom_combo wcombo;
diff --git a/drivers/usb/input/xpad.c b/drivers/usb/input/xpad.c
index df97e5c..e4bc76e 100644
--- a/drivers/usb/input/xpad.c
+++ b/drivers/usb/input/xpad.c
@@ -325,7 +325,7 @@
 		goto fail1;
 
 	xpad->idata = usb_buffer_alloc(udev, XPAD_PKT_LEN,
-				       SLAB_ATOMIC, &xpad->idata_dma);
+				       GFP_ATOMIC, &xpad->idata_dma);
 	if (!xpad->idata)
 		goto fail1;
 
diff --git a/drivers/usb/input/yealink.c b/drivers/usb/input/yealink.c
index 905bf63..caff8e6 100644
--- a/drivers/usb/input/yealink.c
+++ b/drivers/usb/input/yealink.c
@@ -859,10 +859,8 @@
 
 	interface = intf->cur_altsetting;
 	endpoint = &interface->endpoint[0].desc;
-	if (!(endpoint->bEndpointAddress & USB_DIR_IN))
-		return -EIO;
-	if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_INT)
-		return -EIO;
+	if (!usb_endpoint_is_int_in(endpoint))
+		return -ENODEV;
 
 	yld = kzalloc(sizeof(struct yealink_dev), GFP_KERNEL);
 	if (!yld)
@@ -876,17 +874,17 @@
 
 	/* allocate usb buffers */
 	yld->irq_data = usb_buffer_alloc(udev, USB_PKT_LEN,
-					SLAB_ATOMIC, &yld->irq_dma);
+					GFP_ATOMIC, &yld->irq_dma);
 	if (yld->irq_data == NULL)
 		return usb_cleanup(yld, -ENOMEM);
 
 	yld->ctl_data = usb_buffer_alloc(udev, USB_PKT_LEN,
-					SLAB_ATOMIC, &yld->ctl_dma);
+					GFP_ATOMIC, &yld->ctl_dma);
 	if (!yld->ctl_data)
 		return usb_cleanup(yld, -ENOMEM);
 
 	yld->ctl_req = usb_buffer_alloc(udev, sizeof(*(yld->ctl_req)),
-					SLAB_ATOMIC, &yld->ctl_req_dma);
+					GFP_ATOMIC, &yld->ctl_req_dma);
 	if (yld->ctl_req == NULL)
 		return usb_cleanup(yld, -ENOMEM);
 
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 11dc595..2cba07d 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -4,6 +4,7 @@
 #
 
 obj-$(CONFIG_USB_ADUTUX)	+= adutux.o
+obj-$(CONFIG_USB_APPLEDISPLAY)	+= appledisplay.o
 obj-$(CONFIG_USB_AUERSWALD)	+= auerswald.o
 obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o
 obj-$(CONFIG_USB_CYTHERM)	+= cytherm.o
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 6b23a1d..02cbb7f 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -76,7 +76,7 @@
 	char *urbdata;			/* interrupt URB data buffer */
 	char *msgdata;			/* control message data buffer */
 
-	struct work_struct work;
+	struct delayed_work work;
 	int button_pressed;
 	spinlock_t lock;
 };
@@ -117,7 +117,7 @@
 	case ACD_BTN_BRIGHT_UP:
 	case ACD_BTN_BRIGHT_DOWN:
 		pdata->button_pressed = 1;
-		queue_work(wq, &pdata->work);
+		queue_delayed_work(wq, &pdata->work, 0);
 		break;
 	case ACD_BTN_NONE:
 	default:
@@ -184,9 +184,10 @@
 	.max_brightness	= 0xFF
 };
 
-static void appledisplay_work(void *private)
+static void appledisplay_work(struct work_struct *work)
 {
-	struct appledisplay *pdata = private;
+	struct appledisplay *pdata =
+		container_of(work, struct appledisplay, work.work);
 	int retval;
 
 	up(&pdata->bd->sem);
@@ -216,10 +217,7 @@
 	iface_desc = iface->cur_altsetting;
 	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
 		endpoint = &iface_desc->endpoint[i].desc;
-		if (!int_in_endpointAddr &&
-		    (endpoint->bEndpointAddress & USB_DIR_IN) &&
-		    ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
-		     USB_ENDPOINT_XFER_INT)) {
+		if (!int_in_endpointAddr && usb_endpoint_is_int_in(endpoint)) {
 			/* we found an interrupt in endpoint */
 			int_in_endpointAddr = endpoint->bEndpointAddress;
 			break;
@@ -241,7 +239,7 @@
 	pdata->udev = udev;
 
 	spin_lock_init(&pdata->lock);
-	INIT_WORK(&pdata->work, appledisplay_work, pdata);
+	INIT_DELAYED_WORK(&pdata->work, appledisplay_work);
 
 	/* Allocate buffer for control messages */
 	pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL);
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
index e4971d6..c703f73 100644
--- a/drivers/usb/misc/auerswald.c
+++ b/drivers/usb/misc/auerswald.c
@@ -704,9 +704,7 @@
 {
 	kfree(bp->bufp);
 	kfree(bp->dr);
-	if (bp->urbp) {
-		usb_free_urb(bp->urbp);
-	}
+	usb_free_urb(bp->urbp);
 	kfree(bp);
 }
 
@@ -1155,8 +1153,7 @@
         dbg ("auerswald_int_release");
 
         /* stop the int endpoint */
-        if (cp->inturbp)
-                usb_kill_urb (cp->inturbp);
+	usb_kill_urb (cp->inturbp);
 
         /* deallocate memory */
         auerswald_int_free (cp);
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c
index 1fd9cb8..5c0a26c 100644
--- a/drivers/usb/misc/emi26.c
+++ b/drivers/usb/misc/emi26.c
@@ -53,13 +53,12 @@
 static int emi26_writememory (struct usb_device *dev, int address, unsigned char *data, int length, __u8 request)
 {
 	int result;
-	unsigned char *buffer =  kmalloc (length, GFP_KERNEL);
+	unsigned char *buffer =  kmemdup(data, length, GFP_KERNEL);
 
 	if (!buffer) {
 		err("emi26: kmalloc(%d) failed.", length);
 		return -ENOMEM;
 	}
-	memcpy (buffer, data, length);
 	/* Note: usb_control_msg returns negative value on error or length of the
 	 * 		 data that was written! */
 	result = usb_control_msg (dev, usb_sndctrlpipe(dev, 0), request, 0x40, address, 0, buffer, length, 300);
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index fe35137..23153ea 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -61,13 +61,12 @@
 static int emi62_writememory (struct usb_device *dev, int address, unsigned char *data, int length, __u8 request)
 {
 	int result;
-	unsigned char *buffer =  kmalloc (length, GFP_KERNEL);
+	unsigned char *buffer =  kmemdup(data, length, GFP_KERNEL);
 
 	if (!buffer) {
 		err("emi62: kmalloc(%d) failed.", length);
 		return -ENOMEM;
 	}
-	memcpy (buffer, data, length);
 	/* Note: usb_control_msg returns negative value on error or length of the
 	 * 		 data that was written! */
 	result = usb_control_msg (dev, usb_sndctrlpipe(dev, 0), request, 0x40, address, 0, buffer, length, 300);
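Both EMI firmware loaders now rely on kmemdup(), which allocates and copies in a single call; the error handling stays the same because a NULL return still means the allocation failed. A small illustrative helper, with an invented name and request value (not part of this patch):

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/usb.h>

	static int write_fw_chunk(struct usb_device *dev, int address,
				  const unsigned char *data, int length)
	{
		unsigned char *buffer;
		int result;

		/* kmemdup() == kmalloc() followed by memcpy() */
		buffer = kmemdup(data, length, GFP_KERNEL);
		if (!buffer)
			return -ENOMEM;

		result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
					 0xa0, 0x40, address, 0,
					 buffer, length, 300);
		kfree(buffer);
		return result;
	}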
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 9b591b8..18b1925 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -156,9 +156,9 @@
         struct usb_device *udev;
         struct usb_interface *interface;
         struct usb_class_driver *class;
-        struct work_struct status_work;
-        struct work_struct command_work;
-        struct work_struct respond_work;
+        struct delayed_work status_work;
+        struct delayed_work command_work;
+        struct delayed_work respond_work;
         struct u132_platform_data platform_data;
         struct resource resources[0];
         struct platform_device platform_dev;
@@ -210,23 +210,14 @@
 
 static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
-                        return;
-        } else if (queue_work(status_queue, &ftdi->status_work))
-                return;
-        kref_put(&ftdi->kref, ftdi_elan_delete);
-        return;
+	if (!queue_delayed_work(status_queue, &ftdi->status_work, delta))
+		kref_put(&ftdi->kref, ftdi_elan_delete);
 }
 
 static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
-                        kref_get(&ftdi->kref);
-        } else if (queue_work(status_queue, &ftdi->status_work))
-                kref_get(&ftdi->kref);
-        return;
+	if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
+		kref_get(&ftdi->kref);
 }
 
 static void ftdi_status_cancel_work(struct usb_ftdi *ftdi)
@@ -237,25 +228,14 @@
 
 static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(command_queue, &ftdi->command_work,
-                        delta))
-                        return;
-        } else if (queue_work(command_queue, &ftdi->command_work))
-                return;
-        kref_put(&ftdi->kref, ftdi_elan_delete);
-        return;
+	if (!queue_delayed_work(command_queue, &ftdi->command_work, delta))
+		kref_put(&ftdi->kref, ftdi_elan_delete);
 }
 
 static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(command_queue, &ftdi->command_work,
-                        delta))
-                        kref_get(&ftdi->kref);
-        } else if (queue_work(command_queue, &ftdi->command_work))
-                kref_get(&ftdi->kref);
-        return;
+	if (queue_delayed_work(command_queue, &ftdi->command_work, delta))
+		kref_get(&ftdi->kref);
 }
 
 static void ftdi_command_cancel_work(struct usb_ftdi *ftdi)
@@ -267,25 +247,14 @@
 static void ftdi_response_requeue_work(struct usb_ftdi *ftdi,
         unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(respond_queue, &ftdi->respond_work,
-                        delta))
-                        return;
-        } else if (queue_work(respond_queue, &ftdi->respond_work))
-                return;
-        kref_put(&ftdi->kref, ftdi_elan_delete);
-        return;
+	if (!queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
+		kref_put(&ftdi->kref, ftdi_elan_delete);
 }
 
 static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(respond_queue, &ftdi->respond_work,
-                        delta))
-                        kref_get(&ftdi->kref);
-        } else if (queue_work(respond_queue, &ftdi->respond_work))
-                kref_get(&ftdi->kref);
-        return;
+	if (queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
+		kref_get(&ftdi->kref);
 }
 
 static void ftdi_response_cancel_work(struct usb_ftdi *ftdi)
@@ -303,7 +272,7 @@
 
 
 EXPORT_SYMBOL_GPL(ftdi_elan_gone_away);
-void ftdi_release_platform_dev(struct device *dev)
+static void ftdi_release_platform_dev(struct device *dev)
 {
         dev->parent = NULL;
 }
@@ -475,9 +444,11 @@
         return;
 }
 
-static void ftdi_elan_command_work(void *data)
+static void ftdi_elan_command_work(struct work_struct *work)
 {
-        struct usb_ftdi *ftdi = data;
+        struct usb_ftdi *ftdi =
+		container_of(work, struct usb_ftdi, command_work.work);
+
         if (ftdi->disconnected > 0) {
                 ftdi_elan_put_kref(ftdi);
                 return;
@@ -500,9 +471,10 @@
         return;
 }
 
-static void ftdi_elan_respond_work(void *data)
+static void ftdi_elan_respond_work(struct work_struct *work)
 {
-        struct usb_ftdi *ftdi = data;
+        struct usb_ftdi *ftdi =
+		container_of(work, struct usb_ftdi, respond_work.work);
         if (ftdi->disconnected > 0) {
                 ftdi_elan_put_kref(ftdi);
                 return;
@@ -534,9 +506,10 @@
 * after the FTDI has been synchronized
 *
 */
-static void ftdi_elan_status_work(void *data)
+static void ftdi_elan_status_work(struct work_struct *work)
 {
-        struct usb_ftdi *ftdi = data;
+        struct usb_ftdi *ftdi =
+		container_of(work, struct usb_ftdi, status_work.work);
         int work_delay_in_msec = 0;
         if (ftdi->disconnected > 0) {
                 ftdi_elan_put_kref(ftdi);
@@ -1426,14 +1399,6 @@
         }
 }
 
-int usb_ftdi_elan_read_reg(struct platform_device *pdev, u32 *data)
-{
-        struct usb_ftdi *ftdi = platform_device_to_usb_ftdi(pdev);
-        return ftdi_elan_read_reg(ftdi, data);
-}
-
-
-EXPORT_SYMBOL_GPL(usb_ftdi_elan_read_reg);
 static int ftdi_elan_read_config(struct usb_ftdi *ftdi, int config_offset,
         u8 width, u32 *data)
 {
@@ -2633,10 +2598,7 @@
         for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
                 endpoint = &iface_desc->endpoint[i].desc;
                 if (!ftdi->bulk_in_endpointAddr &&
-                        ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
-                        == USB_DIR_IN) && ((endpoint->bmAttributes &
-                        USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK))
-                        {
+		    usb_endpoint_is_bulk_in(endpoint)) {
                         buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
                         ftdi->bulk_in_size = buffer_size;
                         ftdi->bulk_in_endpointAddr = endpoint->bEndpointAddress;
@@ -2649,10 +2611,7 @@
                         }
                 }
                 if (!ftdi->bulk_out_endpointAddr &&
-                        ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
-                        == USB_DIR_OUT) && ((endpoint->bmAttributes &
-                        USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK))
-                        {
+		    usb_endpoint_is_bulk_out(endpoint)) {
                         ftdi->bulk_out_endpointAddr =
                                 endpoint->bEndpointAddress;
                 }
@@ -2691,12 +2650,9 @@
                 ftdi->class = NULL;
                 dev_info(&ftdi->udev->dev, "USB FDTI=%p ELAN interface %d now a"
                         "ctivated\n", ftdi, iface_desc->desc.bInterfaceNumber);
-                INIT_WORK(&ftdi->status_work, ftdi_elan_status_work,
-                        (void *)ftdi);
-                INIT_WORK(&ftdi->command_work, ftdi_elan_command_work,
-                        (void *)ftdi);
-                INIT_WORK(&ftdi->respond_work, ftdi_elan_respond_work,
-                        (void *)ftdi);
+                INIT_DELAYED_WORK(&ftdi->status_work, ftdi_elan_status_work);
+                INIT_DELAYED_WORK(&ftdi->command_work, ftdi_elan_command_work);
+                INIT_DELAYED_WORK(&ftdi->respond_work, ftdi_elan_respond_work);
                 ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000));
                 return 0;
         } else {
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 8e6e195..c941853 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -125,12 +125,12 @@
 
 static int idmouse_create_image(struct usb_idmouse *dev)
 {
-	int bytes_read = 0;
-	int bulk_read = 0;
-	int result = 0;
+	int bytes_read;
+	int bulk_read;
+	int result;
 
 	memcpy(dev->bulk_in_buffer, HEADER, sizeof(HEADER)-1);
-	bytes_read += sizeof(HEADER)-1;
+	bytes_read = sizeof(HEADER)-1;
 
 	/* reset the device and set a fast blink rate */
 	result = ftip_command(dev, FTIP_RELEASE, 0, 0);
@@ -208,9 +208,9 @@
 
 static int idmouse_open(struct inode *inode, struct file *file)
 {
-	struct usb_idmouse *dev = NULL;
+	struct usb_idmouse *dev;
 	struct usb_interface *interface;
-	int result = 0;
+	int result;
 
 	/* prevent disconnects */
 	mutex_lock(&disconnect_mutex);
@@ -305,7 +305,7 @@
 				loff_t * ppos)
 {
 	struct usb_idmouse *dev;
-	int result = 0;
+	int result;
 
 	dev = (struct usb_idmouse *) file->private_data;
 
@@ -329,7 +329,7 @@
 				const struct usb_device_id *id)
 {
 	struct usb_device *udev = interface_to_usbdev(interface);
-	struct usb_idmouse *dev = NULL;
+	struct usb_idmouse *dev;
 	struct usb_host_interface *iface_desc;
 	struct usb_endpoint_descriptor *endpoint;
 	int result;
@@ -350,11 +350,7 @@
 
 	/* set up the endpoint information - use only the first bulk-in endpoint */
 	endpoint = &iface_desc->endpoint[0].desc;
-	if (!dev->bulk_in_endpointAddr
-		&& (endpoint->bEndpointAddress & USB_DIR_IN)
-		&& ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
-		USB_ENDPOINT_XFER_BULK)) {
-
+	if (!dev->bulk_in_endpointAddr && usb_endpoint_is_bulk_in(endpoint)) {
 		/* we found a bulk in endpoint */
 		dev->orig_bi_size = le16_to_cpu(endpoint->wMaxPacketSize);
 		dev->bulk_in_size = 0x200; /* works _much_ faster */
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 2708949..5dce797 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -317,12 +317,8 @@
 	tower_abort_transfers (dev);
 
 	/* free data structures */
-	if (dev->interrupt_in_urb != NULL) {
-		usb_free_urb (dev->interrupt_in_urb);
-	}
-	if (dev->interrupt_out_urb != NULL) {
-		usb_free_urb (dev->interrupt_out_urb);
-	}
+	usb_free_urb(dev->interrupt_in_urb);
+	usb_free_urb(dev->interrupt_out_urb);
 	kfree (dev->read_buffer);
 	kfree (dev->interrupt_in_buffer);
 	kfree (dev->interrupt_out_buffer);
@@ -502,15 +498,11 @@
 	if (dev->interrupt_in_running) {
 		dev->interrupt_in_running = 0;
 		mb();
-		if (dev->interrupt_in_urb != NULL && dev->udev) {
+		if (dev->udev)
 			usb_kill_urb (dev->interrupt_in_urb);
-		}
 	}
-	if (dev->interrupt_out_busy) {
-		if (dev->interrupt_out_urb != NULL && dev->udev) {
-			usb_kill_urb (dev->interrupt_out_urb);
-		}
-	}
+	if (dev->interrupt_out_busy && dev->udev)
+		usb_kill_urb(dev->interrupt_out_urb);
 
 exit:
 	dbg(2, "%s: leave", __FUNCTION__);
@@ -898,14 +890,11 @@
 	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
 		endpoint = &iface_desc->endpoint[i].desc;
 
-		if (((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) &&
-		    ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT)) {
-			dev->interrupt_in_endpoint = endpoint;
-		}
-
-		if (((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT) &&
-		    ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT)) {
-			dev->interrupt_out_endpoint = endpoint;
+		if (usb_endpoint_xfer_int(endpoint)) {
+			if (usb_endpoint_dir_in(endpoint))
+				dev->interrupt_in_endpoint = endpoint;
+			else
+				dev->interrupt_out_endpoint = endpoint;
 		}
 	}
 	if(dev->interrupt_in_endpoint == NULL) {
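The legousbtower loop above shows the intent of the new endpoint helpers: usb_endpoint_xfer_int(), usb_endpoint_dir_in() and friends replace the open-coded bEndpointAddress/bmAttributes mask tests. A stand-alone sketch of the same classification, with made-up names (not from this patch):

	#include <linux/usb.h>

	/* Record the interrupt-IN and interrupt-OUT endpoints of an altsetting. */
	static void classify_int_endpoints(struct usb_host_interface *alt,
					   struct usb_endpoint_descriptor **in,
					   struct usb_endpoint_descriptor **out)
	{
		int i;

		*in = *out = NULL;
		for (i = 0; i < alt->desc.bNumEndpoints; i++) {
			struct usb_endpoint_descriptor *epd =
						&alt->endpoint[i].desc;

			if (!usb_endpoint_xfer_int(epd))
				continue;
			if (usb_endpoint_dir_in(epd))
				*in = epd;
			else
				*out = epd;
		}
	}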
diff --git a/drivers/usb/misc/phidgetkit.c b/drivers/usb/misc/phidgetkit.c
index abb4dcd..371bf2b 100644
--- a/drivers/usb/misc/phidgetkit.c
+++ b/drivers/usb/misc/phidgetkit.c
@@ -81,8 +81,8 @@
 	unsigned char *data;
 	dma_addr_t data_dma;
 
-	struct work_struct do_notify;
-	struct work_struct do_resubmit;
+	struct delayed_work do_notify;
+	struct delayed_work do_resubmit;
 	unsigned long input_events;
 	unsigned long sensor_events;
 };
@@ -374,19 +374,20 @@
 	}
 
 	if (kit->input_events || kit->sensor_events)
-		schedule_work(&kit->do_notify);
+		schedule_delayed_work(&kit->do_notify, 0);
 
 resubmit:
-	status = usb_submit_urb(urb, SLAB_ATOMIC);
+	status = usb_submit_urb(urb, GFP_ATOMIC);
 	if (status)
 		err("can't resubmit intr, %s-%s/interfacekit0, status %d",
 			kit->udev->bus->bus_name,
 			kit->udev->devpath, status);
 }
 
-static void do_notify(void *data)
+static void do_notify(struct work_struct *work)
 {
-	struct interfacekit *kit = data;
+	struct interfacekit *kit =
+		container_of(work, struct interfacekit, do_notify.work);
 	int i;
 	char sysfs_file[8];
 
@@ -405,9 +406,11 @@
 	}
 }
 
-static void do_resubmit(void *data)
+static void do_resubmit(struct work_struct *work)
 {
-	set_outputs(data);
+	struct interfacekit *kit =
+		container_of(work, struct interfacekit, do_resubmit.work);
+	set_outputs(kit);
 }
 
 #define show_set_output(value)		\
@@ -551,7 +554,7 @@
 		return -ENODEV;
 
 	endpoint = &interface->endpoint[0].desc;
-	if (!(endpoint->bEndpointAddress & 0x80)) 
+	if (!usb_endpoint_dir_in(endpoint))
 		return -ENODEV;
 	/*
 	 * bmAttributes
@@ -565,7 +568,7 @@
 
 	kit->dev_no = -1;
 	kit->ifkit = ifkit;
-	kit->data = usb_buffer_alloc(dev, URB_INT_SIZE, SLAB_ATOMIC, &kit->data_dma);
+	kit->data = usb_buffer_alloc(dev, URB_INT_SIZE, GFP_ATOMIC, &kit->data_dma);
 	if (!kit->data)
 		goto out;
 
@@ -575,8 +578,8 @@
 
 	kit->udev = usb_get_dev(dev);
 	kit->intf = intf;
-	INIT_WORK(&kit->do_notify, do_notify, kit);
-	INIT_WORK(&kit->do_resubmit, do_resubmit, kit);
+	INIT_DELAYED_WORK(&kit->do_notify, do_notify);
+	INIT_DELAYED_WORK(&kit->do_resubmit, do_resubmit);
 	usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data,
 			maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
 			interfacekit_irq, kit, endpoint->bInterval);
@@ -650,8 +653,7 @@
 		device_remove_file(kit->dev, &dev_output_attrs[i]);
 out:
 	if (kit) {
-		if (kit->irq)
-			usb_free_urb(kit->irq);
+		usb_free_urb(kit->irq);
 		if (kit->data)
 			usb_buffer_free(dev, URB_INT_SIZE, kit->data, kit->data_dma);
 		if (kit->dev)
diff --git a/drivers/usb/misc/phidgetmotorcontrol.c b/drivers/usb/misc/phidgetmotorcontrol.c
index 5c780ca..5727e1e 100644
--- a/drivers/usb/misc/phidgetmotorcontrol.c
+++ b/drivers/usb/misc/phidgetmotorcontrol.c
@@ -41,7 +41,7 @@
 	unsigned char *data;
 	dma_addr_t data_dma;
 
-	struct work_struct do_notify;
+	struct delayed_work do_notify;
 	unsigned long input_events;
 	unsigned long speed_events;
 	unsigned long exceed_events;
@@ -148,10 +148,10 @@
 		set_bit(1, &mc->exceed_events);
 
 	if (mc->input_events || mc->exceed_events || mc->speed_events)
-		schedule_work(&mc->do_notify);
+		schedule_delayed_work(&mc->do_notify, 0);
 
 resubmit:
-	status = usb_submit_urb(urb, SLAB_ATOMIC);
+	status = usb_submit_urb(urb, GFP_ATOMIC);
 	if (status)
 		dev_err(&mc->intf->dev,
 			"can't resubmit intr, %s-%s/motorcontrol0, status %d",
@@ -159,9 +159,10 @@
 			mc->udev->devpath, status);
 }
 
-static void do_notify(void *data)
+static void do_notify(struct work_struct *work)
 {
-	struct motorcontrol *mc = data;
+	struct motorcontrol *mc =
+		container_of(work, struct motorcontrol, do_notify.work);
 	int i;
 	char sysfs_file[8];
 
@@ -323,7 +324,7 @@
 		return -ENODEV;
 
 	endpoint = &interface->endpoint[0].desc;
-	if (!(endpoint->bEndpointAddress & 0x80))
+	if (!usb_endpoint_dir_in(endpoint))
 		return -ENODEV;
 
 	/*
@@ -337,7 +338,7 @@
 		goto out;
 
 	mc->dev_no = -1;
-	mc->data = usb_buffer_alloc(dev, URB_INT_SIZE, SLAB_ATOMIC, &mc->data_dma);
+	mc->data = usb_buffer_alloc(dev, URB_INT_SIZE, GFP_ATOMIC, &mc->data_dma);
 	if (!mc->data)
 		goto out;
 
@@ -348,7 +349,7 @@
 	mc->udev = usb_get_dev(dev);
 	mc->intf = intf;
 	mc->acceleration[0] = mc->acceleration[1] = 10;
-	INIT_WORK(&mc->do_notify, do_notify, mc);
+	INIT_DELAYED_WORK(&mc->do_notify, do_notify);
 	usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data,
 			maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
 			motorcontrol_irq, mc, endpoint->bInterval);
@@ -392,8 +393,7 @@
 		device_remove_file(mc->dev, &dev_attrs[i]);
 out:
 	if (mc) {
-		if (mc->irq)
-			usb_free_urb(mc->irq);
+		usb_free_urb(mc->irq);
 		if (mc->data)
 			usb_buffer_free(dev, URB_INT_SIZE, mc->data, mc->data_dma);
 		if (mc->dev)
diff --git a/drivers/usb/misc/usb_u132.h b/drivers/usb/misc/usb_u132.h
index 551ba89..dc2e5a3 100644
--- a/drivers/usb/misc/usb_u132.h
+++ b/drivers/usb/misc/usb_u132.h
@@ -52,7 +52,7 @@
 * the kernel to load the "u132-hcd" module.
 *
 * The "ftdi-u132" module provides the interface to the inserted
-* PC card and the "u132-hcd" module uses the API to send and recieve
+* PC card and the "u132-hcd" module uses the API to send and receive
 * data. The API features call-backs, so that part of the "u132-hcd"
 * module code will run in the context of one of the kernel threads
 * of the "ftdi-u132" module.
@@ -95,3 +95,7 @@
          int halted, int skipped, int actual, int non_null));
 int usb_ftdi_elan_edset_flush(struct platform_device *pdev, u8 ed_number,
         void *endp);
+int usb_ftdi_elan_read_pcimem(struct platform_device *pdev, int mem_offset,
+			      u8 width, u32 *data);
+int usb_ftdi_elan_write_pcimem(struct platform_device *pdev, int mem_offset,
+			       u8 width, u32 data);
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 7c2cbdf..fb32186 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -138,7 +138,7 @@
 			default:
 				continue;
 			}
-			if (e->desc.bEndpointAddress & USB_DIR_IN) {
+			if (usb_endpoint_dir_in(&e->desc)) {
 				if (!in)
 					in = e;
 			} else {
@@ -147,7 +147,7 @@
 			}
 			continue;
 try_iso:
-			if (e->desc.bEndpointAddress & USB_DIR_IN) {
+			if (usb_endpoint_dir_in(&e->desc)) {
 				if (!iso_in)
 					iso_in = e;
 			} else {
@@ -213,7 +213,7 @@
 
 	if (bytes < 0)
 		return NULL;
-	urb = usb_alloc_urb (0, SLAB_KERNEL);
+	urb = usb_alloc_urb (0, GFP_KERNEL);
 	if (!urb)
 		return urb;
 	usb_fill_bulk_urb (urb, udev, pipe, NULL, bytes, simple_callback, NULL);
@@ -223,7 +223,7 @@
 	urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
 	if (usb_pipein (pipe))
 		urb->transfer_flags |= URB_SHORT_NOT_OK;
-	urb->transfer_buffer = usb_buffer_alloc (udev, bytes, SLAB_KERNEL,
+	urb->transfer_buffer = usb_buffer_alloc (udev, bytes, GFP_KERNEL,
 			&urb->transfer_dma);
 	if (!urb->transfer_buffer) {
 		usb_free_urb (urb);
@@ -315,7 +315,7 @@
 		init_completion (&completion);
 		if (usb_pipeout (urb->pipe))
 			simple_fill_buf (urb);
-		if ((retval = usb_submit_urb (urb, SLAB_KERNEL)) != 0)
+		if ((retval = usb_submit_urb (urb, GFP_KERNEL)) != 0)
 			break;
 
 		/* NOTE:  no timeouts; can't be broken out of by interrupt */
@@ -374,7 +374,7 @@
 	unsigned		i;
 	unsigned		size = max;
 
-	sg = kmalloc (nents * sizeof *sg, SLAB_KERNEL);
+	sg = kmalloc (nents * sizeof *sg, GFP_KERNEL);
 	if (!sg)
 		return NULL;
 
@@ -382,7 +382,7 @@
 		char		*buf;
 		unsigned	j;
 
-		buf = kzalloc (size, SLAB_KERNEL);
+		buf = kzalloc (size, GFP_KERNEL);
 		if (!buf) {
 			free_sglist (sg, i);
 			return NULL;
@@ -428,7 +428,7 @@
 				(udev->speed == USB_SPEED_HIGH)
 					? (INTERRUPT_RATE << 3)
 					: INTERRUPT_RATE,
-				sg, nents, 0, SLAB_KERNEL);
+				sg, nents, 0, GFP_KERNEL);
 		
 		if (retval)
 			break;
@@ -819,7 +819,7 @@
 
 	/* resubmit if we need to, else mark this as done */
 	if ((status == 0) && (ctx->pending < ctx->count)) {
-		if ((status = usb_submit_urb (urb, SLAB_ATOMIC)) != 0) {
+		if ((status = usb_submit_urb (urb, GFP_ATOMIC)) != 0) {
 			dbg ("can't resubmit ctrl %02x.%02x, err %d",
 				reqp->bRequestType, reqp->bRequest, status);
 			urb->dev = NULL;
@@ -855,7 +855,7 @@
 	 * as with bulk/intr sglists, sglen is the queue depth; it also
 	 * controls which subtests run (more tests than sglen) or rerun.
 	 */
-	urb = kcalloc(param->sglen, sizeof(struct urb *), SLAB_KERNEL);
+	urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
 	if (!urb)
 		return -ENOMEM;
 	for (i = 0; i < param->sglen; i++) {
@@ -981,7 +981,7 @@
 		if (!u)
 			goto cleanup;
 
-		reqp = usb_buffer_alloc (udev, sizeof *reqp, SLAB_KERNEL,
+		reqp = usb_buffer_alloc (udev, sizeof *reqp, GFP_KERNEL,
 				&u->setup_dma);
 		if (!reqp)
 			goto cleanup;
@@ -999,7 +999,7 @@
 	context.urb = urb;
 	spin_lock_irq (&context.lock);
 	for (i = 0; i < param->sglen; i++) {
-		context.status = usb_submit_urb (urb [i], SLAB_ATOMIC);
+		context.status = usb_submit_urb (urb [i], GFP_ATOMIC);
 		if (context.status != 0) {
 			dbg ("can't submit urb[%d], status %d",
 					i, context.status);
@@ -1041,7 +1041,7 @@
 
 	// we "know" -EPIPE (stall) never happens
 	if (!status)
-		status = usb_submit_urb (urb, SLAB_ATOMIC);
+		status = usb_submit_urb (urb, GFP_ATOMIC);
 	if (status) {
 		urb->status = status;
 		complete ((struct completion *) urb->context);
@@ -1067,7 +1067,7 @@
 	 * FIXME want additional tests for when endpoint is STALLing
 	 * due to errors, or is just NAKing requests.
 	 */
-	if ((retval = usb_submit_urb (urb, SLAB_KERNEL)) != 0) {
+	if ((retval = usb_submit_urb (urb, GFP_KERNEL)) != 0) {
 		dev_dbg (&dev->intf->dev, "submit fail %d\n", retval);
 		return retval;
 	}
@@ -1251,7 +1251,7 @@
 	if (length < 1 || length > 0xffff || vary >= length)
 		return -EINVAL;
 
-	buf = kmalloc(length, SLAB_KERNEL);
+	buf = kmalloc(length, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
@@ -1403,7 +1403,7 @@
 	maxp *= 1 + (0x3 & (le16_to_cpu(desc->wMaxPacketSize) >> 11));
 	packets = (bytes + maxp - 1) / maxp;
 
-	urb = usb_alloc_urb (packets, SLAB_KERNEL);
+	urb = usb_alloc_urb (packets, GFP_KERNEL);
 	if (!urb)
 		return urb;
 	urb->dev = udev;
@@ -1411,7 +1411,7 @@
 
 	urb->number_of_packets = packets;
 	urb->transfer_buffer_length = bytes;
-	urb->transfer_buffer = usb_buffer_alloc (udev, bytes, SLAB_KERNEL,
+	urb->transfer_buffer = usb_buffer_alloc (udev, bytes, GFP_KERNEL,
 			&urb->transfer_dma);
 	if (!urb->transfer_buffer) {
 		usb_free_urb (urb);
@@ -1481,7 +1481,7 @@
 	spin_lock_irq (&context.lock);
 	for (i = 0; i < param->sglen; i++) {
 		++context.pending;
-		status = usb_submit_urb (urbs [i], SLAB_ATOMIC);
+		status = usb_submit_urb (urbs [i], GFP_ATOMIC);
 		if (status < 0) {
 			ERROR (dev, "submit iso[%d], error %d\n", i, status);
 			if (i == 0) {
@@ -1900,7 +1900,7 @@
 	}
 #endif
 
-	dev = kzalloc(sizeof(*dev), SLAB_KERNEL);
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev)
 		return -ENOMEM;
 	info = (struct usbtest_info *) id->driver_info;
@@ -1910,7 +1910,7 @@
 	dev->intf = intf;
 
 	/* cacheline-aligned scratch for i/o */
-	if ((dev->buf = kmalloc (TBUF_SIZE, SLAB_KERNEL)) == NULL) {
+	if ((dev->buf = kmalloc (TBUF_SIZE, GFP_KERNEL)) == NULL) {
 		kfree (dev);
 		return -ENOMEM;
 	}
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 7a2346c..05cf2c9 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -50,7 +50,7 @@
 
 #define SLAB_NAME_SZ  30
 struct mon_reader_text {
-	kmem_cache_t *e_slab;
+	struct kmem_cache *e_slab;
 	int nevents;
 	struct list_head e_list;
 	struct mon_reader r;	/* In C, parent class can be placed anywhere */
@@ -63,7 +63,7 @@
 	char slab_name[SLAB_NAME_SZ];
 };
 
-static void mon_text_ctor(void *, kmem_cache_t *, unsigned long);
+static void mon_text_ctor(void *, struct kmem_cache *, unsigned long);
 
 /*
  * mon_text_submit
@@ -147,7 +147,7 @@
 	stamp = mon_get_timestamp();
 
 	if (rp->nevents >= EVENT_MAX ||
-	    (ep = kmem_cache_alloc(rp->e_slab, SLAB_ATOMIC)) == NULL) {
+	    (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) {
 		rp->r.m_bus->cnt_text_lost++;
 		return;
 	}
@@ -188,7 +188,7 @@
 	struct mon_event_text *ep;
 
 	if (rp->nevents >= EVENT_MAX ||
-	    (ep = kmem_cache_alloc(rp->e_slab, SLAB_ATOMIC)) == NULL) {
+	    (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) {
 		rp->r.m_bus->cnt_text_lost++;
 		return;
 	}
@@ -450,7 +450,7 @@
 /*
  * Slab interface: constructor.
  */
-static void mon_text_ctor(void *mem, kmem_cache_t *slab, unsigned long sflags)
+static void mon_text_ctor(void *mem, struct kmem_cache *slab, unsigned long sflags)
 {
 	/*
 	 * Nothing to initialize. No, really!
diff --git a/drivers/usb/net/asix.c b/drivers/usb/net/asix.c
index 881841e..95e682e 100644
--- a/drivers/usb/net/asix.c
+++ b/drivers/usb/net/asix.c
@@ -249,9 +249,9 @@
 
 	req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
 	req->bRequest = cmd;
-	req->wValue = value;
-	req->wIndex = index;
-	req->wLength = size;
+	req->wValue = cpu_to_le16(value);
+	req->wIndex = cpu_to_le16(index);
+	req->wLength = cpu_to_le16(size);
 
 	usb_fill_control_urb(urb, dev->udev,
 			     usb_sndctrlpipe(dev->udev, 0),
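The asix change matters on big-endian hosts: the wValue, wIndex and wLength fields of struct usb_ctrlrequest are little-endian on the wire, so they must be converted when the request block is filled by hand for an asynchronous submission (usb_control_msg() does the conversion internally, usb_fill_control_urb() does not). An illustrative fragment with made-up parameter names:

	#include <linux/usb.h>
	#include <linux/usb/ch9.h>

	static void fill_vendor_write(struct usb_ctrlrequest *req, u8 cmd,
				      u16 value, u16 index, u16 size)
	{
		req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR |
				    USB_RECIP_DEVICE;
		req->bRequest = cmd;
		/* the multi-byte fields are __le16 */
		req->wValue  = cpu_to_le16(value);
		req->wIndex  = cpu_to_le16(index);
		req->wLength = cpu_to_le16(size);
	}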
diff --git a/drivers/usb/net/catc.c b/drivers/usb/net/catc.c
index f740325..4852012 100644
--- a/drivers/usb/net/catc.c
+++ b/drivers/usb/net/catc.c
@@ -345,7 +345,7 @@
 		} 
 	}
 resubmit:
-	status = usb_submit_urb (urb, SLAB_ATOMIC);
+	status = usb_submit_urb (urb, GFP_ATOMIC);
 	if (status)
 		err ("can't resubmit intr, %s-%s, status %d",
 				catc->usbdev->bus->bus_name,
@@ -786,14 +786,10 @@
 	if ((!catc->ctrl_urb) || (!catc->tx_urb) || 
 	    (!catc->rx_urb) || (!catc->irq_urb)) {
 		err("No free urbs available.");
-		if (catc->ctrl_urb)
-			usb_free_urb(catc->ctrl_urb);
-		if (catc->tx_urb)
-			usb_free_urb(catc->tx_urb);
-		if (catc->rx_urb)
-			usb_free_urb(catc->rx_urb);
-		if (catc->irq_urb)
-			usb_free_urb(catc->irq_urb);
+		usb_free_urb(catc->ctrl_urb);
+		usb_free_urb(catc->tx_urb);
+		usb_free_urb(catc->rx_urb);
+		usb_free_urb(catc->irq_urb);
 		free_netdev(netdev);
 		return -ENOMEM;
 	}
diff --git a/drivers/usb/net/cdc_ether.c b/drivers/usb/net/cdc_ether.c
index f6971b8..44a9154 100644
--- a/drivers/usb/net/cdc_ether.c
+++ b/drivers/usb/net/cdc_ether.c
@@ -200,8 +200,7 @@
 
 		dev->status = &info->control->cur_altsetting->endpoint [0];
 		desc = &dev->status->desc;
-		if (desc->bmAttributes != USB_ENDPOINT_XFER_INT
-				|| !(desc->bEndpointAddress & USB_DIR_IN)
+		if (!usb_endpoint_is_int_in(desc)
 				|| (le16_to_cpu(desc->wMaxPacketSize)
 					< sizeof(struct usb_cdc_notification))
 				|| !desc->bInterval) {
diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c
index 7c906a4..fa78326 100644
--- a/drivers/usb/net/kaweth.c
+++ b/drivers/usb/net/kaweth.c
@@ -222,7 +222,7 @@
 	int suspend_lowmem_ctrl;
 	int linkstate;
 	int opened;
-	struct work_struct lowmem_work;
+	struct delayed_work lowmem_work;
 
 	struct usb_device *dev;
 	struct net_device *net;
@@ -530,9 +530,10 @@
 	kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC);
 }
 
-static void kaweth_resubmit_tl(void *d)
+static void kaweth_resubmit_tl(struct work_struct *work)
 {
-	struct kaweth_device *kaweth = (struct kaweth_device *)d;
+	struct kaweth_device *kaweth =
+		container_of(work, struct kaweth_device, lowmem_work.work);
 
 	if (IS_BLOCKED(kaweth->status))
 		return;
@@ -1126,7 +1127,7 @@
 
 	/* kaweth is zeroed as part of alloc_netdev */
 
-	INIT_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl, (void *)kaweth);
+	INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
 
 	SET_MODULE_OWNER(netdev);
 
diff --git a/drivers/usb/net/net1080.c b/drivers/usb/net/net1080.c
index ce00de8..4936359 100644
--- a/drivers/usb/net/net1080.c
+++ b/drivers/usb/net/net1080.c
@@ -237,12 +237,12 @@
 #define	STATUS_CONN_OTHER	(1 << 14)
 #define	STATUS_SUSPEND_OTHER	(1 << 13)
 #define	STATUS_MAILBOX_OTHER	(1 << 12)
-#define	STATUS_PACKETS_OTHER(n)	(((n) >> 8) && 0x03)
+#define	STATUS_PACKETS_OTHER(n)	(((n) >> 8) & 0x03)
 
 #define	STATUS_CONN_THIS	(1 << 6)
 #define	STATUS_SUSPEND_THIS	(1 << 5)
 #define	STATUS_MAILBOX_THIS	(1 << 4)
-#define	STATUS_PACKETS_THIS(n)	(((n) >> 0) && 0x03)
+#define	STATUS_PACKETS_THIS(n)	(((n) >> 0) & 0x03)
 
 #define	STATUS_UNSPEC_MASK	0x0c8c
 #define	STATUS_NOISE_MASK 	((u16)~(0x0303|STATUS_UNSPEC_MASK))
@@ -383,7 +383,7 @@
 		int			status;
 
 		/* Send a flush */
-		urb = usb_alloc_urb(0, SLAB_ATOMIC);
+		urb = usb_alloc_urb(0, GFP_ATOMIC);
 		if (!urb)
 			return;
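The net1080 macro change above fixes an operator slip: '&&' (logical AND) was used where '&' (bitwise AND) was intended, so the STATUS_PACKETS_* macros could only ever evaluate to 0 or 1 instead of the two-bit packet count. A tiny worked example (the BROKEN/FIXED names are only for illustration):

	#define STATUS_PACKETS_BROKEN(n)	(((n) >> 8) && 0x03)	/* 0 or 1 */
	#define STATUS_PACKETS_FIXED(n)		(((n) >> 8) & 0x03)	/* 0..3   */

	/* With n = 0x0200: the broken form gives 1 (2 && 3 is "true"),
	 * the fixed form gives 2, the actual count held in bits 9:8. */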
 
diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c
index 33abbd2..d48c024 100644
--- a/drivers/usb/net/pegasus.c
+++ b/drivers/usb/net/pegasus.c
@@ -163,6 +163,7 @@
 
 	/* using ATOMIC, we'd never wake up if we slept */
 	if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
+		set_current_state(TASK_RUNNING);
 		if (ret == -ENODEV)
 			netif_device_detach(pegasus->net);
 		if (netif_msg_drv(pegasus))
@@ -855,7 +856,7 @@
 		pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4];
 	}
 
-	status = usb_submit_urb(urb, SLAB_ATOMIC);
+	status = usb_submit_urb(urb, GFP_ATOMIC);
 	if (status == -ENODEV)
 		netif_device_detach(pegasus->net);
 	if (status && netif_msg_timer(pegasus))
@@ -1280,9 +1281,9 @@
 static struct workqueue_struct *pegasus_workqueue = NULL;
 #define CARRIER_CHECK_DELAY (2 * HZ)
 
-static void check_carrier(void *data)
+static void check_carrier(struct work_struct *work)
 {
-	pegasus_t *pegasus = data;
+	pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work);
 	set_carrier(pegasus->net);
 	if (!(pegasus->flags & PEGASUS_UNPLUG)) {
 		queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
@@ -1318,7 +1319,7 @@
 
 	tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus);
 
-	INIT_WORK(&pegasus->carrier_check, check_carrier, pegasus);
+	INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier);
 
 	pegasus->intf = intf;
 	pegasus->usb = dev;
diff --git a/drivers/usb/net/pegasus.h b/drivers/usb/net/pegasus.h
index 0064380..98f6898 100644
--- a/drivers/usb/net/pegasus.h
+++ b/drivers/usb/net/pegasus.h
@@ -95,7 +95,7 @@
 	int			dev_index;
 	int			intr_interval;
 	struct tasklet_struct	rx_tl;
-	struct work_struct	carrier_check;
+	struct delayed_work	carrier_check;
 	struct urb		*ctrl_urb, *rx_urb, *tx_urb, *intr_urb;
 	struct sk_buff		*rx_pool[RX_SKBS];
 	struct sk_buff		*rx_skb;
diff --git a/drivers/usb/net/rndis_host.c b/drivers/usb/net/rndis_host.c
index c2a28d8..99f26b3 100644
--- a/drivers/usb/net/rndis_host.c
+++ b/drivers/usb/net/rndis_host.c
@@ -469,7 +469,7 @@
 	struct rndis_halt	*halt;
 
 	/* try to clear any rndis state/activity (no i/o from stack!) */
-	halt = kcalloc(1, sizeof *halt, SLAB_KERNEL);
+	halt = kcalloc(1, sizeof *halt, GFP_KERNEL);
 	if (halt) {
 		halt->msg_type = RNDIS_MSG_HALT;
 		halt->msg_len = ccpu2(sizeof *halt);
diff --git a/drivers/usb/net/rtl8150.c b/drivers/usb/net/rtl8150.c
index 72171f9..c54235f 100644
--- a/drivers/usb/net/rtl8150.c
+++ b/drivers/usb/net/rtl8150.c
@@ -587,7 +587,7 @@
 	}
 
 resubmit:
-	status = usb_submit_urb (urb, SLAB_ATOMIC);
+	status = usb_submit_urb (urb, GFP_ATOMIC);
 	if (status == -ENODEV)
 		netif_device_detach(dev->netdev);
 	else if (status)
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index 760b532..6e39e99 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/usb/net/usbnet.c
@@ -116,7 +116,7 @@
 			e = alt->endpoint + ep;
 			switch (e->desc.bmAttributes) {
 			case USB_ENDPOINT_XFER_INT:
-				if (!(e->desc.bEndpointAddress & USB_DIR_IN))
+				if (!usb_endpoint_dir_in(&e->desc))
 					continue;
 				intr = 1;
 				/* FALLTHROUGH */
@@ -125,7 +125,7 @@
 			default:
 				continue;
 			}
-			if (e->desc.bEndpointAddress & USB_DIR_IN) {
+			if (usb_endpoint_dir_in(&e->desc)) {
 				if (!intr && !in)
 					in = e;
 				else if (intr && !status)
@@ -179,9 +179,9 @@
 	period = max ((int) dev->status->desc.bInterval,
 		(dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
 
-	buf = kmalloc (maxp, SLAB_KERNEL);
+	buf = kmalloc (maxp, GFP_KERNEL);
 	if (buf) {
-		dev->interrupt = usb_alloc_urb (0, SLAB_KERNEL);
+		dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
 		if (!dev->interrupt) {
 			kfree (buf);
 			return -ENOMEM;
@@ -782,9 +782,10 @@
  * especially now that control transfers can be queued.
  */
 static void
-kevent (void *data)
+kevent (struct work_struct *work)
 {
-	struct usbnet		*dev = data;
+	struct usbnet		*dev =
+		container_of(work, struct usbnet, kevent);
 	int			status;
 
 	/* usb_clear_halt() needs a thread context */
@@ -1146,7 +1147,7 @@
 	skb_queue_head_init (&dev->done);
 	dev->bh.func = usbnet_bh;
 	dev->bh.data = (unsigned long) dev;
-	INIT_WORK (&dev->kevent, kevent, dev);
+	INIT_WORK (&dev->kevent, kevent);
 	dev->delay.function = usbnet_bh;
 	dev->delay.data = (unsigned long) dev;
 	init_timer (&dev->delay);
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 2a8dd4c..2f4d303 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -554,6 +554,17 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called omninet.
 
+config USB_SERIAL_DEBUG
+	tristate "USB Debugging Device"
+	depends on USB_SERIAL
+	help
+	  Say Y here if you have a USB debugging device used to receive
+	  debugging data from another machine.  The most common of these
+	  devices is the NetChip TurboCONNECT device.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called usb_debug.
+
 config USB_EZUSB
 	bool
 	depends on USB_SERIAL_KEYSPAN_PDA || USB_SERIAL_XIRCOM || USB_SERIAL_KEYSPAN || USB_SERIAL_WHITEHEAT
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index a5047dc..61166ad 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -18,6 +18,7 @@
 obj-$(CONFIG_USB_SERIAL_CP2101)			+= cp2101.o
 obj-$(CONFIG_USB_SERIAL_CYBERJACK)		+= cyberjack.o
 obj-$(CONFIG_USB_SERIAL_CYPRESS_M8)		+= cypress_m8.o
+obj-$(CONFIG_USB_SERIAL_DEBUG)			+= usb_debug.o
 obj-$(CONFIG_USB_SERIAL_DIGI_ACCELEPORT)	+= digi_acceleport.o
 obj-$(CONFIG_USB_SERIAL_EDGEPORT)		+= io_edgeport.o
 obj-$(CONFIG_USB_SERIAL_EDGEPORT_TI)		+= io_ti.o
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 8122755..86bcf63 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -92,6 +92,7 @@
 	struct circ_buf *rx_buf;	/* read buffer */
 	int rx_flags;			/* for throttilng */
 	struct work_struct rx_work;	/* work cue for the receiving line */
+	struct usb_serial_port *port;	/* USB port with which associated */
 };
 
 /* Private methods */
@@ -251,10 +252,11 @@
 	schedule_work(&port->work);
 }
 
-static void aircable_read(void *params)
+static void aircable_read(struct work_struct *work)
 {
-	struct usb_serial_port *port = params;
-	struct aircable_private *priv = usb_get_serial_port_data(port);
+	struct aircable_private *priv =
+		container_of(work, struct aircable_private, rx_work);
+	struct usb_serial_port *port = priv->port;
 	struct tty_struct *tty;
 	unsigned char *data;
 	int count;
@@ -270,8 +272,11 @@
 	 */
 	tty = port->tty;
 
-	if (!tty)
+	if (!tty) {
 		schedule_work(&priv->rx_work);
+		err("%s - No tty available", __FUNCTION__);
+		return;
+	}
 
 	count = min(64, serial_buf_data_avail(priv->rx_buf));
 
@@ -305,9 +310,7 @@
 
 	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
 		endpoint = &iface_desc->endpoint[i].desc;
-		if (((endpoint->bEndpointAddress & 0x80) == 0x00) &&
-			((endpoint->bmAttributes & 3) == 0x02)) {
-			/* we found our bulk out endpoint */
+		if (usb_endpoint_is_bulk_out(endpoint)) {
 			dbg("found bulk out on endpoint %d", i);
 			++num_bulk_out;
 		}
@@ -348,7 +351,8 @@
 	}
 
 	priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
-	INIT_WORK(&priv->rx_work, aircable_read, port);
+	priv->port = port;
+	INIT_WORK(&priv->rx_work, aircable_read);
 
 	usb_set_serial_port_data(serial->port[0], priv);
 
@@ -515,7 +519,7 @@
 					package_length - shift);
 			}
 		}
-		aircable_read(port);
+		aircable_read(&priv->rx_work);
 	}
 
 	/* Schedule the next read _if_ we are still open */
diff --git a/drivers/usb/serial/airprime.c b/drivers/usb/serial/airprime.c
index 7f5d546..96c7372 100644
--- a/drivers/usb/serial/airprime.c
+++ b/drivers/usb/serial/airprime.c
@@ -19,6 +19,7 @@
 static struct usb_device_id id_table [] = {
 	{ USB_DEVICE(0x0c88, 0x17da) }, /* Kyocera Wireless KPC650/Passport */
 	{ USB_DEVICE(0x1410, 0x1110) }, /* Novatel Wireless Merlin CDMA */
+	{ USB_DEVICE(0x1410, 0x1430) },	/* Novatel Merlin XU870 HSDPA/3G */
 	{ USB_DEVICE(0x1410, 0x1100) }, /* ExpressCard34 Qualcomm 3G CDMA */
 	{ },
 };
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index ca52f12..863966c 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -85,10 +85,9 @@
 	int i;
 
 	for (i = 0; i < serial->num_ports; ++i) {
-		priv = kmalloc(sizeof (struct ark3116_private), GFP_KERNEL);
+		priv = kzalloc(sizeof(struct ark3116_private), GFP_KERNEL);
 		if (!priv)
 			goto cleanup;
-		memset(priv, 0x00, sizeof (struct ark3116_private));
 		spin_lock_init(&priv->lock);
 
 		usb_set_serial_port_data(serial->port[i], priv);
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 3a9073d..7167728 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -166,19 +166,17 @@
 	if (serial->type->set_termios) {
 		/* build up a fake tty structure so that the open call has something
 		 * to look at to get the cflag value */
-		tty = kmalloc (sizeof (*tty), GFP_KERNEL);
+		tty = kzalloc(sizeof(*tty), GFP_KERNEL);
 		if (!tty) {
 			err ("no more memory");
 			return -ENOMEM;
 		}
-		termios = kmalloc (sizeof (*termios), GFP_KERNEL);
+		termios = kzalloc(sizeof(*termios), GFP_KERNEL);
 		if (!termios) {
 			err ("no more memory");
 			kfree (tty);
 			return -ENOMEM;
 		}
-		memset (tty, 0x00, sizeof(*tty));
-		memset (termios, 0x00, sizeof(*termios));
 		termios->c_cflag = cflag;
 		tty->termios = termios;
 		port->tty = tty;
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index f2e89a0..093f303 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -1684,15 +1684,14 @@
 
 	info(DRIVER_DESC " " DRIVER_VERSION);
 	return 0;
-failed_usb_register:
-	usb_deregister(&cypress_driver);
-failed_ca42v2_register:
-	usb_serial_deregister(&cypress_ca42v2_device);
-failed_hidcom_register:
-	usb_serial_deregister(&cypress_hidcom_device);
-failed_em_register:
-	usb_serial_deregister(&cypress_earthmate_device);
 
+failed_usb_register:
+	usb_serial_deregister(&cypress_ca42v2_device);
+failed_ca42v2_register:
+	usb_serial_deregister(&cypress_hidcom_device);
+failed_hidcom_register:
+	usb_serial_deregister(&cypress_earthmate_device);
+failed_em_register:
 	return retval;
 }
 
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index bdb5810..83d0e21 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -157,7 +157,7 @@
 *       to TASK_RUNNING will be lost and write_chan's subsequent call to
 *       schedule() will never return (unless it catches a signal).
 *       This race condition occurs because write_bulk_callback() (and thus
-*       the wakeup) are called asynchonously from an interrupt, rather than
+*       the wakeup) are called asynchronously from an interrupt, rather than
 *       from the scheduler.  We can avoid the race by calling the wakeup
 *       from the scheduler queue and that's our fix:  Now, at the end of
 *       write_bulk_callback() we queue up a wakeup call on the scheduler
@@ -430,13 +430,14 @@
 	int dp_in_close;			/* close in progress */
 	wait_queue_head_t dp_close_wait;	/* wait queue for close */
 	struct work_struct dp_wakeup_work;
+	struct usb_serial_port *dp_port;
 };
 
 
 /* Local Function Declarations */
 
 static void digi_wakeup_write( struct usb_serial_port *port );
-static void digi_wakeup_write_lock(void *);
+static void digi_wakeup_write_lock(struct work_struct *work);
 static int digi_write_oob_command( struct usb_serial_port *port,
 	unsigned char *buf, int count, int interruptible );
 static int digi_write_inb_command( struct usb_serial_port *port,
@@ -598,11 +599,12 @@
 *  on writes.
 */
 
-static void digi_wakeup_write_lock(void *arg)
+static void digi_wakeup_write_lock(struct work_struct *work)
 {
-	struct usb_serial_port *port = arg;
+	struct digi_port *priv =
+		container_of(work, struct digi_port, dp_wakeup_work);
+	struct usb_serial_port *port = priv->dp_port;
 	unsigned long flags;
-	struct digi_port *priv = usb_get_serial_port_data(port);
 
 
 	spin_lock_irqsave( &priv->dp_port_lock, flags );
@@ -1702,8 +1704,8 @@
 		init_waitqueue_head( &priv->dp_flush_wait );
 		priv->dp_in_close = 0;
 		init_waitqueue_head( &priv->dp_close_wait );
-		INIT_WORK(&priv->dp_wakeup_work,
-				digi_wakeup_write_lock, serial->port[i]);
+		INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
+		priv->dp_port = serial->port[i];
 
 		/* initialize write wait queue for this port */
 		init_waitqueue_head( &serial->port[i]->write_wait );
diff --git a/drivers/usb/serial/ezusb.c b/drivers/usb/serial/ezusb.c
index 5169c2d..97ee718 100644
--- a/drivers/usb/serial/ezusb.c
+++ b/drivers/usb/serial/ezusb.c
@@ -31,12 +31,11 @@
 		return -ENODEV;
 	}
 
-	transfer_buffer =  kmalloc (length, GFP_KERNEL);
+	transfer_buffer = kmemdup(data, length, GFP_KERNEL);
 	if (!transfer_buffer) {
 		dev_err(&serial->dev->dev, "%s - kmalloc(%d) failed.\n", __FUNCTION__, length);
 		return -ENOMEM;
 	}
-	memcpy (transfer_buffer, data, length);
 	result = usb_control_msg (serial->dev, usb_sndctrlpipe(serial->dev, 0), bRequest, 0x40, address, 0, transfer_buffer, length, 3000);
 	kfree (transfer_buffer);
 	return result;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index c186b4e..72e4d48 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -559,7 +559,8 @@
 	char prev_status, diff_status;        /* Used for TIOCMIWAIT */
 	__u8 rx_flags;		/* receive state flags (throttling) */
 	spinlock_t rx_lock;	/* spinlock for receive state */
-	struct work_struct rx_work;
+	struct delayed_work rx_work;
+	struct usb_serial_port *port;
 	int rx_processed;
 	unsigned long rx_bytes;
 
@@ -593,7 +594,7 @@
 static int  ftdi_chars_in_buffer	(struct usb_serial_port *port);
 static void ftdi_write_bulk_callback	(struct urb *urb);
 static void ftdi_read_bulk_callback	(struct urb *urb);
-static void ftdi_process_read		(void *param);
+static void ftdi_process_read		(struct work_struct *work);
 static void ftdi_set_termios		(struct usb_serial_port *port, struct termios * old);
 static int  ftdi_tiocmget               (struct usb_serial_port *port, struct file *file);
 static int  ftdi_tiocmset		(struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear);
@@ -1201,7 +1202,8 @@
 		port->read_urb->transfer_buffer_length = BUFSZ;
 	}
 
-	INIT_WORK(&priv->rx_work, ftdi_process_read, port);
+	INIT_DELAYED_WORK(&priv->rx_work, ftdi_process_read);
+	priv->port = port;
 
 	/* Free port's existing write urb and transfer buffer. */
 	if (port->write_urb) {
@@ -1388,8 +1390,7 @@
 	flush_scheduled_work();
 
 	/* shutdown our bulk read */
-	if (port->read_urb)
-		usb_kill_urb(port->read_urb);
+	usb_kill_urb(port->read_urb);
 } /* ftdi_close */
 
 
@@ -1641,17 +1642,18 @@
 	priv->rx_bytes += countread;
 	spin_unlock_irqrestore(&priv->rx_lock, flags);
 
-	ftdi_process_read(port);
+	ftdi_process_read(&priv->rx_work.work);
 
 } /* ftdi_read_bulk_callback */
 
 
-static void ftdi_process_read (void *param)
+static void ftdi_process_read (struct work_struct *work)
 { /* ftdi_process_read */
-	struct usb_serial_port *port = (struct usb_serial_port*)param;
+	struct ftdi_private *priv =
+		container_of(work, struct ftdi_private, rx_work.work);
+	struct usb_serial_port *port = priv->port;
 	struct urb *urb;
 	struct tty_struct *tty;
-	struct ftdi_private *priv;
 	char error_flag;
 	unsigned char *data;
 
@@ -2180,7 +2182,7 @@
 	spin_unlock_irqrestore(&priv->rx_lock, flags);
 
 	if (actually_throttled)
-		schedule_work(&priv->rx_work);
+		schedule_delayed_work(&priv->rx_work, 0);
 }
 
 static int __init ftdi_init (void)
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 4543152..6530d39 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1523,12 +1523,11 @@
 
 	dbg("%s", __FUNCTION__);
 
-	garmin_data_p = kmalloc (sizeof(struct garmin_data), GFP_KERNEL);
+	garmin_data_p = kzalloc(sizeof(struct garmin_data), GFP_KERNEL);
 	if (garmin_data_p == NULL) {
 		dev_err(&port->dev, "%s - Out of memory\n", __FUNCTION__);
 		return -ENOMEM;
 	}
-	memset (garmin_data_p, 0, sizeof(struct garmin_data));
 	init_timer(&garmin_data_p->timer);
 	spin_lock_init(&garmin_data_p->lock);
 	INIT_LIST_HEAD(&garmin_data_p->pktlist);
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 91bd301..d06547a 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -1038,9 +1038,7 @@
 	edge_port->open = FALSE;
 	edge_port->openPending = FALSE;
 
-	if (edge_port->write_urb) {
-		usb_kill_urb(edge_port->write_urb);
-	}
+	usb_kill_urb(edge_port->write_urb);
 
 	if (edge_port->write_urb) {
 		/* if this urb had a transfer buffer already (old transfer) free it */
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 2a4bb66..d3b9a35 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -206,10 +206,9 @@
 
 	dbg("%s", __FUNCTION__);
 
-	buf_flow_init = kmalloc(16, GFP_KERNEL);
+	buf_flow_init = kmemdup(buf_flow_static, 16, GFP_KERNEL);
 	if (!buf_flow_init)
 		return -ENOMEM;
-	memcpy(buf_flow_init, buf_flow_static, 16);
 
 	if (port->tty)
 		port->tty->low_latency = 1;
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 53be824..7639652 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -2306,22 +2306,16 @@
 	}
 
 	/* Now free them */
-	if (s_priv->instat_urb)
-		usb_free_urb(s_priv->instat_urb);
-	if (s_priv->glocont_urb)
-		usb_free_urb(s_priv->glocont_urb);
+	usb_free_urb(s_priv->instat_urb);
+	usb_free_urb(s_priv->glocont_urb);
 	for (i = 0; i < serial->num_ports; ++i) {
 		port = serial->port[i];
 		p_priv = usb_get_serial_port_data(port);
-		if (p_priv->inack_urb)
-			usb_free_urb(p_priv->inack_urb);
-		if (p_priv->outcont_urb)
-			usb_free_urb(p_priv->outcont_urb);
+		usb_free_urb(p_priv->inack_urb);
+		usb_free_urb(p_priv->outcont_urb);
 		for (j = 0; j < 2; j++) {
-			if (p_priv->in_urbs[j])
-				usb_free_urb(p_priv->in_urbs[j]);
-			if (p_priv->out_urbs[j])
-				usb_free_urb(p_priv->out_urbs[j]);
+			usb_free_urb(p_priv->in_urbs[j]);
+			usb_free_urb(p_priv->out_urbs[j]);
 		}
 	}
 
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 9090051..e09a0bf 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -120,6 +120,8 @@
 	int			tx_throttled;
 	struct work_struct			wakeup_work;
 	struct work_struct			unthrottle_work;
+	struct usb_serial	*serial;
+	struct usb_serial_port	*port;
 };
 
 
@@ -175,9 +177,11 @@
 };
 #endif
 
-static void keyspan_pda_wakeup_write( struct usb_serial_port *port )
+static void keyspan_pda_wakeup_write(struct work_struct *work)
 {
-
+	struct keyspan_pda_private *priv =
+		container_of(work, struct keyspan_pda_private, wakeup_work);
+	struct usb_serial_port *port = priv->port;
 	struct tty_struct *tty = port->tty;
 
 	/* wake up port processes */
@@ -187,8 +191,11 @@
 	tty_wakeup(tty);
 }
 
-static void keyspan_pda_request_unthrottle( struct usb_serial *serial )
+static void keyspan_pda_request_unthrottle(struct work_struct *work)
 {
+	struct keyspan_pda_private *priv =
+		container_of(work, struct keyspan_pda_private, unthrottle_work);
+	struct usb_serial *serial = priv->serial;
 	int result;
 
 	dbg(" request_unthrottle");
@@ -765,11 +772,10 @@
 		return (1); /* error */
 	usb_set_serial_port_data(serial->port[0], priv);
 	init_waitqueue_head(&serial->port[0]->write_wait);
-	INIT_WORK(&priv->wakeup_work, (void *)keyspan_pda_wakeup_write,
-			(void *)(serial->port[0]));
-	INIT_WORK(&priv->unthrottle_work,
-			(void *)keyspan_pda_request_unthrottle,
-			(void *)(serial));
+	INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write);
+	INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle);
+	priv->serial = serial;
+	priv->port = serial->port[0];
 	return (0);
 }
 
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index ff03331..2372899 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -185,13 +185,11 @@
   
  	for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
 		endpoint = &altsetting->endpoint[i];
-		if (((endpoint->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT) && 
- 		    ((endpoint->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT)) {
+		if (usb_endpoint_is_int_out(&endpoint->desc)) {
 		 	dbg("%s Found interrupt out endpoint. Address: %d", __FUNCTION__, endpoint->desc.bEndpointAddress);
 		 	priv->write_int_endpoint_address = endpoint->desc.bEndpointAddress;
  		}
- 		if (((endpoint->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) && 
- 		    ((endpoint->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT)) {
+		if (usb_endpoint_is_int_in(&endpoint->desc)) {
 		 	dbg("%s Found interrupt in  endpoint. Address: %d", __FUNCTION__, endpoint->desc.bEndpointAddress);
 		 	priv->read_int_endpoint_address = endpoint->desc.bEndpointAddress;
 	 	}
@@ -355,8 +353,7 @@
 		usb_free_urb( port->write_urb );
 		port->write_urb = NULL;
 	}
-	if (port->interrupt_in_urb)
-		usb_kill_urb(port->interrupt_in_urb);
+	usb_kill_urb(port->interrupt_in_urb);
 }
 
 
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index b7582cc..a906e50 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -358,10 +358,8 @@
 	/* Puh, that's dirty */
 	port = serial->port[0];
 	rport = serial->port[1];
-	if (port->read_urb) {
-		/* No unlinking, it wasn't submitted yet. */
-		usb_free_urb(port->read_urb);
-	}
+	/* No unlinking, it wasn't submitted yet. */
+	usb_free_urb(port->read_urb);
 	port->read_urb = rport->interrupt_in_urb;
 	rport->interrupt_in_urb = NULL;
 	port->read_urb->context = port;
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 82cd15b..70f93b1 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -363,7 +363,7 @@
 
 	/* Initialising the write urb pool */
 	for (j = 0; j < NUM_URBS; ++j) {
-		urb = usb_alloc_urb(0,SLAB_ATOMIC);
+		urb = usb_alloc_urb(0,GFP_ATOMIC);
 		mos7720_port->write_urb_pool[j] = urb;
 
 		if (urb == NULL) {
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 5b71962..5432c63 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -826,7 +826,7 @@
 
 	/* Initialising the write urb pool */
 	for (j = 0; j < NUM_URBS; ++j) {
-		urb = usb_alloc_urb(0, SLAB_ATOMIC);
+		urb = usb_alloc_urb(0, GFP_ATOMIC);
 		mos7840_port->write_urb_pool[j] = urb;
 
 		if (urb == NULL) {
@@ -2596,12 +2596,11 @@
 
 	/* set up port private structures */
 	for (i = 0; i < serial->num_ports; ++i) {
-		mos7840_port = kmalloc(sizeof(struct moschip_port), GFP_KERNEL);
+		mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
 		if (mos7840_port == NULL) {
 			err("%s - Out of memory", __FUNCTION__);
 			return -ENOMEM;
 		}
-		memset(mos7840_port, 0, sizeof(struct moschip_port));
 
 		/* Initialize all port interrupt end point to port 0 int endpoint *
 		 * Our device has only one interrupt end point comman to all port */
@@ -2787,7 +2786,7 @@
 				    i + 1, status);
 
 		}
-		mos7840_port->control_urb = usb_alloc_urb(0, SLAB_ATOMIC);
+		mos7840_port->control_urb = usb_alloc_urb(0, GFP_ATOMIC);
 		mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL);
 
 	}
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 0610409..054abee 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -95,8 +95,7 @@
 {
 	dbg("%s - port %d", __FUNCTION__, port->number);
 
-	if (port->interrupt_in_urb)
-		usb_kill_urb(port->interrupt_in_urb);
+	usb_kill_urb(port->interrupt_in_urb);
 }
 
 static int navman_write(struct usb_serial_port *port,
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 07400c0..ae98d8c 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -228,6 +228,7 @@
 /* null entry */
 static struct usb_device_id ti_id_table_3410[1+TI_EXTRA_VID_PID_COUNT+1] = {
 	{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
+	{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
 };
 
 static struct usb_device_id ti_id_table_5052[4+TI_EXTRA_VID_PID_COUNT+1] = {
@@ -239,6 +240,7 @@
 
 static struct usb_device_id ti_id_table_combined[] = {
 	{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
+	{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
 	{ USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) },
 	{ USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
 	{ USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
@@ -459,13 +461,12 @@
 
 	/* set up port structures */
 	for (i = 0; i < serial->num_ports; ++i) {
-		tport = kmalloc(sizeof(struct ti_port), GFP_KERNEL);
+		tport = kzalloc(sizeof(struct ti_port), GFP_KERNEL);
 		if (tport == NULL) {
 			dev_err(&dev->dev, "%s - out of memory\n", __FUNCTION__);
 			status = -ENOMEM;
 			goto free_tports;
 		}
-		memset(tport, 0, sizeof(struct ti_port));
 		spin_lock_init(&tport->tp_lock);
 		tport->tp_uart_base_addr = (i == 0 ? TI_UART1_BASE_ADDR : TI_UART2_BASE_ADDR);
 		tport->tp_flags = low_latency ? ASYNC_LOW_LATENCY : 0;
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index 02c1aeb..b5541bf 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -28,6 +28,7 @@
 /* Vendor and product ids */
 #define TI_VENDOR_ID			0x0451
 #define TI_3410_PRODUCT_ID		0x3410
+#define TI_3410_EZ430_ID		0xF430  /* TI ez430 development tool */
 #define TI_5052_BOOT_PRODUCT_ID		0x5052	/* no EEPROM, no firmware */
 #define TI_5152_BOOT_PRODUCT_ID		0x5152	/* no EEPROM, no firmware */
 #define TI_5052_EEPROM_PRODUCT_ID	0x505A	/* EEPROM, no firmware */
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 8006e51..3d5072f 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -533,9 +533,10 @@
 	schedule_work(&port->work);
 }
 
-static void usb_serial_port_work(void *private)
+static void usb_serial_port_work(struct work_struct *work)
 {
-	struct usb_serial_port *port = private;
+	struct usb_serial_port *port =
+		container_of(work, struct usb_serial_port, work);
 	struct tty_struct *tty;
 
 	dbg("%s - port %d", __FUNCTION__, port->number);
@@ -799,7 +800,7 @@
 		port->serial = serial;
 		spin_lock_init(&port->lock);
 		mutex_init(&port->mutex);
-		INIT_WORK(&port->work, usb_serial_port_work, port);
+		INIT_WORK(&port->work, usb_serial_port_work);
 		serial->port[i] = port;
 	}
 
@@ -952,32 +953,28 @@
 		port = serial->port[i];
 		if (!port)
 			continue;
-		if (port->read_urb)
-			usb_free_urb (port->read_urb);
+		usb_free_urb(port->read_urb);
 		kfree(port->bulk_in_buffer);
 	}
 	for (i = 0; i < num_bulk_out; ++i) {
 		port = serial->port[i];
 		if (!port)
 			continue;
-		if (port->write_urb)
-			usb_free_urb (port->write_urb);
+		usb_free_urb(port->write_urb);
 		kfree(port->bulk_out_buffer);
 	}
 	for (i = 0; i < num_interrupt_in; ++i) {
 		port = serial->port[i];
 		if (!port)
 			continue;
-		if (port->interrupt_in_urb)
-			usb_free_urb (port->interrupt_in_urb);
+		usb_free_urb(port->interrupt_in_urb);
 		kfree(port->interrupt_in_buffer);
 	}
 	for (i = 0; i < num_interrupt_out; ++i) {
 		port = serial->port[i];
 		if (!port)
 			continue;
-		if (port->interrupt_out_urb)
-			usb_free_urb (port->interrupt_out_urb);
+		usb_free_urb(port->interrupt_out_urb);
 		kfree(port->interrupt_out_buffer);
 	}
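usb-serial.c also picks up the reworked workqueue API: INIT_WORK() no longer takes a context pointer, the handler receives the work_struct itself, and the owning object is recovered with container_of(). Where container_of() alone cannot reach the needed data, a back-pointer is stored instead, as the whiteheat hunk further down does with info->port. A minimal sketch of the pattern, with hypothetical names:

#include <linux/workqueue.h>

struct example_dev {
	struct work_struct work;
	int id;
};

static void example_work_fn(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, work);

	/* ... act on dev->id ... */
}

static void example_init(struct example_dev *dev)
{
	INIT_WORK(&dev->work, example_work_fn);	/* the old third (data) argument is gone */
}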
 
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
new file mode 100644
index 0000000..257a5e4
--- /dev/null
+++ b/drivers/usb/serial/usb_debug.c
@@ -0,0 +1,65 @@
+/*
+ * USB Debug cable driver
+ *
+ * Copyright (C) 2006 Greg Kroah-Hartman <greg@kroah.com>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License version
+ *	2 as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+
+static struct usb_device_id id_table [] = {
+	{ USB_DEVICE(0x0525, 0x127a) },
+	{ },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver debug_driver = {
+	.name =		"debug",
+	.probe =	usb_serial_probe,
+	.disconnect =	usb_serial_disconnect,
+	.id_table =	id_table,
+	.no_dynamic_id = 	1,
+};
+
+static struct usb_serial_driver debug_device = {
+	.driver = {
+		.owner =	THIS_MODULE,
+		.name =		"debug",
+	},
+	.id_table =		id_table,
+	.num_interrupt_in =	NUM_DONT_CARE,
+	.num_bulk_in =		NUM_DONT_CARE,
+	.num_bulk_out =		NUM_DONT_CARE,
+	.num_ports =		1,
+};
+
+static int __init debug_init(void)
+{
+	int retval;
+
+	retval = usb_serial_register(&debug_device);
+	if (retval)
+		return retval;
+	retval = usb_register(&debug_driver);
+	if (retval)
+		usb_serial_deregister(&debug_device);
+	return retval;
+}
+
+static void __exit debug_exit(void)
+{
+	usb_deregister(&debug_driver);
+	usb_serial_deregister(&debug_device);
+}
+
+module_init(debug_init);
+module_exit(debug_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index befe2e1..eef5eaa 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -348,8 +348,7 @@
 			 
 	/* shutdown our urbs */
 	usb_kill_urb(port->read_urb);
-	if (port->interrupt_in_urb)
-		usb_kill_urb(port->interrupt_in_urb);
+	usb_kill_urb(port->interrupt_in_urb);
 
 	/* Try to send shutdown message, if the device is gone, this will just fail. */
 	transfer_buffer =  kmalloc (0x12, GFP_KERNEL);
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 4d1cd7a..154c7d2 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -227,6 +227,7 @@
 	struct list_head	rx_urbs_submitted;
 	struct list_head	rx_urb_q;
 	struct work_struct	rx_work;
+	struct usb_serial_port	*port;
 	struct list_head	tx_urbs_free;
 	struct list_head	tx_urbs_submitted;
 };
@@ -241,7 +242,7 @@
 static int start_port_read(struct usb_serial_port *port);
 static struct whiteheat_urb_wrap *urb_to_wrap(struct urb *urb, struct list_head *head);
 static struct list_head *list_first(struct list_head *head);
-static void rx_data_softint(void *private);
+static void rx_data_softint(struct work_struct *work);
 
 static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize);
 static int firm_open(struct usb_serial_port *port);
@@ -424,7 +425,8 @@
 		spin_lock_init(&info->lock);
 		info->flags = 0;
 		info->mcr = 0;
-		INIT_WORK(&info->rx_work, rx_data_softint, port);
+		INIT_WORK(&info->rx_work, rx_data_softint);
+		info->port = port;
 
 		INIT_LIST_HEAD(&info->rx_urbs_free);
 		INIT_LIST_HEAD(&info->rx_urbs_submitted);
@@ -949,7 +951,7 @@
 	spin_unlock_irqrestore(&info->lock, flags);
 
 	if (actually_throttled)
-		rx_data_softint(port);
+		rx_data_softint(&info->rx_work);
 
 	return;
 }
@@ -1400,10 +1402,11 @@
 }
 
 
-static void rx_data_softint(void *private)
+static void rx_data_softint(struct work_struct *work)
 {
-	struct usb_serial_port *port = (struct usb_serial_port *)private;
-	struct whiteheat_private *info = usb_get_serial_port_data(port);
+	struct whiteheat_private *info =
+		container_of(work, struct whiteheat_private, rx_work);
+	struct usb_serial_port *port = info->port;
 	struct tty_struct *tty = port->tty;
 	struct whiteheat_urb_wrap *wrap;
 	struct urb *urb;
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 3baf448..e565d3d 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -76,7 +76,7 @@
 	input_sync(dev);
 
 resubmit:
-	status = usb_submit_urb (urb, SLAB_ATOMIC);
+	status = usb_submit_urb (urb, GFP_ATOMIC);
 	if (status)
 		err ("can't resubmit intr, %s-%s/input0, status %d",
 			onetouch->udev->bus->bus_name,
@@ -142,10 +142,7 @@
 		return -ENODEV;
 
 	endpoint = &interface->endpoint[2].desc;
-	if (!(endpoint->bEndpointAddress & USB_DIR_IN))
-		return -ENODEV;
-	if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
-			!= USB_ENDPOINT_XFER_INT)
+	if (!usb_endpoint_is_int_in(endpoint))
 		return -ENODEV;
 
 	pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
@@ -157,7 +154,7 @@
 		goto fail1;
 
 	onetouch->data = usb_buffer_alloc(udev, ONETOUCH_PKT_LEN,
-					  SLAB_ATOMIC, &onetouch->data_dma);
+					  GFP_ATOMIC, &onetouch->data_dma);
 	if (!onetouch->data)
 		goto fail1;
 
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 47644b5..323293a 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -427,7 +427,7 @@
 	US_DEBUGP("%s: xfer %u bytes, %d entries\n", __FUNCTION__,
 			length, num_sg);
 	result = usb_sg_init(&us->current_sg, us->pusb_dev, pipe, 0,
-			sg, num_sg, length, SLAB_NOIO);
+			sg, num_sg, length, GFP_NOIO);
 	if (result) {
 		US_DEBUGP("usb_sg_init returned %d\n", result);
 		return USB_STOR_XFER_ERROR;
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index efb047f..db8b260 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1318,6 +1318,16 @@
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init,
 		0 ),
 
+/* Reported by Jaco Kroon <jaco@kroon.co.za>
+ * The usb-storage module found on the Digitech GNX4 (and supposedly other
+ * devices) misbehaves and causes a bunch of invalid I/O errors.
+ */
+UNUSUAL_DEV(  0x1210, 0x0003, 0x0100, 0x0100,
+		"Digitech HMG",
+		"DigiTech Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_IGNORE_RESIDUE ),
+
 /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
 UNUSUAL_DEV(  0x132b, 0x000b, 0x0001, 0x0001,
 		"Minolta",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index b8d6031..7064450 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -49,7 +49,7 @@
 
 #include <linux/sched.h>
 #include <linux/errno.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -740,18 +740,16 @@
 		ep = &altsetting->endpoint[i].desc;
 
 		/* Is it a BULK endpoint? */
-		if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
-				== USB_ENDPOINT_XFER_BULK) {
+		if (usb_endpoint_xfer_bulk(ep)) {
 			/* BULK in or out? */
-			if (ep->bEndpointAddress & USB_DIR_IN)
+			if (usb_endpoint_dir_in(ep))
 				ep_in = ep;
 			else
 				ep_out = ep;
 		}
 
 		/* Is it an interrupt endpoint? */
-		else if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
-				== USB_ENDPOINT_XFER_INT) {
+		else if (usb_endpoint_xfer_int(ep)) {
 			ep_int = ep;
 		}
 	}
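The onetouch and usb-storage hunks swap the open-coded bmAttributes/bEndpointAddress tests for the endpoint helper functions (onetouch uses the combined usb_endpoint_is_int_in()). A rough equivalence, assuming the helpers behave as the replaced expressions did:

#include <linux/usb.h>

static int example_is_int_in(const struct usb_endpoint_descriptor *ep)
{
	/* equivalent to testing (bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
	 * == USB_ENDPOINT_XFER_INT and bEndpointAddress & USB_DIR_IN by hand */
	return usb_endpoint_xfer_int(ep) && usb_endpoint_dir_in(ep);
}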
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 302174b..31f476a 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -383,9 +383,9 @@
 		softback_top = 0;
 }
 
-static void fb_flashcursor(void *private)
+static void fb_flashcursor(struct work_struct *work)
 {
-	struct fb_info *info = private;
+	struct fb_info *info = container_of(work, struct fb_info, queue);
 	struct fbcon_ops *ops = info->fbcon_par;
 	struct display *p;
 	struct vc_data *vc = NULL;
@@ -442,7 +442,7 @@
 	if ((!info->queue.func || info->queue.func == fb_flashcursor) &&
 	    !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) {
 		if (!info->queue.func)
-			INIT_WORK(&info->queue, fb_flashcursor, info);
+			INIT_WORK(&info->queue, fb_flashcursor);
 
 		init_timer(&ops->cursor_timer);
 		ops->cursor_timer.function = cursor_timer_handler;
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 93ffcdd..e973a87 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1296,14 +1296,14 @@
 			break;
 	fb_info->node = i;
 
-	fb_info->class_device = class_device_create(fb_class, NULL, MKDEV(FB_MAJOR, i),
-				    fb_info->device, "fb%d", i);
-	if (IS_ERR(fb_info->class_device)) {
+	fb_info->dev = device_create(fb_class, fb_info->device,
+				     MKDEV(FB_MAJOR, i), "fb%d", i);
+	if (IS_ERR(fb_info->dev)) {
 		/* Not fatal */
-		printk(KERN_WARNING "Unable to create class_device for framebuffer %d; errno = %ld\n", i, PTR_ERR(fb_info->class_device));
-		fb_info->class_device = NULL;
+		printk(KERN_WARNING "Unable to create device for framebuffer %d; errno = %ld\n", i, PTR_ERR(fb_info->dev));
+		fb_info->dev = NULL;
 	} else
-		fb_init_class_device(fb_info);
+		fb_init_device(fb_info);
 
 	if (fb_info->pixmap.addr == NULL) {
 		fb_info->pixmap.addr = kmalloc(FBPIXMAPSIZE, GFP_KERNEL);
@@ -1356,8 +1356,8 @@
 	fb_destroy_modelist(&fb_info->modelist);
 	registered_fb[i]=NULL;
 	num_registered_fb--;
-	fb_cleanup_class_device(fb_info);
-	class_device_destroy(fb_class, MKDEV(FB_MAJOR, i));
+	fb_cleanup_device(fb_info);
+	device_destroy(fb_class, MKDEV(FB_MAJOR, i));
 	event.info = fb_info;
 	fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
 	return 0;
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index d3a5041..323bdf6 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -73,7 +73,7 @@
  *
  * @info: frame buffer info structure
  *
- * Drop the reference count of the class_device embedded in the
+ * Drop the reference count of the device embedded in the
  * framebuffer info structure.
  *
  */
@@ -120,10 +120,10 @@
 	                m, mode->xres, mode->yres, v, mode->refresh);
 }
 
-static ssize_t store_mode(struct class_device *class_device, const char * buf,
-			  size_t count)
+static ssize_t store_mode(struct device *device, struct device_attribute *attr,
+			  const char *buf, size_t count)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 	char mstr[100];
 	struct fb_var_screeninfo var;
 	struct fb_modelist *modelist;
@@ -151,9 +151,10 @@
 	return -EINVAL;
 }
 
-static ssize_t show_mode(struct class_device *class_device, char *buf)
+static ssize_t show_mode(struct device *device, struct device_attribute *attr,
+			 char *buf)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 
 	if (!fb_info->mode)
 		return 0;
@@ -161,10 +162,11 @@
 	return mode_string(buf, 0, fb_info->mode);
 }
 
-static ssize_t store_modes(struct class_device *class_device, const char * buf,
-			   size_t count)
+static ssize_t store_modes(struct device *device,
+			   struct device_attribute *attr,
+			   const char *buf, size_t count)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 	LIST_HEAD(old_list);
 	int i = count / sizeof(struct fb_videomode);
 
@@ -186,9 +188,10 @@
 	return 0;
 }
 
-static ssize_t show_modes(struct class_device *class_device, char *buf)
+static ssize_t show_modes(struct device *device, struct device_attribute *attr,
+			  char *buf)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 	unsigned int i;
 	struct list_head *pos;
 	struct fb_modelist *modelist;
@@ -203,10 +206,10 @@
 	return i;
 }
 
-static ssize_t store_bpp(struct class_device *class_device, const char * buf,
-			 size_t count)
+static ssize_t store_bpp(struct device *device, struct device_attribute *attr,
+			 const char *buf, size_t count)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 	struct fb_var_screeninfo var;
 	char ** last = NULL;
 	int err;
@@ -218,16 +221,18 @@
 	return count;
 }
 
-static ssize_t show_bpp(struct class_device *class_device, char *buf)
+static ssize_t show_bpp(struct device *device, struct device_attribute *attr,
+			char *buf)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 	return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->var.bits_per_pixel);
 }
 
-static ssize_t store_rotate(struct class_device *class_device, const char *buf,
-			    size_t count)
+static ssize_t store_rotate(struct device *device,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 	struct fb_var_screeninfo var;
 	char **last = NULL;
 	int err;
@@ -242,17 +247,19 @@
 }
 
 
-static ssize_t show_rotate(struct class_device *class_device, char *buf)
+static ssize_t show_rotate(struct device *device,
+			   struct device_attribute *attr, char *buf)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->var.rotate);
 }
 
-static ssize_t store_virtual(struct class_device *class_device,
-			     const char * buf, size_t count)
+static ssize_t store_virtual(struct device *device,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 	struct fb_var_screeninfo var;
 	char *last = NULL;
 	int err;
@@ -269,23 +276,26 @@
 	return count;
 }
 
-static ssize_t show_virtual(struct class_device *class_device, char *buf)
+static ssize_t show_virtual(struct device *device,
+			    struct device_attribute *attr, char *buf)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 	return snprintf(buf, PAGE_SIZE, "%d,%d\n", fb_info->var.xres_virtual,
 			fb_info->var.yres_virtual);
 }
 
-static ssize_t show_stride(struct class_device *class_device, char *buf)
+static ssize_t show_stride(struct device *device,
+			   struct device_attribute *attr, char *buf)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 	return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->fix.line_length);
 }
 
-static ssize_t store_blank(struct class_device *class_device, const char * buf,
-			   size_t count)
+static ssize_t store_blank(struct device *device,
+			   struct device_attribute *attr,
+			   const char *buf, size_t count)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 	char *last = NULL;
 	int err;
 
@@ -299,42 +309,48 @@
 	return count;
 }
 
-static ssize_t show_blank(struct class_device *class_device, char *buf)
+static ssize_t show_blank(struct device *device,
+			  struct device_attribute *attr, char *buf)
 {
-//	struct fb_info *fb_info = class_get_devdata(class_device);
+//	struct fb_info *fb_info = dev_get_drvdata(device);
 	return 0;
 }
 
-static ssize_t store_console(struct class_device *class_device,
-			     const char * buf, size_t count)
+static ssize_t store_console(struct device *device,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
 {
-//	struct fb_info *fb_info = class_get_devdata(class_device);
+//	struct fb_info *fb_info = dev_get_drvdata(device);
 	return 0;
 }
 
-static ssize_t show_console(struct class_device *class_device, char *buf)
+static ssize_t show_console(struct device *device,
+			    struct device_attribute *attr, char *buf)
 {
-//	struct fb_info *fb_info = class_get_devdata(class_device);
+//	struct fb_info *fb_info = dev_get_drvdata(device);
 	return 0;
 }
 
-static ssize_t store_cursor(struct class_device *class_device,
-			    const char * buf, size_t count)
+static ssize_t store_cursor(struct device *device,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
 {
-//	struct fb_info *fb_info = class_get_devdata(class_device);
+//	struct fb_info *fb_info = dev_get_drvdata(device);
 	return 0;
 }
 
-static ssize_t show_cursor(struct class_device *class_device, char *buf)
+static ssize_t show_cursor(struct device *device,
+			   struct device_attribute *attr, char *buf)
 {
-//	struct fb_info *fb_info = class_get_devdata(class_device);
+//	struct fb_info *fb_info = dev_get_drvdata(device);
 	return 0;
 }
 
-static ssize_t store_pan(struct class_device *class_device, const char * buf,
-			 size_t count)
+static ssize_t store_pan(struct device *device,
+			 struct device_attribute *attr,
+			 const char *buf, size_t count)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 	struct fb_var_screeninfo var;
 	char *last = NULL;
 	int err;
@@ -355,24 +371,27 @@
 	return count;
 }
 
-static ssize_t show_pan(struct class_device *class_device, char *buf)
+static ssize_t show_pan(struct device *device,
+			struct device_attribute *attr, char *buf)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 	return snprintf(buf, PAGE_SIZE, "%d,%d\n", fb_info->var.xoffset,
 			fb_info->var.xoffset);
 }
 
-static ssize_t show_name(struct class_device *class_device, char *buf)
+static ssize_t show_name(struct device *device,
+			 struct device_attribute *attr, char *buf)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 
 	return snprintf(buf, PAGE_SIZE, "%s\n", fb_info->fix.id);
 }
 
-static ssize_t store_fbstate(struct class_device *class_device,
-			const char *buf, size_t count)
+static ssize_t store_fbstate(struct device *device,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 	u32 state;
 	char *last = NULL;
 
@@ -385,17 +404,19 @@
 	return count;
 }
 
-static ssize_t show_fbstate(struct class_device *class_device, char *buf)
+static ssize_t show_fbstate(struct device *device,
+			    struct device_attribute *attr, char *buf)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 	return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->state);
 }
 
 #ifdef CONFIG_FB_BACKLIGHT
-static ssize_t store_bl_curve(struct class_device *class_device,
-		const char *buf, size_t count)
+static ssize_t store_bl_curve(struct device *device,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 	u8 tmp_curve[FB_BACKLIGHT_LEVELS];
 	unsigned int i;
 
@@ -432,9 +453,10 @@
 	return count;
 }
 
-static ssize_t show_bl_curve(struct class_device *class_device, char *buf)
+static ssize_t show_bl_curve(struct device *device,
+			     struct device_attribute *attr, char *buf)
 {
-	struct fb_info *fb_info = class_get_devdata(class_device);
+	struct fb_info *fb_info = dev_get_drvdata(device);
 	ssize_t len = 0;
 	unsigned int i;
 
@@ -465,7 +487,7 @@
 /* When cmap is added back in it should be a binary attribute
  * not a text one. Consideration should also be given to converting
  * fbdev to use configfs instead of sysfs */
-static struct class_device_attribute class_device_attrs[] = {
+static struct device_attribute device_attrs[] = {
 	__ATTR(bits_per_pixel, S_IRUGO|S_IWUSR, show_bpp, store_bpp),
 	__ATTR(blank, S_IRUGO|S_IWUSR, show_blank, store_blank),
 	__ATTR(console, S_IRUGO|S_IWUSR, show_console, store_console),
@@ -483,17 +505,16 @@
 #endif
 };
 
-int fb_init_class_device(struct fb_info *fb_info)
+int fb_init_device(struct fb_info *fb_info)
 {
 	int i, error = 0;
 
-	class_set_devdata(fb_info->class_device, fb_info);
+	dev_set_drvdata(fb_info->dev, fb_info);
 
 	fb_info->class_flag |= FB_SYSFS_FLAG_ATTR;
 
-	for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) {
-		error = class_device_create_file(fb_info->class_device,
-						 &class_device_attrs[i]);
+	for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
+		error = device_create_file(fb_info->dev, &device_attrs[i]);
 
 		if (error)
 			break;
@@ -501,22 +522,20 @@
 
 	if (error) {
 		while (--i >= 0)
-			class_device_remove_file(fb_info->class_device,
-						 &class_device_attrs[i]);
+			device_remove_file(fb_info->dev, &device_attrs[i]);
 		fb_info->class_flag &= ~FB_SYSFS_FLAG_ATTR;
 	}
 
 	return 0;
 }
 
-void fb_cleanup_class_device(struct fb_info *fb_info)
+void fb_cleanup_device(struct fb_info *fb_info)
 {
 	unsigned int i;
 
 	if (fb_info->class_flag & FB_SYSFS_FLAG_ATTR) {
-		for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
-			class_device_remove_file(fb_info->class_device,
-						 &class_device_attrs[i]);
+		for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
+			device_remove_file(fb_info->dev, &device_attrs[i]);
 
 		fb_info->class_flag &= ~FB_SYSFS_FLAG_ATTR;
 	}
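fbmem.c and fbsysfs.c move the framebuffer class from the class_device interface to plain struct device: device_create()/device_destroy() replace class_device_create()/class_device_destroy(), the attribute callbacks gain a struct device_attribute * argument, and private data comes from dev_get_drvdata(). A hypothetical attribute written against the new interface:

#include <linux/device.h>
#include <linux/fb.h>
#include <linux/kernel.h>

static ssize_t show_example_bpp(struct device *device,
				struct device_attribute *attr, char *buf)
{
	struct fb_info *fb_info = dev_get_drvdata(device);

	return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->var.bits_per_pixel);
}

static DEVICE_ATTR(example_bpp, S_IRUGO, show_example_bpp, NULL);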
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index 0d3643f..a454dcb 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -380,7 +380,7 @@
 }
 
 static struct pci_device_id gxfb_id_table[] = {
-	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_VIDEO,
+	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_GX_VIDEO,
 	  PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16,
 	  0xff0000, 0 },
 	{ 0, }
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index fdb33cd..cb26c6d 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -34,6 +34,7 @@
 #include <asm/prom.h>
 #include <asm/pgtable.h>
 #include <asm/of_device.h>
+#include <asm/of_platform.h>
 
 #include "macmodes.h"
 #include "platinumfb.h"
@@ -682,14 +683,14 @@
 		return -ENODEV;
 	platinumfb_setup(option);
 #endif
-	of_register_driver(&platinum_driver);
+	of_register_platform_driver(&platinum_driver);
 
 	return 0;
 }
 
 static void __exit platinumfb_exit(void)
 {
-	of_unregister_driver(&platinum_driver);	
+	of_unregister_platform_driver(&platinum_driver);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 8a8ae55..38eb0b6 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -964,9 +964,10 @@
  * Our LCD controller task (which is called when we blank or unblank)
  * via keventd.
  */
-static void pxafb_task(void *dummy)
+static void pxafb_task(struct work_struct *work)
 {
-	struct pxafb_info *fbi = dummy;
+	struct pxafb_info *fbi =
+		container_of(work, struct pxafb_info, task);
 	u_int state = xchg(&fbi->task_state, -1);
 
 	set_ctrlr_state(fbi, state);
@@ -1159,7 +1160,7 @@
 	}
 
 	init_waitqueue_head(&fbi->ctrlr_wait);
-	INIT_WORK(&fbi->task, pxafb_task, fbi);
+	INIT_WORK(&fbi->task, pxafb_task);
 	init_MUTEX(&fbi->ctrlr_sem);
 
 	return fbi;
diff --git a/drivers/w1/Makefile b/drivers/w1/Makefile
index 93845a2..6bb0b54 100644
--- a/drivers/w1/Makefile
+++ b/drivers/w1/Makefile
@@ -2,10 +2,6 @@
 # Makefile for the Dallas's 1-wire bus.
 #
 
-ifeq ($(CONFIG_W1_DS2433_CRC), y)
-EXTRA_CFLAGS	+= -DCONFIG_W1_F23_CRC
-endif
-
 obj-$(CONFIG_W1)	+= wire.o
 wire-objs		:= w1.o w1_int.o w1_family.o w1_netlink.o w1_io.o
 
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 70e21e2..725dcfd 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -2,10 +2,6 @@
 # Makefile for the Dallas's 1-wire slaves.
 #
 
-ifeq ($(CONFIG_W1_SLAVE_DS2433_CRC), y)
-EXTRA_CFLAGS += -DCONFIG_W1_F23_CRC
-endif
-
 obj-$(CONFIG_W1_SLAVE_THERM)	+= w1_therm.o
 obj-$(CONFIG_W1_SLAVE_SMEM)	+= w1_smem.o
 obj-$(CONFIG_W1_SLAVE_DS2433)	+= w1_ds2433.o
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index 2ac238f..8ea17a5 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -13,7 +13,7 @@
 #include <linux/device.h>
 #include <linux/types.h>
 #include <linux/delay.h>
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 #include <linux/crc16.h>
 
 #define CRC16_INIT		0
@@ -62,7 +62,7 @@
 	return count;
 }
 
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data,
 				int block)
 {
@@ -89,13 +89,13 @@
 
 	return 0;
 }
-#endif	/* CONFIG_W1_F23_CRC */
+#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */
 
 static ssize_t w1_f23_read_bin(struct kobject *kobj, char *buf, loff_t off,
 			       size_t count)
 {
 	struct w1_slave *sl = kobj_to_w1_slave(kobj);
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 	struct w1_f23_data *data = sl->family_data;
 	int i, min_page, max_page;
 #else
@@ -107,7 +107,7 @@
 
 	mutex_lock(&sl->master->mutex);
 
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 
 	min_page = (off >> W1_PAGE_BITS);
 	max_page = (off + count - 1) >> W1_PAGE_BITS;
@@ -119,7 +119,7 @@
 	}
 	memcpy(buf, &data->memory[off], count);
 
-#else 	/* CONFIG_W1_F23_CRC */
+#else 	/* CONFIG_W1_SLAVE_DS2433_CRC */
 
 	/* read directly from the EEPROM */
 	if (w1_reset_select_slave(sl)) {
@@ -133,7 +133,7 @@
 	w1_write_block(sl->master, wrbuf, 3);
 	w1_read_block(sl->master, buf, count);
 
-#endif	/* CONFIG_W1_F23_CRC */
+#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */
 
 out_up:
 	mutex_unlock(&sl->master->mutex);
@@ -208,7 +208,7 @@
 	if ((count = w1_f23_fix_count(off, count, W1_EEPROM_SIZE)) == 0)
 		return 0;
 
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 	/* can only write full blocks in cached mode */
 	if ((off & W1_PAGE_MASK) || (count & W1_PAGE_MASK)) {
 		dev_err(&sl->dev, "invalid offset/count off=%d cnt=%zd\n",
@@ -223,7 +223,7 @@
 			return -EINVAL;
 		}
 	}
-#endif	/* CONFIG_W1_F23_CRC */
+#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */
 
 	mutex_lock(&sl->master->mutex);
 
@@ -262,7 +262,7 @@
 static int w1_f23_add_slave(struct w1_slave *sl)
 {
 	int err;
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 	struct w1_f23_data *data;
 
 	data = kmalloc(sizeof(struct w1_f23_data), GFP_KERNEL);
@@ -271,24 +271,24 @@
 	memset(data, 0, sizeof(struct w1_f23_data));
 	sl->family_data = data;
 
-#endif	/* CONFIG_W1_F23_CRC */
+#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */
 
 	err = sysfs_create_bin_file(&sl->dev.kobj, &w1_f23_bin_attr);
 
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 	if (err)
 		kfree(data);
-#endif	/* CONFIG_W1_F23_CRC */
+#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */
 
 	return err;
 }
 
 static void w1_f23_remove_slave(struct w1_slave *sl)
 {
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 	kfree(sl->family_data);
 	sl->family_data = NULL;
-#endif	/* CONFIG_W1_F23_CRC */
+#endif	/* CONFIG_W1_SLAVE_DS2433_CRC */
 	sysfs_remove_bin_file(&sl->dev.kobj, &w1_f23_bin_attr);
 }
 
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 5372cfc..b022fff 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -24,6 +24,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/sched.h>
 #include <linux/device.h>
 #include <linux/types.h>
 #include <linux/delay.h>
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index de3e979..63c0724 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -31,6 +31,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 #include <asm/atomic.h>
 
diff --git a/fs/9p/conv.c b/fs/9p/conv.c
index 56d88c1..a3ed571 100644
--- a/fs/9p/conv.c
+++ b/fs/9p/conv.c
@@ -27,6 +27,7 @@
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
+#include <linux/sched.h>
 #include <linux/idr.h>
 #include <asm/uaccess.h>
 #include "debug.h"
diff --git a/fs/9p/fcall.c b/fs/9p/fcall.c
index 8556097..dc336a6 100644
--- a/fs/9p/fcall.c
+++ b/fs/9p/fcall.c
@@ -27,6 +27,7 @@
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
+#include <linux/sched.h>
 #include <linux/idr.h>
 
 #include "debug.h"
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 70492cc..2750720 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -23,6 +23,7 @@
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
+#include <linux/sched.h>
 #include <linux/idr.h>
 
 #include "debug.h"
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index 90a79c7..944273c 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -110,8 +110,8 @@
 };
 
 static int v9fs_poll_proc(void *);
-static void v9fs_read_work(void *);
-static void v9fs_write_work(void *);
+static void v9fs_read_work(struct work_struct *work);
+static void v9fs_write_work(struct work_struct *work);
 static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
 			  poll_table * p);
 static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
@@ -297,8 +297,8 @@
 	m->rbuf = NULL;
 	m->wpos = m->wsize = 0;
 	m->wbuf = NULL;
-	INIT_WORK(&m->rq, v9fs_read_work, m);
-	INIT_WORK(&m->wq, v9fs_write_work, m);
+	INIT_WORK(&m->rq, v9fs_read_work);
+	INIT_WORK(&m->wq, v9fs_write_work);
 	m->wsched = 0;
 	memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
 	m->poll_task = NULL;
@@ -458,13 +458,13 @@
 /**
  * v9fs_write_work - called when a transport can send some data
  */
-static void v9fs_write_work(void *a)
+static void v9fs_write_work(struct work_struct *work)
 {
 	int n, err;
 	struct v9fs_mux_data *m;
 	struct v9fs_req *req;
 
-	m = a;
+	m = container_of(work, struct v9fs_mux_data, wq);
 
 	if (m->err < 0) {
 		clear_bit(Wworksched, &m->wsched);
@@ -564,7 +564,7 @@
 /**
  * v9fs_read_work - called when there is some data to be read from a transport
  */
-static void v9fs_read_work(void *a)
+static void v9fs_read_work(struct work_struct *work)
 {
 	int n, err;
 	struct v9fs_mux_data *m;
@@ -572,7 +572,7 @@
 	struct v9fs_fcall *rcall;
 	char *rbuf;
 
-	m = a;
+	m = container_of(work, struct v9fs_mux_data, rq);
 
 	if (m->err < 0)
 		return;
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 0f62804..0b96fae 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
+#include <linux/sched.h>
 #include <linux/parser.h>
 #include <linux/idr.h>
 
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index e32d597..905c882 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -30,6 +30,7 @@
 #include <linux/stat.h>
 #include <linux/string.h>
 #include <linux/smp_lock.h>
+#include <linux/sched.h>
 #include <linux/inet.h>
 #include <linux/idr.h>
 
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index c3c47ed..79e6f9c 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
+#include <linux/sched.h>
 #include <linux/file.h>
 #include <linux/stat.h>
 #include <linux/string.h>
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 5241c60..18f26cd 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -256,7 +256,7 @@
 v9fs_create(struct v9fs_session_info *v9ses, u32 pfid, char *name, u32 perm,
 	u8 mode, char *extension, u32 *fidp, struct v9fs_qid *qid, u32 *iounit)
 {
-	u32 fid;
+	int fid;
 	int err;
 	struct v9fs_fcall *fcall;
 
@@ -310,7 +310,7 @@
 v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry)
 {
 	int err;
-	u32 nfid;
+	int nfid;
 	struct v9fs_fid *ret;
 	struct v9fs_fcall *fcall;
 
diff --git a/fs/Kconfig b/fs/Kconfig
index 7b1511d..b3b5aa0 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -972,7 +972,7 @@
 
 	Some system agents rely on the information in sysfs to operate.
 	/sbin/hotplug uses device and object attributes in sysfs to assist in
-	delegating policy decisions, like persistantly naming devices.
+	delegating policy decisions, like persistently naming devices.
 
 	sysfs is currently used by the block subsystem to mount the root
 	partition.  If sysfs is disabled you must specify the boot device on
@@ -1145,7 +1145,7 @@
 	help
 	  The BeOS File System (BeFS) is the native file system of Be, Inc's
 	  BeOS. Notable features include support for arbitrary attributes
-	  on files and directories, and database-like indeces on selected
+	  on files and directories, and database-like indices on selected
 	  attributes. (Also note that this driver doesn't make those features
 	  available at this time). It is a 64 bit filesystem, so it supports
 	  extremely large volumes and files.
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 9ade139..5023351 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -36,7 +36,7 @@
 	va_list args;
 
 	va_start(args, fmt);
-	vsprintf(error_buf, fmt, args);
+	vsnprintf(error_buf, sizeof(error_buf), fmt, args);
 	va_end(args);
 
 	printk(KERN_CRIT "ADFS-fs error (device %s)%s%s: %s\n",
@@ -212,12 +212,12 @@
 	return 0;
 }
 
-static kmem_cache_t *adfs_inode_cachep;
+static struct kmem_cache *adfs_inode_cachep;
 
 static struct inode *adfs_alloc_inode(struct super_block *sb)
 {
 	struct adfs_inode_info *ei;
-	ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, SLAB_KERNEL);
+	ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
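adfs is one of many filesystems in this series converted from the removed kmem_cache_t typedef and SLAB_* flags to struct kmem_cache and gfp_t constants. The resulting slab idiom, sketched with placeholder names:

#include <linux/slab.h>

struct example_inode_info {
	unsigned long state;
};

static struct kmem_cache *example_cachep;	/* was: kmem_cache_t * */

static struct example_inode_info *example_alloc(void)
{
	return kmem_cache_alloc(example_cachep, GFP_KERNEL);	/* was: SLAB_KERNEL */
}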
@@ -228,7 +228,7 @@
 	kmem_cache_free(adfs_inode_cachep, ADFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;
 
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index ccd624e..f4de4b9 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -445,7 +445,7 @@
 	va_list	 args;
 
 	va_start(args,fmt);
-	vsprintf(ErrorBuffer,fmt,args);
+	vsnprintf(ErrorBuffer,sizeof(ErrorBuffer),fmt,args);
 	va_end(args);
 
 	printk(KERN_CRIT "AFFS error (device %s): %s(): %s\n", sb->s_id,
@@ -461,7 +461,7 @@
 	va_list	 args;
 
 	va_start(args,fmt);
-	vsprintf(ErrorBuffer,fmt,args);
+	vsnprintf(ErrorBuffer,sizeof(ErrorBuffer),fmt,args);
 	va_end(args);
 
 	printk(KERN_WARNING "AFFS warning (device %s): %s(): %s\n", sb->s_id,
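adfs and affs also switch their error/warning helpers from vsprintf() to vsnprintf(), so an over-long message can no longer run past the fixed buffer. The bounded form, in a self-contained sketch with an illustrative buffer size:

#include <linux/kernel.h>
#include <stdarg.h>

static char example_buf[128];

static void example_error(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(example_buf, sizeof(example_buf), fmt, args);	/* bounded, unlike vsprintf() */
	va_end(args);
}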
diff --git a/fs/affs/bitmap.c b/fs/affs/bitmap.c
index b0b9536..b330009 100644
--- a/fs/affs/bitmap.c
+++ b/fs/affs/bitmap.c
@@ -289,12 +289,11 @@
 	sbi->s_bmap_count = (sbi->s_partition_size - sbi->s_reserved +
 				 sbi->s_bmap_bits - 1) / sbi->s_bmap_bits;
 	size = sbi->s_bmap_count * sizeof(*bm);
-	bm = sbi->s_bitmap = kmalloc(size, GFP_KERNEL);
+	bm = sbi->s_bitmap = kzalloc(size, GFP_KERNEL);
 	if (!sbi->s_bitmap) {
 		printk(KERN_ERR "AFFS: Bitmap allocation failed\n");
 		return -ENOMEM;
 	}
-	memset(sbi->s_bitmap, 0, size);
 
 	bmap_blk = (__be32 *)sbi->s_root_bh->b_data;
 	blk = sb->s_blocksize / 4 - 49;
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 5ea72c3..3de93e7 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -66,12 +66,12 @@
 	pr_debug("AFFS: write_super() at %lu, clean=%d\n", get_seconds(), clean);
 }
 
-static kmem_cache_t * affs_inode_cachep;
+static struct kmem_cache * affs_inode_cachep;
 
 static struct inode *affs_alloc_inode(struct super_block *sb)
 {
 	struct affs_inode_info *ei;
-	ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, SLAB_KERNEL);
+	ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	ei->vfs_inode.i_version = 1;
@@ -83,7 +83,7 @@
 	kmem_cache_free(affs_inode_cachep, AFFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct affs_inode_info *ei = (struct affs_inode_info *) foo;
 
diff --git a/fs/afs/kafsasyncd.c b/fs/afs/kafsasyncd.c
index f09a794..615df24 100644
--- a/fs/afs/kafsasyncd.c
+++ b/fs/afs/kafsasyncd.c
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
+#include <linux/freezer.h>
 #include "cell.h"
 #include "server.h"
 #include "volume.h"
diff --git a/fs/afs/kafstimod.c b/fs/afs/kafstimod.c
index 65bc05a..694344e 100644
--- a/fs/afs/kafstimod.c
+++ b/fs/afs/kafstimod.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
+#include <linux/freezer.h>
 #include "cell.h"
 #include "volume.h"
 #include "kafstimod.h"
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 22afaae..44aff81 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -55,13 +55,12 @@
 	_enter("%p,%08x,", cell, ntohl(addr->s_addr));
 
 	/* allocate and initialise a server record */
-	server = kmalloc(sizeof(struct afs_server), GFP_KERNEL);
+	server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
 	if (!server) {
 		_leave(" = -ENOMEM");
 		return -ENOMEM;
 	}
 
-	memset(server, 0, sizeof(struct afs_server));
 	atomic_set(&server->usage, 1);
 
 	INIT_LIST_HEAD(&server->link);
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 67d1f5c..18d9b77 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -35,7 +35,7 @@
 	struct afs_volume	*volume;
 };
 
-static void afs_i_init_once(void *foo, kmem_cache_t *cachep,
+static void afs_i_init_once(void *foo, struct kmem_cache *cachep,
 			    unsigned long flags);
 
 static int afs_get_sb(struct file_system_type *fs_type,
@@ -65,7 +65,7 @@
 	.put_super	= afs_put_super,
 };
 
-static kmem_cache_t *afs_inode_cachep;
+static struct kmem_cache *afs_inode_cachep;
 static atomic_t afs_count_active_inodes;
 
 /*****************************************************************************/
@@ -242,14 +242,12 @@
 	kenter("");
 
 	/* allocate a superblock info record */
-	as = kmalloc(sizeof(struct afs_super_info), GFP_KERNEL);
+	as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
 	if (!as) {
 		_leave(" = -ENOMEM");
 		return -ENOMEM;
 	}
 
-	memset(as, 0, sizeof(struct afs_super_info));
-
 	afs_get_volume(params->volume);
 	as->volume = params->volume;
 
@@ -384,7 +382,7 @@
 /*
  * initialise an inode cache slab element prior to any use
  */
-static void afs_i_init_once(void *_vnode, kmem_cache_t *cachep,
+static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep,
 			    unsigned long flags)
 {
 	struct afs_vnode *vnode = (struct afs_vnode *) _vnode;
@@ -412,7 +410,7 @@
 	struct afs_vnode *vnode;
 
 	vnode = (struct afs_vnode *)
-		kmem_cache_alloc(afs_inode_cachep, SLAB_KERNEL);
+		kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
 	if (!vnode)
 		return NULL;
 
diff --git a/fs/aio.c b/fs/aio.c
index 9476659..d3a6ec2 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -47,19 +47,19 @@
 unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
 /*----end sysctl variables---*/
 
-static kmem_cache_t	*kiocb_cachep;
-static kmem_cache_t	*kioctx_cachep;
+static struct kmem_cache	*kiocb_cachep;
+static struct kmem_cache	*kioctx_cachep;
 
 static struct workqueue_struct *aio_wq;
 
 /* Used for rare fput completion. */
-static void aio_fput_routine(void *);
-static DECLARE_WORK(fput_work, aio_fput_routine, NULL);
+static void aio_fput_routine(struct work_struct *);
+static DECLARE_WORK(fput_work, aio_fput_routine);
 
 static DEFINE_SPINLOCK(fput_lock);
 static LIST_HEAD(fput_head);
 
-static void aio_kick_handler(void *);
+static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);
 
 /* aio_setup
@@ -227,7 +227,7 @@
 
 	INIT_LIST_HEAD(&ctx->active_reqs);
 	INIT_LIST_HEAD(&ctx->run_list);
-	INIT_WORK(&ctx->wq, aio_kick_handler, ctx);
+	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
 
 	if (aio_setup_ring(ctx) < 0)
 		goto out_freectx;
@@ -367,8 +367,7 @@
 {
 	unsigned nr_events = ctx->max_reqs;
 
-	if (unlikely(ctx->reqs_active))
-		BUG();
+	BUG_ON(ctx->reqs_active);
 
 	cancel_delayed_work(&ctx->wq);
 	flush_workqueue(aio_wq);
@@ -470,7 +469,7 @@
 		wake_up(&ctx->wait);
 }
 
-static void aio_fput_routine(void *data)
+static void aio_fput_routine(struct work_struct *data)
 {
 	spin_lock_irq(&fput_lock);
 	while (likely(!list_empty(&fput_head))) {
@@ -505,8 +504,7 @@
 	assert_spin_locked(&ctx->ctx_lock);
 
 	req->ki_users --;
-	if (unlikely(req->ki_users < 0))
-		BUG();
+	BUG_ON(req->ki_users < 0);
 	if (likely(req->ki_users))
 		return 0;
 	list_del(&req->ki_list);		/* remove from active_reqs */
@@ -668,17 +666,6 @@
 	ssize_t (*retry)(struct kiocb *);
 	ssize_t ret;
 
-	if (iocb->ki_retried++ > 1024*1024) {
-		printk("Maximal retry count.  Bytes done %Zd\n",
-			iocb->ki_nbytes - iocb->ki_left);
-		return -EAGAIN;
-	}
-
-	if (!(iocb->ki_retried & 0xff)) {
-		pr_debug("%ld retry: %zd of %zd\n", iocb->ki_retried,
-			iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes);
-	}
-
 	if (!(retry = iocb->ki_retry)) {
 		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
 		return 0;
@@ -859,9 +846,9 @@
  *      space.
  * Run on aiod's context.
  */
-static void aio_kick_handler(void *data)
+static void aio_kick_handler(struct work_struct *work)
 {
-	struct kioctx *ctx = data;
+	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
 	mm_segment_t oldfs = get_fs();
 	int requeue;
 
@@ -876,7 +863,7 @@
 	 * we're in a worker thread already, don't use queue_delayed_work,
 	 */
 	if (requeue)
-		queue_work(aio_wq, &ctx->wq);
+		queue_delayed_work(aio_wq, &ctx->wq, 0);
 }
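aio's retry kick uses the delayed-work flavour of the same workqueue rework: ctx->wq becomes a delayed work item, INIT_DELAYED_WORK() sets it up, the kioctx is recovered through the embedded .work member, and immediate requeueing goes through queue_delayed_work() with a zero delay. Sketched with hypothetical names:

#include <linux/workqueue.h>

struct example_ctx {
	struct delayed_work wq;
};

static void example_kick_handler(struct work_struct *work)
{
	struct example_ctx *ctx = container_of(work, struct example_ctx, wq.work);

	/* ... run queued iocbs for ctx ... */
}

static void example_queue(struct workqueue_struct *q, struct example_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->wq, example_kick_handler);
	queue_delayed_work(q, &ctx->wq, 0);	/* run as soon as possible, via the delayed API */
}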
 
 
@@ -1007,9 +994,6 @@
 	kunmap_atomic(ring, KM_IRQ1);
 
 	pr_debug("added to ring %p at [%lu]\n", iocb, tail);
-
-	pr_debug("%ld retries: %zd of %zd\n", iocb->ki_retried,
-		iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes);
 put_rq:
 	/* everything turned out well, dispose of the aiocb. */
 	ret = __aio_put_req(ctx, iocb);
@@ -1415,7 +1399,6 @@
 	kiocb->ki_iovec->iov_len = kiocb->ki_left;
 	kiocb->ki_nr_segs = 1;
 	kiocb->ki_cur_seg = 0;
-	kiocb->ki_nbytes = kiocb->ki_left;
 	return 0;
 }
 
@@ -1593,7 +1576,6 @@
 	req->ki_opcode = iocb->aio_lio_opcode;
 	init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
 	INIT_LIST_HEAD(&req->ki_wait.task_list);
-	req->ki_retried = 0;
 
 	ret = aio_setup_iocb(req);
 
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 38ede5c..f968d13 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -28,10 +28,11 @@
 	/*
 	 * In the event of a failure in get_sb_nodev the superblock
 	 * info is not present so nothing else has been setup, so
-	 * just exit when we are called from deactivate_super.
+	 * just call kill_anon_super when we are called from
+	 * deactivate_super.
 	 */
 	if (!sbi)
-		return;
+		goto out_kill_sb;
 
 	if ( !sbi->catatonic )
 		autofs_catatonic_mode(sbi); /* Free wait queues, close pipe */
@@ -44,6 +45,7 @@
 
 	kfree(sb->s_fs_info);
 
+out_kill_sb:
 	DPRINTK(("autofs: shutting down\n"));
 	kill_anon_super(sb);
 }
@@ -209,7 +211,6 @@
 fail_free:
 	kfree(sbi);
 	s->s_fs_info = NULL;
-	kill_anon_super(s);
 fail_unlock:
 	return -EINVAL;
 }
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index ce7c0f1..9c48250 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -152,10 +152,11 @@
 	/*
 	 * In the event of a failure in get_sb_nodev the superblock
 	 * info is not present so nothing else has been setup, so
-	 * just exit when we are called from deactivate_super.
+	 * just call kill_anon_super when we are called from
+	 * deactivate_super.
 	 */
 	if (!sbi)
-		return;
+		goto out_kill_sb;
 
 	sb->s_fs_info = NULL;
 
@@ -167,6 +168,7 @@
 
 	kfree(sbi);
 
+out_kill_sb:
 	DPRINTK("shutting down");
 	kill_anon_super(sb);
 }
@@ -426,7 +428,6 @@
 fail_free:
 	kfree(sbi);
 	s->s_fs_info = NULL;
-	kill_anon_super(s);
 fail_unlock:
 	return -EINVAL;
 }
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 07f7144..bce402e 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -61,7 +61,7 @@
 };
 
 /* slab cache for befs_inode_info objects */
-static kmem_cache_t *befs_inode_cachep;
+static struct kmem_cache *befs_inode_cachep;
 
 static const struct file_operations befs_dir_operations = {
 	.read		= generic_read_dir,
@@ -277,7 +277,7 @@
 {
         struct befs_inode_info *bi;
         bi = (struct befs_inode_info *)kmem_cache_alloc(befs_inode_cachep,
-							SLAB_KERNEL);
+							GFP_KERNEL);
         if (!bi)
                 return NULL;
         return &bi->vfs_inode;
@@ -289,7 +289,7 @@
         kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
         struct befs_inode_info *bi = (struct befs_inode_info *) foo;
 	
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index ed27ffb..eac175e 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -228,12 +228,12 @@
 	unlock_kernel();
 }
 
-static kmem_cache_t * bfs_inode_cachep;
+static struct kmem_cache * bfs_inode_cachep;
 
 static struct inode *bfs_alloc_inode(struct super_block *sb)
 {
 	struct bfs_inode_info *bi;
-	bi = kmem_cache_alloc(bfs_inode_cachep, SLAB_KERNEL);
+	bi = kmem_cache_alloc(bfs_inode_cachep, GFP_KERNEL);
 	if (!bi)
 		return NULL;
 	return &bi->vfs_inode;
@@ -244,7 +244,7 @@
 	kmem_cache_free(bfs_inode_cachep, BFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct bfs_inode_info *bi = foo;
 
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 79b05a1..be5869d 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -47,10 +47,6 @@
 static int load_elf_library(struct file *);
 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
 
-#ifndef elf_addr_t
-#define elf_addr_t unsigned long
-#endif
-
 /*
  * If we don't support core dumping, then supply a NULL so we
  * don't even try.
@@ -243,8 +239,9 @@
 	if (interp_aout) {
 		argv = sp + 2;
 		envp = argv + argc + 1;
-		__put_user((elf_addr_t)(unsigned long)argv, sp++);
-		__put_user((elf_addr_t)(unsigned long)envp, sp++);
+		if (__put_user((elf_addr_t)(unsigned long)argv, sp++) ||
+		    __put_user((elf_addr_t)(unsigned long)envp, sp++))
+			return -EFAULT;
 	} else {
 		argv = sp;
 		envp = argv + argc + 1;
@@ -254,7 +251,8 @@
 	p = current->mm->arg_end = current->mm->arg_start;
 	while (argc-- > 0) {
 		size_t len;
-		__put_user((elf_addr_t)p, argv++);
+		if (__put_user((elf_addr_t)p, argv++))
+			return -EFAULT;
 		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
 		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
 			return 0;
@@ -265,7 +263,8 @@
 	current->mm->arg_end = current->mm->env_start = p;
 	while (envc-- > 0) {
 		size_t len;
-		__put_user((elf_addr_t)p, envp++);
+		if (__put_user((elf_addr_t)p, envp++))
+			return -EFAULT;
 		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
 		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
 			return 0;
@@ -545,7 +544,7 @@
 	unsigned long reloc_func_desc = 0;
 	char passed_fileno[6];
 	struct files_struct *files;
-	int have_pt_gnu_stack, executable_stack = EXSTACK_DEFAULT;
+	int executable_stack = EXSTACK_DEFAULT;
 	unsigned long def_flags = 0;
 	struct {
 		struct elfhdr elf_ex;
@@ -708,7 +707,6 @@
 				executable_stack = EXSTACK_DISABLE_X;
 			break;
 		}
-	have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
 
 	/* Some simple consistency checks for the interpreter */
 	if (elf_interpreter) {
@@ -856,7 +854,13 @@
 			 * default mmap base, as well as whatever program they
 			 * might try to exec.  This is because the brk will
 			 * follow the loader, and is not movable.  */
-			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+			if (current->flags & PF_RANDOMIZE)
+				load_bias = randomize_range(0x10000,
+							    ELF_ET_DYN_BASE,
+							    0);
+			else
+				load_bias = ELF_ET_DYN_BASE;
+			load_bias = ELF_PAGESTART(load_bias - vaddr);
 		}
 
 		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
@@ -1582,6 +1586,10 @@
 		
 		sz += thread_status_size;
 
+#ifdef ELF_CORE_WRITE_EXTRA_NOTES
+		sz += ELF_CORE_EXTRA_NOTES_SIZE;
+#endif
+
 		fill_elf_note_phdr(&phdr, sz, offset);
 		offset += sz;
 		DUMP_WRITE(&phdr, sizeof(phdr));
@@ -1622,6 +1630,10 @@
 		if (!writenote(notes + i, file, &foffset))
 			goto end_coredump;
 
+#ifdef ELF_CORE_WRITE_EXTRA_NOTES
+	ELF_CORE_WRITE_EXTRA_NOTES;
+#endif
+
 	/* write out the thread status notes section */
 	list_for_each(t, &thread_list) {
 		struct elf_thread_status *tmp =
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index f86d5c9..ed9a61c 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -40,9 +40,6 @@
 #include <asm/pgalloc.h>
 
 typedef char *elf_caddr_t;
-#ifndef elf_addr_t
-#define elf_addr_t unsigned long
-#endif
 
 #if 0
 #define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ )
diff --git a/fs/bio.c b/fs/bio.c
index f95c874..7ec737e 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -30,7 +30,7 @@
 
 #define BIO_POOL_SIZE 256
 
-static kmem_cache_t *bio_slab __read_mostly;
+static struct kmem_cache *bio_slab __read_mostly;
 
 #define BIOVEC_NR_POOLS 6
 
@@ -44,7 +44,7 @@
 struct biovec_slab {
 	int nr_vecs;
 	char *name; 
-	kmem_cache_t *slab;
+	struct kmem_cache *slab;
 };
 
 /*
@@ -560,10 +560,8 @@
 			break;
 		}
 
-		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
-			ret = -EINVAL;
+		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
 			break;
-		}
 
 		len -= bytes;
 	}
@@ -622,10 +620,9 @@
 
 		nr_pages += end - start;
 		/*
-		 * transfer and buffer must be aligned to at least hardsector
-		 * size for now, in the future we can relax this restriction
+		 * buffer must be aligned to at least hardsector size for now
 		 */
-		if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+		if (uaddr & queue_dma_alignment(q))
 			return ERR_PTR(-EINVAL);
 	}
 
@@ -751,7 +748,6 @@
 			     int write_to_vm)
 {
 	struct bio *bio;
-	int len = 0, i;
 
 	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
 
@@ -766,18 +762,7 @@
 	 */
 	bio_get(bio);
 
-	for (i = 0; i < iov_count; i++)
-		len += iov[i].iov_len;
-
-	if (bio->bi_size == len)
-		return bio;
-
-	/*
-	 * don't support partial mappings
-	 */
-	bio_endio(bio, bio->bi_size, 0);
-	bio_unmap_user(bio);
-	return ERR_PTR(-EINVAL);
+	return bio;
 }
 
 static void __bio_unmap_user(struct bio *bio)
@@ -955,16 +940,16 @@
  * run one bio_put() against the BIO.
  */
 
-static void bio_dirty_fn(void *data);
+static void bio_dirty_fn(struct work_struct *work);
 
-static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
 static DEFINE_SPINLOCK(bio_dirty_lock);
 static struct bio *bio_dirty_list;
 
 /*
  * This runs in process context
  */
-static void bio_dirty_fn(void *data)
+static void bio_dirty_fn(struct work_struct *work)
 {
 	unsigned long flags;
 	struct bio *bio;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 36c0e7a..13816b4d 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -235,11 +235,11 @@
  */
 
 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
-static kmem_cache_t * bdev_cachep __read_mostly;
+static struct kmem_cache * bdev_cachep __read_mostly;
 
 static struct inode *bdev_alloc_inode(struct super_block *sb)
 {
-	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL);
+	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -253,7 +253,7 @@
 	kmem_cache_free(bdev_cachep, bdi);
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct bdev_inode *ei = (struct bdev_inode *) foo;
 	struct block_device *bdev = &ei->bdev;
diff --git a/fs/buffer.c b/fs/buffer.c
index 35527dc..517860f 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2908,7 +2908,7 @@
 /*
  * Buffer-head allocation
  */
-static kmem_cache_t *bh_cachep;
+static struct kmem_cache *bh_cachep;
 
 /*
  * Once the number of bh's in the machine exceeds this level, we start
@@ -2961,7 +2961,7 @@
 EXPORT_SYMBOL(free_buffer_head);
 
 static void
-init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
+init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
 {
 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
 			    SLAB_CTOR_CONSTRUCTOR) {
@@ -2972,7 +2972,6 @@
 	}
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static void buffer_exit_cpu(int cpu)
 {
 	int i;
@@ -2994,7 +2993,6 @@
 		buffer_exit_cpu((unsigned long)hcpu);
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init buffer_init(void)
 {
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 84976cd..71bc87a 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -34,6 +34,7 @@
 #include <linux/mempool.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
 #define DECLARE_GLOBALS_HERE
@@ -81,7 +82,7 @@
 extern mempool_t *cifs_req_poolp;
 extern mempool_t *cifs_mid_poolp;
 
-extern kmem_cache_t *cifs_oplock_cachep;
+extern struct kmem_cache *cifs_oplock_cachep;
 
 static int
 cifs_read_super(struct super_block *sb, void *data,
@@ -232,11 +233,11 @@
 		return generic_permission(inode, mask, NULL);
 }
 
-static kmem_cache_t *cifs_inode_cachep;
-static kmem_cache_t *cifs_req_cachep;
-static kmem_cache_t *cifs_mid_cachep;
-kmem_cache_t *cifs_oplock_cachep;
-static kmem_cache_t *cifs_sm_req_cachep;
+static struct kmem_cache *cifs_inode_cachep;
+static struct kmem_cache *cifs_req_cachep;
+static struct kmem_cache *cifs_mid_cachep;
+struct kmem_cache *cifs_oplock_cachep;
+static struct kmem_cache *cifs_sm_req_cachep;
 mempool_t *cifs_sm_req_poolp;
 mempool_t *cifs_req_poolp;
 mempool_t *cifs_mid_poolp;
@@ -245,7 +246,7 @@
 cifs_alloc_inode(struct super_block *sb)
 {
 	struct cifsInodeInfo *cifs_inode;
-	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL);
+	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
 	if (!cifs_inode)
 		return NULL;
 	cifs_inode->cifsAttrs = 0x20;	/* default */
@@ -668,7 +669,7 @@
 };
 
 static void
-cifs_init_once(void *inode, kmem_cache_t * cachep, unsigned long flags)
+cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct cifsInodeInfo *cifsi = inode;
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 71f7791..2caca06 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -31,6 +31,7 @@
 #include <linux/delay.h>
 #include <linux/completion.h>
 #include <linux/pagevec.h>
+#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include "cifspdu.h"
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 1ad8c9f..c4fa91b 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -318,6 +318,7 @@
 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 	char *tmp_path;
 	char *buf = NULL;
+	int adjustTZ = FALSE;
 
 	pTcon = cifs_sb->tcon;
 	cFYI(1,("Getting info on %s", search_path));
@@ -348,6 +349,7 @@
 					pfindData, cifs_sb->local_nls, 
 					cifs_sb->mnt_cifs_flags &
 					  CIFS_MOUNT_MAP_SPECIAL_CHR);
+			adjustTZ = TRUE;
 		}
 		
 	}
@@ -444,6 +446,10 @@
 		inode->i_ctime =
 		    cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
 		cFYI(0, ("Attributes came in as 0x%x", attr));
+		if(adjustTZ && (pTcon->ses) && (pTcon->ses->server)) {
+			inode->i_ctime.tv_sec += pTcon->ses->server->timeAdj;
+			inode->i_mtime.tv_sec += pTcon->ses->server->timeAdj;
+		}
 
 		/* set default mode. will override for dirs below */
 		if (atomic_read(&cifsInfo->inUse) == 0)
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 0bee8b7..8e25996 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -69,17 +69,30 @@
 			rc = -EOPNOTSUPP;  
 	}
 
-/* if (!rc)     */
-	{
-		/*   renew_parental_timestamps(old_file);
-		   inode->i_nlink++;
-		   mark_inode_dirty(inode);
-		   d_instantiate(direntry, inode); */
-		/* BB add call to either mark inode dirty or refresh its data and timestamp to current time */
+	d_drop(direntry);	/* force new lookup from server of target */
+
+	/* if source file is cached (oplocked) revalidate will not go to server
+	   until the file is closed or oplock broken so update nlinks locally */
+	if(old_file->d_inode) {
+		cifsInode = CIFS_I(old_file->d_inode);
+		if(rc == 0) {
+			old_file->d_inode->i_nlink++;
+			old_file->d_inode->i_ctime = CURRENT_TIME;
+			/* parent dir timestamps will update from srv
+			within a second, would it really be worth it
+			to set the parent dir cifs inode time to zero
+			to force revalidate (faster) for it too? */
+		}
+		/* if not oplocked will force revalidate to get info 
+		   on source file from srv */
+		cifsInode->time = 0;
+
+		/* Will update parent dir timestamps from srv within a second.
+		   Would it really be worth it to set the parent dir (cifs
+		   inode) time field to zero to force revalidate on parent
+		   directory faster, i.e.
+			CIFS_I(inode)->time = 0;  */
 	}
-	d_drop(direntry);	/* force new lookup from server */
-	cifsInode = CIFS_I(old_file->d_inode);
-	cifsInode->time = 0;	/* will force revalidate to go get info when needed */
 
 cifs_hl_exit:
 	kfree(fromName);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index bbc9cd3..aedf683 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -153,7 +153,7 @@
    albeit slightly larger than necessary and maxbuffersize 
    defaults to this and can not be bigger */
 	ret_buf =
-	    (struct smb_hdr *) mempool_alloc(cifs_req_poolp, SLAB_KERNEL | SLAB_NOFS);
+	    (struct smb_hdr *) mempool_alloc(cifs_req_poolp, GFP_KERNEL | GFP_NOFS);
 
 	/* clear the first few header bytes */
 	/* for most paths, more is cleared in header_assemble */
@@ -192,7 +192,7 @@
    albeit slightly larger than necessary and maxbuffersize 
    defaults to this and can not be bigger */
 	ret_buf =
-	    (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, SLAB_KERNEL | SLAB_NOFS);
+	    (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, GFP_KERNEL | GFP_NOFS);
 	if (ret_buf) {
 	/* No need to clear memory here, cleared in header assemble */
 	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 48d47b4..f80007e 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -34,7 +34,7 @@
 #include "cifs_debug.h"
   
 extern mempool_t *cifs_mid_poolp;
-extern kmem_cache_t *cifs_oplock_cachep;
+extern struct kmem_cache *cifs_oplock_cachep;
 
 static struct mid_q_entry *
 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses)
@@ -51,7 +51,7 @@
 	}
 	
 	temp = (struct mid_q_entry *) mempool_alloc(cifs_mid_poolp,
-						    SLAB_KERNEL | SLAB_NOFS);
+						    GFP_KERNEL | GFP_NOFS);
 	if (temp == NULL)
 		return temp;
 	else {
@@ -118,7 +118,7 @@
 		return NULL;
 	}
 	temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep,
-						       SLAB_KERNEL);
+						       GFP_KERNEL);
 	if (temp == NULL)
 		return temp;
 	else {
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 88d1233..b64659f 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -38,12 +38,12 @@
 static void coda_put_super(struct super_block *);
 static int coda_statfs(struct dentry *dentry, struct kstatfs *buf);
 
-static kmem_cache_t * coda_inode_cachep;
+static struct kmem_cache * coda_inode_cachep;
 
 static struct inode *coda_alloc_inode(struct super_block *sb)
 {
 	struct coda_inode_info *ei;
-	ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, SLAB_KERNEL);
+	ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	memset(&ei->c_fid, 0, sizeof(struct CodaFid));
@@ -58,7 +58,7 @@
 	kmem_cache_free(coda_inode_cachep, ITOC(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct coda_inode_info *ei = (struct coda_inode_info *) foo;
 
diff --git a/fs/compat.c b/fs/compat.c
index 8d0a001..a7e3f16 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -45,6 +45,8 @@
 #include <linux/personality.h>
 #include <linux/rwsem.h>
 #include <linux/tsacct_kern.h>
+#include <linux/highmem.h>
+#include <linux/poll.h>
 #include <linux/mm.h>
 
 #include <net/sock.h>		/* siocdevprivate_ioctl */
@@ -869,7 +871,7 @@
 
 	retval = -EINVAL;
 
-	if (type_page) {
+	if (type_page && data_page) {
 		if (!strcmp((char *)type_page, SMBFS_NAME)) {
 			do_smb_super_data_conv((void *)data_page);
 		} else if (!strcmp((char *)type_page, NCPFS_NAME)) {
@@ -1142,7 +1144,9 @@
 	lastdirent = buf.previous;
 	if (lastdirent) {
 		typeof(lastdirent->d_off) d_off = file->f_pos;
-		__put_user_unaligned(d_off, &lastdirent->d_off);
+		error = -EFAULT;
+		if (__put_user_unaligned(d_off, &lastdirent->d_off))
+			goto out_putf;
 		error = count - buf.count;
 	}
 
@@ -1609,14 +1613,14 @@
 		nr &= ~1UL;
 		while (nr) {
 			unsigned long h, l;
-			__get_user(l, ufdset);
-			__get_user(h, ufdset+1);
+			if (__get_user(l, ufdset) || __get_user(h, ufdset+1))
+				return -EFAULT;
 			ufdset += 2;
 			*fdset++ = h << 32 | l;
 			nr -= 2;
 		}
-		if (odd)
-			__get_user(*fdset, ufdset);
+		if (odd && __get_user(*fdset, ufdset))
+			return -EFAULT;
 	} else {
 		/* Tricky, must clear full unsigned long in the
 		 * kernel fdset at the end, this makes sure that
@@ -1628,14 +1632,14 @@
 }
 
 static
-void compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
-			unsigned long *fdset)
+int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
+		      unsigned long *fdset)
 {
 	unsigned long odd;
 	nr = ROUND_UP(nr, __COMPAT_NFDBITS);
 
 	if (!ufdset)
-		return;
+		return 0;
 
 	odd = nr & 1UL;
 	nr &= ~1UL;
@@ -1643,13 +1647,14 @@
 		unsigned long h, l;
 		l = *fdset++;
 		h = l >> 32;
-		__put_user(l, ufdset);
-		__put_user(h, ufdset+1);
+		if (__put_user(l, ufdset) || __put_user(h, ufdset+1))
+			return -EFAULT;
 		ufdset += 2;
 		nr -= 2;
 	}
-	if (odd)
-		__put_user(*fdset, ufdset);
+	if (odd && __put_user(*fdset, ufdset))
+		return -EFAULT;
+	return 0;
 }
 
 
@@ -1724,10 +1729,10 @@
 		ret = 0;
 	}
 
-	compat_set_fd_set(n, inp, fds.res_in);
-	compat_set_fd_set(n, outp, fds.res_out);
-	compat_set_fd_set(n, exp, fds.res_ex);
-
+	if (compat_set_fd_set(n, inp, fds.res_in) ||
+	    compat_set_fd_set(n, outp, fds.res_out) ||
+	    compat_set_fd_set(n, exp, fds.res_ex))
+		ret = -EFAULT;
 out:
 	kfree(bits);
 out_nofds:
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index a91f262..bcc3caf 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -211,8 +211,10 @@
 	up_native =
 		compat_alloc_user_space(sizeof(struct video_still_picture));
 
-	put_user(compat_ptr(fp), &up_native->iFrame);
-	put_user(size, &up_native->size);
+	err =  put_user(compat_ptr(fp), &up_native->iFrame);
+	err |= put_user(size, &up_native->size);
+	if (err)
+		return -EFAULT;
 
 	err = sys_ioctl(fd, cmd, (unsigned long) up_native);
 
@@ -236,8 +238,10 @@
 	err |= get_user(length, &up->length);
 
 	up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
-	put_user(compat_ptr(palp), &up_native->palette);
-	put_user(length, &up_native->length);
+	err  = put_user(compat_ptr(palp), &up_native->palette);
+	err |= put_user(length, &up_native->length);
+	if (err)
+		return -EFAULT;
 
 	err = sys_ioctl(fd, cmd, (unsigned long) up_native);
 
@@ -2043,16 +2047,19 @@
         struct serial_struct ss;
         mm_segment_t oldseg = get_fs();
         __u32 udata;
+	unsigned int base;
 
         if (cmd == TIOCSSERIAL) {
                 if (!access_ok(VERIFY_READ, ss32, sizeof(SS32)))
                         return -EFAULT;
                 if (__copy_from_user(&ss, ss32, offsetof(SS32, iomem_base)))
 			return -EFAULT;
-                __get_user(udata, &ss32->iomem_base);
+                if (__get_user(udata, &ss32->iomem_base))
+			return -EFAULT;
                 ss.iomem_base = compat_ptr(udata);
-                __get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift);
-                __get_user(ss.port_high, &ss32->port_high);
+                if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
+		    __get_user(ss.port_high, &ss32->port_high))
+			return -EFAULT;
                 ss.iomap_base = 0UL;
         }
         set_fs(KERNEL_DS);
@@ -2063,12 +2070,12 @@
                         return -EFAULT;
                 if (__copy_to_user(ss32,&ss,offsetof(SS32,iomem_base)))
 			return -EFAULT;
-                __put_user((unsigned long)ss.iomem_base  >> 32 ?
-                            0xffffffff : (unsigned)(unsigned long)ss.iomem_base,
-                            &ss32->iomem_base);
-                __put_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift);
-                __put_user(ss.port_high, &ss32->port_high);
-
+		base = (unsigned long)ss.iomem_base  >> 32 ?
+			0xffffffff : (unsigned)(unsigned long)ss.iomem_base;
+		if (__put_user(base, &ss32->iomem_base) ||
+		    __put_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
+		    __put_user(ss.port_high, &ss32->port_high))
+			return -EFAULT;
         }
         return err;
 }
@@ -2397,6 +2404,7 @@
 HANDLE_IOCTL(SIOCSIFMAP, dev_ifsioc)
 HANDLE_IOCTL(SIOCGIFADDR, dev_ifsioc)
 HANDLE_IOCTL(SIOCSIFADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFHWBROADCAST, dev_ifsioc)
 
 /* ioctls used by appletalk ddp.c */
 HANDLE_IOCTL(SIOCATALKDIFADDR, dev_ifsioc)
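
For reference, the TIOCGSERIAL hunk above packs the 64-bit kernel pointer ss.iomem_base into the 32-bit compat field, reporting 0xffffffff when the pointer has bits above 32 rather than silently truncating it. A minimal userspace sketch of that clamp follows; it is illustrative only and not part of the patch, and compat_clamp_ptr is a made-up helper name.

/* Illustrative sketch only -- mirrors the iomem_base conversion above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t compat_clamp_ptr(uint64_t kptr)
{
	/* same test as "(unsigned long)ss.iomem_base >> 32 ? ..." above */
	return (kptr >> 32) ? 0xffffffffu : (uint32_t)kptr;
}

int main(void)
{
	printf("%#x\n", (unsigned)compat_clamp_ptr(0xf0001000ULL));         /* fits: 0xf0001000 */
	printf("%#x\n", (unsigned)compat_clamp_ptr(0xffff880012345678ULL)); /* clamped: 0xffffffff */
	return 0;
}
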
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index 3f4ff7a..f92cd30 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -49,7 +49,7 @@
 #define CONFIGFS_NOT_PINNED	(CONFIGFS_ITEM_ATTR)
 
 extern struct vfsmount * configfs_mount;
-extern kmem_cache_t *configfs_dir_cachep;
+extern struct kmem_cache *configfs_dir_cachep;
 
 extern int configfs_is_root(struct config_item *item);
 
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 8a3b6a1..c398861 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -93,8 +93,8 @@
  *
  * called with parent inode's i_mutex held
  */
-int configfs_dirent_exists(struct configfs_dirent *parent_sd,
-			   const unsigned char *new)
+static int configfs_dirent_exists(struct configfs_dirent *parent_sd,
+				  const unsigned char *new)
 {
 	struct configfs_dirent * sd;
 
@@ -1176,8 +1176,9 @@
 		return;
 	}
 
-	mutex_lock(&configfs_sb->s_root->d_inode->i_mutex);
-	mutex_lock(&dentry->d_inode->i_mutex);
+	mutex_lock_nested(&configfs_sb->s_root->d_inode->i_mutex,
+			  I_MUTEX_PARENT);
+	mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
 	if (configfs_detach_prep(dentry)) {
 		printk(KERN_ERR "configfs: Tried to unregister non-empty subsystem!\n");
 	}
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index 68bd5c9..ed67852 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -38,7 +38,7 @@
 
 struct vfsmount * configfs_mount = NULL;
 struct super_block * configfs_sb = NULL;
-kmem_cache_t *configfs_dir_cachep;
+struct kmem_cache *configfs_dir_cachep;
 static int configfs_mnt_count = 0;
 
 static struct super_operations configfs_ops = {
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index a624c3e..0509ced 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -481,6 +481,8 @@
 		pgdata = kmap(page);
 		if (compr_len == 0)
 			; /* hole */
+		else if (compr_len > (PAGE_CACHE_SIZE << 1))
+			printk(KERN_ERR "cramfs: bad compressed blocksize %u\n", compr_len);
 		else {
 			mutex_lock(&read_mutex);
 			bytes_filled = cramfs_uncompress_block(pgdata,
diff --git a/fs/dcache.c b/fs/dcache.c
index fd4a428..d68631f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -43,7 +43,7 @@
 
 EXPORT_SYMBOL(dcache_lock);
 
-static kmem_cache_t *dentry_cache __read_mostly;
+static struct kmem_cache *dentry_cache __read_mostly;
 
 #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
 
@@ -68,15 +68,19 @@
 	.age_limit = 45,
 };
 
-static void d_callback(struct rcu_head *head)
+static void __d_free(struct dentry *dentry)
 {
-	struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
-
 	if (dname_external(dentry))
 		kfree(dentry->d_name.name);
 	kmem_cache_free(dentry_cache, dentry); 
 }
 
+static void d_callback(struct rcu_head *head)
+{
+	struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
+	__d_free(dentry);
+}
+
 /*
  * no dcache_lock, please.  The caller must decrement dentry_stat.nr_dentry
  * inside dcache_lock.
@@ -85,7 +89,11 @@
 {
 	if (dentry->d_op && dentry->d_op->d_release)
 		dentry->d_op->d_release(dentry);
- 	call_rcu(&dentry->d_u.d_rcu, d_callback);
+	/* if dentry was never inserted into hash, immediate free is OK */
+	if (dentry->d_hash.pprev == NULL)
+		__d_free(dentry);
+	else
+		call_rcu(&dentry->d_u.d_rcu, d_callback);
 }
 
 /*
@@ -2072,10 +2080,10 @@
 }
 
 /* SLAB cache for __getname() consumers */
-kmem_cache_t *names_cachep __read_mostly;
+struct kmem_cache *names_cachep __read_mostly;
 
 /* SLAB cache for file structures */
-kmem_cache_t *filp_cachep __read_mostly;
+struct kmem_cache *filp_cachep __read_mostly;
 
 EXPORT_SYMBOL(d_genocide);
 
diff --git a/fs/dcookies.c b/fs/dcookies.c
index 0c4b067..21af162 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -37,7 +37,7 @@
 
 static LIST_HEAD(dcookie_users);
 static DEFINE_MUTEX(dcookie_mutex);
-static kmem_cache_t *dcookie_cache __read_mostly;
+static struct kmem_cache *dcookie_cache __read_mostly;
 static struct list_head *dcookie_hashtable __read_mostly;
 static size_t hash_size __read_mostly;
 
diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig
index 81b2c64..b5654a2 100644
--- a/fs/dlm/Kconfig
+++ b/fs/dlm/Kconfig
@@ -1,14 +1,32 @@
 menu "Distributed Lock Manager"
-	depends on INET && IP_SCTP && EXPERIMENTAL
+	depends on EXPERIMENTAL && INET
 
 config DLM
 	tristate "Distributed Lock Manager (DLM)"
 	depends on IPV6 || IPV6=n
 	select CONFIGFS_FS
+	select IP_SCTP if DLM_SCTP
 	help
 	A general purpose distributed lock manager for kernel or userspace
 	applications.
 
+choice
+	prompt "Select DLM communications protocol"
+	depends on DLM
+	default DLM_TCP
+	help
+	The DLM can use TCP or SCTP for its network communications.
+	SCTP supports multi-homed operations whereas TCP doesn't.
+	However, SCTP seems to have stability problems at the moment.
+
+config DLM_TCP
+	bool "TCP/IP"
+
+config DLM_SCTP
+	bool "SCTP"
+
+endchoice
+
 config DLM_DEBUG
 	bool "DLM debugging"
 	depends on DLM
diff --git a/fs/dlm/Makefile b/fs/dlm/Makefile
index 1832e02..6538894 100644
--- a/fs/dlm/Makefile
+++ b/fs/dlm/Makefile
@@ -4,7 +4,6 @@
 				dir.o \
 				lock.o \
 				lockspace.o \
-				lowcomms.o \
 				main.o \
 				member.o \
 				memory.o \
@@ -17,3 +16,6 @@
 				util.o
 dlm-$(CONFIG_DLM_DEBUG) +=	debug_fs.o
 
+dlm-$(CONFIG_DLM_TCP)   += lowcomms-tcp.o
+
+dlm-$(CONFIG_DLM_SCTP)  += lowcomms-sctp.o
\ No newline at end of file
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 1e5cd67..1ee8195 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -471,6 +471,7 @@
 	char			*ls_recover_buf;
 	int			ls_recover_nodeid; /* for debugging */
 	uint64_t		ls_rcom_seq;
+	spinlock_t		ls_rcom_spin;
 	struct list_head	ls_recover_list;
 	spinlock_t		ls_recover_list_lock;
 	int			ls_recover_list_count;
@@ -488,7 +489,8 @@
 #define LSFL_RUNNING		1
 #define LSFL_RECOVERY_STOP	2
 #define LSFL_RCOM_READY		3
-#define LSFL_UEVENT_WAIT	4
+#define LSFL_RCOM_WAIT		4
+#define LSFL_UEVENT_WAIT	5
 
 /* much of this is just saving user space pointers associated with the
    lock that we pass back to the user lib with an ast */
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 3f2befa..30878de 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -2372,6 +2372,7 @@
 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
 {
 	lkb->lkb_exflags = ms->m_exflags;
+	lkb->lkb_sbflags = ms->m_sbflags;
 	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
 		         (ms->m_flags & 0x0000FFFF);
 }
@@ -3028,10 +3029,17 @@
 
 	while (1) {
 		if (dlm_locking_stopped(ls)) {
-			if (!recovery)
-				dlm_add_requestqueue(ls, nodeid, hd);
-			error = -EINTR;
-			goto out;
+			if (recovery) {
+				error = -EINTR;
+				goto out;
+			}
+			error = dlm_add_requestqueue(ls, nodeid, hd);
+			if (error == -EAGAIN)
+				continue;
+			else {
+				error = -EINTR;
+				goto out;
+			}
 		}
 
 		if (lock_recovery_try(ls))
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index f8842ca..59012b0 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -22,6 +22,7 @@
 #include "memory.h"
 #include "lock.h"
 #include "recover.h"
+#include "requestqueue.h"
 
 #ifdef CONFIG_DLM_DEBUG
 int dlm_create_debug_file(struct dlm_ls *ls);
@@ -478,6 +479,8 @@
 	ls->ls_recoverd_task = NULL;
 	mutex_init(&ls->ls_recoverd_active);
 	spin_lock_init(&ls->ls_recover_lock);
+	spin_lock_init(&ls->ls_rcom_spin);
+	get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
 	ls->ls_recover_status = 0;
 	ls->ls_recover_seq = 0;
 	ls->ls_recover_args = NULL;
@@ -684,6 +687,7 @@
 	 * Free structures on any other lists
 	 */
 
+	dlm_purge_requestqueue(ls);
 	kfree(ls->ls_recover_args);
 	dlm_clear_free_entries(ls);
 	dlm_clear_members(ls);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms-sctp.c
similarity index 87%
rename from fs/dlm/lowcomms.c
rename to fs/dlm/lowcomms-sctp.c
index 6da6b14..fe158d7 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms-sctp.c
@@ -2,7 +2,7 @@
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
-**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -75,13 +75,13 @@
 };
 
 static DEFINE_IDR(nodeinfo_idr);
-static struct rw_semaphore	nodeinfo_lock;
-static int			max_nodeid;
+static DECLARE_RWSEM(nodeinfo_lock);
+static int max_nodeid;
 
 struct cbuf {
-	unsigned		base;
-	unsigned		len;
-	unsigned		mask;
+	unsigned int base;
+	unsigned int len;
+	unsigned int mask;
 };
 
 /* Just the one of these, now. But this struct keeps
@@ -90,9 +90,9 @@
 #define CF_READ_PENDING 1
 
 struct connection {
-	struct socket          *sock;
+	struct socket           *sock;
 	unsigned long		flags;
-	struct page            *rx_page;
+	struct page             *rx_page;
 	atomic_t		waiting_requests;
 	struct cbuf		cb;
 	int                     eagain_flag;
@@ -102,36 +102,40 @@
 
 struct writequeue_entry {
 	struct list_head	list;
-	struct page            *page;
+	struct page             *page;
 	int			offset;
 	int			len;
 	int			end;
 	int			users;
-	struct nodeinfo        *ni;
+	struct nodeinfo         *ni;
 };
 
-#define CBUF_ADD(cb, n) do { (cb)->len += n; } while(0)
-#define CBUF_EMPTY(cb) ((cb)->len == 0)
-#define CBUF_MAY_ADD(cb, n) (((cb)->len + (n)) < ((cb)->mask + 1))
-#define CBUF_DATA(cb) (((cb)->base + (cb)->len) & (cb)->mask)
+static void cbuf_add(struct cbuf *cb, int n)
+{
+	cb->len += n;
+}
 
-#define CBUF_INIT(cb, size) \
-do { \
-	(cb)->base = (cb)->len = 0; \
-	(cb)->mask = ((size)-1); \
-} while(0)
+static int cbuf_data(struct cbuf *cb)
+{
+	return ((cb->base + cb->len) & cb->mask);
+}
 
-#define CBUF_EAT(cb, n) \
-do { \
-	(cb)->len  -= (n); \
-	(cb)->base += (n); \
-	(cb)->base &= (cb)->mask; \
-} while(0)
+static void cbuf_init(struct cbuf *cb, int size)
+{
+	cb->base = cb->len = 0;
+	cb->mask = size-1;
+}
 
+static void cbuf_eat(struct cbuf *cb, int n)
+{
+	cb->len  -= n;
+	cb->base += n;
+	cb->base &= cb->mask;
+}
 
 /* List of nodes which have writes pending */
-static struct list_head write_nodes;
-static spinlock_t write_nodes_lock;
+static LIST_HEAD(write_nodes);
+static DEFINE_SPINLOCK(write_nodes_lock);
 
 /* Maximum number of incoming messages to process before
  * doing a schedule()
@@ -141,8 +145,7 @@
 /* Manage daemons */
 static struct task_struct *recv_task;
 static struct task_struct *send_task;
-static wait_queue_head_t lowcomms_recv_wait;
-static atomic_t accepting;
+static DECLARE_WAIT_QUEUE_HEAD(lowcomms_recv_wait);
 
 /* The SCTP connection */
 static struct connection sctp_con;
@@ -161,11 +164,11 @@
 		return error;
 
 	if (dlm_local_addr[0]->ss_family == AF_INET) {
-	        struct sockaddr_in *in4  = (struct sockaddr_in *) &addr;
+		struct sockaddr_in *in4  = (struct sockaddr_in *) &addr;
 		struct sockaddr_in *ret4 = (struct sockaddr_in *) retaddr;
 		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
 	} else {
-	        struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &addr;
+		struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &addr;
 		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
 		memcpy(&ret6->sin6_addr, &in6->sin6_addr,
 		       sizeof(in6->sin6_addr));
@@ -174,6 +177,8 @@
 	return 0;
 }
 
+/* If alloc is 0 here we will not attempt to allocate a new
+   nodeinfo struct */
 static struct nodeinfo *nodeid2nodeinfo(int nodeid, gfp_t alloc)
 {
 	struct nodeinfo *ni;
@@ -184,44 +189,45 @@
 	ni = idr_find(&nodeinfo_idr, nodeid);
 	up_read(&nodeinfo_lock);
 
-	if (!ni && alloc) {
-		down_write(&nodeinfo_lock);
+	if (ni || !alloc)
+		return ni;
 
-		ni = idr_find(&nodeinfo_idr, nodeid);
-		if (ni)
-			goto out_up;
+	down_write(&nodeinfo_lock);
 
-		r = idr_pre_get(&nodeinfo_idr, alloc);
-		if (!r)
-			goto out_up;
+	ni = idr_find(&nodeinfo_idr, nodeid);
+	if (ni)
+		goto out_up;
 
-		ni = kmalloc(sizeof(struct nodeinfo), alloc);
-		if (!ni)
-			goto out_up;
+	r = idr_pre_get(&nodeinfo_idr, alloc);
+	if (!r)
+		goto out_up;
 
-		r = idr_get_new_above(&nodeinfo_idr, ni, nodeid, &n);
-		if (r) {
-			kfree(ni);
-			ni = NULL;
-			goto out_up;
-		}
-		if (n != nodeid) {
-			idr_remove(&nodeinfo_idr, n);
-			kfree(ni);
-			ni = NULL;
-			goto out_up;
-		}
-		memset(ni, 0, sizeof(struct nodeinfo));
-		spin_lock_init(&ni->lock);
-		INIT_LIST_HEAD(&ni->writequeue);
-		spin_lock_init(&ni->writequeue_lock);
-		ni->nodeid = nodeid;
+	ni = kmalloc(sizeof(struct nodeinfo), alloc);
+	if (!ni)
+		goto out_up;
 
-		if (nodeid > max_nodeid)
-			max_nodeid = nodeid;
-	out_up:
-		up_write(&nodeinfo_lock);
+	r = idr_get_new_above(&nodeinfo_idr, ni, nodeid, &n);
+	if (r) {
+		kfree(ni);
+		ni = NULL;
+		goto out_up;
 	}
+	if (n != nodeid) {
+		idr_remove(&nodeinfo_idr, n);
+		kfree(ni);
+		ni = NULL;
+		goto out_up;
+	}
+	memset(ni, 0, sizeof(struct nodeinfo));
+	spin_lock_init(&ni->lock);
+	INIT_LIST_HEAD(&ni->writequeue);
+	spin_lock_init(&ni->writequeue_lock);
+	ni->nodeid = nodeid;
+
+	if (nodeid > max_nodeid)
+		max_nodeid = nodeid;
+out_up:
+	up_write(&nodeinfo_lock);
 
 	return ni;
 }
@@ -279,13 +285,13 @@
 		in4_addr->sin_port = cpu_to_be16(port);
 		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
 		memset(in4_addr+1, 0, sizeof(struct sockaddr_storage) -
-				      sizeof(struct sockaddr_in));
+		       sizeof(struct sockaddr_in));
 		*addr_len = sizeof(struct sockaddr_in);
 	} else {
 		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
 		in6_addr->sin6_port = cpu_to_be16(port);
 		memset(in6_addr+1, 0, sizeof(struct sockaddr_storage) -
-				      sizeof(struct sockaddr_in6));
+		       sizeof(struct sockaddr_in6));
 		*addr_len = sizeof(struct sockaddr_in6);
 	}
 }
@@ -324,7 +330,7 @@
 	cmsg->cmsg_type = SCTP_SNDRCV;
 	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
 	outmessage.msg_controllen = cmsg->cmsg_len;
-	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
+	sinfo = CMSG_DATA(cmsg);
 	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
 
 	sinfo->sinfo_flags |= MSG_EOF;
@@ -387,7 +393,7 @@
 
 			if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) {
 				log_print("COMM_UP for invalid assoc ID %d",
-					 (int)sn->sn_assoc_change.sac_assoc_id);
+					  (int)sn->sn_assoc_change.sac_assoc_id);
 				init_failed();
 				return;
 			}
@@ -398,15 +404,18 @@
 			fs = get_fs();
 			set_fs(get_ds());
 			ret = sctp_con.sock->ops->getsockopt(sctp_con.sock,
-						IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
-						(char*)&prim, &prim_len);
+							     IPPROTO_SCTP,
+							     SCTP_PRIMARY_ADDR,
+							     (char*)&prim,
+							     &prim_len);
 			set_fs(fs);
 			if (ret < 0) {
 				struct nodeinfo *ni;
 
 				log_print("getsockopt/sctp_primary_addr on "
 					  "new assoc %d failed : %d",
-				    (int)sn->sn_assoc_change.sac_assoc_id, ret);
+					  (int)sn->sn_assoc_change.sac_assoc_id,
+					  ret);
 
 				/* Retry INIT later */
 				ni = assoc2nodeinfo(sn->sn_assoc_change.sac_assoc_id);
@@ -426,12 +435,10 @@
 				return;
 
 			/* Save the assoc ID */
-			spin_lock(&ni->lock);
 			ni->assoc_id = sn->sn_assoc_change.sac_assoc_id;
-			spin_unlock(&ni->lock);
 
 			log_print("got new/restarted association %d nodeid %d",
-			       (int)sn->sn_assoc_change.sac_assoc_id, nodeid);
+				  (int)sn->sn_assoc_change.sac_assoc_id, nodeid);
 
 			/* Send any pending writes */
 			clear_bit(NI_INIT_PENDING, &ni->flags);
@@ -507,13 +514,12 @@
 		sctp_con.rx_page = alloc_page(GFP_ATOMIC);
 		if (sctp_con.rx_page == NULL)
 			goto out_resched;
-		CBUF_INIT(&sctp_con.cb, PAGE_CACHE_SIZE);
+		cbuf_init(&sctp_con.cb, PAGE_CACHE_SIZE);
 	}
 
 	memset(&incmsg, 0, sizeof(incmsg));
 	memset(&msgname, 0, sizeof(msgname));
 
-	memset(incmsg, 0, sizeof(incmsg));
 	msg.msg_name = &msgname;
 	msg.msg_namelen = sizeof(msgname);
 	msg.msg_flags = 0;
@@ -532,17 +538,17 @@
 	 * iov[0] is the bit of the circular buffer between the current end
 	 * point (cb.base + cb.len) and the end of the buffer.
 	 */
-	iov[0].iov_len = sctp_con.cb.base - CBUF_DATA(&sctp_con.cb);
+	iov[0].iov_len = sctp_con.cb.base - cbuf_data(&sctp_con.cb);
 	iov[0].iov_base = page_address(sctp_con.rx_page) +
-			  CBUF_DATA(&sctp_con.cb);
+		cbuf_data(&sctp_con.cb);
 	iov[1].iov_len = 0;
 
 	/*
 	 * iov[1] is the bit of the circular buffer between the start of the
 	 * buffer and the start of the currently used section (cb.base)
 	 */
-	if (CBUF_DATA(&sctp_con.cb) >= sctp_con.cb.base) {
-		iov[0].iov_len = PAGE_CACHE_SIZE - CBUF_DATA(&sctp_con.cb);
+	if (cbuf_data(&sctp_con.cb) >= sctp_con.cb.base) {
+		iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&sctp_con.cb);
 		iov[1].iov_len = sctp_con.cb.base;
 		iov[1].iov_base = page_address(sctp_con.rx_page);
 		msg.msg_iovlen = 2;
@@ -557,7 +563,7 @@
 	msg.msg_control = incmsg;
 	msg.msg_controllen = sizeof(incmsg);
 	cmsg = CMSG_FIRSTHDR(&msg);
-	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
+	sinfo = CMSG_DATA(cmsg);
 
 	if (msg.msg_flags & MSG_NOTIFICATION) {
 		process_sctp_notification(&msg, page_address(sctp_con.rx_page));
@@ -583,29 +589,29 @@
 	if (r == 1)
 		return 0;
 
-	CBUF_ADD(&sctp_con.cb, ret);
+	cbuf_add(&sctp_con.cb, ret);
 	ret = dlm_process_incoming_buffer(cpu_to_le32(sinfo->sinfo_ppid),
 					  page_address(sctp_con.rx_page),
 					  sctp_con.cb.base, sctp_con.cb.len,
 					  PAGE_CACHE_SIZE);
 	if (ret < 0)
 		goto out_close;
-	CBUF_EAT(&sctp_con.cb, ret);
+	cbuf_eat(&sctp_con.cb, ret);
 
-      out:
+out:
 	ret = 0;
 	goto out_ret;
 
-      out_resched:
+out_resched:
 	lowcomms_data_ready(sctp_con.sock->sk, 0);
 	ret = 0;
-	schedule();
+	cond_resched();
 	goto out_ret;
 
-      out_close:
+out_close:
 	if (ret != -EAGAIN)
 		log_print("error reading from sctp socket: %d", ret);
-      out_ret:
+out_ret:
 	return ret;
 }
 
@@ -619,10 +625,12 @@
 	set_fs(get_ds());
 	if (num == 1)
 		result = sctp_con.sock->ops->bind(sctp_con.sock,
-					(struct sockaddr *) addr, addr_len);
+						  (struct sockaddr *) addr,
+						  addr_len);
 	else
 		result = sctp_con.sock->ops->setsockopt(sctp_con.sock, SOL_SCTP,
-				SCTP_SOCKOPT_BINDX_ADD, (char *)addr, addr_len);
+							SCTP_SOCKOPT_BINDX_ADD,
+							(char *)addr, addr_len);
 	set_fs(fs);
 
 	if (result < 0)
@@ -719,10 +727,10 @@
 
 	return 0;
 
- create_delsock:
+create_delsock:
 	sock_release(sock);
 	sctp_con.sock = NULL;
- out:
+out:
 	return result;
 }
 
@@ -756,16 +764,13 @@
 	int users = 0;
 	struct nodeinfo *ni;
 
-	if (!atomic_read(&accepting))
-		return NULL;
-
 	ni = nodeid2nodeinfo(nodeid, allocation);
 	if (!ni)
 		return NULL;
 
 	spin_lock(&ni->writequeue_lock);
 	e = list_entry(ni->writequeue.prev, struct writequeue_entry, list);
-	if (((struct list_head *) e == &ni->writequeue) ||
+	if ((&e->list == &ni->writequeue) ||
 	    (PAGE_CACHE_SIZE - e->end < len)) {
 		e = NULL;
 	} else {
@@ -776,7 +781,7 @@
 	spin_unlock(&ni->writequeue_lock);
 
 	if (e) {
-	      got_one:
+	got_one:
 		if (users == 0)
 			kmap(e->page);
 		*ppc = page_address(e->page) + offset;
@@ -803,9 +808,6 @@
 	int users;
 	struct nodeinfo *ni = e->ni;
 
-	if (!atomic_read(&accepting))
-		return;
-
 	spin_lock(&ni->writequeue_lock);
 	users = --e->users;
 	if (users)
@@ -822,7 +824,7 @@
 	}
 	return;
 
-      out:
+out:
 	spin_unlock(&ni->writequeue_lock);
 	return;
 }
@@ -878,7 +880,7 @@
 	cmsg->cmsg_level = IPPROTO_SCTP;
 	cmsg->cmsg_type = SCTP_SNDRCV;
 	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
-	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
+	sinfo = CMSG_DATA(cmsg);
 	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
 	sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid);
 
@@ -892,7 +894,7 @@
 }
 
 /* Send a message */
-static int send_to_sock(struct nodeinfo *ni)
+static void send_to_sock(struct nodeinfo *ni)
 {
 	int ret = 0;
 	struct writequeue_entry *e;
@@ -903,13 +905,13 @@
 	struct sctp_sndrcvinfo *sinfo;
 	struct kvec iov;
 
-        /* See if we need to init an association before we start
+	/* See if we need to init an association before we start
 	   sending precious messages */
 	spin_lock(&ni->lock);
 	if (!ni->assoc_id && !test_and_set_bit(NI_INIT_PENDING, &ni->flags)) {
 		spin_unlock(&ni->lock);
 		initiate_association(ni->nodeid);
-		return 0;
+		return;
 	}
 	spin_unlock(&ni->lock);
 
@@ -923,7 +925,7 @@
 	cmsg->cmsg_level = IPPROTO_SCTP;
 	cmsg->cmsg_type = SCTP_SNDRCV;
 	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
-	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
+	sinfo = CMSG_DATA(cmsg);
 	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
 	sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid);
 	sinfo->sinfo_assoc_id = ni->assoc_id;
@@ -955,7 +957,7 @@
 				goto send_error;
 		} else {
 			/* Don't starve people filling buffers */
-			schedule();
+			cond_resched();
 		}
 
 		spin_lock(&ni->writequeue_lock);
@@ -964,15 +966,16 @@
 
 		if (e->len == 0 && e->users == 0) {
 			list_del(&e->list);
+			kunmap(e->page);
 			free_entry(e);
 			continue;
 		}
 	}
 	spin_unlock(&ni->writequeue_lock);
- out:
-	return ret;
+out:
+	return;
 
- send_error:
+send_error:
 	log_print("Error sending to node %d %d", ni->nodeid, ret);
 	spin_lock(&ni->lock);
 	if (!test_and_set_bit(NI_INIT_PENDING, &ni->flags)) {
@@ -982,7 +985,7 @@
 	} else
 		spin_unlock(&ni->lock);
 
-	return ret;
+	return;
 }
 
 /* Try to send any messages that are pending */
@@ -994,7 +997,7 @@
 	spin_lock_bh(&write_nodes_lock);
 	list_for_each_safe(list, temp, &write_nodes) {
 		struct nodeinfo *ni =
-		    list_entry(list, struct nodeinfo, write_list);
+			list_entry(list, struct nodeinfo, write_list);
 		clear_bit(NI_WRITE_PENDING, &ni->flags);
 		list_del(&ni->write_list);
 
@@ -1106,7 +1109,7 @@
 		set_current_state(TASK_INTERRUPTIBLE);
 		add_wait_queue(&lowcomms_recv_wait, &wait);
 		if (!test_bit(CF_READ_PENDING, &sctp_con.flags))
-			schedule();
+			cond_resched();
 		remove_wait_queue(&lowcomms_recv_wait, &wait);
 		set_current_state(TASK_RUNNING);
 
@@ -1118,12 +1121,12 @@
 
 				/* Don't starve out everyone else */
 				if (++count >= MAX_RX_MSG_COUNT) {
-					schedule();
+					cond_resched();
 					count = 0;
 				}
 			} while (!kthread_should_stop() && ret >=0);
 		}
-		schedule();
+		cond_resched();
 	}
 
 	return 0;
@@ -1138,7 +1141,7 @@
 	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (write_list_empty())
-			schedule();
+			cond_resched();
 		set_current_state(TASK_RUNNING);
 
 		if (sctp_con.eagain_flag) {
@@ -1166,7 +1169,7 @@
 
 	p = kthread_run(dlm_recvd, NULL, "dlm_recvd");
 	error = IS_ERR(p);
-       	if (error) {
+	if (error) {
 		log_print("can't start dlm_recvd %d", error);
 		return error;
 	}
@@ -1174,7 +1177,7 @@
 
 	p = kthread_run(dlm_sendd, NULL, "dlm_sendd");
 	error = IS_ERR(p);
-       	if (error) {
+	if (error) {
 		log_print("can't start dlm_sendd %d", error);
 		kthread_stop(recv_task);
 		return error;
@@ -1197,43 +1200,28 @@
 	error = daemons_start();
 	if (error)
 		goto fail_sock;
-	atomic_set(&accepting, 1);
 	return 0;
 
- fail_sock:
+fail_sock:
 	close_connection();
 	return error;
 }
 
-/* Set all the activity flags to prevent any socket activity. */
-
 void dlm_lowcomms_stop(void)
 {
-	atomic_set(&accepting, 0);
+	int i;
+
 	sctp_con.flags = 0x7;
 	daemons_stop();
 	clean_writequeues();
 	close_connection();
 	dealloc_nodeinfo();
 	max_nodeid = 0;
-}
 
-int dlm_lowcomms_init(void)
-{
-	init_waitqueue_head(&lowcomms_recv_wait);
-	spin_lock_init(&write_nodes_lock);
-	INIT_LIST_HEAD(&write_nodes);
-	init_rwsem(&nodeinfo_lock);
-	return 0;
-}
-
-void dlm_lowcomms_exit(void)
-{
-	int i;
+	dlm_local_count = 0;
+	dlm_local_nodeid = 0;
 
 	for (i = 0; i < dlm_local_count; i++)
 		kfree(dlm_local_addr[i]);
-	dlm_local_count = 0;
-	dlm_local_nodeid = 0;
 }
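
For reference, the cbuf helpers introduced above (and repeated in the new lowcomms-tcp.c below) implement a simple power-of-two ring buffer inside one receive page: base is the read offset, len the amount buffered, and cbuf_data() the next write offset, all masked for wrap-around. The following is a minimal userspace sketch, not part of the patch; the helper bodies mirror the patch, while the main() harness and the 4096-byte size stand-in are assumptions for illustration.

/* Illustrative sketch only -- shows base/len/mask behaviour across wrap. */
#include <assert.h>
#include <stdio.h>

struct cbuf {
	unsigned int base;
	unsigned int len;
	unsigned int mask;
};

static void cbuf_init(struct cbuf *cb, int size) { cb->base = cb->len = 0; cb->mask = size - 1; }
static void cbuf_add(struct cbuf *cb, int n)  { cb->len += n; }
static int  cbuf_data(struct cbuf *cb)        { return (cb->base + cb->len) & cb->mask; }
static void cbuf_eat(struct cbuf *cb, int n)  { cb->len -= n; cb->base = (cb->base + n) & cb->mask; }

int main(void)
{
	struct cbuf cb;

	cbuf_init(&cb, 4096);	/* stand-in for PAGE_CACHE_SIZE */
	cbuf_add(&cb, 4000);	/* 4000 bytes received */
	cbuf_eat(&cb, 3900);	/* 3900 bytes handed to midcomms */
	cbuf_add(&cb, 300);	/* next receive wraps past the end of the page */
	assert(cbuf_data(&cb) == (3900 + 400) % 4096);
	printf("base=%u len=%u next_write=%d\n", cb.base, cb.len, cbuf_data(&cb));
	return 0;
}

Converting the old CBUF_* macros to small static functions keeps the behaviour identical while adding type checking, which is what the rename to lowcomms-sctp.c carries along.
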
 
diff --git a/fs/dlm/lowcomms-tcp.c b/fs/dlm/lowcomms-tcp.c
new file mode 100644
index 0000000..8f2791f
--- /dev/null
+++ b/fs/dlm/lowcomms-tcp.c
@@ -0,0 +1,1189 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+**  Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+/*
+ * lowcomms.c
+ *
+ * This is the "low-level" comms layer.
+ *
+ * It is responsible for sending/receiving messages
+ * from other nodes in the cluster.
+ *
+ * Cluster nodes are referred to by their nodeids. nodeids are
+ * simply 32 bit numbers to the locking module - if they need to
+ * be expanded for the cluster infrastructure then that is it's
+ * responsibility. It is this layer's
+ * responsibility to resolve these into IP address or
+ * whatever it needs for inter-node communication.
+ *
+ * The comms level is two kernel threads that deal mainly with
+ * the receiving of messages from other nodes and passing them
+ * up to the mid-level comms layer (which understands the
+ * message format) for execution by the locking core, and
+ * a send thread which does all the setting up of connections
+ * to remote nodes and the sending of data. Threads are not allowed
+ * to send their own data because it may cause them to wait in times
+ * of high load. Also, this way, the sending thread can collect together
+ * messages bound for one node and send them in one block.
+ *
+ * I don't see any problem with the recv thread executing the locking
+ * code on behalf of remote processes as the locking code is
+ * short, efficient and never waits.
+ *
+ */
+
+
+#include <asm/ioctls.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <linux/pagemap.h>
+
+#include "dlm_internal.h"
+#include "lowcomms.h"
+#include "midcomms.h"
+#include "config.h"
+
+struct cbuf {
+	unsigned int base;
+	unsigned int len;
+	unsigned int mask;
+};
+
+#define NODE_INCREMENT 32
+static void cbuf_add(struct cbuf *cb, int n)
+{
+	cb->len += n;
+}
+
+static int cbuf_data(struct cbuf *cb)
+{
+	return ((cb->base + cb->len) & cb->mask);
+}
+
+static void cbuf_init(struct cbuf *cb, int size)
+{
+	cb->base = cb->len = 0;
+	cb->mask = size-1;
+}
+
+static void cbuf_eat(struct cbuf *cb, int n)
+{
+	cb->len  -= n;
+	cb->base += n;
+	cb->base &= cb->mask;
+}
+
+static bool cbuf_empty(struct cbuf *cb)
+{
+	return cb->len == 0;
+}
+
+/* Maximum number of incoming messages to process before
+   doing a cond_resched()
+*/
+#define MAX_RX_MSG_COUNT 25
+
+struct connection {
+	struct socket *sock;	/* NULL if not connected */
+	uint32_t nodeid;	/* So we know who we are in the list */
+	struct rw_semaphore sock_sem; /* Stop connect races */
+	struct list_head read_list;   /* On this list when ready for reading */
+	struct list_head write_list;  /* On this list when ready for writing */
+	struct list_head state_list;  /* On this list when ready to connect */
+	unsigned long flags;	/* bit 1,2 = We are on the read/write lists */
+#define CF_READ_PENDING 1
+#define CF_WRITE_PENDING 2
+#define CF_CONNECT_PENDING 3
+#define CF_IS_OTHERCON 4
+	struct list_head writequeue;  /* List of outgoing writequeue_entries */
+	struct list_head listenlist;  /* List of allocated listening sockets */
+	spinlock_t writequeue_lock;
+	int (*rx_action) (struct connection *);	/* What to do when active */
+	struct page *rx_page;
+	struct cbuf cb;
+	int retries;
+	atomic_t waiting_requests;
+#define MAX_CONNECT_RETRIES 3
+	struct connection *othercon;
+};
+#define sock2con(x) ((struct connection *)(x)->sk_user_data)
+
+/* An entry waiting to be sent */
+struct writequeue_entry {
+	struct list_head list;
+	struct page *page;
+	int offset;
+	int len;
+	int end;
+	int users;
+	struct connection *con;
+};
+
+static struct sockaddr_storage dlm_local_addr;
+
+/* Manage daemons */
+static struct task_struct *recv_task;
+static struct task_struct *send_task;
+
+static wait_queue_t lowcomms_send_waitq_head;
+static DECLARE_WAIT_QUEUE_HEAD(lowcomms_send_waitq);
+static wait_queue_t lowcomms_recv_waitq_head;
+static DECLARE_WAIT_QUEUE_HEAD(lowcomms_recv_waitq);
+
+/* An array of pointers to connections, indexed by NODEID */
+static struct connection **connections;
+static DECLARE_MUTEX(connections_lock);
+static kmem_cache_t *con_cache;
+static int conn_array_size;
+
+/* List of sockets that have reads pending */
+static LIST_HEAD(read_sockets);
+static DEFINE_SPINLOCK(read_sockets_lock);
+
+/* List of sockets which have writes pending */
+static LIST_HEAD(write_sockets);
+static DEFINE_SPINLOCK(write_sockets_lock);
+
+/* List of sockets which have connects pending */
+static LIST_HEAD(state_sockets);
+static DEFINE_SPINLOCK(state_sockets_lock);
+
+static struct connection *nodeid2con(int nodeid, gfp_t allocation)
+{
+	struct connection *con = NULL;
+
+	down(&connections_lock);
+	if (nodeid >= conn_array_size) {
+		int new_size = nodeid + NODE_INCREMENT;
+		struct connection **new_conns;
+
+		new_conns = kzalloc(sizeof(struct connection *) *
+				    new_size, allocation);
+		if (!new_conns)
+			goto finish;
+
+		memcpy(new_conns, connections,  sizeof(struct connection *) * conn_array_size);
+		conn_array_size = new_size;
+		kfree(connections);
+		connections = new_conns;
+
+	}
+
+	con = connections[nodeid];
+	if (con == NULL && allocation) {
+		con = kmem_cache_zalloc(con_cache, allocation);
+		if (!con)
+			goto finish;
+
+		con->nodeid = nodeid;
+		init_rwsem(&con->sock_sem);
+		INIT_LIST_HEAD(&con->writequeue);
+		spin_lock_init(&con->writequeue_lock);
+
+		connections[nodeid] = con;
+	}
+
+finish:
+	up(&connections_lock);
+	return con;
+}
+
+/* Data available on socket or listen socket received a connect */
+static void lowcomms_data_ready(struct sock *sk, int count_unused)
+{
+	struct connection *con = sock2con(sk);
+
+	atomic_inc(&con->waiting_requests);
+	if (test_and_set_bit(CF_READ_PENDING, &con->flags))
+		return;
+
+	spin_lock_bh(&read_sockets_lock);
+	list_add_tail(&con->read_list, &read_sockets);
+	spin_unlock_bh(&read_sockets_lock);
+
+	wake_up_interruptible(&lowcomms_recv_waitq);
+}
+
+static void lowcomms_write_space(struct sock *sk)
+{
+	struct connection *con = sock2con(sk);
+
+	if (test_and_set_bit(CF_WRITE_PENDING, &con->flags))
+		return;
+
+	spin_lock_bh(&write_sockets_lock);
+	list_add_tail(&con->write_list, &write_sockets);
+	spin_unlock_bh(&write_sockets_lock);
+
+	wake_up_interruptible(&lowcomms_send_waitq);
+}
+
+static inline void lowcomms_connect_sock(struct connection *con)
+{
+	if (test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
+		return;
+
+	spin_lock_bh(&state_sockets_lock);
+	list_add_tail(&con->state_list, &state_sockets);
+	spin_unlock_bh(&state_sockets_lock);
+
+	wake_up_interruptible(&lowcomms_send_waitq);
+}
+
+static void lowcomms_state_change(struct sock *sk)
+{
+	if (sk->sk_state == TCP_ESTABLISHED)
+		lowcomms_write_space(sk);
+}
+
+/* Make a socket active */
+static int add_sock(struct socket *sock, struct connection *con)
+{
+	con->sock = sock;
+
+	/* Install a data_ready callback */
+	con->sock->sk->sk_data_ready = lowcomms_data_ready;
+	con->sock->sk->sk_write_space = lowcomms_write_space;
+	con->sock->sk->sk_state_change = lowcomms_state_change;
+
+	return 0;
+}
+
+/* Add the port number to an IP6 or 4 sockaddr and return the address
+   length */
+static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
+			  int *addr_len)
+{
+	saddr->ss_family =  dlm_local_addr.ss_family;
+	if (saddr->ss_family == AF_INET) {
+		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
+		in4_addr->sin_port = cpu_to_be16(port);
+		*addr_len = sizeof(struct sockaddr_in);
+	} else {
+		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
+		in6_addr->sin6_port = cpu_to_be16(port);
+		*addr_len = sizeof(struct sockaddr_in6);
+	}
+}
+
+/* Close a remote connection and tidy up */
+static void close_connection(struct connection *con, bool and_other)
+{
+	down_write(&con->sock_sem);
+
+	if (con->sock) {
+		sock_release(con->sock);
+		con->sock = NULL;
+	}
+	if (con->othercon && and_other) {
+		/* Will only re-enter once. */
+		close_connection(con->othercon, false);
+	}
+	if (con->rx_page) {
+		__free_page(con->rx_page);
+		con->rx_page = NULL;
+	}
+	con->retries = 0;
+	up_write(&con->sock_sem);
+}
+
+/* Data received from remote end */
+static int receive_from_sock(struct connection *con)
+{
+	int ret = 0;
+	struct msghdr msg;
+	struct iovec iov[2];
+	mm_segment_t fs;
+	unsigned len;
+	int r;
+	int call_again_soon = 0;
+
+	down_read(&con->sock_sem);
+
+	if (con->sock == NULL)
+		goto out;
+	if (con->rx_page == NULL) {
+		/*
+		 * This doesn't need to be atomic, but I think it should
+		 * improve performance if it is.
+		 */
+		con->rx_page = alloc_page(GFP_ATOMIC);
+		if (con->rx_page == NULL)
+			goto out_resched;
+		cbuf_init(&con->cb, PAGE_CACHE_SIZE);
+	}
+
+	msg.msg_control = NULL;
+	msg.msg_controllen = 0;
+	msg.msg_iovlen = 1;
+	msg.msg_iov = iov;
+	msg.msg_name = NULL;
+	msg.msg_namelen = 0;
+	msg.msg_flags = 0;
+
+	/*
+	 * iov[0] is the bit of the circular buffer between the current end
+	 * point (cb.base + cb.len) and the end of the buffer.
+	 */
+	iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
+	iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
+	iov[1].iov_len = 0;
+
+	/*
+	 * iov[1] is the bit of the circular buffer between the start of the
+	 * buffer and the start of the currently used section (cb.base)
+	 */
+	if (cbuf_data(&con->cb) >= con->cb.base) {
+		iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
+		iov[1].iov_len = con->cb.base;
+		iov[1].iov_base = page_address(con->rx_page);
+		msg.msg_iovlen = 2;
+	}
+	len = iov[0].iov_len + iov[1].iov_len;
+
+	fs = get_fs();
+	set_fs(get_ds());
+	r = ret = sock_recvmsg(con->sock, &msg, len,
+			       MSG_DONTWAIT | MSG_NOSIGNAL);
+	set_fs(fs);
+
+	if (ret <= 0)
+		goto out_close;
+	if (ret == len)
+		call_again_soon = 1;
+	cbuf_add(&con->cb, ret);
+	ret = dlm_process_incoming_buffer(con->nodeid,
+					  page_address(con->rx_page),
+					  con->cb.base, con->cb.len,
+					  PAGE_CACHE_SIZE);
+	if (ret == -EBADMSG) {
+		printk(KERN_INFO "dlm: lowcomms: addr=%p, base=%u, len=%u, "
+		       "iov_len=%u, iov_base[0]=%p, read=%d\n",
+		       page_address(con->rx_page), con->cb.base, con->cb.len,
+		       len, iov[0].iov_base, r);
+	}
+	if (ret < 0)
+		goto out_close;
+	cbuf_eat(&con->cb, ret);
+
+	if (cbuf_empty(&con->cb) && !call_again_soon) {
+		__free_page(con->rx_page);
+		con->rx_page = NULL;
+	}
+
+out:
+	if (call_again_soon)
+		goto out_resched;
+	up_read(&con->sock_sem);
+	return 0;
+
+out_resched:
+	lowcomms_data_ready(con->sock->sk, 0);
+	up_read(&con->sock_sem);
+	cond_resched();
+	return 0;
+
+out_close:
+	up_read(&con->sock_sem);
+	if (ret != -EAGAIN && !test_bit(CF_IS_OTHERCON, &con->flags)) {
+		close_connection(con, false);
+		/* Reconnect when there is something to send */
+	}
+
+	return ret;
+}
+
+/* Listening socket is busy, accept a connection */
+static int accept_from_sock(struct connection *con)
+{
+	int result;
+	struct sockaddr_storage peeraddr;
+	struct socket *newsock;
+	int len;
+	int nodeid;
+	struct connection *newcon;
+
+	memset(&peeraddr, 0, sizeof(peeraddr));
+	result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM,
+				  IPPROTO_TCP, &newsock);
+	if (result < 0)
+		return -ENOMEM;
+
+	down_read(&con->sock_sem);
+
+	result = -ENOTCONN;
+	if (con->sock == NULL)
+		goto accept_err;
+
+	newsock->type = con->sock->type;
+	newsock->ops = con->sock->ops;
+
+	result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
+	if (result < 0)
+		goto accept_err;
+
+	/* Get the connected socket's peer */
+	memset(&peeraddr, 0, sizeof(peeraddr));
+	if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr,
+				  &len, 2)) {
+		result = -ECONNABORTED;
+		goto accept_err;
+	}
+
+	/* Get the new node's NODEID */
+	make_sockaddr(&peeraddr, 0, &len);
+	if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) {
+		printk("dlm: connect from non cluster node\n");
+		sock_release(newsock);
+		up_read(&con->sock_sem);
+		return -1;
+	}
+
+	log_print("got connection from %d", nodeid);
+
+	/*  Check to see if we already have a connection to this node. This
+	 *  could happen if the two nodes initiate a connection at roughly
+	 *  the same time and the connections cross on the wire.
+	 * TEMPORARY FIX:
+	 *  In this case we store the incoming one in "othercon"
+	 */
+	newcon = nodeid2con(nodeid, GFP_KERNEL);
+	if (!newcon) {
+		result = -ENOMEM;
+		goto accept_err;
+	}
+	down_write(&newcon->sock_sem);
+	if (newcon->sock) {
+		struct connection *othercon = newcon->othercon;
+
+		if (!othercon) {
+			othercon = kmem_cache_zalloc(con_cache, GFP_KERNEL);
+			if (!othercon) {
+				printk("dlm: failed to allocate incoming socket\n");
+				up_write(&newcon->sock_sem);
+				result = -ENOMEM;
+				goto accept_err;
+			}
+			othercon->nodeid = nodeid;
+			othercon->rx_action = receive_from_sock;
+			init_rwsem(&othercon->sock_sem);
+			set_bit(CF_IS_OTHERCON, &othercon->flags);
+			newcon->othercon = othercon;
+		}
+		othercon->sock = newsock;
+		newsock->sk->sk_user_data = othercon;
+		add_sock(newsock, othercon);
+	}
+	else {
+		newsock->sk->sk_user_data = newcon;
+		newcon->rx_action = receive_from_sock;
+		add_sock(newsock, newcon);
+
+	}
+
+	up_write(&newcon->sock_sem);
+
+	/*
+	 * Add it to the active queue in case we got data
+	 * between processing the accept and adding the socket
+	 * to the read_sockets list
+	 */
+	lowcomms_data_ready(newsock->sk, 0);
+	up_read(&con->sock_sem);
+
+	return 0;
+
+accept_err:
+	up_read(&con->sock_sem);
+	sock_release(newsock);
+
+	if (result != -EAGAIN)
+		printk("dlm: error accepting connection from node: %d\n", result);
+	return result;
+}
+
+/* Connect a new socket to its peer */
+static void connect_to_sock(struct connection *con)
+{
+	int result = -EHOSTUNREACH;
+	struct sockaddr_storage saddr;
+	int addr_len;
+	struct socket *sock;
+
+	if (con->nodeid == 0) {
+		log_print("attempt to connect sock 0 foiled");
+		return;
+	}
+
+	down_write(&con->sock_sem);
+	if (con->retries++ > MAX_CONNECT_RETRIES)
+		goto out;
+
+	/* Some odd races can cause double-connects, ignore them */
+	if (con->sock) {
+		result = 0;
+		goto out;
+	}
+
+	/* Create a socket to communicate with */
+	result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM,
+				  IPPROTO_TCP, &sock);
+	if (result < 0)
+		goto out_err;
+
+	memset(&saddr, 0, sizeof(saddr));
+	if (dlm_nodeid_to_addr(con->nodeid, &saddr))
+		goto out_err;
+
+	sock->sk->sk_user_data = con;
+	con->rx_action = receive_from_sock;
+
+	make_sockaddr(&saddr, dlm_config.tcp_port, &addr_len);
+
+	add_sock(sock, con);
+
+	log_print("connecting to %d", con->nodeid);
+	result =
+		sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
+				   O_NONBLOCK);
+	if (result == -EINPROGRESS)
+		result = 0;
+	if (result == 0)
+		goto out;
+
+out_err:
+	if (con->sock) {
+		sock_release(con->sock);
+		con->sock = NULL;
+	}
+	/*
+	 * Some errors are fatal and this list might need adjusting. For other
+	 * errors we try again until the max number of retries is reached.
+	 */
+	if (result != -EHOSTUNREACH && result != -ENETUNREACH &&
+	    result != -ENETDOWN && result != -EINVAL
+	    && result != -EPROTONOSUPPORT) {
+		lowcomms_connect_sock(con);
+		result = 0;
+	}
+out:
+	up_write(&con->sock_sem);
+	return;
+}
+
+static struct socket *create_listen_sock(struct connection *con,
+					 struct sockaddr_storage *saddr)
+{
+	struct socket *sock = NULL;
+	mm_segment_t fs;
+	int result = 0;
+	int one = 1;
+	int addr_len;
+
+	if (dlm_local_addr.ss_family == AF_INET)
+		addr_len = sizeof(struct sockaddr_in);
+	else
+		addr_len = sizeof(struct sockaddr_in6);
+
+	/* Create a socket to communicate with */
+	result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM, IPPROTO_TCP, &sock);
+	if (result < 0) {
+		printk("dlm: Can't create listening comms socket\n");
+		goto create_out;
+	}
+
+	fs = get_fs();
+	set_fs(get_ds());
+	result = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
+				 (char *)&one, sizeof(one));
+	set_fs(fs);
+	if (result < 0) {
+		printk("dlm: Failed to set SO_REUSEADDR on socket: result=%d\n",
+		       result);
+	}
+	sock->sk->sk_user_data = con;
+	con->rx_action = accept_from_sock;
+	con->sock = sock;
+
+	/* Bind to our port */
+	make_sockaddr(saddr, dlm_config.tcp_port, &addr_len);
+	result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
+	if (result < 0) {
+		printk("dlm: Can't bind to port %d\n", dlm_config.tcp_port);
+		sock_release(sock);
+		sock = NULL;
+		con->sock = NULL;
+		goto create_out;
+	}
+
+	fs = get_fs();
+	set_fs(get_ds());
+
+	result = sock_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
+				 (char *)&one, sizeof(one));
+	set_fs(fs);
+	if (result < 0) {
+		printk("dlm: Set keepalive failed: %d\n", result);
+	}
+
+	result = sock->ops->listen(sock, 5);
+	if (result < 0) {
+		printk("dlm: Can't listen on port %d\n", dlm_config.tcp_port);
+		sock_release(sock);
+		sock = NULL;
+		goto create_out;
+	}
+
+create_out:
+	return sock;
+}
+
+
+/* Listen on all interfaces */
+static int listen_for_all(void)
+{
+	struct socket *sock = NULL;
+	struct connection *con = nodeid2con(0, GFP_KERNEL);
+	int result = -EINVAL;
+
+	/* We don't support multi-homed hosts */
+	set_bit(CF_IS_OTHERCON, &con->flags);
+
+	sock = create_listen_sock(con, &dlm_local_addr);
+	if (sock) {
+		add_sock(sock, con);
+		result = 0;
+	}
+	else {
+		result = -EADDRINUSE;
+	}
+
+	return result;
+}
+
+
+
+static struct writequeue_entry *new_writequeue_entry(struct connection *con,
+						     gfp_t allocation)
+{
+	struct writequeue_entry *entry;
+
+	entry = kmalloc(sizeof(struct writequeue_entry), allocation);
+	if (!entry)
+		return NULL;
+
+	entry->page = alloc_page(allocation);
+	if (!entry->page) {
+		kfree(entry);
+		return NULL;
+	}
+
+	entry->offset = 0;
+	entry->len = 0;
+	entry->end = 0;
+	entry->users = 0;
+	entry->con = con;
+
+	return entry;
+}
+
+void *dlm_lowcomms_get_buffer(int nodeid, int len,
+			      gfp_t allocation, char **ppc)
+{
+	struct connection *con;
+	struct writequeue_entry *e;
+	int offset = 0;
+	int users = 0;
+
+	con = nodeid2con(nodeid, allocation);
+	if (!con)
+		return NULL;
+
+	spin_lock(&con->writequeue_lock);
+	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
+	if ((&e->list == &con->writequeue) ||
+	    (PAGE_CACHE_SIZE - e->end < len)) {
+		e = NULL;
+	} else {
+		offset = e->end;
+		e->end += len;
+		users = e->users++;
+	}
+	spin_unlock(&con->writequeue_lock);
+
+	if (e) {
+	got_one:
+		if (users == 0)
+			kmap(e->page);
+		*ppc = page_address(e->page) + offset;
+		return e;
+	}
+
+	e = new_writequeue_entry(con, allocation);
+	if (e) {
+		spin_lock(&con->writequeue_lock);
+		offset = e->end;
+		e->end += len;
+		users = e->users++;
+		list_add_tail(&e->list, &con->writequeue);
+		spin_unlock(&con->writequeue_lock);
+		goto got_one;
+	}
+	return NULL;
+}
+
+void dlm_lowcomms_commit_buffer(void *mh)
+{
+	struct writequeue_entry *e = (struct writequeue_entry *)mh;
+	struct connection *con = e->con;
+	int users;
+
+	spin_lock(&con->writequeue_lock);
+	users = --e->users;
+	if (users)
+		goto out;
+	e->len = e->end - e->offset;
+	kunmap(e->page);
+	spin_unlock(&con->writequeue_lock);
+
+	if (test_and_set_bit(CF_WRITE_PENDING, &con->flags) == 0) {
+		spin_lock_bh(&write_sockets_lock);
+		list_add_tail(&con->write_list, &write_sockets);
+		spin_unlock_bh(&write_sockets_lock);
+
+		wake_up_interruptible(&lowcomms_send_waitq);
+	}
+	return;
+
+out:
+	spin_unlock(&con->writequeue_lock);
+	return;
+}
+
+static void free_entry(struct writequeue_entry *e)
+{
+	__free_page(e->page);
+	kfree(e);
+}
+
+/* Send a message */
+static void send_to_sock(struct connection *con)
+{
+	int ret = 0;
+	ssize_t(*sendpage) (struct socket *, struct page *, int, size_t, int);
+	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
+	struct writequeue_entry *e;
+	int len, offset;
+
+	down_read(&con->sock_sem);
+	if (con->sock == NULL)
+		goto out_connect;
+
+	sendpage = con->sock->ops->sendpage;
+
+	spin_lock(&con->writequeue_lock);
+	for (;;) {
+		e = list_entry(con->writequeue.next, struct writequeue_entry,
+			       list);
+		if ((struct list_head *) e == &con->writequeue)
+			break;
+
+		len = e->len;
+		offset = e->offset;
+		BUG_ON(len == 0 && e->users == 0);
+		spin_unlock(&con->writequeue_lock);
+
+		ret = 0;
+		if (len) {
+			ret = sendpage(con->sock, e->page, offset, len,
+				       msg_flags);
+			if (ret == -EAGAIN || ret == 0)
+				goto out;
+			if (ret <= 0)
+				goto send_error;
+		}
+		else {
+			/* Don't starve people filling buffers */
+			cond_resched();
+		}
+
+		spin_lock(&con->writequeue_lock);
+		e->offset += ret;
+		e->len -= ret;
+
+		if (e->len == 0 && e->users == 0) {
+			list_del(&e->list);
+			kunmap(e->page);
+			free_entry(e);
+			continue;
+		}
+	}
+	spin_unlock(&con->writequeue_lock);
+out:
+	up_read(&con->sock_sem);
+	return;
+
+send_error:
+	up_read(&con->sock_sem);
+	close_connection(con, false);
+	lowcomms_connect_sock(con);
+	return;
+
+out_connect:
+	up_read(&con->sock_sem);
+	lowcomms_connect_sock(con);
+	return;
+}
+
+static void clean_one_writequeue(struct connection *con)
+{
+	struct list_head *list;
+	struct list_head *temp;
+
+	spin_lock(&con->writequeue_lock);
+	list_for_each_safe(list, temp, &con->writequeue) {
+		struct writequeue_entry *e =
+			list_entry(list, struct writequeue_entry, list);
+		list_del(&e->list);
+		free_entry(e);
+	}
+	spin_unlock(&con->writequeue_lock);
+}
+
+/* Called from recovery when it knows that a node has
+   left the cluster */
+int dlm_lowcomms_close(int nodeid)
+{
+	struct connection *con;
+
+	if (!connections)
+		goto out;
+
+	log_print("closing connection to node %d", nodeid);
+	con = nodeid2con(nodeid, 0);
+	if (con) {
+		clean_one_writequeue(con);
+		close_connection(con, true);
+		atomic_set(&con->waiting_requests, 0);
+	}
+	return 0;
+
+out:
+	return -1;
+}
+
+/* API send message call, may queue the request */
+/* N.B. This is the old interface - use the new one for new calls */
+int lowcomms_send_message(int nodeid, char *buf, int len, gfp_t allocation)
+{
+	struct writequeue_entry *e;
+	char *b;
+
+	e = dlm_lowcomms_get_buffer(nodeid, len, allocation, &b);
+	if (e) {
+		memcpy(b, buf, len);
+		dlm_lowcomms_commit_buffer(e);
+		return 0;
+	}
+	return -ENOBUFS;
+}
+
+/* Look for activity on active sockets */
+static void process_sockets(void)
+{
+	struct list_head *list;
+	struct list_head *temp;
+	int count = 0;
+
+	spin_lock_bh(&read_sockets_lock);
+	list_for_each_safe(list, temp, &read_sockets) {
+
+		struct connection *con =
+			list_entry(list, struct connection, read_list);
+		list_del(&con->read_list);
+		clear_bit(CF_READ_PENDING, &con->flags);
+
+		spin_unlock_bh(&read_sockets_lock);
+
+		/* This can reach zero if we are processing requests
+		 * as they come in.
+		 */
+		if (atomic_read(&con->waiting_requests) == 0) {
+			spin_lock_bh(&read_sockets_lock);
+			continue;
+		}
+
+		do {
+			con->rx_action(con);
+
+			/* Don't starve out everyone else */
+			if (++count >= MAX_RX_MSG_COUNT) {
+				cond_resched();
+				count = 0;
+			}
+
+		} while (!atomic_dec_and_test(&con->waiting_requests) &&
+			 !kthread_should_stop());
+
+		spin_lock_bh(&read_sockets_lock);
+	}
+	spin_unlock_bh(&read_sockets_lock);
+}
+
+/* Try to send any messages that are pending
+ */
+static void process_output_queue(void)
+{
+	struct list_head *list;
+	struct list_head *temp;
+
+	spin_lock_bh(&write_sockets_lock);
+	list_for_each_safe(list, temp, &write_sockets) {
+		struct connection *con =
+			list_entry(list, struct connection, write_list);
+		clear_bit(CF_WRITE_PENDING, &con->flags);
+		list_del(&con->write_list);
+
+		spin_unlock_bh(&write_sockets_lock);
+		send_to_sock(con);
+		spin_lock_bh(&write_sockets_lock);
+	}
+	spin_unlock_bh(&write_sockets_lock);
+}
+
+static void process_state_queue(void)
+{
+	struct list_head *list;
+	struct list_head *temp;
+
+	spin_lock_bh(&state_sockets_lock);
+	list_for_each_safe(list, temp, &state_sockets) {
+		struct connection *con =
+			list_entry(list, struct connection, state_list);
+		list_del(&con->state_list);
+		clear_bit(CF_CONNECT_PENDING, &con->flags);
+		spin_unlock_bh(&state_sockets_lock);
+
+		connect_to_sock(con);
+		spin_lock_bh(&state_sockets_lock);
+	}
+	spin_unlock_bh(&state_sockets_lock);
+}
+
+
+/* Discard all entries on the write queues */
+static void clean_writequeues(void)
+{
+	int nodeid;
+
+	for (nodeid = 1; nodeid < conn_array_size; nodeid++) {
+		struct connection *con = nodeid2con(nodeid, 0);
+
+		if (con)
+			clean_one_writequeue(con);
+	}
+}
+
+static int read_list_empty(void)
+{
+	int status;
+
+	spin_lock_bh(&read_sockets_lock);
+	status = list_empty(&read_sockets);
+	spin_unlock_bh(&read_sockets_lock);
+
+	return status;
+}
+
+/* DLM Transport comms receive daemon */
+static int dlm_recvd(void *data)
+{
+	init_waitqueue_entry(&lowcomms_recv_waitq_head, current);
+	add_wait_queue(&lowcomms_recv_waitq, &lowcomms_recv_waitq_head);
+
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (read_list_empty())
+			cond_resched();
+		set_current_state(TASK_RUNNING);
+
+		process_sockets();
+	}
+
+	return 0;
+}
+
+static int write_and_state_lists_empty(void)
+{
+	int status;
+
+	spin_lock_bh(&write_sockets_lock);
+	status = list_empty(&write_sockets);
+	spin_unlock_bh(&write_sockets_lock);
+
+	spin_lock_bh(&state_sockets_lock);
+	if (list_empty(&state_sockets) == 0)
+		status = 0;
+	spin_unlock_bh(&state_sockets_lock);
+
+	return status;
+}
+
+/* DLM Transport send daemon */
+static int dlm_sendd(void *data)
+{
+	init_waitqueue_entry(&lowcomms_send_waitq_head, current);
+	add_wait_queue(&lowcomms_send_waitq, &lowcomms_send_waitq_head);
+
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (write_and_state_lists_empty())
+			cond_resched();
+		set_current_state(TASK_RUNNING);
+
+		process_state_queue();
+		process_output_queue();
+	}
+
+	return 0;
+}
+
+static void daemons_stop(void)
+{
+	kthread_stop(recv_task);
+	kthread_stop(send_task);
+}
+
+static int daemons_start(void)
+{
+	struct task_struct *p;
+	int error;
+
+	p = kthread_run(dlm_recvd, NULL, "dlm_recvd");
+	error = IS_ERR(p);
+	if (error) {
+		log_print("can't start dlm_recvd %d", error);
+		return error;
+	}
+	recv_task = p;
+
+	p = kthread_run(dlm_sendd, NULL, "dlm_sendd");
+	error = IS_ERR(p);
+	if (error) {
+		log_print("can't start dlm_sendd %d", error);
+		kthread_stop(recv_task);
+		return error;
+	}
+	send_task = p;
+
+	return 0;
+}
+
+/*
+ * Return the largest buffer size we can cope with.
+ */
+int lowcomms_max_buffer_size(void)
+{
+	return PAGE_CACHE_SIZE;
+}
+
+void dlm_lowcomms_stop(void)
+{
+	int i;
+
+	/* Set all the flags to prevent any
+	   socket activity.
+	*/
+	for (i = 0; i < conn_array_size; i++) {
+		if (connections[i])
+			connections[i]->flags |= 0xFF;
+	}
+
+	daemons_stop();
+	clean_writequeues();
+
+	for (i = 0; i < conn_array_size; i++) {
+		if (connections[i]) {
+			close_connection(connections[i], true);
+			if (connections[i]->othercon)
+				kmem_cache_free(con_cache, connections[i]->othercon);
+			kmem_cache_free(con_cache, connections[i]);
+		}
+	}
+
+	kfree(connections);
+	connections = NULL;
+
+	kmem_cache_destroy(con_cache);
+}
+
+/* This is quite likely to sleep... */
+int dlm_lowcomms_start(void)
+{
+	int error = 0;
+
+	error = -ENOMEM;
+	connections = kzalloc(sizeof(struct connection *) *
+			      NODE_INCREMENT, GFP_KERNEL);
+	if (!connections)
+		goto out;
+
+	conn_array_size = NODE_INCREMENT;
+
+	if (dlm_our_addr(&dlm_local_addr, 0)) {
+		log_print("no local IP address has been set");
+		goto fail_free_conn;
+	}
+	if (!dlm_our_addr(&dlm_local_addr, 1)) {
+		log_print("This dlm comms module does not support multi-homed clustering");
+		goto fail_free_conn;
+	}
+
+	con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
+				      __alignof__(struct connection), 0,
+				      NULL, NULL);
+	if (!con_cache)
+		goto fail_free_conn;
+
+
+	/* Start listening */
+	error = listen_for_all();
+	if (error)
+		goto fail_unlisten;
+
+	error = daemons_start();
+	if (error)
+		goto fail_unlisten;
+
+	return 0;
+
+fail_unlisten:
+	close_connection(connections[0], false);
+	kmem_cache_free(con_cache, connections[0]);
+	kmem_cache_destroy(con_cache);
+
+fail_free_conn:
+	kfree(connections);
+
+out:
+	return error;
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only.  This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
index 2d045e0..a9a9618 100644
--- a/fs/dlm/lowcomms.h
+++ b/fs/dlm/lowcomms.h
@@ -14,8 +14,6 @@
 #ifndef __LOWCOMMS_DOT_H__
 #define __LOWCOMMS_DOT_H__
 
-int dlm_lowcomms_init(void);
-void dlm_lowcomms_exit(void);
 int dlm_lowcomms_start(void);
 void dlm_lowcomms_stop(void);
 int dlm_lowcomms_close(int nodeid);
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
index a8da8dc..162fbae 100644
--- a/fs/dlm/main.c
+++ b/fs/dlm/main.c
@@ -16,7 +16,6 @@
 #include "lock.h"
 #include "user.h"
 #include "memory.h"
-#include "lowcomms.h"
 #include "config.h"
 
 #ifdef CONFIG_DLM_DEBUG
@@ -47,20 +46,14 @@
 	if (error)
 		goto out_config;
 
-	error = dlm_lowcomms_init();
-	if (error)
-		goto out_debug;
-
 	error = dlm_user_init();
 	if (error)
-		goto out_lowcomms;
+		goto out_debug;
 
 	printk("DLM (built %s %s) installed\n", __DATE__, __TIME__);
 
 	return 0;
 
- out_lowcomms:
-	dlm_lowcomms_exit();
  out_debug:
 	dlm_unregister_debugfs();
  out_config:
@@ -76,7 +69,6 @@
 static void __exit exit_dlm(void)
 {
 	dlm_user_exit();
-	dlm_lowcomms_exit();
 	dlm_config_exit();
 	dlm_memory_exit();
 	dlm_lockspace_exit();
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index a3f7de7..85e2897 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -186,6 +186,14 @@
 	struct dlm_member *memb, *safe;
 	int i, error, found, pos = 0, neg = 0, low = -1;
 
+	/* previously removed members that we've not finished removing need to
+	   count as a negative change so the "neg" recovery steps will happen */
+
+	list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
+		log_debug(ls, "prev removed member %d", memb->nodeid);
+		neg++;
+	}
+
 	/* move departed members from ls_nodes to ls_nodes_gone */
 
 	list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index 989b608..5352b03 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -15,7 +15,7 @@
 #include "config.h"
 #include "memory.h"
 
-static kmem_cache_t *lkb_cache;
+static struct kmem_cache *lkb_cache;
 
 
 int dlm_memory_init(void)
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
index 518239a..4cc31be 100644
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -90,13 +90,28 @@
 	return 0;
 }
 
+static void allow_sync_reply(struct dlm_ls *ls, uint64_t *new_seq)
+{
+	spin_lock(&ls->ls_rcom_spin);
+	*new_seq = ++ls->ls_rcom_seq;
+	set_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
+	spin_unlock(&ls->ls_rcom_spin);
+}
+
+static void disallow_sync_reply(struct dlm_ls *ls)
+{
+	spin_lock(&ls->ls_rcom_spin);
+	clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
+	clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
+	spin_unlock(&ls->ls_rcom_spin);
+}
+
 int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
 {
 	struct dlm_rcom *rc;
 	struct dlm_mhandle *mh;
 	int error = 0;
 
-	memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
 	ls->ls_recover_nodeid = nodeid;
 
 	if (nodeid == dlm_our_nodeid()) {
@@ -108,12 +123,14 @@
 	error = create_rcom(ls, nodeid, DLM_RCOM_STATUS, 0, &rc, &mh);
 	if (error)
 		goto out;
-	rc->rc_id = ++ls->ls_rcom_seq;
+
+	allow_sync_reply(ls, &rc->rc_id);
+	memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
 
 	send_rcom(ls, mh, rc);
 
 	error = dlm_wait_function(ls, &rcom_response);
-	clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
+	disallow_sync_reply(ls);
 	if (error)
 		goto out;
 
@@ -150,14 +167,21 @@
 
 static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
 {
-	if (rc_in->rc_id != ls->ls_rcom_seq) {
-		log_debug(ls, "reject old reply %d got %llx wanted %llx",
-			  rc_in->rc_type, rc_in->rc_id, ls->ls_rcom_seq);
-		return;
+	spin_lock(&ls->ls_rcom_spin);
+	if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) ||
+	    rc_in->rc_id != ls->ls_rcom_seq) {
+		log_debug(ls, "reject reply %d from %d seq %llx expect %llx",
+			  rc_in->rc_type, rc_in->rc_header.h_nodeid,
+			  (unsigned long long)rc_in->rc_id,
+			  (unsigned long long)ls->ls_rcom_seq);
+		goto out;
 	}
 	memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length);
 	set_bit(LSFL_RCOM_READY, &ls->ls_flags);
+	clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
 	wake_up(&ls->ls_wait_general);
+ out:
+	spin_unlock(&ls->ls_rcom_spin);
 }
 
 static void receive_rcom_status_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
@@ -171,7 +195,6 @@
 	struct dlm_mhandle *mh;
 	int error = 0, len = sizeof(struct dlm_rcom);
 
-	memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
 	ls->ls_recover_nodeid = nodeid;
 
 	if (nodeid == dlm_our_nodeid()) {
@@ -185,12 +208,14 @@
 	if (error)
 		goto out;
 	memcpy(rc->rc_buf, last_name, last_len);
-	rc->rc_id = ++ls->ls_rcom_seq;
+
+	allow_sync_reply(ls, &rc->rc_id);
+	memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
 
 	send_rcom(ls, mh, rc);
 
 	error = dlm_wait_function(ls, &rcom_response);
-	clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
+	disallow_sync_reply(ls);
  out:
 	return error;
 }
@@ -370,9 +395,10 @@
 static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
 {
 	struct dlm_rcom *rc;
+	struct rcom_config *rf;
 	struct dlm_mhandle *mh;
 	char *mb;
-	int mb_len = sizeof(struct dlm_rcom);
+	int mb_len = sizeof(struct dlm_rcom) + sizeof(struct rcom_config);
 
 	mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_KERNEL, &mb);
 	if (!mh)
@@ -391,6 +417,9 @@
 	rc->rc_id = rc_in->rc_id;
 	rc->rc_result = -ESRCH;
 
+	rf = (struct rcom_config *) rc->rc_buf;
+	rf->rf_lvblen = -1;
+
 	dlm_rcom_out(rc);
 	dlm_lowcomms_commit_buffer(mh);
 
@@ -412,9 +441,10 @@
 
 	ls = dlm_find_lockspace_global(hd->h_lockspace);
 	if (!ls) {
-		log_print("lockspace %x from %d not found",
-			  hd->h_lockspace, nodeid);
-		send_ls_not_ready(nodeid, rc);
+		log_print("lockspace %x from %d type %x not found",
+			  hd->h_lockspace, nodeid, rc->rc_type);
+		if (rc->rc_type == DLM_RCOM_STATUS)
+			send_ls_not_ready(nodeid, rc);
 		return;
 	}
 
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index a5e6d18..cf9f683 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -252,6 +252,7 @@
 	spin_lock(&ls->ls_recover_list_lock);
 	list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
 		list_del_init(&r->res_recover_list);
+		r->res_recover_locks_count = 0;
 		dlm_put_rsb(r);
 		ls->ls_recover_list_count--;
 	}
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 362e3ef..650536aa 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -45,7 +45,7 @@
 	unsigned long start;
 	int error, neg = 0;
 
-	log_debug(ls, "recover %llx", rv->seq);
+	log_debug(ls, "recover %llx", (unsigned long long)rv->seq);
 
 	mutex_lock(&ls->ls_recoverd_active);
 
@@ -94,14 +94,6 @@
 	}
 
 	/*
-	 * Purge directory-related requests that are saved in requestqueue.
-	 * All dir requests from before recovery are invalid now due to the dir
-	 * rebuild and will be resent by the requesting nodes.
-	 */
-
-	dlm_purge_requestqueue(ls);
-
-	/*
 	 * Wait for all nodes to complete directory rebuild.
 	 */
 
@@ -164,10 +156,31 @@
 		 */
 
 		dlm_recover_rsbs(ls);
+	} else {
+		/*
+		 * Other lockspace members may be going through the "neg" steps
+		 * while also adding us to the lockspace, in which case they'll
+		 * be doing the recover_locks (RS_LOCKS) barrier.
+		 */
+		dlm_set_recover_status(ls, DLM_RS_LOCKS);
+
+		error = dlm_recover_locks_wait(ls);
+		if (error) {
+			log_error(ls, "recover_locks_wait failed %d", error);
+			goto fail;
+		}
 	}
 
 	dlm_release_root_list(ls);
 
+	/*
+	 * Purge directory-related requests that are saved in requestqueue.
+	 * All dir requests from before recovery are invalid now due to the dir
+	 * rebuild and will be resent by the requesting nodes.
+	 */
+
+	dlm_purge_requestqueue(ls);
+
 	dlm_set_recover_status(ls, DLM_RS_DONE);
 	error = dlm_recover_done_wait(ls);
 	if (error) {
@@ -199,7 +212,8 @@
 
 	dlm_astd_wake();
 
-	log_debug(ls, "recover %llx done: %u ms", rv->seq,
+	log_debug(ls, "recover %llx done: %u ms",
+		  (unsigned long long)rv->seq,
 		  jiffies_to_msecs(jiffies - start));
 	mutex_unlock(&ls->ls_recoverd_active);
 
@@ -207,11 +221,16 @@
 
  fail:
 	dlm_release_root_list(ls);
-	log_debug(ls, "recover %llx error %d", rv->seq, error);
+	log_debug(ls, "recover %llx error %d",
+		  (unsigned long long)rv->seq, error);
 	mutex_unlock(&ls->ls_recoverd_active);
 	return error;
 }
 
+/* The dlm_ls_start() that created the rv we take here may already have been
+   stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
+   flag set. */
+
 static void do_ls_recovery(struct dlm_ls *ls)
 {
 	struct dlm_recover *rv = NULL;
@@ -219,7 +238,8 @@
 	spin_lock(&ls->ls_recover_lock);
 	rv = ls->ls_recover_args;
 	ls->ls_recover_args = NULL;
-	clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
+	if (rv && ls->ls_recover_seq == rv->seq)
+		clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
 	spin_unlock(&ls->ls_recover_lock);
 
 	if (rv) {
diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c
index 7b2b089..65008d7 100644
--- a/fs/dlm/requestqueue.c
+++ b/fs/dlm/requestqueue.c
@@ -30,26 +30,36 @@
  * lockspace is enabled on some while still suspended on others.
  */
 
-void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
+int dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
 {
 	struct rq_entry *e;
 	int length = hd->h_length;
-
-	if (dlm_is_removed(ls, nodeid))
-		return;
+	int rv = 0;
 
 	e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL);
 	if (!e) {
 		log_print("dlm_add_requestqueue: out of memory\n");
-		return;
+		return 0;
 	}
 
 	e->nodeid = nodeid;
 	memcpy(e->request, hd, length);
 
+	/* We need to check dlm_locking_stopped() after taking the mutex to
+	   avoid a race where dlm_recoverd enables locking and runs
+	   process_requestqueue between our earlier dlm_locking_stopped check
+	   and this addition to the requestqueue. */
+
 	mutex_lock(&ls->ls_requestqueue_mutex);
-	list_add_tail(&e->list, &ls->ls_requestqueue);
+	if (dlm_locking_stopped(ls))
+		list_add_tail(&e->list, &ls->ls_requestqueue);
+	else {
+		log_debug(ls, "dlm_add_requestqueue skip from %d", nodeid);
+		kfree(e);
+		rv = -EAGAIN;
+	}
 	mutex_unlock(&ls->ls_requestqueue_mutex);
+	return rv;
 }
 
 int dlm_process_requestqueue(struct dlm_ls *ls)
@@ -120,6 +130,10 @@
 {
 	uint32_t type = ms->m_type;
 
+	/* the ls is being cleaned up and freed by release_lockspace */
+	if (!ls->ls_count)
+		return 1;
+
 	if (dlm_is_removed(ls, nodeid))
 		return 1;
 
diff --git a/fs/dlm/requestqueue.h b/fs/dlm/requestqueue.h
index 349f0d2..6a53ea0 100644
--- a/fs/dlm/requestqueue.h
+++ b/fs/dlm/requestqueue.h
@@ -13,7 +13,7 @@
 #ifndef __REQUESTQUEUE_DOT_H__
 #define __REQUESTQUEUE_DOT_H__
 
-void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd);
+int dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd);
 int dlm_process_requestqueue(struct dlm_ls *ls);
 void dlm_wait_requestqueue(struct dlm_ls *ls);
 void dlm_purge_requestqueue(struct dlm_ls *ls);
diff --git a/fs/dnotify.c b/fs/dnotify.c
index 2b0442d..1f26a2b 100644
--- a/fs/dnotify.c
+++ b/fs/dnotify.c
@@ -23,7 +23,7 @@
 
 int dir_notify_enable __read_mostly = 1;
 
-static kmem_cache_t *dn_cache __read_mostly;
+static struct kmem_cache *dn_cache __read_mostly;
 
 static void redo_inode_mask(struct inode *inode)
 {
@@ -77,7 +77,7 @@
 	inode = filp->f_dentry->d_inode;
 	if (!S_ISDIR(inode->i_mode))
 		return -ENOTDIR;
-	dn = kmem_cache_alloc(dn_cache, SLAB_KERNEL);
+	dn = kmem_cache_alloc(dn_cache, GFP_KERNEL);
 	if (dn == NULL)
 		return -ENOMEM;
 	spin_lock(&inode->i_lock);
diff --git a/fs/dquot.c b/fs/dquot.c
index 9af7895..f9cd5e2 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -131,7 +131,7 @@
 static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
 
 /* SLAB cache for dquot structures */
-static kmem_cache_t *dquot_cachep;
+static struct kmem_cache *dquot_cachep;
 
 int register_quota_format(struct quota_format_type *fmt)
 {
@@ -600,7 +600,7 @@
 {
 	struct dquot *dquot;
 
-	dquot = kmem_cache_alloc(dquot_cachep, SLAB_NOFS);
+	dquot = kmem_cache_alloc(dquot_cachep, GFP_NOFS);
 	if(!dquot)
 		return NODQUOT;
 
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index f63a775..7196f50 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -628,7 +628,7 @@
 	num_extents_per_page = PAGE_CACHE_SIZE / crypt_stat->extent_size;
 	base_extent = (page->index * num_extents_per_page);
 	lower_page_virt = kmem_cache_alloc(ecryptfs_lower_page_cache,
-					   SLAB_KERNEL);
+					   GFP_KERNEL);
 	if (!lower_page_virt) {
 		rc = -ENOMEM;
 		ecryptfs_printk(KERN_ERR, "Error getting page for encrypted "
@@ -1334,7 +1334,7 @@
 		goto out;
 	}
 	/* Released in this function */
-	page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, SLAB_USER);
+	page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, GFP_USER);
 	if (!page_virt) {
 		ecryptfs_printk(KERN_ERR, "Out of memory\n");
 		rc = -ENOMEM;
@@ -1493,7 +1493,7 @@
 	    &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
 
 	/* Read the first page from the underlying file */
-	page_virt = kmem_cache_alloc(ecryptfs_header_cache_1, SLAB_USER);
+	page_virt = kmem_cache_alloc(ecryptfs_header_cache_1, GFP_USER);
 	if (!page_virt) {
 		rc = -ENOMEM;
 		ecryptfs_printk(KERN_ERR, "Unable to allocate page_virt\n");
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index a92ef05..42099e7 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -250,7 +250,7 @@
 	int lower_flags;
 
 	/* Released in ecryptfs_release or end of function if failure */
-	file_info = kmem_cache_alloc(ecryptfs_file_info_cache, SLAB_KERNEL);
+	file_info = kmem_cache_alloc(ecryptfs_file_info_cache, GFP_KERNEL);
 	ecryptfs_set_file_private(file, file_info);
 	if (!file_info) {
 		ecryptfs_printk(KERN_ERR,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index dfcc684..8a1945a 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -369,7 +369,7 @@
 	BUG_ON(!atomic_read(&lower_dentry->d_count));
 	ecryptfs_set_dentry_private(dentry,
 				    kmem_cache_alloc(ecryptfs_dentry_info_cache,
-						     SLAB_KERNEL));
+						     GFP_KERNEL));
 	if (!ecryptfs_dentry_to_private(dentry)) {
 		rc = -ENOMEM;
 		ecryptfs_printk(KERN_ERR, "Out of memory whilst attempting "
@@ -404,7 +404,7 @@
 	/* Released in this function */
 	page_virt =
 	    (char *)kmem_cache_alloc(ecryptfs_header_cache_2,
-				     SLAB_USER);
+				     GFP_USER);
 	if (!page_virt) {
 		rc = -ENOMEM;
 		ecryptfs_printk(KERN_ERR,
@@ -795,7 +795,7 @@
 	/* Released at out_free: label */
 	ecryptfs_set_file_private(&fake_ecryptfs_file,
 				  kmem_cache_alloc(ecryptfs_file_info_cache,
-						   SLAB_KERNEL));
+						   GFP_KERNEL));
 	if (unlikely(!ecryptfs_file_to_private(&fake_ecryptfs_file))) {
 		rc = -ENOMEM;
 		goto out;
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index c3746f5..745c0f1 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -207,7 +207,7 @@
 	/* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or
 	 * at end of function upon failure */
 	auth_tok_list_item =
-	    kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, SLAB_KERNEL);
+	    kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL);
 	if (!auth_tok_list_item) {
 		ecryptfs_printk(KERN_ERR, "Unable to allocate memory\n");
 		rc = -ENOMEM;
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index a78d87d..3ede12b 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -378,7 +378,7 @@
 	/* Released in ecryptfs_put_super() */
 	ecryptfs_set_superblock_private(sb,
 					kmem_cache_alloc(ecryptfs_sb_info_cache,
-							 SLAB_KERNEL));
+							 GFP_KERNEL));
 	if (!ecryptfs_superblock_to_private(sb)) {
 		ecryptfs_printk(KERN_WARNING, "Out of memory\n");
 		rc = -ENOMEM;
@@ -402,7 +402,7 @@
 	/* through deactivate_super(sb) from get_sb_nodev() */
 	ecryptfs_set_dentry_private(sb->s_root,
 				    kmem_cache_alloc(ecryptfs_dentry_info_cache,
-						     SLAB_KERNEL));
+						     GFP_KERNEL));
 	if (!ecryptfs_dentry_to_private(sb->s_root)) {
 		ecryptfs_printk(KERN_ERR,
 				"dentry_info_cache alloc failed\n");
@@ -546,7 +546,7 @@
 }
 
 static struct ecryptfs_cache_info {
-	kmem_cache_t **cache;
+	struct kmem_cache **cache;
 	const char *name;
 	size_t size;
 	void (*ctor)(void*, struct kmem_cache *, unsigned long);
@@ -691,7 +691,7 @@
 
 static struct ecryptfs_attribute sysfs_attr_version = __ATTR_RO(version);
 
-struct ecryptfs_version_str_map_elem {
+static struct ecryptfs_version_str_map_elem {
 	u32 flag;
 	char *str;
 } ecryptfs_version_str_map[] = {
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 825757a..eaa5daa 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -50,7 +50,7 @@
 	struct inode *inode = NULL;
 
 	ecryptfs_inode = kmem_cache_alloc(ecryptfs_inode_info_cache,
-					  SLAB_KERNEL);
+					  GFP_KERNEL);
 	if (unlikely(!ecryptfs_inode))
 		goto out;
 	ecryptfs_init_crypt_stat(&ecryptfs_inode->crypt_stat);
diff --git a/fs/efs/super.c b/fs/efs/super.c
index b3f5065..dfebf21 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -52,12 +52,12 @@
 };
 
 
-static kmem_cache_t * efs_inode_cachep;
+static struct kmem_cache * efs_inode_cachep;
 
 static struct inode *efs_alloc_inode(struct super_block *sb)
 {
 	struct efs_inode_info *ei;
-	ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, SLAB_KERNEL);
+	ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -68,7 +68,7 @@
 	kmem_cache_free(efs_inode_cachep, INODE_INFO(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct efs_inode_info *ei = (struct efs_inode_info *) foo;
 
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ae228ec..88a6f8d 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -283,10 +283,10 @@
 static struct poll_safewake psw;
 
 /* Slab cache used to allocate "struct epitem" */
-static kmem_cache_t *epi_cache __read_mostly;
+static struct kmem_cache *epi_cache __read_mostly;
 
 /* Slab cache used to allocate "struct eppoll_entry" */
-static kmem_cache_t *pwq_cache __read_mostly;
+static struct kmem_cache *pwq_cache __read_mostly;
 
 /* Virtual fs used to allocate inodes for eventpoll files */
 static struct vfsmount *eventpoll_mnt __read_mostly;
@@ -961,7 +961,7 @@
 	struct epitem *epi = ep_item_from_epqueue(pt);
 	struct eppoll_entry *pwq;
 
-	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, SLAB_KERNEL))) {
+	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
 		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
 		pwq->whead = whead;
 		pwq->base = epi;
@@ -1004,7 +1004,7 @@
 	struct ep_pqueue epq;
 
 	error = -ENOMEM;
-	if (!(epi = kmem_cache_alloc(epi_cache, SLAB_KERNEL)))
+	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
 		goto eexit_1;
 
 	/* Item initialization follow here ... */
diff --git a/fs/exec.c b/fs/exec.c
index d993ea1..add0e03 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -404,7 +404,7 @@
 		bprm->loader += stack_base;
 	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt)
 		return -ENOMEM;
 
@@ -1515,7 +1515,8 @@
 		ispipe = 1;
  	} else
  		file = filp_open(corename,
-				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE, 0600);
+				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
+				 0600);
 	if (IS_ERR(file))
 		goto fail_unlock;
 	inode = file->f_dentry->d_inode;
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index 1dfba77..e3cf8c8 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -44,6 +44,7 @@
 		if (!S_ISDIR(inode->i_mode))
 			flags &= ~EXT2_DIRSYNC_FL;
 
+		mutex_lock(&inode->i_mutex);
 		oldflags = ei->i_flags;
 
 		/*
@@ -53,13 +54,16 @@
 		 * This test looks nicer. Thanks to Pauline Middelink
 		 */
 		if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) {
-			if (!capable(CAP_LINUX_IMMUTABLE))
+			if (!capable(CAP_LINUX_IMMUTABLE)) {
+				mutex_unlock(&inode->i_mutex);
 				return -EPERM;
+			}
 		}
 
 		flags = flags & EXT2_FL_USER_MODIFIABLE;
 		flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE;
 		ei->i_flags = flags;
+		mutex_unlock(&inode->i_mutex);
 
 		ext2_set_inode_flags(inode);
 		inode->i_ctime = CURRENT_TIME_SEC;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index d8b9abd..255cef5 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -135,12 +135,12 @@
 	return;
 }
 
-static kmem_cache_t * ext2_inode_cachep;
+static struct kmem_cache * ext2_inode_cachep;
 
 static struct inode *ext2_alloc_inode(struct super_block *sb)
 {
 	struct ext2_inode_info *ei;
-	ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, SLAB_KERNEL);
+	ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 #ifdef CONFIG_EXT2_FS_POSIX_ACL
@@ -156,7 +156,7 @@
 	kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;
 
@@ -1090,8 +1090,10 @@
 {
 	struct super_block *sb = dentry->d_sb;
 	struct ext2_sb_info *sbi = EXT2_SB(sb);
+	struct ext2_super_block *es = sbi->s_es;
 	unsigned long overhead;
 	int i;
+	u64 fsid;
 
 	if (test_opt (sb, MINIX_DF))
 		overhead = 0;
@@ -1104,7 +1106,7 @@
 		 * All of the blocks before first_data_block are
 		 * overhead
 		 */
-		overhead = le32_to_cpu(sbi->s_es->s_first_data_block);
+		overhead = le32_to_cpu(es->s_first_data_block);
 
 		/*
 		 * Add the overhead attributed to the superblock and
@@ -1125,14 +1127,18 @@
 
 	buf->f_type = EXT2_SUPER_MAGIC;
 	buf->f_bsize = sb->s_blocksize;
-	buf->f_blocks = le32_to_cpu(sbi->s_es->s_blocks_count) - overhead;
+	buf->f_blocks = le32_to_cpu(es->s_blocks_count) - overhead;
 	buf->f_bfree = ext2_count_free_blocks(sb);
-	buf->f_bavail = buf->f_bfree - le32_to_cpu(sbi->s_es->s_r_blocks_count);
-	if (buf->f_bfree < le32_to_cpu(sbi->s_es->s_r_blocks_count))
+	buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
+	if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
 		buf->f_bavail = 0;
-	buf->f_files = le32_to_cpu(sbi->s_es->s_inodes_count);
-	buf->f_ffree = ext2_count_free_inodes (sb);
+	buf->f_files = le32_to_cpu(es->s_inodes_count);
+	buf->f_ffree = ext2_count_free_inodes(sb);
 	buf->f_namelen = EXT2_NAME_LEN;
+	fsid = le64_to_cpup((void *)es->s_uuid) ^
+	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
+	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
+	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
 	return 0;
 }
 
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index af52a7f..247efd0 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -342,12 +342,9 @@
 	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
 		return;
 
-	lock_super(sb);
-	EXT2_SB(sb)->s_es->s_feature_compat |=
-		cpu_to_le32(EXT2_FEATURE_COMPAT_EXT_ATTR);
+	EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
 	sb->s_dirt = 1;
 	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
-	unlock_super(sb);
 }
 
 /*
diff --git a/fs/ext3/Makefile b/fs/ext3/Makefile
index 704cd44..e77766a 100644
--- a/fs/ext3/Makefile
+++ b/fs/ext3/Makefile
@@ -5,7 +5,7 @@
 obj-$(CONFIG_EXT3_FS) += ext3.o
 
 ext3-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
-	   ioctl.o namei.o super.o symlink.o hash.o resize.o
+	   ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o
 
 ext3-$(CONFIG_EXT3_FS_XATTR)	 += xattr.o xattr_user.o xattr_trusted.o
 ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index b41a7d7..2216174 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -144,7 +144,7 @@
 
 	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
 	while (n) {
-		rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node);
+		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
 		if (verbose)
 			printk("reservation window 0x%p "
 			       "start:  %lu, end:  %lu\n",
@@ -730,7 +730,7 @@
 		here = 0;
 
 	p = ((char *)bh->b_data) + (here >> 3);
-	r = memscan(p, 0, (maxblocks - here + 7) >> 3);
+	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
 	next = (r - ((char *)bh->b_data)) << 3;
 
 	if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
@@ -949,7 +949,7 @@
 
 		prev = rsv;
 		next = rb_next(&rsv->rsv_node);
-		rsv = list_entry(next,struct ext3_reserve_window_node,rsv_node);
+		rsv = rb_entry(next,struct ext3_reserve_window_node,rsv_node);
 
 		/*
 		 * Reached the last reservation, we can just append to the
@@ -1148,7 +1148,7 @@
 	 * check if the first free block is within the
 	 * free space we just reserved
 	 */
-	if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end)
+	if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
 		return 0;		/* success */
 	/*
 	 * if the first free bit we found is out of the reservable space
@@ -1193,7 +1193,7 @@
 	if (!next)
 		my_rsv->rsv_end += size;
 	else {
-		next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node);
+		next_rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node);
 
 		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
 			my_rsv->rsv_end += size;
@@ -1271,7 +1271,7 @@
 	}
 	/*
 	 * grp_goal is a group relative block number (if there is a goal)
-	 * 0 < grp_goal < EXT3_BLOCKS_PER_GROUP(sb)
+	 * 0 <= grp_goal < EXT3_BLOCKS_PER_GROUP(sb)
 	 * first block is a filesystem wide block number
 	 * first block is the block number of the first block in this group
 	 */
@@ -1307,10 +1307,14 @@
 			if (!goal_in_my_reservation(&my_rsv->rsv_window,
 							grp_goal, group, sb))
 				grp_goal = -1;
-		} else if (grp_goal > 0 &&
-			  (my_rsv->rsv_end-grp_goal+1) < *count)
-			try_to_extend_reservation(my_rsv, sb,
-					*count-my_rsv->rsv_end + grp_goal - 1);
+		} else if (grp_goal >= 0) {
+			int curr = my_rsv->rsv_end -
+					(grp_goal + group_first_block) + 1;
+
+			if (curr < *count)
+				try_to_extend_reservation(my_rsv, sb,
+							*count - curr);
+		}
 
 		if ((my_rsv->rsv_start > group_last_block) ||
 				(my_rsv->rsv_end < group_first_block)) {
@@ -1511,10 +1515,8 @@
 		if (group_no >= ngroups)
 			group_no = 0;
 		gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
-		if (!gdp) {
-			*errp = -EIO;
-			goto out;
-		}
+		if (!gdp)
+			goto io_error;
 		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
 		/*
 		 * skip this group if the number of
@@ -1548,6 +1550,7 @@
 	 */
 	if (my_rsv) {
 		my_rsv = NULL;
+		windowsz = 0;
 		group_no = goal_group;
 		goto retry_alloc;
 	}
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index d0b54f3..5a9313e 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -154,6 +154,9 @@
 			ext3_error (sb, "ext3_readdir",
 				"directory #%lu contains a hole at offset %lu",
 				inode->i_ino, (unsigned long)filp->f_pos);
+			/* corrupt size?  Maybe no more blocks to read */
+			if (filp->f_pos > inode->i_blocks << 9)
+				break;
 			filp->f_pos += sb->s_blocksize - offset;
 			continue;
 		}
diff --git a/fs/ext3/ext3_jbd.c b/fs/ext3/ext3_jbd.c
new file mode 100644
index 0000000..e1f91fd
--- /dev/null
+++ b/fs/ext3/ext3_jbd.c
@@ -0,0 +1,59 @@
+/*
+ * Interface between ext3 and JBD
+ */
+
+#include <linux/ext3_jbd.h>
+
+int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
+				struct buffer_head *bh)
+{
+	int err = journal_get_undo_access(handle, bh);
+	if (err)
+		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext3_journal_get_write_access(const char *where, handle_t *handle,
+				struct buffer_head *bh)
+{
+	int err = journal_get_write_access(handle, bh);
+	if (err)
+		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext3_journal_forget(const char *where, handle_t *handle,
+				struct buffer_head *bh)
+{
+	int err = journal_forget(handle, bh);
+	if (err)
+		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext3_journal_revoke(const char *where, handle_t *handle,
+				unsigned long blocknr, struct buffer_head *bh)
+{
+	int err = journal_revoke(handle, blocknr, bh);
+	if (err)
+		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext3_journal_get_create_access(const char *where,
+				handle_t *handle, struct buffer_head *bh)
+{
+	int err = journal_get_create_access(handle, bh);
+	if (err)
+		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext3_journal_dirty_metadata(const char *where,
+				handle_t *handle, struct buffer_head *bh)
+{
+	int err = journal_dirty_metadata(handle, bh);
+	if (err)
+		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 03ba5bc..beaf25f 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1148,37 +1148,102 @@
 	return ext3_journal_get_write_access(handle, bh);
 }
 
+/*
+ * The idea of this helper function is the following:
+ * if prepare_write has allocated some blocks, but not all of them, the
+ * transaction must include the content of the newly allocated blocks.
+ * This content is expected to be set to zeroes by block_prepare_write().
+ * 2006/10/14  SAW
+ */
+static int ext3_prepare_failure(struct file *file, struct page *page,
+				unsigned from, unsigned to)
+{
+	struct address_space *mapping;
+	struct buffer_head *bh, *head, *next;
+	unsigned block_start, block_end;
+	unsigned blocksize;
+	int ret;
+	handle_t *handle = ext3_journal_current_handle();
+
+	mapping = page->mapping;
+	if (ext3_should_writeback_data(mapping->host)) {
+		/* optimization: no constraints about data */
+skip:
+		return ext3_journal_stop(handle);
+	}
+
+	head = page_buffers(page);
+	blocksize = head->b_size;
+	for (bh = head, block_start = 0;
+	     bh != head || !block_start;
+	     block_start = block_end, bh = next)
+	{
+		next = bh->b_this_page;
+		block_end = block_start + blocksize;
+		if (block_end <= from)
+			continue;
+		if (block_start >= to) {
+			block_start = to;
+			break;
+		}
+		if (!buffer_mapped(bh))
+			/* prepare_write failed on this bh */
+			break;
+		if (ext3_should_journal_data(mapping->host)) {
+			ret = do_journal_get_write_access(handle, bh);
+			if (ret) {
+				ext3_journal_stop(handle);
+				return ret;
+			}
+		}
+	/*
+	 * block_start here becomes the first block where the current iteration
+	 * of prepare_write failed.
+	 */
+	}
+	if (block_start <= from)
+		goto skip;
+
+	/* commit allocated and zeroed buffers */
+	return mapping->a_ops->commit_write(file, page, from, block_start);
+}
+
 static int ext3_prepare_write(struct file *file, struct page *page,
 			      unsigned from, unsigned to)
 {
 	struct inode *inode = page->mapping->host;
-	int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
+	int ret, ret2;
+	int needed_blocks = ext3_writepage_trans_blocks(inode);
 	handle_t *handle;
 	int retries = 0;
 
 retry:
 	handle = ext3_journal_start(inode, needed_blocks);
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		goto out;
-	}
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
 	if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
 		ret = nobh_prepare_write(page, from, to, ext3_get_block);
 	else
 		ret = block_prepare_write(page, from, to, ext3_get_block);
 	if (ret)
-		goto prepare_write_failed;
+		goto failure;
 
 	if (ext3_should_journal_data(inode)) {
 		ret = walk_page_buffers(handle, page_buffers(page),
 				from, to, NULL, do_journal_get_write_access);
+		if (ret)
+			/* fatal error, just put the handle and return */
+			journal_stop(handle);
 	}
-prepare_write_failed:
-	if (ret)
-		ext3_journal_stop(handle);
+	return ret;
+
+failure:
+	ret2 = ext3_prepare_failure(file, page, from, to);
+	if (ret2 < 0)
+		return ret2;
 	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
 		goto retry;
-out:
+	/* retry number exceeded, or other error like -EDQUOT */
 	return ret;
 }
 
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 906731a..60d2f9d 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -552,6 +552,15 @@
 					   dir->i_sb->s_blocksize -
 					   EXT3_DIR_REC_LEN(0));
 	for (; de < top; de = ext3_next_entry(de)) {
+		if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
+					(block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb))
+						+((char *)de - bh->b_data))) {
+			/* On error, skip the f_pos to the next block. */
+			dir_file->f_pos = (dir_file->f_pos |
+					(dir->i_sb->s_blocksize - 1)) + 1;
+			brelse (bh);
+			return count;
+		}
 		ext3fs_dirhash(de->name, de->name_len, hinfo);
 		if ((hinfo->hash < start_hash) ||
 		    ((hinfo->hash == start_hash) &&
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index afc2d4f..580b8a6 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -436,7 +436,7 @@
 	return;
 }
 
-static kmem_cache_t *ext3_inode_cachep;
+static struct kmem_cache *ext3_inode_cachep;
 
 /*
  * Called inside transaction, so use GFP_NOFS
@@ -445,7 +445,7 @@
 {
 	struct ext3_inode_info *ei;
 
-	ei = kmem_cache_alloc(ext3_inode_cachep, SLAB_NOFS);
+	ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS);
 	if (!ei)
 		return NULL;
 #ifdef CONFIG_EXT3_FS_POSIX_ACL
@@ -462,7 +462,7 @@
 	kmem_cache_free(ext3_inode_cachep, EXT3_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;
 
@@ -1264,6 +1264,12 @@
 		return;
 	}
 
+	if (bdev_read_only(sb->s_bdev)) {
+		printk(KERN_ERR "EXT3-fs: write access "
+			"unavailable, skipping orphan cleanup.\n");
+		return;
+	}
+
 	if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
 		if (es->s_last_orphan)
 			jbd_debug(1, "Errors on filesystem, "
@@ -2387,6 +2393,7 @@
 	struct ext3_super_block *es = sbi->s_es;
 	ext3_fsblk_t overhead;
 	int i;
+	u64 fsid;
 
 	if (test_opt (sb, MINIX_DF))
 		overhead = 0;
@@ -2433,6 +2440,10 @@
 	buf->f_files = le32_to_cpu(es->s_inodes_count);
 	buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter);
 	buf->f_namelen = EXT3_NAME_LEN;
+	fsid = le64_to_cpup((void *)es->s_uuid) ^
+	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
+	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
+	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
 	return 0;
 }
 
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index f86f248..99857a40 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -459,14 +459,11 @@
 	if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR))
 		return;
 
-	lock_super(sb);
 	if (ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh) == 0) {
-		EXT3_SB(sb)->s_es->s_feature_compat |=
-			cpu_to_le32(EXT3_FEATURE_COMPAT_EXT_ATTR);
+		EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR);
 		sb->s_dirt = 1;
 		ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
 	}
-	unlock_super(sb);
 }
 
 /*
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index a6acb96..ae6e7e5 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -5,7 +5,8 @@
 obj-$(CONFIG_EXT4DEV_FS) += ext4dev.o
 
 ext4dev-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
-	   ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o
+		   ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
+		   ext4_jbd2.o
 
 ext4dev-$(CONFIG_EXT4DEV_FS_XATTR)	+= xattr.o xattr_user.o xattr_trusted.o
 ext4dev-$(CONFIG_EXT4DEV_FS_POSIX_ACL)	+= acl.o
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 5d45582..c4dd110 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -165,7 +165,7 @@
 
 	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
 	while (n) {
-		rsv = list_entry(n, struct ext4_reserve_window_node, rsv_node);
+		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
 		if (verbose)
 			printk("reservation window 0x%p "
 			       "start:  %llu, end:  %llu\n",
@@ -747,7 +747,7 @@
 		here = 0;
 
 	p = ((char *)bh->b_data) + (here >> 3);
-	r = memscan(p, 0, (maxblocks - here + 7) >> 3);
+	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
 	next = (r - ((char *)bh->b_data)) << 3;
 
 	if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
@@ -966,7 +966,7 @@
 
 		prev = rsv;
 		next = rb_next(&rsv->rsv_node);
-		rsv = list_entry(next,struct ext4_reserve_window_node,rsv_node);
+		rsv = rb_entry(next,struct ext4_reserve_window_node,rsv_node);
 
 		/*
 		 * Reached the last reservation, we can just append to the
@@ -1165,7 +1165,7 @@
 	 * check if the first free block is within the
 	 * free space we just reserved
 	 */
-	if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end)
+	if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
 		return 0;		/* success */
 	/*
 	 * if the first free bit we found is out of the reservable space
@@ -1210,7 +1210,7 @@
 	if (!next)
 		my_rsv->rsv_end += size;
 	else {
-		next_rsv = list_entry(next, struct ext4_reserve_window_node, rsv_node);
+		next_rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);
 
 		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
 			my_rsv->rsv_end += size;
@@ -1288,7 +1288,7 @@
 	}
 	/*
 	 * grp_goal is a group relative block number (if there is a goal)
-	 * 0 < grp_goal < EXT4_BLOCKS_PER_GROUP(sb)
+	 * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb)
 	 * first block is a filesystem wide block number
 	 * first block is the block number of the first block in this group
 	 */
@@ -1324,10 +1324,14 @@
 			if (!goal_in_my_reservation(&my_rsv->rsv_window,
 							grp_goal, group, sb))
 				grp_goal = -1;
-		} else if (grp_goal > 0 &&
-			  (my_rsv->rsv_end-grp_goal+1) < *count)
-			try_to_extend_reservation(my_rsv, sb,
-					*count-my_rsv->rsv_end + grp_goal - 1);
+		} else if (grp_goal >= 0) {
+			int curr = my_rsv->rsv_end -
+					(grp_goal + group_first_block) + 1;
+
+			if (curr < *count)
+				try_to_extend_reservation(my_rsv, sb,
+							*count - curr);
+		}
 
 		if ((my_rsv->rsv_start > group_last_block) ||
 				(my_rsv->rsv_end < group_first_block)) {
@@ -1525,10 +1529,8 @@
 		if (group_no >= ngroups)
 			group_no = 0;
 		gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
-		if (!gdp) {
-			*errp = -EIO;
-			goto out;
-		}
+		if (!gdp)
+			goto io_error;
 		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
 		/*
 		 * skip this group if the number of
@@ -1562,6 +1564,7 @@
 	 */
 	if (my_rsv) {
 		my_rsv = NULL;
+		windowsz = 0;
 		group_no = goal_group;
 		goto retry_alloc;
 	}
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index f859578..f2ed3e7 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -153,6 +153,9 @@
 			ext4_error (sb, "ext4_readdir",
 				"directory #%lu contains a hole at offset %lu",
 				inode->i_ino, (unsigned long)filp->f_pos);
+			/* corrupt size?  Maybe no more blocks to read */
+			if (filp->f_pos > inode->i_blocks << 9)
+				break;
 			filp->f_pos += sb->s_blocksize - offset;
 			continue;
 		}
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
new file mode 100644
index 0000000..d6afe4e
--- /dev/null
+++ b/fs/ext4/ext4_jbd2.c
@@ -0,0 +1,59 @@
+/*
+ * Interface between ext4 and JBD
+ */
+
+#include <linux/ext4_jbd2.h>
+
+int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
+				struct buffer_head *bh)
+{
+	int err = jbd2_journal_get_undo_access(handle, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext4_journal_get_write_access(const char *where, handle_t *handle,
+				struct buffer_head *bh)
+{
+	int err = jbd2_journal_get_write_access(handle, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext4_journal_forget(const char *where, handle_t *handle,
+				struct buffer_head *bh)
+{
+	int err = jbd2_journal_forget(handle, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext4_journal_revoke(const char *where, handle_t *handle,
+				ext4_fsblk_t blocknr, struct buffer_head *bh)
+{
+	int err = jbd2_journal_revoke(handle, blocknr, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext4_journal_get_create_access(const char *where,
+				handle_t *handle, struct buffer_head *bh)
+{
+	int err = jbd2_journal_get_create_access(handle, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext4_journal_dirty_metadata(const char *where,
+				handle_t *handle, struct buffer_head *bh)
+{
+	int err = jbd2_journal_dirty_metadata(handle, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 2608dce..dc2724f 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -48,7 +48,7 @@
  * ext_pblock:
  * combine low and high parts of physical block number into ext4_fsblk_t
  */
-static inline ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
+static ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
 {
 	ext4_fsblk_t block;
 
@@ -61,7 +61,7 @@
  * idx_pblock:
  * combine low and high parts of a leaf physical block number into ext4_fsblk_t
  */
-static inline ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
+static ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
 {
 	ext4_fsblk_t block;
 
@@ -75,7 +75,7 @@
  * stores a large physical block number into an extent struct,
  * breaking it into parts
  */
-static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
+static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
 {
 	ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff));
 	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
@@ -86,7 +86,7 @@
  * stores a large physical block number into an index struct,
  * breaking it into parts
  */
-static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
+static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
 {
 	ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff));
 	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
@@ -186,7 +186,8 @@
 		depth = path->p_depth;
 
 		/* try to predict block placement */
-		if ((ex = path[depth].p_ext))
+		ex = path[depth].p_ext;
+		if (ex)
 			return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));
 
 		/* it looks like index is empty;
@@ -215,7 +216,7 @@
 	return newblock;
 }
 
-static inline int ext4_ext_space_block(struct inode *inode)
+static int ext4_ext_space_block(struct inode *inode)
 {
 	int size;
 
@@ -228,7 +229,7 @@
 	return size;
 }
 
-static inline int ext4_ext_space_block_idx(struct inode *inode)
+static int ext4_ext_space_block_idx(struct inode *inode)
 {
 	int size;
 
@@ -241,7 +242,7 @@
 	return size;
 }
 
-static inline int ext4_ext_space_root(struct inode *inode)
+static int ext4_ext_space_root(struct inode *inode)
 {
 	int size;
 
@@ -255,7 +256,7 @@
 	return size;
 }
 
-static inline int ext4_ext_space_root_idx(struct inode *inode)
+static int ext4_ext_space_root_idx(struct inode *inode)
 {
 	int size;
 
@@ -476,13 +477,12 @@
 
 	/* account possible depth increase */
 	if (!path) {
-		path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 2),
+		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
 				GFP_NOFS);
 		if (!path)
 			return ERR_PTR(-ENOMEM);
 		alloc = 1;
 	}
-	memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
 	path[0].p_hdr = eh;
 
 	/* walk through the tree */
@@ -543,7 +543,8 @@
 	struct ext4_extent_idx *ix;
 	int len, err;
 
-	if ((err = ext4_ext_get_access(handle, inode, curp)))
+	err = ext4_ext_get_access(handle, inode, curp);
+	if (err)
 		return err;
 
 	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
@@ -641,10 +642,9 @@
 	 * We need this to handle errors and free blocks
 	 * upon them.
 	 */
-	ablocks = kmalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
+	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
 	if (!ablocks)
 		return -ENOMEM;
-	memset(ablocks, 0, sizeof(ext4_fsblk_t) * depth);
 
 	/* allocate all needed blocks */
 	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
@@ -665,7 +665,8 @@
 	}
 	lock_buffer(bh);
 
-	if ((err = ext4_journal_get_create_access(handle, bh)))
+	err = ext4_journal_get_create_access(handle, bh);
+	if (err)
 		goto cleanup;
 
 	neh = ext_block_hdr(bh);
@@ -702,18 +703,21 @@
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
 
-	if ((err = ext4_journal_dirty_metadata(handle, bh)))
+	err = ext4_journal_dirty_metadata(handle, bh);
+	if (err)
 		goto cleanup;
 	brelse(bh);
 	bh = NULL;
 
 	/* correct old leaf */
 	if (m) {
-		if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+		err = ext4_ext_get_access(handle, inode, path + depth);
+		if (err)
 			goto cleanup;
 		path[depth].p_hdr->eh_entries =
 		     cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
-		if ((err = ext4_ext_dirty(handle, inode, path + depth)))
+		err = ext4_ext_dirty(handle, inode, path + depth);
+		if (err)
 			goto cleanup;
 
 	}
@@ -736,7 +740,8 @@
 		}
 		lock_buffer(bh);
 
-		if ((err = ext4_journal_get_create_access(handle, bh)))
+		err = ext4_journal_get_create_access(handle, bh);
+		if (err)
 			goto cleanup;
 
 		neh = ext_block_hdr(bh);
@@ -780,7 +785,8 @@
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
 
-		if ((err = ext4_journal_dirty_metadata(handle, bh)))
+		err = ext4_journal_dirty_metadata(handle, bh);
+		if (err)
 			goto cleanup;
 		brelse(bh);
 		bh = NULL;
@@ -800,9 +806,6 @@
 	}
 
 	/* insert new index */
-	if (err)
-		goto cleanup;
-
 	err = ext4_ext_insert_index(handle, inode, path + at,
 				    le32_to_cpu(border), newblock);
 
@@ -857,7 +860,8 @@
 	}
 	lock_buffer(bh);
 
-	if ((err = ext4_journal_get_create_access(handle, bh))) {
+	err = ext4_journal_get_create_access(handle, bh);
+	if (err) {
 		unlock_buffer(bh);
 		goto out;
 	}
@@ -877,11 +881,13 @@
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
 
-	if ((err = ext4_journal_dirty_metadata(handle, bh)))
+	err = ext4_journal_dirty_metadata(handle, bh);
+	if (err)
 		goto out;
 
 	/* create index in new top-level index: num,max,pointer */
-	if ((err = ext4_ext_get_access(handle, inode, curp)))
+	err = ext4_ext_get_access(handle, inode, curp);
+	if (err)
 		goto out;
 
 	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
@@ -1073,27 +1079,31 @@
 	 */
 	k = depth - 1;
 	border = path[depth].p_ext->ee_block;
-	if ((err = ext4_ext_get_access(handle, inode, path + k)))
+	err = ext4_ext_get_access(handle, inode, path + k);
+	if (err)
 		return err;
 	path[k].p_idx->ei_block = border;
-	if ((err = ext4_ext_dirty(handle, inode, path + k)))
+	err = ext4_ext_dirty(handle, inode, path + k);
+	if (err)
 		return err;
 
 	while (k--) {
 		/* change all left-side indexes */
 		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
 			break;
-		if ((err = ext4_ext_get_access(handle, inode, path + k)))
+		err = ext4_ext_get_access(handle, inode, path + k);
+		if (err)
 			break;
 		path[k].p_idx->ei_block = border;
-		if ((err = ext4_ext_dirty(handle, inode, path + k)))
+		err = ext4_ext_dirty(handle, inode, path + k);
+		if (err)
 			break;
 	}
 
 	return err;
 }
 
-static int inline
+static int
 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
 				struct ext4_extent *ex2)
 {
@@ -1145,7 +1155,8 @@
 				le16_to_cpu(newext->ee_len),
 				le32_to_cpu(ex->ee_block),
 				le16_to_cpu(ex->ee_len), ext_pblock(ex));
-		if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+		err = ext4_ext_get_access(handle, inode, path + depth);
+		if (err)
 			return err;
 		ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
 					 + le16_to_cpu(newext->ee_len));
@@ -1195,7 +1206,8 @@
 has_space:
 	nearex = path[depth].p_ext;
 
-	if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+	err = ext4_ext_get_access(handle, inode, path + depth);
+	if (err)
 		goto cleanup;
 
 	if (!nearex) {
@@ -1383,7 +1395,7 @@
 	return err;
 }
 
-static inline void
+static void
 ext4_ext_put_in_cache(struct inode *inode, __u32 block,
 			__u32 len, __u32 start, int type)
 {
@@ -1401,7 +1413,7 @@
  * calculate boundaries of the gap that the requested block fits into
  * and cache this gap
  */
-static inline void
+static void
 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
 				unsigned long block)
 {
@@ -1442,7 +1454,7 @@
 	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
 }
 
-static inline int
+static int
 ext4_ext_in_cache(struct inode *inode, unsigned long block,
 			struct ext4_extent *ex)
 {
@@ -1489,10 +1501,12 @@
 	path--;
 	leaf = idx_pblock(path->p_idx);
 	BUG_ON(path->p_hdr->eh_entries == 0);
-	if ((err = ext4_ext_get_access(handle, inode, path)))
+	err = ext4_ext_get_access(handle, inode, path);
+	if (err)
 		return err;
 	path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
-	if ((err = ext4_ext_dirty(handle, inode, path)))
+	err = ext4_ext_dirty(handle, inode, path);
+	if (err)
 		return err;
 	ext_debug("index is empty, remove it, free block %llu\n", leaf);
 	bh = sb_find_get_block(inode->i_sb, leaf);
@@ -1509,7 +1523,7 @@
  * the caller should calculate credits under truncate_mutex and
  * pass the actual path.
  */
-int inline ext4_ext_calc_credits_for_insert(struct inode *inode,
+int ext4_ext_calc_credits_for_insert(struct inode *inode,
 						struct ext4_ext_path *path)
 {
 	int depth, needed;
@@ -1534,16 +1548,17 @@
 
 	/*
 	 * tree can be full, so it would need to grow in depth:
-	 * allocation + old root + new root
+	 * we need one credit to modify old root, credits for
+	 * new root will be added in split accounting
 	 */
-	needed += 2 + 1 + 1;
+	needed += 1;
 
 	/*
 	 * Index split can happen, we would need:
 	 *    allocate intermediate indexes (bitmap + group)
 	 *  + change two blocks at each level, but root (already included)
 	 */
-	needed = (depth * 2) + (depth * 2);
+	needed += (depth * 2) + (depth * 2);
 
 	/* any allocation modifies superblock */
 	needed += 1;
@@ -1718,7 +1733,7 @@
  * ext4_ext_more_to_rm:
  * returns 1 if current index has to be freed (even partial)
  */
-static int inline
+static int
 ext4_ext_more_to_rm(struct ext4_ext_path *path)
 {
 	BUG_ON(path->p_idx == NULL);
@@ -1756,12 +1771,11 @@
 	 * We start scanning from right side, freeing all the blocks
 	 * after i_size and walking into the tree depth-wise.
 	 */
-	path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
+	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
 	if (path == NULL) {
 		ext4_journal_stop(handle);
 		return -ENOMEM;
 	}
-	memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
 	path[0].p_hdr = ext_inode_hdr(inode);
 	if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
 		err = -EIO;
@@ -1932,7 +1946,8 @@
 	mutex_lock(&EXT4_I(inode)->truncate_mutex);
 
 	/* check in cache */
-	if ((goal = ext4_ext_in_cache(inode, iblock, &newex))) {
+	goal = ext4_ext_in_cache(inode, iblock, &newex);
+	if (goal) {
 		if (goal == EXT4_EXT_CACHE_GAP) {
 			if (!create) {
 				/* block isn't allocated yet and
@@ -1971,7 +1986,8 @@
 	 */
 	BUG_ON(path[depth].p_ext == NULL && depth != 0);
 
-	if ((ex = path[depth].p_ext)) {
+	ex = path[depth].p_ext;
+	if (ex) {
 	        unsigned long ee_block = le32_to_cpu(ex->ee_block);
 		ext4_fsblk_t ee_start = ext_pblock(ex);
 		unsigned short ee_len  = le16_to_cpu(ex->ee_len);
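The ext4 extents hunks above repeatedly apply the same mechanical cleanup: an assignment buried inside an if condition is split into a plain assignment followed by a separate test. As a rough userspace illustration of the before/after shape (do_work() is a hypothetical helper, not anything from the patch):

    #include <stdio.h>

    static int do_work(void)          /* hypothetical helper, not from the patch */
    {
            return -1;                /* pretend the call failed */
    }

    int main(void)
    {
            int err;

            /* old style: assignment buried inside the condition */
            if ((err = do_work()))
                    printf("old style saw err=%d\n", err);

            /* new style used throughout the ext4 hunks above */
            err = do_work();
            if (err)
                    printf("new style saw err=%d\n", err);

            return 0;
    }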
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 0a60ec5..1d85d4e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1147,37 +1147,102 @@
 	return ext4_journal_get_write_access(handle, bh);
 }
 
+/*
+ * The idea of this helper function is the following:
+ * if prepare_write has allocated some blocks, but not all of them, the
+ * transaction must include the content of the newly allocated blocks.
+ * This content is expected to be set to zeroes by block_prepare_write().
+ * 2006/10/14  SAW
+ */
+static int ext4_prepare_failure(struct file *file, struct page *page,
+				unsigned from, unsigned to)
+{
+	struct address_space *mapping;
+	struct buffer_head *bh, *head, *next;
+	unsigned block_start, block_end;
+	unsigned blocksize;
+	int ret;
+	handle_t *handle = ext4_journal_current_handle();
+
+	mapping = page->mapping;
+	if (ext4_should_writeback_data(mapping->host)) {
+		/* optimization: no constraints about data */
+skip:
+		return ext4_journal_stop(handle);
+	}
+
+	head = page_buffers(page);
+	blocksize = head->b_size;
+	for (	bh = head, block_start = 0;
+		bh != head || !block_start;
+	    	block_start = block_end, bh = next)
+	{
+		next = bh->b_this_page;
+		block_end = block_start + blocksize;
+		if (block_end <= from)
+			continue;
+		if (block_start >= to) {
+			block_start = to;
+			break;
+		}
+		if (!buffer_mapped(bh))
+			/* prepare_write failed on this bh */
+			break;
+		if (ext4_should_journal_data(mapping->host)) {
+			ret = do_journal_get_write_access(handle, bh);
+			if (ret) {
+				ext4_journal_stop(handle);
+				return ret;
+			}
+		}
+	/*
+	 * block_start here becomes the first block where the current iteration
+	 * of prepare_write failed.
+	 */
+	}
+	if (block_start <= from)
+		goto skip;
+
+	/* commit allocated and zeroed buffers */
+	return mapping->a_ops->commit_write(file, page, from, block_start);
+}
+
 static int ext4_prepare_write(struct file *file, struct page *page,
 			      unsigned from, unsigned to)
 {
 	struct inode *inode = page->mapping->host;
-	int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
+	int ret, ret2;
+	int needed_blocks = ext4_writepage_trans_blocks(inode);
 	handle_t *handle;
 	int retries = 0;
 
 retry:
 	handle = ext4_journal_start(inode, needed_blocks);
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		goto out;
-	}
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
 	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
 		ret = nobh_prepare_write(page, from, to, ext4_get_block);
 	else
 		ret = block_prepare_write(page, from, to, ext4_get_block);
 	if (ret)
-		goto prepare_write_failed;
+		goto failure;
 
 	if (ext4_should_journal_data(inode)) {
 		ret = walk_page_buffers(handle, page_buffers(page),
 				from, to, NULL, do_journal_get_write_access);
+		if (ret)
+			/* fatal error, just put the handle and return */
+			journal_stop(handle);
 	}
-prepare_write_failed:
-	if (ret)
-		ext4_journal_stop(handle);
+	return ret;
+
+failure:
+	ret2 = ext4_prepare_failure(file, page, from, to);
+	if (ret2 < 0)
+		return ret2;
 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
 		goto retry;
-out:
+	/* retry number exceeded, or other error like -EDQUOT */
 	return ret;
 }
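ext4_prepare_write() above now returns straight away if the journal handle cannot be started, and on a prepare failure it first lets ext4_prepare_failure() commit whatever blocks were already allocated and zeroed before deciding whether to retry. The retry itself is the usual bounded loop on -ENOSPC; a minimal userspace sketch of that shape, with allocate_blocks() and MAX_RETRIES as made-up stand-ins (the real bound lives in ext4_should_retry_alloc()):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_RETRIES 3                     /* assumed bound, for illustration only */

    static int allocate_blocks(void)          /* hypothetical stand-in for the allocator */
    {
            return -ENOSPC;
    }

    int main(void)
    {
            int retries = 0;
            int ret;

    retry:
            ret = allocate_blocks();
            if (ret == -ENOSPC && retries < MAX_RETRIES) {
                    retries++;
                    /* the real code also waits for a journal commit to free space */
                    goto retry;
            }
            printf("final result %d after %d retries\n", ret, retries);
            return 0;
    }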
 
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 8b1bd03..859990e 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -552,6 +552,15 @@
 					   dir->i_sb->s_blocksize -
 					   EXT4_DIR_REC_LEN(0));
 	for (; de < top; de = ext4_next_entry(de)) {
+		if (!ext4_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
+					(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+						+((char *)de - bh->b_data))) {
+			/* On error, skip the f_pos to the next block. */
+			dir_file->f_pos = (dir_file->f_pos |
+					(dir->i_sb->s_blocksize - 1)) + 1;
+			brelse (bh);
+			return count;
+		}
 		ext4fs_dirhash(de->name, de->name_len, hinfo);
 		if ((hinfo->hash < start_hash) ||
 		    ((hinfo->hash == start_hash) &&
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b4b022a..486a641 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -486,7 +486,7 @@
 	return;
 }
 
-static kmem_cache_t *ext4_inode_cachep;
+static struct kmem_cache *ext4_inode_cachep;
 
 /*
  * Called inside transaction, so use GFP_NOFS
@@ -495,7 +495,7 @@
 {
 	struct ext4_inode_info *ei;
 
-	ei = kmem_cache_alloc(ext4_inode_cachep, SLAB_NOFS);
+	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
 	if (!ei)
 		return NULL;
 #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
@@ -513,7 +513,7 @@
 	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
 
@@ -1321,6 +1321,12 @@
 		return;
 	}
 
+	if (bdev_read_only(sb->s_bdev)) {
+		printk(KERN_ERR "EXT4-fs: write access "
+			"unavailable, skipping orphan cleanup.\n");
+		return;
+	}
+
 	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
 		if (es->s_last_orphan)
 			jbd_debug(1, "Errors on filesystem, "
@@ -2460,6 +2466,7 @@
 	struct ext4_super_block *es = sbi->s_es;
 	ext4_fsblk_t overhead;
 	int i;
+	u64 fsid;
 
 	if (test_opt (sb, MINIX_DF))
 		overhead = 0;
@@ -2506,6 +2513,10 @@
 	buf->f_files = le32_to_cpu(es->s_inodes_count);
 	buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter);
 	buf->f_namelen = EXT4_NAME_LEN;
+	fsid = le64_to_cpup((void *)es->s_uuid) ^
+	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
+	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
+	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
 	return 0;
 }
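The statfs hunk above derives f_fsid by XOR-folding the 16-byte superblock UUID down to 64 bits and splitting the result into two 32-bit words. A rough userspace equivalent, ignoring the le64_to_cpup() byte-order handling the kernel version does:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* example bytes; the real code reads es->s_uuid from the superblock */
            uint8_t uuid[16] = { 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4,
                                 5, 6, 7, 8, 9, 10, 11, 12 };
            uint64_t lo, hi, fsid;
            uint32_t val0, val1;

            memcpy(&lo, uuid, sizeof(lo));        /* first 8 bytes */
            memcpy(&hi, uuid + 8, sizeof(hi));    /* last 8 bytes */
            fsid = lo ^ hi;                       /* fold 128 bits down to 64 */

            val0 = (uint32_t)(fsid & 0xFFFFFFFFUL);
            val1 = (uint32_t)((fsid >> 32) & 0xFFFFFFFFUL);
            printf("f_fsid = { %#x, %#x }\n", val0, val1);
            return 0;
    }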
 
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 63233cd..dc969c3 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -459,14 +459,11 @@
 	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR))
 		return;
 
-	lock_super(sb);
 	if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
-		EXT4_SB(sb)->s_es->s_feature_compat |=
-			cpu_to_le32(EXT4_FEATURE_COMPAT_EXT_ATTR);
+		EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR);
 		sb->s_dirt = 1;
 		ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
 	}
-	unlock_super(sb);
 }
 
 /*
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 82cc4f59..05c2941 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -34,9 +34,9 @@
 	return FAT_MAX_CACHE;
 }
 
-static kmem_cache_t *fat_cache_cachep;
+static struct kmem_cache *fat_cache_cachep;
 
-static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct fat_cache *cache = (struct fat_cache *)foo;
 
@@ -63,7 +63,7 @@
 
 static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
 {
-	return kmem_cache_alloc(fat_cache_cachep, SLAB_KERNEL);
+	return kmem_cache_alloc(fat_cache_cachep, GFP_KERNEL);
 }
 
 static inline void fat_cache_free(struct fat_cache *cache)
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 78945b5..a9e4688 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -477,12 +477,12 @@
 	kfree(sbi);
 }
 
-static kmem_cache_t *fat_inode_cachep;
+static struct kmem_cache *fat_inode_cachep;
 
 static struct inode *fat_alloc_inode(struct super_block *sb)
 {
 	struct msdos_inode_info *ei;
-	ei = kmem_cache_alloc(fat_inode_cachep, SLAB_KERNEL);
+	ei = kmem_cache_alloc(fat_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -493,7 +493,7 @@
 	kmem_cache_free(fat_inode_cachep, MSDOS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct msdos_inode_info *ei = (struct msdos_inode_info *)foo;
 
diff --git a/fs/fcntl.c b/fs/fcntl.c
index e4f2616..4740d35 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -553,7 +553,7 @@
 }
 
 static DEFINE_RWLOCK(fasync_lock);
-static kmem_cache_t *fasync_cache __read_mostly;
+static struct kmem_cache *fasync_cache __read_mostly;
 
 /*
  * fasync_helper() is used by some character device drivers (mainly mice)
@@ -567,7 +567,7 @@
 	int result = 0;
 
 	if (on) {
-		new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
+		new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
 		if (!new)
 			return -ENOMEM;
 	}
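From here on, a large share of the hunks are the tree-wide rename of the kmem_cache_t typedef to struct kmem_cache and of the SLAB_KERNEL/SLAB_NOFS allocation flags to their GFP_* equivalents. A hedged kernel-side sketch of what a converted cache user looks like under that era's six-argument kmem_cache_create() (my_item and my_cachep are hypothetical names, mirroring the fuse_inode call further down):

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct my_item {                          /* hypothetical object type */
            int value;
    };

    static struct kmem_cache *my_cachep;      /* was: static kmem_cache_t *my_cachep; */

    static int my_cache_setup(void)
    {
            my_cachep = kmem_cache_create("my_item", sizeof(struct my_item),
                                          0, SLAB_HWCACHE_ALIGN, NULL, NULL);
            if (!my_cachep)
                    return -ENOMEM;
            return 0;
    }

    static struct my_item *my_item_alloc(void)
    {
            /* was: kmem_cache_alloc(my_cachep, SLAB_KERNEL); */
            return kmem_cache_alloc(my_cachep, GFP_KERNEL);
    }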
diff --git a/fs/file.c b/fs/file.c
index 8e81775..51aef67 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -21,7 +21,6 @@
 struct fdtable_defer {
 	spinlock_t lock;
 	struct work_struct wq;
-	struct timer_list timer;
 	struct fdtable *next;
 };
 
@@ -75,24 +74,10 @@
 	kfree(fdt);
 }
 
-static void fdtable_timer(unsigned long data)
+static void free_fdtable_work(struct work_struct *work)
 {
-	struct fdtable_defer *fddef = (struct fdtable_defer *)data;
-
-	spin_lock(&fddef->lock);
-	/*
-	 * If someone already emptied the queue return.
-	 */
-	if (!fddef->next)
-		goto out;
-	if (!schedule_work(&fddef->wq))
-		mod_timer(&fddef->timer, 5);
-out:
-	spin_unlock(&fddef->lock);
-}
-
-static void free_fdtable_work(struct fdtable_defer *f)
-{
+	struct fdtable_defer *f =
+		container_of(work, struct fdtable_defer, wq);
 	struct fdtable *fdt;
 
 	spin_lock_bh(&f->lock);
@@ -142,13 +127,8 @@
 		spin_lock(&fddef->lock);
 		fdt->next = fddef->next;
 		fddef->next = fdt;
-		/*
-		 * vmallocs are handled from the workqueue context.
-		 * If the per-cpu workqueue is running, then we
-		 * defer work scheduling through a timer.
-		 */
-		if (!schedule_work(&fddef->wq))
-			mod_timer(&fddef->timer, 5);
+		/* vmallocs are handled from the workqueue context */
+		schedule_work(&fddef->wq);
 		spin_unlock(&fddef->lock);
 		put_cpu_var(fdtable_defer_list);
 	}
@@ -351,10 +331,7 @@
 {
 	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
 	spin_lock_init(&fddef->lock);
-	INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef);
-	init_timer(&fddef->timer);
-	fddef->timer.data = (unsigned long)fddef;
-	fddef->timer.function = fdtable_timer;
+	INIT_WORK(&fddef->wq, free_fdtable_work);
 	fddef->next = NULL;
 }
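The fs/file.c change is part of the 2006 workqueue API conversion: a work function now receives the struct work_struct pointer itself and recovers its container with container_of(), so the old (function, data) pair and the timer-based rescheduling fallback can go away. A small kernel-style sketch of the pattern with hypothetical names (not the fdtable code itself):

    #include <linux/workqueue.h>

    struct my_ctx {                            /* hypothetical container */
            int pending;
            struct work_struct wq;
    };

    static void my_work_fn(struct work_struct *work)
    {
            /* recover the enclosing structure from the embedded work item */
            struct my_ctx *ctx = container_of(work, struct my_ctx, wq);

            ctx->pending = 0;
    }

    static void my_ctx_kick(struct my_ctx *ctx)
    {
            ctx->pending = 1;
            INIT_WORK(&ctx->wq, my_work_fn);   /* no (fn, data) pair any more */
            schedule_work(&ctx->wq);
    }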
 
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index 4786d51..0b7ae89 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -46,7 +46,7 @@
 
 extern struct inode_operations vxfs_immed_symlink_iops;
 
-kmem_cache_t		*vxfs_inode_cachep;
+struct kmem_cache		*vxfs_inode_cachep;
 
 
 #ifdef DIAGNOSTIC
@@ -103,7 +103,7 @@
 		struct vxfs_inode_info	*vip;
 		struct vxfs_dinode	*dip;
 
-		if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL)))
+		if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL)))
 			goto fail;
 		dip = (struct vxfs_dinode *)(bp->b_data + offset);
 		memcpy(vip, dip, sizeof(*vip));
@@ -145,7 +145,7 @@
 		struct vxfs_dinode	*dip;
 		caddr_t			kaddr = (char *)page_address(pp);
 
-		if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL)))
+		if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL)))
 			goto fail;
 		dip = (struct vxfs_dinode *)(kaddr + offset);
 		memcpy(vip, dip, sizeof(*vip));
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 66571ea..357764d 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -19,7 +19,7 @@
 
 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 
-static kmem_cache_t *fuse_req_cachep;
+static struct kmem_cache *fuse_req_cachep;
 
 static struct fuse_conn *fuse_get_conn(struct file *file)
 {
@@ -41,7 +41,7 @@
 
 struct fuse_req *fuse_request_alloc(void)
 {
-	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
+	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
 	if (req)
 		fuse_request_init(req);
 	return req;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index c71a6c0..1cabdb2 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -141,9 +141,6 @@
 		struct fuse_req *forget_req;
 		struct dentry *parent;
 
-		/* Doesn't hurt to "reset" the validity timeout */
-		fuse_invalidate_entry_cache(entry);
-
 		/* For negative dentries, always do a fresh lookup */
 		if (!inode)
 			return 0;
@@ -1027,6 +1024,8 @@
 	if (attr->ia_valid & ATTR_SIZE) {
 		unsigned long limit;
 		is_truncate = 1;
+		if (IS_SWAPFILE(inode))
+			return -ETXTBSY;
 		limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
 		if (limit != RLIM_INFINITY && attr->ia_size > (loff_t) limit) {
 			send_sig(SIGXFSZ, current, 0);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 763a50d..128f79c 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -754,6 +754,42 @@
 	return err;
 }
 
+static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
+{
+	struct inode *inode = mapping->host;
+	struct fuse_conn *fc = get_fuse_conn(inode);
+	struct fuse_req *req;
+	struct fuse_bmap_in inarg;
+	struct fuse_bmap_out outarg;
+	int err;
+
+	if (!inode->i_sb->s_bdev || fc->no_bmap)
+		return 0;
+
+	req = fuse_get_req(fc);
+	if (IS_ERR(req))
+		return 0;
+
+	memset(&inarg, 0, sizeof(inarg));
+	inarg.block = block;
+	inarg.blocksize = inode->i_sb->s_blocksize;
+	req->in.h.opcode = FUSE_BMAP;
+	req->in.h.nodeid = get_node_id(inode);
+	req->in.numargs = 1;
+	req->in.args[0].size = sizeof(inarg);
+	req->in.args[0].value = &inarg;
+	req->out.numargs = 1;
+	req->out.args[0].size = sizeof(outarg);
+	req->out.args[0].value = &outarg;
+	request_send(fc, req);
+	err = req->out.h.error;
+	fuse_put_request(fc, req);
+	if (err == -ENOSYS)
+		fc->no_bmap = 1;
+
+	return err ? 0 : outarg.block;
+}
+
 static const struct file_operations fuse_file_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= do_sync_read,
@@ -787,6 +823,7 @@
 	.commit_write	= fuse_commit_write,
 	.readpages	= fuse_readpages,
 	.set_page_dirty	= fuse_set_page_dirty,
+	.bmap		= fuse_bmap,
 };
 
 void fuse_init_file_inode(struct inode *inode)
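fuse_bmap() above wires the new FUSE_BMAP request into the address_space ->bmap hook, which is what the FIBMAP ioctl ends up calling, and which only makes sense on the block-backed fuseblk mounts added later in this patch. As a reminder of how that surfaces to user space (FIBMAP needs CAP_SYS_RAWIO), a minimal caller might look like:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>            /* FIBMAP */

    int main(int argc, char **argv)
    {
            int fd, block = 0;        /* logical block 0; FIBMAP rewrites it in place */

            if (argc < 2)
                    return 1;
            fd = open(argv[1], O_RDONLY);
            if (fd < 0)
                    return 1;
            if (ioctl(fd, FIBMAP, &block) == 0)
                    printf("logical block 0 -> physical block %d\n", block);
            else
                    perror("FIBMAP");
            return 0;
    }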
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 91edb89..b98b20d 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -298,6 +298,9 @@
 	    reply, before any other request, and never cleared */
 	unsigned conn_error : 1;
 
+	/** Connection successful.  Only set in INIT */
+	unsigned conn_init : 1;
+
 	/** Do readpages asynchronously?  Only set in INIT */
 	unsigned async_read : 1;
 
@@ -339,6 +342,9 @@
 	/** Is interrupt not implemented by fs? */
 	unsigned no_interrupt : 1;
 
+	/** Is bmap not implemented by fs? */
+	unsigned no_bmap : 1;
+
 	/** The number of requests waiting for completion */
 	atomic_t num_waiting;
 
@@ -365,6 +371,9 @@
 
 	/** Key for lock owner ID scrambling */
 	u32 scramble_key[4];
+
+	/** Reserved request for the DESTROY message */
+	struct fuse_req *destroy_req;
 };
 
 static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb)
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index fc42035..12450d2 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -22,7 +22,7 @@
 MODULE_DESCRIPTION("Filesystem in Userspace");
 MODULE_LICENSE("GPL");
 
-static kmem_cache_t *fuse_inode_cachep;
+static struct kmem_cache *fuse_inode_cachep;
 struct list_head fuse_conn_list;
 DEFINE_MUTEX(fuse_mutex);
 
@@ -39,6 +39,7 @@
 	unsigned group_id_present : 1;
 	unsigned flags;
 	unsigned max_read;
+	unsigned blksize;
 };
 
 static struct inode *fuse_alloc_inode(struct super_block *sb)
@@ -46,7 +47,7 @@
 	struct inode *inode;
 	struct fuse_inode *fi;
 
-	inode = kmem_cache_alloc(fuse_inode_cachep, SLAB_KERNEL);
+	inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL);
 	if (!inode)
 		return NULL;
 
@@ -205,10 +206,23 @@
 		fuse_abort_conn(get_fuse_conn_super(vfsmnt->mnt_sb));
 }
 
+static void fuse_send_destroy(struct fuse_conn *fc)
+{
+	struct fuse_req *req = fc->destroy_req;
+	if (req && fc->conn_init) {
+		fc->destroy_req = NULL;
+		req->in.h.opcode = FUSE_DESTROY;
+		req->force = 1;
+		request_send(fc, req);
+		fuse_put_request(fc, req);
+	}
+}
+
 static void fuse_put_super(struct super_block *sb)
 {
 	struct fuse_conn *fc = get_fuse_conn_super(sb);
 
+	fuse_send_destroy(fc);
 	spin_lock(&fc->lock);
 	fc->connected = 0;
 	fc->blocked = 0;
@@ -274,6 +288,7 @@
 	OPT_DEFAULT_PERMISSIONS,
 	OPT_ALLOW_OTHER,
 	OPT_MAX_READ,
+	OPT_BLKSIZE,
 	OPT_ERR
 };
 
@@ -285,14 +300,16 @@
 	{OPT_DEFAULT_PERMISSIONS,	"default_permissions"},
 	{OPT_ALLOW_OTHER,		"allow_other"},
 	{OPT_MAX_READ,			"max_read=%u"},
+	{OPT_BLKSIZE,			"blksize=%u"},
 	{OPT_ERR,			NULL}
 };
 
-static int parse_fuse_opt(char *opt, struct fuse_mount_data *d)
+static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
 {
 	char *p;
 	memset(d, 0, sizeof(struct fuse_mount_data));
 	d->max_read = ~0;
+	d->blksize = 512;
 
 	while ((p = strsep(&opt, ",")) != NULL) {
 		int token;
@@ -345,6 +362,12 @@
 			d->max_read = value;
 			break;
 
+		case OPT_BLKSIZE:
+			if (!is_bdev || match_int(&args[0], &value))
+				return 0;
+			d->blksize = value;
+			break;
+
 		default:
 			return 0;
 		}
@@ -400,6 +423,8 @@
 void fuse_conn_put(struct fuse_conn *fc)
 {
 	if (atomic_dec_and_test(&fc->count)) {
+		if (fc->destroy_req)
+			fuse_request_free(fc->destroy_req);
 		mutex_destroy(&fc->inst_mutex);
 		kfree(fc);
 	}
@@ -456,6 +481,7 @@
 		fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages);
 		fc->minor = arg->minor;
 		fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
+		fc->conn_init = 1;
 	}
 	fuse_put_request(fc, req);
 	fc->blocked = 0;
@@ -500,15 +526,23 @@
 	struct dentry *root_dentry;
 	struct fuse_req *init_req;
 	int err;
+	int is_bdev = sb->s_bdev != NULL;
 
 	if (sb->s_flags & MS_MANDLOCK)
 		return -EINVAL;
 
-	if (!parse_fuse_opt((char *) data, &d))
+	if (!parse_fuse_opt((char *) data, &d, is_bdev))
 		return -EINVAL;
 
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	if (is_bdev) {
+#ifdef CONFIG_BLOCK
+		if (!sb_set_blocksize(sb, d.blksize))
+			return -EINVAL;
+#endif
+	} else {
+		sb->s_blocksize = PAGE_CACHE_SIZE;
+		sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	}
 	sb->s_magic = FUSE_SUPER_MAGIC;
 	sb->s_op = &fuse_super_operations;
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -547,6 +581,12 @@
 	if (!init_req)
 		goto err_put_root;
 
+	if (is_bdev) {
+		fc->destroy_req = fuse_request_alloc();
+		if (!fc->destroy_req)
+			goto err_put_root;
+	}
+
 	mutex_lock(&fuse_mutex);
 	err = -EINVAL;
 	if (file->private_data)
@@ -598,10 +638,47 @@
 	.kill_sb	= kill_anon_super,
 };
 
+#ifdef CONFIG_BLOCK
+static int fuse_get_sb_blk(struct file_system_type *fs_type,
+			   int flags, const char *dev_name,
+			   void *raw_data, struct vfsmount *mnt)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super,
+			   mnt);
+}
+
+static struct file_system_type fuseblk_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "fuseblk",
+	.get_sb		= fuse_get_sb_blk,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+static inline int register_fuseblk(void)
+{
+	return register_filesystem(&fuseblk_fs_type);
+}
+
+static inline void unregister_fuseblk(void)
+{
+	unregister_filesystem(&fuseblk_fs_type);
+}
+#else
+static inline int register_fuseblk(void)
+{
+	return 0;
+}
+
+static inline void unregister_fuseblk(void)
+{
+}
+#endif
+
 static decl_subsys(fuse, NULL, NULL);
 static decl_subsys(connections, NULL, NULL);
 
-static void fuse_inode_init_once(void *foo, kmem_cache_t *cachep,
+static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep,
 				 unsigned long flags)
 {
 	struct inode * inode = foo;
@@ -617,24 +694,34 @@
 
 	err = register_filesystem(&fuse_fs_type);
 	if (err)
-		printk("fuse: failed to register filesystem\n");
-	else {
-		fuse_inode_cachep = kmem_cache_create("fuse_inode",
-						      sizeof(struct fuse_inode),
-						      0, SLAB_HWCACHE_ALIGN,
-						      fuse_inode_init_once, NULL);
-		if (!fuse_inode_cachep) {
-			unregister_filesystem(&fuse_fs_type);
-			err = -ENOMEM;
-		}
-	}
+		goto out;
 
+	err = register_fuseblk();
+	if (err)
+		goto out_unreg;
+
+	fuse_inode_cachep = kmem_cache_create("fuse_inode",
+					      sizeof(struct fuse_inode),
+					      0, SLAB_HWCACHE_ALIGN,
+					      fuse_inode_init_once, NULL);
+	err = -ENOMEM;
+	if (!fuse_inode_cachep)
+		goto out_unreg2;
+
+	return 0;
+
+ out_unreg2:
+	unregister_fuseblk();
+ out_unreg:
+	unregister_filesystem(&fuse_fs_type);
+ out:
 	return err;
 }
 
 static void fuse_fs_cleanup(void)
 {
 	unregister_filesystem(&fuse_fs_type);
+	unregister_fuseblk();
 	kmem_cache_destroy(fuse_inode_cachep);
 }
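fuse_fs_init() above is reshuffled into the usual init pattern once a second filesystem type has to be registered: perform each step, and on failure jump to a label that unwinds everything done so far, in reverse order. A generic userspace sketch of that shape, with step_a/step_b/step_c as made-up stand-ins for the register/create calls:

    #include <stdio.h>

    /* hypothetical steps standing in for register_filesystem()/register_fuseblk()/... */
    static int step_a(void) { return 0; }
    static int step_b(void) { return 0; }
    static int step_c(void) { return -1; }    /* pretend the last step fails */
    static void undo_b(void) { }
    static void undo_a(void) { }

    int main(void)
    {
            int err;

            err = step_a();
            if (err)
                    goto out;
            err = step_b();
            if (err)
                    goto out_undo_a;
            err = step_c();
            if (err)
                    goto out_undo_b;
            return 0;

     out_undo_b:
            undo_b();
     out_undo_a:
            undo_a();
     out:
            printf("init failed: %d\n", err);
            return 1;
    }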
 
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 8c27de8..c0791cb 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -2,6 +2,7 @@
 	tristate "GFS2 file system support"
 	depends on EXPERIMENTAL
 	select FS_POSIX_ACL
+	select CRC32
 	help
 	A cluster filesystem.
 
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 5f959b8..6e80844 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -74,11 +74,11 @@
 {
 	if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
 		return -EOPNOTSUPP;
-	if (current->fsuid != ip->i_di.di_uid && !capable(CAP_FOWNER))
+	if (current->fsuid != ip->i_inode.i_uid && !capable(CAP_FOWNER))
 		return -EPERM;
-	if (S_ISLNK(ip->i_di.di_mode))
+	if (S_ISLNK(ip->i_inode.i_mode))
 		return -EOPNOTSUPP;
-	if (!access && !S_ISDIR(ip->i_di.di_mode))
+	if (!access && !S_ISDIR(ip->i_inode.i_mode))
 		return -EACCES;
 
 	return 0;
@@ -145,14 +145,14 @@
 }
 
 /**
- * gfs2_check_acl_locked - Check an ACL to see if we're allowed to do something
+ * gfs2_check_acl - Check an ACL to see if we're allowed to do something
  * @inode: the file we want to do something to
  * @mask: what we want to do
  *
  * Returns: errno
  */
 
-int gfs2_check_acl_locked(struct inode *inode, int mask)
+int gfs2_check_acl(struct inode *inode, int mask)
 {
 	struct posix_acl *acl = NULL;
 	int error;
@@ -170,21 +170,6 @@
 	return -EAGAIN;
 }
 
-int gfs2_check_acl(struct inode *inode, int mask)
-{
-	struct gfs2_inode *ip = GFS2_I(inode);
-	struct gfs2_holder i_gh;
-	int error;
-
-	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
-	if (!error) {
-		error = gfs2_check_acl_locked(inode, mask);
-		gfs2_glock_dq_uninit(&i_gh);
-	}
-
-	return error;
-}
-
 static int munge_mode(struct gfs2_inode *ip, mode_t mode)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
@@ -198,10 +183,10 @@
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 	if (!error) {
 		gfs2_assert_withdraw(sdp,
-				(ip->i_di.di_mode & S_IFMT) == (mode & S_IFMT));
-		ip->i_di.di_mode = mode;
+				(ip->i_inode.i_mode & S_IFMT) == (mode & S_IFMT));
+		ip->i_inode.i_mode = mode;
 		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-		gfs2_dinode_out(&ip->i_di, dibh->b_data);
+		gfs2_dinode_out(ip, dibh->b_data);
 		brelse(dibh);
 	}
 
@@ -215,12 +200,12 @@
 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
 	struct posix_acl *acl = NULL, *clone;
 	struct gfs2_ea_request er;
-	mode_t mode = ip->i_di.di_mode;
+	mode_t mode = ip->i_inode.i_mode;
 	int error;
 
 	if (!sdp->sd_args.ar_posix_acl)
 		return 0;
-	if (S_ISLNK(ip->i_di.di_mode))
+	if (S_ISLNK(ip->i_inode.i_mode))
 		return 0;
 
 	memset(&er, 0, sizeof(struct gfs2_ea_request));
@@ -232,7 +217,7 @@
 		return error;
 	if (!acl) {
 		mode &= ~current->fs->umask;
-		if (mode != ip->i_di.di_mode)
+		if (mode != ip->i_inode.i_mode)
 			error = munge_mode(ip, mode);
 		return error;
 	}
@@ -244,7 +229,7 @@
 	posix_acl_release(acl);
 	acl = clone;
 
-	if (S_ISDIR(ip->i_di.di_mode)) {
+	if (S_ISDIR(ip->i_inode.i_mode)) {
 		er.er_name = GFS2_POSIX_ACL_DEFAULT;
 		er.er_name_len = GFS2_POSIX_ACL_DEFAULT_LEN;
 		error = gfs2_system_eaops.eo_set(ip, &er);
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
index 05c294f..6751930 100644
--- a/fs/gfs2/acl.h
+++ b/fs/gfs2/acl.h
@@ -31,7 +31,6 @@
 			  struct gfs2_ea_request *er,
 			  int *remove, mode_t *mode);
 int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
-int gfs2_check_acl_locked(struct inode *inode, int mask);
 int gfs2_check_acl(struct inode *inode, int mask);
 int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
 int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
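A large part of the gfs2 changes from here on stop using the duplicated dinode fields (ip->i_di.di_mode, di_uid, di_mtime and so on) and read them from the VFS inode embedded in struct gfs2_inode instead (ip->i_inode.i_mode and friends); GFS2_I() is simply the container_of() accessor going the other way. A stripped-down userspace sketch of that embedding pattern, with stand-in struct definitions:

    #include <stddef.h>
    #include <stdio.h>

    struct inode {                         /* stand-in for the VFS inode */
            unsigned int i_mode;
    };

    struct gfs2_inode {                    /* stand-in for the fs-private inode */
            unsigned long i_num;
            struct inode i_inode;          /* VFS inode embedded inside */
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct gfs2_inode *GFS2_I(struct inode *inode)
    {
            return container_of(inode, struct gfs2_inode, i_inode);
    }

    int main(void)
    {
            struct gfs2_inode gi = { .i_num = 42, .i_inode = { .i_mode = 0100644 } };
            struct inode *vfs = &gi.i_inode;

            printf("ino %lu mode %o\n", GFS2_I(vfs)->i_num, vfs->i_mode);
            return 0;
    }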
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 06e9a8c..8240c1f 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -38,8 +38,8 @@
 };
 
 typedef int (*block_call_t) (struct gfs2_inode *ip, struct buffer_head *dibh,
-			     struct buffer_head *bh, u64 *top,
-			     u64 *bottom, unsigned int height,
+			     struct buffer_head *bh, __be64 *top,
+			     __be64 *bottom, unsigned int height,
 			     void *data);
 
 struct strip_mine {
@@ -163,6 +163,7 @@
 	if (ip->i_di.di_size) {
 		*(__be64 *)(di + 1) = cpu_to_be64(block);
 		ip->i_di.di_blocks++;
+		gfs2_set_inode_blocks(&ip->i_inode);
 		di->di_blocks = cpu_to_be64(ip->i_di.di_blocks);
 	}
 
@@ -230,7 +231,7 @@
 	struct buffer_head *blocks[GFS2_MAX_META_HEIGHT];
 	struct gfs2_dinode *di;
 	int error;
-	u64 *bp;
+	__be64 *bp;
 	u64 bn;
 	unsigned n;
 
@@ -255,7 +256,7 @@
 					  GFS2_FORMAT_IN);
 			gfs2_buffer_clear_tail(blocks[n],
 					       sizeof(struct gfs2_meta_header));
-			bp = (u64 *)(blocks[n]->b_data +
+			bp = (__be64 *)(blocks[n]->b_data +
 				     sizeof(struct gfs2_meta_header));
 			*bp = cpu_to_be64(blocks[n+1]->b_blocknr);
 			brelse(blocks[n]);
@@ -272,6 +273,7 @@
 	*(__be64 *)(di + 1) = cpu_to_be64(bn);
 	ip->i_di.di_height += new_height;
 	ip->i_di.di_blocks += new_height;
+	gfs2_set_inode_blocks(&ip->i_inode);
 	di->di_height = cpu_to_be16(ip->i_di.di_height);
 	di->di_blocks = cpu_to_be64(ip->i_di.di_blocks);
 	brelse(dibh);
@@ -360,15 +362,15 @@
  * metadata tree.
  */
 
-static inline u64 *metapointer(struct buffer_head *bh, int *boundary,
+static inline __be64 *metapointer(struct buffer_head *bh, int *boundary,
 			       unsigned int height, const struct metapath *mp)
 {
 	unsigned int head_size = (height > 0) ?
 		sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
-	u64 *ptr;
+	__be64 *ptr;
 	*boundary = 0;
-	ptr = ((u64 *)(bh->b_data + head_size)) + mp->mp_list[height];
-	if (ptr + 1 == (u64 *)(bh->b_data + bh->b_size))
+	ptr = ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
+	if (ptr + 1 == (__be64 *)(bh->b_data + bh->b_size))
 		*boundary = 1;
 	return ptr;
 }
@@ -394,7 +396,7 @@
 			int *new, u64 *block)
 {
 	int boundary;
-	u64 *ptr = metapointer(bh, &boundary, height, mp);
+	__be64 *ptr = metapointer(bh, &boundary, height, mp);
 
 	if (*ptr) {
 		*block = be64_to_cpu(*ptr);
@@ -415,113 +417,12 @@
 
 	*ptr = cpu_to_be64(*block);
 	ip->i_di.di_blocks++;
+	gfs2_set_inode_blocks(&ip->i_inode);
 
 	*new = 1;
 	return 0;
 }
 
-/**
- * gfs2_block_pointers - Map a block from an inode to a disk block
- * @inode: The inode
- * @lblock: The logical block number
- * @map_bh: The bh to be mapped
- * @mp: metapath to use
- *
- * Find the block number on the current device which corresponds to an
- * inode's block. If the block had to be created, "new" will be set.
- *
- * Returns: errno
- */
-
-static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create,
-			       struct buffer_head *bh_map, struct metapath *mp)
-{
-	struct gfs2_inode *ip = GFS2_I(inode);
-	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	struct buffer_head *bh;
-	unsigned int bsize;
-	unsigned int height;
-	unsigned int end_of_metadata;
-	unsigned int x;
-	int error = 0;
-	int new = 0;
-	u64 dblock = 0;
-	int boundary;
-	unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
-
-	BUG_ON(maxlen == 0);
-
-	if (gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip)))
-		return 0;
-
-	bsize = gfs2_is_dir(ip) ? sdp->sd_jbsize : sdp->sd_sb.sb_bsize;
-
-	height = calc_tree_height(ip, (lblock + 1) * bsize);
-	if (ip->i_di.di_height < height) {
-		if (!create)
-			return 0;
-
-		error = build_height(inode, height);
-		if (error)
-			return error;
-	}
-
-	find_metapath(ip, lblock, mp);
-	end_of_metadata = ip->i_di.di_height - 1;
-
-	error = gfs2_meta_inode_buffer(ip, &bh);
-	if (error)
-		return error;
-
-	for (x = 0; x < end_of_metadata; x++) {
-		lookup_block(ip, bh, x, mp, create, &new, &dblock);
-		brelse(bh);
-		if (!dblock)
-			return 0;
-
-		error = gfs2_meta_indirect_buffer(ip, x+1, dblock, new, &bh);
-		if (error)
-			return error;
-	}
-
-	boundary = lookup_block(ip, bh, end_of_metadata, mp, create, &new, &dblock);
-	clear_buffer_mapped(bh_map);
-	clear_buffer_new(bh_map);
-	clear_buffer_boundary(bh_map);
-
-	if (dblock) {
-		map_bh(bh_map, inode->i_sb, dblock);
-		if (boundary)
-			set_buffer_boundary(bh);
-		if (new) {
-			struct buffer_head *dibh;
-			error = gfs2_meta_inode_buffer(ip, &dibh);
-			if (!error) {
-				gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-				gfs2_dinode_out(&ip->i_di, dibh->b_data);
-				brelse(dibh);
-			}
-			set_buffer_new(bh_map);
-			goto out_brelse;
-		}
-		while(--maxlen && !buffer_boundary(bh_map)) {
-			u64 eblock;
-
-			mp->mp_list[end_of_metadata]++;
-			boundary = lookup_block(ip, bh, end_of_metadata, mp, 0, &new, &eblock);
-			if (eblock != ++dblock)
-				break;
-			bh_map->b_size += (1 << inode->i_blkbits);
-			if (boundary)
-				set_buffer_boundary(bh_map);
-		}
-	}
-out_brelse:
-	brelse(bh);
-	return 0;
-}
-
-
 static inline void bmap_lock(struct inode *inode, int create)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
@@ -540,21 +441,116 @@
 		up_read(&ip->i_rw_mutex);
 }
 
+/**
+ * gfs2_block_map - Map a block from an inode to a disk block
+ * @inode: The inode
+ * @lblock: The logical block number
+ * @bh_map: The bh to be mapped
+ *
+ * Find the block number on the current device which corresponds to an
+ * inode's block. If the block had to be created, "new" will be set.
+ *
+ * Returns: errno
+ */
+
 int gfs2_block_map(struct inode *inode, u64 lblock, int create,
-		   struct buffer_head *bh)
+		   struct buffer_head *bh_map)
 {
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct buffer_head *bh;
+	unsigned int bsize;
+	unsigned int height;
+	unsigned int end_of_metadata;
+	unsigned int x;
+	int error = 0;
+	int new = 0;
+	u64 dblock = 0;
+	int boundary;
+	unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
 	struct metapath mp;
-	int ret;
+	u64 size;
+
+	BUG_ON(maxlen == 0);
+
+	if (gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip)))
+		return 0;
 
 	bmap_lock(inode, create);
-	ret = gfs2_block_pointers(inode, lblock, create, bh, &mp);
+	clear_buffer_mapped(bh_map);
+	clear_buffer_new(bh_map);
+	clear_buffer_boundary(bh_map);
+	bsize = gfs2_is_dir(ip) ? sdp->sd_jbsize : sdp->sd_sb.sb_bsize;
+	size = (lblock + 1) * bsize;
+
+	if (size > ip->i_di.di_size) {
+		height = calc_tree_height(ip, size);
+		if (ip->i_di.di_height < height) {
+			if (!create)
+				goto out_ok;
+	
+			error = build_height(inode, height);
+			if (error)
+				goto out_fail;
+		}
+	}
+
+	find_metapath(ip, lblock, &mp);
+	end_of_metadata = ip->i_di.di_height - 1;
+	error = gfs2_meta_inode_buffer(ip, &bh);
+	if (error)
+		goto out_fail;
+
+	for (x = 0; x < end_of_metadata; x++) {
+		lookup_block(ip, bh, x, &mp, create, &new, &dblock);
+		brelse(bh);
+		if (!dblock)
+			goto out_ok;
+
+		error = gfs2_meta_indirect_buffer(ip, x+1, dblock, new, &bh);
+		if (error)
+			goto out_fail;
+	}
+
+	boundary = lookup_block(ip, bh, end_of_metadata, &mp, create, &new, &dblock);
+	if (dblock) {
+		map_bh(bh_map, inode->i_sb, dblock);
+		if (boundary)
+			set_buffer_boundary(bh_map);
+		if (new) {
+			struct buffer_head *dibh;
+			error = gfs2_meta_inode_buffer(ip, &dibh);
+			if (!error) {
+				gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+				gfs2_dinode_out(ip, dibh->b_data);
+				brelse(dibh);
+			}
+			set_buffer_new(bh_map);
+			goto out_brelse;
+		}
+		while(--maxlen && !buffer_boundary(bh_map)) {
+			u64 eblock;
+
+			mp.mp_list[end_of_metadata]++;
+			boundary = lookup_block(ip, bh, end_of_metadata, &mp, 0, &new, &eblock);
+			if (eblock != ++dblock)
+				break;
+			bh_map->b_size += (1 << inode->i_blkbits);
+			if (boundary)
+				set_buffer_boundary(bh_map);
+		}
+	}
+out_brelse:
+	brelse(bh);
+out_ok:
+	error = 0;
+out_fail:
 	bmap_unlock(inode, create);
-	return ret;
+	return error;
 }
 
 int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
 {
-	struct metapath mp;
 	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
 	int ret;
 	int create = *new;
@@ -564,9 +560,7 @@
 	BUG_ON(!new);
 
 	bh.b_size = 1 << (inode->i_blkbits + 5);
-	bmap_lock(inode, create);
-	ret = gfs2_block_pointers(inode, lblock, create, &bh, &mp);
-	bmap_unlock(inode, create);
+	ret = gfs2_block_map(inode, lblock, create, &bh);
 	*extlen = bh.b_size >> inode->i_blkbits;
 	*dblock = bh.b_blocknr;
 	if (buffer_new(&bh))
@@ -600,7 +594,7 @@
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct buffer_head *bh = NULL;
-	u64 *top, *bottom;
+	__be64 *top, *bottom;
 	u64 bn;
 	int error;
 	int mh_size = sizeof(struct gfs2_meta_header);
@@ -611,17 +605,17 @@
 			return error;
 		dibh = bh;
 
-		top = (u64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
-		bottom = (u64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
+		top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
+		bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
 	} else {
 		error = gfs2_meta_indirect_buffer(ip, height, block, 0, &bh);
 		if (error)
 			return error;
 
-		top = (u64 *)(bh->b_data + mh_size) +
+		top = (__be64 *)(bh->b_data + mh_size) +
 				  (first ? mp->mp_list[height] : 0);
 
-		bottom = (u64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
+		bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
 	}
 
 	error = bc(ip, dibh, bh, top, bottom, height, data);
@@ -660,7 +654,7 @@
  */
 
 static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
-		    struct buffer_head *bh, u64 *top, u64 *bottom,
+		    struct buffer_head *bh, __be64 *top, __be64 *bottom,
 		    unsigned int height, void *data)
 {
 	struct strip_mine *sm = data;
@@ -668,7 +662,7 @@
 	struct gfs2_rgrp_list rlist;
 	u64 bn, bstart;
 	u32 blen;
-	u64 *p;
+	__be64 *p;
 	unsigned int rg_blocks = 0;
 	int metadata;
 	unsigned int revokes = 0;
@@ -770,6 +764,7 @@
 		if (!ip->i_di.di_blocks)
 			gfs2_consist_inode(ip);
 		ip->i_di.di_blocks--;
+		gfs2_set_inode_blocks(&ip->i_inode);
 	}
 	if (bstart) {
 		if (metadata)
@@ -778,9 +773,9 @@
 			gfs2_free_data(ip, bstart, blen);
 	}
 
-	ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
+	ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
 
-	gfs2_dinode_out(&ip->i_di, dibh->b_data);
+	gfs2_dinode_out(ip, dibh->b_data);
 
 	up_write(&ip->i_rw_mutex);
 
@@ -819,7 +814,7 @@
 	if (error)
 		goto out;
 
-	error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
+	error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
 	if (error)
 		goto out_gunlock_q;
 
@@ -853,14 +848,14 @@
 	}
 
 	ip->i_di.di_size = size;
-	ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
+	ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
 
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 	if (error)
 		goto out_end_trans;
 
 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-	gfs2_dinode_out(&ip->i_di, dibh->b_data);
+	gfs2_dinode_out(ip, dibh->b_data);
 	brelse(dibh);
 
 out_end_trans:
@@ -968,9 +963,9 @@
 
 	if (gfs2_is_stuffed(ip)) {
 		ip->i_di.di_size = size;
-		ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
+		ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
 		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-		gfs2_dinode_out(&ip->i_di, dibh->b_data);
+		gfs2_dinode_out(ip, dibh->b_data);
 		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size);
 		error = 1;
 
@@ -980,10 +975,10 @@
 
 		if (!error) {
 			ip->i_di.di_size = size;
-			ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
+			ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
 			ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG;
 			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-			gfs2_dinode_out(&ip->i_di, dibh->b_data);
+			gfs2_dinode_out(ip, dibh->b_data);
 		}
 	}
 
@@ -1053,11 +1048,11 @@
 			ip->i_num.no_addr;
 		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 	}
-	ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
+	ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
 	ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG;
 
 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-	gfs2_dinode_out(&ip->i_di, dibh->b_data);
+	gfs2_dinode_out(ip, dibh->b_data);
 	brelse(dibh);
 
 out:
@@ -1109,7 +1104,7 @@
 {
 	int error;
 
-	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_di.di_mode)))
+	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_inode.i_mode)))
 		return -EINVAL;
 
 	if (size > ip->i_di.di_size)
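Throughout the gfs2 bmap/dir/eattr hunks, raw on-disk block pointers are retyped from u64 to __be64 so that sparse can flag any access that forgets a byte swap; values only cross between CPU order and disk order through cpu_to_be64()/be64_to_cpu(). A userspace sketch of the same discipline using the glibc equivalents htobe64()/be64toh():

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t cpu_block = 123456;          /* CPU-endian block number */
            uint64_t disk_block;                  /* the kernel would type this __be64 */

            disk_block = htobe64(cpu_block);      /* kernel: cpu_to_be64() */
            printf("on disk: %#llx, back in cpu order: %llu\n",
                   (unsigned long long)disk_block,
                   (unsigned long long)be64toh(disk_block));
            return 0;
    }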
diff --git a/fs/gfs2/daemon.c b/fs/gfs2/daemon.c
index cab1f68..683cb5b 100644
--- a/fs/gfs2/daemon.c
+++ b/fs/gfs2/daemon.c
@@ -112,6 +112,7 @@
 	struct gfs2_sbd *sdp = data;
 	struct gfs2_holder ji_gh;
 	unsigned long t;
+	int need_flush;
 
 	while (!kthread_should_stop()) {
 		/* Advance the log tail */
@@ -120,8 +121,10 @@
 		    gfs2_tune_get(sdp, gt_log_flush_secs) * HZ;
 
 		gfs2_ail1_empty(sdp, DIO_ALL);
-
-		if (time_after_eq(jiffies, t)) {
+		gfs2_log_lock(sdp);
+		need_flush = sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks);
+		gfs2_log_unlock(sdp);
+		if (need_flush || time_after_eq(jiffies, t)) {
 			gfs2_log_flush(sdp, NULL);
 			sdp->sd_log_flush_time = jiffies;
 		}
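gfs2_logd above now samples sd_log_num_buf under the log lock and only afterwards, with the lock dropped, decides whether to force a log flush, rather than flushing purely on the timer. A toy pthread sketch of that sample-then-act shape (all names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;
    static int log_num_buf = 500;                 /* pretend 500 blocks are buffered */
    static const int incore_log_blocks = 100;     /* hypothetical tuning knob */

    static void log_flush(void)
    {
            printf("flushing %d buffered blocks\n", log_num_buf);
    }

    int main(void)
    {
            int need_flush;

            pthread_mutex_lock(&log_lock);        /* kernel: gfs2_log_lock(sdp) */
            need_flush = log_num_buf > incore_log_blocks;
            pthread_mutex_unlock(&log_lock);      /* kernel: gfs2_log_unlock(sdp) */

            if (need_flush)
                    log_flush();                  /* expensive work done outside the lock */
            return 0;
    }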
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index e24af28b1..0fdcb77 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -131,8 +131,8 @@
 	memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
 	if (ip->i_di.di_size < offset + size)
 		ip->i_di.di_size = offset + size;
-	ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
-	gfs2_dinode_out(&ip->i_di, dibh->b_data);
+	ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
+	gfs2_dinode_out(ip, dibh->b_data);
 
 	brelse(dibh);
 
@@ -229,10 +229,10 @@
 
 	if (ip->i_di.di_size < offset + copied)
 		ip->i_di.di_size = offset + copied;
-	ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
+	ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
 
 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-	gfs2_dinode_out(&ip->i_di, dibh->b_data);
+	gfs2_dinode_out(ip, dibh->b_data);
 	brelse(dibh);
 
 	return copied;
@@ -340,10 +340,15 @@
 	return (copied) ? copied : error;
 }
 
+static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent)
+{
+	return dent->de_inum.no_addr == 0 || dent->de_inum.no_formal_ino == 0;
+}
+
 static inline int __gfs2_dirent_find(const struct gfs2_dirent *dent,
 				     const struct qstr *name, int ret)
 {
-	if (dent->de_inum.no_addr != 0 &&
+	if (!gfs2_dirent_sentinel(dent) &&
 	    be32_to_cpu(dent->de_hash) == name->hash &&
 	    be16_to_cpu(dent->de_name_len) == name->len &&
 	    memcmp(dent+1, name->name, name->len) == 0)
@@ -388,7 +393,7 @@
 	unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
 	unsigned totlen = be16_to_cpu(dent->de_rec_len);
 
-	if (!dent->de_inum.no_addr)
+	if (gfs2_dirent_sentinel(dent))
 		actual = GFS2_DIRENT_SIZE(0);
 	if (totlen - actual >= required)
 		return 1;
@@ -405,7 +410,7 @@
 			      void *opaque)
 {
 	struct dirent_gather *g = opaque;
-	if (dent->de_inum.no_addr) {
+	if (!gfs2_dirent_sentinel(dent)) {
 		g->pdent[g->offset++] = dent;
 	}
 	return 0;
@@ -433,10 +438,10 @@
 	if (unlikely(offset + size > len))
 		goto error;
 	msg = "zero inode number";
-	if (unlikely(!first && !dent->de_inum.no_addr))
+	if (unlikely(!first && gfs2_dirent_sentinel(dent)))
 		goto error;
 	msg = "name length is greater than space in dirent";
-	if (dent->de_inum.no_addr &&
+	if (!gfs2_dirent_sentinel(dent) &&
 	    unlikely(sizeof(struct gfs2_dirent)+be16_to_cpu(dent->de_name_len) >
 		     size))
 		goto error;
@@ -598,7 +603,7 @@
 		return ret;
 
         /* Only the first dent could ever have de_inum.no_addr == 0 */
-	if (!tmp->de_inum.no_addr) {
+	if (gfs2_dirent_sentinel(tmp)) {
 		gfs2_consist_inode(dip);
 		return -EIO;
 	}
@@ -621,7 +626,7 @@
 {
 	u16 cur_rec_len, prev_rec_len;
 
-	if (!cur->de_inum.no_addr) {
+	if (gfs2_dirent_sentinel(cur)) {
 		gfs2_consist_inode(dip);
 		return;
 	}
@@ -633,7 +638,8 @@
 	   out the inode number and return.  */
 
 	if (!prev) {
-		cur->de_inum.no_addr = 0;	/* No endianess worries */
+		cur->de_inum.no_addr = 0;
+		cur->de_inum.no_formal_ino = 0;
 		return;
 	}
 
@@ -664,7 +670,7 @@
 	struct gfs2_dirent *ndent;
 	unsigned offset = 0, totlen;
 
-	if (dent->de_inum.no_addr)
+	if (!gfs2_dirent_sentinel(dent))
 		offset = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
 	totlen = be16_to_cpu(dent->de_rec_len);
 	BUG_ON(offset + name->len > totlen);
@@ -713,12 +719,12 @@
 static int get_leaf_nr(struct gfs2_inode *dip, u32 index,
 		       u64 *leaf_out)
 {
-	u64 leaf_no;
+	__be64 leaf_no;
 	int error;
 
 	error = gfs2_dir_read_data(dip, (char *)&leaf_no,
-				    index * sizeof(u64),
-				    sizeof(u64), 0);
+				    index * sizeof(__be64),
+				    sizeof(__be64), 0);
 	if (error != sizeof(u64))
 		return (error < 0) ? error : -EIO;
 
@@ -837,7 +843,8 @@
 	struct gfs2_leaf *leaf;
 	int y;
 	u32 x;
-	u64 *lp, bn;
+	__be64 *lp;
+	u64 bn;
 	int error;
 
 	error = gfs2_meta_inode_buffer(dip, &dibh);
@@ -893,20 +900,20 @@
 	gfs2_trans_add_bh(dip->i_gl, dibh, 1);
 	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 
-	lp = (u64 *)(dibh->b_data + sizeof(struct gfs2_dinode));
+	lp = (__be64 *)(dibh->b_data + sizeof(struct gfs2_dinode));
 
 	for (x = sdp->sd_hash_ptrs; x--; lp++)
 		*lp = cpu_to_be64(bn);
 
 	dip->i_di.di_size = sdp->sd_sb.sb_bsize / 2;
 	dip->i_di.di_blocks++;
+	gfs2_set_inode_blocks(&dip->i_inode);
 	dip->i_di.di_flags |= GFS2_DIF_EXHASH;
-	dip->i_di.di_payload_format = 0;
 
 	for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
 	dip->i_di.di_depth = y;
 
-	gfs2_dinode_out(&dip->i_di, dibh->b_data);
+	gfs2_dinode_out(dip, dibh->b_data);
 
 	brelse(dibh);
 
@@ -929,7 +936,8 @@
 	struct gfs2_leaf *nleaf, *oleaf;
 	struct gfs2_dirent *dent = NULL, *prev = NULL, *next = NULL, *new;
 	u32 start, len, half_len, divider;
-	u64 bn, *lp, leaf_no;
+	u64 bn, leaf_no;
+	__be64 *lp;
 	u32 index;
 	int x, moved = 0;
 	int error;
@@ -974,7 +982,7 @@
 	/* Change the pointers.
 	   Don't bother distinguishing stuffed from non-stuffed.
 	   This code is complicated enough already. */
-	lp = kmalloc(half_len * sizeof(u64), GFP_NOFS | __GFP_NOFAIL);
+	lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS | __GFP_NOFAIL);
 	/*  Change the pointers  */
 	for (x = 0; x < half_len; x++)
 		lp[x] = cpu_to_be64(bn);
@@ -1000,7 +1008,7 @@
 		if (dirent_next(dip, obh, &next))
 			next = NULL;
 
-		if (dent->de_inum.no_addr &&
+		if (!gfs2_dirent_sentinel(dent) &&
 		    be32_to_cpu(dent->de_hash) < divider) {
 			struct qstr str;
 			str.name = (char*)(dent+1);
@@ -1037,7 +1045,8 @@
 	error = gfs2_meta_inode_buffer(dip, &dibh);
 	if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
 		dip->i_di.di_blocks++;
-		gfs2_dinode_out(&dip->i_di, dibh->b_data);
+		gfs2_set_inode_blocks(&dip->i_inode);
+		gfs2_dinode_out(dip, dibh->b_data);
 		brelse(dibh);
 	}
 
@@ -1117,7 +1126,7 @@
 	error = gfs2_meta_inode_buffer(dip, &dibh);
 	if (!gfs2_assert_withdraw(sdp, !error)) {
 		dip->i_di.di_depth++;
-		gfs2_dinode_out(&dip->i_di, dibh->b_data);
+		gfs2_dinode_out(dip, dibh->b_data);
 		brelse(dibh);
 	}
 
@@ -1194,7 +1203,7 @@
 			   int *copied)
 {
 	const struct gfs2_dirent *dent, *dent_next;
-	struct gfs2_inum inum;
+	struct gfs2_inum_host inum;
 	u64 off, off_next;
 	unsigned int x, y;
 	int run = 0;
@@ -1341,7 +1350,7 @@
 	u32 hsize, len = 0;
 	u32 ht_offset, lp_offset, ht_offset_cur = -1;
 	u32 hash, index;
-	u64 *lp;
+	__be64 *lp;
 	int copied = 0;
 	int error = 0;
 	unsigned depth = 0;
@@ -1365,7 +1374,7 @@
 
 		if (ht_offset_cur != ht_offset) {
 			error = gfs2_dir_read_data(dip, (char *)lp,
-						ht_offset * sizeof(u64),
+						ht_offset * sizeof(__be64),
 						sdp->sd_hash_bsize, 1);
 			if (error != sdp->sd_hash_bsize) {
 				if (error >= 0)
@@ -1456,7 +1465,7 @@
  */
 
 int gfs2_dir_search(struct inode *dir, const struct qstr *name,
-		    struct gfs2_inum *inum, unsigned int *type)
+		    struct gfs2_inum_host *inum, unsigned int *type)
 {
 	struct buffer_head *bh;
 	struct gfs2_dirent *dent;
@@ -1515,7 +1524,8 @@
 		return error;
 	gfs2_trans_add_bh(ip->i_gl, bh, 1);
 	ip->i_di.di_blocks++;
-	gfs2_dinode_out(&ip->i_di, bh->b_data);
+	gfs2_set_inode_blocks(&ip->i_inode);
+	gfs2_dinode_out(ip, bh->b_data);
 	brelse(bh);
 	return 0;
 }
@@ -1531,7 +1541,7 @@
  */
 
 int gfs2_dir_add(struct inode *inode, const struct qstr *name,
-		 const struct gfs2_inum *inum, unsigned type)
+		 const struct gfs2_inum_host *inum, unsigned type)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct buffer_head *bh;
@@ -1558,8 +1568,8 @@
 				break;
 			gfs2_trans_add_bh(ip->i_gl, bh, 1);
 			ip->i_di.di_entries++;
-			ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
-			gfs2_dinode_out(&ip->i_di, bh->b_data);
+			ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
+			gfs2_dinode_out(ip, bh->b_data);
 			brelse(bh);
 			error = 0;
 			break;
@@ -1644,8 +1654,8 @@
 		gfs2_consist_inode(dip);
 	gfs2_trans_add_bh(dip->i_gl, bh, 1);
 	dip->i_di.di_entries--;
-	dip->i_di.di_mtime = dip->i_di.di_ctime = get_seconds();
-	gfs2_dinode_out(&dip->i_di, bh->b_data);
+	dip->i_inode.i_mtime.tv_sec = dip->i_inode.i_ctime.tv_sec = get_seconds();
+	gfs2_dinode_out(dip, bh->b_data);
 	brelse(bh);
 	mark_inode_dirty(&dip->i_inode);
 
@@ -1666,7 +1676,7 @@
  */
 
 int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
-		   struct gfs2_inum *inum, unsigned int new_type)
+		   struct gfs2_inum_host *inum, unsigned int new_type)
 {
 	struct buffer_head *bh;
 	struct gfs2_dirent *dent;
@@ -1692,8 +1702,8 @@
 		gfs2_trans_add_bh(dip->i_gl, bh, 1);
 	}
 
-	dip->i_di.di_mtime = dip->i_di.di_ctime = get_seconds();
-	gfs2_dinode_out(&dip->i_di, bh->b_data);
+	dip->i_inode.i_mtime.tv_sec = dip->i_inode.i_ctime.tv_sec = get_seconds();
+	gfs2_dinode_out(dip, bh->b_data);
 	brelse(bh);
 	return 0;
 }
@@ -1715,7 +1725,7 @@
 	u32 hsize, len;
 	u32 ht_offset, lp_offset, ht_offset_cur = -1;
 	u32 index = 0;
-	u64 *lp;
+	__be64 *lp;
 	u64 leaf_no;
 	int error = 0;
 
@@ -1735,7 +1745,7 @@
 
 		if (ht_offset_cur != ht_offset) {
 			error = gfs2_dir_read_data(dip, (char *)lp,
-						ht_offset * sizeof(u64),
+						ht_offset * sizeof(__be64),
 						sdp->sd_hash_bsize, 1);
 			if (error != sdp->sd_hash_bsize) {
 				if (error >= 0)
@@ -1859,6 +1869,7 @@
 		if (!dip->i_di.di_blocks)
 			gfs2_consist_inode(dip);
 		dip->i_di.di_blocks--;
+		gfs2_set_inode_blocks(&dip->i_inode);
 	}
 
 	error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
@@ -1873,7 +1884,7 @@
 		goto out_end_trans;
 
 	gfs2_trans_add_bh(dip->i_gl, dibh, 1);
-	gfs2_dinode_out(&dip->i_di, dibh->b_data);
+	gfs2_dinode_out(dip, dibh->b_data);
 	brelse(dibh);
 
 out_end_trans:
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
index 3712334..b21b336 100644
--- a/fs/gfs2/dir.h
+++ b/fs/gfs2/dir.h
@@ -31,17 +31,17 @@
 typedef int (*gfs2_filldir_t) (void *opaque,
 			      const char *name, unsigned int length,
 			      u64 offset,
-			      struct gfs2_inum *inum, unsigned int type);
+			      struct gfs2_inum_host *inum, unsigned int type);
 
 int gfs2_dir_search(struct inode *dir, const struct qstr *filename,
-		    struct gfs2_inum *inum, unsigned int *type);
+		    struct gfs2_inum_host *inum, unsigned int *type);
 int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
-		 const struct gfs2_inum *inum, unsigned int type);
+		 const struct gfs2_inum_host *inum, unsigned int type);
 int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename);
 int gfs2_dir_read(struct inode *inode, u64 * offset, void *opaque,
 		  gfs2_filldir_t filldir);
 int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
-		   struct gfs2_inum *new_inum, unsigned int new_type);
+		   struct gfs2_inum_host *new_inum, unsigned int new_type);
 
 int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
 
diff --git a/fs/gfs2/eaops.c b/fs/gfs2/eaops.c
index 92c54e9..cd747c0 100644
--- a/fs/gfs2/eaops.c
+++ b/fs/gfs2/eaops.c
@@ -120,7 +120,7 @@
 
 	if (GFS2_ACL_IS_ACCESS(er->er_name, er->er_name_len)) {
 		if (!(er->er_flags & GFS2_ERF_MODE)) {
-			er->er_mode = ip->i_di.di_mode;
+			er->er_mode = ip->i_inode.i_mode;
 			er->er_flags |= GFS2_ERF_MODE;
 		}
 		error = gfs2_acl_validate_set(ip, 1, er,
diff --git a/fs/gfs2/eattr.c b/fs/gfs2/eattr.c
index a65a4cc..ebebbdc 100644
--- a/fs/gfs2/eattr.c
+++ b/fs/gfs2/eattr.c
@@ -112,7 +112,7 @@
 static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
 {
 	struct buffer_head *bh, *eabh;
-	u64 *eablk, *end;
+	__be64 *eablk, *end;
 	int error;
 
 	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh);
@@ -129,7 +129,7 @@
 		goto out;
 	}
 
-	eablk = (u64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
+	eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
 	end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;
 
 	for (; eablk < end; eablk++) {
@@ -224,7 +224,8 @@
 	struct gfs2_rgrpd *rgd;
 	struct gfs2_holder rg_gh;
 	struct buffer_head *dibh;
-	u64 *dataptrs, bn = 0;
+	__be64 *dataptrs;
+	u64 bn = 0;
 	u64 bstart = 0;
 	unsigned int blen = 0;
 	unsigned int blks = 0;
@@ -280,6 +281,7 @@
 		if (!ip->i_di.di_blocks)
 			gfs2_consist_inode(ip);
 		ip->i_di.di_blocks--;
+		gfs2_set_inode_blocks(&ip->i_inode);
 	}
 	if (bstart)
 		gfs2_free_meta(ip, bstart, blen);
@@ -299,9 +301,9 @@
 
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 	if (!error) {
-		ip->i_di.di_ctime = get_seconds();
+		ip->i_inode.i_ctime.tv_sec = get_seconds();
 		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-		gfs2_dinode_out(&ip->i_di, dibh->b_data);
+		gfs2_dinode_out(ip, dibh->b_data);
 		brelse(dibh);
 	}
 
@@ -444,7 +446,7 @@
 	struct buffer_head **bh;
 	unsigned int amount = GFS2_EA_DATA_LEN(ea);
 	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
-	u64 *dataptrs = GFS2_EA2DATAPTRS(ea);
+	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
 	unsigned int x;
 	int error = 0;
 
@@ -597,6 +599,7 @@
 	ea->ea_num_ptrs = 0;
 
 	ip->i_di.di_blocks++;
+	gfs2_set_inode_blocks(&ip->i_inode);
 
 	return 0;
 }
@@ -629,7 +632,7 @@
 		ea->ea_num_ptrs = 0;
 		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
 	} else {
-		u64 *dataptr = GFS2_EA2DATAPTRS(ea);
+		__be64 *dataptr = GFS2_EA2DATAPTRS(ea);
 		const char *data = er->er_data;
 		unsigned int data_len = er->er_data_len;
 		unsigned int copy;
@@ -648,6 +651,7 @@
 			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);
 
 			ip->i_di.di_blocks++;
+			gfs2_set_inode_blocks(&ip->i_inode);
 
 			copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
 							   data_len;
@@ -686,7 +690,7 @@
 	if (error)
 		goto out;
 
-	error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
+	error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
 	if (error)
 		goto out_gunlock_q;
 
@@ -710,13 +714,13 @@
 	if (!error) {
 		if (er->er_flags & GFS2_ERF_MODE) {
 			gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
-					    (ip->i_di.di_mode & S_IFMT) ==
+					    (ip->i_inode.i_mode & S_IFMT) ==
 					    (er->er_mode & S_IFMT));
-			ip->i_di.di_mode = er->er_mode;
+			ip->i_inode.i_mode = er->er_mode;
 		}
-		ip->i_di.di_ctime = get_seconds();
+		ip->i_inode.i_ctime.tv_sec = get_seconds();
 		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-		gfs2_dinode_out(&ip->i_di, dibh->b_data);
+		gfs2_dinode_out(ip, dibh->b_data);
 		brelse(dibh);
 	}
 
@@ -846,12 +850,12 @@
 
 	if (er->er_flags & GFS2_ERF_MODE) {
 		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
-			(ip->i_di.di_mode & S_IFMT) == (er->er_mode & S_IFMT));
-		ip->i_di.di_mode = er->er_mode;
+			(ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT));
+		ip->i_inode.i_mode = er->er_mode;
 	}
-	ip->i_di.di_ctime = get_seconds();
+	ip->i_inode.i_ctime.tv_sec = get_seconds();
 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-	gfs2_dinode_out(&ip->i_di, dibh->b_data);
+	gfs2_dinode_out(ip, dibh->b_data);
 	brelse(dibh);
 out:
 	gfs2_trans_end(GFS2_SB(&ip->i_inode));
@@ -931,12 +935,12 @@
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct buffer_head *indbh, *newbh;
-	u64 *eablk;
+	__be64 *eablk;
 	int error;
 	int mh_size = sizeof(struct gfs2_meta_header);
 
 	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
-		u64 *end;
+		__be64 *end;
 
 		error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT,
 				       &indbh);
@@ -948,7 +952,7 @@
 			goto out;
 		}
 
-		eablk = (u64 *)(indbh->b_data + mh_size);
+		eablk = (__be64 *)(indbh->b_data + mh_size);
 		end = eablk + sdp->sd_inptrs;
 
 		for (; eablk < end; eablk++)
@@ -971,11 +975,12 @@
 		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
 		gfs2_buffer_clear_tail(indbh, mh_size);
 
-		eablk = (u64 *)(indbh->b_data + mh_size);
+		eablk = (__be64 *)(indbh->b_data + mh_size);
 		*eablk = cpu_to_be64(ip->i_di.di_eattr);
 		ip->i_di.di_eattr = blk;
 		ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
 		ip->i_di.di_blocks++;
+		gfs2_set_inode_blocks(&ip->i_inode);
 
 		eablk++;
 	}
@@ -1129,9 +1134,9 @@
 
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 	if (!error) {
-		ip->i_di.di_ctime = get_seconds();
+		ip->i_inode.i_ctime.tv_sec = get_seconds();
 		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-		gfs2_dinode_out(&ip->i_di, dibh->b_data);
+		gfs2_dinode_out(ip, dibh->b_data);
 		brelse(dibh);
 	}
 
@@ -1202,7 +1207,7 @@
 	struct buffer_head **bh;
 	unsigned int amount = GFS2_EA_DATA_LEN(ea);
 	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
-	u64 *dataptrs = GFS2_EA2DATAPTRS(ea);
+	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
 	unsigned int x;
 	int error;
 
@@ -1284,9 +1289,8 @@
 	if (!error) {
 		error = inode_setattr(&ip->i_inode, attr);
 		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
-		gfs2_inode_attr_out(ip);
 		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-		gfs2_dinode_out(&ip->i_di, dibh->b_data);
+		gfs2_dinode_out(ip, dibh->b_data);
 		brelse(dibh);
 	}
 
@@ -1300,7 +1304,7 @@
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_rgrp_list rlist;
 	struct buffer_head *indbh, *dibh;
-	u64 *eablk, *end;
+	__be64 *eablk, *end;
 	unsigned int rg_blocks = 0;
 	u64 bstart = 0;
 	unsigned int blen = 0;
@@ -1319,7 +1323,7 @@
 		goto out;
 	}
 
-	eablk = (u64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
+	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
 	end = eablk + sdp->sd_inptrs;
 
 	for (; eablk < end; eablk++) {
@@ -1363,7 +1367,7 @@
 
 	gfs2_trans_add_bh(ip->i_gl, indbh, 1);
 
-	eablk = (u64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
+	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
 	bstart = 0;
 	blen = 0;
 
@@ -1387,6 +1391,7 @@
 		if (!ip->i_di.di_blocks)
 			gfs2_consist_inode(ip);
 		ip->i_di.di_blocks--;
+		gfs2_set_inode_blocks(&ip->i_inode);
 	}
 	if (bstart)
 		gfs2_free_meta(ip, bstart, blen);
@@ -1396,7 +1401,7 @@
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 	if (!error) {
 		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-		gfs2_dinode_out(&ip->i_di, dibh->b_data);
+		gfs2_dinode_out(ip, dibh->b_data);
 		brelse(dibh);
 	}
 
@@ -1441,11 +1446,12 @@
 	if (!ip->i_di.di_blocks)
 		gfs2_consist_inode(ip);
 	ip->i_di.di_blocks--;
+	gfs2_set_inode_blocks(&ip->i_inode);
 
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 	if (!error) {
 		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-		gfs2_dinode_out(&ip->i_di, dibh->b_data);
+		gfs2_dinode_out(ip, dibh->b_data);
 		brelse(dibh);
 	}
 
diff --git a/fs/gfs2/eattr.h b/fs/gfs2/eattr.h
index ffa6594..c82dbe0 100644
--- a/fs/gfs2/eattr.h
+++ b/fs/gfs2/eattr.h
@@ -19,7 +19,7 @@
 #define GFS2_EA_SIZE(ea) \
 ALIGN(sizeof(struct gfs2_ea_header) + (ea)->ea_name_len + \
       ((GFS2_EA_IS_STUFFED(ea)) ? GFS2_EA_DATA_LEN(ea) : \
-                                  (sizeof(u64) * (ea)->ea_num_ptrs)), 8)
+                                  (sizeof(__be64) * (ea)->ea_num_ptrs)), 8)
 
 #define GFS2_EA_IS_STUFFED(ea) (!(ea)->ea_num_ptrs)
 #define GFS2_EA_IS_LAST(ea) ((ea)->ea_flags & GFS2_EAFLAG_LAST)
@@ -29,13 +29,13 @@
 
 #define GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er) \
 ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + \
-      sizeof(u64) * DIV_ROUND_UP((er)->er_data_len, (sdp)->sd_jbsize), 8)
+      sizeof(__be64) * DIV_ROUND_UP((er)->er_data_len, (sdp)->sd_jbsize), 8)
 
 #define GFS2_EA2NAME(ea) ((char *)((struct gfs2_ea_header *)(ea) + 1))
 #define GFS2_EA2DATA(ea) (GFS2_EA2NAME(ea) + (ea)->ea_name_len)
 
 #define GFS2_EA2DATAPTRS(ea) \
-((u64 *)(GFS2_EA2NAME(ea) + ALIGN((ea)->ea_name_len, 8)))
+((__be64 *)(GFS2_EA2NAME(ea) + ALIGN((ea)->ea_name_len, 8)))
 
 #define GFS2_EA2NEXT(ea) \
 ((struct gfs2_ea_header *)((char *)(ea) + GFS2_EA_REC_LEN(ea)))
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 78fe0fa..4381469 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -35,7 +35,7 @@
 
 struct greedy {
 	struct gfs2_holder gr_gh;
-	struct work_struct gr_work;
+	struct delayed_work gr_work;
 };
 
 struct gfs2_gl_hash_bucket {
@@ -96,7 +96,7 @@
 	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
 }
 #else /* not SMP, so no spinlocks required */
-static inline rwlock_t *gl_lock_addr(x)
+static inline rwlock_t *gl_lock_addr(unsigned int x)
 {
 	return NULL;
 }
@@ -769,7 +769,7 @@
 	} else {
 		spin_unlock(&gl->gl_spin);
 
-		new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_KERNEL);
+		new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_NOFS);
 		if (!new_gh)
 			return;
 		set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
@@ -785,21 +785,6 @@
 		gfs2_holder_put(new_gh);
 }
 
-void gfs2_glock_inode_squish(struct inode *inode)
-{
-	struct gfs2_holder gh;
-	struct gfs2_glock *gl = GFS2_I(inode)->i_gl;
-	gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh);
-	set_bit(HIF_DEMOTE, &gh.gh_iflags);
-	spin_lock(&gl->gl_spin);
-	gfs2_assert(inode->i_sb->s_fs_info, list_empty(&gl->gl_holders));
-	list_add_tail(&gh.gh_list, &gl->gl_waiters2);
-	run_queue(gl);
-	spin_unlock(&gl->gl_spin);
-	wait_for_completion(&gh.gh_wait);
-	gfs2_holder_uninit(&gh);
-}
-
 /**
  * state_change - record that the glock is now in a different state
  * @gl: the glock
@@ -847,12 +832,12 @@
 
 	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
 		if (glops->go_inval)
-			glops->go_inval(gl, DIO_METADATA | DIO_DATA);
+			glops->go_inval(gl, DIO_METADATA);
 	} else if (gl->gl_state == LM_ST_DEFERRED) {
 		/* We might not want to do this here.
 		   Look at moving to the inode glops. */
 		if (glops->go_inval)
-			glops->go_inval(gl, DIO_DATA);
+			glops->go_inval(gl, 0);
 	}
 
 	/*  Deal with each possible exit condition  */
@@ -954,7 +939,7 @@
 	gfs2_assert_warn(sdp, state != gl->gl_state);
 
 	if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
-		glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);
+		glops->go_sync(gl);
 
 	gfs2_glock_hold(gl);
 	gl->gl_req_bh = xmote_bh;
@@ -995,7 +980,7 @@
 	state_change(gl, LM_ST_UNLOCKED);
 
 	if (glops->go_inval)
-		glops->go_inval(gl, DIO_METADATA | DIO_DATA);
+		glops->go_inval(gl, DIO_METADATA);
 
 	if (gh) {
 		spin_lock(&gl->gl_spin);
@@ -1041,7 +1026,7 @@
 	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
 
 	if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
-		glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);
+		glops->go_sync(gl);
 
 	gfs2_glock_hold(gl);
 	gl->gl_req_bh = drop_bh;
@@ -1244,9 +1229,6 @@
 
 	clear_bit(GLF_PREFETCH, &gl->gl_flags);
 
-	if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
-		dump_glock(gl);
-
 	return error;
 }
 
@@ -1368,9 +1350,9 @@
 	glops->go_xmote_th(gl, state, flags);
 }
 
-static void greedy_work(void *data)
+static void greedy_work(struct work_struct *work)
 {
-	struct greedy *gr = data;
+	struct greedy *gr = container_of(work, struct greedy, gr_work.work);
 	struct gfs2_holder *gh = &gr->gr_gh;
 	struct gfs2_glock *gl = gh->gh_gl;
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
@@ -1422,7 +1404,7 @@
 
 	gfs2_holder_init(gl, 0, 0, gh);
 	set_bit(HIF_GREEDY, &gh->gh_iflags);
-	INIT_WORK(&gr->gr_work, greedy_work, gr);
+	INIT_DELAYED_WORK(&gr->gr_work, greedy_work);
 
 	set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
 	schedule_delayed_work(&gr->gr_work, time);
@@ -1923,7 +1905,7 @@
 
 static void scan_glock(struct gfs2_glock *gl)
 {
-	if (gl->gl_ops == &gfs2_inode_glops)
+	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
 		return;
 
 	if (gfs2_glmutex_trylock(gl)) {
@@ -2078,7 +2060,7 @@
 	printk(KERN_INFO "    num = %llu %llu\n",
 		    (unsigned long long)ip->i_num.no_formal_ino,
 		    (unsigned long long)ip->i_num.no_addr);
-	printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_di.di_mode));
+	printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_inode.i_mode));
 	printk(KERN_INFO "    i_flags =");
 	for (x = 0; x < 32; x++)
 		if (test_bit(x, &ip->i_flags))
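/*
 * A minimal sketch of the delayed_work conversion that greedy_work() above
 * follows: handlers now take a struct work_struct * and recover their
 * container with container_of().  Names here (greedy_example, gr_work) are
 * hypothetical and only illustrate the pattern, assuming the 2.6.20-era
 * workqueue API.
 */
#include <linux/workqueue.h>

struct greedy_example {
	struct delayed_work gr_work;	/* embeds a work_struct plus a timer */
};

static void greedy_example_work(struct work_struct *work)
{
	/* delayed_work keeps its work_struct in the .work member */
	struct greedy_example *gr =
		container_of(work, struct greedy_example, gr_work.work);

	/* ... use gr ... */
}

/* setup:	INIT_DELAYED_WORK(&gr->gr_work, greedy_example_work);
 * queueing:	schedule_delayed_work(&gr->gr_work, delay_in_jiffies);
 */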
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 2b2a889..fb39108 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -27,8 +27,6 @@
 #define GL_ATIME		0x00000200
 #define GL_NOCACHE		0x00000400
 #define GL_NOCANCEL		0x00001000
-#define GL_AOP			0x00004000
-#define GL_DUMP			0x00008000
 
 #define GLR_TRYFAILED		13
 #define GLR_CANCELED		14
@@ -108,7 +106,6 @@
 void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
 			     const struct gfs2_glock_operations *glops,
 			     unsigned int state, int flags);
-void gfs2_glock_inode_squish(struct inode *inode);
 
 /**
  * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 41a6b68..b068d10 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -92,7 +92,7 @@
 
 	ip = gl->gl_object;
 	inode = &ip->i_inode;
-	if (!ip || !S_ISREG(ip->i_di.di_mode))
+	if (!ip || !S_ISREG(inode->i_mode))
 		return;
 
 	if (!test_bit(GIF_PAGED, &ip->i_flags))
@@ -107,89 +107,20 @@
 }
 
 /**
- * gfs2_page_inval - Invalidate all pages associated with a glock
- * @gl: the glock
- *
- */
-
-static void gfs2_page_inval(struct gfs2_glock *gl)
-{
-	struct gfs2_inode *ip;
-	struct inode *inode;
-
-	ip = gl->gl_object;
-	inode = &ip->i_inode;
-	if (!ip || !S_ISREG(ip->i_di.di_mode))
-		return;
-
-	truncate_inode_pages(inode->i_mapping, 0);
-	gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages);
-	clear_bit(GIF_PAGED, &ip->i_flags);
-}
-
-/**
- * gfs2_page_wait - Wait for writeback of data
- * @gl: the glock
- *
- * Syncs data (not metadata) for a regular file.
- * No-op for all other types.
- */
-
-static void gfs2_page_wait(struct gfs2_glock *gl)
-{
-	struct gfs2_inode *ip = gl->gl_object;
-	struct inode *inode = &ip->i_inode;
-	struct address_space *mapping = inode->i_mapping;
-	int error;
-
-	if (!S_ISREG(ip->i_di.di_mode))
-		return;
-
-	error = filemap_fdatawait(mapping);
-
-	/* Put back any errors cleared by filemap_fdatawait()
-	   so they can be caught by someone who can pass them
-	   up to user space. */
-
-	if (error == -ENOSPC)
-		set_bit(AS_ENOSPC, &mapping->flags);
-	else if (error)
-		set_bit(AS_EIO, &mapping->flags);
-
-}
-
-static void gfs2_page_writeback(struct gfs2_glock *gl)
-{
-	struct gfs2_inode *ip = gl->gl_object;
-	struct inode *inode = &ip->i_inode;
-	struct address_space *mapping = inode->i_mapping;
-
-	if (!S_ISREG(ip->i_di.di_mode))
-		return;
-
-	filemap_fdatawrite(mapping);
-}
-
-/**
  * meta_go_sync - sync out the metadata for this glock
  * @gl: the glock
- * @flags: DIO_*
  *
  * Called when demoting or unlocking an EX glock.  We must flush
  * to disk all dirty buffers/pages relating to this glock, and must not
  * return to caller to demote/unlock the glock until I/O is complete.
  */
 
-static void meta_go_sync(struct gfs2_glock *gl, int flags)
+static void meta_go_sync(struct gfs2_glock *gl)
 {
-	if (!(flags & DIO_METADATA))
-		return;
-
 	if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
 		gfs2_log_flush(gl->gl_sbd, gl);
 		gfs2_meta_sync(gl);
-		if (flags & DIO_RELEASE)
-			gfs2_ail_empty_gl(gl);
+		gfs2_ail_empty_gl(gl);
 	}
 
 }
@@ -264,31 +195,31 @@
 /**
  * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
  * @gl: the glock protecting the inode
- * @flags:
  *
  */
 
-static void inode_go_sync(struct gfs2_glock *gl, int flags)
+static void inode_go_sync(struct gfs2_glock *gl)
 {
-	int meta = (flags & DIO_METADATA);
-	int data = (flags & DIO_DATA);
+	struct gfs2_inode *ip = gl->gl_object;
+
+	if (ip && !S_ISREG(ip->i_inode.i_mode))
+		ip = NULL;
 
 	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
-		if (meta && data) {
-			gfs2_page_writeback(gl);
-			gfs2_log_flush(gl->gl_sbd, gl);
-			gfs2_meta_sync(gl);
-			gfs2_page_wait(gl);
-			clear_bit(GLF_DIRTY, &gl->gl_flags);
-		} else if (meta) {
-			gfs2_log_flush(gl->gl_sbd, gl);
-			gfs2_meta_sync(gl);
-		} else if (data) {
-			gfs2_page_writeback(gl);
-			gfs2_page_wait(gl);
+		gfs2_log_flush(gl->gl_sbd, gl);
+		if (ip)
+			filemap_fdatawrite(ip->i_inode.i_mapping);
+		gfs2_meta_sync(gl);
+		if (ip) {
+			struct address_space *mapping = ip->i_inode.i_mapping;
+			int error = filemap_fdatawait(mapping);
+			if (error == -ENOSPC)
+				set_bit(AS_ENOSPC, &mapping->flags);
+			else if (error)
+				set_bit(AS_EIO, &mapping->flags);
 		}
-		if (flags & DIO_RELEASE)
-			gfs2_ail_empty_gl(gl);
+		clear_bit(GLF_DIRTY, &gl->gl_flags);
+		gfs2_ail_empty_gl(gl);
 	}
 }
 
@@ -301,15 +232,20 @@
 
 static void inode_go_inval(struct gfs2_glock *gl, int flags)
 {
+	struct gfs2_inode *ip = gl->gl_object;
 	int meta = (flags & DIO_METADATA);
-	int data = (flags & DIO_DATA);
 
 	if (meta) {
 		gfs2_meta_inval(gl);
-		gl->gl_vn++;
+		if (ip)
+			set_bit(GIF_INVALID, &ip->i_flags);
 	}
-	if (data)
-		gfs2_page_inval(gl);
+
+	if (ip && S_ISREG(ip->i_inode.i_mode)) {
+		truncate_inode_pages(ip->i_inode.i_mapping, 0);
+		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !ip->i_inode.i_mapping->nrpages);
+		clear_bit(GIF_PAGED, &ip->i_flags);
+	}
 }
 
 /**
@@ -351,11 +287,10 @@
 	if (!ip)
 		return 0;
 
-	if (ip->i_vn != gl->gl_vn) {
+	if (test_bit(GIF_INVALID, &ip->i_flags)) {
 		error = gfs2_inode_refresh(ip);
 		if (error)
 			return error;
-		gfs2_inode_attr_in(ip);
 	}
 
 	if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
@@ -379,11 +314,8 @@
 	struct gfs2_glock *gl = gh->gh_gl;
 	struct gfs2_inode *ip = gl->gl_object;
 
-	if (ip == NULL)
-		return;
-	if (test_bit(GLF_DIRTY, &gl->gl_flags))
-		gfs2_inode_attr_in(ip);
-	gfs2_meta_cache_flush(ip);
+	if (ip)
+		gfs2_meta_cache_flush(ip);
 }
 
 /**
@@ -491,13 +423,13 @@
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
 	struct gfs2_glock *j_gl = ip->i_gl;
-	struct gfs2_log_header head;
+	struct gfs2_log_header_host head;
 	int error;
 
 	if (gl->gl_state != LM_ST_UNLOCKED &&
 	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
 		gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
-		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);
+		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
 
 		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
 		if (error)
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 118dc69..734421e 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -14,8 +14,6 @@
 
 #define DIO_WAIT	0x00000010
 #define DIO_METADATA	0x00000020
-#define DIO_DATA	0x00000040
-#define DIO_RELEASE	0x00000080
 #define DIO_ALL		0x00000100
 
 struct gfs2_log_operations;
@@ -41,7 +39,7 @@
 	void (*lo_before_commit) (struct gfs2_sbd *sdp);
 	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai);
 	void (*lo_before_scan) (struct gfs2_jdesc *jd,
-				struct gfs2_log_header *head, int pass);
+				struct gfs2_log_header_host *head, int pass);
 	int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
 				 struct gfs2_log_descriptor *ld, __be64 *ptr,
 				 int pass);
@@ -67,8 +65,8 @@
 	struct list_head rd_list_mru;
 	struct list_head rd_recent;	/* Recently used rgrps */
 	struct gfs2_glock *rd_gl;	/* Glock for this rgrp */
-	struct gfs2_rindex rd_ri;
-	struct gfs2_rgrp rd_rg;
+	struct gfs2_rindex_host rd_ri;
+	struct gfs2_rgrp_host rd_rg;
 	u64 rd_rg_vn;
 	struct gfs2_bitmap *rd_bits;
 	unsigned int rd_bh_count;
@@ -103,18 +101,17 @@
 };
 
 struct gfs2_glock_operations {
-	void (*go_xmote_th) (struct gfs2_glock * gl, unsigned int state,
-			     int flags);
-	void (*go_xmote_bh) (struct gfs2_glock * gl);
-	void (*go_drop_th) (struct gfs2_glock * gl);
-	void (*go_drop_bh) (struct gfs2_glock * gl);
-	void (*go_sync) (struct gfs2_glock * gl, int flags);
-	void (*go_inval) (struct gfs2_glock * gl, int flags);
-	int (*go_demote_ok) (struct gfs2_glock * gl);
-	int (*go_lock) (struct gfs2_holder * gh);
-	void (*go_unlock) (struct gfs2_holder * gh);
-	void (*go_callback) (struct gfs2_glock * gl, unsigned int state);
-	void (*go_greedy) (struct gfs2_glock * gl);
+	void (*go_xmote_th) (struct gfs2_glock *gl, unsigned int state, int flags);
+	void (*go_xmote_bh) (struct gfs2_glock *gl);
+	void (*go_drop_th) (struct gfs2_glock *gl);
+	void (*go_drop_bh) (struct gfs2_glock *gl);
+	void (*go_sync) (struct gfs2_glock *gl);
+	void (*go_inval) (struct gfs2_glock *gl, int flags);
+	int (*go_demote_ok) (struct gfs2_glock *gl);
+	int (*go_lock) (struct gfs2_holder *gh);
+	void (*go_unlock) (struct gfs2_holder *gh);
+	void (*go_callback) (struct gfs2_glock *gl, unsigned int state);
+	void (*go_greedy) (struct gfs2_glock *gl);
 	const int go_type;
 };
 
@@ -217,6 +214,7 @@
 };
 
 enum {
+	GIF_INVALID		= 0,
 	GIF_QD_LOCKED		= 1,
 	GIF_PAGED		= 2,
 	GIF_SW_PAGED		= 3,
@@ -224,12 +222,11 @@
 
 struct gfs2_inode {
 	struct inode i_inode;
-	struct gfs2_inum i_num;
+	struct gfs2_inum_host i_num;
 
 	unsigned long i_flags;		/* GIF_... */
 
-	u64 i_vn;
-	struct gfs2_dinode i_di; /* To be replaced by ref to block */
+	struct gfs2_dinode_host i_di; /* To be replaced by ref to block */
 
 	struct gfs2_glock *i_gl; /* Move into i_gh? */
 	struct gfs2_holder i_iopen_gh;
@@ -450,7 +447,7 @@
 	struct super_block *sd_vfs_meta;
 	struct kobject sd_kobj;
 	unsigned long sd_flags;	/* SDF_... */
-	struct gfs2_sb sd_sb;
+	struct gfs2_sb_host sd_sb;
 
 	/* Constants computed on mount */
 
@@ -503,8 +500,8 @@
 
 	spinlock_t sd_statfs_spin;
 	struct mutex sd_statfs_mutex;
-	struct gfs2_statfs_change sd_statfs_master;
-	struct gfs2_statfs_change sd_statfs_local;
+	struct gfs2_statfs_change_host sd_statfs_master;
+	struct gfs2_statfs_change_host sd_statfs_local;
 	unsigned long sd_statfs_sync_time;
 
 	/* Resource group stuff */
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index d470e52..d122074c 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -38,83 +38,12 @@
 #include "trans.h"
 #include "util.h"
 
-/**
- * gfs2_inode_attr_in - Copy attributes from the dinode into the VFS inode
- * @ip: The GFS2 inode (with embedded disk inode data)
- * @inode:  The Linux VFS inode
- *
- */
-
-void gfs2_inode_attr_in(struct gfs2_inode *ip)
-{
-	struct inode *inode = &ip->i_inode;
-	struct gfs2_dinode *di = &ip->i_di;
-
-	inode->i_ino = ip->i_num.no_addr;
-
-	switch (di->di_mode & S_IFMT) {
-	case S_IFBLK:
-	case S_IFCHR:
-		inode->i_rdev = MKDEV(di->di_major, di->di_minor);
-		break;
-	default:
-		inode->i_rdev = 0;
-		break;
-	};
-
-	inode->i_mode = di->di_mode;
-	inode->i_nlink = di->di_nlink;
-	inode->i_uid = di->di_uid;
-	inode->i_gid = di->di_gid;
-	i_size_write(inode, di->di_size);
-	inode->i_atime.tv_sec = di->di_atime;
-	inode->i_mtime.tv_sec = di->di_mtime;
-	inode->i_ctime.tv_sec = di->di_ctime;
-	inode->i_atime.tv_nsec = 0;
-	inode->i_mtime.tv_nsec = 0;
-	inode->i_ctime.tv_nsec = 0;
-	inode->i_blocks = di->di_blocks <<
-		(GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
-
-	if (di->di_flags & GFS2_DIF_IMMUTABLE)
-		inode->i_flags |= S_IMMUTABLE;
-	else
-		inode->i_flags &= ~S_IMMUTABLE;
-
-	if (di->di_flags & GFS2_DIF_APPENDONLY)
-		inode->i_flags |= S_APPEND;
-	else
-		inode->i_flags &= ~S_APPEND;
-}
-
-/**
- * gfs2_inode_attr_out - Copy attributes from VFS inode into the dinode
- * @ip: The GFS2 inode
- *
- * Only copy out the attributes that we want the VFS layer
- * to be able to modify.
- */
-
-void gfs2_inode_attr_out(struct gfs2_inode *ip)
-{
-	struct inode *inode = &ip->i_inode;
-	struct gfs2_dinode *di = &ip->i_di;
-	gfs2_assert_withdraw(GFS2_SB(inode),
-		(di->di_mode & S_IFMT) == (inode->i_mode & S_IFMT));
-	di->di_mode = inode->i_mode;
-	di->di_uid = inode->i_uid;
-	di->di_gid = inode->i_gid;
-	di->di_atime = inode->i_atime.tv_sec;
-	di->di_mtime = inode->i_mtime.tv_sec;
-	di->di_ctime = inode->i_ctime.tv_sec;
-}
-
 static int iget_test(struct inode *inode, void *opaque)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
-	struct gfs2_inum *inum = opaque;
+	struct gfs2_inum_host *inum = opaque;
 
-	if (ip && ip->i_num.no_addr == inum->no_addr)
+	if (ip->i_num.no_addr == inum->no_addr)
 		return 1;
 
 	return 0;
@@ -123,19 +52,20 @@
 static int iget_set(struct inode *inode, void *opaque)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
-	struct gfs2_inum *inum = opaque;
+	struct gfs2_inum_host *inum = opaque;
 
 	ip->i_num = *inum;
+	inode->i_ino = inum->no_addr;
 	return 0;
 }
 
-struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum *inum)
+struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum_host *inum)
 {
 	return ilookup5(sb, (unsigned long)inum->no_formal_ino,
 			iget_test, inum);
 }
 
-static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum *inum)
+static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum_host *inum)
 {
 	return iget5_locked(sb, (unsigned long)inum->no_formal_ino,
 		     iget_test, iget_set, inum);
@@ -150,7 +80,7 @@
  * Returns: A VFS inode, or an error
  */
 
-struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum, unsigned int type)
+struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum_host *inum, unsigned int type)
 {
 	struct inode *inode = gfs2_iget(sb, inum);
 	struct gfs2_inode *ip = GFS2_I(inode);
@@ -188,7 +118,7 @@
 		if (unlikely(error))
 			goto fail_put;
 
-		ip->i_vn = ip->i_gl->gl_vn - 1;
+		set_bit(GIF_INVALID, &ip->i_flags);
 		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
 		if (unlikely(error))
 			goto fail_iopen;
@@ -208,6 +138,63 @@
 	return ERR_PTR(error);
 }
 
+static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
+{
+	struct gfs2_dinode_host *di = &ip->i_di;
+	const struct gfs2_dinode *str = buf;
+
+	if (ip->i_num.no_addr != be64_to_cpu(str->di_num.no_addr)) {
+		if (gfs2_consist_inode(ip))
+			gfs2_dinode_print(ip);
+		return -EIO;
+	}
+	if (ip->i_num.no_formal_ino != be64_to_cpu(str->di_num.no_formal_ino))
+		return -ESTALE;
+
+	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
+	ip->i_inode.i_rdev = 0;
+	switch (ip->i_inode.i_mode & S_IFMT) {
+	case S_IFBLK:
+	case S_IFCHR:
+		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
+					   be32_to_cpu(str->di_minor));
+		break;
+	};
+
+	ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
+	ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
+	/*
+	 * We will need to review setting the nlink count here in the
+	 * light of the forthcoming ro bind mount work. This is a reminder
+	 * to do that.
+	 */
+	ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
+	di->di_size = be64_to_cpu(str->di_size);
+	i_size_write(&ip->i_inode, di->di_size);
+	di->di_blocks = be64_to_cpu(str->di_blocks);
+	gfs2_set_inode_blocks(&ip->i_inode);
+	ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
+	ip->i_inode.i_atime.tv_nsec = 0;
+	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
+	ip->i_inode.i_mtime.tv_nsec = 0;
+	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
+	ip->i_inode.i_ctime.tv_nsec = 0;
+
+	di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
+	di->di_goal_data = be64_to_cpu(str->di_goal_data);
+	di->di_generation = be64_to_cpu(str->di_generation);
+
+	di->di_flags = be32_to_cpu(str->di_flags);
+	gfs2_set_inode_flags(&ip->i_inode);
+	di->di_height = be16_to_cpu(str->di_height);
+
+	di->di_depth = be16_to_cpu(str->di_depth);
+	di->di_entries = be32_to_cpu(str->di_entries);
+
+	di->di_eattr = be64_to_cpu(str->di_eattr);
+	return 0;
+}
+
 /**
  * gfs2_inode_refresh - Refresh the incore copy of the dinode
  * @ip: The GFS2 inode
@@ -229,21 +216,11 @@
 		return -EIO;
 	}
 
-	gfs2_dinode_in(&ip->i_di, dibh->b_data);
-
+	error = gfs2_dinode_in(ip, dibh->b_data);
 	brelse(dibh);
+	clear_bit(GIF_INVALID, &ip->i_flags);
 
-	if (ip->i_num.no_addr != ip->i_di.di_num.no_addr) {
-		if (gfs2_consist_inode(ip))
-			gfs2_dinode_print(&ip->i_di);
-		return -EIO;
-	}
-	if (ip->i_num.no_formal_ino != ip->i_di.di_num.no_formal_ino)
-		return -ESTALE;
-
-	ip->i_vn = ip->i_gl->gl_vn;
-
-	return 0;
+	return error;
 }
 
 int gfs2_dinode_dealloc(struct gfs2_inode *ip)
@@ -255,7 +232,7 @@
 
 	if (ip->i_di.di_blocks != 1) {
 		if (gfs2_consist_inode(ip))
-			gfs2_dinode_print(&ip->i_di);
+			gfs2_dinode_print(ip);
 		return -EIO;
 	}
 
@@ -318,14 +295,14 @@
 	u32 nlink;
 	int error;
 
-	BUG_ON(ip->i_di.di_nlink != ip->i_inode.i_nlink);
-	nlink = ip->i_di.di_nlink + diff;
+	BUG_ON(diff != 1 && diff != -1);
+	nlink = ip->i_inode.i_nlink + diff;
 
 	/* If we are reducing the nlink count, but the new value ends up being
 	   bigger than the old one, we must have underflowed. */
-	if (diff < 0 && nlink > ip->i_di.di_nlink) {
+	if (diff < 0 && nlink > ip->i_inode.i_nlink) {
 		if (gfs2_consist_inode(ip))
-			gfs2_dinode_print(&ip->i_di);
+			gfs2_dinode_print(ip);
 		return -EIO;
 	}
 
@@ -333,16 +310,19 @@
 	if (error)
 		return error;
 
-	ip->i_di.di_nlink = nlink;
-	ip->i_di.di_ctime = get_seconds();
-	ip->i_inode.i_nlink = nlink;
+	if (diff > 0)
+		inc_nlink(&ip->i_inode);
+	else
+		drop_nlink(&ip->i_inode);
+
+	ip->i_inode.i_ctime.tv_sec = get_seconds();
 
 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-	gfs2_dinode_out(&ip->i_di, dibh->b_data);
+	gfs2_dinode_out(ip, dibh->b_data);
 	brelse(dibh);
 	mark_inode_dirty(&ip->i_inode);
 
-	if (ip->i_di.di_nlink == 0) {
+	if (ip->i_inode.i_nlink == 0) {
 		struct gfs2_rgrpd *rgd;
 		struct gfs2_holder ri_gh, rg_gh;
 
@@ -357,7 +337,6 @@
 		if (error)
 			goto out_norgrp;
 
-		clear_nlink(&ip->i_inode);
 		gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */
 		gfs2_glock_dq_uninit(&rg_gh);
 out_norgrp:
@@ -394,7 +373,7 @@
 	struct super_block *sb = dir->i_sb;
 	struct gfs2_inode *dip = GFS2_I(dir);
 	struct gfs2_holder d_gh;
-	struct gfs2_inum inum;
+	struct gfs2_inum_host inum;
 	unsigned int type;
 	int error = 0;
 	struct inode *inode = NULL;
@@ -436,7 +415,7 @@
 {
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
 	struct buffer_head *bh;
-	struct gfs2_inum_range ir;
+	struct gfs2_inum_range_host ir;
 	int error;
 
 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
@@ -479,7 +458,7 @@
 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
 	struct gfs2_holder gh;
 	struct buffer_head *bh;
-	struct gfs2_inum_range ir;
+	struct gfs2_inum_range_host ir;
 	int error;
 
 	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
@@ -500,21 +479,22 @@
 	if (!ir.ir_length) {
 		struct buffer_head *m_bh;
 		u64 x, y;
+		__be64 z;
 
 		error = gfs2_meta_inode_buffer(m_ip, &m_bh);
 		if (error)
 			goto out_brelse;
 
-		x = *(u64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
-		x = y = be64_to_cpu(x);
+		z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
+		x = y = be64_to_cpu(z);
 		ir.ir_start = x;
 		ir.ir_length = GFS2_INUM_QUANTUM;
 		x += GFS2_INUM_QUANTUM;
 		if (x < y)
 			gfs2_consist_inode(m_ip);
-		x = cpu_to_be64(x);
+		z = cpu_to_be64(x);
 		gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
-		*(u64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = x;
+		*(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;
 
 		brelse(m_bh);
 	}
@@ -567,7 +547,7 @@
 		return error;
 
 	/*  Don't create entries in an unlinked directory  */
-	if (!dip->i_di.di_nlink)
+	if (!dip->i_inode.i_nlink)
 		return -EPERM;
 
 	error = gfs2_dir_search(&dip->i_inode, name, NULL, NULL);
@@ -583,7 +563,7 @@
 
 	if (dip->i_di.di_entries == (u32)-1)
 		return -EFBIG;
-	if (S_ISDIR(mode) && dip->i_di.di_nlink == (u32)-1)
+	if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
 		return -EMLINK;
 
 	return 0;
@@ -593,24 +573,24 @@
 			       unsigned int *uid, unsigned int *gid)
 {
 	if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
-	    (dip->i_di.di_mode & S_ISUID) && dip->i_di.di_uid) {
+	    (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
 		if (S_ISDIR(*mode))
 			*mode |= S_ISUID;
-		else if (dip->i_di.di_uid != current->fsuid)
+		else if (dip->i_inode.i_uid != current->fsuid)
 			*mode &= ~07111;
-		*uid = dip->i_di.di_uid;
+		*uid = dip->i_inode.i_uid;
 	} else
 		*uid = current->fsuid;
 
-	if (dip->i_di.di_mode & S_ISGID) {
+	if (dip->i_inode.i_mode & S_ISGID) {
 		if (S_ISDIR(*mode))
 			*mode |= S_ISGID;
-		*gid = dip->i_di.di_gid;
+		*gid = dip->i_inode.i_gid;
 	} else
 		*gid = current->fsgid;
 }
 
-static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_inum *inum,
+static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_inum_host *inum,
 			u64 *generation)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
@@ -650,9 +630,9 @@
  */
 
 static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
-			const struct gfs2_inum *inum, unsigned int mode,
+			const struct gfs2_inum_host *inum, unsigned int mode,
 			unsigned int uid, unsigned int gid,
-			const u64 *generation)
+			const u64 *generation, dev_t dev)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
 	struct gfs2_dinode *di;
@@ -669,14 +649,15 @@
 	di->di_mode = cpu_to_be32(mode);
 	di->di_uid = cpu_to_be32(uid);
 	di->di_gid = cpu_to_be32(gid);
-	di->di_nlink = cpu_to_be32(0);
-	di->di_size = cpu_to_be64(0);
+	di->di_nlink = 0;
+	di->di_size = 0;
 	di->di_blocks = cpu_to_be64(1);
 	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(get_seconds());
-	di->di_major = di->di_minor = cpu_to_be32(0);
+	di->di_major = cpu_to_be32(MAJOR(dev));
+	di->di_minor = cpu_to_be32(MINOR(dev));
 	di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
 	di->di_generation = cpu_to_be64(*generation);
-	di->di_flags = cpu_to_be32(0);
+	di->di_flags = 0;
 
 	if (S_ISREG(mode)) {
 		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
@@ -693,22 +674,22 @@
 	}
 
 	di->__pad1 = 0;
-	di->di_payload_format = cpu_to_be32(0);
-	di->di_height = cpu_to_be32(0);
+	di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
+	di->di_height = 0;
 	di->__pad2 = 0;
 	di->__pad3 = 0;
-	di->di_depth = cpu_to_be16(0);
-	di->di_entries = cpu_to_be32(0);
+	di->di_depth = 0;
+	di->di_entries = 0;
 	memset(&di->__pad4, 0, sizeof(di->__pad4));
-	di->di_eattr = cpu_to_be64(0);
+	di->di_eattr = 0;
 	memset(&di->di_reserved, 0, sizeof(di->di_reserved));
 
 	brelse(dibh);
 }
 
 static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
-		       unsigned int mode, const struct gfs2_inum *inum,
-		       const u64 *generation)
+		       unsigned int mode, const struct gfs2_inum_host *inum,
+		       const u64 *generation, dev_t dev)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
 	unsigned int uid, gid;
@@ -729,7 +710,7 @@
 	if (error)
 		goto out_quota;
 
-	init_dinode(dip, gl, inum, mode, uid, gid, generation);
+	init_dinode(dip, gl, inum, mode, uid, gid, generation, dev);
 	gfs2_quota_change(dip, +1, uid, gid);
 	gfs2_trans_end(sdp);
 
@@ -759,8 +740,7 @@
 	if (alloc_required < 0)
 		goto fail;
 	if (alloc_required) {
-		error = gfs2_quota_check(dip, dip->i_di.di_uid,
-					 dip->i_di.di_gid);
+		error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
 		if (error)
 			goto fail_quota_locks;
 
@@ -782,16 +762,16 @@
 			goto fail_quota_locks;
 	}
 
-	error = gfs2_dir_add(&dip->i_inode, name, &ip->i_num, IF2DT(ip->i_di.di_mode));
+	error = gfs2_dir_add(&dip->i_inode, name, &ip->i_num, IF2DT(ip->i_inode.i_mode));
 	if (error)
 		goto fail_end_trans;
 
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 	if (error)
 		goto fail_end_trans;
-	ip->i_di.di_nlink = 1;
+	ip->i_inode.i_nlink = 1;
 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-	gfs2_dinode_out(&ip->i_di, dibh->b_data);
+	gfs2_dinode_out(ip, dibh->b_data);
 	brelse(dibh);
 	return 0;
 
@@ -860,13 +840,13 @@
  */
 
 struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
-			   unsigned int mode)
+			   unsigned int mode, dev_t dev)
 {
 	struct inode *inode;
 	struct gfs2_inode *dip = ghs->gh_gl->gl_object;
 	struct inode *dir = &dip->i_inode;
 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
-	struct gfs2_inum inum;
+	struct gfs2_inum_host inum;
 	int error;
 	u64 generation;
 
@@ -890,35 +870,12 @@
 	if (error)
 		goto fail_gunlock;
 
-	if (inum.no_addr < dip->i_num.no_addr) {
-		gfs2_glock_dq(ghs);
+	error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
+				  LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
+	if (error)
+		goto fail_gunlock;
 
-		error = gfs2_glock_nq_num(sdp, inum.no_addr,
-					  &gfs2_inode_glops, LM_ST_EXCLUSIVE,
-					  GL_SKIP, ghs + 1);
-		if (error) {
-			return ERR_PTR(error);
-		}
-
-		gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
-		error = gfs2_glock_nq(ghs);
-		if (error) {
-			gfs2_glock_dq_uninit(ghs + 1);
-			return ERR_PTR(error);
-		}
-
-		error = create_ok(dip, name, mode);
-		if (error)
-			goto fail_gunlock2;
-	} else {
-		error = gfs2_glock_nq_num(sdp, inum.no_addr,
-					  &gfs2_inode_glops, LM_ST_EXCLUSIVE,
-					  GL_SKIP, ghs + 1);
-		if (error)
-			goto fail_gunlock;
-	}
-
-	error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation);
+	error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev);
 	if (error)
 		goto fail_gunlock2;
 
@@ -975,7 +932,7 @@
 
 	if (ip->i_di.di_entries != 2) {
 		if (gfs2_consist_inode(ip))
-			gfs2_dinode_print(&ip->i_di);
+			gfs2_dinode_print(ip);
 		return -EIO;
 	}
 
@@ -997,7 +954,12 @@
 	if (error)
 		return error;
 
-	error = gfs2_change_nlink(ip, -2);
+	/* It looks odd, but it really should be done twice */
+	error = gfs2_change_nlink(ip, -1);
+	if (error)
+		return error;
+
+	error = gfs2_change_nlink(ip, -1);
 	if (error)
 		return error;
 
@@ -1018,16 +980,16 @@
 int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
 		   struct gfs2_inode *ip)
 {
-	struct gfs2_inum inum;
+	struct gfs2_inum_host inum;
 	unsigned int type;
 	int error;
 
 	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
 		return -EPERM;
 
-	if ((dip->i_di.di_mode & S_ISVTX) &&
-	    dip->i_di.di_uid != current->fsuid &&
-	    ip->i_di.di_uid != current->fsuid && !capable(CAP_FOWNER))
+	if ((dip->i_inode.i_mode & S_ISVTX) &&
+	    dip->i_inode.i_uid != current->fsuid &&
+	    ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER))
 		return -EPERM;
 
 	if (IS_APPEND(&dip->i_inode))
@@ -1044,7 +1006,7 @@
 	if (!gfs2_inum_equal(&inum, &ip->i_num))
 		return -ENOENT;
 
-	if (IF2DT(ip->i_di.di_mode) != type) {
+	if (IF2DT(ip->i_inode.i_mode) != type) {
 		gfs2_consist_inode(dip);
 		return -EIO;
 	}
@@ -1194,7 +1156,7 @@
 		return 0;
 
 	curtime = get_seconds();
-	if (curtime - ip->i_di.di_atime >= quantum) {
+	if (curtime - ip->i_inode.i_atime.tv_sec >= quantum) {
 		gfs2_glock_dq(gh);
 		gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
 				   gh);
@@ -1206,7 +1168,7 @@
 		   trying to get exclusive lock. */
 
 		curtime = get_seconds();
-		if (curtime - ip->i_di.di_atime >= quantum) {
+		if (curtime - ip->i_inode.i_atime.tv_sec >= quantum) {
 			struct buffer_head *dibh;
 			struct gfs2_dinode *di;
 
@@ -1220,11 +1182,11 @@
 			if (error)
 				goto fail_end_trans;
 
-			ip->i_di.di_atime = curtime;
+			ip->i_inode.i_atime.tv_sec = curtime;
 
 			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 			di = (struct gfs2_dinode *)dibh->b_data;
-			di->di_atime = cpu_to_be64(ip->i_di.di_atime);
+			di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
 			brelse(dibh);
 
 			gfs2_trans_end(sdp);
@@ -1249,92 +1211,6 @@
 	return error;
 }
 
-/**
- * glock_compare_atime - Compare two struct gfs2_glock structures for sort
- * @arg_a: the first structure
- * @arg_b: the second structure
- *
- * Returns: 1 if A > B
- *         -1 if A < B
- *          0 if A == B
- */
-
-static int glock_compare_atime(const void *arg_a, const void *arg_b)
-{
-	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
-	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
-	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
-	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
-
-	if (a->ln_number > b->ln_number)
-		return 1;
-	if (a->ln_number < b->ln_number)
-		return -1;
-	if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE)
-		return 1;
-	if (gh_a->gh_state == LM_ST_SHARED && (gh_b->gh_flags & GL_ATIME))
-		return 1;
-
-	return 0;
-}
-
-/**
- * gfs2_glock_nq_m_atime - acquire multiple glocks where one may need an
- *      atime update
- * @num_gh: the number of structures
- * @ghs: an array of struct gfs2_holder structures
- *
- * Returns: 0 on success (all glocks acquired),
- *          errno on failure (no glocks acquired)
- */
-
-int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs)
-{
-	struct gfs2_holder **p;
-	unsigned int x;
-	int error = 0;
-
-	if (!num_gh)
-		return 0;
-
-	if (num_gh == 1) {
-		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
-		if (ghs->gh_flags & GL_ATIME)
-			error = gfs2_glock_nq_atime(ghs);
-		else
-			error = gfs2_glock_nq(ghs);
-		return error;
-	}
-
-	p = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
-	if (!p)
-		return -ENOMEM;
-
-	for (x = 0; x < num_gh; x++)
-		p[x] = &ghs[x];
-
-	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare_atime,NULL);
-
-	for (x = 0; x < num_gh; x++) {
-		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
-
-		if (p[x]->gh_flags & GL_ATIME)
-			error = gfs2_glock_nq_atime(p[x]);
-		else
-			error = gfs2_glock_nq(p[x]);
-
-		if (error) {
-			while (x--)
-				gfs2_glock_dq(p[x]);
-			break;
-		}
-	}
-
-	kfree(p);
-	return error;
-}
-
-
 static int
 __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
 {
@@ -1345,10 +1221,8 @@
 	if (!error) {
 		error = inode_setattr(&ip->i_inode, attr);
 		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
-		gfs2_inode_attr_out(ip);
-
 		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-		gfs2_dinode_out(&ip->i_di, dibh->b_data);
+		gfs2_dinode_out(ip, dibh->b_data);
 		brelse(dibh);
 	}
 	return error;
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index f5d8617..b57f448 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -22,13 +22,19 @@
 
 static inline int gfs2_is_dir(struct gfs2_inode *ip)
 {
-	return S_ISDIR(ip->i_di.di_mode);
+	return S_ISDIR(ip->i_inode.i_mode);
+}
+
+static inline void gfs2_set_inode_blocks(struct inode *inode)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	inode->i_blocks = ip->i_di.di_blocks <<
+		(GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
 }
 
 void gfs2_inode_attr_in(struct gfs2_inode *ip);
-void gfs2_inode_attr_out(struct gfs2_inode *ip);
-struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum, unsigned type);
-struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum *inum);
+struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum_host *inum, unsigned type);
+struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum_host *inum);
 
 int gfs2_inode_refresh(struct gfs2_inode *ip);
 
@@ -37,19 +43,15 @@
 struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
 			   int is_root, struct nameidata *nd);
 struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
-			   unsigned int mode);
+			   unsigned int mode, dev_t dev);
 int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
 		struct gfs2_inode *ip);
 int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
 		   struct gfs2_inode *ip);
 int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to);
 int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len);
-
 int gfs2_glock_nq_atime(struct gfs2_holder *gh);
-int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs);
-
 int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr);
-
 struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
 
 #endif /* __INODE_DOT_H__ */
diff --git a/fs/gfs2/locking/dlm/plock.c b/fs/gfs2/locking/dlm/plock.c
index 7365aec..3799f19 100644
--- a/fs/gfs2/locking/dlm/plock.c
+++ b/fs/gfs2/locking/dlm/plock.c
@@ -8,6 +8,7 @@
 
 #include <linux/miscdevice.h>
 #include <linux/lock_dlm_plock.h>
+#include <linux/poll.h>
 
 #include "lock_dlm.h"
 
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 0cace3d..291415d 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -15,6 +15,7 @@
 #include <linux/gfs2_ondisk.h>
 #include <linux/crc32.h>
 #include <linux/lm_interface.h>
+#include <linux/delay.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -142,7 +143,7 @@
 	return list_empty(&ai->ai_ail1_list);
 }
 
-void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
+static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
 {
 	struct list_head *head = &sdp->sd_ail1_list;
 	u64 sync_gen;
@@ -261,6 +262,12 @@
  * @sdp: The GFS2 superblock
  * @blks: The number of blocks to reserve
  *
+ * Note that we never give out the last 6 blocks of the journal. That's
+ * because there are a small number of header blocks
+ * associated with each log flush. The exact number can't be known until
+ * flush time, so we ensure that we have just enough free blocks at all
+ * times to avoid running out during a log flush.
+ *
  * Returns: errno
  */
 
@@ -274,7 +281,7 @@
 
 	mutex_lock(&sdp->sd_log_reserve_mutex);
 	gfs2_log_lock(sdp);
-	while(sdp->sd_log_blks_free <= blks) {
+	while(sdp->sd_log_blks_free <= (blks + 6)) {
 		gfs2_log_unlock(sdp);
 		gfs2_ail1_empty(sdp, 0);
 		gfs2_log_flush(sdp, NULL);
@@ -319,7 +326,8 @@
 	bh_map.b_size = 1 << inode->i_blkbits;
 	error = gfs2_block_map(inode, lbn, 0, &bh_map);
 	if (error || !bh_map.b_blocknr)
-		printk(KERN_INFO "error=%d, dbn=%llu lbn=%u", error, bh_map.b_blocknr, lbn);
+		printk(KERN_INFO "error=%d, dbn=%llu lbn=%u", error,
+		       (unsigned long long)bh_map.b_blocknr, lbn);
 	gfs2_assert_withdraw(sdp, !error && bh_map.b_blocknr);
 
 	return bh_map.b_blocknr;
@@ -643,12 +651,9 @@
 	up_read(&sdp->sd_log_flush_lock);
 
 	gfs2_log_lock(sdp);
-	if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks)) {
-		gfs2_log_unlock(sdp);
-		gfs2_log_flush(sdp, NULL);
-	} else {
-		gfs2_log_unlock(sdp);
-	}
+	if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks))
+		wake_up_process(sdp->sd_logd_process);
+	gfs2_log_unlock(sdp);
 }
 
 /**
@@ -686,3 +691,21 @@
 	up_write(&sdp->sd_log_flush_lock);
 }
 
+
+/**
+ * gfs2_meta_syncfs - sync all the buffers in a filesystem
+ * @sdp: the filesystem
+ *
+ */
+
+void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
+{
+	gfs2_log_flush(sdp, NULL);
+	for (;;) {
+		gfs2_ail1_start(sdp, DIO_ALL);
+		if (gfs2_ail1_empty(sdp, DIO_ALL))
+			break;
+		msleep(10);
+	}
+}
+
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 7f5737d..8e7aa0f 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -48,7 +48,6 @@
 unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
 			    unsigned int ssize);
 
-void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags);
 int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags);
 
 int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
@@ -61,5 +60,6 @@
 void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
 
 void gfs2_log_shutdown(struct gfs2_sbd *sdp);
+void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
 
 #endif /* __LOG_DOT_H__ */
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index ab6d111..4d7f94d 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -182,7 +182,7 @@
 }
 
 static void buf_lo_before_scan(struct gfs2_jdesc *jd,
-			       struct gfs2_log_header *head, int pass)
+			       struct gfs2_log_header_host *head, int pass)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
 
@@ -328,7 +328,7 @@
 }
 
 static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
-				  struct gfs2_log_header *head, int pass)
+				  struct gfs2_log_header_host *head, int pass)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
 
@@ -509,7 +509,7 @@
 {
 	LIST_HEAD(started);
 	struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
-	struct buffer_head *bh = NULL;
+	struct buffer_head *bh = NULL,*bh1 = NULL;
 	unsigned int offset = sizeof(struct gfs2_log_descriptor);
 	struct gfs2_log_descriptor *ld;
 	unsigned int limit;
@@ -537,8 +537,13 @@
 		list_for_each_entry_safe_continue(bd1, bdt,
 						  &sdp->sd_log_le_databuf,
 						  bd_le.le_list) {
+			/* store off the buffer head in a local ptr since
+			 * gfs2_bufdata might change when we drop the log lock
+			 */
+			bh1 = bd1->bd_bh;
+
 			/* An ordered write buffer */
-			if (bd1->bd_bh && !buffer_pinned(bd1->bd_bh)) {
+			if (bh1 && !buffer_pinned(bh1)) {
 				list_move(&bd1->bd_le.le_list, &started);
 				if (bd1 == bd2) {
 					bd2 = NULL;
@@ -547,20 +552,21 @@
 							bd_le.le_list);
 				}
 				total_dbuf--;
-				if (bd1->bd_bh) {
-					get_bh(bd1->bd_bh);
-					if (buffer_dirty(bd1->bd_bh)) {
+				if (bh1) {
+					if (buffer_dirty(bh1)) {
+						get_bh(bh1);
+
 						gfs2_log_unlock(sdp);
-						wait_on_buffer(bd1->bd_bh);
-						ll_rw_block(WRITE, 1,
-							    &bd1->bd_bh);
+
+						ll_rw_block(SWRITE, 1, &bh1);
+						brelse(bh1);
+
 						gfs2_log_lock(sdp);
 					}
-					brelse(bd1->bd_bh);
 					continue;
 				}
 				continue;
-			} else if (bd1->bd_bh) { /* A journaled buffer */
+			} else if (bh1) { /* A journaled buffer */
 				int magic;
 				gfs2_log_unlock(sdp);
 				if (!bh) {
@@ -582,16 +588,16 @@
 					ld->ld_data2 = cpu_to_be32(0);
 					memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
 				}
-				magic = gfs2_check_magic(bd1->bd_bh);
-				*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
+				magic = gfs2_check_magic(bh1);
+				*ptr++ = cpu_to_be64(bh1->b_blocknr);
 				*ptr++ = cpu_to_be64((__u64)magic);
-				clear_buffer_escaped(bd1->bd_bh);
+				clear_buffer_escaped(bh1);
 				if (unlikely(magic != 0))
-					set_buffer_escaped(bd1->bd_bh);
+					set_buffer_escaped(bh1);
 				gfs2_log_lock(sdp);
 				if (n++ > num)
 					break;
-			} else if (!bd1->bd_bh) {
+			} else if (!bh1) {
 				total_dbuf--;
 				sdp->sd_log_num_databuf--;
 				list_del_init(&bd1->bd_le.le_list);
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index 5839c05..965bc65 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -60,7 +60,7 @@
 }
 
 static inline void lops_before_scan(struct gfs2_jdesc *jd,
-				    struct gfs2_log_header *head,
+				    struct gfs2_log_header_host *head,
 				    unsigned int pass)
 {
 	int x;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 9889c1e..7c1a9e2 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -25,7 +25,7 @@
 #include "util.h"
 #include "glock.h"
 
-static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct gfs2_inode *ip = foo;
 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
@@ -37,7 +37,7 @@
 	}
 }
 
-static void gfs2_init_glock_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct gfs2_glock *gl = foo;
 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 3912d6a..0e34d99 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -127,17 +127,17 @@
 
 /**
  * getbuf - Get a buffer with a given address space
- * @sdp: the filesystem
- * @aspace: the address space
+ * @gl: the glock
  * @blkno: the block number (filesystem scope)
  * @create: 1 if the buffer should be created
  *
  * Returns: the buffer
  */
 
-static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace,
-				  u64 blkno, int create)
+static struct buffer_head *getbuf(struct gfs2_glock *gl, u64 blkno, int create)
 {
+	struct address_space *mapping = gl->gl_aspace->i_mapping;
+	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct page *page;
 	struct buffer_head *bh;
 	unsigned int shift;
@@ -150,13 +150,13 @@
 
 	if (create) {
 		for (;;) {
-			page = grab_cache_page(aspace->i_mapping, index);
+			page = grab_cache_page(mapping, index);
 			if (page)
 				break;
 			yield();
 		}
 	} else {
-		page = find_lock_page(aspace->i_mapping, index);
+		page = find_lock_page(mapping, index);
 		if (!page)
 			return NULL;
 	}
@@ -202,7 +202,7 @@
 struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
 {
 	struct buffer_head *bh;
-	bh = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
+	bh = getbuf(gl, blkno, CREATE);
 	meta_prep_new(bh);
 	return bh;
 }
@@ -220,7 +220,7 @@
 int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
 		   struct buffer_head **bhp)
 {
-	*bhp = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
+	*bhp = getbuf(gl, blkno, CREATE);
 	if (!buffer_uptodate(*bhp))
 		ll_rw_block(READ_META, 1, bhp);
 	if (flags & DIO_WAIT) {
@@ -379,11 +379,10 @@
 void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	struct inode *aspace = ip->i_gl->gl_aspace;
 	struct buffer_head *bh;
 
 	while (blen) {
-		bh = getbuf(sdp, aspace, bstart, NO_CREATE);
+		bh = getbuf(ip->i_gl, bstart, NO_CREATE);
 		if (bh) {
 			struct gfs2_bufdata *bd = bh->b_private;
 
@@ -472,6 +471,9 @@
 	struct buffer_head *bh = NULL, **bh_slot = ip->i_cache + height;
 	int in_cache = 0;
 
+	BUG_ON(!gl);
+	BUG_ON(!sdp);
+
 	spin_lock(&ip->i_spin);
 	if (*bh_slot && (*bh_slot)->b_blocknr == num) {
 		bh = *bh_slot;
@@ -481,7 +483,7 @@
 	spin_unlock(&ip->i_spin);
 
 	if (!bh)
-		bh = getbuf(gl->gl_sbd, gl->gl_aspace, num, CREATE);
+		bh = getbuf(gl, num, CREATE);
 
 	if (!bh)
 		return -ENOBUFS;
@@ -532,7 +534,6 @@
 struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
-	struct inode *aspace = gl->gl_aspace;
 	struct buffer_head *first_bh, *bh;
 	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
 			  sdp->sd_sb.sb_bsize_shift;
@@ -544,7 +545,7 @@
 	if (extlen > max_ra)
 		extlen = max_ra;
 
-	first_bh = getbuf(sdp, aspace, dblock, CREATE);
+	first_bh = getbuf(gl, dblock, CREATE);
 
 	if (buffer_uptodate(first_bh))
 		goto out;
@@ -555,7 +556,7 @@
 	extlen--;
 
 	while (extlen) {
-		bh = getbuf(sdp, aspace, dblock, CREATE);
+		bh = getbuf(gl, dblock, CREATE);
 
 		if (!buffer_uptodate(bh) && !buffer_locked(bh))
 			ll_rw_block(READA, 1, &bh);
@@ -571,20 +572,3 @@
 	return first_bh;
 }
 
-/**
- * gfs2_meta_syncfs - sync all the buffers in a filesystem
- * @sdp: the filesystem
- *
- */
-
-void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
-{
-	gfs2_log_flush(sdp, NULL);
-	for (;;) {
-		gfs2_ail1_start(sdp, DIO_ALL);
-		if (gfs2_ail1_empty(sdp, DIO_ALL))
-			break;
-		msleep(10);
-	}
-}
-
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index 3ec939e..e037425 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -67,7 +67,6 @@
 }
 
 struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen);
-void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
 
 #define buffer_busy(bh) \
 ((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned)))
diff --git a/fs/gfs2/ondisk.c b/fs/gfs2/ondisk.c
index 1025960..f2495f1 100644
--- a/fs/gfs2/ondisk.c
+++ b/fs/gfs2/ondisk.c
@@ -15,6 +15,8 @@
 
 #include "gfs2.h"
 #include <linux/gfs2_ondisk.h>
+#include <linux/lm_interface.h>
+#include "incore.h"
 
 #define pv(struct, member, fmt) printk(KERN_INFO "  "#member" = "fmt"\n", \
 				       struct->member);
@@ -32,7 +34,7 @@
  * first arg: the cpu-order structure
  */
 
-void gfs2_inum_in(struct gfs2_inum *no, const void *buf)
+void gfs2_inum_in(struct gfs2_inum_host *no, const void *buf)
 {
 	const struct gfs2_inum *str = buf;
 
@@ -40,7 +42,7 @@
 	no->no_addr = be64_to_cpu(str->no_addr);
 }
 
-void gfs2_inum_out(const struct gfs2_inum *no, void *buf)
+void gfs2_inum_out(const struct gfs2_inum_host *no, void *buf)
 {
 	struct gfs2_inum *str = buf;
 
@@ -48,13 +50,13 @@
 	str->no_addr = cpu_to_be64(no->no_addr);
 }
 
-static void gfs2_inum_print(const struct gfs2_inum *no)
+static void gfs2_inum_print(const struct gfs2_inum_host *no)
 {
 	printk(KERN_INFO "  no_formal_ino = %llu\n", (unsigned long long)no->no_formal_ino);
 	printk(KERN_INFO "  no_addr = %llu\n", (unsigned long long)no->no_addr);
 }
 
-static void gfs2_meta_header_in(struct gfs2_meta_header *mh, const void *buf)
+static void gfs2_meta_header_in(struct gfs2_meta_header_host *mh, const void *buf)
 {
 	const struct gfs2_meta_header *str = buf;
 
@@ -63,23 +65,7 @@
 	mh->mh_format = be32_to_cpu(str->mh_format);
 }
 
-static void gfs2_meta_header_out(const struct gfs2_meta_header *mh, void *buf)
-{
-	struct gfs2_meta_header *str = buf;
-
-	str->mh_magic = cpu_to_be32(mh->mh_magic);
-	str->mh_type = cpu_to_be32(mh->mh_type);
-	str->mh_format = cpu_to_be32(mh->mh_format);
-}
-
-static void gfs2_meta_header_print(const struct gfs2_meta_header *mh)
-{
-	pv(mh, mh_magic, "0x%.8X");
-	pv(mh, mh_type, "%u");
-	pv(mh, mh_format, "%u");
-}
-
-void gfs2_sb_in(struct gfs2_sb *sb, const void *buf)
+void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf)
 {
 	const struct gfs2_sb *str = buf;
 
@@ -97,7 +83,7 @@
 	memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
 }
 
-void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf)
+void gfs2_rindex_in(struct gfs2_rindex_host *ri, const void *buf)
 {
 	const struct gfs2_rindex *str = buf;
 
@@ -109,7 +95,7 @@
 
 }
 
-void gfs2_rindex_print(const struct gfs2_rindex *ri)
+void gfs2_rindex_print(const struct gfs2_rindex_host *ri)
 {
 	printk(KERN_INFO "  ri_addr = %llu\n", (unsigned long long)ri->ri_addr);
 	pv(ri, ri_length, "%u");
@@ -120,22 +106,20 @@
 	pv(ri, ri_bitbytes, "%u");
 }
 
-void gfs2_rgrp_in(struct gfs2_rgrp *rg, const void *buf)
+void gfs2_rgrp_in(struct gfs2_rgrp_host *rg, const void *buf)
 {
 	const struct gfs2_rgrp *str = buf;
 
-	gfs2_meta_header_in(&rg->rg_header, buf);
 	rg->rg_flags = be32_to_cpu(str->rg_flags);
 	rg->rg_free = be32_to_cpu(str->rg_free);
 	rg->rg_dinodes = be32_to_cpu(str->rg_dinodes);
 	rg->rg_igeneration = be64_to_cpu(str->rg_igeneration);
 }
 
-void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf)
+void gfs2_rgrp_out(const struct gfs2_rgrp_host *rg, void *buf)
 {
 	struct gfs2_rgrp *str = buf;
 
-	gfs2_meta_header_out(&rg->rg_header, buf);
 	str->rg_flags = cpu_to_be32(rg->rg_flags);
 	str->rg_free = cpu_to_be32(rg->rg_free);
 	str->rg_dinodes = cpu_to_be32(rg->rg_dinodes);
@@ -144,7 +128,7 @@
 	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
 }
 
-void gfs2_quota_in(struct gfs2_quota *qu, const void *buf)
+void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
 {
 	const struct gfs2_quota *str = buf;
 
@@ -153,96 +137,56 @@
 	qu->qu_value = be64_to_cpu(str->qu_value);
 }
 
-void gfs2_dinode_in(struct gfs2_dinode *di, const void *buf)
+void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
 {
-	const struct gfs2_dinode *str = buf;
-
-	gfs2_meta_header_in(&di->di_header, buf);
-	gfs2_inum_in(&di->di_num, &str->di_num);
-
-	di->di_mode = be32_to_cpu(str->di_mode);
-	di->di_uid = be32_to_cpu(str->di_uid);
-	di->di_gid = be32_to_cpu(str->di_gid);
-	di->di_nlink = be32_to_cpu(str->di_nlink);
-	di->di_size = be64_to_cpu(str->di_size);
-	di->di_blocks = be64_to_cpu(str->di_blocks);
-	di->di_atime = be64_to_cpu(str->di_atime);
-	di->di_mtime = be64_to_cpu(str->di_mtime);
-	di->di_ctime = be64_to_cpu(str->di_ctime);
-	di->di_major = be32_to_cpu(str->di_major);
-	di->di_minor = be32_to_cpu(str->di_minor);
-
-	di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
-	di->di_goal_data = be64_to_cpu(str->di_goal_data);
-	di->di_generation = be64_to_cpu(str->di_generation);
-
-	di->di_flags = be32_to_cpu(str->di_flags);
-	di->di_payload_format = be32_to_cpu(str->di_payload_format);
-	di->di_height = be16_to_cpu(str->di_height);
-
-	di->di_depth = be16_to_cpu(str->di_depth);
-	di->di_entries = be32_to_cpu(str->di_entries);
-
-	di->di_eattr = be64_to_cpu(str->di_eattr);
-
-}
-
-void gfs2_dinode_out(const struct gfs2_dinode *di, void *buf)
-{
+	const struct gfs2_dinode_host *di = &ip->i_di;
 	struct gfs2_dinode *str = buf;
 
-	gfs2_meta_header_out(&di->di_header, buf);
-	gfs2_inum_out(&di->di_num, (char *)&str->di_num);
+	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
+	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
+	str->di_header.__pad0 = 0;
+	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
+	str->di_header.__pad1 = 0;
 
-	str->di_mode = cpu_to_be32(di->di_mode);
-	str->di_uid = cpu_to_be32(di->di_uid);
-	str->di_gid = cpu_to_be32(di->di_gid);
-	str->di_nlink = cpu_to_be32(di->di_nlink);
+	gfs2_inum_out(&ip->i_num, &str->di_num);
+
+	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
+	str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
+	str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
+	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
 	str->di_size = cpu_to_be64(di->di_size);
 	str->di_blocks = cpu_to_be64(di->di_blocks);
-	str->di_atime = cpu_to_be64(di->di_atime);
-	str->di_mtime = cpu_to_be64(di->di_mtime);
-	str->di_ctime = cpu_to_be64(di->di_ctime);
-	str->di_major = cpu_to_be32(di->di_major);
-	str->di_minor = cpu_to_be32(di->di_minor);
+	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
+	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
+	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);
 
 	str->di_goal_meta = cpu_to_be64(di->di_goal_meta);
 	str->di_goal_data = cpu_to_be64(di->di_goal_data);
 	str->di_generation = cpu_to_be64(di->di_generation);
 
 	str->di_flags = cpu_to_be32(di->di_flags);
-	str->di_payload_format = cpu_to_be32(di->di_payload_format);
 	str->di_height = cpu_to_be16(di->di_height);
-
+	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
+					     !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
+					     GFS2_FORMAT_DE : 0);
 	str->di_depth = cpu_to_be16(di->di_depth);
 	str->di_entries = cpu_to_be32(di->di_entries);
 
 	str->di_eattr = cpu_to_be64(di->di_eattr);
-
 }
 
-void gfs2_dinode_print(const struct gfs2_dinode *di)
+void gfs2_dinode_print(const struct gfs2_inode *ip)
 {
-	gfs2_meta_header_print(&di->di_header);
-	gfs2_inum_print(&di->di_num);
+	const struct gfs2_dinode_host *di = &ip->i_di;
 
-	pv(di, di_mode, "0%o");
-	pv(di, di_uid, "%u");
-	pv(di, di_gid, "%u");
-	pv(di, di_nlink, "%u");
+	gfs2_inum_print(&ip->i_num);
+
 	printk(KERN_INFO "  di_size = %llu\n", (unsigned long long)di->di_size);
 	printk(KERN_INFO "  di_blocks = %llu\n", (unsigned long long)di->di_blocks);
-	printk(KERN_INFO "  di_atime = %lld\n", (long long)di->di_atime);
-	printk(KERN_INFO "  di_mtime = %lld\n", (long long)di->di_mtime);
-	printk(KERN_INFO "  di_ctime = %lld\n", (long long)di->di_ctime);
-	pv(di, di_major, "%u");
-	pv(di, di_minor, "%u");
-
 	printk(KERN_INFO "  di_goal_meta = %llu\n", (unsigned long long)di->di_goal_meta);
 	printk(KERN_INFO "  di_goal_data = %llu\n", (unsigned long long)di->di_goal_data);
 
 	pv(di, di_flags, "0x%.8X");
-	pv(di, di_payload_format, "%u");
 	pv(di, di_height, "%u");
 
 	pv(di, di_depth, "%u");
@@ -251,7 +195,7 @@
 	printk(KERN_INFO "  di_eattr = %llu\n", (unsigned long long)di->di_eattr);
 }
 
-void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf)
+void gfs2_log_header_in(struct gfs2_log_header_host *lh, const void *buf)
 {
 	const struct gfs2_log_header *str = buf;
 
@@ -263,7 +207,7 @@
 	lh->lh_hash = be32_to_cpu(str->lh_hash);
 }
 
-void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf)
+void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
 {
 	const struct gfs2_inum_range *str = buf;
 
@@ -271,7 +215,7 @@
 	ir->ir_length = be64_to_cpu(str->ir_length);
 }
 
-void gfs2_inum_range_out(const struct gfs2_inum_range *ir, void *buf)
+void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
 {
 	struct gfs2_inum_range *str = buf;
 
@@ -279,7 +223,7 @@
 	str->ir_length = cpu_to_be64(ir->ir_length);
 }
 
-void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf)
+void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
 {
 	const struct gfs2_statfs_change *str = buf;
 
@@ -288,7 +232,7 @@
 	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
 }
 
-void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf)
+void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
 {
 	struct gfs2_statfs_change *str = buf;
 
@@ -297,7 +241,7 @@
 	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
 }
 
-void gfs2_quota_change_in(struct gfs2_quota_change *qc, const void *buf)
+void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
 {
 	const struct gfs2_quota_change *str = buf;
 
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 015640b..d8d69a7 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -156,19 +156,6 @@
 	return 0;
 }
 
-static int zero_readpage(struct page *page)
-{
-	void *kaddr;
-
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset(kaddr, 0, PAGE_CACHE_SIZE);
-	kunmap_atomic(kaddr, KM_USER0);
-
-	SetPageUptodate(page);
-
-	return 0;
-}
-
 /**
  * stuffed_readpage - Fill in a Linux page with stuffed file data
  * @ip: the inode
@@ -183,9 +170,7 @@
 	void *kaddr;
 	int error;
 
-	/* Only the first page of a stuffed file might contain data */
-	if (unlikely(page->index))
-		return zero_readpage(page);
+	BUG_ON(page->index);
 
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 	if (error)
@@ -230,9 +215,9 @@
 				/* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */
 				goto skip_lock;
 		}
-		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|GL_AOP, &gh);
+		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
 		do_unlock = 1;
-		error = gfs2_glock_nq_m_atime(1, &gh);
+		error = gfs2_glock_nq_atime(&gh);
 		if (unlikely(error))
 			goto out_unlock;
 	}
@@ -254,6 +239,8 @@
 out:
 	return error;
 out_unlock:
+	if (error == GLR_TRYFAILED)
+		error = AOP_TRUNCATED_PAGE;
 	unlock_page(page);
 	if (do_unlock)
 		gfs2_holder_uninit(&gh);
@@ -293,9 +280,9 @@
 				goto skip_lock;
 		}
 		gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
-				 LM_FLAG_TRY_1CB|GL_ATIME|GL_AOP, &gh);
+				 LM_FLAG_TRY_1CB|GL_ATIME, &gh);
 		do_unlock = 1;
-		ret = gfs2_glock_nq_m_atime(1, &gh);
+		ret = gfs2_glock_nq_atime(&gh);
 		if (ret == GLR_TRYFAILED)
 			goto out_noerror;
 		if (unlikely(ret))
@@ -366,10 +353,13 @@
 	unsigned int write_len = to - from;
 
 
-	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|GL_AOP, &ip->i_gh);
-	error = gfs2_glock_nq_m_atime(1, &ip->i_gh);
-	if (error)
+	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh);
+	error = gfs2_glock_nq_atime(&ip->i_gh);
+	if (unlikely(error)) {
+		if (error == GLR_TRYFAILED)
+			error = AOP_TRUNCATED_PAGE;
 		goto out_uninit;
+	}
 
 	gfs2_write_calc_reserv(ip, write_len, &data_blocks, &ind_blocks);
 
@@ -386,7 +376,7 @@
 		if (error)
 			goto out_alloc_put;
 
-		error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
+		error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
 		if (error)
 			goto out_qunlock;
 
@@ -482,8 +472,10 @@
 
 		SetPageUptodate(page);
 
-		if (inode->i_size < file_size)
+		if (inode->i_size < file_size) {
 			i_size_write(inode, file_size);
+			mark_inode_dirty(inode);
+		}
 	} else {
 		if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED ||
 		    gfs2_is_jdata(ip))
@@ -498,11 +490,6 @@
 		di->di_size = cpu_to_be64(inode->i_size);
 	}
 
-	di->di_mode = cpu_to_be32(inode->i_mode);
-	di->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
-	di->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
-	di->di_ctime = cpu_to_be64(inode->i_ctime.tv_sec);
-
 	brelse(dibh);
 	gfs2_trans_end(sdp);
 	if (al->al_requested) {
@@ -624,7 +611,7 @@
 	 * on this path. All we need change is atime.
 	 */
 	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
-	rv = gfs2_glock_nq_m_atime(1, &gh);
+	rv = gfs2_glock_nq_atime(&gh);
 	if (rv)
 		goto out;
 
@@ -737,6 +724,9 @@
 			if (!atomic_read(&aspace->i_writecount))
 				return 0;
 
+			if (!(gfp_mask & __GFP_WAIT))
+				return 0;
+
 			if (time_after_eq(jiffies, t)) {
 				stuck_releasepage(bh);
 				/* should we withdraw here? */
diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c
index 00041b1..d355899 100644
--- a/fs/gfs2/ops_dentry.c
+++ b/fs/gfs2/ops_dentry.c
@@ -43,7 +43,7 @@
 	struct inode *inode = dentry->d_inode;
 	struct gfs2_holder d_gh;
 	struct gfs2_inode *ip;
-	struct gfs2_inum inum;
+	struct gfs2_inum_host inum;
 	unsigned int type;
 	int error;
 
@@ -76,7 +76,7 @@
 	if (!gfs2_inum_equal(&ip->i_num, &inum))
 		goto invalid_gunlock;
 
-	if (IF2DT(ip->i_di.di_mode) != type) {
+	if (IF2DT(ip->i_inode.i_mode) != type) {
 		gfs2_consist_inode(dip);
 		goto fail_gunlock;
 	}
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index 86127d9..b4e7b87 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -27,15 +27,16 @@
 #include "util.h"
 
 static struct dentry *gfs2_decode_fh(struct super_block *sb,
-				     __u32 *fh,
+				     __u32 *p,
 				     int fh_len,
 				     int fh_type,
 				     int (*acceptable)(void *context,
 						       struct dentry *dentry),
 				     void *context)
 {
+	__be32 *fh = (__force __be32 *)p;
 	struct gfs2_fh_obj fh_obj;
-	struct gfs2_inum *this, parent;
+	struct gfs2_inum_host *this, parent;
 
 	if (fh_type != fh_len)
 		return NULL;
@@ -65,9 +66,10 @@
 						    acceptable, context);
 }
 
-static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
+static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
 			  int connectable)
 {
+	__be32 *fh = (__force __be32 *)p;
 	struct inode *inode = dentry->d_inode;
 	struct super_block *sb = inode->i_sb;
 	struct gfs2_inode *ip = GFS2_I(inode);
@@ -76,14 +78,10 @@
 	    (connectable && *len < GFS2_LARGE_FH_SIZE))
 		return 255;
 
-	fh[0] = ip->i_num.no_formal_ino >> 32;
-	fh[0] = cpu_to_be32(fh[0]);
-	fh[1] = ip->i_num.no_formal_ino & 0xFFFFFFFF;
-	fh[1] = cpu_to_be32(fh[1]);
-	fh[2] = ip->i_num.no_addr >> 32;
-	fh[2] = cpu_to_be32(fh[2]);
-	fh[3] = ip->i_num.no_addr & 0xFFFFFFFF;
-	fh[3] = cpu_to_be32(fh[3]);
+	fh[0] = cpu_to_be32(ip->i_num.no_formal_ino >> 32);
+	fh[1] = cpu_to_be32(ip->i_num.no_formal_ino & 0xFFFFFFFF);
+	fh[2] = cpu_to_be32(ip->i_num.no_addr >> 32);
+	fh[3] = cpu_to_be32(ip->i_num.no_addr & 0xFFFFFFFF);
 	*len = GFS2_SMALL_FH_SIZE;
 
 	if (!connectable || inode == sb->s_root->d_inode)
@@ -95,14 +93,10 @@
 	igrab(inode);
 	spin_unlock(&dentry->d_lock);
 
-	fh[4] = ip->i_num.no_formal_ino >> 32;
-	fh[4] = cpu_to_be32(fh[4]);
-	fh[5] = ip->i_num.no_formal_ino & 0xFFFFFFFF;
-	fh[5] = cpu_to_be32(fh[5]);
-	fh[6] = ip->i_num.no_addr >> 32;
-	fh[6] = cpu_to_be32(fh[6]);
-	fh[7] = ip->i_num.no_addr & 0xFFFFFFFF;
-	fh[7] = cpu_to_be32(fh[7]);
+	fh[4] = cpu_to_be32(ip->i_num.no_formal_ino >> 32);
+	fh[5] = cpu_to_be32(ip->i_num.no_formal_ino & 0xFFFFFFFF);
+	fh[6] = cpu_to_be32(ip->i_num.no_addr >> 32);
+	fh[7] = cpu_to_be32(ip->i_num.no_addr & 0xFFFFFFFF);
 
 	fh[8]  = cpu_to_be32(inode->i_mode);
 	fh[9]  = 0;	/* pad to double word */
@@ -114,12 +108,12 @@
 }
 
 struct get_name_filldir {
-	struct gfs2_inum inum;
+	struct gfs2_inum_host inum;
 	char *name;
 };
 
 static int get_name_filldir(void *opaque, const char *name, unsigned int length,
-			    u64 offset, struct gfs2_inum *inum,
+			    u64 offset, struct gfs2_inum_host *inum,
 			    unsigned int type)
 {
 	struct get_name_filldir *gnfd = (struct get_name_filldir *)opaque;
@@ -202,7 +196,7 @@
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_fh_obj *fh_obj = (struct gfs2_fh_obj *)inum_obj;
-	struct gfs2_inum *inum = &fh_obj->this;
+	struct gfs2_inum_host *inum = &fh_obj->this;
 	struct gfs2_holder i_gh, ri_gh, rgd_gh;
 	struct gfs2_rgrpd *rgd;
 	struct inode *inode;
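
The file handle changes above collapse the old two-step "assign, then byte-swap in place" into a single cpu_to_be32() per word, splitting each 64-bit inode number into a big-endian high word and low word. A minimal user-space sketch of the same packing, assuming only standard C and POSIX (htonl() merely stands in for the kernel's cpu_to_be32(), and the example value is made up):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>  /* htonl()/ntohl() stand in for cpu_to_be32()/be32_to_cpu() */

    /* Pack a 64-bit number into two big-endian 32-bit words, high word first. */
    static void pack_be64(uint32_t fh[2], uint64_t n)
    {
        fh[0] = htonl((uint32_t)(n >> 32));
        fh[1] = htonl((uint32_t)(n & 0xFFFFFFFFu));
    }

    static uint64_t unpack_be64(const uint32_t fh[2])
    {
        return ((uint64_t)ntohl(fh[0]) << 32) | ntohl(fh[1]);
    }

    int main(void)
    {
        uint32_t fh[2];
        uint64_t no_formal_ino = 0x123456789abcdef0ULL;  /* arbitrary example value */

        pack_be64(fh, no_formal_ino);
        printf("round trip ok: %d\n", unpack_be64(fh) == no_formal_ino);
        return 0;
    }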
diff --git a/fs/gfs2/ops_export.h b/fs/gfs2/ops_export.h
index 09aca50..f925a95 100644
--- a/fs/gfs2/ops_export.h
+++ b/fs/gfs2/ops_export.h
@@ -15,7 +15,7 @@
 
 extern struct export_operations gfs2_export_ops;
 struct gfs2_fh_obj {
-	struct gfs2_inum this;
+	struct gfs2_inum_host this;
 	__u32            imode;
 };
 
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index 3064f13..b3f1e03 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -22,6 +22,7 @@
 #include <linux/ext2_fs.h>
 #include <linux/crc32.h>
 #include <linux/lm_interface.h>
+#include <linux/writeback.h>
 #include <asm/uaccess.h>
 
 #include "gfs2.h"
@@ -71,7 +72,7 @@
 		size = count;
 
 	kaddr = kmap(page);
-	memcpy(desc->arg.buf, kaddr + offset, size);
+	memcpy(desc->arg.data, kaddr + offset, size);
 	kunmap(page);
 
 	desc->count = count - size;
@@ -86,7 +87,7 @@
 	struct inode *inode = &ip->i_inode;
 	read_descriptor_t desc;
 	desc.written = 0;
-	desc.arg.buf = buf;
+	desc.arg.data = buf;
 	desc.count = size;
 	desc.error = 0;
 	do_generic_mapping_read(inode->i_mapping, ra_state,
@@ -139,7 +140,7 @@
  */
 
 static int filldir_func(void *opaque, const char *name, unsigned int length,
-			u64 offset, struct gfs2_inum *inum,
+			u64 offset, struct gfs2_inum_host *inum,
 			unsigned int type)
 {
 	struct filldir_reg *fdr = (struct filldir_reg *)opaque;
@@ -253,7 +254,7 @@
 	u32 fsflags;
 
 	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
-	error = gfs2_glock_nq_m_atime(1, &gh);
+	error = gfs2_glock_nq_atime(&gh);
 	if (error)
 		return error;
 
@@ -266,6 +267,24 @@
 	return error;
 }
 
+void gfs2_set_inode_flags(struct inode *inode)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_dinode_host *di = &ip->i_di;
+	unsigned int flags = inode->i_flags;
+
+	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+	if (di->di_flags & GFS2_DIF_IMMUTABLE)
+		flags |= S_IMMUTABLE;
+	if (di->di_flags & GFS2_DIF_APPENDONLY)
+		flags |= S_APPEND;
+	if (di->di_flags & GFS2_DIF_NOATIME)
+		flags |= S_NOATIME;
+	if (di->di_flags & GFS2_DIF_SYNC)
+		flags |= S_SYNC;
+	inode->i_flags = flags;
+}
+
 /* Flags that can be set by user space */
 #define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
 			     GFS2_DIF_DIRECTIO|			\
@@ -336,8 +355,9 @@
 		goto out_trans_end;
 	gfs2_trans_add_bh(ip->i_gl, bh, 1);
 	ip->i_di.di_flags = new_flags;
-	gfs2_dinode_out(&ip->i_di, bh->b_data);
+	gfs2_dinode_out(ip, bh->b_data);
 	brelse(bh);
+	gfs2_set_inode_flags(inode);
 out_trans_end:
 	gfs2_trans_end(sdp);
 out:
@@ -425,7 +445,7 @@
 	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
 	file->private_data = fp;
 
-	if (S_ISREG(ip->i_di.di_mode)) {
+	if (S_ISREG(ip->i_inode.i_mode)) {
 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
 					   &i_gh);
 		if (error)
@@ -484,16 +504,40 @@
  * @file: the file that points to the dentry (we ignore this)
  * @dentry: the dentry that points to the inode to sync
  *
+ * The VFS will flush "normal" data for us. We only need to worry
+ * about metadata here. For journaled data, we just do a log flush
+ * as we can't avoid it. Otherwise we can just bail out if datasync
+ * is set. For stuffed inodes we must flush the log in order to
+ * ensure that all data is on disk.
+ *
+ * The call to write_inode_now() is there to write back metadata and
+ * the inode itself. It also tries to write the data, but that's
+ * (hopefully) a no-op since the VFS has already called filemap_fdatawrite()
+ * for us.
+ *
  * Returns: errno
  */
 
 static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync)
 {
-	struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
+	struct inode *inode = dentry->d_inode;
+	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
+	int ret = 0;
 
-	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
+	if (gfs2_is_jdata(GFS2_I(inode))) {
+		gfs2_log_flush(GFS2_SB(inode), GFS2_I(inode)->i_gl);
+		return 0;
+	}
 
-	return 0;
+	if (sync_state != 0) {
+		if (!datasync)
+			ret = write_inode_now(inode, 0);
+
+		if (gfs2_is_stuffed(GFS2_I(inode)))
+			gfs2_log_flush(GFS2_SB(inode), GFS2_I(inode)->i_gl);
+	}
+
+	return ret;
 }
 
 /**
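
The new gfs2_fsync() above leans on the datasync distinction: fdatasync() only guarantees the data plus whatever metadata is needed to retrieve it (such as the file size), while fsync() also covers timestamps and the rest of the inode. A minimal user-space sketch of that contract, assuming nothing GFS2-specific (the file name is arbitrary):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("testfile", O_CREAT | O_WRONLY | O_TRUNC, 0644);
        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, "hello", 5) != 5)
            perror("write");

        fdatasync(fd);  /* data + size-affecting metadata only */
        fsync(fd);      /* data + all metadata, timestamps included */

        close(fd);
        return 0;
    }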
@@ -515,7 +559,7 @@
 
 	if (!(fl->fl_flags & FL_POSIX))
 		return -ENOLCK;
-	if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+	if ((ip->i_inode.i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
 		return -ENOLCK;
 
 	if (sdp->sd_args.ar_localflocks) {
@@ -617,7 +661,7 @@
 
 	if (!(fl->fl_flags & FL_FLOCK))
 		return -ENOLCK;
-	if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+	if ((ip->i_inode.i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
 		return -ENOLCK;
 
 	if (sdp->sd_args.ar_localflocks)
diff --git a/fs/gfs2/ops_file.h b/fs/gfs2/ops_file.h
index ce319f8..7e5d8ec 100644
--- a/fs/gfs2/ops_file.h
+++ b/fs/gfs2/ops_file.h
@@ -17,7 +17,7 @@
 extern int gfs2_internal_read(struct gfs2_inode *ip,
 			      struct file_ra_state *ra_state,
 			      char *buf, loff_t *pos, unsigned size);
-
+extern void gfs2_set_inode_flags(struct inode *inode);
 extern const struct file_operations gfs2_file_fops;
 extern const struct file_operations gfs2_dir_fops;
 
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 882873a..d14e139 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -237,7 +237,7 @@
 }
 
 static struct inode *gfs2_lookup_root(struct super_block *sb,
-				      struct gfs2_inum *inum)
+				      struct gfs2_inum_host *inum)
 {
 	return gfs2_inode_lookup(sb, inum, DT_DIR);
 }
@@ -246,7 +246,7 @@
 {
 	struct super_block *sb = sdp->sd_vfs;
 	struct gfs2_holder sb_gh;
-	struct gfs2_inum *inum;
+	struct gfs2_inum_host *inum;
 	struct inode *inode;
 	int error = 0;
 
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index ef6e5ed..636dda4 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -59,7 +59,7 @@
 	gfs2_holder_init(dip->i_gl, 0, 0, ghs);
 
 	for (;;) {
-		inode = gfs2_createi(ghs, &dentry->d_name, S_IFREG | mode);
+		inode = gfs2_createi(ghs, &dentry->d_name, S_IFREG | mode, 0);
 		if (!IS_ERR(inode)) {
 			gfs2_trans_end(sdp);
 			if (dip->i_alloc.al_rgd)
@@ -144,7 +144,7 @@
 	int alloc_required;
 	int error;
 
-	if (S_ISDIR(ip->i_di.di_mode))
+	if (S_ISDIR(inode->i_mode))
 		return -EPERM;
 
 	gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
@@ -169,7 +169,7 @@
 	}
 
 	error = -EINVAL;
-	if (!dip->i_di.di_nlink)
+	if (!dip->i_inode.i_nlink)
 		goto out_gunlock;
 	error = -EFBIG;
 	if (dip->i_di.di_entries == (u32)-1)
@@ -178,10 +178,10 @@
 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
 		goto out_gunlock;
 	error = -EINVAL;
-	if (!ip->i_di.di_nlink)
+	if (!ip->i_inode.i_nlink)
 		goto out_gunlock;
 	error = -EMLINK;
-	if (ip->i_di.di_nlink == (u32)-1)
+	if (ip->i_inode.i_nlink == (u32)-1)
 		goto out_gunlock;
 
 	alloc_required = error = gfs2_diradd_alloc_required(dir, &dentry->d_name);
@@ -196,8 +196,7 @@
 		if (error)
 			goto out_alloc;
 
-		error = gfs2_quota_check(dip, dip->i_di.di_uid,
-					 dip->i_di.di_gid);
+		error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
 		if (error)
 			goto out_gunlock_q;
 
@@ -220,7 +219,7 @@
 	}
 
 	error = gfs2_dir_add(dir, &dentry->d_name, &ip->i_num,
-			     IF2DT(ip->i_di.di_mode));
+			     IF2DT(inode->i_mode));
 	if (error)
 		goto out_end_trans;
 
@@ -326,7 +325,7 @@
 
 	gfs2_holder_init(dip->i_gl, 0, 0, ghs);
 
-	inode = gfs2_createi(ghs, &dentry->d_name, S_IFLNK | S_IRWXUGO);
+	inode = gfs2_createi(ghs, &dentry->d_name, S_IFLNK | S_IRWXUGO, 0);
 	if (IS_ERR(inode)) {
 		gfs2_holder_uninit(ghs);
 		return PTR_ERR(inode);
@@ -339,7 +338,7 @@
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 
 	if (!gfs2_assert_withdraw(sdp, !error)) {
-		gfs2_dinode_out(&ip->i_di, dibh->b_data);
+		gfs2_dinode_out(ip, dibh->b_data);
 		memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname,
 		       size);
 		brelse(dibh);
@@ -379,7 +378,7 @@
 
 	gfs2_holder_init(dip->i_gl, 0, 0, ghs);
 
-	inode = gfs2_createi(ghs, &dentry->d_name, S_IFDIR | mode);
+	inode = gfs2_createi(ghs, &dentry->d_name, S_IFDIR | mode, 0);
 	if (IS_ERR(inode)) {
 		gfs2_holder_uninit(ghs);
 		return PTR_ERR(inode);
@@ -387,10 +386,9 @@
 
 	ip = ghs[1].gh_gl->gl_object;
 
-	ip->i_di.di_nlink = 2;
+	ip->i_inode.i_nlink = 2;
 	ip->i_di.di_size = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
 	ip->i_di.di_flags |= GFS2_DIF_JDATA;
-	ip->i_di.di_payload_format = GFS2_FORMAT_DE;
 	ip->i_di.di_entries = 2;
 
 	error = gfs2_meta_inode_buffer(ip, &dibh);
@@ -414,7 +412,7 @@
 		gfs2_inum_out(&dip->i_num, &dent->de_inum);
 		dent->de_type = cpu_to_be16(DT_DIR);
 
-		gfs2_dinode_out(&ip->i_di, di);
+		gfs2_dinode_out(ip, di);
 
 		brelse(dibh);
 	}
@@ -467,7 +465,7 @@
 
 	if (ip->i_di.di_entries < 2) {
 		if (gfs2_consist_inode(ip))
-			gfs2_dinode_print(&ip->i_di);
+			gfs2_dinode_print(ip);
 		error = -EIO;
 		goto out_gunlock;
 	}
@@ -504,47 +502,19 @@
 static int gfs2_mknod(struct inode *dir, struct dentry *dentry, int mode,
 		      dev_t dev)
 {
-	struct gfs2_inode *dip = GFS2_I(dir), *ip;
+	struct gfs2_inode *dip = GFS2_I(dir);
 	struct gfs2_sbd *sdp = GFS2_SB(dir);
 	struct gfs2_holder ghs[2];
 	struct inode *inode;
-	struct buffer_head *dibh;
-	u32 major = 0, minor = 0;
-	int error;
-
-	switch (mode & S_IFMT) {
-	case S_IFBLK:
-	case S_IFCHR:
-		major = MAJOR(dev);
-		minor = MINOR(dev);
-		break;
-	case S_IFIFO:
-	case S_IFSOCK:
-		break;
-	default:
-		return -EOPNOTSUPP;
-	};
 
 	gfs2_holder_init(dip->i_gl, 0, 0, ghs);
 
-	inode = gfs2_createi(ghs, &dentry->d_name, mode);
+	inode = gfs2_createi(ghs, &dentry->d_name, mode, dev);
 	if (IS_ERR(inode)) {
 		gfs2_holder_uninit(ghs);
 		return PTR_ERR(inode);
 	}
 
-	ip = ghs[1].gh_gl->gl_object;
-
-	ip->i_di.di_major = major;
-	ip->i_di.di_minor = minor;
-
-	error = gfs2_meta_inode_buffer(ip, &dibh);
-
-	if (!gfs2_assert_withdraw(sdp, !error)) {
-		gfs2_dinode_out(&ip->i_di, dibh->b_data);
-		brelse(dibh);
-	}
-
 	gfs2_trans_end(sdp);
 	if (dip->i_alloc.al_rgd)
 		gfs2_inplace_release(dip);
@@ -592,11 +562,10 @@
 
 	/* Make sure we aren't trying to move a directory into its subdir */
 
-	if (S_ISDIR(ip->i_di.di_mode) && odip != ndip) {
+	if (S_ISDIR(ip->i_inode.i_mode) && odip != ndip) {
 		dir_rename = 1;
 
-		error = gfs2_glock_nq_init(sdp->sd_rename_gl,
-					   LM_ST_EXCLUSIVE, 0,
+		error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE, 0,
 					   &r_gh);
 		if (error)
 			goto out;
@@ -637,10 +606,10 @@
 		if (error)
 			goto out_gunlock;
 
-		if (S_ISDIR(nip->i_di.di_mode)) {
+		if (S_ISDIR(nip->i_inode.i_mode)) {
 			if (nip->i_di.di_entries < 2) {
 				if (gfs2_consist_inode(nip))
-					gfs2_dinode_print(&nip->i_di);
+					gfs2_dinode_print(nip);
 				error = -EIO;
 				goto out_gunlock;
 			}
@@ -666,7 +635,7 @@
 		};
 
 		if (odip != ndip) {
-			if (!ndip->i_di.di_nlink) {
+			if (!ndip->i_inode.i_nlink) {
 				error = -EINVAL;
 				goto out_gunlock;
 			}
@@ -674,8 +643,8 @@
 				error = -EFBIG;
 				goto out_gunlock;
 			}
-			if (S_ISDIR(ip->i_di.di_mode) &&
-			    ndip->i_di.di_nlink == (u32)-1) {
+			if (S_ISDIR(ip->i_inode.i_mode) &&
+			    ndip->i_inode.i_nlink == (u32)-1) {
 				error = -EMLINK;
 				goto out_gunlock;
 			}
@@ -702,8 +671,7 @@
 		if (error)
 			goto out_alloc;
 
-		error = gfs2_quota_check(ndip, ndip->i_di.di_uid,
-					 ndip->i_di.di_gid);
+		error = gfs2_quota_check(ndip, ndip->i_inode.i_uid, ndip->i_inode.i_gid);
 		if (error)
 			goto out_gunlock_q;
 
@@ -729,7 +697,7 @@
 	/* Remove the target file, if it exists */
 
 	if (nip) {
-		if (S_ISDIR(nip->i_di.di_mode))
+		if (S_ISDIR(nip->i_inode.i_mode))
 			error = gfs2_rmdiri(ndip, &ndentry->d_name, nip);
 		else {
 			error = gfs2_dir_del(ndip, &ndentry->d_name);
@@ -760,9 +728,9 @@
 		error = gfs2_meta_inode_buffer(ip, &dibh);
 		if (error)
 			goto out_end_trans;
-		ip->i_di.di_ctime = get_seconds();
+		ip->i_inode.i_ctime.tv_sec = get_seconds();
 		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-		gfs2_dinode_out(&ip->i_di, dibh->b_data);
+		gfs2_dinode_out(ip, dibh->b_data);
 		brelse(dibh);
 	}
 
@@ -771,7 +739,7 @@
 		goto out_end_trans;
 
 	error = gfs2_dir_add(ndir, &ndentry->d_name, &ip->i_num,
-			     IF2DT(ip->i_di.di_mode));
+			     IF2DT(ip->i_inode.i_mode));
 	if (error)
 		goto out_end_trans;
 
@@ -867,6 +835,10 @@
  * @mask:
  * @nd: passed from Linux VFS, ignored by us
  *
+ * This may be called from the VFS directly, or from within GFS2 with the
+ * inode locked, so we look to see if the glock is already locked and only
+ * lock the glock if it has not already been done.
+ *
  * Returns: errno
  */
 
@@ -875,16 +847,19 @@
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_holder i_gh;
 	int error;
+	int unlock = 0;
 
-	if (ip->i_vn == ip->i_gl->gl_vn)
-		return generic_permission(inode, mask, gfs2_check_acl);
-
-	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
-	if (!error) {
-		error = generic_permission(inode, mask, gfs2_check_acl_locked);
-		gfs2_glock_dq_uninit(&i_gh);
+	if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) {
+		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
+		if (error)
+			return error;
+		unlock = 1;
 	}
 
+	error = generic_permission(inode, mask, gfs2_check_acl);
+	if (unlock)
+		gfs2_glock_dq_uninit(&i_gh);
+
 	return error;
 }
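
The comment added above describes the pattern both gfs2_permission() and gfs2_getattr() now use: take the glock only when the current process does not already hold it, remember whether this call took it, and drop it only in that case. A rough user-space analogue of the idea, assuming POSIX threads (the kernel relies on gfs2_glock_is_locked_by_me() rather than anything shown here, and a real concurrent version would need the owner bookkeeping protected the way the glock layer protects it):

    #include <pthread.h>
    #include <stdio.h>

    /* A lock that remembers its owner, so a caller can ask "do I already hold this?". */
    struct owned_lock {
        pthread_mutex_t mu;
        pthread_t owner;
        int held;
    };

    /* Returns 1 if this call took the lock (caller must release), 0 if already owned. */
    static int lock_if_needed(struct owned_lock *l)
    {
        if (l->held && pthread_equal(l->owner, pthread_self()))
            return 0;
        pthread_mutex_lock(&l->mu);
        l->owner = pthread_self();
        l->held = 1;
        return 1;
    }

    static void unlock_if_taken(struct owned_lock *l, int taken)
    {
        if (!taken)
            return;
        l->held = 0;
        pthread_mutex_unlock(&l->mu);
    }

    int main(void)
    {
        static struct owned_lock l = { .mu = PTHREAD_MUTEX_INITIALIZER };
        int outer = lock_if_needed(&l);  /* takes the lock */
        int inner = lock_if_needed(&l);  /* already ours: no deadlock, no second take */

        printf("outer=%d inner=%d\n", outer, inner);
        unlock_if_taken(&l, inner);
        unlock_if_taken(&l, outer);
        return 0;
    }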
 
@@ -914,8 +889,8 @@
 	u32 ouid, ogid, nuid, ngid;
 	int error;
 
-	ouid = ip->i_di.di_uid;
-	ogid = ip->i_di.di_gid;
+	ouid = inode->i_uid;
+	ogid = inode->i_gid;
 	nuid = attr->ia_uid;
 	ngid = attr->ia_gid;
 
@@ -946,10 +921,9 @@
 
 	error = inode_setattr(inode, attr);
 	gfs2_assert_warn(sdp, !error);
-	gfs2_inode_attr_out(ip);
 
 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-	gfs2_dinode_out(&ip->i_di, dibh->b_data);
+	gfs2_dinode_out(ip, dibh->b_data);
 	brelse(dibh);
 
 	if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) {
@@ -1018,6 +992,12 @@
  * @dentry: The dentry to stat
  * @stat: The inode's stats
  *
+ * This may be called from the VFS directly, or from within GFS2 with the
+ * inode locked, so we look to see if the glock is already locked and only
+ * lock the glock if it has not already been done. Note that it is the NFS
+ * readdirplus operation which causes this to be called (from filldir)
+ * with the glock already held.
+ *
  * Returns: errno
  */
 
@@ -1028,14 +1008,20 @@
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_holder gh;
 	int error;
+	int unlock = 0;
 
-	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
-	if (!error) {
-		generic_fillattr(inode, stat);
-		gfs2_glock_dq_uninit(&gh);
+	if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) {
+		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
+		if (error)
+			return error;
+		unlock = 1;
 	}
 
-	return error;
+	generic_fillattr(inode, stat);
+	if (unlock)
+		gfs2_glock_dq_uninit(&gh);
+
+	return 0;
 }
 
 static int gfs2_setxattr(struct dentry *dentry, const char *name,
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index b47d959..7685b46 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -157,7 +157,8 @@
 static int gfs2_sync_fs(struct super_block *sb, int wait)
 {
 	sb->s_dirt = 0;
-	gfs2_log_flush(sb->s_fs_info, NULL);
+	if (wait)
+		gfs2_log_flush(sb->s_fs_info, NULL);
 	return 0;
 }
 
@@ -215,7 +216,7 @@
 {
 	struct super_block *sb = dentry->d_inode->i_sb;
 	struct gfs2_sbd *sdp = sb->s_fs_info;
-	struct gfs2_statfs_change sc;
+	struct gfs2_statfs_change_host sc;
 	int error;
 
 	if (gfs2_tune_get(sdp, gt_statfs_slow))
@@ -293,8 +294,6 @@
 	 */
 	if (inode->i_private) {
 		struct gfs2_inode *ip = GFS2_I(inode);
-		gfs2_glock_inode_squish(inode);
-		gfs2_assert(inode->i_sb->s_fs_info, ip->i_gl->gl_state == LM_ST_UNLOCKED);
 		ip->i_gl->gl_object = NULL;
 		gfs2_glock_schedule_for_reclaim(ip->i_gl);
 		gfs2_glock_put(ip->i_gl);
@@ -395,7 +394,7 @@
 	if (!inode->i_private)
 		goto out;
 
-	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &gh);
+	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB, &gh);
 	if (unlikely(error)) {
 		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 		goto out;
@@ -407,7 +406,7 @@
 	if (error)
 		goto out_uninit;
 
-	if (S_ISDIR(ip->i_di.di_mode) &&
+	if (S_ISDIR(inode->i_mode) &&
 	    (ip->i_di.di_flags & GFS2_DIF_EXHASH)) {
 		error = gfs2_dir_exhash_dealloc(ip);
 		if (error)
diff --git a/fs/gfs2/ops_vm.c b/fs/gfs2/ops_vm.c
index 5453d29..45a5f11 100644
--- a/fs/gfs2/ops_vm.c
+++ b/fs/gfs2/ops_vm.c
@@ -76,7 +76,7 @@
 	if (error)
 		goto out;
 
-	error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
+	error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
 	if (error)
 		goto out_gunlock_q;
 
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index a3deae7..d0db881 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -452,19 +452,19 @@
 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 		return 0;
 
-	error = qdsb_get(sdp, QUOTA_USER, ip->i_di.di_uid, CREATE, qd);
+	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
 	if (error)
 		goto out;
 	al->al_qd_num++;
 	qd++;
 
-	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_di.di_gid, CREATE, qd);
+	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
 	if (error)
 		goto out;
 	al->al_qd_num++;
 	qd++;
 
-	if (uid != NO_QUOTA_CHANGE && uid != ip->i_di.di_uid) {
+	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
 		error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
 		if (error)
 			goto out;
@@ -472,7 +472,7 @@
 		qd++;
 	}
 
-	if (gid != NO_QUOTA_CHANGE && gid != ip->i_di.di_gid) {
+	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
 		error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
 		if (error)
 			goto out;
@@ -539,8 +539,7 @@
 		qc->qc_id = cpu_to_be32(qd->qd_id);
 	}
 
-	x = qc->qc_change;
-	x = be64_to_cpu(x) + change;
+	x = be64_to_cpu(qc->qc_change) + change;
 	qc->qc_change = cpu_to_be64(x);
 
 	spin_lock(&sdp->sd_quota_spin);
@@ -743,7 +742,7 @@
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 	struct gfs2_holder i_gh;
-	struct gfs2_quota q;
+	struct gfs2_quota_host q;
 	char buf[sizeof(struct gfs2_quota)];
 	struct file_ra_state ra_state;
 	int error;
@@ -1103,7 +1102,7 @@
 
 		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
 		     y++, slot++) {
-			struct gfs2_quota_change qc;
+			struct gfs2_quota_change_host qc;
 			struct gfs2_quota_data *qd;
 
 			gfs2_quota_change_in(&qc, bh->b_data +
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 62cd223..d0c806b 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -132,10 +132,11 @@
  */
 
 static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
-			  struct gfs2_log_header *head)
+			  struct gfs2_log_header_host *head)
 {
 	struct buffer_head *bh;
-	struct gfs2_log_header lh;
+	struct gfs2_log_header_host lh;
+	const u32 nothing = 0;
 	u32 hash;
 	int error;
 
@@ -143,11 +144,11 @@
 	if (error)
 		return error;
 
-	memcpy(&lh, bh->b_data, sizeof(struct gfs2_log_header));
-	lh.lh_hash = 0;
-	hash = gfs2_disk_hash((char *)&lh, sizeof(struct gfs2_log_header));
+	hash = crc32_le((u32)~0, bh->b_data, sizeof(struct gfs2_log_header) -
+					     sizeof(u32));
+	hash = crc32_le(hash, (unsigned char const *)&nothing, sizeof(nothing));
+	hash ^= (u32)~0;
 	gfs2_log_header_in(&lh, bh->b_data);
-
 	brelse(bh);
 
 	if (lh.lh_header.mh_magic != GFS2_MAGIC ||
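
The hunk above drops the memcpy-and-zero approach: instead of copying the whole log header and clearing lh_hash before hashing, it CRCs the header up to the trailing hash word, feeds in four zero bytes, and applies the final XOR itself. A minimal user-space sketch of why the two are equivalent, assuming crc32_le() behaves as the usual reflected CRC-32 (poly 0xEDB88320) with no implicit init or final XOR, and that the hash is the last 32-bit field of the on-disk header as in gfs2_log_header:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Bit-wise reflected CRC-32; the caller supplies and carries the running value. */
    static uint32_t crc32_le_sketch(uint32_t crc, const unsigned char *p, size_t len)
    {
        while (len--) {
            crc ^= *p++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
        }
        return crc;
    }

    int main(void)
    {
        /* Stand-in on-disk header whose last four bytes hold the stored hash. */
        unsigned char hdr[24] = "log header payload";
        unsigned char copy[sizeof(hdr)];
        const uint32_t zero = 0;
        uint32_t a, b;

        /* Old approach: copy the header, zero its hash field, hash the whole copy. */
        memcpy(copy, hdr, sizeof(hdr));
        memset(copy + sizeof(hdr) - sizeof(uint32_t), 0, sizeof(uint32_t));
        a = crc32_le_sketch(~0u, copy, sizeof(copy)) ^ ~0u;

        /* New approach: hash up to the hash field, then four zero bytes, no copy needed. */
        b = crc32_le_sketch(~0u, hdr, sizeof(hdr) - sizeof(uint32_t));
        b = crc32_le_sketch(b, (const unsigned char *)&zero, sizeof(zero));
        b ^= ~0u;

        printf("match: %d\n", a == b);
        return 0;
    }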
@@ -174,7 +175,7 @@
  */
 
 static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk,
-			struct gfs2_log_header *head)
+			struct gfs2_log_header_host *head)
 {
 	unsigned int orig_blk = *blk;
 	int error;
@@ -205,10 +206,10 @@
  * Returns: errno
  */
 
-static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header *head)
+static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
 {
 	unsigned int blk = head->lh_blkno;
-	struct gfs2_log_header lh;
+	struct gfs2_log_header_host lh;
 	int error;
 
 	for (;;) {
@@ -245,9 +246,9 @@
  * Returns: errno
  */
 
-int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header *head)
+int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
 {
-	struct gfs2_log_header lh_1, lh_m;
+	struct gfs2_log_header_host lh_1, lh_m;
 	u32 blk_1, blk_2, blk_m;
 	int error;
 
@@ -320,7 +321,7 @@
 		length = be32_to_cpu(ld->ld_length);
 
 		if (be32_to_cpu(ld->ld_header.mh_type) == GFS2_METATYPE_LH) {
-			struct gfs2_log_header lh;
+			struct gfs2_log_header_host lh;
 			error = get_log_header(jd, start, &lh);
 			if (!error) {
 				gfs2_replay_incr_blk(sdp, &start);
@@ -363,7 +364,7 @@
  * Returns: errno
  */
 
-static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header *head)
+static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
 {
 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
@@ -425,7 +426,7 @@
 {
 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
-	struct gfs2_log_header head;
+	struct gfs2_log_header_host head;
 	struct gfs2_holder j_gh, ji_gh, t_gh;
 	unsigned long t;
 	int ro = 0;
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h
index 961feed..f7235e6 100644
--- a/fs/gfs2/recovery.h
+++ b/fs/gfs2/recovery.h
@@ -26,7 +26,7 @@
 void gfs2_revoke_clean(struct gfs2_sbd *sdp);
 
 int gfs2_find_jhead(struct gfs2_jdesc *jd,
-		    struct gfs2_log_header *head);
+		    struct gfs2_log_header_host *head);
 int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd);
 void gfs2_check_journals(struct gfs2_sbd *sdp);
 
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index b261385..ff08465 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -253,7 +253,7 @@
 
 }
 
-static inline int rgrp_contains_block(struct gfs2_rindex *ri, u64 block)
+static inline int rgrp_contains_block(struct gfs2_rindex_host *ri, u64 block)
 {
 	u64 first = ri->ri_data0;
 	u64 last = first + ri->ri_data;
@@ -1217,7 +1217,7 @@
 	al->al_alloced++;
 
 	gfs2_statfs_change(sdp, 0, -1, 0);
-	gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid);
+	gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);
 
 	spin_lock(&sdp->sd_rindex_spin);
 	rgd->rd_free_clone--;
@@ -1261,7 +1261,7 @@
 	al->al_alloced++;
 
 	gfs2_statfs_change(sdp, 0, -1, 0);
-	gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid);
+	gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);
 	gfs2_trans_add_unrevoke(sdp, block);
 
 	spin_lock(&sdp->sd_rindex_spin);
@@ -1337,8 +1337,7 @@
 	gfs2_trans_add_rg(rgd);
 
 	gfs2_statfs_change(sdp, 0, +blen, 0);
-	gfs2_quota_change(ip, -(s64)blen,
-			 ip->i_di.di_uid, ip->i_di.di_gid);
+	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
 }
 
 /**
@@ -1366,7 +1365,7 @@
 	gfs2_trans_add_rg(rgd);
 
 	gfs2_statfs_change(sdp, 0, +blen, 0);
-	gfs2_quota_change(ip, -(s64)blen, ip->i_di.di_uid, ip->i_di.di_gid);
+	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
 	gfs2_meta_wipe(ip, bstart, blen);
 }
 
@@ -1411,7 +1410,7 @@
 void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
 {
 	gfs2_free_uninit_di(rgd, ip->i_num.no_addr);
-	gfs2_quota_change(ip, -1, ip->i_di.di_uid, ip->i_di.di_gid);
+	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
 	gfs2_meta_wipe(ip, ip->i_num.no_addr, 1);
 }
 
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 6a78b1b..43a24f2 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -97,7 +97,7 @@
  * changed.
  */
 
-int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb *sb, int silent)
+int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent)
 {
 	unsigned int x;
 
@@ -180,6 +180,24 @@
 	return 0;
 }
 
+/**
+ * gfs2_read_super - Read the gfs2 super block from disk
+ * @sb: The VFS super block
+ * @sector: The location of the super block
+ *
+ * This uses the bio functions to read the super block from disk
+ * because we want to be 100% sure that we never read cached data.
+ * A super block is read only twice during each GFS2 mount and is
+ * never written to by the filesystem. The first time it is read, no
+ * locks are held, and the only details which are looked at are those
+ * relating to the locking protocol. Once locking is up and working,
+ * the sb is read again under the lock to establish the location of
+ * the master directory (contains pointers to journals etc) and the
+ * root directory.
+ *
+ * Returns: A page containing the sb or NULL
+ */
+
 struct page *gfs2_read_super(struct super_block *sb, sector_t sector)
 {
 	struct page *page;
@@ -199,7 +217,7 @@
 		return NULL;
 	}
 
-	bio->bi_sector = sector;
+	bio->bi_sector = sector * (sb->s_blocksize >> 9);
 	bio->bi_bdev = sb->s_bdev;
 	bio_add_page(bio, page, PAGE_SIZE, 0);
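
The one-line fix above accounts for bi_sector being expressed in 512-byte units: the superblock location arrives as a filesystem block number, so it has to be scaled by the number of sectors per block (s_blocksize >> 9). A trivial user-space illustration of the arithmetic, with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t block = 128;            /* example filesystem block number */
        unsigned long blocksize = 4096;  /* example s_blocksize */

        /* bi_sector counts 512-byte units, so scale by sectors-per-block. */
        uint64_t sector = block * (blocksize >> 9);

        printf("block %llu -> sector %llu\n",
               (unsigned long long)block, (unsigned long long)sector);
        return 0;
    }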
 
@@ -508,7 +526,7 @@
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
 	struct gfs2_glock *j_gl = ip->i_gl;
 	struct gfs2_holder t_gh;
-	struct gfs2_log_header head;
+	struct gfs2_log_header_host head;
 	int error;
 
 	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
@@ -517,7 +535,7 @@
 		return error;
 
 	gfs2_meta_cache_flush(ip);
-	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);
+	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
 
 	error = gfs2_find_jhead(sdp->sd_jdesc, &head);
 	if (error)
@@ -587,9 +605,9 @@
 int gfs2_statfs_init(struct gfs2_sbd *sdp)
 {
 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
-	struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master;
+	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
-	struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
+	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
 	struct buffer_head *m_bh, *l_bh;
 	struct gfs2_holder gh;
 	int error;
@@ -634,7 +652,7 @@
 			s64 dinodes)
 {
 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
-	struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
+	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
 	struct buffer_head *l_bh;
 	int error;
 
@@ -660,8 +678,8 @@
 {
 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
-	struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master;
-	struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
+	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
+	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
 	struct gfs2_holder gh;
 	struct buffer_head *m_bh, *l_bh;
 	int error;
@@ -727,10 +745,10 @@
  * Returns: errno
  */
 
-int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc)
+int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
 {
-	struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master;
-	struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
+	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
+	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
 
 	spin_lock(&sdp->sd_statfs_spin);
 
@@ -760,7 +778,7 @@
  */
 
 static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
-			    struct gfs2_statfs_change *sc)
+			    struct gfs2_statfs_change_host *sc)
 {
 	gfs2_rgrp_verify(rgd);
 	sc->sc_total += rgd->rd_ri.ri_data;
@@ -782,7 +800,7 @@
  * Returns: errno
  */
 
-int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc)
+int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
 {
 	struct gfs2_holder ri_gh;
 	struct gfs2_rgrpd *rgd_next;
@@ -792,7 +810,7 @@
 	int done;
 	int error = 0, err;
 
-	memset(sc, 0, sizeof(struct gfs2_statfs_change));
+	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
 	gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
 	if (!gha)
 		return -ENOMEM;
@@ -873,7 +891,7 @@
 	struct gfs2_jdesc *jd;
 	struct lfcc *lfcc;
 	LIST_HEAD(list);
-	struct gfs2_log_header lh;
+	struct gfs2_log_header_host lh;
 	int error;
 
 	error = gfs2_jindex_hold(sdp, &ji_gh);
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 5bb443a..e590b2d 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -14,7 +14,7 @@
 
 void gfs2_tune_init(struct gfs2_tune *gt);
 
-int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb *sb, int silent);
+int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent);
 int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent);
 struct page *gfs2_read_super(struct super_block *sb, sector_t sector);
 
@@ -45,8 +45,8 @@
 void gfs2_statfs_change(struct gfs2_sbd *sdp,
 			s64 total, s64 free, s64 dinodes);
 int gfs2_statfs_sync(struct gfs2_sbd *sdp);
-int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc);
-int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc);
+int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc);
+int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc);
 
 int gfs2_freeze_fs(struct gfs2_sbd *sdp);
 void gfs2_unfreeze_fs(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 0e0ec98..983eaf1 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -426,9 +426,6 @@
 }                                                                             \
 TUNE_ATTR_2(name, name##_store)
 
-TUNE_ATTR(ilimit, 0);
-TUNE_ATTR(ilimit_tries, 0);
-TUNE_ATTR(ilimit_min, 0);
 TUNE_ATTR(demote_secs, 0);
 TUNE_ATTR(incore_log_blocks, 0);
 TUNE_ATTR(log_flush_secs, 0);
@@ -447,7 +444,6 @@
 TUNE_ATTR(quota_cache_secs, 1);
 TUNE_ATTR(max_atomic_write, 1);
 TUNE_ATTR(stall_secs, 1);
-TUNE_ATTR(entries_per_readdir, 1);
 TUNE_ATTR(greedy_default, 1);
 TUNE_ATTR(greedy_quantum, 1);
 TUNE_ATTR(greedy_max, 1);
@@ -459,9 +455,6 @@
 TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
 
 static struct attribute *tune_attrs[] = {
-	&tune_attr_ilimit.attr,
-	&tune_attr_ilimit_tries.attr,
-	&tune_attr_ilimit_min.attr,
 	&tune_attr_demote_secs.attr,
 	&tune_attr_incore_log_blocks.attr,
 	&tune_attr_log_flush_secs.attr,
@@ -478,7 +471,6 @@
 	&tune_attr_quota_cache_secs.attr,
 	&tune_attr_max_atomic_write.attr,
 	&tune_attr_stall_secs.attr,
-	&tune_attr_entries_per_readdir.attr,
 	&tune_attr_greedy_default.attr,
 	&tune_attr_greedy_quantum.attr,
 	&tune_attr_greedy_max.attr,
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 196c604..e5707a9 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -23,9 +23,9 @@
 #include "lm.h"
 #include "util.h"
 
-kmem_cache_t *gfs2_glock_cachep __read_mostly;
-kmem_cache_t *gfs2_inode_cachep __read_mostly;
-kmem_cache_t *gfs2_bufdata_cachep __read_mostly;
+struct kmem_cache *gfs2_glock_cachep __read_mostly;
+struct kmem_cache *gfs2_inode_cachep __read_mostly;
+struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
 
 void gfs2_assert_i(struct gfs2_sbd *sdp)
 {
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 76a5089..28938a4 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -83,8 +83,7 @@
 				    char *file, unsigned int line)
 {
 	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
-	u32 magic = mh->mh_magic;
-	magic = be32_to_cpu(magic);
+	u32 magic = be32_to_cpu(mh->mh_magic);
 	if (unlikely(magic != GFS2_MAGIC))
 		return gfs2_meta_check_ii(sdp, bh, "magic number", function,
 					  file, line);
@@ -107,9 +106,8 @@
 					char *file, unsigned int line)
 {
 	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
-	u32 magic = mh->mh_magic;
+	u32 magic = be32_to_cpu(mh->mh_magic);
 	u16 t = be32_to_cpu(mh->mh_type);
-	magic = be32_to_cpu(magic);
 	if (unlikely(magic != GFS2_MAGIC))
 		return gfs2_meta_check_ii(sdp, bh, "magic number", function,
 					  file, line);
@@ -146,9 +144,9 @@
 gfs2_io_error_bh_i((sdp), (bh), __FUNCTION__, __FILE__, __LINE__);
 
 
-extern kmem_cache_t *gfs2_glock_cachep;
-extern kmem_cache_t *gfs2_inode_cachep;
-extern kmem_cache_t *gfs2_bufdata_cachep;
+extern struct kmem_cache *gfs2_glock_cachep;
+extern struct kmem_cache *gfs2_inode_cachep;
+extern struct kmem_cache *gfs2_bufdata_cachep;
 
 static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
 					   unsigned int *p)
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 85b17b3..a369879 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -24,7 +24,7 @@
 #include "hfs_fs.h"
 #include "btree.h"
 
-static kmem_cache_t *hfs_inode_cachep;
+static struct kmem_cache *hfs_inode_cachep;
 
 MODULE_LICENSE("GPL");
 
@@ -145,7 +145,7 @@
 {
 	struct hfs_inode_info *i;
 
-	i = kmem_cache_alloc(hfs_inode_cachep, SLAB_KERNEL);
+	i = kmem_cache_alloc(hfs_inode_cachep, GFP_KERNEL);
 	return i ? &i->vfs_inode : NULL;
 }
 
@@ -430,7 +430,7 @@
 	.fs_flags	= FS_REQUIRES_DEV,
 };
 
-static void hfs_init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
+static void hfs_init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct hfs_inode_info *i = p;
 
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 194eede..0f513c6 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -434,13 +434,13 @@
 MODULE_DESCRIPTION("Extended Macintosh Filesystem");
 MODULE_LICENSE("GPL");
 
-static kmem_cache_t *hfsplus_inode_cachep;
+static struct kmem_cache *hfsplus_inode_cachep;
 
 static struct inode *hfsplus_alloc_inode(struct super_block *sb)
 {
 	struct hfsplus_inode_info *i;
 
-	i = kmem_cache_alloc(hfsplus_inode_cachep, SLAB_KERNEL);
+	i = kmem_cache_alloc(hfsplus_inode_cachep, GFP_KERNEL);
 	return i ? &i->vfs_inode : NULL;
 }
 
@@ -467,7 +467,7 @@
 	.fs_flags	= FS_REQUIRES_DEV,
 };
 
-static void hfsplus_init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
+static void hfsplus_init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct hfsplus_inode_info *i = p;
 
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index ecc9180..594f9c4 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -84,7 +84,8 @@
 		}
 		if (!fno->dirflag) {
 			e = 1;
-			hpfs_error(inode->i_sb, "not a directory, fnode %08x",inode->i_ino);
+			hpfs_error(inode->i_sb, "not a directory, fnode %08lx",
+					(unsigned long)inode->i_ino);
 		}
 		if (hpfs_inode->i_dno != fno->u.external[0].disk_secno) {
 			e = 1;
@@ -144,8 +145,11 @@
 		}
 		if (de->first || de->last) {
 			if (hpfs_sb(inode->i_sb)->sb_chk) {
-				if (de->first && !de->last && (de->namelen != 2 || de ->name[0] != 1 || de->name[1] != 1)) hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08x", old_pos);
-				if (de->last && (de->namelen != 1 || de ->name[0] != 255)) hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08x", old_pos);
+				if (de->first && !de->last && (de->namelen != 2
+				    || de ->name[0] != 1 || de->name[1] != 1))
+					hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08lx", old_pos);
+				if (de->last && (de->namelen != 1 || de ->name[0] != 255))
+					hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", old_pos);
 			}
 			hpfs_brelse4(&qbh);
 			goto again;
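
The hpfs_error() calls in this file (and in the dnode.c, ea.c, inode.c and map.c hunks below) are being fixed up for printk format checking: an ino_t or loff_t handed to a %08x conversion may be wider than the int the conversion expects, especially on 64-bit targets, so each argument is cast to a type that matches its conversion specifier. A small user-space sketch of the same idea, assuming only standard C:

    #include <stdio.h>
    #include <sys/types.h>  /* ino_t */

    int main(void)
    {
        ino_t ino = 0x12345678;

        /* Wrong where ino_t is wider than int: argument/format mismatch.
         * printf("fnode %08x\n", ino);
         */

        /* Portable: cast to a known width and use the matching conversion. */
        printf("fnode %08lx\n", (unsigned long)ino);
        return 0;
    }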
diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c
index 229ff2f..fe83c2b 100644
--- a/fs/hpfs/dnode.c
+++ b/fs/hpfs/dnode.c
@@ -533,10 +533,13 @@
 			struct buffer_head *bh;
 			struct dnode *d1;
 			struct quad_buffer_head qbh1;
-			if (hpfs_sb(i->i_sb)->sb_chk) if (up != i->i_ino) {
-				hpfs_error(i->i_sb, "bad pointer to fnode, dnode %08x, pointing to %08x, should be %08x", dno, up, i->i_ino);
+			if (hpfs_sb(i->i_sb)->sb_chk)
+			    if (up != i->i_ino) {
+				hpfs_error(i->i_sb,
+					"bad pointer to fnode, dnode %08x, pointing to %08x, should be %08lx",
+					dno, up, (unsigned long)i->i_ino);
 				return;
-			}
+			    }
 			if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) {
 				d1->up = up;
 				d1->root_dnode = 1;
@@ -851,7 +854,9 @@
 	/* Going to the next dirent */
 	if ((d = de_next_de(de)) < dnode_end_de(dnode)) {
 		if (!(++*posp & 077)) {
-			hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; pos = %08x", *posp);
+			hpfs_error(inode->i_sb,
+				"map_pos_dirent: pos crossed dnode boundary; pos = %08llx",
+				(unsigned long long)*posp);
 			goto bail;
 		}
 		/* We're going down the tree */
diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c
index 66339dc..547a838 100644
--- a/fs/hpfs/ea.c
+++ b/fs/hpfs/ea.c
@@ -243,8 +243,9 @@
 		fnode->ea_offs = 0xc4;
 	}
 	if (fnode->ea_offs < 0xc4 || fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200) {
-		hpfs_error(s, "fnode %08x: ea_offs == %03x, ea_size_s == %03x",
-			inode->i_ino, fnode->ea_offs, fnode->ea_size_s);
+		hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x",
+			(unsigned long)inode->i_ino,
+			fnode->ea_offs, fnode->ea_size_s);
 		return;
 	}
 	if ((fnode->ea_size_s || !fnode->ea_size_l) &&
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 32ab51e..1c07aa8 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -317,7 +317,8 @@
 
 /* super.c */
 
-void hpfs_error(struct super_block *, char *, ...);
+void hpfs_error(struct super_block *, const char *, ...)
+	__attribute__((format (printf, 2, 3)));
 int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *);
 unsigned hpfs_count_one_bitmap(struct super_block *, secno);
 
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 7faef85..85d3e1d 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -251,7 +251,10 @@
 			de->file_size = 0;
 			hpfs_mark_4buffers_dirty(&qbh);
 			hpfs_brelse4(&qbh);
-		} else hpfs_error(i->i_sb, "directory %08x doesn't have '.' entry", i->i_ino);
+		} else
+			hpfs_error(i->i_sb,
+				"directory %08lx doesn't have '.' entry",
+				(unsigned long)i->i_ino);
 	}
 	mark_buffer_dirty(bh);
 	brelse(bh);
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index 0fecdac..c472458 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -126,32 +126,40 @@
 			struct extended_attribute *ea;
 			struct extended_attribute *ea_end;
 			if (fnode->magic != FNODE_MAGIC) {
-				hpfs_error(s, "bad magic on fnode %08x", ino);
+				hpfs_error(s, "bad magic on fnode %08lx",
+					(unsigned long)ino);
 				goto bail;
 			}
 			if (!fnode->dirflag) {
 				if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=
 				    (fnode->btree.internal ? 12 : 8)) {
-					hpfs_error(s, "bad number of nodes in fnode %08x", ino);
+					hpfs_error(s,
+					   "bad number of nodes in fnode %08lx",
+					    (unsigned long)ino);
 					goto bail;
 				}
 				if (fnode->btree.first_free !=
 				    8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) {
-					hpfs_error(s, "bad first_free pointer in fnode %08x", ino);
+					hpfs_error(s,
+					    "bad first_free pointer in fnode %08lx",
+					    (unsigned long)ino);
 					goto bail;
 				}
 			}
 			if (fnode->ea_size_s && ((signed int)fnode->ea_offs < 0xc4 ||
 			   (signed int)fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200)) {
-				hpfs_error(s, "bad EA info in fnode %08x: ea_offs == %04x ea_size_s == %04x",
-					ino, fnode->ea_offs, fnode->ea_size_s);
+				hpfs_error(s,
+					"bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x",
+					(unsigned long)ino,
+					fnode->ea_offs, fnode->ea_size_s);
 				goto bail;
 			}
 			ea = fnode_ea(fnode);
 			ea_end = fnode_end_ea(fnode);
 			while (ea != ea_end) {
 				if (ea > ea_end) {
-					hpfs_error(s, "bad EA in fnode %08x", ino);
+					hpfs_error(s, "bad EA in fnode %08lx",
+						(unsigned long)ino);
 					goto bail;
 				}
 				ea = next_ea(ea);
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 450b5e0..d4abc1a 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -46,21 +46,17 @@
 }
 
 /* Filesystem error... */
+static char err_buf[1024];
 
-#define ERR_BUF_SIZE 1024
-
-void hpfs_error(struct super_block *s, char *m,...)
+void hpfs_error(struct super_block *s, const char *fmt, ...)
 {
-	char *buf;
-	va_list l;
-	va_start(l, m);
-	if (!(buf = kmalloc(ERR_BUF_SIZE, GFP_KERNEL)))
-		printk("HPFS: No memory for error message '%s'\n",m);
-	else if (vsprintf(buf, m, l) >= ERR_BUF_SIZE)
-		printk("HPFS: Grrrr... Kernel memory corrupted ... going on, but it'll crash very soon :-(\n");
-	printk("HPFS: filesystem error: ");
-	if (buf) printk("%s", buf);
-	else printk("%s\n",m);
+	va_list args;
+
+	va_start(args, fmt);
+	vsnprintf(err_buf, sizeof(err_buf), fmt, args);
+	va_end(args);
+
+	printk("HPFS: filesystem error: %s", err_buf);
 	if (!hpfs_sb(s)->sb_was_error) {
 		if (hpfs_sb(s)->sb_err == 2) {
 			printk("; crashing the system because you wanted it\n");
@@ -76,7 +72,6 @@
 		} else if (s->s_flags & MS_RDONLY) printk("; going on - but anything won't be destroyed because it's read-only\n");
 		else printk("; corrupted filesystem mounted read/write - your computer will explode within 20 seconds ... but you wanted it so!\n");
 	} else printk("\n");
-	kfree(buf);
 	hpfs_sb(s)->sb_was_error = 1;
 }
 
@@ -160,12 +155,12 @@
 	return 0;
 }
 
-static kmem_cache_t * hpfs_inode_cachep;
+static struct kmem_cache * hpfs_inode_cachep;
 
 static struct inode *hpfs_alloc_inode(struct super_block *sb)
 {
 	struct hpfs_inode_info *ei;
-	ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, SLAB_NOFS);
+	ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
 	if (!ei)
 		return NULL;
 	ei->vfs_inode.i_version = 1;
@@ -177,7 +172,7 @@
 	kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo;
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 7f47569..0706f5a 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -513,7 +513,7 @@
 }
 
 
-static kmem_cache_t *hugetlbfs_inode_cachep;
+static struct kmem_cache *hugetlbfs_inode_cachep;
 
 static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
 {
@@ -522,7 +522,7 @@
 
 	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
 		return NULL;
-	p = kmem_cache_alloc(hugetlbfs_inode_cachep, SLAB_KERNEL);
+	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
 	if (unlikely(!p)) {
 		hugetlbfs_inc_free_inodes(sbinfo);
 		return NULL;
@@ -545,7 +545,7 @@
 };
 
 
-static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
 
diff --git a/fs/inode.c b/fs/inode.c
index 26cdb11..9ecccab 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -97,7 +97,7 @@
  */
 struct inodes_stat_t inodes_stat;
 
-static kmem_cache_t * inode_cachep __read_mostly;
+static struct kmem_cache * inode_cachep __read_mostly;
 
 static struct inode *alloc_inode(struct super_block *sb)
 {
@@ -109,7 +109,7 @@
 	if (sb->s_op->alloc_inode)
 		inode = sb->s_op->alloc_inode(sb);
 	else
-		inode = (struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL);
+		inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
 
 	if (inode) {
 		struct address_space * const mapping = &inode->i_data;
@@ -209,7 +209,7 @@
 
 EXPORT_SYMBOL(inode_init_once);
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct inode * inode = (struct inode *) foo;
 
@@ -1242,9 +1242,6 @@
  */
 #ifdef CONFIG_QUOTA
 
-/* Function back in dquot.c */
-int remove_inode_dquot_ref(struct inode *, int, struct list_head *);
-
 void remove_dquot_ref(struct super_block *sb, int type,
 			struct list_head *tofree_head)
 {
diff --git a/fs/inotify.c b/fs/inotify.c
index 723836a..f5099d8 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -27,6 +27,7 @@
 #include <linux/idr.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
+#include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/writeback.h>
diff --git a/fs/inotify_user.c b/fs/inotify_user.c
index 017cb0f..e1956e6 100644
--- a/fs/inotify_user.c
+++ b/fs/inotify_user.c
@@ -34,8 +34,8 @@
 
 #include <asm/ioctls.h>
 
-static kmem_cache_t *watch_cachep __read_mostly;
-static kmem_cache_t *event_cachep __read_mostly;
+static struct kmem_cache *watch_cachep __read_mostly;
+static struct kmem_cache *event_cachep __read_mostly;
 
 static struct vfsmount *inotify_mnt __read_mostly;
 
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index c34b862..ea55b6c 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -57,12 +57,12 @@
 static void isofs_read_inode(struct inode *);
 static int isofs_statfs (struct dentry *, struct kstatfs *);
 
-static kmem_cache_t *isofs_inode_cachep;
+static struct kmem_cache *isofs_inode_cachep;
 
 static struct inode *isofs_alloc_inode(struct super_block *sb)
 {
 	struct iso_inode_info *ei;
-	ei = kmem_cache_alloc(isofs_inode_cachep, SLAB_KERNEL);
+	ei = kmem_cache_alloc(isofs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -73,7 +73,7 @@
 	kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode));
 }
 
-static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct iso_inode_info *ei = foo;
 
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index b85c686..10fff94 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -31,7 +31,7 @@
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/pagemap.h>
 #include <linux/kthread.h>
 #include <linux/poison.h>
@@ -1630,7 +1630,7 @@
 #define JBD_MAX_SLABS 5
 #define JBD_SLAB_INDEX(size)  (size >> 11)
 
-static kmem_cache_t *jbd_slab[JBD_MAX_SLABS];
+static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
 static const char *jbd_slab_names[JBD_MAX_SLABS] = {
 	"jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
 };
@@ -1693,7 +1693,7 @@
 /*
  * Journal_head storage management
  */
-static kmem_cache_t *journal_head_cache;
+static struct kmem_cache *journal_head_cache;
 #ifdef CONFIG_JBD_DEBUG
 static atomic_t nr_journal_heads = ATOMIC_INIT(0);
 #endif
@@ -1996,7 +1996,7 @@
 
 #endif
 
-kmem_cache_t *jbd_handle_cache;
+struct kmem_cache *jbd_handle_cache;
 
 static int __init journal_init_handle_cache(void)
 {
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index c532429..d204ab3 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -70,8 +70,8 @@
 #include <linux/init.h>
 #endif
 
-static kmem_cache_t *revoke_record_cache;
-static kmem_cache_t *revoke_table_cache;
+static struct kmem_cache *revoke_record_cache;
+static struct kmem_cache *revoke_table_cache;
 
 /* Each revoke record represents one single revoked block.  During
    journal replay, this involves recording the transaction ID of the
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 4f82bcd6..d38e0d5 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -27,6 +27,8 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 
+static void __journal_temp_unlink_buffer(struct journal_head *jh);
+
 /*
  * get_transaction: obtain a new transaction_t object.
  *
@@ -1499,7 +1501,7 @@
  *
  * Called under j_list_lock.  The journal may not be locked.
  */
-void __journal_temp_unlink_buffer(struct journal_head *jh)
+static void __journal_temp_unlink_buffer(struct journal_head *jh)
 {
 	struct journal_head **list = NULL;
 	transaction_t *transaction;
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 70b2ae1..6bd8005 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -248,8 +248,12 @@
 				bufs = 0;
 				goto write_out_data;
 			}
-		}
-		else {
+		} else if (!locked && buffer_locked(bh)) {
+			__jbd2_journal_file_buffer(jh, commit_transaction,
+						BJ_Locked);
+			jbd_unlock_bh_state(bh);
+			put_bh(bh);
+		} else {
 			BUFFER_TRACE(bh, "writeout complete: unfile");
 			__jbd2_journal_unfile_buffer(jh);
 			jbd_unlock_bh_state(bh);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index c60f378..44fc32b 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -31,7 +31,7 @@
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/pagemap.h>
 #include <linux/kthread.h>
 #include <linux/poison.h>
@@ -1641,7 +1641,7 @@
 #define JBD_MAX_SLABS 5
 #define JBD_SLAB_INDEX(size)  (size >> 11)
 
-static kmem_cache_t *jbd_slab[JBD_MAX_SLABS];
+static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
 static const char *jbd_slab_names[JBD_MAX_SLABS] = {
 	"jbd2_1k", "jbd2_2k", "jbd2_4k", NULL, "jbd2_8k"
 };
@@ -1704,7 +1704,7 @@
 /*
  * Journal_head storage management
  */
-static kmem_cache_t *jbd2_journal_head_cache;
+static struct kmem_cache *jbd2_journal_head_cache;
 #ifdef CONFIG_JBD_DEBUG
 static atomic_t nr_journal_heads = ATOMIC_INIT(0);
 #endif
@@ -2007,7 +2007,7 @@
 
 #endif
 
-kmem_cache_t *jbd2_handle_cache;
+struct kmem_cache *jbd2_handle_cache;
 
 static int __init journal_init_handle_cache(void)
 {
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 380d199..f506646 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -70,8 +70,8 @@
 #include <linux/init.h>
 #endif
 
-static kmem_cache_t *jbd2_revoke_record_cache;
-static kmem_cache_t *jbd2_revoke_table_cache;
+static struct kmem_cache *jbd2_revoke_record_cache;
+static struct kmem_cache *jbd2_revoke_table_cache;
 
 /* Each revoke record represents one single revoked block.  During
    journal replay, this involves recording the transaction ID of the
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index c051a94..3a87001 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -27,6 +27,8 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 
+static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
+
 /*
  * jbd2_get_transaction: obtain a new transaction_t object.
  *
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
index 3f7899e..9f15bce 100644
--- a/fs/jffs/inode-v23.c
+++ b/fs/jffs/inode-v23.c
@@ -61,8 +61,8 @@
 static struct inode_operations jffs_dir_inode_operations;
 static const struct address_space_operations jffs_address_operations;
 
-kmem_cache_t     *node_cache = NULL;
-kmem_cache_t     *fm_cache = NULL;
+struct kmem_cache     *node_cache = NULL;
+struct kmem_cache     *fm_cache = NULL;
 
 /* Called by the VFS at mount time to initialize the whole file system.  */
 static int jffs_fill_super(struct super_block *sb, void *data, int silent)
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c
index 4a543e1..d0e783f 100644
--- a/fs/jffs/intrep.c
+++ b/fs/jffs/intrep.c
@@ -66,6 +66,7 @@
 #include <linux/smp_lock.h>
 #include <linux/time.h>
 #include <linux/ctype.h>
+#include <linux/freezer.h>
 
 #include "intrep.h"
 #include "jffs_fm.h"
@@ -591,7 +592,7 @@
 	D2(printk("jffs_add_virtual_root(): "
 		  "Creating a virtual root directory.\n"));
 
-	if (!(root = kmalloc(sizeof(struct jffs_file), GFP_KERNEL))) {
+	if (!(root = kzalloc(sizeof(struct jffs_file), GFP_KERNEL))) {
 		return -ENOMEM;
 	}
 	no_jffs_file++;
@@ -603,7 +604,6 @@
 	DJM(no_jffs_node++);
 	memset(node, 0, sizeof(struct jffs_node));
 	node->ino = JFFS_MIN_INO;
-	memset(root, 0, sizeof(struct jffs_file));
 	root->ino = JFFS_MIN_INO;
 	root->mode = S_IFDIR | S_IRWXU | S_IRGRP
 		     | S_IXGRP | S_IROTH | S_IXOTH;
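
The hunk above folds the kmalloc()+memset() pair used for the virtual root into a single kzalloc() call, which returns zeroed memory, so the later memset() of "root" is dropped. A minimal illustrative sketch of the same idiom, using a hypothetical structure rather than anything from jffs:

#include <linux/slab.h>

struct example_node {			/* hypothetical stand-in */
	unsigned int ino;
	void *data;
};

static struct example_node *example_node_new(void)
{
	/* kzalloc() == kmalloc() followed by memset(ptr, 0, size) */
	return kzalloc(sizeof(struct example_node), GFP_KERNEL);
}
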
diff --git a/fs/jffs/jffs_fm.c b/fs/jffs/jffs_fm.c
index 29b68d9..077258b 100644
--- a/fs/jffs/jffs_fm.c
+++ b/fs/jffs/jffs_fm.c
@@ -29,8 +29,8 @@
 static struct jffs_fm *jffs_alloc_fm(void);
 static void jffs_free_fm(struct jffs_fm *n);
 
-extern kmem_cache_t     *fm_cache;
-extern kmem_cache_t     *node_cache;
+extern struct kmem_cache     *fm_cache;
+extern struct kmem_cache     *node_cache;
 
 #if CONFIG_JFFS_FS_VERBOSE > 0
 void
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 0ae3cd1..73f0d60 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
+#include <linux/sched.h>
 #include <linux/time.h>
 #include <linux/crc32.h>
 #include <linux/jffs2.h>
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index ff2a872..6eb3dae 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -16,6 +16,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/completion.h>
 #include <linux/sched.h>
+#include <linux/freezer.h>
 #include "nodelist.h"
 
 
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
index 33f2910..83f9881 100644
--- a/fs/jffs2/malloc.c
+++ b/fs/jffs2/malloc.c
@@ -19,16 +19,16 @@
 
 /* These are initialised to NULL in the kernel startup code.
    If you're porting to other operating systems, beware */
-static kmem_cache_t *full_dnode_slab;
-static kmem_cache_t *raw_dirent_slab;
-static kmem_cache_t *raw_inode_slab;
-static kmem_cache_t *tmp_dnode_info_slab;
-static kmem_cache_t *raw_node_ref_slab;
-static kmem_cache_t *node_frag_slab;
-static kmem_cache_t *inode_cache_slab;
+static struct kmem_cache *full_dnode_slab;
+static struct kmem_cache *raw_dirent_slab;
+static struct kmem_cache *raw_inode_slab;
+static struct kmem_cache *tmp_dnode_info_slab;
+static struct kmem_cache *raw_node_ref_slab;
+static struct kmem_cache *node_frag_slab;
+static struct kmem_cache *inode_cache_slab;
 #ifdef CONFIG_JFFS2_FS_XATTR
-static kmem_cache_t *xattr_datum_cache;
-static kmem_cache_t *xattr_ref_cache;
+static struct kmem_cache *xattr_datum_cache;
+static struct kmem_cache *xattr_ref_cache;
 #endif
 
 int __init jffs2_create_slab_caches(void)
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index bc4b810..7deb782 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -28,12 +28,12 @@
 
 static void jffs2_put_super(struct super_block *);
 
-static kmem_cache_t *jffs2_inode_cachep;
+static struct kmem_cache *jffs2_inode_cachep;
 
 static struct inode *jffs2_alloc_inode(struct super_block *sb)
 {
 	struct jffs2_inode_info *ei;
-	ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, SLAB_KERNEL);
+	ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
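
These jffs2 hunks (and the matching ones throughout this series) follow the tree-wide conversion away from the kmem_cache_t typedef to plain struct kmem_cache, and away from the old SLAB_* allocation aliases (SLAB_KERNEL, SLAB_NOFS) to the GFP flags they correspond to. A minimal sketch of the converted slab idiom, with hypothetical names not taken from any of the filesystems touched here:

#include <linux/slab.h>

struct example_inode_info {		/* hypothetical per-inode object */
	unsigned long flags;
};

static struct kmem_cache *example_inode_cachep;	/* formerly: kmem_cache_t * */

static struct example_inode_info *example_alloc_inode(void)
{
	/* plain GFP flags replace the SLAB_KERNEL / SLAB_NOFS aliases */
	return kmem_cache_alloc(example_inode_cachep, GFP_KERNEL);
}
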
@@ -44,7 +44,7 @@
 	kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
 }
 
-static void jffs2_i_init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void jffs2_i_init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;
 
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index b9b7007..7070730 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -19,6 +19,7 @@
 #include <linux/crc32.h>
 #include <linux/mtd/nand.h>
 #include <linux/jiffies.h>
+#include <linux/sched.h>
 
 #include "nodelist.h"
 
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index 37db524..ed814b1 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -9,6 +9,7 @@
 #include <linux/ctype.h>
 #include <linux/capability.h>
 #include <linux/time.h>
+#include <linux/sched.h>
 #include <asm/current.h>
 #include <asm/uaccess.h>
 
diff --git a/fs/jfs/jfs_filsys.h b/fs/jfs/jfs_filsys.h
index 9901928..eb550b3 100644
--- a/fs/jfs/jfs_filsys.h
+++ b/fs/jfs/jfs_filsys.h
@@ -81,7 +81,7 @@
 #define	JFS_SWAP_BYTES		0x00100000	/* running on big endian computer */
 
 /* Directory index */
-#define JFS_DIR_INDEX		0x00200000	/* Persistant index for */
+#define JFS_DIR_INDEX		0x00200000	/* Persistent index for */
 						/* directory entries    */
 
 
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index b89c9ab..5065baa 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -67,7 +67,7 @@
 #include <linux/kthread.h>
 #include <linux/buffer_head.h>		/* for sync_blockdev() */
 #include <linux/bio.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/delay.h>
 #include <linux/mutex.h>
 #include "jfs_incore.h"
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 0cccd1c..b1a1c72 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -74,7 +74,7 @@
 }
 
 #define METAPOOL_MIN_PAGES 32
-static kmem_cache_t *metapage_cache;
+static struct kmem_cache *metapage_cache;
 static mempool_t *metapage_mempool;
 
 #define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)
@@ -180,7 +180,7 @@
 
 #endif
 
-static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct metapage *mp = (struct metapage *)foo;
 
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 81f6f04..d558e51 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -46,7 +46,7 @@
 #include <linux/vmalloc.h>
 #include <linux/smp_lock.h>
 #include <linux/completion.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kthread.h>
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 9c1c6e0..846ac8f 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -44,7 +44,7 @@
 MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
 MODULE_LICENSE("GPL");
 
-static kmem_cache_t * jfs_inode_cachep;
+static struct kmem_cache * jfs_inode_cachep;
 
 static struct super_operations jfs_super_operations;
 static struct export_operations jfs_export_operations;
@@ -93,7 +93,7 @@
 	va_list args;
 
 	va_start(args, function);
-	vsprintf(error_buf, function, args);
+	vsnprintf(error_buf, sizeof(error_buf), function, args);
 	va_end(args);
 
 	printk(KERN_ERR "ERROR: (device %s): %s\n", sb->s_id, error_buf);
@@ -748,7 +748,7 @@
 	.fs_flags	= FS_REQUIRES_DEV,
 };
 
-static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;
 
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 3d84f60..497c3cd5 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -13,6 +13,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/utsname.h>
 #include <linux/smp_lock.h>
+#include <linux/freezer.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/svc.h>
 #include <linux/lockd/lockd.h>
@@ -729,7 +730,7 @@
 		goto retry_cancel;
 	}
 
-	dprintk("lockd: cancel status %d (task %d)\n",
+	dprintk("lockd: cancel status %u (task %u)\n",
 			req->a_res.status, task->tk_pid);
 
 	switch (req->a_res.status) {
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index fb24a97..3d4610c 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -36,34 +36,14 @@
 static void			nlm_gc_hosts(void);
 static struct nsm_handle *	__nsm_find(const struct sockaddr_in *,
 					const char *, int, int);
-
-/*
- * Find an NLM server handle in the cache. If there is none, create it.
- */
-struct nlm_host *
-nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
-			const char *hostname, int hostname_len)
-{
-	return nlm_lookup_host(0, sin, proto, version,
-			       hostname, hostname_len);
-}
-
-/*
- * Find an NLM client handle in the cache. If there is none, create it.
- */
-struct nlm_host *
-nlmsvc_lookup_host(struct svc_rqst *rqstp,
-			const char *hostname, int hostname_len)
-{
-	return nlm_lookup_host(1, &rqstp->rq_addr,
-			       rqstp->rq_prot, rqstp->rq_vers,
-			       hostname, hostname_len);
-}
+static struct nsm_handle *	nsm_find(const struct sockaddr_in *sin,
+					 const char *hostname,
+					 int hostname_len);
 
 /*
  * Common host lookup routine for server & client
  */
-struct nlm_host *
+static struct nlm_host *
 nlm_lookup_host(int server, const struct sockaddr_in *sin,
 					int proto, int version,
 					const char *hostname,
@@ -195,6 +175,29 @@
 }
 
 /*
+ * Find an NLM server handle in the cache. If there is none, create it.
+ */
+struct nlm_host *
+nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
+			const char *hostname, int hostname_len)
+{
+	return nlm_lookup_host(0, sin, proto, version,
+			       hostname, hostname_len);
+}
+
+/*
+ * Find an NLM client handle in the cache. If there is none, create it.
+ */
+struct nlm_host *
+nlmsvc_lookup_host(struct svc_rqst *rqstp,
+			const char *hostname, int hostname_len)
+{
+	return nlm_lookup_host(1, &rqstp->rq_addr,
+			       rqstp->rq_prot, rqstp->rq_vers,
+			       hostname, hostname_len);
+}
+
+/*
  * Create the NLM RPC client for an NLM peer
  */
 struct rpc_clnt *
@@ -495,7 +498,7 @@
 	return nsm;
 }
 
-struct nsm_handle *
+static struct nsm_handle *
 nsm_find(const struct sockaddr_in *sin, const char *hostname, int hostname_len)
 {
 	return __nsm_find(sin, hostname, hostname_len, 1);
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 0ce5c81..f67146a 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -234,7 +234,7 @@
  */
 static void nlm4svc_callback_exit(struct rpc_task *task, void *data)
 {
-	dprintk("lockd: %4d callback returned %d\n", task->tk_pid,
+	dprintk("lockd: %5u callback returned %d\n", task->tk_pid,
 			-task->tk_status);
 }
 
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 32e99a6..3707c3a 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -263,7 +263,7 @@
  */
 static void nlmsvc_callback_exit(struct rpc_task *task, void *data)
 {
-	dprintk("lockd: %4d callback returned %d\n", task->tk_pid,
+	dprintk("lockd: %5u callback returned %d\n", task->tk_pid,
 			-task->tk_status);
 }
 
diff --git a/fs/locks.c b/fs/locks.c
index e0b6a80..1cb0c57 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -142,12 +142,12 @@
 static LIST_HEAD(file_lock_list);
 static LIST_HEAD(blocked_list);
 
-static kmem_cache_t *filelock_cache __read_mostly;
+static struct kmem_cache *filelock_cache __read_mostly;
 
 /* Allocate an empty lock structure. */
 static struct file_lock *locks_alloc_lock(void)
 {
-	return kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
+	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
 }
 
 static void locks_release_private(struct file_lock *fl)
@@ -199,7 +199,7 @@
  * Initialises the fields of the file lock which are invariant for
  * free file_locks.
  */
-static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags)
 {
 	struct file_lock *lock = (struct file_lock *) foo;
 
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 0ff7125..deeb9dc 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -85,7 +85,7 @@
 #ifndef MB_CACHE_INDEXES_COUNT
 	int				c_indexes_count;
 #endif
-	kmem_cache_t			*c_entry_cache;
+	struct kmem_cache			*c_entry_cache;
 	struct list_head		*c_block_hash;
 	struct list_head		*c_indexes_hash[0];
 };
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 1e36bae..629e09b 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -51,12 +51,12 @@
 	return;
 }
 
-static kmem_cache_t * minix_inode_cachep;
+static struct kmem_cache * minix_inode_cachep;
 
 static struct inode *minix_alloc_inode(struct super_block *sb)
 {
 	struct minix_inode_info *ei;
-	ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, SLAB_KERNEL);
+	ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -67,7 +67,7 @@
 	kmem_cache_free(minix_inode_cachep, minix_i(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct minix_inode_info *ei = (struct minix_inode_info *) foo;
 
diff --git a/fs/namei.c b/fs/namei.c
index 28d49b3..db1bca2 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -249,9 +249,11 @@
 
 	/*
 	 * MAY_EXEC on regular files requires special handling: We override
-	 * filesystem execute permissions if the mode bits aren't set.
+	 * filesystem execute permissions if the mode bits aren't set or
+	 * the fs is mounted with the "noexec" flag.
 	 */
-	if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO))
+	if ((mask & MAY_EXEC) && S_ISREG(mode) && (!(mode & S_IXUGO) ||
+			(nd && nd->mnt && (nd->mnt->mnt_flags & MNT_NOEXEC))))
 		return -EACCES;
 
 	/* Ordinary permission routines do not understand MAY_APPEND. */
@@ -1996,8 +1998,7 @@
 void dentry_unhash(struct dentry *dentry)
 {
 	dget(dentry);
-	if (atomic_read(&dentry->d_count))
-		shrink_dcache_parent(dentry);
+	shrink_dcache_parent(dentry);
 	spin_lock(&dcache_lock);
 	spin_lock(&dentry->d_lock);
 	if (atomic_read(&dentry->d_count) == 2)
diff --git a/fs/namespace.c b/fs/namespace.c
index 55442a6..b00ac84 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -36,7 +36,7 @@
 
 static struct list_head *mount_hashtable __read_mostly;
 static int hash_mask __read_mostly, hash_bits __read_mostly;
-static kmem_cache_t *mnt_cache __read_mostly;
+static struct kmem_cache *mnt_cache __read_mostly;
 static struct rw_semaphore namespace_sem;
 
 /* /sys/fs */
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 42e3bef..fae5324 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -40,12 +40,12 @@
 static void ncp_put_super(struct super_block *);
 static int  ncp_statfs(struct dentry *, struct kstatfs *);
 
-static kmem_cache_t * ncp_inode_cachep;
+static struct kmem_cache * ncp_inode_cachep;
 
 static struct inode *ncp_alloc_inode(struct super_block *sb)
 {
 	struct ncp_inode_info *ei;
-	ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, SLAB_KERNEL);
+	ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -56,7 +56,7 @@
 	kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct ncp_inode_info *ei = (struct ncp_inode_info *) foo;
 
@@ -577,12 +577,12 @@
 		server->rcv.ptr = (unsigned char*)&server->rcv.buf;
 		server->rcv.len = 10;
 		server->rcv.state = 0;
-		INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server);
-		INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server);
+		INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc);
+		INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc);
 		sock->sk->sk_write_space = ncp_tcp_write_space;
 	} else {
-		INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server);
-		INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server);
+		INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc);
+		INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc);
 		server->timeout_tm.data = (unsigned long)server;
 		server->timeout_tm.function = ncpdgram_timeout_call;
 	}
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 11c2b25..e496d8b 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -350,9 +350,10 @@
 	}
 }
 
-void ncpdgram_rcv_proc(void *s)
+void ncpdgram_rcv_proc(struct work_struct *work)
 {
-	struct ncp_server *server = s;
+	struct ncp_server *server =
+		container_of(work, struct ncp_server, rcv.tq);
 	struct socket* sock;
 	
 	sock = server->ncp_sock;
@@ -468,9 +469,10 @@
 	}
 }
 
-void ncpdgram_timeout_proc(void *s)
+void ncpdgram_timeout_proc(struct work_struct *work)
 {
-	struct ncp_server *server = s;
+	struct ncp_server *server =
+		container_of(work, struct ncp_server, timeout_tq);
 	mutex_lock(&server->rcv.creq_mutex);
 	__ncpdgram_timeout_proc(server);
 	mutex_unlock(&server->rcv.creq_mutex);
@@ -652,18 +654,20 @@
 	}
 }
 
-void ncp_tcp_rcv_proc(void *s)
+void ncp_tcp_rcv_proc(struct work_struct *work)
 {
-	struct ncp_server *server = s;
+	struct ncp_server *server =
+		container_of(work, struct ncp_server, rcv.tq);
 
 	mutex_lock(&server->rcv.creq_mutex);
 	__ncptcp_rcv_proc(server);
 	mutex_unlock(&server->rcv.creq_mutex);
 }
 
-void ncp_tcp_tx_proc(void *s)
+void ncp_tcp_tx_proc(struct work_struct *work)
 {
-	struct ncp_server *server = s;
+	struct ncp_server *server =
+		container_of(work, struct ncp_server, tx.tq);
 	
 	mutex_lock(&server->rcv.creq_mutex);
 	__ncptcp_try_send(server);
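
The ncpfs handlers above illustrate the workqueue API conversion used throughout this series: work functions now receive the struct work_struct pointer itself and recover their private object with container_of(), and INIT_WORK() no longer takes a data argument (delayed work gets the same treatment via INIT_DELAYED_WORK() and the .work member, as in the nfs_client changes further down). A minimal sketch of the pattern, with a hypothetical example_server structure:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_server {			/* hypothetical owner of a work item */
	struct work_struct rcv_work;
	/* ... other state ... */
};

static void example_rcv_proc(struct work_struct *work)
{
	/* recover the enclosing object from the embedded work_struct */
	struct example_server *server =
		container_of(work, struct example_server, rcv_work);

	/* ... process received data for "server" ... */
	(void)server;
}

static void example_setup(struct example_server *server)
{
	/* old API: INIT_WORK(&server->rcv_work, example_rcv_proc, server); */
	INIT_WORK(&server->rcv_work, example_rcv_proc);
}
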
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 5fea638..23ab145 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -143,7 +143,7 @@
 	INIT_LIST_HEAD(&clp->cl_state_owners);
 	INIT_LIST_HEAD(&clp->cl_unused);
 	spin_lock_init(&clp->cl_lock);
-	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
+	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
 	clp->cl_boot_time = CURRENT_TIME;
 	clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index bdfabf8..f9d678f 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -58,7 +58,7 @@
 
 #define NFSDBG_FACILITY		NFSDBG_VFS
 
-static kmem_cache_t *nfs_direct_cachep;
+static struct kmem_cache *nfs_direct_cachep;
 
 /*
  * This represents a set of asynchronous requests that we're waiting on
@@ -143,7 +143,7 @@
 {
 	struct nfs_direct_req *dreq;
 
-	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
+	dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
 	if (!dreq)
 		return NULL;
 
@@ -307,9 +307,7 @@
 
 		data->task.tk_cookie = (unsigned long) inode;
 
-		lock_kernel();
 		rpc_execute(&data->task);
-		unlock_kernel();
 
 		dfprintk(VFS, "NFS: %5u initiated direct read call (req %s/%Ld, %zu bytes @ offset %Lu)\n",
 				data->task.tk_pid,
@@ -475,9 +473,7 @@
 
 	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
 
-	lock_kernel();
 	rpc_execute(&data->task);
-	unlock_kernel();
 }
 
 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
@@ -641,9 +637,7 @@
 		data->task.tk_priority = RPC_PRIORITY_NORMAL;
 		data->task.tk_cookie = (unsigned long) inode;
 
-		lock_kernel();
 		rpc_execute(&data->task);
-		unlock_kernel();
 
 		dfprintk(VFS, "NFS: %5u initiated direct write call (req %s/%Ld, %zu bytes @ offset %Lu)\n",
 				data->task.tk_pid,
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index cc93865..8e28bff 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -307,28 +307,28 @@
 
 static void nfs_invalidate_page(struct page *page, unsigned long offset)
 {
-	struct inode *inode = page->mapping->host;
-
+	if (offset != 0)
+		return;
 	/* Cancel any unstarted writes on this page */
-	if (offset == 0)
-		nfs_sync_inode_wait(inode, page->index, 1, FLUSH_INVALIDATE);
+	nfs_wb_page_priority(page->mapping->host, page, FLUSH_INVALIDATE);
 }
 
 static int nfs_release_page(struct page *page, gfp_t gfp)
 {
-	if (gfp & __GFP_FS)
-		return !nfs_wb_page(page->mapping->host, page);
-	else
-		/*
-		 * Avoid deadlock on nfs_wait_on_request().
-		 */
+	/*
+	 * Avoid deadlock on nfs_wait_on_request().
+	 */
+	if (!(gfp & __GFP_FS))
 		return 0;
+	/* Hack... Force nfs_wb_page() to write out the page */
+	SetPageDirty(page);
+	return !nfs_wb_page(page->mapping->host, page);
 }
 
 const struct address_space_operations nfs_file_aops = {
 	.readpage = nfs_readpage,
 	.readpages = nfs_readpages,
-	.set_page_dirty = __set_page_dirty_nobuffers,
+	.set_page_dirty = nfs_set_page_dirty,
 	.writepage = nfs_writepage,
 	.writepages = nfs_writepages,
 	.prepare_write = nfs_prepare_write,
@@ -375,6 +375,12 @@
 
 	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
 	result = generic_file_aio_write(iocb, iov, nr_segs, pos);
+	/* Return error values for O_SYNC and IS_SYNC() */
+	if (result >= 0 && (IS_SYNC(inode) || (iocb->ki_filp->f_flags & O_SYNC))) {
+		int err = nfs_fsync(iocb->ki_filp, dentry, 1);
+		if (err < 0)
+			result = err;
+	}
 out:
 	return result;
 
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 08cc4c5..36680d1 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -55,7 +55,7 @@
 
 static void nfs_zap_acl_cache(struct inode *);
 
-static kmem_cache_t * nfs_inode_cachep;
+static struct kmem_cache * nfs_inode_cachep;
 
 static inline unsigned long
 nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
@@ -422,7 +422,7 @@
 	int err;
 
 	/* Flush out writes to the server in order to update c/mtime */
-	nfs_sync_inode_wait(inode, 0, 0, FLUSH_NOCOMMIT);
+	nfs_sync_mapping_range(inode->i_mapping, 0, 0, FLUSH_NOCOMMIT);
 
 	/*
 	 * We may force a getattr if the user cares about atime.
@@ -1080,7 +1080,7 @@
 struct inode *nfs_alloc_inode(struct super_block *sb)
 {
 	struct nfs_inode *nfsi;
-	nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, SLAB_KERNEL);
+	nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL);
 	if (!nfsi)
 		return NULL;
 	nfsi->flags = 0UL;
@@ -1111,7 +1111,7 @@
 #endif
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
 
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index d205466..a28f6ce 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -217,3 +217,21 @@
 	if (sb->s_maxbytes > MAX_LFS_FILESIZE || sb->s_maxbytes <= 0)
 		sb->s_maxbytes = MAX_LFS_FILESIZE;
 }
+
+/*
+ * Determine the number of bytes of data the page contains
+ */
+static inline
+unsigned int nfs_page_length(struct page *page)
+{
+	loff_t i_size = i_size_read(page->mapping->host);
+
+	if (i_size > 0) {
+		pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+		if (page->index < end_index)
+			return PAGE_CACHE_SIZE;
+		if (page->index == end_index)
+			return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1;
+	}
+	return 0;
+}
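
A rough worked example of the new nfs_page_length() helper, assuming 4 KB pages (PAGE_CACHE_SHIFT == 12): with i_size == 10000, end_index == 9999 >> 12 == 2, so pages 0 and 1 report a full 4096 bytes, page 2 (the page containing EOF) reports (9999 & 4095) + 1 == 1808 bytes, and any page at index 3 or beyond reports 0.
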
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index ec1114b..371b804 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -18,10 +18,10 @@
 
 #define NFSDBG_FACILITY		NFSDBG_VFS
 
-static void nfs_expire_automounts(void *list);
+static void nfs_expire_automounts(struct work_struct *work);
 
 LIST_HEAD(nfs_automount_list);
-static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list);
+static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
 int nfs_mountpoint_expiry_timeout = 500 * HZ;
 
 static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
@@ -164,9 +164,9 @@
 	.follow_link	= nfs_follow_mountpoint,
 };
 
-static void nfs_expire_automounts(void *data)
+static void nfs_expire_automounts(struct work_struct *work)
 {
-	struct list_head *list = (struct list_head *)data;
+	struct list_head *list = &nfs_automount_list;
 
 	mark_mounts_for_expiry(list);
 	if (!list_empty(list))
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index e5f128f..510ae52 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -276,51 +276,6 @@
 	return status;
 }
 
-static int nfs3_proc_write(struct nfs_write_data *wdata)
-{
-	int			rpcflags = wdata->flags;
-	struct inode *		inode = wdata->inode;
-	struct nfs_fattr *	fattr = wdata->res.fattr;
-	struct rpc_message	msg = {
-		.rpc_proc	= &nfs3_procedures[NFS3PROC_WRITE],
-		.rpc_argp	= &wdata->args,
-		.rpc_resp	= &wdata->res,
-		.rpc_cred	= wdata->cred,
-	};
-	int			status;
-
-	dprintk("NFS call  write %d @ %Ld\n", wdata->args.count,
-			(long long) wdata->args.offset);
-	nfs_fattr_init(fattr);
-	status = rpc_call_sync(NFS_CLIENT(inode), &msg, rpcflags);
-	if (status >= 0)
-		nfs_post_op_update_inode(inode, fattr);
-	dprintk("NFS reply write: %d\n", status);
-	return status < 0? status : wdata->res.count;
-}
-
-static int nfs3_proc_commit(struct nfs_write_data *cdata)
-{
-	struct inode *		inode = cdata->inode;
-	struct nfs_fattr *	fattr = cdata->res.fattr;
-	struct rpc_message	msg = {
-		.rpc_proc	= &nfs3_procedures[NFS3PROC_COMMIT],
-		.rpc_argp	= &cdata->args,
-		.rpc_resp	= &cdata->res,
-		.rpc_cred	= cdata->cred,
-	};
-	int			status;
-
-	dprintk("NFS call  commit %d @ %Ld\n", cdata->args.count,
-			(long long) cdata->args.offset);
-	nfs_fattr_init(fattr);
-	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
-	if (status >= 0)
-		nfs_post_op_update_inode(inode, fattr);
-	dprintk("NFS reply commit: %d\n", status);
-	return status;
-}
-
 /*
  * Create a regular file.
  * For now, we don't implement O_EXCL.
@@ -369,7 +324,7 @@
 
 	/* If the server doesn't support the exclusive creation semantics,
 	 * try again with simple 'guarded' mode. */
-	if (status == NFSERR_NOTSUPP) {
+	if (status == -ENOTSUPP) {
 		switch (arg.createmode) {
 			case NFS3_CREATE_EXCLUSIVE:
 				arg.createmode = NFS3_CREATE_GUARDED;
@@ -690,8 +645,6 @@
 	};
 	int			status;
 
-	lock_kernel();
-
 	if (plus)
 		msg.rpc_proc = &nfs3_procedures[NFS3PROC_READDIRPLUS];
 
@@ -702,7 +655,6 @@
 	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
 	nfs_refresh_inode(dir, &dir_attr);
 	dprintk("NFS reply readdir: %d\n", status);
-	unlock_kernel();
 	return status;
 }
 
@@ -904,8 +856,6 @@
 	.access		= nfs3_proc_access,
 	.readlink	= nfs3_proc_readlink,
 	.read		= nfs3_proc_read,
-	.write		= nfs3_proc_write,
-	.commit		= nfs3_proc_commit,
 	.create		= nfs3_proc_create,
 	.remove		= nfs3_proc_remove,
 	.unlink_setup	= nfs3_proc_unlink_setup,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 6f34667..c26cd97 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -185,7 +185,7 @@
 extern void nfs4_schedule_state_renewal(struct nfs_client *);
 extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
 extern void nfs4_kill_renewd(struct nfs_client *);
-extern void nfs4_renew_state(void *);
+extern void nfs4_renew_state(struct work_struct *);
 
 /* nfs4state.c */
 struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 8118036..ee458ae 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -636,7 +636,7 @@
 		smp_wmb();
 	} else
 		status = data->rpc_status;
-	rpc_release_task(task);
+	rpc_put_task(task);
 	return status;
 }
 
@@ -742,7 +742,7 @@
 		smp_wmb();
 	} else
 		status = data->rpc_status;
-	rpc_release_task(task);
+	rpc_put_task(task);
 	if (status != 0)
 		return status;
 
@@ -1775,89 +1775,6 @@
 	return err;
 }
 
-static int _nfs4_proc_write(struct nfs_write_data *wdata)
-{
-	int rpcflags = wdata->flags;
-	struct inode *inode = wdata->inode;
-	struct nfs_fattr *fattr = wdata->res.fattr;
-	struct nfs_server *server = NFS_SERVER(inode);
-	struct rpc_message msg = {
-		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_WRITE],
-		.rpc_argp	= &wdata->args,
-		.rpc_resp	= &wdata->res,
-		.rpc_cred	= wdata->cred,
-	};
-	int status;
-
-	dprintk("NFS call  write %d @ %Ld\n", wdata->args.count,
-			(long long) wdata->args.offset);
-
-	wdata->args.bitmask = server->attr_bitmask;
-	wdata->res.server = server;
-	wdata->timestamp = jiffies;
-	nfs_fattr_init(fattr);
-	status = rpc_call_sync(server->client, &msg, rpcflags);
-	dprintk("NFS reply write: %d\n", status);
-	if (status < 0)
-		return status;
-	renew_lease(server, wdata->timestamp);
-	nfs_post_op_update_inode(inode, fattr);
-	return wdata->res.count;
-}
-
-static int nfs4_proc_write(struct nfs_write_data *wdata)
-{
-	struct nfs4_exception exception = { };
-	int err;
-	do {
-		err = nfs4_handle_exception(NFS_SERVER(wdata->inode),
-				_nfs4_proc_write(wdata),
-				&exception);
-	} while (exception.retry);
-	return err;
-}
-
-static int _nfs4_proc_commit(struct nfs_write_data *cdata)
-{
-	struct inode *inode = cdata->inode;
-	struct nfs_fattr *fattr = cdata->res.fattr;
-	struct nfs_server *server = NFS_SERVER(inode);
-	struct rpc_message msg = {
-		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
-		.rpc_argp	= &cdata->args,
-		.rpc_resp	= &cdata->res,
-		.rpc_cred	= cdata->cred,
-	};
-	int status;
-
-	dprintk("NFS call  commit %d @ %Ld\n", cdata->args.count,
-			(long long) cdata->args.offset);
-
-	cdata->args.bitmask = server->attr_bitmask;
-	cdata->res.server = server;
-	cdata->timestamp = jiffies;
-	nfs_fattr_init(fattr);
-	status = rpc_call_sync(server->client, &msg, 0);
-	if (status >= 0)
-		renew_lease(server, cdata->timestamp);
-	dprintk("NFS reply commit: %d\n", status);
-	if (status >= 0)
-		nfs_post_op_update_inode(inode, fattr);
-	return status;
-}
-
-static int nfs4_proc_commit(struct nfs_write_data *cdata)
-{
-	struct nfs4_exception exception = { };
-	int err;
-	do {
-		err = nfs4_handle_exception(NFS_SERVER(cdata->inode),
-				_nfs4_proc_commit(cdata),
-				&exception);
-	} while (exception.retry);
-	return err;
-}
-
 /*
  * Got race?
  * We will need to arrange for the VFS layer to provide an atomic open.
@@ -2223,13 +2140,11 @@
 			dentry->d_parent->d_name.name,
 			dentry->d_name.name,
 			(unsigned long long)cookie);
-	lock_kernel();
 	nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
 	res.pgbase = args.pgbase;
 	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
 	if (status == 0)
 		memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
-	unlock_kernel();
 	dprintk("%s: returns %d\n", __FUNCTION__, status);
 	return status;
 }
@@ -3067,7 +2982,7 @@
 		if (status == 0)
 			nfs_post_op_update_inode(inode, &data->fattr);
 	}
-	rpc_release_task(task);
+	rpc_put_task(task);
 	return status;
 }
 
@@ -3314,7 +3229,7 @@
 	if (IS_ERR(task))
 		goto out;
 	status = nfs4_wait_for_completion_rpc_task(task);
-	rpc_release_task(task);
+	rpc_put_task(task);
 out:
 	return status;
 }
@@ -3430,7 +3345,7 @@
 		task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
 				data->arg.lock_seqid);
 		if (!IS_ERR(task))
-			rpc_release_task(task);
+			rpc_put_task(task);
 		dprintk("%s: cancelling lock!\n", __FUNCTION__);
 	} else
 		nfs_free_seqid(data->arg.lock_seqid);
@@ -3472,7 +3387,7 @@
 			ret = -EAGAIN;
 	} else
 		data->cancelled = 1;
-	rpc_release_task(task);
+	rpc_put_task(task);
 	dprintk("%s: done, ret = %d!\n", __FUNCTION__, ret);
 	return ret;
 }
@@ -3732,8 +3647,6 @@
 	.access		= nfs4_proc_access,
 	.readlink	= nfs4_proc_readlink,
 	.read		= nfs4_proc_read,
-	.write		= nfs4_proc_write,
-	.commit		= nfs4_proc_commit,
 	.create		= nfs4_proc_create,
 	.remove		= nfs4_proc_remove,
 	.unlink_setup	= nfs4_proc_unlink_setup,
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 7b6df18..8232985 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -59,9 +59,10 @@
 #define NFSDBG_FACILITY	NFSDBG_PROC
 
 void
-nfs4_renew_state(void *data)
+nfs4_renew_state(struct work_struct *work)
 {
-	struct nfs_client *clp = (struct nfs_client *)data;
+	struct nfs_client *clp =
+		container_of(work, struct nfs_client, cl_renewd.work);
 	struct rpc_cred *cred;
 	long lease, timeout;
 	unsigned long last, now;
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 8dfefe4..75f819d 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -98,7 +98,7 @@
 static char nfs_root_name[256] __initdata = "";
 
 /* Address of NFS server */
-static __u32 servaddr __initdata = 0;
+static __be32 servaddr __initdata = 0;
 
 /* Name of directory to mount */
 static char nfs_path[NFS_MAXPATHLEN] __initdata = { 0, };
@@ -327,7 +327,7 @@
  */
 static int __init root_nfs_addr(void)
 {
-	if ((servaddr = root_server_addr) == INADDR_NONE) {
+	if ((servaddr = root_server_addr) == htonl(INADDR_NONE)) {
 		printk(KERN_ERR "Root-NFS: No NFS server available, giving up.\n");
 		return -1;
 	}
@@ -411,7 +411,7 @@
  *  Construct sockaddr_in from address and port number.
  */
 static inline void
-set_sockaddr(struct sockaddr_in *sin, __u32 addr, __u16 port)
+set_sockaddr(struct sockaddr_in *sin, __be32 addr, __be16 port)
 {
 	sin->sin_family = AF_INET;
 	sin->sin_addr.s_addr = addr;
@@ -468,14 +468,13 @@
 		dprintk("Root-NFS: Portmapper on server returned %d "
 			"as nfsd port\n", port);
 	}
-	nfs_port = htons(nfs_port);
 
 	if ((port = root_nfs_getport(NFS_MNT_PROGRAM, mountd_ver, proto)) < 0) {
 		printk(KERN_ERR "Root-NFS: Unable to get mountd port "
 				"number from server, using default\n");
 		port = mountd_port;
 	}
-	mount_port = htons(port);
+	mount_port = port;
 	dprintk("Root-NFS: mountd port is %d\n", port);
 
 	return 0;
@@ -496,7 +495,7 @@
 	int version = (nfs_data.flags & NFS_MOUNT_VER3) ?
 					NFS_MNT3_VERSION : NFS_MNT_VERSION;
 
-	set_sockaddr(&sin, servaddr, mount_port);
+	set_sockaddr(&sin, servaddr, htons(mount_port));
 	status = nfsroot_mount(&sin, nfs_path, &fh, version, protocol);
 	if (status < 0)
 		printk(KERN_ERR "Root-NFS: Server returned error %d "
@@ -519,6 +518,6 @@
 	 || root_nfs_ports() < 0
 	 || root_nfs_get_handle() < 0)
 		return NULL;
-	set_sockaddr((struct sockaddr_in *) &nfs_data.addr, servaddr, nfs_port);
+	set_sockaddr((struct sockaddr_in *) &nfs_data.addr, servaddr, htons(nfs_port));
 	return (void*)&nfs_data;
 }
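
The nfsroot.c hunks above switch the address and port parameters to the sparse-annotated __be32/__be16 types and keep port numbers in host order until the sockaddr is filled in, so the htons() conversion happens exactly once, at the set_sockaddr() call sites. A minimal sketch of that "convert once, at the boundary" idiom, with a hypothetical helper name:

#include <linux/in.h>
#include <linux/socket.h>

static void example_fill_sockaddr(struct sockaddr_in *sin, __be32 addr, int hport)
{
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = addr;	/* already big-endian (__be32) */
	sin->sin_port = htons(hport);	/* single, explicit byte-order conversion */
}
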
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 829af32..ca4b1d4 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -17,16 +17,17 @@
 #include <linux/nfs_page.h>
 #include <linux/nfs_fs.h>
 #include <linux/nfs_mount.h>
+#include <linux/writeback.h>
 
 #define NFS_PARANOIA 1
 
-static kmem_cache_t *nfs_page_cachep;
+static struct kmem_cache *nfs_page_cachep;
 
 static inline struct nfs_page *
 nfs_page_alloc(void)
 {
 	struct nfs_page	*p;
-	p = kmem_cache_alloc(nfs_page_cachep, SLAB_KERNEL);
+	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
 	if (p) {
 		memset(p, 0, sizeof(*p));
 		INIT_LIST_HEAD(&p->wb_list);
@@ -268,11 +269,10 @@
 
 #define NFS_SCAN_MAXENTRIES 16
 /**
- * nfs_scan_lock_dirty - Scan the radix tree for dirty requests
- * @nfsi: NFS inode
+ * nfs_scan_dirty - Scan the radix tree for dirty requests
+ * @mapping: pointer to address space
+ * @wbc: writeback_control structure
  * @dst: Destination list
- * @idx_start: lower bound of page->index to scan
- * @npages: idx_start + npages sets the upper bound to scan.
  *
  * Moves elements from one of the inode request lists.
  * If the number of requests is set to 0, the entire address_space
@@ -280,46 +280,63 @@
  * The requests are *not* checked to ensure that they form a contiguous set.
  * You must be holding the inode's req_lock when calling this function
  */
-int
-nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst,
-	      unsigned long idx_start, unsigned int npages)
+long nfs_scan_dirty(struct address_space *mapping,
+			struct writeback_control *wbc,
+			struct list_head *dst)
 {
+	struct nfs_inode *nfsi = NFS_I(mapping->host);
 	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
 	struct nfs_page *req;
-	unsigned long idx_end;
+	pgoff_t idx_start, idx_end;
+	long res = 0;
 	int found, i;
-	int res;
 
-	res = 0;
-	if (npages == 0)
-		idx_end = ~0;
-	else
-		idx_end = idx_start + npages - 1;
+	if (nfsi->ndirty == 0)
+		return 0;
+	if (wbc->range_cyclic) {
+		idx_start = 0;
+		idx_end = ULONG_MAX;
+	} else if (wbc->range_end == 0) {
+		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
+		idx_end = ULONG_MAX;
+	} else {
+		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
+		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
+	}
 
 	for (;;) {
+		unsigned int toscan = NFS_SCAN_MAXENTRIES;
+
 		found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
-				(void **)&pgvec[0], idx_start, NFS_SCAN_MAXENTRIES,
+				(void **)&pgvec[0], idx_start, toscan,
 				NFS_PAGE_TAG_DIRTY);
+
+		/* Did we make progress? */
 		if (found <= 0)
 			break;
+
 		for (i = 0; i < found; i++) {
 			req = pgvec[i];
-			if (req->wb_index > idx_end)
+			if (!wbc->range_cyclic && req->wb_index > idx_end)
 				goto out;
 
+			/* Try to lock request and mark it for writeback */
+			if (!nfs_set_page_writeback_locked(req))
+				goto next;
+			radix_tree_tag_clear(&nfsi->nfs_page_tree,
+					req->wb_index, NFS_PAGE_TAG_DIRTY);
+			nfsi->ndirty--;
+			nfs_list_remove_request(req);
+			nfs_list_add_request(req, dst);
+			res++;
+			if (res == LONG_MAX)
+				goto out;
+next:
 			idx_start = req->wb_index + 1;
-
-			if (nfs_set_page_writeback_locked(req)) {
-				radix_tree_tag_clear(&nfsi->nfs_page_tree,
-						req->wb_index, NFS_PAGE_TAG_DIRTY);
-				nfs_list_remove_request(req);
-				nfs_list_add_request(req, dst);
-				dec_zone_page_state(req->wb_page, NR_FILE_DIRTY);
-				res++;
-			}
 		}
 	}
 out:
+	WARN_ON ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty));
 	return res;
 }
 
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 4529cc4..10f5e80 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -215,32 +215,6 @@
 	return status;
 }
 
-static int nfs_proc_write(struct nfs_write_data *wdata)
-{
-	int			flags = wdata->flags;
-	struct inode *		inode = wdata->inode;
-	struct nfs_fattr *	fattr = wdata->res.fattr;
-	struct rpc_message	msg = {
-		.rpc_proc	= &nfs_procedures[NFSPROC_WRITE],
-		.rpc_argp	= &wdata->args,
-		.rpc_resp	= &wdata->res,
-		.rpc_cred	= wdata->cred,
-	};
-	int			status;
-
-	dprintk("NFS call  write %d @ %Ld\n", wdata->args.count,
-			(long long) wdata->args.offset);
-	nfs_fattr_init(fattr);
-	status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
-	if (status >= 0) {
-		nfs_post_op_update_inode(inode, fattr);
-		wdata->res.count = wdata->args.count;
-		wdata->verf.committed = NFS_FILE_SYNC;
-	}
-	dprintk("NFS reply write: %d\n", status);
-	return status < 0? status : wdata->res.count;
-}
-
 static int
 nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
 		int flags, struct nameidata *nd)
@@ -545,13 +519,10 @@
 	};
 	int			status;
 
-	lock_kernel();
-
 	dprintk("NFS call  readdir %d\n", (unsigned int)cookie);
 	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
 
 	dprintk("NFS reply readdir: %d\n", status);
-	unlock_kernel();
 	return status;
 }
 
@@ -696,8 +667,6 @@
 	.access		= NULL,		       /* access */
 	.readlink	= nfs_proc_readlink,
 	.read		= nfs_proc_read,
-	.write		= nfs_proc_write,
-	.commit		= NULL,		       /* commit */
 	.create		= nfs_proc_create,
 	.remove		= nfs_proc_remove,
 	.unlink_setup	= nfs_proc_unlink_setup,
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index c2e49c3..a9c2652 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -30,6 +30,7 @@
 
 #include <asm/system.h>
 
+#include "internal.h"
 #include "iostat.h"
 
 #define NFSDBG_FACILITY		NFSDBG_PAGECACHE
@@ -38,7 +39,7 @@
 static const struct rpc_call_ops nfs_read_partial_ops;
 static const struct rpc_call_ops nfs_read_full_ops;
 
-static kmem_cache_t *nfs_rdata_cachep;
+static struct kmem_cache *nfs_rdata_cachep;
 static mempool_t *nfs_rdata_mempool;
 
 #define MIN_POOL_READ	(32)
@@ -46,7 +47,7 @@
 struct nfs_read_data *nfs_readdata_alloc(size_t len)
 {
 	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS);
+	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);
 
 	if (p) {
 		memset(p, 0, sizeof(*p));
@@ -65,35 +66,25 @@
 	return p;
 }
 
-static void nfs_readdata_free(struct nfs_read_data *p)
+static void nfs_readdata_rcu_free(struct rcu_head *head)
 {
+	struct nfs_read_data *p = container_of(head, struct nfs_read_data, task.u.tk_rcu);
 	if (p && (p->pagevec != &p->page_array[0]))
 		kfree(p->pagevec);
 	mempool_free(p, nfs_rdata_mempool);
 }
 
+static void nfs_readdata_free(struct nfs_read_data *rdata)
+{
+	call_rcu_bh(&rdata->task.u.tk_rcu, nfs_readdata_rcu_free);
+}
+
 void nfs_readdata_release(void *data)
 {
         nfs_readdata_free(data);
 }
 
 static
-unsigned int nfs_page_length(struct inode *inode, struct page *page)
-{
-	loff_t i_size = i_size_read(inode);
-	unsigned long idx;
-
-	if (i_size <= 0)
-		return 0;
-	idx = (i_size - 1) >> PAGE_CACHE_SHIFT;
-	if (page->index > idx)
-		return 0;
-	if (page->index != idx)
-		return PAGE_CACHE_SIZE;
-	return 1 + ((i_size - 1) & (PAGE_CACHE_SIZE - 1));
-}
-
-static
 int nfs_return_empty_page(struct page *page)
 {
 	memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
@@ -139,12 +130,12 @@
 {
 	unsigned int	rsize = NFS_SERVER(inode)->rsize;
 	unsigned int	count = PAGE_CACHE_SIZE;
-	int		result;
+	int result = -ENOMEM;
 	struct nfs_read_data *rdata;
 
 	rdata = nfs_readdata_alloc(count);
 	if (!rdata)
-		return -ENOMEM;
+		goto out_unlock;
 
 	memset(rdata, 0, sizeof(*rdata));
 	rdata->flags = (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
@@ -212,8 +203,9 @@
 	result = 0;
 
 io_error:
-	unlock_page(page);
 	nfs_readdata_free(rdata);
+out_unlock:
+	unlock_page(page);
 	return result;
 }
 
@@ -224,7 +216,7 @@
 	struct nfs_page	*new;
 	unsigned int len;
 
-	len = nfs_page_length(inode, page);
+	len = nfs_page_length(page);
 	if (len == 0)
 		return nfs_return_empty_page(page);
 	new = nfs_create_request(ctx, inode, page, 0, len);
@@ -316,9 +308,7 @@
 	sigset_t oldset;
 
 	rpc_clnt_sigmask(clnt, &oldset);
-	lock_kernel();
 	rpc_execute(&data->task);
-	unlock_kernel();
 	rpc_clnt_sigunmask(clnt, &oldset);
 }
 
@@ -455,6 +445,55 @@
 }
 
 /*
+ * This is the callback from RPC telling us whether a reply was
+ * received or some error occurred (timeout or socket shutdown).
+ */
+int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
+{
+	int status;
+
+	dprintk("%s: %4d, (status %d)\n", __FUNCTION__, task->tk_pid,
+			task->tk_status);
+
+	status = NFS_PROTO(data->inode)->read_done(task, data);
+	if (status != 0)
+		return status;
+
+	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);
+
+	if (task->tk_status == -ESTALE) {
+		set_bit(NFS_INO_STALE, &NFS_FLAGS(data->inode));
+		nfs_mark_for_revalidate(data->inode);
+	}
+	spin_lock(&data->inode->i_lock);
+	NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME;
+	spin_unlock(&data->inode->i_lock);
+	return 0;
+}
+
+static int nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
+{
+	struct nfs_readargs *argp = &data->args;
+	struct nfs_readres *resp = &data->res;
+
+	if (resp->eof || resp->count == argp->count)
+		return 0;
+
+	/* This is a short read! */
+	nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
+	/* Has the server at least made some progress? */
+	if (resp->count == 0)
+		return 0;
+
+	/* Yes, so retry the read at the end of the data */
+	argp->offset += resp->count;
+	argp->pgbase += resp->count;
+	argp->count -= resp->count;
+	rpc_restart_call(task);
+	return -EAGAIN;
+}
+
+/*
  * Handle a read reply that fills part of a page.
  */
 static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
@@ -463,12 +502,16 @@
 	struct nfs_page *req = data->req;
 	struct page *page = req->wb_page;
  
-	if (likely(task->tk_status >= 0))
-		nfs_readpage_truncate_uninitialised_page(data);
-	else
-		SetPageError(page);
 	if (nfs_readpage_result(task, data) != 0)
 		return;
+
+	if (likely(task->tk_status >= 0)) {
+		nfs_readpage_truncate_uninitialised_page(data);
+		if (nfs_readpage_retry(task, data) != 0)
+			return;
+	}
+	if (unlikely(task->tk_status < 0))
+		SetPageError(page);
 	if (atomic_dec_and_test(&req->wb_complete)) {
 		if (!PageError(page))
 			SetPageUptodate(page);
@@ -496,25 +539,13 @@
 	count += base;
 	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
 		SetPageUptodate(*pages);
-	if (count != 0)
+	if (count == 0)
+		return;
+	/* Was this a short read? */
+	if (data->res.eof || data->res.count == data->args.count)
 		SetPageUptodate(*pages);
 }
 
-static void nfs_readpage_set_pages_error(struct nfs_read_data *data)
-{
-	unsigned int count = data->args.count;
-	unsigned int base = data->args.pgbase;
-	struct page **pages;
-
-	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
-	base &= ~PAGE_CACHE_MASK;
-	count += base;
-	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
-		SetPageError(*pages);
-	if (count != 0)
-		SetPageError(*pages);
-}
-
 /*
  * This is the callback from RPC telling us whether a reply was
  * received or some error occurred (timeout or socket shutdown).
@@ -523,19 +554,20 @@
 {
 	struct nfs_read_data *data = calldata;
 
+	if (nfs_readpage_result(task, data) != 0)
+		return;
 	/*
-	 * Note: nfs_readpage_result may change the values of
+	 * Note: nfs_readpage_retry may change the values of
 	 * data->args. In the multi-page case, we therefore need
-	 * to ensure that we call the next nfs_readpage_set_page_uptodate()
-	 * first in the multi-page case.
+	 * to ensure that we call nfs_readpage_set_pages_uptodate()
+	 * first.
 	 */
 	if (likely(task->tk_status >= 0)) {
 		nfs_readpage_truncate_uninitialised_page(data);
 		nfs_readpage_set_pages_uptodate(data);
-	} else
-		nfs_readpage_set_pages_error(data);
-	if (nfs_readpage_result(task, data) != 0)
-		return;
+		if (nfs_readpage_retry(task, data) != 0)
+			return;
+	}
 	while (!list_empty(&data->pages)) {
 		struct nfs_page *req = nfs_list_entry(data->pages.next);
 
@@ -550,50 +582,6 @@
 };
 
 /*
- * This is the callback from RPC telling us whether a reply was
- * received or some error occurred (timeout or socket shutdown).
- */
-int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
-{
-	struct nfs_readargs *argp = &data->args;
-	struct nfs_readres *resp = &data->res;
-	int status;
-
-	dprintk("NFS: %4d nfs_readpage_result, (status %d)\n",
-		task->tk_pid, task->tk_status);
-
-	status = NFS_PROTO(data->inode)->read_done(task, data);
-	if (status != 0)
-		return status;
-
-	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, resp->count);
-
-	if (task->tk_status < 0) {
-		if (task->tk_status == -ESTALE) {
-			set_bit(NFS_INO_STALE, &NFS_FLAGS(data->inode));
-			nfs_mark_for_revalidate(data->inode);
-		}
-	} else if (resp->count < argp->count && !resp->eof) {
-		/* This is a short read! */
-		nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
-		/* Has the server at least made some progress? */
-		if (resp->count != 0) {
-			/* Yes, so retry the read at the end of the data */
-			argp->offset += resp->count;
-			argp->pgbase += resp->count;
-			argp->count -= resp->count;
-			rpc_restart_call(task);
-			return -EAGAIN;
-		}
-		task->tk_status = -EIO;
-	}
-	spin_lock(&data->inode->i_lock);
-	NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME;
-	spin_unlock(&data->inode->i_lock);
-	return 0;
-}
-
-/*
  * Read a page over NFS.
  * We read the page synchronously in the following case:
  *  -	The error flag is set for this page. This happens only when a
@@ -626,9 +614,10 @@
 		goto out_error;
 
 	if (file == NULL) {
+		error = -EBADF;
 		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
 		if (ctx == NULL)
-			return -EBADF;
+			goto out_error;
 	} else
 		ctx = get_nfs_open_context((struct nfs_open_context *)
 				file->private_data);
@@ -663,7 +652,7 @@
 	unsigned int len;
 
 	nfs_wb_page(inode, page);
-	len = nfs_page_length(inode, page);
+	len = nfs_page_length(page);
 	if (len == 0)
 		return nfs_return_empty_page(page);
 	new = nfs_create_request(desc->ctx, inode, page, 0, len);
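
The read-side changes above (mirrored for writes in fs/nfs/write.c below) stop freeing nfs_read_data directly: nfs_readdata_free() now queues the release through call_rcu_bh() on the rcu_head embedded in the rpc_task (task.u.tk_rcu), and the callback recovers the containing structure with container_of(), so the memory is not reused until a bottom-half RCU grace period has passed. A minimal sketch of this deferred-free idiom, with a hypothetical example_data structure:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_data {			/* hypothetical object with an embedded rcu_head */
	struct rcu_head rcu;
	char payload[64];
};

static void example_rcu_free(struct rcu_head *head)
{
	struct example_data *p = container_of(head, struct example_data, rcu);

	kfree(p);
}

static void example_release(struct example_data *p)
{
	/* defer the actual free until outstanding rcu_read_lock_bh() readers finish */
	call_rcu_bh(&p->rcu, example_rcu_free);
}
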
diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
index 600bbe6..6c68611 100644
--- a/fs/nfs/symlink.c
+++ b/fs/nfs/symlink.c
@@ -33,9 +33,7 @@
 {
 	int error;
 
-	lock_kernel();
 	error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE);
-	unlock_kernel();
 	if (error < 0)
 		goto error;
 	SetPageUptodate(page);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 883dd4a..594eb16 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -63,6 +63,7 @@
 #include <linux/smp_lock.h>
 
 #include "delegation.h"
+#include "internal.h"
 #include "iostat.h"
 
 #define NFSDBG_FACILITY		NFSDBG_PAGECACHE
@@ -74,18 +75,17 @@
  * Local function declarations
  */
 static struct nfs_page * nfs_update_request(struct nfs_open_context*,
-					    struct inode *,
 					    struct page *,
 					    unsigned int, unsigned int);
+static void nfs_mark_request_dirty(struct nfs_page *req);
 static int nfs_wait_on_write_congestion(struct address_space *, int);
 static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
-static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
-			   unsigned int npages, int how);
+static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
 static const struct rpc_call_ops nfs_write_partial_ops;
 static const struct rpc_call_ops nfs_write_full_ops;
 static const struct rpc_call_ops nfs_commit_ops;
 
-static kmem_cache_t *nfs_wdata_cachep;
+static struct kmem_cache *nfs_wdata_cachep;
 static mempool_t *nfs_wdata_mempool;
 static mempool_t *nfs_commit_mempool;
 
@@ -93,7 +93,7 @@
 
 struct nfs_write_data *nfs_commit_alloc(void)
 {
-	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);
+	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
 
 	if (p) {
 		memset(p, 0, sizeof(*p));
@@ -102,17 +102,23 @@
 	return p;
 }
 
-void nfs_commit_free(struct nfs_write_data *p)
+void nfs_commit_rcu_free(struct rcu_head *head)
 {
+	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
 	if (p && (p->pagevec != &p->page_array[0]))
 		kfree(p->pagevec);
 	mempool_free(p, nfs_commit_mempool);
 }
 
+void nfs_commit_free(struct nfs_write_data *wdata)
+{
+	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
+}
+
 struct nfs_write_data *nfs_writedata_alloc(size_t len)
 {
 	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);
+	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
 
 	if (p) {
 		memset(p, 0, sizeof(*p));
@@ -131,18 +137,47 @@
 	return p;
 }
 
-static void nfs_writedata_free(struct nfs_write_data *p)
+static void nfs_writedata_rcu_free(struct rcu_head *head)
 {
+	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
 	if (p && (p->pagevec != &p->page_array[0]))
 		kfree(p->pagevec);
 	mempool_free(p, nfs_wdata_mempool);
 }
 
+static void nfs_writedata_free(struct nfs_write_data *wdata)
+{
+	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
+}
+
 void nfs_writedata_release(void *wdata)
 {
 	nfs_writedata_free(wdata);
 }
 
+static struct nfs_page *nfs_page_find_request_locked(struct page *page)
+{
+	struct nfs_page *req = NULL;
+
+	if (PagePrivate(page)) {
+		req = (struct nfs_page *)page_private(page);
+		if (req != NULL)
+			atomic_inc(&req->wb_count);
+	}
+	return req;
+}
+
+static struct nfs_page *nfs_page_find_request(struct page *page)
+{
+	struct nfs_page *req = NULL;
+	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+
+	spin_lock(req_lock);
+	req = nfs_page_find_request_locked(page);
+	spin_unlock(req_lock);
+	return req;
+}
+
 /* Adjust the file length if we're writing beyond the end */
 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
 {
@@ -164,113 +199,34 @@
  */
 static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
 {
-	loff_t end_offs;
-
 	if (PageUptodate(page))
 		return;
 	if (base != 0)
 		return;
-	if (count == PAGE_CACHE_SIZE) {
-		SetPageUptodate(page);
+	if (count != nfs_page_length(page))
 		return;
-	}
-
-	end_offs = i_size_read(page->mapping->host) - 1;
-	if (end_offs < 0)
-		return;
-	/* Is this the last page? */
-	if (page->index != (unsigned long)(end_offs >> PAGE_CACHE_SHIFT))
-		return;
-	/* This is the last page: set PG_uptodate if we cover the entire
-	 * extent of the data, then zero the rest of the page.
-	 */
-	if (count == (unsigned int)(end_offs & (PAGE_CACHE_SIZE - 1)) + 1) {
+	if (count != PAGE_CACHE_SIZE)
 		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
-		SetPageUptodate(page);
-	}
+	SetPageUptodate(page);
 }
 
-/*
- * Write a page synchronously.
- * Offset is the data offset within the page.
- */
-static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
-		struct page *page, unsigned int offset, unsigned int count,
-		int how)
-{
-	unsigned int	wsize = NFS_SERVER(inode)->wsize;
-	int		result, written = 0;
-	struct nfs_write_data *wdata;
-
-	wdata = nfs_writedata_alloc(wsize);
-	if (!wdata)
-		return -ENOMEM;
-
-	wdata->flags = how;
-	wdata->cred = ctx->cred;
-	wdata->inode = inode;
-	wdata->args.fh = NFS_FH(inode);
-	wdata->args.context = ctx;
-	wdata->args.pages = &page;
-	wdata->args.stable = NFS_FILE_SYNC;
-	wdata->args.pgbase = offset;
-	wdata->args.count = wsize;
-	wdata->res.fattr = &wdata->fattr;
-	wdata->res.verf = &wdata->verf;
-
-	dprintk("NFS:      nfs_writepage_sync(%s/%Ld %d@%Ld)\n",
-		inode->i_sb->s_id,
-		(long long)NFS_FILEID(inode),
-		count, (long long)(page_offset(page) + offset));
-
-	set_page_writeback(page);
-	nfs_begin_data_update(inode);
-	do {
-		if (count < wsize)
-			wdata->args.count = count;
-		wdata->args.offset = page_offset(page) + wdata->args.pgbase;
-
-		result = NFS_PROTO(inode)->write(wdata);
-
-		if (result < 0) {
-			/* Must mark the page invalid after I/O error */
-			ClearPageUptodate(page);
-			goto io_error;
-		}
-		if (result < wdata->args.count)
-			printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n",
-					wdata->args.count, result);
-
-		wdata->args.offset += result;
-	        wdata->args.pgbase += result;
-		written += result;
-		count -= result;
-		nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result);
-	} while (count);
-	/* Update file length */
-	nfs_grow_file(page, offset, written);
-	/* Set the PG_uptodate flag? */
-	nfs_mark_uptodate(page, offset, written);
-
-	if (PageError(page))
-		ClearPageError(page);
-
-io_error:
-	nfs_end_data_update(inode);
-	end_page_writeback(page);
-	nfs_writedata_free(wdata);
-	return written ? written : result;
-}
-
-static int nfs_writepage_async(struct nfs_open_context *ctx,
-		struct inode *inode, struct page *page,
+static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
 		unsigned int offset, unsigned int count)
 {
 	struct nfs_page	*req;
+	int ret;
 
-	req = nfs_update_request(ctx, inode, page, offset, count);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
+	for (;;) {
+		req = nfs_update_request(ctx, page, offset, count);
+		if (!IS_ERR(req))
+			break;
+		ret = PTR_ERR(req);
+		if (ret != -EBUSY)
+			return ret;
+		ret = nfs_wb_page(page->mapping->host, page);
+		if (ret != 0)
+			return ret;
+	}
 	/* Update file length */
 	nfs_grow_file(page, offset, count);
 	/* Set the PG_uptodate flag? */
@@ -289,73 +245,94 @@
 }
 
 /*
+ * Find an associated nfs write request, and prepare to flush it out
+ * Returns 1 if there was no write request, or if the request was
+ * already tagged by nfs_set_page_dirty. Returns 0 if the request
+ * was not tagged.
+ * May also return an error if the user signalled nfs_wait_on_request().
+ */
+static int nfs_page_mark_flush(struct page *page)
+{
+	struct nfs_page *req;
+	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+	int ret;
+
+	spin_lock(req_lock);
+	for(;;) {
+		req = nfs_page_find_request_locked(page);
+		if (req == NULL) {
+			spin_unlock(req_lock);
+			return 1;
+		}
+		if (nfs_lock_request_dontget(req))
+			break;
+		/* Note: If we hold the page lock, as is the case in nfs_writepage,
+		 *	 then the call to nfs_lock_request_dontget() will always
+		 *	 succeed provided that someone hasn't already marked the
+		 *	 request as dirty (in which case we don't care).
+		 */
+		spin_unlock(req_lock);
+		ret = nfs_wait_on_request(req);
+		nfs_release_request(req);
+		if (ret != 0)
+			return ret;
+		spin_lock(req_lock);
+	}
+	spin_unlock(req_lock);
+	if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0) {
+		nfs_mark_request_dirty(req);
+		set_page_writeback(page);
+	}
+	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
+	nfs_unlock_request(req);
+	return ret;
+}
+
+/*
  * Write an mmapped page to the server.
  */
-int nfs_writepage(struct page *page, struct writeback_control *wbc)
+static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
 {
 	struct nfs_open_context *ctx;
 	struct inode *inode = page->mapping->host;
-	unsigned long end_index;
-	unsigned offset = PAGE_CACHE_SIZE;
-	loff_t i_size = i_size_read(inode);
-	int inode_referenced = 0;
-	int priority = wb_priority(wbc);
+	unsigned offset;
 	int err;
 
 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
 	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
 
-	/*
-	 * Note: We need to ensure that we have a reference to the inode
-	 *       if we are to do asynchronous writes. If not, waiting
-	 *       in nfs_wait_on_request() may deadlock with clear_inode().
-	 *
-	 *       If igrab() fails here, then it is in any case safe to
-	 *       call nfs_wb_page(), since there will be no pending writes.
-	 */
-	if (igrab(inode) != 0)
-		inode_referenced = 1;
-	end_index = i_size >> PAGE_CACHE_SHIFT;
-
-	/* Ensure we've flushed out any previous writes */
-	nfs_wb_page_priority(inode, page, priority);
-
-	/* easy case */
-	if (page->index < end_index)
-		goto do_it;
-	/* things got complicated... */
-	offset = i_size & (PAGE_CACHE_SIZE-1);
-
-	/* OK, are we completely out? */
-	err = 0; /* potential race with truncate - ignore */
-	if (page->index >= end_index+1 || !offset)
+	err = nfs_page_mark_flush(page);
+	if (err <= 0)
 		goto out;
-do_it:
+	err = 0;
+	offset = nfs_page_length(page);
+	if (!offset)
+		goto out;
+
 	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
 	if (ctx == NULL) {
 		err = -EBADF;
 		goto out;
 	}
-	lock_kernel();
-	if (!IS_SYNC(inode) && inode_referenced) {
-		err = nfs_writepage_async(ctx, inode, page, 0, offset);
-		if (!wbc->for_writepages)
-			nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
-	} else {
-		err = nfs_writepage_sync(ctx, inode, page, 0,
-						offset, priority);
-		if (err >= 0) {
-			if (err != offset)
-				redirty_page_for_writepage(wbc, page);
-			err = 0;
-		}
-	}
-	unlock_kernel();
+	err = nfs_writepage_setup(ctx, page, 0, offset);
 	put_nfs_open_context(ctx);
+	if (err != 0)
+		goto out;
+	err = nfs_page_mark_flush(page);
+	if (err > 0)
+		err = 0;
 out:
+	if (!wbc->for_writepages)
+		nfs_flush_mapping(page->mapping, wbc, wb_priority(wbc));
+	return err;
+}
+
+int nfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	int err;
+
+	err = nfs_writepage_locked(page, wbc);
 	unlock_page(page);
-	if (inode_referenced)
-		iput(inode);
 	return err; 
 }
 
@@ -379,21 +356,18 @@
 			return 0;
 		nfs_wait_on_write_congestion(mapping, 0);
 	}
-	err = nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
+	err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc));
 	if (err < 0)
 		goto out;
 	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
-	wbc->nr_to_write -= err;
 	if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
 		err = nfs_wait_on_requests(inode, 0, 0);
 		if (err < 0)
 			goto out;
 	}
 	err = nfs_commit_inode(inode, wb_priority(wbc));
-	if (err > 0) {
-		wbc->nr_to_write -= err;
+	if (err > 0)
 		err = 0;
-	}
 out:
 	clear_bit(BDI_write_congested, &bdi->state);
 	wake_up_all(&nfs_write_congestion);
@@ -420,6 +394,7 @@
 			nfsi->change_attr++;
 	}
 	SetPagePrivate(req->wb_page);
+	set_page_private(req->wb_page, (unsigned long)req);
 	nfsi->npages++;
 	atomic_inc(&req->wb_count);
 	return 0;
@@ -436,6 +411,7 @@
 	BUG_ON (!NFS_WBACK_BUSY(req));
 
 	spin_lock(&nfsi->req_lock);
+	set_page_private(req->wb_page, 0);
 	ClearPagePrivate(req->wb_page);
 	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
 	nfsi->npages--;
@@ -450,33 +426,6 @@
 }
 
 /*
- * Find a request
- */
-static inline struct nfs_page *
-_nfs_find_request(struct inode *inode, unsigned long index)
-{
-	struct nfs_inode *nfsi = NFS_I(inode);
-	struct nfs_page *req;
-
-	req = (struct nfs_page*)radix_tree_lookup(&nfsi->nfs_page_tree, index);
-	if (req)
-		atomic_inc(&req->wb_count);
-	return req;
-}
-
-static struct nfs_page *
-nfs_find_request(struct inode *inode, unsigned long index)
-{
-	struct nfs_page		*req;
-	struct nfs_inode	*nfsi = NFS_I(inode);
-
-	spin_lock(&nfsi->req_lock);
-	req = _nfs_find_request(inode, index);
-	spin_unlock(&nfsi->req_lock);
-	return req;
-}
-
-/*
  * Add a request to the inode's dirty list.
  */
 static void
@@ -491,8 +440,14 @@
 	nfs_list_add_request(req, &nfsi->dirty);
 	nfsi->ndirty++;
 	spin_unlock(&nfsi->req_lock);
-	inc_zone_page_state(req->wb_page, NR_FILE_DIRTY);
-	mark_inode_dirty(inode);
+	__mark_inode_dirty(inode, I_DIRTY_PAGES);
+}
+
+static void
+nfs_redirty_request(struct nfs_page *req)
+{
+	clear_bit(PG_FLUSHING, &req->wb_flags);
+	__set_page_dirty_nobuffers(req->wb_page);
 }
 
 /*
@@ -501,8 +456,7 @@
 static inline int
 nfs_dirty_request(struct nfs_page *req)
 {
-	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
-	return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty;
+	return test_bit(PG_FLUSHING, &req->wb_flags) == 0;
 }
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
@@ -520,7 +474,7 @@
 	nfsi->ncommit++;
 	spin_unlock(&nfsi->req_lock);
 	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-	mark_inode_dirty(inode);
+	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 }
 #endif
 
@@ -597,31 +551,6 @@
 	}
 }
 
-/*
- * nfs_scan_dirty - Scan an inode for dirty requests
- * @inode: NFS inode to scan
- * @dst: destination list
- * @idx_start: lower bound of page->index to scan.
- * @npages: idx_start + npages sets the upper bound to scan.
- *
- * Moves requests from the inode's dirty page list.
- * The requests are *not* checked to ensure that they form a contiguous set.
- */
-static int
-nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
-{
-	struct nfs_inode *nfsi = NFS_I(inode);
-	int res = 0;
-
-	if (nfsi->ndirty != 0) {
-		res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
-		nfsi->ndirty -= res;
-		if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
-			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
-	}
-	return res;
-}
-
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 /*
  * nfs_scan_commit - Scan an inode for commit requests
@@ -698,27 +627,27 @@
  * Note: Should always be called with the Page Lock held!
  */
 static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
-		struct inode *inode, struct page *page,
-		unsigned int offset, unsigned int bytes)
+		struct page *page, unsigned int offset, unsigned int bytes)
 {
-	struct nfs_server *server = NFS_SERVER(inode);
+	struct inode *inode = page->mapping->host;
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_page		*req, *new = NULL;
 	unsigned long		rqend, end;
 
 	end = offset + bytes;
 
-	if (nfs_wait_on_write_congestion(page->mapping, server->flags & NFS_MOUNT_INTR))
+	if (nfs_wait_on_write_congestion(page->mapping, NFS_SERVER(inode)->flags & NFS_MOUNT_INTR))
 		return ERR_PTR(-ERESTARTSYS);
 	for (;;) {
 		/* Loop over all inode entries and see if we find
 		 * A request for the page we wish to update
 		 */
 		spin_lock(&nfsi->req_lock);
-		req = _nfs_find_request(inode, page->index);
+		req = nfs_page_find_request_locked(page);
 		if (req) {
 			if (!nfs_lock_request_dontget(req)) {
 				int error;
+
 				spin_unlock(&nfsi->req_lock);
 				error = nfs_wait_on_request(req);
 				nfs_release_request(req);
@@ -745,7 +674,6 @@
 				return ERR_PTR(error);
 			}
 			spin_unlock(&nfsi->req_lock);
-			nfs_mark_request_dirty(new);
 			return new;
 		}
 		spin_unlock(&nfsi->req_lock);
@@ -786,9 +714,8 @@
 int nfs_flush_incompatible(struct file *file, struct page *page)
 {
 	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
-	struct inode	*inode = page->mapping->host;
 	struct nfs_page	*req;
-	int		status = 0;
+	int do_flush, status;
 	/*
 	 * Look for a request corresponding to this page. If there
 	 * is one, and it belongs to another file, we flush it out
@@ -797,13 +724,18 @@
 	 * Also do the same if we find a request from an existing
 	 * dropped page.
 	 */
-	req = nfs_find_request(inode, page->index);
-	if (req) {
-		if (req->wb_page != page || ctx != req->wb_context)
-			status = nfs_wb_page(inode, page);
+	do {
+		req = nfs_page_find_request(page);
+		if (req == NULL)
+			return 0;
+		do_flush = req->wb_page != page || req->wb_context != ctx
+			|| !nfs_dirty_request(req);
 		nfs_release_request(req);
-	}
-	return (status < 0) ? status : 0;
+		if (!do_flush)
+			return 0;
+		status = nfs_wb_page(page->mapping->host, page);
+	} while (status == 0);
+	return status;
 }
 
 /*
@@ -817,7 +749,6 @@
 {
 	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
 	struct inode	*inode = page->mapping->host;
-	struct nfs_page	*req;
 	int		status = 0;
 
 	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
@@ -827,62 +758,18 @@
 		file->f_dentry->d_name.name, count,
 		(long long)(page_offset(page) +offset));
 
-	if (IS_SYNC(inode)) {
-		status = nfs_writepage_sync(ctx, inode, page, offset, count, 0);
-		if (status > 0) {
-			if (offset == 0 && status == PAGE_CACHE_SIZE)
-				SetPageUptodate(page);
-			return 0;
-		}
-		return status;
-	}
-
 	/* If we're not using byte range locks, and we know the page
 	 * is entirely in cache, it may be more efficient to avoid
 	 * fragmenting write requests.
 	 */
 	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
-		loff_t end_offs = i_size_read(inode) - 1;
-		unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;
-
-		count += offset;
+		count = max(count + offset, nfs_page_length(page));
 		offset = 0;
-		if (unlikely(end_offs < 0)) {
-			/* Do nothing */
-		} else if (page->index == end_index) {
-			unsigned int pglen;
-			pglen = (unsigned int)(end_offs & (PAGE_CACHE_SIZE-1)) + 1;
-			if (count < pglen)
-				count = pglen;
-		} else if (page->index < end_index)
-			count = PAGE_CACHE_SIZE;
 	}
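
The max() line above replaces the old end_index arithmetic: when the page is known to be fully up to date, the write request is widened to cover everything cached in it so requests do not get fragmented. A worked example under the assumption of a 4096-byte page that lies entirely below i_size (so nfs_page_length() would report 4096):

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int offset = 1000, count = 200;	/* caller wrote 200 bytes at 1000 */
	unsigned int page_len = 4096;			/* page fully valid below i_size */

	count = MAX(count + offset, page_len);		/* widen to the cached data */
	offset = 0;

	printf("write becomes %u bytes at offset %u\n", count, offset);	/* 4096 at 0 */
	return 0;
}
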
 
-	/*
-	 * Try to find an NFS request corresponding to this page
-	 * and update it.
-	 * If the existing request cannot be updated, we must flush
-	 * it out now.
-	 */
-	do {
-		req = nfs_update_request(ctx, inode, page, offset, count);
-		status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
-		if (status != -EBUSY)
-			break;
-		/* Request could not be updated. Flush it out and try again */
-		status = nfs_wb_page(inode, page);
-	} while (status >= 0);
-	if (status < 0)
-		goto done;
+	status = nfs_writepage_setup(ctx, page, offset, count);
+	__set_page_dirty_nobuffers(page);
 
-	status = 0;
-
-	/* Update file length */
-	nfs_grow_file(page, offset, count);
-	/* Set the PG_uptodate flag? */
-	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
-	nfs_unlock_request(req);
-done:
         dprintk("NFS:      nfs_updatepage returns %d (isize %Ld)\n",
 			status, (long long)i_size_read(inode));
 	if (status < 0)
@@ -897,7 +784,7 @@
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 	if (!PageError(req->wb_page)) {
 		if (NFS_NEED_RESCHED(req)) {
-			nfs_mark_request_dirty(req);
+			nfs_redirty_request(req);
 			goto out;
 		} else if (NFS_NEED_COMMIT(req)) {
 			nfs_mark_request_commit(req);
@@ -979,9 +866,7 @@
 	sigset_t oldset;
 
 	rpc_clnt_sigmask(clnt, &oldset);
-	lock_kernel();
 	rpc_execute(&data->task);
-	unlock_kernel();
 	rpc_clnt_sigunmask(clnt, &oldset);
 }
 
@@ -1015,7 +900,6 @@
 	atomic_set(&req->wb_complete, requests);
 
 	ClearPageError(page);
-	set_page_writeback(page);
 	offset = 0;
 	nbytes = req->wb_bytes;
 	do {
@@ -1043,9 +927,9 @@
 	while (!list_empty(&list)) {
 		data = list_entry(list.next, struct nfs_write_data, pages);
 		list_del(&data->pages);
-		nfs_writedata_free(data);
+		nfs_writedata_release(data);
 	}
-	nfs_mark_request_dirty(req);
+	nfs_redirty_request(req);
 	nfs_clear_page_writeback(req);
 	return -ENOMEM;
 }
@@ -1076,7 +960,6 @@
 		nfs_list_remove_request(req);
 		nfs_list_add_request(req, &data->pages);
 		ClearPageError(req->wb_page);
-		set_page_writeback(req->wb_page);
 		*pages++ = req->wb_page;
 		count += req->wb_bytes;
 	}
@@ -1091,7 +974,7 @@
 	while (!list_empty(head)) {
 		struct nfs_page *req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
-		nfs_mark_request_dirty(req);
+		nfs_redirty_request(req);
 		nfs_clear_page_writeback(req);
 	}
 	return -ENOMEM;
@@ -1126,7 +1009,7 @@
 	while (!list_empty(head)) {
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
-		nfs_mark_request_dirty(req);
+		nfs_redirty_request(req);
 		nfs_clear_page_writeback(req);
 	}
 	return error;
@@ -1442,7 +1325,7 @@
 		}
 		/* We have a mismatch. Write the page again */
 		dprintk(" mismatch\n");
-		nfs_mark_request_dirty(req);
+		nfs_redirty_request(req);
 	next:
 		nfs_clear_page_writeback(req);
 	}
@@ -1459,18 +1342,17 @@
 }
 #endif
 
-static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
-			   unsigned int npages, int how)
+static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
 {
-	struct nfs_inode *nfsi = NFS_I(inode);
+	struct nfs_inode *nfsi = NFS_I(mapping->host);
 	LIST_HEAD(head);
-	int res;
+	long res;
 
 	spin_lock(&nfsi->req_lock);
-	res = nfs_scan_dirty(inode, &head, idx_start, npages);
+	res = nfs_scan_dirty(mapping, wbc, &head);
 	spin_unlock(&nfsi->req_lock);
 	if (res) {
-		int error = nfs_flush_list(inode, &head, res, how);
+		int error = nfs_flush_list(mapping->host, &head, res, how);
 		if (error < 0)
 			return error;
 	}
@@ -1496,38 +1378,62 @@
 }
 #endif
 
-int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
-		unsigned int npages, int how)
+long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
 {
+	struct inode *inode = mapping->host;
 	struct nfs_inode *nfsi = NFS_I(inode);
+	unsigned long idx_start, idx_end;
+	unsigned int npages = 0;
 	LIST_HEAD(head);
 	int nocommit = how & FLUSH_NOCOMMIT;
-	int pages, ret;
+	long pages, ret;
 
+	/* FIXME */
+	if (wbc->range_cyclic)
+		idx_start = 0;
+	else {
+		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
+		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		if (idx_end > idx_start) {
+			unsigned long l_npages = 1 + idx_end - idx_start;
+			npages = l_npages;
+			if (sizeof(npages) != sizeof(l_npages) &&
+					(unsigned long)npages != l_npages)
+				npages = 0;
+		}
+	}
 	how &= ~FLUSH_NOCOMMIT;
 	spin_lock(&nfsi->req_lock);
 	do {
+		wbc->pages_skipped = 0;
 		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
 		if (ret != 0)
 			continue;
-		pages = nfs_scan_dirty(inode, &head, idx_start, npages);
+		pages = nfs_scan_dirty(mapping, wbc, &head);
 		if (pages != 0) {
 			spin_unlock(&nfsi->req_lock);
-			if (how & FLUSH_INVALIDATE)
+			if (how & FLUSH_INVALIDATE) {
 				nfs_cancel_dirty_list(&head);
-			else
+				ret = pages;
+			} else
 				ret = nfs_flush_list(inode, &head, pages, how);
 			spin_lock(&nfsi->req_lock);
 			continue;
 		}
+		if (wbc->pages_skipped != 0)
+			continue;
 		if (nocommit)
 			break;
 		pages = nfs_scan_commit(inode, &head, idx_start, npages);
-		if (pages == 0)
+		if (pages == 0) {
+			if (wbc->pages_skipped != 0)
+				continue;
 			break;
+		}
 		if (how & FLUSH_INVALIDATE) {
 			spin_unlock(&nfsi->req_lock);
 			nfs_cancel_commit_list(&head);
+			ret = pages;
 			spin_lock(&nfsi->req_lock);
 			continue;
 		}
@@ -1540,6 +1446,106 @@
 	return ret;
 }
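
The range_cyclic/range_start handling at the top of nfs_sync_mapping_wait() above turns a byte range into an inclusive page-index range, and falls back to npages == 0 when the count cannot be represented in an unsigned int. A standalone sketch of that conversion; the 4K page size is an assumption for the example, not something the patch fixes:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4K pages for the example */

int main(void)
{
	long long range_start = 8192, range_end = 20000;	/* flush bytes 8192..20000 */
	unsigned long idx_start, idx_end, l_npages;
	unsigned int npages = 0;

	idx_start = range_start >> PAGE_SHIFT;			/* page 2 */
	idx_end   = range_end   >> PAGE_SHIFT;			/* page 4 */
	if (idx_end > idx_start) {
		l_npages = 1 + idx_end - idx_start;		/* 3 pages, inclusive */
		npages = l_npages;
		if (sizeof(npages) != sizeof(l_npages) &&
		    (unsigned long)npages != l_npages)
			npages = 0;				/* would truncate: scan all */
	}
	printf("pages %lu..%lu, npages=%u\n", idx_start, idx_end, npages);
	return 0;
}

The fallback of npages == 0 is what the wait/scan helpers already treat as "no upper bound" (compare the nfs_wait_on_requests(inode, 0, 0) call), so a truncated count degrades to a full scan rather than silently narrowing the range.
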
 
+/*
+ * flush the inode to disk.
+ */
+int nfs_wb_all(struct inode *inode)
+{
+	struct address_space *mapping = inode->i_mapping;
+	struct writeback_control wbc = {
+		.bdi = mapping->backing_dev_info,
+		.sync_mode = WB_SYNC_ALL,
+		.nr_to_write = LONG_MAX,
+		.for_writepages = 1,
+		.range_cyclic = 1,
+	};
+	int ret;
+
+	ret = generic_writepages(mapping, &wbc);
+	if (ret < 0)
+		goto out;
+	ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
+	if (ret >= 0)
+		return 0;
+out:
+	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+	return ret;
+}
+
+int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how)
+{
+	struct writeback_control wbc = {
+		.bdi = mapping->backing_dev_info,
+		.sync_mode = WB_SYNC_ALL,
+		.nr_to_write = LONG_MAX,
+		.range_start = range_start,
+		.range_end = range_end,
+		.for_writepages = 1,
+	};
+	int ret;
+
+	if (!(how & FLUSH_NOWRITEPAGE)) {
+		ret = generic_writepages(mapping, &wbc);
+		if (ret < 0)
+			goto out;
+	}
+	ret = nfs_sync_mapping_wait(mapping, &wbc, how);
+	if (ret >= 0)
+		return 0;
+out:
+	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+	return ret;
+}
+
+int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
+{
+	loff_t range_start = page_offset(page);
+	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+	struct writeback_control wbc = {
+		.bdi = page->mapping->backing_dev_info,
+		.sync_mode = WB_SYNC_ALL,
+		.nr_to_write = LONG_MAX,
+		.range_start = range_start,
+		.range_end = range_end,
+	};
+	int ret;
+
+	BUG_ON(!PageLocked(page));
+	if (!(how & FLUSH_NOWRITEPAGE) && clear_page_dirty_for_io(page)) {
+		ret = nfs_writepage_locked(page, &wbc);
+		if (ret < 0)
+			goto out;
+	}
+	ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
+	if (ret >= 0)
+		return 0;
+out:
+	__mark_inode_dirty(inode, I_DIRTY_PAGES);
+	return ret;
+}
+
+/*
+ * Write back all requests on one page - we do this before reading it.
+ */
+int nfs_wb_page(struct inode *inode, struct page* page)
+{
+	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
+}
+
+int nfs_set_page_dirty(struct page *page)
+{
+	struct nfs_page *req;
+
+	req = nfs_page_find_request(page);
+	if (req != NULL) {
+		/* Mark any existing write requests for flushing */
+		set_bit(PG_NEED_FLUSH, &req->wb_flags);
+		nfs_release_request(req);
+	}
+	return __set_page_dirty_nobuffers(page);
+}
+
+
 int __init nfs_init_writepagecache(void)
 {
 	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index b4baca3..277df40 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -24,10 +24,6 @@
 
 #define NFSDDBG_FACILITY		NFSDDBG_XDR
 
-#ifdef NFSD_OPTIMIZE_SPACE
-# define inline
-#endif
-
 
 /*
  * Mapping of S_IF* types to NFS file types
@@ -42,14 +38,14 @@
 /*
  * XDR functions for basic NFS types
  */
-static inline __be32 *
+static __be32 *
 encode_time3(__be32 *p, struct timespec *time)
 {
 	*p++ = htonl((u32) time->tv_sec); *p++ = htonl(time->tv_nsec);
 	return p;
 }
 
-static inline __be32 *
+static __be32 *
 decode_time3(__be32 *p, struct timespec *time)
 {
 	time->tv_sec = ntohl(*p++);
@@ -57,7 +53,7 @@
 	return p;
 }
 
-static inline __be32 *
+static __be32 *
 decode_fh(__be32 *p, struct svc_fh *fhp)
 {
 	unsigned int size;
@@ -77,7 +73,7 @@
 	return decode_fh(p, fhp);
 }
 
-static inline __be32 *
+static __be32 *
 encode_fh(__be32 *p, struct svc_fh *fhp)
 {
 	unsigned int size = fhp->fh_handle.fh_size;
@@ -91,7 +87,7 @@
  * Decode a file name and make sure that the path contains
  * no slashes or null bytes.
  */
-static inline __be32 *
+static __be32 *
 decode_filename(__be32 *p, char **namp, int *lenp)
 {
 	char		*name;
@@ -107,7 +103,7 @@
 	return p;
 }
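
The comment above decode_filename() says the path may contain no slashes or null bytes. A hypothetical standalone version of that check, where filename_ok() is an illustrative helper rather than the nfsd code:

#include <stdio.h>

/* Accept a length-prefixed name only if it contains no '/' and no NUL. */
static int filename_ok(const char *name, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (name[i] == '\0' || name[i] == '/')
			return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", filename_ok("hello", 5));	/* 1: fine */
	printf("%d\n", filename_ok("a/b", 3));		/* 0: slash rejected */
	printf("%d\n", filename_ok("a\0b", 3));		/* 0: embedded NUL rejected */
	return 0;
}
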
 
-static inline __be32 *
+static __be32 *
 decode_sattr3(__be32 *p, struct iattr *iap)
 {
 	u32	tmp;
@@ -153,7 +149,7 @@
 	return p;
 }
 
-static inline __be32 *
+static __be32 *
 encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
 	      struct kstat *stat)
 {
@@ -186,7 +182,7 @@
 	return p;
 }
 
-static inline __be32 *
+static __be32 *
 encode_saved_post_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
 {
 	struct inode	*inode = fhp->fh_dentry->d_inode;
@@ -776,7 +772,7 @@
 		return xdr_ressize_check(rqstp, p);
 }
 
-static inline __be32 *
+static __be32 *
 encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name,
 	     int namlen, ino_t ino)
 {
@@ -790,7 +786,7 @@
 	return p;
 }
 
-static inline __be32 *
+static __be32 *
 encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p,
 		struct svc_fh *fhp)
 {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 293b649..640c92b 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -84,10 +84,10 @@
  */
 static DEFINE_MUTEX(client_mutex);
 
-static kmem_cache_t *stateowner_slab = NULL;
-static kmem_cache_t *file_slab = NULL;
-static kmem_cache_t *stateid_slab = NULL;
-static kmem_cache_t *deleg_slab = NULL;
+static struct kmem_cache *stateowner_slab = NULL;
+static struct kmem_cache *file_slab = NULL;
+static struct kmem_cache *stateid_slab = NULL;
+static struct kmem_cache *deleg_slab = NULL;
 
 void
 nfs4_lock_state(void)
@@ -1003,7 +1003,7 @@
 }
 
 static void
-nfsd4_free_slab(kmem_cache_t **slab)
+nfsd4_free_slab(struct kmem_cache **slab)
 {
 	if (*slab == NULL)
 		return;
@@ -1829,9 +1829,8 @@
 }
 
 static struct workqueue_struct *laundry_wq;
-static struct work_struct laundromat_work;
-static void laundromat_main(void *);
-static DECLARE_WORK(laundromat_work, laundromat_main, NULL);
+static void laundromat_main(struct work_struct *);
+static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
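
The laundromat_work conversion above is one instance of the workqueue API change applied throughout this merge: work functions now take a struct work_struct * and recover their context with container_of() instead of being handed a void * argument, and periodic items become struct delayed_work. A freestanding model of the pattern, where my_ctx, my_work_fn() and the toy work_struct/container_of definitions are illustrative rather than kernel code:

#include <stdio.h>
#include <stddef.h>

/* toy stand-ins for the kernel types the pattern relies on */
struct work_struct { void (*func)(struct work_struct *); };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct my_ctx {
	int id;
	struct work_struct work;	/* embedded, like hr_write_timeout_work etc. */
};

static void my_work_fn(struct work_struct *work)
{
	/* recover the enclosing object from the embedded work item */
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);

	printf("work ran for ctx %d\n", ctx->id);
}

int main(void)
{
	struct my_ctx ctx = { .id = 42 };

	ctx.work.func = my_work_fn;	/* what INIT_WORK(&ctx.work, my_work_fn) records */
	ctx.work.func(&ctx.work);	/* what the workqueue thread eventually does */
	return 0;
}

In the kernel, queue_work() or queue_delayed_work() plays the role of the direct call in main(); the point of the conversion is that the context no longer travels as a separate void * but is recovered from the work item's address.
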
 
 __be32
 nfsd4_renew(clientid_t *clid)
@@ -1940,7 +1939,7 @@
 }
 
 void
-laundromat_main(void *not_used)
+laundromat_main(struct work_struct *not_used)
 {
 	time_t t;
 
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 56ebb14..f5243f9 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -18,11 +18,6 @@
 
 #define NFSDDBG_FACILITY		NFSDDBG_XDR
 
-
-#ifdef NFSD_OPTIMIZE_SPACE
-# define inline
-#endif
-
 /*
  * Mapping of S_IF* types to NFS file types
  */
@@ -55,7 +50,7 @@
 	return decode_fh(p, fhp);
 }
 
-static inline __be32 *
+static __be32 *
 encode_fh(__be32 *p, struct svc_fh *fhp)
 {
 	memcpy(p, &fhp->fh_handle.fh_base, NFS_FHSIZE);
@@ -66,7 +61,7 @@
  * Decode a file name and make sure that the path contains
  * no slashes or null bytes.
  */
-static inline __be32 *
+static __be32 *
 decode_filename(__be32 *p, char **namp, int *lenp)
 {
 	char		*name;
@@ -82,7 +77,7 @@
 	return p;
 }
 
-static inline __be32 *
+static __be32 *
 decode_pathname(__be32 *p, char **namp, int *lenp)
 {
 	char		*name;
@@ -98,7 +93,7 @@
 	return p;
 }
 
-static inline __be32 *
+static __be32 *
 decode_sattr(__be32 *p, struct iattr *iap)
 {
 	u32	tmp, tmp1;
diff --git a/fs/nls/nls_cp936.c b/fs/nls/nls_cp936.c
index 046fde8..65e640c 100644
--- a/fs/nls/nls_cp936.c
+++ b/fs/nls/nls_cp936.c
@@ -4421,6 +4421,73 @@
 	c2u_F8, c2u_F9, c2u_FA, c2u_FB, c2u_FC, c2u_FD, c2u_FE, NULL,   
 };
 
+static unsigned char u2c_00[512] = {
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+	0xA1, 0xE8, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xEC, /* 0xA4-0xA7 */
+	0xA1, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+	0xA1, 0xE3, 0xA1, 0xC0, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xA4, /* 0xB4-0xB7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xC1, /* 0xD4-0xD7 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+	0xA8, 0xA4, 0xA8, 0xA2, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+	0xA8, 0xA8, 0xA8, 0xA6, 0xA8, 0xBA, 0x00, 0x00, /* 0xE8-0xEB */
+	0xA8, 0xAC, 0xA8, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+	0x00, 0x00, 0x00, 0x00, 0xA8, 0xB0, 0xA8, 0xAE, /* 0xF0-0xF3 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xC2, /* 0xF4-0xF7 */
+	0x00, 0x00, 0xA8, 0xB4, 0xA8, 0xB2, 0x00, 0x00, /* 0xF8-0xFB */
+	0xA8, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
 static unsigned char u2c_01[512] = {
 	0xA8, 0xA1, 0xA8, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
@@ -10825,7 +10892,7 @@
 };
 
 static unsigned char *page_uni2charset[256] = {
-	NULL,   u2c_01, u2c_02, u2c_03, u2c_04, NULL,   NULL,   NULL,   
+	u2c_00, u2c_01, u2c_02, u2c_03, u2c_04, NULL,   NULL,   NULL,
 	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
 	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
 	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
@@ -10936,11 +11003,34 @@
 	unsigned char *uni2charset;
 	unsigned char cl = uni&0xFF;
 	unsigned char ch = (uni>>8)&0xFF;
-	int n;
+	unsigned char out0, out1;
 
 	if (boundlen <= 0)
 		return -ENAMETOOLONG;
 
+	if (uni == 0x20ac) { /* Euro symbol. The only exception with a non-ASCII Unicode value */
+		out[0] = 0x80;
+		return 1;
+	}
+
+	if (ch == 0) { /* handle the U00 plane */
+		/* if (cl == 0) return -EINVAL; */ /* U0000 is legal in cp936 */
+		out0 = u2c_00[cl*2];
+		out1 = u2c_00[cl*2+1];
+		if (out0 == 0x00 && out1 == 0x00) {
+			if (cl<0x80) {
+				out[0] = cl;
+				return 1;
+			}
+			return -EINVAL;
+		} else {
+			if (boundlen <= 1)
+				return -ENAMETOOLONG;
+			out[0] = out0;
+			out[1] = out1;
+			return 2;
+		}
+	}
 
 	uni2charset = page_uni2charset[ch];
 	if (uni2charset) {
@@ -10950,15 +11040,10 @@
 		out[1] = uni2charset[cl*2+1];
 		if (out[0] == 0x00 && out[1] == 0x00)
 			return -EINVAL;
-		n = 2;
-	} else if (ch==0 && cl) {
-		out[0] = cl;
-		n = 1;
+		return 2;
 	}
 	else
 		return -EINVAL;
-
-	return n;
 }
 
 static int char2uni(const unsigned char *rawstring, int boundlen,
@@ -10972,7 +11057,11 @@
 		return -ENAMETOOLONG;
 
 	if (boundlen == 1) {
-		*uni = rawstring[0];
+		if (rawstring[0] == 0x80) { /* Euro symbol. The only exception with a non-ASCII Unicode value */
+			*uni = 0x20ac;
+		} else {
+			*uni = rawstring[0];
+		}
 		return 1;
 	}
 
@@ -10986,7 +11075,11 @@
 			return -EINVAL;
 		n = 2;
 	} else{
-		*uni = ch;
+		if (ch == 0x80) { /* Euro symbol. The only exception with a non-ASCII Unicode value */
+			*uni = 0x20ac;
+		} else {
+			*uni = ch;
+		}
 		n = 1;
 	}
 	return n;
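
The branches added above make the single-byte cases explicit: CP936 byte 0x80 is the Euro sign U+20AC in both directions, plain ASCII passes through unchanged, and everything else on the U+00xx page goes through the new u2c_00 table. A small standalone model of the uni-to-char direction, where cp936_single() is a hypothetical helper and the two-byte table lookups are deliberately left out:

#include <stdio.h>

static int cp936_single(unsigned int uni, unsigned char *out)
{
	if (uni == 0x20ac) {		/* Euro sign: the one non-ASCII single byte */
		*out = 0x80;
		return 1;
	}
	if (uni < 0x80) {		/* plain ASCII passes through */
		*out = (unsigned char)uni;
		return 1;
	}
	return -1;			/* real code consults u2c_00 (two bytes) here */
}

int main(void)
{
	unsigned char b;

	if (cp936_single(0x20ac, &b) == 1)
		printf("U+20AC -> 0x%02X\n", b);	/* 0x80 */
	if (cp936_single('A', &b) == 1)
		printf("U+0041 -> 0x%02X\n", b);	/* 0x41 */
	printf("U+00A4 -> needs table (%d)\n", cp936_single(0xa4, &b));
	return 0;
}
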
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 9f08e85..c577d8e 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -1272,7 +1272,7 @@
 {
 	ntfs_attr_search_ctx *ctx;
 
-	ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, SLAB_NOFS);
+	ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS);
 	if (ctx)
 		ntfs_attr_init_search_ctx(ctx, ni, mrec);
 	return ctx;
diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c
index e32cde4..2194eff 100644
--- a/fs/ntfs/index.c
+++ b/fs/ntfs/index.c
@@ -38,7 +38,7 @@
 {
 	ntfs_index_context *ictx;
 
-	ictx = kmem_cache_alloc(ntfs_index_ctx_cache, SLAB_NOFS);
+	ictx = kmem_cache_alloc(ntfs_index_ctx_cache, GFP_NOFS);
 	if (ictx)
 		*ictx = (ntfs_index_context){ .idx_ni = idx_ni };
 	return ictx;
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 2d3de9c8..2479898 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -324,7 +324,7 @@
 	ntfs_inode *ni;
 
 	ntfs_debug("Entering.");
-	ni = kmem_cache_alloc(ntfs_big_inode_cache, SLAB_NOFS);
+	ni = kmem_cache_alloc(ntfs_big_inode_cache, GFP_NOFS);
 	if (likely(ni != NULL)) {
 		ni->state = 0;
 		return VFS_I(ni);
@@ -349,7 +349,7 @@
 	ntfs_inode *ni;
 
 	ntfs_debug("Entering.");
-	ni = kmem_cache_alloc(ntfs_inode_cache, SLAB_NOFS);
+	ni = kmem_cache_alloc(ntfs_inode_cache, GFP_NOFS);
 	if (likely(ni != NULL)) {
 		ni->state = 0;
 		return ni;
diff --git a/fs/ntfs/unistr.c b/fs/ntfs/unistr.c
index 6a495f7..005ca4b 100644
--- a/fs/ntfs/unistr.c
+++ b/fs/ntfs/unistr.c
@@ -266,7 +266,7 @@
 
 	/* We do not trust outside sources. */
 	if (likely(ins)) {
-		ucs = kmem_cache_alloc(ntfs_name_cache, SLAB_NOFS);
+		ucs = kmem_cache_alloc(ntfs_name_cache, GFP_NOFS);
 		if (likely(ucs)) {
 			for (i = o = 0; i < ins_len; i += wc_len) {
 				wc_len = nls->char2uni(ins + i, ins_len - i,
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index f43bc5f..edc91ca 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -52,14 +52,14 @@
 			       u64 blkno);
 
 static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb,
-				     struct ocfs2_journal_handle *handle,
+				     handle_t *handle,
 				     struct inode *inode,
 				     int wanted,
 				     struct ocfs2_alloc_context *meta_ac,
 				     struct buffer_head *bhs[]);
 
 static int ocfs2_add_branch(struct ocfs2_super *osb,
-			    struct ocfs2_journal_handle *handle,
+			    handle_t *handle,
 			    struct inode *inode,
 			    struct buffer_head *fe_bh,
 			    struct buffer_head *eb_bh,
@@ -67,14 +67,14 @@
 			    struct ocfs2_alloc_context *meta_ac);
 
 static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
-				  struct ocfs2_journal_handle *handle,
+				  handle_t *handle,
 				  struct inode *inode,
 				  struct buffer_head *fe_bh,
 				  struct ocfs2_alloc_context *meta_ac,
 				  struct buffer_head **ret_new_eb_bh);
 
 static int ocfs2_do_insert_extent(struct ocfs2_super *osb,
-				  struct ocfs2_journal_handle *handle,
+				  handle_t *handle,
 				  struct inode *inode,
 				  struct buffer_head *fe_bh,
 				  u64 blkno,
@@ -152,7 +152,7 @@
  * l_count for you
  */
 static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb,
-				     struct ocfs2_journal_handle *handle,
+				     handle_t *handle,
 				     struct inode *inode,
 				     int wanted,
 				     struct ocfs2_alloc_context *meta_ac,
@@ -253,7 +253,7 @@
  * contain a single record with e_clusters == 0.
  */
 static int ocfs2_add_branch(struct ocfs2_super *osb,
-			    struct ocfs2_journal_handle *handle,
+			    handle_t *handle,
 			    struct inode *inode,
 			    struct buffer_head *fe_bh,
 			    struct buffer_head *eb_bh,
@@ -418,7 +418,7 @@
  * after this call.
  */
 static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
-				  struct ocfs2_journal_handle *handle,
+				  handle_t *handle,
 				  struct inode *inode,
 				  struct buffer_head *fe_bh,
 				  struct ocfs2_alloc_context *meta_ac,
@@ -520,7 +520,7 @@
  * down.
  */
 static int ocfs2_do_insert_extent(struct ocfs2_super *osb,
-				  struct ocfs2_journal_handle *handle,
+				  handle_t *handle,
 				  struct inode *inode,
 				  struct buffer_head *fe_bh,
 				  u64 start_blk,
@@ -809,7 +809,7 @@
 
 /* the caller needs to update fe->i_clusters */
 int ocfs2_insert_extent(struct ocfs2_super *osb,
-			struct ocfs2_journal_handle *handle,
+			handle_t *handle,
 			struct inode *inode,
 			struct buffer_head *fe_bh,
 			u64 start_blk,
@@ -951,7 +951,7 @@
 }
 
 static int ocfs2_truncate_log_append(struct ocfs2_super *osb,
-				     struct ocfs2_journal_handle *handle,
+				     handle_t *handle,
 				     u64 start_blk,
 				     unsigned int num_clusters)
 {
@@ -1034,7 +1034,7 @@
 }
 
 static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
-					 struct ocfs2_journal_handle *handle,
+					 handle_t *handle,
 					 struct inode *data_alloc_inode,
 					 struct buffer_head *data_alloc_bh)
 {
@@ -1113,7 +1113,7 @@
 {
 	int status;
 	unsigned int num_to_flush;
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle;
 	struct inode *tl_inode = osb->osb_tl_inode;
 	struct inode *data_alloc_inode = NULL;
 	struct buffer_head *tl_bh = osb->osb_tl_bh;
@@ -1130,7 +1130,7 @@
 	if (!OCFS2_IS_VALID_DINODE(di)) {
 		OCFS2_RO_ON_INVALID_DINODE(osb->sb, di);
 		status = -EIO;
-		goto bail;
+		goto out;
 	}
 
 	num_to_flush = le16_to_cpu(tl->tl_used);
@@ -1138,14 +1138,7 @@
 	     num_to_flush, (unsigned long long)OCFS2_I(tl_inode)->ip_blkno);
 	if (!num_to_flush) {
 		status = 0;
-		goto bail;
-	}
-
-	handle = ocfs2_alloc_handle(osb);
-	if (!handle) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto bail;
+		goto out;
 	}
 
 	data_alloc_inode = ocfs2_get_system_file_inode(osb,
@@ -1154,41 +1147,40 @@
 	if (!data_alloc_inode) {
 		status = -EINVAL;
 		mlog(ML_ERROR, "Could not get bitmap inode!\n");
-		goto bail;
+		goto out;
 	}
 
-	ocfs2_handle_add_inode(handle, data_alloc_inode);
-	status = ocfs2_meta_lock(data_alloc_inode, handle, &data_alloc_bh, 1);
+	mutex_lock(&data_alloc_inode->i_mutex);
+
+	status = ocfs2_meta_lock(data_alloc_inode, &data_alloc_bh, 1);
 	if (status < 0) {
 		mlog_errno(status);
-		goto bail;
+		goto out_mutex;
 	}
 
-	handle = ocfs2_start_trans(osb, handle, OCFS2_TRUNCATE_LOG_UPDATE);
+	handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
-		handle = NULL;
 		mlog_errno(status);
-		goto bail;
+		goto out_unlock;
 	}
 
 	status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode,
 					       data_alloc_bh);
-	if (status < 0) {
+	if (status < 0)
 		mlog_errno(status);
-		goto bail;
-	}
 
-bail:
-	if (handle)
-		ocfs2_commit_trans(handle);
+	ocfs2_commit_trans(osb, handle);
 
-	if (data_alloc_inode)
-		iput(data_alloc_inode);
+out_unlock:
+	brelse(data_alloc_bh);
+	ocfs2_meta_unlock(data_alloc_inode, 1);
 
-	if (data_alloc_bh)
-		brelse(data_alloc_bh);
+out_mutex:
+	mutex_unlock(&data_alloc_inode->i_mutex);
+	iput(data_alloc_inode);
 
+out:
 	mlog_exit(status);
 	return status;
 }
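
The rework above replaces the old catch-all bail: label with out_unlock/out_mutex/out labels, so i_mutex, the cluster meta lock and the transaction are each released exactly once and in the reverse order they were taken. A minimal userspace sketch of that unwinding shape, using hypothetical take_a()/take_b()/take_c() steps in place of the real ocfs2 calls:

#include <stdio.h>

static int take_a(void) { puts("take a"); return 0; }
static int take_b(void) { puts("take b"); return 0; }
static int take_c(void) { puts("take c"); return -1; }	/* pretend this one fails */

static int do_work(void)
{
	int status;

	status = take_a();
	if (status < 0)
		goto out;
	status = take_b();
	if (status < 0)
		goto out_a;
	status = take_c();
	if (status < 0)
		goto out_b;

	puts("work");
	/* fall through: success and failure release in the same reverse order */
out_b:
	puts("release b");
out_a:
	puts("release a");
out:
	return status;
}

int main(void)
{
	printf("status=%d\n", do_work());
	return 0;
}
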
@@ -1205,10 +1197,12 @@
 	return status;
 }
 
-static void ocfs2_truncate_log_worker(void *data)
+static void ocfs2_truncate_log_worker(struct work_struct *work)
 {
 	int status;
-	struct ocfs2_super *osb = data;
+	struct ocfs2_super *osb =
+		container_of(work, struct ocfs2_super,
+			     osb_truncate_log_wq.work);
 
 	mlog_entry_void();
 
@@ -1347,7 +1341,7 @@
 	int i;
 	unsigned int clusters, num_recs, start_cluster;
 	u64 start_blk;
-	struct ocfs2_journal_handle *handle;
+	handle_t *handle;
 	struct inode *tl_inode = osb->osb_tl_inode;
 	struct ocfs2_truncate_log *tl;
 
@@ -1373,8 +1367,7 @@
 			}
 		}
 
-		handle = ocfs2_start_trans(osb, NULL,
-					   OCFS2_TRUNCATE_LOG_UPDATE);
+		handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE);
 		if (IS_ERR(handle)) {
 			status = PTR_ERR(handle);
 			mlog_errno(status);
@@ -1387,7 +1380,7 @@
 
 		status = ocfs2_truncate_log_append(osb, handle,
 						   start_blk, clusters);
-		ocfs2_commit_trans(handle);
+		ocfs2_commit_trans(osb, handle);
 		if (status < 0) {
 			mlog_errno(status);
 			goto bail_up;
@@ -1441,7 +1434,8 @@
 	/* ocfs2_truncate_log_shutdown keys on the existence of
 	 * osb->osb_tl_inode so we don't set any of the osb variables
 	 * until we're sure all is well. */
-	INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb);
+	INIT_DELAYED_WORK(&osb->osb_truncate_log_wq,
+			  ocfs2_truncate_log_worker);
 	osb->osb_tl_bh    = tl_bh;
 	osb->osb_tl_inode = tl_inode;
 
@@ -1543,7 +1537,7 @@
 			     struct inode *inode,
 			     struct buffer_head *fe_bh,
 			     struct buffer_head *old_last_eb_bh,
-			     struct ocfs2_journal_handle *handle,
+			     handle_t *handle,
 			     struct ocfs2_truncate_context *tc)
 {
 	int status, i, depth;
@@ -1782,7 +1776,7 @@
 	struct ocfs2_extent_block *eb;
 	struct ocfs2_extent_list *el;
 	struct buffer_head *last_eb_bh;
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle = NULL;
 	struct inode *tl_inode = osb->osb_tl_inode;
 
 	mlog_entry_void();
@@ -1868,7 +1862,7 @@
 
 	credits = ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del,
 						fe, el);
-	handle = ocfs2_start_trans(osb, NULL, credits);
+	handle = ocfs2_start_trans(osb, credits);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
 		handle = NULL;
@@ -1891,7 +1885,7 @@
 	mutex_unlock(&tl_inode->i_mutex);
 	tl_sem = 0;
 
-	ocfs2_commit_trans(handle);
+	ocfs2_commit_trans(osb, handle);
 	handle = NULL;
 
 	BUG_ON(le32_to_cpu(fe->i_clusters) < target_i_clusters);
@@ -1906,7 +1900,7 @@
 		mutex_unlock(&tl_inode->i_mutex);
 
 	if (handle)
-		ocfs2_commit_trans(handle);
+		ocfs2_commit_trans(osb, handle);
 
 	if (last_eb_bh)
 		brelse(last_eb_bh);
@@ -2011,10 +2005,7 @@
 		mutex_lock(&ext_alloc_inode->i_mutex);
 		(*tc)->tc_ext_alloc_inode = ext_alloc_inode;
 
-		status = ocfs2_meta_lock(ext_alloc_inode,
-					 NULL,
-					 &ext_alloc_bh,
-					 1);
+		status = ocfs2_meta_lock(ext_alloc_inode, &ext_alloc_bh, 1);
 		if (status < 0) {
 			mlog_errno(status);
 			goto bail;
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index 12ba897..0b82e80 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -28,7 +28,7 @@
 
 struct ocfs2_alloc_context;
 int ocfs2_insert_extent(struct ocfs2_super *osb,
-			struct ocfs2_journal_handle *handle,
+			handle_t *handle,
 			struct inode *inode,
 			struct buffer_head *fe_bh,
 			u64 blkno,
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 3d7c082..2f7268e 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -200,7 +200,7 @@
 
 	mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));
 
-	ret = ocfs2_meta_lock_with_page(inode, NULL, NULL, 0, page);
+	ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
 	if (ret != 0) {
 		if (ret == AOP_TRUNCATED_PAGE)
 			unlock = 0;
@@ -305,7 +305,7 @@
 
 	mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to);
 
-	ret = ocfs2_meta_lock_with_page(inode, NULL, NULL, 0, page);
+	ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
 	if (ret != 0) {
 		mlog_errno(ret);
 		goto out;
@@ -355,16 +355,16 @@
 	return ret;
 }
 
-struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
+handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
 							 struct page *page,
 							 unsigned from,
 							 unsigned to)
 {
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle = NULL;
 	int ret = 0;
 
-	handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
+	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 	if (!handle) {
 		ret = -ENOMEM;
 		mlog_errno(ret);
@@ -372,7 +372,7 @@
 	}
 
 	if (ocfs2_should_order_data(inode)) {
-		ret = walk_page_buffers(handle->k_handle,
+		ret = walk_page_buffers(handle,
 					page_buffers(page),
 					from, to, NULL,
 					ocfs2_journal_dirty_data);
@@ -382,7 +382,7 @@
 out:
 	if (ret) {
 		if (handle)
-			ocfs2_commit_trans(handle);
+			ocfs2_commit_trans(osb, handle);
 		handle = ERR_PTR(ret);
 	}
 	return handle;
@@ -394,7 +394,7 @@
 	int ret;
 	struct buffer_head *di_bh = NULL;
 	struct inode *inode = page->mapping->host;
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle = NULL;
 	struct ocfs2_dinode *di;
 
 	mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to);
@@ -412,7 +412,7 @@
 	 *    stale inode allocation image (i_size, i_clusters, etc).
 	 */
 
-	ret = ocfs2_meta_lock_with_page(inode, NULL, &di_bh, 1, page);
+	ret = ocfs2_meta_lock_with_page(inode, &di_bh, 1, page);
 	if (ret != 0) {
 		mlog_errno(ret);
 		goto out;
@@ -464,7 +464,7 @@
 	}
 
 out_commit:
-	ocfs2_commit_trans(handle);
+	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
 out_unlock_data:
 	ocfs2_data_unlock(inode, 1);
 out_unlock_meta:
@@ -490,7 +490,7 @@
 	 * accessed concurrently from multiple nodes.
 	 */
 	if (!INODE_JOURNAL(inode)) {
-		err = ocfs2_meta_lock(inode, NULL, NULL, 0);
+		err = ocfs2_meta_lock(inode, NULL, 0);
 		if (err) {
 			if (err != -ENOENT)
 				mlog_errno(err);
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index e88c3f0..f446a15 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -25,7 +25,7 @@
 int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
 			       unsigned from, unsigned to);
 
-struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
+handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
 							 struct page *page,
 							 unsigned from,
 							 unsigned to);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 305cba3..4cd9a95 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -141,7 +141,7 @@
 	 * recognizes a node going up and down in one iteration */
 	u64			hr_generation;
 
-	struct work_struct	hr_write_timeout_work;
+	struct delayed_work	hr_write_timeout_work;
 	unsigned long		hr_last_timeout_start;
 
 	/* Used during o2hb_check_slot to hold a copy of the block
@@ -156,9 +156,11 @@
 	int               wc_error;
 };
 
-static void o2hb_write_timeout(void *arg)
+static void o2hb_write_timeout(struct work_struct *work)
 {
-	struct o2hb_region *reg = arg;
+	struct o2hb_region *reg =
+		container_of(work, struct o2hb_region,
+			     hr_write_timeout_work.work);
 
 	mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
 	     "milliseconds\n", reg->hr_dev_name,
@@ -1404,7 +1406,7 @@
 		goto out;
 	}
 
-	INIT_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout, reg);
+	INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
 
 	/*
 	 * A node is considered live after it has beat LIVE_THRESHOLD
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 7bba98f..4705d65 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -88,7 +88,7 @@
 	o2quo_fence_self();
 }
 
-static void o2quo_make_decision(void *arg)
+static void o2quo_make_decision(struct work_struct *work)
 {
 	int quorum;
 	int lowest_hb, lowest_reachable = 0, fence = 0;
@@ -306,7 +306,7 @@
 	struct o2quo_state *qs = &o2quo_state;
 
 	spin_lock_init(&qs->qs_lock);
-	INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL);
+	INIT_WORK(&qs->qs_work, o2quo_make_decision);
 }
 
 void o2quo_exit(void)
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index b650efa..9b3209d 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -140,11 +140,11 @@
 		 [O2NET_ERR_DIED]	= -EHOSTDOWN,};
 
 /* can't quite avoid *all* internal declarations :/ */
-static void o2net_sc_connect_completed(void *arg);
-static void o2net_rx_until_empty(void *arg);
-static void o2net_shutdown_sc(void *arg);
+static void o2net_sc_connect_completed(struct work_struct *work);
+static void o2net_rx_until_empty(struct work_struct *work);
+static void o2net_shutdown_sc(struct work_struct *work);
 static void o2net_listen_data_ready(struct sock *sk, int bytes);
-static void o2net_sc_send_keep_req(void *arg);
+static void o2net_sc_send_keep_req(struct work_struct *work);
 static void o2net_idle_timer(unsigned long data);
 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
 
@@ -308,10 +308,10 @@
 	o2nm_node_get(node);
 	sc->sc_node = node;
 
-	INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed, sc);
-	INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty, sc);
-	INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc, sc);
-	INIT_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req, sc);
+	INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
+	INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
+	INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
+	INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
 
 	init_timer(&sc->sc_idle_timeout);
 	sc->sc_idle_timeout.function = o2net_idle_timer;
@@ -342,7 +342,7 @@
 		sc_put(sc);
 }
 static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
-					struct work_struct *work,
+					struct delayed_work *work,
 					int delay)
 {
 	sc_get(sc);
@@ -350,7 +350,7 @@
 		sc_put(sc);
 }
 static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc,
-					 struct work_struct *work)
+					 struct delayed_work *work)
 {
 	if (cancel_delayed_work(work))
 		sc_put(sc);
@@ -564,9 +564,11 @@
  * ourselves as state_change couldn't get the nn_lock and call set_nn_state
  * itself.
  */
-static void o2net_shutdown_sc(void *arg)
+static void o2net_shutdown_sc(struct work_struct *work)
 {
-	struct o2net_sock_container *sc = arg;
+	struct o2net_sock_container *sc =
+		container_of(work, struct o2net_sock_container,
+			     sc_shutdown_work);
 	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
 
 	sclog(sc, "shutting down\n");
@@ -1201,9 +1203,10 @@
 /* this work func is triggered by data ready.  it reads until it can read no
  * more.  it interprets 0, eof, as fatal.  if data_ready hits while we're doing
  * our work the work struct will be marked and we'll be called again. */
-static void o2net_rx_until_empty(void *arg)
+static void o2net_rx_until_empty(struct work_struct *work)
 {
-	struct o2net_sock_container *sc = arg;
+	struct o2net_sock_container *sc =
+		container_of(work, struct o2net_sock_container, sc_rx_work);
 	int ret;
 
 	do {
@@ -1249,9 +1252,11 @@
 
 /* called when a connect completes and after a sock is accepted.  the
  * rx path will see the response and mark the sc valid */
-static void o2net_sc_connect_completed(void *arg)
+static void o2net_sc_connect_completed(struct work_struct *work)
 {
-	struct o2net_sock_container *sc = arg;
+	struct o2net_sock_container *sc =
+		container_of(work, struct o2net_sock_container,
+			     sc_connect_work);
 
 	mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
               (unsigned long long)O2NET_PROTOCOL_VERSION,
@@ -1262,9 +1267,11 @@
 }
 
 /* this is called as a work_struct func. */
-static void o2net_sc_send_keep_req(void *arg)
+static void o2net_sc_send_keep_req(struct work_struct *work)
 {
-	struct o2net_sock_container *sc = arg;
+	struct o2net_sock_container *sc =
+		container_of(work, struct o2net_sock_container,
+			     sc_keepalive_work.work);
 
 	o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req));
 	sc_put(sc);
@@ -1314,14 +1321,15 @@
  * having a connect attempt fail, etc. This centralizes the logic which decides
  * if a connect attempt should be made or if we should give up and all future
  * transmit attempts should fail */
-static void o2net_start_connect(void *arg)
+static void o2net_start_connect(struct work_struct *work)
 {
-	struct o2net_node *nn = arg;
+	struct o2net_node *nn =
+		container_of(work, struct o2net_node, nn_connect_work.work);
 	struct o2net_sock_container *sc = NULL;
 	struct o2nm_node *node = NULL, *mynode = NULL;
 	struct socket *sock = NULL;
 	struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
-	int ret = 0;
+	int ret = 0, stop;
 
 	/* if we're greater we initiate tx, otherwise we accept */
 	if (o2nm_this_node() <= o2net_num_from_nn(nn))
@@ -1342,10 +1350,9 @@
 
 	spin_lock(&nn->nn_lock);
 	/* see if we already have one pending or have given up */
-	if (nn->nn_sc || nn->nn_persistent_error)
-		arg = NULL;
+	stop = (nn->nn_sc || nn->nn_persistent_error);
 	spin_unlock(&nn->nn_lock);
-	if (arg == NULL) /* *shrug*, needed some indicator */
+	if (stop)
 		goto out;
 
 	nn->nn_last_connect_attempt = jiffies;
@@ -1421,9 +1428,10 @@
 	return;
 }
 
-static void o2net_connect_expired(void *arg)
+static void o2net_connect_expired(struct work_struct *work)
 {
-	struct o2net_node *nn = arg;
+	struct o2net_node *nn =
+		container_of(work, struct o2net_node, nn_connect_expired.work);
 
 	spin_lock(&nn->nn_lock);
 	if (!nn->nn_sc_valid) {
@@ -1436,9 +1444,10 @@
 	spin_unlock(&nn->nn_lock);
 }
 
-static void o2net_still_up(void *arg)
+static void o2net_still_up(struct work_struct *work)
 {
-	struct o2net_node *nn = arg;
+	struct o2net_node *nn =
+		container_of(work, struct o2net_node, nn_still_up.work);
 
 	o2quo_hb_still_up(o2net_num_from_nn(nn));
 }
@@ -1644,9 +1653,9 @@
 	return ret;
 }
 
-static void o2net_accept_many(void *arg)
+static void o2net_accept_many(struct work_struct *work)
 {
-	struct socket *sock = arg;
+	struct socket *sock = o2net_listen_sock;
 	while (o2net_accept_one(sock) == 0)
 		cond_resched();
 }
@@ -1700,7 +1709,7 @@
 	write_unlock_bh(&sock->sk->sk_callback_lock);
 
 	o2net_listen_sock = sock;
-	INIT_WORK(&o2net_listen_work, o2net_accept_many, sock);
+	INIT_WORK(&o2net_listen_work, o2net_accept_many);
 
 	sock->sk->sk_reuse = 1;
 	ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
@@ -1819,9 +1828,10 @@
 		struct o2net_node *nn = o2net_nn_from_num(i);
 
 		spin_lock_init(&nn->nn_lock);
-		INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn);
-		INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn);
-		INIT_WORK(&nn->nn_still_up, o2net_still_up, nn);
+		INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect);
+		INIT_DELAYED_WORK(&nn->nn_connect_expired,
+				  o2net_connect_expired);
+		INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up);
 		/* until we see hb from a node we'll return einval */
 		nn->nn_persistent_error = -ENOTCONN;
 		init_waitqueue_head(&nn->nn_sc_wq);
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 4b46aac..daebbd3 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -86,18 +86,18 @@
 	 * connect attempt fails and so can be self-arming.  shutdown is
 	 * careful to first mark the nn such that no connects will be attempted
 	 * before canceling delayed connect work and flushing the queue. */
-	struct work_struct		nn_connect_work;
+	struct delayed_work		nn_connect_work;
 	unsigned long			nn_last_connect_attempt;
 
 	/* this is queued as nodes come up and is canceled when a connection is
 	 * established.  this expiring gives up on the node and errors out
 	 * transmits */
-	struct work_struct		nn_connect_expired;
+	struct delayed_work		nn_connect_expired;
 
 	/* after we give up on a socket we wait a while before deciding
 	 * that it is still heartbeating and that we should do some
 	 * quorum work */
-	struct work_struct		nn_still_up;
+	struct delayed_work		nn_still_up;
 };
 
 struct o2net_sock_container {
@@ -129,7 +129,7 @@
 	struct work_struct	sc_shutdown_work;
 
 	struct timer_list	sc_idle_timeout;
-	struct work_struct	sc_keepalive_work;
+	struct delayed_work	sc_keepalive_work;
 
 	unsigned		sc_handshake_ok:1;
 
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 04e0191..baad2aa 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -82,6 +82,7 @@
 	struct inode *inode = filp->f_dentry->d_inode;
 	struct super_block * sb = inode->i_sb;
 	unsigned int ra_sectors = 16;
+	int lock_level = 0;
 
 	mlog_entry("dirino=%llu\n",
 		   (unsigned long long)OCFS2_I(inode)->ip_blkno);
@@ -89,7 +90,15 @@
 	stored = 0;
 	bh = NULL;
 
-	error = ocfs2_meta_lock(inode, NULL, NULL, 0);
+	error = ocfs2_meta_lock_atime(inode, filp->f_vfsmnt, &lock_level);
+	if (lock_level && error >= 0) {
+		/* We release EX lock which used to update atime
+		 * and get PR lock again to reduce contention
+		 * on commonly accessed directories. */
+		ocfs2_meta_unlock(inode, 1);
+		lock_level = 0;
+		error = ocfs2_meta_lock(inode, NULL, 0);
+	}
 	if (error < 0) {
 		if (error != -ENOENT)
 			mlog_errno(error);
@@ -198,7 +207,7 @@
 
 	stored = 0;
 bail:
-	ocfs2_meta_unlock(inode, 0);
+	ocfs2_meta_unlock(inode, lock_level);
 
 bail_nolock:
 	mlog_exit(stored);
@@ -340,7 +349,7 @@
 
 /* returns a bh of the 1st new block in the allocation. */
 int ocfs2_do_extend_dir(struct super_block *sb,
-			struct ocfs2_journal_handle *handle,
+			handle_t *handle,
 			struct inode *dir,
 			struct buffer_head *parent_fe_bh,
 			struct ocfs2_alloc_context *data_ac,
@@ -398,7 +407,7 @@
 	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
 	struct ocfs2_alloc_context *data_ac = NULL;
 	struct ocfs2_alloc_context *meta_ac = NULL;
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle = NULL;
 	struct buffer_head *new_bh = NULL;
 	struct ocfs2_dir_entry * de;
 	struct super_block *sb = osb->sb;
@@ -409,13 +418,6 @@
 	mlog(0, "extending dir %llu (i_size = %lld)\n",
 	     (unsigned long long)OCFS2_I(dir)->ip_blkno, dir_i_size);
 
-	handle = ocfs2_alloc_handle(osb);
-	if (handle == NULL) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto bail;
-	}
-
 	/* dir->i_size is always block aligned. */
 	spin_lock(&OCFS2_I(dir)->ip_lock);
 	if (dir_i_size == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)) {
@@ -428,8 +430,7 @@
 		}
 
 		if (!num_free_extents) {
-			status = ocfs2_reserve_new_metadata(osb, handle,
-							    fe, &meta_ac);
+			status = ocfs2_reserve_new_metadata(osb, fe, &meta_ac);
 			if (status < 0) {
 				if (status != -ENOSPC)
 					mlog_errno(status);
@@ -437,7 +438,7 @@
 			}
 		}
 
-		status = ocfs2_reserve_clusters(osb, handle, 1, &data_ac);
+		status = ocfs2_reserve_clusters(osb, 1, &data_ac);
 		if (status < 0) {
 			if (status != -ENOSPC)
 				mlog_errno(status);
@@ -450,7 +451,7 @@
 		credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
 	}
 
-	handle = ocfs2_start_trans(osb, handle, credits);
+	handle = ocfs2_start_trans(osb, credits);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
 		handle = NULL;
@@ -496,7 +497,7 @@
 	get_bh(*new_de_bh);
 bail:
 	if (handle)
-		ocfs2_commit_trans(handle);
+		ocfs2_commit_trans(osb, handle);
 
 	if (data_ac)
 		ocfs2_free_alloc_context(data_ac);
diff --git a/fs/ocfs2/dir.h b/fs/ocfs2/dir.h
index 5f614ec..3f67e14 100644
--- a/fs/ocfs2/dir.h
+++ b/fs/ocfs2/dir.h
@@ -45,7 +45,7 @@
 				 struct buffer_head **ret_de_bh);
 struct ocfs2_alloc_context;
 int ocfs2_do_extend_dir(struct super_block *sb,
-			struct ocfs2_journal_handle *handle,
+			handle_t *handle,
 			struct inode *dir,
 			struct buffer_head *parent_fe_bh,
 			struct ocfs2_alloc_context *data_ac,
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index fa96818..6b6ff76 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -153,7 +153,7 @@
  * called functions that cannot be directly called from the
  * net message handlers for some reason, usually because
  * they need to send net messages of their own. */
-void dlm_dispatch_work(void *data);
+void dlm_dispatch_work(struct work_struct *work);
 
 struct dlm_lock_resource;
 struct dlm_work_item;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 8d1065f..420a375 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -68,7 +68,8 @@
 			goto out_free;
 
 	mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n",
-	     pages, DLM_HASH_PAGES, (unsigned long)DLM_BUCKETS_PER_PAGE);
+	     pages, (unsigned long)DLM_HASH_PAGES,
+	     (unsigned long)DLM_BUCKETS_PER_PAGE);
 	return vec;
 out_free:
 	dlm_free_pagevec(vec, i);
@@ -1296,7 +1297,7 @@
 
 	spin_lock_init(&dlm->work_lock);
 	INIT_LIST_HEAD(&dlm->work_list);
-	INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm);
+	INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
 
 	kref_init(&dlm->dlm_refs);
 	dlm->dlm_state = DLM_CTXT_NEW;
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 16b8d1b..941acf1 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -66,7 +66,7 @@
 static struct inode_operations dlmfs_dir_inode_operations;
 static struct inode_operations dlmfs_root_inode_operations;
 static struct inode_operations dlmfs_file_inode_operations;
-static kmem_cache_t *dlmfs_inode_cache;
+static struct kmem_cache *dlmfs_inode_cache;
 
 struct workqueue_struct *user_dlm_worker;
 
@@ -257,7 +257,7 @@
 }
 
 static void dlmfs_init_once(void *foo,
-			    kmem_cache_t *cachep,
+			    struct kmem_cache *cachep,
 			    unsigned long flags)
 {
 	struct dlmfs_inode_private *ip =
@@ -276,7 +276,7 @@
 {
 	struct dlmfs_inode_private *ip;
 
-	ip = kmem_cache_alloc(dlmfs_inode_cache, SLAB_NOFS);
+	ip = kmem_cache_alloc(dlmfs_inode_cache, GFP_NOFS);
 	if (!ip)
 		return NULL;
 
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index f784177..856012b 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -221,7 +221,7 @@
 #endif  /*  0  */
 
 
-static kmem_cache_t *dlm_mle_cache = NULL;
+static struct kmem_cache *dlm_mle_cache = NULL;
 
 
 static void dlm_mle_release(struct kref *kref);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 9d950d7..fb3e2b0 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -153,9 +153,10 @@
 }
 
 /* Worker function used during recovery. */
-void dlm_dispatch_work(void *data)
+void dlm_dispatch_work(struct work_struct *work)
 {
-	struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
+	struct dlm_ctxt *dlm =
+		container_of(work, struct dlm_ctxt, dispatched_work);
 	LIST_HEAD(tmp_list);
 	struct list_head *iter, *iter2;
 	struct dlm_work_item *item;
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c
index eead48b..7d2f578 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlm/userdlm.c
@@ -171,15 +171,14 @@
 		BUG();
 }
 
-static void user_dlm_unblock_lock(void *opaque);
+static void user_dlm_unblock_lock(struct work_struct *work);
 
 static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
 {
 	if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
 		user_dlm_grab_inode_ref(lockres);
 
-		INIT_WORK(&lockres->l_work, user_dlm_unblock_lock,
-			  lockres);
+		INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);
 
 		queue_work(user_dlm_worker, &lockres->l_work);
 		lockres->l_flags |= USER_LOCK_QUEUED;
@@ -279,10 +278,11 @@
 	iput(inode);
 }
 
-static void user_dlm_unblock_lock(void *opaque)
+static void user_dlm_unblock_lock(struct work_struct *work)
 {
 	int new_level, status;
-	struct user_lock_res *lockres = (struct user_lock_res *) opaque;
+	struct user_lock_res *lockres =
+		container_of(work, struct user_lock_res, l_work);
 	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
 
 	mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 8801e41..69fba16 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -49,6 +49,7 @@
 #include "dcache.h"
 #include "dlmglue.h"
 #include "extent_map.h"
+#include "file.h"
 #include "heartbeat.h"
 #include "inode.h"
 #include "journal.h"
@@ -1063,10 +1064,10 @@
 	mlog_exit_void();
 }
 
-int ocfs2_create_new_lock(struct ocfs2_super *osb,
-			  struct ocfs2_lock_res *lockres,
-			  int ex,
-			  int local)
+static int ocfs2_create_new_lock(struct ocfs2_super *osb,
+				 struct ocfs2_lock_res *lockres,
+				 int ex,
+				 int local)
 {
 	int level =  ex ? LKM_EXMODE : LKM_PRMODE;
 	unsigned long flags;
@@ -1579,7 +1580,6 @@
  * the result of the lock will be communicated via the callback.
  */
 int ocfs2_meta_lock_full(struct inode *inode,
-			 struct ocfs2_journal_handle *handle,
 			 struct buffer_head **ret_bh,
 			 int ex,
 			 int arg_flags)
@@ -1668,12 +1668,6 @@
 		}
 	}
 
-	if (handle) {
-		status = ocfs2_handle_add_lock(handle, inode);
-		if (status < 0)
-			mlog_errno(status);
-	}
-
 bail:
 	if (status < 0) {
 		if (ret_bh && (*ret_bh)) {
@@ -1713,18 +1707,16 @@
  * the lock inversion simply.
  */
 int ocfs2_meta_lock_with_page(struct inode *inode,
-			      struct ocfs2_journal_handle *handle,
 			      struct buffer_head **ret_bh,
 			      int ex,
 			      struct page *page)
 {
 	int ret;
 
-	ret = ocfs2_meta_lock_full(inode, handle, ret_bh, ex,
-				   OCFS2_LOCK_NONBLOCK);
+	ret = ocfs2_meta_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
 	if (ret == -EAGAIN) {
 		unlock_page(page);
-		if (ocfs2_meta_lock(inode, handle, ret_bh, ex) == 0)
+		if (ocfs2_meta_lock(inode, ret_bh, ex) == 0)
 			ocfs2_meta_unlock(inode, ex);
 		ret = AOP_TRUNCATED_PAGE;
 	}
@@ -1732,6 +1724,44 @@
 	return ret;
 }
 
+int ocfs2_meta_lock_atime(struct inode *inode,
+			  struct vfsmount *vfsmnt,
+			  int *level)
+{
+	int ret;
+
+	mlog_entry_void();
+	ret = ocfs2_meta_lock(inode, NULL, 0);
+	if (ret < 0) {
+		mlog_errno(ret);
+		return ret;
+	}
+
+	/*
+	 * If we should update atime, we will get an EX lock,
+	 * otherwise we just get a PR lock.
+	 */
+	if (ocfs2_should_update_atime(inode, vfsmnt)) {
+		struct buffer_head *bh = NULL;
+
+		ocfs2_meta_unlock(inode, 0);
+		ret = ocfs2_meta_lock(inode, &bh, 1);
+		if (ret < 0) {
+			mlog_errno(ret);
+			return ret;
+		}
+		*level = 1;
+		if (ocfs2_should_update_atime(inode, vfsmnt))
+			ocfs2_update_inode_atime(inode, bh);
+		if (bh)
+			brelse(bh);
+	} else
+		*level = 0;
+
+	mlog_exit(ret);
+	return ret;
+}
+
 void ocfs2_meta_unlock(struct inode *inode,
 		       int ex)
 {
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index 4a27693..c343fca 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -68,8 +68,6 @@
 				u64 parent, struct inode *inode);
 void ocfs2_lock_res_free(struct ocfs2_lock_res *res);
 int ocfs2_create_new_inode_locks(struct inode *inode);
-int ocfs2_create_new_lock(struct ocfs2_super *osb,
-			  struct ocfs2_lock_res *lockres, int ex, int local);
 int ocfs2_drop_inode_locks(struct inode *inode);
 int ocfs2_data_lock_full(struct inode *inode,
 			 int write,
@@ -82,19 +80,20 @@
 		       int write);
 int ocfs2_rw_lock(struct inode *inode, int write);
 void ocfs2_rw_unlock(struct inode *inode, int write);
+int ocfs2_meta_lock_atime(struct inode *inode,
+			  struct vfsmount *vfsmnt,
+			  int *level);
 int ocfs2_meta_lock_full(struct inode *inode,
-			 struct ocfs2_journal_handle *handle,
 			 struct buffer_head **ret_bh,
 			 int ex,
 			 int arg_flags);
 int ocfs2_meta_lock_with_page(struct inode *inode,
-			      struct ocfs2_journal_handle *handle,
 			      struct buffer_head **ret_bh,
 			      int ex,
 			      struct page *page);
 /* 99% of the time we don't want to supply any additional flags --
  * those are for very specific cases only. */
-#define ocfs2_meta_lock(i, h, b, e) ocfs2_meta_lock_full(i, h, b, e, 0)
+#define ocfs2_meta_lock(i, b, e) ocfs2_meta_lock_full(i, b, e, 0)
 void ocfs2_meta_unlock(struct inode *inode,
 		       int ex);
 int ocfs2_super_lock(struct ocfs2_super *osb,
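The ocfs2_meta_lock_atime() prototype added above takes a vfsmount and an out-parameter for the lock level actually obtained: 0 when only a PR lock was needed, 1 when the helper re-took an EX lock to update atime. The caller must pass that same level back to ocfs2_meta_unlock(). Below is a minimal sketch of the intended read-side call pattern, mirroring the ocfs2_file_aio_read() and ocfs2_mmap() hunks later in this patch; the wrapper name ocfs2_example_read_locked() is purely illustrative and not part of the change.

/* Illustrative only: how a read path uses the atime-aware cluster lock. */
static int ocfs2_example_read_locked(struct inode *inode,
				     struct vfsmount *vfsmnt)
{
	int ret, lock_level = 0;

	/* Takes PR normally; re-takes EX and updates atime when
	 * ocfs2_should_update_atime() says an update is due. */
	ret = ocfs2_meta_lock_atime(inode, vfsmnt, &lock_level);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	/* ... inode metadata (i_size etc.) is now stable for the read ... */

	/* Drop the lock at whatever level the helper actually took. */
	ocfs2_meta_unlock(inode, lock_level);
	return 0;
}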
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index fb91089..06be6e7 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -100,7 +100,7 @@
 	mlog(0, "find parent of directory %llu\n",
 	     (unsigned long long)OCFS2_I(dir)->ip_blkno);
 
-	status = ocfs2_meta_lock(dir, NULL, NULL, 0);
+	status = ocfs2_meta_lock(dir, NULL, 0);
 	if (status < 0) {
 		if (status != -ENOENT)
 			mlog_errno(status);
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index fcd4475..80ac69f 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -61,7 +61,7 @@
 	struct ocfs2_extent_map_entry *right_ent;
 };
 
-static kmem_cache_t *ocfs2_em_ent_cachep = NULL;
+static struct kmem_cache *ocfs2_em_ent_cachep = NULL;
 
 
 static struct ocfs2_extent_map_entry *
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 1be74c4..8786b3c 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -31,6 +31,8 @@
 #include <linux/pagemap.h>
 #include <linux/uio.h>
 #include <linux/sched.h>
+#include <linux/pipe_fs_i.h>
+#include <linux/mount.h>
 
 #define MLOG_MASK_PREFIX ML_INODE
 #include <cluster/masklog.h>
@@ -134,7 +136,58 @@
 	return (err < 0) ? -EIO : 0;
 }
 
-int ocfs2_set_inode_size(struct ocfs2_journal_handle *handle,
+int ocfs2_should_update_atime(struct inode *inode,
+			      struct vfsmount *vfsmnt)
+{
+	struct timespec now;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+		return 0;
+
+	if ((inode->i_flags & S_NOATIME) ||
+	    ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
+		return 0;
+
+	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
+	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
+		return 0;
+
+	now = CURRENT_TIME;
+	if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
+		return 0;
+	else
+		return 1;
+}
+
+int ocfs2_update_inode_atime(struct inode *inode,
+			     struct buffer_head *bh)
+{
+	int ret;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	handle_t *handle;
+
+	mlog_entry_void();
+
+	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		mlog_errno(ret);
+		goto out;
+	}
+
+	inode->i_atime = CURRENT_TIME;
+	ret = ocfs2_mark_inode_dirty(handle, inode, bh);
+	if (ret < 0)
+		mlog_errno(ret);
+
+	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+out:
+	mlog_exit(ret);
+	return ret;
+}
+
+int ocfs2_set_inode_size(handle_t *handle,
 			 struct inode *inode,
 			 struct buffer_head *fe_bh,
 			 u64 new_i_size)
@@ -163,10 +216,9 @@
 {
 	int ret;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle = NULL;
 
-	handle = ocfs2_start_trans(osb, NULL,
-				   OCFS2_INODE_UPDATE_CREDITS);
+	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 	if (handle == NULL) {
 		ret = -ENOMEM;
 		mlog_errno(ret);
@@ -178,7 +230,7 @@
 	if (ret < 0)
 		mlog_errno(ret);
 
-	ocfs2_commit_trans(handle);
+	ocfs2_commit_trans(osb, handle);
 out:
 	return ret;
 }
@@ -189,14 +241,14 @@
 				     u64 new_i_size)
 {
 	int status;
-	struct ocfs2_journal_handle *handle;
+	handle_t *handle;
 
 	mlog_entry_void();
 
 	/* TODO: This needs to actually orphan the inode in this
 	 * transaction. */
 
-	handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
+	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
 		mlog_errno(status);
@@ -207,7 +259,7 @@
 	if (status < 0)
 		mlog_errno(status);
 
-	ocfs2_commit_trans(handle);
+	ocfs2_commit_trans(osb, handle);
 out:
 	mlog_exit(status);
 	return status;
@@ -328,7 +380,7 @@
 			       struct inode *inode,
 			       u32 clusters_to_add,
 			       struct buffer_head *fe_bh,
-			       struct ocfs2_journal_handle *handle,
+			       handle_t *handle,
 			       struct ocfs2_alloc_context *data_ac,
 			       struct ocfs2_alloc_context *meta_ac,
 			       enum ocfs2_alloc_restarted *reason_ret)
@@ -433,7 +485,7 @@
 	u32 prev_clusters;
 	struct buffer_head *bh = NULL;
 	struct ocfs2_dinode *fe = NULL;
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle = NULL;
 	struct ocfs2_alloc_context *data_ac = NULL;
 	struct ocfs2_alloc_context *meta_ac = NULL;
 	enum ocfs2_alloc_restarted why;
@@ -463,13 +515,6 @@
 	     (unsigned long long)OCFS2_I(inode)->ip_blkno, i_size_read(inode),
 	     fe->i_clusters, clusters_to_add);
 
-	handle = ocfs2_alloc_handle(osb);
-	if (handle == NULL) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto leave;
-	}
-
 	num_free_extents = ocfs2_num_free_extents(osb,
 						  inode,
 						  fe);
@@ -480,10 +525,7 @@
 	}
 
 	if (!num_free_extents) {
-		status = ocfs2_reserve_new_metadata(osb,
-						    handle,
-						    fe,
-						    &meta_ac);
+		status = ocfs2_reserve_new_metadata(osb, fe, &meta_ac);
 		if (status < 0) {
 			if (status != -ENOSPC)
 				mlog_errno(status);
@@ -491,10 +533,7 @@
 		}
 	}
 
-	status = ocfs2_reserve_clusters(osb,
-					handle,
-					clusters_to_add,
-					&data_ac);
+	status = ocfs2_reserve_clusters(osb, clusters_to_add, &data_ac);
 	if (status < 0) {
 		if (status != -ENOSPC)
 			mlog_errno(status);
@@ -509,7 +548,7 @@
 	drop_alloc_sem = 1;
 
 	credits = ocfs2_calc_extend_credits(osb->sb, fe, clusters_to_add);
-	handle = ocfs2_start_trans(osb, handle, credits);
+	handle = ocfs2_start_trans(osb, credits);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
 		handle = NULL;
@@ -589,7 +628,7 @@
 		drop_alloc_sem = 0;
 	}
 	if (handle) {
-		ocfs2_commit_trans(handle);
+		ocfs2_commit_trans(osb, handle);
 		handle = NULL;
 	}
 	if (data_ac) {
@@ -624,7 +663,7 @@
 	struct page *page;
 	unsigned long index;
 	unsigned int offset;
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle = NULL;
 	int ret;
 
 	offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
@@ -668,7 +707,7 @@
 		ret = 0;
 
 	if (handle)
-		ocfs2_commit_trans(handle);
+		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
 out_unlock:
 	unlock_page(page);
 	page_cache_release(page);
@@ -789,7 +828,7 @@
 	struct super_block *sb = inode->i_sb;
 	struct ocfs2_super *osb = OCFS2_SB(sb);
 	struct buffer_head *bh = NULL;
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle = NULL;
 
 	mlog_entry("(0x%p, '%.*s')\n", dentry,
 	           dentry->d_name.len, dentry->d_name.name);
@@ -825,7 +864,7 @@
 		}
 	}
 
-	status = ocfs2_meta_lock(inode, NULL, &bh, 1);
+	status = ocfs2_meta_lock(inode, &bh, 1);
 	if (status < 0) {
 		if (status != -ENOENT)
 			mlog_errno(status);
@@ -845,7 +884,7 @@
 		}
 	}
 
-	handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
+	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
 		mlog_errno(status);
@@ -863,7 +902,7 @@
 		mlog_errno(status);
 
 bail_commit:
-	ocfs2_commit_trans(handle);
+	ocfs2_commit_trans(osb, handle);
 bail_unlock:
 	ocfs2_meta_unlock(inode, 1);
 bail_unlock_rw:
@@ -906,19 +945,41 @@
 	return err;
 }
 
+int ocfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+	int ret;
+
+	mlog_entry_void();
+
+	ret = ocfs2_meta_lock(inode, NULL, 0);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = generic_permission(inode, mask, NULL);
+	if (ret)
+		mlog_errno(ret);
+
+	ocfs2_meta_unlock(inode, 0);
+out:
+	mlog_exit(ret);
+	return ret;
+}
+
 static int ocfs2_write_remove_suid(struct inode *inode)
 {
 	int ret;
 	struct buffer_head *bh = NULL;
 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
-	struct ocfs2_journal_handle *handle;
+	handle_t *handle;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct ocfs2_dinode *di;
 
 	mlog_entry("(Inode %llu, mode 0%o)\n",
 		   (unsigned long long)oi->ip_blkno, inode->i_mode);
 
-	handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
+	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 	if (handle == NULL) {
 		ret = -ENOMEM;
 		mlog_errno(ret);
@@ -951,75 +1012,29 @@
 out_bh:
 	brelse(bh);
 out_trans:
-	ocfs2_commit_trans(handle);
+	ocfs2_commit_trans(osb, handle);
 out:
 	mlog_exit(ret);
 	return ret;
 }
 
-static inline int ocfs2_write_should_remove_suid(struct inode *inode)
+static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
+					 loff_t *ppos,
+					 size_t count,
+					 int appending)
 {
-	mode_t mode = inode->i_mode;
-
-	if (!capable(CAP_FSETID)) {
-		if (unlikely(mode & S_ISUID))
-			return 1;
-
-		if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
-			return 1;
-	}
-	return 0;
-}
-
-static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
-				    const struct iovec *iov,
-				    unsigned long nr_segs,
-				    loff_t pos)
-{
-	int ret, rw_level = -1, meta_level = -1, have_alloc_sem = 0;
+	int ret = 0, meta_level = appending;
+	struct inode *inode = dentry->d_inode;
 	u32 clusters;
-	struct file *filp = iocb->ki_filp;
-	struct inode *inode = filp->f_dentry->d_inode;
 	loff_t newsize, saved_pos;
 
-	mlog_entry("(0x%p, %u, '%.*s')\n", filp,
-		   (unsigned int)nr_segs,
-		   filp->f_dentry->d_name.len,
-		   filp->f_dentry->d_name.name);
-
-	/* happy write of zero bytes */
-	if (iocb->ki_left == 0)
-		return 0;
-
-	if (!inode) {
-		mlog(0, "bad inode\n");
-		return -EIO;
-	}
-
-	mutex_lock(&inode->i_mutex);
-	/* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
-	if (filp->f_flags & O_DIRECT) {
-		have_alloc_sem = 1;
-		down_read(&inode->i_alloc_sem);
-	}
-
-	/* concurrent O_DIRECT writes are allowed */
-	rw_level = (filp->f_flags & O_DIRECT) ? 0 : 1;
-	ret = ocfs2_rw_lock(inode, rw_level);
-	if (ret < 0) {
-		rw_level = -1;
-		mlog_errno(ret);
-		goto out;
-	}
-
 	/* 
 	 * We sample i_size under a read level meta lock to see if our write
 	 * is extending the file, if it is we back off and get a write level
 	 * meta lock.
 	 */
-	meta_level = (filp->f_flags & O_APPEND) ? 1 : 0;
 	for(;;) {
-		ret = ocfs2_meta_lock(inode, NULL, NULL, meta_level);
+		ret = ocfs2_meta_lock(inode, NULL, meta_level);
 		if (ret < 0) {
 			meta_level = -1;
 			mlog_errno(ret);
@@ -1035,7 +1050,7 @@
 		 * inode. There's also the dinode i_size state which
 		 * can be lost via setattr during extending writes (we
 		 * set inode->i_size at the end of a write. */
-		if (ocfs2_write_should_remove_suid(inode)) {
+		if (should_remove_suid(dentry)) {
 			if (meta_level == 0) {
 				ocfs2_meta_unlock(inode, meta_level);
 				meta_level = 1;
@@ -1045,19 +1060,19 @@
 			ret = ocfs2_write_remove_suid(inode);
 			if (ret < 0) {
 				mlog_errno(ret);
-				goto out;
+				goto out_unlock;
 			}
 		}
 
 		/* work on a copy of ppos until we're sure that we won't have
 		 * to recalculate it due to relocking. */
-		if (filp->f_flags & O_APPEND) {
+		if (appending) {
 			saved_pos = i_size_read(inode);
 			mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
 		} else {
-			saved_pos = iocb->ki_pos;
+			saved_pos = *ppos;
 		}
-		newsize = iocb->ki_left + saved_pos;
+		newsize = count + saved_pos;
 
 		mlog(0, "pos=%lld newsize=%lld cursize=%lld\n",
 		     (long long) saved_pos, (long long) newsize,
@@ -1090,19 +1105,66 @@
 		if (!clusters)
 			break;
 
-		ret = ocfs2_extend_file(inode, NULL, newsize, iocb->ki_left);
+		ret = ocfs2_extend_file(inode, NULL, newsize, count);
 		if (ret < 0) {
 			if (ret != -ENOSPC)
 				mlog_errno(ret);
-			goto out;
+			goto out_unlock;
 		}
 		break;
 	}
 
-	/* ok, we're done with i_size and alloc work */
-	iocb->ki_pos = saved_pos;
+	if (appending)
+		*ppos = saved_pos;
+
+out_unlock:
 	ocfs2_meta_unlock(inode, meta_level);
-	meta_level = -1;
+
+out:
+	return ret;
+}
+
+static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
+				    const struct iovec *iov,
+				    unsigned long nr_segs,
+				    loff_t pos)
+{
+	int ret, rw_level, have_alloc_sem = 0;
+	struct file *filp = iocb->ki_filp;
+	struct inode *inode = filp->f_dentry->d_inode;
+	int appending = filp->f_flags & O_APPEND ? 1 : 0;
+
+	mlog_entry("(0x%p, %u, '%.*s')\n", filp,
+		   (unsigned int)nr_segs,
+		   filp->f_dentry->d_name.len,
+		   filp->f_dentry->d_name.name);
+
+	/* happy write of zero bytes */
+	if (iocb->ki_left == 0)
+		return 0;
+
+	mutex_lock(&inode->i_mutex);
+	/* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
+	if (filp->f_flags & O_DIRECT) {
+		have_alloc_sem = 1;
+		down_read(&inode->i_alloc_sem);
+	}
+
+	/* concurrent O_DIRECT writes are allowed */
+	rw_level = (filp->f_flags & O_DIRECT) ? 0 : 1;
+	ret = ocfs2_rw_lock(inode, rw_level);
+	if (ret < 0) {
+		rw_level = -1;
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = ocfs2_prepare_inode_for_write(filp->f_dentry, &iocb->ki_pos,
+					    iocb->ki_left, appending);
+	if (ret < 0) {
+		mlog_errno(ret);
+		goto out;
+	}
 
 	/* communicate with ocfs2_dio_end_io */
 	ocfs2_iocb_set_rw_locked(iocb);
@@ -1128,8 +1190,6 @@
 	}
 
 out:
-	if (meta_level != -1)
-		ocfs2_meta_unlock(inode, meta_level);
 	if (have_alloc_sem)
 		up_read(&inode->i_alloc_sem);
 	if (rw_level != -1) 
@@ -1140,12 +1200,83 @@
 	return ret;
 }
 
+static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
+				       struct file *out,
+				       loff_t *ppos,
+				       size_t len,
+				       unsigned int flags)
+{
+	int ret;
+	struct inode *inode = out->f_dentry->d_inode;
+
+	mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
+		   (unsigned int)len,
+		   out->f_dentry->d_name.len,
+		   out->f_dentry->d_name.name);
+
+	inode_double_lock(inode, pipe->inode);
+
+	ret = ocfs2_rw_lock(inode, 1);
+	if (ret < 0) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = ocfs2_prepare_inode_for_write(out->f_dentry, ppos, len, 0);
+	if (ret < 0) {
+		mlog_errno(ret);
+		goto out_unlock;
+	}
+
+	/* ok, we're done with i_size and alloc work */
+	ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags);
+
+out_unlock:
+	ocfs2_rw_unlock(inode, 1);
+out:
+	inode_double_unlock(inode, pipe->inode);
+
+	mlog_exit(ret);
+	return ret;
+}
+
+static ssize_t ocfs2_file_splice_read(struct file *in,
+				      loff_t *ppos,
+				      struct pipe_inode_info *pipe,
+				      size_t len,
+				      unsigned int flags)
+{
+	int ret = 0;
+	struct inode *inode = in->f_dentry->d_inode;
+
+	mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
+		   (unsigned int)len,
+		   in->f_dentry->d_name.len,
+		   in->f_dentry->d_name.name);
+
+	/*
+	 * See the comment in ocfs2_file_aio_read()
+	 */
+	ret = ocfs2_meta_lock(inode, NULL, 0);
+	if (ret < 0) {
+		mlog_errno(ret);
+		goto bail;
+	}
+	ocfs2_meta_unlock(inode, 0);
+
+	ret = generic_file_splice_read(in, ppos, pipe, len, flags);
+
+bail:
+	mlog_exit(ret);
+	return ret;
+}
+
 static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
 				   const struct iovec *iov,
 				   unsigned long nr_segs,
 				   loff_t pos)
 {
-	int ret = 0, rw_level = -1, have_alloc_sem = 0;
+	int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
 	struct file *filp = iocb->ki_filp;
 	struct inode *inode = filp->f_dentry->d_inode;
 
@@ -1187,12 +1318,12 @@
 	 * like i_size. This allows the checks down below
 	 * generic_file_aio_read() a chance of actually working. 
 	 */
-	ret = ocfs2_meta_lock(inode, NULL, NULL, 0);
+	ret = ocfs2_meta_lock_atime(inode, filp->f_vfsmnt, &lock_level);
 	if (ret < 0) {
 		mlog_errno(ret);
 		goto bail;
 	}
-	ocfs2_meta_unlock(inode, 0);
+	ocfs2_meta_unlock(inode, lock_level);
 
 	ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
 	if (ret == -EINVAL)
@@ -1220,11 +1351,13 @@
 struct inode_operations ocfs2_file_iops = {
 	.setattr	= ocfs2_setattr,
 	.getattr	= ocfs2_getattr,
+	.permission	= ocfs2_permission,
 };
 
 struct inode_operations ocfs2_special_file_iops = {
 	.setattr	= ocfs2_setattr,
 	.getattr	= ocfs2_getattr,
+	.permission	= ocfs2_permission,
 };
 
 const struct file_operations ocfs2_fops = {
@@ -1238,6 +1371,8 @@
 	.aio_read	= ocfs2_file_aio_read,
 	.aio_write	= ocfs2_file_aio_write,
 	.ioctl		= ocfs2_ioctl,
+	.splice_read	= ocfs2_file_splice_read,
+	.splice_write	= ocfs2_file_splice_write,
 };
 
 const struct file_operations ocfs2_dops = {
diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h
index 740c9e7..601a453 100644
--- a/fs/ocfs2/file.h
+++ b/fs/ocfs2/file.h
@@ -41,17 +41,24 @@
 			       struct inode *inode,
 			       u32 clusters_to_add,
 			       struct buffer_head *fe_bh,
-			       struct ocfs2_journal_handle *handle,
+			       handle_t *handle,
 			       struct ocfs2_alloc_context *data_ac,
 			       struct ocfs2_alloc_context *meta_ac,
 			       enum ocfs2_alloc_restarted *reason);
 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr);
 int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
 		  struct kstat *stat);
+int ocfs2_permission(struct inode *inode, int mask,
+		     struct nameidata *nd);
 
-int ocfs2_set_inode_size(struct ocfs2_journal_handle *handle,
+int ocfs2_set_inode_size(handle_t *handle,
 			 struct inode *inode,
 			 struct buffer_head *fe_bh,
 			 u64 new_i_size);
 
+int ocfs2_should_update_atime(struct inode *inode,
+			      struct vfsmount *vfsmnt);
+int ocfs2_update_inode_atime(struct inode *inode,
+			     struct buffer_head *bh);
+
 #endif /* OCFS2_FILE_H */
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 16e8e74..42e361f 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -360,7 +360,6 @@
 				  inode);
 
 	ocfs2_set_inode_flags(inode);
-	inode->i_flags |= S_NOATIME;
 
 	status = 0;
 bail:
@@ -441,7 +440,7 @@
 				  generation, inode);
 
 	if (can_lock) {
-		status = ocfs2_meta_lock(inode, NULL, NULL, 0);
+		status = ocfs2_meta_lock(inode, NULL, 0);
 		if (status) {
 			make_bad_inode(inode);
 			mlog_errno(status);
@@ -512,7 +511,7 @@
 				     struct buffer_head *fe_bh)
 {
 	int status = 0;
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle = NULL;
 	struct ocfs2_truncate_context *tc = NULL;
 	struct ocfs2_dinode *fe;
 
@@ -524,7 +523,7 @@
 	if (!fe->i_clusters)
 		goto bail;
 
-	handle = ocfs2_start_trans(osb, handle, OCFS2_INODE_UPDATE_CREDITS);
+	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
 		handle = NULL;
@@ -538,7 +537,7 @@
 		goto bail;
 	}
 
-	ocfs2_commit_trans(handle);
+	ocfs2_commit_trans(osb, handle);
 	handle = NULL;
 
 	status = ocfs2_prepare_truncate(osb, inode, fe_bh, &tc);
@@ -554,7 +553,7 @@
 	}
 bail:
 	if (handle)
-		ocfs2_commit_trans(handle);
+		ocfs2_commit_trans(osb, handle);
 
 	mlog_exit(status);
 	return status;
@@ -568,7 +567,7 @@
 	int status;
 	struct inode *inode_alloc_inode = NULL;
 	struct buffer_head *inode_alloc_bh = NULL;
-	struct ocfs2_journal_handle *handle;
+	handle_t *handle;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
 
@@ -582,7 +581,7 @@
 	}
 
 	mutex_lock(&inode_alloc_inode->i_mutex);
-	status = ocfs2_meta_lock(inode_alloc_inode, NULL, &inode_alloc_bh, 1);
+	status = ocfs2_meta_lock(inode_alloc_inode, &inode_alloc_bh, 1);
 	if (status < 0) {
 		mutex_unlock(&inode_alloc_inode->i_mutex);
 
@@ -590,7 +589,7 @@
 		goto bail;
 	}
 
-	handle = ocfs2_start_trans(osb, NULL, OCFS2_DELETE_INODE_CREDITS);
+	handle = ocfs2_start_trans(osb, OCFS2_DELETE_INODE_CREDITS);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
 		mlog_errno(status);
@@ -629,7 +628,7 @@
 		mlog_errno(status);
 
 bail_commit:
-	ocfs2_commit_trans(handle);
+	ocfs2_commit_trans(osb, handle);
 bail_unlock:
 	ocfs2_meta_unlock(inode_alloc_inode, 1);
 	mutex_unlock(&inode_alloc_inode->i_mutex);
@@ -705,7 +704,7 @@
 	 * delete_inode operation. We do this now to avoid races with
 	 * recovery completion on other nodes. */
 	mutex_lock(&orphan_dir_inode->i_mutex);
-	status = ocfs2_meta_lock(orphan_dir_inode, NULL, &orphan_dir_bh, 1);
+	status = ocfs2_meta_lock(orphan_dir_inode, &orphan_dir_bh, 1);
 	if (status < 0) {
 		mutex_unlock(&orphan_dir_inode->i_mutex);
 
@@ -933,7 +932,7 @@
 	 * allocation lock here as it won't be needed - nobody will
 	 * have the file open.
 	 */
-	status = ocfs2_meta_lock(inode, NULL, &di_bh, 1);
+	status = ocfs2_meta_lock(inode, &di_bh, 1);
 	if (status < 0) {
 		if (status != -ENOENT)
 			mlog_errno(status);
@@ -1067,12 +1066,6 @@
 	mlog_bug_on_msg(oi->ip_open_count,
 			"Clear inode of %llu has open count %d\n",
 			(unsigned long long)oi->ip_blkno, oi->ip_open_count);
-	mlog_bug_on_msg(!list_empty(&oi->ip_handle_list),
-			"Clear inode of %llu has non empty handle list\n",
-			(unsigned long long)oi->ip_blkno);
-	mlog_bug_on_msg(oi->ip_handle,
-			"Clear inode of %llu has non empty handle pointer\n",
-			(unsigned long long)oi->ip_blkno);
 
 	/* Clear all other flags. */
 	oi->ip_flags = OCFS2_INODE_CACHE_INLINE;
@@ -1186,7 +1179,7 @@
 
 	/* Let ocfs2_meta_lock do the work of updating our struct
 	 * inode for us. */
-	status = ocfs2_meta_lock(inode, NULL, NULL, 0);
+	status = ocfs2_meta_lock(inode, NULL, 0);
 	if (status < 0) {
 		if (status != -ENOENT)
 			mlog_errno(status);
@@ -1204,7 +1197,7 @@
  * struct inode.
  * Only takes ip_lock.
  */
-int ocfs2_mark_inode_dirty(struct ocfs2_journal_handle *handle,
+int ocfs2_mark_inode_dirty(handle_t *handle,
 			   struct inode *inode,
 			   struct buffer_head *bh)
 {
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 9957810..1a7dd29 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -48,13 +48,6 @@
 
 	struct mutex			ip_io_mutex;
 
-	/* Used by the journalling code to attach an inode to a
-	 * handle.  These are protected by ip_io_mutex in order to lock
-	 * out other I/O to the inode until we either commit or
-	 * abort. */
-	struct list_head		ip_handle_list;
-	struct ocfs2_journal_handle	*ip_handle;
-
 	u32				ip_flags; /* see below */
 	u32				ip_attr; /* inode attributes */
 
@@ -113,7 +106,7 @@
 #define INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags & OCFS2_INODE_JOURNAL)
 #define SET_INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags |= OCFS2_INODE_JOURNAL)
 
-extern kmem_cache_t *ocfs2_inode_cache;
+extern struct kmem_cache *ocfs2_inode_cache;
 
 extern const struct address_space_operations ocfs2_aops;
 
@@ -143,7 +136,7 @@
 void ocfs2_sync_blockdev(struct super_block *sb);
 void ocfs2_refresh_inode(struct inode *inode,
 			 struct ocfs2_dinode *fe);
-int ocfs2_mark_inode_dirty(struct ocfs2_journal_handle *handle,
+int ocfs2_mark_inode_dirty(handle_t *handle,
 			   struct inode *inode,
 			   struct buffer_head *bh);
 int ocfs2_aio_read(struct file *file, struct kiocb *req, struct iocb *iocb);
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 3663cef..4768be5 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -26,7 +26,7 @@
 {
 	int status;
 
-	status = ocfs2_meta_lock(inode, NULL, NULL, 0);
+	status = ocfs2_meta_lock(inode, NULL, 0);
 	if (status < 0) {
 		mlog_errno(status);
 		return status;
@@ -43,14 +43,14 @@
 {
 	struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode);
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle = NULL;
 	struct buffer_head *bh = NULL;
 	unsigned oldflags;
 	int status;
 
 	mutex_lock(&inode->i_mutex);
 
-	status = ocfs2_meta_lock(inode, NULL, &bh, 1);
+	status = ocfs2_meta_lock(inode, &bh, 1);
 	if (status < 0) {
 		mlog_errno(status);
 		goto bail;
@@ -67,7 +67,7 @@
 	if (!S_ISDIR(inode->i_mode))
 		flags &= ~OCFS2_DIRSYNC_FL;
 
-	handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
+	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
 		mlog_errno(status);
@@ -96,7 +96,7 @@
 	if (status < 0)
 		mlog_errno(status);
 
-	ocfs2_commit_trans(handle);
+	ocfs2_commit_trans(osb, handle);
 bail_unlock:
 	ocfs2_meta_unlock(inode, 1);
 bail:
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index fd9734d..1d7f4ab 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -57,9 +57,6 @@
 static int __ocfs2_recovery_thread(void *arg);
 static int ocfs2_commit_cache(struct ocfs2_super *osb);
 static int ocfs2_wait_on_mount(struct ocfs2_super *osb);
-static void ocfs2_handle_cleanup_locks(struct ocfs2_journal *journal,
-				       struct ocfs2_journal_handle *handle);
-static void ocfs2_commit_unstarted_handle(struct ocfs2_journal_handle *handle);
 static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
 				      int dirty);
 static int ocfs2_trylock_journal(struct ocfs2_super *osb,
@@ -113,46 +110,18 @@
 	return status;
 }
 
-struct ocfs2_journal_handle *ocfs2_alloc_handle(struct ocfs2_super *osb)
-{
-	struct ocfs2_journal_handle *retval = NULL;
-
-	retval = kcalloc(1, sizeof(*retval), GFP_NOFS);
-	if (!retval) {
-		mlog(ML_ERROR, "Failed to allocate memory for journal "
-		     "handle!\n");
-		return NULL;
-	}
-
-	retval->max_buffs = 0;
-	retval->num_locks = 0;
-	retval->k_handle = NULL;
-
-	INIT_LIST_HEAD(&retval->locks);
-	INIT_LIST_HEAD(&retval->inode_list);
-	retval->journal = osb->journal;
-
-	return retval;
-}
-
 /* pass it NULL and it will allocate a new handle object for you.  If
  * you pass it a handle however, it may still return error, in which
  * case it has free'd the passed handle for you. */
-struct ocfs2_journal_handle *ocfs2_start_trans(struct ocfs2_super *osb,
-					       struct ocfs2_journal_handle *handle,
-					       int max_buffs)
+handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs)
 {
-	int ret;
 	journal_t *journal = osb->journal->j_journal;
-
-	mlog_entry("(max_buffs = %d)\n", max_buffs);
+	handle_t *handle;
 
 	BUG_ON(!osb || !osb->journal->j_journal);
 
-	if (ocfs2_is_hard_readonly(osb)) {
-		ret = -EROFS;
-		goto done_free;
-	}
+	if (ocfs2_is_hard_readonly(osb))
+		return ERR_PTR(-EROFS);
 
 	BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE);
 	BUG_ON(max_buffs <= 0);
@@ -163,154 +132,39 @@
 		BUG();
 	}
 
-	if (!handle)
-		handle = ocfs2_alloc_handle(osb);
-	if (!handle) {
-		ret = -ENOMEM;
-		mlog(ML_ERROR, "Failed to allocate memory for journal "
-		     "handle!\n");
-		goto done_free;
-	}
-
-	handle->max_buffs = max_buffs;
-
 	down_read(&osb->journal->j_trans_barrier);
 
-	/* actually start the transaction now */
-	handle->k_handle = journal_start(journal, max_buffs);
-	if (IS_ERR(handle->k_handle)) {
+	handle = journal_start(journal, max_buffs);
+	if (IS_ERR(handle)) {
 		up_read(&osb->journal->j_trans_barrier);
 
-		ret = PTR_ERR(handle->k_handle);
-		handle->k_handle = NULL;
-		mlog_errno(ret);
+		mlog_errno(PTR_ERR(handle));
 
 		if (is_journal_aborted(journal)) {
 			ocfs2_abort(osb->sb, "Detected aborted journal");
-			ret = -EROFS;
+			handle = ERR_PTR(-EROFS);
 		}
-		goto done_free;
-	}
+	} else
+		atomic_inc(&(osb->journal->j_num_trans));
 
-	atomic_inc(&(osb->journal->j_num_trans));
-	handle->flags |= OCFS2_HANDLE_STARTED;
-
-	mlog_exit_ptr(handle);
 	return handle;
-
-done_free:
-	if (handle)
-		ocfs2_commit_unstarted_handle(handle); /* will kfree handle */
-
-	mlog_exit(ret);
-	return ERR_PTR(ret);
 }
 
-void ocfs2_handle_add_inode(struct ocfs2_journal_handle *handle,
-			    struct inode *inode)
+int ocfs2_commit_trans(struct ocfs2_super *osb,
+		       handle_t *handle)
 {
-	BUG_ON(!handle);
-	BUG_ON(!inode);
-
-	atomic_inc(&inode->i_count);
-
-	/* we're obviously changing it... */
-	mutex_lock(&inode->i_mutex);
-
-	/* sanity check */
-	BUG_ON(OCFS2_I(inode)->ip_handle);
-	BUG_ON(!list_empty(&OCFS2_I(inode)->ip_handle_list));
-
-	OCFS2_I(inode)->ip_handle = handle;
-	list_move_tail(&(OCFS2_I(inode)->ip_handle_list), &(handle->inode_list));
-}
-
-static void ocfs2_handle_unlock_inodes(struct ocfs2_journal_handle *handle)
-{
-	struct list_head *p, *n;
-	struct inode *inode;
-	struct ocfs2_inode_info *oi;
-
-	list_for_each_safe(p, n, &handle->inode_list) {
-		oi = list_entry(p, struct ocfs2_inode_info,
-				ip_handle_list);
-		inode = &oi->vfs_inode;
-
-		OCFS2_I(inode)->ip_handle = NULL;
-		list_del_init(&OCFS2_I(inode)->ip_handle_list);
-
-		mutex_unlock(&inode->i_mutex);
-		iput(inode);
-	}
-}
-
-/* This is trivial so we do it out of the main commit
- * paths. Beware, it can be called from start_trans too! */
-static void ocfs2_commit_unstarted_handle(struct ocfs2_journal_handle *handle)
-{
-	mlog_entry_void();
-
-	BUG_ON(handle->flags & OCFS2_HANDLE_STARTED);
-
-	ocfs2_handle_unlock_inodes(handle);
-	/* You are allowed to add journal locks before the transaction
-	 * has started. */
-	ocfs2_handle_cleanup_locks(handle->journal, handle);
-
-	kfree(handle);
-
-	mlog_exit_void();
-}
-
-void ocfs2_commit_trans(struct ocfs2_journal_handle *handle)
-{
-	handle_t *jbd_handle;
-	int retval;
-	struct ocfs2_journal *journal = handle->journal;
-
-	mlog_entry_void();
+	int ret;
+	struct ocfs2_journal *journal = osb->journal;
 
 	BUG_ON(!handle);
 
-	if (!(handle->flags & OCFS2_HANDLE_STARTED)) {
-		ocfs2_commit_unstarted_handle(handle);
-		mlog_exit_void();
-		return;
-	}
-
-	/* release inode semaphores we took during this transaction */
-	ocfs2_handle_unlock_inodes(handle);
-
-	/* ocfs2_extend_trans may have had to call journal_restart
-	 * which will always commit the transaction, but may return
-	 * error for any number of reasons. If this is the case, we
-	 * clear k_handle as it's not valid any more. */
-	if (handle->k_handle) {
-		jbd_handle = handle->k_handle;
-
-		if (handle->flags & OCFS2_HANDLE_SYNC)
-			jbd_handle->h_sync = 1;
-		else
-			jbd_handle->h_sync = 0;
-
-		/* actually stop the transaction. if we've set h_sync,
-		 * it'll have been committed when we return */
-		retval = journal_stop(jbd_handle);
-		if (retval < 0) {
-			mlog_errno(retval);
-			mlog(ML_ERROR, "Could not commit transaction\n");
-			BUG();
-		}
-
-		handle->k_handle = NULL; /* it's been free'd in journal_stop */
-	}
-
-	ocfs2_handle_cleanup_locks(journal, handle);
+	ret = journal_stop(handle);
+	if (ret < 0)
+		mlog_errno(ret);
 
 	up_read(&journal->j_trans_barrier);
 
-	kfree(handle);
-	mlog_exit_void();
+	return ret;
 }
 
 /*
@@ -326,20 +180,18 @@
  * good because transaction ids haven't yet been recorded on the
  * cluster locks associated with this handle.
  */
-int ocfs2_extend_trans(struct ocfs2_journal_handle *handle,
-		       int nblocks)
+int ocfs2_extend_trans(handle_t *handle, int nblocks)
 {
 	int status;
 
 	BUG_ON(!handle);
-	BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED));
 	BUG_ON(!nblocks);
 
 	mlog_entry_void();
 
 	mlog(0, "Trying to extend transaction by %d blocks\n", nblocks);
 
-	status = journal_extend(handle->k_handle, nblocks);
+	status = journal_extend(handle, nblocks);
 	if (status < 0) {
 		mlog_errno(status);
 		goto bail;
@@ -347,15 +199,12 @@
 
 	if (status > 0) {
 		mlog(0, "journal_extend failed, trying journal_restart\n");
-		status = journal_restart(handle->k_handle, nblocks);
+		status = journal_restart(handle, nblocks);
 		if (status < 0) {
-			handle->k_handle = NULL;
 			mlog_errno(status);
 			goto bail;
 		}
-		handle->max_buffs = nblocks;
-	} else
-		handle->max_buffs += nblocks;
+	}
 
 	status = 0;
 bail:
@@ -364,7 +213,7 @@
 	return status;
 }
 
-int ocfs2_journal_access(struct ocfs2_journal_handle *handle,
+int ocfs2_journal_access(handle_t *handle,
 			 struct inode *inode,
 			 struct buffer_head *bh,
 			 int type)
@@ -374,7 +223,6 @@
 	BUG_ON(!inode);
 	BUG_ON(!handle);
 	BUG_ON(!bh);
-	BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED));
 
 	mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n",
 		   (unsigned long long)bh->b_blocknr, type,
@@ -403,11 +251,11 @@
 	switch (type) {
 	case OCFS2_JOURNAL_ACCESS_CREATE:
 	case OCFS2_JOURNAL_ACCESS_WRITE:
-		status = journal_get_write_access(handle->k_handle, bh);
+		status = journal_get_write_access(handle, bh);
 		break;
 
 	case OCFS2_JOURNAL_ACCESS_UNDO:
-		status = journal_get_undo_access(handle->k_handle, bh);
+		status = journal_get_undo_access(handle, bh);
 		break;
 
 	default:
@@ -424,17 +272,15 @@
 	return status;
 }
 
-int ocfs2_journal_dirty(struct ocfs2_journal_handle *handle,
+int ocfs2_journal_dirty(handle_t *handle,
 			struct buffer_head *bh)
 {
 	int status;
 
-	BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED));
-
 	mlog_entry("(bh->b_blocknr=%llu)\n",
 		   (unsigned long long)bh->b_blocknr);
 
-	status = journal_dirty_metadata(handle->k_handle, bh);
+	status = journal_dirty_metadata(handle, bh);
 	if (status < 0)
 		mlog(ML_ERROR, "Could not dirty metadata buffer. "
 		     "(bh->b_blocknr=%llu)\n",
@@ -456,59 +302,6 @@
 	return err;
 }
 
-/* We always assume you're adding a metadata lock at level 'ex' */
-int ocfs2_handle_add_lock(struct ocfs2_journal_handle *handle,
-			  struct inode *inode)
-{
-	int status;
-	struct ocfs2_journal_lock *lock;
-
-	BUG_ON(!inode);
-
-	lock = kmem_cache_alloc(ocfs2_lock_cache, GFP_NOFS);
-	if (!lock) {
-		status = -ENOMEM;
-		mlog_errno(-ENOMEM);
-		goto bail;
-	}
-
-	if (!igrab(inode))
-		BUG();
-	lock->jl_inode = inode;
-
-	list_add_tail(&(lock->jl_lock_list), &(handle->locks));
-	handle->num_locks++;
-
-	status = 0;
-bail:
-	mlog_exit(status);
-	return status;
-}
-
-static void ocfs2_handle_cleanup_locks(struct ocfs2_journal *journal,
-				       struct ocfs2_journal_handle *handle)
-{
-	struct list_head *p, *n;
-	struct ocfs2_journal_lock *lock;
-	struct inode *inode;
-
-	list_for_each_safe(p, n, &(handle->locks)) {
-		lock = list_entry(p, struct ocfs2_journal_lock,
-				  jl_lock_list);
-		list_del(&lock->jl_lock_list);
-		handle->num_locks--;
-
-		inode = lock->jl_inode;
-		ocfs2_meta_unlock(inode, 1);
-		if (atomic_read(&inode->i_count) == 1)
-			mlog(ML_ERROR,
-			     "Inode %llu, I'm doing a last iput for!",
-			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
-		iput(inode);
-		kmem_cache_free(ocfs2_lock_cache, lock);
-	}
-}
-
 #define OCFS2_DEFAULT_COMMIT_INTERVAL 	(HZ * 5)
 
 void ocfs2_set_journal_params(struct ocfs2_super *osb)
@@ -562,8 +355,7 @@
 	/* Skip recovery waits here - journal inode metadata never
 	 * changes in a live cluster so it can be considered an
 	 * exception to the rule. */
-	status = ocfs2_meta_lock_full(inode, NULL, &bh, 1,
-				      OCFS2_META_LOCK_RECOVERY);
+	status = ocfs2_meta_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
 	if (status < 0) {
 		if (status != -ERESTARTSYS)
 			mlog(ML_ERROR, "Could not get lock on journal!\n");
@@ -911,11 +703,12 @@
  * NOTE: This function can and will sleep on recovery of other nodes
  * during cluster locking, just like any other ocfs2 process.
  */
-void ocfs2_complete_recovery(void *data)
+void ocfs2_complete_recovery(struct work_struct *work)
 {
 	int ret;
-	struct ocfs2_super *osb = data;
-	struct ocfs2_journal *journal = osb->journal;
+	struct ocfs2_journal *journal =
+		container_of(work, struct ocfs2_journal, j_recovery_work);
+	struct ocfs2_super *osb = journal->j_osb;
 	struct ocfs2_dinode *la_dinode, *tl_dinode;
 	struct ocfs2_la_recovery_item *item;
 	struct list_head *p, *n;
@@ -1160,8 +953,7 @@
 	}
 	SET_INODE_JOURNAL(inode);
 
-	status = ocfs2_meta_lock_full(inode, NULL, &bh, 1,
-				      OCFS2_META_LOCK_RECOVERY);
+	status = ocfs2_meta_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
 	if (status < 0) {
 		mlog(0, "status returned from ocfs2_meta_lock=%d\n", status);
 		if (status != -ERESTARTSYS)
@@ -1350,7 +1142,7 @@
 	SET_INODE_JOURNAL(inode);
 
 	flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE;
-	status = ocfs2_meta_lock_full(inode, NULL, NULL, 1, flags);
+	status = ocfs2_meta_lock_full(inode, NULL, 1, flags);
 	if (status < 0) {
 		if (status != -EAGAIN)
 			mlog_errno(status);
@@ -1433,7 +1225,7 @@
 	}	
 
 	mutex_lock(&orphan_dir_inode->i_mutex);
-	status = ocfs2_meta_lock(orphan_dir_inode, NULL, NULL, 0);
+	status = ocfs2_meta_lock(orphan_dir_inode, NULL, 0);
 	if (status < 0) {
 		mlog_errno(status);
 		goto out;
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 2f3a6ac..899112a 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -37,7 +37,6 @@
 
 struct ocfs2_super;
 struct ocfs2_dinode;
-struct ocfs2_journal_handle;
 
 struct ocfs2_journal {
 	enum ocfs2_journal_state   j_state;    /* Journals current state   */
@@ -133,46 +132,8 @@
 	spin_unlock(&trans_inc_lock);
 }
 
-extern kmem_cache_t *ocfs2_lock_cache;
-
-struct ocfs2_journal_lock {
-	struct inode     *jl_inode;
-	struct list_head  jl_lock_list;
-};
-
-struct ocfs2_journal_handle {
-	handle_t            *k_handle; /* kernel handle.                */
-	struct ocfs2_journal        *journal;
-	u32                 flags;     /* see flags below.              */
-	int                 max_buffs; /* Buffs reserved by this handle */
-
-	/* The following two fields are for ocfs2_handle_add_lock */
-	int                 num_locks;
-	struct list_head    locks;     /* A bunch of locks to
-					* release on commit. This
-					* should be a list_head */
-
-	struct list_head     inode_list;
-};
-
-#define OCFS2_HANDLE_STARTED			1
-/* should we sync-commit this handle? */
-#define OCFS2_HANDLE_SYNC			2
-static inline int ocfs2_handle_started(struct ocfs2_journal_handle *handle)
-{
-	return handle->flags & OCFS2_HANDLE_STARTED;
-}
-
-static inline void ocfs2_handle_set_sync(struct ocfs2_journal_handle *handle, int sync)
-{
-	if (sync)
-		handle->flags |= OCFS2_HANDLE_SYNC;
-	else
-		handle->flags &= ~OCFS2_HANDLE_SYNC;
-}
-
 /* Exported only for the journal struct init code in super.c. Do not call. */
-void ocfs2_complete_recovery(void *data);
+void ocfs2_complete_recovery(struct work_struct *work);
 
 /*
  *  Journal Control:
@@ -231,15 +192,14 @@
  *  Transaction Handling:
  *  Manage the lifetime of a transaction handle.
  *
- *  ocfs2_alloc_handle     - Only allocate a handle so we can start putting
- *                          cluster locks on it. To actually change blocks,
- *                          call ocfs2_start_trans with the handle returned
- *                          from this function. You may call ocfs2_commit_trans
- *                           at any time in the lifetime of a handle.
  *  ocfs2_start_trans      - Begin a transaction. Give it an upper estimate of
  *                          the number of blocks that will be changed during
  *                          this handle.
- *  ocfs2_commit_trans     - Complete a handle.
+ *  ocfs2_commit_trans - Complete a handle. It might return -EIO if
+ *                       the journal was aborted. Most paths don't check
+ *                       the return value, as an error at that point comes
+ *                       too late to do anything about (it will be picked
+ *                       up in a later transaction).
  *  ocfs2_extend_trans     - Extend a handle by nblocks credits. This may
  *                          commit the handle to disk in the process, but will
  *                          not release any locks taken during the transaction.
@@ -249,24 +209,16 @@
  *  ocfs2_journal_dirty    - Mark a journalled buffer as having dirty data.
  *  ocfs2_journal_dirty_data - Indicate that a data buffer should go out before
  *                             the current handle commits.
- *  ocfs2_handle_add_lock  - Sometimes we need to delay lock release
- *                          until after a transaction has been completed. Use
- *                          ocfs2_handle_add_lock to indicate that a lock needs
- *                          to be released at the end of that handle. Locks
- *                          will be released in the order that they are added.
- *  ocfs2_handle_add_inode - Add a locked inode to a transaction.
  */
 
 /* You must always start_trans with a number of buffs > 0, but it's
  * perfectly legal to go through an entire transaction without having
  * dirtied any buffers. */
-struct ocfs2_journal_handle *ocfs2_alloc_handle(struct ocfs2_super *osb);
-struct ocfs2_journal_handle *ocfs2_start_trans(struct ocfs2_super *osb,
-					       struct ocfs2_journal_handle *handle,
+handle_t		    *ocfs2_start_trans(struct ocfs2_super *osb,
 					       int max_buffs);
-void			     ocfs2_commit_trans(struct ocfs2_journal_handle *handle);
-int			     ocfs2_extend_trans(struct ocfs2_journal_handle *handle,
-						int nblocks);
+int			     ocfs2_commit_trans(struct ocfs2_super *osb,
+						handle_t *handle);
+int			     ocfs2_extend_trans(handle_t *handle, int nblocks);
 
 /*
  * Create access is for when we get a newly created buffer and we're
@@ -283,7 +235,7 @@
 #define OCFS2_JOURNAL_ACCESS_WRITE  1
 #define OCFS2_JOURNAL_ACCESS_UNDO   2
 
-int                  ocfs2_journal_access(struct ocfs2_journal_handle *handle,
+int                  ocfs2_journal_access(handle_t *handle,
 					  struct inode *inode,
 					  struct buffer_head *bh,
 					  int type);
@@ -306,18 +258,10 @@
  *	<modify the bh>
  * 	ocfs2_journal_dirty(handle, bh);
  */
-int                  ocfs2_journal_dirty(struct ocfs2_journal_handle *handle,
+int                  ocfs2_journal_dirty(handle_t *handle,
 					 struct buffer_head *bh);
 int                  ocfs2_journal_dirty_data(handle_t *handle,
 					      struct buffer_head *bh);
-int                  ocfs2_handle_add_lock(struct ocfs2_journal_handle *handle,
-					   struct inode *inode);
-/*
- * Use this to protect from other processes reading buffer state while
- * it's in flight.
- */
-void                 ocfs2_handle_add_inode(struct ocfs2_journal_handle *handle,
-					    struct inode *inode);
 
 /*
  *  Credit Macros:
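With struct ocfs2_journal_handle gone, the transaction interface documented in the hunk above is just the bare JBD handle_t. A minimal sketch of the resulting call sequence, using only signatures that appear in this diff (the buffer is assumed to be read and cluster-locked already, and the wrapper name ocfs2_example_update_block() is illustrative, not part of the patch):

/* Illustrative only: start a transaction, journal one buffer, commit. */
static int ocfs2_example_update_block(struct ocfs2_super *osb,
				      struct inode *inode,
				      struct buffer_head *bh)
{
	handle_t *handle;
	int ret;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ret = ocfs2_journal_access(handle, inode, bh,
				   OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0)
		goto out_commit;

	/* ... modify bh->b_data under the running handle ... */

	ret = ocfs2_journal_dirty(handle, bh);
out_commit:
	/* Commit errors are rarely checked; a journal abort surfaces later. */
	ocfs2_commit_trans(osb, handle);
	return ret;
}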
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 1f17a4d..698d79a 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -58,19 +58,18 @@
 static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc);
 
 static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
-				    struct ocfs2_journal_handle *handle,
+				    handle_t *handle,
 				    struct ocfs2_dinode *alloc,
 				    struct inode *main_bm_inode,
 				    struct buffer_head *main_bm_bh);
 
 static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
-						struct ocfs2_journal_handle *handle,
 						struct ocfs2_alloc_context **ac,
 						struct inode **bitmap_inode,
 						struct buffer_head **bitmap_bh);
 
 static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
-					struct ocfs2_journal_handle *handle,
+					handle_t *handle,
 					struct ocfs2_alloc_context *ac);
 
 static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
@@ -196,7 +195,7 @@
 void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
 {
 	int status;
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle;
 	struct inode *local_alloc_inode = NULL;
 	struct buffer_head *bh = NULL;
 	struct buffer_head *main_bm_bh = NULL;
@@ -207,7 +206,7 @@
 	mlog_entry_void();
 
 	if (osb->local_alloc_state == OCFS2_LA_UNUSED)
-		goto bail;
+		goto out;
 
 	local_alloc_inode =
 		ocfs2_get_system_file_inode(osb,
@@ -216,40 +215,34 @@
 	if (!local_alloc_inode) {
 		status = -ENOENT;
 		mlog_errno(status);
-		goto bail;
+		goto out;
 	}
 
 	osb->local_alloc_state = OCFS2_LA_DISABLED;
 
-	handle = ocfs2_alloc_handle(osb);
-	if (!handle) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto bail;
-	}
-
 	main_bm_inode = ocfs2_get_system_file_inode(osb,
 						    GLOBAL_BITMAP_SYSTEM_INODE,
 						    OCFS2_INVALID_SLOT);
 	if (!main_bm_inode) {
 		status = -EINVAL;
 		mlog_errno(status);
-		goto bail;
+		goto out;
 	}
 
-	ocfs2_handle_add_inode(handle, main_bm_inode);
-	status = ocfs2_meta_lock(main_bm_inode, handle, &main_bm_bh, 1);
+	mutex_lock(&main_bm_inode->i_mutex);
+
+	status = ocfs2_meta_lock(main_bm_inode, &main_bm_bh, 1);
 	if (status < 0) {
 		mlog_errno(status);
-		goto bail;
+		goto out_mutex;
 	}
 
 	/* WINDOW_MOVE_CREDITS is a bit heavy... */
-	handle = ocfs2_start_trans(osb, handle, OCFS2_WINDOW_MOVE_CREDITS);
+	handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
 	if (IS_ERR(handle)) {
 		mlog_errno(PTR_ERR(handle));
 		handle = NULL;
-		goto bail;
+		goto out_unlock;
 	}
 
 	bh = osb->local_alloc_bh;
@@ -258,7 +251,7 @@
 	alloc_copy = kmalloc(bh->b_size, GFP_KERNEL);
 	if (!alloc_copy) {
 		status = -ENOMEM;
-		goto bail;
+		goto out_commit;
 	}
 	memcpy(alloc_copy, alloc, bh->b_size);
 
@@ -266,7 +259,7 @@
 				      OCFS2_JOURNAL_ACCESS_WRITE);
 	if (status < 0) {
 		mlog_errno(status);
-		goto bail;
+		goto out_commit;
 	}
 
 	ocfs2_clear_local_alloc(alloc);
@@ -274,7 +267,7 @@
 	status = ocfs2_journal_dirty(handle, bh);
 	if (status < 0) {
 		mlog_errno(status);
-		goto bail;
+		goto out_commit;
 	}
 
 	brelse(bh);
@@ -286,16 +279,20 @@
 	if (status < 0)
 		mlog_errno(status);
 
-bail:
-	if (handle)
-		ocfs2_commit_trans(handle);
+out_commit:
+	ocfs2_commit_trans(osb, handle);
 
+out_unlock:
 	if (main_bm_bh)
 		brelse(main_bm_bh);
 
-	if (main_bm_inode)
-		iput(main_bm_inode);
+	ocfs2_meta_unlock(main_bm_inode, 1);
 
+out_mutex:
+	mutex_unlock(&main_bm_inode->i_mutex);
+	iput(main_bm_inode);
+
+out:
 	if (local_alloc_inode)
 		iput(local_alloc_inode);
 
@@ -385,61 +382,59 @@
 					struct ocfs2_dinode *alloc)
 {
 	int status;
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle;
 	struct buffer_head *main_bm_bh = NULL;
-	struct inode *main_bm_inode = NULL;
+	struct inode *main_bm_inode;
 
 	mlog_entry_void();
 
-	handle = ocfs2_alloc_handle(osb);
-	if (!handle) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto bail;
-	}
-
 	main_bm_inode = ocfs2_get_system_file_inode(osb,
 						    GLOBAL_BITMAP_SYSTEM_INODE,
 						    OCFS2_INVALID_SLOT);
 	if (!main_bm_inode) {
 		status = -EINVAL;
 		mlog_errno(status);
-		goto bail;
+		goto out;
 	}
 
-	ocfs2_handle_add_inode(handle, main_bm_inode);
-	status = ocfs2_meta_lock(main_bm_inode, handle, &main_bm_bh, 1);
+	mutex_lock(&main_bm_inode->i_mutex);
+
+	status = ocfs2_meta_lock(main_bm_inode, &main_bm_bh, 1);
 	if (status < 0) {
 		mlog_errno(status);
-		goto bail;
+		goto out_mutex;
 	}
 
-	handle = ocfs2_start_trans(osb, handle, OCFS2_WINDOW_MOVE_CREDITS);
+	handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
 		handle = NULL;
 		mlog_errno(status);
-		goto bail;
+		goto out_unlock;
 	}
 
 	/* we want the bitmap change to be recorded on disk asap */
-	ocfs2_handle_set_sync(handle, 1);
+	handle->h_sync = 1;
 
 	status = ocfs2_sync_local_to_main(osb, handle, alloc,
 					  main_bm_inode, main_bm_bh);
 	if (status < 0)
 		mlog_errno(status);
 
-bail:
-	if (handle)
-		ocfs2_commit_trans(handle);
+	ocfs2_commit_trans(osb, handle);
+
+out_unlock:
+	ocfs2_meta_unlock(main_bm_inode, 1);
+
+out_mutex:
+	mutex_unlock(&main_bm_inode->i_mutex);
 
 	if (main_bm_bh)
 		brelse(main_bm_bh);
 
-	if (main_bm_inode)
-		iput(main_bm_inode);
+	iput(main_bm_inode);
 
+out:
 	mlog_exit(status);
 	return status;
 }
@@ -452,7 +447,6 @@
  * our own in order to shift windows.
  */
 int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
-				   struct ocfs2_journal_handle *passed_handle,
 				   u32 bits_wanted,
 				   struct ocfs2_alloc_context *ac)
 {
@@ -463,9 +457,7 @@
 
 	mlog_entry_void();
 
-	BUG_ON(!passed_handle);
 	BUG_ON(!ac);
-	BUG_ON(passed_handle->flags & OCFS2_HANDLE_STARTED);
 
 	local_alloc_inode =
 		ocfs2_get_system_file_inode(osb,
@@ -476,7 +468,11 @@
 		mlog_errno(status);
 		goto bail;
 	}
-	ocfs2_handle_add_inode(passed_handle, local_alloc_inode);
+
+	mutex_lock(&local_alloc_inode->i_mutex);
+
+	ac->ac_inode = local_alloc_inode;
+	ac->ac_which = OCFS2_AC_USE_LOCAL;
 
 	if (osb->local_alloc_state != OCFS2_LA_ENABLED) {
 		status = -ENOSPC;
@@ -515,21 +511,17 @@
 		}
 	}
 
-	ac->ac_inode = igrab(local_alloc_inode);
 	get_bh(osb->local_alloc_bh);
 	ac->ac_bh = osb->local_alloc_bh;
-	ac->ac_which = OCFS2_AC_USE_LOCAL;
 	status = 0;
 bail:
-	if (local_alloc_inode)
-		iput(local_alloc_inode);
 
 	mlog_exit(status);
 	return status;
 }
 
 int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
-				 struct ocfs2_journal_handle *handle,
+				 handle_t *handle,
 				 struct ocfs2_alloc_context *ac,
 				 u32 min_bits,
 				 u32 *bit_off,
@@ -707,7 +699,7 @@
  * passed is used for caching.
  */
 static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
-				    struct ocfs2_journal_handle *handle,
+				    handle_t *handle,
 				    struct ocfs2_dinode *alloc,
 				    struct inode *main_bm_inode,
 				    struct buffer_head *main_bm_bh)
@@ -778,7 +770,6 @@
 }
 
 static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
-						struct ocfs2_journal_handle *handle,
 						struct ocfs2_alloc_context **ac,
 						struct inode **bitmap_inode,
 						struct buffer_head **bitmap_bh)
@@ -792,7 +783,6 @@
 		goto bail;
 	}
 
-	(*ac)->ac_handle = handle;
 	(*ac)->ac_bits_wanted = ocfs2_local_alloc_window_bits(osb);
 
 	status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
@@ -821,7 +811,7 @@
  * pass it the bitmap lock in lock_bh if you have it.
  */
 static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
-					struct ocfs2_journal_handle *handle,
+					handle_t *handle,
 					struct ocfs2_alloc_context *ac)
 {
 	int status = 0;
@@ -888,23 +878,15 @@
 	int status = 0;
 	struct buffer_head *main_bm_bh = NULL;
 	struct inode *main_bm_inode = NULL;
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle = NULL;
 	struct ocfs2_dinode *alloc;
 	struct ocfs2_dinode *alloc_copy = NULL;
 	struct ocfs2_alloc_context *ac = NULL;
 
 	mlog_entry_void();
 
-	handle = ocfs2_alloc_handle(osb);
-	if (!handle) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto bail;
-	}
-
 	/* This will lock the main bitmap for us. */
 	status = ocfs2_local_alloc_reserve_for_window(osb,
-						      handle,
 						      &ac,
 						      &main_bm_inode,
 						      &main_bm_bh);
@@ -914,7 +896,7 @@
 		goto bail;
 	}
 
-	handle = ocfs2_start_trans(osb, handle, OCFS2_WINDOW_MOVE_CREDITS);
+	handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
 		handle = NULL;
@@ -972,7 +954,7 @@
 	status = 0;
 bail:
 	if (handle)
-		ocfs2_commit_trans(handle);
+		ocfs2_commit_trans(osb, handle);
 
 	if (main_bm_bh)
 		brelse(main_bm_bh);
diff --git a/fs/ocfs2/localalloc.h b/fs/ocfs2/localalloc.h
index 30f88ce..385a101 100644
--- a/fs/ocfs2/localalloc.h
+++ b/fs/ocfs2/localalloc.h
@@ -42,12 +42,11 @@
 
 struct ocfs2_alloc_context;
 int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
-				   struct ocfs2_journal_handle *passed_handle,
 				   u32 bits_wanted,
 				   struct ocfs2_alloc_context *ac);
 
 int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
-				 struct ocfs2_journal_handle *handle,
+				 handle_t *handle,
 				 struct ocfs2_alloc_context *ac,
 				 u32 min_bits,
 				 u32 *bit_off,
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 83934e3..69f85ae 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -82,6 +82,8 @@
 
 int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
 {
+	int ret = 0, lock_level = 0;
+
 	/* We don't want to support shared writable mappings yet. */
 	if (((vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_MAYSHARE))
 	    && ((vma->vm_flags & VM_WRITE) || (vma->vm_flags & VM_MAYWRITE))) {
@@ -91,7 +93,14 @@
 		return -EINVAL;
 	}
 
-	file_accessed(file);
+	ret = ocfs2_meta_lock_atime(file->f_dentry->d_inode,
+				    file->f_vfsmnt, &lock_level);
+	if (ret < 0) {
+		mlog_errno(ret);
+		goto out;
+	}
+	ocfs2_meta_unlock(file->f_dentry->d_inode, lock_level);
+out:
 	vma->vm_ops = &ocfs2_file_vm_ops;
 	return 0;
 }
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index a57b751..21db45d 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -75,12 +75,12 @@
 					unsigned long offset,
 					struct ocfs2_dir_entry **res_dir);
 
-static int ocfs2_delete_entry(struct ocfs2_journal_handle *handle,
+static int ocfs2_delete_entry(handle_t *handle,
 			      struct inode *dir,
 			      struct ocfs2_dir_entry *de_del,
 			      struct buffer_head *bh);
 
-static int __ocfs2_add_entry(struct ocfs2_journal_handle *handle,
+static int __ocfs2_add_entry(handle_t *handle,
 			     struct inode *dir,
 			     const char *name, int namelen,
 			     struct inode *inode, u64 blkno,
@@ -93,43 +93,37 @@
 			      dev_t dev,
 			      struct buffer_head **new_fe_bh,
 			      struct buffer_head *parent_fe_bh,
-			      struct ocfs2_journal_handle *handle,
+			      handle_t *handle,
 			      struct inode **ret_inode,
 			      struct ocfs2_alloc_context *inode_ac);
 
 static int ocfs2_fill_new_dir(struct ocfs2_super *osb,
-			      struct ocfs2_journal_handle *handle,
+			      handle_t *handle,
 			      struct inode *parent,
 			      struct inode *inode,
 			      struct buffer_head *fe_bh,
 			      struct ocfs2_alloc_context *data_ac);
 
-static int ocfs2_double_lock(struct ocfs2_super *osb,
-			     struct ocfs2_journal_handle *handle,
-			     struct buffer_head **bh1,
-			     struct inode *inode1,
-			     struct buffer_head **bh2,
-			     struct inode *inode2);
-
 static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
-				    struct ocfs2_journal_handle *handle,
+				    struct inode **ret_orphan_dir,
 				    struct inode *inode,
 				    char *name,
 				    struct buffer_head **de_bh);
 
 static int ocfs2_orphan_add(struct ocfs2_super *osb,
-			    struct ocfs2_journal_handle *handle,
+			    handle_t *handle,
 			    struct inode *inode,
 			    struct ocfs2_dinode *fe,
 			    char *name,
-			    struct buffer_head *de_bh);
+			    struct buffer_head *de_bh,
+			    struct inode *orphan_dir_inode);
 
 static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
-				     struct ocfs2_journal_handle *handle,
+				     handle_t *handle,
 				     struct inode *inode,
 				     const char *symname);
 
-static inline int ocfs2_add_entry(struct ocfs2_journal_handle *handle,
+static inline int ocfs2_add_entry(handle_t *handle,
 				  struct dentry *dentry,
 				  struct inode *inode, u64 blkno,
 				  struct buffer_head *parent_fe_bh,
@@ -165,7 +159,7 @@
 	mlog(0, "find name %.*s in directory %llu\n", dentry->d_name.len,
 	     dentry->d_name.name, (unsigned long long)OCFS2_I(dir)->ip_blkno);
 
-	status = ocfs2_meta_lock(dir, NULL, NULL, 0);
+	status = ocfs2_meta_lock(dir, NULL, 0);
 	if (status < 0) {
 		if (status != -ENOENT)
 			mlog_errno(status);
@@ -242,7 +236,7 @@
 }
 
 static int ocfs2_fill_new_dir(struct ocfs2_super *osb,
-			      struct ocfs2_journal_handle *handle,
+			      handle_t *handle,
 			      struct inode *parent,
 			      struct inode *inode,
 			      struct buffer_head *fe_bh,
@@ -317,7 +311,7 @@
 {
 	int status = 0;
 	struct buffer_head *parent_fe_bh = NULL;
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle = NULL;
 	struct ocfs2_super *osb;
 	struct ocfs2_dinode *dirfe;
 	struct buffer_head *new_fe_bh = NULL;
@@ -333,18 +327,11 @@
 	/* get our super block */
 	osb = OCFS2_SB(dir->i_sb);
 
-	handle = ocfs2_alloc_handle(osb);
-	if (handle == NULL) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto leave;
-	}
-
-	status = ocfs2_meta_lock(dir, handle, &parent_fe_bh, 1);
+	status = ocfs2_meta_lock(dir, &parent_fe_bh, 1);
 	if (status < 0) {
 		if (status != -ENOENT)
 			mlog_errno(status);
-		goto leave;
+		return status;
 	}
 
 	if (S_ISDIR(mode) && (dir->i_nlink >= OCFS2_LINK_MAX)) {
@@ -374,7 +361,7 @@
 	}
 
 	/* reserve an inode spot */
-	status = ocfs2_reserve_new_inode(osb, handle, &inode_ac);
+	status = ocfs2_reserve_new_inode(osb, &inode_ac);
 	if (status < 0) {
 		if (status != -ENOSPC)
 			mlog_errno(status);
@@ -384,7 +371,7 @@
 	/* are we making a directory? If so, reserve a cluster for his
 	 * 1st extent. */
 	if (S_ISDIR(mode)) {
-		status = ocfs2_reserve_clusters(osb, handle, 1, &data_ac);
+		status = ocfs2_reserve_clusters(osb, 1, &data_ac);
 		if (status < 0) {
 			if (status != -ENOSPC)
 				mlog_errno(status);
@@ -392,7 +379,7 @@
 		}
 	}
 
-	handle = ocfs2_start_trans(osb, handle, OCFS2_MKNOD_CREDITS);
+	handle = ocfs2_start_trans(osb, OCFS2_MKNOD_CREDITS);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
 		handle = NULL;
@@ -453,7 +440,9 @@
 	status = 0;
 leave:
 	if (handle)
-		ocfs2_commit_trans(handle);
+		ocfs2_commit_trans(osb, handle);
+
+	ocfs2_meta_unlock(dir, 1);
 
 	if (status == -ENOSPC)
 		mlog(0, "Disk is full\n");
@@ -487,7 +476,7 @@
 			      dev_t dev,
 			      struct buffer_head **new_fe_bh,
 			      struct buffer_head *parent_fe_bh,
-			      struct ocfs2_journal_handle *handle,
+			      handle_t *handle,
 			      struct inode **ret_inode,
 			      struct ocfs2_alloc_context *inode_ac)
 {
@@ -653,7 +642,7 @@
 		      struct inode *dir,
 		      struct dentry *dentry)
 {
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle;
 	struct inode *inode = old_dentry->d_inode;
 	int err;
 	struct buffer_head *fe_bh = NULL;
@@ -666,68 +655,60 @@
 		   old_dentry->d_name.len, old_dentry->d_name.name,
 		   dentry->d_name.len, dentry->d_name.name);
 
-	if (S_ISDIR(inode->i_mode)) {
-		err = -EPERM;
-		goto bail;
-	}
+	if (S_ISDIR(inode->i_mode))
+		return -EPERM;
 
-	handle = ocfs2_alloc_handle(osb);
-	if (handle == NULL) {
-		err = -ENOMEM;
-		goto bail;
-	}
-
-	err = ocfs2_meta_lock(dir, handle, &parent_fe_bh, 1);
+	err = ocfs2_meta_lock(dir, &parent_fe_bh, 1);
 	if (err < 0) {
 		if (err != -ENOENT)
 			mlog_errno(err);
-		goto bail;
+		return err;
 	}
 
 	if (!dir->i_nlink) {
 		err = -ENOENT;
-		goto bail;
+		goto out;
 	}
 
 	err = ocfs2_check_dir_for_entry(dir, dentry->d_name.name,
 					dentry->d_name.len);
 	if (err)
-		goto bail;
+		goto out;
 
 	err = ocfs2_prepare_dir_for_insert(osb, dir, parent_fe_bh,
 					   dentry->d_name.name,
 					   dentry->d_name.len, &de_bh);
 	if (err < 0) {
 		mlog_errno(err);
-		goto bail;
+		goto out;
 	}
 
-	err = ocfs2_meta_lock(inode, handle, &fe_bh, 1);
+	err = ocfs2_meta_lock(inode, &fe_bh, 1);
 	if (err < 0) {
 		if (err != -ENOENT)
 			mlog_errno(err);
-		goto bail;
+		goto out;
 	}
 
 	fe = (struct ocfs2_dinode *) fe_bh->b_data;
 	if (le16_to_cpu(fe->i_links_count) >= OCFS2_LINK_MAX) {
 		err = -EMLINK;
-		goto bail;
+		goto out_unlock_inode;
 	}
 
-	handle = ocfs2_start_trans(osb, handle, OCFS2_LINK_CREDITS);
+	handle = ocfs2_start_trans(osb, OCFS2_LINK_CREDITS);
 	if (IS_ERR(handle)) {
 		err = PTR_ERR(handle);
 		handle = NULL;
 		mlog_errno(err);
-		goto bail;
+		goto out_unlock_inode;
 	}
 
 	err = ocfs2_journal_access(handle, inode, fe_bh,
 				   OCFS2_JOURNAL_ACCESS_WRITE);
 	if (err < 0) {
 		mlog_errno(err);
-		goto bail;
+		goto out_commit;
 	}
 
 	inc_nlink(inode);
@@ -741,7 +722,7 @@
 		le16_add_cpu(&fe->i_links_count, -1);
 		drop_nlink(inode);
 		mlog_errno(err);
-		goto bail;
+		goto out_commit;
 	}
 
 	err = ocfs2_add_entry(handle, dentry, inode,
@@ -751,21 +732,27 @@
 		le16_add_cpu(&fe->i_links_count, -1);
 		drop_nlink(inode);
 		mlog_errno(err);
-		goto bail;
+		goto out_commit;
 	}
 
 	err = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(dir)->ip_blkno);
 	if (err) {
 		mlog_errno(err);
-		goto bail;
+		goto out_commit;
 	}
 
 	atomic_inc(&inode->i_count);
 	dentry->d_op = &ocfs2_dentry_ops;
 	d_instantiate(dentry, inode);
-bail:
-	if (handle)
-		ocfs2_commit_trans(handle);
+
+out_commit:
+	ocfs2_commit_trans(osb, handle);
+out_unlock_inode:
+	ocfs2_meta_unlock(inode, 1);
+
+out:
+	ocfs2_meta_unlock(dir, 1);
+
 	if (de_bh)
 		brelse(de_bh);
 	if (fe_bh)
@@ -812,13 +799,15 @@
 			struct dentry *dentry)
 {
 	int status;
+	int child_locked = 0;
 	struct inode *inode = dentry->d_inode;
+	struct inode *orphan_dir = NULL;
 	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
 	u64 blkno;
 	struct ocfs2_dinode *fe = NULL;
 	struct buffer_head *fe_bh = NULL;
 	struct buffer_head *parent_node_bh = NULL;
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle = NULL;
 	struct ocfs2_dir_entry *dirent = NULL;
 	struct buffer_head *dirent_bh = NULL;
 	char orphan_name[OCFS2_ORPHAN_NAMELEN + 1];
@@ -833,22 +822,14 @@
 
 	if (inode == osb->root_inode) {
 		mlog(0, "Cannot delete the root directory\n");
-		status = -EPERM;
-		goto leave;
+		return -EPERM;
 	}
 
-	handle = ocfs2_alloc_handle(osb);
-	if (handle == NULL) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto leave;
-	}
-
-	status = ocfs2_meta_lock(dir, handle, &parent_node_bh, 1);
+	status = ocfs2_meta_lock(dir, &parent_node_bh, 1);
 	if (status < 0) {
 		if (status != -ENOENT)
 			mlog_errno(status);
-		goto leave;
+		return status;
 	}
 
 	status = ocfs2_find_files_on_disk(dentry->d_name.name,
@@ -869,12 +850,13 @@
 		goto leave;
 	}
 
-	status = ocfs2_meta_lock(inode, handle, &fe_bh, 1);
+	status = ocfs2_meta_lock(inode, &fe_bh, 1);
 	if (status < 0) {
 		if (status != -ENOENT)
 			mlog_errno(status);
 		goto leave;
 	}
+	child_locked = 1;
 
 	if (S_ISDIR(inode->i_mode)) {
 	       	if (!ocfs2_empty_dir(inode)) {
@@ -895,7 +877,7 @@
 	}
 
 	if (inode_is_unlinkable(inode)) {
-		status = ocfs2_prepare_orphan_dir(osb, handle, inode,
+		status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, inode,
 						  orphan_name,
 						  &orphan_entry_bh);
 		if (status < 0) {
@@ -904,7 +886,7 @@
 		}
 	}
 
-	handle = ocfs2_start_trans(osb, handle, OCFS2_UNLINK_CREDITS);
+	handle = ocfs2_start_trans(osb, OCFS2_UNLINK_CREDITS);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
 		handle = NULL;
@@ -923,7 +905,7 @@
 
 	if (inode_is_unlinkable(inode)) {
 		status = ocfs2_orphan_add(osb, handle, inode, fe, orphan_name,
-					  orphan_entry_bh);
+					  orphan_entry_bh, orphan_dir);
 		if (status < 0) {
 			mlog_errno(status);
 			goto leave;
@@ -960,7 +942,19 @@
 
 leave:
 	if (handle)
-		ocfs2_commit_trans(handle);
+		ocfs2_commit_trans(osb, handle);
+
+	if (child_locked)
+		ocfs2_meta_unlock(inode, 1);
+
+	ocfs2_meta_unlock(dir, 1);
+
+	if (orphan_dir) {
+		/* This was locked for us in ocfs2_prepare_orphan_dir() */
+		ocfs2_meta_unlock(orphan_dir, 1);
+		mutex_unlock(&orphan_dir->i_mutex);
+		iput(orphan_dir);
+	}
 
 	if (fe_bh)
 		brelse(fe_bh);
@@ -984,7 +978,6 @@
  * if they have the same id, then the 1st one is the only one locked.
  */
 static int ocfs2_double_lock(struct ocfs2_super *osb,
-			     struct ocfs2_journal_handle *handle,
 			     struct buffer_head **bh1,
 			     struct inode *inode1,
 			     struct buffer_head **bh2,
@@ -1000,8 +993,6 @@
 		   (unsigned long long)oi1->ip_blkno,
 		   (unsigned long long)oi2->ip_blkno);
 
-	BUG_ON(!handle);
-
 	if (*bh1)
 		*bh1 = NULL;
 	if (*bh2)
@@ -1021,25 +1012,41 @@
 			inode1 = tmpinode;
 		}
 		/* lock id2 */
-		status = ocfs2_meta_lock(inode2, handle, bh2, 1);
+		status = ocfs2_meta_lock(inode2, bh2, 1);
 		if (status < 0) {
 			if (status != -ENOENT)
 				mlog_errno(status);
 			goto bail;
 		}
 	}
+
 	/* lock id1 */
-	status = ocfs2_meta_lock(inode1, handle, bh1, 1);
+	status = ocfs2_meta_lock(inode1, bh1, 1);
 	if (status < 0) {
+		/*
+		 * An error return must mean that no cluster locks
+		 * are held on function exit.
+		 */
+		if (oi1->ip_blkno != oi2->ip_blkno)
+			ocfs2_meta_unlock(inode2, 1);
+
 		if (status != -ENOENT)
 			mlog_errno(status);
-		goto bail;
 	}
+
 bail:
 	mlog_exit(status);
 	return status;
 }
 
+static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2)
+{
+	ocfs2_meta_unlock(inode1, 1);
+
+	if (inode1 != inode2)
+		ocfs2_meta_unlock(inode2, 1);
+}
+
 #define PARENT_INO(buffer) \
 	((struct ocfs2_dir_entry *) \
 	 ((char *)buffer + \
@@ -1050,9 +1057,11 @@
 			struct inode *new_dir,
 			struct dentry *new_dentry)
 {
-	int status = 0, rename_lock = 0;
+	int status = 0, rename_lock = 0, parents_locked = 0;
+	int old_child_locked = 0, new_child_locked = 0;
 	struct inode *old_inode = old_dentry->d_inode;
 	struct inode *new_inode = new_dentry->d_inode;
+	struct inode *orphan_dir = NULL;
 	struct ocfs2_dinode *newfe = NULL;
 	char orphan_name[OCFS2_ORPHAN_NAMELEN + 1];
 	struct buffer_head *orphan_entry_bh = NULL;
@@ -1060,7 +1069,7 @@
 	struct buffer_head *insert_entry_bh = NULL;
 	struct ocfs2_super *osb = NULL;
 	u64 newfe_blkno;
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle = NULL;
 	struct buffer_head *old_dir_bh = NULL;
 	struct buffer_head *new_dir_bh = NULL;
 	struct ocfs2_dir_entry *old_de = NULL, *new_de = NULL; // dirent for old_dentry
@@ -1105,21 +1114,14 @@
 		rename_lock = 1;
 	}
 
-	handle = ocfs2_alloc_handle(osb);
-	if (handle == NULL) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto bail;
-	}
-
 	/* if old and new are the same, this'll just do one lock. */
-	status = ocfs2_double_lock(osb, handle,
-				  &old_dir_bh, old_dir,
-				  &new_dir_bh, new_dir);
+	status = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
+				   &new_dir_bh, new_dir);
 	if (status < 0) {
 		mlog_errno(status);
 		goto bail;
 	}
+	parents_locked = 1;
 
 	/* make sure both dirs have bhs
 	 * get an extra ref on old_dir_bh if old==new */
@@ -1140,12 +1142,13 @@
 	 * the vote thread on other nodes won't have to concurrently
 	 * downconvert the inode and the dentry locks.
 	 */
-	status = ocfs2_meta_lock(old_inode, handle, NULL, 1);
+	status = ocfs2_meta_lock(old_inode, NULL, 1);
 	if (status < 0) {
 		if (status != -ENOENT)
 			mlog_errno(status);
 		goto bail;
 	}
+	old_child_locked = 1;
 
 	status = ocfs2_remote_dentry_delete(old_dentry);
 	if (status < 0) {
@@ -1231,12 +1234,13 @@
 			goto bail;
 		}
 
-		status = ocfs2_meta_lock(new_inode, handle, &newfe_bh, 1);
+		status = ocfs2_meta_lock(new_inode, &newfe_bh, 1);
 		if (status < 0) {
 			if (status != -ENOENT)
 				mlog_errno(status);
 			goto bail;
 		}
+		new_child_locked = 1;
 
 		status = ocfs2_remote_dentry_delete(new_dentry);
 		if (status < 0) {
@@ -1252,7 +1256,7 @@
 		     (unsigned long long)newfe_bh->b_blocknr : 0ULL);
 
 		if (S_ISDIR(new_inode->i_mode) || (new_inode->i_nlink == 1)) {
-			status = ocfs2_prepare_orphan_dir(osb, handle,
+			status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
 							  new_inode,
 							  orphan_name,
 							  &orphan_entry_bh);
@@ -1280,7 +1284,7 @@
 		}
 	}
 
-	handle = ocfs2_start_trans(osb, handle, OCFS2_RENAME_CREDITS);
+	handle = ocfs2_start_trans(osb, OCFS2_RENAME_CREDITS);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
 		handle = NULL;
@@ -1307,7 +1311,7 @@
 		    (newfe->i_links_count == cpu_to_le16(1))){
 			status = ocfs2_orphan_add(osb, handle, new_inode,
 						  newfe, orphan_name,
-						  orphan_entry_bh);
+						  orphan_entry_bh, orphan_dir);
 			if (status < 0) {
 				mlog_errno(status);
 				goto bail;
@@ -1424,7 +1428,23 @@
 		ocfs2_rename_unlock(osb);
 
 	if (handle)
-		ocfs2_commit_trans(handle);
+		ocfs2_commit_trans(osb, handle);
+
+	if (parents_locked)
+		ocfs2_double_unlock(old_dir, new_dir);
+
+	if (old_child_locked)
+		ocfs2_meta_unlock(old_inode, 1);
+
+	if (new_child_locked)
+		ocfs2_meta_unlock(new_inode, 1);
+
+	if (orphan_dir) {
+		/* This was locked for us in ocfs2_prepare_orphan_dir() */
+		ocfs2_meta_unlock(orphan_dir, 1);
+		mutex_unlock(&orphan_dir->i_mutex);
+		iput(orphan_dir);
+	}
 
 	if (new_inode)
 		sync_mapping_buffers(old_inode->i_mapping);
@@ -1458,7 +1478,7 @@
  * data, including the null terminator.
  */
 static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
-				     struct ocfs2_journal_handle *handle,
+				     handle_t *handle,
 				     struct inode *inode,
 				     const char *symname)
 {
@@ -1573,7 +1593,7 @@
 	struct buffer_head *parent_fe_bh = NULL;
 	struct ocfs2_dinode *fe = NULL;
 	struct ocfs2_dinode *dirfe;
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle = NULL;
 	struct ocfs2_alloc_context *inode_ac = NULL;
 	struct ocfs2_alloc_context *data_ac = NULL;
 
@@ -1587,19 +1607,12 @@
 
 	credits = ocfs2_calc_symlink_credits(sb);
 
-	handle = ocfs2_alloc_handle(osb);
-	if (handle == NULL) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto bail;
-	}
-
 	/* lock the parent directory */
-	status = ocfs2_meta_lock(dir, handle, &parent_fe_bh, 1);
+	status = ocfs2_meta_lock(dir, &parent_fe_bh, 1);
 	if (status < 0) {
 		if (status != -ENOENT)
 			mlog_errno(status);
-		goto bail;
+		return status;
 	}
 
 	dirfe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
@@ -1622,7 +1635,7 @@
 		goto bail;
 	}
 
-	status = ocfs2_reserve_new_inode(osb, handle, &inode_ac);
+	status = ocfs2_reserve_new_inode(osb, &inode_ac);
 	if (status < 0) {
 		if (status != -ENOSPC)
 			mlog_errno(status);
@@ -1631,7 +1644,7 @@
 
 	/* don't reserve bitmap space for fast symlinks. */
 	if (l > ocfs2_fast_symlink_chars(sb)) {
-		status = ocfs2_reserve_clusters(osb, handle, 1, &data_ac);
+		status = ocfs2_reserve_clusters(osb, 1, &data_ac);
 		if (status < 0) {
 			if (status != -ENOSPC)
 				mlog_errno(status);
@@ -1639,7 +1652,7 @@
 		}
 	}
 
-	handle = ocfs2_start_trans(osb, handle, credits);
+	handle = ocfs2_start_trans(osb, credits);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
 		handle = NULL;
@@ -1717,7 +1730,10 @@
 	d_instantiate(dentry, inode);
 bail:
 	if (handle)
-		ocfs2_commit_trans(handle);
+		ocfs2_commit_trans(osb, handle);
+
+	ocfs2_meta_unlock(dir, 1);
+
 	if (new_fe_bh)
 		brelse(new_fe_bh);
 	if (parent_fe_bh)
@@ -1768,7 +1784,7 @@
  * If you pass me insert_bh, I'll skip the search of the other dir
  * blocks and put the record in there.
  */
-static int __ocfs2_add_entry(struct ocfs2_journal_handle *handle,
+static int __ocfs2_add_entry(handle_t *handle,
 			     struct inode *dir,
 			     const char *name, int namelen,
 			     struct inode *inode, u64 blkno,
@@ -1854,7 +1870,7 @@
  * ocfs2_delete_entry deletes a directory entry by merging it with the
  * previous entry
  */
-static int ocfs2_delete_entry(struct ocfs2_journal_handle *handle,
+static int ocfs2_delete_entry(handle_t *handle,
 			      struct inode *dir,
 			      struct ocfs2_dir_entry *de_del,
 			      struct buffer_head *bh)
@@ -2085,19 +2101,19 @@
 }
 
 static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
-				    struct ocfs2_journal_handle *handle,
+				    struct inode **ret_orphan_dir,
 				    struct inode *inode,
 				    char *name,
 				    struct buffer_head **de_bh)
 {
-	struct inode *orphan_dir_inode = NULL;
+	struct inode *orphan_dir_inode;
 	struct buffer_head *orphan_dir_bh = NULL;
 	int status = 0;
 
 	status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name);
 	if (status < 0) {
 		mlog_errno(status);
-		goto leave;
+		return status;
 	}
 
 	orphan_dir_inode = ocfs2_get_system_file_inode(osb,
@@ -2106,11 +2122,12 @@
 	if (!orphan_dir_inode) {
 		status = -ENOENT;
 		mlog_errno(status);
-		goto leave;
+		return status;
 	}
 
-	ocfs2_handle_add_inode(handle, orphan_dir_inode);
-	status = ocfs2_meta_lock(orphan_dir_inode, handle, &orphan_dir_bh, 1);
+	mutex_lock(&orphan_dir_inode->i_mutex);
+
+	status = ocfs2_meta_lock(orphan_dir_inode, &orphan_dir_bh, 1);
 	if (status < 0) {
 		mlog_errno(status);
 		goto leave;
@@ -2120,13 +2137,19 @@
 					      orphan_dir_bh, name,
 					      OCFS2_ORPHAN_NAMELEN, de_bh);
 	if (status < 0) {
+		ocfs2_meta_unlock(orphan_dir_inode, 1);
+
 		mlog_errno(status);
 		goto leave;
 	}
 
+	*ret_orphan_dir = orphan_dir_inode;
+
 leave:
-	if (orphan_dir_inode)
+	if (status) {
+		mutex_unlock(&orphan_dir_inode->i_mutex);
 		iput(orphan_dir_inode);
+	}
 
 	if (orphan_dir_bh)
 		brelse(orphan_dir_bh);
@@ -2136,28 +2159,19 @@
 }
 
 static int ocfs2_orphan_add(struct ocfs2_super *osb,
-			    struct ocfs2_journal_handle *handle,
+			    handle_t *handle,
 			    struct inode *inode,
 			    struct ocfs2_dinode *fe,
 			    char *name,
-			    struct buffer_head *de_bh)
+			    struct buffer_head *de_bh,
+			    struct inode *orphan_dir_inode)
 {
-	struct inode *orphan_dir_inode = NULL;
 	struct buffer_head *orphan_dir_bh = NULL;
 	int status = 0;
 	struct ocfs2_dinode *orphan_fe;
 
 	mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino);
 
-	orphan_dir_inode = ocfs2_get_system_file_inode(osb,
-						       ORPHAN_DIR_SYSTEM_INODE,
-						       osb->slot_num);
-	if (!orphan_dir_inode) {
-		status = -ENOENT;
-		mlog_errno(status);
-		goto leave;
-	}
-
 	status = ocfs2_read_block(osb,
 				  OCFS2_I(orphan_dir_inode)->ip_blkno,
 				  &orphan_dir_bh, OCFS2_BH_CACHED,
@@ -2209,9 +2223,6 @@
 	     (unsigned long long)OCFS2_I(inode)->ip_blkno, osb->slot_num);
 
 leave:
-	if (orphan_dir_inode)
-		iput(orphan_dir_inode);
-
 	if (orphan_dir_bh)
 		brelse(orphan_dir_bh);
 
@@ -2221,7 +2232,7 @@
 
 /* unlike orphan_add, we expect the orphan dir to already be locked here. */
 int ocfs2_orphan_del(struct ocfs2_super *osb,
-		     struct ocfs2_journal_handle *handle,
+		     handle_t *handle,
 		     struct inode *orphan_dir_inode,
 		     struct inode *inode,
 		     struct buffer_head *orphan_dir_bh)
@@ -2300,4 +2311,5 @@
 	.rename		= ocfs2_rename,
 	.setattr	= ocfs2_setattr,
 	.getattr	= ocfs2_getattr,
+	.permission	= ocfs2_permission,
 };
diff --git a/fs/ocfs2/namei.h b/fs/ocfs2/namei.h
index deaaa97..8425944 100644
--- a/fs/ocfs2/namei.h
+++ b/fs/ocfs2/namei.h
@@ -39,7 +39,7 @@
 				     struct inode *dir,
 				     struct ocfs2_dir_entry **res_dir);
 int ocfs2_orphan_del(struct ocfs2_super *osb,
-		     struct ocfs2_journal_handle *handle,
+		     handle_t *handle,
 		     struct inode *orphan_dir_inode,
 		     struct inode *inode,
 		     struct buffer_head *orphan_dir_bh);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 0462a7f..b767fd7 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -34,6 +34,7 @@
 #include <linux/workqueue.h>
 #include <linux/kref.h>
 #include <linux/mutex.h>
+#include <linux/jbd.h>
 
 #include "cluster/nodemanager.h"
 #include "cluster/heartbeat.h"
@@ -179,9 +180,9 @@
 #define OCFS2_OSB_SOFT_RO	0x0001
 #define OCFS2_OSB_HARD_RO	0x0002
 #define OCFS2_OSB_ERROR_FS	0x0004
+#define OCFS2_DEFAULT_ATIME_QUANTUM	60
 
 struct ocfs2_journal;
-struct ocfs2_journal_handle;
 struct ocfs2_super
 {
 	struct task_struct *commit_task;
@@ -218,6 +219,7 @@
 	unsigned long osb_flags;
 
 	unsigned long s_mount_opt;
+	unsigned int s_atime_quantum;
 
 	u16 max_slots;
 	s16 node_num;
@@ -283,7 +285,7 @@
 	/* Truncate log info */
 	struct inode			*osb_tl_inode;
 	struct buffer_head		*osb_tl_bh;
-	struct work_struct		osb_truncate_log_wq;
+	struct delayed_work		osb_truncate_log_wq;
 
 	struct ocfs2_node_map		osb_recovering_orphan_dirs;
 	unsigned int			*osb_orphan_wipes;
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 9d91e66..000d71c 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -49,7 +49,7 @@
 static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
 static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
 static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
-static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle,
+static int ocfs2_block_group_fill(handle_t *handle,
 				  struct inode *alloc_inode,
 				  struct buffer_head *bg_bh,
 				  u64 group_blkno,
@@ -59,9 +59,6 @@
 				   struct inode *alloc_inode,
 				   struct buffer_head *bh);
 
-static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
-				       struct ocfs2_alloc_context *ac);
-
 static int ocfs2_cluster_group_search(struct inode *inode,
 				      struct buffer_head *group_bh,
 				      u32 bits_wanted, u32 min_bits,
@@ -72,6 +69,7 @@
 				    u16 *bit_off, u16 *bits_found);
 static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
 				     struct ocfs2_alloc_context *ac,
+				     handle_t *handle,
 				     u32 bits_wanted,
 				     u32 min_bits,
 				     u16 *bit_off,
@@ -79,20 +77,20 @@
 				     u64 *bg_blkno);
 static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
 					 int nr);
-static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle,
+static inline int ocfs2_block_group_set_bits(handle_t *handle,
 					     struct inode *alloc_inode,
 					     struct ocfs2_group_desc *bg,
 					     struct buffer_head *group_bh,
 					     unsigned int bit_off,
 					     unsigned int num_bits);
-static inline int ocfs2_block_group_clear_bits(struct ocfs2_journal_handle *handle,
+static inline int ocfs2_block_group_clear_bits(handle_t *handle,
 					       struct inode *alloc_inode,
 					       struct ocfs2_group_desc *bg,
 					       struct buffer_head *group_bh,
 					       unsigned int bit_off,
 					       unsigned int num_bits);
 
-static int ocfs2_relink_block_group(struct ocfs2_journal_handle *handle,
+static int ocfs2_relink_block_group(handle_t *handle,
 				    struct inode *alloc_inode,
 				    struct buffer_head *fe_bh,
 				    struct buffer_head *bg_bh,
@@ -100,7 +98,7 @@
 				    u16 chain);
 static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
 						     u32 wanted);
-static int ocfs2_free_suballoc_bits(struct ocfs2_journal_handle *handle,
+static int ocfs2_free_suballoc_bits(handle_t *handle,
 				    struct inode *alloc_inode,
 				    struct buffer_head *alloc_bh,
 				    unsigned int start_bit,
@@ -120,8 +118,16 @@
 
 void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
 {
-	if (ac->ac_inode)
-		iput(ac->ac_inode);
+	struct inode *inode = ac->ac_inode;
+
+	if (inode) {
+		if (ac->ac_which != OCFS2_AC_USE_LOCAL)
+			ocfs2_meta_unlock(inode, 1);
+
+		mutex_unlock(&inode->i_mutex);
+
+		iput(inode);
+	}
 	if (ac->ac_bh)
 		brelse(ac->ac_bh);
 	kfree(ac);
@@ -190,7 +196,7 @@
 	return 0;
 }
 
-static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle,
+static int ocfs2_block_group_fill(handle_t *handle,
 				  struct inode *alloc_inode,
 				  struct buffer_head *bg_bh,
 				  u64 group_blkno,
@@ -273,7 +279,7 @@
 	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
 	struct ocfs2_chain_list *cl;
 	struct ocfs2_alloc_context *ac = NULL;
-	struct ocfs2_journal_handle *handle = NULL;
+	handle_t *handle = NULL;
 	u32 bit_off, num_bits;
 	u16 alloc_rec;
 	u64 bg_blkno;
@@ -284,16 +290,8 @@
 
 	mlog_entry_void();
 
-	handle = ocfs2_alloc_handle(osb);
-	if (!handle) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto bail;
-	}
-
 	cl = &fe->id2.i_chain;
 	status = ocfs2_reserve_clusters(osb,
-					handle,
 					le16_to_cpu(cl->cl_cpg),
 					&ac);
 	if (status < 0) {
@@ -304,7 +302,7 @@
 
 	credits = ocfs2_calc_group_alloc_credits(osb->sb,
 						 le16_to_cpu(cl->cl_cpg));
-	handle = ocfs2_start_trans(osb, handle, credits);
+	handle = ocfs2_start_trans(osb, credits);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
 		handle = NULL;
@@ -389,7 +387,7 @@
 	status = 0;
 bail:
 	if (handle)
-		ocfs2_commit_trans(handle);
+		ocfs2_commit_trans(osb, handle);
 
 	if (ac)
 		ocfs2_free_alloc_context(ac);
@@ -402,27 +400,38 @@
 }
 
 static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
-				       struct ocfs2_alloc_context *ac)
+				       struct ocfs2_alloc_context *ac,
+				       int type,
+				       u32 slot)
 {
 	int status;
 	u32 bits_wanted = ac->ac_bits_wanted;
-	struct inode *alloc_inode = ac->ac_inode;
+	struct inode *alloc_inode;
 	struct buffer_head *bh = NULL;
-	struct ocfs2_journal_handle *handle = ac->ac_handle;
 	struct ocfs2_dinode *fe;
 	u32 free_bits;
 
 	mlog_entry_void();
 
-	BUG_ON(handle->flags & OCFS2_HANDLE_STARTED);
-
-	ocfs2_handle_add_inode(handle, alloc_inode);
-	status = ocfs2_meta_lock(alloc_inode, handle, &bh, 1);
-	if (status < 0) {
-		mlog_errno(status);
-		goto bail;
+	alloc_inode = ocfs2_get_system_file_inode(osb, type, slot);
+	if (!alloc_inode) {
+		mlog_errno(-EINVAL);
+		return -EINVAL;
 	}
 
+	mutex_lock(&alloc_inode->i_mutex);
+
+	status = ocfs2_meta_lock(alloc_inode, &bh, 1);
+	if (status < 0) {
+		mutex_unlock(&alloc_inode->i_mutex);
+		iput(alloc_inode);
+
+		mlog_errno(status);
+		return status;
+	}
+
+	ac->ac_inode = alloc_inode;
+
 	fe = (struct ocfs2_dinode *) bh->b_data;
 	if (!OCFS2_IS_VALID_DINODE(fe)) {
 		OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);
@@ -473,12 +482,11 @@
 }
 
 int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
-			       struct ocfs2_journal_handle *handle,
 			       struct ocfs2_dinode *fe,
 			       struct ocfs2_alloc_context **ac)
 {
 	int status;
-	struct inode *alloc_inode = NULL;
+	u32 slot;
 
 	*ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
 	if (!(*ac)) {
@@ -488,28 +496,18 @@
 	}
 
 	(*ac)->ac_bits_wanted = ocfs2_extend_meta_needed(fe);
-	(*ac)->ac_handle = handle;
 	(*ac)->ac_which = OCFS2_AC_USE_META;
 
 #ifndef OCFS2_USE_ALL_METADATA_SUBALLOCATORS
-	alloc_inode = ocfs2_get_system_file_inode(osb,
-						  EXTENT_ALLOC_SYSTEM_INODE,
-						  0);
+	slot = 0;
 #else
-	alloc_inode = ocfs2_get_system_file_inode(osb,
-						  EXTENT_ALLOC_SYSTEM_INODE,
-						  osb->slot_num);
+	slot = osb->slot_num;
 #endif
-	if (!alloc_inode) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto bail;
-	}
 
-	(*ac)->ac_inode = igrab(alloc_inode);
 	(*ac)->ac_group_search = ocfs2_block_group_search;
 
-	status = ocfs2_reserve_suballoc_bits(osb, (*ac));
+	status = ocfs2_reserve_suballoc_bits(osb, (*ac),
+					     EXTENT_ALLOC_SYSTEM_INODE, slot);
 	if (status < 0) {
 		if (status != -ENOSPC)
 			mlog_errno(status);
@@ -523,19 +521,14 @@
 		*ac = NULL;
 	}
 
-	if (alloc_inode)
-		iput(alloc_inode);
-
 	mlog_exit(status);
 	return status;
 }
 
 int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
-			    struct ocfs2_journal_handle *handle,
 			    struct ocfs2_alloc_context **ac)
 {
 	int status;
-	struct inode *alloc_inode = NULL;
 
 	*ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
 	if (!(*ac)) {
@@ -545,22 +538,13 @@
 	}
 
 	(*ac)->ac_bits_wanted = 1;
-	(*ac)->ac_handle = handle;
 	(*ac)->ac_which = OCFS2_AC_USE_INODE;
 
-	alloc_inode = ocfs2_get_system_file_inode(osb,
-						  INODE_ALLOC_SYSTEM_INODE,
-						  osb->slot_num);
-	if (!alloc_inode) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto bail;
-	}
-
-	(*ac)->ac_inode = igrab(alloc_inode);
 	(*ac)->ac_group_search = ocfs2_block_group_search;
 
-	status = ocfs2_reserve_suballoc_bits(osb, *ac);
+	status = ocfs2_reserve_suballoc_bits(osb, *ac,
+					     INODE_ALLOC_SYSTEM_INODE,
+					     osb->slot_num);
 	if (status < 0) {
 		if (status != -ENOSPC)
 			mlog_errno(status);
@@ -574,9 +558,6 @@
 		*ac = NULL;
 	}
 
-	if (alloc_inode)
-		iput(alloc_inode);
-
 	mlog_exit(status);
 	return status;
 }
@@ -588,20 +569,17 @@
 {
 	int status;
 
-	ac->ac_inode = ocfs2_get_system_file_inode(osb,
-						   GLOBAL_BITMAP_SYSTEM_INODE,
-						   OCFS2_INVALID_SLOT);
-	if (!ac->ac_inode) {
-		status = -EINVAL;
-		mlog(ML_ERROR, "Could not get bitmap inode!\n");
-		goto bail;
-	}
 	ac->ac_which = OCFS2_AC_USE_MAIN;
 	ac->ac_group_search = ocfs2_cluster_group_search;
 
-	status = ocfs2_reserve_suballoc_bits(osb, ac);
-	if (status < 0 && status != -ENOSPC)
+	status = ocfs2_reserve_suballoc_bits(osb, ac,
+					     GLOBAL_BITMAP_SYSTEM_INODE,
+					     OCFS2_INVALID_SLOT);
+	if (status < 0 && status != -ENOSPC) {
 		mlog_errno(status);
+		goto bail;
+	}
+
 bail:
 	return status;
 }
@@ -610,7 +588,6 @@
  * use so we figure it out for them, but unfortunately this clutters
  * things a bit. */
 int ocfs2_reserve_clusters(struct ocfs2_super *osb,
-			   struct ocfs2_journal_handle *handle,
 			   u32 bits_wanted,
 			   struct ocfs2_alloc_context **ac)
 {
@@ -618,8 +595,6 @@
 
 	mlog_entry_void();
 
-	BUG_ON(!handle);
-
 	*ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
 	if (!(*ac)) {
 		status = -ENOMEM;
@@ -628,12 +603,10 @@
 	}
 
 	(*ac)->ac_bits_wanted = bits_wanted;
-	(*ac)->ac_handle = handle;
 
 	status = -ENOSPC;
 	if (ocfs2_alloc_should_use_local(osb, bits_wanted)) {
 		status = ocfs2_reserve_local_alloc_bits(osb,
-							handle,
 							bits_wanted,
 							*ac);
 		if ((status < 0) && (status != -ENOSPC)) {
@@ -774,7 +747,7 @@
 	return status;
 }
 
-static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle,
+static inline int ocfs2_block_group_set_bits(handle_t *handle,
 					     struct inode *alloc_inode,
 					     struct ocfs2_group_desc *bg,
 					     struct buffer_head *group_bh,
@@ -845,7 +818,7 @@
 	return best;
 }
 
-static int ocfs2_relink_block_group(struct ocfs2_journal_handle *handle,
+static int ocfs2_relink_block_group(handle_t *handle,
 				    struct inode *alloc_inode,
 				    struct buffer_head *fe_bh,
 				    struct buffer_head *bg_bh,
@@ -1025,7 +998,7 @@
 }
 
 static int ocfs2_alloc_dinode_update_counts(struct inode *inode,
-				       struct ocfs2_journal_handle *handle,
+				       handle_t *handle,
 				       struct buffer_head *di_bh,
 				       u32 num_bits,
 				       u16 chain)
@@ -1055,6 +1028,7 @@
 }
 
 static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
+				  handle_t *handle,
 				  u32 bits_wanted,
 				  u32 min_bits,
 				  u16 *bit_off,
@@ -1067,7 +1041,6 @@
 	struct buffer_head *group_bh = NULL;
 	struct ocfs2_group_desc *gd;
 	struct inode *alloc_inode = ac->ac_inode;
-	struct ocfs2_journal_handle *handle = ac->ac_handle;
 
 	ret = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb), gd_blkno,
 			       &group_bh, OCFS2_BH_CACHED, alloc_inode);
@@ -1115,6 +1088,7 @@
 }
 
 static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
+			      handle_t *handle,
 			      u32 bits_wanted,
 			      u32 min_bits,
 			      u16 *bit_off,
@@ -1126,7 +1100,6 @@
 	u16 chain, tmp_bits;
 	u32 tmp_used;
 	u64 next_group;
-	struct ocfs2_journal_handle *handle = ac->ac_handle;
 	struct inode *alloc_inode = ac->ac_inode;
 	struct buffer_head *group_bh = NULL;
 	struct buffer_head *prev_group_bh = NULL;
@@ -1272,6 +1245,7 @@
 /* will give out up to bits_wanted contiguous bits. */
 static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
 				     struct ocfs2_alloc_context *ac,
+				     handle_t *handle,
 				     u32 bits_wanted,
 				     u32 min_bits,
 				     u16 *bit_off,
@@ -1313,8 +1287,8 @@
 		 * by jumping straight to the most recently used
 		 * allocation group. This helps us mantain some
 		 * contiguousness across allocations. */
-		status = ocfs2_search_one_group(ac, bits_wanted, min_bits,
-						bit_off, num_bits,
+		status = ocfs2_search_one_group(ac, handle, bits_wanted,
+						min_bits, bit_off, num_bits,
 						hint_blkno, &bits_left);
 		if (!status) {
 			/* Be careful to update *bg_blkno here as the
@@ -1336,7 +1310,7 @@
 	ac->ac_chain = victim;
 	ac->ac_allow_chain_relink = 1;
 
-	status = ocfs2_search_chain(ac, bits_wanted, min_bits, bit_off,
+	status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, bit_off,
 				    num_bits, bg_blkno, &bits_left);
 	if (!status)
 		goto set_hint;
@@ -1360,7 +1334,7 @@
 			continue;
 
 		ac->ac_chain = i;
-		status = ocfs2_search_chain(ac, bits_wanted, min_bits,
+		status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
 					    bit_off, num_bits, bg_blkno,
 					    &bits_left);
 		if (!status)
@@ -1388,7 +1362,7 @@
 }
 
 int ocfs2_claim_metadata(struct ocfs2_super *osb,
-			 struct ocfs2_journal_handle *handle,
+			 handle_t *handle,
 			 struct ocfs2_alloc_context *ac,
 			 u32 bits_wanted,
 			 u16 *suballoc_bit_start,
@@ -1401,10 +1375,10 @@
 	BUG_ON(!ac);
 	BUG_ON(ac->ac_bits_wanted < (ac->ac_bits_given + bits_wanted));
 	BUG_ON(ac->ac_which != OCFS2_AC_USE_META);
-	BUG_ON(ac->ac_handle != handle);
 
 	status = ocfs2_claim_suballoc_bits(osb,
 					   ac,
+					   handle,
 					   bits_wanted,
 					   1,
 					   suballoc_bit_start,
@@ -1425,7 +1399,7 @@
 }
 
 int ocfs2_claim_new_inode(struct ocfs2_super *osb,
-			  struct ocfs2_journal_handle *handle,
+			  handle_t *handle,
 			  struct ocfs2_alloc_context *ac,
 			  u16 *suballoc_bit,
 			  u64 *fe_blkno)
@@ -1440,10 +1414,10 @@
 	BUG_ON(ac->ac_bits_given != 0);
 	BUG_ON(ac->ac_bits_wanted != 1);
 	BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
-	BUG_ON(ac->ac_handle != handle);
 
 	status = ocfs2_claim_suballoc_bits(osb,
 					   ac,
+					   handle,
 					   1,
 					   1,
 					   suballoc_bit,
@@ -1528,7 +1502,7 @@
  * of any size.
  */
 int ocfs2_claim_clusters(struct ocfs2_super *osb,
-			 struct ocfs2_journal_handle *handle,
+			 handle_t *handle,
 			 struct ocfs2_alloc_context *ac,
 			 u32 min_clusters,
 			 u32 *cluster_start,
@@ -1546,7 +1520,6 @@
 
 	BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
 	       && ac->ac_which != OCFS2_AC_USE_MAIN);
-	BUG_ON(ac->ac_handle != handle);
 
 	if (ac->ac_which == OCFS2_AC_USE_LOCAL) {
 		status = ocfs2_claim_local_alloc_bits(osb,
@@ -1572,6 +1545,7 @@
 
 		status = ocfs2_claim_suballoc_bits(osb,
 						   ac,
+						   handle,
 						   bits_wanted,
 						   min_clusters,
 						   &bg_bit_off,
@@ -1598,7 +1572,7 @@
 	return status;
 }
 
-static inline int ocfs2_block_group_clear_bits(struct ocfs2_journal_handle *handle,
+static inline int ocfs2_block_group_clear_bits(handle_t *handle,
 					       struct inode *alloc_inode,
 					       struct ocfs2_group_desc *bg,
 					       struct buffer_head *group_bh,
@@ -1653,7 +1627,7 @@
 /*
  * expects the suballoc inode to already be locked.
  */
-static int ocfs2_free_suballoc_bits(struct ocfs2_journal_handle *handle,
+static int ocfs2_free_suballoc_bits(handle_t *handle,
 				    struct inode *alloc_inode,
 				    struct buffer_head *alloc_bh,
 				    unsigned int start_bit,
@@ -1737,7 +1711,7 @@
 	return group;
 }
 
-int ocfs2_free_dinode(struct ocfs2_journal_handle *handle,
+int ocfs2_free_dinode(handle_t *handle,
 		      struct inode *inode_alloc_inode,
 		      struct buffer_head *inode_alloc_bh,
 		      struct ocfs2_dinode *di)
@@ -1750,7 +1724,7 @@
 					inode_alloc_bh, bit, bg_blkno, 1);
 }
 
-int ocfs2_free_extent_block(struct ocfs2_journal_handle *handle,
+int ocfs2_free_extent_block(handle_t *handle,
 			    struct inode *eb_alloc_inode,
 			    struct buffer_head *eb_alloc_bh,
 			    struct ocfs2_extent_block *eb)
@@ -1763,7 +1737,7 @@
 					bit, bg_blkno, 1);
 }
 
-int ocfs2_free_clusters(struct ocfs2_journal_handle *handle,
+int ocfs2_free_clusters(handle_t *handle,
 		       struct inode *bitmap_inode,
 		       struct buffer_head *bitmap_bh,
 		       u64 start_blk,
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index c787838..1a3c94c 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -43,7 +43,6 @@
 #define OCFS2_AC_USE_INODE 3
 #define OCFS2_AC_USE_META  4
 	u32    ac_which;
-	struct ocfs2_journal_handle *ac_handle;
 
 	/* these are used by the chain search */
 	u16    ac_chain;
@@ -60,45 +59,42 @@
 }
 
 int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
-			       struct ocfs2_journal_handle *handle,
 			       struct ocfs2_dinode *fe,
 			       struct ocfs2_alloc_context **ac);
 int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
-			    struct ocfs2_journal_handle *handle,
 			    struct ocfs2_alloc_context **ac);
 int ocfs2_reserve_clusters(struct ocfs2_super *osb,
-			   struct ocfs2_journal_handle *handle,
 			   u32 bits_wanted,
 			   struct ocfs2_alloc_context **ac);
 
 int ocfs2_claim_metadata(struct ocfs2_super *osb,
-			 struct ocfs2_journal_handle *handle,
+			 handle_t *handle,
 			 struct ocfs2_alloc_context *ac,
 			 u32 bits_wanted,
 			 u16 *suballoc_bit_start,
 			 u32 *num_bits,
 			 u64 *blkno_start);
 int ocfs2_claim_new_inode(struct ocfs2_super *osb,
-			  struct ocfs2_journal_handle *handle,
+			  handle_t *handle,
 			  struct ocfs2_alloc_context *ac,
 			  u16 *suballoc_bit,
 			  u64 *fe_blkno);
 int ocfs2_claim_clusters(struct ocfs2_super *osb,
-			 struct ocfs2_journal_handle *handle,
+			 handle_t *handle,
 			 struct ocfs2_alloc_context *ac,
 			 u32 min_clusters,
 			 u32 *cluster_start,
 			 u32 *num_clusters);
 
-int ocfs2_free_dinode(struct ocfs2_journal_handle *handle,
+int ocfs2_free_dinode(handle_t *handle,
 		      struct inode *inode_alloc_inode,
 		      struct buffer_head *inode_alloc_bh,
 		      struct ocfs2_dinode *di);
-int ocfs2_free_extent_block(struct ocfs2_journal_handle *handle,
+int ocfs2_free_extent_block(handle_t *handle,
 			    struct inode *eb_alloc_inode,
 			    struct buffer_head *eb_alloc_bh,
 			    struct ocfs2_extent_block *eb);
-int ocfs2_free_clusters(struct ocfs2_journal_handle *handle,
+int ocfs2_free_clusters(handle_t *handle,
 			struct inode *bitmap_inode,
 			struct buffer_head *bitmap_bh,
 			u64 start_blk,
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 76b46eb..4bf3954 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -68,9 +68,7 @@
 
 #include "buffer_head_io.h"
 
-static kmem_cache_t *ocfs2_inode_cachep = NULL;
-
-kmem_cache_t *ocfs2_lock_cache = NULL;
+static struct kmem_cache *ocfs2_inode_cachep = NULL;
 
 /* OCFS2 needs to schedule several differnt types of work which
  * require cluster locking, disk I/O, recovery waits, etc. Since these
@@ -141,6 +139,7 @@
 	Opt_hb_local,
 	Opt_data_ordered,
 	Opt_data_writeback,
+	Opt_atime_quantum,
 	Opt_err,
 };
 
@@ -154,6 +153,7 @@
 	{Opt_hb_local, OCFS2_HB_LOCAL},
 	{Opt_data_ordered, "data=ordered"},
 	{Opt_data_writeback, "data=writeback"},
+	{Opt_atime_quantum, "atime_quantum=%u"},
 	{Opt_err, NULL}
 };
 
@@ -303,7 +303,7 @@
 {
 	struct ocfs2_inode_info *oi;
 
-	oi = kmem_cache_alloc(ocfs2_inode_cachep, SLAB_NOFS);
+	oi = kmem_cache_alloc(ocfs2_inode_cachep, GFP_NOFS);
 	if (!oi)
 		return NULL;
 
@@ -707,6 +707,7 @@
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token, option;
 		substring_t args[MAX_OPT_ARGS];
+		struct ocfs2_super * osb = OCFS2_SB(sb);
 
 		if (!*p)
 			continue;
@@ -747,6 +748,16 @@
 		case Opt_data_writeback:
 			*mount_opt |= OCFS2_MOUNT_DATA_WRITEBACK;
 			break;
+		case Opt_atime_quantum:
+			if (match_int(&args[0], &option)) {
+				status = 0;
+				goto bail;
+			}
+			if (option >= 0)
+				osb->s_atime_quantum = option;
+			else
+				osb->s_atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
+			break;
 		default:
 			mlog(ML_ERROR,
 			     "Unrecognized mount option \"%s\" "
@@ -867,7 +878,7 @@
 		goto bail;
 	}
 
-	status = ocfs2_meta_lock(inode, NULL, &bh, 0);
+	status = ocfs2_meta_lock(inode, &bh, 0);
 	if (status < 0) {
 		mlog_errno(status);
 		goto bail;
@@ -903,7 +914,7 @@
 }
 
 static void ocfs2_inode_init_once(void *data,
-				  kmem_cache_t *cachep,
+				  struct kmem_cache *cachep,
 				  unsigned long flags)
 {
 	struct ocfs2_inode_info *oi = data;
@@ -914,9 +925,7 @@
 		oi->ip_open_count = 0;
 		spin_lock_init(&oi->ip_lock);
 		ocfs2_extent_map_init(&oi->vfs_inode);
-		INIT_LIST_HEAD(&oi->ip_handle_list);
 		INIT_LIST_HEAD(&oi->ip_io_markers);
-		oi->ip_handle = NULL;
 		oi->ip_created_trans = 0;
 		oi->ip_last_trans = 0;
 		oi->ip_dir_start_lookup = 0;
@@ -948,14 +957,6 @@
 	if (!ocfs2_inode_cachep)
 		return -ENOMEM;
 
-	ocfs2_lock_cache = kmem_cache_create("ocfs2_lock",
-					     sizeof(struct ocfs2_journal_lock),
-					     0,
-					     SLAB_HWCACHE_ALIGN,
-					     NULL, NULL);
-	if (!ocfs2_lock_cache)
-		return -ENOMEM;
-
 	return 0;
 }
 
@@ -963,11 +964,8 @@
 {
 	if (ocfs2_inode_cachep)
 		kmem_cache_destroy(ocfs2_inode_cachep);
-	if (ocfs2_lock_cache)
-		kmem_cache_destroy(ocfs2_lock_cache);
 
 	ocfs2_inode_cachep = NULL;
-	ocfs2_lock_cache = NULL;
 }
 
 static int ocfs2_get_sector(struct super_block *sb,
@@ -1280,6 +1278,8 @@
 	init_waitqueue_head(&osb->checkpoint_event);
 	atomic_set(&osb->needs_checkpoint, 0);
 
+	osb->s_atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
+
 	osb->node_num = O2NM_INVALID_NODE_NUM;
 	osb->slot_num = OCFS2_INVALID_SLOT;
 
@@ -1365,7 +1365,7 @@
 	spin_lock_init(&journal->j_lock);
 	journal->j_trans_id = (unsigned long) 1;
 	INIT_LIST_HEAD(&journal->j_la_cleanups);
-	INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery, osb);
+	INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery);
 	journal->j_state = OCFS2_JOURNAL_FREE;
 
 	/* get some pseudo constants for clustersize bits */
@@ -1674,7 +1674,7 @@
 	va_list args;
 
 	va_start(args, fmt);
-	vsprintf(error_buf, fmt, args);
+	vsnprintf(error_buf, sizeof(error_buf), fmt, args);
 	va_end(args);
 
 	/* Not using mlog here because we want to show the actual
@@ -1695,7 +1695,7 @@
 	va_list args;
 
 	va_start(args, fmt);
-	vsprintf(error_buf, fmt, args);
+	vsnprintf(error_buf, sizeof(error_buf), fmt, args);
 	va_end(args);
 
 	printk(KERN_CRIT "OCFS2: abort (device %s): %s: %s\n",
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index c0f68aa..957d687 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -126,6 +126,10 @@
 		goto out;
 	}
 
+	/*
+	 * Without a vfsmount we can't update atime now,
+	 * but eventually this is where atime should be updated.
+	 */
 	ret = vfs_readlink(dentry, buffer, buflen, link);
 
 	brelse(bh);
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index 9707ed7..39814b9 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -69,7 +69,7 @@
 	sector_t	c_block;
 };
 
-static kmem_cache_t *ocfs2_uptodate_cachep = NULL;
+static struct kmem_cache *ocfs2_uptodate_cachep = NULL;
 
 void ocfs2_metadata_cache_init(struct inode *inode)
 {
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 592a640..26f44e0 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -330,13 +330,13 @@
 	return 0;
 }
 
-static kmem_cache_t *op_inode_cachep;
+static struct kmem_cache *op_inode_cachep;
 
 static struct inode *openprom_alloc_inode(struct super_block *sb)
 {
 	struct op_inode_info *oi;
 
-	oi = kmem_cache_alloc(op_inode_cachep, SLAB_KERNEL);
+	oi = kmem_cache_alloc(op_inode_cachep, GFP_KERNEL);
 	if (!oi)
 		return NULL;
 
@@ -415,7 +415,7 @@
 	.kill_sb	= kill_anon_super,
 };
 
-static void op_inode_init_once(void *data, kmem_cache_t * cachep, unsigned long flags)
+static void op_inode_init_once(void *data, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct op_inode_info *oi = (struct op_inode_info *) data;
 
diff --git a/fs/partitions/amiga.c b/fs/partitions/amiga.c
index 3068528..9917a8c 100644
--- a/fs/partitions/amiga.c
+++ b/fs/partitions/amiga.c
@@ -43,6 +43,7 @@
 			if (warn_no_part)
 				printk("Dev %s: unable to read RDB block %d\n",
 				       bdevname(bdev, b), blk);
+			res = -1;
 			goto rdb_done;
 		}
 		if (*(__be32 *)data != cpu_to_be32(IDNAME_RIGIDDISK))
@@ -79,6 +80,7 @@
 			if (warn_no_part)
 				printk("Dev %s: unable to read partition block %d\n",
 				       bdevname(bdev, b), blk);
+			res = -1;
 			goto rdb_done;
 		}
 		pb  = (struct PartitionBlock *)data;
diff --git a/fs/partitions/atari.c b/fs/partitions/atari.c
index 192a6ad..1f3572d 100644
--- a/fs/partitions/atari.c
+++ b/fs/partitions/atari.c
@@ -88,7 +88,7 @@
 			if (!xrs) {
 				printk (" block %ld read failed\n", partsect);
 				put_dev_sector(sect);
-				return 0;
+				return -1;
 			}
 
 			/* ++roman: sanity check: bit 0 of flg field must be set */
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 6fb4b61..1901137 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -153,7 +153,7 @@
 check_partition(struct gendisk *hd, struct block_device *bdev)
 {
 	struct parsed_partitions *state;
-	int i, res;
+	int i, res, err;
 
 	state = kmalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
 	if (!state)
@@ -165,19 +165,30 @@
 		sprintf(state->name, "p");
 
 	state->limit = hd->minors;
-	i = res = 0;
+	i = res = err = 0;
 	while (!res && check_part[i]) {
 		memset(&state->parts, 0, sizeof(state->parts));
 		res = check_part[i++](state, bdev);
+		if (res < 0) {
+			/* We have hit an I/O error which we don't report now.
+			 * But record it, and let the others do their job.
+			 */
+			err = res;
+			res = 0;
+		}
+
 	}
 	if (res > 0)
 		return state;
+	if (err)
+		/* The partition is unrecognized. So report I/O errors if there were any */
+		res = err;
 	if (!res)
 		printk(" unknown partition table\n");
 	else if (warn_no_part)
 		printk(" unable to read partition table\n");
 	kfree(state);
-	return NULL;
+	return ERR_PTR(res);
 }
 
 /*
@@ -494,6 +505,8 @@
 		disk->fops->revalidate_disk(disk);
 	if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
 		return 0;
+	if (IS_ERR(state))	/* I/O error reading the partition table */
+		return PTR_ERR(state);
 	for (p = 1; p < state->limit; p++) {
 		sector_t size = state->parts[p].size;
 		sector_t from = state->parts[p].from;
diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c
index d352a73..9f7ad42 100644
--- a/fs/partitions/ibm.c
+++ b/fs/partitions/ibm.c
@@ -43,7 +43,7 @@
 int
 ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
 {
-	int blocksize, offset, size;
+	int blocksize, offset, size, res;
 	loff_t i_size;
 	dasd_information_t *info;
 	struct hd_geometry *geo;
@@ -56,15 +56,16 @@
 	unsigned char *data;
 	Sector sect;
 
+	res = 0;
 	blocksize = bdev_hardsect_size(bdev);
 	if (blocksize <= 0)
-		return 0;
+		goto out_exit;
 	i_size = i_size_read(bdev->bd_inode);
 	if (i_size == 0)
-		return 0;
+		goto out_exit;
 
 	if ((info = kmalloc(sizeof(dasd_information_t), GFP_KERNEL)) == NULL)
-		goto out_noinfo;
+		goto out_exit;
 	if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL)
 		goto out_nogeo;
 	if ((label = kmalloc(sizeof(union label_t), GFP_KERNEL)) == NULL)
@@ -72,7 +73,7 @@
 
 	if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)info) != 0 ||
 	    ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
-		goto out_noioctl;
+		goto out_freeall;
 
 	/*
 	 * Get volume label, extract name and type.
@@ -92,6 +93,8 @@
 	EBCASC(type, 4);
 	EBCASC(name, 6);
 
+	res = 1;
+
 	/*
 	 * Three different types: CMS1, VOL1 and LNX1/unlabeled
 	 */
@@ -156,6 +159,9 @@
 			counter++;
 			blk++;
 		}
+		if (!data)
+			/* Are we not supposed to report this? */
+			goto out_readerr;
 	} else {
 		/*
 		 * Old style LNX1 or unlabeled disk
@@ -171,18 +177,17 @@
 	}
 
 	printk("\n");
-	kfree(label);
-	kfree(geo);
-	kfree(info);
-	return 1;
+	goto out_freeall;
+
 
 out_readerr:
-out_noioctl:
+	res = -1;
+out_freeall:
 	kfree(label);
 out_nolab:
 	kfree(geo);
 out_nogeo:
 	kfree(info);
-out_noinfo:
-	return 0;
+out_exit:
+	return res;
 }
diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
index c087100..d4a0fad 100644
--- a/fs/partitions/mac.c
+++ b/fs/partitions/mac.c
@@ -74,6 +74,8 @@
 			be32_to_cpu(part->start_block) * (secsize/512),
 			be32_to_cpu(part->block_count) * (secsize/512));
 
+		if (!strnicmp(part->type, "Linux_RAID", 10))
+			state->parts[slot].flags = 1;
 #ifdef CONFIG_PPC_PMAC
 		/*
 		 * If this is the first bootable partition, tell the
diff --git a/fs/pipe.c b/fs/pipe.c
index b1626f2..ae36b89 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -830,7 +830,14 @@
 static struct vfsmount *pipe_mnt __read_mostly;
 static int pipefs_delete_dentry(struct dentry *dentry)
 {
-	return 1;
+	/*
+	 * At creation time, we pretended this dentry was hashed
+	 * (by clearing the DCACHE_UNHASHED bit in d_flags).
+	 * At delete time, we restore the truth: it is not hashed,
+	 * so that dput() can proceed correctly.
+	 */
+	dentry->d_flags |= DCACHE_UNHASHED;
+	return 0;
 }
 
 static struct dentry_operations pipefs_dentry_operations = {
@@ -891,17 +898,22 @@
 	if (!inode)
 		goto err_file;
 
-	sprintf(name, "[%lu]", inode->i_ino);
+	this.len = sprintf(name, "[%lu]", inode->i_ino);
 	this.name = name;
-	this.len = strlen(name);
-	this.hash = inode->i_ino; /* will go */
+	this.hash = 0;
 	err = -ENOMEM;
 	dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &this);
 	if (!dentry)
 		goto err_inode;
 
 	dentry->d_op = &pipefs_dentry_operations;
-	d_add(dentry, inode);
+	/*
+	 * We don't want to publish this dentry into the global dentry hash table.
+	 * We pretend the dentry is already hashed, by unsetting DCACHE_UNHASHED.
+	 * This permits a working /proc/$pid/fd/XXX on pipes.
+	 */
+	dentry->d_flags &= ~DCACHE_UNHASHED;
+	d_instantiate(dentry, inode);
 	f->f_vfsmnt = mntget(pipe_mnt);
 	f->f_dentry = dentry;
 	f->f_mapping = inode->i_mapping;
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index 7431d7b..f6c7762 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -8,8 +8,9 @@
 proc-$(CONFIG_MMU)	:= mmu.o task_mmu.o
 
 proc-y       += inode.o root.o base.o generic.o array.o \
-		kmsg.o proc_tty.o proc_misc.o
+		proc_tty.o proc_misc.o
 
 proc-$(CONFIG_PROC_KCORE)	+= kcore.o
 proc-$(CONFIG_PROC_VMCORE)	+= vmcore.o
 proc-$(CONFIG_PROC_DEVICETREE)	+= proc_devtree.o
+proc-$(CONFIG_PRINTK)	+= kmsg.o
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 795319c..b859fc7 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -683,8 +683,6 @@
 	char buffer[PROC_NUMBUF], *end;
 	int oom_adjust;
 
-	if (!capable(CAP_SYS_RESOURCE))
-		return -EPERM;
 	memset(buffer, 0, sizeof(buffer));
 	if (count > sizeof(buffer) - 1)
 		count = sizeof(buffer) - 1;
@@ -699,6 +697,10 @@
 	task = get_proc_task(file->f_dentry->d_inode);
 	if (!task)
 		return -ESRCH;
+	if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) {
+		put_task_struct(task);
+		return -EACCES;
+	}
 	task->oomkilladj = oom_adjust;
 	put_task_struct(task);
 	if (end - buffer == 0)
@@ -1883,8 +1885,9 @@
 	return;
 }
 
-struct dentry *proc_pid_instantiate(struct inode *dir,
-	struct dentry * dentry, struct task_struct *task, void *ptr)
+static struct dentry *proc_pid_instantiate(struct inode *dir,
+					   struct dentry * dentry,
+					   struct task_struct *task, void *ptr)
 {
 	struct dentry *error = ERR_PTR(-ENOENT);
 	struct inode *inode;
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 49dfb2a..e26945b 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -81,14 +81,14 @@
 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
 }
 
-static kmem_cache_t * proc_inode_cachep;
+static struct kmem_cache * proc_inode_cachep;
 
 static struct inode *proc_alloc_inode(struct super_block *sb)
 {
 	struct proc_inode *ei;
 	struct inode *inode;
 
-	ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, SLAB_KERNEL);
+	ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	ei->pid = NULL;
@@ -105,7 +105,7 @@
 	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct proc_inode *ei = (struct proc_inode *) foo;
 
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 1294eda..1be7308 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -22,6 +22,7 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
+#define CORE_STR "CORE"
 
 static int open_kcore(struct inode * inode, struct file * filp)
 {
@@ -82,10 +83,11 @@
 	}
 	*elf_buflen =	sizeof(struct elfhdr) + 
 			(*nphdr + 2)*sizeof(struct elf_phdr) + 
-			3 * (sizeof(struct elf_note) + 4) +
-			sizeof(struct elf_prstatus) +
-			sizeof(struct elf_prpsinfo) +
-			sizeof(struct task_struct);
+			3 * ((sizeof(struct elf_note)) +
+			     roundup(sizeof(CORE_STR), 4)) +
+			roundup(sizeof(struct elf_prstatus), 4) +
+			roundup(sizeof(struct elf_prpsinfo), 4) +
+			roundup(sizeof(struct task_struct), 4);
 	*elf_buflen = PAGE_ALIGN(*elf_buflen);
 	return size + *elf_buflen;
 }
@@ -210,7 +212,7 @@
 	nhdr->p_offset	= offset;
 
 	/* set up the process status */
-	notes[0].name = "CORE";
+	notes[0].name = CORE_STR;
 	notes[0].type = NT_PRSTATUS;
 	notes[0].datasz = sizeof(struct elf_prstatus);
 	notes[0].data = &prstatus;
@@ -221,7 +223,7 @@
 	bufp = storenote(&notes[0], bufp);
 
 	/* set up the process info */
-	notes[1].name	= "CORE";
+	notes[1].name	= CORE_STR;
 	notes[1].type	= NT_PRPSINFO;
 	notes[1].datasz	= sizeof(struct elf_prpsinfo);
 	notes[1].data	= &prpsinfo;
@@ -238,7 +240,7 @@
 	bufp = storenote(&notes[1], bufp);
 
 	/* set up the task structure */
-	notes[2].name	= "CORE";
+	notes[2].name	= CORE_STR;
 	notes[2].type	= NT_TASKSTRUCT;
 	notes[2].datasz	= sizeof(struct task_struct);
 	notes[2].data	= current;
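
The kcore sizing hunk above replaces the hard-coded "+ 4" per note with roundup(), so the note name and each descriptor are padded to a 4-byte boundary as the ELF note format expects. A minimal userspace sketch of that arithmetic follows; the ROUNDUP macro mirrors the kernel idiom, while the descriptor sizes and the three-word note header are illustrative stand-ins, not the real structure sizes.

#include <stdio.h>
#include <string.h>

/* same rounding idiom as the kernel's roundup() macro */
#define ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

#define CORE_STR "CORE"

/* size of one ELF note: header + padded name + padded descriptor */
static size_t note_size(const char *name, size_t datasz)
{
	size_t namesz = strlen(name) + 1;      /* includes the trailing NUL */

	return 3 * sizeof(unsigned int)        /* n_namesz, n_descsz, n_type */
	       + ROUNDUP(namesz, 4)            /* name padded to 4 bytes */
	       + ROUNDUP(datasz, 4);           /* descriptor padded to 4 bytes */
}

int main(void)
{
	/* illustrative descriptor sizes; the real ones come from elf_prstatus etc. */
	size_t prstatus = 145, prpsinfo = 124, taskstruct = 1700;

	printf("PRSTATUS   note: %zu bytes\n", note_size(CORE_STR, prstatus));
	printf("PRPSINFO   note: %zu bytes\n", note_size(CORE_STR, prpsinfo));
	printf("TASKSTRUCT note: %zu bytes\n", note_size(CORE_STR, taskstruct));
	return 0;
}

Compiled with a plain cc invocation this just prints the padded sizes, which is enough to see why sizeof(CORE_STR) == 5 becomes 8 in the buffer length calculation.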
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 93c43b6..51815ce 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -696,9 +696,11 @@
 	proc_symlink("mounts", NULL, "self/mounts");
 
 	/* And now for trickier ones */
+#ifdef CONFIG_PRINTK
 	entry = create_proc_entry("kmsg", S_IRUSR, &proc_root);
 	if (entry)
 		entry->proc_fops = &proc_kmsg_operations;
+#endif
 	create_seq_entry("devices", 0, &proc_devinfo_operations);
 	create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
 #ifdef CONFIG_BLOCK
diff --git a/fs/proc/root.c b/fs/proc/root.c
index ffe66c3..64d242b6 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -13,6 +13,7 @@
 #include <linux/proc_fs.h>
 #include <linux/stat.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/bitops.h>
 #include <linux/smp_lock.h>
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 5a41db2..c047dc6 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -515,12 +515,12 @@
 	brelse(bh);
 }
 
-static kmem_cache_t *qnx4_inode_cachep;
+static struct kmem_cache *qnx4_inode_cachep;
 
 static struct inode *qnx4_alloc_inode(struct super_block *sb)
 {
 	struct qnx4_inode_info *ei;
-	ei = kmem_cache_alloc(qnx4_inode_cachep, SLAB_KERNEL);
+	ei = kmem_cache_alloc(qnx4_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -531,7 +531,7 @@
 	kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode));
 }
 
-static void init_once(void *foo, kmem_cache_t * cachep,
+static void init_once(void *foo, struct kmem_cache * cachep,
 		      unsigned long flags)
 {
 	struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index ac14318..373d862 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -317,12 +317,11 @@
 			/* area filled with zeroes, to supply as list of zero blocknumbers
 			   We allocate it outside of loop just in case loop would spin for
 			   several iterations. */
-			char *zeros = kmalloc(to_paste * UNFM_P_SIZE, GFP_ATOMIC);	// We cannot insert more than MAX_ITEM_LEN bytes anyway.
+			char *zeros = kzalloc(to_paste * UNFM_P_SIZE, GFP_ATOMIC);	// We cannot insert more than MAX_ITEM_LEN bytes anyway.
 			if (!zeros) {
 				res = -ENOMEM;
 				goto error_exit_free_blocks;
 			}
-			memset(zeros, 0, to_paste * UNFM_P_SIZE);
 			do {
 				to_paste =
 				    min_t(__u64, hole_size,
@@ -407,6 +406,8 @@
 				   we restart it. This will also free the path. */
 				if (journal_transaction_should_end
 				    (th, th->t_blocks_allocated)) {
+					inode->i_size = cpu_key_k_offset(&key) +
+						(to_paste << inode->i_blkbits);
 					res =
 					    restart_transaction(th, inode,
 								&path);
@@ -1045,6 +1046,7 @@
 			char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0);
 			memset(kaddr, 0, from);
 			kunmap_atomic(kaddr, KM_USER0);
+			flush_dcache_page(prepared_pages[0]);
 		}
 		if (to != PAGE_CACHE_SIZE) {	/* Last page needs to be partially zeroed */
 			char *kaddr =
@@ -1052,6 +1054,7 @@
 					KM_USER0);
 			memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
 			kunmap_atomic(kaddr, KM_USER0);
+			flush_dcache_page(prepared_pages[num_pages - 1]);
 		}
 
 		/* Since all blocks are new - use already calculated value */
@@ -1185,6 +1188,7 @@
 					memset(kaddr + block_start, 0,
 					       from - block_start);
 					kunmap_atomic(kaddr, KM_USER0);
+					flush_dcache_page(prepared_pages[0]);
 					set_buffer_uptodate(bh);
 				}
 			}
@@ -1222,6 +1226,7 @@
 							KM_USER0);
 					memset(kaddr + to, 0, block_end - to);
 					kunmap_atomic(kaddr, KM_USER0);
+					flush_dcache_page(prepared_pages[num_pages - 1]);
 					set_buffer_uptodate(bh);
 				}
 			}
@@ -1307,56 +1312,8 @@
 			count = MAX_NON_LFS - (unsigned long)*ppos;
 	}
 
-	if (file->f_flags & O_DIRECT) {	// Direct IO needs treatment
-		ssize_t result, after_file_end = 0;
-		if ((*ppos + count >= inode->i_size)
-		    || (file->f_flags & O_APPEND)) {
-			/* If we are appending a file, we need to put this savelink in here.
-			   If we will crash while doing direct io, finish_unfinished will
-			   cut the garbage from the file end. */
-			reiserfs_write_lock(inode->i_sb);
-			err =
-			    journal_begin(&th, inode->i_sb,
-					  JOURNAL_PER_BALANCE_CNT);
-			if (err) {
-				reiserfs_write_unlock(inode->i_sb);
-				return err;
-			}
-			reiserfs_update_inode_transaction(inode);
-			add_save_link(&th, inode, 1 /* Truncate */ );
-			after_file_end = 1;
-			err =
-			    journal_end(&th, inode->i_sb,
-					JOURNAL_PER_BALANCE_CNT);
-			reiserfs_write_unlock(inode->i_sb);
-			if (err)
-				return err;
-		}
-		result = do_sync_write(file, buf, count, ppos);
-
-		if (after_file_end) {	/* Now update i_size and remove the savelink */
-			struct reiserfs_transaction_handle th;
-			reiserfs_write_lock(inode->i_sb);
-			err = journal_begin(&th, inode->i_sb, 1);
-			if (err) {
-				reiserfs_write_unlock(inode->i_sb);
-				return err;
-			}
-			reiserfs_update_inode_transaction(inode);
-			mark_inode_dirty(inode);
-			err = journal_end(&th, inode->i_sb, 1);
-			if (err) {
-				reiserfs_write_unlock(inode->i_sb);
-				return err;
-			}
-			err = remove_save_link(inode, 1 /* truncate */ );
-			reiserfs_write_unlock(inode->i_sb);
-			if (err)
-				return err;
-		}
-
-		return result;
-	}
+	if (file->f_flags & O_DIRECT)
+		return do_sync_write(file, buf, count, ppos);
 
 	if (unlikely((ssize_t) count < 0))
 		return -EINVAL;
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 9c69bca..254239e 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -216,11 +216,12 @@
 	BUG_ON(!th->t_trans_id);
 	BUG_ON(!th->t_refcount);
 
+	pathrelse(path);
+
 	/* we cannot restart while nested */
 	if (th->t_refcount > 1) {
 		return 0;
 	}
-	pathrelse(path);
 	reiserfs_update_sd(th, inode);
 	err = journal_end(th, s, len);
 	if (!err) {
@@ -928,15 +929,12 @@
 			if (blocks_needed == 1) {
 				un = &unf_single;
 			} else {
-				un = kmalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_ATOMIC);	// We need to avoid scheduling.
+				un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_ATOMIC);	// We need to avoid scheduling.
 				if (!un) {
 					un = &unf_single;
 					blocks_needed = 1;
 					max_to_insert = 0;
-				} else
-					memset(un, 0,
-					       UNFM_P_SIZE * min(blocks_needed,
-								 max_to_insert));
+				}
 			}
 			if (blocks_needed <= max_to_insert) {
 				/* we are going to add target block to the file. Use allocated
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 85ce232..7280a23 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -104,7 +104,7 @@
 			       struct reiserfs_journal *journal);
 static int dirty_one_transaction(struct super_block *s,
 				 struct reiserfs_journal_list *jl);
-static void flush_async_commits(void *p);
+static void flush_async_commits(struct work_struct *work);
 static void queue_log_writer(struct super_block *s);
 
 /* values for join in do_journal_begin_r */
@@ -1464,7 +1464,7 @@
 		}
 
 		/* if someone has this block in a newer transaction, just make
-		 ** sure they are commited, and don't try writing it to disk
+		 ** sure they are committed, and don't try writing it to disk
 		 */
 		if (pjl) {
 			if (atomic_read(&pjl->j_commit_left))
@@ -2836,7 +2836,8 @@
 	if (reiserfs_mounted_fs_count <= 1)
 		commit_wq = create_workqueue("reiserfs");
 
-	INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
+	INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
+	journal->j_work_sb = p_s_sb;
 	return 0;
       free_and_return:
 	free_journal_ram(p_s_sb);
@@ -3384,7 +3385,7 @@
 
 /*
 ** for any cnode in a journal list, it can only be dirtied of all the
-** transactions that include it are commited to disk.
+** transactions that include it are committed to disk.
 ** this checks through each transaction, and returns 1 if you are allowed to dirty,
 ** and 0 if you aren't
 **
@@ -3426,7 +3427,7 @@
 }
 
 /* syncs the commit blocks, but does not force the real buffers to disk
-** will wait until the current transaction is done/commited before returning 
+** will wait until the current transaction is done/committed before returning 
 */
 int journal_end_sync(struct reiserfs_transaction_handle *th,
 		     struct super_block *p_s_sb, unsigned long nblocks)
@@ -3447,10 +3448,11 @@
 /*
 ** writeback the pending async commits to disk
 */
-static void flush_async_commits(void *p)
+static void flush_async_commits(struct work_struct *work)
 {
-	struct super_block *p_s_sb = p;
-	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
+	struct reiserfs_journal *journal =
+		container_of(work, struct reiserfs_journal, j_work.work);
+	struct super_block *p_s_sb = journal->j_work_sb;
 	struct reiserfs_journal_list *jl;
 	struct list_head *entry;
 
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 1724999..7fb5fb0 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -490,13 +490,13 @@
 	return;
 }
 
-static kmem_cache_t *reiserfs_inode_cachep;
+static struct kmem_cache *reiserfs_inode_cachep;
 
 static struct inode *reiserfs_alloc_inode(struct super_block *sb)
 {
 	struct reiserfs_inode_info *ei;
 	ei = (struct reiserfs_inode_info *)
-	    kmem_cache_alloc(reiserfs_inode_cachep, SLAB_KERNEL);
+	    kmem_cache_alloc(reiserfs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -507,7 +507,7 @@
 	kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode));
 }
 
-static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo;
 
@@ -1549,13 +1549,12 @@
 	struct reiserfs_sb_info *sbi;
 	int errval = -EINVAL;
 
-	sbi = kmalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
+	sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
 	if (!sbi) {
 		errval = -ENOMEM;
 		goto error;
 	}
 	s->s_fs_info = sbi;
-	memset(sbi, 0, sizeof(struct reiserfs_sb_info));
 	/* Set default values for options: non-aggressive tails, RO on errors */
 	REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
 	REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO);
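
Several reiserfs hunks (file.c, inode.c and the sb_info allocation above) collapse a kmalloc() followed by memset(..., 0, ...) into a single kzalloc(), which returns already-zeroed memory. A rough userspace equivalent, assuming calloc() as the stand-in allocator and an invented structure name:

#include <stdio.h>
#include <stdlib.h>

/* userspace stand-in: kzalloc(size, flags) == a zeroed allocation */
static void *kzalloc_sketch(size_t size)
{
	return calloc(1, size);
}

struct sb_info {                 /* illustrative stand-in for reiserfs_sb_info */
	unsigned long mount_opt;
	int mount_state;
};

int main(void)
{
	/* before: sbi = kmalloc(...); memset(sbi, 0, ...);  after: one call */
	struct sb_info *sbi = kzalloc_sketch(sizeof(*sbi));

	if (!sbi)
		return 1;
	printf("mount_opt=%lu mount_state=%d\n", sbi->mount_opt, sbi->mount_state);
	free(sbi);
	return 0;
}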
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 7bdb0ed..1e4d685 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -41,7 +41,7 @@
 #include <linux/reiserfs_xattr.h>
 #include <linux/reiserfs_acl.h>
 #include <asm/uaccess.h>
-#include <asm/checksum.h>
+#include <net/checksum.h>
 #include <linux/smp_lock.h>
 #include <linux/stat.h>
 #include <asm/semaphore.h>
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index ddcd9e1..c5af088 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -550,12 +550,12 @@
 	}
 }
 
-static kmem_cache_t * romfs_inode_cachep;
+static struct kmem_cache * romfs_inode_cachep;
 
 static struct inode *romfs_alloc_inode(struct super_block *sb)
 {
 	struct romfs_inode_info *ei;
-	ei = (struct romfs_inode_info *)kmem_cache_alloc(romfs_inode_cachep, SLAB_KERNEL);
+	ei = (struct romfs_inode_info *)kmem_cache_alloc(romfs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -566,7 +566,7 @@
 	kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct romfs_inode_info *ei = (struct romfs_inode_info *) foo;
 
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 555b9ac..10690aa 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -26,7 +26,7 @@
  *	ERR_PTR(error).  In the end of sequence they return %NULL. ->show()
  *	returns 0 in case of success and negative number in case of error.
  */
-int seq_open(struct file *file, struct seq_operations *op)
+int seq_open(struct file *file, const struct seq_operations *op)
 {
 	struct seq_file *p = file->private_data;
 
@@ -408,7 +408,7 @@
 
 int single_release(struct inode *inode, struct file *file)
 {
-	struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
+	const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
 	int res = seq_release(inode, file);
 	kfree(op);
 	return res;
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 2c122ee..4af4cd7 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -50,12 +50,12 @@
 static int  smb_statfs(struct dentry *, struct kstatfs *);
 static int  smb_show_options(struct seq_file *, struct vfsmount *);
 
-static kmem_cache_t *smb_inode_cachep;
+static struct kmem_cache *smb_inode_cachep;
 
 static struct inode *smb_alloc_inode(struct super_block *sb)
 {
 	struct smb_inode_info *ei;
-	ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, SLAB_KERNEL);
+	ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
@@ -66,7 +66,7 @@
 	kmem_cache_free(smb_inode_cachep, SMB_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct smb_inode_info *ei = (struct smb_inode_info *) foo;
 	unsigned long flagmask = SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR;
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
index 0fb7469..a4bcae8 100644
--- a/fs/smbfs/request.c
+++ b/fs/smbfs/request.c
@@ -25,7 +25,7 @@
 #define ROUND_UP(x) (((x)+3) & ~3)
 
 /* cache for request structures */
-static kmem_cache_t *req_cachep;
+static struct kmem_cache *req_cachep;
 
 static int smb_request_send_req(struct smb_request *req);
 
@@ -61,7 +61,7 @@
 	struct smb_request *req;
 	unsigned char *buf = NULL;
 
-	req = kmem_cache_alloc(req_cachep, SLAB_KERNEL);
+	req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
 	VERBOSE("allocating request: %p\n", req);
 	if (!req)
 		goto out;
diff --git a/fs/stat.c b/fs/stat.c
index bca07eb..a0ebfc7 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -51,13 +51,6 @@
 		return inode->i_op->getattr(mnt, dentry, stat);
 
 	generic_fillattr(inode, stat);
-	if (!stat->blksize) {
-		struct super_block *s = inode->i_sb;
-		unsigned blocks;
-		blocks = (stat->size+s->s_blocksize-1) >> s->s_blocksize_bits;
-		stat->blocks = (s->s_blocksize / 512) * blocks;
-		stat->blksize = s->s_blocksize;
-	}
 	return 0;
 }
 
diff --git a/fs/super.c b/fs/super.c
index 47e554c..84c320f 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -221,6 +221,24 @@
 }
 
 /*
+ * Superblock locking.  We really ought to get rid of these two.
+ */
+void lock_super(struct super_block * sb)
+{
+	get_fs_excl();
+	mutex_lock(&sb->s_lock);
+}
+
+void unlock_super(struct super_block * sb)
+{
+	put_fs_excl();
+	mutex_unlock(&sb->s_lock);
+}
+
+EXPORT_SYMBOL(lock_super);
+EXPORT_SYMBOL(unlock_super);
+
+/*
  * Write out and wait upon all dirty data associated with this
  * superblock.  Filesystem data as well as the underlying block
  * device.  Takes the superblock lock.  Requires a second blkdev
diff --git a/fs/sync.c b/fs/sync.c
index 1de747b..865f32be 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -6,6 +6,7 @@
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/writeback.h>
 #include <linux/syscalls.h>
 #include <linux/linkage.h>
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 3aa3434..a5782e8 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -372,6 +372,51 @@
 	return error;
 }
 
+int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent)
+{
+	struct dentry *old_parent_dentry, *new_parent_dentry, *new_dentry;
+	struct sysfs_dirent *new_parent_sd, *sd;
+	int error;
+
+	if (!new_parent)
+		return -EINVAL;
+
+	old_parent_dentry = kobj->parent ?
+		kobj->parent->dentry : sysfs_mount->mnt_sb->s_root;
+	new_parent_dentry = new_parent->dentry;
+
+again:
+	mutex_lock(&old_parent_dentry->d_inode->i_mutex);
+	if (!mutex_trylock(&new_parent_dentry->d_inode->i_mutex)) {
+		mutex_unlock(&old_parent_dentry->d_inode->i_mutex);
+		goto again;
+	}
+
+	new_parent_sd = new_parent_dentry->d_fsdata;
+	sd = kobj->dentry->d_fsdata;
+
+	new_dentry = lookup_one_len(kobj->name, new_parent_dentry,
+				    strlen(kobj->name));
+	if (IS_ERR(new_dentry)) {
+		error = PTR_ERR(new_dentry);
+		goto out;
+	} else
+		error = 0;
+	d_add(new_dentry, NULL);
+	d_move(kobj->dentry, new_dentry);
+	dput(new_dentry);
+
+	/* Remove from old parent's list and insert into new parent's list. */
+	list_del_init(&sd->s_sibling);
+	list_add(&sd->s_sibling, &new_parent_sd->s_children);
+
+out:
+	mutex_unlock(&new_parent_dentry->d_inode->i_mutex);
+	mutex_unlock(&old_parent_dentry->d_inode->i_mutex);
+
+	return error;
+}
+
 static int sysfs_dir_open(struct inode *inode, struct file *file)
 {
 	struct dentry * dentry = file->f_dentry;
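
sysfs_move_dir() above must hold the i_mutex of both the old and the new parent directory, and there is no natural ordering between them, so it takes one lock, attempts the other with mutex_trylock(), and drops the first and retries if that fails rather than blocking with a lock held. A small pthread sketch of that back-off idiom, with made-up mutex names standing in for the two parent inodes:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t old_parent = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t new_parent = PTHREAD_MUTEX_INITIALIZER;

/* take both parent locks without assuming an ordering between them */
static void lock_both(void)
{
again:
	pthread_mutex_lock(&old_parent);
	if (pthread_mutex_trylock(&new_parent) != 0) {
		/* someone holds the other lock: drop ours and start over,
		 * so we never sleep while still holding the first mutex */
		pthread_mutex_unlock(&old_parent);
		goto again;
	}
}

static void unlock_both(void)
{
	pthread_mutex_unlock(&new_parent);
	pthread_mutex_unlock(&old_parent);
}

int main(void)
{
	lock_both();
	printf("holding both parent locks, safe to move the entry\n");
	unlock_both();
	return 0;
}

The retry loop trades a little spinning for immunity to the AB-BA deadlock that two blocking lock calls taken in opposite orders could produce.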
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 298303b..95c1651 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -190,6 +190,9 @@
 		count = PAGE_SIZE - 1;
 	error = copy_from_user(buffer->page,buf,count);
 	buffer->needs_read_fill = 1;
+	/* if buf is assumed to contain a string, terminate it with \0
+	   so that e.g. sscanf() can parse the string easily */
+	/* if buf is assumed to contain a string, terminate it with \0
+	   so that e.g. sscanf() can parse the string easily */
+	buffer->page[count] = 0;
 	return error ? -EFAULT : count;
 }
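
The write-buffer change above copies at most PAGE_SIZE - 1 bytes and then stores a terminating \0, so a store handler can treat buffer->page as an ordinary C string and hand it to sscanf(). A userspace sketch of the same pattern, with memcpy() standing in for copy_from_user() and a fixed 4 KB page assumed:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static char page[PAGE_SIZE];

/* copy user data into the page, capped and NUL-terminated */
static size_t fill_write_buffer(const char *buf, size_t count)
{
	if (count >= PAGE_SIZE)
		count = PAGE_SIZE - 1;      /* leave room for the terminator */
	memcpy(page, buf, count);           /* the kernel uses copy_from_user() */
	page[count] = '\0';                 /* make the buffer a proper C string */
	return count;
}

int main(void)
{
	int value = 0;

	fill_write_buffer("42\n", 3);       /* what `echo 42 > attr` would deliver */
	if (sscanf(page, "%d", &value) == 1)
		printf("parsed attribute value: %d\n", value);
	return 0;
}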
 
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 20551a1..e503f85 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -16,7 +16,7 @@
 
 struct vfsmount *sysfs_mount;
 struct super_block * sysfs_sb = NULL;
-kmem_cache_t *sysfs_dir_cachep;
+struct kmem_cache *sysfs_dir_cachep;
 
 static struct super_operations sysfs_ops = {
 	.statfs		= simple_statfs,
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 6f3d6bd..bd7cec2 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -1,6 +1,6 @@
 
 extern struct vfsmount * sysfs_mount;
-extern kmem_cache_t *sysfs_dir_cachep;
+extern struct kmem_cache *sysfs_dir_cachep;
 
 extern struct inode * sysfs_new_inode(mode_t mode, struct sysfs_dirent *);
 extern int sysfs_create(struct dentry *, int mode, int (*init)(struct inode *));
diff --git a/fs/sysv/CHANGES b/fs/sysv/CHANGES
deleted file mode 100644
index 66ea6e9..0000000
--- a/fs/sysv/CHANGES
+++ /dev/null
@@ -1,60 +0,0 @@
-Mon, 15 Dec 1997	  Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
-	*    namei.c: struct sysv_dir_inode_operations updated to use dentries.
-
-Fri, 23 Jan 1998   Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
-	*    inode.c: corrected 1 track offset setting (in sb->sv_block_base).
-		      Originally it was overridden (by setting to zero)
-		      in detected_[xenix,sysv4,sysv2,coherent]. Thanks
-		      to Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl>
-		      for identifying the problem.
-
-Tue, 27 Jan 1998   Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
-        *    inode.c: added 2048-byte block support to SystemV FS.
-		      Merged detected_bs[512,1024,2048]() into one function:
-		      void detected_bs (u_char type, struct super_block *sb).
-		      Thanks to Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl>
-		      for the patch.
-
-Wed, 4 Feb 1998   Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
-	*    namei.c: removed static subdir(); is_subdir() from dcache.c
-		      is used instead. Cosmetic changes.
-
-Thu, 3 Dec 1998   Al Viro (viro@parcelfarce.linux.theplanet.co.uk)
-	*    namei.c (sysv_rmdir):
-		      Bugectomy: old check for victim being busy
-		      (inode->i_count) wasn't replaced (with checking
-		      dentry->d_count) and escaped Linus in the last round
-		      of changes. Shot and buried.
-
-Wed, 9 Dec 1998   AV
-	*    namei.c (do_sysv_rename):
-		       Fixed incorrect check for other owners + race.
-		       Removed checks that went to VFS.
-	*    namei.c (sysv_unlink):
-		       Removed checks that went to VFS.
-
-Thu, 10 Dec 1998   AV
-	*    namei.c (do_mknod):
-			Removed dead code - mknod is never asked to
-			create a symlink or directory. Incidentially,
-			it wouldn't do it right if it would be called.
-
-Sat, 26 Dec 1998   KGB
-	*    inode.c (detect_sysv4):
-			Added detection of expanded s_type field (0x10,
-			0x20 and 0x30).  Forced read-only access in this case.
-
-Sun, 21 Mar 1999   AV
-	*    namei.c (sysv_link):
-			Fixed i_count usage that resulted in dcache corruption.
-	*    inode.c:
-			Filled ->delete_inode() method with sysv_delete_inode().
-			sysv_put_inode() is gone, as it tried to do ->delete_
-			_inode()'s job.
-	*    ialloc.c: (sysv_free_inode):
-			Fixed race.
-
-Sun, 30 Apr 1999   AV
-	*    namei.c (sysv_mknod):
-			Removed dead code (S_IFREG case is now passed to
-			->create() by VFS).
diff --git a/fs/sysv/ChangeLog b/fs/sysv/ChangeLog
deleted file mode 100644
index f403f8b..0000000
--- a/fs/sysv/ChangeLog
+++ /dev/null
@@ -1,106 +0,0 @@
-Thu Feb 14 2002  Andrew Morton  <akpm@zip.com.au>
-
-	* dir_commit_chunk(): call writeout_one_page() as well as
-	  waitfor_one_page() for IS_SYNC directories, so that we
-	  actually do sync the directory. (forward-port from 2.4).
-
-Thu Feb  7 2002  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-
-	* super.c: switched to ->get_sb()
-	* ChangeLog: fixed dates ;-)
-
-2002-01-24  David S. Miller  <davem@redhat.com>
-
-	* inode.c: Include linux/init.h
-
-Mon Jan 21 2002  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-	* ialloc.c (sysv_new_inode): zero SYSV_I(inode)->i_data out.
-	* i_vnode renamed to vfs_inode.  Sorry, but let's keep that
-	  consistent.
-
-Sat Jan 19 2002  Christoph Hellwig  <hch@infradead.org>
-
-	* include/linux/sysv_fs.h (SYSV_I): Get fs-private inode data using
-		list_entry() instead of inode->u.
-	* include/linux/sysv_fs_i.h: Add 'struct inode  i_vnode' field to
-		sysv_inode_info structure.
-	* inode.c: Include <linux/slab.h>, implement alloc_inode/destroy_inode
-		sop methods, add infrastructure for per-fs inode slab cache.
-	* super.c (init_sysv_fs): Initialize inode cache, recover properly
-		in the case of failed register_filesystem for V7.
-	(exit_sysv_fs): Destroy inode cache.
-
-Sat Jan 19 2002  Christoph Hellwig  <hch@infradead.org>
-
-	* include/linux/sysv_fs.h: Include <linux/sysv_fs_i.h>, declare SYSV_I().
-	* dir.c (sysv_find_entry): Use SYSV_I() instead of ->u.sysv_i to
-		access fs-private inode data.
-	* ialloc.c (sysv_new_inode): Likewise.
-	* inode.c (sysv_read_inode): Likewise.
-	(sysv_update_inode): Likewise.
-	* itree.c (get_branch): Likewise.
-	(sysv_truncate): Likewise.
-	* symlink.c (sysv_readlink): Likewise.
-	(sysv_follow_link): Likewise.
-
-Fri Jan  4 2002  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-
-	* ialloc.c (sysv_free_inode): Use sb->s_id instead of bdevname().
-	* inode.c (sysv_read_inode): Likewise.
-	  (sysv_update_inode): Likewise.
-	  (sysv_sync_inode): Likewise.
-	* super.c (detect_sysv): Likewise.
-	  (complete_read_super): Likewise.
-	  (sysv_read_super): Likewise.
-	  (v7_read_super): Likewise.
-
-Sun Dec 30 2001  Manfred Spraul  <manfred@colorfullife.com>
-
-	* dir.c (dir_commit_chunk): Do not set dir->i_version.
-	(sysv_readdir): Likewise.
-
-Thu Dec 27 2001  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-
-	* itree.c (get_block): Use map_bh() to fill out bh_result.
-
-Tue Dec 25 2001  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-
-	* super.c (sysv_read_super): Use sb_set_blocksize() to set blocksize.
-	  (v7_read_super): Likewise.
-
-Tue Nov 27 2001  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-
-	* itree.c (get_block): Change type for iblock argument to sector_t.
-	* super.c (sysv_read_super): Set s_blocksize early.
-	  (v7_read_super): Likewise.
-	* balloc.c (sysv_new_block): Use sb_bread(). instead of bread().
-	  (sysv_count_free_blocks): Likewise.
-	* ialloc.c (sysv_raw_inode): Likewise.
-	* itree.c (get_branch): Likewise.
-	  (free_branches): Likewise.
-	* super.c (sysv_read_super): Likewise.
-	  (v7_read_super): Likewise.
-
-Sat Dec 15 2001  Christoph Hellwig  <hch@infradead.org>
-
-	* inode.c (sysv_read_inode): Mark inode as bad in case of failure.
-	* super.c (complete_read_super): Check for bad root inode.
-
-Wed Nov 21 2001  Andrew Morton  <andrewm@uow.edu.au>
-
-	* file.c (sysv_sync_file): Call fsync_inode_data_buffers.
-
-Fri Oct 26 2001  Christoph Hellwig  <hch@infradead.org>
-
-	* dir.c, ialloc.c, namei.c, include/linux/sysv_fs_i.h:
-	Implement per-Inode lookup offset cache.
-	Modelled after Ted's ext2 patch.
-
-Fri Oct 26 2001  Christoph Hellwig  <hch@infradead.org>
-
-	* inode.c, super.c, include/linux/sysv_fs.h,
-	  include/linux/sysv_fs_sb.h:
-	Remove symlink faking.	Noone really wants to use these as
-	linux filesystems and native OSes don't support it anyway.
-
-
diff --git a/fs/sysv/INTRO b/fs/sysv/INTRO
deleted file mode 100644
index de4e4d1..0000000
--- a/fs/sysv/INTRO
+++ /dev/null
@@ -1,182 +0,0 @@
-This is the implementation of the SystemV/Coherent filesystem for Linux.
-It grew out of separate filesystem implementations
-
-    Xenix FS      Doug Evans <dje@cygnus.com>  June 1992
-    SystemV FS    Paul B. Monday <pmonday@eecs.wsu.edu> March-June 1993
-    Coherent FS   B. Haible <haible@ma2s2.mathematik.uni-karlsruhe.de> June 1993
-
-and was merged together in July 1993.
-
-These filesystems are rather similar. Here is a comparison with Minix FS:
-
-* Linux fdisk reports on partitions
-  - Minix FS     0x81 Linux/Minix
-  - Xenix FS     ??
-  - SystemV FS   ??
-  - Coherent FS  0x08 AIX bootable
-
-* Size of a block or zone (data allocation unit on disk)
-  - Minix FS     1024
-  - Xenix FS     1024 (also 512 ??)
-  - SystemV FS   1024 (also 512 and 2048)
-  - Coherent FS   512
-
-* General layout: all have one boot block, one super block and
-  separate areas for inodes and for directories/data.
-  On SystemV Release 2 FS (e.g. Microport) the first track is reserved and
-  all the block numbers (including the super block) are offset by one track.
-
-* Byte ordering of "short" (16 bit entities) on disk:
-  - Minix FS     little endian  0 1
-  - Xenix FS     little endian  0 1
-  - SystemV FS   little endian  0 1
-  - Coherent FS  little endian  0 1
-  Of course, this affects only the file system, not the data of files on it!
-
-* Byte ordering of "long" (32 bit entities) on disk:
-  - Minix FS     little endian  0 1 2 3
-  - Xenix FS     little endian  0 1 2 3
-  - SystemV FS   little endian  0 1 2 3
-  - Coherent FS  PDP-11         2 3 0 1
-  Of course, this affects only the file system, not the data of files on it!
-
-* Inode on disk: "short", 0 means non-existent, the root dir ino is:
-  - Minix FS                            1
-  - Xenix FS, SystemV FS, Coherent FS   2
-
-* Maximum number of hard links to a file:
-  - Minix FS     250
-  - Xenix FS     ??
-  - SystemV FS   ??
-  - Coherent FS  >=10000
-
-* Free inode management:
-  - Minix FS                             a bitmap
-  - Xenix FS, SystemV FS, Coherent FS
-      There is a cache of a certain number of free inodes in the super-block.
-      When it is exhausted, new free inodes are found using a linear search.
-
-* Free block management:
-  - Minix FS                             a bitmap
-  - Xenix FS, SystemV FS, Coherent FS
-      Free blocks are organized in a "free list". Maybe a misleading term,
-      since it is not true that every free block contains a pointer to
-      the next free block. Rather, the free blocks are organized in chunks
-      of limited size, and every now and then a free block contains pointers
-      to the free blocks pertaining to the next chunk; the first of these
-      contains pointers and so on. The list terminates with a "block number"
-      0 on Xenix FS and SystemV FS, with a block zeroed out on Coherent FS.
-
-* Super-block location:
-  - Minix FS     block 1 = bytes 1024..2047
-  - Xenix FS     block 1 = bytes 1024..2047
-  - SystemV FS   bytes 512..1023
-  - Coherent FS  block 1 = bytes 512..1023
-
-* Super-block layout:
-  - Minix FS
-                    unsigned short s_ninodes;
-                    unsigned short s_nzones;
-                    unsigned short s_imap_blocks;
-                    unsigned short s_zmap_blocks;
-                    unsigned short s_firstdatazone;
-                    unsigned short s_log_zone_size;
-                    unsigned long s_max_size;
-                    unsigned short s_magic;
-  - Xenix FS, SystemV FS, Coherent FS
-                    unsigned short s_firstdatazone;
-                    unsigned long  s_nzones;
-                    unsigned short s_fzone_count;
-                    unsigned long  s_fzones[NICFREE];
-                    unsigned short s_finode_count;
-                    unsigned short s_finodes[NICINOD];
-                    char           s_flock;
-                    char           s_ilock;
-                    char           s_modified;
-                    char           s_rdonly;
-                    unsigned long  s_time;
-                    short          s_dinfo[4]; -- SystemV FS only
-                    unsigned long  s_free_zones;
-                    unsigned short s_free_inodes;
-                    short          s_dinfo[4]; -- Xenix FS only
-                    unsigned short s_interleave_m,s_interleave_n; -- Coherent FS only
-                    char           s_fname[6];
-                    char           s_fpack[6];
-    then they differ considerably:
-        Xenix FS
-                    char           s_clean;
-                    char           s_fill[371];
-                    long           s_magic;
-                    long           s_type;
-        SystemV FS
-                    long           s_fill[12 or 14];
-                    long           s_state;
-                    long           s_magic;
-                    long           s_type;
-        Coherent FS
-                    unsigned long  s_unique;
-    Note that Coherent FS has no magic.
-
-* Inode layout:
-  - Minix FS
-                    unsigned short i_mode;
-                    unsigned short i_uid;
-                    unsigned long  i_size;
-                    unsigned long  i_time;
-                    unsigned char  i_gid;
-                    unsigned char  i_nlinks;
-                    unsigned short i_zone[7+1+1];
-  - Xenix FS, SystemV FS, Coherent FS
-                    unsigned short i_mode;
-                    unsigned short i_nlink;
-                    unsigned short i_uid;
-                    unsigned short i_gid;
-                    unsigned long  i_size;
-                    unsigned char  i_zone[3*(10+1+1+1)];
-                    unsigned long  i_atime;
-                    unsigned long  i_mtime;
-                    unsigned long  i_ctime;
-
-* Regular file data blocks are organized as
-  - Minix FS
-               7 direct blocks
-               1 indirect block (pointers to blocks)
-               1 double-indirect block (pointer to pointers to blocks)
-  - Xenix FS, SystemV FS, Coherent FS
-              10 direct blocks
-               1 indirect block (pointers to blocks)
-               1 double-indirect block (pointer to pointers to blocks)
-               1 triple-indirect block (pointer to pointers to pointers to blocks)
-
-* Inode size, inodes per block
-  - Minix FS        32   32
-  - Xenix FS        64   16
-  - SystemV FS      64   16
-  - Coherent FS     64    8
-
-* Directory entry on disk
-  - Minix FS
-                    unsigned short inode;
-                    char name[14/30];
-  - Xenix FS, SystemV FS, Coherent FS
-                    unsigned short inode;
-                    char name[14];
-
-* Dir entry size, dir entries per block
-  - Minix FS     16/32    64/32
-  - Xenix FS     16       64
-  - SystemV FS   16       64
-  - Coherent FS  16       32
-
-* How to implement symbolic links such that the host fsck doesn't scream:
-  - Minix FS     normal
-  - Xenix FS     kludge: as regular files with  chmod 1000
-  - SystemV FS   ??
-  - Coherent FS  kludge: as regular files with  chmod 1000
-
-
-Notation: We often speak of a "block" but mean a zone (the allocation unit)
-and not the disk driver's notion of "block".
-
-
-Bruno Haible  <haible@ma2s2.mathematik.uni-karlsruhe.de>
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index d63c5e4..ead9864 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -301,13 +301,13 @@
 	unlock_kernel();
 }
 
-static kmem_cache_t *sysv_inode_cachep;
+static struct kmem_cache *sysv_inode_cachep;
 
 static struct inode *sysv_alloc_inode(struct super_block *sb)
 {
 	struct sysv_inode_info *si;
 
-	si = kmem_cache_alloc(sysv_inode_cachep, SLAB_KERNEL);
+	si = kmem_cache_alloc(sysv_inode_cachep, GFP_KERNEL);
 	if (!si)
 		return NULL;
 	return &si->vfs_inode;
@@ -318,7 +318,7 @@
 	kmem_cache_free(sysv_inode_cachep, SYSV_I(inode));
 }
 
-static void init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct sysv_inode_info *si = (struct sysv_inode_info *)p;
 
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 1aea6a4..1dbc295 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -107,12 +107,12 @@
 	.fs_flags	= FS_REQUIRES_DEV,
 };
 
-static kmem_cache_t * udf_inode_cachep;
+static struct kmem_cache * udf_inode_cachep;
 
 static struct inode *udf_alloc_inode(struct super_block *sb)
 {
 	struct udf_inode_info *ei;
-	ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL);
+	ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 
@@ -130,7 +130,7 @@
 	kmem_cache_free(udf_inode_cachep, UDF_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct udf_inode_info *ei = (struct udf_inode_info *) foo;
 
@@ -1709,7 +1709,7 @@
 		sb->s_dirt = 1;
 	}
 	va_start(args, fmt);
-	vsprintf(error_buf, fmt, args);
+	vsnprintf(error_buf, sizeof(error_buf), fmt, args);
 	va_end(args);
 	printk (KERN_CRIT "UDF-fs error (device %s): %s: %s\n",
 		sb->s_id, function, error_buf);
@@ -1721,7 +1721,7 @@
 	va_list args;
 
 	va_start (args, fmt);
-	vsprintf(error_buf, fmt, args);
+	vsnprintf(error_buf, sizeof(error_buf), fmt, args);
 	va_end(args);
 	printk(KERN_WARNING "UDF-fs warning (device %s): %s: %s\n",
 		sb->s_id, function, error_buf);
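
udf_error() and udf_warning() above (and the ufs helpers below) switch from vsprintf() to vsnprintf() so that an oversized format expansion can no longer write past the static error_buf. A bounded formatting helper in the same shape, with an assumed 256-byte buffer and an illustrative function name:

#include <stdarg.h>
#include <stdio.h>

static char error_buf[256];

/* format a message into error_buf without ever writing past its end */
static void fs_warning(const char *device, const char *function,
		       const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(error_buf, sizeof(error_buf), fmt, args); /* bounded, NUL-terminated */
	va_end(args);
	printf("FS warning (device %s): %s: %s\n", device, function, error_buf);
}

int main(void)
{
	fs_warning("sda1", "read_super", "bad magic 0x%x in block %d", 0xdead, 7);
	return 0;
}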
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index ec79e30..8a8e938 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -224,7 +224,7 @@
 		sb->s_flags |= MS_RDONLY;
 	}
 	va_start (args, fmt);
-	vsprintf (error_buf, fmt, args);
+	vsnprintf (error_buf, sizeof(error_buf), fmt, args);
 	va_end (args);
 	switch (UFS_SB(sb)->s_mount_opt & UFS_MOUNT_ONERROR) {
 	case UFS_MOUNT_ONERROR_PANIC:
@@ -255,7 +255,7 @@
 		sb->s_dirt = 1;
 	}
 	va_start (args, fmt);
-	vsprintf (error_buf, fmt, args);
+	vsnprintf (error_buf, sizeof(error_buf), fmt, args);
 	va_end (args);
 	sb->s_flags |= MS_RDONLY;
 	printk (KERN_CRIT "UFS-fs panic (device %s): %s: %s\n",
@@ -268,7 +268,7 @@
 	va_list args;
 
 	va_start (args, fmt);
-	vsprintf (error_buf, fmt, args);
+	vsnprintf (error_buf, sizeof(error_buf), fmt, args);
 	va_end (args);
 	printk (KERN_WARNING "UFS-fs warning (device %s): %s: %s\n",
 		sb->s_id, function, error_buf);
@@ -1204,12 +1204,12 @@
 	return 0;
 }
 
-static kmem_cache_t * ufs_inode_cachep;
+static struct kmem_cache * ufs_inode_cachep;
 
 static struct inode *ufs_alloc_inode(struct super_block *sb)
 {
 	struct ufs_inode_info *ei;
-	ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, SLAB_KERNEL);
+	ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	ei->vfs_inode.i_version = 1;
@@ -1221,7 +1221,7 @@
 	kmem_cache_free(ufs_inode_cachep, UFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct ufs_inode_info *ei = (struct ufs_inode_info *) foo;
 
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 28fce6c..7dd12bb 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -299,7 +299,7 @@
 
 #define ubh_get_addr16(ubh,begin) \
 	(((__fs16*)((ubh)->bh[(begin) >> (uspi->s_fshift-1)]->b_data)) + \
-	((begin) & (uspi->fsize>>1) - 1)))
+	((begin) & ((uspi->fsize>>1) - 1)))
 
 #define ubh_get_addr32(ubh,begin) \
 	(((__fs32*)((ubh)->bh[(begin) >> (uspi->s_fshift-2)]->b_data)) + \
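
The ubh_get_addr16 fix above restores the missing opening parenthesis so the macro's parentheses balance again and the mask is unambiguously ((uspi->fsize>>1) - 1), i.e. the classic index & (n - 1) trick for a power-of-two n. A standalone sketch of that masking; the fragment size and index values are invented for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* fragment size in bytes; must be a power of two for the mask trick */
	unsigned int fsize = 1024;
	unsigned int per_frag = fsize >> 1;     /* 16-bit words per fragment */
	uint16_t frag[512];

	for (unsigned int i = 0; i < per_frag; i++)
		frag[i] = (uint16_t)i;

	/* "begin" is a global 16-bit-word index; the mask keeps the offset
	 * inside one fragment, exactly the ((begin) & ((fsize>>1) - 1)) term */
	unsigned int begin = 1500;
	unsigned int offset = begin & (per_frag - 1);

	printf("begin %u -> word %u within its fragment (value %u)\n",
	       begin, offset, frag[offset]);
	return 0;
}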
diff --git a/fs/utimes.c b/fs/utimes.c
index 1bcd852..99cf2cb 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -2,6 +2,7 @@
 #include <linux/fs.h>
 #include <linux/linkage.h>
 #include <linux/namei.h>
+#include <linux/sched.h>
 #include <linux/utime.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 09360cf..8e6b56f 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -149,9 +149,10 @@
  */
 STATIC void
 xfs_end_bio_delalloc(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
 
 	xfs_destroy_ioend(ioend);
 }
@@ -161,9 +162,10 @@
  */
 STATIC void
 xfs_end_bio_written(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
 
 	xfs_destroy_ioend(ioend);
 }
@@ -176,9 +178,10 @@
  */
 STATIC void
 xfs_end_bio_unwritten(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
 	bhv_vnode_t		*vp = ioend->io_vnode;
 	xfs_off_t		offset = ioend->io_offset;
 	size_t			size = ioend->io_size;
@@ -220,11 +223,11 @@
 	ioend->io_size = 0;
 
 	if (type == IOMAP_UNWRITTEN)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
 	else if (type == IOMAP_DELAY)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
 	else
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
 
 	return ioend;
 }
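
The xfs_end_bio_* conversions here, like the reiserfs flush_async_commits() change earlier, follow the updated workqueue convention: the handler receives a struct work_struct * and recovers its enclosing object with container_of() instead of being passed an opaque void *. A self-contained sketch of that idiom with invented structure names (not the kernel types):

#include <stdio.h>
#include <stddef.h>

/* same pointer arithmetic as the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {               /* stand-in for struct work_struct */
	int pending;
};

struct ioend {                   /* stand-in for xfs_ioend_t */
	const char *what;
	struct work_item io_work;
};

/* new-style handler: receives the embedded work item, not a void * */
static void end_io_work(struct work_item *w)
{
	struct ioend *io = container_of(w, struct ioend, io_work);

	printf("completing %s I/O\n", io->what);
}

int main(void)
{
	struct ioend io = { .what = "delalloc", .io_work = { 0 } };

	end_io_work(&io.io_work);   /* the workqueue would normally pass this pointer */
	return 0;
}

The same pointer arithmetic is why reiserfs needed the new j_work_sb field: once the callback argument is the embedded work item, any extra context has to live in the surrounding structure.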
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index d338284..4fb01ff 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -32,6 +32,7 @@
 #include <linux/kthread.h>
 #include <linux/migrate.h>
 #include <linux/backing-dev.h>
+#include <linux/freezer.h>
 
 STATIC kmem_zone_t *xfs_buf_zone;
 STATIC kmem_shaker_t xfs_buf_shake;
@@ -994,9 +995,10 @@
 
 STATIC void
 xfs_buf_iodone_work(
-	void			*v)
+	struct work_struct	*work)
 {
-	xfs_buf_t		*bp = (xfs_buf_t *)v;
+	xfs_buf_t		*bp =
+		container_of(work, xfs_buf_t, b_iodone_work);
 
 	if (bp->b_iodone)
 		(*(bp->b_iodone))(bp);
@@ -1017,10 +1019,10 @@
 
 	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
 		if (schedule) {
-			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
+			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
 			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
 		} else {
-			xfs_buf_iodone_work(bp);
+			xfs_buf_iodone_work(&bp->b_iodone_work);
 		}
 	} else {
 		up(&bp->b_iodonesema);
@@ -1825,11 +1827,11 @@
 	if (!xfs_buf_zone)
 		goto out_free_trace_buf;
 
-	xfslogd_workqueue = create_workqueue("xfslogd");
+	xfslogd_workqueue = create_freezeable_workqueue("xfslogd");
 	if (!xfslogd_workqueue)
 		goto out_free_buf_zone;
 
-	xfsdatad_workqueue = create_workqueue("xfsdatad");
+	xfsdatad_workqueue = create_freezeable_workqueue("xfsdatad");
 	if (!xfsdatad_workqueue)
 		goto out_destroy_xfslogd_workqueue;
 
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index de05abb..b93265b 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -56,6 +56,7 @@
 #include <linux/mempool.h>
 #include <linux/writeback.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 STATIC struct quotactl_ops xfs_quotactl_operations;
 STATIC struct super_operations xfs_super_operations;
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index f338e40..fdd1095 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -357,7 +357,7 @@
 /* helper */
 acpi_handle acpi_get_child(acpi_handle, acpi_integer);
 acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int);
-#define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)((dev)->firmware_data))
+#define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)((dev)->archdata.acpi_handle))
 
 #endif /* CONFIG_ACPI */
 
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 47faf27..7f1e929 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -64,7 +64,7 @@
 /* Host-dependent types and defines */
 
 #define ACPI_MACHINE_WIDTH          BITS_PER_LONG
-#define acpi_cache_t                        kmem_cache_t
+#define acpi_cache_t                        struct kmem_cache
 #define acpi_spinlock                   spinlock_t *
 #define ACPI_EXPORT_SYMBOL(symbol)  EXPORT_SYMBOL(symbol);
 #define strtoul                     simple_strtoul
diff --git a/include/asm-alpha/checksum.h b/include/asm-alpha/checksum.h
index a5c9f08..d3854bb 100644
--- a/include/asm-alpha/checksum.h
+++ b/include/asm-alpha/checksum.h
@@ -7,21 +7,20 @@
  *	This is a version of ip_compute_csum() optimized for IP headers,
  *	which always checksum on 4 octet boundaries.
  */
-extern unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl);
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /*
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-extern unsigned short int csum_tcpudp_magic(unsigned long saddr,
-					   unsigned long daddr,
+extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 					   unsigned short len,
 					   unsigned short proto,
-					   unsigned int sum);
+					   __wsum sum);
 
-unsigned int csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
+__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 				unsigned short len, unsigned short proto,
-				unsigned int sum);
+				__wsum sum);
 
 /*
  * computes the checksum of a memory block at buff, length len,
@@ -35,7 +34,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -44,9 +43,9 @@
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-unsigned int csum_partial_copy_from_user(const char __user *src, char *dst, int len, unsigned int sum, int *errp);
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp);
 
-unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
 
 
 /*
@@ -54,24 +53,23 @@
  * in icmp.c
  */
 
-extern unsigned short ip_compute_csum(unsigned char * buff, int len);
+extern __sum16 ip_compute_csum(const void *buff, int len);
 
 /*
  *	Fold a partial checksum without adding pseudo headers
  */
 
-static inline unsigned short csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
+	u32 sum = (__force u32)csum;
 	sum = (sum & 0xffff) + (sum >> 16);
 	sum = (sum & 0xffff) + (sum >> 16);
-	return ~sum;
+	return (__force __sum16)~sum;
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-extern unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-                                          struct in6_addr *daddr,
-                                          __u32 len,
-                                          unsigned short proto,
-                                          unsigned int sum);
-
+extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+			       const struct in6_addr *daddr,
+			       __u32 len, unsigned short proto,
+			       __wsum sum);
 #endif
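
Most of the alpha checksum.h churn is the new __wsum/__sum16 typing, but csum_fold() is also the heart of the algorithm: fold a 32-bit one's-complement accumulator into 16 bits by adding the halves twice, then complement. A plain-C illustration of folding plus a toy byte-pair accumulator standing in for csum_partial(); the kernel versions are optimized and use the annotated types:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* fold a 32-bit partial sum to a 16-bit one's-complement checksum */
static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);   /* first fold may still carry */
	sum = (sum & 0xffff) + (sum >> 16);   /* second fold absorbs that carry */
	return (uint16_t)~sum;
}

/* simple 16-bit-at-a-time accumulation, standing in for csum_partial() */
static uint32_t csum_partial(const void *buff, size_t len, uint32_t sum)
{
	const uint8_t *p = buff;

	while (len > 1) {
		sum += (uint32_t)(p[0] | (p[1] << 8));   /* machine-order word */
		p += 2;
		len -= 2;
	}
	if (len)                                          /* trailing odd byte */
		sum += p[0];
	return sum;
}

int main(void)
{
	uint8_t data[] = { 0x45, 0x00, 0x00, 0x54, 0x1c, 0x46 };

	printf("checksum: 0x%04x\n", csum_fold(csum_partial(data, sizeof(data), 0)));
	return 0;
}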
diff --git a/include/asm-alpha/device.h b/include/asm-alpha/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-alpha/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h
index b9ff4d8..57e09f5 100644
--- a/include/asm-alpha/dma-mapping.h
+++ b/include/asm-alpha/dma-mapping.h
@@ -51,7 +51,7 @@
 
 #define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(dev)			(1)
+#define dma_is_consistent(d, h)			(1)
 
 int dma_set_mask(struct device *dev, u64 mask);
 
@@ -60,7 +60,7 @@
 #define dma_sync_single_range(dev, addr, off, size, dir)  do { } while (0)
 #define dma_sync_sg_for_cpu(dev, sg, nents, dir)	  do { } while (0)
 #define dma_sync_sg_for_device(dev, sg, nents, dir)	  do { } while (0)
-#define dma_cache_sync(va, size, dir)			  do { } while (0)
+#define dma_cache_sync(dev, va, size, dir)		  do { } while (0)
 
 #define dma_get_cache_alignment()			  L1_CACHE_BYTES
 
diff --git a/include/asm-alpha/unistd.h b/include/asm-alpha/unistd.h
index 2cabbd4..84313d1 100644
--- a/include/asm-alpha/unistd.h
+++ b/include/asm-alpha/unistd.h
@@ -387,188 +387,6 @@
 
 #define NR_SYSCALLS			447
 
-#if defined(__GNUC__)
-
-#define _syscall_return(type)						\
-	return (_sc_err ? errno = _sc_ret, _sc_ret = -1L : 0), (type) _sc_ret
-
-#define _syscall_clobbers						\
-	"$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8",			\
-	"$22", "$23", "$24", "$25", "$27", "$28" 			\
-
-#define _syscall0(type, name)						\
-type name(void)								\
-{									\
-	long _sc_ret, _sc_err;						\
-	{								\
-		register long _sc_0 __asm__("$0");			\
-		register long _sc_19 __asm__("$19");			\
-									\
-		_sc_0 = __NR_##name;					\
-		__asm__("callsys # %0 %1 %2"				\
-			: "=r"(_sc_0), "=r"(_sc_19)			\
-			: "0"(_sc_0)					\
-			: _syscall_clobbers);				\
-		_sc_ret = _sc_0, _sc_err = _sc_19;			\
-	}								\
-	_syscall_return(type);						\
-}
-
-#define _syscall1(type,name,type1,arg1)					\
-type name(type1 arg1)							\
-{									\
-	long _sc_ret, _sc_err;						\
-	{								\
-		register long _sc_0 __asm__("$0");			\
-		register long _sc_16 __asm__("$16");			\
-		register long _sc_19 __asm__("$19");			\
-									\
-		_sc_0 = __NR_##name;					\
-		_sc_16 = (long) (arg1);					\
-		__asm__("callsys # %0 %1 %2 %3"				\
-			: "=r"(_sc_0), "=r"(_sc_19)			\
-			: "0"(_sc_0), "r"(_sc_16)			\
-			: _syscall_clobbers);				\
-		_sc_ret = _sc_0, _sc_err = _sc_19;			\
-	}								\
-	_syscall_return(type);						\
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2)			\
-type name(type1 arg1,type2 arg2)					\
-{									\
-	long _sc_ret, _sc_err;						\
-	{								\
-		register long _sc_0 __asm__("$0");			\
-		register long _sc_16 __asm__("$16");			\
-		register long _sc_17 __asm__("$17");			\
-		register long _sc_19 __asm__("$19");			\
-									\
-		_sc_0 = __NR_##name;					\
-		_sc_16 = (long) (arg1);					\
-		_sc_17 = (long) (arg2);					\
-		__asm__("callsys # %0 %1 %2 %3 %4"			\
-			: "=r"(_sc_0), "=r"(_sc_19)			\
-			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17)		\
-			: _syscall_clobbers);				\
-		_sc_ret = _sc_0, _sc_err = _sc_19;			\
-	}								\
-	_syscall_return(type);						\
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)		\
-type name(type1 arg1,type2 arg2,type3 arg3)				\
-{									\
-	long _sc_ret, _sc_err;						\
-	{								\
-		register long _sc_0 __asm__("$0");			\
-		register long _sc_16 __asm__("$16");			\
-		register long _sc_17 __asm__("$17");			\
-		register long _sc_18 __asm__("$18");			\
-		register long _sc_19 __asm__("$19");			\
-									\
-		_sc_0 = __NR_##name;					\
-		_sc_16 = (long) (arg1);					\
-		_sc_17 = (long) (arg2);					\
-		_sc_18 = (long) (arg3);					\
-		__asm__("callsys # %0 %1 %2 %3 %4 %5"			\
-			: "=r"(_sc_0), "=r"(_sc_19)			\
-			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17),		\
-			  "r"(_sc_18)					\
-			: _syscall_clobbers);				\
-		_sc_ret = _sc_0, _sc_err = _sc_19;			\
-	}								\
-	_syscall_return(type);						\
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4)		 \
-{									 \
-	long _sc_ret, _sc_err;						\
-	{								\
-		register long _sc_0 __asm__("$0");			\
-		register long _sc_16 __asm__("$16");			\
-		register long _sc_17 __asm__("$17");			\
-		register long _sc_18 __asm__("$18");			\
-		register long _sc_19 __asm__("$19");			\
-									\
-		_sc_0 = __NR_##name;					\
-		_sc_16 = (long) (arg1);					\
-		_sc_17 = (long) (arg2);					\
-		_sc_18 = (long) (arg3);					\
-		_sc_19 = (long) (arg4);					\
-		__asm__("callsys # %0 %1 %2 %3 %4 %5 %6"		\
-			: "=r"(_sc_0), "=r"(_sc_19)			\
-			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17),		\
-			  "r"(_sc_18), "1"(_sc_19)			\
-			: _syscall_clobbers);				\
-		_sc_ret = _sc_0, _sc_err = _sc_19;			\
-	}								\
-	_syscall_return(type);						\
-} 
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	  type5,arg5)							 \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
-{									\
-	long _sc_ret, _sc_err;						\
-	{								\
-		register long _sc_0 __asm__("$0");			\
-		register long _sc_16 __asm__("$16");			\
-		register long _sc_17 __asm__("$17");			\
-		register long _sc_18 __asm__("$18");			\
-		register long _sc_19 __asm__("$19");			\
-		register long _sc_20 __asm__("$20");			\
-									\
-		_sc_0 = __NR_##name;					\
-		_sc_16 = (long) (arg1);					\
-		_sc_17 = (long) (arg2);					\
-		_sc_18 = (long) (arg3);					\
-		_sc_19 = (long) (arg4);					\
-		_sc_20 = (long) (arg5);					\
-		__asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7"		\
-			: "=r"(_sc_0), "=r"(_sc_19)			\
-			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17),		\
-			  "r"(_sc_18), "1"(_sc_19), "r"(_sc_20)		\
-			: _syscall_clobbers);				\
-		_sc_ret = _sc_0, _sc_err = _sc_19;			\
-	}								\
-	_syscall_return(type);						\
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	  type5,arg5,type6,arg6)					 \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, type6 arg6)\
-{									\
-	long _sc_ret, _sc_err;						\
-	{								\
-		register long _sc_0 __asm__("$0");			\
-		register long _sc_16 __asm__("$16");			\
-		register long _sc_17 __asm__("$17");			\
-		register long _sc_18 __asm__("$18");			\
-		register long _sc_19 __asm__("$19");			\
-		register long _sc_20 __asm__("$20");			\
-		register long _sc_21 __asm__("$21");			\
-									\
-		_sc_0 = __NR_##name;					\
-		_sc_16 = (long) (arg1);					\
-		_sc_17 = (long) (arg2);					\
-		_sc_18 = (long) (arg3);					\
-		_sc_19 = (long) (arg4);					\
-		_sc_20 = (long) (arg5);					\
-		_sc_21 = (long) (arg6);					\
-		__asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7 %8"		\
-			: "=r"(_sc_0), "=r"(_sc_19)			\
-			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17),		\
-			  "r"(_sc_18), "1"(_sc_19), "r"(_sc_20), "r"(_sc_21) \
-			: _syscall_clobbers);				\
-		_sc_ret = _sc_0, _sc_err = _sc_19;			\
-	}								\
-	_syscall_return(type);						\
-}
-
-#endif /* __GNUC__ */
-
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
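
The removed _syscallN macros were the old way for userspace to emit an alpha system call directly from a header-provided stub; callers are expected to use the C library's syscall() wrapper (or the ordinary libc function) instead. A minimal example of that replacement; syscall() is shown only to mirror what the macros used to generate:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* what a former _syscall0(pid_t, getpid) user would do instead */
	long pid = syscall(SYS_getpid);

	printf("getpid via syscall(): %ld\n", pid);
	return 0;
}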
diff --git a/include/asm-arm/arch-ixp4xx/platform.h b/include/asm-arm/arch-ixp4xx/platform.h
index 8d10a91..ab194e5 100644
--- a/include/asm-arm/arch-ixp4xx/platform.h
+++ b/include/asm-arm/arch-ixp4xx/platform.h
@@ -86,6 +86,19 @@
 	unsigned long scl_pin;
 };
 
+/*
+ * This structure provides a means for the board setup code
+ * to give information to the pata_ixp4xx driver. It is
+ * passed as platform_data.
+ */
+struct ixp4xx_pata_data {
+	volatile u32	*cs0_cfg;
+	volatile u32	*cs1_cfg;
+	unsigned long	cs0_bits;
+	unsigned long	cs1_bits;
+	void __iomem	*cs0;
+	void __iomem	*cs1;
+};
 
 struct sys_timer;
 
diff --git a/include/asm-arm/arch-omap/irda.h b/include/asm-arm/arch-omap/irda.h
index 805ae35..345a649 100644
--- a/include/asm-arm/arch-omap/irda.h
+++ b/include/asm-arm/arch-omap/irda.h
@@ -24,7 +24,7 @@
 	/* Very specific to the needs of some platforms (h3,h4)
 	 * having calls which can sleep in irda_set_speed.
 	 */
-	struct work_struct gpio_expa;
+	struct delayed_work gpio_expa;
 	int rx_channel;
 	int tx_channel;
 	unsigned long dest_start;
diff --git a/include/asm-arm/checksum.h b/include/asm-arm/checksum.h
index 747bdd3..8c0bb5b 100644
--- a/include/asm-arm/checksum.h
+++ b/include/asm-arm/checksum.h
@@ -23,7 +23,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -33,26 +33,18 @@
  * better 64-bit) boundary
  */
 
-unsigned int
-csum_partial_copy_nocheck(const char *src, char *dst, int len, int sum);
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
 
-unsigned int
-csum_partial_copy_from_user(const char __user *src, char *dst, int len, int sum, int *err_ptr);
-
-/*
- * This is the old (and unsafe) way of doing checksums, a warning message will
- * be printed if it is used and an exception occurs.
- *
- * this functions should go away after some time.
- */
-#define csum_partial_copy(src,dst,len,sum)	csum_partial_copy_nocheck(src,dst,len,sum)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
 
 /*
  *	This is a version of ip_compute_csum() optimized for IP headers,
  *	which always checksum on 4 octet boundaries.
  */
-static inline unsigned short
-ip_fast_csum(unsigned char * iph, unsigned int ihl)
+static inline __sum16
+ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int sum, tmp1;
 
@@ -78,14 +70,13 @@
 	: "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
 	: "1" (iph), "2" (ihl)
 	: "cc", "memory");
-	return sum;
+	return (__force __sum16)sum;
 }
 
 /*
  * 	Fold a partial checksum without adding pseudo headers
  */
-static inline unsigned int
-csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	__asm__(
 	"adds	%0, %1, %1, lsl #16	@ csum_fold		\n\
@@ -93,21 +84,25 @@
 	: "=r" (sum)
 	: "r" (sum)
 	: "cc");
-	return (~sum) >> 16;
+	return (__force __sum16)(~(__force u32)sum >> 16);
 }
 
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
-		   unsigned int proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		   unsigned short proto, __wsum sum)
 {
 	__asm__(
 	"adds	%0, %1, %2		@ csum_tcpudp_nofold	\n\
-	adcs	%0, %0, %3					\n\
-	adcs	%0, %0, %4					\n\
-	adcs	%0, %0, %5					\n\
+	adcs	%0, %0, %3					\n"
+#ifdef __ARMEB__
+	"adcs	%0, %0, %4					\n"
+#else
+	"adcs	%0, %0, %4, lsl #8				\n"
+#endif
+	"adcs	%0, %0, %5					\n\
 	adc	%0, %0, #0"
 	: "=&r"(sum)
-	: "r" (sum), "r" (daddr), "r" (saddr), "r" (ntohs(len)), "Ir" (ntohs(proto))
+	: "r" (sum), "r" (daddr), "r" (saddr), "r" (len), "Ir" (htons(proto))
 	: "cc");
 	return sum;
 }	
@@ -115,23 +110,27 @@
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int
-csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
-		  unsigned int proto, unsigned int sum)
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
 {
 	__asm__(
 	"adds	%0, %1, %2		@ csum_tcpudp_magic	\n\
-	adcs	%0, %0, %3					\n\
-	adcs	%0, %0, %4					\n\
-	adcs	%0, %0, %5					\n\
+	adcs	%0, %0, %3					\n"
+#ifdef __ARMEB__
+	"adcs	%0, %0, %4					\n"
+#else
+	"adcs	%0, %0, %4, lsl #8				\n"
+#endif
+	"adcs	%0, %0, %5					\n\
 	adc	%0, %0, #0					\n\
 	adds	%0, %0, %0, lsl #16				\n\
 	addcs	%0, %0, #0x10000				\n\
 	mvn	%0, %0"
 	: "=&r"(sum)
-	: "r" (sum), "r" (daddr), "r" (saddr), "r" (ntohs(len)), "Ir" (ntohs(proto))
+	: "r" (sum), "r" (daddr), "r" (saddr), "r" (len), "Ir" (htons(proto))
 	: "cc");
-	return sum >> 16;
+	return (__force __sum16)((__force u32)sum >> 16);
 }
 
 
@@ -139,20 +138,20 @@
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-static inline unsigned short
-ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16
+ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-extern unsigned long
-__csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len,
-		__u32 proto, unsigned int sum);
+extern __wsum
+__csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __be32 len,
+		__be32 proto, __wsum sum);
 
-static inline unsigned short int
-csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len,
-		unsigned short proto, unsigned int sum)
+static inline __sum16
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __u32 len,
+		unsigned short proto, __wsum sum)
 {
 	return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
 					   htonl(proto), sum));
diff --git a/include/asm-arm/device.h b/include/asm-arm/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-arm/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index 6666177..9bc46b4 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -48,7 +48,7 @@
 	return 32;
 }
 
-static inline int dma_is_consistent(dma_addr_t handle)
+static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
 {
 	return !!arch_is_coherent();
 }
diff --git a/include/asm-arm/setup.h b/include/asm-arm/setup.h
index aa4b578..e540739 100644
--- a/include/asm-arm/setup.h
+++ b/include/asm-arm/setup.h
@@ -14,55 +14,57 @@
 #ifndef __ASMARM_SETUP_H
 #define __ASMARM_SETUP_H
 
+#include <asm/types.h>
+
 #define COMMAND_LINE_SIZE 1024
 
 /* The list ends with an ATAG_NONE node. */
 #define ATAG_NONE	0x00000000
 
 struct tag_header {
-	u32 size;
-	u32 tag;
+	__u32 size;
+	__u32 tag;
 };
 
 /* The list must start with an ATAG_CORE node */
 #define ATAG_CORE	0x54410001
 
 struct tag_core {
-	u32 flags;		/* bit 0 = read-only */
-	u32 pagesize;
-	u32 rootdev;
+	__u32 flags;		/* bit 0 = read-only */
+	__u32 pagesize;
+	__u32 rootdev;
 };
 
 /* it is allowed to have multiple ATAG_MEM nodes */
 #define ATAG_MEM	0x54410002
 
 struct tag_mem32 {
-	u32	size;
-	u32	start;	/* physical start address */
+	__u32	size;
+	__u32	start;	/* physical start address */
 };
 
 /* VGA text type displays */
 #define ATAG_VIDEOTEXT	0x54410003
 
 struct tag_videotext {
-	u8		x;
-	u8		y;
-	u16		video_page;
-	u8		video_mode;
-	u8		video_cols;
-	u16		video_ega_bx;
-	u8		video_lines;
-	u8		video_isvga;
-	u16		video_points;
+	__u8		x;
+	__u8		y;
+	__u16		video_page;
+	__u8		video_mode;
+	__u8		video_cols;
+	__u16		video_ega_bx;
+	__u8		video_lines;
+	__u8		video_isvga;
+	__u16		video_points;
 };
 
 /* describes how the ramdisk will be used in kernel */
 #define ATAG_RAMDISK	0x54410004
 
 struct tag_ramdisk {
-	u32 flags;	/* bit 0 = load, bit 1 = prompt */
-	u32 size;	/* decompressed ramdisk size in _kilo_ bytes */
-	u32 start;	/* starting block of floppy-based RAM disk image */
+	__u32 flags;	/* bit 0 = load, bit 1 = prompt */
+	__u32 size;	/* decompressed ramdisk size in _kilo_ bytes */
+	__u32 start;	/* starting block of floppy-based RAM disk image */
 };
 
 /* describes where the compressed ramdisk image lives (virtual address) */
@@ -76,23 +78,23 @@
 #define ATAG_INITRD2	0x54420005
 
 struct tag_initrd {
-	u32 start;	/* physical start address */
-	u32 size;	/* size of compressed ramdisk image in bytes */
+	__u32 start;	/* physical start address */
+	__u32 size;	/* size of compressed ramdisk image in bytes */
 };
 
 /* board serial number. "64 bits should be enough for everybody" */
 #define ATAG_SERIAL	0x54410006
 
 struct tag_serialnr {
-	u32 low;
-	u32 high;
+	__u32 low;
+	__u32 high;
 };
 
 /* board revision */
 #define ATAG_REVISION	0x54410007
 
 struct tag_revision {
-	u32 rev;
+	__u32 rev;
 };
 
 /* initial values for vesafb-type framebuffers. see struct screen_info
@@ -101,20 +103,20 @@
 #define ATAG_VIDEOLFB	0x54410008
 
 struct tag_videolfb {
-	u16		lfb_width;
-	u16		lfb_height;
-	u16		lfb_depth;
-	u16		lfb_linelength;
-	u32		lfb_base;
-	u32		lfb_size;
-	u8		red_size;
-	u8		red_pos;
-	u8		green_size;
-	u8		green_pos;
-	u8		blue_size;
-	u8		blue_pos;
-	u8		rsvd_size;
-	u8		rsvd_pos;
+	__u16		lfb_width;
+	__u16		lfb_height;
+	__u16		lfb_depth;
+	__u16		lfb_linelength;
+	__u32		lfb_base;
+	__u32		lfb_size;
+	__u8		red_size;
+	__u8		red_pos;
+	__u8		green_size;
+	__u8		green_pos;
+	__u8		blue_size;
+	__u8		blue_pos;
+	__u8		rsvd_size;
+	__u8		rsvd_pos;
 };
 
 /* command line: \0 terminated string */
@@ -128,17 +130,17 @@
 #define ATAG_ACORN	0x41000101
 
 struct tag_acorn {
-	u32 memc_control_reg;
-	u32 vram_pages;
-	u8 sounddefault;
-	u8 adfsdrives;
+	__u32 memc_control_reg;
+	__u32 vram_pages;
+	__u8 sounddefault;
+	__u8 adfsdrives;
 };
 
 /* footbridge memory clock, see arch/arm/mach-footbridge/arch.c */
 #define ATAG_MEMCLK	0x41000402
 
 struct tag_memclk {
-	u32 fmemclk;
+	__u32 fmemclk;
 };
 
 struct tag {
@@ -167,24 +169,26 @@
 };
 
 struct tagtable {
-	u32 tag;
+	__u32 tag;
 	int (*parse)(const struct tag *);
 };
 
-#define __tag __attribute_used__ __attribute__((__section__(".taglist.init")))
-#define __tagtable(tag, fn) \
-static struct tagtable __tagtable_##fn __tag = { tag, fn }
-
 #define tag_member_present(tag,member)				\
 	((unsigned long)(&((struct tag *)0L)->member + 1)	\
 		<= (tag)->hdr.size * 4)
 
-#define tag_next(t)	((struct tag *)((u32 *)(t) + (t)->hdr.size))
+#define tag_next(t)	((struct tag *)((__u32 *)(t) + (t)->hdr.size))
 #define tag_size(type)	((sizeof(struct tag_header) + sizeof(struct type)) >> 2)
 
 #define for_each_tag(t,base)		\
 	for (t = base; t->hdr.size; t = tag_next(t))
 
+#ifdef __KERNEL__
+
+#define __tag __attribute_used__ __attribute__((__section__(".taglist.init")))
+#define __tagtable(tag, fn) \
+static struct tagtable __tagtable_##fn __tag = { tag, fn }
+
 /*
  * Memory map description
  */
@@ -217,4 +221,6 @@
 static struct early_params __early_##fn __attribute_used__	\
 __attribute__((__section__(".early_param.init"))) = { name, fn }
 
+#endif  /*  __KERNEL__  */
+
 #endif
diff --git a/include/asm-arm/unistd.h b/include/asm-arm/unistd.h
index 14a87ee..d44c629 100644
--- a/include/asm-arm/unistd.h
+++ b/include/asm-arm/unistd.h
@@ -377,156 +377,6 @@
 #endif
 
 #ifdef __KERNEL__
-#include <linux/err.h>
-#include <linux/linkage.h>
-
-#define __sys2(x) #x
-#define __sys1(x) __sys2(x)
-
-#ifndef __syscall
-#if defined(__thumb__) || defined(__ARM_EABI__)
-#define __SYS_REG(name) register long __sysreg __asm__("r7") = __NR_##name;
-#define __SYS_REG_LIST(regs...) "r" (__sysreg) , ##regs
-#define __syscall(name) "swi\t0"
-#else
-#define __SYS_REG(name)
-#define __SYS_REG_LIST(regs...) regs
-#define __syscall(name) "swi\t" __sys1(__NR_##name) ""
-#endif
-#endif
-
-#define __syscall_return(type, res)					\
-do {									\
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) {	\
-		errno = -(res);						\
-		res = -1;						\
-	}								\
-	return (type) (res);						\
-} while (0)
-
-#define _syscall0(type,name)						\
-type name(void) {							\
-  __SYS_REG(name)							\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: __SYS_REG_LIST()						\
-	: "memory" );							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-#define _syscall1(type,name,type1,arg1) 				\
-type name(type1 arg1) { 						\
-  __SYS_REG(name)							\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: __SYS_REG_LIST( "0" (__r0) )					\
-	: "memory" );							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2)			\
-type name(type1 arg1,type2 arg2) {					\
-  __SYS_REG(name)							\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: __SYS_REG_LIST( "0" (__r0), "r" (__r1) )			\
-	: "memory" );							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)		\
-type name(type1 arg1,type2 arg2,type3 arg3) {				\
-  __SYS_REG(name)							\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __r2 __asm__("r2") = (long)arg3;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2) )		\
-	: "memory" );							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) {		\
-  __SYS_REG(name)							\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __r2 __asm__("r2") = (long)arg3;			\
-  register long __r3 __asm__("r3") = (long)arg4;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2), "r" (__r3) ) \
-	: "memory" );							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-  
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5)	\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) {	\
-  __SYS_REG(name)							\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __r2 __asm__("r2") = (long)arg3;			\
-  register long __r3 __asm__("r3") = (long)arg4;			\
-  register long __r4 __asm__("r4") = (long)arg5;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2),		\
-			  "r" (__r3), "r" (__r4) )			\
-	: "memory" );							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6)	\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) {	\
-  __SYS_REG(name)							\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __r2 __asm__("r2") = (long)arg3;			\
-  register long __r3 __asm__("r3") = (long)arg4;			\
-  register long __r4 __asm__("r4") = (long)arg5;			\
-  register long __r5 __asm__("r5") = (long)arg6;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2),		\
-			  "r" (__r3), "r" (__r4), "r" (__r5) )		\
-	: "memory" );							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_STAT64
diff --git a/include/asm-arm26/checksum.h b/include/asm-arm26/checksum.h
index d4256d5..f2b4b0a 100644
--- a/include/asm-arm26/checksum.h
+++ b/include/asm-arm26/checksum.h
@@ -23,7 +23,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -33,26 +33,18 @@
  * better 64-bit) boundary
  */
 
-unsigned int
-csum_partial_copy_nocheck(const char *src, char *dst, int len, int sum);
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
 
-unsigned int
-csum_partial_copy_from_user(const char __user *src, char *dst, int len, int sum, int *err_ptr);
-
-/*
- * This is the old (and unsafe) way of doing checksums, a warning message will
- * be printed if it is used and an exception occurs.
- *
- * this functions should go away after some time.
- */
-#define csum_partial_copy(src,dst,len,sum)	csum_partial_copy_nocheck(src,dst,len,sum)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
 
 /*
  *	This is a version of ip_compute_csum() optimized for IP headers,
  *	which always checksum on 4 octet boundaries.
  */
-static inline unsigned short
-ip_fast_csum(unsigned char * iph, unsigned int ihl)
+static inline __sum16
+ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int sum, tmp1;
 
@@ -78,14 +70,13 @@
 	: "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
 	: "1" (iph), "2" (ihl)
 	: "cc");
-	return sum;
+	return (__force __sum16)sum;
 }
 
 /*
  * 	Fold a partial checksum without adding pseudo headers
  */
-static inline unsigned int
-csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	__asm__(
 	"adds	%0, %1, %1, lsl #16	@ csum_fold		\n\
@@ -93,12 +84,12 @@
 	: "=r" (sum)
 	: "r" (sum)
 	: "cc");
-	return (~sum) >> 16;
+	return (__force __sum16)(~(__force u32)sum >> 16);
 }
 
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
-		   unsigned int proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		   unsigned short proto, __wsum sum)
 {
 	__asm__(
 	"adds	%0, %1, %2		@ csum_tcpudp_nofold	\n\
@@ -107,7 +98,7 @@
 	adcs	%0, %0, %5					\n\
 	adc	%0, %0, #0"
 	: "=&r"(sum)
-	: "r" (sum), "r" (daddr), "r" (saddr), "r" (ntohs(len)), "Ir" (ntohs(proto))
+	: "r" (sum), "r" (daddr), "r" (saddr), "r" (htons(len)), "Ir" (htons(proto))
 	: "cc");
 	return sum;
 }	
@@ -115,9 +106,9 @@
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int
-csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
-		  unsigned int proto, unsigned int sum)
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
 {
 	__asm__(
 	"adds	%0, %1, %2		@ csum_tcpudp_magic	\n\
@@ -129,9 +120,9 @@
 	addcs	%0, %0, #0x10000				\n\
 	mvn	%0, %0"
 	: "=&r"(sum)
-	: "r" (sum), "r" (daddr), "r" (saddr), "r" (ntohs(len)), "Ir" (ntohs(proto))
+	: "r" (sum), "r" (daddr), "r" (saddr), "r" (htons(len)), "Ir" (htons(proto))
 	: "cc");
-	return sum >> 16;
+	return (__force __sum16)((__force u32)sum >> 16);
 }
 
 
@@ -139,20 +130,20 @@
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-static inline unsigned short
-ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16
+ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-extern unsigned long
-__csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len,
-		__u32 proto, unsigned int sum);
+extern __wsum
+__csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __be32 len,
+		__be32 proto, __wsum sum);
 
-static inline unsigned short int
-csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len,
-		unsigned short proto, unsigned int sum)
+static inline __sum16
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __u32 len,
+		unsigned short proto, __wsum sum)
 {
 	return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
 					   htonl(proto), sum));
diff --git a/include/asm-arm26/device.h b/include/asm-arm26/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-arm26/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-arm26/pgalloc.h b/include/asm-arm26/pgalloc.h
index 6437167..7725af3 100644
--- a/include/asm-arm26/pgalloc.h
+++ b/include/asm-arm26/pgalloc.h
@@ -15,7 +15,7 @@
 #include <asm/tlbflush.h>
 #include <linux/slab.h>
 
-extern kmem_cache_t *pte_cache;
+extern struct kmem_cache *pte_cache;
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr){
 	return kmem_cache_alloc(pte_cache, GFP_KERNEL);
diff --git a/include/asm-arm26/setup.h b/include/asm-arm26/setup.h
index 6348931..1a867b4 100644
--- a/include/asm-arm26/setup.h
+++ b/include/asm-arm26/setup.h
@@ -16,6 +16,8 @@
 
 #define COMMAND_LINE_SIZE 1024
 
+#ifdef __KERNEL__
+
 /* The list ends with an ATAG_NONE node. */
 #define ATAG_NONE	0x00000000
 
@@ -202,4 +204,6 @@
 
 extern struct meminfo meminfo;
 
+#endif  /*  __KERNEL__  */
+
 #endif
diff --git a/include/asm-arm26/unistd.h b/include/asm-arm26/unistd.h
index 25a5eea..4c3b919 100644
--- a/include/asm-arm26/unistd.h
+++ b/include/asm-arm26/unistd.h
@@ -311,139 +311,6 @@
 #define __ARM_NR_usr26			(__ARM_NR_BASE+3)
 
 #ifdef __KERNEL__
-#include <linux/err.h>
-#include <linux/linkage.h>
-
-#define __sys2(x) #x
-#define __sys1(x) __sys2(x)
-
-#ifndef __syscall
-#define __syscall(name) "swi\t" __sys1(__NR_##name) ""
-#endif
-
-#define __syscall_return(type, res)					\
-do {									\
-	if ((unsigned long)(res) >= (unsigned long)-MAX_ERRNO) {	\
-		errno = -(res);						\
-		res = -1;						\
-	}								\
-	return (type) (res);						\
-} while (0)
-
-#define _syscall0(type,name)						\
-type name(void) {							\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	:								\
-	: "lr");							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-#define _syscall1(type,name,type1,arg1) 				\
-type name(type1 arg1) { 						\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: "r" (__r0)							\
-	: "lr");							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2)			\
-type name(type1 arg1,type2 arg2) {					\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: "r" (__r0),"r" (__r1) 					\
-	: "lr");							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)		\
-type name(type1 arg1,type2 arg2,type3 arg3) {				\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __r2 __asm__("r2") = (long)arg3;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: "r" (__r0),"r" (__r1),"r" (__r2)				\
-	: "lr");							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) {		\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __r2 __asm__("r2") = (long)arg3;			\
-  register long __r3 __asm__("r3") = (long)arg4;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3)			\
-	: "lr");							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-  
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5)	\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) {	\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __r2 __asm__("r2") = (long)arg3;			\
-  register long __r3 __asm__("r3") = (long)arg4;			\
-  register long __r4 __asm__("r4") = (long)arg5;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3),"r" (__r4)	\
-	: "lr");							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6)	\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) {	\
-  register long __r0 __asm__("r0") = (long)arg1;			\
-  register long __r1 __asm__("r1") = (long)arg2;			\
-  register long __r2 __asm__("r2") = (long)arg3;			\
-  register long __r3 __asm__("r3") = (long)arg4;			\
-  register long __r4 __asm__("r4") = (long)arg5;			\
-  register long __r5 __asm__("r5") = (long)arg6;			\
-  register long __res_r0 __asm__("r0");					\
-  long __res;								\
-  __asm__ __volatile__ (						\
-  __syscall(name)							\
-	: "=r" (__res_r0)						\
-	: "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3), "r" (__r4),"r" (__r5)		\
-	: "lr");							\
-  __res = __res_r0;							\
-  __syscall_return(type,__res);						\
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-avr32/checksum.h b/include/asm-avr32/checksum.h
index 41b7af0..af9d53f 100644
--- a/include/asm-avr32/checksum.h
+++ b/include/asm-avr32/checksum.h
@@ -20,8 +20,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len,
-			  unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -30,8 +29,8 @@
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-unsigned int csum_partial_copy_generic(const char *src, char *dst, int len,
-				       int sum, int *src_err_ptr,
+__wsum csum_partial_copy_generic(const void *src, void *dst, int len,
+				       __wsum sum, int *src_err_ptr,
 				       int *dst_err_ptr);
 
 /*
@@ -42,17 +41,17 @@
  *	verify_area().
  */
 static inline
-unsigned int csum_partial_copy_nocheck(const char *src, char *dst,
-				       int len, int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				       int len, __wsum sum)
 {
 	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
 }
 
 static inline
-unsigned int csum_partial_copy_from_user (const char __user *src, char *dst,
-					  int len, int sum, int *err_ptr)
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+					  int len, __wsum sum, int *err_ptr)
 {
-	return csum_partial_copy_generic((const char __force *)src, dst, len,
+	return csum_partial_copy_generic((const void __force *)src, dst, len,
 					 sum, err_ptr, NULL);
 }
 
@@ -60,8 +59,7 @@
  *	This is a version of ip_compute_csum() optimized for IP headers,
  *	which always checksum on 4 octet boundaries.
  */
-static inline unsigned short ip_fast_csum(unsigned char *iph,
-					  unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int sum, tmp;
 
@@ -90,14 +88,14 @@
 		: "=r"(sum), "=r"(iph), "=r"(ihl), "=r"(tmp)
 		: "1"(iph), "2"(ihl)
 		: "memory", "cc");
-	return sum;
+	return (__force __sum16)sum;
 }
 
 /*
  *	Fold a partial checksum
  */
 
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	unsigned int tmp;
 
@@ -109,21 +107,20 @@
 	    : "=&r"(sum), "=&r"(tmp)
 	    : "0"(sum));
 
-	return ~sum;
+	return (__force __sum16)~sum;
 }
 
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
-					       unsigned long daddr,
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 					       unsigned short len,
 					       unsigned short proto,
-					       unsigned int sum)
+					       __wsum sum)
 {
 	asm("	add	%0, %1\n"
 	    "	adc	%0, %0, %2\n"
 	    "	adc	%0, %0, %3\n"
 	    "	acr	%0"
 	    : "=r"(sum)
-	    : "r"(daddr), "r"(saddr), "r"(ntohs(len) | (proto << 16)),
+	    : "r"(daddr), "r"(saddr), "r"(len + proto),
 	      "0"(sum)
 	    : "cc");
 
@@ -134,11 +131,10 @@
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum)
+						   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -148,7 +144,7 @@
  * in icmp.c
  */
 
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
     return csum_fold(csum_partial(buff, len, 0));
 }
diff --git a/include/asm-avr32/device.h b/include/asm-avr32/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-avr32/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h
index 4c40cb4..0580b5d 100644
--- a/include/asm-avr32/dma-mapping.h
+++ b/include/asm-avr32/dma-mapping.h
@@ -8,7 +8,8 @@
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
-extern void dma_cache_sync(void *vaddr, size_t size, int direction);
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	int direction);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -307,7 +308,7 @@
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-static inline int dma_is_consistent(dma_addr_t dma_addr)
+static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 1;
 }
diff --git a/include/asm-avr32/setup.h b/include/asm-avr32/setup.h
index 10193da..0a52242 100644
--- a/include/asm-avr32/setup.h
+++ b/include/asm-avr32/setup.h
@@ -13,6 +13,8 @@
 
 #define COMMAND_LINE_SIZE 256
 
+#ifdef __KERNEL__
+
 /* Magic number indicating that a tag table is present */
 #define ATAG_MAGIC	0xa2a25441
 
@@ -138,4 +140,6 @@
 
 #endif /* !__ASSEMBLY__ */
 
+#endif  /*  __KERNEL__  */
+
 #endif /* __ASM_AVR32_SETUP_H__ */
diff --git a/include/asm-avr32/types.h b/include/asm-avr32/types.h
index 3f47db9..2bff153 100644
--- a/include/asm-avr32/types.h
+++ b/include/asm-avr32/types.h
@@ -57,11 +57,6 @@
 
 typedef u32 dma_addr_t;
 
-#ifdef CONFIG_LBD
-typedef u64 sector_t;
-#define HAVE_SECTOR_T
-#endif
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-cris/arch-v10/bitops.h b/include/asm-cris/arch-v10/bitops.h
index b73f539..be85f6d 100644
--- a/include/asm-cris/arch-v10/bitops.h
+++ b/include/asm-cris/arch-v10/bitops.h
@@ -10,7 +10,7 @@
  * number.  They differ in that the first function also inverts all bits
  * in the input.
  */
-extern inline unsigned long cris_swapnwbrlz(unsigned long w)
+static inline unsigned long cris_swapnwbrlz(unsigned long w)
 {
 	/* Let's just say we return the result in the same register as the
 	   input.  Saying we clobber the input but can return the result
@@ -26,7 +26,7 @@
 	return res;
 }
 
-extern inline unsigned long cris_swapwbrlz(unsigned long w)
+static inline unsigned long cris_swapwbrlz(unsigned long w)
 {
 	unsigned res;
 	__asm__ ("swapwbr %0 \n\t"
@@ -40,7 +40,7 @@
  * ffz = Find First Zero in word. Undefined if no zero exists,
  * so code should check against ~0UL first..
  */
-extern inline unsigned long ffz(unsigned long w)
+static inline unsigned long ffz(unsigned long w)
 {
 	return cris_swapnwbrlz(w);
 }
@@ -51,7 +51,7 @@
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-extern inline unsigned long __ffs(unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
 {
 	return cris_swapnwbrlz(~word);
 }
@@ -65,7 +65,7 @@
  * differs in spirit from the above ffz (man ffs).
  */
 
-extern inline unsigned long kernel_ffs(unsigned long w)
+static inline unsigned long kernel_ffs(unsigned long w)
 {
 	return w ? cris_swapwbrlz (w) + 1 : 0;
 }
diff --git a/include/asm-cris/arch-v10/checksum.h b/include/asm-cris/arch-v10/checksum.h
index 633f234..b8000c5 100644
--- a/include/asm-cris/arch-v10/checksum.h
+++ b/include/asm-cris/arch-v10/checksum.h
@@ -8,11 +8,11 @@
  * to split all of those into 16-bit components, then add.
  */
 
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
-		   unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		   unsigned short proto, __wsum sum)
 {
-	int res;
+	__wsum res;
 	__asm__ ("add.d %2, %0\n\t"
 		 "ax\n\t"
 		 "add.d %3, %0\n\t"
@@ -21,7 +21,7 @@
 		 "ax\n\t"
 		 "addq 0, %0\n"
 	: "=r" (res)
-	: "0" (sum), "r" (daddr), "r" (saddr), "r" ((ntohs(len) << 16) + (proto << 8)));
+	: "0" (sum), "r" (daddr), "r" (saddr), "r" ((len + proto) << 8));
 
 	return res;
 }	
diff --git a/include/asm-cris/arch-v32/checksum.h b/include/asm-cris/arch-v32/checksum.h
index 97ef89e..e5dcfce 100644
--- a/include/asm-cris/arch-v32/checksum.h
+++ b/include/asm-cris/arch-v32/checksum.h
@@ -9,11 +9,11 @@
  * checksum. Which means it would be necessary to split all those into
  * 16-bit components and then add.
  */
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
-		   unsigned short len, unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+		   unsigned short len, unsigned short proto, __wsum sum)
 {
-	int res;
+	__wsum res;
 
 	__asm__ __volatile__ ("add.d %2, %0\n\t"
 			      "addc %3, %0\n\t"
@@ -21,7 +21,7 @@
 			      "addc 0, %0\n\t"
 			      : "=r" (res)
 			      : "0" (sum), "r" (daddr), "r" (saddr), \
-			      "r" ((ntohs(len) << 16) + (proto << 8)));
+			      "r" ((len + proto) << 8));
 
 	return res;
 }
diff --git a/include/asm-cris/checksum.h b/include/asm-cris/checksum.h
index 26a7719..180dbf2 100644
--- a/include/asm-cris/checksum.h
+++ b/include/asm-cris/checksum.h
@@ -17,7 +17,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -27,26 +27,23 @@
  * better 64-bit) boundary
  */
 
-unsigned int csum_partial_copy_nocheck(const char *src, char *dst,
-				       int len, unsigned int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				       int len, __wsum sum);
 
 /*
  *	Fold a partial checksum into a word
  */
 
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
-	/* the while loop is unnecessary really, it's always enough with two
-	   iterations */
-	
-	while(sum >> 16)
-		sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
-	
-	return ~sum;
+	u32 sum = (__force u32)csum;
+	sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
+	sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
+	return (__force __sum16)~sum;
 }
 
-extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
-						int len, unsigned int sum, 
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+						int len, __wsum sum,
 						int *errptr);
 
 /*
@@ -55,8 +52,7 @@
  *
  */
 
-static inline unsigned short ip_fast_csum(unsigned char * iph,
-					  unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	return csum_fold(csum_partial(iph, ihl * 4, 0));
 }
@@ -66,11 +62,10 @@
  * returns a 16-bit checksum, already complemented
  */
 
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum)
+						   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -80,7 +75,8 @@
  * in icmp.c
  */
 
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
 	return csum_fold (csum_partial(buff, len, 0));
 }
 
diff --git a/include/asm-cris/device.h b/include/asm-cris/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-cris/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h
index cbf1a98..662cea7 100644
--- a/include/asm-cris/dma-mapping.h
+++ b/include/asm-cris/dma-mapping.h
@@ -156,10 +156,10 @@
 	return (1 << INTERNODE_CACHE_SHIFT);
 }
 
-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 }
diff --git a/include/asm-cris/semaphore-helper.h b/include/asm-cris/semaphore-helper.h
index dbd0f30..a8e1e6c 100644
--- a/include/asm-cris/semaphore-helper.h
+++ b/include/asm-cris/semaphore-helper.h
@@ -20,12 +20,12 @@
 /*
  * These two _must_ execute atomically wrt each other.
  */
-extern inline void wake_one_more(struct semaphore * sem)
+static inline void wake_one_more(struct semaphore * sem)
 {
 	atomic_inc(&sem->waking);
 }
 
-extern inline int waking_non_zero(struct semaphore *sem)
+static inline int waking_non_zero(struct semaphore *sem)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -40,7 +40,7 @@
 	return ret;
 }
 
-extern inline int waking_non_zero_interruptible(struct semaphore *sem,
+static inline int waking_non_zero_interruptible(struct semaphore *sem,
 						struct task_struct *tsk)
 {
 	int ret = 0;
@@ -59,7 +59,7 @@
 	return ret;
 }
 
-extern inline int waking_non_zero_trylock(struct semaphore *sem)
+static inline int waking_non_zero_trylock(struct semaphore *sem)
 {
         int ret = 1;
 	unsigned long flags;
diff --git a/include/asm-frv/checksum.h b/include/asm-frv/checksum.h
index 42bf0db..9b16898 100644
--- a/include/asm-frv/checksum.h
+++ b/include/asm-frv/checksum.h
@@ -26,7 +26,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -35,7 +35,7 @@
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
 
 /*
  * the same as csum_partial_copy, but copies from user space.
@@ -43,11 +43,8 @@
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern unsigned int csum_partial_copy_from_user(const char __user *src, char *dst,
-						int len, int sum, int *csum_err);
-
-#define csum_partial_copy_nocheck(src, dst, len, sum)	\
-	csum_partial_copy((src), (dst), (len), (sum))
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+						int len, __wsum sum, int *csum_err);
 
 /*
  *	This is a version of ip_compute_csum() optimized for IP headers,
@@ -55,7 +52,7 @@
  *
  */
 static inline
-unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int tmp, inc, sum = 0;
 
@@ -81,13 +78,13 @@
 	    : "icc0", "icc1"
 	    );
 
-	return ~sum;
+	return (__force __sum16)~sum;
 }
 
 /*
  *	Fold a partial checksum
  */
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	unsigned int tmp;
 
@@ -100,16 +97,16 @@
 	    : "0"(sum)
 	    );
 
-	return ~sum;
+	return (__force __sum16)~sum;
 }
 
 /*
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
-		  unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
 {
 	asm("	addcc		%1,%0,%0,icc0	\n"
 	    "	addxcc		%2,%0,%0,icc0	\n"
@@ -122,9 +119,9 @@
 	return sum;
 }
 
-static inline unsigned short int
-csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
-		  unsigned short proto, unsigned int sum)
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -133,12 +130,12 @@
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-extern unsigned short ip_compute_csum(const unsigned char * buff, int len);
+extern __sum16 ip_compute_csum(const void *buff, int len);
 
 #define _HAVE_ARCH_IPV6_CSUM
-static inline unsigned short int
-csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
-		__u32 len, unsigned short proto, unsigned int sum)
+static inline __sum16
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
+		__u32 len, unsigned short proto, __wsum sum)
 {
 	unsigned long tmp, tmp2;
 
@@ -177,7 +174,7 @@
 	    : "icc0"
 	    );
 
-	return ~sum;
+	return (__force __sum16)~sum;
 }
 
 #endif /* _ASM_CHECKSUM_H */
diff --git a/include/asm-frv/device.h b/include/asm-frv/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-frv/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-frv/dma-mapping.h b/include/asm-frv/dma-mapping.h
index e9fc1d4..bcb2df6 100644
--- a/include/asm-frv/dma-mapping.h
+++ b/include/asm-frv/dma-mapping.h
@@ -172,10 +172,10 @@
 	return 1 << L1_CACHE_SHIFT;
 }
 
-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)
 
 static inline
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction direction)
 {
 	flush_write_buffers();
diff --git a/include/asm-frv/highmem.h b/include/asm-frv/highmem.h
index 0f390f4..ff4d6cd 100644
--- a/include/asm-frv/highmem.h
+++ b/include/asm-frv/highmem.h
@@ -115,7 +115,7 @@
 {
 	unsigned long paddr;
 
-	inc_preempt_count();
+	pagefault_disable();
 	paddr = page_to_phys(page);
 
 	switch (type) {
@@ -170,8 +170,7 @@
 	default:
 		BUG();
 	}
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_enable();
 }
 
 #endif /* !__ASSEMBLY__ */
diff --git a/include/asm-frv/param.h b/include/asm-frv/param.h
index 168381e..365653b 100644
--- a/include/asm-frv/param.h
+++ b/include/asm-frv/param.h
@@ -18,6 +18,5 @@
 #endif
 
 #define MAXHOSTNAMELEN		64	/* max length of hostname */
-#define COMMAND_LINE_SIZE	512
 
 #endif /* _ASM_PARAM_H */
diff --git a/include/asm-frv/setup.h b/include/asm-frv/setup.h
index 0d293b9..afd787c 100644
--- a/include/asm-frv/setup.h
+++ b/include/asm-frv/setup.h
@@ -12,6 +12,10 @@
 #ifndef _ASM_SETUP_H
 #define _ASM_SETUP_H
 
+#define COMMAND_LINE_SIZE       512
+
+#ifdef __KERNEL__
+
 #include <linux/init.h>
 
 #ifndef __ASSEMBLY__
@@ -22,4 +26,6 @@
 
 #endif /* !__ASSEMBLY__ */
 
+#endif  /*  __KERNEL__  */
+
 #endif /* _ASM_SETUP_H */
diff --git a/include/asm-frv/unistd.h b/include/asm-frv/unistd.h
index 725e854..584c041 100644
--- a/include/asm-frv/unistd.h
+++ b/include/asm-frv/unistd.h
@@ -320,125 +320,6 @@
 #ifdef __KERNEL__
 
 #define NR_syscalls 310
-#include <linux/err.h>
-
-/*
- * process the return value of a syscall, consigning it to one of two possible fates
- * - user-visible error numbers are in the range -1 - -4095: see <asm-frv/errno.h>
- */
-#undef __syscall_return
-#define __syscall_return(type, res)					\
-do {									\
-        unsigned long __sr2 = (res);					\
-	if (__builtin_expect(__sr2 >= (unsigned long)(-MAX_ERRNO), 0)) { \
-		errno = (-__sr2);					\
-		__sr2 = ~0UL;						\
-	}								\
-	return (type) __sr2;						\
-} while (0)
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-
-#undef _syscall0
-#define _syscall0(type,name)						\
-type name(void)								\
-{									\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);	\
-	register unsigned long __sc0 __asm__ ("gr8");			\
-	__asm__ __volatile__ ("tira gr0,#0"				\
-			      : "=r" (__sc0)				\
-			      : "r" (__scnum));				\
-	__syscall_return(type, __sc0);					\
-}
-
-#undef _syscall1
-#define _syscall1(type,name,type1,arg1)						\
-type name(type1 arg1)								\
-{										\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);		\
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;	\
-	__asm__ __volatile__ ("tira gr0,#0"					\
-			      : "+r" (__sc0)					\
-			      : "r" (__scnum));					\
-	__syscall_return(type, __sc0);						\
-}
-
-#undef _syscall2
-#define _syscall2(type,name,type1,arg1,type2,arg2)				\
-type name(type1 arg1,type2 arg2)						\
-{										\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);		\
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;	\
-	register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2;	\
-	__asm__ __volatile__ ("tira gr0,#0"					\
-			      : "+r" (__sc0)					\
-			      : "r" (__scnum), "r" (__sc1));			\
-	__syscall_return(type, __sc0);						\
-}
-
-#undef _syscall3
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)			\
-type name(type1 arg1,type2 arg2,type3 arg3)					\
-{										\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);		\
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;	\
-	register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2;	\
-	register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3;	\
-	__asm__ __volatile__ ("tira gr0,#0"					\
-			      : "+r" (__sc0)					\
-			      : "r" (__scnum), "r" (__sc1), "r" (__sc2));	\
-	__syscall_return(type, __sc0);						\
-}
-
-#undef _syscall4
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)		\
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4)				\
-{											\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);			\
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;		\
-	register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2;		\
-	register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3;		\
-	register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4;		\
-	__asm__ __volatile__ ("tira gr0,#0"						\
-			      : "+r" (__sc0)						\
-			      : "r" (__scnum), "r" (__sc1), "r" (__sc2), "r" (__sc3));	\
-	__syscall_return(type, __sc0);							\
-}
-
-#undef _syscall5
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5)	\
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5)			\
-{											\
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);			\
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;		\
-	register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2;		\
-	register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3;		\
-	register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4;		\
-	register unsigned long __sc4 __asm__ ("gr12") = (unsigned long) arg5;		\
-	__asm__ __volatile__ ("tira gr0,#0"						\
-			      : "+r" (__sc0)						\
-			      : "r" (__scnum), "r" (__sc1), "r" (__sc2),		\
-			      "r" (__sc3), "r" (__sc4));				\
-	__syscall_return(type, __sc0);							\
-}
-
-#undef _syscall6
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5, type6, arg6) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6)		 \
-{												 \
-	register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);				 \
-	register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;			 \
-	register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2;			 \
-	register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3;			 \
-	register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4;			 \
-	register unsigned long __sc4 __asm__ ("gr12") = (unsigned long) arg5;			 \
-	register unsigned long __sc5 __asm__ ("gr13") = (unsigned long) arg6;			 \
-	__asm__ __volatile__ ("tira gr0,#0"							 \
-			      : "+r" (__sc0)							 \
-			      : "r" (__scnum), "r" (__sc1), "r" (__sc2),			 \
-			      "r" (__sc3), "r" (__sc4), "r" (__sc5));				 \
-	__syscall_return(type, __sc0);								 \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 /* #define __ARCH_WANT_OLD_READDIR */
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 3c06be3..fa14f8c 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -1,4 +1,3 @@
-header-y += atomic.h
 header-y += errno-base.h
 header-y += errno.h
 header-y += fcntl.h
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index a84c3d8..a37e95f 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -14,6 +14,7 @@
 unifdef-y += ptrace.h
 unifdef-y += resource.h
 unifdef-y += sembuf.h
+unifdef-y += setup.h
 unifdef-y += shmbuf.h
 unifdef-y += sigcontext.h
 unifdef-y += siginfo.h
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 42a95d9..b7e4a04 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -66,7 +66,7 @@
 	atomic64_sub(i, v);
 }
 
-#else
+#else  /*  BITS_PER_LONG == 64  */
 
 typedef atomic_t atomic_long_t;
 
@@ -113,5 +113,6 @@
 	atomic_sub(i, v);
 }
 
-#endif
-#endif
+#endif  /*  BITS_PER_LONG == 64  */
+
+#endif  /*  _ASM_GENERIC_ATOMIC_H  */
diff --git a/include/asm-generic/device.h b/include/asm-generic/device.h
new file mode 100644
index 0000000..c17c960
--- /dev/null
+++ b/include/asm-generic/device.h
@@ -0,0 +1,12 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#ifndef _ASM_GENERIC_DEVICE_H
+#define _ASM_GENERIC_DEVICE_H
+
+struct dev_archdata {
+};
+
+#endif /* _ASM_GENERIC_DEVICE_H */
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index b541e48..783ab99 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -266,7 +266,7 @@
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)
 
 static inline int
 dma_get_cache_alignment(void)
@@ -295,7 +295,7 @@
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	/* could define this in terms of the dma_cache ... operations,
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index df893c1..f422df0 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -21,7 +21,7 @@
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -33,7 +33,7 @@
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index e60d6f2..4d4c62d 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -11,8 +11,8 @@
 
 #define RODATA								\
 	. = ALIGN(4096);						\
-	__start_rodata = .;						\
 	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
+		VMLINUX_SYMBOL(__start_rodata) = .;			\
 		*(.rodata) *(.rodata.*)					\
 		*(__vermagic)		/* Kernel version magic */	\
 	}								\
@@ -119,17 +119,16 @@
 		*(__ksymtab_strings)					\
 	}								\
 									\
+	EH_FRAME							\
+									\
 	/* Built-in module parameters. */				\
 	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start___param) = .;			\
 		*(__param)						\
 		VMLINUX_SYMBOL(__stop___param) = .;			\
+		VMLINUX_SYMBOL(__end_rodata) = .;			\
 	}								\
 									\
-	/* Unwind data binary search table */				\
-	EH_FRAME_HDR							\
-									\
-	__end_rodata = .;						\
 	. = ALIGN(4096);
 
 #define SECURITY_INIT							\
@@ -162,15 +161,23 @@
 		VMLINUX_SYMBOL(__kprobes_text_end) = .;
 
 #ifdef CONFIG_STACK_UNWIND
-		/* Unwind data binary search table */
-#define EH_FRAME_HDR							\
+#define EH_FRAME							\
+		/* Unwind data binary search table */			\
+		. = ALIGN(8);						\
         	.eh_frame_hdr : AT(ADDR(.eh_frame_hdr) - LOAD_OFFSET) {	\
 			VMLINUX_SYMBOL(__start_unwind_hdr) = .;		\
 			*(.eh_frame_hdr)				\
 			VMLINUX_SYMBOL(__end_unwind_hdr) = .;		\
+		}							\
+		/* Unwind data */					\
+		. = ALIGN(8);						\
+		.eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {		\
+			VMLINUX_SYMBOL(__start_unwind) = .;		\
+		  	*(.eh_frame)					\
+			VMLINUX_SYMBOL(__end_unwind) = .;		\
 		}
 #else
-#define EH_FRAME_HDR
+#define EH_FRAME
 #endif
 
 		/* DWARF debug sections.
diff --git a/include/asm-h8300/checksum.h b/include/asm-h8300/checksum.h
index 3051931..98724e1 100644
--- a/include/asm-h8300/checksum.h
+++ b/include/asm-h8300/checksum.h
@@ -13,7 +13,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -23,7 +23,7 @@
  * better 64-bit) boundary
  */
 
-unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
 
 
 /*
@@ -33,20 +33,17 @@
  * better 64-bit) boundary
  */
 
-extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
-						int len, int sum, int *csum_err);
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+						int len, __wsum sum, int *csum_err);
 
-#define csum_partial_copy_nocheck(src, dst, len, sum)	\
-	csum_partial_copy((src), (dst), (len), (sum))
-
-unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl);
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 
 /*
  *	Fold a partial checksum
  */
 
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	__asm__("mov.l %0,er0\n\t"
 		"add.w e0,r0\n\t"
@@ -58,7 +55,7 @@
 		: "=r"(sum)
 		: "0"(sum)
 		: "er0");
-	return ~sum;
+	return (__force __sum16)~sum;
 }
 
 
@@ -67,9 +64,9 @@
  * returns a 16-bit checksum, already complemented
  */
 
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
-		  unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
 {
 	__asm__ ("sub.l er0,er0\n\t"
 		 "add.l %2,%0\n\t"
@@ -88,9 +85,9 @@
 	return sum;
 }
 
-static inline unsigned short int
-csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
-		  unsigned short proto, unsigned int sum)
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -100,6 +97,6 @@
  * in icmp.c
  */
 
-extern unsigned short ip_compute_csum(const unsigned char * buff, int len);
+extern __sum16 ip_compute_csum(const void *buff, int len);
 
 #endif /* _H8300_CHECKSUM_H */
diff --git a/include/asm-h8300/delay.h b/include/asm-h8300/delay.h
index cbccbbd..743beba 100644
--- a/include/asm-h8300/delay.h
+++ b/include/asm-h8300/delay.h
@@ -9,7 +9,7 @@
  * Delay routines, using a pre-computed "loops_per_second" value.
  */
 
-extern __inline__ void __delay(unsigned long loops)
+static inline void __delay(unsigned long loops)
 {
 	__asm__ __volatile__ ("1:\n\t"
 			      "dec.l #1,%0\n\t"
@@ -27,7 +27,7 @@
 
 extern unsigned long loops_per_jiffy;
 
-extern __inline__ void udelay(unsigned long usecs)
+static inline void udelay(unsigned long usecs)
 {
 	usecs *= 4295;		/* 2**32 / 1000000 */
 	usecs /= (loops_per_jiffy*HZ);
diff --git a/include/asm-h8300/device.h b/include/asm-h8300/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-h8300/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-h8300/mmu_context.h b/include/asm-h8300/mmu_context.h
index 855721a..5c165f7 100644
--- a/include/asm-h8300/mmu_context.h
+++ b/include/asm-h8300/mmu_context.h
@@ -9,7 +9,7 @@
 {
 }
 
-extern inline int
+static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	// mm->context = virt_to_phys(mm->pgd);
@@ -23,7 +23,7 @@
 {
 }
 
-extern inline void activate_mm(struct mm_struct *prev_mm,
+static inline void activate_mm(struct mm_struct *prev_mm,
 			       struct mm_struct *next_mm)
 {
 }
diff --git a/include/asm-h8300/pci.h b/include/asm-h8300/pci.h
index 5edad5b..0c771b0 100644
--- a/include/asm-h8300/pci.h
+++ b/include/asm-h8300/pci.h
@@ -10,12 +10,12 @@
 #define pcibios_assign_all_busses()	0
 #define pcibios_scan_all_fns(a, b)	0
 
-extern inline void pcibios_set_master(struct pci_dev *dev)
+static inline void pcibios_set_master(struct pci_dev *dev)
 {
 	/* No special bus mastering setup handling */
 }
 
-extern inline void pcibios_penalize_isa_irq(int irq, int active)
+static inline void pcibios_penalize_isa_irq(int irq, int active)
 {
 	/* We don't do dynamic PCI IRQ allocation */
 }
diff --git a/include/asm-h8300/tlbflush.h b/include/asm-h8300/tlbflush.h
index bbdffbe..9a2c5c9 100644
--- a/include/asm-h8300/tlbflush.h
+++ b/include/asm-h8300/tlbflush.h
@@ -47,12 +47,12 @@
 	BUG();
 }
 
-extern inline void flush_tlb_kernel_page(unsigned long addr)
+static inline void flush_tlb_kernel_page(unsigned long addr)
 {
 	BUG();
 }
 
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
 {
 	BUG();
diff --git a/include/asm-h8300/types.h b/include/asm-h8300/types.h
index da2402b..2a8b1b2 100644
--- a/include/asm-h8300/types.h
+++ b/include/asm-h8300/types.h
@@ -55,12 +55,6 @@
 
 typedef u32 dma_addr_t;
 
-#define HAVE_SECTOR_T
-typedef u64 sector_t;
-
-#define HAVE_BLKCNT_T
-typedef u64 blkcnt_t;
-
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
diff --git a/include/asm-h8300/unistd.h b/include/asm-h8300/unistd.h
index 747788d..7ddd414 100644
--- a/include/asm-h8300/unistd.h
+++ b/include/asm-h8300/unistd.h
@@ -295,172 +295,6 @@
 #ifdef __KERNEL__
 
 #define NR_syscalls 289
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
-   <asm-m68k/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-	/* avoid using res which is declared to be in register d0; \
-	   errno might expand to a function call and clobber it.  */ \
-		int __err = -(res); \
-		errno = __err; \
-		res = -1; \
-	} \
-	return (type) (res); \
-} while (0)
-
-#define _syscall0(type, name)				\
-type name(void)						\
-{							\
-  register long __res __asm__("er0");			\
-  __asm__ __volatile__ ("mov.l %1,er0\n\t"		\
-                        "trapa	#0\n\t"			\
-			: "=r" (__res)			\
-			: "g" (__NR_##name)		\
-			: "cc", "memory");		\
-  __syscall_return(type, __res);			\
-}
-
-#define _syscall1(type, name, atype, a)			\
-type name(atype a)					\
-{							\
-  register long __res __asm__("er0");			\
-  register long _a __asm__("er1");			\
-  _a = (long)a;						\
-  __asm__ __volatile__ ("mov.l %1,er0\n\t"		\
-                        "trapa	#0\n\t"			\
-			: "=r" (__res)			\
-			: "g" (__NR_##name),		\
-			  "g" (_a)			\
-			: "cc", "memory");	 	\
-  __syscall_return(type, __res);			\
-}
-
-#define _syscall2(type, name, atype, a, btype, b)	\
-type name(atype a, btype b)				\
-{							\
-  register long __res __asm__("er0");			\
-  register long _a __asm__("er1");			\
-  register long _b __asm__("er2");			\
-  _a = (long)a;						\
-  _b = (long)b;						\
-  __asm__ __volatile__ ("mov.l %1,er0\n\t"		\
-                        "trapa	#0\n\t"			\
-			: "=r" (__res)			\
-			: "g" (__NR_##name),		\
-			  "g" (_a),			\
-			  "g" (_b)			\
-			: "cc", "memory");	 	\
-  __syscall_return(type, __res);			\
-}
-
-#define _syscall3(type, name, atype, a, btype, b, ctype, c)	\
-type name(atype a, btype b, ctype c)			\
-{							\
-  register long __res __asm__("er0");			\
-  register long _a __asm__("er1");			\
-  register long _b __asm__("er2");			\
-  register long _c __asm__("er3");			\
-  _a = (long)a;						\
-  _b = (long)b;						\
-  _c = (long)c;						\
-  __asm__ __volatile__ ("mov.l %1,er0\n\t"		\
-                        "trapa	#0\n\t"			\
-			: "=r" (__res)			\
-			: "g" (__NR_##name),		\
-			  "g" (_a),			\
-			  "g" (_b),			\
-			  "g" (_c)			\
-			: "cc", "memory");		\
-  __syscall_return(type, __res);			\
-}
-
-#define _syscall4(type, name, atype, a, btype, b,	\
-                  ctype, c, dtype, d)			\
-type name(atype a, btype b, ctype c, dtype d)		\
-{							\
-  register long __res __asm__("er0");			\
-  register long _a __asm__("er1");			\
-  register long _b __asm__("er2");			\
-  register long _c __asm__("er3");			\
-  register long _d __asm__("er4");			\
-  _a = (long)a;						\
-  _b = (long)b;						\
-  _c = (long)c;						\
-  _d = (long)d;						\
-  __asm__ __volatile__ ("mov.l	%1,er0\n\t"		\
-                        "trapa	#0\n\t"			\
-			: "=r" (__res)			\
-			: "g" (__NR_##name),		\
-			  "g" (_a),			\
-			  "g" (_b),			\
-			  "g" (_c),			\
-			  "g" (_d)			\
-			: "cc", "memory");		\
-  __syscall_return(type, __res);			\
-}
-
-#define _syscall5(type, name, atype, a, btype, b,	\
-                  ctype, c, dtype, d, etype, e)		\
-type name(atype a, btype b, ctype c, dtype d, etype e)	\
-{							\
-  register long __res __asm__("er0");			\
-  register long _a __asm__("er1");			\
-  register long _b __asm__("er2");			\
-  register long _c __asm__("er3");			\
-  register long _d __asm__("er4");			\
-  register long _e __asm__("er5");			\
-  _a = (long)a;                                       	\
-  _b = (long)b;                                       	\
-  _c = (long)c;                                       	\
-  _d = (long)d;                                       	\
-  _e = (long)e;                                       	\
-  __asm__ __volatile__ ("mov.l	%1,er0\n\t"		\
-                        "trapa	#0\n\t"			\
-			: "=r" (__res)			\
-			: "g" (__NR_##name),		\
-			  "g" (_a),			\
-			  "g" (_b),			\
-			  "g" (_c),			\
-			  "g" (_d),			\
-			  "g" (_e)			\
-			: "cc", "memory");		\
-  __syscall_return(type, __res);			\
-}
-
-#define _syscall6(type, name, atype, a, btype, b,	\
-                  ctype, c, dtype, d, etype, e, ftype, f)	\
-type name(atype a, btype b, ctype c, dtype d, etype e, ftype f)	\
-{							\
-  register long __res __asm__("er0");			\
-  register long _a __asm__("er1");			\
-  register long _b __asm__("er2");			\
-  register long _c __asm__("er3");			\
-  register long _d __asm__("er4");			\
-  register long _e __asm__("er5");			\
-  register long _f __asm__("er6");			\
-  _a = (long)a;                                       	\
-  _b = (long)b;                                       	\
-  _c = (long)c;                                       	\
-  _d = (long)d;                                       	\
-  _e = (long)e;                                       	\
-  _f = (long)f;                                       	\
-  __asm__ __volatile__ ("mov.l	%1,er0\n\t"		\
-                        "trapa	#0\n\t"			\
-			: "=r" (__res)			\
-			: "g" (__NR_##name),		\
-			  "g" (_a),			\
-			  "g" (_b),			\
-			  "g" (_c),			\
-			  "g" (_d),			\
-			  "g" (_e)			\
-			  "g" (_f)			\
-			: "cc", "memory");		\
-  __syscall_return(type, __res);			\
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-i386/Kbuild b/include/asm-i386/Kbuild
index 147e4ac..5ae93af 100644
--- a/include/asm-i386/Kbuild
+++ b/include/asm-i386/Kbuild
@@ -7,5 +7,4 @@
 header-y += ucontext.h
 
 unifdef-y += mtrr.h
-unifdef-y += setup.h
 unifdef-y += vm86.h
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index b01a7ec..b8fa955 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -4,7 +4,7 @@
 #ifdef __KERNEL__
 
 #include <asm/types.h>
-
+#include <linux/stddef.h>
 #include <linux/types.h>
 
 struct alt_instr {
@@ -118,4 +118,15 @@
 #define LOCK_PREFIX ""
 #endif
 
+struct paravirt_patch;
+#ifdef CONFIG_PARAVIRT
+void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
+#else
+static inline void
+apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
+{}
+#define __start_parainstructions NULL
+#define __stop_parainstructions NULL
+#endif
+
 #endif /* _I386_ALTERNATIVE_H */
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index b952957..41a4431 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -37,18 +37,27 @@
 /*
  * Basic functions accessing APICs.
  */
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define apic_write native_apic_write
+#define apic_write_atomic native_apic_write_atomic
+#define apic_read native_apic_read
+#endif
 
-static __inline void apic_write(unsigned long reg, unsigned long v)
+static __inline fastcall void native_apic_write(unsigned long reg,
+						unsigned long v)
 {
 	*((volatile unsigned long *)(APIC_BASE+reg)) = v;
 }
 
-static __inline void apic_write_atomic(unsigned long reg, unsigned long v)
+static __inline fastcall void native_apic_write_atomic(unsigned long reg,
+						       unsigned long v)
 {
 	xchg((volatile unsigned long *)(APIC_BASE+reg), v);
 }
 
-static __inline unsigned long apic_read(unsigned long reg)
+static __inline fastcall unsigned long native_apic_read(unsigned long reg)
 {
 	return *((volatile unsigned long *)(APIC_BASE+reg));
 }
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 51a1662..c57441b 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -14,7 +14,7 @@
  * on us. We need to use _exactly_ the address the user gave us,
  * not some alias that contains the same information.
  */
-typedef struct { volatile int counter; } atomic_t;
+typedef struct { int counter; } atomic_t;
 
 #define ATOMIC_INIT(i)	{ (i) }
 
@@ -187,9 +187,9 @@
 	/* Modern 486+ processor */
 	__i = i;
 	__asm__ __volatile__(
-		LOCK_PREFIX "xaddl %0, %1;"
-		:"=r"(i)
-		:"m"(v->counter), "0"(i));
+		LOCK_PREFIX "xaddl %0, %1"
+		:"+r" (i), "+m" (v->counter)
+		: : "memory");
 	return i + __i;
 
 #ifdef CONFIG_M386
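The rewritten constraints above ("+r" for the addend, "+m" for the counter) tell gcc that both operands are read and written by xaddl; the helper still returns i + __i, i.e. the post-add value of the counter. A minimal sketch of the same add-and-return semantics using GCC's __sync builtin (illustrative only, not the kernel implementation):

/* Illustrative sketch: same observable behaviour as the lock xaddl sequence. */
static inline int atomic_add_return_sketch(int i, int *counter)
{
	/* __sync_add_and_fetch returns the updated value, matching "i + __i" */
	return __sync_add_and_fetch(counter, i);
}

int main(void)
{
	int v = 40;

	return atomic_add_return_sketch(2, &v) == 42 ? 0 : 1;
}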
diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h
index 96b228e..8ce79a6 100644
--- a/include/asm-i386/boot.h
+++ b/include/asm-i386/boot.h
@@ -12,4 +12,8 @@
 #define EXTENDED_VGA	0xfffe		/* 80x50 mode */
 #define ASK_VGA		0xfffd		/* ask for it at bootup */
 
-#endif
+/* Physical address where the kernel should be loaded. */
+#define LOAD_PHYSICAL_ADDR ((0x100000 + CONFIG_PHYSICAL_ALIGN - 1) \
+				& ~(CONFIG_PHYSICAL_ALIGN - 1))
+
+#endif /* _LINUX_BOOT_H */
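LOAD_PHYSICAL_ADDR above rounds the traditional 1 MB load address up to the next CONFIG_PHYSICAL_ALIGN boundary; the (x + align - 1) & ~(align - 1) form only works when the alignment is a power of two. A standalone sketch using a hypothetical 4 MB alignment value:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* "a" must be a power of two */

int main(void)
{
	unsigned long align = 0x400000UL;	/* hypothetical CONFIG_PHYSICAL_ALIGN: 4 MB */

	printf("%#lx\n", ALIGN_UP(0x100000UL, align));	/* prints 0x400000 */
	return 0;
}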
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index 592ffee..38f1aeb 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -21,6 +21,7 @@
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/msr.h>
+#include <asm/paravirt.h>
 
 static int __init no_halt(char *s)
 {
@@ -91,6 +92,9 @@
 
 static void __init check_hlt(void)
 {
+	if (paravirt_enabled())
+		return;
+
 	printk(KERN_INFO "Checking 'hlt' instruction... ");
 	if (!boot_cpu_data.hlt_works_ok) {
 		printk("disabled\n");
diff --git a/include/asm-i386/checksum.h b/include/asm-i386/checksum.h
index 67d3630..75194ab 100644
--- a/include/asm-i386/checksum.h
+++ b/include/asm-i386/checksum.h
@@ -17,7 +17,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -27,8 +27,8 @@
  * better 64-bit) boundary
  */
 
-asmlinkage unsigned int csum_partial_copy_generic(const unsigned char *src, unsigned char *dst,
-						  int len, int sum, int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+						  int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr);
 
 /*
  *	Note: when you get a NULL pointer exception here this means someone
@@ -38,18 +38,18 @@
  *	access_ok().
  */
 static __inline__
-unsigned int csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst,
-					int len, int sum)
+__wsum csum_partial_copy_nocheck (const void *src, void *dst,
+					int len, __wsum sum)
 {
 	return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
 }
 
 static __inline__
-unsigned int csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
-						int len, int sum, int *err_ptr)
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+						int len, __wsum sum, int *err_ptr)
 {
 	might_sleep();
-	return csum_partial_copy_generic((__force unsigned char *)src, dst,
+	return csum_partial_copy_generic((__force void *)src, dst,
 					len, sum, err_ptr, NULL);
 }
 
@@ -60,8 +60,7 @@
  *	By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
  *	Arnt Gulbrandsen.
  */
-static inline unsigned short ip_fast_csum(unsigned char * iph,
-					  unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int sum;
 
@@ -89,29 +88,29 @@
 	: "=r" (sum), "=r" (iph), "=r" (ihl)
 	: "1" (iph), "2" (ihl)
 	: "memory");
-	return(sum);
+	return (__force __sum16)sum;
 }
 
 /*
  *	Fold a partial checksum
  */
 
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	__asm__(
 		"addl %1, %0		;\n"
 		"adcl $0xffff, %0	;\n"
 		: "=r" (sum)
-		: "r" (sum << 16), "0" (sum & 0xffff0000)
+		: "r" ((__force u32)sum << 16),
+		  "0" ((__force u32)sum & 0xffff0000)
 	);
-	return (~sum) >> 16;
+	return (__force __sum16)(~(__force u32)sum >> 16);
 }
 
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
-						   unsigned long daddr,
-						   unsigned short len,
-						   unsigned short proto,
-						   unsigned int sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+					   unsigned short len,
+					   unsigned short proto,
+					   __wsum sum)
 {
     __asm__(
 	"addl %1, %0	;\n"
@@ -119,7 +118,7 @@
 	"adcl %3, %0	;\n"
 	"adcl $0, %0	;\n"
 	: "=r" (sum)
-	: "g" (daddr), "g"(saddr), "g"((ntohs(len)<<16)+proto*256), "0"(sum));
+	: "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum));
     return sum;
 }
 
@@ -127,11 +126,10 @@
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum)
+						   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -141,17 +139,16 @@
  * in icmp.c
  */
 
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
     return csum_fold (csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						     struct in6_addr *daddr,
-						     __u32 len,
-						     unsigned short proto,
-						     unsigned int sum)
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+					  const struct in6_addr *daddr,
+					  __u32 len, unsigned short proto,
+					  __wsum sum)
 {
 	__asm__(
 		"addl 0(%1), %0		;\n"
@@ -176,19 +173,19 @@
  *	Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-static __inline__ unsigned int csum_and_copy_to_user(const unsigned char *src,
-						     unsigned char __user *dst,
-						     int len, int sum, 
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+						     void __user *dst,
+						     int len, __wsum sum,
 						     int *err_ptr)
 {
 	might_sleep();
 	if (access_ok(VERIFY_WRITE, dst, len))
-		return csum_partial_copy_generic(src, (__force unsigned char *)dst, len, sum, NULL, err_ptr);
+		return csum_partial_copy_generic(src, (__force void *)dst, len, sum, NULL, err_ptr);
 
 	if (len)
 		*err_ptr = -EFAULT;
 
-	return -1; /* invalid checksum */
+	return (__force __wsum)-1; /* invalid checksum */
 }
 
 #endif
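csum_fold() above collapses the 32-bit running sum into the final 16-bit one's-complement checksum with an add/adc pair. A portable sketch of an equivalent fold (illustrative only, not the asm used here):

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold_sketch(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the high half into the low half */
	sum += sum >> 16;			/* fold any remaining carry */
	return (uint16_t)~sum;			/* complement to get the checksum */
}

int main(void)
{
	printf("%#x\n", csum_fold_sketch(0x1a2b3c4d));	/* prints 0xa987 */
	return 0;
}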
diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h
index b1bc7b1..9d914e1 100644
--- a/include/asm-i386/cpu.h
+++ b/include/asm-i386/cpu.h
@@ -13,6 +13,9 @@
 extern int arch_register_cpu(int num);
 #ifdef CONFIG_HOTPLUG_CPU
 extern void arch_unregister_cpu(int);
+extern int enable_cpu_hotplug;
+#else
+#define enable_cpu_hotplug	0
 #endif
 
 DECLARE_PER_CPU(int, cpu_state);
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index d314ebb..3f92b94 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -31,7 +31,7 @@
 #define X86_FEATURE_PSE36	(0*32+17) /* 36-bit PSEs */
 #define X86_FEATURE_PN		(0*32+18) /* Processor serial number */
 #define X86_FEATURE_CLFLSH	(0*32+19) /* Supports the CLFLUSH instruction */
-#define X86_FEATURE_DTES	(0*32+21) /* Debug Trace Store */
+#define X86_FEATURE_DS		(0*32+21) /* Debug Store */
 #define X86_FEATURE_ACPI	(0*32+22) /* ACPI via MSR */
 #define X86_FEATURE_MMX		(0*32+23) /* Multimedia Extensions */
 #define X86_FEATURE_FXSR	(0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
@@ -73,6 +73,8 @@
 #define X86_FEATURE_UP		(3*32+ 9) /* smp kernel running on up */
 #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
 #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS	(3*32+12)  /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS		(3*32+13)  /* Branch Trace Store */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
@@ -134,6 +136,10 @@
 #define cpu_has_phe_enabled	boot_cpu_has(X86_FEATURE_PHE_EN)
 #define cpu_has_pmm		boot_cpu_has(X86_FEATURE_PMM)
 #define cpu_has_pmm_enabled	boot_cpu_has(X86_FEATURE_PMM_EN)
+#define cpu_has_ds		boot_cpu_has(X86_FEATURE_DS)
+#define cpu_has_pebs 		boot_cpu_has(X86_FEATURE_PEBS)
+#define cpu_has_clflush		boot_cpu_has(X86_FEATURE_CLFLSH)
+#define cpu_has_bts 		boot_cpu_has(X86_FEATURE_BTS)
 
 #endif /* __ASM_I386_CPUFEATURE_H */
 
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
index 3cbbecd..5252ee0 100644
--- a/include/asm-i386/current.h
+++ b/include/asm-i386/current.h
@@ -1,13 +1,14 @@
 #ifndef _I386_CURRENT_H
 #define _I386_CURRENT_H
 
-#include <linux/thread_info.h>
+#include <asm/pda.h>
+#include <linux/compiler.h>
 
 struct task_struct;
 
-static __always_inline struct task_struct * get_current(void)
+static __always_inline struct task_struct *get_current(void)
 {
-	return current_thread_info()->task;
+	return read_pda(pcurrent);
 }
  
 #define current get_current()
diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h
index b1c7650..32d6678 100644
--- a/include/asm-i386/delay.h
+++ b/include/asm-i386/delay.h
@@ -7,6 +7,7 @@
  * Delay routines calling functions in arch/i386/lib/delay.c
  */
  
+/* Undefined functions to get compile-time errors */
 extern void __bad_udelay(void);
 extern void __bad_ndelay(void);
 
@@ -15,13 +16,23 @@
 extern void __const_udelay(unsigned long usecs);
 extern void __delay(unsigned long loops);
 
+#if defined(CONFIG_PARAVIRT) && !defined(USE_REAL_TIME_DELAY)
+#define udelay(n) paravirt_ops.const_udelay((n) * 0x10c7ul)
+
+#define ndelay(n) paravirt_ops.const_udelay((n) * 5ul)
+
+#else /* !PARAVIRT || USE_REAL_TIME_DELAY */
+
+/* 0x10c7 is 2**32 / 1000000 (rounded up) */
 #define udelay(n) (__builtin_constant_p(n) ? \
 	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
 	__udelay(n))
-	
+
+/* 0x5 is 2**32 / 1000000000 (rounded up) */
 #define ndelay(n) (__builtin_constant_p(n) ? \
 	((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
 	__ndelay(n))
+#endif
 
 void use_tsc_delay(void);
 
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 5874ef1..f398cc4 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -4,8 +4,6 @@
 #include <asm/ldt.h>
 #include <asm/segment.h>
 
-#define CPU_16BIT_STACK_SIZE 1024
-
 #ifndef __ASSEMBLY__
 
 #include <linux/preempt.h>
@@ -16,8 +14,6 @@
 
 extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
 
-DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
-
 struct Xgt_desc_struct {
 	unsigned short size;
 	unsigned long address __attribute__((packed));
@@ -33,11 +29,6 @@
 	return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
 }
 
-/*
- * This is the ldt that every process will get unless we need
- * something other than this.
- */
-extern struct desc_struct default_ldt[];
 extern struct desc_struct idt_table[];
 extern void set_intr_gate(unsigned int irq, void * addr);
 
@@ -64,8 +55,10 @@
 #define DESCTYPE_DPL3	0x60	/* DPL-3 */
 #define DESCTYPE_S	0x10	/* !system */
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
-#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
 
 #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
 #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
@@ -88,6 +81,10 @@
 #undef C
 }
 
+#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+
 static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
 {
 	__u32 *lp = (__u32 *)((char *)dt + entry*8);
@@ -95,9 +92,25 @@
 	*(lp+1) = entry_b;
 }
 
-#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
-#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
-#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define set_ldt native_set_ldt
+#endif /* CONFIG_PARAVIRT */
+
+static inline fastcall void native_set_ldt(const void *addr,
+					   unsigned int entries)
+{
+	if (likely(entries == 0))
+		__asm__ __volatile__("lldt %w0"::"q" (0));
+	else {
+		unsigned cpu = smp_processor_id();
+		__u32 a, b;
+
+		pack_descriptor(&a, &b, (unsigned long)addr,
+				entries * sizeof(struct desc_struct) - 1,
+				DESCTYPE_LDT, 0);
+		write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
+		__asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
+	}
+}
 
 static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
 {
@@ -115,14 +128,6 @@
 	write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
 }
 
-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries)
-{
-	__u32 a, b;
-	pack_descriptor(&a, &b, (unsigned long)addr,
-			entries * sizeof(struct desc_struct) - 1,
-			DESCTYPE_LDT, 0);
-	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
-}
 
 #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
 
@@ -153,35 +158,22 @@
 
 static inline void clear_LDT(void)
 {
-	int cpu = get_cpu();
-
-	set_ldt_desc(cpu, &default_ldt[0], 5);
-	load_LDT_desc();
-	put_cpu();
+	set_ldt(NULL, 0);
 }
 
 /*
  * load one particular LDT into the current CPU
  */
-static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
+static inline void load_LDT_nolock(mm_context_t *pc)
 {
-	void *segments = pc->ldt;
-	int count = pc->size;
-
-	if (likely(!count)) {
-		segments = &default_ldt[0];
-		count = 5;
-	}
-		
-	set_ldt_desc(cpu, segments, count);
-	load_LDT_desc();
+	set_ldt(pc->ldt, pc->size);
 }
 
 static inline void load_LDT(mm_context_t *pc)
 {
-	int cpu = get_cpu();
-	load_LDT_nolock(pc, cpu);
-	put_cpu();
+	preempt_disable();
+	load_LDT_nolock(pc);
+	preempt_enable();
 }
 
 static inline unsigned long get_desc_base(unsigned long *desc)
@@ -193,6 +185,29 @@
 	return base;
 }
 
+#else /* __ASSEMBLY__ */
+
+/*
+ * GET_DESC_BASE reads the descriptor base of the specified segment.
+ *
+ * Args:
+ *    idx - descriptor index
+ *    gdt - GDT pointer
+ *    base - 32bit register to which the base will be written
+ *    lo_w - lo word of the "base" register
+ *    lo_b - lo byte of the "base" register
+ *    hi_b - hi byte of the low word of the "base" register
+ *
+ * Example:
+ *    GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
+ *    Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
+ */
+#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
+	movb idx*8+4(gdt), lo_b; \
+	movb idx*8+7(gdt), hi_b; \
+	shll $16, base; \
+	movw idx*8+2(gdt), lo_w;
+
 #endif /* !__ASSEMBLY__ */
 
 #endif
diff --git a/include/asm-i386/device.h b/include/asm-i386/device.h
new file mode 100644
index 0000000..849604c
--- /dev/null
+++ b/include/asm-i386/device.h
@@ -0,0 +1,15 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#ifndef _ASM_I386_DEVICE_H
+#define _ASM_I386_DEVICE_H
+
+struct dev_archdata {
+#ifdef CONFIG_ACPI
+	void	*acpi_handle;
+#endif
+};
+
+#endif /* _ASM_I386_DEVICE_H */
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
index 81999a3..183eebe 100644
--- a/include/asm-i386/dma-mapping.h
+++ b/include/asm-i386/dma-mapping.h
@@ -156,10 +156,10 @@
 	return (1 << INTERNODE_CACHE_SHIFT);
 }
 
-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	flush_write_buffers();
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
index f7514fb..395077a 100644
--- a/include/asm-i386/e820.h
+++ b/include/asm-i386/e820.h
@@ -38,6 +38,11 @@
 
 extern int e820_all_mapped(unsigned long start, unsigned long end,
 			   unsigned type);
+extern void find_max_pfn(void);
+extern void register_bootmem_low_pages(unsigned long max_low_pfn);
+extern void register_memory(void);
+extern void limit_regions(unsigned long long size);
+extern void print_memory_map(char *who);
 
 #endif/*!__ASSEMBLY__*/
 
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index 3a05436..45d21a0 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -91,7 +91,7 @@
 	pr_reg[7] = regs->xds;				\
 	pr_reg[8] = regs->xes;				\
 	savesegment(fs,pr_reg[9]);			\
-	savesegment(gs,pr_reg[10]);			\
+	pr_reg[10] = regs->xgs;				\
 	pr_reg[11] = regs->orig_eax;			\
 	pr_reg[12] = regs->eip;				\
 	pr_reg[13] = regs->xcs;				\
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
index 946d97c..438ef0e 100644
--- a/include/asm-i386/futex.h
+++ b/include/asm-i386/futex.h
@@ -56,7 +56,7 @@
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	if (op == FUTEX_OP_SET)
 		__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
@@ -88,7 +88,7 @@
 		}
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h
index 8ffbb0f..fd2be59 100644
--- a/include/asm-i386/genapic.h
+++ b/include/asm-i386/genapic.h
@@ -122,6 +122,6 @@
 	APICFUNC(phys_pkg_id) \
 	}
 
-extern struct genapic *genapic;
+extern struct genapic *genapic, apic_default;
 
 #endif
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index bc1d6ed..434936c 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -76,7 +76,9 @@
 
 #define __unlazy_fpu( tsk ) do { \
 	if (task_thread_info(tsk)->status & TS_USEDFPU) \
-		save_init_fpu( tsk ); \
+		save_init_fpu( tsk ); 			\
+	else						\
+		tsk->fpu_counter = 0;			\
 } while (0)
 
 #define __clear_fpu( tsk )					\
@@ -118,6 +120,7 @@
 extern unsigned short get_fpu_cwd( struct task_struct *tsk );
 extern unsigned short get_fpu_swd( struct task_struct *tsk );
 extern unsigned short get_fpu_mxcsr( struct task_struct *tsk );
+extern asmlinkage void math_state_restore(void);
 
 /*
  * Signal frame handlers...
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 68df0dc3..86ff5e8 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -256,11 +256,11 @@
 
 #endif /* __KERNEL__ */
 
-#ifdef SLOW_IO_BY_JUMPING
-#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
+#if defined(CONFIG_PARAVIRT)
+#include <asm/paravirt.h>
 #else
+
 #define __SLOW_DOWN_IO "outb %%al,$0x80;"
-#endif
 
 static inline void slow_down_io(void) {
 	__asm__ __volatile__(
@@ -271,6 +271,8 @@
 		: : );
 }
 
+#endif
+
 #ifdef CONFIG_X86_NUMAQ
 extern void *xquad_portio;    /* Where the IO area was mapped */
 #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 331726b..11761cd 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -37,8 +37,13 @@
 extern int irqbalance_disable(char *str);
 #endif
 
+extern void quirk_intel_irqbalance(void);
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern void fixup_irqs(cpumask_t map);
 #endif
 
+void init_IRQ(void);
+void __init native_init_IRQ(void);
+
 #endif /* _ASM_IRQ_H */
diff --git a/include/asm-i386/irq_regs.h b/include/asm-i386/irq_regs.h
index 3dd9c0b..a1b3f7f 100644
--- a/include/asm-i386/irq_regs.h
+++ b/include/asm-i386/irq_regs.h
@@ -1 +1,27 @@
-#include <asm-generic/irq_regs.h>
+/*
+ * Per-cpu current frame pointer - the location of the last exception frame on
+ * the stack, stored in the PDA.
+ *
+ * Jeremy Fitzhardinge <jeremy@goop.org>
+ */
+#ifndef _ASM_I386_IRQ_REGS_H
+#define _ASM_I386_IRQ_REGS_H
+
+#include <asm/pda.h>
+
+static inline struct pt_regs *get_irq_regs(void)
+{
+	return read_pda(irq_regs);
+}
+
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+	struct pt_regs *old_regs;
+
+	old_regs = read_pda(irq_regs);
+	write_pda(irq_regs, new_regs);
+
+	return old_regs;
+}
+
+#endif /* _ASM_I386_IRQ_REGS_H */
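get_irq_regs()/set_irq_regs() above keep the interrupted register frame in the PDA so handlers can reach it without threading a pt_regs pointer through every call; interrupt entry is expected to save and restore the previous value so nested interrupts still see the right frame. A compile-alone sketch of that pattern, with a stand-in type and hypothetical names:

struct pt_regs_stub { int dummy; };		/* stand-in for struct pt_regs */

static struct pt_regs_stub *current_irq_regs;	/* models the PDA irq_regs slot */

static struct pt_regs_stub *set_irq_regs_stub(struct pt_regs_stub *new_regs)
{
	struct pt_regs_stub *old = current_irq_regs;

	current_irq_regs = new_regs;
	return old;
}

static void irq_entry(struct pt_regs_stub *regs)
{
	struct pt_regs_stub *old = set_irq_regs_stub(regs);

	/* ... handlers may now ask which frame was interrupted ... */

	set_irq_regs_stub(old);			/* restore for nested interrupts */
}

int main(void)
{
	struct pt_regs_stub frame = { 0 };

	irq_entry(&frame);
	return 0;
}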
diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h
index e1bdb97..17b18cf 100644
--- a/include/asm-i386/irqflags.h
+++ b/include/asm-i386/irqflags.h
@@ -10,6 +10,9 @@
 #ifndef _ASM_IRQFLAGS_H
 #define _ASM_IRQFLAGS_H
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 #ifndef __ASSEMBLY__
 
 static inline unsigned long __raw_local_save_flags(void)
@@ -25,9 +28,6 @@
 	return flags;
 }
 
-#define raw_local_save_flags(flags) \
-		do { (flags) = __raw_local_save_flags(); } while (0)
-
 static inline void raw_local_irq_restore(unsigned long flags)
 {
 	__asm__ __volatile__(
@@ -66,18 +66,6 @@
 	__asm__ __volatile__("hlt": : :"memory");
 }
 
-static inline int raw_irqs_disabled_flags(unsigned long flags)
-{
-	return !(flags & (1 << 9));
-}
-
-static inline int raw_irqs_disabled(void)
-{
-	unsigned long flags = __raw_local_save_flags();
-
-	return raw_irqs_disabled_flags(flags);
-}
-
 /*
  * For spinlocks, etc:
  */
@@ -90,9 +78,33 @@
 	return flags;
 }
 
+#else
+#define DISABLE_INTERRUPTS(clobbers)	cli
+#define ENABLE_INTERRUPTS(clobbers)	sti
+#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
+#define INTERRUPT_RETURN		iret
+#define GET_CR0_INTO_EAX		movl %cr0, %eax
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PARAVIRT */
+
+#ifndef __ASSEMBLY__
+#define raw_local_save_flags(flags) \
+		do { (flags) = __raw_local_save_flags(); } while (0)
+
 #define raw_local_irq_save(flags) \
 		do { (flags) = __raw_local_irq_save(); } while (0)
 
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & (1 << 9));
+}
+
+static inline int raw_irqs_disabled(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	return raw_irqs_disabled_flags(flags);
+}
 #endif /* __ASSEMBLY__ */
 
 /*
diff --git a/include/asm-i386/mach-default/setup_arch.h b/include/asm-i386/mach-default/setup_arch.h
index fb42099..605e3cc 100644
--- a/include/asm-i386/mach-default/setup_arch.h
+++ b/include/asm-i386/mach-default/setup_arch.h
@@ -2,4 +2,6 @@
 
 /* no action for generic */
 
+#ifndef ARCH_SETUP
 #define ARCH_SETUP
+#endif
diff --git a/include/asm-i386/math_emu.h b/include/asm-i386/math_emu.h
index 697673b..a4b0aa3 100644
--- a/include/asm-i386/math_emu.h
+++ b/include/asm-i386/math_emu.h
@@ -21,6 +21,7 @@
 	long ___eax;
 	long ___ds;
 	long ___es;
+	long ___fs;
 	long ___orig_eax;
 	long ___eip;
 	long ___cs;
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 62b7bf1..68ff102 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -44,7 +44,7 @@
 		 * load the LDT, if the LDT is different:
 		 */
 		if (unlikely(prev->context.ldt != next->context.ldt))
-			load_LDT_nolock(&next->context, cpu);
+			load_LDT_nolock(&next->context);
 	}
 #ifdef CONFIG_SMP
 	else {
@@ -56,14 +56,14 @@
 			 * tlb flush IPI delivery. We must reload %cr3.
 			 */
 			load_cr3(next->pgd);
-			load_LDT_nolock(&next->context, cpu);
+			load_LDT_nolock(&next->context);
 		}
 	}
 #endif
 }
 
-#define deactivate_mm(tsk, mm) \
-	asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
+#define deactivate_mm(tsk, mm)			\
+	asm("movl %0,%%fs": :"r" (0));
 
 #define activate_mm(prev, next) \
 	switch_mm((prev),(next),NULL)
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 61b0733..3503ad6 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -120,13 +120,26 @@
 	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low_pages(x) \
 	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
-#define alloc_bootmem_node(ignore, x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_pages_node(ignore, x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages_node(ignore, x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
-
+#define alloc_bootmem_node(pgdat, x)					\
+({									\
+	struct pglist_data  __attribute__ ((unused))			\
+				*__alloc_bootmem_node__pgdat = (pgdat);	\
+	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES,	\
+						__pa(MAX_DMA_ADDRESS));	\
+})
+#define alloc_bootmem_pages_node(pgdat, x)				\
+({									\
+	struct pglist_data  __attribute__ ((unused))			\
+				*__alloc_bootmem_node__pgdat = (pgdat);	\
+	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE,		\
+						__pa(MAX_DMA_ADDRESS));	\
+})
+#define alloc_bootmem_low_pages_node(pgdat, x)				\
+({									\
+	struct pglist_data  __attribute__ ((unused))			\
+				*__alloc_bootmem_node__pgdat = (pgdat);	\
+	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0);		\
+})
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
 
 #endif /* _ASM_MMZONE_H_ */
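The rewritten alloc_bootmem_*_node() macros above still allocate from node 0, but the statement-expression form type-checks the pgdat argument (and evaluates it exactly once) instead of silently ignoring it. A userspace sketch of the same GNU-extension pattern, with hypothetical names:

#include <stdio.h>

struct pglist_data_stub { int node_id; };	/* stand-in for struct pglist_data */

#define use_node_arg(pgdat, x)						\
({									\
	struct pglist_data_stub __attribute__((unused))			\
				*__check_pgdat = (pgdat);		\
	(x) * 2;	/* stand-in for the real allocation call */	\
})

int main(void)
{
	struct pglist_data_stub node = { 0 };

	printf("%d\n", use_node_arg(&node, 21));	/* prints 42 */
	/* Passing anything other than a pglist_data_stub pointer (or a null
	 * pointer constant) is now rejected at compile time. */
	return 0;
}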
diff --git a/include/asm-i386/module.h b/include/asm-i386/module.h
index 424661d..02f8f54 100644
--- a/include/asm-i386/module.h
+++ b/include/asm-i386/module.h
@@ -20,6 +20,8 @@
 #define MODULE_PROC_FAMILY "586TSC "
 #elif defined CONFIG_M586MMX
 #define MODULE_PROC_FAMILY "586MMX "
+#elif defined CONFIG_MCORE2
+#define MODULE_PROC_FAMILY "CORE2 "
 #elif defined CONFIG_M686
 #define MODULE_PROC_FAMILY "686 "
 #elif defined CONFIG_MPENTIUMII
@@ -60,18 +62,12 @@
 #error unknown processor family
 #endif
 
-#ifdef CONFIG_REGPARM
-#define MODULE_REGPARM "REGPARM "
-#else
-#define MODULE_REGPARM ""
-#endif
-
 #ifdef CONFIG_4KSTACKS
 #define MODULE_STACKSIZE "4KSTACKS "
 #else
 #define MODULE_STACKSIZE ""
 #endif
 
-#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
 
 #endif /* _ASM_I386_MODULE_H */
diff --git a/include/asm-i386/mpspec_def.h b/include/asm-i386/mpspec_def.h
index 76feedf..13bafb1 100644
--- a/include/asm-i386/mpspec_def.h
+++ b/include/asm-i386/mpspec_def.h
@@ -97,7 +97,6 @@
 #define BUSTYPE_TC	"TC"
 #define BUSTYPE_VME	"VME"
 #define BUSTYPE_XPRESS	"XPRESS"
-#define BUSTYPE_NEC98	"NEC98"
 
 struct mpc_config_ioapic
 {
@@ -182,7 +181,6 @@
 	MP_BUS_EISA,
 	MP_BUS_PCI,
 	MP_BUS_MCA,
-	MP_BUS_NEC98
 };
 #endif
 
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 62b76cd..5679d49 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -1,6 +1,10 @@
 #ifndef __ASM_MSR_H
 #define __ASM_MSR_H
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+
 /*
  * Access to machine-specific registers (available on 586 and better only)
  * Note: the rd* operations modify the parameters directly (without using
@@ -77,6 +81,7 @@
      __asm__ __volatile__("rdpmc" \
 			  : "=a" (low), "=d" (high) \
 			  : "c" (counter))
+#endif	/* !CONFIG_PARAVIRT */
 
 /* symbolic names for some interesting MSRs */
 /* Intel defined MSRs. */
@@ -141,6 +146,10 @@
 #define MSR_IA32_MC0_ADDR		0x402
 #define MSR_IA32_MC0_MISC		0x403
 
+#define MSR_IA32_PEBS_ENABLE		0x3f1
+#define MSR_IA32_DS_AREA		0x600
+#define MSR_IA32_PERF_CAPABILITIES	0x345
+
 /* Pentium IV performance counter MSRs */
 #define MSR_P4_BPU_PERFCTR0 		0x300
 #define MSR_P4_BPU_PERFCTR1 		0x301
@@ -284,4 +293,13 @@
 #define MSR_TMTA_LRTI_READOUT		0x80868018
 #define MSR_TMTA_LRTI_VOLT_MHZ		0x8086801a
 
+/* Intel Core-based CPU performance counters */
+#define MSR_CORE_PERF_FIXED_CTR0	0x309
+#define MSR_CORE_PERF_FIXED_CTR1	0x30a
+#define MSR_CORE_PERF_FIXED_CTR2	0x30b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL	0x38d
+#define MSR_CORE_PERF_GLOBAL_STATUS	0x38e
+#define MSR_CORE_PERF_GLOBAL_CTRL	0x38f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL	0x390
+
 #endif /* __ASM_MSR_H */
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index 269d315..b04333e 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -5,6 +5,9 @@
 #define ASM_NMI_H
 
 #include <linux/pm.h>
+#include <asm/irq.h>
+
+#ifdef ARCH_HAS_NMI_WATCHDOG
 
 /**
  * do_nmi_callback
@@ -42,4 +45,9 @@
 			void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;
 
+void __trigger_all_cpu_backtrace(void);
+#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+
+#endif
+
 #endif /* ASM_NMI_H */
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index f5bf544..fd3f64a 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -52,6 +52,7 @@
 #define pte_val(x)	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
 #define __pmd(x) ((pmd_t) { (x) } )
 #define HPAGE_SHIFT	21
+#include <asm-generic/pgtable-nopud.h>
 #else
 typedef struct { unsigned long pte_low; } pte_t;
 typedef struct { unsigned long pgd; } pgd_t;
@@ -59,6 +60,7 @@
 #define boot_pte_t pte_t /* or would you rather have a typedef */
 #define pte_val(x)	((x).pte_low)
 #define HPAGE_SHIFT	22
+#include <asm-generic/pgtable-nopmd.h>
 #endif
 #define PTE_MASK	PAGE_MASK
 
@@ -112,18 +114,18 @@
 
 #ifdef __ASSEMBLY__
 #define __PAGE_OFFSET		CONFIG_PAGE_OFFSET
-#define __PHYSICAL_START	CONFIG_PHYSICAL_START
 #else
 #define __PAGE_OFFSET		((unsigned long)CONFIG_PAGE_OFFSET)
-#define __PHYSICAL_START	((unsigned long)CONFIG_PHYSICAL_START)
 #endif
-#define __KERNEL_START		(__PAGE_OFFSET + __PHYSICAL_START)
 
 
 #define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
 #define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
 #define MAXMEM			(-__PAGE_OFFSET-__VMALLOC_RESERVE)
 #define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
+/* __pa_symbol should be used for C visible symbols.
+   This seems to be the official gcc blessed way to do such arithmetic. */
+#define __pa_symbol(x)          __pa(RELOC_HIDE((unsigned long)(x),0))
 #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
 #ifdef CONFIG_FLATMEM
diff --git a/include/asm-i386/param.h b/include/asm-i386/param.h
index 745dc5b..21b3246 100644
--- a/include/asm-i386/param.h
+++ b/include/asm-i386/param.h
@@ -18,6 +18,5 @@
 #endif
 
 #define MAXHOSTNAMELEN	64	/* max length of hostname */
-#define COMMAND_LINE_SIZE 256
 
 #endif
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
new file mode 100644
index 0000000..9f06265
--- /dev/null
+++ b/include/asm-i386/paravirt.h
@@ -0,0 +1,505 @@
+#ifndef __ASM_PARAVIRT_H
+#define __ASM_PARAVIRT_H
+/* Various instructions on x86 need to be replaced for
+ * para-virtualization: those hooks are defined here. */
+#include <linux/linkage.h>
+#include <linux/stringify.h>
+#include <asm/page.h>
+
+#ifdef CONFIG_PARAVIRT
+/* These are the most performance critical ops, so we want to be able to patch
+ * callers */
+#define PARAVIRT_IRQ_DISABLE 0
+#define PARAVIRT_IRQ_ENABLE 1
+#define PARAVIRT_RESTORE_FLAGS 2
+#define PARAVIRT_SAVE_FLAGS 3
+#define PARAVIRT_SAVE_FLAGS_IRQ_DISABLE 4
+#define PARAVIRT_INTERRUPT_RETURN 5
+#define PARAVIRT_STI_SYSEXIT 6
+
+/* Bitmask of what can be clobbered: usually at least eax. */
+#define CLBR_NONE 0x0
+#define CLBR_EAX 0x1
+#define CLBR_ECX 0x2
+#define CLBR_EDX 0x4
+#define CLBR_ANY 0x7
+
+#ifndef __ASSEMBLY__
+struct thread_struct;
+struct Xgt_desc_struct;
+struct tss_struct;
+struct mm_struct;
+struct paravirt_ops
+{
+	unsigned int kernel_rpl;
+ 	int paravirt_enabled;
+	const char *name;
+
+	/*
+	 * Patch may replace one of the defined code sequences with arbitrary
+	 * code, subject to the same register constraints.  This generally
+	 * means the code is not free to clobber any registers other than EAX.
+	 * The patch function should return the number of bytes of code
+	 * generated, as we nop pad the rest in generic code.
+	 */
+	unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len);
+
+	void (*arch_setup)(void);
+	char *(*memory_setup)(void);
+	void (*init_IRQ)(void);
+
+	void (*banner)(void);
+
+	unsigned long (*get_wallclock)(void);
+	int (*set_wallclock)(unsigned long);
+	void (*time_init)(void);
+
+	/* All the function pointers here are declared as "fastcall"
+	   so that we get a specific register-based calling
+	   convention.  This makes it easier to implement inline
+	   assembler replacements. */
+
+	void (fastcall *cpuid)(unsigned int *eax, unsigned int *ebx,
+		      unsigned int *ecx, unsigned int *edx);
+
+	unsigned long (fastcall *get_debugreg)(int regno);
+	void (fastcall *set_debugreg)(int regno, unsigned long value);
+
+	void (fastcall *clts)(void);
+
+	unsigned long (fastcall *read_cr0)(void);
+	void (fastcall *write_cr0)(unsigned long);
+
+	unsigned long (fastcall *read_cr2)(void);
+	void (fastcall *write_cr2)(unsigned long);
+
+	unsigned long (fastcall *read_cr3)(void);
+	void (fastcall *write_cr3)(unsigned long);
+
+	unsigned long (fastcall *read_cr4_safe)(void);
+	unsigned long (fastcall *read_cr4)(void);
+	void (fastcall *write_cr4)(unsigned long);
+
+	unsigned long (fastcall *save_fl)(void);
+	void (fastcall *restore_fl)(unsigned long);
+	void (fastcall *irq_disable)(void);
+	void (fastcall *irq_enable)(void);
+	void (fastcall *safe_halt)(void);
+	void (fastcall *halt)(void);
+	void (fastcall *wbinvd)(void);
+
+	/* err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
+	u64 (fastcall *read_msr)(unsigned int msr, int *err);
+	int (fastcall *write_msr)(unsigned int msr, u64 val);
+
+	u64 (fastcall *read_tsc)(void);
+	u64 (fastcall *read_pmc)(void);
+
+	void (fastcall *load_tr_desc)(void);
+	void (fastcall *load_gdt)(const struct Xgt_desc_struct *);
+	void (fastcall *load_idt)(const struct Xgt_desc_struct *);
+	void (fastcall *store_gdt)(struct Xgt_desc_struct *);
+	void (fastcall *store_idt)(struct Xgt_desc_struct *);
+	void (fastcall *set_ldt)(const void *desc, unsigned entries);
+	unsigned long (fastcall *store_tr)(void);
+	void (fastcall *load_tls)(struct thread_struct *t, unsigned int cpu);
+	void (fastcall *write_ldt_entry)(void *dt, int entrynum,
+					 u32 low, u32 high);
+	void (fastcall *write_gdt_entry)(void *dt, int entrynum,
+					 u32 low, u32 high);
+	void (fastcall *write_idt_entry)(void *dt, int entrynum,
+					 u32 low, u32 high);
+	void (fastcall *load_esp0)(struct tss_struct *tss,
+				   struct thread_struct *thread);
+
+	void (fastcall *set_iopl_mask)(unsigned mask);
+
+	void (fastcall *io_delay)(void);
+	void (*const_udelay)(unsigned long loops);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+	void (fastcall *apic_write)(unsigned long reg, unsigned long v);
+	void (fastcall *apic_write_atomic)(unsigned long reg, unsigned long v);
+	unsigned long (fastcall *apic_read)(unsigned long reg);
+#endif
+
+	void (fastcall *flush_tlb_user)(void);
+	void (fastcall *flush_tlb_kernel)(void);
+	void (fastcall *flush_tlb_single)(u32 addr);
+
+	void (fastcall *set_pte)(pte_t *ptep, pte_t pteval);
+	void (fastcall *set_pte_at)(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval);
+	void (fastcall *set_pmd)(pmd_t *pmdp, pmd_t pmdval);
+	void (fastcall *pte_update)(struct mm_struct *mm, u32 addr, pte_t *ptep);
+	void (fastcall *pte_update_defer)(struct mm_struct *mm, u32 addr, pte_t *ptep);
+#ifdef CONFIG_X86_PAE
+	void (fastcall *set_pte_atomic)(pte_t *ptep, pte_t pteval);
+	void (fastcall *set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
+	void (fastcall *set_pud)(pud_t *pudp, pud_t pudval);
+	void (fastcall *pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+	void (fastcall *pmd_clear)(pmd_t *pmdp);
+#endif
+
+	/* These two are jmp to, not actually called. */
+	void (fastcall *irq_enable_sysexit)(void);
+	void (fastcall *iret)(void);
+};
+
+/* Mark a paravirt probe function. */
+#define paravirt_probe(fn)						\
+ static asmlinkage void (*__paravirtprobe_##fn)(void) __attribute_used__ \
+		__attribute__((__section__(".paravirtprobe"))) = fn
+
+extern struct paravirt_ops paravirt_ops;
+
+#define paravirt_enabled() (paravirt_ops.paravirt_enabled)
+
+static inline void load_esp0(struct tss_struct *tss,
+			     struct thread_struct *thread)
+{
+	paravirt_ops.load_esp0(tss, thread);
+}
+
+#define ARCH_SETUP			paravirt_ops.arch_setup();
+static inline unsigned long get_wallclock(void)
+{
+	return paravirt_ops.get_wallclock();
+}
+
+static inline int set_wallclock(unsigned long nowtime)
+{
+	return paravirt_ops.set_wallclock(nowtime);
+}
+
+static inline void do_time_init(void)
+{
+	return paravirt_ops.time_init();
+}
+
+/* The paravirtualized CPUID instruction. */
+static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
+			   unsigned int *ecx, unsigned int *edx)
+{
+	paravirt_ops.cpuid(eax, ebx, ecx, edx);
+}
+
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, reg) var = paravirt_ops.get_debugreg(reg)
+#define set_debugreg(val, reg) paravirt_ops.set_debugreg(reg, val)
+
+#define clts() paravirt_ops.clts()
+
+#define read_cr0() paravirt_ops.read_cr0()
+#define write_cr0(x) paravirt_ops.write_cr0(x)
+
+#define read_cr2() paravirt_ops.read_cr2()
+#define write_cr2(x) paravirt_ops.write_cr2(x)
+
+#define read_cr3() paravirt_ops.read_cr3()
+#define write_cr3(x) paravirt_ops.write_cr3(x)
+
+#define read_cr4() paravirt_ops.read_cr4()
+#define read_cr4_safe(x) paravirt_ops.read_cr4_safe()
+#define write_cr4(x) paravirt_ops.write_cr4(x)
+
+static inline void raw_safe_halt(void)
+{
+	paravirt_ops.safe_halt();
+}
+
+static inline void halt(void)
+{
+	paravirt_ops.safe_halt();
+}
+#define wbinvd() paravirt_ops.wbinvd()
+
+#define get_kernel_rpl()  (paravirt_ops.kernel_rpl)
+
+#define rdmsr(msr,val1,val2) do {				\
+	int _err;						\
+	u64 _l = paravirt_ops.read_msr(msr,&_err);		\
+	val1 = (u32)_l;						\
+	val2 = _l >> 32;					\
+} while(0)
+
+#define wrmsr(msr,val1,val2) do {				\
+	u64 _l = ((u64)(val2) << 32) | (val1);			\
+	paravirt_ops.write_msr((msr), _l);			\
+} while(0)
+
+#define rdmsrl(msr,val) do {					\
+	int _err;						\
+	val = paravirt_ops.read_msr((msr),&_err);		\
+} while(0)
+
+#define wrmsrl(msr,val) (paravirt_ops.write_msr((msr),(val)))
+#define wrmsr_safe(msr,a,b) ({					\
+	u64 _l = ((u64)(b) << 32) | (a);			\
+	paravirt_ops.write_msr((msr),_l);			\
+})
+
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr,a,b) ({					\
+	int _err;						\
+	u64 _l = paravirt_ops.read_msr(msr,&_err);		\
+	(*a) = (u32)_l;						\
+	(*b) = _l >> 32;					\
+	_err; })
+
+#define rdtsc(low,high) do {					\
+	u64 _l = paravirt_ops.read_tsc();			\
+	low = (u32)_l;						\
+	high = _l >> 32;					\
+} while(0)
+
+#define rdtscl(low) do {					\
+	u64 _l = paravirt_ops.read_tsc();			\
+	low = (int)_l;						\
+} while(0)
+
+#define rdtscll(val) (val = paravirt_ops.read_tsc())
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high) do {				\
+	u64 _l = paravirt_ops.read_pmc();			\
+	low = (u32)_l;						\
+	high = _l >> 32;					\
+} while(0)
+
+#define load_TR_desc() (paravirt_ops.load_tr_desc())
+#define load_gdt(dtr) (paravirt_ops.load_gdt(dtr))
+#define load_idt(dtr) (paravirt_ops.load_idt(dtr))
+#define set_ldt(addr, entries) (paravirt_ops.set_ldt((addr), (entries)))
+#define store_gdt(dtr) (paravirt_ops.store_gdt(dtr))
+#define store_idt(dtr) (paravirt_ops.store_idt(dtr))
+#define store_tr(tr) ((tr) = paravirt_ops.store_tr())
+#define load_TLS(t,cpu) (paravirt_ops.load_tls((t),(cpu)))
+#define write_ldt_entry(dt, entry, low, high)				\
+	(paravirt_ops.write_ldt_entry((dt), (entry), (low), (high)))
+#define write_gdt_entry(dt, entry, low, high)				\
+	(paravirt_ops.write_gdt_entry((dt), (entry), (low), (high)))
+#define write_idt_entry(dt, entry, low, high)				\
+	(paravirt_ops.write_idt_entry((dt), (entry), (low), (high)))
+#define set_iopl_mask(mask) (paravirt_ops.set_iopl_mask(mask))
+
+/* The paravirtualized I/O functions */
+static inline void slow_down_io(void) {
+	paravirt_ops.io_delay();
+#ifdef REALLY_SLOW_IO
+	paravirt_ops.io_delay();
+	paravirt_ops.io_delay();
+	paravirt_ops.io_delay();
+#endif
+}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+/*
+ * Basic functions accessing APICs.
+ */
+static inline void apic_write(unsigned long reg, unsigned long v)
+{
+	paravirt_ops.apic_write(reg,v);
+}
+
+static inline void apic_write_atomic(unsigned long reg, unsigned long v)
+{
+	paravirt_ops.apic_write_atomic(reg,v);
+}
+
+static inline unsigned long apic_read(unsigned long reg)
+{
+	return paravirt_ops.apic_read(reg);
+}
+#endif
+
+
+#define __flush_tlb() paravirt_ops.flush_tlb_user()
+#define __flush_tlb_global() paravirt_ops.flush_tlb_kernel()
+#define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr)
+
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+	paravirt_ops.set_pte(ptep, pteval);
+}
+
+static inline void set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval)
+{
+	paravirt_ops.set_pte_at(mm, addr, ptep, pteval);
+}
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+	paravirt_ops.set_pmd(pmdp, pmdval);
+}
+
+static inline void pte_update(struct mm_struct *mm, u32 addr, pte_t *ptep)
+{
+	paravirt_ops.pte_update(mm, addr, ptep);
+}
+
+static inline void pte_update_defer(struct mm_struct *mm, u32 addr, pte_t *ptep)
+{
+	paravirt_ops.pte_update_defer(mm, addr, ptep);
+}
+
+#ifdef CONFIG_X86_PAE
+static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
+{
+	paravirt_ops.set_pte_atomic(ptep, pteval);
+}
+
+static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+	paravirt_ops.set_pte_present(mm, addr, ptep, pte);
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pudval)
+{
+	paravirt_ops.set_pud(pudp, pudval);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	paravirt_ops.pte_clear(mm, addr, ptep);
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+	paravirt_ops.pmd_clear(pmdp);
+}
+#endif
+
+/* These all sit in the .parainstructions section to tell us what to patch. */
+struct paravirt_patch {
+	u8 *instr; 		/* original instructions */
+	u8 instrtype;		/* type of this instruction */
+	u8 len;			/* length of original instruction */
+	u16 clobbers;		/* what registers you may clobber */
+};
+
+#define paravirt_alt(insn_string, typenum, clobber)	\
+	"771:\n\t" insn_string "\n" "772:\n"		\
+	".pushsection .parainstructions,\"a\"\n"	\
+	"  .long 771b\n"				\
+	"  .byte " __stringify(typenum) "\n"		\
+	"  .byte 772b-771b\n"				\
+	"  .short " __stringify(clobber) "\n"		\
+	".popsection"
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+	unsigned long f;
+
+	__asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+					   "call *%1;"
+					   "popl %%edx; popl %%ecx",
+					  PARAVIRT_SAVE_FLAGS, CLBR_NONE)
+			     : "=a"(f): "m"(paravirt_ops.save_fl)
+			     : "memory", "cc");
+	return f;
+}
+
+static inline void raw_local_irq_restore(unsigned long f)
+{
+	__asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+					   "call *%1;"
+					   "popl %%edx; popl %%ecx",
+					  PARAVIRT_RESTORE_FLAGS, CLBR_EAX)
+			     : "=a"(f) : "m" (paravirt_ops.restore_fl), "0"(f)
+			     : "memory", "cc");
+}
+
+static inline void raw_local_irq_disable(void)
+{
+	__asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+					   "call *%0;"
+					   "popl %%edx; popl %%ecx",
+					  PARAVIRT_IRQ_DISABLE, CLBR_EAX)
+			     : : "m" (paravirt_ops.irq_disable)
+			     : "memory", "eax", "cc");
+}
+
+static inline void raw_local_irq_enable(void)
+{
+	__asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+					   "call *%0;"
+					   "popl %%edx; popl %%ecx",
+					  PARAVIRT_IRQ_ENABLE, CLBR_EAX)
+			     : : "m" (paravirt_ops.irq_enable)
+			     : "memory", "eax", "cc");
+}
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+	unsigned long f;
+
+	__asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+					   "call *%1; pushl %%eax;"
+					   "call *%2; popl %%eax;"
+					   "popl %%edx; popl %%ecx",
+					  PARAVIRT_SAVE_FLAGS_IRQ_DISABLE,
+					  CLBR_NONE)
+			     : "=a"(f)
+			     : "m" (paravirt_ops.save_fl),
+			       "m" (paravirt_ops.irq_disable)
+			     : "memory", "cc");
+	return f;
+}
+
+#define CLI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;"		\
+		     "call *paravirt_ops+%c[irq_disable];"		\
+		     "popl %%edx; popl %%ecx",				\
+		     PARAVIRT_IRQ_DISABLE, CLBR_EAX)
+
+#define STI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;"		\
+		     "call *paravirt_ops+%c[irq_enable];"		\
+		     "popl %%edx; popl %%ecx",				\
+		     PARAVIRT_IRQ_ENABLE, CLBR_EAX)
+#define CLI_STI_CLOBBERS , "%eax"
+#define CLI_STI_INPUT_ARGS \
+	,								\
+	[irq_disable] "i" (offsetof(struct paravirt_ops, irq_disable)),	\
+	[irq_enable] "i" (offsetof(struct paravirt_ops, irq_enable))
+
+#else  /* __ASSEMBLY__ */
+
+#define PARA_PATCH(ptype, clobbers, ops)	\
+771:;						\
+	ops;					\
+772:;						\
+	.pushsection .parainstructions,"a";	\
+	 .long 771b;				\
+	 .byte ptype;				\
+	 .byte 772b-771b;			\
+	 .short clobbers;			\
+	.popsection
+
+#define INTERRUPT_RETURN				\
+	PARA_PATCH(PARAVIRT_INTERRUPT_RETURN, CLBR_ANY,	\
+	jmp *%cs:paravirt_ops+PARAVIRT_iret)
+
+#define DISABLE_INTERRUPTS(clobbers)			\
+	PARA_PATCH(PARAVIRT_IRQ_DISABLE, clobbers,	\
+	pushl %ecx; pushl %edx;				\
+	call *paravirt_ops+PARAVIRT_irq_disable;	\
+	popl %edx; popl %ecx)				\
+
+#define ENABLE_INTERRUPTS(clobbers)			\
+	PARA_PATCH(PARAVIRT_IRQ_ENABLE, clobbers,	\
+	pushl %ecx; pushl %edx;				\
+	call *%cs:paravirt_ops+PARAVIRT_irq_enable;	\
+	popl %edx; popl %ecx)
+
+#define ENABLE_INTERRUPTS_SYSEXIT			\
+	PARA_PATCH(PARAVIRT_STI_SYSEXIT, CLBR_ANY,	\
+	jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit)
+
+#define GET_CR0_INTO_EAX			\
+	call *paravirt_ops+PARAVIRT_read_cr0
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PARAVIRT */
+#endif	/* __ASM_PARAVIRT_H */
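paravirt_ops is a single global table of function pointers; the headers in this series turn what were raw instructions (cli/sti, MSR and APIC accesses, descriptor loads, ...) into calls through that table, with the native implementations installed by default and a hypervisor backend free to replace them. A miniature userspace model of that indirection, with hypothetical names (illustrative only):

#include <stdio.h>

struct tiny_ops {
	const char *name;
	unsigned long (*read_reg)(void);
	void (*write_reg)(unsigned long v);
};

static unsigned long reg_value;
static unsigned long native_read_reg(void) { return reg_value; }
static void native_write_reg(unsigned long v) { reg_value = v; }

static struct tiny_ops tiny_ops = {		/* a hypervisor port would swap these in */
	.name		= "native",
	.read_reg	= native_read_reg,
	.write_reg	= native_write_reg,
};

#define read_reg()	tiny_ops.read_reg()
#define write_reg(v)	tiny_ops.write_reg(v)

int main(void)
{
	write_reg(0x1234);
	printf("%s: %#lx\n", tiny_ops.name, read_reg());
	return 0;
}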
diff --git a/include/asm-i386/pda.h b/include/asm-i386/pda.h
new file mode 100644
index 0000000..2ba2736
--- /dev/null
+++ b/include/asm-i386/pda.h
@@ -0,0 +1,100 @@
+/*
+   Per-processor Data Areas
+   Jeremy Fitzhardinge <jeremy@goop.org> 2006
+   Based on asm-x86_64/pda.h by Andi Kleen.
+ */
+#ifndef _I386_PDA_H
+#define _I386_PDA_H
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+struct i386_pda
+{
+	struct i386_pda *_pda;		/* pointer to self */
+
+	int cpu_number;
+	struct task_struct *pcurrent;	/* current process */
+	struct pt_regs *irq_regs;
+};
+
+extern struct i386_pda *_cpu_pda[];
+
+#define cpu_pda(i)	(_cpu_pda[i])
+
+#define pda_offset(field) offsetof(struct i386_pda, field)
+
+extern void __bad_pda_field(void);
+
+/* This variable is never instantiated.  It is only used as a stand-in
+   for the real per-cpu PDA memory, so that gcc can understand what
+   memory operations the inline asms() below are performing.  This
+   eliminates the need to make the asms volatile or have memory
+   clobbers, so gcc can readily analyse them. */
+extern struct i386_pda _proxy_pda;
+
+#define pda_to_op(op,field,val)						\
+	do {								\
+		typedef typeof(_proxy_pda.field) T__;			\
+		if (0) { T__ tmp__; tmp__ = (val); }			\
+		switch (sizeof(_proxy_pda.field)) {			\
+		case 1:							\
+			asm(op "b %1,%%gs:%c2"				\
+			    : "+m" (_proxy_pda.field)			\
+			    :"ri" ((T__)val),				\
+			     "i"(pda_offset(field)));			\
+			break;						\
+		case 2:							\
+			asm(op "w %1,%%gs:%c2"				\
+			    : "+m" (_proxy_pda.field)			\
+			    :"ri" ((T__)val),				\
+			     "i"(pda_offset(field)));			\
+			break;						\
+		case 4:							\
+			asm(op "l %1,%%gs:%c2"				\
+			    : "+m" (_proxy_pda.field)			\
+			    :"ri" ((T__)val),				\
+			     "i"(pda_offset(field)));			\
+			break;						\
+		default: __bad_pda_field();				\
+		}							\
+	} while (0)
+
+#define pda_from_op(op,field)						\
+	({								\
+		typeof(_proxy_pda.field) ret__;				\
+		switch (sizeof(_proxy_pda.field)) {			\
+		case 1:							\
+			asm(op "b %%gs:%c1,%0"				\
+			    : "=r" (ret__)				\
+			    : "i" (pda_offset(field)),			\
+			      "m" (_proxy_pda.field));			\
+			break;						\
+		case 2:							\
+			asm(op "w %%gs:%c1,%0"				\
+			    : "=r" (ret__)				\
+			    : "i" (pda_offset(field)),			\
+			      "m" (_proxy_pda.field));			\
+			break;						\
+		case 4:							\
+			asm(op "l %%gs:%c1,%0"				\
+			    : "=r" (ret__)				\
+			    : "i" (pda_offset(field)),			\
+			      "m" (_proxy_pda.field));			\
+			break;						\
+		default: __bad_pda_field();				\
+		}							\
+		ret__; })
+
+/* Return a pointer to a pda field */
+#define pda_addr(field)							\
+	((typeof(_proxy_pda.field) *)((unsigned char *)read_pda(_pda) + \
+				      pda_offset(field)))
+
+#define read_pda(field) pda_from_op("mov",field)
+#define write_pda(field,val) pda_to_op("mov",field,val)
+#define add_pda(field,val) pda_to_op("add",field,val)
+#define sub_pda(field,val) pda_to_op("sub",field,val)
+#define or_pda(field,val) pda_to_op("or",field,val)
+
+#endif	/* _I386_PDA_H */
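The PDA is one per-CPU record reached through the %gs segment base, so read_pda()/write_pda() compile down to a single %gs-relative access; that is what lets current.h fetch the current task as read_pda(pcurrent). A userspace model of the idea, with a plain pointer standing in for %gs and hypothetical names:

#include <stdio.h>

struct task_stub;				/* stands in for struct task_struct */

struct pda_model {
	struct pda_model *_pda;			/* pointer to self, as in i386_pda */
	int cpu_number;
	const char *pcurrent;			/* stands in for the current task */
};

static struct pda_model cpu0_pda = { &cpu0_pda, 0, "swapper" };
static struct pda_model *this_pda = &cpu0_pda;	/* the kernel uses %gs instead */

#define read_pda_model(field)		(this_pda->field)
#define write_pda_model(field, val)	(this_pda->field = (val))

int main(void)
{
	write_pda_model(pcurrent, "init");
	printf("cpu%d current: %s\n",
	       read_pda_model(cpu_number), read_pda_model(pcurrent));
	return 0;
}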
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h
index 5764afa..510ae1d 100644
--- a/include/asm-i386/percpu.h
+++ b/include/asm-i386/percpu.h
@@ -1,6 +1,31 @@
 #ifndef __ARCH_I386_PERCPU__
 #define __ARCH_I386_PERCPU__
 
+#ifndef __ASSEMBLY__
 #include <asm-generic/percpu.h>
+#else
+
+/*
+ * PER_CPU finds an address of a per-cpu variable.
+ *
+ * Args:
+ *    var - variable name
+ *    cpu - 32bit register containing the current CPU number
+ *
+ * The resulting address is stored in the "cpu" argument.
+ *
+ * Example:
+ *    PER_CPU(cpu_gdt_descr, %ebx)
+ */
+#ifdef CONFIG_SMP
+#define PER_CPU(var, cpu) \
+	movl __per_cpu_offset(,cpu,4), cpu;	\
+	addl $per_cpu__/**/var, cpu;
+#else /* ! SMP */
+#define PER_CPU(var, cpu) \
+	movl $per_cpu__/**/var, cpu;
+#endif	/* SMP */
+
+#endif /* !__ASSEMBLY__ */
 
 #endif /* __ARCH_I386_PERCPU__ */
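
For comparison, a hedged C-level equivalent of the address arithmetic the SMP PER_CPU() assembler macro performs (the helper name is invented; __per_cpu_offset is the table the macro indexes with a 4-byte scale):

/* Sketch: PER_CPU(var, cpu) leaves &per_cpu__var + __per_cpu_offset[cpu]
 * in the chosen register; this is the same computation in C. */
extern unsigned long __per_cpu_offset[];

static inline void *demo_per_cpu_addr(void *per_cpu_var, int cpu)
{
	return (char *)per_cpu_var + __per_cpu_offset[cpu];
}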
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 8d8d3b9..38c3fcc 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -1,8 +1,6 @@
 #ifndef _I386_PGTABLE_2LEVEL_H
 #define _I386_PGTABLE_2LEVEL_H
 
-#include <asm-generic/pgtable-nopmd.h>
-
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
 #define pgd_ERROR(e) \
@@ -13,17 +11,19 @@
  * within a page table are directly modified.  Thus, the following
  * hook is made available.
  */
+#ifndef CONFIG_PARAVIRT
 #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+#endif
+
 #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
 #define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
 
 #define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 #define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte_low, 0))
+#define raw_ptep_get_and_clear(xp)	__pte(xchg(&(xp)->pte_low, 0))
 
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
 #define pte_none(x)		(!(x).pte_low)
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index c2d701e..7a2318f 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -1,8 +1,6 @@
 #ifndef _I386_PGTABLE_3LEVEL_H
 #define _I386_PGTABLE_3LEVEL_H
 
-#include <asm-generic/pgtable-nopud.h>
-
 /*
  * Intel Physical Address Extension (PAE) Mode - three-level page
  * tables on PPro+ CPUs.
@@ -44,6 +42,7 @@
 	return pte_x(pte);
 }
 
+#ifndef CONFIG_PARAVIRT
 /* Rules for using set_pte: the pte being assigned *must* be
  * either not present or in a state where the hardware will
  * not attempt to update the pte.  In places where this is
@@ -81,25 +80,6 @@
 		(*(pudptr) = (pudval))
 
 /*
- * Pentium-II erratum A13: in PAE mode we explicitly have to flush
- * the TLB via cr3 if the top-level pgd is changed...
- * We do not let the generic code free and clear pgd entries due to
- * this erratum.
- */
-static inline void pud_clear (pud_t * pud) { }
-
-#define pud_page(pud) \
-((struct page *) __va(pud_val(pud) & PAGE_MASK))
-
-#define pud_page_vaddr(pud) \
-((unsigned long) __va(pud_val(pud) & PAGE_MASK))
-
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
-			pmd_index(address))
-
-/*
  * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
  * entry, so clear the bottom half first and enforce ordering with a compiler
  * barrier.
@@ -118,9 +98,28 @@
 	smp_wmb();
 	*(tmp + 1) = 0;
 }
+#endif
 
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+/*
+ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
+ * the TLB via cr3 if the top-level pgd is changed...
+ * We do not let the generic code free and clear pgd entries due to
+ * this erratum.
+ */
+static inline void pud_clear (pud_t * pud) { }
+
+#define pud_page(pud) \
+((struct page *) __va(pud_val(pud) & PAGE_MASK))
+
+#define pud_page_vaddr(pud) \
+((unsigned long) __va(pud_val(pud) & PAGE_MASK))
+
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
+			pmd_index(address))
+
+static inline pte_t raw_ptep_get_and_clear(pte_t *ptep)
 {
 	pte_t res;
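
The ordering rule quoted in the comment above ("clear the bottom half first and enforce ordering with a compiler barrier") in a minimal, illustrative form, not taken from the patch:

/* Sketch only: a PAE entry is two 32-bit words.  Clearing the word
 * that holds the Present bit first means the MMU can never observe a
 * half-cleared entry that still looks present. */
static inline void demo_pae_clear(unsigned int *entry)
{
	entry[0] = 0;				/* low word: Present bit goes away */
	asm volatile("" ::: "memory");		/* stand-in for smp_wmb(): keep the store order */
	entry[1] = 0;				/* high word: upper pfn bits */
}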
 
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 7d398f4..e6a4723 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -15,6 +15,7 @@
 #include <asm/processor.h>
 #include <asm/fixmap.h>
 #include <linux/threads.h>
+#include <asm/paravirt.h>
 
 #ifndef _I386_BITOPS_H
 #include <asm/bitops.h>
@@ -34,14 +35,14 @@
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 extern unsigned long empty_zero_page[1024];
 extern pgd_t swapper_pg_dir[1024];
-extern kmem_cache_t *pgd_cache;
-extern kmem_cache_t *pmd_cache;
+extern struct kmem_cache *pgd_cache;
+extern struct kmem_cache *pmd_cache;
 extern spinlock_t pgd_lock;
 extern struct page *pgd_list;
 
-void pmd_ctor(void *, kmem_cache_t *, unsigned long);
-void pgd_ctor(void *, kmem_cache_t *, unsigned long);
-void pgd_dtor(void *, kmem_cache_t *, unsigned long);
+void pmd_ctor(void *, struct kmem_cache *, unsigned long);
+void pgd_ctor(void *, struct kmem_cache *, unsigned long);
+void pgd_dtor(void *, struct kmem_cache *, unsigned long);
 void pgtable_cache_init(void);
 void paging_init(void);
 
@@ -246,6 +247,7 @@
 # include <asm/pgtable-2level.h>
 #endif
 
+#ifndef CONFIG_PARAVIRT
 /*
  * Rules for using pte_update - it must be called after any PTE update which
  * has not been done using the set_pte / clear_pte interfaces.  It is used by
@@ -261,7 +263,7 @@
  */
 #define pte_update(mm, addr, ptep)		do { } while (0)
 #define pte_update_defer(mm, addr, ptep)	do { } while (0)
-
+#endif
 
 /*
  * We only update the dirty/accessed state if we set
@@ -275,7 +277,7 @@
 do {									\
 	if (dirty) {							\
 		(ptep)->pte_low = (entry).pte_low;			\
-		pte_update_defer((vma)->vm_mm, (addr), (ptep));		\
+		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
 		flush_tlb_page(vma, address);				\
 	}								\
 } while (0)
@@ -305,7 +307,7 @@
 	__dirty = pte_dirty(*(ptep));					\
 	if (__dirty) {							\
 		clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low);		\
-		pte_update_defer((vma)->vm_mm, (addr), (ptep));		\
+		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
 		flush_tlb_page(vma, address);				\
 	}								\
 	__dirty;							\
@@ -318,12 +320,20 @@
 	__young = pte_young(*(ptep));					\
 	if (__young) {							\
 		clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low);	\
-		pte_update_defer((vma)->vm_mm, (addr), (ptep));		\
+		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
 		flush_tlb_page(vma, address);				\
 	}								\
 	__young;							\
 })
 
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	pte_t pte = raw_ptep_get_and_clear(ptep);
+	pte_update(mm, addr, ptep);
+	return pte;
+}
+
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
 {
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index e0ddca9..a52d654 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -20,6 +20,7 @@
 #include <linux/threads.h>
 #include <asm/percpu.h>
 #include <linux/cpumask.h>
+#include <linux/init.h>
 
 /* flag for disabling the tsc */
 extern int tsc_disable;
@@ -72,6 +73,7 @@
 #endif
 	unsigned char x86_max_cores;	/* cpuid returned max cores value */
 	unsigned char apicid;
+	unsigned short x86_clflush_size;
 #ifdef CONFIG_SMP
 	unsigned char booted_cores;	/* number of cores as seen by OS */
 	__u8 phys_proc_id; 		/* Physical processor id. */
@@ -111,6 +113,8 @@
 extern	int cpu_llc_id[NR_CPUS];
 extern char ignore_fpu_irq;
 
+void __init cpu_detect(struct cpuinfo_x86 *c);
+
 extern void identify_cpu(struct cpuinfo_x86 *);
 extern void print_cpu_info(struct cpuinfo_x86 *);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
@@ -143,8 +147,8 @@
 #define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
 #define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
 
-static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
-			   unsigned int *ecx, unsigned int *edx)
+static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
+					 unsigned int *ecx, unsigned int *edx)
 {
 	/* ecx is often an input as well as an output. */
 	__asm__("cpuid"
@@ -155,59 +159,6 @@
 		: "0" (*eax), "2" (*ecx));
 }
 
-/*
- * Generic CPUID function
- * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
- * resulting in stale register contents being returned.
- */
-static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
-{
-	*eax = op;
-	*ecx = 0;
-	__cpuid(eax, ebx, ecx, edx);
-}
-
-/* Some CPUID calls want 'count' to be placed in ecx */
-static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
-			       int *edx)
-{
-	*eax = op;
-	*ecx = count;
-	__cpuid(eax, ebx, ecx, edx);
-}
-
-/*
- * CPUID functions returning a single datum
- */
-static inline unsigned int cpuid_eax(unsigned int op)
-{
-	unsigned int eax, ebx, ecx, edx;
-
-	cpuid(op, &eax, &ebx, &ecx, &edx);
-	return eax;
-}
-static inline unsigned int cpuid_ebx(unsigned int op)
-{
-	unsigned int eax, ebx, ecx, edx;
-
-	cpuid(op, &eax, &ebx, &ecx, &edx);
-	return ebx;
-}
-static inline unsigned int cpuid_ecx(unsigned int op)
-{
-	unsigned int eax, ebx, ecx, edx;
-
-	cpuid(op, &eax, &ebx, &ecx, &edx);
-	return ecx;
-}
-static inline unsigned int cpuid_edx(unsigned int op)
-{
-	unsigned int eax, ebx, ecx, edx;
-
-	cpuid(op, &eax, &ebx, &ecx, &edx);
-	return edx;
-}
-
 #define load_cr3(pgdir) write_cr3(__pa(pgdir))
 
 /*
@@ -473,6 +424,7 @@
 	.vm86_info = NULL,						\
 	.sysenter_cs = __KERNEL_CS,					\
 	.io_bitmap_ptr = NULL,						\
+	.gs = __KERNEL_PDA,						\
 }
 
 /*
@@ -489,18 +441,9 @@
 	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },		\
 }
 
-static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
-{
-	tss->esp0 = thread->esp0;
-	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
-	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
-		tss->ss1 = thread->sysenter_cs;
-		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
-	}
-}
-
 #define start_thread(regs, new_eip, new_esp) do {		\
-	__asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));	\
+	__asm__("movl %0,%%fs": :"r" (0));			\
+	regs->xgs = 0;						\
 	set_fs(USER_DS);					\
 	regs->xds = __USER_DS;					\
 	regs->xes = __USER_DS;					\
@@ -510,33 +453,6 @@
 	regs->esp = new_esp;					\
 } while (0)
 
-/*
- * These special macros can be used to get or set a debugging register
- */
-#define get_debugreg(var, register)				\
-		__asm__("movl %%db" #register ", %0"		\
-			:"=r" (var))
-#define set_debugreg(value, register)			\
-		__asm__("movl %0,%%db" #register		\
-			: /* no output */			\
-			:"r" (value))
-
-/*
- * Set IOPL bits in EFLAGS from given mask
- */
-static inline void set_iopl_mask(unsigned mask)
-{
-	unsigned int reg;
-	__asm__ __volatile__ ("pushfl;"
-			      "popl %0;"
-			      "andl %1, %0;"
-			      "orl %2, %0;"
-			      "pushl %0;"
-			      "popfl"
-				: "=&r" (reg)
-				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
-}
-
 /* Forward declaration, a strange C thing */
 struct task_struct;
 struct mm_struct;
@@ -628,6 +544,105 @@
 
 #define cpu_relax()	rep_nop()
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define paravirt_enabled() 0
+#define __cpuid native_cpuid
+
+static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+{
+	tss->esp0 = thread->esp0;
+	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
+	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+		tss->ss1 = thread->sysenter_cs;
+		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+	}
+}
+
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, register)				\
+		__asm__("movl %%db" #register ", %0"		\
+			:"=r" (var))
+#define set_debugreg(value, register)			\
+		__asm__("movl %0,%%db" #register		\
+			: /* no output */			\
+			:"r" (value))
+
+#define set_iopl_mask native_set_iopl_mask
+#endif /* CONFIG_PARAVIRT */
+
+/*
+ * Set IOPL bits in EFLAGS from given mask
+ */
+static fastcall inline void native_set_iopl_mask(unsigned mask)
+{
+	unsigned int reg;
+	__asm__ __volatile__ ("pushfl;"
+			      "popl %0;"
+			      "andl %1, %0;"
+			      "orl %2, %0;"
+			      "pushl %0;"
+			      "popfl"
+				: "=&r" (reg)
+				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
+}
+
+/*
+ * Generic CPUID function
+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
+ * resulting in stale register contents being returned.
+ */
+static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
+{
+	*eax = op;
+	*ecx = 0;
+	__cpuid(eax, ebx, ecx, edx);
+}
+
+/* Some CPUID calls want 'count' to be placed in ecx */
+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
+			       int *edx)
+{
+	*eax = op;
+	*ecx = count;
+	__cpuid(eax, ebx, ecx, edx);
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	cpuid(op, &eax, &ebx, &ecx, &edx);
+	return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	cpuid(op, &eax, &ebx, &ecx, &edx);
+	return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	cpuid(op, &eax, &ebx, &ecx, &edx);
+	return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	cpuid(op, &eax, &ebx, &ecx, &edx);
+	return edx;
+}
+
 /* generic versions from gas */
 #define GENERIC_NOP1	".byte 0x90\n"
 #define GENERIC_NOP2    	".byte 0x89,0xf6\n"
@@ -727,4 +742,7 @@
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
+extern int init_gdt(int cpu, struct task_struct *idle);
+extern void secondary_cpu_init(void);
+
 #endif /* __ASM_I386_PROCESSOR_H */
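
A small usage sketch for the cpuid() helpers that now live in the !CONFIG_PARAVIRT branch. Leaf 0 returns the maximum basic leaf in EAX and the vendor string in EBX, EDX, ECX; the function name is illustrative and the usual kernel headers (string.h, kernel.h) are assumed:

static void demo_show_vendor(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	cpuid(0, &eax, &ebx, &ecx, &edx);
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';
	printk(KERN_INFO "max basic leaf %u, vendor %s\n", eax, vendor);
}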
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index d505f50..bdbc894 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -16,6 +16,8 @@
 	long eax;
 	int  xds;
 	int  xes;
+	/* int  xfs; */
+	int  xgs;
 	long orig_eax;
 	long eip;
 	int  xcs;
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index bc598d6..041906f 100644
--- a/include/asm-i386/rwsem.h
+++ b/include/asm-i386/rwsem.h
@@ -75,8 +75,8 @@
 
 
 #define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
-	__RWSEM_DEP_MAP_INIT(name) }
+{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h
index b7ab596..3c796af 100644
--- a/include/asm-i386/segment.h
+++ b/include/asm-i386/segment.h
@@ -39,7 +39,7 @@
  *  25 - APM BIOS support 
  *
  *  26 - ESPFIX small SS
- *  27 - unused
+ *  27 - PDA				[ per-cpu private data area ]
  *  28 - unused
  *  29 - unused
  *  30 - unused
@@ -74,6 +74,9 @@
 #define GDT_ENTRY_ESPFIX_SS		(GDT_ENTRY_KERNEL_BASE + 14)
 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
 
+#define GDT_ENTRY_PDA			(GDT_ENTRY_KERNEL_BASE + 15)
+#define __KERNEL_PDA (GDT_ENTRY_PDA * 8)
+
 #define GDT_ENTRY_DOUBLEFAULT_TSS	31
 
 /*
@@ -128,5 +131,7 @@
 #define SEGMENT_LDT		0x4
 #define SEGMENT_GDT		0x0
 
+#ifndef CONFIG_PARAVIRT
 #define get_kernel_rpl()  0
 #endif
+#endif
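
Worked out with the numbering from the descriptor-map comment above (the PDA sits in descriptor 27, so GDT_ENTRY_KERNEL_BASE must be 12), the new selector value is:

	GDT_ENTRY_PDA = GDT_ENTRY_KERNEL_BASE + 15 = 12 + 15 = 27
	__KERNEL_PDA  = GDT_ENTRY_PDA * 8          = 216     = 0xd8
	/* index << 3 with TI = 0 (GDT) and RPL = 0, like the other kernel selectors */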
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index 2734909..67659db 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -6,6 +6,8 @@
 #ifndef _i386_SETUP_H
 #define _i386_SETUP_H
 
+#define COMMAND_LINE_SIZE 256
+
 #ifdef __KERNEL__
 #include <linux/pfn.h>
 
@@ -14,10 +16,8 @@
  */
 #define MAXMEM_PFN	PFN_DOWN(MAXMEM)
 #define MAX_NONPAE_PFN	(1 << 20)
-#endif
 
 #define PARAM_SIZE 4096
-#define COMMAND_LINE_SIZE 256
 
 #define OLD_CL_MAGIC_ADDR	0x90020
 #define OLD_CL_MAGIC		0xA33F
@@ -70,6 +70,7 @@
 struct e820entry;
 
 char * __init machine_specific_memory_setup(void);
+char *memory_setup(void);
 
 int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
 int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
@@ -78,4 +79,6 @@
 
 #endif /* __ASSEMBLY__ */
 
+#endif  /*  __KERNEL__  */
+
 #endif /* _i386_SETUP_H */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index bd59c15..64fe624 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/threads.h>
 #include <linux/cpumask.h>
+#include <asm/pda.h>
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -56,7 +57,7 @@
  * from the initial startup. We map APIC_BASE very early in page_setup(),
  * so this is correct in the x86 case.
  */
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+#define raw_smp_processor_id() (read_pda(cpu_number))
 
 extern cpumask_t cpu_callout_map;
 extern cpumask_t cpu_callin_map;
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index c18b71f..d3bcebe 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -7,8 +7,14 @@
 #include <asm/processor.h>
 #include <linux/compiler.h>
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 #define CLI_STRING	"cli"
 #define STI_STRING	"sti"
+#define CLI_STI_CLOBBERS
+#define CLI_STI_INPUT_ARGS
+#endif /* CONFIG_PARAVIRT */
 
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -53,25 +59,28 @@
 {
 	asm volatile(
 		"\n1:\t"
-		LOCK_PREFIX " ; decb %0\n\t"
+		LOCK_PREFIX " ; decb %[slock]\n\t"
 		"jns 5f\n"
 		"2:\t"
-		"testl $0x200, %1\n\t"
+		"testl $0x200, %[flags]\n\t"
 		"jz 4f\n\t"
 		STI_STRING "\n"
 		"3:\t"
 		"rep;nop\n\t"
-		"cmpb $0, %0\n\t"
+		"cmpb $0, %[slock]\n\t"
 		"jle 3b\n\t"
 		CLI_STRING "\n\t"
 		"jmp 1b\n"
 		"4:\t"
 		"rep;nop\n\t"
-		"cmpb $0, %0\n\t"
+		"cmpb $0, %[slock]\n\t"
 		"jg 1b\n\t"
 		"jmp 4b\n"
 		"5:\n\t"
-		: "+m" (lock->slock) : "r" (flags) : "memory");
+		: [slock] "+m" (lock->slock)
+		: [flags] "r" (flags)
+	 	  CLI_STI_INPUT_ARGS
+		: "memory" CLI_STI_CLOBBERS);
 }
 #endif
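
A hedged C rendering of what the reworked asm loop above does (the helper name is invented; 0x200 is EFLAGS.IF, the bit tested by "testl $0x200"):

/* Sketch only: if the lock is contended and the caller originally had
 * interrupts enabled, re-enable them while spinning and disable them
 * again just before retrying the atomic decrement. */
static inline void demo_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	for (;;) {
		if (__raw_spin_trylock(lock))		/* the "decb ; jns" fast path */
			return;
		if (flags & 0x200)			/* caller had IF set */
			local_irq_enable();		/* STI_STRING */
		while (__raw_spin_is_locked(lock))
			cpu_relax();			/* "rep;nop" */
		if (flags & 0x200)
			local_irq_disable();		/* CLI_STRING */
	}
}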
 
diff --git a/include/asm-i386/spinlock_types.h b/include/asm-i386/spinlock_types.h
index 59efe84..4da9345 100644
--- a/include/asm-i386/spinlock_types.h
+++ b/include/asm-i386/spinlock_types.h
@@ -6,13 +6,13 @@
 #endif
 
 typedef struct {
-	volatile unsigned int slock;
+	unsigned int slock;
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 1 }
 
 typedef struct {
-	volatile unsigned int lock;
+	unsigned int lock;
 } raw_rwlock_t;
 
 #define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
diff --git a/include/asm-i386/suspend.h b/include/asm-i386/suspend.h
index 08be1e5..8dbaafe 100644
--- a/include/asm-i386/suspend.h
+++ b/include/asm-i386/suspend.h
@@ -6,29 +6,14 @@
 #include <asm/desc.h>
 #include <asm/i387.h>
 
-static inline int
-arch_prepare_suspend(void)
-{
-	/* If you want to make non-PSE machine work, turn off paging
-           in swsusp_arch_suspend. swsusp_pg_dir should have identity mapping, so
-           it could work...  */
-	if (!cpu_has_pse) {
-		printk(KERN_ERR "PSE is required for swsusp.\n");
-		return -EPERM;
-	}
-	return 0;
-}
+static inline int arch_prepare_suspend(void) { return 0; }
 
 /* image of the saved processor state */
 struct saved_context {
   	u16 es, fs, gs, ss;
 	unsigned long cr0, cr2, cr3, cr4;
-	u16 gdt_pad;
-	u16 gdt_limit;
-	unsigned long gdt_base;
-	u16 idt_pad;
-	u16 idt_limit;
-	unsigned long idt_base;
+	struct Xgt_desc_struct gdt;
+	struct Xgt_desc_struct idt;
 	u16 ldt;
 	u16 tss;
 	unsigned long tr;
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index a6dabbc..a6d20d9 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -88,6 +88,9 @@
 #define savesegment(seg, value) \
 	asm volatile("mov %%" #seg ",%0":"=rm" (value))
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 #define read_cr0() ({ \
 	unsigned int __dummy; \
 	__asm__ __volatile__( \
@@ -139,17 +142,18 @@
 #define write_cr4(x) \
 	__asm__ __volatile__("movl %0,%%cr4": :"r" (x))
 
-/*
- * Clear and set 'TS' bit respectively
- */
+#define wbinvd() \
+	__asm__ __volatile__ ("wbinvd": : :"memory")
+
+/* Clear the 'TS' bit */
 #define clts() __asm__ __volatile__ ("clts")
+#endif /* CONFIG_PARAVIRT */
+
+/* Set the 'TS' bit */
 #define stts() write_cr0(8 | read_cr0())
 
 #endif	/* __KERNEL__ */
 
-#define wbinvd() \
-	__asm__ __volatile__ ("wbinvd": : :"memory")
-
 static inline unsigned long get_limit(unsigned long segment)
 {
 	unsigned long __limit;
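
For reference, the literal 8 in stts() above is the Task Switched flag in CR0 (bit 3); the symbolic name below is only for illustration:

#define DEMO_X86_CR0_TS	0x00000008	/* CR0 bit 3: Task Switched */
/* stts() is write_cr0(DEMO_X86_CR0_TS | read_cr0()); clts() clears the same bit */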
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 54d6d7a..46d32ad 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -95,15 +95,7 @@
 
 /* thread information allocation */
 #ifdef CONFIG_DEBUG_STACK_USAGE
-#define alloc_thread_info(tsk)					\
-	({							\
-		struct thread_info *ret;			\
-								\
-		ret = kmalloc(THREAD_SIZE, GFP_KERNEL);		\
-		if (ret)					\
-			memset(ret, 0, THREAD_SIZE);		\
-		ret;						\
-	})
+#define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL)
 #else
 #define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL)
 #endif
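
The replacement relies on kzalloc() being kmalloc() plus zeroing; a sketch of that identity (the helper name is illustrative, not kernel API):

static inline void *demo_alloc_zeroed(size_t size, gfp_t flags)
{
	void *p = kmalloc(size, flags);		/* what the removed block did by hand */

	if (p)
		memset(p, 0, size);
	return p;				/* observably the same as kzalloc(size, flags) */
}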
diff --git a/include/asm-i386/time.h b/include/asm-i386/time.h
new file mode 100644
index 0000000..ea8065a
--- /dev/null
+++ b/include/asm-i386/time.h
@@ -0,0 +1,41 @@
+#ifndef _ASMi386_TIME_H
+#define _ASMi386_TIME_H
+
+#include <linux/efi.h>
+#include "mach_time.h"
+
+static inline unsigned long native_get_wallclock(void)
+{
+	unsigned long retval;
+
+	if (efi_enabled)
+		retval = efi_get_time();
+	else
+		retval = mach_get_cmos_time();
+
+	return retval;
+}
+
+static inline int native_set_wallclock(unsigned long nowtime)
+{
+	int retval;
+
+	if (efi_enabled)
+		retval = efi_set_rtc_mmss(nowtime);
+	else
+		retval = mach_set_rtc_mmss(nowtime);
+
+	return retval;
+}
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else /* !CONFIG_PARAVIRT */
+
+#define get_wallclock() native_get_wallclock()
+#define set_wallclock(x) native_set_wallclock(x)
+#define do_time_init() time_init_hook()
+
+#endif /* CONFIG_PARAVIRT */
+
+#endif
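
A small usage sketch showing how callers stay backend-neutral: the same code builds against either the native #defines above or the paravirt versions pulled in from <asm/paravirt.h>. The function name and message are illustrative:

static void demo_log_wallclock(void)
{
	unsigned long now = get_wallclock();	/* CMOS RTC, EFI time, or a hypervisor backend */

	printk(KERN_INFO "persistent clock reads %lu\n", now);
}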
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index 360648b..4dd8284 100644
--- a/include/asm-i386/tlbflush.h
+++ b/include/asm-i386/tlbflush.h
@@ -4,7 +4,15 @@
 #include <linux/mm.h>
 #include <asm/processor.h>
 
-#define __flush_tlb()							\
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define __flush_tlb() __native_flush_tlb()
+#define __flush_tlb_global() __native_flush_tlb_global()
+#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
+#endif
+
+#define __native_flush_tlb()						\
 	do {								\
 		unsigned int tmpreg;					\
 									\
@@ -19,7 +27,7 @@
  * Global pages have to be flushed a bit differently. Not a real
  * performance problem because this does not happen often.
  */
-#define __flush_tlb_global()						\
+#define __native_flush_tlb_global()					\
 	do {								\
 		unsigned int tmpreg, cr4, cr4_orig;			\
 									\
@@ -36,6 +44,9 @@
 			: "memory");					\
 	} while (0)
 
+#define __native_flush_tlb_single(addr) 				\
+	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
+
 # define __flush_tlb_all()						\
 	do {								\
 		if (cpu_has_pge)					\
@@ -46,9 +57,6 @@
 
 #define cpu_has_invlpg	(boot_cpu_data.x86 > 3)
 
-#define __flush_tlb_single(addr) \
-	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
-
 #ifdef CONFIG_X86_INVLPG
 # define __flush_tlb_one(addr) __flush_tlb_single(addr)
 #else
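
A hedged sketch of choosing between the renamed primitives, mirroring the __flush_tlb_one() selection above (the helper name is invented):

static inline void demo_flush_one(unsigned long addr)
{
	if (cpu_has_invlpg)
		__flush_tlb_single(addr);	/* invlpg: drop a single translation */
	else
		__flush_tlb();			/* pre-invlpg CPUs: flush everything via a %cr3 reload */
}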
diff --git a/include/asm-i386/types.h b/include/asm-i386/types.h
index 4b4b295..ad0a55b 100644
--- a/include/asm-i386/types.h
+++ b/include/asm-i386/types.h
@@ -57,16 +57,6 @@
 #endif
 typedef u64 dma64_addr_t;
 
-#ifdef CONFIG_LBD
-typedef u64 sector_t;
-#define HAVE_SECTOR_T
-#endif
-
-#ifdef CONFIG_LSF
-typedef u64 blkcnt_t;
-#define HAVE_BLKCNT_T
-#endif
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index beeeaf6..833fa17 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -329,104 +329,6 @@
 #ifdef __KERNEL__
 
 #define NR_syscalls 320
-#include <linux/err.h>
-
-/*
- * user-visible error numbers are in the range -1 - -MAX_ERRNO: see
- * <asm-i386/errno.h>
- */
-#define __syscall_return(type, res) \
-do { \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-		errno = -(res); \
-		res = -1; \
-	} \
-	return (type) (res); \
-} while (0)
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-__asm__ volatile ("int $0x80" \
-	: "=a" (__res) \
-	: "0" (__NR_##name)); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"ri" ((long)(arg1)) : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
-		  "d" ((long)(arg3)) : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
-	  "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \
-__syscall_return(type,__res); \
-} 
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	  type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; movl %1,%%eax ; " \
-                  "int $0x80 ; pop %%ebx" \
-	: "=a" (__res) \
-	: "i" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
-	  "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	  type5,arg5,type6,arg6) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
-{ \
-long __res; \
-  struct { long __a1; long __a6; } __s = { (long)arg1, (long)arg6 }; \
-__asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \
-                  "movl 0(%2),%%ebx ; movl %1,%%eax ; int $0x80 ; " \
-                  "pop %%ebx ;  pop %%ebp" \
-	: "=a" (__res) \
-	: "i" (__NR_##name),"0" ((long)(&__s)),"c" ((long)(arg2)), \
-	  "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
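
Userspace sketch (not part of the kernel patch): code that relied on the removed _syscallN macros typically moves to the C library's syscall(2) wrapper instead of open-coding int $0x80:

#include <unistd.h>
#include <sys/syscall.h>
#include <stdio.h>

int main(void)
{
	long pid = syscall(SYS_getpid);		/* what _syscall0(pid_t, getpid) used to generate */

	printf("pid = %ld\n", pid);
	return 0;
}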
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h
index 5031d69..aa2c931 100644
--- a/include/asm-i386/unwind.h
+++ b/include/asm-i386/unwind.h
@@ -71,6 +71,7 @@
 	info->regs.xss = __KERNEL_DS;
 	info->regs.xds = __USER_DS;
 	info->regs.xes = __USER_DS;
+	info->regs.xgs = __KERNEL_PDA;
 }
 
 extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *,
@@ -78,17 +79,13 @@
                                                                           void *arg),
                                                void *arg);
 
-static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
+static inline int arch_unw_user_mode(/*const*/ struct unwind_frame_info *info)
 {
-#if 0 /* This can only work when selector register and EFLAGS saves/restores
-         are properly annotated (and tracked in UNW_REGISTER_INFO). */
-	return user_mode_vm(&info->regs);
-#else
-	return info->regs.eip < PAGE_OFFSET
+	return user_mode_vm(&info->regs)
+	       || info->regs.eip < PAGE_OFFSET
 	       || (info->regs.eip >= __fix_to_virt(FIX_VDSO)
-	            && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE)
+	           && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE)
 	       || info->regs.esp < PAGE_OFFSET;
-#endif
 }
 
 #else
diff --git a/include/asm-i386/vm86.h b/include/asm-i386/vm86.h
index 952fd69..a5edf51 100644
--- a/include/asm-i386/vm86.h
+++ b/include/asm-i386/vm86.h
@@ -145,26 +145,13 @@
  * at the end of the structure. Look at ptrace.h to see the "normal"
  * setup. For user space layout see 'struct vm86_regs' above.
  */
+#include <asm/ptrace.h>
 
 struct kernel_vm86_regs {
 /*
  * normal regs, with special meaning for the segment descriptors..
  */
-	long ebx;
-	long ecx;
-	long edx;
-	long esi;
-	long edi;
-	long ebp;
-	long eax;
-	long __null_ds;
-	long __null_es;
-	long orig_eax;
-	long eip;
-	unsigned short cs, __csh;
-	long eflags;
-	long esp;
-	unsigned short ss, __ssh;
+	struct pt_regs pt;
 /*
  * these are specific to v86 mode:
  */
diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild
index 15818a1..4a1e48b 100644
--- a/include/asm-ia64/Kbuild
+++ b/include/asm-ia64/Kbuild
@@ -10,7 +10,6 @@
 header-y += perfmon_default_smpl.h
 header-y += ptrace_offsets.h
 header-y += rse.h
-header-y += setup.h
 header-y += ucontext.h
 
 unifdef-y += perfmon.h
diff --git a/include/asm-ia64/checksum.h b/include/asm-ia64/checksum.h
index 1f230ff..2b78582 100644
--- a/include/asm-ia64/checksum.h
+++ b/include/asm-ia64/checksum.h
@@ -10,23 +10,21 @@
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
  */
-extern unsigned short ip_fast_csum (unsigned char * iph, unsigned int ihl);
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /*
  * Computes the checksum of the TCP/UDP pseudo-header returns a 16-bit
  * checksum, already complemented
  */
-extern unsigned short int csum_tcpudp_magic (unsigned long saddr,
-					     unsigned long daddr,
+extern __sum16 csum_tcpudp_magic (__be32 saddr, __be32 daddr,
 					     unsigned short len,
 					     unsigned short proto,
-					     unsigned int sum);
+					     __wsum sum);
 
-extern unsigned int csum_tcpudp_nofold (unsigned long saddr,
-					unsigned long daddr,
+extern __wsum csum_tcpudp_nofold (__be32 saddr, __be32 daddr,
 					unsigned short len,
 					unsigned short proto,
-					unsigned int sum);
+					__wsum sum);
 
 /*
  * Computes the checksum of a memory block at buff, length len,
@@ -40,8 +38,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern unsigned int csum_partial (const unsigned char * buff, int len,
-				  unsigned int sum);
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * Same as csum_partial, but copies from src while it checksums.
@@ -49,28 +46,34 @@
  * Here it is even more important to align src and dst on a 32-bit (or
  * even better 64-bit) boundary.
  */
-extern unsigned int csum_partial_copy_from_user (const char *src, char *dst,
-						 int len, unsigned int sum,
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+						 int len, __wsum sum,
 						 int *errp);
 
-extern unsigned int csum_partial_copy_nocheck (const char *src, char *dst,
-					       int len, unsigned int sum);
+extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
+					       int len, __wsum sum);
 
 /*
  * This routine is used for miscellaneous IP-like checksums, mainly in
  * icmp.c
  */
-extern unsigned short ip_compute_csum (unsigned char *buff, int len);
+extern __sum16 ip_compute_csum(const void *buff, int len);
 
 /*
  * Fold a partial checksum without adding pseudo headers.
  */
-static inline unsigned short
-csum_fold (unsigned int sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
+	u32 sum = (__force u32)csum;
 	sum = (sum & 0xffff) + (sum >> 16);
 	sum = (sum & 0xffff) + (sum >> 16);
-	return ~sum;
+	return (__force __sum16)~sum;
 }
 
+#define _HAVE_ARCH_IPV6_CSUM	1
+struct in6_addr;
+extern unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
+	struct in6_addr *daddr, __u32 len, unsigned short proto,
+	unsigned int csum);
+
 #endif /* _ASM_IA64_CHECKSUM_H */
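
A worked example of the rewritten csum_fold() above, folding an illustrative value:

	csum             = 0x2ffff001
	0x2fff + 0xf001  = 0x00012000	/* add the two 16-bit halves */
	0x0001 + 0x2000  = 0x00002001	/* fold the carry back in */
	~0x2001 & 0xffff = 0xdffe	/* one's-complement result returned as __sum16 */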
diff --git a/include/asm-ia64/device.h b/include/asm-ia64/device.h
new file mode 100644
index 0000000..3db6daf
--- /dev/null
+++ b/include/asm-ia64/device.h
@@ -0,0 +1,15 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#ifndef _ASM_IA64_DEVICE_H
+#define _ASM_IA64_DEVICE_H
+
+struct dev_archdata {
+#ifdef CONFIG_ACPI
+	void	*acpi_handle;
+#endif
+};
+
+#endif /* _ASM_IA64_DEVICE_H */
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
index 99a8f8e..ebd5887 100644
--- a/include/asm-ia64/dma-mapping.h
+++ b/include/asm-ia64/dma-mapping.h
@@ -50,7 +50,8 @@
 extern int dma_get_cache_alignment(void);
 
 static inline void
-dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync (struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction dir)
 {
 	/*
 	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
@@ -59,6 +60,6 @@
 	mb();
 }
 
-#define dma_is_consistent(dma_handle)	(1)	/* all we do is coherent memory... */
+#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */
 
 #endif /* _ASM_IA64_DMA_MAPPING_H */
diff --git a/include/asm-ia64/futex.h b/include/asm-ia64/futex.h
index 07d77f3..8a98a26 100644
--- a/include/asm-ia64/futex.h
+++ b/include/asm-ia64/futex.h
@@ -59,7 +59,7 @@
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -83,7 +83,7 @@
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
index 855c30a..6311e16 100644
--- a/include/asm-ia64/io.h
+++ b/include/asm-ia64/io.h
@@ -32,7 +32,7 @@
  */
 #define IO_SPACE_LIMIT		0xffffffffffffffffUL
 
-#define MAX_IO_SPACES_BITS		4
+#define MAX_IO_SPACES_BITS		8
 #define MAX_IO_SPACES			(1UL << MAX_IO_SPACES_BITS)
 #define IO_SPACE_BITS			24
 #define IO_SPACE_SIZE			(1UL << IO_SPACE_BITS)
diff --git a/include/asm-ia64/kexec.h b/include/asm-ia64/kexec.h
new file mode 100644
index 0000000..01c36b0
--- /dev/null
+++ b/include/asm-ia64/kexec.h
@@ -0,0 +1,47 @@
+#ifndef _ASM_IA64_KEXEC_H
+#define _ASM_IA64_KEXEC_H
+
+
+/* Maximum physical address we can use pages from */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+/* Maximum address we can reach in physical address mode */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+/* Maximum address we can use for the control code buffer */
+#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+
+#define KEXEC_CONTROL_CODE_SIZE (8192 + 8192 + 4096)
+
+/* The native architecture */
+#define KEXEC_ARCH KEXEC_ARCH_IA_64
+
+#define MAX_NOTE_BYTES 1024
+
+#define kexec_flush_icache_page(page) do { \
+                unsigned long page_addr = (unsigned long)page_address(page); \
+                flush_icache_range(page_addr, page_addr + PAGE_SIZE); \
+        } while(0)
+
+extern struct kimage *ia64_kimage;
+DECLARE_PER_CPU(u64, ia64_mca_pal_base);
+const extern unsigned int relocate_new_kernel_size;
+extern void relocate_new_kernel(unsigned long, unsigned long,
+		struct ia64_boot_param *, unsigned long);
+static inline void
+crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
+{
+}
+extern struct resource efi_memmap_res;
+extern struct resource boot_param_res;
+extern void kdump_smp_send_stop(void);
+extern void kdump_smp_send_init(void);
+extern void kexec_disable_iosapic(void);
+extern void crash_save_this_cpu(void);
+struct rsvd_region;
+extern unsigned long kdump_find_rsvd_region(unsigned long size,
+		struct rsvd_region *rsvd_regions, int n);
+extern void kdump_cpu_freeze(struct unw_frame_info *info, void *arg);
+extern int kdump_status[];
+extern atomic_t kdump_cpu_freezed;
+extern atomic_t kdump_in_progress;
+
+#endif /* _ASM_IA64_KEXEC_H */
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index 7ffbddf..a3891eb 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -36,6 +36,8 @@
 typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
 					u8 size);
 typedef void ia64_mv_migrate_t(struct task_struct * task);
+typedef void ia64_mv_pci_fixup_bus_t (struct pci_bus *);
+typedef void ia64_mv_kernel_launch_event_t(void);
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
@@ -95,6 +97,11 @@
 {
 }
 
+static inline void
+machvec_noop_bus (struct pci_bus *bus)
+{
+}
+
 extern void machvec_setup (char **);
 extern void machvec_timer_interrupt (int, void *);
 extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
@@ -159,6 +166,7 @@
 #  define platform_migrate		ia64_mv.migrate
 #  define platform_setup_msi_irq	ia64_mv.setup_msi_irq
 #  define platform_teardown_msi_irq	ia64_mv.teardown_msi_irq
+#  define platform_pci_fixup_bus	ia64_mv.pci_fixup_bus
 # endif
 
 /* __attribute__((__aligned__(16))) is required to make size of the
@@ -210,6 +218,8 @@
 	ia64_mv_migrate_t *migrate;
 	ia64_mv_setup_msi_irq_t *setup_msi_irq;
 	ia64_mv_teardown_msi_irq_t *teardown_msi_irq;
+	ia64_mv_pci_fixup_bus_t *pci_fixup_bus;
+	ia64_mv_kernel_launch_event_t *kernel_launch_event;
 } __attribute__((__aligned__(16))); /* align attrib? see above comment */
 
 #define MACHVEC_INIT(name)			\
@@ -257,6 +267,7 @@
 	platform_migrate,			\
 	platform_setup_msi_irq,			\
 	platform_teardown_msi_irq,		\
+	platform_pci_fixup_bus,			\
 }
 
 extern struct ia64_machine_vector ia64_mv;
@@ -309,6 +320,9 @@
 #ifndef platform_tlb_migrate_finish
 # define platform_tlb_migrate_finish	machvec_noop_mm
 #endif
+#ifndef platform_kernel_launch_event
+# define platform_kernel_launch_event	machvec_noop
+#endif
 #ifndef platform_dma_init
 # define platform_dma_init		swiotlb_init
 #endif
@@ -416,5 +430,8 @@
 #ifndef platform_teardown_msi_irq
 # define platform_teardown_msi_irq	((ia64_mv_teardown_msi_irq_t*)NULL)
 #endif
+#ifndef platform_pci_fixup_bus
+# define platform_pci_fixup_bus	machvec_noop_bus
+#endif
 
 #endif /* _ASM_IA64_MACHVEC_H */
diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h
index c54b165..eaa2fce 100644
--- a/include/asm-ia64/machvec_sn2.h
+++ b/include/asm-ia64/machvec_sn2.h
@@ -67,8 +67,10 @@
 extern ia64_mv_dma_mapping_error	sn_dma_mapping_error;
 extern ia64_mv_dma_supported		sn_dma_supported;
 extern ia64_mv_migrate_t		sn_migrate;
+extern ia64_mv_kernel_launch_event_t	sn_kernel_launch_event;
 extern ia64_mv_setup_msi_irq_t		sn_setup_msi_irq;
 extern ia64_mv_teardown_msi_irq_t	sn_teardown_msi_irq;
+extern ia64_mv_pci_fixup_bus_t		sn_pci_fixup_bus;
 
 
 /*
@@ -120,6 +122,7 @@
 #define platform_dma_mapping_error		sn_dma_mapping_error
 #define platform_dma_supported		sn_dma_supported
 #define platform_migrate		sn_migrate
+#define platform_kernel_launch_event    sn_kernel_launch_event
 #ifdef CONFIG_PCI_MSI
 #define platform_setup_msi_irq		sn_setup_msi_irq
 #define platform_teardown_msi_irq	sn_teardown_msi_irq
@@ -127,6 +130,7 @@
 #define platform_setup_msi_irq		((ia64_mv_setup_msi_irq_t*)NULL)
 #define platform_teardown_msi_irq	((ia64_mv_teardown_msi_irq_t*)NULL)
 #endif
+#define platform_pci_fixup_bus		sn_pci_fixup_bus
 
 #include <asm/sn/io.h>
 
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index c3b1f86..c8df759 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -15,11 +15,12 @@
  * 	- initrd (optional)
  * 	- command line string
  * 	- kernel code & data
+ * 	- crash dumping code reserved region
  * 	- Kernel memory map built from EFI memory map
  *
  * More could be added if necessary
  */
-#define IA64_MAX_RSVD_REGIONS 6
+#define IA64_MAX_RSVD_REGIONS 7
 
 struct rsvd_region {
 	unsigned long start;	/* virtual address of beginning of element */
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 947cb72..485759b 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -101,7 +101,7 @@
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 extern int ia64_pfn_valid (unsigned long pfn);
-#elif defined(CONFIG_FLATMEM)
+#else
 # define ia64_pfn_valid(pfn) 1
 #endif
 
@@ -110,12 +110,11 @@
 #ifdef CONFIG_DISCONTIGMEM
 # define page_to_pfn(page)	((unsigned long) (page - vmem_map))
 # define pfn_to_page(pfn)	(vmem_map + (pfn))
+#else
+# include <asm-generic/memory_model.h>
 #endif
-#endif
-
-#if defined(CONFIG_FLATMEM) || defined(CONFIG_SPARSEMEM)
-/* FLATMEM always configures mem_map (mem_map = vmem_map if necessary) */
-#include <asm-generic/memory_model.h>
+#else
+# include <asm-generic/memory_model.h>
 #endif
 
 #ifdef CONFIG_FLATMEM
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 4283ddc..bc76815 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -20,6 +20,8 @@
  * 00/05/24     eranian Updated to latest PAL spec, fix structures bugs, added
  * 00/05/25	eranian Support for stack calls, and static physical calls
  * 00/06/18	eranian Support for stacked physical calls
+ * 06/10/26	rja	Support for Intel Itanium Architecture Software Developer's
+ *			Manual Rev 2.2 (Jan 2006)
  */
 
 /*
@@ -69,6 +71,8 @@
 #define PAL_PREFETCH_VISIBILITY	41	/* Make Processor Prefetches Visible */
 #define PAL_LOGICAL_TO_PHYSICAL 42	/* returns information on logical to physical processor mapping */
 #define PAL_CACHE_SHARED_INFO	43	/* returns information on caches shared by logical processor */
+#define PAL_GET_HW_POLICY	48	/* Get current hardware resource sharing policy */
+#define PAL_SET_HW_POLICY	49	/* Set current hardware resource sharing policy */
 
 #define PAL_COPY_PAL		256	/* relocate PAL procedures and PAL PMI */
 #define PAL_HALT_INFO		257	/* return the low power capabilities of processor */
@@ -80,6 +84,11 @@
 #define PAL_SET_PSTATE		263	/* set the P-state */
 #define PAL_BRAND_INFO		274	/* Processor branding information */
 
+#define PAL_GET_PSTATE_TYPE_LASTSET	0
+#define PAL_GET_PSTATE_TYPE_AVGANDRESET	1
+#define PAL_GET_PSTATE_TYPE_AVGNORESET	2
+#define PAL_GET_PSTATE_TYPE_INSTANT	3
+
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
@@ -102,6 +111,7 @@
 						 * cache without sideeffects
 						 * and "restrict" was 1
 						 */
+#define PAL_STATUS_REQUIRES_MEMORY	(-9)	/* Call requires PAL memory buffer */
 
 /* Processor cache level in the heirarchy */
 typedef u64				pal_cache_level_t;
@@ -456,7 +466,9 @@
 						 * by the processor
 						 */
 
-			reserved2	: 11,
+			se		: 1,	/* Shared error.  MCA in a
+						   shared structure */
+			reserved2	: 10,
 			cc		: 1,	/* Cache check */
 			tc		: 1,	/* TLB check */
 			bc		: 1,	/* Bus check */
@@ -487,10 +499,12 @@
 						 * error occurred
 						 */
 			wiv		: 1,	/* Way field valid */
-			reserved2	: 10,
+			reserved2	: 1,
+			dp		: 1,	/* Data poisoned on MBE */
+			reserved3	: 8,
 
 			index		: 20,	/* Cache line index */
-			reserved3	: 2,
+			reserved4	: 2,
 
 			is		: 1,	/* instruction set (1 == ia32) */
 			iv		: 1,	/* instruction set field valid */
@@ -557,7 +571,7 @@
 			type		: 8,	/* Bus xaction type*/
 			sev		: 5,	/* Bus error severity*/
 			hier		: 2,	/* Bus hierarchy level */
-			reserved1	: 1,
+			dp		: 1,	/* Data poisoned on MBE */
 			bsi		: 8,	/* Bus error status
 						 * info
 						 */
@@ -834,7 +848,9 @@
 		u64	pbf_req_bus_parking			:	1;
 		u64	pbf_bus_lock_mask			:	1;
 		u64	pbf_enable_half_xfer_rate		:	1;
-		u64	pbf_reserved2				:	22;
+		u64	pbf_reserved2				:	20;
+		u64	pbf_enable_shared_line_replace		:	1;
+		u64	pbf_enable_exclusive_line_replace	:	1;
 		u64	pbf_disable_xaction_queueing		:	1;
 		u64	pbf_disable_resp_err_check		:	1;
 		u64	pbf_disable_berr_check			:	1;
@@ -1077,6 +1093,24 @@
 	return iprv.status;
 }
 
+/*
+ * Get the current hardware resource sharing policy of the processor
+ */
+static inline s64
+ia64_pal_get_hw_policy (u64 proc_num, u64 *cur_policy, u64 *num_impacted,
+			u64 *la)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_GET_HW_POLICY, proc_num, 0, 0);
+	if (cur_policy)
+		*cur_policy = iprv.v0;
+	if (num_impacted)
+		*num_impacted = iprv.v1;
+	if (la)
+		*la = iprv.v2;
+	return iprv.status;
+}
+
 /* Make the processor enter HALT or one of the implementation dependent low
  * power states where prefetching and execution are suspended and cache and
  * TLB coherency is not maintained.
@@ -1112,10 +1146,10 @@
 
 /* Get the current P-state information */
 static inline s64
-ia64_pal_get_pstate (u64 *pstate_index)
+ia64_pal_get_pstate (u64 *pstate_index, unsigned long type)
 {
 	struct ia64_pal_retval iprv;
-	PAL_CALL_STK(iprv, PAL_GET_PSTATE, 0, 0, 0);
+	PAL_CALL_STK(iprv, PAL_GET_PSTATE, type, 0, 0);
 	*pstate_index = iprv.v0;
 	return iprv.status;
 }
@@ -1401,6 +1435,17 @@
 	return iprv.status;
 }
 
+/*
+ * Set the current hardware resource sharing policy of the processor
+ */
+static inline s64
+ia64_pal_set_hw_policy (u64 policy)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_SET_HW_POLICY, policy, 0, 0);
+	return iprv.status;
+}
+
 /* Cause the processor to enter	SHUTDOWN state, where prefetching and execution are
  * suspended, but cause cache and TLB coherency to be maintained.
  * This is usually called in IA-32 mode.
@@ -1524,12 +1569,15 @@
 	} pal_vm_info_1_s;
 } pal_vm_info_1_u_t;
 
+#define PAL_MAX_PURGES		0xFFFF		/* all ones means unlimited */
+
 typedef union pal_vm_info_2_u {
 	u64			pvi2_val;
 	struct {
 		u64		impl_va_msb	: 8,
 				rid_size	: 8,
-				reserved	: 48;
+				max_purges	: 16,
+				reserved	: 32;
 	} pal_vm_info_2_s;
 } pal_vm_info_2_u_t;
 
diff --git a/include/asm-ia64/pci.h b/include/asm-ia64/pci.h
index ef616fd..825eb7d 100644
--- a/include/asm-ia64/pci.h
+++ b/include/asm-ia64/pci.h
@@ -26,16 +26,18 @@
 struct pci_dev;
 
 /*
- * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct correspondence
- * between device bus addresses and CPU physical addresses.  Platforms with a hardware I/O
- * MMU _must_ turn this off to suppress the bounce buffer handling code in the block and
- * network device layers.  Platforms with separate bus address spaces _must_ turn this off
- * and provide a device DMA mapping implementation that takes care of the necessary
+ * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct
+ * correspondence between device bus addresses and CPU physical addresses.
+ * Platforms with a hardware I/O MMU _must_ turn this off to suppress the
+ * bounce buffer handling code in the block and network device layers.
+ * Platforms with separate bus address spaces _must_ turn this off and provide
+ * a device DMA mapping implementation that takes care of the necessary
  * address translation.
  *
- * For now, the ia64 platforms which may have separate/multiple bus address spaces all
- * have I/O MMUs which support the merging of physically discontiguous buffers, so we can
- * use that as the sole factor to determine the setting of PCI_DMA_BUS_IS_PHYS.
+ * For now, the ia64 platforms which may have separate/multiple bus address
+ * spaces all have I/O MMUs which support the merging of physically
+ * discontiguous buffers, so we can use that as the sole factor to determine
+ * the setting of PCI_DMA_BUS_IS_PHYS.
  */
 extern unsigned long ia64_max_iommu_merge_mask;
 #define PCI_DMA_BUS_IS_PHYS	(ia64_max_iommu_merge_mask == ~0UL)
@@ -52,9 +54,6 @@
 	/* We don't do dynamic PCI IRQ allocation */
 }
 
-#define HAVE_ARCH_PCI_MWI 1
-extern int pcibios_prep_mwi (struct pci_dev *);
-
 #include <asm-generic/pci-dma-compat.h>
 
 /* pci_unmap_{single,page} is not a nop, thus... */
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 9cb68e9..393e04c 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -60,7 +60,7 @@
 static inline void pgtable_quicklist_free(void *pgtable_entry)
 {
 #ifdef CONFIG_NUMA
-	unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));
+	int nid = page_to_nid(virt_to_page(pgtable_entry));
 
 	if (unlikely(nid != numa_node_id())) {
 		free_page((unsigned long)pgtable_entry);
diff --git a/include/asm-ia64/sn/acpi.h b/include/asm-ia64/sn/acpi.h
new file mode 100644
index 0000000..2850a7e
--- /dev/null
+++ b/include/asm-ia64/sn/acpi.h
@@ -0,0 +1,16 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_ACPI_H
+#define _ASM_IA64_SN_ACPI_H
+
+#include "acpi/acglobal.h"
+
+#define SN_ACPI_BASE_SUPPORT()   (acpi_gbl_DSDT->oem_revision >= 0x20101)
+
+#endif /* _ASM_IA64_SN_ACPI_H */
diff --git a/include/asm-ia64/sn/pcidev.h b/include/asm-ia64/sn/pcidev.h
index eac3561..9fe89a9 100644
--- a/include/asm-ia64/sn/pcidev.h
+++ b/include/asm-ia64/sn/pcidev.h
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
  */
 #ifndef _ASM_IA64_SN_PCI_PCIDEV_H
 #define _ASM_IA64_SN_PCI_PCIDEV_H
@@ -12,31 +12,29 @@
 
 /*
  * In ia64, pci_dev->sysdata must be a *pci_controller. To provide access to
- * the pcidev_info structs for all devices under a controller, we extend the
- * definition of pci_controller, via sn_pci_controller, to include a list
- * of pcidev_info.
+ * the pcidev_info structs for all devices under a controller, we keep a
+ * list of pcidev_info under pci_controller->platform_data.
  */
-struct sn_pci_controller {
-	struct pci_controller pci_controller;
+struct sn_platform_data {
+	void *provider_soft;
 	struct list_head pcidev_info;
 };
 
-#define SN_PCI_CONTROLLER(dev) ((struct sn_pci_controller *) dev->sysdata)
+#define SN_PLATFORM_DATA(busdev) \
+	((struct sn_platform_data *)(PCI_CONTROLLER(busdev)->platform_data))
 
 #define SN_PCIDEV_INFO(dev)	sn_pcidev_info_get(dev)
 
-#define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \
-	(struct pcibus_info *)((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
 /*
  * Given a pci_bus, return the sn pcibus_bussoft struct.  Note that
  * this only works for root busses, not for busses represented by PPB's.
  */
 
 #define SN_PCIBUS_BUSSOFT(pci_bus) \
-        ((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
+	((struct pcibus_bussoft *)(SN_PLATFORM_DATA(pci_bus)->provider_soft))
 
 #define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \
-	(struct pcibus_info *)((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
+	((struct pcibus_info *)(SN_PLATFORM_DATA(pci_bus)->provider_soft))
 /*
  * Given a struct pci_dev, return the sn pcibus_bussoft struct.  Note
  * that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due
@@ -72,8 +70,6 @@
 			 struct sn_irq_info *sn_irq_info);
 extern void sn_irq_unfixup(struct pci_dev *pci_dev);
 extern struct pcidev_info * sn_pcidev_info_get(struct pci_dev *);
-extern void sn_pci_controller_fixup(int segment, int busnum,
- 				    struct pci_bus *bus);
 extern void sn_bus_store_sysdata(struct pci_dev *dev);
 extern void sn_bus_free_sysdata(void);
 extern void sn_generate_path(struct pci_bus *pci_bus, char *address);
diff --git a/include/asm-ia64/sn/sn_feature_sets.h b/include/asm-ia64/sn/sn_feature_sets.h
index 30dcfa4..bfdc362 100644
--- a/include/asm-ia64/sn/sn_feature_sets.h
+++ b/include/asm-ia64/sn/sn_feature_sets.h
@@ -44,8 +44,14 @@
  * Once enabled, a feature cannot be disabled.
  *
  * By default, features are disabled unless explicitly enabled.
+ *
+ * These defines must be kept in sync with the corresponding
+ * PROM definitions in feature_sets.h.
  */
 #define  OSF_MCA_SLV_TO_OS_INIT_SLV	0
 #define  OSF_FEAT_LOG_SBES		1
+#define  OSF_ACPI_ENABLE		2
+#define  OSF_PCISEGMENT_ENABLE		3
+
 
 #endif /* _ASM_IA64_SN_FEATURE_SETS_H */
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
index ba826b3..2c4004e 100644
--- a/include/asm-ia64/sn/sn_sal.h
+++ b/include/asm-ia64/sn/sn_sal.h
@@ -77,6 +77,7 @@
 #define  SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST	   0x02000058	// deprecated
 #define  SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST	   0x0200005a
 
+#define SN_SAL_IOIF_INIT			   0x0200005f
 #define SN_SAL_HUB_ERROR_INTERRUPT		   0x02000060
 #define SN_SAL_BTE_RECOVER			   0x02000061
 #define SN_SAL_RESERVED_DO_NOT_USE		   0x02000062
@@ -87,6 +88,8 @@
 #define  SN_SAL_INJECT_ERROR			   0x02000067
 #define  SN_SAL_SET_CPU_NUMBER			   0x02000068
 
+#define  SN_SAL_KERNEL_LAUNCH_EVENT		   0x02000069
+
 /*
  * Service-specific constants
  */
@@ -1154,4 +1157,11 @@
 	SAL_CALL_NOLOCK(rv, SN_SAL_SET_CPU_NUMBER, cpu, 0, 0, 0, 0, 0, 0);
 	return rv.status;
 }
+static inline int
+ia64_sn_kernel_launch_event(void)
+{
+ 	struct ia64_sal_retval rv;
+	SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0);
+	return rv.status;
+}
 #endif /* _ASM_IA64_SN_SN_SAL_H */
diff --git a/include/asm-m32r/checksum.h b/include/asm-m32r/checksum.h
index 877ebf4..a7a7c4f 100644
--- a/include/asm-m32r/checksum.h
+++ b/include/asm-m32r/checksum.h
@@ -31,8 +31,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-asmlinkage unsigned int csum_partial(const unsigned char *buff,
-				     int len, unsigned int sum);
+asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * The same as csum_partial, but copies from src while it checksums.
@@ -40,24 +39,22 @@
  * Here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern unsigned int csum_partial_copy_nocheck(const unsigned char *src,
-					      unsigned char *dst,
-                                              int len, unsigned int sum);
+extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
+                                              int len, __wsum sum);
 
 /*
  * This is a new version of the above that records errors it finds in *errp,
  * but continues and zeros thre rest of the buffer.
  */
-extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
-                                                unsigned char *dst,
-                                                int len, unsigned int sum,
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+                                                int len, __wsum sum,
                                                 int *err_ptr);
 
 /*
  *	Fold a partial checksum
  */
 
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	unsigned long tmpreg;
 	__asm__(
@@ -72,16 +69,17 @@
 		: "0"  (sum)
 		: "cbit"
 	);
-	return sum;
+	return (__force __sum16)sum;
 }
 
 /*
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
  */
-static inline unsigned short ip_fast_csum(unsigned char * iph,
-					  unsigned int ihl) {
-	unsigned long sum, tmpreg0, tmpreg1;
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+	unsigned long tmpreg0, tmpreg1;
+	__wsum sum;
 
 	__asm__ __volatile__(
 		"	ld	%0, @%1+ \n"
@@ -115,16 +113,15 @@
 	return csum_fold(sum);
 }
 
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
-					       unsigned long daddr,
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 					       unsigned short len,
 					       unsigned short proto,
-					       unsigned int sum)
+					       __wsum sum)
 {
 #if defined(__LITTLE_ENDIAN)
-	unsigned long len_proto = (ntohs(len)<<16)+proto*256;
+	unsigned long len_proto = (proto + len) << 8;
 #else
-	unsigned long len_proto = (proto<<16)+len;
+	unsigned long len_proto = proto + len;
 #endif
 	unsigned long tmpreg;
 
@@ -147,11 +144,10 @@
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum)
+						   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -161,16 +157,16 @@
  * in icmp.c
  */
 
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
 	return csum_fold (csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-static inline unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						 struct in6_addr *daddr,
-						 __u16 len,
-						 unsigned short proto,
-						 unsigned int sum)
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+				      const struct in6_addr *daddr,
+				      __u32 len, unsigned short proto,
+				      __wsum sum)
 {
 	unsigned long tmpreg0, tmpreg1, tmpreg2, tmpreg3;
 	__asm__(
@@ -197,7 +193,7 @@
 		: "=&r" (sum), "=&r" (tmpreg0), "=&r" (tmpreg1),
 		  "=&r" (tmpreg2), "=&r" (tmpreg3)
 		: "r" (saddr), "r" (daddr),
-		  "r" (htonl((__u32) (len))), "r" (htonl(proto)), "0" (sum)
+		  "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
 		: "cbit"
 	);
 
diff --git a/include/asm-m32r/device.h b/include/asm-m32r/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-m32r/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
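Note on the checksum.h conversion above (repeated below for m68k, m68knommu and MIPS): the untyped unsigned int/unsigned short prototypes are replaced with the sparse-checked __wsum and __sum16 types used by the generic networking code. As a reminder of the contract the new csum_fold(__wsum) -> __sum16 signature expresses, here is a generic C sketch, illustration only and not the architecture-specific asm in this patch:

	#include <stdint.h>

	/* Fold a 32-bit one's-complement accumulator down to 16 bits and
	 * complement it; this is what the asm versions in these headers do.
	 */
	static uint16_t csum_fold_generic(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the high half in */
		sum = (sum & 0xffff) + (sum >> 16);	/* absorb the carry-out */
		return (uint16_t)~sum;
	}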
diff --git a/include/asm-m32r/setup.h b/include/asm-m32r/setup.h
index 52f4fa2..6a0b322 100644
--- a/include/asm-m32r/setup.h
+++ b/include/asm-m32r/setup.h
@@ -1,6 +1,11 @@
 /*
  * This is set up by the setup-routine at boot-time
  */
+
+#define COMMAND_LINE_SIZE       512
+
+#ifdef __KERNEL__
+
 #define PARAM			((unsigned char *)empty_zero_page)
 
 #define MOUNT_ROOT_RDONLY	(*(unsigned long *) (PARAM+0x000))
@@ -18,8 +23,6 @@
 
 #define SCREEN_INFO		(*(struct screen_info *) (PARAM+0x200))
 
-#define COMMAND_LINE_SIZE	(512)
-
 #define RAMDISK_IMAGE_START_MASK	(0x07FF)
 #define RAMDISK_PROMPT_FLAG		(0x8000)
 #define RAMDISK_LOAD_FLAG		(0x4000)
@@ -27,3 +30,5 @@
 extern unsigned long memory_start;
 extern unsigned long memory_end;
 
+#endif  /*  __KERNEL__  */
+
diff --git a/include/asm-m32r/unistd.h b/include/asm-m32r/unistd.h
index 95aa342..5b66bd3 100644
--- a/include/asm-m32r/unistd.h
+++ b/include/asm-m32r/unistd.h
@@ -296,117 +296,6 @@
 #ifdef __KERNEL__
 
 #define NR_syscalls 285
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
- * <asm-m32r/errno.h>
- */
-
-#include <asm/syscall.h>	/* SYSCALL_* */
-
-#define __syscall_return(type, res) \
-do { \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-	/* Avoid using "res" which is declared to be in register r0; \
-	   errno might expand to a function call and clobber it.  */ \
-		int __err = -(res); \
-		errno = __err; \
-		res = -1; \
-	} \
-	return (type) (res); \
-} while (0)
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __res __asm__("r0"); \
-__asm__ __volatile__ (\
-	"trap #" SYSCALL_VECTOR "|| nop"\
-	: "=r" (__res) \
-	: "r" (__scno) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
-	"trap #" SYSCALL_VECTOR "|| nop"\
-	: "=r" (__res) \
-	: "r" (__scno), "0" (__res) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __arg2 __asm__ ("r1") = (long)(arg2); \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
-	"trap #" SYSCALL_VECTOR "|| nop"\
-	: "=r" (__res) \
-	: "r" (__scno), "0" (__res), "r" (__arg2) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __arg3 __asm__ ("r2") = (long)(arg3); \
-register long __arg2 __asm__ ("r1") = (long)(arg2); \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
-	"trap #" SYSCALL_VECTOR "|| nop"\
-	: "=r" (__res) \
-	: "r" (__scno), "0" (__res), "r" (__arg2), \
-		"r" (__arg3) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name(type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __arg4 __asm__ ("r3") = (long)(arg4); \
-register long __arg3 __asm__ ("r2") = (long)(arg3); \
-register long __arg2 __asm__ ("r1") = (long)(arg2); \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
-	"trap #" SYSCALL_VECTOR "|| nop"\
-	: "=r" (__res) \
-	: "r" (__scno), "0" (__res), "r" (__arg2), \
-		"r" (__arg3), "r" (__arg4) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	type5,arg5) \
-type name(type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __arg5 __asm__ ("r4") = (long)(arg5); \
-register long __arg4 __asm__ ("r3") = (long)(arg4); \
-register long __arg3 __asm__ ("r2") = (long)(arg3); \
-register long __arg2 __asm__ ("r1") = (long)(arg2); \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
-	"trap #" SYSCALL_VECTOR "|| nop"\
-	: "=r" (__res) \
-	: "r" (__scno), "0" (__res), "r" (__arg2), \
-		"r" (__arg3), "r" (__arg4), "r" (__arg5) \
-	: "memory"); \
-__syscall_return(type,__res); \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_STAT64
diff --git a/include/asm-m68k/checksum.h b/include/asm-m68k/checksum.h
index 17280ef..494f9ae 100644
--- a/include/asm-m68k/checksum.h
+++ b/include/asm-m68k/checksum.h
@@ -15,7 +15,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -25,22 +25,21 @@
  * better 64-bit) boundary
  */
 
-extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
-						unsigned char *dst,
-						int len, int sum,
+extern __wsum csum_partial_copy_from_user(const void __user *src,
+						void *dst,
+						int len, __wsum sum,
 						int *csum_err);
 
-extern unsigned int csum_partial_copy_nocheck(const unsigned char *src,
-					      unsigned char *dst, int len,
-					      int sum);
+extern __wsum csum_partial_copy_nocheck(const void *src,
+					      void *dst, int len,
+					      __wsum sum);
 
 /*
  *	This is a version of ip_compute_csum() optimized for IP headers,
  *	which always checksum on 4 octet boundaries.
  *
  */
-static inline unsigned short
-ip_fast_csum(unsigned char *iph, unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int sum = 0;
 	unsigned long tmp;
@@ -58,29 +57,29 @@
 		 : "=d" (sum), "=&a" (iph), "=&d" (ihl), "=&d" (tmp)
 		 : "0" (sum), "1" (iph), "2" (ihl)
 		 : "memory");
-	return ~sum;
+	return (__force __sum16)~sum;
 }
 
 /*
  *	Fold a partial checksum
  */
 
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
-	unsigned int tmp = sum;
+	unsigned int tmp = (__force u32)sum;
 	__asm__("swap %1\n\t"
 		"addw %1, %0\n\t"
 		"clrw %1\n\t"
 		"addxw %1, %0"
 		: "=&d" (sum), "=&d" (tmp)
 		: "0" (sum), "1" (tmp));
-	return ~sum;
+	return (__force __sum16)~sum;
 }
 
 
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
-		  unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
 {
 	__asm__ ("addl  %2,%0\n\t"
 		 "addxl %3,%0\n\t"
@@ -98,9 +97,9 @@
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int
-csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
-		  unsigned short proto, unsigned int sum)
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -110,16 +109,15 @@
  * in icmp.c
  */
 
-static inline unsigned short
-ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold (csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-static __inline__ unsigned short int
-csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
-		__u32 len, unsigned short proto, unsigned int sum)
+static __inline__ __sum16
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
+		__u32 len, unsigned short proto, __wsum sum)
 {
 	register unsigned long tmp;
 	__asm__("addl %2@,%0\n\t"
diff --git a/include/asm-m68k/device.h b/include/asm-m68k/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-m68k/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-m68k/dma-mapping.h b/include/asm-m68k/dma-mapping.h
index d90d841..00259ed 100644
--- a/include/asm-m68k/dma-mapping.h
+++ b/include/asm-m68k/dma-mapping.h
@@ -21,7 +21,7 @@
 	return 1 << L1_CACHE_SHIFT;
 }
 
-static inline int dma_is_consistent(dma_addr_t dma_addr)
+static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
 }
@@ -41,7 +41,7 @@
 {
 	dma_free_coherent(dev, size, addr, handle);
 }
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
 	/* we use coherent allocation, so not much to do here. */
diff --git a/include/asm-m68k/setup.h b/include/asm-m68k/setup.h
index 7facc9a..2a8853c 100644
--- a/include/asm-m68k/setup.h
+++ b/include/asm-m68k/setup.h
@@ -41,8 +41,12 @@
 #define MACH_Q40     10
 #define MACH_SUN3X   11
 
+#define COMMAND_LINE_SIZE 256
+
 #ifdef __KERNEL__
 
+#define CL_SIZE COMMAND_LINE_SIZE
+
 #ifndef __ASSEMBLY__
 extern unsigned long m68k_machtype;
 #endif /* !__ASSEMBLY__ */
@@ -355,8 +359,6 @@
      */
 
 #define NUM_MEMINFO	4
-#define CL_SIZE		256
-#define COMMAND_LINE_SIZE	CL_SIZE
 
 #ifndef __ASSEMBLY__
 struct mem_info {
diff --git a/include/asm-m68k/unistd.h b/include/asm-m68k/unistd.h
index ad43480..fdbb60e 100644
--- a/include/asm-m68k/unistd.h
+++ b/include/asm-m68k/unistd.h
@@ -317,103 +317,6 @@
 #ifdef __KERNEL__
 
 #define NR_syscalls		311
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
-   <asm-m68k/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-	/* avoid using res which is declared to be in register d0; \
-	   errno might expand to a function call and clobber it.  */ \
-		int __err = -(res); \
-		errno = __err; \
-		res = -1; \
-	} \
-	return (type) (res); \
-} while (0)
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-__asm__ __volatile__ ("trap  #0" \
-                      : "+d" (__res) ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,atype,a) \
-type name(atype a) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-__asm__ __volatile__ ("trap  #0" \
-		      : "+d" (__res) \
-		      : "d" (__a)  ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,atype,a,btype,b) \
-type name(atype a,btype b) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-register long __b __asm__ ("%d2") = (long)(b); \
-__asm__ __volatile__ ("trap  #0" \
-		      : "+d" (__res) \
-                      : "d" (__a), "d" (__b) \
-		     ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,atype,a,btype,b,ctype,c) \
-type name(atype a,btype b,ctype c) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-register long __b __asm__ ("%d2") = (long)(b); \
-register long __c __asm__ ("%d3") = (long)(c); \
-__asm__ __volatile__ ("trap  #0" \
-		      : "+d" (__res) \
-                      : "d" (__a), "d" (__b), \
-			"d" (__c) \
-		     ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \
-type name (atype a, btype b, ctype c, dtype d) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-register long __b __asm__ ("%d2") = (long)(b); \
-register long __c __asm__ ("%d3") = (long)(c); \
-register long __d __asm__ ("%d4") = (long)(d); \
-__asm__ __volatile__ ("trap  #0" \
-                      : "+d" (__res) \
-                      : "d" (__a), "d" (__b), \
-			"d" (__c), "d" (__d)  \
-		     ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
-type name (atype a,btype b,ctype c,dtype d,etype e) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-register long __b __asm__ ("%d2") = (long)(b); \
-register long __c __asm__ ("%d3") = (long)(c); \
-register long __d __asm__ ("%d4") = (long)(d); \
-register long __e __asm__ ("%d5") = (long)(e); \
-__asm__ __volatile__ ("trap  #0" \
-		      : "+d" (__res) \
-		      : "d" (__a), "d" (__b), \
-			"d" (__c), "d" (__d), "d" (__e)  \
-                     ); \
-__syscall_return(type,__res); \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-m68knommu/checksum.h b/include/asm-m68knommu/checksum.h
index 294ec75..8188348 100644
--- a/include/asm-m68knommu/checksum.h
+++ b/include/asm-m68knommu/checksum.h
@@ -15,7 +15,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -25,8 +25,8 @@
  * better 64-bit) boundary
  */
 
-unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst,
-	int len, int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+	int len, __wsum sum);
 
 
 /*
@@ -36,33 +36,31 @@
  * better 64-bit) boundary
  */
 
-extern unsigned int csum_partial_copy_from_user(const unsigned char *src,
-	unsigned char *dst, int len, int sum, int *csum_err);
+extern __wsum csum_partial_copy_from_user(const void __user *src,
+	void *dst, int len, __wsum sum, int *csum_err);
 
-#define csum_partial_copy_nocheck(src, dst, len, sum)	\
-	csum_partial_copy((src), (dst), (len), (sum))
-
-unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl);
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /*
  *	Fold a partial checksum
  */
 
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
+	unsigned int tmp = (__force u32)sum;
 #ifdef CONFIG_COLDFIRE
-	sum = (sum & 0xffff) + (sum >> 16);
-	sum = (sum & 0xffff) + (sum >> 16);
+	tmp = (tmp & 0xffff) + (tmp >> 16);
+	tmp = (tmp & 0xffff) + (tmp >> 16);
+	return (__force __sum16)~tmp;
 #else
-	unsigned int tmp = sum;
 	__asm__("swap %1\n\t"
 		"addw %1, %0\n\t"
 		"clrw %1\n\t"
 		"addxw %1, %0"
 		: "=&d" (sum), "=&d" (tmp)
 		: "0" (sum), "1" (sum));
+	return (__force __sum16)~sum;
 #endif
-	return ~sum;
 }
 
 
@@ -71,9 +69,9 @@
  * returns a 16-bit checksum, already complemented
  */
 
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
-		  unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
 {
 	__asm__ ("addl  %1,%0\n\t"
 		 "addxl %4,%0\n\t"
@@ -86,9 +84,9 @@
 	return sum;
 }
 
-static inline unsigned short int
-csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
-		  unsigned short proto, unsigned int sum)
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -98,12 +96,12 @@
  * in icmp.c
  */
 
-extern unsigned short ip_compute_csum(const unsigned char * buff, int len);
+extern __sum16 ip_compute_csum(const void *buff, int len);
 
 #define _HAVE_ARCH_IPV6_CSUM
-static __inline__ unsigned short int
-csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
-		__u32 len, unsigned short proto, unsigned int sum) 
+static __inline__ __sum16
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
+		__u32 len, unsigned short proto, __wsum sum)
 {
 	register unsigned long tmp;
 	__asm__("addl %2@,%0\n\t"
diff --git a/include/asm-m68knommu/device.h b/include/asm-m68knommu/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-m68knommu/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-m68knommu/dma-mapping.h b/include/asm-m68knommu/dma-mapping.h
index 5622b85..6aeab18 100644
--- a/include/asm-m68knommu/dma-mapping.h
+++ b/include/asm-m68knommu/dma-mapping.h
@@ -1,9 +1,10 @@
 #ifndef _M68KNOMMU_DMA_MAPPING_H
 #define _M68KNOMMU_DMA_MAPPING_H
 
-
 #ifdef CONFIG_PCI
 #include <asm-generic/dma-mapping.h>
+#else
+#include <asm-generic/dma-mapping-broken.h>
 #endif
 
 #endif  /* _M68KNOMMU_DMA_MAPPING_H */
diff --git a/include/asm-m68knommu/irq.h b/include/asm-m68knommu/irq.h
index 45e7a2fd..7b8f874 100644
--- a/include/asm-m68knommu/irq.h
+++ b/include/asm-m68knommu/irq.h
@@ -86,5 +86,6 @@
 #define enable_irq(x)	do { } while (0)
 #define disable_irq(x)	do { } while (0)
 #define disable_irq_nosync(x)	disable_irq(x)
+#define irq_canonicalize(irq)	(irq)
 
 #endif /* _M68K_IRQ_H_ */
diff --git a/include/asm-m68knommu/m520xsim.h b/include/asm-m68knommu/m520xsim.h
index 1dac22e..49d016e 100644
--- a/include/asm-m68knommu/m520xsim.h
+++ b/include/asm-m68knommu/m520xsim.h
@@ -31,6 +31,16 @@
 #define MCFINT_QSPI         31          /* Interrupt number for QSPI */
 #define MCFINT_PIT1         4           /* Interrupt number for PIT1 (PIT0 in processor) */
 
+/*
+ *  SDRAM configuration registers.
+ */
+#define MCFSIM_SDMR         0x000a8000	/* SDRAM Mode/Extended Mode Register */
+#define MCFSIM_SDCR         0x000a8004	/* SDRAM Control Register */
+#define MCFSIM_SDCFG1       0x000a8008	/* SDRAM Configuration Register 1 */
+#define MCFSIM_SDCFG2       0x000a800c	/* SDRAM Configuration Register 2 */
+#define MCFSIM_SDCS0        0x000a8110	/* SDRAM Chip Select 0 Configuration */
+#define MCFSIM_SDCS1        0x000a8114	/* SDRAM Chip Select 1 Configuration */
+
 
 #define MCF_GPIO_PAR_UART                   (0xA4036)
 #define MCF_GPIO_PAR_FECI2C                 (0xA4033)
@@ -47,7 +57,7 @@
 
 #define ICR_INTRCONF		0x05
 #define MCFPIT_IMR		MCFINTC_IMRL
-#define MCFPIT_IMR_IBIT	(1 << MCFINT_PIT1)
+#define MCFPIT_IMR_IBIT		(1 << MCFINT_PIT1)
 
 /****************************************************************************/
 #endif  /* m520xsim_h */
diff --git a/include/asm-m68knommu/mcfmbus.h b/include/asm-m68knommu/mcfmbus.h
index 13df9d4..319899c 100644
--- a/include/asm-m68knommu/mcfmbus.h
+++ b/include/asm-m68knommu/mcfmbus.h
@@ -37,7 +37,7 @@
 #define MCFMBUS_MFDR_MBC(a)	((a)&0x3F)	   /*M-Bus Clock*/
 
 /*
-*	Define bit flags in Controll Register
+*	Define bit flags in Control Register
 */
 
 #define MCFMBUS_MBCR_MEN           (0x80)  /* M-Bus Enable                 */
diff --git a/include/asm-m68knommu/rtc.h b/include/asm-m68knommu/rtc.h
new file mode 100644
index 0000000..eaf18ec
--- /dev/null
+++ b/include/asm-m68knommu/rtc.h
@@ -0,0 +1 @@
+#include <asm-m68k/rtc.h>
diff --git a/include/asm-m68knommu/scatterlist.h b/include/asm-m68knommu/scatterlist.h
index 12309b1..2085d6f 100644
--- a/include/asm-m68knommu/scatterlist.h
+++ b/include/asm-m68knommu/scatterlist.h
@@ -10,7 +10,7 @@
 	unsigned int	length;
 };
 
-#define sg_address(sg) (page_address((sg)->page) + (sg)->offset
+#define sg_address(sg)		(page_address((sg)->page) + (sg)->offset)
 #define sg_dma_address(sg)      ((sg)->dma_address)
 #define sg_dma_len(sg)          ((sg)->length)
 
diff --git a/include/asm-m68knommu/setup.h b/include/asm-m68knommu/setup.h
index d2b0fcc..fb86bb2 100644
--- a/include/asm-m68knommu/setup.h
+++ b/include/asm-m68knommu/setup.h
@@ -1,5 +1,10 @@
+#ifdef __KERNEL__
+
 #include <asm-m68k/setup.h>
 
 /* We have a bigger command line buffer. */
 #undef COMMAND_LINE_SIZE
+
+#endif  /*  __KERNEL__  */
+
 #define COMMAND_LINE_SIZE	512
diff --git a/include/asm-m68knommu/ucontext.h b/include/asm-m68knommu/ucontext.h
index 5d570ce..713a27f 100644
--- a/include/asm-m68knommu/ucontext.h
+++ b/include/asm-m68knommu/ucontext.h
@@ -5,21 +5,17 @@
 #define NGREG 18
 typedef greg_t gregset_t[NGREG];
 
-#ifdef CONFIG_FPU
 typedef struct fpregset {
 	int f_pcr;
 	int f_psr;
 	int f_fpiaddr;
 	int f_fpregs[8][3];
 } fpregset_t;
-#endif
 
 struct mcontext {
 	int version;
 	gregset_t gregs;
-#ifdef CONFIG_FPU
 	fpregset_t fpregs;
-#endif
 };
 
 #define MCONTEXT_VERSION 2
@@ -29,9 +25,7 @@
 	struct ucontext  *uc_link;
 	stack_t		  uc_stack;
 	struct mcontext	  uc_mcontext;
-#ifdef CONFIG_FPU
 	unsigned long	  uc_filler[80];
-#endif
 	sigset_t	  uc_sigmask;	/* mask last for extensibility */
 };
 
diff --git a/include/asm-m68knommu/unistd.h b/include/asm-m68knommu/unistd.h
index ebaf031..82e0319 100644
--- a/include/asm-m68knommu/unistd.h
+++ b/include/asm-m68knommu/unistd.h
@@ -318,156 +318,6 @@
 #ifdef __KERNEL__
 
 #define NR_syscalls		311
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
-   <asm-m68k/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-	/* avoid using res which is declared to be in register d0; \
-	   errno might expand to a function call and clobber it.  */ \
-		int __err = -(res); \
-		errno = __err; \
-		res = -1; \
-	} \
-	return (type) (res); \
-} while (0)
-
-#define _syscall0(type, name)							\
-type name(void)									\
-{										\
-  long __res;									\
-  __asm__ __volatile__ ("movel	%1, %%d0\n\t"					\
-  			"trap	#0\n\t"						\
-  			"movel	%%d0, %0"					\
-			: "=g" (__res)						\
-			: "i" (__NR_##name)					\
-			: "cc", "%d0");						\
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {				\
-    errno = -__res;								\
-    __res = -1;									\
-  }										\
-  return (type)__res;								\
-}
-
-#define _syscall1(type, name, atype, a)						\
-type name(atype a)								\
-{										\
-  long __res;									\
-  __asm__ __volatile__ ("movel	%2, %%d1\n\t"					\
-  			"movel	%1, %%d0\n\t"					\
-  			"trap	#0\n\t"						\
-  			"movel	%%d0, %0"					\
-			: "=g" (__res)						\
-			: "i" (__NR_##name),					\
-			  "g" ((long)a)						\
-			: "cc", "%d0", "%d1");					\
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {				\
-    errno = -__res;								\
-    __res = -1;									\
-  }										\
-  return (type)__res;								\
-}
-
-#define _syscall2(type, name, atype, a, btype, b)				\
-type name(atype a, btype b)							\
-{										\
-  long __res;									\
-  __asm__ __volatile__ ("movel	%3, %%d2\n\t"					\
-  			"movel	%2, %%d1\n\t"					\
-			"movel	%1, %%d0\n\t"					\
-  			"trap	#0\n\t"						\
-  			"movel	%%d0, %0"					\
-			: "=g" (__res)						\
-			: "i" (__NR_##name),					\
-			  "a" ((long)a),					\
-			  "g" ((long)b)						\
-			: "cc", "%d0", "%d1", "%d2");				\
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {				\
-    errno = -__res;								\
-    __res = -1;									\
-  }										\
-  return (type)__res;								\
-}
-
-#define _syscall3(type, name, atype, a, btype, b, ctype, c)			\
-type name(atype a, btype b, ctype c)						\
-{										\
-  long __res;									\
-  __asm__ __volatile__ ("movel	%4, %%d3\n\t"					\
-			"movel	%3, %%d2\n\t"					\
-  			"movel	%2, %%d1\n\t"					\
-			"movel	%1, %%d0\n\t"					\
-  			"trap	#0\n\t"						\
-  			"movel	%%d0, %0"					\
-			: "=g" (__res)						\
-			: "i" (__NR_##name),					\
-			  "a" ((long)a),					\
-			  "a" ((long)b),					\
-			  "g" ((long)c)						\
-			: "cc", "%d0", "%d1", "%d2", "%d3");			\
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {				\
-    errno = -__res;								\
-    __res = -1;									\
-  }										\
-  return (type)__res;								\
-}
-
-#define _syscall4(type, name, atype, a, btype, b, ctype, c, dtype, d)		\
-type name(atype a, btype b, ctype c, dtype d)					\
-{										\
-  long __res;									\
-  __asm__ __volatile__ ("movel	%5, %%d4\n\t"					\
-			"movel	%4, %%d3\n\t"					\
-			"movel	%3, %%d2\n\t"					\
-  			"movel	%2, %%d1\n\t"					\
-			"movel	%1, %%d0\n\t"					\
-  			"trap	#0\n\t"						\
-  			"movel	%%d0, %0"					\
-			: "=g" (__res)						\
-			: "i" (__NR_##name),					\
-			  "a" ((long)a),					\
-			  "a" ((long)b),					\
-			  "a" ((long)c),					\
-			  "g" ((long)d)						\
-			: "cc", "%d0", "%d1", "%d2", "%d3",			\
-			  "%d4");						\
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {				\
-    errno = -__res;								\
-    __res = -1;									\
-  }										\
-  return (type)__res;								\
-}
-
-#define _syscall5(type, name, atype, a, btype, b, ctype, c, dtype, d, etype, e)	\
-type name(atype a, btype b, ctype c, dtype d, etype e)				\
-{										\
-  long __res;									\
-  __asm__ __volatile__ ("movel	%6, %%d5\n\t"					\
-			"movel	%5, %%d4\n\t"					\
-			"movel	%4, %%d3\n\t"					\
-			"movel	%3, %%d2\n\t"					\
-  			"movel	%2, %%d1\n\t"					\
-			"movel	%1, %%d0\n\t"					\
-  			"trap	#0\n\t"						\
-  			"movel	%%d0, %0"					\
-			: "=g" (__res)						\
-			: "i" (__NR_##name),					\
-			  "a" ((long)a),					\
-			  "a" ((long)b),					\
-			  "a" ((long)c),					\
-			  "a" ((long)d),					\
-			  "g" ((long)e)						\
-			: "cc", "%d0", "%d1", "%d2", "%d3",			\
-			  "%d4", "%d5");					\
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {				\
-    errno = -__res;								\
-    __res = -1;									\
-  }										\
-  return (type)__res;								\
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-mips/addrspace.h b/include/asm-mips/addrspace.h
index 45c706e..c627508 100644
--- a/include/asm-mips/addrspace.h
+++ b/include/asm-mips/addrspace.h
@@ -19,12 +19,16 @@
 #define _ATYPE_
 #define _ATYPE32_
 #define _ATYPE64_
-#define _LLCONST_(x)	x
+#define _CONST64_(x)	x
 #else
 #define _ATYPE_		__PTRDIFF_TYPE__
 #define _ATYPE32_	int
-#define _ATYPE64_	long long
-#define _LLCONST_(x)	x ## LL
+#define _ATYPE64_	__s64
+#ifdef CONFIG_64BIT
+#define _CONST64_(x)	x ## L
+#else
+#define _CONST64_(x)	x ## LL
+#endif
 #endif
 
 /*
@@ -48,7 +52,7 @@
  */
 #define CPHYSADDR(a)		((_ACAST32_(a)) & 0x1fffffff)
 #define XPHYSADDR(a)            ((_ACAST64_(a)) &			\
-				 _LLCONST_(0x000000ffffffffff))
+				 _CONST64_(0x000000ffffffffff))
 
 #ifdef CONFIG_64BIT
 
@@ -57,14 +61,14 @@
  * The compatibility segments use the full 64-bit sign extended value.  Note
  * the R8000 doesn't have them so don't reference these in generic MIPS code.
  */
-#define XKUSEG			_LLCONST_(0x0000000000000000)
-#define XKSSEG			_LLCONST_(0x4000000000000000)
-#define XKPHYS			_LLCONST_(0x8000000000000000)
-#define XKSEG			_LLCONST_(0xc000000000000000)
-#define CKSEG0			_LLCONST_(0xffffffff80000000)
-#define CKSEG1			_LLCONST_(0xffffffffa0000000)
-#define CKSSEG			_LLCONST_(0xffffffffc0000000)
-#define CKSEG3			_LLCONST_(0xffffffffe0000000)
+#define XKUSEG			_CONST64_(0x0000000000000000)
+#define XKSSEG			_CONST64_(0x4000000000000000)
+#define XKPHYS			_CONST64_(0x8000000000000000)
+#define XKSEG			_CONST64_(0xc000000000000000)
+#define CKSEG0			_CONST64_(0xffffffff80000000)
+#define CKSEG1			_CONST64_(0xffffffffa0000000)
+#define CKSSEG			_CONST64_(0xffffffffc0000000)
+#define CKSEG3			_CONST64_(0xffffffffe0000000)
 
 #define CKSEG0ADDR(a)		(CPHYSADDR(a) | CKSEG0)
 #define CKSEG1ADDR(a)		(CPHYSADDR(a) | CKSEG1)
@@ -122,7 +126,7 @@
 #define PHYS_TO_XKSEG_UNCACHED(p)	PHYS_TO_XKPHYS(K_CALG_UNCACHED,(p))
 #define PHYS_TO_XKSEG_CACHED(p)		PHYS_TO_XKPHYS(K_CALG_COH_SHAREABLE,(p))
 #define XKPHYS_TO_PHYS(p)		((p) & TO_PHYS_MASK)
-#define PHYS_TO_XKPHYS(cm,a)		(_LLCONST_(0x8000000000000000) | \
+#define PHYS_TO_XKPHYS(cm,a)		(_CONST64_(0x8000000000000000) | \
 					 ((cm)<<59) | (a))
 
 #if defined (CONFIG_CPU_R4300)						\
@@ -132,20 +136,20 @@
     || defined (CONFIG_CPU_NEVADA)					\
     || defined (CONFIG_CPU_TX49XX)					\
     || defined (CONFIG_CPU_MIPS64)
-#define TO_PHYS_MASK	_LLCONST_(0x0000000fffffffff)	/* 2^^36 - 1 */
+#define TO_PHYS_MASK	_CONST64_(0x0000000fffffffff)	/* 2^^36 - 1 */
 #endif
 
 #if defined (CONFIG_CPU_R8000)
 /* We keep KUSIZE consistent with R4000 for now (2^^40) instead of (2^^48) */
-#define TO_PHYS_MASK	_LLCONST_(0x000000ffffffffff)	/* 2^^40 - 1 */
+#define TO_PHYS_MASK	_CONST64_(0x000000ffffffffff)	/* 2^^40 - 1 */
 #endif
 
 #if defined (CONFIG_CPU_R10000)
-#define TO_PHYS_MASK	_LLCONST_(0x000000ffffffffff)	/* 2^^40 - 1 */
+#define TO_PHYS_MASK	_CONST64_(0x000000ffffffffff)	/* 2^^40 - 1 */
 #endif
 
 #if defined(CONFIG_CPU_SB1) || defined(CONFIG_CPU_SB1A)
-#define TO_PHYS_MASK	_LLCONST_(0x00000fffffffffff)	/* 2^^44 - 1 */
+#define TO_PHYS_MASK	_CONST64_(0x00000fffffffffff)	/* 2^^44 - 1 */
 #endif
 
 #ifndef CONFIG_CPU_R8000
@@ -155,7 +159,7 @@
  * in order to catch bugs in the source code.
  */
 
-#define COMPAT_K1BASE32		_LLCONST_(0xffffffffa0000000)
+#define COMPAT_K1BASE32		_CONST64_(0xffffffffa0000000)
 #define PHYS_TO_COMPATK1(x)	((x) | COMPAT_K1BASE32) /* 32-bit compat k1 */
 
 #endif
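_LLCONST_() is renamed to _CONST64_() and made configuration-aware: a 64-bit kernel gets a plain L suffix so the constants are ordinary (unsigned) long values, while 32-bit kernels keep LL. A standalone sketch of the idea, using __LP64__ as a stand-in for the kernel's CONFIG_64BIT (an assumption, not what the header tests):

	/* Either way the constant is 64 bits wide; the point is that on 64-bit
	 * builds it stays in unsigned long arithmetic, e.g. for CKSEG1ADDR().
	 */
	#ifdef __LP64__
	#define _CONST64_(x)	x ## L
	#else
	#define _CONST64_(x)	x ## LL
	#endif

	#define CKSEG1_EXAMPLE	_CONST64_(0xffffffffa0000000)

	_Static_assert(sizeof(CKSEG1_EXAMPLE) == 8, "64-bit constant");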
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index e64abc0..c1a2409 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -9,20 +9,13 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1996, 97, 99, 2000, 03, 04 by Ralf Baechle
+ * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
  */
-
-/*
- * As workaround for the ATOMIC_DEC_AND_LOCK / atomic_dec_and_lock mess in
- * <linux/spinlock.h> we have to include <linux/spinlock.h> outside the
- * main big wrapper ...
- */
-#include <linux/spinlock.h>
-
 #ifndef _ASM_ATOMIC_H
 #define _ASM_ATOMIC_H
 
 #include <linux/irqflags.h>
+#include <asm/barrier.h>
 #include <asm/cpu-features.h>
 #include <asm/war.h>
 
@@ -138,6 +131,8 @@
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -148,7 +143,6 @@
 		"	sc	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -163,7 +157,6 @@
 		"	sc	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -178,6 +171,8 @@
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -185,6 +180,8 @@
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -195,7 +192,6 @@
 		"	sc	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -210,7 +206,6 @@
 		"	sc	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -225,6 +220,8 @@
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -240,6 +237,8 @@
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -253,7 +252,6 @@
 		"	beqzl	%0, 1b					\n"
 		"	 subu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	sync						\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -272,7 +270,6 @@
 		"	beqz	%0, 1b					\n"
 		"	 subu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	sync						\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -289,6 +286,8 @@
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -383,7 +382,7 @@
 
 #ifdef CONFIG_64BIT
 
-typedef struct { volatile __s64 counter; } atomic64_t;
+typedef struct { volatile long counter; } atomic64_t;
 
 #define ATOMIC64_INIT(i)    { (i) }
 
@@ -492,6 +491,8 @@
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -502,7 +503,6 @@
 		"	scd	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -517,7 +517,6 @@
 		"	scd	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -532,6 +531,8 @@
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -539,6 +540,8 @@
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -549,7 +552,6 @@
 		"	scd	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -564,7 +566,6 @@
 		"	scd	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -579,6 +580,8 @@
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -594,6 +597,8 @@
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -607,7 +612,6 @@
 		"	beqzl	%0, 1b					\n"
 		"	 dsubu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	sync						\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -626,7 +630,6 @@
 		"	beqz	%0, 1b					\n"
 		"	 dsubu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	sync						\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -643,6 +646,8 @@
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
diff --git a/include/asm-mips/barrier.h b/include/asm-mips/barrier.h
new file mode 100644
index 0000000..ed82631
--- /dev/null
+++ b/include/asm-mips/barrier.h
@@ -0,0 +1,132 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+/*
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier.  All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
+ * any of the preceding reads.  This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies.  See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	b = 2;
+ *	memory_barrier();
+ *	p = &b;				q = p;
+ *					read_barrier_depends();
+ *					d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends().  However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	a = 2;
+ *	memory_barrier();
+ *	b = 3;				y = b;
+ *					read_barrier_depends();
+ *					x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b".  Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
+ * in cases like this where there are no data dependencies.
+ */
+
+#define read_barrier_depends()		do { } while(0)
+#define smp_read_barrier_depends()	do { } while(0)
+
+#ifdef CONFIG_CPU_HAS_SYNC
+#define __sync()				\
+	__asm__ __volatile__(			\
+		".set	push\n\t"		\
+		".set	noreorder\n\t"		\
+		".set	mips2\n\t"		\
+		"sync\n\t"			\
+		".set	pop"			\
+		: /* no output */		\
+		: /* no input */		\
+		: "memory")
+#else
+#define __sync()	do { } while(0)
+#endif
+
+#define __fast_iob()				\
+	__asm__ __volatile__(			\
+		".set	push\n\t"		\
+		".set	noreorder\n\t"		\
+		"lw	$0,%0\n\t"		\
+		"nop\n\t"			\
+		".set	pop"			\
+		: /* no output */		\
+		: "m" (*(int *)CKSEG1)		\
+		: "memory")
+
+#define fast_wmb()	__sync()
+#define fast_rmb()	__sync()
+#define fast_mb()	__sync()
+#define fast_iob()				\
+	do {					\
+		__sync();			\
+		__fast_iob();			\
+	} while (0)
+
+#ifdef CONFIG_CPU_HAS_WB
+
+#include <asm/wbflush.h>
+
+#define wmb()		fast_wmb()
+#define rmb()		fast_rmb()
+#define mb()		wbflush()
+#define iob()		wbflush()
+
+#else /* !CONFIG_CPU_HAS_WB */
+
+#define wmb()		fast_wmb()
+#define rmb()		fast_rmb()
+#define mb()		fast_mb()
+#define iob()		fast_iob()
+
+#endif /* !CONFIG_CPU_HAS_WB */
+
+#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
+#define __WEAK_ORDERING_MB	"       sync	\n"
+#else
+#define __WEAK_ORDERING_MB	"		\n"
+#endif
+
+#define smp_mb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
+#define smp_rmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
+#define smp_wmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
+
+#define set_mb(var, value) \
+	do { var = value; smp_mb(); } while (0)
+
+#endif /* __ASM_BARRIER_H */
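The long comment above illustrates read_barrier_depends() with a two-CPU pointer-publication example. A rough userspace rendering of the same pattern in C11 (an analogy only, not kernel code), where release on the writer and consume on the reader stand in for wmb() and read_barrier_depends():

	#include <stdatomic.h>

	static int b = 1;
	static _Atomic(int *) p;

	void publisher(void)		/* "CPU 0" in the comment */
	{
		b = 2;
		atomic_store_explicit(&p, &b, memory_order_release);
	}

	int consumer(void)		/* "CPU 1" in the comment */
	{
		int *q = atomic_load_explicit(&p, memory_order_consume);
		return q ? *q : -1;	/* sees 2 once the pointer is visible */
	}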
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index 1bb89c5..06445de 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -3,38 +3,34 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
+ * Copyright (c) 1994 - 1997, 1999, 2000, 06  Ralf Baechle (ralf@linux-mips.org)
  * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
  */
 #ifndef _ASM_BITOPS_H
 #define _ASM_BITOPS_H
 
 #include <linux/compiler.h>
+#include <linux/irqflags.h>
 #include <linux/types.h>
+#include <asm/barrier.h>
 #include <asm/bug.h>
 #include <asm/byteorder.h>		/* sigh ... */
 #include <asm/cpu-features.h>
+#include <asm/sgidefs.h>
+#include <asm/war.h>
 
 #if (_MIPS_SZLONG == 32)
 #define SZLONG_LOG 5
 #define SZLONG_MASK 31UL
 #define __LL		"ll	"
 #define __SC		"sc	"
-#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
 #elif (_MIPS_SZLONG == 64)
 #define SZLONG_LOG 6
 #define SZLONG_MASK 63UL
 #define __LL		"lld	"
 #define __SC		"scd	"
-#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
 #endif
 
-#ifdef __KERNEL__
-
-#include <linux/irqflags.h>
-#include <asm/sgidefs.h>
-#include <asm/war.h>
-
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
@@ -42,20 +38,6 @@
 #define smp_mb__after_clear_bit()	smp_mb()
 
 /*
- * Only disable interrupt for kernel mode stuff to keep usermode stuff
- * that dares to use kernel include files alive.
- */
-
-#define __bi_flags			unsigned long flags
-#define __bi_local_irq_save(x)		local_irq_save(x)
-#define __bi_local_irq_restore(x)	local_irq_restore(x)
-#else
-#define __bi_flags
-#define __bi_local_irq_save(x)
-#define __bi_local_irq_restore(x)
-#endif /* __KERNEL__ */
-
-/*
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
@@ -93,13 +75,13 @@
 	} else {
 		volatile unsigned long *a = addr;
 		unsigned long mask;
-		__bi_flags;
+		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << (nr & SZLONG_MASK);
-		__bi_local_irq_save(flags);
+		local_irq_save(flags);
 		*a |= mask;
-		__bi_local_irq_restore(flags);
+		local_irq_restore(flags);
 	}
 }
 
@@ -141,13 +123,13 @@
 	} else {
 		volatile unsigned long *a = addr;
 		unsigned long mask;
-		__bi_flags;
+		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << (nr & SZLONG_MASK);
-		__bi_local_irq_save(flags);
+		local_irq_save(flags);
 		*a &= ~mask;
-		__bi_local_irq_restore(flags);
+		local_irq_restore(flags);
 	}
 }
 
@@ -191,13 +173,13 @@
 	} else {
 		volatile unsigned long *a = addr;
 		unsigned long mask;
-		__bi_flags;
+		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << (nr & SZLONG_MASK);
-		__bi_local_irq_save(flags);
+		local_irq_save(flags);
 		*a ^= mask;
-		__bi_local_irq_restore(flags);
+		local_irq_restore(flags);
 	}
 }
 
@@ -223,9 +205,6 @@
 		"	" __SC	"%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -245,9 +224,6 @@
 		"	" __SC	"%2, %1					\n"
 		"	beqz	%2, 1b					\n"
 		"	 and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	pop					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -258,17 +234,19 @@
 		volatile unsigned long *a = addr;
 		unsigned long mask;
 		int retval;
-		__bi_flags;
+		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << (nr & SZLONG_MASK);
-		__bi_local_irq_save(flags);
+		local_irq_save(flags);
 		retval = (mask & *a) != 0;
 		*a |= mask;
-		__bi_local_irq_restore(flags);
+		local_irq_restore(flags);
 
 		return retval;
 	}
+
+	smp_mb();
 }
 
 /*
@@ -294,9 +272,6 @@
 		"	" __SC 	"%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -317,9 +292,6 @@
 		"	" __SC 	"%2, %1					\n"
 		"	beqz	%2, 1b					\n"
 		"	 and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	pop					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -330,17 +302,19 @@
 		volatile unsigned long *a = addr;
 		unsigned long mask;
 		int retval;
-		__bi_flags;
+		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << (nr & SZLONG_MASK);
-		__bi_local_irq_save(flags);
+		local_irq_save(flags);
 		retval = (mask & *a) != 0;
 		*a &= ~mask;
-		__bi_local_irq_restore(flags);
+		local_irq_restore(flags);
 
 		return retval;
 	}
+
+	smp_mb();
 }
 
 /*
@@ -365,9 +339,6 @@
 		"	" __SC	"%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
 		"	and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -387,9 +358,6 @@
 		"	" __SC	"\t%2, %1				\n"
 		"	beqz	%2, 1b					\n"
 		"	 and	%2, %0, %3				\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	pop					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -399,22 +367,20 @@
 	} else {
 		volatile unsigned long *a = addr;
 		unsigned long mask, retval;
-		__bi_flags;
+		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << (nr & SZLONG_MASK);
-		__bi_local_irq_save(flags);
+		local_irq_save(flags);
 		retval = (mask & *a) != 0;
 		*a ^= mask;
-		__bi_local_irq_restore(flags);
+		local_irq_restore(flags);
 
 		return retval;
 	}
-}
 
-#undef __bi_flags
-#undef __bi_local_irq_save
-#undef __bi_local_irq_restore
+	smp_mb();
+}
 
 #include <asm-generic/bitops/non-atomic.h>
 
diff --git a/include/asm-mips/bug.h b/include/asm-mips/bug.h
index 7b4739d..4d560a5 100644
--- a/include/asm-mips/bug.h
+++ b/include/asm-mips/bug.h
@@ -1,6 +1,7 @@
 #ifndef __ASM_BUG_H
 #define __ASM_BUG_H
 
+#include <asm/sgidefs.h>
 
 #ifdef CONFIG_BUG
 
@@ -13,6 +14,17 @@
 
 #define HAVE_ARCH_BUG
 
+#if (_MIPS_ISA > _MIPS_ISA_MIPS1)
+
+#define BUG_ON(condition)						\
+do {									\
+	__asm__ __volatile__("tne $0, %0" : : "r" (condition));		\
+} while (0)
+
+#define HAVE_ARCH_BUG_ON
+
+#endif /* _MIPS_ISA > _MIPS_ISA_MIPS1 */
+
 #endif
 
 #include <asm-generic/bug.h>
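On ISAs above MIPS I the new BUG_ON() emits a single tne (trap-if-not-equal) instruction, so the check costs one instruction and no branch in the common case. Callers are unchanged; a hypothetical fragment for illustration only:

	/* The condition is evaluated into a register and trapped on directly. */
	static void put_slot(int *slots, int nr_slots, int idx, int val)
	{
		BUG_ON(idx < 0 || idx >= nr_slots);	/* compiles to: tne $0, <cond> */
		slots[idx] = val;
	}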
diff --git a/include/asm-mips/checksum.h b/include/asm-mips/checksum.h
index a5e6050..9b768c3 100644
--- a/include/asm-mips/checksum.h
+++ b/include/asm-mips/checksum.h
@@ -27,23 +27,22 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * this is a new version of the above that records errors it finds in *errp,
  * but continues and zeros the rest of the buffer.
  */
-unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
-					 unsigned char *dst, int len,
-					 unsigned int sum, int *errp);
+__wsum csum_partial_copy_from_user(const void __user *src,
+					 void *dst, int len,
+					 __wsum sum, int *errp);
 
 /*
  * Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-static inline unsigned int csum_and_copy_to_user (const unsigned char *src,
-						  unsigned char __user *dst,
-						  int len, int sum,
+static inline __wsum csum_and_copy_to_user (const void *src, void __user *dst,
+						  int len, __wsum sum,
 						  int *err_ptr)
 {
 	might_sleep();
@@ -51,7 +50,7 @@
 
 	if (copy_to_user(dst, src, len)) {
 		*err_ptr = -EFAULT;
-		return -1;
+		return (__force __wsum)-1;
 	}
 
 	return sum;
@@ -61,13 +60,13 @@
  * the same as csum_partial, but copies from user space (but on MIPS
  * we have just one address space, so this is identical to the above)
  */
-unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst,
-				       int len, unsigned int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				       int len, __wsum sum);
 
 /*
  *	Fold a partial checksum without adding pseudo headers
  */
-static inline unsigned short int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	__asm__(
 	"	.set	push		# csum_fold\n"
@@ -82,7 +81,7 @@
 	: "=r" (sum)
 	: "0" (sum));
 
-	return sum;
+	return (__force __sum16)sum;
 }
 
 /*
@@ -92,10 +91,10 @@
  *	By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
  *	Arnt Gulbrandsen.
  */
-static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
-	unsigned int *word = (unsigned int *) iph;
-	unsigned int *stop = word + ihl;
+	const unsigned int *word = iph;
+	const unsigned int *stop = word + ihl;
 	unsigned int csum;
 	int carry;
 
@@ -123,9 +122,9 @@
 	return csum_fold(csum);
 }
 
-static inline unsigned int csum_tcpudp_nofold(unsigned long saddr,
-	unsigned long daddr, unsigned short len, unsigned short proto,
-	unsigned int sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr,
+	__be32 daddr, unsigned short len, unsigned short proto,
+	__wsum sum)
 {
 	__asm__(
 	"	.set	push		# csum_tcpudp_nofold\n"
@@ -155,9 +154,9 @@
 	: "=r" (sum)
 	: "0" (daddr), "r"(saddr),
 #ifdef __MIPSEL__
-	  "r" (((unsigned long)htons(len)<<16) + proto*256),
+	  "r" ((proto + len) << 8),
 #else
-	  "r" (((unsigned long)(proto)<<16) + len),
+	  "r" (proto + len),
 #endif
 	  "r" (sum));
 
@@ -168,11 +167,10 @@
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum)
+						   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
 }
@@ -181,17 +179,16 @@
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						     struct in6_addr *daddr,
-						     __u32 len,
-						     unsigned short proto,
-						     unsigned int sum)
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+				          const struct in6_addr *daddr,
+					  __u32 len, unsigned short proto,
+					  __wsum sum)
 {
 	__asm__(
 	"	.set	push		# csum_ipv6_magic\n"
diff --git a/include/asm-mips/compat.h b/include/asm-mips/compat.h
index 900f472..55a0152 100644
--- a/include/asm-mips/compat.h
+++ b/include/asm-mips/compat.h
@@ -32,6 +32,7 @@
 	s32	val[2];
 } compat_fsid_t;
 typedef s32		compat_timer_t;
+typedef s32		compat_key_t;
 
 typedef s32		compat_int_t;
 typedef s32		compat_long_t;
@@ -146,4 +147,71 @@
 	return (void __user *) (regs->regs[29] - len);
 }
 
+struct compat_ipc64_perm {
+	compat_key_t key;
+	__compat_uid32_t uid;
+	__compat_gid32_t gid;
+	__compat_uid32_t cuid;
+	__compat_gid32_t cgid;
+	compat_mode_t mode;
+	unsigned short seq;
+	unsigned short __pad2;
+	compat_ulong_t __unused1;
+	compat_ulong_t __unused2;
+};
+
+struct compat_semid64_ds {
+	struct compat_ipc64_perm sem_perm;
+	compat_time_t	sem_otime;
+	compat_time_t	sem_ctime;
+	compat_ulong_t	sem_nsems;
+	compat_ulong_t	__unused1;
+	compat_ulong_t	__unused2;
+};
+
+struct compat_msqid64_ds {
+	struct compat_ipc64_perm msg_perm;
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused1;
+#endif
+	compat_time_t	msg_stime;
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused1;
+#endif
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused2;
+#endif
+	compat_time_t	msg_rtime;
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused2;
+#endif
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused3;
+#endif
+	compat_time_t	msg_ctime;
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+	compat_ulong_t	__unused3;
+#endif
+	compat_ulong_t	msg_cbytes;
+	compat_ulong_t	msg_qnum;
+	compat_ulong_t	msg_qbytes;
+	compat_pid_t	msg_lspid;
+	compat_pid_t	msg_lrpid;
+	compat_ulong_t	__unused4;
+	compat_ulong_t	__unused5;
+};
+
+struct compat_shmid64_ds {
+	struct compat_ipc64_perm shm_perm;
+	compat_size_t	shm_segsz;
+	compat_time_t	shm_atime;
+	compat_time_t	shm_dtime;
+	compat_time_t	shm_ctime;
+	compat_pid_t	shm_cpid;
+	compat_pid_t	shm_lpid;
+	compat_ulong_t	shm_nattch;
+	compat_ulong_t	__unused1;
+	compat_ulong_t	__unused2;
+};
+
 #endif /* _ASM_COMPAT_H */
diff --git a/include/asm-mips/cpu-info.h b/include/asm-mips/cpu-info.h
index a2f0c8e..610d0cd 100644
--- a/include/asm-mips/cpu-info.h
+++ b/include/asm-mips/cpu-info.h
@@ -22,12 +22,12 @@
  * Descriptor for a cache
  */
 struct cache_desc {
-	unsigned short linesz;	/* Size of line in bytes */
-	unsigned short ways;	/* Number of ways */
-	unsigned short sets;	/* Number of lines per set */
 	unsigned int waysize;	/* Bytes per way */
-	unsigned int waybit;	/* Bits to select in a cache set */
-	unsigned int flags;	/* Flags describing cache properties */
+	unsigned short sets;	/* Number of lines per set */
+	unsigned char ways;	/* Number of ways */
+	unsigned char linesz;	/* Size of line in bytes */
+	unsigned char waybit;	/* Bits to select in a cache set */
+	unsigned char flags;	/* Flags describing cache properties */
 };
 
 /*
diff --git a/include/asm-mips/dec/kn02.h b/include/asm-mips/dec/kn02.h
index 8319ad7..93430b5 100644
--- a/include/asm-mips/dec/kn02.h
+++ b/include/asm-mips/dec/kn02.h
@@ -82,11 +82,9 @@
 
 #ifndef __ASSEMBLY__
 
-#include <linux/spinlock.h>
 #include <linux/types.h>
 
 extern u32 cached_kn02_csr;
-extern spinlock_t kn02_lock;
 extern void init_kn02_irqs(int base);
 #endif
 
diff --git a/include/asm-mips/device.h b/include/asm-mips/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-mips/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-mips/dma-mapping.h b/include/asm-mips/dma-mapping.h
index 4328863..236d1a4 100644
--- a/include/asm-mips/dma-mapping.h
+++ b/include/asm-mips/dma-mapping.h
@@ -63,9 +63,9 @@
 	return 128;
 }
 
-extern int dma_is_consistent(dma_addr_t dma_addr);
+extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr);
 
-extern void dma_cache_sync(void *vaddr, size_t size,
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction);
 
 #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
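dma_is_consistent() and dma_cache_sync() gain a struct device * argument (the same signature change appears in the m68k dma-mapping.h above), so the architecture can decide per device rather than globally. A hypothetical driver fragment showing the updated calling convention, illustration only:

	#include <linux/dma-mapping.h>

	/* Sync a CPU-written descriptor before the device reads it. */
	static void publish_descriptor(struct device *dev, void *desc, size_t len)
	{
		dma_cache_sync(dev, desc, len, DMA_TO_DEVICE);
	}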
diff --git a/include/asm-mips/dma.h b/include/asm-mips/dma.h
index e85849a..23f789c 100644
--- a/include/asm-mips/dma.h
+++ b/include/asm-mips/dma.h
@@ -74,7 +74,9 @@
  *
  */
 
+#ifndef GENERIC_ISA_DMA_SUPPORT_BROKEN
 #define MAX_DMA_CHANNELS	8
+#endif
 
 /*
  * The maximum address in KSEG0 that we can perform a DMA transfer to on this
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h
index ed023ea..47e5679 100644
--- a/include/asm-mips/futex.h
+++ b/include/asm-mips/futex.h
@@ -1,19 +1,21 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2006  Ralf Baechle (ralf@linux-mips.org)
+ */
 #ifndef _ASM_FUTEX_H
 #define _ASM_FUTEX_H
 
 #ifdef __KERNEL__
 
 #include <linux/futex.h>
+#include <asm/barrier.h>
 #include <asm/errno.h>
 #include <asm/uaccess.h>
 #include <asm/war.h>
 
-#ifdef CONFIG_SMP
-#define __FUTEX_SMP_SYNC "	sync					\n"
-#else
-#define __FUTEX_SMP_SYNC
-#endif
-
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)		\
 {									\
 	if (cpu_has_llsc && R10000_LLSC_WAR) {				\
@@ -27,7 +29,7 @@
 		"	.set	mips3				\n"	\
 		"2:	sc	$1, %2				\n"	\
 		"	beqzl	$1, 1b				\n"	\
-		__FUTEX_SMP_SYNC					\
+		__WEAK_ORDERING_MB					\
 		"3:						\n"	\
 		"	.set	pop				\n"	\
 		"	.set	mips0				\n"	\
@@ -53,7 +55,7 @@
 		"	.set	mips3				\n"	\
 		"2:	sc	$1, %2				\n"	\
 		"	beqz	$1, 1b				\n"	\
-		__FUTEX_SMP_SYNC					\
+		__WEAK_ORDERING_MB					\
 		"3:						\n"	\
 		"	.set	pop				\n"	\
 		"	.set	mips0				\n"	\
@@ -86,7 +88,7 @@
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -113,7 +115,7 @@
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
@@ -150,7 +152,7 @@
 		"	.set	mips3					\n"
 		"2:	sc	$1, %1					\n"
 		"	beqzl	$1, 1b					\n"
-		__FUTEX_SMP_SYNC
+		__WEAK_ORDERING_MB
 		"3:							\n"
 		"	.set	pop					\n"
 		"	.section .fixup,\"ax\"				\n"
@@ -177,7 +179,7 @@
 		"	.set	mips3					\n"
 		"2:	sc	$1, %1					\n"
 		"	beqz	$1, 1b					\n"
-		__FUTEX_SMP_SYNC
+		__WEAK_ORDERING_MB
 		"3:							\n"
 		"	.set	pop					\n"
 		"	.section .fixup,\"ax\"				\n"
diff --git a/include/asm-mips/gt64120.h b/include/asm-mips/gt64120.h
index 2edd171..4bf8e28 100644
--- a/include/asm-mips/gt64120.h
+++ b/include/asm-mips/gt64120.h
@@ -451,6 +451,13 @@
 #define GT_SDRAM_OPMODE_OP_MODE		3
 #define GT_SDRAM_OPMODE_OP_CBR		4
 
+#define GT_TC_CONTROL_ENTC0_SHF		0
+#define GT_TC_CONTROL_ENTC0_MSK		(MSK(1) << GT_TC_CONTROL_ENTC0_SHF)
+#define GT_TC_CONTROL_ENTC0_BIT		GT_TC_CONTROL_ENTC0_MSK
+#define GT_TC_CONTROL_SELTC0_SHF	1
+#define GT_TC_CONTROL_SELTC0_MSK	(MSK(1) << GT_TC_CONTROL_SELTC0_SHF)
+#define GT_TC_CONTROL_SELTC0_BIT	GT_TC_CONTROL_SELTC0_MSK
+
 
 #define GT_PCI0_BARE_SWSCS3BOOTDIS_SHF	0
 #define GT_PCI0_BARE_SWSCS3BOOTDIS_MSK	(MSK(1) << GT_PCI0_BARE_SWSCS3BOOTDIS_SHF)
@@ -523,6 +530,13 @@
 #define GT_PCI0_CMD_SWORDSWAP_MSK	(MSK(1) << GT_PCI0_CMD_SWORDSWAP_SHF)
 #define GT_PCI0_CMD_SWORDSWAP_BIT	GT_PCI0_CMD_SWORDSWAP_MSK
 
+#define GT_INTR_T0EXP_SHF		8
+#define GT_INTR_T0EXP_MSK		(MSK(1) << GT_INTR_T0EXP_SHF)
+#define GT_INTR_T0EXP_BIT		GT_INTR_T0EXP_MSK
+#define GT_INTR_RETRYCTR0_SHF		20
+#define GT_INTR_RETRYCTR0_MSK		(MSK(1) << GT_INTR_RETRYCTR0_SHF)
+#define GT_INTR_RETRYCTR0_BIT		GT_INTR_RETRYCTR0_MSK
+
 /*
  *  Misc
  */
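
The GT_TC_CONTROL_* and GT_INTR_T0EXP_*/GT_INTR_RETRYCTR0_* definitions added above describe timer/counter 0 of the GT-64120/GT-64111. A hedged sketch of how board code might drive it through the two-argument GT_WRITE() accessor (whose form is visible in the cobalt.h hunk below); GT_TC0_OFS, GT_TC_CONTROL_OFS, GT_INTRCAUSE_OFS and the 50 MHz bus-clock figure are assumptions, not taken from this patch:

	/* program counter 0 for a HZ-rate tick and enable it (assumed offsets) */
	GT_WRITE(GT_TC0_OFS, 50000000 / HZ);
	GT_WRITE(GT_TC_CONTROL_OFS,
		 GT_TC_CONTROL_ENTC0_BIT | GT_TC_CONTROL_SELTC0_BIT);

	/* in the tick handler: acknowledge the timer 0 expiry cause bit */
	GT_WRITE(GT_INTRCAUSE_OFS, ~GT_INTR_T0EXP_BIT);
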
diff --git a/include/asm-mips/highmem.h b/include/asm-mips/highmem.h
index c976bfa..f8c8182 100644
--- a/include/asm-mips/highmem.h
+++ b/include/asm-mips/highmem.h
@@ -21,6 +21,7 @@
 
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/uaccess.h>
 #include <asm/kmap_types.h>
 
 /* undef for production */
@@ -70,11 +71,16 @@
 
 static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
+	pagefault_disable();
 	return page_address(page);
 }
 
-static inline void kunmap_atomic(void *kvaddr, enum km_type type) { }
-#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
+static inline void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+	pagefault_enable();
+}
+
+#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
 
 #define kmap_atomic_to_page(ptr) virt_to_page(ptr)
 
diff --git a/include/asm-mips/i8259.h b/include/asm-mips/i8259.h
index 0214abe..4df8d8b 100644
--- a/include/asm-mips/i8259.h
+++ b/include/asm-mips/i8259.h
@@ -19,10 +19,31 @@
 
 #include <asm/io.h>
 
+/* i8259A PIC registers */
+#define PIC_MASTER_CMD		0x20
+#define PIC_MASTER_IMR		0x21
+#define PIC_MASTER_ISR		PIC_MASTER_CMD
+#define PIC_MASTER_POLL		PIC_MASTER_ISR
+#define PIC_MASTER_OCW3		PIC_MASTER_ISR
+#define PIC_SLAVE_CMD		0xa0
+#define PIC_SLAVE_IMR		0xa1
+
+/* i8259A PIC related value */
+#define PIC_CASCADE_IR		2
+#define MASTER_ICW4_DEFAULT	0x01
+#define SLAVE_ICW4_DEFAULT	0x01
+#define PIC_ICW4_AEOI		2
+
 extern spinlock_t i8259A_lock;
 
+extern void init_8259A(int auto_eoi);
+extern void enable_8259A_irq(unsigned int irq);
+extern void disable_8259A_irq(unsigned int irq);
+
 extern void init_i8259_irqs(void);
 
+#define I8259A_IRQ_BASE	0
+
 /*
  * Do the traditional i8259 interrupt polling thing.  This is for the few
  * cases where no better interrupt acknowledge method is available and we
@@ -35,15 +56,15 @@
 	spin_lock(&i8259A_lock);
 
 	/* Perform an interrupt acknowledge cycle on controller 1. */
-	outb(0x0C, 0x20);		/* prepare for poll */
-	irq = inb(0x20) & 7;
-	if (irq == 2) {
+	outb(0x0C, PIC_MASTER_CMD);		/* prepare for poll */
+	irq = inb(PIC_MASTER_CMD) & 7;
+	if (irq == PIC_CASCADE_IR) {
 		/*
 		 * Interrupt is cascaded so perform interrupt
 		 * acknowledge on controller 2.
 		 */
-		outb(0x0C, 0xA0);		/* prepare for poll */
-		irq = (inb(0xA0) & 7) + 8;
+		outb(0x0C, PIC_SLAVE_CMD);		/* prepare for poll */
+		irq = (inb(PIC_SLAVE_CMD) & 7) + 8;
 	}
 
 	if (unlikely(irq == 7)) {
@@ -54,14 +75,14 @@
 		 * significant bit is not set then there is no valid
 		 * interrupt.
 		 */
-		outb(0x0B, 0x20);		/* ISR register */
-		if(~inb(0x20) & 0x80)
+		outb(0x0B, PIC_MASTER_ISR);		/* ISR register */
+		if(~inb(PIC_MASTER_ISR) & 0x80)
 			irq = -1;
 	}
 
 	spin_unlock(&i8259A_lock);
 
-	return irq;
+	return likely(irq >= 0) ? irq + I8259A_IRQ_BASE : irq;
 }
 
 #endif /* _ASM_I8259_H */
diff --git a/include/asm-mips/io.h b/include/asm-mips/io.h
index bc5f3c5..d77b657 100644
--- a/include/asm-mips/io.h
+++ b/include/asm-mips/io.h
@@ -113,7 +113,7 @@
  *     almost all conceivable cases a device driver should not be using
  *     this function
  */
-static inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile const void *address)
 {
 	return (unsigned long)address - PAGE_OFFSET;
 }
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 35a05ca..6765708 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -24,8 +24,6 @@
 #define irq_canonicalize(irq) (irq)	/* Sane hardware, sane code ... */
 #endif
 
-extern asmlinkage unsigned int do_IRQ(unsigned int irq);
-
 #ifdef CONFIG_MIPS_MT_SMTC
 /*
  * Clear interrupt mask handling "backstop" if irq_hwmask
@@ -43,8 +41,6 @@
 #define __DO_IRQ_SMTC_HOOK() do { } while (0)
 #endif
 
-#ifdef CONFIG_PREEMPT
-
 /*
  * do_IRQ handles all normal device IRQ's (the special
  * SMP cross-CPU interrupts have their own specific
@@ -57,12 +53,10 @@
 do {									\
 	irq_enter();							\
 	__DO_IRQ_SMTC_HOOK();						\
-	__do_IRQ((irq));						\
+	generic_handle_irq(irq);					\
 	irq_exit();							\
 } while (0)
 
-#endif
-
 extern void arch_init_irq(void);
 extern void spurious_interrupt(void);
 
diff --git a/include/asm-mips/kexec.h b/include/asm-mips/kexec.h
new file mode 100644
index 0000000..b25267e
--- /dev/null
+++ b/include/asm-mips/kexec.h
@@ -0,0 +1,32 @@
+/*
+ * kexec.h for kexec
+ * Created by <nschichan@corp.free.fr> on Thu Oct 12 14:59:34 2006
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2.  See the file COPYING for more details.
+ */
+
+#ifndef _MIPS_KEXEC
+# define _MIPS_KEXEC
+
+/* Maximum physical address we can use pages from */
+#define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000)
+/* Maximum address we can reach in physical address mode */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000)
+ /* Maximum address we can use for the control code buffer */
+#define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000)
+
+#define KEXEC_CONTROL_CODE_SIZE 4096
+
+/* The native architecture */
+#define KEXEC_ARCH KEXEC_ARCH_MIPS
+
+#define MAX_NOTE_BYTES 1024
+
+static inline void crash_setup_regs(struct pt_regs *newregs,
+				    struct pt_regs *oldregs)
+{
+	/* Dummy implementation for now */
+}
+
+#endif /* !_MIPS_KEXEC */
diff --git a/include/asm-mips/mach-cobalt/cobalt.h b/include/asm-mips/mach-cobalt/cobalt.h
index b3c5ecb..00b0fc6 100644
--- a/include/asm-mips/mach-cobalt/cobalt.h
+++ b/include/asm-mips/mach-cobalt/cobalt.h
@@ -67,34 +67,9 @@
 #define COBALT_BRD_ID_QUBE2    0x5
 #define COBALT_BRD_ID_RAQ2     0x6
 
-/*
- * Galileo chipset access macros for the Cobalt. The base address for
- * the GT64111 chip is 0x14000000
- *
- * Most of this really should go into a separate GT64111 header file.
- */
-#define GT64111_IO_BASE		0x10000000UL
-#define GT64111_IO_END		0x11ffffffUL
-#define GT64111_MEM_BASE	0x12000000UL
-#define GT64111_MEM_END		0x13ffffffUL
-#define GT64111_BASE		0x14000000UL
-#define GALILEO_REG(ofs)	CKSEG1ADDR(GT64111_BASE + (unsigned long)(ofs))
-
-#define GALILEO_INL(port)	(*(volatile unsigned int *) GALILEO_REG(port))
-#define GALILEO_OUTL(val, port)						\
-do {									\
-	*(volatile unsigned int *) GALILEO_REG(port) = (val);		\
-} while (0)
-
-#define GALILEO_INTR_T0EXP	(1 << 8)
-#define GALILEO_INTR_RETRY_CTR	(1 << 20)
-
-#define GALILEO_ENTC0		0x01
-#define GALILEO_SELTC0		0x02
-
 #define PCI_CFG_SET(devfn,where)					\
-	GALILEO_OUTL((0x80000000 | (PCI_SLOT (devfn) << 11) |		\
-		(PCI_FUNC (devfn) << 8) | (where)), GT_PCI0_CFGADDR_OFS)
+	GT_WRITE(GT_PCI0_CFGADDR_OFS, (0x80000000 | (PCI_SLOT (devfn) << 11) |		\
+		(PCI_FUNC (devfn) << 8) | (where)))
 
 #define COBALT_LED_PORT		(*(volatile unsigned char *) CKSEG1ADDR(0x1c000000))
 # define COBALT_LED_BAR_LEFT	(1 << 0)	/* Qube */
diff --git a/include/asm-mips/mach-cobalt/mach-gt64120.h b/include/asm-mips/mach-cobalt/mach-gt64120.h
index 587fc43..ae9c552 100644
--- a/include/asm-mips/mach-cobalt/mach-gt64120.h
+++ b/include/asm-mips/mach-cobalt/mach-gt64120.h
@@ -1 +1,27 @@
-/* there's something here ... in the dark */
+/*
+ *  Copyright (C) 2006  Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#ifndef _COBALT_MACH_GT64120_H
+#define _COBALT_MACH_GT64120_H
+
+/*
+ * Cobalt uses GT64111. GT64111 is almost the same as GT64120.
+ */
+
+#define GT64120_BASE	CKSEG1ADDR(GT_DEF_BASE)
+
+#endif /* _COBALT_MACH_GT64120_H */
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
index 1f318d7..9985cb7 100644
--- a/include/asm-mips/mipsregs.h
+++ b/include/asm-mips/mipsregs.h
@@ -545,62 +545,6 @@
 #define MIPS_FPIR_L		(_ULCAST_(1) << 21)
 #define MIPS_FPIR_F64		(_ULCAST_(1) << 22)
 
-/*
- * R10000 performance counter definitions.
- *
- * FIXME: The R10000 performance counter opens a nice way to implement CPU
- *        time accounting with a precission of one cycle.  I don't have
- *        R10000 silicon but just a manual, so ...
- */
-
-/*
- * Events counted by counter #0
- */
-#define CE0_CYCLES			0
-#define CE0_INSN_ISSUED			1
-#define CE0_LPSC_ISSUED			2
-#define CE0_S_ISSUED			3
-#define CE0_SC_ISSUED			4
-#define CE0_SC_FAILED			5
-#define CE0_BRANCH_DECODED		6
-#define CE0_QW_WB_SECONDARY		7
-#define CE0_CORRECTED_ECC_ERRORS	8
-#define CE0_ICACHE_MISSES		9
-#define CE0_SCACHE_I_MISSES		10
-#define CE0_SCACHE_I_WAY_MISSPREDICTED	11
-#define CE0_EXT_INTERVENTIONS_REQ	12
-#define CE0_EXT_INVALIDATE_REQ		13
-#define CE0_VIRTUAL_COHERENCY_COND	14
-#define CE0_INSN_GRADUATED		15
-
-/*
- * Events counted by counter #1
- */
-#define CE1_CYCLES			0
-#define CE1_INSN_GRADUATED		1
-#define CE1_LPSC_GRADUATED		2
-#define CE1_S_GRADUATED			3
-#define CE1_SC_GRADUATED		4
-#define CE1_FP_INSN_GRADUATED		5
-#define CE1_QW_WB_PRIMARY		6
-#define CE1_TLB_REFILL			7
-#define CE1_BRANCH_MISSPREDICTED	8
-#define CE1_DCACHE_MISS			9
-#define CE1_SCACHE_D_MISSES		10
-#define CE1_SCACHE_D_WAY_MISSPREDICTED	11
-#define CE1_EXT_INTERVENTION_HITS	12
-#define CE1_EXT_INVALIDATE_REQ		13
-#define CE1_SP_HINT_TO_CEXCL_SC_BLOCKS	14
-#define CE1_SP_HINT_TO_SHARED_SC_BLOCKS	15
-
-/*
- * These flags define in which privilege mode the counters count events
- */
-#define CEB_USER	8	/* Count events in user mode, EXL = ERL = 0 */
-#define CEB_SUPERVISOR	4	/* Count events in supvervisor mode EXL = ERL = 0 */
-#define CEB_KERNEL	2	/* Count events in kernel mode EXL = ERL = 0 */
-#define CEB_EXL		1	/* Count events with EXL = 1, ERL = 0 */
-
 #ifndef __ASSEMBLY__
 
 /*
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index 85b258e..0dc1a45 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -34,7 +34,9 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/pfn.h>
 #include <asm/cpu-features.h>
+#include <asm/io.h>
 
 extern void clear_page(void * page);
 extern void copy_page(void * to, void * from);
@@ -134,8 +136,14 @@
 /* to align the pointer to the (next) page boundary */
 #define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
 
-#define __pa(x)			((unsigned long) (x) - PAGE_OFFSET)
-#define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
+#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64)
+#define __pa_page_offset(x)	((unsigned long)(x) < CKSEG0 ? PAGE_OFFSET : CKSEG0)
+#else
+#define __pa_page_offset(x)	PAGE_OFFSET
+#endif
+#define __pa(x)			((unsigned long)(x) - __pa_page_offset(x))
+#define __pa_symbol(x)		__pa(RELOC_HIDE((unsigned long)(x),0))
+#define __va(x)			((void *)((unsigned long)(x) + PAGE_OFFSET))
 
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 
@@ -160,8 +168,8 @@
 
 #endif
 
-#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr)	pfn_to_page(PFN_DOWN(virt_to_phys(kaddr)))
+#define virt_addr_valid(kaddr)	pfn_valid(PFN_DOWN(virt_to_phys(kaddr)))
 
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
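
With the new __pa_page_offset(), a 64-bit kernel that is not CONFIG_BUILD_ELF64 can take __pa() of both XKPHYS and CKSEG0 addresses. The following is a standalone illustration of the address arithmetic only; the PAGE_OFFSET value is an assumption for a typical MIPS64 layout and none of this is kernel code:

	#include <stdio.h>
	#include <stdint.h>

	#define CKSEG0      0xffffffff80000000ULL
	#define PAGE_OFFSET 0x9800000000000000ULL  /* assumed XKPHYS cached base */

	/* models the new __pa(): pick the offset by which segment x lives in */
	static uint64_t pa(uint64_t x)
	{
		return x - (x < CKSEG0 ? PAGE_OFFSET : CKSEG0);
	}

	int main(void)
	{
		printf("%#llx\n", (unsigned long long)pa(CKSEG0 + 0x100000));      /* 0x100000 */
		printf("%#llx\n", (unsigned long long)pa(PAGE_OFFSET + 0x100000)); /* 0x100000 */
		return 0;
	}
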
diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h
index d20f2e9..2fbd47e 100644
--- a/include/asm-mips/pgtable-32.h
+++ b/include/asm-mips/pgtable-32.h
@@ -156,9 +156,9 @@
 #define __pte_offset(address)						\
 	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset(dir, address)					\
-	((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address))
-#define pte_offset_kernel(dir, address) \
-	((pte_t *) pmd_page_vaddr(*(dir)) +  __pte_offset(address))
+	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
+#define pte_offset_kernel(dir, address)					\
+	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 
 #define pte_offset_map(dir, address)                                    \
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
diff --git a/include/asm-mips/pgtable-64.h b/include/asm-mips/pgtable-64.h
index 7e73203..a5b1871 100644
--- a/include/asm-mips/pgtable-64.h
+++ b/include/asm-mips/pgtable-64.h
@@ -14,6 +14,7 @@
 #include <asm/addrspace.h>
 #include <asm/page.h>
 #include <asm/cachectl.h>
+#include <asm/fixmap.h>
 
 #include <asm-generic/pgtable-nopud.h>
 
@@ -103,6 +104,13 @@
 #define VMALLOC_START		MAP_BASE
 #define VMALLOC_END	\
 	(VMALLOC_START + PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE)
+#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64) && \
+	VMALLOC_START != CKSSEG
+/* Load modules into 32bit-compatible segment. */
+#define MODULE_START	CKSSEG
+#define MODULE_END	(FIXADDR_START-2*PAGE_SIZE)
+extern pgd_t module_pg_dir[PTRS_PER_PGD];
+#endif
 
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
@@ -174,7 +182,12 @@
 #define __pmd_offset(address)	pmd_index(address)
 
 /* to find an entry in a kernel page-table-directory */
+#ifdef MODULE_START
+#define pgd_offset_k(address) \
+	((address) >= MODULE_START ? module_pg_dir : pgd_offset(&init_mm, 0UL))
+#else
 #define pgd_offset_k(address) pgd_offset(&init_mm, 0UL)
+#endif
 
 #define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
 #define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
@@ -199,9 +212,9 @@
 #define __pte_offset(address)						\
 	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset(dir, address)					\
-	((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address))
+	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 #define pte_offset_kernel(dir, address)					\
-	((pte_t *) pmd_page_vaddr(*(dir)) +  __pte_offset(address))
+	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 #define pte_offset_map(dir, address)					\
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_offset_map_nested(dir, address)				\
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index 1ca4d1e..f2e1325 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -67,7 +67,7 @@
 extern unsigned long zero_page_mask;
 
 #define ZERO_PAGE(vaddr) \
-	(virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
+	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
 
 #define __HAVE_ARCH_MOVE_PTE
 #define move_pte(pte, prot, old_addr, new_addr)				\
diff --git a/include/asm-mips/ptrace.h b/include/asm-mips/ptrace.h
index 5f3a907..30bf555 100644
--- a/include/asm-mips/ptrace.h
+++ b/include/asm-mips/ptrace.h
@@ -80,8 +80,6 @@
 #define instruction_pointer(regs) ((regs)->cp0_epc)
 #define profile_pc(regs) instruction_pointer(regs)
 
-extern void show_regs(struct pt_regs *);
-
 extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit);
 
 #endif
diff --git a/include/asm-mips/setup.h b/include/asm-mips/setup.h
index 737fa4a..70009a9 100644
--- a/include/asm-mips/setup.h
+++ b/include/asm-mips/setup.h
@@ -1,8 +1,6 @@
-#ifdef __KERNEL__
 #ifndef _MIPS_SETUP_H
 #define _MIPS_SETUP_H
 
 #define COMMAND_LINE_SIZE	256
 
 #endif /* __SETUP_H */
-#endif /* __KERNEL__ */
diff --git a/include/asm-mips/sn/klconfig.h b/include/asm-mips/sn/klconfig.h
index b63cd06..15d70ca 100644
--- a/include/asm-mips/sn/klconfig.h
+++ b/include/asm-mips/sn/klconfig.h
@@ -176,7 +176,7 @@
 /* --- New Macros for the changed kl_config_hdr_t structure --- */
 
 #define PTR_CH_MALLOC_HDR(_k)   ((klc_malloc_hdr_t *)\
-			(unsigned long)_k + (_k->ch_malloc_hdr_off)))
+			((unsigned long)_k + (_k->ch_malloc_hdr_off)))
 
 #define KL_CONFIG_CH_MALLOC_HDR(_n)   PTR_CH_MALLOC_HDR(KL_CONFIG_HDR(_n))
 
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h
index c8d5587..fc3217f 100644
--- a/include/asm-mips/spinlock.h
+++ b/include/asm-mips/spinlock.h
@@ -3,12 +3,13 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1999, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000, 06 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
 #ifndef _ASM_SPINLOCK_H
 #define _ASM_SPINLOCK_H
 
+#include <asm/barrier.h>
 #include <asm/war.h>
 
 /*
@@ -40,7 +41,6 @@
 		"	sc	%1, %0					\n"
 		"	beqzl	%1, 1b					\n"
 		"	 nop						\n"
-		"	sync						\n"
 		"	.set	reorder					\n"
 		: "=m" (lock->lock), "=&r" (tmp)
 		: "m" (lock->lock)
@@ -53,19 +53,22 @@
 		"	 li	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	beqz	%1, 1b					\n"
-		"	 sync						\n"
+		"	 nop						\n"
 		"	.set	reorder					\n"
 		: "=m" (lock->lock), "=&r" (tmp)
 		: "m" (lock->lock)
 		: "memory");
 	}
+
+	smp_mb();
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
+	smp_mb();
+
 	__asm__ __volatile__(
 	"	.set	noreorder	# __raw_spin_unlock	\n"
-	"	sync						\n"
 	"	sw	$0, %0					\n"
 	"	.set\treorder					\n"
 	: "=m" (lock->lock)
@@ -86,7 +89,6 @@
 		"	beqzl	%2, 1b					\n"
 		"	 nop						\n"
 		"	andi	%2, %0, 1				\n"
-		"	sync						\n"
 		"	.set	reorder"
 		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
 		: "m" (lock->lock)
@@ -99,13 +101,14 @@
 		"	sc	%2, %1					\n"
 		"	beqz	%2, 1b					\n"
 		"	 andi	%2, %0, 1				\n"
-		"	sync						\n"
 		"	.set	reorder"
 		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
 		: "m" (lock->lock)
 		: "memory");
 	}
 
+	smp_mb();
+
 	return res == 0;
 }
 
@@ -143,7 +146,6 @@
 		"	sc	%1, %0					\n"
 		"	beqzl	%1, 1b					\n"
 		"	 nop						\n"
-		"	sync						\n"
 		"	.set	reorder					\n"
 		: "=m" (rw->lock), "=&r" (tmp)
 		: "m" (rw->lock)
@@ -156,12 +158,14 @@
 		"	 addu	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	beqz	%1, 1b					\n"
-		"	 sync						\n"
+		"	 nop						\n"
 		"	.set	reorder					\n"
 		: "=m" (rw->lock), "=&r" (tmp)
 		: "m" (rw->lock)
 		: "memory");
 	}
+
+	smp_mb();
 }
 
 /* Note the use of sub, not subu which will make the kernel die with an
@@ -171,13 +175,14 @@
 {
 	unsigned int tmp;
 
+	smp_mb();
+
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
 		"1:	ll	%1, %2		# __raw_read_unlock	\n"
 		"	sub	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	beqzl	%1, 1b					\n"
-		"	sync						\n"
 		: "=m" (rw->lock), "=&r" (tmp)
 		: "m" (rw->lock)
 		: "memory");
@@ -188,7 +193,7 @@
 		"	sub	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	beqz	%1, 1b					\n"
-		"	 sync						\n"
+		"	 nop						\n"
 		"	.set	reorder					\n"
 		: "=m" (rw->lock), "=&r" (tmp)
 		: "m" (rw->lock)
@@ -208,7 +213,7 @@
 		"	 lui	%1, 0x8000				\n"
 		"	sc	%1, %0					\n"
 		"	beqzl	%1, 1b					\n"
-		"	 sync						\n"
+		"	 nop						\n"
 		"	.set	reorder					\n"
 		: "=m" (rw->lock), "=&r" (tmp)
 		: "m" (rw->lock)
@@ -221,18 +226,22 @@
 		"	 lui	%1, 0x8000				\n"
 		"	sc	%1, %0					\n"
 		"	beqz	%1, 1b					\n"
-		"	 sync						\n"
+		"	 nop						\n"
 		"	.set	reorder					\n"
 		: "=m" (rw->lock), "=&r" (tmp)
 		: "m" (rw->lock)
 		: "memory");
 	}
+
+	smp_mb();
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
+	smp_mb();
+
 	__asm__ __volatile__(
-	"	sync			# __raw_write_unlock	\n"
+	"				# __raw_write_unlock	\n"
 	"	sw	$0, %0					\n"
 	: "=m" (rw->lock)
 	: "m" (rw->lock)
@@ -252,11 +261,10 @@
 		"	bnez	%1, 2f					\n"
 		"	 addu	%1, 1					\n"
 		"	sc	%1, %0					\n"
-		"	beqzl	%1, 1b					\n"
 		"	.set	reorder					\n"
-#ifdef CONFIG_SMP
-		"	 sync						\n"
-#endif
+		"	beqzl	%1, 1b					\n"
+		"	 nop						\n"
+		__WEAK_ORDERING_MB
 		"	li	%2, 1					\n"
 		"2:							\n"
 		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -271,10 +279,9 @@
 		"	 addu	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	beqz	%1, 1b					\n"
+		"	 nop						\n"
 		"	.set	reorder					\n"
-#ifdef CONFIG_SMP
-		"	 sync						\n"
-#endif
+		__WEAK_ORDERING_MB
 		"	li	%2, 1					\n"
 		"2:							\n"
 		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -299,7 +306,8 @@
 		"	 lui	%1, 0x8000				\n"
 		"	sc	%1, %0					\n"
 		"	beqzl	%1, 1b					\n"
-		"	 sync						\n"
+		"	 nop						\n"
+		__WEAK_ORDERING_MB
 		"	li	%2, 1					\n"
 		"	.set	reorder					\n"
 		"2:							\n"
@@ -315,7 +323,8 @@
 		"	lui	%1, 0x8000				\n"
 		"	sc	%1, %0					\n"
 		"	beqz	%1, 1b					\n"
-		"	 sync						\n"
+		"	 nop						\n"
+		__WEAK_ORDERING_MB
 		"	li	%2, 1					\n"
 		"	.set	reorder					\n"
 		"2:							\n"
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 3056fee..9428057 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
  * Copyright (C) 1996 by Paul M. Antoine
  * Copyright (C) 1999 Silicon Graphics
  * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
@@ -16,132 +16,12 @@
 #include <linux/irqflags.h>
 
 #include <asm/addrspace.h>
+#include <asm/barrier.h>
 #include <asm/cpu-features.h>
 #include <asm/dsp.h>
 #include <asm/ptrace.h>
 #include <asm/war.h>
 
-/*
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- */
-
-#define read_barrier_depends()	do { } while(0)
-
-#ifdef CONFIG_CPU_HAS_SYNC
-#define __sync()				\
-	__asm__ __volatile__(			\
-		".set	push\n\t"		\
-		".set	noreorder\n\t"		\
-		".set	mips2\n\t"		\
-		"sync\n\t"			\
-		".set	pop"			\
-		: /* no output */		\
-		: /* no input */		\
-		: "memory")
-#else
-#define __sync()	do { } while(0)
-#endif
-
-#define __fast_iob()				\
-	__asm__ __volatile__(			\
-		".set	push\n\t"		\
-		".set	noreorder\n\t"		\
-		"lw	$0,%0\n\t"		\
-		"nop\n\t"			\
-		".set	pop"			\
-		: /* no output */		\
-		: "m" (*(int *)CKSEG1)		\
-		: "memory")
-
-#define fast_wmb()	__sync()
-#define fast_rmb()	__sync()
-#define fast_mb()	__sync()
-#define fast_iob()				\
-	do {					\
-		__sync();			\
-		__fast_iob();			\
-	} while (0)
-
-#ifdef CONFIG_CPU_HAS_WB
-
-#include <asm/wbflush.h>
-
-#define wmb()		fast_wmb()
-#define rmb()		fast_rmb()
-#define mb()		wbflush()
-#define iob()		wbflush()
-
-#else /* !CONFIG_CPU_HAS_WB */
-
-#define wmb()		fast_wmb()
-#define rmb()		fast_rmb()
-#define mb()		fast_mb()
-#define iob()		fast_iob()
-
-#endif /* !CONFIG_CPU_HAS_WB */
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
-#endif
-
-#define set_mb(var, value) \
-do { var = value; mb(); } while (0)
 
 /*
  * switch_to(n) should switch tasks to task nr n, first
@@ -217,9 +97,6 @@
 		"	.set	mips3					\n"
 		"	sc	%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
@@ -235,9 +112,6 @@
 		"	.set	mips3					\n"
 		"	sc	%2, %1					\n"
 		"	beqz	%2, 1b					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
@@ -251,6 +125,8 @@
 		local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
+	smp_mb();
+
 	return retval;
 }
 
@@ -268,9 +144,6 @@
 		"	move	%2, %z4					\n"
 		"	scd	%2, %1					\n"
 		"	beqzl	%2, 1b					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
@@ -284,9 +157,6 @@
 		"	move	%2, %z4					\n"
 		"	scd	%2, %1					\n"
 		"	beqz	%2, 1b					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"	.set	mips0					\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
 		: "R" (*m), "Jr" (val)
@@ -300,6 +170,8 @@
 		local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
+	smp_mb();
+
 	return retval;
 }
 #else
@@ -345,9 +217,6 @@
 		"	.set	mips3					\n"
 		"	sc	$1, %1					\n"
 		"	beqzl	$1, 1b					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"2:							\n"
 		"	.set	pop					\n"
 		: "=&r" (retval), "=R" (*m)
@@ -365,9 +234,6 @@
 		"	.set	mips3					\n"
 		"	sc	$1, %1					\n"
 		"	beqz	$1, 1b					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"2:							\n"
 		"	.set	pop					\n"
 		: "=&r" (retval), "=R" (*m)
@@ -383,6 +249,8 @@
 		local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
+	smp_mb();
+
 	return retval;
 }
 
@@ -402,9 +270,6 @@
 		"	move	$1, %z4					\n"
 		"	scd	$1, %1					\n"
 		"	beqzl	$1, 1b					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"2:							\n"
 		"	.set	pop					\n"
 		: "=&r" (retval), "=R" (*m)
@@ -420,9 +285,6 @@
 		"	move	$1, %z4					\n"
 		"	scd	$1, %1					\n"
 		"	beqz	$1, 1b					\n"
-#ifdef CONFIG_SMP
-		"	sync						\n"
-#endif
 		"2:							\n"
 		"	.set	pop					\n"
 		: "=&r" (retval), "=R" (*m)
@@ -438,6 +300,8 @@
 		local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
+	smp_mb();
+
 	return retval;
 }
 #else
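
Both the spinlock.h and system.h hunks drop the SYNC instructions that used to sit inside the ll/sc retry loops and instead issue smp_mb() after acquire-style operations and before release-style stores (or the weaker __WEAK_ORDERING_MB from the new asm/barrier.h). A rough userspace model of that ordering, written with C11 atomics purely as an illustration, not the kernel code:

	#include <stdatomic.h>
	#include <pthread.h>
	#include <stdio.h>

	static atomic_int lock;
	static int shared;

	static void acquire(void)
	{
		while (atomic_exchange_explicit(&lock, 1, memory_order_relaxed))
			;                                    /* spin, like the ll/sc retry loop */
		atomic_thread_fence(memory_order_seq_cst);   /* smp_mb() after the loop */
	}

	static void release(void)
	{
		atomic_thread_fence(memory_order_seq_cst);   /* smp_mb() before the store */
		atomic_store_explicit(&lock, 0, memory_order_relaxed);
	}

	static void *worker(void *arg)
	{
		for (int i = 0; i < 100000; i++) {
			acquire();
			shared++;
			release();
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;
		pthread_create(&a, NULL, worker, NULL);
		pthread_create(&b, NULL, worker, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		printf("%d\n", shared);   /* 200000 */
		return 0;
	}
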
diff --git a/include/asm-mips/time.h b/include/asm-mips/time.h
index 625acd3..a632cef 100644
--- a/include/asm-mips/time.h
+++ b/include/asm-mips/time.h
@@ -21,6 +21,7 @@
 #include <linux/ptrace.h>
 #include <linux/rtc.h>
 #include <linux/spinlock.h>
+#include <linux/clocksource.h>
 
 extern spinlock_t rtc_lock;
 
@@ -44,12 +45,10 @@
 extern void (*mips_timer_ack)(void);
 
 /*
- * High precision timer functions.
- * If mips_hpt_read is NULL, an R4k-compatible timer setup is attempted.
+ * High precision timer clocksource.
+ * If .read is NULL, an R4k-compatible timer setup is attempted.
  */
-extern unsigned int (*mips_hpt_read)(void);
-extern void (*mips_hpt_init)(void);
-extern unsigned int mips_hpt_mask;
+extern struct clocksource clocksource_mips;
 
 /*
  * to_tm() converts system time back to (year, mon, day, hour, min, sec).
diff --git a/include/asm-mips/types.h b/include/asm-mips/types.h
index 2b52e18..63a13c5 100644
--- a/include/asm-mips/types.h
+++ b/include/asm-mips/types.h
@@ -93,16 +93,6 @@
 typedef unsigned long phys_t;
 #endif
 
-#ifdef CONFIG_LBD
-typedef u64 sector_t;
-#define HAVE_SECTOR_T
-#endif
-
-#ifdef CONFIG_LSF
-typedef u64 blkcnt_t;
-#define HAVE_BLKCNT_T
-#endif
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h
index ec56aa5..696cff3 100644
--- a/include/asm-mips/unistd.h
+++ b/include/asm-mips/unistd.h
@@ -933,268 +933,6 @@
 
 #ifndef __ASSEMBLY__
 
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-	register unsigned long __a3 asm("$7"); \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"li\t$2, %2\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "=r" (__a3) \
-	: "i" (__NR_##name) \
-	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-/*
- * DANGER: This macro isn't usable for the pipe(2) call
- * which has a unusual return convention.
- */
-#define _syscall1(type,name,atype,a) \
-type name(atype a) \
-{ \
-	register unsigned long __a0 asm("$4") = (unsigned long) a; \
-	register unsigned long __a3 asm("$7"); \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"li\t$2, %3\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "=r" (__a3) \
-	: "r" (__a0), "i" (__NR_##name) \
-	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-#define _syscall2(type,name,atype,a,btype,b) \
-type name(atype a, btype b) \
-{ \
-	register unsigned long __a0 asm("$4") = (unsigned long) a; \
-	register unsigned long __a1 asm("$5") = (unsigned long) b; \
-	register unsigned long __a3 asm("$7"); \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"li\t$2, %4\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "=r" (__a3) \
-	: "r" (__a0), "r" (__a1), "i" (__NR_##name) \
-	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-#define _syscall3(type,name,atype,a,btype,b,ctype,c) \
-type name(atype a, btype b, ctype c) \
-{ \
-	register unsigned long __a0 asm("$4") = (unsigned long) a; \
-	register unsigned long __a1 asm("$5") = (unsigned long) b; \
-	register unsigned long __a2 asm("$6") = (unsigned long) c; \
-	register unsigned long __a3 asm("$7"); \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"li\t$2, %5\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "=r" (__a3) \
-	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \
-	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \
-type name(atype a, btype b, ctype c, dtype d) \
-{ \
-	register unsigned long __a0 asm("$4") = (unsigned long) a; \
-	register unsigned long __a1 asm("$5") = (unsigned long) b; \
-	register unsigned long __a2 asm("$6") = (unsigned long) c; \
-	register unsigned long __a3 asm("$7") = (unsigned long) d; \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"li\t$2, %5\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "+r" (__a3) \
-	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \
-	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-#if (_MIPS_SIM == _MIPS_SIM_ABI32)
-
-/*
- * Using those means your brain needs more than an oil change ;-)
- */
-
-#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
-type name(atype a, btype b, ctype c, dtype d, etype e) \
-{ \
-	register unsigned long __a0 asm("$4") = (unsigned long) a; \
-	register unsigned long __a1 asm("$5") = (unsigned long) b; \
-	register unsigned long __a2 asm("$6") = (unsigned long) c; \
-	register unsigned long __a3 asm("$7") = (unsigned long) d; \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"lw\t$2, %6\n\t" \
-	"subu\t$29, 32\n\t" \
-	"sw\t$2, 16($29)\n\t" \
-	"li\t$2, %5\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	"addiu\t$29, 32\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "+r" (__a3) \
-	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name), \
-	  "m" ((unsigned long)e) \
-	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \
-type name(atype a, btype b, ctype c, dtype d, etype e, ftype f) \
-{ \
-	register unsigned long __a0 asm("$4") = (unsigned long) a; \
-	register unsigned long __a1 asm("$5") = (unsigned long) b; \
-	register unsigned long __a2 asm("$6") = (unsigned long) c; \
-	register unsigned long __a3 asm("$7") = (unsigned long) d; \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"lw\t$2, %6\n\t" \
-	"lw\t$8, %7\n\t" \
-	"subu\t$29, 32\n\t" \
-	"sw\t$2, 16($29)\n\t" \
-	"sw\t$8, 20($29)\n\t" \
-	"li\t$2, %5\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	"addiu\t$29, 32\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "+r" (__a3) \
-	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name), \
-	  "m" ((unsigned long)e), "m" ((unsigned long)f) \
-	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-#endif /* (_MIPS_SIM == _MIPS_SIM_ABI32) */
-
-#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64)
-
-#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
-type name (atype a,btype b,ctype c,dtype d,etype e) \
-{ \
-	register unsigned long __a0 asm("$4") = (unsigned long) a; \
-	register unsigned long __a1 asm("$5") = (unsigned long) b; \
-	register unsigned long __a2 asm("$6") = (unsigned long) c; \
-	register unsigned long __a3 asm("$7") = (unsigned long) d; \
-	register unsigned long __a4 asm("$8") = (unsigned long) e; \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"li\t$2, %6\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "+r" (__a3) \
-	: "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), "i" (__NR_##name) \
-	: "$2", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \
-type name (atype a,btype b,ctype c,dtype d,etype e,ftype f) \
-{ \
-	register unsigned long __a0 asm("$4") = (unsigned long) a; \
-	register unsigned long __a1 asm("$5") = (unsigned long) b; \
-	register unsigned long __a2 asm("$6") = (unsigned long) c; \
-	register unsigned long __a3 asm("$7") = (unsigned long) d; \
-	register unsigned long __a4 asm("$8") = (unsigned long) e; \
-	register unsigned long __a5 asm("$9") = (unsigned long) f; \
-	unsigned long __v0; \
-	\
-	__asm__ volatile ( \
-	".set\tnoreorder\n\t" \
-	"li\t$2, %7\t\t\t# " #name "\n\t" \
-	"syscall\n\t" \
-	"move\t%0, $2\n\t" \
-	".set\treorder" \
-	: "=&r" (__v0), "+r" (__a3) \
-	: "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), "r" (__a5), \
-	  "i" (__NR_##name) \
-	: "$2", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-	  "memory"); \
-	\
-	if (__a3 == 0) \
-		return (type) __v0; \
-	errno = __v0; \
-	return (type) -1; \
-}
-
-#endif /* (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64) */
-
-
 #define __ARCH_OMIT_COMPAT_SYS_GETDENTS64
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
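
The removed _syscallN() macros were the old way for userspace to issue MIPS system calls directly; with them gone, callers are expected to go through libc's syscall(2) instead. A minimal userspace sketch:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		long pid = syscall(SYS_getpid);   /* replaces a _syscall0() wrapper */
		printf("pid=%ld\n", pid);
		return 0;
	}
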
diff --git a/include/asm-parisc/checksum.h b/include/asm-parisc/checksum.h
index 229cb56..cc3ec1b 100644
--- a/include/asm-parisc/checksum.h
+++ b/include/asm-parisc/checksum.h
@@ -15,7 +15,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern unsigned int csum_partial(const unsigned char *, int, unsigned int);
+extern __wsum csum_partial(const void *, int, __wsum);
 
 /*
  * The same as csum_partial, but copies from src while it checksums.
@@ -23,15 +23,14 @@
  * Here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern unsigned int csum_partial_copy_nocheck(const unsigned char *, unsigned char *,
-					      int, unsigned int);
+extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum);
 
 /*
  * this is a new version of the above that records errors it finds in *errp,
  * but continues and zeros the rest of the buffer.
  */
-extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
-		unsigned char *dst, int len, unsigned int sum, int *errp);
+extern __wsum csum_partial_copy_from_user(const void __user *src,
+		void *dst, int len, __wsum sum, int *errp);
 
 /*
  *	Optimized for IP headers, which always checksum on 4 octet boundaries.
@@ -39,11 +38,10 @@
  *	Written by Randolph Chung <tausq@debian.org>, and then mucked with by
  *	LaMont Jones <lamont@debian.org>
  */
-static inline unsigned short ip_fast_csum(unsigned char * iph,
-					  unsigned int ihl) {
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
 	unsigned int sum;
 
-
 	__asm__ __volatile__ (
 "	ldws,ma		4(%1), %0\n"
 "	addib,<=	-4, %2, 2f\n"
@@ -69,27 +67,27 @@
 	: "1" (iph), "2" (ihl)
 	: "r19", "r20", "r21" );
 
-	return(sum);
+	return (__force __sum16)sum;
 }
 
 /*
  *	Fold a partial checksum
  */
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
+	u32 sum = (__force u32)csum;
 	/* add the swapped two 16-bit halves of sum,
 	   a possible carry from adding the two 16-bit halves,
 	   will carry from the lower half into the upper half,
 	   giving us the correct sum in the upper half. */
 	sum += (sum << 16) + (sum >> 16);
-	return (~sum) >> 16;
+	return (__force __sum16)(~sum >> 16);
 }
  
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
-					       unsigned long daddr,
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 					       unsigned short len,
 					       unsigned short proto,
-					       unsigned int sum) 
+					       __wsum sum)
 {
 	__asm__(
 	"	add  %1, %0, %0\n"
@@ -97,19 +95,18 @@
 	"	addc %3, %0, %0\n"
 	"	addc %%r0, %0, %0\n"
 		: "=r" (sum)
-		: "r" (daddr), "r"(saddr), "r"((proto<<16)+len), "0"(sum));
-    return sum;
+		: "r" (daddr), "r"(saddr), "r"(proto+len), "0"(sum));
+	return sum;
 }
 
 /*
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum) 
+						   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -118,17 +115,17 @@
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-static inline unsigned short ip_compute_csum(unsigned char * buf, int len) {
+static inline __sum16 ip_compute_csum(const void *buf, int len)
+{
 	 return csum_fold (csum_partial(buf, len, 0));
 }
 
 
 #define _HAVE_ARCH_IPV6_CSUM
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						     struct in6_addr *daddr,
-						     __u16 len,
-						     unsigned short proto,
-						     unsigned int sum) 
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+					  const struct in6_addr *daddr,
+					  __u32 len, unsigned short proto,
+					  __wsum sum)
 {
 	__asm__ __volatile__ (
 
@@ -193,9 +190,9 @@
  *	Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-static __inline__ unsigned int csum_and_copy_to_user (const unsigned char *src,
-						      unsigned char __user *dst,
-						      int len, int sum, 
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+						      void __user *dst,
+						      int len, __wsum sum,
 						      int *err_ptr)
 {
 	/* code stolen from include/asm-mips64 */
@@ -203,7 +200,7 @@
 	 
 	if (copy_to_user(dst, src, len)) {
 		*err_ptr = -EFAULT;
-		return -1;
+		return (__force __wsum)-1;
 	}
 
 	return sum;
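
The annotated checksum routines keep the same arithmetic behind the new __wsum/__sum16 types: csum_fold() collapses a 32-bit one's-complement accumulator into the final inverted 16-bit checksum. A portable userspace model of that fold, as an illustration rather than the parisc implementation:

	#include <stdio.h>
	#include <stdint.h>

	static uint16_t fold(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);   /* add the two 16-bit halves */
		sum = (sum & 0xffff) + (sum >> 16);   /* absorb a possible carry */
		return (uint16_t)~sum;
	}

	int main(void)
	{
		/* 0x5678 + 0x1234 = 0x68ac, inverted -> 0x9753 */
		printf("%#06x\n", fold(0x12345678));
		return 0;
	}
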
diff --git a/include/asm-parisc/device.h b/include/asm-parisc/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-parisc/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-parisc/dma-mapping.h b/include/asm-parisc/dma-mapping.h
index 1e387e1..66f0b40 100644
--- a/include/asm-parisc/dma-mapping.h
+++ b/include/asm-parisc/dma-mapping.h
@@ -191,13 +191,13 @@
 }
 
 static inline int
-dma_is_consistent(dma_addr_t dma_addr)
+dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return (hppa_dma_ops->dma_sync_single_for_cpu == NULL);
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	if(hppa_dma_ops->dma_sync_single_for_cpu)
diff --git a/include/asm-parisc/dma.h b/include/asm-parisc/dma.h
index da2cf37..31ad0f0 100644
--- a/include/asm-parisc/dma.h
+++ b/include/asm-parisc/dma.h
@@ -17,10 +17,10 @@
 
 /*
 ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
-** (or rather not merge) DMA's into managable chunks.
+** (or rather not merge) DMAs into manageable chunks.
 ** On parisc, this is more of the software/tuning constraint
-** rather than the HW. I/O MMU allocation alogorithms can be
-** faster with smaller size is (to some degree).
+** rather than the HW. I/O MMU allocation algorithms can be
+** faster with smaller sizes (to some degree).
 */
 #define DMA_CHUNK_SIZE	(BITS_PER_LONG*PAGE_SIZE)
 
diff --git a/include/asm-parisc/futex.h b/include/asm-parisc/futex.h
index d84bbb2..dbee6e6 100644
--- a/include/asm-parisc/futex.h
+++ b/include/asm-parisc/futex.h
@@ -21,7 +21,7 @@
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -33,7 +33,7 @@
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
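
As in the MIPS futex.h and highmem.h hunks above, the raw inc_preempt_count()/dec_preempt_count() pair is replaced by pagefault_disable()/pagefault_enable(). A hedged kernel-context sketch of the idiom these helpers standardize; the copy helper and variables are illustrative:

	pagefault_disable();
	/* user access in this window must not sleep; a fault fails with
	 * -EFAULT instead of being handled */
	ret = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();
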
diff --git a/include/asm-parisc/pci.h b/include/asm-parisc/pci.h
index 7b8ad11..7b3be9a 100644
--- a/include/asm-parisc/pci.h
+++ b/include/asm-parisc/pci.h
@@ -149,7 +149,7 @@
 /*
 ** Most PCI devices (eg Tulip, NCR720) also export the same registers
 ** to both MMIO and I/O port space.  Due to poor performance of I/O Port
-** access under HP PCI bus adapters, strongly reccomend use of MMIO
+** access under HP PCI bus adapters, strongly recommend the use of MMIO
 ** address space.
 **
 ** While I'm at it more PA programming notes:
diff --git a/include/asm-parisc/ropes.h b/include/asm-parisc/ropes.h
index 5542dd0..007a8806 100644
--- a/include/asm-parisc/ropes.h
+++ b/include/asm-parisc/ropes.h
@@ -14,7 +14,7 @@
 #endif
 
 /*
-** The number of pdir entries to "free" before issueing
+** The number of pdir entries to "free" before issuing
 ** a read to PCOM register to flush out PCOM writes.
 ** Interacts with allocation granularity (ie 4 or 8 entries
 ** allocated and free'd/purged at a time might make this
diff --git a/include/asm-powerpc/Kbuild b/include/asm-powerpc/Kbuild
index 9827849..1e63738 100644
--- a/include/asm-powerpc/Kbuild
+++ b/include/asm-powerpc/Kbuild
@@ -17,6 +17,7 @@
 header-y += poll.h
 header-y += shmparam.h
 header-y += sockios.h
+header-y += spu_info.h
 header-y += ucontext.h
 header-y += ioctl.h
 header-y += linkage.h
diff --git a/include/asm-powerpc/cell-pmu.h b/include/asm-powerpc/cell-pmu.h
new file mode 100644
index 0000000..e8c2ebd
--- /dev/null
+++ b/include/asm-powerpc/cell-pmu.h
@@ -0,0 +1,113 @@
+/*
+ * Cell Broadband Engine Performance Monitor
+ *
+ * (C) Copyright IBM Corporation 2006
+ *
+ * Author:
+ *   David Erb (djerb@us.ibm.com)
+ *   Kevin Corry (kevcorry@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ASM_CELL_PMU_H__
+#define __ASM_CELL_PMU_H__
+
+/* The Cell PMU has four hardware performance counters, which can be
+ * configured as four 32-bit counters or eight 16-bit counters.
+ */
+#define NR_PHYS_CTRS 4
+#define NR_CTRS      (NR_PHYS_CTRS * 2)
+
+/* Macros for the pm_control register. */
+#define CBE_PM_16BIT_CTR(ctr)              (1 << (24 - ((ctr) & (NR_PHYS_CTRS - 1))))
+#define CBE_PM_ENABLE_PERF_MON             0x80000000
+#define CBE_PM_STOP_AT_MAX                 0x40000000
+#define CBE_PM_TRACE_MODE_GET(pm_control)  (((pm_control) >> 28) & 0x3)
+#define CBE_PM_TRACE_MODE_SET(mode)        (((mode)  & 0x3) << 28)
+#define CBE_PM_COUNT_MODE_SET(count)       (((count) & 0x3) << 18)
+#define CBE_PM_FREEZE_ALL_CTRS             0x00100000
+#define CBE_PM_ENABLE_EXT_TRACE            0x00008000
+
+/* Macros for the trace_address register. */
+#define CBE_PM_TRACE_BUF_FULL              0x00000800
+#define CBE_PM_TRACE_BUF_EMPTY             0x00000400
+#define CBE_PM_TRACE_BUF_DATA_COUNT(ta)    ((ta) & 0x3ff)
+#define CBE_PM_TRACE_BUF_MAX_COUNT         0x400
+
+/* Macros for the pm07_control registers. */
+#define CBE_PM_CTR_INPUT_MUX(pm07_control) (((pm07_control) >> 26) & 0x3f)
+#define CBE_PM_CTR_INPUT_CONTROL           0x02000000
+#define CBE_PM_CTR_POLARITY                0x01000000
+#define CBE_PM_CTR_COUNT_CYCLES            0x00800000
+#define CBE_PM_CTR_ENABLE                  0x00400000
+
+/* Macros for the pm_status register. */
+#define CBE_PM_CTR_OVERFLOW_INTR(ctr)      (1 << (31 - ((ctr) & 7)))
+
+enum pm_reg_name {
+	group_control,
+	debug_bus_control,
+	trace_address,
+	ext_tr_timer,
+	pm_status,
+	pm_control,
+	pm_interval,
+	pm_start_stop,
+};
+
+/* Routines for reading/writing the PMU registers. */
+extern u32  cbe_read_phys_ctr(u32 cpu, u32 phys_ctr);
+extern void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val);
+extern u32  cbe_read_ctr(u32 cpu, u32 ctr);
+extern void cbe_write_ctr(u32 cpu, u32 ctr, u32 val);
+
+extern u32  cbe_read_pm07_control(u32 cpu, u32 ctr);
+extern void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val);
+extern u32  cbe_read_pm(u32 cpu, enum pm_reg_name reg);
+extern void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val);
+
+extern u32  cbe_get_ctr_size(u32 cpu, u32 phys_ctr);
+extern void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size);
+
+extern void cbe_enable_pm(u32 cpu);
+extern void cbe_disable_pm(u32 cpu);
+
+extern void cbe_read_trace_buffer(u32 cpu, u64 *buf);
+
+extern void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
+extern void cbe_disable_pm_interrupts(u32 cpu);
+extern u32  cbe_query_pm_interrupts(u32 cpu);
+extern u32  cbe_clear_pm_interrupts(u32 cpu);
+extern void cbe_sync_irq(int node);
+
+/* Utility functions, macros */
+extern u32 cbe_get_hw_thread_id(int cpu);
+
+#define cbe_cpu_to_node(cpu) ((cpu) >> 1)
+
+#define CBE_COUNT_SUPERVISOR_MODE       0
+#define CBE_COUNT_HYPERVISOR_MODE       1
+#define CBE_COUNT_PROBLEM_MODE          2
+#define CBE_COUNT_ALL_MODES             3
+
+/* Macros for the pm07_control registers. */
+#define PM07_CTR_INPUT_MUX(x)                    (((x) & 0x3F) << 26)
+#define PM07_CTR_INPUT_CONTROL(x)                (((x) & 1) << 25)
+#define PM07_CTR_POLARITY(x)                     (((x) & 1) << 24)
+#define PM07_CTR_COUNT_CYCLES(x)                 (((x) & 1) << 23)
+#define PM07_CTR_ENABLE(x)                       (((x) & 1) << 22)
+
+#endif /* __ASM_CELL_PMU_H__ */
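
A hedged sketch of how the interfaces declared above fit together: configure physical counter 0 of a CPU to count cycles, run a workload, then read the counter back. Kernel-context pseudocode; the choice of cpu, counter and the 32-bit size is arbitrary:

	u32 cpu = 0, ctr = 0;

	cbe_write_pm07_control(cpu, ctr,
			       PM07_CTR_ENABLE(1) | PM07_CTR_COUNT_CYCLES(1));
	cbe_set_ctr_size(cpu, ctr, 32);
	cbe_write_pm(cpu, pm_control, CBE_PM_ENABLE_PERF_MON);
	cbe_enable_pm(cpu);

	/* ... run the workload being measured ... */

	cbe_disable_pm(cpu);
	pr_debug("cycles: %u\n", cbe_read_phys_ctr(cpu, ctr));
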
diff --git a/include/asm-powerpc/checksum.h b/include/asm-powerpc/checksum.h
index 609ecbb..7cdf358 100644
--- a/include/asm-powerpc/checksum.h
+++ b/include/asm-powerpc/checksum.h
@@ -14,17 +14,16 @@
  * which always checksum on 4 octet boundaries.  ihl is the number
  * of 32-bit words and is always >= 5.
  */
-extern unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl);
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /*
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-extern unsigned short csum_tcpudp_magic(unsigned long saddr,
-					unsigned long daddr,
+extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 					unsigned short len,
 					unsigned short proto,
-					unsigned int sum);
+					__wsum sum);
 
 /*
  * computes the checksum of a memory block at buff, length len,
@@ -38,8 +37,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern unsigned int csum_partial(const unsigned char * buff, int len,
-				 unsigned int sum);
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * Computes the checksum of a memory block at src, length len,
@@ -51,20 +49,15 @@
  * Like csum_partial, this must be called with even lengths,
  * except for the last fragment.
  */
-extern unsigned int csum_partial_copy_generic(const char *src, char *dst,
-					      int len, unsigned int sum,
+extern __wsum csum_partial_copy_generic(const void *src, void *dst,
+					      int len, __wsum sum,
 					      int *src_err, int *dst_err);
 /*
  * the same as csum_partial, but copies from src to dst while it
  * checksums.
  */
-unsigned int csum_partial_copy_nocheck(const char *src, 
-				       char *dst, 
-				       int len, 
-				       unsigned int sum);
-
 #define csum_partial_copy_from_user(src, dst, len, sum, errp)   \
-        csum_partial_copy_generic((src), (dst), (len), (sum), (errp), NULL)
+        csum_partial_copy_generic((__force const void *)(src), (dst), (len), (sum), (errp), NULL)
 
 #define csum_partial_copy_nocheck(src, dst, len, sum)   \
         csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
@@ -74,7 +67,7 @@
  * turns a 32-bit partial checksum (e.g. from csum_partial) into a
  * 1's complement 16-bit checksum.
  */
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	unsigned int tmp;
 
@@ -83,41 +76,32 @@
 	/* if there is a carry from adding the two 16-bit halves,
 	   it will carry from the lower half into the upper half,
 	   giving us the correct sum in the upper half. */
-	sum = ~(sum + tmp) >> 16;
-	return sum;
+	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
 }
 
 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff, len, 0));
 }
 
-#ifdef __powerpc64__
-static inline u32 csum_tcpudp_nofold(u32 saddr,
-                                     u32 daddr,
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                                      unsigned short len,
                                      unsigned short proto,
-                                     unsigned int sum)
+                                     __wsum sum)
 {
-	unsigned long s = sum;
+#ifdef __powerpc64__
+	unsigned long s = (__force u32)sum;
 
-	s += saddr;
-	s += daddr;
-	s += (proto << 16) + len;
+	s += (__force u32)saddr;
+	s += (__force u32)daddr;
+	s += proto + len;
 	s += (s >> 32);
-	return (u32) s;
-}
+	return (__force __wsum) s;
 #else
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
-						   unsigned long daddr,
-						   unsigned short len,
-						   unsigned short proto,
-						   unsigned int sum)
-{
     __asm__("\n\
 	addc %0,%0,%1 \n\
 	adde %0,%0,%2 \n\
@@ -125,10 +109,9 @@
 	addze %0,%0 \n\
 	"
 	: "=r" (sum)
-	: "r" (daddr), "r"(saddr), "r"((proto<<16)+len), "0"(sum));
-    return sum;
-}
-
+	: "r" (daddr), "r"(saddr), "r"(proto + len), "0"(sum));
+	return sum;
 #endif
+}
 #endif /* __KERNEL__ */
 #endif
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index a9a4014..6fe5c9d 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -24,6 +24,8 @@
 #define PPC_FEATURE_ICACHE_SNOOP	0x00002000
 #define PPC_FEATURE_ARCH_2_05		0x00001000
 #define PPC_FEATURE_PA6T		0x00000800
+#define PPC_FEATURE_HAS_DFP		0x00000400
+#define PPC_FEATURE_POWER6_EXT		0x00000200
 
 #define PPC_FEATURE_TRUE_LE		0x00000002
 #define PPC_FEATURE_PPC_LE		0x00000001
@@ -45,6 +47,7 @@
 	PPC_OPROFILE_POWER4 = 2,
 	PPC_OPROFILE_G4 = 3,
 	PPC_OPROFILE_BOOKE = 4,
+	PPC_OPROFILE_CELL = 5,
 };
 
 struct cpu_spec {
@@ -91,7 +94,7 @@
 
 extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;
 
-extern struct cpu_spec *identify_cpu(unsigned long offset);
+extern struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr);
 extern void do_feature_fixups(unsigned long value, void *fixup_start,
 			      void *fixup_end);
 
@@ -148,19 +151,13 @@
 #define CPU_FTR_PAUSE_ZERO		LONG_ASM_CONST(0x0000200000000000)
 #define CPU_FTR_PURR			LONG_ASM_CONST(0x0000400000000000)
 #define CPU_FTR_CELL_TB_BUG		LONG_ASM_CONST(0x0000800000000000)
+#define CPU_FTR_SPURR			LONG_ASM_CONST(0x0001000000000000)
 
 #ifndef __ASSEMBLY__
 
-#define CPU_FTR_PPCAS_ARCH_V2_BASE (CPU_FTR_SLB | \
-					CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
-					CPU_FTR_NODSISRALIGN)
-
-/* iSeries doesn't support large pages */
-#ifdef CONFIG_PPC_ISERIES
-#define CPU_FTR_PPCAS_ARCH_V2	(CPU_FTR_PPCAS_ARCH_V2_BASE)
-#else
-#define CPU_FTR_PPCAS_ARCH_V2	(CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE)
-#endif /* CONFIG_PPC_ISERIES */
+#define CPU_FTR_PPCAS_ARCH_V2	(CPU_FTR_SLB | \
+				 CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
+				 CPU_FTR_NODSISRALIGN | CPU_FTR_16M_PAGE)
 
 /* We only set the altivec features if the kernel was compiled with altivec
  * support
@@ -311,7 +308,8 @@
 #define CPU_FTRS_E500_2	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
 	    CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN)
 #define CPU_FTRS_GENERIC_32	(CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
-#ifdef __powerpc64__
+
+/* 64-bit CPUs */
 #define CPU_FTRS_POWER3	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE)
 #define CPU_FTRS_RS64	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
@@ -332,7 +330,13 @@
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
-	    CPU_FTR_PURR | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_REAL_LE)
+	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE)
+#define CPU_FTRS_POWER6X (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
+	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
+	    CPU_FTR_PURR | CPU_FTR_CI_LARGE_PAGE | \
+	    CPU_FTR_SPURR | CPU_FTR_REAL_LE)
 #define CPU_FTRS_CELL	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -343,7 +347,6 @@
 	    CPU_FTR_PURR | CPU_FTR_REAL_LE)
 #define CPU_FTRS_COMPATIBLE	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
-#endif
 
 #ifdef __powerpc64__
 #define CPU_FTRS_POSSIBLE	\
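
A new feature bit such as CPU_FTR_SPURR is normally consumed through the usual CPU feature test; a hedged sketch, assuming the cpu_has_feature() helper from this header and SPRN_SPURR from asm/reg.h:

	/* Sketch only: cpu_has_feature() and SPRN_SPURR are assumed to be
	 * available; CPU_FTR_SPURR is the bit added in this hunk. */
	static unsigned long sample_spurr(void)
	{
		if (!cpu_has_feature(CPU_FTR_SPURR))
			return 0;
		return mfspr(SPRN_SPURR);	/* scaled PURR, e.g. on POWER6 */
	}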
diff --git a/include/asm-powerpc/dbdma.h b/include/asm-powerpc/dbdma.h
index 8973565..e23f07e 100644
--- a/include/asm-powerpc/dbdma.h
+++ b/include/asm-powerpc/dbdma.h
@@ -95,7 +95,13 @@
 #define DBDMA_DO_STOP(regs) do {				\
 	out_le32(&((regs)->control), (RUN|FLUSH)<<16);		\
 	while(in_le32(&((regs)->status)) & (ACTIVE|FLUSH))	\
-		;						\
+		; \
+} while(0)
+
+#define DBDMA_DO_RESET(regs) do {				\
+	out_le32(&((regs)->control), (ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)<<16);\
+	while(in_le32(&((regs)->status)) & (RUN)) \
+		; \
 } while(0)
 
 #endif /* _ASM_DBDMA_H_ */
diff --git a/include/asm-powerpc/dcr-mmio.h b/include/asm-powerpc/dcr-mmio.h
new file mode 100644
index 0000000..5dbfca8
--- /dev/null
+++ b/include/asm-powerpc/dcr-mmio.h
@@ -0,0 +1,51 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                    <benh@kernel.crashing.org>
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_DCR_MMIO_H
+#define _ASM_POWERPC_DCR_MMIO_H
+#ifdef __KERNEL__
+
+#include <asm/io.h>
+
+typedef struct { void __iomem *token; unsigned int stride; } dcr_host_t;
+
+#define DCR_MAP_OK(host)	((host).token != NULL)
+
+extern dcr_host_t dcr_map(struct device_node *dev, unsigned int dcr_n,
+			  unsigned int dcr_c);
+extern void dcr_unmap(dcr_host_t host, unsigned int dcr_n, unsigned int dcr_c);
+
+static inline u32 dcr_read(dcr_host_t host, unsigned int dcr_n)
+{
+	return in_be32(host.token + dcr_n * host.stride);
+}
+
+static inline void dcr_write(dcr_host_t host, unsigned int dcr_n, u32 value)
+{
+	out_be32(host.token + dcr_n * host.stride, value);
+}
+
+extern u64 of_translate_dcr_address(struct device_node *dev,
+				    unsigned int dcr_n,
+				    unsigned int *stride);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_MMIO_H */
+
+
diff --git a/include/asm-powerpc/dcr-native.h b/include/asm-powerpc/dcr-native.h
new file mode 100644
index 0000000..fd4a5f5
--- /dev/null
+++ b/include/asm-powerpc/dcr-native.h
@@ -0,0 +1,39 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                    <benh@kernel.crashing.org>
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_DCR_NATIVE_H
+#define _ASM_POWERPC_DCR_NATIVE_H
+#ifdef __KERNEL__
+
+#include <asm/reg.h>
+
+typedef struct {} dcr_host_t;
+
+#define DCR_MAP_OK(host)	(1)
+
+#define dcr_map(dev, dcr_n, dcr_c)	{}
+#define dcr_unmap(host, dcr_n, dcr_c)	{}
+#define dcr_read(host, dcr_n)		mfdcr(dcr_n)
+#define dcr_write(host, dcr_n, value)	mtdcr(dcr_n, value)
+
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_NATIVE_H */
+
+
diff --git a/include/asm-powerpc/dcr.h b/include/asm-powerpc/dcr.h
new file mode 100644
index 0000000..473f2c7
--- /dev/null
+++ b/include/asm-powerpc/dcr.h
@@ -0,0 +1,42 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                    <benh@kernel.crashing.org>
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_DCR_H
+#define _ASM_POWERPC_DCR_H
+#ifdef __KERNEL__
+
+#ifdef CONFIG_PPC_DCR_NATIVE
+#include <asm/dcr-native.h>
+#else
+#include <asm/dcr-mmio.h>
+#endif
+
+/*
+ * On CONFIG_PPC_MERGE, we have additional helpers to read the DCR
+ * base from the device-tree
+ */
+#ifdef CONFIG_PPC_MERGE
+extern unsigned int dcr_resource_start(struct device_node *np,
+				       unsigned int index);
+extern unsigned int dcr_resource_len(struct device_node *np,
+				     unsigned int index);
+#endif /* CONFIG_PPC_MERGE */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_H */
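
Taken together, dcr.h, dcr-native.h and dcr-mmio.h give drivers a single map/read/write interface whether DCRs are reached with mfdcr/mtdcr or through MMIO. A minimal usage sketch, written against the MMIO variant (error handling abbreviated; the dcr-reg layout of the node is an assumption):

	/* Sketch: map the DCR block described by the first dcr-reg entry of
	 * "np" and poke one register.  Register numbers are the absolute DCR
	 * numbers, as in the MMIO accessors above. */
	static int example_dcr_probe(struct device_node *np)
	{
		unsigned int base = dcr_resource_start(np, 0);
		unsigned int len  = dcr_resource_len(np, 0);
		dcr_host_t host;
		u32 val;

		host = dcr_map(np, base, len);
		if (!DCR_MAP_OK(host))
			return -ENODEV;

		val = dcr_read(host, base);
		dcr_write(host, base, val | 0x1);	/* illustrative bit set */

		dcr_unmap(host, base, len);
		return 0;
	}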
diff --git a/include/asm-powerpc/device.h b/include/asm-powerpc/device.h
new file mode 100644
index 0000000..228ab2a
--- /dev/null
+++ b/include/asm-powerpc/device.h
@@ -0,0 +1,24 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#ifndef _ASM_POWERPC_DEVICE_H
+#define _ASM_POWERPC_DEVICE_H
+
+struct dma_mapping_ops;
+struct device_node;
+
+struct dev_archdata {
+	/* Optional pointer to an OF device node */
+	struct device_node	*of_node;
+
+	/* DMA operations on that device */
+	struct dma_mapping_ops	*dma_ops;
+	void			*dma_data;
+
+	/* NUMA node if applicable */
+	int			numa_node;
+};
+
+#endif /* _ASM_POWERPC_DEVICE_H */
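
struct dev_archdata is meant to hang off struct device (the corresponding driver-core change is assumed here), so bus code can attach the firmware node and DMA operations when it creates a device. A hedged sketch of such a setup, with the helper name invented for illustration:

	/* Hypothetical bus helper: dma_direct_ops is one of the generic op
	 * sets declared in asm/dma-mapping.h later in this series. */
	static void example_setup_archdata(struct device *dev,
					   struct device_node *np)
	{
		dev->archdata.of_node   = np;
		dev->archdata.numa_node = -1;		/* no NUMA affinity known */
		dev->archdata.dma_ops   = &dma_direct_ops;
		dev->archdata.dma_data  = NULL;
	}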
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 2ab9baf..7c7de87 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -44,26 +44,150 @@
 #endif /* ! CONFIG_NOT_COHERENT_CACHE */
 
 #ifdef CONFIG_PPC64
+/*
+ * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
+ */
+struct dma_mapping_ops {
+	void *		(*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag);
+	void		(*free_coherent)(struct device *dev, size_t size,
+				void *vaddr, dma_addr_t dma_handle);
+	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
+				size_t size, enum dma_data_direction direction);
+	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
+				size_t size, enum dma_data_direction direction);
+	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction);
+	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction);
+	int		(*dma_supported)(struct device *dev, u64 mask);
+	int		(*dac_dma_supported)(struct device *dev, u64 mask);
+	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
+};
 
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag);
-extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t dma_handle);
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-		size_t size, enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-		size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nhwentries, enum dma_data_direction direction);
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+{
+	/* We don't handle the NULL dev case for ISA for now; we could
+	 * do it via an out-of-line call, but it isn't needed yet. The
+	 * only ISA DMA device we support is the floppy, and we have a hack
+	 * in the floppy driver directly to get a device for us.
+	 */
+	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
+		return NULL;
+	return dev->archdata.dma_ops;
+}
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	if (unlikely(dma_ops == NULL))
+		return 0;
+	if (dma_ops->dma_supported == NULL)
+		return 1;
+	return dma_ops->dma_supported(dev, mask);
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	if (unlikely(dma_ops == NULL))
+		return -EIO;
+	if (dma_ops->set_dma_mask != NULL)
+		return dma_ops->set_dma_mask(dev, dma_mask);
+	if (!dev->dma_mask || !dma_supported(dev, *dev->dma_mask))
+		return -EIO;
+	*dev->dma_mask = dma_mask;
+	return 0;
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+					size_t size,
+					enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	return dma_ops->map_single(dev, cpu_addr, size, direction);
+}
+
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+				    size_t size,
+				    enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->unmap_single(dev, dma_addr, size, direction);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      unsigned long offset, size_t size,
+				      enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	return dma_ops->map_single(dev, page_address(page) + offset, size,
+			direction);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+				  size_t size,
+				  enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->unmap_single(dev, dma_address, size, direction);
+}
+
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+			     int nents, enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	return dma_ops->map_sg(dev, sg, nents, direction);
+}
+
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+				int nhwentries,
+				enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->unmap_sg(dev, sg, nhwentries, direction);
+}
+
+
+/*
+ * Available generic sets of operations
+ */
+extern struct dma_mapping_ops dma_iommu_ops;
+extern struct dma_mapping_ops dma_direct_ops;
+
+extern unsigned long dma_direct_offset;
 
 #else /* CONFIG_PPC64 */
 
@@ -218,9 +342,9 @@
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #ifdef CONFIG_NOT_COHERENT_CACHE
-#define dma_is_consistent(d)	(0)
+#define dma_is_consistent(d, h)	(0)
 #else
-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)
 #endif
 
 static inline int dma_get_cache_alignment(void)
@@ -254,32 +378,12 @@
 	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
 	__dma_sync(vaddr, size, (int)direction);
 }
 
-/*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
-	void *		(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-	void		(*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
-				size_t size, enum dma_data_direction direction);
-	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-				size_t size, enum dma_data_direction direction);
-	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction);
-	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction);
-	int		(*dma_supported)(struct device *dev, u64 mask);
-	int		(*dac_dma_supported)(struct device *dev, u64 mask);
-};
-
 #endif /* __KERNEL__ */
 #endif	/* _ASM_DMA_MAPPING_H */
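
With the ops table now living in dev->archdata, the generic calls dispatch per device instead of through a single global table, but the driver-visible API is unchanged. A brief round-trip sketch (DMA_32BIT_MASK from linux/dma-mapping.h is assumed):

	/* Sketch: whichever ops the bus installed (dma_iommu_ops,
	 * dma_direct_ops or a platform-specific set) end up running here. */
	static int example_dma(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		if (!dma_supported(dev, DMA_32BIT_MASK))
			return -EIO;

		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		/* ... hand "handle" to the hardware, wait for completion ... */
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}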
diff --git a/include/asm-powerpc/eeh.h b/include/asm-powerpc/eeh.h
index 6a78439..b886bec 100644
--- a/include/asm-powerpc/eeh.h
+++ b/include/asm-powerpc/eeh.h
@@ -120,10 +120,6 @@
 		return eeh_check_failure(addr, val);
 	return val;
 }
-static inline void eeh_writeb(u8 val, volatile void __iomem *addr)
-{
-	out_8(addr, val);
-}
 
 static inline u16 eeh_readw(const volatile void __iomem *addr)
 {
@@ -132,21 +128,6 @@
 		return eeh_check_failure(addr, val);
 	return val;
 }
-static inline void eeh_writew(u16 val, volatile void __iomem *addr)
-{
-	out_le16(addr, val);
-}
-static inline u16 eeh_raw_readw(const volatile void __iomem *addr)
-{
-	u16 val = in_be16(addr);
-	if (EEH_POSSIBLE_ERROR(val, u16))
-		return eeh_check_failure(addr, val);
-	return val;
-}
-static inline void eeh_raw_writew(u16 val, volatile void __iomem *addr) {
-	volatile u16 __iomem *vaddr = (volatile u16 __iomem *) addr;
-	out_be16(vaddr, val);
-}
 
 static inline u32 eeh_readl(const volatile void __iomem *addr)
 {
@@ -155,21 +136,6 @@
 		return eeh_check_failure(addr, val);
 	return val;
 }
-static inline void eeh_writel(u32 val, volatile void __iomem *addr)
-{
-	out_le32(addr, val);
-}
-static inline u32 eeh_raw_readl(const volatile void __iomem *addr)
-{
-	u32 val = in_be32(addr);
-	if (EEH_POSSIBLE_ERROR(val, u32))
-		return eeh_check_failure(addr, val);
-	return val;
-}
-static inline void eeh_raw_writel(u32 val, volatile void __iomem *addr)
-{
-	out_be32(addr, val);
-}
 
 static inline u64 eeh_readq(const volatile void __iomem *addr)
 {
@@ -178,182 +144,67 @@
 		return eeh_check_failure(addr, val);
 	return val;
 }
-static inline void eeh_writeq(u64 val, volatile void __iomem *addr)
+
+static inline u16 eeh_readw_be(const volatile void __iomem *addr)
 {
-	out_le64(addr, val);
+	u16 val = in_be16(addr);
+	if (EEH_POSSIBLE_ERROR(val, u16))
+		return eeh_check_failure(addr, val);
+	return val;
 }
-static inline u64 eeh_raw_readq(const volatile void __iomem *addr)
+
+static inline u32 eeh_readl_be(const volatile void __iomem *addr)
+{
+	u32 val = in_be32(addr);
+	if (EEH_POSSIBLE_ERROR(val, u32))
+		return eeh_check_failure(addr, val);
+	return val;
+}
+
+static inline u64 eeh_readq_be(const volatile void __iomem *addr)
 {
 	u64 val = in_be64(addr);
 	if (EEH_POSSIBLE_ERROR(val, u64))
 		return eeh_check_failure(addr, val);
 	return val;
 }
-static inline void eeh_raw_writeq(u64 val, volatile void __iomem *addr)
-{
-	out_be64(addr, val);
-}
 
-#define EEH_CHECK_ALIGN(v,a) \
-	((((unsigned long)(v)) & ((a) - 1)) == 0)
-
-static inline void eeh_memset_io(volatile void __iomem *addr, int c,
-				 unsigned long n)
-{
-	void *p = (void __force *)addr;
-	u32 lc = c;
-	lc |= lc << 8;
-	lc |= lc << 16;
-
-	__asm__ __volatile__ ("sync" : : : "memory");
-	while(n && !EEH_CHECK_ALIGN(p, 4)) {
-		*((volatile u8 *)p) = c;
-		p++;
-		n--;
-	}
-	while(n >= 4) {
-		*((volatile u32 *)p) = lc;
-		p += 4;
-		n -= 4;
-	}
-	while(n) {
-		*((volatile u8 *)p) = c;
-		p++;
-		n--;
-	}
-	__asm__ __volatile__ ("sync" : : : "memory");
-}
-static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *src,
+static inline void eeh_memcpy_fromio(void *dest, const
+				     volatile void __iomem *src,
 				     unsigned long n)
 {
-	void *vsrc = (void __force *) src;
-	void *destsave = dest;
-	unsigned long nsave = n;
-
-	__asm__ __volatile__ ("sync" : : : "memory");
-	while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
-		*((u8 *)dest) = *((volatile u8 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
-		vsrc++;
-		dest++;
-		n--;
-	}
-	while(n > 4) {
-		*((u32 *)dest) = *((volatile u32 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
-		vsrc += 4;
-		dest += 4;
-		n -= 4;
-	}
-	while(n) {
-		*((u8 *)dest) = *((volatile u8 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
-		vsrc++;
-		dest++;
-		n--;
-	}
-	__asm__ __volatile__ ("sync" : : : "memory");
+	_memcpy_fromio(dest, src, n);
 
 	/* Look for ffff's here at dest[n].  Assume that at least 4 bytes
 	 * were copied. Check all four bytes.
 	 */
-	if ((nsave >= 4) &&
-		(EEH_POSSIBLE_ERROR((*((u32 *) destsave+nsave-4)), u32))) {
-		eeh_check_failure(src, (*((u32 *) destsave+nsave-4)));
-	}
-}
-
-static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src,
-				   unsigned long n)
-{
-	void *vdest = (void __force *) dest;
-
-	__asm__ __volatile__ ("sync" : : : "memory");
-	while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
-		*((volatile u8 *)vdest) = *((u8 *)src);
-		src++;
-		vdest++;
-		n--;
-	}
-	while(n > 4) {
-		*((volatile u32 *)vdest) = *((volatile u32 *)src);
-		src += 4;
-		vdest += 4;
-		n-=4;
-	}
-	while(n) {
-		*((volatile u8 *)vdest) = *((u8 *)src);
-		src++;
-		vdest++;
-		n--;
-	}
-	__asm__ __volatile__ ("sync" : : : "memory");
-}
-
-#undef EEH_CHECK_ALIGN
-
-static inline u8 eeh_inb(unsigned long port)
-{
-	u8 val;
-	val = in_8((u8 __iomem *)(port+pci_io_base));
-	if (EEH_POSSIBLE_ERROR(val, u8))
-		return eeh_check_failure((void __iomem *)(port), val);
-	return val;
-}
-
-static inline void eeh_outb(u8 val, unsigned long port)
-{
-	out_8((u8 __iomem *)(port+pci_io_base), val);
-}
-
-static inline u16 eeh_inw(unsigned long port)
-{
-	u16 val;
-	val = in_le16((u16 __iomem *)(port+pci_io_base));
-	if (EEH_POSSIBLE_ERROR(val, u16))
-		return eeh_check_failure((void __iomem *)(port), val);
-	return val;
-}
-
-static inline void eeh_outw(u16 val, unsigned long port)
-{
-	out_le16((u16 __iomem *)(port+pci_io_base), val);
-}
-
-static inline u32 eeh_inl(unsigned long port)
-{
-	u32 val;
-	val = in_le32((u32 __iomem *)(port+pci_io_base));
-	if (EEH_POSSIBLE_ERROR(val, u32))
-		return eeh_check_failure((void __iomem *)(port), val);
-	return val;
-}
-
-static inline void eeh_outl(u32 val, unsigned long port)
-{
-	out_le32((u32 __iomem *)(port+pci_io_base), val);
+	if (n >= 4 && EEH_POSSIBLE_ERROR(*((u32 *)(dest + n - 4)), u32))
+		eeh_check_failure(src, *((u32 *)(dest + n - 4)));
 }
 
 /* in-string eeh macros */
-static inline void eeh_insb(unsigned long port, void * buf, int ns)
+static inline void eeh_readsb(const volatile void __iomem *addr, void * buf,
+			      int ns)
 {
-	_insb((u8 __iomem *)(port+pci_io_base), buf, ns);
+	_insb(addr, buf, ns);
 	if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8))
-		eeh_check_failure((void __iomem *)(port), *(u8*)buf);
+		eeh_check_failure(addr, *(u8*)buf);
 }
 
-static inline void eeh_insw_ns(unsigned long port, void * buf, int ns)
+static inline void eeh_readsw(const volatile void __iomem *addr, void * buf,
+			      int ns)
 {
-	_insw_ns((u16 __iomem *)(port+pci_io_base), buf, ns);
+	_insw(addr, buf, ns);
 	if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16))
-		eeh_check_failure((void __iomem *)(port), *(u16*)buf);
+		eeh_check_failure(addr, *(u16*)buf);
 }
 
-static inline void eeh_insl_ns(unsigned long port, void * buf, int nl)
+static inline void eeh_readsl(const volatile void __iomem *addr, void * buf,
+			      int nl)
 {
-	_insl_ns((u32 __iomem *)(port+pci_io_base), buf, nl);
+	_insl(addr, buf, nl);
 	if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32))
-		eeh_check_failure((void __iomem *)(port), *(u32*)buf);
+		eeh_check_failure(addr, *(u32*)buf);
 }
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index 9a83a98..d36426c 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -124,12 +124,10 @@
 # define ELF_DATA	ELFDATA2MSB
   typedef elf_greg_t64 elf_greg_t;
   typedef elf_gregset_t64 elf_gregset_t;
-# define elf_addr_t unsigned long
 #else
   /* Assumption: ELF_ARCH == EM_PPC and ELF_CLASS == ELFCLASS32 */
   typedef elf_greg_t32 elf_greg_t;
   typedef elf_gregset_t32 elf_gregset_t;
-# define elf_addr_t __u32
 #endif /* ELF_ARCH */
 
 /* Floating point registers */
@@ -411,4 +409,17 @@
 /* Keep this the last entry.  */
 #define R_PPC64_NUM		107
 
+#ifdef CONFIG_SPU_BASE
+/* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
+#define NT_SPU		1
+
+extern int arch_notes_size(void);
+extern void arch_write_notes(struct file *file);
+
+#define ELF_CORE_EXTRA_NOTES_SIZE arch_notes_size()
+#define ELF_CORE_WRITE_EXTRA_NOTES arch_write_notes(file)
+
+#define ARCH_HAVE_EXTRA_ELF_NOTES
+#endif /* CONFIG_SPU_BASE */
+
 #endif /* _ASM_POWERPC_ELF_H */
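
The two arch_* hooks are called from the generic ELF core-dump path to reserve space for and then emit the extra SPU notes. A bare skeleton of what an implementation might provide (the note layout and helpers are assumptions, not the actual Cell code):

	/* Placeholder skeleton only. */
	int arch_notes_size(void)
	{
		/* return total bytes of "SPU/<fd>/<filename>" notes to append */
		return 0;
	}

	void arch_write_notes(struct file *file)
	{
		/* emit one NT_SPU note per SPU context into the core file */
	}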
diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h
index fdf9aff..98f7b62 100644
--- a/include/asm-powerpc/firmware.h
+++ b/include/asm-powerpc/firmware.h
@@ -42,6 +42,7 @@
 #define FW_FEATURE_SPLPAR	ASM_CONST(0x0000000000100000)
 #define FW_FEATURE_ISERIES	ASM_CONST(0x0000000000200000)
 #define FW_FEATURE_LPAR		ASM_CONST(0x0000000000400000)
+#define FW_FEATURE_PS3_LV1	ASM_CONST(0x0000000000800000)
 
 #ifndef __ASSEMBLY__
 
@@ -58,6 +59,10 @@
 	FW_FEATURE_PSERIES_ALWAYS = 0,
 	FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
 	FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
+	FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+	FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+	FW_FEATURE_NATIVE_POSSIBLE = 0,
+	FW_FEATURE_NATIVE_ALWAYS = 0,
 	FW_FEATURE_POSSIBLE =
 #ifdef CONFIG_PPC_PSERIES
 		FW_FEATURE_PSERIES_POSSIBLE |
@@ -65,6 +70,12 @@
 #ifdef CONFIG_PPC_ISERIES
 		FW_FEATURE_ISERIES_POSSIBLE |
 #endif
+#ifdef CONFIG_PPC_PS3
+		FW_FEATURE_PS3_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_NATIVE
+		FW_FEATURE_NATIVE_ALWAYS |
+#endif
 		0,
 	FW_FEATURE_ALWAYS =
 #ifdef CONFIG_PPC_PSERIES
@@ -73,6 +84,12 @@
 #ifdef CONFIG_PPC_ISERIES
 		FW_FEATURE_ISERIES_ALWAYS &
 #endif
+#ifdef CONFIG_PPC_PS3
+		FW_FEATURE_PS3_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_NATIVE
+		FW_FEATURE_NATIVE_ALWAYS &
+#endif
 		FW_FEATURE_POSSIBLE,
 
 #else /* CONFIG_PPC64 */
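
The PS3 and native entries follow the existing POSSIBLE/ALWAYS pattern, so platform code keeps using the same feature test; a sketch assuming the firmware_has_feature() helper defined elsewhere in this header:

	/* Sketch: features in every configured platform's ALWAYS mask fold to
	 * a constant; the rest are tested against the runtime feature word. */
	static void example_detect(void)
	{
		if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
			/* running on the PS3 LV1 hypervisor */
		} else if (firmware_has_feature(FW_FEATURE_LPAR)) {
			/* some other hypervisor (pSeries/iSeries partition) */
		}
	}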
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
index 936422e..3f3673f 100644
--- a/include/asm-powerpc/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -43,7 +43,7 @@
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -65,7 +65,7 @@
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
diff --git a/include/asm-powerpc/hw_irq.h b/include/asm-powerpc/hw_irq.h
index d403592..d604863 100644
--- a/include/asm-powerpc/hw_irq.h
+++ b/include/asm-powerpc/hw_irq.h
@@ -7,16 +7,40 @@
 #ifdef __KERNEL__
 
 #include <linux/errno.h>
+#include <linux/compiler.h>
 #include <asm/ptrace.h>
 #include <asm/processor.h>
 
 extern void timer_interrupt(struct pt_regs *);
 
-#ifdef CONFIG_PPC_ISERIES
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
 
-extern unsigned long local_get_flags(void);
-extern unsigned long local_irq_disable(void);
+static inline unsigned long local_get_flags(void)
+{
+	unsigned long flags;
+
+	__asm__ __volatile__("lbz %0,%1(13)"
+	: "=r" (flags)
+	: "i" (offsetof(struct paca_struct, soft_enabled)));
+
+	return flags;
+}
+
+static inline unsigned long local_irq_disable(void)
+{
+	unsigned long flags, zero;
+
+	__asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)"
+	: "=r" (flags), "=&r" (zero)
+	: "i" (offsetof(struct paca_struct, soft_enabled))
+	: "memory");
+
+	return flags;
+}
+
 extern void local_irq_restore(unsigned long);
+extern void iseries_handle_interrupts(void);
 
 #define local_irq_enable()	local_irq_restore(1)
 #define local_save_flags(flags)	((flags) = local_get_flags())
@@ -24,17 +48,14 @@
 
 #define irqs_disabled()		(local_get_flags() == 0)
 
+#define hard_irq_enable()	__mtmsrd(mfmsr() | MSR_EE, 1)
+#define hard_irq_disable()	__mtmsrd(mfmsr() & ~MSR_EE, 1)
+
 #else
 
 #if defined(CONFIG_BOOKE)
 #define SET_MSR_EE(x)	mtmsr(x)
 #define local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
-#elif defined(__powerpc64__)
-#define SET_MSR_EE(x)	__mtmsrd(x, 1)
-#define local_irq_restore(flags) do { \
-	__asm__ __volatile__("": : :"memory"); \
-	__mtmsrd((flags), 1); \
-} while(0)
 #else
 #define SET_MSR_EE(x)	mtmsr(x)
 #define local_irq_restore(flags)	mtmsr(flags)
@@ -81,7 +102,10 @@
 #define local_irq_save(flags)	local_irq_save_ptr(&flags)
 #define irqs_disabled()		((mfmsr() & MSR_EE) == 0)
 
-#endif /* CONFIG_PPC_ISERIES */
+#define hard_irq_enable()	local_irq_enable()
+#define hard_irq_disable()	local_irq_disable()
+
+#endif /* CONFIG_PPC64 */
 
 #define mask_irq(irq)						\
 	({							\
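
On 64-bit the flags now mirror the paca soft_enabled byte rather than MSR[EE], so local_irq_disable() only records that interrupts are logically off and hard_irq_disable() is the way to really clear MSR[EE]. Driver-side usage is unchanged; a short reminder of the pattern (the suspend-style use of the hard variant is an assumption):

	/* Sketch: the usual save/restore pair, plus the new hard disable. */
	static void example_critical(void)
	{
		unsigned long flags;

		local_irq_save(flags);		/* soft-disable on 64-bit */
		/* ... short section that must not see interrupts ... */
		local_irq_restore(flags);

		hard_irq_disable();		/* really clear MSR[EE], e.g. before sleep */
		/* ... enter low-power state ... */
		hard_irq_enable();
	}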
diff --git a/include/asm-powerpc/ibmebus.h b/include/asm-powerpc/ibmebus.h
index 3493429b..6611211 100644
--- a/include/asm-powerpc/ibmebus.h
+++ b/include/asm-powerpc/ibmebus.h
@@ -44,7 +44,6 @@
 #include <linux/mod_devicetable.h>
 #include <asm/of_device.h>
 
-extern struct dma_mapping_ops ibmebus_dma_ops;
 extern struct bus_type ibmebus_bus_type;
 
 struct ibmebus_dev {	
diff --git a/include/asm-powerpc/ide.h b/include/asm-powerpc/ide.h
index c8390f9..0f66f0f 100644
--- a/include/asm-powerpc/ide.h
+++ b/include/asm-powerpc/ide.h
@@ -22,10 +22,10 @@
 #endif
 #endif
 
-#define __ide_mm_insw(p, a, c)	_insw_ns((volatile u16 __iomem *)(p), (a), (c))
-#define __ide_mm_insl(p, a, c)	_insl_ns((volatile u32 __iomem *)(p), (a), (c))
-#define __ide_mm_outsw(p, a, c)	_outsw_ns((volatile u16 __iomem *)(p), (a), (c))
-#define __ide_mm_outsl(p, a, c)	_outsl_ns((volatile u32 __iomem *)(p), (a), (c))
+#define __ide_mm_insw(p, a, c)	readsw((void __iomem *)(p), (a), (c))
+#define __ide_mm_insl(p, a, c)	readsl((void __iomem *)(p), (a), (c))
+#define __ide_mm_outsw(p, a, c)	writesw((void __iomem *)(p), (a), (c))
+#define __ide_mm_outsl(p, a, c)	writesl((void __iomem *)(p), (a), (c))
 
 #ifndef  __powerpc64__
 #include <linux/hdreg.h>
diff --git a/include/asm-powerpc/immap_qe.h b/include/asm-powerpc/immap_qe.h
index ce12f85..9fdd049 100644
--- a/include/asm-powerpc/immap_qe.h
+++ b/include/asm-powerpc/immap_qe.h
@@ -136,22 +136,7 @@
 
 /* BRG */
 struct qe_brg {
-	__be32	brgc1;		/* BRG1 configuration register */
-	__be32	brgc2;		/* BRG2 configuration register */
-	__be32	brgc3;		/* BRG3 configuration register */
-	__be32	brgc4;		/* BRG4 configuration register */
-	__be32	brgc5;		/* BRG5 configuration register */
-	__be32	brgc6;		/* BRG6 configuration register */
-	__be32	brgc7;		/* BRG7 configuration register */
-	__be32	brgc8;		/* BRG8 configuration register */
-	__be32	brgc9;		/* BRG9 configuration register */
-	__be32	brgc10;		/* BRG10 configuration register */
-	__be32	brgc11;		/* BRG11 configuration register */
-	__be32	brgc12;		/* BRG12 configuration register */
-	__be32	brgc13;		/* BRG13 configuration register */
-	__be32	brgc14;		/* BRG14 configuration register */
-	__be32	brgc15;		/* BRG15 configuration register */
-	__be32	brgc16;		/* BRG16 configuration register */
+	__be32	brgc[16];	/* BRG configuration registers */
 	u8	res0[0x40];
 } __attribute__ ((packed));
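
Collapsing the sixteen brgcN fields into brgc[16] lets callers index the BRG configuration registers by number; a one-line sketch (pointer and numbering convention assumed):

	/* Sketch: program baud-rate generator "brg" (1..16). */
	static void example_set_brg(struct qe_brg __iomem *regs, int brg, u32 cfg)
	{
		out_be32(&regs->brgc[brg - 1], cfg);
	}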
 
diff --git a/include/asm-powerpc/io-defs.h b/include/asm-powerpc/io-defs.h
new file mode 100644
index 0000000..03691ab
--- /dev/null
+++ b/include/asm-powerpc/io-defs.h
@@ -0,0 +1,59 @@
+/* This file is meant to be included multiple times by other headers */
+
+DEF_PCI_AC_RET(readb, u8, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_RET(readw, u16, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_RET(readl, u32, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_RET(readw_be, u16, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_RET(readl_be, u32, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_NORET(writeb, (u8 val, PCI_IO_ADDR addr), (val, addr))
+DEF_PCI_AC_NORET(writew, (u16 val, PCI_IO_ADDR addr), (val, addr))
+DEF_PCI_AC_NORET(writel, (u32 val, PCI_IO_ADDR addr), (val, addr))
+DEF_PCI_AC_NORET(writew_be, (u16 val, PCI_IO_ADDR addr), (val, addr))
+DEF_PCI_AC_NORET(writel_be, (u32 val, PCI_IO_ADDR addr), (val, addr))
+
+#ifdef __powerpc64__
+DEF_PCI_AC_RET(readq, u64, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_RET(readq_be, u64, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_NORET(writeq, (u64 val, PCI_IO_ADDR addr), (val, addr))
+DEF_PCI_AC_NORET(writeq_be, (u64 val, PCI_IO_ADDR addr), (val, addr))
+#endif /* __powerpc64__ */
+
+DEF_PCI_AC_RET(inb, u8, (unsigned long port), (port))
+DEF_PCI_AC_RET(inw, u16, (unsigned long port), (port))
+DEF_PCI_AC_RET(inl, u32, (unsigned long port), (port))
+DEF_PCI_AC_NORET(outb, (u8 val, unsigned long port), (val, port))
+DEF_PCI_AC_NORET(outw, (u16 val, unsigned long port), (val, port))
+DEF_PCI_AC_NORET(outl, (u32 val, unsigned long port), (val, port))
+
+DEF_PCI_AC_NORET(readsb, (const PCI_IO_ADDR a, void *b, unsigned long c), \
+		 (a, b, c))
+DEF_PCI_AC_NORET(readsw, (const PCI_IO_ADDR a, void *b, unsigned long c), \
+		 (a, b, c))
+DEF_PCI_AC_NORET(readsl, (const PCI_IO_ADDR a, void *b, unsigned long c), \
+		 (a, b, c))
+DEF_PCI_AC_NORET(writesb, (PCI_IO_ADDR a, const void *b, unsigned long c), \
+		 (a, b, c))
+DEF_PCI_AC_NORET(writesw, (PCI_IO_ADDR a, const void *b, unsigned long c), \
+		 (a, b, c))
+DEF_PCI_AC_NORET(writesl, (PCI_IO_ADDR a, const void *b, unsigned long c), \
+		 (a, b, c))
+
+DEF_PCI_AC_NORET(insb, (unsigned long p, void *b, unsigned long c), \
+		 (p, b, c))
+DEF_PCI_AC_NORET(insw, (unsigned long p, void *b, unsigned long c), \
+		 (p, b, c))
+DEF_PCI_AC_NORET(insl, (unsigned long p, void *b, unsigned long c), \
+		 (p, b, c))
+DEF_PCI_AC_NORET(outsb, (unsigned long p, const void *b, unsigned long c), \
+		 (p, b, c))
+DEF_PCI_AC_NORET(outsw, (unsigned long p, const void *b, unsigned long c), \
+		 (p, b, c))
+DEF_PCI_AC_NORET(outsl, (unsigned long p, const void *b, unsigned long c), \
+		 (p, b, c))
+
+DEF_PCI_AC_NORET(memset_io, (PCI_IO_ADDR a, int c, unsigned long n),	   \
+		 (a, c, n))
+DEF_PCI_AC_NORET(memcpy_fromio,(void *d,const PCI_IO_ADDR s,unsigned long n), \
+		 (d, s, n))
+DEF_PCI_AC_NORET(memcpy_toio,(PCI_IO_ADDR d,const void *s,unsigned long n),   \
+		 (d, s, n))
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index c2c5f14..1cd5323 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -13,154 +13,530 @@
 extern int check_legacy_ioport(unsigned long base_port);
 #define PNPBIOS_BASE	0xf000	/* only relevant for PReP */
 
-#ifndef CONFIG_PPC64
-#include <asm-ppc/io.h>
-#else
-
 #include <linux/compiler.h>
 #include <asm/page.h>
 #include <asm/byteorder.h>
-#include <asm/paca.h>
 #include <asm/synch.h>
 #include <asm/delay.h>
+#include <asm/mmu.h>
 
 #include <asm-generic/iomap.h>
 
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#endif
+
 #define SIO_CONFIG_RA	0x398
 #define SIO_CONFIG_RD	0x399
 
 #define SLOW_DOWN_IO
 
+/* 32-bit uses slightly different variables for the various IO
+ * bases. Most of this file only uses _IO_BASE though, which we
+ * define properly based on the platform.
+ */
+#ifndef CONFIG_PCI
+#define _IO_BASE	0
+#define _ISA_MEM_BASE	0
+#define PCI_DRAM_OFFSET 0
+#elif defined(CONFIG_PPC32)
+#define _IO_BASE	isa_io_base
+#define _ISA_MEM_BASE	isa_mem_base
+#define PCI_DRAM_OFFSET	pci_dram_offset
+#else
+#define _IO_BASE	pci_io_base
+#define _ISA_MEM_BASE	0
+#define PCI_DRAM_OFFSET	0
+#endif
+
 extern unsigned long isa_io_base;
+extern unsigned long isa_mem_base;
 extern unsigned long pci_io_base;
+extern unsigned long pci_dram_offset;
 
-#ifdef CONFIG_PPC_ISERIES
+#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_INDIRECT_IO)
+#error CONFIG_PPC_INDIRECT_IO is not yet supported on 32 bits
+#endif
 
-extern int in_8(const volatile unsigned char __iomem *addr);
-extern void out_8(volatile unsigned char __iomem *addr, int val);
-extern int in_le16(const volatile unsigned short __iomem *addr);
-extern int in_be16(const volatile unsigned short __iomem *addr);
-extern void out_le16(volatile unsigned short __iomem *addr, int val);
-extern void out_be16(volatile unsigned short __iomem *addr, int val);
-extern unsigned in_le32(const volatile unsigned __iomem *addr);
-extern unsigned in_be32(const volatile unsigned __iomem *addr);
-extern void out_le32(volatile unsigned __iomem *addr, int val);
-extern void out_be32(volatile unsigned __iomem *addr, int val);
-extern unsigned long in_le64(const volatile unsigned long __iomem *addr);
-extern unsigned long in_be64(const volatile unsigned long __iomem *addr);
-extern void out_le64(volatile unsigned long __iomem *addr, unsigned long val);
-extern void out_be64(volatile unsigned long __iomem *addr, unsigned long val);
+/*
+ *
+ * Low level MMIO accessors
+ *
+ * This provides the non-bus specific accessors to MMIO. Those are PowerPC
+ * specific and thus shouldn't be used in generic code. The accessors
+ * provided here are:
+ *
+ *	in_8, in_le16, in_be16, in_le32, in_be32, in_le64, in_be64
+ *	out_8, out_le16, out_be16, out_le32, out_be32, out_le64, out_be64
+ *	_insb, _insw_ns, _insl_ns, _outsb, _outsw_ns, _outsl_ns
+ *
+ * Those operate directly on a kernel virtual address. Note that the prototype
+ * for the out_* accessors has the arguments in opposite order from the usual
+ * linux PCI accessors. Unlike those, they take the address first and the value
+ * next.
+ *
+ * Note: I might drop the _ns suffix on the stream operations soon, as it is
+ * simply normal for stream operations not to swap in the first place.
+ *
+ */
 
-extern unsigned char __raw_readb(const volatile void __iomem *addr);
-extern unsigned short __raw_readw(const volatile void __iomem *addr);
-extern unsigned int __raw_readl(const volatile void __iomem *addr);
-extern unsigned long __raw_readq(const volatile void __iomem *addr);
-extern void __raw_writeb(unsigned char v, volatile void __iomem *addr);
-extern void __raw_writew(unsigned short v, volatile void __iomem *addr);
-extern void __raw_writel(unsigned int v, volatile void __iomem *addr);
-extern void __raw_writeq(unsigned long v, volatile void __iomem *addr);
+#ifdef CONFIG_PPC64
+#define IO_SET_SYNC_FLAG()	do { get_paca()->io_sync = 1; } while(0)
+#else
+#define IO_SET_SYNC_FLAG()
+#endif
 
-extern void memset_io(volatile void __iomem *addr, int c, unsigned long n);
-extern void memcpy_fromio(void *dest, const volatile void __iomem *src,
-                                 unsigned long n);
-extern void memcpy_toio(volatile void __iomem *dest, const void *src,
-                                 unsigned long n);
+#define DEF_MMIO_IN(name, type, insn)					\
+static inline type name(const volatile type __iomem *addr)		\
+{									\
+	type ret;							\
+	__asm__ __volatile__("sync;" insn ";twi 0,%0,0;isync"		\
+ 		: "=r" (ret) : "r" (addr), "m" (*addr));		\
+	return ret;							\
+}
 
-#else /* CONFIG_PPC_ISERIES */
+#define DEF_MMIO_OUT(name, type, insn)					\
+static inline void name(volatile type __iomem *addr, type val)		\
+{									\
+	__asm__ __volatile__("sync;" insn				\
+ 		: "=m" (*addr) : "r" (val), "r" (addr));		\
+	IO_SET_SYNC_FLAG();					\
+}
 
-#define in_8(addr)		__in_8((addr))
-#define out_8(addr, val)	__out_8((addr), (val))
-#define in_le16(addr)		__in_le16((addr))
-#define in_be16(addr)		__in_be16((addr))
-#define out_le16(addr, val)	__out_le16((addr), (val))
-#define out_be16(addr, val)	__out_be16((addr), (val))
-#define in_le32(addr)		__in_le32((addr))
-#define in_be32(addr)		__in_be32((addr))
-#define out_le32(addr, val)	__out_le32((addr), (val))
-#define out_be32(addr, val)	__out_be32((addr), (val))
-#define in_le64(addr)		__in_le64((addr))
-#define in_be64(addr)		__in_be64((addr))
-#define out_le64(addr, val)	__out_le64((addr), (val))
-#define out_be64(addr, val)	__out_be64((addr), (val))
+
+#define DEF_MMIO_IN_BE(name, size, insn) \
+	DEF_MMIO_IN(name, u##size, __stringify(insn)"%U2%X2 %0,%2")
+#define DEF_MMIO_IN_LE(name, size, insn) \
+	DEF_MMIO_IN(name, u##size, __stringify(insn)" %0,0,%1")
+
+#define DEF_MMIO_OUT_BE(name, size, insn) \
+	DEF_MMIO_OUT(name, u##size, __stringify(insn)"%U0%X0 %1,%0")
+#define DEF_MMIO_OUT_LE(name, size, insn) \
+	DEF_MMIO_OUT(name, u##size, __stringify(insn)" %1,0,%2")
+
+DEF_MMIO_IN_BE(in_8,     8, lbz);
+DEF_MMIO_IN_BE(in_be16, 16, lhz);
+DEF_MMIO_IN_BE(in_be32, 32, lwz);
+DEF_MMIO_IN_LE(in_le16, 16, lhbrx);
+DEF_MMIO_IN_LE(in_le32, 32, lwbrx);
+
+DEF_MMIO_OUT_BE(out_8,     8, stb);
+DEF_MMIO_OUT_BE(out_be16, 16, sth);
+DEF_MMIO_OUT_BE(out_be32, 32, stw);
+DEF_MMIO_OUT_LE(out_le16, 16, sthbrx);
+DEF_MMIO_OUT_LE(out_le32, 32, stwbrx);
+
+#ifdef __powerpc64__
+DEF_MMIO_OUT_BE(out_be64, 64, std);
+DEF_MMIO_IN_BE(in_be64, 64, ld);
+
+/* There are no asm instructions for 64-bit reverse loads and stores */
+static inline u64 in_le64(const volatile u64 __iomem *addr)
+{
+	return le64_to_cpu(in_be64(addr));
+}
+
+static inline void out_le64(volatile u64 __iomem *addr, u64 val)
+{
+	out_be64(addr, cpu_to_le64(val));
+}
+#endif /* __powerpc64__ */
+
+/*
+ * Low level IO stream instructions are defined out of line for now
+ */
+extern void _insb(const volatile u8 __iomem *addr, void *buf, long count);
+extern void _outsb(volatile u8 __iomem *addr,const void *buf,long count);
+extern void _insw_ns(const volatile u16 __iomem *addr, void *buf, long count);
+extern void _outsw_ns(volatile u16 __iomem *addr, const void *buf, long count);
+extern void _insl_ns(const volatile u32 __iomem *addr, void *buf, long count);
+extern void _outsl_ns(volatile u32 __iomem *addr, const void *buf, long count);
+
+/* The _ns naming is historical and will be removed. For now, just #define
+ * the non _ns equivalent names
+ */
+#define _insw	_insw_ns
+#define _insl	_insl_ns
+#define _outsw	_outsw_ns
+#define _outsl	_outsl_ns
+
+
+/*
+ * memset_io, memcpy_toio, memcpy_fromio base implementations are out of line
+ */
+
+extern void _memset_io(volatile void __iomem *addr, int c, unsigned long n);
+extern void _memcpy_fromio(void *dest, const volatile void __iomem *src,
+			   unsigned long n);
+extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
+			 unsigned long n);
+
+/*
+ *
+ * PCI and standard ISA accessors
+ *
+ * Those are globally defined linux accessors for devices on PCI or ISA
+ * buses. They follow the Linux-defined semantics. The current implementation
+ * for PowerPC is as close as possible to the x86 version of these, and thus
+ * provides fairly heavyweight barriers for the non-raw versions.
+ *
+ * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_IO
+ * allowing the platform to provide its own implementation of some or all
+ * of the accessors.
+ */
+
+/*
+ * Only include the EEH definitions when EEH is enabled, so they don't get
+ * in the way when building for 32 bits.
+ */
+#ifdef CONFIG_EEH
+#include <asm/eeh.h>
+#endif
+
+/* Shortcut to the MMIO argument pointer */
+#define PCI_IO_ADDR	volatile void __iomem *
+
+/* Indirect IO address tokens:
+ *
+ * When CONFIG_PPC_INDIRECT_IO is set, the platform can provide hooks
+ * on all IOs. (Note that this is all 64 bits only for now)
+ *
+ * To help platforms that may need to differentiate MMIO addresses in
+ * their hooks, a bitfield is reserved for use by the platform near the
+ * top of MMIO addresses (not PIO, those have to cope the hard way).
+ *
+ * This bit field is 12 bits and is at the top of the IO virtual
+ * addresses PCI_IO_INDIRECT_TOKEN_MASK.
+ *
+ * The kernel virtual space is thus:
+ *
+ *  0xD000000000000000		: vmalloc
+ *  0xD000080000000000		: PCI PHB IO space
+ *  0xD000080080000000		: ioremap
+ *  0xD0000fffffffffff		: end of ioremap region
+ *
+ * Since the top 4 bits are reserved as the region ID, we thus use
+ * the next 12 bits and keep 4 bits available for the future if the
+ * virtual address space is ever to be extended.
+ *
+ * The direct IO mapping operations will then mask off those bits
+ * before doing the actual access, though that only happens when
+ * CONFIG_PPC_INDIRECT_IO is set, so be careful when you use this
+ * mechanism.
+ */
+
+#ifdef CONFIG_PPC_INDIRECT_IO
+#define PCI_IO_IND_TOKEN_MASK	0x0fff000000000000ul
+#define PCI_IO_IND_TOKEN_SHIFT	48
+#define PCI_FIX_ADDR(addr)						\
+	((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
+#define PCI_GET_ADDR_TOKEN(addr)					\
+	(((unsigned long)(addr) & PCI_IO_IND_TOKEN_MASK) >> 		\
+		PCI_IO_IND_TOKEN_SHIFT)
+#define PCI_SET_ADDR_TOKEN(addr, token) 				\
+do {									\
+	unsigned long __a = (unsigned long)(addr);			\
+	__a &= ~PCI_IO_IND_TOKEN_MASK;					\
+	__a |= ((unsigned long)(token)) << PCI_IO_IND_TOKEN_SHIFT;	\
+	(addr) = (void __iomem *)__a;					\
+} while(0)
+#else
+#define PCI_FIX_ADDR(addr) (addr)
+#endif
+
+
+/*
+ * Non ordered and non-swapping "raw" accessors
+ */
 
 static inline unsigned char __raw_readb(const volatile void __iomem *addr)
 {
-	return *(volatile unsigned char __force *)addr;
+	return *(volatile unsigned char __force *)PCI_FIX_ADDR(addr);
 }
 static inline unsigned short __raw_readw(const volatile void __iomem *addr)
 {
-	return *(volatile unsigned short __force *)addr;
+	return *(volatile unsigned short __force *)PCI_FIX_ADDR(addr);
 }
 static inline unsigned int __raw_readl(const volatile void __iomem *addr)
 {
-	return *(volatile unsigned int __force *)addr;
-}
-static inline unsigned long __raw_readq(const volatile void __iomem *addr)
-{
-	return *(volatile unsigned long __force *)addr;
+	return *(volatile unsigned int __force *)PCI_FIX_ADDR(addr);
 }
 static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr)
 {
-	*(volatile unsigned char __force *)addr = v;
+	*(volatile unsigned char __force *)PCI_FIX_ADDR(addr) = v;
 }
 static inline void __raw_writew(unsigned short v, volatile void __iomem *addr)
 {
-	*(volatile unsigned short __force *)addr = v;
+	*(volatile unsigned short __force *)PCI_FIX_ADDR(addr) = v;
 }
 static inline void __raw_writel(unsigned int v, volatile void __iomem *addr)
 {
-	*(volatile unsigned int __force *)addr = v;
+	*(volatile unsigned int __force *)PCI_FIX_ADDR(addr) = v;
+}
+
+#ifdef __powerpc64__
+static inline unsigned long __raw_readq(const volatile void __iomem *addr)
+{
+	return *(volatile unsigned long __force *)PCI_FIX_ADDR(addr);
 }
 static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
 {
-	*(volatile unsigned long __force *)addr = v;
+	*(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v;
 }
-#define memset_io(a,b,c)	eeh_memset_io((a),(b),(c))
-#define memcpy_fromio(a,b,c)	eeh_memcpy_fromio((a),(b),(c))
-#define memcpy_toio(a,b,c)	eeh_memcpy_toio((a),(b),(c))
-
-#endif /* CONFIG_PPC_ISERIES */
+#endif /* __powerpc64__ */
 
 /*
- * The insw/outsw/insl/outsl macros don't do byte-swapping.
- * They are only used in practice for transferring buffers which
- * are arrays of bytes, and byte-swapping is not appropriate in
- * that case.  - paulus */
-#define insb(port, buf, ns)	eeh_insb((port), (buf), (ns))
-#define insw(port, buf, ns)	eeh_insw_ns((port), (buf), (ns))
-#define insl(port, buf, nl)	eeh_insl_ns((port), (buf), (nl))
+ *
+ * PCI PIO and MMIO accessors.
+ *
+ *
+ * On 32 bits, PIO operations have a recovery mechanism in case they trigger
+ * machine checks (which they occasionally do when probing non-existent
+ * IO ports on some platforms, like PowerMac and 8xx).
+ * I always found it to be of dubious reliability and I am tempted to get
+ * rid of it one of these days. So if you think it's important to keep it,
+ * please speak up asap. We never had it for 64 bits and I do not intend
+ * to port it over.
+ */
 
-#define outsb(port, buf, ns)  _outsb((u8 __iomem *)((port)+pci_io_base), (buf), (ns))
-#define outsw(port, buf, ns)  _outsw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns))
-#define outsl(port, buf, nl)  _outsl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl))
+#ifdef CONFIG_PPC32
 
-#define readb(addr)		eeh_readb(addr)
-#define readw(addr)		eeh_readw(addr)
-#define readl(addr)		eeh_readl(addr)
-#define readq(addr)		eeh_readq(addr)
-#define writeb(data, addr)	eeh_writeb((data), (addr))
-#define writew(data, addr)	eeh_writew((data), (addr))
-#define writel(data, addr)	eeh_writel((data), (addr))
-#define writeq(data, addr)	eeh_writeq((data), (addr))
-#define inb(port)		eeh_inb((unsigned long)port)
-#define outb(val, port)		eeh_outb(val, (unsigned long)port)
-#define inw(port)		eeh_inw((unsigned long)port)
-#define outw(val, port)		eeh_outw(val, (unsigned long)port)
-#define inl(port)		eeh_inl((unsigned long)port)
-#define outl(val, port)		eeh_outl(val, (unsigned long)port)
+#define __do_in_asm(name, op)				\
+static inline unsigned int name(unsigned int port)	\
+{							\
+	unsigned int x;					\
+	__asm__ __volatile__(				\
+		"sync\n"				\
+		"0:"	op "	%0,0,%1\n"		\
+		"1:	twi	0,%0,0\n"		\
+		"2:	isync\n"			\
+		"3:	nop\n"				\
+		"4:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"5:	li	%0,-1\n"		\
+		"	b	4b\n"			\
+		".previous\n"				\
+		".section __ex_table,\"a\"\n"		\
+		"	.align	2\n"			\
+		"	.long	0b,5b\n"		\
+		"	.long	1b,5b\n"		\
+		"	.long	2b,5b\n"		\
+		"	.long	3b,5b\n"		\
+		".previous"				\
+		: "=&r" (x)				\
+		: "r" (port + _IO_BASE));		\
+	return x;					\
+}
 
+#define __do_out_asm(name, op)				\
+static inline void name(unsigned int val, unsigned int port) \
+{							\
+	__asm__ __volatile__(				\
+		"sync\n"				\
+		"0:" op " %0,0,%1\n"			\
+		"1:	sync\n"				\
+		"2:\n"					\
+		".section __ex_table,\"a\"\n"		\
+		"	.align	2\n"			\
+		"	.long	0b,2b\n"		\
+		"	.long	1b,2b\n"		\
+		".previous"				\
+		: : "r" (val), "r" (port + _IO_BASE));	\
+}
+
+__do_in_asm(_rec_inb, "lbzx")
+__do_in_asm(_rec_inw, "lhbrx")
+__do_in_asm(_rec_inl, "lwbrx")
+__do_out_asm(_rec_outb, "stbx")
+__do_out_asm(_rec_outw, "sthbrx")
+__do_out_asm(_rec_outl, "stwbrx")
+
+#endif /* CONFIG_PPC32 */
+
+/* The "__do_*" operations below provide the actual "base" implementation
+ * for each of the defined accessors. Some of them use the out_* functions
+ * directly, some of them still use EEH, though we might change that in the
+ * future. Those macros below provide the necessary argument swapping and
+ * handling of the IO base for PIO.
+ *
+ * They are themselves used by the macros that define the actual accessors
+ * and can be used by the hooks if any.
+ *
+ * Note that PIO operations are always defined in terms of their corresponding
+ * MMIO operations. That allows platforms like iSeries that want to modify the
+ * behaviour of both to only hook on the MMIO version and get both. It's also
+ * possible to hook directly at the top-level PIO operation if they have to
+ * be handled differently.
+ */
+#define __do_writeb(val, addr)	out_8(PCI_FIX_ADDR(addr), val)
+#define __do_writew(val, addr)	out_le16(PCI_FIX_ADDR(addr), val)
+#define __do_writel(val, addr)	out_le32(PCI_FIX_ADDR(addr), val)
+#define __do_writeq(val, addr)	out_le64(PCI_FIX_ADDR(addr), val)
+#define __do_writew_be(val, addr) out_be16(PCI_FIX_ADDR(addr), val)
+#define __do_writel_be(val, addr) out_be32(PCI_FIX_ADDR(addr), val)
+#define __do_writeq_be(val, addr) out_be64(PCI_FIX_ADDR(addr), val)
+
+#ifdef CONFIG_EEH
+#define __do_readb(addr)	eeh_readb(PCI_FIX_ADDR(addr))
+#define __do_readw(addr)	eeh_readw(PCI_FIX_ADDR(addr))
+#define __do_readl(addr)	eeh_readl(PCI_FIX_ADDR(addr))
+#define __do_readq(addr)	eeh_readq(PCI_FIX_ADDR(addr))
+#define __do_readw_be(addr)	eeh_readw_be(PCI_FIX_ADDR(addr))
+#define __do_readl_be(addr)	eeh_readl_be(PCI_FIX_ADDR(addr))
+#define __do_readq_be(addr)	eeh_readq_be(PCI_FIX_ADDR(addr))
+#else /* CONFIG_EEH */
+#define __do_readb(addr)	in_8(PCI_FIX_ADDR(addr))
+#define __do_readw(addr)	in_le16(PCI_FIX_ADDR(addr))
+#define __do_readl(addr)	in_le32(PCI_FIX_ADDR(addr))
+#define __do_readq(addr)	in_le64(PCI_FIX_ADDR(addr))
+#define __do_readw_be(addr)	in_be16(PCI_FIX_ADDR(addr))
+#define __do_readl_be(addr)	in_be32(PCI_FIX_ADDR(addr))
+#define __do_readq_be(addr)	in_be64(PCI_FIX_ADDR(addr))
+#endif /* !defined(CONFIG_EEH) */
+
+#ifdef CONFIG_PPC32
+#define __do_outb(val, port)	_rec_outb(val, port)
+#define __do_outw(val, port)	_rec_outw(val, port)
+#define __do_outl(val, port)	_rec_outl(val, port)
+#define __do_inb(port)		_rec_inb(port)
+#define __do_inw(port)		_rec_inw(port)
+#define __do_inl(port)		_rec_inl(port)
+#else /* CONFIG_PPC32 */
+#define __do_outb(val, port)	writeb(val,(PCI_IO_ADDR)_IO_BASE+port);
+#define __do_outw(val, port)	writew(val,(PCI_IO_ADDR)_IO_BASE+port);
+#define __do_outl(val, port)	writel(val,(PCI_IO_ADDR)_IO_BASE+port);
+#define __do_inb(port)		readb((PCI_IO_ADDR)_IO_BASE + port);
+#define __do_inw(port)		readw((PCI_IO_ADDR)_IO_BASE + port);
+#define __do_inl(port)		readl((PCI_IO_ADDR)_IO_BASE + port);
+#endif /* !CONFIG_PPC32 */
+
+#ifdef CONFIG_EEH
+#define __do_readsb(a, b, n)	eeh_readsb(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsw(a, b, n)	eeh_readsw(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsl(a, b, n)	eeh_readsl(PCI_FIX_ADDR(a), (b), (n))
+#else /* CONFIG_EEH */
+#define __do_readsb(a, b, n)	_insb(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsw(a, b, n)	_insw(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsl(a, b, n)	_insl(PCI_FIX_ADDR(a), (b), (n))
+#endif /* !CONFIG_EEH */
+#define __do_writesb(a, b, n)	_outsb(PCI_FIX_ADDR(a),(b),(n))
+#define __do_writesw(a, b, n)	_outsw(PCI_FIX_ADDR(a),(b),(n))
+#define __do_writesl(a, b, n)	_outsl(PCI_FIX_ADDR(a),(b),(n))
+
+#define __do_insb(p, b, n)	readsb((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+#define __do_insw(p, b, n)	readsw((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+#define __do_insl(p, b, n)	readsl((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+#define __do_outsb(p, b, n)	writesb((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+#define __do_outsw(p, b, n)	writesw((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+#define __do_outsl(p, b, n)	writesl((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+
+#define __do_memset_io(addr, c, n)	\
+				_memset_io(PCI_FIX_ADDR(addr), c, n)
+#define __do_memcpy_toio(dst, src, n)	\
+				_memcpy_toio(PCI_FIX_ADDR(dst), src, n)
+
+#ifdef CONFIG_EEH
+#define __do_memcpy_fromio(dst, src, n)	\
+				eeh_memcpy_fromio(dst, PCI_FIX_ADDR(src), n)
+#else /* CONFIG_EEH */
+#define __do_memcpy_fromio(dst, src, n)	\
+				_memcpy_fromio(dst,PCI_FIX_ADDR(src),n)
+#endif /* !CONFIG_EEH */
+
+#ifdef CONFIG_PPC_INDIRECT_IO
+#define DEF_PCI_HOOK(x)		x
+#else
+#define DEF_PCI_HOOK(x)		NULL
+#endif
+
+/* Structure containing all the hooks */
+extern struct ppc_pci_io {
+
+#define DEF_PCI_AC_RET(name, ret, at, al)	ret (*name) at;
+#define DEF_PCI_AC_NORET(name, at, al)		void (*name) at;
+
+#include <asm/io-defs.h>
+
+#undef DEF_PCI_AC_RET
+#undef DEF_PCI_AC_NORET
+
+} ppc_pci_io;
+
+/* The inline wrappers */
+#define DEF_PCI_AC_RET(name, ret, at, al)			\
+static inline ret name at					\
+{								\
+	if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL)		\
+		return ppc_pci_io.name al;			\
+	return __do_##name al;					\
+}
+
+#define DEF_PCI_AC_NORET(name, at, al)				\
+static inline void name at					\
+{								\
+	if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL)		\
+		ppc_pci_io.name al;				\
+	else							\
+		__do_##name al;					\
+}
+
+#include <asm/io-defs.h>
+
+#undef DEF_PCI_AC_RET
+#undef DEF_PCI_AC_NORET
+
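
For readb, for example, the two passes over io-defs.h above therefore expand to roughly the following (hand-expanded, simplified):

	/* Hand-expanded illustration, not literal preprocessor output. */
	extern struct ppc_pci_io {
		u8 (*readb)(const PCI_IO_ADDR addr);
		/* ... one member per accessor in io-defs.h ... */
	} ppc_pci_io;

	static inline u8 readb(const PCI_IO_ADDR addr)
	{
		if (DEF_PCI_HOOK(ppc_pci_io.readb) != NULL)
			return ppc_pci_io.readb(addr);
		return __do_readb(addr);
	}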
+/* Some drivers check for the presence of readq & writeq with
+ * a #ifdef, so we make them happy here.
+ */
+#ifdef __powerpc64__
+#define readq	readq
+#define writeq	writeq
+#endif
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+
+#define dma_cache_inv(_start,_size) \
+	invalidate_dcache_range(_start, (_start + _size))
+#define dma_cache_wback(_start,_size) \
+	clean_dcache_range(_start, (_start + _size))
+#define dma_cache_wback_inv(_start,_size) \
+	flush_dcache_range(_start, (_start + _size))
+
+#else /* CONFIG_NOT_COHERENT_CACHE */
+
+#define dma_cache_inv(_start,_size)		do { } while (0)
+#define dma_cache_wback(_start,_size)		do { } while (0)
+#define dma_cache_wback_inv(_start,_size)	do { } while (0)
+
+#endif /* !CONFIG_NOT_COHERENT_CACHE */
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p)	__va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p)	p
+
+/*
+ * We don't do relaxed operations yet, at least not with these semantics
+ */
 #define readb_relaxed(addr) readb(addr)
 #define readw_relaxed(addr) readw(addr)
 #define readl_relaxed(addr) readl(addr)
 #define readq_relaxed(addr) readq(addr)
 
-extern void _insb(volatile u8 __iomem *port, void *buf, long count);
-extern void _outsb(volatile u8 __iomem *port, const void *buf, long count);
-extern void _insw_ns(volatile u16 __iomem *port, void *buf, long count);
-extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count);
-extern void _insl_ns(volatile u32 __iomem *port, void *buf, long count);
-extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count);
-
+#ifdef CONFIG_PPC32
+#define mmiowb()
+#else
+/*
+ * Enforce synchronisation of stores vs. spin_unlock
+ * (this does it explicitly, though our implementation of spin_unlock
+ * does it implicitly too)
+ */
 static inline void mmiowb(void)
 {
 	unsigned long tmp;
@@ -169,6 +545,24 @@
 	: "=&r" (tmp) : "i" (offsetof(struct paca_struct, io_sync))
 	: "memory");
 }
+#endif /* !CONFIG_PPC32 */
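
The typical use of mmiowb() is to order a final MMIO store against the following spin_unlock() so the store is seen before another CPU takes the lock; a sketch (structure and field names are made up for illustration):

	struct example_hw {
		spinlock_t lock;
		void __iomem *doorbell;
	};

	static void example_kick(struct example_hw *hw, u32 val)
	{
		spin_lock(&hw->lock);
		writel(val, hw->doorbell);
		mmiowb();		/* order the store vs. the unlock */
		spin_unlock(&hw->lock);
	}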
+
+static inline void iosync(void)
+{
+        __asm__ __volatile__ ("sync" : : : "memory");
+}
+
+/* Enforce in-order execution of data I/O.
+ * No distinction between read/write on PPC; use eieio for all three.
+ * Those are fairly weak though. They don't provide a barrier between
+ * MMIO and cacheable storage, nor do they provide a barrier vs. locks;
+ * they only provide barriers between two __raw MMIO operations and
+ * possibly break write combining.
+ */
+#define iobarrier_rw() eieio()
+#define iobarrier_r()  eieio()
+#define iobarrier_w()  eieio()
+
 
 /*
  * output pause versions need a delay at least for the
@@ -185,11 +579,6 @@
 #define IO_SPACE_LIMIT ~(0UL)
 
 
-extern int __ioremap_explicit(unsigned long p_addr, unsigned long v_addr,
-		     	      unsigned long size, unsigned long flags);
-extern void __iomem *__ioremap(unsigned long address, unsigned long size,
-		       unsigned long flags);
-
 /**
  * ioremap     -   map bus memory into CPU space
  * @address:   bus address of the memory
@@ -200,14 +589,77 @@
  * writew/writel functions and the other mmio helpers. The returned
  * address is not guaranteed to be usable directly as a virtual
  * address.
+ *
+ * We provide a few variations of it:
+ *
+ * * ioremap is the standard one and provides non-cacheable guarded mappings
+ *   and can be hooked by the platform via ppc_md
+ *
+ * * ioremap_flags lets the caller specify the page flags as an argument and can
+ *   also be hooked by the platform via ppc_md
+ *
+ * * ioremap_nocache is identical to ioremap
+ *
+ * * iounmap undoes such a mapping and can be hooked
+ *
+ * * __ioremap_explicit (and the pending __iounmap_explicit) are low level
+ *   functions to create hand-made mappings for use only by the PCI code
+ *   and cannot currently be hooked.
+ *
+ * * __ioremap is the low level implementation used by ioremap and
+ *   ioremap_flags and cannot be hooked (but can be used by a hook on one
+ *   of the previous ones)
+ *
+ * * __iounmap is the low level implementation used by iounmap and cannot
+ *   be hooked (but can be used by a hook on iounmap)
+ *
  */
-extern void __iomem *ioremap(unsigned long address, unsigned long size);
-
+extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
+extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size,
+				   unsigned long flags);
 #define ioremap_nocache(addr, size)	ioremap((addr), (size))
-extern int iounmap_explicit(volatile void __iomem *addr, unsigned long size);
 extern void iounmap(volatile void __iomem *addr);
+
+extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
+			       unsigned long flags);
+extern void __iounmap(volatile void __iomem *addr);
+
+extern int __ioremap_explicit(phys_addr_t p_addr, unsigned long v_addr,
+		     	      unsigned long size, unsigned long flags);
+extern int __iounmap_explicit(volatile void __iomem *start,
+			      unsigned long size);
+
 extern void __iomem * reserve_phb_iospace(unsigned long size);
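A hedged usage sketch of the variants documented above (physical base, size and register offset are invented for illustration):

	#include <linux/io.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Hypothetical probe path: map 4K of device registers non-cacheable
	 * and guarded, poke one register, then tear the mapping down. */
	static int example_map_regs(phys_addr_t base)
	{
		void __iomem *regs = ioremap(base, 0x1000);

		if (!regs)
			return -ENOMEM;
		writel(0x1, regs);	/* made-up enable register */
		iounmap(regs);
		return 0;
	}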
 
+/* Some more 32-bit only functions */
+extern unsigned long iopa(unsigned long addr);
+extern unsigned long mm_ptov(unsigned long addr) __attribute_const__;
+extern void io_block_mapping(unsigned long virt, phys_addr_t phys,
+			     unsigned int size, int flags);
+
+
+/*
+ * When CONFIG_PPC_INDIRECT_IO is set, we use the generic iomap implementation,
+ * which needs some additional definitions here. They basically allow PIO
+ * space overall to be 1GB. This will work as long as we never try to use
+ * iomap to map MMIO below 1GB, which should be fine on ppc64.
+ */
+#define HAVE_ARCH_PIO_SIZE		1
+#define PIO_OFFSET			0x00000000UL
+#define PIO_MASK			0x3fffffffUL
+#define PIO_RESERVED			0x40000000UL
+
+#define mmio_read16be(addr)		readw_be(addr)
+#define mmio_read32be(addr)		readl_be(addr)
+#define mmio_write16be(val, addr)	writew_be(val, addr)
+#define mmio_write32be(val, addr)	writel_be(val, addr)
+#define mmio_insb(addr, dst, count)	readsb(addr, dst, count)
+#define mmio_insw(addr, dst, count)	readsw(addr, dst, count)
+#define mmio_insl(addr, dst, count)	readsl(addr, dst, count)
+#define mmio_outsb(addr, src, count)	writesb(addr, src, count)
+#define mmio_outsw(addr, src, count)	writesw(addr, src, count)
+#define mmio_outsl(addr, src, count)	writesl(addr, src, count)
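A simplified sketch of how the generic iomap layer is expected to use these constants to tell PIO cookies from MMIO tokens (loosely modelled on lib/iomap.c, not a literal copy):

	#include <linux/io.h>

	/* With HAVE_ARCH_PIO_SIZE, port numbers are encoded as small cookies
	 * below PIO_RESERVED while real MMIO mappings live above it, so one
	 * comparison routes each access (simplified illustration). */
	static unsigned int example_ioread8(void __iomem *addr)
	{
		unsigned long cookie = (unsigned long)addr;

		if (cookie < PIO_RESERVED)
			return inb(cookie & PIO_MASK);	/* port I/O */
		return readb(addr);			/* memory-mapped I/O */
	}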
+
 /**
  *	virt_to_phys	-	map virtual addresses to physical
  *	@address: address to remap
@@ -254,178 +706,33 @@
  */
 #define BIO_VMERGE_BOUNDARY	0
 
-static inline void iosync(void)
-{
-        __asm__ __volatile__ ("sync" : : : "memory");
-}
-
-/* Enforce in-order execution of data I/O. 
- * No distinction between read/write on PPC; use eieio for all three.
- */
-#define iobarrier_rw() eieio()
-#define iobarrier_r()  eieio()
-#define iobarrier_w()  eieio()
-
 /*
- * 8, 16 and 32 bit, big and little endian I/O operations, with barrier.
- * These routines do not perform EEH-related I/O address translation,
- * and should not be used directly by device drivers.  Use inb/readb
- * instead.
+ * 32-bit still uses virt_to_bus() for its implementation of DMA
+ * mappings so we have to keep it defined here. We also have some old
+ * drivers (shame shame shame) that use bus_to_virt() and haven't been
+ * fixed yet so I need to define it here.
  */
-static inline int __in_8(const volatile unsigned char __iomem *addr)
-{
-	int ret;
+#ifdef CONFIG_PPC32
 
-	__asm__ __volatile__("sync; lbz%U1%X1 %0,%1; twi 0,%0,0; isync"
-			     : "=r" (ret) : "m" (*addr));
-	return ret;
+static inline unsigned long virt_to_bus(volatile void *address)
+{
+	if (address == NULL)
+		return 0;
+	return __pa(address) + PCI_DRAM_OFFSET;
 }
 
-static inline void __out_8(volatile unsigned char __iomem *addr, int val)
+static inline void *bus_to_virt(unsigned long address)
 {
-	__asm__ __volatile__("sync; stb%U0%X0 %1,%0"
-			     : "=m" (*addr) : "r" (val));
-	get_paca()->io_sync = 1;
+	if (address == 0)
+		return NULL;
+	return __va(address - PCI_DRAM_OFFSET);
 }
 
-static inline int __in_le16(const volatile unsigned short __iomem *addr)
-{
-	int ret;
+#define page_to_bus(page)	(page_to_phys(page) + PCI_DRAM_OFFSET)
 
-	__asm__ __volatile__("sync; lhbrx %0,0,%1; twi 0,%0,0; isync"
-			     : "=r" (ret) : "r" (addr), "m" (*addr));
-	return ret;
-}
+#endif /* CONFIG_PPC32 */
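A hedged example of the legacy round trip these helpers provide on 32-bit (buffer and size invented; new code should prefer the dma_map_* API):

	#include <linux/kernel.h>
	#include <linux/slab.h>

	/* Hypothetical legacy-style driver: translate a kernel virtual buffer
	 * to a bus address for the device, then back again. */
	static void example_legacy_translation(void)
	{
		void *buf = kmalloc(256, GFP_KERNEL);
		unsigned long bus;

		if (!buf)
			return;
		bus = virt_to_bus(buf);		/* __pa(buf) + PCI_DRAM_OFFSET */
		WARN_ON(bus_to_virt(bus) != buf);
		kfree(buf);
	}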
 
-static inline int __in_be16(const volatile unsigned short __iomem *addr)
-{
-	int ret;
-
-	__asm__ __volatile__("sync; lhz%U1%X1 %0,%1; twi 0,%0,0; isync"
-			     : "=r" (ret) : "m" (*addr));
-	return ret;
-}
-
-static inline void __out_le16(volatile unsigned short __iomem *addr, int val)
-{
-	__asm__ __volatile__("sync; sthbrx %1,0,%2"
-			     : "=m" (*addr) : "r" (val), "r" (addr));
-	get_paca()->io_sync = 1;
-}
-
-static inline void __out_be16(volatile unsigned short __iomem *addr, int val)
-{
-	__asm__ __volatile__("sync; sth%U0%X0 %1,%0"
-			     : "=m" (*addr) : "r" (val));
-	get_paca()->io_sync = 1;
-}
-
-static inline unsigned __in_le32(const volatile unsigned __iomem *addr)
-{
-	unsigned ret;
-
-	__asm__ __volatile__("sync; lwbrx %0,0,%1; twi 0,%0,0; isync"
-			     : "=r" (ret) : "r" (addr), "m" (*addr));
-	return ret;
-}
-
-static inline unsigned __in_be32(const volatile unsigned __iomem *addr)
-{
-	unsigned ret;
-
-	__asm__ __volatile__("sync; lwz%U1%X1 %0,%1; twi 0,%0,0; isync"
-			     : "=r" (ret) : "m" (*addr));
-	return ret;
-}
-
-static inline void __out_le32(volatile unsigned __iomem *addr, int val)
-{
-	__asm__ __volatile__("sync; stwbrx %1,0,%2" : "=m" (*addr)
-			     : "r" (val), "r" (addr));
-	get_paca()->io_sync = 1;
-}
-
-static inline void __out_be32(volatile unsigned __iomem *addr, int val)
-{
-	__asm__ __volatile__("sync; stw%U0%X0 %1,%0"
-			     : "=m" (*addr) : "r" (val));
-	get_paca()->io_sync = 1;
-}
-
-static inline unsigned long __in_le64(const volatile unsigned long __iomem *addr)
-{
-	unsigned long tmp, ret;
-
-	__asm__ __volatile__(
-			     "sync\n"
-			     "ld %1,0(%2)\n"
-			     "twi 0,%1,0\n"
-			     "isync\n"
-			     "rldimi %0,%1,5*8,1*8\n"
-			     "rldimi %0,%1,3*8,2*8\n"
-			     "rldimi %0,%1,1*8,3*8\n"
-			     "rldimi %0,%1,7*8,4*8\n"
-			     "rldicl %1,%1,32,0\n"
-			     "rlwimi %0,%1,8,8,31\n"
-			     "rlwimi %0,%1,24,16,23\n"
-			     : "=r" (ret) , "=r" (tmp) : "b" (addr) , "m" (*addr));
-	return ret;
-}
-
-static inline unsigned long __in_be64(const volatile unsigned long __iomem *addr)
-{
-	unsigned long ret;
-
-	__asm__ __volatile__("sync; ld%U1%X1 %0,%1; twi 0,%0,0; isync"
-			     : "=r" (ret) : "m" (*addr));
-	return ret;
-}
-
-static inline void __out_le64(volatile unsigned long __iomem *addr, unsigned long val)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-			     "rldimi %0,%1,5*8,1*8\n"
-			     "rldimi %0,%1,3*8,2*8\n"
-			     "rldimi %0,%1,1*8,3*8\n"
-			     "rldimi %0,%1,7*8,4*8\n"
-			     "rldicl %1,%1,32,0\n"
-			     "rlwimi %0,%1,8,8,31\n"
-			     "rlwimi %0,%1,24,16,23\n"
-			     "sync\n"
-			     "std %0,0(%3)"
-			     : "=&r" (tmp) , "=&r" (val) : "1" (val) , "b" (addr) , "m" (*addr));
-	get_paca()->io_sync = 1;
-}
-
-static inline void __out_be64(volatile unsigned long __iomem *addr, unsigned long val)
-{
-	__asm__ __volatile__("sync; std%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
-	get_paca()->io_sync = 1;
-}
-
-#include <asm/eeh.h>
-
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size)		do { } while (0)
-#define dma_cache_wback(_start,_size)		do { } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { } while (0)
-
-
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)	__va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p)	p
 
 #endif /* __KERNEL__ */
 
-#endif /* CONFIG_PPC64 */
 #endif /* _ASM_POWERPC_IO_H */
diff --git a/include/asm-powerpc/iommu.h b/include/asm-powerpc/iommu.h
index 39fad68..f85dbd3 100644
--- a/include/asm-powerpc/iommu.h
+++ b/include/asm-powerpc/iommu.h
@@ -34,7 +34,9 @@
 #define IOMMU_PAGE_MASK       (~((1 << IOMMU_PAGE_SHIFT) - 1))
 #define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
 
-#ifndef __ASSEMBLY__
+/* Boot time flags */
+extern int iommu_is_off;
+extern int iommu_force_on;
 
 /* Pure 2^n version of get_order */
 static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
@@ -42,8 +44,6 @@
 	return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
 }
 
-#endif   /* __ASSEMBLY__ */
-
 
 /*
  * IOMAP_MAX_ORDER defines the largest contiguous block
@@ -70,39 +70,31 @@
 struct scatterlist;
 struct device_node;
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
-
-/* Walks all buses and creates iommu tables */
-extern void iommu_setup_pSeries(void);
-extern void iommu_setup_dart(void);
-
 /* Frees table for an individual device node */
 extern void iommu_free_table(struct device_node *dn);
 
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
 /* Initializes an iommu_table based in values set in the passed-in
  * structure
  */
 extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
 					    int nid);
 
-extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
-		struct scatterlist *sglist, int nelems, unsigned long mask,
-		enum dma_data_direction direction);
+extern int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+			int nelems, unsigned long mask,
+			enum dma_data_direction direction);
 extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
-		int nelems, enum dma_data_direction direction);
+			   int nelems, enum dma_data_direction direction);
 
 extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-		dma_addr_t *dma_handle, unsigned long mask,
-		gfp_t flag, int node);
+				  dma_addr_t *dma_handle, unsigned long mask,
+				  gfp_t flag, int node);
 extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
-		void *vaddr, dma_addr_t dma_handle);
+				void *vaddr, dma_addr_t dma_handle);
 extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
-		size_t size, unsigned long mask,
-		enum dma_data_direction direction);
+				   size_t size, unsigned long mask,
+				   enum dma_data_direction direction);
 extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
-		size_t size, enum dma_data_direction direction);
+			       size_t size, enum dma_data_direction direction);
 
 extern void iommu_init_early_pSeries(void);
 extern void iommu_init_early_iSeries(void);
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index f960f53..46476e9 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -135,6 +135,10 @@
 
 extern struct irq_map_entry irq_map[NR_IRQS];
 
+static inline irq_hw_number_t virq_to_hw(unsigned int virq)
+{
+	return irq_map[virq].hwirq;
+}
 
 /**
  * irq_alloc_host - Allocate a new irq_host data structure
diff --git a/include/asm-powerpc/iseries/iommu.h b/include/asm-powerpc/iseries/iommu.h
index 0edbfe1..6e323a1 100644
--- a/include/asm-powerpc/iseries/iommu.h
+++ b/include/asm-powerpc/iseries/iommu.h
@@ -21,11 +21,13 @@
  * Boston, MA  02111-1307  USA
  */
 
+struct pci_dev;
 struct device_node;
 struct iommu_table;
 
 /* Creates table for an individual device node */
-extern void iommu_devnode_init_iSeries(struct device_node *dn);
+extern void iommu_devnode_init_iSeries(struct pci_dev *pdev,
+				       struct device_node *dn);
 
 /* Get table parameters from HV */
 extern void iommu_table_getparms_iSeries(unsigned long busno,
diff --git a/include/asm-powerpc/lv1call.h b/include/asm-powerpc/lv1call.h
new file mode 100644
index 0000000..f733bee
--- /dev/null
+++ b/include/asm-powerpc/lv1call.h
@@ -0,0 +1,345 @@
+/*
+ *  PS3 hvcall interface.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *  Copyright 2003, 2004 (c) MontaVista Software, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#if !defined(_ASM_POWERPC_LV1CALL_H)
+#define _ASM_POWERPC_LV1CALL_H
+
+#if !defined(__ASSEMBLY__)
+
+#include <linux/types.h>
+
+/* lv1 call declaration macros */
+
+#define LV1_1_IN_ARG_DECL u64 in_1
+#define LV1_2_IN_ARG_DECL LV1_1_IN_ARG_DECL, u64 in_2
+#define LV1_3_IN_ARG_DECL LV1_2_IN_ARG_DECL, u64 in_3
+#define LV1_4_IN_ARG_DECL LV1_3_IN_ARG_DECL, u64 in_4
+#define LV1_5_IN_ARG_DECL LV1_4_IN_ARG_DECL, u64 in_5
+#define LV1_6_IN_ARG_DECL LV1_5_IN_ARG_DECL, u64 in_6
+#define LV1_7_IN_ARG_DECL LV1_6_IN_ARG_DECL, u64 in_7
+#define LV1_8_IN_ARG_DECL LV1_7_IN_ARG_DECL, u64 in_8
+#define LV1_1_OUT_ARG_DECL u64 *out_1
+#define LV1_2_OUT_ARG_DECL LV1_1_OUT_ARG_DECL, u64 *out_2
+#define LV1_3_OUT_ARG_DECL LV1_2_OUT_ARG_DECL, u64 *out_3
+#define LV1_4_OUT_ARG_DECL LV1_3_OUT_ARG_DECL, u64 *out_4
+#define LV1_5_OUT_ARG_DECL LV1_4_OUT_ARG_DECL, u64 *out_5
+#define LV1_6_OUT_ARG_DECL LV1_5_OUT_ARG_DECL, u64 *out_6
+#define LV1_7_OUT_ARG_DECL LV1_6_OUT_ARG_DECL, u64 *out_7
+
+#define LV1_0_IN_0_OUT_ARG_DECL void
+#define LV1_1_IN_0_OUT_ARG_DECL LV1_1_IN_ARG_DECL
+#define LV1_2_IN_0_OUT_ARG_DECL LV1_2_IN_ARG_DECL
+#define LV1_3_IN_0_OUT_ARG_DECL LV1_3_IN_ARG_DECL
+#define LV1_4_IN_0_OUT_ARG_DECL LV1_4_IN_ARG_DECL
+#define LV1_5_IN_0_OUT_ARG_DECL LV1_5_IN_ARG_DECL
+#define LV1_6_IN_0_OUT_ARG_DECL LV1_6_IN_ARG_DECL
+#define LV1_7_IN_0_OUT_ARG_DECL LV1_7_IN_ARG_DECL
+
+#define LV1_0_IN_1_OUT_ARG_DECL                    LV1_1_OUT_ARG_DECL
+#define LV1_1_IN_1_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_2_IN_1_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_3_IN_1_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_4_IN_1_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_5_IN_1_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_6_IN_1_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_7_IN_1_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_8_IN_1_OUT_ARG_DECL LV1_8_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+
+#define LV1_0_IN_2_OUT_ARG_DECL                    LV1_2_OUT_ARG_DECL
+#define LV1_1_IN_2_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_2_IN_2_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_3_IN_2_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_4_IN_2_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_5_IN_2_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_6_IN_2_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_7_IN_2_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+
+#define LV1_0_IN_3_OUT_ARG_DECL                    LV1_3_OUT_ARG_DECL
+#define LV1_1_IN_3_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_2_IN_3_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_3_IN_3_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_4_IN_3_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_5_IN_3_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_6_IN_3_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_7_IN_3_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+
+#define LV1_0_IN_4_OUT_ARG_DECL                    LV1_4_OUT_ARG_DECL
+#define LV1_1_IN_4_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_2_IN_4_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_3_IN_4_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_4_IN_4_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_5_IN_4_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_6_IN_4_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_7_IN_4_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+
+#define LV1_0_IN_5_OUT_ARG_DECL                    LV1_5_OUT_ARG_DECL
+#define LV1_1_IN_5_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_2_IN_5_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_3_IN_5_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_4_IN_5_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_5_IN_5_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_6_IN_5_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_7_IN_5_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+
+#define LV1_0_IN_6_OUT_ARG_DECL                    LV1_6_OUT_ARG_DECL
+#define LV1_1_IN_6_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_2_IN_6_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_3_IN_6_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_4_IN_6_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_5_IN_6_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_6_IN_6_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_7_IN_6_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+
+#define LV1_0_IN_7_OUT_ARG_DECL                    LV1_7_OUT_ARG_DECL
+#define LV1_1_IN_7_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_2_IN_7_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_3_IN_7_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_4_IN_7_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_5_IN_7_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_6_IN_7_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_7_IN_7_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+
+#define LV1_1_IN_ARGS in_1
+#define LV1_2_IN_ARGS LV1_1_IN_ARGS, in_2
+#define LV1_3_IN_ARGS LV1_2_IN_ARGS, in_3
+#define LV1_4_IN_ARGS LV1_3_IN_ARGS, in_4
+#define LV1_5_IN_ARGS LV1_4_IN_ARGS, in_5
+#define LV1_6_IN_ARGS LV1_5_IN_ARGS, in_6
+#define LV1_7_IN_ARGS LV1_6_IN_ARGS, in_7
+#define LV1_8_IN_ARGS LV1_7_IN_ARGS, in_8
+
+#define LV1_1_OUT_ARGS out_1
+#define LV1_2_OUT_ARGS LV1_1_OUT_ARGS, out_2
+#define LV1_3_OUT_ARGS LV1_2_OUT_ARGS, out_3
+#define LV1_4_OUT_ARGS LV1_3_OUT_ARGS, out_4
+#define LV1_5_OUT_ARGS LV1_4_OUT_ARGS, out_5
+#define LV1_6_OUT_ARGS LV1_5_OUT_ARGS, out_6
+#define LV1_7_OUT_ARGS LV1_6_OUT_ARGS, out_7
+
+#define LV1_0_IN_0_OUT_ARGS
+#define LV1_1_IN_0_OUT_ARGS LV1_1_IN_ARGS
+#define LV1_2_IN_0_OUT_ARGS LV1_2_IN_ARGS
+#define LV1_3_IN_0_OUT_ARGS LV1_3_IN_ARGS
+#define LV1_4_IN_0_OUT_ARGS LV1_4_IN_ARGS
+#define LV1_5_IN_0_OUT_ARGS LV1_5_IN_ARGS
+#define LV1_6_IN_0_OUT_ARGS LV1_6_IN_ARGS
+#define LV1_7_IN_0_OUT_ARGS LV1_7_IN_ARGS
+
+#define LV1_0_IN_1_OUT_ARGS                LV1_1_OUT_ARGS
+#define LV1_1_IN_1_OUT_ARGS LV1_1_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_2_IN_1_OUT_ARGS LV1_2_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_3_IN_1_OUT_ARGS LV1_3_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_4_IN_1_OUT_ARGS LV1_4_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_5_IN_1_OUT_ARGS LV1_5_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_6_IN_1_OUT_ARGS LV1_6_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_7_IN_1_OUT_ARGS LV1_7_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_8_IN_1_OUT_ARGS LV1_8_IN_ARGS, LV1_1_OUT_ARGS
+
+#define LV1_0_IN_2_OUT_ARGS                LV1_2_OUT_ARGS
+#define LV1_1_IN_2_OUT_ARGS LV1_1_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_2_IN_2_OUT_ARGS LV1_2_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_3_IN_2_OUT_ARGS LV1_3_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_4_IN_2_OUT_ARGS LV1_4_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_5_IN_2_OUT_ARGS LV1_5_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_6_IN_2_OUT_ARGS LV1_6_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_7_IN_2_OUT_ARGS LV1_7_IN_ARGS, LV1_2_OUT_ARGS
+
+#define LV1_0_IN_3_OUT_ARGS                LV1_3_OUT_ARGS
+#define LV1_1_IN_3_OUT_ARGS LV1_1_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_2_IN_3_OUT_ARGS LV1_2_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_3_IN_3_OUT_ARGS LV1_3_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_4_IN_3_OUT_ARGS LV1_4_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_5_IN_3_OUT_ARGS LV1_5_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_6_IN_3_OUT_ARGS LV1_6_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_7_IN_3_OUT_ARGS LV1_7_IN_ARGS, LV1_3_OUT_ARGS
+
+#define LV1_0_IN_4_OUT_ARGS                LV1_4_OUT_ARGS
+#define LV1_1_IN_4_OUT_ARGS LV1_1_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_2_IN_4_OUT_ARGS LV1_2_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_3_IN_4_OUT_ARGS LV1_3_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_4_IN_4_OUT_ARGS LV1_4_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_5_IN_4_OUT_ARGS LV1_5_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_6_IN_4_OUT_ARGS LV1_6_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_7_IN_4_OUT_ARGS LV1_7_IN_ARGS, LV1_4_OUT_ARGS
+
+#define LV1_0_IN_5_OUT_ARGS                LV1_5_OUT_ARGS
+#define LV1_1_IN_5_OUT_ARGS LV1_1_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_2_IN_5_OUT_ARGS LV1_2_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_3_IN_5_OUT_ARGS LV1_3_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_4_IN_5_OUT_ARGS LV1_4_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_5_IN_5_OUT_ARGS LV1_5_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_6_IN_5_OUT_ARGS LV1_6_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_7_IN_5_OUT_ARGS LV1_7_IN_ARGS, LV1_5_OUT_ARGS
+
+#define LV1_0_IN_6_OUT_ARGS                LV1_6_OUT_ARGS
+#define LV1_1_IN_6_OUT_ARGS LV1_1_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_2_IN_6_OUT_ARGS LV1_2_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_3_IN_6_OUT_ARGS LV1_3_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_4_IN_6_OUT_ARGS LV1_4_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_5_IN_6_OUT_ARGS LV1_5_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_6_IN_6_OUT_ARGS LV1_6_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_7_IN_6_OUT_ARGS LV1_7_IN_ARGS, LV1_6_OUT_ARGS
+
+#define LV1_0_IN_7_OUT_ARGS                LV1_7_OUT_ARGS
+#define LV1_1_IN_7_OUT_ARGS LV1_1_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_2_IN_7_OUT_ARGS LV1_2_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_3_IN_7_OUT_ARGS LV1_3_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_4_IN_7_OUT_ARGS LV1_4_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_5_IN_7_OUT_ARGS LV1_5_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_6_IN_7_OUT_ARGS LV1_6_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_7_IN_7_OUT_ARGS LV1_7_IN_ARGS, LV1_7_OUT_ARGS
+
+/*
+ * This LV1_CALL() macro is for use by callers.  It expands into an
+ * inline call wrapper and an underscored HV call declaration.  The
+ * wrapper can be used to instrument the lv1 call interface.  The
+ * file lv1call.S defines its own LV1_CALL() macro to expand into
+ * the actual underscored call definition.
+ */
+
+#if !defined(LV1_CALL)
+#define LV1_CALL(name, in, out, num)                               \
+  extern s64 _lv1_##name(LV1_##in##_IN_##out##_OUT_ARG_DECL);      \
+  static inline int lv1_##name(LV1_##in##_IN_##out##_OUT_ARG_DECL) \
+    {return _lv1_##name(LV1_##in##_IN_##out##_OUT_ARGS);}
+#endif
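For example, the table entry LV1_CALL(get_logical_ppe_id, 0, 1, 69) below expands, under this definition, to roughly the following (the call number 69 is only consumed by the LV1_CALL() variant in lv1call.S):

	extern s64 _lv1_get_logical_ppe_id(u64 *out_1);

	static inline int lv1_get_logical_ppe_id(u64 *out_1)
	{
		return _lv1_get_logical_ppe_id(out_1);
	}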
+
+#endif /* !defined(__ASSEMBLY__) */
+
+/* lv1 call table */
+
+LV1_CALL(allocate_memory,                               4, 2,   0 )
+LV1_CALL(write_htab_entry,                              4, 0,   1 )
+LV1_CALL(construct_virtual_address_space,               3, 2,   2 )
+LV1_CALL(invalidate_htab_entries,                       5, 0,   3 )
+LV1_CALL(get_virtual_address_space_id_of_ppe,           1, 1,   4 )
+LV1_CALL(query_logical_partition_address_region_info,   1, 5,   6 )
+LV1_CALL(select_virtual_address_space,                  1, 0,   7 )
+LV1_CALL(pause,                                         1, 0,   9 )
+LV1_CALL(destruct_virtual_address_space,                1, 0,  10 )
+LV1_CALL(configure_irq_state_bitmap,                    3, 0,  11 )
+LV1_CALL(connect_irq_plug_ext,                          5, 0,  12 )
+LV1_CALL(release_memory,                                1, 0,  13 )
+LV1_CALL(disconnect_irq_plug_ext,                       3, 0,  17 )
+LV1_CALL(construct_event_receive_port,                  0, 1,  18 )
+LV1_CALL(destruct_event_receive_port,                   1, 0,  19 )
+LV1_CALL(send_event_locally,                            1, 0,  24 )
+LV1_CALL(end_of_interrupt,                              1, 0,  27 )
+LV1_CALL(connect_irq_plug,                              2, 0,  28 )
+LV1_CALL(disconnect_irq_plug,                           1, 0,  29 )
+LV1_CALL(end_of_interrupt_ext,                          3, 0,  30 )
+LV1_CALL(did_update_interrupt_mask,                     2, 0,  31 )
+LV1_CALL(shutdown_logical_partition,                    1, 0,  44 )
+LV1_CALL(destruct_logical_spe,                          1, 0,  54 )
+LV1_CALL(construct_logical_spe,                         7, 6,  57 )
+LV1_CALL(set_spe_interrupt_mask,                        3, 0,  61 )
+LV1_CALL(set_spe_transition_notifier,                   3, 0,  64 )
+LV1_CALL(disable_logical_spe,                           2, 0,  65 )
+LV1_CALL(clear_spe_interrupt_status,                    4, 0,  66 )
+LV1_CALL(get_spe_interrupt_status,                      2, 1,  67 )
+LV1_CALL(get_logical_ppe_id,                            0, 1,  69 )
+LV1_CALL(set_interrupt_mask,                            5, 0,  73 )
+LV1_CALL(get_logical_partition_id,                      0, 1,  74 )
+LV1_CALL(configure_execution_time_variable,             1, 0,  77 )
+LV1_CALL(get_spe_irq_outlet,                            2, 1,  78 )
+LV1_CALL(set_spe_privilege_state_area_1_register,       3, 0,  79 )
+LV1_CALL(create_repository_node,                        6, 0,  90 )
+LV1_CALL(get_repository_node_value,                     5, 2,  91 )
+LV1_CALL(modify_repository_node_value,                  6, 0,  92 )
+LV1_CALL(remove_repository_node,                        4, 0,  93 )
+LV1_CALL(read_htab_entries,                             2, 5,  95 )
+LV1_CALL(set_dabr,                                      2, 0,  96 )
+LV1_CALL(get_total_execution_time,                      2, 1, 103 )
+LV1_CALL(construct_io_irq_outlet,                       1, 1, 120 )
+LV1_CALL(destruct_io_irq_outlet,                        1, 0, 121 )
+LV1_CALL(map_htab,                                      1, 1, 122 )
+LV1_CALL(unmap_htab,                                    1, 0, 123 )
+LV1_CALL(get_version_info,                              0, 1, 127 )
+LV1_CALL(insert_htab_entry,                             6, 3, 158 )
+LV1_CALL(read_virtual_uart,                             3, 1, 162 )
+LV1_CALL(write_virtual_uart,                            3, 1, 163 )
+LV1_CALL(set_virtual_uart_param,                        3, 0, 164 )
+LV1_CALL(get_virtual_uart_param,                        2, 1, 165 )
+LV1_CALL(configure_virtual_uart_irq,                    1, 1, 166 )
+LV1_CALL(open_device,                                   3, 0, 170 )
+LV1_CALL(close_device,                                  2, 0, 171 )
+LV1_CALL(map_device_mmio_region,                        5, 1, 172 )
+LV1_CALL(unmap_device_mmio_region,                      3, 0, 173 )
+LV1_CALL(allocate_device_dma_region,                    5, 1, 174 )
+LV1_CALL(free_device_dma_region,                        3, 0, 175 )
+LV1_CALL(map_device_dma_region,                         6, 0, 176 )
+LV1_CALL(unmap_device_dma_region,                       4, 0, 177 )
+LV1_CALL(net_add_multicast_address,                     4, 0, 185 )
+LV1_CALL(net_remove_multicast_address,                  4, 0, 186 )
+LV1_CALL(net_start_tx_dma,                              4, 0, 187 )
+LV1_CALL(net_stop_tx_dma,                               3, 0, 188 )
+LV1_CALL(net_start_rx_dma,                              4, 0, 189 )
+LV1_CALL(net_stop_rx_dma,                               3, 0, 190 )
+LV1_CALL(net_set_interrupt_status_indicator,            4, 0, 191 )
+LV1_CALL(net_set_interrupt_mask,                        4, 0, 193 )
+LV1_CALL(net_control,                                   6, 2, 194 )
+LV1_CALL(connect_interrupt_event_receive_port,          4, 0, 197 )
+LV1_CALL(disconnect_interrupt_event_receive_port,       4, 0, 198 )
+LV1_CALL(get_spe_all_interrupt_statuses,                1, 1, 199 )
+LV1_CALL(deconfigure_virtual_uart_irq,                  0, 0, 202 )
+LV1_CALL(enable_logical_spe,                            2, 0, 207 )
+LV1_CALL(gpu_open,                                      1, 0, 210 )
+LV1_CALL(gpu_close,                                     0, 0, 211 )
+LV1_CALL(gpu_device_map,                                1, 2, 212 )
+LV1_CALL(gpu_device_unmap,                              1, 0, 213 )
+LV1_CALL(gpu_memory_allocate,                           5, 2, 214 )
+LV1_CALL(gpu_memory_free,                               1, 0, 216 )
+LV1_CALL(gpu_context_allocate,                          2, 5, 217 )
+LV1_CALL(gpu_context_free,                              1, 0, 218 )
+LV1_CALL(gpu_context_iomap,                             5, 0, 221 )
+LV1_CALL(gpu_context_attribute,                         6, 0, 225 )
+LV1_CALL(gpu_context_intr,                              1, 1, 227 )
+LV1_CALL(gpu_attribute,                                 5, 0, 228 )
+LV1_CALL(get_rtc,                                       0, 2, 232 )
+LV1_CALL(set_ppe_periodic_tracer_frequency,             1, 0, 240 )
+LV1_CALL(start_ppe_periodic_tracer,                     5, 0, 241 )
+LV1_CALL(stop_ppe_periodic_tracer,                      1, 1, 242 )
+LV1_CALL(storage_read,                                  6, 1, 245 )
+LV1_CALL(storage_write,                                 6, 1, 246 )
+LV1_CALL(storage_send_device_command,                   6, 1, 248 )
+LV1_CALL(storage_get_async_status,                      1, 2, 249 )
+LV1_CALL(storage_check_async_status,                    2, 1, 254 )
+LV1_CALL(panic,                                         1, 0, 255 )
+LV1_CALL(construct_lpm,                                 6, 3, 140 )
+LV1_CALL(destruct_lpm,                                  1, 0, 141 )
+LV1_CALL(start_lpm,                                     1, 0, 142 )
+LV1_CALL(stop_lpm,                                      1, 1, 143 )
+LV1_CALL(copy_lpm_trace_buffer,                         3, 1, 144 )
+LV1_CALL(add_lpm_event_bookmark,                        5, 0, 145 )
+LV1_CALL(delete_lpm_event_bookmark,                     3, 0, 146 )
+LV1_CALL(set_lpm_interrupt_mask,                        3, 1, 147 )
+LV1_CALL(get_lpm_interrupt_status,                      1, 1, 148 )
+LV1_CALL(set_lpm_general_control,                       5, 2, 149 )
+LV1_CALL(set_lpm_interval,                              3, 1, 150 )
+LV1_CALL(set_lpm_trigger_control,                       3, 1, 151 )
+LV1_CALL(set_lpm_counter_control,                       4, 1, 152 )
+LV1_CALL(set_lpm_group_control,                         3, 1, 153 )
+LV1_CALL(set_lpm_debug_bus_control,                     3, 1, 154 )
+LV1_CALL(set_lpm_counter,                               5, 2, 155 )
+LV1_CALL(set_lpm_signal,                                7, 0, 156 )
+LV1_CALL(set_lpm_spr_trigger,                           2, 0, 157 )
+
+#endif
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index dac90dc..1b04e57 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -26,6 +26,7 @@
 struct iommu_table;
 struct rtc_time;
 struct file;
+struct pci_controller;
 #ifdef CONFIG_KEXEC
 struct kimage;
 #endif
@@ -84,9 +85,12 @@
 	unsigned long	(*tce_get)(struct iommu_table *tbl,
 				    long index);
 	void		(*tce_flush)(struct iommu_table *tbl);
-	void		(*iommu_dev_setup)(struct pci_dev *dev);
-	void		(*iommu_bus_setup)(struct pci_bus *bus);
-	void		(*irq_bus_setup)(struct pci_bus *bus);
+	void		(*pci_dma_dev_setup)(struct pci_dev *dev);
+	void		(*pci_dma_bus_setup)(struct pci_bus *bus);
+
+	void __iomem *	(*ioremap)(phys_addr_t addr, unsigned long size,
+				   unsigned long flags);
+	void		(*iounmap)(volatile void __iomem *token);
 #endif /* CONFIG_PPC64 */
 
 	int		(*probe)(void);
@@ -106,6 +110,10 @@
 	/* Called after scanning the bus, before allocating resources */
 	void		(*pcibios_fixup)(void);
 	int		(*pci_probe_mode)(struct pci_bus *);
+	void		(*pci_irq_fixup)(struct pci_dev *dev);
+
+	/* To set up PHBs when using the automatic OF platform driver for PCI */
+	int		(*pci_setup_phb)(struct pci_controller *host);
 
 	void		(*restart)(char *cmd);
 	void		(*power_off)(void);
@@ -199,10 +207,6 @@
 	 * Returns 0 to allow assignment/enabling of the device. */
 	int  (*pcibios_enable_device_hook)(struct pci_dev *, int initial);
 
-	/* For interrupt routing */
-	unsigned char (*pci_swizzle)(struct pci_dev *, unsigned char *);
-	int (*pci_map_irq)(struct pci_dev *, unsigned char, unsigned char);
-
 	/* Called in indirect_* to avoid touching devices */
 	int (*pci_exclude_device)(unsigned char, unsigned char);
 
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index c3fc7a2..41c8c9c 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -248,21 +248,6 @@
 extern void hpte_init_lpar(void);
 extern void hpte_init_iSeries(void);
 
-extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
-				     unsigned long va, unsigned long prpn,
-				     unsigned long rflags,
-				     unsigned long vflags, int psize);
-
-extern long native_hpte_insert(unsigned long hpte_group,
-			       unsigned long va, unsigned long prpn,
-			       unsigned long rflags,
-			       unsigned long vflags, int psize);
-
-extern long iSeries_hpte_insert(unsigned long hpte_group,
-				unsigned long va, unsigned long prpn,
-				unsigned long rflags,
-				unsigned long vflags, int psize);
-
 extern void stabs_alloc(void);
 extern void slb_initialize(void);
 extern void slb_flush_and_rebolt(void);
diff --git a/include/asm-powerpc/mpc52xx.h b/include/asm-powerpc/mpc52xx.h
new file mode 100644
index 0000000..4a28a85
--- /dev/null
+++ b/include/asm-powerpc/mpc52xx.h
@@ -0,0 +1,254 @@
+/*
+ * Prototypes, etc. for the Freescale MPC52xx embedded cpu chips
+ * May need to be cleaned as the port goes on ...
+ *
+ * Copyright (C) 2004-2005 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_POWERPC_MPC52xx_H__
+#define __ASM_POWERPC_MPC52xx_H__
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+#include <asm/prom.h>
+#endif /* __ASSEMBLY__ */
+
+
+/* ======================================================================== */
+/* Structures mapping of some unit register set                             */
+/* ======================================================================== */
+
+#ifndef __ASSEMBLY__
+
+/* Memory Mapping Control */
+struct mpc52xx_mmap_ctl {
+	u32 mbar;		/* MMAP_CTRL + 0x00 */
+
+	u32 cs0_start;		/* MMAP_CTRL + 0x04 */
+	u32 cs0_stop;		/* MMAP_CTRL + 0x08 */
+	u32 cs1_start;		/* MMAP_CTRL + 0x0c */
+	u32 cs1_stop;		/* MMAP_CTRL + 0x10 */
+	u32 cs2_start;		/* MMAP_CTRL + 0x14 */
+	u32 cs2_stop;		/* MMAP_CTRL + 0x18 */
+	u32 cs3_start;		/* MMAP_CTRL + 0x1c */
+	u32 cs3_stop;		/* MMAP_CTRL + 0x20 */
+	u32 cs4_start;		/* MMAP_CTRL + 0x24 */
+	u32 cs4_stop;		/* MMAP_CTRL + 0x28 */
+	u32 cs5_start;		/* MMAP_CTRL + 0x2c */
+	u32 cs5_stop;		/* MMAP_CTRL + 0x30 */
+
+	u32 sdram0;		/* MMAP_CTRL + 0x34 */
+	u32 sdram1;		/* MMAP_CTRL + 0X38 */
+
+	u32 reserved[4];	/* MMAP_CTRL + 0x3c .. 0x48 */
+
+	u32 boot_start;		/* MMAP_CTRL + 0x4c */
+	u32 boot_stop;		/* MMAP_CTRL + 0x50 */
+
+	u32 ipbi_ws_ctrl;	/* MMAP_CTRL + 0x54 */
+
+	u32 cs6_start;		/* MMAP_CTRL + 0x58 */
+	u32 cs6_stop;		/* MMAP_CTRL + 0x5c */
+	u32 cs7_start;		/* MMAP_CTRL + 0x60 */
+	u32 cs7_stop;		/* MMAP_CTRL + 0x64 */
+};
+
+/* SDRAM control */
+struct mpc52xx_sdram {
+	u32 mode;		/* SDRAM + 0x00 */
+	u32 ctrl;		/* SDRAM + 0x04 */
+	u32 config1;		/* SDRAM + 0x08 */
+	u32 config2;		/* SDRAM + 0x0c */
+};
+
+/* SDMA */
+struct mpc52xx_sdma {
+	u32 taskBar;		/* SDMA + 0x00 */
+	u32 currentPointer;	/* SDMA + 0x04 */
+	u32 endPointer;		/* SDMA + 0x08 */
+	u32 variablePointer;	/* SDMA + 0x0c */
+
+	u8 IntVect1;		/* SDMA + 0x10 */
+	u8 IntVect2;		/* SDMA + 0x11 */
+	u16 PtdCntrl;		/* SDMA + 0x12 */
+
+	u32 IntPend;		/* SDMA + 0x14 */
+	u32 IntMask;		/* SDMA + 0x18 */
+
+	u16 tcr[16];		/* SDMA + 0x1c .. 0x3a */
+
+	u8 ipr[32];		/* SDMA + 0x3c .. 0x5b */
+
+	u32 cReqSelect;		/* SDMA + 0x5c */
+	u32 task_size0;		/* SDMA + 0x60 */
+	u32 task_size1;		/* SDMA + 0x64 */
+	u32 MDEDebug;		/* SDMA + 0x68 */
+	u32 ADSDebug;		/* SDMA + 0x6c */
+	u32 Value1;		/* SDMA + 0x70 */
+	u32 Value2;		/* SDMA + 0x74 */
+	u32 Control;		/* SDMA + 0x78 */
+	u32 Status;		/* SDMA + 0x7c */
+	u32 PTDDebug;		/* SDMA + 0x80 */
+};
+
+/* GPT */
+struct mpc52xx_gpt {
+	u32 mode;		/* GPTx + 0x00 */
+	u32 count;		/* GPTx + 0x04 */
+	u32 pwm;		/* GPTx + 0x08 */
+	u32 status;		/* GPTx + 0X0c */
+};
+
+/* GPIO */
+struct mpc52xx_gpio {
+	u32 port_config;	/* GPIO + 0x00 */
+	u32 simple_gpioe;	/* GPIO + 0x04 */
+	u32 simple_ode;		/* GPIO + 0x08 */
+	u32 simple_ddr;		/* GPIO + 0x0c */
+	u32 simple_dvo;		/* GPIO + 0x10 */
+	u32 simple_ival;	/* GPIO + 0x14 */
+	u8 outo_gpioe;		/* GPIO + 0x18 */
+	u8 reserved1[3];	/* GPIO + 0x19 */
+	u8 outo_dvo;		/* GPIO + 0x1c */
+	u8 reserved2[3];	/* GPIO + 0x1d */
+	u8 sint_gpioe;		/* GPIO + 0x20 */
+	u8 reserved3[3];	/* GPIO + 0x21 */
+	u8 sint_ode;		/* GPIO + 0x24 */
+	u8 reserved4[3];	/* GPIO + 0x25 */
+	u8 sint_ddr;		/* GPIO + 0x28 */
+	u8 reserved5[3];	/* GPIO + 0x29 */
+	u8 sint_dvo;		/* GPIO + 0x2c */
+	u8 reserved6[3];	/* GPIO + 0x2d */
+	u8 sint_inten;		/* GPIO + 0x30 */
+	u8 reserved7[3];	/* GPIO + 0x31 */
+	u16 sint_itype;		/* GPIO + 0x34 */
+	u16 reserved8;		/* GPIO + 0x36 */
+	u8 gpio_control;	/* GPIO + 0x38 */
+	u8 reserved9[3];	/* GPIO + 0x39 */
+	u8 sint_istat;		/* GPIO + 0x3c */
+	u8 sint_ival;		/* GPIO + 0x3d */
+	u8 bus_errs;		/* GPIO + 0x3e */
+	u8 reserved10;		/* GPIO + 0x3f */
+};
+
+#define MPC52xx_GPIO_PSC_CONFIG_UART_WITHOUT_CD	4
+#define MPC52xx_GPIO_PSC_CONFIG_UART_WITH_CD	5
+#define MPC52xx_GPIO_PCI_DIS			(1<<15)
+
+/* GPIO with WakeUp */
+struct mpc52xx_gpio_wkup {
+	u8 wkup_gpioe;		/* GPIO_WKUP + 0x00 */
+	u8 reserved1[3];	/* GPIO_WKUP + 0x03 */
+	u8 wkup_ode;		/* GPIO_WKUP + 0x04 */
+	u8 reserved2[3];	/* GPIO_WKUP + 0x05 */
+	u8 wkup_ddr;		/* GPIO_WKUP + 0x08 */
+	u8 reserved3[3];	/* GPIO_WKUP + 0x09 */
+	u8 wkup_dvo;		/* GPIO_WKUP + 0x0C */
+	u8 reserved4[3];	/* GPIO_WKUP + 0x0D */
+	u8 wkup_inten;		/* GPIO_WKUP + 0x10 */
+	u8 reserved5[3];	/* GPIO_WKUP + 0x11 */
+	u8 wkup_iinten;		/* GPIO_WKUP + 0x14 */
+	u8 reserved6[3];	/* GPIO_WKUP + 0x15 */
+	u16 wkup_itype;		/* GPIO_WKUP + 0x18 */
+	u8 reserved7[2];	/* GPIO_WKUP + 0x1A */
+	u8 wkup_maste;		/* GPIO_WKUP + 0x1C */
+	u8 reserved8[3];	/* GPIO_WKUP + 0x1D */
+	u8 wkup_ival;		/* GPIO_WKUP + 0x20 */
+	u8 reserved9[3];	/* GPIO_WKUP + 0x21 */
+	u8 wkup_istat;		/* GPIO_WKUP + 0x24 */
+	u8 reserved10[3];	/* GPIO_WKUP + 0x25 */
+};
+
+/* XLB Bus control */
+struct mpc52xx_xlb {
+	u8 reserved[0x40];
+	u32 config;		/* XLB + 0x40 */
+	u32 version;		/* XLB + 0x44 */
+	u32 status;		/* XLB + 0x48 */
+	u32 int_enable;		/* XLB + 0x4c */
+	u32 addr_capture;	/* XLB + 0x50 */
+	u32 bus_sig_capture;	/* XLB + 0x54 */
+	u32 addr_timeout;	/* XLB + 0x58 */
+	u32 data_timeout;	/* XLB + 0x5c */
+	u32 bus_act_timeout;	/* XLB + 0x60 */
+	u32 master_pri_enable;	/* XLB + 0x64 */
+	u32 master_priority;	/* XLB + 0x68 */
+	u32 base_address;	/* XLB + 0x6c */
+	u32 snoop_window;	/* XLB + 0x70 */
+};
+
+#define MPC52xx_XLB_CFG_PLDIS		(1 << 31)
+#define MPC52xx_XLB_CFG_SNOOP		(1 << 15)
+
+/* Clock Distribution control */
+struct mpc52xx_cdm {
+	u32 jtag_id;		/* CDM + 0x00  reg0 read only */
+	u32 rstcfg;		/* CDM + 0x04  reg1 read only */
+	u32 breadcrumb;		/* CDM + 0x08  reg2 */
+
+	u8 mem_clk_sel;		/* CDM + 0x0c  reg3 byte0 */
+	u8 xlb_clk_sel;		/* CDM + 0x0d  reg3 byte1 read only */
+	u8 ipb_clk_sel;		/* CDM + 0x0e  reg3 byte2 */
+	u8 pci_clk_sel;		/* CDM + 0x0f  reg3 byte3 */
+
+	u8 ext_48mhz_en;	/* CDM + 0x10  reg4 byte0 */
+	u8 fd_enable;		/* CDM + 0x11  reg4 byte1 */
+	u16 fd_counters;	/* CDM + 0x12  reg4 byte2,3 */
+
+	u32 clk_enables;	/* CDM + 0x14  reg5 */
+
+	u8 osc_disable;		/* CDM + 0x18  reg6 byte0 */
+	u8 reserved0[3];	/* CDM + 0x19  reg6 byte1,2,3 */
+
+	u8 ccs_sleep_enable;	/* CDM + 0x1c  reg7 byte0 */
+	u8 osc_sleep_enable;	/* CDM + 0x1d  reg7 byte1 */
+	u8 reserved1;		/* CDM + 0x1e  reg7 byte2 */
+	u8 ccs_qreq_test;	/* CDM + 0x1f  reg7 byte3 */
+
+	u8 soft_reset;		/* CDM + 0x20  u8 byte0 */
+	u8 no_ckstp;		/* CDM + 0x21  u8 byte0 */
+	u8 reserved2[2];	/* CDM + 0x22  u8 byte1,2,3 */
+
+	u8 pll_lock;		/* CDM + 0x24  reg9 byte0 */
+	u8 pll_looselock;	/* CDM + 0x25  reg9 byte1 */
+	u8 pll_sm_lockwin;	/* CDM + 0x26  reg9 byte2 */
+	u8 reserved3;		/* CDM + 0x27  reg9 byte3 */
+
+	u16 reserved4;		/* CDM + 0x28  reg10 byte0,1 */
+	u16 mclken_div_psc1;	/* CDM + 0x2a  reg10 byte2,3 */
+
+	u16 reserved5;		/* CDM + 0x2c  reg11 byte0,1 */
+	u16 mclken_div_psc2;	/* CDM + 0x2e  reg11 byte2,3 */
+
+	u16 reserved6;		/* CDM + 0x30  reg12 byte0,1 */
+	u16 mclken_div_psc3;	/* CDM + 0x32  reg12 byte2,3 */
+
+	u16 reserved7;		/* CDM + 0x34  reg13 byte0,1 */
+	u16 mclken_div_psc6;	/* CDM + 0x36  reg13 byte2,3 */
+};
+
+#endif /* __ASSEMBLY__ */
+
+
+/* ========================================================================= */
+/* Prototypes for MPC52xx sysdev                                             */
+/* ========================================================================= */
+
+#ifndef __ASSEMBLY__
+
+extern void __iomem * mpc52xx_find_and_map(const char *);
+extern unsigned int mpc52xx_find_ipb_freq(struct device_node *node);
+extern void mpc52xx_setup_cpu(void);
+
+extern void mpc52xx_init_irq(void);
+extern unsigned int mpc52xx_get_irq(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_POWERPC_MPC52xx_H__ */
+
diff --git a/include/asm-powerpc/mpc85xx.h b/include/asm-powerpc/mpc85xx.h
index ccdb8a2..5414299 100644
--- a/include/asm-powerpc/mpc85xx.h
+++ b/include/asm-powerpc/mpc85xx.h
@@ -31,14 +31,6 @@
 #include <platforms/85xx/mpc85xx_cds.h>
 #endif
 
-#define _IO_BASE        isa_io_base
-#define _ISA_MEM_BASE   isa_mem_base
-#ifdef CONFIG_PCI
-#define PCI_DRAM_OFFSET pci_dram_offset
-#else
-#define PCI_DRAM_OFFSET 0
-#endif
-
 /* Let modules/drivers get at CCSRBAR */
 extern phys_addr_t get_ccsrbar(void);
 
diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h
index ef0a545..b71e7b3 100644
--- a/include/asm-powerpc/mpic.h
+++ b/include/asm-powerpc/mpic.h
@@ -3,6 +3,7 @@
 #ifdef __KERNEL__
 
 #include <linux/irq.h>
+#include <asm/dcr.h>
 
 /*
  * Global registers
@@ -225,6 +226,23 @@
 #endif /* CONFIG_MPIC_BROKEN_U3 */
 
 
+enum mpic_reg_type {
+	mpic_access_mmio_le,
+	mpic_access_mmio_be,
+#ifdef CONFIG_PPC_DCR
+	mpic_access_dcr
+#endif
+};
+
+struct mpic_reg_bank {
+	u32 __iomem	*base;
+#ifdef CONFIG_PPC_DCR
+	dcr_host_t	dhost;
+	unsigned int	dbase;
+	unsigned int	doff;
+#endif /* CONFIG_PPC_DCR */
+};
+
 /* The instance data of a given MPIC */
 struct mpic
 {
@@ -264,11 +282,18 @@
 	spinlock_t		fixup_lock;
 #endif
 
+	/* Register access method */
+	enum mpic_reg_type	reg_type;
+
 	/* The various ioremap'ed bases */
-	volatile u32 __iomem	*gregs;
-	volatile u32 __iomem	*tmregs;
-	volatile u32 __iomem	*cpuregs[MPIC_MAX_CPUS];
-	volatile u32 __iomem	*isus[MPIC_MAX_ISU];
+	struct mpic_reg_bank	gregs;
+	struct mpic_reg_bank	tmregs;
+	struct mpic_reg_bank	cpuregs[MPIC_MAX_CPUS];
+	struct mpic_reg_bank	isus[MPIC_MAX_ISU];
+
+#ifdef CONFIG_PPC_DCR
+	unsigned int		dcr_base;
+#endif
 
 #ifdef CONFIG_MPIC_WEIRD
 	/* Pointer to HW info array */
@@ -305,6 +330,8 @@
 #define MPIC_SPV_EOI			0x00000020
 /* No passthrough disable */
 #define MPIC_NO_PTHROU_DIS		0x00000040
+/* DCR based MPIC */
+#define MPIC_USES_DCR			0x00000080
 
 /* MPIC HW modification ID */
 #define MPIC_REGSET_MASK		0xf0000000
@@ -337,7 +364,7 @@
  * that is senses[0] correspond to linux irq "irq_offset".
  */
 extern struct mpic *mpic_alloc(struct device_node *node,
-			       unsigned long phys_addr,
+			       phys_addr_t phys_addr,
 			       unsigned int flags,
 			       unsigned int isu_size,
 			       unsigned int irq_count,
@@ -350,7 +377,7 @@
  * @phys_addr:	physical address of the ISU
  */
 extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
-			    unsigned long phys_addr);
+			    phys_addr_t phys_addr);
 
 /* Set default sense codes
  *
diff --git a/include/asm-powerpc/of_device.h b/include/asm-powerpc/of_device.h
index c5c0b0b..a889b20 100644
--- a/include/asm-powerpc/of_device.h
+++ b/include/asm-powerpc/of_device.h
@@ -6,12 +6,6 @@
 #include <linux/mod_devicetable.h>
 #include <asm/prom.h>
 
-/*
- * The of_platform_bus_type is a bus type used by drivers that do not
- * attach to a macio or similar bus but still use OF probing
- * mechanism
- */
-extern struct bus_type of_platform_bus_type;
 
 /*
  * The of_device is a kind of "base class" that is a superset of
@@ -20,46 +14,22 @@
  */
 struct of_device
 {
-	struct device_node	*node;		/* OF device node */
+	struct device_node	*node;		/* to be obsoleted */
 	u64			dma_mask;	/* DMA mask */
 	struct device		dev;		/* Generic device interface */
 };
 #define	to_of_device(d) container_of(d, struct of_device, dev)
 
+extern const struct of_device_id *of_match_node(
+	const struct of_device_id *matches, const struct device_node *node);
 extern const struct of_device_id *of_match_device(
 	const struct of_device_id *matches, const struct of_device *dev);
 
 extern struct of_device *of_dev_get(struct of_device *dev);
 extern void of_dev_put(struct of_device *dev);
 
-/*
- * An of_platform_driver driver is attached to a basic of_device on
- * the "platform bus" (of_platform_bus_type)
- */
-struct of_platform_driver
-{
-	char			*name;
-	struct of_device_id	*match_table;
-	struct module		*owner;
-
-	int	(*probe)(struct of_device* dev, const struct of_device_id *match);
-	int	(*remove)(struct of_device* dev);
-
-	int	(*suspend)(struct of_device* dev, pm_message_t state);
-	int	(*resume)(struct of_device* dev);
-	int	(*shutdown)(struct of_device* dev);
-
-	struct device_driver	driver;
-};
-#define	to_of_platform_driver(drv) container_of(drv,struct of_platform_driver, driver)
-
-extern int of_register_driver(struct of_platform_driver *drv);
-extern void of_unregister_driver(struct of_platform_driver *drv);
 extern int of_device_register(struct of_device *ofdev);
 extern void of_device_unregister(struct of_device *ofdev);
-extern struct of_device *of_platform_device_create(struct device_node *np,
-						   const char *bus_id,
-						   struct device *parent);
 extern void of_release_dev(struct device *dev);
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/of_platform.h b/include/asm-powerpc/of_platform.h
new file mode 100644
index 0000000..217eafb
--- /dev/null
+++ b/include/asm-powerpc/of_platform.h
@@ -0,0 +1,60 @@
+/*
+ *    Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
+ *			 <benh@kernel.crashing.org>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/of_device.h>
+
+/*
+ * The of_platform_bus_type is a bus type used by drivers that do not
+ * attach to a macio or similar bus but still use OF probing
+ * mechanism
+ */
+extern struct bus_type of_platform_bus_type;
+
+/*
+ * An of_platform_driver driver is attached to a basic of_device on
+ * the "platform bus" (of_platform_bus_type)
+ */
+struct of_platform_driver
+{
+	char			*name;
+	struct of_device_id	*match_table;
+	struct module		*owner;
+
+	int	(*probe)(struct of_device* dev,
+			 const struct of_device_id *match);
+	int	(*remove)(struct of_device* dev);
+
+	int	(*suspend)(struct of_device* dev, pm_message_t state);
+	int	(*resume)(struct of_device* dev);
+	int	(*shutdown)(struct of_device* dev);
+
+	struct device_driver	driver;
+};
+#define	to_of_platform_driver(drv) \
+	container_of(drv,struct of_platform_driver, driver)
+
+/* Platform drivers register/unregister */
+extern int of_register_platform_driver(struct of_platform_driver *drv);
+extern void of_unregister_platform_driver(struct of_platform_driver *drv);
+
+/* Platform devices and busses creation */
+extern struct of_device *of_platform_device_create(struct device_node *np,
+						   const char *bus_id,
+						   struct device *parent);
+/* pseudo "matches" value to not do deep probe */
+#define OF_NO_DEEP_PROBE ((struct of_device_id *)-1)
+
+extern int of_platform_bus_probe(struct device_node *root,
+				 struct of_device_id *matches,
+				 struct device *parent);
+
+extern struct of_device *of_find_device_by_node(struct device_node *np);
+extern struct of_device *of_find_device_by_phandle(phandle ph);
diff --git a/include/asm-powerpc/oprofile_impl.h b/include/asm-powerpc/oprofile_impl.h
index 07a10e5..71043bf 100644
--- a/include/asm-powerpc/oprofile_impl.h
+++ b/include/asm-powerpc/oprofile_impl.h
@@ -44,7 +44,9 @@
 			   int num_counters);
 	void (*cpu_setup) (struct op_counter_config *);
 	void (*start) (struct op_counter_config *);
+	void (*global_start) (struct op_counter_config *);
 	void (*stop) (void);
+	void (*global_stop) (void);
 	void (*handle_interrupt) (struct pt_regs *,
 				  struct op_counter_config *);
 	int num_counters;
@@ -54,6 +56,7 @@
 extern struct op_powerpc_model op_model_rs64;
 extern struct op_powerpc_model op_model_power4;
 extern struct op_powerpc_model op_model_7450;
+extern struct op_powerpc_model op_model_cell;
 
 #ifndef CONFIG_FSL_BOOKE
 
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index 0a4e5c9..0d3adc0 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -93,7 +93,8 @@
 	u64 stab_rr;			/* stab/slb round-robin counter */
 	u64 saved_r1;			/* r1 save for RTAS calls */
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
-	u8 proc_enabled;		/* irq soft-enable flag */
+	u8 soft_enabled;		/* irq soft-enable flag */
+	u8 hard_enabled;		/* set if irqs are enabled in MSR */
 	u8 io_sync;			/* writel() needs spin_unlock sync */
 
 	/* Stuff for accurate time accounting */
diff --git a/include/asm-powerpc/pci-bridge.h b/include/asm-powerpc/pci-bridge.h
index 86ee46b..7bb7f90 100644
--- a/include/asm-powerpc/pci-bridge.h
+++ b/include/asm-powerpc/pci-bridge.h
@@ -25,6 +25,7 @@
 	int node;
 	void *arch_data;
 	struct list_head list_node;
+	struct device *parent;
 
 	int first_busno;
 	int last_busno;
diff --git a/include/asm-powerpc/pci.h b/include/asm-powerpc/pci.h
index 46afd29..16f1331 100644
--- a/include/asm-powerpc/pci.h
+++ b/include/asm-powerpc/pci.h
@@ -62,29 +62,23 @@
 }
 
 #ifdef CONFIG_PPC64
-#define HAVE_ARCH_PCI_MWI 1
-static inline int pcibios_prep_mwi(struct pci_dev *dev)
-{
-	/*
-	 * We would like to avoid touching the cacheline size or MWI bit
-	 * but we cant do that with the current pcibios_prep_mwi 
-	 * interface. pSeries firmware sets the cacheline size (which is not
-	 * the cpu cacheline size in all cases) and hardware treats MWI 
-	 * the same as memory write. So we dont touch the cacheline size
-	 * here and allow the generic code to set the MWI bit.
-	 */
-	return 0;
-}
 
-extern struct dma_mapping_ops pci_dma_ops;
+/*
+ * We want to avoid touching the cacheline size or MWI bit.
+ * pSeries firmware sets the cacheline size (which is not the cpu cacheline
+ * size in all cases) and hardware treats MWI the same as memory write.
+ */
+#define PCI_DISABLE_MWI
+
+extern struct dma_mapping_ops *pci_dma_ops;
 
 /* For DAC DMA, we currently don't support it by default, but
  * we let 64-bit platforms override this.
  */
 static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask)
 {
-	if (pci_dma_ops.dac_dma_supported)
-		return pci_dma_ops.dac_dma_supported(&hwdev->dev, mask);
+	if (pci_dma_ops && pci_dma_ops->dac_dma_supported)
+		return pci_dma_ops->dac_dma_supported(&hwdev->dev, mask);
 	return 0;
 }
 
@@ -216,6 +210,8 @@
 extern void pcibios_fixup_device_resources(struct pci_dev *dev,
 			struct pci_bus *bus);
 
+extern void pcibios_setup_new_device(struct pci_dev *dev);
+
 extern void pcibios_claim_one_bus(struct pci_bus *b);
 
 extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
@@ -238,12 +234,10 @@
 					 unsigned long size,
 					 pgprot_t prot);
 
-#if defined(CONFIG_PPC_MULTIPLATFORM) || defined(CONFIG_PPC32)
 #define HAVE_ARCH_PCI_RESOURCE_TO_USER
 extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
 				 const struct resource *rsrc,
 				 resource_size_t *start, resource_size_t *end);
-#endif /* CONFIG_PPC_MULTIPLATFORM || CONFIG_PPC32 */
 
 #endif	/* __KERNEL__ */
 #endif /* __ASM_POWERPC_PCI_H */
diff --git a/include/asm-powerpc/pgalloc.h b/include/asm-powerpc/pgalloc.h
index ae63db7..b0830db 100644
--- a/include/asm-powerpc/pgalloc.h
+++ b/include/asm-powerpc/pgalloc.h
@@ -11,7 +11,7 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 
-extern kmem_cache_t *pgtable_cache[];
+extern struct kmem_cache *pgtable_cache[];
 
 #ifdef CONFIG_PPC_64K_PAGES
 #define PTE_CACHE_NUM	0
diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h
index 1115756..ab6eddb 100644
--- a/include/asm-powerpc/ppc-pci.h
+++ b/include/asm-powerpc/ppc-pci.h
@@ -36,18 +36,17 @@
 void *traverse_pci_devices(struct device_node *start, traverse_func pre,
 		void *data);
 
-void pci_devs_phb_init(void);
-void pci_devs_phb_init_dynamic(struct pci_controller *phb);
-int setup_phb(struct device_node *dev, struct pci_controller *phb);
-void __devinit scan_phb(struct pci_controller *hose);
+extern void pci_devs_phb_init(void);
+extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
+extern void scan_phb(struct pci_controller *hose);
 
 /* From rtas_pci.h */
-void init_pci_config_tokens (void);
-unsigned long get_phb_buid (struct device_node *);
+extern void init_pci_config_tokens (void);
+extern unsigned long get_phb_buid (struct device_node *);
+extern int rtas_setup_phb(struct pci_controller *phb);
 
 /* From pSeries_pci.h */
 extern void pSeries_final_fixup(void);
-extern void pSeries_irq_bus_setup(struct pci_bus *bus);
 
 extern unsigned long pci_probe_only;
 
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index 6cb6fb1..a26c32e 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -53,10 +53,6 @@
 
 #endif /* CONFIG_PPC_PREP */
 
-#ifndef CONFIG_PPC_MULTIPLATFORM
-#define _machine 0
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
 #endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */
 
 /*
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index ec11d44..0afee17 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -17,6 +17,7 @@
  */
 #include <linux/types.h>
 #include <linux/proc_fs.h>
+#include <linux/platform_device.h>
 #include <asm/atomic.h>
 
 /* Definitions used by the flattened device tree */
@@ -333,6 +334,20 @@
 struct pci_dev;
 extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
 
+static inline int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
+{
+	int irq = irq_of_parse_and_map(dev, index);
+
+	/* Only dereference the resource if both the
+	 * resource and the irq are valid. */
+	if (r && irq != NO_IRQ) {
+		r->start = r->end = irq;
+		r->flags = IORESOURCE_IRQ;
+	}
+
+	return irq;
+}
+
 
 #endif /* __KERNEL__ */
 #endif /* _POWERPC_PROM_H */
diff --git a/include/asm-powerpc/ps3.h b/include/asm-powerpc/ps3.h
new file mode 100644
index 0000000..52a69ed
--- /dev/null
+++ b/include/asm-powerpc/ps3.h
@@ -0,0 +1,462 @@
+/*
+ *  PS3 platform declarations.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#if !defined(_ASM_POWERPC_PS3_H)
+#define _ASM_POWERPC_PS3_H
+
+#include <linux/compiler.h> /* for __deprecated */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+
+/**
+ * struct ps3_device_id - HV bus device identifier from the system repository
+ * @bus_id: HV bus id, {1..} (zero invalid)
+ * @dev_id: HV device id, {0..}
+ */
+
+struct ps3_device_id {
+	unsigned int bus_id;
+	unsigned int dev_id;
+};
+
+
+/* dma routines */
+
+enum ps3_dma_page_size {
+	PS3_DMA_4K = 12U,
+	PS3_DMA_64K = 16U,
+	PS3_DMA_1M = 20U,
+	PS3_DMA_16M = 24U,
+};
+
+enum ps3_dma_region_type {
+	PS3_DMA_OTHER = 0,
+	PS3_DMA_INTERNAL = 2,
+};
+
+/**
+ * struct ps3_dma_region - A per-device dma state structure
+ * @did: The HV device id.
+ * @page_size: The ioc pagesize.
+ * @region_type: The HV region type.
+ * @bus_addr: The 'translated' bus address of the region.
+ * @len: The length in bytes of the region.
+ * @chunk_list: Opaque variable used by the ioc page manager.
+ */
+
+struct ps3_dma_region {
+	struct ps3_device_id did;
+	enum ps3_dma_page_size page_size;
+	enum ps3_dma_region_type region_type;
+	unsigned long bus_addr;
+	unsigned long len;
+	struct {
+		spinlock_t lock;
+		struct list_head head;
+	} chunk_list;
+};
+
+/**
+ * ps3_dma_region_init - Helper to initialize structure variables
+ *
+ * Helper to properly initialize variables prior to calling
+ * ps3_system_bus_device_register.
+ */
+
+static inline void ps3_dma_region_init(struct ps3_dma_region *r,
+	const struct ps3_device_id* did, enum ps3_dma_page_size page_size,
+	enum ps3_dma_region_type region_type)
+{
+	r->did = *did;
+	r->page_size = page_size;
+	r->region_type = region_type;
+}
+int ps3_dma_region_create(struct ps3_dma_region *r);
+int ps3_dma_region_free(struct ps3_dma_region *r);
+int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
+	unsigned long len, unsigned long *bus_addr);
+int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
+	unsigned long len);
+
+/* mmio routines */
+
+enum ps3_mmio_page_size {
+	PS3_MMIO_4K = 12U,
+	PS3_MMIO_64K = 16U
+};
+
+/**
+ * struct ps3_mmio_region - a per-device mmio state structure
+ *
+ * Current systems can be supported with a single region per device.
+ */
+
+struct ps3_mmio_region {
+	struct ps3_device_id did;
+	unsigned long bus_addr;
+	unsigned long len;
+	enum ps3_mmio_page_size page_size;
+	unsigned long lpar_addr;
+};
+
+/**
+ * ps3_mmio_region_init - Helper to initialize structure variables
+ *
+ * Helper to properly initialize variables prior to calling
+ * ps3_system_bus_device_register.
+ */
+
+static inline void ps3_mmio_region_init(struct ps3_mmio_region *r,
+	const struct ps3_device_id* did, unsigned long bus_addr,
+	unsigned long len, enum ps3_mmio_page_size page_size)
+{
+	r->did = *did;
+	r->bus_addr = bus_addr;
+	r->len = len;
+	r->page_size = page_size;
+}
+int ps3_mmio_region_create(struct ps3_mmio_region *r);
+int ps3_free_mmio_region(struct ps3_mmio_region *r);
+unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr);
+
+/* interrupt routines */
+
+int ps3_alloc_io_irq(unsigned int interrupt_id, unsigned int *virq);
+int ps3_free_io_irq(unsigned int virq);
+int ps3_alloc_event_irq(unsigned int *virq);
+int ps3_free_event_irq(unsigned int virq);
+int ps3_send_event_locally(unsigned int virq);
+int ps3_connect_event_irq(const struct ps3_device_id *did,
+	unsigned int interrupt_id, unsigned int *virq);
+int ps3_disconnect_event_irq(const struct ps3_device_id *did,
+	unsigned int interrupt_id, unsigned int virq);
+int ps3_alloc_vuart_irq(void* virt_addr_bmp, unsigned int *virq);
+int ps3_free_vuart_irq(unsigned int virq);
+int ps3_alloc_spe_irq(unsigned long spe_id, unsigned int class,
+	unsigned int *virq);
+int ps3_free_spe_irq(unsigned int virq);
+
+/* lv1 result codes */
+
+enum lv1_result {
+	LV1_SUCCESS                     = 0,
+	/* not used                       -1 */
+	LV1_RESOURCE_SHORTAGE           = -2,
+	LV1_NO_PRIVILEGE                = -3,
+	LV1_DENIED_BY_POLICY            = -4,
+	LV1_ACCESS_VIOLATION            = -5,
+	LV1_NO_ENTRY                    = -6,
+	LV1_DUPLICATE_ENTRY             = -7,
+	LV1_TYPE_MISMATCH               = -8,
+	LV1_BUSY                        = -9,
+	LV1_EMPTY                       = -10,
+	LV1_WRONG_STATE                 = -11,
+	/* not used                       -12 */
+	LV1_NO_MATCH                    = -13,
+	LV1_ALREADY_CONNECTED           = -14,
+	LV1_UNSUPPORTED_PARAMETER_VALUE = -15,
+	LV1_CONDITION_NOT_SATISFIED     = -16,
+	LV1_ILLEGAL_PARAMETER_VALUE     = -17,
+	LV1_BAD_OPTION                  = -18,
+	LV1_IMPLEMENTATION_LIMITATION   = -19,
+	LV1_NOT_IMPLEMENTED             = -20,
+	LV1_INVALID_CLASS_ID            = -21,
+	LV1_CONSTRAINT_NOT_SATISFIED    = -22,
+	LV1_ALIGNMENT_ERROR             = -23,
+	LV1_INTERNAL_ERROR              = -32768,
+};
+
+static inline const char* ps3_result(int result)
+{
+#if defined(DEBUG)
+	switch (result) {
+	case LV1_SUCCESS:
+		return "LV1_SUCCESS (0)";
+	case -1:
+		return "** unknown result ** (-1)";
+	case LV1_RESOURCE_SHORTAGE:
+		return "LV1_RESOURCE_SHORTAGE (-2)";
+	case LV1_NO_PRIVILEGE:
+		return "LV1_NO_PRIVILEGE (-3)";
+	case LV1_DENIED_BY_POLICY:
+		return "LV1_DENIED_BY_POLICY (-4)";
+	case LV1_ACCESS_VIOLATION:
+		return "LV1_ACCESS_VIOLATION (-5)";
+	case LV1_NO_ENTRY:
+		return "LV1_NO_ENTRY (-6)";
+	case LV1_DUPLICATE_ENTRY:
+		return "LV1_DUPLICATE_ENTRY (-7)";
+	case LV1_TYPE_MISMATCH:
+		return "LV1_TYPE_MISMATCH (-8)";
+	case LV1_BUSY:
+		return "LV1_BUSY (-9)";
+	case LV1_EMPTY:
+		return "LV1_EMPTY (-10)";
+	case LV1_WRONG_STATE:
+		return "LV1_WRONG_STATE (-11)";
+	case -12:
+		return "** unknown result ** (-12)";
+	case LV1_NO_MATCH:
+		return "LV1_NO_MATCH (-13)";
+	case LV1_ALREADY_CONNECTED:
+		return "LV1_ALREADY_CONNECTED (-14)";
+	case LV1_UNSUPPORTED_PARAMETER_VALUE:
+		return "LV1_UNSUPPORTED_PARAMETER_VALUE (-15)";
+	case LV1_CONDITION_NOT_SATISFIED:
+		return "LV1_CONDITION_NOT_SATISFIED (-16)";
+	case LV1_ILLEGAL_PARAMETER_VALUE:
+		return "LV1_ILLEGAL_PARAMETER_VALUE (-17)";
+	case LV1_BAD_OPTION:
+		return "LV1_BAD_OPTION (-18)";
+	case LV1_IMPLEMENTATION_LIMITATION:
+		return "LV1_IMPLEMENTATION_LIMITATION (-19)";
+	case LV1_NOT_IMPLEMENTED:
+		return "LV1_NOT_IMPLEMENTED (-20)";
+	case LV1_INVALID_CLASS_ID:
+		return "LV1_INVALID_CLASS_ID (-21)";
+	case LV1_CONSTRAINT_NOT_SATISFIED:
+		return "LV1_CONSTRAINT_NOT_SATISFIED (-22)";
+	case LV1_ALIGNMENT_ERROR:
+		return "LV1_ALIGNMENT_ERROR (-23)";
+	case LV1_INTERNAL_ERROR:
+		return "LV1_INTERNAL_ERROR (-32768)";
+	default:
+		BUG();
+		return "** unknown result **";
+	};
+#else
+	return "";
+#endif
+}
+
+/* repository bus info */
+
+enum ps3_bus_type {
+	PS3_BUS_TYPE_SB = 4,
+	PS3_BUS_TYPE_STORAGE = 5,
+};
+
+enum ps3_dev_type {
+	PS3_DEV_TYPE_SB_GELIC = 3,
+	PS3_DEV_TYPE_SB_USB = 4,
+	PS3_DEV_TYPE_SB_GPIO = 6,
+};
+
+int ps3_repository_read_bus_str(unsigned int bus_index, const char *bus_str,
+	u64 *value);
+int ps3_repository_read_bus_id(unsigned int bus_index, unsigned int *bus_id);
+int ps3_repository_read_bus_type(unsigned int bus_index,
+	enum ps3_bus_type *bus_type);
+int ps3_repository_read_bus_num_dev(unsigned int bus_index,
+	unsigned int *num_dev);
+
+/* repository bus device info */
+
+enum ps3_interrupt_type {
+	PS3_INTERRUPT_TYPE_EVENT_PORT = 2,
+	PS3_INTERRUPT_TYPE_SB_OHCI = 3,
+	PS3_INTERRUPT_TYPE_SB_EHCI = 4,
+	PS3_INTERRUPT_TYPE_OTHER = 5,
+};
+
+enum ps3_region_type {
+	PS3_REGION_TYPE_SB_OHCI = 3,
+	PS3_REGION_TYPE_SB_EHCI = 4,
+	PS3_REGION_TYPE_SB_GPIO = 5,
+};
+
+int ps3_repository_read_dev_str(unsigned int bus_index,
+	unsigned int dev_index, const char *dev_str, u64 *value);
+int ps3_repository_read_dev_id(unsigned int bus_index, unsigned int dev_index,
+	unsigned int *dev_id);
+int ps3_repository_read_dev_type(unsigned int bus_index,
+	unsigned int dev_index, enum ps3_dev_type *dev_type);
+int ps3_repository_read_dev_intr(unsigned int bus_index,
+	unsigned int dev_index, unsigned int intr_index,
+	enum ps3_interrupt_type *intr_type, unsigned int *interrupt_id);
+int ps3_repository_read_dev_reg_type(unsigned int bus_index,
+	unsigned int dev_index, unsigned int reg_index,
+	enum ps3_region_type *reg_type);
+int ps3_repository_read_dev_reg_addr(unsigned int bus_index,
+	unsigned int dev_index, unsigned int reg_index, u64 *bus_addr,
+	u64 *len);
+int ps3_repository_read_dev_reg(unsigned int bus_index,
+	unsigned int dev_index, unsigned int reg_index,
+	enum ps3_region_type *reg_type, u64 *bus_addr, u64 *len);
+
+/* repository bus enumerators */
+
+struct ps3_repository_device {
+	unsigned int bus_index;
+	unsigned int dev_index;
+	struct ps3_device_id did;
+};
+
+int ps3_repository_find_device(enum ps3_bus_type bus_type,
+	enum ps3_dev_type dev_type,
+	const struct ps3_repository_device *start_dev,
+	struct ps3_repository_device *dev);
+static inline int ps3_repository_find_first_device(
+	enum ps3_bus_type bus_type, enum ps3_dev_type dev_type,
+	struct ps3_repository_device *dev)
+{
+	return ps3_repository_find_device(bus_type, dev_type, NULL, dev);
+}
+int ps3_repository_find_interrupt(const struct ps3_repository_device *dev,
+	enum ps3_interrupt_type intr_type, unsigned int *interrupt_id);
+int ps3_repository_find_region(const struct ps3_repository_device *dev,
+	enum ps3_region_type reg_type, u64 *bus_addr, u64 *len);
+
+/* repository block device info */
+
+int ps3_repository_read_dev_port(unsigned int bus_index,
+	unsigned int dev_index, u64 *port);
+int ps3_repository_read_dev_blk_size(unsigned int bus_index,
+	unsigned int dev_index, u64 *blk_size);
+int ps3_repository_read_dev_num_blocks(unsigned int bus_index,
+	unsigned int dev_index, u64 *num_blocks);
+int ps3_repository_read_dev_num_regions(unsigned int bus_index,
+	unsigned int dev_index, unsigned int *num_regions);
+int ps3_repository_read_dev_region_id(unsigned int bus_index,
+	unsigned int dev_index, unsigned int region_index,
+	unsigned int *region_id);
+int ps3_repository_read_dev_region_size(unsigned int bus_index,
+	unsigned int dev_index,	unsigned int region_index, u64 *region_size);
+int ps3_repository_read_dev_region_start(unsigned int bus_index,
+	unsigned int dev_index, unsigned int region_index, u64 *region_start);
+
+/* repository pu and memory info */
+
+int ps3_repository_read_num_pu(unsigned int *num_pu);
+int ps3_repository_read_ppe_id(unsigned int *pu_index, unsigned int *ppe_id);
+int ps3_repository_read_rm_base(unsigned int ppe_id, u64 *rm_base);
+int ps3_repository_read_rm_size(unsigned int ppe_id, u64 *rm_size);
+int ps3_repository_read_region_total(u64 *region_total);
+int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size,
+	u64 *region_total);
+
+/* repository pme info */
+
+int ps3_repository_read_num_be(unsigned int *num_be);
+int ps3_repository_read_be_node_id(unsigned int be_index, u64 *node_id);
+int ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq);
+int ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq);
+
+/* repository 'Other OS' area */
+
+int ps3_repository_read_boot_dat_addr(u64 *lpar_addr);
+int ps3_repository_read_boot_dat_size(unsigned int *size);
+int ps3_repository_read_boot_dat_info(u64 *lpar_addr, unsigned int *size);
+
+/* repository spu info */
+
+/**
+ * enum ps3_spu_resource_type - Type of spu resource.
+ * @PS3_SPU_RESOURCE_TYPE_SHARED: Logical spu is shared with other partitions.
+ * @PS3_SPU_RESOURCE_TYPE_EXCLUSIVE: Logical spu is not shared with other partitions.
+ *
+ * Returned by ps3_repository_read_spu_resource_id().
+ */
+
+enum ps3_spu_resource_type {
+	PS3_SPU_RESOURCE_TYPE_SHARED = 0,
+	PS3_SPU_RESOURCE_TYPE_EXCLUSIVE = 0x8000000000000000UL,
+};
+
+int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved);
+int ps3_repository_read_num_spu_resource_id(unsigned int *num_resource_id);
+int ps3_repository_read_spu_resource_id(unsigned int res_index,
+	enum ps3_spu_resource_type* resource_type, unsigned int *resource_id);
+
+
+/* system bus routines */
+
+enum ps3_match_id {
+	PS3_MATCH_ID_EHCI = 1,
+	PS3_MATCH_ID_OHCI,
+	PS3_MATCH_ID_GELIC,
+	PS3_MATCH_ID_AV_SETTINGS,
+	PS3_MATCH_ID_SYSTEM_MANAGER,
+};
+
+/**
+ * struct ps3_system_bus_device - a device on the system bus
+ */
+
+struct ps3_system_bus_device {
+	enum ps3_match_id match_id;
+	struct ps3_device_id did;
+	unsigned int interrupt_id;
+/*	struct iommu_table *iommu_table; -- waiting for Ben's cleanups */
+	struct ps3_dma_region *d_region;
+	struct ps3_mmio_region *m_region;
+	struct device core;
+};
+
+/**
+ * struct ps3_system_bus_driver - a driver for a device on the system bus
+ */
+
+struct ps3_system_bus_driver {
+	enum ps3_match_id match_id;
+	struct device_driver core;
+	int (*probe)(struct ps3_system_bus_device *);
+	int (*remove)(struct ps3_system_bus_device *);
+/*	int (*suspend)(struct ps3_system_bus_device *, pm_message_t); */
+/*	int (*resume)(struct ps3_system_bus_device *); */
+};
+
+int ps3_system_bus_device_register(struct ps3_system_bus_device *dev);
+int ps3_system_bus_driver_register(struct ps3_system_bus_driver *drv);
+void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv);
+static inline struct ps3_system_bus_driver *to_ps3_system_bus_driver(
+	struct device_driver *_drv)
+{
+	return container_of(_drv, struct ps3_system_bus_driver, core);
+}
+static inline struct ps3_system_bus_device *to_ps3_system_bus_device(
+	struct device *_dev)
+{
+	return container_of(_dev, struct ps3_system_bus_device, core);
+}
+
+/**
+ * ps3_system_bus_set_driver_data - set driver private data for a device
+ * @dev: device structure
+ * @data: Data to set
+ */
+
+static inline void ps3_system_bus_set_driver_data(
+	struct ps3_system_bus_device *dev, void *data)
+{
+	dev->core.driver_data = data;
+}
+static inline void *ps3_system_bus_get_driver_data(
+	struct ps3_system_bus_device *dev)
+{
+	return dev->core.driver_data;
+}
+
+/* These two need global scope for get_dma_ops(). */
+
+extern struct bus_type ps3_system_bus_type;
+
+#endif
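The kernel-doc above notes that ps3_dma_region_init() exists to fill in the per-device DMA state before ps3_system_bus_device_register() is called. A rough sketch of that sequence for a hypothetical device follows; the bus/device ids, the match_id choice and the function names are placeholders, not values from this patch:

/* Sketch only: illustrative ids and minimal error handling. */
static struct ps3_dma_region example_dma_region;

static struct ps3_system_bus_device example_dev = {
	.match_id = PS3_MATCH_ID_OHCI,
	.d_region = &example_dma_region,
};

static int __init example_register(void)
{
	struct ps3_device_id did = { .bus_id = 1, .dev_id = 0 };
	int result;

	example_dev.did = did;
	ps3_dma_region_init(&example_dma_region, &did, PS3_DMA_64K,
		PS3_DMA_INTERNAL);

	result = ps3_dma_region_create(&example_dma_region);
	if (result)
		return result;

	return ps3_system_bus_device_register(&example_dev);
}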
diff --git a/include/asm-powerpc/rtas.h b/include/asm-powerpc/rtas.h
index d34f9e1..5a0c136 100644
--- a/include/asm-powerpc/rtas.h
+++ b/include/asm-powerpc/rtas.h
@@ -54,8 +54,6 @@
 	rtas_arg_t *rets;     /* Pointer to return values in args[]. */
 };  
 
-extern struct rtas_args rtas_stop_self_args;
-
 struct rtas_t {
 	unsigned long entry;		/* physical address pointer */
 	unsigned long base;		/* physical address pointer */
diff --git a/include/asm-powerpc/setup.h b/include/asm-powerpc/setup.h
index 3d9740a..817fac0 100644
--- a/include/asm-powerpc/setup.h
+++ b/include/asm-powerpc/setup.h
@@ -1,9 +1,6 @@
 #ifndef _ASM_POWERPC_SETUP_H
 #define _ASM_POWERPC_SETUP_H
 
-#ifdef __KERNEL__
-
 #define COMMAND_LINE_SIZE	512
 
-#endif	/* __KERNEL__ */
 #endif	/* _ASM_POWERPC_SETUP_H */
diff --git a/include/asm-powerpc/sparsemem.h b/include/asm-powerpc/sparsemem.h
index 38b1ea3..48ad807 100644
--- a/include/asm-powerpc/sparsemem.h
+++ b/include/asm-powerpc/sparsemem.h
@@ -9,8 +9,14 @@
  * MAX_PHYSMEM_BITS		2^N: how much memory we can have in that space
  */
 #define SECTION_SIZE_BITS       24
+
+#if defined(CONFIG_PS3_USE_LPAR_ADDR)
+#define MAX_PHYSADDR_BITS       47
+#define MAX_PHYSMEM_BITS        47
+#else
 #define MAX_PHYSADDR_BITS       44
 #define MAX_PHYSMEM_BITS        44
+#endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 extern void create_section_mapping(unsigned long start, unsigned long end);
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index e73ea00..fdad426 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -111,14 +111,12 @@
 	u8 *local_store;
 	unsigned long problem_phys;
 	struct spu_problem __iomem *problem;
-	struct spu_priv1 __iomem *priv1;
 	struct spu_priv2 __iomem *priv2;
 	struct list_head list;
 	struct list_head sched_list;
+	struct list_head full_list;
 	int number;
-	int nid;
 	unsigned int irqs[3];
-	u32 isrc;
 	u32 node;
 	u64 flags;
 	u64 dar;
@@ -144,6 +142,7 @@
 	char irq_c1[8];
 	char irq_c2[8];
 
+	void* pdata; /* platform private data */
 	struct sys_device sysdev;
 };
 
@@ -170,6 +169,13 @@
 	struct module *owner;
 } spufs_calls;
 
+/* coredump calls implemented in spufs */
+struct spu_coredump_calls {
+	asmlinkage int (*arch_notes_size)(void);
+	asmlinkage void (*arch_write_notes)(struct file *file);
+	struct module *owner;
+};
+
 /* return status from spu_run, same as in libspe */
 #define SPE_EVENT_DMA_ALIGNMENT		0x0008	/*A DMA alignment error */
 #define SPE_EVENT_SPE_ERROR		0x0010	/*An illegal instruction error*/
@@ -182,8 +188,10 @@
  */
 #define SPU_CREATE_EVENTS_ENABLED	0x0001
 #define SPU_CREATE_GANG			0x0002
+#define SPU_CREATE_NOSCHED		0x0004
+#define SPU_CREATE_ISOLATE		0x0008
 
-#define SPU_CREATE_FLAG_ALL		0x0003 /* mask of all valid flags */
+#define SPU_CREATE_FLAG_ALL		0x000f /* mask of all valid flags */
 
 
 #ifdef CONFIG_SPU_FS_MODULE
@@ -199,6 +207,15 @@
 }
 #endif /* MODULE */
 
+int register_arch_coredump_calls(struct spu_coredump_calls *calls);
+void unregister_arch_coredump_calls(struct spu_coredump_calls *calls);
+
+int spu_add_sysdev_attr(struct sysdev_attribute *attr);
+void spu_remove_sysdev_attr(struct sysdev_attribute *attr);
+
+int spu_add_sysdev_attr_group(struct attribute_group *attrs);
+void spu_remove_sysdev_attr_group(struct attribute_group *attrs);
+
 
 /*
  * Notifier blocks:
@@ -277,6 +294,7 @@
 	u32 spu_runcntl_RW;					/* 0x401c */
 #define SPU_RUNCNTL_STOP	0L
 #define SPU_RUNCNTL_RUNNABLE	1L
+#define SPU_RUNCNTL_ISOLATE	2L
 	u8  pad_0x4020_0x4024[0x4];				/* 0x4020 */
 	u32 spu_status_R;					/* 0x4024 */
 #define SPU_STOP_STATUS_SHIFT           16
@@ -289,8 +307,8 @@
 #define SPU_STATUS_INVALID_INSTR        0x20
 #define SPU_STATUS_INVALID_CH           0x40
 #define SPU_STATUS_ISOLATED_STATE       0x80
-#define SPU_STATUS_ISOLATED_LOAD_STAUTUS 0x200
-#define SPU_STATUS_ISOLATED_EXIT_STAUTUS 0x400
+#define SPU_STATUS_ISOLATED_LOAD_STATUS 0x200
+#define SPU_STATUS_ISOLATED_EXIT_STATUS 0x400
 	u8  pad_0x4028_0x402c[0x4];				/* 0x4028 */
 	u32 spu_spe_R;						/* 0x402c */
 	u8  pad_0x4030_0x4034[0x4];				/* 0x4030 */
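The new register_arch_coredump_calls()/unregister_arch_coredump_calls() hooks let spufs supply the SPU notes written into core dumps. A sketch of the registration side, with empty stub callbacks standing in for the real spufs implementations:

/* Sketch only: the callbacks are illustrative stubs. */
static asmlinkage int example_notes_size(void)
{
	return 0;
}

static asmlinkage void example_write_notes(struct file *file)
{
}

static struct spu_coredump_calls example_coredump_calls = {
	.arch_notes_size	= example_notes_size,
	.arch_write_notes	= example_write_notes,
	.owner			= THIS_MODULE,
};

static int __init example_init(void)
{
	return register_arch_coredump_calls(&example_coredump_calls);
}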
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
index 964c2d3..bdbf906 100644
--- a/include/asm-powerpc/spu_csa.h
+++ b/include/asm-powerpc/spu_csa.h
@@ -151,7 +151,6 @@
 	u64 mfc_fir_chkstp_enable_RW;
 	u64 smf_sbi_signal_sel;
 	u64 smf_ato_signal_sel;
-	u64 mfc_sdr_RW;
 	u64 tlb_index_hint_RO;
 	u64 tlb_index_W;
 	u64 tlb_vpn_RW;
diff --git a/include/asm-powerpc/spu_info.h b/include/asm-powerpc/spu_info.h
new file mode 100644
index 0000000..3545efb
--- /dev/null
+++ b/include/asm-powerpc/spu_info.h
@@ -0,0 +1,54 @@
+/*
+ * SPU info structures
+ *
+ * (C) Copyright 2006 IBM Corp.
+ *
+ * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _SPU_INFO_H
+#define _SPU_INFO_H
+
+#ifdef __KERNEL__
+#include <asm/spu.h>
+#include <linux/types.h>
+#else
+struct mfc_cq_sr {
+	__u64 mfc_cq_data0_RW;
+	__u64 mfc_cq_data1_RW;
+	__u64 mfc_cq_data2_RW;
+	__u64 mfc_cq_data3_RW;
+};
+#endif /* __KERNEL__ */
+
+struct spu_dma_info {
+	__u64 dma_info_type;
+	__u64 dma_info_mask;
+	__u64 dma_info_status;
+	__u64 dma_info_stall_and_notify;
+	__u64 dma_info_atomic_command_status;
+	struct mfc_cq_sr dma_info_command_data[16];
+};
+
+struct spu_proxydma_info {
+	__u64 proxydma_info_type;
+	__u64 proxydma_info_mask;
+	__u64 proxydma_info_status;
+	struct mfc_cq_sr proxydma_info_command_data[8];
+};
+
+#endif
diff --git a/include/asm-powerpc/spu_priv1.h b/include/asm-powerpc/spu_priv1.h
index 300c458..69dcb0c 100644
--- a/include/asm-powerpc/spu_priv1.h
+++ b/include/asm-powerpc/spu_priv1.h
@@ -21,12 +21,13 @@
 #define _SPU_PRIV1_H
 #if defined(__KERNEL__)
 
+#include <linux/types.h>
+
 struct spu;
 
 /* access to priv1 registers */
 
-struct spu_priv1_ops
-{
+struct spu_priv1_ops {
 	void (*int_mask_and) (struct spu *spu, int class, u64 mask);
 	void (*int_mask_or) (struct spu *spu, int class, u64 mask);
 	void (*int_mask_set) (struct spu *spu, int class, u64 mask);
@@ -37,7 +38,7 @@
 	u64 (*mfc_dar_get) (struct spu *spu);
 	u64 (*mfc_dsisr_get) (struct spu *spu);
 	void (*mfc_dsisr_set) (struct spu *spu, u64 dsisr);
-	void (*mfc_sdr_set) (struct spu *spu, u64 sdr);
+	void (*mfc_sdr_setup) (struct spu *spu);
 	void (*mfc_sr1_set) (struct spu *spu, u64 sr1);
 	u64 (*mfc_sr1_get) (struct spu *spu);
 	void (*mfc_tclass_id_set) (struct spu *spu, u64 tclass_id);
@@ -112,9 +113,9 @@
 }
 
 static inline void
-spu_mfc_sdr_set (struct spu *spu, u64 sdr)
+spu_mfc_sdr_setup (struct spu *spu)
 {
-	spu_priv1_ops->mfc_sdr_set(spu, sdr);
+	spu_priv1_ops->mfc_sdr_setup(spu);
 }
 
 static inline void
@@ -171,12 +172,41 @@
 	return spu_priv1_ops->resource_allocation_enable_get(spu);
 }
 
-/* The declarations folowing are put here for convenience
- * and only intended to be used by the platform setup code
- * for initializing spu_priv1_ops.
+/* spu management abstraction */
+
+struct spu_management_ops {
+	int (*enumerate_spus)(int (*fn)(void *data));
+	int (*create_spu)(struct spu *spu, void *data);
+	int (*destroy_spu)(struct spu *spu);
+};
+
+extern const struct spu_management_ops* spu_management_ops;
+
+static inline int
+spu_enumerate_spus (int (*fn)(void *data))
+{
+	return spu_management_ops->enumerate_spus(fn);
+}
+
+static inline int
+spu_create_spu (struct spu *spu, void *data)
+{
+	return spu_management_ops->create_spu(spu, data);
+}
+
+static inline int
+spu_destroy_spu (struct spu *spu)
+{
+	return spu_management_ops->destroy_spu(spu);
+}
+
+/*
+ * The declarations following are put here for convenience
+ * and are only intended to be used by the platform setup code.
  */
 
 extern const struct spu_priv1_ops spu_priv1_mmio_ops;
+extern const struct spu_management_ops spu_management_of_ops;
 
 #endif /* __KERNEL__ */
 #endif
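The new spu_management_ops abstraction mirrors spu_priv1_ops: platform code supplies a table of callbacks (spu_management_of_ops exported above is the device-tree backed one) and the common SPU base code calls through the inline wrappers. A stub table showing the expected shape; the callbacks here are placeholders, not the real OF implementation:

/* Sketch only: stub callbacks standing in for real platform code. */
static int example_enumerate_spus(int (*fn)(void *data))
{
	return 0;	/* would walk the platform's SPEs, calling fn() per spu */
}

static int example_create_spu(struct spu *spu, void *data)
{
	return 0;	/* would map the problem/priv2 state and fill in *spu */
}

static int example_destroy_spu(struct spu *spu)
{
	return 0;
}

const struct spu_management_ops example_management_ops = {
	.enumerate_spus	= example_enumerate_spus,
	.create_spu	= example_create_spu,
	.destroy_spu	= example_destroy_spu,
};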
diff --git a/include/asm-powerpc/todc.h b/include/asm-powerpc/todc.h
deleted file mode 100644
index 60a8c39..0000000
--- a/include/asm-powerpc/todc.h
+++ /dev/null
@@ -1,487 +0,0 @@
-/*
- * Definitions for the M48Txx and mc146818 series of Time of day/Real Time
- * Clock chips.
- *
- * Author: Mark A. Greer <mgreer@mvista.com>
- *
- * 2001 (c) MontaVista, Software, Inc.  This file is licensed under
- * the terms of the GNU General Public License version 2.  This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-/*
- * Support for the M48T37/M48T59/.../mc146818 Real Time Clock chips.
- * Purpose is to make one generic file that handles all of these chips instead
- * of every platform implementing the same code over & over again.
- */
-
-#ifndef __PPC_KERNEL_TODC_H
-#define __PPC_KERNEL_TODC_H
-
-typedef struct {
-	uint rtc_type;		/* your particular chip */
-
-	/*
-	 * Following are the addresses of the AS0, AS1, and DATA registers
-	 * of these chips.  Note that these are board-specific.
-	 */
-	unsigned int nvram_as0;
-	unsigned int nvram_as1;
-	unsigned int nvram_data;
-
-	/*
-	 * Define bits to stop external set of regs from changing so
-	 * the chip can be read/written reliably.
-	 */
-	unsigned char enable_read;
-	unsigned char enable_write;
-
-	/*
-	 * Following is the number of AS0 address bits.  This is normally
-	 * 8 but some bad hardware routes address lines incorrectly.
-	 */
-	int as0_bits;
-
-	int nvram_size;	/* Size of NVRAM on chip */
-	int sw_flags;	/* Software control flags */
-
-	/* Following are the register offsets for the particular chip */
-	int year;
-	int month;
-	int day_of_month;
-	int day_of_week;
-	int hours;
-	int minutes;
-	int seconds;
-	int control_b;
-	int control_a;
-	int watchdog;
-	int interrupts;
-	int alarm_date;
-	int alarm_hour;
-	int alarm_minutes;
-	int alarm_seconds;
-	int century;
-	int flags;
-
-	/*
-	 * Some RTC chips have their NVRAM buried behind a addr/data pair of
-	 * regs on the first level/clock registers.  The following fields
-	 * are the addresses for those addr/data regs.
-	 */
-	int nvram_addr_reg;
-	int nvram_data_reg;
-} todc_info_t;
-
-/*
- * Define the types of TODC/RTC variants that are supported in
- * arch/ppc/kernel/todc_time.c
- * Make a new one of these for any chip somehow differs from what's already
- * defined.  That way, if you ever need to put in code to touch those
- * bits/registers in todc_time.c, you can put it inside an
- * 'if (todc_info->rtc_type == TODC_TYPE_XXX)' so you won't break
- * anyone else.
- */
-#define TODC_TYPE_MK48T35		1
-#define TODC_TYPE_MK48T37		2
-#define TODC_TYPE_MK48T59		3
-#define TODC_TYPE_DS1693		4	/* Dallas DS1693 RTC */
-#define TODC_TYPE_DS1743		5	/* Dallas DS1743 RTC */
-#define TODC_TYPE_DS1746		6	/* Dallas DS1746 RTC */
-#define TODC_TYPE_DS1747		7	/* Dallas DS1747 RTC */
-#define TODC_TYPE_DS1501		8	/* Dallas DS1501 RTC */
-#define TODC_TYPE_DS1643		9	/* Dallas DS1643 RTC */
-#define TODC_TYPE_PC97307		10	/* PC97307 internal RTC */
-#define TODC_TYPE_DS1557		11	/* Dallas DS1557 RTC */
-#define TODC_TYPE_DS17285		12	/* Dallas DS17285 RTC */
-#define TODC_TYPE_DS1553		13	/* Dallas DS1553 RTC */
-#define TODC_TYPE_MC146818		100	/* Leave room for m48txx's */
-
-/*
- * Bit to clear/set to enable reads/writes to the chip
- */
-#define TODC_MK48TXX_CNTL_A_R		0x40
-#define TODC_MK48TXX_CNTL_A_W		0x80
-#define TODC_MK48TXX_DAY_CB		0x80
-
-#define TODC_DS1501_CNTL_B_TE		0x80
-
-/*
- * Define flag bits used by todc routines.
- */
-#define TODC_FLAG_2_LEVEL_NVRAM		0x00000001
-
-/*
- * Define the values for the various RTC's that should to into the todc_info
- * table.
- * Note: The XXX_NVRAM_SIZE, XXX_NVRAM_ADDR_REG, and XXX_NVRAM_DATA_REG only
- * matter if XXX_SW_FLAGS has TODC_FLAG_2_LEVEL_NVRAM set.
- */
-#define TODC_TYPE_MK48T35_NVRAM_SIZE		0x7ff8
-#define TODC_TYPE_MK48T35_SW_FLAGS		0
-#define TODC_TYPE_MK48T35_YEAR			0x7fff
-#define TODC_TYPE_MK48T35_MONTH			0x7ffe
-#define TODC_TYPE_MK48T35_DOM			0x7ffd	/* Day of Month */
-#define TODC_TYPE_MK48T35_DOW			0x7ffc	/* Day of Week */
-#define TODC_TYPE_MK48T35_HOURS			0x7ffb
-#define TODC_TYPE_MK48T35_MINUTES		0x7ffa
-#define TODC_TYPE_MK48T35_SECONDS		0x7ff9
-#define TODC_TYPE_MK48T35_CNTL_B		0x7ff9
-#define TODC_TYPE_MK48T35_CNTL_A		0x7ff8
-#define TODC_TYPE_MK48T35_WATCHDOG		0x0000
-#define TODC_TYPE_MK48T35_INTERRUPTS		0x0000
-#define TODC_TYPE_MK48T35_ALARM_DATE		0x0000
-#define TODC_TYPE_MK48T35_ALARM_HOUR		0x0000
-#define TODC_TYPE_MK48T35_ALARM_MINUTES		0x0000
-#define TODC_TYPE_MK48T35_ALARM_SECONDS		0x0000
-#define TODC_TYPE_MK48T35_CENTURY		0x0000
-#define TODC_TYPE_MK48T35_FLAGS			0x0000
-#define TODC_TYPE_MK48T35_NVRAM_ADDR_REG	0
-#define TODC_TYPE_MK48T35_NVRAM_DATA_REG	0
-
-#define TODC_TYPE_MK48T37_NVRAM_SIZE		0x7ff0
-#define TODC_TYPE_MK48T37_SW_FLAGS		0
-#define TODC_TYPE_MK48T37_YEAR			0x7fff
-#define TODC_TYPE_MK48T37_MONTH			0x7ffe
-#define TODC_TYPE_MK48T37_DOM			0x7ffd	/* Day of Month */
-#define TODC_TYPE_MK48T37_DOW			0x7ffc	/* Day of Week */
-#define TODC_TYPE_MK48T37_HOURS			0x7ffb
-#define TODC_TYPE_MK48T37_MINUTES		0x7ffa
-#define TODC_TYPE_MK48T37_SECONDS		0x7ff9
-#define TODC_TYPE_MK48T37_CNTL_B		0x7ff9
-#define TODC_TYPE_MK48T37_CNTL_A		0x7ff8
-#define TODC_TYPE_MK48T37_WATCHDOG		0x7ff7
-#define TODC_TYPE_MK48T37_INTERRUPTS		0x7ff6
-#define TODC_TYPE_MK48T37_ALARM_DATE		0x7ff5
-#define TODC_TYPE_MK48T37_ALARM_HOUR		0x7ff4
-#define TODC_TYPE_MK48T37_ALARM_MINUTES		0x7ff3
-#define TODC_TYPE_MK48T37_ALARM_SECONDS		0x7ff2
-#define TODC_TYPE_MK48T37_CENTURY		0x7ff1
-#define TODC_TYPE_MK48T37_FLAGS			0x7ff0
-#define TODC_TYPE_MK48T37_NVRAM_ADDR_REG	0
-#define TODC_TYPE_MK48T37_NVRAM_DATA_REG	0
-
-#define TODC_TYPE_MK48T59_NVRAM_SIZE		0x1ff0
-#define TODC_TYPE_MK48T59_SW_FLAGS		0
-#define TODC_TYPE_MK48T59_YEAR			0x1fff
-#define TODC_TYPE_MK48T59_MONTH			0x1ffe
-#define TODC_TYPE_MK48T59_DOM			0x1ffd	/* Day of Month */
-#define TODC_TYPE_MK48T59_DOW			0x1ffc	/* Day of Week */
-#define TODC_TYPE_MK48T59_HOURS			0x1ffb
-#define TODC_TYPE_MK48T59_MINUTES		0x1ffa
-#define TODC_TYPE_MK48T59_SECONDS		0x1ff9
-#define TODC_TYPE_MK48T59_CNTL_B		0x1ff9
-#define TODC_TYPE_MK48T59_CNTL_A		0x1ff8
-#define TODC_TYPE_MK48T59_WATCHDOG		0x1fff
-#define TODC_TYPE_MK48T59_INTERRUPTS		0x1fff
-#define TODC_TYPE_MK48T59_ALARM_DATE		0x1fff
-#define TODC_TYPE_MK48T59_ALARM_HOUR		0x1fff
-#define TODC_TYPE_MK48T59_ALARM_MINUTES		0x1fff
-#define TODC_TYPE_MK48T59_ALARM_SECONDS		0x1fff
-#define TODC_TYPE_MK48T59_CENTURY		0x1fff
-#define TODC_TYPE_MK48T59_FLAGS			0x1fff
-#define TODC_TYPE_MK48T59_NVRAM_ADDR_REG	0
-#define TODC_TYPE_MK48T59_NVRAM_DATA_REG	0
-
-#define TODC_TYPE_DS1501_NVRAM_SIZE	0x100
-#define TODC_TYPE_DS1501_SW_FLAGS	TODC_FLAG_2_LEVEL_NVRAM
-#define TODC_TYPE_DS1501_YEAR		(TODC_TYPE_DS1501_NVRAM_SIZE + 0x06)
-#define TODC_TYPE_DS1501_MONTH		(TODC_TYPE_DS1501_NVRAM_SIZE + 0x05)
-#define TODC_TYPE_DS1501_DOM		(TODC_TYPE_DS1501_NVRAM_SIZE + 0x04)
-#define TODC_TYPE_DS1501_DOW		(TODC_TYPE_DS1501_NVRAM_SIZE + 0x03)
-#define TODC_TYPE_DS1501_HOURS		(TODC_TYPE_DS1501_NVRAM_SIZE + 0x02)
-#define TODC_TYPE_DS1501_MINUTES	(TODC_TYPE_DS1501_NVRAM_SIZE + 0x01)
-#define TODC_TYPE_DS1501_SECONDS	(TODC_TYPE_DS1501_NVRAM_SIZE + 0x00)
-#define TODC_TYPE_DS1501_CNTL_B		(TODC_TYPE_DS1501_NVRAM_SIZE + 0x0f)
-#define TODC_TYPE_DS1501_CNTL_A		(TODC_TYPE_DS1501_NVRAM_SIZE + 0x0f)
-#define TODC_TYPE_DS1501_WATCHDOG	(TODC_TYPE_DS1501_NVRAM_SIZE + 0xff)
-#define TODC_TYPE_DS1501_INTERRUPTS	(TODC_TYPE_DS1501_NVRAM_SIZE + 0xff)
-#define TODC_TYPE_DS1501_ALARM_DATE	(TODC_TYPE_DS1501_NVRAM_SIZE + 0x0b)
-#define TODC_TYPE_DS1501_ALARM_HOUR	(TODC_TYPE_DS1501_NVRAM_SIZE + 0x0a)
-#define TODC_TYPE_DS1501_ALARM_MINUTES	(TODC_TYPE_DS1501_NVRAM_SIZE + 0x09)
-#define TODC_TYPE_DS1501_ALARM_SECONDS	(TODC_TYPE_DS1501_NVRAM_SIZE + 0x08)
-#define TODC_TYPE_DS1501_CENTURY	(TODC_TYPE_DS1501_NVRAM_SIZE + 0x07)
-#define TODC_TYPE_DS1501_FLAGS		(TODC_TYPE_DS1501_NVRAM_SIZE + 0xff)
-#define TODC_TYPE_DS1501_NVRAM_ADDR_REG	0x10
-#define TODC_TYPE_DS1501_NVRAM_DATA_REG	0x13
-
-#define TODC_TYPE_DS1553_NVRAM_SIZE		0x1ff0
-#define TODC_TYPE_DS1553_SW_FLAGS		0
-#define TODC_TYPE_DS1553_YEAR			0x1fff
-#define TODC_TYPE_DS1553_MONTH			0x1ffe
-#define TODC_TYPE_DS1553_DOM			0x1ffd	/* Day of Month */
-#define TODC_TYPE_DS1553_DOW			0x1ffc	/* Day of Week */
-#define TODC_TYPE_DS1553_HOURS			0x1ffb
-#define TODC_TYPE_DS1553_MINUTES		0x1ffa
-#define TODC_TYPE_DS1553_SECONDS		0x1ff9
-#define TODC_TYPE_DS1553_CNTL_B			0x1ff9
-#define TODC_TYPE_DS1553_CNTL_A			0x1ff8	/* control_a R/W regs */
-#define TODC_TYPE_DS1553_WATCHDOG		0x1ff7
-#define TODC_TYPE_DS1553_INTERRUPTS		0x1ff6
-#define TODC_TYPE_DS1553_ALARM_DATE		0x1ff5
-#define TODC_TYPE_DS1553_ALARM_HOUR		0x1ff4
-#define TODC_TYPE_DS1553_ALARM_MINUTES		0x1ff3
-#define TODC_TYPE_DS1553_ALARM_SECONDS		0x1ff2
-#define TODC_TYPE_DS1553_CENTURY		0x1ff8
-#define TODC_TYPE_DS1553_FLAGS			0x1ff0
-#define TODC_TYPE_DS1553_NVRAM_ADDR_REG		0
-#define TODC_TYPE_DS1553_NVRAM_DATA_REG		0
-
-#define TODC_TYPE_DS1557_NVRAM_SIZE		0x7fff0
-#define TODC_TYPE_DS1557_SW_FLAGS		0
-#define TODC_TYPE_DS1557_YEAR			0x7ffff
-#define TODC_TYPE_DS1557_MONTH			0x7fffe
-#define TODC_TYPE_DS1557_DOM			0x7fffd	/* Day of Month */
-#define TODC_TYPE_DS1557_DOW			0x7fffc	/* Day of Week */
-#define TODC_TYPE_DS1557_HOURS			0x7fffb
-#define TODC_TYPE_DS1557_MINUTES		0x7fffa
-#define TODC_TYPE_DS1557_SECONDS		0x7fff9
-#define TODC_TYPE_DS1557_CNTL_B			0x7fff9
-#define TODC_TYPE_DS1557_CNTL_A			0x7fff8	/* control_a R/W regs */
-#define TODC_TYPE_DS1557_WATCHDOG		0x7fff7
-#define TODC_TYPE_DS1557_INTERRUPTS		0x7fff6
-#define TODC_TYPE_DS1557_ALARM_DATE		0x7fff5
-#define TODC_TYPE_DS1557_ALARM_HOUR		0x7fff4
-#define TODC_TYPE_DS1557_ALARM_MINUTES		0x7fff3
-#define TODC_TYPE_DS1557_ALARM_SECONDS		0x7fff2
-#define TODC_TYPE_DS1557_CENTURY		0x7fff8
-#define TODC_TYPE_DS1557_FLAGS			0x7fff0
-#define TODC_TYPE_DS1557_NVRAM_ADDR_REG		0
-#define TODC_TYPE_DS1557_NVRAM_DATA_REG		0
-
-#define TODC_TYPE_DS1643_NVRAM_SIZE		0x1ff8
-#define TODC_TYPE_DS1643_SW_FLAGS		0
-#define TODC_TYPE_DS1643_YEAR			0x1fff
-#define TODC_TYPE_DS1643_MONTH			0x1ffe
-#define TODC_TYPE_DS1643_DOM			0x1ffd	/* Day of Month */
-#define TODC_TYPE_DS1643_DOW			0x1ffc	/* Day of Week */
-#define TODC_TYPE_DS1643_HOURS			0x1ffb
-#define TODC_TYPE_DS1643_MINUTES		0x1ffa
-#define TODC_TYPE_DS1643_SECONDS		0x1ff9
-#define TODC_TYPE_DS1643_CNTL_B			0x1ff9
-#define TODC_TYPE_DS1643_CNTL_A			0x1ff8	/* control_a R/W regs */
-#define TODC_TYPE_DS1643_WATCHDOG		0x1fff
-#define TODC_TYPE_DS1643_INTERRUPTS		0x1fff
-#define TODC_TYPE_DS1643_ALARM_DATE		0x1fff
-#define TODC_TYPE_DS1643_ALARM_HOUR		0x1fff
-#define TODC_TYPE_DS1643_ALARM_MINUTES		0x1fff
-#define TODC_TYPE_DS1643_ALARM_SECONDS		0x1fff
-#define TODC_TYPE_DS1643_CENTURY		0x1ff8
-#define TODC_TYPE_DS1643_FLAGS			0x1fff
-#define TODC_TYPE_DS1643_NVRAM_ADDR_REG		0
-#define TODC_TYPE_DS1643_NVRAM_DATA_REG		0
-
-#define TODC_TYPE_DS1693_NVRAM_SIZE		0 /* Not handled yet */
-#define TODC_TYPE_DS1693_SW_FLAGS		0
-#define TODC_TYPE_DS1693_YEAR			0x09
-#define TODC_TYPE_DS1693_MONTH			0x08
-#define TODC_TYPE_DS1693_DOM			0x07	/* Day of Month */
-#define TODC_TYPE_DS1693_DOW			0x06	/* Day of Week */
-#define TODC_TYPE_DS1693_HOURS			0x04
-#define TODC_TYPE_DS1693_MINUTES		0x02
-#define TODC_TYPE_DS1693_SECONDS		0x00
-#define TODC_TYPE_DS1693_CNTL_B			0x0b
-#define TODC_TYPE_DS1693_CNTL_A			0x0a
-#define TODC_TYPE_DS1693_WATCHDOG		0xff
-#define TODC_TYPE_DS1693_INTERRUPTS		0xff
-#define TODC_TYPE_DS1693_ALARM_DATE		0x49
-#define TODC_TYPE_DS1693_ALARM_HOUR		0x05
-#define TODC_TYPE_DS1693_ALARM_MINUTES		0x03
-#define TODC_TYPE_DS1693_ALARM_SECONDS		0x01
-#define TODC_TYPE_DS1693_CENTURY		0x48
-#define TODC_TYPE_DS1693_FLAGS			0xff
-#define TODC_TYPE_DS1693_NVRAM_ADDR_REG		0
-#define TODC_TYPE_DS1693_NVRAM_DATA_REG		0
-
-#define TODC_TYPE_DS1743_NVRAM_SIZE		0x1ff8
-#define TODC_TYPE_DS1743_SW_FLAGS		0
-#define TODC_TYPE_DS1743_YEAR			0x1fff
-#define TODC_TYPE_DS1743_MONTH			0x1ffe
-#define TODC_TYPE_DS1743_DOM			0x1ffd	/* Day of Month */
-#define TODC_TYPE_DS1743_DOW			0x1ffc	/* Day of Week */
-#define TODC_TYPE_DS1743_HOURS			0x1ffb
-#define TODC_TYPE_DS1743_MINUTES		0x1ffa
-#define TODC_TYPE_DS1743_SECONDS		0x1ff9
-#define TODC_TYPE_DS1743_CNTL_B			0x1ff9
-#define TODC_TYPE_DS1743_CNTL_A			0x1ff8	/* control_a R/W regs */
-#define TODC_TYPE_DS1743_WATCHDOG		0x1fff
-#define TODC_TYPE_DS1743_INTERRUPTS		0x1fff
-#define TODC_TYPE_DS1743_ALARM_DATE		0x1fff
-#define TODC_TYPE_DS1743_ALARM_HOUR		0x1fff
-#define TODC_TYPE_DS1743_ALARM_MINUTES		0x1fff
-#define TODC_TYPE_DS1743_ALARM_SECONDS		0x1fff
-#define TODC_TYPE_DS1743_CENTURY		0x1ff8
-#define TODC_TYPE_DS1743_FLAGS			0x1fff
-#define TODC_TYPE_DS1743_NVRAM_ADDR_REG		0
-#define TODC_TYPE_DS1743_NVRAM_DATA_REG		0
-
-#define TODC_TYPE_DS1746_NVRAM_SIZE		0x1fff8
-#define TODC_TYPE_DS1746_SW_FLAGS		0
-#define TODC_TYPE_DS1746_YEAR			0x1ffff
-#define TODC_TYPE_DS1746_MONTH			0x1fffe
-#define TODC_TYPE_DS1746_DOM			0x1fffd	/* Day of Month */
-#define TODC_TYPE_DS1746_DOW			0x1fffc	/* Day of Week */
-#define TODC_TYPE_DS1746_HOURS			0x1fffb
-#define TODC_TYPE_DS1746_MINUTES		0x1fffa
-#define TODC_TYPE_DS1746_SECONDS		0x1fff9
-#define TODC_TYPE_DS1746_CNTL_B			0x1fff9
-#define TODC_TYPE_DS1746_CNTL_A			0x1fff8	/* control_a R/W regs */
-#define TODC_TYPE_DS1746_WATCHDOG		0x00000
-#define TODC_TYPE_DS1746_INTERRUPTS		0x00000
-#define TODC_TYPE_DS1746_ALARM_DATE		0x00000
-#define TODC_TYPE_DS1746_ALARM_HOUR		0x00000
-#define TODC_TYPE_DS1746_ALARM_MINUTES		0x00000
-#define TODC_TYPE_DS1746_ALARM_SECONDS		0x00000
-#define TODC_TYPE_DS1746_CENTURY		0x00000
-#define TODC_TYPE_DS1746_FLAGS			0x00000
-#define TODC_TYPE_DS1746_NVRAM_ADDR_REG		0
-#define TODC_TYPE_DS1746_NVRAM_DATA_REG		0
-
-#define TODC_TYPE_DS1747_NVRAM_SIZE		0x7fff8
-#define TODC_TYPE_DS1747_SW_FLAGS		0
-#define TODC_TYPE_DS1747_YEAR			0x7ffff
-#define TODC_TYPE_DS1747_MONTH			0x7fffe
-#define TODC_TYPE_DS1747_DOM			0x7fffd	/* Day of Month */
-#define TODC_TYPE_DS1747_DOW			0x7fffc	/* Day of Week */
-#define TODC_TYPE_DS1747_HOURS			0x7fffb
-#define TODC_TYPE_DS1747_MINUTES		0x7fffa
-#define TODC_TYPE_DS1747_SECONDS		0x7fff9
-#define TODC_TYPE_DS1747_CNTL_B			0x7fff9
-#define TODC_TYPE_DS1747_CNTL_A			0x7fff8	/* control_a R/W regs */
-#define TODC_TYPE_DS1747_WATCHDOG		0x00000
-#define TODC_TYPE_DS1747_INTERRUPTS		0x00000
-#define TODC_TYPE_DS1747_ALARM_DATE		0x00000
-#define TODC_TYPE_DS1747_ALARM_HOUR		0x00000
-#define TODC_TYPE_DS1747_ALARM_MINUTES		0x00000
-#define TODC_TYPE_DS1747_ALARM_SECONDS		0x00000
-#define TODC_TYPE_DS1747_CENTURY		0x00000
-#define TODC_TYPE_DS1747_FLAGS			0x00000
-#define TODC_TYPE_DS1747_NVRAM_ADDR_REG		0
-#define TODC_TYPE_DS1747_NVRAM_DATA_REG		0
-
-#define TODC_TYPE_DS17285_NVRAM_SIZE		(0x1000-0x80) /* 4Kx8 NVRAM (minus RTC regs) */
-#define TODC_TYPE_DS17285_SW_FLAGS		TODC_FLAG_2_LEVEL_NVRAM
-#define TODC_TYPE_DS17285_SECONDS		(TODC_TYPE_DS17285_NVRAM_SIZE + 0x00)
-#define TODC_TYPE_DS17285_ALARM_SECONDS		(TODC_TYPE_DS17285_NVRAM_SIZE + 0x01)
-#define TODC_TYPE_DS17285_MINUTES		(TODC_TYPE_DS17285_NVRAM_SIZE + 0x02)
-#define TODC_TYPE_DS17285_ALARM_MINUTES		(TODC_TYPE_DS17285_NVRAM_SIZE + 0x03)
-#define TODC_TYPE_DS17285_HOURS			(TODC_TYPE_DS17285_NVRAM_SIZE + 0x04)
-#define TODC_TYPE_DS17285_ALARM_HOUR		(TODC_TYPE_DS17285_NVRAM_SIZE + 0x05)
-#define TODC_TYPE_DS17285_DOW			(TODC_TYPE_DS17285_NVRAM_SIZE + 0x06)
-#define TODC_TYPE_DS17285_DOM			(TODC_TYPE_DS17285_NVRAM_SIZE + 0x07)
-#define TODC_TYPE_DS17285_MONTH			(TODC_TYPE_DS17285_NVRAM_SIZE + 0x08)
-#define TODC_TYPE_DS17285_YEAR			(TODC_TYPE_DS17285_NVRAM_SIZE + 0x09)
-#define TODC_TYPE_DS17285_CNTL_A		(TODC_TYPE_DS17285_NVRAM_SIZE + 0x0A)
-#define TODC_TYPE_DS17285_CNTL_B		(TODC_TYPE_DS17285_NVRAM_SIZE + 0x0B)
-#define TODC_TYPE_DS17285_CNTL_C		(TODC_TYPE_DS17285_NVRAM_SIZE + 0x0C)
-#define TODC_TYPE_DS17285_CNTL_D		(TODC_TYPE_DS17285_NVRAM_SIZE + 0x0D)
-#define TODC_TYPE_DS17285_WATCHDOG		0
-#define TODC_TYPE_DS17285_INTERRUPTS		0
-#define TODC_TYPE_DS17285_ALARM_DATE		0
-#define TODC_TYPE_DS17285_CENTURY		0
-#define TODC_TYPE_DS17285_FLAGS			0
-#define TODC_TYPE_DS17285_NVRAM_ADDR_REG	0x50
-#define TODC_TYPE_DS17285_NVRAM_DATA_REG	0x53
-
-#define TODC_TYPE_MC146818_NVRAM_SIZE		0	/* XXXX */
-#define TODC_TYPE_MC146818_SW_FLAGS		0
-#define TODC_TYPE_MC146818_YEAR			0x09
-#define TODC_TYPE_MC146818_MONTH		0x08
-#define TODC_TYPE_MC146818_DOM			0x07	/* Day of Month */
-#define TODC_TYPE_MC146818_DOW			0x06	/* Day of Week */
-#define TODC_TYPE_MC146818_HOURS		0x04
-#define TODC_TYPE_MC146818_MINUTES		0x02
-#define TODC_TYPE_MC146818_SECONDS		0x00
-#define TODC_TYPE_MC146818_CNTL_B		0x0a
-#define TODC_TYPE_MC146818_CNTL_A		0x0b	/* control_a R/W regs */
-#define TODC_TYPE_MC146818_WATCHDOG		0
-#define TODC_TYPE_MC146818_INTERRUPTS		0x0c
-#define TODC_TYPE_MC146818_ALARM_DATE		0xff
-#define TODC_TYPE_MC146818_ALARM_HOUR		0x05
-#define TODC_TYPE_MC146818_ALARM_MINUTES	0x03
-#define TODC_TYPE_MC146818_ALARM_SECONDS	0x01
-#define TODC_TYPE_MC146818_CENTURY		0xff
-#define TODC_TYPE_MC146818_FLAGS		0xff
-#define TODC_TYPE_MC146818_NVRAM_ADDR_REG	0
-#define TODC_TYPE_MC146818_NVRAM_DATA_REG	0
-
-#define TODC_TYPE_PC97307_NVRAM_SIZE		0	/* No NVRAM? */
-#define TODC_TYPE_PC97307_SW_FLAGS		0
-#define TODC_TYPE_PC97307_YEAR			0x09
-#define TODC_TYPE_PC97307_MONTH			0x08
-#define TODC_TYPE_PC97307_DOM			0x07	/* Day of Month */
-#define TODC_TYPE_PC97307_DOW			0x06	/* Day of Week */
-#define TODC_TYPE_PC97307_HOURS			0x04
-#define TODC_TYPE_PC97307_MINUTES		0x02
-#define TODC_TYPE_PC97307_SECONDS		0x00
-#define TODC_TYPE_PC97307_CNTL_B		0x0a
-#define TODC_TYPE_PC97307_CNTL_A		0x0b	/* control_a R/W regs */
-#define TODC_TYPE_PC97307_WATCHDOG		0x0c
-#define TODC_TYPE_PC97307_INTERRUPTS		0x0d
-#define TODC_TYPE_PC97307_ALARM_DATE		0xff
-#define TODC_TYPE_PC97307_ALARM_HOUR		0x05
-#define TODC_TYPE_PC97307_ALARM_MINUTES		0x03
-#define TODC_TYPE_PC97307_ALARM_SECONDS		0x01
-#define TODC_TYPE_PC97307_CENTURY		0xff
-#define TODC_TYPE_PC97307_FLAGS			0xff
-#define TODC_TYPE_PC97307_NVRAM_ADDR_REG	0
-#define TODC_TYPE_PC97307_NVRAM_DATA_REG	0
-
-/*
- * Define macros to allocate and init the todc_info_t table that will
- * be used by the todc_time.c routines.
- */
-#define TODC_ALLOC()							\
-	static todc_info_t todc_info_alloc;				\
-	todc_info_t *todc_info = &todc_info_alloc;
-
-#define TODC_INIT(clock_type, as0, as1, data, bits) {			\
-	todc_info->rtc_type = clock_type;				\
-									\
-	todc_info->nvram_as0 = (unsigned int)(as0);			\
-	todc_info->nvram_as1 = (unsigned int)(as1);			\
-	todc_info->nvram_data = (unsigned int)(data);			\
-									\
-	todc_info->as0_bits = (bits);					\
-									\
-	todc_info->nvram_size = clock_type ##_NVRAM_SIZE;		\
-	todc_info->sw_flags = clock_type ##_SW_FLAGS;			\
-									\
-	todc_info->year = clock_type ##_YEAR;				\
-	todc_info->month = clock_type ##_MONTH;				\
-	todc_info->day_of_month = clock_type ##_DOM;			\
-	todc_info->day_of_week = clock_type ##_DOW;			\
-	todc_info->hours = clock_type ##_HOURS;				\
-	todc_info->minutes = clock_type ##_MINUTES;			\
-	todc_info->seconds = clock_type ##_SECONDS;			\
-	todc_info->control_b = clock_type ##_CNTL_B;			\
-	todc_info->control_a = clock_type ##_CNTL_A;			\
-	todc_info->watchdog = clock_type ##_WATCHDOG;			\
-	todc_info->interrupts = clock_type ##_INTERRUPTS;		\
-	todc_info->alarm_date = clock_type ##_ALARM_DATE;		\
-	todc_info->alarm_hour = clock_type ##_ALARM_HOUR;		\
-	todc_info->alarm_minutes = clock_type ##_ALARM_MINUTES;		\
-	todc_info->alarm_seconds = clock_type ##_ALARM_SECONDS;		\
-	todc_info->century = clock_type ##_CENTURY;			\
-	todc_info->flags = clock_type ##_FLAGS;				\
-									\
-	todc_info->nvram_addr_reg = clock_type ##_NVRAM_ADDR_REG;	\
-	todc_info->nvram_data_reg = clock_type ##_NVRAM_DATA_REG;	\
-}
-
-extern todc_info_t *todc_info;
-
-unsigned char todc_direct_read_val(int addr);
-void todc_direct_write_val(int addr, unsigned char val);
-unsigned char todc_m48txx_read_val(int addr);
-void todc_m48txx_write_val(int addr, unsigned char val);
-unsigned char todc_mc146818_read_val(int addr);
-void todc_mc146818_write_val(int addr, unsigned char val);
-
-long todc_time_init(void);
-void todc_get_rtc_time(struct rtc_time *);
-int todc_set_rtc_time(struct rtc_time *);
-void todc_calibrate_decr(void);
-
-#endif				/* __PPC_KERNEL_TODC_H */
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index 9fe7894..50c0140 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -32,7 +32,14 @@
 int of_node_to_nid(struct device_node *device);
 
 struct pci_bus;
+#ifdef CONFIG_PCI
 extern int pcibus_to_node(struct pci_bus *bus);
+#else
+static inline int pcibus_to_node(struct pci_bus *bus)
+{
+	return -1;
+}
+#endif
 
 #define pcibus_to_cpumask(bus)	(pcibus_to_node(bus) == -1 ? \
 					CPU_MASK_ALL : \
diff --git a/include/asm-powerpc/tsi108.h b/include/asm-powerpc/tsi108.h
index 2c702d3..4e95d15 100644
--- a/include/asm-powerpc/tsi108.h
+++ b/include/asm-powerpc/tsi108.h
@@ -98,12 +98,12 @@
 extern u32 get_vir_csrbase(void);
 extern u32 tsi108_csr_vir_base;
 
-extern inline u32 tsi108_read_reg(u32 reg_offset)
+static inline u32 tsi108_read_reg(u32 reg_offset)
 {
 	return in_be32((volatile u32 *)(tsi108_csr_vir_base + reg_offset));
 }
 
-extern inline void tsi108_write_reg(u32 reg_offset, u32 val)
+static inline void tsi108_write_reg(u32 reg_offset, u32 val)
 {
 	out_be32((volatile u32 *)(tsi108_csr_vir_base + reg_offset), val);
 }
diff --git a/include/asm-powerpc/types.h b/include/asm-powerpc/types.h
index d6fb56b..3b36375 100644
--- a/include/asm-powerpc/types.h
+++ b/include/asm-powerpc/types.h
@@ -97,16 +97,6 @@
 	unsigned long env;
 } func_descr_t;
 
-#ifdef CONFIG_LBD
-typedef u64 sector_t;
-#define HAVE_SECTOR_T
-#endif
-
-#ifdef CONFIG_LSF
-typedef u64 blkcnt_t;
-#define HAVE_BLKCNT_T
-#endif
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
index d83fc29..adbf16b 100644
--- a/include/asm-powerpc/uaccess.h
+++ b/include/asm-powerpc/uaccess.h
@@ -304,7 +304,7 @@
 
 #ifndef __powerpc64__
 
-extern inline unsigned long copy_from_user(void *to,
+static inline unsigned long copy_from_user(void *to,
 		const void __user *from, unsigned long n)
 {
 	unsigned long over;
@@ -319,7 +319,7 @@
 	return n;
 }
 
-extern inline unsigned long copy_to_user(void __user *to,
+static inline unsigned long copy_to_user(void __user *to,
 		const void *from, unsigned long n)
 {
 	unsigned long over;
diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h
index 0e4ea37..0ae954e 100644
--- a/include/asm-powerpc/unistd.h
+++ b/include/asm-powerpc/unistd.h
@@ -334,119 +334,9 @@
 
 #ifndef __ASSEMBLY__
 
-/* On powerpc a system call basically clobbers the same registers like a
- * function call, with the exception of LR (which is needed for the
- * "sc; bnslr" sequence) and CR (where only CR0.SO is clobbered to signal
- * an error return status).
- */
-
-#define __syscall_nr(nr, type, name, args...)				\
-	unsigned long __sc_ret, __sc_err;				\
-	{								\
-		register unsigned long __sc_0  __asm__ ("r0");		\
-		register unsigned long __sc_3  __asm__ ("r3");		\
-		register unsigned long __sc_4  __asm__ ("r4");		\
-		register unsigned long __sc_5  __asm__ ("r5");		\
-		register unsigned long __sc_6  __asm__ ("r6");		\
-		register unsigned long __sc_7  __asm__ ("r7");		\
-		register unsigned long __sc_8  __asm__ ("r8");		\
-									\
-		__sc_loadargs_##nr(name, args);				\
-		__asm__ __volatile__					\
-			("sc           \n\t"				\
-			 "mfcr %0      "				\
-			: "=&r" (__sc_0),				\
-			  "=&r" (__sc_3),  "=&r" (__sc_4),		\
-			  "=&r" (__sc_5),  "=&r" (__sc_6),		\
-			  "=&r" (__sc_7),  "=&r" (__sc_8)		\
-			: __sc_asm_input_##nr				\
-			: "cr0", "ctr", "memory",			\
-			  "r9", "r10","r11", "r12");			\
-		__sc_ret = __sc_3;					\
-		__sc_err = __sc_0;					\
-	}								\
-	if (__sc_err & 0x10000000)					\
-	{								\
-		errno = __sc_ret;					\
-		__sc_ret = -1;						\
-	}								\
-	return (type) __sc_ret
-
-#define __sc_loadargs_0(name, dummy...)					\
-	__sc_0 = __NR_##name
-#define __sc_loadargs_1(name, arg1)					\
-	__sc_loadargs_0(name);						\
-	__sc_3 = (unsigned long) (arg1)
-#define __sc_loadargs_2(name, arg1, arg2)				\
-	__sc_loadargs_1(name, arg1);					\
-	__sc_4 = (unsigned long) (arg2)
-#define __sc_loadargs_3(name, arg1, arg2, arg3)				\
-	__sc_loadargs_2(name, arg1, arg2);				\
-	__sc_5 = (unsigned long) (arg3)
-#define __sc_loadargs_4(name, arg1, arg2, arg3, arg4)			\
-	__sc_loadargs_3(name, arg1, arg2, arg3);			\
-	__sc_6 = (unsigned long) (arg4)
-#define __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5)		\
-	__sc_loadargs_4(name, arg1, arg2, arg3, arg4);			\
-	__sc_7 = (unsigned long) (arg5)
-#define __sc_loadargs_6(name, arg1, arg2, arg3, arg4, arg5, arg6)	\
-	__sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5);		\
-	__sc_8 = (unsigned long) (arg6)
-
-#define __sc_asm_input_0 "0" (__sc_0)
-#define __sc_asm_input_1 __sc_asm_input_0, "1" (__sc_3)
-#define __sc_asm_input_2 __sc_asm_input_1, "2" (__sc_4)
-#define __sc_asm_input_3 __sc_asm_input_2, "3" (__sc_5)
-#define __sc_asm_input_4 __sc_asm_input_3, "4" (__sc_6)
-#define __sc_asm_input_5 __sc_asm_input_4, "5" (__sc_7)
-#define __sc_asm_input_6 __sc_asm_input_5, "6" (__sc_8)
-
-#define _syscall0(type,name)						\
-type name(void)								\
-{									\
-	__syscall_nr(0, type, name);					\
-}
-
-#define _syscall1(type,name,type1,arg1)					\
-type name(type1 arg1)							\
-{									\
-	__syscall_nr(1, type, name, arg1);				\
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2)			\
-type name(type1 arg1, type2 arg2)					\
-{									\
-	__syscall_nr(2, type, name, arg1, arg2);			\
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)		\
-type name(type1 arg1, type2 arg2, type3 arg3)				\
-{									\
-	__syscall_nr(3, type, name, arg1, arg2, arg3);			\
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4)		\
-{									\
-	__syscall_nr(4, type, name, arg1, arg2, arg3, arg4);		\
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5)	\
-{									\
-	__syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5);	\
-}
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
-{									\
-	__syscall_nr(6, type, name, arg1, arg2, arg3, arg4, arg5, arg6); \
-}
-
-
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/linkage.h>
-#include <asm/syscalls.h>
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
@@ -481,16 +371,9 @@
 
 /*
  * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
  */
-#ifdef CONFIG_PPC32
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-#else
-#define cond_syscall(x) asm(".weak\t." #x "\n\t.set\t." #x ",.sys_ni_syscall")
-#endif
-
+#define cond_syscall(x) \
+	asmlinkage long x (void) __attribute__((weak,alias("sys_ni_syscall")))
 
 #endif		/* __ASSEMBLY__ */
 #endif		/* __KERNEL__ */
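The rewritten cond_syscall() drops the hand-written .weak/.set assembler, so the PPC32/PPC64 split for dot-prefixed symbols no longer needs special casing. For a hypothetical entry cond_syscall(sys_example) the macro now expands roughly to:

asmlinkage long sys_example(void)
	__attribute__((weak, alias("sys_ni_syscall")));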
diff --git a/include/asm-powerpc/vio.h b/include/asm-powerpc/vio.h
index 4b51d42..0117b54 100644
--- a/include/asm-powerpc/vio.h
+++ b/include/asm-powerpc/vio.h
@@ -45,7 +45,6 @@
  * The vio_dev structure is used to describe virtual I/O devices.
  */
 struct vio_dev {
-	struct iommu_table *iommu_table;     /* vio_map_* uses this */
 	const char *name;
 	const char *type;
 	uint32_t unit_address;
diff --git a/include/asm-powerpc/xmon.h b/include/asm-powerpc/xmon.h
index f1d337e..88320a0 100644
--- a/include/asm-powerpc/xmon.h
+++ b/include/asm-powerpc/xmon.h
@@ -14,8 +14,10 @@
 
 #ifdef CONFIG_XMON
 extern void xmon_setup(void);
+extern void xmon_register_spus(struct list_head *list);
 #else
 static inline void xmon_setup(void) { };
+static inline void xmon_register_spus(struct list_head *list) { };
 #endif
 
 #endif /* __KERNEL __ */
diff --git a/include/asm-ppc/device.h b/include/asm-ppc/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-ppc/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-ppc/highmem.h b/include/asm-ppc/highmem.h
index 1d2c4ef..f7b21ee 100644
--- a/include/asm-ppc/highmem.h
+++ b/include/asm-ppc/highmem.h
@@ -79,7 +79,7 @@
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	inc_preempt_count();
+	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
@@ -101,8 +101,7 @@
 	unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
 
 	if (vaddr < KMAP_FIX_BEGIN) { // FIXME
-		dec_preempt_count();
-		preempt_check_resched();
+		pagefault_enable();
 		return;
 	}
 
@@ -115,8 +114,7 @@
 	pte_clear(&init_mm, vaddr, kmap_pte+idx);
 	flush_tlb_page(NULL, vaddr);
 #endif
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_enable();
 }
 
 static inline struct page *kmap_atomic_to_page(void *ptr)
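Switching kmap_atomic()/kunmap_atomic() from the open-coded inc_preempt_count()/dec_preempt_count() pair to pagefault_disable()/pagefault_enable() leaves the calling convention unchanged. A minimal sketch of the usual usage pattern; the page argument and the KM_USER0 slot are just an example:

/* Sketch only: clears one highmem page without sleeping. */
static void example_clear_page(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);	/* pagefault_disable() runs here */

	memset(vaddr, 0, PAGE_SIZE);			/* must not sleep or fault */
	kunmap_atomic(vaddr, KM_USER0);			/* pagefault_enable() runs here */
}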
diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h
index a4c411b..ccf1a9b 100644
--- a/include/asm-ppc/io.h
+++ b/include/asm-ppc/io.h
@@ -26,17 +26,11 @@
 
 #if defined(CONFIG_4xx)
 #include <asm/ibm4xx.h>
-#elif defined(CONFIG_PPC_MPC52xx)
-#include <asm/mpc52xx.h>
 #elif defined(CONFIG_8xx)
 #include <asm/mpc8xx.h>
 #elif defined(CONFIG_8260)
 #include <asm/mpc8260.h>
-#elif defined(CONFIG_83xx)
-#include <asm/mpc83xx.h>
-#elif defined(CONFIG_85xx)
-#include <asm/mpc85xx.h>
-#elif defined(CONFIG_APUS)
+#elif defined(CONFIG_APUS) || !defined(CONFIG_PCI)
 #define _IO_BASE	0
 #define _ISA_MEM_BASE	0
 #define PCI_DRAM_OFFSET 0
@@ -237,6 +231,14 @@
 #define insl(port, buf, nl)	_insl_ns((port)+___IO_BASE, (buf), (nl))
 #define outsl(port, buf, nl)	_outsl_ns((port)+___IO_BASE, (buf), (nl))
 
+#define readsb(a, b, n)		_insb((a), (b), (n))
+#define readsw(a, b, n)		_insw_ns((a), (b), (n))
+#define readsl(a, b, n)		_insl_ns((a), (b), (n))
+#define writesb(a, b, n)	_outsb((a),(b),(n))
+#define writesw(a, b, n)	_outsw_ns((a),(b),(n))
+#define writesl(a, b, n)	_outsl_ns((a),(b),(n))
+
+
 /*
  * On powermacs and 8xx we will get a machine check exception 
  * if we try to read data from a non-existent I/O port. Because
@@ -327,12 +329,12 @@
 #define inl_p(port)		inl((port))
 #define outl_p(val, port)	outl((val), (port))
 
-extern void _insb(volatile u8 __iomem *port, void *buf, long count);
-extern void _outsb(volatile u8 __iomem *port, const void *buf, long count);
-extern void _insw_ns(volatile u16 __iomem *port, void *buf, long count);
-extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count);
-extern void _insl_ns(volatile u32 __iomem *port, void *buf, long count);
-extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count);
+extern void _insb(const volatile u8 __iomem *addr, void *buf, long count);
+extern void _outsb(volatile u8 __iomem *addr,const void *buf,long count);
+extern void _insw_ns(const volatile u16 __iomem *addr, void *buf, long count);
+extern void _outsw_ns(volatile u16 __iomem *addr, const void *buf, long count);
+extern void _insl_ns(const volatile u32 __iomem *addr, void *buf, long count);
+extern void _outsl_ns(volatile u32 __iomem *addr, const void *buf, long count);
 
 
 #define IO_SPACE_LIMIT ~0
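The new readsb/readsw/readsl and writesb/writesw/writesl macros simply forward to the existing native-endian string accessors (_insb, _insw_ns, ...). A driver-side sketch of draining a 16-bit FIFO register; the function and the fifo pointer are hypothetical:

/* Sketch only: repeated 16-bit reads from one MMIO register. */
static void example_drain_fifo(void __iomem *fifo, u16 *buf, int nwords)
{
	readsw(fifo, buf, nwords);
}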
diff --git a/include/asm-ppc/m48t35.h b/include/asm-ppc/m48t35.h
index f3c5e5d..a5277ea 100644
--- a/include/asm-ppc/m48t35.h
+++ b/include/asm-ppc/m48t35.h
@@ -39,7 +39,7 @@
 #define M48T35_RTC_WATCHDOG_RB         0x03
 #define M48T35_RTC_WATCHDOG_BMB        0x7c
 #define M48T35_RTC_WATCHDOG_WDS        0x80
-#define M48T35_RTC_WATCHDOG_ALL        (M48T35_RTC_WATCHDOG_RB|M48T35_RTC_WATCHDOG_BMB|M48T35_RTC_W
+#define M48T35_RTC_WATCHDOG_ALL        (M48T35_RTC_WATCHDOG_RB|M48T35_RTC_WATCHDOG_BMB|M48T35_RTC_W)
 
 #define M48T35_RTC_CONTROL_WRITE       0x80
 #define M48T35_RTC_CONTROL_READ        0x40
diff --git a/include/asm-ppc/mpc52xx.h b/include/asm-ppc/mpc52xx.h
index 64c8874..d9d21aa 100644
--- a/include/asm-ppc/mpc52xx.h
+++ b/include/asm-ppc/mpc52xx.h
@@ -29,17 +29,6 @@
 #endif /* __ASSEMBLY__ */
 
 
-#ifdef CONFIG_PCI
-#define _IO_BASE	isa_io_base
-#define _ISA_MEM_BASE	isa_mem_base
-#define PCI_DRAM_OFFSET	pci_dram_offset
-#else
-#define _IO_BASE	0
-#define _ISA_MEM_BASE	0
-#define PCI_DRAM_OFFSET	0
-#endif
-
-
 /* ======================================================================== */
 /* PPC Sys devices definition                                               */
 /* ======================================================================== */
diff --git a/include/asm-ppc/mpc83xx.h b/include/asm-ppc/mpc83xx.h
index 02ed2c3..c306197 100644
--- a/include/asm-ppc/mpc83xx.h
+++ b/include/asm-ppc/mpc83xx.h
@@ -25,14 +25,6 @@
 #include <platforms/83xx/mpc834x_sys.h>
 #endif
 
-#define _IO_BASE        isa_io_base
-#define _ISA_MEM_BASE   isa_mem_base
-#ifdef CONFIG_PCI
-#define PCI_DRAM_OFFSET pci_dram_offset
-#else
-#define PCI_DRAM_OFFSET 0
-#endif
-
 /*
  * The "residual" board information structure the boot loader passes
  * into the kernel.
diff --git a/include/asm-ppc/mpc85xx.h b/include/asm-ppc/mpc85xx.h
index 9b48511..d7e4a79 100644
--- a/include/asm-ppc/mpc85xx.h
+++ b/include/asm-ppc/mpc85xx.h
@@ -44,14 +44,6 @@
 #include <platforms/85xx/tqm85xx.h>
 #endif
 
-#define _IO_BASE        isa_io_base
-#define _ISA_MEM_BASE   isa_mem_base
-#ifdef CONFIG_PCI
-#define PCI_DRAM_OFFSET pci_dram_offset
-#else
-#define PCI_DRAM_OFFSET 0
-#endif
-
 /*
  * The "residual" board information structure the boot loader passes
  * into the kernel.
diff --git a/include/asm-ppc/pci-bridge.h b/include/asm-ppc/pci-bridge.h
index 9d52306..6c955d0 100644
--- a/include/asm-ppc/pci-bridge.h
+++ b/include/asm-ppc/pci-bridge.h
@@ -43,6 +43,7 @@
 	struct pci_controller *next;
         struct pci_bus *bus;
 	void *arch_data;
+	struct device *parent;
 
 	int first_busno;
 	int last_busno;
diff --git a/include/asm-s390/checksum.h b/include/asm-s390/checksum.h
index 37c362d..0a3cd7e 100644
--- a/include/asm-s390/checksum.h
+++ b/include/asm-s390/checksum.h
@@ -27,8 +27,8 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-static inline unsigned int
-csum_partial(const unsigned char * buff, int len, unsigned int sum)
+static inline __wsum
+csum_partial(const void *buff, int len, __wsum sum)
 {
 	register unsigned long reg2 asm("2") = (unsigned long) buff;
 	register unsigned long reg3 asm("3") = (unsigned long) len;
@@ -49,9 +49,9 @@
  * Copy from userspace and compute checksum.  If we catch an exception
  * then zero the rest of the buffer.
  */
-static inline unsigned int
-csum_partial_copy_from_user(const char __user *src, char *dst,
-                                          int len, unsigned int sum,
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src, void *dst,
+                                          int len, __wsum sum,
                                           int *err_ptr)
 {
 	int missing;
@@ -66,8 +66,8 @@
 }
 
 
-static inline unsigned int
-csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum)
+static inline __wsum
+csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum)
 {
         memcpy(dst,src,len);
 	return csum_partial(dst, len, sum);
@@ -76,8 +76,7 @@
 /*
  *      Fold a partial checksum without adding pseudo headers
  */
-static inline unsigned short
-csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 #ifndef __s390x__
 	register_pair rp;
@@ -100,7 +99,7 @@
 		"	srl	%0,16\n"	/* %0 = H+L+C */
 		: "+&d" (sum) : : "cc", "2", "3");
 #endif /* __s390x__ */
-	return ((unsigned short) ~sum);
+	return (__force __sum16) ~sum;
 }
 
 /*
@@ -108,8 +107,7 @@
  *	which always checksum on 4 octet boundaries.
  *
  */
-static inline unsigned short
-ip_fast_csum(unsigned char *iph, unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	return csum_fold(csum_partial(iph, ihl*4, 0));
 }
@@ -118,10 +116,10 @@
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 32-bit checksum
  */
-static inline unsigned int 
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                    unsigned short len, unsigned short proto,
-                   unsigned int sum)
+                   __wsum sum)
 {
 #ifndef __s390x__
 	asm volatile(
@@ -137,12 +135,12 @@
 		"1:"
 		: "+&d" (sum) : "d" (daddr) : "cc");
 	asm volatile(
-		"	alr	%0,%1\n" /* sum += (len<<16) + (proto<<8) */
+		"	alr	%0,%1\n" /* sum += len + proto */
 		"	brc	12,2f\n"
 		"	ahi	%0,1\n"  /* add carry */
 		"2:"
 		: "+&d" (sum)
-		: "d" (((unsigned int) len<<16) + (unsigned int) proto)
+		: "d" (len + proto)
 		: "cc");
 #else /* __s390x__ */
 	asm volatile(
@@ -153,7 +151,7 @@
 		"0:	algr	%0,%2\n"  /* sum += daddr */
 		"	brc	12,1f\n"
 		"	aghi	%0,1\n"   /* add carry */
-		"1:	algfr	%0,%3\n"  /* sum += (len<<16) + proto */
+		"1:	algfr	%0,%3\n"  /* sum += len + proto */
 		"	brc	12,2f\n"
 		"	aghi	%0,1\n"   /* add carry */
 		"2:	srlg	0,%0,32\n"
@@ -163,7 +161,7 @@
 		"3:	llgfr	%0,%0"
 		: "+&d" (sum)
 		: "d" (saddr), "d" (daddr),
-		  "d" (((unsigned int) len<<16) + (unsigned int) proto)
+		  "d" (len + proto)
 		: "cc", "0");
 #endif /* __s390x__ */
 	return sum;
@@ -174,10 +172,10 @@
  * returns a 16-bit checksum, already complemented
  */
 
-static inline unsigned short int
-csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr,
                   unsigned short len, unsigned short proto,
-                  unsigned int sum)
+                  __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -187,8 +185,7 @@
  * in icmp.c
  */
 
-static inline unsigned short
-ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff, len, 0));
 }
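
For orientation, the folding step used throughout this header takes a 32-bit running ones' complement sum, adds its high and low halves (plus any carry), and complements the 16-bit result. A minimal portable sketch of that arithmetic, with a hypothetical function name and assuming nothing beyond standard C:

	#include <stdint.h>

	/* Fold a 32-bit partial sum into a 16-bit ones' complement checksum,
	 * the same operation the inline assembly in csum_fold() performs. */
	static uint16_t fold_sketch(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* add high and low halves */
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the carry back in  */
		return (uint16_t)~sum;
	}
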
diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h
index 81287d8..d927850 100644
--- a/include/asm-s390/cio.h
+++ b/include/asm-s390/cio.h
@@ -278,17 +278,16 @@
 static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
 				      struct ccw_dev_id *dev_id2)
 {
-	return !memcmp(dev_id1, dev_id2, sizeof(struct ccw_dev_id));
+	if ((dev_id1->ssid == dev_id2->ssid) &&
+	    (dev_id1->devno == dev_id2->devno))
+		return 1;
+	return 0;
 }
 
 extern int diag210(struct diag210 *addr);
 
 extern void wait_cons_dev(void);
 
-extern void clear_all_subchannels(void);
-
-extern void cio_reset_channel_paths(void);
-
 extern void css_schedule_reprobe(void);
 
 extern void reipl_ccw_dev(struct ccw_dev_id *id);
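
The switch away from memcmp() above is worth a note: a structure of this shape can contain padding whose contents are unspecified, so comparing two logically equal IDs byte by byte may report a mismatch. A hedged illustration; the layout below is a stand-in, not the real struct ccw_dev_id definition:

	struct example_dev_id {
		unsigned char  ssid;	/* 1 byte                           */
					/* 1 byte of compiler padding here  */
		unsigned short devno;	/* 2 bytes                          */
	};

	/* Two values with equal ssid and devno can still differ in the padding
	 * byte, which is why the patch compares the fields explicitly. */
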
diff --git a/include/asm-s390/cpcmd.h b/include/asm-s390/cpcmd.h
index 1fcf65b..48a9eab 100644
--- a/include/asm-s390/cpcmd.h
+++ b/include/asm-s390/cpcmd.h
@@ -7,8 +7,8 @@
  *               Christian Borntraeger (cborntra@de.ibm.com),
  */
 
-#ifndef __CPCMD__
-#define __CPCMD__
+#ifndef _ASM_S390_CPCMD_H
+#define _ASM_S390_CPCMD_H
 
 /*
  * the lowlevel function for cpcmd
@@ -16,9 +16,6 @@
  */
 extern int __cpcmd(const char *cmd, char *response, int rlen, int *response_code);
 
-#ifndef __s390x__
-#define cpcmd __cpcmd
-#else
 /*
  * cpcmd is the in-kernel interface for issuing CP commands
  *
@@ -33,6 +30,5 @@
  * NOTE: If the response buffer is not below 2 GB, cpcmd can sleep
  */
 extern int cpcmd(const char *cmd, char *response, int rlen, int *response_code);
-#endif /*__s390x__*/
 
-#endif
+#endif /* _ASM_S390_CPCMD_H */
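
With the 31-bit/64-bit split gone, cpcmd() is the single entry point on both configurations. A hedged usage sketch; the CP command string, the buffer size and the printk wording are arbitrary examples, and the note above about response buffers that are not below 2 GB still applies:

	#include <linux/kernel.h>
	#include <asm/cpcmd.h>

	static void log_cp_userid(void)
	{
		char response[128];
		int rc;

		/* ask CP for the guest's user ID and log the reply */
		cpcmd("QUERY USERID", response, sizeof(response), &rc);
		printk(KERN_INFO "CP response (rc=%d): %s\n", rc, response);
	}
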
diff --git a/include/asm-s390/device.h b/include/asm-s390/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-s390/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-s390/kexec.h b/include/asm-s390/kexec.h
index ce28ddd..9c35c8a 100644
--- a/include/asm-s390/kexec.h
+++ b/include/asm-s390/kexec.h
@@ -26,7 +26,7 @@
 
 /* Maximum address we can use for the control pages */
 /* Not more than 2GB */
-#define KEXEC_CONTROL_MEMORY_LIMIT (1<<31)
+#define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)
 
 /* Allocate one page for the pdp and the second for the code */
 #define KEXEC_CONTROL_CODE_SIZE 4096
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 06583ed..74f7389 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -362,6 +362,14 @@
 	asm volatile("spx %0" : : "m" (address) : "memory");
 }
 
+static inline __u32 store_prefix(void)
+{
+	__u32 address;
+
+	asm volatile("stpx %0" : "=m" (address));
+	return address;
+}
+
 #define __PANIC_MAGIC           0xDEADC0DE
 
 #endif
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 36bb6da..2d968a6 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -110,12 +110,21 @@
 #define VMALLOC_OFFSET  (8*1024*1024)
 #define VMALLOC_START   (((unsigned long) high_memory + VMALLOC_OFFSET) \
 			 & ~(VMALLOC_OFFSET-1))
-#ifndef __s390x__
-# define VMALLOC_END     (0x7fffffffL)
-#else /* __s390x__ */
-# define VMALLOC_END     (0x40000000000L)
-#endif /* __s390x__ */
 
+/*
+ * We need some free virtual space to be able to do vmalloc.
+ * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc
+ * area. On a machine with 2GB memory we make sure that we
+ * have at least 128MB free space for vmalloc. On a machine
+ * with 4TB we make sure we have at least 1GB.
+ */
+#ifndef __s390x__
+#define VMALLOC_MIN_SIZE	0x8000000UL
+#define VMALLOC_END		0x80000000UL
+#else /* __s390x__ */
+#define VMALLOC_MIN_SIZE	0x40000000UL
+#define VMALLOC_END		0x40000000000UL
+#endif /* __s390x__ */
 
 /*
  * A 31 bit pagetable entry of S390 has following format:
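
As a quick check of the numbers in the new comment: 0x8000000 is 128 MB and 0x80000000 is 2 GB, so the 31-bit case guarantees at least 128 MB of vmalloc space below the 2 GB address limit; 0x40000000 is 1 GB and 0x40000000000 is 4 TB, which gives the 64-bit guarantee quoted above.
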
diff --git a/include/asm-s390/reset.h b/include/asm-s390/reset.h
new file mode 100644
index 0000000..9b439cf
--- /dev/null
+++ b/include/asm-s390/reset.h
@@ -0,0 +1,23 @@
+/*
+ *  include/asm-s390/reset.h
+ *
+ *    Copyright IBM Corp. 2006
+ *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_RESET_H
+#define _ASM_S390_RESET_H
+
+#include <linux/list.h>
+
+struct reset_call {
+	struct list_head list;
+	void (*fn)(void);
+};
+
+extern void register_reset_call(struct reset_call *reset);
+extern void unregister_reset_call(struct reset_call *reset);
+extern void s390_reset_system(void);
+extern void (*s390_reset_mcck_handler)(void);
+
+#endif /* _ASM_S390_RESET_H */
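
A hedged usage sketch for the new interface; the callback name and its body are hypothetical, and only the structure layout and the register/unregister calls come from the header above:

	#include <linux/init.h>
	#include <asm/reset.h>

	static void mydrv_reset(void)
	{
		/* quiesce the device before the system is reset */
	}

	static struct reset_call mydrv_reset_call = {
		.fn = mydrv_reset,
	};

	static int __init mydrv_init(void)
	{
		register_reset_call(&mydrv_reset_call);
		return 0;
	}

	static void __exit mydrv_exit(void)
	{
		unregister_reset_call(&mydrv_reset_call);
	}
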
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index 5d72eda..9574fe8 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -2,18 +2,19 @@
  *  include/asm-s390/setup.h
  *
  *  S390 version
- *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Copyright IBM Corp. 1999,2006
  */
 
 #ifndef _ASM_S390_SETUP_H
 #define _ASM_S390_SETUP_H
 
+#define COMMAND_LINE_SIZE 	896
+
 #ifdef __KERNEL__
 
 #include <asm/types.h>
 
 #define PARMAREA		0x10400
-#define COMMAND_LINE_SIZE 	896
 #define MEMORY_CHUNKS		16	/* max 0x7fff */
 #define IPL_PARMBLOCK_ORIGIN	0x2000
 
@@ -30,6 +31,17 @@
 #endif /* __s390x__ */
 #define COMMAND_LINE      ((char *)            (0x10480))
 
+#define CHUNK_READ_WRITE 0
+#define CHUNK_READ_ONLY  1
+
+struct mem_chunk {
+	unsigned long addr;
+	unsigned long size;
+	unsigned long type;
+};
+
+extern struct mem_chunk memory_chunk[];
+
 /*
  * Machine features detected in head.S
  */
@@ -53,7 +65,6 @@
 #define MACHINE_HAS_MVCOS	(machine_flags & 512)
 #endif /* __s390x__ */
 
-
 #define MACHINE_HAS_SCLP	(!MACHINE_IS_P390)
 
 /*
@@ -71,7 +82,6 @@
 #define SET_CONSOLE_3215	do { console_mode = 2; } while (0)
 #define SET_CONSOLE_3270	do { console_mode = 3; } while (0)
 
-
 struct ipl_list_hdr {
 	u32 len;
 	u8  reserved1[3];
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index c3cf030..7097c96 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -18,6 +18,7 @@
 
 #include <asm/lowcore.h>
 #include <asm/sigp.h>
+#include <asm/ptrace.h>
 
 /*
   s390 specific smp.c headers
@@ -101,6 +102,13 @@
 	func(info);
 	return 0;
 }
+
+static inline void smp_send_stop(void)
+{
+	/* Disable all interrupts/machine checks */
+	__load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK);
+}
+
 #define smp_cpu_not_running(cpu)	1
 #define smp_get_cpu(cpu) ({ 0; })
 #define smp_put_cpu(cpu) ({ 0; })
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index ccbafe4..bd0b05a 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -115,6 +115,16 @@
 #define account_vtime(x) do { /* empty */ } while (0)
 #endif
 
+#ifdef CONFIG_PFAULT
+extern void pfault_irq_init(void);
+extern int pfault_init(void);
+extern void pfault_fini(void);
+#else /* CONFIG_PFAULT */
+#define pfault_irq_init()	do { } while (0)
+#define pfault_init()		({-1;})
+#define pfault_fini()		do { } while (0)
+#endif /* CONFIG_PFAULT */
+
 #define finish_arch_switch(prev) do {					     \
 	set_fs(current->thread.mm_segment);				     \
 	account_vtime(prev);						     \
diff --git a/include/asm-s390/termios.h b/include/asm-s390/termios.h
index d1e29cc..62b23ca 100644
--- a/include/asm-s390/termios.h
+++ b/include/asm-s390/termios.h
@@ -75,39 +75,7 @@
 */
 #define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
 
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
-	unsigned short __tmp; \
-	get_user(__tmp,&(termio)->x); \
-	(termios)->x = (0xffff0000 & ((termios)->x)) | __tmp; \
-}
-
-#define user_termio_to_kernel_termios(termios, termio) \
-({ \
-	SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
-	SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
-	SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
-	SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
-	copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
-})
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-#define kernel_termios_to_user_termio(termio, termios) \
-({ \
-	put_user((termios)->c_iflag, &(termio)->c_iflag); \
-	put_user((termios)->c_oflag, &(termio)->c_oflag); \
-	put_user((termios)->c_cflag, &(termio)->c_cflag); \
-	put_user((termios)->c_lflag, &(termio)->c_lflag); \
-	put_user((termios)->c_line,  &(termio)->c_line); \
-	copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
-})
-
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+#include <asm-generic/termios.h>
 
 #endif	/* __KERNEL__ */
 
diff --git a/include/asm-s390/types.h b/include/asm-s390/types.h
index ae2951c..fc5d7cf 100644
--- a/include/asm-s390/types.h
+++ b/include/asm-s390/types.h
@@ -87,16 +87,6 @@
 	} subreg;
 } register_pair;
 
-#ifdef CONFIG_LBD
-typedef u64 sector_t;
-#define HAVE_SECTOR_T
-#endif
-
-#ifdef CONFIG_LSF
-typedef u64 blkcnt_t;
-#define HAVE_BLKCNT_T
-#endif
-
 #endif /* ! __s390x__   */
 #endif /* __ASSEMBLY__  */
 #endif /* __KERNEL__    */
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
index 72ae4ef..73ac4e8 100644
--- a/include/asm-s390/uaccess.h
+++ b/include/asm-s390/uaccess.h
@@ -201,7 +201,7 @@
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
  */
-static inline unsigned long
+static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	if (__builtin_constant_p(n) && (n <= 256))
@@ -226,7 +226,7 @@
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
  */
-static inline unsigned long
+static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_sleep();
@@ -252,7 +252,7 @@
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
  */
-static inline unsigned long
+static inline unsigned long __must_check
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	if (__builtin_constant_p(n) && (n <= 256))
@@ -277,7 +277,7 @@
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
  */
-static inline unsigned long
+static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
@@ -288,13 +288,13 @@
 	return n;
 }
 
-static inline unsigned long
+static inline unsigned long __must_check
 __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	return uaccess.copy_in_user(n, to, from);
 }
 
-static inline unsigned long
+static inline unsigned long __must_check
 copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
@@ -306,7 +306,7 @@
 /*
  * Copy a null terminated string from userspace.
  */
-static inline long
+static inline long __must_check
 strncpy_from_user(char *dst, const char __user *src, long count)
 {
         long res = -EFAULT;
@@ -343,13 +343,13 @@
  * Zero Userspace
  */
 
-static inline unsigned long
+static inline unsigned long __must_check
 __clear_user(void __user *to, unsigned long n)
 {
 	return uaccess.clear_user(n, to);
 }
 
-static inline unsigned long
+static inline unsigned long __must_check
 clear_user(void __user *to, unsigned long n)
 {
 	might_sleep();
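
The __must_check annotations added above make the compiler warn whenever a caller discards the return value. The intended calling pattern, shown as a hedged sketch with made-up function and variable names:

	#include <linux/errno.h>
	#include <asm/uaccess.h>

	static int fetch_user_buffer(void *kbuf, const void __user *ubuf,
				     unsigned long len)
	{
		if (copy_from_user(kbuf, ubuf, len))
			return -EFAULT;	/* some bytes could not be copied */
		return 0;
	}
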
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
index 71d3c21..fb6fef9 100644
--- a/include/asm-s390/unistd.h
+++ b/include/asm-s390/unistd.h
@@ -345,160 +345,6 @@
 
 #ifdef __KERNEL__
 
-#include <linux/err.h>
-
-#define __syscall_return(type, res)			     \
-do {							     \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-		errno = -(res);				     \
-		res = -1;				     \
-	}						     \
-	return (type) (res);				     \
-} while (0)
-
-#define _svc_clobber "1", "cc", "memory"
-
-#define _syscall0(type,name)					\
-type name(void) {						\
-	register long __svcres asm("2");			\
-	long __res;						\
-	asm volatile(						\
-		"	.if	%1 < 256\n"			\
-		"	svc	%b1\n"				\
-		"	.else\n"				\
-		"	la	%%r1,%1\n"			\
-		"	svc	0\n"				\
-		"	.endif"					\
-		: "=d" (__svcres)				\
-		: "i" (__NR_##name)				\
-		: _svc_clobber);				\
-	__res = __svcres;					\
-	__syscall_return(type,__res);				\
-}
-
-#define _syscall1(type,name,type1,arg1)				\
-type name(type1 arg1) {						\
-	register type1 __arg1 asm("2") = arg1;			\
-	register long __svcres asm("2");			\
-	long __res;						\
-	asm volatile(						\
-		"	.if	%1 < 256\n"			\
-		"	svc	%b1\n"				\
-		"	.else\n"				\
-		"	la	%%r1,%1\n"			\
-		"	svc	0\n"				\
-		"	.endif"					\
-		: "=d" (__svcres)				\
-		: "i" (__NR_##name),				\
-		  "0" (__arg1)					\
-		: _svc_clobber);				\
-	__res = __svcres;					\
-	__syscall_return(type,__res);				\
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2)		\
-type name(type1 arg1, type2 arg2) {				\
-	register type1 __arg1 asm("2") = arg1;			\
-	register type2 __arg2 asm("3") = arg2;			\
-	register long __svcres asm("2");			\
-	long __res;						\
-	asm volatile(						\
-		"	.if	%1 < 256\n"			\
-		"	svc	%b1\n"				\
-		"	.else\n"				\
-		"	la	%%r1,%1\n"			\
-		"	svc	0\n"				\
-		"	.endif"					\
-		: "=d" (__svcres)				\
-		: "i" (__NR_##name),				\
-		  "0" (__arg1),					\
-		  "d" (__arg2)					\
-		: _svc_clobber );				\
-	__res = __svcres;					\
-	__syscall_return(type,__res);				\
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
-type name(type1 arg1, type2 arg2, type3 arg3) {			\
-	register type1 __arg1 asm("2") = arg1;			\
-	register type2 __arg2 asm("3") = arg2;			\
-	register type3 __arg3 asm("4") = arg3;			\
-	register long __svcres asm("2");			\
-	long __res;						\
-	asm volatile(						\
-		"	.if	%1 < 256\n"			\
-		"	svc	%b1\n"				\
-		"	.else\n"				\
-		"	la	%%r1,%1\n"			\
-		"	svc	0\n"				\
-		"	.endif"					\
-		: "=d" (__svcres)				\
-		: "i" (__NR_##name),				\
-		  "0" (__arg1),					\
-		  "d" (__arg2),					\
-		  "d" (__arg3)					\
-		: _svc_clobber);				\
-	__res = __svcres;					\
-	__syscall_return(type,__res);				\
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,	\
-		  type4,name4)					\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) {	\
-	register type1 __arg1 asm("2") = arg1;			\
-	register type2 __arg2 asm("3") = arg2;			\
-	register type3 __arg3 asm("4") = arg3;			\
-	register type4 __arg4 asm("5") = arg4;			\
-	register long __svcres asm("2");			\
-	long __res;						\
-	asm volatile(						\
-		"	.if	%1 < 256\n"			\
-		"	svc	%b1\n"				\
-		"	.else\n"				\
-		"	la	%%r1,%1\n"			\
-		"	svc	0\n"				\
-		"	.endif"					\
-		: "=d" (__svcres)				\
-		: "i" (__NR_##name),				\
-		  "0" (__arg1),					\
-		  "d" (__arg2),					\
-		  "d" (__arg3),					\
-		  "d" (__arg4)					\
-		: _svc_clobber);				\
-	__res = __svcres;					\
-	__syscall_return(type,__res);				\
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,	\
-		  type4,name4,type5,name5)			\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4,	\
-	  type5 arg5) {						\
-	register type1 __arg1 asm("2") = arg1;			\
-	register type2 __arg2 asm("3") = arg2;			\
-	register type3 __arg3 asm("4") = arg3;			\
-	register type4 __arg4 asm("5") = arg4;			\
-	register type5 __arg5 asm("6") = arg5;			\
-	register long __svcres asm("2");			\
-	long __res;						\
-	asm volatile(						\
-		"	.if	%1 < 256\n"			\
-		"	svc	%b1\n"				\
-		"	.else\n"				\
-		"	la	%%r1,%1\n"			\
-		"	svc	0\n"				\
-		"	.endif"					\
-		: "=d" (__svcres)				\
-		: "i" (__NR_##name),				\
-		  "0" (__arg1),					\
-		  "d" (__arg2),					\
-		  "d" (__arg3),					\
-		  "d" (__arg4),					\
-		  "d" (__arg5)					\
-		: _svc_clobber);				\
-	__res = __svcres;					\
-	__syscall_return(type,__res);				\
-}
-
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_SYS_ALARM
diff --git a/include/asm-s390/zcrypt.h b/include/asm-s390/zcrypt.h
index 7244c68..b90e558 100644
--- a/include/asm-s390/zcrypt.h
+++ b/include/asm-s390/zcrypt.h
@@ -180,40 +180,8 @@
  *	     for the implementation details for the contents of the
  *	     block
  *
- *   Z90STAT_TOTALCOUNT
- *     Return an integer count of all device types together.
- *
- *   Z90STAT_PCICACOUNT
- *     Return an integer count of all PCICAs.
- *
- *   Z90STAT_PCICCCOUNT
- *     Return an integer count of all PCICCs.
- *
- *   Z90STAT_PCIXCCMCL2COUNT
- *     Return an integer count of all MCL2 PCIXCCs.
- *
- *   Z90STAT_PCIXCCMCL3COUNT
- *     Return an integer count of all MCL3 PCIXCCs.
- *
- *   Z90STAT_CEX2CCOUNT
- *     Return an integer count of all CEX2Cs.
- *
- *   Z90STAT_CEX2ACOUNT
- *     Return an integer count of all CEX2As.
- *
- *   Z90STAT_REQUESTQ_COUNT
- *     Return an integer count of the number of entries waiting to be
- *     sent to a device.
- *
- *   Z90STAT_PENDINGQ_COUNT
- *     Return an integer count of the number of entries sent to a
- *     device awaiting the reply.
- *
- *   Z90STAT_TOTALOPEN_COUNT
- *     Return an integer count of the number of open file handles.
- *
- *   Z90STAT_DOMAIN_INDEX
- *     Return the integer value of the Cryptographic Domain.
+ *   ZSECSENDCPRB
+ *     Send an arbitrary CPRB to a crypto card.
  *
  *   Z90STAT_STATUS_MASK
  *     Return an 64 element array of unsigned chars for the status of
@@ -235,28 +203,51 @@
  *     of successfully completed requests per device since the device
  *     was detected and made available.
  *
- *   ICAZ90STATUS (deprecated)
+ *   Z90STAT_REQUESTQ_COUNT
+ *     Return an integer count of the number of entries waiting to be
+ *     sent to a device.
+ *
+ *   Z90STAT_PENDINGQ_COUNT
+ *     Return an integer count of the number of entries sent to all
+ *     devices awaiting the reply.
+ *
+ *   Z90STAT_TOTALOPEN_COUNT
+ *     Return an integer count of the number of open file handles.
+ *
+ *   Z90STAT_DOMAIN_INDEX
+ *     Return the integer value of the Cryptographic Domain.
+ *
+ *   The following ioctls are deprecated and should be no longer used:
+ *
+ *   Z90STAT_TOTALCOUNT
+ *     Return an integer count of all device types together.
+ *
+ *   Z90STAT_PCICACOUNT
+ *     Return an integer count of all PCICAs.
+ *
+ *   Z90STAT_PCICCCOUNT
+ *     Return an integer count of all PCICCs.
+ *
+ *   Z90STAT_PCIXCCMCL2COUNT
+ *     Return an integer count of all MCL2 PCIXCCs.
+ *
+ *   Z90STAT_PCIXCCMCL3COUNT
+ *     Return an integer count of all MCL3 PCIXCCs.
+ *
+ *   Z90STAT_CEX2CCOUNT
+ *     Return an integer count of all CEX2Cs.
+ *
+ *   Z90STAT_CEX2ACOUNT
+ *     Return an integer count of all CEX2As.
+ *
+ *   ICAZ90STATUS
  *     Return some device driver status in a ica_z90_status struct
  *     This takes an ica_z90_status struct as its arg.
  *
- *     NOTE: this ioctl() is deprecated, and has been replaced with
- *	     single ioctl()s for each type of status being requested
- *
- *   Z90STAT_PCIXCCCOUNT (deprecated)
+ *   Z90STAT_PCIXCCCOUNT
  *     Return an integer count of all PCIXCCs (MCL2 + MCL3).
  *     This is DEPRECATED now that MCL3 PCIXCCs are treated differently from
  *     MCL2 PCIXCCs.
- *
- *   Z90QUIESCE (not recommended)
- *     Quiesce the driver.  This is intended to stop all new
- *     requests from being processed.  Its use is NOT recommended,
- *     except in circumstances where there is no other way to stop
- *     callers from accessing the driver.  Its original use was to
- *     allow the driver to be "drained" of work in preparation for
- *     a system shutdown.
- *
- *     NOTE: once issued, this ban on new work cannot be undone
- *	     except by unloading and reloading the driver.
  */
 
 /**
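
For orientation, the status ioctls documented above are issued from user space against the zcrypt character device. A hedged sketch; the device node name and the assumption that the count comes back through the int pointer argument are mine, not stated in this header:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <asm/zcrypt.h>

	int main(void)
	{
		int fd = open("/dev/z90crypt", O_RDWR);
		int pending = 0;

		/* how many requests are queued but not yet sent to a device? */
		if (fd >= 0 && ioctl(fd, Z90STAT_REQUESTQ_COUNT, &pending) == 0)
			printf("requests waiting: %d\n", pending);
		return 0;
	}
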
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 8bdc1ba..28305c3 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -28,11 +28,11 @@
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_add	\n"
-"	add	%2, %0				\n"
-"	movco.l	%0, @%3				\n"
+"1:	movli.l @%2, %0		! atomic_add	\n"
+"	add	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
 "	bf	1b				\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -50,11 +50,11 @@
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_sub	\n"
-"	sub	%2, %0				\n"
-"	movco.l	%0, @%3				\n"
+"1:	movli.l @%2, %0		! atomic_sub	\n"
+"	sub	%1, %0				\n"
+"	movco.l	%0, @%2				\n"
 "	bf	1b				\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -80,12 +80,12 @@
 
 #ifdef CONFIG_CPU_SH4A
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_add_return	\n"
-"	add	%2, %0					\n"
-"	movco.l	%0, @%3					\n"
+"1:	movli.l @%2, %0		! atomic_add_return	\n"
+"	add	%1, %0					\n"
+"	movco.l	%0, @%2					\n"
 "	bf	1b					\n"
 "	synco						\n"
-	: "=&z" (temp), "=r" (&v->counter)
+	: "=&z" (temp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -109,12 +109,12 @@
 
 #ifdef CONFIG_CPU_SH4A
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_sub_return	\n"
-"	sub	%2, %0					\n"
-"	movco.l	%0, @%3					\n"
+"1:	movli.l @%2, %0		! atomic_sub_return	\n"
+"	sub	%1, %0					\n"
+"	movco.l	%0, @%2					\n"
 "	bf	1b					\n"
 "	synco						\n"
-	: "=&z" (temp), "=r" (&v->counter)
+	: "=&z" (temp)
 	: "r" (i), "r" (&v->counter)
 	: "t");
 #else
@@ -186,11 +186,11 @@
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_clear_mask	\n"
-"	and	%2, %0					\n"
-"	movco.l	%0, @%3					\n"
+"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
+"	and	%1, %0					\n"
+"	movco.l	%0, @%2					\n"
 "	bf	1b					\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (~mask), "r" (&v->counter)
 	: "t");
 #else
@@ -208,11 +208,11 @@
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_set_mask	\n"
-"	or	%2, %0					\n"
-"	movco.l	%0, @%3					\n"
+"1:	movli.l @%2, %0		! atomic_set_mask	\n"
+"	or	%1, %0					\n"
+"	movco.l	%0, @%2					\n"
 "	bf	1b					\n"
-	: "=&z" (tmp), "=r" (&v->counter)
+	: "=&z" (tmp)
 	: "r" (mask), "r" (&v->counter)
 	: "t");
 #else
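
The constraint changes above matter because movli.l/movco.l form a load-linked/store-conditional pair: the address of the counter is only ever read, so it belongs in the input list, and the old "=r" output on &v->counter was bogus. The loop simply retries until the conditional store succeeds. A hedged illustration of the same retry pattern in portable terms, using the GCC __atomic builtins of current compilers rather than SH assembly:

	static void atomic_add_sketch(int i, volatile int *counter)
	{
		int old, new;

		do {
			old = __atomic_load_n(counter, __ATOMIC_RELAXED);
			new = old + i;
			/* retry if another CPU touched *counter in between */
		} while (!__atomic_compare_exchange_n(counter, &old, new, 0,
						      __ATOMIC_RELAXED,
						      __ATOMIC_RELAXED));
	}
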
diff --git a/include/asm-sh/bugs.h b/include/asm-sh/bugs.h
index beeea40..795047d 100644
--- a/include/asm-sh/bugs.h
+++ b/include/asm-sh/bugs.h
@@ -23,16 +23,20 @@
 	cpu_data->loops_per_jiffy = loops_per_jiffy;
 
 	switch (cpu_data->type) {
-	case CPU_SH7604:
+	case CPU_SH7604 ... CPU_SH7619:
 		*p++ = '2';
 		break;
+	case CPU_SH7206:
+		*p++ = '2';
+		*p++ = 'a';
+		break;
 	case CPU_SH7705 ... CPU_SH7300:
 		*p++ = '3';
 		break;
 	case CPU_SH7750 ... CPU_SH4_501:
 		*p++ = '4';
 		break;
-	case CPU_SH7770 ... CPU_SH7781:
+	case CPU_SH7770 ... CPU_SH7785:
 		*p++ = '4';
 		*p++ = 'a';
 		break;
diff --git a/include/asm-sh/checksum.h b/include/asm-sh/checksum.h
index 08168af..d44344c 100644
--- a/include/asm-sh/checksum.h
+++ b/include/asm-sh/checksum.h
@@ -23,7 +23,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -33,8 +33,8 @@
  * better 64-bit) boundary
  */
 
-asmlinkage unsigned int csum_partial_copy_generic(const unsigned char *src, unsigned char *dst,
-						  int len, int sum, int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+					  int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr);
 
 /*
  *	Note: when you get a NULL pointer exception here this means someone
@@ -44,24 +44,25 @@
  *	access_ok().
  */
 static __inline__
-unsigned int csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst,
-					int len, int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+					int len, __wsum sum)
 {
 	return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
 }
 
 static __inline__
-unsigned int csum_partial_copy_from_user (const unsigned char *src, unsigned char *dst,
-						int len, int sum, int *err_ptr)
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+						int len, __wsum sum, int *err_ptr)
 {
-	return csum_partial_copy_generic ( src, dst, len, sum, err_ptr, NULL);
+	return csum_partial_copy_generic((__force const void *)src, dst,
+					len, sum, err_ptr, NULL);
 }
 
 /*
  *	Fold a partial checksum
  */
 
-static __inline__ unsigned int csum_fold(unsigned int sum)
+static __inline__ __sum16 csum_fold(__wsum sum)
 {
 	unsigned int __dummy;
 	__asm__("swap.w %0, %1\n\t"
@@ -74,7 +75,7 @@
 		: "=r" (sum), "=&r" (__dummy)
 		: "0" (sum)
 		: "t");
-	return sum;
+	return (__force __sum16)sum;
 }
 
 /*
@@ -84,7 +85,7 @@
  *      i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted
  *      for linux by * Arnt Gulbrandsen.
  */
-static __inline__ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int sum, __dummy0, __dummy1;
 
@@ -112,16 +113,15 @@
 	return	csum_fold(sum);
 }
 
-static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
-						   unsigned long daddr,
+static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum) 
+						   __wsum sum)
 {
 #ifdef __LITTLE_ENDIAN__
-	unsigned long len_proto = (ntohs(len)<<16)+proto*256;
+	unsigned long len_proto = (proto + len) << 8;
 #else
-	unsigned long len_proto = (proto<<16)+len;
+	unsigned long len_proto = proto + len;
 #endif
 	__asm__("clrt\n\t"
 		"addc	%0, %1\n\t"
@@ -139,11 +139,10 @@
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static __inline__ unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						       unsigned long daddr,
+static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						       unsigned short len,
 						       unsigned short proto,
-						       unsigned int sum) 
+						       __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -153,18 +152,17 @@
  * in icmp.c
  */
 
-static __inline__ unsigned short ip_compute_csum(unsigned char * buff, int len)
+static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
 {
     return csum_fold (csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
 #ifdef CONFIG_IPV6
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						     struct in6_addr *daddr,
-						     __u32 len,
-						     unsigned short proto,
-						     unsigned int sum) 
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+					  const struct in6_addr *daddr,
+					  __u32 len, unsigned short proto,
+					  __wsum sum)
 {
 	unsigned int __dummy;
 	__asm__("clrt\n\t"
@@ -201,17 +199,18 @@
  *	Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-static __inline__ unsigned int csum_and_copy_to_user (const unsigned char *src,
-						      unsigned char __user *dst,
-						      int len, int sum,
+static __inline__ __wsum csum_and_copy_to_user (const void *src,
+						      void __user *dst,
+						      int len, __wsum sum,
 						      int *err_ptr)
 {
 	if (access_ok(VERIFY_WRITE, dst, len))
-		return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr);
+		return csum_partial_copy_generic((__force const void *)src,
+						dst, len, sum, NULL, err_ptr);
 
 	if (len)
 		*err_ptr = -EFAULT;
 
-	return -1; /* invalid checksum */
+	return (__force __wsum)-1; /* invalid checksum */
 }
 #endif /* __ASM_SH_CHECKSUM_H */
diff --git a/include/asm-sh/clock.h b/include/asm-sh/clock.h
index fdfb75b..1df9280 100644
--- a/include/asm-sh/clock.h
+++ b/include/asm-sh/clock.h
@@ -4,6 +4,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/seq_file.h>
+#include <linux/clk.h>
 
 struct clk;
 
@@ -18,7 +19,7 @@
 struct clk {
 	struct list_head	node;
 	const char		*name;
-
+	int			id;
 	struct module		*owner;
 
 	struct clk		*parent;
@@ -40,22 +41,13 @@
 int clk_init(void);
 
 int __clk_enable(struct clk *);
-int clk_enable(struct clk *);
-
 void __clk_disable(struct clk *);
-void clk_disable(struct clk *);
 
-int clk_set_rate(struct clk *, unsigned long rate);
-unsigned long clk_get_rate(struct clk *);
 void clk_recalc_rate(struct clk *);
 
-struct clk *clk_get(const char *id);
-void clk_put(struct clk *);
-
 int clk_register(struct clk *);
 void clk_unregister(struct clk *);
 
 int show_clocks(struct seq_file *m);
 
 #endif /* __ASM_SH_CLOCK_H */
-
diff --git a/include/asm-sh/cpu-sh2/cache.h b/include/asm-sh/cpu-sh2/cache.h
index cd96402..20b9796 100644
--- a/include/asm-sh/cpu-sh2/cache.h
+++ b/include/asm-sh/cpu-sh2/cache.h
@@ -12,6 +12,7 @@
 
 #define L1_CACHE_SHIFT	4
 
+#if defined(CONFIG_CPU_SUBTYPE_SH7604)
 #define CCR		0xfffffe92	/* Address of Cache Control Register */
 
 #define CCR_CACHE_CE	0x01	/* Cache enable */
@@ -27,5 +28,26 @@
 #define CCR_CACHE_ORA		CCR_CACHE_TW
 #define CCR_CACHE_WT		0x00	/* SH-2 is _always_ write-through */
 
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define CCR1		0xffffffec
+#define CCR		CCR1
+
+#define CCR_CACHE_CE	0x01	/* Cache enable */
+#define CCR_CACHE_WT	0x06    /* CCR[bit1=1,bit2=1] */
+				/* 0x00000000-0x7fffffff: Write-through  */
+				/* 0x80000000-0x9fffffff: Write-back     */
+                                /* 0xc0000000-0xdfffffff: Write-through  */
+#define CCR_CACHE_CB	0x00    /* CCR[bit1=0,bit2=0] */
+				/* 0x00000000-0x7fffffff: Write-back     */
+				/* 0x80000000-0x9fffffff: Write-through  */
+                                /* 0xc0000000-0xdfffffff: Write-back     */
+#define CCR_CACHE_CF	0x08	/* Cache invalidate */
+
+#define CACHE_OC_ADDRESS_ARRAY	0xf0000000
+#define CACHE_OC_DATA_ARRAY	0xf1000000
+
+#define CCR_CACHE_ENABLE	CCR_CACHE_CE
+#define CCR_CACHE_INVALIDATE	CCR_CACHE_CF
+#endif
 #endif /* __ASM_CPU_SH2_CACHE_H */
 
diff --git a/include/asm-sh/cpu-sh2/freq.h b/include/asm-sh/cpu-sh2/freq.h
new file mode 100644
index 0000000..31de475
--- /dev/null
+++ b/include/asm-sh/cpu-sh2/freq.h
@@ -0,0 +1,18 @@
+/*
+ * include/asm-sh/cpu-sh2/freq.h
+ *
+ * Copyright (C) 2006  Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_CPU_SH2_FREQ_H
+#define __ASM_CPU_SH2_FREQ_H
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define FREQCR	0xf815ff80
+#endif
+
+#endif /* __ASM_CPU_SH2_FREQ_H */
+
diff --git a/include/asm-sh/cpu-sh2/mmu_context.h b/include/asm-sh/cpu-sh2/mmu_context.h
new file mode 100644
index 0000000..beeb299
--- /dev/null
+++ b/include/asm-sh/cpu-sh2/mmu_context.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-sh/cpu-sh2/mmu_context.h
+ *
+ * Copyright (C) 2003  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_CPU_SH2_MMU_CONTEXT_H
+#define __ASM_CPU_SH2_MMU_CONTEXT_H
+
+/* No MMU */
+
+#endif /* __ASM_CPU_SH2_MMU_CONTEXT_H */
+
diff --git a/include/asm-sh/cpu-sh2/timer.h b/include/asm-sh/cpu-sh2/timer.h
new file mode 100644
index 0000000..a39c241
--- /dev/null
+++ b/include/asm-sh/cpu-sh2/timer.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_CPU_SH2_TIMER_H
+#define __ASM_CPU_SH2_TIMER_H
+
+/* Nothing needed yet */
+
+#endif /* __ASM_CPU_SH2_TIMER_H */
diff --git a/include/asm-sh/cpu-sh2a/addrspace.h b/include/asm-sh/cpu-sh2a/addrspace.h
new file mode 100644
index 0000000..3d2e9aa
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/addrspace.h
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/addrspace.h>
diff --git a/include/asm-sh/cpu-sh2a/cache.h b/include/asm-sh/cpu-sh2a/cache.h
new file mode 100644
index 0000000..3e4b9e4
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/cache.h
@@ -0,0 +1,39 @@
+/*
+ * include/asm-sh/cpu-sh2a/cache.h
+ *
+ * Copyright (C) 2004 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_CPU_SH2A_CACHE_H
+#define __ASM_CPU_SH2A_CACHE_H
+
+#define L1_CACHE_SHIFT	4
+
+#define CCR1		0xfffc1000
+#define CCR2		0xfffc1004
+
+/* CCR1 behaves more like the traditional CCR */
+#define CCR		CCR1
+
+/*
+ * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not
+ * listed here are reserved.
+ */
+#define CCR_CACHE_CB	0x0000	/* Hack */
+#define CCR_CACHE_OCE	0x0001
+#define CCR_CACHE_WT	0x0002
+#define CCR_CACHE_OCI	0x0008	/* OCF */
+#define CCR_CACHE_ICE	0x0100
+#define CCR_CACHE_ICI	0x0800	/* ICF */
+
+#define CACHE_IC_ADDRESS_ARRAY	0xf0000000
+#define CACHE_OC_ADDRESS_ARRAY	0xf0800000
+
+#define CCR_CACHE_ENABLE	(CCR_CACHE_OCE | CCR_CACHE_ICE)
+#define CCR_CACHE_INVALIDATE	(CCR_CACHE_OCI | CCR_CACHE_ICI)
+
+#endif /* __ASM_CPU_SH2A_CACHE_H */
+
diff --git a/include/asm-sh/cpu-sh2a/cacheflush.h b/include/asm-sh/cpu-sh2a/cacheflush.h
new file mode 100644
index 0000000..fa3186c
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/cacheflush.h
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/cacheflush.h>
diff --git a/include/asm-sh/cpu-sh2a/dma.h b/include/asm-sh/cpu-sh2a/dma.h
new file mode 100644
index 0000000..0d5ad85
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/dma.h
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/dma.h>
diff --git a/include/asm-sh/cpu-sh2a/freq.h b/include/asm-sh/cpu-sh2a/freq.h
new file mode 100644
index 0000000..e518fff
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/freq.h
@@ -0,0 +1,18 @@
+/*
+ * include/asm-sh/cpu-sh2a/freq.h
+ *
+ * Copyright (C) 2006  Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_CPU_SH2A_FREQ_H
+#define __ASM_CPU_SH2A_FREQ_H
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7206)
+#define FREQCR	0xfffe0010
+#endif
+
+#endif /* __ASM_CPU_SH2A_FREQ_H */
+
diff --git a/include/asm-sh/cpu-sh2a/mmu_context.h b/include/asm-sh/cpu-sh2a/mmu_context.h
new file mode 100644
index 0000000..cd2387f
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/mmu_context.h
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/mmu_context.h>
diff --git a/include/asm-sh/cpu-sh2a/timer.h b/include/asm-sh/cpu-sh2a/timer.h
new file mode 100644
index 0000000..fee504a
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/timer.h
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/timer.h>
diff --git a/include/asm-sh/cpu-sh2a/ubc.h b/include/asm-sh/cpu-sh2a/ubc.h
new file mode 100644
index 0000000..cf28062
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/ubc.h
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/ubc.h>
diff --git a/include/asm-sh/cpu-sh2a/watchdog.h b/include/asm-sh/cpu-sh2a/watchdog.h
new file mode 100644
index 0000000..c1b3e24
--- /dev/null
+++ b/include/asm-sh/cpu-sh2a/watchdog.h
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/watchdog.h>
diff --git a/include/asm-sh/device.h b/include/asm-sh/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-sh/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 56cd4b9..37ab0c1 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -53,7 +53,7 @@
 	consistent_free(vaddr, size);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
 	consistent_sync(vaddr, size, (int)dir);
diff --git a/include/asm-sh/dma.h b/include/asm-sh/dma.h
index d9daa02..faf3051 100644
--- a/include/asm-sh/dma.h
+++ b/include/asm-sh/dma.h
@@ -14,9 +14,7 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/sysdev.h>
-#include <linux/device.h>
 #include <asm/cpu/dma.h>
-#include <asm/semaphore.h>
 
 /* The maximum address that we can perform a DMA transfer to on this platform */
 /* Don't define MAX_DMA_ADDRESS; it's useless on the SuperH and any
@@ -46,16 +44,21 @@
  * DMAC (dma_info) flags
  */
 enum {
-	DMAC_CHANNELS_CONFIGURED	= 0x00,
-	DMAC_CHANNELS_TEI_CAPABLE	= 0x01,
+	DMAC_CHANNELS_CONFIGURED	= 0x01,
+	DMAC_CHANNELS_TEI_CAPABLE	= 0x02,	/* Transfer end interrupt */
 };
 
 /*
  * DMA channel capabilities / flags
  */
 enum {
-	DMA_TEI_CAPABLE			= 0x01,
-	DMA_CONFIGURED			= 0x02,
+	DMA_CONFIGURED			= 0x01,
+
+	/*
+	 * Transfer end interrupt, inherited from DMAC.
+	 * wait_queue used in dma_wait_for_completion.
+	 */
+	DMA_TEI_CAPABLE			= 0x02,
 };
 
 extern spinlock_t dma_spin_lock;
@@ -68,28 +71,31 @@
 
 	int (*get_residue)(struct dma_channel *chan);
 	int (*xfer)(struct dma_channel *chan);
-	void (*configure)(struct dma_channel *chan, unsigned long flags);
+	int (*configure)(struct dma_channel *chan, unsigned long flags);
+	int (*extend)(struct dma_channel *chan, unsigned long op, void *param);
 };
 
 struct dma_channel {
-	char dev_id[16];
+	char dev_id[16];		/* unique name per DMAC of channel */
 
-	unsigned int chan;		/* Physical channel number */
+	unsigned int chan;		/* DMAC channel number */
 	unsigned int vchan;		/* Virtual channel number */
+
 	unsigned int mode;
 	unsigned int count;
 
 	unsigned long sar;
 	unsigned long dar;
 
+	const char **caps;
+
 	unsigned long flags;
 	atomic_t busy;
 
-	struct semaphore sem;
 	wait_queue_head_t wait_queue;
 
 	struct sys_device dev;
-	char *name;
+	void *priv_data;
 };
 
 struct dma_info {
@@ -103,6 +109,12 @@
 	struct dma_channel *channels;
 
 	struct list_head list;
+	int first_channel_nr;
+};
+
+struct dma_chan_caps {
+	int ch_num;
+	const char **caplist;
 };
 
 #define to_dma_channel(channel) container_of(channel, struct dma_channel, dev)
@@ -121,6 +133,8 @@
 #define dma_read_page(chan, from, to)	\
 	dma_read(chan, from, to, PAGE_SIZE)
 
+extern int request_dma_bycap(const char **dmac, const char **caps,
+			     const char *dev_id);
 extern int request_dma(unsigned int chan, const char *dev_id);
 extern void free_dma(unsigned int chan);
 extern int get_dma_residue(unsigned int chan);
@@ -131,6 +145,10 @@
 
 extern int register_dmac(struct dma_info *info);
 extern void unregister_dmac(struct dma_info *info);
+extern struct dma_info *get_dma_info_by_name(const char *dmac_name);
+
+extern int dma_extend(unsigned int chan, unsigned long op, void *param);
+extern int register_chan_caps(const char *dmac, struct dma_chan_caps *capslist);
 
 #ifdef CONFIG_SYSFS
 /* arch/sh/drivers/dma/dma-sysfs.c */
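
A hedged sketch of how a driver might use the new capability-based lookup; the DMAC name, the capability strings, and the assumption that a channel number or a negative errno is returned are mine and are not spelled out in this header:

	static int mydrv_get_channel(void)
	{
		static const char *dmac_names[]  = { "sh-dmac", NULL };
		static const char *wanted_caps[] = { "tei-capable", NULL };

		/* assumed to return a usable channel number on success */
		return request_dma_bycap(dmac_names, wanted_caps, "mydrv");
	}
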
diff --git a/include/asm-sh/elf.h b/include/asm-sh/elf.h
index fc050fd..43ca244 100644
--- a/include/asm-sh/elf.h
+++ b/include/asm-sh/elf.h
@@ -74,7 +74,7 @@
 #define ELF_ARCH	EM_SH
 
 #define USE_ELF_CORE_DUMP
-#define ELF_EXEC_PAGESIZE	4096
+#define ELF_EXEC_PAGESIZE	PAGE_SIZE
 
 /* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
    use of this is to invoke "./ld.so someprog" to test out a new version of
diff --git a/include/asm-sh/entry-macros.S b/include/asm-sh/entry-macros.S
new file mode 100644
index 0000000..500030e
--- /dev/null
+++ b/include/asm-sh/entry-macros.S
@@ -0,0 +1,33 @@
+! entry.S macro define
+	
+	.macro	cli
+	stc	sr, r0
+	or	#0xf0, r0
+	ldc	r0, sr
+	.endm
+
+	.macro	sti
+	mov	#0xf0, r11
+	extu.b	r11, r11
+	not	r11, r11
+	stc	sr, r10
+	and	r11, r10
+#ifdef CONFIG_HAS_SR_RB
+	stc	k_g_imask, r11
+	or	r11, r10
+#endif
+	ldc	r10, sr
+	.endm
+
+	.macro	get_current_thread_info, ti, tmp
+#ifdef CONFIG_HAS_SR_RB
+	stc	r7_bank, \ti
+#else
+	mov	#((THREAD_SIZE - 1) >> 10) ^ 0xff, \tmp
+	shll8	\tmp
+	shll2	\tmp
+	mov	r15, \ti
+	and	\tmp, \ti
+#endif	
+	.endm
+
diff --git a/include/asm-sh/irq-sh73180.h b/include/asm-sh/irq-sh73180.h
deleted file mode 100644
index b28af9a..0000000
--- a/include/asm-sh/irq-sh73180.h
+++ /dev/null
@@ -1,314 +0,0 @@
-#ifndef __ASM_SH_IRQ_SH73180_H
-#define __ASM_SH_IRQ_SH73180_H
-
-/*
- * linux/include/asm-sh/irq-sh73180.h
- *
- * Copyright (C) 2004 Takashi SHUDO <shudo@hitachi-ul.co.jp>
- */
-
-#undef INTC_IPRA
-#undef INTC_IPRB
-#undef INTC_IPRC
-#undef INTC_IPRD
-
-#undef DMTE0_IRQ
-#undef DMTE1_IRQ
-#undef DMTE2_IRQ
-#undef DMTE3_IRQ
-#undef DMTE4_IRQ
-#undef DMTE5_IRQ
-#undef DMTE6_IRQ
-#undef DMTE7_IRQ
-#undef DMAE_IRQ
-#undef DMA_IPR_ADDR
-#undef DMA_IPR_POS
-#undef DMA_PRIORITY
-
-#undef INTC_IMCR0
-#undef INTC_IMCR1
-#undef INTC_IMCR2
-#undef INTC_IMCR3
-#undef INTC_IMCR4
-#undef INTC_IMCR5
-#undef INTC_IMCR6
-#undef INTC_IMCR7
-#undef INTC_IMCR8
-#undef INTC_IMCR9
-#undef INTC_IMCR10
-
-
-#define INTC_IPRA  	0xA4080000UL
-#define INTC_IPRB  	0xA4080004UL
-#define INTC_IPRC  	0xA4080008UL
-#define INTC_IPRD  	0xA408000CUL
-#define INTC_IPRE  	0xA4080010UL
-#define INTC_IPRF  	0xA4080014UL
-#define INTC_IPRG  	0xA4080018UL
-#define INTC_IPRH  	0xA408001CUL
-#define INTC_IPRI  	0xA4080020UL
-#define INTC_IPRJ  	0xA4080024UL
-#define INTC_IPRK  	0xA4080028UL
-
-#define INTC_IMR0	0xA4080080UL
-#define INTC_IMR1	0xA4080084UL
-#define INTC_IMR2	0xA4080088UL
-#define INTC_IMR3	0xA408008CUL
-#define INTC_IMR4	0xA4080090UL
-#define INTC_IMR5	0xA4080094UL
-#define INTC_IMR6	0xA4080098UL
-#define INTC_IMR7	0xA408009CUL
-#define INTC_IMR8	0xA40800A0UL
-#define INTC_IMR9	0xA40800A4UL
-#define INTC_IMR10	0xA40800A8UL
-#define INTC_IMR11	0xA40800ACUL
-
-#define INTC_IMCR0	0xA40800C0UL
-#define INTC_IMCR1	0xA40800C4UL
-#define INTC_IMCR2	0xA40800C8UL
-#define INTC_IMCR3	0xA40800CCUL
-#define INTC_IMCR4	0xA40800D0UL
-#define INTC_IMCR5	0xA40800D4UL
-#define INTC_IMCR6	0xA40800D8UL
-#define INTC_IMCR7	0xA40800DCUL
-#define INTC_IMCR8	0xA40800E0UL
-#define INTC_IMCR9	0xA40800E4UL
-#define INTC_IMCR10	0xA40800E8UL
-#define INTC_IMCR11	0xA40800ECUL
-
-#define INTC_ICR0	0xA4140000UL
-#define INTC_ICR1	0xA414001CUL
-
-#define INTMSK0		0xa4140044
-#define INTMSKCLR0	0xa4140064
-#define INTC_INTPRI0	0xa4140010
-
-/*
-  NOTE:
-
-  *_IRQ = (INTEVT2 - 0x200)/0x20
-*/
-
-/* TMU0 */
-#define TMU0_IRQ	16
-#define TMU0_IPR_ADDR	INTC_IPRA
-#define TMU0_IPR_POS	 3
-#define TMU0_PRIORITY	 2
-
-#define TIMER_IRQ       16
-#define TIMER_IPR_ADDR  INTC_IPRA
-#define TIMER_IPR_POS    3
-#define TIMER_PRIORITY   2
-
-/* TMU1 */
-#define TMU1_IRQ	17
-#define TMU1_IPR_ADDR	INTC_IPRA
-#define TMU1_IPR_POS	 2
-#define TMU1_PRIORITY	 2
-
-/* TMU2 */
-#define TMU2_IRQ	18
-#define TMU2_IPR_ADDR	INTC_IPRA
-#define TMU2_IPR_POS	 1
-#define TMU2_PRIORITY	 2
-
-/* LCDC */
-#define LCDC_IRQ	28
-#define LCDC_IPR_ADDR	INTC_IPRB
-#define LCDC_IPR_POS	 2
-#define LCDC_PRIORITY	 2
-
-/* VIO (Video I/O) */
-#define CEU_IRQ		52
-#define BEU_IRQ		53
-#define VEU_IRQ		54
-#define VOU_IRQ		55
-#define VIO_IPR_ADDR	INTC_IPRE
-#define VIO_IPR_POS	 2
-#define VIO_PRIORITY	 2
-
-/* MFI (Multi Functional Interface) */
-#define MFI_IRQ		56
-#define MFI_IPR_ADDR	INTC_IPRE
-#define MFI_IPR_POS	 1
-#define MFI_PRIORITY	 2
-
-/* VPU (Video Processing Unit) */
-#define VPU_IRQ		60
-#define VPU_IPR_ADDR	INTC_IPRE
-#define VPU_IPR_POS	 0
-#define VPU_PRIORITY	 2
-
-/* 3DG */
-#define TDG_IRQ		63
-#define TDG_IPR_ADDR	INTC_IPRJ
-#define TDG_IPR_POS	 2
-#define TDG_PRIORITY	 2
-
-/* DMAC(1) */
-#define DMTE0_IRQ	48
-#define DMTE1_IRQ	49
-#define DMTE2_IRQ	50
-#define DMTE3_IRQ	51
-#define DMA1_IPR_ADDR	INTC_IPRE
-#define DMA1_IPR_POS	3
-#define DMA1_PRIORITY	7
-
-/* DMAC(2) */
-#define DMTE4_IRQ	76
-#define DMTE5_IRQ	77
-#define DMA2_IPR_ADDR	INTC_IPRF
-#define DMA2_IPR_POS	2
-#define DMA2_PRIORITY	7
-
-/* SCIF0 */
-#define SCIF_ERI_IRQ	80
-#define SCIF_RXI_IRQ	81
-#define SCIF_BRI_IRQ	82
-#define SCIF_TXI_IRQ	83
-#define SCIF_IPR_ADDR	INTC_IPRG
-#define SCIF_IPR_POS	3
-#define SCIF_PRIORITY	3
-
-/* SIOF0 */
-#define SIOF0_IRQ	84
-#define SIOF0_IPR_ADDR	INTC_IPRH
-#define SIOF0_IPR_POS	3
-#define SIOF0_PRIORITY	3
-
-/* FLCTL (Flash Memory Controller) */
-#define FLSTE_IRQ	92
-#define FLTEND_IRQ	93
-#define FLTRQ0_IRQ	94
-#define FLTRQ1_IRQ	95
-#define FLCTL_IPR_ADDR	INTC_IPRH
-#define FLCTL_IPR_POS	1
-#define FLCTL_PRIORITY	3
-
-/* IIC(0) (IIC Bus Interface) */
-#define IIC0_ALI_IRQ	96
-#define IIC0_TACKI_IRQ	97
-#define IIC0_WAITI_IRQ	98
-#define IIC0_DTEI_IRQ	99
-#define IIC0_IPR_ADDR	INTC_IPRH
-#define IIC0_IPR_POS	0
-#define IIC0_PRIORITY	3
-
-/* IIC(1) (IIC Bus Interface) */
-#define IIC1_ALI_IRQ	44
-#define IIC1_TACKI_IRQ	45
-#define IIC1_WAITI_IRQ	46
-#define IIC1_DTEI_IRQ	47
-#define IIC1_IPR_ADDR	INTC_IPRG
-#define IIC1_IPR_POS	0
-#define IIC1_PRIORITY	3
-
-/* SIO0 */
-#define SIO0_IRQ	88
-#define SIO0_IPR_ADDR	INTC_IPRI
-#define SIO0_IPR_POS	3
-#define SIO0_PRIORITY	3
-
-/* SDHI */
-#define SDHI_SDHII0_IRQ	100
-#define SDHI_SDHII1_IRQ	101
-#define SDHI_SDHII2_IRQ	102
-#define SDHI_SDHII3_IRQ	103
-#define SDHI_IPR_ADDR	INTC_IPRK
-#define SDHI_IPR_POS	0
-#define SDHI_PRIORITY	3
-
-/* SIU (Sound Interface Unit) */
-#define SIU_IRQ		108
-#define SIU_IPR_ADDR	INTC_IPRJ
-#define SIU_IPR_POS	1
-#define SIU_PRIORITY	3
-
-#define PORT_PACR	0xA4050100UL
-#define PORT_PBCR	0xA4050102UL
-#define PORT_PCCR	0xA4050104UL
-#define PORT_PDCR	0xA4050106UL
-#define PORT_PECR	0xA4050108UL
-#define PORT_PFCR	0xA405010AUL
-#define PORT_PGCR	0xA405010CUL
-#define PORT_PHCR	0xA405010EUL
-#define PORT_PJCR	0xA4050110UL
-#define PORT_PKCR	0xA4050112UL
-#define PORT_PLCR	0xA4050114UL
-#define PORT_SCPCR	0xA4050116UL
-#define PORT_PMCR	0xA4050118UL
-#define PORT_PNCR	0xA405011AUL
-#define PORT_PQCR	0xA405011CUL
-#define PORT_PRCR	0xA405011EUL
-#define PORT_PTCR	0xA405014CUL
-#define PORT_PUCR	0xA405014EUL
-#define PORT_PVCR	0xA4050150UL
-
-#define PORT_PSELA	0xA4050140UL
-#define PORT_PSELB	0xA4050142UL
-#define PORT_PSELC	0xA4050144UL
-#define PORT_PSELE	0xA4050158UL
-
-#define PORT_HIZCRA	0xA4050146UL
-#define PORT_HIZCRB	0xA4050148UL
-#define PORT_DRVCR	0xA405014AUL
-
-#define PORT_PADR  	0xA4050120UL
-#define PORT_PBDR  	0xA4050122UL
-#define PORT_PCDR  	0xA4050124UL
-#define PORT_PDDR  	0xA4050126UL
-#define PORT_PEDR  	0xA4050128UL
-#define PORT_PFDR  	0xA405012AUL
-#define PORT_PGDR  	0xA405012CUL
-#define PORT_PHDR  	0xA405012EUL
-#define PORT_PJDR  	0xA4050130UL
-#define PORT_PKDR  	0xA4050132UL
-#define PORT_PLDR  	0xA4050134UL
-#define PORT_SCPDR  	0xA4050136UL
-#define PORT_PMDR  	0xA4050138UL
-#define PORT_PNDR  	0xA405013AUL
-#define PORT_PQDR  	0xA405013CUL
-#define PORT_PRDR  	0xA405013EUL
-#define PORT_PTDR  	0xA405016CUL
-#define PORT_PUDR  	0xA405016EUL
-#define PORT_PVDR  	0xA4050170UL
-
-#define IRQ0_IRQ	32
-#define IRQ1_IRQ	33
-#define IRQ2_IRQ	34
-#define IRQ3_IRQ	35
-#define IRQ4_IRQ	36
-#define IRQ5_IRQ	37
-#define IRQ6_IRQ	38
-#define IRQ7_IRQ	39
-
-#define INTPRI00	0xA4140010UL
-
-#define IRQ0_IPR_ADDR	INTPRI00
-#define IRQ1_IPR_ADDR	INTPRI00
-#define IRQ2_IPR_ADDR	INTPRI00
-#define IRQ3_IPR_ADDR	INTPRI00
-#define IRQ4_IPR_ADDR	INTPRI00
-#define IRQ5_IPR_ADDR	INTPRI00
-#define IRQ6_IPR_ADDR	INTPRI00
-#define IRQ7_IPR_ADDR	INTPRI00
-
-#define IRQ0_IPR_POS	7
-#define IRQ1_IPR_POS	6
-#define IRQ2_IPR_POS	5
-#define IRQ3_IPR_POS	4
-#define IRQ4_IPR_POS	3
-#define IRQ5_IPR_POS	2
-#define IRQ6_IPR_POS	1
-#define IRQ7_IPR_POS	0
-
-#define IRQ0_PRIORITY	1
-#define IRQ1_PRIORITY	1
-#define IRQ2_PRIORITY	1
-#define IRQ3_PRIORITY	1
-#define IRQ4_PRIORITY	1
-#define IRQ5_PRIORITY	1
-#define IRQ6_PRIORITY	1
-#define IRQ7_PRIORITY	1
-
-#endif /* __ASM_SH_IRQ_SH73180_H */
diff --git a/include/asm-sh/irq-sh7343.h b/include/asm-sh/irq-sh7343.h
deleted file mode 100644
index 5d15419..0000000
--- a/include/asm-sh/irq-sh7343.h
+++ /dev/null
@@ -1,317 +0,0 @@
-#ifndef __ASM_SH_IRQ_SH7343_H
-#define __ASM_SH_IRQ_SH7343_H
-
-/*
- * linux/include/asm-sh/irq-sh7343.h
- *
- * Copyright (C) 2006 Kenati Technologies Inc.
- * Andre Mccurdy <andre@kenati.com>
- * Ranjit Deshpande <ranjit@kenati.com>
- */
-
-#undef INTC_IPRA
-#undef INTC_IPRB
-#undef INTC_IPRC
-#undef INTC_IPRD
-
-#undef DMTE0_IRQ
-#undef DMTE1_IRQ
-#undef DMTE2_IRQ
-#undef DMTE3_IRQ
-#undef DMTE4_IRQ
-#undef DMTE5_IRQ
-#undef DMTE6_IRQ
-#undef DMTE7_IRQ
-#undef DMAE_IRQ
-#undef DMA_IPR_ADDR
-#undef DMA_IPR_POS
-#undef DMA_PRIORITY
-
-#undef INTC_IMCR0
-#undef INTC_IMCR1
-#undef INTC_IMCR2
-#undef INTC_IMCR3
-#undef INTC_IMCR4
-#undef INTC_IMCR5
-#undef INTC_IMCR6
-#undef INTC_IMCR7
-#undef INTC_IMCR8
-#undef INTC_IMCR9
-#undef INTC_IMCR10
-
-
-#define INTC_IPRA  	0xA4080000UL
-#define INTC_IPRB  	0xA4080004UL
-#define INTC_IPRC  	0xA4080008UL
-#define INTC_IPRD  	0xA408000CUL
-#define INTC_IPRE  	0xA4080010UL
-#define INTC_IPRF  	0xA4080014UL
-#define INTC_IPRG  	0xA4080018UL
-#define INTC_IPRH  	0xA408001CUL
-#define INTC_IPRI  	0xA4080020UL
-#define INTC_IPRJ  	0xA4080024UL
-#define INTC_IPRK  	0xA4080028UL
-#define INTC_IPRL  	0xA408002CUL
-
-#define INTC_IMR0	0xA4080080UL
-#define INTC_IMR1	0xA4080084UL
-#define INTC_IMR2	0xA4080088UL
-#define INTC_IMR3	0xA408008CUL
-#define INTC_IMR4	0xA4080090UL
-#define INTC_IMR5	0xA4080094UL
-#define INTC_IMR6	0xA4080098UL
-#define INTC_IMR7	0xA408009CUL
-#define INTC_IMR8	0xA40800A0UL
-#define INTC_IMR9	0xA40800A4UL
-#define INTC_IMR10	0xA40800A8UL
-#define INTC_IMR11	0xA40800ACUL
-
-#define INTC_IMCR0	0xA40800C0UL
-#define INTC_IMCR1	0xA40800C4UL
-#define INTC_IMCR2	0xA40800C8UL
-#define INTC_IMCR3	0xA40800CCUL
-#define INTC_IMCR4	0xA40800D0UL
-#define INTC_IMCR5	0xA40800D4UL
-#define INTC_IMCR6	0xA40800D8UL
-#define INTC_IMCR7	0xA40800DCUL
-#define INTC_IMCR8	0xA40800E0UL
-#define INTC_IMCR9	0xA40800E4UL
-#define INTC_IMCR10	0xA40800E8UL
-#define INTC_IMCR11	0xA40800ECUL
-
-#define INTC_ICR0	0xA4140000UL
-#define INTC_ICR1	0xA414001CUL
-
-#define INTMSK0		0xa4140044
-#define INTMSKCLR0	0xa4140064
-#define INTC_INTPRI0	0xa4140010
-
-/*
-  NOTE:
-
-  *_IRQ = (INTEVT2 - 0x200)/0x20
-*/
-
-/* TMU0 */
-#define TMU0_IRQ	16
-#define TMU0_IPR_ADDR	INTC_IPRA
-#define TMU0_IPR_POS	 3
-#define TMU0_PRIORITY	 2
-
-#define TIMER_IRQ       16
-#define TIMER_IPR_ADDR  INTC_IPRA
-#define TIMER_IPR_POS    3
-#define TIMER_PRIORITY   2
-
-/* TMU1 */
-#define TMU1_IRQ	17
-#define TMU1_IPR_ADDR	INTC_IPRA
-#define TMU1_IPR_POS	 2
-#define TMU1_PRIORITY	 2
-
-/* TMU2 */
-#define TMU2_IRQ	18
-#define TMU2_IPR_ADDR	INTC_IPRA
-#define TMU2_IPR_POS	 1
-#define TMU2_PRIORITY	 2
-
-/* LCDC */
-#define LCDC_IRQ	28
-#define LCDC_IPR_ADDR	INTC_IPRB
-#define LCDC_IPR_POS	 2
-#define LCDC_PRIORITY	 2
-
-/* VIO (Video I/O) */
-#define CEU_IRQ		52
-#define BEU_IRQ		53
-#define VEU_IRQ		54
-#define VOU_IRQ		55
-#define VIO_IPR_ADDR	INTC_IPRE
-#define VIO_IPR_POS	 2
-#define VIO_PRIORITY	 2
-
-/* MFI (Multi Functional Interface) */
-#define MFI_IRQ		56
-#define MFI_IPR_ADDR	INTC_IPRE
-#define MFI_IPR_POS	 1
-#define MFI_PRIORITY	 2
-
-/* VPU (Video Processing Unit) */
-#define VPU_IRQ		60
-#define VPU_IPR_ADDR	INTC_IPRE
-#define VPU_IPR_POS	 0
-#define VPU_PRIORITY	 2
-
-/* 3DG */
-#define TDG_IRQ		63
-#define TDG_IPR_ADDR	INTC_IPRJ
-#define TDG_IPR_POS	 2
-#define TDG_PRIORITY	 2
-
-/* DMAC(1) */
-#define DMTE0_IRQ	48
-#define DMTE1_IRQ	49
-#define DMTE2_IRQ	50
-#define DMTE3_IRQ	51
-#define DMA1_IPR_ADDR	INTC_IPRE
-#define DMA1_IPR_POS	3
-#define DMA1_PRIORITY	7
-
-/* DMAC(2) */
-#define DMTE4_IRQ	76
-#define DMTE5_IRQ	77
-#define DMA2_IPR_ADDR	INTC_IPRF
-#define DMA2_IPR_POS	2
-#define DMA2_PRIORITY	7
-
-/* SCIF0 */
-#define SCIF_ERI_IRQ	80
-#define SCIF_RXI_IRQ	81
-#define SCIF_BRI_IRQ	82
-#define SCIF_TXI_IRQ	83
-#define SCIF_IPR_ADDR	INTC_IPRG
-#define SCIF_IPR_POS	3
-#define SCIF_PRIORITY	3
-
-/* SIOF0 */
-#define SIOF0_IRQ	84
-#define SIOF0_IPR_ADDR	INTC_IPRH
-#define SIOF0_IPR_POS	3
-#define SIOF0_PRIORITY	3
-
-/* FLCTL (Flash Memory Controller) */
-#define FLSTE_IRQ	92
-#define FLTEND_IRQ	93
-#define FLTRQ0_IRQ	94
-#define FLTRQ1_IRQ	95
-#define FLCTL_IPR_ADDR	INTC_IPRH
-#define FLCTL_IPR_POS	1
-#define FLCTL_PRIORITY	3
-
-/* IIC(0) (IIC Bus Interface) */
-#define IIC0_ALI_IRQ	96
-#define IIC0_TACKI_IRQ	97
-#define IIC0_WAITI_IRQ	98
-#define IIC0_DTEI_IRQ	99
-#define IIC0_IPR_ADDR	INTC_IPRH
-#define IIC0_IPR_POS	0
-#define IIC0_PRIORITY	3
-
-/* IIC(1) (IIC Bus Interface) */
-#define IIC1_ALI_IRQ	44
-#define IIC1_TACKI_IRQ	45
-#define IIC1_WAITI_IRQ	46
-#define IIC1_DTEI_IRQ	47
-#define IIC1_IPR_ADDR	INTC_IPRI
-#define IIC1_IPR_POS	0
-#define IIC1_PRIORITY	3
-
-/* SIO0 */
-#define SIO0_IRQ	88
-#define SIO0_IPR_ADDR	INTC_IPRI
-#define SIO0_IPR_POS	3
-#define SIO0_PRIORITY	3
-
-/* SDHI */
-#define SDHI_SDHII0_IRQ	100
-#define SDHI_SDHII1_IRQ	101
-#define SDHI_SDHII2_IRQ	102
-#define SDHI_SDHII3_IRQ	103
-#define SDHI_IPR_ADDR	INTC_IPRK
-#define SDHI_IPR_POS	0
-#define SDHI_PRIORITY	3
-
-/* SIU (Sound Interface Unit) */
-#define SIU_IRQ		108
-#define SIU_IPR_ADDR	INTC_IPRJ
-#define SIU_IPR_POS	1
-#define SIU_PRIORITY	3
-
-#define PORT_PACR	0xA4050100UL
-#define PORT_PBCR	0xA4050102UL
-#define PORT_PCCR	0xA4050104UL
-#define PORT_PDCR	0xA4050106UL
-#define PORT_PECR	0xA4050108UL
-#define PORT_PFCR	0xA405010AUL
-#define PORT_PGCR	0xA405010CUL
-#define PORT_PHCR	0xA405010EUL
-#define PORT_PJCR	0xA4050110UL
-#define PORT_PKCR	0xA4050112UL
-#define PORT_PLCR	0xA4050114UL
-#define PORT_SCPCR	0xA4050116UL
-#define PORT_PMCR	0xA4050118UL
-#define PORT_PNCR	0xA405011AUL
-#define PORT_PQCR	0xA405011CUL
-#define PORT_PRCR	0xA405011EUL
-#define PORT_PTCR	0xA405014CUL
-#define PORT_PUCR	0xA405014EUL
-#define PORT_PVCR	0xA4050150UL
-
-#define PORT_PSELA	0xA4050140UL
-#define PORT_PSELB	0xA4050142UL
-#define PORT_PSELC	0xA4050144UL
-#define PORT_PSELE	0xA4050158UL
-
-#define PORT_HIZCRA	0xA4050146UL
-#define PORT_HIZCRB	0xA4050148UL
-#define PORT_DRVCR	0xA405014AUL
-
-#define PORT_PADR  	0xA4050120UL
-#define PORT_PBDR  	0xA4050122UL
-#define PORT_PCDR  	0xA4050124UL
-#define PORT_PDDR  	0xA4050126UL
-#define PORT_PEDR  	0xA4050128UL
-#define PORT_PFDR  	0xA405012AUL
-#define PORT_PGDR  	0xA405012CUL
-#define PORT_PHDR  	0xA405012EUL
-#define PORT_PJDR  	0xA4050130UL
-#define PORT_PKDR  	0xA4050132UL
-#define PORT_PLDR  	0xA4050134UL
-#define PORT_SCPDR  	0xA4050136UL
-#define PORT_PMDR  	0xA4050138UL
-#define PORT_PNDR  	0xA405013AUL
-#define PORT_PQDR  	0xA405013CUL
-#define PORT_PRDR  	0xA405013EUL
-#define PORT_PTDR  	0xA405016CUL
-#define PORT_PUDR  	0xA405016EUL
-#define PORT_PVDR  	0xA4050170UL
-
-#define IRQ0_IRQ	32
-#define IRQ1_IRQ	33
-#define IRQ2_IRQ	34
-#define IRQ3_IRQ	35
-#define IRQ4_IRQ	36
-#define IRQ5_IRQ	37
-#define IRQ6_IRQ	38
-#define IRQ7_IRQ	39
-
-#define INTPRI00	0xA4140010UL
-
-#define IRQ0_IPR_ADDR	INTPRI00
-#define IRQ1_IPR_ADDR	INTPRI00
-#define IRQ2_IPR_ADDR	INTPRI00
-#define IRQ3_IPR_ADDR	INTPRI00
-#define IRQ4_IPR_ADDR	INTPRI00
-#define IRQ5_IPR_ADDR	INTPRI00
-#define IRQ6_IPR_ADDR	INTPRI00
-#define IRQ7_IPR_ADDR	INTPRI00
-
-#define IRQ0_IPR_POS	7
-#define IRQ1_IPR_POS	6
-#define IRQ2_IPR_POS	5
-#define IRQ3_IPR_POS	4
-#define IRQ4_IPR_POS	3
-#define IRQ5_IPR_POS	2
-#define IRQ6_IPR_POS	1
-#define IRQ7_IPR_POS	0
-
-#define IRQ0_PRIORITY	1
-#define IRQ1_PRIORITY	1
-#define IRQ2_PRIORITY	1
-#define IRQ3_PRIORITY	1
-#define IRQ4_PRIORITY	1
-#define IRQ5_PRIORITY	1
-#define IRQ6_PRIORITY	1
-#define IRQ7_PRIORITY	1
-
-#endif /* __ASM_SH_IRQ_SH7343_H */
diff --git a/include/asm-sh/irq-sh7780.h b/include/asm-sh/irq-sh7780.h
deleted file mode 100644
index 19912ae..0000000
--- a/include/asm-sh/irq-sh7780.h
+++ /dev/null
@@ -1,311 +0,0 @@
-#ifndef __ASM_SH_IRQ_SH7780_H
-#define __ASM_SH_IRQ_SH7780_H
-
-/*
- * linux/include/asm-sh/irq-sh7780.h
- *
- * Copyright (C) 2004 Takashi SHUDO <shudo@hitachi-ul.co.jp>
- */
-#define INTC_BASE	0xffd00000
-#define INTC_ICR0	(INTC_BASE+0x0)
-#define INTC_ICR1	(INTC_BASE+0x1c)
-#define INTC_INTPRI	(INTC_BASE+0x10)
-#define INTC_INTREQ	(INTC_BASE+0x24)
-#define INTC_INTMSK0	(INTC_BASE+0x44)
-#define INTC_INTMSK1	(INTC_BASE+0x48)
-#define INTC_INTMSK2	(INTC_BASE+0x40080)
-#define INTC_INTMSKCLR0	(INTC_BASE+0x64)
-#define INTC_INTMSKCLR1	(INTC_BASE+0x68)
-#define INTC_INTMSKCLR2	(INTC_BASE+0x40084)
-#define INTC_NMIFCR	(INTC_BASE+0xc0)
-#define INTC_USERIMASK	(INTC_BASE+0x30000)
-
-#define	INTC_INT2PRI0	(INTC_BASE+0x40000)
-#define	INTC_INT2PRI1	(INTC_BASE+0x40004)
-#define	INTC_INT2PRI2	(INTC_BASE+0x40008)
-#define	INTC_INT2PRI3	(INTC_BASE+0x4000c)
-#define	INTC_INT2PRI4	(INTC_BASE+0x40010)
-#define	INTC_INT2PRI5	(INTC_BASE+0x40014)
-#define	INTC_INT2PRI6	(INTC_BASE+0x40018)
-#define	INTC_INT2PRI7	(INTC_BASE+0x4001c)
-#define	INTC_INT2A0	(INTC_BASE+0x40030)
-#define	INTC_INT2A1	(INTC_BASE+0x40034)
-#define	INTC_INT2MSKR	(INTC_BASE+0x40038)
-#define	INTC_INT2MSKCR	(INTC_BASE+0x4003c)
-#define	INTC_INT2B0	(INTC_BASE+0x40040)
-#define	INTC_INT2B1	(INTC_BASE+0x40044)
-#define	INTC_INT2B2	(INTC_BASE+0x40048)
-#define	INTC_INT2B3	(INTC_BASE+0x4004c)
-#define	INTC_INT2B4	(INTC_BASE+0x40050)
-#define	INTC_INT2B5	(INTC_BASE+0x40054)
-#define	INTC_INT2B6	(INTC_BASE+0x40058)
-#define	INTC_INT2B7	(INTC_BASE+0x4005c)
-#define	INTC_INT2GPIC	(INTC_BASE+0x40090)
-/*
-  NOTE:
-  *_IRQ = (INTEVT2 - 0x200)/0x20
-*/
-/* IRQ 0-7 line external int*/
-#define IRQ0_IRQ	2
-#define IRQ0_IPR_ADDR	INTC_INTPRI
-#define IRQ0_IPR_POS	7
-#define IRQ0_PRIORITY	2
-
-#define IRQ1_IRQ	4
-#define IRQ1_IPR_ADDR	INTC_INTPRI
-#define IRQ1_IPR_POS	6
-#define IRQ1_PRIORITY	2
-
-#define IRQ2_IRQ	6
-#define IRQ2_IPR_ADDR	INTC_INTPRI
-#define IRQ2_IPR_POS	5
-#define IRQ2_PRIORITY	2
-
-#define IRQ3_IRQ	8
-#define IRQ3_IPR_ADDR	INTC_INTPRI
-#define IRQ3_IPR_POS	4
-#define IRQ3_PRIORITY	2
-
-#define IRQ4_IRQ	10
-#define IRQ4_IPR_ADDR	INTC_INTPRI
-#define IRQ4_IPR_POS	3
-#define IRQ4_PRIORITY	2
-
-#define IRQ5_IRQ	12
-#define IRQ5_IPR_ADDR	INTC_INTPRI
-#define IRQ5_IPR_POS	2
-#define IRQ5_PRIORITY	2
-
-#define IRQ6_IRQ	14
-#define IRQ6_IPR_ADDR	INTC_INTPRI
-#define IRQ6_IPR_POS	1
-#define IRQ6_PRIORITY	2
-
-#define IRQ7_IRQ	0
-#define IRQ7_IPR_ADDR	INTC_INTPRI
-#define IRQ7_IPR_POS	0
-#define IRQ7_PRIORITY	2
-
-/* TMU */
-/* ch0 */
-#define TMU_IRQ		28
-#define	TMU_IPR_ADDR	INTC_INT2PRI0
-#define	TMU_IPR_POS	3
-#define TMU_PRIORITY	2
-
-#define TIMER_IRQ	28
-#define	TIMER_IPR_ADDR	INTC_INT2PRI0
-#define	TIMER_IPR_POS	3
-#define TIMER_PRIORITY	2
-
-/* ch 1*/
-#define TMU_CH1_IRQ		29
-#define	TMU_CH1_IPR_ADDR	INTC_INT2PRI0
-#define	TMU_CH1_IPR_POS		2
-#define TMU_CH1_PRIORITY	2
-
-#define TIMER1_IRQ	29
-#define	TIMER1_IPR_ADDR	INTC_INT2PRI0
-#define	TIMER1_IPR_POS	2
-#define TIMER1_PRIORITY	2
-
-/* ch 2*/
-#define TMU_CH2_IRQ		30
-#define	TMU_CH2_IPR_ADDR	INTC_INT2PRI0
-#define	TMU_CH2_IPR_POS		1
-#define TMU_CH2_PRIORITY	2
-/* ch 2 Input capture */
-#define TMU_CH2IC_IRQ		31
-#define	TMU_CH2IC_IPR_ADDR	INTC_INT2PRI0
-#define	TMU_CH2IC_IPR_POS	0
-#define TMU_CH2IC_PRIORITY	2
-/* ch 3 */
-#define TMU_CH3_IRQ		96
-#define	TMU_CH3_IPR_ADDR	INTC_INT2PRI1
-#define	TMU_CH3_IPR_POS		3
-#define TMU_CH3_PRIORITY	2
-/* ch 4 */
-#define TMU_CH4_IRQ		97
-#define	TMU_CH4_IPR_ADDR	INTC_INT2PRI1
-#define	TMU_CH4_IPR_POS		2
-#define TMU_CH4_PRIORITY	2
-/* ch 5*/
-#define TMU_CH5_IRQ		98
-#define	TMU_CH5_IPR_ADDR	INTC_INT2PRI1
-#define	TMU_CH5_IPR_POS		1
-#define TMU_CH5_PRIORITY	2
-
-/* SCIF0 */
-#define SCIF0_ERI_IRQ	40
-#define SCIF0_RXI_IRQ	41
-#define SCIF0_BRI_IRQ	42
-#define SCIF0_TXI_IRQ	43
-#define	SCIF0_IPR_ADDR	INTC_INT2PRI2
-#define	SCIF0_IPR_POS	3
-#define SCIF0_PRIORITY	3
-
-/* SCIF1 */
-#define SCIF1_ERI_IRQ	76
-#define SCIF1_RXI_IRQ	77
-#define SCIF1_BRI_IRQ	78
-#define SCIF1_TXI_IRQ	79
-#define	SCIF1_IPR_ADDR	INTC_INT2PRI2
-#define	SCIF1_IPR_POS	2
-#define SCIF1_PRIORITY	3
-
-#define	WDT_IRQ		27
-#define	WDT_IPR_ADDR	INTC_INT2PRI2
-#define	WDT_IPR_POS	1
-#define	WDT_PRIORITY	2
-
-/* DMAC(0) */
-#define	DMINT0_IRQ	34
-#define	DMINT1_IRQ	35
-#define	DMINT2_IRQ	36
-#define	DMINT3_IRQ	37
-#define	DMINT4_IRQ	44
-#define	DMINT5_IRQ	45
-#define	DMINT6_IRQ	46
-#define	DMINT7_IRQ	47
-#define	DMAE_IRQ	38
-#define	DMA0_IPR_ADDR	INTC_INT2PRI3
-#define	DMA0_IPR_POS	2
-#define	DMA0_PRIORITY	7
-
-/* DMAC(1) */
-#define	DMINT8_IRQ	92
-#define	DMINT9_IRQ	93
-#define	DMINT10_IRQ	94
-#define	DMINT11_IRQ	95
-#define	DMA1_IPR_ADDR	INTC_INT2PRI3
-#define	DMA1_IPR_POS	1
-#define	DMA1_PRIORITY	7
-
-#define	DMTE0_IRQ	DMINT0_IRQ
-#define	DMTE4_IRQ	DMINT4_IRQ
-#define	DMA_IPR_ADDR	DMA0_IPR_ADDR
-#define	DMA_IPR_POS	DMA0_IPR_POS
-#define	DMA_PRIORITY	DMA0_PRIORITY
-
-/* CMT */
-#define	CMT_IRQ		56
-#define	CMT_IPR_ADDR	INTC_INT2PRI4
-#define	CMT_IPR_POS	3
-#define	CMT_PRIORITY	0
-
-/* HAC */
-#define	HAC_IRQ		60
-#define	HAC_IPR_ADDR	INTC_INT2PRI4
-#define	HAC_IPR_POS	2
-#define	CMT_PRIORITY	0
-
-/* PCIC(0) */
-#define	PCIC0_IRQ	64
-#define	PCIC0_IPR_ADDR	INTC_INT2PRI4
-#define	PCIC0_IPR_POS	1
-#define	PCIC0_PRIORITY	2
-
-/* PCIC(1) */
-#define	PCIC1_IRQ	65
-#define	PCIC1_IPR_ADDR	INTC_INT2PRI4
-#define	PCIC1_IPR_POS	0
-#define	PCIC1_PRIORITY	2
-
-/* PCIC(2) */
-#define	PCIC2_IRQ	66
-#define	PCIC2_IPR_ADDR	INTC_INT2PRI5
-#define	PCIC2_IPR_POS	3
-#define	PCIC2_PRIORITY	2
-
-/* PCIC(3) */
-#define	PCIC3_IRQ	67
-#define	PCIC3_IPR_ADDR	INTC_INT2PRI5
-#define	PCIC3_IPR_POS	2
-#define	PCIC3_PRIORITY	2
-
-/* PCIC(4) */
-#define	PCIC4_IRQ	68
-#define	PCIC4_IPR_ADDR	INTC_INT2PRI5
-#define	PCIC4_IPR_POS	1
-#define	PCIC4_PRIORITY	2
-
-/* PCIC(5) */
-#define	PCICERR_IRQ	69
-#define	PCICPWD3_IRQ	70
-#define	PCICPWD2_IRQ	71
-#define	PCICPWD1_IRQ	72
-#define	PCICPWD0_IRQ	73
-#define	PCIC5_IPR_ADDR	INTC_INT2PRI5
-#define	PCIC5_IPR_POS	0
-#define	PCIC5_PRIORITY	2
-
-/* SIOF */
-#define	SIOF_IRQ	80
-#define	SIOF_IPR_ADDR	INTC_INT2PRI6
-#define	SIOF_IPR_POS	3
-#define	SIOF_PRIORITY	3
-
-/* HSPI */
-#define	HSPI_IRQ	84
-#define	HSPI_IPR_ADDR	INTC_INT2PRI6
-#define	HSPI_IPR_POS	2
-#define	HSPI_PRIORITY	3
-
-/* MMCIF */
-#define	MMCIF_FSTAT_IRQ	88
-#define	MMCIF_TRAN_IRQ	89
-#define	MMCIF_ERR_IRQ	90
-#define	MMCIF_FRDY_IRQ	91
-#define	MMCIF_IPR_ADDR	INTC_INT2PRI6
-#define	MMCIF_IPR_POS	1
-#define	HSPI_PRIORITY	3
-
-/* SSI */
-#define	SSI_IRQ		100
-#define	SSI_IPR_ADDR	INTC_INT2PRI6
-#define	SSI_IPR_POS	0
-#define	SSI_PRIORITY	3
-
-/* FLCTL */
-#define	FLCTL_FLSTE_IRQ		104
-#define	FLCTL_FLTEND_IRQ	105
-#define	FLCTL_FLTRQ0_IRQ	106
-#define	FLCTL_FLTRQ1_IRQ	107
-#define	FLCTL_IPR_ADDR		INTC_INT2PRI7
-#define	FLCTL_IPR_POS		3
-#define	FLCTL_PRIORITY		3
-
-/* GPIO */
-#define	GPIO0_IRQ	108
-#define	GPIO1_IRQ	109
-#define	GPIO2_IRQ	110
-#define	GPIO3_IRQ	111
-#define	GPIO_IPR_ADDR	INTC_INT2PRI7
-#define	GPIO_IPR_POS	2
-#define	GPIO_PRIORITY	3
-
-#define	INTC_TMU0_MSK	0
-#define	INTC_TMU3_MSK	1
-#define	INTC_RTC_MSK	2
-#define	INTC_SCIF0_MSK	3
-#define	INTC_SCIF1_MSK	4
-#define	INTC_WDT_MSK	5
-#define	INTC_HUID_MSK	7
-#define	INTC_DMAC0_MSK	8
-#define	INTC_DMAC1_MSK	9
-#define	INTC_CMT_MSK	12
-#define	INTC_HAC_MSK	13
-#define	INTC_PCIC0_MSK	14
-#define	INTC_PCIC1_MSK	15
-#define	INTC_PCIC2_MSK	16
-#define	INTC_PCIC3_MSK	17
-#define	INTC_PCIC4_MSK	18
-#define	INTC_PCIC5_MSK	19
-#define	INTC_SIOF_MSK	20
-#define	INTC_HSPI_MSK	21
-#define	INTC_MMCIF_MSK	22
-#define	INTC_SSI_MSK	23
-#define	INTC_FLCTL_MSK	24
-#define	INTC_GPIO_MSK	25
-
-#endif /* __ASM_SH_IRQ_SH7780_H */
diff --git a/include/asm-sh/irq.h b/include/asm-sh/irq.h
index 6cd3e9e..fd57608 100644
--- a/include/asm-sh/irq.h
+++ b/include/asm-sh/irq.h
@@ -1,233 +1,9 @@
 #ifndef __ASM_SH_IRQ_H
 #define __ASM_SH_IRQ_H
 
-/*
- *
- * linux/include/asm-sh/irq.h
- *
- * Copyright (C) 1999  Niibe Yutaka & Takeshi Yaegashi
- * Copyright (C) 2000  Kazumoto Kojima
- * Copyright (C) 2003  Paul Mundt
- *
- */
-
 #include <asm/machvec.h>
 #include <asm/ptrace.h>		/* for pt_regs */
 
-#ifndef CONFIG_CPU_SUBTYPE_SH7780
-
-#define INTC_DMAC0_MSK	0
-
-#if defined(CONFIG_CPU_SH3)
-#define INTC_IPRA	0xfffffee2UL
-#define INTC_IPRB	0xfffffee4UL
-#elif defined(CONFIG_CPU_SH4)
-#define INTC_IPRA	0xffd00004UL
-#define INTC_IPRB	0xffd00008UL
-#define INTC_IPRC	0xffd0000cUL
-#define INTC_IPRD	0xffd00010UL
-#endif
-
-#define TIMER_IRQ	16
-#define TIMER_IPR_ADDR	INTC_IPRA
-#define TIMER_IPR_POS	 3
-#define TIMER_PRIORITY	 2
-
-#define TIMER1_IRQ	17
-#define TIMER1_IPR_ADDR	INTC_IPRA
-#define TIMER1_IPR_POS	 2
-#define TIMER1_PRIORITY	 4
-
-#define RTC_IRQ		22
-#define RTC_IPR_ADDR	INTC_IPRA
-#define RTC_IPR_POS	 0
-#define RTC_PRIORITY	TIMER_PRIORITY
-
-#if defined(CONFIG_CPU_SH3)
-#define DMTE0_IRQ	48
-#define DMTE1_IRQ	49
-#define DMTE2_IRQ	50
-#define DMTE3_IRQ	51
-#define DMA_IPR_ADDR	INTC_IPRE
-#define DMA_IPR_POS	3
-#define DMA_PRIORITY	7
-#if defined(CONFIG_CPU_SUBTYPE_SH7300)
-/* TMU2 */
-#define TIMER2_IRQ      18
-#define TIMER2_IPR_ADDR INTC_IPRA
-#define TIMER2_IPR_POS   1
-#define TIMER2_PRIORITY  2
-
-/* WDT */
-#define WDT_IRQ		27
-#define WDT_IPR_ADDR	INTC_IPRB
-#define WDT_IPR_POS	 3
-#define WDT_PRIORITY	 2
-
-/* SIM (SIM Card Module) */
-#define SIM_ERI_IRQ	23
-#define SIM_RXI_IRQ	24
-#define SIM_TXI_IRQ	25
-#define SIM_TEND_IRQ	26
-#define SIM_IPR_ADDR	INTC_IPRB
-#define SIM_IPR_POS	 1
-#define SIM_PRIORITY	 2
-
-/* VIO (Video I/O) */
-#define VIO_IRQ		52
-#define VIO_IPR_ADDR	INTC_IPRE
-#define VIO_IPR_POS	 2
-#define VIO_PRIORITY	 2
-
-/* MFI (Multi Functional Interface) */
-#define MFI_IRQ		56
-#define MFI_IPR_ADDR	INTC_IPRE
-#define MFI_IPR_POS	 1
-#define MFI_PRIORITY	 2
-
-/* VPU (Video Processing Unit) */
-#define VPU_IRQ		60
-#define VPU_IPR_ADDR	INTC_IPRE
-#define VPU_IPR_POS	 0
-#define VPU_PRIORITY	 2
-
-/* KEY (Key Scan Interface) */
-#define KEY_IRQ		79
-#define KEY_IPR_ADDR	INTC_IPRF
-#define KEY_IPR_POS	 3
-#define KEY_PRIORITY	 2
-
-/* CMT (Compare Match Timer) */
-#define CMT_IRQ		104
-#define CMT_IPR_ADDR	INTC_IPRF
-#define CMT_IPR_POS	 0
-#define CMT_PRIORITY	 2
-
-/* DMAC(1) */
-#define DMTE0_IRQ	48
-#define DMTE1_IRQ	49
-#define DMTE2_IRQ	50
-#define DMTE3_IRQ	51
-#define DMA1_IPR_ADDR	INTC_IPRE
-#define DMA1_IPR_POS	3
-#define DMA1_PRIORITY	7
-
-/* DMAC(2) */
-#define DMTE4_IRQ	76
-#define DMTE5_IRQ	77
-#define DMA2_IPR_ADDR	INTC_IPRF
-#define DMA2_IPR_POS	2
-#define DMA2_PRIORITY	7
-
-/* SIOF0 */
-#define SIOF0_IRQ	84
-#define SIOF0_IPR_ADDR	INTC_IPRH
-#define SIOF0_IPR_POS	3
-#define SIOF0_PRIORITY	3
-
-/* FLCTL (Flash Memory Controller) */
-#define FLSTE_IRQ	92
-#define FLTEND_IRQ	93
-#define FLTRQ0_IRQ	94
-#define FLTRQ1_IRQ	95
-#define FLCTL_IPR_ADDR	INTC_IPRH
-#define FLCTL_IPR_POS	1
-#define FLCTL_PRIORITY	3
-
-/* IIC (IIC Bus Interface) */
-#define IIC_ALI_IRQ	96
-#define IIC_TACKI_IRQ	97
-#define IIC_WAITI_IRQ	98
-#define IIC_DTEI_IRQ	99
-#define IIC_IPR_ADDR	INTC_IPRH
-#define IIC_IPR_POS	0
-#define IIC_PRIORITY	3
-
-/* SIO0 */
-#define SIO0_IRQ	88
-#define SIO0_IPR_ADDR	INTC_IPRI
-#define SIO0_IPR_POS	3
-#define SIO0_PRIORITY	3
-
-/* SIU (Sound Interface Unit) */
-#define SIU_IRQ		108
-#define SIU_IPR_ADDR	INTC_IPRJ
-#define SIU_IPR_POS	1
-#define SIU_PRIORITY	3
-
-#endif
-#elif defined(CONFIG_CPU_SH4)
-#define DMTE0_IRQ	34
-#define DMTE1_IRQ	35
-#define DMTE2_IRQ	36
-#define DMTE3_IRQ	37
-#define DMTE4_IRQ	44	/* 7751R only */
-#define DMTE5_IRQ	45	/* 7751R only */
-#define DMTE6_IRQ	46	/* 7751R only */
-#define DMTE7_IRQ	47	/* 7751R only */
-#define DMAE_IRQ	38
-#define DMA_IPR_ADDR	INTC_IPRC
-#define DMA_IPR_POS	2
-#define DMA_PRIORITY	7
-#endif
-
-#if defined (CONFIG_CPU_SUBTYPE_SH7707) || defined (CONFIG_CPU_SUBTYPE_SH7708) || \
-    defined (CONFIG_CPU_SUBTYPE_SH7709) || defined (CONFIG_CPU_SUBTYPE_SH7750) || \
-    defined (CONFIG_CPU_SUBTYPE_SH7751) || defined (CONFIG_CPU_SUBTYPE_SH7706)
-#define SCI_ERI_IRQ	23
-#define SCI_RXI_IRQ	24
-#define SCI_TXI_IRQ	25
-#define SCI_IPR_ADDR	INTC_IPRB
-#define SCI_IPR_POS	1
-#define SCI_PRIORITY	3
-#endif
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7300)
-#define SCIF0_IRQ	80
-#define SCIF0_IPR_ADDR	INTC_IPRG
-#define SCIF0_IPR_POS	3
-#define SCIF0_PRIORITY	3
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7706) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7707) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7709)
-#define SCIF_ERI_IRQ	56
-#define SCIF_RXI_IRQ	57
-#define SCIF_BRI_IRQ	58
-#define SCIF_TXI_IRQ	59
-#define SCIF_IPR_ADDR	INTC_IPRE
-#define SCIF_IPR_POS	1
-#define SCIF_PRIORITY	3
-
-#define IRDA_ERI_IRQ	52
-#define IRDA_RXI_IRQ	53
-#define IRDA_BRI_IRQ	54
-#define IRDA_TXI_IRQ	55
-#define IRDA_IPR_ADDR	INTC_IPRE
-#define IRDA_IPR_POS	2
-#define IRDA_PRIORITY	3
-#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7751) || \
-      defined(CONFIG_CPU_SUBTYPE_ST40STB1) || defined(CONFIG_CPU_SUBTYPE_SH4_202)
-#define SCIF_ERI_IRQ	40
-#define SCIF_RXI_IRQ	41
-#define SCIF_BRI_IRQ	42
-#define SCIF_TXI_IRQ	43
-#define SCIF_IPR_ADDR	INTC_IPRC
-#define SCIF_IPR_POS	1
-#define SCIF_PRIORITY	3
-#if defined(CONFIG_CPU_SUBTYPE_ST40STB1)
-#define SCIF1_ERI_IRQ	23
-#define SCIF1_RXI_IRQ	24
-#define SCIF1_BRI_IRQ	25
-#define SCIF1_TXI_IRQ	26
-#define SCIF1_IPR_ADDR	INTC_IPRB
-#define SCIF1_IPR_POS	1
-#define SCIF1_PRIORITY	3
-#endif /* ST40STB1 */
-
-#endif /* 775x / SH4-202 / ST40STB1 */
-#endif /* 7780 */
-
 /* NR_IRQS is made from three components:
 *   1. ONCHIP_NR_IRQS - number of IRLs + on-chip peripheral modules
  *   2. PINT_NR_IRQS   - number of PINT interrupts
@@ -265,6 +41,10 @@
 # define ONCHIP_NR_IRQS 109
 #elif defined(CONFIG_CPU_SUBTYPE_SH7780)
 # define ONCHIP_NR_IRQS 111
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+# define ONCHIP_NR_IRQS 256
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+# define ONCHIP_NR_IRQS 128
 #elif defined(CONFIG_SH_UNKNOWN)	/* Must be last */
 # define ONCHIP_NR_IRQS 144
 #endif
@@ -312,9 +92,11 @@
 /* NR_IRQS. 1+2+3 */
 #define NR_IRQS (ONCHIP_NR_IRQS + PINT_NR_IRQS + OFFCHIP_NR_IRQS)
 
-extern void disable_irq(unsigned int);
-extern void disable_irq_nosync(unsigned int);
-extern void enable_irq(unsigned int);
+/*
+ * Convert back and forth between INTEVT and IRQ values.
+ */
+#define evt2irq(evt)		(((evt) >> 5) - 16)
+#define irq2evt(irq)		(((irq) + 16) << 5)
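The conversion is linear: hardware INTEVT codes start at 0x200 and advance in steps of 0x20, landing on IRQ 0 upward. A small sanity-check sketch of the two macros (the INTEVT values used are only illustrative):

/* Round-trip sanity check of the INTEVT <-> IRQ conversion macros above. */
static inline void example_evt_roundtrip(void)
{
	BUILD_BUG_ON(evt2irq(0x200) != 0);		/* first INTEVT code -> IRQ 0 */
	BUILD_BUG_ON(evt2irq(0x400) != 16);		/* 0x400 -> IRQ 16 */
	BUILD_BUG_ON(irq2evt(evt2irq(0x620)) != 0x620);	/* conversion is lossless */
}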
 
 /*
  * Simple Mask Register Support
@@ -327,362 +109,36 @@
  */
 void init_IRQ_pint(void);
 
+/*
+ * The shift value is now the number of bits to shift, not the number of
+ * bits/4. This is to make it easier to read the value directly from the
+ * datasheets. The IPR address, addr, will be set from ipr_idx via the
+ * map_ipridx_to_addr function.
+ */
 struct ipr_data {
 	unsigned int irq;
-	unsigned int addr;	/* Address of Interrupt Priority Register */
-	int shift;		/* Shifts of the 16-bit data */
+	int ipr_idx;		/* Index for the IPR register */
+	int shift;		/* Number of bits to shift the data */
 	int priority;		/* The priority */
+	unsigned int addr;	/* Address of Interrupt Priority Register */
 };
 
 /*
+ * Given an IPR IDX, map the value to an IPR register address.
+ */
+unsigned int map_ipridx_to_addr(int idx);
+
+/*
+ * Enable individual interrupt mode for external IPR IRQs.
+ */
+void ipr_irq_enable_irlm(void);
+
+/*
  * Function for "on chip support modules".
  */
-extern void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs);
-extern void make_imask_irq(unsigned int irq);
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7300)
-#undef INTC_IPRA
-#undef INTC_IPRB
-#define INTC_IPRA  	0xA414FEE2UL
-#define INTC_IPRB  	0xA414FEE4UL
-#define INTC_IPRC  	0xA4140016UL
-#define INTC_IPRD  	0xA4140018UL
-#define INTC_IPRE  	0xA414001AUL
-#define INTC_IPRF  	0xA4080000UL
-#define INTC_IPRG  	0xA4080002UL
-#define INTC_IPRH  	0xA4080004UL
-#define INTC_IPRI  	0xA4080006UL
-#define INTC_IPRJ  	0xA4080008UL
-
-#define INTC_IMR0	0xA4080040UL
-#define INTC_IMR1	0xA4080042UL
-#define INTC_IMR2	0xA4080044UL
-#define INTC_IMR3	0xA4080046UL
-#define INTC_IMR4	0xA4080048UL
-#define INTC_IMR5	0xA408004AUL
-#define INTC_IMR6	0xA408004CUL
-#define INTC_IMR7	0xA408004EUL
-#define INTC_IMR8	0xA4080050UL
-#define INTC_IMR9	0xA4080052UL
-#define INTC_IMR10	0xA4080054UL
-
-#define INTC_IMCR0	0xA4080060UL
-#define INTC_IMCR1	0xA4080062UL
-#define INTC_IMCR2	0xA4080064UL
-#define INTC_IMCR3	0xA4080066UL
-#define INTC_IMCR4	0xA4080068UL
-#define INTC_IMCR5	0xA408006AUL
-#define INTC_IMCR6	0xA408006CUL
-#define INTC_IMCR7	0xA408006EUL
-#define INTC_IMCR8	0xA4080070UL
-#define INTC_IMCR9	0xA4080072UL
-#define INTC_IMCR10	0xA4080074UL
-
-#define INTC_ICR0	0xA414FEE0UL
-#define INTC_ICR1	0xA4140010UL
-
-#define INTC_IRR0	0xA4140004UL
-
-#define PORT_PACR	0xA4050100UL
-#define PORT_PBCR	0xA4050102UL
-#define PORT_PCCR	0xA4050104UL
-#define PORT_PDCR	0xA4050106UL
-#define PORT_PECR	0xA4050108UL
-#define PORT_PFCR	0xA405010AUL
-#define PORT_PGCR	0xA405010CUL
-#define PORT_PHCR	0xA405010EUL
-#define PORT_PJCR	0xA4050110UL
-#define PORT_PKCR	0xA4050112UL
-#define PORT_PLCR	0xA4050114UL
-#define PORT_SCPCR	0xA4050116UL
-#define PORT_PMCR	0xA4050118UL
-#define PORT_PNCR	0xA405011AUL
-#define PORT_PQCR	0xA405011CUL
-
-#define PORT_PSELA	0xA4050140UL
-#define PORT_PSELB	0xA4050142UL
-#define PORT_PSELC	0xA4050144UL
-
-#define PORT_HIZCRA	0xA4050146UL
-#define PORT_HIZCRB	0xA4050148UL
-#define PORT_DRVCR	0xA4050150UL
-
-#define PORT_PADR  	0xA4050120UL
-#define PORT_PBDR  	0xA4050122UL
-#define PORT_PCDR  	0xA4050124UL
-#define PORT_PDDR  	0xA4050126UL
-#define PORT_PEDR  	0xA4050128UL
-#define PORT_PFDR  	0xA405012AUL
-#define PORT_PGDR  	0xA405012CUL
-#define PORT_PHDR  	0xA405012EUL
-#define PORT_PJDR  	0xA4050130UL
-#define PORT_PKDR  	0xA4050132UL
-#define PORT_PLDR  	0xA4050134UL
-#define PORT_SCPDR  	0xA4050136UL
-#define PORT_PMDR  	0xA4050138UL
-#define PORT_PNDR  	0xA405013AUL
-#define PORT_PQDR  	0xA405013CUL
-
-#define IRQ0_IRQ	32
-#define IRQ1_IRQ	33
-#define IRQ2_IRQ	34
-#define IRQ3_IRQ	35
-#define IRQ4_IRQ	36
-#define IRQ5_IRQ	37
-
-#define IRQ0_IPR_ADDR	INTC_IPRC
-#define IRQ1_IPR_ADDR	INTC_IPRC
-#define IRQ2_IPR_ADDR	INTC_IPRC
-#define IRQ3_IPR_ADDR	INTC_IPRC
-#define IRQ4_IPR_ADDR	INTC_IPRD
-#define IRQ5_IPR_ADDR	INTC_IPRD
-
-#define IRQ0_IPR_POS	0
-#define IRQ1_IPR_POS	1
-#define IRQ2_IPR_POS	2
-#define IRQ3_IPR_POS	3
-#define IRQ4_IPR_POS	0
-#define IRQ5_IPR_POS	1
-
-#define IRQ0_PRIORITY	1
-#define IRQ1_PRIORITY	1
-#define IRQ2_PRIORITY	1
-#define IRQ3_PRIORITY	1
-#define IRQ4_PRIORITY	1
-#define IRQ5_PRIORITY	1
-
-extern int ipr_irq_demux(int irq);
-#define __irq_demux(irq) ipr_irq_demux(irq)
-
-#elif defined(CONFIG_CPU_SUBTYPE_SH7604)
-#define INTC_IPRA	0xfffffee2UL
-#define INTC_IPRB	0xfffffe60UL
-
-#define INTC_VCRA	0xfffffe62UL
-#define INTC_VCRB	0xfffffe64UL
-#define INTC_VCRC	0xfffffe66UL
-#define INTC_VCRD	0xfffffe68UL
-
-#define INTC_VCRWDT	0xfffffee4UL
-#define INTC_VCRDIV	0xffffff0cUL
-#define INTC_VCRDMA0	0xffffffa0UL
-#define INTC_VCRDMA1	0xffffffa8UL
-
-#define INTC_ICR	0xfffffee0UL
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7706) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7707) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7709) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7710)
-#define INTC_IRR0	0xa4000004UL
-#define INTC_IRR1	0xa4000006UL
-#define INTC_IRR2	0xa4000008UL
-
-#define INTC_ICR0	0xfffffee0UL
-#define INTC_ICR1	0xa4000010UL
-#define INTC_ICR2	0xa4000012UL
-#define INTC_INTER	0xa4000014UL
-
-#define INTC_IPRC	0xa4000016UL
-#define INTC_IPRD	0xa4000018UL
-#define INTC_IPRE	0xa400001aUL
-#if defined(CONFIG_CPU_SUBTYPE_SH7707)
-#define INTC_IPRF	0xa400001cUL
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
-#define INTC_IPRF	0xa4080000UL
-#define INTC_IPRG	0xa4080002UL
-#define INTC_IPRH	0xa4080004UL
-#elif defined(CONFIG_CPU_SUBTYPE_SH7710)
-/* Interrupt Controller Registers */
-#undef INTC_IPRA
-#undef INTC_IPRB
-#define INTC_IPRA  	0xA414FEE2UL
-#define INTC_IPRB  	0xA414FEE4UL
-#define INTC_IPRF  	0xA4080000UL
-#define INTC_IPRG  	0xA4080002UL
-#define INTC_IPRH  	0xA4080004UL
-#define INTC_IPRI  	0xA4080006UL
-
-#undef INTC_ICR0
-#undef INTC_ICR1
-#define INTC_ICR0	0xA414FEE0UL
-#define INTC_ICR1	0xA4140010UL
-
-#define INTC_IRR0	0xa4000004UL
-#define INTC_IRR1	0xa4000006UL
-#define INTC_IRR2	0xa4000008UL
-#define INTC_IRR3	0xa400000AUL
-#define INTC_IRR4	0xa400000CUL
-#define INTC_IRR5	0xa4080020UL
-#define INTC_IRR7	0xa4080024UL
-#define INTC_IRR8	0xa4080026UL
-
-/* Interrupt numbers */
-#define TIMER2_IRQ      18
-#define TIMER2_IPR_ADDR INTC_IPRA
-#define TIMER2_IPR_POS   1
-#define TIMER2_PRIORITY  2
-
-/* WDT */
-#define WDT_IRQ		27
-#define WDT_IPR_ADDR	INTC_IPRB
-#define WDT_IPR_POS	 3
-#define WDT_PRIORITY	 2
-
-#define SCIF0_ERI_IRQ	52
-#define SCIF0_RXI_IRQ	53
-#define SCIF0_BRI_IRQ	54
-#define SCIF0_TXI_IRQ	55
-#define SCIF0_IPR_ADDR	INTC_IPRE
-#define SCIF0_IPR_POS	2
-#define SCIF0_PRIORITY	3
-
-#define DMTE4_IRQ	76
-#define DMTE5_IRQ	77
-#define DMA2_IPR_ADDR	INTC_IPRF
-#define DMA2_IPR_POS	2
-#define DMA2_PRIORITY	7
-
-#define IPSEC_IRQ	79
-#define IPSEC_IPR_ADDR	INTC_IPRF
-#define IPSEC_IPR_POS	3
-#define IPSEC_PRIORITY	3
-
-/* EDMAC */
-#define EDMAC0_IRQ	80
-#define EDMAC0_IPR_ADDR	INTC_IPRG
-#define EDMAC0_IPR_POS	3
-#define EDMAC0_PRIORITY	3
-
-#define EDMAC1_IRQ	81
-#define EDMAC1_IPR_ADDR	INTC_IPRG
-#define EDMAC1_IPR_POS	2
-#define EDMAC1_PRIORITY	3
-
-#define EDMAC2_IRQ	82
-#define EDMAC2_IPR_ADDR	INTC_IPRG
-#define EDMAC2_IPR_POS	1
-#define EDMAC2_PRIORITY	3
-
-/* SIOF */
-#define SIOF0_ERI_IRQ	96
-#define SIOF0_TXI_IRQ	97
-#define SIOF0_RXI_IRQ	98
-#define SIOF0_CCI_IRQ	99
-#define SIOF0_IPR_ADDR	INTC_IPRH
-#define SIOF0_IPR_POS	0
-#define SIOF0_PRIORITY	7
-
-#define SIOF1_ERI_IRQ	100
-#define SIOF1_TXI_IRQ	101
-#define SIOF1_RXI_IRQ	102
-#define SIOF1_CCI_IRQ	103
-#define SIOF1_IPR_ADDR	INTC_IPRI
-#define SIOF1_IPR_POS	1
-#define SIOF1_PRIORITY	7
-#endif /* CONFIG_CPU_SUBTYPE_SH7710 */
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7710)
-#define PORT_PACR	0xa4050100UL
-#define PORT_PBCR	0xa4050102UL
-#define PORT_PCCR	0xa4050104UL
-#define PORT_PETCR	0xa4050106UL
-#define PORT_PADR  	0xa4050120UL
-#define PORT_PBDR  	0xa4050122UL
-#define PORT_PCDR  	0xa4050124UL
-#else
-#define PORT_PACR	0xa4000100UL
-#define PORT_PBCR	0xa4000102UL
-#define PORT_PCCR	0xa4000104UL
-#define PORT_PFCR	0xa400010aUL
-#define PORT_PADR  	0xa4000120UL
-#define PORT_PBDR  	0xa4000122UL
-#define PORT_PCDR  	0xa4000124UL
-#define PORT_PFDR  	0xa400012aUL
-#endif
-
-#define IRQ0_IRQ	32
-#define IRQ1_IRQ	33
-#define IRQ2_IRQ	34
-#define IRQ3_IRQ	35
-#define IRQ4_IRQ	36
-#define IRQ5_IRQ	37
-
-#define IRQ0_IPR_ADDR	INTC_IPRC
-#define IRQ1_IPR_ADDR	INTC_IPRC
-#define IRQ2_IPR_ADDR	INTC_IPRC
-#define IRQ3_IPR_ADDR	INTC_IPRC
-#define IRQ4_IPR_ADDR	INTC_IPRD
-#define IRQ5_IPR_ADDR	INTC_IPRD
-
-#define IRQ0_IPR_POS	0
-#define IRQ1_IPR_POS	1
-#define IRQ2_IPR_POS	2
-#define IRQ3_IPR_POS	3
-#define IRQ4_IPR_POS	0
-#define IRQ5_IPR_POS	1
-
-#define IRQ0_PRIORITY	1
-#define IRQ1_PRIORITY	1
-#define IRQ2_PRIORITY	1
-#define IRQ3_PRIORITY	1
-#define IRQ4_PRIORITY	1
-#define IRQ5_PRIORITY	1
-
-#define PINT0_IRQ	40
-#define PINT8_IRQ	41
-
-#define PINT0_IPR_ADDR	INTC_IPRD
-#define PINT8_IPR_ADDR	INTC_IPRD
-
-#define PINT0_IPR_POS	3
-#define PINT8_IPR_POS	2
-#define PINT0_PRIORITY	2
-#define PINT8_PRIORITY	2
-
-extern int ipr_irq_demux(int irq);
-#define __irq_demux(irq) ipr_irq_demux(irq)
-#endif /* CONFIG_CPU_SUBTYPE_SH7707 || CONFIG_CPU_SUBTYPE_SH7709 */
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7751) || \
-    defined(CONFIG_CPU_SUBTYPE_ST40STB1) || defined(CONFIG_CPU_SUBTYPE_SH4_202)
-#define INTC_ICR        0xffd00000
-#define INTC_ICR_NMIL	(1<<15)
-#define INTC_ICR_MAI	(1<<14)
-#define INTC_ICR_NMIB	(1<<9)
-#define INTC_ICR_NMIE	(1<<8)
-#define INTC_ICR_IRLM	(1<<7)
-#endif
-
-#ifdef CONFIG_CPU_SUBTYPE_SH7780
-#include <asm/irq-sh7780.h>
-#endif
-
-/* SH with INTC2-style interrupts */
-#ifdef CONFIG_CPU_HAS_INTC2_IRQ
-#if defined(CONFIG_CPU_SUBTYPE_ST40STB1)
-#define INTC2_BASE	0xfe080000
-#define INTC2_FIRST_IRQ 64
-#define INTC2_INTREQ_OFFSET	0x20
-#define INTC2_INTMSK_OFFSET	0x40
-#define INTC2_INTMSKCLR_OFFSET	0x60
-#define NR_INTC2_IRQS	25
-#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
-#define INTC2_BASE	0xfe080000
-#define INTC2_FIRST_IRQ 48	/* INTEVT 0x800 */
-#define INTC2_INTREQ_OFFSET	0x20
-#define INTC2_INTMSK_OFFSET	0x40
-#define INTC2_INTMSKCLR_OFFSET	0x60
-#define NR_INTC2_IRQS	64
-#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
-#define INTC2_BASE	0xffd40000
-#define INTC2_FIRST_IRQ	21
-#define INTC2_INTMSK_OFFSET	(0x38)
-#define INTC2_INTMSKCLR_OFFSET	(0x3c)
-#define NR_INTC2_IRQS	60
-#endif
-
-#define INTC2_INTPRI_OFFSET	0x00
+void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs);
+void make_imask_irq(unsigned int irq);
+void init_IRQ_ipr(void);
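Putting the pieces above together, a CPU subtype would typically describe its IPR-routed sources in a static table and register them in one call. The entries below are invented purely for illustration; the real tables live in the per-subtype setup code.

/* Hypothetical IPR table: { irq, ipr_idx, shift, priority } (addr filled in later). */
static struct ipr_data example_ipr_map[] = {
	{ 16, 0, 12, 2 },	/* e.g. a timer source */
	{ 52, 4,  8, 3 },	/* e.g. a serial source */
};

static void __init example_init_irq(void)
{
	/* make_ipr_irq() resolves addr from ipr_idx via map_ipridx_to_addr() */
	make_ipr_irq(example_ipr_map, ARRAY_SIZE(example_ipr_map));
}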
 
 struct intc2_data {
 	unsigned short irq;
@@ -693,20 +149,14 @@
 
 void make_intc2_irq(struct intc2_data *, unsigned int nr_irqs);
 void init_IRQ_intc2(void);
-#endif
-
-extern int shmse_irq_demux(int irq);
 
 static inline int generic_irq_demux(int irq)
 {
 	return irq;
 }
 
-#ifndef __irq_demux
-#define __irq_demux(irq)	(irq)
-#endif
 #define irq_canonicalize(irq)	(irq)
-#define irq_demux(irq)		__irq_demux(sh_mv.mv_irq_demux(irq))
+#define irq_demux(irq)		sh_mv.mv_irq_demux(irq)
 
 #ifdef CONFIG_4KSTACKS
 extern void irq_ctx_init(int cpu);
@@ -717,12 +167,4 @@
 # define irq_ctx_exit(cpu) do { } while (0)
 #endif
 
-#if defined(CONFIG_CPU_SUBTYPE_SH73180)
-#include <asm/irq-sh73180.h>
-#endif
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7343)
-#include <asm/irq-sh7343.h>
-#endif
-
 #endif /* __ASM_SH_IRQ_H */
diff --git a/include/asm-sh/irqflags.h b/include/asm-sh/irqflags.h
new file mode 100644
index 0000000..9dedc1b
--- /dev/null
+++ b/include/asm-sh/irqflags.h
@@ -0,0 +1,123 @@
+#ifndef __ASM_SH_IRQFLAGS_H
+#define __ASM_SH_IRQFLAGS_H
+
+static inline void raw_local_irq_enable(void)
+{
+	unsigned long __dummy0, __dummy1;
+
+	__asm__ __volatile__ (
+		"stc	sr, %0\n\t"
+		"and	%1, %0\n\t"
+#ifdef CONFIG_CPU_HAS_SR_RB
+		"stc	r6_bank, %1\n\t"
+		"or	%1, %0\n\t"
+#endif
+		"ldc	%0, sr\n\t"
+		: "=&r" (__dummy0), "=r" (__dummy1)
+		: "1" (~0x000000f0)
+		: "memory"
+	);
+}
+
+static inline void raw_local_irq_disable(void)
+{
+	unsigned long flags;
+
+	__asm__ __volatile__ (
+		"stc	sr, %0\n\t"
+		"or	#0xf0, %0\n\t"
+		"ldc	%0, sr\n\t"
+		: "=&z" (flags)
+		: /* no inputs */
+		: "memory"
+	);
+}
+
+static inline void set_bl_bit(void)
+{
+	unsigned long __dummy0, __dummy1;
+
+	__asm__ __volatile__ (
+		"stc	sr, %0\n\t"
+		"or	%2, %0\n\t"
+		"and	%3, %0\n\t"
+		"ldc	%0, sr\n\t"
+		: "=&r" (__dummy0), "=r" (__dummy1)
+		: "r" (0x10000000), "r" (0xffffff0f)
+		: "memory"
+	);
+}
+
+static inline void clear_bl_bit(void)
+{
+	unsigned long __dummy0, __dummy1;
+
+	__asm__ __volatile__ (
+		"stc	sr, %0\n\t"
+		"and	%2, %0\n\t"
+		"ldc	%0, sr\n\t"
+		: "=&r" (__dummy0), "=r" (__dummy1)
+		: "1" (~0x10000000)
+		: "memory"
+	);
+}
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+	unsigned long flags;
+
+	__asm__ __volatile__ (
+		"stc	sr, %0\n\t"
+		"and	#0xf0, %0\n\t"
+		: "=&z" (flags)
+		: /* no inputs */
+		: "memory"
+	);
+
+	return flags;
+}
+
+#define raw_local_save_flags(flags) \
+		do { (flags) = __raw_local_save_flags(); } while (0)
+
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+	return (flags != 0);
+}
+
+static inline int raw_irqs_disabled(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	return raw_irqs_disabled_flags(flags);
+}
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+	unsigned long flags, __dummy;
+
+	__asm__ __volatile__ (
+		"stc	sr, %1\n\t"
+		"mov	%1, %0\n\t"
+		"or	#0xf0, %0\n\t"
+		"ldc	%0, sr\n\t"
+		"mov	%1, %0\n\t"
+		"and	#0xf0, %0\n\t"
+		: "=&z" (flags), "=&r" (__dummy)
+		: /* no inputs */
+		: "memory"
+	);
+
+	return flags;
+}
+
+#define raw_local_irq_save(flags) \
+		do { (flags) = __raw_local_irq_save(); } while (0)
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+	if ((flags & 0xf0) != 0xf0)
+		raw_local_irq_enable();
+}
+
+#endif /* __ASM_SH_IRQFLAGS_H */
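The flags value saved by these helpers is just the IMASK field (SR bits 4-7), so callers treat it as opaque. A minimal usage sketch of the new primitives, with a hypothetical critical-section body:

/* Typical critical-section pattern built on the raw helpers above. */
static void example_critical_section(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* mask IMASK to 0xf0, remember previous state */
	/* ... touch data that an interrupt handler also modifies ... */
	raw_local_irq_restore(flags);	/* re-enables only if interrupts were enabled before */
}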
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index c7088ef..46f04e2 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -10,7 +10,6 @@
 
 #include <asm/cpu/mmu_context.h>
 #include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
@@ -42,10 +41,8 @@
 /*
  * Get MMU context if needed.
  */
-static __inline__ void
-get_mmu_context(struct mm_struct *mm)
+static inline void get_mmu_context(struct mm_struct *mm)
 {
-	extern void flush_tlb_all(void);
 	unsigned long mc = mmu_context_cache;
 
 	/* Check if we have old version of context. */
@@ -61,6 +58,7 @@
 		 * Flush all TLB and start new cycle.
 		 */
 		flush_tlb_all();
+
 		/*
 		 * Fix version; Note that we avoid version #0
 		 * to distinguish NO_CONTEXT.
@@ -75,11 +73,10 @@
  * Initialize the context related info for a new mm_struct
  * instance.
  */
-static __inline__ int init_new_context(struct task_struct *tsk,
+static inline int init_new_context(struct task_struct *tsk,
 				       struct mm_struct *mm)
 {
 	mm->context.id = NO_CONTEXT;
-
 	return 0;
 }
 
@@ -87,12 +84,12 @@
  * Destroy context related info for an mm_struct that is about
  * to be put to rest.
  */
-static __inline__ void destroy_context(struct mm_struct *mm)
+static inline void destroy_context(struct mm_struct *mm)
 {
 	/* Do nothing */
 }
 
-static __inline__ void set_asid(unsigned long asid)
+static inline void set_asid(unsigned long asid)
 {
 	unsigned long __dummy;
 
@@ -105,7 +102,7 @@
 			        "r" (0xffffff00));
 }
 
-static __inline__ unsigned long get_asid(void)
+static inline unsigned long get_asid(void)
 {
 	unsigned long asid;
 
@@ -120,24 +117,29 @@
  * After we have set current->mm to a new value, this activates
  * the context for the new mm so we see the new mappings.
  */
-static __inline__ void activate_context(struct mm_struct *mm)
+static inline void activate_context(struct mm_struct *mm)
 {
 	get_mmu_context(mm);
 	set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
 }
 
-/* MMU_TTB can be used for optimizing the fault handling.
-   (Currently not used) */
-static __inline__ void switch_mm(struct mm_struct *prev,
-				 struct mm_struct *next,
-				 struct task_struct *tsk)
+/* MMU_TTB is used for optimizing the fault handling. */
+static inline void set_TTB(pgd_t *pgd)
+{
+	ctrl_outl((unsigned long)pgd, MMU_TTB);
+}
+
+static inline pgd_t *get_TTB(void)
+{
+	return (pgd_t *)ctrl_inl(MMU_TTB);
+}
+
+static inline void switch_mm(struct mm_struct *prev,
+			     struct mm_struct *next,
+			     struct task_struct *tsk)
 {
 	if (likely(prev != next)) {
-		unsigned long __pgdir = (unsigned long)next->pgd;
-
-		__asm__ __volatile__("mov.l	%0, %1"
-				     : /* no output */
-				     : "r" (__pgdir), "m" (__m(MMU_TTB)));
+		set_TTB(next->pgd);
 		activate_context(next);
 	}
 }
@@ -147,7 +149,7 @@
 #define activate_mm(prev, next) \
 	switch_mm((prev),(next),NULL)
 
-static __inline__ void
+static inline void
 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index ca8b26d..380fd62 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -13,9 +13,16 @@
    [ P4 control   ]		0xE0000000
  */
 
-
 /* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT	12
+#if defined(CONFIG_PAGE_SIZE_4KB)
+# define PAGE_SHIFT	12
+#elif defined(CONFIG_PAGE_SIZE_8KB)
+# define PAGE_SHIFT	13
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+# define PAGE_SHIFT	16
+#else
+# error "Bogus kernel page size?"
+#endif
 
 #ifdef __ASSEMBLY__
 #define PAGE_SIZE	(1 << PAGE_SHIFT)
@@ -28,8 +35,14 @@
 
 #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
 #define HPAGE_SHIFT	16
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+#define HPAGE_SHIFT	18
 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
 #define HPAGE_SHIFT	20
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define HPAGE_SHIFT	22
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
+#define HPAGE_SHIFT	26
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
@@ -69,15 +82,25 @@
 /*
  * These are used to make use of C type-checking..
  */
-typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pgd; } pgd_t;
+#ifdef CONFIG_X2TLB
+typedef struct { unsigned long pte_low, pte_high; } pte_t;
+typedef struct { unsigned long long pgprot; } pgprot_t;
+#define pte_val(x) \
+	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+#define __pte(x) \
+	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
+#else
+typedef struct { unsigned long pte_low; } pte_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
+#define pte_val(x)	((x).pte_low)
+#define __pte(x) ((pte_t) { (x) } )
+#endif
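With CONFIG_X2TLB the 64-bit software PTE value is simply split across the two 32-bit words, low word first. A quick illustration of the round trip, assuming X2TLB and an arbitrary bit pattern:

/* Assuming CONFIG_X2TLB: __pte()/pte_val() split and rejoin the 64-bit value. */
static inline void example_pte_split(void)
{
	unsigned long long val = 0x0000040000000108ULL;
	pte_t pte = __pte(val);

	/* pte.pte_low  == 0x00000108: legacy bits (_PAGE_PRESENT | _PAGE_CACHABLE) */
	/* pte.pte_high == 0x00000400: extended bits (_PAGE_EXT_USER_READ)          */
	BUG_ON(pte_val(pte) != val);
}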
 
-#define pte_val(x)	((x).pte)
+typedef struct { unsigned long pgd; } pgd_t;
+
 #define pgd_val(x)	((x).pgd)
 #define pgprot_val(x)	((x).pgprot)
 
-#define __pte(x) ((pte_t) { (x) } )
 #define __pgd(x) ((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
diff --git a/include/asm-sh/pgalloc.h b/include/asm-sh/pgalloc.h
index e841465..888e452 100644
--- a/include/asm-sh/pgalloc.h
+++ b/include/asm-sh/pgalloc.h
@@ -1,13 +1,16 @@
 #ifndef __ASM_SH_PGALLOC_H
 #define __ASM_SH_PGALLOC_H
 
-#define pmd_populate_kernel(mm, pmd, pte) \
-		set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+				       pte_t *pte)
+{
+	set_pmd(pmd, __pmd((unsigned long)pte));
+}
 
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 				struct page *pte)
 {
-	set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
+	set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
 }
 
 /*
@@ -15,7 +18,16 @@
  */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+
+	if (pgd) {
+		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+		memcpy(pgd + USER_PTRS_PER_PGD,
+		       swapper_pg_dir + USER_PTRS_PER_PGD,
+		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+	}
+
+	return pgd;
 }
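pgd_alloc() now zeroes only the user portion of a new page directory and copies the remaining entries from swapper_pg_dir, so kernel-side mappings are present in every pgd from the start. A hedged sketch of the invariant this establishes:

/* Invariant after pgd_alloc(): the kernel half mirrors swapper_pg_dir (illustrative). */
static inline int example_pgd_is_seeded(pgd_t *pgd)
{
	int i;

	for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++)
		if (pgd_val(pgd[i]) != pgd_val(swapper_pg_dir[i]))
			return 0;
	return 1;
}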
 
 static inline void pgd_free(pgd_t *pgd)
diff --git a/include/asm-sh/pgtable-2level.h b/include/asm-sh/pgtable-2level.h
deleted file mode 100644
index b525db6..0000000
--- a/include/asm-sh/pgtable-2level.h
+++ /dev/null
@@ -1,70 +0,0 @@
-#ifndef __ASM_SH_PGTABLE_2LEVEL_H
-#define __ASM_SH_PGTABLE_2LEVEL_H
-
-/*
- * traditional two-level paging structure:
- */
-
-#define PGDIR_SHIFT	22
-#define PTRS_PER_PGD	1024
-
-/*
- * this is two-level, so we don't really have any
- * PMD directory physically.
- */
-#define PMD_SHIFT	22
-#define PTRS_PER_PMD	1
-
-#define PTRS_PER_PTE	1024
-
-#ifndef __ASSEMBLY__
-#define pte_ERROR(e) \
-	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
-#define pmd_ERROR(e) \
-	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
-#define pgd_ERROR(e) \
-	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-static inline int pgd_none(pgd_t pgd)		{ return 0; }
-static inline int pgd_bad(pgd_t pgd)		{ return 0; }
-static inline int pgd_present(pgd_t pgd)	{ return 1; }
-static inline void pgd_clear (pgd_t * pgdp) 	{ }
-
-/*
- * Certain architectures need to do special things when PTEs
- * within a page table are directly modified.  Thus, the following
- * hook is made available.
- */
-#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
-/*
- * (pmds are folded into pgds so this doesn't get actually called,
- * but the define is needed for a generic inline function.)
- */
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
-#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
-
-#define pgd_page_vaddr(pgd) \
-((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
-
-#define pgd_page(pgd) \
-	(phys_to_page(pgd_val(pgd)))
-
-static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
-{
-	return (pmd_t *) dir;
-}
-
-#define pte_pfn(x)		((unsigned long)(((x).pte >> PAGE_SHIFT)))
-#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ASM_SH_PGTABLE_2LEVEL_H */
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 2c8682a..c84901d 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -15,15 +15,10 @@
 #include <asm-generic/pgtable-nopmd.h>
 #include <asm/page.h>
 
-#define PTRS_PER_PGD		1024
-
 #ifndef __ASSEMBLY__
 #include <asm/addrspace.h>
 #include <asm/fixmap.h>
 
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern void paging_init(void);
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -33,15 +28,28 @@
 
 #endif /* !__ASSEMBLY__ */
 
-/* traditional two-level paging structure */
-#define PGDIR_SHIFT	22
-#define PTRS_PER_PMD	1
-#define PTRS_PER_PTE	1024
-#define PMD_SIZE	(1UL << PMD_SHIFT)
-#define PMD_MASK	(~(PMD_SIZE-1))
-#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+/*
+ * traditional two-level paging structure
+ */
+/* PTE bits */
+#ifdef CONFIG_X2TLB
+# define PTE_MAGNITUDE	3	/* 64-bit PTEs on extended mode SH-X2 TLB */
+#else
+# define PTE_MAGNITUDE	2	/* 32-bit PTEs */
+#endif
+#define PTE_SHIFT	PAGE_SHIFT
+#define PTE_BITS	(PTE_SHIFT - PTE_MAGNITUDE)
+
+/* PGD bits */
+#define PGDIR_SHIFT	(PTE_SHIFT + PTE_BITS)
+#define PGDIR_BITS	(32 - PGDIR_SHIFT)
+#define PGDIR_SIZE	(1 << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
+/* Entries per level */
+#define PTRS_PER_PTE	(PAGE_SIZE / 4)
+#define PTRS_PER_PGD	(PAGE_SIZE / 4)
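Worked through for the most common configuration (4 KiB pages with 32-bit PTEs, i.e. CONFIG_PAGE_SIZE_4KB and no X2TLB), the definitions above reproduce the old fixed layout:

/*
 * PTE_SHIFT    = PAGE_SHIFT    = 12
 * PTE_BITS     = 12 - 2        = 10   (1024 PTEs fill one 4 KiB page)
 * PGDIR_SHIFT  = 12 + 10       = 22   (each pgd entry maps 4 MiB)
 * PGDIR_BITS   = 32 - 22       = 10   (1024 pgd entries span 4 GiB)
 * PTRS_PER_PTE = PTRS_PER_PGD  = 4096 / 4 = 1024
 */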
+
 #define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
 #define FIRST_USER_ADDRESS	0
 
@@ -49,7 +57,7 @@
 
 /*
  * The first 1MB of the mapping is used for fixed purposes.
- * Currently only 4-enty (16kB) is used (see arch/sh/mm/cache.c)
+ * Currently only 4-entry (16kB) is used (see arch/sh/mm/cache.c)
  */
 #define VMALLOC_START	(P3SEG+0x00100000)
 #define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
@@ -57,7 +65,8 @@
 /*
  * Linux PTEL encoding.
  *
- * Hardware and software bit definitions for the PTEL value:
+ * Hardware and software bit definitions for the PTEL value (see below for
+ * notes on SH-X2 MMUs and 64-bit PTEs):
  *
  * - Bits 0 and 7 are reserved on SH-3 (_PAGE_WT and _PAGE_SZ1 on SH-4).
  *
@@ -76,20 +85,57 @@
  *
  * - Bits 31, 30, and 29 remain unused by everyone and can be used for future
  *   software flags, although care must be taken to update _PAGE_CLEAR_FLAGS.
+ *
+ * XXX: Leave the _PAGE_FILE and _PAGE_WT overhaul for a rainy day.
+ *
+ * SH-X2 MMUs and extended PTEs
+ *
+ * SH-X2 supports an extended mode TLB with split data arrays due to the
+ * number of bits needed for PR and SZ (now EPR and ESZ) encodings. The PR and
+ * SZ bit placeholders still exist in data array 1, but are implemented as
+ * reserved bits, with the real logic existing in data array 2.
+ *
+ * The downside to this is that we can no longer fit everything into a 32-bit
+ * PTE encoding, so a 64-bit pte_t is necessary for these parts. On the plus
+ * side, this gives us quite a few spare bits to play with for future usage.
  */
+/* Legacy and compat mode bits */
 #define	_PAGE_WT	0x001		/* WT-bit on SH-4, 0 on SH-3 */
 #define _PAGE_HW_SHARED	0x002		/* SH-bit  : shared among processes */
 #define _PAGE_DIRTY	0x004		/* D-bit   : page changed */
 #define _PAGE_CACHABLE	0x008		/* C-bit   : cachable */
-#define _PAGE_SZ0	0x010		/* SZ0-bit : Size of page */
-#define _PAGE_RW	0x020		/* PR0-bit : write access allowed */
-#define _PAGE_USER	0x040		/* PR1-bit : user space access allowed */
-#define _PAGE_SZ1	0x080		/* SZ1-bit : Size of page (on SH-4) */
+#ifndef CONFIG_X2TLB
+# define _PAGE_SZ0	0x010		/* SZ0-bit : Size of page */
+# define _PAGE_RW	0x020		/* PR0-bit : write access allowed */
+# define _PAGE_USER	0x040		/* PR1-bit : user space access allowed*/
+# define _PAGE_SZ1	0x080		/* SZ1-bit : Size of page (on SH-4) */
+#endif
 #define _PAGE_PRESENT	0x100		/* V-bit   : page is valid */
 #define _PAGE_PROTNONE	0x200		/* software: if not present  */
 #define _PAGE_ACCESSED	0x400		/* software: page referenced */
 #define _PAGE_FILE	_PAGE_WT	/* software: pagecache or swap? */
 
+/* Extended mode bits */
+#define _PAGE_EXT_ESZ0		0x0010	/* ESZ0-bit: Size of page */
+#define _PAGE_EXT_ESZ1		0x0020	/* ESZ1-bit: Size of page */
+#define _PAGE_EXT_ESZ2		0x0040	/* ESZ2-bit: Size of page */
+#define _PAGE_EXT_ESZ3		0x0080	/* ESZ3-bit: Size of page */
+
+#define _PAGE_EXT_USER_EXEC	0x0100	/* EPR0-bit: User space executable */
+#define _PAGE_EXT_USER_WRITE	0x0200	/* EPR1-bit: User space writable */
+#define _PAGE_EXT_USER_READ	0x0400	/* EPR2-bit: User space readable */
+
+#define _PAGE_EXT_KERN_EXEC	0x0800	/* EPR3-bit: Kernel space executable */
+#define _PAGE_EXT_KERN_WRITE	0x1000	/* EPR4-bit: Kernel space writable */
+#define _PAGE_EXT_KERN_READ	0x2000	/* EPR5-bit: Kernel space readable */
+
+/* Wrapper for extended mode pgprot twiddling */
+#ifdef CONFIG_X2TLB
+# define _PAGE_EXT(x)		((unsigned long long)(x) << 32)
+#else
+# define _PAGE_EXT(x)		(0)
+#endif
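The _PAGE_EXT() wrapper lifts an extended-mode bit into the upper 32 bits of the software PTE value on X2 parts and evaluates to 0 elsewhere, so the same pgprot expressions can be written once for both cases. One worked value:

/*
 * _PAGE_EXT(_PAGE_EXT_USER_READ) == 0x0000040000000000ULL with CONFIG_X2TLB
 *                                == 0                     on legacy parts
 */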
+
 /* software: moves to PTEA.TC (Timing Control) */
 #define _PAGE_PCC_AREA5	0x00000000	/* use BSC registers for area5 */
 #define _PAGE_PCC_AREA6	0x80000000	/* use BSC registers for area6 */
@@ -114,37 +160,160 @@
 
 #define _PAGE_FLAGS_HARDWARE_MASK	(0x1fffffff & ~(_PAGE_CLEAR_FLAGS))
 
-/* Hardware flags: SZ0=1 (4k-byte) */
-#define _PAGE_FLAGS_HARD	_PAGE_SZ0
-
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define _PAGE_SZHUGE	(_PAGE_SZ1)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
-#define _PAGE_SZHUGE	(_PAGE_SZ0 | _PAGE_SZ1)
+/* Hardware flags, page size encoding */
+#if defined(CONFIG_X2TLB)
+# if defined(CONFIG_PAGE_SIZE_4KB)
+#  define _PAGE_FLAGS_HARD	_PAGE_EXT(_PAGE_EXT_ESZ0)
+# elif defined(CONFIG_PAGE_SIZE_8KB)
+#  define _PAGE_FLAGS_HARD	_PAGE_EXT(_PAGE_EXT_ESZ1)
+# elif defined(CONFIG_PAGE_SIZE_64KB)
+#  define _PAGE_FLAGS_HARD	_PAGE_EXT(_PAGE_EXT_ESZ2)
+# endif
+#else
+# if defined(CONFIG_PAGE_SIZE_4KB)
+#  define _PAGE_FLAGS_HARD	_PAGE_SZ0
+# elif defined(CONFIG_PAGE_SIZE_64KB)
+#  define _PAGE_FLAGS_HARD	_PAGE_SZ1
+# endif
 #endif
 
-#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY)
+#if defined(CONFIG_X2TLB)
+# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ2)
+# elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ2)
+# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
+#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ1 | _PAGE_EXT_ESZ2)
+# elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ3)
+# elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
+#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3)
+# endif
+#else
+# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#  define _PAGE_SZHUGE	(_PAGE_SZ1)
+# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
+#  define _PAGE_SZHUGE	(_PAGE_SZ0 | _PAGE_SZ1)
+# endif
+#endif
+
+/*
+ * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
+ * to make pte_mkhuge() happy.
+ */
+#ifndef _PAGE_SZHUGE
+# define _PAGE_SZHUGE	(_PAGE_FLAGS_HARD)
+#endif
+
+#define _PAGE_CHG_MASK \
+	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY)
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_MMU
-#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD)
-#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD)
-#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
-#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
-#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
+#if defined(CONFIG_X2TLB) /* SH-X2 TLB */
+#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
+				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
+
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_USER_READ | \
+					   _PAGE_EXT_USER_WRITE))
+
+#define PAGE_EXECREAD	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_USER_EXEC | \
+					   _PAGE_EXT_USER_READ))
+
+#define PAGE_COPY	PAGE_EXECREAD
+
+#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_USER_READ))
+
+#define PAGE_WRITEONLY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_USER_WRITE))
+
+#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_USER_WRITE | \
+					   _PAGE_EXT_USER_READ  | \
+					   _PAGE_EXT_USER_EXEC))
+
+#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
+				 _PAGE_DIRTY | _PAGE_ACCESSED | \
+				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
+					   _PAGE_EXT_KERN_WRITE | \
+					   _PAGE_EXT_KERN_EXEC))
+
 #define PAGE_KERNEL_NOCACHE \
-			__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
-#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
+			__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
+				 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
+				 _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
+					   _PAGE_EXT_KERN_WRITE | \
+					   _PAGE_EXT_KERN_EXEC))
+
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
+				 _PAGE_DIRTY | _PAGE_ACCESSED | \
+				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
+					   _PAGE_EXT_KERN_EXEC))
+
 #define PAGE_KERNEL_PCC(slot, type) \
-			__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_FLAGS_HARD | (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | (type))
+			__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
+				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
+				 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
+					   _PAGE_EXT_KERN_WRITE | \
+					   _PAGE_EXT_KERN_EXEC) | \
+				 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
+				 (type))
+
+#elif defined(CONFIG_MMU) /* SH-X TLB */
+#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
+				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
+
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+				 _PAGE_CACHABLE | _PAGE_ACCESSED | \
+				 _PAGE_FLAGS_HARD)
+
+#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
+				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
+
+#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
+				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
+
+#define PAGE_EXECREAD	PAGE_READONLY
+#define PAGE_RWX	PAGE_SHARED
+#define PAGE_WRITEONLY	PAGE_SHARED
+
+#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | \
+				 _PAGE_DIRTY | _PAGE_ACCESSED | \
+				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
+
+#define PAGE_KERNEL_NOCACHE \
+			__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
+				 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
+				 _PAGE_FLAGS_HARD)
+
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
+				 _PAGE_DIRTY | _PAGE_ACCESSED | \
+				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
+
+#define PAGE_KERNEL_PCC(slot, type) \
+			__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
+				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
+				 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
+				 (type))
 #else /* no mmu */
 #define PAGE_NONE		__pgprot(0)
 #define PAGE_SHARED		__pgprot(0)
 #define PAGE_COPY		__pgprot(0)
+#define PAGE_EXECREAD		__pgprot(0)
+#define PAGE_RWX		__pgprot(0)
 #define PAGE_READONLY		__pgprot(0)
+#define PAGE_WRITEONLY		__pgprot(0)
 #define PAGE_KERNEL		__pgprot(0)
 #define PAGE_KERNEL_NOCACHE	__pgprot(0)
 #define PAGE_KERNEL_RO		__pgprot(0)
@@ -154,27 +323,32 @@
 #endif /* __ASSEMBLY__ */
 
 /*
- * As i386 and MIPS, SuperH can't do page protection for execute, and
- * considers that the same as a read.  Also, write permissions imply
- * read permissions. This is the closest we can get..
+ * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
+ * protection for execute, and considers it the same as a read. Also, write
+ * permission implies read permission. This is the closest we can get..
+ *
+ * SH-X2 (SH7785) and later parts take this to the opposite extreme,
+ * not only supporting separate execute, read, and write bits, but having
+ * completely separate permission bits for user and kernel space.
  */
+	 /*xwr*/
 #define __P000	PAGE_NONE
 #define __P001	PAGE_READONLY
 #define __P010	PAGE_COPY
 #define __P011	PAGE_COPY
-#define __P100	PAGE_READONLY
-#define __P101	PAGE_READONLY
+#define __P100	PAGE_EXECREAD
+#define __P101	PAGE_EXECREAD
 #define __P110	PAGE_COPY
 #define __P111	PAGE_COPY
 
 #define __S000	PAGE_NONE
 #define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED
+#define __S010	PAGE_WRITEONLY
 #define __S011	PAGE_SHARED
-#define __S100	PAGE_READONLY
-#define __S101	PAGE_READONLY
-#define __S110	PAGE_SHARED
-#define __S111	PAGE_SHARED
+#define __S100	PAGE_EXECREAD
+#define __S101	PAGE_EXECREAD
+#define __S110	PAGE_RWX
+#define __S111	PAGE_RWX
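The table index is the xwr permission triple from the mmap request; one worked entry, for orientation:

/*
 * Example reading: __S110 is a shared mapping asking for write+exec.  On
 * SH-X2 that resolves to PAGE_RWX (user read/write/exec via the EPR bits);
 * on legacy parts PAGE_RWX aliases PAGE_SHARED, so behaviour is unchanged.
 */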
 
 #ifndef __ASSEMBLY__
 
@@ -183,7 +357,17 @@
  * within a page table are directly modified.  Thus, the following
  * hook is made available.
  */
+#ifdef CONFIG_X2TLB
+static inline void set_pte(pte_t *ptep, pte_t pte)
+{
+	ptep->pte_high = pte.pte_high;
+	smp_wmb();
+	ptep->pte_low = pte.pte_low;
+}
+#else
 #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#endif
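The ordering in the X2 set_pte() matters because _PAGE_PRESENT lives in pte_low: the high word is stored first and smp_wmb() keeps it that way, so a reader that observes a present low word also sees the matching extended bits. A simplified, hypothetical reader side that relies on this:

/* Pairs with the smp_wmb() in set_pte() above (illustrative only). */
static inline int example_pte_user_readable(pte_t *ptep)
{
	unsigned long low = ptep->pte_low;

	smp_rmb();
	return (low & _PAGE_PRESENT) && (ptep->pte_high & _PAGE_EXT_USER_READ);
}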
+
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
 /*
@@ -192,18 +376,18 @@
  */
 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
 
-#define pte_pfn(x)		((unsigned long)(((x).pte >> PAGE_SHIFT)))
+#define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
 #define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 #define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
 #define pte_none(x)	(!pte_val(x))
 #define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 
 #define pmd_none(x)	(!pmd_val(x))
-#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
+#define pmd_present(x)	(pmd_val(x))
 #define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
-#define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+#define	pmd_bad(x)	(pmd_val(x) & ~PAGE_MASK)
 
 #define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))
 #define pte_page(x)	phys_to_page(pte_val(x)&PTE_PHYS_MASK)
@@ -212,28 +396,52 @@
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
-static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_RW; }
-static inline int pte_not_present(pte_t pte){ return !(pte_val(pte) & _PAGE_PRESENT); }
+#define pte_not_present(pte)	(!(pte_val(pte) & _PAGE_PRESENT))
+#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
+#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
+#define pte_file(pte)		(pte_val(pte) & _PAGE_FILE)
 
-static inline pte_t pte_rdprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
-static inline pte_t pte_exprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
-static inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
-static inline pte_t pte_mkread(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
-static inline pte_t pte_mkexec(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
-static inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
-#ifdef CONFIG_HUGETLB_PAGE
-static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
+#ifdef CONFIG_X2TLB
+#define pte_read(pte)		((pte).pte_high & _PAGE_EXT_USER_READ)
+#define pte_exec(pte)		((pte).pte_high & _PAGE_EXT_USER_EXEC)
+#define pte_write(pte)		((pte).pte_high & _PAGE_EXT_USER_WRITE)
+#else
+#define pte_read(pte)		(pte_val(pte) & _PAGE_USER)
+#define pte_exec(pte)		(pte_val(pte) & _PAGE_USER)
+#define pte_write(pte)		(pte_val(pte) & _PAGE_RW)
 #endif
 
+#define PTE_BIT_FUNC(h,fn,op) \
+static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
+
+#ifdef CONFIG_X2TLB
+/*
+ * We cheat a bit in the SH-X2 TLB case. As the permission bits are
+ * individually toggled (and user permissions are entirely decoupled from
+ * kernel permissions), we attempt to couple them a bit more sanely here.
+ */
+PTE_BIT_FUNC(high, rdprotect, &= ~_PAGE_EXT_USER_READ);
+PTE_BIT_FUNC(high, mkread, |= _PAGE_EXT_USER_READ | _PAGE_EXT_KERN_READ);
+PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE);
+PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
+PTE_BIT_FUNC(high, exprotect, &= ~_PAGE_EXT_USER_EXEC);
+PTE_BIT_FUNC(high, mkexec, |= _PAGE_EXT_USER_EXEC | _PAGE_EXT_KERN_EXEC);
+PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
+#else
+PTE_BIT_FUNC(low, rdprotect, &= ~_PAGE_USER);
+PTE_BIT_FUNC(low, mkread, |= _PAGE_USER);
+PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW);
+PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW);
+PTE_BIT_FUNC(low, exprotect, &= ~_PAGE_USER);
+PTE_BIT_FUNC(low, mkexec, |= _PAGE_USER);
+PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE);
+#endif
+
+PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY);
+PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY);
+PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
+PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
+
 /*
  * Macro and implementation to make a page protection as uncachable.
  */
@@ -258,13 +466,14 @@
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
+{
+	set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+			    pgprot_val(newprot)));
+	return pte;
+}
 
-#define pmd_page_vaddr(pmd) \
-((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
-#define pmd_page(pmd) \
-	(phys_to_page(pmd_val(pmd)))
+#define pmd_page_vaddr(pmd)	pmd_val(pmd)
+#define pmd_page(pmd)		(virt_to_page(pmd_val(pmd)))
 
 /* to find an entry in a page-table-directory. */
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
@@ -283,8 +492,15 @@
 #define pte_unmap(pte)		do { } while (0)
 #define pte_unmap_nested(pte)	do { } while (0)
 
+#ifdef CONFIG_X2TLB
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \
+	       &(e), (e).pte_high, (e).pte_low)
+#else
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#endif
+
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
@@ -337,6 +553,9 @@
 extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 #endif
 
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern void paging_init(void);
+
 #include <asm-generic/pgtable.h>
 
 #endif /* !__ASSEMBLY__ */
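
As a rough illustration of the PTE_BIT_FUNC() helper introduced above, this is what one invocation expands to in the CONFIG_X2TLB case; the sketch assumes only the macro and the _PAGE_EXT_* bits shown in the hunk.

/* Illustrative expansion of PTE_BIT_FUNC(high, mkwrite, |= ...) for the
 * X2TLB case: the user permission is set together with its kernel
 * counterpart, as the comment in the hunk explains. */
static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_high |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE;
	return pte;
}
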
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h
index 45bb74e..6f1dd7c 100644
--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h
@@ -36,7 +36,10 @@
  */
 enum cpu_type {
 	/* SH-2 types */
-	CPU_SH7604,
+	CPU_SH7604, CPU_SH7619,
+
+	/* SH-2A types */
+	CPU_SH7206,
 
 	/* SH-3 types */
 	CPU_SH7705, CPU_SH7706, CPU_SH7707,
@@ -47,7 +50,10 @@
 	/* SH-4 types */
 	CPU_SH7750, CPU_SH7750S, CPU_SH7750R, CPU_SH7751, CPU_SH7751R,
 	CPU_SH7760, CPU_ST40RA, CPU_ST40GX1, CPU_SH4_202, CPU_SH4_501,
+
+	/* SH-4A types */
 	CPU_SH73180, CPU_SH7343, CPU_SH7770, CPU_SH7780, CPU_SH7781,
+	CPU_SH7785,
 
 	/* Unknown subtype */
 	CPU_SH_NONE
@@ -130,12 +136,11 @@
 };
 
 struct thread_struct {
+	/* Saved registers when thread is descheduled */
 	unsigned long sp;
 	unsigned long pc;
 
-	unsigned long trap_no, error_code;
-	unsigned long address;
-	/* Hardware debugging registers may come here */
+	/* Hardware debugging registers */
 	unsigned long ubc_pc;
 
 	/* floating point info */
@@ -150,12 +155,7 @@
 extern int ubc_usercnt;
 
 #define INIT_THREAD  {						\
-	sizeof(init_stack) + (long) &init_stack, /* sp */	\
-	0,					 /* pc */	\
-	0, 0,							\
-	0,							\
-	0,							\
-	{{{0,}},}				/* fpu state */	\
+	.sp = sizeof(init_stack) + (long) &init_stack,		\
 }
 
 /*
@@ -259,8 +259,8 @@
 		struct pt_regs *regs);
 extern unsigned long get_wchan(struct task_struct *p);
 
-#define KSTK_EIP(tsk)  ((tsk)->thread.pc)
-#define KSTK_ESP(tsk)  ((tsk)->thread.sp)
+#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->regs[15])
 
 #define cpu_sleep()	__asm__ __volatile__ ("sleep" : : : "memory")
 #define cpu_relax()	barrier()
diff --git a/include/asm-sh/push-switch.h b/include/asm-sh/push-switch.h
new file mode 100644
index 0000000..dfc6bad
--- /dev/null
+++ b/include/asm-sh/push-switch.h
@@ -0,0 +1,28 @@
+#ifndef __ASM_SH_PUSH_SWITCH_H
+#define __ASM_SH_PUSH_SWITCH_H
+
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+struct push_switch {
+	/* switch state */
+	unsigned int		state:1;
+	/* debounce timer */
+	struct timer_list	debounce;
+	/* workqueue */
+	struct work_struct	work;
+};
+
+struct push_switch_platform_info {
+	/* IRQ handler */
+	irqreturn_t		(*irq_handler)(int irq, void *data);
+	/* Special IRQ flags */
+	unsigned int		irq_flags;
+	/* Bit location of switch */
+	unsigned int		bit;
+	/* Symbolic switch name */
+	const char		*name;
+};
+
+#endif /* __ASM_SH_PUSH_SWITCH_H */
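
A minimal sketch of how board code might fill in the new push_switch_platform_info; the handler, trigger flag, bit position and name are hypothetical, only the structure layout comes from the header above.

#include <linux/interrupt.h>
#include <asm/push-switch.h>

/* Hypothetical IRQ handler for the switch; the board-specific
 * acknowledgement is not shown. */
static irqreturn_t example_switch_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static struct push_switch_platform_info example_switch_info = {
	.irq_handler	= example_switch_irq,
	.irq_flags	= IRQF_TRIGGER_FALLING,	/* assumed trigger type */
	.bit		= 4,			/* assumed bit position */
	.name		= "example-sw",
};
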
diff --git a/include/asm-sh/rwsem.h b/include/asm-sh/rwsem.h
index 9d2aea5..4931ba8 100644
--- a/include/asm-sh/rwsem.h
+++ b/include/asm-sh/rwsem.h
@@ -25,11 +25,21 @@
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
 };
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
 #define __RWSEM_INITIALIZER(name) \
 	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
-	  LIST_HEAD_INIT((name).wait_list) }
+	  LIST_HEAD_INIT((name).wait_list) \
+	  __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name)		\
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -39,6 +49,16 @@
 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
 
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+			 struct lock_class_key *key);
+
+#define init_rwsem(sem)				\
+do {						\
+	static struct lock_class_key __key;	\
+						\
+	__init_rwsem((sem), #sem, &__key);	\
+} while (0)
+
 static inline void init_rwsem(struct rw_semaphore *sem)
 {
 	sem->count = RWSEM_UNLOCKED_VALUE;
@@ -141,6 +161,11 @@
 		rwsem_downgrade_wake(sem);
 }
 
+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+	__down_write(sem);
+}
+
 /*
  * implement exchange and add functionality
  */
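
A short usage sketch for the reworked init_rwsem(): since it is now a macro wrapping __init_rwsem(), every call site supplies its own static lock_class_key for lockdep. The context structure here is hypothetical.

#include <linux/rwsem.h>

struct example_ctx {
	struct rw_semaphore lock;
};

static void example_ctx_init(struct example_ctx *ctx)
{
	/* Expands to a per-call-site static struct lock_class_key plus a
	 * call to __init_rwsem(), so lockdep can tell this semaphore
	 * class apart from rwsems initialised elsewhere. */
	init_rwsem(&ctx->lock);
}
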
diff --git a/include/asm-sh/se7206.h b/include/asm-sh/se7206.h
new file mode 100644
index 0000000..698eb80
--- /dev/null
+++ b/include/asm-sh/se7206.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_SH_SE7206_H
+#define __ASM_SH_SE7206_H
+
+#define PA_SMSC		0x30000000
+#define PA_MRSHPC	0x34000000
+#define PA_LED		0x31400000
+
+void init_se7206_IRQ(void);
+
+#define __IO_PREFIX	se7206
+#include <asm/io_generic.h>
+
+#endif /* __ASM_SH_SE7206_H */
diff --git a/include/asm-sh/setup.h b/include/asm-sh/setup.h
index 34ca8a7..1583c6b 100644
--- a/include/asm-sh/setup.h
+++ b/include/asm-sh/setup.h
@@ -1,10 +1,12 @@
-#ifdef __KERNEL__
 #ifndef _SH_SETUP_H
 #define _SH_SETUP_H
 
 #define COMMAND_LINE_SIZE 256
 
+#ifdef __KERNEL__
+
 int setup_early_printk(char *);
 
-#endif /* _SH_SETUP_H */
 #endif /* __KERNEL__ */
+
+#endif /* _SH_SETUP_H */
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 3340126..b1e42e7 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -6,6 +6,7 @@
  * Copyright (C) 2002 Paul Mundt
  */
 
+#include <linux/irqflags.h>
 #include <asm/types.h>
 
 /*
@@ -131,103 +132,6 @@
 
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
 
-/* Interrupt Control */
-#ifdef CONFIG_CPU_HAS_SR_RB
-static inline void local_irq_enable(void)
-{
-	unsigned long __dummy0, __dummy1;
-
-	__asm__ __volatile__("stc	sr, %0\n\t"
-			     "and	%1, %0\n\t"
-			     "stc	r6_bank, %1\n\t"
-			     "or	%1, %0\n\t"
-			     "ldc	%0, sr"
-			     : "=&r" (__dummy0), "=r" (__dummy1)
-			     : "1" (~0x000000f0)
-			     : "memory");
-}
-#else
-static inline void local_irq_enable(void)
-{
-	unsigned long __dummy0, __dummy1;
-
-	__asm__ __volatile__ (
-		"stc	sr, %0\n\t"
-		"and	%1, %0\n\t"
-		"ldc	%0, sr\n\t"
-		: "=&r" (__dummy0), "=r" (__dummy1)
-		: "1" (~0x000000f0)
-		: "memory");
-}
-#endif
-
-static inline void local_irq_disable(void)
-{
-	unsigned long __dummy;
-	__asm__ __volatile__("stc	sr, %0\n\t"
-			     "or	#0xf0, %0\n\t"
-			     "ldc	%0, sr"
-			     : "=&z" (__dummy)
-			     : /* no inputs */
-			     : "memory");
-}
-
-static inline void set_bl_bit(void)
-{
-	unsigned long __dummy0, __dummy1;
-
-	__asm__ __volatile__ ("stc	sr, %0\n\t"
-			     "or	%2, %0\n\t"
-			     "and	%3, %0\n\t"
-			     "ldc	%0, sr"
-			     : "=&r" (__dummy0), "=r" (__dummy1)
-			     : "r" (0x10000000), "r" (0xffffff0f)
-			     : "memory");
-}
-
-static inline void clear_bl_bit(void)
-{
-	unsigned long __dummy0, __dummy1;
-
-	__asm__ __volatile__ ("stc	sr, %0\n\t"
-			     "and	%2, %0\n\t"
-			     "ldc	%0, sr"
-			     : "=&r" (__dummy0), "=r" (__dummy1)
-			     : "1" (~0x10000000)
-			     : "memory");
-}
-
-#define local_save_flags(x) \
-	__asm__("stc sr, %0; and #0xf0, %0" : "=&z" (x) :/**/: "memory" )
-
-#define irqs_disabled()			\
-({					\
-	unsigned long flags;		\
-	local_save_flags(flags);	\
-	(flags != 0);			\
-})
-
-static inline unsigned long local_irq_save(void)
-{
-	unsigned long flags, __dummy;
-
-	__asm__ __volatile__("stc	sr, %1\n\t"
-			     "mov	%1, %0\n\t"
-			     "or	#0xf0, %0\n\t"
-			     "ldc	%0, sr\n\t"
-			     "mov	%1, %0\n\t"
-			     "and	#0xf0, %0"
-			     : "=&z" (flags), "=&r" (__dummy)
-			     :/**/
-			     : "memory" );
-	return flags;
-}
-
-#define local_irq_restore(x) do {			\
-	if ((x & 0x000000f0) != 0x000000f0)		\
-		local_irq_enable();			\
-} while (0)
-
 /*
  * Jump to P2 area.
  * When handling TLB or caches, we need to do it from P2 area.
@@ -264,9 +168,6 @@
 		: "=&r" (__dummy));			\
 } while (0)
 
-/* For spinlocks etc */
-#define local_irq_save(x)	x = local_irq_save()
-
 static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 {
 	unsigned long flags, retval;
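
The open-coded SR-twiddling helpers removed here are replaced by the generic <linux/irqflags.h> interface pulled in at the top of the file; callers keep using the same pattern, sketched below with a hypothetical function.

#include <linux/irqflags.h>

/* Typical critical section; local_irq_save()/local_irq_restore() are
 * now provided through the generic irqflags layer rather than this
 * header. */
static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);
	/* ... work that must run with interrupts masked ... */
	local_irq_restore(flags);
}
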
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h
index 3ebc3f9..0c01dc5 100644
--- a/include/asm-sh/thread_info.h
+++ b/include/asm-sh/thread_info.h
@@ -90,13 +90,7 @@
 #endif
 #define free_thread_info(ti)	kfree(ti)
 
-#else /* !__ASSEMBLY__ */
-
-/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg) \
-	stc     r7_bank, reg
-
-#endif
+#endif /* __ASSEMBLY__ */
 
 /*
  * thread information flags
diff --git a/include/asm-sh/timer.h b/include/asm-sh/timer.h
index 5df842b..17b5e76 100644
--- a/include/asm-sh/timer.h
+++ b/include/asm-sh/timer.h
@@ -18,11 +18,32 @@
 
 	struct sys_device	dev;
 	struct sys_timer_ops	*ops;
+
+#ifdef CONFIG_NO_IDLE_HZ
+	struct dyn_tick_timer	*dyn_tick;
+#endif
 };
 
+#ifdef CONFIG_NO_IDLE_HZ
+#define DYN_TICK_ENABLED	(1 << 1)
+
+struct dyn_tick_timer {
+	spinlock_t	lock;
+	unsigned int	state;			/* Current state */
+	int		(*enable)(void);	/* Enables dynamic tick */
+	int		(*disable)(void);	/* Disables dynamic tick */
+	void		(*reprogram)(unsigned long); /* Reprograms the timer */
+	int		(*handler)(int, void *);
+};
+
+void timer_dyn_reprogram(void);
+#else
+#define timer_dyn_reprogram()	do { } while (0)
+#endif
+
 #define TICK_SIZE (tick_nsec / 1000)
 
-extern struct sys_timer tmu_timer;
+extern struct sys_timer tmu_timer, cmt_timer, mtu2_timer;
 extern struct sys_timer *sys_timer;
 
 #ifndef CONFIG_GENERIC_TIME
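
A hedged sketch of how a timer driver could populate the new dyn_tick_timer hooks; the function names are made up, only the structure layout comes from the header above.

/* Hypothetical dynamic-tick callbacks for a timer driver. */
static int example_dyn_tick_enable(void)
{
	/* program the hardware for one-shot operation (not shown) */
	return 0;
}

static int example_dyn_tick_disable(void)
{
	/* return to periodic ticks (not shown) */
	return 0;
}

static struct dyn_tick_timer example_dyn_tick = {
	.lock		= SPIN_LOCK_UNLOCKED,
	.enable		= example_dyn_tick_enable,
	.disable	= example_dyn_tick_disable,
};
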
diff --git a/include/asm-sh/titan.h b/include/asm-sh/titan.h
index 270a4f4..03f3583 100644
--- a/include/asm-sh/titan.h
+++ b/include/asm-sh/titan.h
@@ -1,9 +1,8 @@
 /*
  * Platform definitions for Titan
  */
-
-#ifndef _ASM_SH_TITAN_TITAN_H
-#define _ASM_SH_TITAN_TITAN_H
+#ifndef _ASM_SH_TITAN_H
+#define _ASM_SH_TITAN_H
 
 #define __IO_PREFIX titan
 #include <asm/io_generic.h>
@@ -15,29 +14,4 @@
 #define TITAN_IRQ_MPCIB		11	/* mPCI B */
 #define TITAN_IRQ_USB		11	/* USB */
 
-/*
- * The external interrupt lines, these take up ints 0 - 15 inclusive
- * depending on the priority for the interrupt.  In fact the priority
- * is the interrupt :-)
- */
-#define IRL0_IRQ	0
-#define IRL0_IPR_ADDR	INTC_IPRD
-#define IRL0_IPR_POS	3
-#define IRL0_PRIORITY	8
-
-#define IRL1_IRQ	1
-#define IRL1_IPR_ADDR	INTC_IPRD
-#define IRL1_IPR_POS	2
-#define IRL1_PRIORITY	8
-
-#define IRL2_IRQ	2
-#define IRL2_IPR_ADDR	INTC_IPRD
-#define IRL2_IPR_POS	1
-#define IRL2_PRIORITY	8
-
-#define IRL3_IRQ	3
-#define IRL3_IPR_ADDR	INTC_IPRD
-#define IRL3_IPR_POS	0
-#define IRL3_PRIORITY	8
-
-#endif
+#endif /* __ASM_SH_TITAN_H */
diff --git a/include/asm-sh/types.h b/include/asm-sh/types.h
index 3c09dd4..fd00dbb 100644
--- a/include/asm-sh/types.h
+++ b/include/asm-sh/types.h
@@ -52,16 +52,6 @@
 
 typedef u32 dma_addr_t;
 
-#ifdef CONFIG_LBD
-typedef u64 sector_t;
-#define HAVE_SECTOR_T
-#endif
-
-#ifdef CONFIG_LSF
-typedef u64 blkcnt_t;
-#define HAVE_BLKCNT_T
-#endif
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-sh/unistd.h b/include/asm-sh/unistd.h
index 1c2abde..f982073 100644
--- a/include/asm-sh/unistd.h
+++ b/include/asm-sh/unistd.h
@@ -332,125 +332,6 @@
 
 #ifdef __KERNEL__
 
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO:
- * see <asm-sh/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-	/* Avoid using "res" which is declared to be in register r0; \
-	   errno might expand to a function call and clobber it.  */ \
-		int __err = -(res); \
-		errno = __err; \
-		res = -1; \
-	} \
-	return (type) (res); \
-} while (0)
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-__asm__ __volatile__ ("trapa	#0x10" \
-	: "=z" (__sc0) \
-	: "0" (__sc0) \
-	: "memory" ); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-__asm__ __volatile__ ("trapa	#0x11" \
-	: "=z" (__sc0) \
-	: "0" (__sc0), "r" (__sc4) \
-	: "memory"); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-__asm__ __volatile__ ("trapa	#0x12" \
-	: "=z" (__sc0) \
-	: "0" (__sc0), "r" (__sc4), "r" (__sc5) \
-	: "memory"); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-__asm__ __volatile__ ("trapa	#0x13" \
-	: "=z" (__sc0) \
-	: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6) \
-	: "memory"); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-register long __sc7 __asm__ ("r7") = (long) arg4; \
-__asm__ __volatile__ ("trapa	#0x14" \
-	: "=z" (__sc0) \
-	: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6),  \
-	  "r" (__sc7) \
-	: "memory" ); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
-{ \
-register long __sc3 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-register long __sc7 __asm__ ("r7") = (long) arg4; \
-register long __sc0 __asm__ ("r0") = (long) arg5; \
-__asm__ __volatile__ ("trapa	#0x15" \
-	: "=z" (__sc0) \
-	: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7),  \
-	  "r" (__sc3) \
-	: "memory" ); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
-{ \
-register long __sc3 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-register long __sc7 __asm__ ("r7") = (long) arg4; \
-register long __sc0 __asm__ ("r0") = (long) arg5; \
-register long __sc1 __asm__ ("r1") = (long) arg6; \
-__asm__ __volatile__ ("trapa	#0x16" \
-	: "=z" (__sc0) \
-	: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7),  \
-	  "r" (__sc3), "r" (__sc1) \
-	: "memory" ); \
-__syscall_return(type,__sc0); \
-}
-
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
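
For userspace code that still relied on the removed _syscallN() macros, the usual replacement is the generic libc syscall() wrapper; a minimal sketch, with gettid chosen purely as an example.

#include <unistd.h>
#include <sys/syscall.h>

/* Userspace equivalent of the old _syscall0(pid_t, gettid) expansion,
 * issued through the libc wrapper instead of a hand-rolled trap. */
static pid_t my_gettid(void)
{
	return syscall(SYS_gettid);
}
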
diff --git a/include/asm-sh64/checksum.h b/include/asm-sh64/checksum.h
index fd034e9..ba594cc 100644
--- a/include/asm-sh64/checksum.h
+++ b/include/asm-sh64/checksum.h
@@ -26,8 +26,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-asmlinkage unsigned int csum_partial(const unsigned char *buff, int len,
-				     unsigned int sum);
+asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  *	Note: when you get a NULL pointer exception here this means someone
@@ -38,46 +37,34 @@
  */
 
 
-unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len,
-				       unsigned int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
+				       __wsum sum);
 
-unsigned int csum_partial_copy_from_user(const char *src, char *dst,
-					 int len, int sum, int *err_ptr);
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+					 int len, __wsum sum, int *err_ptr);
 
-/*
- * These are the old (and unsafe) way of doing checksums, a warning message will be
- * printed if they are used and an exeption occurs.
- *
- * these functions should go away after some time.
- */
-
-#define csum_partial_copy_fromuser csum_partial_copy
-
-unsigned int csum_partial_copy(const char *src, char *dst, int len,
-			       unsigned int sum);
-
-static inline unsigned short csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
+	u32 sum = (__force u32)csum;
         sum = (sum & 0xffff) + (sum >> 16);
         sum = (sum & 0xffff) + (sum >> 16);
-        return ~(sum);
+        return (__force __sum16)~sum;
 }
 
-unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl);
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
-unsigned long csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
+__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 				 unsigned short len, unsigned short proto,
-				 unsigned int sum);
+				 __wsum sum);
 
 /*
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum)
+						   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -86,7 +73,7 @@
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff, len, 0));
 }
diff --git a/include/asm-sh64/device.h b/include/asm-sh64/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-sh64/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
index 68e27a8..5efe906 100644
--- a/include/asm-sh64/dma-mapping.h
+++ b/include/asm-sh64/dma-mapping.h
@@ -35,7 +35,7 @@
 	consistent_free(NULL, size, vaddr, dma_handle);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
 	dma_cache_wback_inv((unsigned long)vaddr, size);
diff --git a/include/asm-sh64/setup.h b/include/asm-sh64/setup.h
index ebd42eb..5b07b14 100644
--- a/include/asm-sh64/setup.h
+++ b/include/asm-sh64/setup.h
@@ -1,6 +1,10 @@
 #ifndef __ASM_SH64_SETUP_H
 #define __ASM_SH64_SETUP_H
 
+#define COMMAND_LINE_SIZE 256
+
+#ifdef __KERNEL__
+
 #define PARAM ((unsigned char *)empty_zero_page)
 #define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000))
 #define RAMDISK_FLAGS (*(unsigned long *) (PARAM+0x004))
@@ -12,5 +16,7 @@
 #define COMMAND_LINE ((char *) (PARAM+256))
 #define COMMAND_LINE_SIZE 256
 
+#endif  /*  __KERNEL__  */
+
 #endif /* __ASM_SH64_SETUP_H */
 
diff --git a/include/asm-sh64/unistd.h b/include/asm-sh64/unistd.h
index ee7828b..1f38a7a 100644
--- a/include/asm-sh64/unistd.h
+++ b/include/asm-sh64/unistd.h
@@ -347,148 +347,6 @@
 #ifdef __KERNEL__ 
 
 #define NR_syscalls 321
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO:
- * see <asm-sh64/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
-	/* Note: when returning from kernel the return value is in r9	    \
-	**       This prevents conflicts between return value and arg1      \
-	**       when dispatching signal handler, in other words makes	    \
-	**       life easier in the system call epilogue (see entry.S)      \
-	*/								    \
-        register unsigned long __sr2 __asm__ ("r2") = res;		    \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) {	    \
-		errno = -(res);						    \
-		__sr2 = -1; 						    \
-	} \
-	return (type) (__sr2); 						    \
-} while (0)
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x10 << 16) | __NR_##name); \
-__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "()"			    \
-	: "=r" (__sc0) 							    \
-	: "r" (__sc0) ); 						    \
-__syscall_return(type,__sc0); 						    \
-}
-
-	/*
-	 * The apparent spurious "dummy" assembler comment is *needed*,
-	 * as without it, the compiler treats the arg<n> variables
-	 * as no longer live just before the asm. The compiler can
-	 * then optimize the storage into any registers it wishes.
-	 * The additional dummy statement forces the compiler to put
-	 * the arguments into the correct registers before the TRAPA.
-	 */
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x11 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
-__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2)"		    \
-	: "=r" (__sc0) 							    \
-	: "r" (__sc0), "r" (__sc2));					    \
-__asm__ __volatile__ ("!dummy	%0 %1"				   	    \
-	:								    \
-	: "r" (__sc0), "r" (__sc2));					    \
-__syscall_return(type,__sc0); 						    \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x12 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;	    \
-__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2,%3)"		    \
-	: "=r" (__sc0) 							    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3) );			    \
-__asm__ __volatile__ ("!dummy	%0 %1 %2"			   	    \
-	:								    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3) );			    \
-__syscall_return(type,__sc0); 						    \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;	    \
-register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3;	    \
-__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2,%3,%4)"		    \
-	: "=r" (__sc0) 							    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );		    \
-__asm__ __volatile__ ("!dummy	%0 %1 %2 %3"			   	    \
-	:								    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );	   	    \
-__syscall_return(type,__sc0); 						    \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x14 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;	    \
-register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3;	    \
-register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4;	    \
-__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2,%3,%4,%5)"	    \
-	: "=r" (__sc0) 							    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\
-__asm__ __volatile__ ("!dummy	%0 %1 %2 %3 %4"			   	    \
-	:								    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\
-__syscall_return(type,__sc0); 						    \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x15 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;	    \
-register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3;	    \
-register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4;	    \
-register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5;	    \
-__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2,%3,%4,%5,%6)"	    \
-	: "=r" (__sc0) 							    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5),  \
-	  "r" (__sc6));							    \
-__asm__ __volatile__ ("!dummy	%0 %1 %2 %3 %4 %5"		   	    \
-	:								    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5),  \
-	  "r" (__sc6));							    \
-__syscall_return(type,__sc0); 						    \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5, type6, arg6) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x16 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;	    \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;	    \
-register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3;	    \
-register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4;	    \
-register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5;	    \
-register unsigned long __sc7 __asm__ ("r7") = (unsigned long) arg6;	    \
-__asm__ __volatile__ ("trapa	%1 !\t\t\t" #name "(%2,%3,%4,%5,%6,%7)"	    \
-	: "=r" (__sc0) 							    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5),  \
-	  "r" (__sc6), "r" (__sc7));					    \
-__asm__ __volatile__ ("!dummy	%0 %1 %2 %3 %4 %5 %6"		   	    \
-	:								    \
-	: "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5),  \
-	  "r" (__sc6), "r" (__sc7));					    \
-__syscall_return(type,__sc0); 						    \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-sparc/checksum.h b/include/asm-sparc/checksum.h
index 2861581..267e631 100644
--- a/include/asm-sparc/checksum.h
+++ b/include/asm-sparc/checksum.h
@@ -30,7 +30,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /* the same as csum_partial, but copies from fs:src while it
  * checksums
@@ -41,9 +41,8 @@
 
 extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
 
-static inline unsigned int 
-csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst, int len,
-			   unsigned int sum)
+static inline __wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
 	register unsigned int ret asm("o0") = (unsigned int)src;
 	register char *d asm("o1") = dst;
@@ -57,42 +56,36 @@
 	: "o2", "o3", "o4", "o5", "o7",
 	  "g2", "g3", "g4", "g5", "g7",
 	  "memory", "cc");
-	return ret;
+	return (__force __wsum)ret;
 }
 
-static inline unsigned int 
-csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst, int len,
-			    unsigned int sum, int *err)
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+			    __wsum sum, int *err)
   {
-	if (!access_ok (VERIFY_READ, src, len)) {
-		*err = -EFAULT;
-		memset (dst, 0, len);
-		return sum;
-	} else {
-		register unsigned long ret asm("o0") = (unsigned long)src;
-		register char *d asm("o1") = dst;
-		register int l asm("g1") = len;
-		register unsigned int s asm("g7") = sum;
+	register unsigned long ret asm("o0") = (unsigned long)src;
+	register char *d asm("o1") = dst;
+	register int l asm("g1") = len;
+	register __wsum s asm("g7") = sum;
 
-		__asm__ __volatile__ (
-		".section __ex_table,#alloc\n\t"
-		".align 4\n\t"
-		".word 1f,2\n\t"
-		".previous\n"
-		"1:\n\t"
-		"call __csum_partial_copy_sparc_generic\n\t"
-		" st %8, [%%sp + 64]\n"
-		: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
-		: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
-		: "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
-		  "cc", "memory");
-		return ret;
-	}
-  }
+	__asm__ __volatile__ (
+	".section __ex_table,#alloc\n\t"
+	".align 4\n\t"
+	".word 1f,2\n\t"
+	".previous\n"
+	"1:\n\t"
+	"call __csum_partial_copy_sparc_generic\n\t"
+	" st %8, [%%sp + 64]\n"
+	: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
+	: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
+	: "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
+	  "cc", "memory");
+	return (__force __wsum)ret;
+}
   
-static inline unsigned int 
-csum_partial_copy_to_user(const unsigned char *src, unsigned char __user *dst, int len,
-			  unsigned int sum, int *err)
+static inline __wsum
+csum_partial_copy_to_user(const void *src, void __user *dst, int len,
+			  __wsum sum, int *err)
 {
 	if (!access_ok (VERIFY_WRITE, dst, len)) {
 		*err = -EFAULT;
@@ -101,7 +94,7 @@
 		register unsigned long ret asm("o0") = (unsigned long)src;
 		register char __user *d asm("o1") = dst;
 		register int l asm("g1") = len;
-		register unsigned int s asm("g7") = sum;
+		register __wsum s asm("g7") = sum;
 
 		__asm__ __volatile__ (
 		".section __ex_table,#alloc\n\t"
@@ -116,7 +109,7 @@
 		: "o2", "o3", "o4", "o5", "o7",
 		  "g2", "g3", "g4", "g5",
 		  "cc", "memory");
-		return ret;
+		return (__force __wsum)ret;
 	}
 }
 
@@ -126,10 +119,9 @@
 /* ihl is always 5 or greater, almost always is 5, and iph is word aligned
  * the majority of the time.
  */
-static inline unsigned short ip_fast_csum(const unsigned char *iph,
-					  unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
-	unsigned short sum;
+	__sum16 sum;
 
 	/* Note: We must read %2 before we touch %0 for the first time,
 	 *       because GCC can legitimately use the same register for
@@ -164,7 +156,7 @@
 }
 
 /* Fold a partial checksum without adding pseudo headers. */
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	unsigned int tmp;
 
@@ -173,23 +165,22 @@
 			     "addx\t%1, %%g0, %1\n\t"
 			     "xnor\t%%g0, %1, %0"
 			     : "=&r" (sum), "=r" (tmp)
-			     : "0" (sum), "1" (sum<<16)
+			     : "0" (sum), "1" ((__force u32)sum<<16)
 			     : "cc");
-	return sum;
+	return (__force __sum16)sum;
 }
 
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
-					       unsigned long daddr,
-					       unsigned int len,
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+					       unsigned short len,
 					       unsigned short proto,
-					       unsigned int sum)
+					       __wsum sum)
 {
 	__asm__ __volatile__("addcc\t%1, %0, %0\n\t"
 			     "addxcc\t%2, %0, %0\n\t"
 			     "addxcc\t%3, %0, %0\n\t"
 			     "addx\t%0, %%g0, %0\n\t"
 			     : "=r" (sum), "=r" (saddr)
-			     : "r" (daddr), "r" ((proto<<16)+len), "0" (sum),
+			     : "r" (daddr), "r" (proto + len), "0" (sum),
 			       "1" (saddr)
 			     : "cc");
 	return sum;
@@ -199,22 +190,20 @@
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum) 
+						   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
 
-static inline unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						 struct in6_addr *daddr,
-						 __u32 len,
-						 unsigned short proto,
-						 unsigned int sum) 
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+				      const struct in6_addr *daddr,
+				      __u32 len, unsigned short proto,
+				      __wsum sum)
 {
 	__asm__ __volatile__ (
 		"addcc	%3, %4, %%g4\n\t"
@@ -245,7 +234,7 @@
 }
 
 /* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff, len, 0));
 }
diff --git a/include/asm-sparc/device.h b/include/asm-sparc/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-sparc/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h
index f7827fa..d5b2f80 100644
--- a/include/asm-sparc/unistd.h
+++ b/include/asm-sparc/unistd.h
@@ -329,136 +329,6 @@
  *          find a free slot in the 0-302 range.
  */
 
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-__asm__ __volatile__ ("t 0x10\n\t" \
-		      "bcc 1f\n\t" \
-		      "mov %%o0, %0\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "1:\n\t" \
-		      : "=r" (__res)\
-		      : "r" (__g1) \
-		      : "o0", "cc"); \
-if (__res < -255 || __res >= 0) \
-    return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-__asm__ __volatile__ ("t 0x10\n\t" \
-		      "bcc 1f\n\t" \
-		      "mov %%o0, %0\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "1:\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__g1) \
-		      : "cc"); \
-if (__res < -255 || __res >= 0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-__asm__ __volatile__ ("t 0x10\n\t" \
-		      "bcc 1f\n\t" \
-		      "mov %%o0, %0\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "1:\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__o1), "r" (__g1) \
-		      : "cc"); \
-if (__res < -255 || __res >= 0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-__asm__ __volatile__ ("t 0x10\n\t" \
-		      "bcc 1f\n\t" \
-		      "mov %%o0, %0\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "1:\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) \
-		      : "cc"); \
-if (__res < -255 || __res>=0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-__asm__ __volatile__ ("t 0x10\n\t" \
-		      "bcc 1f\n\t" \
-		      "mov %%o0, %0\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "1:\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__g1) \
-		      : "cc"); \
-if (__res < -255 || __res>=0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-} 
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	  type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-register long __o4 __asm__ ("o4") = (long)(arg5); \
-__asm__ __volatile__ ("t 0x10\n\t" \
-		      "bcc 1f\n\t" \
-		      "mov %%o0, %0\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "1:\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__o4), "r" (__g1) \
-		      : "cc"); \
-if (__res < -255 || __res>=0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
diff --git a/include/asm-sparc64/checksum.h b/include/asm-sparc64/checksum.h
index dc8bed2..70a006d 100644
--- a/include/asm-sparc64/checksum.h
+++ b/include/asm-sparc64/checksum.h
@@ -30,7 +30,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+extern __wsum csum_partial(const void * buff, int len, __wsum sum);
 
 /* the same as csum_partial, but copies from user space while it
  * checksums
@@ -38,52 +38,50 @@
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern unsigned int csum_partial_copy_nocheck(const unsigned char *src,
-					      unsigned char *dst,
-					      int len, unsigned int sum);
+extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
+					      int len, __wsum sum);
 
-extern long __csum_partial_copy_from_user(const unsigned char __user *src,
-					  unsigned char *dst, int len,
-					  unsigned int sum);
+extern long __csum_partial_copy_from_user(const void __user *src,
+					  void *dst, int len,
+					  __wsum sum);
 
-static inline unsigned int
-csum_partial_copy_from_user(const unsigned char __user *src,
-			    unsigned char *dst, int len,
-			    unsigned int sum, int *err)
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src,
+			    void *dst, int len,
+			    __wsum sum, int *err)
 {
 	long ret = __csum_partial_copy_from_user(src, dst, len, sum);
 	if (ret < 0)
 		*err = -EFAULT;
-	return (unsigned int) ret;
+	return (__force __wsum) ret;
 }
 
 /* 
  *	Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-extern long __csum_partial_copy_to_user(const unsigned char *src,
-					unsigned char __user *dst, int len,
-					  unsigned int sum);
+extern long __csum_partial_copy_to_user(const void *src,
+					void __user *dst, int len,
+					  __wsum sum);
 
-static inline unsigned int
-csum_and_copy_to_user(const unsigned char *src,
-		      unsigned char __user *dst, int len,
-		      unsigned int sum, int *err)
+static inline __wsum
+csum_and_copy_to_user(const void *src,
+		      void __user *dst, int len,
+		      __wsum sum, int *err)
 {
 	long ret = __csum_partial_copy_to_user(src, dst, len, sum);
 	if (ret < 0)
 		*err = -EFAULT;
-	return (unsigned int) ret;
+	return (__force __wsum) ret;
 }
 
 /* ihl is always 5 or greater, almost always is 5, and iph is word aligned
  * the majority of the time.
  */
-extern unsigned short ip_fast_csum(__const__ unsigned char *iph,
-				   unsigned int ihl);
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /* Fold a partial checksum without adding pseudo headers. */
-static inline unsigned short csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	unsigned int tmp;
 
@@ -93,16 +91,15 @@
 "	addc		%1, %%g0, %1\n"
 "	xnor		%%g0, %1, %0\n"
 	: "=&r" (sum), "=r" (tmp)
-	: "0" (sum), "1" (sum<<16)
+	: "0" (sum), "1" ((__force u32)sum<<16)
 	: "cc");
-	return (sum & 0xffff);
+	return (__force __sum16)sum;
 }
 
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
-					       unsigned long daddr,
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 					       unsigned int len,
 					       unsigned short proto,
-					       unsigned int sum)
+					       __wsum sum)
 {
 	__asm__ __volatile__(
 "	addcc		%1, %0, %0\n"
@@ -110,7 +107,7 @@
 "	addccc		%3, %0, %0\n"
 "	addc		%0, %%g0, %0\n"
 	: "=r" (sum), "=r" (saddr)
-	: "r" (daddr), "r" ((proto<<16)+len), "0" (sum), "1" (saddr)
+	: "r" (daddr), "r" (proto + len), "0" (sum), "1" (saddr)
 	: "cc");
 	return sum;
 }
@@ -119,22 +116,20 @@
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum) 
+						   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
 
-static inline unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						 struct in6_addr *daddr,
-						 __u32 len,
-						 unsigned short proto,
-						 unsigned int sum) 
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+				      const struct in6_addr *daddr,
+				      __u32 len, unsigned short proto,
+				      __wsum sum)
 {
 	__asm__ __volatile__ (
 "	addcc		%3, %4, %%g7\n"
@@ -165,7 +160,7 @@
 }
 
 /* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff, len, 0));
 }
diff --git a/include/asm-sparc64/device.h b/include/asm-sparc64/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-sparc64/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index 27c46fb..2f858a2 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -181,7 +181,7 @@
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)
 
 static inline int
 dma_get_cache_alignment(void)
@@ -210,7 +210,7 @@
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	/* could define this in terms of the dma_cache ... operations,
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
index 7392fc4..876312f 100644
--- a/include/asm-sparc64/futex.h
+++ b/include/asm-sparc64/futex.h
@@ -45,7 +45,7 @@
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -67,7 +67,7 @@
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
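
pagefault_disable()/pagefault_enable() replace the open-coded preempt count manipulation here; the usual pattern around an atomic user access looks roughly like the sketch below (the helper name is hypothetical).

#include <linux/types.h>
#include <linux/uaccess.h>

/* Atomic peek at a user word: with pagefaults disabled, a fault makes
 * __get_user() return -EFAULT rather than sleeping in the fault
 * handler. */
static int example_peek_user(u32 __user *uaddr, u32 *val)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*val, uaddr);
	pagefault_enable();

	return ret;
}
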
diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h
index e1ea67b..ca65602 100644
--- a/include/asm-sparc64/pci.h
+++ b/include/asm-sparc64/pci.h
@@ -18,6 +18,8 @@
 
 #define PCI_IRQ_NONE		0xffffffff
 
+#define PCI_CACHE_LINE_BYTES	64
+
 static inline void pcibios_set_master(struct pci_dev *dev)
 {
 	/* No special bus mastering setup handling */
@@ -291,10 +293,6 @@
 			       enum pci_mmap_state mmap_state,
 			       int write_combine);
 
-/* Platform specific MWI support. */
-#define HAVE_ARCH_PCI_MWI
-extern int pcibios_prep_mwi(struct pci_dev *dev);
-
 extern void
 pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
 			struct resource *res);
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index 010f9cd..5891ff7 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -13,7 +13,7 @@
 #include <asm/page.h>
 
 /* Page table allocation/freeing. */
-extern kmem_cache_t *pgtable_cache;
+extern struct kmem_cache *pgtable_cache;
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h
index 63669da..4704753 100644
--- a/include/asm-sparc64/unistd.h
+++ b/include/asm-sparc64/unistd.h
@@ -332,124 +332,6 @@
  *          find a free slot in the 0-302 range.
  */
 
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "movcc %%xcc, %%o0, %0\n\t" \
-		      : "=r" (__res)\
-		      : "r" (__g1) \
-		      : "o0", "cc"); \
-if (__res >= 0) \
-    return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "movcc %%xcc, %%o0, %0\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__g1) \
-		      : "cc"); \
-if (__res >= 0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "movcc %%xcc, %%o0, %0\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__o1), "r" (__g1) \
-		      : "cc"); \
-if (__res >= 0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "movcc %%xcc, %%o0, %0\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) \
-		      : "cc"); \
-if (__res>=0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "movcc %%xcc, %%o0, %0\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__g1) \
-		      : "cc"); \
-if (__res>=0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-} 
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	  type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-register long __o4 __asm__ ("o4") = (long)(arg5); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-		      "sub %%g0, %%o0, %0\n\t" \
-		      "movcc %%xcc, %%o0, %0\n\t" \
-		      : "=r" (__res), "=&r" (__o0) \
-		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__o4), "r" (__g1) \
-		      : "cc"); \
-if (__res>=0) \
-	return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
 /* sysconf options, for SunOS compatibility */
 #define   _SC_ARG_MAX             1
 #define   _SC_CHILD_MAX           2
diff --git a/include/asm-um/device.h b/include/asm-um/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-um/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-um/dma-mapping.h b/include/asm-um/dma-mapping.h
index babd298..f0ee4fb 100644
--- a/include/asm-um/dma-mapping.h
+++ b/include/asm-um/dma-mapping.h
@@ -94,7 +94,7 @@
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d) (1)
+#define dma_is_consistent(d, h) (1)
 
 static inline int
 dma_get_cache_alignment(void)
@@ -112,7 +112,7 @@
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	BUG();
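
dma_cache_sync() now takes the device as its first argument across architectures; a hypothetical caller simply passes it through, as sketched here.

#include <linux/dma-mapping.h>

/* Hypothetical driver helper updated for the new dma_cache_sync()
 * signature, which gained a struct device * first argument. */
static void example_sync_for_device(struct device *dev, void *buf, size_t len)
{
	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
}
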
diff --git a/include/asm-v850/checksum.h b/include/asm-v850/checksum.h
index 4df5e71..d1dddd9 100644
--- a/include/asm-v850/checksum.h
+++ b/include/asm-v850/checksum.h
@@ -26,8 +26,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern unsigned int csum_partial (const unsigned char * buff, int len,
-				  unsigned int sum);
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -36,8 +35,8 @@
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern unsigned csum_partial_copy (const unsigned char *src,
-				   unsigned char *dst, int len, unsigned sum);
+extern __wsum csum_partial_copy_nocheck(const void *src,
+				   void *dst, int len, __wsum sum);
 
 
 /*
@@ -46,20 +45,17 @@
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern unsigned csum_partial_copy_from_user (const unsigned char *src,
-					     unsigned char *dst,
-					     int len, unsigned sum,
+extern __wsum csum_partial_copy_from_user (const void *src,
+					     void *dst,
+					     int len, __wsum sum,
 					     int *csum_err);
 
-#define csum_partial_copy_nocheck(src, dst, len, sum)	\
-	csum_partial_copy ((src), (dst), (len), (sum))
-
-unsigned short ip_fast_csum (unsigned char *iph, unsigned int ihl);
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /*
  *	Fold a partial checksum
  */
-static inline unsigned int csum_fold (unsigned long sum)
+static inline __sum16 csum_fold (__wsum sum)
 {
 	unsigned int result;
 	/*
@@ -68,7 +64,7 @@
 	      add %1, %0	H     L		H+L+C H+L
 	*/
 	asm ("hsw %1, %0; add %1, %0" : "=&r" (result) : "r" (sum));
-	return (~result) >> 16;
+	return (__force __sum16)(~result >> 16);
 }
 
 
@@ -76,10 +72,10 @@
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned int
-csum_tcpudp_nofold (unsigned long saddr, unsigned long daddr,
+static inline __wsum
+csum_tcpudp_nofold (__be32 saddr, __be32 daddr,
 		    unsigned short len,
-		    unsigned short proto, unsigned int sum)
+		    unsigned short proto, __wsum sum)
 {
 	int __carry;
 	__asm__ ("add %2, %0;"
@@ -93,15 +89,15 @@
 		 "add %1, %0"
 		 : "=&r" (sum), "=&r" (__carry)
 		 : "r" (daddr), "r" (saddr),
-		 "r" (ntohs (len) + (proto << 8)),
+		 "r" ((len + proto) << 8),
 		 "0" (sum));
 	return sum;
 }
 
-static inline unsigned short int
-csum_tcpudp_magic (unsigned long saddr, unsigned long daddr,
+static inline __sum16
+csum_tcpudp_magic (__be32 saddr, __be32 daddr,
 		   unsigned short len,
-		   unsigned short proto, unsigned int sum)
+		   unsigned short proto, __wsum sum)
 {
 	return csum_fold (csum_tcpudp_nofold (saddr, daddr, len, proto, sum));
 }
@@ -110,7 +106,7 @@
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-extern unsigned short ip_compute_csum (const unsigned char * buff, int len);
+extern __sum16 ip_compute_csum(const void *buff, int len);
 
 
 #endif /* __V850_CHECKSUM_H__ */
diff --git a/include/asm-v850/device.h b/include/asm-v850/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-v850/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-v850/irq.h b/include/asm-v850/irq.h
index 1bf096d..88687c1 100644
--- a/include/asm-v850/irq.h
+++ b/include/asm-v850/irq.h
@@ -46,8 +46,6 @@
 init_irq_handlers (int base_irq, int num, int interval,
 		   struct hw_interrupt_type *irq_type);
 
-typedef void (*irq_handler_t)(int irq, void *data, struct pt_regs *regs);
-
 /* Handle interrupt IRQ.  REGS are the registers at the time of the
    interrupt.  */
 extern unsigned int handle_irq (int irq, struct pt_regs *regs);
diff --git a/include/asm-v850/unistd.h b/include/asm-v850/unistd.h
index 737401e..2241ed4 100644
--- a/include/asm-v850/unistd.h
+++ b/include/asm-v850/unistd.h
@@ -204,168 +204,8 @@
 #define __NR_gettid		201
 #define __NR_tkill		202
 
-
-/* Syscall protocol:
-   Syscall number in r12, args in r6-r9, r13-r14
-   Return value in r10
-   Trap 0 for `short' syscalls, where all the args can fit in function
-   call argument registers, and trap 1 when there are additional args in
-   r13-r14.  */
-
-#define SYSCALL_NUM	"r12"
-#define SYSCALL_ARG0	"r6"
-#define SYSCALL_ARG1	"r7"
-#define SYSCALL_ARG2	"r8"
-#define SYSCALL_ARG3	"r9"
-#define SYSCALL_ARG4	"r13"
-#define SYSCALL_ARG5	"r14"
-#define SYSCALL_RET	"r10"
-
-#define SYSCALL_SHORT_TRAP	"0"
-#define SYSCALL_LONG_TRAP	"1"
-
-/* Registers clobbered by any syscall.  This _doesn't_ include the syscall
-   number (r12) or the `extended arg' registers (r13, r14), even though
-   they are actually clobbered too (this is because gcc's `asm' statement
-   doesn't allow a clobber to be used as an input or output).  */
-#define SYSCALL_CLOBBERS	"r1", "r5", "r11", "r15", "r16", \
-				"r17", "r18", "r19"
-
-/* Registers clobbered by a `short' syscall.  This includes all clobbers
-   except the syscall number (r12).  */
-#define SYSCALL_SHORT_CLOBBERS	SYSCALL_CLOBBERS, "r13", "r14"
-
 #ifdef __KERNEL__
 
-#include <asm/clinkage.h>
-#include <linux/err.h>
-
-#define __syscall_return(type, res)					      \
-  do {									      \
-	  /* user-visible error numbers are in the range -1 - -MAX_ERRNO:      \
-	     see <asm-v850/errno.h> */					      \
-	  if (__builtin_expect ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO), 0)) { \
-		  errno = -(res);					      \
-		  res = -1;						      \
-	  }								      \
-	  return (type) (res);						      \
-  } while (0)
-
-
-#define _syscall0(type, name)						      \
-type name (void)							      \
-{									      \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;	      \
-  register unsigned long __ret __asm__ (SYSCALL_RET);			      \
-  __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP			      \
-			: "=r" (__ret), "=r" (__syscall)	 	      \
-			: "1" (__syscall)				      \
-			: SYSCALL_SHORT_CLOBBERS);			      \
-  __syscall_return (type, __ret);					      \
-}
-
-#define _syscall1(type, name, atype, a)					      \
-type name (atype a)							      \
-{									      \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;			      \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;	      \
-  register unsigned long __ret __asm__ (SYSCALL_RET);			      \
-  __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP			      \
-			: "=r" (__ret), "=r" (__syscall)		      \
-			: "1" (__syscall), "r" (__a)			      \
-			: SYSCALL_SHORT_CLOBBERS);			      \
-  __syscall_return (type, __ret);					      \
-}
-
-#define _syscall2(type, name, atype, a, btype, b)			      \
-type name (atype a, btype b)						      \
-{									      \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;			      \
-  register btype __b __asm__ (SYSCALL_ARG1) = b;			      \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;	      \
-  register unsigned long __ret __asm__ (SYSCALL_RET);			      \
-  __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP			      \
-			: "=r" (__ret), "=r" (__syscall)		      \
-			: "1" (__syscall), "r" (__a), "r" (__b)		      \
-			: SYSCALL_SHORT_CLOBBERS);			      \
-  __syscall_return (type, __ret);					      \
-}
-
-#define _syscall3(type, name, atype, a, btype, b, ctype, c)		      \
-type name (atype a, btype b, ctype c)					      \
-{									      \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;			      \
-  register btype __b __asm__ (SYSCALL_ARG1) = b;			      \
-  register ctype __c __asm__ (SYSCALL_ARG2) = c;			      \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;	      \
-  register unsigned long __ret __asm__ (SYSCALL_RET);			      \
-  __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP			      \
-			: "=r" (__ret), "=r" (__syscall)		      \
-			: "1" (__syscall), "r" (__a), "r" (__b), "r" (__c)    \
-			: SYSCALL_SHORT_CLOBBERS);			      \
-  __syscall_return (type, __ret);					      \
-}
-
-#define _syscall4(type, name, atype, a, btype, b, ctype, c, dtype, d)	      \
-type name (atype a, btype b, ctype c, dtype d)				      \
-{									      \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;			      \
-  register btype __b __asm__ (SYSCALL_ARG1) = b;			      \
-  register ctype __c __asm__ (SYSCALL_ARG2) = c;			      \
-  register dtype __d __asm__ (SYSCALL_ARG3) = d;			      \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;	      \
-  register unsigned long __ret __asm__ (SYSCALL_RET);			      \
-  __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP			      \
-			: "=r" (__ret), "=r" (__syscall)		      \
-			: "1" (__syscall),				      \
-			"r" (__a), "r" (__b), "r" (__c), "r" (__d)	      \
-			: SYSCALL_SHORT_CLOBBERS);			      \
-  __syscall_return (type, __ret);					      \
-}
-
-#define _syscall5(type, name, atype, a, btype, b, ctype, c, dtype, d, etype,e)\
-type name (atype a, btype b, ctype c, dtype d, etype e)			      \
-{									      \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;			      \
-  register btype __b __asm__ (SYSCALL_ARG1) = b;			      \
-  register ctype __c __asm__ (SYSCALL_ARG2) = c;			      \
-  register dtype __d __asm__ (SYSCALL_ARG3) = d;			      \
-  register etype __e __asm__ (SYSCALL_ARG4) = e;			      \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;	      \
-  register unsigned long __ret __asm__ (SYSCALL_RET);			      \
-  __asm__ __volatile__ ("trap " SYSCALL_LONG_TRAP			      \
-			: "=r" (__ret), "=r" (__syscall), "=r" (__e)	      \
-			: "1" (__syscall),				      \
-			"r" (__a), "r" (__b), "r" (__c), "r" (__d), "2" (__e) \
-			: SYSCALL_CLOBBERS);				      \
-  __syscall_return (type, __ret);					      \
-}
-
-#define __SYSCALL6_TRAP(syscall, ret, a, b, c, d, e, f)			      \
-  __asm__ __volatile__ ("trap " SYSCALL_LONG_TRAP			      \
-			: "=r" (ret), "=r" (syscall),			      \
- 			"=r" (e), "=r" (f)				      \
-			: "1" (syscall),				      \
-			"r" (a), "r" (b), "r" (c), "r" (d),		      \
-			"2" (e), "3" (f)				      \
-			: SYSCALL_CLOBBERS);
-
-#define _syscall6(type, name, atype, a, btype, b, ctype, c, dtype, d, etype, e, ftype, f) \
-type name (atype a, btype b, ctype c, dtype d, etype e, ftype f)	      \
-{									      \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;			      \
-  register btype __b __asm__ (SYSCALL_ARG1) = b;			      \
-  register ctype __c __asm__ (SYSCALL_ARG2) = c;			      \
-  register dtype __d __asm__ (SYSCALL_ARG3) = d;			      \
-  register etype __e __asm__ (SYSCALL_ARG4) = e;			      \
-  register etype __f __asm__ (SYSCALL_ARG5) = f;			      \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;	      \
-  register unsigned long __ret __asm__ (SYSCALL_RET);			      \
-  __SYSCALL6_TRAP(__syscall, __ret, __a, __b, __c, __d, __e, __f);	      \
-  __syscall_return (type, __ret);					      \
-}
-		
-
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild
index 1ee9b07..ebd7117 100644
--- a/include/asm-x86_64/Kbuild
+++ b/include/asm-x86_64/Kbuild
@@ -6,13 +6,11 @@
 
 header-y += boot.h
 header-y += bootsetup.h
-header-y += cpufeature.h
 header-y += debugreg.h
 header-y += ldt.h
 header-y += msr.h
 header-y += prctl.h
 header-y += ptrace-abi.h
-header-y += setup.h
 header-y += sigcontext32.h
 header-y += ucontext.h
 header-y += vsyscall32.h
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
index a584826..a6657b4 100644
--- a/include/asm-x86_64/alternative.h
+++ b/include/asm-x86_64/alternative.h
@@ -4,6 +4,7 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
+#include <linux/stddef.h>
 #include <asm/cpufeature.h>
 
 struct alt_instr {
@@ -133,4 +134,15 @@
 #define LOCK_PREFIX ""
 #endif
 
+struct paravirt_patch;
+#ifdef CONFIG_PARAVIRT
+void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
+#else
+static inline void
+apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
+{}
+#define __start_parainstructions NULL
+#define __stop_parainstructions NULL
+#endif
+
 #endif /* _X86_64_ALTERNATIVE_H */
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 007e88d..706ca4b 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -21,7 +21,7 @@
  * on us. We need to use _exactly_ the address the user gave us,
  * not some alias that contains the same information.
  */
-typedef struct { volatile int counter; } atomic_t;
+typedef struct { int counter; } atomic_t;
 
 #define ATOMIC_INIT(i)	{ (i) }
 
@@ -189,9 +189,9 @@
 {
 	int __i = i;
 	__asm__ __volatile__(
-		LOCK_PREFIX "xaddl %0, %1;"
-		:"=r"(i)
-		:"m"(v->counter), "0"(i));
+		LOCK_PREFIX "xaddl %0, %1"
+		:"+r" (i), "+m" (v->counter)
+		: : "memory");
 	return i + __i;
 }
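
The rewritten asm is the core of what appears to be atomic_add_return(): after
xaddl, the register operand i holds the counter's previous value, so i + __i
is the freshly stored result. A non-atomic C model of that exchange-and-add
(illustration only; the real operation needs the locked instruction):

/* Model of xaddl: returns the new counter value, as the kernel code does. */
static int add_return_model(int i, int *counter)
{
        int old = *counter;     /* xadd: the register receives the old value */

        *counter = old + i;     /* ...while memory receives old + increment  */
        return old + i;         /* matches "return i + __i" above            */
}
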
 
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
index 6b93f5a..7ee9006 100644
--- a/include/asm-x86_64/calgary.h
+++ b/include/asm-x86_64/calgary.h
@@ -51,6 +51,8 @@
 #define TCE_TABLE_SIZE_4M		6
 #define TCE_TABLE_SIZE_8M		7
 
+extern int use_calgary;
+
 #ifdef CONFIG_CALGARY_IOMMU
 extern int calgary_iommu_init(void);
 extern void detect_calgary(void);
diff --git a/include/asm-x86_64/checksum.h b/include/asm-x86_64/checksum.h
index 989469e..419fe88 100644
--- a/include/asm-x86_64/checksum.h
+++ b/include/asm-x86_64/checksum.h
@@ -19,15 +19,16 @@
  * the last step before putting a checksum into a packet.
  * Make sure not to mix with 64bit checksums.
  */
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	__asm__(
 		"  addl %1,%0\n"
 		"  adcl $0xffff,%0"
 		: "=r" (sum)
-		: "r" (sum << 16), "0" (sum & 0xffff0000)
+		: "r" ((__force u32)sum << 16),
+		  "0" ((__force u32)sum & 0xffff0000)
 	);
-	return (~sum) >> 16;
+	return (__force __sum16)(~(__force u32)sum >> 16);
 }
 
 /*
@@ -43,7 +44,7 @@
  * iph: ipv4 header
  * ihl: length of header / 4
  */ 
-static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl) 
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int sum;
 
@@ -70,7 +71,7 @@
 	: "=r" (sum), "=r" (iph), "=r" (ihl)
 	: "1" (iph), "2" (ihl)
 	: "memory");
-	return(sum);
+	return (__force __sum16)sum;
 }
 
 /** 
@@ -84,16 +85,17 @@
  * Returns the pseudo header checksum of the input data. The result is
  * 32bit unfolded.
  */
-static inline unsigned long 
-csum_tcpudp_nofold(unsigned saddr, unsigned daddr, unsigned short len,
-		   unsigned short proto, unsigned int sum) 
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		   unsigned short proto, __wsum sum)
 {
 	asm("  addl %1, %0\n"
 	    "  adcl %2, %0\n"
 	    "  adcl %3, %0\n"
 	    "  adcl $0, %0\n"
 		: "=r" (sum)
-	    : "g" (daddr), "g" (saddr), "g" ((ntohs(len)<<16)+proto*256), "0" (sum));
+	    : "g" (daddr), "g" (saddr),
+	      "g" ((len + proto)<<8), "0" (sum));
     return sum;
 }
 
@@ -109,9 +111,9 @@
  * Returns the 16bit pseudo header checksum of the input data, already
  * complemented and ready to be filled in.
  */
-static inline unsigned short int 
-csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
-		  unsigned short len, unsigned short proto, unsigned int sum) 
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+		  unsigned short len, unsigned short proto, __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -126,25 +128,25 @@
  * Before filling it in it needs to be csum_fold()'ed.
  * buff should be aligned to a 64bit boundary if possible.
  */ 
-extern unsigned int csum_partial(const unsigned char *buff, unsigned len, unsigned int sum);
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 #define  _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
 #define HAVE_CSUM_COPY_USER 1
 
 
 /* Do not call this directly. Use the wrappers below */
-extern unsigned long csum_partial_copy_generic(const unsigned char *src, const unsigned char *dst,
-					       unsigned len,
-					       unsigned sum, 
+extern __wsum csum_partial_copy_generic(const void *src, const void *dst,
+					       int len,
+					       __wsum sum,
 					       int *src_err_ptr, int *dst_err_ptr);
 
 
-extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
-				       int len, unsigned int isum, int *errp);
-extern unsigned int csum_partial_copy_to_user(const unsigned char *src, unsigned char __user *dst,
-				      int len, unsigned int isum, int *errp);
-extern unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len,
-					      unsigned int sum);
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+				       int len, __wsum isum, int *errp);
+extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst,
+				      int len, __wsum isum, int *errp);
+extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
+					      __wsum sum);
 
 /* Old names. To be removed. */
 #define csum_and_copy_to_user csum_partial_copy_to_user
@@ -158,7 +160,7 @@
  * Returns the 16bit folded/inverted checksum of the passed buffer.
  * Ready to fill in.
  */
-extern unsigned short ip_compute_csum(unsigned char * buff, int len);
+extern __sum16 ip_compute_csum(const void *buff, int len);
 
 /**
  * csum_ipv6_magic - Compute checksum of an IPv6 pseudo header.
@@ -176,9 +178,9 @@
 struct in6_addr;
 
 #define _HAVE_ARCH_IPV6_CSUM 1
-extern unsigned short 
-csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
-		__u32 len, unsigned short proto, unsigned int sum);
+extern __sum16
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
+		__u32 len, unsigned short proto, __wsum sum);
 
 static inline unsigned add32_with_carry(unsigned a, unsigned b)
 {
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
index ee792fa..0b3c686 100644
--- a/include/asm-x86_64/cpufeature.h
+++ b/include/asm-x86_64/cpufeature.h
@@ -29,7 +29,7 @@
 #define X86_FEATURE_PSE36	(0*32+17) /* 36-bit PSEs */
 #define X86_FEATURE_PN		(0*32+18) /* Processor serial number */
 #define X86_FEATURE_CLFLSH	(0*32+19) /* Supports the CLFLUSH instruction */
-#define X86_FEATURE_DTES	(0*32+21) /* Debug Trace Store */
+#define X86_FEATURE_DS		(0*32+21) /* Debug Store */
 #define X86_FEATURE_ACPI	(0*32+22) /* ACPI via MSR */
 #define X86_FEATURE_MMX		(0*32+23) /* Multimedia Extensions */
 #define X86_FEATURE_FXSR	(0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore) */
@@ -68,6 +68,8 @@
 #define X86_FEATURE_FXSAVE_LEAK (3*32+7)  /* FIP/FOP/FDP leaks through FXSAVE */
 #define X86_FEATURE_UP		(3*32+8) /* SMP kernel running on UP */
 #define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS	(3*32+10) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS		(3*32+11) /* Branch Trace Store */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
@@ -112,5 +114,8 @@
 #define cpu_has_cyrix_arr      0
 #define cpu_has_centaur_mcr    0
 #define cpu_has_clflush	       boot_cpu_has(X86_FEATURE_CLFLSH)
+#define cpu_has_ds 	       boot_cpu_has(X86_FEATURE_DS)
+#define cpu_has_pebs 	       boot_cpu_has(X86_FEATURE_PEBS)
+#define cpu_has_bts 	       boot_cpu_has(X86_FEATURE_BTS)
 
 #endif /* __ASM_X8664_CPUFEATURE_H */
diff --git a/include/asm-x86_64/delay.h b/include/asm-x86_64/delay.h
index 65f64ac..c2669f1 100644
--- a/include/asm-x86_64/delay.h
+++ b/include/asm-x86_64/delay.h
@@ -7,18 +7,21 @@
  * Delay routines calling functions in arch/x86_64/lib/delay.c
  */
  
+/* Undefined functions to get link-time errors */
 extern void __bad_udelay(void);
 extern void __bad_ndelay(void);
 
 extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long usecs);
+extern void __ndelay(unsigned long nsecs);
 extern void __const_udelay(unsigned long usecs);
 extern void __delay(unsigned long loops);
 
+/* 0x10c7 is 2**32 / 1000000 (rounded up) */
 #define udelay(n) (__builtin_constant_p(n) ? \
-	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
+	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
 	__udelay(n))
 
+/* 0x5 is 2**32 / 1000000000 (rounded up) */
 #define ndelay(n) (__builtin_constant_p(n) ? \
        ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
        __ndelay(n))
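
Both scale factors treat the requested delay as a 32.32 fixed-point fraction
of a second: 0x10c7 is ceil(2^32 / 10^6) and 0x5 is ceil(2^32 / 10^9). A
standalone check of that arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t us = ((1ULL << 32) + 999999) / 1000000;        /* ceil(2^32/1e6) */
        uint64_t ns = ((1ULL << 32) + 999999999) / 1000000000;  /* ceil(2^32/1e9) */

        /* Prints 0x10c7 and 0x5, matching the comments above. */
        printf("udelay scale %#llx, ndelay scale %#llx\n",
               (unsigned long long)us, (unsigned long long)ns);
        return 0;
}
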
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
index eb7723a..913d6ac 100644
--- a/include/asm-x86_64/desc.h
+++ b/include/asm-x86_64/desc.h
@@ -9,64 +9,13 @@
 
 #include <linux/string.h>
 #include <linux/smp.h>
+#include <asm/desc_defs.h>
 
 #include <asm/segment.h>
 #include <asm/mmu.h>
 
-// 8 byte segment descriptor
-struct desc_struct { 
-	u16 limit0;
-	u16 base0;
-	unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
-	unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
-} __attribute__((packed)); 
-
-struct n_desc_struct { 
-	unsigned int a,b;
-}; 	
-
 extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
 
-enum { 
-	GATE_INTERRUPT = 0xE, 
-	GATE_TRAP = 0xF, 	
-	GATE_CALL = 0xC,
-}; 	
-
-// 16byte gate
-struct gate_struct {          
-	u16 offset_low;
-	u16 segment; 
-	unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
-	u16 offset_middle;
-	u32 offset_high;
-	u32 zero1; 
-} __attribute__((packed));
-
-#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF) 
-#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
-#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
-
-enum { 
-	DESC_TSS = 0x9,
-	DESC_LDT = 0x2,
-}; 
-
-// LDT or TSS descriptor in the GDT. 16 bytes.
-struct ldttss_desc { 
-	u16 limit0;
-	u16 base0;
-	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
-	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
-	u32 base3;
-	u32 zero1; 
-} __attribute__((packed)); 
-
-struct desc_ptr {
-	unsigned short size;
-	unsigned long address;
-} __attribute__((packed)) ;
-
 #define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
 #define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
 #define clear_LDT()  asm volatile("lldt %w0"::"r" (0))
diff --git a/include/asm-x86_64/desc_defs.h b/include/asm-x86_64/desc_defs.h
new file mode 100644
index 0000000..0890040
--- /dev/null
+++ b/include/asm-x86_64/desc_defs.h
@@ -0,0 +1,69 @@
+/* Written 2000 by Andi Kleen */
+#ifndef __ARCH_DESC_DEFS_H
+#define __ARCH_DESC_DEFS_H
+
+/*
+ * Segment descriptor structure definitions, usable from both x86_64 and i386
+ * archs.
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+// 8 byte segment descriptor
+struct desc_struct {
+	u16 limit0;
+	u16 base0;
+	unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
+	unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
+} __attribute__((packed));
+
+struct n_desc_struct {
+	unsigned int a,b;
+};
+
+enum {
+	GATE_INTERRUPT = 0xE,
+	GATE_TRAP = 0xF,
+	GATE_CALL = 0xC,
+};
+
+// 16byte gate
+struct gate_struct {
+	u16 offset_low;
+	u16 segment;
+	unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
+	u16 offset_middle;
+	u32 offset_high;
+	u32 zero1;
+} __attribute__((packed));
+
+#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
+#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
+#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
+
+enum {
+	DESC_TSS = 0x9,
+	DESC_LDT = 0x2,
+};
+
+// LDT or TSS descriptor in the GDT. 16 bytes.
+struct ldttss_desc {
+	u16 limit0;
+	u16 base0;
+	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
+	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+	u32 base3;
+	u32 zero1;
+} __attribute__((packed));
+
+struct desc_ptr {
+	unsigned short size;
+	unsigned long address;
+} __attribute__((packed)) ;
+
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
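
The PTR_LOW/PTR_MIDDLE/PTR_HIGH macros split a 64-bit handler address across
the three offset fields of the 16-byte gate. A sketch of filling a gate with
them (the helper name and the ist/dpl values are illustrative, not the
kernel's own gate setter):

/* Hypothetical helper: build an interrupt gate for a 64-bit handler. */
static void fill_gate(struct gate_struct *g, u16 seg, unsigned long addr)
{
        g->offset_low    = PTR_LOW(addr);       /* handler bits  0..15       */
        g->segment       = seg;                 /* e.g. kernel code selector */
        g->ist           = 0;
        g->zero0         = 0;
        g->type          = GATE_INTERRUPT;      /* 0xE                       */
        g->dpl           = 0;
        g->p             = 1;
        g->offset_middle = PTR_MIDDLE(addr);    /* handler bits 16..31       */
        g->offset_high   = PTR_HIGH(addr);      /* handler bits 32..63       */
        g->zero1         = 0;
}
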
diff --git a/include/asm-x86_64/device.h b/include/asm-x86_64/device.h
new file mode 100644
index 0000000..3afa03f
--- /dev/null
+++ b/include/asm-x86_64/device.h
@@ -0,0 +1,15 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#ifndef _ASM_X86_64_DEVICE_H
+#define _ASM_X86_64_DEVICE_H
+
+struct dev_archdata {
+#ifdef CONFIG_ACPI
+	void	*acpi_handle;
+#endif
+};
+
+#endif /* _ASM_X86_64_DEVICE_H */
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index 10174b1..be9ec68 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -180,12 +180,13 @@
 	return boot_cpu_data.x86_clflush_size;
 }
 
-#define dma_is_consistent(h) 1
+#define dma_is_consistent(d, h) 1
 
 extern int dma_set_mask(struct device *dev, u64 mask);
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction dir)
 {
 	flush_write_buffers();
 }
diff --git a/include/asm-x86_64/elf.h b/include/asm-x86_64/elf.h
index a406fcb..6d24ea7 100644
--- a/include/asm-x86_64/elf.h
+++ b/include/asm-x86_64/elf.h
@@ -45,7 +45,6 @@
 
 #ifdef __KERNEL__
 #include <asm/processor.h>
-#include <asm/compat.h>
 
 /*
  * This is used to ensure we don't load something for the wrong architecture.
diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h
index 9804bf0..5cdfb08 100644
--- a/include/asm-x86_64/futex.h
+++ b/include/asm-x86_64/futex.h
@@ -55,7 +55,7 @@
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -78,7 +78,7 @@
 		ret = -ENOSYS;
 	}
 
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
diff --git a/include/asm-x86_64/genapic.h b/include/asm-x86_64/genapic.h
index a0e9a4b..b80f4bb 100644
--- a/include/asm-x86_64/genapic.h
+++ b/include/asm-x86_64/genapic.h
@@ -30,6 +30,6 @@
 };
 
 
-extern struct genapic *genapic;
+extern struct genapic *genapic, *genapic_force, apic_flat;
 
 #endif
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 37e1941..952783d 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -169,8 +169,8 @@
 #define MSR_LSTAR 0xc0000082 		/* long mode SYSCALL target */
 #define MSR_CSTAR 0xc0000083		/* compatibility mode SYSCALL target */
 #define MSR_SYSCALL_MASK 0xc0000084	/* EFLAGS mask for syscall */
-#define MSR_FS_BASE 0xc0000100		/* 64bit GS base */
-#define MSR_GS_BASE 0xc0000101		/* 64bit FS base */
+#define MSR_FS_BASE 0xc0000100		/* 64bit FS base */
+#define MSR_GS_BASE 0xc0000101		/* 64bit GS base */
 #define MSR_KERNEL_GS_BASE  0xc0000102	/* SwapGS GS shadow (or USER_GS from kernel) */ 
 /* EFER bits: */ 
 #define _EFER_SCE 0  /* SYSCALL/SYSRET */
@@ -210,6 +210,10 @@
 #define MSR_IA32_LASTINTFROMIP     0x1dd
 #define MSR_IA32_LASTINTTOIP       0x1de
 
+#define MSR_IA32_PEBS_ENABLE		0x3f1
+#define MSR_IA32_DS_AREA		0x600
+#define MSR_IA32_PERF_CAPABILITIES	0x345
+
 #define MSR_MTRRfix64K_00000	0x250
 #define MSR_MTRRfix16K_80000	0x258
 #define MSR_MTRRfix16K_A0000	0x259
@@ -407,4 +411,13 @@
 #define MSR_P4_U2L_ESCR0 		0x3b0
 #define MSR_P4_U2L_ESCR1 		0x3b1
 
+/* Intel Core-based CPU performance counters */
+#define MSR_CORE_PERF_FIXED_CTR0	0x309
+#define MSR_CORE_PERF_FIXED_CTR1	0x30a
+#define MSR_CORE_PERF_FIXED_CTR2	0x30b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL	0x38d
+#define MSR_CORE_PERF_GLOBAL_STATUS	0x38e
+#define MSR_CORE_PERF_GLOBAL_CTRL	0x38f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL	0x390
+
 #endif
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h
index f367d40..72375e7 100644
--- a/include/asm-x86_64/nmi.h
+++ b/include/asm-x86_64/nmi.h
@@ -77,4 +77,7 @@
 
 extern int unknown_nmi_panic;
 
+void __trigger_all_cpu_backtrace(void);
+#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+
 #endif /* ASM_NMI_H */
diff --git a/include/asm-x86_64/pci-direct.h b/include/asm-x86_64/pci-direct.h
index eba9cb4..6823fa4 100644
--- a/include/asm-x86_64/pci-direct.h
+++ b/include/asm-x86_64/pci-direct.h
@@ -10,6 +10,7 @@
 extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset);
 extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset);
 extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val);
+extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val);
 
 extern int early_pci_allowed(void);
 
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 0555c1c..59901c6 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -221,20 +221,19 @@
 #define __S110	PAGE_SHARED_EXEC
 #define __S111	PAGE_SHARED_EXEC
 
-static inline unsigned long pgd_bad(pgd_t pgd) 
-{ 
-       unsigned long val = pgd_val(pgd);
-       val &= ~PTE_MASK; 
-       val &= ~(_PAGE_USER | _PAGE_DIRTY); 
-       return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);      
-} 
+static inline unsigned long pgd_bad(pgd_t pgd)
+{
+	return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+}
 
 static inline unsigned long pud_bad(pud_t pud)
 {
-       unsigned long val = pud_val(pud);
-       val &= ~PTE_MASK;
-       val &= ~(_PAGE_USER | _PAGE_DIRTY);
-       return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
+	return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+}
+
+static inline unsigned long pmd_bad(pmd_t pmd)
+{
+	return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
 }
 
 #define pte_none(x)	(!pte_val(x))
@@ -347,7 +346,6 @@
 #define pmd_none(x)	(!pmd_val(x))
 #define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
 #define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
-#define	pmd_bad(x)	((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE )
 #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
 #define pmd_pfn(x)  ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index cef17e0..76552d7 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -475,6 +475,14 @@
 		: :"a" (eax), "c" (ecx));
 }
 
+static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+{
+	/* "mwait %eax,%ecx;" */
+	asm volatile(
+		"sti; .byte 0x0f,0x01,0xc9;"
+		: :"a" (eax), "c" (ecx));
+}
+
 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
 
 #define stack_current() \
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index e72cfcd..6d324b8 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -61,7 +61,6 @@
 extern unsigned long numa_free_all_bootmem(void);
 
 extern void reserve_bootmem_generic(unsigned long phys, unsigned len);
-extern void free_bootmem_generic(unsigned long phys, unsigned len);
 
 extern void load_gs_index(unsigned gs);
 
@@ -88,6 +87,7 @@
 extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end);
 
 extern void early_quirks(void);
+extern void quirk_intel_irqbalance(void);
 extern void check_efer(void);
 
 extern int unhandled_signal(struct task_struct *tsk, int sig);
diff --git a/include/asm-x86_64/rio.h b/include/asm-x86_64/rio.h
new file mode 100644
index 0000000..c7350f6
--- /dev/null
+++ b/include/asm-x86_64/rio.h
@@ -0,0 +1,74 @@
+/*
+ * Derived from include/asm-i386/mach-summit/mach_mpparse.h
+ *          and include/asm-i386/mach-default/bios_ebda.h
+ *
+ * Author: Laurent Vivier <Laurent.Vivier@bull.net>
+ */
+
+#ifndef __ASM_RIO_H
+#define __ASM_RIO_H
+
+#define RIO_TABLE_VERSION	3
+
+struct rio_table_hdr {
+	u8 version;      /* Version number of this data structure  */
+	u8 num_scal_dev; /* # of Scalability devices               */
+	u8 num_rio_dev;  /* # of RIO I/O devices                   */
+} __attribute__((packed));
+
+struct scal_detail {
+	u8 node_id;      /* Scalability Node ID                    */
+	u32 CBAR;        /* Address of 1MB register space          */
+	u8 port0node;    /* Node ID port connected to: 0xFF=None   */
+	u8 port0port;    /* Port num port connected to: 0,1,2, or  */
+	                 /* 0xFF=None                              */
+	u8 port1node;    /* Node ID port connected to: 0xFF = None */
+	u8 port1port;    /* Port num port connected to: 0,1,2, or  */
+	                 /* 0xFF=None                              */
+	u8 port2node;    /* Node ID port connected to: 0xFF = None */
+	u8 port2port;    /* Port num port connected to: 0,1,2, or  */
+	                 /* 0xFF=None                              */
+	u8 chassis_num;  /* 1 based Chassis number (1 = boot node) */
+} __attribute__((packed));
+
+struct rio_detail {
+	u8 node_id;      /* RIO Node ID                            */
+	u32 BBAR;        /* Address of 1MB register space          */
+	u8 type;         /* Type of device                         */
+	u8 owner_id;     /* Node ID of Hurricane that owns this    */
+	                 /* node                                   */
+	u8 port0node;    /* Node ID port connected to: 0xFF=None   */
+	u8 port0port;    /* Port num port connected to: 0,1,2, or  */
+	                 /* 0xFF=None                              */
+	u8 port1node;    /* Node ID port connected to: 0xFF=None   */
+	u8 port1port;    /* Port num port connected to: 0,1,2, or  */
+	                 /* 0xFF=None                              */
+	u8 first_slot;   /* Lowest slot number below this Calgary  */
+	u8 status;       /* Bit 0 = 1 : the XAPIC is used          */
+	                 /*       = 0 : the XAPIC is not used, ie: */
+	                 /*            ints fwded to another XAPIC */
+	                 /*           Bits1:7 Reserved             */
+	u8 WP_index;     /* instance index - lower ones have       */
+	                 /*     lower slot numbers/PCI bus numbers */
+	u8 chassis_num;  /* 1 based Chassis number                 */
+} __attribute__((packed));
+
+enum {
+	HURR_SCALABILTY	= 0,  /* Hurricane Scalability info */
+	HURR_RIOIB	= 2,  /* Hurricane RIOIB info       */
+	COMPAT_CALGARY	= 4,  /* Compatibility Calgary      */
+	ALT_CALGARY	= 5,  /* Second Planar Calgary      */
+};
+
+/*
+ * there is a real-mode segmented pointer pointing to the
+ * 4K EBDA area at 0x40E.
+ */
+static inline unsigned long get_bios_ebda(void)
+{
+	unsigned long address = *(unsigned short *)phys_to_virt(0x40EUL);
+	address <<= 4;
+	return address;
+}
+
+#endif /* __ASM_RIO_H */
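
get_bios_ebda() reads the real-mode segment stored in the BIOS data area word
at 0x40E and shifts it left by 4 to form a physical address; a stored segment
of 0x9FC0, for example, places the EBDA at 0x9FC00. A caller sketch (the
message is illustrative):

#include <linux/kernel.h>
#include <asm/rio.h>

static void report_ebda(void)
{
        unsigned long ebda = get_bios_ebda();   /* segment << 4 */

        if (ebda)
                printk(KERN_INFO "EBDA found at %#lx\n", ebda);
}
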
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index d6b7c05..e17b9ec4 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -82,11 +82,6 @@
 extern u8 x86_cpu_to_log_apicid[NR_CPUS];
 extern u8 bios_cpu_apicid[];
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
-{
-	return cpus_addr(cpumask)[0];
-}
-
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
 	if (mps_cpu < NR_CPUS)
@@ -118,13 +113,6 @@
 #define cpu_physical_id(cpu)		x86_cpu_to_apicid[cpu]
 #else
 #define cpu_physical_id(cpu)		boot_cpu_id
-static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
-				void *info, int retry, int wait)
-{
-	/* Disable interrupts here? */
-	func(info);
-	return 0;
-}
 #endif /* !CONFIG_SMP */
 #endif
 
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 05ef097..88bf981 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -36,7 +36,34 @@
 		"2:\t" : "=m" (lock->slock) : : "memory");
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+/*
+ * Same as __raw_spin_lock, but reenable interrupts during spinning.
+ */
+#ifndef CONFIG_PROVE_LOCKING
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+	asm volatile(
+		"\n1:\t"
+		LOCK_PREFIX " ; decl %0\n\t"
+		"jns 5f\n"
+		"testl $0x200, %1\n\t"	/* interrupts were disabled? */
+		"jz 4f\n\t"
+	        "sti\n"
+		"3:\t"
+		"rep;nop\n\t"
+		"cmpl $0, %0\n\t"
+		"jle 3b\n\t"
+		"cli\n\t"
+		"jmp 1b\n"
+		"4:\t"
+		"rep;nop\n\t"
+		"cmpl $0, %0\n\t"
+		"jg 1b\n\t"
+		"jmp 4b\n"
+		"5:\n\t"
+		: "+m" (lock->slock) : "r" ((unsigned)flags) : "memory");
+}
+#endif
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
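
The new __raw_spin_lock_flags() spins with interrupts re-enabled whenever the
caller's saved flags show they were on (EFLAGS.IF is bit 9, hence the 0x200
test), so interrupts are not held off for the whole contention window. A
C-level model of the asm's control flow (try_acquire() and is_locked() are
placeholders for the locked decl and the plain compare; the real code stays
in asm):

static void spin_lock_flags_model(raw_spinlock_t *lock, unsigned long flags)
{
        for (;;) {
                if (try_acquire(lock))          /* "lock; decl" then "jns 5f" */
                        return;
                if (flags & 0x200) {            /* interrupts were enabled    */
                        local_irq_enable();     /* sti                        */
                        while (is_locked(lock))
                                cpu_relax();    /* rep; nop                   */
                        local_irq_disable();    /* cli before retrying        */
                } else {
                        while (is_locked(lock))
                                cpu_relax();
                }
        }
}
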
diff --git a/include/asm-x86_64/spinlock_types.h b/include/asm-x86_64/spinlock_types.h
index 59efe84..4da9345 100644
--- a/include/asm-x86_64/spinlock_types.h
+++ b/include/asm-x86_64/spinlock_types.h
@@ -6,13 +6,13 @@
 #endif
 
 typedef struct {
-	volatile unsigned int slock;
+	unsigned int slock;
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 1 }
 
 typedef struct {
-	volatile unsigned int lock;
+	unsigned int lock;
 } raw_rwlock_t;
 
 #define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
diff --git a/include/asm-x86_64/stacktrace.h b/include/asm-x86_64/stacktrace.h
index 5eb9799..6f0b545 100644
--- a/include/asm-x86_64/stacktrace.h
+++ b/include/asm-x86_64/stacktrace.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_STACKTRACE_H
 #define _ASM_STACKTRACE_H 1
 
+extern int kstack_depth_to_print;
+
 /* Generic stack tracer with callbacks */
 
 struct stacktrace_ops {
diff --git a/include/asm-x86_64/types.h b/include/asm-x86_64/types.h
index c86c2e6..2d4491a 100644
--- a/include/asm-x86_64/types.h
+++ b/include/asm-x86_64/types.h
@@ -48,9 +48,6 @@
 typedef u64 dma64_addr_t;
 typedef u64 dma_addr_t;
 
-typedef u64 sector_t;
-#define HAVE_SECTOR_T
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
index 19f9917..d5dbc87 100644
--- a/include/asm-x86_64/uaccess.h
+++ b/include/asm-x86_64/uaccess.h
@@ -6,7 +6,6 @@
  */
 #include <linux/compiler.h>
 #include <linux/errno.h>
-#include <linux/sched.h>
 #include <linux/prefetch.h>
 #include <asm/page.h>
 
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 777288e..c5f596e 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -622,25 +622,7 @@
 
 #define __NR_syscall_max __NR_move_pages
 
-#ifdef __KERNEL__
-#include <linux/err.h>
-#endif
-
 #ifndef __NO_STUBS
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO */
-
-#define __syscall_clobber "r11","rcx","memory" 
-
-#define __syscall_return(type, res) \
-do { \
-	if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-		errno = -(res); \
-		res = -1; \
-	} \
-	return (type) (res); \
-} while (0)
-
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
 #define __ARCH_WANT_SYS_ALARM
@@ -664,87 +646,6 @@
 #define __ARCH_WANT_SYS_TIME
 #define __ARCH_WANT_COMPAT_SYS_TIME
 
-#define __syscall "syscall"
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-__asm__ volatile (__syscall \
-	: "=a" (__res) \
-	: "0" (__NR_##name) : __syscall_clobber ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-long __res; \
-__asm__ volatile (__syscall \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"D" ((long)(arg1)) : __syscall_clobber ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-long __res; \
-__asm__ volatile (__syscall \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)) : __syscall_clobber ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-__asm__ volatile (__syscall \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
-		  "d" ((long)(arg3)) : __syscall_clobber); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-long __res; \
-__asm__ volatile ("movq %5,%%r10 ;" __syscall \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
-	  "d" ((long)(arg3)),"g" ((long)(arg4)) : __syscall_clobber,"r10" ); \
-__syscall_return(type,__res); \
-} 
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	  type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
-	  "d" ((long)(arg3)),"g" ((long)(arg4)),"g" ((long)(arg5)) : \
-	__syscall_clobber,"r8","r10" ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-	  type5,arg5,type6,arg6) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
-{ \
-long __res; \
-__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; movq %7,%%r9 ; " __syscall \
-	: "=a" (__res) \
-	: "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
-	  "d" ((long)(arg3)), "g" ((long)(arg4)), "g" ((long)(arg5)), \
-	  "g" ((long)(arg6)) : \
-	__syscall_clobber,"r8","r10","r9" ); \
-__syscall_return(type,__res); \
-}
-
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h
index 2e7ff10..2f6349e 100644
--- a/include/asm-x86_64/unwind.h
+++ b/include/asm-x86_64/unwind.h
@@ -87,14 +87,10 @@
 
 static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
 {
-#if 0 /* This can only work when selector register saves/restores
-         are properly annotated (and tracked in UNW_REGISTER_INFO). */
-	return user_mode(&info->regs);
-#else
-	return (long)info->regs.rip >= 0
+	return user_mode(&info->regs)
+	       || (long)info->regs.rip >= 0
 	       || (info->regs.rip >= VSYSCALL_START && info->regs.rip < VSYSCALL_END)
 	       || (long)info->regs.rsp >= 0;
-#endif
 }
 
 #else
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index 01d1c17..05cb8dd 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -10,6 +10,7 @@
 #define VSYSCALL_START (-10UL << 20)
 #define VSYSCALL_SIZE 1024
 #define VSYSCALL_END (-2UL << 20)
+#define VSYSCALL_MAPPED_PAGES 1
 #define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
 
 #ifdef __KERNEL__
diff --git a/include/asm-xtensa/checksum.h b/include/asm-xtensa/checksum.h
index 03114f8..5435aff 100644
--- a/include/asm-xtensa/checksum.h
+++ b/include/asm-xtensa/checksum.h
@@ -26,7 +26,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -36,7 +36,7 @@
  * better 64-bit) boundary
  */
 
-asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, int len, int sum,
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum,
 						   int *src_err_ptr, int *dst_err_ptr);
 
 /*
@@ -46,34 +46,25 @@
  *	If you use these functions directly please don't forget the access_ok().
  */
 static inline
-unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
-					int len, int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+					int len, __wsum sum)
 {
-	return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
+	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
 }
 
 static inline
-unsigned int csum_partial_copy_from_user ( const char *src, char *dst,
-						int len, int sum, int *err_ptr)
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+						int len, __wsum sum, int *err_ptr)
 {
-	return csum_partial_copy_generic ( src, dst, len, sum, err_ptr, NULL);
+	return csum_partial_copy_generic((__force const void *)src, dst,
+					len, sum, err_ptr, NULL);
 }
 
 /*
- * These are the old (and unsafe) way of doing checksums, a warning message will be
- * printed if they are used and an exeption occurs.
- *
- * these functions should go away after some time.
- */
-
-#define csum_partial_copy_fromuser csum_partial_copy
-unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum);
-
-/*
  *	Fold a partial checksum
  */
 
-static __inline__ unsigned int csum_fold(unsigned int sum)
+static __inline__ __sum16 csum_fold(__wsum sum)
 {
 	unsigned int __dummy;
 	__asm__("extui	%1, %0, 16, 16\n\t"
@@ -87,14 +78,14 @@
 		"extui	%0, %0, 0, 16\n\t"
 		: "=r" (sum), "=&r" (__dummy)
 		: "0" (sum));
-	return sum;
+	return (__force __sum16)sum;
 }
 
 /*
  *	This is a version of ip_compute_csum() optimized for IP headers,
  *	which always checksum on 4 octet boundaries.
  */
-static __inline__ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int sum, tmp, endaddr;
 
@@ -127,17 +118,16 @@
 	return	csum_fold(sum);
 }
 
-static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
-						   unsigned long daddr,
+static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum)
+						   __wsum sum)
 {
 
 #ifdef __XTENSA_EL__
-	unsigned long len_proto = (ntohs(len)<<16)+proto*256;
+	unsigned long len_proto = (len + proto) << 8;
 #elif defined(__XTENSA_EB__)
-	unsigned long len_proto = (proto<<16)+len;
+	unsigned long len_proto = len + proto;
 #else
 # error processor byte order undefined!
 #endif
@@ -162,11 +152,10 @@
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static __inline__ unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						       unsigned long daddr,
+static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						       unsigned short len,
 						       unsigned short proto,
-						       unsigned int sum)
+						       __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -176,17 +165,16 @@
  * in icmp.c
  */
 
-static __inline__ unsigned short ip_compute_csum(unsigned char * buff, int len)
+static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
 {
     return csum_fold (csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						     struct in6_addr *daddr,
-						     __u32 len,
-						     unsigned short proto,
-						     unsigned int sum)
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+					  const struct in6_addr *daddr,
+					  __u32 len, unsigned short proto,
+					  __wsum sum)
 {
 	unsigned int __dummy;
 	__asm__("l32i	%1, %2, 0\n\t"
@@ -248,8 +236,8 @@
  *	Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-static __inline__ unsigned int csum_and_copy_to_user (const char *src, char *dst,
-				    int len, int sum, int *err_ptr)
+static __inline__ __wsum csum_and_copy_to_user(const void *src, void __user *dst,
+				    int len, __wsum sum, int *err_ptr)
 {
 	if (access_ok(VERIFY_WRITE, dst, len))
 		return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr);
@@ -257,6 +245,6 @@
 	if (len)
 		*err_ptr = -EFAULT;
 
-	return -1; /* invalid checksum */
+	return (__force __wsum)-1; /* invalid checksum */
 }
 #endif
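
As with the other ports, verification is just folding: ip_compute_csum() above
is csum_fold(csum_partial(...)), and a received IPv4 header is intact when
ip_fast_csum() over the whole header (checksum field included) comes out zero.
A sketch of that check (field names as in <linux/ip.h>):

#include <linux/ip.h>
#include <net/checksum.h>

/* Returns non-zero when the IPv4 header checksum verifies. */
static int ipv4_header_ok(const struct iphdr *iph)
{
        return ip_fast_csum(iph, iph->ihl) == 0;
}
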
diff --git a/include/asm-xtensa/device.h b/include/asm-xtensa/device.h
new file mode 100644
index 0000000..d8f9872
--- /dev/null
+++ b/include/asm-xtensa/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h
index c39c91d..82b03b3 100644
--- a/include/asm-xtensa/dma-mapping.h
+++ b/include/asm-xtensa/dma-mapping.h
@@ -170,10 +170,10 @@
 	return L1_CACHE_BYTES;
 }
 
-#define dma_is_consistent(d)	(1)
+#define dma_is_consistent(d, h)	(1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	consistent_sync(vaddr, size, direction);
diff --git a/include/asm-xtensa/unistd.h b/include/asm-xtensa/unistd.h
index 411f810..2e1a1b9 100644
--- a/include/asm-xtensa/unistd.h
+++ b/include/asm-xtensa/unistd.h
@@ -218,190 +218,6 @@
 
 #define SYSXTENSA_COUNT		   5	/* count of syscall0 functions*/
 
-#ifdef __KERNEL__
-#include <linux/linkage.h>
-
-#define __syscall_return(type, res) return ((type)(res))
-
-/* Tensilica's xt-xcc compiler is much more agressive at code
- * optimization than gcc.  Multiple __asm__ statements are
- * insufficient for xt-xcc because subsequent optimization passes
- * (beyond the front-end that knows of __asm__ statements and other
- * such GNU Extensions to C) can modify the register selection for
- * containment of C variables.
- *
- * xt-xcc cannot modify the contents of a single __asm__ statement, so
- * we create single-asm versions of the syscall macros that are
- * suitable and optimal for both xt-xcc and gcc.
- *
- * Linux takes system-call arguments in registers.  The following
- * design is optimized for user-land apps (e.g., glibc) which
- * typically have a function wrapper around the "syscall" assembly
- * instruction.  It satisfies the Xtensa ABI while minizing argument
- * shifting.
- *
- * The Xtensa ABI and software conventions require the system-call
- * number in a2.  If an argument exists in a2, we move it to the next
- * available register.  Note that for improved efficiency, we do NOT
- * shift all parameters down one register to maintain the original
- * order.
- *
- * At best case (zero arguments), we just write the syscall number to
- * a2.  At worst case (1 to 6 arguments), we move the argument in a2
- * to the next available register, then write the syscall number to
- * a2.
- *
- * For clarity, the following truth table enumerates all possibilities.
- *
- * arguments	syscall number	arg0, arg1, arg2, arg3, arg4, arg5
- * ---------	--------------	----------------------------------
- *	0	      a2
- *	1	      a2	a3
- *	2	      a2	a4,   a3
- *	3	      a2	a5,   a3,   a4
- *	4	      a2	a6,   a3,   a4,   a5
- *	5	      a2	a7,   a3,   a4,   a5,   a6
- *	6	      a2	a8,   a3,   a4,   a5,   a6,   a7
- */
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-	"  movi  a2, %1 \n" \
-	"  syscall      \n" \
-	"  mov   %0, a2 \n" \
-	: "=a" (__res) \
-	: "i" (__NR_##name) \
-	: "a2" \
-	); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type0,arg0) \
-type name(type0 arg0) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-	"  mov   a3, %2 \n" \
-	"  movi  a2, %1 \n" \
-	"  syscall      \n" \
-	"  mov   %0, a2 \n" \
-	: "=a" (__res) \
-	: "i" (__NR_##name), "a" (arg0) \
-	: "a2", "a3" \
-	); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type0,arg0,type1,arg1) \
-type name(type0 arg0,type1 arg1) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-	"  mov   a4, %2 \n" \
-	"  mov   a3, %3 \n" \
-	"  movi  a2, %1 \n" \
-	"  syscall      \n" \
-	"  mov   %0, a2 \n" \
-	: "=a" (__res) \
-	: "i" (__NR_##name), "a" (arg0), "a" (arg1) \
-	: "a2", "a3", "a4" \
-	); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type0,arg0,type1,arg1,type2,arg2) \
-type name(type0 arg0,type1 arg1,type2 arg2) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-	"  mov   a5, %2 \n" \
-	"  mov   a4, %4 \n" \
-	"  mov   a3, %3 \n" \
-	"  movi  a2, %1 \n" \
-	"  syscall      \n" \
-	"  mov   %0, a2 \n" \
-	: "=a" (__res) \
-	: "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2) \
-	: "a2", "a3", "a4", "a5" \
-	); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3) \
-type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-	"  mov   a6, %2 \n" \
-	"  mov   a5, %5 \n" \
-	"  mov   a4, %4 \n" \
-	"  mov   a3, %3 \n" \
-	"  movi  a2, %1 \n" \
-	"  syscall      \n" \
-	"  mov   %0, a2 \n" \
-	: "=a" (__res) \
-	: "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), "a" (arg3) \
-	: "a2", "a3", "a4", "a5", "a6" \
-	); \
-__syscall_return(type,__res); \
-}
-
-/* Note that we save and restore the a7 frame pointer.
- * Including a7 in the clobber list doesn't do what you'd expect.
- */
-#define _syscall5(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-	"  mov   a9, a7 \n" \
-	"  mov   a7, %2 \n" \
-	"  mov   a6, %6 \n" \
-	"  mov   a5, %5 \n" \
-	"  mov   a4, %4 \n" \
-	"  mov   a3, %3 \n" \
-	"  movi  a2, %1 \n" \
-	"  syscall      \n" \
-	"  mov   a7, a9 \n" \
-	"  mov   %0, a2 \n" \
-	: "=a" (__res) \
-	: "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), \
-                             "a" (arg3), "a" (arg4) \
-	: "a2", "a3", "a4", "a5", "a6", "a9" \
-	); \
-__syscall_return(type,__res); \
-}
-
-/* Note that we save and restore the a7 frame pointer.
- * Including a7 in the clobber list doesn't do what you'd expect.
- */
-#define _syscall6(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-	"  mov   a9, a7 \n" \
-	"  mov   a8, %2 \n" \
-	"  mov   a7, %7 \n" \
-	"  mov   a6, %6 \n" \
-	"  mov   a5, %5 \n" \
-	"  mov   a4, %4 \n" \
-	"  mov   a3, %3 \n" \
-	"  movi  a2, %1 \n" \
-	"  syscall      \n" \
-	"  mov   a7, a9 \n" \
-	"  mov   %0, a2 \n" \
-	: "=a" (__res) \
-	: "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), \
-                             "a" (arg3), "a" (arg4), "a" (arg5)  \
-	: "a2", "a3", "a4", "a5", "a6", "a8", "a9" \
-	); \
-__syscall_return(type,__res); \
-}
-
 /*
  * "Conditional" syscalls
  *
diff --git a/include/crypto/b128ops.h b/include/crypto/b128ops.h
new file mode 100644
index 0000000..0b8e6bc
--- /dev/null
+++ b/include/crypto/b128ops.h
@@ -0,0 +1,80 @@
+/* b128ops.h - common 128-bit block operations
+ *
+ * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
+ * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on Dr Brian Gladman's (GPL'd) work published at
+ * http://fp.gladman.plus.com/cryptography_technology/index.htm
+ * See the original copyright notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.   All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+   1. distributions of this source code include the above copyright
+      notice, this list of conditions and the following disclaimer;
+
+   2. distributions in binary form include the above copyright
+      notice, this list of conditions and the following disclaimer
+      in the documentation and/or other associated materials;
+
+   3. the copyright holder's name is not used to endorse products
+      built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 13/06/2006
+*/
+
+#ifndef _CRYPTO_B128OPS_H
+#define _CRYPTO_B128OPS_H
+
+#include <linux/types.h>
+
+typedef struct {
+	u64 a, b;
+} u128;
+
+typedef struct {
+	__be64 a, b;
+} be128;
+
+typedef struct {
+	__le64 b, a;
+} le128;
+
+static inline void u128_xor(u128 *r, const u128 *p, const u128 *q)
+{
+	r->a = p->a ^ q->a;
+	r->b = p->b ^ q->b;
+}
+
+static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
+{
+	u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
+}
+
+static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
+{
+	u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
+}
+
+#endif /* _CRYPTO_B128OPS_H */
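
The xor helpers work on whole 16-byte blocks regardless of byte order, which
is exactly what tweakable-cipher code needs when whitening a block with a
per-block tweak. A minimal usage sketch (the buffers are illustrative):

#include <crypto/b128ops.h>

/* XOR a 128-bit tweak into a block, e.g. before encrypting it. */
static void whiten_block(be128 *block, const be128 *tweak)
{
        be128_xor(block, block, tweak);         /* block ^= tweak */
}
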
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
new file mode 100644
index 0000000..4fd3152
--- /dev/null
+++ b/include/crypto/gf128mul.h
@@ -0,0 +1,198 @@
+/* gf128mul.h - GF(2^128) multiplication functions
+ *
+ * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
+ * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on Dr Brian Gladman's (GPL'd) work published at
+ * http://fp.gladman.plus.com/cryptography_technology/index.htm
+ * See the original copyright notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.   All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+   1. distributions of this source code include the above copyright
+      notice, this list of conditions and the following disclaimer;
+
+   2. distributions in binary form include the above copyright
+      notice, this list of conditions and the following disclaimer
+      in the documentation and/or other associated materials;
+
+   3. the copyright holder's name is not used to endorse products
+      built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 31/01/2006
+
+ An implementation of field multiplication in Galois Field GF(128)
+*/
+
+#ifndef _CRYPTO_GF128MUL_H
+#define _CRYPTO_GF128MUL_H
+
+#include <crypto/b128ops.h>
+#include <linux/slab.h>
+
+/* Comment by Rik:
+ *
+ * For some background on GF(2^128) see for example: http://-
+ * csrc.nist.gov/CryptoToolkit/modes/proposedmodes/gcm/gcm-revised-spec.pdf
+ *
+ * The elements of GF(2^128) := GF(2)[X]/(X^128-X^7-X^2-X^1-1) can
+ * be mapped to computer memory in a variety of ways. Let's examine
+ * three common cases.
+ *
+ * Take a look at the 16 binary octets below in memory order. The msb's
+ * are left and the lsb's are right. char b[16] is an array and b[0] is
+ * the first octet.
+ *
+ * 80000000 00000000 00000000 00000000 .... 00000000 00000000 00000000
+ *   b[0]     b[1]     b[2]     b[3]          b[13]    b[14]    b[15]
+ *
+ * Every bit is a coefficient of some power of X. We can store the bits
+ * in every byte in little-endian order and the bytes themselves also in
+ * little endian order. I will call this lle (little-little-endian).
+ * The above buffer represents the polynomial 1, and X^7+X^2+X^1+1 looks
+ * like 11100001 00000000 .... 00000000 = { 0xE1, 0x00, }.
+ * This format was originally implemented in gf128mul and is used
+ * in GCM (Galois/Counter mode) and in ABL (Arbitrary Block Length).
+ *
+ * Another convention says: store the bits in bigendian order and the
+ * bytes also. This is bbe (big-big-endian). Now the buffer above
+ * represents X^127. X^7+X^2+X^1+1 looks like 00000000 .... 10000111,
+ * b[15] = 0x87 and the rest is 0. LRW uses this convention and bbe
+ * is partly implemented.
+ *
+ * Both of the above formats are easy to implement on big-endian
+ * machines.
+ *
+ * EME (which is patent encumbered) uses the ble format (bits are stored
+ * in big endian order and the bytes in little endian). The above buffer
+ * represents X^7 in this case and the primitive polynomial is b[0] = 0x87.
+ *
+ * The common machine word-size is smaller than 128 bits, so to make
+ * an efficient implementation we must split into machine word sizes.
+ * This file uses 32-bit words for the moment. Machine endianness comes into
+ * play. The lle format in relation to machine endianness is discussed
+ * below by the original author of gf128mul Dr Brian Gladman.
+ *
+ * Let's look at the bbe and ble format on a little endian machine.
+ *
+ * bbe on a little endian machine u32 x[4]:
+ *
+ *  MS            x[0]           LS  MS            x[1]		  LS
+ *  ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+ *  103..96 111.104 119.112 127.120  71...64 79...72 87...80 95...88
+ *
+ *  MS            x[2]           LS  MS            x[3]		  LS
+ *  ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+ *  39...32 47...40 55...48 63...56  07...00 15...08 23...16 31...24
+ *
+ * ble on a little endian machine
+ *
+ *  MS            x[0]           LS  MS            x[1]		  LS
+ *  ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+ *  31...24 23...16 15...08 07...00  63...56 55...48 47...40 39...32
+ *
+ *  MS            x[2]           LS  MS            x[3]		  LS
+ *  ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+ *  95...88 87...80 79...72 71...64  127.120 119.112 111.104 103..96
+ *
+ * Multiplications in GF(2^128) are mostly bit-shifts, so you see why
+ * ble (and lbe also) are easier to implement on a little-endian
+ * machine than on a big-endian machine. The converse holds for bbe
+ * and lle.
+ *
+ * Note: to have good alignment, it seems to me that it is sufficient
+ * to keep elements of GF(2^128) in type u64[2]. On 32-bit wordsize
+ * machines this will automatically be aligned to the word size, and on
+ * 64-bit machines as well.
+ */
+/*	Multiply a GF128 field element by x. Field elements are held in arrays
+    of bytes in which field bits 8n..8n + 7 are held in byte[n], with lower
+    indexed bits placed in the more numerically significant bit positions
+    within bytes.
+
+    On little endian machines the bit indexes translate into the bit
+    positions within four 32-bit words in the following way
+
+    MS            x[0]           LS  MS            x[1]		  LS
+    ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+    24...31 16...23 08...15 00...07  56...63 48...55 40...47 32...39
+
+    MS            x[2]           LS  MS            x[3]		  LS
+    ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+    88...95 80...87 72...79 64...71  120.127 112.119 104.111 96..103
+
+    On big endian machines the bit indexes translate into the bit
+    positions within four 32-bit words in the following way
+
+    MS            x[0]           LS  MS            x[1]		  LS
+    ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+    00...07 08...15 16...23 24...31  32...39 40...47 48...55 56...63
+
+    MS            x[2]           LS  MS            x[3]		  LS
+    ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+    64...71 72...79 80...87 88...95  96..103 104.111 112.119 120.127
+*/
+
+/*	A slow generic version of gf_mul, implemented for lle and bbe.
+ *	It multiplies a and b and puts the result in a. */
+void gf128mul_lle(be128 *a, const be128 *b);
+
+void gf128mul_bbe(be128 *a, const be128 *b);
+
+
+/* 4k table optimization */
+
+struct gf128mul_4k {
+	be128 t[256];
+};
+
+struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
+struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
+void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t);
+void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t);
+
+static inline void gf128mul_free_4k(struct gf128mul_4k *t)
+{
+	kfree(t);
+}
+
+
+/* 64k table optimization, implemented for lle and bbe */
+
+struct gf128mul_64k {
+	struct gf128mul_4k *t[16];
+};
+
+/* First initialize the table with the constant factor you want to
+ * multiply by, then call gf128mul_64k_lle (or gf128mul_64k_bbe) with
+ * the other factor in the first argument and the table in the second.
+ * The product is left in *a. */
+struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g);
+struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g);
+void gf128mul_free_64k(struct gf128mul_64k *t);
+void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t);
+void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t);
+
+#endif /* _CRYPTO_GF128MUL_H */
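The gf128mul.h header added above only declares the multiplication routines. A minimal sketch of how a caller might use the 4k-table variant follows; the choice of the bbe convention, the error handling, and the assumption that the init helpers return NULL on allocation failure are illustrative, not taken from the header itself.

/* Hypothetical sketch: multiply two GF(2^128) elements held in bbe order
 * using the 4k-table routines declared above. */
#include <crypto/gf128mul.h>
#include <linux/errno.h>

static int example_gf128_mul_bbe(be128 *x, const be128 *y)
{
	struct gf128mul_4k *t;

	t = gf128mul_init_4k_bbe(y);	/* build the table for the factor y */
	if (!t)
		return -ENOMEM;

	gf128mul_4k_bbe(x, t);		/* *x = *x * *y in GF(2^128) */
	gf128mul_free_4k(t);
	return 0;
}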
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index a1155a2..e618b25 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -60,8 +60,6 @@
 header-y += fd.h
 header-y += fdreg.h
 header-y += fib_rules.h
-header-y += ftape-header-segment.h
-header-y += ftape-vendors.h
 header-y += fuse.h
 header-y += futex.h
 header-y += genetlink.h
@@ -193,7 +191,6 @@
 unifdef-y += cyclades.h
 unifdef-y += dccp.h
 unifdef-y += dirent.h
-unifdef-y += divert.h
 unifdef-y += dlm.h
 unifdef-y += elfcore.h
 unifdef-y += errno.h
@@ -207,7 +204,6 @@
 unifdef-y += filter.h
 unifdef-y += flat.h
 unifdef-y += fs.h
-unifdef-y += ftape.h
 unifdef-y += gameport.h
 unifdef-y += generic_serial.h
 unifdef-y += genhd.h
@@ -225,6 +221,7 @@
 unifdef-y += if_ec.h
 unifdef-y += if_eql.h
 unifdef-y += if_ether.h
+unifdef-y += if_fddi.h
 unifdef-y += if_frad.h
 unifdef-y += if_ltalk.h
 unifdef-y += if_pppox.h
@@ -286,6 +283,7 @@
 unifdef-y += parport.h
 unifdef-y += patchkey.h
 unifdef-y += pci.h
+unifdef-y += personality.h
 unifdef-y += pktcdvd.h
 unifdef-y += pmu.h
 unifdef-y += poll.h
@@ -341,7 +339,7 @@
 unifdef-y += wait.h
 unifdef-y += wanrouter.h
 unifdef-y += watchdog.h
+unifdef-y += wireless.h
 unifdef-y += xfrm.h
-unifdef-y += zftape.h
 
 objhdr-y += version.h
diff --git a/include/linux/acct.h b/include/linux/acct.h
index 0496d1f..302eb72 100644
--- a/include/linux/acct.h
+++ b/include/linux/acct.h
@@ -119,6 +119,7 @@
 #ifdef CONFIG_BSD_PROCESS_ACCT
 struct vfsmount;
 struct super_block;
+struct pacct_struct;
 extern void acct_auto_close_mnt(struct vfsmount *m);
 extern void acct_auto_close(struct super_block *sb);
 extern void acct_init_pacct(struct pacct_struct *pacct);
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 0d71c00..3372ec6 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -111,7 +111,6 @@
 	size_t			ki_nbytes; 	/* copy of iocb->aio_nbytes */
 	char 			__user *ki_buf;	/* remaining iocb->aio_buf */
 	size_t			ki_left; 	/* remaining bytes */
-	long			ki_retried; 	/* just for testing */
 	struct iovec		ki_inline_vec;	/* inline vector */
  	struct iovec		*ki_iovec;
  	unsigned long		ki_nr_segs;
@@ -194,7 +193,7 @@
 
 	struct aio_ring_info	ring_info;
 
-	struct work_struct	wq;
+	struct delayed_work	wq;
 };
 
 /* prototypes */
@@ -238,7 +237,6 @@
 } while (0)
 
 #define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
-#define is_retried_kiocb(iocb) ((iocb)->ki_retried > 1)
 
 #include <linux/aio_abi.h>
 
diff --git a/include/linux/ata.h b/include/linux/ata.h
index d894419..1df9416 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -200,8 +200,9 @@
 	ATA_CBL_NONE		= 0,
 	ATA_CBL_PATA40		= 1,
 	ATA_CBL_PATA80		= 2,
-	ATA_CBL_PATA_UNK	= 3,
-	ATA_CBL_SATA		= 4,
+	ATA_CBL_PATA40_SHORT	= 3,		/* 40 wire cable to high UDMA spec */
+	ATA_CBL_PATA_UNK	= 4,
+	ATA_CBL_SATA		= 5,
 
 	/* SATA Status and Control Registers */
 	SCR_STATUS		= 0,
@@ -342,6 +343,15 @@
 	return 0;
 }
 
+static inline int ata_drive_40wire(const u16 *dev_id)
+{
+	if (ata_id_major_version(dev_id) >= 5 && ata_id_is_sata(dev_id))
+		return 0;	/* SATA */
+	if (dev_id[93] & 0x4000)
+		return 0;	/* 80 wire */
+	return 1;
+}
+
 static inline int atapi_cdb_len(const u16 *dev_id)
 {
 	u16 tmp = dev_id[0] & 0x3;
diff --git a/include/linux/atmarp.h b/include/linux/atmarp.h
index 24f8233..ee108f9 100644
--- a/include/linux/atmarp.h
+++ b/include/linux/atmarp.h
@@ -37,7 +37,7 @@
 struct atmarp_ctrl {
 	enum atmarp_ctrl_type	type;	/* message type */
 	int			itf_num;/* interface number (if present) */
-	uint32_t		ip;	/* IP address (act_need only) */
+	__be32			ip;	/* IP address (act_need only) */
 };
 
 #endif
diff --git a/include/linux/atmbr2684.h b/include/linux/atmbr2684.h
index 7981b73..969fb6c 100644
--- a/include/linux/atmbr2684.h
+++ b/include/linux/atmbr2684.h
@@ -86,8 +86,8 @@
  * efficient per-if in/out filters, this support will be removed
  */
 struct br2684_filter {
-	__u32	prefix;		/* network byte order */
-	__u32	netmask;	/* 0 = disable filter */
+	__be32	prefix;		/* network byte order */
+	__be32	netmask;	/* 0 = disable filter */
 };
 
 struct br2684_filter_set {
diff --git a/include/linux/atmmpc.h b/include/linux/atmmpc.h
index 5fbfa68..ea16504 100644
--- a/include/linux/atmmpc.h
+++ b/include/linux/atmmpc.h
@@ -13,7 +13,7 @@
 
 struct atmmpc_ioc {
         int dev_num;
-        uint32_t ipaddr;              /* the IP address of the shortcut    */
+        __be32 ipaddr;              /* the IP address of the shortcut    */
         int type;                     /* ingress or egress                 */
 };
 
@@ -21,8 +21,8 @@
         uint8_t   Last_NHRP_CIE_code;
         uint8_t   Last_Q2931_cause_value;     
         uint8_t   eg_MPC_ATM_addr[ATM_ESA_LEN];
-        uint32_t  tag;
-        uint32_t  in_dst_ip;      /* IP address this ingress MPC sends packets to */
+        __be32  tag;
+        __be32  in_dst_ip;      /* IP address this ingress MPC sends packets to */
         uint16_t  holding_time;
         uint32_t  request_id;
 } in_ctrl_info;
@@ -30,10 +30,10 @@
 typedef struct eg_ctrl_info {
         uint8_t   DLL_header[256];
         uint8_t   DH_length;
-        uint32_t  cache_id;
-        uint32_t  tag;
-        uint32_t  mps_ip;
-        uint32_t  eg_dst_ip;      /* IP address to which ingress MPC sends packets */
+        __be32  cache_id;
+        __be32  tag;
+        __be32  mps_ip;
+        __be32  eg_dst_ip;      /* IP address to which ingress MPC sends packets */
         uint8_t   in_MPC_data_ATM_addr[ATM_ESA_LEN];
         uint16_t  holding_time;
 } eg_ctrl_info;
@@ -49,7 +49,7 @@
 
 struct k_message {
         uint16_t type;
-        uint32_t ip_mask;
+        __be32 ip_mask;
         uint8_t  MPS_ctrl[ATM_ESA_LEN];
         union {
                 in_ctrl_info in_info;
diff --git a/include/linux/audit.h b/include/linux/audit.h
index b2ca666..0e07db6 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -101,6 +101,10 @@
 #define AUDIT_MAC_CIPSOV4_DEL	1408	/* NetLabel: del CIPSOv4 DOI entry */
 #define AUDIT_MAC_MAP_ADD	1409	/* NetLabel: add LSM domain mapping */
 #define AUDIT_MAC_MAP_DEL	1410	/* NetLabel: del LSM domain mapping */
+#define AUDIT_MAC_IPSEC_ADDSA	1411	/* Add a XFRM state */
+#define AUDIT_MAC_IPSEC_DELSA	1412	/* Delete a XFRM state */
+#define AUDIT_MAC_IPSEC_ADDSPD	1413	/* Add a XFRM policy */
+#define AUDIT_MAC_IPSEC_DELSPD	1414	/* Delete a XFRM policy */
 
 #define AUDIT_FIRST_KERN_ANOM_MSG   1700
 #define AUDIT_LAST_KERN_ANOM_MSG    1799
@@ -377,6 +381,7 @@
 			      struct timespec *t, unsigned int *serial);
 extern int  audit_set_loginuid(struct task_struct *task, uid_t loginuid);
 extern uid_t audit_get_loginuid(struct audit_context *ctx);
+extern void audit_log_task_context(struct audit_buffer *ab);
 extern int __audit_ipc_obj(struct kern_ipc_perm *ipcp);
 extern int __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode);
 extern int audit_bprm(struct linux_binprm *bprm);
@@ -449,6 +454,7 @@
 #define audit_inode_update(i) do { ; } while (0)
 #define auditsc_get_stamp(c,t,s) do { BUG(); } while (0)
 #define audit_get_loginuid(c) ({ -1; })
+#define audit_log_task_context(b) do { ; } while (0)
 #define audit_ipc_obj(i) ({ 0; })
 #define audit_ipc_set_perm(q,u,g,m) ({ 0; })
 #define audit_bprm(p) ({ 0; })
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7bfcde2..e1c7286 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -678,10 +678,11 @@
 extern void blk_run_queue(request_queue_t *);
 extern void blk_start_queueing(request_queue_t *);
 extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
-extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
-extern int blk_rq_unmap_user(struct bio *, unsigned int);
+extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
+extern int blk_rq_unmap_user(struct request *);
 extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
+extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
+			       struct sg_iovec *, int, unsigned int);
 extern int blk_execute_rq(request_queue_t *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index b99a714..3680ff9 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -50,6 +50,15 @@
 };
 
 /*
+ * Notify events.
+ */
+enum blktrace_notify {
+	__BLK_TN_PROCESS = 0,		/* establish pid/name mapping */
+	__BLK_TN_TIMESTAMP,		/* include system clock */
+};
+
+
+/*
  * Trace actions in full. Additionally, read or write is masked
  */
 #define BLK_TA_QUEUE		(__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
@@ -68,6 +77,9 @@
 #define BLK_TA_BOUNCE		(__BLK_TA_BOUNCE)
 #define BLK_TA_REMAP		(__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
 
+#define BLK_TN_PROCESS		(__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
+#define BLK_TN_TIMESTAMP	(__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))
+
 #define BLK_IO_TRACE_MAGIC	0x65617400
 #define BLK_IO_TRACE_VERSION	0x07
 
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 31e9abb..2275f27 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -119,8 +119,7 @@
 				     unsigned int *_hash_mask,
 				     unsigned long limit);
 
-#define HASH_HIGHMEM	0x00000001	/* Consider highmem? */
-#define HASH_EARLY	0x00000002	/* Allocating during early boot? */
+#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
 
 /* Only NUMA needs hash distribution.
  * IA64 is known to have sufficient vmalloc space.
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
new file mode 100644
index 0000000..777dbf6
--- /dev/null
+++ b/include/linux/bottom_half.h
@@ -0,0 +1,10 @@
+#ifndef _LINUX_BH_H
+#define _LINUX_BH_H
+
+extern void local_bh_disable(void);
+extern void __local_bh_enable(void);
+extern void _local_bh_enable(void);
+extern void local_bh_enable(void);
+extern void local_bh_enable_ip(unsigned long ip);
+
+#endif /* _LINUX_BH_H */
diff --git a/include/linux/carta_random32.h b/include/linux/carta_random32.h
deleted file mode 100644
index f6f3bd9..0000000
--- a/include/linux/carta_random32.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Fast, simple, yet decent quality random number generator based on
- * a paper by David G. Carta ("Two Fast Implementations of the
- * `Minimal Standard' Random Number Generator," Communications of the
- * ACM, January, 1990).
- *
- * Copyright (c) 2002-2006 Hewlett-Packard Development Company, L.P.
- *	Contributed by Stephane Eranian <eranian@hpl.hp.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
- */
-#ifndef _LINUX_CARTA_RANDOM32_H_
-#define _LINUX_CARTA_RANDOM32_H_
-
-u64 carta_random32(u64 seed);
-
-#endif /* _LINUX_CARTA_RANDOM32_H_ */
diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h
index 6e27f42..cb57c30 100644
--- a/include/linux/cciss_ioctl.h
+++ b/include/linux/cciss_ioctl.h
@@ -80,7 +80,7 @@
 #define HWORD __u16
 #define DWORD __u32
 
-#define CISS_MAX_LUN	16	
+#define CISS_MAX_LUN	1024
 
 #define LEVEL2LUN   1   // index into Target(x) structure, due to byte swapping
 #define LEVEL3LUN   0
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index ee5f53f..f309b00 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -2,6 +2,10 @@
 #define _LINUX_CDEV_H
 #ifdef __KERNEL__
 
+#include <linux/kobject.h>
+#include <linux/kdev_t.h>
+#include <linux/list.h>
+
 struct cdev {
 	struct kobject kobj;
 	struct module *owner;
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 4c02119..3ea1cd5 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -133,7 +133,7 @@
 struct cn_callback_entry {
 	struct list_head callback_entry;
 	struct cn_callback *cb;
-	struct work_struct work;
+	struct delayed_work work;
 	struct cn_queue_dev *pdev;
 
 	struct cn_callback_id id;
@@ -170,7 +170,7 @@
 
 int cn_cb_equal(struct cb_id *, struct cb_id *);
 
-void cn_queue_wrapper(void *data);
+void cn_queue_wrapper(struct work_struct *work);
 
 extern int cn_already_initialized;
 
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 3fef7d6..bfb5202 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -24,15 +24,24 @@
 #include <linux/compiler.h>
 #include <linux/cpumask.h>
 #include <asm/semaphore.h>
+#include <linux/mutex.h>
 
 struct cpu {
 	int node_id;		/* The node which contains the CPU */
-	int no_control;		/* Should the sysfs control file be created? */
+	int hotpluggable;	/* creates sysfs control file if hotpluggable */
 	struct sys_device sysdev;
 };
 
 extern int register_cpu(struct cpu *cpu, int num);
 extern struct sys_device *get_cpu_sysdev(unsigned cpu);
+
+extern int cpu_add_sysdev_attr(struct sysdev_attribute *attr);
+extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr);
+
+extern int cpu_add_sysdev_attr_group(struct attribute_group *attrs);
+extern void cpu_remove_sysdev_attr_group(struct attribute_group *attrs);
+
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern void unregister_cpu(struct cpu *cpu);
 #endif
@@ -66,6 +75,17 @@
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* Stop CPUs going up and down. */
+
+static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
+{
+	mutex_lock(cpu_hp_mutex);
+}
+
+static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
+{
+	mutex_unlock(cpu_hp_mutex);
+}
+
 extern void lock_cpu_hotplug(void);
 extern void unlock_cpu_hotplug(void);
 #define hotcpu_notifier(fn, pri) {				\
@@ -77,17 +97,24 @@
 #define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
 int cpu_down(unsigned int cpu);
 #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
-#else
+
+#else		/* CONFIG_HOTPLUG_CPU */
+
+static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
+{ }
+static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
+{ }
+
 #define lock_cpu_hotplug()	do { } while (0)
 #define unlock_cpu_hotplug()	do { } while (0)
 #define lock_cpu_hotplug_interruptible() 0
-#define hotcpu_notifier(fn, pri)	do { } while (0)
-#define register_hotcpu_notifier(nb)	do { } while (0)
-#define unregister_hotcpu_notifier(nb)	do { } while (0)
+#define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
+#define register_hotcpu_notifier(nb)	do { (void)(nb); } while (0)
+#define unregister_hotcpu_notifier(nb)	do { (void)(nb); } while (0)
 
 /* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */
 static inline int cpu_is_offline(int cpu) { return 0; }
-#endif
+#endif		/* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_SUSPEND_SMP
 extern int disable_nonboot_cpus(void);
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 4d8adf6..8821e1f 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -23,6 +23,7 @@
 extern void cpuset_exit(struct task_struct *p);
 extern cpumask_t cpuset_cpus_allowed(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
+#define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
 void cpuset_update_task_memory_state(void);
 #define cpuset_nodes_subset_current_mems_allowed(nodes) \
@@ -45,7 +46,7 @@
 extern int cpuset_memory_pressure_enabled;
 extern void __cpuset_memory_pressure_bump(void);
 
-extern struct file_operations proc_cpuset_operations;
+extern const struct file_operations proc_cpuset_operations;
 extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer);
 
 extern void cpuset_lock(void);
@@ -83,6 +84,7 @@
 	return node_possible_map;
 }
 
+#define cpuset_current_mems_allowed (node_online_map)
 static inline void cpuset_init_current_mems_allowed(void) {}
 static inline void cpuset_update_task_memory_state(void) {}
 #define cpuset_nodes_subset_current_mems_allowed(nodes) (1)
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 6485e97..4aa9046 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -241,12 +241,8 @@
  * Algorithm query interface.
  */
 #ifdef CONFIG_CRYPTO
-int crypto_alg_available(const char *name, u32 flags)
-	__deprecated_for_modules;
 int crypto_has_alg(const char *name, u32 type, u32 mask);
 #else
-static int crypto_alg_available(const char *name, u32 flags)
-	__deprecated_for_modules;
 static inline int crypto_alg_available(const char *name, u32 flags)
 {
 	return 0;
@@ -707,16 +703,6 @@
 						dst, src);
 }
 
-void crypto_digest_init(struct crypto_tfm *tfm) __deprecated_for_modules;
-void crypto_digest_update(struct crypto_tfm *tfm,
-			  struct scatterlist *sg, unsigned int nsg)
-	__deprecated_for_modules;
-void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
-	__deprecated_for_modules;
-void crypto_digest_digest(struct crypto_tfm *tfm,
-			  struct scatterlist *sg, unsigned int nsg, u8 *out)
-	__deprecated_for_modules;
-
 static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
 {
 	return (struct crypto_hash *)tfm;
@@ -729,14 +715,6 @@
 	return __crypto_hash_cast(tfm);
 }
 
-static int crypto_digest_setkey(struct crypto_tfm *tfm, const u8 *key,
-				unsigned int keylen) __deprecated;
-static inline int crypto_digest_setkey(struct crypto_tfm *tfm,
-                                       const u8 *key, unsigned int keylen)
-{
-	return tfm->crt_hash.setkey(crypto_hash_cast(tfm), key, keylen);
-}
-
 static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
 						    u32 type, u32 mask)
 {
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 53553c9..ed6cc89 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -30,7 +30,7 @@
 #else
 #error  "Adjust your <asm/byteorder.h> defines"
 #endif
-	__u16	dccph_checksum;
+	__sum16	dccph_checksum;
 #if defined(__LITTLE_ENDIAN_BITFIELD)
 	__u8	dccph_x:1,
 		dccph_type:4,
@@ -175,17 +175,21 @@
 	DCCPC_CCID3 = 3,
 };
 
-/* DCCP features */
-enum {
-	DCCPF_RESERVED = 0,
-	DCCPF_CCID = 1,
-	DCCPF_SEQUENCE_WINDOW = 3,
-	DCCPF_ACK_RATIO = 5,
-	DCCPF_SEND_ACK_VECTOR = 6,
-	DCCPF_SEND_NDP_COUNT = 7,
-	/* 10-127 reserved */
-	DCCPF_MIN_CCID_SPECIFIC = 128,
-	DCCPF_MAX_CCID_SPECIFIC = 255,
+/* DCCP features (RFC 4340 section 6.4) */
+enum {
+	DCCPF_RESERVED = 0,
+	DCCPF_CCID = 1,
+	DCCPF_SHORT_SEQNOS = 2,		/* XXX: not yet implemented */
+	DCCPF_SEQUENCE_WINDOW = 3,
+	DCCPF_ECN_INCAPABLE = 4,	/* XXX: not yet implemented */
+	DCCPF_ACK_RATIO = 5,
+	DCCPF_SEND_ACK_VECTOR = 6,
+	DCCPF_SEND_NDP_COUNT = 7,
+	DCCPF_MIN_CSUM_COVER = 8,
+	DCCPF_DATA_CHECKSUM = 9,	/* XXX: not yet implemented */
+	/* 10-127 reserved */
+	DCCPF_MIN_CCID_SPECIFIC = 128,
+	DCCPF_MAX_CCID_SPECIFIC = 255,
 };
 
 /* this structure is argument to DCCP_SOCKOPT_CHANGE_X */
@@ -196,13 +200,16 @@
 };
 
 /* DCCP socket options */
-#define DCCP_SOCKOPT_PACKET_SIZE	1
+#define DCCP_SOCKOPT_PACKET_SIZE	1 /* XXX deprecated, without effect */
 #define DCCP_SOCKOPT_SERVICE		2
 #define DCCP_SOCKOPT_CHANGE_L		3
 #define DCCP_SOCKOPT_CHANGE_R		4
+#define DCCP_SOCKOPT_SEND_CSCOV		10
+#define DCCP_SOCKOPT_RECV_CSCOV		11
 #define DCCP_SOCKOPT_CCID_RX_INFO	128
 #define DCCP_SOCKOPT_CCID_TX_INFO	192
 
+/* maximum number of services provided on the same listening port */
 #define DCCP_SERVICE_LIST_MAX_LEN      32
 
 #ifdef __KERNEL__
@@ -256,6 +263,13 @@
 	return (struct dccp_hdr *)skb->h.raw;
 }
 
+static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen)
+{
+	skb->h.raw = skb_push(skb, headlen);
+	memset(skb->h.raw, 0, headlen);
+	return dccp_hdr(skb);
+}
+
 static inline struct dccp_hdr_ext *dccp_hdrx(const struct sk_buff *skb)
 {
 	return (struct dccp_hdr_ext *)(skb->h.raw + sizeof(struct dccp_hdr));
@@ -342,6 +356,9 @@
   * @dccpms_ccid - Congestion Control Id (CCID) (section 10)
   * @dccpms_send_ack_vector - Send Ack Vector Feature (section 11.5)
   * @dccpms_send_ndp_count - Send NDP Count Feature (7.7.2)
+  * @dccpms_ack_ratio - Ack Ratio Feature (section 11.3)
+  * @dccpms_pending - List of features being negotiated
+  * @dccpms_conf -
   */
 struct dccp_minisock {
 	__u64			dccpms_sequence_window;
@@ -439,12 +456,25 @@
  * @dccps_gss - greatest sequence number sent
  * @dccps_gsr - greatest valid sequence number received
  * @dccps_gar - greatest valid ack number received on a non-Sync; initialized to %dccps_iss
+ * @dccps_service - first (passive sock) or unique (active sock) service code
+ * @dccps_service_list - second .. last service code on passive socket
  * @dccps_timestamp_time - time of latest TIMESTAMP option
  * @dccps_timestamp_echo - latest timestamp received on a TIMESTAMP option
- * @dccps_packet_size - Set thru setsockopt
- * @dccps_role - Role of this sock, one of %dccp_role
+ * @dccps_l_ack_ratio -
+ * @dccps_r_ack_ratio -
+ * @dccps_pcslen - sender   partial checksum coverage (via sockopt)
+ * @dccps_pcrlen - receiver partial checksum coverage (via sockopt)
  * @dccps_ndp_count - number of Non Data Packets since last data packet
+ * @dccps_mss_cache -
+ * @dccps_minisock -
  * @dccps_hc_rx_ackvec - rx half connection ack vector
+ * @dccps_hc_rx_ccid -
+ * @dccps_hc_tx_ccid -
+ * @dccps_options_received -
+ * @dccps_epoch -
+ * @dccps_role - Role of this sock, one of %dccp_role
+ * @dccps_hc_rx_insert_options -
+ * @dccps_hc_tx_insert_options -
  * @dccps_xmit_timer - timer for when CCID is not ready to send
  */
 struct dccp_sock {
@@ -464,9 +494,10 @@
 	struct dccp_service_list	*dccps_service_list;
 	struct timeval			dccps_timestamp_time;
 	__u32				dccps_timestamp_echo;
-	__u32				dccps_packet_size;
 	__u16				dccps_l_ack_ratio;
 	__u16				dccps_r_ack_ratio;
+	__u16				dccps_pcslen;
+	__u16				dccps_pcrlen;
 	unsigned long			dccps_ndp_count;
 	__u32				dccps_mss_cache;
 	struct dccp_minisock		dccps_minisock;
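The DCCP_SOCKOPT_SEND_CSCOV and DCCP_SOCKOPT_RECV_CSCOV options introduced above expose the new partial checksum coverage fields (dccps_pcslen/dccps_pcrlen). A hedged userspace sketch follows; the SOL_DCCP level value, the 0..15 coverage range, and the fallback #define are assumptions drawn from RFC 4340 and common usage rather than from this hunk.

/* Hypothetical sketch: request partial checksum coverage on a DCCP socket.
 * cscov == 0 means cover the whole packet; 1..15 cover the header plus
 * (cscov - 1) 32-bit words of payload (RFC 4340, section 9.2). */
#include <sys/socket.h>
#include <linux/dccp.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269		/* assumed value from <linux/socket.h> */
#endif

static int example_set_cscov(int fd, int cscov)
{
	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SEND_CSCOV,
		       &cscov, sizeof(cscov)) < 0)
		return -1;
	return setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_RECV_CSCOV,
			  &cscov, sizeof(cscov));
}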
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 952bee7..a1c10b0 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -24,7 +24,7 @@
 	int __ret = 0;							\
 									\
 	if (unlikely(c)) {						\
-		if (debug_locks_off())					\
+		if (debug_locks_silent || debug_locks_off())		\
 			WARN_ON(1);					\
 		__ret = 1;						\
 	}								\
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 561e2a7..55d1ca5 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -30,7 +30,7 @@
 #ifdef CONFIG_TASK_DELAY_ACCT
 
 extern int delayacct_on;	/* Delay accounting turned on/off */
-extern kmem_cache_t *delayacct_cache;
+extern struct kmem_cache *delayacct_cache;
 extern void delayacct_init(void);
 extern void __delayacct_tsk_init(struct task_struct *);
 extern void __delayacct_tsk_exit(struct task_struct *);
diff --git a/include/linux/device.h b/include/linux/device.h
index 9d4f6a9..49ab53c 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -21,6 +21,7 @@
 #include <linux/pm.h>
 #include <asm/semaphore.h>
 #include <asm/atomic.h>
+#include <asm/device.h>
 
 #define DEVICE_NAME_SIZE	50
 #define DEVICE_NAME_HALF	__stringify(20)	/* Less than half to accommodate slop */
@@ -42,6 +43,8 @@
 	struct klist		klist_devices;
 	struct klist		klist_drivers;
 
+	struct blocking_notifier_head bus_notifier;
+
 	struct bus_attribute	* bus_attrs;
 	struct device_attribute	* dev_attrs;
 	struct driver_attribute	* drv_attrs;
@@ -75,6 +78,29 @@
 		struct device_driver *start, void *data,
 		int (*fn)(struct device_driver *, void *));
 
+/*
+ * Bus notifiers: Get notified of addition/removal of devices
+ * and binding/unbinding of drivers to devices.
+ * In the long run, it should be a replacement for the platform
+ * notify hooks.
+ */
+struct notifier_block;
+
+extern int bus_register_notifier(struct bus_type *bus,
+				 struct notifier_block *nb);
+extern int bus_unregister_notifier(struct bus_type *bus,
+				   struct notifier_block *nb);
+
+/* All 4 notifiers below get called with the target struct device *
+ * as an argument. Note that those functions are likely to be called
+ * with the device semaphore held in the core, so be careful.
+ */
+#define BUS_NOTIFY_ADD_DEVICE		0x00000001 /* device added */
+#define BUS_NOTIFY_DEL_DEVICE		0x00000002 /* device removed */
+#define BUS_NOTIFY_BOUND_DRIVER		0x00000003 /* driver bound to device */
+#define BUS_NOTIFY_UNBIND_DRIVER	0x00000004 /* driver about to be
+						      unbound */
+
 /* driverfs interface for exporting bus attributes */
 
 struct bus_attribute {
@@ -343,10 +369,11 @@
 	void		*driver_data;	/* data private to the driver */
 	void		*platform_data;	/* Platform specific data, device
 					   core doesn't touch it */
-	void		*firmware_data; /* Firmware specific data (e.g. ACPI,
-					   BIOS data),reserved for device core*/
 	struct dev_pm_info	power;
 
+#ifdef CONFIG_NUMA
+	int		numa_node;	/* NUMA node this device is close to */
+#endif
 	u64		*dma_mask;	/* dma mask (if dma'able device) */
 	u64		coherent_dma_mask;/* Like dma_mask, but for
 					     alloc_coherent mappings as
@@ -358,6 +385,8 @@
 
 	struct dma_coherent_mem	*dma_mem; /* internal for coherent mem
 					     override */
+	/* arch specific additions */
+	struct dev_archdata	archdata;
 
 	/* class_device migration path */
 	struct list_head	node;
@@ -368,6 +397,25 @@
 	void	(*release)(struct device * dev);
 };
 
+#ifdef CONFIG_NUMA
+static inline int dev_to_node(struct device *dev)
+{
+	return dev->numa_node;
+}
+static inline void set_dev_node(struct device *dev, int node)
+{
+	dev->numa_node = node;
+}
+#else
+static inline int dev_to_node(struct device *dev)
+{
+	return -1;
+}
+static inline void set_dev_node(struct device *dev, int node)
+{
+}
+#endif
+
 static inline void *
 dev_get_drvdata (struct device *dev)
 {
@@ -395,7 +443,10 @@
 extern void device_del(struct device * dev);
 extern int device_for_each_child(struct device *, void *,
 		     int (*fn)(struct device *, void *));
+extern struct device *device_find_child(struct device *, void *data,
+					int (*match)(struct device *, void *));
 extern int device_rename(struct device *dev, char *new_name);
+extern int device_move(struct device *dev, struct device *new_parent);
 
 /*
  * Manual binding of a device to driver. See drivers/base/bus.c
@@ -415,8 +466,6 @@
 				    __attribute__((format(printf,4,5)));
 extern void device_destroy(struct class *cls, dev_t devt);
 
-extern int virtual_device_parent(struct device *dev);
-
 /*
  * Platform "fixup" functions - allow the platform to have their say
  * about devices and actions that the general device layer doesn't
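To show how the bus notifier interface added to device.h above is meant to be consumed, here is a hedged sketch; the example_bus_type bus, the dev_info() messages, and the NOTIFY_OK return convention are assumptions for illustration rather than part of the change.

/* Hypothetical sketch: watch device add/remove events on a bus.
 * "example_bus_type" is a made-up bus; any registered struct bus_type
 * could be passed to bus_register_notifier(). */
#include <linux/device.h>
#include <linux/notifier.h>

static int example_bus_event(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		dev_info(dev, "device added to bus\n");
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		dev_info(dev, "device removed from bus\n");
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_bus_event,
};

/* somewhere in init code, with a real bus:
 *	bus_register_notifier(&example_bus_type, &example_nb);
 * and on exit:
 *	bus_unregister_notifier(&example_bus_type, &example_nb);
 */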
diff --git a/include/linux/divert.h b/include/linux/divert.h
deleted file mode 100644
index 8fb4e9d..0000000
--- a/include/linux/divert.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Frame Diversion, Benoit Locher <Benoit.Locher@skf.com>
- *
- * Changes:
- * 		06/09/2000	BL:	initial version
- * 
- */
- 
-#ifndef _LINUX_DIVERT_H
-#define _LINUX_DIVERT_H
-
-#include <asm/types.h>
-
-#define	MAX_DIVERT_PORTS	8	/* Max number of ports to divert (tcp, udp) */
-
-/* Divertable protocols */
-#define	DIVERT_PROTO_NONE	0x0000
-#define	DIVERT_PROTO_IP		0x0001
-#define	DIVERT_PROTO_ICMP	0x0002
-#define	DIVERT_PROTO_TCP	0x0004
-#define	DIVERT_PROTO_UDP	0x0008
-
-/*
- *	This is an Ethernet Frame Diverter option block
- */
-struct divert_blk
-{
-	int		divert;  /* are we active */
-	unsigned int protos;	/* protocols */
-	__u16		tcp_dst[MAX_DIVERT_PORTS]; /* specific tcp dst ports to divert */
-	__u16		tcp_src[MAX_DIVERT_PORTS]; /* specific tcp src ports to divert */
-	__u16		udp_dst[MAX_DIVERT_PORTS]; /* specific udp dst ports to divert */
-	__u16		udp_src[MAX_DIVERT_PORTS]; /* specific udp src ports to divert */
-};
-
-/*
- * Diversion control block, for configuration with the userspace tool
- * divert
- */
-
-typedef union _divert_cf_arg
-{
-	__s16		int16;
-	__u16		uint16;
-	__s32		int32;
-	__u32		uint32;
-	__s64		int64;
-	__u64		uint64;
-	void	__user *ptr;
-} divert_cf_arg;
-
-
-struct divert_cf
-{
-	int	cmd;				/* Command */
-	divert_cf_arg 	arg1,
-					arg2,
-					arg3;
-	int	dev_index;	/* device index (eth0=0, etc...) */
-};
-
-
-/* Diversion commands */
-#define	DIVCMD_DIVERT			1 /* ENABLE/DISABLE diversion */
-#define	DIVCMD_IP				2 /* ENABLE/DISABLE whold IP diversion */
-#define	DIVCMD_TCP				3 /* ENABLE/DISABLE whold TCP diversion */
-#define	DIVCMD_TCPDST			4 /* ADD/REMOVE TCP DST port for diversion */
-#define	DIVCMD_TCPSRC			5 /* ADD/REMOVE TCP SRC port for diversion */
-#define	DIVCMD_UDP				6 /* ENABLE/DISABLE whole UDP diversion */
-#define	DIVCMD_UDPDST			7 /* ADD/REMOVE UDP DST port for diversion */
-#define	DIVCMD_UDPSRC			8 /* ADD/REMOVE UDP SRC port for diversion */
-#define	DIVCMD_ICMP				9 /* ENABLE/DISABLE whole ICMP diversion */
-#define	DIVCMD_GETSTATUS		10 /* GET the status of the diverter */
-#define	DIVCMD_RESET			11 /* Reset the diverter on the specified dev */
-#define DIVCMD_GETVERSION		12 /* Retrieve the diverter code version (char[32]) */
-
-/* General syntax of the commands:
- * 
- * DIVCMD_xxxxxx(arg1, arg2, arg3, dev_index)
- * 
- * SIOCSIFDIVERT:
- *   DIVCMD_DIVERT(DIVARG1_ENABLE|DIVARG1_DISABLE, , ,ifindex)
- *   DIVCMD_IP(DIVARG1_ENABLE|DIVARG1_DISABLE, , , ifindex)
- *   DIVCMD_TCP(DIVARG1_ENABLE|DIVARG1_DISABLE, , , ifindex)
- *   DIVCMD_TCPDST(DIVARG1_ADD|DIVARG1_REMOVE, port, , ifindex)
- *   DIVCMD_TCPSRC(DIVARG1_ADD|DIVARG1_REMOVE, port, , ifindex)
- *   DIVCMD_UDP(DIVARG1_ENABLE|DIVARG1_DISABLE, , , ifindex)
- *   DIVCMD_UDPDST(DIVARG1_ADD|DIVARG1_REMOVE, port, , ifindex)
- *   DIVCMD_UDPSRC(DIVARG1_ADD|DIVARG1_REMOVE, port, , ifindex)
- *   DIVCMD_ICMP(DIVARG1_ENABLE|DIVARG1_DISABLE, , , ifindex)
- *   DIVCMD_RESET(, , , ifindex)
- *   
- * SIOGIFDIVERT:
- *   DIVCMD_GETSTATUS(divert_blk, , , ifindex)
- *   DIVCMD_GETVERSION(string[3])
- */
-
-
-/* Possible values for arg1 */
-#define	DIVARG1_ENABLE			0 /* ENABLE something */
-#define	DIVARG1_DISABLE			1 /* DISABLE something */
-#define DIVARG1_ADD				2 /* ADD something */
-#define DIVARG1_REMOVE			3 /* REMOVE something */
-
-
-#ifdef __KERNEL__
-
-/* diverter functions */
-#include <linux/skbuff.h>
-
-#ifdef CONFIG_NET_DIVERT
-#include <linux/netdevice.h>
-
-int alloc_divert_blk(struct net_device *);
-void free_divert_blk(struct net_device *);
-int divert_ioctl(unsigned int cmd, struct divert_cf __user *arg);
-void divert_frame(struct sk_buff *skb);
-static inline void handle_diverter(struct sk_buff *skb)
-{
-	/* if diversion is supported on device, then divert */
-	if (skb->dev->divert && skb->dev->divert->divert)
-		divert_frame(skb);
-}
-
-#else
-# define alloc_divert_blk(dev)		(0)
-# define free_divert_blk(dev)		do {} while (0)
-# define divert_ioctl(cmd, arg)		(-ENOPKG)
-# define handle_diverter(skb)		do {} while (0)
-#endif
-#endif 
-#endif	/* _LINUX_DIVERT_H */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 66d621d..df1c918 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -300,8 +300,9 @@
 extern int __init efi_uart_console_only (void);
 extern void efi_initialize_iomem_resources(struct resource *code_resource,
 					struct resource *data_resource);
-extern unsigned long __init efi_get_time(void);
+extern unsigned long efi_get_time(void);
 extern int __init efi_set_rtc_mmss(unsigned long nowtime);
+extern int is_available_memory(efi_memory_desc_t * md);
 extern struct efi_memory_map memmap;
 
 /**
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 2fa9f11..a24931d 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -21,11 +21,11 @@
 typedef int (elevator_may_queue_fn) (request_queue_t *, int);
 
 typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, gfp_t);
-typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
+typedef void (elevator_put_req_fn) (struct request *);
 typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
 typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
 
-typedef void *(elevator_init_fn) (request_queue_t *, elevator_t *);
+typedef void *(elevator_init_fn) (request_queue_t *);
 typedef void (elevator_exit_fn) (elevator_t *);
 
 struct elevator_ops
diff --git a/include/linux/elf.h b/include/linux/elf.h
index b70d1d2..60713e6 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -6,6 +6,8 @@
 #include <linux/elf-em.h>
 #include <asm/elf.h>
 
+struct file;
+
 #ifndef elf_read_implies_exec
   /* Executables for which elf_read_implies_exec() returns TRUE will
      have the READ_IMPLIES_EXEC personality flag set automatically.
@@ -358,6 +360,7 @@
 #define elfhdr		elf32_hdr
 #define elf_phdr	elf32_phdr
 #define elf_note	elf32_note
+#define elf_addr_t	Elf32_Off
 
 #else
 
@@ -365,8 +368,16 @@
 #define elfhdr		elf64_hdr
 #define elf_phdr	elf64_phdr
 #define elf_note	elf64_note
+#define elf_addr_t	Elf64_Off
 
 #endif
 
+#ifndef ARCH_HAVE_EXTRA_ELF_NOTES
+static inline int arch_notes_size(void) { return 0; }
+static inline void arch_write_notes(struct file *file) { }
+
+#define ELF_CORE_EXTRA_NOTES_SIZE arch_notes_size()
+#define ELF_CORE_WRITE_EXTRA_NOTES arch_write_notes(file)
+#endif /* ARCH_HAVE_EXTRA_ELF_NOTES */
 
 #endif /* _LINUX_ELF_H */
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h
index ce0e610..8c43b13 100644
--- a/include/linux/ext3_jbd.h
+++ b/include/linux/ext3_jbd.h
@@ -109,74 +109,32 @@
  * been done yet.
  */
 
-void ext3_journal_abort_handle(const char *caller, const char *err_fn,
-		struct buffer_head *bh, handle_t *handle, int err);
-
-static inline int
-__ext3_journal_get_undo_access(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = journal_get_undo_access(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
-
-static inline int
-__ext3_journal_get_write_access(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = journal_get_write_access(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
-
-static inline void
-ext3_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
+static inline void ext3_journal_release_buffer(handle_t *handle,
+						struct buffer_head *bh)
 {
 	journal_release_buffer(handle, bh);
 }
 
-static inline int
-__ext3_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh)
-{
-	int err = journal_forget(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
+void ext3_journal_abort_handle(const char *caller, const char *err_fn,
+		struct buffer_head *bh, handle_t *handle, int err);
 
-static inline int
-__ext3_journal_revoke(const char *where, handle_t *handle,
-		      unsigned long blocknr, struct buffer_head *bh)
-{
-	int err = journal_revoke(handle, blocknr, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
+int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
+				struct buffer_head *bh);
 
-static inline int
-__ext3_journal_get_create_access(const char *where,
-				 handle_t *handle, struct buffer_head *bh)
-{
-	int err = journal_get_create_access(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
+int __ext3_journal_get_write_access(const char *where, handle_t *handle,
+				struct buffer_head *bh);
 
-static inline int
-__ext3_journal_dirty_metadata(const char *where,
-			      handle_t *handle, struct buffer_head *bh)
-{
-	int err = journal_dirty_metadata(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
+int __ext3_journal_forget(const char *where, handle_t *handle,
+				struct buffer_head *bh);
 
+int __ext3_journal_revoke(const char *where, handle_t *handle,
+				unsigned long blocknr, struct buffer_head *bh);
+
+int __ext3_journal_get_create_access(const char *where,
+				handle_t *handle, struct buffer_head *bh);
+
+int __ext3_journal_dirty_metadata(const char *where,
+				handle_t *handle, struct buffer_head *bh);
 
 #define ext3_journal_get_undo_access(handle, bh) \
 	__ext3_journal_get_undo_access(__FUNCTION__, (handle), (bh))
diff --git a/include/linux/ext4_jbd2.h b/include/linux/ext4_jbd2.h
index 72dd631..d716e63 100644
--- a/include/linux/ext4_jbd2.h
+++ b/include/linux/ext4_jbd2.h
@@ -114,74 +114,32 @@
  * been done yet.
  */
 
-void ext4_journal_abort_handle(const char *caller, const char *err_fn,
-		struct buffer_head *bh, handle_t *handle, int err);
-
-static inline int
-__ext4_journal_get_undo_access(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = jbd2_journal_get_undo_access(handle, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
-
-static inline int
-__ext4_journal_get_write_access(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = jbd2_journal_get_write_access(handle, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
-
-static inline void
-ext4_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
+static inline void ext4_journal_release_buffer(handle_t *handle,
+						struct buffer_head *bh)
 {
 	jbd2_journal_release_buffer(handle, bh);
 }
 
-static inline int
-__ext4_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh)
-{
-	int err = jbd2_journal_forget(handle, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
+void ext4_journal_abort_handle(const char *caller, const char *err_fn,
+		struct buffer_head *bh, handle_t *handle, int err);
 
-static inline int
-__ext4_journal_revoke(const char *where, handle_t *handle,
-		      ext4_fsblk_t blocknr, struct buffer_head *bh)
-{
-	int err = jbd2_journal_revoke(handle, blocknr, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
+int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
+				struct buffer_head *bh);
 
-static inline int
-__ext4_journal_get_create_access(const char *where,
-				 handle_t *handle, struct buffer_head *bh)
-{
-	int err = jbd2_journal_get_create_access(handle, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
+int __ext4_journal_get_write_access(const char *where, handle_t *handle,
+				struct buffer_head *bh);
 
-static inline int
-__ext4_journal_dirty_metadata(const char *where,
-			      handle_t *handle, struct buffer_head *bh)
-{
-	int err = jbd2_journal_dirty_metadata(handle, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-	return err;
-}
+int __ext4_journal_forget(const char *where, handle_t *handle,
+				struct buffer_head *bh);
 
+int __ext4_journal_revoke(const char *where, handle_t *handle,
+				ext4_fsblk_t blocknr, struct buffer_head *bh);
+
+int __ext4_journal_get_create_access(const char *where,
+				handle_t *handle, struct buffer_head *bh);
+
+int __ext4_journal_dirty_metadata(const char *where,
+				handle_t *handle, struct buffer_head *bh);
 
 #define ext4_journal_get_undo_access(handle, bh) \
 	__ext4_journal_get_undo_access(__FUNCTION__, (handle), (bh))
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 3e69241..fa23e06 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -774,8 +774,8 @@
 #endif
 
 	struct fb_ops *fbops;
-	struct device *device;
-	struct class_device *class_device; /* sysfs per device attrs */
+	struct device *device;		/* This is the parent */
+	struct device *dev;		/* This is this fb device */
 	int class_flag;                    /* private sysfs flags */
 #ifdef CONFIG_FB_TILEBLITTING
 	struct fb_tile_ops *tileops;    /* Tile Blitting */
@@ -910,8 +910,8 @@
 /* drivers/video/fbsysfs.c */
 extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
 extern void framebuffer_release(struct fb_info *info);
-extern int fb_init_class_device(struct fb_info *fb_info);
-extern void fb_cleanup_class_device(struct fb_info *head);
+extern int fb_init_device(struct fb_info *fb_info);
+extern void fb_cleanup_device(struct fb_info *head);
 extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max);
 
 /* drivers/video/fbmon.c */
diff --git a/include/linux/fib_rules.h b/include/linux/fib_rules.h
index 4418c8d..8270aac 100644
--- a/include/linux/fib_rules.h
+++ b/include/linux/fib_rules.h
@@ -6,6 +6,7 @@
 
 /* rule is permanent, and cannot be deleted */
 #define FIB_RULE_PERMANENT	1
+#define FIB_RULE_INVERT		2
 
 struct fib_rule_hdr
 {
@@ -34,7 +35,7 @@
 	FRA_UNUSED3,
 	FRA_UNUSED4,
 	FRA_UNUSED5,
-	FRA_FWMARK,	/* netfilter mark */
+	FRA_FWMARK,	/* mark */
 	FRA_FLOW,	/* flow/class id */
 	FRA_UNUSED6,
 	FRA_UNUSED7,
diff --git a/include/linux/file.h b/include/linux/file.h
index 74183e6..6e77b91 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -64,6 +64,8 @@
 
 #define files_fdtable(files) (rcu_dereference((files)->fdt))
 
+extern struct kmem_cache *filp_cachep;
+
 extern void FASTCALL(__fput(struct file *));
 extern void FASTCALL(fput(struct file *));
 
@@ -114,4 +116,6 @@
 void FASTCALL(put_files_struct(struct files_struct *fs));
 void reset_files_struct(struct task_struct *, struct files_struct *);
 
+extern struct kmem_cache *files_cachep;
+
 #endif /* __LINUX_FILE_H */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
new file mode 100644
index 0000000..6e05e3e
--- /dev/null
+++ b/include/linux/freezer.h
@@ -0,0 +1,87 @@
+/* Freezer declarations */
+
+#ifdef CONFIG_PM
+/*
+ * Check if a process has been frozen
+ */
+static inline int frozen(struct task_struct *p)
+{
+	return p->flags & PF_FROZEN;
+}
+
+/*
+ * Check if there is a request to freeze a process
+ */
+static inline int freezing(struct task_struct *p)
+{
+	return p->flags & PF_FREEZE;
+}
+
+/*
+ * Request that a process be frozen
+ * FIXME: SMP problem. We may not modify other process' flags!
+ */
+static inline void freeze(struct task_struct *p)
+{
+	p->flags |= PF_FREEZE;
+}
+
+/*
+ * Sometimes we may need to cancel the previous 'freeze' request
+ */
+static inline void do_not_freeze(struct task_struct *p)
+{
+	p->flags &= ~PF_FREEZE;
+}
+
+/*
+ * Wake up a frozen process
+ */
+static inline int thaw_process(struct task_struct *p)
+{
+	if (frozen(p)) {
+		p->flags &= ~PF_FROZEN;
+		wake_up_process(p);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * freezing is complete, mark process as frozen
+ */
+static inline void frozen_process(struct task_struct *p)
+{
+	p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN;
+}
+
+extern void refrigerator(void);
+extern int freeze_processes(void);
+extern void thaw_processes(void);
+
+static inline int try_to_freeze(void)
+{
+	if (freezing(current)) {
+		refrigerator();
+		return 1;
+	} else
+		return 0;
+}
+
+extern void thaw_some_processes(int all);
+
+#else
+static inline int frozen(struct task_struct *p) { return 0; }
+static inline int freezing(struct task_struct *p) { return 0; }
+static inline void freeze(struct task_struct *p) { BUG(); }
+static inline int thaw_process(struct task_struct *p) { return 1; }
+static inline void frozen_process(struct task_struct *p) { BUG(); }
+
+static inline void refrigerator(void) {}
+static inline int freeze_processes(void) { BUG(); return 0; }
+static inline void thaw_processes(void) {}
+
+static inline int try_to_freeze(void) { return 0; }
+
+
+#endif
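The freezer helpers in the new header above are normally polled from a kernel thread's main loop. A minimal sketch follows, assuming the usual kthread idiom; none of the surrounding loop comes from the patch itself.

/* Hypothetical sketch: a kernel thread that cooperates with the freezer. */
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_thread(void *unused)
{
	while (!kthread_should_stop()) {
		try_to_freeze();	/* park in the refrigerator if asked */

		/* ... perform one unit of work here ... */

		schedule_timeout_interruptible(HZ);
	}
	return 0;
}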
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2fe6e3f..70b99fb 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -276,7 +276,7 @@
 #include <linux/radix-tree.h>
 #include <linux/prio_tree.h>
 #include <linux/init.h>
-#include <linux/sched.h>
+#include <linux/pid.h>
 #include <linux/mutex.h>
 
 #include <asm/atomic.h>
@@ -543,19 +543,22 @@
 	struct list_head	i_dentry;
 	unsigned long		i_ino;
 	atomic_t		i_count;
-	umode_t			i_mode;
 	unsigned int		i_nlink;
 	uid_t			i_uid;
 	gid_t			i_gid;
 	dev_t			i_rdev;
+	unsigned long		i_version;
 	loff_t			i_size;
+#ifdef __NEED_I_SIZE_ORDERED
+	seqcount_t		i_size_seqcount;
+#endif
 	struct timespec		i_atime;
 	struct timespec		i_mtime;
 	struct timespec		i_ctime;
 	unsigned int		i_blkbits;
-	unsigned long		i_version;
 	blkcnt_t		i_blocks;
 	unsigned short          i_bytes;
+	umode_t			i_mode;
 	spinlock_t		i_lock;	/* i_blocks, i_bytes, maybe i_size */
 	struct mutex		i_mutex;
 	struct rw_semaphore	i_alloc_sem;
@@ -598,9 +601,6 @@
 	void			*i_security;
 #endif
 	void			*i_private; /* fs or device private pointer */
-#ifdef __NEED_I_SIZE_ORDERED
-	seqcount_t		i_size_seqcount;
-#endif
 };
 
 /*
@@ -636,7 +636,7 @@
  * cmpxchg8b without the need of the lock prefix). For SMP compiles
  * and 64bit archs it makes no difference if preempt is enabled or not.
  */
-static inline loff_t i_size_read(struct inode *inode)
+static inline loff_t i_size_read(const struct inode *inode)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	loff_t i_size;
@@ -679,12 +679,12 @@
 #endif
 }
 
-static inline unsigned iminor(struct inode *inode)
+static inline unsigned iminor(const struct inode *inode)
 {
 	return MINOR(inode->i_rdev);
 }
 
-static inline unsigned imajor(struct inode *inode)
+static inline unsigned imajor(const struct inode *inode)
 {
 	return MAJOR(inode->i_rdev);
 }
@@ -977,36 +977,13 @@
 #define vfs_check_frozen(sb, level) \
 	wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level)))
 
-static inline void get_fs_excl(void)
-{
-	atomic_inc(&current->fs_excl);
-}
+#define get_fs_excl() atomic_inc(&current->fs_excl)
+#define put_fs_excl() atomic_dec(&current->fs_excl)
+#define has_fs_excl() atomic_read(&current->fs_excl)
 
-static inline void put_fs_excl(void)
-{
-	atomic_dec(&current->fs_excl);
-}
-
-static inline int has_fs_excl(void)
-{
-	return atomic_read(&current->fs_excl);
-}
-
-
-/*
- * Superblock locking.
- */
-static inline void lock_super(struct super_block * sb)
-{
-	get_fs_excl();
-	mutex_lock(&sb->s_lock);
-}
-
-static inline void unlock_super(struct super_block * sb)
-{
-	put_fs_excl();
-	mutex_unlock(&sb->s_lock);
-}
+/* not quite ready to be deprecated, but... */
+extern void lock_super(struct super_block *);
+extern void unlock_super(struct super_block *);
 
 /*
  * VFS helper functions..
@@ -1504,7 +1481,9 @@
 extern void __init vfs_caches_init_early(void);
 extern void __init vfs_caches_init(unsigned long);
 
-#define __getname()	kmem_cache_alloc(names_cachep, SLAB_KERNEL)
+extern struct kmem_cache *names_cachep;
+
+#define __getname()	kmem_cache_alloc(names_cachep, GFP_KERNEL)
 #define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
 #ifndef CONFIG_AUDITSYSCALL
 #define putname(name)   __putname(name)
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index c623d12..11a36ce 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -18,6 +18,8 @@
 	.umask		= 0022, \
 }
 
+extern struct kmem_cache *fs_cachep;
+
 extern void exit_fs(struct task_struct *);
 extern void set_fs_altroot(void);
 extern void set_fs_root(struct fs_struct *, struct vfsmount *, struct dentry *);
diff --git a/include/linux/ftape-header-segment.h b/include/linux/ftape-header-segment.h
deleted file mode 100644
index 4732218..0000000
--- a/include/linux/ftape-header-segment.h
+++ /dev/null
@@ -1,122 +0,0 @@
-#ifndef _FTAPE_HEADER_SEGMENT_H
-#define _FTAPE_HEADER_SEGMENT_H
-
-/*
- * Copyright (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/include/linux/ftape-header-segment.h,v $
- * $Revision: 1.2 $
- * $Date: 1997/10/05 19:19:28 $
- *
- *      This file defines some offsets into the header segment of a
- *      floppy tape cartridge.  For use with the QIC-40/80/3010/3020
- *      floppy-tape driver "ftape" for Linux.
- */
-
-#define FT_SIGNATURE   0  /* must be 0xaa55aa55 */
-#define FT_FMT_CODE    4
-#define FT_REV_LEVEL   5  /* only for QIC-80 since. Rev. L (== 0x0c)         */
-#define FT_HSEG_1      6  /* first header segment, except for format code  6 */
-#define FT_HSEG_2      8  /* second header segment, except for format code 6 */
-#define FT_FRST_SEG   10  /* first data segment, except for format code 6    */
-#define FT_LAST_SEG   12  /* last data segment, except for format code 6     */
-#define FT_FMT_DATE   14  /* date and time of most recent format, see below  */
-#define FT_WR_DATE    18  /* date and time of most recent write or format    */
-#define FT_SPT        24  /* segments per track                              */
-#define FT_TPC        26  /* tracks per cartridge                            */
-#define FT_FHM        27  /* floppy drive head (maximum of it)               */
-#define FT_FTM        28  /* floppy track max.                               */
-#define FT_FSM        29  /* floppy sector max. (128)                        */
-#define FT_LABEL      30  /* floppy tape label                               */
-#define FT_LABEL_DATE 74  /* date and time the tape label was written        */
-#define FT_LABEL_SZ   (FT_LABEL_DATE - FT_LABEL)
-#define FT_CMAP_START 78  /* starting segment of compression map             */
-#define FT_FMT_ERROR 128  /* must be set to 0xff if remainder gets lost during
-			   * tape format
-			   */
-#define FT_SEG_CNT   130  /* number of seg. written, formatted or verified
-			   * through lifetime of tape (why not read?)
-			   */
-#define FT_INIT_DATE 138  /* date and time of initial tape format    */
-#define FT_FMT_CNT   142  /* number of times tape has been formatted */
-#define FT_FSL_CNT   144  /* number of segments in failed sector log */
-#define FT_MK_CODE   146  /* id string of tape manufacturer          */
-#define FT_LOT_CODE  190  /* tape manufacturer lot code              */
-#define FT_6_HSEG_1  234  /* first header segment for format code  6 */
-#define FT_6_HSEG_2  238  /* second header segment for format code 6 */
-#define FT_6_FRST_SEG 242 /* first data segment for format code 6    */
-#define FT_6_LAST_SEG 246 /* last data segment for format code 6     */
-
-#define FT_FSL        256
-#define FT_HEADER_END 256 /* space beyond this point:
-			   * format codes 2, 3 and 5: 
-			   * -  failed sector log until byte 2047
-			   * -  bad sector map in the reamining part of segment
-			   * format codes 4 and 6:
-			   * -  bad sector map  starts hear
-			   */
-
-
-/*  value to be stored at the FT_SIGNATURE offset 
- */
-#define FT_HSEG_MAGIC 0xaa55aa55
-#define FT_D2G_MAGIC  0x82288228 /* Ditto 2GB */
-
-/* data and time encoding: */
-#define FT_YEAR_SHIFT 25
-#define FT_YEAR_MASK  0xfe000000
-#define FT_YEAR_0     1970
-#define FT_YEAR_MAX   127
-#define FT_YEAR(year) ((((year)-FT_YEAR_0)<<FT_YEAR_SHIFT)&FT_YEAR_MASK)
-
-#define FT_TIME_SHIFT   0
-#define FT_TIME_MASK    0x01FFFFFF
-#define FT_TIME_MAX     0x01ea6dff /* last second of a year */
-#define FT_TIME(mo,d,h,m,s) \
-	((((s)+60*((m)+60*((h)+24*((d)+31*(mo))))) & FT_TIME_MASK))
-
-#define FT_TIME_STAMP(y,mo,d,h,m,s) (FT_YEAR(y) | FT_TIME(mo,d,h,m,s))
-
-/* values for the format code field */
-typedef enum {
-	fmt_normal = 2, /*  QIC-80 post Rev. B 205Ft or 307Ft tape    */
-	fmt_1100ft = 3, /*  QIC-80 post Rev. B 1100Ft tape            */
-	fmt_var    = 4, /*  QIC-80 post Rev. B variable length format */
-	fmt_425ft  = 5, /*  QIC-80 post Rev. B 425Ft tape             */
-	fmt_big    = 6  /*  QIC-3010/3020 variable length tape with more 
-			 *  than 2^16 segments per tape
-			 */
-} ft_format_type;
-
-/* definitions for the failed sector log */
-#define FT_FSL_SIZE        (2 * FT_SECTOR_SIZE - FT_HEADER_END)
-#define FT_FSL_MAX_ENTRIES (FT_FSL_SIZE/sizeof(__u32))
-
-typedef struct ft_fsl_entry {
-	__u16 segment;
-	__u16 date;
-} __attribute__ ((packed)) ft_fsl_entry;
-
-
-/*  date encoding for the failed sector log 
- *  month: 1..12, day: 1..31, year: 1970..2097
- */
-#define FT_FSL_TIME_STAMP(y,m,d) \
-	(((((y) - FT_YEAR_0)<<9)&0xfe00) | (((m)<<5)&0x01e0) | ((d)&0x001f))
-
-#endif /* _FTAPE_HEADER_SEGMENT_H */
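
A note on the date/time encoding removed above: FT_TIME_STAMP() packed a 7-bit year offset from 1970 above a 25-bit second-of-year count. A small user-space sketch of that packing follows (the zero-based month and day values are an assumption implied by the 31*month and 24*day factors, not something the header states):

#include <stdint.h>
#include <stdio.h>

#define FT_YEAR_0     1970
#define FT_YEAR_SHIFT 25
#define FT_YEAR_MASK  0xfe000000u
#define FT_TIME_MASK  0x01ffffffu

static uint32_t ft_time_stamp(int y, int mo, int d, int h, int m, int s)
{
	uint32_t year = ((uint32_t)(y - FT_YEAR_0) << FT_YEAR_SHIFT) & FT_YEAR_MASK;
	uint32_t secs = (uint32_t)(s + 60 * (m + 60 * (h + 24 * (d + 31 * mo))))
			& FT_TIME_MASK;

	return year | secs;
}

int main(void)
{
	/* 1997-10-05 19:19:28, with month and day counted from zero */
	printf("0x%08x\n", ft_time_stamp(1997, 9, 4, 19, 19, 28));
	return 0;
}
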
diff --git a/include/linux/ftape-vendors.h b/include/linux/ftape-vendors.h
deleted file mode 100644
index ec1a81f..0000000
--- a/include/linux/ftape-vendors.h
+++ /dev/null
@@ -1,137 +0,0 @@
-#ifndef _FTAPE_VENDORS_H
-#define _FTAPE_VENDORS_H
-
-/*
- *      Copyright (C) 1993-1996 Bas Laarhoven,
- *                (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/include/linux/ftape-vendors.h,v $
- * $Revision: 1.6 $
- * $Date: 1997/10/09 15:38:11 $
- *
- *      This file contains the supported drive types with their
- *      QIC-117 spec. vendor code and drive dependent configuration
- *      information.
- */
-
-typedef enum {
-	unknown_wake_up = 0,
-	no_wake_up,
-	wake_up_colorado,
-	wake_up_mountain,
-	wake_up_insight,
-} wake_up_types;
-
-typedef struct {
-	wake_up_types wake_up;	/* see wake_up_types */
-	char *name;		/* Text describing the drive */
-} wakeup_method;
-
-/*  Note: order of entries in WAKEUP_METHODS must be so that a variable
- *        of type wake_up_types can be used as an index in the array.
- */
-#define WAKEUP_METHODS { \
-  { unknown_wake_up,    "Unknown" }, \
-  { no_wake_up,         "None" }, \
-  { wake_up_colorado,   "Colorado" }, \
-  { wake_up_mountain,   "Mountain" }, \
-  { wake_up_insight,    "Motor-on" }, \
-}
-
-typedef struct {
-	unsigned int vendor_id;	/* vendor id from drive */
-	int speed;		/* maximum tape transport speed (ips) */
-	wake_up_types wake_up;	/* see wake_up_types */
-	char *name;		/* Text describing the drive */
-} vendor_struct;
-
-#define UNKNOWN_VENDOR (-1)
-
-#define QIC117_VENDORS {						    \
-/* see _vendor_struct */						    \
-  { 0x00000,  82, wake_up_colorado,  "Colorado DJ-10 (old)" },		    \
-  { 0x00047,  90, wake_up_colorado,  "Colorado DJ-10/DJ-20" },		    \
-  { 0x011c2,  84, wake_up_colorado,  "Colorado 700" },			    \
-  { 0x011c3,  90, wake_up_colorado,  "Colorado 1400" },			    \
-  { 0x011c4,  84, wake_up_colorado,  "Colorado DJ-10/DJ-20 (new)" },	    \
-  { 0x011c5,  84, wake_up_colorado,  "HP Colorado T1000" },		    \
-  { 0x011c6,  90, wake_up_colorado,  "HP Colorado T3000" },		    \
-  { 0x00005,  45, wake_up_mountain,  "Archive 5580i" },			    \
-  { 0x10005,  50, wake_up_insight,   "Insight 80Mb, Irwin 80SX" },	    \
-  { 0x00140,  74, wake_up_mountain,  "Archive S.Hornet [Identity/Escom]" }, \
-  { 0x00146,  72, wake_up_mountain,  "Archive 31250Q [Escom]" },	    \
-  { 0x0014a, 100, wake_up_mountain,  "Archive XL9250i [Conner/Escom]" },    \
-  { 0x0014c,  98, wake_up_mountain,  "Conner C250MQT" },		    \
-  { 0x0014e,  80, wake_up_mountain,  "Conner C250MQ" },			    \
-  { 0x00150,  80, wake_up_mountain,  "Conner TSM420R/TST800R" },	    \
-  { 0x00152,  80, wake_up_mountain,  "Conner TSM850R" },		    \
-  { 0x00156,  80, wake_up_mountain,  "Conner TSM850R/1700R/TST3200R" },	    \
-  { 0x00180,   0, wake_up_mountain,  "Summit SE 150" },			    \
-  { 0x00181,  85, wake_up_mountain,  "Summit SE 250, Mountain FS8000" },    \
-  { 0x001c1,  82, no_wake_up,        "Wangtek 3040F" },			    \
-  { 0x001c8,  64, no_wake_up,        "Wangtek 3080F" },			    \
-  { 0x001c8,  64, wake_up_colorado,  "Wangtek 3080F" },			    \
-  { 0x001ca,  67, no_wake_up,        "Wangtek 3080F (new)" },		    \
-  { 0x001cc,  77, wake_up_colorado,  "Wangtek 3200 / Teac 700" },	    \
-  { 0x001cd,  75, wake_up_colorado,  "Reveal TB1400" },			    \
-  { 0x00380,  85, wake_up_colorado,  "Exabyte Eagle-96" },		    \
-  { 0x00381,  85, wake_up_colorado,  "Exabyte Eagle TR-3" },		    \
-  { 0x00382,  85, wake_up_colorado,  "Exabyte Eagle TR-3" },		    \
-  { 0x003ce,  77, wake_up_colorado,  "Teac 800" },			    \
-  { 0x003cf,   0, wake_up_colorado,  "Teac FT3010TR" },			    \
-  { 0x08880,  64, no_wake_up,        "Iomega 250, Ditto 800" },		    \
-  { 0x08880,  64, wake_up_colorado,  "Iomega 250, Ditto 800" },		    \
-  { 0x08880,  64, wake_up_insight,   "Iomega 250, Ditto 800" },		    \
-  { 0x08881,  80, wake_up_colorado,  "Iomega 700" },			    \
-  { 0x08882,  80, wake_up_colorado,  "Iomega 3200" },			    \
-  { 0x08883,  80, wake_up_colorado,  "Iomega DITTO 2GB" },		    \
-  { 0x00021,  70, no_wake_up,        "AIWA CT-803" },			    \
-  { 0x004c0,  80, no_wake_up,        "AIWA TD-S1600" },			    \
-  { 0x00021,   0, wake_up_mountain,  "COREtape QIC80" },		    \
-  { 0x00441,   0, wake_up_mountain,  "ComByte DoublePlay" },		    \
-  { 0x00481, 127, wake_up_mountain,  "PERTEC MyTape 800" },		    \
-  { 0x00483, 130, wake_up_mountain,  "PERTEC MyTape 3200" },		    \
-  { UNKNOWN_VENDOR, 0, no_wake_up, "unknown" }				    \
-}
-
-#define QIC117_MAKE_CODES {			\
-  { 0, "Unassigned" },				\
-  { 1, "Alloy Computer Products" },		\
-  { 2, "3M" },					\
-  { 3, "Tandberg Data" },			\
-  { 4, "Colorado" },				\
-  { 5, "Archive/Conner" },			\
-  { 6, "Mountain/Summit Memory Systems" },	\
-  { 7, "Wangtek/Rexon/Tecmar" },		\
-  { 8, "Sony" },				\
-  { 9, "Cipher Data Products" },		\
-  { 10, "Irwin Magnetic Systems" },		\
-  { 11, "Braemar" },				\
-  { 12, "Verbatim" },				\
-  { 13, "Core International" },			\
-  { 14, "Exabyte" },				\
-  { 15, "Teac" },				\
-  { 16, "Gigatek" },				\
-  { 17, "ComByte" },				\
-  { 18, "PERTEC Memories" },			\
-  { 19, "Aiwa" },				\
-  { 71, "Colorado" },				\
-  { 546, "Iomega Inc" },			\
-}
-
-#endif /* _FTAPE_VENDORS_H */
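
The QIC117_VENDORS table above was scanned linearly, falling through to the UNKNOWN_VENDOR sentinel when no id matched. A self-contained sketch of that lookup pattern (trimmed-down table and types, not the driver's actual code):

#include <stdio.h>

struct vendor { int id; int speed; const char *name; };

#define UNKNOWN_VENDOR (-1)

static const struct vendor vendors[] = {
	{ 0x011c3,        90, "Colorado 1400" },
	{ 0x08883,        80, "Iomega DITTO 2GB" },
	{ UNKNOWN_VENDOR,  0, "unknown" },	/* sentinel, must stay last */
};

static const struct vendor *vendor_lookup(int id)
{
	const struct vendor *v = vendors;

	while (v->id != UNKNOWN_VENDOR && v->id != id)
		v++;
	return v;				/* "unknown" when nothing matched */
}

int main(void)
{
	printf("%s\n", vendor_lookup(0x08883)->name);
	return 0;
}
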
diff --git a/include/linux/ftape.h b/include/linux/ftape.h
deleted file mode 100644
index 7e7038c..0000000
--- a/include/linux/ftape.h
+++ /dev/null
@@ -1,201 +0,0 @@
-#ifndef _FTAPE_H
-#define _FTAPE_H
-
-/*
- * Copyright (C) 1994-1996 Bas Laarhoven,
- *           (C) 1996-1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/include/linux/ftape.h,v $
- * $Revision: 1.17.6.4 $
- * $Date: 1997/11/25 01:52:54 $
- *
- *      This file contains global definitions, typedefs and macros
- *      for the QIC-40/80/3010/3020 floppy-tape driver for Linux.
- */
-
-#define FTAPE_VERSION "ftape v3.04d 25/11/97"
-
-#ifdef __KERNEL__
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#endif
-#include <linux/types.h>
-#include <linux/mtio.h>
-
-#define FT_SECTOR(x)		(x+1)	/* sector offset into real sector */
-#define FT_SECTOR_SIZE		1024
-#define FT_SECTORS_PER_SEGMENT	  32
-#define FT_ECC_SECTORS		   3
-#define FT_SEGMENT_SIZE		((FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS) * FT_SECTOR_SIZE)
-#define FT_BUFF_SIZE    (FT_SECTORS_PER_SEGMENT * FT_SECTOR_SIZE)
-
-/*
- *   bits of the minor device number that define drive selection
- *   methods. Could be used one day to access multiple tape
- *   drives on the same controller.
- */
-#define FTAPE_SEL_A     0
-#define FTAPE_SEL_B     1
-#define FTAPE_SEL_C     2
-#define FTAPE_SEL_D     3
-#define FTAPE_SEL_MASK     3
-#define FTAPE_SEL(unit) ((unit) & FTAPE_SEL_MASK)
-#define FTAPE_NO_REWIND 4	/* mask for minor nr */
-
-/* the following two may be reported when MTIOCGET is requested ... */
-typedef union {
-	struct {
-		__u8 error;
-		__u8 command;
-	} error;
-	long space;
-} ft_drive_error;
-typedef union {
-	struct {
-		__u8 drive_status;
-		__u8 drive_config;
-		__u8 tape_status;
-	} status;
-	long space;
-} ft_drive_status;
-
-#ifdef __KERNEL__
-
-#define FT_RQM_DELAY    12
-#define FT_MILLISECOND  1
-#define FT_SECOND       1000
-#define FT_FOREVER      -1
-#ifndef HZ
-#error "HZ undefined."
-#endif
-#define FT_USPT         (1000000/HZ) /* microseconds per tick */
-
-/* This defines the number of retries that the driver will allow
- * before giving up (and letting a higher level handle the error).
- */
-#ifdef TESTING
-#define FT_SOFT_RETRIES 1	   /* number of low level retries */
-#define FT_RETRIES_ON_ECC_ERROR 3  /* ecc error when correcting segment */
-#else
-#define FT_SOFT_RETRIES 6	   /* number of low level retries (triple) */
-#define FT_RETRIES_ON_ECC_ERROR 3  /* ecc error when correcting segment */
-#endif
-
-#ifndef THE_FTAPE_MAINTAINER
-#define THE_FTAPE_MAINTAINER "the ftape maintainer"
-#endif
-
-/* Initialize missing configuration parameters.
- */
-#ifndef CONFIG_FT_NR_BUFFERS
-# define CONFIG_FT_NR_BUFFERS 3
-#endif
-#ifndef CONFIG_FT_FDC_THR
-# define CONFIG_FT_FDC_THR 8
-#endif
-#ifndef CONFIG_FT_FDC_MAX_RATE
-# define CONFIG_FT_FDC_MAX_RATE 2000
-#endif
-#ifndef CONFIG_FT_FDC_BASE
-# define CONFIG_FT_FDC_BASE 0
-#endif
-#ifndef CONFIG_FT_FDC_IRQ
-# define CONFIG_FT_FDC_IRQ  0
-#endif
-#ifndef CONFIG_FT_FDC_DMA
-# define CONFIG_FT_FDC_DMA  0
-#endif
-
-/* Turn some booleans into numbers.
- */
-#ifdef CONFIG_FT_PROBE_FC10
-# undef CONFIG_FT_PROBE_FC10
-# define CONFIG_FT_PROBE_FC10 1
-#else
-# define CONFIG_FT_PROBE_FC10 0
-#endif
-#ifdef CONFIG_FT_MACH2
-# undef CONFIG_FT_MACH2
-# define CONFIG_FT_MACH2 1
-#else
-# define CONFIG_FT_MACH2 0
-#endif
-
-/* Insert default settings
- */
-#if CONFIG_FT_PROBE_FC10 == 1
-# if CONFIG_FT_FDC_BASE == 0
-#  undef  CONFIG_FT_FDC_BASE
-#  define CONFIG_FT_FDC_BASE 0x180
-# endif
-# if CONFIG_FT_FDC_IRQ == 0
-#  undef  CONFIG_FT_FDC_IRQ
-#  define CONFIG_FT_FDC_IRQ 9
-# endif
-# if CONFIG_FT_FDC_DMA == 0
-#  undef  CONFIG_FT_FDC_DMA
-#  define CONFIG_FT_FDC_DMA 3
-# endif
-#elif CONFIG_FT_MACH2 == 1    /* CONFIG_FT_PROBE_FC10 == 1 */
-# if CONFIG_FT_FDC_BASE == 0
-#  undef  CONFIG_FT_FDC_BASE
-#  define CONFIG_FT_FDC_BASE 0x1E0
-# endif
-# if CONFIG_FT_FDC_IRQ == 0
-#  undef  CONFIG_FT_FDC_IRQ
-#  define CONFIG_FT_FDC_IRQ 6
-# endif
-# if CONFIG_FT_FDC_DMA == 0
-#  undef  CONFIG_FT_FDC_DMA
-#  define CONFIG_FT_FDC_DMA 2
-# endif
-#elif defined(CONFIG_FT_ALT_FDC)  /* CONFIG_FT_MACH2 */
-# if CONFIG_FT_FDC_BASE == 0
-#  undef  CONFIG_FT_FDC_BASE
-#  define CONFIG_FT_FDC_BASE 0x370
-# endif
-# if CONFIG_FT_FDC_IRQ == 0
-#  undef  CONFIG_FT_FDC_IRQ
-#  define CONFIG_FT_FDC_IRQ 6
-# endif
-# if CONFIG_FT_FDC_DMA == 0
-#  undef  CONFIG_FT_FDC_DMA
-#  define CONFIG_FT_FDC_DMA 2
-# endif
-#else                          /* CONFIG_FT_ALT_FDC */
-# if CONFIG_FT_FDC_BASE == 0
-#  undef  CONFIG_FT_FDC_BASE
-#  define CONFIG_FT_FDC_BASE 0x3f0
-# endif
-# if CONFIG_FT_FDC_IRQ == 0
-#  undef  CONFIG_FT_FDC_IRQ
-#  define CONFIG_FT_FDC_IRQ 6
-# endif
-# if CONFIG_FT_FDC_DMA == 0
-#  undef  CONFIG_FT_FDC_DMA
-#  define CONFIG_FT_FDC_DMA 2
-# endif
-#endif                         /* standard FDC */
-
-/*      some useful macros
- */
-#define NR_ITEMS(x)     (int)(sizeof(x)/ sizeof(*x))
-
-#endif  /* __KERNEL__ */
-
-#endif
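
For context, the drive-selection and no-rewind bits above were decoded straight from the character-device minor number. A quick user-space illustration of that decoding (the minor value is hypothetical; this is not driver code):

#include <stdio.h>

#define FTAPE_SEL_MASK	3
#define FTAPE_SEL(unit)	((unit) & FTAPE_SEL_MASK)
#define FTAPE_NO_REWIND	4

int main(void)
{
	unsigned int minor = 5;	/* hypothetical: drive B with no-rewind set */

	printf("drive %u, %srewind on close\n",
	       FTAPE_SEL(minor),
	       (minor & FTAPE_NO_REWIND) ? "no " : "");
	return 0;
}
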
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 9fc48a6..534744e 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -15,7 +15,7 @@
 #define FUSE_KERNEL_VERSION 7
 
 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 7
+#define FUSE_KERNEL_MINOR_VERSION 8
 
 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -92,6 +92,11 @@
 #define FUSE_ASYNC_READ		(1 << 0)
 #define FUSE_POSIX_LOCKS	(1 << 1)
 
+/**
+ * Release flags
+ */
+#define FUSE_RELEASE_FLUSH	(1 << 0)
+
 enum fuse_opcode {
 	FUSE_LOOKUP	   = 1,
 	FUSE_FORGET	   = 2,  /* no reply */
@@ -127,6 +132,8 @@
 	FUSE_ACCESS        = 34,
 	FUSE_CREATE        = 35,
 	FUSE_INTERRUPT     = 36,
+	FUSE_BMAP          = 37,
+	FUSE_DESTROY       = 38,
 };
 
 /* The read buffer is required to be at least 8k, but may be much larger */
@@ -205,12 +212,13 @@
 struct fuse_release_in {
 	__u64	fh;
 	__u32	flags;
-	__u32	padding;
+	__u32	release_flags;
+	__u64	lock_owner;
 };
 
 struct fuse_flush_in {
 	__u64	fh;
-	__u32	flush_flags;
+	__u32	unused;
 	__u32	padding;
 	__u64	lock_owner;
 };
@@ -296,6 +304,16 @@
 	__u64	unique;
 };
 
+struct fuse_bmap_in {
+	__u64	block;
+	__u32	blocksize;
+	__u32	padding;
+};
+
+struct fuse_bmap_out {
+	__u64	block;
+};
+
 struct fuse_in_header {
 	__u32	len;
 	__u32	opcode;
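
The fuse.h hunks above bump the minor protocol version to 8 and, among other things, let the kernel ask for a flush at release time via the new release_flags word. A minimal sketch of how a user-space filesystem daemon might react to that bit (struct layout copied from the hunk; the handler itself is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define FUSE_RELEASE_FLUSH	(1 << 0)

struct fuse_release_in {
	uint64_t	fh;
	uint32_t	flags;
	uint32_t	release_flags;
	uint64_t	lock_owner;
};

static void handle_release(const struct fuse_release_in *in)
{
	if (in->release_flags & FUSE_RELEASE_FLUSH)
		printf("flush fh %llu (lock owner %llu) before releasing\n",
		       (unsigned long long)in->fh,
		       (unsigned long long)in->lock_owner);
}

int main(void)
{
	struct fuse_release_in in = { .fh = 7, .release_flags = FUSE_RELEASE_FLUSH };

	handle_release(&in);
	return 0;
}
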
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 9049dc6..f7a9377 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -17,6 +17,9 @@
 #define GENL_HDRLEN	NLMSG_ALIGN(sizeof(struct genlmsghdr))
 
 #define GENL_ADMIN_PERM		0x01
+#define GENL_CMD_CAP_DO		0x02
+#define GENL_CMD_CAP_DUMP	0x04
+#define GENL_CMD_CAP_HASPOL	0x08
 
 /*
  * List of reserved static generic netlink identifiers:
@@ -58,9 +61,6 @@
 	CTRL_ATTR_OP_UNSPEC,
 	CTRL_ATTR_OP_ID,
 	CTRL_ATTR_OP_FLAGS,
-	CTRL_ATTR_OP_POLICY,
-	CTRL_ATTR_OP_DOIT,
-	CTRL_ATTR_OP_DUMPIT,
 	__CTRL_ATTR_OP_MAX,
 };
 
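
The three GENL_CMD_CAP_* bits above replace the per-operation policy/doit/dumpit attributes deleted from the controller interface: each operation is now described by a single flags word. Decoding such a word on the user-space side could look like this (sketch only, flag values taken from the hunk):

#include <stdint.h>
#include <stdio.h>

#define GENL_ADMIN_PERM		0x01
#define GENL_CMD_CAP_DO		0x02
#define GENL_CMD_CAP_DUMP	0x04
#define GENL_CMD_CAP_HASPOL	0x08

static void print_op_caps(uint32_t flags)
{
	printf("admin-only=%d do=%d dump=%d has-policy=%d\n",
	       !!(flags & GENL_ADMIN_PERM),
	       !!(flags & GENL_CMD_CAP_DO),
	       !!(flags & GENL_CMD_CAP_DUMP),
	       !!(flags & GENL_CMD_CAP_HASPOL));
}

int main(void)
{
	print_op_caps(GENL_CMD_CAP_DO | GENL_CMD_CAP_HASPOL);
	return 0;
}
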
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index bf2b6bc..00c314a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -116,6 +116,9 @@
 #ifndef HAVE_ARCH_FREE_PAGE
 static inline void arch_free_page(struct page *page, int order) { }
 #endif
+#ifndef HAVE_ARCH_ALLOC_PAGE
+static inline void arch_alloc_page(struct page *page, int order) { }
+#endif
 
 extern struct page *
 FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *));
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index a7ae7c1..8b7e4c1 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -54,8 +54,13 @@
 	__be64 no_addr;
 };
 
-static inline int gfs2_inum_equal(const struct gfs2_inum *ino1,
-				  const struct gfs2_inum *ino2)
+struct gfs2_inum_host {
+	__u64 no_formal_ino;
+	__u64 no_addr;
+};
+
+static inline int gfs2_inum_equal(const struct gfs2_inum_host *ino1,
+				  const struct gfs2_inum_host *ino2)
 {
 	return ino1->no_formal_ino == ino2->no_formal_ino &&
 	       ino1->no_addr == ino2->no_addr;
@@ -89,6 +94,12 @@
 	__be32 __pad1;		/* Was incarnation number in gfs1 */
 };
 
+struct gfs2_meta_header_host {
+	__u32 mh_magic;
+	__u32 mh_type;
+	__u32 mh_format;
+};
+
 /*
  * super-block structure
  *
@@ -128,6 +139,23 @@
 	/* In gfs1, quota and license dinodes followed */
 };
 
+struct gfs2_sb_host {
+	struct gfs2_meta_header_host sb_header;
+
+	__u32 sb_fs_format;
+	__u32 sb_multihost_format;
+
+	__u32 sb_bsize;
+	__u32 sb_bsize_shift;
+
+	struct gfs2_inum_host sb_master_dir; /* Was jindex dinode in gfs1 */
+	struct gfs2_inum_host sb_root_dir;
+
+	char sb_lockproto[GFS2_LOCKNAME_LEN];
+	char sb_locktable[GFS2_LOCKNAME_LEN];
+	/* In gfs1, quota and license dinodes followed */
+};
+
 /*
  * resource index structure
  */
@@ -145,6 +173,14 @@
 	__u8 ri_reserved[64];
 };
 
+struct gfs2_rindex_host {
+	__u64 ri_addr;	/* grp block disk address */
+	__u64 ri_data0;	/* first data location */
+	__u32 ri_length;	/* length of rgrp header in fs blocks */
+	__u32 ri_data;	/* num of data blocks in rgrp */
+	__u32 ri_bitbytes;	/* number of bytes in data bitmaps */
+};
+
 /*
  * resource group header structure
  */
@@ -176,6 +212,13 @@
 	__u8 rg_reserved[80]; /* Several fields from gfs1 now reserved */
 };
 
+struct gfs2_rgrp_host {
+	__u32 rg_flags;
+	__u32 rg_free;
+	__u32 rg_dinodes;
+	__u64 rg_igeneration;
+};
+
 /*
  * quota structure
  */
@@ -187,6 +230,12 @@
 	__u8 qu_reserved[64];
 };
 
+struct gfs2_quota_host {
+	__u64 qu_limit;
+	__u64 qu_warn;
+	__u64 qu_value;
+};
+
 /*
  * dinode structure
  */
@@ -270,6 +319,27 @@
 	__u8 di_reserved[56];
 };
 
+struct gfs2_dinode_host {
+	__u64 di_size;	/* number of bytes in file */
+	__u64 di_blocks;	/* number of blocks in file */
+
+	/* This section varies from gfs1. Padding added to align with
+         * remainder of dinode
+	 */
+	__u64 di_goal_meta;	/* rgrp to alloc from next */
+	__u64 di_goal_data;	/* data block goal */
+	__u64 di_generation;	/* generation number for NFS */
+
+	__u32 di_flags;	/* GFS2_DIF_... */
+	__u16 di_height;	/* height of metadata */
+
+	/* These only apply to directories  */
+	__u16 di_depth;	/* Number of bits in the table */
+	__u32 di_entries;	/* The number of entries in the directory */
+
+	__u64 di_eattr;	/* extended attribute block number */
+};
+
 /*
  * directory structure - many of these per directory file
  */
@@ -344,6 +414,16 @@
 	__be32 lh_hash;
 };
 
+struct gfs2_log_header_host {
+	struct gfs2_meta_header_host lh_header;
+
+	__u64 lh_sequence;	/* Sequence number of this transaction */
+	__u32 lh_flags;	/* GFS2_LOG_HEAD_... */
+	__u32 lh_tail;		/* Block number of log tail */
+	__u32 lh_blkno;
+	__u32 lh_hash;
+};
+
 /*
  * Log type descriptor
  */
@@ -384,6 +464,11 @@
 	__be64 ir_length;
 };
 
+struct gfs2_inum_range_host {
+	__u64 ir_start;
+	__u64 ir_length;
+};
+
 /*
  * Statfs change
  * Describes a change to the pool of free and allocated
@@ -396,6 +481,12 @@
 	__be64 sc_dinodes;
 };
 
+struct gfs2_statfs_change_host {
+	__u64 sc_total;
+	__u64 sc_free;
+	__u64 sc_dinodes;
+};
+
 /*
  * Quota change
  * Describes an allocation change for a particular
@@ -410,33 +501,38 @@
 	__be32 qc_id;
 };
 
+struct gfs2_quota_change_host {
+	__u64 qc_change;
+	__u32 qc_flags;	/* GFS2_QCF_... */
+	__u32 qc_id;
+};
+
 #ifdef __KERNEL__
 /* Translation functions */
 
-extern void gfs2_inum_in(struct gfs2_inum *no, const void *buf);
-extern void gfs2_inum_out(const struct gfs2_inum *no, void *buf);
-extern void gfs2_sb_in(struct gfs2_sb *sb, const void *buf);
-extern void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf);
-extern void gfs2_rindex_out(const struct gfs2_rindex *ri, void *buf);
-extern void gfs2_rgrp_in(struct gfs2_rgrp *rg, const void *buf);
-extern void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf);
-extern void gfs2_quota_in(struct gfs2_quota *qu, const void *buf);
-extern void gfs2_quota_out(const struct gfs2_quota *qu, void *buf);
-extern void gfs2_dinode_in(struct gfs2_dinode *di, const void *buf);
-extern void gfs2_dinode_out(const struct gfs2_dinode *di, void *buf);
+extern void gfs2_inum_in(struct gfs2_inum_host *no, const void *buf);
+extern void gfs2_inum_out(const struct gfs2_inum_host *no, void *buf);
+extern void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf);
+extern void gfs2_rindex_in(struct gfs2_rindex_host *ri, const void *buf);
+extern void gfs2_rindex_out(const struct gfs2_rindex_host *ri, void *buf);
+extern void gfs2_rgrp_in(struct gfs2_rgrp_host *rg, const void *buf);
+extern void gfs2_rgrp_out(const struct gfs2_rgrp_host *rg, void *buf);
+extern void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf);
+struct gfs2_inode;
+extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
 extern void gfs2_ea_header_in(struct gfs2_ea_header *ea, const void *buf);
 extern void gfs2_ea_header_out(const struct gfs2_ea_header *ea, void *buf);
-extern void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf);
-extern void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf);
-extern void gfs2_inum_range_out(const struct gfs2_inum_range *ir, void *buf);
-extern void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf);
-extern void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf);
-extern void gfs2_quota_change_in(struct gfs2_quota_change *qc, const void *buf);
+extern void gfs2_log_header_in(struct gfs2_log_header_host *lh, const void *buf);
+extern void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf);
+extern void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf);
+extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf);
+extern void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf);
+extern void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf);
 
 /* Printing functions */
 
-extern void gfs2_rindex_print(const struct gfs2_rindex *ri);
-extern void gfs2_dinode_print(const struct gfs2_dinode *di);
+extern void gfs2_rindex_print(const struct gfs2_rindex_host *ri);
+extern void gfs2_dinode_print(const struct gfs2_inode *ip);
 
 #endif /* __KERNEL__ */
 
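
The pattern behind all the new *_host structures above: the __be-annotated structs describe the on-disk (big-endian) layout, while the _host copies hold CPU-endian values, so each gfs2_*_in() helper is a field-by-field byte swap. A user-space sketch of that idea for the inum pair (glibc's be64toh() stands in for the kernel's be64_to_cpu(); this is not the fs/gfs2 code):

#include <endian.h>	/* be64toh()/htobe64(), glibc */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct inum_disk { uint64_t no_formal_ino; uint64_t no_addr; };	/* big-endian */
struct inum_host { uint64_t no_formal_ino; uint64_t no_addr; };	/* CPU-endian */

static void inum_in(struct inum_host *no, const void *buf)
{
	struct inum_disk d;

	memcpy(&d, buf, sizeof(d));	/* copy the raw on-disk bytes */
	no->no_formal_ino = be64toh(d.no_formal_ino);
	no->no_addr       = be64toh(d.no_addr);
}

int main(void)
{
	struct inum_disk d = { htobe64(1), htobe64(0x2a) };
	struct inum_host h;

	inum_in(&h, &d);
	printf("%llu %llu\n",
	       (unsigned long long)h.no_formal_ino,
	       (unsigned long long)h.no_addr);
	return 0;
}
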
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index fd7d12d..3d8768b 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -3,6 +3,7 @@
 
 #include <linux/fs.h>
 #include <linux/mm.h>
+#include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
 
@@ -41,9 +42,10 @@
 
 #define kunmap(page) do { (void) (page); } while (0)
 
-#define kmap_atomic(page, idx)		page_address(page)
-#define kunmap_atomic(addr, idx)	do { } while (0)
-#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
+#define kmap_atomic(page, idx) \
+	({ pagefault_disable(); page_address(page); })
+#define kunmap_atomic(addr, idx)	do { pagefault_enable(); } while (0)
+#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
 #endif
 
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ace64e5..a60995a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -35,6 +35,7 @@
 
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 			      int write);
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index c115e9e..52f53e2 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -461,7 +461,7 @@
 	int (*reply) (struct i2o_controller *, u32, struct i2o_message *);
 
 	/* Event handler */
-	void (*event) (struct i2o_event *);
+	work_func_t event;
 
 	struct workqueue_struct *event_queue;	/* Event queue */
 
@@ -490,7 +490,7 @@
  */
 struct i2o_pool {
 	char *name;
-	kmem_cache_t *slab;
+	struct kmem_cache *slab;
 	mempool_t *mempool;
 };
 
@@ -986,7 +986,8 @@
 
 /**
  *	i2o_driver_notify_controller_add - Send notification of added controller
- *					   to a single I2O driver
+ *	@drv: I2O driver
+ *	@c: I2O controller
  *
  *	Send notification of added controller to a single registered driver.
  */
@@ -998,8 +999,9 @@
 };
 
 /**
- *	i2o_driver_notify_controller_remove - Send notification of removed
- *					      controller to a single I2O driver
+ *	i2o_driver_notify_controller_remove - Send notification of removed controller
+ *	@drv: I2O driver
+ *	@c: I2O controller
  *
  *	Send notification of removed controller to a single registered driver.
  */
@@ -1011,8 +1013,9 @@
 };
 
 /**
- *	i2o_driver_notify_device_add - Send notification of added device to a
- *				       single I2O driver
+ *	i2o_driver_notify_device_add - Send notification of added device
+ *	@drv: I2O driver
+ *	@i2o_dev: the added i2o_device
  *
  *	Send notification of added device to a single registered driver.
  */
@@ -1025,7 +1028,8 @@
 
 /**
  *	i2o_driver_notify_device_remove - Send notification of removed device
- *					  to a single I2O driver
+ *	@drv: I2O driver
+ *	@i2o_dev: the removed i2o_device
  *
  *	Send notification of removed device to a single registered driver.
  */
@@ -1148,7 +1152,7 @@
 /**
  * 	i2o_msg_post_wait - Post and wait a message and wait until return
  *	@c: controller
- *	@m: message to post
+ *	@msg: message to post
  *	@timeout: time in seconds to wait
  *
  * 	This API allows an OSM to post a message and then be told whether or
diff --git a/include/linux/icmp.h b/include/linux/icmp.h
index 878cfe4..24da4fb 100644
--- a/include/linux/icmp.h
+++ b/include/linux/icmp.h
@@ -68,7 +68,7 @@
 struct icmphdr {
   __u8		type;
   __u8		code;
-  __be16	checksum;
+  __sum16	checksum;
   union {
 	struct {
 		__be16	id;
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index c771a7d..68d3526 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -7,17 +7,17 @@
 
 	__u8		icmp6_type;
 	__u8		icmp6_code;
-	__u16		icmp6_cksum;
+	__sum16		icmp6_cksum;
 
 
 	union {
-		__u32			un_data32[1];
-		__u16			un_data16[2];
+		__be32			un_data32[1];
+		__be16			un_data16[2];
 		__u8			un_data8[4];
 
 		struct icmpv6_echo {
-			__u16		identifier;
-			__u16		sequence;
+			__be16		identifier;
+			__be16		sequence;
 		} u_echo;
 
                 struct icmpv6_nd_advt {
@@ -53,7 +53,7 @@
 #else
 #error	"Please fix <asm/byteorder.h>"
 #endif
-			__u16		rt_lifetime;
+			__be16		rt_lifetime;
                 } u_nd_ra;
 
 	} icmp6_dataun;
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index b9255854..99393ef 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -1,17 +1,19 @@
 #ifndef __LINUX_IF_PACKET_H
 #define __LINUX_IF_PACKET_H
 
+#include <linux/types.h>
+
 struct sockaddr_pkt
 {
 	unsigned short spkt_family;
 	unsigned char spkt_device[14];
-	unsigned short spkt_protocol;
+	__be16 spkt_protocol;
 };
 
 struct sockaddr_ll
 {
 	unsigned short	sll_family;
-	unsigned short	sll_protocol;
+	__be16		sll_protocol;
 	int		sll_ifindex;
 	unsigned short	sll_hatype;
 	unsigned char	sll_pkttype;
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index bef9f8f..8de079b 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -19,10 +19,10 @@
 {
 	char			name[IFNAMSIZ];
 	int			link;
-	__u16			i_flags;
-	__u16			o_flags;
-	__u32			i_key;
-	__u32			o_key;
+	__be16			i_flags;
+	__be16			o_flags;
+	__be32			i_key;
+	__be32			o_key;
 	struct iphdr		iph;
 };
 
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 21dd569..9dbb525 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -30,7 +30,7 @@
 {
 	__u8 type;
 	__u8 code;		/* For newer IGMP */
-	__be16 csum;
+	__sum16 csum;
 	__be32 group;
 };
 
@@ -127,6 +127,7 @@
 
 #ifdef __KERNEL__
 #include <linux/skbuff.h>
+#include <linux/timer.h>
 #include <linux/in.h>
 
 extern int sysctl_igmp_max_memberships;
diff --git a/include/linux/in.h b/include/linux/in.h
index 2619859..1912e7c 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -45,6 +45,7 @@
 
   IPPROTO_COMP   = 108,                /* Compression Header protocol */
   IPPROTO_SCTP   = 132,		/* Stream Control Transport Protocol	*/
+  IPPROTO_UDPLITE = 136,	/* UDP-Lite (RFC 3828)			*/
 
   IPPROTO_RAW	 = 255,		/* Raw IP packets			*/
   IPPROTO_MAX
diff --git a/include/linux/in6.h b/include/linux/in6.h
index f28621f..4e8350a 100644
--- a/include/linux/in6.h
+++ b/include/linux/in6.h
@@ -54,7 +54,7 @@
 struct sockaddr_in6 {
 	unsigned short int	sin6_family;    /* AF_INET6 */
 	__be16			sin6_port;      /* Transport layer port # */
-	__u32			sin6_flowinfo;  /* IPv6 flow information */
+	__be32			sin6_flowinfo;  /* IPv6 flow information */
 	struct in6_addr		sin6_addr;      /* IPv6 address */
 	__u32			sin6_scope_id;  /* scope id (new in RFC2553) */
 };
@@ -72,7 +72,7 @@
 struct in6_flowlabel_req
 {
 	struct in6_addr	flr_dst;
-	__u32	flr_label;
+	__be32	flr_label;
 	__u8	flr_action;
 	__u8	flr_share;
 	__u16	flr_flags;
diff --git a/include/linux/inet.h b/include/linux/inet.h
index b7c6da7..675a7db 100644
--- a/include/linux/inet.h
+++ b/include/linux/inet.h
@@ -46,7 +46,7 @@
 #include <linux/types.h>
 
 extern __be32 in_aton(const char *str);
-extern int in4_pton(const char *src, int srclen, u8 *dst, char delim, const char **end);
-extern int in6_pton(const char *src, int srclen, u8 *dst, char delim, const char **end);
+extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
+extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
 #endif
 #endif	/* _LINUX_INET_H */
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 5a0ab04..c0f7aec 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -124,12 +124,13 @@
  *	Check if a mask is acceptable.
  */
  
-static __inline__ int bad_mask(u32 mask, u32 addr)
+static __inline__ int bad_mask(__be32 mask, __be32 addr)
 {
+	__u32 hmask;
 	if (addr & (mask = ~mask))
 		return 1;
-	mask = ntohl(mask);
-	if (mask & (mask+1))
+	hmask = ntohl(mask);
+	if (hmask & (hmask+1))
 		return 1;
 	return 0;
 }
@@ -190,11 +191,12 @@
 	return 0;
 }
 
-static __inline__ int inet_mask_len(__u32 mask)
+static __inline__ int inet_mask_len(__be32 mask)
 {
-	if (!(mask = ntohl(mask)))
+	__u32 hmask = ntohl(mask);
+	if (!hmask)
 		return 0;
-	return 32 - ffz(~mask);
+	return 32 - ffz(~hmask);
 }
 
 
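
Both inetdevice helpers above now keep the big-endian value and its host-order copy in separate variables instead of reusing one. The same logic restated in plain user-space C (not the kernel code; __builtin_ctz() plays the role of 32 - ffz(~x)):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static int bad_mask(uint32_t mask_be, uint32_t addr_be)
{
	uint32_t inv = ~mask_be;		/* host-bit part, still big-endian */
	uint32_t hinv = ntohl(inv);

	if (addr_be & inv)			/* address has bits outside the mask */
		return 1;
	return (hinv & (hinv + 1)) != 0;	/* inverted mask must be 2^k - 1 */
}

static int mask_len(uint32_t mask_be)
{
	uint32_t hmask = ntohl(mask_be);

	return hmask ? 32 - __builtin_ctz(hmask) : 0;
}

int main(void)
{
	uint32_t mask = htonl(0xffffff00);	/* 255.255.255.0 */
	uint32_t addr = htonl(0xc0a80000);	/* 192.168.0.0 */

	printf("len=%d bad=%d\n", mask_len(mask), bad_mask(mask, addr));
	return 0;
}
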
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 33c5daa..733790d 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -73,7 +73,7 @@
 extern struct nsproxy init_nsproxy;
 #define INIT_NSPROXY(nsproxy) {						\
 	.count		= ATOMIC_INIT(1),				\
-	.nslock		= SPIN_LOCK_UNLOCKED,				\
+	.nslock		= __SPIN_LOCK_UNLOCKED(nsproxy.nslock),		\
 	.uts_ns		= &init_uts_ns,					\
 	.namespace	= NULL,						\
 	INIT_IPC_NS(ipc_ns)						\
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5b83e7b..de7593f 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -11,6 +11,7 @@
 #include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/irqflags.h>
+#include <linux/bottom_half.h>
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -217,12 +218,6 @@
 #define save_and_cli(x)	save_and_cli(&x)
 #endif /* CONFIG_SMP */
 
-extern void local_bh_disable(void);
-extern void __local_bh_enable(void);
-extern void _local_bh_enable(void);
-extern void local_bh_enable(void);
-extern void local_bh_enable_ip(unsigned long ip);
-
 /* PLEASE, avoid allocating new softirqs, if you do not _really_ need high
    frequency threaded job scheduling. For almost all purposes
    tasklets are more than enough. F.e. all serial device BHs et
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index d42c833..cf8696d 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -89,6 +89,7 @@
 #define IORESOURCE_ROM_ENABLE		(1<<0)	/* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
 #define IORESOURCE_ROM_SHADOW		(1<<1)	/* ROM is copy at C000:0 */
 #define IORESOURCE_ROM_COPY		(1<<2)	/* ROM is alloc'd copy, resource field overlaid */
+#define IORESOURCE_ROM_BIOS_COPY	(1<<3)	/* ROM is BIOS copy, resource field overlaid */
 
 /* PC/ISA/whatever - the normal PC address spaces: IO and memory */
 extern struct resource ioport_resource;
diff --git a/include/linux/ip.h b/include/linux/ip.h
index ecee9bb..1d36b97 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -98,7 +98,7 @@
 	__be16	frag_off;
 	__u8	ttl;
 	__u8	protocol;
-	__be16	check;
+	__sum16	check;
 	__be32	saddr;
 	__be32	daddr;
 	/*The options start here. */
diff --git a/include/linux/ip6_tunnel.h b/include/linux/ip6_tunnel.h
index 5c23aeb..af3f4a7 100644
--- a/include/linux/ip6_tunnel.h
+++ b/include/linux/ip6_tunnel.h
@@ -25,7 +25,7 @@
 	__u8 proto;		/* tunnel protocol */
 	__u8 encap_limit;	/* encapsulation limit for tunnel */
 	__u8 hop_limit;		/* hop limit for tunnel */
-	__u32 flowinfo;		/* traffic class and flowlabel for tunnel */
+	__be32 flowinfo;	/* traffic class and flowlabel for tunnel */
 	__u32 flags;		/* tunnel flags */
 	struct in6_addr laddr;	/* local tunnel end-point address */
 	struct in6_addr raddr;	/* remote tunnel end-point address */
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 796ca00..7a9db390 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -208,6 +208,15 @@
    code as the first byte of the incoming data, unlike a response. */
 
 
+/*
+ * Modes for ipmi_set_maint_mode() and the userland IOCTL.  The AUTO
+ * setting is the default and means maintenance mode is entered
+ * automatically when certain commands are seen.  Setting it explicitly
+ * on or off overrides the automatic behavior.
+ */
+#define IPMI_MAINTENANCE_MODE_AUTO	0
+#define IPMI_MAINTENANCE_MODE_OFF	1
+#define IPMI_MAINTENANCE_MODE_ON	2
 
 #ifdef __KERNEL__
 
@@ -374,6 +383,35 @@
 			    unsigned int  chans);
 
 /*
+ * Go into a mode where the driver will not autonomously attempt to do
+ * things with the interface.  It will still respond to attentions and
+ * interrupts, and it will expect that commands will complete.  It
+ * will not automatically check for flags, events, or things of that
+ * nature.
+ *
+ * This is primarily used for firmware upgrades.  The idea is that
+ * when you go into firmware upgrade mode, you do this operation
+ * and the driver will not attempt to do anything but what you tell
+ * it or what the BMC asks for.
+ *
+ * Note that if you send a command that resets the BMC, the driver
+ * will still expect a response from that command.  So the BMC should
+ * reset itself *after* the response is sent.  Resetting before the
+ * response is just silly.
+ *
+ * If in auto maintenance mode, the driver will automatically go into
+ * maintenance mode for 30 seconds if it sees a cold reset, a warm
+ * reset, or a firmware NetFN.  This means that code that uses only
+ * firmware NetFN commands to do upgrades will work automatically
+ * without change, assuming it sends a message every 30 seconds or
+ * less.
+ *
+ * See the IPMI_MAINTENANCE_MODE_xxx defines for what the mode means.
+ */
+int ipmi_get_maintenance_mode(ipmi_user_t user);
+int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
+
+/*
  * Allow run-to-completion mode to be set for the interface of
  * a specific user.
  */
@@ -656,4 +694,11 @@
 #define IPMICTL_GET_TIMING_PARMS_CMD	_IOR(IPMI_IOC_MAGIC, 23, \
 					     struct ipmi_timing_parms)
 
+/*
+ * Set the maintenance mode.  See ipmi_set_maintenance_mode() above
+ * for a description of what this does.
+ */
+#define IPMICTL_GET_MAINTENANCE_MODE_CMD	_IOR(IPMI_IOC_MAGIC, 30, int)
+#define IPMICTL_SET_MAINTENANCE_MODE_CMD	_IOW(IPMI_IOC_MAGIC, 31, int)
+
 #endif /* __LINUX_IPMI_H */
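
The new maintenance-mode ioctls are a plain get/set of an int. A user-space sketch of forcing the mode on before a firmware update (the device path and the pass-a-pointer-to-int convention are assumptions to verify against the IPMI character-device code, not something this header guarantees):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ipmi.h>

int main(void)
{
	int mode = IPMI_MAINTENANCE_MODE_ON;
	int fd = open("/dev/ipmi0", O_RDWR);	/* typical node; may differ */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* assumed calling convention for the _IOW(..., int) ioctl above */
	if (ioctl(fd, IPMICTL_SET_MAINTENANCE_MODE_CMD, &mode) < 0)
		perror("set maintenance mode");
	close(fd);
	return 0;
}
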
diff --git a/include/linux/ipmi_msgdefs.h b/include/linux/ipmi_msgdefs.h
index 4d04d8b..b56a158 100644
--- a/include/linux/ipmi_msgdefs.h
+++ b/include/linux/ipmi_msgdefs.h
@@ -46,6 +46,8 @@
 #define IPMI_NETFN_APP_REQUEST			0x06
 #define IPMI_NETFN_APP_RESPONSE			0x07
 #define IPMI_GET_DEVICE_ID_CMD		0x01
+#define IPMI_COLD_RESET_CMD		0x02
+#define IPMI_WARM_RESET_CMD		0x03
 #define IPMI_CLEAR_MSG_FLAGS_CMD	0x30
 #define IPMI_GET_DEVICE_GUID_CMD	0x08
 #define IPMI_GET_MSG_FLAGS_CMD		0x31
@@ -60,20 +62,27 @@
 #define IPMI_NETFN_STORAGE_RESPONSE		0x0b
 #define IPMI_ADD_SEL_ENTRY_CMD		0x44
 
+#define IPMI_NETFN_FIRMWARE_REQUEST		0x08
+#define IPMI_NETFN_FIRMWARE_RESPONSE		0x09
+
 /* The default slave address */
 #define IPMI_BMC_SLAVE_ADDR	0x20
 
 /* The BT interface on high-end HP systems supports up to 255 bytes in
  * one transfer.  Its "virtual" BMC supports some commands that are longer
  * than 128 bytes.  Use the full 256, plus NetFn/LUN, Cmd, cCode, plus
- * some overhead.  It would be nice to base this on the "BT Capabilities"
- * but that's too hard to propagate to the rest of the driver. */
+ * some overhead; it's not worth the effort to dynamically size this based
+ * on the results of the "Get BT Capabilities" command. */
 #define IPMI_MAX_MSG_LENGTH	272	/* multiple of 16 */
 
 #define IPMI_CC_NO_ERROR		0x00
 #define IPMI_NODE_BUSY_ERR		0xc0
 #define IPMI_INVALID_COMMAND_ERR	0xc1
+#define IPMI_TIMEOUT_ERR		0xc3
 #define IPMI_ERR_MSG_TRUNCATED		0xc6
+#define IPMI_REQ_LEN_INVALID_ERR	0xc7
+#define IPMI_REQ_LEN_EXCEEDED_ERR	0xc8
+#define IPMI_NOT_IN_MY_STATE_ERR	0xd5	/* IPMI 2.0 */
 #define IPMI_LOST_ARBITRATION_ERR	0x81
 #define IPMI_BUS_ERR			0x82
 #define IPMI_NAK_ON_WRITE_ERR		0x83
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 6d9c7e4..c063310 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -115,6 +115,13 @@
 	   poll for operations during things like crash dumps. */
 	void (*poll)(void *send_info);
 
+	/* Enable/disable firmware maintenance mode.  Note that this
+	   is *not* the maintenance modes defined above, it is simply
+	   an on/off setting.  The message handler does the mode
+	   handling.  Note that this is called from interrupt context,
+	   so it cannot block. */
+	void (*set_maintenance_mode)(void *send_info, int enable);
+
 	/* Tell the handler that we are using it/not using it.  The
 	   message handler gets the modules that this handler belongs
 	   to; this function lets the SMI claim any modules that it
@@ -173,6 +180,7 @@
 		      void                     *send_info,
 		      struct ipmi_device_id    *device_id,
 		      struct device            *dev,
+		      const char               *sysfs_name,
 		      unsigned char            slave_addr);
 
 /*
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 4f435c5..f824113 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -274,7 +274,7 @@
 	struct in6_addr		*saddr_cache;
 #endif
 
-	__u32			flow_label;
+	__be32			flow_label;
 	__u32			frag_size;
 	__s16			hop_limit;
 	__s16			mcast_hops;
diff --git a/include/linux/ixjuser.h b/include/linux/ixjuser.h
index fd1756d..88b4589 100644
--- a/include/linux/ixjuser.h
+++ b/include/linux/ixjuser.h
@@ -315,7 +315,7 @@
 * structures.  If the freq0 variable is non-zero, the tone table contents
 * for the tone_index are updated to the frequencies and gains defined.  It
 * should be noted that DTMF tones cannot be reassigned, so if DTMF tone
-* table indexs are used in a cadence the frequency and gain variables will
+* table indexes are used in a cadence the frequency and gain variables will
 * be ignored.
 *
 * If the array elements contain frequency parameters the driver will
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index fe89444..4527375 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -839,7 +839,6 @@
  */
 
 /* Filing buffers */
-extern void __journal_temp_unlink_buffer(struct journal_head *jh);
 extern void journal_unfile_buffer(journal_t *, struct journal_head *);
 extern void __journal_unfile_buffer(struct journal_head *);
 extern void __journal_refile_buffer(struct journal_head *);
@@ -949,7 +948,7 @@
 /*
  * handle management
  */
-extern kmem_cache_t *jbd_handle_cache;
+extern struct kmem_cache *jbd_handle_cache;
 
 static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
 {
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index ddb1287..0e0fedd 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -848,7 +848,6 @@
  */
 
 /* Filing buffers */
-extern void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
 extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
 extern void __jbd2_journal_unfile_buffer(struct journal_head *);
 extern void __jbd2_journal_refile_buffer(struct journal_head *);
@@ -958,7 +957,7 @@
 /*
  * handle management
  */
-extern kmem_cache_t *jbd2_handle_cache;
+extern struct kmem_cache *jbd2_handle_cache;
 
 static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
 {
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index c8d5f20..0ec6e28 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -74,7 +74,7 @@
 #define __jiffy_data  __attribute__((section(".data")))
 
 /*
- * The 64-bit value is not volatile - you MUST NOT read it
+ * The 64-bit value is not atomic - you MUST NOT read it
  * without sampling the sequence number in xtime_lock.
  * get_jiffies_64() will do this for you as appropriate.
  */
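
The reworded comment is the whole point: jiffies_64 cannot be read atomically on 32-bit machines, so readers must retry under xtime_lock's sequence counter, which is what get_jiffies_64() does. The retry idea, boiled down to plain C with the locking and memory barriers deliberately left out (a sketch of the pattern, not the kernel's seqlock):

#include <stdint.h>
#include <stdio.h>

static volatile unsigned int seq;	/* even = stable, odd = writer active */
static volatile uint32_t lo, hi;	/* two halves of a 64-bit counter */

static uint64_t read64(void)
{
	unsigned int s;
	uint32_t l, h;

	do {
		s = seq;		/* sample the sequence number */
		l = lo;
		h = hi;
	} while ((s & 1) || s != seq);	/* retry if a writer was active */

	return ((uint64_t)h << 32) | l;
}

int main(void)
{
	lo = 42;
	printf("%llu\n", (unsigned long long)read64());
	return 0;
}
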
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index efe0ee4..06c58c4 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -158,7 +158,7 @@
 	if (t->buf.tail != NULL)
 		t->buf.tail->commit = t->buf.tail->used;
 	spin_unlock_irqrestore(&t->buf.lock, flags);
-	schedule_work(&t->buf.work);
+	schedule_delayed_work(&t->buf.work, 0);
 }
 
 #endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index b9b5e4b..6738283 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -65,7 +65,7 @@
  * context (spinlock, irq-handler, ...).
  *
  * This is a useful debugging help to be able to catch problems early and not
- * be biten later when the calling function happens to sleep when it is not
+ * be bitten later when the calling function happens to sleep when it is not
  * supposed to.
  */
 #ifdef CONFIG_PREEMPT_VOLUNTARY
diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h
index 891bb2c..aea34e74 100644
--- a/include/linux/kernelcapi.h
+++ b/include/linux/kernelcapi.h
@@ -47,6 +47,8 @@
 
 #include <linux/list.h>
 #include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <asm/semaphore.h>
 
 #define	KCI_CONTRUP	0	/* arg: struct capi_profile */
 #define	KCI_CONTRDOWN	1	/* arg: NULL */
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 6427949..d02425c 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -105,9 +105,14 @@
 						unsigned int order);
 extern void crash_kexec(struct pt_regs *);
 int kexec_should_crash(struct task_struct *);
+void crash_save_cpu(struct pt_regs *regs, int cpu);
 extern struct kimage *kexec_image;
 extern struct kimage *kexec_crash_image;
 
+#ifndef kexec_flush_icache_page
+#define kexec_flush_icache_page(page)
+#endif
+
 #define KEXEC_ON_CRASH  0x00000001
 #define KEXEC_ARCH_MASK 0xffff0000
 
@@ -122,6 +127,8 @@
 #define KEXEC_ARCH_IA_64   (50 << 16)
 #define KEXEC_ARCH_S390    (22 << 16)
 #define KEXEC_ARCH_SH      (42 << 16)
+#define KEXEC_ARCH_MIPS_LE (10 << 16)
+#define KEXEC_ARCH_MIPS    ( 8 << 16)
 
 #define KEXEC_FLAGS    (KEXEC_ON_CRASH)  /* List of defined/legal kexec flags */
 
@@ -131,6 +138,7 @@
 typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
 extern note_buf_t *crash_notes;
 
+
 #else /* !CONFIG_KEXEC */
 struct pt_regs;
 struct task_struct;
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index bcd9cd1..d1c8d28 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -47,6 +47,7 @@
 	KOBJ_UMOUNT	= (__force kobject_action_t) 0x05,	/* umount event for block devices (broken) */
 	KOBJ_OFFLINE	= (__force kobject_action_t) 0x06,	/* device offline */
 	KOBJ_ONLINE	= (__force kobject_action_t) 0x07,	/* device online */
+	KOBJ_MOVE	= (__force kobject_action_t) 0x08,	/* device move */
 };
 
 struct kobject {
@@ -76,6 +77,7 @@
 extern void kobject_del(struct kobject *);
 
 extern int __must_check kobject_rename(struct kobject *, const char *new_name);
+extern int __must_check kobject_move(struct kobject *, struct kobject *);
 
 extern int __must_check kobject_register(struct kobject *);
 extern void kobject_unregister(struct kobject *);
@@ -264,6 +266,8 @@
 
 #if defined(CONFIG_HOTPLUG)
 void kobject_uevent(struct kobject *kobj, enum kobject_action action);
+void kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
+			char *envp[]);
 
 int add_uevent_var(char **envp, int num_envp, int *cur_index,
 			char *buffer, int buffer_size, int *cur_len,
@@ -271,6 +275,10 @@
 	__attribute__((format (printf, 7, 8)));
 #else
 static inline void kobject_uevent(struct kobject *kobj, enum kobject_action action) { }
+static inline void kobject_uevent_env(struct kobject *kobj,
+				      enum kobject_action action,
+				      char *envp[])
+{ }
 
 static inline int add_uevent_var(char **envp, int num_envp, int *cur_index,
 				      char *buffer, int buffer_size, int *cur_len, 
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index ac4c055..769be39 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -165,7 +165,7 @@
 extern int arch_init_kprobes(void);
 extern void show_registers(struct pt_regs *regs);
 extern kprobe_opcode_t *get_insn_slot(void);
-extern void free_insn_slot(kprobe_opcode_t *slot);
+extern void free_insn_slot(kprobe_opcode_t *slot, int dirty);
 extern void kprobes_inc_nmissed_count(struct kprobe *p);
 
 /* Get the kprobe at this addr (if any) - called with preemption disabled */
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 84eeecd..611f17f 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -248,9 +248,9 @@
  *
  * Returns the scalar nanoseconds representation of kt
  */
-static inline u64 ktime_to_ns(const ktime_t kt)
+static inline s64 ktime_to_ns(const ktime_t kt)
 {
-	return (u64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
+	return (s64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
 }
 
 #endif
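
Why the ktime_to_ns() return-type change matters: ktime values can be negative (for example the result of subtracting a later time from an earlier one), and with a u64 cast the conversion would silently wrap. A one-file illustration of the signed arithmetic (restating the formula, not the kernel code):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

static int64_t to_ns(long sec, long nsec)
{
	return (int64_t)sec * NSEC_PER_SEC + nsec;	/* signed, so -1.5s stays -1.5s */
}

int main(void)
{
	printf("%lld\n", (long long)to_ns(-2, 500000000));	/* prints -1500000000 */
	return 0;
}
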
diff --git a/include/linux/libata.h b/include/linux/libata.h
index abd2deb..ab27548 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -140,6 +140,7 @@
 	ATA_DFLAG_LBA48		= (1 << 1), /* device supports LBA48 */
 	ATA_DFLAG_CDB_INTR	= (1 << 2), /* device asserts INTRQ when ready for CDB */
 	ATA_DFLAG_NCQ		= (1 << 3), /* device supports NCQ */
+	ATA_DFLAG_FLUSH_EXT	= (1 << 4), /* do FLUSH_EXT instead of FLUSH */
 	ATA_DFLAG_CFG_MASK	= (1 << 8) - 1,
 
 	ATA_DFLAG_PIO		= (1 << 8), /* device limited to PIO mode */
@@ -175,6 +176,7 @@
 	ATA_FLAG_SKIP_D2H_BSY	= (1 << 12), /* can't wait for the first D2H
 					      * Register FIS clearing BSY */
 	ATA_FLAG_DEBUGMSG	= (1 << 13),
+	ATA_FLAG_SETXFER_POLLING= (1 << 14), /* use polling for SETXFER */
 
 	/* The following flag belongs to ap->pflags but is kept in
 	 * ap->flags because it's referenced in many LLDs and will be
@@ -283,6 +285,9 @@
 	ATA_EHI_QUIET		= (1 << 3),  /* be quiet */
 
 	ATA_EHI_DID_RESET	= (1 << 16), /* already reset this port */
+	ATA_EHI_PRINTINFO	= (1 << 17), /* print configuration info */
+	ATA_EHI_SETMODE		= (1 << 18), /* configure transfer mode */
+	ATA_EHI_POST_SETMODE	= (1 << 19), /* revalidating after setmode */
 
 	ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK,
 
@@ -307,10 +312,11 @@
 	   (some horkage may be drive/controller pair dependent) */
 
 	ATA_HORKAGE_DIAGNOSTIC	= (1 << 0),	/* Failed boot diag */
+	ATA_HORKAGE_NODMA	= (1 << 1),	/* DMA problems */
+	ATA_HORKAGE_NONCQ	= (1 << 2),	/* Don't use NCQ */
 };
 
 enum hsm_task_states {
-	HSM_ST_UNKNOWN,		/* state unknown */
 	HSM_ST_IDLE,		/* no command on going */
 	HSM_ST,			/* (waiting the device to) transfer data */
 	HSM_ST_LAST,		/* (waiting the device to) complete command */
@@ -329,6 +335,7 @@
 	AC_ERR_SYSTEM		= (1 << 6), /* system error */
 	AC_ERR_INVALID		= (1 << 7), /* invalid argument */
 	AC_ERR_OTHER		= (1 << 8), /* unknown */
+	AC_ERR_NODEV_HINT	= (1 << 9), /* polling device detection hint */
 };
 
 /* forward declarations */
@@ -568,8 +575,9 @@
 	struct ata_host		*host;
 	struct device 		*dev;
 
-	struct work_struct	port_task;
-	struct work_struct	hotplug_task;
+	void			*port_task_data;
+	struct delayed_work	port_task;
+	struct delayed_work	hotplug_task;
 	struct work_struct	scsi_rescan_task;
 
 	unsigned int		hsm_task_state;
@@ -700,6 +708,8 @@
 extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param);
 extern int ata_std_prereset(struct ata_port *ap);
 extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
+extern int sata_port_hardreset(struct ata_port *ap,
+			       const unsigned long *timing);
 extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
 extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
 extern void ata_port_disable(struct ata_port *);
@@ -744,10 +754,9 @@
 extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
 extern void ata_host_resume(struct ata_host *host);
 extern int ata_ratelimit(void);
-extern unsigned int ata_busy_sleep(struct ata_port *ap,
-				   unsigned long timeout_pat,
-				   unsigned long timeout);
-extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
+extern int ata_busy_sleep(struct ata_port *ap,
+			  unsigned long timeout_pat, unsigned long timeout);
+extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn,
 				void *data, unsigned long delay);
 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
 			     unsigned long interval_msec,
@@ -787,6 +796,7 @@
 			  unsigned int ofs, unsigned int len);
 extern void ata_id_c_string(const u16 *id, unsigned char *s,
 			    unsigned int ofs, unsigned int len);
+extern unsigned long ata_device_blacklisted(const struct ata_device *dev);
 extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
 extern void ata_bmdma_start (struct ata_queued_cmd *qc);
 extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
@@ -1061,7 +1071,7 @@
 		udelay(10);
 		status = ata_chk_status(ap);
 		max--;
-	} while ((status & bits) && (max > 0));
+	} while (status != 0xff && (status & bits) && (max > 0));
 
 	return status;
 }
@@ -1082,7 +1092,7 @@
 {
 	u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
 
-	if (status & (ATA_BUSY | ATA_DRQ)) {
+	if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) {
 		unsigned long l = ap->ioaddr.status_addr;
 		if (ata_msg_warn(ap))
 			printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
@@ -1148,37 +1158,6 @@
 }
 
 /**
- *	ata_irq_on - Enable interrupts on a port.
- *	@ap: Port on which interrupts are enabled.
- *
- *	Enable interrupts on a legacy IDE device using MMIO or PIO,
- *	wait for idle, clear any pending interrupts.
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-
-static inline u8 ata_irq_on(struct ata_port *ap)
-{
-	struct ata_ioports *ioaddr = &ap->ioaddr;
-	u8 tmp;
-
-	ap->ctl &= ~ATA_NIEN;
-	ap->last_ctl = ap->ctl;
-
-	if (ap->flags & ATA_FLAG_MMIO)
-		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
-	else
-		outb(ap->ctl, ioaddr->ctl_addr);
-	tmp = ata_wait_idle(ap);
-
-	ap->ops->irq_clear(ap);
-
-	return tmp;
-}
-
-
-/**
  *	ata_irq_ack - Acknowledge a device interrupt.
  *	@ap: Port on which interrupts are enabled.
  *
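
Both libata polling helpers in the hunks above now bail out when the status register reads 0xff, which is what a floating (empty) bus returns, instead of burning the whole retry budget. The control flow, restated as a self-contained sketch with a stubbed status register:

#include <stdio.h>

#define ATA_BUSY	0x80
#define ATA_DRQ		0x08

static unsigned char fake_status(void)
{
	return 0xff;			/* pulled-up bus: no device answering */
}

static unsigned char busy_wait(unsigned int bits, unsigned int max)
{
	unsigned char status;

	do {
		status = fake_status();
		max--;
	} while (status != 0xff && (status & bits) && max > 0);

	return status;			/* 0xff here means "give up, no device" */
}

int main(void)
{
	printf("0x%02x\n", busy_wait(ATA_BUSY | ATA_DRQ, 1000));
	return 0;
}
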
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 862d973..8c39654 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -164,14 +164,12 @@
  */
 struct nlm_host * nlmclnt_lookup_host(const struct sockaddr_in *, int, int, const char *, int);
 struct nlm_host * nlmsvc_lookup_host(struct svc_rqst *, const char *, int);
-struct nlm_host * nlm_lookup_host(int server, const struct sockaddr_in *, int, int, const char *, int);
 struct rpc_clnt * nlm_bind_host(struct nlm_host *);
 void		  nlm_rebind_host(struct nlm_host *);
 struct nlm_host * nlm_get_host(struct nlm_host *);
 void		  nlm_release_host(struct nlm_host *);
 void		  nlm_shutdown_hosts(void);
 extern void	  nlm_host_rebooted(const struct sockaddr_in *, const char *, int, u32);
-struct nsm_handle *nsm_find(const struct sockaddr_in *, const char *, int);
 void		  nsm_release(struct nsm_handle *);
 
 
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 819f08f..498bfbd 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -193,7 +193,6 @@
 
 extern void lockdep_off(void);
 extern void lockdep_on(void);
-extern int lockdep_internal(void);
 
 /*
  * These methods are used by specific locking variants (spinlocks,
@@ -243,6 +242,8 @@
 
 # define INIT_LOCKDEP				.lockdep_recursion = 0,
 
+#define lockdep_depth(tsk)	((tsk)->lockdep_depth)
+
 #else /* !LOCKDEP */
 
 static inline void lockdep_off(void)
@@ -253,11 +254,6 @@
 {
 }
 
-static inline int lockdep_internal(void)
-{
-	return 0;
-}
-
 # define lock_acquire(l, s, t, r, c, i)		do { } while (0)
 # define lock_release(l, n, i)			do { } while (0)
 # define lockdep_init()				do { } while (0)
@@ -277,6 +273,9 @@
  * The class key takes no space if lockdep is disabled:
  */
 struct lock_class_key { };
+
+#define lockdep_depth(tsk)	(0)
+
 #endif /* !LOCKDEP */
 
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index b03cfb9..326da7d 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -31,15 +31,14 @@
 #define	HPET_MINOR	     228
 
 struct device;
-struct class_device;
 
 struct miscdevice  {
 	int minor;
 	const char *name;
 	const struct file_operations *fops;
 	struct list_head list;
-	struct device *dev;
-	struct class_device *class;
+	struct device *parent;
+	struct device *this_device;
 };
 
 extern int misc_register(struct miscdevice * misc);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d538de9..a17b147 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -114,6 +114,8 @@
 #endif
 };
 
+extern struct kmem_cache *vm_area_cachep;
+
 /*
  * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is
  * disabled, then there's a single shared list of VMAs maintained by the
@@ -294,6 +296,24 @@
 void split_page(struct page *page, unsigned int order);
 
 /*
+ * Compound pages have a destructor function.  Provide a
+ * prototype for that function and accessor functions.
+ * These are _only_ valid on the head of a PG_compound page.
+ */
+typedef void compound_page_dtor(struct page *);
+
+static inline void set_compound_page_dtor(struct page *page,
+						compound_page_dtor *dtor)
+{
+	page[1].lru.next = (void *)dtor;
+}
+
+static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
+{
+	return (compound_page_dtor *)page[1].lru.next;
+}
+
+/*
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
  * zeroes, and text pages of executables and shared libraries have
@@ -396,7 +416,9 @@
  * We are going to use the flags for the page to node mapping if its in
  * there.  This includes the case where there is no node, so it is implicit.
  */
-#define FLAGS_HAS_NODE		(NODES_WIDTH > 0 || NODES_SHIFT == 0)
+#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
+#define NODE_NOT_IN_PAGE_FLAGS
+#endif
 
 #ifndef PFN_SECTION_SHIFT
 #define PFN_SECTION_SHIFT 0
@@ -411,13 +433,18 @@
 #define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
 #define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
 
-/* NODE:ZONE or SECTION:ZONE is used to lookup the zone from a page. */
-#if FLAGS_HAS_NODE
-#define ZONETABLE_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
 #else
-#define ZONETABLE_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
+#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
 #endif
-#define ZONETABLE_PGSHIFT	ZONES_PGSHIFT
+
+#if ZONES_WIDTH > 0
+#define ZONEID_PGSHIFT		ZONES_PGSHIFT
+#else
+#define ZONEID_PGSHIFT		NODES_PGOFF
+#endif
 
 #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
 #error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
@@ -426,26 +453,28 @@
 #define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
 #define NODES_MASK		((1UL << NODES_WIDTH) - 1)
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
-#define ZONETABLE_MASK		((1UL << ZONETABLE_SHIFT) - 1)
+#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
 
 static inline enum zone_type page_zonenum(struct page *page)
 {
 	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
 
-struct zone;
-extern struct zone *zone_table[];
-
+/*
+ * The identification function is only used by the buddy allocator for
+ * determining if two pages could be buddies. We are not really
+ * identifying a zone since we could be using the section number
+ * id if no node id is available in the page flags.
+ * We guarantee only that it will return the same value for two
+ * combinable pages in a zone.
+ */
 static inline int page_zone_id(struct page *page)
 {
-	return (page->flags >> ZONETABLE_PGSHIFT) & ZONETABLE_MASK;
-}
-static inline struct zone *page_zone(struct page *page)
-{
-	return zone_table[page_zone_id(page)];
+	BUILD_BUG_ON(ZONEID_PGSHIFT == 0 && ZONEID_MASK);
+	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
 }
 
-static inline unsigned long zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(struct zone *zone)
 {
 #ifdef CONFIG_NUMA
 	return zone->node;
@@ -454,13 +483,20 @@
 #endif
 }
 
-static inline unsigned long page_to_nid(struct page *page)
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+extern int page_to_nid(struct page *page);
+#else
+static inline int page_to_nid(struct page *page)
 {
-	if (FLAGS_HAS_NODE)
-		return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
-	else
-		return zone_to_nid(page_zone(page));
+	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 }
+#endif
+
+static inline struct zone *page_zone(struct page *page)
+{
+	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
+}
+
 static inline unsigned long page_to_section(struct page *page)
 {
 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
@@ -477,6 +513,7 @@
 	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
 	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
 }
+
 static inline void set_page_section(struct page *page, unsigned long section)
 {
 	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
@@ -947,8 +984,6 @@
 extern void show_mem(void);
 extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
-extern void zonetable_add(struct zone *zone, int nid, enum zone_type zid,
-					unsigned long pfn, unsigned long size);
 
 #ifdef CONFIG_NUMA
 extern void setup_per_cpu_pageset(void);
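Editor's note: the practical consequence of the ZONEID_* rework is that the buddy allocator only ever compares the packed ID of two candidate pages and never looks it up, which is why page_zone() is now rebuilt from the node id and zone number instead of the removed zone_table[]. A hedged sketch of the kind of check this enables (mirroring a page_is_buddy()-style test, not taken verbatim from this patch):

/* Two pages may only be coalesced if they live in the same zone.
 * Comparing the packed IDs avoids touching struct zone at all. */
static inline int pages_in_same_zone(struct page *a, struct page *b)
{
	return page_zone_id(a) == page_zone_id(b);
}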
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 991a373..d0e6a54 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -39,6 +39,10 @@
 				write_misalign:1;
 };
 
+struct mmc_ext_csd {
+	unsigned int		hs_max_dtr;
+};
+
 struct sd_scr {
 	unsigned char		sda_vsn;
 	unsigned char		bus_widths;
@@ -46,6 +50,10 @@
 #define SD_SCR_BUS_WIDTH_4	(1<<2)
 };
 
+struct sd_switch_caps {
+	unsigned int		hs_max_dtr;
+};
+
 struct mmc_host;
 
 /*
@@ -62,12 +70,15 @@
 #define MMC_STATE_BAD		(1<<2)		/* unrecognised device */
 #define MMC_STATE_SDCARD	(1<<3)		/* is an SD card */
 #define MMC_STATE_READONLY	(1<<4)		/* card is read-only */
+#define MMC_STATE_HIGHSPEED	(1<<5)		/* card is in high speed mode */
 	u32			raw_cid[4];	/* raw card CID */
 	u32			raw_csd[4];	/* raw card CSD */
 	u32			raw_scr[2];	/* raw card SCR */
 	struct mmc_cid		cid;		/* card identification */
 	struct mmc_csd		csd;		/* card specific */
+	struct mmc_ext_csd	ext_csd;	/* mmc v4 extended card specific */
 	struct sd_scr		scr;		/* extra SD information */
+	struct sd_switch_caps	sw_caps;	/* switch (CMD6) caps */
 };
 
 #define mmc_card_present(c)	((c)->state & MMC_STATE_PRESENT)
@@ -75,12 +86,14 @@
 #define mmc_card_bad(c)		((c)->state & MMC_STATE_BAD)
 #define mmc_card_sd(c)		((c)->state & MMC_STATE_SDCARD)
 #define mmc_card_readonly(c)	((c)->state & MMC_STATE_READONLY)
+#define mmc_card_highspeed(c)	((c)->state & MMC_STATE_HIGHSPEED)
 
 #define mmc_card_set_present(c)	((c)->state |= MMC_STATE_PRESENT)
 #define mmc_card_set_dead(c)	((c)->state |= MMC_STATE_DEAD)
 #define mmc_card_set_bad(c)	((c)->state |= MMC_STATE_BAD)
 #define mmc_card_set_sd(c)	((c)->state |= MMC_STATE_SDCARD)
 #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
+#define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED)
 
 #define mmc_card_name(c)	((c)->cid.prod_name)
 #define mmc_card_id(c)		((c)->dev.bus_id)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 587264a..c15ae19 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -74,8 +74,8 @@
 struct device;
 
 struct mmc_host {
-	struct device		*dev;
-	struct class_device	class_dev;
+	struct device		*parent;
+	struct device		class_dev;
 	int			index;
 	const struct mmc_host_ops *ops;
 	unsigned int		f_min;
@@ -110,7 +110,7 @@
 	struct mmc_card		*card_busy;	/* the MMC card claiming host */
 	struct mmc_card		*card_selected;	/* the selected MMC card */
 
-	struct work_struct	detect;
+	struct delayed_work	detect;
 
 	unsigned long		private[0] ____cacheline_aligned;
 };
@@ -125,8 +125,8 @@
 	return (void *)host->private;
 }
 
-#define mmc_dev(x)	((x)->dev)
-#define mmc_hostname(x)	((x)->class_dev.class_id)
+#define mmc_dev(x)	((x)->parent)
+#define mmc_hostname(x)	((x)->class_dev.bus_id)
 
 extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
 extern int mmc_resume_host(struct mmc_host *);
diff --git a/include/linux/mmc/protocol.h b/include/linux/mmc/protocol.h
index 08dec8d..2dce60c 100644
--- a/include/linux/mmc/protocol.h
+++ b/include/linux/mmc/protocol.h
@@ -25,14 +25,16 @@
 #ifndef MMC_MMC_PROTOCOL_H
 #define MMC_MMC_PROTOCOL_H
 
-/* Standard MMC commands (3.1)           type  argument     response */
+/* Standard MMC commands (4.1)           type  argument     response */
    /* class 1 */
 #define	MMC_GO_IDLE_STATE         0   /* bc                          */
 #define MMC_SEND_OP_COND          1   /* bcr  [31:0] OCR         R3  */
 #define MMC_ALL_SEND_CID          2   /* bcr                     R2  */
 #define MMC_SET_RELATIVE_ADDR     3   /* ac   [31:16] RCA        R1  */
 #define MMC_SET_DSR               4   /* bc   [31:16] RCA            */
+#define MMC_SWITCH                6   /* ac   [31:0] See below   R1b */
 #define MMC_SELECT_CARD           7   /* ac   [31:16] RCA        R1  */
+#define MMC_SEND_EXT_CSD          8   /* adtc                    R1  */
 #define MMC_SEND_CSD              9   /* ac   [31:16] RCA        R2  */
 #define MMC_SEND_CID             10   /* ac   [31:16] RCA        R2  */
 #define MMC_READ_DAT_UNTIL_STOP  11   /* adtc [31:0] dadr        R1  */
@@ -80,6 +82,7 @@
   /* class 8 */
 /* This is basically the same command as for MMC with some quirks. */
 #define SD_SEND_RELATIVE_ADDR     3   /* bcr                     R6  */
+#define SD_SWITCH                 6   /* adtc [31:0] See below   R1  */
 
   /* Application commands */
 #define SD_APP_SET_BUS_WIDTH      6   /* ac   [1:0] bus width    R1  */
@@ -88,6 +91,30 @@
 #define SD_APP_SEND_SCR          51   /* adtc                    R1  */
 
 /*
+ * MMC_SWITCH argument format:
+ *
+ *	[31:26] Always 0
+ *	[25:24] Access Mode
+ *	[23:16] Location of target Byte in EXT_CSD
+ *	[15:08] Value Byte
+ *	[07:03] Always 0
+ *	[02:00] Command Set
+ */
+
+/*
+ * SD_SWITCH argument format:
+ *
+ *      [31] Check (0) or switch (1)
+ *      [30:24] Reserved (0)
+ *      [23:20] Function group 6
+ *      [19:16] Function group 5
+ *      [15:12] Function group 4
+ *      [11:8] Function group 3
+ *      [7:4] Function group 2
+ *      [3:0] Function group 1
+ */
+
+/*
   MMC status in R1
   Type
   	e : error bit
@@ -230,13 +257,54 @@
 
 #define CSD_STRUCT_VER_1_0  0           /* Valid for system specification 1.0 - 1.2 */
 #define CSD_STRUCT_VER_1_1  1           /* Valid for system specification 1.4 - 2.2 */
-#define CSD_STRUCT_VER_1_2  2           /* Valid for system specification 3.1       */
+#define CSD_STRUCT_VER_1_2  2           /* Valid for system specification 3.1 - 3.2 - 3.31 - 4.0 - 4.1 */
+#define CSD_STRUCT_EXT_CSD  3           /* Version is coded in CSD_STRUCTURE in EXT_CSD */
 
 #define CSD_SPEC_VER_0      0           /* Implements system specification 1.0 - 1.2 */
 #define CSD_SPEC_VER_1      1           /* Implements system specification 1.4 */
 #define CSD_SPEC_VER_2      2           /* Implements system specification 2.0 - 2.2 */
-#define CSD_SPEC_VER_3      3           /* Implements system specification 3.1 */
+#define CSD_SPEC_VER_3      3           /* Implements system specification 3.1 - 3.2 - 3.31 */
+#define CSD_SPEC_VER_4      4           /* Implements system specification 4.0 - 4.1 */
 
+/*
+ * EXT_CSD fields
+ */
+
+#define EXT_CSD_BUS_WIDTH	183	/* R/W */
+#define EXT_CSD_HS_TIMING	185	/* R/W */
+#define EXT_CSD_CARD_TYPE	196	/* RO */
+
+/*
+ * EXT_CSD field definitions
+ */
+
+#define EXT_CSD_CMD_SET_NORMAL		(1<<0)
+#define EXT_CSD_CMD_SET_SECURE		(1<<1)
+#define EXT_CSD_CMD_SET_CPSECURE	(1<<2)
+
+#define EXT_CSD_CARD_TYPE_26	(1<<0)	/* Card can run at 26MHz */
+#define EXT_CSD_CARD_TYPE_52	(1<<1)	/* Card can run at 52MHz */
+
+#define EXT_CSD_BUS_WIDTH_1	0	/* Card is in 1 bit mode */
+#define EXT_CSD_BUS_WIDTH_4	1	/* Card is in 4 bit mode */
+#define EXT_CSD_BUS_WIDTH_8	2	/* Card is in 8 bit mode */
+
+/*
+ * MMC_SWITCH access modes
+ */
+
+#define MMC_SWITCH_MODE_CMD_SET		0x00	/* Change the command set */
+#define MMC_SWITCH_MODE_SET_BITS	0x01	/* Set bits which are 1 in value */
+#define MMC_SWITCH_MODE_CLEAR_BITS	0x02	/* Clear bits which are 1 in value */
+#define MMC_SWITCH_MODE_WRITE_BYTE	0x03	/* Set target to value */
+
+/*
+ * SCR field definitions
+ */
+
+#define SCR_SPEC_VER_0      0           /* Implements system specification 1.0 - 1.01 */
+#define SCR_SPEC_VER_1      1           /* Implements system specification 1.10 */
+#define SCR_SPEC_VER_2      2           /* Implements system specification 2.00 */
 
 /*
  * SD bus widths
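Editor's note: the MMC_SWITCH (CMD6) argument packs the access mode, EXT_CSD byte index, value and command set into one 32-bit word, per the layout documented above. A sketch of how a host driver might build the argument to enable high-speed timing; only the bit layout and the EXT_CSD_* names come from this header, the helper itself is illustrative:

#include <linux/mmc/protocol.h>

/* Pack a CMD6 argument: [25:24] access, [23:16] EXT_CSD index,
 * [15:8] value, [2:0] command set (layout per the comment above). */
static u32 mmc_switch_arg(u8 access, u8 index, u8 value, u8 cmd_set)
{
	return (access << 24) | (index << 16) | (value << 8) | cmd_set;
}

/* e.g. writing 1 to EXT_CSD_HS_TIMING is what moves an MMC v4 card
 * into high speed mode:
 *   mmc_switch_arg(MMC_SWITCH_MODE_WRITE_BYTE,
 *                  EXT_CSD_HS_TIMING, 1, EXT_CSD_CMD_SET_NORMAL);
 */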
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e06683e..e339a73 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -278,7 +278,7 @@
 	/*
 	 * rarely used fields:
 	 */
-	char			*name;
+	const char		*name;
 } ____cacheline_internodealigned_in_smp;
 
 /*
@@ -288,19 +288,94 @@
  */
 #define DEF_PRIORITY 12
 
+/* Maximum number of zones on a zonelist */
+#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
+
+#ifdef CONFIG_NUMA
+/*
+ * We cache key information from each zonelist for smaller cache
+ * footprint when scanning for free pages in get_page_from_freelist().
+ *
+ * 1) The BITMAP fullzones tracks which zones in a zonelist have come
+ *    up short of free memory since the last time (last_fullzone_zap)
+ *    we zero'd fullzones.
+ * 2) The array z_to_n[] maps each zone in the zonelist to its node
+ *    id, so that we can efficiently evaluate whether that node is
+ *    set in the current task's mems_allowed.
+ *
+ * Both fullzones and z_to_n[] are one-to-one with the zonelist,
+ * indexed by a zones offset in the zonelist zones[] array.
+ *
+ * The get_page_from_freelist() routine does two scans.  During the
+ * first scan, we skip zones whose corresponding bit in 'fullzones'
+ * is set or whose corresponding node in current->mems_allowed (which
+ * comes from cpusets) is not set.  During the second scan, we bypass
+ * this zonelist_cache, to ensure we look methodically at each zone.
+ *
+ * Once per second, we zero out (zap) fullzones, forcing us to
+ * reconsider nodes that might have regained more free memory.
+ * The field last_full_zap is the time we last zapped fullzones.
+ *
+ * This mechanism reduces the amount of time we waste repeatedly
+ * re-examining zones for free memory when they came up low on
+ * memory just a moment ago.
+ *
+ * The zonelist_cache struct members logically belong in struct
+ * zonelist.  However, the mempolicy zonelists constructed for
+ * MPOL_BIND are intentionally variable length (and usually much
+ * shorter).  A general purpose mechanism for handling structs with
+ * multiple variable length members is more mechanism than we want
+ * here.  We resort to some special case hackery instead.
+ *
+ * The MPOL_BIND zonelists don't need this zonelist_cache (in good
+ * part because they are shorter), so we put the fixed length stuff
+ * at the front of the zonelist struct, ending in a variable length
+ * zones[], as is needed by MPOL_BIND.
+ *
+ * Then we put the optional zonelist cache on the end of the zonelist
+ * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
+ * the fixed length portion at the front of the struct.  This pointer
+ * both enables us to find the zonelist cache, and in the case of
+ * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL)
+ * to know that the zonelist cache is not there.
+ *
+ * The end result is that struct zonelists come in two flavors:
+ *  1) The full, fixed length version, shown below, and
+ *  2) The custom zonelists for MPOL_BIND.
+ * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
+ *
+ * Even though there may be multiple CPU cores on a node modifying
+ * fullzones or last_full_zap in the same zonelist_cache at the same
+ * time, we don't lock it.  This is just hint data - if it is wrong now
+ * and then, the allocator will still function, perhaps a bit slower.
+ */
+
+
+struct zonelist_cache {
+	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
+	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
+	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
+};
+#else
+struct zonelist_cache;
+#endif
+
 /*
  * One allocation request operates on a zonelist. A zonelist
  * is a list of zones, the first one is the 'goal' of the
  * allocation, the other zones are fallback zones, in decreasing
  * priority.
  *
- * Right now a zonelist takes up less than a cacheline. We never
- * modify it apart from boot-up, and only a few indices are used,
- * so despite the zonelist table being relatively big, the cache
- * footprint of this construct is very small.
+ * If zlcache_ptr is not NULL, then it is just the address of zlcache,
+ * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
  */
+
 struct zonelist {
-	struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; // NULL delimited
+	struct zonelist_cache *zlcache_ptr;		     // NULL or &zlcache
+	struct zone *zones[MAX_ZONES_PER_ZONELIST + 1];      // NULL delimited
+#ifdef CONFIG_NUMA
+	struct zonelist_cache zlcache;			     // optional ...
+#endif
 };
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
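Editor's note: the convention described in the long comment can be summarised in code: consumers always go through zlcache_ptr, so the custom MPOL_BIND zonelists simply leave it NULL and get scanned unconditionally. A minimal sketch of that access pattern, assuming CONFIG_NUMA so struct zonelist_cache is fully defined (function name is illustrative):

static int zone_worth_scanning(struct zonelist *zl, int i)
{
	struct zonelist_cache *zlc = zl->zlcache_ptr;

	if (!zlc)		/* MPOL_BIND zonelist: no cache, always scan */
		return 1;
	/* skip zones recently marked full; this is only a hint, so unlocked */
	return !test_bit(i, zlc->fullzones);
}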
diff --git a/include/linux/module.h b/include/linux/module.h
index d1d00ce..d33df24 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -6,7 +6,6 @@
  * Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996
  * Rewritten again by Rusty Russell, 2002
  */
-#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include <linux/stat.h>
@@ -264,6 +263,7 @@
 	struct module_attribute *modinfo_attrs;
 	const char *version;
 	const char *srcversion;
+	struct kobject *drivers_dir;
 
 	/* Exported symbols */
 	const struct kernel_symbol *syms;
@@ -410,17 +410,7 @@
 	return ret;
 }
 
-static inline void module_put(struct module *module)
-{
-	if (module) {
-		unsigned int cpu = get_cpu();
-		local_dec(&module->ref[cpu].count);
-		/* Maybe they're waiting for us to drop reference? */
-		if (unlikely(!module_is_live(module)))
-			wake_up_process(module->waiter);
-		put_cpu();
-	}
-}
+extern void module_put(struct module *module);
 
 #else /*!CONFIG_MODULE_UNLOAD*/
 static inline int try_module_get(struct module *module)
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 7c0c2c1..4a189da 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -63,6 +63,9 @@
    not there, read bits mean it's readable, write bits mean it's
    writable. */
 #define __module_param_call(prefix, name, set, get, arg, perm)		\
+	/* Default value instead of permissions? */			\
+	static int __param_perm_check_##name __attribute__((unused)) =	\
+	BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2));	\
 	static char __param_str_##name[] = prefix #name;		\
 	static struct kernel_param const __param_##name			\
 	__attribute_used__						\
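Editor's note: the new BUILD_BUG_ON_ZERO() line turns a common mistake, passing something other than an octal sysfs permission mask as the third argument, into a compile-time error, and also rejects world-writable parameters. Roughly (editor's example, not from the patch):

#include <linux/moduleparam.h>

static int debug;
module_param(debug, int, 0644);	/* octal mode: builds fine */
/*
 * module_param(debug, int, 644);   decimal 644 > 0777, so the new
 *                                  BUILD_BUG_ON_ZERO() check rejects it
 *                                  at compile time, as it does any
 *                                  world-writable (mode & 2) parameter.
 */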
diff --git a/include/linux/mqueue.h b/include/linux/mqueue.h
index 8db9d75..8b5a796 100644
--- a/include/linux/mqueue.h
+++ b/include/linux/mqueue.h
@@ -18,8 +18,6 @@
 #ifndef _LINUX_MQUEUE_H
 #define _LINUX_MQUEUE_H
 
-#include <linux/types.h>
-
 #define MQ_PRIO_MAX 	32768
 /* per-uid limit of kernel memory used by mqueue, in bytes */
 #define MQ_BYTES_MAX	819200
diff --git a/include/linux/msg.h b/include/linux/msg.h
index acc7c17..f1b6074 100644
--- a/include/linux/msg.h
+++ b/include/linux/msg.h
@@ -92,6 +92,12 @@
 	struct list_head q_senders;
 };
 
+/* Helper routines for sys_msgsnd and sys_msgrcv */
+extern long do_msgsnd(int msqid, long mtype, void __user *mtext,
+			size_t msgsz, int msgflg);
+extern long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
+			size_t msgsz, long msgtyp, int msgflg);
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_MSG_H */
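Editor's note: splitting the message type out of the buffer lets the syscall wrappers and other in-kernel users share one code path. A hedged sketch of roughly how the msgsnd wrapper is expected to use the helper (simplified, error handling trimmed):

asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp,
			   size_t msgsz, int msgflg)
{
	long mtype;

	/* pull mtype out of the userspace struct msgbuf, then hand the
	 * text pointer straight to the shared helper */
	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}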
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 27c48da..b2b91c4 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -94,7 +94,7 @@
 
 #define __MUTEX_INITIALIZER(lockname) \
 		{ .count = ATOMIC_INIT(1) \
-		, .wait_lock = SPIN_LOCK_UNLOCKED \
+		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
 		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
 		__DEBUG_MUTEX_INITIALIZER(lockname) \
 		__DEP_MAP_MUTEX_INITIALIZER(lockname) }
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h
index edfa012..aff25c0 100644
--- a/include/linux/mv643xx.h
+++ b/include/linux/mv643xx.h
@@ -724,7 +724,7 @@
 #define MV643XX_ETH_RX_FIFO_URGENT_THRESHOLD_REG(port)             (0x2470 + (port<<10))
 #define MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(port)             (0x2474 + (port<<10))
 #define MV643XX_ETH_RX_MINIMAL_FRAME_SIZE_REG(port)                (0x247c + (port<<10))
-#define MV643XX_ETH_RX_DISCARDED_FRAMES_COUNTER(port)              (0x2484 + (port<<10)
+#define MV643XX_ETH_RX_DISCARDED_FRAMES_COUNTER(port)              (0x2484 + (port<<10))
 #define MV643XX_ETH_PORT_DEBUG_0_REG(port)                         (0x248c + (port<<10))
 #define MV643XX_ETH_PORT_DEBUG_1_REG(port)                         (0x2490 + (port<<10))
 #define MV643XX_ETH_PORT_INTERNAL_ADDR_ERROR_REG(port)             (0x2494 + (port<<10))
@@ -1135,7 +1135,7 @@
 #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_1	(1<<19)
 #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_2	(1<<20)
 #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_3	((1<<20) | (1<<19))
-#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_4	((1<<21)
+#define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_4	(1<<21)
 #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_5	((1<<21) | (1<<19))
 #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_6	((1<<21) | (1<<20))
 #define MV643XX_ETH_DEFAULT_RX_UDP_QUEUE_7	((1<<21) | (1<<20) | (1<<19))
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index d6b6dc0..0f3e693 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -64,6 +64,7 @@
 	struct gendisk *disk;
 	int blksize;
 	u64 bytesize;
+	pid_t pid; /* pid of nbd-client, if attached */
 };
 
 #endif
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h
index b089d95..a503052 100644
--- a/include/linux/ncp_fs_sb.h
+++ b/include/linux/ncp_fs_sb.h
@@ -127,10 +127,10 @@
 	} unexpected_packet;
 };
 
-extern void ncp_tcp_rcv_proc(void *server);
-extern void ncp_tcp_tx_proc(void *server);
-extern void ncpdgram_rcv_proc(void *server);
-extern void ncpdgram_timeout_proc(void *server);
+extern void ncp_tcp_rcv_proc(struct work_struct *work);
+extern void ncp_tcp_tx_proc(struct work_struct *work);
+extern void ncpdgram_rcv_proc(struct work_struct *work);
+extern void ncpdgram_timeout_proc(struct work_struct *work);
 extern void ncpdgram_timeout_call(unsigned long server);
 extern void ncp_tcp_data_ready(struct sock* sk, int len);
 extern void ncp_tcp_write_space(struct sock* sk);
diff --git a/include/linux/net.h b/include/linux/net.h
index 15c733b..6f0dfeb 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -196,7 +196,7 @@
 extern int	     net_ratelimit(void);
 
 #define net_random()		random32()
-#define net_srandom(seed)	srandom32(seed)
+#define net_srandom(seed)	srandom32((__force u32)seed)
 
 extern int   	     kernel_sendmsg(struct socket *sock, struct msghdr *msg,
 				    struct kvec *vec, size_t num, size_t len);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 83b8c4f..c57088f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -30,6 +30,7 @@
 #include <linux/if_packet.h>
 
 #ifdef __KERNEL__
+#include <linux/timer.h>
 #include <asm/atomic.h>
 #include <asm/cache.h>
 #include <asm/byteorder.h>
@@ -38,7 +39,6 @@
 #include <linux/percpu.h>
 #include <linux/dmaengine.h>
 
-struct divert_blk;
 struct vlan_group;
 struct ethtool_ops;
 struct netpoll_info;
@@ -67,6 +67,10 @@
 #define NET_RX_CN_HIGH		4   /* The storm is here */
 #define NET_RX_BAD		5  /* packet dropped due to kernel error */
 
+/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
+ * indicates that the device will soon be dropping packets, or already drops
+ * some packets of the same priority; prompting us to send less aggressively. */
+#define net_xmit_eval(e)	((e) == NET_XMIT_CN? 0 : (e))
 #define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
 
 #endif
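Editor's note: net_xmit_eval() gives callers a uniform way to treat congestion notification as success while still propagating real transmit errors. A sketch of a typical call site (the surrounding function is illustrative):

static int my_proto_xmit(struct sk_buff *skb)
{
	int err = dev_queue_xmit(skb);

	/* NET_XMIT_CN means "queued but congested": report success,
	 * anything else is passed through unchanged */
	return net_xmit_eval(err);
}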
@@ -193,7 +197,7 @@
                                          *  NOTE:  For VLANs, this will be the
                                          *  encapuslated type. --BLG
                                          */
-	int		hh_len;		/* length of header */
+	u16		hh_len;		/* length of header */
 	int		(*hh_output)(struct sk_buff *skb);
 	rwlock_t	hh_lock;
 
@@ -517,11 +521,6 @@
 	/* bridge stuff */
 	struct net_bridge_port	*br_port;
 
-#ifdef CONFIG_NET_DIVERT
-	/* this will get initialized at each interface type init routine */
-	struct divert_blk	*divert;
-#endif /* CONFIG_NET_DIVERT */
-
 	/* class/net/name entry */
 	struct class_device	class_dev;
 	/* space for optional statistics and wireless sysfs groups */
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index b7e67d1..d4c4c51 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -117,6 +117,16 @@
 int nf_register_sockopt(struct nf_sockopt_ops *reg);
 void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
 
+#ifdef CONFIG_SYSCTL
+/* Sysctl registration */
+struct ctl_table_header *nf_register_sysctl_table(struct ctl_table *path,
+						  struct ctl_table *table);
+void nf_unregister_sysctl_table(struct ctl_table_header *header,
+				struct ctl_table *table);
+extern struct ctl_table nf_net_netfilter_sysctl_path[];
+extern struct ctl_table nf_net_ipv4_netfilter_sysctl_path[];
+#endif /* CONFIG_SYSCTL */
+
 extern struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
 
 /* those NF_LOG_* defines and struct nf_loginfo are legacy definitios that will
@@ -282,15 +292,31 @@
    Returns true or false. */
 extern int skb_make_writable(struct sk_buff **pskb, unsigned int writable_len);
 
-extern u_int16_t nf_csum_update(u_int32_t oldval, u_int32_t newval,
-				u_int32_t csum);
-extern u_int16_t nf_proto_csum_update(struct sk_buff *skb,
-				      u_int32_t oldval, u_int32_t newval,
-				      u_int16_t csum, int pseudohdr);
+static inline void nf_csum_replace4(__sum16 *sum, __be32 from, __be32 to)
+{
+	__be32 diff[] = { ~from, to };
+
+	*sum = csum_fold(csum_partial((char *)diff, sizeof(diff), ~csum_unfold(*sum)));
+}
+
+static inline void nf_csum_replace2(__sum16 *sum, __be16 from, __be16 to)
+{
+	nf_csum_replace4(sum, (__force __be32)from, (__force __be32)to);
+}
+
+extern void nf_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
+				      __be32 from, __be32 to, int pseudohdr);
+
+static inline void nf_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
+				      __be16 from, __be16 to, int pseudohdr)
+{
+	nf_proto_csum_replace4(sum, skb, (__force __be32)from,
+				(__force __be32)to, pseudohdr);
+}
 
 struct nf_afinfo {
 	unsigned short	family;
-	unsigned int	(*checksum)(struct sk_buff *skb, unsigned int hook,
+	__sum16		(*checksum)(struct sk_buff *skb, unsigned int hook,
 				    unsigned int dataoff, u_int8_t protocol);
 	void		(*saveroute)(const struct sk_buff *skb,
 				     struct nf_info *info);
@@ -305,12 +331,12 @@
 	return rcu_dereference(nf_afinfo[family]);
 }
 
-static inline unsigned int
+static inline __sum16
 nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
 	    u_int8_t protocol, unsigned short family)
 {
 	struct nf_afinfo *afinfo;
-	unsigned int csum = 0;
+	__sum16 csum = 0;
 
 	rcu_read_lock();
 	afinfo = nf_get_afinfo(family);
@@ -331,7 +357,7 @@
 static inline void
 nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, int family)
 {
-#ifdef CONFIG_IP_NF_NAT_NEEDED
+#if defined(CONFIG_IP_NF_NAT_NEEDED) || defined(CONFIG_NF_NAT_NEEDED)
 	void (*decodefn)(struct sk_buff *, struct flowi *);
 
 	if (family == AF_INET && (decodefn = ip_nat_decode_session) != NULL)
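Editor's note: the replace2/replace4 helpers fold the "subtract old value, add new value" checksum adjustment into a single call, which is exactly what NAT-style field rewriting needs. A hedged sketch of fixing the IPv4 header checksum after rewriting the source address (simplified; real NAT code also adjusts the TCP/UDP pseudo-header checksum with nf_proto_csum_replace4, and this assumes the checksum-annotated struct iphdr):

static void rewrite_saddr(struct iphdr *iph, __be32 new_saddr)
{
	__be32 old_saddr = iph->saddr;

	iph->saddr = new_saddr;
	/* adjust the 16-bit header checksum for the 32-bit field change */
	nf_csum_replace4(&iph->check, old_saddr, new_saddr);
}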
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index 312bd2f..6328175 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -14,6 +14,7 @@
 header-y += xt_DSCP.h
 header-y += xt_esp.h
 header-y += xt_helper.h
+header-y += xt_hashlimit.h
 header-y += xt_length.h
 header-y += xt_limit.h
 header-y += xt_mac.h
@@ -21,6 +22,7 @@
 header-y += xt_MARK.h
 header-y += xt_multiport.h
 header-y += xt_NFQUEUE.h
+header-y += xt_NFLOG.h
 header-y += xt_pkttype.h
 header-y += xt_policy.h
 header-y += xt_realm.h
diff --git a/include/linux/netfilter/nf_conntrack_amanda.h b/include/linux/netfilter/nf_conntrack_amanda.h
new file mode 100644
index 0000000..26c2235
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_amanda.h
@@ -0,0 +1,10 @@
+#ifndef _NF_CONNTRACK_AMANDA_H
+#define _NF_CONNTRACK_AMANDA_H
+/* AMANDA tracking. */
+
+extern unsigned int (*nf_nat_amanda_hook)(struct sk_buff **pskb,
+					  enum ip_conntrack_info ctinfo,
+					  unsigned int matchoff,
+					  unsigned int matchlen,
+					  struct nf_conntrack_expect *exp);
+#endif /* _NF_CONNTRACK_AMANDA_H */
diff --git a/include/linux/netfilter/nf_conntrack_ftp.h b/include/linux/netfilter/nf_conntrack_ftp.h
index ad4a41c..81453ea 100644
--- a/include/linux/netfilter/nf_conntrack_ftp.h
+++ b/include/linux/netfilter/nf_conntrack_ftp.h
@@ -3,16 +3,16 @@
 /* FTP tracking. */
 
 /* This enum is exposed to userspace */
-enum ip_ct_ftp_type
+enum nf_ct_ftp_type
 {
 	/* PORT command from client */
-	IP_CT_FTP_PORT,
+	NF_CT_FTP_PORT,
 	/* PASV response from server */
-	IP_CT_FTP_PASV,
+	NF_CT_FTP_PASV,
 	/* EPRT command from client */
-	IP_CT_FTP_EPRT,
+	NF_CT_FTP_EPRT,
 	/* EPSV response from server */
-	IP_CT_FTP_EPSV,
+	NF_CT_FTP_EPSV,
 };
 
 #ifdef __KERNEL__
@@ -21,23 +21,23 @@
 
 #define NUM_SEQ_TO_REMEMBER 2
 /* This structure exists only once per master */
-struct ip_ct_ftp_master {
+struct nf_ct_ftp_master {
 	/* Valid seq positions for cmd matching after newline */
 	u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER];
 	/* 0 means seq_match_aft_nl not set */
 	int seq_aft_nl_num[IP_CT_DIR_MAX];
 };
 
-struct ip_conntrack_expect;
+struct nf_conntrack_expect;
 
 /* For NAT to hook in when we find a packet which describes what other
  * connection we should expect. */
-extern unsigned int (*ip_nat_ftp_hook)(struct sk_buff **pskb,
+extern unsigned int (*nf_nat_ftp_hook)(struct sk_buff **pskb,
 				       enum ip_conntrack_info ctinfo,
-				       enum ip_ct_ftp_type type,
+				       enum nf_ct_ftp_type type,
 				       unsigned int matchoff,
 				       unsigned int matchlen,
-				       struct ip_conntrack_expect *exp,
+				       struct nf_conntrack_expect *exp,
 				       u32 *seq);
 #endif /* __KERNEL__ */
 
diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h
new file mode 100644
index 0000000..08e2f49
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_h323.h
@@ -0,0 +1,92 @@
+#ifndef _NF_CONNTRACK_H323_H
+#define _NF_CONNTRACK_H323_H
+
+#ifdef __KERNEL__
+
+#include <linux/netfilter/nf_conntrack_h323_asn1.h>
+
+#define RAS_PORT 1719
+#define Q931_PORT 1720
+#define H323_RTP_CHANNEL_MAX 4	/* Audio, video, FAX and other */
+
+/* This structure exists only once per master */
+struct nf_ct_h323_master {
+
+	/* Original and NATed Q.931 or H.245 signal ports */
+	__be16 sig_port[IP_CT_DIR_MAX];
+
+	/* Original and NATed RTP ports */
+	__be16 rtp_port[H323_RTP_CHANNEL_MAX][IP_CT_DIR_MAX];
+
+	union {
+		/* RAS connection timeout */
+		u_int32_t timeout;
+
+		/* Next TPKT length (for separate TPKT header and data) */
+		u_int16_t tpkt_len[IP_CT_DIR_MAX];
+	};
+};
+
+struct nf_conn;
+
+extern int get_h225_addr(struct nf_conn *ct, unsigned char *data,
+			 TransportAddress *taddr,
+			 union nf_conntrack_address *addr, __be16 *port);
+extern void nf_conntrack_h245_expect(struct nf_conn *new,
+				     struct nf_conntrack_expect *this);
+extern void nf_conntrack_q931_expect(struct nf_conn *new,
+				     struct nf_conntrack_expect *this);
+extern int (*set_h245_addr_hook) (struct sk_buff **pskb,
+				  unsigned char **data, int dataoff,
+				  H245_TransportAddress *taddr,
+				  union nf_conntrack_address *addr,
+				  __be16 port);
+extern int (*set_h225_addr_hook) (struct sk_buff **pskb,
+				  unsigned char **data, int dataoff,
+				  TransportAddress *taddr,
+				  union nf_conntrack_address *addr,
+				  __be16 port);
+extern int (*set_sig_addr_hook) (struct sk_buff **pskb,
+				 struct nf_conn *ct,
+				 enum ip_conntrack_info ctinfo,
+				 unsigned char **data,
+				 TransportAddress *taddr, int count);
+extern int (*set_ras_addr_hook) (struct sk_buff **pskb,
+				 struct nf_conn *ct,
+				 enum ip_conntrack_info ctinfo,
+				 unsigned char **data,
+				 TransportAddress *taddr, int count);
+extern int (*nat_rtp_rtcp_hook) (struct sk_buff **pskb,
+				 struct nf_conn *ct,
+				 enum ip_conntrack_info ctinfo,
+				 unsigned char **data, int dataoff,
+				 H245_TransportAddress *taddr,
+				 __be16 port, __be16 rtp_port,
+				 struct nf_conntrack_expect *rtp_exp,
+				 struct nf_conntrack_expect *rtcp_exp);
+extern int (*nat_t120_hook) (struct sk_buff **pskb, struct nf_conn *ct,
+			     enum ip_conntrack_info ctinfo,
+			     unsigned char **data, int dataoff,
+			     H245_TransportAddress *taddr, __be16 port,
+			     struct nf_conntrack_expect *exp);
+extern int (*nat_h245_hook) (struct sk_buff **pskb, struct nf_conn *ct,
+			     enum ip_conntrack_info ctinfo,
+			     unsigned char **data, int dataoff,
+			     TransportAddress *taddr, __be16 port,
+			     struct nf_conntrack_expect *exp);
+extern int (*nat_callforwarding_hook) (struct sk_buff **pskb,
+				       struct nf_conn *ct,
+				       enum ip_conntrack_info ctinfo,
+				       unsigned char **data, int dataoff,
+				       TransportAddress *taddr,
+				       __be16 port,
+				       struct nf_conntrack_expect *exp);
+extern int (*nat_q931_hook) (struct sk_buff **pskb, struct nf_conn *ct,
+			     enum ip_conntrack_info ctinfo,
+			     unsigned char **data, TransportAddress *taddr,
+			     int idx, __be16 port,
+			     struct nf_conntrack_expect *exp);
+
+#endif
+
+#endif
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_helper_h323_asn1.h b/include/linux/netfilter/nf_conntrack_h323_asn1.h
similarity index 92%
rename from include/linux/netfilter_ipv4/ip_conntrack_helper_h323_asn1.h
rename to include/linux/netfilter/nf_conntrack_h323_asn1.h
index c6e9a0b..8dab596 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_helper_h323_asn1.h
+++ b/include/linux/netfilter/nf_conntrack_h323_asn1.h
@@ -1,6 +1,6 @@
 /****************************************************************************
- * ip_conntrack_helper_h323_asn1.h - BER and PER decoding library for H.323
- * 			      	     conntrack/NAT module.
+ * nf_conntrack_h323_asn1.h - BER and PER decoding library for H.323
+ * 			      conntrack/NAT module.
  *
  * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net>
  *
@@ -34,13 +34,13 @@
  *
  ****************************************************************************/
 
-#ifndef _IP_CONNTRACK_HELPER_H323_ASN1_H_
-#define _IP_CONNTRACK_HELPER_H323_ASN1_H_
+#ifndef _NF_CONNTRACK_HELPER_H323_ASN1_H_
+#define _NF_CONNTRACK_HELPER_H323_ASN1_H_
 
 /*****************************************************************************
  * H.323 Types
  ****************************************************************************/
-#include "ip_conntrack_helper_h323_types.h"
+#include "nf_conntrack_h323_types.h"
 
 typedef struct {
 	enum {
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_helper_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h
similarity index 98%
rename from include/linux/netfilter_ipv4/ip_conntrack_helper_h323_types.h
rename to include/linux/netfilter/nf_conntrack_h323_types.h
index 3d4a773..38d74d5 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_helper_h323_types.h
+++ b/include/linux/netfilter/nf_conntrack_h323_types.h
@@ -10,6 +10,11 @@
 	unsigned ip;
 } TransportAddress_ipAddress;
 
+typedef struct TransportAddress_ip6Address {	/* SEQUENCE */
+	int options;		/* No use */
+	unsigned ip6;
+} TransportAddress_ip6Address;
+
 typedef struct TransportAddress {	/* CHOICE */
 	enum {
 		eTransportAddress_ipAddress,
@@ -22,6 +27,7 @@
 	} choice;
 	union {
 		TransportAddress_ipAddress ipAddress;
+		TransportAddress_ip6Address ip6Address;
 	};
 } TransportAddress;
 
@@ -93,6 +99,11 @@
 	unsigned network;
 } UnicastAddress_iPAddress;
 
+typedef struct UnicastAddress_iP6Address {	/* SEQUENCE */
+	int options;		/* No use */
+	unsigned network;
+} UnicastAddress_iP6Address;
+
 typedef struct UnicastAddress {	/* CHOICE */
 	enum {
 		eUnicastAddress_iPAddress,
@@ -105,6 +116,7 @@
 	} choice;
 	union {
 		UnicastAddress_iPAddress iPAddress;
+		UnicastAddress_iP6Address iP6Address;
 	};
 } UnicastAddress;
 
diff --git a/include/linux/netfilter/nf_conntrack_irc.h b/include/linux/netfilter/nf_conntrack_irc.h
new file mode 100644
index 0000000..2ab6b82
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_irc.h
@@ -0,0 +1,15 @@
+#ifndef _NF_CONNTRACK_IRC_H
+#define _NF_CONNTRACK_IRC_H
+
+#ifdef __KERNEL__
+
+#define IRC_PORT	6667
+
+extern unsigned int (*nf_nat_irc_hook)(struct sk_buff **pskb,
+				       enum ip_conntrack_info ctinfo,
+				       unsigned int matchoff,
+				       unsigned int matchlen,
+				       struct nf_conntrack_expect *exp);
+
+#endif /* __KERNEL__ */
+#endif /* _NF_CONNTRACK_IRC_H */
diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h
new file mode 100644
index 0000000..9d8144a
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_pptp.h
@@ -0,0 +1,322 @@
+/* PPTP constants and structs */
+#ifndef _NF_CONNTRACK_PPTP_H
+#define _NF_CONNTRACK_PPTP_H
+
+#include <linux/netfilter/nf_conntrack_common.h>
+
+/* state of the control session */
+enum pptp_ctrlsess_state {
+	PPTP_SESSION_NONE,			/* no session present */
+	PPTP_SESSION_ERROR,			/* some session error */
+	PPTP_SESSION_STOPREQ,			/* stop_sess request seen */
+	PPTP_SESSION_REQUESTED,			/* start_sess request seen */
+	PPTP_SESSION_CONFIRMED,			/* session established */
+};
+
+/* state of the call inside the control session */
+enum pptp_ctrlcall_state {
+	PPTP_CALL_NONE,
+	PPTP_CALL_ERROR,
+	PPTP_CALL_OUT_REQ,
+	PPTP_CALL_OUT_CONF,
+	PPTP_CALL_IN_REQ,
+	PPTP_CALL_IN_REP,
+	PPTP_CALL_IN_CONF,
+	PPTP_CALL_CLEAR_REQ,
+};
+
+/* conntrack private data */
+struct nf_ct_pptp_master {
+	enum pptp_ctrlsess_state sstate;	/* session state */
+	enum pptp_ctrlcall_state cstate;	/* call state */
+	__be16 pac_call_id;			/* call id of PAC */
+	__be16 pns_call_id;			/* call id of PNS */
+
+	/* in pre-2.6.11 this used to be per-expect. Now it is per-conntrack
+	 * and therefore imposes a fixed limit on the number of maps */
+	struct nf_ct_gre_keymap *keymap[IP_CT_DIR_MAX];
+};
+
+struct nf_nat_pptp {
+	__be16 pns_call_id;			/* NAT'ed PNS call id */
+	__be16 pac_call_id;			/* NAT'ed PAC call id */
+};
+
+#ifdef __KERNEL__
+
+#define PPTP_CONTROL_PORT	1723
+
+#define PPTP_PACKET_CONTROL	1
+#define PPTP_PACKET_MGMT	2
+
+#define PPTP_MAGIC_COOKIE	0x1a2b3c4d
+
+struct pptp_pkt_hdr {
+	__u16	packetLength;
+	__be16	packetType;
+	__be32	magicCookie;
+};
+
+/* PptpControlMessageType values */
+#define PPTP_START_SESSION_REQUEST	1
+#define PPTP_START_SESSION_REPLY	2
+#define PPTP_STOP_SESSION_REQUEST	3
+#define PPTP_STOP_SESSION_REPLY		4
+#define PPTP_ECHO_REQUEST		5
+#define PPTP_ECHO_REPLY			6
+#define PPTP_OUT_CALL_REQUEST		7
+#define PPTP_OUT_CALL_REPLY		8
+#define PPTP_IN_CALL_REQUEST		9
+#define PPTP_IN_CALL_REPLY		10
+#define PPTP_IN_CALL_CONNECT		11
+#define PPTP_CALL_CLEAR_REQUEST		12
+#define PPTP_CALL_DISCONNECT_NOTIFY	13
+#define PPTP_WAN_ERROR_NOTIFY		14
+#define PPTP_SET_LINK_INFO		15
+
+#define PPTP_MSG_MAX			15
+
+/* PptpGeneralError values */
+#define PPTP_ERROR_CODE_NONE		0
+#define PPTP_NOT_CONNECTED		1
+#define PPTP_BAD_FORMAT			2
+#define PPTP_BAD_VALUE			3
+#define PPTP_NO_RESOURCE		4
+#define PPTP_BAD_CALLID			5
+#define PPTP_REMOVE_DEVICE_ERROR	6
+
+struct PptpControlHeader {
+	__be16	messageType;
+	__u16	reserved;
+};
+
+/* FramingCapability Bitmap Values */
+#define PPTP_FRAME_CAP_ASYNC		0x1
+#define PPTP_FRAME_CAP_SYNC		0x2
+
+/* BearerCapability Bitmap Values */
+#define PPTP_BEARER_CAP_ANALOG		0x1
+#define PPTP_BEARER_CAP_DIGITAL		0x2
+
+struct PptpStartSessionRequest {
+	__be16	protocolVersion;
+	__u16	reserved1;
+	__be32	framingCapability;
+	__be32	bearerCapability;
+	__be16	maxChannels;
+	__be16	firmwareRevision;
+	__u8	hostName[64];
+	__u8	vendorString[64];
+};
+
+/* PptpStartSessionResultCode Values */
+#define PPTP_START_OK			1
+#define PPTP_START_GENERAL_ERROR	2
+#define PPTP_START_ALREADY_CONNECTED	3
+#define PPTP_START_NOT_AUTHORIZED	4
+#define PPTP_START_UNKNOWN_PROTOCOL	5
+
+struct PptpStartSessionReply {
+	__be16	protocolVersion;
+	__u8	resultCode;
+	__u8	generalErrorCode;
+	__be32	framingCapability;
+	__be32	bearerCapability;
+	__be16	maxChannels;
+	__be16	firmwareRevision;
+	__u8	hostName[64];
+	__u8	vendorString[64];
+};
+
+/* PptpStopReasons */
+#define PPTP_STOP_NONE			1
+#define PPTP_STOP_PROTOCOL		2
+#define PPTP_STOP_LOCAL_SHUTDOWN	3
+
+struct PptpStopSessionRequest {
+	__u8	reason;
+	__u8	reserved1;
+	__u16	reserved2;
+};
+
+/* PptpStopSessionResultCode */
+#define PPTP_STOP_OK			1
+#define PPTP_STOP_GENERAL_ERROR		2
+
+struct PptpStopSessionReply {
+	__u8	resultCode;
+	__u8	generalErrorCode;
+	__u16	reserved1;
+};
+
+struct PptpEchoRequest {
+	__be32 identNumber;
+};
+
+/* PptpEchoReplyResultCode */
+#define PPTP_ECHO_OK			1
+#define PPTP_ECHO_GENERAL_ERROR		2
+
+struct PptpEchoReply {
+	__be32	identNumber;
+	__u8	resultCode;
+	__u8	generalErrorCode;
+	__u16	reserved;
+};
+
+/* PptpFramingType */
+#define PPTP_ASYNC_FRAMING		1
+#define PPTP_SYNC_FRAMING		2
+#define PPTP_DONT_CARE_FRAMING		3
+
+/* PptpCallBearerType */
+#define PPTP_ANALOG_TYPE		1
+#define PPTP_DIGITAL_TYPE		2
+#define PPTP_DONT_CARE_BEARER_TYPE	3
+
+struct PptpOutCallRequest {
+	__be16	callID;
+	__be16	callSerialNumber;
+	__be32	minBPS;
+	__be32	maxBPS;
+	__be32	bearerType;
+	__be32	framingType;
+	__be16	packetWindow;
+	__be16	packetProcDelay;
+	__be16	phoneNumberLength;
+	__u16	reserved1;
+	__u8	phoneNumber[64];
+	__u8	subAddress[64];
+};
+
+/* PptpCallResultCode */
+#define PPTP_OUTCALL_CONNECT		1
+#define PPTP_OUTCALL_GENERAL_ERROR	2
+#define PPTP_OUTCALL_NO_CARRIER		3
+#define PPTP_OUTCALL_BUSY		4
+#define PPTP_OUTCALL_NO_DIAL_TONE	5
+#define PPTP_OUTCALL_TIMEOUT		6
+#define PPTP_OUTCALL_DONT_ACCEPT	7
+
+struct PptpOutCallReply {
+	__be16	callID;
+	__be16	peersCallID;
+	__u8	resultCode;
+	__u8	generalErrorCode;
+	__be16	causeCode;
+	__be32	connectSpeed;
+	__be16	packetWindow;
+	__be16	packetProcDelay;
+	__be32	physChannelID;
+};
+
+struct PptpInCallRequest {
+	__be16	callID;
+	__be16	callSerialNumber;
+	__be32	callBearerType;
+	__be32	physChannelID;
+	__be16	dialedNumberLength;
+	__be16	dialingNumberLength;
+	__u8	dialedNumber[64];
+	__u8	dialingNumber[64];
+	__u8	subAddress[64];
+};
+
+/* PptpInCallResultCode */
+#define PPTP_INCALL_ACCEPT		1
+#define PPTP_INCALL_GENERAL_ERROR	2
+#define PPTP_INCALL_DONT_ACCEPT		3
+
+struct PptpInCallReply {
+	__be16	callID;
+	__be16	peersCallID;
+	__u8	resultCode;
+	__u8	generalErrorCode;
+	__be16	packetWindow;
+	__be16	packetProcDelay;
+	__u16	reserved;
+};
+
+struct PptpInCallConnected {
+	__be16	peersCallID;
+	__u16	reserved;
+	__be32	connectSpeed;
+	__be16	packetWindow;
+	__be16	packetProcDelay;
+	__be32	callFramingType;
+};
+
+struct PptpClearCallRequest {
+	__be16	callID;
+	__u16	reserved;
+};
+
+struct PptpCallDisconnectNotify {
+	__be16	callID;
+	__u8	resultCode;
+	__u8	generalErrorCode;
+	__be16	causeCode;
+	__u16	reserved;
+	__u8	callStatistics[128];
+};
+
+struct PptpWanErrorNotify {
+	__be16	peersCallID;
+	__u16	reserved;
+	__be32	crcErrors;
+	__be32	framingErrors;
+	__be32	hardwareOverRuns;
+	__be32	bufferOverRuns;
+	__be32	timeoutErrors;
+	__be32	alignmentErrors;
+};
+
+struct PptpSetLinkInfo {
+	__be16	peersCallID;
+	__u16	reserved;
+	__be32	sendAccm;
+	__be32	recvAccm;
+};
+
+union pptp_ctrl_union {
+	struct PptpStartSessionRequest	sreq;
+	struct PptpStartSessionReply	srep;
+	struct PptpStopSessionRequest	streq;
+	struct PptpStopSessionReply	strep;
+	struct PptpOutCallRequest	ocreq;
+	struct PptpOutCallReply		ocack;
+	struct PptpInCallRequest	icreq;
+	struct PptpInCallReply		icack;
+	struct PptpInCallConnected	iccon;
+	struct PptpClearCallRequest	clrreq;
+	struct PptpCallDisconnectNotify disc;
+	struct PptpWanErrorNotify	wanerr;
+	struct PptpSetLinkInfo		setlink;
+};
+
+/* crap needed for nf_conntrack_compat.h */
+struct nf_conn;
+struct nf_conntrack_expect;
+
+extern int
+(*nf_nat_pptp_hook_outbound)(struct sk_buff **pskb,
+			     struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			     struct PptpControlHeader *ctlh,
+			     union pptp_ctrl_union *pptpReq);
+
+extern int
+(*nf_nat_pptp_hook_inbound)(struct sk_buff **pskb,
+			    struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			    struct PptpControlHeader *ctlh,
+			    union pptp_ctrl_union *pptpReq);
+
+extern void
+(*nf_nat_pptp_hook_exp_gre)(struct nf_conntrack_expect *exp_orig,
+			    struct nf_conntrack_expect *exp_reply);
+
+extern void
+(*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct,
+			     struct nf_conntrack_expect *exp);
+
+#endif /* __KERNEL__ */
+#endif /* _NF_CONNTRACK_PPTP_H */
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
new file mode 100644
index 0000000..4e6bbce
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -0,0 +1,112 @@
+#ifndef _CONNTRACK_PROTO_GRE_H
+#define _CONNTRACK_PROTO_GRE_H
+#include <asm/byteorder.h>
+
+/* GRE PROTOCOL HEADER */
+
+/* GRE Version field */
+#define GRE_VERSION_1701	0x0
+#define GRE_VERSION_PPTP	0x1
+
+/* GRE Protocol field */
+#define GRE_PROTOCOL_PPTP	0x880B
+
+/* GRE Flags */
+#define GRE_FLAG_C		0x80
+#define GRE_FLAG_R		0x40
+#define GRE_FLAG_K		0x20
+#define GRE_FLAG_S		0x10
+#define GRE_FLAG_A		0x80
+
+#define GRE_IS_C(f)	((f)&GRE_FLAG_C)
+#define GRE_IS_R(f)	((f)&GRE_FLAG_R)
+#define GRE_IS_K(f)	((f)&GRE_FLAG_K)
+#define GRE_IS_S(f)	((f)&GRE_FLAG_S)
+#define GRE_IS_A(f)	((f)&GRE_FLAG_A)
+
+/* GRE is a mess: Four different standards */
+struct gre_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u16	rec:3,
+		srr:1,
+		seq:1,
+		key:1,
+		routing:1,
+		csum:1,
+		version:3,
+		reserved:4,
+		ack:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	__u16	csum:1,
+		routing:1,
+		key:1,
+		seq:1,
+		srr:1,
+		rec:3,
+		ack:1,
+		reserved:4,
+		version:3;
+#else
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+	__be16	protocol;
+};
+
+/* modified GRE header for PPTP */
+struct gre_hdr_pptp {
+	__u8   flags;		/* bitfield */
+	__u8   version;		/* should be GRE_VERSION_PPTP */
+	__be16 protocol;	/* should be GRE_PROTOCOL_PPTP */
+	__be16 payload_len;	/* size of ppp payload, not inc. gre header */
+	__be16 call_id;		/* peer's call_id for this session */
+	__be32 seq;		/* sequence number.  Present if S==1 */
+	__be32 ack;		/* seq number of highest packet received by */
+				/*  sender in this session */
+};
+
+struct nf_ct_gre {
+	unsigned int stream_timeout;
+	unsigned int timeout;
+};
+
+#ifdef __KERNEL__
+#include <net/netfilter/nf_conntrack_tuple.h>
+
+struct nf_conn;
+
+/* structure for original <-> reply keymap */
+struct nf_ct_gre_keymap {
+	struct list_head list;
+	struct nf_conntrack_tuple tuple;
+};
+
+/* add new tuple->key_reply pair to keymap */
+int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
+			 struct nf_conntrack_tuple *t);
+
+/* delete keymap entries */
+void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
+
+/* get pointer to gre key, if present */
+static inline __be32 *gre_key(struct gre_hdr *greh)
+{
+	if (!greh->key)
+		return NULL;
+	if (greh->csum || greh->routing)
+		return (__be32 *)((char *)greh + sizeof(*greh) + 4);
+	return (__be32 *)((char *)greh + sizeof(*greh));
+}
+
+/* get pointer to gre csum, if present */
+static inline __sum16 *gre_csum(struct gre_hdr *greh)
+{
+	if (!greh->csum)
+		return NULL;
+	return (__sum16 *)((char *)greh + sizeof(*greh));
+}
+
+extern void nf_ct_gre_keymap_flush(void);
+extern void nf_nat_need_gre(void);
+
+#endif /* __KERNEL__ */
+#endif /* _CONNTRACK_PROTO_GRE_H */
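Editor's note: only version-1 ("enhanced", PPTP-style) GRE carries the call id that this conntrack helper keys on, so a parser is expected to check the version and protocol fields before trusting the rest of the header. A small illustrative check using the definitions above (not part of the patch):

static int gre_is_pptp(const struct gre_hdr *greh)
{
	/* enhanced GRE as used by PPTP: version 1, protocol 0x880B */
	return greh->version == GRE_VERSION_PPTP &&
	       greh->protocol == htons(GRE_PROTOCOL_PPTP);
}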
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
index b8994d9..5cf2c11 100644
--- a/include/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -20,7 +20,7 @@
 {
 	enum sctp_conntrack state;
 
-	u_int32_t vtag[IP_CT_DIR_MAX];
+	__be32 vtag[IP_CT_DIR_MAX];
 	u_int32_t ttag[IP_CT_DIR_MAX];
 };
 
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
new file mode 100644
index 0000000..bb7f204
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -0,0 +1,41 @@
+#ifndef __NF_CONNTRACK_SIP_H__
+#define __NF_CONNTRACK_SIP_H__
+#ifdef __KERNEL__
+
+#define SIP_PORT	5060
+#define SIP_TIMEOUT	3600
+
+enum sip_header_pos {
+	POS_REG_REQ_URI,
+	POS_REQ_URI,
+	POS_FROM,
+	POS_TO,
+	POS_VIA,
+	POS_CONTACT,
+	POS_CONTENT,
+	POS_MEDIA,
+	POS_OWNER_IP4,
+	POS_CONNECTION_IP4,
+	POS_OWNER_IP6,
+	POS_CONNECTION_IP6,
+	POS_SDP_HEADER,
+};
+
+extern unsigned int (*nf_nat_sip_hook)(struct sk_buff **pskb,
+				       enum ip_conntrack_info ctinfo,
+				       struct nf_conn *ct,
+				       const char **dptr);
+extern unsigned int (*nf_nat_sdp_hook)(struct sk_buff **pskb,
+				       enum ip_conntrack_info ctinfo,
+				       struct nf_conntrack_expect *exp,
+				       const char *dptr);
+
+extern int ct_sip_get_info(struct nf_conn *ct, const char *dptr, size_t dlen,
+			   unsigned int *matchoff, unsigned int *matchlen,
+			   enum sip_header_pos pos);
+extern int ct_sip_lnlen(const char *line, const char *limit);
+extern const char *ct_sip_search(const char *needle, const char *haystack,
+				 size_t needle_len, size_t haystack_len,
+				 int case_sensitive);
+#endif /* __KERNEL__ */
+#endif /* __NF_CONNTRACK_SIP_H__ */
diff --git a/include/linux/netfilter/nf_conntrack_tftp.h b/include/linux/netfilter/nf_conntrack_tftp.h
new file mode 100644
index 0000000..0d79b7a
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_tftp.h
@@ -0,0 +1,20 @@
+#ifndef _NF_CONNTRACK_TFTP_H
+#define _NF_CONNTRACK_TFTP_H
+
+#define TFTP_PORT 69
+
+struct tftphdr {
+	__be16 opcode;
+};
+
+#define TFTP_OPCODE_READ	1
+#define TFTP_OPCODE_WRITE	2
+#define TFTP_OPCODE_DATA	3
+#define TFTP_OPCODE_ACK		4
+#define TFTP_OPCODE_ERROR	5
+
+extern unsigned int (*nf_nat_tftp_hook)(struct sk_buff **pskb,
+				        enum ip_conntrack_info ctinfo,
+				        struct nf_conntrack_expect *exp);
+
+#endif /* _NF_CONNTRACK_TFTP_H */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 6d8e3e5..1e9c821 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -78,7 +78,7 @@
 struct nfgenmsg {
 	u_int8_t  nfgen_family;		/* AF_xxx */
 	u_int8_t  version;		/* nfnetlink version */
-	u_int16_t res_id;		/* resource id */
+	__be16    res_id;		/* resource id */
 };
 
 #define NFNETLINK_V0	0
diff --git a/include/linux/netfilter/nfnetlink_log.h b/include/linux/netfilter/nfnetlink_log.h
index 87b92f8..5966afa 100644
--- a/include/linux/netfilter/nfnetlink_log.h
+++ b/include/linux/netfilter/nfnetlink_log.h
@@ -16,24 +16,22 @@
 };
 
 struct nfulnl_msg_packet_hdr {
-	u_int16_t	hw_protocol;	/* hw protocol (network order) */
+	__be16		hw_protocol;	/* hw protocol (network order) */
 	u_int8_t	hook;		/* netfilter hook */
 	u_int8_t	_pad;
 };
 
 struct nfulnl_msg_packet_hw {
-	u_int16_t	hw_addrlen;
+	__be16		hw_addrlen;
 	u_int16_t	_pad;
 	u_int8_t	hw_addr[8];
 };
 
 struct nfulnl_msg_packet_timestamp {
-	aligned_u64	sec;
-	aligned_u64	usec;
+	aligned_be64	sec;
+	aligned_be64	usec;
 };
 
-#define NFULNL_PREFIXLEN	30	/* just like old log target */
-
 enum nfulnl_attr_type {
 	NFULA_UNSPEC,
 	NFULA_PACKET_HDR,
@@ -67,7 +65,7 @@
 } __attribute__ ((packed));
 
 struct nfulnl_msg_config_mode {
-	u_int32_t	copy_range;
+	__be32		copy_range;
 	u_int8_t	copy_mode;
 	u_int8_t	_pad;
 } __attribute__ ((packed));
diff --git a/include/linux/netfilter/nfnetlink_queue.h b/include/linux/netfilter/nfnetlink_queue.h
index 36af036..83e7896 100644
--- a/include/linux/netfilter/nfnetlink_queue.h
+++ b/include/linux/netfilter/nfnetlink_queue.h
@@ -13,20 +13,20 @@
 };
 
 struct nfqnl_msg_packet_hdr {
-	u_int32_t	packet_id;	/* unique ID of packet in queue */
-	u_int16_t	hw_protocol;	/* hw protocol (network order) */
+	__be32		packet_id;	/* unique ID of packet in queue */
+	__be16		hw_protocol;	/* hw protocol (network order) */
 	u_int8_t	hook;		/* netfilter hook */
 } __attribute__ ((packed));
 
 struct nfqnl_msg_packet_hw {
-	u_int16_t	hw_addrlen;
+	__be16		hw_addrlen;
 	u_int16_t	_pad;
 	u_int8_t	hw_addr[8];
 };
 
 struct nfqnl_msg_packet_timestamp {
-	aligned_u64	sec;
-	aligned_u64	usec;
+	aligned_be64	sec;
+	aligned_be64	usec;
 };
 
 enum nfqnl_attr_type {
@@ -47,8 +47,8 @@
 #define NFQA_MAX (__NFQA_MAX - 1)
 
 struct nfqnl_msg_verdict_hdr {
-	u_int32_t verdict;
-	u_int32_t id;
+	__be32 verdict;
+	__be32 id;
 };
 
 
@@ -63,7 +63,7 @@
 struct nfqnl_msg_config_cmd {
 	u_int8_t	command;	/* nfqnl_msg_config_cmds */
 	u_int8_t	_pad;
-	u_int16_t	pf;		/* AF_xxx for PF_[UN]BIND */
+	__be16		pf;		/* AF_xxx for PF_[UN]BIND */
 };
 
 enum nfqnl_config_mode {
@@ -73,7 +73,7 @@
 };
 
 struct nfqnl_msg_config_params {
-	u_int32_t	copy_range;
+	__be32		copy_range;
 	u_int8_t	copy_mode;	/* enum nfqnl_config_mode */
 } __attribute__ ((packed));
 
@@ -82,6 +82,7 @@
 	NFQA_CFG_UNSPEC,
 	NFQA_CFG_CMD,			/* nfqnl_msg_config_cmd */
 	NFQA_CFG_PARAMS,		/* nfqnl_msg_config_params */
+	NFQA_CFG_QUEUE_MAXLEN,		/* u_int32_t */
 	__NFQA_CFG_MAX
 };
 #define NFQA_CFG_MAX (__NFQA_CFG_MAX-1)
diff --git a/include/linux/netfilter/xt_NFLOG.h b/include/linux/netfilter/xt_NFLOG.h
new file mode 100644
index 0000000..cdcd0ed
--- /dev/null
+++ b/include/linux/netfilter/xt_NFLOG.h
@@ -0,0 +1,18 @@
+#ifndef _XT_NFLOG_TARGET
+#define _XT_NFLOG_TARGET
+
+#define XT_NFLOG_DEFAULT_GROUP		0x1
+#define XT_NFLOG_DEFAULT_THRESHOLD	1
+
+#define XT_NFLOG_MASK			0x0
+
+struct xt_nflog_info {
+	u_int32_t	len;
+	u_int16_t	group;
+	u_int16_t	threshold;
+	u_int16_t	flags;
+	u_int16_t	pad;
+	char		prefix[64];
+};
+
+#endif /* _XT_NFLOG_TARGET */
diff --git a/include/linux/netfilter/xt_conntrack.h b/include/linux/netfilter/xt_conntrack.h
index 4c2d994..70b6f71 100644
--- a/include/linux/netfilter/xt_conntrack.h
+++ b/include/linux/netfilter/xt_conntrack.h
@@ -29,14 +29,14 @@
 struct ip_conntrack_old_tuple
 {
 	struct {
-		__u32 ip;
+		__be32 ip;
 		union {
 			__u16 all;
 		} u;
 	} src;
 
 	struct {
-		__u32 ip;
+		__be32 ip;
 		union {
 			__u16 all;
 		} u;
diff --git a/include/linux/netfilter/xt_hashlimit.h b/include/linux/netfilter/xt_hashlimit.h
new file mode 100644
index 0000000..b4556b8
--- /dev/null
+++ b/include/linux/netfilter/xt_hashlimit.h
@@ -0,0 +1,40 @@
+#ifndef _XT_HASHLIMIT_H
+#define _XT_HASHLIMIT_H
+
+/* timings are in milliseconds. */
+#define XT_HASHLIMIT_SCALE 10000
+/* 1/10,000 sec period => max of 10,000/sec.  Min rate is then 429490
+   seconds, or one every 59 hours. */
+
+/* details of this structure hidden by the implementation */
+struct xt_hashlimit_htable;
+
+#define XT_HASHLIMIT_HASH_DIP	0x0001
+#define XT_HASHLIMIT_HASH_DPT	0x0002
+#define XT_HASHLIMIT_HASH_SIP	0x0004
+#define XT_HASHLIMIT_HASH_SPT	0x0008
+
+struct hashlimit_cfg {
+	u_int32_t mode;	  /* bitmask of XT_HASHLIMIT_HASH_* */
+	u_int32_t avg;    /* Average secs between packets * scale */
+	u_int32_t burst;  /* Period multiplier for upper limit. */
+
+	/* user specified */
+	u_int32_t size;		/* how many buckets */
+	u_int32_t max;		/* max number of entries */
+	u_int32_t gc_interval;	/* gc interval */
+	u_int32_t expire;	/* when do entries expire? */
+};
+
+struct xt_hashlimit_info {
+	char name [IFNAMSIZ];		/* name */
+	struct hashlimit_cfg cfg;
+	struct xt_hashlimit_htable *hinfo;
+
+	/* Used internally by the kernel */
+	union {
+		void *ptr;
+		struct xt_hashlimit_info *master;
+	} u;
+};
+#endif /*_XT_HASHLIMIT_H*/
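Editor's note: with XT_HASHLIMIT_SCALE at 10000, cfg.avg stores the average gap between packets in units of 1/10000 s, so a target rate converts as scale divided by packets-per-second. A sketch of a filled-in configuration (illustrative only; the iptables userspace tool normally builds this):

/* allow on average 25 packets/sec per destination IP,
 * with bursts of up to 5 packets */
struct hashlimit_cfg cfg = {
	.mode        = XT_HASHLIMIT_HASH_DIP,
	.avg         = XT_HASHLIMIT_SCALE / 25,	/* 400 == 1/25 s between packets */
	.burst       = 5,
	.size        = 0,	/* 0: let the kernel pick the bucket count */
	.max         = 0,	/* 0: and the entry limit */
	.gc_interval = 1000,	/* ms */
	.expire      = 10000,	/* ms */
};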
diff --git a/include/linux/netfilter/xt_policy.h b/include/linux/netfilter/xt_policy.h
index a8132ec..45654d3 100644
--- a/include/linux/netfilter/xt_policy.h
+++ b/include/linux/netfilter/xt_policy.h
@@ -39,7 +39,7 @@
 	union xt_policy_addr	smask;
 	union xt_policy_addr	daddr;
 	union xt_policy_addr	dmask;
-	u_int32_t		spi;
+	__be32			spi;
 	u_int32_t		reqid;
 	u_int8_t		proto;
 	u_int8_t		mode;
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 9a4dd11..6c4613f 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -64,7 +64,7 @@
 
 struct bridge_skb_cb {
 	union {
-		__u32 ipv4;
+		__be32 ipv4;
 	} daddr;
 };
 
diff --git a/include/linux/netfilter_bridge/ebt_802_3.h b/include/linux/netfilter_bridge/ebt_802_3.h
index b9f712c..07f044f 100644
--- a/include/linux/netfilter_bridge/ebt_802_3.h
+++ b/include/linux/netfilter_bridge/ebt_802_3.h
@@ -28,21 +28,21 @@
 	uint8_t ssap;
 	uint8_t ctrl;
 	uint8_t orig[3];
-	uint16_t type;
+	__be16 type;
 };
 
 struct hdr_ni {
 	uint8_t dsap;
 	uint8_t ssap;
-	uint16_t ctrl;
+	__be16 ctrl;
 	uint8_t  orig[3];
-	uint16_t type;
+	__be16 type;
 };
 
 struct ebt_802_3_hdr {
 	uint8_t  daddr[6];
 	uint8_t  saddr[6];
-	uint16_t len;
+	__be16 len;
 	union {
 		struct hdr_ui ui;
 		struct hdr_ni ni;
@@ -61,7 +61,7 @@
 struct ebt_802_3_info 
 {
 	uint8_t  sap;
-	uint16_t type;
+	__be16 type;
 	uint8_t  bitmask;
 	uint8_t  invflags;
 };
diff --git a/include/linux/netfilter_bridge/ebt_among.h b/include/linux/netfilter_bridge/ebt_among.h
index 307c1fe..7654069 100644
--- a/include/linux/netfilter_bridge/ebt_among.h
+++ b/include/linux/netfilter_bridge/ebt_among.h
@@ -32,7 +32,7 @@
 struct ebt_mac_wormhash_tuple
 {
 	uint32_t cmp[2];
-	uint32_t ip;
+	__be32 ip;
 };
 
 struct ebt_mac_wormhash
diff --git a/include/linux/netfilter_bridge/ebt_arp.h b/include/linux/netfilter_bridge/ebt_arp.h
index 537ec6b..97e4dbd 100644
--- a/include/linux/netfilter_bridge/ebt_arp.h
+++ b/include/linux/netfilter_bridge/ebt_arp.h
@@ -14,13 +14,13 @@
 
 struct ebt_arp_info
 {
-	uint16_t htype;
-	uint16_t ptype;
-	uint16_t opcode;
-	uint32_t saddr;
-	uint32_t smsk;
-	uint32_t daddr;
-	uint32_t dmsk;
+	__be16 htype;
+	__be16 ptype;
+	__be16 opcode;
+	__be32 saddr;
+	__be32 smsk;
+	__be32 daddr;
+	__be32 dmsk;
 	unsigned char smaddr[ETH_ALEN];
 	unsigned char smmsk[ETH_ALEN];
 	unsigned char dmaddr[ETH_ALEN];
diff --git a/include/linux/netfilter_bridge/ebt_ip.h b/include/linux/netfilter_bridge/ebt_ip.h
index 7247385..d684747 100644
--- a/include/linux/netfilter_bridge/ebt_ip.h
+++ b/include/linux/netfilter_bridge/ebt_ip.h
@@ -28,10 +28,10 @@
 /* the same values are used for the invflags */
 struct ebt_ip_info
 {
-	uint32_t saddr;
-	uint32_t daddr;
-	uint32_t smsk;
-	uint32_t dmsk;
+	__be32 saddr;
+	__be32 daddr;
+	__be32 smsk;
+	__be32 dmsk;
 	uint8_t  tos;
 	uint8_t  protocol;
 	uint8_t  bitmask;
diff --git a/include/linux/netfilter_bridge/ebt_nat.h b/include/linux/netfilter_bridge/ebt_nat.h
index 26fd90d..435b886 100644
--- a/include/linux/netfilter_bridge/ebt_nat.h
+++ b/include/linux/netfilter_bridge/ebt_nat.h
@@ -1,6 +1,7 @@
 #ifndef __LINUX_BRIDGE_EBT_NAT_H
 #define __LINUX_BRIDGE_EBT_NAT_H
 
+#define NAT_ARP_BIT  (0x00000010)
 struct ebt_nat_info
 {
 	unsigned char mac[ETH_ALEN];
diff --git a/include/linux/netfilter_bridge/ebt_vlan.h b/include/linux/netfilter_bridge/ebt_vlan.h
index cb1fcc4..1d98be4 100644
--- a/include/linux/netfilter_bridge/ebt_vlan.h
+++ b/include/linux/netfilter_bridge/ebt_vlan.h
@@ -10,7 +10,7 @@
 struct ebt_vlan_info {
 	uint16_t id;		/* VLAN ID {1-4095} */
 	uint8_t prio;		/* VLAN User Priority {0-7} */
-	uint16_t encap;		/* VLAN Encapsulated frame code {0-65535} */
+	__be16 encap;		/* VLAN Encapsulated frame code {0-65535} */
 	uint8_t bitmask;		/* Args bitmask bit 1=1 - ID arg,
 				   bit 2=1 User-Priority arg, bit 3=1 encap*/
 	uint8_t invflags;		/* Inverse bitmask  bit 1=1 - inversed ID arg, 
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index b1a7cc9..94e0a7d 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -26,6 +26,10 @@
 #define EBT_CONTINUE -3
 #define EBT_RETURN   -4
 #define NUM_STANDARD_TARGETS   4
+/* ebtables target modules store the verdict inside an int. We can
+ * reclaim a part of this int for backwards compatible extensions.
+ * The 4 lsb are more than enough to store the verdict. */
+#define EBT_VERDICT_BITS 0x0000000F
 
 struct ebt_counter
 {
@@ -42,6 +46,23 @@
 	/* total size of the entries */
 	unsigned int entries_size;
 	/* start of the chains */
+	struct ebt_entries __user *hook_entry[NF_BR_NUMHOOKS];
+	/* nr of counters userspace expects back */
+	unsigned int num_counters;
+	/* where the kernel will put the old counters */
+	struct ebt_counter __user *counters;
+	char __user *entries;
+};
+
+struct ebt_replace_kernel
+{
+	char name[EBT_TABLE_MAXNAMELEN];
+	unsigned int valid_hooks;
+	/* nr of rules in the table */
+	unsigned int nentries;
+	/* total size of the entries */
+	unsigned int entries_size;
+	/* start of the chains */
 	struct ebt_entries *hook_entry[NF_BR_NUMHOOKS];
 	/* nr of counters userspace expects back */
 	unsigned int num_counters;
@@ -141,7 +162,7 @@
 	/* this needs to be the first field */
 	unsigned int bitmask;
 	unsigned int invflags;
-	uint16_t ethproto;
+	__be16 ethproto;
 	/* the physical in-dev */
 	char in[IFNAMSIZ];
 	/* the logical in-dev */
@@ -251,7 +272,7 @@
 {
 	struct list_head list;
 	char name[EBT_TABLE_MAXNAMELEN];
-	struct ebt_replace *table;
+	struct ebt_replace_kernel *table;
 	unsigned int valid_hooks;
 	rwlock_t lock;
 	/* e.g. could be the table explicitly only allows certain
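
Editor's note: as the EBT_VERDICT_BITS comment above explains, target extensions may pack flag bits above the 4-bit verdict. A sketch (illustration only, not part of this patch) of how the original verdict can be recovered:

/* Verdicts are small negative numbers (EBT_CONTINUE == -3, EBT_RETURN == -4,
 * ...), so the packed value is unpacked by OR-ing the upper bits back in,
 * which sign-extends the 4 lsb, rather than by masking them off. */
static int example_unpack_verdict(int packed)
{
	return packed | ~EBT_VERDICT_BITS;
}
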
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 5b63a23..5821eb5 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -79,7 +79,7 @@
 #ifdef __KERNEL__
 extern int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type);
 extern int ip_xfrm_me_harder(struct sk_buff **pskb);
-extern unsigned int nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
 				   unsigned int dataoff, u_int8_t protocol);
 #endif /*__KERNEL__*/
 
diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild
index 591c1a8..1803378 100644
--- a/include/linux/netfilter_ipv4/Kbuild
+++ b/include/linux/netfilter_ipv4/Kbuild
@@ -1,6 +1,4 @@
 header-y += ip_conntrack_helper.h
-header-y += ip_conntrack_helper_h323_asn1.h
-header-y += ip_conntrack_helper_h323_types.h
 header-y += ip_conntrack_protocol.h
 header-y += ip_conntrack_sctp.h
 header-y += ip_conntrack_tcp.h
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
index 64e8680..33581c1 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -9,6 +9,7 @@
 #include <linux/compiler.h>
 #include <asm/atomic.h>
 
+#include <linux/timer.h>
 #include <linux/netfilter_ipv4/ip_conntrack_tcp.h>
 #include <linux/netfilter_ipv4/ip_conntrack_icmp.h>
 #include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
@@ -277,7 +278,7 @@
 __ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple);
 
 extern struct ip_conntrack_expect *
-ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple);
+ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple);
 
 extern struct ip_conntrack_tuple_hash *
 __ip_conntrack_find(const struct ip_conntrack_tuple *tuple,
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_ftp.h b/include/linux/netfilter_ipv4/ip_conntrack_ftp.h
index 6381193..2129fc3 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_ftp.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_ftp.h
@@ -1,6 +1,44 @@
 #ifndef _IP_CONNTRACK_FTP_H
 #define _IP_CONNTRACK_FTP_H
+/* FTP tracking. */
 
-#include <linux/netfilter/nf_conntrack_ftp.h>
+/* This enum is exposed to userspace */
+enum ip_ct_ftp_type
+{
+	/* PORT command from client */
+	IP_CT_FTP_PORT,
+	/* PASV response from server */
+	IP_CT_FTP_PASV,
+	/* EPRT command from client */
+	IP_CT_FTP_EPRT,
+	/* EPSV response from server */
+	IP_CT_FTP_EPSV,
+};
+
+#ifdef __KERNEL__
+
+#define FTP_PORT	21
+
+#define NUM_SEQ_TO_REMEMBER 2
+/* This structure exists only once per master */
+struct ip_ct_ftp_master {
+	/* Valid seq positions for cmd matching after newline */
+	u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER];
+	/* 0 means seq_match_aft_nl not set */
+	int seq_aft_nl_num[IP_CT_DIR_MAX];
+};
+
+struct ip_conntrack_expect;
+
+/* For NAT to hook in when we find a packet which describes what other
+ * connection we should expect. */
+extern unsigned int (*ip_nat_ftp_hook)(struct sk_buff **pskb,
+				       enum ip_conntrack_info ctinfo,
+				       enum ip_ct_ftp_type type,
+				       unsigned int matchoff,
+				       unsigned int matchlen,
+				       struct ip_conntrack_expect *exp,
+				       u32 *seq);
+#endif /* __KERNEL__ */
 
 #endif /* _IP_CONNTRACK_FTP_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_h323.h b/include/linux/netfilter_ipv4/ip_conntrack_h323.h
index 943cc6a..18f7698 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_h323.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_h323.h
@@ -3,7 +3,7 @@
 
 #ifdef __KERNEL__
 
-#include <linux/netfilter_ipv4/ip_conntrack_helper_h323_asn1.h>
+#include <linux/netfilter/nf_conntrack_h323_asn1.h>
 
 #define RAS_PORT 1719
 #define Q931_PORT 1720
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h b/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h
index 1d853aa..e371e0f 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h
@@ -102,11 +102,11 @@
 }
 
 /* get pointer to gre csum, if present */
-static inline u_int16_t *gre_csum(struct gre_hdr *greh)
+static inline __sum16 *gre_csum(struct gre_hdr *greh)
 {
 	if (!greh->csum)
 		return NULL;
-	return (u_int16_t *) (greh+sizeof(*greh));
+	return (__sum16 *) (greh+sizeof(*greh));
 }
 
 #endif /* __KERNEL__ */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_sip.h b/include/linux/netfilter_ipv4/ip_conntrack_sip.h
index 913dad6..bef6c64 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_sip.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_sip.h
@@ -5,23 +5,18 @@
 #define SIP_PORT	5060
 #define SIP_TIMEOUT	3600
 
-#define POS_VIA		0
-#define POS_CONTACT	1
-#define POS_CONTENT	2
-#define POS_MEDIA	3
-#define POS_OWNER	4
-#define POS_CONNECTION	5
-#define POS_REQ_HEADER	6
-#define POS_SDP_HEADER	7
-
-struct sip_header_nfo {
-	const char	*lname;
-	const char	*sname;
-	const char	*ln_str;
-	size_t		lnlen;
-	size_t		snlen;
-	size_t		ln_strlen;
-	int		(*match_len)(const char *, const char *, int *);
+enum sip_header_pos {
+	POS_REG_REQ_URI,
+	POS_REQ_URI,
+	POS_FROM,
+	POS_TO,
+	POS_VIA,
+	POS_CONTACT,
+	POS_CONTENT,
+	POS_MEDIA,
+	POS_OWNER,
+	POS_CONNECTION,
+	POS_SDP_HEADER,
 };
 
 extern unsigned int (*ip_nat_sip_hook)(struct sk_buff **pskb,
@@ -36,9 +31,10 @@
 extern int ct_sip_get_info(const char *dptr, size_t dlen,
 			   unsigned int *matchoff,
 			   unsigned int *matchlen,
-			   struct sip_header_nfo *hnfo);
+			   enum sip_header_pos pos);
 extern int ct_sip_lnlen(const char *line, const char *limit);
 extern const char *ct_sip_search(const char *needle, const char *haystack,
-                                 size_t needle_len, size_t haystack_len);
+				 size_t needle_len, size_t haystack_len,
+				 int case_sensitive);
 #endif /* __KERNEL__ */
 #endif /* __IP_CONNTRACK_SIP_H__ */
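
Editor's note: ct_sip_get_info() now takes an enum sip_header_pos instead of a caller-built sip_header_nfo descriptor. A sketch of the new calling convention (function name illustrative, not part of this patch):

/* Locate the Via: header in a SIP message; on success matchoff/matchlen
 * describe where its value sits within dptr. */
static int example_find_via(const char *dptr, size_t dlen)
{
	unsigned int matchoff, matchlen;

	return ct_sip_get_info(dptr, dlen, &matchoff, &matchlen, POS_VIA);
}
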
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_tftp.h b/include/linux/netfilter_ipv4/ip_conntrack_tftp.h
index cde9729..a404fc0 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_tftp.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_tftp.h
@@ -4,7 +4,7 @@
 #define TFTP_PORT 69
 
 struct tftphdr {
-	u_int16_t opcode;
+	__be16 opcode;
 };
 
 #define TFTP_OPCODE_READ	1
diff --git a/include/linux/netfilter_ipv4/ipt_LOG.h b/include/linux/netfilter_ipv4/ipt_LOG.h
index 892f9a3..90fa652 100644
--- a/include/linux/netfilter_ipv4/ipt_LOG.h
+++ b/include/linux/netfilter_ipv4/ipt_LOG.h
@@ -6,7 +6,7 @@
 #define IPT_LOG_TCPOPT		0x02	/* Log TCP options */
 #define IPT_LOG_IPOPT		0x04	/* Log IP options */
 #define IPT_LOG_UID		0x08	/* Log UID owning local socket */
-#define IPT_LOG_NFLOG		0x10	/* Log using nf_log backend */
+#define IPT_LOG_NFLOG		0x10	/* Unsupported, don't reuse */
 #define IPT_LOG_MASK		0x1f
 
 struct ipt_log_info {
diff --git a/include/linux/netfilter_ipv4/ipt_hashlimit.h b/include/linux/netfilter_ipv4/ipt_hashlimit.h
index ac2cb64..5662120 100644
--- a/include/linux/netfilter_ipv4/ipt_hashlimit.h
+++ b/include/linux/netfilter_ipv4/ipt_hashlimit.h
@@ -1,40 +1,14 @@
 #ifndef _IPT_HASHLIMIT_H
 #define _IPT_HASHLIMIT_H
 
-/* timings are in milliseconds. */
-#define IPT_HASHLIMIT_SCALE 10000
-/* 1/10,000 sec period => max of 10,000/sec.  Min rate is then 429490
-   seconds, or one every 59 hours. */
+#include <linux/netfilter/xt_hashlimit.h>
 
-/* details of this structure hidden by the implementation */
-struct ipt_hashlimit_htable;
+#define IPT_HASHLIMIT_SCALE	XT_HASHLIMIT_SCALE
+#define IPT_HASHLIMIT_HASH_DIP	XT_HASHLIMIT_HASH_DIP
+#define IPT_HASHLIMIT_HASH_DPT	XT_HASHLIMIT_HASH_DPT
+#define IPT_HASHLIMIT_HASH_SIP	XT_HASHLIMIT_HASH_SIP
+#define IPT_HASHLIMIT_HASH_SPT	XT_HASHLIMIT_HASH_SPT
 
-#define IPT_HASHLIMIT_HASH_DIP	0x0001
-#define IPT_HASHLIMIT_HASH_DPT	0x0002
-#define IPT_HASHLIMIT_HASH_SIP	0x0004
-#define IPT_HASHLIMIT_HASH_SPT	0x0008
+#define ipt_hashlimit_info xt_hashlimit_info
 
-struct hashlimit_cfg {
-	u_int32_t mode;	  /* bitmask of IPT_HASHLIMIT_HASH_* */
-	u_int32_t avg;    /* Average secs between packets * scale */
-	u_int32_t burst;  /* Period multiplier for upper limit. */
-
-	/* user specified */
-	u_int32_t size;		/* how many buckets */
-	u_int32_t max;		/* max number of entries */
-	u_int32_t gc_interval;	/* gc interval */
-	u_int32_t expire;	/* when do entries expire? */
-};
-
-struct ipt_hashlimit_info {
-	char name [IFNAMSIZ];		/* name */
-	struct hashlimit_cfg cfg;
-	struct ipt_hashlimit_htable *hinfo;
-
-	/* Used internally by the kernel */
-	union {
-		void *ptr;
-		struct ipt_hashlimit_info *master;
-	} u;
-};
-#endif /*_IPT_HASHLIMIT_H*/
+#endif /* _IPT_HASHLIMIT_H */
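
Editor's note: ipt_hashlimit.h is now a thin compatibility wrapper; the IPT_* names and ipt_hashlimit_info alias their xt_hashlimit counterparts, so existing users keep compiling. An illustrative initialisation, assuming xt_hashlimit_info preserves the field layout of the struct removed above (values are examples only):

static struct ipt_hashlimit_info example_hashlimit = {
	.name = "example",
	.cfg = {
		.mode  = IPT_HASHLIMIT_HASH_SIP,	/* == XT_HASHLIMIT_HASH_SIP */
		.avg   = IPT_HASHLIMIT_SCALE / 100,	/* ~100 packets/sec */
		.burst = 5,
	},
};
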
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index d97e268..ab81a6d 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -74,7 +74,7 @@
 
 #ifdef CONFIG_NETFILTER
 extern int ip6_route_me_harder(struct sk_buff *skb);
-extern unsigned int nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
 				    unsigned int dataoff, u_int8_t protocol);
 
 extern int ipv6_netfilter_init(void);
diff --git a/include/linux/netfilter_ipv6/ip6t_LOG.h b/include/linux/netfilter_ipv6/ip6t_LOG.h
index 060c1a1..0d0119b 100644
--- a/include/linux/netfilter_ipv6/ip6t_LOG.h
+++ b/include/linux/netfilter_ipv6/ip6t_LOG.h
@@ -6,7 +6,7 @@
 #define IP6T_LOG_TCPOPT		0x02	/* Log TCP options */
 #define IP6T_LOG_IPOPT		0x04	/* Log IP options */
 #define IP6T_LOG_UID		0x08	/* Log UID owning local socket */
-#define IP6T_LOG_NFLOG		0x10	/* Log using nf_log backend */
+#define IP6T_LOG_NFLOG		0x10	/* Unsupported, don't use */
 #define IP6T_LOG_MASK		0x1f
 
 struct ip6t_log_info {
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 6641162..b3b9b60 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -141,7 +141,6 @@
 {
 	struct ucred		creds;		/* Skb credentials	*/
 	__u32			pid;
-	__u32			dst_pid;
 	__u32			dst_group;
 	kernel_cap_t		eff_cap;
 	__u32			loginuid;	/* Login (audit) uid */
@@ -174,6 +173,7 @@
  */
 #define NLMSG_GOODORDER 0
 #define NLMSG_GOODSIZE (SKB_MAX_ORDER(0, NLMSG_GOODORDER))
+#define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN)
 
 
 struct netlink_callback
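
Editor's note: NLMSG_DEFAULT_SIZE is NLMSG_GOODSIZE minus the netlink header, i.e. the largest payload that still fits a good-order skb. A sketch of the intended use, assuming the nlmsg_new() helper from <net/netlink.h> (not shown in this patch):

static struct sk_buff *example_reply_skb(void)
{
	/* payload-sized request; nlmsg_new() accounts for the header on top */
	return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
}
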
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 1efe60c..29930b71 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -12,26 +12,27 @@
 #include <linux/rcupdate.h>
 #include <linux/list.h>
 
-struct netpoll;
-
 struct netpoll {
 	struct net_device *dev;
-	char dev_name[16], *name;
+	char dev_name[IFNAMSIZ];
+	const char *name;
 	void (*rx_hook)(struct netpoll *, int, char *, int);
-	void (*drop)(struct sk_buff *skb);
+
 	u32 local_ip, remote_ip;
 	u16 local_port, remote_port;
-	unsigned char local_mac[6], remote_mac[6];
+ 	u8 local_mac[ETH_ALEN], remote_mac[ETH_ALEN];
 };
 
 struct netpoll_info {
+	atomic_t refcnt;
 	spinlock_t poll_lock;
 	int poll_owner;
-	int tries;
 	int rx_flags;
 	spinlock_t rx_lock;
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
 	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
+	struct sk_buff_head txq;
+	struct delayed_work tx_work;
 };
 
 void netpoll_poll(struct netpoll *np);
@@ -42,7 +43,7 @@
 void netpoll_set_trap(int trap);
 void netpoll_cleanup(struct netpoll *np);
 int __netpoll_rx(struct sk_buff *skb);
-void netpoll_queue(struct sk_buff *skb);
+
 
 #ifdef CONFIG_NETPOLL
 static inline int netpoll_rx(struct sk_buff *skb)
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 45228c1..0496306 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -33,6 +33,7 @@
 #define FLUSH_HIGHPRI		16	/* high priority memory reclaim flush */
 #define FLUSH_NOCOMMIT		32	/* Don't send the NFSv3/v4 COMMIT */
 #define FLUSH_INVALIDATE	64	/* Invalidate the page cache */
+#define FLUSH_NOWRITEPAGE	128	/* Don't call writepage() */
 
 #ifdef __KERNEL__
 
@@ -318,7 +319,7 @@
 extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, int mode);
 
 /* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */
-extern u32 root_nfs_parse_addr(char *name); /*__init*/
+extern __be32 root_nfs_parse_addr(char *name); /*__init*/
 
 static inline void nfs_fattr_init(struct nfs_fattr *fattr)
 {
@@ -427,19 +428,21 @@
 extern int  nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
 extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
 extern void nfs_writedata_release(void *);
-
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-struct nfs_write_data *nfs_commit_alloc(void);
-void nfs_commit_free(struct nfs_write_data *p);
-#endif
+extern int nfs_set_page_dirty(struct page *);
 
 /*
  * Try to write back everything synchronously (but check the
  * return value!)
  */
-extern int  nfs_sync_inode_wait(struct inode *, unsigned long, unsigned int, int);
+extern long nfs_sync_mapping_wait(struct address_space *, struct writeback_control *, int);
+extern int nfs_sync_mapping_range(struct address_space *, loff_t, loff_t, int);
+extern int nfs_wb_all(struct inode *inode);
+extern int nfs_wb_page(struct inode *inode, struct page* page);
+extern int nfs_wb_page_priority(struct inode *inode, struct page* page, int how);
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 extern int  nfs_commit_inode(struct inode *, int);
+extern struct nfs_write_data *nfs_commit_alloc(void);
+extern void nfs_commit_free(struct nfs_write_data *wdata);
 extern void nfs_commit_release(void *wdata);
 #else
 static inline int
@@ -455,28 +458,6 @@
 	return NFS_I(inode)->npages != 0;
 }
 
-static inline int
-nfs_wb_all(struct inode *inode)
-{
-	int error = nfs_sync_inode_wait(inode, 0, 0, 0);
-	return (error < 0) ? error : 0;
-}
-
-/*
- * Write back all requests on one page - we do this before reading it.
- */
-static inline int nfs_wb_page_priority(struct inode *inode, struct page* page, int how)
-{
-	int error = nfs_sync_inode_wait(inode, page->index, 1,
-			how | FLUSH_STABLE);
-	return (error < 0) ? error : 0;
-}
-
-static inline int nfs_wb_page(struct inode *inode, struct page* page)
-{
-	return nfs_wb_page_priority(inode, page, 0);
-}
-
 /*
  * Allocate nfs_write_data structures
  */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 7ccfc7e..95796e6 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -51,7 +51,7 @@
 
 	unsigned long		cl_lease_time;
 	unsigned long		cl_last_renewal;
-	struct work_struct	cl_renewd;
+	struct delayed_work	cl_renewd;
 
 	struct rpc_wait_queue	cl_rpcwaitq;
 
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 1f7bd28..2e555d4 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -30,6 +30,8 @@
 #define PG_BUSY			0
 #define PG_NEED_COMMIT		1
 #define PG_NEED_RESCHED		2
+#define PG_NEED_FLUSH		3
+#define PG_FLUSHING		4
 
 struct nfs_inode;
 struct nfs_page {
@@ -60,8 +62,9 @@
 extern	void nfs_release_request(struct nfs_page *req);
 
 
-extern  int nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst,
-				unsigned long idx_start, unsigned int npages);
+extern	long nfs_scan_dirty(struct address_space *mapping,
+				struct writeback_control *wbc,
+				struct list_head *dst);
 extern	int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, struct list_head *dst,
 			  unsigned long idx_start, unsigned int npages);
 extern	int nfs_coalesce_requests(struct list_head *, struct list_head *,
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 768c1ad..9ee9da5 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -785,8 +785,6 @@
 	int	(*readlink)(struct inode *, struct page *, unsigned int,
 			    unsigned int);
 	int	(*read)    (struct nfs_read_data *);
-	int	(*write)   (struct nfs_write_data *);
-	int	(*commit)  (struct nfs_write_data *);
 	int	(*create)  (struct inode *, struct dentry *,
 			    struct iattr *, int, struct nameidata *);
 	int	(*remove)  (struct inode *, struct qstr *);
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index e16904e..acb4ed1 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -15,9 +15,14 @@
  * disables interrupts for a long time. This call is stateless.
  */
 #ifdef ARCH_HAS_NMI_WATCHDOG
+#include <asm/nmi.h>
 extern void touch_nmi_watchdog(void);
 #else
 # define touch_nmi_watchdog() touch_softlockup_watchdog()
 #endif
 
+#ifndef trigger_all_cpu_backtrace
+#define trigger_all_cpu_backtrace() do { } while (0)
+#endif
+
 #endif
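
Editor's note: with the empty trigger_all_cpu_backtrace() fallback above, generic code can request cross-CPU backtraces without per-architecture ifdefs. A hedged sketch (caller name illustrative):

static void example_report_stall(void)
{
	touch_nmi_watchdog();		/* we are about to spend a while printing */
	trigger_all_cpu_backtrace();	/* no-op unless the arch provides it */
}
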
diff --git a/include/linux/pata_platform.h b/include/linux/pata_platform.h
new file mode 100644
index 0000000..2d5fd64
--- /dev/null
+++ b/include/linux/pata_platform.h
@@ -0,0 +1,13 @@
+#ifndef __LINUX_PATA_PLATFORM_H
+#define __LINUX_PATA_PLATFORM_H
+
+struct pata_platform_info {
+	/*
+	 * I/O port shift, for platforms with ports that are
+	 * regularly spaced and need larger than the 1-byte
+	 * spacing used by ata_std_ports().
+	 */
+	unsigned int ioport_shift;
+};
+
+#endif /* __LINUX_PATA_PLATFORM_H */
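
Editor's note: a board file would feed ioport_shift to the generic PATA platform driver roughly as follows (sketch only; device name, id and shift value are illustrative):

static struct pata_platform_info example_pata_info = {
	.ioport_shift = 1,		/* registers on 16-bit boundaries */
};

static struct platform_device example_pata_device = {
	.name	= "pata_platform",
	.id	= -1,
	.dev	= {
		.platform_data = &example_pata_info,
	},
};
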
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 09be0f8..01c7072 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -51,6 +51,7 @@
 #include <linux/list.h>
 #include <linux/compiler.h>
 #include <linux/errno.h>
+#include <asm/atomic.h>
 #include <linux/device.h>
 
 /* File state for mmap()s on /proc/bus/pci/X/Y */
@@ -159,7 +160,6 @@
 	unsigned int	transparent:1;	/* Transparent PCI bridge */
 	unsigned int	multifunction:1;/* Part of multi-function device */
 	/* keep track of device state */
-	unsigned int	is_enabled:1;	/* pci_enable_device has been called */
 	unsigned int	is_busmaster:1; /* device is busmaster */
 	unsigned int	no_msi:1;	/* device may not use msi */
 	unsigned int	no_d1d2:1;   /* only allow d0 or d3 */
@@ -167,6 +167,7 @@
 	unsigned int	broken_parity_status:1;	/* Device generates false positive parity */
 	unsigned int 	msi_enabled:1;
 	unsigned int	msix_enabled:1;
+	atomic_t	enable_cnt;	/* pci_enable_device has been called */
 
 	u32		saved_config_space[16]; /* config space saved at suspend time */
 	struct hlist_head saved_cap_space;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index fa4e1d7..4d972bb 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -390,7 +390,7 @@
 #define PCI_DEVICE_ID_NS_CS5535_IDE	0x002d
 #define PCI_DEVICE_ID_NS_CS5535_AUDIO	0x002e
 #define PCI_DEVICE_ID_NS_CS5535_USB	0x002f
-#define PCI_DEVICE_ID_NS_CS5535_VIDEO	0x0030
+#define PCI_DEVICE_ID_NS_GX_VIDEO	0x0030
 #define PCI_DEVICE_ID_NS_SATURN		0x0035
 #define PCI_DEVICE_ID_NS_SCx200_BRIDGE	0x0500
 #define PCI_DEVICE_ID_NS_SCx200_SMI	0x0501
@@ -403,8 +403,7 @@
 #define PCI_DEVICE_ID_NS_SC1100_XBUS	0x0515
 #define PCI_DEVICE_ID_NS_87410		0xd001
 
-#define PCI_DEVICE_ID_NS_CS5535_HOST_BRIDGE  0x0028
-#define PCI_DEVICE_ID_NS_CS5535_ISA_BRIDGE   0x002b
+#define PCI_DEVICE_ID_NS_GX_HOST_BRIDGE  0x0028
 
 #define PCI_VENDOR_ID_TSENG		0x100c
 #define PCI_DEVICE_ID_TSENG_W32P_2	0x3202
@@ -1213,6 +1212,10 @@
 #define PCI_DEVICE_ID_NVIDIA_NVENET_21              0x0451
 #define PCI_DEVICE_ID_NVIDIA_NVENET_22              0x0452
 #define PCI_DEVICE_ID_NVIDIA_NVENET_23              0x0453
+#define PCI_DEVICE_ID_NVIDIA_NVENET_24              0x054C
+#define PCI_DEVICE_ID_NVIDIA_NVENET_25              0x054D
+#define PCI_DEVICE_ID_NVIDIA_NVENET_26              0x054E
+#define PCI_DEVICE_ID_NVIDIA_NVENET_27              0x054F
 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE       0x0560
 
 #define PCI_VENDOR_ID_IMS		0x10e0
@@ -1860,6 +1863,7 @@
 #define PCI_DEVICE_ID_OXSEMI_16PCI95N	0x9511
 #define PCI_DEVICE_ID_OXSEMI_16PCI954PP	0x9513
 #define PCI_DEVICE_ID_OXSEMI_16PCI952	0x9521
+#define PCI_DEVICE_ID_OXSEMI_16PCI952PP	0x9523
 
 #define PCI_VENDOR_ID_SAMSUNG		0x144d
 
@@ -1893,6 +1897,7 @@
 #define PCI_VENDOR_ID_BROADCOM		0x14e4
 #define PCI_DEVICE_ID_TIGON3_5752	0x1600
 #define PCI_DEVICE_ID_TIGON3_5752M	0x1601
+#define PCI_DEVICE_ID_NX2_5709		0x1639
 #define PCI_DEVICE_ID_TIGON3_5700	0x1644
 #define PCI_DEVICE_ID_TIGON3_5701	0x1645
 #define PCI_DEVICE_ID_TIGON3_5702	0x1646
@@ -1926,6 +1931,7 @@
 #define PCI_DEVICE_ID_TIGON3_5750M	0x167c
 #define PCI_DEVICE_ID_TIGON3_5751M	0x167d
 #define PCI_DEVICE_ID_TIGON3_5751F	0x167e
+#define PCI_DEVICE_ID_TIGON3_5787F	0x167f
 #define PCI_DEVICE_ID_TIGON3_5787M	0x1693
 #define PCI_DEVICE_ID_TIGON3_5782	0x1696
 #define PCI_DEVICE_ID_TIGON3_5786	0x169a
@@ -1997,6 +2003,8 @@
 #define PCI_DEVICE_ID_FARSITE_TE1       0x1610
 #define PCI_DEVICE_ID_FARSITE_TE1C      0x1612
 
+#define PCI_VENDOR_ID_ARIMA		0x161f
+
 #define PCI_VENDOR_ID_SIBYTE		0x166d
 #define PCI_DEVICE_ID_BCM1250_PCI	0x0001
 #define PCI_DEVICE_ID_BCM1250_HT	0x0002
@@ -2211,6 +2219,13 @@
 #define PCI_DEVICE_ID_INTEL_ICH8_4	0x2815
 #define PCI_DEVICE_ID_INTEL_ICH8_5	0x283e
 #define PCI_DEVICE_ID_INTEL_ICH8_6	0x2850
+#define PCI_DEVICE_ID_INTEL_ICH9_0	0x2910
+#define PCI_DEVICE_ID_INTEL_ICH9_1	0x2911
+#define PCI_DEVICE_ID_INTEL_ICH9_2	0x2912
+#define PCI_DEVICE_ID_INTEL_ICH9_3	0x2913
+#define PCI_DEVICE_ID_INTEL_ICH9_4	0x2914
+#define PCI_DEVICE_ID_INTEL_ICH9_5	0x2915
+#define PCI_DEVICE_ID_INTEL_ICH9_6	0x2930
 #define PCI_DEVICE_ID_INTEL_82855PM_HB	0x3340
 #define PCI_DEVICE_ID_INTEL_82830_HB	0x3575
 #define PCI_DEVICE_ID_INTEL_82830_CGC	0x3577
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index c321316..064b1dc 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -292,6 +292,12 @@
 #define PCI_MSI_DATA_64		12	/* 16 bits of data for 64-bit devices */
 #define PCI_MSI_MASK_BIT	16	/* Mask bits register */
 
+/* MSI-X registers (these are at offset PCI_MSI_FLAGS) */
+#define PCI_MSIX_FLAGS_QSIZE	0x7FF
+#define PCI_MSIX_FLAGS_ENABLE	(1 << 15)
+#define PCI_MSIX_FLAGS_BIRMASK	(7 << 0)
+#define PCI_MSIX_FLAGS_BITMASK	(1 << 0)
+
 /* CompactPCI Hotswap Register */
 
 #define PCI_CHSWP_CSR		2	/* Control and Status Register */
diff --git a/include/linux/pfkeyv2.h b/include/linux/pfkeyv2.h
index d5dd471..265bafa 100644
--- a/include/linux/pfkeyv2.h
+++ b/include/linux/pfkeyv2.h
@@ -32,7 +32,7 @@
 struct sadb_sa {
 	uint16_t	sadb_sa_len;
 	uint16_t	sadb_sa_exttype;
-	uint32_t	sadb_sa_spi;
+	__be32		sadb_sa_spi;
 	uint8_t		sadb_sa_replay;
 	uint8_t		sadb_sa_state;
 	uint8_t		sadb_sa_auth;
@@ -211,7 +211,7 @@
 struct sadb_x_nat_t_port {
 	uint16_t	sadb_x_nat_t_port_len;
 	uint16_t	sadb_x_nat_t_port_exttype;
-	uint16_t	sadb_x_nat_t_port_port;
+	__be16		sadb_x_nat_t_port_port;
 	uint16_t	sadb_x_nat_t_port_reserved;
 } __attribute__((packed));
 /* sizeof(struct sadb_x_nat_t_port) == 8 */
@@ -285,6 +285,7 @@
 #define SADB_X_AALG_SHA2_384HMAC	6
 #define SADB_X_AALG_SHA2_512HMAC	7
 #define SADB_X_AALG_RIPEMD160HMAC	8
+#define SADB_X_AALG_AES_XCBC_MAC	9
 #define SADB_X_AALG_NULL		251	/* kame */
 #define SADB_AALG_MAX			251
 
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 9447a57..edd4c88 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -20,6 +20,10 @@
 
 #include <linux/spinlock.h>
 #include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
 
 #define PHY_BASIC_FEATURES	(SUPPORTED_10baseT_Half | \
 				 SUPPORTED_10baseT_Full | \
@@ -43,15 +47,26 @@
 #define PHY_HAS_INTERRUPT	0x00000001
 #define PHY_HAS_MAGICANEG	0x00000002
 
+/* Interface Mode definitions */
+typedef enum {
+	PHY_INTERFACE_MODE_MII,
+	PHY_INTERFACE_MODE_GMII,
+	PHY_INTERFACE_MODE_SGMII,
+	PHY_INTERFACE_MODE_TBI,
+	PHY_INTERFACE_MODE_RMII,
+	PHY_INTERFACE_MODE_RGMII,
+	PHY_INTERFACE_MODE_RTBI
+} phy_interface_t;
+
 #define MII_BUS_MAX 4
 
 
-#define PHY_INIT_TIMEOUT 100000
+#define PHY_INIT_TIMEOUT	100000
 #define PHY_STATE_TIME		1
 #define PHY_FORCE_TIMEOUT	10
 #define PHY_AN_TIMEOUT		10
 
-#define PHY_MAX_ADDR 32
+#define PHY_MAX_ADDR	32
 
 /* Used when trying to connect to a specific phy (mii bus id:phy device id) */
 #define PHY_ID_FMT "%x:%02x"
@@ -83,8 +98,8 @@
 	int *irq;
 };
 
-#define PHY_INTERRUPT_DISABLED 0x0
-#define PHY_INTERRUPT_ENABLED 0x80000000
+#define PHY_INTERRUPT_DISABLED	0x0
+#define PHY_INTERRUPT_ENABLED	0x80000000
 
 /* PHY state machine states:
  *
@@ -226,6 +241,8 @@
 
 	u32 dev_flags;
 
+	phy_interface_t interface;
+
 	/* Bus address of the PHY (0-32) */
 	int addr;
 
@@ -341,9 +358,10 @@
 int phy_clear_interrupt(struct phy_device *phydev);
 int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
 struct phy_device * phy_attach(struct net_device *dev,
-		const char *phy_id, u32 flags);
+		const char *phy_id, u32 flags, phy_interface_t interface);
 struct phy_device * phy_connect(struct net_device *dev, const char *phy_id,
-		void (*handler)(struct net_device *), u32 flags);
+		void (*handler)(struct net_device *), u32 flags,
+		phy_interface_t interface);
 void phy_disconnect(struct phy_device *phydev);
 void phy_detach(struct phy_device *phydev);
 void phy_start(struct phy_device *phydev);
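
Editor's note: phy_attach() and phy_connect() now take the MAC-to-PHY interface mode. A sketch of the updated call, assuming phy_connect() still reports failure as an ERR_PTR (bus id and mode are illustrative):

static int example_attach_phy(struct net_device *ndev,
			      void (*adjust_link)(struct net_device *))
{
	struct phy_device *phydev;

	phydev = phy_connect(ndev, "0:01", adjust_link, 0,
			     PHY_INTERFACE_MODE_RGMII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);
	return 0;
}
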
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 29cd6de..20f47b8 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -58,6 +58,12 @@
 extern int platform_driver_register(struct platform_driver *);
 extern void platform_driver_unregister(struct platform_driver *);
 
+/* non-hotpluggable platform devices may use this so that probe() and
+ * its support may live in __init sections, conserving runtime memory.
+ */
+extern int platform_driver_probe(struct platform_driver *driver,
+		int (*probe)(struct platform_device *));
+
 #define platform_get_drvdata(_dev)	dev_get_drvdata(&(_dev)->dev)
 #define platform_set_drvdata(_dev,data)	dev_set_drvdata(&(_dev)->dev, (data))
 
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 51e1b56..2769079 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -8,7 +8,8 @@
 #include <linux/compiler.h>
 #include <linux/wait.h>
 #include <linux/string.h>
-#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
 #include <asm/uaccess.h>
 
 /* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
diff --git a/include/linux/profile.h b/include/linux/profile.h
index acce53f..5670b34 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -6,10 +6,15 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/cpumask.h>
+#include <linux/cache.h>
+
 #include <asm/errno.h>
 
+extern int prof_on __read_mostly;
+
 #define CPU_PROFILING	1
 #define SCHED_PROFILING	2
+#define SLEEP_PROFILING	3
 
 struct proc_dir_entry;
 struct pt_regs;
@@ -18,7 +23,24 @@
 /* init basic kernel profiler */
 void __init profile_init(void);
 void profile_tick(int);
-void profile_hit(int, void *);
+
+/*
+ * Add multiple profiler hits to a given address:
+ */
+void profile_hits(int, void *ip, unsigned int nr_hits);
+
+/*
+ * Single profiler hit:
+ */
+static inline void profile_hit(int type, void *ip)
+{
+	/*
+	 * Speedup for the common (no profiling enabled) case:
+	 */
+	if (unlikely(prof_on == type))
+		profile_hits(type, ip, 1);
+}
+
 #ifdef CONFIG_PROC_FS
 void create_prof_cpu_mask(struct proc_dir_entry *);
 #else
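
Editor's note: profile_hits() lets a caller account several events at once (the new SLEEP_PROFILING type is intended for weighting a hit by how long the task slept), while the profile_hit() wrapper keeps the disabled case to a single inline test. A sketch, assuming the caller already knows the sleep duration in ticks:

static void example_account_sleep(void *caller_ip, unsigned int ticks_slept)
{
	/* one weighted entry instead of ticks_slept separate ones */
	profile_hits(SLEEP_PROFILING, caller_ip, ticks_slept);
}
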
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 5110201..90c23f6 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -37,6 +37,9 @@
 extern int dquot_commit_info(struct super_block *sb, int type);
 extern int dquot_mark_dquot_dirty(struct dquot *dquot);
 
+int remove_inode_dquot_ref(struct inode *inode, int type,
+			   struct list_head *tofree_head);
+
 extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path);
 extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
 		int format_id, int type);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 9158a68..0deb842 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) 2001 Momchil Velikov
  * Portions Copyright (C) 2001 Christoph Hellwig
+ * Copyright (C) 2006 Nick Piggin
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -19,9 +20,37 @@
 #ifndef _LINUX_RADIX_TREE_H
 #define _LINUX_RADIX_TREE_H
 
-#include <linux/sched.h>
 #include <linux/preempt.h>
 #include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+
+/*
+ * A direct pointer (root->rnode pointing directly to a data item,
+ * rather than another radix_tree_node) is signalled by the low bit
+ * set in the root->rnode pointer.
+ *
+ * In this case root->height is also NULL, but the direct pointer tests are
+ * needed for RCU lookups when root->height is unreliable.
+ */
+#define RADIX_TREE_DIRECT_PTR	1
+
+static inline void *radix_tree_ptr_to_direct(void *ptr)
+{
+	return (void *)((unsigned long)ptr | RADIX_TREE_DIRECT_PTR);
+}
+
+static inline void *radix_tree_direct_to_ptr(void *ptr)
+{
+	return (void *)((unsigned long)ptr & ~RADIX_TREE_DIRECT_PTR);
+}
+
+static inline int radix_tree_is_direct_ptr(void *ptr)
+{
+	return (int)((unsigned long)ptr & RADIX_TREE_DIRECT_PTR);
+}
+
+/*** radix-tree API starts here ***/
 
 #define RADIX_TREE_MAX_TAGS 2
 
@@ -48,6 +77,77 @@
 	(root)->rnode = NULL;						\
 } while (0)
 
+/**
+ * Radix-tree synchronization
+ *
+ * The radix-tree API requires that users provide all synchronisation (with
+ * specific exceptions, noted below).
+ *
+ * Synchronization of access to the data items being stored in the tree, and
+ * management of their lifetimes must be completely managed by API users.
+ *
+ * For API usage, in general,
+ * - any function _modifying_ the tree or tags (inserting or deleting
+ *   items, setting or clearing tags) must exclude other modifications, and
+ *   exclude any functions reading the tree.
+ * - any function _reading_ the tree or tags (looking up items or tags,
+ *   gang lookups) must exclude modifications to the tree, but may occur
+ *   concurrently with other readers.
+ *
+ * The notable exceptions to this rule are the following functions:
+ * radix_tree_lookup
+ * radix_tree_tag_get
+ * radix_tree_gang_lookup
+ * radix_tree_gang_lookup_tag
+ * radix_tree_tagged
+ *
+ * The first 4 functions are able to be called locklessly, using RCU. The
+ * caller must ensure calls to these functions are made within rcu_read_lock()
+ * regions. Other readers (lock-free or otherwise) and modifications may be
+ * running concurrently.
+ *
+ * It is still required that the caller manage the synchronization and lifetimes
+ * of the items. So if RCU lock-free lookups are used, typically this would mean
+ * that the items have their own locks, or are amenable to lock-free access; and
+ * that the items are freed by RCU (or only freed after having been deleted from
+ * the radix tree *and* a synchronize_rcu() grace period).
+ *
+ * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
+ * access to data items when inserting into or looking up from the radix tree)
+ *
+ * radix_tree_tagged is able to be called without locking or RCU.
+ */
+
+/**
+ * radix_tree_deref_slot	- dereference a slot
+ * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
+ * Returns:	item that was stored in that slot with any direct pointer flag
+ *		removed.
+ *
+ * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
+ * locked across slot lookup and dereference.  More likely, will be used with
+ * radix_tree_replace_slot(), as well, so caller will hold tree write locked.
+ */
+static inline void *radix_tree_deref_slot(void **pslot)
+{
+	return radix_tree_direct_to_ptr(*pslot);
+}
+/**
+ * radix_tree_replace_slot	- replace item in a slot
+ * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
+ * @item:	new item to store in the slot.
+ *
+ * For use with radix_tree_lookup_slot().  Caller must hold tree write locked
+ * across slot lookup and replacement.
+ */
+static inline void radix_tree_replace_slot(void **pslot, void *item)
+{
+	BUG_ON(radix_tree_is_direct_ptr(item));
+	rcu_assign_pointer(*pslot,
+		(void *)((unsigned long)item |
+			((unsigned long)*pslot & RADIX_TREE_DIRECT_PTR)));
+}
+
 int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
 void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
 void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
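
Editor's note: the new slot helpers pair with radix_tree_lookup_slot(); per the synchronization rules above, the caller holds the tree write-locked across the lookup and the replacement. A minimal sketch:

static void example_replace(struct radix_tree_root *root,
			    unsigned long index, void *new_item)
{
	void **slot;

	/* caller holds the tree's write lock here */
	slot = radix_tree_lookup_slot(root, index);
	if (slot && radix_tree_deref_slot(slot))
		radix_tree_replace_slot(slot, new_item);
}
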
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index f13299a..03636d7 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -235,7 +235,7 @@
 	 */
 	int			active_name;
 	char			cache_name[2][20];
-	kmem_cache_t		*slab_cache; /* for allocating stripes */
+	struct kmem_cache		*slab_cache; /* for allocating stripes */
 
 	int			seq_flush, seq_write;
 	int			quiesce;
diff --git a/include/linux/random.h b/include/linux/random.h
index 0248b30..01ad710 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -51,16 +51,16 @@
 extern void get_random_bytes(void *buf, int nbytes);
 void generate_random_uuid(unsigned char uuid_out[16]);
 
-extern __u32 secure_ip_id(__u32 daddr);
-extern u32 secure_ipv4_port_ephemeral(__u32 saddr, __u32 daddr, __u16 dport);
-extern u32 secure_ipv6_port_ephemeral(const __u32 *saddr, const __u32 *daddr, 
-				      __u16 dport);
-extern __u32 secure_tcp_sequence_number(__u32 saddr, __u32 daddr,
-					__u16 sport, __u16 dport);
-extern __u32 secure_tcpv6_sequence_number(__u32 *saddr, __u32 *daddr,
-					  __u16 sport, __u16 dport);
-extern u64 secure_dccp_sequence_number(__u32 saddr, __u32 daddr,
-				       __u16 sport, __u16 dport);
+extern __u32 secure_ip_id(__be32 daddr);
+extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+				      __be16 dport);
+extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+					__be16 sport, __be16 dport);
+extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+					  __be16 sport, __be16 dport);
+extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+				       __be16 sport, __be16 dport);
 
 #ifndef MODULE
 extern struct file_operations random_fops, urandom_fops;
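
Editor's note: with the __be32/__be16 prototypes above, callers can pass address and port fields straight from the packet headers, which are already big-endian. A sketch, assuming the usual <linux/ip.h> and <linux/tcp.h> struct layouts:

static __u32 example_initial_seq(const struct iphdr *iph,
				 const struct tcphdr *th)
{
	return secure_tcp_sequence_number(iph->saddr, iph->daddr,
					  th->source, th->dest);
}
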
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 7bc6bfb..d0e4dce 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -739,7 +739,7 @@
 #define PUT_B_FREE_SPACE(p_s_bh,val)  do { set_blkh_free_space(B_BLK_HEAD(p_s_bh),val); } while (0)
 
 /* Get right delimiting key. -- little endian */
-#define B_PRIGHT_DELIM_KEY(p_s_bh)   (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh))
+#define B_PRIGHT_DELIM_KEY(p_s_bh)   (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh))))
 
 /* Does the buffer contain a disk leaf. */
 #define B_IS_ITEMS_LEVEL(p_s_bh)     (B_LEVEL(p_s_bh) == DISK_LEAF_NODE_LEVEL)
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index 73e0bec..3a28742 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -249,7 +249,8 @@
 	int j_errno;
 
 	/* when flushing ordered buffers, throttle new ordered writers */
-	struct work_struct j_work;
+	struct delayed_work j_work;
+	struct super_block *j_work_sb;
 	atomic_t j_async_throttle;
 };
 
@@ -429,7 +430,7 @@
 /* -o hash={tea, rupasov, r5, detect} is meant for properly mounting 
 ** reiserfs disks from 3.5.19 or earlier.  99% of the time, this option
 ** is not required.  If the normal autodetection code can't determine which
-** hash to use (because both hases had the same value for a file)
+** hash to use (because both hashes had the same value for a file)
 ** use this option to force a specific hash.  It won't allow you to override
 ** the existing hash on the FS, so if you have a tea hash disk, and mount
 ** with -o hash=rupasov, the mount will fail.
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 24accb4..c6a48bf 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -38,7 +38,7 @@
 	size_t subbufs_consumed;	/* count of sub-buffers consumed */
 	struct rchan *chan;		/* associated channel */
 	wait_queue_head_t read_wait;	/* reader wait queue */
-	struct work_struct wake_readers; /* reader wake-up work struct */
+	struct delayed_work wake_readers; /* reader wake-up work struct */
 	struct dentry *dentry;		/* channel file dentry */
 	struct kref kref;		/* channel buffer refcount */
 	struct page **page_array;	/* array of current buffer pages */
@@ -274,7 +274,7 @@
 /*
  * exported relay file operations, kernel/relay.c
  */
-extern struct file_operations relay_file_operations;
+extern const struct file_operations relay_file_operations;
 
 #endif /* _LINUX_RELAY_H */
 
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index db2c1df..36f8503 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -30,11 +30,11 @@
 
 #ifdef CONFIG_MMU
 
-extern kmem_cache_t *anon_vma_cachep;
+extern struct kmem_cache *anon_vma_cachep;
 
 static inline struct anon_vma *anon_vma_alloc(void)
 {
-	return kmem_cache_alloc(anon_vma_cachep, SLAB_KERNEL);
+	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
 }
 
 static inline void anon_vma_free(struct anon_vma *anon_vma)
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 5d41dee..b0090e9 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -63,7 +63,7 @@
 #endif
 
 #define __RT_MUTEX_INITIALIZER(mutexname) \
-	{ .wait_lock = SPIN_LOCK_UNLOCKED \
+	{ .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
 	, .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
 	, .owner = NULL \
 	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 3a18add..493297a 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -81,8 +81,6 @@
 
 	RTM_NEWPREFIX	= 52,
 #define RTM_NEWPREFIX	RTM_NEWPREFIX
-	RTM_GETPREFIX	= 54,
-#define RTM_GETPREFIX	RTM_GETPREFIX
 
 	RTM_GETMULTICAST = 58,
 #define RTM_GETMULTICAST RTM_GETMULTICAST
@@ -587,6 +585,9 @@
 		       struct nlmsghdr *nlh, gfp_t flags);
 extern void rtnl_set_sk_err(u32 group, int error);
 extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
+extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
+			      u32 id, u32 ts, u32 tsage, long expires,
+			      u32 error);
 
 extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
 
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index ae1fcad..813cee1 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -44,7 +44,8 @@
 #endif
 
 #define __RWSEM_INITIALIZER(name) \
-{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
+  __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index eafe4a7..dede82c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -194,7 +194,16 @@
 
 extern cpumask_t nohz_cpu_mask;
 
-extern void show_state(void);
+/*
+ * Only dump TASK_* tasks. (-1 for all tasks)
+ */
+extern void show_state_filter(unsigned long state_filter);
+
+static inline void show_state(void)
+{
+	show_state_filter(-1);
+}
+
 extern void show_regs(struct pt_regs *);
 
 /*
@@ -338,15 +347,23 @@
 
 	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
 
-	unsigned dumpable:2;
 	cpumask_t cpu_vm_mask;
 
 	/* Architecture-specific MM context */
 	mm_context_t context;
 
-	/* Token based thrashing protection. */
-	unsigned long swap_token_time;
-	char recent_pagein;
+	/* Swap token stuff */
+	/*
+	 * Last value of global fault stamp as seen by this process.
+	 * In other words, this value gives an indication of how long
+	 * it has been since this task got the token.
+	 * Look at mm/thrash.c
+	 */
+	unsigned int faultstamp;
+	unsigned int token_priority;
+	unsigned int last_interval;
+
+	unsigned char dumpable:2;
 
 	/* coredumping support */
 	int core_waiters;
@@ -556,7 +573,7 @@
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
 #ifdef CONFIG_SCHEDSTATS
-extern struct file_operations proc_schedstat_operations;
+extern const struct file_operations proc_schedstat_operations;
 #endif /* CONFIG_SCHEDSTATS */
 
 #ifdef CONFIG_TASK_DELAY_ACCT
@@ -1288,7 +1305,6 @@
 extern int kill_pid(struct pid *pid, int sig, int priv);
 extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
 extern int kill_pg_info(int, struct siginfo *, pid_t);
-extern int kill_proc_info(int, struct siginfo *, pid_t);
 extern void do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
@@ -1610,87 +1626,6 @@
 
 extern void normalize_rt_tasks(void);
 
-#ifdef CONFIG_PM
-/*
- * Check if a process has been frozen
- */
-static inline int frozen(struct task_struct *p)
-{
-	return p->flags & PF_FROZEN;
-}
-
-/*
- * Check if there is a request to freeze a process
- */
-static inline int freezing(struct task_struct *p)
-{
-	return p->flags & PF_FREEZE;
-}
-
-/*
- * Request that a process be frozen
- * FIXME: SMP problem. We may not modify other process' flags!
- */
-static inline void freeze(struct task_struct *p)
-{
-	p->flags |= PF_FREEZE;
-}
-
-/*
- * Sometimes we may need to cancel the previous 'freeze' request
- */
-static inline void do_not_freeze(struct task_struct *p)
-{
-	p->flags &= ~PF_FREEZE;
-}
-
-/*
- * Wake up a frozen process
- */
-static inline int thaw_process(struct task_struct *p)
-{
-	if (frozen(p)) {
-		p->flags &= ~PF_FROZEN;
-		wake_up_process(p);
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * freezing is complete, mark process as frozen
- */
-static inline void frozen_process(struct task_struct *p)
-{
-	p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN;
-}
-
-extern void refrigerator(void);
-extern int freeze_processes(void);
-extern void thaw_processes(void);
-
-static inline int try_to_freeze(void)
-{
-	if (freezing(current)) {
-		refrigerator();
-		return 1;
-	} else
-		return 0;
-}
-#else
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void freeze(struct task_struct *p) { BUG(); }
-static inline int thaw_process(struct task_struct *p) { return 1; }
-static inline void frozen_process(struct task_struct *p) { BUG(); }
-
-static inline void refrigerator(void) {}
-static inline int freeze_processes(void) { BUG(); return 0; }
-static inline void thaw_processes(void) {}
-
-static inline int try_to_freeze(void) { return 0; }
-
-#endif /* CONFIG_PM */
 #endif /* __KERNEL__ */
 
 #endif
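
Editor's note: show_state_filter() narrows the old show_state() dump to tasks in the given state; show_state() itself becomes the -1 (all states) case. A sketch of the filtered form:

static void example_dump_blocked_tasks(void)
{
	/* only tasks stuck in uninterruptible sleep */
	show_state_filter(TASK_UNINTERRUPTIBLE);
}
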
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index 2925e66..b02308e 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -42,7 +42,8 @@
 	u16 pages;		/* 0x32 */
 	u16 vesa_attributes;	/* 0x34 */
 	u32 capabilities;       /* 0x36 */
-				/* 0x3a -- 0x3f reserved for future expansion */
+				/* 0x3a -- 0x3b reserved for future expansion */
+				/* 0x3c -- 0x3f micro stack for relocatable kernels */
 };
 
 extern struct screen_info screen_info;
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 6ec66de..35108fe 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -57,17 +57,17 @@
 
 /* Section 3.1.  SCTP Common Header Format */
 typedef struct sctphdr {
-	__u16 source;
-	__u16 dest;
-	__u32 vtag;
-	__u32 checksum;
+	__be16 source;
+	__be16 dest;
+	__be32 vtag;
+	__be32 checksum;
 } __attribute__((packed)) sctp_sctphdr_t;
 
 /* Section 3.2.  Chunk Field Descriptions. */
 typedef struct sctp_chunkhdr {
 	__u8 type;
 	__u8 flags;
-	__u16 length;
+	__be16 length;
 } __attribute__((packed)) sctp_chunkhdr_t;
 
 
@@ -153,8 +153,8 @@
  */
 
 typedef struct sctp_paramhdr {
-	__u16 type;
-	__u16 length;
+	__be16 type;
+	__be16 length;
 } __attribute__((packed)) sctp_paramhdr_t;
 
 typedef enum {
@@ -203,10 +203,10 @@
 /* RFC 2960 Section 3.3.1 Payload Data (DATA) (0) */
 
 typedef struct sctp_datahdr {
-	__u32 tsn;
-	__u16 stream;
-	__u16 ssn;
-	__u32 ppid;
+	__be32 tsn;
+	__be16 stream;
+	__be16 ssn;
+	__be32 ppid;
 	__u8  payload[0];
 } __attribute__((packed)) sctp_datahdr_t;
 
@@ -232,11 +232,11 @@
  *  endpoints.
  */
 typedef struct sctp_inithdr {
-	__u32 init_tag;
-	__u32 a_rwnd;
-	__u16 num_outbound_streams;
-	__u16 num_inbound_streams;
-	__u32 initial_tsn;
+	__be32 init_tag;
+	__be32 a_rwnd;
+	__be16 num_outbound_streams;
+	__be16 num_inbound_streams;
+	__be32 initial_tsn;
 	__u8  params[0];
 } __attribute__((packed)) sctp_inithdr_t;
 
@@ -261,7 +261,7 @@
 /* Section 3.3.2.1 Cookie Preservative (9) */
 typedef struct sctp_cookie_preserve_param {
 	sctp_paramhdr_t param_hdr;
-	uint32_t        lifespan_increment;
+	__be32          lifespan_increment;
 } __attribute__((packed)) sctp_cookie_preserve_param_t;
 
 /* Section 3.3.2.1 Host Name Address (11) */
@@ -273,7 +273,7 @@
 /* Section 3.3.2.1 Supported Address Types (12) */
 typedef struct sctp_supported_addrs_param {
 	sctp_paramhdr_t param_hdr;
-	uint16_t types[0];
+	__be16 types[0];
 } __attribute__((packed)) sctp_supported_addrs_param_t;
 
 /* Appendix A. ECN Capable (32768) */
@@ -284,7 +284,7 @@
 /* ADDIP Section 3.2.6 Adaption Layer Indication */
 typedef struct sctp_adaption_ind_param {
 	struct sctp_paramhdr param_hdr;
-	__u32 adaption_ind;
+	__be32 adaption_ind;
 } __attribute__((packed)) sctp_adaption_ind_param_t;
 
 /* RFC 2960.  Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2):
@@ -316,11 +316,11 @@
  */
 
 typedef struct sctp_gap_ack_block {
-	__u16 start;
-	__u16 end;
+	__be16 start;
+	__be16 end;
 } __attribute__((packed)) sctp_gap_ack_block_t;
 
-typedef uint32_t sctp_dup_tsn_t;
+typedef __be32 sctp_dup_tsn_t;
 
 typedef union {
 	sctp_gap_ack_block_t	gab;
@@ -328,10 +328,10 @@
 } sctp_sack_variable_t;
 
 typedef struct sctp_sackhdr {
-	__u32 cum_tsn_ack;
-	__u32 a_rwnd;
-	__u16 num_gap_ack_blocks;
-	__u16 num_dup_tsns;
+	__be32 cum_tsn_ack;
+	__be32 a_rwnd;
+	__be16 num_gap_ack_blocks;
+	__be16 num_dup_tsns;
 	sctp_sack_variable_t variable[0];
 } __attribute__((packed)) sctp_sackhdr_t;
 
@@ -371,7 +371,7 @@
  * and the highest consecutive acking value.
  */
 typedef struct sctp_shutdownhdr {
-	__u32 cum_tsn_ack;
+	__be32 cum_tsn_ack;
 } __attribute__((packed)) sctp_shutdownhdr_t;
 
 struct sctp_shutdown_chunk_t {
@@ -382,8 +382,8 @@
 /* RFC 2960.  Section 3.3.10 Operation Error (ERROR) (9) */
 
 typedef struct sctp_errhdr {
-	__u16 cause;
-	__u16 length;
+	__be16 cause;
+	__be16 length;
 	__u8  variable[0];
 } __attribute__((packed)) sctp_errhdr_t;
 
@@ -462,7 +462,7 @@
  *   Explicit Congestion Notification Echo (ECNE) (12)
  */
 typedef struct sctp_ecnehdr {
-	__u32 lowest_tsn;
+	__be32 lowest_tsn;
 } sctp_ecnehdr_t;
 
 typedef struct sctp_ecne_chunk {
@@ -474,7 +474,7 @@
  *   Congestion Window Reduced (CWR) (13)
  */
 typedef struct sctp_cwrhdr {
-	__u32 lowest_tsn;
+	__be32 lowest_tsn;
 } sctp_cwrhdr_t;
 
 typedef struct sctp_cwr_chunk {
@@ -529,12 +529,12 @@
  *       chunks this field MUST be filled in.
  */
 struct sctp_fwdtsn_skip {
-	__u16 stream;
-	__u16 ssn;
+	__be16 stream;
+	__be16 ssn;
 } __attribute__((packed));
 
 struct sctp_fwdtsn_hdr {
-	__u32 new_cum_tsn;
+	__be32 new_cum_tsn;
 	struct sctp_fwdtsn_skip skip[0];
 } __attribute((packed));
 
@@ -578,11 +578,11 @@
  */
 typedef struct sctp_addip_param {
 	sctp_paramhdr_t	param_hdr;
-	__u32		crr_id;	
+	__be32		crr_id;
 } __attribute__((packed)) sctp_addip_param_t;
 
 typedef struct sctp_addiphdr {
-	__u32	serial;
+	__be32	serial;
 	__u8	params[0];
 } __attribute__((packed)) sctp_addiphdr_t;
 
diff --git a/include/linux/security.h b/include/linux/security.h
index b200b98..83cdefa 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -826,6 +826,8 @@
  *	Sets the openreq's sid to socket's sid with MLS portion taken from peer sid.
  * @inet_csk_clone:
  *	Sets the new child socket's sid to the openreq sid.
+ * @inet_conn_established:
+ *     Sets the connection's peersid to the secmark on skb.
  * @req_classify_flow:
  *	Sets the flow's sid to the openreq sid.
  *
@@ -836,10 +838,8 @@
  *	used by the XFRM system.
  *	@sec_ctx contains the security context information being provided by
  *	the user-level policy update program (e.g., setkey).
- *	@sk refers to the sock from which to derive the security context.
  *	Allocate a security structure to the xp->security field; the security
- *	field is initialized to NULL when the xfrm_policy is allocated. Only
- *	one of sec_ctx or sock can be specified.
+ *	field is initialized to NULL when the xfrm_policy is allocated.
  *	Return 0 if operation was successful (memory to allocate, legal context)
  * @xfrm_policy_clone_security:
  *	@old contains an existing xfrm_policy in the SPD.
@@ -858,9 +858,6 @@
  *	Database by the XFRM system.
  *	@sec_ctx contains the security context information being provided by
  *	the user-level SA generation program (e.g., setkey or racoon).
- *	@polsec contains the security context information associated with a xfrm
- *	policy rule from which to take the base context. polsec must be NULL
- *	when sec_ctx is specified.
  *	@secid contains the secid from which to take the mls portion of the context.
  *	Allocate a security structure to the x->security field; the security
  *	field is initialized to NULL when the xfrm_state is allocated. Set the
@@ -889,11 +886,6 @@
  *	@xp contains the policy to check for a match.
  *	@fl contains the flow to check for a match.
  *	Return 1 if there is a match.
- * @xfrm_flow_state_match:
- *	@fl contains the flow key to match.
- *	@xfrm points to the xfrm_state to match.
- *	@xp points to the xfrm_policy to match.
- *	Return 1 if there is a match.
  * @xfrm_decode_session:
  *	@skb points to skb to decode.
  *	@secid points to the flow key secid to set.
@@ -1373,25 +1365,24 @@
 	int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb,
 					struct request_sock *req);
 	void (*inet_csk_clone)(struct sock *newsk, const struct request_sock *req);
+	void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb);
 	void (*req_classify_flow)(const struct request_sock *req, struct flowi *fl);
 #endif	/* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 	int (*xfrm_policy_alloc_security) (struct xfrm_policy *xp,
-			struct xfrm_user_sec_ctx *sec_ctx, struct sock *sk);
+			struct xfrm_user_sec_ctx *sec_ctx);
 	int (*xfrm_policy_clone_security) (struct xfrm_policy *old, struct xfrm_policy *new);
 	void (*xfrm_policy_free_security) (struct xfrm_policy *xp);
 	int (*xfrm_policy_delete_security) (struct xfrm_policy *xp);
 	int (*xfrm_state_alloc_security) (struct xfrm_state *x,
-		struct xfrm_user_sec_ctx *sec_ctx, struct xfrm_sec_ctx *polsec,
+		struct xfrm_user_sec_ctx *sec_ctx,
 		u32 secid);
 	void (*xfrm_state_free_security) (struct xfrm_state *x);
 	int (*xfrm_state_delete_security) (struct xfrm_state *x);
 	int (*xfrm_policy_lookup)(struct xfrm_policy *xp, u32 fl_secid, u8 dir);
 	int (*xfrm_state_pol_flow_match)(struct xfrm_state *x,
 			struct xfrm_policy *xp, struct flowi *fl);
-	int (*xfrm_flow_state_match)(struct flowi *fl, struct xfrm_state *xfrm,
-			struct xfrm_policy *xp);
 	int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall);
 #endif	/* CONFIG_SECURITY_NETWORK_XFRM */
 
@@ -2966,9 +2957,15 @@
 {
 	security_ops->inet_csk_clone(newsk, req);
 }
+
+static inline void security_inet_conn_established(struct sock *sk,
+			struct sk_buff *skb)
+{
+	security_ops->inet_conn_established(sk, skb);
+}
 #else	/* CONFIG_SECURITY_NETWORK */
 static inline int security_unix_stream_connect(struct socket * sock,
-					       struct socket * other, 
+					       struct socket * other,
 					       struct sock * newsk)
 {
 	return 0;
@@ -3115,12 +3112,17 @@
 			const struct request_sock *req)
 {
 }
+
+static inline void security_inet_conn_established(struct sock *sk,
+			struct sk_buff *skb)
+{
+}
 #endif	/* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 static inline int security_xfrm_policy_alloc(struct xfrm_policy *xp, struct xfrm_user_sec_ctx *sec_ctx)
 {
-	return security_ops->xfrm_policy_alloc_security(xp, sec_ctx, NULL);
+	return security_ops->xfrm_policy_alloc_security(xp, sec_ctx);
 }
 
 static inline int security_xfrm_policy_clone(struct xfrm_policy *old, struct xfrm_policy *new)
@@ -3141,7 +3143,7 @@
 static inline int security_xfrm_state_alloc(struct xfrm_state *x,
 			struct xfrm_user_sec_ctx *sec_ctx)
 {
-	return security_ops->xfrm_state_alloc_security(x, sec_ctx, NULL, 0);
+	return security_ops->xfrm_state_alloc_security(x, sec_ctx, 0);
 }
 
 static inline int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
@@ -3149,7 +3151,11 @@
 {
 	if (!polsec)
 		return 0;
-	return security_ops->xfrm_state_alloc_security(x, NULL, polsec, secid);
+	/*
+	 * We want the context to be taken from secid which is usually
+	 * from the sock.
+	 */
+	return security_ops->xfrm_state_alloc_security(x, NULL, secid);
 }
 
 static inline int security_xfrm_state_delete(struct xfrm_state *x)
@@ -3173,12 +3179,6 @@
 	return security_ops->xfrm_state_pol_flow_match(x, xp, fl);
 }
 
-static inline int security_xfrm_flow_state_match(struct flowi *fl,
-			struct xfrm_state *xfrm, struct xfrm_policy *xp)
-{
-	return security_ops->xfrm_flow_state_match(fl, xfrm, xp);
-}
-
 static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
 {
 	return security_ops->xfrm_decode_session(skb, secid, 1);
@@ -3242,12 +3242,6 @@
 	return 1;
 }
 
-static inline int security_xfrm_flow_state_match(struct flowi *fl,
-			struct xfrm_state *xfrm, struct xfrm_policy *xp)
-{
-	return 1;
-}
-
 static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
 {
 	return 0;
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index b95f6eb..3e3cccb 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -20,7 +20,7 @@
 	loff_t index;
 	loff_t version;
 	struct mutex lock;
-	struct seq_operations *op;
+	const struct seq_operations *op;
 	void *private;
 };
 
@@ -31,7 +31,7 @@
 	int (*show) (struct seq_file *m, void *v);
 };
 
-int seq_open(struct file *, struct seq_operations *);
+int seq_open(struct file *, const struct seq_operations *);
 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
 loff_t seq_lseek(struct file *, loff_t, int);
 int seq_release(struct inode *, struct file *);
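
Note on the seq_file constification above: a provider's operations table can now live in read-only data. A minimal sketch, not part of this diff (all demo_* names are hypothetical), of an iterator handing a const table to seq_open():

	#include <linux/fs.h>
	#include <linux/seq_file.h>

	/* Hypothetical no-op iterator callbacks. */
	static void *demo_seq_start(struct seq_file *m, loff_t *pos) { return NULL; }
	static void *demo_seq_next(struct seq_file *m, void *v, loff_t *pos) { return NULL; }
	static void demo_seq_stop(struct seq_file *m, void *v) { }
	static int demo_seq_show(struct seq_file *m, void *v) { return 0; }

	/* Can now be const: both seq_open() and seq_file->op take a const pointer. */
	static const struct seq_operations demo_seq_ops = {
		.start = demo_seq_start,
		.next  = demo_seq_next,
		.stop  = demo_seq_stop,
		.show  = demo_seq_show,
	};

	static int demo_open(struct inode *inode, struct file *file)
	{
		return seq_open(file, &demo_seq_ops);
	}
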
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 8e96814..71310d8 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -41,6 +41,7 @@
 	PLAT8250_DEV_FOURPORT,
 	PLAT8250_DEV_ACCENT,
 	PLAT8250_DEV_BOCA,
+	PLAT8250_DEV_EXAR_ST16C554,
 	PLAT8250_DEV_HUB6,
 	PLAT8250_DEV_MCA,
 	PLAT8250_DEV_AU1X00,
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 463ab95..8276721 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -132,6 +132,8 @@
 
 #define PORT_S3C2412	73
 
+/* Xilinx uartlite */
+#define PORT_UARTLITE	74
 
 #ifdef __KERNEL__
 
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 117135e..1474905 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -241,6 +241,8 @@
 struct pt_regs;
 extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
 
+extern struct kmem_cache *sighand_cachep;
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_SIGNAL_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 85577a4..4ff3940 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -22,12 +22,10 @@
 #include <asm/atomic.h>
 #include <asm/types.h>
 #include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/poll.h>
 #include <linux/net.h>
 #include <linux/textsearch.h>
 #include <net/checksum.h>
+#include <linux/rcupdate.h>
 #include <linux/dmaengine.h>
 
 #define HAVE_ALLOC_SKB		/* For the drivers to know */
@@ -139,7 +137,7 @@
 	/* Warning: this field is not always filled in (UFO)! */
 	unsigned short	gso_segs;
 	unsigned short  gso_type;
-	unsigned int    ip6_frag_id;
+	__be32          ip6_frag_id;
 	struct sk_buff	*frag_list;
 	skb_frag_t	frags[MAX_SKB_FRAGS];
 };
@@ -216,7 +214,7 @@
  *	@tail: Tail pointer
  *	@end: End pointer
  *	@destructor: Destruct function
- *	@nfmark: Can be used for communication between hooks
+ *	@mark: Generic packet mark
  *	@nfct: Associated connection, if any
  *	@ipvs_property: skbuff is owned by ipvs
  *	@nfctinfo: Relationship of this skb to the connection
@@ -273,8 +271,11 @@
 
 	unsigned int		len,
 				data_len,
-				mac_len,
-				csum;
+				mac_len;
+	union {
+		__wsum		csum;
+		__u32		csum_offset;
+	};
 	__u32			priority;
 	__u8			local_df:1,
 				cloned:1,
@@ -295,7 +296,6 @@
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct nf_bridge_info	*nf_bridge;
 #endif
-	__u32			nfmark;
 #endif /* CONFIG_NETFILTER */
 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
@@ -310,6 +310,7 @@
 	__u32			secmark;
 #endif
 
+	__u32			mark;
 
 	/* These elements must be at the end, see alloc_skb() for details.  */
 	unsigned int		truesize;
@@ -331,20 +332,20 @@
 extern void kfree_skb(struct sk_buff *skb);
 extern void	       __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
-				   gfp_t priority, int fclone);
+				   gfp_t priority, int fclone, int node);
 static inline struct sk_buff *alloc_skb(unsigned int size,
 					gfp_t priority)
 {
-	return __alloc_skb(size, priority, 0);
+	return __alloc_skb(size, priority, 0, -1);
 }
 
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 					       gfp_t priority)
 {
-	return __alloc_skb(size, priority, 1);
+	return __alloc_skb(size, priority, 1, -1);
 }
 
-extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+extern struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
 					    unsigned int size,
 					    gfp_t priority);
 extern void	       kfree_skbmem(struct sk_buff *skb);
@@ -1199,8 +1200,7 @@
 
 	if (skb->ip_summed == CHECKSUM_NONE) {
 		int err = 0;
-		unsigned int csum = csum_and_copy_from_user(from,
-							    skb_put(skb, copy),
+		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
 							    copy, 0, &err);
 		if (!err) {
 			skb->csum = csum_block_add(skb->csum, csum, off);
@@ -1293,24 +1293,6 @@
 	return __pskb_trim(skb, len);
 }
 
-static inline void *kmap_skb_frag(const skb_frag_t *frag)
-{
-#ifdef CONFIG_HIGHMEM
-	BUG_ON(in_irq());
-
-	local_bh_disable();
-#endif
-	return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
-}
-
-static inline void kunmap_skb_frag(void *vaddr)
-{
-	kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
-#ifdef CONFIG_HIGHMEM
-	local_bh_enable();
-#endif
-}
-
 #define skb_queue_walk(queue, skb) \
 		for (skb = (queue)->next;					\
 		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
@@ -1335,15 +1317,15 @@
 extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
 extern void	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
 					 unsigned int flags);
-extern unsigned int    skb_checksum(const struct sk_buff *skb, int offset,
-				    int len, unsigned int csum);
+extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
+				    int len, __wsum csum);
 extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
 				     void *to, int len);
 extern int	       skb_store_bits(const struct sk_buff *skb, int offset,
 				      void *from, int len);
-extern unsigned int    skb_copy_and_csum_bits(const struct sk_buff *skb,
+extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
 					      int offset, u8 *to, int len,
-					      unsigned int csum);
+					      __wsum csum);
 extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
 extern void	       skb_split(struct sk_buff *skb,
 				 struct sk_buff *skb1, const u32 len);
@@ -1399,7 +1381,7 @@
 
 extern void __net_timestamp(struct sk_buff *skb);
 
-extern unsigned int __skb_checksum_complete(struct sk_buff *skb);
+extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
 
 /**
  *	skb_checksum_complete - Calculate checksum of an entire packet
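
One practical effect of the sk_buff changes above: the netfilter-only nfmark field is gone and the unconditional, generic mark field replaces it. A minimal sketch (the demo_* helper is hypothetical, not part of this diff):

	#include <linux/skbuff.h>
	#include <linux/types.h>

	/* Code that used to read skb->nfmark (only present under CONFIG_NETFILTER)
	 * can read the always-present generic mark instead. */
	static u32 demo_classify(const struct sk_buff *skb)
	{
		return skb->mark;
	}
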
diff --git a/include/linux/slab.h b/include/linux/slab.h
index c4947b8..2271886 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -7,27 +7,17 @@
 #ifndef _LINUX_SLAB_H
 #define	_LINUX_SLAB_H
 
-#if	defined(__KERNEL__)
+#ifdef __KERNEL__
 
-typedef struct kmem_cache kmem_cache_t;
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
+#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
+#include <linux/compiler.h>
 
-#include	<linux/gfp.h>
-#include	<linux/init.h>
-#include	<linux/types.h>
-#include	<asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
-#include	<asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
-
-/* flags for kmem_cache_alloc() */
-#define	SLAB_NOFS		GFP_NOFS
-#define	SLAB_NOIO		GFP_NOIO
-#define	SLAB_ATOMIC		GFP_ATOMIC
-#define	SLAB_USER		GFP_USER
-#define	SLAB_KERNEL		GFP_KERNEL
-#define	SLAB_DMA		GFP_DMA
-
-#define SLAB_LEVEL_MASK		GFP_LEVEL_MASK
-
-#define	SLAB_NO_GROW		__GFP_NO_GROW	/* don't grow a cache */
+/* kmem_cache_t exists for legacy reasons and is not used by code in mm */
+typedef struct kmem_cache kmem_cache_t __deprecated;
 
 /* flags to pass to kmem_cache_create().
 * The first 3 are only valid when the allocator has been built
@@ -57,22 +47,23 @@
 /* prototypes */
 extern void __init kmem_cache_init(void);
 
-extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
-				       void (*)(void *, kmem_cache_t *, unsigned long),
-				       void (*)(void *, kmem_cache_t *, unsigned long));
-extern void kmem_cache_destroy(kmem_cache_t *);
-extern int kmem_cache_shrink(kmem_cache_t *);
-extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t);
+extern struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+			unsigned long,
+			void (*)(void *, struct kmem_cache *, unsigned long),
+			void (*)(void *, struct kmem_cache *, unsigned long));
+extern void kmem_cache_destroy(struct kmem_cache *);
+extern int kmem_cache_shrink(struct kmem_cache *);
+extern void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
-extern void kmem_cache_free(kmem_cache_t *, void *);
-extern unsigned int kmem_cache_size(kmem_cache_t *);
-extern const char *kmem_cache_name(kmem_cache_t *);
+extern void kmem_cache_free(struct kmem_cache *, void *);
+extern unsigned int kmem_cache_size(struct kmem_cache *);
+extern const char *kmem_cache_name(struct kmem_cache *);
 
 /* Size description struct for general caches. */
 struct cache_sizes {
-	size_t		 cs_size;
-	kmem_cache_t	*cs_cachep;
-	kmem_cache_t	*cs_dmacachep;
+	size_t		 	cs_size;
+	struct kmem_cache	*cs_cachep;
+	struct kmem_cache	*cs_dmacachep;
 };
 extern struct cache_sizes malloc_sizes[];
 
@@ -211,7 +202,7 @@
 extern int slab_is_available(void);
 
 #ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
+extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 
 static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
@@ -236,8 +227,27 @@
 	}
 	return __kmalloc_node(size, flags, node);
 }
+
+/*
+ * kmalloc_node_track_caller is a special version of kmalloc_node that
+ * records the calling function of the routine calling it for slab leak
+ * tracking instead of just the calling function (confusing, eh?).
+ * It's useful when the call to kmalloc_node comes from a widely-used
+ * standard allocator where we care about the real place the memory
+ * allocation request comes from.
+ */
+#ifndef CONFIG_DEBUG_SLAB
+#define kmalloc_node_track_caller(size, flags, node) \
+	__kmalloc_node(size, flags, node)
 #else
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
+#define kmalloc_node_track_caller(size, flags, node) \
+	__kmalloc_node_track_caller(size, flags, node, \
+			__builtin_return_address(0))
+#endif
+#else /* CONFIG_NUMA */
+static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
+					gfp_t flags, int node)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
@@ -245,10 +255,13 @@
 {
 	return kmalloc(size, flags);
 }
+
+#define kmalloc_node_track_caller(size, flags, node) \
+	kmalloc_track_caller(size, flags)
 #endif
 
 extern int FASTCALL(kmem_cache_reap(int));
-extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
+extern int FASTCALL(kmem_ptr_validate(struct kmem_cache *cachep, void *ptr));
 
 #else /* CONFIG_SLOB */
 
@@ -283,16 +296,9 @@
 #define kzalloc(s, f) __kzalloc(s, f)
 #define kmalloc_track_caller kmalloc
 
-#endif /* CONFIG_SLOB */
+#define kmalloc_node_track_caller kmalloc_node
 
-/* System wide caches */
-extern kmem_cache_t	*vm_area_cachep;
-extern kmem_cache_t	*names_cachep;
-extern kmem_cache_t	*files_cachep;
-extern kmem_cache_t	*filp_cachep;
-extern kmem_cache_t	*fs_cachep;
-extern kmem_cache_t	*sighand_cachep;
-extern kmem_cache_t	*bio_cachep;
+#endif /* CONFIG_SLOB */
 
 #endif	/* __KERNEL__ */
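
With kmem_cache_t deprecated, new code spells the type out, and the hunk above also adds kmalloc_node_track_caller() so NUMA-aware wrappers keep useful slab-leak attribution. A minimal sketch under those assumptions (demo_* names are hypothetical, error handling trimmed):

	#include <linux/errno.h>
	#include <linux/slab.h>

	struct demo_obj {
		int id;
	};

	static struct kmem_cache *demo_cachep;	/* was: kmem_cache_t *demo_cachep */

	static int __init demo_cache_init(void)
	{
		/* ctor and dtor are still separate arguments at this point */
		demo_cachep = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
						0, 0, NULL, NULL);
		return demo_cachep ? 0 : -ENOMEM;
	}

	/* A wrapper that hands out node-local memory on behalf of its callers;
	 * with CONFIG_DEBUG_SLAB the real allocation site is recorded. */
	static void *demo_alloc_on(int node, size_t size)
	{
		return kmalloc_node_track_caller(size, GFP_KERNEL, node);
	}
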
 
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 5164998..7ba23ec 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -99,6 +99,13 @@
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()			1
 #define smp_prepare_boot_cpu()			do {} while (0)
+static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
+				void *info, int retry, int wait)
+{
+	/* Disable interrupts here? */
+	func(info);
+	return 0;
+}
 
 #endif /* !SMP */
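
The new uniprocessor stub means callers no longer need an #ifdef CONFIG_SMP around cross-CPU calls. A minimal sketch (demo_* names hypothetical):

	#include <linux/smp.h>

	static void demo_poke(void *info)
	{
		/* runs on the target CPU, or inline right here on UP kernels */
	}

	static void demo_kick(int cpu)
	{
		/* On !SMP this now compiles and simply calls demo_poke(NULL). */
		smp_call_function_single(cpu, demo_poke, NULL, 0, 1);
	}
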
 
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 3614090..92cd38e 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -264,6 +264,7 @@
 #define SOL_IPV6	41
 #define SOL_ICMPV6	58
 #define SOL_SCTP	132
+#define SOL_UDPLITE	136     /* UDP-Lite (RFC 3828) */
 #define SOL_RAW		255
 #define SOL_IPX		256
 #define SOL_AX25	257
@@ -292,7 +293,7 @@
 extern int csum_partial_copy_fromiovecend(unsigned char *kdata, 
 					  struct iovec *iov, 
 					  int offset, 
-					  unsigned int len, int *csump);
+					  unsigned int len, __wsum *csump);
 
 extern int verify_iovec(struct msghdr *m, struct iovec *iov, char *address, int mode);
 extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
diff --git a/include/linux/sockios.h b/include/linux/sockios.h
index e6b9d1d..abef759 100644
--- a/include/linux/sockios.h
+++ b/include/linux/sockios.h
@@ -72,8 +72,8 @@
 #define SIOCGIFTXQLEN	0x8942		/* Get the tx queue length	*/
 #define SIOCSIFTXQLEN	0x8943		/* Set the tx queue length 	*/
 
-#define SIOCGIFDIVERT	0x8944		/* Frame diversion support */
-#define SIOCSIFDIVERT	0x8945		/* Set frame diversion options */
+/* SIOCGIFDIVERT was:	0x8944		Frame diversion support */
+/* SIOCSIFDIVERT was:	0x8945		Set frame diversion options */
 
 #define SIOCETHTOOL	0x8946		/* Ethtool interface		*/
 
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 8451052..94b767d 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -52,6 +52,7 @@
 #include <linux/thread_info.h>
 #include <linux/kernel.h>
 #include <linux/stringify.h>
+#include <linux/bottom_half.h>
 
 #include <asm/system.h>
 
diff --git a/include/linux/start_kernel.h b/include/linux/start_kernel.h
new file mode 100644
index 0000000..d3e5f27
--- /dev/null
+++ b/include/linux/start_kernel.h
@@ -0,0 +1,12 @@
+#ifndef _LINUX_START_KERNEL_H
+#define _LINUX_START_KERNEL_H
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+/* Define the prototype for start_kernel here, rather than cluttering
+   up something else. */
+
+extern asmlinkage void __init start_kernel(void);
+
+#endif /* _LINUX_START_KERNEL_H */
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h
index 97b62e9..2db2fbf 100644
--- a/include/linux/sunrpc/auth_gss.h
+++ b/include/linux/sunrpc/auth_gss.h
@@ -90,8 +90,6 @@
 #define gc_flags		gc_base.cr_flags
 #define gc_expire		gc_base.cr_expire
 
-void print_hexl(u32 *p, u_int length, u_int offset);
-
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SUNRPC_AUTH_GSS_H */
 
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index f6d1d64..a1be89d 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -53,6 +53,7 @@
 	struct dentry *		cl_dentry;	/* inode */
 	struct rpc_clnt *	cl_parent;	/* Points to parent of clones */
 	struct rpc_rtt		cl_rtt_default;
+	struct rpc_program *	cl_program;
 	char			cl_inline_name[32];
 };
 
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index e4729aa..60fce3c 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -62,12 +62,6 @@
 # define RPC_IFDEBUG(x)
 #endif
 
-#ifdef RPC_PROFILE
-# define pprintk(args...)	printk(## args)
-#else
-# define pprintk(args...)	do ; while (0)
-#endif
-
 /*
  * Sysctl interface for RPC debugging
  */
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index e30ba20..5a4b1e0 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -42,10 +42,6 @@
 
 struct krb5_ctx {
 	int			initiate; /* 1 = initiating, 0 = accepting */
-	int			seed_init;
-	unsigned char		seed[16];
-	int			signalg;
-	int			sealalg;
 	struct crypto_blkcipher	*enc;
 	struct crypto_blkcipher	*seq;
 	s32			endtime;
@@ -117,7 +113,7 @@
 #define ENCTYPE_UNKNOWN         0x01ff
 
 s32
-make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
+make_checksum(char *, char *header, int hdrlen, struct xdr_buf *body,
 		   int body_offset, struct xdr_netobj *cksum);
 
 u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
diff --git a/include/linux/sunrpc/gss_spkm3.h b/include/linux/sunrpc/gss_spkm3.h
index 2cf3fbb..e3e6a34 100644
--- a/include/linux/sunrpc/gss_spkm3.h
+++ b/include/linux/sunrpc/gss_spkm3.h
@@ -12,27 +12,19 @@
 #include <linux/sunrpc/gss_asn1.h>
 
 struct spkm3_ctx {
-	struct xdr_netobj	ctx_id; /* per message context id */
-	int			qop;         /* negotiated qop */
+	struct xdr_netobj	ctx_id;  /* per message context id */
+	int			endtime; /* endtime of the context */
 	struct xdr_netobj	mech_used;
 	unsigned int		ret_flags ;
-	unsigned int		req_flags ;
-	struct xdr_netobj	share_key;
-	int			conf_alg;
-	struct crypto_blkcipher	*derived_conf_key;
-	int			intg_alg;
-	struct crypto_blkcipher	*derived_integ_key;
-	int			keyestb_alg;   /* alg used to get share_key */
-	int			owf_alg;   /* one way function */
+	struct xdr_netobj	conf_alg;
+	struct xdr_netobj	derived_conf_key;
+	struct xdr_netobj	intg_alg;
+	struct xdr_netobj 	derived_integ_key;
 };
 
-/* from openssl/objects.h */
-/* XXX need SEAL_ALG_NONE */
-#define NID_md5		4
-#define NID_dhKeyAgreement	28 
-#define NID_des_cbc		31 
-#define NID_sha1		64
-#define NID_cast5_cbc		108
+/* OIDs declarations for K-ALG, I-ALG, C-ALG, and OWF-ALG */
+extern const struct xdr_netobj hmac_md5_oid;
+extern const struct xdr_netobj cast5_cbc_oid;
 
 /* SPKM InnerContext Token types */
 
@@ -46,11 +38,13 @@
 u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struct xdr_buf *message_buffer, int toktype);
 
 #define CKSUMTYPE_RSA_MD5            0x0007
+#define CKSUMTYPE_HMAC_MD5           0x0008
 
-s32 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
-                   int body_offset, struct xdr_netobj *cksum);
+s32 make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header,
+		unsigned int hdrlen, struct xdr_buf *body,
+		unsigned int body_offset, struct xdr_netobj *cksum);
 void asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits);
-int decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, 
+int decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen,
                    int explen);
 void spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, 
                    unsigned char *ctxhdr, int elen, int zbit);
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index a2eb9b4..4a68125 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -30,7 +30,7 @@
 #define RPC_PIPE_WAIT_FOR_OPEN	1
 	int flags;
 	struct rpc_pipe_ops *ops;
-	struct work_struct queue_timeout;
+	struct delayed_work queue_timeout;
 };
 
 static inline struct rpc_inode *
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index f399c13..97c7616 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -11,6 +11,7 @@
 
 #include <linux/timer.h>
 #include <linux/sunrpc/types.h>
+#include <linux/rcupdate.h>
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
@@ -85,6 +86,7 @@
 	union {
 		struct work_struct	tk_work;	/* Async task work queue */
 		struct rpc_wait		tk_wait;	/* RPC wait */
+		struct rcu_head		tk_rcu;		/* for task deletion */
 	} u;
 
 	unsigned short		tk_timeouts;	/* maj timeouts */
@@ -178,13 +180,6 @@
 	} while (0)
 
 #define RPC_IS_ACTIVATED(t)	(test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate))
-#define rpc_set_active(t)	(set_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate))
-#define rpc_clear_active(t)	\
-	do { \
-		smp_mb__before_clear_bit(); \
-		clear_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate); \
-		smp_mb__after_clear_bit(); \
-	} while(0)
 
 /*
  * Task priorities.
@@ -222,7 +217,7 @@
 
 #ifndef RPC_DEBUG
 # define RPC_WAITQ_INIT(var,qname) { \
-		.lock = SPIN_LOCK_UNLOCKED, \
+		.lock = __SPIN_LOCK_UNLOCKED(var.lock), \
 		.tasks = { \
 			[0] = LIST_HEAD_INIT(var.tasks[0]), \
 			[1] = LIST_HEAD_INIT(var.tasks[1]), \
@@ -231,7 +226,7 @@
 	}
 #else
 # define RPC_WAITQ_INIT(var,qname) { \
-		.lock = SPIN_LOCK_UNLOCKED, \
+		.lock = __SPIN_LOCK_UNLOCKED(var.lock), \
 		.tasks = { \
 			[0] = LIST_HEAD_INIT(var.tasks[0]), \
 			[1] = LIST_HEAD_INIT(var.tasks[1]), \
@@ -254,8 +249,10 @@
 void		rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
 				int flags, const struct rpc_call_ops *ops,
 				void *data);
+void		rpc_put_task(struct rpc_task *);
 void		rpc_release_task(struct rpc_task *);
 void		rpc_exit_task(struct rpc_task *);
+void		rpc_release_calldata(const struct rpc_call_ops *, void *);
 void		rpc_killall_tasks(struct rpc_clnt *);
 int		rpc_execute(struct rpc_task *);
 void		rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index ac69e55..9e340fa 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -11,6 +11,7 @@
 
 #include <linux/uio.h>
 #include <asm/byteorder.h>
+#include <linux/scatterlist.h>
 
 /*
  * Buffer adjustment
@@ -139,29 +140,30 @@
  */
 extern void xdr_shift_buf(struct xdr_buf *, size_t);
 extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
-extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, int, int);
-extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, int);
-extern int read_bytes_from_xdr_buf(struct xdr_buf *, int, void *, int);
-extern int write_bytes_to_xdr_buf(struct xdr_buf *, int, void *, int);
+extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
+extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int);
+extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
+extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
 
 /*
  * Helper structure for copying from an sk_buff.
  */
-typedef struct {
+struct xdr_skb_reader {
 	struct sk_buff	*skb;
 	unsigned int	offset;
 	size_t		count;
-	unsigned int	csum;
-} skb_reader_t;
+	__wsum		csum;
+};
 
-typedef size_t (*skb_read_actor_t)(skb_reader_t *desc, void *to, size_t len);
+typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len);
 
+size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len);
 extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *);
 extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int,
-		skb_reader_t *, skb_read_actor_t);
+		struct xdr_skb_reader *, xdr_skb_read_actor);
 
-extern int xdr_encode_word(struct xdr_buf *, int, u32);
-extern int xdr_decode_word(struct xdr_buf *, int, u32 *);
+extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32);
+extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *);
 
 struct xdr_array2_desc;
 typedef int (*xdr_xcode_elem_t)(struct xdr_array2_desc *desc, void *elem);
@@ -196,6 +198,7 @@
 extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
 extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
 extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
+extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data);
 
 #endif /* __KERNEL__ */
 
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 60394fb..f780e72 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -106,7 +106,6 @@
 
 struct rpc_xprt_ops {
 	void		(*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
-	char *		(*print_addr)(struct rpc_xprt *xprt, enum rpc_display_format_t format);
 	int		(*reserve_xprt)(struct rpc_task *task);
 	void		(*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
 	void		(*rpcbind)(struct rpc_task *task);
@@ -126,8 +125,6 @@
 struct rpc_xprt {
 	struct kref		kref;		/* Reference count */
 	struct rpc_xprt_ops *	ops;		/* transport methods */
-	struct socket *		sock;		/* BSD socket layer */
-	struct sock *		inet;		/* INET layer */
 
 	struct rpc_timeout	timeout;	/* timeout parms */
 	struct sockaddr_storage	addr;		/* server address */
@@ -137,9 +134,6 @@
 	unsigned long		cong;		/* current congestion */
 	unsigned long		cwnd;		/* congestion window */
 
-	size_t			rcvsize,	/* transport rcv buffer size */
-				sndsize;	/* transport send buffer size */
-
 	size_t			max_payload;	/* largest RPC payload size,
 						   in bytes */
 	unsigned int		tsh_size;	/* size of transport specific
@@ -158,27 +152,11 @@
 				resvport   : 1; /* use a reserved port */
 
 	/*
-	 * XID
-	 */
-	__u32			xid;		/* Next XID value to use */
-
-	/*
-	 * State of TCP reply receive stuff
-	 */
-	__be32			tcp_recm,	/* Fragment header */
-				tcp_xid;	/* Current XID */
-	u32			tcp_reclen,	/* fragment length */
-				tcp_offset;	/* fragment offset */
-	unsigned long		tcp_copied,	/* copied to request */
-				tcp_flags;
-	/*
 	 * Connection of transports
 	 */
 	unsigned long		connect_timeout,
 				bind_timeout,
 				reestablish_timeout;
-	struct work_struct	connect_worker;
-	unsigned short		port;
 
 	/*
 	 * Disconnection of idle transports
@@ -193,8 +171,8 @@
 	 */
 	spinlock_t		transport_lock;	/* lock transport info */
 	spinlock_t		reserve_lock;	/* lock slot table */
+	u32			xid;		/* Next XID value to use */
 	struct rpc_task *	snd_task;	/* Task blocked in send */
-
 	struct list_head	recv;
 
 	struct {
@@ -210,18 +188,9 @@
 					bklog_u;	/* backlog queue utilization */
 	} stat;
 
-	void			(*old_data_ready)(struct sock *, int);
-	void			(*old_state_change)(struct sock *);
-	void			(*old_write_space)(struct sock *);
-
 	char *			address_strings[RPC_DISPLAY_MAX];
 };
 
-#define XPRT_LAST_FRAG		(1 << 0)
-#define XPRT_COPY_RECM		(1 << 1)
-#define XPRT_COPY_XID		(1 << 2)
-#define XPRT_COPY_DATA		(1 << 3)
-
 #ifdef __KERNEL__
 
 /*
@@ -270,8 +239,8 @@
 /*
  * Socket transport setup operations
  */
-int			xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to);
-int			xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to);
+struct rpc_xprt *	xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to);
+struct rpc_xprt *	xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to);
 
 /*
  * Reserved bit positions in xprt->state
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index b1237f1..bf99bd4 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -9,10 +9,13 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 
-/* page backup entry */
+/* struct pbe is used for creating lists of pages that should be restored
+ * atomically during the resume from disk, because the page frames they have
+ * occupied before the suspend are in use.
+ */
 struct pbe {
-	unsigned long address;		/* address of the copy */
-	unsigned long orig_address;	/* original address of page */
+	void *address;		/* address of the copy */
+	void *orig_address;	/* original address of a page */
 	struct pbe *next;
 };
 
diff --git a/include/linux/swap.h b/include/linux/swap.h
index e7c36ba..add51ce 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -218,8 +218,6 @@
 /* linux/mm/page_io.c */
 extern int swap_readpage(struct file *, struct page *);
 extern int swap_writepage(struct page *page, struct writeback_control *wbc);
-extern int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page,
-				struct bio **bio_chain);
 extern int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err);
 
 /* linux/mm/swap_state.c */
@@ -247,9 +245,10 @@
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern void swap_free(swp_entry_t);
 extern void free_swap_and_cache(swp_entry_t);
-extern int swap_type_of(dev_t);
+extern int swap_type_of(dev_t, sector_t);
 extern unsigned int count_swap_pages(int, int);
 extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
+extern sector_t swapdev_block(int, pgoff_t);
 extern struct swap_info_struct *get_swap_info_struct(unsigned);
 extern int can_share_swap_page(struct page *);
 extern int remove_exclusive_swap_page(struct page *);
@@ -259,7 +258,6 @@
 
 /* linux/mm/thrash.c */
 extern struct mm_struct * swap_token_mm;
-extern unsigned long swap_token_default_timeout;
 extern void grab_swap_token(void);
 extern void __put_swap_token(struct mm_struct *);
 
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index d98562f..94316a9 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -426,6 +426,8 @@
 	NET_CIPSOV4_CACHE_BUCKET_SIZE=119,
 	NET_CIPSOV4_RBM_OPTFMT=120,
 	NET_CIPSOV4_RBM_STRICTVALID=121,
+	NET_TCP_AVAIL_CONG_CONTROL=122,
+	NET_TCP_ALLOWED_CONG_CONTROL=123,
 };
 
 enum {
@@ -604,16 +606,6 @@
 	NET_DCCP_DEFAULT=1,
 };
 
-/* /proc/sys/net/dccp/default */
-enum {
-	NET_DCCP_DEFAULT_SEQ_WINDOW  = 1,
-	NET_DCCP_DEFAULT_RX_CCID     = 2,
-	NET_DCCP_DEFAULT_TX_CCID     = 3,
-	NET_DCCP_DEFAULT_ACK_RATIO   = 4,
-	NET_DCCP_DEFAULT_SEND_ACKVEC = 5,
-	NET_DCCP_DEFAULT_SEND_NDP    = 6,
-};
-
 /* /proc/sys/net/ipx */
 enum {
 	NET_IPX_PPROP_BROADCASTING=1,
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 6d5c43d..2129d1b 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -97,6 +97,9 @@
 sysfs_rename_dir(struct kobject *, const char *new_name);
 
 extern int __must_check
+sysfs_move_dir(struct kobject *, struct kobject *);
+
+extern int __must_check
 sysfs_create_file(struct kobject *, const struct attribute *);
 
 extern int __must_check
@@ -142,6 +145,11 @@
 	return 0;
 }
 
+static inline int sysfs_move_dir(struct kobject * k, struct kobject * new_parent)
+{
+	return 0;
+}
+
 static inline int sysfs_create_file(struct kobject * k, const struct attribute * a)
 {
 	return 0;
diff --git a/include/linux/taskstats_kern.h b/include/linux/taskstats_kern.h
index 6562a20..7e9680f 100644
--- a/include/linux/taskstats_kern.h
+++ b/include/linux/taskstats_kern.h
@@ -12,64 +12,27 @@
 #include <net/genetlink.h>
 
 #ifdef CONFIG_TASKSTATS
-extern kmem_cache_t *taskstats_cache;
+extern struct kmem_cache *taskstats_cache;
 extern struct mutex taskstats_exit_mutex;
 
-static inline void taskstats_exit_free(struct taskstats *tidstats)
-{
-	if (tidstats)
-		kmem_cache_free(taskstats_cache, tidstats);
-}
-
 static inline void taskstats_tgid_init(struct signal_struct *sig)
 {
 	sig->stats = NULL;
 }
 
-static inline void taskstats_tgid_alloc(struct task_struct *tsk)
-{
-	struct signal_struct *sig = tsk->signal;
-	struct taskstats *stats;
-
-	if (sig->stats != NULL)
-		return;
-
-	/* No problem if kmem_cache_zalloc() fails */
-	stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
-
-	spin_lock_irq(&tsk->sighand->siglock);
-	if (!sig->stats) {
-		sig->stats = stats;
-		stats = NULL;
-	}
-	spin_unlock_irq(&tsk->sighand->siglock);
-
-	if (stats)
-		kmem_cache_free(taskstats_cache, stats);
-}
-
 static inline void taskstats_tgid_free(struct signal_struct *sig)
 {
 	if (sig->stats)
 		kmem_cache_free(taskstats_cache, sig->stats);
 }
 
-extern void taskstats_exit_alloc(struct taskstats **, unsigned int *);
-extern void taskstats_exit_send(struct task_struct *, struct taskstats *, int, unsigned int);
+extern void taskstats_exit(struct task_struct *, int group_dead);
 extern void taskstats_init_early(void);
 #else
-static inline void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
-{}
-static inline void taskstats_exit_free(struct taskstats *ptidstats)
-{}
-static inline void taskstats_exit_send(struct task_struct *tsk,
-				       struct taskstats *tidstats,
-				       int group_dead, unsigned int cpu)
+static inline void taskstats_exit(struct task_struct *tsk, int group_dead)
 {}
 static inline void taskstats_tgid_init(struct signal_struct *sig)
 {}
-static inline void taskstats_tgid_alloc(struct task_struct *tsk)
-{}
 static inline void taskstats_tgid_free(struct signal_struct *sig)
 {}
 static inline void taskstats_init_early(void)
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 2d36f6d..3cc70d1 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -19,6 +19,7 @@
 
 #include <linux/types.h>
 #include <asm/byteorder.h>
+#include <linux/socket.h>
 
 struct tcphdr {
 	__be16	source;
@@ -51,7 +52,7 @@
 #error	"Adjust your <asm/byteorder.h> defines"
 #endif	
 	__be16	window;
-	__be16	check;
+	__sum16	check;
 	__be16	urg_ptr;
 };
 
@@ -94,6 +95,7 @@
 #define TCP_INFO		11	/* Information about this connection. */
 #define TCP_QUICKACK		12	/* Block/reenable quick acks */
 #define TCP_CONGESTION		13	/* Congestion control algorithm */
+#define TCP_MD5SIG		14	/* TCP MD5 Signature (RFC2385) */
 
 #define TCPI_OPT_TIMESTAMPS	1
 #define TCPI_OPT_SACK		2
@@ -157,6 +159,17 @@
 	__u32	tcpi_total_retrans;
 };
 
+/* for TCP_MD5SIG socket option */
+#define TCP_MD5SIG_MAXKEYLEN	80
+
+struct tcp_md5sig {
+	struct __kernel_sockaddr_storage tcpm_addr;	/* address associated */
+	__u16	__tcpm_pad1;				/* zero */
+	__u16	tcpm_keylen;				/* key length */
+	__u32	__tcpm_pad2;				/* zero */
+	__u8	tcpm_key[TCP_MD5SIG_MAXKEYLEN];		/* key (binary) */
+};
+
 #ifdef __KERNEL__
 
 #include <linux/skbuff.h>
@@ -172,17 +185,17 @@
 };
 
 struct tcp_sack_block {
-	__u32	start_seq;
-	__u32	end_seq;
+	u32	start_seq;
+	u32	end_seq;
 };
 
 struct tcp_options_received {
 /*	PAWS/RTTM data	*/
 	long	ts_recent_stamp;/* Time we stored ts_recent (for aging) */
-	__u32	ts_recent;	/* Time stamp to echo next		*/
-	__u32	rcv_tsval;	/* Time stamp value             	*/
-	__u32	rcv_tsecr;	/* Time stamp echo reply        	*/
-	__u16 	saw_tstamp : 1,	/* Saw TIMESTAMP on last packet		*/
+	u32	ts_recent;	/* Time stamp to echo next		*/
+	u32	rcv_tsval;	/* Time stamp value             	*/
+	u32	rcv_tsecr;	/* Time stamp echo reply        	*/
+	u16 	saw_tstamp : 1,	/* Saw TIMESTAMP on last packet		*/
 		tstamp_ok : 1,	/* TIMESTAMP seen on SYN packet		*/
 		dsack : 1,	/* D-SACK is scheduled			*/
 		wscale_ok : 1,	/* Wscale seen on SYN packet		*/
@@ -190,16 +203,20 @@
 		snd_wscale : 4,	/* Window scaling received from sender	*/
 		rcv_wscale : 4;	/* Window scaling to send to receiver	*/
 /*	SACKs data	*/
-	__u8	eff_sacks;	/* Size of SACK array to send with next packet */
-	__u8	num_sacks;	/* Number of SACK blocks		*/
-	__u16	user_mss;  	/* mss requested by user in ioctl */
-	__u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
+	u8	eff_sacks;	/* Size of SACK array to send with next packet */
+	u8	num_sacks;	/* Number of SACK blocks		*/
+	u16	user_mss;  	/* mss requested by user in ioctl */
+	u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
 };
 
 struct tcp_request_sock {
-	struct inet_request_sock req;
-	__u32			 rcv_isn;
-	__u32			 snt_isn;
+	struct inet_request_sock 	req;
+#ifdef CONFIG_TCP_MD5SIG
+	/* Only used by TCP MD5 Signature so far. */
+	struct tcp_request_sock_ops	*af_specific;
+#endif
+	u32			 	rcv_isn;
+	u32			 	snt_isn;
 };
 
 static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
@@ -210,7 +227,8 @@
 struct tcp_sock {
 	/* inet_connection_sock has to be the first member of tcp_sock */
 	struct inet_connection_sock	inet_conn;
-	int	tcp_header_len;	/* Bytes of tcp header to send		*/
+	u16	tcp_header_len;	/* Bytes of tcp header to send		*/
+	u16	xmit_size_goal;	/* Goal for segmenting output packets	*/
 
 /*
  *	Header prediction flags
@@ -223,13 +241,13 @@
  *	read the code and the spec side by side (and laugh ...)
  *	See RFC793 and RFC1122. The RFC writes these in capitals.
  */
- 	__u32	rcv_nxt;	/* What we want to receive next 	*/
- 	__u32	snd_nxt;	/* Next sequence we send		*/
+ 	u32	rcv_nxt;	/* What we want to receive next 	*/
+ 	u32	snd_nxt;	/* Next sequence we send		*/
 
- 	__u32	snd_una;	/* First byte we want an ack for	*/
- 	__u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
-	__u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
-	__u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */
+ 	u32	snd_una;	/* First byte we want an ack for	*/
+ 	u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
+	u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
+	u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */
 
 	/* Data for direct copy to user */
 	struct {
@@ -247,32 +265,30 @@
 #endif
 	} ucopy;
 
-	__u32	snd_wl1;	/* Sequence for window update		*/
-	__u32	snd_wnd;	/* The window we expect to receive	*/
-	__u32	max_window;	/* Maximal window ever seen from peer	*/
-	__u32	mss_cache;	/* Cached effective mss, not including SACKS */
-	__u16	xmit_size_goal;	/* Goal for segmenting output packets	*/
-	/* XXX Two bytes hole, try to pack */
+	u32	snd_wl1;	/* Sequence for window update		*/
+	u32	snd_wnd;	/* The window we expect to receive	*/
+	u32	max_window;	/* Maximal window ever seen from peer	*/
+	u32	mss_cache;	/* Cached effective mss, not including SACKS */
 
-	__u32	window_clamp;	/* Maximal window to advertise		*/
-	__u32	rcv_ssthresh;	/* Current window clamp			*/
+	u32	window_clamp;	/* Maximal window to advertise		*/
+	u32	rcv_ssthresh;	/* Current window clamp			*/
 
-	__u32	frto_highmark;	/* snd_nxt when RTO occurred */
-	__u8	reordering;	/* Packet reordering metric.		*/
-	__u8	frto_counter;	/* Number of new acks after RTO */
-	__u8	nonagle;	/* Disable Nagle algorithm?             */
-	__u8	keepalive_probes; /* num of allowed keep alive probes	*/
+	u32	frto_highmark;	/* snd_nxt when RTO occurred */
+	u8	reordering;	/* Packet reordering metric.		*/
+	u8	frto_counter;	/* Number of new acks after RTO */
+	u8	nonagle;	/* Disable Nagle algorithm?             */
+	u8	keepalive_probes; /* num of allowed keep alive probes	*/
 
 /* RTT measurement */
-	__u32	srtt;		/* smoothed round trip time << 3	*/
-	__u32	mdev;		/* medium deviation			*/
-	__u32	mdev_max;	/* maximal mdev for the last rtt period	*/
-	__u32	rttvar;		/* smoothed mdev_max			*/
-	__u32	rtt_seq;	/* sequence number to update rttvar	*/
+	u32	srtt;		/* smoothed round trip time << 3	*/
+	u32	mdev;		/* medium deviation			*/
+	u32	mdev_max;	/* maximal mdev for the last rtt period	*/
+	u32	rttvar;		/* smoothed mdev_max			*/
+	u32	rtt_seq;	/* sequence number to update rttvar	*/
 
-	__u32	packets_out;	/* Packets which are "in flight"	*/
-	__u32	left_out;	/* Packets which leaved network	*/
-	__u32	retrans_out;	/* Retransmitted packets out		*/
+	u32	packets_out;	/* Packets which are "in flight"	*/
+	u32	left_out;	/* Packets which left the network */
+	u32	retrans_out;	/* Retransmitted packets out		*/
 /*
  *      Options received (usually on last packet, some only on SYN packets).
  */
@@ -281,20 +297,20 @@
 /*
  *	Slow start and congestion control (see also Nagle, and Karn & Partridge)
  */
- 	__u32	snd_ssthresh;	/* Slow start size threshold		*/
- 	__u32	snd_cwnd;	/* Sending congestion window		*/
- 	__u16	snd_cwnd_cnt;	/* Linear increase counter		*/
-	__u16	snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
-	__u32	snd_cwnd_used;
-	__u32	snd_cwnd_stamp;
+ 	u32	snd_ssthresh;	/* Slow start size threshold		*/
+ 	u32	snd_cwnd;	/* Sending congestion window		*/
+ 	u16	snd_cwnd_cnt;	/* Linear increase counter		*/
+	u16	snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
+	u32	snd_cwnd_used;
+	u32	snd_cwnd_stamp;
 
 	struct sk_buff_head	out_of_order_queue; /* Out of order segments go here */
 
- 	__u32	rcv_wnd;	/* Current receiver window		*/
-	__u32	rcv_wup;	/* rcv_nxt on last window update sent	*/
-	__u32	write_seq;	/* Tail(+1) of data held in tcp send buffer */
-	__u32	pushed_seq;	/* Last pushed seq, required to talk to windows */
-	__u32	copied_seq;	/* Head of yet unread data		*/
+ 	u32	rcv_wnd;	/* Current receiver window		*/
+	u32	rcv_wup;	/* rcv_nxt on last window update sent	*/
+	u32	write_seq;	/* Tail(+1) of data held in tcp send buffer */
+	u32	pushed_seq;	/* Last pushed seq, required to talk to windows */
+	u32	copied_seq;	/* Head of yet unread data		*/
 
 /*	SACKs data	*/
 	struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
@@ -315,26 +331,26 @@
 	int     retransmit_cnt_hint;
 	int     forward_cnt_hint;
 
-	__u16	advmss;		/* Advertised MSS			*/
-	__u16	prior_ssthresh; /* ssthresh saved at recovery start	*/
-	__u32	lost_out;	/* Lost packets			*/
-	__u32	sacked_out;	/* SACK'd packets			*/
-	__u32	fackets_out;	/* FACK'd packets			*/
-	__u32	high_seq;	/* snd_nxt at onset of congestion	*/
+	u16	advmss;		/* Advertised MSS			*/
+	u16	prior_ssthresh; /* ssthresh saved at recovery start	*/
+	u32	lost_out;	/* Lost packets			*/
+	u32	sacked_out;	/* SACK'd packets			*/
+	u32	fackets_out;	/* FACK'd packets			*/
+	u32	high_seq;	/* snd_nxt at onset of congestion	*/
 
-	__u32	retrans_stamp;	/* Timestamp of the last retransmit,
+	u32	retrans_stamp;	/* Timestamp of the last retransmit,
 				 * also used in SYN-SENT to remember stamp of
 				 * the first SYN. */
-	__u32	undo_marker;	/* tracking retrans started here. */
+	u32	undo_marker;	/* tracking retrans started here. */
 	int	undo_retrans;	/* number of undoable retransmissions. */
-	__u32	urg_seq;	/* Seq of received urgent pointer */
-	__u16	urg_data;	/* Saved octet of OOB data and control flags */
-	__u8	urg_mode;	/* In urgent mode		*/
-	__u8	ecn_flags;	/* ECN status bits.			*/
-	__u32	snd_up;		/* Urgent pointer		*/
+	u32	urg_seq;	/* Seq of received urgent pointer */
+	u16	urg_data;	/* Saved octet of OOB data and control flags */
+	u8	urg_mode;	/* In urgent mode		*/
+	u8	ecn_flags;	/* ECN status bits.			*/
+	u32	snd_up;		/* Urgent pointer		*/
 
-	__u32	total_retrans;	/* Total retransmits for entire connection */
-	__u32	bytes_acked;	/* Appropriate Byte Counting - RFC3465 */
+	u32	total_retrans;	/* Total retransmits for entire connection */
+	u32	bytes_acked;	/* Appropriate Byte Counting - RFC3465 */
 
 	unsigned int		keepalive_time;	  /* time before keep alive takes place */
 	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */
@@ -342,27 +358,35 @@
 
 	unsigned long last_synq_overflow; 
 
-	__u32	tso_deferred;
+	u32	tso_deferred;
 
 /* Receiver side RTT estimation */
 	struct {
-		__u32	rtt;
-		__u32	seq;
-		__u32	time;
+		u32	rtt;
+		u32	seq;
+		u32	time;
 	} rcv_rtt_est;
 
 /* Receiver queue space */
 	struct {
 		int	space;
-		__u32	seq;
-		__u32	time;
+		u32	seq;
+		u32	time;
 	} rcvq_space;
 
 /* TCP-specific MTU probe information. */
 	struct {
-		__u32		  probe_seq_start;
-		__u32		  probe_seq_end;
+		u32		  probe_seq_start;
+		u32		  probe_seq_end;
 	} mtu_probe;
+
+#ifdef CONFIG_TCP_MD5SIG
+/* TCP AF-Specific parts; only used by MD5 Signature support so far */
+	struct tcp_sock_af_ops	*af_specific;
+
+/* TCP MD5 Signature Option information */
+	struct tcp_md5sig_info	*md5sig_info;
+#endif
 };
 
 static inline struct tcp_sock *tcp_sk(const struct sock *sk)
@@ -372,11 +396,15 @@
 
 struct tcp_timewait_sock {
 	struct inet_timewait_sock tw_sk;
-	__u32			  tw_rcv_nxt;
-	__u32			  tw_snd_nxt;
-	__u32			  tw_rcv_wnd;
-	__u32			  tw_ts_recent;
+	u32			  tw_rcv_nxt;
+	u32			  tw_snd_nxt;
+	u32			  tw_rcv_wnd;
+	u32			  tw_ts_recent;
 	long			  tw_ts_recent_stamp;
+#ifdef CONFIG_TCP_MD5SIG
+	u16			  tw_md5_keylen;
+	u8			  tw_md5_key[TCP_MD5SIG_MAXKEYLEN];
+#endif
 };
 
 static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
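
For the new TCP_MD5SIG option, userspace loads a per-peer key before connecting or listening. A hedged sketch (the demo_* helper is illustrative only; older libc headers may need <linux/tcp.h> for struct tcp_md5sig):

	#include <string.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>

	static int demo_set_tcp_md5(int fd, const struct sockaddr_in *peer,
				    const char *key)
	{
		struct tcp_md5sig md5;
		size_t len = strlen(key);

		if (len > TCP_MD5SIG_MAXKEYLEN)
			return -1;

		memset(&md5, 0, sizeof(md5));
		memcpy(&md5.tcpm_addr, peer, sizeof(*peer));	/* address the key protects */
		md5.tcpm_keylen = len;
		memcpy(md5.tcpm_key, key, len);

		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
	}
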
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h
index 7dac8f0..004808a 100644
--- a/include/linux/textsearch.h
+++ b/include/linux/textsearch.h
@@ -20,7 +20,7 @@
 /**
  * struct ts_state - search state
  * @offset: offset for next match
- * @cb: control buffer, for persistant variables of get_next_block()
+ * @cb: control buffer, for persistent variables of get_next_block()
  */
 struct ts_state
 {
@@ -71,7 +71,7 @@
 	 * Called repeatedly until 0 is returned. Must assign the
 	 * head of the next block of data to &*dst and return the length
 	 * of the block or 0 if at the end. consumed == 0 indicates
-	 * a new search. May store/read persistant values in state->cb.
+	 * a new search. May store/read persistent values in state->cb.
 	 */
 	unsigned int		(*get_next_block)(unsigned int consumed,
 						  const u8 **dst,
diff --git a/include/linux/tfrc.h b/include/linux/tfrc.h
index 7dab783..31a9b25 100644
--- a/include/linux/tfrc.h
+++ b/include/linux/tfrc.h
@@ -1,7 +1,8 @@
 #ifndef _LINUX_TFRC_H_
 #define _LINUX_TFRC_H_
 /*
- *  include/linux/tfrc.h
+ *  TFRC - Data Structures for the TCP-Friendly Rate Control congestion
+ *         control mechanism as specified in RFC 3448.
  *
  *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
  *  Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
@@ -13,15 +14,30 @@
  *  the Free Software Foundation; either version 2 of the License, or
  *  (at your option) any later version.
  */
-
 #include <linux/types.h>
 
+/** 	tfrc_rx_info    -    TFRC Receiver Data Structure
+ *
+ * 	@tfrcrx_x_recv:	receiver estimate of sending rate (3.2.2)
+ * 	@tfrcrx_rtt:	round-trip-time (communicated by sender)
+ * 	@tfrcrx_p:	current estimate of loss event rate (3.2.2)
+ */
 struct tfrc_rx_info {
   	__u32 tfrcrx_x_recv;
 	__u32 tfrcrx_rtt;
   	__u32 tfrcrx_p;
 };
 
+/** 	tfrc_tx_info    -    TFRC Sender Data Structure
+ *
+ * 	@tfrctx_x:	computed transmit rate (4.3 (4))
+ * 	@tfrctx_x_recv: receiver estimate of send rate (4.3)
+ * 	@tfrctx_x_calc:	return value of throughput equation (3.1)
+ * 	@tfrctx_rtt:	(moving average) estimate of RTT (4.3)
+ * 	@tfrctx_p:	current loss event rate (5.4)
+ * 	@tfrctx_rto:	estimate of RTO, equals 4*RTT (4.3)
+ * 	@tfrctx_ipi:	inter-packet interval (4.6)
+ */
 struct tfrc_tx_info {
 	__u32 tfrctx_x;
 	__u32 tfrctx_x_recv;
diff --git a/include/linux/tipc_config.h b/include/linux/tipc_config.h
index 33a6539..b0c916d 100644
--- a/include/linux/tipc_config.h
+++ b/include/linux/tipc_config.h
@@ -194,34 +194,34 @@
 
 
 struct tipc_node_info {
-	__u32 addr;			/* network address of node */
-	__u32 up;			/* 0=down, 1= up */
+	__be32 addr;			/* network address of node */
+	__be32 up;			/* 0=down, 1= up */
 };
 
 struct tipc_link_info {
-	__u32 dest;			/* network address of peer node */
-	__u32 up;			/* 0=down, 1=up */
+	__be32 dest;			/* network address of peer node */
+	__be32 up;			/* 0=down, 1=up */
 	char str[TIPC_MAX_LINK_NAME];	/* link name */
 };
 
 struct tipc_bearer_config {
-	__u32 priority;			/* Range [1,31]. Override per link  */
-	__u32 detect_scope;     
+	__be32 priority;		/* Range [1,31]. Override per link  */
+	__be32 detect_scope;
 	char name[TIPC_MAX_BEARER_NAME];
 };
 
 struct tipc_link_config {
-	__u32 value;
+	__be32 value;
 	char name[TIPC_MAX_LINK_NAME];
 };
 
 #define TIPC_NTQ_ALLTYPES 0x80000000
 
 struct tipc_name_table_query {
-	__u32 depth;	/* 1:type, 2:+name info, 3:+port info, 4+:+debug info */
-	__u32 type;	/* {t,l,u} info ignored if high bit of "depth" is set */
-	__u32 lowbound; /* (i.e. displays all entries of name table) */
-	__u32 upbound;
+	__be32 depth;	/* 1:type, 2:+name info, 3:+port info, 4+:+debug info */
+	__be32 type;	/* {t,l,u} info ignored if high bit of "depth" is set */
+	__be32 lowbound; /* (i.e. displays all entries of name table) */
+	__be32 upbound;
 };
 
 /*
@@ -262,8 +262,8 @@
  */
 
 struct tlv_desc {
-	__u16 tlv_len;		/* TLV length (descriptor + value) */
-	__u16 tlv_type;		/* TLV identifier */
+	__be16 tlv_len;		/* TLV length (descriptor + value) */
+	__be16 tlv_type;		/* TLV identifier */
 };
 
 #define TLV_ALIGNTO 4
@@ -377,9 +377,9 @@
 
 struct tipc_cfg_msg_hdr
 {
-	__u32 tcm_len;		/* Message length (including header) */
-	__u16 tcm_type;		/* Command type */
-	__u16 tcm_flags;	/* Additional flags */
+	__be32 tcm_len;		/* Message length (including header) */
+	__be16 tcm_type;	/* Command type */
+	__be16 tcm_flags;	/* Additional flags */
 	char  tcm_reserved[8];	/* Unused */
 };
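
Since the TIPC configuration structures above travel on the wire, their fields are now typed __be32/__be16, which makes sparse insist on explicit byte-order conversions. A minimal sketch (demo_* helper hypothetical):

	#include <linux/tipc_config.h>
	#include <linux/types.h>
	#include <asm/byteorder.h>

	static void demo_fill_node_info(struct tipc_node_info *ni, u32 addr, int up)
	{
		/* Assigning a plain host-order u32 here would now draw a sparse warning. */
		ni->addr = htonl(addr);
		ni->up   = htonl(up ? 1 : 0);
	}
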
 
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 44091c0..f717f08 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -53,7 +53,7 @@
 };
 
 struct tty_bufhead {
-	struct work_struct		work;
+	struct delayed_work work;
 	struct semaphore pty_sem;
 	spinlock_t lock;
 	struct tty_buffer *head;	/* Queue head */
@@ -276,9 +276,8 @@
 extern int tty_unregister_ldisc(int disc);
 extern int tty_register_driver(struct tty_driver *driver);
 extern int tty_unregister_driver(struct tty_driver *driver);
-extern struct class_device *tty_register_device(struct tty_driver *driver,
-						unsigned index,
-						struct device *dev);
+extern struct device *tty_register_device(struct tty_driver *driver,
+					  unsigned index, struct device *dev);
 extern void tty_unregister_device(struct tty_driver *driver, unsigned index);
 extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp,
 			     int buflen);
diff --git a/include/linux/types.h b/include/linux/types.h
index 750f085..0351bf2 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -128,21 +128,27 @@
 
 /* this is a special 64bit data type that is 8-byte aligned */
 #define aligned_u64 unsigned long long __attribute__((aligned(8)))
+#define aligned_be64 __be64 __attribute__((aligned(8)))
+#define aligned_le64 __le64 __attribute__((aligned(8)))
 
 /**
  * The type used for indexing onto a disc or disc partition.
  *
  * Linux always considers sectors to be 512 bytes long independently
 * of the device's real block size.
- *
- * If required, asm/types.h can override it and define
- * HAVE_SECTOR_T
  */
-#ifndef HAVE_SECTOR_T
+#ifdef CONFIG_LBD
+typedef u64 sector_t;
+#else
 typedef unsigned long sector_t;
 #endif
 
-#ifndef HAVE_BLKCNT_T
+/*
+ * The type of the inode's block count.
+ */
+#ifdef CONFIG_LSF
+typedef u64 blkcnt_t;
+#else
 typedef unsigned long blkcnt_t;
 #endif
 
@@ -180,6 +186,8 @@
 typedef __u64 __bitwise __le64;
 typedef __u64 __bitwise __be64;
 #endif
+typedef __u16 __bitwise __sum16;
+typedef __u32 __bitwise __wsum;
 
 #ifdef __KERNEL__
 typedef unsigned __bitwise__ gfp_t;
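
The two new bitwise types exist so sparse (make C=1) can tell a 32-bit partial checksum apart from a folded 16-bit one. A minimal sketch of the distinction (demo_* name hypothetical):

	#include <linux/types.h>
	#include <net/checksum.h>

	/* Returning "partial" directly would warn under sparse: __wsum and
	 * __sum16 are distinct bitwise types, so the fold must be explicit. */
	static __sum16 demo_finish_csum(__wsum partial)
	{
		return csum_fold(partial);
	}
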
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index a48d7f1..975c963 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -1,8 +1,43 @@
 #ifndef __LINUX_UACCESS_H__
 #define __LINUX_UACCESS_H__
 
+#include <linux/preempt.h>
 #include <asm/uaccess.h>
 
+/*
+ * These routines enable/disable the pagefault handler in that
+ * it will not take any locks and go straight to the fixup table.
+ *
+ * They have great resemblance to the preempt_disable/enable calls
+ * and in fact they are identical; this is because currently there is
+ * no other way to make the pagefault handlers do this. So we do
+ * disable preemption but we don't necessarily care about that.
+ */
+static inline void pagefault_disable(void)
+{
+	inc_preempt_count();
+	/*
+	 * make sure to have issued the store before a pagefault
+	 * can hit.
+	 */
+	barrier();
+}
+
+static inline void pagefault_enable(void)
+{
+	/*
+	 * make sure to issue those last loads/stores before enabling
+	 * the pagefault handler again.
+	 */
+	barrier();
+	dec_preempt_count();
+	/*
+	 * make sure we do..
+	 */
+	barrier();
+	preempt_check_resched();
+}
+
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
@@ -30,14 +65,22 @@
  * do_page_fault() doesn't attempt to take mmap_sem.  This makes
  * probe_kernel_address() suitable for use within regions where the caller
  * already holds mmap_sem, or other locks which nest inside mmap_sem.
+ * This must be a macro because __get_user() needs to know the types of the
+ * args.
+ *
+ * We don't include enough header files to be able to do the set_fs().  We
+ * require that the probe_kernel_address() caller will do that.
  */
 #define probe_kernel_address(addr, retval)		\
 	({						\
 		long ret;				\
+		mm_segment_t old_fs = get_fs();		\
 							\
-		inc_preempt_count();			\
-		ret = __get_user(retval, addr);		\
-		dec_preempt_count();			\
+		set_fs(KERNEL_DS);			\
+		pagefault_disable();			\
+		ret = __get_user(retval, (__force typeof(retval) __user *)(addr));		\
+		pagefault_enable();			\
+		set_fs(old_fs);				\
 		ret;					\
 	})
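
probe_kernel_address() wraps a __get_user() in pagefault_disable()/pagefault_enable(), so a bad kernel pointer yields an error instead of a fault that might recurse into locking code. A minimal sketch of a caller (demo_* name hypothetical):

	#include <linux/errno.h>
	#include <linux/uaccess.h>

	static int demo_peek(const unsigned long *addr, unsigned long *val)
	{
		unsigned long v;

		if (probe_kernel_address(addr, v))
			return -EFAULT;	/* address was not mapped; no lock was taken */

		*val = v;
		return 0;
	}
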
 
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 014b41d..7e08c07 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -23,7 +23,7 @@
 	__be16	source;
 	__be16	dest;
 	__be16	len;
-	__be16	check;
+	__sum16	check;
 };
 
 /* UDP socket options */
@@ -38,6 +38,7 @@
 #include <linux/types.h>
 
 #include <net/inet_sock.h>
+#define UDP_HTABLE_SIZE		128
 
 struct udp_sock {
 	/* inet_sock has to be the first member */
@@ -50,12 +51,23 @@
 	 * when the socket is uncorked.
 	 */
 	__u16		 len;		/* total length of pending frames */
+	/*
+	 * Fields specific to UDP-Lite.
+	 */
+	__u16		 pcslen;
+	__u16		 pcrlen;
+/* indicator bits used by pcflag: */
+#define UDPLITE_BIT      0x1  		/* set by udplite proto init function */
+#define UDPLITE_SEND_CC  0x2  		/* set via udplite setsockopt         */
+#define UDPLITE_RECV_CC  0x4		/* set via udplite setsockopt         */
+	__u8		 pcflag;        /* marks socket as UDP-Lite if > 0    */
 };
 
 static inline struct udp_sock *udp_sk(const struct sock *sk)
 {
 	return (struct udp_sock *)sk;
 }
+#define IS_UDPLITE(__sk) (udp_sk(__sk)->pcflag)
 
 #endif
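
The new UDP-Lite fields let shared UDP code branch on the socket flavour. A minimal sketch (demo_* helper hypothetical) of how a transmit path might test for partial checksum coverage:

	#include <linux/udp.h>
	#include <net/sock.h>

	static int demo_wants_partial_csum(const struct sock *sk)
	{
		const struct udp_sock *up = udp_sk(sk);

		return IS_UDPLITE(sk) && (up->pcflag & UDPLITE_SEND_CC) &&
		       up->pcslen > 0;
	}
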
 
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 5482bfb..aab5b1b 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -313,8 +313,13 @@
 /* This is arbitrary.
  * From USB 2.0 spec Table 11-13, offset 7, a hub can
  * have up to 255 ports. The most yet reported is 10.
+ *
+ * Current Wireless USB host hardware (Intel i1480 for example) allows
+ * up to 22 devices to connect. Upcoming hardware might raise that
+ * limit. Because the arrays need to add a bit for hub status data, we
+ * do 31, so plus one evens out to four bytes.
  */
-#define USB_MAXCHILDREN		(16)
+#define USB_MAXCHILDREN		(31)
 
 struct usb_tt;
 
@@ -357,7 +362,8 @@
 	u8 portnum;			/* Parent port number (origin 1) */
 	u8 level;			/* Number of USB hub ancestors */
 
-	int have_langid;		/* whether string_langid is valid */
+	unsigned discon_suspended:1;	/* Disconnected while suspended */
+	unsigned have_langid:1;		/* whether string_langid is valid */
 	int string_langid;		/* language ID for strings */
 
 	/* static strings from the device */
@@ -382,7 +388,7 @@
 
 	int pm_usage_cnt;		/* usage counter for autosuspend */
 #ifdef CONFIG_PM
-	struct work_struct autosuspend;	/* for delayed autosuspends */
+	struct delayed_work autosuspend; /* for delayed autosuspends */
 	struct mutex pm_mutex;		/* protects PM operations */
 
 	unsigned auto_pm:1;		/* autosuspend/resume in progress */
@@ -410,14 +416,37 @@
 
 /* USB autosuspend and autoresume */
 #ifdef CONFIG_USB_SUSPEND
+extern int usb_autopm_set_interface(struct usb_interface *intf);
 extern int usb_autopm_get_interface(struct usb_interface *intf);
 extern void usb_autopm_put_interface(struct usb_interface *intf);
 
-#else
-#define usb_autopm_get_interface(intf)		0
-#define usb_autopm_put_interface(intf)		do {} while (0)
-#endif
+static inline void usb_autopm_enable(struct usb_interface *intf)
+{
+	intf->pm_usage_cnt = 0;
+	usb_autopm_set_interface(intf);
+}
 
+static inline void usb_autopm_disable(struct usb_interface *intf)
+{
+	intf->pm_usage_cnt = 1;
+	usb_autopm_set_interface(intf);
+}
+
+#else
+
+static inline int usb_autopm_set_interface(struct usb_interface *intf)
+{ return 0; }
+
+static inline int usb_autopm_get_interface(struct usb_interface *intf)
+{ return 0; }
+
+static inline void usb_autopm_put_interface(struct usb_interface *intf)
+{ }
+static inline void usb_autopm_enable(struct usb_interface *intf)
+{ }
+static inline void usb_autopm_disable(struct usb_interface *intf)
+{ }
+#endif
 
 /*-------------------------------------------------------------------------*/
 
@@ -490,17 +519,137 @@
 
 /*-------------------------------------------------------------------------*/
 
-extern int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd);
-extern int usb_endpoint_dir_out(const struct usb_endpoint_descriptor *epd);
-extern int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd);
-extern int usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd);
-extern int usb_endpoint_xfer_isoc(const struct usb_endpoint_descriptor *epd);
-extern int usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd);
-extern int usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd);
-extern int usb_endpoint_is_int_in(const struct usb_endpoint_descriptor *epd);
-extern int usb_endpoint_is_int_out(const struct usb_endpoint_descriptor *epd);
-extern int usb_endpoint_is_isoc_in(const struct usb_endpoint_descriptor *epd);
-extern int usb_endpoint_is_isoc_out(const struct usb_endpoint_descriptor *epd);
+/**
+ * usb_endpoint_dir_in - check if the endpoint has IN direction
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type IN, otherwise it returns false.
+ */
+static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
+{
+	return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN);
+}
+
+/**
+ * usb_endpoint_dir_out - check if the endpoint has OUT direction
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type OUT, otherwise it returns false.
+ */
+static inline int usb_endpoint_dir_out(const struct usb_endpoint_descriptor *epd)
+{
+	return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
+}
+
+/**
+ * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type bulk, otherwise it returns false.
+ */
+static inline int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd)
+{
+	return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+		USB_ENDPOINT_XFER_BULK);
+}
+
+/**
+ * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type interrupt, otherwise it returns
+ * false.
+ */
+static inline int usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd)
+{
+	return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+		USB_ENDPOINT_XFER_INT);
+}
+
+/**
+ * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type isochronous, otherwise it returns
+ * false.
+ */
+static inline int usb_endpoint_xfer_isoc(const struct usb_endpoint_descriptor *epd)
+{
+	return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+		USB_ENDPOINT_XFER_ISOC);
+}
+
+/**
+ * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has bulk transfer type and IN direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd)
+{
+	return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd));
+}
+
+/**
+ * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has bulk transfer type and OUT direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd)
+{
+	return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd));
+}
+
+/**
+ * usb_endpoint_is_int_in - check if the endpoint is interrupt IN
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has interrupt transfer type and IN direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_int_in(const struct usb_endpoint_descriptor *epd)
+{
+	return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd));
+}
+
+/**
+ * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has interrupt transfer type and OUT direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_int_out(const struct usb_endpoint_descriptor *epd)
+{
+	return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_out(epd));
+}
+
+/**
+ * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has isochronous transfer type and IN direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_isoc_in(const struct usb_endpoint_descriptor *epd)
+{
+	return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_in(epd));
+}
+
+/**
+ * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has isochronous transfer type and OUT direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_isoc_out(const struct usb_endpoint_descriptor *epd)
+{
+	return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd));
+}
 
 /*-------------------------------------------------------------------------*/
 
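The endpoint classification helpers above become static inlines, so drivers can use them without an out-of-line call. A minimal sketch of a probe-time scan using them follows; the device structure and function name are hypothetical and not part of this patch:

#include <linux/errno.h>
#include <linux/usb.h>

/* Hypothetical helper: locate the first bulk-IN and bulk-OUT endpoints
 * of an interface's current altsetting using the new inline tests. */
static int example_find_bulk_endpoints(struct usb_interface *intf,
				       struct usb_endpoint_descriptor **in,
				       struct usb_endpoint_descriptor **out)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	int i;

	*in = *out = NULL;
	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		struct usb_endpoint_descriptor *epd = &alt->endpoint[i].desc;

		if (!*in && usb_endpoint_is_bulk_in(epd))
			*in = epd;
		else if (!*out && usb_endpoint_is_bulk_out(epd))
			*out = epd;
	}
	return (*in && *out) ? 0 : -ENODEV;
}
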
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index a50a013..7c269f4 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -546,6 +546,8 @@
 /* MLME requests (SIOCSIWMLME / struct iw_mlme) */
 #define IW_MLME_DEAUTH		0
 #define IW_MLME_DISASSOC	1
+#define IW_MLME_AUTH		2
+#define IW_MLME_ASSOC		3
 
 /* SIOCSIWAUTH/SIOCGIWAUTH struct iw_param flags */
 #define IW_AUTH_INDEX		0x0FFF
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 9bca353..edef8d5 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -11,12 +11,23 @@
 
 struct workqueue_struct;
 
+struct work_struct;
+typedef void (*work_func_t)(struct work_struct *work);
+
 struct work_struct {
-	unsigned long pending;
+	/* the first word is the work queue pointer and the flags rolled into
+	 * one */
+	unsigned long management;
+#define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
+#define WORK_STRUCT_NOAUTOREL 1		/* F if work item automatically released on exec */
+#define WORK_STRUCT_FLAG_MASK (3UL)
+#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
 	struct list_head entry;
-	void (*func)(void *);
-	void *data;
-	void *wq_data;
+	work_func_t func;
+};
+
+struct delayed_work {
+	struct work_struct work;
 	struct timer_list timer;
 };
 
@@ -24,77 +35,160 @@
 	struct work_struct work;
 };
 
-#define __WORK_INITIALIZER(n, f, d) {				\
+#define __WORK_INITIALIZER(n, f) {				\
+	.management = 0,					\
         .entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
-	.data = (d),						\
+	}
+
+#define __WORK_INITIALIZER_NAR(n, f) {				\
+	.management = (1 << WORK_STRUCT_NOAUTOREL),		\
+        .entry	= { &(n).entry, &(n).entry },			\
+	.func = (f),						\
+	}
+
+#define __DELAYED_WORK_INITIALIZER(n, f) {			\
+	.work = __WORK_INITIALIZER((n).work, (f)),		\
 	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
 	}
 
-#define DECLARE_WORK(n, f, d)					\
-	struct work_struct n = __WORK_INITIALIZER(n, f, d)
+#define __DELAYED_WORK_INITIALIZER_NAR(n, f) {			\
+	.work = __WORK_INITIALIZER_NAR((n).work, (f)),		\
+	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
+	}
+
+#define DECLARE_WORK(n, f)					\
+	struct work_struct n = __WORK_INITIALIZER(n, f)
+
+#define DECLARE_WORK_NAR(n, f)					\
+	struct work_struct n = __WORK_INITIALIZER_NAR(n, f)
+
+#define DECLARE_DELAYED_WORK(n, f)				\
+	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
+
+#define DECLARE_DELAYED_WORK_NAR(n, f)			\
+	struct delayed_work n = __DELAYED_WORK_INITIALIZER_NAR(n, f)
 
 /*
- * initialize a work-struct's func and data pointers:
+ * initialize a work item's function pointer
  */
-#define PREPARE_WORK(_work, _func, _data)			\
+#define PREPARE_WORK(_work, _func)				\
 	do {							\
-		(_work)->func = _func;				\
-		(_work)->data = _data;				\
+		(_work)->func = (_func);			\
 	} while (0)
 
+#define PREPARE_DELAYED_WORK(_work, _func)			\
+	PREPARE_WORK(&(_work)->work, (_func))
+
 /*
- * initialize all of a work-struct:
+ * initialize all of a work item in one go
  */
-#define INIT_WORK(_work, _func, _data)				\
+#define INIT_WORK(_work, _func)					\
 	do {							\
+		(_work)->management = 0;			\
 		INIT_LIST_HEAD(&(_work)->entry);		\
-		(_work)->pending = 0;				\
-		PREPARE_WORK((_work), (_func), (_data));	\
+		PREPARE_WORK((_work), (_func));			\
+	} while (0)
+
+#define INIT_WORK_NAR(_work, _func)					\
+	do {								\
+		(_work)->management = (1 << WORK_STRUCT_NOAUTOREL);	\
+		INIT_LIST_HEAD(&(_work)->entry);			\
+		PREPARE_WORK((_work), (_func));				\
+	} while (0)
+
+#define INIT_DELAYED_WORK(_work, _func)				\
+	do {							\
+		INIT_WORK(&(_work)->work, (_func));		\
 		init_timer(&(_work)->timer);			\
 	} while (0)
 
+#define INIT_DELAYED_WORK_NAR(_work, _func)			\
+	do {							\
+		INIT_WORK_NAR(&(_work)->work, (_func));		\
+		init_timer(&(_work)->timer);			\
+	} while (0)
+
+/**
+ * work_pending - Find out whether a work item is currently pending
+ * @work: The work item in question
+ */
+#define work_pending(work) \
+	test_bit(WORK_STRUCT_PENDING, &(work)->management)
+
+/**
+ * delayed_work_pending - Find out whether a delayable work item is currently
+ * pending
+ * @work: The work item in question
+ */
+#define delayed_work_pending(work) \
+	test_bit(WORK_STRUCT_PENDING, &(work)->work.management)
+
+/**
+ * work_release - Release a work item under execution
+ * @work: The work item to release
+ *
+ * This is used to release a work item that has been initialised with automatic
+ * release mode disabled (WORK_STRUCT_NOAUTOREL is set).  This gives the work
+ * function the opportunity to grab auxiliary data from the container of the
+ * work_struct before clearing the pending bit as the work_struct may be
+ * subject to deallocation the moment the pending bit is cleared.
+ *
+ * In such a case, this should be called in the work function after it has
+ * fetched any data it may require from the container of the work_struct.
+ * After this function has been called, the work_struct may be scheduled for
+ * further execution or it may be deallocated unless other precautions are
+ * taken.
+ *
+ * This should also be used to release a delayed work item.
+ */
+#define work_release(work) \
+	clear_bit(WORK_STRUCT_PENDING, &(work)->management)
+
+
 extern struct workqueue_struct *__create_workqueue(const char *name,
-						    int singlethread);
-#define create_workqueue(name) __create_workqueue((name), 0)
-#define create_singlethread_workqueue(name) __create_workqueue((name), 1)
+						    int singlethread,
+						    int freezeable);
+#define create_workqueue(name) __create_workqueue((name), 0, 0)
+#define create_freezeable_workqueue(name) __create_workqueue((name), 0, 1)
+#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
 extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
-extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
+extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-	struct work_struct *work, unsigned long delay);
+	struct delayed_work *work, unsigned long delay);
 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
+extern int FASTCALL(run_scheduled_work(struct work_struct *work));
+extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
 
-extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
-extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
+extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
+extern int schedule_on_each_cpu(work_func_t func);
 extern void flush_scheduled_work(void);
 extern int current_is_keventd(void);
 extern int keventd_up(void);
 
 extern void init_workqueues(void);
-void cancel_rearming_delayed_work(struct work_struct *work);
+void cancel_rearming_delayed_work(struct delayed_work *work);
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
-				       struct work_struct *);
-int execute_in_process_context(void (*fn)(void *), void *,
-			       struct execute_work *);
+				       struct delayed_work *);
+int execute_in_process_context(work_func_t fn, struct execute_work *);
 
 /*
  * Kill off a pending schedule_delayed_work().  Note that the work callback
  * function may still be running on return from cancel_delayed_work().  Run
  * flush_scheduled_work() to wait on it.
  */
-static inline int cancel_delayed_work(struct work_struct *work)
+static inline int cancel_delayed_work(struct delayed_work *work)
 {
 	int ret;
 
 	ret = del_timer_sync(&work->timer);
 	if (ret)
-		clear_bit(0, &work->pending);
+		clear_bit(WORK_STRUCT_PENDING, &work->work.management);
 	return ret;
 }
 
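With the data pointer removed from struct work_struct, handlers now take the work item itself and recover their context with container_of(), and delayed work moves to struct delayed_work. A before/after conversion sketch, assuming a hypothetical driver structure not taken from this patch:

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct example_dev {
	struct delayed_work poll_work;	/* was: work_struct + private data */
	int status;
};

/* New-style handler: receives the work_struct and derives its context. */
static void example_poll(struct work_struct *work)
{
	struct example_dev *dev =
		container_of(work, struct example_dev, poll_work.work);

	dev->status++;
	/* re-arming now passes a struct delayed_work, not a work_struct */
	schedule_delayed_work(&dev->poll_work, HZ);
}

static void example_start(struct example_dev *dev)
{
	/* INIT_DELAYED_WORK() no longer takes a data argument */
	INIT_DELAYED_WORK(&dev->poll_work, example_poll);
	schedule_delayed_work(&dev->poll_work, HZ);
}
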
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 8ae7f74..088ba81 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -211,8 +211,8 @@
 
 struct xfrm_encap_tmpl {
 	__u16		encap_type;
-	__u16		encap_sport;
-	__u16		encap_dport;
+	__be16		encap_sport;
+	__be16		encap_dport;
 	xfrm_address_t	encap_oa;
 };
 
@@ -289,7 +289,9 @@
 
 struct xfrm_aevent_id {
 	struct xfrm_usersa_id		sa_id;
+	xfrm_address_t			saddr;
 	__u32				flags;
+	__u32				reqid;
 };
 
 struct xfrm_userspi_info {
diff --git a/include/linux/zftape.h b/include/linux/zftape.h
deleted file mode 100644
index b057c65..0000000
--- a/include/linux/zftape.h
+++ /dev/null
@@ -1,87 +0,0 @@
-#ifndef _ZFTAPE_H
-#define _ZFTAPE_H
-
-/*
- * Copyright (C) 1996, 1997 Claus-Justus Heine.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING.  If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
-
- *
- * $Source: /homes/cvs/ftape-stacked/include/linux/zftape.h,v $
- * $Revision: 1.12 $
- * $Date: 1997/10/21 11:02:37 $
- *
- *      Special ioctl and other global info for the zftape VFS
- *      interface for the QIC-40/80/3010/3020 floppy-tape driver for
- *      Linux.
- */
-
-#define ZFTAPE_VERSION  "zftape for " FTAPE_VERSION
-
-#include <linux/ftape.h>
-
-#define ZFTAPE_LABEL       "Ftape - The Linux Floppy Tape Project!"
-
-/* Bits of the minor device number that control the operation mode */
-#define ZFT_Q80_MODE		(1 << 3)
-#define ZFT_ZIP_MODE		(1 << 4)
-#define ZFT_RAW_MODE		(1 << 5)
-#define ZFT_MINOR_OP_MASK	(ZFT_Q80_MODE	| 	\
-				 ZFT_ZIP_MODE	| 	\
-				 ZFT_RAW_MODE)
-#define ZFT_MINOR_MASK		(FTAPE_SEL_MASK		|	\
-				 ZFT_MINOR_OP_MASK	|	\
-				 FTAPE_NO_REWIND)
-
-#ifdef ZFT_OBSOLETE
-struct mtblksz {
-	unsigned int mt_blksz;
-};
-#define MTIOC_ZFTAPE_GETBLKSZ _IOR('m', 104, struct mtblksz)
-#endif
-
-#ifdef __KERNEL__
-
-extern int zft_init(void);
-
-static inline __s64 zft_div_blksz(__s64 value, __u32 blk_sz)
-{
-	if (blk_sz == 1) {
-		return value;
-	} else {
-		return (__s64)(((__u32)(value >> 10) + (blk_sz >> 10) - 1) 
-			       / (blk_sz >> 10));
-	} 
-}
-
-static inline __s64 zft_mul_blksz(__s64 value, __u32 blk_sz)
-{
-	if (blk_sz == 1) {
-		return value;
-	} else {
-		/*  if blk_sz != 1, then it is a multiple of 1024. In
-		 *  this case, `value' will also fit into 32 bits.
-		 * 
-		 *  Actually, this limits the capacity to 42
-		 *  bits. This is (2^32)*1024, roughly a thousand
-		 *  times 2GB, or 3 Terabytes. Hopefully this is enough
-		 */
-		return(__s64)(((__u32)(value)*(blk_sz>>10))<<10);
-	}
-}
-
-#endif
-
-#endif
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 44f1b67..88df8fc 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -35,9 +35,9 @@
 #else
 #error "Please fix <asm/byteorder.h>"
 #endif
-	__u32			valid;
-	__u32			prefered;
-	__u32			reserved2;
+	__be32			valid;
+	__be32			prefered;
+	__be32			reserved2;
 
 	struct in6_addr		prefix;
 };
@@ -183,7 +183,7 @@
 	 * This will include the IEEE address token on links that support it.
 	 */
 
-	word = addr->s6_addr32[2] ^ addr->s6_addr32[3];
+	word = (__force u32)(addr->s6_addr32[2] ^ addr->s6_addr32[3]);
 	word ^= (word >> 16);
 	word ^= (word >> 8);
 
diff --git a/include/net/arp.h b/include/net/arp.h
index 6a3d9a7..f026645 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -16,7 +16,7 @@
 			 struct net_device *dev, __be32 src_ip,
 			 unsigned char *dest_hw, unsigned char *src_hw, unsigned char *th);
 extern int	arp_bind_neighbour(struct dst_entry *dst);
-extern int	arp_mc_map(u32 addr, u8 *haddr, struct net_device *dev, int dir);
+extern int	arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir);
 extern void	arp_ifdown(struct net_device *dev);
 
 extern struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
diff --git a/include/net/atmclip.h b/include/net/atmclip.h
index 90fcc98..b5a51a7 100644
--- a/include/net/atmclip.h
+++ b/include/net/atmclip.h
@@ -36,7 +36,7 @@
 
 
 struct atmarp_entry {
-	u32		ip;		/* IP address */
+	__be32		ip;		/* IP address */
 	struct clip_vcc	*vccs;		/* active VCCs; NULL if resolution is
 					   pending */
 	unsigned long	expires;	/* entry expiration time */
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 89d743c..3c563f0 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -124,7 +124,7 @@
 	u8  flow_ctrl;
 	u8  priority;
 	u8  ack_timer;
-	u16 mtu;
+	__le16 mtu;
 	u8  max_retrans;
 	u8  credits;
 } __attribute__ ((packed));
@@ -136,7 +136,7 @@
 	u8  flow_ctrl;
 	u8  xon_char;
 	u8  xoff_char;
-	u16 param_mask;
+	__le16 param_mask;
 } __attribute__ ((packed));
 
 struct rfcomm_rls {
diff --git a/include/net/checksum.h b/include/net/checksum.h
index e3ea7cc..1242461 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -27,8 +27,8 @@
 
 #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
 static inline
-unsigned int csum_and_copy_from_user (const unsigned char __user *src, unsigned char *dst,
-				      int len, int sum, int *err_ptr)
+__wsum csum_and_copy_from_user (const void __user *src, void *dst,
+				      int len, __wsum sum, int *err_ptr)
 {
 	if (access_ok(VERIFY_READ, src, len))
 		return csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
@@ -41,8 +41,8 @@
 #endif
 
 #ifndef HAVE_CSUM_COPY_USER
-static __inline__ unsigned int csum_and_copy_to_user
-(const unsigned char *src, unsigned char __user *dst, int len, unsigned int sum, int *err_ptr)
+static __inline__ __wsum csum_and_copy_to_user
+(const void *src, void __user *dst, int len, __wsum sum, int *err_ptr)
 {
 	sum = csum_partial(src, len, sum);
 
@@ -53,35 +53,44 @@
 	if (len)
 		*err_ptr = -EFAULT;
 
-	return -1; /* invalid checksum */
+	return (__force __wsum)-1; /* invalid checksum */
 }
 #endif
 
-static inline unsigned int csum_add(unsigned int csum, unsigned int addend)
+static inline __wsum csum_add(__wsum csum, __wsum addend)
 {
-	csum += addend;
-	return csum + (csum < addend);
+	u32 res = (__force u32)csum;
+	res += (__force u32)addend;
+	return (__force __wsum)(res + (res < (__force u32)addend));
 }
 
-static inline unsigned int csum_sub(unsigned int csum, unsigned int addend)
+static inline __wsum csum_sub(__wsum csum, __wsum addend)
 {
 	return csum_add(csum, ~addend);
 }
 
-static inline unsigned int
-csum_block_add(unsigned int csum, unsigned int csum2, int offset)
+static inline __wsum
+csum_block_add(__wsum csum, __wsum csum2, int offset)
 {
+	u32 sum = (__force u32)csum2;
 	if (offset&1)
-		csum2 = ((csum2&0xFF00FF)<<8)+((csum2>>8)&0xFF00FF);
-	return csum_add(csum, csum2);
+		sum = ((sum&0xFF00FF)<<8)+((sum>>8)&0xFF00FF);
+	return csum_add(csum, (__force __wsum)sum);
 }
 
-static inline unsigned int
-csum_block_sub(unsigned int csum, unsigned int csum2, int offset)
+static inline __wsum
+csum_block_sub(__wsum csum, __wsum csum2, int offset)
 {
+	u32 sum = (__force u32)csum2;
 	if (offset&1)
-		csum2 = ((csum2&0xFF00FF)<<8)+((csum2>>8)&0xFF00FF);
-	return csum_sub(csum, csum2);
+		sum = ((sum&0xFF00FF)<<8)+((sum>>8)&0xFF00FF);
+	return csum_sub(csum, (__force __wsum)sum);
 }
 
+static inline __wsum csum_unfold(__sum16 n)
+{
+	return (__force __wsum)n;
+}
+
+#define CSUM_MANGLED_0 ((__force __sum16)0xffff)
 #endif
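The typed csum helpers keep their old semantics; in particular csum_block_add() still byte-swaps the second partial sum when it starts at an odd offset. A small sketch, assuming two arbitrary buffers (names are illustrative only):

#include <net/checksum.h>

/* Checksum two fragments as if they were one contiguous buffer; the
 * second fragment logically starts at byte offset len1. */
static __sum16 example_csum_two_parts(const void *p1, int len1,
				      const void *p2, int len2)
{
	__wsum sum = csum_partial(p1, len1, 0);

	sum = csum_block_add(sum, csum_partial(p2, len2, 0), len1);
	return csum_fold(sum);
}
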
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index 718b4d9..4c9522c 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -58,10 +58,10 @@
 #define CIPSO_V4_MAP_PASS             2
 
 /* limits */
-#define CIPSO_V4_MAX_REM_LVLS         256
+#define CIPSO_V4_MAX_REM_LVLS         255
 #define CIPSO_V4_INV_LVL              0x80000000
 #define CIPSO_V4_MAX_LOC_LVLS         (CIPSO_V4_INV_LVL - 1)
-#define CIPSO_V4_MAX_REM_CATS         65536
+#define CIPSO_V4_MAX_REM_CATS         65534
 #define CIPSO_V4_INV_CAT              0x80000000
 #define CIPSO_V4_MAX_LOC_CATS         (CIPSO_V4_INV_CAT - 1)
 
diff --git a/include/net/dsfield.h b/include/net/dsfield.h
index a79c9e0..eb65bf2 100644
--- a/include/net/dsfield.h
+++ b/include/net/dsfield.h
@@ -20,14 +20,14 @@
 
 static inline __u8 ipv6_get_dsfield(struct ipv6hdr *ipv6h)
 {
-	return ntohs(*(__u16 *) ipv6h) >> 4;
+	return ntohs(*(__be16 *) ipv6h) >> 4;
 }
 
 
 static inline void ipv4_change_dsfield(struct iphdr *iph,__u8 mask,
     __u8 value)
 {
-        __u32 check = ntohs(iph->check);
+        __u32 check = ntohs((__force __be16)iph->check);
 	__u8 dsfield;
 
 	dsfield = (iph->tos & mask) | value;
@@ -35,7 +35,7 @@
 	if ((check+1) >> 16) check = (check+1) & 0xffff;
 	check -= dsfield;
 	check += check >> 16; /* adjust carry */
-	iph->check = htons(check);
+	iph->check = (__force __sum16)htons(check);
 	iph->tos = dsfield;
 }
 
@@ -45,9 +45,9 @@
 {
         __u16 tmp;
 
-	tmp = ntohs(*(__u16 *) ipv6h);
+	tmp = ntohs(*(__be16 *) ipv6h);
 	tmp = (tmp & ((mask << 4) | 0xf00f)) | (value << 4);
-	*(__u16 *) ipv6h = htons(tmp);
+	*(__be16 *) ipv6h = htons(tmp);
 }
 
 
diff --git a/include/net/dst.h b/include/net/dst.h
index e156e38..62b7e75 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -98,7 +98,7 @@
 	int			entry_size;
 
 	atomic_t		entries;
-	kmem_cache_t 		*kmem_cachep;
+	struct kmem_cache 		*kmem_cachep;
 };
 
 #ifdef __KERNEL__
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 8e2f473..bc3c264 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -13,6 +13,8 @@
 	atomic_t		refcnt;
 	int			ifindex;
 	char			ifname[IFNAMSIZ];
+	u32			mark;
+	u32			mark_mask;
 	u32			pref;
 	u32			flags;
 	u32			table;
@@ -50,6 +52,7 @@
 					struct nlmsghdr *,
 					struct fib_rule_hdr *);
 	u32			(*default_pref)(void);
+	size_t			(*nlmsg_payload)(struct fib_rule *);
 
 	int			nlgroup;
 	struct nla_policy	*policy;
@@ -57,6 +60,13 @@
 	struct module		*owner;
 };
 
+#define FRA_GENERIC_POLICY \
+	[FRA_IFNAME]	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
+	[FRA_PRIORITY]	= { .type = NLA_U32 }, \
+	[FRA_FWMARK]	= { .type = NLA_U32 }, \
+	[FRA_FWMASK]	= { .type = NLA_U32 }, \
+	[FRA_TABLE]     = { .type = NLA_U32 }
+
 static inline void fib_rule_get(struct fib_rule *rule)
 {
 	atomic_inc(&rule->refcnt);
diff --git a/include/net/flow.h b/include/net/flow.h
index 5cda27c..ce4b10d 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -13,12 +13,12 @@
 struct flowi {
 	int	oif;
 	int	iif;
+	__u32	mark;
 
 	union {
 		struct {
 			__be32			daddr;
 			__be32			saddr;
-			__u32			fwmark;
 			__u8			tos;
 			__u8			scope;
 		} ip4_u;
@@ -26,28 +26,23 @@
 		struct {
 			struct in6_addr		daddr;
 			struct in6_addr		saddr;
-			__u32			fwmark;
-			__u32			flowlabel;
+			__be32			flowlabel;
 		} ip6_u;
 
 		struct {
 			__le16			daddr;
 			__le16			saddr;
-			__u32			fwmark;
 			__u8			scope;
 		} dn_u;
 	} nl_u;
 #define fld_dst		nl_u.dn_u.daddr
 #define fld_src		nl_u.dn_u.saddr
-#define fld_fwmark	nl_u.dn_u.fwmark
 #define fld_scope	nl_u.dn_u.scope
 #define fl6_dst		nl_u.ip6_u.daddr
 #define fl6_src		nl_u.ip6_u.saddr
-#define fl6_fwmark	nl_u.ip6_u.fwmark
 #define fl6_flowlabel	nl_u.ip6_u.flowlabel
 #define fl4_dst		nl_u.ip4_u.daddr
 #define fl4_src		nl_u.ip4_u.saddr
-#define fl4_fwmark	nl_u.ip4_u.fwmark
 #define fl4_tos		nl_u.ip4_u.tos
 #define fl4_scope	nl_u.ip4_u.scope
 
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index b619314..adff4c8 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -53,6 +53,7 @@
  * @policy: attribute validation policy
  * @doit: standard command callback
  * @dumpit: callback for dumpers
+ * @done: completion callback for dumps
  * @ops_list: operations list
  */
 struct genl_ops
@@ -64,6 +65,7 @@
 				       struct genl_info *info);
 	int		       (*dumpit)(struct sk_buff *skb,
 					 struct netlink_callback *cb);
+	int		       (*done)(struct netlink_callback *cb);
 	struct list_head	ops_list;
 };
 
@@ -79,34 +81,51 @@
  * @skb: socket buffer holding the message
  * @pid: netlink pid the message is addressed to
  * @seq: sequence number (usually the one of the sender)
- * @type: netlink message type
- * @hdrlen: length of the user specific header
+ * @family: generic netlink family
  * @flags: netlink message flags
  * @cmd: generic netlink command
- * @version: version
  *
  * Returns pointer to user specific header
  */
 static inline void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
-				int type, int hdrlen, int flags,
-				u8 cmd, u8 version)
+				struct genl_family *family, int flags, u8 cmd)
 {
 	struct nlmsghdr *nlh;
 	struct genlmsghdr *hdr;
 
-	nlh = nlmsg_put(skb, pid, seq, type, GENL_HDRLEN + hdrlen, flags);
+	nlh = nlmsg_put(skb, pid, seq, family->id, GENL_HDRLEN +
+			family->hdrsize, flags);
 	if (nlh == NULL)
 		return NULL;
 
 	hdr = nlmsg_data(nlh);
 	hdr->cmd = cmd;
-	hdr->version = version;
+	hdr->version = family->version;
 	hdr->reserved = 0;
 
 	return (char *) hdr + GENL_HDRLEN;
 }
 
 /**
+ * genlmsg_put_reply - Add generic netlink header to a reply message
+ * @skb: socket buffer holding the message
+ * @info: receiver info
+ * @family: generic netlink family
+ * @flags: netlink message flags
+ * @cmd: generic netlink command
+ *
+ * Returns pointer to user specific header
+ */
+static inline void *genlmsg_put_reply(struct sk_buff *skb,
+				      struct genl_info *info,
+				      struct genl_family *family,
+				      int flags, u8 cmd)
+{
+	return genlmsg_put(skb, info->snd_pid, info->snd_seq, family,
+			   flags, cmd);
+}
+
+/**
  * genlmsg_end - Finalize a generic netlink message
  * @skb: socket buffer the message is stored in
  * @hdr: user specific header
@@ -150,6 +169,16 @@
 }
 
 /**
+ * genlmsg_reply - reply to a request
+ * @skb: netlink message to be sent back
+ * @info: receiver information
+ */
+static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info)
+{
+	return genlmsg_unicast(skb, info->snd_pid);
+}
+
+/**
  * genlmsg_data - head of message payload
  * @gnlh: genetlink message header
  */
@@ -187,4 +216,15 @@
 	return NLMSG_ALIGN(genlmsg_msg_size(payload));
 }
 
+/**
+ * genlmsg_new - Allocate a new generic netlink message
+ * @payload: size of the message payload
+ * @flags: the type of memory to allocate.
+ */
+static inline struct sk_buff *genlmsg_new(size_t payload, gfp_t flags)
+{
+	return nlmsg_new(genlmsg_total_size(payload), flags);
+}
+
+
 #endif	/* __NET_GENERIC_NETLINK_H */
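A generic netlink doit handler written against the reworked helpers builds its reply roughly as in the sketch below; the family definition, command number and payload size are made-up illustrations, not part of this patch:

#include <linux/errno.h>
#include <net/genetlink.h>

static struct genl_family example_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = "example",
	.version = 1,
	.maxattr = 1,
};

static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;

	msg = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;

	/* genlmsg_put() now takes the family instead of type/hdrlen/version */
	hdr = genlmsg_put_reply(msg, info, &example_family, 0, 1 /* cmd */);
	if (hdr == NULL) {
		nlmsg_free(msg);
		return -EMSGSIZE;
	}

	genlmsg_end(msg, hdr);
	return genlmsg_reply(msg, info);
}
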
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index b174ebb..e6af381 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -1037,6 +1037,10 @@
 	/* host performs multicast decryption */
 	int host_mc_decrypt;
 
+	/* host should strip IV and ICV from protected frames */
+	/* meaningful only when hardware decryption is being used */
+	int host_strip_iv_icv;
+
 	int host_open_frag;
 	int host_build_iv;
 	int ieee802_1x;		/* is IEEE 802.1X used */
@@ -1076,6 +1080,8 @@
 	int perfect_rssi;
 	int worst_rssi;
 
+	u16 prev_seq_ctl;	/* used to drop duplicate frames */
+
 	/* Callback functions */
 	void (*set_security) (struct net_device * dev,
 			      struct ieee80211_security * sec);
diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h
index 617b672..8911927 100644
--- a/include/net/ieee80211softmac.h
+++ b/include/net/ieee80211softmac.h
@@ -108,8 +108,8 @@
 	/* Scan retries remaining */
 	int scan_retry;
 
-	struct work_struct work;
-	struct work_struct timeout;
+	struct delayed_work work;
+	struct delayed_work timeout;
 };
 
 struct ieee80211softmac_bss_info {
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 34489c1..3ec7d07 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -152,6 +152,7 @@
 
 struct ipv6_devstat {
 	struct proc_dir_entry	*proc_dir_entry;
+	DEFINE_SNMP_STAT(struct ipstats_mib, ipv6);
 	DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6);
 };
 
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index b33b438..16aa96a 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -27,7 +27,7 @@
 
 extern struct request_sock *inet6_csk_search_req(const struct sock *sk,
 						 struct request_sock ***prevp,
-						 const __u16 rport,
+						 const __be16 rport,
 						 const struct in6_addr *raddr,
 						 const struct in6_addr *laddr,
 						 const int iif);
@@ -38,5 +38,5 @@
 
 extern void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 
-extern int inet6_csk_xmit(struct sk_buff *skb, int ipfragok);
+extern int inet6_csk_xmit(struct sk_buff *skb, struct sock *sk, int ipfragok);
 #endif /* _INET6_CONNECTION_SOCK_H */
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index bc6a71d..c28e424 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -26,11 +26,11 @@
 
 /* I have no idea if this is a good hash for v6 or not. -DaveM */
 static inline unsigned int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport,
-				const struct in6_addr *faddr, const u16 fport)
+				const struct in6_addr *faddr, const __be16 fport)
 {
-	unsigned int hashent = (lport ^ fport);
+	unsigned int hashent = (lport ^ (__force u16)fport);
 
-	hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
+	hashent ^= (__force u32)(laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
 	hashent ^= hashent >> 16;
 	hashent ^= hashent >> 8;
 	return hashent;
@@ -43,7 +43,7 @@
 	const struct in6_addr *laddr = &np->rcv_saddr;
 	const struct in6_addr *faddr = &np->daddr;
 	const __u16 lport = inet->num;
-	const __u16 fport = inet->dport;
+	const __be16 fport = inet->dport;
 	return inet6_ehashfn(laddr, lport, faddr, fport);
 }
 
@@ -57,7 +57,7 @@
  */
 extern struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo,
 					   const struct in6_addr *saddr,
-					   const u16 sport,
+					   const __be16 sport,
 					   const struct in6_addr *daddr,
 					   const u16 hnum,
 					   const int dif);
@@ -69,7 +69,7 @@
 
 static inline struct sock *__inet6_lookup(struct inet_hashinfo *hashinfo,
 					  const struct in6_addr *saddr,
-					  const u16 sport,
+					  const __be16 sport,
 					  const struct in6_addr *daddr,
 					  const u16 hnum,
 					  const int dif)
@@ -83,8 +83,8 @@
 }
 
 extern struct sock *inet6_lookup(struct inet_hashinfo *hashinfo,
-				 const struct in6_addr *saddr, const u16 sport,
-				 const struct in6_addr *daddr, const u16 dport,
+				 const struct in6_addr *saddr, const __be16 sport,
+				 const struct in6_addr *daddr, const __be16 dport,
 				 const int dif);
 #endif /* defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) */
 #endif /* _INET6_HASHTABLES_H */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 0bcf9f2..bf16d98 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -18,6 +18,7 @@
 #include <linux/compiler.h>
 #include <linux/string.h>
 #include <linux/timer.h>
+#include <linux/poll.h>
 
 #include <net/inet_sock.h>
 #include <net/request_sock.h>
@@ -36,7 +37,8 @@
  * (i.e. things that depend on the address family)
  */
 struct inet_connection_sock_af_ops {
-	int	    (*queue_xmit)(struct sk_buff *skb, int ipfragok);
+	int	    (*queue_xmit)(struct sk_buff *skb, struct sock *sk,
+				  int ipfragok);
 	void	    (*send_check)(struct sock *sk, int len,
 				  struct sk_buff *skb);
 	int	    (*rebuild_header)(struct sock *sk);
@@ -45,7 +47,8 @@
 				      struct request_sock *req,
 				      struct dst_entry *dst);
 	int	    (*remember_stamp)(struct sock *sk);
-	__u16	    net_header_len;
+	u16	    net_header_len;
+	u16	    sockaddr_len;
 	int	    (*setsockopt)(struct sock *sk, int level, int optname, 
 				  char __user *optval, int optlen);
 	int	    (*getsockopt)(struct sock *sk, int level, int optname, 
@@ -57,7 +60,6 @@
 				int level, int optname,
 				char __user *optval, int __user *optlen);
 	void	    (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
-	int sockaddr_len;
 };
 
 /** inet_connection_sock - INET connection oriented sock
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 7849844..10117c8 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -53,7 +53,7 @@
 
 static inline int IP_ECN_set_ce(struct iphdr *iph)
 {
-	u32 check = iph->check;
+	u32 check = (__force u32)iph->check;
 	u32 ecn = (iph->tos + 1) & INET_ECN_MASK;
 
 	/*
@@ -71,9 +71,9 @@
 	 * INET_ECN_ECT_1 => check += htons(0xFFFD)
 	 * INET_ECN_ECT_0 => check += htons(0xFFFE)
 	 */
-	check += htons(0xFFFB) + htons(ecn);
+	check += (__force u16)htons(0xFFFB) + (__force u16)htons(ecn);
 
-	iph->check = check + (check>=0xFFFF);
+	iph->check = (__force __sum16)(check + (check>=0xFFFF));
 	iph->tos |= INET_ECN_CE;
 	return 1;
 }
@@ -95,13 +95,13 @@
 {
 	if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
 		return 0;
-	*(u32*)iph |= htonl(INET_ECN_CE << 20);
+	*(__be32*)iph |= htonl(INET_ECN_CE << 20);
 	return 1;
 }
 
 static inline void IP6_ECN_clear(struct ipv6hdr *iph)
 {
-	*(u32*)iph &= ~htonl(INET_ECN_MASK << 20);
+	*(__be32*)iph &= ~htonl(INET_ECN_MASK << 20);
 }
 
 static inline void ipv6_copy_dscp(struct ipv6hdr *outer, struct ipv6hdr *inner)
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index a9eb2ea..34cc76e 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -125,7 +125,7 @@
 	rwlock_t			lhash_lock ____cacheline_aligned;
 	atomic_t			lhash_users;
 	wait_queue_head_t		lhash_wait;
-	kmem_cache_t			*bind_bucket_cachep;
+	struct kmem_cache			*bind_bucket_cachep;
 };
 
 static inline struct inet_ehash_bucket *inet_ehash_bucket(
@@ -136,10 +136,10 @@
 }
 
 extern struct inet_bind_bucket *
-		    inet_bind_bucket_create(kmem_cache_t *cachep,
+		    inet_bind_bucket_create(struct kmem_cache *cachep,
 					    struct inet_bind_hashbucket *head,
 					    const unsigned short snum);
-extern void inet_bind_bucket_destroy(kmem_cache_t *cachep,
+extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
 				     struct inet_bind_bucket *tb);
 
 static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 5f48748..f7be1ac 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -84,7 +84,7 @@
 };
 
 extern void inet_twdr_hangman(unsigned long data);
-extern void inet_twdr_twkill_work(void *data);
+extern void inet_twdr_twkill_work(struct work_struct *work);
 extern void inet_twdr_twcal_tick(unsigned long data);
 
 #if (BITS_PER_LONG == 64)
diff --git a/include/net/ip.h b/include/net/ip.h
index b6d95e5..83cb9ac 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -97,7 +97,7 @@
 extern int		ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
 extern int		ip_do_nat(struct sk_buff *skb);
 extern void		ip_send_check(struct iphdr *ip);
-extern int		ip_queue_xmit(struct sk_buff *skb, int ipfragok);
+extern int		ip_queue_xmit(struct sk_buff *skb, struct sock *sk, int ipfragok);
 extern void		ip_init(void);
 extern int		ip_append_data(struct sock *sk,
 				       int getfrag(void *from, char *to, int offset, int len,
@@ -123,7 +123,7 @@
  *      multicast packets.
  */
 
-static inline void ip_tr_mc_map(u32 addr, char *buf)
+static inline void ip_tr_mc_map(__be32 addr, char *buf)
 {
 	buf[0]=0xC0;
 	buf[1]=0x00;
@@ -135,7 +135,7 @@
 
 struct ip_reply_arg {
 	struct kvec iov[1];   
-	u32 	    csum; 
+	__wsum 	    csum;
 	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
 				/* -1 if not needed */ 
 }; 
@@ -192,9 +192,9 @@
 static inline
 int ip_decrease_ttl(struct iphdr *iph)
 {
-	u32 check = iph->check;
-	check += htons(0x0100);
-	iph->check = check + (check>=0xFFFF);
+	u32 check = (__force u32)iph->check;
+	check += (__force u32)htons(0x0100);
+	iph->check = (__force __sum16)(check + (check>=0xFFFF));
 	return --iph->ttl;
 }
 
@@ -238,9 +238,9 @@
  *	Map a multicast IP onto multicast MAC for type ethernet.
  */
 
-static inline void ip_eth_mc_map(u32 addr, char *buf)
+static inline void ip_eth_mc_map(__be32 naddr, char *buf)
 {
-	addr=ntohl(addr);
+	__u32 addr=ntohl(naddr);
 	buf[0]=0x01;
 	buf[1]=0x00;
 	buf[2]=0x5e;
@@ -256,13 +256,14 @@
  *	Leave P_Key as 0 to be filled in by driver.
  */
 
-static inline void ip_ib_mc_map(u32 addr, char *buf)
+static inline void ip_ib_mc_map(__be32 naddr, char *buf)
 {
+	__u32 addr;
 	buf[0]  = 0;		/* Reserved */
 	buf[1]  = 0xff;		/* Multicast QPN */
 	buf[2]  = 0xff;
 	buf[3]  = 0xff;
-	addr    = ntohl(addr);
+	addr    = ntohl(naddr);
 	buf[4]  = 0xff;
 	buf[5]  = 0x12;		/* link local scope */
 	buf[6]  = 0x40;		/* IPv4 signature */
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index 3dfc885..68e2b32 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -34,60 +34,60 @@
 
 #ifndef _HAVE_ARCH_IPV6_CSUM
 
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						     struct in6_addr *daddr,
-						     __u16 len,
-						     unsigned short proto,
-						     unsigned int csum) 
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+					  const struct in6_addr *daddr,
+					  __u32 len, unsigned short proto,
+					  __wsum csum)
 {
 
 	int carry;
 	__u32 ulen;
 	__u32 uproto;
+	__u32 sum = (__force u32)csum;
 
-	csum += saddr->s6_addr32[0];
-	carry = (csum < saddr->s6_addr32[0]);
-	csum += carry;
+	sum += (__force u32)saddr->s6_addr32[0];
+	carry = (sum < (__force u32)saddr->s6_addr32[0]);
+	sum += carry;
 
-	csum += saddr->s6_addr32[1];
-	carry = (csum < saddr->s6_addr32[1]);
-	csum += carry;
+	sum += (__force u32)saddr->s6_addr32[1];
+	carry = (sum < (__force u32)saddr->s6_addr32[1]);
+	sum += carry;
 
-	csum += saddr->s6_addr32[2];
-	carry = (csum < saddr->s6_addr32[2]);
-	csum += carry;
+	sum += (__force u32)saddr->s6_addr32[2];
+	carry = (sum < (__force u32)saddr->s6_addr32[2]);
+	sum += carry;
 
-	csum += saddr->s6_addr32[3];
-	carry = (csum < saddr->s6_addr32[3]);
-	csum += carry;
+	sum += (__force u32)saddr->s6_addr32[3];
+	carry = (sum < (__force u32)saddr->s6_addr32[3]);
+	sum += carry;
 
-	csum += daddr->s6_addr32[0];
-	carry = (csum < daddr->s6_addr32[0]);
-	csum += carry;
+	sum += (__force u32)daddr->s6_addr32[0];
+	carry = (sum < (__force u32)daddr->s6_addr32[0]);
+	sum += carry;
 
-	csum += daddr->s6_addr32[1];
-	carry = (csum < daddr->s6_addr32[1]);
-	csum += carry;
+	sum += (__force u32)daddr->s6_addr32[1];
+	carry = (sum < (__force u32)daddr->s6_addr32[1]);
+	sum += carry;
 
-	csum += daddr->s6_addr32[2];
-	carry = (csum < daddr->s6_addr32[2]);
-	csum += carry;
+	sum += (__force u32)daddr->s6_addr32[2];
+	carry = (sum < (__force u32)daddr->s6_addr32[2]);
+	sum += carry;
 
-	csum += daddr->s6_addr32[3];
-	carry = (csum < daddr->s6_addr32[3]);
-	csum += carry;
+	sum += (__force u32)daddr->s6_addr32[3];
+	carry = (sum < (__force u32)daddr->s6_addr32[3]);
+	sum += carry;
 
-	ulen = htonl((__u32) len);
-	csum += ulen;
-	carry = (csum < ulen);
-	csum += carry;
+	ulen = (__force u32)htonl((__u32) len);
+	sum += ulen;
+	carry = (sum < ulen);
+	sum += carry;
 
-	uproto = htonl(proto);
-	csum += uproto;
-	carry = (csum < uproto);
-	csum += carry;
+	uproto = (__force u32)htonl(proto);
+	sum += uproto;
+	carry = (sum < uproto);
+	sum += carry;
 
-	return csum_fold(csum);
+	return csum_fold((__force __wsum)csum);
 }
 
 #endif
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index e4438de..f9cde44 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -107,6 +107,11 @@
 	u8				rt6i_protocol;
 };
 
+static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
+{
+	return ((struct rt6_info *)dst)->rt6i_idev;
+}
+
 struct fib6_walker_t
 {
 	struct fib6_walker_t *prev, *next;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index c14b70e..4e927eb 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -20,7 +20,7 @@
 				route_pref:2,
 				reserved_h:3;
 #endif
-	__u32			lifetime;
+	__be32			lifetime;
 	__u8			prefix[0];	/* 0,8 or 16 */
 };
 
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 949b932..36c635c 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -115,7 +115,7 @@
 
 struct fib_result_nl {
 	__be32		fl_addr;   /* To be looked up*/
-	u32		fl_fwmark; 
+	u32		fl_mark;
 	unsigned char	fl_tos;
 	unsigned char   fl_scope;
 	unsigned char   tb_id_in;
diff --git a/include/net/ip_mp_alg.h b/include/net/ip_mp_alg.h
index beffdd6..25b5657 100644
--- a/include/net/ip_mp_alg.h
+++ b/include/net/ip_mp_alg.h
@@ -88,9 +88,7 @@
 	return flp1->fl4_dst == flp2->fl4_dst &&
 		flp1->fl4_src == flp2->fl4_src &&
 		flp1->oif == flp2->oif &&
-#ifdef CONFIG_IP_ROUTE_FWMARK
-		flp1->fl4_fwmark == flp2->fl4_fwmark &&
-#endif
+		flp1->mark == flp2->mark &&
 		!((flp1->fl4_tos ^ flp2->fl4_tos) &
 		  (IPTOS_RT_MASK | RTO_ONLINK));
 }
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 903108e..672564e 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -988,14 +988,20 @@
 extern void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
 		struct ip_vs_conn *cp, int dir);
 
-extern u16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
+extern __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
 
-static inline u16 ip_vs_check_diff(u32 old, u32 new, u16 oldsum)
+static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum)
 {
-	u32 diff[2] = { old, new };
+	__be32 diff[2] = { ~old, new };
 
-	return csum_fold(csum_partial((char *) diff, sizeof(diff),
-				      oldsum ^ 0xFFFF));
+	return csum_partial((char *) diff, sizeof(diff), oldsum);
+}
+
+static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
+{
+	__be16 diff[2] = { ~old, new };
+
+	return csum_partial((char *) diff, sizeof(diff), oldsum);
 }
 
 #endif /* __KERNEL__ */
diff --git a/include/net/ipconfig.h b/include/net/ipconfig.h
index 2a1fe99..3924d7d 100644
--- a/include/net/ipconfig.h
+++ b/include/net/ipconfig.h
@@ -11,12 +11,12 @@
 extern int ic_proto_enabled;	/* Protocols enabled (see IC_xxx) */
 extern int ic_set_manually;	/* IPconfig parameters set manually */
 
-extern u32 ic_myaddr;		/* My IP address */
-extern u32 ic_gateway;		/* Gateway IP address */
+extern __be32 ic_myaddr;		/* My IP address */
+extern __be32 ic_gateway;		/* Gateway IP address */
 
-extern u32 ic_servaddr;		/* Boot server IP address */
+extern __be32 ic_servaddr;		/* Boot server IP address */
 
-extern u32 root_server_addr;	/* Address of NFS server */
+extern __be32 root_server_addr;	/* Address of NFS server */
 extern u8 root_server_path[];	/* Path to mount as root */
 
 
diff --git a/include/net/ipip.h b/include/net/ipip.h
index f490c3c..7cdc914 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -35,7 +35,7 @@
 	ip_send_check(iph);						\
 									\
 	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev, dst_output);\
-	if (err == NET_XMIT_SUCCESS || err == NET_XMIT_CN) {		\
+	if (net_xmit_eval(err) == 0) {					\
 		stats->tx_bytes += pkt_len;				\
 		stats->tx_packets++;					\
 	} else {							\
@@ -44,8 +44,4 @@
 	}								\
 } while (0)
 
-
-extern int	sit_init(void);
-extern void	sit_cleanup(void);
-
 #endif
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 8223c44..00328b7 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -95,10 +95,10 @@
  */
 
 struct frag_hdr {
-	unsigned char	nexthdr;
-	unsigned char	reserved;	
-	unsigned short	frag_off;
-	__u32		identification;
+	__u8	nexthdr;
+	__u8	reserved;
+	__be16	frag_off;
+	__be32	identification;
 };
 
 #define	IP6_MF	0x0001
@@ -113,9 +113,24 @@
 
 /* MIBs */
 DECLARE_SNMP_STAT(struct ipstats_mib, ipv6_statistics);
-#define IP6_INC_STATS(field)		SNMP_INC_STATS(ipv6_statistics, field)
-#define IP6_INC_STATS_BH(field)		SNMP_INC_STATS_BH(ipv6_statistics, field)
-#define IP6_INC_STATS_USER(field) 	SNMP_INC_STATS_USER(ipv6_statistics, field)
+#define IP6_INC_STATS(idev,field)		({			\
+	struct inet6_dev *_idev = (idev);				\
+	if (likely(_idev != NULL))					\
+		SNMP_INC_STATS(_idev->stats.ipv6, field);		\
+	SNMP_INC_STATS(ipv6_statistics, field);				\
+})
+#define IP6_INC_STATS_BH(idev,field)		({			\
+	struct inet6_dev *_idev = (idev);				\
+	if (likely(_idev != NULL))					\
+		SNMP_INC_STATS_BH(_idev->stats.ipv6, field);		\
+	SNMP_INC_STATS_BH(ipv6_statistics, field);			\
+})
+#define IP6_INC_STATS_USER(idev,field)		({			\
+	struct inet6_dev *_idev = (idev);				\
+	if (likely(_idev != NULL))					\
+		SNMP_INC_STATS_USER(_idev->stats.ipv6, field);		\
+	SNMP_INC_STATS_USER(ipv6_statistics, field);			\
+})
 DECLARE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);
 #define ICMP6_INC_STATS(idev, field)		({			\
 	struct inet6_dev *_idev = (idev);				\
@@ -143,9 +158,13 @@
 	SNMP_INC_STATS_OFFSET_BH(icmpv6_statistics, field, _offset);    	\
 })
 DECLARE_SNMP_STAT(struct udp_mib, udp_stats_in6);
-#define UDP6_INC_STATS(field)		SNMP_INC_STATS(udp_stats_in6, field)
-#define UDP6_INC_STATS_BH(field)	SNMP_INC_STATS_BH(udp_stats_in6, field)
-#define UDP6_INC_STATS_USER(field) 	SNMP_INC_STATS_USER(udp_stats_in6, field)
+DECLARE_SNMP_STAT(struct udp_mib, udplite_stats_in6);
+#define UDP6_INC_STATS_BH(field, is_udplite) 			      do  {  \
+	if (is_udplite) SNMP_INC_STATS_BH(udplite_stats_in6, field);         \
+	else		SNMP_INC_STATS_BH(udp_stats_in6, field);    } while(0)
+#define UDP6_INC_STATS_USER(field, is_udplite)			       do {    \
+	if (is_udplite) SNMP_INC_STATS_USER(udplite_stats_in6, field);         \
+	else		SNMP_INC_STATS_USER(udp_stats_in6, field);    } while(0)
 
 int snmp6_register_dev(struct inet6_dev *idev);
 int snmp6_unregister_dev(struct inet6_dev *idev);
@@ -191,7 +210,7 @@
 struct ip6_flowlabel
 {
 	struct ip6_flowlabel	*next;
-	u32			label;
+	__be32			label;
 	struct in6_addr		dst;
 	struct ipv6_txoptions	*opt;
 	atomic_t		users;
@@ -211,7 +230,7 @@
 	struct ip6_flowlabel	*fl;
 };
 
-extern struct ip6_flowlabel	*fl6_sock_lookup(struct sock *sk, u32 label);
+extern struct ip6_flowlabel	*fl6_sock_lookup(struct sock *sk, __be32 label);
 extern struct ipv6_txoptions	*fl6_merge_options(struct ipv6_txoptions * opt_space,
 						   struct ip6_flowlabel * fl,
 						   struct ipv6_txoptions * fopt);
@@ -375,22 +394,15 @@
  */
 static inline int __ipv6_addr_diff(const void *token1, const void *token2, int addrlen)
 {
-	const __u32 *a1 = token1, *a2 = token2;
+	const __be32 *a1 = token1, *a2 = token2;
 	int i;
 
 	addrlen >>= 2;
 
 	for (i = 0; i < addrlen; i++) {
-		__u32 xb = a1[i] ^ a2[i];
-		if (xb) {
-			int j = 31;
-
-			xb = ntohl(xb);
-			while ((xb & (1 << j)) == 0)
-				j--;
-
-			return (i * 32 + 31 - j);
-		}
+		__be32 xb = a1[i] ^ a2[i];
+		if (xb)
+			return i * 32 + 32 - fls(ntohl(xb));
 	}
 
 	/*
@@ -544,7 +556,7 @@
 						     struct sockaddr *addr, int addr_len);
 
 extern int 			ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
-extern void			ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, u16 port,
+extern void			ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
 						u32 info, u8 *payload);
 extern void			ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info);
 
@@ -589,6 +601,8 @@
 extern void tcp6_proc_exit(void);
 extern int  udp6_proc_init(void);
 extern void udp6_proc_exit(void);
+extern int  udplite6_proc_init(void);
+extern void udplite6_proc_exit(void);
 extern int  ipv6_misc_proc_init(void);
 extern void ipv6_misc_proc_exit(void);
 
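The IPv6 MIB macros now also take the inet6_dev to charge; paired with the new ip6_dst_idev() helper a call site typically looks like this sketch (the chosen MIB counter and function are purely illustrative):

#include <linux/skbuff.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>

/* Hypothetical drop accounting: bump the counter both globally and on
 * the device that the packet's dst entry resolved to. */
static void example_count_no_route(struct sk_buff *skb)
{
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
}
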
diff --git a/include/net/irda/irlan_filter.h b/include/net/irda/irlan_filter.h
index 492deda..1720539 100644
--- a/include/net/irda/irlan_filter.h
+++ b/include/net/irda/irlan_filter.h
@@ -28,6 +28,8 @@
 void irlan_check_command_param(struct irlan_cb *self, char *param, 
 			       char *value);
 void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb);
+#ifdef CONFIG_PROC_FS
 void irlan_print_filter(struct seq_file *seq, int filter_type);
+#endif
 
 #endif /* IRLAN_FILTER_H */
diff --git a/include/net/irda/irlap_frame.h b/include/net/irda/irlap_frame.h
index 9dd54a5..641f88e 100644
--- a/include/net/irda/irlap_frame.h
+++ b/include/net/irda/irlap_frame.h
@@ -91,8 +91,8 @@
 	__u8  caddr; /* Connection address */
 	__u8  control;
 	__u8  ident; /* Should always be XID_FORMAT */ 
-	__u32 saddr; /* Source device address */
-	__u32 daddr; /* Destination device address */
+	__le32 saddr; /* Source device address */
+	__le32 daddr; /* Destination device address */
 	__u8  flags; /* Discovery flags */
 	__u8  slotnr;
 	__u8  version;
@@ -101,15 +101,15 @@
 struct test_frame {
 	__u8 caddr;          /* Connection address */
 	__u8 control;
-	__u32 saddr;         /* Source device address */
-	__u32 daddr;         /* Destination device address */
+	__le32 saddr;         /* Source device address */
+	__le32 daddr;         /* Destination device address */
 } IRDA_PACK;
 
 struct ua_frame {
 	__u8 caddr;
 	__u8 control;
-	__u32 saddr; /* Source device address */
-	__u32 daddr; /* Dest device address */
+	__le32 saddr; /* Source device address */
+	__le32 daddr; /* Dest device address */
 } IRDA_PACK;
 
 struct dm_frame {
@@ -135,8 +135,8 @@
 struct snrm_frame {
 	__u8  caddr;
 	__u8  control;
-	__u32 saddr;
-	__u32 daddr;
+	__le32 saddr;
+	__le32 daddr;
 	__u8  ncaddr;
 } IRDA_PACK;
 
diff --git a/include/net/irda/timer.h b/include/net/irda/timer.h
index 2c5d886..cb61568 100644
--- a/include/net/irda/timer.h
+++ b/include/net/irda/timer.h
@@ -28,6 +28,7 @@
 #define TIMER_H
 
 #include <linux/timer.h>
+#include <linux/jiffies.h>
 
 #include <asm/param.h>  /* for HZ */
 
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 8f63065..aa33a47 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -252,9 +252,9 @@
  */
 static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
 {
-	if (skb->protocol == ntohs(ETH_P_802_2))
+	if (skb->protocol == htons(ETH_P_802_2))
 		memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
-	else if (skb->protocol == ntohs(ETH_P_TR_802_2)) {
+	else if (skb->protocol == htons(ETH_P_TR_802_2)) {
 		memcpy(sa, tr_hdr(skb)->saddr, ETH_ALEN);
 		*sa &= 0x7F;
 	}
@@ -269,9 +269,9 @@
  */
 static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
 {
-	if (skb->protocol == ntohs(ETH_P_802_2))
+	if (skb->protocol == htons(ETH_P_802_2))
 		memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
-	else if (skb->protocol == ntohs(ETH_P_TR_802_2))
+	else if (skb->protocol == htons(ETH_P_TR_802_2))
 		memcpy(da, tr_hdr(skb)->daddr, ETH_ALEN);
 }
 
@@ -345,7 +345,7 @@
 	pdu->ctrl_1  = LLC_PDU_TYPE_U;
 	pdu->ctrl_1 |= LLC_1_PDU_CMD_TEST;
 	pdu->ctrl_1 |= LLC_U_PF_BIT_MASK;
-	if (ev_skb->protocol == ntohs(ETH_P_802_2)) {
+	if (ev_skb->protocol == htons(ETH_P_802_2)) {
 		struct llc_pdu_un *ev_pdu = llc_pdu_un_hdr(ev_skb);
 		int dsize;
 
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index d3915dab..475b10c 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -66,8 +66,8 @@
 
 struct ra_msg {
         struct icmp6hdr		icmph;
-	__u32			reachable_time;
-	__u32			retrans_timer;
+	__be32			reachable_time;
+	__be32			retrans_timer;
 };
 
 struct nd_opt_hdr {
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index c8aacbd..2396703 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -160,7 +160,7 @@
 	atomic_t		entries;
 	rwlock_t		lock;
 	unsigned long		last_rand;
-	kmem_cache_t		*kmem_cachep;
+	struct kmem_cache		*kmem_cachep;
 	struct neigh_statistics	*stats;
 	struct neighbour	**hash_buckets;
 	unsigned int		hash_mask;
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 9168443..1401ccc 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -9,32 +9,35 @@
 #ifndef _NF_CONNTRACK_IPV4_H
 #define _NF_CONNTRACK_IPV4_H
 
-#ifdef CONFIG_IP_NF_NAT_NEEDED
-#include <linux/netfilter_ipv4/ip_nat.h>
+#ifdef CONFIG_NF_NAT_NEEDED
+#include <net/netfilter/nf_nat.h>
+#include <linux/netfilter/nf_conntrack_pptp.h>
 
 /* per conntrack: nat application helper private data */
-union ip_conntrack_nat_help {
+union nf_conntrack_nat_help {
         /* insert nat helper private data here */
+	struct nf_nat_pptp nat_pptp_info;
 };
 
-struct nf_conntrack_ipv4_nat {
-	struct ip_nat_info info;
-	union ip_conntrack_nat_help help;
+struct nf_conn_nat {
+	struct nf_nat_info info;
+	union nf_conntrack_nat_help help;
 #if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
 	defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE)
 	int masq_index;
 #endif
 };
-#endif /* CONFIG_IP_NF_NAT_NEEDED */
-
-struct nf_conntrack_ipv4 {
-#ifdef CONFIG_IP_NF_NAT_NEEDED
-	struct nf_conntrack_ipv4_nat *nat;
-#endif
-};
+#endif /* CONFIG_NF_NAT_NEEDED */
 
 /* Returns new sk_buff, or NULL */
 struct sk_buff *
 nf_ct_ipv4_ct_gather_frags(struct sk_buff *skb);
 
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
+
+extern int nf_conntrack_ipv4_compat_init(void);
+extern void nf_conntrack_ipv4_compat_fini(void);
+
 #endif /*_NF_CONNTRACK_IPV4_H*/
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
new file mode 100644
index 0000000..b4b6049
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -0,0 +1,25 @@
+#ifndef _NF_CONNTRACK_IPV6_H
+#define _NF_CONNTRACK_IPV6_H
+
+extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
+
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
+
+extern int nf_ct_ipv6_skip_exthdr(struct sk_buff *skb, int start,
+				  u8 *nexthdrp, int len);
+
+extern int nf_ct_frag6_init(void);
+extern void nf_ct_frag6_cleanup(void);
+extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb);
+extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+			       struct net_device *in,
+			       struct net_device *out,
+			       int (*okfn)(struct sk_buff *));
+
+extern unsigned int nf_ct_frag6_timeout;
+extern unsigned int nf_ct_frag6_low_thresh;
+extern unsigned int nf_ct_frag6_high_thresh;
+
+#endif /* _NF_CONNTRACK_IPV6_H*/
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 1fbd819..bd01b46 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -21,6 +21,7 @@
 
 #include <linux/netfilter/nf_conntrack_tcp.h>
 #include <linux/netfilter/nf_conntrack_sctp.h>
+#include <linux/netfilter/nf_conntrack_proto_gre.h>
 #include <net/netfilter/ipv4/nf_conntrack_icmp.h>
 #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
 
@@ -33,6 +34,7 @@
 	struct ip_ct_tcp tcp;
 	struct ip_ct_icmp icmp;
 	struct nf_ct_icmpv6 icmpv6;
+	struct nf_ct_gre gre;
 };
 
 union nf_conntrack_expect_proto {
@@ -41,15 +43,20 @@
 
 /* Add protocol helper include file here */
 #include <linux/netfilter/nf_conntrack_ftp.h>
+#include <linux/netfilter/nf_conntrack_pptp.h>
+#include <linux/netfilter/nf_conntrack_h323.h>
 
 /* per conntrack: application helper private data */
 union nf_conntrack_help {
 	/* insert conntrack helper private data (master) here */
-	struct ip_ct_ftp_master ct_ftp_info;
+	struct nf_ct_ftp_master ct_ftp_info;
+	struct nf_ct_pptp_master ct_pptp_info;
+	struct nf_ct_h323_master ct_h323_info;
 };
 
 #include <linux/types.h>
 #include <linux/skbuff.h>
+#include <linux/timer.h>
 
 #ifdef CONFIG_NETFILTER_DEBUG
 #define NF_CT_ASSERT(x)							\
@@ -79,6 +86,8 @@
 
 
 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+
 struct nf_conn
 {
 	/* Usage count in here is 1 for hash table/destruct timer, 1 per skb,
@@ -124,44 +133,6 @@
 	char data[0];
 };
 
-struct nf_conntrack_expect
-{
-	/* Internal linked list (global expectation list) */
-	struct list_head list;
-
-	/* We expect this tuple, with the following mask */
-	struct nf_conntrack_tuple tuple, mask;
- 
-	/* Function to call after setup and insertion */
-	void (*expectfn)(struct nf_conn *new,
-			 struct nf_conntrack_expect *this);
-
-	/* The conntrack of the master connection */
-	struct nf_conn *master;
-
-	/* Timer function; deletes the expectation. */
-	struct timer_list timeout;
-
-	/* Usage count. */
-	atomic_t use;
-
-	/* Unique ID */
-	unsigned int id;
-
-	/* Flags */
-	unsigned int flags;
-
-#ifdef CONFIG_NF_NAT_NEEDED
-	/* This is the original per-proto part, used to map the
-	 * expected connection the way the recipient expects. */
-	union nf_conntrack_manip_proto saved_proto;
-	/* Direction relative to the master connection. */
-	enum ip_conntrack_dir dir;
-#endif
-};
-
-#define NF_CT_EXPECT_PERMANENT 0x1
-
 static inline struct nf_conn *
 nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
 {
@@ -208,16 +179,6 @@
 
 extern void nf_conntrack_hash_insert(struct nf_conn *ct);
 
-extern struct nf_conntrack_expect *
-__nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple);
-
-extern struct nf_conntrack_expect *
-nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple);
-
-extern void nf_ct_unlink_expect(struct nf_conntrack_expect *exp);
-
-extern void nf_ct_remove_expectations(struct nf_conn *ct);
-
 extern void nf_conntrack_flush(void);
 
 extern struct nf_conntrack_helper *
@@ -289,89 +250,12 @@
 
 extern unsigned int nf_conntrack_htable_size;
 extern int nf_conntrack_checksum;
+extern atomic_t nf_conntrack_count;
+extern int nf_conntrack_max;
 
+DECLARE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
 #define NF_CT_STAT_INC(count) (__get_cpu_var(nf_conntrack_stat).count++)
 
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-#include <linux/notifier.h>
-#include <linux/interrupt.h>
-
-struct nf_conntrack_ecache {
-	struct nf_conn *ct;
-	unsigned int events;
-};
-DECLARE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
-
-#define CONNTRACK_ECACHE(x)	(__get_cpu_var(nf_conntrack_ecache).x)
-
-extern struct atomic_notifier_head nf_conntrack_chain;
-extern struct atomic_notifier_head nf_conntrack_expect_chain;
-
-static inline int nf_conntrack_register_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_register(&nf_conntrack_chain, nb);
-}
-
-static inline int nf_conntrack_unregister_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb);
-}
-
-static inline int
-nf_conntrack_expect_register_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_register(&nf_conntrack_expect_chain, nb);
-}
-
-static inline int
-nf_conntrack_expect_unregister_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_unregister(&nf_conntrack_expect_chain,
-			nb);
-}
-
-extern void nf_ct_deliver_cached_events(const struct nf_conn *ct);
-extern void __nf_ct_event_cache_init(struct nf_conn *ct);
-
-static inline void
-nf_conntrack_event_cache(enum ip_conntrack_events event,
-			 const struct sk_buff *skb)
-{
-	struct nf_conn *ct = (struct nf_conn *)skb->nfct;
-	struct nf_conntrack_ecache *ecache;
-
-	local_bh_disable();
-	ecache = &__get_cpu_var(nf_conntrack_ecache);
-	if (ct != ecache->ct)
-		__nf_ct_event_cache_init(ct);
-	ecache->events |= event;
-	local_bh_enable();
-}
-
-static inline void nf_conntrack_event(enum ip_conntrack_events event,
-				      struct nf_conn *ct)
-{
-	if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
-		atomic_notifier_call_chain(&nf_conntrack_chain, event, ct);
-}
-
-static inline void
-nf_conntrack_expect_event(enum ip_conntrack_expect_events event,
-			  struct nf_conntrack_expect *exp)
-{
-	atomic_notifier_call_chain(&nf_conntrack_expect_chain, event, exp);
-}
-#else /* CONFIG_NF_CONNTRACK_EVENTS */
-static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
-					    const struct sk_buff *skb) {}
-static inline void nf_conntrack_event(enum ip_conntrack_events event,
-				      struct nf_conn *ct) {}
-static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
-static inline void
-nf_conntrack_expect_event(enum ip_conntrack_expect_events event,
-			  struct nf_conntrack_expect *exp) {}
-#endif /* CONFIG_NF_CONNTRACK_EVENTS */
-
 /* no helper, no nat */
 #define	NF_CT_F_BASIC	0
 /* for helper */
@@ -387,8 +271,35 @@
 
 /* valid combinations:
  * basic: nf_conn, nf_conn .. nf_conn_help
- * nat: nf_conn .. nf_conn_nat, nf_conn .. nf_conn_nat, nf_conn help
+ * nat: nf_conn .. nf_conn_nat, nf_conn .. nf_conn_nat .. nf_conn help
  */
+#ifdef CONFIG_NF_NAT_NEEDED
+static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
+{
+	unsigned int offset = sizeof(struct nf_conn);
+
+	if (!(ct->features & NF_CT_F_NAT))
+		return NULL;
+
+	offset = ALIGN(offset, __alignof__(struct nf_conn_nat));
+	return (struct nf_conn_nat *) ((void *)ct + offset);
+}
+
+static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
+{
+	unsigned int offset = sizeof(struct nf_conn);
+
+	if (!(ct->features & NF_CT_F_HELP))
+		return NULL;
+	if (ct->features & NF_CT_F_NAT) {
+		offset = ALIGN(offset, __alignof__(struct nf_conn_nat));
+		offset += sizeof(struct nf_conn_nat);
+	}
+
+	offset = ALIGN(offset, __alignof__(struct nf_conn_help));
+	return (struct nf_conn_help *) ((void *)ct + offset);
+}
+#else /* No NAT */
 static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
 {
 	unsigned int offset = sizeof(struct nf_conn);
@@ -396,8 +307,9 @@
 	if (!(ct->features & NF_CT_F_HELP))
 		return NULL;
 
+	offset = ALIGN(offset, __alignof__(struct nf_conn_help));
 	return (struct nf_conn_help *) ((void *)ct + offset);
 }
-
+#endif /* CONFIG_NF_NAT_NEEDED */
 #endif /* __KERNEL__ */
 #endif /* _NF_CONNTRACK_H */
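
For illustration only, not part of this patch: a minimal sketch of how the variable-length layout used by nfct_nat()/nfct_help() above translates into an allocation size. nf_ct_alloc_size() is a hypothetical helper; it just mirrors the same NF_CT_F_* feature bits and ALIGN() rules as the accessors.

static inline size_t nf_ct_alloc_size(u32 features)
{
	size_t size = sizeof(struct nf_conn);

	if (features & NF_CT_F_NAT) {
		size = ALIGN(size, __alignof__(struct nf_conn_nat));
		size += sizeof(struct nf_conn_nat);
	}
	if (features & NF_CT_F_HELP) {
		size = ALIGN(size, __alignof__(struct nf_conn_help));
		size += sizeof(struct nf_conn_help);
	}
	return size;
}
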
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index da25452..7fdc72c 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -13,6 +13,9 @@
 #define _NF_CONNTRACK_CORE_H
 
 #include <linux/netfilter.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
 
 /* This header is used to share core functionality between the
    standalone connection tracking module, and the compatibility layer's use
@@ -29,7 +32,7 @@
 /* Like above, but you already have conntrack read lock. */
 extern struct nf_conntrack_l3proto *__nf_ct_find_l3proto(u_int16_t l3proto);
 
-struct nf_conntrack_protocol;
+struct nf_conntrack_l4proto;
 
 extern int
 nf_ct_get_tuple(const struct sk_buff *skb,
@@ -39,13 +42,13 @@
 		u_int8_t protonum,
 		struct nf_conntrack_tuple *tuple,
 		const struct nf_conntrack_l3proto *l3proto,
-		const struct nf_conntrack_protocol *protocol);
+		const struct nf_conntrack_l4proto *l4proto);
 
 extern int
 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
 		   const struct nf_conntrack_tuple *orig,
 		   const struct nf_conntrack_l3proto *l3proto,
-		   const struct nf_conntrack_protocol *protocol);
+		   const struct nf_conntrack_l4proto *l4proto);
 
 /* Find a connection corresponding to a tuple. */
 extern struct nf_conntrack_tuple_hash *
@@ -70,7 +73,14 @@
 
 extern void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb);
 
+int
+print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
+	    struct nf_conntrack_l3proto *l3proto,
+	    struct nf_conntrack_l4proto *proto);
+
 extern struct list_head *nf_conntrack_hash;
 extern struct list_head nf_conntrack_expect_list;
 extern rwlock_t nf_conntrack_lock ;
+extern struct list_head unconfirmed;
+
 #endif /* _NF_CONNTRACK_CORE_H */
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
new file mode 100644
index 0000000..b62a8a9
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -0,0 +1,95 @@
+/*
+ * connection tracking event cache.
+ */
+
+#ifndef _NF_CONNTRACK_ECACHE_H
+#define _NF_CONNTRACK_ECACHE_H
+#include <net/netfilter/nf_conntrack.h>
+
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+struct nf_conntrack_ecache {
+	struct nf_conn *ct;
+	unsigned int events;
+};
+DECLARE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
+
+#define CONNTRACK_ECACHE(x)	(__get_cpu_var(nf_conntrack_ecache).x)
+
+extern struct atomic_notifier_head nf_conntrack_chain;
+extern struct atomic_notifier_head nf_conntrack_expect_chain;
+
+static inline int nf_conntrack_register_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&nf_conntrack_chain, nb);
+}
+
+static inline int nf_conntrack_unregister_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb);
+}
+
+static inline int
+nf_conntrack_expect_register_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&nf_conntrack_expect_chain, nb);
+}
+
+static inline int
+nf_conntrack_expect_unregister_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&nf_conntrack_expect_chain,
+			nb);
+}
+
+extern void nf_ct_deliver_cached_events(const struct nf_conn *ct);
+extern void __nf_ct_event_cache_init(struct nf_conn *ct);
+extern void nf_ct_event_cache_flush(void);
+
+static inline void
+nf_conntrack_event_cache(enum ip_conntrack_events event,
+			 const struct sk_buff *skb)
+{
+	struct nf_conn *ct = (struct nf_conn *)skb->nfct;
+	struct nf_conntrack_ecache *ecache;
+
+	local_bh_disable();
+	ecache = &__get_cpu_var(nf_conntrack_ecache);
+	if (ct != ecache->ct)
+		__nf_ct_event_cache_init(ct);
+	ecache->events |= event;
+	local_bh_enable();
+}
+
+static inline void nf_conntrack_event(enum ip_conntrack_events event,
+				      struct nf_conn *ct)
+{
+	if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
+		atomic_notifier_call_chain(&nf_conntrack_chain, event, ct);
+}
+
+static inline void
+nf_conntrack_expect_event(enum ip_conntrack_expect_events event,
+			  struct nf_conntrack_expect *exp)
+{
+	atomic_notifier_call_chain(&nf_conntrack_expect_chain, event, exp);
+}
+
+#else /* CONFIG_NF_CONNTRACK_EVENTS */
+
+static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
+					    const struct sk_buff *skb) {}
+static inline void nf_conntrack_event(enum ip_conntrack_events event,
+				      struct nf_conn *ct) {}
+static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
+static inline void
+nf_conntrack_expect_event(enum ip_conntrack_expect_events event,
+			  struct nf_conntrack_expect *exp) {}
+static inline void nf_ct_event_cache_flush(void) {}
+#endif /* CONFIG_NF_CONNTRACK_EVENTS */
+
+#endif /*_NF_CONNTRACK_ECACHE_H*/
+
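
A hedged sketch of how a module might consume the event chain now declared in nf_conntrack_ecache.h; the callback, notifier block and message are invented for the example, and IPCT_NEW is assumed from the shared conntrack event enum.

static int example_ct_event(struct notifier_block *nb, unsigned long events,
			    void *ptr)
{
	struct nf_conn *ct = ptr;

	if (events & IPCT_NEW)
		printk(KERN_DEBUG "new conntrack %p\n", ct);
	return NOTIFY_DONE;
}

static struct notifier_block example_ct_nb = {
	.notifier_call	= example_ct_event,
};

/* nf_conntrack_register_notifier(&example_ct_nb) at module init,
 * nf_conntrack_unregister_notifier(&example_ct_nb) at module exit. */
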
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
new file mode 100644
index 0000000..41bcc9e
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -0,0 +1,80 @@
+/*
+ * connection tracking expectations.
+ */
+
+#ifndef _NF_CONNTRACK_EXPECT_H
+#define _NF_CONNTRACK_EXPECT_H
+#include <net/netfilter/nf_conntrack.h>
+
+extern struct list_head nf_conntrack_expect_list;
+extern struct kmem_cache *nf_conntrack_expect_cachep;
+extern struct file_operations exp_file_ops;
+
+struct nf_conntrack_expect
+{
+	/* Internal linked list (global expectation list) */
+	struct list_head list;
+
+	/* We expect this tuple, with the following mask */
+	struct nf_conntrack_tuple tuple, mask;
+
+	/* Function to call after setup and insertion */
+	void (*expectfn)(struct nf_conn *new,
+			 struct nf_conntrack_expect *this);
+
+	/* Helper to assign to new connection */
+	struct nf_conntrack_helper *helper;
+
+	/* The conntrack of the master connection */
+	struct nf_conn *master;
+
+	/* Timer function; deletes the expectation. */
+	struct timer_list timeout;
+
+	/* Usage count. */
+	atomic_t use;
+
+	/* Unique ID */
+	unsigned int id;
+
+	/* Flags */
+	unsigned int flags;
+
+#ifdef CONFIG_NF_NAT_NEEDED
+	__be32 saved_ip;
+	/* This is the original per-proto part, used to map the
+	 * expected connection the way the recipient expects. */
+	union nf_conntrack_man_proto saved_proto;
+	/* Direction relative to the master connection. */
+	enum ip_conntrack_dir dir;
+#endif
+};
+
+#define NF_CT_EXPECT_PERMANENT 0x1
+
+
+struct nf_conntrack_expect *
+__nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple);
+
+struct nf_conntrack_expect *
+nf_conntrack_expect_find_get(const struct nf_conntrack_tuple *tuple);
+
+struct nf_conntrack_expect *
+find_expectation(const struct nf_conntrack_tuple *tuple);
+
+void nf_ct_unlink_expect(struct nf_conntrack_expect *exp);
+void nf_ct_remove_expectations(struct nf_conn *ct);
+void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp);
+
+/* Allocate space for an expectation: this is mandatory before calling
+   nf_conntrack_expect_related.  You will have to call put afterwards. */
+struct nf_conntrack_expect *nf_conntrack_expect_alloc(struct nf_conn *me);
+void nf_conntrack_expect_init(struct nf_conntrack_expect *, int,
+			      union nf_conntrack_address *,
+			      union nf_conntrack_address *,
+			      u_int8_t, __be16 *, __be16 *);
+void nf_conntrack_expect_put(struct nf_conntrack_expect *exp);
+int nf_conntrack_expect_related(struct nf_conntrack_expect *expect);
+
+#endif /*_NF_CONNTRACK_EXPECT_H*/
+
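
A hedged usage sketch of the expectation API now collected in nf_conntrack_expect.h; example_expect() and the TCP data-port scenario are invented, but the call sequence and the nf_conntrack_expect_init() argument order follow the declarations above.

static int example_expect(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
			  __be16 data_port)
{
	int dir = CTINFO2DIR(ctinfo);
	struct nf_conntrack_expect *exp;
	int ret;

	exp = nf_conntrack_expect_alloc(ct);
	if (exp == NULL)
		return -ENOMEM;

	/* Expect a TCP flow between the same endpoints, any source port,
	 * to the signalled destination port. */
	nf_conntrack_expect_init(exp, ct->tuplehash[dir].tuple.src.l3num,
				 &ct->tuplehash[dir].tuple.src.u3,
				 &ct->tuplehash[dir].tuple.dst.u3,
				 IPPROTO_TCP, NULL, &data_port);

	ret = nf_conntrack_expect_related(exp);
	nf_conntrack_expect_put(exp);
	return ret;
}
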
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 86ec817..8c72ac9 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -34,20 +34,22 @@
 		    struct nf_conn *ct,
 		    enum ip_conntrack_info conntrackinfo);
 
+	void (*destroy)(struct nf_conn *ct);
+
 	int (*to_nfattr)(struct sk_buff *skb, const struct nf_conn *ct);
 };
 
+extern struct nf_conntrack_helper *
+__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple);
+
+extern struct nf_conntrack_helper *
+nf_ct_helper_find_get(const struct nf_conntrack_tuple *tuple);
+
+extern struct nf_conntrack_helper *
+__nf_conntrack_helper_find_byname(const char *name);
+
+extern void nf_ct_helper_put(struct nf_conntrack_helper *helper);
 extern int nf_conntrack_helper_register(struct nf_conntrack_helper *);
 extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
 
-/* Allocate space for an expectation: this is mandatory before calling
-   nf_conntrack_expect_related.  You will have to call put afterwards. */
-extern struct nf_conntrack_expect *
-nf_conntrack_expect_alloc(struct nf_conn *master);
-extern void nf_conntrack_expect_put(struct nf_conntrack_expect *exp);
-
-/* Add an expected connection: can have more than one per connection */
-extern int nf_conntrack_expect_related(struct nf_conntrack_expect *exp);
-extern void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp);
-
 #endif /*_NF_CONNTRACK_HELPER_H*/
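
For illustration only: the shape of a helper registration against the structure above, including the newly added destroy() hook. Every name and value in the sketch is a placeholder, not taken from the patch.

static int example_help(struct sk_buff **pskb, unsigned int protoff,
			struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	return NF_ACCEPT;		/* parse the payload here */
}

static void example_destroy(struct nf_conn *ct)
{
	/* release any per-connection private state */
}

static struct nf_conntrack_helper example_helper = {
	.name		= "example",
	.me		= THIS_MODULE,
	.help		= example_help,
	.destroy	= example_destroy,
	/* .tuple/.mask select which connections the helper attaches to */
};

/* nf_conntrack_helper_register(&example_helper) /
 * nf_conntrack_helper_unregister(&example_helper) in module init/exit. */
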
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index dac43b1..664ddcf 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -18,9 +18,6 @@
 
 struct nf_conntrack_l3proto
 {
-	/* Next pointer. */
-	struct list_head list;
-
 	/* L3 Protocol Family number. ex) PF_INET */
 	u_int16_t l3proto;
 
@@ -78,6 +75,12 @@
 	int (*nfattr_to_tuple)(struct nfattr *tb[],
 			       struct nf_conntrack_tuple *t);
 
+#ifdef CONFIG_SYSCTL
+	struct ctl_table_header	*ctl_table_header;
+	struct ctl_table	*ctl_table_path;
+	struct ctl_table	*ctl_table;
+#endif /* CONFIG_SYSCTL */
+
 	/* Module (if any) which this is connected to. */
 	struct module *me;
 };
@@ -86,7 +89,7 @@
 
 /* Protocol registration. */
 extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
-extern void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto);
+extern int nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto);
 
 extern struct nf_conntrack_l3proto *
 nf_ct_l3proto_find_get(u_int16_t l3proto);
@@ -96,13 +99,13 @@
 /* Existing built-in protocols */
 extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
 extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
-extern struct nf_conntrack_l3proto nf_conntrack_generic_l3proto;
+extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic;
 
 static inline struct nf_conntrack_l3proto *
 __nf_ct_l3proto_find(u_int16_t l3proto)
 {
 	if (unlikely(l3proto >= AF_MAX))
-		return &nf_conntrack_generic_l3proto;
+		return &nf_conntrack_l3proto_generic;
 	return nf_ct_l3protos[l3proto];
 }
 
diff --git a/include/net/netfilter/nf_conntrack_protocol.h b/include/net/netfilter/nf_conntrack_l4proto.h
similarity index 71%
rename from include/net/netfilter/nf_conntrack_protocol.h
rename to include/net/netfilter/nf_conntrack_l4proto.h
index 1f33737..fc8af08 100644
--- a/include/net/netfilter/nf_conntrack_protocol.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -1,5 +1,5 @@
 /*
- * Header for use in defining a given protocol for connection tracking.
+ * Header for use in defining a given L4 protocol for connection tracking.
  *
  * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
  *	- generalized L3 protocol dependent part.
@@ -7,23 +7,20 @@
  * Derived from include/linux/netfilter_ipv4/ip_conntrack_protocol.h
  */
 
-#ifndef _NF_CONNTRACK_PROTOCOL_H
-#define _NF_CONNTRACK_PROTOCOL_H
+#ifndef _NF_CONNTRACK_L4PROTO_H
+#define _NF_CONNTRACK_L4PROTO_H
 #include <net/netfilter/nf_conntrack.h>
 
 struct seq_file;
 struct nfattr;
 
-struct nf_conntrack_protocol
+struct nf_conntrack_l4proto
 {
-	/* Next pointer. */
-	struct list_head list;
-
 	/* L3 Protocol number. */
 	u_int16_t l3proto;
 
-	/* Protocol number. */
-	u_int8_t proto;
+	/* L4 Protocol number. */
+	u_int8_t l4proto;
 
 	/* Protocol name */
 	const char *name;
@@ -79,30 +76,40 @@
 	int (*nfattr_to_tuple)(struct nfattr *tb[],
 			       struct nf_conntrack_tuple *t);
 
+#ifdef CONFIG_SYSCTL
+	struct ctl_table_header	**ctl_table_header;
+	struct ctl_table	*ctl_table;
+	unsigned int		*ctl_table_users;
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+	struct ctl_table_header	*ctl_compat_table_header;
+	struct ctl_table	*ctl_compat_table;
+#endif
+#endif
+
 	/* Module (if any) which this is connected to. */
 	struct module *me;
 };
 
 /* Existing built-in protocols */
-extern struct nf_conntrack_protocol nf_conntrack_protocol_tcp6;
-extern struct nf_conntrack_protocol nf_conntrack_protocol_udp4;
-extern struct nf_conntrack_protocol nf_conntrack_protocol_udp6;
-extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
 
 #define MAX_NF_CT_PROTO 256
-extern struct nf_conntrack_protocol **nf_ct_protos[PF_MAX];
+extern struct nf_conntrack_l4proto **nf_ct_protos[PF_MAX];
 
-extern struct nf_conntrack_protocol *
-__nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol);
+extern struct nf_conntrack_l4proto *
+__nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto);
 
-extern struct nf_conntrack_protocol *
-nf_ct_proto_find_get(u_int16_t l3proto, u_int8_t protocol);
+extern struct nf_conntrack_l4proto *
+nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t protocol);
 
-extern void nf_ct_proto_put(struct nf_conntrack_protocol *p);
+extern void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
 
 /* Protocol registration. */
-extern int nf_conntrack_protocol_register(struct nf_conntrack_protocol *proto);
-extern void nf_conntrack_protocol_unregister(struct nf_conntrack_protocol *proto);
+extern int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *proto);
+extern int nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *proto);
 
 /* Generic netlink helpers */
 extern int nf_ct_port_tuple_to_nfattr(struct sk_buff *skb,
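
A minimal sketch, not taken from the patch, of how a tracker registers under the renamed l4proto API; the GRE constants are real protocol numbers, the per-packet callbacks are elided, and error handling is abbreviated.

static struct nf_conntrack_l4proto example_l4proto = {
	.l3proto	= PF_INET,
	.l4proto	= IPPROTO_GRE,
	.name		= "gre",
	.me		= THIS_MODULE,
	/* .pkt_to_tuple, .invert_tuple, .packet, .new, ... as before */
};

static int __init example_init(void)
{
	return nf_conntrack_l4proto_register(&example_l4proto);
}

static void __exit example_exit(void)
{
	/* unregister now returns int; ignored here for brevity */
	nf_conntrack_l4proto_unregister(&example_l4proto);
}
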
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index 530ef1f..5d72b16 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -24,10 +24,10 @@
 
 /* The l3 protocol-specific manipulable parts of the tuple: always in
    network order! */
-union nf_conntrack_man_l3proto {
+union nf_conntrack_address {
 	u_int32_t all[NF_CT_TUPLE_L3SIZE];
-	u_int32_t ip;
-	u_int32_t ip6[4];
+	__be32 ip;
+	__be32 ip6[4];
 };
 
 /* The protocol-specific manipulable parts of the tuple: always in
@@ -38,23 +38,26 @@
 	u_int16_t all;
 
 	struct {
-		u_int16_t port;
+		__be16 port;
 	} tcp;
 	struct {
-		u_int16_t port;
+		__be16 port;
 	} udp;
 	struct {
-		u_int16_t id;
+		__be16 id;
 	} icmp;
 	struct {
-		u_int16_t port;
+		__be16 port;
 	} sctp;
+	struct {
+		__be16 key;	/* GRE key is 32bit, PPtP only uses 16bit */
+	} gre;
 };
 
 /* The manipulable part of the tuple. */
 struct nf_conntrack_man
 {
-	union nf_conntrack_man_l3proto u3;
+	union nf_conntrack_address u3;
 	union nf_conntrack_man_proto u;
 	/* Layer 3 protocol */
 	u_int16_t l3num;
@@ -67,27 +70,26 @@
 
 	/* These are the parts of the tuple which are fixed. */
 	struct {
-		union {
-			u_int32_t all[NF_CT_TUPLE_L3SIZE];
-			u_int32_t ip;
-			u_int32_t ip6[4];
-		} u3;
+		union nf_conntrack_address u3;
 		union {
 			/* Add other protocols here. */
 			u_int16_t all;
 
 			struct {
-				u_int16_t port;
+				__be16 port;
 			} tcp;
 			struct {
-				u_int16_t port;
+				__be16 port;
 			} udp;
 			struct {
 				u_int8_t type, code;
 			} icmp;
 			struct {
-				u_int16_t port;
+				__be16 port;
 			} sctp;
+			struct {
+				__be16 key;
+			} gre;
 		} u;
 
 		/* The protocol. */
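
A short sketch of filling the unified union nf_conntrack_address for either family; example_fill_u3() and its arguments are illustrative only.

static void example_fill_u3(union nf_conntrack_address *u3, int family,
			    __be32 addr4, const struct in6_addr *addr6)
{
	memset(u3, 0, sizeof(*u3));
	if (family == AF_INET)
		u3->ip = addr4;
	else
		memcpy(u3->ip6, addr6, sizeof(u3->ip6));
}
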
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
new file mode 100644
index 0000000..61c6206
--- /dev/null
+++ b/include/net/netfilter/nf_nat.h
@@ -0,0 +1,77 @@
+#ifndef _NF_NAT_H
+#define _NF_NAT_H
+#include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+
+#define NF_NAT_MAPPING_TYPE_MAX_NAMELEN 16
+
+enum nf_nat_manip_type
+{
+	IP_NAT_MANIP_SRC,
+	IP_NAT_MANIP_DST
+};
+
+/* SRC manip occurs POST_ROUTING or LOCAL_IN */
+#define HOOK2MANIP(hooknum) ((hooknum) != NF_IP_POST_ROUTING && (hooknum) != NF_IP_LOCAL_IN)
+
+#define IP_NAT_RANGE_MAP_IPS 1
+#define IP_NAT_RANGE_PROTO_SPECIFIED 2
+
+/* NAT sequence number modifications */
+struct nf_nat_seq {
+	/* position of the last TCP sequence number modification (if any) */
+	u_int32_t correction_pos;
+
+	/* sequence number offset before and after last modification */
+	int16_t offset_before, offset_after;
+};
+
+/* Single range specification. */
+struct nf_nat_range
+{
+	/* Set to OR of flags above. */
+	unsigned int flags;
+
+	/* Inclusive: network order. */
+	__be32 min_ip, max_ip;
+
+	/* Inclusive: network order */
+	union nf_conntrack_man_proto min, max;
+};
+
+/* For backwards compat: don't use in modern code. */
+struct nf_nat_multi_range_compat
+{
+	unsigned int rangesize; /* Must be 1. */
+
+	/* hangs off end. */
+	struct nf_nat_range range[1];
+};
+
+#ifdef __KERNEL__
+#include <linux/list.h>
+
+/* The structure embedded in the conntrack structure. */
+struct nf_nat_info
+{
+	struct list_head bysource;
+	struct nf_nat_seq seq[IP_CT_DIR_MAX];
+};
+
+struct nf_conn;
+
+/* Set up the info structure to map into this range. */
+extern unsigned int nf_nat_setup_info(struct nf_conn *ct,
+				      const struct nf_nat_range *range,
+				      unsigned int hooknum);
+
+/* Is this tuple already taken? (not by us) */
+extern int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
+			     const struct nf_conn *ignored_conntrack);
+
+extern int nf_nat_module_is_loaded;
+
+#else  /* !__KERNEL__: iptables wants this to compile. */
+#define nf_nat_multi_range nf_nat_multi_range_compat
+#endif /*__KERNEL__*/
+#endif
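
A hedged sketch of building a one-address SNAT mapping with the structures above; example_snat() and its arguments are invented, while the flag and the nf_nat_setup_info() signature come from this header.

static unsigned int example_snat(struct nf_conn *ct, __be32 ip)
{
	struct nf_nat_range range = {
		.flags	= IP_NAT_RANGE_MAP_IPS,
		.min_ip	= ip,
		.max_ip	= ip,	/* single address, default port selection */
	};

	return nf_nat_setup_info(ct, &range, NF_IP_POST_ROUTING);
}
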
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
new file mode 100644
index 0000000..9778ffa
--- /dev/null
+++ b/include/net/netfilter/nf_nat_core.h
@@ -0,0 +1,27 @@
+#ifndef _NF_NAT_CORE_H
+#define _NF_NAT_CORE_H
+#include <linux/list.h>
+#include <net/netfilter/nf_conntrack.h>
+
+/* This header is used to share core functionality between the standalone
+   NAT module, and the compatibility layer's use of NAT for masquerading. */
+
+extern unsigned int nf_nat_packet(struct nf_conn *ct,
+				  enum ip_conntrack_info ctinfo,
+				  unsigned int hooknum,
+				  struct sk_buff **pskb);
+
+extern int nf_nat_icmp_reply_translation(struct nf_conn *ct,
+					 enum ip_conntrack_info ctinfo,
+					 unsigned int hooknum,
+					 struct sk_buff **pskb);
+
+static inline int nf_nat_initialized(struct nf_conn *ct,
+				     enum nf_nat_manip_type manip)
+{
+	if (manip == IP_NAT_MANIP_SRC)
+		return test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+	else
+		return test_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
+}
+#endif /* _NF_NAT_CORE_H */
diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h
new file mode 100644
index 0000000..ec98ecf
--- /dev/null
+++ b/include/net/netfilter/nf_nat_helper.h
@@ -0,0 +1,32 @@
+#ifndef _NF_NAT_HELPER_H
+#define _NF_NAT_HELPER_H
+/* NAT protocol helper routines. */
+
+#include <net/netfilter/nf_conntrack.h>
+
+struct sk_buff;
+
+/* These return true or false. */
+extern int nf_nat_mangle_tcp_packet(struct sk_buff **skb,
+				    struct nf_conn *ct,
+				    enum ip_conntrack_info ctinfo,
+				    unsigned int match_offset,
+				    unsigned int match_len,
+				    const char *rep_buffer,
+				    unsigned int rep_len);
+extern int nf_nat_mangle_udp_packet(struct sk_buff **skb,
+				    struct nf_conn *ct,
+				    enum ip_conntrack_info ctinfo,
+				    unsigned int match_offset,
+				    unsigned int match_len,
+				    const char *rep_buffer,
+				    unsigned int rep_len);
+extern int nf_nat_seq_adjust(struct sk_buff **pskb,
+			     struct nf_conn *ct,
+			     enum ip_conntrack_info ctinfo);
+
+/* Setup NAT on this expected conntrack so it follows master, but goes
+ * to port ct->master->saved_proto. */
+extern void nf_nat_follow_master(struct nf_conn *ct,
+				 struct nf_conntrack_expect *this);
+#endif
diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h
new file mode 100644
index 0000000..a9ec5ef
--- /dev/null
+++ b/include/net/netfilter/nf_nat_protocol.h
@@ -0,0 +1,70 @@
+/* Header for use in defining a given protocol. */
+#ifndef _NF_NAT_PROTOCOL_H
+#define _NF_NAT_PROTOCOL_H
+#include <net/netfilter/nf_nat.h>
+#include <linux/netfilter/nfnetlink_conntrack.h>
+
+struct nf_nat_range;
+
+struct nf_nat_protocol
+{
+	/* Protocol name */
+	const char *name;
+
+	/* Protocol number. */
+	unsigned int protonum;
+
+	struct module *me;
+
+	/* Translate a packet to the target according to manip type.
+	   Return true if succeeded. */
+	int (*manip_pkt)(struct sk_buff **pskb,
+			 unsigned int iphdroff,
+			 const struct nf_conntrack_tuple *tuple,
+			 enum nf_nat_manip_type maniptype);
+
+	/* Is the manipulable part of the tuple between min and max incl? */
+	int (*in_range)(const struct nf_conntrack_tuple *tuple,
+			enum nf_nat_manip_type maniptype,
+			const union nf_conntrack_man_proto *min,
+			const union nf_conntrack_man_proto *max);
+
+	/* Alter the per-proto part of the tuple (depending on
+	   maniptype), to give a unique tuple in the given range if
+	   possible; return false if not.  Per-protocol part of tuple
+	   is initialized to the incoming packet. */
+	int (*unique_tuple)(struct nf_conntrack_tuple *tuple,
+			    const struct nf_nat_range *range,
+			    enum nf_nat_manip_type maniptype,
+			    const struct nf_conn *ct);
+
+	int (*range_to_nfattr)(struct sk_buff *skb,
+			       const struct nf_nat_range *range);
+
+	int (*nfattr_to_range)(struct nfattr *tb[],
+			       struct nf_nat_range *range);
+};
+
+/* Protocol registration. */
+extern int nf_nat_protocol_register(struct nf_nat_protocol *proto);
+extern void nf_nat_protocol_unregister(struct nf_nat_protocol *proto);
+
+extern struct nf_nat_protocol *nf_nat_proto_find_get(u_int8_t protocol);
+extern void nf_nat_proto_put(struct nf_nat_protocol *proto);
+
+/* Built-in protocols. */
+extern struct nf_nat_protocol nf_nat_protocol_tcp;
+extern struct nf_nat_protocol nf_nat_protocol_udp;
+extern struct nf_nat_protocol nf_nat_protocol_icmp;
+extern struct nf_nat_protocol nf_nat_unknown_protocol;
+
+extern int init_protocols(void) __init;
+extern void cleanup_protocols(void);
+extern struct nf_nat_protocol *find_nat_proto(u_int16_t protonum);
+
+extern int nf_nat_port_range_to_nfattr(struct sk_buff *skb,
+				       const struct nf_nat_range *range);
+extern int nf_nat_port_nfattr_to_range(struct nfattr *tb[],
+				       struct nf_nat_range *range);
+
+#endif /* _NF_NAT_PROTOCOL_H */
diff --git a/include/net/netfilter/nf_nat_rule.h b/include/net/netfilter/nf_nat_rule.h
new file mode 100644
index 0000000..f191c67
--- /dev/null
+++ b/include/net/netfilter/nf_nat_rule.h
@@ -0,0 +1,35 @@
+#ifndef _NF_NAT_RULE_H
+#define _NF_NAT_RULE_H
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+
+/* Compatibility definitions for ipt_FOO modules */
+#define ip_nat_range			nf_nat_range
+#define ip_conntrack_tuple		nf_conntrack_tuple
+#define ip_conntrack_get		nf_ct_get
+#define ip_conntrack			nf_conn
+#define ip_nat_setup_info		nf_nat_setup_info
+#define ip_nat_multi_range_compat	nf_nat_multi_range_compat
+#define ip_ct_iterate_cleanup		nf_ct_iterate_cleanup
+#define	IP_NF_ASSERT			NF_CT_ASSERT
+
+extern int nf_nat_rule_init(void) __init;
+extern void nf_nat_rule_cleanup(void);
+extern int nf_nat_rule_find(struct sk_buff **pskb,
+			    unsigned int hooknum,
+			    const struct net_device *in,
+			    const struct net_device *out,
+			    struct nf_conn *ct,
+			    struct nf_nat_info *info);
+
+extern unsigned int
+alloc_null_binding(struct nf_conn *ct,
+		   struct nf_nat_info *info,
+		   unsigned int hooknum);
+
+extern unsigned int
+alloc_null_binding_confirmed(struct nf_conn *ct,
+			     struct nf_nat_info *info,
+			     unsigned int hooknum);
+#endif /* _NF_NAT_RULE_H */
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
index 12c214b..83da7e1 100644
--- a/include/net/netlabel.h
+++ b/include/net/netlabel.h
@@ -111,13 +111,34 @@
 	void (*free) (const void *data);
 	void *data;
 };
+/* The catmap bitmap field MUST be a power of two in length and large
+ * enough to hold at least 240 bits.  Special care (i.e. check the code!)
+ * should be used when changing these values as the LSM implementation
+ * probably has functions which rely on the sizes of these types to speed
+ * processing. */
+#define NETLBL_CATMAP_MAPTYPE           u64
+#define NETLBL_CATMAP_MAPCNT            4
+#define NETLBL_CATMAP_MAPSIZE           (sizeof(NETLBL_CATMAP_MAPTYPE) * 8)
+#define NETLBL_CATMAP_SIZE              (NETLBL_CATMAP_MAPSIZE * \
+					 NETLBL_CATMAP_MAPCNT)
+#define NETLBL_CATMAP_BIT               (NETLBL_CATMAP_MAPTYPE)0x01
+struct netlbl_lsm_secattr_catmap {
+	u32 startbit;
+	NETLBL_CATMAP_MAPTYPE bitmap[NETLBL_CATMAP_MAPCNT];
+	struct netlbl_lsm_secattr_catmap *next;
+};
+#define NETLBL_SECATTR_NONE             0x00000000
+#define NETLBL_SECATTR_DOMAIN           0x00000001
+#define NETLBL_SECATTR_CACHE            0x00000002
+#define NETLBL_SECATTR_MLS_LVL          0x00000004
+#define NETLBL_SECATTR_MLS_CAT          0x00000008
 struct netlbl_lsm_secattr {
+	u32 flags;
+
 	char *domain;
 
 	u32 mls_lvl;
-	u32 mls_lvl_vld;
-	unsigned char *mls_cat;
-	size_t mls_cat_len;
+	struct netlbl_lsm_secattr_catmap *mls_cat;
 
 	struct netlbl_lsm_cache *cache;
 };
@@ -165,18 +186,54 @@
 }
 
 /**
+ * netlbl_secattr_catmap_alloc - Allocate an LSM secattr catmap
+ * @flags: memory allocation flags
+ *
+ * Description:
+ * Allocate memory for an LSM secattr catmap; returns a pointer on success,
+ * NULL on failure.
+ *
+ */
+static inline struct netlbl_lsm_secattr_catmap *netlbl_secattr_catmap_alloc(
+	                                                           gfp_t flags)
+{
+	return kzalloc(sizeof(struct netlbl_lsm_secattr_catmap), flags);
+}
+
+/**
+ * netlbl_secattr_catmap_free - Free an LSM secattr catmap
+ * @catmap: the category bitmap
+ *
+ * Description:
+ * Free an LSM secattr catmap.
+ *
+ */
+static inline void netlbl_secattr_catmap_free(
+	                              struct netlbl_lsm_secattr_catmap *catmap)
+{
+	struct netlbl_lsm_secattr_catmap *iter;
+
+	do {
+		iter = catmap;
+		catmap = catmap->next;
+		kfree(iter);
+	} while (catmap);
+}
+
+/**
  * netlbl_secattr_init - Initialize a netlbl_lsm_secattr struct
  * @secattr: the struct to initialize
  *
  * Description:
- * Initialize an already allocated netlbl_lsm_secattr struct.  Returns zero on
- * success, negative values on error.
+ * Initialize an already allocated netlbl_lsm_secattr struct.
  *
  */
-static inline int netlbl_secattr_init(struct netlbl_lsm_secattr *secattr)
+static inline void netlbl_secattr_init(struct netlbl_lsm_secattr *secattr)
 {
-	memset(secattr, 0, sizeof(*secattr));
-	return 0;
+	secattr->flags = 0;
+	secattr->domain = NULL;
+	secattr->mls_cat = NULL;
+	secattr->cache = NULL;
 }
 
 /**
@@ -193,7 +250,8 @@
 	if (secattr->cache)
 		netlbl_secattr_cache_free(secattr->cache);
 	kfree(secattr->domain);
-	kfree(secattr->mls_cat);
+	if (secattr->mls_cat)
+		netlbl_secattr_catmap_free(secattr->mls_cat);
 }
 
 /**
@@ -205,7 +263,7 @@
  * pointer on success, or NULL on failure.
  *
  */
-static inline struct netlbl_lsm_secattr *netlbl_secattr_alloc(int flags)
+static inline struct netlbl_lsm_secattr *netlbl_secattr_alloc(gfp_t flags)
 {
 	return kzalloc(sizeof(struct netlbl_lsm_secattr), flags);
 }
@@ -224,6 +282,51 @@
 	kfree(secattr);
 }
 
+#ifdef CONFIG_NETLABEL
+int netlbl_secattr_catmap_walk(struct netlbl_lsm_secattr_catmap *catmap,
+			       u32 offset);
+int netlbl_secattr_catmap_walk_rng(struct netlbl_lsm_secattr_catmap *catmap,
+				   u32 offset);
+int netlbl_secattr_catmap_setbit(struct netlbl_lsm_secattr_catmap *catmap,
+				 u32 bit,
+				 gfp_t flags);
+int netlbl_secattr_catmap_setrng(struct netlbl_lsm_secattr_catmap *catmap,
+				 u32 start,
+				 u32 end,
+				 gfp_t flags);
+#else
+static inline int netlbl_secattr_catmap_walk(
+	                              struct netlbl_lsm_secattr_catmap *catmap,
+				      u32 offset)
+{
+	return -ENOENT;
+}
+
+static inline int netlbl_secattr_catmap_walk_rng(
+				      struct netlbl_lsm_secattr_catmap *catmap,
+				      u32 offset)
+{
+	return -ENOENT;
+}
+
+static inline int netlbl_secattr_catmap_setbit(
+	                              struct netlbl_lsm_secattr_catmap *catmap,
+				      u32 bit,
+				      gfp_t flags)
+{
+	return 0;
+}
+
+static inline int netlbl_secattr_catmap_setrng(
+	                              struct netlbl_lsm_secattr_catmap *catmap,
+				      u32 start,
+				      u32 end,
+				      gfp_t flags)
+{
+	return 0;
+}
+#endif
+
 /*
  * LSM protocol operations
  */
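
A worked sketch, not part of the patch, of how a category number maps onto the catmap introduced above: each node spans NETLBL_CATMAP_SIZE (4 * 64 = 256) bits from its startbit, so a category selects one u64 word and one bit within the node whose range contains it. example_catmap_test() is hypothetical and assumes the node list is kept sorted by startbit.

static int example_catmap_test(const struct netlbl_lsm_secattr_catmap *catmap,
			       u32 cat)
{
	u32 off;

	while (catmap && cat >= catmap->startbit + NETLBL_CATMAP_SIZE)
		catmap = catmap->next;
	if (catmap == NULL || cat < catmap->startbit)
		return 0;

	off = cat - catmap->startbit;
	return (catmap->bitmap[off / NETLBL_CATMAP_MAPSIZE] >>
		(off % NETLBL_CATMAP_MAPSIZE)) & 1;
}
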
diff --git a/include/net/netlink.h b/include/net/netlink.h
index ce5cba1..bcaf67b 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <linux/netlink.h>
+#include <linux/jiffies.h>
 
 /* ========================================================================
  *         Netlink Messages and Attributes Interface (As Seen On TV)
@@ -500,14 +501,15 @@
 
 /**
  * nlmsg_new - Allocate a new netlink message
- * @size: maximum size of message
+ * @payload: size of the message payload
  * @flags: the type of memory to allocate.
  *
- * Use NLMSG_GOODSIZE if size isn't know and you need a good default size.
+ * Use NLMSG_DEFAULT_SIZE if the size of the payload isn't known
+ * and a good default is needed.
  */
-static inline struct sk_buff *nlmsg_new(int size, gfp_t flags)
+static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
 {
-	return alloc_skb(size, flags);
+	return alloc_skb(nlmsg_total_size(payload), flags);
 }
 
 /**
@@ -828,6 +830,9 @@
 #define NLA_PUT_U16(skb, attrtype, value) \
 	NLA_PUT_TYPE(skb, u16, attrtype, value)
 
+#define NLA_PUT_LE16(skb, attrtype, value) \
+	NLA_PUT_TYPE(skb, __le16, attrtype, value)
+
 #define NLA_PUT_U32(skb, attrtype, value) \
 	NLA_PUT_TYPE(skb, u32, attrtype, value)
 
@@ -874,6 +879,15 @@
 }
 
 /**
+ * nla_get_le16 - return payload of __le16 attribute
+ * @nla: __le16 netlink attribute
+ */
+static inline __le16 nla_get_le16(struct nlattr *nla)
+{
+	return *(__le16 *) nla_data(nla);
+}
+
+/**
  * nla_get_u8 - return payload of u8 attribute
  * @nla: u8 netlink attribute
  */
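
A hedged sketch combining the two additions above: allocation with the payload-oriented nlmsg_new() and the new __le16 attribute macro. EXAMPLE_ATTR_PORT and the function are invented; NLA_PUT_LE16 bails out to the nla_put_failure label like the other NLA_PUT_* macros.

#define EXAMPLE_ATTR_PORT 1

static struct sk_buff *example_build(__le16 port)
{
	struct sk_buff *skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);

	if (skb == NULL)
		return NULL;
	NLA_PUT_LE16(skb, EXAMPLE_ATTR_PORT, port);
	return skb;

nla_put_failure:
	nlmsg_free(skb);
	return NULL;
}
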
diff --git a/include/net/protocol.h b/include/net/protocol.h
index c643bce..105bf12 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -50,7 +50,7 @@
 	void	(*err_handler)(struct sk_buff *skb,
 			       struct inet6_skb_parm *opt,
 			       int type, int code, int offset,
-			       __u32 info);
+			       __be32 info);
 
 	int	(*gso_send_check)(struct sk_buff *skb);
 	struct sk_buff *(*gso_segment)(struct sk_buff *skb,
@@ -71,7 +71,7 @@
 
         /* These two fields form the lookup key.  */
 	unsigned short	 type;	   /* This is the 2nd argument to socket(2). */
-	int		 protocol; /* This is the L4 protocol number.  */
+	unsigned short	 protocol; /* This is the L4 protocol number.  */
 
 	struct proto	 *prot;
 	const struct proto_ops *ops;
diff --git a/include/net/rawv6.h b/include/net/rawv6.h
index 14476a7..af89608 100644
--- a/include/net/rawv6.h
+++ b/include/net/rawv6.h
@@ -21,7 +21,7 @@
 					  struct sk_buff *skb,
 					  struct inet6_skb_parm *opt,
 					  int type, int code, 
-					  int offset, u32 info);
+					  int offset, __be32 info);
 
 #endif
 
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 8e165ca..7aed02c 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -28,14 +28,15 @@
 
 struct request_sock_ops {
 	int		family;
-	kmem_cache_t	*slab;
 	int		obj_size;
+	struct kmem_cache	*slab;
 	int		(*rtx_syn_ack)(struct sock *sk,
 				       struct request_sock *req,
 				       struct dst_entry *dst);
 	void		(*send_ack)(struct sk_buff *skb,
 				    struct request_sock *req);
-	void		(*send_reset)(struct sk_buff *skb);
+	void		(*send_reset)(struct sock *sk,
+				      struct sk_buff *skb);
 	void		(*destructor)(struct request_sock *req);
 };
 
@@ -51,14 +52,15 @@
 	u32				rcv_wnd;	  /* rcv_wnd offered first time */
 	u32				ts_recent;
 	unsigned long			expires;
-	struct request_sock_ops		*rsk_ops;
+	const struct request_sock_ops	*rsk_ops;
 	struct sock			*sk;
 	u32				secid;
+	u32				peer_secid;
 };
 
-static inline struct request_sock *reqsk_alloc(struct request_sock_ops *ops)
+static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
 {
-	struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC);
+	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
 
 	if (req != NULL)
 		req->rsk_ops = ops;
@@ -120,7 +122,7 @@
 };
 
 extern int reqsk_queue_alloc(struct request_sock_queue *queue,
-			     const int nr_table_entries);
+			     unsigned int nr_table_entries);
 
 static inline struct listen_sock *reqsk_queue_yank_listen_sk(struct request_sock_queue *queue)
 {
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index b0e9108..8208639 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -60,6 +60,7 @@
 	int			(*graft)(struct Qdisc *, unsigned long cl,
 					struct Qdisc *, struct Qdisc **);
 	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
+	void			(*qlen_notify)(struct Qdisc *, unsigned long);
 
 	/* Class manipulation routines */
 	unsigned long		(*get)(struct Qdisc *, u32 classid);
@@ -144,7 +145,7 @@
 	void			*root;
 	int			(*classify)(struct sk_buff*, struct tcf_proto*,
 					struct tcf_result *);
-	u32			protocol;
+	__be16			protocol;
 
 	/* All the rest */
 	u32			prio;
@@ -172,9 +173,10 @@
 extern void dev_deactivate(struct net_device *dev);
 extern void qdisc_reset(struct Qdisc *qdisc);
 extern void qdisc_destroy(struct Qdisc *qdisc);
+extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
 extern struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops);
 extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
-				       struct Qdisc_ops *ops);
+				       struct Qdisc_ops *ops, u32 parentid);
 
 static inline void
 tcf_destroy(struct tcf_proto *tp)
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 807d6f1..6114c4f 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -116,9 +116,11 @@
 typedef union {
 	__s32 i32;
 	__u32 u32;
+	__be32 be32;
 	__u16 u16;
 	__u8 u8;
 	int error;
+	__be16 err;
 	sctp_state_t state;
 	sctp_event_timeout_t to;
 	unsigned long zero;
@@ -164,9 +166,11 @@
 
 SCTP_ARG_CONSTRUCTOR(I32,	__s32, i32)
 SCTP_ARG_CONSTRUCTOR(U32,	__u32, u32)
+SCTP_ARG_CONSTRUCTOR(BE32,	__be32, be32)
 SCTP_ARG_CONSTRUCTOR(U16,	__u16, u16)
 SCTP_ARG_CONSTRUCTOR(U8,	__u8, u8)
 SCTP_ARG_CONSTRUCTOR(ERROR,     int, error)
+SCTP_ARG_CONSTRUCTOR(PERR,      __be16, err)	/* protocol error */
 SCTP_ARG_CONSTRUCTOR(STATE,	sctp_state_t, state)
 SCTP_ARG_CONSTRUCTOR(TO,	sctp_event_timeout_t, to)
 SCTP_ARG_CONSTRUCTOR(PTR,	void *, ptr)
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 6c632e2..5ddb855 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -356,7 +356,7 @@
  * addresses.
  */
 #define IS_IPV4_UNUSABLE_ADDRESS(a) \
-	((INADDR_BROADCAST == *a) || \
+	((htonl(INADDR_BROADCAST) == *a) || \
 	(MULTICAST(*a)) || \
 	(((unsigned char *)(a))[0] == 0) || \
 	((((unsigned char *)(a))[0] == 198) && \
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 764e3af..215461f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -585,7 +585,7 @@
 }
 
 /* Convert from an address parameter type to an address family.  */
-static inline int param_type2af(__u16 type)
+static inline int param_type2af(__be16 type)
 {
 	switch (type) {
 	case SCTP_PARAM_IPV4_ADDRESS:
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index de313de..3269ed1 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -213,7 +213,7 @@
 					  const struct sctp_chunk *);
 struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *,
 					  const struct sctp_chunk *);
-void sctp_init_cause(struct sctp_chunk *, __u16 cause, const void *, size_t);
+void sctp_init_cause(struct sctp_chunk *, __be16 cause, const void *, size_t);
 struct sctp_chunk *sctp_make_abort(const struct sctp_association *,
 			      const struct sctp_chunk *,
 			      const size_t hint);
@@ -236,14 +236,14 @@
 				      const size_t paylen);
 struct sctp_chunk *sctp_make_op_error(const struct sctp_association *,
 				 const struct sctp_chunk *chunk,
-				 __u16 cause_code,
+				 __be16 cause_code,
 				 const void *payload,
 				 size_t paylen);
 
 struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
 					      union sctp_addr *,
 					      struct sockaddr *,
-					      int, __u16);
+					      int, __be16);
 struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
 					     union sctp_addr *addr);
 struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index c6d93bb..c089f93 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -537,7 +537,7 @@
 					  struct net_device *);
 	void		(*dst_saddr)	(union sctp_addr *saddr,
 					 struct dst_entry *dst,
-					 unsigned short port);
+					 __be16 port);
 	int		(*cmp_addr)	(const union sctp_addr *addr1,
 					 const union sctp_addr *addr2);
 	void		(*addr_copy)	(union sctp_addr *dst,
@@ -553,14 +553,14 @@
 					 struct sock *sk);
 	void		(*from_addr_param) (union sctp_addr *,
 					    union sctp_addr_param *,
-					    __u16 port, int iif);	
+					    __be16 port, int iif);
 	int		(*to_addr_param) (const union sctp_addr *,
 					  union sctp_addr_param *); 
 	int		(*addr_valid)	(union sctp_addr *,
 					 struct sctp_sock *,
 					 const struct sk_buff *);
 	sctp_scope_t	(*scope) (union sctp_addr *);
-	void		(*inaddr_any)	(union sctp_addr *, unsigned short);
+	void		(*inaddr_any)	(union sctp_addr *, __be16);
 	int		(*is_any)	(const union sctp_addr *);
 	int		(*available)	(union sctp_addr *,
 					 struct sctp_sock *);
@@ -587,7 +587,7 @@
 			  struct sctp_sock *);
 	int  (*bind_verify) (struct sctp_sock *, union sctp_addr *);
 	int  (*send_verify) (struct sctp_sock *, union sctp_addr *);
-	int  (*supported_addrs)(const struct sctp_sock *, __u16 *);
+	int  (*supported_addrs)(const struct sctp_sock *, __be16 *);
 	struct sock *(*create_accept_sk) (struct sock *sk,
 					  struct sctp_association *asoc);
 	void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
@@ -1030,7 +1030,7 @@
 void sctp_inq_free(struct sctp_inq *);
 void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet);
 struct sctp_chunk *sctp_inq_pop(struct sctp_inq *);
-void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *);
+void sctp_inq_set_th_handler(struct sctp_inq *, work_func_t);
 
 /* This is the structure we use to hold outbound chunks.  You push
  * chunks in and they automatically pop out the other end as bundled
@@ -1270,7 +1270,7 @@
  	 * 	    this here so we pre-allocate this once and can re-use
  	 * 	    on every receive.
  	 */
- 	__u8 digest[SCTP_SIGNATURE_SIZE];
+ 	__u8 *digest;
  
 	/* sendbuf acct. policy.	*/
 	__u32 sndbuf_policy;
@@ -1314,6 +1314,13 @@
 __u32 sctp_generate_tag(const struct sctp_endpoint *);
 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
 
+struct sctp_inithdr_host {
+	__u32 init_tag;
+	__u32 a_rwnd;
+	__u16 num_outbound_streams;
+	__u16 num_inbound_streams;
+	__u32 initial_tsn;
+};
 
 /* RFC2960
  *
@@ -1482,9 +1489,9 @@
 		/* This mask is used to disable sending the ASCONF chunk
 		 * with specified parameter to peer.
 		 */
-		__u16 addip_disabled_mask;
+		__be16 addip_disabled_mask;
 
-		struct sctp_inithdr i;
+		struct sctp_inithdr_host i;
 		int cookie_len;
 		void *cookie;
 
diff --git a/include/net/sctp/tsnmap.h b/include/net/sctp/tsnmap.h
index 021947d..70a824d 100644
--- a/include/net/sctp/tsnmap.h
+++ b/include/net/sctp/tsnmap.h
@@ -105,7 +105,7 @@
 	 * every SACK.  Store up to SCTP_MAX_DUP_TSNS worth of
 	 * information.
 	 */
-	__u32 dup_tsns[SCTP_MAX_DUP_TSNS];
+	__be32 dup_tsns[SCTP_MAX_DUP_TSNS];
 	__u16 num_dup_tsns;
 
 	/* Record gap ack block information here.  */
@@ -162,7 +162,7 @@
 }
 
 /* Return pointer to duplicate tsn array as needed by SACK. */
-static inline __u32 *sctp_tsnmap_get_dups(struct sctp_tsnmap *map)
+static inline __be32 *sctp_tsnmap_get_dups(struct sctp_tsnmap *map)
 {
 	map->num_dup_tsns = 0;
 	return map->dup_tsns;
diff --git a/include/net/sock.h b/include/net/sock.h
index 9cdbae2..03684e7 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -47,6 +47,7 @@
 #include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>	/* struct sk_buff */
+#include <linux/mm.h>
 #include <linux/security.h>
 
 #include <linux/filter.h>
@@ -570,7 +571,7 @@
 	int			*sysctl_rmem;
 	int			max_header;
 
-	kmem_cache_t		*slab;
+	struct kmem_cache		*slab;
 	unsigned int		obj_size;
 
 	atomic_t		*orphan_count;
@@ -745,7 +746,32 @@
  */
 #define sock_owned_by_user(sk)	((sk)->sk_lock.owner)
 
-extern void FASTCALL(lock_sock(struct sock *sk));
+/*
+ * Macro so as to not evaluate some arguments when
+ * lockdep is not enabled.
+ *
+ * Mark both the sk_lock and the sk_lock.slock as a
+ * per-address-family lock class.
+ */
+#define sock_lock_init_class_and_name(sk, sname, skey, name, key) 	\
+do {									\
+	sk->sk_lock.owner = NULL;					\
+	init_waitqueue_head(&sk->sk_lock.wq);				\
+	spin_lock_init(&(sk)->sk_lock.slock);				\
+	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
+			sizeof((sk)->sk_lock));				\
+	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
+		       	(skey), (sname));				\
+	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
+} while (0)
+
+extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));
+
+static inline void lock_sock(struct sock *sk)
+{
+	lock_sock_nested(sk, 0);
+}
+
 extern void FASTCALL(release_sock(struct sock *sk));
 
 /* BH context may only use the following locking interface. */
@@ -948,7 +974,8 @@
 		sk_free(sk);
 }
 
-extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb);
+extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+			  const int nested);
 
 /* Detach socket from process context.
  * Announce socket dead, detach it from wait queue and inode.
@@ -1082,7 +1109,7 @@
 {
 	if (skb->ip_summed == CHECKSUM_NONE) {
 		int err = 0;
-		unsigned int csum = csum_and_copy_from_user(from,
+		__wsum csum = csum_and_copy_from_user(from,
 						     page_address(page) + off,
 							    copy, 0, &err);
 		if (err)
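
A hedged sketch of the locking pattern that the new lock_sock_nested() makes possible: taking a second socket lock beneath a first one without tripping lockdep. The parent/child scenario is illustrative; SINGLE_DEPTH_NESTING comes from <linux/lockdep.h>.

static void example_lock_pair(struct sock *parent, struct sock *child)
{
	lock_sock(parent);			/* subclass 0 */
	lock_sock_nested(child, SINGLE_DEPTH_NESTING);
	/* ... operate on both sockets ... */
	release_sock(child);
	release_sock(parent);
}
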
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7a093d0..c99774f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -28,6 +28,7 @@
 #include <linux/percpu.h>
 #include <linux/skbuff.h>
 #include <linux/dmaengine.h>
+#include <linux/crypto.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>
@@ -138,7 +139,6 @@
 #define MAX_TCP_SYNCNT		127
 
 #define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
-#define TCP_SYNQ_HSIZE		512	/* Size of SYNACK hash table */
 
 #define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
 #define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
@@ -162,6 +162,7 @@
 #define TCPOPT_SACK_PERM        4       /* SACK Permitted */
 #define TCPOPT_SACK             5       /* SACK Block */
 #define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
+#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
 
 /*
  *     TCP option lengths
@@ -171,6 +172,7 @@
 #define TCPOLEN_WINDOW         3
 #define TCPOLEN_SACK_PERM      2
 #define TCPOLEN_TIMESTAMP      10
+#define TCPOLEN_MD5SIG         18
 
 /* But this is what stacks really send out. */
 #define TCPOLEN_TSTAMP_ALIGNED		12
@@ -179,6 +181,7 @@
 #define TCPOLEN_SACK_BASE		2
 #define TCPOLEN_SACK_BASE_ALIGNED	4
 #define TCPOLEN_SACK_PERBLOCK		8
+#define TCPOLEN_MD5SIG_ALIGNED		20
 
 /* Flags in tp->nonagle */
 #define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
@@ -300,6 +303,8 @@
 extern int			tcp_twsk_unique(struct sock *sk,
 						struct sock *sktw, void *twp);
 
+extern void			tcp_twsk_destructor(struct sock *sk);
+
 static inline void tcp_dec_quickack_mode(struct sock *sk,
 					 const unsigned int pkts)
 {
@@ -621,8 +626,12 @@
  * Interface for adding new TCP congestion control handlers
  */
 #define TCP_CA_NAME_MAX	16
+#define TCP_CA_MAX	128
+#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
+
 struct tcp_congestion_ops {
 	struct list_head	list;
+	int	non_restricted;
 
 	/* initialize private data (optional) */
 	void (*init)(struct sock *sk);
@@ -660,6 +669,9 @@
 extern void tcp_cleanup_congestion_control(struct sock *sk);
 extern int tcp_set_default_congestion_control(const char *name);
 extern void tcp_get_default_congestion_control(char *name);
+extern void tcp_get_available_congestion_control(char *buf, size_t len);
+extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
+extern int tcp_set_allowed_congestion_control(char *allowed);
 extern int tcp_set_congestion_control(struct sock *sk, const char *name);
 extern void tcp_slow_start(struct tcp_sock *tp);
 
@@ -795,14 +807,14 @@
 /*
  * Calculate(/check) TCP checksum
  */
-static inline u16 tcp_v4_check(struct tcphdr *th, int len,
-			       unsigned long saddr, unsigned long daddr, 
-			       unsigned long base)
+static inline __sum16 tcp_v4_check(struct tcphdr *th, int len,
+			       __be32 saddr, __be32 daddr,
+			       __wsum base)
 {
 	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
 }
 
-static inline int __tcp_checksum_complete(struct sk_buff *skb)
+static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
 {
 	return __skb_checksum_complete(skb);
 }
@@ -1058,6 +1070,114 @@
 	tp->fastpath_skb_hint = NULL;
 }
 
+/* MD5 Signature */
+struct crypto_hash;
+
+/* - key database */
+struct tcp_md5sig_key {
+	u8			*key;
+	u8			keylen;
+};
+
+struct tcp4_md5sig_key {
+	u8			*key;
+	u16			keylen;
+	__be32			addr;
+};
+
+struct tcp6_md5sig_key {
+	u8			*key;
+	u16			keylen;
+#if 0
+	u32			scope_id;	/* XXX */
+#endif
+	struct in6_addr		addr;
+};
+
+/* - sock block */
+struct tcp_md5sig_info {
+	struct tcp4_md5sig_key	*keys4;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	struct tcp6_md5sig_key	*keys6;
+	u32			entries6;
+	u32			alloced6;
+#endif
+	u32			entries4;
+	u32			alloced4;
+};
+
+/* - pseudo header */
+struct tcp4_pseudohdr {
+	__be32		saddr;
+	__be32		daddr;
+	__u8		pad;
+	__u8		protocol;
+	__be16		len;
+};
+
+struct tcp6_pseudohdr {
+	struct in6_addr	saddr;
+	struct in6_addr daddr;
+	__be32		len;
+	__be32		protocol;	/* including padding */
+};
+
+union tcp_md5sum_block {
+	struct tcp4_pseudohdr ip4;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	struct tcp6_pseudohdr ip6;
+#endif
+};
+
+/* - pool: digest algorithm, hash description and scratch buffer */
+struct tcp_md5sig_pool {
+	struct hash_desc	md5_desc;
+	union tcp_md5sum_block	md5_blk;
+};
+
+#define TCP_MD5SIG_MAXKEYS	(~(u32)0)	/* really?! */
+
+/* - functions */
+extern int			tcp_v4_calc_md5_hash(char *md5_hash,
+						     struct tcp_md5sig_key *key,
+						     struct sock *sk,
+						     struct dst_entry *dst,
+						     struct request_sock *req,
+						     struct tcphdr *th,
+						     int protocol, int tcplen);
+extern struct tcp_md5sig_key	*tcp_v4_md5_lookup(struct sock *sk,
+						   struct sock *addr_sk);
+
+extern int			tcp_v4_md5_do_add(struct sock *sk,
+						  __be32 addr,
+						  u8 *newkey,
+						  u8 newkeylen);
+
+extern int			tcp_v4_md5_do_del(struct sock *sk,
+						  __be32 addr);
+
+extern struct tcp_md5sig_pool	**tcp_alloc_md5sig_pool(void);
+extern void			tcp_free_md5sig_pool(void);
+
+extern struct tcp_md5sig_pool	*__tcp_get_md5sig_pool(int cpu);
+extern void			__tcp_put_md5sig_pool(void);
+
+static inline
+struct tcp_md5sig_pool		*tcp_get_md5sig_pool(void)
+{
+	int cpu = get_cpu();
+	struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
+	if (!ret)
+		put_cpu();
+	return ret;
+}
+
+static inline void		tcp_put_md5sig_pool(void)
+{
+	__tcp_put_md5sig_pool();
+	put_cpu();
+}
+
 /* /proc */
 enum tcp_seq_states {
 	TCP_SEQ_STATE_LISTENING,
@@ -1097,6 +1217,35 @@
 extern void tcp4_proc_exit(void);
 #endif
 
+/* TCP af-specific functions */
+struct tcp_sock_af_ops {
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
+						struct sock *addr_sk);
+	int			(*calc_md5_hash) (char *location,
+						  struct tcp_md5sig_key *md5,
+						  struct sock *sk,
+						  struct dst_entry *dst,
+						  struct request_sock *req,
+						  struct tcphdr *th,
+						  int protocol, int len);
+	int			(*md5_add) (struct sock *sk,
+					    struct sock *addr_sk,
+					    u8 *newkey,
+					    u8 len);
+	int			(*md5_parse) (struct sock *sk,
+					      char __user *optval,
+					      int optlen);
+#endif
+};
+
+struct tcp_request_sock_ops {
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
+						struct request_sock *req);
+#endif
+};
+
 extern void tcp_v4_init(struct net_proto_family *ops);
 extern void tcp_init(void);
 
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index be293d7..1e1ee32 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -15,7 +15,7 @@
 #include <net/sock.h>
 
 struct timewait_sock_ops {
-	kmem_cache_t	*twsk_slab;
+	struct kmem_cache	*twsk_slab;
 	unsigned int	twsk_obj_size;
 	int		(*twsk_unique)(struct sock *sk,
 				       struct sock *sktw, void *twp);
@@ -31,6 +31,9 @@
 
 static inline void twsk_destructor(struct sock *sk)
 {
+	BUG_ON(sk == NULL);
+	BUG_ON(sk->sk_prot == NULL);
+	BUG_ON(sk->sk_prot->twsk_prot == NULL);
 	if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
 		sk->sk_prot->twsk_prot->twsk_destructor(sk);
 }
diff --git a/include/net/tipc/tipc_bearer.h b/include/net/tipc/tipc_bearer.h
index e07136d..2151a80 100644
--- a/include/net/tipc/tipc_bearer.h
+++ b/include/net/tipc/tipc_bearer.h
@@ -58,7 +58,7 @@
  */
 
 struct tipc_media_addr {
-	__u32  type;			/* bearer type (network byte order) */
+	__be32  type;			/* bearer type (network byte order) */
 	union {
 		__u8   eth_addr[6];	/* 48 bit Ethernet addr (byte array) */ 
 #if 0
diff --git a/include/net/tipc/tipc_msg.h b/include/net/tipc/tipc_msg.h
index 4d096ee..fb42eb7 100644
--- a/include/net/tipc/tipc_msg.h
+++ b/include/net/tipc/tipc_msg.h
@@ -40,7 +40,7 @@
 #ifdef __KERNEL__
 
 struct tipc_msg {
-	u32 hdr[15];
+	__be32 hdr[15];
 };
 
 
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index 61f724c..409da3a 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -11,6 +11,7 @@
 
 extern struct proto rawv6_prot;
 extern struct proto udpv6_prot;
+extern struct proto udplitev6_prot;
 extern struct proto tcpv6_prot;
 
 struct flowi;
@@ -24,6 +25,7 @@
 /* transport protocols */
 extern void				rawv6_init(void);
 extern void				udpv6_init(void);
+extern void 				udplitev6_init(void);
 extern void				tcpv6_init(void);
 
 extern int				udpv6_connect(struct sock *sk,
diff --git a/include/net/udp.h b/include/net/udp.h
index db0c05f..1b921fa 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -26,9 +26,29 @@
 #include <net/inet_sock.h>
 #include <net/sock.h>
 #include <net/snmp.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
 #include <linux/seq_file.h>
+#include <linux/poll.h>
 
-#define UDP_HTABLE_SIZE		128
+/**
+ *	struct udp_skb_cb  -  UDP(-Lite) private variables
+ *
+ *	@header:      private variables used by IPv4/IPv6
+ *	@cscov:       checksum coverage length (UDP-Lite only)
+ *	@partial_cov: if set indicates partial csum coverage
+ */
+struct udp_skb_cb {
+	union {
+		struct inet_skb_parm	h4;
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+		struct inet6_skb_parm	h6;
+#endif
+	} header;
+	__u16		cscov;
+	__u8		partial_cov;
+};
+#define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))
 
 extern struct hlist_head udp_hash[UDP_HTABLE_SIZE];
 extern rwlock_t udp_hash_lock;
@@ -47,6 +67,62 @@
 
 struct sk_buff;
 
+/*
+ *	Generic checksumming routines for UDP(-Lite) v4 and v6
+ */
+static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
+{
+	if (! UDP_SKB_CB(skb)->partial_cov)
+		return __skb_checksum_complete(skb);
+	return csum_fold(skb_checksum(skb, 0, UDP_SKB_CB(skb)->cscov,
+				      skb->csum));
+}
+
+static inline int udp_lib_checksum_complete(struct sk_buff *skb)
+{
+	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
+		__udp_lib_checksum_complete(skb);
+}
+
+/**
+ * 	udp_csum_outgoing  -  compute UDPv4/v6 checksum over fragments
+ * 	@sk: 	socket we are writing to
+ * 	@skb: 	sk_buff containing the filled-in UDP header
+ * 	        (checksum field must be zeroed out)
+ */
+static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
+{
+	__wsum csum = csum_partial(skb->h.raw, sizeof(struct udphdr), 0);
+
+	skb_queue_walk(&sk->sk_write_queue, skb) {
+		csum = csum_add(csum, skb->csum);
+	}
+	return csum;
+}
+
+/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
+static inline void udp_lib_hash(struct sock *sk)
+{
+	BUG();
+}
+
+static inline void udp_lib_unhash(struct sock *sk)
+{
+	write_lock_bh(&udp_hash_lock);
+	if (sk_del_node_init(sk)) {
+		inet_sk(sk)->num = 0;
+		sock_prot_dec_use(sk->sk_prot);
+	}
+	write_unlock_bh(&udp_hash_lock);
+}
+
+static inline void udp_lib_close(struct sock *sk, long timeout)
+{
+	sk_common_release(sk);
+}
+
+
+/* net/ipv4/udp.c */
 extern int	udp_get_port(struct sock *sk, unsigned short snum,
 			     int (*saddr_cmp)(const struct sock *, const struct sock *));
 extern void	udp_err(struct sk_buff *, u32);
@@ -59,23 +135,36 @@
 extern int	udp_disconnect(struct sock *sk, int flags);
 extern unsigned int udp_poll(struct file *file, struct socket *sock,
 			     poll_table *wait);
+extern int 	udp_lib_getsockopt(struct sock *sk, int level, int optname,
+			           char __user *optval, int __user *optlen);
+extern int 	udp_lib_setsockopt(struct sock *sk, int level, int optname,
+				   char __user *optval, int optlen,
+				   int (*push_pending_frames)(struct sock *));
 
 DECLARE_SNMP_STAT(struct udp_mib, udp_statistics);
-#define UDP_INC_STATS(field)		SNMP_INC_STATS(udp_statistics, field)
-#define UDP_INC_STATS_BH(field)		SNMP_INC_STATS_BH(udp_statistics, field)
-#define UDP_INC_STATS_USER(field) 	SNMP_INC_STATS_USER(udp_statistics, field)
+/*
+ * 	SNMP statistics for UDP and UDP-Lite
+ */
+#define UDP_INC_STATS_USER(field, is_udplite)			       do {   \
+	if (is_udplite) SNMP_INC_STATS_USER(udplite_statistics, field);       \
+	else		SNMP_INC_STATS_USER(udp_statistics, field);  }  while(0)
+#define UDP_INC_STATS_BH(field, is_udplite) 			       do  {  \
+	if (is_udplite) SNMP_INC_STATS_BH(udplite_statistics, field);         \
+	else		SNMP_INC_STATS_BH(udp_statistics, field);    }  while(0)
 
 /* /proc */
 struct udp_seq_afinfo {
 	struct module		*owner;
 	char			*name;
 	sa_family_t		family;
+	struct hlist_head	*hashtable;
 	int 			(*seq_show) (struct seq_file *m, void *v);
 	struct file_operations	*seq_fops;
 };
 
 struct udp_iter_state {
 	sa_family_t		family;
+	struct hlist_head	*hashtable;
 	int			bucket;
 	struct seq_operations	seq_ops;
 };
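
The helpers above keep the partial-coverage logic out of the callers: udp_lib_checksum_complete() only folds the checksum over UDP_SKB_CB(skb)->cscov bytes when partial coverage was negotiated, and the reworked SNMP macros pick the UDP or UDP-Lite MIB from the new is_udplite argument. An illustrative receive-path fragment (not from the patch; the UDP_MIB_* counter names are assumed to be the usual ones):

	static int example_rcv_checks(struct sk_buff *skb, int is_udplite)
	{
		if (udp_lib_checksum_complete(skb)) {
			/* checksum over UDP_SKB_CB(skb)->cscov bytes failed */
			UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
			kfree_skb(skb);
			return -1;
		}
		UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, is_udplite);
		return 0;
	}
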
diff --git a/include/net/udplite.h b/include/net/udplite.h
new file mode 100644
index 0000000..67ac514
--- /dev/null
+++ b/include/net/udplite.h
@@ -0,0 +1,151 @@
+/*
+ *	Definitions for the UDP-Lite (RFC 3828) code.
+ */
+#ifndef _UDPLITE_H
+#define _UDPLITE_H
+
+#include <net/ip6_checksum.h>
+
+/* UDP-Lite socket options */
+#define UDPLITE_SEND_CSCOV   10 /* sender partial coverage (as sent)      */
+#define UDPLITE_RECV_CSCOV   11 /* receiver partial coverage (threshold)  */
+
+extern struct proto 		udplite_prot;
+extern struct hlist_head 	udplite_hash[UDP_HTABLE_SIZE];
+
+/* UDP-Lite does not have a standardized MIB yet, so we inherit from UDP */
+DECLARE_SNMP_STAT(struct udp_mib, udplite_statistics);
+
+/*
+ *	Checksum computation is all in software, hence simpler getfrag.
+ */
+static __inline__ int udplite_getfrag(void *from, char *to, int  offset,
+				      int len, int odd, struct sk_buff *skb)
+{
+	return memcpy_fromiovecend(to, (struct iovec *) from, offset, len);
+}
+
+/* Designate sk as UDP-Lite socket */
+static inline int udplite_sk_init(struct sock *sk)
+{
+	udp_sk(sk)->pcflag = UDPLITE_BIT;
+	return 0;
+}
+
+/*
+ * 	Checksumming routines
+ */
+static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
+{
+	u16 cscov;
+
+        /* In UDPv4 a zero checksum means that the transmitter generated no
+         * checksum. UDP-Lite (like IPv6) mandates checksums, hence packets
+         * with a zero checksum field are illegal.                            */
+	if (uh->check == 0) {
+		LIMIT_NETDEBUG(KERN_DEBUG "UDPLITE: zeroed checksum field\n");
+		return 1;
+	}
+
+        UDP_SKB_CB(skb)->partial_cov = 0;
+	cscov = ntohs(uh->len);
+
+	if (cscov == 0)		 /* Indicates that full coverage is required. */
+		cscov = skb->len;
+	else if (cscov < 8  || cscov > skb->len) {
+		/*
+		 * Coverage length violates RFC 3828: log and discard silently.
+		 */
+		LIMIT_NETDEBUG(KERN_DEBUG "UDPLITE: bad csum coverage %d/%d\n",
+			       cscov, skb->len);
+		return 1;
+
+	} else if (cscov < skb->len)
+        	UDP_SKB_CB(skb)->partial_cov = 1;
+
+        UDP_SKB_CB(skb)->cscov = cscov;
+
+	/*
+	 * There is no known NIC manufacturer supporting UDP-Lite yet,
+	 * hence ip_summed is always (re-)set to CHECKSUM_NONE.
+	 */
+	skb->ip_summed = CHECKSUM_NONE;
+
+	return 0;
+}
+
+static __inline__ int udplite4_csum_init(struct sk_buff *skb, struct udphdr *uh)
+{
+	int rc = udplite_checksum_init(skb, uh);
+
+	if (!rc)
+		skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr,
+					       skb->nh.iph->daddr,
+					       skb->len, IPPROTO_UDPLITE, 0);
+	return rc;
+}
+
+static __inline__ int udplite6_csum_init(struct sk_buff *skb, struct udphdr *uh)
+{
+	int rc = udplite_checksum_init(skb, uh);
+
+	if (!rc)
+		skb->csum = ~csum_unfold(csum_ipv6_magic(&skb->nh.ipv6h->saddr,
+					     &skb->nh.ipv6h->daddr,
+					     skb->len, IPPROTO_UDPLITE, 0));
+	return rc;
+}
+
+static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh)
+{
+	int cscov = up->len;
+
+	/*
+	 * Sender has set `partial coverage' option on UDP-Lite socket
+	 */
+	if (up->pcflag & UDPLITE_SEND_CC)    {
+		if (up->pcslen < up->len) {
+		/* up->pcslen == 0 means that full coverage is required,
+		 * partial coverage only if  0 < up->pcslen < up->len */
+			if (0 < up->pcslen) {
+			       cscov = up->pcslen;
+			}
+			uh->len = htons(up->pcslen);
+		}
+	/*
+	 * NOTE: Causes for the error case  `up->pcslen > up->len':
+	 *        (i)  Application error (will not be penalized).
+	 *       (ii)  Payload too big for send buffer: data is split
+	 *             into several packets, each with its own header.
+	 *             In this case (e.g. last segment), coverage may
+	 *             exceed packet length.
+	 *       Since packets with coverage length > packet length are
+	 *       illegal, we fall back to the defaults here.
+	 */
+	}
+	return cscov;
+}
+
+static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
+{
+	int off, len, cscov = udplite_sender_cscov(udp_sk(sk), skb->h.uh);
+	__wsum csum = 0;
+
+	skb->ip_summed = CHECKSUM_NONE;     /* no HW support for checksumming */
+
+	skb_queue_walk(&sk->sk_write_queue, skb) {
+		off = skb->h.raw - skb->data;
+		len = skb->len - off;
+
+		csum = skb_checksum(skb, off, (cscov > len)? len : cscov, csum);
+
+		if ((cscov -= len) <= 0)
+			break;
+	}
+	return csum;
+}
+
+extern void	udplite4_register(void);
+extern int 	udplite_get_port(struct sock *sk, unsigned short snum,
+			int (*scmp)(const struct sock *, const struct sock *));
+#endif	/* _UDPLITE_H */
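
From userspace, the two socket options declared at the top of this header choose how much of each datagram the checksum covers. A hedged sketch (not part of the patch; the IPPROTO_UDPLITE value of 136 and its use as the setsockopt level are assumptions, not taken from this header):

	#include <sys/socket.h>
	#include <netinet/in.h>

	#ifndef IPPROTO_UDPLITE
	#define IPPROTO_UDPLITE     136
	#endif
	#define UDPLITE_SEND_CSCOV   10
	#define UDPLITE_RECV_CSCOV   11

	static int open_udplite_partial(int cov)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);

		if (fd < 0)
			return -1;
		/* cov of 0 requests full coverage; the receive path above
		 * rejects coverage values below 8 (the UDP-Lite header). */
		setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
		setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
		return fd;
	}
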
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 737fdb2..e476541 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -310,6 +310,8 @@
 /* Source address of tunnel. Ignored, if it is not a tunnel. */
 	xfrm_address_t		saddr;
 
+	unsigned short		encap_family;
+
 	__u32			reqid;
 
 /* Mode: transport, tunnel etc. */
@@ -340,18 +342,19 @@
 	atomic_t		refcnt;
 	struct timer_list	timer;
 
-	u8			type;
 	u32			priority;
 	u32			index;
 	struct xfrm_selector	selector;
 	struct xfrm_lifetime_cfg lft;
 	struct xfrm_lifetime_cur curlft;
 	struct dst_entry       *bundles;
-	__u16			family;
-	__u8			action;
-	__u8			flags;
-	__u8			dead;
-	__u8			xfrm_nr;
+	u16			family;
+	u8			type;
+	u8			action;
+	u8			flags;
+	u8			dead;
+	u8			xfrm_nr;
+	/* XXX 1 byte hole, try to pack */
 	struct xfrm_sec_ctx	*security;
 	struct xfrm_tmpl       	xfrm_vec[XFRM_MAX_DEPTH];
 };
@@ -379,7 +382,7 @@
 	int			(*notify)(struct xfrm_state *x, struct km_event *c);
 	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
 	struct xfrm_policy	*(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
-	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport);
+	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
 	int			(*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
 	int			(*report)(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
 };
@@ -389,6 +392,20 @@
 
 extern unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
 
+/* Audit Information */
+struct xfrm_audit
+{
+	uid_t	loginuid;
+	u32	secid;
+};
+
+#ifdef CONFIG_AUDITSYSCALL
+extern void xfrm_audit_log(uid_t auid, u32 secid, int type, int result,
+		    struct xfrm_policy *xp, struct xfrm_state *x);
+#else
+#define xfrm_audit_log(a,s,t,r,p,x) do { ; } while (0)
+#endif /* CONFIG_AUDITSYSCALL */
+
 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
 {
 	if (likely(policy != NULL))
@@ -468,6 +485,7 @@
 	switch(fl->proto) {
 	case IPPROTO_TCP:
 	case IPPROTO_UDP:
+	case IPPROTO_UDPLITE:
 	case IPPROTO_SCTP:
 		port = fl->fl_ip_sport;
 		break;
@@ -493,6 +511,7 @@
 	switch(fl->proto) {
 	case IPPROTO_TCP:
 	case IPPROTO_UDP:
+	case IPPROTO_UDPLITE:
 	case IPPROTO_SCTP:
 		port = fl->fl_ip_dport;
 		break;
@@ -506,40 +525,8 @@
 	return port;
 }
 
-static inline int
-__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
-{
-	return  addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
-		addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
-		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
-		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
-		(fl->proto == sel->proto || !sel->proto) &&
-		(fl->oif == sel->ifindex || !sel->ifindex);
-}
-
-static inline int
-__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
-{
-	return  addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
-		addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
-		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
-		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
-		(fl->proto == sel->proto || !sel->proto) &&
-		(fl->oif == sel->ifindex || !sel->ifindex);
-}
-
-static inline int
-xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
-		    unsigned short family)
-{
-	switch (family) {
-	case AF_INET:
-		return __xfrm4_selector_match(sel, fl);
-	case AF_INET6:
-		return __xfrm6_selector_match(sel, fl);
-	}
-	return 0;
-}
+extern int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
+			       unsigned short family);
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 /*	If neither has a context --> match
@@ -887,8 +874,7 @@
 struct xfrm6_tunnel {
 	int (*handler)(struct sk_buff *skb);
 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
-			   int type, int code, int offset, __u32 info);
-
+			   int type, int code, int offset, __be32 info);
 	struct xfrm6_tunnel *next;
 	int priority;
 };
@@ -934,7 +920,7 @@
 #endif
 extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
 extern int xfrm_state_delete(struct xfrm_state *x);
-extern void xfrm_state_flush(u8 proto);
+extern void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info);
 extern int xfrm_replay_check(struct xfrm_state *x, __be32 seq);
 extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
 extern void xfrm_replay_notify(struct xfrm_state *x, int event);
@@ -951,9 +937,9 @@
 			    xfrm_address_t *saddr, u8 proto);
 extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler);
 extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler);
-extern u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr);
+extern __be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr);
 extern void xfrm6_tunnel_free_spi(xfrm_address_t *saddr);
-extern u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr);
+extern __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr);
 extern int xfrm6_output(struct sk_buff *skb);
 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
 				 u8 **prevhdr);
@@ -987,20 +973,20 @@
 					  struct xfrm_selector *sel,
 					  struct xfrm_sec_ctx *ctx, int delete);
 struct xfrm_policy *xfrm_policy_byid(u8, int dir, u32 id, int delete);
-void xfrm_policy_flush(u8 type);
+void xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info);
 u32 xfrm_get_acqseq(void);
 void xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi);
-struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto, 
-				  xfrm_address_t *daddr, xfrm_address_t *saddr, 
+struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
+				  xfrm_address_t *daddr, xfrm_address_t *saddr,
 				  int create, unsigned short family);
-extern void xfrm_policy_flush(u8 type);
+extern void xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info);
 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
 extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst,
 			  struct flowi *fl, int family, int strict);
 extern void xfrm_init_pmtu(struct dst_entry *dst);
 
 extern wait_queue_head_t km_waitq;
-extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport);
+extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
 extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
 extern int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
 
@@ -1033,7 +1019,7 @@
 	switch (family) {
 	default:
 	case AF_INET:
-		return a->a4 - b->a4;
+		return (__force __u32)a->a4 - (__force __u32)b->a4;
 	case AF_INET6:
 		return ipv6_addr_cmp((struct in6_addr *)a,
 				     (struct in6_addr *)b);
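
The flush prototypes above now take a struct xfrm_audit, so deletions of states and policies can be attributed to a login uid and LSM secid. A hypothetical caller might look like this (the policy type constant and the audit-context access are assumptions, not shown in this hunk):

	static void example_flush_all(u8 proto, u32 secid)
	{
		struct xfrm_audit audit_info = {
			.loginuid = audit_get_loginuid(current->audit_context),
			.secid    = secid,
		};

		xfrm_state_flush(proto, &audit_info);
		xfrm_policy_flush(XFRM_POLICY_TYPE_MAIN, &audit_info);
	}
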
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index ede6398..623a0fc 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -262,9 +262,10 @@
 		u8			present:1,	/* PCMCIA card is present in socket */
 					busy:1,		/* "master" ioctl is used */
 					dead:1,		/* pcmcia module is being unloaded */
-					device_add_pending:1, /* a pseudo-multifunction-device
+					device_add_pending:1, /* a multifunction-device
 							       * add event is pending */
-					reserved:4;
+					mfc_pfc:1,	/* the pending event adds a mfc (1) or pfc (0) */
+					reserved:3;
 	} 				pcmcia_state;
 
 	struct work_struct		device_add;	/* for adding further pseudo-multifunction
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index c9b4738..5c07017 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -60,6 +60,7 @@
 };
 
 enum ib_cm_lap_state {
+	IB_CM_LAP_UNINIT,
 	IB_CM_LAP_IDLE,
 	IB_CM_LAP_SENT,
 	IB_CM_LAP_RCVD,
@@ -443,13 +444,20 @@
 		    u8 private_data_len);
 
 /**
- * ib_cm_establish - Forces a connection state to established.
+ * ib_cm_notify - Notifies the CM of an event reported to the consumer.
  * @cm_id: Connection identifier to transition to established.
+ * @event: Type of event.
  *
- * This routine should be invoked by users who receive messages on a
- * connected QP before an RTU has been received.
+ * This routine should be invoked by users to notify the CM of relevant
+ * communication events.  Events that should be reported to the CM and
+ * when to report them are:
+ *
+ * IB_EVENT_COMM_EST - Used when a message is received on a connected
+ *    QP before an RTU has been received.
+ * IB_EVENT_PATH_MIG - Notifies the CM that the connection has failed over
+ *   to the alternate path.
  */
-int ib_cm_establish(struct ib_cm_id *cm_id);
+int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event);
 
 /**
  * ib_send_cm_rej - Sends a connection rejection message to the
diff --git a/include/rdma/ib_user_cm.h b/include/rdma/ib_user_cm.h
index 066c20b..37650af 100644
--- a/include/rdma/ib_user_cm.h
+++ b/include/rdma/ib_user_cm.h
@@ -38,7 +38,7 @@
 
 #include <rdma/ib_user_sa.h>
 
-#define IB_USER_CM_ABI_VERSION 4
+#define IB_USER_CM_ABI_VERSION 5
 
 enum {
 	IB_USER_CM_CMD_CREATE_ID,
@@ -46,7 +46,7 @@
 	IB_USER_CM_CMD_ATTR_ID,
 
 	IB_USER_CM_CMD_LISTEN,
-	IB_USER_CM_CMD_ESTABLISH,
+	IB_USER_CM_CMD_NOTIFY,
 
 	IB_USER_CM_CMD_SEND_REQ,
 	IB_USER_CM_CMD_SEND_REP,
@@ -117,8 +117,9 @@
 	__u32 reserved;
 };
 
-struct ib_ucm_establish {
+struct ib_ucm_notify {
 	__u32 id;
+	__u32 event;
 };
 
 struct ib_ucm_private_data {
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 61eebec..ea0816d 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -25,6 +25,8 @@
 
 #include <linux/types.h>
 #include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
 #include <scsi/iscsi_proto.h>
 #include <scsi/iscsi_if.h>
 
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 1d77b63..0c775fc 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -201,9 +201,14 @@
         void *lldd_dev;
 };
 
+struct sas_discovery_event {
+	struct work_struct work;
+	struct asd_sas_port *port;
+};
+
 struct sas_discovery {
 	spinlock_t disc_event_lock;
-	struct work_struct disc_work[DISC_NUM_EVENTS];
+	struct sas_discovery_event disc_work[DISC_NUM_EVENTS];
 	unsigned long    pending;
 	u8     fanout_sas_addr[8];
 	u8     eeds_a[8];
@@ -249,14 +254,19 @@
 	void *lldd_port;	  /* not touched by the sas class code */
 };
 
+struct asd_sas_event {
+	struct work_struct work;
+	struct asd_sas_phy *phy;
+};
+
 /* The phy pretty much is controlled by the LLDD.
  * The class only reads those fields.
  */
 struct asd_sas_phy {
 /* private: */
 	/* protected by ha->event_lock */
-	struct work_struct   port_events[PORT_NUM_EVENTS];
-	struct work_struct   phy_events[PHY_NUM_EVENTS];
+	struct asd_sas_event   port_events[PORT_NUM_EVENTS];
+	struct asd_sas_event   phy_events[PHY_NUM_EVENTS];
 
 	unsigned long port_events_pending;
 	unsigned long phy_events_pending;
@@ -308,10 +318,15 @@
 	int               queue_thread_kill;
 };
 
+struct sas_ha_event {
+	struct work_struct work;
+	struct sas_ha_struct *ha;
+};
+
 struct sas_ha_struct {
 /* private: */
 	spinlock_t       event_lock;
-	struct work_struct ha_events[HA_NUM_EVENTS];
+	struct sas_ha_event ha_events[HA_NUM_EVENTS];
 	unsigned long	 pending;
 
 	struct scsi_core core;
@@ -339,6 +354,8 @@
 	void (*notify_phy_event)(struct asd_sas_phy *, enum phy_event);
 
 	void *lldd_ha;		  /* not touched by sas class code */
+
+	struct list_head eh_done_q;
 };
 
 #define SHOST_TO_SAS_HA(_shost) (*(struct sas_ha_struct **)(_shost)->hostdata)
@@ -527,17 +544,20 @@
 
 	void   *lldd_task;	  /* for use by LLDDs */
 	void   *uldd_task;
+
+	struct work_struct abort_work;
 };
 
 
 
-#define SAS_TASK_STATE_PENDING  1
-#define SAS_TASK_STATE_DONE     2
-#define SAS_TASK_STATE_ABORTED  4
+#define SAS_TASK_STATE_PENDING      1
+#define SAS_TASK_STATE_DONE         2
+#define SAS_TASK_STATE_ABORTED      4
+#define SAS_TASK_INITIATOR_ABORTED  8
 
 static inline struct sas_task *sas_alloc_task(gfp_t flags)
 {
-	extern kmem_cache_t *sas_task_cache;
+	extern struct kmem_cache *sas_task_cache;
 	struct sas_task *task = kmem_cache_alloc(sas_task_cache, flags);
 
 	if (task) {
@@ -555,7 +575,7 @@
 static inline void sas_free_task(struct sas_task *task)
 {
 	if (task) {
-		extern kmem_cache_t *sas_task_cache;
+		extern struct kmem_cache *sas_task_cache;
 		BUG_ON(!list_empty(&task->list));
 		kmem_cache_free(sas_task_cache, task);
 	}
@@ -593,6 +613,7 @@
 extern int sas_register_ha(struct sas_ha_struct *);
 extern int sas_unregister_ha(struct sas_ha_struct *);
 
+int sas_phy_reset(struct sas_phy *phy, int hard_reset);
 extern int sas_queuecommand(struct scsi_cmnd *,
 		     void (*scsi_done)(struct scsi_cmnd *));
 extern int sas_target_alloc(struct scsi_target *);
@@ -625,4 +646,6 @@
 
 void sas_init_dev(struct domain_device *);
 
+void sas_task_abort(struct work_struct *);
+
 #endif /* _SASLIB_H_ */
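
The new sas_ha_event/asd_sas_event/sas_discovery_event wrappers exist because, under the reworked workqueue API used throughout this merge, a work handler receives only the struct work_struct pointer and has to recover its context itself. A sketch of the pattern (illustrative names, not from the patch):

	static void example_phy_event_worker(struct work_struct *work)
	{
		struct asd_sas_event *ev =
			container_of(work, struct asd_sas_event, work);

		/* ev->phy now identifies the phy the event was queued for */
		pr_debug("event on phy %p\n", ev->phy);
	}
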
diff --git a/include/scsi/libsrp.h b/include/scsi/libsrp.h
new file mode 100644
index 0000000..d143171
--- /dev/null
+++ b/include/scsi/libsrp.h
@@ -0,0 +1,77 @@
+#ifndef __LIBSRP_H__
+#define __LIBSRP_H__
+
+#include <linux/list.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/srp.h>
+
+enum iue_flags {
+	V_DIOVER,
+	V_WRITE,
+	V_LINKED,
+	V_FLYING,
+};
+
+struct srp_buf {
+	dma_addr_t dma;
+	void *buf;
+};
+
+struct srp_queue {
+	void *pool;
+	void *items;
+	struct kfifo *queue;
+	spinlock_t lock;
+};
+
+struct srp_target {
+	struct Scsi_Host *shost;
+	struct device *dev;
+
+	spinlock_t lock;
+	struct list_head cmd_queue;
+
+	size_t srp_iu_size;
+	struct srp_queue iu_queue;
+	size_t rx_ring_size;
+	struct srp_buf **rx_ring;
+
+	void *ldata;
+};
+
+struct iu_entry {
+	struct srp_target *target;
+
+	struct list_head ilist;
+	dma_addr_t remote_token;
+	unsigned long flags;
+
+	struct srp_buf *sbuf;
+};
+
+typedef int (srp_rdma_t)(struct scsi_cmnd *, struct scatterlist *, int,
+			 struct srp_direct_buf *, int,
+			 enum dma_data_direction, unsigned int);
+extern int srp_target_alloc(struct srp_target *, struct device *, size_t, size_t);
+extern void srp_target_free(struct srp_target *);
+
+extern struct iu_entry *srp_iu_get(struct srp_target *);
+extern void srp_iu_put(struct iu_entry *);
+
+extern int srp_cmd_queue(struct Scsi_Host *, struct srp_cmd *, void *, u64);
+extern int srp_transfer_data(struct scsi_cmnd *, struct srp_cmd *,
+			     srp_rdma_t, int, int);
+
+
+static inline struct srp_target *host_to_srp_target(struct Scsi_Host *host)
+{
+	return (struct srp_target *) host->hostdata;
+}
+
+static inline int srp_cmd_direction(struct srp_cmd *cmd)
+{
+	return (cmd->buf_fmt >> 4) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+}
+
+#endif
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index be117f8..d6948d0 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -8,6 +8,7 @@
 
 struct request;
 struct scatterlist;
+struct Scsi_Host;
 struct scsi_device;
 
 
@@ -72,6 +73,9 @@
 	unsigned short use_sg;	/* Number of pieces of scatter-gather */
 	unsigned short sglist_len;	/* size of malloc'd scatter-gather list */
 
+	/* offset in cmd we are at (for multi-transfer tgt cmds) */
+	unsigned offset;
+
 	unsigned underflow;	/* Return error if less than
 				   this amount is transferred */
 
@@ -119,7 +123,10 @@
 };
 
 extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
+extern struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *, gfp_t);
 extern void scsi_put_command(struct scsi_cmnd *);
+extern void __scsi_put_command(struct Scsi_Host *, struct scsi_cmnd *,
+			       struct device *);
 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
 extern void scsi_finish_command(struct scsi_cmnd *cmd);
 extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd);
@@ -128,4 +135,7 @@
 				 size_t *offset, size_t *len);
 extern void scsi_kunmap_atomic_sg(void *virt);
 
+extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
+extern void scsi_free_sgtable(struct scatterlist *, int);
+
 #endif /* _SCSI_SCSI_CMND_H */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index b401c82..ebf31b1 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -223,13 +223,13 @@
 						  struct scsi_device *);
 
 /**
- * shost_for_each_device  -  iterate over all devices of a host
- * @sdev:	iterator
- * @host:	host whiches devices we want to iterate over
+ * shost_for_each_device - iterate over all devices of a host
+ * @sdev: the &struct scsi_device to use as a cursor
+ * @shost: the &struct scsi_host to iterate over
  *
- * This traverses over each devices of @shost.  The devices have
- * a reference that must be released by scsi_host_put when breaking
- * out of the loop.
+ * Iterator that returns each device attached to @shost.  This loop
+ * takes a reference on each device and releases it at the end.  If
+ * you break out of the loop, you must call scsi_device_put(sdev).
  */
 #define shost_for_each_device(sdev, shost) \
 	for ((sdev) = __scsi_iterate_devices((shost), NULL); \
@@ -237,17 +237,17 @@
 	     (sdev) = __scsi_iterate_devices((shost), (sdev)))
 
 /**
- * __shost_for_each_device  -  iterate over all devices of a host (UNLOCKED)
- * @sdev:	iterator
- * @host:	host whiches devices we want to iterate over
+ * __shost_for_each_device - iterate over all devices of a host (UNLOCKED)
+ * @sdev: the &struct scsi_device to use as a cursor
+ * @shost: the &struct scsi_host to iterate over
  *
- * This traverses over each devices of @shost.  It does _not_ take a
- * reference on the scsi_device, thus it the whole loop must be protected
- * by shost->host_lock.
+ * Iterator that returns each device attached to @shost.  It does _not_
+ * take a reference on the scsi_device, so the whole loop must be
+ * protected by shost->host_lock.
  *
- * Note:  The only reason why drivers would want to use this is because
- * they're need to access the device list in irq context.  Otherwise you
- * really want to use shost_for_each_device instead.
+ * Note: The only reason to use this is because you need to access the
+ * device list in interrupt context.  Otherwise you really want to use
+ * shost_for_each_device instead.
  */
 #define __shost_for_each_device(sdev, shost) \
 	list_for_each_entry((sdev), &((shost)->__devices), siblings)
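
The rewritten kernel-doc spells out the reference-counting rule for shost_for_each_device(); a short sketch of what it implies for callers (illustrative, not from the patch):

	static void example_poke_lun0(struct Scsi_Host *shost)
	{
		struct scsi_device *sdev, *found = NULL;

		shost_for_each_device(sdev, shost) {
			if (sdev->lun == 0) {
				found = sdev;
				break;
			}
		}
		if (found) {
			/* ... use found ... */
			scsi_device_put(found);	/* drop the iterator's reference */
		}
	}
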
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 39c6f8c..7f1f411 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -7,6 +7,7 @@
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
 
+struct request_queue;
 struct block_device;
 struct completion;
 struct module;
@@ -124,6 +125,39 @@
 			     void (*done)(struct scsi_cmnd *));
 
 	/*
+	 * The transfer functions are used to queue a scsi command to
+	 * the LLD. When the driver is finished processing the command
+	 * the done callback is invoked.
+	 *
+	 * return values: see queuecommand
+	 *
+	 * If the LLD accepts the cmd, it should set the result to an
+	 * appropriate value once processing completes, before calling the
+	 * done function.
+	 *
+	 * STATUS: REQUIRED FOR TARGET DRIVERS
+	 */
+	/* TODO: rename */
+	int (* transfer_response)(struct scsi_cmnd *,
+				  void (*done)(struct scsi_cmnd *));
+	/*
+	 * This is called to inform the LLD to transfer cmd->request_bufflen
+	 * bytes of the cmd, starting at cmd->offset. The cmd->use_sg field
+	 * specifies the number of scatterlist entries in the command
+	 * and cmd->request_buffer contains the scatterlist.
+	 *
+	 * If the command cannot be processed in one transfer_data call
+	 * because a scatterlist within the LLD's limits cannot be
+	 * created, then transfer_data will be called multiple times.
+	 * It is initially called from process context, and later
+	 * calls are from interrupt context.
+	 */
+	int (* transfer_data)(struct scsi_cmnd *,
+			      void (*done)(struct scsi_cmnd *));
+
+	/* Used as callback for the completion of a task management request. */
+	int (* tsk_mgmt_response)(u64 mid, int result);
+
+	/*
 	 * This is an error handling strategy routine.  You don't need to
 	 * define one of these if you don't want to - there is a default
 	 * routine that is present that should work in most cases.  For those
@@ -241,6 +275,24 @@
 	void (* target_destroy)(struct scsi_target *);
 
 	/*
+	 * If a host has the ability to discover targets on its own instead
+	 * of scanning the entire bus, it can fill in this function and
+	 * call scsi_scan_host().  This function will then be called
+	 * periodically, with the scsi_host and the elapsed scan time in
+	 * jiffies as arguments, until it returns 1.
+	 *
+	 * Status: OPTIONAL
+	 */
+	int (* scan_finished)(struct Scsi_Host *, unsigned long);
+
+	/*
+	 * If the host wants to be called before the scan starts, but
+	 * after the midlayer has set up ready for the scan, it can fill
+	 * in this function.
+	 */
+	void (* scan_start)(struct Scsi_Host *);
+
+	/*
 	 * fill in this function to allow the queue depth of this host
 	 * to be changeable (on a per device basis).  returns either
 	 * the current queue depth setting (may be different from what
@@ -552,6 +604,9 @@
 	/* task mgmt function in progress */
 	unsigned tmf_in_progress:1;
 
+	/* Asynchronous scan in progress */
+	unsigned async_scan:1;
+
 	/*
 	 * Optional work queue to be utilized by the transport
 	 */
@@ -568,6 +623,12 @@
 	 */
 	unsigned int max_host_blocked;
 
+	/*
+	 * q used for scsi_tgt msgs, async events or any other requests that
+	 * need to be processed in userspace
+	 */
+	struct request_queue *uspace_req_q;
+
 	/* legacy crap */
 	unsigned long base;
 	unsigned long io_port;
@@ -648,11 +709,6 @@
 
 extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);
 
-static inline void scsi_assign_lock(struct Scsi_Host *shost, spinlock_t *lock)
-{
-	shost->host_lock = lock;
-}
-
 static inline struct device *scsi_get_device(struct Scsi_Host *shost)
 {
         return shost->shost_gendev.parent;
@@ -671,6 +727,9 @@
 extern void scsi_block_requests(struct Scsi_Host *);
 
 struct class_container;
+
+extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
+						void (*) (struct request_queue *));
 /*
  * These two functions are used to allocate and free a pseudo device
  * which will connect to the host adapter itself rather than any
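
A host driver opting into the asynchronous scan support above would implement both hooks and let the midlayer poll until the scan is declared finished. A hedged sketch (the exdrv_* names and the discovery-done test are placeholders):

	static void exdrv_scan_start(struct Scsi_Host *shost)
	{
		/* kick off target discovery in firmware here */
	}

	static int exdrv_scan_finished(struct Scsi_Host *shost, unsigned long elapsed)
	{
		/* poll firmware; give up after ten seconds of scan time */
		if (elapsed < 10 * HZ && !exdrv_discovery_done(shost))
			return 0;
		return 1;
	}

	static struct scsi_host_template exdrv_template = {
		/* ... */
		.scan_start	= exdrv_scan_start,
		.scan_finished	= exdrv_scan_finished,
	};
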
diff --git a/include/scsi/scsi_tgt.h b/include/scsi/scsi_tgt.h
new file mode 100644
index 0000000..4f44279
--- /dev/null
+++ b/include/scsi/scsi_tgt.h
@@ -0,0 +1,19 @@
+/*
+ * SCSI target definitions
+ */
+
+#include <linux/dma-mapping.h>
+
+struct Scsi_Host;
+struct scsi_cmnd;
+struct scsi_lun;
+
+extern struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *);
+extern int scsi_tgt_alloc_queue(struct Scsi_Host *);
+extern void scsi_tgt_free_queue(struct Scsi_Host *);
+extern int scsi_tgt_queue_command(struct scsi_cmnd *, struct scsi_lun *, u64);
+extern int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *, int, u64, struct scsi_lun *,
+				     void *);
+extern struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *,
+					       enum dma_data_direction,	gfp_t);
+extern void scsi_host_put_command(struct Scsi_Host *, struct scsi_cmnd *);
diff --git a/include/scsi/scsi_tgt_if.h b/include/scsi/scsi_tgt_if.h
new file mode 100644
index 0000000..46d5e70
--- /dev/null
+++ b/include/scsi/scsi_tgt_if.h
@@ -0,0 +1,90 @@
+/*
+ * SCSI target kernel/user interface
+ *
+ * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
+ * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#ifndef __SCSI_TARGET_IF_H
+#define __SCSI_TARGET_IF_H
+
+/* user -> kernel */
+#define	TGT_UEVENT_CMD_RSP	0x0001
+#define	TGT_UEVENT_TSK_MGMT_RSP	0x0002
+
+/* kernel -> user */
+#define	TGT_KEVENT_CMD_REQ	0x1001
+#define	TGT_KEVENT_CMD_DONE	0x1002
+#define	TGT_KEVENT_TSK_MGMT_REQ	0x1003
+
+struct tgt_event_hdr {
+	uint16_t version;
+	uint16_t status;
+	uint16_t type;
+	uint16_t len;
+} __attribute__ ((aligned (sizeof(uint64_t))));
+
+struct tgt_event {
+	struct tgt_event_hdr hdr;
+
+	union {
+		/* user-> kernel */
+		struct {
+			int host_no;
+			uint32_t len;
+			int result;
+			aligned_u64 uaddr;
+			uint8_t rw;
+			aligned_u64 tag;
+		} cmd_rsp;
+		struct {
+			int host_no;
+			aligned_u64 mid;
+			int result;
+		} tsk_mgmt_rsp;
+
+
+		/* kernel -> user */
+		struct {
+			int host_no;
+			uint32_t data_len;
+			uint8_t scb[16];
+			uint8_t lun[8];
+			int attribute;
+			aligned_u64 tag;
+		} cmd_req;
+		struct {
+			int host_no;
+			aligned_u64 tag;
+			int result;
+		} cmd_done;
+		struct {
+			int host_no;
+			int function;
+			aligned_u64 tag;
+			uint8_t lun[8];
+			aligned_u64 mid;
+		} tsk_mgmt_req;
+	} p;
+} __attribute__ ((aligned (sizeof(uint64_t))));
+
+#define TGT_RING_SIZE (1UL << 16)
+#define TGT_RING_PAGES (TGT_RING_SIZE >> PAGE_SHIFT)
+#define TGT_EVENT_PER_PAGE (PAGE_SIZE / sizeof(struct tgt_event))
+#define TGT_MAX_EVENTS (TGT_EVENT_PER_PAGE * TGT_RING_PAGES)
+
+#endif
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index fd35232..798f7c7 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -206,9 +206,9 @@
 	u8 flags;
 	struct list_head peers;
 	struct device dev;
- 	struct work_struct dev_loss_work;
+ 	struct delayed_work dev_loss_work;
  	struct work_struct scan_work;
- 	struct work_struct fail_io_work;
+ 	struct delayed_work fail_io_work;
  	struct work_struct stgt_delete_work;
 	struct work_struct rport_delete_work;
 } __attribute__((aligned(sizeof(unsigned long))));
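
This and the following conversions to struct delayed_work track the same workqueue API change seen elsewhere in this merge: the handler takes a struct work_struct pointer and the timer lives inside the delayed_work. A sketch of the pattern these headers now assume (example_* names are illustrative):

	struct example_rport {
		struct delayed_work dev_loss_work;
	};

	static void example_dev_loss(struct work_struct *work)
	{
		struct example_rport *rp =
			container_of(work, struct example_rport, dev_loss_work.work);

		pr_debug("dev_loss fired for %p\n", rp);
	}

	static void example_setup(struct example_rport *rp)
	{
		INIT_DELAYED_WORK(&rp->dev_loss_work, example_dev_loss);
		schedule_delayed_work(&rp->dev_loss_work, 30 * HZ);
	}
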
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 4b95c89..d5c218d 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -176,7 +176,7 @@
 
 	/* recovery fields */
 	int recovery_tmo;
-	struct work_struct recovery_work;
+	struct delayed_work recovery_work;
 
 	int target_id;
 
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 5302437..59633a8 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -73,6 +73,8 @@
 
 	/* for the list of phys belonging to a port */
 	struct list_head	port_siblings;
+
+	struct work_struct      reset_work;
 };
 
 #define dev_to_phy(d) \
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 4c43521..3372039 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -511,7 +511,7 @@
 #ifdef CONFIG_SND_AC97_POWER_SAVE
 	unsigned int power_up;	/* power states */
 	struct workqueue_struct *power_workq;
-	struct work_struct power_work;
+	struct delayed_work power_work;
 #endif
 	struct device dev;
 };
diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h
index 11702aa..2ee0616 100644
--- a/include/sound/ak4114.h
+++ b/include/sound/ak4114.h
@@ -182,7 +182,7 @@
 	unsigned char rcs0;
 	unsigned char rcs1;
 	struct workqueue_struct *workqueue;
-	struct work_struct work;
+	struct delayed_work work;
 	void *change_callback_private;
 	void (*change_callback)(struct ak4114 *ak4114, unsigned char c0, unsigned char c1);
 };
diff --git a/include/sound/core.h b/include/sound/core.h
index fa1ca01..a994bea 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -132,6 +132,7 @@
 	int shutdown;			/* this card is going down */
 	int free_on_last_close;		/* free in context of file_release */
 	wait_queue_head_t shutdown_sleep;
+	struct device *parent;
 	struct device *dev;
 
 #ifdef CONFIG_PM
@@ -187,13 +188,14 @@
 	int device;			/* device number */
 	const struct file_operations *f_ops;	/* file operations */
 	void *private_data;		/* private data for f_ops->open */
-	struct class_device *class_dev;	/* class device for sysfs */
+	struct device *dev;		/* device for sysfs */
 };
 
 /* sound.c */
 
 extern int snd_major;
 extern int snd_ecards_limit;
+extern struct class *sound_class;
 
 void snd_request_card(int card);
 
@@ -203,7 +205,7 @@
 int snd_unregister_device(int type, struct snd_card *card, int dev);
 void *snd_lookup_minor_data(unsigned int minor, int type);
 int snd_add_device_sysfs_file(int type, struct snd_card *card, int dev,
-			      const struct class_device_attribute *attr);
+			      struct device_attribute *attr);
 
 #ifdef CONFIG_SND_OSSEMUL
 int snd_register_oss_device(int type, struct snd_card *card, int dev,
@@ -255,7 +257,7 @@
 int snd_card_file_remove(struct snd_card *card, struct file *file);
 
 #ifndef snd_card_set_dev
-#define snd_card_set_dev(card,devptr) ((card)->dev = (devptr))
+#define snd_card_set_dev(card,devptr) ((card)->parent = (devptr))
 #endif
 
 /* device.c */
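
snd_card_set_dev() keeps its driver-facing form but now records the hardware parent in the new card->parent field. A driver-side sketch (illustrative, PCI case):

	static void example_attach(struct snd_card *card, struct pci_dev *pci)
	{
		snd_card_set_dev(card, &pci->dev);	/* stored in card->parent */
	}
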
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index afaf3e8..2f645df 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -26,6 +26,7 @@
 #include <sound/asound.h>
 #include <sound/memalloc.h>
 #include <linux/poll.h>
+#include <linux/mm.h>
 #include <linux/bitops.h>
 
 #define snd_pcm_substream_chip(substream) ((substream)->private_data)
diff --git a/init/Kconfig b/init/Kconfig
index 176f7e5..14d4846 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -249,6 +249,26 @@
 
 	  Say N if unsure.
 
+config SYSFS_DEPRECATED
+	bool "Create deprecated sysfs files"
+	default y
+	help
+	  This option creates deprecated symlinks such as the
+	  "device"-link, the <subsystem>:<name>-link, and the
+	  "bus"-link. It may also add deprecated keys to the
+	  uevent environment.
+	  None of these features or values should be used today, as
+	  they export driver core implementation details to userspace
+	  or export properties which can't be kept stable across kernel
+	  releases.
+
+	  If enabled, this option will also move any device structures
+	  that belong to a class back into the /sys/class hierarchy, in
+	  order to support older versions of udev.
+
+	  If you are using a distro that was released in 2006 or later,
+	  it should be safe to say N here.
+
 config RELAY
 	bool "Kernel->user space relay support (formerly relayfs)"
 	help
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 919a80c..2cfd7cb 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -6,6 +6,7 @@
 #include <linux/romfs_fs.h>
 #include <linux/initrd.h>
 #include <linux/sched.h>
+#include <linux/freezer.h>
 
 #include "do_mounts.h"
 
diff --git a/init/initramfs.c b/init/initramfs.c
index d28c109..85f0403 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -182,6 +182,10 @@
 
 static int __init do_header(void)
 {
+	if (memcmp(collected, "070707", 6)==0) {
+		error("incorrect cpio method used: use -H newc option");
+		return 1;
+	}
 	if (memcmp(collected, "070701", 6)) {
 		error("no cpio magic");
 		return 1;
diff --git a/init/main.c b/init/main.c
index 36f608a..1174ae3 100644
--- a/init/main.c
+++ b/init/main.c
@@ -29,6 +29,7 @@
 #include <linux/percpu.h>
 #include <linux/kmod.h>
 #include <linux/kernel_stat.h>
+#include <linux/start_kernel.h>
 #include <linux/security.h>
 #include <linux/workqueue.h>
 #include <linux/profile.h>
@@ -73,6 +74,10 @@
 #error Sorry, your GCC is too old. It builds incorrect kernels.
 #endif
 
+#if __GNUC__ == 4 && __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ == 0
+#warning gcc-4.1.0 is known to miscompile the kernel.  A different compiler version is recommended.
+#endif
+
 static int init(void *);
 
 extern void init_IRQ(void);
diff --git a/ipc/compat.c b/ipc/compat.c
index 4d20cfd..fa18141 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -115,7 +115,6 @@
 
 extern int sem_ctls[];
 #define sc_semopm	(sem_ctls[2])
-#define MAXBUF (64*1024)
 
 static inline int compat_ipc_parse_version(int *cmd)
 {
@@ -307,35 +306,30 @@
 
 long compat_sys_msgsnd(int first, int second, int third, void __user *uptr)
 {
-	struct msgbuf __user *p;
 	struct compat_msgbuf __user *up = uptr;
 	long type;
 
 	if (first < 0)
 		return -EINVAL;
-	if (second < 0 || (second >= MAXBUF - sizeof(struct msgbuf)))
+	if (second < 0)
 		return -EINVAL;
 
-	p = compat_alloc_user_space(second + sizeof(struct msgbuf));
-	if (get_user(type, &up->mtype) ||
-	    put_user(type, &p->mtype) ||
-	    copy_in_user(p->mtext, up->mtext, second))
+	if (get_user(type, &up->mtype))
 		return -EFAULT;
 
-	return sys_msgsnd(first, p, second, third);
+	return do_msgsnd(first, type, up->mtext, second, third);
 }
 
 long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
 			   int version, void __user *uptr)
 {
-	struct msgbuf __user *p;
 	struct compat_msgbuf __user *up;
 	long type;
 	int err;
 
 	if (first < 0)
 		return -EINVAL;
-	if (second < 0 || (second >= MAXBUF - sizeof(struct msgbuf)))
+	if (second < 0)
 		return -EINVAL;
 
 	if (!version) {
@@ -349,14 +343,11 @@
 		uptr = compat_ptr(ipck.msgp);
 		msgtyp = ipck.msgtyp;
 	}
-	p = compat_alloc_user_space(second + sizeof(struct msgbuf));
-	err = sys_msgrcv(first, p, second, msgtyp, third);
+	up = uptr;
+	err = do_msgrcv(first, &type, up->mtext, second, msgtyp, third);
 	if (err < 0)
 		goto out;
-	up = uptr;
-	if (get_user(type, &p->mtype) ||
-	    put_user(type, &up->mtype) ||
-	    copy_in_user(up->mtext, p->mtext, err))
+	if (put_user(type, &up->mtype))
 		err = -EFAULT;
 out:
 	return err;
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 7c27400..3acc166 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -90,7 +90,7 @@
 static void remove_notification(struct mqueue_inode_info *info);
 
 static spinlock_t mq_lock;
-static kmem_cache_t *mqueue_inode_cachep;
+static struct kmem_cache *mqueue_inode_cachep;
 static struct vfsmount *mqueue_mnt;
 
 static unsigned int queues_count;
@@ -211,7 +211,7 @@
 	return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt);
 }
 
-static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
 
@@ -224,7 +224,7 @@
 {
 	struct mqueue_inode_info *ei;
 
-	ei = kmem_cache_alloc(mqueue_inode_cachep, SLAB_KERNEL);
+	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
diff --git a/ipc/msg.c b/ipc/msg.c
index 1266b1d..a388824 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -626,12 +626,11 @@
 	return 0;
 }
 
-asmlinkage long
-sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
+long do_msgsnd(int msqid, long mtype, void __user *mtext,
+		size_t msgsz, int msgflg)
 {
 	struct msg_queue *msq;
 	struct msg_msg *msg;
-	long mtype;
 	int err;
 	struct ipc_namespace *ns;
 
@@ -639,12 +638,10 @@
 
 	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
 		return -EINVAL;
-	if (get_user(mtype, &msgp->mtype))
-		return -EFAULT;
 	if (mtype < 1)
 		return -EINVAL;
 
-	msg = load_msg(msgp->mtext, msgsz);
+	msg = load_msg(mtext, msgsz);
 	if (IS_ERR(msg))
 		return PTR_ERR(msg);
 
@@ -723,6 +720,16 @@
 	return err;
 }
 
+asmlinkage long
+sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
+{
+	long mtype;
+
+	if (get_user(mtype, &msgp->mtype))
+		return -EFAULT;
+	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
+}
+
 static inline int convert_mode(long *msgtyp, int msgflg)
 {
 	/*
@@ -742,8 +749,8 @@
 	return SEARCH_EQUAL;
 }
 
-asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
-			   long msgtyp, int msgflg)
+long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
+		size_t msgsz, long msgtyp, int msgflg)
 {
 	struct msg_queue *msq;
 	struct msg_msg *msg;
@@ -889,15 +896,30 @@
 		return PTR_ERR(msg);
 
 	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
-	if (put_user (msg->m_type, &msgp->mtype) ||
-	    store_msg(msgp->mtext, msg, msgsz)) {
+	*pmtype = msg->m_type;
+	if (store_msg(mtext, msg, msgsz))
 		msgsz = -EFAULT;
-	}
+
 	free_msg(msg);
 
 	return msgsz;
 }
 
+asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
+			   long msgtyp, int msgflg)
+{
+	long err, mtype;
+
+	err =  do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
+	if (err < 0)
+		goto out;
+
+	if (put_user(mtype, &msgp->mtype))
+		err = -EFAULT;
+out:
+	return err;
+}
+
 #ifdef CONFIG_PROC_FS
 static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
 {
diff --git a/ipc/sem.c b/ipc/sem.c
index 21b3289..d3e12ef 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1070,14 +1070,13 @@
 	ipc_rcu_getref(sma);
 	sem_unlock(sma);
 
-	new = (struct sem_undo *) kmalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
 	if (!new) {
 		ipc_lock_by_ptr(&sma->sem_perm);
 		ipc_rcu_putref(sma);
 		sem_unlock(sma);
 		return ERR_PTR(-ENOMEM);
 	}
-	memset(new, 0, sizeof(struct sem_undo) + sizeof(short)*nsems);
 	new->semadj = (short *) &new[1];
 	new->semid = semid;
 
diff --git a/ipc/util.c b/ipc/util.c
index cd8bb14..a9b7a22 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -514,6 +514,11 @@
 	container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
 }
 
+static void ipc_do_vfree(struct work_struct *work)
+{
+	vfree(container_of(work, struct ipc_rcu_sched, work));
+}
+
 /**
  * ipc_schedule_free - free ipc + rcu space
  * @head: RCU callback structure for queued work
@@ -528,7 +533,7 @@
 	struct ipc_rcu_sched *sched =
 			container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]);
 
-	INIT_WORK(&sched->work, vfree, sched);
+	INIT_WORK(&sched->work, ipc_do_vfree);
 	schedule_work(&sched->work);
 }
 
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 248e1c3..4af15802 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -7,7 +7,7 @@
 	default HZ_250
 	help
 	 Allows the configuration of the timer frequency. It is customary
-	 to have the timer interrupt run at 1000 HZ but 100 HZ may be more
+	 to have the timer interrupt run at 1000 Hz but 100 Hz may be more
 	 beneficial for servers and NUMA systems that do not need to have
 	 a fast response for user interaction and that may experience bus
 	 contention and cacheline bounces as a result of timer interrupts.
@@ -19,21 +19,30 @@
 	config HZ_100
 		bool "100 HZ"
 	help
-	  100 HZ is a typical choice for servers, SMP and NUMA systems
+	  100 Hz is a typical choice for servers, SMP and NUMA systems
 	  with lots of processors that may show reduced performance if
 	  too many timer interrupts are occurring.
 
 	config HZ_250
 		bool "250 HZ"
 	help
-	 250 HZ is a good compromise choice allowing server performance
+	 250 Hz is a good compromise choice allowing server performance
 	 while also showing good interactive responsiveness even
-	 on SMP and NUMA systems.
+	 on SMP and NUMA systems. If you are going to be using NTSC video
+	 or multimedia, select 300 Hz instead.
+
+	config HZ_300
+		bool "300 HZ"
+	help
+	 300 Hz is a good compromise choice allowing server performance
+	 while also showing good interactive responsiveness even
+	 on SMP and NUMA systems. It is also an exact multiple of both the
+	 PAL and NTSC frame rates, which helps video and multimedia work.
 
 	config HZ_1000
 		bool "1000 HZ"
 	help
-	 1000 HZ is the preferred choice for desktop systems and other
+	 1000 Hz is the preferred choice for desktop systems and other
 	 systems requiring fast interactive responses to events.
 
 endchoice
@@ -42,5 +51,6 @@
 	int
 	default 100 if HZ_100
 	default 250 if HZ_250
+	default 300 if HZ_300
 	default 1000 if HZ_1000
 
diff --git a/kernel/acct.c b/kernel/acct.c
index 0aad5ca..dc12db8 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -89,7 +89,8 @@
 	struct timer_list	timer;
 };
 
-static struct acct_glbs acct_globals __cacheline_aligned = {SPIN_LOCK_UNLOCKED};
+static struct acct_glbs acct_globals __cacheline_aligned =
+	{__SPIN_LOCK_UNLOCKED(acct_globals.lock)};
 
 /*
  * Called whenever the timer says to check the free space.
diff --git a/kernel/audit.c b/kernel/audit.c
index 98106f6..d9b690a 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -57,6 +57,7 @@
 #include <linux/netlink.h>
 #include <linux/selinux.h>
 #include <linux/inotify.h>
+#include <linux/freezer.h>
 
 #include "audit.h"
 
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 4f40d92..2e896f8 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -636,10 +636,9 @@
 	struct audit_rule *rule;
 	int i;
 
-	rule = kmalloc(sizeof(*rule), GFP_KERNEL);
+	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
 	if (unlikely(!rule))
 		return NULL;
-	memset(rule, 0, sizeof(*rule));
 
 	rule->flags = krule->flags | krule->listnr;
 	rule->action = krule->action;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 42f2f11..40722e2 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -64,6 +64,7 @@
 #include <linux/tty.h>
 #include <linux/selinux.h>
 #include <linux/binfmts.h>
+#include <linux/highmem.h>
 #include <linux/syscalls.h>
 
 #include "audit.h"
@@ -730,7 +731,7 @@
 		printk(KERN_ERR "audit: freed %d contexts\n", count);
 }
 
-static void audit_log_task_context(struct audit_buffer *ab)
+void audit_log_task_context(struct audit_buffer *ab)
 {
 	char *ctx = NULL;
 	ssize_t len = 0;
@@ -759,6 +760,8 @@
 	return;
 }
 
+EXPORT_SYMBOL(audit_log_task_context);
+
 static void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
 {
 	char name[sizeof(tsk->comm)];
@@ -1487,6 +1490,8 @@
 	return ctx ? ctx->loginuid : -1;
 }
 
+EXPORT_SYMBOL(audit_get_loginuid);
+
 /**
  * __audit_mq_open - record audit data for a POSIX MQ open
  * @oflag: open flag
diff --git a/kernel/configs.c b/kernel/configs.c
index f9e3197..8fa1fb2 100644
--- a/kernel/configs.c
+++ b/kernel/configs.c
@@ -75,7 +75,7 @@
 	return count;
 }
 
-static struct file_operations ikconfig_file_ops = {
+static const struct file_operations ikconfig_file_ops = {
 	.owner = THIS_MODULE,
 	.read = ikconfig_read_current,
 };
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 272254f..9124669 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -270,11 +270,7 @@
 			goto out;
 		}
 	}
-	error = set_cpus_allowed(current, cpumask_of_cpu(first_cpu));
-	if (error) {
-		printk(KERN_ERR "Could not run on CPU%d\n", first_cpu);
-		goto out;
-	}
+
 	/* We take down all of the non-boot CPUs in one shot to avoid races
 	 * with the userspace trying to use the CPU hotplug at the same time
 	 */
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 6313c38..0a6b4d8 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -729,9 +729,11 @@
 	}
 
 	/* Remaining checks don't apply to root cpuset */
-	if ((par = cur->parent) == NULL)
+	if (cur == &top_cpuset)
 		return 0;
 
+	par = cur->parent;
+
 	/* We must be a subset of our parent cpuset */
 	if (!is_cpuset_subset(trial, par))
 		return -EACCES;
@@ -1060,10 +1062,7 @@
 	cpu_exclusive_changed =
 		(is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
 	mutex_lock(&callback_mutex);
-	if (turning_on)
-		set_bit(bit, &cs->flags);
-	else
-		clear_bit(bit, &cs->flags);
+	cs->flags = trialcs.flags;
 	mutex_unlock(&callback_mutex);
 
 	if (cpu_exclusive_changed)
@@ -1281,7 +1280,8 @@
 	FILE_TASKLIST,
 } cpuset_filetype_t;
 
-static ssize_t cpuset_common_file_write(struct file *file, const char __user *userbuf,
+static ssize_t cpuset_common_file_write(struct file *file,
+					const char __user *userbuf,
 					size_t nbytes, loff_t *unused_ppos)
 {
 	struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
@@ -1292,7 +1292,7 @@
 	int retval = 0;
 
 	/* Crude upper limit on largest legitimate cpulist user might write. */
-	if (nbytes > 100 + 6 * NR_CPUS)
+	if (nbytes > 100 + 6 * max(NR_CPUS, MAX_NUMNODES))
 		return -E2BIG;
 
 	/* +1 for nul-terminator */
@@ -1532,7 +1532,7 @@
 	return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
 }
 
-static struct file_operations cpuset_file_operations = {
+static const struct file_operations cpuset_file_operations = {
 	.read = cpuset_file_read,
 	.write = cpuset_file_write,
 	.llseek = generic_file_llseek,
@@ -2045,7 +2045,6 @@
 	return err;
 }
 
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_MEMORY_HOTPLUG)
 /*
  * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
  * or memory nodes, we need to walk over the cpuset hierarchy,
@@ -2109,9 +2108,7 @@
 	mutex_unlock(&callback_mutex);
 	mutex_unlock(&manage_mutex);
 }
-#endif
 
-#ifdef CONFIG_HOTPLUG_CPU
 /*
  * The top_cpuset tracks what CPUs and Memory Nodes are online,
  * period.  This is necessary in order to make cpusets transparent
@@ -2128,7 +2125,6 @@
 	common_cpu_mem_hotplug_unplug();
 	return 0;
 }
-#endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
@@ -2610,7 +2606,7 @@
 	return single_open(file, proc_cpuset_show, pid);
 }
 
-struct file_operations proc_cpuset_operations = {
+const struct file_operations proc_cpuset_operations = {
 	.open		= cpuset_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 66a0ea4..766d591 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -20,7 +20,7 @@
 #include <linux/delayacct.h>
 
 int delayacct_on __read_mostly = 1;	/* Delay accounting turned on/off */
-kmem_cache_t *delayacct_cache;
+struct kmem_cache *delayacct_cache;
 
 static int __init delayacct_setup_disable(char *str)
 {
@@ -41,7 +41,7 @@
 
 void __delayacct_tsk_init(struct task_struct *tsk)
 {
-	tsk->delays = kmem_cache_zalloc(delayacct_cache, SLAB_KERNEL);
+	tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
 	if (tsk->delays)
 		spin_lock_init(&tsk->delays->lock);
 }
diff --git a/kernel/dma.c b/kernel/dma.c
index 2020644..937b13c 100644
--- a/kernel/dma.c
+++ b/kernel/dma.c
@@ -140,7 +140,7 @@
 	return single_open(file, proc_dma_show, NULL);
 }
 
-static struct file_operations proc_dma_operations = {
+static const struct file_operations proc_dma_operations = {
 	.open		= proc_dma_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/kernel/exit.c b/kernel/exit.c
index 06de6c4..4e3f919 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -850,9 +850,7 @@
 fastcall NORET_TYPE void do_exit(long code)
 {
 	struct task_struct *tsk = current;
-	struct taskstats *tidstats;
 	int group_dead;
-	unsigned int mycpu;
 
 	profile_task_exit(tsk);
 
@@ -890,8 +888,6 @@
 				current->comm, current->pid,
 				preempt_count());
 
-	taskstats_exit_alloc(&tidstats, &mycpu);
-
 	acct_update_integrals(tsk);
 	if (tsk->mm) {
 		update_hiwater_rss(tsk->mm);
@@ -911,8 +907,8 @@
 #endif
 	if (unlikely(tsk->audit_context))
 		audit_free(tsk);
-	taskstats_exit_send(tsk, tidstats, group_dead, mycpu);
-	taskstats_exit_free(tidstats);
+
+	taskstats_exit(tsk, group_dead);
 
 	exit_mm(tsk);
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 8cdd3e7..7f2e31b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -82,26 +82,26 @@
 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
 # define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
 # define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
-static kmem_cache_t *task_struct_cachep;
+static struct kmem_cache *task_struct_cachep;
 #endif
 
 /* SLAB cache for signal_struct structures (tsk->signal) */
-static kmem_cache_t *signal_cachep;
+static struct kmem_cache *signal_cachep;
 
 /* SLAB cache for sighand_struct structures (tsk->sighand) */
-kmem_cache_t *sighand_cachep;
+struct kmem_cache *sighand_cachep;
 
 /* SLAB cache for files_struct structures (tsk->files) */
-kmem_cache_t *files_cachep;
+struct kmem_cache *files_cachep;
 
 /* SLAB cache for fs_struct structures (tsk->fs) */
-kmem_cache_t *fs_cachep;
+struct kmem_cache *fs_cachep;
 
 /* SLAB cache for vm_area_struct structures */
-kmem_cache_t *vm_area_cachep;
+struct kmem_cache *vm_area_cachep;
 
 /* SLAB cache for mm_struct structures (tsk->mm) */
-static kmem_cache_t *mm_cachep;
+static struct kmem_cache *mm_cachep;
 
 void free_task(struct task_struct *tsk)
 {
@@ -237,7 +237,7 @@
 				goto fail_nomem;
 			charge = len;
 		}
-		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (!tmp)
 			goto fail_nomem;
 		*tmp = *mpnt;
@@ -319,7 +319,7 @@
 
  __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
 
-#define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
+#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
 #define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
 
 #include <linux/init_task.h>
@@ -448,7 +448,16 @@
 		tsk->vfork_done = NULL;
 		complete(vfork_done);
 	}
-	if (tsk->clear_child_tid && atomic_read(&mm->mm_users) > 1) {
+
+	/*
+	 * If we're exiting normally, clear a user-space tid field if
+	 * requested.  We leave this alone when dying by signal, to leave
+	 * the value intact in a core dump, and to save the unnecessary
+	 * trouble otherwise.  Userland only wants this done for a sys_exit.
+	 */
+	if (tsk->clear_child_tid
+	    && !(tsk->flags & PF_SIGNALED)
+	    && atomic_read(&mm->mm_users) > 1) {
 		u32 __user * tidptr = tsk->clear_child_tid;
 		tsk->clear_child_tid = NULL;
 
@@ -479,6 +488,10 @@
 
 	memcpy(mm, oldmm, sizeof(*mm));
 
+	/* Initializing for Swap token stuff */
+	mm->token_priority = 0;
+	mm->last_interval = 0;
+
 	if (!mm_init(mm))
 		goto fail_nomem;
 
@@ -542,6 +555,10 @@
 		goto fail_nomem;
 
 good_mm:
+	/* Initializing for Swap token stuff */
+	mm->token_priority = 0;
+	mm->last_interval = 0;
+
 	tsk->mm = mm;
 	tsk->active_mm = mm;
 	return 0;
@@ -613,7 +630,7 @@
 	struct files_struct *newf;
 	struct fdtable *fdt;
 
-	newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
+	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
 	if (!newf)
 		goto out;
 
@@ -830,7 +847,6 @@
 	if (clone_flags & CLONE_THREAD) {
 		atomic_inc(&current->signal->count);
 		atomic_inc(&current->signal->live);
-		taskstats_tgid_alloc(current);
 		return 0;
 	}
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
@@ -1303,7 +1319,7 @@
 	return ERR_PTR(retval);
 }
 
-struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
+noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
 {
 	memset(regs, 0, sizeof(struct pt_regs));
 	return regs;
@@ -1413,7 +1429,7 @@
 #define ARCH_MIN_MMSTRUCT_ALIGN 0
 #endif
 
-static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
+static void sighand_ctor(void *data, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct sighand_struct *sighand = data;
 
diff --git a/kernel/futex.c b/kernel/futex.c
index 93ef30b..95989a3 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -282,9 +282,9 @@
 {
 	int ret;
 
-	inc_preempt_count();
+	pagefault_disable();
 	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
-	dec_preempt_count();
+	pagefault_enable();
 
 	return ret ? -EFAULT : 0;
 }
@@ -324,12 +324,11 @@
 	if (likely(current->pi_state_cache))
 		return 0;
 
-	pi_state = kmalloc(sizeof(*pi_state), GFP_KERNEL);
+	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
 
 	if (!pi_state)
 		return -ENOMEM;
 
-	memset(pi_state, 0, sizeof(*pi_state));
 	INIT_LIST_HEAD(&pi_state->list);
 	/* pi_mutex gets initialized later */
 	pi_state->owner = NULL;
@@ -553,7 +552,7 @@
 	 * at the end of wake_up_all() does not prevent this store from
 	 * moving.
 	 */
-	wmb();
+	smp_wmb();
 	q->lock_ptr = NULL;
 }
 
@@ -585,9 +584,9 @@
 	if (!(uval & FUTEX_OWNER_DIED)) {
 		newval = FUTEX_WAITERS | new_owner->pid;
 
-		inc_preempt_count();
+		pagefault_disable();
 		curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-		dec_preempt_count();
+		pagefault_enable();
 		if (curval == -EFAULT)
 			return -EFAULT;
 		if (curval != uval)
@@ -618,9 +617,9 @@
 	 * There is no waiter, so we unlock the futex. The owner died
 	 * bit has not to be preserved here. We are the owner:
 	 */
-	inc_preempt_count();
+	pagefault_disable();
 	oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0);
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (oldval == -EFAULT)
 		return oldval;
@@ -1158,9 +1157,9 @@
 	 */
 	newval = current->pid;
 
-	inc_preempt_count();
+	pagefault_disable();
 	curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval);
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (unlikely(curval == -EFAULT))
 		goto uaddr_faulted;
@@ -1183,9 +1182,9 @@
 	uval = curval;
 	newval = uval | FUTEX_WAITERS;
 
-	inc_preempt_count();
+	pagefault_disable();
 	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-	dec_preempt_count();
+	pagefault_enable();
 
 	if (unlikely(curval == -EFAULT))
 		goto uaddr_faulted;
@@ -1215,10 +1214,10 @@
 			newval = current->pid |
 				FUTEX_OWNER_DIED | FUTEX_WAITERS;
 
-			inc_preempt_count();
+			pagefault_disable();
 			curval = futex_atomic_cmpxchg_inatomic(uaddr,
 							       uval, newval);
-			dec_preempt_count();
+			pagefault_enable();
 
 			if (unlikely(curval == -EFAULT))
 				goto uaddr_faulted;
@@ -1390,9 +1389,9 @@
 	 * anyone else up:
 	 */
 	if (!(uval & FUTEX_OWNER_DIED)) {
-		inc_preempt_count();
+		pagefault_disable();
 		uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
-		dec_preempt_count();
+		pagefault_enable();
 	}
 
 	if (unlikely(uval == -EFAULT))
@@ -1493,7 +1492,7 @@
 	return ret;
 }
 
-static struct file_operations futex_fops = {
+static const struct file_operations futex_fops = {
 	.release	= futex_close,
 	.poll		= futex_poll,
 };
@@ -1858,10 +1857,16 @@
 
 static int __init init(void)
 {
-	unsigned int i;
+	int i = register_filesystem(&futex_fs_type);
 
-	register_filesystem(&futex_fs_type);
+	if (i)
+		return i;
+
 	futex_mnt = kern_mount(&futex_fs_type);
+	if (IS_ERR(futex_mnt)) {
+		unregister_filesystem(&futex_fs_type);
+		return PTR_ERR(futex_mnt);
+	}
 
 	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
 		INIT_LIST_HEAD(&futex_queues[i].chain);
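
Every futex fast-path user access above is bracketed by the
pagefault_disable()/pagefault_enable() pair instead of raw preempt-count
manipulation, so a fault inside the window fails the *_inatomic access
instead of sleeping and the caller can retry with its locks dropped.  A
minimal sketch of the pattern, using a hypothetical helper name:

	/* Hypothetical helper illustrating the pagefault_disable() pattern
	 * used by the futex conversions above.
	 */
	static int fetch_futex_value(u32 *dest, u32 __user *from)
	{
		int ret;

		pagefault_disable();
		ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
		pagefault_enable();

		return ret ? -EFAULT : 0;
	}
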
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index a681912..aff1f0f 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -54,7 +54,7 @@
 		.chip = &no_irq_chip,
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
-		.lock = SPIN_LOCK_UNLOCKED,
+		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
 #ifdef CONFIG_SMP
 		.affinity = CPU_MASK_ALL
 #endif
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index eeac3e3..ab63cfc 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -20,6 +20,7 @@
 #include <linux/proc_fs.h>
 #include <linux/sched.h>	/* for cond_resched */
 #include <linux/mm.h>
+#include <linux/ctype.h>
 
 #include <asm/sections.h>
 
@@ -301,13 +302,6 @@
 	char name[KSYM_NAME_LEN+1];
 };
 
-/* Only label it "global" if it is exported. */
-static void upcase_if_global(struct kallsym_iter *iter)
-{
-	if (is_exported(iter->name, iter->owner))
-		iter->type += 'A' - 'a';
-}
-
 static int get_ksymbol_mod(struct kallsym_iter *iter)
 {
 	iter->owner = module_get_kallsym(iter->pos - kallsyms_num_syms,
@@ -316,7 +310,10 @@
 	if (iter->owner == NULL)
 		return 0;
 
-	upcase_if_global(iter);
+	/* Label it "global" if it is exported, "local" if not exported. */
+	iter->type = is_exported(iter->name, iter->owner)
+		? toupper(iter->type) : tolower(iter->type);
+
 	return 1;
 }
 
@@ -401,7 +398,7 @@
 	return 0;
 }
 
-static struct seq_operations kallsyms_op = {
+static const struct seq_operations kallsyms_op = {
 	.start = s_start,
 	.next = s_next,
 	.stop = s_stop,
@@ -436,7 +433,7 @@
 	return seq_release(inode, file);
 }
 
-static struct file_operations kallsyms_operations = {
+static const struct file_operations kallsyms_operations = {
 	.open = kallsyms_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/kernel/kexec.c b/kernel/kexec.c
index fcdd5d2..2a59c8a 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -20,6 +20,8 @@
 #include <linux/syscalls.h>
 #include <linux/ioport.h>
 #include <linux/hardirq.h>
+#include <linux/elf.h>
+#include <linux/elfcore.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
@@ -108,11 +110,10 @@
 
 	/* Allocate a controlling structure */
 	result = -ENOMEM;
-	image = kmalloc(sizeof(*image), GFP_KERNEL);
+	image = kzalloc(sizeof(*image), GFP_KERNEL);
 	if (!image)
 		goto out;
 
-	memset(image, 0, sizeof(*image));
 	image->head = 0;
 	image->entry = &image->head;
 	image->last_entry = &image->head;
@@ -851,6 +852,7 @@
 			memset(ptr + uchunk, 0, mchunk - uchunk);
 		}
 		result = copy_from_user(ptr, buf, uchunk);
+		kexec_flush_icache_page(page);
 		kunmap(page);
 		if (result) {
 			result = (result < 0) ? result : -EIO;
@@ -1067,6 +1069,60 @@
 	}
 }
 
+static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
+			    size_t data_len)
+{
+	struct elf_note note;
+
+	note.n_namesz = strlen(name) + 1;
+	note.n_descsz = data_len;
+	note.n_type   = type;
+	memcpy(buf, &note, sizeof(note));
+	buf += (sizeof(note) + 3)/4;
+	memcpy(buf, name, note.n_namesz);
+	buf += (note.n_namesz + 3)/4;
+	memcpy(buf, data, note.n_descsz);
+	buf += (note.n_descsz + 3)/4;
+
+	return buf;
+}
+
+static void final_note(u32 *buf)
+{
+	struct elf_note note;
+
+	note.n_namesz = 0;
+	note.n_descsz = 0;
+	note.n_type   = 0;
+	memcpy(buf, &note, sizeof(note));
+}
+
+void crash_save_cpu(struct pt_regs *regs, int cpu)
+{
+	struct elf_prstatus prstatus;
+	u32 *buf;
+
+	if ((cpu < 0) || (cpu >= NR_CPUS))
+		return;
+
+	/* Using ELF notes here is opportunistic.
+	 * I need a well defined structure format
+	 * for the data I pass, and I need tags
+	 * on the data to indicate what information I have
+	 * squirrelled away.  ELF notes happen to provide
+	 * all of that, so there is no need to invent something new.
+	 */
+	buf = (u32*)per_cpu_ptr(crash_notes, cpu);
+	if (!buf)
+		return;
+	memset(&prstatus, 0, sizeof(prstatus));
+	prstatus.pr_pid = current->pid;
+	elf_core_copy_regs(&prstatus.pr_reg, regs);
+	buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
+				sizeof(prstatus));
+	final_note(buf);
+}
+
 static int __init crash_notes_memory_init(void)
 {
 	/* Allocate memory for saving cpu registers. */
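
append_elf_note() above emits the standard ELF note layout: an elf_note
header (n_namesz, n_descsz, n_type) followed by the name and then the
descriptor, each advanced to the next u32 boundary by the (len + 3)/4
arithmetic, and final_note() terminates the buffer with an empty header.
A sketch of a hypothetical caller that adds one extra vendor note next to
the PRSTATUS note (the "DUMMY" name, the 0x1000 type and the helper name
are illustrative only):

	static void crash_save_cpu_example(u32 *buf, struct elf_prstatus *prstatus,
					   void *vendor_data, size_t vendor_len)
	{
		buf = append_elf_note(buf, "CORE", NT_PRSTATUS,
				      prstatus, sizeof(*prstatus));
		buf = append_elf_note(buf, "DUMMY", 0x1000,
				      vendor_data, vendor_len);
		final_note(buf);
	}
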
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 2b76dee..8d2bea0 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -114,6 +114,7 @@
 #endif /* CONFIG_KMOD */
 
 struct subprocess_info {
+	struct work_struct work;
 	struct completion *complete;
 	char *path;
 	char **argv;
@@ -221,9 +222,10 @@
 }
 
 /* This is run by khelper thread  */
-static void __call_usermodehelper(void *data)
+static void __call_usermodehelper(struct work_struct *work)
 {
-	struct subprocess_info *sub_info = data;
+	struct subprocess_info *sub_info =
+		container_of(work, struct subprocess_info, work);
 	pid_t pid;
 	int wait = sub_info->wait;
 
@@ -264,6 +266,8 @@
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct subprocess_info sub_info = {
+		.work		= __WORK_INITIALIZER(sub_info.work,
+						     __call_usermodehelper),
 		.complete	= &done,
 		.path		= path,
 		.argv		= argv,
@@ -272,7 +276,6 @@
 		.wait		= wait,
 		.retval		= 0,
 	};
-	DECLARE_WORK(work, __call_usermodehelper, &sub_info);
 
 	if (!khelper_wq)
 		return -EBUSY;
@@ -280,7 +283,7 @@
 	if (path[0] == '\0')
 		return 0;
 
-	queue_work(khelper_wq, &work);
+	queue_work(khelper_wq, &sub_info.work);
 	wait_for_completion(&done);
 	return sub_info.retval;
 }
@@ -291,6 +294,8 @@
 {
 	DECLARE_COMPLETION(done);
 	struct subprocess_info sub_info = {
+		.work		= __WORK_INITIALIZER(sub_info.work,
+						     __call_usermodehelper),
 		.complete	= &done,
 		.path		= path,
 		.argv		= argv,
@@ -298,7 +303,6 @@
 		.retval		= 0,
 	};
 	struct file *f;
-	DECLARE_WORK(work, __call_usermodehelper, &sub_info);
 
 	if (!khelper_wq)
 		return -EBUSY;
@@ -318,7 +322,7 @@
 	}
 	sub_info.stdin = f;
 
-	queue_work(khelper_wq, &work);
+	queue_work(khelper_wq, &sub_info.work);
 	wait_for_completion(&done);
 	return sub_info.retval;
 }
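
The kmod conversion above (and the kthread one further down) follows the
work_struct API change in this series: the work item is embedded in the
structure its handler needs and recovered with container_of(), replacing
the old DECLARE_WORK(name, fn, data) form that carried a void * argument.
A minimal sketch of the new-style pattern, with hypothetical names:

	/* Hypothetical example of the embedded work_struct pattern;
	 * needs <linux/workqueue.h>.
	 */
	struct frob_request {
		struct work_struct work;
		int arg;
	};

	static void frob_handler(struct work_struct *work)
	{
		struct frob_request *req =
			container_of(work, struct frob_request, work);

		pr_debug("frobbing %d\n", req->arg);
	}

	static void frob_submit(struct frob_request *req, int arg)
	{
		req->arg = arg;
		INIT_WORK(&req->work, frob_handler);
		schedule_work(&req->work);
	}
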
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 610c837..17ec4af 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -38,6 +38,7 @@
 #include <linux/module.h>
 #include <linux/moduleloader.h>
 #include <linux/kallsyms.h>
+#include <linux/freezer.h>
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
@@ -83,9 +84,36 @@
 	kprobe_opcode_t *insns;		/* Page of instruction slots */
 	char slot_used[INSNS_PER_PAGE];
 	int nused;
+	int ngarbage;
 };
 
 static struct hlist_head kprobe_insn_pages;
+static int kprobe_garbage_slots;
+static int collect_garbage_slots(void);
+
+static int __kprobes check_safety(void)
+{
+	int ret = 0;
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
+	ret = freeze_processes();
+	if (ret == 0) {
+		struct task_struct *p, *q;
+		do_each_thread(p, q) {
+			if (p != current && p->state == TASK_RUNNING &&
+			    p->pid != 0) {
+				printk("Check failed: %s is running\n",p->comm);
+				ret = -1;
+				goto loop_end;
+			}
+		} while_each_thread(p, q);
+	}
+loop_end:
+	thaw_processes();
+#else
+	synchronize_sched();
+#endif
+	return ret;
+}
 
 /**
  * get_insn_slot() - Find a slot on an executable page for an instruction.
@@ -96,6 +124,7 @@
 	struct kprobe_insn_page *kip;
 	struct hlist_node *pos;
 
+      retry:
 	hlist_for_each(pos, &kprobe_insn_pages) {
 		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
 		if (kip->nused < INSNS_PER_PAGE) {
@@ -112,7 +141,11 @@
 		}
 	}
 
-	/* All out of space.  Need to allocate a new page. Use slot 0.*/
+	/* If there are any garbage slots, collect them and try again. */
+	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
+		goto retry;
+	}
+	/* All out of space.  Need to allocate a new page. Use slot 0. */
 	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
 	if (!kip) {
 		return NULL;
@@ -133,10 +166,62 @@
 	memset(kip->slot_used, 0, INSNS_PER_PAGE);
 	kip->slot_used[0] = 1;
 	kip->nused = 1;
+	kip->ngarbage = 0;
 	return kip->insns;
 }
 
-void __kprobes free_insn_slot(kprobe_opcode_t *slot)
+/* Return 1 if all garbage is collected, otherwise 0. */
+static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
+{
+	kip->slot_used[idx] = 0;
+	kip->nused--;
+	if (kip->nused == 0) {
+		/*
+		 * Page is no longer in use.  Free it unless
+		 * it's the last one.  We keep the last one
+		 * so as not to have to set it up again the
+		 * next time somebody inserts a probe.
+		 */
+		hlist_del(&kip->hlist);
+		if (hlist_empty(&kprobe_insn_pages)) {
+			INIT_HLIST_NODE(&kip->hlist);
+			hlist_add_head(&kip->hlist,
+				       &kprobe_insn_pages);
+		} else {
+			module_free(NULL, kip->insns);
+			kfree(kip);
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static int __kprobes collect_garbage_slots(void)
+{
+	struct kprobe_insn_page *kip;
+	struct hlist_node *pos, *next;
+
+	/* Ensure no one is preempted on the garbage slots */
+	if (check_safety() != 0)
+		return -EAGAIN;
+
+	hlist_for_each_safe(pos, next, &kprobe_insn_pages) {
+		int i;
+		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
+		if (kip->ngarbage == 0)
+			continue;
+		kip->ngarbage = 0;	/* we will collect all garbage */
+		for (i = 0; i < INSNS_PER_PAGE; i++) {
+			if (kip->slot_used[i] == -1 &&
+			    collect_one_slot(kip, i))
+				break;
+		}
+	}
+	kprobe_garbage_slots = 0;
+	return 0;
+}
+
+void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
 {
 	struct kprobe_insn_page *kip;
 	struct hlist_node *pos;
@@ -146,28 +231,18 @@
 		if (kip->insns <= slot &&
 		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
 			int i = (slot - kip->insns) / MAX_INSN_SIZE;
-			kip->slot_used[i] = 0;
-			kip->nused--;
-			if (kip->nused == 0) {
-				/*
-				 * Page is no longer in use.  Free it unless
-				 * it's the last one.  We keep the last one
-				 * so as not to have to set it up again the
-				 * next time somebody inserts a probe.
-				 */
-				hlist_del(&kip->hlist);
-				if (hlist_empty(&kprobe_insn_pages)) {
-					INIT_HLIST_NODE(&kip->hlist);
-					hlist_add_head(&kip->hlist,
-						&kprobe_insn_pages);
-				} else {
-					module_free(NULL, kip->insns);
-					kfree(kip);
-				}
+			if (dirty) {
+				kip->slot_used[i] = -1;
+				kip->ngarbage++;
+			} else {
+				collect_one_slot(kip, i);
 			}
-			return;
+			break;
 		}
 	}
+	if (dirty && (++kprobe_garbage_slots > INSNS_PER_PAGE)) {
+		collect_garbage_slots();
+	}
 }
 #endif
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4f9c60e..1db8c72 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -31,6 +31,8 @@
 	/* Result passed back to kthread_create() from keventd. */
 	struct task_struct *result;
 	struct completion done;
+
+	struct work_struct work;
 };
 
 struct kthread_stop_info
@@ -111,9 +113,10 @@
 }
 
 /* We are keventd: create a thread. */
-static void keventd_create_kthread(void *_create)
+static void keventd_create_kthread(struct work_struct *work)
 {
-	struct kthread_create_info *create = _create;
+	struct kthread_create_info *create =
+		container_of(work, struct kthread_create_info, work);
 	int pid;
 
 	/* We want our own signal handler (we take no signals by default). */
@@ -154,20 +157,20 @@
 				   ...)
 {
 	struct kthread_create_info create;
-	DECLARE_WORK(work, keventd_create_kthread, &create);
 
 	create.threadfn = threadfn;
 	create.data = data;
 	init_completion(&create.started);
 	init_completion(&create.done);
+	INIT_WORK(&create.work, keventd_create_kthread);
 
 	/*
 	 * The workqueue needs to start up first:
 	 */
 	if (!helper_wq)
-		work.func(work.data);
+		create.work.func(&create.work);
 	else {
-		queue_work(helper_wq, &work);
+		queue_work(helper_wq, &create.work);
 		wait_for_completion(&create.done);
 	}
 	if (!IS_ERR(create.result)) {
diff --git a/kernel/latency.c b/kernel/latency.c
index 258f255..e63fcac 100644
--- a/kernel/latency.c
+++ b/kernel/latency.c
@@ -36,6 +36,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/notifier.h>
+#include <linux/jiffies.h>
 #include <asm/atomic.h>
 
 struct latency_info {
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index c9fefdb..b020324 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -140,13 +140,6 @@
 
 EXPORT_SYMBOL(lockdep_on);
 
-int lockdep_internal(void)
-{
-	return current->lockdep_recursion != 0;
-}
-
-EXPORT_SYMBOL(lockdep_internal);
-
 /*
  * Debugging switches:
  */
@@ -228,17 +221,15 @@
 	trace->skip = 3;
 	trace->all_contexts = 0;
 
-	/* Make sure to not recurse in case the the unwinder needs to tak
-e	   locks. */
-	lockdep_off();
 	save_stack_trace(trace, NULL);
-	lockdep_on();
 
 	trace->max_entries = trace->nr_entries;
 
 	nr_stack_trace_entries += trace->nr_entries;
-	if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES))
+	if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES)) {
+		__raw_spin_unlock(&hash_lock);
 		return 0;
+	}
 
 	if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
 		__raw_spin_unlock(&hash_lock);
@@ -357,7 +348,7 @@
 
 static void print_lock_name(struct lock_class *class)
 {
-	char str[128], c1, c2, c3, c4;
+	char str[KSYM_NAME_LEN + 1], c1, c2, c3, c4;
 	const char *name;
 
 	get_usage_chars(class, &c1, &c2, &c3, &c4);
@@ -379,7 +370,7 @@
 static void print_lockdep_cache(struct lockdep_map *lock)
 {
 	const char *name;
-	char str[128];
+	char str[KSYM_NAME_LEN + 1];
 
 	name = lock->name;
 	if (!name)
@@ -449,7 +440,9 @@
 	print_lock_class_header(class, depth);
 
 	list_for_each_entry(entry, &class->locks_after, entry) {
-		DEBUG_LOCKS_WARN_ON(!entry->class);
+		if (DEBUG_LOCKS_WARN_ON(!entry->class))
+			return;
+
 		print_lock_dependencies(entry->class, depth + 1);
 
 		printk("%*s ... acquired at:\n",depth,"");
@@ -474,7 +467,8 @@
 		return 0;
 
 	entry->class = this;
-	save_trace(&entry->trace);
+	if (!save_trace(&entry->trace))
+		return 0;
 
 	/*
 	 * Since we never remove from the dependency list, the list can
@@ -562,8 +556,12 @@
 	if (debug_locks_silent)
 		return 0;
 
+	/* hash_lock unlocked by the header */
+	__raw_spin_lock(&hash_lock);
 	this.class = check_source->class;
-	save_trace(&this.trace);
+	if (!save_trace(&this.trace))
+		return 0;
+	__raw_spin_unlock(&hash_lock);
 	print_circular_bug_entry(&this, 0);
 
 	printk("\nother info that might help us debug this:\n\n");
@@ -966,14 +964,11 @@
 			       &prev->class->locks_after, next->acquire_ip);
 	if (!ret)
 		return 0;
-	/*
-	 * Return value of 2 signals 'dependency already added',
-	 * in that case we dont have to add the backlink either.
-	 */
-	if (ret == 2)
-		return 2;
+
 	ret = add_lock_to_list(next->class, prev->class,
 			       &next->class->locks_before, next->acquire_ip);
+	if (!ret)
+		return 0;
 
 	/*
 	 * Debugging printouts:
@@ -1025,7 +1020,8 @@
 		 * added:
 		 */
 		if (hlock->read != 2) {
-			check_prev_add(curr, hlock, next);
+			if (!check_prev_add(curr, hlock, next))
+				return 0;
 			/*
 			 * Stop after the first non-trylock entry,
 			 * as non-trylock entries have added their
@@ -1182,6 +1178,7 @@
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
 	struct lock_class *class;
+	unsigned long flags;
 
 	class = look_up_lock_class(lock, subclass);
 	if (likely(class))
@@ -1203,6 +1200,7 @@
 	key = lock->key->subkeys + subclass;
 	hash_head = classhashentry(key);
 
+	raw_local_irq_save(flags);
 	__raw_spin_lock(&hash_lock);
 	/*
 	 * We have to do the hash-walk again, to avoid races
@@ -1217,6 +1215,7 @@
 	 */
 	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
 		__raw_spin_unlock(&hash_lock);
+		raw_local_irq_restore(flags);
 		debug_locks_off();
 		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
 		printk("turning off the locking correctness validator.\n");
@@ -1239,15 +1238,18 @@
 
 	if (verbose(class)) {
 		__raw_spin_unlock(&hash_lock);
+		raw_local_irq_restore(flags);
 		printk("\nnew class %p: %s", class->key, class->name);
 		if (class->name_version > 1)
 			printk("#%d", class->name_version);
 		printk("\n");
 		dump_stack();
+		raw_local_irq_save(flags);
 		__raw_spin_lock(&hash_lock);
 	}
 out_unlock_set:
 	__raw_spin_unlock(&hash_lock);
+	raw_local_irq_restore(flags);
 
 	if (!subclass || force)
 		lock->class_cache = class;
@@ -1728,6 +1730,7 @@
 		debug_atomic_dec(&nr_unused_locks);
 		break;
 	default:
+		__raw_spin_unlock(&hash_lock);
 		debug_locks_off();
 		WARN_ON(1);
 		return 0;
@@ -2645,6 +2648,7 @@
 	}
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
 static void print_held_locks_bug(struct task_struct *curr)
 {
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index eab043c..8ce09bc 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -20,7 +20,7 @@
 #define MAX_LOCKDEP_KEYS_BITS	11
 #define MAX_LOCKDEP_KEYS	(1UL << MAX_LOCKDEP_KEYS_BITS)
 
-#define MAX_LOCKDEP_CHAINS_BITS	13
+#define MAX_LOCKDEP_CHAINS_BITS	14
 #define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
 
 /*
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index f6e72ea..b554b40 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -113,7 +113,7 @@
 	return 0;
 }
 
-static struct seq_operations lockdep_ops = {
+static const struct seq_operations lockdep_ops = {
 	.start	= l_start,
 	.next	= l_next,
 	.stop	= l_stop,
@@ -135,7 +135,7 @@
 	return res;
 }
 
-static struct file_operations proc_lockdep_operations = {
+static const struct file_operations proc_lockdep_operations = {
 	.open		= lockdep_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -319,7 +319,7 @@
 	return single_open(file, lockdep_stats_show, NULL);
 }
 
-static struct file_operations proc_lockdep_stats_operations = {
+static const struct file_operations proc_lockdep_stats_operations = {
 	.open		= lockdep_stats_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/kernel/module.c b/kernel/module.c
index f016656..d9eae45 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -34,10 +34,10 @@
 #include <linux/err.h>
 #include <linux/vermagic.h>
 #include <linux/notifier.h>
+#include <linux/sched.h>
 #include <linux/stop_machine.h>
 #include <linux/device.h>
 #include <linux/string.h>
-#include <linux/sched.h>
 #include <linux/mutex.h>
 #include <linux/unwind.h>
 #include <asm/uaccess.h>
@@ -790,6 +790,19 @@
 	.show = show_refcnt,
 };
 
+void module_put(struct module *module)
+{
+	if (module) {
+		unsigned int cpu = get_cpu();
+		local_dec(&module->ref[cpu].count);
+		/* Maybe they're waiting for us to drop reference? */
+		if (unlikely(!module_is_live(module)))
+			wake_up_process(module->waiter);
+		put_cpu();
+	}
+}
+EXPORT_SYMBOL(module_put);
+
 #else /* !CONFIG_MODULE_UNLOAD */
 static void print_unload_info(struct seq_file *m, struct module *mod)
 {
@@ -1086,22 +1099,35 @@
 		goto out;
 	kobj_set_kset_s(&mod->mkobj, module_subsys);
 	mod->mkobj.mod = mod;
-	err = kobject_register(&mod->mkobj.kobj);
+
+	/* delay uevent until full sysfs population */
+	kobject_init(&mod->mkobj.kobj);
+	err = kobject_add(&mod->mkobj.kobj);
 	if (err)
 		goto out;
 
+	mod->drivers_dir = kobject_add_dir(&mod->mkobj.kobj, "drivers");
+	if (!mod->drivers_dir)
+		goto out_unreg;
+
 	err = module_param_sysfs_setup(mod, kparam, num_params);
 	if (err)
-		goto out_unreg;
+		goto out_unreg_drivers;
 
 	err = module_add_modinfo_attrs(mod);
 	if (err)
-		goto out_unreg;
+		goto out_unreg_param;
 
+	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
 	return 0;
 
+out_unreg_drivers:
+	kobject_unregister(mod->drivers_dir);
+out_unreg_param:
+	module_param_sysfs_remove(mod);
 out_unreg:
-	kobject_unregister(&mod->mkobj.kobj);
+	kobject_del(&mod->mkobj.kobj);
+	kobject_put(&mod->mkobj.kobj);
 out:
 	return err;
 }
@@ -1110,6 +1136,7 @@
 {
 	module_remove_modinfo_attrs(mod);
 	module_param_sysfs_remove(mod);
+	kobject_unregister(mod->drivers_dir);
 
 	kobject_unregister(&mod->mkobj.kobj);
 }
@@ -2182,7 +2209,7 @@
    Where refcount is a number or -, and deps is a comma-separated list
    of depends or -.
 */
-struct seq_operations modules_op = {
+const struct seq_operations modules_op = {
 	.start	= m_start,
 	.next	= m_next,
 	.stop	= m_stop,
@@ -2275,11 +2302,14 @@
 
 void module_add_driver(struct module *mod, struct device_driver *drv)
 {
+	int no_warn;
+
 	if (!mod || !drv)
 		return;
 
-	/* Don't check return code; this call is idempotent */
-	sysfs_create_link(&drv->kobj, &mod->mkobj.kobj, "module");
+	/* Don't check return codes; these calls are idempotent */
+	no_warn = sysfs_create_link(&drv->kobj, &mod->mkobj.kobj, "module");
+	no_warn = sysfs_create_link(mod->drivers_dir, &drv->kobj, drv->name);
 }
 EXPORT_SYMBOL(module_add_driver);
 
@@ -2288,6 +2318,8 @@
 	if (!drv)
 		return;
 	sysfs_remove_link(&drv->kobj, "module");
+	if (drv->owner && drv->owner->drivers_dir)
+		sysfs_remove_link(drv->owner->drivers_dir, drv->name);
 }
 EXPORT_SYMBOL(module_remove_driver);
 
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index 1865164..841539d 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -77,6 +77,9 @@
 
 void debug_mutex_unlock(struct mutex *lock)
 {
+	if (unlikely(!debug_locks))
+		return;
+
 	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
diff --git a/kernel/pid.c b/kernel/pid.c
index b914392..a48879b 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -31,7 +31,7 @@
 #define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
 static struct hlist_head *pid_hash;
 static int pidhash_shift;
-static kmem_cache_t *pid_cachep;
+static struct kmem_cache *pid_cachep;
 
 int pid_max = PID_MAX_DEFAULT;
 
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 9cbb5d1..5fe87de 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -70,7 +70,7 @@
 /*
  * Lets keep our timers in a slab cache :-)
  */
-static kmem_cache_t *posix_timers_cache;
+static struct kmem_cache *posix_timers_cache;
 static struct idr posix_timers_id;
 static DEFINE_SPINLOCK(idr_lock);
 
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 825068c..710ed08 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -78,7 +78,7 @@
 
 config SOFTWARE_SUSPEND
 	bool "Software Suspend"
-	depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP) && !X86_PAE) || ((FRV || PPC32) && !SMP))
+	depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP))
 	---help---
 	  Enable the possibility of suspending the machine.
 	  It doesn't need ACPI or APM.
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index b1fb786..0b00f56 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -20,6 +20,7 @@
 #include <linux/pm.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
+#include <linux/freezer.h>
 
 #include "power.h"
 
@@ -27,6 +28,23 @@
 static int noresume = 0;
 char resume_file[256] = CONFIG_PM_STD_PARTITION;
 dev_t swsusp_resume_device;
+sector_t swsusp_resume_block;
+
+/**
+ *	platform_prepare - prepare the machine for hibernation using the
+ *	platform driver if so configured and return an error code if it fails
+ */
+
+static inline int platform_prepare(void)
+{
+	int error = 0;
+
+	if (pm_disk_mode == PM_DISK_PLATFORM) {
+		if (pm_ops && pm_ops->prepare)
+			error = pm_ops->prepare(PM_SUSPEND_DISK);
+	}
+	return error;
+}
 
 /**
  *	power_down - Shut machine down for hibernate.
@@ -40,12 +58,10 @@
 
 static void power_down(suspend_disk_method_t mode)
 {
-	int error = 0;
-
 	switch(mode) {
 	case PM_DISK_PLATFORM:
 		kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
-		error = pm_ops->enter(PM_SUSPEND_DISK);
+		pm_ops->enter(PM_SUSPEND_DISK);
 		break;
 	case PM_DISK_SHUTDOWN:
 		kernel_power_off();
@@ -90,12 +106,18 @@
 		goto thaw;
 	}
 
+	error = platform_prepare();
+	if (error)
+		goto thaw;
+
 	/* Free memory before shutting down devices. */
 	if (!(error = swsusp_shrink_memory()))
 		return 0;
-thaw:
+
+	platform_finish();
+ thaw:
 	thaw_processes();
-enable_cpus:
+ enable_cpus:
 	enable_nonboot_cpus();
 	pm_restore_console();
 	return error;
@@ -127,7 +149,7 @@
 		return error;
 
 	if (pm_disk_mode == PM_DISK_TESTPROC)
-		goto Thaw;
+		return 0;
 
 	suspend_console();
 	error = device_suspend(PMSG_FREEZE);
@@ -189,10 +211,10 @@
 {
 	int error;
 
-	down(&pm_sem);
+	mutex_lock(&pm_mutex);
 	if (!swsusp_resume_device) {
 		if (!strlen(resume_file)) {
-			up(&pm_sem);
+			mutex_unlock(&pm_mutex);
 			return -ENOENT;
 		}
 		swsusp_resume_device = name_to_dev_t(resume_file);
@@ -207,7 +229,7 @@
 		 * FIXME: If noresume is specified, we need to find the partition
 		 * and reset it back to normal swap space.
 		 */
-		up(&pm_sem);
+		mutex_unlock(&pm_mutex);
 		return 0;
 	}
 
@@ -251,7 +273,7 @@
 	unprepare_processes();
  Done:
 	/* For success case, the suspend path will release the lock */
-	up(&pm_sem);
+	mutex_unlock(&pm_mutex);
 	pr_debug("PM: Resume from disk failed.\n");
 	return 0;
 }
@@ -312,7 +334,7 @@
 	p = memchr(buf, '\n', n);
 	len = p ? p - buf : n;
 
-	down(&pm_sem);
+	mutex_lock(&pm_mutex);
 	for (i = PM_DISK_FIRMWARE; i < PM_DISK_MAX; i++) {
 		if (!strncmp(buf, pm_disk_modes[i], len)) {
 			mode = i;
@@ -336,7 +358,7 @@
 
 	pr_debug("PM: suspend-to-disk mode set to '%s'\n",
 		 pm_disk_modes[mode]);
-	up(&pm_sem);
+	mutex_unlock(&pm_mutex);
 	return error ? error : n;
 }
 
@@ -361,14 +383,14 @@
 	if (maj != MAJOR(res) || min != MINOR(res))
 		goto out;
 
-	down(&pm_sem);
+	mutex_lock(&pm_mutex);
 	swsusp_resume_device = res;
-	up(&pm_sem);
+	mutex_unlock(&pm_mutex);
 	printk("Attempting manual resume\n");
 	noresume = 0;
 	software_resume();
 	ret = n;
-out:
+ out:
 	return ret;
 }
 
@@ -423,6 +445,19 @@
 	return 1;
 }
 
+static int __init resume_offset_setup(char *str)
+{
+	unsigned long long offset;
+
+	if (noresume)
+		return 1;
+
+	if (sscanf(str, "%llu", &offset) == 1)
+		swsusp_resume_block = offset;
+
+	return 1;
+}
+
 static int __init noresume_setup(char *str)
 {
 	noresume = 1;
@@ -430,4 +465,5 @@
 }
 
 __setup("noresume", noresume_setup);
+__setup("resume_offset=", resume_offset_setup);
 __setup("resume=", resume_setup);
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 873228c..500eb87 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -8,6 +8,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/kobject.h>
 #include <linux/string.h>
@@ -18,13 +19,14 @@
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/resume-trace.h>
+#include <linux/freezer.h>
 
 #include "power.h"
 
 /*This is just an arbitrary number */
 #define FREE_PAGE_NUMBER (100)
 
-DECLARE_MUTEX(pm_sem);
+DEFINE_MUTEX(pm_mutex);
 
 struct pm_ops *pm_ops;
 suspend_disk_method_t pm_disk_mode = PM_DISK_SHUTDOWN;
@@ -36,9 +38,9 @@
 
 void pm_set_ops(struct pm_ops * ops)
 {
-	down(&pm_sem);
+	mutex_lock(&pm_mutex);
 	pm_ops = ops;
-	up(&pm_sem);
+	mutex_unlock(&pm_mutex);
 }
 
 
@@ -182,7 +184,7 @@
 
 	if (!valid_state(state))
 		return -ENODEV;
-	if (down_trylock(&pm_sem))
+	if (!mutex_trylock(&pm_mutex))
 		return -EBUSY;
 
 	if (state == PM_SUSPEND_DISK) {
@@ -200,7 +202,7 @@
 	pr_debug("PM: Finishing wakeup.\n");
 	suspend_finish(state);
  Unlock:
-	up(&pm_sem);
+	mutex_unlock(&pm_mutex);
 	return error;
 }
 
@@ -229,7 +231,7 @@
 	return -EINVAL;
 }
 
-
+EXPORT_SYMBOL(pm_suspend);
 
 decl_subsys(power,NULL,NULL);
 
diff --git a/kernel/power/power.h b/kernel/power/power.h
index bfe999f..eb461b8 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -22,7 +22,9 @@
 	return -EPERM;
 }
 #endif
-extern struct semaphore pm_sem;
+
+extern struct mutex pm_mutex;
+
 #define power_attr(_name) \
 static struct subsys_attribute _name##_attr = {	\
 	.attr	= {				\
@@ -42,6 +44,7 @@
 extern unsigned long image_size;
 extern int in_suspend;
 extern dev_t swsusp_resume_device;
+extern sector_t swsusp_resume_block;
 
 extern asmlinkage int swsusp_arch_suspend(void);
 extern asmlinkage int swsusp_arch_resume(void);
@@ -102,8 +105,18 @@
 extern unsigned int snapshot_additional_pages(struct zone *zone);
 extern int snapshot_read_next(struct snapshot_handle *handle, size_t count);
 extern int snapshot_write_next(struct snapshot_handle *handle, size_t count);
+extern void snapshot_write_finalize(struct snapshot_handle *handle);
 extern int snapshot_image_loaded(struct snapshot_handle *handle);
-extern void snapshot_free_unused_memory(struct snapshot_handle *handle);
+
+/*
+ * This structure is used to pass the values needed for the identification
+ * of the resume swap area from user space to the kernel via the
+ * SNAPSHOT_SET_SWAP_AREA ioctl
+ */
+struct resume_swap_area {
+	loff_t offset;
+	u_int32_t dev;
+} __attribute__((packed));
 
 #define SNAPSHOT_IOC_MAGIC	'3'
 #define SNAPSHOT_FREEZE			_IO(SNAPSHOT_IOC_MAGIC, 1)
@@ -117,7 +130,14 @@
 #define SNAPSHOT_FREE_SWAP_PAGES	_IO(SNAPSHOT_IOC_MAGIC, 9)
 #define SNAPSHOT_SET_SWAP_FILE		_IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
 #define SNAPSHOT_S2RAM			_IO(SNAPSHOT_IOC_MAGIC, 11)
-#define SNAPSHOT_IOC_MAXNR	11
+#define SNAPSHOT_PMOPS			_IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
+#define SNAPSHOT_SET_SWAP_AREA		_IOW(SNAPSHOT_IOC_MAGIC, 13, \
+							struct resume_swap_area)
+#define SNAPSHOT_IOC_MAXNR	13
+
+#define PMOPS_PREPARE	1
+#define PMOPS_ENTER	2
+#define PMOPS_FINISH	3
 
 /**
  *	The bitmap is used for tracing allocated swap pages
@@ -141,7 +161,7 @@
 
 extern void free_bitmap(struct bitmap_page *bitmap);
 extern struct bitmap_page *alloc_bitmap(unsigned int nr_bits);
-extern unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap);
+extern sector_t alloc_swapdev_block(int swap, struct bitmap_page *bitmap);
 extern void free_all_swap_pages(int swap, struct bitmap_page *bitmap);
 
 extern int swsusp_check(void);
@@ -153,3 +173,7 @@
 extern int swsusp_write(void);
 extern void swsusp_close(void);
 extern int suspend_enter(suspend_state_t state);
+
+struct timeval;
+extern void swsusp_show_speed(struct timeval *, struct timeval *,
+				unsigned int, char *);
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index f1f900a..678ec73 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -16,12 +16,12 @@
  * callback we use.
  */
 
-static void do_poweroff(void *dummy)
+static void do_poweroff(struct work_struct *dummy)
 {
 	kernel_power_off();
 }
 
-static DECLARE_WORK(poweroff_work, do_poweroff, NULL);
+static DECLARE_WORK(poweroff_work, do_poweroff);
 
 static void handle_poweroff(int key, struct tty_struct *tty)
 {
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 72e72d2..99eeb11 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -13,12 +13,15 @@
 #include <linux/suspend.h>
 #include <linux/module.h>
 #include <linux/syscalls.h>
+#include <linux/freezer.h>
 
 /* 
  * Timeout for stopping processes
  */
 #define TIMEOUT	(20 * HZ)
 
+#define FREEZER_KERNEL_THREADS 0
+#define FREEZER_USER_SPACE 1
 
 static inline int freezeable(struct task_struct * p)
 {
@@ -39,7 +42,6 @@
 	long save;
 	save = current->state;
 	pr_debug("%s entered refrigerator\n", current->comm);
-	printk("=");
 
 	frozen_process(current);
 	spin_lock_irq(&current->sighand->siglock);
@@ -79,96 +81,136 @@
 	}
 }
 
-/* 0 = success, else # of processes that we failed to stop */
-int freeze_processes(void)
+static inline int is_user_space(struct task_struct *p)
 {
-	int todo, nr_user, user_frozen;
-	unsigned long start_time;
-	struct task_struct *g, *p;
+	return p->mm && !(p->flags & PF_BORROWED_MM);
+}
 
-	printk( "Stopping tasks: " );
-	start_time = jiffies;
-	user_frozen = 0;
+static unsigned int try_to_freeze_tasks(int freeze_user_space)
+{
+	struct task_struct *g, *p;
+	unsigned long end_time;
+	unsigned int todo;
+
+	end_time = jiffies + TIMEOUT;
 	do {
-		nr_user = todo = 0;
+		todo = 0;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
 			if (!freezeable(p))
 				continue;
+
 			if (frozen(p))
 				continue;
-			if (p->state == TASK_TRACED && frozen(p->parent)) {
+
+			if (p->state == TASK_TRACED &&
+			    (frozen(p->parent) ||
+			     p->parent->state == TASK_STOPPED)) {
 				cancel_freezing(p);
 				continue;
 			}
-			if (p->mm && !(p->flags & PF_BORROWED_MM)) {
-				/* The task is a user-space one.
-				 * Freeze it unless there's a vfork completion
-				 * pending
+			if (is_user_space(p)) {
+				if (!freeze_user_space)
+					continue;
+
+				/* Freeze the task unless there is a vfork
+				 * completion pending
 				 */
 				if (!p->vfork_done)
 					freeze_process(p);
-				nr_user++;
 			} else {
-				/* Freeze only if the user space is frozen */
-				if (user_frozen)
-					freeze_process(p);
-				todo++;
+				if (freeze_user_space)
+					continue;
+
+				freeze_process(p);
 			}
+			todo++;
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
-		todo += nr_user;
-		if (!user_frozen && !nr_user) {
-			sys_sync();
-			start_time = jiffies;
-		}
-		user_frozen = !nr_user;
 		yield();			/* Yield is okay here */
-		if (todo && time_after(jiffies, start_time + TIMEOUT))
+		if (todo && time_after(jiffies, end_time))
 			break;
-	} while(todo);
+	} while (todo);
 
-	/* This does not unfreeze processes that are already frozen
-	 * (we have slightly ugly calling convention in that respect,
-	 * and caller must call thaw_processes() if something fails),
-	 * but it cleans up leftover PF_FREEZE requests.
-	 */
 	if (todo) {
-		printk( "\n" );
-		printk(KERN_ERR " stopping tasks timed out "
-			"after %d seconds (%d tasks remaining):\n",
-			TIMEOUT / HZ, todo);
+		/* This does not unfreeze processes that are already frozen
+		 * (we have slightly ugly calling convention in that respect,
+		 * and caller must call thaw_processes() if something fails),
+		 * but it cleans up leftover PF_FREEZE requests.
+		 */
+		printk("\n");
+		printk(KERN_ERR "Stopping %s timed out after %d seconds "
+				"(%d tasks refusing to freeze):\n",
+				freeze_user_space ? "user space processes" :
+					"kernel threads",
+				TIMEOUT / HZ, todo);
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
+			if (is_user_space(p) == !freeze_user_space)
+				continue;
+
 			if (freezeable(p) && !frozen(p))
-				printk(KERN_ERR "  %s\n", p->comm);
+				printk(KERN_ERR " %s\n", p->comm);
+
 			cancel_freezing(p);
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
-		return todo;
 	}
 
-	printk( "|\n" );
+	return todo;
+}
+
+/**
+ *	freeze_processes - tell processes to enter the refrigerator
+ *
+ *	Returns 0 on success, or the number of processes that didn't freeze,
+ *	although they were told to.
+ */
+int freeze_processes(void)
+{
+	unsigned int nr_unfrozen;
+
+	printk("Stopping tasks ... ");
+	nr_unfrozen = try_to_freeze_tasks(FREEZER_USER_SPACE);
+	if (nr_unfrozen)
+		return nr_unfrozen;
+
+	sys_sync();
+	nr_unfrozen = try_to_freeze_tasks(FREEZER_KERNEL_THREADS);
+	if (nr_unfrozen)
+		return nr_unfrozen;
+
+	printk("done.\n");
 	BUG_ON(in_atomic());
 	return 0;
 }
 
-void thaw_processes(void)
+static void thaw_tasks(int thaw_user_space)
 {
 	struct task_struct *g, *p;
 
-	printk( "Restarting tasks..." );
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
 		if (!freezeable(p))
 			continue;
-		if (!thaw_process(p))
-			printk(KERN_INFO " Strange, %s not stopped\n", p->comm );
-	} while_each_thread(g, p);
 
+		if (is_user_space(p) == !thaw_user_space)
+			continue;
+
+		if (!thaw_process(p))
+			printk(KERN_WARNING " Strange, %s not stopped\n",
+				p->comm );
+	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
+}
+
+void thaw_processes(void)
+{
+	printk("Restarting tasks ... ");
+	thaw_tasks(FREEZER_KERNEL_THREADS);
+	thaw_tasks(FREEZER_USER_SPACE);
 	schedule();
-	printk( " done\n" );
+	printk("done.\n");
 }
 
 EXPORT_SYMBOL(refrigerator);
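
The rewritten freezer above stops user-space tasks first, syncs, and only
then freezes kernel threads, so the second pass can only succeed for
kernel threads that poll for the freeze request.  A minimal sketch of the
usual pattern in a freezeable kthread loop (the thread function is
hypothetical):

	static int frobd(void *unused)
	{
		while (!kthread_should_stop()) {
			/* Park in the refrigerator if a freeze is pending,
			 * so the FREEZER_KERNEL_THREADS pass can complete.
			 */
			try_to_freeze();

			/* ... do one unit of work ... */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}
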
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 99f9b7d..c024606 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1,15 +1,15 @@
 /*
  * linux/kernel/power/snapshot.c
  *
- * This file provide system snapshot/restore functionality.
+ * This file provides system snapshot/restore functionality for swsusp.
  *
  * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
  *
- * This file is released under the GPLv2, and is based on swsusp.c.
+ * This file is released under the GPLv2.
  *
  */
 
-
 #include <linux/version.h>
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -34,137 +34,24 @@
 
 #include "power.h"
 
-/* List of PBEs used for creating and restoring the suspend image */
+/* List of PBEs needed for restoring the pages that were allocated before
+ * the suspend and included in the suspend image, but have also been
+ * allocated by the "resume" kernel, so their contents cannot be written
+ * directly to their "original" page frames.
+ */
 struct pbe *restore_pblist;
 
-static unsigned int nr_copy_pages;
-static unsigned int nr_meta_pages;
+/* Pointer to an auxiliary buffer (1 page) */
 static void *buffer;
 
-#ifdef CONFIG_HIGHMEM
-unsigned int count_highmem_pages(void)
-{
-	struct zone *zone;
-	unsigned long zone_pfn;
-	unsigned int n = 0;
-
-	for_each_zone (zone)
-		if (is_highmem(zone)) {
-			mark_free_pages(zone);
-			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
-				struct page *page;
-				unsigned long pfn = zone_pfn + zone->zone_start_pfn;
-				if (!pfn_valid(pfn))
-					continue;
-				page = pfn_to_page(pfn);
-				if (PageReserved(page))
-					continue;
-				if (PageNosaveFree(page))
-					continue;
-				n++;
-			}
-		}
-	return n;
-}
-
-struct highmem_page {
-	char *data;
-	struct page *page;
-	struct highmem_page *next;
-};
-
-static struct highmem_page *highmem_copy;
-
-static int save_highmem_zone(struct zone *zone)
-{
-	unsigned long zone_pfn;
-	mark_free_pages(zone);
-	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
-		struct page *page;
-		struct highmem_page *save;
-		void *kaddr;
-		unsigned long pfn = zone_pfn + zone->zone_start_pfn;
-
-		if (!(pfn%10000))
-			printk(".");
-		if (!pfn_valid(pfn))
-			continue;
-		page = pfn_to_page(pfn);
-		/*
-		 * This condition results from rvmalloc() sans vmalloc_32()
-		 * and architectural memory reservations. This should be
-		 * corrected eventually when the cases giving rise to this
-		 * are better understood.
-		 */
-		if (PageReserved(page))
-			continue;
-		BUG_ON(PageNosave(page));
-		if (PageNosaveFree(page))
-			continue;
-		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
-		if (!save)
-			return -ENOMEM;
-		save->next = highmem_copy;
-		save->page = page;
-		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
-		if (!save->data) {
-			kfree(save);
-			return -ENOMEM;
-		}
-		kaddr = kmap_atomic(page, KM_USER0);
-		memcpy(save->data, kaddr, PAGE_SIZE);
-		kunmap_atomic(kaddr, KM_USER0);
-		highmem_copy = save;
-	}
-	return 0;
-}
-
-int save_highmem(void)
-{
-	struct zone *zone;
-	int res = 0;
-
-	pr_debug("swsusp: Saving Highmem");
-	drain_local_pages();
-	for_each_zone (zone) {
-		if (is_highmem(zone))
-			res = save_highmem_zone(zone);
-		if (res)
-			return res;
-	}
-	printk("\n");
-	return 0;
-}
-
-int restore_highmem(void)
-{
-	printk("swsusp: Restoring Highmem\n");
-	while (highmem_copy) {
-		struct highmem_page *save = highmem_copy;
-		void *kaddr;
-		highmem_copy = save->next;
-
-		kaddr = kmap_atomic(save->page, KM_USER0);
-		memcpy(kaddr, save->data, PAGE_SIZE);
-		kunmap_atomic(kaddr, KM_USER0);
-		free_page((long) save->data);
-		kfree(save);
-	}
-	return 0;
-}
-#else
-static inline unsigned int count_highmem_pages(void) {return 0;}
-static inline int save_highmem(void) {return 0;}
-static inline int restore_highmem(void) {return 0;}
-#endif
-
 /**
  *	@safe_needed - on resume, for storing the PBE list and the image,
  *	we can only use memory pages that do not conflict with the pages
- *	used before suspend.
+ *	used before suspend.  The unsafe pages have PageNosaveFree set
+ *	and we count them using unsafe_pages.
  *
- *	The unsafe pages are marked with the PG_nosave_free flag
- *	and we count them using unsafe_pages
+ *	Each allocated image page is marked as PageNosave and PageNosaveFree
+ *	so that swsusp_free() can release it.
  */
 
 #define PG_ANY		0
@@ -174,7 +61,7 @@
 
 static unsigned int allocated_unsafe_pages;
 
-static void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
+static void *get_image_page(gfp_t gfp_mask, int safe_needed)
 {
 	void *res;
 
@@ -195,20 +82,39 @@
 
 unsigned long get_safe_page(gfp_t gfp_mask)
 {
-	return (unsigned long)alloc_image_page(gfp_mask, PG_SAFE);
+	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
+}
+
+static struct page *alloc_image_page(gfp_t gfp_mask)
+{
+	struct page *page;
+
+	page = alloc_page(gfp_mask);
+	if (page) {
+		SetPageNosave(page);
+		SetPageNosaveFree(page);
+	}
+	return page;
 }
 
 /**
  *	free_image_page - free page represented by @addr, allocated with
- *	alloc_image_page (page flags set by it must be cleared)
+ *	get_image_page (page flags set by it must be cleared)
  */
 
 static inline void free_image_page(void *addr, int clear_nosave_free)
 {
-	ClearPageNosave(virt_to_page(addr));
+	struct page *page;
+
+	BUG_ON(!virt_addr_valid(addr));
+
+	page = virt_to_page(addr);
+
+	ClearPageNosave(page);
 	if (clear_nosave_free)
-		ClearPageNosaveFree(virt_to_page(addr));
-	free_page((unsigned long)addr);
+		ClearPageNosaveFree(page);
+
+	__free_page(page);
 }
 
 /* struct linked_page is used to build chains of pages */
@@ -269,7 +175,7 @@
 	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
 		struct linked_page *lp;
 
-		lp = alloc_image_page(ca->gfp_mask, ca->safe_needed);
+		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
 		if (!lp)
 			return NULL;
 
@@ -446,8 +352,8 @@
 
 	/* Compute the number of zones */
 	nr = 0;
-	for_each_zone (zone)
-		if (populated_zone(zone) && !is_highmem(zone))
+	for_each_zone(zone)
+		if (populated_zone(zone))
 			nr++;
 
 	/* Allocate the list of zones bitmap objects */
@@ -459,10 +365,10 @@
 	}
 
 	/* Initialize the zone bitmap objects */
-	for_each_zone (zone) {
+	for_each_zone(zone) {
 		unsigned long pfn;
 
-		if (!populated_zone(zone) || is_highmem(zone))
+		if (!populated_zone(zone))
 			continue;
 
 		zone_bm->start_pfn = zone->zone_start_pfn;
@@ -481,7 +387,7 @@
 		while (bb) {
 			unsigned long *ptr;
 
-			ptr = alloc_image_page(gfp_mask, safe_needed);
+			ptr = get_image_page(gfp_mask, safe_needed);
 			bb->data = ptr;
 			if (!ptr)
 				goto Free;
@@ -505,7 +411,7 @@
 	memory_bm_position_reset(bm);
 	return 0;
 
-Free:
+ Free:
 	bm->p_list = ca.chain;
 	memory_bm_free(bm, PG_UNSAFE_CLEAR);
 	return -ENOMEM;
@@ -651,7 +557,7 @@
 	memory_bm_position_reset(bm);
 	return BM_END_OF_MAP;
 
-Return_pfn:
+ Return_pfn:
 	bm->cur.chunk = chunk;
 	bm->cur.bit = bit;
 	return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
@@ -669,9 +575,81 @@
 
 	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
 	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
-	return res;
+	return 2 * res;
 }
 
+#ifdef CONFIG_HIGHMEM
+/**
+ *	count_free_highmem_pages - compute the total number of free highmem
+ *	pages, system-wide.
+ */
+
+static unsigned int count_free_highmem_pages(void)
+{
+	struct zone *zone;
+	unsigned int cnt = 0;
+
+	for_each_zone(zone)
+		if (populated_zone(zone) && is_highmem(zone))
+			cnt += zone->free_pages;
+
+	return cnt;
+}
+
+/**
+ *	saveable_highmem_page - Determine whether a highmem page should be
+ *	included in the suspend image.
+ *
+ *	We should save the page if it isn't Nosave or NosaveFree, or Reserved,
+ *	and it isn't a part of a free chunk of pages.
+ */
+
+static struct page *saveable_highmem_page(unsigned long pfn)
+{
+	struct page *page;
+
+	if (!pfn_valid(pfn))
+		return NULL;
+
+	page = pfn_to_page(pfn);
+
+	BUG_ON(!PageHighMem(page));
+
+	if (PageNosave(page) || PageReserved(page) || PageNosaveFree(page))
+		return NULL;
+
+	return page;
+}
+
+/**
+ *	count_highmem_pages - compute the total number of saveable highmem
+ *	pages.
+ */
+
+unsigned int count_highmem_pages(void)
+{
+	struct zone *zone;
+	unsigned int n = 0;
+
+	for_each_zone(zone) {
+		unsigned long pfn, max_zone_pfn;
+
+		if (!is_highmem(zone))
+			continue;
+
+		mark_free_pages(zone);
+		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
+			if (saveable_highmem_page(pfn))
+				n++;
+	}
+	return n;
+}
+#else
+static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
+static inline unsigned int count_highmem_pages(void) { return 0; }
+#endif /* CONFIG_HIGHMEM */
+
 /**
  *	pfn_is_nosave - check if given pfn is in the 'nosave' section
  */
@@ -684,12 +662,12 @@
 }
 
 /**
- *	saveable - Determine whether a page should be cloned or not.
- *	@pfn:	The page
+ *	saveable - Determine whether a non-highmem page should be included in
+ *	the suspend image.
  *
- *	We save a page if it isn't Nosave, and is not in the range of pages
- *	statically defined as 'unsaveable', and it
- *	isn't a part of a free chunk of pages.
+ *	We should save the page if it isn't Nosave, and is not in the range
+ *	of pages statically defined as 'unsaveable', and it isn't a part of
+ *	a free chunk of pages.
  */
 
 static struct page *saveable_page(unsigned long pfn)
@@ -701,76 +679,130 @@
 
 	page = pfn_to_page(pfn);
 
-	if (PageNosave(page))
+	BUG_ON(PageHighMem(page));
+
+	if (PageNosave(page) || PageNosaveFree(page))
 		return NULL;
+
 	if (PageReserved(page) && pfn_is_nosave(pfn))
 		return NULL;
-	if (PageNosaveFree(page))
-		return NULL;
 
 	return page;
 }
 
+/**
+ *	count_data_pages - compute the total number of saveable non-highmem
+ *	pages.
+ */
+
 unsigned int count_data_pages(void)
 {
 	struct zone *zone;
 	unsigned long pfn, max_zone_pfn;
 	unsigned int n = 0;
 
-	for_each_zone (zone) {
+	for_each_zone(zone) {
 		if (is_highmem(zone))
 			continue;
+
 		mark_free_pages(zone);
 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			n += !!saveable_page(pfn);
+			if (saveable_page(pfn))
+				n++;
 	}
 	return n;
 }
 
-static inline void copy_data_page(long *dst, long *src)
+/* This is needed because copy_page and memcpy are not usable for copying
+ * task structs.
+ */
+static inline void do_copy_page(long *dst, long *src)
 {
 	int n;
 
-	/* copy_page and memcpy are not usable for copying task structs. */
 	for (n = PAGE_SIZE / sizeof(long); n; n--)
 		*dst++ = *src++;
 }
 
+#ifdef CONFIG_HIGHMEM
+static inline struct page *
+page_is_saveable(struct zone *zone, unsigned long pfn)
+{
+	return is_highmem(zone) ?
+			saveable_highmem_page(pfn) : saveable_page(pfn);
+}
+
+static inline void
+copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
+{
+	struct page *s_page, *d_page;
+	void *src, *dst;
+
+	s_page = pfn_to_page(src_pfn);
+	d_page = pfn_to_page(dst_pfn);
+	if (PageHighMem(s_page)) {
+		src = kmap_atomic(s_page, KM_USER0);
+		dst = kmap_atomic(d_page, KM_USER1);
+		do_copy_page(dst, src);
+		kunmap_atomic(src, KM_USER0);
+		kunmap_atomic(dst, KM_USER1);
+	} else {
+		src = page_address(s_page);
+		if (PageHighMem(d_page)) {
+			/* Page pointed to by src may contain some kernel
+			 * data modified by kmap_atomic()
+			 */
+			do_copy_page(buffer, src);
+			dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0);
+			memcpy(dst, buffer, PAGE_SIZE);
+			kunmap_atomic(dst, KM_USER0);
+		} else {
+			dst = page_address(d_page);
+			do_copy_page(dst, src);
+		}
+	}
+}
+#else
+#define page_is_saveable(zone, pfn)	saveable_page(pfn)
+
+static inline void
+copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
+{
+	do_copy_page(page_address(pfn_to_page(dst_pfn)),
+			page_address(pfn_to_page(src_pfn)));
+}
+#endif /* CONFIG_HIGHMEM */
+
 static void
 copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
 {
 	struct zone *zone;
 	unsigned long pfn;
 
-	for_each_zone (zone) {
+	for_each_zone(zone) {
 		unsigned long max_zone_pfn;
 
-		if (is_highmem(zone))
-			continue;
-
 		mark_free_pages(zone);
 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			if (saveable_page(pfn))
+			if (page_is_saveable(zone, pfn))
 				memory_bm_set_bit(orig_bm, pfn);
 	}
 	memory_bm_position_reset(orig_bm);
 	memory_bm_position_reset(copy_bm);
 	do {
 		pfn = memory_bm_next_pfn(orig_bm);
-		if (likely(pfn != BM_END_OF_MAP)) {
-			struct page *page;
-			void *src;
-
-			page = pfn_to_page(pfn);
-			src = page_address(page);
-			page = pfn_to_page(memory_bm_next_pfn(copy_bm));
-			copy_data_page(page_address(page), src);
-		}
+		if (likely(pfn != BM_END_OF_MAP))
+			copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
 	} while (pfn != BM_END_OF_MAP);
 }
 
+/* Total number of image pages */
+static unsigned int nr_copy_pages;
+/* Number of pages needed for saving the original pfns of the image pages */
+static unsigned int nr_meta_pages;
+
 /**
  *	swsusp_free - free pages allocated for the suspend.
  *
@@ -792,7 +824,7 @@
 				if (PageNosave(page) && PageNosaveFree(page)) {
 					ClearPageNosave(page);
 					ClearPageNosaveFree(page);
-					free_page((long) page_address(page));
+					__free_page(page);
 				}
 			}
 	}
@@ -802,34 +834,108 @@
 	buffer = NULL;
 }
 
+#ifdef CONFIG_HIGHMEM
+/**
+  *	count_pages_for_highmem - compute the number of non-highmem pages
+  *	that will be necessary for creating copies of highmem pages.
+  */
+
+static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
+{
+	unsigned int free_highmem = count_free_highmem_pages();
+
+	if (free_highmem >= nr_highmem)
+		nr_highmem = 0;
+	else
+		nr_highmem -= free_highmem;
+
+	return nr_highmem;
+}
+#else
+static unsigned int
+count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
+#endif /* CONFIG_HIGHMEM */
 
 /**
- *	enough_free_mem - Make sure we enough free memory to snapshot.
- *
- *	Returns TRUE or FALSE after checking the number of available
- *	free pages.
+ *	enough_free_mem - Make sure we have enough free memory for the
+ *	snapshot image.
  */
 
-static int enough_free_mem(unsigned int nr_pages)
+static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
 {
 	struct zone *zone;
 	unsigned int free = 0, meta = 0;
 
-	for_each_zone (zone)
-		if (!is_highmem(zone)) {
+	for_each_zone(zone) {
+		meta += snapshot_additional_pages(zone);
+		if (!is_highmem(zone))
 			free += zone->free_pages;
-			meta += snapshot_additional_pages(zone);
-		}
+	}
 
-	pr_debug("swsusp: pages needed: %u + %u + %u, available pages: %u\n",
+	nr_pages += count_pages_for_highmem(nr_highmem);
+	pr_debug("swsusp: Normal pages needed: %u + %u + %u, available pages: %u\n",
 		nr_pages, PAGES_FOR_IO, meta, free);
 
 	return free > nr_pages + PAGES_FOR_IO + meta;
 }
 
+#ifdef CONFIG_HIGHMEM
+/**
+ *	get_highmem_buffer - if there are some highmem pages in the suspend
+ *	image, we may need the buffer to copy them and/or load their data.
+ */
+
+static inline int get_highmem_buffer(int safe_needed)
+{
+	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
+	return buffer ? 0 : -ENOMEM;
+}
+
+/**
+ *	alloc_highmem_image_pages - allocate some highmem pages for the image.
+ *	Try to allocate as many pages as needed, but if the number of free
+ *	highmem pages is less than that, allocate them all.
+ */
+
+static inline unsigned int
+alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
+{
+	unsigned int to_alloc = count_free_highmem_pages();
+
+	if (to_alloc > nr_highmem)
+		to_alloc = nr_highmem;
+
+	nr_highmem -= to_alloc;
+	while (to_alloc-- > 0) {
+		struct page *page;
+
+		page = alloc_image_page(__GFP_HIGHMEM);
+		memory_bm_set_bit(bm, page_to_pfn(page));
+	}
+	return nr_highmem;
+}
+#else
+static inline int get_highmem_buffer(int safe_needed) { return 0; }
+
+static inline unsigned int
+alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
+#endif /* CONFIG_HIGHMEM */
+
+/**
+ *	swsusp_alloc - allocate memory for the suspend image
+ *
+ *	We first try to allocate as many highmem pages as there are
+ *	saveable highmem pages in the system.  If that fails, we allocate
+ *	non-highmem pages for the copies of the remaining highmem ones.
+ *
+ *	In this approach it is likely that the copies of highmem pages will
+ *	also be located in the high memory, because of the way in which
+ *	copy_data_pages() works.
+ */
+
 static int
 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
-		unsigned int nr_pages)
+		unsigned int nr_pages, unsigned int nr_highmem)
 {
 	int error;
 
@@ -841,46 +947,61 @@
 	if (error)
 		goto Free;
 
+	if (nr_highmem > 0) {
+		error = get_highmem_buffer(PG_ANY);
+		if (error)
+			goto Free;
+
+		nr_pages += alloc_highmem_image_pages(copy_bm, nr_highmem);
+	}
 	while (nr_pages-- > 0) {
-		struct page *page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+		struct page *page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
+
 		if (!page)
 			goto Free;
 
-		SetPageNosave(page);
-		SetPageNosaveFree(page);
 		memory_bm_set_bit(copy_bm, page_to_pfn(page));
 	}
 	return 0;
 
-Free:
+ Free:
 	swsusp_free();
 	return -ENOMEM;
 }
 
-/* Memory bitmap used for marking saveable pages */
+/* Memory bitmap used for marking saveable pages (during suspend) or the
+ * suspend image pages (during resume)
+ */
 static struct memory_bitmap orig_bm;
-/* Memory bitmap used for marking allocated pages that will contain the copies
- * of saveable pages
+/* Memory bitmap used on suspend for marking allocated pages that will contain
+ * the copies of saveable pages.  During resume it is initially used for
+ * marking the suspend image pages, but then its set bits are duplicated in
+ * @orig_bm and it is released.  Next, on systems with high memory, it may be
+ * used for marking "safe" highmem pages, but it has to be reinitialized for
+ * this purpose.
  */
 static struct memory_bitmap copy_bm;
 
 asmlinkage int swsusp_save(void)
 {
-	unsigned int nr_pages;
+	unsigned int nr_pages, nr_highmem;
 
-	pr_debug("swsusp: critical section: \n");
+	printk("swsusp: critical section: \n");
 
 	drain_local_pages();
 	nr_pages = count_data_pages();
-	printk("swsusp: Need to copy %u pages\n", nr_pages);
+	nr_highmem = count_highmem_pages();
+	printk("swsusp: Need to copy %u pages\n", nr_pages + nr_highmem);
 
-	if (!enough_free_mem(nr_pages)) {
+	if (!enough_free_mem(nr_pages, nr_highmem)) {
 		printk(KERN_ERR "swsusp: Not enough free memory\n");
 		return -ENOMEM;
 	}
 
-	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages))
+	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
+		printk(KERN_ERR "swsusp: Memory allocation failed\n");
 		return -ENOMEM;
+	}
 
 	/* During allocating of suspend pagedir, new cold pages may appear.
 	 * Kill them.
@@ -894,10 +1015,12 @@
 	 * touch swap space! Except we must write out our image of course.
 	 */
 
+	nr_pages += nr_highmem;
 	nr_copy_pages = nr_pages;
-	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
 
 	printk("swsusp: critical section/: done (%d pages copied)\n", nr_pages);
+
 	return 0;
 }
 
@@ -960,7 +1083,7 @@
 
 	if (!buffer) {
 		/* This makes the buffer be freed by swsusp_free() */
-		buffer = alloc_image_page(GFP_ATOMIC, PG_ANY);
+		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
 		if (!buffer)
 			return -ENOMEM;
 	}
@@ -975,9 +1098,23 @@
 			memset(buffer, 0, PAGE_SIZE);
 			pack_pfns(buffer, &orig_bm);
 		} else {
-			unsigned long pfn = memory_bm_next_pfn(&copy_bm);
+			struct page *page;
 
-			handle->buffer = page_address(pfn_to_page(pfn));
+			page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
+			if (PageHighMem(page)) {
+				/* Highmem pages are copied to the buffer,
+				 * because we can't return with a kmapped
+				 * highmem page (we may not be called again).
+				 */
+				void *kaddr;
+
+				kaddr = kmap_atomic(page, KM_USER0);
+				memcpy(buffer, kaddr, PAGE_SIZE);
+				kunmap_atomic(kaddr, KM_USER0);
+				handle->buffer = buffer;
+			} else {
+				handle->buffer = page_address(page);
+			}
 		}
 		handle->prev = handle->cur;
 	}
@@ -1005,7 +1142,7 @@
 	unsigned long pfn, max_zone_pfn;
 
 	/* Clear page flags */
-	for_each_zone (zone) {
+	for_each_zone(zone) {
 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 			if (pfn_valid(pfn))
@@ -1101,6 +1238,218 @@
 	}
 }
 
+/* List of "safe" pages that may be used to store data loaded from the suspend
+ * image
+ */
+static struct linked_page *safe_pages_list;
+
+#ifdef CONFIG_HIGHMEM
+/* struct highmem_pbe is used for creating the list of highmem pages that
+ * should be restored atomically during the resume from disk, because the page
+ * frames they have occupied before the suspend are in use.
+ */
+struct highmem_pbe {
+	struct page *copy_page;	/* data is here now */
+	struct page *orig_page;	/* data was here before the suspend */
+	struct highmem_pbe *next;
+};
+
+/* List of highmem PBEs needed for restoring the highmem pages that were
+ * allocated before the suspend and included in the suspend image, but have
+ * also been allocated by the "resume" kernel, so their contents cannot be
+ * written directly to their "original" page frames.
+ */
+static struct highmem_pbe *highmem_pblist;
+
+/**
+ *	count_highmem_image_pages - compute the number of highmem pages in the
+ *	suspend image.  The bits in the memory bitmap @bm that correspond to the
+ *	image pages are assumed to be set.
+ */
+
+static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
+{
+	unsigned long pfn;
+	unsigned int cnt = 0;
+
+	memory_bm_position_reset(bm);
+	pfn = memory_bm_next_pfn(bm);
+	while (pfn != BM_END_OF_MAP) {
+		if (PageHighMem(pfn_to_page(pfn)))
+			cnt++;
+
+		pfn = memory_bm_next_pfn(bm);
+	}
+	return cnt;
+}
+
+/**
+ *	prepare_highmem_image - try to allocate as many highmem pages as
+ *	there are highmem image pages (@nr_highmem_p points to the variable
+ *	containing the number of highmem image pages).  The pages that are
+ *	"safe" (ie. will not be overwritten when the suspend image is
+ *	restored) have the corresponding bits set in @bm (it must be
+ *	uninitialized).
+ *
+ *	NOTE: This function should not be called if there are no highmem
+ *	image pages.
+ */
+
+static unsigned int safe_highmem_pages;
+
+static struct memory_bitmap *safe_highmem_bm;
+
+static int
+prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
+{
+	unsigned int to_alloc;
+
+	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
+		return -ENOMEM;
+
+	if (get_highmem_buffer(PG_SAFE))
+		return -ENOMEM;
+
+	to_alloc = count_free_highmem_pages();
+	if (to_alloc > *nr_highmem_p)
+		to_alloc = *nr_highmem_p;
+	else
+		*nr_highmem_p = to_alloc;
+
+	safe_highmem_pages = 0;
+	while (to_alloc-- > 0) {
+		struct page *page;
+
+		page = alloc_page(__GFP_HIGHMEM);
+		if (!PageNosaveFree(page)) {
+			/* The page is "safe", set its bit in the bitmap */
+			memory_bm_set_bit(bm, page_to_pfn(page));
+			safe_highmem_pages++;
+		}
+		/* Mark the page as allocated */
+		SetPageNosave(page);
+		SetPageNosaveFree(page);
+	}
+	memory_bm_position_reset(bm);
+	safe_highmem_bm = bm;
+	return 0;
+}
+
+/**
+ *	get_highmem_page_buffer - for given highmem image page find the buffer
+ *	that suspend_write_next() should set for its caller to write to.
+ *
+ *	If the page is to be saved to its "original" page frame or a copy of
+ *	the page is to be made in the highmem, @buffer is returned.  Otherwise,
+ *	the copy of the page is to be made in normal memory, so the address of
+ *	the copy is returned.
+ *
+ *	If @buffer is returned, the caller of suspend_write_next() will write
+ *	the page's contents to @buffer, so they will have to be copied to the
+ *	right location on the next call to suspend_write_next() and it is done
+ *	with the help of copy_last_highmem_page().  For this purpose, if
+ *	@buffer is returned, @last_highmem_page is set to the page to which
+ *	the data will have to be copied from @buffer.
+ */
+
+static struct page *last_highmem_page;
+
+static void *
+get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
+{
+	struct highmem_pbe *pbe;
+	void *kaddr;
+
+	if (PageNosave(page) && PageNosaveFree(page)) {
+		/* We have allocated the "original" page frame and we can
+		 * use it directly to store the loaded page.
+		 */
+		last_highmem_page = page;
+		return buffer;
+	}
+	/* The "original" page frame has not been allocated and we have to
+	 * use a "safe" page frame to store the loaded page.
+	 */
+	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
+	if (!pbe) {
+		swsusp_free();
+		return NULL;
+	}
+	pbe->orig_page = page;
+	if (safe_highmem_pages > 0) {
+		struct page *tmp;
+
+		/* Copy of the page will be stored in high memory */
+		kaddr = buffer;
+		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
+		safe_highmem_pages--;
+		last_highmem_page = tmp;
+		pbe->copy_page = tmp;
+	} else {
+		/* Copy of the page will be stored in normal memory */
+		kaddr = safe_pages_list;
+		safe_pages_list = safe_pages_list->next;
+		pbe->copy_page = virt_to_page(kaddr);
+	}
+	pbe->next = highmem_pblist;
+	highmem_pblist = pbe;
+	return kaddr;
+}
+
+/**
+ *	copy_last_highmem_page - copy the contents of a highmem image from
+ *	@buffer, where the caller of snapshot_write_next() has placed them,
+ *	to the right location represented by @last_highmem_page.
+ */
+
+static void copy_last_highmem_page(void)
+{
+	if (last_highmem_page) {
+		void *dst;
+
+		dst = kmap_atomic(last_highmem_page, KM_USER0);
+		memcpy(dst, buffer, PAGE_SIZE);
+		kunmap_atomic(dst, KM_USER0);
+		last_highmem_page = NULL;
+	}
+}
+
+static inline int last_highmem_page_copied(void)
+{
+	return !last_highmem_page;
+}
+
+static inline void free_highmem_data(void)
+{
+	if (safe_highmem_bm)
+		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
+
+	if (buffer)
+		free_image_page(buffer, PG_UNSAFE_CLEAR);
+}
+#else
+static inline int get_safe_write_buffer(void) { return 0; }
+
+static unsigned int
+count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
+
+static inline int
+prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
+{
+	return 0;
+}
+
+static inline void *
+get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
+{
+	return NULL;
+}
+
+static inline void copy_last_highmem_page(void) {}
+static inline int last_highmem_page_copied(void) { return 1; }
+static inline void free_highmem_data(void) {}
+#endif /* CONFIG_HIGHMEM */
+
 /**
  *	prepare_image - use the memory bitmap @bm to mark the pages that will
  *	be overwritten in the process of restoring the system memory state
@@ -1110,20 +1459,25 @@
  *	The idea is to allocate a new memory bitmap first and then allocate
  *	as many pages as needed for the image data, but not to assign these
  *	pages to specific tasks initially.  Instead, we just mark them as
- *	allocated and create a list of "safe" pages that will be used later.
+ *	allocated and create a list of "safe" pages that will be used
+ *	later.  On systems with high memory, a list of "safe" highmem pages is
+ *	also created.
  */
 
 #define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
 
-static struct linked_page *safe_pages_list;
-
 static int
 prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 {
-	unsigned int nr_pages;
+	unsigned int nr_pages, nr_highmem;
 	struct linked_page *sp_list, *lp;
 	int error;
 
+	/* If there is no highmem, the buffer will not be necessary */
+	free_image_page(buffer, PG_UNSAFE_CLEAR);
+	buffer = NULL;
+
+	nr_highmem = count_highmem_image_pages(bm);
 	error = mark_unsafe_pages(bm);
 	if (error)
 		goto Free;
@@ -1134,6 +1488,11 @@
 
 	duplicate_memory_bitmap(new_bm, bm);
 	memory_bm_free(bm, PG_UNSAFE_KEEP);
+	if (nr_highmem > 0) {
+		error = prepare_highmem_image(bm, &nr_highmem);
+		if (error)
+			goto Free;
+	}
 	/* Reserve some safe pages for potential later use.
 	 *
 	 * NOTE: This way we make sure there will be enough safe pages for the
@@ -1142,10 +1501,10 @@
 	 */
 	sp_list = NULL;
 	/* nr_copy_pages cannot be lesser than allocated_unsafe_pages */
-	nr_pages = nr_copy_pages - allocated_unsafe_pages;
+	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
 	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
 	while (nr_pages > 0) {
-		lp = alloc_image_page(GFP_ATOMIC, PG_SAFE);
+		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
 		if (!lp) {
 			error = -ENOMEM;
 			goto Free;
@@ -1156,7 +1515,7 @@
 	}
 	/* Preallocate memory for the image */
 	safe_pages_list = NULL;
-	nr_pages = nr_copy_pages - allocated_unsafe_pages;
+	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
 	while (nr_pages > 0) {
 		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
 		if (!lp) {
@@ -1181,7 +1540,7 @@
 	}
 	return 0;
 
-Free:
+ Free:
 	swsusp_free();
 	return error;
 }
@@ -1196,6 +1555,9 @@
 	struct pbe *pbe;
 	struct page *page = pfn_to_page(memory_bm_next_pfn(bm));
 
+	if (PageHighMem(page))
+		return get_highmem_page_buffer(page, ca);
+
 	if (PageNosave(page) && PageNosaveFree(page))
 		/* We have allocated the "original" page frame and we can
 		 * use it directly to store the loaded page.
@@ -1210,12 +1572,12 @@
 		swsusp_free();
 		return NULL;
 	}
-	pbe->orig_address = (unsigned long)page_address(page);
-	pbe->address = (unsigned long)safe_pages_list;
+	pbe->orig_address = page_address(page);
+	pbe->address = safe_pages_list;
 	safe_pages_list = safe_pages_list->next;
 	pbe->next = restore_pblist;
 	restore_pblist = pbe;
-	return (void *)pbe->address;
+	return pbe->address;
 }
 
 /**
@@ -1249,14 +1611,16 @@
 	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
 		return 0;
 
-	if (!buffer) {
-		/* This makes the buffer be freed by swsusp_free() */
-		buffer = alloc_image_page(GFP_ATOMIC, PG_ANY);
+	if (handle->offset == 0) {
+		if (!buffer)
+			/* This makes the buffer be freed by swsusp_free() */
+			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
+
 		if (!buffer)
 			return -ENOMEM;
-	}
-	if (!handle->offset)
+
 		handle->buffer = buffer;
+	}
 	handle->sync_read = 1;
 	if (handle->prev < handle->cur) {
 		if (handle->prev == 0) {
@@ -1284,8 +1648,10 @@
 					return -ENOMEM;
 			}
 		} else {
+			copy_last_highmem_page();
 			handle->buffer = get_buffer(&orig_bm, &ca);
-			handle->sync_read = 0;
+			if (handle->buffer != buffer)
+				handle->sync_read = 0;
 		}
 		handle->prev = handle->cur;
 	}
@@ -1301,15 +1667,73 @@
 	return count;
 }
 
+/**
+ *	snapshot_write_finalize - must be called after the last call to
+ *	snapshot_write_next() in case the last page in the image happens
+ *	to be a highmem page and its contents should be stored in the
+ *	highmem.  Additionally, it releases the memory that will not be
+ *	used any more.
+ */
+
+void snapshot_write_finalize(struct snapshot_handle *handle)
+{
+	copy_last_highmem_page();
+	/* Free only if we have loaded the image entirely */
+	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) {
+		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
+		free_highmem_data();
+	}
+}
+
 int snapshot_image_loaded(struct snapshot_handle *handle)
 {
-	return !(!nr_copy_pages ||
+	return !(!nr_copy_pages || !last_highmem_page_copied() ||
 			handle->cur <= nr_meta_pages + nr_copy_pages);
 }
 
-void snapshot_free_unused_memory(struct snapshot_handle *handle)
+#ifdef CONFIG_HIGHMEM
+/* Assumes that @buf is ready and points to a "safe" page */
+static inline void
+swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
 {
-	/* Free only if we have loaded the image entirely */
-	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
-		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
+	void *kaddr1, *kaddr2;
+
+	kaddr1 = kmap_atomic(p1, KM_USER0);
+	kaddr2 = kmap_atomic(p2, KM_USER1);
+	memcpy(buf, kaddr1, PAGE_SIZE);
+	memcpy(kaddr1, kaddr2, PAGE_SIZE);
+	memcpy(kaddr2, buf, PAGE_SIZE);
+	kunmap_atomic(kaddr1, KM_USER0);
+	kunmap_atomic(kaddr2, KM_USER1);
 }
+
+/**
+ *	restore_highmem - for each highmem page that was allocated before
+ *	the suspend and included in the suspend image, and also has been
+ *	allocated by the "resume" kernel swap its current (ie. "before
+ *	resume") contents with the previous (ie. "before suspend") one.
+ *
+ *	If the resume eventually fails, we can call this function once
+ *	again and restore the "before resume" highmem state.
+ */
+
+int restore_highmem(void)
+{
+	struct highmem_pbe *pbe = highmem_pblist;
+	void *buf;
+
+	if (!pbe)
+		return 0;
+
+	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
+	if (!buf)
+		return -ENOMEM;
+
+	while (pbe) {
+		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
+		pbe = pbe->next;
+	}
+	free_image_page(buf, PG_UNSAFE_CLEAR);
+	return 0;
+}
+#endif /* CONFIG_HIGHMEM */
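
The restore_highmem() code added above swaps each copy_page/orig_page pair
through a single bounce buffer, so running it a second time undoes the first
swap.  A minimal userspace sketch of that rotation, using plain buffers and an
assumed 4 KiB page size instead of the kernel's kmap_atomic() interface:

	#include <string.h>

	#define PAGE_SIZE	4096	/* illustrative; the real value is per-arch */

	/* Swap two page-sized buffers through a third, mirroring what
	 * swap_two_pages_data() does on kmapped highmem pages.  Because the
	 * operation is symmetric, a second call restores the original
	 * contents, which is why restore_highmem() may be invoked again
	 * after a failed resume. */
	static void swap_pages(unsigned char *p1, unsigned char *p2,
			       unsigned char *tmp)
	{
		memcpy(tmp, p1, PAGE_SIZE);	/* save the "before resume" data */
		memcpy(p1, p2, PAGE_SIZE);	/* bring back the "before suspend" data */
		memcpy(p2, tmp, PAGE_SIZE);	/* keep the displaced data around */
	}

	int main(void)
	{
		unsigned char a[PAGE_SIZE] = { 1 }, b[PAGE_SIZE] = { 2 };
		unsigned char tmp[PAGE_SIZE];

		swap_pages(a, b, tmp);
		swap_pages(a, b, tmp);	/* undoes the first swap */
		return a[0] != 1;
	}
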
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 1a3b0dd..f133d4a 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -34,177 +34,77 @@
 #define SWSUSP_SIG	"S1SUSPEND"
 
 static struct swsusp_header {
-	char reserved[PAGE_SIZE - 20 - sizeof(swp_entry_t)];
-	swp_entry_t image;
+	char reserved[PAGE_SIZE - 20 - sizeof(sector_t)];
+	sector_t image;
 	char	orig_sig[10];
 	char	sig[10];
 } __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header;
 
 /*
- * Saving part...
+ * General things
  */
 
 static unsigned short root_swap = 0xffff;
+static struct block_device *resume_bdev;
 
-static int mark_swapfiles(swp_entry_t start)
+/**
+ *	submit - submit BIO request.
+ *	@rw:	READ or WRITE.
+ *	@off:	physical offset of page.
+ *	@page:	page we're reading or writing.
+ *	@bio_chain: list of pending bios (for async reading)
+ *
+ *	Straight from the textbook - allocate and initialize the bio.
+ *	If we're reading, make sure the page is marked as dirty.
+ *	Then submit it and, if @bio_chain == NULL, wait.
+ */
+static int submit(int rw, pgoff_t page_off, struct page *page,
+			struct bio **bio_chain)
 {
-	int error;
+	struct bio *bio;
 
-	rw_swap_page_sync(READ, swp_entry(root_swap, 0),
-			  virt_to_page((unsigned long)&swsusp_header), NULL);
-	if (!memcmp("SWAP-SPACE",swsusp_header.sig, 10) ||
-	    !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) {
-		memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10);
-		memcpy(swsusp_header.sig,SWSUSP_SIG, 10);
-		swsusp_header.image = start;
-		error = rw_swap_page_sync(WRITE, swp_entry(root_swap, 0),
-				virt_to_page((unsigned long)&swsusp_header),
-				NULL);
+	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
+	if (!bio)
+		return -ENOMEM;
+	bio->bi_sector = page_off * (PAGE_SIZE >> 9);
+	bio->bi_bdev = resume_bdev;
+	bio->bi_end_io = end_swap_bio_read;
+
+	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+		printk("swsusp: ERROR: adding page to bio at %ld\n", page_off);
+		bio_put(bio);
+		return -EFAULT;
+	}
+
+	lock_page(page);
+	bio_get(bio);
+
+	if (bio_chain == NULL) {
+		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
+		wait_on_page_locked(page);
+		if (rw == READ)
+			bio_set_pages_dirty(bio);
+		bio_put(bio);
 	} else {
-		pr_debug("swsusp: Partition is not swap space.\n");
-		error = -ENODEV;
+		if (rw == READ)
+			get_page(page);	/* These pages are freed later */
+		bio->bi_private = *bio_chain;
+		*bio_chain = bio;
+		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
 	}
-	return error;
-}
-
-/**
- *	swsusp_swap_check - check if the resume device is a swap device
- *	and get its index (if so)
- */
-
-static int swsusp_swap_check(void) /* This is called before saving image */
-{
-	int res = swap_type_of(swsusp_resume_device);
-
-	if (res >= 0) {
-		root_swap = res;
-		return 0;
-	}
-	return res;
-}
-
-/**
- *	write_page - Write one page to given swap location.
- *	@buf:		Address we're writing.
- *	@offset:	Offset of the swap page we're writing to.
- *	@bio_chain:	Link the next write BIO here
- */
-
-static int write_page(void *buf, unsigned long offset, struct bio **bio_chain)
-{
-	swp_entry_t entry;
-	int error = -ENOSPC;
-
-	if (offset) {
-		struct page *page = virt_to_page(buf);
-
-		if (bio_chain) {
-			/*
-			 * Whether or not we successfully allocated a copy page,
-			 * we take a ref on the page here.  It gets undone in
-			 * wait_on_bio_chain().
-			 */
-			struct page *page_copy;
-			page_copy = alloc_page(GFP_ATOMIC);
-			if (page_copy == NULL) {
-				WARN_ON_ONCE(1);
-				bio_chain = NULL;	/* Go synchronous */
-				get_page(page);
-			} else {
-				memcpy(page_address(page_copy),
-					page_address(page), PAGE_SIZE);
-				page = page_copy;
-			}
-		}
-		entry = swp_entry(root_swap, offset);
-		error = rw_swap_page_sync(WRITE, entry, page, bio_chain);
-	}
-	return error;
-}
-
-/*
- *	The swap map is a data structure used for keeping track of each page
- *	written to a swap partition.  It consists of many swap_map_page
- *	structures that contain each an array of MAP_PAGE_SIZE swap entries.
- *	These structures are stored on the swap and linked together with the
- *	help of the .next_swap member.
- *
- *	The swap map is created during suspend.  The swap map pages are
- *	allocated and populated one at a time, so we only need one memory
- *	page to set up the entire structure.
- *
- *	During resume we also only need to use one swap_map_page structure
- *	at a time.
- */
-
-#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(long) - 1)
-
-struct swap_map_page {
-	unsigned long		entries[MAP_PAGE_ENTRIES];
-	unsigned long		next_swap;
-};
-
-/**
- *	The swap_map_handle structure is used for handling swap in
- *	a file-alike way
- */
-
-struct swap_map_handle {
-	struct swap_map_page *cur;
-	unsigned long cur_swap;
-	struct bitmap_page *bitmap;
-	unsigned int k;
-};
-
-static void release_swap_writer(struct swap_map_handle *handle)
-{
-	if (handle->cur)
-		free_page((unsigned long)handle->cur);
-	handle->cur = NULL;
-	if (handle->bitmap)
-		free_bitmap(handle->bitmap);
-	handle->bitmap = NULL;
-}
-
-static void show_speed(struct timeval *start, struct timeval *stop,
-			unsigned nr_pages, char *msg)
-{
-	s64 elapsed_centisecs64;
-	int centisecs;
-	int k;
-	int kps;
-
-	elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
-	do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
-	centisecs = elapsed_centisecs64;
-	if (centisecs == 0)
-		centisecs = 1;	/* avoid div-by-zero */
-	k = nr_pages * (PAGE_SIZE / 1024);
-	kps = (k * 100) / centisecs;
-	printk("%s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", msg, k,
-			centisecs / 100, centisecs % 100,
-			kps / 1000, (kps % 1000) / 10);
-}
-
-static int get_swap_writer(struct swap_map_handle *handle)
-{
-	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
-	if (!handle->cur)
-		return -ENOMEM;
-	handle->bitmap = alloc_bitmap(count_swap_pages(root_swap, 0));
-	if (!handle->bitmap) {
-		release_swap_writer(handle);
-		return -ENOMEM;
-	}
-	handle->cur_swap = alloc_swap_page(root_swap, handle->bitmap);
-	if (!handle->cur_swap) {
-		release_swap_writer(handle);
-		return -ENOSPC;
-	}
-	handle->k = 0;
 	return 0;
 }
 
+static int bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
+{
+	return submit(READ, page_off, virt_to_page(addr), bio_chain);
+}
+
+static int bio_write_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
+{
+	return submit(WRITE, page_off, virt_to_page(addr), bio_chain);
+}
+
 static int wait_on_bio_chain(struct bio **bio_chain)
 {
 	struct bio *bio;
@@ -233,15 +133,155 @@
 	return ret;
 }
 
+/*
+ * Saving part
+ */
+
+static int mark_swapfiles(sector_t start)
+{
+	int error;
+
+	bio_read_page(swsusp_resume_block, &swsusp_header, NULL);
+	if (!memcmp("SWAP-SPACE",swsusp_header.sig, 10) ||
+	    !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) {
+		memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10);
+		memcpy(swsusp_header.sig,SWSUSP_SIG, 10);
+		swsusp_header.image = start;
+		error = bio_write_page(swsusp_resume_block,
+					&swsusp_header, NULL);
+	} else {
+		printk(KERN_ERR "swsusp: Swap header not found!\n");
+		error = -ENODEV;
+	}
+	return error;
+}
+
+/**
+ *	swsusp_swap_check - check if the resume device is a swap device
+ *	and get its index (if so)
+ */
+
+static int swsusp_swap_check(void) /* This is called before saving image */
+{
+	int res;
+
+	res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
+	if (res < 0)
+		return res;
+
+	root_swap = res;
+	resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_WRITE);
+	if (IS_ERR(resume_bdev))
+		return PTR_ERR(resume_bdev);
+
+	res = set_blocksize(resume_bdev, PAGE_SIZE);
+	if (res < 0)
+		blkdev_put(resume_bdev);
+
+	return res;
+}
+
+/**
+ *	write_page - Write one page to given swap location.
+ *	@buf:		Address we're writing.
+ *	@offset:	Offset of the swap page we're writing to.
+ *	@bio_chain:	Link the next write BIO here
+ */
+
+static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
+{
+	void *src;
+
+	if (!offset)
+		return -ENOSPC;
+
+	if (bio_chain) {
+		src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+		if (src) {
+			memcpy(src, buf, PAGE_SIZE);
+		} else {
+			WARN_ON_ONCE(1);
+			bio_chain = NULL;	/* Go synchronous */
+			src = buf;
+		}
+	} else {
+		src = buf;
+	}
+	return bio_write_page(offset, src, bio_chain);
+}
+
+/*
+ *	The swap map is a data structure used for keeping track of each page
+ *	written to a swap partition.  It consists of many swap_map_page
+ *	structures that each contain an array of MAP_PAGE_ENTRIES swap entries.
+ *	These structures are stored on the swap and linked together with the
+ *	help of the .next_swap member.
+ *
+ *	The swap map is created during suspend.  The swap map pages are
+ *	allocated and populated one at a time, so we only need one memory
+ *	page to set up the entire structure.
+ *
+ *	During resume we also only need to use one swap_map_page structure
+ *	at a time.
+ */
+
+#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
+
+struct swap_map_page {
+	sector_t entries[MAP_PAGE_ENTRIES];
+	sector_t next_swap;
+};
+
+/**
+ *	The swap_map_handle structure is used for handling swap in
+ *	a file-like way
+ */
+
+struct swap_map_handle {
+	struct swap_map_page *cur;
+	sector_t cur_swap;
+	struct bitmap_page *bitmap;
+	unsigned int k;
+};
+
+static void release_swap_writer(struct swap_map_handle *handle)
+{
+	if (handle->cur)
+		free_page((unsigned long)handle->cur);
+	handle->cur = NULL;
+	if (handle->bitmap)
+		free_bitmap(handle->bitmap);
+	handle->bitmap = NULL;
+}
+
+static int get_swap_writer(struct swap_map_handle *handle)
+{
+	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
+	if (!handle->cur)
+		return -ENOMEM;
+	handle->bitmap = alloc_bitmap(count_swap_pages(root_swap, 0));
+	if (!handle->bitmap) {
+		release_swap_writer(handle);
+		return -ENOMEM;
+	}
+	handle->cur_swap = alloc_swapdev_block(root_swap, handle->bitmap);
+	if (!handle->cur_swap) {
+		release_swap_writer(handle);
+		return -ENOSPC;
+	}
+	handle->k = 0;
+	return 0;
+}
+
 static int swap_write_page(struct swap_map_handle *handle, void *buf,
 				struct bio **bio_chain)
 {
 	int error = 0;
-	unsigned long offset;
+	sector_t offset;
 
 	if (!handle->cur)
 		return -EINVAL;
-	offset = alloc_swap_page(root_swap, handle->bitmap);
+	offset = alloc_swapdev_block(root_swap, handle->bitmap);
 	error = write_page(buf, offset, bio_chain);
 	if (error)
 		return error;
@@ -250,7 +290,7 @@
 		error = wait_on_bio_chain(bio_chain);
 		if (error)
 			goto out;
-		offset = alloc_swap_page(root_swap, handle->bitmap);
+		offset = alloc_swapdev_block(root_swap, handle->bitmap);
 		if (!offset)
 			return -ENOSPC;
 		handle->cur->next_swap = offset;
@@ -261,7 +301,7 @@
 		handle->cur_swap = offset;
 		handle->k = 0;
 	}
-out:
+ out:
 	return error;
 }
 
@@ -315,7 +355,7 @@
 		error = err2;
 	if (!error)
 		printk("\b\b\b\bdone\n");
-	show_speed(&start, &stop, nr_to_write, "Wrote");
+	swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
 	return error;
 }
 
@@ -350,100 +390,50 @@
 	struct swsusp_info *header;
 	int error;
 
-	if ((error = swsusp_swap_check())) {
+	error = swsusp_swap_check();
+	if (error) {
 		printk(KERN_ERR "swsusp: Cannot find swap device, try "
 				"swapon -a.\n");
 		return error;
 	}
 	memset(&snapshot, 0, sizeof(struct snapshot_handle));
 	error = snapshot_read_next(&snapshot, PAGE_SIZE);
-	if (error < PAGE_SIZE)
-		return error < 0 ? error : -EFAULT;
+	if (error < PAGE_SIZE) {
+		if (error >= 0)
+			error = -EFAULT;
+
+		goto out;
+	}
 	header = (struct swsusp_info *)data_of(snapshot);
 	if (!enough_swap(header->pages)) {
 		printk(KERN_ERR "swsusp: Not enough free swap\n");
-		return -ENOSPC;
+		error = -ENOSPC;
+		goto out;
 	}
 	error = get_swap_writer(&handle);
 	if (!error) {
-		unsigned long start = handle.cur_swap;
+		sector_t start = handle.cur_swap;
+
 		error = swap_write_page(&handle, header, NULL);
 		if (!error)
 			error = save_image(&handle, &snapshot,
 					header->pages - 1);
+
 		if (!error) {
 			flush_swap_writer(&handle);
 			printk("S");
-			error = mark_swapfiles(swp_entry(root_swap, start));
+			error = mark_swapfiles(start);
 			printk("|\n");
 		}
 	}
 	if (error)
 		free_all_swap_pages(root_swap, handle.bitmap);
 	release_swap_writer(&handle);
+ out:
+	swsusp_close();
 	return error;
 }
 
-static struct block_device *resume_bdev;
-
-/**
- *	submit - submit BIO request.
- *	@rw:	READ or WRITE.
- *	@off	physical offset of page.
- *	@page:	page we're reading or writing.
- *	@bio_chain: list of pending biod (for async reading)
- *
- *	Straight from the textbook - allocate and initialize the bio.
- *	If we're reading, make sure the page is marked as dirty.
- *	Then submit it and, if @bio_chain == NULL, wait.
- */
-static int submit(int rw, pgoff_t page_off, struct page *page,
-			struct bio **bio_chain)
-{
-	struct bio *bio;
-
-	bio = bio_alloc(GFP_ATOMIC, 1);
-	if (!bio)
-		return -ENOMEM;
-	bio->bi_sector = page_off * (PAGE_SIZE >> 9);
-	bio->bi_bdev = resume_bdev;
-	bio->bi_end_io = end_swap_bio_read;
-
-	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
-		printk("swsusp: ERROR: adding page to bio at %ld\n", page_off);
-		bio_put(bio);
-		return -EFAULT;
-	}
-
-	lock_page(page);
-	bio_get(bio);
-
-	if (bio_chain == NULL) {
-		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
-		wait_on_page_locked(page);
-		if (rw == READ)
-			bio_set_pages_dirty(bio);
-		bio_put(bio);
-	} else {
-		if (rw == READ)
-			get_page(page);	/* These pages are freed later */
-		bio->bi_private = *bio_chain;
-		*bio_chain = bio;
-		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
-	}
-	return 0;
-}
-
-static int bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
-{
-	return submit(READ, page_off, virt_to_page(addr), bio_chain);
-}
-
-static int bio_write_page(pgoff_t page_off, void *addr)
-{
-	return submit(WRITE, page_off, virt_to_page(addr), NULL);
-}
-
 /**
  *	The following functions allow us to read data using a swap map
  *	in a file-alike way
@@ -456,17 +446,18 @@
 	handle->cur = NULL;
 }
 
-static int get_swap_reader(struct swap_map_handle *handle,
-                                      swp_entry_t start)
+static int get_swap_reader(struct swap_map_handle *handle, sector_t start)
 {
 	int error;
 
-	if (!swp_offset(start))
+	if (!start)
 		return -EINVAL;
-	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
+
+	handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH);
 	if (!handle->cur)
 		return -ENOMEM;
-	error = bio_read_page(swp_offset(start), handle->cur, NULL);
+
+	error = bio_read_page(start, handle->cur, NULL);
 	if (error) {
 		release_swap_reader(handle);
 		return error;
@@ -478,7 +469,7 @@
 static int swap_read_page(struct swap_map_handle *handle, void *buf,
 				struct bio **bio_chain)
 {
-	unsigned long offset;
+	sector_t offset;
 	int error;
 
 	if (!handle->cur)
@@ -547,11 +538,11 @@
 		error = err2;
 	if (!error) {
 		printk("\b\b\b\bdone\n");
-		snapshot_free_unused_memory(snapshot);
+		snapshot_write_finalize(snapshot);
 		if (!snapshot_image_loaded(snapshot))
 			error = -ENODATA;
 	}
-	show_speed(&start, &stop, nr_to_read, "Read");
+	swsusp_show_speed(&start, &stop, nr_to_read, "Read");
 	return error;
 }
 
@@ -600,12 +591,16 @@
 	if (!IS_ERR(resume_bdev)) {
 		set_blocksize(resume_bdev, PAGE_SIZE);
 		memset(&swsusp_header, 0, sizeof(swsusp_header));
-		if ((error = bio_read_page(0, &swsusp_header, NULL)))
+		error = bio_read_page(swsusp_resume_block,
+					&swsusp_header, NULL);
+		if (error)
 			return error;
+
 		if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) {
 			memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10);
 			/* Reset swap signature now */
-			error = bio_write_page(0, &swsusp_header);
+			error = bio_write_page(swsusp_resume_block,
+						&swsusp_header, NULL);
 		} else {
 			return -EINVAL;
 		}
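
The rewritten swap code above addresses everything in sector_t units:
submit() turns a page-sized offset into a 512-byte sector index, and each
swap_map_page stores MAP_PAGE_ENTRIES data sectors plus a link to the next
map page.  A rough sanity check of both calculations, assuming 4 KiB pages
and an 8-byte sector_t (both depend on the configuration):

	#include <stdio.h>

	#define PAGE_SIZE	4096	/* assumed */
	#define SECTOR_SHIFT	9	/* 512-byte sectors */
	#define SECTOR_BYTES	8	/* assumed sizeof(sector_t), e.g. with CONFIG_LBD */

	int main(void)
	{
		unsigned long page_off = 3;	/* fourth page-sized slot on the device */
		unsigned long sector = page_off * (PAGE_SIZE >> SECTOR_SHIFT);
		unsigned long entries = PAGE_SIZE / SECTOR_BYTES - 1;

		/* 4 KiB pages span 8 sectors each, so page 3 starts at sector 24,
		 * and one swap_map_page indexes 511 data pages. */
		printf("sector %lu, %lu entries per map page\n", sector, entries);
		return 0;
	}
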
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 0b66659..31aa039 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -49,6 +49,7 @@
 #include <linux/bootmem.h>
 #include <linux/syscalls.h>
 #include <linux/highmem.h>
+#include <linux/time.h>
 
 #include "power.h"
 
@@ -64,10 +65,8 @@
 
 #ifdef CONFIG_HIGHMEM
 unsigned int count_highmem_pages(void);
-int save_highmem(void);
 int restore_highmem(void);
 #else
-static inline int save_highmem(void) { return 0; }
 static inline int restore_highmem(void) { return 0; }
 static inline unsigned int count_highmem_pages(void) { return 0; }
 #endif
@@ -134,18 +133,18 @@
 	return 0;
 }
 
-unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap)
+sector_t alloc_swapdev_block(int swap, struct bitmap_page *bitmap)
 {
 	unsigned long offset;
 
 	offset = swp_offset(get_swap_page_of_type(swap));
 	if (offset) {
-		if (bitmap_set(bitmap, offset)) {
+		if (bitmap_set(bitmap, offset))
 			swap_free(swp_entry(swap, offset));
-			offset = 0;
-		}
+		else
+			return swapdev_block(swap, offset);
 	}
-	return offset;
+	return 0;
 }
 
 void free_all_swap_pages(int swap, struct bitmap_page *bitmap)
@@ -166,6 +165,34 @@
 }
 
 /**
+ *	swsusp_show_speed - print the time elapsed between two events represented by
+ *	@start and @stop
+ *
+ *	@nr_pages -	number of pages processed between @start and @stop
+ *	@msg -		introductory message to print
+ */
+
+void swsusp_show_speed(struct timeval *start, struct timeval *stop,
+			unsigned nr_pages, char *msg)
+{
+	s64 elapsed_centisecs64;
+	int centisecs;
+	int k;
+	int kps;
+
+	elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
+	do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
+	centisecs = elapsed_centisecs64;
+	if (centisecs == 0)
+		centisecs = 1;	/* avoid div-by-zero */
+	k = nr_pages * (PAGE_SIZE / 1024);
+	kps = (k * 100) / centisecs;
+	printk("%s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", msg, k,
+			centisecs / 100, centisecs % 100,
+			kps / 1000, (kps % 1000) / 10);
+}
+
+/**
  *	swsusp_shrink_memory -  Try to free as much memory as needed
  *
  *	... but do not OOM-kill anyone
@@ -184,23 +211,37 @@
 
 int swsusp_shrink_memory(void)
 {
-	long size, tmp;
+	long tmp;
 	struct zone *zone;
 	unsigned long pages = 0;
 	unsigned int i = 0;
 	char *p = "-\\|/";
+	struct timeval start, stop;
 
 	printk("Shrinking memory...  ");
+	do_gettimeofday(&start);
 	do {
-		size = 2 * count_highmem_pages();
-		size += size / 50 + count_data_pages() + PAGES_FOR_IO;
+		long size, highmem_size;
+
+		highmem_size = count_highmem_pages();
+		size = count_data_pages() + PAGES_FOR_IO;
 		tmp = size;
+		size += highmem_size;
 		for_each_zone (zone)
-			if (!is_highmem(zone) && populated_zone(zone)) {
-				tmp -= zone->free_pages;
-				tmp += zone->lowmem_reserve[ZONE_NORMAL];
-				tmp += snapshot_additional_pages(zone);
+			if (populated_zone(zone)) {
+				if (is_highmem(zone)) {
+					highmem_size -= zone->free_pages;
+				} else {
+					tmp -= zone->free_pages;
+					tmp += zone->lowmem_reserve[ZONE_NORMAL];
+					tmp += snapshot_additional_pages(zone);
+				}
 			}
+
+		if (highmem_size < 0)
+			highmem_size = 0;
+
+		tmp += highmem_size;
 		if (tmp > 0) {
 			tmp = __shrink_memory(tmp);
 			if (!tmp)
@@ -212,7 +253,9 @@
 		}
 		printk("\b%c", p[i++%4]);
 	} while (tmp > 0);
+	do_gettimeofday(&stop);
 	printk("\bdone (%lu pages freed)\n", pages);
+	swsusp_show_speed(&start, &stop, pages, "Freed");
 
 	return 0;
 }
@@ -223,6 +266,7 @@
 
 	if ((error = arch_prepare_suspend()))
 		return error;
+
 	local_irq_disable();
 	/* At this point, device_suspend() has been called, but *not*
 	 * device_power_down(). We *must* device_power_down() now.
@@ -235,23 +279,16 @@
 		goto Enable_irqs;
 	}
 
-	if ((error = save_highmem())) {
-		printk(KERN_ERR "swsusp: Not enough free pages for highmem\n");
-		goto Restore_highmem;
-	}
-
 	save_processor_state();
 	if ((error = swsusp_arch_suspend()))
 		printk(KERN_ERR "Error %d suspending\n", error);
 	/* Restore control flow magically appears here */
 	restore_processor_state();
-Restore_highmem:
-	restore_highmem();
 	/* NOTE:  device_power_up() is just a resume() for devices
 	 * that suspended with irqs off ... no overall powerup.
 	 */
 	device_power_up();
-Enable_irqs:
+ Enable_irqs:
 	local_irq_enable();
 	return error;
 }
@@ -268,18 +305,23 @@
 		printk(KERN_ERR "Some devices failed to power down, very bad\n");
 	/* We'll ignore saved state, but this gets preempt count (etc) right */
 	save_processor_state();
-	error = swsusp_arch_resume();
-	/* Code below is only ever reached in case of failure. Otherwise
-	 * execution continues at place where swsusp_arch_suspend was called
-         */
-	BUG_ON(!error);
+	error = restore_highmem();
+	if (!error) {
+		error = swsusp_arch_resume();
+		/* The code below is only ever reached in case of a failure.
+		 * Otherwise execution continues at the place where
+		 * swsusp_arch_suspend() was called
+		 */
+		BUG_ON(!error);
+		/* This call to restore_highmem() undoes the previous one */
+		restore_highmem();
+	}
 	/* The only reason why swsusp_arch_resume() can fail is memory being
 	 * very tight, so we have to free it as soon as we can to avoid
 	 * subsequent failures
 	 */
 	swsusp_free();
 	restore_processor_state();
-	restore_highmem();
 	touch_softlockup_watchdog();
 	device_power_up();
 	local_irq_enable();
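
swsusp_show_speed(), moved into swsusp.c above and now also used to report how
quickly swsusp_shrink_memory() freed pages, does all of its arithmetic in
integer centiseconds.  A standalone re-run of the same math with made-up
numbers (5000 pages in 1.25 seconds):

	#include <stdio.h>

	#define PAGE_SIZE	4096	/* assumed */
	#define NSEC_PER_SEC	1000000000LL

	int main(void)
	{
		long long elapsed_ns = 1250000000LL;	/* timeval_to_ns(stop) - timeval_to_ns(start) */
		unsigned int nr_pages = 5000;
		int centisecs = elapsed_ns / (NSEC_PER_SEC / 100);
		int k, kps;

		if (centisecs == 0)
			centisecs = 1;			/* avoid div-by-zero */
		k = nr_pages * (PAGE_SIZE / 1024);	/* kilobytes moved */
		kps = (k * 100) / centisecs;		/* kilobytes per second */
		printf("%d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", k,
			centisecs / 100, centisecs % 100,
			kps / 1000, (kps % 1000) / 10);
		return 0;	/* prints: 20000 kbytes in 1.25 seconds (16.00 MB/s) */
	}
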
diff --git a/kernel/power/user.c b/kernel/power/user.c
index d991d3b..89443b8 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -11,6 +11,7 @@
 
 #include <linux/suspend.h>
 #include <linux/syscalls.h>
+#include <linux/reboot.h>
 #include <linux/string.h>
 #include <linux/device.h>
 #include <linux/miscdevice.h>
@@ -21,6 +22,7 @@
 #include <linux/fs.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 
@@ -54,7 +56,8 @@
 	filp->private_data = data;
 	memset(&data->handle, 0, sizeof(struct snapshot_handle));
 	if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
-		data->swap = swsusp_resume_device ? swap_type_of(swsusp_resume_device) : -1;
+		data->swap = swsusp_resume_device ?
+				swap_type_of(swsusp_resume_device, 0) : -1;
 		data->mode = O_RDONLY;
 	} else {
 		data->swap = -1;
@@ -76,10 +79,10 @@
 	free_all_swap_pages(data->swap, data->bitmap);
 	free_bitmap(data->bitmap);
 	if (data->frozen) {
-		down(&pm_sem);
+		mutex_lock(&pm_mutex);
 		thaw_processes();
 		enable_nonboot_cpus();
-		up(&pm_sem);
+		mutex_unlock(&pm_mutex);
 	}
 	atomic_inc(&device_available);
 	return 0;
@@ -124,7 +127,8 @@
 {
 	int error = 0;
 	struct snapshot_data *data;
-	loff_t offset, avail;
+	loff_t avail;
+	sector_t offset;
 
 	if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
 		return -ENOTTY;
@@ -140,7 +144,7 @@
 	case SNAPSHOT_FREEZE:
 		if (data->frozen)
 			break;
-		down(&pm_sem);
+		mutex_lock(&pm_mutex);
 		error = disable_nonboot_cpus();
 		if (!error) {
 			error = freeze_processes();
@@ -150,7 +154,7 @@
 				error = -EBUSY;
 			}
 		}
-		up(&pm_sem);
+		mutex_unlock(&pm_mutex);
 		if (!error)
 			data->frozen = 1;
 		break;
@@ -158,10 +162,10 @@
 	case SNAPSHOT_UNFREEZE:
 		if (!data->frozen)
 			break;
-		down(&pm_sem);
+		mutex_lock(&pm_mutex);
 		thaw_processes();
 		enable_nonboot_cpus();
-		up(&pm_sem);
+		mutex_unlock(&pm_mutex);
 		data->frozen = 0;
 		break;
 
@@ -170,7 +174,7 @@
 			error = -EPERM;
 			break;
 		}
-		down(&pm_sem);
+		mutex_lock(&pm_mutex);
 		/* Free memory before shutting down devices. */
 		error = swsusp_shrink_memory();
 		if (!error) {
@@ -183,7 +187,7 @@
 			}
 			resume_console();
 		}
-		up(&pm_sem);
+		mutex_unlock(&pm_mutex);
 		if (!error)
 			error = put_user(in_suspend, (unsigned int __user *)arg);
 		if (!error)
@@ -191,13 +195,13 @@
 		break;
 
 	case SNAPSHOT_ATOMIC_RESTORE:
+		snapshot_write_finalize(&data->handle);
 		if (data->mode != O_WRONLY || !data->frozen ||
 		    !snapshot_image_loaded(&data->handle)) {
 			error = -EPERM;
 			break;
 		}
-		snapshot_free_unused_memory(&data->handle);
-		down(&pm_sem);
+		mutex_lock(&pm_mutex);
 		pm_prepare_console();
 		suspend_console();
 		error = device_suspend(PMSG_PRETHAW);
@@ -207,7 +211,7 @@
 		}
 		resume_console();
 		pm_restore_console();
-		up(&pm_sem);
+		mutex_unlock(&pm_mutex);
 		break;
 
 	case SNAPSHOT_FREE:
@@ -238,10 +242,10 @@
 				break;
 			}
 		}
-		offset = alloc_swap_page(data->swap, data->bitmap);
+		offset = alloc_swapdev_block(data->swap, data->bitmap);
 		if (offset) {
 			offset <<= PAGE_SHIFT;
-			error = put_user(offset, (loff_t __user *)arg);
+			error = put_user(offset, (sector_t __user *)arg);
 		} else {
 			error = -ENOSPC;
 		}
@@ -264,7 +268,7 @@
 			 * so we need to recode them
 			 */
 			if (old_decode_dev(arg)) {
-				data->swap = swap_type_of(old_decode_dev(arg));
+				data->swap = swap_type_of(old_decode_dev(arg), 0);
 				if (data->swap < 0)
 					error = -ENODEV;
 			} else {
@@ -282,7 +286,7 @@
 			break;
 		}
 
-		if (down_trylock(&pm_sem)) {
+		if (!mutex_trylock(&pm_mutex)) {
 			error = -EBUSY;
 			break;
 		}
@@ -309,8 +313,66 @@
 		if (pm_ops->finish)
 			pm_ops->finish(PM_SUSPEND_MEM);
 
-OutS3:
-		up(&pm_sem);
+ OutS3:
+		mutex_unlock(&pm_mutex);
+		break;
+
+	case SNAPSHOT_PMOPS:
+		switch (arg) {
+
+		case PMOPS_PREPARE:
+			if (pm_ops->prepare) {
+				error = pm_ops->prepare(PM_SUSPEND_DISK);
+			}
+			break;
+
+		case PMOPS_ENTER:
+			kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
+			error = pm_ops->enter(PM_SUSPEND_DISK);
+			break;
+
+		case PMOPS_FINISH:
+			if (pm_ops && pm_ops->finish) {
+				pm_ops->finish(PM_SUSPEND_DISK);
+			}
+			break;
+
+		default:
+			printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);
+			error = -EINVAL;
+
+		}
+		break;
+
+	case SNAPSHOT_SET_SWAP_AREA:
+		if (data->bitmap) {
+			error = -EPERM;
+		} else {
+			struct resume_swap_area swap_area;
+			dev_t swdev;
+
+			error = copy_from_user(&swap_area, (void __user *)arg,
+					sizeof(struct resume_swap_area));
+			if (error) {
+				error = -EFAULT;
+				break;
+			}
+
+			/*
+			 * User space encodes device types as two-byte values,
+			 * so we need to recode them
+			 */
+			swdev = old_decode_dev(swap_area.dev);
+			if (swdev) {
+				offset = swap_area.offset;
+				data->swap = swap_type_of(swdev, offset);
+				if (data->swap < 0)
+					error = -ENODEV;
+			} else {
+				data->swap = -1;
+				error = -EINVAL;
+			}
+		}
 		break;
 
 	default:
@@ -321,7 +383,7 @@
 	return error;
 }
 
-static struct file_operations snapshot_fops = {
+static const struct file_operations snapshot_fops = {
 	.open = snapshot_open,
 	.release = snapshot_release,
 	.read = snapshot_read,
diff --git a/kernel/printk.c b/kernel/printk.c
index 6642655..185bb45 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -53,8 +53,6 @@
 	DEFAULT_CONSOLE_LOGLEVEL,	/* default_console_loglevel */
 };
 
-EXPORT_UNUSED_SYMBOL(console_printk);  /*  June 2006  */
-
 /*
  * Low lever drivers may need that to know if they can schedule in
  * their unblank() callback or not. So let's export it.
@@ -335,13 +333,25 @@
 	}
 }
 
+static int __read_mostly ignore_loglevel;
+
+int __init ignore_loglevel_setup(char *str)
+{
+	ignore_loglevel = 1;
+	printk(KERN_INFO "debug: ignoring loglevel setting.\n");
+
+	return 1;
+}
+
+__setup("ignore_loglevel", ignore_loglevel_setup);
+
 /*
  * Write out chars from start to end - 1 inclusive
  */
 static void _call_console_drivers(unsigned long start,
 				unsigned long end, int msg_log_level)
 {
-	if (msg_log_level < console_loglevel &&
+	if ((msg_log_level < console_loglevel || ignore_loglevel) &&
 			console_drivers && start != end) {
 		if ((start & LOG_BUF_MASK) > (end & LOG_BUF_MASK)) {
 			/* wrapped write */
@@ -631,12 +641,7 @@
 
 asmlinkage long sys_syslog(int type, char __user *buf, int len)
 {
-	return 0;
-}
-
-int do_syslog(int type, char __user *buf, int len)
-{
-	return 0;
+	return -ENOSYS;
 }
 
 static void call_console_drivers(unsigned long start, unsigned long end)
@@ -777,7 +782,6 @@
 {
 	return console_locked;
 }
-EXPORT_UNUSED_SYMBOL(is_console_locked);  /*  June 2006  */
 
 /**
  * release_console_sem - unlock the console system
diff --git a/kernel/profile.c b/kernel/profile.c
index f940b46..fb5e03d 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -40,7 +40,7 @@
 
 static atomic_t *prof_buffer;
 static unsigned long prof_len, prof_shift;
-static int prof_on __read_mostly;
+int prof_on __read_mostly;
 static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
 #ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
@@ -51,9 +51,19 @@
 static int __init profile_setup(char * str)
 {
 	static char __initdata schedstr[] = "schedule";
+	static char __initdata sleepstr[] = "sleep";
 	int par;
 
-	if (!strncmp(str, schedstr, strlen(schedstr))) {
+	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
+		prof_on = SLEEP_PROFILING;
+		if (str[strlen(sleepstr)] == ',')
+			str += strlen(sleepstr) + 1;
+		if (get_option(&str, &par))
+			prof_shift = par;
+		printk(KERN_INFO
+			"kernel sleep profiling enabled (shift: %ld)\n",
+			prof_shift);
+	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
 		prof_on = SCHED_PROFILING;
 		if (str[strlen(schedstr)] == ',')
 			str += strlen(schedstr) + 1;
@@ -204,7 +214,8 @@
  * positions to which hits are accounted during short intervals (e.g.
  * several seconds) is usually very small. Exclusion from buffer
  * flipping is provided by interrupt disablement (note that for
- * SCHED_PROFILING profile_hit() may be called from process context).
+ * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
+ * process context).
  * The hash function is meant to be lightweight as opposed to strong,
  * and was vaguely inspired by ppc64 firmware-supported inverted
  * pagetable hash functions, but uses a full hashtable full of finite
@@ -257,7 +268,7 @@
 	mutex_unlock(&profile_flip_mutex);
 }
 
-void profile_hit(int type, void *__pc)
+void profile_hits(int type, void *__pc, unsigned int nr_hits)
 {
 	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
 	int i, j, cpu;
@@ -274,21 +285,31 @@
 		put_cpu();
 		return;
 	}
+	/*
+	 * We buffer the global profiler buffer into a per-CPU
+	 * queue and thus reduce the number of global (and possibly
+	 * NUMA-alien) accesses. The write-queue is self-coalescing:
+	 */
 	local_irq_save(flags);
 	do {
 		for (j = 0; j < PROFILE_GRPSZ; ++j) {
 			if (hits[i + j].pc == pc) {
-				hits[i + j].hits++;
+				hits[i + j].hits += nr_hits;
 				goto out;
 			} else if (!hits[i + j].hits) {
 				hits[i + j].pc = pc;
-				hits[i + j].hits = 1;
+				hits[i + j].hits = nr_hits;
 				goto out;
 			}
 		}
 		i = (i + secondary) & (NR_PROFILE_HIT - 1);
 	} while (i != primary);
-	atomic_inc(&prof_buffer[pc]);
+
+	/*
+	 * Add the current hit(s) and flush the write-queue out
+	 * to the global buffer:
+	 */
+	atomic_add(nr_hits, &prof_buffer[pc]);
 	for (i = 0; i < NR_PROFILE_HIT; ++i) {
 		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
 		hits[i].pc = hits[i].hits = 0;
@@ -298,7 +319,6 @@
 	put_cpu();
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int __devinit profile_cpu_callback(struct notifier_block *info,
 					unsigned long action, void *__cpu)
 {
@@ -351,19 +371,19 @@
 	}
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 #else /* !CONFIG_SMP */
 #define profile_flip_buffers()		do { } while (0)
 #define profile_discard_flip_buffers()	do { } while (0)
+#define profile_cpu_callback		NULL
 
-void profile_hit(int type, void *__pc)
+void profile_hits(int type, void *__pc, unsigned int nr_hits)
 {
 	unsigned long pc;
 
 	if (prof_on != type || !prof_buffer)
 		return;
 	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
-	atomic_inc(&prof_buffer[min(pc, prof_len - 1)]);
+	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
 }
 #endif /* !CONFIG_SMP */
 
@@ -442,7 +462,8 @@
 	read = 0;
 
 	while (p < sizeof(unsigned int) && count > 0) {
-		put_user(*((char *)(&sample_step)+p),buf);
+		if (put_user(*((char *)(&sample_step)+p),buf))
+			return -EFAULT;
 		buf++; p++; count--; read++;
 	}
 	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
@@ -480,7 +501,7 @@
 	return count;
 }
 
-static struct file_operations proc_profile_operations = {
+static const struct file_operations proc_profile_operations = {
 	.read		= read_profile,
 	.write		= write_profile,
 };
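
The profile_hits() rework above generalizes the per-CPU write queue from
single increments to weighted hits: a slot whose pc matches (or is empty)
absorbs nr_hits locally, and only a full queue forces traffic to the shared
buffer.  A simplified, single-threaded sketch of that self-coalescing queue;
the linear probe, queue size and hash here are illustrative, not the kernel's
grouped two-level scheme:

	#define QUEUE_SLOTS	16
	#define BUFFER_SLOTS	1024

	struct hit { unsigned long pc; unsigned int hits; };

	static struct hit queue[QUEUE_SLOTS];
	static unsigned int global_buffer[BUFFER_SLOTS];

	static void flush_queue(void)
	{
		int i;

		for (i = 0; i < QUEUE_SLOTS; i++) {
			global_buffer[queue[i].pc % BUFFER_SLOTS] += queue[i].hits;
			queue[i].pc = queue[i].hits = 0;
		}
	}

	static void record_hits(unsigned long pc, unsigned int nr_hits)
	{
		int i;

		for (i = 0; i < QUEUE_SLOTS; i++) {
			struct hit *h = &queue[(pc + i) % QUEUE_SLOTS];

			if (h->pc == pc || !h->hits) {
				h->pc = pc;		/* claim or reuse the slot */
				h->hits += nr_hits;	/* coalesce repeated hits */
				return;
			}
		}
		/* Queue full: account this sample directly, then drain the queue */
		global_buffer[pc % BUFFER_SLOTS] += nr_hits;
		flush_queue();
	}

	int main(void)
	{
		record_hits(0x1234, 1);
		record_hits(0x1234, 3);	/* lands in the same slot: 4 hits total */
		flush_queue();
		return 0;
	}
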
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 26bb5ff..3554b76 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -235,12 +235,14 @@
 
 	list = rdp->donelist;
 	while (list) {
-		next = rdp->donelist = list->next;
+		next = list->next;
+		prefetch(next);
 		list->func(list);
 		list = next;
 		if (++count >= rdp->blimit)
 			break;
 	}
+	rdp->donelist = list;
 
 	local_irq_disable();
 	rdp->qlen -= count;
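
The rcupdate.c change stops rewriting rdp->donelist on every iteration: the
walk keeps a local cursor, prefetches the next callback before invoking the
current one, and stores the remaining head back once after the loop.  The
pattern in isolation, with illustrative types:

	struct callback {
		struct callback *next;
		void (*func)(struct callback *);
	};

	static struct callback *run_callbacks(struct callback *list, int limit)
	{
		int count = 0;

		while (list) {
			struct callback *next = list->next;

			__builtin_prefetch(next);	/* start fetching the next node early */
			list->func(list);		/* may free or reuse "list" */
			list = next;
			if (++count >= limit)
				break;
		}
		return list;	/* caller stores this back as the new list head */
	}

	static void noop(struct callback *cb) { (void)cb; }

	int main(void)
	{
		struct callback last = { 0, noop };
		struct callback first = { &last, noop };

		return run_callbacks(&first, 16) != 0;	/* 0: list fully drained */
	}
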
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index e2bda18..c52f981 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -401,7 +401,7 @@
 	cleanup_srcu_struct(&srcu_ctl);
 }
 
-static int srcu_torture_read_lock(void)
+static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
 {
 	return srcu_read_lock(&srcu_ctl);
 }
@@ -419,7 +419,7 @@
 		schedule_timeout_interruptible(longdelay);
 }
 
-static void srcu_torture_read_unlock(int idx)
+static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
 {
 	srcu_read_unlock(&srcu_ctl, idx);
 }
diff --git a/kernel/relay.c b/kernel/relay.c
index f04bbdb..75a3a9a 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -308,9 +308,10 @@
  *	reason waking is deferred is that calling directly from write
  *	causes problems if you're writing from say the scheduler.
  */
-static void wakeup_readers(void *private)
+static void wakeup_readers(struct work_struct *work)
 {
-	struct rchan_buf *buf = private;
+	struct rchan_buf *buf =
+		container_of(work, struct rchan_buf, wake_readers.work);
 	wake_up_interruptible(&buf->read_wait);
 }
 
@@ -328,7 +329,7 @@
 	if (init) {
 		init_waitqueue_head(&buf->read_wait);
 		kref_init(&buf->kref);
-		INIT_WORK(&buf->wake_readers, NULL, NULL);
+		INIT_DELAYED_WORK(&buf->wake_readers, NULL);
 	} else {
 		cancel_delayed_work(&buf->wake_readers);
 		flush_scheduled_work();
@@ -549,7 +550,8 @@
 			buf->padding[old_subbuf];
 		smp_mb();
 		if (waitqueue_active(&buf->read_wait)) {
-			PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf);
+			PREPARE_DELAYED_WORK(&buf->wake_readers,
+					     wakeup_readers);
 			schedule_delayed_work(&buf->wake_readers, 1);
 		}
 	}
@@ -1011,7 +1013,7 @@
 				       actor, &desc);
 }
 
-struct file_operations relay_file_operations = {
+const struct file_operations relay_file_operations = {
 	.open		= relay_file_open,
 	.poll		= relay_file_poll,
 	.mmap		= relay_file_mmap,
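
The relay.c conversion follows the workqueue rework in this release: work
functions now receive the work_struct pointer itself instead of a private
data argument, so the handler recovers its buffer with container_of() on the
embedded work item.  A compact sketch of that recovery step, with a stand-in
structure rather than the real struct rchan_buf and a simplified
container_of():

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work_struct { int pending; };
	struct delayed_work { struct work_struct work; };

	struct buf_like {
		int readers_waiting;
		struct delayed_work wake_readers;	/* embedded work item */
	};

	static void wakeup_readers(struct work_struct *work)
	{
		/* Walk back from the work pointer to the enclosing buffer */
		struct buf_like *buf =
			container_of(work, struct buf_like, wake_readers.work);

		buf->readers_waiting = 0;	/* stand-in for wake_up_interruptible() */
	}

	int main(void)
	{
		struct buf_like buf = { 1, { { 0 } } };

		wakeup_readers(&buf.wake_readers.work);
		return buf.readers_waiting;	/* 0 after the "wake-up" */
	}
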
diff --git a/kernel/resource.c b/kernel/resource.c
index 6de60c1..7b9a497 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -88,7 +88,7 @@
 	return 0;
 }
 
-static struct seq_operations resource_op = {
+static const struct seq_operations resource_op = {
 	.start	= r_start,
 	.next	= r_next,
 	.stop	= r_stop,
@@ -115,14 +115,14 @@
 	return res;
 }
 
-static struct file_operations proc_ioports_operations = {
+static const struct file_operations proc_ioports_operations = {
 	.open		= ioports_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.release	= seq_release,
 };
 
-static struct file_operations proc_iomem_operations = {
+static const struct file_operations proc_iomem_operations = {
 	.open		= iomem_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index 6dcea9d..015fc63 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/sysdev.h>
 #include <linux/timer.h>
+#include <linux/freezer.h>
 
 #include "rtmutex.h"
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 3399701..f385eff 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -34,7 +34,7 @@
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/vmalloc.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
@@ -505,7 +505,7 @@
 	return res;
 }
 
-struct file_operations proc_schedstat_operations = {
+const struct file_operations proc_schedstat_operations = {
 	.open    = schedstat_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -948,6 +948,17 @@
 	}
 #endif
 
+	/*
+	 * Sleep time is in units of nanosecs, so shift by 20 to get a
+	 * milliseconds-range estimation of the amount of time that the task
+	 * spent sleeping:
+	 */
+	if (unlikely(prof_on == SLEEP_PROFILING)) {
+		if (p->state == TASK_UNINTERRUPTIBLE)
+			profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
+				     (now - p->timestamp) >> 20);
+	}
+
 	if (!rt_task(p))
 		p->prio = recalc_task_prio(p, now);
 
@@ -3333,6 +3344,7 @@
 		printk(KERN_ERR "BUG: scheduling while atomic: "
 			"%s/0x%08x/%d\n",
 			current->comm, preempt_count(), current->pid);
+		debug_show_held_locks(current);
 		dump_stack();
 	}
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
@@ -4804,18 +4816,18 @@
 		show_stack(p, NULL);
 }
 
-void show_state(void)
+void show_state_filter(unsigned long state_filter)
 {
 	struct task_struct *g, *p;
 
 #if (BITS_PER_LONG == 32)
 	printk("\n"
-	       "                                               sibling\n");
-	printk("  task             PC      pid father child younger older\n");
+	       "                         free                        sibling\n");
+	printk("  task             PC    stack   pid father child younger older\n");
 #else
 	printk("\n"
-	       "                                                       sibling\n");
-	printk("  task                 PC          pid father child younger older\n");
+	       "                                 free                        sibling\n");
+	printk("  task                 PC        stack   pid father child younger older\n");
 #endif
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
@@ -4824,11 +4836,16 @@
 		 * console might take alot of time:
 		 */
 		touch_nmi_watchdog();
-		show_task(p);
+		if (p->state & state_filter)
+			show_task(p);
 	} while_each_thread(g, p);
 
 	read_unlock(&tasklist_lock);
-	debug_show_all_locks();
+	/*
+	 * Only show locks if all tasks are dumped:
+	 */
+	if (state_filter == -1)
+		debug_show_all_locks();
 }
 
 /**
@@ -6723,8 +6740,6 @@
 	    sched_smt_power_savings_store);
 #endif
 
-
-#ifdef CONFIG_HOTPLUG_CPU
 /*
  * Force a reinitialization of the sched domains hierarchy.  The domains
  * and groups cannot be updated in place without racing with the balancing
@@ -6757,7 +6772,6 @@
 
 	return NOTIFY_OK;
 }
-#endif
 
 void __init sched_init_smp(void)
 {
@@ -6867,6 +6881,7 @@
 				" context at %s:%d\n", file, line);
 		printk("in_atomic():%d, irqs_disabled():%d\n",
 			in_atomic(), irqs_disabled());
+		debug_show_held_locks(current);
 		dump_stack();
 	}
 #endif
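
The sched.c hunks above replace show_state() with show_state_filter(): only tasks whose ->state matches the supplied mask are printed, and held locks are dumped only when the mask covers every state. A minimal sketch of the new interface from the caller's side, assuming <linux/sched.h>; the show_state() wrapper and dump_blocked_tasks() are illustrative, not part of this patch:

/*
 * Assumed compatibility wrapper: -1 sets every state bit, so all
 * non-running tasks are shown and, because of the state_filter == -1
 * check above, all held locks are dumped as well.
 */
static inline void show_state(void)
{
	show_state_filter(-1);
}

/* A debugging path that only cares about D-state tasks could instead do: */
static void dump_blocked_tasks(void)
{
	show_state_filter(TASK_UNINTERRUPTIBLE);
}
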
diff --git a/kernel/signal.c b/kernel/signal.c
index df18c16..ec81def 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -23,6 +23,7 @@
 #include <linux/ptrace.h>
 #include <linux/signal.h>
 #include <linux/capability.h>
+#include <linux/freezer.h>
 #include <asm/param.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -33,7 +34,7 @@
  * SLAB caches for signal bits.
  */
 
-static kmem_cache_t *sigqueue_cachep;
+static struct kmem_cache *sigqueue_cachep;
 
 /*
  * In POSIX a signal is sent either to a specific thread (Linux task)
@@ -1133,8 +1134,7 @@
 	return error;
 }
 
-int
-kill_proc_info(int sig, struct siginfo *info, pid_t pid)
+static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 {
 	int error;
 	rcu_read_lock();
diff --git a/kernel/softirq.c b/kernel/softirq.c
index bf25015..918e52d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -574,8 +574,6 @@
 
 	switch (action) {
 	case CPU_UP_PREPARE:
-		BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
-		BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
 		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
 		if (IS_ERR(p)) {
 			printk("ksoftirqd for %i failed\n", hotcpu);
diff --git a/kernel/sys.c b/kernel/sys.c
index 98489d8..a0c1a29 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -880,7 +880,7 @@
 	return 0;
 }
 
-static void deferred_cad(void *dummy)
+static void deferred_cad(struct work_struct *dummy)
 {
 	kernel_restart(NULL);
 }
@@ -892,7 +892,7 @@
  */
 void ctrl_alt_del(void)
 {
-	static DECLARE_WORK(cad_work, deferred_cad, NULL);
+	static DECLARE_WORK(cad_work, deferred_cad);
 
 	if (C_A_D)
 		schedule_work(&cad_work);
@@ -1102,14 +1102,14 @@
 asmlinkage long sys_setuid(uid_t uid)
 {
 	int old_euid = current->euid;
-	int old_ruid, old_suid, new_ruid, new_suid;
+	int old_ruid, old_suid, new_suid;
 	int retval;
 
 	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
 	if (retval)
 		return retval;
 
-	old_ruid = new_ruid = current->uid;
+	old_ruid = current->uid;
 	old_suid = current->suid;
 	new_suid = old_suid;
 	
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 09e569f..8e9f00f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -54,6 +54,7 @@
 
 #ifdef CONFIG_X86
 #include <asm/nmi.h>
+#include <asm/stacktrace.h>
 #endif
 
 #if defined(CONFIG_SYSCTL)
@@ -170,7 +171,7 @@
 static ssize_t proc_writesys(struct file *, const char __user *, size_t, loff_t *);
 static int proc_opensys(struct inode *, struct file *);
 
-struct file_operations proc_sys_file_operations = {
+const struct file_operations proc_sys_file_operations = {
 	.open		= proc_opensys,
 	.read		= proc_readsys,
 	.write		= proc_writesys,
@@ -707,6 +708,14 @@
 		.mode		= 0444,
 		.proc_handler	= &proc_dointvec,
 	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "kstack_depth_to_print",
+		.data		= &kstack_depth_to_print,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
 #endif
 #if defined(CONFIG_MMU)
 	{
@@ -977,17 +986,6 @@
 		.extra1		= &zero,
 	},
 #endif
-#ifdef CONFIG_SWAP
-	{
-		.ctl_name	= VM_SWAP_TOKEN_TIMEOUT,
-		.procname	= "swap_token_timeout",
-		.data		= &swap_token_default_timeout,
-		.maxlen		= sizeof(swap_token_default_timeout),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-		.strategy	= &sysctl_jiffies,
-	},
-#endif
 #ifdef CONFIG_NUMA
 	{
 		.ctl_name	= VM_ZONE_RECLAIM_MODE,
@@ -1886,7 +1884,7 @@
 			p = buf;
 			if (*p == '-' && left > 1) {
 				neg = 1;
-				left--, p++;
+				p++;
 			}
 			if (*p < '0' || *p > '9')
 				break;
@@ -2137,7 +2135,7 @@
 			p = buf;
 			if (*p == '-' && left > 1) {
 				neg = 1;
-				left--, p++;
+				p++;
 			}
 			if (*p < '0' || *p > '9')
 				break;
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index f45c5e7..4c3476f 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -34,7 +34,7 @@
 
 static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 };
 static int family_registered;
-kmem_cache_t *taskstats_cache;
+struct kmem_cache *taskstats_cache;
 
 static struct genl_family family = {
 	.id		= GENL_ID_GENERATE,
@@ -69,7 +69,7 @@
 };
 
 static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
-			void **replyp, size_t size)
+				size_t size)
 {
 	struct sk_buff *skb;
 	void *reply;
@@ -77,8 +77,7 @@
 	/*
 	 * If new attributes are added, please revisit this allocation
 	 */
-	size = nlmsg_total_size(genlmsg_total_size(size));
-	skb = nlmsg_new(size, GFP_KERNEL);
+	skb = genlmsg_new(size, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 
@@ -86,20 +85,15 @@
 		int seq = get_cpu_var(taskstats_seqnum)++;
 		put_cpu_var(taskstats_seqnum);
 
-		reply = genlmsg_put(skb, 0, seq,
-				family.id, 0, 0,
-				cmd, family.version);
+		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
 	} else
-		reply = genlmsg_put(skb, info->snd_pid, info->snd_seq,
-				family.id, 0, 0,
-				cmd, family.version);
+		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
 	if (reply == NULL) {
 		nlmsg_free(skb);
 		return -EINVAL;
 	}
 
 	*skbp = skb;
-	*replyp = reply;
 	return 0;
 }
 
@@ -124,10 +118,10 @@
 /*
  * Send taskstats data in @skb to listeners registered for @cpu's exit data
  */
-static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
+static void send_cpu_listeners(struct sk_buff *skb,
+					struct listener_list *listeners)
 {
 	struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
-	struct listener_list *listeners;
 	struct listener *s, *tmp;
 	struct sk_buff *skb_next, *skb_cur = skb;
 	void *reply = genlmsg_data(genlhdr);
@@ -140,7 +134,6 @@
 	}
 
 	rc = 0;
-	listeners = &per_cpu(listener_array, cpu);
 	down_read(&listeners->sem);
 	list_for_each_entry(s, &listeners->list, list) {
 		skb_next = NULL;
@@ -191,6 +184,7 @@
 	} else
 		get_task_struct(tsk);
 
+	memset(stats, 0, sizeof(*stats));
 	/*
 	 * Each accounting subsystem adds calls to its functions to
 	 * fill in relevant parts of struct taskstsats as follows
@@ -233,6 +227,8 @@
 
 	if (first->signal->stats)
 		memcpy(stats, first->signal->stats, sizeof(*stats));
+	else
+		memset(stats, 0, sizeof(*stats));
 
 	tsk = first;
 	do {
@@ -349,14 +345,36 @@
 	return ret;
 }
 
+static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
+{
+	struct nlattr *na, *ret;
+	int aggr;
+
+	aggr = (type == TASKSTATS_TYPE_PID)
+			? TASKSTATS_TYPE_AGGR_PID
+			: TASKSTATS_TYPE_AGGR_TGID;
+
+	na = nla_nest_start(skb, aggr);
+	if (!na)
+		goto err;
+	if (nla_put(skb, type, sizeof(pid), &pid) < 0)
+		goto err;
+	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
+	if (!ret)
+		goto err;
+	nla_nest_end(skb, na);
+
+	return nla_data(ret);
+err:
+	return NULL;
+}
+
 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
 {
 	int rc = 0;
 	struct sk_buff *rep_skb;
-	struct taskstats stats;
-	void *reply;
+	struct taskstats *stats;
 	size_t size;
-	struct nlattr *na;
 	cpumask_t mask;
 
 	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask);
@@ -377,83 +395,71 @@
 	size = nla_total_size(sizeof(u32)) +
 		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
 
-	memset(&stats, 0, sizeof(stats));
-	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
+	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
 	if (rc < 0)
 		return rc;
 
+	rc = -EINVAL;
 	if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
 		u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
-		rc = fill_pid(pid, NULL, &stats);
-		if (rc < 0)
+		stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
+		if (!stats)
 			goto err;
 
-		na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
-		NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, pid);
-		NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
-				stats);
+		rc = fill_pid(pid, NULL, stats);
+		if (rc < 0)
+			goto err;
 	} else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
 		u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
-		rc = fill_tgid(tgid, NULL, &stats);
-		if (rc < 0)
+		stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
+		if (!stats)
 			goto err;
 
-		na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
-		NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, tgid);
-		NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
-				stats);
-	} else {
-		rc = -EINVAL;
+		rc = fill_tgid(tgid, NULL, stats);
+		if (rc < 0)
+			goto err;
+	} else
 		goto err;
-	}
-
-	nla_nest_end(rep_skb, na);
 
 	return send_reply(rep_skb, info->snd_pid);
-
-nla_put_failure:
-	rc = genlmsg_cancel(rep_skb, reply);
 err:
 	nlmsg_free(rep_skb);
 	return rc;
 }
 
-void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
+static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
 {
-	struct listener_list *listeners;
-	struct taskstats *tmp;
-	/*
-	 * This is the cpu on which the task is exiting currently and will
-	 * be the one for which the exit event is sent, even if the cpu
-	 * on which this function is running changes later.
-	 */
-	*mycpu = raw_smp_processor_id();
+	struct signal_struct *sig = tsk->signal;
+	struct taskstats *stats;
 
-	*ptidstats = NULL;
-	tmp = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
-	if (!tmp)
-		return;
+	if (sig->stats || thread_group_empty(tsk))
+		goto ret;
 
-	listeners = &per_cpu(listener_array, *mycpu);
-	down_read(&listeners->sem);
-	if (!list_empty(&listeners->list)) {
-		*ptidstats = tmp;
-		tmp = NULL;
+	/* No problem if kmem_cache_zalloc() fails */
+	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
+
+	spin_lock_irq(&tsk->sighand->siglock);
+	if (!sig->stats) {
+		sig->stats = stats;
+		stats = NULL;
 	}
-	up_read(&listeners->sem);
-	kfree(tmp);
+	spin_unlock_irq(&tsk->sighand->siglock);
+
+	if (stats)
+		kmem_cache_free(taskstats_cache, stats);
+ret:
+	return sig->stats;
 }
 
 /* Send pid data out on exit */
-void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
-			int group_dead, unsigned int mycpu)
+void taskstats_exit(struct task_struct *tsk, int group_dead)
 {
 	int rc;
+	struct listener_list *listeners;
+	struct taskstats *stats;
 	struct sk_buff *rep_skb;
-	void *reply;
 	size_t size;
 	int is_thread_group;
-	struct nlattr *na;
 
 	if (!family_registered)
 		return;
@@ -464,7 +470,7 @@
 	size = nla_total_size(sizeof(u32)) +
 		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
 
-	is_thread_group = (tsk->signal->stats != NULL);
+	is_thread_group = !!taskstats_tgid_alloc(tsk);
 	if (is_thread_group) {
 		/* PID + STATS + TGID + STATS */
 		size = 2 * size;
@@ -472,49 +478,39 @@
 		fill_tgid_exit(tsk);
 	}
 
-	if (!tidstats)
+	listeners = &__raw_get_cpu_var(listener_array);
+	if (list_empty(&listeners->list))
 		return;
 
-	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
+	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
 	if (rc < 0)
-		goto ret;
+		return;
 
-	rc = fill_pid(tsk->pid, tsk, tidstats);
+	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, tsk->pid);
+	if (!stats)
+		goto err;
+
+	rc = fill_pid(tsk->pid, tsk, stats);
 	if (rc < 0)
-		goto err_skb;
-
-	na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
-	NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, (u32)tsk->pid);
-	NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
-			*tidstats);
-	nla_nest_end(rep_skb, na);
-
-	if (!is_thread_group)
-		goto send;
+		goto err;
 
 	/*
 	 * Doesn't matter if tsk is the leader or the last group member leaving
 	 */
-	if (!group_dead)
+	if (!is_thread_group || !group_dead)
 		goto send;
 
-	na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
-	NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, (u32)tsk->tgid);
-	/* No locking needed for tsk->signal->stats since group is dead */
-	NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
-			*tsk->signal->stats);
-	nla_nest_end(rep_skb, na);
+	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tsk->tgid);
+	if (!stats)
+		goto err;
+
+	memcpy(stats, tsk->signal->stats, sizeof(*stats));
 
 send:
-	send_cpu_listeners(rep_skb, mycpu);
+	send_cpu_listeners(rep_skb, listeners);
 	return;
-
-nla_put_failure:
-	genlmsg_cancel(rep_skb, reply);
-err_skb:
+err:
 	nlmsg_free(rep_skb);
-ret:
-	return;
 }
 
 static struct genl_ops taskstats_ops = {
diff --git a/kernel/unwind.c b/kernel/unwind.c
index ed0a21d..09c2613 100644
--- a/kernel/unwind.c
+++ b/kernel/unwind.c
@@ -14,11 +14,12 @@
 #include <linux/bootmem.h>
 #include <linux/sort.h>
 #include <linux/stop_machine.h>
+#include <linux/uaccess.h>
 #include <asm/sections.h>
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
 
-extern char __start_unwind[], __end_unwind[];
+extern const char __start_unwind[], __end_unwind[];
 extern const u8 __start_unwind_hdr[], __end_unwind_hdr[];
 
 #define MAX_STACK_DEPTH 8
@@ -94,6 +95,7 @@
 
 typedef unsigned long uleb128_t;
 typedef   signed long sleb128_t;
+#define sleb128abs __builtin_labs
 
 static struct unwind_table {
 	struct {
@@ -135,6 +137,17 @@
 
 static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 };
 
+static unsigned unwind_debug;
+static int __init unwind_debug_setup(char *s)
+{
+	unwind_debug = simple_strtoul(s, NULL, 0);
+	return 1;
+}
+__setup("unwind_debug=", unwind_debug_setup);
+#define dprintk(lvl, fmt, args...) \
+	((void)(lvl > unwind_debug \
+	 || printk(KERN_DEBUG "unwind: " fmt "\n", ##args)))
+
 static struct unwind_table *find_table(unsigned long pc)
 {
 	struct unwind_table *table;
@@ -151,7 +164,9 @@
 
 static unsigned long read_pointer(const u8 **pLoc,
                                   const void *end,
-                                  signed ptrType);
+                                  signed ptrType,
+                                  unsigned long text_base,
+                                  unsigned long data_base);
 
 static void init_unwind_table(struct unwind_table *table,
                               const char *name,
@@ -176,10 +191,13 @@
 	/* See if the linker provided table looks valid. */
 	if (header_size <= 4
 	    || header_start[0] != 1
-	    || (void *)read_pointer(&ptr, end, header_start[1]) != table_start
-	    || header_start[2] == DW_EH_PE_omit
-	    || read_pointer(&ptr, end, header_start[2]) <= 0
-	    || header_start[3] == DW_EH_PE_omit)
+	    || (void *)read_pointer(&ptr, end, header_start[1], 0, 0)
+	       != table_start
+	    || !read_pointer(&ptr, end, header_start[2], 0, 0)
+	    || !read_pointer(&ptr, end, header_start[3], 0,
+	                     (unsigned long)header_start)
+	    || !read_pointer(&ptr, end, header_start[3], 0,
+	                     (unsigned long)header_start))
 		header_start = NULL;
 	table->hdrsz = header_size;
 	smp_wmb();
@@ -269,7 +287,7 @@
 		ptr = (const u8 *)(fde + 2);
 		if (!read_pointer(&ptr,
 		                  (const u8 *)(fde + 1) + *fde,
-		                  ptrType))
+		                  ptrType, 0, 0))
 			return;
 		++n;
 	}
@@ -279,6 +297,7 @@
 
 	hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
 	        + 2 * n * sizeof(unsigned long);
+	dprintk(2, "Binary lookup table size for %s: %lu bytes", table->name, hdrSize);
 	header = alloc(hdrSize);
 	if (!header)
 		return;
@@ -303,7 +322,7 @@
 		ptr = (const u8 *)(fde + 2);
 		header->table[n].start = read_pointer(&ptr,
 		                                      (const u8 *)(fde + 1) + *fde,
-		                                      fde_pointer_type(cie));
+		                                      fde_pointer_type(cie), 0, 0);
 		header->table[n].fde = (unsigned long)fde;
 		++n;
 	}
@@ -486,7 +505,9 @@
 
 static unsigned long read_pointer(const u8 **pLoc,
                                   const void *end,
-                                  signed ptrType)
+                                  signed ptrType,
+                                  unsigned long text_base,
+                                  unsigned long data_base)
 {
 	unsigned long value = 0;
 	union {
@@ -498,13 +519,17 @@
 		const unsigned long *pul;
 	} ptr;
 
-	if (ptrType < 0 || ptrType == DW_EH_PE_omit)
+	if (ptrType < 0 || ptrType == DW_EH_PE_omit) {
+		dprintk(1, "Invalid pointer encoding %02X (%p,%p).", ptrType, *pLoc, end);
 		return 0;
+	}
 	ptr.p8 = *pLoc;
 	switch(ptrType & DW_EH_PE_FORM) {
 	case DW_EH_PE_data2:
-		if (end < (const void *)(ptr.p16u + 1))
+		if (end < (const void *)(ptr.p16u + 1)) {
+			dprintk(1, "Data16 overrun (%p,%p).", ptr.p8, end);
 			return 0;
+		}
 		if(ptrType & DW_EH_PE_signed)
 			value = get_unaligned(ptr.p16s++);
 		else
@@ -512,8 +537,10 @@
 		break;
 	case DW_EH_PE_data4:
 #ifdef CONFIG_64BIT
-		if (end < (const void *)(ptr.p32u + 1))
+		if (end < (const void *)(ptr.p32u + 1)) {
+			dprintk(1, "Data32 overrun (%p,%p).", ptr.p8, end);
 			return 0;
+		}
 		if(ptrType & DW_EH_PE_signed)
 			value = get_unaligned(ptr.p32s++);
 		else
@@ -525,8 +552,10 @@
 		BUILD_BUG_ON(sizeof(u32) != sizeof(value));
 #endif
 	case DW_EH_PE_native:
-		if (end < (const void *)(ptr.pul + 1))
+		if (end < (const void *)(ptr.pul + 1)) {
+			dprintk(1, "DataUL overrun (%p,%p).", ptr.p8, end);
 			return 0;
+		}
 		value = get_unaligned(ptr.pul++);
 		break;
 	case DW_EH_PE_leb128:
@@ -534,10 +563,14 @@
 		value = ptrType & DW_EH_PE_signed
 		        ? get_sleb128(&ptr.p8, end)
 		        : get_uleb128(&ptr.p8, end);
-		if ((const void *)ptr.p8 > end)
+		if ((const void *)ptr.p8 > end) {
+			dprintk(1, "DataLEB overrun (%p,%p).", ptr.p8, end);
 			return 0;
+		}
 		break;
 	default:
+		dprintk(2, "Cannot decode pointer type %02X (%p,%p).",
+		        ptrType, ptr.p8, end);
 		return 0;
 	}
 	switch(ptrType & DW_EH_PE_ADJUST) {
@@ -546,12 +579,33 @@
 	case DW_EH_PE_pcrel:
 		value += (unsigned long)*pLoc;
 		break;
+	case DW_EH_PE_textrel:
+		if (likely(text_base)) {
+			value += text_base;
+			break;
+		}
+		dprintk(2, "Text-relative encoding %02X (%p,%p), but zero text base.",
+		        ptrType, *pLoc, end);
+		return 0;
+	case DW_EH_PE_datarel:
+		if (likely(data_base)) {
+			value += data_base;
+			break;
+		}
+		dprintk(2, "Data-relative encoding %02X (%p,%p), but zero data base.",
+		        ptrType, *pLoc, end);
+		return 0;
 	default:
+		dprintk(2, "Cannot adjust pointer type %02X (%p,%p).",
+		        ptrType, *pLoc, end);
 		return 0;
 	}
 	if ((ptrType & DW_EH_PE_indirect)
-	    && __get_user(value, (unsigned long *)value))
+	    && probe_kernel_address((unsigned long *)value, value)) {
+		dprintk(1, "Cannot read indirect value %lx (%p,%p).",
+		        value, *pLoc, end);
 		return 0;
+	}
 	*pLoc = ptr.p8;
 
 	return value;
@@ -594,7 +648,8 @@
 			case 'P': {
 					signed ptrType = *ptr++;
 
-					if (!read_pointer(&ptr, end, ptrType) || ptr > end)
+					if (!read_pointer(&ptr, end, ptrType, 0, 0)
+					    || ptr > end)
 						return -1;
 				}
 				break;
@@ -654,7 +709,8 @@
 			case DW_CFA_nop:
 				break;
 			case DW_CFA_set_loc:
-				if ((state->loc = read_pointer(&ptr.p8, end, ptrType)) == 0)
+				state->loc = read_pointer(&ptr.p8, end, ptrType, 0, 0);
+				if (state->loc == 0)
 					result = 0;
 				break;
 			case DW_CFA_advance_loc1:
@@ -700,8 +756,10 @@
 					state->label = NULL;
 					return 1;
 				}
-				if (state->stackDepth >= MAX_STACK_DEPTH)
+				if (state->stackDepth >= MAX_STACK_DEPTH) {
+					dprintk(1, "State stack overflow (%p,%p).", ptr.p8, end);
 					return 0;
+				}
 				state->stack[state->stackDepth++] = ptr.p8;
 				break;
 			case DW_CFA_restore_state:
@@ -716,8 +774,10 @@
 					result = processCFI(start, end, 0, ptrType, state);
 					state->loc = loc;
 					state->label = label;
-				} else
+				} else {
+					dprintk(1, "State stack underflow (%p,%p).", ptr.p8, end);
 					return 0;
+				}
 				break;
 			case DW_CFA_def_cfa:
 				state->cfa.reg = get_uleb128(&ptr.p8, end);
@@ -749,6 +809,7 @@
 				break;
 			case DW_CFA_GNU_window_save:
 			default:
+				dprintk(1, "Unrecognized CFI op %02X (%p,%p).", ptr.p8[-1], ptr.p8 - 1, end);
 				result = 0;
 				break;
 			}
@@ -764,12 +825,17 @@
 			set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state);
 			break;
 		}
-		if (ptr.p8 > end)
+		if (ptr.p8 > end) {
+			dprintk(1, "Data overrun (%p,%p).", ptr.p8, end);
 			result = 0;
+		}
 		if (result && targetLoc != 0 && targetLoc < state->loc)
 			return 1;
 	}
 
+	if (result && ptr.p8 < end)
+		dprintk(1, "Data underrun (%p,%p).", ptr.p8, end);
+
 	return result
 	   && ptr.p8 == end
 	   && (targetLoc == 0
@@ -786,7 +852,7 @@
 #define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
 	const u32 *fde = NULL, *cie = NULL;
 	const u8 *ptr = NULL, *end = NULL;
-	unsigned long pc = UNW_PC(frame) - frame->call_frame;
+	unsigned long pc = UNW_PC(frame) - frame->call_frame, sp;
 	unsigned long startLoc = 0, endLoc = 0, cfa;
 	unsigned i;
 	signed ptrType = -1;
@@ -813,9 +879,9 @@
 			ptr = hdr + 4;
 			end = hdr + table->hdrsz;
 			if (tableSize
-			    && read_pointer(&ptr, end, hdr[1])
+			    && read_pointer(&ptr, end, hdr[1], 0, 0)
 			       == (unsigned long)table->address
-			    && (i = read_pointer(&ptr, end, hdr[2])) > 0
+			    && (i = read_pointer(&ptr, end, hdr[2], 0, 0)) > 0
 			    && i == (end - ptr) / (2 * tableSize)
 			    && !((end - ptr) % (2 * tableSize))) {
 				do {
@@ -823,7 +889,8 @@
 
 					startLoc = read_pointer(&cur,
 					                        cur + tableSize,
-					                        hdr[3]);
+					                        hdr[3], 0,
+					                        (unsigned long)hdr);
 					if (pc < startLoc)
 						i /= 2;
 					else {
@@ -834,13 +901,17 @@
 				if (i == 1
 				    && (startLoc = read_pointer(&ptr,
 				                                ptr + tableSize,
-				                                hdr[3])) != 0
+				                                hdr[3], 0,
+				                                (unsigned long)hdr)) != 0
 				    && pc >= startLoc)
 					fde = (void *)read_pointer(&ptr,
 					                           ptr + tableSize,
-					                           hdr[3]);
+					                           hdr[3], 0,
+					                           (unsigned long)hdr);
 			}
 		}
+		if(hdr && !fde)
+			dprintk(3, "Binary lookup for %lx failed.", pc);
 
 		if (fde != NULL) {
 			cie = cie_for_fde(fde, table);
@@ -851,17 +922,19 @@
 			   && (ptrType = fde_pointer_type(cie)) >= 0
 			   && read_pointer(&ptr,
 			                   (const u8 *)(fde + 1) + *fde,
-			                   ptrType) == startLoc) {
+			                   ptrType, 0, 0) == startLoc) {
 				if (!(ptrType & DW_EH_PE_indirect))
 					ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed;
 				endLoc = startLoc
 				         + read_pointer(&ptr,
 				                        (const u8 *)(fde + 1) + *fde,
-				                        ptrType);
+				                        ptrType, 0, 0);
 				if(pc >= endLoc)
 					fde = NULL;
 			} else
 				fde = NULL;
+			if(!fde)
+				dprintk(1, "Binary lookup result for %lx discarded.", pc);
 		}
 		if (fde == NULL) {
 			for (fde = table->address, tableSize = table->size;
@@ -881,7 +954,7 @@
 				ptr = (const u8 *)(fde + 2);
 				startLoc = read_pointer(&ptr,
 				                        (const u8 *)(fde + 1) + *fde,
-				                        ptrType);
+				                        ptrType, 0, 0);
 				if (!startLoc)
 					continue;
 				if (!(ptrType & DW_EH_PE_indirect))
@@ -889,10 +962,12 @@
 				endLoc = startLoc
 				         + read_pointer(&ptr,
 				                        (const u8 *)(fde + 1) + *fde,
-				                        ptrType);
+				                        ptrType, 0, 0);
 				if (pc >= startLoc && pc < endLoc)
 					break;
 			}
+			if(!fde)
+				dprintk(3, "Linear lookup for %lx failed.", pc);
 		}
 	}
 	if (cie != NULL) {
@@ -926,6 +1001,8 @@
 			if (ptr >= end || *ptr)
 				cie = NULL;
 		}
+		if(!cie)
+			dprintk(1, "CIE unusable (%p,%p).", ptr, end);
 		++ptr;
 	}
 	if (cie != NULL) {
@@ -935,7 +1012,12 @@
 		state.dataAlign = get_sleb128(&ptr, end);
 		if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
 			cie = NULL;
-		else {
+		else if (UNW_PC(frame) % state.codeAlign
+		         || UNW_SP(frame) % sleb128abs(state.dataAlign)) {
+			dprintk(1, "Input pointer(s) misaligned (%lx,%lx).",
+			        UNW_PC(frame), UNW_SP(frame));
+			return -EPERM;
+		} else {
 			retAddrReg = state.version <= 1 ? *ptr++ : get_uleb128(&ptr, end);
 			/* skip augmentation */
 			if (((const char *)(cie + 2))[1] == 'z') {
@@ -949,6 +1031,8 @@
 			   || reg_info[retAddrReg].width != sizeof(unsigned long))
 				cie = NULL;
 		}
+		if(!cie)
+			dprintk(1, "CIE validation failed (%p,%p).", ptr, end);
 	}
 	if (cie != NULL) {
 		state.cieStart = ptr;
@@ -962,11 +1046,15 @@
 			if ((ptr += augSize) > end)
 				fde = NULL;
 		}
+		if(!fde)
+			dprintk(1, "FDE validation failed (%p,%p).", ptr, end);
 	}
 	if (cie == NULL || fde == NULL) {
 #ifdef CONFIG_FRAME_POINTER
 		unsigned long top, bottom;
 
+		if ((UNW_SP(frame) | UNW_FP(frame)) % sizeof(unsigned long))
+			return -EPERM;
 		top = STACK_TOP(frame->task);
 		bottom = STACK_BOTTOM(frame->task);
 # if FRAME_RETADDR_OFFSET < 0
@@ -982,18 +1070,19 @@
 		        & (sizeof(unsigned long) - 1))) {
 			unsigned long link;
 
-			if (!__get_user(link,
+			if (!probe_kernel_address(
 			                (unsigned long *)(UNW_FP(frame)
-			                                  + FRAME_LINK_OFFSET))
+			                                  + FRAME_LINK_OFFSET),
+						  link)
 # if FRAME_RETADDR_OFFSET < 0
 			   && link > bottom && link < UNW_FP(frame)
 # else
 			   && link > UNW_FP(frame) && link < bottom
 # endif
 			   && !(link & (sizeof(link) - 1))
-			   && !__get_user(UNW_PC(frame),
+			   && !probe_kernel_address(
 			                  (unsigned long *)(UNW_FP(frame)
-			                                    + FRAME_RETADDR_OFFSET))) {
+			                                    + FRAME_RETADDR_OFFSET), UNW_PC(frame))) {
 				UNW_SP(frame) = UNW_FP(frame) + FRAME_RETADDR_OFFSET
 # if FRAME_RETADDR_OFFSET < 0
 					-
@@ -1016,8 +1105,11 @@
 	   || state.regs[retAddrReg].where == Nowhere
 	   || state.cfa.reg >= ARRAY_SIZE(reg_info)
 	   || reg_info[state.cfa.reg].width != sizeof(unsigned long)
-	   || state.cfa.offs % sizeof(unsigned long))
+	   || FRAME_REG(state.cfa.reg, unsigned long) % sizeof(unsigned long)
+	   || state.cfa.offs % sizeof(unsigned long)) {
+		dprintk(1, "Unusable unwind info (%p,%p).", ptr, end);
 		return -EIO;
+	}
 	/* update frame */
 #ifndef CONFIG_AS_CFI_SIGNAL_FRAME
 	if(frame->call_frame
@@ -1036,10 +1128,14 @@
 #else
 # define CASES CASE(8); CASE(16); CASE(32); CASE(64)
 #endif
+	pc = UNW_PC(frame);
+	sp = UNW_SP(frame);
 	for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
 		if (REG_INVALID(i)) {
 			if (state.regs[i].where == Nowhere)
 				continue;
+			dprintk(1, "Cannot restore register %u (%d).",
+			        i, state.regs[i].where);
 			return -EIO;
 		}
 		switch(state.regs[i].where) {
@@ -1048,8 +1144,11 @@
 		case Register:
 			if (state.regs[i].value >= ARRAY_SIZE(reg_info)
 			   || REG_INVALID(state.regs[i].value)
-			   || reg_info[i].width > reg_info[state.regs[i].value].width)
+			   || reg_info[i].width > reg_info[state.regs[i].value].width) {
+				dprintk(1, "Cannot restore register %u from register %lu.",
+				        i, state.regs[i].value);
 				return -EIO;
+			}
 			switch(reg_info[state.regs[i].value].width) {
 #define CASE(n) \
 			case sizeof(u##n): \
@@ -1059,6 +1158,9 @@
 			CASES;
 #undef CASE
 			default:
+				dprintk(1, "Unsupported register size %u (%lu).",
+				        reg_info[state.regs[i].value].width,
+				        state.regs[i].value);
 				return -EIO;
 			}
 			break;
@@ -1083,12 +1185,17 @@
 			CASES;
 #undef CASE
 			default:
+				dprintk(1, "Unsupported register size %u (%u).",
+				        reg_info[i].width, i);
 				return -EIO;
 			}
 			break;
 		case Value:
-			if (reg_info[i].width != sizeof(unsigned long))
+			if (reg_info[i].width != sizeof(unsigned long)) {
+				dprintk(1, "Unsupported value size %u (%u).",
+				        reg_info[i].width, i);
 				return -EIO;
+			}
 			FRAME_REG(i, unsigned long) = cfa + state.regs[i].value
 			                                    * state.dataAlign;
 			break;
@@ -1100,15 +1207,20 @@
 				    % sizeof(unsigned long)
 				    || addr < startLoc
 				    || addr + sizeof(unsigned long) < addr
-				    || addr + sizeof(unsigned long) > endLoc)
+				    || addr + sizeof(unsigned long) > endLoc) {
+					dprintk(1, "Bad memory location %lx (%lx).",
+					        addr, state.regs[i].value);
 					return -EIO;
+				}
 				switch(reg_info[i].width) {
 #define CASE(n)     case sizeof(u##n): \
-					__get_user(FRAME_REG(i, u##n), (u##n *)addr); \
+					probe_kernel_address((u##n *)addr, FRAME_REG(i, u##n)); \
 					break
 				CASES;
 #undef CASE
 				default:
+					dprintk(1, "Unsupported memory size %u (%u).",
+					        reg_info[i].width, i);
 					return -EIO;
 				}
 			}
@@ -1116,6 +1228,17 @@
 		}
 	}
 
+	if (UNW_PC(frame) % state.codeAlign
+	    || UNW_SP(frame) % sleb128abs(state.dataAlign)) {
+		dprintk(1, "Output pointer(s) misaligned (%lx,%lx).",
+		        UNW_PC(frame), UNW_SP(frame));
+		return -EIO;
+	}
+	if (pc == UNW_PC(frame) && sp == UNW_SP(frame)) {
+		dprintk(1, "No progress (%lx,%lx).", pc, sp);
+		return -EIO;
+	}
+
 	return 0;
 #undef CASES
 #undef FRAME_REG
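
The dprintk() macro introduced above relies on the short-circuit behaviour of ||: printk() only runs when lvl is not above the level set with the unwind_debug= boot parameter, and the result is cast to void so the macro can be used in expression context. A behaviourally equivalent statement form, shown only for illustration (the dbg name is made up):

#define dbg(lvl, fmt, args...)						\
	do {								\
		if ((lvl) <= unwind_debug)				\
			printk(KERN_DEBUG "unwind: " fmt "\n", ##args);	\
	} while (0)

Booting with unwind_debug=2, for example, would then enable the level-1 and level-2 messages added throughout read_pointer() and the table lookup paths.
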
diff --git a/kernel/user.c b/kernel/user.c
index 220e586..4869563 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -26,7 +26,7 @@
 #define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
 #define uidhashentry(uid)	(uidhash_table + __uidhashfn((uid)))
 
-static kmem_cache_t *uid_cachep;
+static struct kmem_cache *uid_cachep;
 static struct list_head uidhash_table[UIDHASH_SZ];
 
 /*
@@ -132,7 +132,7 @@
 	if (!up) {
 		struct user_struct *new;
 
-		new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
+		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
 		if (!new)
 			return NULL;
 		new->uid = uid;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 17c2f03d..6b18675 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -29,6 +29,9 @@
 #include <linux/kthread.h>
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
+#include <linux/freezer.h>
+#include <linux/kallsyms.h>
+#include <linux/debug_locks.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -55,6 +58,8 @@
 	struct task_struct *thread;
 
 	int run_depth;		/* Detect run_workqueue() recursion depth */
+
+	int freezeable;		/* Freeze the thread during suspend */
 } ____cacheline_aligned;
 
 /*
@@ -80,6 +85,102 @@
 	return list_empty(&wq->list);
 }
 
+static inline void set_wq_data(struct work_struct *work, void *wq)
+{
+	unsigned long new, old, res;
+
+	/* assume the pending flag is already set and that the task has already
+	 * been queued on this workqueue */
+	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
+	res = work->management;
+	if (res != new) {
+		do {
+			old = res;
+			new = (unsigned long) wq;
+			new |= (old & WORK_STRUCT_FLAG_MASK);
+			res = cmpxchg(&work->management, old, new);
+		} while (res != old);
+	}
+}
+
+static inline void *get_wq_data(struct work_struct *work)
+{
+	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
+}
+
+static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cwq->lock, flags);
+	/*
+	 * We need to re-validate the work info after we've gotten
+	 * the cpu_workqueue lock. We can run the work now iff:
+	 *
+	 *  - the wq_data still matches the cpu_workqueue_struct
+	 *  - AND the work is still marked pending
+	 *  - AND the work is still on a list (which will be this
+	 *    workqueue_struct list)
+	 *
+	 * All these conditions are important, because we
+	 * need to protect against the work being run right
+	 * now on another CPU (all but the last one might be
+	 * true if it's currently running and has not been
+	 * released yet, for example).
+	 */
+	if (get_wq_data(work) == cwq
+	    && work_pending(work)
+	    && !list_empty(&work->entry)) {
+		work_func_t f = work->func;
+		list_del_init(&work->entry);
+		spin_unlock_irqrestore(&cwq->lock, flags);
+
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+			work_release(work);
+		f(work);
+
+		spin_lock_irqsave(&cwq->lock, flags);
+		cwq->remove_sequence++;
+		wake_up(&cwq->work_done);
+		ret = 1;
+	}
+	spin_unlock_irqrestore(&cwq->lock, flags);
+	return ret;
+}
+
+/**
+ * run_scheduled_work - run scheduled work synchronously
+ * @work: work to run
+ *
+ * This checks if the work was pending, and runs it
+ * synchronously if so. It returns a boolean to indicate
+ * whether it had any scheduled work to run or not.
+ *
+ * NOTE! This _only_ works for normal work_structs. You
+ * CANNOT use this for delayed work, because the wq data
+ * for delayed work will not point properly to the per-
+ * CPU workqueue struct, but will change!
+ */
+int fastcall run_scheduled_work(struct work_struct *work)
+{
+	for (;;) {
+		struct cpu_workqueue_struct *cwq;
+
+		if (!work_pending(work))
+			return 0;
+		if (list_empty(&work->entry))
+			return 0;
+		/* NOTE! This depends intimately on __queue_work! */
+		cwq = get_wq_data(work);
+		if (!cwq)
+			return 0;
+		if (__run_work(cwq, work))
+			return 1;
+	}
+}
+EXPORT_SYMBOL(run_scheduled_work);
+
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
 			 struct work_struct *work)
@@ -87,7 +188,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&cwq->lock, flags);
-	work->wq_data = cwq;
+	set_wq_data(work, cwq);
 	list_add_tail(&work->entry, &cwq->worklist);
 	cwq->insert_sequence++;
 	wake_up(&cwq->more_work);
@@ -108,7 +209,7 @@
 {
 	int ret = 0, cpu = get_cpu();
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		if (unlikely(is_single_threaded(wq)))
 			cpu = singlethread_cpu;
 		BUG_ON(!list_empty(&work->entry));
@@ -122,38 +223,42 @@
 
 static void delayed_work_timer_fn(unsigned long __data)
 {
-	struct work_struct *work = (struct work_struct *)__data;
-	struct workqueue_struct *wq = work->wq_data;
+	struct delayed_work *dwork = (struct delayed_work *)__data;
+	struct workqueue_struct *wq = get_wq_data(&dwork->work);
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
 		cpu = singlethread_cpu;
 
-	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
 }
 
 /**
  * queue_delayed_work - queue work on a workqueue after delay
  * @wq: workqueue to use
- * @work: work to queue
+ * @work: delayable work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
 	int ret = 0;
-	struct timer_list *timer = &work->timer;
+	struct timer_list *timer = &dwork->timer;
+	struct work_struct *work = &dwork->work;
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (delay == 0)
+		return queue_work(wq, work);
+
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
 		/* This stores wq for the moment, for the timer_fn */
-		work->wq_data = wq;
+		set_wq_data(work, wq);
 		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)work;
+		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
 		add_timer(timer);
 		ret = 1;
@@ -172,19 +277,20 @@
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
 	int ret = 0;
-	struct timer_list *timer = &work->timer;
+	struct timer_list *timer = &dwork->timer;
+	struct work_struct *work = &dwork->work;
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
 		/* This stores wq for the moment, for the timer_fn */
-		work->wq_data = wq;
+		set_wq_data(work, wq);
 		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)work;
+		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
 		add_timer_on(timer, cpu);
 		ret = 1;
@@ -212,15 +318,26 @@
 	while (!list_empty(&cwq->worklist)) {
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
-		void (*f) (void *) = work->func;
-		void *data = work->data;
+		work_func_t f = work->func;
 
 		list_del_init(cwq->worklist.next);
 		spin_unlock_irqrestore(&cwq->lock, flags);
 
-		BUG_ON(work->wq_data != cwq);
-		clear_bit(0, &work->pending);
-		f(data);
+		BUG_ON(get_wq_data(work) != cwq);
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+			work_release(work);
+		f(work);
+
+		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
+			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
+					"%s/0x%08x/%d\n",
+					current->comm, preempt_count(),
+				       	current->pid);
+			printk(KERN_ERR "    last function: ");
+			print_symbol("%s\n", (unsigned long)f);
+			debug_show_held_locks(current);
+			dump_stack();
+		}
 
 		spin_lock_irqsave(&cwq->lock, flags);
 		cwq->remove_sequence++;
@@ -237,7 +354,8 @@
 	struct k_sigaction sa;
 	sigset_t blocked;
 
-	current->flags |= PF_NOFREEZE;
+	if (!cwq->freezeable)
+		current->flags |= PF_NOFREEZE;
 
 	set_user_nice(current, -5);
 
@@ -260,6 +378,9 @@
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
+		if (cwq->freezeable)
+			try_to_freeze();
+
 		add_wait_queue(&cwq->more_work, &wait);
 		if (list_empty(&cwq->worklist))
 			schedule();
@@ -336,7 +457,7 @@
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
-						   int cpu)
+						   int cpu, int freezeable)
 {
 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
 	struct task_struct *p;
@@ -346,6 +467,7 @@
 	cwq->thread = NULL;
 	cwq->insert_sequence = 0;
 	cwq->remove_sequence = 0;
+	cwq->freezeable = freezeable;
 	INIT_LIST_HEAD(&cwq->worklist);
 	init_waitqueue_head(&cwq->more_work);
 	init_waitqueue_head(&cwq->work_done);
@@ -361,7 +483,7 @@
 }
 
 struct workqueue_struct *__create_workqueue(const char *name,
-					    int singlethread)
+					    int singlethread, int freezeable)
 {
 	int cpu, destroy = 0;
 	struct workqueue_struct *wq;
@@ -381,7 +503,7 @@
 	mutex_lock(&workqueue_mutex);
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
-		p = create_workqueue_thread(wq, singlethread_cpu);
+		p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
 		if (!p)
 			destroy = 1;
 		else
@@ -389,7 +511,7 @@
 	} else {
 		list_add(&wq->list, &workqueues);
 		for_each_online_cpu(cpu) {
-			p = create_workqueue_thread(wq, cpu);
+			p = create_workqueue_thread(wq, cpu, freezeable);
 			if (p) {
 				kthread_bind(p, cpu);
 				wake_up_process(p);
@@ -468,38 +590,37 @@
 
 /**
  * schedule_delayed_work - put work task in global workqueue after delay
- * @work: job to be done
- * @delay: number of jiffies to wait
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue.
  */
-int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
 {
-	return queue_delayed_work(keventd_wq, work, delay);
+	return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
- * @work: job to be done
+ * @dwork: job to be done
  * @delay: number of jiffies to wait
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue on the specified CPU.
  */
 int schedule_delayed_work_on(int cpu,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
-	return queue_delayed_work_on(cpu, keventd_wq, work, delay);
+	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
 /**
  * schedule_on_each_cpu - call a function on each online CPU from keventd
  * @func: the function to call
- * @info: a pointer to pass to func()
  *
  * Returns zero on success.
  * Returns -ve errno on failure.
@@ -508,7 +629,7 @@
  *
  * schedule_on_each_cpu() is very slow.
  */
-int schedule_on_each_cpu(void (*func)(void *info), void *info)
+int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
 	struct work_struct *works;
@@ -519,7 +640,7 @@
 
 	mutex_lock(&workqueue_mutex);
 	for_each_online_cpu(cpu) {
-		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
+		INIT_WORK(per_cpu_ptr(works, cpu), func);
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
 				per_cpu_ptr(works, cpu));
 	}
@@ -539,12 +660,12 @@
  * cancel_rearming_delayed_workqueue - reliably kill off a delayed
  *			work whose handler rearms the delayed work.
  * @wq:   the controlling workqueue structure
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-				       struct work_struct *work)
+				       struct delayed_work *dwork)
 {
-	while (!cancel_delayed_work(work))
+	while (!cancel_delayed_work(dwork))
 		flush_workqueue(wq);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
@@ -552,18 +673,17 @@
 /**
  * cancel_rearming_delayed_work - reliably kill off a delayed keventd
  *			work whose handler rearms the delayed work.
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
-void cancel_rearming_delayed_work(struct work_struct *work)
+void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
-	cancel_rearming_delayed_workqueue(keventd_wq, work);
+	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
 
 /**
  * execute_in_process_context - reliably execute the routine with user context
  * @fn:		the function to execute
- * @data:	data to pass to the function
  * @ew:		guaranteed storage for the execute work structure (must
  *		be available when the work executes)
  *
@@ -573,15 +693,14 @@
  * Returns:	0 - function was executed
  *		1 - function was scheduled for execution
  */
-int execute_in_process_context(void (*fn)(void *data), void *data,
-			       struct execute_work *ew)
+int execute_in_process_context(work_func_t fn, struct execute_work *ew)
 {
 	if (!in_interrupt()) {
-		fn(data);
+		fn(&ew->work);
 		return 0;
 	}
 
-	INIT_WORK(&ew->work, fn, data);
+	INIT_WORK(&ew->work, fn);
 	schedule_work(&ew->work);
 
 	return 1;
@@ -609,7 +728,6 @@
 
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* Take the work from this (downed) CPU. */
 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 {
@@ -642,7 +760,7 @@
 		mutex_lock(&workqueue_mutex);
 		/* Create a new workqueue thread for it. */
 		list_for_each_entry(wq, &workqueues, list) {
-			if (!create_workqueue_thread(wq, hotcpu)) {
+			if (!create_workqueue_thread(wq, hotcpu, 0)) {
 				printk("workqueue for %i failed\n", hotcpu);
 				return NOTIFY_BAD;
 			}
@@ -692,7 +810,6 @@
 
 	return NOTIFY_OK;
 }
-#endif
 
 void init_workqueues(void)
 {
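
The workqueue.c changes above are the core of the work_struct API rework: a work item no longer carries a separate data pointer, its handler receives the work_struct itself, and timer-backed work moves into its own struct delayed_work. A minimal sketch of the new calling convention as a driver would see it; struct my_dev, the handlers and the field names are illustrative, not taken from this patch:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	int pending_events;
	struct work_struct irq_work;	/* immediate work */
	struct delayed_work poll_work;	/* timer-backed work */
};

/* Handlers now take the work_struct and recover their container
 * with container_of() instead of being passed a void *data. */
static void my_irq_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, irq_work);

	dev->pending_events = 0;
}

static void my_poll_work_fn(struct work_struct *work)
{
	struct my_dev *dev =
		container_of(work, struct my_dev, poll_work.work);

	schedule_delayed_work(&dev->poll_work, HZ);	/* rearm */
}

static void my_dev_start(struct my_dev *dev)
{
	INIT_WORK(&dev->irq_work, my_irq_work_fn);
	INIT_DELAYED_WORK(&dev->poll_work, my_poll_work_fn);

	schedule_work(&dev->irq_work);
	schedule_delayed_work(&dev->poll_work, HZ);
}
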
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d367910..b75fed7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1,6 +1,7 @@
 
 config PRINTK_TIME
 	bool "Show timing information on printks"
+	depends on PRINTK
 	help
 	  Selecting this option causes timing information to be
 	  included in printk output.  This allows you to measure
diff --git a/lib/Makefile b/lib/Makefile
index cf98fab..fea8f90 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,7 +25,7 @@
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
-lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_PLIST) += plist.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 0331ed8..8a5b530 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -16,6 +16,23 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 
+/*
+ *	If a hyphen was found in get_option, this will handle the
+ *	range of numbers, M-N.  This will expand the range and insert
+ *	the values[M, M+1, ..., N] into the ints array in get_options.
+ */
+
+static int get_range(char **str, int *pint)
+{
+	int x, inc_counter, upper_range;
+
+	(*str)++;
+	upper_range = simple_strtol((*str), NULL, 0);
+	inc_counter = upper_range - *pint;
+	for (x = *pint; x < upper_range; x++)
+		*pint++ = x;
+	return inc_counter;
+}
 
 /**
  *	get_option - Parse integer from an option string
@@ -29,6 +46,7 @@
  *	0 : no int in string
  *	1 : int found, no subsequent comma
  *	2 : int found including a subsequent comma
+ *	3 : hyphen found to denote a range
  */
 
 int get_option (char **str, int *pint)
@@ -44,6 +62,8 @@
 		(*str)++;
 		return 2;
 	}
+	if (**str == '-')
+		return 3;
 
 	return 1;
 }
@@ -55,7 +75,8 @@
  *	@ints: integer array
  *
  *	This function parses a string containing a comma-separated
- *	list of integers.  The parse halts when the array is
+ *	list of integers, a hyphen-separated range of _positive_ integers,
+ *	or a combination of both.  The parse halts when the array is
  *	full, or when no more numbers can be retrieved from the
  *	string.
  *
@@ -72,6 +93,18 @@
 		res = get_option ((char **)&str, ints + i);
 		if (res == 0)
 			break;
+		if (res == 3) {
+			int range_nums;
+			range_nums = get_range((char **)&str, ints + i);
+			if (range_nums < 0)
+				break;
+			/*
+			 * Decrement the result by one to leave out the
+			 * last number in the range.  The next iteration
+			 * will handle the upper number in the range
+			 */
+			i += (range_nums - 1);
+		}
 		i++;
 		if (res == 1)
 			break;
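
The cmdline.c change teaches get_options() to accept hyphenated ranges alongside comma-separated values. A rough illustration of the effect; the calling code is hypothetical, but the resulting array contents follow from get_range() and get_option() above:

	int ints[10];	/* ints[0] receives the count of parsed values */

	get_options("1-3,5", ARRAY_SIZE(ints), ints);
	/* afterwards: ints[0] == 4 and ints[1..4] == { 1, 2, 3, 5 } */
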
diff --git a/lib/idr.c b/lib/idr.c
index 16d2143..7185353 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -33,7 +33,7 @@
 #include <linux/string.h>
 #include <linux/idr.h>
 
-static kmem_cache_t *idr_layer_cache;
+static struct kmem_cache *idr_layer_cache;
 
 static struct idr_layer *alloc_layer(struct idr *idp)
 {
@@ -445,7 +445,7 @@
 }
 EXPORT_SYMBOL(idr_replace);
 
-static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache,
+static void idr_cache_ctor(void * idr_layer, struct kmem_cache *idr_layer_cache,
 		unsigned long flags)
 {
 	memset(idr_layer, 0, sizeof(struct idr_layer));
diff --git a/lib/iomap.c b/lib/iomap.c
index 55689c5..d6ccdd8 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -50,6 +50,16 @@
 	}							\
 } while (0)
 
+#ifndef pio_read16be
+#define pio_read16be(port) swab16(inw(port))
+#define pio_read32be(port) swab32(inl(port))
+#endif
+
+#ifndef mmio_read16be
+#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr))
+#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr))
+#endif
+
 unsigned int fastcall ioread8(void __iomem *addr)
 {
 	IO_COND(addr, return inb(port), return readb(addr));
@@ -60,7 +70,7 @@
 }
 unsigned int fastcall ioread16be(void __iomem *addr)
 {
-	IO_COND(addr, return inw(port), return be16_to_cpu(__raw_readw(addr)));
+	IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
 }
 unsigned int fastcall ioread32(void __iomem *addr)
 {
@@ -68,7 +78,7 @@
 }
 unsigned int fastcall ioread32be(void __iomem *addr)
 {
-	IO_COND(addr, return inl(port), return be32_to_cpu(__raw_readl(addr)));
+	IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
 }
 EXPORT_SYMBOL(ioread8);
 EXPORT_SYMBOL(ioread16);
@@ -76,6 +86,16 @@
 EXPORT_SYMBOL(ioread32);
 EXPORT_SYMBOL(ioread32be);
 
+#ifndef pio_write16be
+#define pio_write16be(val,port) outw(swab16(val),port)
+#define pio_write32be(val,port) outl(swab32(val),port)
+#endif
+
+#ifndef mmio_write16be
+#define mmio_write16be(val,port) __raw_writew(be16_to_cpu(val),port)
+#define mmio_write32be(val,port) __raw_writel(be32_to_cpu(val),port)
+#endif
+
 void fastcall iowrite8(u8 val, void __iomem *addr)
 {
 	IO_COND(addr, outb(val,port), writeb(val, addr));
@@ -86,7 +106,7 @@
 }
 void fastcall iowrite16be(u16 val, void __iomem *addr)
 {
-	IO_COND(addr, outw(val,port), __raw_writew(cpu_to_be16(val), addr));
+	IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr));
 }
 void fastcall iowrite32(u32 val, void __iomem *addr)
 {
@@ -94,7 +114,7 @@
 }
 void fastcall iowrite32be(u32 val, void __iomem *addr)
 {
-	IO_COND(addr, outl(val,port), __raw_writel(cpu_to_be32(val), addr));
+	IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr));
 }
 EXPORT_SYMBOL(iowrite8);
 EXPORT_SYMBOL(iowrite16);
@@ -108,6 +128,7 @@
  * convert to CPU byte order. We write in "IO byte
  * order" (we also don't have IO barriers).
  */
+#ifndef mmio_insb
 static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
 {
 	while (--count >= 0) {
@@ -132,7 +153,9 @@
 		dst++;
 	}
 }
+#endif
 
+#ifndef mmio_outsb
 static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
 {
 	while (--count >= 0) {
@@ -154,6 +177,7 @@
 		src++;
 	}
 }
+#endif
 
 void fastcall ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
 {
diff --git a/lib/kobject.c b/lib/kobject.c
index 7dd5c0e..7ce6dc1 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -111,10 +111,9 @@
 	len = get_kobj_path_length(kobj);
 	if (len == 0)
 		return NULL;
-	path = kmalloc(len, gfp_mask);
+	path = kzalloc(len, gfp_mask);
 	if (!path)
 		return NULL;
-	memset(path, 0x00, len);
 	fill_kobj_path(kobj, path, len);
 
 	return path;
@@ -311,6 +310,56 @@
 }
 
 /**
+ *	kobject_move - move object to another parent
+ *	@kobj:	object in question.
+ *	@new_parent: object's new parent
+ */
+
+int kobject_move(struct kobject *kobj, struct kobject *new_parent)
+{
+	int error;
+	struct kobject *old_parent;
+	const char *devpath = NULL;
+	char *devpath_string = NULL;
+	char *envp[2];
+
+	kobj = kobject_get(kobj);
+	if (!kobj)
+		return -EINVAL;
+	new_parent = kobject_get(new_parent);
+	if (!new_parent) {
+		error = -EINVAL;
+		goto out;
+	}
+	/* old object path */
+	devpath = kobject_get_path(kobj, GFP_KERNEL);
+	if (!devpath) {
+		error = -ENOMEM;
+		goto out;
+	}
+	devpath_string = kmalloc(strlen(devpath) + 15, GFP_KERNEL);
+	if (!devpath_string) {
+		error = -ENOMEM;
+		goto out;
+	}
+	sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
+	envp[0] = devpath_string;
+	envp[1] = NULL;
+	error = sysfs_move_dir(kobj, new_parent);
+	if (error)
+		goto out;
+	old_parent = kobj->parent;
+	kobj->parent = new_parent;
+	kobject_put(old_parent);
+	kobject_uevent_env(kobj, KOBJ_MOVE, envp);
+out:
+	kobject_put(kobj);
+	kfree(devpath_string);
+	kfree(devpath);
+	return error;
+}
+
+/**
  *	kobject_del - unlink kobject from hierarchy.
  * 	@kobj:	object.
  */
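
kobject_move() above reparents an already-registered kobject: it asks sysfs to relocate the directory, swaps the parent reference, and emits a KOBJ_MOVE uevent that carries the old path in DEVPATH_OLD so userspace can follow the rename. A rough sketch of a caller; child, new_parent and their embedded kobjects are assumptions, not taken from this patch:

	int err;

	/* Hand "child" over to a different parent in the sysfs tree. */
	err = kobject_move(&child->kobj, &new_parent->kobj);
	if (err)
		printk(KERN_ERR "failed to move %s: %d\n",
		       kobject_name(&child->kobj), err);
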
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 7f20e7b..a192276 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -50,18 +50,22 @@
 		return "offline";
 	case KOBJ_ONLINE:
 		return "online";
+	case KOBJ_MOVE:
+		return "move";
 	default:
 		return NULL;
 	}
 }
 
 /**
- * kobject_uevent - notify userspace by ending an uevent
+ * kobject_uevent_env - send an uevent with environmental data
  *
- * @action: action that is happening (usually KOBJ_ADD and KOBJ_REMOVE)
+ * @action: action that is happening (usually KOBJ_MOVE)
  * @kobj: struct kobject that the action is happening to
+ * @envp_ext: pointer to environmental data
  */
-void kobject_uevent(struct kobject *kobj, enum kobject_action action)
+void kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
+			char *envp_ext[])
 {
 	char **envp;
 	char *buffer;
@@ -76,6 +80,7 @@
 	char *seq_buff;
 	int i = 0;
 	int retval;
+	int j;
 
 	pr_debug("%s\n", __FUNCTION__);
 
@@ -134,7 +139,8 @@
 	scratch += sprintf (scratch, "DEVPATH=%s", devpath) + 1;
 	envp [i++] = scratch;
 	scratch += sprintf(scratch, "SUBSYSTEM=%s", subsystem) + 1;
-
+	for (j = 0; envp_ext && envp_ext[j]; j++)
+		envp[i++] = envp_ext[j];
 	/* just reserve the space, overwrite it after kset call has returned */
 	envp[i++] = seq_buff = scratch;
 	scratch += strlen("SEQNUM=18446744073709551616") + 1;
@@ -200,6 +206,20 @@
 	kfree(envp);
 	return;
 }
+
+EXPORT_SYMBOL_GPL(kobject_uevent_env);
+
+/**
+ * kobject_uevent - notify userspace by sending an uevent
+ *
+ * @action: action that is happening (usually KOBJ_ADD and KOBJ_REMOVE)
+ * @kobj: struct kobject that the action is happening to
+ */
+void kobject_uevent(struct kobject *kobj, enum kobject_action action)
+{
+	kobject_uevent_env(kobj, action, NULL);
+}
+
 EXPORT_SYMBOL_GPL(kobject_uevent);
 
 /**
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 7ba9d82..4350ba9 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -21,13 +21,15 @@
 			      struct list_head *next)
 {
 	if (unlikely(next->prev != prev)) {
-		printk(KERN_ERR "list_add corruption. next->prev should be %p, but was %p\n",
-			prev, next->prev);
+		printk(KERN_ERR "list_add corruption. next->prev should be "
+			"prev (%p), but was %p. (next=%p).\n",
+			prev, next->prev, next);
 		BUG();
 	}
 	if (unlikely(prev->next != next)) {
-		printk(KERN_ERR "list_add corruption. prev->next should be %p, but was %p\n",
-			next, prev->next);
+		printk(KERN_ERR "list_add corruption. prev->next should be "
+			"next (%p), but was %p. (prev=%p).\n",
+			next, prev->next, prev);
 		BUG();
 	}
 	next->prev = new;
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 7945787..280332c 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -963,7 +963,9 @@
 			printk("failed|");
 		} else {
 			unexpected_testcase_failures++;
+
 			printk("FAILED|");
+			dump_stack();
 		}
 	} else {
 		testcase_successes++;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index aa9bfd0..d69ddbe 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -2,6 +2,7 @@
  * Copyright (C) 2001 Momchil Velikov
  * Portions Copyright (C) 2001 Christoph Hellwig
  * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
+ * Copyright (C) 2006 Nick Piggin
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -30,6 +31,7 @@
 #include <linux/gfp.h>
 #include <linux/string.h>
 #include <linux/bitops.h>
+#include <linux/rcupdate.h>
 
 
 #ifdef __KERNEL__
@@ -45,7 +47,9 @@
 	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
 
 struct radix_tree_node {
+	unsigned int	height;		/* Height from the bottom */
 	unsigned int	count;
+	struct rcu_head	rcu_head;
 	void		*slots[RADIX_TREE_MAP_SIZE];
 	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
@@ -63,7 +67,7 @@
 /*
  * Radix tree node cache.
  */
-static kmem_cache_t *radix_tree_node_cachep;
+static struct kmem_cache *radix_tree_node_cachep;
 
 /*
  * Per-cpu pool of preloaded nodes
@@ -100,13 +104,21 @@
 			rtp->nr--;
 		}
 	}
+	BUG_ON(radix_tree_is_direct_ptr(ret));
 	return ret;
 }
 
+static void radix_tree_node_rcu_free(struct rcu_head *head)
+{
+	struct radix_tree_node *node =
+			container_of(head, struct radix_tree_node, rcu_head);
+	kmem_cache_free(radix_tree_node_cachep, node);
+}
+
 static inline void
 radix_tree_node_free(struct radix_tree_node *node)
 {
-	kmem_cache_free(radix_tree_node_cachep, node);
+	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
 }
 
 /*
@@ -222,11 +234,12 @@
 	}
 
 	do {
+		unsigned int newheight;
 		if (!(node = radix_tree_node_alloc(root)))
 			return -ENOMEM;
 
 		/* Increase the height.  */
-		node->slots[0] = root->rnode;
+		node->slots[0] = radix_tree_direct_to_ptr(root->rnode);
 
 		/* Propagate the aggregated tag info into the new root */
 		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
@@ -234,9 +247,11 @@
 				tag_set(node, tag, 0);
 		}
 
+		newheight = root->height+1;
+		node->height = newheight;
 		node->count = 1;
-		root->rnode = node;
-		root->height++;
+		rcu_assign_pointer(root->rnode, node);
+		root->height = newheight;
 	} while (height > root->height);
 out:
 	return 0;
@@ -258,6 +273,8 @@
 	int offset;
 	int error;
 
+	BUG_ON(radix_tree_is_direct_ptr(item));
+
 	/* Make sure the tree is high enough.  */
 	if (index > radix_tree_maxindex(root->height)) {
 		error = radix_tree_extend(root, index);
@@ -275,11 +292,12 @@
 			/* Have to add a child node.  */
 			if (!(slot = radix_tree_node_alloc(root)))
 				return -ENOMEM;
+			slot->height = height;
 			if (node) {
-				node->slots[offset] = slot;
+				rcu_assign_pointer(node->slots[offset], slot);
 				node->count++;
 			} else
-				root->rnode = slot;
+				rcu_assign_pointer(root->rnode, slot);
 		}
 
 		/* Go a level down */
@@ -295,11 +313,11 @@
 
 	if (node) {
 		node->count++;
-		node->slots[offset] = item;
+		rcu_assign_pointer(node->slots[offset], item);
 		BUG_ON(tag_get(node, 0, offset));
 		BUG_ON(tag_get(node, 1, offset));
 	} else {
-		root->rnode = item;
+		rcu_assign_pointer(root->rnode, radix_tree_ptr_to_direct(item));
 		BUG_ON(root_tag_get(root, 0));
 		BUG_ON(root_tag_get(root, 1));
 	}
@@ -308,48 +326,53 @@
 }
 EXPORT_SYMBOL(radix_tree_insert);
 
-static inline void **__lookup_slot(struct radix_tree_root *root,
-				   unsigned long index)
-{
-	unsigned int height, shift;
-	struct radix_tree_node **slot;
-
-	height = root->height;
-
-	if (index > radix_tree_maxindex(height))
-		return NULL;
-
-	if (height == 0 && root->rnode)
-		return (void **)&root->rnode;
-
-	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-	slot = &root->rnode;
-
-	while (height > 0) {
-		if (*slot == NULL)
-			return NULL;
-
-		slot = (struct radix_tree_node **)
-			((*slot)->slots +
-				((index >> shift) & RADIX_TREE_MAP_MASK));
-		shift -= RADIX_TREE_MAP_SHIFT;
-		height--;
-	}
-
-	return (void **)slot;
-}
-
 /**
  *	radix_tree_lookup_slot    -    lookup a slot in a radix tree
  *	@root:		radix tree root
  *	@index:		index key
  *
- *	Lookup the slot corresponding to the position @index in the radix tree
- *	@root. This is useful for update-if-exists operations.
+ *	Returns:  the slot corresponding to the position @index in the
+ *	radix tree @root. This is useful for update-if-exists operations.
+ *
+ *	This function cannot be called under rcu_read_lock; it must be
+ *	excluded from writers, as must the returned slot for subsequent
+ *	use by radix_tree_deref_slot() and radix_tree_replace_slot().
+ *	Caller must hold tree write locked across slot lookup and
+ *	replace.
  */
 void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
 {
-	return __lookup_slot(root, index);
+	unsigned int height, shift;
+	struct radix_tree_node *node, **slot;
+
+	node = root->rnode;
+	if (node == NULL)
+		return NULL;
+
+	if (radix_tree_is_direct_ptr(node)) {
+		if (index > 0)
+			return NULL;
+		return (void **)&root->rnode;
+	}
+
+	height = node->height;
+	if (index > radix_tree_maxindex(height))
+		return NULL;
+
+	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
+
+	do {
+		slot = (struct radix_tree_node **)
+			(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
+		node = *slot;
+		if (node == NULL)
+			return NULL;
+
+		shift -= RADIX_TREE_MAP_SHIFT;
+		height--;
+	} while (height > 0);
+
+	return (void **)slot;
 }
 EXPORT_SYMBOL(radix_tree_lookup_slot);
 
@@ -359,13 +382,45 @@
  *	@index:		index key
  *
  *	Lookup the item at the position @index in the radix tree @root.
+ *
+ *	This function can be called under rcu_read_lock, however the caller
+ *	must manage lifetimes of leaf nodes (eg. RCU may also be used to free
+ *	them safely). No RCU barriers are required to access or modify the
+ *	returned item, however.
  */
 void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
 {
-	void **slot;
+	unsigned int height, shift;
+	struct radix_tree_node *node, **slot;
 
-	slot = __lookup_slot(root, index);
-	return slot != NULL ? *slot : NULL;
+	node = rcu_dereference(root->rnode);
+	if (node == NULL)
+		return NULL;
+
+	if (radix_tree_is_direct_ptr(node)) {
+		if (index > 0)
+			return NULL;
+		return radix_tree_direct_to_ptr(node);
+	}
+
+	height = node->height;
+	if (index > radix_tree_maxindex(height))
+		return NULL;
+
+	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
+
+	do {
+		slot = (struct radix_tree_node **)
+			(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
+		node = rcu_dereference(*slot);
+		if (node == NULL)
+			return NULL;
+
+		shift -= RADIX_TREE_MAP_SHIFT;
+		height--;
+	} while (height > 0);
+
+	return node;
 }
 EXPORT_SYMBOL(radix_tree_lookup);
 
@@ -495,27 +550,30 @@
 			unsigned long index, unsigned int tag)
 {
 	unsigned int height, shift;
-	struct radix_tree_node *slot;
+	struct radix_tree_node *node;
 	int saw_unset_tag = 0;
 
-	height = root->height;
-	if (index > radix_tree_maxindex(height))
-		return 0;
-
 	/* check the root's tag bit */
 	if (!root_tag_get(root, tag))
 		return 0;
 
-	if (height == 0)
-		return 1;
+	node = rcu_dereference(root->rnode);
+	if (node == NULL)
+		return 0;
+
+	if (radix_tree_is_direct_ptr(node))
+		return (index == 0);
+
+	height = node->height;
+	if (index > radix_tree_maxindex(height))
+		return 0;
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-	slot = root->rnode;
 
 	for ( ; ; ) {
 		int offset;
 
-		if (slot == NULL)
+		if (node == NULL)
 			return 0;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
@@ -524,15 +582,15 @@
 		 * This is just a debug check.  Later, we can bale as soon as
 		 * we see an unset tag.
 		 */
-		if (!tag_get(slot, tag, offset))
+		if (!tag_get(node, tag, offset))
 			saw_unset_tag = 1;
 		if (height == 1) {
-			int ret = tag_get(slot, tag, offset);
+			int ret = tag_get(node, tag, offset);
 
 			BUG_ON(ret && saw_unset_tag);
 			return !!ret;
 		}
-		slot = slot->slots[offset];
+		node = rcu_dereference(node->slots[offset]);
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
 	}
@@ -541,47 +599,45 @@
 #endif
 
 static unsigned int
-__lookup(struct radix_tree_root *root, void **results, unsigned long index,
+__lookup(struct radix_tree_node *slot, void **results, unsigned long index,
 	unsigned int max_items, unsigned long *next_index)
 {
 	unsigned int nr_found = 0;
 	unsigned int shift, height;
-	struct radix_tree_node *slot;
 	unsigned long i;
 
-	height = root->height;
-	if (height == 0) {
-		if (root->rnode && index == 0)
-			results[nr_found++] = root->rnode;
+	height = slot->height;
+	if (height == 0)
 		goto out;
-	}
-
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-	slot = root->rnode;
 
 	for ( ; height > 1; height--) {
-
-		for (i = (index >> shift) & RADIX_TREE_MAP_MASK ;
-				i < RADIX_TREE_MAP_SIZE; i++) {
+		i = (index >> shift) & RADIX_TREE_MAP_MASK;
+		for (;;) {
 			if (slot->slots[i] != NULL)
 				break;
 			index &= ~((1UL << shift) - 1);
 			index += 1UL << shift;
 			if (index == 0)
 				goto out;	/* 32-bit wraparound */
+			i++;
+			if (i == RADIX_TREE_MAP_SIZE)
+				goto out;
 		}
-		if (i == RADIX_TREE_MAP_SIZE)
-			goto out;
 
 		shift -= RADIX_TREE_MAP_SHIFT;
-		slot = slot->slots[i];
+		slot = rcu_dereference(slot->slots[i]);
+		if (slot == NULL)
+			goto out;
 	}
 
 	/* Bottom level: grab some items */
 	for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
+		struct radix_tree_node *node;
 		index++;
-		if (slot->slots[i]) {
-			results[nr_found++] = slot->slots[i];
+		node = slot->slots[i];
+		if (node) {
+			results[nr_found++] = rcu_dereference(node);
 			if (nr_found == max_items)
 				goto out;
 		}
@@ -603,28 +659,51 @@
  *	*@results.
  *
  *	The implementation is naive.
+ *
+ *	Like radix_tree_lookup, radix_tree_gang_lookup may be called under
+ *	rcu_read_lock. In this case, rather than the returned results being
+ *	an atomic snapshot of the tree at a single point in time, the semantics
+ *	of an RCU protected gang lookup are as though multiple radix_tree_lookups
+ *	have been issued in individual locks, and results stored in 'results'.
  */
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 			unsigned long first_index, unsigned int max_items)
 {
-	const unsigned long max_index = radix_tree_maxindex(root->height);
+	unsigned long max_index;
+	struct radix_tree_node *node;
 	unsigned long cur_index = first_index;
-	unsigned int ret = 0;
+	unsigned int ret;
 
+	node = rcu_dereference(root->rnode);
+	if (!node)
+		return 0;
+
+	if (radix_tree_is_direct_ptr(node)) {
+		if (first_index > 0)
+			return 0;
+		node = radix_tree_direct_to_ptr(node);
+		results[0] = rcu_dereference(node);
+		return 1;
+	}
+
+	max_index = radix_tree_maxindex(node->height);
+
+	ret = 0;
 	while (ret < max_items) {
 		unsigned int nr_found;
 		unsigned long next_index;	/* Index of next search */
 
 		if (cur_index > max_index)
 			break;
-		nr_found = __lookup(root, results + ret, cur_index,
+		nr_found = __lookup(node, results + ret, cur_index,
 					max_items - ret, &next_index);
 		ret += nr_found;
 		if (next_index == 0)
 			break;
 		cur_index = next_index;
 	}
+
 	return ret;
 }
 EXPORT_SYMBOL(radix_tree_gang_lookup);
@@ -634,55 +713,64 @@
  * open-coding the search.
  */
 static unsigned int
-__lookup_tag(struct radix_tree_root *root, void **results, unsigned long index,
+__lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
 	unsigned int max_items, unsigned long *next_index, unsigned int tag)
 {
 	unsigned int nr_found = 0;
-	unsigned int shift;
-	unsigned int height = root->height;
-	struct radix_tree_node *slot;
+	unsigned int shift, height;
 
-	if (height == 0) {
-		if (root->rnode && index == 0)
-			results[nr_found++] = root->rnode;
+	height = slot->height;
+	if (height == 0)
 		goto out;
-	}
+	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
 
-	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-	slot = root->rnode;
+	while (height > 0) {
+		unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK;
 
-	do {
-		unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK;
-
-		for ( ; i < RADIX_TREE_MAP_SIZE; i++) {
-			if (tag_get(slot, tag, i)) {
-				BUG_ON(slot->slots[i] == NULL);
+		for (;;) {
+			if (tag_get(slot, tag, i))
 				break;
-			}
 			index &= ~((1UL << shift) - 1);
 			index += 1UL << shift;
 			if (index == 0)
 				goto out;	/* 32-bit wraparound */
+			i++;
+			if (i == RADIX_TREE_MAP_SIZE)
+				goto out;
 		}
-		if (i == RADIX_TREE_MAP_SIZE)
-			goto out;
 		height--;
 		if (height == 0) {	/* Bottom level: grab some items */
 			unsigned long j = index & RADIX_TREE_MAP_MASK;
 
 			for ( ; j < RADIX_TREE_MAP_SIZE; j++) {
+				struct radix_tree_node *node;
 				index++;
-				if (tag_get(slot, tag, j)) {
-					BUG_ON(slot->slots[j] == NULL);
-					results[nr_found++] = slot->slots[j];
+				if (!tag_get(slot, tag, j))
+					continue;
+				node = slot->slots[j];
+				/*
+				 * Even though the tag was found set, we need to
+				 * recheck that we have a non-NULL node, because
+				 * if this lookup is lockless, it may have been
+				 * subsequently deleted.
+				 *
+				 * Similar care must be taken in any place that
+				 * looks up ->slots[x] without a lock (i.e. can't
+				 * rely on its value remaining the same).
+				 */
+				if (node) {
+					node = rcu_dereference(node);
+					results[nr_found++] = node;
 					if (nr_found == max_items)
 						goto out;
 				}
 			}
 		}
 		shift -= RADIX_TREE_MAP_SHIFT;
-		slot = slot->slots[i];
-	} while (height > 0);
+		slot = rcu_dereference(slot->slots[i]);
+		if (slot == NULL)
+			break;
+	}
 out:
 	*next_index = index;
 	return nr_found;
@@ -706,27 +794,44 @@
 		unsigned long first_index, unsigned int max_items,
 		unsigned int tag)
 {
-	const unsigned long max_index = radix_tree_maxindex(root->height);
+	struct radix_tree_node *node;
+	unsigned long max_index;
 	unsigned long cur_index = first_index;
-	unsigned int ret = 0;
+	unsigned int ret;
 
 	/* check the root's tag bit */
 	if (!root_tag_get(root, tag))
 		return 0;
 
+	node = rcu_dereference(root->rnode);
+	if (!node)
+		return 0;
+
+	if (radix_tree_is_direct_ptr(node)) {
+		if (first_index > 0)
+			return 0;
+		node = radix_tree_direct_to_ptr(node);
+		results[0] = rcu_dereference(node);
+		return 1;
+	}
+
+	max_index = radix_tree_maxindex(node->height);
+
+	ret = 0;
 	while (ret < max_items) {
 		unsigned int nr_found;
 		unsigned long next_index;	/* Index of next search */
 
 		if (cur_index > max_index)
 			break;
-		nr_found = __lookup_tag(root, results + ret, cur_index,
+		nr_found = __lookup_tag(node, results + ret, cur_index,
 					max_items - ret, &next_index, tag);
 		ret += nr_found;
 		if (next_index == 0)
 			break;
 		cur_index = next_index;
 	}
+
 	return ret;
 }
 EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
@@ -742,8 +847,19 @@
 			root->rnode->count == 1 &&
 			root->rnode->slots[0]) {
 		struct radix_tree_node *to_free = root->rnode;
+		void *newptr;
 
-		root->rnode = to_free->slots[0];
+		/*
+		 * We don't need rcu_assign_pointer(), since we are simply
+		 * moving the node from one part of the tree to another. If
+		 * it was safe to dereference the old pointer to it
+		 * (to_free->slots[0]), it will be safe to dereference the new
+		 * one (root->rnode).
+		 */
+		newptr = to_free->slots[0];
+		if (root->height == 1)
+			newptr = radix_tree_ptr_to_direct(newptr);
+		root->rnode = newptr;
 		root->height--;
 		/* must only free zeroed nodes into the slab */
 		tag_clear(to_free, 0, 0);
@@ -767,6 +883,7 @@
 {
 	struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
 	struct radix_tree_node *slot = NULL;
+	struct radix_tree_node *to_free;
 	unsigned int height, shift;
 	int tag;
 	int offset;
@@ -777,6 +894,7 @@
 
 	slot = root->rnode;
 	if (height == 0 && root->rnode) {
+		slot = radix_tree_direct_to_ptr(slot);
 		root_tag_clear_all(root);
 		root->rnode = NULL;
 		goto out;
@@ -809,10 +927,17 @@
 			radix_tree_tag_clear(root, index, tag);
 	}
 
+	to_free = NULL;
 	/* Now free the nodes we do not need anymore */
 	while (pathp->node) {
 		pathp->node->slots[pathp->offset] = NULL;
 		pathp->node->count--;
+		/*
+		 * Queue the node for deferred freeing after the
+		 * last reference to it disappears (set NULL, above).
+		 */
+		if (to_free)
+			radix_tree_node_free(to_free);
 
 		if (pathp->node->count) {
 			if (pathp->node == root->rnode)
@@ -821,13 +946,15 @@
 		}
 
 		/* Node with zero slots in use so free it */
-		radix_tree_node_free(pathp->node);
-
+		to_free = pathp->node;
 		pathp--;
+
 	}
 	root_tag_clear_all(root);
 	root->height = 0;
 	root->rnode = NULL;
+	if (to_free)
+		radix_tree_node_free(to_free);
 
 out:
 	return slot;
@@ -846,7 +973,7 @@
 EXPORT_SYMBOL(radix_tree_tagged);
 
 static void
-radix_tree_node_ctor(void *node, kmem_cache_t *cachep, unsigned long flags)
+radix_tree_node_ctor(void *node, struct kmem_cache *cachep, unsigned long flags)
 {
 	memset(node, 0, sizeof(struct radix_tree_node));
 }
@@ -869,7 +996,6 @@
 		height_to_maxindex[i] = __maxindex(i);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int radix_tree_callback(struct notifier_block *nfb,
                             unsigned long action,
                             void *hcpu)
@@ -889,7 +1015,6 @@
        }
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init radix_tree_init(void)
 {
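
The net effect is that readers no longer take the tree lock at all: writers
keep excluding one another, while lookups run under rcu_read_lock(). A
minimal sketch of the resulting usage pattern; my_tree, my_tree_lock and
struct my_item are illustrative names rather than anything in the patch, and
the items themselves still need RCU-safe lifetimes (typically a reference
taken before leaving the read-side section):

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct my_item;				/* opaque payload, managed elsewhere */

static RADIX_TREE(my_tree, GFP_ATOMIC);	/* statically initialized root */
static DEFINE_SPINLOCK(my_tree_lock);	/* serializes writers only */

static int my_insert(unsigned long index, struct my_item *item)
{
	int err;

	spin_lock(&my_tree_lock);
	err = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_tree_lock);
	return err;
}

static struct my_item *my_lookup(unsigned long index)
{
	struct my_item *item;

	rcu_read_lock();
	item = radix_tree_lookup(&my_tree, index);
	/* a real caller would pin 'item' (e.g. take a refcount) before unlocking */
	rcu_read_unlock();
	return item;
}
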
diff --git a/lib/random32.c b/lib/random32.c
index 4a15ce5..ec7f81d 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -36,6 +36,7 @@
 #include <linux/types.h>
 #include <linux/percpu.h>
 #include <linux/module.h>
+#include <linux/jiffies.h>
 #include <linux/random.h>
 
 struct rnd_state {
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index b6c4f89..479fd46 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/spinlock.h>
+#include <linux/nmi.h>
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
 #include <linux/delay.h>
@@ -117,6 +118,9 @@
 				raw_smp_processor_id(), current->comm,
 				current->pid, lock);
 			dump_stack();
+#ifdef CONFIG_SMP
+			trigger_all_cpu_backtrace();
+#endif
 		}
 	}
 }
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 2cb4a43..98bcadc 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -40,7 +40,7 @@
  *       configuration according to the specified parameters.
  *   (3) User starts the search(es) by calling _find() or _next() to
  *       fetch subsequent occurrences. A state variable is provided
- *       to the algorihtm to store persistant variables.
+ *       to the algorithm to store persistent variables.
  *   (4) Core eventually resets the search offset and forwards the find()
  *       request to the algorithm.
  *   (5) Algorithm calls get_next_block() provided by the user continously
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index eaa9abe..b2486cf 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -17,10 +17,9 @@
 void percpu_depopulate(void *__pdata, int cpu)
 {
 	struct percpu_data *pdata = __percpu_disguise(__pdata);
-	if (pdata->ptrs[cpu]) {
-		kfree(pdata->ptrs[cpu]);
-		pdata->ptrs[cpu] = NULL;
-	}
+
+	kfree(pdata->ptrs[cpu]);
+	pdata->ptrs[cpu] = NULL;
 }
 EXPORT_SYMBOL_GPL(percpu_depopulate);
 
@@ -123,6 +122,8 @@
  */
 void percpu_free(void *__pdata)
 {
+	if (unlikely(!__pdata))
+		return;
 	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
 	kfree(__percpu_disguise(__pdata));
 }
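
Pushing the NULL check into percpu_free() lets error and teardown paths free
unconditionally, the same way kfree(NULL) is a no-op. A small sketch of the
idiom this enables; the demo_* names are invented, and alloc_percpu() /
free_percpu() are assumed to be the usual wrappers around these helpers:

#include <linux/percpu.h>
#include <linux/errno.h>

struct demo_stats {
	unsigned long events;
};

static struct demo_stats *demo_counters;	/* stays NULL until demo_init() succeeds */

static int demo_init(void)
{
	demo_counters = alloc_percpu(struct demo_stats);
	return demo_counters ? 0 : -ENOMEM;
}

static void demo_exit(void)
{
	free_percpu(demo_counters);	/* safe even if demo_init() failed or never ran */
	demo_counters = NULL;
}
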
diff --git a/mm/bootmem.c b/mm/bootmem.c
index d53112f..00a9697 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -27,8 +27,6 @@
 unsigned long min_low_pfn;
 unsigned long max_pfn;
 
-EXPORT_UNUSED_SYMBOL(max_pfn);  /*  June 2006  */
-
 static LIST_HEAD(bdata_list);
 #ifdef CONFIG_CRASH_DUMP
 /*
@@ -196,6 +194,10 @@
 	if (limit && bdata->node_boot_start >= limit)
 		return NULL;
 
+	/* on nodes without memory - bootmem_map is NULL */
+	if (!bdata->node_bootmem_map)
+		return NULL;
+
 	end_pfn = bdata->node_low_pfn;
 	limit = PFN_DOWN(limit);
 	if (limit && end_pfn > limit)
diff --git a/mm/filemap.c b/mm/filemap.c
index 7b84dc8..af7e2f5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1445,7 +1445,6 @@
 	 * effect.
 	 */
 	error = page_cache_read(file, pgoff);
-	grab_swap_token();
 
 	/*
 	 * The page we want has now been added to the page cache.
@@ -1893,6 +1892,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(should_remove_suid);
 
 int __remove_suid(struct dentry *dentry, int kill)
 {
diff --git a/mm/fremap.c b/mm/fremap.c
index 7a9d0f5..b77a002 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -101,7 +101,6 @@
 {
 	int err = -ENOMEM;
 	pte_t *pte;
-	pte_t pte_val;
 	spinlock_t *ptl;
 
 	pte = get_locked_pte(mm, addr, &ptl);
@@ -114,7 +113,6 @@
 	}
 
 	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
-	pte_val = *pte;
 	/*
 	 * We don't need to run update_mmu_cache() here because the "file pte"
 	 * being installed by install_file_pte() is not a real pte - it's a
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a088f59..0ccc7f2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -109,7 +109,7 @@
 	if (nid == MAX_NUMNODES)
 		nid = first_node(node_online_map);
 	if (page) {
-		page[1].lru.next = (void *)free_huge_page;	/* dtor */
+		set_compound_page_dtor(page, free_huge_page);
 		spin_lock(&hugetlb_lock);
 		nr_huge_pages++;
 		nr_huge_pages_node[page_to_nid(page)]++;
@@ -344,7 +344,6 @@
 			entry = *src_pte;
 			ptepage = pte_page(entry);
 			get_page(ptepage);
-			add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
 			set_huge_pte_at(dst, addr, dst_pte, entry);
 		}
 		spin_unlock(&src->page_table_lock);
@@ -365,6 +364,11 @@
 	pte_t pte;
 	struct page *page;
 	struct page *tmp;
+	/*
+	 * A page gathering list, protected by per file i_mmap_lock. The
+	 * lock is used to avoid list corruption from multiple unmapping
+	 * of the same page since we are using page->lru.
+	 */
 	LIST_HEAD(page_list);
 
 	WARN_ON(!is_vm_hugetlb_page(vma));
@@ -372,24 +376,21 @@
 	BUG_ON(end & ~HPAGE_MASK);
 
 	spin_lock(&mm->page_table_lock);
-
-	/* Update high watermark before we lower rss */
-	update_hiwater_rss(mm);
-
 	for (address = start; address < end; address += HPAGE_SIZE) {
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
 
+		if (huge_pmd_unshare(mm, &address, ptep))
+			continue;
+
 		pte = huge_ptep_get_and_clear(mm, address, ptep);
 		if (pte_none(pte))
 			continue;
 
 		page = pte_page(pte);
 		list_add(&page->lru, &page_list);
-		add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
 	}
-
 	spin_unlock(&mm->page_table_lock);
 	flush_tlb_range(vma, start, end);
 	list_for_each_entry_safe(page, tmp, &page_list, lru) {
@@ -515,7 +516,6 @@
 	if (!pte_none(*ptep))
 		goto backout;
 
-	add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
 				&& (vma->vm_flags & VM_SHARED)));
 	set_huge_pte_at(mm, address, ptep, new_pte);
@@ -653,11 +653,14 @@
 	BUG_ON(address >= end);
 	flush_cache_range(vma, address, end);
 
+	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
 	spin_lock(&mm->page_table_lock);
 	for (; address < end; address += HPAGE_SIZE) {
 		ptep = huge_pte_offset(mm, address);
 		if (!ptep)
 			continue;
+		if (huge_pmd_unshare(mm, &address, ptep))
+			continue;
 		if (!pte_none(*ptep)) {
 			pte = huge_ptep_get_and_clear(mm, address, ptep);
 			pte = pte_mkhuge(pte_modify(pte, newprot));
@@ -666,6 +669,7 @@
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
+	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
 
 	flush_tlb_range(vma, start, end);
 }
diff --git a/mm/memory.c b/mm/memory.c
index 156861f..4198df0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1902,7 +1902,6 @@
 
 	return 0;
 }
-EXPORT_UNUSED_SYMBOL(vmtruncate_range);  /*  June 2006  */
 
 /**
  * swapin_readahead - swap in pages in hope we need them soon
@@ -1991,6 +1990,7 @@
 	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
 	page = lookup_swap_cache(entry);
 	if (!page) {
+		grab_swap_token(); /* Contend for token _before_ read-in */
  		swapin_readahead(entry, address, vma);
  		page = read_swap_cache_async(entry, vma, address);
 		if (!page) {
@@ -2008,7 +2008,6 @@
 		/* Had to read the page from swap area: Major fault */
 		ret = VM_FAULT_MAJOR;
 		count_vm_event(PGMAJFAULT);
-		grab_swap_token();
 	}
 
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index fd678a6..0c055a09 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -72,7 +72,6 @@
 			return ret;
 	}
 	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
-	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
 	return 0;
 }
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 617fb31..b917d6f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -141,9 +141,11 @@
 	enum zone_type k;
 
 	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
+	max++;			/* space for zlcache_ptr (see mmzone.h) */
 	zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
 	if (!zl)
 		return NULL;
+	zl->zlcache_ptr = NULL;
 	num = 0;
 	/* First put in the highest zones from all nodes, then all the next 
 	   lower zones etc. Avoid empty zones because the memory allocator
@@ -219,7 +221,7 @@
 	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	do {
 		struct page *page;
-		unsigned int nid;
+		int nid;
 
 		if (!pte_present(*pte))
 			continue;
@@ -1324,7 +1326,7 @@
 	atomic_set(&new->refcnt, 1);
 	if (new->policy == MPOL_BIND) {
 		int sz = ksize(old->v.zonelist);
-		new->v.zonelist = kmemdup(old->v.zonelist, sz, SLAB_KERNEL);
+		new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL);
 		if (!new->v.zonelist) {
 			kmem_cache_free(policy_cache, new);
 			return ERR_PTR(-ENOMEM);
@@ -1705,8 +1707,8 @@
  * Display pages allocated per node and memory policy via /proc.
  */
 
-static const char *policy_types[] = { "default", "prefer", "bind",
-				      "interleave" };
+static const char * const policy_types[] =
+	{ "default", "prefer", "bind", "interleave" };
 
 /*
  * Convert a mempolicy into a string.
diff --git a/mm/migrate.c b/mm/migrate.c
index b4979d4..e9b161b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -294,7 +294,7 @@
 static int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page)
 {
-	struct page **radix_pointer;
+	void **pslot;
 
 	if (!mapping) {
 		/* Anonymous page */
@@ -305,12 +305,11 @@
 
 	write_lock_irq(&mapping->tree_lock);
 
-	radix_pointer = (struct page **)radix_tree_lookup_slot(
-						&mapping->page_tree,
-						page_index(page));
+	pslot = radix_tree_lookup_slot(&mapping->page_tree,
+					page_index(page));
 
 	if (page_count(page) != 2 + !!PagePrivate(page) ||
-			*radix_pointer != page) {
+			(struct page *)radix_tree_deref_slot(pslot) != page) {
 		write_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
@@ -318,7 +317,7 @@
 	/*
 	 * Now we know that no one else is looking at the page.
 	 */
-	get_page(newpage);
+	get_page(newpage);	/* add cache reference */
 #ifdef CONFIG_SWAP
 	if (PageSwapCache(page)) {
 		SetPageSwapCache(newpage);
@@ -326,8 +325,14 @@
 	}
 #endif
 
-	*radix_pointer = newpage;
+	radix_tree_replace_slot(pslot, newpage);
+
+	/*
+	 * Drop cache reference from old page.
+	 * We know this isn't the last reference.
+	 */
 	__put_page(page);
+
 	write_unlock_irq(&mapping->tree_lock);
 
 	return 0;
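
migrate_page_move_mapping() is the model user of the new slot interface:
take the tree lock, fetch the slot once with radix_tree_lookup_slot(), read
it through radix_tree_deref_slot(), and publish the replacement with
radix_tree_replace_slot(). The same compare-and-replace shape with the
page-cache details stripped out (the my_* names are illustrative only):

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct my_item;

static RADIX_TREE(my_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(my_tree_lock);

/* Swap in @new at @index, but only if @old is still installed there. */
static int my_replace(unsigned long index, struct my_item *old,
		      struct my_item *new)
{
	void **pslot;
	int err = -ENOENT;

	spin_lock(&my_tree_lock);
	pslot = radix_tree_lookup_slot(&my_tree, index);
	if (pslot && radix_tree_deref_slot(pslot) == (void *)old) {
		radix_tree_replace_slot(pslot, new);
		err = 0;
	}
	spin_unlock(&my_tree_lock);
	return err;
}
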
diff --git a/mm/mlock.c b/mm/mlock.c
index b90c595..3446b7e 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -65,7 +65,7 @@
 			ret = make_pages_present(start, end);
 	}
 
-	vma->vm_mm->locked_vm -= pages;
+	mm->locked_vm -= pages;
 out:
 	if (ret == -ENOMEM)
 		ret = -EAGAIN;
diff --git a/mm/mmap.c b/mm/mmap.c
index 7b40abd..7be110e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1736,7 +1736,7 @@
 	if (mm->map_count >= sysctl_max_map_count)
 		return -ENOMEM;
 
-	new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!new)
 		return -ENOMEM;
 
@@ -2057,7 +2057,7 @@
 		    vma_start < new_vma->vm_end)
 			*vmap = new_vma;
 	} else {
-		new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (new_vma) {
 			*new_vma = *vma;
 			pol = mpol_copy(vma_policy(vma));
diff --git a/mm/mmzone.c b/mm/mmzone.c
index febea1c..eb58386 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -14,8 +14,6 @@
 	return NODE_DATA(first_online_node);
 }
 
-EXPORT_UNUSED_SYMBOL(first_online_pgdat);  /*  June 2006  */
-
 struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
 {
 	int nid = next_online_node(pgdat->node_id);
@@ -24,8 +22,6 @@
 		return NULL;
 	return NODE_DATA(nid);
 }
-EXPORT_UNUSED_SYMBOL(next_online_pgdat);  /*  June 2006  */
-
 
 /*
  * next_zone - helper magic for for_each_zone()
@@ -45,5 +41,4 @@
 	}
 	return zone;
 }
-EXPORT_UNUSED_SYMBOL(next_zone);  /*  June 2006  */
 
diff --git a/mm/nommu.c b/mm/nommu.c
index 8bdde95..af87456 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -497,15 +497,17 @@
 	    (flags & MAP_TYPE) != MAP_SHARED)
 		return -EINVAL;
 
-	if (PAGE_ALIGN(len) == 0)
-		return addr;
-
-	if (len > TASK_SIZE)
+	if (!len)
 		return -EINVAL;
 
+	/* Careful about overflows.. */
+	len = PAGE_ALIGN(len);
+	if (!len || len > TASK_SIZE)
+		return -ENOMEM;
+
 	/* offset overflow? */
 	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
-		return -EINVAL;
+		return -EOVERFLOW;
 
 	if (file) {
 		/* validate file mapping requests */
@@ -806,10 +808,9 @@
 	vm_flags = determine_vm_flags(file, prot, flags, capabilities);
 
 	/* we're going to need to record the mapping if it works */
-	vml = kmalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
+	vml = kzalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
 	if (!vml)
 		goto error_getting_vml;
-	memset(vml, 0, sizeof(*vml));
 
 	down_write(&nommu_vma_sem);
 
@@ -885,11 +886,10 @@
 	}
 
 	/* we're going to need a VMA struct as well */
-	vma = kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
+	vma = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
 	if (!vma)
 		goto error_getting_vma;
 
-	memset(vma, 0, sizeof(*vma));
 	INIT_LIST_HEAD(&vma->anon_vma_node);
 	atomic_set(&vma->vm_usage, 1);
 	if (file)
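
The reordering matters because PAGE_ALIGN() itself can overflow: with 4 KiB
pages on a 32-bit machine, a len of 0xFFFFF001 rounds up to 0x100000000,
which wraps to 0 in an unsigned long, so an unchecked aligned length would
sail straight past the TASK_SIZE test. A worked illustration (a sketch, not
code from the patch):

#include <linux/mm.h>
#include <linux/errno.h>

/* Why the aligned length has to be validated again. */
static int demo_validate_len(unsigned long len)
{
	if (!len)
		return -EINVAL;

	len = PAGE_ALIGN(len);		/* 0xFFFFF001 -> 0 on 32-bit (wraparound) */
	if (!len || len > TASK_SIZE)	/* catches both the wrap and oversized requests */
		return -ENOMEM;

	return 0;
}
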
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 2e3ce3a..223d9cc 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -264,7 +264,7 @@
  * flag though it's unlikely that  we select a process with CAP_SYS_RAW_IO
  * set.
  */
-static void __oom_kill_task(struct task_struct *p, const char *message)
+static void __oom_kill_task(struct task_struct *p, int verbose)
 {
 	if (is_init(p)) {
 		WARN_ON(1);
@@ -278,10 +278,8 @@
 		return;
 	}
 
-	if (message) {
-		printk(KERN_ERR "%s: Killed process %d (%s).\n",
-				message, p->pid, p->comm);
-	}
+	if (verbose)
+		printk(KERN_ERR "Killed process %d (%s)\n", p->pid, p->comm);
 
 	/*
 	 * We give our sacrificial lamb high priority and access to
@@ -294,7 +292,7 @@
 	force_sig(SIGKILL, p);
 }
 
-static int oom_kill_task(struct task_struct *p, const char *message)
+static int oom_kill_task(struct task_struct *p)
 {
 	struct mm_struct *mm;
 	struct task_struct *g, *q;
@@ -313,15 +311,25 @@
 	if (mm == NULL)
 		return 1;
 
-	__oom_kill_task(p, message);
+	/*
+	 * Don't kill the process if any threads are set to OOM_DISABLE
+	 */
+	do_each_thread(g, q) {
+		if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
+			return 1;
+	} while_each_thread(g, q);
+
+	__oom_kill_task(p, 1);
+
 	/*
 	 * kill all processes that share the ->mm (i.e. all threads),
-	 * but are in a different thread group
+	 * but are in a different thread group. Don't let them have access
+	 * to memory reserves though, otherwise we might deplete all memory.
 	 */
-	do_each_thread(g, q)
+	do_each_thread(g, q) {
 		if (q->mm == mm && q->tgid != p->tgid)
-			__oom_kill_task(q, message);
-	while_each_thread(g, q);
+			force_sig(SIGKILL, q);
+	} while_each_thread(g, q);
 
 	return 0;
 }
@@ -337,21 +345,22 @@
 	 * its children or threads, just set TIF_MEMDIE so it can die quickly
 	 */
 	if (p->flags & PF_EXITING) {
-		__oom_kill_task(p, NULL);
+		__oom_kill_task(p, 0);
 		return 0;
 	}
 
-	printk(KERN_ERR "Out of Memory: Kill process %d (%s) score %li"
-			" and children.\n", p->pid, p->comm, points);
+	printk(KERN_ERR "%s: kill process %d (%s) score %li or a child\n",
+					message, p->pid, p->comm, points);
+
 	/* Try to kill a child first */
 	list_for_each(tsk, &p->children) {
 		c = list_entry(tsk, struct task_struct, sibling);
 		if (c->mm == p->mm)
 			continue;
-		if (!oom_kill_task(c, message))
+		if (!oom_kill_task(c))
 			return 0;
 	}
-	return oom_kill_task(p, message);
+	return oom_kill_task(p);
 }
 
 static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aa6fcc7..cace22b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -83,14 +83,7 @@
 
 EXPORT_SYMBOL(totalram_pages);
 
-/*
- * Used by page_zone() to look up the address of the struct zone whose
- * id is encoded in the upper bits of page->flags
- */
-struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
-EXPORT_SYMBOL(zone_table);
-
-static char *zone_names[MAX_NR_ZONES] = {
+static char * const zone_names[MAX_NR_ZONES] = {
 	 "DMA",
 #ifdef CONFIG_ZONE_DMA32
 	 "DMA32",
@@ -237,7 +230,7 @@
 	int i;
 	int nr_pages = 1 << order;
 
-	page[1].lru.next = (void *)free_compound_page;	/* set dtor */
+	set_compound_page_dtor(page, free_compound_page);
 	page[1].lru.prev = (void *)order;
 	for (i = 0; i < nr_pages; i++) {
 		struct page *p = page + i;
@@ -486,7 +479,7 @@
 	spin_lock(&zone->lock);
 	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
-	__free_one_page(page, zone ,order);
+	__free_one_page(page, zone, order);
 	spin_unlock(&zone->lock);
 }
 
@@ -605,6 +598,8 @@
 			1 << PG_checked | 1 << PG_mappedtodisk);
 	set_page_private(page, 0);
 	set_page_refcounted(page);
+
+	arch_alloc_page(page, order);
 	kernel_map_pages(page, 1 << order, 1);
 
 	if (gfp_flags & __GFP_ZERO)
@@ -690,9 +685,15 @@
 
 			pcp = &pset->pcp[i];
 			if (pcp->count) {
+				int to_drain;
+
 				local_irq_save(flags);
-				free_pages_bulk(zone, pcp->count, &pcp->list, 0);
-				pcp->count = 0;
+				if (pcp->count >= pcp->batch)
+					to_drain = pcp->batch;
+				else
+					to_drain = pcp->count;
+				free_pages_bulk(zone, to_drain, &pcp->list, 0);
+				pcp->count -= to_drain;
 				local_irq_restore(flags);
 			}
 		}
@@ -700,7 +701,6 @@
 }
 #endif
 
-#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
 static void __drain_pages(unsigned int cpu)
 {
 	unsigned long flags;
@@ -722,7 +722,6 @@
 		}
 	}
 }
-#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_PM
 
@@ -925,31 +924,160 @@
 	return 1;
 }
 
+#ifdef CONFIG_NUMA
 /*
- * get_page_from_freeliest goes through the zonelist trying to allocate
+ * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
+ * skip over zones that are not allowed by the cpuset, or that have
+ * been recently (in last second) found to be nearly full.  See further
+ * comments in mmzone.h.  Reduces cache footprint of zonelist scans
+ * that have to skip over a lot of full or unallowed zones.
+ *
+ * If the zonelist cache is present in the passed in zonelist, then
+ * returns a pointer to the allowed node mask (either the current
+ * task's mems_allowed, or node_online_map).
+ *
+ * If the zonelist cache is not available for this zonelist, does
+ * nothing and returns NULL.
+ *
+ * If the fullzones BITMAP in the zonelist cache is stale (more than
+ * a second since last zap'd) then we zap it out (clear its bits.)
+ *
+ * We hold off even calling zlc_setup, until after we've checked the
+ * first zone in the zonelist, on the theory that most allocations will
+ * be satisfied from that first zone, so best to examine that zone as
+ * quickly as we can.
+ */
+static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
+{
+	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
+	nodemask_t *allowednodes;	/* zonelist_cache approximation */
+
+	zlc = zonelist->zlcache_ptr;
+	if (!zlc)
+		return NULL;
+
+	if (jiffies - zlc->last_full_zap > 1 * HZ) {
+		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
+		zlc->last_full_zap = jiffies;
+	}
+
+	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
+					&cpuset_current_mems_allowed :
+					&node_online_map;
+	return allowednodes;
+}
+
+/*
+ * Given 'z' scanning a zonelist, run a couple of quick checks to see
+ * if it is worth looking at further for free memory:
+ *  1) Check that the zone isn't thought to be full (doesn't have its
+ *     bit set in the zonelist_cache fullzones BITMAP).
+ *  2) Check that the zone's node (obtained from the zonelist_cache
+ *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
+ * Return true (non-zero) if zone is worth looking at further, or
+ * else return false (zero) if it is not.
+ *
+ * This check -ignores- the distinction between various watermarks,
+ * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
+ * found to be full for any variation of these watermarks, it will
+ * be considered full for up to one second by all requests, unless
+ * we are so low on memory on all allowed nodes that we are forced
+ * into the second scan of the zonelist.
+ *
+ * In the second scan we ignore this zonelist cache and exactly
+ * apply the watermarks to all zones, even if it is slower to do so.
+ * We are low on memory in the second scan, and should leave no stone
+ * unturned looking for a free page.
+ */
+static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
+						nodemask_t *allowednodes)
+{
+	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
+	int i;				/* index of *z in zonelist zones */
+	int n;				/* node that zone *z is on */
+
+	zlc = zonelist->zlcache_ptr;
+	if (!zlc)
+		return 1;
+
+	i = z - zonelist->zones;
+	n = zlc->z_to_n[i];
+
+	/* This zone is worth trying if it is allowed but not full */
+	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
+}
+
+/*
+ * Given 'z' scanning a zonelist, set the corresponding bit in
+ * zlc->fullzones, so that subsequent attempts to allocate a page
+ * from that zone don't waste time re-examining it.
+ */
+static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
+{
+	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
+	int i;				/* index of *z in zonelist zones */
+
+	zlc = zonelist->zlcache_ptr;
+	if (!zlc)
+		return;
+
+	i = z - zonelist->zones;
+
+	set_bit(i, zlc->fullzones);
+}
+
+#else	/* CONFIG_NUMA */
+
+static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
+{
+	return NULL;
+}
+
+static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
+				nodemask_t *allowednodes)
+{
+	return 1;
+}
+
+static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
+{
+}
+#endif	/* CONFIG_NUMA */
+
+/*
+ * get_page_from_freelist goes through the zonelist trying to allocate
  * a page.
  */
 static struct page *
 get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
 		struct zonelist *zonelist, int alloc_flags)
 {
-	struct zone **z = zonelist->zones;
+	struct zone **z;
 	struct page *page = NULL;
-	int classzone_idx = zone_idx(*z);
+	int classzone_idx = zone_idx(zonelist->zones[0]);
 	struct zone *zone;
+	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
+	int zlc_active = 0;		/* set if using zonelist_cache */
+	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
 
+zonelist_scan:
 	/*
-	 * Go through the zonelist once, looking for a zone with enough free.
+	 * Scan zonelist, looking for a zone with enough free.
 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 	 */
+	z = zonelist->zones;
+
 	do {
+		if (NUMA_BUILD && zlc_active &&
+			!zlc_zone_worth_trying(zonelist, z, allowednodes))
+				continue;
 		zone = *z;
 		if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) &&
 			zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
 				break;
 		if ((alloc_flags & ALLOC_CPUSET) &&
-				!cpuset_zone_allowed(zone, gfp_mask))
-			continue;
+			!cpuset_zone_allowed(zone, gfp_mask))
+				goto try_next_zone;
 
 		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
 			unsigned long mark;
@@ -959,18 +1087,34 @@
 				mark = zone->pages_low;
 			else
 				mark = zone->pages_high;
-			if (!zone_watermark_ok(zone , order, mark,
-				    classzone_idx, alloc_flags))
+			if (!zone_watermark_ok(zone, order, mark,
+				    classzone_idx, alloc_flags)) {
 				if (!zone_reclaim_mode ||
 				    !zone_reclaim(zone, gfp_mask, order))
-					continue;
+					goto this_zone_full;
+			}
 		}
 
 		page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
-		if (page) {
+		if (page)
 			break;
+this_zone_full:
+		if (NUMA_BUILD)
+			zlc_mark_zone_full(zonelist, z);
+try_next_zone:
+		if (NUMA_BUILD && !did_zlc_setup) {
+			/* we do zlc_setup after the first zone is tried */
+			allowednodes = zlc_setup(zonelist, alloc_flags);
+			zlc_active = 1;
+			did_zlc_setup = 1;
 		}
 	} while (*(++z) != NULL);
+
+	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
+		/* Disable zlc cache for second zonelist scan */
+		zlc_active = 0;
+		goto zonelist_scan;
+	}
 	return page;
 }
 
@@ -1005,9 +1149,19 @@
 	if (page)
 		goto got_pg;
 
-	do {
+	/*
+	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
+	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
+	 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
+	 * using a larger set of nodes after it has established that the
+	 * allowed per node queues are empty and that nodes are
+	 * over allocated.
+	 */
+	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+		goto nopage;
+
+	for (z = zonelist->zones; *z; z++)
 		wakeup_kswapd(*z, order);
-	} while (*(++z));
 
 	/*
 	 * OK, we're below the kswapd watermark and have kicked background
@@ -1041,6 +1195,7 @@
 
 	/* This allocation should allow future memory freeing. */
 
+rebalance:
 	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
 			&& !in_interrupt()) {
 		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
@@ -1062,7 +1217,6 @@
 	if (!wait)
 		goto nopage;
 
-rebalance:
 	cond_resched();
 
 	/* We now go into synchronous reclaim */
@@ -1262,7 +1416,7 @@
 static inline void show_node(struct zone *zone)
 {
 	if (NUMA_BUILD)
-		printk("Node %ld ", zone_to_nid(zone));
+		printk("Node %d ", zone_to_nid(zone));
 }
 
 void si_meminfo(struct sysinfo *val)
@@ -1542,6 +1696,24 @@
 	}
 }
 
+/* Construct the zonelist performance cache - see further mmzone.h */
+static void __meminit build_zonelist_cache(pg_data_t *pgdat)
+{
+	int i;
+
+	for (i = 0; i < MAX_NR_ZONES; i++) {
+		struct zonelist *zonelist;
+		struct zonelist_cache *zlc;
+		struct zone **z;
+
+		zonelist = pgdat->node_zonelists + i;
+		zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
+		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
+		for (z = zonelist->zones; *z; z++)
+			zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z);
+	}
+}
+
 #else	/* CONFIG_NUMA */
 
 static void __meminit build_zonelists(pg_data_t *pgdat)
@@ -1579,14 +1751,26 @@
 	}
 }
 
+/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
+static void __meminit build_zonelist_cache(pg_data_t *pgdat)
+{
+	int i;
+
+	for (i = 0; i < MAX_NR_ZONES; i++)
+		pgdat->node_zonelists[i].zlcache_ptr = NULL;
+}
+
 #endif	/* CONFIG_NUMA */
 
 /* return values int ....just for stop_machine_run() */
 static int __meminit __build_all_zonelists(void *dummy)
 {
 	int nid;
-	for_each_online_node(nid)
+
+	for_each_online_node(nid) {
 		build_zonelists(NODE_DATA(nid));
+		build_zonelist_cache(NODE_DATA(nid));
+	}
 	return 0;
 }
 
@@ -1715,20 +1899,6 @@
 	}
 }
 
-#define ZONETABLE_INDEX(x, zone_nr)	((x << ZONES_SHIFT) | zone_nr)
-void zonetable_add(struct zone *zone, int nid, enum zone_type zid,
-		unsigned long pfn, unsigned long size)
-{
-	unsigned long snum = pfn_to_section_nr(pfn);
-	unsigned long end = pfn_to_section_nr(pfn + size);
-
-	if (FLAGS_HAS_NODE)
-		zone_table[ZONETABLE_INDEX(nid, zid)] = zone;
-	else
-		for (; snum <= end; snum++)
-			zone_table[ZONETABLE_INDEX(snum, zid)] = zone;
-}
-
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
 	memmap_init_zone((size), (nid), (zone), (start_pfn))
@@ -1881,16 +2051,16 @@
 	int ret = NOTIFY_OK;
 
 	switch (action) {
-		case CPU_UP_PREPARE:
-			if (process_zones(cpu))
-				ret = NOTIFY_BAD;
-			break;
-		case CPU_UP_CANCELED:
-		case CPU_DEAD:
-			free_zone_pagesets(cpu);
-			break;
-		default:
-			break;
+	case CPU_UP_PREPARE:
+		if (process_zones(cpu))
+			ret = NOTIFY_BAD;
+		break;
+	case CPU_UP_CANCELED:
+	case CPU_DEAD:
+		free_zone_pagesets(cpu);
+		break;
+	default:
+		break;
 	}
 	return ret;
 }
@@ -2421,7 +2591,6 @@
 		if (!size)
 			continue;
 
-		zonetable_add(zone, nid, j, zone_start_pfn, size);
 		ret = init_currently_empty_zone(zone, zone_start_pfn, size);
 		BUG_ON(ret);
 		zone_start_pfn += size;
@@ -2736,7 +2905,6 @@
 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int page_alloc_cpu_notify(struct notifier_block *self,
 				 unsigned long action, void *hcpu)
 {
@@ -2751,7 +2919,6 @@
 	}
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init page_alloc_init(void)
 {
@@ -3055,7 +3222,7 @@
 	/* allow the kernel cmdline to have a say */
 	if (!numentries) {
 		/* round applicable memory size up to nearest megabyte */
-		numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
+		numentries = nr_kernel_pages;
 		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
 		numentries >>= 20 - PAGE_SHIFT;
 		numentries <<= 20 - PAGE_SHIFT;
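
zlc_setup(), zlc_zone_worth_trying() and zlc_mark_zone_full() all operate on
a small per-zonelist cache that the companion include/linux/mmzone.h hunks
(not shown in this excerpt) add for NUMA builds. Roughly, the state they
rely on is the following; this is a reconstruction for context, with the
field names as used above, and the authoritative definition lives in
mmzone.h:

struct zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone index -> node id */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zones found full in the last second */
	unsigned long last_full_zap;				/* jiffies of the last bitmap clear */
};

struct zonelist then carries a zlcache_ptr that either points at an embedded
instance of this cache (filled in per node by build_zonelist_cache()) or is
NULL (the non-NUMA stub and the custom MPOL_BIND zonelists patched in
mm/mempolicy.c above), which is why every helper bails out early when
zlcache_ptr is NULL.
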
diff --git a/mm/page_io.c b/mm/page_io.c
index d4840ec..dbffec0 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -147,48 +147,3 @@
 out:
 	return ret;
 }
-
-#ifdef CONFIG_SOFTWARE_SUSPEND
-/*
- * A scruffy utility function to read or write an arbitrary swap page
- * and wait on the I/O.  The caller must have a ref on the page.
- *
- * We use end_swap_bio_read() even for writes, because it happens to do what
- * we want.
- */
-int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page,
-			struct bio **bio_chain)
-{
-	struct bio *bio;
-	int ret = 0;
-	int bio_rw;
-
-	lock_page(page);
-
-	bio = get_swap_bio(GFP_KERNEL, entry.val, page, end_swap_bio_read);
-	if (bio == NULL) {
-		unlock_page(page);
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	bio_rw = rw;
-	if (!bio_chain)
-		bio_rw |= (1 << BIO_RW_SYNC);
-	if (bio_chain)
-		bio_get(bio);
-	submit_bio(bio_rw, bio);
-	if (bio_chain == NULL) {
-		wait_on_page_locked(page);
-
-		if (!PageUptodate(page) || PageError(page))
-			ret = -EIO;
-	}
-	if (bio_chain) {
-		bio->bi_private = *bio_chain;
-		*bio_chain = bio;
-	}
-out:
-	return ret;
-}
-#endif
diff --git a/mm/pdflush.c b/mm/pdflush.c
index b02102f..8ce0900 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -21,6 +21,7 @@
 #include <linux/writeback.h>	// Prototypes pdflush_operation()
 #include <linux/kthread.h>
 #include <linux/cpuset.h>
+#include <linux/freezer.h>
 
 
 /*
diff --git a/mm/readahead.c b/mm/readahead.c
index 23cb61a..a386f2b 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -148,13 +148,7 @@
 		if (!pagevec_add(&lru_pvec, page))
 			__pagevec_lru_add(&lru_pvec);
 		if (ret) {
-			while (!list_empty(pages)) {
-				struct page *victim;
-
-				victim = list_to_page(pages);
-				list_del(&victim->lru);
-				page_cache_release(victim);
-			}
+			put_pages_list(pages);
 			break;
 		}
 	}
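
put_pages_list() is a helper added elsewhere in this series (mm/swap.c) that
replaces the open-coded drain above; roughly, it walks the list from the
tail and drops one page-cache reference per page, along these lines:

void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
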
diff --git a/mm/shmem.c b/mm/shmem.c
index 4959535..c820b4f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -177,7 +177,7 @@
 
 static struct super_operations shmem_ops;
 static const struct address_space_operations shmem_aops;
-static struct file_operations shmem_file_operations;
+static const struct file_operations shmem_file_operations;
 static struct inode_operations shmem_inode_operations;
 static struct inode_operations shmem_dir_inode_operations;
 static struct inode_operations shmem_special_inode_operations;
@@ -1943,7 +1943,7 @@
 	return security_inode_setsecurity(inode, name, value, size, flags);
 }
 
-struct xattr_handler shmem_xattr_security_handler = {
+static struct xattr_handler shmem_xattr_security_handler = {
 	.prefix = XATTR_SECURITY_PREFIX,
 	.list   = shmem_xattr_security_list,
 	.get    = shmem_xattr_security_get,
@@ -2263,7 +2263,7 @@
 static struct inode *shmem_alloc_inode(struct super_block *sb)
 {
 	struct shmem_inode_info *p;
-	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
+	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
 	if (!p)
 		return NULL;
 	return &p->vfs_inode;
@@ -2319,7 +2319,7 @@
 	.migratepage	= migrate_page,
 };
 
-static struct file_operations shmem_file_operations = {
+static const struct file_operations shmem_file_operations = {
 	.mmap		= shmem_mmap,
 #ifdef CONFIG_TMPFS
 	.llseek		= generic_file_llseek,
diff --git a/mm/slab.c b/mm/slab.c
index 3c4a7e3..068cb45 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -103,12 +103,12 @@
 #include	<linux/module.h>
 #include	<linux/rcupdate.h>
 #include	<linux/string.h>
+#include	<linux/uaccess.h>
 #include	<linux/nodemask.h>
 #include	<linux/mempolicy.h>
 #include	<linux/mutex.h>
 #include	<linux/rtmutex.h>
 
-#include	<asm/uaccess.h>
 #include	<asm/cacheflush.h>
 #include	<asm/tlbflush.h>
 #include	<asm/page.h>
@@ -313,7 +313,7 @@
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
 static int enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
+static void cache_reap(struct work_struct *unused);
 
 /*
  * This function must be completely optimized away if a constant is passed to
@@ -730,7 +730,10 @@
 }
 #endif
 
-/* Guard access to the cache-chain. */
+/*
+ * 1. Guard access to the cache-chain.
+ * 2. Protect sanity of cpu_online_map against cpu hotplug events
+ */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
@@ -753,7 +756,7 @@
 	return g_cpucache_up == FULL;
 }
 
-static DEFINE_PER_CPU(struct work_struct, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -866,6 +869,22 @@
 	dump_stack();
 }
 
+/*
+ * By default on NUMA we use alien caches to stage the freeing of
+ * objects allocated from other nodes. This causes massive memory
+ * inefficiencies when using fake NUMA setup to split memory into a
+ * large number of small nodes, so it can be disabled on the command
+ * line.
+ */
+
+static int use_alien_caches __read_mostly = 1;
+static int __init noaliencache_setup(char *s)
+{
+	use_alien_caches = 0;
+	return 1;
+}
+__setup("noaliencache", noaliencache_setup);
+
 #ifdef CONFIG_NUMA
 /*
  * Special reaping functions for NUMA systems called from cache_reap().
@@ -916,16 +935,16 @@
  */
 static void __devinit start_cpu_timer(int cpu)
 {
-	struct work_struct *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
 
 	/*
 	 * When this gets called from do_initcalls via cpucache_init(),
 	 * init_workqueues() has already run, so keventd will be setup
 	 * at that time.
 	 */
-	if (keventd_up() && reap_work->func == NULL) {
+	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
-		INIT_WORK(reap_work, cache_reap, NULL);
+		INIT_DELAYED_WORK(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
 	}
 }
@@ -996,7 +1015,7 @@
 	return NULL;
 }
 
-static inline void *__cache_alloc_node(struct kmem_cache *cachep,
+static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 		 gfp_t flags, int nodeid)
 {
 	return NULL;
@@ -1004,7 +1023,7 @@
 
 #else	/* CONFIG_NUMA */
 
-static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
+static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
 static struct array_cache **alloc_alien_cache(int node, int limit)
@@ -1114,7 +1133,7 @@
 	 * Make sure we are not freeing a object from another node to the array
 	 * cache on this cpu.
 	 */
-	if (likely(slabp->nodeid == node))
+	if (likely(slabp->nodeid == node) || unlikely(!use_alien_caches))
 		return 0;
 
 	l3 = cachep->nodelists[node];
@@ -1192,7 +1211,7 @@
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
 			struct array_cache *shared;
-			struct array_cache **alien;
+			struct array_cache **alien = NULL;
 
 			nc = alloc_arraycache(node, cachep->limit,
 						cachep->batchcount);
@@ -1204,9 +1223,11 @@
 			if (!shared)
 				goto bad;
 
-			alien = alloc_alien_cache(node, cachep->limit);
-			if (!alien)
-				goto bad;
+			if (use_alien_caches) {
+				alien = alloc_alien_cache(node, cachep->limit);
+				if (!alien)
+					goto bad;
+			}
 			cachep->array[cpu] = nc;
 			l3 = cachep->nodelists[node];
 			BUG_ON(!l3);
@@ -1230,12 +1251,18 @@
 			kfree(shared);
 			free_alien_cache(alien);
 		}
-		mutex_unlock(&cache_chain_mutex);
 		break;
 	case CPU_ONLINE:
+		mutex_unlock(&cache_chain_mutex);
 		start_cpu_timer(cpu);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
+	case CPU_DOWN_PREPARE:
+		mutex_lock(&cache_chain_mutex);
+		break;
+	case CPU_DOWN_FAILED:
+		mutex_unlock(&cache_chain_mutex);
+		break;
 	case CPU_DEAD:
 		/*
 		 * Even if all the cpus of a node are down, we don't free the
@@ -1246,8 +1273,8 @@
 		 * gets destroyed at kmem_cache_destroy().
 		 */
 		/* fall thru */
+#endif
 	case CPU_UP_CANCELED:
-		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
 			struct array_cache *shared;
@@ -1308,11 +1335,9 @@
 		}
 		mutex_unlock(&cache_chain_mutex);
 		break;
-#endif
 	}
 	return NOTIFY_OK;
 bad:
-	mutex_unlock(&cache_chain_mutex);
 	return NOTIFY_BAD;
 }
 
@@ -1580,12 +1605,7 @@
 	flags |= __GFP_COMP;
 #endif
 
-	/*
-	 * Under NUMA we want memory on the indicated node. We will handle
-	 * the needed fallback ourselves since we want to serve from our
-	 * per node object lists first for other nodes.
-	 */
-	flags |= cachep->gfpflags | GFP_THISNODE;
+	flags |= cachep->gfpflags;
 
 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
 	if (!page)
@@ -2098,15 +2118,12 @@
 	}
 
 	/*
-	 * Prevent CPUs from coming and going.
-	 * lock_cpu_hotplug() nests outside cache_chain_mutex
+	 * We use cache_chain_mutex to ensure a consistent view of
+	 * cpu_online_map as well.  Please see cpuup_callback
 	 */
-	lock_cpu_hotplug();
-
 	mutex_lock(&cache_chain_mutex);
 
 	list_for_each_entry(pc, &cache_chain, next) {
-		mm_segment_t old_fs = get_fs();
 		char tmp;
 		int res;
 
@@ -2115,9 +2132,7 @@
 		 * destroy its slab cache and no-one else reuses the vmalloc
 		 * area of the module.  Print a warning.
 		 */
-		set_fs(KERNEL_DS);
-		res = __get_user(tmp, pc->name);
-		set_fs(old_fs);
+		res = probe_kernel_address(pc->name, tmp);
 		if (res) {
 			printk("SLAB: cache with size %d has lost its name\n",
 			       pc->buffer_size);
@@ -2197,25 +2212,24 @@
 	if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
 		ralign = BYTES_PER_WORD;
 
-	/* 2) arch mandated alignment: disables debug if necessary */
+	/* 2) arch mandated alignment */
 	if (ralign < ARCH_SLAB_MINALIGN) {
 		ralign = ARCH_SLAB_MINALIGN;
-		if (ralign > BYTES_PER_WORD)
-			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	}
-	/* 3) caller mandated alignment: disables debug if necessary */
+	/* 3) caller mandated alignment */
 	if (ralign < align) {
 		ralign = align;
-		if (ralign > BYTES_PER_WORD)
-			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	}
+	/* disable debug if necessary */
+	if (ralign > BYTES_PER_WORD)
+		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	/*
 	 * 4) Store it.
 	 */
 	align = ralign;
 
 	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL);
+	cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
 	if (!cachep)
 		goto oops;
 
@@ -2326,7 +2340,6 @@
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 		      name);
 	mutex_unlock(&cache_chain_mutex);
-	unlock_cpu_hotplug();
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -2444,6 +2457,7 @@
 	return nr_freed;
 }
 
+/* Called with cache_chain_mutex held to protect against cpu hotplug */
 static int __cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0, i = 0;
@@ -2474,9 +2488,13 @@
  */
 int kmem_cache_shrink(struct kmem_cache *cachep)
 {
+	int ret;
 	BUG_ON(!cachep || in_interrupt());
 
-	return __cache_shrink(cachep);
+	mutex_lock(&cache_chain_mutex);
+	ret = __cache_shrink(cachep);
+	mutex_unlock(&cache_chain_mutex);
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
@@ -2500,23 +2518,16 @@
 {
 	BUG_ON(!cachep || in_interrupt());
 
-	/* Don't let CPUs to come and go */
-	lock_cpu_hotplug();
-
 	/* Find the cache in the chain of caches. */
 	mutex_lock(&cache_chain_mutex);
 	/*
 	 * the chain is never empty, cache_cache is never destroyed
 	 */
 	list_del(&cachep->next);
-	mutex_unlock(&cache_chain_mutex);
-
 	if (__cache_shrink(cachep)) {
 		slab_error(cachep, "Can't free all objects");
-		mutex_lock(&cache_chain_mutex);
 		list_add(&cachep->next, &cache_chain);
 		mutex_unlock(&cache_chain_mutex);
-		unlock_cpu_hotplug();
 		return;
 	}
 
@@ -2524,7 +2535,7 @@
 		synchronize_rcu();
 
 	__kmem_cache_destroy(cachep);
-	unlock_cpu_hotplug();
+	mutex_unlock(&cache_chain_mutex);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
@@ -2548,7 +2559,7 @@
 	if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
-					      local_flags, nodeid);
+					      local_flags & ~GFP_THISNODE, nodeid);
 		if (!slabp)
 			return NULL;
 	} else {
@@ -2618,7 +2629,7 @@
 
 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 {
-	if (flags & SLAB_DMA)
+	if (flags & GFP_DMA)
 		BUG_ON(!(cachep->gfpflags & GFP_DMA));
 	else
 		BUG_ON(cachep->gfpflags & GFP_DMA);
@@ -2689,10 +2700,10 @@
  * Grow (by 1) the number of slabs within a cache.  This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+static int cache_grow(struct kmem_cache *cachep,
+		gfp_t flags, int nodeid, void *objp)
 {
 	struct slab *slabp;
-	void *objp;
 	size_t offset;
 	gfp_t local_flags;
 	unsigned long ctor_flags;
@@ -2702,12 +2713,12 @@
 	 * Be lazy and only check for valid flags here,  keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
-	if (flags & SLAB_NO_GROW)
+	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW));
+	if (flags & __GFP_NO_GROW)
 		return 0;
 
 	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
-	local_flags = (flags & SLAB_LEVEL_MASK);
+	local_flags = (flags & GFP_LEVEL_MASK);
 	if (!(local_flags & __GFP_WAIT))
 		/*
 		 * Not allowed to sleep.  Need to tell a constructor about
@@ -2744,12 +2755,14 @@
 	 * Get mem for the objs.  Attempt to allocate a physical page from
 	 * 'nodeid'.
 	 */
-	objp = kmem_getpages(cachep, flags, nodeid);
+	if (!objp)
+		objp = kmem_getpages(cachep, flags, nodeid);
 	if (!objp)
 		goto failed;
 
 	/* Get slab management. */
-	slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid);
+	slabp = alloc_slabmgmt(cachep, objp, offset,
+			local_flags & ~GFP_THISNODE, nodeid);
 	if (!slabp)
 		goto opps1;
 
@@ -2987,7 +3000,7 @@
 
 	if (unlikely(!ac->avail)) {
 		int x;
-		x = cache_grow(cachep, flags, node);
+		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
 
 		/* cache_grow can reenable interrupts, then ac could change. */
 		ac = cpu_cache_get(cachep);
@@ -3063,6 +3076,12 @@
 
 		cachep->ctor(objp, cachep, ctor_flags);
 	}
+#if ARCH_SLAB_MINALIGN
+	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
+		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
+		       objp, ARCH_SLAB_MINALIGN);
+	}
+#endif
 	return objp;
 }
 #else
@@ -3105,10 +3124,10 @@
 		objp = ____cache_alloc(cachep, flags);
 	/*
 	 * We may just have run out of memory on the local node.
-	 * __cache_alloc_node() knows how to locate memory on other nodes
+	 * ____cache_alloc_node() knows how to locate memory on other nodes
 	 */
  	if (NUMA_BUILD && !objp)
- 		objp = __cache_alloc_node(cachep, flags, numa_node_id());
+ 		objp = ____cache_alloc_node(cachep, flags, numa_node_id());
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp,
 					    caller);
@@ -3135,15 +3154,17 @@
 	else if (current->mempolicy)
 		nid_alloc = slab_node(current->mempolicy);
 	if (nid_alloc != nid_here)
-		return __cache_alloc_node(cachep, flags, nid_alloc);
+		return ____cache_alloc_node(cachep, flags, nid_alloc);
 	return NULL;
 }
 
 /*
  * Fallback function if there was no memory available and no objects on a
- * certain node and we are allowed to fall back. We mimick the behavior of
- * the page allocator. We fall back according to a zonelist determined by
- * the policy layer while obeying cpuset constraints.
+ * certain node and falling back is permitted. First we scan all the
+ * available nodelists for available objects. If that fails then we
+ * perform an allocation without specifying a node. This allows the page
+ * allocator to do its reclaim / fallback magic. We then insert the
+ * slab into the proper nodelist and then allocate from it.
  */
 void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 {
@@ -3151,15 +3172,51 @@
 					->node_zonelists[gfp_zone(flags)];
 	struct zone **z;
 	void *obj = NULL;
+	int nid;
 
+retry:
+	/*
+	 * Look through allowed nodes for objects available
+	 * from existing per node queues.
+	 */
 	for (z = zonelist->zones; *z && !obj; z++) {
-		int nid = zone_to_nid(*z);
+		nid = zone_to_nid(*z);
 
-		if (zone_idx(*z) <= ZONE_NORMAL &&
-				cpuset_zone_allowed(*z, flags) &&
-				cache->nodelists[nid])
-			obj = __cache_alloc_node(cache,
-					flags | __GFP_THISNODE, nid);
+		if (cpuset_zone_allowed(*z, flags) &&
+			cache->nodelists[nid] &&
+			cache->nodelists[nid]->free_objects)
+				obj = ____cache_alloc_node(cache,
+					flags | GFP_THISNODE, nid);
+	}
+
+	if (!obj) {
+		/*
+		 * This allocation will be performed within the constraints
+		 * of the current cpuset / memory policy requirements.
+		 * We may trigger various forms of reclaim on the allowed
+		 * set and go into memory reserves if necessary.
+		 */
+		obj = kmem_getpages(cache, flags, -1);
+		if (obj) {
+			/*
+			 * Insert into the appropriate per node queues
+			 */
+			nid = page_to_nid(virt_to_page(obj));
+			if (cache_grow(cache, flags, nid, obj)) {
+				obj = ____cache_alloc_node(cache,
+					flags | GFP_THISNODE, nid);
+				if (!obj)
+					/*
+					 * Another processor may allocate the
+					 * objects in the slab since we are
+					 * not holding any locks.
+					 */
+					goto retry;
+			} else {
+				kmem_freepages(cache, obj);
+				obj = NULL;
+			}
+		}
 	}
 	return obj;
 }
@@ -3167,7 +3224,7 @@
 /*
  * A interface to enable slab creation on nodeid
  */
-static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 				int nodeid)
 {
 	struct list_head *entry;
@@ -3216,7 +3273,7 @@
 
 must_grow:
 	spin_unlock(&l3->list_lock);
-	x = cache_grow(cachep, flags, nodeid);
+	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
 	if (x)
 		goto retry;
 
@@ -3434,35 +3491,59 @@
  * @flags: See kmalloc().
  * @nodeid: node number of the target node.
  *
- * Identical to kmem_cache_alloc, except that this function is slow
- * and can sleep. And it will allocate memory on the given node, which
- * can improve the performance for cpu bound structures.
- * New and improved: it will now make sure that the object gets
- * put on the correct node list so that there is no false sharing.
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
  */
-void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+static __always_inline void *
+__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+		int nodeid, void *caller)
 {
 	unsigned long save_flags;
-	void *ptr;
+	void *ptr = NULL;
 
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
-	if (nodeid == -1 || nodeid == numa_node_id() ||
-			!cachep->nodelists[nodeid])
-		ptr = ____cache_alloc(cachep, flags);
-	else
-		ptr = __cache_alloc_node(cachep, flags, nodeid);
-	local_irq_restore(save_flags);
+	if (unlikely(nodeid == -1))
+		nodeid = numa_node_id();
 
-	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
-					   __builtin_return_address(0));
+	if (likely(cachep->nodelists[nodeid])) {
+		if (nodeid == numa_node_id()) {
+			/*
+			 * Use the locally cached objects if possible.
+			 * However ____cache_alloc does not allow fallback
+			 * to other nodes. It may fail while we still have
+			 * objects on other nodes available.
+			 */
+			ptr = ____cache_alloc(cachep, flags);
+		}
+		if (!ptr) {
+			/* ____cache_alloc_node can fall back to other nodes */
+			ptr = ____cache_alloc_node(cachep, flags, nodeid);
+		}
+	} else {
+		/* Node not bootstrapped yet */
+		if (!(flags & __GFP_THISNODE))
+			ptr = fallback_alloc(cachep, flags);
+	}
+
+	local_irq_restore(save_flags);
+	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 
 	return ptr;
 }
+
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+{
+	return __cache_alloc_node(cachep, flags, nodeid,
+			__builtin_return_address(0));
+}
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
 
@@ -3471,8 +3552,29 @@
 		return NULL;
 	return kmem_cache_alloc_node(cachep, flags, node);
 }
+
+#ifdef CONFIG_DEBUG_SLAB
+void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return __do_kmalloc_node(size, flags, node,
+			__builtin_return_address(0));
+}
 EXPORT_SYMBOL(__kmalloc_node);
-#endif
+
+void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
+		int node, void *caller)
+{
+	return __do_kmalloc_node(size, flags, node, caller);
+}
+EXPORT_SYMBOL(__kmalloc_node_track_caller);
+#else
+void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	return __do_kmalloc_node(size, flags, node, NULL);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+#endif /* CONFIG_DEBUG_SLAB */
+#endif /* CONFIG_NUMA */
 
 /**
  * __do_kmalloc - allocate memory
@@ -3583,13 +3685,15 @@
 	int node;
 	struct kmem_list3 *l3;
 	struct array_cache *new_shared;
-	struct array_cache **new_alien;
+	struct array_cache **new_alien = NULL;
 
 	for_each_online_node(node) {
 
-		new_alien = alloc_alien_cache(node, cachep->limit);
-		if (!new_alien)
-			goto fail;
+		if (use_alien_caches) {
+			new_alien = alloc_alien_cache(node, cachep->limit);
+			if (!new_alien)
+				goto fail;
+		}
 
 		new_shared = alloc_arraycache(node,
 				cachep->shared*cachep->batchcount,
@@ -3815,7 +3919,7 @@
  * If we cannot acquire the cache chain mutex then just give up - we'll try
  * again on the next iteration.
  */
-static void cache_reap(void *unused)
+static void cache_reap(struct work_struct *unused)
 {
 	struct kmem_cache *searchp;
 	struct kmem_list3 *l3;
@@ -4038,7 +4142,7 @@
  * + further values on SMP and with statistics enabled
  */
 
-struct seq_operations slabinfo_op = {
+const struct seq_operations slabinfo_op = {
 	.start = s_start,
 	.next = s_next,
 	.stop = s_stop,
@@ -4236,7 +4340,7 @@
 	return 0;
 }
 
-struct seq_operations slabstats_op = {
+const struct seq_operations slabstats_op = {
 	.start = leaks_start,
 	.next = s_next,
 	.stop = s_stop,
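
The mm/slab.c hunks above retire the SLAB_* GFP aliases (SLAB_KERNEL, SLAB_DMA, SLAB_LEVEL_MASK) in favour of plain GFP_* flags and route kmem_cache_alloc_node() through a common __cache_alloc_node() helper that may fall back to other nodes unless __GFP_THISNODE is set. A minimal caller-side sketch of that per-node path follows; the cache name "foo", struct foo and the foo_* functions are illustrative only (not part of this patch), and it assumes the six-argument kmem_cache_create() of this kernel (ctor/dtor pointers).

#include <linux/init.h>
#include <linux/slab.h>

struct foo {
	int nid;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN, NULL, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static struct foo *foo_alloc_on(int nid)
{
	/* GFP_KERNEL, not the old SLAB_KERNEL alias; without
	 * __GFP_THISNODE the allocator may fall back to other nodes. */
	return kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nid);
}
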
diff --git a/mm/sparse.c b/mm/sparse.c
index b3c82ba..ac26eb0 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -24,6 +24,25 @@
 #endif
 EXPORT_SYMBOL(mem_section);
 
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+/*
+ * If we did not store the node number in the page then we have to
+ * do a lookup in the section_to_node_table in order to find which
+ * node the page belongs to.
+ */
+#if MAX_NUMNODES <= 256
+static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
+#else
+static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
+#endif
+
+int page_to_nid(struct page *page)
+{
+	return section_to_node_table[page_to_section(page)];
+}
+EXPORT_SYMBOL(page_to_nid);
+#endif
+
 #ifdef CONFIG_SPARSEMEM_EXTREME
 static struct mem_section *sparse_index_alloc(int nid)
 {
@@ -49,6 +68,10 @@
 	struct mem_section *section;
 	int ret = 0;
 
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+	section_to_node_table[section_nr] = nid;
+#endif
+
 	if (mem_section[root])
 		return -EEXIST;
 
diff --git a/mm/swap.c b/mm/swap.c
index 2e0e871..2ed7be3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -57,9 +57,9 @@
 {
 	page = (struct page *)page_private(page);
 	if (put_page_testzero(page)) {
-		void (*dtor)(struct page *page);
+		compound_page_dtor *dtor;
 
-		dtor = (void (*)(struct page *))page[1].lru.next;
+		dtor = get_compound_page_dtor(page);
 		(*dtor)(page);
 	}
 }
@@ -216,7 +216,7 @@
 }
 
 #ifdef CONFIG_NUMA
-static void lru_add_drain_per_cpu(void *dummy)
+static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
 }
@@ -226,7 +226,7 @@
  */
 int lru_add_drain_all(void)
 {
-	return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+	return schedule_on_each_cpu(lru_add_drain_per_cpu);
 }
 
 #else
@@ -514,5 +514,7 @@
 	 * Right now other parts of the system means that we
 	 * _really_ don't want to cluster much more
 	 */
+#ifdef CONFIG_HOTPLUG_CPU
 	hotcpu_notifier(cpu_swap_callback, 0);
+#endif
 }
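
The lru_add_drain_per_cpu() and hotcpu_notifier changes above follow the tree-wide workqueue rework: callbacks now take a struct work_struct * and schedule_on_each_cpu() takes only the function. A minimal sketch of the new convention, with a hypothetical callback name:

#include <linux/workqueue.h>

/* Hypothetical example of the reworked API used above. */
static void flush_one_cpu(struct work_struct *unused)
{
	/* runs once on every online CPU via schedule_on_each_cpu() */
}

static int flush_all_cpus(void)
{
	return schedule_on_each_cpu(flush_one_cpu);
}
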
diff --git a/mm/swapfile.c b/mm/swapfile.c
index a15def6..c543107 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -427,34 +427,48 @@
 
 #ifdef CONFIG_SOFTWARE_SUSPEND
 /*
- * Find the swap type that corresponds to given device (if any)
+ * Find the swap type that corresponds to given device (if any).
  *
- * This is needed for software suspend and is done in such a way that inode
- * aliasing is allowed.
+ * @offset - number of the PAGE_SIZE-sized block of the device, starting
+ * from 0, in which the swap header is expected to be located.
+ *
+ * This is needed for the suspend to disk (aka swsusp).
  */
-int swap_type_of(dev_t device)
+int swap_type_of(dev_t device, sector_t offset)
 {
+	struct block_device *bdev = NULL;
 	int i;
 
+	if (device)
+		bdev = bdget(device);
+
 	spin_lock(&swap_lock);
 	for (i = 0; i < nr_swapfiles; i++) {
-		struct inode *inode;
+		struct swap_info_struct *sis = swap_info + i;
 
-		if (!(swap_info[i].flags & SWP_WRITEOK))
+		if (!(sis->flags & SWP_WRITEOK))
 			continue;
 
-		if (!device) {
+		if (!bdev) {
 			spin_unlock(&swap_lock);
 			return i;
 		}
-		inode = swap_info[i].swap_file->f_dentry->d_inode;
-		if (S_ISBLK(inode->i_mode) &&
-		    device == MKDEV(imajor(inode), iminor(inode))) {
-			spin_unlock(&swap_lock);
-			return i;
+		if (bdev == sis->bdev) {
+			struct swap_extent *se;
+
+			se = list_entry(sis->extent_list.next,
+					struct swap_extent, list);
+			if (se->start_block == offset) {
+				spin_unlock(&swap_lock);
+				bdput(bdev);
+				return i;
+			}
 		}
 	}
 	spin_unlock(&swap_lock);
+	if (bdev)
+		bdput(bdev);
+
 	return -ENODEV;
 }
 
@@ -931,6 +945,23 @@
 	}
 }
 
+#ifdef CONFIG_SOFTWARE_SUSPEND
+/*
+ * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
+ * corresponding to given index in swap_info (swap type).
+ */
+sector_t swapdev_block(int swap_type, pgoff_t offset)
+{
+	struct swap_info_struct *sis;
+
+	if (swap_type >= nr_swapfiles)
+		return 0;
+
+	sis = swap_info + swap_type;
+	return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0;
+}
+#endif /* CONFIG_SOFTWARE_SUSPEND */
+
 /*
  * Free all of a swapdev's extent information
  */
@@ -1274,10 +1305,13 @@
 
 	mutex_lock(&swapon_mutex);
 
+	if (!l)
+		return SEQ_START_TOKEN;
+
 	for (i = 0; i < nr_swapfiles; i++, ptr++) {
 		if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
 			continue;
-		if (!l--)
+		if (!--l)
 			return ptr;
 	}
 
@@ -1286,10 +1320,17 @@
 
 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
 {
-	struct swap_info_struct *ptr = v;
+	struct swap_info_struct *ptr;
 	struct swap_info_struct *endptr = swap_info + nr_swapfiles;
 
-	for (++ptr; ptr < endptr; ptr++) {
+	if (v == SEQ_START_TOKEN)
+		ptr = swap_info;
+	else {
+		ptr = v;
+		ptr++;
+	}
+
+	for (; ptr < endptr; ptr++) {
 		if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
 			continue;
 		++*pos;
@@ -1310,8 +1351,10 @@
 	struct file *file;
 	int len;
 
-	if (v == swap_info)
-		seq_puts(swap, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
+	if (ptr == SEQ_START_TOKEN) {
+		seq_puts(swap, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
+		return 0;
+	}
 
 	file = ptr->swap_file;
 	len = seq_path(swap, file->f_vfsmnt, file->f_dentry, " \t\n\\");
@@ -1325,7 +1368,7 @@
 	return 0;
 }
 
-static struct seq_operations swaps_op = {
+static const struct seq_operations swaps_op = {
 	.start =	swap_start,
 	.next =		swap_next,
 	.stop =		swap_stop,
@@ -1337,7 +1380,7 @@
 	return seq_open(file, &swaps_op);
 }
 
-static struct file_operations proc_swaps_operations = {
+static const struct file_operations proc_swaps_operations = {
 	.open		= swaps_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -1540,6 +1583,11 @@
 		error = -EINVAL;
 		if (!maxpages)
 			goto bad_swap;
+		if (swapfilesize && maxpages > swapfilesize) {
+			printk(KERN_WARNING
+			       "Swap area shorter than signature indicates\n");
+			goto bad_swap;
+		}
 		if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
 			goto bad_swap;
 		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
@@ -1567,12 +1615,6 @@
 			goto bad_swap;
 	}
 
-	if (swapfilesize && maxpages > swapfilesize) {
-		printk(KERN_WARNING
-		       "Swap area shorter than signature indicates\n");
-		error = -EINVAL;
-		goto bad_swap;
-	}
 	if (nr_good_pages) {
 		p->swap_map[0] = SWAP_MAP_BAD;
 		p->max = maxpages;
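
The /proc/swaps hunks above convert swap_start()/swap_next()/swap_show() to the SEQ_START_TOKEN idiom, so the header line is emitted by ->show() instead of being tied to the first table entry. The general shape of that idiom, sketched with illustrative names (ex_* and find_record() are hypothetical, not from this patch):

#include <linux/seq_file.h>

static void *find_record(loff_t n);	/* hypothetical record lookup */

static void *ex_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		return SEQ_START_TOKEN;	/* first ->show() call prints the header */
	return find_record(*pos - 1);
}

static int ex_show(struct seq_file *m, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(m, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
		return 0;
	}
	/* otherwise v points at a real record; format it here */
	return 0;
}
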
diff --git a/mm/thrash.c b/mm/thrash.c
index f4c560b..9ef9071 100644
--- a/mm/thrash.c
+++ b/mm/thrash.c
@@ -7,100 +7,74 @@
  *
  * Simple token based thrashing protection, using the algorithm
  * described in:  http://www.cs.wm.edu/~sjiang/token.pdf
+ *
+ * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
+ * Improved algorithm to pass token:
+ * Each task has a priority which is incremented if it contended
+ * for the token in an interval less than its previous attempt.
+ * If the token is acquired, that task's priority is boosted to prevent
+ * the token from bouncing around too often and to let the task make
+ * some progress in its execution.
  */
+
 #include <linux/jiffies.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/swap.h>
 
 static DEFINE_SPINLOCK(swap_token_lock);
-static unsigned long swap_token_timeout;
-static unsigned long swap_token_check;
-struct mm_struct * swap_token_mm = &init_mm;
+struct mm_struct *swap_token_mm;
+static unsigned int global_faults;
 
-#define SWAP_TOKEN_CHECK_INTERVAL (HZ * 2)
-#define SWAP_TOKEN_TIMEOUT	(300 * HZ)
-/*
- * Currently disabled; Needs further code to work at HZ * 300.
- */
-unsigned long swap_token_default_timeout = SWAP_TOKEN_TIMEOUT;
-
-/*
- * Take the token away if the process had no page faults
- * in the last interval, or if it has held the token for
- * too long.
- */
-#define SWAP_TOKEN_ENOUGH_RSS 1
-#define SWAP_TOKEN_TIMED_OUT 2
-static int should_release_swap_token(struct mm_struct *mm)
-{
-	int ret = 0;
-	if (!mm->recent_pagein)
-		ret = SWAP_TOKEN_ENOUGH_RSS;
-	else if (time_after(jiffies, swap_token_timeout))
-		ret = SWAP_TOKEN_TIMED_OUT;
-	mm->recent_pagein = 0;
-	return ret;
-}
-
-/*
- * Try to grab the swapout protection token.  We only try to
- * grab it once every TOKEN_CHECK_INTERVAL, both to prevent
- * SMP lock contention and to check that the process that held
- * the token before is no longer thrashing.
- */
 void grab_swap_token(void)
 {
-	struct mm_struct *mm;
-	int reason;
+	int current_interval;
 
-	/* We have the token. Let others know we still need it. */
-	if (has_swap_token(current->mm)) {
-		current->mm->recent_pagein = 1;
-		if (unlikely(!swap_token_default_timeout))
-			disable_swap_token();
+	global_faults++;
+
+	current_interval = global_faults - current->mm->faultstamp;
+
+	if (!spin_trylock(&swap_token_lock))
 		return;
+
+	/* First come first served */
+	if (swap_token_mm == NULL) {
+		current->mm->token_priority = current->mm->token_priority + 2;
+		swap_token_mm = current->mm;
+		goto out;
 	}
 
-	if (time_after(jiffies, swap_token_check)) {
-
-		if (!swap_token_default_timeout) {
-			swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL;
-			return;
+	if (current->mm != swap_token_mm) {
+		if (current_interval < current->mm->last_interval)
+			current->mm->token_priority++;
+		else {
+			current->mm->token_priority--;
+			if (unlikely(current->mm->token_priority < 0))
+				current->mm->token_priority = 0;
 		}
-
-		/* ... or if we recently held the token. */
-		if (time_before(jiffies, current->mm->swap_token_time))
-			return;
-
-		if (!spin_trylock(&swap_token_lock))
-			return;
-
-		swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL;
-
-		mm = swap_token_mm;
-		if ((reason = should_release_swap_token(mm))) {
-			unsigned long eligible = jiffies;
-			if (reason == SWAP_TOKEN_TIMED_OUT) {
-				eligible += swap_token_default_timeout;
-			}
-			mm->swap_token_time = eligible;
-			swap_token_timeout = jiffies + swap_token_default_timeout;
+		/* Check if we deserve the token */
+		if (current->mm->token_priority >
+				swap_token_mm->token_priority) {
+			current->mm->token_priority += 2;
 			swap_token_mm = current->mm;
 		}
-		spin_unlock(&swap_token_lock);
+	} else {
+		/* Token holder came in again! */
+		current->mm->token_priority += 2;
 	}
-	return;
+
+out:
+	current->mm->faultstamp = global_faults;
+	current->mm->last_interval = current_interval;
+	spin_unlock(&swap_token_lock);
+	return;
 }
 
 /* Called on process exit. */
 void __put_swap_token(struct mm_struct *mm)
 {
 	spin_lock(&swap_token_lock);
-	if (likely(mm == swap_token_mm)) {
-		mm->swap_token_time = jiffies + SWAP_TOKEN_CHECK_INTERVAL;
-		swap_token_mm = &init_mm;
-		swap_token_check = jiffies;
-	}
+	if (likely(mm == swap_token_mm))
+		swap_token_mm = NULL;
 	spin_unlock(&swap_token_lock);
 }
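
A brief worked example of the priority rule introduced above (numbers are illustrative only): suppose a task recorded faultstamp 100 and last_interval 40, and faults again once global_faults has reached roughly 130. Its current_interval is about 30, shorter than 40, so its token_priority is incremented; if that now exceeds the current holder's priority, the task takes the token and receives a further +2, which keeps the token from bouncing between tasks on every fault.
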
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 518540a..093f5fe 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -36,6 +36,7 @@
 #include <linux/rwsem.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -1172,11 +1173,12 @@
 			if (!zone_watermark_ok(zone, order, zone->pages_high,
 					       0, 0)) {
 				end_zone = i;
-				goto scan;
+				break;
 			}
 		}
-		goto out;
-scan:
+		if (i < 0)
+			goto out;
+
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
 
@@ -1259,6 +1261,9 @@
 	}
 	if (!all_zones_ok) {
 		cond_resched();
+
+		try_to_freeze();
+
 		goto loop_again;
 	}
 
@@ -1508,7 +1513,6 @@
 }
 #endif
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
@@ -1529,7 +1533,6 @@
 	}
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 /*
  * This kswapd start function will be called by init and node-hot-add.
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8614e8f..dc005a0 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -430,7 +430,7 @@
 	return 0;
 }
 
-struct seq_operations fragmentation_op = {
+const struct seq_operations fragmentation_op = {
 	.start	= frag_start,
 	.next	= frag_next,
 	.stop	= frag_stop,
@@ -452,7 +452,7 @@
 #define TEXTS_FOR_ZONES(xx) xx "_dma", TEXT_FOR_DMA32(xx) xx "_normal", \
 					TEXT_FOR_HIGHMEM(xx)
 
-static char *vmstat_text[] = {
+static const char * const vmstat_text[] = {
 	/* Zoned VM counters */
 	"nr_anon_pages",
 	"nr_mapped",
@@ -597,7 +597,7 @@
 	return 0;
 }
 
-struct seq_operations zoneinfo_op = {
+const struct seq_operations zoneinfo_op = {
 	.start	= frag_start, /* iterate over all zones. The same as in
 			       * fragmentation. */
 	.next	= frag_next,
@@ -660,7 +660,7 @@
 	m->private = NULL;
 }
 
-struct seq_operations vmstat_op = {
+const struct seq_operations vmstat_op = {
 	.start	= vmstat_start,
 	.next	= vmstat_next,
 	.stop	= vmstat_stop,
@@ -679,13 +679,13 @@
 		void *hcpu)
 {
 	switch (action) {
-		case CPU_UP_PREPARE:
-		case CPU_UP_CANCELED:
-		case CPU_DEAD:
-			refresh_zone_stat_thresholds();
-			break;
-		default:
-			break;
+	case CPU_UP_PREPARE:
+	case CPU_UP_CANCELED:
+	case CPU_DEAD:
+		refresh_zone_stat_thresholds();
+		break;
+	default:
+		break;
 	}
 	return NOTIFY_OK;
 }
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 6d7fed3..579e2dd 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -36,7 +36,6 @@
 #include <net/arp.h>
 #include <net/sock.h>
 #include <asm/uaccess.h>
-#include <asm/checksum.h>
 #include <asm/system.h>
 
 /*
diff --git a/net/Kconfig b/net/Kconfig
index 67e39ad..7dfc949 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -75,7 +75,7 @@
 	  If you are unsure how to answer this question, answer N.
 
 menuconfig NETFILTER
-	bool "Network packet filtering (replaces ipchains)"
+	bool "Network packet filtering framework (Netfilter)"
 	---help---
 	  Netfilter is a framework for filtering and mangling network packets
 	  that pass through your Linux box.
@@ -175,33 +175,6 @@
 source "drivers/net/appletalk/Kconfig"
 source "net/x25/Kconfig"
 source "net/lapb/Kconfig"
-
-config NET_DIVERT
-	bool "Frame Diverter (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && BROKEN
-	---help---
-	  The Frame Diverter allows you to divert packets from the
-	  network, that are not aimed at the interface receiving it (in
-	  promisc. mode). Typically, a Linux box setup as an Ethernet bridge
-	  with the Frames Diverter on, can do some *really* transparent www
-	  caching using a Squid proxy for example.
-
-	  This is very useful when you don't want to change your router's
-	  config (or if you simply don't have access to it).
-
-	  The other possible usages of diverting Ethernet Frames are
-	  numberous:
-	  - reroute smtp traffic to another interface
-	  - traffic-shape certain network streams
-	  - transparently proxy smtp connections
-	  - etc...
-
-	  For more informations, please refer to:
-	  <http://diverter.sourceforge.net/>
-	  <http://perso.wanadoo.fr/magpie/EtherDivert.html>
-
-	  If unsure, say N.
-
 source "net/econet/Kconfig"
 source "net/wanrouter/Kconfig"
 source "net/sched/Kconfig"
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 485e35c..3a70522 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -61,6 +61,7 @@
 #include <net/tcp_states.h>
 #include <net/route.h>
 #include <linux/atalk.h>
+#include "../core/kmap_skb.h"
 
 struct datalink_proto *ddp_dl, *aarp_dl;
 static const struct proto_ops atalk_dgram_ops;
diff --git a/net/atm/Makefile b/net/atm/Makefile
index 89656d6..cc50bd1 100644
--- a/net/atm/Makefile
+++ b/net/atm/Makefile
@@ -7,10 +7,7 @@
 
 obj-$(CONFIG_ATM) += atm.o
 obj-$(CONFIG_ATM_CLIP) += clip.o
-atm-$(subst m,y,$(CONFIG_ATM_CLIP)) += ipcommon.o
 obj-$(CONFIG_ATM_BR2684) += br2684.o
-atm-$(subst m,y,$(CONFIG_ATM_BR2684)) += ipcommon.o
-atm-$(subst m,y,$(CONFIG_NET_SCH_ATM)) += ipcommon.o
 atm-$(CONFIG_PROC_FS) += proc.o
 
 obj-$(CONFIG_ATM_LANE) += lec.o
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index d00cca9..83a1c1b 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -23,7 +23,6 @@
 #include <linux/atmbr2684.h>
 
 #include "common.h"
-#include "ipcommon.h"
 
 /*
  * Define this to use a version of the code which interacts with the higher
@@ -372,7 +371,7 @@
 
 /* Returns 1 if packet should be dropped */
 static inline int
-packet_fails_filter(u16 type, struct br2684_vcc *brvcc, struct sk_buff *skb)
+packet_fails_filter(__be16 type, struct br2684_vcc *brvcc, struct sk_buff *skb)
 {
 	if (brvcc->filter.netmask == 0)
 		return 0;			/* no filter in place */
@@ -500,11 +499,12 @@
 */
 	int err;
 	struct br2684_vcc *brvcc;
-	struct sk_buff_head copy;
 	struct sk_buff *skb;
+	struct sk_buff_head *rq;
 	struct br2684_dev *brdev;
 	struct net_device *net_dev;
 	struct atm_backend_br2684 be;
+	unsigned long flags;
 
 	if (copy_from_user(&be, arg, sizeof be))
 		return -EFAULT;
@@ -554,12 +554,30 @@
 	brvcc->old_push = atmvcc->push;
 	barrier();
 	atmvcc->push = br2684_push;
-	skb_queue_head_init(&copy);
-	skb_migrate(&sk_atm(atmvcc)->sk_receive_queue, &copy);
-	while ((skb = skb_dequeue(&copy)) != NULL) {
+
+	rq = &sk_atm(atmvcc)->sk_receive_queue;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	if (skb_queue_empty(rq)) {
+		skb = NULL;
+	} else {
+		/* NULL terminate the list.  */
+		rq->prev->next = NULL;
+		skb = rq->next;
+	}
+	rq->prev = rq->next = (struct sk_buff *)rq;
+	rq->qlen = 0;
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	while (skb) {
+		struct sk_buff *next = skb->next;
+
+		skb->next = skb->prev = NULL;
 		BRPRIV(skb->dev)->stats.rx_bytes -= skb->len;
 		BRPRIV(skb->dev)->stats.rx_packets--;
 		br2684_push(atmvcc, skb);
+
+		skb = next;
 	}
 	__module_get(THIS_MODULE);
 	return 0;
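
The block above replaces skb_migrate() (removed further down together with net/atm/ipcommon.[ch]) with an open-coded detach of the whole receive queue: NULL-terminate the list under the queue lock, reset the head, then walk the detached skbs without holding the lock. The same pattern reappears in clip_mkip() below. As a standalone sketch (the helper name is hypothetical; no such helper is added by this patch):

#include <linux/skbuff.h>

/* Detach every skb from @rq under its lock and return them as a
 * NULL-terminated singly linked list for lock-free processing. */
static struct sk_buff *detach_receive_queue(struct sk_buff_head *rq)
{
	struct sk_buff *skb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);
	if (!skb_queue_empty(rq)) {
		rq->prev->next = NULL;		/* NULL terminate the list */
		skb = rq->next;
	}
	rq->prev = rq->next = (struct sk_buff *)rq;
	rq->qlen = 0;
	spin_unlock_irqrestore(&rq->lock, flags);

	return skb;
}
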
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 7af2c41..5f8a1d2 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -38,7 +38,6 @@
 
 #include "common.h"
 #include "resources.h"
-#include "ipcommon.h"
 #include <net/atmclip.h>
 
 
@@ -54,7 +53,7 @@
 static struct neigh_table clip_tbl;
 static struct timer_list idle_timer;
 
-static int to_atmarpd(enum atmarp_ctrl_type type, int itf, unsigned long ip)
+static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
 {
 	struct sock *sk;
 	struct atmarp_ctrl *ctrl;
@@ -220,7 +219,7 @@
 	    || memcmp(skb->data, llc_oui, sizeof (llc_oui)))
 		skb->protocol = htons(ETH_P_IP);
 	else {
-		skb->protocol = ((u16 *) skb->data)[3];
+		skb->protocol = ((__be16 *) skb->data)[3];
 		skb_pull(skb, RFC1483LLC_LEN);
 		if (skb->protocol == htons(ETH_P_ARP)) {
 			PRIV(skb->dev)->stats.rx_packets++;
@@ -430,7 +429,7 @@
 
 		here = skb_push(skb, RFC1483LLC_LEN);
 		memcpy(here, llc_oui, sizeof(llc_oui));
-		((u16 *) here)[3] = skb->protocol;
+		((__be16 *) here)[3] = skb->protocol;
 	}
 	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
 	ATM_SKB(skb)->atm_options = vcc->atm_options;
@@ -469,8 +468,9 @@
 static int clip_mkip(struct atm_vcc *vcc, int timeout)
 {
 	struct clip_vcc *clip_vcc;
-	struct sk_buff_head copy;
 	struct sk_buff *skb;
+	struct sk_buff_head *rq;
+	unsigned long flags;
 
 	if (!vcc->push)
 		return -EBADFD;
@@ -490,10 +490,26 @@
 	clip_vcc->old_pop = vcc->pop;
 	vcc->push = clip_push;
 	vcc->pop = clip_pop;
-	skb_queue_head_init(&copy);
-	skb_migrate(&sk_atm(vcc)->sk_receive_queue, &copy);
+
+	rq = &sk_atm(vcc)->sk_receive_queue;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	if (skb_queue_empty(rq)) {
+		skb = NULL;
+	} else {
+		/* NULL terminate the list.  */
+		rq->prev->next = NULL;
+		skb = rq->next;
+	}
+	rq->prev = rq->next = (struct sk_buff *)rq;
+	rq->qlen = 0;
+	spin_unlock_irqrestore(&rq->lock, flags);
+
 	/* re-process everything received between connection setup and MKIP */
-	while ((skb = skb_dequeue(&copy)) != NULL)
+	while (skb) {
+		struct sk_buff *next = skb->next;
+
+		skb->next = skb->prev = NULL;
 		if (!clip_devs) {
 			atm_return(vcc, skb->truesize);
 			kfree_skb(skb);
@@ -506,10 +522,13 @@
 			PRIV(skb->dev)->stats.rx_bytes -= len;
 			kfree_skb(skb);
 		}
+
+		skb = next;
+	}
 	return 0;
 }
 
-static int clip_setentry(struct atm_vcc *vcc, u32 ip)
+static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
 {
 	struct neighbour *neigh;
 	struct atmarp_entry *entry;
@@ -752,7 +771,7 @@
 		err = clip_mkip(vcc, arg);
 		break;
 	case ATMARP_SETENTRY:
-		err = clip_setentry(vcc, arg);
+		err = clip_setentry(vcc, (__force __be32)arg);
 		break;
 	case ATMARP_ENCAP:
 		err = clip_encap(vcc, arg);
diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c
deleted file mode 100644
index 1d3de42..0000000
--- a/net/atm/ipcommon.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/* net/atm/ipcommon.c - Common items for all ways of doing IP over ATM */
-
-/* Written 1996-2000 by Werner Almesberger, EPFL LRC/ICA */
-
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/in.h>
-#include <linux/atmdev.h>
-#include <linux/atmclip.h>
-
-#include "common.h"
-#include "ipcommon.h"
-
-
-#if 0
-#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
-#else
-#define DPRINTK(format,args...)
-#endif
-
-
-/*
- * skb_migrate appends the list at "from" to "to", emptying "from" in the
- * process. skb_migrate is atomic with respect to all other skb operations on
- * "from" and "to". Note that it locks both lists at the same time, so to deal
- * with the lock ordering, the locks are taken in address order.
- *
- * This function should live in skbuff.c or skbuff.h.
- */
-
-
-void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to)
-{
-	unsigned long flags;
-	struct sk_buff *skb_from = (struct sk_buff *) from;
-	struct sk_buff *skb_to = (struct sk_buff *) to;
-	struct sk_buff *prev;
-
-	if ((unsigned long) from < (unsigned long) to) {
-		spin_lock_irqsave(&from->lock, flags);
-		spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING);
-	} else {
-		spin_lock_irqsave(&to->lock, flags);
-		spin_lock_nested(&from->lock, SINGLE_DEPTH_NESTING);
-	}
-	prev = from->prev;
-	from->next->prev = to->prev;
-	prev->next = skb_to;
-	to->prev->next = from->next;
-	to->prev = from->prev;
-	to->qlen += from->qlen;
-	spin_unlock(&to->lock);
-	from->prev = skb_from;
-	from->next = skb_from;
-	from->qlen = 0;
-	spin_unlock_irqrestore(&from->lock, flags);
-}
-
-
-EXPORT_SYMBOL(skb_migrate);
diff --git a/net/atm/ipcommon.h b/net/atm/ipcommon.h
deleted file mode 100644
index d72165f..0000000
--- a/net/atm/ipcommon.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* net/atm/ipcommon.h - Common items for all ways of doing IP over ATM */
-
-/* Written 1996-2000 by Werner Almesberger, EPFL LRC/ICA */
-
-
-#ifndef NET_ATM_IPCOMMON_H
-#define NET_ATM_IPCOMMON_H
-
-
-#include <linux/string.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/atmdev.h>
-
-/*
- * Appends all skbs from "from" to "to". The operation is atomic with respect
- * to all other skb operations on "from" or "to".
- */
-
-void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to);
-
-#endif
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 66c57c1..3fc0abe 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -204,9 +204,9 @@
 	memset(rdesc, 0, ETH_ALEN);
 	/* offset 4 comes from LAN destination field in LE control frames */
 	if (trh->rcf & htons((uint16_t) TR_RCF_DIR_BIT))
-		memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(uint16_t));
+		memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(__be16));
 	else {
-		memcpy(&rdesc[4], &trh->rseg[1], sizeof(uint16_t));
+		memcpy(&rdesc[4], &trh->rseg[1], sizeof(__be16));
 		rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0));
 	}
 
@@ -775,7 +775,7 @@
 		unsigned char *src, *dst;
 
 		atm_return(vcc, skb->truesize);
-		if (*(uint16_t *) skb->data == htons(priv->lecid) ||
+		if (*(__be16 *) skb->data == htons(priv->lecid) ||
 		    !priv->lecd || !(dev->flags & IFF_UP)) {
 			/*
 			 * Probably looping back, or if lecd is missing,
@@ -1321,11 +1321,10 @@
 		if (table == NULL)
 			return -1;
 
-		*tlvs = kmalloc(table->sizeoftlvs, GFP_ATOMIC);
+		*tlvs = kmemdup(table->tlvs, table->sizeoftlvs, GFP_ATOMIC);
 		if (*tlvs == NULL)
 			return -1;
 
-		memcpy(*tlvs, table->tlvs, table->sizeoftlvs);
 		*sizeoftlvs = table->sizeoftlvs;
 
 		return 0;
@@ -1364,11 +1363,10 @@
 
 	kfree(priv->tlvs);	/* NULL if there was no previous association */
 
-	priv->tlvs = kmalloc(sizeoftlvs, GFP_KERNEL);
+	priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
 	if (priv->tlvs == NULL)
 		return (0);
 	priv->sizeoftlvs = sizeoftlvs;
-	memcpy(priv->tlvs, tlvs, sizeoftlvs);
 
 	skb = alloc_skb(sizeoftlvs, GFP_ATOMIC);
 	if (skb == NULL)
@@ -1409,12 +1407,10 @@
 
 	kfree(entry->tlvs);
 
-	entry->tlvs = kmalloc(sizeoftlvs, GFP_KERNEL);
+	entry->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
 	if (entry->tlvs == NULL)
 		return;
-
 	entry->sizeoftlvs = sizeoftlvs;
-	memcpy(entry->tlvs, tlvs, sizeoftlvs);
 #endif
 #if 0
 	printk("lec.c: lane2_associate_ind()\n");
@@ -1458,7 +1454,7 @@
 
 #define LEC_ARP_REFRESH_INTERVAL (3*HZ)
 
-static void lec_arp_check_expire(void *data);
+static void lec_arp_check_expire(struct work_struct *work);
 static void lec_arp_expire_arp(unsigned long data);
 
 /* 
@@ -1481,7 +1477,7 @@
         INIT_HLIST_HEAD(&priv->lec_no_forward);
         INIT_HLIST_HEAD(&priv->mcast_fwds);
 	spin_lock_init(&priv->lec_arp_lock);
-	INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv);
+	INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
 	schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
 }
 
@@ -1879,10 +1875,11 @@
  *       to ESI_FORWARD_DIRECT. This causes the flush period to end
  *       regardless of the progress of the flush protocol.
  */
-static void lec_arp_check_expire(void *data)
+static void lec_arp_check_expire(struct work_struct *work)
 {
 	unsigned long flags;
-	struct lec_priv *priv = data;
+	struct lec_priv *priv =
+		container_of(work, struct lec_priv, lec_arp_work.work);
 	struct hlist_node *node, *next;
 	struct lec_arp_table *entry;
 	unsigned long now;
diff --git a/net/atm/lec.h b/net/atm/lec.h
index 877f509..99136ba 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -14,14 +14,14 @@
 #define LEC_HEADER_LEN 16
 
 struct lecdatahdr_8023 {
-	unsigned short le_header;
+	__be16 le_header;
 	unsigned char h_dest[ETH_ALEN];
 	unsigned char h_source[ETH_ALEN];
-	unsigned short h_type;
+	__be16 h_type;
 };
 
 struct lecdatahdr_8025 {
-	unsigned short le_header;
+	__be16 le_header;
 	unsigned char ac_pad;
 	unsigned char fc;
 	unsigned char h_dest[ETH_ALEN];
@@ -92,7 +92,7 @@
 	spinlock_t lec_arp_lock;
 	struct atm_vcc *mcast_vcc;		/* Default Multicast Send VCC */
 	struct atm_vcc *lecd;
-	struct work_struct lec_arp_work;	/* C10 */
+	struct delayed_work lec_arp_work;	/* C10 */
 	unsigned int maximum_unknown_frame_count;
 						/*
 						 * Within the period of time defined by this variable, the client will send
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 0d2b994..c18f737 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -152,7 +152,7 @@
 /*
  * Overwrites the old entry or makes a new one.
  */
-struct atm_mpoa_qos *atm_mpoa_add_qos(uint32_t dst_ip, struct atm_qos *qos)
+struct atm_mpoa_qos *atm_mpoa_add_qos(__be32 dst_ip, struct atm_qos *qos)
 {
 	struct atm_mpoa_qos *entry;
 
@@ -177,7 +177,7 @@
 	return entry;
 }
 
-struct atm_mpoa_qos *atm_mpoa_search_qos(uint32_t dst_ip)
+struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip)
 {
 	struct atm_mpoa_qos *qos;
 
@@ -460,11 +460,11 @@
 	in_cache_entry *entry;
 	struct iphdr *iph;
 	char *buff;
-	uint32_t ipaddr = 0;
+	__be32 ipaddr = 0;
 
 	static struct {
 		struct llc_snap_hdr hdr;
-		uint32_t tag;
+		__be32 tag;
 	} tagged_llc_snap_hdr = {
 		{0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}, {0x88, 0x4c}},
 		0
@@ -559,7 +559,7 @@
 	struct mpoa_client *mpc;
 	struct atmmpc_ioc ioc_data;
 	in_cache_entry *in_entry;
-	uint32_t  ipaddr;
+	__be32  ipaddr;
 
 	bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmmpc_ioc));
 	if (bytes_left != 0) {
@@ -638,7 +638,7 @@
 	struct sk_buff *new_skb;
 	eg_cache_entry *eg;
 	struct mpoa_client *mpc;
-	uint32_t tag;
+	__be32 tag;
 	char *tmp;
 	
 	ddprintk("mpoa: (%s) mpc_push:\n", dev->name);
@@ -683,7 +683,7 @@
 	}
 
 	tmp = skb->data + sizeof(struct llc_snap_hdr);
-	tag = *(uint32_t *)tmp;
+	tag = *(__be32 *)tmp;
 
 	eg = mpc->eg_ops->get_by_tag(tag, mpc);
 	if (eg == NULL) {
@@ -1029,7 +1029,7 @@
 
 static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 {
-	uint32_t dst_ip = msg->content.in_info.in_dst_ip;
+	__be32 dst_ip = msg->content.in_info.in_dst_ip;
 	in_cache_entry *entry;
 
 	entry = mpc->in_ops->get(dst_ip, mpc);
@@ -1066,7 +1066,7 @@
  */
 static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_client *client, in_cache_entry *entry)
 {
-	uint32_t dst_ip = msg->content.in_info.in_dst_ip;
+	__be32 dst_ip = msg->content.in_info.in_dst_ip;
 	struct atm_mpoa_qos *qos = atm_mpoa_search_qos(dst_ip);
 	eg_cache_entry *eg_entry = client->eg_ops->get_by_src_ip(dst_ip, client);
 
@@ -1102,7 +1102,7 @@
 
 static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 {
-	uint32_t dst_ip = msg->content.in_info.in_dst_ip;
+	__be32 dst_ip = msg->content.in_info.in_dst_ip;
 	in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc);
 
 	dprintk("mpoa: (%s) MPOA_res_reply_rcvd: ip %u.%u.%u.%u\n", mpc->dev->name, NIPQUAD(dst_ip));
@@ -1148,8 +1148,8 @@
 
 static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 {
-	uint32_t dst_ip = msg->content.in_info.in_dst_ip;
-	uint32_t mask = msg->ip_mask;
+	__be32 dst_ip = msg->content.in_info.in_dst_ip;
+	__be32 mask = msg->ip_mask;
 	in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask);
 
 	if(entry == NULL){
@@ -1173,7 +1173,7 @@
 
 static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 {
-	uint32_t cache_id = msg->content.eg_info.cache_id;
+	__be32 cache_id = msg->content.eg_info.cache_id;
 	eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(cache_id, mpc);
 	
 	if (entry == NULL) {
@@ -1322,13 +1322,12 @@
 	if(client->number_of_mps_macs)
 		kfree(client->mps_macs);
 	client->number_of_mps_macs = 0;
-	client->mps_macs = kmalloc(ETH_ALEN,GFP_KERNEL);
+	client->mps_macs = kmemdup(msg->MPS_ctrl, ETH_ALEN, GFP_KERNEL);
 	if (client->mps_macs == NULL) {
 		printk("mpoa: set_mps_mac_addr_rcvd: out of memory\n");
 		return;
 	}
 	client->number_of_mps_macs = 1;
-	memcpy(client->mps_macs, msg->MPS_ctrl, ETH_ALEN);
 	
 	return;
 }
diff --git a/net/atm/mpc.h b/net/atm/mpc.h
index 3c7981a..51f460d 100644
--- a/net/atm/mpc.h
+++ b/net/atm/mpc.h
@@ -36,14 +36,14 @@
 
 struct atm_mpoa_qos {
         struct atm_mpoa_qos *next;
-        uint32_t ipaddr;
+        __be32 ipaddr;
         struct atm_qos qos;
 };
 
 
 /* MPOA QoS operations */
-struct atm_mpoa_qos *atm_mpoa_add_qos(uint32_t dst_ip, struct atm_qos *qos);
-struct atm_mpoa_qos *atm_mpoa_search_qos(uint32_t dst_ip);
+struct atm_mpoa_qos *atm_mpoa_add_qos(__be32 dst_ip, struct atm_qos *qos);
+struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip);
 int atm_mpoa_delete_qos(struct atm_mpoa_qos *qos);
 
 /* Display QoS entries. This is for the procfs */
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index fbf13cd..697a081 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -22,7 +22,7 @@
 #define ddprintk(format,args...)
 #endif
 
-static in_cache_entry *in_cache_get(uint32_t dst_ip,
+static in_cache_entry *in_cache_get(__be32 dst_ip,
 				    struct mpoa_client *client)
 {
 	in_cache_entry *entry;
@@ -42,9 +42,9 @@
 	return NULL;
 }
 
-static in_cache_entry *in_cache_get_with_mask(uint32_t dst_ip,
+static in_cache_entry *in_cache_get_with_mask(__be32 dst_ip,
 					      struct mpoa_client *client,
-					      uint32_t mask)
+					      __be32 mask)
 {
 	in_cache_entry *entry;
 
@@ -84,10 +84,10 @@
 	return NULL;
 }
 
-static in_cache_entry *in_cache_add_entry(uint32_t dst_ip,
+static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
 					  struct mpoa_client *client)
 {
-	in_cache_entry* entry = kmalloc(sizeof(in_cache_entry), GFP_KERNEL);
+	in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL);
 
 	if (entry == NULL) {
 		printk("mpoa: mpoa_caches.c: new_in_cache_entry: out of memory\n");
@@ -95,7 +95,6 @@
 	}
 
 	dprintk("mpoa: mpoa_caches.c: adding an ingress entry, ip = %u.%u.%u.%u\n", NIPQUAD(dst_ip));
-	memset(entry,0,sizeof(in_cache_entry));
 
 	atomic_set(&entry->use, 1);
 	dprintk("mpoa: mpoa_caches.c: new_in_cache_entry: about to lock\n");
@@ -319,7 +318,7 @@
 	return;
 }
 
-static eg_cache_entry *eg_cache_get_by_cache_id(uint32_t cache_id, struct mpoa_client *mpc)
+static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id, struct mpoa_client *mpc)
 {
 	eg_cache_entry *entry;
 
@@ -339,7 +338,7 @@
 }
 
 /* This can be called from any context since it saves CPU flags */
-static eg_cache_entry *eg_cache_get_by_tag(uint32_t tag, struct mpoa_client *mpc)
+static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
 {
 	unsigned long flags;
 	eg_cache_entry *entry;
@@ -380,7 +379,7 @@
 	return NULL;
 }
 
-static eg_cache_entry *eg_cache_get_by_src_ip(uint32_t ipaddr, struct mpoa_client *mpc)
+static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr, struct mpoa_client *mpc)
 {
 	eg_cache_entry *entry;
 
@@ -447,7 +446,7 @@
 
 static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, struct mpoa_client *client)
 {
-	eg_cache_entry *entry = kmalloc(sizeof(eg_cache_entry), GFP_KERNEL);
+	eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL);
 
 	if (entry == NULL) {
 		printk("mpoa: mpoa_caches.c: new_eg_cache_entry: out of memory\n");
@@ -455,7 +454,6 @@
 	}
 
 	dprintk("mpoa: mpoa_caches.c: adding an egress entry, ip = %u.%u.%u.%u, this should be our IP\n", NIPQUAD(msg->content.eg_info.eg_dst_ip));
-	memset(entry, 0, sizeof(eg_cache_entry));
 
 	atomic_set(&entry->use, 1);
 	dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry: about to lock\n");
diff --git a/net/atm/mpoa_caches.h b/net/atm/mpoa_caches.h
index 6c9886a..84de977 100644
--- a/net/atm/mpoa_caches.h
+++ b/net/atm/mpoa_caches.h
@@ -29,12 +29,12 @@
 } in_cache_entry;
 
 struct in_cache_ops{
-        in_cache_entry *(*add_entry)(uint32_t dst_ip,
+        in_cache_entry *(*add_entry)(__be32 dst_ip,
                                       struct mpoa_client *client);
-        in_cache_entry *(*get)(uint32_t dst_ip, struct mpoa_client *client);
-        in_cache_entry *(*get_with_mask)(uint32_t dst_ip, 
+        in_cache_entry *(*get)(__be32 dst_ip, struct mpoa_client *client);
+        in_cache_entry *(*get_with_mask)(__be32 dst_ip,
 					 struct mpoa_client *client,
-					 uint32_t mask);
+					 __be32 mask);
         in_cache_entry *(*get_by_vcc)(struct atm_vcc *vcc, 
                                       struct mpoa_client *client);
         void            (*put)(in_cache_entry *entry);
@@ -56,17 +56,17 @@
         struct atm_vcc       *shortcut;
         uint32_t             packets_rcvd;
         uint16_t             entry_state;
-        uint32_t             latest_ip_addr;    /* The src IP address of the last packet */
+        __be32             latest_ip_addr;    /* The src IP address of the last packet */
         struct eg_ctrl_info  ctrl_info;
         atomic_t             use;
 } eg_cache_entry;
 
 struct eg_cache_ops{
         eg_cache_entry *(*add_entry)(struct k_message *msg, struct mpoa_client *client);
-        eg_cache_entry *(*get_by_cache_id)(uint32_t cache_id, struct mpoa_client *client);
-        eg_cache_entry *(*get_by_tag)(uint32_t cache_id, struct mpoa_client *client);
+        eg_cache_entry *(*get_by_cache_id)(__be32 cache_id, struct mpoa_client *client);
+        eg_cache_entry *(*get_by_tag)(__be32 cache_id, struct mpoa_client *client);
         eg_cache_entry *(*get_by_vcc)(struct atm_vcc *vcc, struct mpoa_client *client);
-        eg_cache_entry *(*get_by_src_ip)(uint32_t ipaddr, struct mpoa_client *client);
+        eg_cache_entry *(*get_by_src_ip)(__be32 ipaddr, struct mpoa_client *client);
         void            (*put)(eg_cache_entry *entry);
         void            (*remove_entry)(eg_cache_entry *entry, struct mpoa_client *client);
         void            (*update)(eg_cache_entry *entry, uint16_t holding_time);
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index d37b891..3844c85 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -231,14 +231,14 @@
          */
         unsigned char ip[4]; 
 	int tx_pcr, tx_sdu, rx_pcr, rx_sdu;
-        uint32_t ipaddr;
+        __be32 ipaddr;
 	struct atm_qos qos; 
         
         memset(&qos, 0, sizeof(struct atm_qos));
 
 	if (sscanf(buff, "del %hhu.%hhu.%hhu.%hhu",
 			ip, ip+1, ip+2, ip+3) == 4) {
-		ipaddr = *(uint32_t *)ip;
+		ipaddr = *(__be32 *)ip;
 		return atm_mpoa_delete_qos(atm_mpoa_search_qos(ipaddr));
 	}
 
@@ -250,7 +250,7 @@
 		ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu, &rx_pcr, &rx_sdu) != 8)
 		return 0;
 
-        ipaddr = *(uint32_t *)ip;
+        ipaddr = *(__be32 *)ip;
 	qos.txtp.traffic_class = ATM_CBR;
 	qos.txtp.max_pcr = tx_pcr;
 	qos.txtp.max_sdu = tx_sdu;
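
The uint32_t/uint16_t to __be32/__be16 conversions throughout these ATM hunks are sparse endianness annotations: the values stay in network byte order end to end, and the annotated types let sparse flag any comparison or arithmetic that forgets a conversion. A small hedged illustration (same_subnet() is a made-up helper, not part of this patch):

#include <linux/types.h>

/* Wire-order values can be masked and compared directly; only a
 * conversion to host order (ntohl/ntohs) changes representation. */
static int same_subnet(__be32 a, __be32 b, __be32 mask)
{
	return !((a ^ b) & mask);
}
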
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 000695c..6cabf6d 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -906,13 +906,13 @@
 	ax25->source_addr = oax25->source_addr;
 
 	if (oax25->digipeat != NULL) {
-		if ((ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
+		ax25->digipeat = kmemdup(oax25->digipeat, sizeof(ax25_digi),
+					 GFP_ATOMIC);
+		if (ax25->digipeat == NULL) {
 			sk_free(sk);
 			ax25_cb_put(ax25);
 			return NULL;
 		}
-
-		memcpy(ax25->digipeat, oax25->digipeat, sizeof(ax25_digi));
 	}
 
 	sk->sk_protinfo = ax25;
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index d7736e5..f84047d 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -70,11 +70,11 @@
 	ax25->dest_addr   = *dest;
 
 	if (digi != NULL) {
-		if ((ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
+		ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
+		if (ax25->digipeat == NULL) {
 			ax25_cb_put(ax25);
 			return NULL;
 		}
-		memcpy(ax25->digipeat, digi, sizeof(ax25_digi));
 	}
 
 	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 51b7bda..8580356 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -432,11 +432,12 @@
 	}
 
 	if (ax25_rt->digipeat != NULL) {
-		if ((ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
+		ax25->digipeat = kmemdup(ax25_rt->digipeat, sizeof(ax25_digi),
+					 GFP_ATOMIC);
+		if (ax25->digipeat == NULL) {
 			err = -ENOMEM;
 			goto put;
 		}
-		memcpy(ax25->digipeat, ax25_rt->digipeat, sizeof(ax25_digi));
 		ax25_adjust_path(addr, ax25->digipeat);
 	}
 
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index 867d425..d23a27f 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -209,7 +209,9 @@
 	}
 
 	for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) {
-		ctl_table *child = kmalloc(sizeof(ax25_param_table), GFP_ATOMIC);
+		struct ctl_table *child = kmemdup(ax25_param_table,
+						  sizeof(ax25_param_table),
+						  GFP_ATOMIC);
 		if (!child) {
 			while (n--)
 				kfree(ax25_table[n].child);
@@ -217,7 +219,6 @@
 			spin_unlock_bh(&ax25_dev_lock);
 			return;
 		}
-		memcpy(child, ax25_param_table, sizeof(ax25_param_table));
 		ax25_table[n].child = ax25_dev->systable = child;
 		ax25_table[n].ctl_name     = n + 1;
 		ax25_table[n].procname     = ax25_dev->dev->name;
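
The AX.25 hunks above (and the lec.c/mpc.c ones earlier) all apply the same kmalloc()+memcpy() to kmemdup() conversion, so the allocation size and the copy length can no longer drift apart. In isolation the pattern looks like this (copy_digi() is an illustrative helper, not added by the patch):

#include <linux/slab.h>
#include <linux/string.h>
#include <net/ax25.h>

static ax25_digi *copy_digi(const ax25_digi *src)
{
	/* one call allocates and copies; returns NULL on failure */
	return kmemdup(src, sizeof(*src), GFP_ATOMIC);
}
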
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index bbb1ed7..0b6cd0e 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -95,14 +95,14 @@
 struct bnep_set_filter_req {
 	__u8  type;
 	__u8  ctrl;
-	__u16 len;
+	__be16 len;
 	__u8  list[0];
 } __attribute__((packed));
 
 struct bnep_control_rsp {
 	__u8  type;
 	__u8  ctrl;
-	__u16 resp;
+	__be16 resp;
 } __attribute__((packed));
 
 struct bnep_ext_hdr {
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 4d3424c..7ba6470 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -117,18 +117,18 @@
 static inline void bnep_set_default_proto_filter(struct bnep_session *s)
 {
 	/* (IPv4, ARP)  */
-	s->proto_filter[0].start = htons(0x0800);
-	s->proto_filter[0].end   = htons(0x0806);
+	s->proto_filter[0].start = ETH_P_IP;
+	s->proto_filter[0].end   = ETH_P_ARP;
 	/* (RARP, AppleTalk) */
-	s->proto_filter[1].start = htons(0x8035);
-	s->proto_filter[1].end   = htons(0x80F3);
+	s->proto_filter[1].start = ETH_P_RARP;
+	s->proto_filter[1].end   = ETH_P_AARP;
 	/* (IPX, IPv6) */
-	s->proto_filter[2].start = htons(0x8137);
-	s->proto_filter[2].end   = htons(0x86DD);
+	s->proto_filter[2].start = ETH_P_IPX;
+	s->proto_filter[2].end   = ETH_P_IPV6;
 }
 #endif
 
-static int bnep_ctrl_set_netfilter(struct bnep_session *s, u16 *data, int len)
+static int bnep_ctrl_set_netfilter(struct bnep_session *s, __be16 *data, int len)
 {
 	int n;
 
@@ -150,8 +150,8 @@
 		int i;
 
 		for (i = 0; i < n; i++) {
-			f[i].start = get_unaligned(data++);
-			f[i].end   = get_unaligned(data++);
+			f[i].start = ntohs(get_unaligned(data++));
+			f[i].end   = ntohs(get_unaligned(data++));
 
 			BT_DBG("proto filter start %d end %d",
 				f[i].start, f[i].end);
@@ -180,7 +180,7 @@
 	if (len < 2)
 		return -EILSEQ;
 
-	n = ntohs(get_unaligned((u16 *) data)); 
+	n = ntohs(get_unaligned((__be16 *) data));
 	data += 2; len -= 2;
 
 	if (len < n)
@@ -332,7 +332,7 @@
 	if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK]))
 		goto badframe;
 
-	s->eh.h_proto = get_unaligned((u16 *) (skb->data - 2));
+	s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
 
 	if (type & BNEP_EXT_HEADER) {
 		if (bnep_rx_extension(s, skb) < 0)
@@ -343,7 +343,7 @@
 	if (ntohs(s->eh.h_proto) == 0x8100) {
 		if (!skb_pull(skb, 4))
 			goto badframe;
-		s->eh.h_proto = get_unaligned((u16 *) (skb->data - 2));
+		s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
 	}
 	
 	/* We have to alloc new skb and copy data here :(. Because original skb
@@ -365,7 +365,7 @@
 	case BNEP_COMPRESSED_SRC_ONLY:
 		memcpy(__skb_put(nskb, ETH_ALEN), s->eh.h_dest, ETH_ALEN);
 		memcpy(__skb_put(nskb, ETH_ALEN), skb->mac.raw, ETH_ALEN);
-		put_unaligned(s->eh.h_proto, (u16 *) __skb_put(nskb, 2));
+		put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2));
 		break;
 
 	case BNEP_COMPRESSED_DST_ONLY:
@@ -375,7 +375,7 @@
 
 	case BNEP_GENERAL:
 		memcpy(__skb_put(nskb, ETH_ALEN * 2), skb->mac.raw, ETH_ALEN * 2);
-		put_unaligned(s->eh.h_proto, (u16 *) __skb_put(nskb, 2));
+		put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2));
 		break;
 	}
 
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 7f7b27d..67a002a 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -158,14 +158,15 @@
 static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
 {
 	struct ethhdr *eh = (void *) skb->data;
+	u16 proto = ntohs(eh->h_proto);
 	
-	if (ntohs(eh->h_proto) >= 1536)
-		return eh->h_proto;
+	if (proto >= 1536)
+		return proto;
 		
-	if (get_unaligned((u16 *) skb->data) == 0xFFFF)
-		return htons(ETH_P_802_3);
+	if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF))
+		return ETH_P_802_3;
 		
-	return htons(ETH_P_802_2);
+	return ETH_P_802_2;
 }
 
 static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 3eeeb7a..d4c9356 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -237,9 +237,9 @@
 	kfree(data);
 }
 
-static void add_conn(void *data)
+static void add_conn(struct work_struct *work)
 {
-	struct hci_conn *conn = data;
+	struct hci_conn *conn = container_of(work, struct hci_conn, work);
 	int i;
 
 	if (device_register(&conn->dev) < 0) {
@@ -272,14 +272,14 @@
 
 	dev_set_drvdata(&conn->dev, conn);
 
-	INIT_WORK(&conn->work, add_conn, (void *) conn);
+	INIT_WORK(&conn->work, add_conn);
 
 	schedule_work(&conn->work);
 }
 
-static void del_conn(void *data)
+static void del_conn(struct work_struct *work)
 {
-	struct hci_conn *conn = data;
+	struct hci_conn *conn = container_of(work, struct hci_conn, work);
 	device_del(&conn->dev);
 }
 
@@ -287,7 +287,7 @@
 {
 	BT_DBG("conn %p", conn);
 
-	INIT_WORK(&conn->work, del_conn, (void *) conn);
+	INIT_WORK(&conn->work, del_conn);
 
 	schedule_work(&conn->work);
 }
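
add_conn()/del_conn() above show the other half of the workqueue rework: the work item is embedded in the object and the callback recovers its owner with container_of() instead of receiving a void * argument. A generic sketch with made-up names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct widget {
	struct work_struct work;
	/* ... other fields ... */
};

static void widget_work(struct work_struct *work)
{
	struct widget *w = container_of(work, struct widget, work);

	/* operate on w here */
}

static void widget_kick(struct widget *w)
{
	INIT_WORK(&w->work, widget_work);
	schedule_work(&w->work);
}
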
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index bbf78e6..29a8fa4 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -770,7 +770,7 @@
 	long timeo;
 	int err = 0;
 
-	lock_sock(sk);
+	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 
 	if (sk->sk_state != BT_LISTEN) {
 		err = -EBADFD;
@@ -792,7 +792,7 @@
 
 		release_sock(sk);
 		timeo = schedule_timeout(timeo);
-		lock_sock(sk);
+		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 
 		if (sk->sk_state != BT_LISTEN) {
 			err = -EBADFD;
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index ddc4e9d..278c867 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -854,7 +854,7 @@
 	rpn->flow_ctrl     = flow_ctrl_settings;
 	rpn->xon_char      = xon_char;
 	rpn->xoff_char     = xoff_char;
-	rpn->param_mask    = param_mask;
+	rpn->param_mask    = cpu_to_le16(param_mask);
 
 	*ptr = __fcs(buf); ptr++;
 
@@ -1018,7 +1018,7 @@
 
 	if (len > 127) {
 		hdr = (void *) skb_push(skb, 4);
-		put_unaligned(htobs(__len16(len)), (u16 *) &hdr->len);
+		put_unaligned(htobs(__len16(len)), (__le16 *) &hdr->len);
 	} else {
 		hdr = (void *) skb_push(skb, 3);
 		hdr->len = __len8(len);
@@ -1343,7 +1343,7 @@
 	/* Check for sane values, ignore/accept bit_rate, 8 bits, 1 stop bit,
 	 * no parity, no flow control lines, normal XON/XOFF chars */
 
-	if (rpn->param_mask & RFCOMM_RPN_PM_BITRATE) {
+	if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_BITRATE)) {
 		bit_rate = rpn->bit_rate;
 		if (bit_rate != RFCOMM_RPN_BR_115200) {
 			BT_DBG("RPN bit rate mismatch 0x%x", bit_rate);
@@ -1352,7 +1352,7 @@
 		}
 	}
 
-	if (rpn->param_mask & RFCOMM_RPN_PM_DATA) {
+	if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_DATA)) {
 		data_bits = __get_rpn_data_bits(rpn->line_settings);
 		if (data_bits != RFCOMM_RPN_DATA_8) {
 			BT_DBG("RPN data bits mismatch 0x%x", data_bits);
@@ -1361,7 +1361,7 @@
 		}
 	}
 
-	if (rpn->param_mask & RFCOMM_RPN_PM_STOP) {
+	if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_STOP)) {
 		stop_bits = __get_rpn_stop_bits(rpn->line_settings);
 		if (stop_bits != RFCOMM_RPN_STOP_1) {
 			BT_DBG("RPN stop bits mismatch 0x%x", stop_bits);
@@ -1370,7 +1370,7 @@
 		}
 	}
 
-	if (rpn->param_mask & RFCOMM_RPN_PM_PARITY) {
+	if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_PARITY)) {
 		parity = __get_rpn_parity(rpn->line_settings);
 		if (parity != RFCOMM_RPN_PARITY_NONE) {
 			BT_DBG("RPN parity mismatch 0x%x", parity);
@@ -1379,7 +1379,7 @@
 		}
 	}
 
-	if (rpn->param_mask & RFCOMM_RPN_PM_FLOW) {
+	if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_FLOW)) {
 		flow_ctrl = rpn->flow_ctrl;
 		if (flow_ctrl != RFCOMM_RPN_FLOW_NONE) {
 			BT_DBG("RPN flow ctrl mismatch 0x%x", flow_ctrl);
@@ -1388,7 +1388,7 @@
 		}
 	}
 
-	if (rpn->param_mask & RFCOMM_RPN_PM_XON) {
+	if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_XON)) {
 		xon_char = rpn->xon_char;
 		if (xon_char != RFCOMM_RPN_XON_CHAR) {
 			BT_DBG("RPN XON char mismatch 0x%x", xon_char);
@@ -1397,7 +1397,7 @@
 		}
 	}
 
-	if (rpn->param_mask & RFCOMM_RPN_PM_XOFF) {
+	if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_XOFF)) {
 		xoff_char = rpn->xoff_char;
 		if (xoff_char != RFCOMM_RPN_XOFF_CHAR) {
 			BT_DBG("RPN XOFF char mismatch 0x%x", xoff_char);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index d9f0486..8ca448d 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -23,7 +23,7 @@
 #include <asm/atomic.h>
 #include "br_private.h"
 
-static kmem_cache_t *br_fdb_cache __read_mostly;
+static struct kmem_cache *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		      const unsigned char *addr);
 
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f753c40..55bb263 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -77,12 +77,16 @@
  * Called from work queue to allow for calling functions that
  * might sleep (such as speed check), and to debounce.
  */
-static void port_carrier_check(void *arg)
+static void port_carrier_check(struct work_struct *work)
 {
-	struct net_device *dev = arg;
 	struct net_bridge_port *p;
+	struct net_device *dev;
 	struct net_bridge *br;
 
+	dev = container_of(work, struct net_bridge_port,
+			   carrier_check.work)->dev;
+	work_release(work);
+
 	rtnl_lock();
 	p = dev->br_port;
 	if (!p)
@@ -276,7 +280,7 @@
 	p->port_no = index;
 	br_init_port(p);
 	p->state = BR_STATE_DISABLED;
-	INIT_WORK(&p->carrier_check, port_carrier_check, dev);
+	INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check);
 	br_stp_port_timer_init(p);
 
 	kobject_init(&p->kobj);
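
br_if.c converts carrier_check from a work_struct carrying a data pointer into a delayed_work whose handler recovers its context with container_of(). A minimal sketch of that pattern; the names are illustrative, and it uses the plain INIT_DELAYED_WORK() form rather than the _NAR/work_release() variant the bridge code needs for its own teardown ordering:

#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct example_port {				/* hypothetical container */
	struct net_device	*dev;
	struct delayed_work	carrier_check;
};

static void example_carrier_check(struct work_struct *work)
{
	/* the handler receives the work pointer and derives its object */
	struct example_port *p =
		container_of(work, struct example_port, carrier_check.work);

	pr_debug("checking carrier on %s\n", p->dev->name);
	/* ... speed check, debounce, update port state ... */
}

static void example_port_init(struct example_port *p)
{
	INIT_DELAYED_WORK(&p->carrier_check, example_carrier_check);
	schedule_delayed_work(&p->carrier_check, HZ / 10);	/* debounce */
}
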
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index ac181be..bd221ad 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -34,13 +34,13 @@
 #include <linux/netfilter_ipv6.h>
 #include <linux/netfilter_arp.h>
 #include <linux/in_route.h>
+#include <linux/inetdevice.h>
 
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/route.h>
 
 #include <asm/uaccess.h>
-#include <asm/checksum.h>
 #include "br_private.h"
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
@@ -222,10 +222,14 @@
  *
  * Otherwise, the packet is considered to be routed and we just
  * change the destination MAC address so that the packet will
- * later be passed up to the IP stack to be routed.
+ * later be passed up to the IP stack to be routed. For a redirected
+ * packet, ip_route_input() will give back the localhost as output device,
+ * which differs from the bridge device.
  *
  * Let us now consider the case that ip_route_input() fails:
  *
+ * This can be because the destination address is martian, in which case
+ * the packet will be dropped.
  * After a "echo '0' > /proc/sys/net/ipv4/ip_forward" ip_route_input()
  * will fail, while __ip_route_output_key() will return success. The source
  * address for __ip_route_output_key() is set to zero, so __ip_route_output_key
@@ -238,7 +242,8 @@
  *
  * --Lennert, 20020411
  * --Bart, 20020416 (updated)
- * --Bart, 20021007 (updated) */
+ * --Bart, 20021007 (updated)
+ * --Bart, 20062711 (updated) */
 static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
 {
 	if (skb->pkt_type == PACKET_OTHERHOST) {
@@ -265,15 +270,15 @@
 	struct net_device *dev = skb->dev;
 	struct iphdr *iph = skb->nh.iph;
 	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+	int err;
 
 	if (nf_bridge->mask & BRNF_PKT_TYPE) {
 		skb->pkt_type = PACKET_OTHERHOST;
 		nf_bridge->mask ^= BRNF_PKT_TYPE;
 	}
 	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
-
 	if (dnat_took_place(skb)) {
-		if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev)) {
+		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
 			struct rtable *rt;
 			struct flowi fl = {
 				.nl_u = {
@@ -284,19 +289,33 @@
 				},
 				.proto = 0,
 			};
+			struct in_device *in_dev = in_dev_get(dev);
+
+			/* If err equals -EHOSTUNREACH the error is due to a
+			 * martian destination or due to the fact that
+			 * forwarding is disabled. For most martian packets,
+			 * ip_route_output_key() will fail. It won't fail for 2 types of
+			 * martian destinations: loopback destinations and destination
+			 * 0.0.0.0. In both cases the packet will be dropped because the
+			 * destination is the loopback device and not the bridge. */
+			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
+				goto free_skb;
 
 			if (!ip_route_output_key(&rt, &fl)) {
 				/* - Bridged-and-DNAT'ed traffic doesn't
-				 *   require ip_forwarding.
-				 * - Deal with redirected traffic. */
-				if (((struct dst_entry *)rt)->dev == dev ||
-				    rt->rt_type == RTN_LOCAL) {
+				 *   require ip_forwarding. */
+				if (((struct dst_entry *)rt)->dev == dev) {
 					skb->dst = (struct dst_entry *)rt;
 					goto bridged_dnat;
 				}
+				/* we are sure that forwarding is disabled, so printing
+				 * this message is no problem. Note that the packet could
+				 * still have a martian destination address, in which case
+				 * the packet could be dropped even if forwarding were enabled */
 				__br_dnat_complain();
 				dst_release((struct dst_entry *)rt);
 			}
+free_skb:
 			kfree_skb(skb);
 			return 0;
 		} else {
@@ -381,7 +400,7 @@
 		case IPV6_TLV_JUMBO:
 			if (skb->nh.raw[off + 1] != 4 || (off & 3) != 2)
 				goto bad;
-			pkt_len = ntohl(*(u32 *) (skb->nh.raw + off + 2));
+			pkt_len = ntohl(*(__be32 *) (skb->nh.raw + off + 2));
 			if (pkt_len <= IPV6_MAXPLEN ||
 			    skb->nh.ipv6h->payload_len)
 				goto bad;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 8f66119..a913968 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -15,6 +15,18 @@
 #include <net/netlink.h>
 #include "br_private.h"
 
+static inline size_t br_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+	       + nla_total_size(4) /* IFLA_MASTER */
+	       + nla_total_size(4) /* IFLA_MTU */
+	       + nla_total_size(4) /* IFLA_LINK */
+	       + nla_total_size(1) /* IFLA_OPERSTATE */
+	       + nla_total_size(1); /* IFLA_PROTINFO */
+}
+
 /*
  * Create one netlink message for one interface
  * Contains port and master info as well as carrier and bridge state.
@@ -24,51 +36,43 @@
 {
 	const struct net_bridge *br = port->br;
 	const struct net_device *dev = port->dev;
-	struct ifinfomsg *r;
+	struct ifinfomsg *hdr;
 	struct nlmsghdr *nlh;
-	unsigned char *b = skb->tail;
-	u32 mtu = dev->mtu;
 	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
-	u8 portstate = port->state;
 
 	pr_debug("br_fill_info event %d port %s master %s\n",
 		 event, dev->name, br->dev->name);
 
-	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
-	r = NLMSG_DATA(nlh);
-	r->ifi_family = AF_BRIDGE;
-	r->__ifi_pad = 0;
-	r->ifi_type = dev->type;
-	r->ifi_index = dev->ifindex;
-	r->ifi_flags = dev_get_flags(dev);
-	r->ifi_change = 0;
+	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
+	if (nlh == NULL)
+		return -ENOBUFS;
 
-	RTA_PUT(skb, IFLA_IFNAME, strlen(dev->name)+1, dev->name);
+	hdr = nlmsg_data(nlh);
+	hdr->ifi_family = AF_BRIDGE;
+	hdr->__ifi_pad = 0;
+	hdr->ifi_type = dev->type;
+	hdr->ifi_index = dev->ifindex;
+	hdr->ifi_flags = dev_get_flags(dev);
+	hdr->ifi_change = 0;
 
-	RTA_PUT(skb, IFLA_MASTER, sizeof(int), &br->dev->ifindex);
+	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
+	NLA_PUT_U32(skb, IFLA_MASTER, br->dev->ifindex);
+	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
+	NLA_PUT_U8(skb, IFLA_OPERSTATE, operstate);
 
 	if (dev->addr_len)
-		RTA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
+		NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
 
-	RTA_PUT(skb, IFLA_MTU, sizeof(mtu), &mtu);
 	if (dev->ifindex != dev->iflink)
-		RTA_PUT(skb, IFLA_LINK, sizeof(int), &dev->iflink);
-
-
-	RTA_PUT(skb, IFLA_OPERSTATE, sizeof(operstate), &operstate);
+		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
 
 	if (event == RTM_NEWLINK)
-		RTA_PUT(skb, IFLA_PROTINFO, sizeof(portstate), &portstate);
+		NLA_PUT_U8(skb, IFLA_PROTINFO, port->state);
 
-	nlh->nlmsg_len = skb->tail - b;
+	return nlmsg_end(skb, nlh);
 
-	return skb->len;
-
-nlmsg_failure:
-rtattr_failure:
-
-	skb_trim(skb, b - skb->data);
-	return -EINVAL;
+nla_put_failure:
+	return nlmsg_cancel(skb, nlh);
 }
 
 /*
@@ -77,19 +81,16 @@
 void br_ifinfo_notify(int event, struct net_bridge_port *port)
 {
 	struct sk_buff *skb;
-	int payload = sizeof(struct ifinfomsg) + 128;
 	int err = -ENOBUFS;
 
 	pr_debug("bridge notify event=%d\n", event);
-	skb = nlmsg_new(nlmsg_total_size(payload), GFP_ATOMIC);
+	skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC);
 	if (skb == NULL)
 		goto errout;
 
 	err = br_fill_ifinfo(skb, port, 0, 0, event, 0);
-	if (err < 0) {
-		kfree_skb(skb);
-		goto errout;
-	}
+	/* failure implies BUG in br_nlmsg_size() */
+	BUG_ON(err < 0);
 
 	err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
 errout:
@@ -104,25 +105,18 @@
 {
 	struct net_device *dev;
 	int idx;
-	int s_idx = cb->args[0];
-	int err = 0;
 
 	read_lock(&dev_base_lock);
 	for (dev = dev_base, idx = 0; dev; dev = dev->next) {
-		struct net_bridge_port *p = dev->br_port;
-
 		/* not a bridge port */
-		if (!p)
-			continue;
+		if (dev->br_port == NULL || idx < cb->args[0])
+			goto skip;
 
-		if (idx < s_idx)
-			goto cont;
-
-		err = br_fill_ifinfo(skb, p, NETLINK_CB(cb->skb).pid,
-				     cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI);
-		if (err <= 0)
+		if (br_fill_ifinfo(skb, dev->br_port, NETLINK_CB(cb->skb).pid,
+				   cb->nlh->nlmsg_seq, RTM_NEWLINK,
+				   NLM_F_MULTI) < 0)
 			break;
-cont:
+skip:
 		++idx;
 	}
 	read_unlock(&dev_base_lock);
@@ -138,26 +132,27 @@
  */
 static int br_rtm_setlink(struct sk_buff *skb,  struct nlmsghdr *nlh, void *arg)
 {
-	struct rtattr  **rta = arg;
-	struct ifinfomsg *ifm = NLMSG_DATA(nlh);
+	struct ifinfomsg *ifm;
+	struct nlattr *protinfo;
 	struct net_device *dev;
 	struct net_bridge_port *p;
 	u8 new_state;
 
+	if (nlmsg_len(nlh) < sizeof(*ifm))
+		return -EINVAL;
+
+	ifm = nlmsg_data(nlh);
 	if (ifm->ifi_family != AF_BRIDGE)
 		return -EPFNOSUPPORT;
 
-	/* Must pass valid state as PROTINFO */
-	if (rta[IFLA_PROTINFO-1]) {
-		u8 *pstate = RTA_DATA(rta[IFLA_PROTINFO-1]);
-		new_state = *pstate;
-	} else
+	protinfo = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_PROTINFO);
+	if (!protinfo || nla_len(protinfo) < sizeof(u8))
 		return -EINVAL;
 
+	new_state = nla_get_u8(protinfo);
 	if (new_state > BR_STATE_BLOCKING)
 		return -EINVAL;
 
-	/* Find bridge port */
 	dev = __dev_get_by_index(ifm->ifi_index);
 	if (!dev)
 		return -ENODEV;
@@ -170,10 +165,8 @@
 	if (p->br->stp_enabled)
 		return -EBUSY;
 
-	if (!netif_running(dev))
-		return -ENETDOWN;
-
-	if (!netif_carrier_ok(dev) && new_state != BR_STATE_DISABLED)
+	if (!netif_running(dev) ||
+	    (!netif_carrier_ok(dev) && new_state != BR_STATE_DISABLED))
 		return -ENETDOWN;
 
 	p->state = new_state;
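
The br_netlink.c rewrite replaces the RTA_PUT() macros and the arbitrary sizeof(struct ifinfomsg)+128 allocation with a precomputed per-message size and the typed nla_put_*() helpers; with the size computed up front, a fill failure later indicates a bug in the size function, which is why br_ifinfo_notify() now BUG()s instead of silently dropping the event. A minimal sketch of the pattern; names are illustrative and only IFLA_IFNAME and IFLA_MTU are carried to keep it short:

#include <net/netlink.h>
#include <linux/rtnetlink.h>

static inline size_t example_msg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ)	/* IFLA_IFNAME */
	       + nla_total_size(4);		/* IFLA_MTU */
}

static int example_fill(struct sk_buff *skb, const struct net_device *dev,
			u32 pid, u32 seq)
{
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family	= AF_UNSPEC;
	hdr->__ifi_pad	= 0;
	hdr->ifi_type	= dev->type;
	hdr->ifi_index	= dev->ifindex;
	hdr->ifi_flags	= 0;
	hdr->ifi_change	= 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

The caller allocates with nlmsg_new(example_msg_size(), GFP_ATOMIC), so the size and fill functions must agree on the attribute set.
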
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 74258d8..3a534e9 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,7 +82,7 @@
 	struct timer_list		hold_timer;
 	struct timer_list		message_age_timer;
 	struct kobject			kobj;
-	struct work_struct		carrier_check;
+	struct delayed_work		carrier_check;
 	struct rcu_head			rcu;
 };
 
diff --git a/net/bridge/netfilter/ebt_802_3.c b/net/bridge/netfilter/ebt_802_3.c
index d42f63f..9abbc09 100644
--- a/net/bridge/netfilter/ebt_802_3.c
+++ b/net/bridge/netfilter/ebt_802_3.c
@@ -17,7 +17,7 @@
 {
 	struct ebt_802_3_info *info = (struct ebt_802_3_info *)data;
 	struct ebt_802_3_hdr *hdr = ebt_802_3_hdr(skb);
-	uint16_t type = hdr->llc.ui.ctrl & IS_UI ? hdr->llc.ui.type : hdr->llc.ni.type;
+	__be16 type = hdr->llc.ui.ctrl & IS_UI ? hdr->llc.ui.type : hdr->llc.ni.type;
 
 	if (info->bitmask & EBT_802_3_SAP) {
 		if (FWINV(info->sap != hdr->llc.ui.ssap, EBT_802_3_SAP)) 
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index a614485..ce97c42 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -15,7 +15,7 @@
 #include <linux/module.h>
 
 static int ebt_mac_wormhash_contains(const struct ebt_mac_wormhash *wh,
-				     const char *mac, uint32_t ip)
+				     const char *mac, __be32 ip)
 {
 	/* You may be puzzled as to how this code works.
 	 * Some tricks were used, refer to 
@@ -70,7 +70,7 @@
 	return 0;
 }
 
-static int get_ip_dst(const struct sk_buff *skb, uint32_t *addr)
+static int get_ip_dst(const struct sk_buff *skb, __be32 *addr)
 {
 	if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) {
 		struct iphdr _iph, *ih;
@@ -81,16 +81,16 @@
 		*addr = ih->daddr;
 	} else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
 		struct arphdr _arph, *ah;
-		uint32_t buf, *bp;
+		__be32 buf, *bp;
 
 		ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
 		if (ah == NULL ||
-		    ah->ar_pln != sizeof(uint32_t) ||
+		    ah->ar_pln != sizeof(__be32) ||
 		    ah->ar_hln != ETH_ALEN)
 			return -1;
 		bp = skb_header_pointer(skb, sizeof(struct arphdr) +
-					2 * ETH_ALEN + sizeof(uint32_t),
-					sizeof(uint32_t), &buf);
+					2 * ETH_ALEN + sizeof(__be32),
+					sizeof(__be32), &buf);
 		if (bp == NULL)
 			return -1;
 		*addr = *bp;
@@ -98,7 +98,7 @@
 	return 0;
 }
 
-static int get_ip_src(const struct sk_buff *skb, uint32_t *addr)
+static int get_ip_src(const struct sk_buff *skb, __be32 *addr)
 {
 	if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) {
 		struct iphdr _iph, *ih;
@@ -109,15 +109,15 @@
 		*addr = ih->saddr;
 	} else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
 		struct arphdr _arph, *ah;
-		uint32_t buf, *bp;
+		__be32 buf, *bp;
 
 		ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
 		if (ah == NULL ||
-		    ah->ar_pln != sizeof(uint32_t) ||
+		    ah->ar_pln != sizeof(__be32) ||
 		    ah->ar_hln != ETH_ALEN)
 			return -1;
 		bp = skb_header_pointer(skb, sizeof(struct arphdr) +
-					ETH_ALEN, sizeof(uint32_t), &buf);
+					ETH_ALEN, sizeof(__be32), &buf);
 		if (bp == NULL)
 			return -1;
 		*addr = *bp;
@@ -133,7 +133,7 @@
 	struct ebt_among_info *info = (struct ebt_among_info *) data;
 	const char *dmac, *smac;
 	const struct ebt_mac_wormhash *wh_dst, *wh_src;
-	uint32_t dip = 0, sip = 0;
+	__be32 dip = 0, sip = 0;
 
 	wh_dst = ebt_among_wh_dst(info);
 	wh_src = ebt_among_wh_src(info);
diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c
index a6c81d9..9c59980 100644
--- a/net/bridge/netfilter/ebt_arp.c
+++ b/net/bridge/netfilter/ebt_arp.c
@@ -35,10 +35,10 @@
 		return EBT_NOMATCH;
 
 	if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP)) {
-		uint32_t _addr, *ap;
+		__be32 _addr, *ap;
 
 		/* IPv4 addresses are always 4 bytes */
-		if (ah->ar_pln != sizeof(uint32_t))
+		if (ah->ar_pln != sizeof(__be32))
 			return EBT_NOMATCH;
 		if (info->bitmask & EBT_ARP_SRC_IP) {
 			ap = skb_header_pointer(skb, sizeof(struct arphdr) +
@@ -53,7 +53,7 @@
 
 		if (info->bitmask & EBT_ARP_DST_IP) {
 			ap = skb_header_pointer(skb, sizeof(struct arphdr) +
-						2*ah->ar_hln+sizeof(uint32_t),
+						2*ah->ar_hln+sizeof(__be32),
 						sizeof(_addr), &_addr);
 			if (ap == NULL)
 				return EBT_NOMATCH;
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index 65b665c..e4c6424 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -20,8 +20,8 @@
 #include <linux/module.h>
 
 struct tcpudphdr {
-	uint16_t src;
-	uint16_t dst;
+	__be16 src;
+	__be16 dst;
 };
 
 static int ebt_filter_ip(const struct sk_buff *skb, const struct net_device *in,
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 466ed34..a184f87 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -38,8 +38,8 @@
 
 struct tcpudphdr
 {
-	uint16_t src;
-	uint16_t dst;
+	__be16 src;
+	__be16 dst;
 };
 
 struct arppayload
@@ -130,7 +130,7 @@
 		 * then log the ARP payload */
 		if (ah->ar_hrd == htons(1) &&
 		    ah->ar_hln == ETH_ALEN &&
-		    ah->ar_pln == sizeof(uint32_t)) {
+		    ah->ar_pln == sizeof(__be32)) {
 			struct arppayload _arpp, *ap;
 
 			ap = skb_header_pointer(skb, sizeof(_arph),
diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c
index b54306a..62d23c7 100644
--- a/net/bridge/netfilter/ebt_mark.c
+++ b/net/bridge/netfilter/ebt_mark.c
@@ -25,15 +25,15 @@
 	int action = info->target & -16;
 
 	if (action == MARK_SET_VALUE)
-		(*pskb)->nfmark = info->mark;
+		(*pskb)->mark = info->mark;
 	else if (action == MARK_OR_VALUE)
-		(*pskb)->nfmark |= info->mark;
+		(*pskb)->mark |= info->mark;
 	else if (action == MARK_AND_VALUE)
-		(*pskb)->nfmark &= info->mark;
+		(*pskb)->mark &= info->mark;
 	else
-		(*pskb)->nfmark ^= info->mark;
+		(*pskb)->mark ^= info->mark;
 
-	return info->target | -16;
+	return info->target | ~EBT_VERDICT_BITS;
 }
 
 static int ebt_target_mark_check(const char *tablename, unsigned int hookmask,
@@ -44,13 +44,13 @@
 
 	if (datalen != EBT_ALIGN(sizeof(struct ebt_mark_t_info)))
 		return -EINVAL;
-	tmp = info->target | -16;
+	tmp = info->target | ~EBT_VERDICT_BITS;
 	if (BASE_CHAIN && tmp == EBT_RETURN)
 		return -EINVAL;
 	CLEAR_BASE_CHAIN_BIT;
 	if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0)
 		return -EINVAL;
-	tmp = info->target & -16;
+	tmp = info->target & ~EBT_VERDICT_BITS;
 	if (tmp != MARK_SET_VALUE && tmp != MARK_OR_VALUE &&
 	    tmp != MARK_AND_VALUE && tmp != MARK_XOR_VALUE)
 		return -EINVAL;
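
The ebt_mark changes track the skb->nfmark to skb->mark rename and spell the target masking with EBT_VERDICT_BITS instead of the magic -16; the two are the same value, since the verdict occupies the low EBT_VERDICT_BITS nibble and ~EBT_VERDICT_BITS == -16. The target field packs a MARK_* action in the upper bits and a standard verdict in the low bits. A small illustration of the split, with a hypothetical helper name:

#include <linux/netfilter_bridge/ebtables.h>

/* Sketch only: decode the packed target value used by ebt_mark. */
static void example_split_target(int target, int *action, int *verdict)
{
	*action	 = target & ~EBT_VERDICT_BITS;	/* e.g. MARK_SET_VALUE */
	*verdict = target |  ~EBT_VERDICT_BITS;	/* negative standard verdict */
}
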
diff --git a/net/bridge/netfilter/ebt_mark_m.c b/net/bridge/netfilter/ebt_mark_m.c
index a6413e4..025869e 100644
--- a/net/bridge/netfilter/ebt_mark_m.c
+++ b/net/bridge/netfilter/ebt_mark_m.c
@@ -19,8 +19,8 @@
 	struct ebt_mark_m_info *info = (struct ebt_mark_m_info *) data;
 
 	if (info->bitmask & EBT_MARK_OR)
-		return !(!!(skb->nfmark & info->mask) ^ info->invert);
-	return !(((skb->nfmark & info->mask) == info->mark) ^ info->invert);
+		return !(!!(skb->mark & info->mask) ^ info->invert);
+	return !(((skb->mark & info->mask) == info->mark) ^ info->invert);
 }
 
 static int ebt_mark_check(const char *tablename, unsigned int hookmask,
diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
index cbb33e2..a507221 100644
--- a/net/bridge/netfilter/ebt_snat.c
+++ b/net/bridge/netfilter/ebt_snat.c
@@ -12,6 +12,8 @@
 #include <linux/netfilter_bridge/ebt_nat.h>
 #include <linux/module.h>
 #include <net/sock.h>
+#include <linux/if_arp.h>
+#include <net/arp.h>
 
 static int ebt_target_snat(struct sk_buff **pskb, unsigned int hooknr,
    const struct net_device *in, const struct net_device *out,
@@ -31,24 +33,43 @@
 		*pskb = nskb;
 	}
 	memcpy(eth_hdr(*pskb)->h_source, info->mac, ETH_ALEN);
-	return info->target;
+	if (!(info->target & NAT_ARP_BIT) &&
+	    eth_hdr(*pskb)->h_proto == htons(ETH_P_ARP)) {
+		struct arphdr _ah, *ap;
+
+		ap = skb_header_pointer(*pskb, 0, sizeof(_ah), &_ah);
+		if (ap == NULL)
+			return EBT_DROP;
+		if (ap->ar_hln != ETH_ALEN)
+			goto out;
+		if (skb_store_bits(*pskb, sizeof(_ah), info->mac,ETH_ALEN))
+			return EBT_DROP;
+	}
+out:
+	return info->target | ~EBT_VERDICT_BITS;
 }
 
 static int ebt_target_snat_check(const char *tablename, unsigned int hookmask,
    const struct ebt_entry *e, void *data, unsigned int datalen)
 {
 	struct ebt_nat_info *info = (struct ebt_nat_info *) data;
+	int tmp;
 
 	if (datalen != EBT_ALIGN(sizeof(struct ebt_nat_info)))
 		return -EINVAL;
-	if (BASE_CHAIN && info->target == EBT_RETURN)
+	tmp = info->target | ~EBT_VERDICT_BITS;
+	if (BASE_CHAIN && tmp == EBT_RETURN)
 		return -EINVAL;
 	CLEAR_BASE_CHAIN_BIT;
 	if (strcmp(tablename, "nat"))
 		return -EINVAL;
 	if (hookmask & ~(1 << NF_BR_POST_ROUTING))
 		return -EINVAL;
-	if (INVALID_TARGET)
+
+	if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0)
+		return -EINVAL;
+	tmp = info->target | EBT_VERDICT_BITS;
+	if ((tmp & ~NAT_ARP_BIT) != ~NAT_ARP_BIT)
 		return -EINVAL;
 	return 0;
 }
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 9f950db..c1af68b 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -168,7 +168,7 @@
 	if (ub->qlen == 1)
 		skb_set_timestamp(ub->skb, &pm->stamp);
 	pm->data_len = copy_len;
-	pm->mark = skb->nfmark;
+	pm->mark = skb->mark;
 	pm->hook = hooknr;
 	if (uloginfo->prefix != NULL)
 		strcpy(pm->prefix, uloginfo->prefix);
diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c
index a2b4528..7ee3776 100644
--- a/net/bridge/netfilter/ebt_vlan.c
+++ b/net/bridge/netfilter/ebt_vlan.c
@@ -55,7 +55,7 @@
 	unsigned short id;	/* VLAN ID, given from frame TCI */
 	unsigned char prio;	/* user_priority, given from frame TCI */
 	/* VLAN encapsulated Type/Length field, given from orig frame */
-	unsigned short encap;
+	__be16 encap;
 
 	fp = skb_header_pointer(skb, 0, sizeof(_frame), &_frame);
 	if (fp == NULL)
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index 9a6e548..d37ce04 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -23,7 +23,7 @@
 	.policy		= EBT_ACCEPT,
 };
 
-static struct ebt_replace initial_table =
+static struct ebt_replace_kernel initial_table =
 {
 	.name		= "broute",
 	.valid_hooks	= 1 << NF_BR_BROUTING,
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 3d5bd44..127135e 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -30,7 +30,7 @@
 	},
 };
 
-static struct ebt_replace initial_table =
+static struct ebt_replace_kernel initial_table =
 {
 	.name		= "filter",
 	.valid_hooks	= FILTER_VALID_HOOKS,
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 04dd42e..9c50488 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -30,7 +30,7 @@
 	}
 };
 
-static struct ebt_replace initial_table =
+static struct ebt_replace_kernel initial_table =
 {
 	.name		= "nat",
 	.valid_hooks	= NAT_VALID_HOOKS,
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 9f85666..bee558a4 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -338,10 +338,11 @@
    const char *name, unsigned int hookmask, unsigned int *cnt)
 {
 	struct ebt_match *match;
+	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
 	int ret;
 
-	if (((char *)m) + m->match_size + sizeof(struct ebt_entry_match) >
-	   ((char *)e) + e->watchers_offset)
+	if (left < sizeof(struct ebt_entry_match) ||
+	    left - sizeof(struct ebt_entry_match) < m->match_size)
 		return -EINVAL;
 	match = find_match_lock(m->u.name, &ret, &ebt_mutex);
 	if (!match)
@@ -367,10 +368,11 @@
    const char *name, unsigned int hookmask, unsigned int *cnt)
 {
 	struct ebt_watcher *watcher;
+	size_t left = ((char *)e + e->target_offset) - (char *)w;
 	int ret;
 
-	if (((char *)w) + w->watcher_size + sizeof(struct ebt_entry_watcher) >
-	   ((char *)e) + e->target_offset)
+	if (left < sizeof(struct ebt_entry_watcher) ||
+	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
 		return -EINVAL;
 	watcher = find_watcher_lock(w->u.name, &ret, &ebt_mutex);
 	if (!watcher)
@@ -391,35 +393,91 @@
 	return 0;
 }
 
+static int ebt_verify_pointers(struct ebt_replace *repl,
+			       struct ebt_table_info *newinfo)
+{
+	unsigned int limit = repl->entries_size;
+	unsigned int valid_hooks = repl->valid_hooks;
+	unsigned int offset = 0;
+	int i;
+
+	for (i = 0; i < NF_BR_NUMHOOKS; i++)
+		newinfo->hook_entry[i] = NULL;
+
+	newinfo->entries_size = repl->entries_size;
+	newinfo->nentries = repl->nentries;
+
+	while (offset < limit) {
+		size_t left = limit - offset;
+		struct ebt_entry *e = (void *)newinfo->entries + offset;
+
+		if (left < sizeof(unsigned int))
+			break;
+
+		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
+			if ((valid_hooks & (1 << i)) == 0)
+				continue;
+			if ((char __user *)repl->hook_entry[i] ==
+			     repl->entries + offset)
+				break;
+		}
+
+		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
+			if (e->bitmask != 0) {
+				/* we make userspace set this right,
+				   so there is no misunderstanding */
+				BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
+					 "in distinguisher\n");
+				return -EINVAL;
+			}
+			if (i != NF_BR_NUMHOOKS)
+				newinfo->hook_entry[i] = (struct ebt_entries *)e;
+			if (left < sizeof(struct ebt_entries))
+				break;
+			offset += sizeof(struct ebt_entries);
+		} else {
+			if (left < sizeof(struct ebt_entry))
+				break;
+			if (left < e->next_offset)
+				break;
+			offset += e->next_offset;
+		}
+	}
+	if (offset != limit) {
+		BUGPRINT("entries_size too small\n");
+		return -EINVAL;
+	}
+
+	/* check if all valid hooks have a chain */
+	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
+		if (!newinfo->hook_entry[i] &&
+		   (valid_hooks & (1 << i))) {
+			BUGPRINT("Valid hook without chain\n");
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
 /*
  * this one is very careful, as it is the first function
  * to parse the userspace data
  */
 static inline int
 ebt_check_entry_size_and_hooks(struct ebt_entry *e,
-   struct ebt_table_info *newinfo, char *base, char *limit,
-   struct ebt_entries **hook_entries, unsigned int *n, unsigned int *cnt,
-   unsigned int *totalcnt, unsigned int *udc_cnt, unsigned int valid_hooks)
+   struct ebt_table_info *newinfo,
+   unsigned int *n, unsigned int *cnt,
+   unsigned int *totalcnt, unsigned int *udc_cnt)
 {
 	int i;
 
 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
-		if ((valid_hooks & (1 << i)) == 0)
-			continue;
-		if ( (char *)hook_entries[i] - base ==
-		   (char *)e - newinfo->entries)
+		if ((void *)e == (void *)newinfo->hook_entry[i])
 			break;
 	}
 	/* beginning of a new chain
 	   if i == NF_BR_NUMHOOKS it must be a user defined chain */
-	if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
-		if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) != 0) {
-			/* we make userspace set this right,
-			   so there is no misunderstanding */
-			BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
-			         "in distinguisher\n");
-			return -EINVAL;
-		}
+	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
 		/* this checks if the previous chain has as many entries
 		   as it said it has */
 		if (*n != *cnt) {
@@ -427,12 +485,6 @@
 		                 "in the chain\n");
 			return -EINVAL;
 		}
-		/* before we look at the struct, be sure it is not too big */
-		if ((char *)hook_entries[i] + sizeof(struct ebt_entries)
-		   > limit) {
-			BUGPRINT("entries_size too small\n");
-			return -EINVAL;
-		}
 		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
 		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
 			/* only RETURN from udc */
@@ -444,8 +496,6 @@
 		}
 		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
 			(*udc_cnt)++;
-		else
-			newinfo->hook_entry[i] = (struct ebt_entries *)e;
 		if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
 			BUGPRINT("counter_offset != totalcnt");
 			return -EINVAL;
@@ -466,7 +516,6 @@
 		BUGPRINT("target size too small\n");
 		return -EINVAL;
 	}
-
 	(*cnt)++;
 	(*totalcnt)++;
 	return 0;
@@ -485,17 +534,14 @@
  */
 static inline int
 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
-   struct ebt_entries **hook_entries, unsigned int *n, unsigned int valid_hooks,
-   struct ebt_cl_stack *udc)
+   unsigned int *n, struct ebt_cl_stack *udc)
 {
 	int i;
 
 	/* we're only interested in chain starts */
-	if (e->bitmask & EBT_ENTRY_OR_ENTRIES)
+	if (e->bitmask)
 		return 0;
 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
-		if ((valid_hooks & (1 << i)) == 0)
-			continue;
 		if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
 			break;
 	}
@@ -541,7 +587,7 @@
 {
 	struct ebt_entry_target *t;
 
-	if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0)
+	if (e->bitmask == 0)
 		return 0;
 	/* we're done */
 	if (cnt && (*cnt)-- == 0)
@@ -558,16 +604,17 @@
 
 static inline int
 ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
-   const char *name, unsigned int *cnt, unsigned int valid_hooks,
+   const char *name, unsigned int *cnt,
    struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
 {
 	struct ebt_entry_target *t;
 	struct ebt_target *target;
 	unsigned int i, j, hook = 0, hookmask = 0;
+	size_t gap = e->next_offset - e->target_offset;
 	int ret;
 
 	/* don't mess with the struct ebt_entries */
-	if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0)
+	if (e->bitmask == 0)
 		return 0;
 
 	if (e->bitmask & ~EBT_F_MASK) {
@@ -584,7 +631,7 @@
 	}
 	/* what hook do we belong to? */
 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
-		if ((valid_hooks & (1 << i)) == 0)
+		if (!newinfo->hook_entry[i])
 			continue;
 		if ((char *)newinfo->hook_entry[i] < (char *)e)
 			hook = i;
@@ -625,8 +672,7 @@
 
 	t->u.target = target;
 	if (t->u.target == &ebt_standard_target) {
-		if (e->target_offset + sizeof(struct ebt_standard_target) >
-		   e->next_offset) {
+		if (gap < sizeof(struct ebt_standard_target)) {
 			BUGPRINT("Standard target size too big\n");
 			ret = -EFAULT;
 			goto cleanup_watchers;
@@ -637,8 +683,7 @@
 			ret = -EFAULT;
 			goto cleanup_watchers;
 		}
-	} else if ((e->target_offset + t->target_size +
-	   sizeof(struct ebt_entry_target) > e->next_offset) ||
+	} else if (t->target_size > gap - sizeof(struct ebt_entry_target) ||
 	   (t->u.target->check &&
 	   t->u.target->check(name, hookmask, e, t->data, t->target_size) != 0)){
 		module_put(t->u.target->me);
@@ -708,7 +753,9 @@
 				BUGPRINT("loop\n");
 				return -1;
 			}
-			/* this can't be 0, so the above test is correct */
+			if (cl_s[i].hookmask & (1 << hooknr))
+				goto letscontinue;
+			/* this can't be 0, so the loop test is correct */
 			cl_s[i].cs.n = pos + 1;
 			pos = 0;
 			cl_s[i].cs.e = ((void *)e + e->next_offset);
@@ -728,42 +775,35 @@
 }
 
 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
-static int translate_table(struct ebt_replace *repl,
-   struct ebt_table_info *newinfo)
+static int translate_table(char *name, struct ebt_table_info *newinfo)
 {
 	unsigned int i, j, k, udc_cnt;
 	int ret;
 	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
 
 	i = 0;
-	while (i < NF_BR_NUMHOOKS && !(repl->valid_hooks & (1 << i)))
+	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
 		i++;
 	if (i == NF_BR_NUMHOOKS) {
 		BUGPRINT("No valid hooks specified\n");
 		return -EINVAL;
 	}
-	if (repl->hook_entry[i] != (struct ebt_entries *)repl->entries) {
+	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
 		BUGPRINT("Chains don't start at beginning\n");
 		return -EINVAL;
 	}
 	/* make sure chains are ordered after each other in same order
 	   as their corresponding hooks */
 	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
-		if (!(repl->valid_hooks & (1 << j)))
+		if (!newinfo->hook_entry[j])
 			continue;
-		if ( repl->hook_entry[j] <= repl->hook_entry[i] ) {
+		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
 			BUGPRINT("Hook order must be followed\n");
 			return -EINVAL;
 		}
 		i = j;
 	}
 
-	for (i = 0; i < NF_BR_NUMHOOKS; i++)
-		newinfo->hook_entry[i] = NULL;
-
-	newinfo->entries_size = repl->entries_size;
-	newinfo->nentries = repl->nentries;
-
 	/* do some early checkings and initialize some things */
 	i = 0; /* holds the expected nr. of entries for the chain */
 	j = 0; /* holds the up to now counted entries for the chain */
@@ -771,9 +811,8 @@
 	          newinfo->nentries afterwards */
 	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
 	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
-	   ebt_check_entry_size_and_hooks, newinfo, repl->entries,
-	   repl->entries + repl->entries_size, repl->hook_entry, &i, &j, &k,
-	   &udc_cnt, repl->valid_hooks);
+	   ebt_check_entry_size_and_hooks, newinfo,
+	   &i, &j, &k, &udc_cnt);
 
 	if (ret != 0)
 		return ret;
@@ -788,15 +827,6 @@
 		return -EINVAL;
 	}
 
-	/* check if all valid hooks have a chain */
-	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
-		if (newinfo->hook_entry[i] == NULL &&
-		   (repl->valid_hooks & (1 << i))) {
-			BUGPRINT("Valid hook without chain\n");
-			return -EINVAL;
-		}
-	}
-
 	/* get the location of the udc, put them in an array
 	   while we're at it, allocate the chainstack */
 	if (udc_cnt) {
@@ -824,8 +854,7 @@
 			return -ENOMEM;
 		i = 0; /* the i'th udc */
 		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
-		   ebt_get_udc_positions, newinfo, repl->hook_entry, &i,
-		   repl->valid_hooks, cl_s);
+		   ebt_get_udc_positions, newinfo, &i, cl_s);
 		/* sanity check */
 		if (i != udc_cnt) {
 			BUGPRINT("i != udc_cnt\n");
@@ -836,7 +865,7 @@
 
 	/* Check for loops */
 	for (i = 0; i < NF_BR_NUMHOOKS; i++)
-		if (repl->valid_hooks & (1 << i))
+		if (newinfo->hook_entry[i])
 			if (check_chainloops(newinfo->hook_entry[i],
 			   cl_s, udc_cnt, i, newinfo->entries)) {
 				vfree(cl_s);
@@ -856,8 +885,7 @@
 	/* used to know what we need to clean up if something goes wrong */
 	i = 0;
 	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
-	   ebt_check_entry, newinfo, repl->name, &i, repl->valid_hooks,
-	   cl_s, udc_cnt);
+	   ebt_check_entry, newinfo, name, &i, cl_s, udc_cnt);
 	if (ret != 0) {
 		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
 		   ebt_cleanup_entry, &i);
@@ -954,7 +982,11 @@
 
 	/* this can get initialized by translate_table() */
 	newinfo->chainstack = NULL;
-	ret = translate_table(&tmp, newinfo);
+	ret = ebt_verify_pointers(&tmp, newinfo);
+	if (ret != 0)
+		goto free_counterstmp;
+
+	ret = translate_table(tmp.name, newinfo);
 
 	if (ret != 0)
 		goto free_counterstmp;
@@ -1125,35 +1157,47 @@
 {
 	struct ebt_table_info *newinfo;
 	struct ebt_table *t;
+	struct ebt_replace_kernel *repl;
 	int ret, i, countersize;
+	void *p;
 
-	if (!table || !table->table ||!table->table->entries ||
-	    table->table->entries_size == 0 ||
-	    table->table->counters || table->private) {
+	if (!table || !(repl = table->table) || !repl->entries ||
+	    repl->entries_size == 0 ||
+	    repl->counters || table->private) {
 		BUGPRINT("Bad table data for ebt_register_table!!!\n");
 		return -EINVAL;
 	}
 
-	countersize = COUNTER_OFFSET(table->table->nentries) *
+	countersize = COUNTER_OFFSET(repl->nentries) *
 					(highest_possible_processor_id()+1);
 	newinfo = vmalloc(sizeof(*newinfo) + countersize);
 	ret = -ENOMEM;
 	if (!newinfo)
 		return -ENOMEM;
 
-	newinfo->entries = vmalloc(table->table->entries_size);
-	if (!(newinfo->entries))
+	p = vmalloc(repl->entries_size);
+	if (!p)
 		goto free_newinfo;
 
-	memcpy(newinfo->entries, table->table->entries,
-	   table->table->entries_size);
+	memcpy(p, repl->entries, repl->entries_size);
+	newinfo->entries = p;
+
+	newinfo->entries_size = repl->entries_size;
+	newinfo->nentries = repl->nentries;
 
 	if (countersize)
 		memset(newinfo->counters, 0, countersize);
 
 	/* fill in newinfo and parse the entries */
 	newinfo->chainstack = NULL;
-	ret = translate_table(table->table, newinfo);
+	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
+		if ((repl->valid_hooks & (1 << i)) == 0)
+			newinfo->hook_entry[i] = NULL;
+		else
+			newinfo->hook_entry[i] = p +
+				((char *)repl->hook_entry[i] - repl->entries);
+	}
+	ret = translate_table(repl->name, newinfo);
 	if (ret != 0) {
 		BUGPRINT("Translate_table failed\n");
 		goto free_chainstack;
@@ -1277,33 +1321,33 @@
 }
 
 static inline int ebt_make_matchname(struct ebt_entry_match *m,
-   char *base, char *ubase)
+   char *base, char __user *ubase)
 {
-	char *hlp = ubase - base + (char *)m;
+	char __user *hlp = ubase + ((char *)m - base);
 	if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
 		return -EFAULT;
 	return 0;
 }
 
 static inline int ebt_make_watchername(struct ebt_entry_watcher *w,
-   char *base, char *ubase)
+   char *base, char __user *ubase)
 {
-	char *hlp = ubase - base + (char *)w;
+	char __user *hlp = ubase + ((char *)w - base);
 	if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
 		return -EFAULT;
 	return 0;
 }
 
-static inline int ebt_make_names(struct ebt_entry *e, char *base, char *ubase)
+static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *ubase)
 {
 	int ret;
-	char *hlp;
+	char __user *hlp;
 	struct ebt_entry_target *t;
 
-	if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0)
+	if (e->bitmask == 0)
 		return 0;
 
-	hlp = ubase - base + (char *)e + e->target_offset;
+	hlp = ubase + (((char *)e + e->target_offset) - base);
 	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
 	
 	ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
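
The match and watcher size checks in ebtables.c are reworked so the arithmetic cannot wrap: the old form added a userspace-supplied size to a pointer and compared against the end, which an oversized value can overflow past; the new form computes the remaining room first and checks each component against it. A minimal sketch of the safe shape, with illustrative names:

#include <linux/types.h>
#include <linux/errno.h>

/* Sketch only: does hdr_size + payload_size fit in [obj, end)?
 * The caller guarantees obj <= end; both sizes come from userspace. */
static int example_fits(const char *obj, const char *end,
			size_t hdr_size, size_t payload_size)
{
	size_t left = end - obj;

	if (left < hdr_size)
		return -EINVAL;		/* header alone does not fit */
	if (left - hdr_size < payload_size)
		return -EINVAL;		/* payload would run past 'end' */
	return 0;
}

Written as obj + hdr_size + payload_size > end, a huge payload_size could wrap the sum around and slip past the comparison.
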
diff --git a/net/core/Makefile b/net/core/Makefile
index 1195680..73272d5 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -12,7 +12,6 @@
 
 obj-$(CONFIG_XFRM) += flow.o
 obj-$(CONFIG_SYSFS) += net-sysfs.o
-obj-$(CONFIG_NET_DIVERT) += dv.o
 obj-$(CONFIG_NET_PKTGEN) += pktgen.o
 obj-$(CONFIG_WIRELESS_EXT) += wireless.o
 obj-$(CONFIG_NETPOLL) += netpoll.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index f558c61..797fdd4 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -321,7 +321,7 @@
 
 static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				      u8 __user *to, int len,
-				      unsigned int *csump)
+				      __wsum *csump)
 {
 	int start = skb_headlen(skb);
 	int pos = 0;
@@ -350,7 +350,7 @@
 
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
-			unsigned int csum2;
+			__wsum csum2;
 			int err = 0;
 			u8  *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -386,7 +386,7 @@
 
 			end = start + list->len;
 			if ((copy = end - offset) > 0) {
-				unsigned int csum2 = 0;
+				__wsum csum2 = 0;
 				if (copy > len)
 					copy = len;
 				if (skb_copy_and_csum_datagram(list,
@@ -411,11 +411,11 @@
 	return -EFAULT;
 }
 
-unsigned int __skb_checksum_complete(struct sk_buff *skb)
+__sum16 __skb_checksum_complete(struct sk_buff *skb)
 {
-	unsigned int sum;
+	__sum16 sum;
 
-	sum = (u16)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
+	sum = csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
 	if (likely(!sum)) {
 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
 			netdev_rx_csum_fault(skb->dev);
@@ -441,7 +441,7 @@
 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
 				     int hlen, struct iovec *iov)
 {
-	unsigned int csum;
+	__wsum csum;
 	int chunk = skb->len - hlen;
 
 	/* Skip filled elements.
@@ -460,7 +460,7 @@
 		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
 					       chunk, &csum))
 			goto fault;
-		if ((unsigned short)csum_fold(csum))
+		if (csum_fold(csum))
 			goto csum_error;
 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
 			netdev_rx_csum_fault(skb->dev);
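
The datagram changes switch the checksum plumbing to the sparse-checked types: __wsum for the 32-bit running sum and __sum16 for the folded, inverted 16-bit result, with csum_fold() as the conversion between them, so the old (u16) casts disappear. A minimal sketch of a whole-packet verification in those terms (the function name is illustrative):

#include <linux/skbuff.h>
#include <net/checksum.h>

static int example_checksum_ok(struct sk_buff *skb)
{
	__wsum	sum;		/* 32-bit running sum */
	__sum16	folded;		/* final folded/inverted 16 bits */

	sum    = skb_checksum(skb, 0, skb->len, skb->csum);
	folded = csum_fold(sum);

	return folded == 0;	/* zero means the checksum verified */
}
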
diff --git a/net/core/dev.c b/net/core/dev.c
index 81c426a..e660cb5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -98,7 +98,6 @@
 #include <linux/seq_file.h>
 #include <linux/stat.h>
 #include <linux/if_bridge.h>
-#include <linux/divert.h>
 #include <net/dst.h>
 #include <net/pkt_sched.h>
 #include <net/checksum.h>
@@ -1170,7 +1169,7 @@
  */
 int skb_checksum_help(struct sk_buff *skb)
 {
-	unsigned int csum;
+	__wsum csum;
 	int ret = 0, offset = skb->h.raw - skb->data;
 
 	if (skb->ip_summed == CHECKSUM_COMPLETE)
@@ -1192,9 +1191,9 @@
 
 	offset = skb->tail - skb->h.raw;
 	BUG_ON(offset <= 0);
-	BUG_ON(skb->csum + 2 > offset);
+	BUG_ON(skb->csum_offset + 2 > offset);
 
-	*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
+	*(__sum16*)(skb->h.raw + skb->csum_offset) = csum_fold(csum);
 
 out_set_summed:
 	skb->ip_summed = CHECKSUM_NONE;
@@ -1216,7 +1215,7 @@
 {
 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
 	struct packet_type *ptype;
-	int type = skb->protocol;
+	__be16 type = skb->protocol;
 	int err;
 
 	BUG_ON(skb_shinfo(skb)->frag_list);
@@ -1767,7 +1766,7 @@
 	struct packet_type *ptype, *pt_prev;
 	struct net_device *orig_dev;
 	int ret = NET_RX_DROP;
-	unsigned short type;
+	__be16 type;
 
 	/* if we've gotten here through NAPI, check netpoll */
 	if (skb->dev->poll && netpoll_rx(skb))
@@ -1827,8 +1826,6 @@
 ncls:
 #endif
 
-	handle_diverter(skb);
-
 	if (handle_bridge(&skb, &pt_prev, &ret, orig_dev))
 		goto out;
 
@@ -2898,10 +2895,6 @@
 	spin_lock_init(&dev->ingress_lock);
 #endif
 
-	ret = alloc_divert_blk(dev);
-	if (ret)
-		goto out;
-
 	dev->iflink = -1;
 
 	/* Init, if this function is available */
@@ -2910,13 +2903,13 @@
 		if (ret) {
 			if (ret > 0)
 				ret = -EIO;
-			goto out_err;
+			goto out;
 		}
 	}
  
 	if (!dev_valid_name(dev->name)) {
 		ret = -EINVAL;
-		goto out_err;
+		goto out;
 	}
 
 	dev->ifindex = dev_new_index();
@@ -2930,7 +2923,7 @@
 			= hlist_entry(p, struct net_device, name_hlist);
 		if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
 			ret = -EEXIST;
- 			goto out_err;
+ 			goto out;
 		}
  	}
 
@@ -2974,7 +2967,7 @@
 
 	ret = netdev_register_sysfs(dev);
 	if (ret)
-		goto out_err;
+		goto out;
 	dev->reg_state = NETREG_REGISTERED;
 
 	/*
@@ -3001,9 +2994,6 @@
 
 out:
 	return ret;
-out_err:
-	free_divert_blk(dev);
-	goto out;
 }
 
 /**
@@ -3035,15 +3025,6 @@
 			goto out;
 	}
 	
-	/*
-	 * Back compatibility hook. Kill this one in 2.5
-	 */
-	if (dev->name[0] == 0 || dev->name[0] == ' ') {
-		err = dev_alloc_name(dev, "eth%d");
-		if (err < 0)
-			goto out;
-	}
-
 	err = register_netdevice(dev);
 out:
 	rtnl_unlock();
@@ -3329,8 +3310,6 @@
 	/* Notifier chain MUST detach us from master device. */
 	BUG_TRAP(!dev->master);
 
-	free_divert_blk(dev);
-
 	/* Finish processing unregister after unlock */
 	net_set_todo(dev);
 
@@ -3361,7 +3340,6 @@
 
 EXPORT_SYMBOL(unregister_netdev);
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int dev_cpu_callback(struct notifier_block *nfb,
 			    unsigned long action,
 			    void *ocpu)
@@ -3405,7 +3383,6 @@
 
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_NET_DMA
 /**
diff --git a/net/core/dst.c b/net/core/dst.c
index 1a5e49d..836ec66 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -125,7 +125,7 @@
 		if (ops->gc())
 			return NULL;
 	}
-	dst = kmem_cache_alloc(ops->kmem_cachep, SLAB_ATOMIC);
+	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
 	if (!dst)
 		return NULL;
 	memset(dst, 0, ops->entry_size);
diff --git a/net/core/dv.c b/net/core/dv.c
deleted file mode 100644
index 29ee77f..0000000
--- a/net/core/dv.c
+++ /dev/null
@@ -1,546 +0,0 @@
-/*
- * INET		An implementation of the TCP/IP protocol suite for the LINUX
- *		operating system.  INET is implemented using the  BSD Socket
- *		interface as the means of communication with the user level.
- *
- *		Generic frame diversion
- *
- * Authors:	
- * 		Benoit LOCHER:	initial integration within the kernel with support for ethernet
- * 		Dave Miller:	improvement on the code (correctness, performance and source files)
- *
- */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/inet.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <net/dst.h>
-#include <net/arp.h>
-#include <net/sock.h>
-#include <net/ipv6.h>
-#include <net/ip.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/checksum.h>
-#include <linux/divert.h>
-#include <linux/sockios.h>
-
-const char sysctl_divert_version[32]="0.46";	/* Current version */
-
-static int __init dv_init(void)
-{
-	return 0;
-}
-module_init(dv_init);
-
-/*
- * Allocate a divert_blk for a device. This must be an ethernet nic.
- */
-int alloc_divert_blk(struct net_device *dev)
-{
-	int alloc_size = (sizeof(struct divert_blk) + 3) & ~3;
-
-	dev->divert = NULL;
-	if (dev->type == ARPHRD_ETHER) {
-		dev->divert = kzalloc(alloc_size, GFP_KERNEL);
-		if (dev->divert == NULL) {
-			printk(KERN_INFO "divert: unable to allocate divert_blk for %s\n",
-			       dev->name);
-			return -ENOMEM;
-		}
-		dev_hold(dev);
-	}
-
-	return 0;
-} 
-
-/*
- * Free a divert_blk allocated by the above function, if it was 
- * allocated on that device.
- */
-void free_divert_blk(struct net_device *dev)
-{
-	if (dev->divert) {
-		kfree(dev->divert);
-		dev->divert=NULL;
-		dev_put(dev);
-	}
-}
-
-/*
- * Adds a tcp/udp (source or dest) port to an array
- */
-static int add_port(u16 ports[], u16 port)
-{
-	int i;
-
-	if (port == 0)
-		return -EINVAL;
-
-	/* Storing directly in network format for performance,
-	 * thanks Dave :)
-	 */
-	port = htons(port);
-
-	for (i = 0; i < MAX_DIVERT_PORTS; i++) {
-		if (ports[i] == port)
-			return -EALREADY;
-	}
-	
-	for (i = 0; i < MAX_DIVERT_PORTS; i++) {
-		if (ports[i] == 0) {
-			ports[i] = port;
-			return 0;
-		}
-	}
-
-	return -ENOBUFS;
-}
-
-/*
- * Removes a port from an array tcp/udp (source or dest)
- */
-static int remove_port(u16 ports[], u16 port)
-{
-	int i;
-
-	if (port == 0)
-		return -EINVAL;
-	
-	/* Storing directly in network format for performance,
-	 * thanks Dave !
-	 */
-	port = htons(port);
-
-	for (i = 0; i < MAX_DIVERT_PORTS; i++) {
-		if (ports[i] == port) {
-			ports[i] = 0;
-			return 0;
-		}
-	}
-
-	return -EINVAL;
-}
-
-/* Some basic sanity checks on the arguments passed to divert_ioctl() */
-static int check_args(struct divert_cf *div_cf, struct net_device **dev)
-{
-	char devname[32];
-	int ret;
-
-	if (dev == NULL)
-		return -EFAULT;
-	
-	/* GETVERSION: all other args are unused */
-	if (div_cf->cmd == DIVCMD_GETVERSION)
-		return 0;
-	
-	/* Network device index should reasonably be between 0 and 1000 :) */
-	if (div_cf->dev_index < 0 || div_cf->dev_index > 1000) 
-		return -EINVAL;
-			
-	/* Let's try to find the ifname */
-	sprintf(devname, "eth%d", div_cf->dev_index);
-	*dev = dev_get_by_name(devname);
-	
-	/* dev should NOT be null */
-	if (*dev == NULL)
-		return -EINVAL;
-
-	ret = 0;
-
-	/* user issuing the ioctl must be a super one :) */
-	if (!capable(CAP_SYS_ADMIN)) {
-		ret = -EPERM;
-		goto out;
-	}
-
-	/* Device must have a divert_blk member NOT null */
-	if ((*dev)->divert == NULL)
-		ret = -EINVAL;
-out:
-	dev_put(*dev);
-	return ret;
-}
-
-/*
- * control function of the diverter
- */
-#if 0
-#define	DVDBG(a)	\
-	printk(KERN_DEBUG "divert_ioctl() line %d %s\n", __LINE__, (a))
-#else
-#define	DVDBG(a)
-#endif
-
-int divert_ioctl(unsigned int cmd, struct divert_cf __user *arg)
-{
-	struct divert_cf	div_cf;
-	struct divert_blk	*div_blk;
-	struct net_device	*dev;
-	int			ret;
-
-	switch (cmd) {
-	case SIOCGIFDIVERT:
-		DVDBG("SIOCGIFDIVERT, copy_from_user");
-		if (copy_from_user(&div_cf, arg, sizeof(struct divert_cf)))
-			return -EFAULT;
-		DVDBG("before check_args");
-		ret = check_args(&div_cf, &dev);
-		if (ret)
-			return ret;
-		DVDBG("after checkargs");
-		div_blk = dev->divert;
-			
-		DVDBG("befre switch()");
-		switch (div_cf.cmd) {
-		case DIVCMD_GETSTATUS:
-			/* Now, just give the user the raw divert block
-			 * for him to play with :)
-			 */
-			if (copy_to_user(div_cf.arg1.ptr, dev->divert,
-					 sizeof(struct divert_blk)))
-				return -EFAULT;
-			break;
-
-		case DIVCMD_GETVERSION:
-			DVDBG("GETVERSION: checking ptr");
-			if (div_cf.arg1.ptr == NULL)
-				return -EINVAL;
-			DVDBG("GETVERSION: copying data to userland");
-			if (copy_to_user(div_cf.arg1.ptr,
-					 sysctl_divert_version, 32))
-				return -EFAULT;
-			DVDBG("GETVERSION: data copied");
-			break;
-
-		default:
-			return -EINVAL;
-		}
-
-		break;
-
-	case SIOCSIFDIVERT:
-		if (copy_from_user(&div_cf, arg, sizeof(struct divert_cf)))
-			return -EFAULT;
-
-		ret = check_args(&div_cf, &dev);
-		if (ret)
-			return ret;
-
-		div_blk = dev->divert;
-
-		switch(div_cf.cmd) {
-		case DIVCMD_RESET:
-			div_blk->divert = 0;
-			div_blk->protos = DIVERT_PROTO_NONE;
-			memset(div_blk->tcp_dst, 0,
-			       MAX_DIVERT_PORTS * sizeof(u16));
-			memset(div_blk->tcp_src, 0,
-			       MAX_DIVERT_PORTS * sizeof(u16));
-			memset(div_blk->udp_dst, 0,
-			       MAX_DIVERT_PORTS * sizeof(u16));
-			memset(div_blk->udp_src, 0,
-			       MAX_DIVERT_PORTS * sizeof(u16));
-			return 0;
-				
-		case DIVCMD_DIVERT:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ENABLE:
-				if (div_blk->divert)
-					return -EALREADY;
-				div_blk->divert = 1;
-				break;
-
-			case DIVARG1_DISABLE:
-				if (!div_blk->divert)
-					return -EALREADY;
-				div_blk->divert = 0;
-				break;
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		case DIVCMD_IP:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ENABLE:
-				if (div_blk->protos & DIVERT_PROTO_IP)
-					return -EALREADY;
-				div_blk->protos |= DIVERT_PROTO_IP;
-				break;
-
-			case DIVARG1_DISABLE:
-				if (!(div_blk->protos & DIVERT_PROTO_IP))
-					return -EALREADY;
-				div_blk->protos &= ~DIVERT_PROTO_IP;
-				break;
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		case DIVCMD_TCP:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ENABLE:
-				if (div_blk->protos & DIVERT_PROTO_TCP)
-					return -EALREADY;
-				div_blk->protos |= DIVERT_PROTO_TCP;
-				break;
-
-			case DIVARG1_DISABLE:
-				if (!(div_blk->protos & DIVERT_PROTO_TCP))
-					return -EALREADY;
-				div_blk->protos &= ~DIVERT_PROTO_TCP;
-				break;
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		case DIVCMD_TCPDST:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ADD:
-				return add_port(div_blk->tcp_dst,
-						div_cf.arg2.uint16);
-				
-			case DIVARG1_REMOVE:
-				return remove_port(div_blk->tcp_dst,
-						   div_cf.arg2.uint16);
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		case DIVCMD_TCPSRC:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ADD:
-				return add_port(div_blk->tcp_src,
-						div_cf.arg2.uint16);
-
-			case DIVARG1_REMOVE:
-				return remove_port(div_blk->tcp_src,
-						   div_cf.arg2.uint16);
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		case DIVCMD_UDP:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ENABLE:
-				if (div_blk->protos & DIVERT_PROTO_UDP)
-					return -EALREADY;
-				div_blk->protos |= DIVERT_PROTO_UDP;
-				break;
-
-			case DIVARG1_DISABLE:
-				if (!(div_blk->protos & DIVERT_PROTO_UDP))
-					return -EALREADY;
-				div_blk->protos &= ~DIVERT_PROTO_UDP;
-				break;
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		case DIVCMD_UDPDST:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ADD:
-				return add_port(div_blk->udp_dst,
-						div_cf.arg2.uint16);
-
-			case DIVARG1_REMOVE:
-				return remove_port(div_blk->udp_dst,
-						   div_cf.arg2.uint16);
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		case DIVCMD_UDPSRC:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ADD:
-				return add_port(div_blk->udp_src,
-						div_cf.arg2.uint16);
-
-			case DIVARG1_REMOVE:
-				return remove_port(div_blk->udp_src,
-						   div_cf.arg2.uint16);
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		case DIVCMD_ICMP:
-			switch(div_cf.arg1.int32) {
-			case DIVARG1_ENABLE:
-				if (div_blk->protos & DIVERT_PROTO_ICMP)
-					return -EALREADY;
-				div_blk->protos |= DIVERT_PROTO_ICMP;
-				break;
-
-			case DIVARG1_DISABLE:
-				if (!(div_blk->protos & DIVERT_PROTO_ICMP))
-					return -EALREADY;
-				div_blk->protos &= ~DIVERT_PROTO_ICMP;
-				break;
-
-			default:
-				return -EINVAL;
-			}
-
-			break;
-
-		default:
-			return -EINVAL;
-		}
-
-		break;
-
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-
-/*
- * Check if packet should have its dest mac address set to the box itself
- * for diversion
- */
-
-#define	ETH_DIVERT_FRAME(skb) \
-	memcpy(eth_hdr(skb), skb->dev->dev_addr, ETH_ALEN); \
-	skb->pkt_type=PACKET_HOST
-		
-void divert_frame(struct sk_buff *skb)
-{
-	struct ethhdr			*eth = eth_hdr(skb);
-	struct iphdr			*iph;
-	struct tcphdr			*tcph;
-	struct udphdr			*udph;
-	struct divert_blk		*divert = skb->dev->divert;
-	int				i, src, dst;
-	unsigned char			*skb_data_end = skb->data + skb->len;
-
-	/* Packet is already aimed at us, return */
-	if (!compare_ether_addr(eth->h_dest, skb->dev->dev_addr))
-		return;
-	
-	/* proto is not IP, do nothing */
-	if (eth->h_proto != htons(ETH_P_IP))
-		return;
-	
-	/* Divert all IP frames ? */
-	if (divert->protos & DIVERT_PROTO_IP) {
-		ETH_DIVERT_FRAME(skb);
-		return;
-	}
-	
-	/* Check for possible (maliciously) malformed IP frame (thanks Dave) */
-	iph = (struct iphdr *) skb->data;
-	if (((iph->ihl<<2)+(unsigned char*)(iph)) >= skb_data_end) {
-		printk(KERN_INFO "divert: malformed IP packet !\n");
-		return;
-	}
-
-	switch (iph->protocol) {
-	/* Divert all ICMP frames ? */
-	case IPPROTO_ICMP:
-		if (divert->protos & DIVERT_PROTO_ICMP) {
-			ETH_DIVERT_FRAME(skb);
-			return;
-		}
-		break;
-
-	/* Divert all TCP frames ? */
-	case IPPROTO_TCP:
-		if (divert->protos & DIVERT_PROTO_TCP) {
-			ETH_DIVERT_FRAME(skb);
-			return;
-		}
-
-		/* Check for possible (maliciously) malformed IP
-		 * frame (thanx Dave)
-		 */
-		tcph = (struct tcphdr *)
-			(((unsigned char *)iph) + (iph->ihl<<2));
-		if (((unsigned char *)(tcph+1)) >= skb_data_end) {
-			printk(KERN_INFO "divert: malformed TCP packet !\n");
-			return;
-		}
-
-		/* Divert some tcp dst/src ports only ?*/
-		for (i = 0; i < MAX_DIVERT_PORTS; i++) {
-			dst = divert->tcp_dst[i];
-			src = divert->tcp_src[i];
-			if ((dst && dst == tcph->dest) ||
-			    (src && src == tcph->source)) {
-				ETH_DIVERT_FRAME(skb);
-				return;
-			}
-		}
-		break;
-
-	/* Divert all UDP frames ? */
-	case IPPROTO_UDP:
-		if (divert->protos & DIVERT_PROTO_UDP) {
-			ETH_DIVERT_FRAME(skb);
-			return;
-		}
-
-		/* Check for possible (maliciously) malformed IP
-		 * packet (thanks Dave)
-		 */
-		udph = (struct udphdr *)
-			(((unsigned char *)iph) + (iph->ihl<<2));
-		if (((unsigned char *)(udph+1)) >= skb_data_end) {
-			printk(KERN_INFO
-			       "divert: malformed UDP packet !\n");
-			return;
-		}
-
-		/* Divert some udp dst/src ports only ? */
-		for (i = 0; i < MAX_DIVERT_PORTS; i++) {
-			dst = divert->udp_dst[i];
-			src = divert->udp_src[i];
-			if ((dst && dst == udph->dest) ||
-			    (src && src == udph->source)) {
-				ETH_DIVERT_FRAME(skb);
-				return;
-			}
-		}
-		break;
-	}
-}
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 6b0e63c..1df6cd45 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -107,6 +107,22 @@
 
 EXPORT_SYMBOL_GPL(fib_rules_unregister);
 
+static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
+			  struct flowi *fl, int flags)
+{
+	int ret = 0;
+
+	if (rule->ifindex && (rule->ifindex != fl->iif))
+		goto out;
+
+	if ((rule->mark ^ fl->mark) & rule->mark_mask)
+		goto out;
+
+	ret = ops->match(rule, fl, flags);
+out:
+	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
+}
+
 int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
 		     int flags, struct fib_lookup_arg *arg)
 {
@@ -116,10 +132,7 @@
 	rcu_read_lock();
 
 	list_for_each_entry_rcu(rule, ops->rules_list, list) {
-		if (rule->ifindex && (rule->ifindex != fl->iif))
-			continue;
-
-		if (!ops->match(rule, fl, flags))
+		if (!fib_rule_match(rule, ops, fl, flags))
 			continue;
 
 		err = ops->action(rule, fl, flags, arg);
@@ -179,6 +192,18 @@
 			rule->ifindex = dev->ifindex;
 	}
 
+	if (tb[FRA_FWMARK]) {
+		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
+		if (rule->mark)
+			/* compatibility: if the mark value is non-zero all bits
+			 * are compared unless a mask is explicitly specified.
+			 */
+			rule->mark_mask = 0xFFFFFFFF;
+	}
+
+	if (tb[FRA_FWMASK])
+		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);
+
 	rule->action = frh->action;
 	rule->flags = frh->flags;
 	rule->table = frh_get_table(frh, tb);
@@ -250,6 +275,14 @@
 		    nla_strcmp(tb[FRA_IFNAME], rule->ifname))
 			continue;
 
+		if (tb[FRA_FWMARK] &&
+		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
+			continue;
+
+		if (tb[FRA_FWMASK] &&
+		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
+			continue;
+
 		if (!ops->compare(rule, frh, tb))
 			continue;
 
@@ -273,6 +306,22 @@
 	return err;
 }
 
+static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
+					 struct fib_rule *rule)
+{
+	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
+			 + nla_total_size(IFNAMSIZ) /* FRA_IFNAME */
+			 + nla_total_size(4) /* FRA_PRIORITY */
+			 + nla_total_size(4) /* FRA_TABLE */
+			 + nla_total_size(4) /* FRA_FWMARK */
+			 + nla_total_size(4); /* FRA_FWMASK */
+
+	if (ops->nlmsg_payload)
+		payload += ops->nlmsg_payload(rule);
+
+	return payload;
+}
+
 static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
 			    u32 pid, u32 seq, int type, int flags,
 			    struct fib_rules_ops *ops)
@@ -298,6 +347,12 @@
 	if (rule->pref)
 		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);
 
+	if (rule->mark)
+		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);
+
+	if (rule->mark_mask || rule->mark)
+		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);
+
 	if (ops->fill(rule, skb, nlh, frh) < 0)
 		goto nla_put_failure;
 
@@ -345,15 +400,13 @@
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
 
-	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
 	if (skb == NULL)
 		goto errout;
 
 	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
-	if (err < 0) {
-		kfree_skb(skb);
-		goto errout;
-	}
+	/* failure implies BUG in fib_rule_nlmsg_size() */
+	BUG_ON(err < 0);
 
 	err = rtnl_notify(skb, pid, ops->nlgroup, nlh, GFP_KERNEL);
 errout:
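
The fib_rule_match() helper added above folds the interface check together with the new fwmark test: (rule->mark ^ fl->mark) & rule->mark_mask is zero exactly when the bits selected by mark_mask agree in the rule and the flow, and the 0xFFFFFFFF compatibility mask reproduces an all-bits comparison. A minimal user-space sketch of that predicate (helper name and values are illustrative only, not from the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* A zero XOR result under the mask means every covered bit matches. */
	static int mark_matches(uint32_t rule_mark, uint32_t flow_mark, uint32_t mask)
	{
		return ((rule_mark ^ flow_mark) & mask) == 0;
	}

	int main(void)
	{
		printf("%d\n", mark_matches(0x10, 0x10, 0xffffffff));	/* 1: exact match */
		printf("%d\n", mark_matches(0x10, 0x30, 0x0000001f));	/* 1: only low 5 bits compared */
		printf("%d\n", mark_matches(0x10, 0x11, 0xffffffff));	/* 0: mismatch */
		return 0;
	}
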
diff --git a/net/core/filter.c b/net/core/filter.c
index 6732782..0df843b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -178,7 +178,7 @@
 load_w:
 			ptr = load_pointer(skb, k, 4, &tmp);
 			if (ptr != NULL) {
-				A = ntohl(get_unaligned((u32 *)ptr));
+				A = ntohl(get_unaligned((__be32 *)ptr));
 				continue;
 			}
 			break;
@@ -187,7 +187,7 @@
 load_h:
 			ptr = load_pointer(skb, k, 2, &tmp);
 			if (ptr != NULL) {
-				A = ntohs(get_unaligned((u16 *)ptr));
+				A = ntohs(get_unaligned((__be16 *)ptr));
 				continue;
 			}
 			break;
@@ -261,7 +261,7 @@
 		 */
 		switch (k-SKF_AD_OFF) {
 		case SKF_AD_PROTOCOL:
-			A = htons(skb->protocol);
+			A = ntohs(skb->protocol);
 			continue;
 		case SKF_AD_PKTTYPE:
 			A = skb->pkt_type;
diff --git a/net/core/flow.c b/net/core/flow.c
index b16d31a..d137f97 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -44,7 +44,7 @@
 
 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
 
-static kmem_cache_t *flow_cachep __read_mostly;
+static struct kmem_cache *flow_cachep __read_mostly;
 
 static int flow_lwm, flow_hwm;
 
@@ -211,7 +211,7 @@
 		if (flow_count(cpu) > flow_hwm)
 			flow_cache_shrink(cpu);
 
-		fle = kmem_cache_alloc(flow_cachep, SLAB_ATOMIC);
+		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
 		if (fle) {
 			fle->next = *head;
 			*head = fle;
@@ -340,7 +340,6 @@
 	tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int flow_cache_cpu(struct notifier_block *nfb,
 			  unsigned long action,
 			  void *hcpu)
@@ -349,7 +348,6 @@
 		__flow_cache_shrink((unsigned long)hcpu, 0);
 	return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static int __init flow_cache_init(void)
 {
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 65e4b56..04b249c 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -158,9 +158,9 @@
  *	call to this function will be unaligned also.
  */
 int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
-				 int offset, unsigned int len, int *csump)
+				 int offset, unsigned int len, __wsum *csump)
 {
-	int csum = *csump;
+	__wsum csum = *csump;
 	int partial_cnt = 0, err = 0;
 
 	/* Skip over the finished iovecs */
diff --git a/net/core/kmap_skb.h b/net/core/kmap_skb.h
new file mode 100644
index 0000000..283c2b99
--- /dev/null
+++ b/net/core/kmap_skb.h
@@ -0,0 +1,19 @@
+#include <linux/highmem.h>
+
+static inline void *kmap_skb_frag(const skb_frag_t *frag)
+{
+#ifdef CONFIG_HIGHMEM
+	BUG_ON(in_irq());
+
+	local_bh_disable();
+#endif
+	return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
+}
+
+static inline void kunmap_skb_frag(void *vaddr)
+{
+	kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+#ifdef CONFIG_HIGHMEM
+	local_bh_enable();
+#endif
+}
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 4b36114..549a2ce 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -34,8 +34,8 @@
 static unsigned long linkwatch_flags;
 static unsigned long linkwatch_nextevent;
 
-static void linkwatch_event(void *dummy);
-static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
+static void linkwatch_event(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
 
 static LIST_HEAD(lweventlist);
 static DEFINE_SPINLOCK(lweventlist_lock);
@@ -127,7 +127,7 @@
 }       
 
 
-static void linkwatch_event(void *dummy)
+static void linkwatch_event(struct work_struct *dummy)
 {
 	/* Limit the number of linkwatch events to one
 	 * per second so that a runaway driver does not
@@ -171,10 +171,9 @@
 			unsigned long delay = linkwatch_nextevent - jiffies;
 
 			/* If we wrap around we'll delay it by at most HZ. */
-			if (!delay || delay > HZ)
-				schedule_work(&linkwatch_work);
-			else
-				schedule_delayed_work(&linkwatch_work, delay);
+			if (delay > HZ)
+				delay = 0;
+			schedule_delayed_work(&linkwatch_work, delay);
 		}
 	}
 }
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index b4b4783..0ab1987 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -251,7 +251,7 @@
 			goto out_entries;
 	}
 
-	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
+	n = kmem_cache_alloc(tbl->kmem_cachep, GFP_ATOMIC);
 	if (!n)
 		goto out_entries;
 
@@ -1266,10 +1266,9 @@
 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
 				      struct neigh_table *tbl)
 {
-	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);
+	struct neigh_parms *p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
 
 	if (p) {
-		memcpy(p, &tbl->parms, sizeof(*p));
 		p->tbl		  = tbl;
 		atomic_set(&p->refcnt, 1);
 		INIT_RCU_HEAD(&p->rcu_head);
@@ -2410,20 +2409,27 @@
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_ARPD
+static inline size_t neigh_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct ndmsg))
+	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
+	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
+	       + nla_total_size(sizeof(struct nda_cacheinfo))
+	       + nla_total_size(4); /* NDA_PROBES */
+}
+
 static void __neigh_notify(struct neighbour *n, int type, int flags)
 {
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
 
-	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
 	if (skb == NULL)
 		goto errout;
 
 	err = neigh_fill_info(skb, n, 0, 0, type, flags);
-	if (err < 0) {
-		kfree_skb(skb);
-		goto errout;
-	}
+	/* failure implies BUG in neigh_nlmsg_size() */
+	BUG_ON(err < 0);
 
 	err = rtnl_notify(skb, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
 errout:
@@ -2618,14 +2624,14 @@
 			  int p_id, int pdev_id, char *p_name, 
 			  proc_handler *handler, ctl_handler *strategy)
 {
-	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
+	struct neigh_sysctl_table *t = kmemdup(&neigh_sysctl_template,
+					       sizeof(*t), GFP_KERNEL);
 	const char *dev_name_source = NULL;
 	char *dev_name = NULL;
 	int err = 0;
 
 	if (!t)
 		return -ENOBUFS;
-	memcpy(t, &neigh_sysctl_template, sizeof(*t));
 	t->neigh_vars[0].data  = &p->mcast_probes;
 	t->neigh_vars[1].data  = &p->ucast_probes;
 	t->neigh_vars[2].data  = &p->app_probes;
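
The two kmemdup() conversions in neighbour.c above replace a kmalloc()/memcpy() pair with a single helper call that allocates a buffer and copies the source into it. For comparison, a sketch of the open-coded form it stands in for, written here with the user-space allocator purely for illustration:

	#include <stdlib.h>
	#include <string.h>

	/* What kmemdup(src, len, GFP_KERNEL) does on the caller's behalf:
	 * allocate len bytes, copy src into them, return NULL on failure. */
	static void *dup_buffer(const void *src, size_t len)
	{
		void *p = malloc(len);

		if (p)
			memcpy(p, src, len);
		return p;
	}
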
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 6589adb..b3c559b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -34,18 +34,12 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
-#define MAX_RETRIES 20000
 
-static DEFINE_SPINLOCK(skb_list_lock);
-static int nr_skbs;
-static struct sk_buff *skbs;
-
-static DEFINE_SPINLOCK(queue_lock);
-static int queue_depth;
-static struct sk_buff *queue_head, *queue_tail;
+static struct sk_buff_head skb_pool;
 
 static atomic_t trapped;
 
+#define USEC_PER_POLL	50
 #define NETPOLL_RX_ENABLED  1
 #define NETPOLL_RX_DROP     2
 
@@ -56,54 +50,36 @@
 static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
-static void queue_process(void *p)
+static void queue_process(struct work_struct *work)
 {
-	unsigned long flags;
+	struct netpoll_info *npinfo =
+		container_of(work, struct netpoll_info, tx_work.work);
 	struct sk_buff *skb;
 
-	while (queue_head) {
-		spin_lock_irqsave(&queue_lock, flags);
+	while ((skb = skb_dequeue(&npinfo->txq))) {
+		struct net_device *dev = skb->dev;
 
-		skb = queue_head;
-		queue_head = skb->next;
-		if (skb == queue_tail)
-			queue_head = NULL;
+		if (!netif_device_present(dev) || !netif_running(dev)) {
+			__kfree_skb(skb);
+			continue;
+		}
 
-		queue_depth--;
+		netif_tx_lock_bh(dev);
+		if (netif_queue_stopped(dev) ||
+		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+			skb_queue_head(&npinfo->txq, skb);
+			netif_tx_unlock_bh(dev);
 
-		spin_unlock_irqrestore(&queue_lock, flags);
-
-		dev_queue_xmit(skb);
+			schedule_delayed_work(&npinfo->tx_work, HZ/10);
+			return;
+		}
 	}
 }
 
-static DECLARE_WORK(send_queue, queue_process, NULL);
-
-void netpoll_queue(struct sk_buff *skb)
+static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
+			    unsigned short ulen, __be32 saddr, __be32 daddr)
 {
-	unsigned long flags;
-
-	if (queue_depth == MAX_QUEUE_DEPTH) {
-		__kfree_skb(skb);
-		return;
-	}
-
-	spin_lock_irqsave(&queue_lock, flags);
-	if (!queue_head)
-		queue_head = skb;
-	else
-		queue_tail->next = skb;
-	queue_tail = skb;
-	queue_depth++;
-	spin_unlock_irqrestore(&queue_lock, flags);
-
-	schedule_work(&send_queue);
-}
-
-static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
-			     unsigned short ulen, u32 saddr, u32 daddr)
-{
-	unsigned int psum;
+	__wsum psum;
 
 	if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY)
 		return 0;
@@ -111,7 +87,7 @@
 	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
 
 	if (skb->ip_summed == CHECKSUM_COMPLETE &&
-	    !(u16)csum_fold(csum_add(psum, skb->csum)))
+	    !csum_fold(csum_add(psum, skb->csum)))
 		return 0;
 
 	skb->csum = psum;
@@ -167,12 +143,11 @@
 		arp_reply(skb);
 		skb = skb_dequeue(&npi->arp_tx);
 	}
-	return;
 }
 
 void netpoll_poll(struct netpoll *np)
 {
-	if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
+	if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
 		return;
 
 	/* Process pending work on NIC */
@@ -190,17 +165,15 @@
 	struct sk_buff *skb;
 	unsigned long flags;
 
-	spin_lock_irqsave(&skb_list_lock, flags);
-	while (nr_skbs < MAX_SKBS) {
+	spin_lock_irqsave(&skb_pool.lock, flags);
+	while (skb_pool.qlen < MAX_SKBS) {
 		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
 		if (!skb)
 			break;
 
-		skb->next = skbs;
-		skbs = skb;
-		nr_skbs++;
+		__skb_queue_tail(&skb_pool, skb);
 	}
-	spin_unlock_irqrestore(&skb_list_lock, flags);
+	spin_unlock_irqrestore(&skb_pool.lock, flags);
 }
 
 static void zap_completion_queue(void)
@@ -219,7 +192,7 @@
 		while (clist != NULL) {
 			struct sk_buff *skb = clist;
 			clist = clist->next;
-			if(skb->destructor)
+			if (skb->destructor)
 				dev_kfree_skb_any(skb); /* put this one back */
 			else
 				__kfree_skb(skb);
@@ -229,38 +202,25 @@
 	put_cpu_var(softnet_data);
 }
 
-static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve)
+static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
-	int once = 1, count = 0;
-	unsigned long flags;
-	struct sk_buff *skb = NULL;
+	int count = 0;
+	struct sk_buff *skb;
 
 	zap_completion_queue();
+	refill_skbs();
 repeat:
-	if (nr_skbs < MAX_SKBS)
-		refill_skbs();
 
 	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		skb = skb_dequeue(&skb_pool);
 
 	if (!skb) {
-		spin_lock_irqsave(&skb_list_lock, flags);
-		skb = skbs;
-		if (skb) {
-			skbs = skb->next;
-			skb->next = NULL;
-			nr_skbs--;
+		if (++count < 10) {
+			netpoll_poll(np);
+			goto repeat;
 		}
-		spin_unlock_irqrestore(&skb_list_lock, flags);
-	}
-
-	if(!skb) {
-		count++;
-		if (once && (count == 1000000)) {
-			printk("out of netpoll skbs!\n");
-			once = 0;
-		}
-		netpoll_poll(np);
-		goto repeat;
+		return NULL;
 	}
 
 	atomic_set(&skb->users, 1);
@@ -270,50 +230,40 @@
 
 static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
-	int status;
-	struct netpoll_info *npinfo;
+	int status = NETDEV_TX_BUSY;
+	unsigned long tries;
+ 	struct net_device *dev = np->dev;
+ 	struct netpoll_info *npinfo = np->dev->npinfo;
 
-	if (!np || !np->dev || !netif_running(np->dev)) {
-		__kfree_skb(skb);
-		return;
-	}
+ 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
+ 		__kfree_skb(skb);
+ 		return;
+ 	}
 
-	npinfo = np->dev->npinfo;
+	/* don't get messages out of order, and no recursion */
+	if (skb_queue_len(&npinfo->txq) == 0 &&
+	    npinfo->poll_owner != smp_processor_id() &&
+	    netif_tx_trylock(dev)) {
+		/* try until next clock tick */
+		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) {
+			if (!netif_queue_stopped(dev))
+				status = dev->hard_start_xmit(skb, dev);
 
-	/* avoid recursion */
-	if (npinfo->poll_owner == smp_processor_id() ||
-	    np->dev->xmit_lock_owner == smp_processor_id()) {
-		if (np->drop)
-			np->drop(skb);
-		else
-			__kfree_skb(skb);
-		return;
-	}
+			if (status == NETDEV_TX_OK)
+				break;
 
-	do {
-		npinfo->tries--;
-		netif_tx_lock(np->dev);
+			/* tickle the device: maybe there is some cleanup to do */
+			netpoll_poll(np);
 
-		/*
-		 * network drivers do not expect to be called if the queue is
-		 * stopped.
-		 */
-		status = NETDEV_TX_BUSY;
-		if (!netif_queue_stopped(np->dev))
-			status = np->dev->hard_start_xmit(skb, np->dev);
-
-		netif_tx_unlock(np->dev);
-
-		/* success */
-		if(!status) {
-			npinfo->tries = MAX_RETRIES; /* reset */
-			return;
+			udelay(USEC_PER_POLL);
 		}
+		netif_tx_unlock(dev);
+	}
 
-		/* transmit busy */
-		netpoll_poll(np);
-		udelay(50);
-	} while (npinfo->tries > 0);
+	if (status != NETDEV_TX_OK) {
+		skb_queue_tail(&npinfo->txq, skb);
+		schedule_delayed_work(&npinfo->tx_work, 0);
+	}
 }
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -345,7 +295,7 @@
 					udp_len, IPPROTO_UDP,
 					csum_partial((unsigned char *)udph, udp_len, 0));
 	if (udph->check == 0)
-		udph->check = -1;
+		udph->check = CSUM_MANGLED_0;
 
 	skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
 
@@ -379,7 +329,7 @@
 	struct arphdr *arp;
 	unsigned char *arp_ptr;
 	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
-	u32 sip, tip;
+	__be32 sip, tip;
 	struct sk_buff *send_skb;
 	struct netpoll *np = NULL;
 
@@ -431,8 +381,8 @@
 
 	if (np->dev->hard_header &&
 	    np->dev->hard_header(send_skb, skb->dev, ptype,
-				       np->remote_mac, np->local_mac,
-				       send_skb->len) < 0) {
+				 np->remote_mac, np->local_mac,
+				 send_skb->len) < 0) {
 		kfree_skb(send_skb);
 		return;
 	}
@@ -470,7 +420,6 @@
 	struct netpoll_info *npi = skb->dev->npinfo;
 	struct netpoll *np = npi->rx_np;
 
-
 	if (!np)
 		goto out;
 	if (skb->dev->type != ARPHRD_ETHER)
@@ -543,47 +492,47 @@
 {
 	char *cur=opt, *delim;
 
-	if(*cur != '@') {
+	if (*cur != '@') {
 		if ((delim = strchr(cur, '@')) == NULL)
 			goto parse_failed;
-		*delim=0;
-		np->local_port=simple_strtol(cur, NULL, 10);
-		cur=delim;
+		*delim = 0;
+		np->local_port = simple_strtol(cur, NULL, 10);
+		cur = delim;
 	}
 	cur++;
 	printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port);
 
-	if(*cur != '/') {
+	if (*cur != '/') {
 		if ((delim = strchr(cur, '/')) == NULL)
 			goto parse_failed;
-		*delim=0;
-		np->local_ip=ntohl(in_aton(cur));
-		cur=delim;
+		*delim = 0;
+		np->local_ip = ntohl(in_aton(cur));
+		cur = delim;
 
 		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
 		       np->name, HIPQUAD(np->local_ip));
 	}
 	cur++;
 
-	if ( *cur != ',') {
+	if (*cur != ',') {
 		/* parse out dev name */
 		if ((delim = strchr(cur, ',')) == NULL)
 			goto parse_failed;
-		*delim=0;
+		*delim = 0;
 		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
-		cur=delim;
+		cur = delim;
 	}
 	cur++;
 
 	printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name);
 
-	if ( *cur != '@' ) {
+	if (*cur != '@') {
 		/* dst port */
 		if ((delim = strchr(cur, '@')) == NULL)
 			goto parse_failed;
-		*delim=0;
-		np->remote_port=simple_strtol(cur, NULL, 10);
-		cur=delim;
+		*delim = 0;
+		np->remote_port = simple_strtol(cur, NULL, 10);
+		cur = delim;
 	}
 	cur++;
 	printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port);
@@ -591,42 +540,41 @@
 	/* dst ip */
 	if ((delim = strchr(cur, '/')) == NULL)
 		goto parse_failed;
-	*delim=0;
-	np->remote_ip=ntohl(in_aton(cur));
-	cur=delim+1;
+	*delim = 0;
+	np->remote_ip = ntohl(in_aton(cur));
+	cur = delim + 1;
 
 	printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
-		       np->name, HIPQUAD(np->remote_ip));
+	       np->name, HIPQUAD(np->remote_ip));
 
-	if( *cur != 0 )
-	{
+	if (*cur != 0) {
 		/* MAC address */
 		if ((delim = strchr(cur, ':')) == NULL)
 			goto parse_failed;
-		*delim=0;
-		np->remote_mac[0]=simple_strtol(cur, NULL, 16);
-		cur=delim+1;
+		*delim = 0;
+		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
+		cur = delim + 1;
 		if ((delim = strchr(cur, ':')) == NULL)
 			goto parse_failed;
-		*delim=0;
-		np->remote_mac[1]=simple_strtol(cur, NULL, 16);
-		cur=delim+1;
+		*delim = 0;
+		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
+		cur = delim + 1;
 		if ((delim = strchr(cur, ':')) == NULL)
 			goto parse_failed;
-		*delim=0;
-		np->remote_mac[2]=simple_strtol(cur, NULL, 16);
-		cur=delim+1;
+		*delim = 0;
+		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
+		cur = delim + 1;
 		if ((delim = strchr(cur, ':')) == NULL)
 			goto parse_failed;
-		*delim=0;
-		np->remote_mac[3]=simple_strtol(cur, NULL, 16);
-		cur=delim+1;
+		*delim = 0;
+		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
+		cur = delim + 1;
 		if ((delim = strchr(cur, ':')) == NULL)
 			goto parse_failed;
-		*delim=0;
-		np->remote_mac[4]=simple_strtol(cur, NULL, 16);
-		cur=delim+1;
-		np->remote_mac[5]=simple_strtol(cur, NULL, 16);
+		*delim = 0;
+		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
+		cur = delim + 1;
+		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
 	}
 
 	printk(KERN_INFO "%s: remote ethernet address "
@@ -653,34 +601,44 @@
 	struct in_device *in_dev;
 	struct netpoll_info *npinfo;
 	unsigned long flags;
+	int err;
 
 	if (np->dev_name)
 		ndev = dev_get_by_name(np->dev_name);
 	if (!ndev) {
 		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
 		       np->name, np->dev_name);
-		return -1;
+		return -ENODEV;
 	}
 
 	np->dev = ndev;
 	if (!ndev->npinfo) {
 		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
-		if (!npinfo)
+		if (!npinfo) {
+			err = -ENOMEM;
 			goto release;
+		}
 
 		npinfo->rx_flags = 0;
 		npinfo->rx_np = NULL;
 		spin_lock_init(&npinfo->poll_lock);
 		npinfo->poll_owner = -1;
-		npinfo->tries = MAX_RETRIES;
+
 		spin_lock_init(&npinfo->rx_lock);
 		skb_queue_head_init(&npinfo->arp_tx);
-	} else
+		skb_queue_head_init(&npinfo->txq);
+		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
+
+		atomic_set(&npinfo->refcnt, 1);
+	} else {
 		npinfo = ndev->npinfo;
+		atomic_inc(&npinfo->refcnt);
+	}
 
 	if (!ndev->poll_controller) {
 		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
 		       np->name, np->dev_name);
+		err = -ENOTSUPP;
 		goto release;
 	}
 
@@ -691,13 +649,14 @@
 		       np->name, np->dev_name);
 
 		rtnl_lock();
-		if (dev_change_flags(ndev, ndev->flags | IFF_UP) < 0) {
+		err = dev_open(ndev);
+		rtnl_unlock();
+
+		if (err) {
 			printk(KERN_ERR "%s: failed to open %s\n",
-			       np->name, np->dev_name);
-			rtnl_unlock();
+			       np->name, ndev->name);
 			goto release;
 		}
-		rtnl_unlock();
 
 		atleast = jiffies + HZ/10;
  		atmost = jiffies + 4*HZ;
@@ -735,6 +694,7 @@
 			rcu_read_unlock();
 			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
 			       np->name, np->dev_name);
+			err = -EDESTADDRREQ;
 			goto release;
 		}
 
@@ -767,9 +727,16 @@
 		kfree(npinfo);
 	np->dev = NULL;
 	dev_put(ndev);
-	return -1;
+	return err;
 }
 
+static int __init netpoll_init(void)
+{
+	skb_queue_head_init(&skb_pool);
+	return 0;
+}
+core_initcall(netpoll_init);
+
 void netpoll_cleanup(struct netpoll *np)
 {
 	struct netpoll_info *npinfo;
@@ -777,12 +744,25 @@
 
 	if (np->dev) {
 		npinfo = np->dev->npinfo;
-		if (npinfo && npinfo->rx_np == np) {
-			spin_lock_irqsave(&npinfo->rx_lock, flags);
-			npinfo->rx_np = NULL;
-			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
-			spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+		if (npinfo) {
+			if (npinfo->rx_np == np) {
+				spin_lock_irqsave(&npinfo->rx_lock, flags);
+				npinfo->rx_np = NULL;
+				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+			}
+
+			np->dev->npinfo = NULL;
+			if (atomic_dec_and_test(&npinfo->refcnt)) {
+				skb_queue_purge(&npinfo->arp_tx);
+ 				skb_queue_purge(&npinfo->txq);
+				cancel_rearming_delayed_work(&npinfo->tx_work);
+ 				flush_scheduled_work();
+
+				kfree(npinfo);
+			}
 		}
+
 		dev_put(np->dev);
 	}
 
@@ -809,4 +789,3 @@
 EXPORT_SYMBOL(netpoll_cleanup);
 EXPORT_SYMBOL(netpoll_send_udp);
 EXPORT_SYMBOL(netpoll_poll);
-EXPORT_SYMBOL(netpoll_queue);
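
In the reworked netpoll_send_skb() above, a direct transmit is only attempted when the txq is empty and the CPU is not already polling the device; a busy transmit is retried for at most one clock tick, polling the device every USEC_PER_POLL microseconds, and anything still unsent is queued and handed to the delayed work so messages are not reordered. A small user-space model of that retry budget (the HZ value is just an example):

	#include <stdio.h>

	#define USEC_PER_POLL	50

	int main(void)
	{
		unsigned int hz = 250;				/* example CONFIG_HZ */
		unsigned int usec_per_jiffy = 1000000 / hz;	/* jiffies_to_usecs(1) */
		unsigned int tries = usec_per_jiffy / USEC_PER_POLL;

		printf("at most %u transmit attempts per tick\n", tries);	/* 80 */
		return 0;
	}
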
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 733d86d..1897a3a 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -207,7 +207,7 @@
 #define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4)
 
 struct flow_state {
-	__u32 cur_daddr;
+	__be32 cur_daddr;
 	int count;
 };
 
@@ -282,10 +282,10 @@
 	/* If we're doing ranges, random or incremental, then this
 	 * defines the min/max for those ranges.
 	 */
-	__u32 saddr_min;	/* inclusive, source IP address */
-	__u32 saddr_max;	/* exclusive, source IP address */
-	__u32 daddr_min;	/* inclusive, dest IP address */
-	__u32 daddr_max;	/* exclusive, dest IP address */
+	__be32 saddr_min;	/* inclusive, source IP address */
+	__be32 saddr_max;	/* exclusive, source IP address */
+	__be32 daddr_min;	/* inclusive, dest IP address */
+	__be32 daddr_max;	/* exclusive, dest IP address */
 
 	__u16 udp_src_min;	/* inclusive, source UDP port */
 	__u16 udp_src_max;	/* exclusive, source UDP port */
@@ -317,8 +317,8 @@
 
 	__u32 cur_dst_mac_offset;
 	__u32 cur_src_mac_offset;
-	__u32 cur_saddr;
-	__u32 cur_daddr;
+	__be32 cur_saddr;
+	__be32 cur_daddr;
 	__u16 cur_udp_dst;
 	__u16 cur_udp_src;
 	__u32 cur_pkt_size;
@@ -350,10 +350,10 @@
 };
 
 struct pktgen_hdr {
-	__u32 pgh_magic;
-	__u32 seq_num;
-	__u32 tv_sec;
-	__u32 tv_usec;
+	__be32 pgh_magic;
+	__be32 seq_num;
+	__be32 tv_sec;
+	__be32 tv_usec;
 };
 
 struct pktgen_thread {
@@ -2160,7 +2160,7 @@
 		for(i = 0; i < pkt_dev->nr_labels; i++)
 			if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM)
 				pkt_dev->labels[i] = MPLS_STACK_BOTTOM |
-						     (pktgen_random() &
+					     ((__force __be32)pktgen_random() &
 						      htonl(0x000fffff));
 	}
 
@@ -2220,29 +2220,25 @@
 		if (pkt_dev->cflows && pkt_dev->flows[flow].count != 0) {
 			pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr;
 		} else {
-
-			if ((imn = ntohl(pkt_dev->daddr_min)) < (imx =
-								 ntohl(pkt_dev->
-								       daddr_max)))
-			{
+			imn = ntohl(pkt_dev->daddr_min);
+			imx = ntohl(pkt_dev->daddr_max);
+			if (imn < imx) {
 				__u32 t;
+				__be32 s;
 				if (pkt_dev->flags & F_IPDST_RND) {
 
-					t = ((pktgen_random() % (imx - imn)) +
-					     imn);
-					t = htonl(t);
+					t = pktgen_random() % (imx - imn) + imn;
+					s = htonl(t);
 
-					while (LOOPBACK(t) || MULTICAST(t)
-					       || BADCLASS(t) || ZERONET(t)
-					       || LOCAL_MCAST(t)) {
-						t = ((pktgen_random() %
-						      (imx - imn)) + imn);
-						t = htonl(t);
+					while (LOOPBACK(s) || MULTICAST(s)
+					       || BADCLASS(s) || ZERONET(s)
+					       || LOCAL_MCAST(s)) {
+						t = (pktgen_random() %
+						      (imx - imn)) + imn;
+						s = htonl(t);
 					}
-					pkt_dev->cur_daddr = t;
-				}
-
-				else {
+					pkt_dev->cur_daddr = s;
+				} else {
 					t = ntohl(pkt_dev->cur_daddr);
 					t++;
 					if (t > imx) {
@@ -2270,7 +2266,7 @@
 
 			for (i = 0; i < 4; i++) {
 				pkt_dev->cur_in6_daddr.s6_addr32[i] =
-				    ((pktgen_random() |
+				    (((__force __be32)pktgen_random() |
 				      pkt_dev->min_in6_daddr.s6_addr32[i]) &
 				     pkt_dev->max_in6_daddr.s6_addr32[i]);
 			}
@@ -2377,7 +2373,7 @@
 	udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr));
 
 	memcpy(eth, pkt_dev->hh, 12);
-	*(u16 *) & eth[12] = protocol;
+	*(__be16 *) & eth[12] = protocol;
 
 	/* Eth + IPh + UDPh + mpls */
 	datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 -
@@ -2497,7 +2493,7 @@
 	char suffix[16];
 	unsigned int prefixlen = 0;
 	unsigned int suffixlen = 0;
-	__u32 tmp;
+	__be32 tmp;
 
 	for (i = 0; i < 16; i++)
 		ip[i] = 0;
@@ -2713,7 +2709,7 @@
 	udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr));
 
 	memcpy(eth, pkt_dev->hh, 12);
-	*(u16 *) & eth[12] = protocol;
+	*(__be16 *) & eth[12] = protocol;
 
 	/* Eth + IPh + UDPh + mpls */
 	datalen = pkt_dev->cur_pkt_size - 14 -
@@ -2732,11 +2728,11 @@
 	udph->len = htons(datalen + sizeof(struct udphdr));
 	udph->check = 0;	/* No checksum */
 
-	*(u32 *) iph = __constant_htonl(0x60000000);	/* Version + flow */
+	*(__be32 *) iph = __constant_htonl(0x60000000);	/* Version + flow */
 
 	if (pkt_dev->traffic_class) {
 		/* Version + traffic class + flow (0) */
-		*(u32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20));
+		*(__be32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20));
 	}
 
 	iph->hop_limit = 32;
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 79ebd75..5f0818d 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -15,6 +15,7 @@
 #include <linux/random.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/vmalloc.h>
 
 #include <net/request_sock.h>
 
@@ -29,22 +30,31 @@
  * it is absolutely not enough even at 100conn/sec. 256 cures most
  * of problems. This value is adjusted to 128 for very small machines
  * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
- * Further increasing requires to change hash table size.
+ * Note: don't forget that somaxconn may limit the backlog too.
  */
 int sysctl_max_syn_backlog = 256;
 
 int reqsk_queue_alloc(struct request_sock_queue *queue,
-		      const int nr_table_entries)
+		      unsigned int nr_table_entries)
 {
-	const int lopt_size = sizeof(struct listen_sock) +
-			      nr_table_entries * sizeof(struct request_sock *);
-	struct listen_sock *lopt = kzalloc(lopt_size, GFP_KERNEL);
+	size_t lopt_size = sizeof(struct listen_sock);
+	struct listen_sock *lopt;
 
+	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
+	nr_table_entries = max_t(u32, nr_table_entries, 8);
+	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
+	lopt_size += nr_table_entries * sizeof(struct request_sock *);
+	if (lopt_size > PAGE_SIZE)
+		lopt = __vmalloc(lopt_size,
+			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+			PAGE_KERNEL);
+	else
+		lopt = kzalloc(lopt_size, GFP_KERNEL);
 	if (lopt == NULL)
 		return -ENOMEM;
 
-	for (lopt->max_qlen_log = 6;
-	     (1 << lopt->max_qlen_log) < sysctl_max_syn_backlog;
+	for (lopt->max_qlen_log = 3;
+	     (1 << lopt->max_qlen_log) < nr_table_entries;
 	     lopt->max_qlen_log++);
 
 	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
@@ -65,9 +75,11 @@
 {
 	/* make all the listen_opt local to us */
 	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
+	size_t lopt_size = sizeof(struct listen_sock) +
+		lopt->nr_table_entries * sizeof(struct request_sock *);
 
 	if (lopt->qlen != 0) {
-		int i;
+		unsigned int i;
 
 		for (i = 0; i < lopt->nr_table_entries; i++) {
 			struct request_sock *req;
@@ -81,7 +93,10 @@
 	}
 
 	BUG_TRAP(lopt->qlen == 0);
-	kfree(lopt);
+	if (lopt_size > PAGE_SIZE)
+		vfree(lopt);
+	else
+		kfree(lopt);
 }
 
 EXPORT_SYMBOL(reqsk_queue_destroy);
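
reqsk_queue_alloc() above now sizes the SYN hash table from the requested backlog rather than always from sysctl_max_syn_backlog: the backlog is clamped to [8, sysctl_max_syn_backlog], rounded up to a power of two, and max_qlen_log is derived from the result; tables larger than a page are obtained with __vmalloc() instead of kzalloc(). A user-space sketch of the sizing arithmetic (illustrative only):

	#include <stdio.h>

	static unsigned int table_entries(unsigned int backlog, unsigned int max_syn_backlog)
	{
		unsigned int n = backlog, p = 1;

		if (n > max_syn_backlog)
			n = max_syn_backlog;
		if (n < 8)
			n = 8;
		n += 1;				/* roundup_pow_of_two(nr_table_entries + 1) */
		while (p < n)
			p <<= 1;
		return p;
	}

	static unsigned int max_qlen_log(unsigned int entries)
	{
		unsigned int log = 3;

		while ((1U << log) < entries)
			log++;
		return log;
	}

	int main(void)
	{
		unsigned int n = table_entries(128, 256);

		printf("%u entries, max_qlen_log=%u\n", n, max_qlen_log(n));	/* 256, 8 */
		return 0;
	}
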
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 02f3c79..e76539a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -108,7 +108,6 @@
 	[RTM_FAM(RTM_NEWTCLASS)]    = NLMSG_LENGTH(sizeof(struct tcmsg)),
 	[RTM_FAM(RTM_NEWTFILTER)]   = NLMSG_LENGTH(sizeof(struct tcmsg)),
 	[RTM_FAM(RTM_NEWACTION)]    = NLMSG_LENGTH(sizeof(struct tcamsg)),
-	[RTM_FAM(RTM_NEWPREFIX)]    = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
 	[RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
 	[RTM_FAM(RTM_GETANYCAST)]   = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
 };
@@ -213,6 +212,26 @@
 	return nla_nest_cancel(skb, mx);
 }
 
+int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
+		       u32 ts, u32 tsage, long expires, u32 error)
+{
+	struct rta_cacheinfo ci = {
+		.rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse),
+		.rta_used = dst->__use,
+		.rta_clntref = atomic_read(&(dst->__refcnt)),
+		.rta_error = error,
+		.rta_id =  id,
+		.rta_ts = ts,
+		.rta_tsage = tsage,
+	};
+
+	if (expires)
+		ci.rta_expires = jiffies_to_clock_t(expires);
+
+	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
+}
+
+EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
 
 static void set_operstate(struct net_device *dev, unsigned char transition)
 {
@@ -273,6 +292,25 @@
 	a->tx_compressed = b->tx_compressed;
 };
 
+static inline size_t if_nlmsg_size(int iwbuflen)
+{
+	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
+	       + nla_total_size(sizeof(struct rtnl_link_ifmap))
+	       + nla_total_size(sizeof(struct rtnl_link_stats))
+	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
+	       + nla_total_size(4) /* IFLA_TXQLEN */
+	       + nla_total_size(4) /* IFLA_WEIGHT */
+	       + nla_total_size(4) /* IFLA_MTU */
+	       + nla_total_size(4) /* IFLA_LINK */
+	       + nla_total_size(4) /* IFLA_MASTER */
+	       + nla_total_size(1) /* IFLA_OPERSTATE */
+	       + nla_total_size(1) /* IFLA_LINKMODE */
+	       + nla_total_size(iwbuflen);
+}
+
 static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 			    void *iwbuf, int iwbuflen, int type, u32 pid,
 			    u32 seq, u32 change, unsigned int flags)
@@ -558,7 +596,7 @@
 	struct sk_buff *nskb;
 	char *iw_buf = NULL, *iw = NULL;
 	int iw_buf_len = 0;
-	int err, payload;
+	int err;
 
 	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
 	if (err < 0)
@@ -587,9 +625,7 @@
 	}
 #endif	/* CONFIG_NET_WIRELESS_RTNETLINK */
 
-	payload = NLMSG_ALIGN(sizeof(struct ifinfomsg) +
-			      nla_total_size(iw_buf_len));
-	nskb = nlmsg_new(nlmsg_total_size(payload), GFP_KERNEL);
+	nskb = nlmsg_new(if_nlmsg_size(iw_buf_len), GFP_KERNEL);
 	if (nskb == NULL) {
 		err = -ENOBUFS;
 		goto errout;
@@ -597,10 +633,8 @@
 
 	err = rtnl_fill_ifinfo(nskb, dev, iw, iw_buf_len, RTM_NEWLINK,
 			       NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0, 0);
-	if (err <= 0) {
-		kfree_skb(nskb);
-		goto errout;
-	}
+	/* failure implies BUG in if_nlmsg_size() or wireless_rtnetlink_get() */
+	BUG_ON(err < 0);
 
 	err = rtnl_unicast(nskb, NETLINK_CB(skb).pid);
 errout:
@@ -639,15 +673,13 @@
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
 
-	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	skb = nlmsg_new(if_nlmsg_size(0), GFP_KERNEL);
 	if (skb == NULL)
 		goto errout;
 
 	err = rtnl_fill_ifinfo(skb, dev, NULL, 0, type, 0, 0, change, 0);
-	if (err < 0) {
-		kfree_skb(skb);
-		goto errout;
-	}
+	/* failure implies BUG in if_nlmsg_size() */
+	BUG_ON(err < 0);
 
 	err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
 errout:
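
Several hunks in this series (fib_rule_nlmsg_size(), neigh_nlmsg_size() and if_nlmsg_size() above) follow the same pattern: precompute the exact netlink message size so nlmsg_new() allocates just enough, then treat any later fill failure as a BUG in the size calculation. Each nla_total_size(payload) term is the attribute payload plus a 4-byte attribute header, rounded up to 4-byte alignment. A rough user-space model (the MY_ names are local stand-ins, not the kernel's netlink.h helpers):

	#include <stdio.h>

	struct my_nlattr {
		unsigned short nla_len;
		unsigned short nla_type;
	};

	#define MY_NLA_ALIGNTO		4
	#define MY_NLA_ALIGN(len)	(((len) + MY_NLA_ALIGNTO - 1) & ~(MY_NLA_ALIGNTO - 1))
	#define MY_NLA_HDRLEN		((int) MY_NLA_ALIGN(sizeof(struct my_nlattr)))

	static size_t my_nla_total_size(size_t payload)
	{
		return MY_NLA_ALIGN(MY_NLA_HDRLEN + payload);
	}

	int main(void)
	{
		printf("%zu\n", my_nla_total_size(4));	/* 8:  a u32 attribute such as IFLA_MTU */
		printf("%zu\n", my_nla_total_size(16));	/* 20: an IFNAMSIZ-sized string attribute */
		return 0;
	}
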
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b8b1063..de7801d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -56,7 +56,6 @@
 #include <linux/cache.h>
 #include <linux/rtnetlink.h>
 #include <linux/init.h>
-#include <linux/highmem.h>
 
 #include <net/protocol.h>
 #include <net/dst.h>
@@ -67,8 +66,10 @@
 #include <asm/uaccess.h>
 #include <asm/system.h>
 
-static kmem_cache_t *skbuff_head_cache __read_mostly;
-static kmem_cache_t *skbuff_fclone_cache __read_mostly;
+#include "kmap_skb.h"
+
+static struct kmem_cache *skbuff_head_cache __read_mostly;
+static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 
 /*
  *	Keep out-of-line to prevent kernel bloat.
@@ -131,6 +132,7 @@
  *	@gfp_mask: allocation mask
  *	@fclone: allocate from fclone cache instead of head cache
  *		and allocate a cloned (child) skb
+ *	@node: NUMA node to allocate memory on
  *
  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
  *	tail room of size bytes. The object has a reference count of one.
@@ -140,9 +142,9 @@
  *	%GFP_ATOMIC.
  */
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
-			    int fclone)
+			    int fclone, int node)
 {
-	kmem_cache_t *cache;
+	struct kmem_cache *cache;
 	struct skb_shared_info *shinfo;
 	struct sk_buff *skb;
 	u8 *data;
@@ -150,14 +152,14 @@
 	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
 
 	/* Get the HEAD */
-	skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
+	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
 	if (!skb)
 		goto out;
 
 	/* Get the DATA. Size must match skb_add_mtu(). */
 	size = SKB_DATA_ALIGN(size);
-	data = kmalloc_track_caller(size + sizeof(struct skb_shared_info),
-			gfp_mask);
+	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
+			gfp_mask, node);
 	if (!data)
 		goto nodata;
 
@@ -209,7 +211,7 @@
  *	Buffers may only be allocated from interrupts using a @gfp_mask of
  *	%GFP_ATOMIC.
  */
-struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
 				     unsigned int size,
 				     gfp_t gfp_mask)
 {
@@ -266,9 +268,10 @@
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 		unsigned int length, gfp_t gfp_mask)
 {
+	int node = dev->class_dev.dev ? dev_to_node(dev->class_dev.dev) : -1;
 	struct sk_buff *skb;
 
-	skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+ 	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
 	if (likely(skb)) {
 		skb_reserve(skb, NET_SKB_PAD);
 		skb->dev = dev;
@@ -473,8 +476,8 @@
 #endif
 	C(protocol);
 	n->destructor = NULL;
+	C(mark);
 #ifdef CONFIG_NETFILTER
-	C(nfmark);
 	C(nfct);
 	nf_conntrack_get(skb->nfct);
 	C(nfctinfo);
@@ -534,8 +537,8 @@
 	new->pkt_type	= old->pkt_type;
 	new->tstamp	= old->tstamp;
 	new->destructor = NULL;
+	new->mark	= old->mark;
 #ifdef CONFIG_NETFILTER
-	new->nfmark	= old->nfmark;
 	new->nfct	= old->nfct;
 	nf_conntrack_get(old->nfct);
 	new->nfctinfo	= old->nfctinfo;
@@ -1240,8 +1243,8 @@
 
 /* Checksum skb data. */
 
-unsigned int skb_checksum(const struct sk_buff *skb, int offset,
-			  int len, unsigned int csum)
+__wsum skb_checksum(const struct sk_buff *skb, int offset,
+			  int len, __wsum csum)
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
@@ -1265,7 +1268,7 @@
 
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
-			unsigned int csum2;
+			__wsum csum2;
 			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
@@ -1294,7 +1297,7 @@
 
 			end = start + list->len;
 			if ((copy = end - offset) > 0) {
-				unsigned int csum2;
+				__wsum csum2;
 				if (copy > len)
 					copy = len;
 				csum2 = skb_checksum(list, offset - start,
@@ -1315,8 +1318,8 @@
 
 /* Both of above in one bottle. */
 
-unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
-				    u8 *to, int len, unsigned int csum)
+__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
+				    u8 *to, int len, __wsum csum)
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
@@ -1342,7 +1345,7 @@
 
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
-			unsigned int csum2;
+			__wsum csum2;
 			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
@@ -1368,7 +1371,7 @@
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			unsigned int csum2;
+			__wsum csum2;
 			int end;
 
 			BUG_TRAP(start <= offset + len);
@@ -1396,7 +1399,7 @@
 
 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
 {
-	unsigned int csum;
+	__wsum csum;
 	long csstart;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -1414,9 +1417,9 @@
 					      skb->len - csstart, 0);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		long csstuff = csstart + skb->csum;
+		long csstuff = csstart + skb->csum_offset;
 
-		*((unsigned short *)(to + csstuff)) = csum_fold(csum);
+		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
 	}
 }
 
diff --git a/net/core/sock.c b/net/core/sock.c
index ee6cd25..0ed5b4f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -111,6 +111,7 @@
 #include <linux/poll.h>
 #include <linux/tcp.h>
 #include <linux/init.h>
+#include <linux/highmem.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -270,7 +271,7 @@
 }
 EXPORT_SYMBOL(sock_queue_rcv_skb);
 
-int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
+int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 {
 	int rc = NET_RX_SUCCESS;
 
@@ -279,7 +280,10 @@
 
 	skb->dev = NULL;
 
-	bh_lock_sock(sk);
+	if (nested)
+		bh_lock_sock_nested(sk);
+	else
+		bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk)) {
 		/*
 		 * trylock + unlock semantics:
@@ -806,24 +810,11 @@
  */
 static void inline sock_lock_init(struct sock *sk)
 {
-	spin_lock_init(&sk->sk_lock.slock);
-	sk->sk_lock.owner = NULL;
-	init_waitqueue_head(&sk->sk_lock.wq);
-	/*
-	 * Make sure we are not reinitializing a held lock:
-	 */
-	debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));
-
-	/*
-	 * Mark both the sk_lock and the sk_lock.slock as a
-	 * per-address-family lock class:
-	 */
-	lockdep_set_class_and_name(&sk->sk_lock.slock,
-				   af_family_slock_keys + sk->sk_family,
-				   af_family_slock_key_strings[sk->sk_family]);
-	lockdep_init_map(&sk->sk_lock.dep_map,
-			 af_family_key_strings[sk->sk_family],
-			 af_family_keys + sk->sk_family, 0);
+	sock_lock_init_class_and_name(sk,
+			af_family_slock_key_strings[sk->sk_family],
+			af_family_slock_keys + sk->sk_family,
+			af_family_key_strings[sk->sk_family],
+			af_family_keys + sk->sk_family);
 }
 
 /**
@@ -837,7 +828,7 @@
 		      struct proto *prot, int zero_it)
 {
 	struct sock *sk = NULL;
-	kmem_cache_t *slab = prot->slab;
+	struct kmem_cache *slab = prot->slab;
 
 	if (slab != NULL)
 		sk = kmem_cache_alloc(slab, priority);
@@ -1527,7 +1518,7 @@
 	atomic_set(&sk->sk_refcnt, 1);
 }
 
-void fastcall lock_sock(struct sock *sk)
+void fastcall lock_sock_nested(struct sock *sk, int subclass)
 {
 	might_sleep();
 	spin_lock_bh(&sk->sk_lock.slock);
@@ -1538,11 +1529,11 @@
 	/*
 	 * The sk_lock has mutex_lock() semantics here:
 	 */
-	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
 	local_bh_enable();
 }
 
-EXPORT_SYMBOL(lock_sock);
+EXPORT_SYMBOL(lock_sock_nested);
 
 void fastcall release_sock(struct sock *sk)
 {
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0253413..1e75b15 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -21,10 +21,6 @@
 
 extern int sysctl_core_destroy_delay;
 
-#ifdef CONFIG_NET_DIVERT
-extern char sysctl_divert_version[];
-#endif /* CONFIG_NET_DIVERT */
-
 #ifdef CONFIG_XFRM
 extern u32 sysctl_xfrm_aevent_etime;
 extern u32 sysctl_xfrm_aevent_rseqth;
@@ -105,16 +101,6 @@
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec
 	},
-#ifdef CONFIG_NET_DIVERT
-	{
-		.ctl_name	= NET_CORE_DIVERT_VERSION,
-		.procname	= "divert_version",
-		.data		= (void *)sysctl_divert_version,
-		.maxlen		= 32,
-		.mode		= 0444,
-		.proc_handler	= &proc_dostring
-	},
-#endif /* CONFIG_NET_DIVERT */
 #ifdef CONFIG_XFRM
 	{
 		.ctl_name	= NET_CORE_AEVENT_ETIME,
diff --git a/net/core/utils.c b/net/core/utils.c
index d93fe64..6155606 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -88,7 +88,7 @@
 #define IN6PTON_NULL		0x20000000	/* first/tail */
 #define IN6PTON_UNKNOWN		0x40000000
 
-static inline int digit2bin(char c, char delim)
+static inline int digit2bin(char c, int delim)
 {
 	if (c == delim || c == '\0')
 		return IN6PTON_DELIM;
@@ -99,7 +99,7 @@
 	return IN6PTON_UNKNOWN;
 }
 
-static inline int xdigit2bin(char c, char delim)
+static inline int xdigit2bin(char c, int delim)
 {
 	if (c == delim || c == '\0')
 		return IN6PTON_DELIM;
@@ -113,12 +113,14 @@
 		return (IN6PTON_XDIGIT | (c - 'a' + 10));
 	if (c >= 'A' && c <= 'F')
 		return (IN6PTON_XDIGIT | (c - 'A' + 10));
+	if (delim == -1)
+		return IN6PTON_DELIM;
 	return IN6PTON_UNKNOWN;
 }
 
 int in4_pton(const char *src, int srclen,
 	     u8 *dst,
-	     char delim, const char **end)
+	     int delim, const char **end)
 {
 	const char *s;
 	u8 *d;
@@ -173,7 +175,7 @@
 
 int in6_pton(const char *src, int srclen,
 	     u8 *dst,
-	     char delim, const char **end)
+	     int delim, const char **end)
 {
 	const char *s, *tok = NULL;
 	u8 *d, *dc = NULL;
diff --git a/net/core/wireless.c b/net/core/wireless.c
index cb1b872..f69ab7b 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -2130,7 +2130,7 @@
 	 * The rtnl_lock() make sure we don't race with the other iw_handlers.
 	 * This make sure wireless_spy_update() "see" that the spy list
 	 * is temporarily disabled. */
-	wmb();
+	smp_wmb();
 
 	/* Are there are addresses to copy? */
 	if(wrqu->data.length > 0) {
@@ -2159,7 +2159,7 @@
 	}
 
 	/* Make sure above is updated before re-enabling */
-	wmb();
+	smp_wmb();
 
 	/* Enable addresses */
 	spydata->spy_number = wrqu->data.length;
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index ef8919c..b8a68dd 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -38,6 +38,9 @@
 	---help---
 	  Only use this if you're hacking DCCP.
 
+	  When compiling DCCP as a module, this debugging output can be toggled
+	  by setting the dccp_debug parameter of the `dccp' module to 0 or 1.
+
 	  Just say N.
 
 config NET_DCCPPROBE
@@ -49,7 +52,7 @@
 	DCCP congestion avoidance modules. If you don't understand
 	what was just said, you don't need it: say N.
 
-	Documentation on how to use the packet generator can be found
+	Documentation on how to use DCCP connection probing can be found
 	at http://linux-net.osdl.org/index.php/DccpProbe
 
 	To compile this code as a module, choose M here: the
diff --git a/net/dccp/Makefile b/net/dccp/Makefile
index 17ed99c..f4f8793 100644
--- a/net/dccp/Makefile
+++ b/net/dccp/Makefile
@@ -1,13 +1,13 @@
-obj-$(CONFIG_IPV6) += dccp_ipv6.o
-
-dccp_ipv6-y := ipv6.o
-
 obj-$(CONFIG_IP_DCCP) += dccp.o dccp_ipv4.o
 
 dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o
 
 dccp_ipv4-y := ipv4.o
 
+# build dccp_ipv6 as module whenever either IPv6 or DCCP is a module
+obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) += dccp_ipv6.o
+dccp_ipv6-y := ipv6.o
+
 dccp-$(CONFIG_IP_DCCP_ACKVEC) += ackvec.o
 
 obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index f820887..1f4727d 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -21,8 +21,8 @@
 
 #include <net/sock.h>
 
-static kmem_cache_t *dccp_ackvec_slab;
-static kmem_cache_t *dccp_ackvec_record_slab;
+static struct kmem_cache *dccp_ackvec_slab;
+static struct kmem_cache *dccp_ackvec_record_slab;
 
 static struct dccp_ackvec_record *dccp_ackvec_record_new(void)
 {
@@ -67,15 +67,16 @@
 int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
-#ifdef CONFIG_IP_DCCP_DEBUG
-	const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ?
-				"CLIENT tx: " : "server tx: ";
-#endif
 	struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec;
-	int len = av->dccpav_vec_len + 2;
+	/* Figure out how many options we need to represent the ack vector */
+	const u16 nr_opts = (av->dccpav_vec_len +
+			     DCCP_MAX_ACKVEC_OPT_LEN - 1) /
+			    DCCP_MAX_ACKVEC_OPT_LEN;
+	u16 len = av->dccpav_vec_len + 2 * nr_opts, i;
 	struct timeval now;
 	u32 elapsed_time;
-	unsigned char *to, *from;
+	const unsigned char *tail, *from;
+	unsigned char *to;
 	struct dccp_ackvec_record *avr;
 
 	if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
@@ -94,24 +95,37 @@
 
 	DCCP_SKB_CB(skb)->dccpd_opt_len += len;
 
-	to    = skb_push(skb, len);
-	*to++ = DCCPO_ACK_VECTOR_0;
-	*to++ = len;
-
+	to   = skb_push(skb, len);
 	len  = av->dccpav_vec_len;
 	from = av->dccpav_buf + av->dccpav_buf_head;
+	tail = av->dccpav_buf + DCCP_MAX_ACKVEC_LEN;
 
-	/* Check if buf_head wraps */
-	if ((int)av->dccpav_buf_head + len > DCCP_MAX_ACKVEC_LEN) {
-		const u32 tailsize = DCCP_MAX_ACKVEC_LEN - av->dccpav_buf_head;
+	for (i = 0; i < nr_opts; ++i) {
+		int copylen = len;
 
-		memcpy(to, from, tailsize);
-		to   += tailsize;
-		len  -= tailsize;
-		from = av->dccpav_buf;
+		if (len > DCCP_MAX_ACKVEC_OPT_LEN)
+			copylen = DCCP_MAX_ACKVEC_OPT_LEN;
+
+		*to++ = DCCPO_ACK_VECTOR_0;
+		*to++ = copylen + 2;
+
+		/* Check if buf_head wraps */
+		if (from + copylen > tail) {
+			const u16 tailsize = tail - from;
+
+			memcpy(to, from, tailsize);
+			to	+= tailsize;
+			len	-= tailsize;
+			copylen	-= tailsize;
+			from	= av->dccpav_buf;
+		}
+
+		memcpy(to, from, copylen);
+		from += copylen;
+		to   += copylen;
+		len  -= copylen;
 	}
 
-	memcpy(to, from, len);
 	/*
 	 *	From RFC 4340, A.2:
 	 *
@@ -129,9 +143,9 @@
 
 	dccp_ackvec_insert_avr(av, avr);
 
-	dccp_pr_debug("%sACK Vector 0, len=%d, ack_seqno=%llu, "
+	dccp_pr_debug("%s ACK Vector 0, len=%d, ack_seqno=%llu, "
 		      "ack_ackno=%llu\n",
-		      debug_prefix, avr->dccpavr_sent_len,
+		      dccp_role(sk), avr->dccpavr_sent_len,
 		      (unsigned long long)avr->dccpavr_ack_seqno,
 		      (unsigned long long)avr->dccpavr_ack_ackno);
 	return 0;
@@ -145,7 +159,6 @@
 		av->dccpav_buf_head	= DCCP_MAX_ACKVEC_LEN - 1;
 		av->dccpav_buf_ackno	= DCCP_MAX_SEQNO + 1;
 		av->dccpav_buf_nonce = av->dccpav_buf_nonce = 0;
-		av->dccpav_ack_ptr	= 0;
 		av->dccpav_time.tv_sec	= 0;
 		av->dccpav_time.tv_usec	= 0;
 		av->dccpav_vec_len	= 0;
@@ -174,13 +187,13 @@
 }
 
 static inline u8 dccp_ackvec_state(const struct dccp_ackvec *av,
-				   const u8 index)
+				   const u32 index)
 {
 	return av->dccpav_buf[index] & DCCP_ACKVEC_STATE_MASK;
 }
 
 static inline u8 dccp_ackvec_len(const struct dccp_ackvec *av,
-				 const u8 index)
+				 const u32 index)
 {
 	return av->dccpav_buf[index] & DCCP_ACKVEC_LEN_MASK;
 }
@@ -280,7 +293,7 @@
 		 *	could reduce the complexity of this scan.)
 		 */
 		u64 delta = dccp_delta_seqno(ackno, av->dccpav_buf_ackno);
-		u8 index = av->dccpav_buf_head;
+		u32 index = av->dccpav_buf_head;
 
 		while (1) {
 			const u8 len = dccp_ackvec_len(av, index);
@@ -322,21 +335,18 @@
 #ifdef CONFIG_IP_DCCP_DEBUG
 void dccp_ackvector_print(const u64 ackno, const unsigned char *vector, int len)
 {
-	if (!dccp_debug)
-		return;
-
-	printk("ACK vector len=%d, ackno=%llu |", len,
-	       (unsigned long long)ackno);
+	dccp_pr_debug_cat("ACK vector len=%d, ackno=%llu |", len,
+			 		(unsigned long long)ackno);
 
 	while (len--) {
 		const u8 state = (*vector & DCCP_ACKVEC_STATE_MASK) >> 6;
 		const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
 
-		printk("%d,%d|", state, rl);
+		dccp_pr_debug_cat("%d,%d|", state, rl);
 		++vector;
 	}
 
-	printk("\n");
+	dccp_pr_debug_cat("\n");
 }
 
 void dccp_ackvec_print(const struct dccp_ackvec *av)
@@ -380,24 +390,20 @@
 	 */
 	list_for_each_entry_reverse(avr, &av->dccpav_records, dccpavr_node) {
 		if (ackno == avr->dccpavr_ack_seqno) {
-#ifdef CONFIG_IP_DCCP_DEBUG
-			struct dccp_sock *dp = dccp_sk(sk);
-			const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ?
-						"CLIENT rx ack: " : "server rx ack: ";
-#endif
-			dccp_pr_debug("%sACK packet 0, len=%d, ack_seqno=%llu, "
+			dccp_pr_debug("%s ACK packet 0, len=%d, ack_seqno=%llu, "
 				      "ack_ackno=%llu, ACKED!\n",
-				      debug_prefix, 1,
+				      dccp_role(sk), 1,
 				      (unsigned long long)avr->dccpavr_ack_seqno,
 				      (unsigned long long)avr->dccpavr_ack_ackno);
 			dccp_ackvec_throw_record(av, avr);
 			break;
-		}
+		} else if (avr->dccpavr_ack_seqno > ackno)
+			break; /* old news */
 	}
 }
 
 static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av,
-					    struct sock *sk, u64 ackno,
+					    struct sock *sk, u64 *ackno,
 					    const unsigned char len,
 					    const unsigned char *vector)
 {
@@ -420,7 +426,7 @@
 		const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
 		u64 ackno_end_rl;
 
-		dccp_set_seqno(&ackno_end_rl, ackno - rl);
+		dccp_set_seqno(&ackno_end_rl, *ackno - rl);
 
 		/*
 		 * If our AVR sequence number is greater than the ack, go
@@ -428,25 +434,19 @@
 		 */
 		list_for_each_entry_from(avr, &av->dccpav_records,
 					 dccpavr_node) {
-			if (!after48(avr->dccpavr_ack_seqno, ackno))
+			if (!after48(avr->dccpavr_ack_seqno, *ackno))
 				goto found;
 		}
 		/* End of the dccpav_records list, not found, exit */
 		break;
 found:
-		if (between48(avr->dccpavr_ack_seqno, ackno_end_rl, ackno)) {
+		if (between48(avr->dccpavr_ack_seqno, ackno_end_rl, *ackno)) {
 			const u8 state = *vector & DCCP_ACKVEC_STATE_MASK;
 			if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED) {
-#ifdef CONFIG_IP_DCCP_DEBUG
-				struct dccp_sock *dp = dccp_sk(sk);
-				const char *debug_prefix =
-					dp->dccps_role == DCCP_ROLE_CLIENT ?
-					"CLIENT rx ack: " : "server rx ack: ";
-#endif
-				dccp_pr_debug("%sACK vector 0, len=%d, "
+				dccp_pr_debug("%s ACK vector 0, len=%d, "
 					      "ack_seqno=%llu, ack_ackno=%llu, "
 					      "ACKED!\n",
-					      debug_prefix, len,
+					      dccp_role(sk), len,
 					      (unsigned long long)
 					      avr->dccpavr_ack_seqno,
 					      (unsigned long long)
@@ -460,27 +460,23 @@
 			 */
 		}
 
-		dccp_set_seqno(&ackno, ackno_end_rl - 1);
+		dccp_set_seqno(ackno, ackno_end_rl - 1);
 		++vector;
 	}
 }
 
 int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb,
-		      const u8 opt, const u8 *value, const u8 len)
+		      u64 *ackno, const u8 opt, const u8 *value, const u8 len)
 {
-	if (len > DCCP_MAX_ACKVEC_LEN)
+	if (len > DCCP_MAX_ACKVEC_OPT_LEN)
 		return -1;
 
 	/* dccp_ackvector_print(DCCP_SKB_CB(skb)->dccpd_ack_seq, value, len); */
 	dccp_ackvec_check_rcv_ackvector(dccp_sk(sk)->dccps_hc_rx_ackvec, sk,
-					DCCP_SKB_CB(skb)->dccpd_ack_seq,
-				        len, value);
+					ackno, len, value);
 	return 0;
 }
 
-static char dccp_ackvec_slab_msg[] __initdata =
-	KERN_CRIT "DCCP: Unable to create ack vectors slab caches\n";
-
 int __init dccp_ackvec_init(void)
 {
 	dccp_ackvec_slab = kmem_cache_create("dccp_ackvec",
@@ -502,7 +498,7 @@
 	kmem_cache_destroy(dccp_ackvec_slab);
 	dccp_ackvec_slab = NULL;
 out_err:
-	printk(dccp_ackvec_slab_msg);
+	DCCP_CRIT("Unable to create Ack Vector slab cache");
 	return -ENOBUFS;
 }
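
The rewritten dccp_insert_option_ackvec() above no longer assumes the whole vector fits into a single option: with DCCP_MAX_ACKVEC_LEN doubled to 2 * 253 bytes (see the ackvec.h hunk below), a long vector is emitted as several consecutive Ack Vector options, each carrying at most DCCP_MAX_ACKVEC_OPT_LEN bytes of payload plus its own 2-byte option header. The space accounting reduces to a ceiling division, sketched here as a user-space check (the example length is arbitrary):

	#include <stdio.h>

	#define DCCP_MAX_ACKVEC_OPT_LEN	253

	int main(void)
	{
		unsigned int vec_len = 300;	/* example vector longer than one option */
		unsigned int nr_opts = (vec_len + DCCP_MAX_ACKVEC_OPT_LEN - 1) /
				       DCCP_MAX_ACKVEC_OPT_LEN;
		unsigned int wire_len = vec_len + 2 * nr_opts;

		printf("%u options, %u option bytes\n", nr_opts, wire_len);	/* 2, 304 */
		return 0;
	}
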
 
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index cf8f20c..96504a3 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -17,7 +17,9 @@
 #include <linux/types.h>
 
 /* Read about the ECN nonce to see why it is 253 */
-#define DCCP_MAX_ACKVEC_LEN 253
+#define DCCP_MAX_ACKVEC_OPT_LEN 253
+/* We can spread an ack vector across multiple options */
+#define DCCP_MAX_ACKVEC_LEN (DCCP_MAX_ACKVEC_OPT_LEN * 2)
 
 #define DCCP_ACKVEC_STATE_RECEIVED	0
 #define DCCP_ACKVEC_STATE_ECN_MARKED	(1 << 6)
@@ -41,7 +43,6 @@
  * Ack Vectors it has recently sent. For each packet sent carrying an
  * Ack Vector, it remembers four variables:
  *
- * @dccpav_ack_ptr - the value of buf_head at the time of acknowledgement.
  * @dccpav_records - list of dccp_ackvec_record
  * @dccpav_ack_nonce - the one-bit sum of the ECN Nonces for all State 0.
  *
@@ -52,9 +53,8 @@
 	u64		dccpav_buf_ackno;
 	struct list_head dccpav_records;
 	struct timeval	dccpav_time;
-	u8		dccpav_buf_head;
-	u8		dccpav_ack_ptr;
-	u8		dccpav_vec_len;
+	u16		dccpav_buf_head;
+	u16		dccpav_vec_len;
 	u8		dccpav_buf_nonce;
 	u8		dccpav_ack_nonce;
 	u8		dccpav_buf[DCCP_MAX_ACKVEC_LEN];
@@ -77,9 +77,9 @@
 	struct list_head dccpavr_node;
 	u64		 dccpavr_ack_seqno;
 	u64		 dccpavr_ack_ackno;
-	u8		 dccpavr_ack_ptr;
+	u16		 dccpavr_ack_ptr;
+	u16		 dccpavr_sent_len;
 	u8		 dccpavr_ack_nonce;
-	u8		 dccpavr_sent_len;
 };
 
 struct sock;
@@ -98,7 +98,8 @@
 extern void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av,
 					struct sock *sk, const u64 ackno);
 extern int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb,
-			     const u8 opt, const u8 *value, const u8 len);
+			     u64 *ackno, const u8 opt,
+			     const u8 *value, const u8 len);
 
 extern int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb);
 
@@ -137,7 +138,8 @@
 }
 
 static inline int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb,
-				    const u8 opt, const u8 *value, const u8 len)
+				    const u64 *ackno, const u8 opt,
+				    const u8 *value, const u8 len)
 {
 	return -1;
 }
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index ff05e59..d8cf92f 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -55,9 +55,9 @@
 #define ccids_read_unlock() do { } while(0)
 #endif
 
-static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
+static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
 {
-	kmem_cache_t *slab;
+	struct kmem_cache *slab;
 	char slab_name_fmt[32], *slab_name;
 	va_list args;
 
@@ -75,7 +75,7 @@
 	return slab;
 }
 
-static void ccid_kmem_cache_destroy(kmem_cache_t *slab)
+static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
 {
 	if (slab != NULL) {
 		const char *name = kmem_cache_name(slab);
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index f7eb6c6..bcc2d12 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -27,9 +27,9 @@
 	unsigned char	ccid_id;
 	const char	*ccid_name;
 	struct module	*ccid_owner;
-	kmem_cache_t	*ccid_hc_rx_slab;
+	struct kmem_cache	*ccid_hc_rx_slab;
 	__u32		ccid_hc_rx_obj_size;
-	kmem_cache_t	*ccid_hc_tx_slab;
+	struct kmem_cache	*ccid_hc_tx_slab;
 	__u32		ccid_hc_tx_obj_size;
 	int		(*ccid_hc_rx_init)(struct ccid *ccid, struct sock *sk);
 	int		(*ccid_hc_tx_init)(struct ccid *ccid, struct sock *sk);
@@ -52,9 +52,9 @@
 						    unsigned char len, u16 idx,
 						    unsigned char* value);
 	int		(*ccid_hc_tx_send_packet)(struct sock *sk,
-						  struct sk_buff *skb, int len);
-	void		(*ccid_hc_tx_packet_sent)(struct sock *sk, int more,
-						  int len);
+						  struct sk_buff *skb);
+	void		(*ccid_hc_tx_packet_sent)(struct sock *sk,
+						  int more, unsigned int len);
 	void		(*ccid_hc_rx_get_info)(struct sock *sk,
 					       struct tcp_info *info);
 	void		(*ccid_hc_tx_get_info)(struct sock *sk,
@@ -94,16 +94,16 @@
 extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk);
 
 static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk,
-					 struct sk_buff *skb, int len)
+					 struct sk_buff *skb)
 {
 	int rc = 0;
 	if (ccid->ccid_ops->ccid_hc_tx_send_packet != NULL)
-		rc = ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb, len);
+		rc = ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb);
 	return rc;
 }
 
 static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk,
-					  int more, int len)
+					  int more, unsigned int len)
 {
 	if (ccid->ccid_ops->ccid_hc_tx_packet_sent != NULL)
 		ccid->ccid_ops->ccid_hc_tx_packet_sent(sk, more, len);
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index 8533dab..80f4698 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -28,13 +28,20 @@
 	  This text was extracted from RFC 4340 (sec. 10.1),
 	  http://www.ietf.org/rfc/rfc4340.txt
 
+	  To compile this CCID as a module, choose M here: the module will be
+	  called dccp_ccid2.
+
 	  If in doubt, say M.
 
 config IP_DCCP_CCID2_DEBUG
-	  bool "CCID2 debug"
+	  bool "CCID2 debugging messages"
 	  depends on IP_DCCP_CCID2
 	  ---help---
-	    Enable CCID2 debug messages.
+	    Enable CCID2-specific debugging messages.
+
+	    When compiling CCID2 as a module, this debugging output can
+	    additionally be toggled by setting the ccid2_debug module
+	    parameter to 0 or 1.
 
 	    If in doubt, say N.
 
@@ -62,10 +69,57 @@
 	  This text was extracted from RFC 4340 (sec. 10.2),
 	  http://www.ietf.org/rfc/rfc4340.txt
 	  
+	  To compile this CCID as a module, choose M here: the module will be
+	  called dccp_ccid3.
+
 	  If in doubt, say M.
 
 config IP_DCCP_TFRC_LIB
 	depends on IP_DCCP_CCID3
 	def_tristate IP_DCCP_CCID3
 
+config IP_DCCP_CCID3_DEBUG
+	  bool "CCID3 debugging messages"
+	  depends on IP_DCCP_CCID3
+	  ---help---
+	    Enable CCID3-specific debugging messages.
+
+	    When compiling CCID3 as a module, this debugging output can
+	    additionally be toggled by setting the ccid3_debug module
+	    parameter to 0 or 1.
+
+	    If in doubt, say N.
+
+config IP_DCCP_CCID3_RTO
+	  int "Use higher bound for nofeedback timer"
+	  default 100
+	  depends on IP_DCCP_CCID3 && EXPERIMENTAL
+	  ---help---
+	    Use a higher lower bound for the nofeedback timer expiration.
+
+	    The TFRC nofeedback timer normally expires after the maximum of 4
+	    RTTs and twice the current send interval (RFC 3448, 4.3). On LANs
+	    with a small RTT this can mean a high processing load and reduced
+	    performance, since the nofeedback timer is then triggered very
+	    frequently.
+
+	    This option allows a higher lower bound for the nofeedback value to
+	    be set, specified in milliseconds.
+
+	    A value of 0 disables this feature by enforcing the value specified
+	    in RFC 3448. The following values have been suggested as bounds for
+	    experimental use:
+	    	* 16-20ms to match the typical multimedia inter-frame interval
+	    	* 100ms as a reasonable compromise [default]
+	    	* 1000ms corresponds to the lower TCP RTO bound (RFC 2988, 2.4)
+
+	    The default of 100ms is a compromise between a large value for
+	    efficient DCCP implementations, and a small value to avoid disrupting
+	    the network in times of congestion.
+
+	    The purpose of the nofeedback timer is to slow DCCP down when there
+	    is serious network congestion: experimenting with larger values should
+	    therefore not be performed on WANs.
+
+
 endmenu
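
As a rough illustration of how the CONFIG_IP_DCCP_CCID3_RTO bound enters the
nofeedback timeout, here is a stand-alone user-space sketch. Only the
max(4 * RTT, configured bound) and max(t_RTO, 2 * t_ipi) arithmetic is taken
from the patch below; the helper name and the sample values are made up.

#include <stdio.h>

/* Hypothetical stand-in for CONFIG_IP_DCCP_CCID3_RTO (milliseconds). */
#define CCID3_RTO_MS	100
#define USEC_PER_SEC	1000000UL
#define MAX(a, b)	((a) > (b) ? (a) : (b))

/*
 * Sketch of the timeout computation used once feedback has been received:
 * t_RTO = max(4 * RTT, configured lower bound), and the nofeedback timer
 * is then armed for max(t_RTO, 2 * t_ipi), cf. RFC 3448, 4.3.
 */
static unsigned long nofeedback_expiry_us(unsigned long rtt_us,
					  unsigned long t_ipi_us)
{
	unsigned long t_rto = MAX(4 * rtt_us,
				  CCID3_RTO_MS * (USEC_PER_SEC / 1000));

	return MAX(t_rto, 2 * t_ipi_us);
}

int main(void)
{
	/* On a LAN with RTT = 200us the configured 100ms bound dominates. */
	printf("%lu us\n", nofeedback_expiry_us(200, 10000));
	return 0;
}
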
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 162032b..2555be8 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -33,18 +33,11 @@
 #include "../dccp.h"
 #include "ccid2.h"
 
+
+#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
 static int ccid2_debug;
+#define ccid2_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid2_debug, format, ##a)
 
-#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
-#define ccid2_pr_debug(format, a...) \
-        do { if (ccid2_debug) \
-                printk(KERN_DEBUG "%s: " format, __FUNCTION__, ##a); \
-        } while (0)
-#else
-#define ccid2_pr_debug(format, a...)
-#endif
-
-#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
 static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx)
 {
 	int len = 0;
@@ -86,7 +79,8 @@
 	BUG_ON(len != hctx->ccid2hctx_seqbufc * CCID2_SEQBUF_LEN);
 }
 #else
-#define ccid2_hc_tx_check_sanity(hctx) do {} while (0)
+#define ccid2_pr_debug(format, a...)
+#define ccid2_hc_tx_check_sanity(hctx)
 #endif
 
 static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx, int num,
@@ -131,8 +125,7 @@
 	return 0;
 }
 
-static int ccid2_hc_tx_send_packet(struct sock *sk,
-				   struct sk_buff *skb, int len)
+static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 {
 	struct ccid2_hc_tx_sock *hctx;
 
@@ -274,7 +267,7 @@
 		       jiffies + hctx->ccid2hctx_rto);
 }
 
-static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, int len)
+static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
@@ -426,7 +419,7 @@
 	return -1;
 
 out_invalid_option:
-	BUG_ON(1); /* should never happen... options were previously parsed ! */
+	DCCP_BUG("Invalid option - this should not happen (previous parsing)!");
 	return -1;
 }
 
@@ -619,7 +612,17 @@
 	}
 
 	ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
-	seqp = hctx->ccid2hctx_seqh->ccid2s_prev;
+	if (after48(ackno, hctx->ccid2hctx_high_ack))
+		hctx->ccid2hctx_high_ack = ackno;
+
+	seqp = hctx->ccid2hctx_seqt;
+	while (before48(seqp->ccid2s_seq, ackno)) {
+		seqp = seqp->ccid2s_next;
+		if (seqp == hctx->ccid2hctx_seqh) {
+			seqp = hctx->ccid2hctx_seqh->ccid2s_prev;
+			break;
+		}
+	}
 
 	/* If in slow-start, cwnd can increase at most Ack Ratio / 2 packets for
 	 * this single ack.  I round up.
@@ -697,7 +700,14 @@
 	/* The state about what is acked should be correct now
 	 * Check for NUMDUPACK
 	 */
-	seqp = hctx->ccid2hctx_seqh->ccid2s_prev;
+	seqp = hctx->ccid2hctx_seqt;
+	while (before48(seqp->ccid2s_seq, hctx->ccid2hctx_high_ack)) {
+		seqp = seqp->ccid2s_next;
+		if (seqp == hctx->ccid2hctx_seqh) {
+			seqp = hctx->ccid2hctx_seqh->ccid2s_prev;
+			break;
+		}
+	}
 	done = 0;
 	while (1) {
 		if (seqp->ccid2s_acked) {
@@ -771,6 +781,7 @@
 	hctx->ccid2hctx_lastrtt  = 0;
 	hctx->ccid2hctx_rpdupack = -1;
 	hctx->ccid2hctx_last_cong = jiffies;
+	hctx->ccid2hctx_high_ack = 0;
 
 	hctx->ccid2hctx_rtotimer.function = &ccid2_hc_tx_rto_expire;
 	hctx->ccid2hctx_rtotimer.data	  = (unsigned long)sk;
@@ -823,8 +834,10 @@
 	.ccid_hc_rx_packet_recv	= ccid2_hc_rx_packet_recv,
 };
 
+#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
 module_param(ccid2_debug, int, 0444);
 MODULE_PARM_DESC(ccid2_debug, "Enable debug messages");
+#endif
 
 static __init int ccid2_module_init(void)
 {
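
The reworked ACK processing above walks the sequence list using circular
(modulo 2^48) comparison of ackno against ccid2hctx_high_ack. A minimal
user-space sketch of that comparison technique follows; it is illustrative
only and not the kernel's exact before48()/after48() implementation. The
idea is to shift the 48-bit values into the top of a signed 64-bit word so
that ordinary subtraction yields the signed circular distance.

#include <assert.h>
#include <stdint.h>

/* Sketch of modulo-2^48 sequence comparison: place the 48-bit values in
 * the top bits of an int64_t so their difference wraps correctly. */
static int seq48_before(uint64_t s1, uint64_t s2)
{
	return (int64_t)((s1 << 16) - (s2 << 16)) < 0;
}

static int seq48_after(uint64_t s1, uint64_t s2)
{
	return seq48_before(s2, s1);
}

int main(void)
{
	assert(seq48_before(10, 20));
	/* Wrap-around: 2^48 - 1 is "before" 5 in circular arithmetic. */
	assert(seq48_before((1ULL << 48) - 1, 5));
	assert(seq48_after(5, (1ULL << 48) - 1));
	return 0;
}
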
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 5b2ef4a..ebd7949 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -35,7 +35,7 @@
 	struct ccid2_seq	*ccid2s_next;
 };
 
-#define CCID2_SEQBUF_LEN 256
+#define CCID2_SEQBUF_LEN 1024
 #define CCID2_SEQBUF_MAX 128
 
 /** struct ccid2_hc_tx_sock - CCID2 TX half connection
@@ -72,6 +72,7 @@
 	int			ccid2hctx_rpdupack;
 	int			ccid2hctx_sendwait;
 	unsigned long		ccid2hctx_last_cong;
+	u64			ccid2hctx_high_ack;
 };
 
 struct ccid2_hc_rx_sock {
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index cec23ad..66a27b9 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -60,13 +60,11 @@
 	return (b >= 2 * div) ? tmp / (b / div) : tmp;
 }
 
-static int ccid3_debug;
 
-#ifdef CCID3_DEBUG
-#define ccid3_pr_debug(format, a...) \
-	do { if (ccid3_debug) \
-		printk(KERN_DEBUG "%s: " format, __FUNCTION__, ##a); \
-	} while (0)
+
+#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
+static int ccid3_debug;
+#define ccid3_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid3_debug, format, ##a)
 #else
 #define ccid3_pr_debug(format, a...)
 #endif
@@ -75,15 +73,7 @@
 static struct dccp_rx_hist *ccid3_rx_hist;
 static struct dccp_li_hist *ccid3_li_hist;
 
-/* TFRC sender states */
-enum ccid3_hc_tx_states {
-       	TFRC_SSTATE_NO_SENT = 1,
-	TFRC_SSTATE_NO_FBACK,
-	TFRC_SSTATE_FBACK,
-	TFRC_SSTATE_TERM,
-};
-
-#ifdef CCID3_DEBUG
+#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
 static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
 {
 	static char *ccid3_state_names[] = {
@@ -110,105 +100,115 @@
 	hctx->ccid3hctx_state = state;
 }
 
-/* Calculate new t_ipi (inter packet interval) by t_ipi = s / X_inst */
-static inline void ccid3_calc_new_t_ipi(struct ccid3_hc_tx_sock *hctx)
+/*
+ * Recalculate scheduled nominal send time t_nom, inter-packet interval
+ * t_ipi, and delta value. Should be called after each change to X.
+ */
+static inline void ccid3_update_send_time(struct ccid3_hc_tx_sock *hctx)
 {
-	/*
-	 * If no feedback spec says t_ipi is 1 second (set elsewhere and then
-	 * doubles after every no feedback timer (separate function)
-	 */
-	if (hctx->ccid3hctx_state != TFRC_SSTATE_NO_FBACK)
-		hctx->ccid3hctx_t_ipi = usecs_div(hctx->ccid3hctx_s,
-						  hctx->ccid3hctx_x);
-}
+	timeval_sub_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
 
-/* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */
-static inline void ccid3_calc_new_delta(struct ccid3_hc_tx_sock *hctx)
-{
+	/* Calculate new t_ipi (inter packet interval) by t_ipi = s / X_inst */
+	hctx->ccid3hctx_t_ipi = usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_x);
+
+	/* Update nominal send time with regard to the new t_ipi */
+	timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
+
+	/* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */
 	hctx->ccid3hctx_delta = min_t(u32, hctx->ccid3hctx_t_ipi / 2,
 					   TFRC_OPSYS_HALF_TIME_GRAN);
 }
-
 /*
  * Update X by
  *    If (p > 0)
- *       x_calc = calcX(s, R, p);
+ *       X_calc = calcX(s, R, p);
  *       X = max(min(X_calc, 2 * X_recv), s / t_mbi);
  *    Else
  *       If (now - tld >= R)
  *          X = max(min(2 * X, 2 * X_recv), s / R);
  *          tld = now;
+ *
+ * If X has changed, we also update the scheduled send time t_nom,
+ * the inter-packet interval t_ipi, and the delta value.
  */ 
-static void ccid3_hc_tx_update_x(struct sock *sk)
+static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now)
 {
 	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
+	const __u32 old_x = hctx->ccid3hctx_x;
 
-	/* To avoid large error in calcX */
-	if (hctx->ccid3hctx_p >= TFRC_SMALLEST_P) {
+	if (hctx->ccid3hctx_p > 0) {
 		hctx->ccid3hctx_x_calc = tfrc_calc_x(hctx->ccid3hctx_s,
 						     hctx->ccid3hctx_rtt,
 						     hctx->ccid3hctx_p);
-		hctx->ccid3hctx_x = max_t(u32, min_t(u32, hctx->ccid3hctx_x_calc,
-							  2 * hctx->ccid3hctx_x_recv),
-					       (hctx->ccid3hctx_s /
-					        TFRC_MAX_BACK_OFF_TIME));
-	} else {
-		struct timeval now;
+		hctx->ccid3hctx_x = max_t(u32, min(hctx->ccid3hctx_x_calc,
+						   hctx->ccid3hctx_x_recv * 2),
+					       hctx->ccid3hctx_s / TFRC_T_MBI);
 
-		dccp_timestamp(sk, &now);
-	       	if (timeval_delta(&now, &hctx->ccid3hctx_t_ld) >=
-		    hctx->ccid3hctx_rtt) {
-			hctx->ccid3hctx_x = max_t(u32, min_t(u32, hctx->ccid3hctx_x_recv,
-								  hctx->ccid3hctx_x) * 2,
-						       usecs_div(hctx->ccid3hctx_s,
-							       	 hctx->ccid3hctx_rtt));
-			hctx->ccid3hctx_t_ld = now;
-		}
-	}
+	} else if (timeval_delta(now, &hctx->ccid3hctx_t_ld) >=
+							  hctx->ccid3hctx_rtt) {
+		hctx->ccid3hctx_x = max(min(hctx->ccid3hctx_x_recv,
+					    hctx->ccid3hctx_x      ) * 2,
+					usecs_div(hctx->ccid3hctx_s,
+					       	  hctx->ccid3hctx_rtt)   );
+		hctx->ccid3hctx_t_ld = *now;
+	} else
+		ccid3_pr_debug("Not changing X\n");
+
+	if (hctx->ccid3hctx_x != old_x)
+		ccid3_update_send_time(hctx);
+}
+
+/*
+ * 	Track the mean packet size `s' (cf. RFC 4342, 5.3 and  RFC 3448, 4.1)
+ * 	@len: DCCP packet payload size in bytes
+ */
+static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len)
+{
+	if (unlikely(len == 0))
+		ccid3_pr_debug("Packet payload length is 0 - not updating\n");
+	else
+		hctx->ccid3hctx_s = hctx->ccid3hctx_s == 0 ? len :
+				    (9 * hctx->ccid3hctx_s + len) / 10;
+	/*
+	 * Note: We could do a potential optimisation here - when `s' changes,
+	 *	 recalculate sending rate and consequently t_ipi, t_delta, and
+	 *	 recalculate the sending rate and consequently t_ipi, delta, and
+	 *	 t_nom. This is, however, non-standard, and the benefits are not
+	 */
 }
 
 static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
 {
 	struct sock *sk = (struct sock *)data;
-	unsigned long next_tmout = 0;
 	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
+	unsigned long t_nfb = USEC_PER_SEC / 5;
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
 		/* Try again later. */
 		/* XXX: set some sensible MIB */
-		sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
-			       jiffies + HZ / 5);
-		goto out;
+		goto restart_timer;
 	}
 
 	ccid3_pr_debug("%s, sk=%p, state=%s\n", dccp_role(sk), sk,
 		       ccid3_tx_state_name(hctx->ccid3hctx_state));
 	
 	switch (hctx->ccid3hctx_state) {
-	case TFRC_SSTATE_TERM:
-		goto out;
 	case TFRC_SSTATE_NO_FBACK:
-		/* Halve send rate */
-		hctx->ccid3hctx_x /= 2;
-		if (hctx->ccid3hctx_x < (hctx->ccid3hctx_s /
-					 TFRC_MAX_BACK_OFF_TIME))
-			hctx->ccid3hctx_x = (hctx->ccid3hctx_s /
-					     TFRC_MAX_BACK_OFF_TIME);
+		/* RFC 3448, 4.4: Halve send rate directly */
+		hctx->ccid3hctx_x = min_t(u32, hctx->ccid3hctx_x / 2,
+					       hctx->ccid3hctx_s / TFRC_T_MBI);
 
 		ccid3_pr_debug("%s, sk=%p, state=%s, updated tx rate to %d "
 			       "bytes/s\n",
 			       dccp_role(sk), sk,
 			       ccid3_tx_state_name(hctx->ccid3hctx_state),
 			       hctx->ccid3hctx_x);
-		next_tmout = max_t(u32, 2 * usecs_div(hctx->ccid3hctx_s,
-						      hctx->ccid3hctx_x),
-					TFRC_INITIAL_TIMEOUT);
-		/*
-		 * FIXME - not sure above calculation is correct. See section
-		 * 5 of CCID3 11 should adjust tx_t_ipi and double that to
-		 * achieve it really
-		 */
+		/* The value of R is still undefined and so we cannot recompute
+		 * the timeout value. Keep the initial value as per [RFC 4342, 5]. */
+		t_nfb = TFRC_INITIAL_TIMEOUT;
+		ccid3_update_send_time(hctx);
 		break;
 	case TFRC_SSTATE_FBACK:
 		/*
@@ -218,85 +218,89 @@
 		if (!hctx->ccid3hctx_idle ||
 		    (hctx->ccid3hctx_x_recv >=
 		     4 * usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_rtt))) {
+			struct timeval now;
+
 			ccid3_pr_debug("%s, sk=%p, state=%s, not idle\n",
 				       dccp_role(sk), sk,
 				       ccid3_tx_state_name(hctx->ccid3hctx_state));
 			/* Halve sending rate */
 
-			/*  If (X_calc > 2 * X_recv)
+			/*  If (p == 0 || X_calc > 2 * X_recv)
 			 *    X_recv = max(X_recv / 2, s / (2 * t_mbi));
 			 *  Else
 			 *    X_recv = X_calc / 4;
 			 */
-			BUG_ON(hctx->ccid3hctx_p >= TFRC_SMALLEST_P &&
-			       hctx->ccid3hctx_x_calc == 0);
+			BUG_ON(hctx->ccid3hctx_p && !hctx->ccid3hctx_x_calc);
 
-			/* check also if p is zero -> x_calc is infinity? */
-			if (hctx->ccid3hctx_p < TFRC_SMALLEST_P ||
+			if (hctx->ccid3hctx_p  == 0 ||
 			    hctx->ccid3hctx_x_calc > 2 * hctx->ccid3hctx_x_recv)
 				hctx->ccid3hctx_x_recv = max_t(u32, hctx->ccid3hctx_x_recv / 2,
-								    hctx->ccid3hctx_s / (2 * TFRC_MAX_BACK_OFF_TIME));
+								    hctx->ccid3hctx_s / (2 * TFRC_T_MBI));
 			else
 				hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc / 4;
 
 			/* Update sending rate */
-			ccid3_hc_tx_update_x(sk);
+			dccp_timestamp(sk, &now);
+			ccid3_hc_tx_update_x(sk, &now);
 		}
 		/*
 		 * Schedule no feedback timer to expire in
-		 * max(4 * R, 2 * s / X)
+		 * max(t_RTO, 2 * s/X)  =  max(t_RTO, 2 * t_ipi)
+		 * See comments in packet_recv() regarding the value of t_RTO.
 		 */
-		next_tmout = max_t(u32, hctx->ccid3hctx_t_rto, 
-					2 * usecs_div(hctx->ccid3hctx_s,
-						      hctx->ccid3hctx_x));
+		t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi);
 		break;
-	default:
-		printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
-		       __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state);
-		dump_stack();
+	case TFRC_SSTATE_NO_SENT:
+		DCCP_BUG("Illegal %s state NO_SENT, sk=%p", dccp_role(sk), sk);
+		/* fall through */
+	case TFRC_SSTATE_TERM:
 		goto out;
 	}
 
-	sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, 
-		      jiffies + max_t(u32, 1, usecs_to_jiffies(next_tmout)));
 	hctx->ccid3hctx_idle = 1;
+
+restart_timer:
+	sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
+		           jiffies + usecs_to_jiffies(t_nfb));
 out:
 	bh_unlock_sock(sk);
 	sock_put(sk);
 }
 
-static int ccid3_hc_tx_send_packet(struct sock *sk,
-				   struct sk_buff *skb, int len)
+/*
+ * returns
+ *   > 0: delay (in msecs) that should pass before actually sending
+ *   = 0: can send immediately
+ *   < 0: error condition; do not send packet
+ */
+static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
 	struct dccp_tx_hist_entry *new_packet;
 	struct timeval now;
 	long delay;
-	int rc = -ENOTCONN;
 
-	BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM);
+	BUG_ON(hctx == NULL);
 
-	/* Check if pure ACK or Terminating*/
 	/*
-	 * XXX: We only call this function for DATA and DATAACK, on, these
-	 * packets can have zero length, but why the comment about "pure ACK"?
+	 * This function is called only for Data and DataAck packets. Sending
+	 * zero-sized Data(Ack)s is theoretically possible, but for congestion
+	 * control this case is pathological - ignore it.
 	 */
-	if (unlikely(len == 0))
-		goto out;
+	if (unlikely(skb->len == 0))
+		return -EBADMSG;
 
 	/* See if last packet allocated was not sent */
 	new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
 	if (new_packet == NULL || new_packet->dccphtx_sent) {
 		new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
-						    SLAB_ATOMIC);
+						    GFP_ATOMIC);
 
-		rc = -ENOBUFS;
 		if (unlikely(new_packet == NULL)) {
-			LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, not enough "
-				       "mem to add to history, send refused\n",
-				       __FUNCTION__, dccp_role(sk), sk);
-			goto out;
+			DCCP_WARN("%s, sk=%p, not enough mem to add to history, "
+				  "send refused\n", dccp_role(sk), sk);
+			return -ENOBUFS;
 		}
 
 		dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, new_packet);
@@ -311,123 +315,94 @@
 		hctx->ccid3hctx_last_win_count	 = 0;
 		hctx->ccid3hctx_t_last_win_count = now;
 		ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
-		hctx->ccid3hctx_t_ipi = TFRC_INITIAL_IPI;
 
-		/* Set nominal send time for initial packet */
+		/* Set initial sending rate to 1 packet per second */
+		ccid3_hc_tx_update_s(hctx, skb->len);
+		hctx->ccid3hctx_x     = hctx->ccid3hctx_s;
+
+		/* First timeout, according to [RFC 3448, 4.2], is 1 second */
+		hctx->ccid3hctx_t_ipi = USEC_PER_SEC;
+		/* Initial delta: minimum of 0.5 sec and t_gran/2 */
+		hctx->ccid3hctx_delta = TFRC_OPSYS_HALF_TIME_GRAN;
+
+		/* Set t_0 for initial packet */
 		hctx->ccid3hctx_t_nom = now;
-		timeval_add_usecs(&hctx->ccid3hctx_t_nom,
-				  hctx->ccid3hctx_t_ipi);
-		ccid3_calc_new_delta(hctx);
-		rc = 0;
 		break;
 	case TFRC_SSTATE_NO_FBACK:
 	case TFRC_SSTATE_FBACK:
-		delay = (timeval_delta(&now, &hctx->ccid3hctx_t_nom) -
-		         hctx->ccid3hctx_delta);
-		delay /= -1000;
-		/* divide by -1000 is to convert to ms and get sign right */
-		rc = delay > 0 ? delay : 0;
+		delay = timeval_delta(&hctx->ccid3hctx_t_nom, &now);
+		/*
+		 * 	Scheduling of packet transmissions [RFC 3448, 4.6]
+		 *
+		 * if (t_now > t_nom - delta)
+		 *       // send the packet now
+		 * else
+		 *       // send the packet in (t_nom - t_now) milliseconds.
+		 */
+		if (delay - (long)hctx->ccid3hctx_delta >= 0)
+			return delay / 1000L;
 		break;
-	default:
-		printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
-		       __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state);
-		dump_stack();
-		rc = -EINVAL;
-		break;
+	case TFRC_SSTATE_TERM:
+		DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
+		return -EINVAL;
 	}
 
-	/* Can we send? if so add options and add to packet history */
-	if (rc == 0) {
-		dp->dccps_hc_tx_insert_options = 1;
-		new_packet->dccphtx_ccval =
-			DCCP_SKB_CB(skb)->dccpd_ccval =
-				hctx->ccid3hctx_last_win_count;
-		timeval_add_usecs(&hctx->ccid3hctx_t_nom,
-				  hctx->ccid3hctx_t_ipi);
-	}
-out:
-	return rc;
+	/* prepare to send now (add options etc.) */
+	dp->dccps_hc_tx_insert_options = 1;
+	new_packet->dccphtx_ccval = DCCP_SKB_CB(skb)->dccpd_ccval =
+				    hctx->ccid3hctx_last_win_count;
+	timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi);
+
+	return 0;
 }
 
-static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len)
+static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
 {
 	const struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
 	struct timeval now;
+	unsigned long quarter_rtt;
+	struct dccp_tx_hist_entry *packet;
 
-	BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM);
+	BUG_ON(hctx == NULL);
 
 	dccp_timestamp(sk, &now);
 
-	/* check if we have sent a data packet */
-	if (len > 0) {
-		unsigned long quarter_rtt;
-		struct dccp_tx_hist_entry *packet;
+	ccid3_hc_tx_update_s(hctx, len);
 
-		packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
-		if (unlikely(packet == NULL)) {
-			LIMIT_NETDEBUG(KERN_WARNING "%s: packet doesn't "
-				       "exists in history!\n", __FUNCTION__);
-			return;
-		}
-		if (unlikely(packet->dccphtx_sent)) {
-			LIMIT_NETDEBUG(KERN_WARNING "%s: no unsent packet in "
-				       "history!\n", __FUNCTION__);
-			return;
-		}
-		packet->dccphtx_tstamp = now;
-		packet->dccphtx_seqno  = dp->dccps_gss;
-		/*
-		 * Check if win_count have changed
-		 * Algorithm in "8.1. Window Counter Value" in RFC 4342.
-		 */
-		quarter_rtt = timeval_delta(&now, &hctx->ccid3hctx_t_last_win_count);
-		if (likely(hctx->ccid3hctx_rtt > 8))
-			quarter_rtt /= hctx->ccid3hctx_rtt / 4;
-
-		if (quarter_rtt > 0) {
-			hctx->ccid3hctx_t_last_win_count = now;
-			hctx->ccid3hctx_last_win_count	 = (hctx->ccid3hctx_last_win_count +
-							    min_t(unsigned long, quarter_rtt, 5)) % 16;
-			ccid3_pr_debug("%s, sk=%p, window changed from "
-				       "%u to %u!\n",
-				       dccp_role(sk), sk,
-				       packet->dccphtx_ccval,
-				       hctx->ccid3hctx_last_win_count);
-		}
-
-		hctx->ccid3hctx_idle = 0;
-		packet->dccphtx_rtt  = hctx->ccid3hctx_rtt;
-		packet->dccphtx_sent = 1;
-	} else
-		ccid3_pr_debug("%s, sk=%p, seqno=%llu NOT inserted!\n",
-			       dccp_role(sk), sk, dp->dccps_gss);
-
-	switch (hctx->ccid3hctx_state) {
-	case TFRC_SSTATE_NO_SENT:
-		/* if first wasn't pure ack */
-		if (len != 0)
-			printk(KERN_CRIT "%s: %s, First packet sent is noted "
-					 "as a data packet\n",
-			       __FUNCTION__, dccp_role(sk));
+	packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
+	if (unlikely(packet == NULL)) {
+		DCCP_WARN("packet doesn't exist in history!\n");
 		return;
-	case TFRC_SSTATE_NO_FBACK:
-	case TFRC_SSTATE_FBACK:
-		if (len > 0) {
-			timeval_sub_usecs(&hctx->ccid3hctx_t_nom,
-				  hctx->ccid3hctx_t_ipi);
-			ccid3_calc_new_t_ipi(hctx);
-			ccid3_calc_new_delta(hctx);
-			timeval_add_usecs(&hctx->ccid3hctx_t_nom,
-					  hctx->ccid3hctx_t_ipi);
-		}
-		break;
-	default:
-		printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
-		       __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state);
-		dump_stack();
-		break;
 	}
+	if (unlikely(packet->dccphtx_sent)) {
+		DCCP_WARN("no unsent packet in history!\n");
+		return;
+	}
+	packet->dccphtx_tstamp = now;
+	packet->dccphtx_seqno  = dp->dccps_gss;
+	/*
+	 * Check if win_count has changed
+	 * Algorithm in "8.1. Window Counter Value" in RFC 4342.
+	 */
+	quarter_rtt = timeval_delta(&now, &hctx->ccid3hctx_t_last_win_count);
+	if (likely(hctx->ccid3hctx_rtt > 8))
+		quarter_rtt /= hctx->ccid3hctx_rtt / 4;
+
+	if (quarter_rtt > 0) {
+		hctx->ccid3hctx_t_last_win_count = now;
+		hctx->ccid3hctx_last_win_count	 = (hctx->ccid3hctx_last_win_count +
+						    min_t(unsigned long, quarter_rtt, 5)) % 16;
+		ccid3_pr_debug("%s, sk=%p, window changed from "
+			       "%u to %u!\n",
+			       dccp_role(sk), sk,
+			       packet->dccphtx_ccval,
+			       hctx->ccid3hctx_last_win_count);
+	}
+
+	hctx->ccid3hctx_idle = 0;
+	packet->dccphtx_rtt  = hctx->ccid3hctx_rtt;
+	packet->dccphtx_sent = 1;
 }
 
 static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
@@ -437,13 +412,11 @@
 	struct ccid3_options_received *opt_recv;
 	struct dccp_tx_hist_entry *packet;
 	struct timeval now;
-	unsigned long next_tmout; 
-	u32 t_elapsed;
+	unsigned long t_nfb;
 	u32 pinv;
-	u32 x_recv;
-	u32 r_sample;
+	long r_sample, t_elapsed;
 
-	BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM);
+	BUG_ON(hctx == NULL);
 
 	/* we are only interested in ACKs */
 	if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK ||
@@ -452,37 +425,45 @@
 
 	opt_recv = &hctx->ccid3hctx_options_received;
 
-	t_elapsed = dp->dccps_options_received.dccpor_elapsed_time * 10;
-	x_recv = opt_recv->ccid3or_receive_rate;
-	pinv = opt_recv->ccid3or_loss_event_rate;
-
 	switch (hctx->ccid3hctx_state) {
-	case TFRC_SSTATE_NO_SENT:
-		/* FIXME: what to do here? */
-		return;
 	case TFRC_SSTATE_NO_FBACK:
 	case TFRC_SSTATE_FBACK:
-		/* Calculate new round trip sample by
-		 * R_sample = (now - t_recvdata) - t_delay */
-		/* get t_recvdata from history */
+		/* get packet from history to look up t_recvdata */
 		packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist,
 						 DCCP_SKB_CB(skb)->dccpd_ack_seq);
 		if (unlikely(packet == NULL)) {
-			LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, seqno "
-				       "%llu(%s) does't exist in history!\n",
-				       __FUNCTION__, dccp_role(sk), sk,
+			DCCP_WARN("%s(%p), seqno %llu(%s) doesn't exist "
+				  "in history!\n",  dccp_role(sk), sk,
 			    (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
-				dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
+				  dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
 			return;
 		}
 
-		/* Update RTT */
+		/* Update receive rate */
+		hctx->ccid3hctx_x_recv = opt_recv->ccid3or_receive_rate;
+
+		/* Update loss event rate */
+		pinv = opt_recv->ccid3or_loss_event_rate;
+		if (pinv == ~0U || pinv == 0)
+			hctx->ccid3hctx_p = 0;
+		else
+ 			hctx->ccid3hctx_p = 1000000 / pinv;
+
 		dccp_timestamp(sk, &now);
-		r_sample = timeval_delta(&now, &packet->dccphtx_tstamp);
-		if (unlikely(r_sample <= t_elapsed))
-			LIMIT_NETDEBUG(KERN_WARNING "%s: r_sample=%uus, "
-				       "t_elapsed=%uus\n",
-				       __FUNCTION__, r_sample, t_elapsed);
+
+		/*
+		 * Calculate new round trip sample as per [RFC 3448, 4.3] by
+		 * 	R_sample  =  (now - t_recvdata) - t_elapsed
+		 */
+		r_sample  = timeval_delta(&now, &packet->dccphtx_tstamp);
+		t_elapsed = dp->dccps_options_received.dccpor_elapsed_time * 10;
+
+		if (unlikely(r_sample <= 0)) {
+			DCCP_WARN("WARNING: R_sample (%ld) <= 0!\n", r_sample);
+			r_sample = 0;
+		} else if (unlikely(r_sample <= t_elapsed))
+			DCCP_WARN("WARNING: r_sample=%ldus <= t_elapsed=%ldus\n",
+				  r_sample, t_elapsed);
 		else
 			r_sample -= t_elapsed;
 
@@ -495,82 +476,79 @@
 		 * q is a constant, RFC 3448 recomments 0.9
 		 */
 		if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) {
+			/* Use Larger Initial Windows [RFC 4342, sec. 5]
+			 * We deviate in that we use `s' instead of `MSS'. */
+			u16 w_init = max(    4 * hctx->ccid3hctx_s,
+					 max(2 * hctx->ccid3hctx_s, 4380));
+			hctx->ccid3hctx_rtt  = r_sample;
+			hctx->ccid3hctx_x    = usecs_div(w_init, r_sample);
+			hctx->ccid3hctx_t_ld = now;
+
+			ccid3_update_send_time(hctx);
+
+			ccid3_pr_debug("%s(%p), s=%u, w_init=%u, "
+				       "R_sample=%ldus, X=%u\n", dccp_role(sk),
+				       sk, hctx->ccid3hctx_s, w_init, r_sample,
+				       hctx->ccid3hctx_x);
+
 			ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
-			hctx->ccid3hctx_rtt = r_sample;
-		} else
-			hctx->ccid3hctx_rtt = (hctx->ccid3hctx_rtt * 9) / 10 +
-					      r_sample / 10;
+		} else {
+			hctx->ccid3hctx_rtt = (9 * hctx->ccid3hctx_rtt +
+					           (u32)r_sample        ) / 10;
 
-		ccid3_pr_debug("%s, sk=%p, New RTT estimate=%uus, "
-			       "r_sample=%us\n", dccp_role(sk), sk,
-			       hctx->ccid3hctx_rtt, r_sample);
+			ccid3_hc_tx_update_x(sk, &now);
 
-		/* Update timeout interval */
-		hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt,
-					      USEC_PER_SEC);
-
-		/* Update receive rate */
-		hctx->ccid3hctx_x_recv = x_recv;/* X_recv in bytes per sec */
-
-		/* Update loss event rate */
-		if (pinv == ~0 || pinv == 0)
-			hctx->ccid3hctx_p = 0;
-		else {
-			hctx->ccid3hctx_p = 1000000 / pinv;
-
-			if (hctx->ccid3hctx_p < TFRC_SMALLEST_P) {
-				hctx->ccid3hctx_p = TFRC_SMALLEST_P;
-				ccid3_pr_debug("%s, sk=%p, Smallest p used!\n",
-					       dccp_role(sk), sk);
-			}
+			ccid3_pr_debug("%s(%p), RTT=%uus (sample=%ldus), s=%u, "
+				       "p=%u, X_calc=%u, X=%u\n", dccp_role(sk),
+				       sk, hctx->ccid3hctx_rtt, r_sample,
+				       hctx->ccid3hctx_s, hctx->ccid3hctx_p,
+				       hctx->ccid3hctx_x_calc,
+				       hctx->ccid3hctx_x);
 		}
 
 		/* unschedule no feedback timer */
 		sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer);
 
-		/* Update sending rate */
-		ccid3_hc_tx_update_x(sk);
-
-		/* Update next send time */
-		timeval_sub_usecs(&hctx->ccid3hctx_t_nom,
-				  hctx->ccid3hctx_t_ipi);
-		ccid3_calc_new_t_ipi(hctx);
-		timeval_add_usecs(&hctx->ccid3hctx_t_nom,
-				  hctx->ccid3hctx_t_ipi);
-		ccid3_calc_new_delta(hctx);
-
 		/* remove all packets older than the one acked from history */
 		dccp_tx_hist_purge_older(ccid3_tx_hist,
 					 &hctx->ccid3hctx_hist, packet);
 		/*
 		 * As we have calculated new ipi, delta, t_nom it is possible that
-		 * we now can send a packet, so wake up dccp_wait_for_ccids.
+		 * we now can send a packet, so wake up dccp_wait_for_ccid
 		 */
 		sk->sk_write_space(sk);
 
 		/*
-		 * Schedule no feedback timer to expire in
-		 * max(4 * R, 2 * s / X)
+		 * Update timeout interval for the nofeedback timer.
+		 * We use a configuration option to increase the lower bound.
+		 * This can help avoid triggering the nofeedback timer too often
+		 * ('spinning') on LANs with small RTTs.
 		 */
-		next_tmout = max(hctx->ccid3hctx_t_rto,
-				 2 * usecs_div(hctx->ccid3hctx_s,
-					       hctx->ccid3hctx_x));
+		hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt,
+						   CONFIG_IP_DCCP_CCID3_RTO *
+						   (USEC_PER_SEC/1000)	     );
+		/*
+		 * Schedule no feedback timer to expire in
+		 * max(t_RTO, 2 * s/X)  =  max(t_RTO, 2 * t_ipi)
+		 */
+		t_nfb = max(hctx->ccid3hctx_t_rto, 2 * hctx->ccid3hctx_t_ipi);
 			
 		ccid3_pr_debug("%s, sk=%p, Scheduled no feedback timer to "
 			       "expire in %lu jiffies (%luus)\n",
 			       dccp_role(sk), sk,
-			       usecs_to_jiffies(next_tmout), next_tmout); 
+			       usecs_to_jiffies(t_nfb), t_nfb);
 
 		sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, 
-			       jiffies + max_t(u32, 1, usecs_to_jiffies(next_tmout)));
+				   jiffies + usecs_to_jiffies(t_nfb));
 
 		/* set idle flag */
 		hctx->ccid3hctx_idle = 1;   
 		break;
-	default:
-		printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
-		       __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state);
-		dump_stack();
+	case TFRC_SSTATE_NO_SENT:
+		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_CLIENT)
+			DCCP_WARN("Illegal ACK received - no packet sent\n");
+		/* fall through */
+	case TFRC_SSTATE_TERM:		/* ignore feedback when closing */
 		break;
 	}
 }
@@ -610,9 +588,9 @@
 	switch (option) {
 	case TFRC_OPT_LOSS_EVENT_RATE:
 		if (unlikely(len != 4)) {
-			LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, invalid "
-				       "len for TFRC_OPT_LOSS_EVENT_RATE\n",
-				       __FUNCTION__, dccp_role(sk), sk);
+			DCCP_WARN("%s, sk=%p, invalid len %d "
+				  "for TFRC_OPT_LOSS_EVENT_RATE\n",
+				  dccp_role(sk), sk, len);
 			rc = -EINVAL;
 		} else {
 			opt_recv->ccid3or_loss_event_rate = ntohl(*(__be32 *)value);
@@ -631,9 +609,9 @@
 		break;
 	case TFRC_OPT_RECEIVE_RATE:
 		if (unlikely(len != 4)) {
-			LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, invalid "
-				       "len for TFRC_OPT_RECEIVE_RATE\n",
-				       __FUNCTION__, dccp_role(sk), sk);
+			DCCP_WARN("%s, sk=%p, invalid len %d "
+				  "for TFRC_OPT_RECEIVE_RATE\n",
+				  dccp_role(sk), sk, len);
 			rc = -EINVAL;
 		} else {
 			opt_recv->ccid3or_receive_rate = ntohl(*(__be32 *)value);
@@ -649,18 +627,9 @@
 
 static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
 {
-	struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid);
 
-	if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE &&
-	    dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE)
-		hctx->ccid3hctx_s = dp->dccps_packet_size;
-	else
-		hctx->ccid3hctx_s = TFRC_STD_PACKET_SIZE;
-
-	/* Set transmission rate to 1 packet per second */
-	hctx->ccid3hctx_x     = hctx->ccid3hctx_s;
-	hctx->ccid3hctx_t_rto = USEC_PER_SEC;
+	hctx->ccid3hctx_s     = 0;
 	hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT;
 	INIT_LIST_HEAD(&hctx->ccid3hctx_hist);
 
@@ -688,14 +657,7 @@
  * RX Half Connection methods
  */
 
-/* TFRC receiver states */
-enum ccid3_hc_rx_states {
-       	TFRC_RSTATE_NO_DATA = 1,
-	TFRC_RSTATE_DATA,
-	TFRC_RSTATE_TERM    = 127,
-};
-
-#ifdef CCID3_DEBUG
+#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
 static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
 {
 	static char *ccid3_rx_state_names[] = {
@@ -721,6 +683,15 @@
 	hcrx->ccid3hcrx_state = state;
 }
 
+static inline void ccid3_hc_rx_update_s(struct ccid3_hc_rx_sock *hcrx, int len)
+{
+	if (unlikely(len == 0))	/* don't update on empty packets (e.g. ACKs) */
+		ccid3_pr_debug("Packet payload length is 0 - not updating\n");
+	else
+		hcrx->ccid3hcrx_s = hcrx->ccid3hcrx_s == 0 ? len :
+				    (9 * hcrx->ccid3hcrx_s + len) / 10;
+}
+
 static void ccid3_hc_rx_send_feedback(struct sock *sk)
 {
 	struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
@@ -743,18 +714,15 @@
 						   delta);
 	}
 		break;
-	default:
-		printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
-		       __FUNCTION__, dccp_role(sk), sk, hcrx->ccid3hcrx_state);
-		dump_stack();
+	case TFRC_RSTATE_TERM:
+		DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
 		return;
 	}
 
 	packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist);
 	if (unlikely(packet == NULL)) {
-		LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, no data packet "
-			       "in history!\n",
-			       __FUNCTION__, dccp_role(sk), sk);
+		DCCP_WARN("%s, sk=%p, no data packet in history!\n",
+			  dccp_role(sk), sk);
 		return;
 	}
 
@@ -842,29 +810,29 @@
 	}
 
 	if (unlikely(step == 0)) {
-		LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, packet history "
-			       "contains no data packets!\n",
-			       __FUNCTION__, dccp_role(sk), sk);
+		DCCP_WARN("%s, sk=%p, packet history has no data packets!\n",
+			  dccp_role(sk), sk);
 		return ~0;
 	}
 
 	if (unlikely(interval == 0)) {
-		LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, Could not find a "
-			       "win_count interval > 0. Defaulting to 1\n",
-			       __FUNCTION__, dccp_role(sk), sk);
+		DCCP_WARN("%s, sk=%p, Could not find a win_count interval > 0. "
+			  "Defaulting to 1\n", dccp_role(sk), sk);
 		interval = 1;
 	}
 found:
 	if (!tail) {
-		LIMIT_NETDEBUG(KERN_WARNING "%s: tail is null\n",
-		   __FUNCTION__);
+		DCCP_CRIT("tail is null\n");
 		return ~0;
 	}
 	rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval;
 	ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n",
 		       dccp_role(sk), sk, rtt);
-	if (rtt == 0)
-		rtt = 1;
+
+	if (rtt == 0) {
+		DCCP_WARN("RTT==0, setting to 1\n");
+ 		rtt = 1;
+	}
 
 	dccp_timestamp(sk, &tstamp);
 	delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback);
@@ -878,9 +846,7 @@
 	tmp2 = (u32)tmp1;
 
 	if (!tmp2) {
-		LIMIT_NETDEBUG(KERN_WARNING "tmp2 = 0 "
-		   "%s: x_recv = %u, rtt =%u\n",
-		   __FUNCTION__, x_recv, rtt);
+		DCCP_CRIT("tmp2 = 0, x_recv = %u, rtt =%u\n", x_recv, rtt);
 		return ~0;
 	}
 
@@ -923,11 +889,10 @@
 		/* new loss event detected */
 		/* calculate last interval length */
 		seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
-		entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC);
+		entry = dccp_li_hist_entry_new(ccid3_li_hist, GFP_ATOMIC);
 
 		if (entry == NULL) {
-			printk(KERN_CRIT "%s: out of memory\n",__FUNCTION__);
-			dump_stack();
+			DCCP_BUG("out of memory - can not allocate entry");
 			return;
 		}
 
@@ -1002,13 +967,10 @@
 	const struct dccp_options_received *opt_recv;
 	struct dccp_rx_hist_entry *packet;
 	struct timeval now;
-	u8 win_count;
 	u32 p_prev, rtt_prev, r_sample, t_elapsed;
-	int loss;
+	int loss, payload_size;
 
-	BUG_ON(hcrx == NULL ||
-	       !(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA ||
-		 hcrx->ccid3hcrx_state == TFRC_RSTATE_DATA));
+	BUG_ON(hcrx == NULL);
 
 	opt_recv = &dccp_sk(sk)->dccps_options_received;
 
@@ -1026,9 +988,8 @@
 		t_elapsed = opt_recv->dccpor_elapsed_time * 10;
 
 		if (unlikely(r_sample <= t_elapsed))
-			LIMIT_NETDEBUG(KERN_WARNING "%s: r_sample=%uus, "
-				       "t_elapsed=%uus\n",
-				       __FUNCTION__, r_sample, t_elapsed);
+			DCCP_WARN("r_sample=%uus, t_elapsed=%uus\n",
+				  r_sample, t_elapsed);
 		else
 			r_sample -= t_elapsed;
 
@@ -1050,21 +1011,21 @@
 	}
 
 	packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
-					skb, SLAB_ATOMIC);
+					skb, GFP_ATOMIC);
 	if (unlikely(packet == NULL)) {
-		LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, Not enough mem to "
-				"add rx packet to history, consider it lost!\n",
-			       __FUNCTION__, dccp_role(sk), sk);
+		DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet "
+			  "to history, consider it lost!\n", dccp_role(sk), sk);
 		return;
 	}
 
-	win_count = packet->dccphrx_ccval;
-
 	loss = ccid3_hc_rx_detect_loss(sk, packet);
 
 	if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK)
 		return;
 
+	payload_size = skb->len - dccp_hdr(skb)->dccph_doff * 4;
+	ccid3_hc_rx_update_s(hcrx, payload_size);
+
 	switch (hcrx->ccid3hcrx_state) {
 	case TFRC_RSTATE_NO_DATA:
 		ccid3_pr_debug("%s, sk=%p(%s), skb=%p, sending initial "
@@ -1075,8 +1036,7 @@
 		ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA);
 		return;
 	case TFRC_RSTATE_DATA:
-		hcrx->ccid3hcrx_bytes_recv += skb->len -
-					      dccp_hdr(skb)->dccph_doff * 4;
+		hcrx->ccid3hcrx_bytes_recv += payload_size;
 		if (loss)
 			break;
 
@@ -1087,10 +1047,8 @@
 			ccid3_hc_rx_send_feedback(sk);
 		}
 		return;
-	default:
-		printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
-		       __FUNCTION__, dccp_role(sk), sk, hcrx->ccid3hcrx_state);
-		dump_stack();
+	case TFRC_RSTATE_TERM:
+		DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk);
 		return;
 	}
 
@@ -1107,10 +1065,8 @@
 		/* Scaling up by 1000000 as fixed decimal */
 		if (i_mean != 0)
 			hcrx->ccid3hcrx_p = 1000000 / i_mean;
-	} else {
-		printk(KERN_CRIT "%s: empty loss hist\n",__FUNCTION__);
-		dump_stack();
-	}
+	} else
+		DCCP_BUG("empty loss history");
 
 	if (hcrx->ccid3hcrx_p > p_prev) {
 		ccid3_hc_rx_send_feedback(sk);
@@ -1120,22 +1076,16 @@
 
 static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
 {
-	struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid);
 
 	ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
 
-	if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE &&
-	    dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE)
-		hcrx->ccid3hcrx_s = dp->dccps_packet_size;
-	else
-		hcrx->ccid3hcrx_s = TFRC_STD_PACKET_SIZE;
-
 	hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA;
 	INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist);
 	INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist);
 	dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack);
 	hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack;
+	hcrx->ccid3hcrx_s   = 0;
 	hcrx->ccid3hcrx_rtt = 5000; /* XXX 5ms for now... */
 	return 0;
 }
@@ -1261,8 +1211,10 @@
 	.ccid_hc_tx_getsockopt	   = ccid3_hc_tx_getsockopt,
 };
  
+#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
 module_param(ccid3_debug, int, 0444);
 MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");
+#endif
 
 static __init int ccid3_module_init(void)
 {
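
The scheduling logic that the ccid3.c changes centralise in
ccid3_update_send_time() and in the new return convention of
ccid3_hc_tx_send_packet() follows RFC 3448, 4.6. The following user-space
sketch mirrors that arithmetic under the assumption HZ=100; the struct and
function names are hypothetical and the sample numbers are made up.

#include <stdio.h>

#define USEC_PER_SEC		1000000UL
#define HALF_TIME_GRAN_US	(USEC_PER_SEC / (2 * 100))	/* assumes HZ = 100 */

/* Illustrative sender state: times in microseconds, rate in bytes/sec. */
struct tfrc_tx_sketch {
	unsigned long	x;	/* allowed sending rate X		*/
	unsigned long	s;	/* mean packet size			*/
	unsigned long	t_ipi;	/* inter-packet interval		*/
	unsigned long	delta;	/* scheduling tolerance			*/
	unsigned long	t_nom;	/* nominal send time of next packet	*/
};

/* Recalculation performed after every change to X:
 * t_ipi = s/X, delta = min(t_ipi/2, t_gran/2), and t_nom is shifted to
 * use the new t_ipi (cf. RFC 3448, 4.6). */
static void update_send_time(struct tfrc_tx_sketch *tx)
{
	tx->t_nom -= tx->t_ipi;
	tx->t_ipi  = (tx->s * USEC_PER_SEC) / tx->x;
	tx->t_nom += tx->t_ipi;
	tx->delta  = tx->t_ipi / 2 < HALF_TIME_GRAN_US ?
		     tx->t_ipi / 2 : HALF_TIME_GRAN_US;
}

/* Scheduling decision in the style of the reworked send_packet():
 * > 0  delay in milliseconds before the packet may be sent,
 *   0  send immediately. */
static long tx_send_delay_ms(const struct tfrc_tx_sketch *tx, unsigned long now)
{
	long delay = (long)tx->t_nom - (long)now;

	return delay - (long)tx->delta >= 0 ? delay / 1000L : 0;
}

int main(void)
{
	struct tfrc_tx_sketch tx = { .x = 20000, .s = 1000,
				     .t_ipi = 50000, .t_nom = 150000 };

	update_send_time(&tx);
	printf("t_ipi=%luus delta=%luus delay=%ldms\n",
	       tx.t_ipi, tx.delta, tx_send_delay_ms(&tx, 100000));
	return 0;
}
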
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h
index 0a2cb75..07596d7 100644
--- a/net/dccp/ccids/ccid3.h
+++ b/net/dccp/ccids/ccid3.h
@@ -42,22 +42,14 @@
 #include <linux/tfrc.h>
 #include "../ccid.h"
 
-#define TFRC_MIN_PACKET_SIZE	   16
-#define TFRC_STD_PACKET_SIZE	  256
-#define TFRC_MAX_PACKET_SIZE	65535
-
-/* Two seconds as per CCID3 spec */
+/* Two seconds as per RFC 3448 4.2 */
 #define TFRC_INITIAL_TIMEOUT	   (2 * USEC_PER_SEC)
 
-#define TFRC_INITIAL_IPI	   (USEC_PER_SEC / 4)
-
 /* In usecs - half the scheduling granularity as per RFC3448 4.6 */
 #define TFRC_OPSYS_HALF_TIME_GRAN  (USEC_PER_SEC / (2 * HZ))
 
-/* In seconds */
-#define TFRC_MAX_BACK_OFF_TIME	   64
-
-#define TFRC_SMALLEST_P		   40
+/* Parameter t_mbi from [RFC 3448, 4.3]: backoff interval in seconds */
+#define TFRC_T_MBI		   64
 
 enum ccid3_options {
 	TFRC_OPT_LOSS_EVENT_RATE = 192,
@@ -73,26 +65,36 @@
 	u32 ccid3or_receive_rate;
 };
 
-/** struct ccid3_hc_tx_sock - CCID3 sender half connection sock
+/* TFRC sender states */
+enum ccid3_hc_tx_states {
+       	TFRC_SSTATE_NO_SENT = 1,
+	TFRC_SSTATE_NO_FBACK,
+	TFRC_SSTATE_FBACK,
+	TFRC_SSTATE_TERM,
+};
+
+/** struct ccid3_hc_tx_sock - CCID3 sender half-connection socket
  *
-  * @ccid3hctx_state - Sender state
-  * @ccid3hctx_x - Current sending rate
-  * @ccid3hctx_x_recv - Receive rate
-  * @ccid3hctx_x_calc - Calculated send (?) rate
-  * @ccid3hctx_s - Packet size
-  * @ccid3hctx_rtt - Estimate of current round trip time in usecs
-  * @@ccid3hctx_p - Current loss event rate (0-1) scaled by 1000000
-  * @ccid3hctx_last_win_count - Last window counter sent
-  * @ccid3hctx_t_last_win_count - Timestamp of earliest packet
-  * 				  with last_win_count value sent
-  * @ccid3hctx_no_feedback_timer - Handle to no feedback timer
-  * @ccid3hctx_idle - FIXME
-  * @ccid3hctx_t_ld - Time last doubled during slow start
-  * @ccid3hctx_t_nom - Nominal send time of next packet
-  * @ccid3hctx_t_ipi - Interpacket (send) interval
-  * @ccid3hctx_delta - Send timer delta
-  * @ccid3hctx_hist - Packet history
-  */
+ * @ccid3hctx_x - Current sending rate
+ * @ccid3hctx_x_recv - Receive rate
+ * @ccid3hctx_x_calc - Calculated send rate (RFC 3448, 3.1)
+ * @ccid3hctx_rtt - Estimate of current round trip time in usecs
+ * @ccid3hctx_p - Current loss event rate (0-1) scaled by 1000000
+ * @ccid3hctx_s - Packet size
+ * @ccid3hctx_t_rto - Retransmission Timeout (RFC 3448, 3.1)
+ * @ccid3hctx_t_ipi - Interpacket (send) interval (RFC 3448, 4.6)
+ * @ccid3hctx_state - Sender state, one of %ccid3_hc_tx_states
+ * @ccid3hctx_last_win_count - Last window counter sent
+ * @ccid3hctx_t_last_win_count - Timestamp of earliest packet
+ * 				 with last_win_count value sent
+ * @ccid3hctx_no_feedback_timer - Handle to no feedback timer
+ * @ccid3hctx_idle - Flag indicating that sender is idling
+ * @ccid3hctx_t_ld - Time last doubled during slow start
+ * @ccid3hctx_t_nom - Nominal send time of next packet
+ * @ccid3hctx_delta - Send timer delta
+ * @ccid3hctx_hist - Packet history
+ * @ccid3hctx_options_received - Parsed set of retrieved options
+ */
 struct ccid3_hc_tx_sock {
 	struct tfrc_tx_info		ccid3hctx_tfrc;
 #define ccid3hctx_x			ccid3hctx_tfrc.tfrctx_x
@@ -103,7 +105,7 @@
 #define ccid3hctx_t_rto			ccid3hctx_tfrc.tfrctx_rto
 #define ccid3hctx_t_ipi			ccid3hctx_tfrc.tfrctx_ipi
 	u16				ccid3hctx_s;
-  	u8				ccid3hctx_state;
+  	enum ccid3_hc_tx_states 	ccid3hctx_state:8;
 	u8				ccid3hctx_last_win_count;
 	u8				ccid3hctx_idle;
 	struct timeval			ccid3hctx_t_last_win_count;
@@ -115,23 +117,48 @@
 	struct ccid3_options_received	ccid3hctx_options_received;
 };
 
+/* TFRC receiver states */
+enum ccid3_hc_rx_states {
+       	TFRC_RSTATE_NO_DATA = 1,
+	TFRC_RSTATE_DATA,
+	TFRC_RSTATE_TERM    = 127,
+};
+
+/** struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket
+ *
+ *  @ccid3hcrx_x_recv  -  Receiver estimate of send rate (RFC 3448 4.3)
+ *  @ccid3hcrx_rtt  -  Receiver estimate of rtt (non-standard)
+ *  @ccid3hcrx_p  -  current loss event rate (RFC 3448 5.4)
+ *  @ccid3hcrx_seqno_nonloss  -  Last received non-loss sequence number
+ *  @ccid3hcrx_ccval_nonloss  -  Last received non-loss Window CCVal
+ *  @ccid3hcrx_ccval_last_counter  -  Tracks window counter (RFC 4342, 8.1)
+ *  @ccid3hcrx_state  -  receiver state, one of %ccid3_hc_rx_states
+ *  @ccid3hcrx_bytes_recv  -  Total sum of DCCP payload bytes
+ *  @ccid3hcrx_tstamp_last_feedback  -  Time at which last feedback was sent
+ *  @ccid3hcrx_tstamp_last_ack  -  Time at which last feedback was sent
+ *  @ccid3hcrx_hist  -  Packet history
+ *  @ccid3hcrx_li_hist  -  Loss Interval History
+ *  @ccid3hcrx_s  -  Received packet size in bytes
+ *  @ccid3hcrx_pinv  -  Inverse of Loss Event Rate (RFC 4342, sec. 8.5)
+ *  @ccid3hcrx_elapsed_time  -  Time since packet reception
+ */
 struct ccid3_hc_rx_sock {
-	struct tfrc_rx_info	ccid3hcrx_tfrc;
-#define ccid3hcrx_x_recv	ccid3hcrx_tfrc.tfrcrx_x_recv
-#define ccid3hcrx_rtt		ccid3hcrx_tfrc.tfrcrx_rtt
-#define ccid3hcrx_p		ccid3hcrx_tfrc.tfrcrx_p
-  	u64			ccid3hcrx_seqno_nonloss:48,
-				ccid3hcrx_ccval_nonloss:4,
-				ccid3hcrx_state:8,
-				ccid3hcrx_ccval_last_counter:4;
-  	u32			ccid3hcrx_bytes_recv;
-  	struct timeval		ccid3hcrx_tstamp_last_feedback;
-  	struct timeval		ccid3hcrx_tstamp_last_ack;
-	struct list_head	ccid3hcrx_hist;
-	struct list_head	ccid3hcrx_li_hist;
-  	u16			ccid3hcrx_s;
-  	u32			ccid3hcrx_pinv;
-  	u32			ccid3hcrx_elapsed_time;
+	struct tfrc_rx_info		ccid3hcrx_tfrc;
+#define ccid3hcrx_x_recv		ccid3hcrx_tfrc.tfrcrx_x_recv
+#define ccid3hcrx_rtt			ccid3hcrx_tfrc.tfrcrx_rtt
+#define ccid3hcrx_p			ccid3hcrx_tfrc.tfrcrx_p
+  	u64				ccid3hcrx_seqno_nonloss:48,
+					ccid3hcrx_ccval_nonloss:4,
+					ccid3hcrx_ccval_last_counter:4;
+	enum ccid3_hc_rx_states		ccid3hcrx_state:8;
+  	u32				ccid3hcrx_bytes_recv;
+  	struct timeval			ccid3hcrx_tstamp_last_feedback;
+  	struct timeval			ccid3hcrx_tstamp_last_ack;
+	struct list_head		ccid3hcrx_hist;
+	struct list_head		ccid3hcrx_li_hist;
+  	u16				ccid3hcrx_s;
+  	u32				ccid3hcrx_pinv;
+  	u32				ccid3hcrx_elapsed_time;
 };
 
 static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
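
Both half-connection sockets now start with s = 0 and track the mean packet
size with the 0.9 moving average implemented in ccid3_hc_tx_update_s() and
ccid3_hc_rx_update_s() above. A small stand-alone sketch of that update,
using the same integer arithmetic; the sample payload lengths are made up.

#include <stdio.h>

/* EWMA of the packet size: s <- (9*s + len)/10, seeded with the first
 * non-zero payload length (cf. RFC 3448, 4.1). */
static unsigned int update_s(unsigned int s, unsigned int len)
{
	if (len == 0)			/* e.g. pure ACKs: do not update */
		return s;
	return s == 0 ? len : (9 * s + len) / 10;
}

int main(void)
{
	unsigned int s = 0;
	const unsigned int samples[] = { 1460, 1460, 536, 0, 1460 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		s = update_s(s, samples[i]);
		printf("len=%4u -> s=%u\n", samples[i], s);
	}
	return 0;
}
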
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 906c81a..0a0baef 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -13,7 +13,7 @@
 
 #include <linux/module.h>
 #include <net/sock.h>
-
+#include "../../dccp.h"
 #include "loss_interval.h"
 
 struct dccp_li_hist *dccp_li_hist_new(const char *name)
@@ -109,7 +109,7 @@
 	i_tot = max(i_tot0, i_tot1);
 
 	if (!w_tot) {
-		LIMIT_NETDEBUG(KERN_WARNING "%s: w_tot = 0\n", __FUNCTION__);
+		DCCP_WARN("w_tot = 0\n");
 		return 1;
 	}
 
@@ -125,10 +125,10 @@
 	int i;
 
 	for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) {
-		entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC);
+		entry = dccp_li_hist_entry_new(hist, GFP_ATOMIC);
 		if (entry == NULL) {
 			dccp_li_hist_purge(hist, list);
-			dump_stack();
+			DCCP_BUG("loss interval list entry is NULL");
 			return 0;
 		}
 		entry->dccplih_interval = ~0;
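
For reference, the quantity guarded by the w_tot check above is the TFRC
weighted mean loss interval of RFC 3448, 5.4. Below is a floating-point
user-space sketch of that calculation using the RFC weights; the kernel code
works on history list entries with scaled integer arithmetic instead, so
this is only an illustration of the formula, not of the implementation.

#include <stdio.h>

#define NINTERVAL 8

/* RFC 3448, 5.4: weights for the eight most recent loss intervals. */
static const double w[NINTERVAL] = { 1.0, 1.0, 1.0, 1.0, 0.8, 0.6, 0.4, 0.2 };

/*
 * ivals[0] is the open (most recent) interval, ivals[1..7] are closed ones.
 * I_mean = max(I_tot0, I_tot1) / W_tot, where I_tot0 weights intervals 0..6
 * and I_tot1 weights intervals 1..7.
 */
static double mean_loss_interval(const unsigned long ivals[NINTERVAL])
{
	double i_tot0 = 0.0, i_tot1 = 0.0, w_tot = 0.0;
	int i;

	for (i = 0; i < NINTERVAL - 1; i++) {
		i_tot0 += ivals[i] * w[i];
		w_tot  += w[i];
	}
	for (i = 1; i < NINTERVAL; i++)
		i_tot1 += ivals[i] * w[i - 1];

	return (i_tot0 > i_tot1 ? i_tot0 : i_tot1) / w_tot;
}

int main(void)
{
	const unsigned long ivals[NINTERVAL] = { 10, 120, 95, 80, 100, 90, 110, 105 };

	/* The loss event rate p is the inverse of the mean interval. */
	printf("I_mean=%.1f p=%.4f\n", mean_loss_interval(ivals),
	       1.0 / mean_loss_interval(ivals));
	return 0;
}
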
diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h
index 0ae85f0..eb25701 100644
--- a/net/dccp/ccids/lib/loss_interval.h
+++ b/net/dccp/ccids/lib/loss_interval.h
@@ -20,7 +20,7 @@
 #define DCCP_LI_HIST_IVAL_F_LENGTH  8
 
 struct dccp_li_hist {
-	kmem_cache_t *dccplih_slab;
+	struct kmem_cache *dccplih_slab;
 };
 
 extern struct dccp_li_hist *dccp_li_hist_new(const char *name);
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 067cf1c..9a8bcf2 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -68,14 +68,14 @@
 };
 
 struct dccp_tx_hist {
-	kmem_cache_t *dccptxh_slab;
+	struct kmem_cache *dccptxh_slab;
 };
 
 extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name);
 extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist);
 
 struct dccp_rx_hist {
-	kmem_cache_t *dccprxh_slab;
+	struct kmem_cache *dccprxh_slab;
 };
 
 extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index 44076e0..ddac2c5 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -13,16 +13,84 @@
  */
 
 #include <linux/module.h>
-
 #include <asm/div64.h>
-
+#include "../../dccp.h"
 #include "tfrc.h"
 
 #define TFRC_CALC_X_ARRSIZE 500
+#define TFRC_CALC_X_SPLIT   50000	/* 0.05 * 1000000, details below */
+#define TFRC_SMALLEST_P	    (TFRC_CALC_X_SPLIT/TFRC_CALC_X_ARRSIZE)
 
-#define TFRC_CALC_X_SPLIT 50000
-/* equivalent to 0.05 */
+/*
+  TFRC TCP Reno Throughput Equation Lookup Table for f(p)
 
+  The following two-column lookup table implements a part of the TCP throughput
+  equation from [RFC 3448, sec. 3.1]:
+
+	 			     s
+  X_calc  =  --------------------------------------------------------------
+	     R * sqrt(2*b*p/3) + (3 * t_RTO * sqrt(3*b*p/8) * (p + 32*p^3))
+
+  Where:
+	X      is the transmit rate in bytes/second
+	s      is the packet size in bytes
+	R      is the round trip time in seconds
+	p      is the loss event rate, between 0 and 1.0, of the number of loss
+	              events as a fraction of the number of packets transmitted
+	t_RTO  is the TCP retransmission timeout value in seconds
+	b      is the number of packets acknowledged by a single TCP ACK
+
+  We can assume that b = 1 and t_RTO is 4 * R. The equation now becomes:
+
+				     s
+  X_calc  =  -------------------------------------------------------
+	     R * sqrt(p*2/3) + (12 * R * sqrt(p*3/8) * (p + 32*p^3))
+
+  which we can break down into:
+
+                      s
+	X_calc  =  ---------
+	            R * f(p)
+
+  where f(p) is given for 0 < p <= 1 by:
+
+	f(p)  =  sqrt(2*p/3) + 12 * sqrt(3*p/8) *  (p + 32*p^3)
+
+  Since this is kernel code, floating-point arithmetic is avoided in favour of
+  integer arithmetic. This means that nearly all fractional parameters are
+  scaled by 1000000:
+    * the parameters p and R
+    * the return result f(p)
+  The lookup table therefore actually tabulates the following function g(q):
+
+  	g(q)  =  1000000 * f(q/1000000)
+
+  Hence, when p <= 1, q must be less than or equal to 1000000. To achieve finer
+  granularity for the practically more relevant case of small values of p (up to
+  5%), the second column is used; the first one ranges up to 100%.  This split
+  corresponds to the value of q = TFRC_CALC_X_SPLIT. At the same time this also
+  determines the smallest resolution possible with this lookup table:
+
+    TFRC_SMALLEST_P   =  TFRC_CALC_X_SPLIT / TFRC_CALC_X_ARRSIZE
+
+  The entire table is generated by:
+    for(i=0; i < TFRC_CALC_X_ARRSIZE; i++) {
+	lookup[i][0]  =  g((i+1) * 1000000/TFRC_CALC_X_ARRSIZE);
+	lookup[i][1]  =  g((i+1) * TFRC_CALC_X_SPLIT/TFRC_CALC_X_ARRSIZE);
+    }
+
+  With the given configuration, we have, with M = TFRC_CALC_X_ARRSIZE-1,
+    lookup[0][0]  =  g(1000000/(M+1)) 		 =  1000000 * f(0.2%)
+    lookup[M][0]  =  g(1000000)			 =  1000000 * f(100%)
+    lookup[0][1]  =  g(TFRC_SMALLEST_P)		  = 1000000 * f(0.01%)
+    lookup[M][1]  =  g(TFRC_CALC_X_SPLIT)	 =  1000000 * f(5%)
+
+  In summary, the two columns represent f(p) for the following ranges:
+    * The first column is for   0.002  <= p <= 1.0
+    * The second column is for  0.0001 <= p <= 0.05
+  Where the columns overlap, the second (finer-grained) is given preference,
+  i.e. the first column is used only for p >= 0.05.
+ */
 static const u32 tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE][2] = {
 	{     37172,   8172 },
 	{     53499,  11567 },
@@ -526,83 +594,69 @@
 	{ 243315981, 271305 }
 };
 
-/* Calculate the send rate as per section 3.1 of RFC3448
- 
-Returns send rate in bytes per second
+/* return smallest index i such that fval <= lookup[i][small] */
+static inline u32 tfrc_binsearch(u32 fval, u8 small)
+{
+	u32 try, low = 0, high = TFRC_CALC_X_ARRSIZE - 1;
 
-Integer maths and lookups are used as not allowed floating point in kernel
+	while (low < high) {
+		try = (low + high) / 2;
+		if (fval <= tfrc_calc_x_lookup[try][small])
+			high = try;
+		else
+			low  = try + 1;
+	}
+	return high;
+}
 
-The function for Xcalc as per section 3.1 of RFC3448 is:
-
-X =                            s
-     -------------------------------------------------------------
-     R*sqrt(2*b*p/3) + (t_RTO * (3*sqrt(3*b*p/8) * p * (1+32*p^2)))
-
-where 
-X is the trasmit rate in bytes/second
-s is the packet size in bytes
-R is the round trip time in seconds
-p is the loss event rate, between 0 and 1.0, of the number of loss events 
-  as a fraction of the number of packets transmitted
-t_RTO is the TCP retransmission timeout value in seconds
-b is the number of packets acknowledged by a single TCP acknowledgement
-
-we can assume that b = 1 and t_RTO is 4 * R. With this the equation becomes:
-
-X =                            s
-     -----------------------------------------------------------------------
-     R * sqrt(2 * p / 3) + (12 * R * (sqrt(3 * p / 8) * p * (1 + 32 * p^2)))
-
-
-which we can break down into:
-
-X =     s
-     --------
-     R * f(p)
-
-where f(p) = sqrt(2 * p / 3) + (12 * sqrt(3 * p / 8) * p * (1 + 32 * p * p))
-
-Function parameters:
-s - bytes
-R - RTT in usecs
-p - loss rate (decimal fraction multiplied by 1,000,000)
-
-Returns Xcalc in bytes per second
-
-DON'T alter this code unless you run test cases against it as the code
-has been manipulated to stop underflow/overlow.
-
-*/
+/**
+ * tfrc_calc_x - Calculate the send rate as per section 3.1 of RFC3448
+ *
+ *  @s: packet size          in bytes
+ *  @R: RTT                  scaled by 1000000   (i.e., microseconds)
+ *  @p: loss ratio estimate  scaled by 1000000
+ *  Returns X_calc           in bytes per second (not scaled).
+ *
+ * Note: DO NOT alter this code unless you run test cases against it,
+ *       as the code has been optimized to stop underflow/overflow.
+ */
 u32 tfrc_calc_x(u16 s, u32 R, u32 p)
 {
 	int index;
 	u32 f;
 	u64 tmp1, tmp2;
 
-	if (p < TFRC_CALC_X_SPLIT)
-		index = (p / (TFRC_CALC_X_SPLIT / TFRC_CALC_X_ARRSIZE)) - 1;
-	else
-		index = (p / (1000000 / TFRC_CALC_X_ARRSIZE)) - 1;
+	/* check against invalid parameters and divide-by-zero   */
+	BUG_ON(p >  1000000);		/* p must not exceed 100%   */
+	BUG_ON(p == 0);			/* f(0) = 0, divide by zero */
+	if (R == 0) {			/* possible  divide by zero */
+		DCCP_CRIT("WARNING: RTT is 0, returning maximum X_calc.");
+		return ~0U;
+ 	}
 
-	if (index < 0)
-		/* p should be 0 unless there is a bug in my code */
-		index = 0;
+	if (p <= TFRC_CALC_X_SPLIT) 		{     /* 0.0000 < p <= 0.05   */
+		if (p < TFRC_SMALLEST_P) {	      /* 0.0000 < p <  0.0001 */
+			DCCP_WARN("Value of p (%d) below resolution. "
+				  "Substituting %d\n", p, TFRC_SMALLEST_P);
+			index = 0;
+		} else 				      /* 0.0001 <= p <= 0.05  */
+			index =  p/TFRC_SMALLEST_P - 1;
 
-	if (R == 0)
-		R = 1; /* RTT can't be zero or else divide by zero */
+ 		f = tfrc_calc_x_lookup[index][1];
 
-	BUG_ON(index >= TFRC_CALC_X_ARRSIZE);
+	} else {	 			      /* 0.05   <  p <= 1.00  */
+		index = p/(1000000/TFRC_CALC_X_ARRSIZE) - 1;
 
-	if (p >= TFRC_CALC_X_SPLIT)
 		f = tfrc_calc_x_lookup[index][0];
-	else
-		f = tfrc_calc_x_lookup[index][1];
+	}
 
+	/* The following computes X = s/(R*f(p)) in bytes per second. Since f(p)
+	 * and R are both scaled by 1000000, we need to multiply by 1000000^2.
+	 * ==> DO NOT alter this unless you test against overflow on 32 bit   */
 	tmp1 = ((u64)s * 100000000);
 	tmp2 = ((u64)R * (u64)f);
 	do_div(tmp2, 10000);
 	do_div(tmp1, tmp2); 
-	/* Don't alter above math unless you test due to overflow on 32 bit */
 
 	return (u32)tmp1; 
 }
@@ -610,33 +664,36 @@
 EXPORT_SYMBOL_GPL(tfrc_calc_x);
 
 /*
- * args: fvalue - function value to match
- * returns: p closest to that value
+ *  tfrc_calc_x_reverse_lookup  -  try to find p given f(p)
  *
- * both fvalue and p are multiplied by 1,000,000 to use ints
+ *  @fvalue: function value to match, scaled by 1000000
+ *  Returns closest match for p, also scaled by 1000000
  */
 u32 tfrc_calc_x_reverse_lookup(u32 fvalue)
 {
-	int ctr = 0;
-	int small;
+	int index;
 
-	if (fvalue < tfrc_calc_x_lookup[0][1])
+	if (fvalue == 0)	/* f(p) = 0  whenever  p = 0 */
 		return 0;
 
-	if (fvalue <= tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE - 1][1])
-		small = 1;
-	else if (fvalue > tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE - 1][0])
+	/* Error cases. */
+	if (fvalue < tfrc_calc_x_lookup[0][1]) {
+		DCCP_WARN("fvalue %d smaller than resolution\n", fvalue);
+		return tfrc_calc_x_lookup[0][1];
+	}
+	if (fvalue > tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE - 1][0]) {
+		DCCP_WARN("fvalue %d exceeds bounds!\n", fvalue);
 		return 1000000;
-	else
-		small = 0;
+	}
 
-	while (fvalue > tfrc_calc_x_lookup[ctr][small])
-		ctr++;
-
-	if (small)
-		return TFRC_CALC_X_SPLIT * ctr / TFRC_CALC_X_ARRSIZE;
-	else
-		return 1000000 * ctr / TFRC_CALC_X_ARRSIZE;
+	if (fvalue <= tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE - 1][1]) {
+		index = tfrc_binsearch(fvalue, 1);
+		return (index + 1) * TFRC_CALC_X_SPLIT / TFRC_CALC_X_ARRSIZE;
+ 	}
+ 
+	/* else ... it must be in the coarse-grained column */
+	index = tfrc_binsearch(fvalue, 0);
+	return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE;
 }
 
 EXPORT_SYMBOL_GPL(tfrc_calc_x_reverse_lookup);
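
The lookup table documented above can be reproduced off-line with the
generation loop described in the comment. The following user-space sketch
does exactly that in floating point (which the kernel code deliberately
avoids); depending on rounding it may differ from the in-kernel table in the
last digit.

#include <math.h>
#include <stdio.h>

#define ARRSIZE	500
#define SPLIT	50000	/* TFRC_CALC_X_SPLIT: p = 0.05 scaled by 1000000 */

/* f(p) = sqrt(2*p/3) + 12*sqrt(3*p/8)*(p + 32*p^3), i.e. the RFC 3448, 3.1
 * denominator with b = 1 and t_RTO = 4*R. */
static double f(double p)
{
	return sqrt(2.0 * p / 3.0) +
	       12.0 * sqrt(3.0 * p / 8.0) * (p + 32.0 * p * p * p);
}

/* g(q) = 1000000 * f(q/1000000): the scaled value stored in the table. */
static unsigned int g(unsigned int q)
{
	return (unsigned int)(1000000.0 * f(q / 1000000.0) + 0.5);
}

int main(void)
{
	int i;

	for (i = 0; i < ARRSIZE; i++)
		printf("\t{ %9u, %6u },\n",
		       g((i + 1) * 1000000 / ARRSIZE),	/* coarse column */
		       g((i + 1) * SPLIT / ARRSIZE));	/* fine column   */
	return 0;
}

Compile with -lm; the first emitted row comes out as { 37172, 8172 },
matching the first table entry above.
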
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 272e858..6888698 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -18,15 +18,33 @@
 #include <net/tcp.h>
 #include "ackvec.h"
 
+/*
+ * 	DCCP-specific warning and debugging macros.
+ */
+#define DCCP_WARN(fmt, a...) LIMIT_NETDEBUG(KERN_WARNING "%s: " fmt,       \
+						        __FUNCTION__, ##a)
+#define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \
+					 __FILE__, __LINE__, __FUNCTION__)
+#define DCCP_BUG(a...)       do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0)
+#define DCCP_BUG_ON(cond)    do { if (unlikely((cond) != 0))		   \
+				     DCCP_BUG("\"%s\" holds (exception!)", \
+					      __stringify(cond));          \
+			     } while (0)
+
+#ifdef MODULE
+#define DCCP_PRINTK(enable, fmt, args...)	do { if (enable)	     \
+							printk(fmt, ##args); \
+					 	} while(0)
+#else
+#define DCCP_PRINTK(enable, fmt, args...)	printk(fmt, ##args)
+#endif
+#define DCCP_PR_DEBUG(enable, fmt, a...)	DCCP_PRINTK(enable, KERN_DEBUG \
+						  "%s: " fmt, __FUNCTION__, ##a)
+
 #ifdef CONFIG_IP_DCCP_DEBUG
 extern int dccp_debug;
-
-#define dccp_pr_debug(format, a...) \
-	do { if (dccp_debug) \
-		printk(KERN_DEBUG "%s: " format, __FUNCTION__ , ##a); \
-	} while (0)
-#define dccp_pr_debug_cat(format, a...) do { if (dccp_debug) \
-					     printk(format, ##a); } while (0)
+#define dccp_pr_debug(format, a...)	  DCCP_PR_DEBUG(dccp_debug, format, ##a)
+#define dccp_pr_debug_cat(format, a...)   DCCP_PRINTK(dccp_debug, format, ##a)
 #else
 #define dccp_pr_debug(format, a...)
 #define dccp_pr_debug_cat(format, a...)
@@ -35,17 +53,21 @@
 extern struct inet_hashinfo dccp_hashinfo;
 
 extern atomic_t dccp_orphan_count;
-extern int dccp_tw_count;
-extern void dccp_tw_deschedule(struct inet_timewait_sock *tw);
 
 extern void dccp_time_wait(struct sock *sk, int state, int timeo);
 
-/* FIXME: Right size this */
-#define DCCP_MAX_OPT_LEN 128
-
-#define DCCP_MAX_PACKET_HDR 32
-
-#define MAX_DCCP_HEADER  (DCCP_MAX_PACKET_HDR + DCCP_MAX_OPT_LEN + MAX_HEADER)
+/*
+ *  Set safe upper bounds for header and option length. Since Data Offset is 8
+ *  bits (RFC 4340, sec. 5.1), the total header length can never be more than
+ *  4 * 255 = 1020 bytes. The largest possible header length is 28 bytes (X=1):
+ *    - DCCP-Response with ACK Subheader and 4 bytes of Service code      OR
+ *    - DCCP-Reset    with ACK Subheader and 4 bytes of Reset Code fields
+ *  Hence a safe upper bound for the maximum option length is 1020-28 = 992
+ */
+#define MAX_DCCP_SPECIFIC_HEADER (255 * sizeof(int))
+#define DCCP_MAX_PACKET_HDR 28
+#define DCCP_MAX_OPT_LEN (MAX_DCCP_SPECIFIC_HEADER - DCCP_MAX_PACKET_HDR)
+#define MAX_DCCP_HEADER (MAX_DCCP_SPECIFIC_HEADER + MAX_HEADER)
 
 #define DCCP_TIMEWAIT_LEN (60 * HZ) /* how long to wait to destroy TIME-WAIT
 				     * state, about 60 seconds */
@@ -58,6 +80,20 @@
 
 #define DCCP_RTO_MAX ((unsigned)(120 * HZ)) /* FIXME: using TCP value */
 
+#define DCCP_XMIT_TIMEO 30000 /* Time/msecs for blocking transmit per packet */
+
+/* sysctl variables for DCCP */
+extern int  sysctl_dccp_request_retries;
+extern int  sysctl_dccp_retries1;
+extern int  sysctl_dccp_retries2;
+extern int  sysctl_dccp_feat_sequence_window;
+extern int  sysctl_dccp_feat_rx_ccid;
+extern int  sysctl_dccp_feat_tx_ccid;
+extern int  sysctl_dccp_feat_ack_ratio;
+extern int  sysctl_dccp_feat_send_ack_vector;
+extern int  sysctl_dccp_feat_send_ndp_count;
+extern int  sysctl_dccp_tx_qlen;
+
 /* is seq1 < seq2 ? */
 static inline int before48(const u64 seq1, const u64 seq2)
 {
@@ -123,10 +159,36 @@
 #define DCCP_ADD_STATS_USER(field, val)	\
 			SNMP_ADD_STATS_USER(dccp_statistics, field, val)
 
+/*
+ * 	Checksumming routines
+ */
+static inline int dccp_csum_coverage(const struct sk_buff *skb)
+{
+	const struct dccp_hdr* dh = dccp_hdr(skb);
+
+	if (dh->dccph_cscov == 0)
+		return skb->len;
+	return (dh->dccph_doff + dh->dccph_cscov - 1) * sizeof(u32);
+}
+
+static inline void dccp_csum_outgoing(struct sk_buff *skb)
+{
+	int cov = dccp_csum_coverage(skb);
+
+	if (cov >= skb->len)
+		dccp_hdr(skb)->dccph_cscov = 0;
+
+	skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0);
+}
+
+extern void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
+
 extern int  dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb);
 
 extern void dccp_send_ack(struct sock *sk);
 extern void dccp_send_delayed_ack(struct sock *sk);
+extern void dccp_reqsk_send_ack(struct sk_buff *sk, struct request_sock *rsk);
+
 extern void dccp_send_sync(struct sock *sk, const u64 seq,
 			   const enum dccp_pkt_type pkt_type);
 
@@ -147,18 +209,7 @@
 extern void dccp_set_state(struct sock *sk, const int state);
 extern void dccp_done(struct sock *sk);
 
-static inline void dccp_openreq_init(struct request_sock *req,
-				     struct dccp_sock *dp,
-				     struct sk_buff *skb)
-{
-	/*
-	 * FIXME: fill in the other req fields from the DCCP options
-	 * received
-	 */
-	inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport;
-	inet_rsk(req)->acked	= 0;
-	req->rcv_wnd = 0;
-}
+extern void dccp_reqsk_init(struct request_sock *req, struct sk_buff *skb);
 
 extern int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 
@@ -217,14 +268,9 @@
 extern int	   inet_dccp_listen(struct socket *sock, int backlog);
 extern unsigned int dccp_poll(struct file *file, struct socket *sock,
 			     poll_table *wait);
-extern void	   dccp_v4_send_check(struct sock *sk, int len,
-				      struct sk_buff *skb);
 extern int	   dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
 				   int addr_len);
 
-extern int	   dccp_v4_checksum(const struct sk_buff *skb,
-				    const __be32 saddr, const __be32 daddr);
-
 extern int	   dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
 extern void	   dccp_send_close(struct sock *sk, const int active);
 extern int	   dccp_invalid_packet(struct sk_buff *skb);
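The dccp_csum_coverage() helper added to dccp.h above encodes the partial-checksum rule of RFC 4340, sec. 9.2: CsCov = 0 covers the whole packet, while CsCov = n covers the header plus options plus the first (n - 1) 32-bit words of application data. A stand-alone restatement of that arithmetic (illustrative only, using plain integers instead of skb fields):

/* bytes covered by the DCCP checksum, given Data Offset (in 32-bit words),
 * CsCov and the total packet length */
static unsigned int dccp_coverage_bytes(unsigned int doff, unsigned int cscov,
					unsigned int pkt_len)
{
	if (cscov == 0)			/* CsCov 0: cover the whole packet */
		return pkt_len;
	return (doff + cscov - 1) * 4;	/* header/options + (CsCov - 1) data words */
}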
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index a1b0682..4dc487f 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -12,7 +12,6 @@
 
 #include <linux/module.h>
 
-#include "dccp.h"
 #include "ccid.h"
 #include "feat.h"
 
@@ -23,9 +22,17 @@
 {
 	struct dccp_opt_pend *opt;
 
-	dccp_pr_debug("feat change type=%d feat=%d\n", type, feature);
+	dccp_feat_debug(type, feature, *val);
 
-	/* XXX sanity check feat change request */
+	if (!dccp_feat_is_valid_type(type)) {
+		DCCP_WARN("option type %d invalid in negotiation\n", type);
+		return 1;
+	}
+	if (!dccp_feat_is_valid_length(type, feature, len)) {
+		DCCP_WARN("invalid length %d\n", len);
+		return 1;
+	}
+	/* XXX add further sanity checks */
 
 	/* check if that feature is already being negotiated */
 	list_for_each_entry(opt, &dmsk->dccpms_pending, dccpop_node) {
@@ -95,14 +102,14 @@
 /* XXX taking only u8 vals */
 static int dccp_feat_update(struct sock *sk, u8 type, u8 feat, u8 val)
 {
-	dccp_pr_debug("changing [%d] feat %d to %d\n", type, feat, val);
+	dccp_feat_debug(type, feat, val);
 
 	switch (feat) {
 	case DCCPF_CCID:
 		return dccp_feat_update_ccid(sk, type, val);
 	default:
-		dccp_pr_debug("IMPLEMENT changing [%d] feat %d to %d\n",
-			      type, feat, val);
+		dccp_pr_debug("UNIMPLEMENTED: %s(%d, ...)\n",
+			      dccp_feat_typename(type), feat);
 		break;
 	}
 	return 0;
@@ -162,7 +169,8 @@
 			break;
 
 		default:
-			WARN_ON(1); /* XXX implement res */
+			DCCP_BUG("Fell through, feat=%d", opt->dccpop_feat);
+			/* XXX implement res */
 			return -EFAULT;
 		}
 
@@ -265,10 +273,10 @@
 	u8 *copy;
 	int rc;
 
-	/* NN features must be change L */
-	if (type == DCCPO_CHANGE_R) {
-		dccp_pr_debug("received CHANGE_R %d for NN feat %d\n",
-			      type, feature);
+	/* NN features must be Change L (sec. 6.3.2) */
+	if (type != DCCPO_CHANGE_L) {
+		dccp_pr_debug("received %s for NN feature %d\n",
+				dccp_feat_typename(type), feature);
 		return -EFAULT;
 	}
 
@@ -279,12 +287,11 @@
 	if (opt == NULL)
 		return -ENOMEM;
 
-	copy = kmalloc(len, GFP_ATOMIC);
+	copy = kmemdup(val, len, GFP_ATOMIC);
 	if (copy == NULL) {
 		kfree(opt);
 		return -ENOMEM;
 	}
-	memcpy(copy, val, len);
 
 	opt->dccpop_type = DCCPO_CONFIRM_R; /* NN can only confirm R */
 	opt->dccpop_feat = feature;
@@ -299,7 +306,8 @@
 		return rc;
 	}
 
-	dccp_pr_debug("Confirming NN feature %d (val=%d)\n", feature, *copy);
+	dccp_feat_debug(type, feature, *copy);
+
 	list_add_tail(&opt->dccpop_node, &dmsk->dccpms_conf);
 
 	return 0;
@@ -318,14 +326,19 @@
 		return;
 	}
 
-	opt->dccpop_type = type == DCCPO_CHANGE_L ? DCCPO_CONFIRM_R :
-						    DCCPO_CONFIRM_L;
+	switch (type) {
+	case DCCPO_CHANGE_L: opt->dccpop_type = DCCPO_CONFIRM_R; break;
+	case DCCPO_CHANGE_R: opt->dccpop_type = DCCPO_CONFIRM_L; break;
+	default: 	     DCCP_WARN("invalid type %d\n", type); return;
+
+	}
 	opt->dccpop_feat = feature;
 	opt->dccpop_val	 = NULL;
 	opt->dccpop_len	 = 0;
 
 	/* change feature */
-	dccp_pr_debug("Empty confirm feature %d type %d\n", feature, type);
+	dccp_pr_debug("Empty %s(%d)\n", dccp_feat_typename(type), feature);
+
 	list_add_tail(&opt->dccpop_node, &dmsk->dccpms_conf);
 }
 
@@ -359,7 +372,7 @@
 {
 	int rc;
 
-	dccp_pr_debug("got feat change type=%d feat=%d\n", type, feature);
+	dccp_feat_debug(type, feature, *val);
 
 	/* figure out if it's SP or NN feature */
 	switch (feature) {
@@ -375,6 +388,8 @@
 
 	/* XXX implement other features */
 	default:
+		dccp_pr_debug("UNIMPLEMENTED: not handling %s(%d, ...)\n",
+			      dccp_feat_typename(type), feature);
 		rc = -EFAULT;
 		break;
 	}
@@ -403,20 +418,27 @@
 	u8 t;
 	struct dccp_opt_pend *opt;
 	struct dccp_minisock *dmsk = dccp_msk(sk);
-	int rc = 1;
+	int found = 0;
 	int all_confirmed = 1;
 
-	dccp_pr_debug("got feat confirm type=%d feat=%d\n", type, feature);
-
-	/* XXX sanity check type & feat */
+	dccp_feat_debug(type, feature, *val);
 
 	/* locate our change request */
-	t = type == DCCPO_CONFIRM_L ? DCCPO_CHANGE_R : DCCPO_CHANGE_L;
+	switch (type) {
+	case DCCPO_CONFIRM_L: t = DCCPO_CHANGE_R; break;
+	case DCCPO_CONFIRM_R: t = DCCPO_CHANGE_L; break;
+	default: 	      DCCP_WARN("invalid type %d\n", type);
+			      return 1;
+
+	}
+	/* XXX sanity check feature value */
 
 	list_for_each_entry(opt, &dmsk->dccpms_pending, dccpop_node) {
 		if (!opt->dccpop_conf && opt->dccpop_type == t &&
 		    opt->dccpop_feat == feature) {
-			/* we found it */
+			found = 1;
+			dccp_pr_debug("feature %d found\n", opt->dccpop_feat);
+
 			/* XXX do sanity check */
 
 			opt->dccpop_conf = 1;
@@ -425,9 +447,7 @@
 			dccp_feat_update(sk, opt->dccpop_type,
 					 opt->dccpop_feat, *val);
 
-			dccp_pr_debug("feat %d type %d confirmed %d\n",
-				      feature, type, *val);
-			rc = 0;
+			/* XXX check the return value of dccp_feat_update */
 			break;
 		}
 
@@ -446,9 +466,9 @@
 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
 	}
 
-	if (rc)
-		dccp_pr_debug("feat %d type %d never requested\n",
-			      feature, type);
+	if (!found)
+		dccp_pr_debug("%s(%d, ...) never requested\n",
+			      dccp_feat_typename(type), feature);
 	return 0;
 }
 
@@ -501,20 +521,18 @@
 	list_for_each_entry(opt, &olddmsk->dccpms_pending, dccpop_node) {
 		struct dccp_opt_pend *newopt;
 		/* copy the value of the option */
-		u8 *val = kmalloc(opt->dccpop_len, GFP_ATOMIC);
+		u8 *val = kmemdup(opt->dccpop_val, opt->dccpop_len, GFP_ATOMIC);
 
 		if (val == NULL)
 			goto out_clean;
-		memcpy(val, opt->dccpop_val, opt->dccpop_len);
 
-		newopt = kmalloc(sizeof(*newopt), GFP_ATOMIC);
+		newopt = kmemdup(opt, sizeof(*newopt), GFP_ATOMIC);
 		if (newopt == NULL) {
 			kfree(val);
 			goto out_clean;
 		}
 
 		/* insert the option */
-		memcpy(newopt, opt, sizeof(*newopt));
 		newopt->dccpop_val = val;
 		list_add_tail(&newopt->dccpop_node, &newdmsk->dccpms_pending);
 
@@ -545,10 +563,9 @@
 			    u8 *val, u8 len)
 {
 	int rc = -ENOMEM;
-	u8 *copy = kmalloc(len, GFP_KERNEL);
+	u8 *copy = kmemdup(val, len, GFP_KERNEL);
 
 	if (copy != NULL) {
-		memcpy(copy, val, len);
 		rc = dccp_feat_change(dmsk, type, feat, copy, len, GFP_KERNEL);
 		if (rc)
 			kfree(copy);
@@ -583,3 +600,45 @@
 }
 
 EXPORT_SYMBOL_GPL(dccp_feat_init);
+
+#ifdef CONFIG_IP_DCCP_DEBUG
+const char *dccp_feat_typename(const u8 type)
+{
+	switch(type) {
+	case DCCPO_CHANGE_L:  return("ChangeL");
+	case DCCPO_CONFIRM_L: return("ConfirmL");
+	case DCCPO_CHANGE_R:  return("ChangeR");
+	case DCCPO_CONFIRM_R: return("ConfirmR");
+	/* the following case must not appear in feature negotiation */
+	default: 	      dccp_pr_debug("unknown type %d [BUG!]\n", type);
+	}
+	return NULL;
+}
+
+EXPORT_SYMBOL_GPL(dccp_feat_typename);
+
+const char *dccp_feat_name(const u8 feat)
+{
+	static const char *feature_names[] = {
+		[DCCPF_RESERVED]	= "Reserved",
+		[DCCPF_CCID]		= "CCID",
+		[DCCPF_SHORT_SEQNOS]	= "Allow Short Seqnos",
+		[DCCPF_SEQUENCE_WINDOW]	= "Sequence Window",
+		[DCCPF_ECN_INCAPABLE]	= "ECN Incapable",
+		[DCCPF_ACK_RATIO]	= "Ack Ratio",
+		[DCCPF_SEND_ACK_VECTOR]	= "Send ACK Vector",
+		[DCCPF_SEND_NDP_COUNT]	= "Send NDP Count",
+		[DCCPF_MIN_CSUM_COVER]	= "Min. Csum Coverage",
+		[DCCPF_DATA_CHECKSUM]	= "Send Data Checksum",
+	};
+	if (feat >= DCCPF_MIN_CCID_SPECIFIC)
+		return "CCID-specific";
+
+	if (dccp_feat_is_reserved(feat))
+		return feature_names[DCCPF_RESERVED];
+
+	return feature_names[feat];
+}
+
+EXPORT_SYMBOL_GPL(dccp_feat_name);
+#endif /* CONFIG_IP_DCCP_DEBUG */
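Several hunks in feat.c above fold an open-coded kmalloc()/memcpy() pair into a single kmemdup() call; the transformation is mechanical and keeps the allocation flags, e.g.:

	/* before */
	copy = kmalloc(len, GFP_ATOMIC);
	if (copy == NULL)
		return -ENOMEM;
	memcpy(copy, val, len);

	/* after: one call with identical semantics */
	copy = kmemdup(val, len, GFP_ATOMIC);
	if (copy == NULL)
		return -ENOMEM;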
diff --git a/net/dccp/feat.h b/net/dccp/feat.h
index cee553d..2c373ad 100644
--- a/net/dccp/feat.h
+++ b/net/dccp/feat.h
@@ -12,9 +12,46 @@
  */
 
 #include <linux/types.h>
+#include "dccp.h"
 
-struct sock;
-struct dccp_minisock;
+static inline int dccp_feat_is_valid_length(u8 type, u8 feature, u8 len)
+{
+	/* sec. 6.1: Confirm has at least length 3,
+	 * sec. 6.2: Change  has at least length 4 */
+	if (len < 3)
+		return 0;
+	if (len < 4  && (type == DCCPO_CHANGE_L || type == DCCPO_CHANGE_R))
+		return 0;
+	/* XXX: add per-feature length validation (sec. 6.6.8) */
+	return 1;
+}
+
+static inline int dccp_feat_is_reserved(const u8 feat)
+{
+	return (feat > DCCPF_DATA_CHECKSUM &&
+		feat < DCCPF_MIN_CCID_SPECIFIC) ||
+	        feat == DCCPF_RESERVED;
+}
+
+/* feature negotiation knows only these four option types (RFC 4340, sec. 6) */
+static inline int dccp_feat_is_valid_type(const u8 optnum)
+{
+	return optnum >= DCCPO_CHANGE_L && optnum <= DCCPO_CONFIRM_R;
+
+}
+
+#ifdef CONFIG_IP_DCCP_DEBUG
+extern const char *dccp_feat_typename(const u8 type);
+extern const char *dccp_feat_name(const u8 feat);
+
+static inline void dccp_feat_debug(const u8 type, const u8 feat, const u8 val)
+{
+	dccp_pr_debug("%s(%s (%d), %d)\n", dccp_feat_typename(type),
+					   dccp_feat_name(feat), feat, val);
+}
+#else
+#define dccp_feat_debug(type, feat, val)
+#endif /* CONFIG_IP_DCCP_DEBUG */
 
 extern int  dccp_feat_change(struct dccp_minisock *dmsk, u8 type, u8 feature,
 			     u8 *val, u8 len, gfp_t gfp);
@@ -26,11 +63,4 @@
 extern int  dccp_feat_clone(struct sock *oldsk, struct sock *newsk);
 extern int  dccp_feat_init(struct dccp_minisock *dmsk);
 
-extern int  dccp_feat_default_sequence_window;
-extern int  dccp_feat_default_rx_ccid;
-extern int  dccp_feat_default_tx_ccid;
-extern int  dccp_feat_default_ack_ratio;
-extern int  dccp_feat_default_send_ack_vector;
-extern int  dccp_feat_default_send_ndp_count;
-
 #endif /* _DCCP_FEAT_H */
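With CONFIG_IP_DCCP_DEBUG enabled, the new dccp_feat_debug() helper above prints the symbolic option and feature names rather than raw numbers. A hypothetical call (values chosen purely for illustration):

	/* hypothetical example, not taken from the patch itself */
	dccp_feat_debug(DCCPO_CHANGE_L, DCCPF_CCID, 2);
	/* emits a line of the form "<caller>: ChangeL(CCID (1), 2)" via
	 * dccp_pr_debug() when dccp_debug is set, and expands to nothing
	 * when CONFIG_IP_DCCP_DEBUG is not configured */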
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 1d24881..7371a2f 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -128,21 +128,18 @@
 		     DCCP_PKT_WITHOUT_ACK_SEQ))
 			dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq;
 	} else {
-		LIMIT_NETDEBUG(KERN_WARNING "DCCP: Step 6 failed for %s packet, "
-					    "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
-					    "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), "
-					    "sending SYNC...\n",
-			       dccp_packet_name(dh->dccph_type),
-			       (unsigned long long) lswl,
-			       (unsigned long long)
-			       DCCP_SKB_CB(skb)->dccpd_seq,
-			       (unsigned long long) dp->dccps_swh,
-			       (DCCP_SKB_CB(skb)->dccpd_ack_seq ==
+		DCCP_WARN("DCCP: Step 6 failed for %s packet, "
+			  "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
+			  "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu)), "
+			  "sending SYNC...\n",  dccp_packet_name(dh->dccph_type),
+			  (unsigned long long) lswl,
+			  (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq,
+			  (unsigned long long) dp->dccps_swh,
+			  (DCCP_SKB_CB(skb)->dccpd_ack_seq ==
 			        DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists",
-			       (unsigned long long) lawl,
-			       (unsigned long long)
-			       DCCP_SKB_CB(skb)->dccpd_ack_seq,
-			       (unsigned long long) dp->dccps_awh);
+			  (unsigned long long) lawl,
+			  (unsigned long long) DCCP_SKB_CB(skb)->dccpd_ack_seq,
+			  (unsigned long long) dp->dccps_awh);
 		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
 		return -1;
 	}
@@ -431,29 +428,25 @@
 
 	/*
 	 *  Step 3: Process LISTEN state
-	 *  	(Continuing from dccp_v4_do_rcv and dccp_v6_do_rcv)
 	 *
 	 *     If S.state == LISTEN,
-	 *	  If P.type == Request or P contains a valid Init Cookie
-	 *	  	option,
-	 *	     * Must scan the packet's options to check for an Init
-	 *		Cookie.  Only the Init Cookie is processed here,
-	 *		however; other options are processed in Step 8.  This
-	 *		scan need only be performed if the endpoint uses Init
-	 *		Cookies *
-	 *	     * Generate a new socket and switch to that socket *
-	 *	     Set S := new socket for this port pair
-	 *	     S.state = RESPOND
-	 *	     Choose S.ISS (initial seqno) or set from Init Cookie
-	 *	     Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
-	 *	     Continue with S.state == RESPOND
-	 *	     * A Response packet will be generated in Step 11 *
-	 *	  Otherwise,
-	 *	     Generate Reset(No Connection) unless P.type == Reset
-	 *	     Drop packet and return
-	 *
-	 * NOTE: the check for the packet types is done in
-	 *	 dccp_rcv_state_process
+	 *	 If P.type == Request or P contains a valid Init Cookie option,
+	 *	      (* Must scan the packet's options to check for Init
+	 *		 Cookies.  Only Init Cookies are processed here,
+	 *		 however; other options are processed in Step 8.  This
+	 *		 scan need only be performed if the endpoint uses Init
+	 *		 Cookies *)
+	 *	      (* Generate a new socket and switch to that socket *)
+	 *	      Set S := new socket for this port pair
+	 *	      S.state = RESPOND
+	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
+	 *	      Initialize S.GAR := S.ISS
+	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
+	 *	      Continue with S.state == RESPOND
+	 *	      (* A Response packet will be generated in Step 11 *)
+	 *	 Otherwise,
+	 *	      Generate Reset(No Connection) unless P.type == Reset
+	 *	      Drop packet and return
 	 */
 	if (sk->sk_state == DCCP_LISTEN) {
 		if (dh->dccph_type == DCCP_PKT_REQUEST) {
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index e08e768..ff81679 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -113,13 +113,8 @@
 	/* OK, now commit destination to socket.  */
 	sk_setup_caps(sk, &rt->u.dst);
 
-	dp->dccps_gar =
-		dp->dccps_iss = secure_dccp_sequence_number(inet->saddr,
-							    inet->daddr,
-							    inet->sport,
-							    usin->sin_port);
-	dccp_update_gss(sk, dp->dccps_iss);
-
+	dp->dccps_iss = secure_dccp_sequence_number(inet->saddr, inet->daddr,
+						    inet->sport, inet->dport);
 	inet->id = dp->dccps_iss ^ jiffies;
 
 	err = dccp_connect(sk);
@@ -193,86 +188,6 @@
 	} /* else let the usual retransmit timer handle it */
 }
 
-static void dccp_v4_reqsk_send_ack(struct sk_buff *rxskb,
-				   struct request_sock *req)
-{
-	int err;
-	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
-	const u32 dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
-				     sizeof(struct dccp_hdr_ext) +
-				     sizeof(struct dccp_hdr_ack_bits);
-	struct sk_buff *skb;
-
-	if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL)
-		return;
-
-	skb = alloc_skb(dccp_v4_ctl_socket->sk->sk_prot->max_header, GFP_ATOMIC);
-	if (skb == NULL)
-		return;
-
-	/* Reserve space for headers. */
-	skb_reserve(skb, dccp_v4_ctl_socket->sk->sk_prot->max_header);
-
-	skb->dst = dst_clone(rxskb->dst);
-
-	skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
-	dh = dccp_hdr(skb);
-	memset(dh, 0, dccp_hdr_ack_len);
-
-	/* Build DCCP header and checksum it. */
-	dh->dccph_type	   = DCCP_PKT_ACK;
-	dh->dccph_sport	   = rxdh->dccph_dport;
-	dh->dccph_dport	   = rxdh->dccph_sport;
-	dh->dccph_doff	   = dccp_hdr_ack_len / 4;
-	dh->dccph_x	   = 1;
-
-	dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
-	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
-			 DCCP_SKB_CB(rxskb)->dccpd_seq);
-
-	bh_lock_sock(dccp_v4_ctl_socket->sk);
-	err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk,
-				    rxskb->nh.iph->daddr,
-				    rxskb->nh.iph->saddr, NULL);
-	bh_unlock_sock(dccp_v4_ctl_socket->sk);
-
-	if (err == NET_XMIT_CN || err == 0) {
-		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
-		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
-	}
-}
-
-static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
-				 struct dst_entry *dst)
-{
-	int err = -1;
-	struct sk_buff *skb;
-
-	/* First, grab a route. */
-	
-	if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
-		goto out;
-
-	skb = dccp_make_response(sk, dst, req);
-	if (skb != NULL) {
-		const struct inet_request_sock *ireq = inet_rsk(req);
-		struct dccp_hdr *dh = dccp_hdr(skb);
-
-		dh->dccph_checksum = dccp_v4_checksum(skb, ireq->loc_addr,
-						      ireq->rmt_addr);
-		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
-					    ireq->rmt_addr,
-					    ireq->opt);
-		if (err == NET_XMIT_CN)
-			err = 0;
-	}
-
-out:
-	dst_release(dst);
-	return err;
-}
-
 /*
  * This routine is called by the ICMP module when it gets some sort of error
  * condition. If err < 0 then the socket should be closed and the error
@@ -329,7 +244,7 @@
 	seq = dccp_hdr_seq(skb);
 	if (sk->sk_state != DCCP_LISTEN &&
 	    !between48(seq, dp->dccps_swl, dp->dccps_swh)) {
-		NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
+		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
 
@@ -429,19 +344,24 @@
 	sock_put(sk);
 }
 
-/* This routine computes an IPv4 DCCP checksum. */
-void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
+static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb,
+				      __be32 src, __be32 dst)
+{
+	return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum);
+}
+
+void dccp_v4_send_check(struct sock *sk, int unused, struct sk_buff *skb)
 {
 	const struct inet_sock *inet = inet_sk(sk);
 	struct dccp_hdr *dh = dccp_hdr(skb);
 
-	dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr, inet->daddr);
+	dccp_csum_outgoing(skb);
+	dh->dccph_checksum = dccp_v4_csum_finish(skb, inet->saddr, inet->daddr);
 }
 
 EXPORT_SYMBOL_GPL(dccp_v4_send_check);
 
-static inline u64 dccp_v4_init_sequence(const struct sock *sk,
-					const struct sk_buff *skb)
+static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb)
 {
 	return secure_dccp_sequence_number(skb->nh.iph->daddr,
 					   skb->nh.iph->saddr,
@@ -449,95 +369,6 @@
 					   dccp_hdr(skb)->dccph_sport);
 }
 
-static struct request_sock_ops dccp_request_sock_ops;
-
-int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
-{
-	struct inet_request_sock *ireq;
-	struct dccp_sock dp;
-	struct request_sock *req;
-	struct dccp_request_sock *dreq;
-	const __be32 saddr = skb->nh.iph->saddr;
-	const __be32 daddr = skb->nh.iph->daddr;
- 	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
-	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
-	__u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
-
-	/* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */
-	if (((struct rtable *)skb->dst)->rt_flags &
-	    (RTCF_BROADCAST | RTCF_MULTICAST)) {
-		reset_code = DCCP_RESET_CODE_NO_CONNECTION;
-		goto drop;
-	}
-
-	if (dccp_bad_service_code(sk, service)) {
-		reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
-		goto drop;
- 	}
-	/*
-	 * TW buckets are converted to open requests without
-	 * limitations, they conserve resources and peer is
-	 * evidently real one.
-	 */
-	if (inet_csk_reqsk_queue_is_full(sk))
-		goto drop;
-
-	/*
-	 * Accept backlog is full. If we have already queued enough
-	 * of warm entries in syn queue, drop request. It is better than
-	 * clogging syn queue with openreqs with exponentially increasing
-	 * timeout.
-	 */
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
-		goto drop;
-
-	req = reqsk_alloc(&dccp_request_sock_ops);
-	if (req == NULL)
-		goto drop;
-
-	if (dccp_parse_options(sk, skb))
-		goto drop_and_free;
-
-	dccp_openreq_init(req, &dp, skb);
-
-	if (security_inet_conn_request(sk, skb, req))
-		goto drop_and_free;
-
-	ireq = inet_rsk(req);
-	ireq->loc_addr = daddr;
-	ireq->rmt_addr = saddr;
-	req->rcv_wnd	= dccp_feat_default_sequence_window;
-	ireq->opt	= NULL;
-
-	/* 
-	 * Step 3: Process LISTEN state
-	 *
-	 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
-	 *
-	 * In fact we defer setting S.GSR, S.SWL, S.SWH to
-	 * dccp_create_openreq_child.
-	 */
-	dreq = dccp_rsk(req);
-	dreq->dreq_isr	   = dcb->dccpd_seq;
-	dreq->dreq_iss	   = dccp_v4_init_sequence(sk, skb);
-	dreq->dreq_service = service;
-
-	if (dccp_v4_send_response(sk, req, NULL))
-		goto drop_and_free;
-
-	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
-	return 0;
-
-drop_and_free:
-	reqsk_free(req);
-drop:
-	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
-	dcb->dccpd_reset_code = reset_code;
-	return -1;
-}
-
-EXPORT_SYMBOL_GPL(dccp_v4_conn_request);
-
 /*
  * The three way handshake has completed - we got a valid ACK or DATAACK -
  * now create the new socket.
@@ -623,47 +454,6 @@
 	return sk;
 }
 
-int dccp_v4_checksum(const struct sk_buff *skb, const __be32 saddr,
-		     const __be32 daddr)
-{
-	const struct dccp_hdr* dh = dccp_hdr(skb);
-	int checksum_len;
-	u32 tmp;
-
-	if (dh->dccph_cscov == 0)
-		checksum_len = skb->len;
-	else {
-		checksum_len = (dh->dccph_cscov + dh->dccph_x) * sizeof(u32);
-		checksum_len = checksum_len < skb->len ? checksum_len :
-							 skb->len;
-	}
-
-	tmp = csum_partial((unsigned char *)dh, checksum_len, 0);
-	return csum_tcpudp_magic(saddr, daddr, checksum_len,
-				 IPPROTO_DCCP, tmp);
-}
-
-EXPORT_SYMBOL_GPL(dccp_v4_checksum);
-
-static int dccp_v4_verify_checksum(struct sk_buff *skb,
-				   const __be32 saddr, const __be32 daddr)
-{
-	struct dccp_hdr *dh = dccp_hdr(skb);
-	int checksum_len;
-	u32 tmp;
-
-	if (dh->dccph_cscov == 0)
-		checksum_len = skb->len;
-	else {
-		checksum_len = (dh->dccph_cscov + dh->dccph_x) * sizeof(u32);
-		checksum_len = checksum_len < skb->len ? checksum_len :
-							 skb->len;
-	}
-	tmp = csum_partial((unsigned char *)dh, checksum_len, 0);
-	return csum_tcpudp_magic(saddr, daddr, checksum_len,
-				 IPPROTO_DCCP, tmp) == 0 ? 0 : -1;
-}
-
 static struct dst_entry* dccp_v4_route_skb(struct sock *sk,
 					   struct sk_buff *skb)
 {
@@ -689,7 +479,37 @@
 	return &rt->u.dst;
 }
 
-static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
+static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
+				 struct dst_entry *dst)
+{
+	int err = -1;
+	struct sk_buff *skb;
+
+	/* First, grab a route. */
+	
+	if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
+		goto out;
+
+	skb = dccp_make_response(sk, dst, req);
+	if (skb != NULL) {
+		const struct inet_request_sock *ireq = inet_rsk(req);
+		struct dccp_hdr *dh = dccp_hdr(skb);
+
+		dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->loc_addr,
+							      ireq->rmt_addr);
+		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
+					    ireq->rmt_addr,
+					    ireq->opt);
+		err = net_xmit_eval(err);
+	}
+
+out:
+	dst_release(dst);
+	return err;
+}
+
+static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 {
 	int err;
 	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
@@ -698,7 +518,7 @@
 				       sizeof(struct dccp_hdr_reset);
 	struct sk_buff *skb;
 	struct dst_entry *dst;
-	u64 seqno;
+	u64 seqno = 0;
 
 	/* Never send a reset in response to a reset. */
 	if (rxdh->dccph_type == DCCP_PKT_RESET)
@@ -720,9 +540,7 @@
 	skb_reserve(skb, dccp_v4_ctl_socket->sk->sk_prot->max_header);
 	skb->dst = dst_clone(dst);
 
-	skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
-	dh = dccp_hdr(skb);
-	memset(dh, 0, dccp_hdr_reset_len);
+	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
 
 	/* Build DCCP header and checksum it. */
 	dh->dccph_type	   = DCCP_PKT_RESET;
@@ -734,16 +552,15 @@
 				DCCP_SKB_CB(rxskb)->dccpd_reset_code;
 
 	/* See "8.3.1. Abnormal Termination" in RFC 4340 */
-	seqno = 0;
 	if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
 		dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);
 
 	dccp_hdr_set_seq(dh, seqno);
-	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
-			 DCCP_SKB_CB(rxskb)->dccpd_seq);
+	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq);
 
-	dh->dccph_checksum = dccp_v4_checksum(skb, rxskb->nh.iph->saddr,
-					      rxskb->nh.iph->daddr);
+	dccp_csum_outgoing(skb);
+	dh->dccph_checksum = dccp_v4_csum_finish(skb, rxskb->nh.iph->saddr,
+						      rxskb->nh.iph->daddr);
 
 	bh_lock_sock(dccp_v4_ctl_socket->sk);
 	err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk,
@@ -751,7 +568,7 @@
 				    rxskb->nh.iph->saddr, NULL);
 	bh_unlock_sock(dccp_v4_ctl_socket->sk);
 
-	if (err == NET_XMIT_CN || err == 0) {
+	if (net_xmit_eval(err) == 0) {
 		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
 		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
 	}
@@ -759,6 +576,103 @@
 	 dst_release(dst);
 }
 
+static void dccp_v4_reqsk_destructor(struct request_sock *req)
+{
+	kfree(inet_rsk(req)->opt);
+}
+
+static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
+	.family		= PF_INET,
+	.obj_size	= sizeof(struct dccp_request_sock),
+	.rtx_syn_ack	= dccp_v4_send_response,
+	.send_ack	= dccp_reqsk_send_ack,
+	.destructor	= dccp_v4_reqsk_destructor,
+	.send_reset	= dccp_v4_ctl_send_reset,
+};
+
+int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+{
+	struct inet_request_sock *ireq;
+	struct request_sock *req;
+	struct dccp_request_sock *dreq;
+ 	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
+	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
+	__u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
+
+	/* Never answer to DCCP_PKT_REQUESTs sent to broadcast or multicast */
+	if (((struct rtable *)skb->dst)->rt_flags &
+	    (RTCF_BROADCAST | RTCF_MULTICAST)) {
+		reset_code = DCCP_RESET_CODE_NO_CONNECTION;
+		goto drop;
+	}
+
+	if (dccp_bad_service_code(sk, service)) {
+		reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
+		goto drop;
+ 	}
+	/*
+	 * TW buckets are converted to open requests without
+	 * limitations, they conserve resources and peer is
+	 * evidently real one.
+	 */
+	if (inet_csk_reqsk_queue_is_full(sk))
+		goto drop;
+
+	/*
+	 * Accept backlog is full. If we have already queued enough
+	 * of warm entries in syn queue, drop request. It is better than
+	 * clogging syn queue with openreqs with exponentially increasing
+	 * timeout.
+	 */
+	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+		goto drop;
+
+	req = reqsk_alloc(&dccp_request_sock_ops);
+	if (req == NULL)
+		goto drop;
+
+	if (dccp_parse_options(sk, skb))
+		goto drop_and_free;
+
+	dccp_reqsk_init(req, skb);
+
+	if (security_inet_conn_request(sk, skb, req))
+		goto drop_and_free;
+
+	ireq = inet_rsk(req);
+	ireq->loc_addr = skb->nh.iph->daddr;
+	ireq->rmt_addr = skb->nh.iph->saddr;
+	ireq->opt	= NULL;
+
+	/* 
+	 * Step 3: Process LISTEN state
+	 *
+	 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
+	 *
+	 * In fact we defer setting S.GSR, S.SWL, S.SWH to
+	 * dccp_create_openreq_child.
+	 */
+	dreq = dccp_rsk(req);
+	dreq->dreq_isr	   = dcb->dccpd_seq;
+	dreq->dreq_iss	   = dccp_v4_init_sequence(skb);
+	dreq->dreq_service = service;
+
+	if (dccp_v4_send_response(sk, req, NULL))
+		goto drop_and_free;
+
+	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+	return 0;
+
+drop_and_free:
+	reqsk_free(req);
+drop:
+	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+	dcb->dccpd_reset_code = reset_code;
+	return -1;
+}
+
+EXPORT_SYMBOL_GPL(dccp_v4_conn_request);
+
 int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct dccp_hdr *dh = dccp_hdr(skb);
@@ -771,24 +685,23 @@
 
 	/*
 	 *  Step 3: Process LISTEN state
-	 *     If S.state == LISTEN,
-	 *	  If P.type == Request or P contains a valid Init Cookie
-	 *	  	option,
-	 *	     * Must scan the packet's options to check for an Init
-	 *		Cookie.  Only the Init Cookie is processed here,
-	 *		however; other options are processed in Step 8.  This
-	 *		scan need only be performed if the endpoint uses Init
-	 *		Cookies *
-	 *	     * Generate a new socket and switch to that socket *
-	 *	     Set S := new socket for this port pair
-	 *	     S.state = RESPOND
-	 *	     Choose S.ISS (initial seqno) or set from Init Cookie
-	 *	     Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
-	 *	     Continue with S.state == RESPOND
-	 *	     * A Response packet will be generated in Step 11 *
-	 *	  Otherwise,
-	 *	     Generate Reset(No Connection) unless P.type == Reset
-	 *	     Drop packet and return
+	 *	 If P.type == Request or P contains a valid Init Cookie option,
+	 *	      (* Must scan the packet's options to check for Init
+	 *		 Cookies.  Only Init Cookies are processed here,
+	 *		 however; other options are processed in Step 8.  This
+	 *		 scan need only be performed if the endpoint uses Init
+	 *		 Cookies *)
+	 *	      (* Generate a new socket and switch to that socket *)
+	 *	      Set S := new socket for this port pair
+	 *	      S.state = RESPOND
+	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
+	 *	      Initialize S.GAR := S.ISS
+	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
+	 *	      Continue with S.state == RESPOND
+	 *	      (* A Response packet will be generated in Step 11 *)
+	 *	 Otherwise,
+	 *	      Generate Reset(No Connection) unless P.type == Reset
+	 *	      Drop packet and return
 	 *
 	 * NOTE: the check for the packet types is done in
 	 *	 dccp_rcv_state_process
@@ -811,7 +724,7 @@
 	return 0;
 
 reset:
-	dccp_v4_ctl_send_reset(skb);
+	dccp_v4_ctl_send_reset(sk, skb);
 discard:
 	kfree_skb(skb);
 	return 0;
@@ -819,60 +732,74 @@
 
 EXPORT_SYMBOL_GPL(dccp_v4_do_rcv);
 
+/**
+ *	dccp_invalid_packet  -  check for malformed packets
+ *	Implements RFC 4340, 8.5:  Step 1: Check header basics
+ *	Packets that fail these checks are ignored and do not receive Resets.
+ */
 int dccp_invalid_packet(struct sk_buff *skb)
 {
 	const struct dccp_hdr *dh;
+	unsigned int cscov;
 
 	if (skb->pkt_type != PACKET_HOST)
 		return 1;
 
+	/* If the packet is shorter than 12 bytes, drop packet and return */
 	if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) {
-		LIMIT_NETDEBUG(KERN_WARNING "DCCP: pskb_may_pull failed\n");
+		DCCP_WARN("pskb_may_pull failed\n");
 		return 1;
 	}
 
 	dh = dccp_hdr(skb);
 
-	/* If the packet type is not understood, drop packet and return */
+	/* If P.type is not understood, drop packet and return */
 	if (dh->dccph_type >= DCCP_PKT_INVALID) {
-		LIMIT_NETDEBUG(KERN_WARNING "DCCP: invalid packet type\n");
+		DCCP_WARN("invalid packet type\n");
 		return 1;
 	}
 
 	/*
-	 * If P.Data Offset is too small for packet type, or too large for
-	 * packet, drop packet and return
+	 * If P.Data Offset is too small for packet type, drop packet and return
 	 */
 	if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
-		LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.Data Offset(%u) "
-					    "too small 1\n",
-			       dh->dccph_doff);
+		DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff);
 		return 1;
 	}
-
+	/*
+	 * If P.Data Offset is too large for packet, drop packet and return
+	 */
 	if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
-		LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.Data Offset(%u) "
-					    "too small 2\n",
-			       dh->dccph_doff);
+		DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff);
 		return 1;
 	}
 
-	dh = dccp_hdr(skb);
-
 	/*
 	 * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
 	 * has short sequence numbers), drop packet and return
 	 */
-	if (dh->dccph_x == 0 &&
-	    dh->dccph_type != DCCP_PKT_DATA &&
-	    dh->dccph_type != DCCP_PKT_ACK &&
-	    dh->dccph_type != DCCP_PKT_DATAACK) {
-		LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.type (%s) not Data, Ack "
-					    "nor DataAck and P.X == 0\n",
-			       dccp_packet_name(dh->dccph_type));
+	if ((dh->dccph_type < DCCP_PKT_DATA   ||
+	     dh->dccph_type > DCCP_PKT_DATAACK)  &&  dh->dccph_x == 0)  {
+		DCCP_WARN("P.type (%s) not Data || [Data]Ack, while P.X == 0\n",
+			  dccp_packet_name(dh->dccph_type));
 		return 1;
 	}
 
+	/*
+	 * If P.CsCov is too large for the packet size, drop packet and return.
+	 * This must come _before_ checksumming (not as RFC 4340 suggests).
+	 */
+	cscov = dccp_csum_coverage(skb);
+	if (cscov > skb->len) {
+		DCCP_WARN("P.CsCov %u exceeds packet length %d\n",
+			  dh->dccph_cscov, skb->len);
+		return 1;
+	}
+
+	/* If header checksum is incorrect, drop packet and return.
+	 * (This step is completed in the AF-dependent functions.) */
+	skb->csum = skb_checksum(skb, 0, cscov, 0);
+
 	return 0;
 }
 
@@ -883,17 +810,16 @@
 {
 	const struct dccp_hdr *dh;
 	struct sock *sk;
+	int min_cov;
 
-	/* Step 1: Check header basics: */
+	/* Step 1: Check header basics */
 
 	if (dccp_invalid_packet(skb))
 		goto discard_it;
 
-	/* If the header checksum is incorrect, drop packet and return */
-	if (dccp_v4_verify_checksum(skb, skb->nh.iph->saddr,
-				    skb->nh.iph->daddr) < 0) {
-		LIMIT_NETDEBUG(KERN_WARNING "%s: incorrect header checksum\n",
-			       __FUNCTION__);
+	/* Step 1: If header checksum is incorrect, drop packet and return */
+	if (dccp_v4_csum_finish(skb, skb->nh.iph->saddr, skb->nh.iph->daddr)) {
+		DCCP_WARN("dropped packet with invalid checksum\n");
 		goto discard_it;
 	}
 
@@ -915,8 +841,7 @@
 		dccp_pr_debug_cat("\n");
 	} else {
 		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
-		dccp_pr_debug_cat(", ack=%llu\n",
-				  (unsigned long long)
+		dccp_pr_debug_cat(", ack=%llu\n", (unsigned long long)
 				  DCCP_SKB_CB(skb)->dccpd_ack_seq);
 	}
 
@@ -930,8 +855,6 @@
 	/* 
 	 * Step 2:
 	 * 	If no socket ...
-	 *		Generate Reset(No Connection) unless P.type == Reset
-	 *		Drop packet and return
 	 */
 	if (sk == NULL) {
 		dccp_pr_debug("failed to look up flow ID in table and "
@@ -945,45 +868,55 @@
 	 *		Generate Reset(No Connection) unless P.type == Reset
 	 *		Drop packet and return
 	 */
-	       
 	if (sk->sk_state == DCCP_TIME_WAIT) {
-		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: "
-			      "do_time_wait\n");
-                goto do_time_wait;
+		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
+		inet_twsk_put(inet_twsk(sk));
+		goto no_dccp_socket;
+	}
+
+	/*
+	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
+	 * 	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
+	 * 	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
+	 */
+	min_cov = dccp_sk(sk)->dccps_pcrlen;
+	if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov))  {
+		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
+			      dh->dccph_cscov, min_cov);
+		/* FIXME: "Such packets SHOULD be reported using Data Dropped
+		 *         options (Section 11.7) with Drop Code 0, Protocol
+		 *         Constraints."                                     */
+		goto discard_and_relse;
 	}
 
 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
 	nf_reset(skb);
 
-	return sk_receive_skb(sk, skb);
+	return sk_receive_skb(sk, skb, 1);
 
 no_dccp_socket:
 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 		goto discard_it;
 	/*
 	 * Step 2:
+	 * 	If no socket ...
 	 *		Generate Reset(No Connection) unless P.type == Reset
 	 *		Drop packet and return
 	 */
 	if (dh->dccph_type != DCCP_PKT_RESET) {
 		DCCP_SKB_CB(skb)->dccpd_reset_code =
 					DCCP_RESET_CODE_NO_CONNECTION;
-		dccp_v4_ctl_send_reset(skb);
+		dccp_v4_ctl_send_reset(sk, skb);
 	}
 
 discard_it:
-	/* Discard frame. */
 	kfree_skb(skb);
 	return 0;
 
 discard_and_relse:
 	sock_put(sk);
 	goto discard_it;
-
-do_time_wait:
-	inet_twsk_put(inet_twsk(sk));
-	goto no_dccp_socket;
 }
 
 static struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
@@ -1017,20 +950,6 @@
 	return err;
 }
 
-static void dccp_v4_reqsk_destructor(struct request_sock *req)
-{
-	kfree(inet_rsk(req)->opt);
-}
-
-static struct request_sock_ops dccp_request_sock_ops = {
-	.family		= PF_INET,
-	.obj_size	= sizeof(struct dccp_request_sock),
-	.rtx_syn_ack	= dccp_v4_send_response,
-	.send_ack	= dccp_v4_reqsk_send_ack,
-	.destructor	= dccp_v4_reqsk_destructor,
-	.send_reset	= dccp_v4_ctl_send_reset,
-};
-
 static struct timewait_sock_ops dccp_timewait_sock_ops = {
 	.twsk_obj_size	= sizeof(struct inet_timewait_sock),
 };
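Both receive paths (dccp_v4_rcv() above and dccp_v6_rcv() below) now enforce the Minimum Checksum Coverage rule of RFC 4340, sec. 9.2.1, which the hunks open-code as the inverse drop condition. Restated as an acceptance predicate (a sketch, not kernel code):

/* 1 = accept, 0 = drop (to be reported via Data Dropped in a later change) */
static int cscov_acceptable(unsigned int cscov, unsigned int min_cov)
{
	if (cscov == 0)			/* full coverage always satisfies MinCsCov */
		return 1;
	/* partial coverage is allowed only if the receiver asked for it
	 * (MinCsCov > 0) and the sender covers at least that much */
	return min_cov > 0 && cscov >= min_cov;
}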
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index fc4242c..c7aaa25 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -36,13 +36,6 @@
 /* Socket used for sending RSTs and ACKs */
 static struct socket *dccp_v6_ctl_socket;
 
-static void dccp_v6_ctl_send_reset(struct sk_buff *skb);
-static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
-				   struct request_sock *req);
-static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb);
-
-static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
-
 static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
 static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
 
@@ -65,205 +58,37 @@
 	}
 }
 
-static inline u16 dccp_v6_check(struct dccp_hdr *dh, int len,
-				struct in6_addr *saddr,
-				struct in6_addr *daddr,
-				unsigned long base)
+/* add pseudo-header to DCCP checksum stored in skb->csum */
+static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
+				      struct in6_addr *saddr,
+				      struct in6_addr *daddr)
 {
-	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_DCCP, base);
+	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
 }
 
-static __u32 dccp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
+static inline void dccp_v6_send_check(struct sock *sk, int unused_value,
+				      struct sk_buff *skb)
 {
-	const struct dccp_hdr *dh = dccp_hdr(skb);
-
-	if (skb->protocol == htons(ETH_P_IPV6))
-		return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
-						    skb->nh.ipv6h->saddr.s6_addr32,
-						    dh->dccph_dport,
-						    dh->dccph_sport);
-
-	return secure_dccp_sequence_number(skb->nh.iph->daddr,
-					   skb->nh.iph->saddr,
-					   dh->dccph_dport,
-					   dh->dccph_sport);
-}
-
-static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
-			   int addr_len)
-{
-	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
-	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct dccp_sock *dp = dccp_sk(sk);
-	struct in6_addr *saddr = NULL, *final_p = NULL, final;
-	struct flowi fl;
-	struct dst_entry *dst;
-	int addr_type;
-	int err;
+	struct dccp_hdr *dh = dccp_hdr(skb);
 
-	dp->dccps_role = DCCP_ROLE_CLIENT;
+	dccp_csum_outgoing(skb);
+	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
+}
 
-	if (addr_len < SIN6_LEN_RFC2133)
-		return -EINVAL;
+static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+				   		  __be16 sport, __be16 dport   )
+{
+	return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
+}
 
-	if (usin->sin6_family != AF_INET6)
-		return -EAFNOSUPPORT;
+static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
+{
+	return secure_dccpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
+					     skb->nh.ipv6h->saddr.s6_addr32,
+					     dccp_hdr(skb)->dccph_dport,
+					     dccp_hdr(skb)->dccph_sport     );
 
-	memset(&fl, 0, sizeof(fl));
-
-	if (np->sndflow) {
-		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
-		IP6_ECN_flow_init(fl.fl6_flowlabel);
-		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
-			struct ip6_flowlabel *flowlabel;
-			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
-			if (flowlabel == NULL)
-				return -EINVAL;
-			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
-			fl6_sock_release(flowlabel);
-		}
-	}
-	/*
-	 * connect() to INADDR_ANY means loopback (BSD'ism).
-	 */
-	if (ipv6_addr_any(&usin->sin6_addr))
-		usin->sin6_addr.s6_addr[15] = 1;
-
-	addr_type = ipv6_addr_type(&usin->sin6_addr);
-
-	if (addr_type & IPV6_ADDR_MULTICAST)
-		return -ENETUNREACH;
-
-	if (addr_type & IPV6_ADDR_LINKLOCAL) {
-		if (addr_len >= sizeof(struct sockaddr_in6) &&
-		    usin->sin6_scope_id) {
-			/* If interface is set while binding, indices
-			 * must coincide.
-			 */
-			if (sk->sk_bound_dev_if &&
-			    sk->sk_bound_dev_if != usin->sin6_scope_id)
-				return -EINVAL;
-
-			sk->sk_bound_dev_if = usin->sin6_scope_id;
-		}
-
-		/* Connect to link-local address requires an interface */
-		if (!sk->sk_bound_dev_if)
-			return -EINVAL;
-	}
-
-	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
-	np->flow_label = fl.fl6_flowlabel;
-
-	/*
-	 * DCCP over IPv4
-	 */
-	if (addr_type == IPV6_ADDR_MAPPED) {
-		u32 exthdrlen = icsk->icsk_ext_hdr_len;
-		struct sockaddr_in sin;
-
-		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
-
-		if (__ipv6_only_sock(sk))
-			return -ENETUNREACH;
-
-		sin.sin_family = AF_INET;
-		sin.sin_port = usin->sin6_port;
-		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
-
-		icsk->icsk_af_ops = &dccp_ipv6_mapped;
-		sk->sk_backlog_rcv = dccp_v4_do_rcv;
-
-		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
-		if (err) {
-			icsk->icsk_ext_hdr_len = exthdrlen;
-			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
-			sk->sk_backlog_rcv = dccp_v6_do_rcv;
-			goto failure;
-		} else {
-			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
-				      inet->saddr);
-			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
-				      inet->rcv_saddr);
-		}
-
-		return err;
-	}
-
-	if (!ipv6_addr_any(&np->rcv_saddr))
-		saddr = &np->rcv_saddr;
-
-	fl.proto = IPPROTO_DCCP;
-	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
-	ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
-	fl.oif = sk->sk_bound_dev_if;
-	fl.fl_ip_dport = usin->sin6_port;
-	fl.fl_ip_sport = inet->sport;
-	security_sk_classify_flow(sk, &fl);
-
-	if (np->opt != NULL && np->opt->srcrt != NULL) {
-		const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
-
-		ipv6_addr_copy(&final, &fl.fl6_dst);
-		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-		final_p = &final;
-	}
-
-	err = ip6_dst_lookup(sk, &dst, &fl);
-	if (err)
-		goto failure;
-
-	if (final_p)
-		ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-	err = xfrm_lookup(&dst, &fl, sk, 0);
-	if (err < 0)
-		goto failure;
-
-	if (saddr == NULL) {
-		saddr = &fl.fl6_src;
-		ipv6_addr_copy(&np->rcv_saddr, saddr);
-	}
-
-	/* set the source address */
-	ipv6_addr_copy(&np->saddr, saddr);
-	inet->rcv_saddr = LOOPBACK4_IPV6;
-
-	__ip6_dst_store(sk, dst, NULL, NULL);
-
-	icsk->icsk_ext_hdr_len = 0;
-	if (np->opt != NULL)
-		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
-					  np->opt->opt_nflen);
-
-	inet->dport = usin->sin6_port;
-
-	dccp_set_state(sk, DCCP_REQUESTING);
-	err = inet6_hash_connect(&dccp_death_row, sk);
-	if (err)
-		goto late_failure;
-	/* FIXME */
-#if 0
-	dp->dccps_gar = secure_dccp_v6_sequence_number(np->saddr.s6_addr32,
-						       np->daddr.s6_addr32,
-						       inet->sport,
-						       inet->dport);
-#endif
-	err = dccp_connect(sk);
-	if (err)
-		goto late_failure;
-
-	return 0;
-
-late_failure:
-	dccp_set_state(sk, DCCP_CLOSED);
-	__sk_dst_reset(sk);
-failure:
-	inet->dport = 0;
-	sk->sk_route_caps = 0;
-	return err;
 }
 
 static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -464,16 +289,12 @@
 	if (skb != NULL) {
 		struct dccp_hdr *dh = dccp_hdr(skb);
 
-		dh->dccph_checksum = dccp_v6_check(dh, skb->len,
-						   &ireq6->loc_addr,
-						   &ireq6->rmt_addr,
-						   csum_partial((char *)dh,
-								skb->len,
-								skb->csum));
+		dh->dccph_checksum = dccp_v6_csum_finish(skb,
+							 &ireq6->loc_addr,
+							 &ireq6->rmt_addr);
 		ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
 		err = ip6_xmit(sk, skb, &fl, opt, 0);
-		if (err == NET_XMIT_CN)
-			err = 0;
+		err = net_xmit_eval(err);
 	}
 
 done:
@@ -489,32 +310,7 @@
 		kfree_skb(inet6_rsk(req)->pktopts);
 }
 
-static struct request_sock_ops dccp6_request_sock_ops = {
-	.family		= AF_INET6,
-	.obj_size	= sizeof(struct dccp6_request_sock),
-	.rtx_syn_ack	= dccp_v6_send_response,
-	.send_ack	= dccp_v6_reqsk_send_ack,
-	.destructor	= dccp_v6_reqsk_destructor,
-	.send_reset	= dccp_v6_ctl_send_reset,
-};
-
-static struct timewait_sock_ops dccp6_timewait_sock_ops = {
-	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
-};
-
-static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
-{
-	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct dccp_hdr *dh = dccp_hdr(skb);
-
-	dh->dccph_checksum = csum_ipv6_magic(&np->saddr, &np->daddr,
-					     len, IPPROTO_DCCP,
-					     csum_partial((char *)dh,
-							  dh->dccph_doff << 2,
-							  skb->csum));
-}
-
-static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
+static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 {
 	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
 	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
@@ -522,7 +318,7 @@
 				       sizeof(struct dccp_hdr_reset);
 	struct sk_buff *skb;
 	struct flowi fl;
-	u64 seqno;
+	u64 seqno = 0;
 
 	if (rxdh->dccph_type == DCCP_PKT_RESET)
 		return;
@@ -537,9 +333,7 @@
 
 	skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);
 
-	skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
-	dh = dccp_hdr(skb);
-	memset(dh, 0, dccp_hdr_reset_len);
+	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
 
 	/* Swap the send and the receive. */
 	dh->dccph_type	= DCCP_PKT_RESET;
@@ -551,20 +345,20 @@
 				DCCP_SKB_CB(rxskb)->dccpd_reset_code;
 
 	/* See "8.3.1. Abnormal Termination" in RFC 4340 */
-	seqno = 0;
 	if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
 		dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);
 
 	dccp_hdr_set_seq(dh, seqno);
-	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
-			 DCCP_SKB_CB(rxskb)->dccpd_seq);
+	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq);
+
+	dccp_csum_outgoing(skb);
+	dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxskb->nh.ipv6h->saddr,
+				   		      &rxskb->nh.ipv6h->daddr);
 
 	memset(&fl, 0, sizeof(fl));
 	ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
 	ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
-	dh->dccph_checksum = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
-					     sizeof(*dh), IPPROTO_DCCP,
-					     skb->csum);
+
 	fl.proto = IPPROTO_DCCP;
 	fl.oif = inet6_iif(rxskb);
 	fl.fl_ip_dport = dh->dccph_dport;
@@ -584,60 +378,14 @@
 	kfree_skb(skb);
 }
 
-static void dccp_v6_reqsk_send_ack(struct sk_buff *rxskb,
-				   struct request_sock *req)
-{
-	struct flowi fl;
-	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
-	const u32 dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
-				     sizeof(struct dccp_hdr_ext) +
-				     sizeof(struct dccp_hdr_ack_bits);
-	struct sk_buff *skb;
-
-	skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
-			GFP_ATOMIC);
-	if (skb == NULL)
-		return;
-
-	skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);
-
-	skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
-	dh = dccp_hdr(skb);
-	memset(dh, 0, dccp_hdr_ack_len);
-
-	/* Build DCCP header and checksum it. */
-	dh->dccph_type	= DCCP_PKT_ACK;
-	dh->dccph_sport = rxdh->dccph_dport;
-	dh->dccph_dport = rxdh->dccph_sport;
-	dh->dccph_doff	= dccp_hdr_ack_len / 4;
-	dh->dccph_x	= 1;
-
-	dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
-	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
-			 DCCP_SKB_CB(rxskb)->dccpd_seq);
-
-	memset(&fl, 0, sizeof(fl));
-	ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
-	ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
-
-	/* FIXME: calculate checksum, IPv4 also should... */
-
-	fl.proto = IPPROTO_DCCP;
-	fl.oif = inet6_iif(rxskb);
-	fl.fl_ip_dport = dh->dccph_dport;
-	fl.fl_ip_sport = dh->dccph_sport;
-	security_req_classify_flow(req, &fl);
-
-	if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
-		if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
-			ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
-			DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
-			return;
-		}
-	}
-
-	kfree_skb(skb);
-}
+static struct request_sock_ops dccp6_request_sock_ops = {
+	.family		= AF_INET6,
+	.obj_size	= sizeof(struct dccp6_request_sock),
+	.rtx_syn_ack	= dccp_v6_send_response,
+	.send_ack	= dccp_reqsk_send_ack,
+	.destructor	= dccp_v6_reqsk_destructor,
+	.send_reset	= dccp_v6_ctl_send_reset,
+};
 
 static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
 {
@@ -672,7 +420,6 @@
 
 static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
-	struct dccp_sock dp;
 	struct request_sock *req;
 	struct dccp_request_sock *dreq;
 	struct inet6_request_sock *ireq6;
@@ -704,9 +451,10 @@
 	if (req == NULL)
 		goto drop;
 
-	/* FIXME: process options */
+	if (dccp_parse_options(sk, skb))
+		goto drop_and_free;
 
-	dccp_openreq_init(req, &dp, skb);
+	dccp_reqsk_init(req, skb);
 
 	if (security_inet_conn_request(sk, skb, req))
 		goto drop_and_free;
@@ -714,7 +462,6 @@
 	ireq6 = inet6_rsk(req);
 	ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
 	ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
-	req->rcv_wnd	= dccp_feat_default_sequence_window;
 	ireq6->pktopts	= NULL;
 
 	if (ipv6_opt_accepted(sk, skb) ||
@@ -733,14 +480,14 @@
 	/*
 	 * Step 3: Process LISTEN state
 	 *
-	 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
+	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
 	 *
-	 * In fact we defer setting S.GSR, S.SWL, S.SWH to
-	 * dccp_create_openreq_child.
+	 *   In fact we defer setting S.GSR, S.SWL, S.SWH to
+	 *   dccp_create_openreq_child.
 	 */
 	dreq = dccp_rsk(req);
 	dreq->dreq_isr	   = dcb->dccpd_seq;
-	dreq->dreq_iss	   = dccp_v6_init_sequence(sk, skb);
+	dreq->dreq_iss	   = dccp_v6_init_sequence(skb);
 	dreq->dreq_service = service;
 
 	if (dccp_v6_send_response(sk, req, NULL))
@@ -990,18 +737,46 @@
 	                                       --ANK (980728)
 	 */
 	if (np->rxopt.all)
+	/*
+	 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
+	 *        (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
+	 */
 		opt_skb = skb_clone(skb, GFP_ATOMIC);
 
 	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
 		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
 			goto reset;
 		if (opt_skb) {
-			/* This is where we would goto ipv6_pktoptions. */
+			/* XXX This is where we would goto ipv6_pktoptions. */
 			__kfree_skb(opt_skb);
 		}
 		return 0;
 	}
 
+	/*
+	 *  Step 3: Process LISTEN state
+	 *     If S.state == LISTEN,
+	 *	 If P.type == Request or P contains a valid Init Cookie option,
+	 *	      (* Must scan the packet's options to check for Init
+	 *		 Cookies.  Only Init Cookies are processed here,
+	 *		 however; other options are processed in Step 8.  This
+	 *		 scan need only be performed if the endpoint uses Init
+	 *		 Cookies *)
+	 *	      (* Generate a new socket and switch to that socket *)
+	 *	      Set S := new socket for this port pair
+	 *	      S.state = RESPOND
+	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
+	 *	      Initialize S.GAR := S.ISS
+	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
+	 *	      Continue with S.state == RESPOND
+	 *	      (* A Response packet will be generated in Step 11 *)
+	 *	 Otherwise,
+	 *	      Generate Reset(No Connection) unless P.type == Reset
+	 *	      Drop packet and return
+	 *
+	 * NOTE: the check for the packet types is done in
+	 *	 dccp_rcv_state_process
+	 */
 	if (sk->sk_state == DCCP_LISTEN) {
 		struct sock *nsk = dccp_v6_hnd_req(sk, skb);
 
@@ -1024,13 +799,13 @@
 	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
 		goto reset;
 	if (opt_skb) {
-		/* This is where we would goto ipv6_pktoptions. */
+		/* XXX This is where we would goto ipv6_pktoptions. */
 		__kfree_skb(opt_skb);
 	}
 	return 0;
 
 reset:
-	dccp_v6_ctl_send_reset(skb);
+	dccp_v6_ctl_send_reset(sk, skb);
 discard:
 	if (opt_skb != NULL)
 		__kfree_skb(opt_skb);
@@ -1043,12 +818,20 @@
 	const struct dccp_hdr *dh;
 	struct sk_buff *skb = *pskb;
 	struct sock *sk;
+	int min_cov;
 
-	/* Step 1: Check header basics: */
+	/* Step 1: Check header basics */
 
 	if (dccp_invalid_packet(skb))
 		goto discard_it;
 
+	/* Step 1: If header checksum is incorrect, drop packet and return. */
+	if (dccp_v6_csum_finish(skb, &skb->nh.ipv6h->saddr,
+				     &skb->nh.ipv6h->daddr)) {
+		DCCP_WARN("dropped packet with invalid checksum\n");
+		goto discard_it;
+	}
+
 	dh = dccp_hdr(skb);
 
 	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(skb);
@@ -1068,11 +851,12 @@
 	/*
 	 * Step 2:
 	 * 	If no socket ...
-	 *		Generate Reset(No Connection) unless P.type == Reset
-	 *		Drop packet and return
 	 */
-	if (sk == NULL)
+	if (sk == NULL) {
+		dccp_pr_debug("failed to look up flow ID in table and "
+			      "get corresponding socket\n");
 		goto no_dccp_socket;
+	}
 
 	/*
 	 * Step 2:
@@ -1080,43 +864,226 @@
 	 *		Generate Reset(No Connection) unless P.type == Reset
 	 *		Drop packet and return
 	 */
-	if (sk->sk_state == DCCP_TIME_WAIT)
-		goto do_time_wait;
+	if (sk->sk_state == DCCP_TIME_WAIT) {
+		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
+		inet_twsk_put(inet_twsk(sk));
+		goto no_dccp_socket;
+	}
+
+	/*
+	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
+	 * 	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
+	 * 	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
+	 */
+	min_cov = dccp_sk(sk)->dccps_pcrlen;
+	if (dh->dccph_cscov  &&  (min_cov == 0 || dh->dccph_cscov < min_cov))  {
+		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
+			      dh->dccph_cscov, min_cov);
+		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
+		goto discard_and_relse;
+	}
 
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
 
-	return sk_receive_skb(sk, skb) ? -1 : 0;
+	return sk_receive_skb(sk, skb, 1) ? -1 : 0;
 
 no_dccp_socket:
 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
 		goto discard_it;
 	/*
 	 * Step 2:
+	 * 	If no socket ...
 	 *		Generate Reset(No Connection) unless P.type == Reset
 	 *		Drop packet and return
 	 */
 	if (dh->dccph_type != DCCP_PKT_RESET) {
 		DCCP_SKB_CB(skb)->dccpd_reset_code =
 					DCCP_RESET_CODE_NO_CONNECTION;
-		dccp_v6_ctl_send_reset(skb);
+		dccp_v6_ctl_send_reset(sk, skb);
 	}
+
 discard_it:
-
-	/*
-	 *	Discard frame
-	 */
-
 	kfree_skb(skb);
 	return 0;
 
 discard_and_relse:
 	sock_put(sk);
 	goto discard_it;
+}
 
-do_time_wait:
-	inet_twsk_put(inet_twsk(sk));
-	goto no_dccp_socket;
+static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+			   int addr_len)
+{
+	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct inet_sock *inet = inet_sk(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct dccp_sock *dp = dccp_sk(sk);
+	struct in6_addr *saddr = NULL, *final_p = NULL, final;
+	struct flowi fl;
+	struct dst_entry *dst;
+	int addr_type;
+	int err;
+
+	dp->dccps_role = DCCP_ROLE_CLIENT;
+
+	if (addr_len < SIN6_LEN_RFC2133)
+		return -EINVAL;
+
+	if (usin->sin6_family != AF_INET6)
+		return -EAFNOSUPPORT;
+
+	memset(&fl, 0, sizeof(fl));
+
+	if (np->sndflow) {
+		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
+		IP6_ECN_flow_init(fl.fl6_flowlabel);
+		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
+			struct ip6_flowlabel *flowlabel;
+			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+			if (flowlabel == NULL)
+				return -EINVAL;
+			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+			fl6_sock_release(flowlabel);
+		}
+	}
+	/*
+	 * connect() to INADDR_ANY means loopback (BSD'ism).
+	 */
+	if (ipv6_addr_any(&usin->sin6_addr))
+		usin->sin6_addr.s6_addr[15] = 1;
+
+	addr_type = ipv6_addr_type(&usin->sin6_addr);
+
+	if (addr_type & IPV6_ADDR_MULTICAST)
+		return -ENETUNREACH;
+
+	if (addr_type & IPV6_ADDR_LINKLOCAL) {
+		if (addr_len >= sizeof(struct sockaddr_in6) &&
+		    usin->sin6_scope_id) {
+			/* If interface is set while binding, indices
+			 * must coincide.
+			 */
+			if (sk->sk_bound_dev_if &&
+			    sk->sk_bound_dev_if != usin->sin6_scope_id)
+				return -EINVAL;
+
+			sk->sk_bound_dev_if = usin->sin6_scope_id;
+		}
+
+		/* Connect to link-local address requires an interface */
+		if (!sk->sk_bound_dev_if)
+			return -EINVAL;
+	}
+
+	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
+	np->flow_label = fl.fl6_flowlabel;
+
+	/*
+	 * DCCP over IPv4
+	 */
+	if (addr_type == IPV6_ADDR_MAPPED) {
+		u32 exthdrlen = icsk->icsk_ext_hdr_len;
+		struct sockaddr_in sin;
+
+		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
+
+		if (__ipv6_only_sock(sk))
+			return -ENETUNREACH;
+
+		sin.sin_family = AF_INET;
+		sin.sin_port = usin->sin6_port;
+		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
+
+		icsk->icsk_af_ops = &dccp_ipv6_mapped;
+		sk->sk_backlog_rcv = dccp_v4_do_rcv;
+
+		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
+		if (err) {
+			icsk->icsk_ext_hdr_len = exthdrlen;
+			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
+			sk->sk_backlog_rcv = dccp_v6_do_rcv;
+			goto failure;
+		} else {
+			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
+				      inet->saddr);
+			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
+				      inet->rcv_saddr);
+		}
+
+		return err;
+	}
+
+	if (!ipv6_addr_any(&np->rcv_saddr))
+		saddr = &np->rcv_saddr;
+
+	fl.proto = IPPROTO_DCCP;
+	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
+	ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
+	fl.oif = sk->sk_bound_dev_if;
+	fl.fl_ip_dport = usin->sin6_port;
+	fl.fl_ip_sport = inet->sport;
+	security_sk_classify_flow(sk, &fl);
+
+	if (np->opt != NULL && np->opt->srcrt != NULL) {
+		const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
+
+		ipv6_addr_copy(&final, &fl.fl6_dst);
+		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
+		final_p = &final;
+	}
+
+	err = ip6_dst_lookup(sk, &dst, &fl);
+	if (err)
+		goto failure;
+
+	if (final_p)
+		ipv6_addr_copy(&fl.fl6_dst, final_p);
+
+	err = xfrm_lookup(&dst, &fl, sk, 0);
+	if (err < 0)
+		goto failure;
+
+	if (saddr == NULL) {
+		saddr = &fl.fl6_src;
+		ipv6_addr_copy(&np->rcv_saddr, saddr);
+	}
+
+	/* set the source address */
+	ipv6_addr_copy(&np->saddr, saddr);
+	inet->rcv_saddr = LOOPBACK4_IPV6;
+
+	__ip6_dst_store(sk, dst, NULL, NULL);
+
+	icsk->icsk_ext_hdr_len = 0;
+	if (np->opt != NULL)
+		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
+					  np->opt->opt_nflen);
+
+	inet->dport = usin->sin6_port;
+
+	dccp_set_state(sk, DCCP_REQUESTING);
+	err = inet6_hash_connect(&dccp_death_row, sk);
+	if (err)
+		goto late_failure;
+
+	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
+						      np->daddr.s6_addr32,
+						      inet->sport, inet->dport);
+	err = dccp_connect(sk);
+	if (err)
+		goto late_failure;
+
+	return 0;
+
+late_failure:
+	dccp_set_state(sk, DCCP_CLOSED);
+	__sk_dst_reset(sk);
+failure:
+	inet->dport = 0;
+	sk->sk_route_caps = 0;
+	return err;
 }
 
 static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
@@ -1179,6 +1146,10 @@
 	return inet6_destroy_sock(sk);
 }
 
+static struct timewait_sock_ops dccp6_timewait_sock_ops = {
+	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
+};
+
 static struct proto dccp_v6_prot = {
 	.name		   = "DCCPv6",
 	.owner		   = THIS_MODULE,
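
The Minimum Checksum Coverage test added to the v6 receive path above implements the receiver rule of RFC 4340, sec. 9.2.1: CsCov = 0 is always acceptable, and partial coverage is acceptable only if MinCsCov is non-zero and CsCov >= MinCsCov. A minimal standalone sketch of that predicate (function name and types are illustrative, not kernel API):

#include <stdbool.h>

/* cscov:   CsCov field of the incoming packet (0 = full coverage)
 * min_cov: local MinCsCov setting (dccps_pcrlen in the hunk above)  */
static bool cscov_acceptable(unsigned int cscov, unsigned int min_cov)
{
	if (cscov == 0)		/* fully covered packets are always fine */
		return true;
	/* partial coverage needs MinCsCov > 0 and CsCov >= MinCsCov */
	return min_cov > 0 && cscov >= min_cov;
}
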
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 9045438..4c9e267 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -11,6 +11,7 @@
  */
 
 #include <linux/dccp.h>
+#include <linux/kernel.h>
 #include <linux/skbuff.h>
 #include <linux/timer.h>
 
@@ -31,8 +32,7 @@
 	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
 					    (unsigned long)&dccp_death_row),
 	.twkill_work	= __WORK_INITIALIZER(dccp_death_row.twkill_work,
-					     inet_twdr_twkill_work,
-					     &dccp_death_row),
+					     inet_twdr_twkill_work),
 /* Short-time timewait calendar */
 
 	.twcal_hand	= -1,
@@ -83,8 +83,7 @@
 		 * socket up.  We've got bigger problems than
 		 * non-graceful socket closings.
 		 */
-		LIMIT_NETDEBUG(KERN_INFO "DCCP: time wait bucket "
-					 "table overflow\n");
+		DCCP_WARN("time wait bucket table overflow\n");
 	}
 
 	dccp_done(sk);
@@ -97,8 +96,8 @@
 	/*
 	 * Step 3: Process LISTEN state
 	 *
-	 * // Generate a new socket and switch to that socket
-	 * Set S := new socket for this port pair
+	 *   (* Generate a new socket and switch to that socket *)
+	 *   Set S := new socket for this port pair
 	 */
 	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
 
@@ -147,9 +146,9 @@
 		/*
 		 * Step 3: Process LISTEN state
 		 *
-		 *	Choose S.ISS (initial seqno) or set from Init Cookie
-		 *	Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init
-		 *	Cookie
+		 *    Choose S.ISS (initial seqno) or set from Init Cookies
+		 *    Initialize S.GAR := S.ISS
+		 *    Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
 		 */
 
 		/* See dccp_v4_conn_request */
@@ -195,15 +194,17 @@
 
 	/* Check for retransmitted REQUEST */
 	if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
-		if (after48(DCCP_SKB_CB(skb)->dccpd_seq,
-			    dccp_rsk(req)->dreq_isr)) {
-			struct dccp_request_sock *dreq = dccp_rsk(req);
+		struct dccp_request_sock *dreq = dccp_rsk(req);
 
+		if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_isr)) {
 			dccp_pr_debug("Retransmitted REQUEST\n");
-			/* Send another RESPONSE packet */
-			dccp_set_seqno(&dreq->dreq_iss, dreq->dreq_iss + 1);
-			dccp_set_seqno(&dreq->dreq_isr,
-				       DCCP_SKB_CB(skb)->dccpd_seq);
+			dreq->dreq_isr = DCCP_SKB_CB(skb)->dccpd_seq;
+			/*
+			 * Send another RESPONSE packet
+			 * To protect against Request floods, increment retrans
+			 * counter (backoff, monitored by dccp_response_timer).
+			 */
+			req->retrans++;
 			req->rsk_ops->rtx_syn_ack(sk, req, NULL);
 		}
 		/* Network Duplicate, discard packet */
@@ -243,7 +244,7 @@
 	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
 drop:
 	if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
-		req->rsk_ops->send_reset(skb);
+		req->rsk_ops->send_reset(sk, skb);
 
 	inet_csk_reqsk_queue_drop(sk, req, prev);
 	goto out;
@@ -283,3 +284,19 @@
 }
 
 EXPORT_SYMBOL_GPL(dccp_child_process);
+
+void dccp_reqsk_send_ack(struct sk_buff *skb, struct request_sock *rsk)
+{
+	DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state");
+}
+
+EXPORT_SYMBOL_GPL(dccp_reqsk_send_ack);
+
+void dccp_reqsk_init(struct request_sock *req, struct sk_buff *skb)
+{
+	inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport;
+	inet_rsk(req)->acked	= 0;
+	req->rcv_wnd		= sysctl_dccp_feat_sequence_window;
+}
+
+EXPORT_SYMBOL_GPL(dccp_reqsk_init);
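
The retransmitted-Request check above compares 48-bit sequence numbers with after48(), so ordering has to be evaluated modulo 2^48. A hedged illustration of the idea; these helpers are written for this note and are not the kernel's macros:

#include <stdint.h>
#include <stdbool.h>

#define SEQ48_MAX	((UINT64_C(1) << 48) - 1)

/* true iff s1 comes after s2 in 48-bit circular sequence space */
static bool seq48_after(uint64_t s1, uint64_t s2)
{
	/* shift both into the top 48 bits so the signed difference wraps */
	return (int64_t)((s2 << 16) - (s1 << 16)) < 0;
}

/* advance a 48-bit sequence number, wrapping at 2^48 */
static uint64_t seq48_add(uint64_t seq, uint64_t n)
{
	return (seq + n) & SEQ48_MAX;
}
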
diff --git a/net/dccp/options.c b/net/dccp/options.c
index fb0db1f..f398b43 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -22,23 +22,23 @@
 #include "dccp.h"
 #include "feat.h"
 
-int dccp_feat_default_sequence_window = DCCPF_INITIAL_SEQUENCE_WINDOW;
-int dccp_feat_default_rx_ccid	      = DCCPF_INITIAL_CCID;
-int dccp_feat_default_tx_ccid	      = DCCPF_INITIAL_CCID;
-int dccp_feat_default_ack_ratio	      = DCCPF_INITIAL_ACK_RATIO;
-int dccp_feat_default_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR;
-int dccp_feat_default_send_ndp_count  = DCCPF_INITIAL_SEND_NDP_COUNT;
+int sysctl_dccp_feat_sequence_window = DCCPF_INITIAL_SEQUENCE_WINDOW;
+int sysctl_dccp_feat_rx_ccid	      = DCCPF_INITIAL_CCID;
+int sysctl_dccp_feat_tx_ccid	      = DCCPF_INITIAL_CCID;
+int sysctl_dccp_feat_ack_ratio	      = DCCPF_INITIAL_ACK_RATIO;
+int sysctl_dccp_feat_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR;
+int sysctl_dccp_feat_send_ndp_count  = DCCPF_INITIAL_SEND_NDP_COUNT;
 
-EXPORT_SYMBOL_GPL(dccp_feat_default_sequence_window);
+EXPORT_SYMBOL_GPL(sysctl_dccp_feat_sequence_window);
 
 void dccp_minisock_init(struct dccp_minisock *dmsk)
 {
-	dmsk->dccpms_sequence_window = dccp_feat_default_sequence_window;
-	dmsk->dccpms_rx_ccid	     = dccp_feat_default_rx_ccid;
-	dmsk->dccpms_tx_ccid	     = dccp_feat_default_tx_ccid;
-	dmsk->dccpms_ack_ratio	     = dccp_feat_default_ack_ratio;
-	dmsk->dccpms_send_ack_vector = dccp_feat_default_send_ack_vector;
-	dmsk->dccpms_send_ndp_count  = dccp_feat_default_send_ndp_count;
+	dmsk->dccpms_sequence_window = sysctl_dccp_feat_sequence_window;
+	dmsk->dccpms_rx_ccid	     = sysctl_dccp_feat_rx_ccid;
+	dmsk->dccpms_tx_ccid	     = sysctl_dccp_feat_tx_ccid;
+	dmsk->dccpms_ack_ratio	     = sysctl_dccp_feat_ack_ratio;
+	dmsk->dccpms_send_ack_vector = sysctl_dccp_feat_send_ack_vector;
+	dmsk->dccpms_send_ndp_count  = sysctl_dccp_feat_send_ndp_count;
 }
 
 static u32 dccp_decode_value_var(const unsigned char *bf, const u8 len)
@@ -60,12 +60,9 @@
 int dccp_parse_options(struct sock *sk, struct sk_buff *skb)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
-#ifdef CONFIG_IP_DCCP_DEBUG
-	const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ?
-					"CLIENT rx opt: " : "server rx opt: ";
-#endif
 	const struct dccp_hdr *dh = dccp_hdr(skb);
 	const u8 pkt_type = DCCP_SKB_CB(skb)->dccpd_type;
+	u64 ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
 	unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
 	unsigned char *opt_ptr = options;
 	const unsigned char *opt_end = (unsigned char *)dh +
@@ -119,7 +116,7 @@
 				goto out_invalid_option;
 
 			opt_recv->dccpor_ndp = dccp_decode_value_var(value, len);
-			dccp_pr_debug("%sNDP count=%d\n", debug_prefix,
+			dccp_pr_debug("%s rx opt: NDP count=%d\n", dccp_role(sk),
 				      opt_recv->dccpor_ndp);
 			break;
 		case DCCPO_CHANGE_L:
@@ -153,7 +150,7 @@
 				break;
 
 			if (dccp_msk(sk)->dccpms_send_ack_vector &&
-			    dccp_ackvec_parse(sk, skb, opt, value, len))
+			    dccp_ackvec_parse(sk, skb, &ackno, opt, value, len))
 				goto out_invalid_option;
 			break;
 		case DCCPO_TIMESTAMP:
@@ -165,8 +162,8 @@
 			dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp;
 			dccp_timestamp(sk, &dp->dccps_timestamp_time);
 
-			dccp_pr_debug("%sTIMESTAMP=%u, ackno=%llu\n",
-				      debug_prefix, opt_recv->dccpor_timestamp,
+			dccp_pr_debug("%s rx opt: TIMESTAMP=%u, ackno=%llu\n",
+				      dccp_role(sk), opt_recv->dccpor_timestamp,
 				      (unsigned long long)
 				      DCCP_SKB_CB(skb)->dccpd_ack_seq);
 			break;
@@ -176,8 +173,8 @@
 
 			opt_recv->dccpor_timestamp_echo = ntohl(*(__be32 *)value);
 
-			dccp_pr_debug("%sTIMESTAMP_ECHO=%u, len=%d, ackno=%llu, ",
-				      debug_prefix,
+			dccp_pr_debug("%s rx opt: TIMESTAMP_ECHO=%u, len=%d, "
+				      "ackno=%llu, ",  dccp_role(sk),
 				      opt_recv->dccpor_timestamp_echo,
 				      len + 2,
 				      (unsigned long long)
@@ -211,8 +208,8 @@
 			if (elapsed_time > opt_recv->dccpor_elapsed_time)
 				opt_recv->dccpor_elapsed_time = elapsed_time;
 
-			dccp_pr_debug("%sELAPSED_TIME=%d\n", debug_prefix,
-				      elapsed_time);
+			dccp_pr_debug("%s rx opt: ELAPSED_TIME=%d\n",
+				      dccp_role(sk), elapsed_time);
 			break;
 			/*
 			 * From RFC 4340, sec. 10.3:
@@ -242,9 +239,8 @@
 		}
 			break;
 		default:
-			pr_info("DCCP(%p): option %d(len=%d) not "
-				"implemented, ignoring\n",
-				sk, opt, len);
+			DCCP_CRIT("DCCP(%p): option %d(len=%d) not "
+				  "implemented, ignoring", sk, opt, len);
 			break;
 	        }
 
@@ -261,7 +257,7 @@
 out_invalid_option:
 	DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT);
 	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR;
-	pr_info("DCCP(%p): invalid option %d, len=%d\n", sk, opt, len);
+	DCCP_WARN("DCCP(%p): invalid option %d, len=%d", sk, opt, len);
 	return -1;
 }
 
@@ -451,8 +447,7 @@
 	u8 *to;
 
 	if (DCCP_SKB_CB(skb)->dccpd_opt_len + len + 3 > DCCP_MAX_OPT_LEN) {
-		LIMIT_NETDEBUG(KERN_INFO "DCCP: packet too small"
-			       " to insert feature %d option!\n", feat);
+		DCCP_WARN("packet too small for feature %d option!\n", feat);
 		return -1;
 	}
 
@@ -465,8 +460,10 @@
 
 	if (len)
 		memcpy(to, val, len);
-	dccp_pr_debug("option %d feat %d len %d\n", type, feat, len);
 
+	dccp_pr_debug("%s(%s (%d), ...), length %d\n",
+		      dccp_feat_typename(type),
+		      dccp_feat_name(feat), feat, len);
 	return 0;
 }
 
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 7102e3a..400c30b 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -88,16 +88,15 @@
 			return -EPROTO;
 		}
 		
-		skb->h.raw = skb_push(skb, dccp_header_size);
-		dh = dccp_hdr(skb);
 
 		/* Build DCCP header and checksum it. */
-		memset(dh, 0, dccp_header_size);
+		dh = dccp_zeroed_hdr(skb, dccp_header_size);
 		dh->dccph_type	= dcb->dccpd_type;
 		dh->dccph_sport	= inet->sport;
 		dh->dccph_dport	= inet->dport;
 		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
 		dh->dccph_ccval	= dcb->dccpd_ccval;
+		dh->dccph_cscov = dp->dccps_pcslen;
 		/* XXX For now we're using only 48 bits sequence numbers */
 		dh->dccph_x	= 1;
 
@@ -117,7 +116,7 @@
 			break;
 		}
 
-		icsk->icsk_af_ops->send_check(sk, skb->len, skb);
+		icsk->icsk_af_ops->send_check(sk, 0, skb);
 
 		if (set_ack)
 			dccp_event_ack_sent(sk);
@@ -125,17 +124,8 @@
 		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 
 		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-		err = icsk->icsk_af_ops->queue_xmit(skb, 0);
-		if (err <= 0)
-			return err;
-
-		/* NET_XMIT_CN is special. It does not guarantee,
-		 * that this packet is lost. It tells that device
-		 * is about to start to drop packets or already
-		 * drops some packets of the same priority and
-		 * invokes us to send less aggressively.
-		 */
-		return err == NET_XMIT_CN ? 0 : err;
+		err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0);
+		return net_xmit_eval(err);
 	}
 	return -ENOBUFS;
 }
@@ -205,8 +195,7 @@
 		if (signal_pending(current))
 			goto do_interrupted;
 
-		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
-					    skb->len);
+		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
 		if (rc <= 0)
 			break;
 		delay = msecs_to_jiffies(rc);
@@ -251,25 +240,23 @@
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct sk_buff *skb;
-	long timeo = 30000; 	/* If a packet is taking longer than 2 secs
-				   we have other issues */
+	long timeo = DCCP_XMIT_TIMEO; 	/* If a packet is taking longer than
+					   this we have other issues */
 
 	while ((skb = skb_peek(&sk->sk_write_queue))) {
-		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
-					 skb->len);
+		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
 
 		if (err > 0) {
 			if (!block) {
 				sk_reset_timer(sk, &dp->dccps_xmit_timer,
 						msecs_to_jiffies(err)+jiffies);
 				break;
-			} else
+			} else {
 				err = dccp_wait_for_ccid(sk, skb, &timeo);
-			if (err) {
-				printk(KERN_CRIT "%s:err at dccp_wait_for_ccid"
-						 " %d\n", __FUNCTION__, err);
-				dump_stack();
+				timeo = DCCP_XMIT_TIMEO;
 			}
+			if (err)
+				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
 		}
 
 		skb_dequeue(&sk->sk_write_queue);
@@ -291,12 +278,9 @@
 
 			err = dccp_transmit_skb(sk, skb);
 			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
-			if (err) {
-				printk(KERN_CRIT "%s:err from "
-					         "ccid_hc_tx_packet_sent %d\n",
-					         __FUNCTION__, err);
-				dump_stack();
-			}
+			if (err)
+				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
+					 err);
 		} else
 			kfree(skb);
 	}
@@ -329,9 +313,10 @@
 	skb_reserve(skb, sk->sk_prot->max_header);
 
 	skb->dst = dst_clone(dst);
-	skb->csum = 0;
 
 	dreq = dccp_rsk(req);
+	if (inet_rsk(req)->acked)	/* increase ISS upon retransmission */
+		dccp_inc_seqno(&dreq->dreq_iss);
 	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
 	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;
 
@@ -340,10 +325,8 @@
 		return NULL;
 	}
 
-	skb->h.raw = skb_push(skb, dccp_header_size);
-
-	dh = dccp_hdr(skb);
-	memset(dh, 0, dccp_header_size);
+	/* Build and checksum header */
+	dh = dccp_zeroed_hdr(skb, dccp_header_size);
 
 	dh->dccph_sport	= inet_sk(sk)->sport;
 	dh->dccph_dport	= inet_rsk(req)->rmt_port;
@@ -355,6 +338,10 @@
 	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
 	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;
 
+	dccp_csum_outgoing(skb);
+
+	/* We use `acked' to remember that a Response was already sent. */
+	inet_rsk(req)->acked = 1;
 	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 	return skb;
 }
@@ -379,7 +366,6 @@
 	skb_reserve(skb, sk->sk_prot->max_header);
 
 	skb->dst = dst_clone(dst);
-	skb->csum = 0;
 
 	dccp_inc_seqno(&dp->dccps_gss);
 
@@ -392,10 +378,7 @@
 		return NULL;
 	}
 
-	skb->h.raw = skb_push(skb, dccp_header_size);
-
-	dh = dccp_hdr(skb);
-	memset(dh, 0, dccp_header_size);
+	dh = dccp_zeroed_hdr(skb, dccp_header_size);
 
 	dh->dccph_sport	= inet_sk(sk)->sport;
 	dh->dccph_dport	= inet_sk(sk)->dport;
@@ -407,7 +390,7 @@
 	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);
 
 	dccp_hdr_reset(skb)->dccph_reset_code = code;
-	inet_csk(sk)->icsk_af_ops->send_check(sk, skb->len, skb);
+	inet_csk(sk)->icsk_af_ops->send_check(sk, 0, skb);
 
 	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 	return skb;
@@ -426,9 +409,8 @@
 						      code);
 		if (skb != NULL) {
 			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-			err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, 0);
-			if (err == NET_XMIT_CN)
-				err = 0;
+			err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, sk, 0);
+			return net_xmit_eval(err);
 		}
 	}
 
@@ -449,7 +431,6 @@
 	
 	dccp_sync_mss(sk, dst_mtu(dst));
 
-	dccp_update_gss(sk, dp->dccps_iss);
  	/*
 	 * SWL and AWL are initially adjusted so that they are not less than
 	 * the initial Sequence Numbers received and sent, respectively:
@@ -458,8 +439,13 @@
 	 * These adjustments MUST be applied only at the beginning of the
 	 * connection.
  	 */
+	dccp_update_gss(sk, dp->dccps_iss);
 	dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));
 
+	/* S.GAR - greatest valid acknowledgement number received on a non-Sync;
+	 *         initialized to S.ISS (sec. 8.5)                            */
+	dp->dccps_gar = dp->dccps_iss;
+
 	icsk->icsk_retransmits = 0;
 	init_timer(&dp->dccps_xmit_timer);
 	dp->dccps_xmit_timer.data = (unsigned long)sk;
@@ -481,7 +467,6 @@
 	skb_reserve(skb, sk->sk_prot->max_header);
 
 	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
-	skb->csum = 0;
 
 	dccp_skb_entail(sk, skb);
 	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
@@ -513,7 +498,6 @@
 
 		/* Reserve space for headers */
 		skb_reserve(skb, sk->sk_prot->max_header);
-		skb->csum = 0;
 		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
 		dccp_transmit_skb(sk, skb);
 	}
@@ -567,7 +551,6 @@
 
 	/* Reserve space for headers and prepare control bits. */
 	skb_reserve(skb, sk->sk_prot->max_header);
-	skb->csum = 0;
 	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
 	DCCP_SKB_CB(skb)->dccpd_seq = seq;
 
@@ -593,7 +576,6 @@
 
 	/* Reserve space for headers and prepare control bits. */
 	skb_reserve(skb, sk->sk_prot->max_header);
-	skb->csum = 0;
 	DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
 					DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;
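
dccp_transmit_skb() and the reset-sending path now pass the queue_xmit() result through net_xmit_eval() instead of open-coding the NET_XMIT_CN case that the deleted comment explained. Conceptually the helper reduces to the one-liner below (a sketch, not the header's literal definition):

/* NET_XMIT_CN means the device is starting to drop packets of this
 * priority, not that this particular packet was lost, so it is folded
 * into success; any other non-zero code is propagated unchanged.      */
#define xmit_eval_sketch(e)	((e) == NET_XMIT_CN ? 0 : (e))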
 
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index fded149..f81e37d 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -106,8 +106,10 @@
 }
 
 static struct jprobe dccp_send_probe = {
-	.kp	= { .addr = (kprobe_opcode_t *)&dccp_sendmsg, },
-	.entry	= (kprobe_opcode_t *)&jdccp_sendmsg,
+	.kp	= {
+		.symbol_name = "dccp_sendmsg",
+	},
+	.entry	= JPROBE_ENTRY(jdccp_sendmsg),
 };
 
 static int dccpprobe_open(struct inode *inode, struct file *file)
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 72cbdcf..5ec47d9 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -52,6 +52,9 @@
 
 EXPORT_SYMBOL_GPL(dccp_hashinfo);
 
+/* the maximum queue length for tx in packets. 0 is no limit */
+int sysctl_dccp_tx_qlen __read_mostly = 5;
+
 void dccp_set_state(struct sock *sk, const int state)
 {
 	const int oldstate = sk->sk_state;
@@ -212,6 +215,7 @@
 
 	dccp_init_xmit_timers(sk);
 	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
+	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
 	sk->sk_state		= DCCP_CLOSED;
 	sk->sk_write_space	= dccp_write_space;
 	icsk->icsk_sync_mss	= dccp_sync_mss;
@@ -262,12 +266,12 @@
 
 EXPORT_SYMBOL_GPL(dccp_destroy_sock);
 
-static inline int dccp_listen_start(struct sock *sk)
+static inline int dccp_listen_start(struct sock *sk, int backlog)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 
 	dp->dccps_role = DCCP_ROLE_LISTEN;
-	return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
+	return inet_csk_listen_start(sk, backlog);
 }
 
 int dccp_disconnect(struct sock *sk, int flags)
@@ -451,9 +455,8 @@
 static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
 		char __user *optval, int optlen)
 {
-	struct dccp_sock *dp;
-	int err;
-	int val;
+	struct dccp_sock *dp = dccp_sk(sk);
+	int val, err = 0;
 
 	if (optlen < sizeof(int))
 		return -EINVAL;
@@ -465,14 +468,11 @@
 		return dccp_setsockopt_service(sk, val, optval, optlen);
 
 	lock_sock(sk);
-	dp = dccp_sk(sk);
-	err = 0;
-
 	switch (optname) {
 	case DCCP_SOCKOPT_PACKET_SIZE:
-		dp->dccps_packet_size = val;
+		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
+		err = 0;
 		break;
-
 	case DCCP_SOCKOPT_CHANGE_L:
 		if (optlen != sizeof(struct dccp_so_feat))
 			err = -EINVAL;
@@ -481,7 +481,6 @@
 					             (struct dccp_so_feat __user *)
 						     optval);
 		break;
-
 	case DCCP_SOCKOPT_CHANGE_R:
 		if (optlen != sizeof(struct dccp_so_feat))
 			err = -EINVAL;
@@ -490,12 +489,26 @@
 						     (struct dccp_so_feat __user *)
 						     optval);
 		break;
-
+	case DCCP_SOCKOPT_SEND_CSCOV:	/* sender side, RFC 4340, sec. 9.2 */
+		if (val < 0 || val > 15)
+			err = -EINVAL;
+		else
+			dp->dccps_pcslen = val;
+		break;
+	case DCCP_SOCKOPT_RECV_CSCOV:	/* receiver side, RFC 4340 sec. 9.2.1 */
+		if (val < 0 || val > 15)
+			err = -EINVAL;
+		else {
+			dp->dccps_pcrlen = val;
+			/* FIXME: add feature negotiation,
+			 * ChangeL(MinimumChecksumCoverage, val) */
+		}
+		break;
 	default:
 		err = -ENOPROTOOPT;
 		break;
 	}
-	
+
 	release_sock(sk);
 	return err;
 }
@@ -569,12 +582,17 @@
 
 	switch (optname) {
 	case DCCP_SOCKOPT_PACKET_SIZE:
-		val = dp->dccps_packet_size;
-		len = sizeof(dp->dccps_packet_size);
-		break;
+		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
+		return 0;
 	case DCCP_SOCKOPT_SERVICE:
 		return dccp_getsockopt_service(sk, len,
 					       (__be32 __user *)optval, optlen);
+	case DCCP_SOCKOPT_SEND_CSCOV:
+		val = dp->dccps_pcslen;
+		break;
+	case DCCP_SOCKOPT_RECV_CSCOV:
+		val = dp->dccps_pcrlen;
+		break;
 	case 128 ... 191:
 		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
 					     len, (u32 __user *)optval, optlen);
@@ -630,6 +648,13 @@
 		return -EMSGSIZE;
 
 	lock_sock(sk);
+
+	if (sysctl_dccp_tx_qlen &&
+	    (sk->sk_write_queue.qlen >= sysctl_dccp_tx_qlen)) {
+		rc = -EAGAIN;
+		goto out_release;
+	}
+
 	timeo = sock_sndtimeo(sk, noblock);
 
 	/*
@@ -788,7 +813,7 @@
 		 * FIXME: here it probably should be sk->sk_prot->listen_start
 		 * see tcp_listen_start
 		 */
-		err = dccp_listen_start(sk);
+		err = dccp_listen_start(sk, backlog);
 		if (err)
 			goto out;
 	}
@@ -1008,8 +1033,7 @@
 	} while (!dccp_hashinfo.ehash && --ehash_order > 0);
 
 	if (!dccp_hashinfo.ehash) {
-		printk(KERN_CRIT "Failed to allocate DCCP "
-				 "established hash table\n");
+		DCCP_CRIT("Failed to allocate DCCP established hash table");
 		goto out_free_bind_bucket_cachep;
 	}
 
@@ -1031,7 +1055,7 @@
 	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);
 
 	if (!dccp_hashinfo.bhash) {
-		printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n");
+		DCCP_CRIT("Failed to allocate DCCP bind hash table");
 		goto out_free_dccp_ehash;
 	}
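
The new DCCP_SOCKOPT_SEND_CSCOV / DCCP_SOCKOPT_RECV_CSCOV options accept a coverage value of 0..15 per RFC 4340, sec. 9.2 and 9.2.1. A hedged userspace sketch; the option names come from <linux/dccp.h>, and the SOL_DCCP fallback value is an assumption for headers that do not define it:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/dccp.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269		/* assumed value if the headers lack it */
#endif

/* request partial checksum coverage on an already-created DCCP socket;
 * cscov: 0 = cover the whole packet, 1..15 = partial (RFC 4340, 9.2) */
static int set_csum_coverage(int fd, int cscov)
{
	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SEND_CSCOV,
		       &cscov, sizeof(cscov)) < 0) {
		perror("DCCP_SOCKOPT_SEND_CSCOV");
		return -1;
	}
	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_RECV_CSCOV,
		       &cscov, sizeof(cscov)) < 0) {
		perror("DCCP_SOCKOPT_RECV_CSCOV");
		return -1;
	}
	return 0;
}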
 
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 38bc157..fdcfca3 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -11,6 +11,7 @@
 
 #include <linux/mm.h>
 #include <linux/sysctl.h>
+#include "dccp.h"
 #include "feat.h"
 
 #ifndef CONFIG_SYSCTL
@@ -19,53 +20,76 @@
 
 static struct ctl_table dccp_default_table[] = {
 	{
-		.ctl_name	= NET_DCCP_DEFAULT_SEQ_WINDOW,
 		.procname	= "seq_window",
-		.data		= &dccp_feat_default_sequence_window,
-		.maxlen		= sizeof(dccp_feat_default_sequence_window),
+		.data		= &sysctl_dccp_feat_sequence_window,
+		.maxlen		= sizeof(sysctl_dccp_feat_sequence_window),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
 	{
-		.ctl_name	= NET_DCCP_DEFAULT_RX_CCID,
 		.procname	= "rx_ccid",
-		.data		= &dccp_feat_default_rx_ccid,
-		.maxlen		= sizeof(dccp_feat_default_rx_ccid),
+		.data		= &sysctl_dccp_feat_rx_ccid,
+		.maxlen		= sizeof(sysctl_dccp_feat_rx_ccid),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
 	{
-		.ctl_name	= NET_DCCP_DEFAULT_TX_CCID,
 		.procname	= "tx_ccid",
-		.data		= &dccp_feat_default_tx_ccid,
-		.maxlen		= sizeof(dccp_feat_default_tx_ccid),
+		.data		= &sysctl_dccp_feat_tx_ccid,
+		.maxlen		= sizeof(sysctl_dccp_feat_tx_ccid),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
 	{
-		.ctl_name	= NET_DCCP_DEFAULT_ACK_RATIO,
 		.procname	= "ack_ratio",
-		.data		= &dccp_feat_default_ack_ratio,
-		.maxlen		= sizeof(dccp_feat_default_ack_ratio),
+		.data		= &sysctl_dccp_feat_ack_ratio,
+		.maxlen		= sizeof(sysctl_dccp_feat_ack_ratio),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
 	{
-		.ctl_name	= NET_DCCP_DEFAULT_SEND_ACKVEC,
 		.procname	= "send_ackvec",
-		.data		= &dccp_feat_default_send_ack_vector,
-		.maxlen		= sizeof(dccp_feat_default_send_ack_vector),
+		.data		= &sysctl_dccp_feat_send_ack_vector,
+		.maxlen		= sizeof(sysctl_dccp_feat_send_ack_vector),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
 	{
-		.ctl_name	= NET_DCCP_DEFAULT_SEND_NDP,
 		.procname	= "send_ndp",
-		.data		= &dccp_feat_default_send_ndp_count,
-		.maxlen		= sizeof(dccp_feat_default_send_ndp_count),
+		.data		= &sysctl_dccp_feat_send_ndp_count,
+		.maxlen		= sizeof(sysctl_dccp_feat_send_ndp_count),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "request_retries",
+		.data		= &sysctl_dccp_request_retries,
+		.maxlen		= sizeof(sysctl_dccp_request_retries),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "retries1",
+		.data		= &sysctl_dccp_retries1,
+		.maxlen		= sizeof(sysctl_dccp_retries1),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "retries2",
+		.data		= &sysctl_dccp_retries2,
+		.maxlen		= sizeof(sysctl_dccp_retries2),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "tx_qlen",
+		.data		= &sysctl_dccp_tx_qlen,
+		.maxlen		= sizeof(sysctl_dccp_tx_qlen),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+
 	{ .ctl_name = 0, }
 };
 
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 8447742..e8f519e 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -15,15 +15,10 @@
 
 #include "dccp.h"
 
-static void dccp_write_timer(unsigned long data);
-static void dccp_keepalive_timer(unsigned long data);
-static void dccp_delack_timer(unsigned long data);
-
-void dccp_init_xmit_timers(struct sock *sk)
-{
-	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
-				  &dccp_keepalive_timer);
-}
+/* sysctl variables governing numbers of retransmission attempts */
+int  sysctl_dccp_request_retries	__read_mostly = TCP_SYN_RETRIES;
+int  sysctl_dccp_retries1		__read_mostly = TCP_RETR1;
+int  sysctl_dccp_retries2		__read_mostly = TCP_RETR2;
 
 static void dccp_write_err(struct sock *sk)
 {
@@ -44,11 +39,10 @@
 	if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
 		if (icsk->icsk_retransmits != 0)
 			dst_negative_advice(&sk->sk_dst_cache);
-		retry_until = icsk->icsk_syn_retries ? :
-			    /* FIXME! */ 3 /* FIXME! sysctl_tcp_syn_retries */;
+		retry_until = icsk->icsk_syn_retries ?
+			    : sysctl_dccp_request_retries;
 	} else {
-		if (icsk->icsk_retransmits >=
-		     /* FIXME! sysctl_tcp_retries1 */ 5 /* FIXME! */) {
+		if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {
 			/* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu
 			   black hole detection. :-(
 
@@ -72,7 +66,7 @@
 			dst_negative_advice(&sk->sk_dst_cache);
 		}
 
-		retry_until = /* FIXME! */ 15 /* FIXME! sysctl_tcp_retries2 */;
+		retry_until = sysctl_dccp_retries2;
 		/*
 		 * FIXME: see tcp_write_timout and tcp_out_of_resources
 		 */
@@ -86,53 +80,6 @@
 	return 0;
 }
 
-/* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */
-static void dccp_delack_timer(unsigned long data)
-{
-	struct sock *sk = (struct sock *)data;
-	struct inet_connection_sock *icsk = inet_csk(sk);
-
-	bh_lock_sock(sk);
-	if (sock_owned_by_user(sk)) {
-		/* Try again later. */
-		icsk->icsk_ack.blocked = 1;
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
-		sk_reset_timer(sk, &icsk->icsk_delack_timer,
-			       jiffies + TCP_DELACK_MIN);
-		goto out;
-	}
-
-	if (sk->sk_state == DCCP_CLOSED ||
-	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
-		goto out;
-	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
-		sk_reset_timer(sk, &icsk->icsk_delack_timer,
-			       icsk->icsk_ack.timeout);
-		goto out;
-	}
-
-	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
-
-	if (inet_csk_ack_scheduled(sk)) {
-		if (!icsk->icsk_ack.pingpong) {
-			/* Delayed ACK missed: inflate ATO. */
-			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
-						 icsk->icsk_rto);
-		} else {
-			/* Delayed ACK missed: leave pingpong mode and
-			 * deflate ATO.
-			 */
-			icsk->icsk_ack.pingpong = 0;
-			icsk->icsk_ack.ato = TCP_ATO_MIN;
-		}
-		dccp_send_ack(sk);
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
-	}
-out:
-	bh_unlock_sock(sk);
-	sock_put(sk);
-}
-
 /*
  *	The DCCP retransmit timer.
  */
@@ -142,7 +89,7 @@
 
 	/* retransmit timer is used for feature negotiation throughout
 	 * connection.  In this case, no packet is re-transmitted, but rather an
-	 * ack is generated and pending changes are splaced into its options.
+	 * ack is generated and pending changes are placed into its options.
 	 */
 	if (sk->sk_send_head == NULL) {
 		dccp_pr_debug("feat negotiation retransmit timeout %p\n", sk);
@@ -154,9 +101,11 @@
 	/*
 	 * sk->sk_send_head has to have one skb with
 	 * DCCP_SKB_CB(skb)->dccpd_type set to one of the retransmittable DCCP
-	 * packet types (REQUEST, RESPONSE, the ACK in the 3way handshake
-	 * (PARTOPEN timer), etc).
-	 */
+	 * packet types. The only packets eligible for retransmission are:
+	 * 	-- Requests in client-REQUEST  state (sec. 8.1.1)
+	 * 	-- Acks     in client-PARTOPEN state (sec. 8.1.5)
+	 * 	-- CloseReq in server-CLOSEREQ state (sec. 8.3)
+	 * 	-- Close    in   node-CLOSING  state (sec. 8.3)                */
 	BUG_TRAP(sk->sk_send_head != NULL);
 
 	/* 
@@ -194,7 +143,7 @@
 	icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
 				  DCCP_RTO_MAX);
-	if (icsk->icsk_retransmits > 3 /* FIXME: sysctl_dccp_retries1 */)
+	if (icsk->icsk_retransmits > sysctl_dccp_retries1)
 		__sk_dst_reset(sk);
 out:;
 }
@@ -264,3 +213,56 @@
 	bh_unlock_sock(sk);
 	sock_put(sk);
 }
+
+/* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */
+static void dccp_delack_timer(unsigned long data)
+{
+	struct sock *sk = (struct sock *)data;
+	struct inet_connection_sock *icsk = inet_csk(sk);
+
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
+		/* Try again later. */
+		icsk->icsk_ack.blocked = 1;
+		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
+		sk_reset_timer(sk, &icsk->icsk_delack_timer,
+			       jiffies + TCP_DELACK_MIN);
+		goto out;
+	}
+
+	if (sk->sk_state == DCCP_CLOSED ||
+	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+		goto out;
+	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
+		sk_reset_timer(sk, &icsk->icsk_delack_timer,
+			       icsk->icsk_ack.timeout);
+		goto out;
+	}
+
+	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
+
+	if (inet_csk_ack_scheduled(sk)) {
+		if (!icsk->icsk_ack.pingpong) {
+			/* Delayed ACK missed: inflate ATO. */
+			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
+						 icsk->icsk_rto);
+		} else {
+			/* Delayed ACK missed: leave pingpong mode and
+			 * deflate ATO.
+			 */
+			icsk->icsk_ack.pingpong = 0;
+			icsk->icsk_ack.ato = TCP_ATO_MIN;
+		}
+		dccp_send_ack(sk);
+		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
+	}
+out:
+	bh_unlock_sock(sk);
+	sock_put(sk);
+}
+
+void dccp_init_xmit_timers(struct sock *sk)
+{
+	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
+				  &dccp_keepalive_timer);
+}
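
The retransmit path above keeps TCP-style exponential backoff: icsk_rto is doubled on each expiry and clamped to DCCP_RTO_MAX, and the connection is abandoned once icsk_retransmits exceeds the (now sysctl-controlled) retry limits. The step itself, reduced to a sketch with local names:

/* one backoff step: double the timeout, never exceeding the ceiling */
static unsigned long rto_backoff(unsigned long rto, unsigned long rto_max)
{
	rto <<= 1;
	return rto < rto_max ? rto : rto_max;
}
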
diff --git a/net/decnet/Kconfig b/net/decnet/Kconfig
index 36e72cb..7914fd6 100644
--- a/net/decnet/Kconfig
+++ b/net/decnet/Kconfig
@@ -41,11 +41,3 @@
 
 	  See <file:Documentation/networking/decnet.txt> for more information.
 
-config DECNET_ROUTE_FWMARK
-	bool "DECnet: use FWMARK value as routing key (EXPERIMENTAL)"
-	depends on DECNET_ROUTER && NETFILTER
-	help
-	  If you say Y here, you will be able to specify different routes for
-	  packets with different FWMARK ("firewalling mark") values
-	  (see ipchains(8), "-m" argument).
-
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 01861fe..0b9d4c9 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -38,7 +38,6 @@
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
 #include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
 #include <linux/sysctl.h>
 #include <linux/notifier.h>
 #include <asm/uaccess.h>
@@ -47,6 +46,7 @@
 #include <net/dst.h>
 #include <net/flow.h>
 #include <net/fib_rules.h>
+#include <net/netlink.h>
 #include <net/dn.h>
 #include <net/dn_dev.h>
 #include <net/dn_route.h>
@@ -73,7 +73,7 @@
 
 static struct dn_dev *dn_dev_create(struct net_device *dev, int *err);
 static void dn_dev_delete(struct net_device *dev);
-static void rtmsg_ifa(int event, struct dn_ifaddr *ifa);
+static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa);
 
 static int dn_eth_up(struct net_device *);
 static void dn_eth_down(struct net_device *);
@@ -255,12 +255,10 @@
 	struct dn_dev_sysctl_table *t;
 	int i;
 
-	t = kmalloc(sizeof(*t), GFP_KERNEL);
+	t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL);
 	if (t == NULL)
 		return;
 
-	memcpy(t, &dn_dev_sysctl, sizeof(*t));
-
 	for(i = 0; i < ARRAY_SIZE(t->dn_dev_vars) - 1; i++) {
 		long offset = (long)t->dn_dev_vars[i].data;
 		t->dn_dev_vars[i].data = ((char *)parms) + offset;
@@ -442,7 +440,7 @@
 		}
 	}
 
-	rtmsg_ifa(RTM_DELADDR, ifa1);
+	dn_ifaddr_notify(RTM_DELADDR, ifa1);
 	blocking_notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1);
 	if (destroy) {
 		dn_dev_free_ifa(ifa1);
@@ -477,7 +475,7 @@
 	ifa->ifa_next = dn_db->ifa_list;
 	dn_db->ifa_list = ifa;
 
-	rtmsg_ifa(RTM_NEWADDR, ifa);
+	dn_ifaddr_notify(RTM_NEWADDR, ifa);
 	blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
 
 	return 0;
@@ -647,41 +645,62 @@
 	return dn_dev;
 }
 
-static int dn_dev_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+static struct nla_policy dn_ifa_policy[IFA_MAX+1] __read_mostly = {
+	[IFA_ADDRESS]		= { .type = NLA_U16 },
+	[IFA_LOCAL]		= { .type = NLA_U16 },
+	[IFA_LABEL]		= { .type = NLA_STRING,
+				    .len = IFNAMSIZ - 1 },
+};
+
+static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
-	struct rtattr **rta = arg;
+	struct nlattr *tb[IFA_MAX+1];
 	struct dn_dev *dn_db;
-	struct ifaddrmsg *ifm = NLMSG_DATA(nlh);
+	struct ifaddrmsg *ifm;
 	struct dn_ifaddr *ifa, **ifap;
+	int err = -EADDRNOTAVAIL;
 
+	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
+	if (err < 0)
+		goto errout;
+
+	ifm = nlmsg_data(nlh);
 	if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL)
-		return -EADDRNOTAVAIL;
+		goto errout;
 
-	for(ifap = &dn_db->ifa_list; (ifa=*ifap) != NULL; ifap = &ifa->ifa_next) {
-		void *tmp = rta[IFA_LOCAL-1];
-		if ((tmp && memcmp(RTA_DATA(tmp), &ifa->ifa_local, 2)) ||
-		    (rta[IFA_LABEL-1] && rtattr_strcmp(rta[IFA_LABEL-1], ifa->ifa_label)))
+	for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) {
+		if (tb[IFA_LOCAL] &&
+		    nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
+			continue;
+
+		if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
 			continue;
 
 		dn_dev_del_ifa(dn_db, ifap, 1);
 		return 0;
 	}
 
-	return -EADDRNOTAVAIL;
+errout:
+	return err;
 }
 
-static int dn_dev_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
-	struct rtattr **rta = arg;
+	struct nlattr *tb[IFA_MAX+1];
 	struct net_device *dev;
 	struct dn_dev *dn_db;
-	struct ifaddrmsg *ifm = NLMSG_DATA(nlh);
+	struct ifaddrmsg *ifm;
 	struct dn_ifaddr *ifa;
-	int rv;
+	int err;
 
-	if (rta[IFA_LOCAL-1] == NULL)
+	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
+	if (err < 0)
+		return err;
+
+	if (tb[IFA_LOCAL] == NULL)
 		return -EINVAL;
 
+	ifm = nlmsg_data(nlh);
 	if ((dev = __dev_get_by_index(ifm->ifa_index)) == NULL)
 		return -ENODEV;
 
@@ -695,69 +714,77 @@
 	if ((ifa = dn_dev_alloc_ifa()) == NULL)
 		return -ENOBUFS;
 
-	if (!rta[IFA_ADDRESS - 1])
-		rta[IFA_ADDRESS - 1] = rta[IFA_LOCAL - 1];
-	memcpy(&ifa->ifa_local, RTA_DATA(rta[IFA_LOCAL-1]), 2);
-	memcpy(&ifa->ifa_address, RTA_DATA(rta[IFA_ADDRESS-1]), 2);
+	if (tb[IFA_ADDRESS] == NULL)
+		tb[IFA_ADDRESS] = tb[IFA_LOCAL];
+
+	ifa->ifa_local = nla_get_le16(tb[IFA_LOCAL]);
+	ifa->ifa_address = nla_get_le16(tb[IFA_ADDRESS]);
 	ifa->ifa_flags = ifm->ifa_flags;
 	ifa->ifa_scope = ifm->ifa_scope;
 	ifa->ifa_dev = dn_db;
-	if (rta[IFA_LABEL-1])
-		rtattr_strlcpy(ifa->ifa_label, rta[IFA_LABEL-1], IFNAMSIZ);
+
+	if (tb[IFA_LABEL])
+		nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
 	else
 		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
 
-	rv = dn_dev_insert_ifa(dn_db, ifa);
-	if (rv)
+	err = dn_dev_insert_ifa(dn_db, ifa);
+	if (err)
 		dn_dev_free_ifa(ifa);
-	return rv;
+
+	return err;
 }
 
-static int dn_dev_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
-				u32 pid, u32 seq, int event, unsigned int flags)
+static inline size_t dn_ifaddr_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
+	       + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
+	       + nla_total_size(2) /* IFA_ADDRESS */
+	       + nla_total_size(2); /* IFA_LOCAL */
+}
+
+static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
+			     u32 pid, u32 seq, int event, unsigned int flags)
 {
 	struct ifaddrmsg *ifm;
 	struct nlmsghdr *nlh;
-	unsigned char *b = skb->tail;
 
-	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*ifm), flags);
-	ifm = NLMSG_DATA(nlh);
+	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
+	if (nlh == NULL)
+		return -ENOBUFS;
 
+	ifm = nlmsg_data(nlh);
 	ifm->ifa_family = AF_DECnet;
 	ifm->ifa_prefixlen = 16;
 	ifm->ifa_flags = ifa->ifa_flags | IFA_F_PERMANENT;
 	ifm->ifa_scope = ifa->ifa_scope;
 	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
-	if (ifa->ifa_address)
-		RTA_PUT(skb, IFA_ADDRESS, 2, &ifa->ifa_address);
-	if (ifa->ifa_local)
-		RTA_PUT(skb, IFA_LOCAL, 2, &ifa->ifa_local);
-	if (ifa->ifa_label[0])
-		RTA_PUT(skb, IFA_LABEL, IFNAMSIZ, &ifa->ifa_label);
-	nlh->nlmsg_len = skb->tail - b;
-	return skb->len;
 
-nlmsg_failure:
-rtattr_failure:
-        skb_trim(skb, b - skb->data);
-        return -1;
+	if (ifa->ifa_address)
+		NLA_PUT_LE16(skb, IFA_ADDRESS, ifa->ifa_address);
+	if (ifa->ifa_local)
+		NLA_PUT_LE16(skb, IFA_LOCAL, ifa->ifa_local);
+	if (ifa->ifa_label[0])
+		NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);
+
+	return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+	return nlmsg_cancel(skb, nlh);
 }
 
-static void rtmsg_ifa(int event, struct dn_ifaddr *ifa)
+static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa)
 {
 	struct sk_buff *skb;
-	int payload = sizeof(struct ifaddrmsg) + 128;
 	int err = -ENOBUFS;
 
-	skb = alloc_skb(nlmsg_total_size(payload), GFP_KERNEL);
+	skb = alloc_skb(dn_ifaddr_nlmsg_size(), GFP_KERNEL);
 	if (skb == NULL)
 		goto errout;
 
-	err = dn_dev_fill_ifaddr(skb, ifa, 0, 0, event, 0);
-	if (err < 0) {
-		kfree_skb(skb);
-		goto errout;
-	}
+	err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0);
+	/* failure implies BUG in dn_ifaddr_nlmsg_size() */
+	BUG_ON(err < 0);
 
 	err = rtnl_notify(skb, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL);
 errout:
@@ -765,39 +792,43 @@
 		rtnl_set_sk_err(RTNLGRP_DECnet_IFADDR, err);
 }
 
-static int dn_dev_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
+static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 {
-	int idx, dn_idx;
-	int s_idx, s_dn_idx;
+	int idx, dn_idx = 0, skip_ndevs, skip_naddr;
 	struct net_device *dev;
 	struct dn_dev *dn_db;
 	struct dn_ifaddr *ifa;
 
-	s_idx = cb->args[0];
-	s_dn_idx = dn_idx = cb->args[1];
+	skip_ndevs = cb->args[0];
+	skip_naddr = cb->args[1];
+
 	read_lock(&dev_base_lock);
-	for(dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
-		if (idx < s_idx)
+	for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
+		if (idx < skip_ndevs)
 			continue;
-		if (idx > s_idx)
-			s_dn_idx = 0;
+		else if (idx > skip_ndevs) {
+			/* Only skip over addresses for first dev dumped
+			 * in this iteration (idx == skip_ndevs) */
+			skip_naddr = 0;
+		}
+
 		if ((dn_db = dev->dn_ptr) == NULL)
 			continue;
 
-		for(ifa = dn_db->ifa_list, dn_idx = 0; ifa; ifa = ifa->ifa_next, dn_idx++) {
-			if (dn_idx < s_dn_idx)
+		for (ifa = dn_db->ifa_list, dn_idx = 0; ifa;
+		     ifa = ifa->ifa_next, dn_idx++) {
+			if (dn_idx < skip_naddr)
 				continue;
 
-			if (dn_dev_fill_ifaddr(skb, ifa,
-					       NETLINK_CB(cb->skb).pid,
-					       cb->nlh->nlmsg_seq,
-					       RTM_NEWADDR,
-					       NLM_F_MULTI) <= 0)
+			if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
+					      cb->nlh->nlmsg_seq, RTM_NEWADDR,
+					      NLM_F_MULTI) < 0)
 				goto done;
 		}
 	}
 done:
 	read_unlock(&dev_base_lock);
+
 	cb->args[0] = idx;
 	cb->args[1] = dn_idx;
 
@@ -1414,9 +1445,9 @@
 
 static struct rtnetlink_link dnet_rtnetlink_table[RTM_NR_MSGTYPES] =
 {
-	[RTM_NEWADDR  - RTM_BASE] = { .doit	= dn_dev_rtm_newaddr,	},
-	[RTM_DELADDR  - RTM_BASE] = { .doit	= dn_dev_rtm_deladdr,	},
-	[RTM_GETADDR  - RTM_BASE] = { .dumpit	= dn_dev_dump_ifaddr,	},
+	[RTM_NEWADDR  - RTM_BASE] = { .doit	= dn_nl_newaddr,	},
+	[RTM_DELADDR  - RTM_BASE] = { .doit	= dn_nl_deladdr,	},
+	[RTM_GETADDR  - RTM_BASE] = { .dumpit	= dn_nl_dump_ifaddr,	},
 #ifdef CONFIG_DECNET_ROUTER
 	[RTM_NEWROUTE - RTM_BASE] = { .doit	= dn_fib_rtm_newroute,	},
 	[RTM_DELROUTE - RTM_BASE] = { .doit	= dn_fib_rtm_delroute,	},
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index ff0ebe9..7322bb3 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -591,7 +591,6 @@
 
 	seq          = file->private_data;
 	seq->private = s;
-	memset(s, 0, sizeof(*s));
 out:
 	return rc;
 out_kfree:
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 7683d4f..39a6cf7 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -804,7 +804,7 @@
 				goto free_out;
 		}
 
-		return sk_receive_skb(sk, skb);
+		return sk_receive_skb(sk, skb, 0);
 	}
 
 	return dn_nsp_no_socket(skb, reason);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 23489f7..9881933 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -269,9 +269,7 @@
 {
 	return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) |
 		(fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) |
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
-		(fl1->nl_u.dn_u.fwmark ^ fl2->nl_u.dn_u.fwmark) |
-#endif
+		(fl1->mark ^ fl2->mark) |
 		(fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) |
 		(fl1->oif ^ fl2->oif) |
 		(fl1->iif ^ fl2->iif)) == 0;
@@ -882,10 +880,8 @@
 				      { .daddr = oldflp->fld_dst,
 					.saddr = oldflp->fld_src,
 					.scope = RT_SCOPE_UNIVERSE,
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
-					.fwmark = oldflp->fld_fwmark
-#endif
 				     } },
+			    .mark = oldflp->mark,
 			    .iif = loopback_dev.ifindex,
 			    .oif = oldflp->oif };
 	struct dn_route *rt = NULL;
@@ -903,7 +899,7 @@
 		       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
 		       " iif=%d oif=%d\n", dn_ntohs(oldflp->fld_dst),
 		       dn_ntohs(oldflp->fld_src),
-                       oldflp->fld_fwmark, loopback_dev.ifindex, oldflp->oif);
+                       oldflp->mark, loopback_dev.ifindex, oldflp->oif);
 
 	/* If we have an output interface, verify its a DECnet device */
 	if (oldflp->oif) {
@@ -1108,9 +1104,7 @@
 	rt->fl.fld_dst    = oldflp->fld_dst;
 	rt->fl.oif        = oldflp->oif;
 	rt->fl.iif        = 0;
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
-	rt->fl.fld_fwmark = oldflp->fld_fwmark;
-#endif
+	rt->fl.mark       = oldflp->mark;
 
 	rt->rt_saddr      = fl.fld_src;
 	rt->rt_daddr      = fl.fld_dst;
@@ -1178,9 +1172,7 @@
 			rt = rcu_dereference(rt->u.rt_next)) {
 			if ((flp->fld_dst == rt->fl.fld_dst) &&
 			    (flp->fld_src == rt->fl.fld_src) &&
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
-			    (flp->fld_fwmark == rt->fl.fld_fwmark) &&
-#endif
+			    (flp->mark == rt->fl.mark) &&
 			    (rt->fl.iif == 0) &&
 			    (rt->fl.oif == flp->oif)) {
 				rt->u.dst.lastuse = jiffies;
@@ -1235,10 +1227,8 @@
 				     { .daddr = cb->dst,
 				       .saddr = cb->src,
 				       .scope = RT_SCOPE_UNIVERSE,
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
-				       .fwmark = skb->nfmark
-#endif
 				    } },
+			    .mark = skb->mark,
 			    .iif = skb->dev->ifindex };
 	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
 	int err = -EINVAL;
@@ -1385,7 +1375,7 @@
 	rt->fl.fld_dst    = cb->dst;
 	rt->fl.oif        = 0;
 	rt->fl.iif        = in_dev->ifindex;
-	rt->fl.fld_fwmark = fl.fld_fwmark;
+	rt->fl.mark       = fl.mark;
 
 	rt->u.dst.flags = DST_HOST;
 	rt->u.dst.neighbour = neigh;
@@ -1457,9 +1447,7 @@
 		if ((rt->fl.fld_src == cb->src) &&
 	 	    (rt->fl.fld_dst == cb->dst) &&
 		    (rt->fl.oif == 0) &&
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
-		    (rt->fl.fld_fwmark == skb->nfmark) &&
-#endif
+		    (rt->fl.mark == skb->mark) &&
 		    (rt->fl.iif == cb->iif)) {
 			rt->u.dst.lastuse = jiffies;
 			dst_hold(&rt->u.dst);
@@ -1481,7 +1469,7 @@
 	struct rtmsg *r;
 	struct nlmsghdr *nlh;
 	unsigned char *b = skb->tail;
-	struct rta_cacheinfo ci;
+	long expires;
 
 	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
 	r = NLMSG_DATA(nlh);
@@ -1514,16 +1502,10 @@
 		RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
 	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
 		goto rtattr_failure;
-	ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
-	ci.rta_used     = rt->u.dst.__use;
-	ci.rta_clntref  = atomic_read(&rt->u.dst.__refcnt);
-	if (rt->u.dst.expires)
-		ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
-	else
-		ci.rta_expires = 0;
-	ci.rta_error    = rt->u.dst.error;
-	ci.rta_id       = ci.rta_ts = ci.rta_tsage = 0;
-	RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
+	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
+	if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, expires,
+			       rt->u.dst.error) < 0)
+		goto rtattr_failure;
 	if (rt->fl.iif)
 		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
 
@@ -1604,8 +1586,6 @@
 	if (rtm->rtm_flags & RTM_F_NOTIFY)
 		rt->rt_flags |= RTCF_NOTIFY;
 
-	NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
-
 	err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
 
 	if (err == 0)
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 590e0a7..e32d0c3 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -45,10 +45,6 @@
 	__le16			dstmask;
 	__le16			srcmap;
 	u8			flags;
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
-	u32			fwmark;
-	u32			fwmask;
-#endif
 };
 
 static struct dn_fib_rule default_rule = {
@@ -112,13 +108,9 @@
 }
 
 static struct nla_policy dn_fib_rule_policy[FRA_MAX+1] __read_mostly = {
-	[FRA_IFNAME]	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
-	[FRA_PRIORITY]	= { .type = NLA_U32 },
+	FRA_GENERIC_POLICY,
 	[FRA_SRC]	= { .type = NLA_U16 },
 	[FRA_DST]	= { .type = NLA_U16 },
-	[FRA_FWMARK]	= { .type = NLA_U32 },
-	[FRA_FWMASK]	= { .type = NLA_U32 },
-	[FRA_TABLE]     = { .type = NLA_U32 },
 };
 
 static int dn_fib_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
@@ -131,11 +123,6 @@
 	    ((daddr ^ r->dst) & r->dstmask))
 		return 0;
 
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
-	if ((r->fwmark ^ fl->fld_fwmark) & r->fwmask)
-		return 0;
-#endif
-
 	return 1;
 }
 
@@ -169,20 +156,6 @@
 	if (tb[FRA_DST])
 		r->dst = nla_get_u16(tb[FRA_DST]);
 
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
-	if (tb[FRA_FWMARK]) {
-		r->fwmark = nla_get_u32(tb[FRA_FWMARK]);
-		if (r->fwmark)
-			/* compatibility: if the mark value is non-zero all bits
-			 * are compared unless a mask is explicitly specified.
-			 */
-			r->fwmask = 0xFFFFFFFF;
-	}
-
-	if (tb[FRA_FWMASK])
-		r->fwmask = nla_get_u32(tb[FRA_FWMASK]);
-#endif
-
 	r->src_len = frh->src_len;
 	r->srcmask = dnet_make_mask(r->src_len);
 	r->dst_len = frh->dst_len;
@@ -203,14 +176,6 @@
 	if (frh->dst_len && (r->dst_len != frh->dst_len))
 		return 0;
 
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
-	if (tb[FRA_FWMARK] && (r->fwmark != nla_get_u32(tb[FRA_FWMARK])))
-		return 0;
-
-	if (tb[FRA_FWMASK] && (r->fwmask != nla_get_u32(tb[FRA_FWMASK])))
-		return 0;
-#endif
-
 	if (tb[FRA_SRC] && (r->src != nla_get_u16(tb[FRA_SRC])))
 		return 0;
 
@@ -248,12 +213,6 @@
 	frh->src_len = r->src_len;
 	frh->tos = 0;
 
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
-	if (r->fwmark)
-		NLA_PUT_U32(skb, FRA_FWMARK, r->fwmark);
-	if (r->fwmask || r->fwmark)
-		NLA_PUT_U32(skb, FRA_FWMASK, r->fwmask);
-#endif
 	if (r->dst_len)
 		NLA_PUT_U16(skb, FRA_DST, r->dst);
 	if (r->src_len)
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 317904b..13b2421 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -79,7 +79,7 @@
 static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ];
 static DEFINE_RWLOCK(dn_fib_tables_lock);
 
-static kmem_cache_t *dn_hash_kmem __read_mostly;
+static struct kmem_cache *dn_hash_kmem __read_mostly;
 static int dn_fib_hash_zombies;
 
 static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
@@ -263,6 +263,32 @@
 	return 0;
 }
 
+static inline size_t dn_fib_nlmsg_size(struct dn_fib_info *fi)
+{
+	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
+			 + nla_total_size(4) /* RTA_TABLE */
+			 + nla_total_size(2) /* RTA_DST */
+			 + nla_total_size(4); /* RTA_PRIORITY */
+
+	/* space for nested metrics */
+	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
+
+	if (fi->fib_nhs) {
+		/* Also handles the special case fib_nhs == 1 */
+
+		/* each nexthop is packed in an attribute */
+		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));
+
+		/* may contain a gateway attribute */
+		nhsize += nla_total_size(4);
+
+		/* all nexthops are packed in a nested attribute */
+		payload += nla_total_size(fi->fib_nhs * nhsize);
+	}
+
+	return payload;
+}
+
 static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
                         u32 tb_id, u8 type, u8 scope, void *dst, int dst_len,
                         struct dn_fib_info *fi, unsigned int flags)
@@ -335,17 +361,15 @@
         u32 pid = req ? req->pid : 0;
 	int err = -ENOBUFS;
 
-        skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+        skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL);
         if (skb == NULL)
 		goto errout;
 
         err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id,
 			       f->fn_type, f->fn_scope, &f->fn_key, z,
 			       DN_FIB_INFO(f), 0);
-	if (err < 0) {
-                kfree_skb(skb);
-		goto errout;
-        }
+	/* failure implies BUG in dn_fib_nlmsg_size() */
+	BUG_ON(err < 0);
 
 	err = rtnl_notify(skb, pid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL);
 errout:
@@ -566,7 +590,7 @@
 
 replace:
 	err = -ENOBUFS;
-	new_f = kmem_cache_alloc(dn_hash_kmem, SLAB_KERNEL);
+	new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL);
 	if (new_f == NULL)
 		goto out;
 
@@ -807,10 +831,11 @@
                 printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n"); 
                 return NULL;
         }
-        if ((t = kmalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash), GFP_KERNEL)) == NULL)
-                return NULL;
 
-        memset(t, 0, sizeof(struct dn_fib_table));
+        t = kzalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash),
+		    GFP_KERNEL);
+        if (t == NULL)
+                return NULL;
 
         t->n = n;
         t->insert = dn_fib_table_insert;
@@ -818,7 +843,6 @@
         t->lookup = dn_fib_table_lookup;
         t->flush  = dn_fib_table_flush;
         t->dump = dn_fib_table_dump;
-	memset(t->data, 0, sizeof(struct dn_hash));
 	hlist_add_head_rcu(&t->hlist, &dn_fib_table_hash[h]);
 
         return t;
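
Several hunks in this series apply the same cleanup: kmalloc() followed by memset(0) becomes kzalloc(), and kmalloc() followed by memcpy() becomes kmemdup(), as in the sysctl-registration and table-allocation changes above. Schematically, with a placeholder struct:

#include <linux/slab.h>
#include <linux/string.h>

struct foo {			/* placeholder type for illustration */
	int a;
	char name[16];
};

static struct foo *alloc_foo(void)
{
	/* was: kmalloc() + memset(f, 0, sizeof(*f)) */
	return kzalloc(sizeof(struct foo), GFP_KERNEL);
}

static struct foo *dup_foo(const struct foo *src)
{
	/* was: kmalloc() + memcpy(f, src, sizeof(*src)) */
	return kmemdup(src, sizeof(*src), GFP_KERNEL);
}
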
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 4bd78c8..2d31bf3 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -60,7 +60,6 @@
 #include <net/ip.h>
 #include <asm/uaccess.h>
 #include <asm/system.h>
-#include <asm/checksum.h>
 
 __setup("ether=", netdev_boot_setup);
 
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c
index 4200ec5..fc1f99a 100644
--- a/net/ieee80211/ieee80211_crypt_tkip.c
+++ b/net/ieee80211/ieee80211_crypt_tkip.c
@@ -16,6 +16,7 @@
 #include <linux/random.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
+#include <linux/mm.h>
 #include <linux/if_ether.h>
 #include <linux/if_arp.h>
 #include <asm/string.h>
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
index 1b2efff..7a95c3d 100644
--- a/net/ieee80211/ieee80211_crypt_wep.c
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/random.h>
 #include <linux/skbuff.h>
+#include <linux/mm.h>
 #include <asm/string.h>
 
 #include <net/ieee80211.h>
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c
index 13b1e5f..b1c6d1f 100644
--- a/net/ieee80211/ieee80211_module.c
+++ b/net/ieee80211/ieee80211_module.c
@@ -67,7 +67,7 @@
 		return 0;
 
 	ieee->networks =
-	    kmalloc(MAX_NETWORK_COUNT * sizeof(struct ieee80211_network),
+	    kzalloc(MAX_NETWORK_COUNT * sizeof(struct ieee80211_network),
 		    GFP_KERNEL);
 	if (!ieee->networks) {
 		printk(KERN_WARNING "%s: Out of memory allocating beacons\n",
@@ -75,9 +75,6 @@
 		return -ENOMEM;
 	}
 
-	memset(ieee->networks, 0,
-	       MAX_NETWORK_COUNT * sizeof(struct ieee80211_network));
-
 	return 0;
 }
 
@@ -118,6 +115,21 @@
 			      &ieee->network_free_list);
 }
 
+static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if ((new_mtu < 68) || (new_mtu > IEEE80211_DATA_LEN))
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+static struct net_device_stats *ieee80211_generic_get_stats(
+	struct net_device *dev)
+{
+	struct ieee80211_device *ieee = netdev_priv(dev);
+	return &ieee->stats;
+}
+
 struct net_device *alloc_ieee80211(int sizeof_priv)
 {
 	struct ieee80211_device *ieee;
@@ -133,6 +145,11 @@
 	}
 	ieee = netdev_priv(dev);
 	dev->hard_start_xmit = ieee80211_xmit;
+	dev->change_mtu = ieee80211_change_mtu;
+
+	/* Drivers are free to override this if the generic implementation
+	 * does not meet their needs. */
+	dev->get_stats = ieee80211_generic_get_stats;
 
 	ieee->dev = dev;
 
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index 2759312..d97e541 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -415,17 +415,16 @@
 	    ieee->host_mc_decrypt : ieee->host_decrypt;
 
 	if (can_be_decrypted) {
-		int idx = 0;
 		if (skb->len >= hdrlen + 3) {
 			/* Top two-bits of byte 3 are the key index */
-			idx = skb->data[hdrlen + 3] >> 6;
+			keyidx = skb->data[hdrlen + 3] >> 6;
 		}
 
-		/* ieee->crypt[] is WEP_KEY (4) in length.  Given that idx
-		 * is only allowed 2-bits of storage, no value of idx can
-		 * be provided via above code that would result in idx
+		/* ieee->crypt[] is WEP_KEY (4) in length.  Given that keyidx
+		 * is only allowed 2-bits of storage, no value of keyidx can
+		 * be provided via above code that would result in keyidx
 		 * being out of range */
-		crypt = ieee->crypt[idx];
+		crypt = ieee->crypt[keyidx];
 
 #ifdef NOT_YET
 		sta = NULL;
@@ -479,6 +478,11 @@
 			goto rx_exit;
 	}
 #endif
+	/* drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.29) */
+	if (sc == ieee->prev_seq_ctl)
+		goto rx_dropped;
+	else
+		ieee->prev_seq_ctl = sc;
 
 	/* Data frame - extract src/dst addresses */
 	if (skb->len < IEEE80211_3ADDR_LEN)
@@ -655,6 +659,51 @@
 		goto rx_dropped;
 	}
 
+	/* If the frame was decrypted in hardware, we may need to strip off
+	 * any security data (IV, ICV, etc) that was left behind */
+	if (!can_be_decrypted && (fc & IEEE80211_FCTL_PROTECTED) &&
+	    ieee->host_strip_iv_icv) {
+	    	int trimlen = 0;
+
+		/* Top two-bits of byte 3 are the key index */
+		if (skb->len >= hdrlen + 3)
+			keyidx = skb->data[hdrlen + 3] >> 6;
+
+		/* To strip off any security data which appears before the
+		 * payload, we simply increase hdrlen (as the header gets
+		 * chopped off immediately below). For the security data which
+		 * appears after the payload, we use skb_trim. */
+
+		switch (ieee->sec.encode_alg[keyidx]) {
+		case SEC_ALG_WEP:
+			/* 4 byte IV */
+			hdrlen += 4;
+			/* 4 byte ICV */
+			trimlen = 4;
+			break;
+		case SEC_ALG_TKIP:
+			/* 4 byte IV, 4 byte ExtIV */
+			hdrlen += 8;
+			/* 8 byte MIC, 4 byte ICV */
+			trimlen = 12;
+			break;
+		case SEC_ALG_CCMP:
+			/* 8 byte CCMP header */
+			hdrlen += 8;
+			/* 8 byte MIC */
+			trimlen = 8;
+			break;
+		}
+
+		if (skb->len < trimlen)
+			goto rx_dropped;
+
+		__skb_trim(skb, skb->len - trimlen);
+
+		if (skb->len < hdrlen)
+			goto rx_dropped;
+	}
+
 	/* skb: hdr + (possible reassembled) full plaintext payload */
 
 	payload = skb->data + hdrlen;
@@ -1255,12 +1304,11 @@
 		case MFIE_TYPE_IBSS_DFS:
 			if (network->ibss_dfs)
 				break;
-			network->ibss_dfs =
-			    kmalloc(info_element->len, GFP_ATOMIC);
+			network->ibss_dfs = kmemdup(info_element->data,
+						    info_element->len,
+						    GFP_ATOMIC);
 			if (!network->ibss_dfs)
 				return 1;
-			memcpy(network->ibss_dfs, info_element->data,
-			       info_element->len);
 			network->flags |= NETWORK_HAS_IBSS_DFS;
 			break;
 
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index ae25449..854fc13 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -390,7 +390,7 @@
 		 * this stack is providing the full 802.11 header, one will
 		 * eventually be affixed to this fragment -- so we must account
 		 * for it when determining the amount of payload space. */
-		bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN;
+		bytes_per_frag = frag_size - hdr_len;
 		if (ieee->config &
 		    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
 			bytes_per_frag -= IEEE80211_FCS_LEN;
@@ -412,7 +412,7 @@
 	} else {
 		nr_frags = 1;
 		bytes_per_frag = bytes_last_frag = bytes;
-		frag_size = bytes + IEEE80211_3ADDR_LEN;
+		frag_size = bytes + hdr_len;
 	}
 
 	rts_required = (frag_size > ieee->rts
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index cf51c87..eec1a1d 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -58,9 +58,11 @@
 }
 
 void
-ieee80211softmac_assoc_timeout(void *d)
+ieee80211softmac_assoc_timeout(struct work_struct *work)
 {
-	struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+	struct ieee80211softmac_device *mac =
+		container_of(work, struct ieee80211softmac_device,
+			     associnfo.timeout.work);
 	struct ieee80211softmac_network *n;
 
 	mutex_lock(&mac->associnfo.mutex);
@@ -186,9 +188,11 @@
 
 /* This function is called to handle userspace requests (asynchronously) */
 void
-ieee80211softmac_assoc_work(void *d)
+ieee80211softmac_assoc_work(struct work_struct *work)
 {
-	struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+	struct ieee80211softmac_device *mac =
+		container_of(work, struct ieee80211softmac_device,
+			     associnfo.work.work);
 	struct ieee80211softmac_network *found = NULL;
 	struct ieee80211_network *net = NULL, *best = NULL;
 	int bssvalid;
@@ -412,7 +416,7 @@
 				network->authenticated = 0;
 				/* we don't want to do this more than once ... */
 				network->auth_desynced_once = 1;
-				schedule_work(&mac->associnfo.work);
+				schedule_delayed_work(&mac->associnfo.work, 0);
 				break;
 			}
 		default:
@@ -427,6 +431,17 @@
 	return 0;
 }
 
+void
+ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&mac->lock, flags);
+	mac->associnfo.associating = 1;
+	schedule_work(&mac->associnfo.work);
+	spin_unlock_irqrestore(&mac->lock, flags);
+}
+
 int
 ieee80211softmac_handle_disassoc(struct net_device * dev,
 				 struct ieee80211_disassoc *disassoc)
@@ -445,8 +460,7 @@
 	dprintk(KERN_INFO PFX "got disassoc frame\n");
 	ieee80211softmac_disassoc(mac);
 
-	/* try to reassociate */
-	schedule_work(&mac->associnfo.work);
+	ieee80211softmac_try_reassoc(mac);
 
 	return 0;
 }
@@ -466,7 +480,7 @@
 		dprintkl(KERN_INFO PFX "reassoc request from unknown network\n");
 		return 0;
 	}
-	schedule_work(&mac->associnfo.work);
+	schedule_delayed_work(&mac->associnfo.work, 0);
 
 	return 0;
 }
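
This file and the softmac files that follow all apply the same workqueue API conversion: handlers now receive the struct work_struct itself rather than a void * cookie, the owning object is recovered with container_of(), and one-shot items that used to be plain struct work_struct become struct delayed_work scheduled with a zero delay. A minimal sketch of the pattern, using a hypothetical container struct:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_state {			/* hypothetical container */
	int pending;
	struct delayed_work work;	/* was: struct work_struct work; */
};

static void my_work_fn(struct work_struct *work)
{
	/* no more void * argument: recover the container from the
	 * work_struct embedded inside the delayed_work member */
	struct my_state *st = container_of(work, struct my_state, work.work);

	st->pending = 0;
}

static void my_state_init(struct my_state *st)
{
	st->pending = 1;
	/* was: INIT_WORK(&st->work, my_work_fn, st); */
	INIT_DELAYED_WORK(&st->work, my_work_fn);
	/* a zero delay behaves like the old schedule_work() call */
	schedule_delayed_work(&st->work, 0);
}
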
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 4cef39e..8ed3e59 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -26,7 +26,7 @@
 
 #include "ieee80211softmac_priv.h"
 
-static void ieee80211softmac_auth_queue(void *data);
+static void ieee80211softmac_auth_queue(struct work_struct *work);
 
 /* Queues an auth request to the desired AP */
 int
@@ -54,14 +54,14 @@
 	auth->mac = mac;
 	auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT;
 	auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST;
-	INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth);
+	INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue);
 	
 	/* Lock (for list) */
 	spin_lock_irqsave(&mac->lock, flags);
 
 	/* add to list */
 	list_add_tail(&auth->list, &mac->auth_queue);
-	schedule_work(&auth->work);
+	schedule_delayed_work(&auth->work, 0);
 	spin_unlock_irqrestore(&mac->lock, flags);
 	
 	return 0;
@@ -70,14 +70,15 @@
 
 /* Sends an auth request to the desired AP and handles timeouts */
 static void
-ieee80211softmac_auth_queue(void *data)
+ieee80211softmac_auth_queue(struct work_struct *work)
 {
 	struct ieee80211softmac_device *mac;
 	struct ieee80211softmac_auth_queue_item *auth;
 	struct ieee80211softmac_network *net;
 	unsigned long flags;
 
-	auth = (struct ieee80211softmac_auth_queue_item *)data;
+	auth = container_of(work, struct ieee80211softmac_auth_queue_item,
+			    work.work);
 	net = auth->net;
 	mac = auth->mac;
 
@@ -118,9 +119,11 @@
 
 /* Sends a response to an auth challenge (for shared key auth). */
 static void
-ieee80211softmac_auth_challenge_response(void *_aq)
+ieee80211softmac_auth_challenge_response(struct work_struct *work)
 {
-	struct ieee80211softmac_auth_queue_item *aq = _aq;
+	struct ieee80211softmac_auth_queue_item *aq =
+		container_of(work, struct ieee80211softmac_auth_queue_item,
+			     work.work);
 
 	/* Send our response */
 	ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
@@ -158,7 +161,7 @@
 	/* Make sure that we've got an auth queue item for this request */
 	if(aq == NULL)
 	{
-		printkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but no queue item exists.\n", MAC_ARG(auth->header.addr2));
+		dprintkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but no queue item exists.\n", MAC_ARG(auth->header.addr2));
 		/* Error #? */
 		return -1;
 	}			
@@ -166,7 +169,7 @@
 	/* Check for out of order authentication */
 	if(!net->authenticating)
 	{
-		printkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but did not request authentication.\n",MAC_ARG(auth->header.addr2));
+		dprintkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but did not request authentication.\n",MAC_ARG(auth->header.addr2));
 		return -1;
 	}
 
@@ -216,10 +219,16 @@
 			net->challenge_len = *data++; 	
 			if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN)
 				net->challenge_len = WLAN_AUTH_CHALLENGE_LEN;
-			if (net->challenge != NULL)
-				kfree(net->challenge);
-			net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC);
-			memcpy(net->challenge, data, net->challenge_len);
+			kfree(net->challenge);
+			net->challenge = kmemdup(data, net->challenge_len,
+						 GFP_ATOMIC);
+			if (net->challenge == NULL) {
+				printkl(KERN_NOTICE PFX "Shared Key "
+					"Authentication failed due to "
+					"memory shortage.\n");
+				spin_unlock_irqrestore(&mac->lock, flags);
+				break;
+			}
 			aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE; 
 
 			/* We reuse the work struct from the auth request here.
@@ -228,8 +237,8 @@
 			 * we have obviously already sent the initial auth
 			 * request. */
 			cancel_delayed_work(&aq->work);
-			INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
-			schedule_work(&aq->work);
+			INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response);
+			schedule_delayed_work(&aq->work, 0);
 			spin_unlock_irqrestore(&mac->lock, flags);
 			return 0;
 		case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
@@ -328,6 +337,8 @@
 	/* can't transmit data right now... */
 	netif_carrier_off(mac->dev);
 	spin_unlock_irqrestore(&mac->lock, flags);
+
+	ieee80211softmac_try_reassoc(mac);
 }
 
 /* 
@@ -342,7 +353,7 @@
 	/* Make sure the network is authenticated */
 	if (!net->authenticated)
 	{
-		printkl(KERN_DEBUG PFX "Can't send deauthentication packet, network is not authenticated.\n");
+		dprintkl(KERN_DEBUG PFX "Can't send deauthentication packet, network is not authenticated.\n");
 		/* Error okay? */
 		return -EPERM;
 	}
@@ -376,7 +387,7 @@
 	net = ieee80211softmac_get_network_by_bssid(mac, deauth->header.addr2);
 	
 	if (net == NULL) {
-		printkl(KERN_DEBUG PFX "Received deauthentication packet from "MAC_FMT", but that network is unknown.\n",
+		dprintkl(KERN_DEBUG PFX "Received deauthentication packet from "MAC_FMT", but that network is unknown.\n",
 			MAC_ARG(deauth->header.addr2));
 		return 0;
 	}
@@ -384,7 +395,7 @@
 	/* Make sure the network is authenticated */
 	if(!net->authenticated)
 	{
-		printkl(KERN_DEBUG PFX "Can't perform deauthentication, network is not authenticated.\n");
+		dprintkl(KERN_DEBUG PFX "Can't perform deauthentication, network is not authenticated.\n");
 		/* Error okay? */
 		return -EPERM;
 	}
@@ -392,6 +403,6 @@
 	ieee80211softmac_deauth_from_net(mac, net);
 
 	/* let's try to re-associate */
-	schedule_work(&mac->associnfo.work);
+	schedule_delayed_work(&mac->associnfo.work, 0);
 	return 0;
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c
index f34fa2e..b901565 100644
--- a/net/ieee80211/softmac/ieee80211softmac_event.c
+++ b/net/ieee80211/softmac/ieee80211softmac_event.c
@@ -73,10 +73,12 @@
 
 
 static void
-ieee80211softmac_notify_callback(void *d)
+ieee80211softmac_notify_callback(struct work_struct *work)
 {
-	struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d;
-	kfree(d);
+	struct ieee80211softmac_event *pevent =
+		container_of(work, struct ieee80211softmac_event, work.work);
+	struct ieee80211softmac_event event = *pevent;
+	kfree(pevent);
 	
 	event.fun(event.mac->dev, event.event_type, event.context);
 }
@@ -99,7 +101,7 @@
 		return -ENOMEM;
 	
 	eventptr->event_type = event;
-	INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr);
+	INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback);
 	eventptr->fun = fun;
 	eventptr->context = context;
 	eventptr->mac = mac;
@@ -170,7 +172,7 @@
 				/* User may have subscribed to ANY event, so
 				 * we tell them which event triggered it. */
 				eventptr->event_type = event;
-				schedule_work(&eventptr->work);
+				schedule_delayed_work(&eventptr->work, 0);
 			}
 		}
 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index 33aff4f..256207b 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -58,8 +58,8 @@
 	INIT_LIST_HEAD(&softmac->events);
 
 	mutex_init(&softmac->associnfo.mutex);
-	INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac);
-	INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac);
+	INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work);
+	INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout);
 	softmac->start_scan = ieee80211softmac_start_scan_implementation;
 	softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation;
 	softmac->stop_scan = ieee80211softmac_stop_scan_implementation;
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
index 0642e09..4c2bba3 100644
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ b/net/ieee80211/softmac/ieee80211softmac_priv.h
@@ -78,7 +78,7 @@
 /* private definitions and prototypes */
 
 /*** prototypes from _scan.c */
-void ieee80211softmac_scan(void *sm);
+void ieee80211softmac_scan(struct work_struct *work);
 /* for internal use if scanning is needed */
 int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac);
 void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac);
@@ -149,7 +149,7 @@
 int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth);
 
 /*** prototypes from _assoc.c */
-void ieee80211softmac_assoc_work(void *d);
+void ieee80211softmac_assoc_work(struct work_struct *work);
 int ieee80211softmac_handle_assoc_response(struct net_device * dev,
 					   struct ieee80211_assoc_response * resp,
 					   struct ieee80211_network * network);
@@ -157,7 +157,7 @@
 				     struct ieee80211_disassoc * disassoc);
 int ieee80211softmac_handle_reassoc_req(struct net_device * dev,
 				        struct ieee80211_reassoc_request * reassoc);
-void ieee80211softmac_assoc_timeout(void *d);
+void ieee80211softmac_assoc_timeout(struct work_struct *work);
 void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason);
 void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
 
@@ -207,7 +207,7 @@
 	struct ieee80211softmac_device	*mac;	/* SoftMAC device */
 	u8 retry;				/* Retry limit */
 	u8 state;				/* Auth State */
-	struct work_struct		work;	/* Work queue */
+	struct delayed_work		work;	/* Work queue */
 };
 
 /* scanning information */
@@ -219,7 +219,8 @@
 	   stop:1;
 	u8 skip_flags;
 	struct completion finished;
-	struct work_struct softmac_scan;
+	struct delayed_work softmac_scan;
+	struct ieee80211softmac_device *mac;
 };
 
 /* private event struct */
@@ -227,7 +228,7 @@
 	struct list_head list;
 	int event_type;
 	void *event_context;
-	struct work_struct work;
+	struct delayed_work work;
 	notify_function_ptr fun;
 	void *context;
 	struct ieee80211softmac_device *mac;
@@ -238,4 +239,6 @@
 int ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac,
 	int event, void *event_context, notify_function_ptr fun, void *context, gfp_t gfp_mask);
 
+void ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac);
+
 #endif /* IEEE80211SOFTMAC_PRIV_H_ */
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c
index d31cf77..0c85d6c 100644
--- a/net/ieee80211/softmac/ieee80211softmac_scan.c
+++ b/net/ieee80211/softmac/ieee80211softmac_scan.c
@@ -47,7 +47,6 @@
 	sm->scanning = 1;
 	spin_unlock_irqrestore(&sm->lock, flags);
 
-	netif_tx_disable(sm->ieee->dev);
 	ret = sm->start_scan(sm->dev);
 	if (ret) {
 		spin_lock_irqsave(&sm->lock, flags);
@@ -91,12 +90,14 @@
 
 
 /* internal scanning implementation follows */
-void ieee80211softmac_scan(void *d)
+void ieee80211softmac_scan(struct work_struct *work)
 {
 	int invalid_channel;
 	u8 current_channel_idx;
-	struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d;
-	struct ieee80211softmac_scaninfo *si = sm->scaninfo;
+	struct ieee80211softmac_scaninfo *si =
+		container_of(work, struct ieee80211softmac_scaninfo,
+			     softmac_scan.work);
+	struct ieee80211softmac_device *sm = si->mac;
 	unsigned long flags;
 
 	while (!(si->stop) && (si->current_channel_idx < si->number_channels)) {
@@ -135,7 +136,8 @@
 	si->started = 0;
 	spin_unlock_irqrestore(&sm->lock, flags);
 
-	dprintk(PFX "Scanning finished\n");
+	dprintk(PFX "Scanning finished: scanned %d channels starting with channel %d\n",
+		     sm->scaninfo->number_channels, sm->scaninfo->channels[0].channel);
 	ieee80211softmac_scan_finished(sm);
 	complete_all(&sm->scaninfo->finished);
 }
@@ -146,7 +148,8 @@
 	struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC);
 	if (unlikely(!info))
 		return NULL;
-	INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac);
+	INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan);
+	info->mac = mac;
 	init_completion(&info->finished);
 	return info;
 }
@@ -183,13 +186,11 @@
 		sm->scaninfo->channels = sm->ieee->geo.bg;
 		sm->scaninfo->number_channels = sm->ieee->geo.bg_channels;
 	}
-	dprintk(PFX "Start scanning with channel: %d\n", sm->scaninfo->channels[0].channel);
-	dprintk(PFX "Scanning %d channels\n", sm->scaninfo->number_channels);
 	sm->scaninfo->current_channel_idx = 0;
 	sm->scaninfo->started = 1;
 	sm->scaninfo->stop = 0;
 	INIT_COMPLETION(sm->scaninfo->finished);
-	schedule_work(&sm->scaninfo->softmac_scan);
+	schedule_delayed_work(&sm->scaninfo->softmac_scan, 0);
 	spin_unlock_irqrestore(&sm->lock, flags);
 	return 0;
 }
@@ -248,7 +249,6 @@
 		if (net)
 			sm->set_channel(sm->dev, net->channel);
 	}
-	netif_wake_queue(sm->ieee->dev);
 	ieee80211softmac_call_events(sm, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, NULL);
 }
 EXPORT_SYMBOL_GPL(ieee80211softmac_scan_finished);
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index 23068a8..480d72c 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -122,7 +122,7 @@
 
 	sm->associnfo.associating = 1;
 	/* queue lower level code to do work (if necessary) */
-	schedule_work(&sm->associnfo.work);
+	schedule_delayed_work(&sm->associnfo.work, 0);
 out:
 	mutex_unlock(&sm->associnfo.mutex);
 
@@ -356,7 +356,7 @@
 		/* force reassociation */
 		mac->associnfo.bssvalid = 0;
 		if (mac->associnfo.associated)
-			schedule_work(&mac->associnfo.work);
+			schedule_delayed_work(&mac->associnfo.work, 0);
 	} else if (is_zero_ether_addr(data->ap_addr.sa_data)) {
 		/* the bssid we have is no longer fixed */
 		mac->associnfo.bssfixed = 0;
@@ -373,7 +373,7 @@
 		/* tell the other code that this bssid should be used no matter what */
 		mac->associnfo.bssfixed = 1;
 		/* queue associate if new bssid or (old one again and not associated) */
-		schedule_work(&mac->associnfo.work);
+		schedule_delayed_work(&mac->associnfo.work, 0);
         }
 
  out:
@@ -495,7 +495,8 @@
 			printk(KERN_DEBUG PFX "wx_set_mlme: we should know the net here...\n");
 			goto out;
 		}
-		return ieee80211softmac_deauth_req(mac, net, reason);
+		err = ieee80211softmac_deauth_req(mac, net, reason);
+		goto out;
 	case IW_MLME_DISASSOC:
 		ieee80211softmac_send_disassoc_req(mac, reason);
 		mac->associnfo.associated = 0;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 5572071..503e705 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -104,13 +104,6 @@
 
 	  If unsure, say N.
 
-config IP_ROUTE_FWMARK
-	bool "IP: use netfilter MARK value as routing key"
-	depends on IP_MULTIPLE_TABLES && NETFILTER
-	help
-	  If you say Y here, you will be able to specify different routes for
-	  packets with different mark values (see iptables(8), MARK target).
-
 config IP_ROUTE_MULTIPATH
 	bool "IP: equal cost multipath"
 	depends on IP_ADVANCED_ROUTER
@@ -625,5 +618,17 @@
 	default "reno" if DEFAULT_RENO
 	default "cubic"
 
+config TCP_MD5SIG
+	bool "TCP: MD5 Signature Option support (RFC2385) (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	select CRYPTO
+	select CRYPTO_MD5
+	---help---
+	  RFC2385 specifies a method of giving MD5 protection to TCP sessions.
+	  Its main (only?) use is to protect BGP sessions between core routers
+	  on the Internet.
+
+	  If unsure, say N.
+
 source "net/ipv4/ipvs/Kconfig"
 
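
For context on how the new TCP_MD5SIG option is used once this is enabled: keys are installed per peer from userspace through a TCP-level socket option. A minimal sketch, assuming the struct tcp_md5sig / TCP_MD5SIG definitions added alongside this option are visible to userspace (error handling and key-length checks omitted):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>	/* TCP_MD5SIG, struct tcp_md5sig */

/* Attach an RFC2385 MD5 key for segments exchanged with 'peer'. */
static int set_tcp_md5_key(int sk, const struct sockaddr_in *peer,
			   const char *key)
{
	struct tcp_md5sig md5;

	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	md5.tcpm_keylen = strlen(key);
	memcpy(md5.tcpm_key, key, md5.tcpm_keylen);

	return setsockopt(sk, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}

Both ends of a BGP session have to install the same key for the signed segments to validate.
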
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 15645c5..7a06862 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -8,7 +8,8 @@
 	     inet_timewait_sock.o inet_connection_sock.o \
 	     tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
 	     tcp_minisocks.o tcp_cong.o \
-	     datagram.o raw.o udp.o arp.o icmp.o devinet.o af_inet.o igmp.o \
+	     datagram.o raw.o udp.o udplite.o \
+	     arp.o icmp.o devinet.o af_inet.o  igmp.o \
 	     sysctl_net_ipv4.o fib_frontend.o fib_semantics.o
 
 obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index edcf093..1144900 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -104,6 +104,7 @@
 #include <net/inet_connection_sock.h>
 #include <net/tcp.h>
 #include <net/udp.h>
+#include <net/udplite.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
 #include <net/raw.h>
@@ -204,7 +205,7 @@
 	 * we can only allow the backlog to be adjusted.
 	 */
 	if (old_state != TCP_LISTEN) {
-		err = inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
+		err = inet_csk_listen_start(sk, backlog);
 		if (err)
 			goto out;
 	}
@@ -643,7 +644,7 @@
 		sin->sin_port = inet->dport;
 		sin->sin_addr.s_addr = inet->daddr;
 	} else {
-		__u32 addr = inet->rcv_saddr;
+		__be32 addr = inet->rcv_saddr;
 		if (!addr)
 			addr = inet->saddr;
 		sin->sin_port = inet->sport;
@@ -994,8 +995,8 @@
 	struct inet_sock *inet = inet_sk(sk);
 	int err;
 	struct rtable *rt;
-	__u32 old_saddr = inet->saddr;
-	__u32 new_saddr;
+	__be32 old_saddr = inet->saddr;
+	__be32 new_saddr;
 	__be32 daddr = inet->daddr;
 
 	if (inet->opt && inet->opt->srr)
@@ -1223,10 +1224,13 @@
 	tcp_statistics[1] = alloc_percpu(struct tcp_mib);
 	udp_statistics[0] = alloc_percpu(struct udp_mib);
 	udp_statistics[1] = alloc_percpu(struct udp_mib);
+	udplite_statistics[0] = alloc_percpu(struct udp_mib);
+	udplite_statistics[1] = alloc_percpu(struct udp_mib);
 	if (!
 	    (net_statistics[0] && net_statistics[1] && ip_statistics[0]
 	     && ip_statistics[1] && tcp_statistics[0] && tcp_statistics[1]
-	     && udp_statistics[0] && udp_statistics[1]))
+	     && udp_statistics[0] && udp_statistics[1]
+	     && udplite_statistics[0] && udplite_statistics[1]))
 		return -ENOMEM;
 
 	(void) tcp_mib_init();
@@ -1313,6 +1317,8 @@
 	/* Setup TCP slab cache for open requests. */
 	tcp_init();
 
+	/* Add UDP-Lite (RFC 3828) */
+	udplite4_register();
 
 	/*
 	 *	Set the ICMP layer up
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 9954297..67a5509 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -14,7 +14,7 @@
  * into IP header for icv calculation. Options are already checked
  * for validity, so paranoia is not required. */
 
-static int ip_clear_mutable_options(struct iphdr *iph, u32 *daddr)
+static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr)
 {
 	unsigned char * optptr = (unsigned char*)(iph+1);
 	int  l = iph->ihl*4 - sizeof(struct iphdr);
@@ -162,7 +162,7 @@
 	iph->frag_off = 0;
 	iph->check = 0;
 	if (ihl > sizeof(*iph)) {
-		u32 dummy;
+		__be32 dummy;
 		if (ip_clear_mutable_options(iph, &dummy))
 			goto out;
 	}
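
The u32 -> __be32 changes here and in the files below do not alter the generated code; __be32 is an annotation marking a value as network (big-endian) byte order so that sparse can warn when a conversion is missing or applied twice. A small hypothetical illustration of what the annotation buys:

#include <linux/types.h>
#include <asm/byteorder.h>

/* hypothetical helper: is this destination in 127.0.0.0/8? */
static inline int addr_is_loopback(__be32 daddr)
{
	/* convert explicitly before mixing with host-order constants;
	 * using daddr directly in arithmetic would draw a sparse
	 * "restricted __be32" warning */
	return (ntohl(daddr) >> 24) == 0x7f;
}
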
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index cfb5d3d..3981e8b 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -203,7 +203,7 @@
 	.gc_thresh3 =	1024,
 };
 
-int arp_mc_map(u32 addr, u8 *haddr, struct net_device *dev, int dir)
+int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir)
 {
 	switch (dev->type) {
 	case ARPHRD_ETHER:
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 6460233..60aafb4 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -319,6 +319,7 @@
 			entry->activity += 1;
 			atomic_inc(&entry->lsm_data->refcount);
 			secattr->cache = entry->lsm_data;
+			secattr->flags |= NETLBL_SECATTR_CACHE;
 			if (prev_entry == NULL) {
 				spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 				return 0;
@@ -377,12 +378,11 @@
 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 	if (entry == NULL)
 		return -ENOMEM;
-	entry->key = kmalloc(cipso_ptr_len, GFP_ATOMIC);
+	entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC);
 	if (entry->key == NULL) {
 		ret_val = -ENOMEM;
 		goto cache_add_failure;
 	}
-	memcpy(entry->key, cipso_ptr, cipso_ptr_len);
 	entry->key_len = cipso_ptr_len;
 	entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len);
 	atomic_inc(&secattr->cache->refcount);
@@ -447,8 +447,30 @@
  */
 int cipso_v4_doi_add(struct cipso_v4_doi *doi_def)
 {
+	u32 iter;
+
 	if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN)
 		return -EINVAL;
+	for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) {
+		switch (doi_def->tags[iter]) {
+		case CIPSO_V4_TAG_RBITMAP:
+			break;
+		case CIPSO_V4_TAG_RANGE:
+			if (doi_def->type != CIPSO_V4_MAP_PASS)
+				return -EINVAL;
+			break;
+		case CIPSO_V4_TAG_INVALID:
+			if (iter == 0)
+				return -EINVAL;
+			break;
+		case CIPSO_V4_TAG_ENUM:
+			if (doi_def->type != CIPSO_V4_MAP_PASS)
+				return -EINVAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
 
 	doi_def->valid = 1;
 	INIT_RCU_HEAD(&doi_def->rcu);
@@ -805,8 +827,7 @@
 /**
  * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network
  * @doi_def: the DOI definition
- * @host_cat: the category bitmap in host format
- * @host_cat_len: the length of the host's category bitmap in bytes
+ * @secattr: the security attributes
  * @net_cat: the zero'd out category bitmap in network/CIPSO format
  * @net_cat_len: the length of the CIPSO bitmap in bytes
  *
@@ -817,59 +838,51 @@
  *
  */
 static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def,
-				     const unsigned char *host_cat,
-				     u32 host_cat_len,
+				     const struct netlbl_lsm_secattr *secattr,
 				     unsigned char *net_cat,
 				     u32 net_cat_len)
 {
 	int host_spot = -1;
-	u32 net_spot;
+	u32 net_spot = CIPSO_V4_INV_CAT;
 	u32 net_spot_max = 0;
-	u32 host_clen_bits = host_cat_len * 8;
 	u32 net_clen_bits = net_cat_len * 8;
-	u32 host_cat_size;
-	u32 *host_cat_array;
+	u32 host_cat_size = 0;
+	u32 *host_cat_array = NULL;
 
-	switch (doi_def->type) {
-	case CIPSO_V4_MAP_PASS:
-		net_spot_max = host_cat_len;
-		while (net_spot_max > 0 && host_cat[net_spot_max - 1] == 0)
-			net_spot_max--;
-		if (net_spot_max > net_cat_len)
-			return -EINVAL;
-		memcpy(net_cat, host_cat, net_spot_max);
-		return net_spot_max;
-	case CIPSO_V4_MAP_STD:
+	if (doi_def->type == CIPSO_V4_MAP_STD) {
 		host_cat_size = doi_def->map.std->cat.local_size;
 		host_cat_array = doi_def->map.std->cat.local;
-		for (;;) {
-			host_spot = cipso_v4_bitmap_walk(host_cat,
-							 host_clen_bits,
-							 host_spot + 1,
-							 1);
-			if (host_spot < 0)
-				break;
-			if (host_spot >= host_cat_size)
-				return -EPERM;
-
-			net_spot = host_cat_array[host_spot];
-			if (net_spot >= net_clen_bits)
-				return -ENOSPC;
-			cipso_v4_bitmap_setbit(net_cat, net_spot, 1);
-
-			if (net_spot > net_spot_max)
-				net_spot_max = net_spot;
-		}
-
-		if (host_spot == -2)
-			return -EFAULT;
-
-		if (++net_spot_max % 8)
-			return net_spot_max / 8 + 1;
-		return net_spot_max / 8;
 	}
 
-	return -EINVAL;
+	for (;;) {
+		host_spot = netlbl_secattr_catmap_walk(secattr->mls_cat,
+						       host_spot + 1);
+		if (host_spot < 0)
+			break;
+
+		switch (doi_def->type) {
+		case CIPSO_V4_MAP_PASS:
+			net_spot = host_spot;
+			break;
+		case CIPSO_V4_MAP_STD:
+			if (host_spot >= host_cat_size)
+				return -EPERM;
+			net_spot = host_cat_array[host_spot];
+			if (net_spot >= CIPSO_V4_INV_CAT)
+				return -EPERM;
+			break;
+		}
+		if (net_spot >= net_clen_bits)
+			return -ENOSPC;
+		cipso_v4_bitmap_setbit(net_cat, net_spot, 1);
+
+		if (net_spot > net_spot_max)
+			net_spot_max = net_spot;
+	}
+
+	if (++net_spot_max % 8)
+		return net_spot_max / 8 + 1;
+	return net_spot_max / 8;
 }
 
 /**
@@ -877,102 +890,333 @@
  * @doi_def: the DOI definition
  * @net_cat: the category bitmap in network/CIPSO format
  * @net_cat_len: the length of the CIPSO bitmap in bytes
- * @host_cat: the zero'd out category bitmap in host format
- * @host_cat_len: the length of the host's category bitmap in bytes
+ * @secattr: the security attributes
  *
  * Description:
  * Perform a label mapping to translate a CIPSO bitmap to the correct local
- * MLS category bitmap using the given DOI definition.  Returns the minimum
- * size in bytes of the host bitmap on success, negative values otherwise.
+ * MLS category bitmap using the given DOI definition.  Returns zero on
+ * success, negative values on failure.
  *
  */
 static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def,
 				     const unsigned char *net_cat,
 				     u32 net_cat_len,
-				     unsigned char *host_cat,
-				     u32 host_cat_len)
+				     struct netlbl_lsm_secattr *secattr)
 {
-	u32 host_spot;
-	u32 host_spot_max = 0;
+	int ret_val;
 	int net_spot = -1;
+	u32 host_spot = CIPSO_V4_INV_CAT;
 	u32 net_clen_bits = net_cat_len * 8;
-	u32 host_clen_bits = host_cat_len * 8;
-	u32 net_cat_size;
-	u32 *net_cat_array;
+	u32 net_cat_size = 0;
+	u32 *net_cat_array = NULL;
 
-	switch (doi_def->type) {
-	case CIPSO_V4_MAP_PASS:
-		if (net_cat_len > host_cat_len)
-			return -EINVAL;
-		memcpy(host_cat, net_cat, net_cat_len);
-		return net_cat_len;
-	case CIPSO_V4_MAP_STD:
+	if (doi_def->type == CIPSO_V4_MAP_STD) {
 		net_cat_size = doi_def->map.std->cat.cipso_size;
 		net_cat_array = doi_def->map.std->cat.cipso;
-		for (;;) {
-			net_spot = cipso_v4_bitmap_walk(net_cat,
-							net_clen_bits,
-							net_spot + 1,
-							1);
-			if (net_spot < 0)
-				break;
-			if (net_spot >= net_cat_size ||
-			    net_cat_array[net_spot] >= CIPSO_V4_INV_CAT)
-				return -EPERM;
+	}
 
-			host_spot = net_cat_array[net_spot];
-			if (host_spot >= host_clen_bits)
-				return -ENOSPC;
-			cipso_v4_bitmap_setbit(host_cat, host_spot, 1);
-
-			if (host_spot > host_spot_max)
-				host_spot_max = host_spot;
+	for (;;) {
+		net_spot = cipso_v4_bitmap_walk(net_cat,
+						net_clen_bits,
+						net_spot + 1,
+						1);
+		if (net_spot < 0) {
+			if (net_spot == -2)
+				return -EFAULT;
+			return 0;
 		}
 
-		if (net_spot == -2)
-			return -EFAULT;
-
-		if (++host_spot_max % 8)
-			return host_spot_max / 8 + 1;
-		return host_spot_max / 8;
+		switch (doi_def->type) {
+		case CIPSO_V4_MAP_PASS:
+			host_spot = net_spot;
+			break;
+		case CIPSO_V4_MAP_STD:
+			if (net_spot >= net_cat_size)
+				return -EPERM;
+			host_spot = net_cat_array[net_spot];
+			if (host_spot >= CIPSO_V4_INV_CAT)
+				return -EPERM;
+			break;
+		}
+		ret_val = netlbl_secattr_catmap_setbit(secattr->mls_cat,
+						       host_spot,
+						       GFP_ATOMIC);
+		if (ret_val != 0)
+			return ret_val;
 	}
 
 	return -EINVAL;
 }
 
+/**
+ * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid
+ * @doi_def: the DOI definition
+ * @enumcat: category list
+ * @enumcat_len: length of the category list in bytes
+ *
+ * Description:
+ * Checks the given categories against the given DOI definition and returns a
+ * negative value if any of the categories do not have a valid mapping and a
+ * zero value if all of the categories are valid.
+ *
+ */
+static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def,
+				       const unsigned char *enumcat,
+				       u32 enumcat_len)
+{
+	u16 cat;
+	int cat_prev = -1;
+	u32 iter;
+
+	if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01)
+		return -EFAULT;
+
+	for (iter = 0; iter < enumcat_len; iter += 2) {
+		cat = ntohs(*((__be16 *)&enumcat[iter]));
+		if (cat <= cat_prev)
+			return -EFAULT;
+		cat_prev = cat;
+	}
+
+	return 0;
+}
+
+/**
+ * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network
+ * @doi_def: the DOI definition
+ * @secattr: the security attributes
+ * @net_cat: the zero'd out category list in network/CIPSO format
+ * @net_cat_len: the length of the CIPSO category list in bytes
+ *
+ * Description:
+ * Perform a label mapping to translate a local MLS category bitmap to the
+ * correct CIPSO category list using the given DOI definition.   Returns the
+ * size in bytes of the network category bitmap on success, negative values
+ * otherwise.
+ *
+ */
+static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def,
+				      const struct netlbl_lsm_secattr *secattr,
+				      unsigned char *net_cat,
+				      u32 net_cat_len)
+{
+	int cat = -1;
+	u32 cat_iter = 0;
+
+	for (;;) {
+		cat = netlbl_secattr_catmap_walk(secattr->mls_cat, cat + 1);
+		if (cat < 0)
+			break;
+		if ((cat_iter + 2) > net_cat_len)
+			return -ENOSPC;
+
+		*((__be16 *)&net_cat[cat_iter]) = htons(cat);
+		cat_iter += 2;
+	}
+
+	return cat_iter;
+}
+
+/**
+ * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host
+ * @doi_def: the DOI definition
+ * @net_cat: the category list in network/CIPSO format
+ * @net_cat_len: the length of the CIPSO bitmap in bytes
+ * @secattr: the security attributes
+ *
+ * Description:
+ * Perform a label mapping to translate a CIPSO category list to the correct
+ * local MLS category bitmap using the given DOI definition.  Returns zero on
+ * success, negative values on failure.
+ *
+ */
+static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def,
+				      const unsigned char *net_cat,
+				      u32 net_cat_len,
+				      struct netlbl_lsm_secattr *secattr)
+{
+	int ret_val;
+	u32 iter;
+
+	for (iter = 0; iter < net_cat_len; iter += 2) {
+		ret_val = netlbl_secattr_catmap_setbit(secattr->mls_cat,
+					    ntohs(*((__be16 *)&net_cat[iter])),
+					    GFP_ATOMIC);
+		if (ret_val != 0)
+			return ret_val;
+	}
+
+	return 0;
+}
+
+/**
+ * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid
+ * @doi_def: the DOI definition
+ * @rngcat: category list
+ * @rngcat_len: length of the category list in bytes
+ *
+ * Description:
+ * Checks the given categories against the given DOI definition and returns a
+ * negative value if any of the categories do not have a valid mapping and a
+ * zero value if all of the categories are valid.
+ *
+ */
+static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def,
+				      const unsigned char *rngcat,
+				      u32 rngcat_len)
+{
+	u16 cat_high;
+	u16 cat_low;
+	u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1;
+	u32 iter;
+
+	if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01)
+		return -EFAULT;
+
+	for (iter = 0; iter < rngcat_len; iter += 4) {
+		cat_high = ntohs(*((__be16 *)&rngcat[iter]));
+		if ((iter + 4) <= rngcat_len)
+			cat_low = ntohs(*((__be16 *)&rngcat[iter + 2]));
+		else
+			cat_low = 0;
+
+		if (cat_high > cat_prev)
+			return -EFAULT;
+
+		cat_prev = cat_low;
+	}
+
+	return 0;
+}
+
+/**
+ * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network
+ * @doi_def: the DOI definition
+ * @secattr: the security attributes
+ * @net_cat: the zero'd out category list in network/CIPSO format
+ * @net_cat_len: the length of the CIPSO category list in bytes
+ *
+ * Description:
+ * Perform a label mapping to translate a local MLS category bitmap to the
+ * correct CIPSO category list using the given DOI definition.   Returns the
+ * size in bytes of the network category bitmap on success, negative values
+ * otherwise.
+ *
+ */
+static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def,
+				     const struct netlbl_lsm_secattr *secattr,
+				     unsigned char *net_cat,
+				     u32 net_cat_len)
+{
+	/* The constant '16' is not random, it is the maximum number of
+	 * high/low category range pairs as permitted by the CIPSO draft based
+	 * on a maximum IPv4 header length of 60 bytes - the BUG_ON() assertion
+	 * does a sanity check to make sure we don't overflow the array. */
+	int iter = -1;
+	u16 array[16];
+	u32 array_cnt = 0;
+	u32 cat_size = 0;
+
+	BUG_ON(net_cat_len > 30);
+
+	for (;;) {
+		iter = netlbl_secattr_catmap_walk(secattr->mls_cat, iter + 1);
+		if (iter < 0)
+			break;
+		cat_size += (iter == 0 ? 0 : sizeof(u16));
+		if (cat_size > net_cat_len)
+			return -ENOSPC;
+		array[array_cnt++] = iter;
+
+		iter = netlbl_secattr_catmap_walk_rng(secattr->mls_cat, iter);
+		if (iter < 0)
+			return -EFAULT;
+		cat_size += sizeof(u16);
+		if (cat_size > net_cat_len)
+			return -ENOSPC;
+		array[array_cnt++] = iter;
+	}
+
+	for (iter = 0; array_cnt > 0;) {
+		*((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]);
+		iter += 2;
+		array_cnt--;
+		if (array[array_cnt] != 0) {
+			*((__be16 *)&net_cat[iter]) = htons(array[array_cnt]);
+			iter += 2;
+		}
+	}
+
+	return cat_size;
+}
+
+/**
+ * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host
+ * @doi_def: the DOI definition
+ * @net_cat: the category list in network/CIPSO format
+ * @net_cat_len: the length of the CIPSO bitmap in bytes
+ * @secattr: the security attributes
+ *
+ * Description:
+ * Perform a label mapping to translate a CIPSO category list to the correct
+ * local MLS category bitmap using the given DOI definition.  Returns zero on
+ * success, negative values on failure.
+ *
+ */
+static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def,
+				     const unsigned char *net_cat,
+				     u32 net_cat_len,
+				     struct netlbl_lsm_secattr *secattr)
+{
+	int ret_val;
+	u32 net_iter;
+	u16 cat_low;
+	u16 cat_high;
+
+	for(net_iter = 0; net_iter < net_cat_len; net_iter += 4) {
+		cat_high = ntohs(*((__be16 *)&net_cat[net_iter]));
+		if ((net_iter + 4) <= net_cat_len)
+			cat_low = ntohs(*((__be16 *)&net_cat[net_iter + 2]));
+		else
+			cat_low = 0;
+
+		ret_val = netlbl_secattr_catmap_setrng(secattr->mls_cat,
+						       cat_low,
+						       cat_high,
+						       GFP_ATOMIC);
+		if (ret_val != 0)
+			return ret_val;
+	}
+
+	return 0;
+}
+
 /*
  * Protocol Handling Functions
  */
 
+#define CIPSO_V4_OPT_LEN_MAX          40
 #define CIPSO_V4_HDR_LEN              6
 
 /**
  * cipso_v4_gentag_hdr - Generate a CIPSO option header
  * @doi_def: the DOI definition
- * @len: the total tag length in bytes
+ * @len: the total tag length in bytes, not including this header
  * @buf: the CIPSO option buffer
  *
  * Description:
- * Write a CIPSO header into the beginning of @buffer.  Return zero on success,
- * negative values on failure.
+ * Write a CIPSO header into the beginning of @buffer.
  *
  */
-static int cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def,
-			       u32 len,
-			       unsigned char *buf)
+static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def,
+				unsigned char *buf,
+				u32 len)
 {
-	if (CIPSO_V4_HDR_LEN + len > 40)
-		return -ENOSPC;
-
 	buf[0] = IPOPT_CIPSO;
 	buf[1] = CIPSO_V4_HDR_LEN + len;
-	*(u32 *)&buf[2] = htonl(doi_def->doi);
-
-	return 0;
+	*(__be32 *)&buf[2] = htonl(doi_def->doi);
 }
 
-#define CIPSO_V4_TAG1_CAT_LEN         30
-
 /**
  * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1)
  * @doi_def: the DOI definition
@@ -983,68 +1227,49 @@
  * Description:
  * Generate a CIPSO option using the restricted bitmap tag, tag type #1.  The
  * actual buffer length may be larger than the indicated size due to
- * translation between host and network category bitmaps.  Returns zero on
- * success, negative values on failure.
+ * translation between host and network category bitmaps.  Returns the size of
+ * the tag on success, negative values on failure.
  *
  */
 static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
 			       const struct netlbl_lsm_secattr *secattr,
-			       unsigned char **buffer,
-			       u32 *buffer_len)
+			       unsigned char *buffer,
+			       u32 buffer_len)
 {
-	int ret_val = -EPERM;
-	unsigned char *buf = NULL;
-	u32 buf_len;
+	int ret_val;
+	u32 tag_len;
 	u32 level;
 
-	if (secattr->mls_cat) {
-		buf = kzalloc(CIPSO_V4_HDR_LEN + 4 + CIPSO_V4_TAG1_CAT_LEN,
-			      GFP_ATOMIC);
-		if (buf == NULL)
-			return -ENOMEM;
+	if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0)
+		return -EPERM;
 
+	ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->mls_lvl, &level);
+	if (ret_val != 0)
+		return ret_val;
+
+	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
 		ret_val = cipso_v4_map_cat_rbm_hton(doi_def,
-						    secattr->mls_cat,
-						    secattr->mls_cat_len,
-						    &buf[CIPSO_V4_HDR_LEN + 4],
-						    CIPSO_V4_TAG1_CAT_LEN);
+						    secattr,
+						    &buffer[4],
+						    buffer_len - 4);
 		if (ret_val < 0)
-			goto gentag_failure;
+			return ret_val;
 
 		/* This will send packets using the "optimized" format when
 		 * possible as specified in section 3.4.2.6 of the
 		 * CIPSO draft. */
-		if (cipso_v4_rbm_optfmt && (ret_val > 0 && ret_val < 10))
-			ret_val = 10;
+		if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10)
+			tag_len = 14;
+		else
+			tag_len = 4 + ret_val;
+	} else
+		tag_len = 4;
 
-		buf_len = 4 + ret_val;
-	} else {
-		buf = kzalloc(CIPSO_V4_HDR_LEN + 4, GFP_ATOMIC);
-		if (buf == NULL)
-			return -ENOMEM;
-		buf_len = 4;
-	}
+	buffer[0] = 0x01;
+	buffer[1] = tag_len;
+	buffer[3] = level;
 
-	ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->mls_lvl, &level);
-	if (ret_val != 0)
-		goto gentag_failure;
-
-	ret_val = cipso_v4_gentag_hdr(doi_def, buf_len, buf);
-	if (ret_val != 0)
-		goto gentag_failure;
-
-	buf[CIPSO_V4_HDR_LEN] = 0x01;
-	buf[CIPSO_V4_HDR_LEN + 1] = buf_len;
-	buf[CIPSO_V4_HDR_LEN + 3] = level;
-
-	*buffer = buf;
-	*buffer_len = CIPSO_V4_HDR_LEN + buf_len;
-
-	return 0;
-
-gentag_failure:
-	kfree(buf);
-	return ret_val;
+	return tag_len;
 }
 
 /**
@@ -1071,32 +1296,208 @@
 	if (ret_val != 0)
 		return ret_val;
 	secattr->mls_lvl = level;
-	secattr->mls_lvl_vld = 1;
+	secattr->flags |= NETLBL_SECATTR_MLS_LVL;
 
 	if (tag_len > 4) {
-		switch (doi_def->type) {
-		case CIPSO_V4_MAP_PASS:
-			secattr->mls_cat_len = tag_len - 4;
-			break;
-		case CIPSO_V4_MAP_STD:
-			secattr->mls_cat_len =
-				doi_def->map.std->cat.local_size;
-			break;
-		}
-		secattr->mls_cat = kzalloc(secattr->mls_cat_len, GFP_ATOMIC);
+		secattr->mls_cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
 		if (secattr->mls_cat == NULL)
 			return -ENOMEM;
 
 		ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def,
 						    &tag[4],
 						    tag_len - 4,
-						    secattr->mls_cat,
-						    secattr->mls_cat_len);
-		if (ret_val < 0) {
-			kfree(secattr->mls_cat);
+						    secattr);
+		if (ret_val != 0) {
+			netlbl_secattr_catmap_free(secattr->mls_cat);
 			return ret_val;
 		}
-		secattr->mls_cat_len = ret_val;
+
+		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
+	}
+
+	return 0;
+}
+
+/**
+ * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2)
+ * @doi_def: the DOI definition
+ * @secattr: the security attributes
+ * @buffer: the option buffer
+ * @buffer_len: length of buffer in bytes
+ *
+ * Description:
+ * Generate a CIPSO option using the enumerated tag, tag type #2.  Returns the
+ * size of the tag on success, negative values on failure.
+ *
+ */
+static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def,
+				const struct netlbl_lsm_secattr *secattr,
+				unsigned char *buffer,
+				u32 buffer_len)
+{
+	int ret_val;
+	u32 tag_len;
+	u32 level;
+
+	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
+		return -EPERM;
+
+	ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->mls_lvl, &level);
+	if (ret_val != 0)
+		return ret_val;
+
+	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
+		ret_val = cipso_v4_map_cat_enum_hton(doi_def,
+						     secattr,
+						     &buffer[4],
+						     buffer_len - 4);
+		if (ret_val < 0)
+			return ret_val;
+
+		tag_len = 4 + ret_val;
+	} else
+		tag_len = 4;
+
+	buffer[0] = 0x02;
+	buffer[1] = tag_len;
+	buffer[3] = level;
+
+	return tag_len;
+}
+
+/**
+ * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag
+ * @doi_def: the DOI definition
+ * @tag: the CIPSO tag
+ * @secattr: the security attributes
+ *
+ * Description:
+ * Parse a CIPSO enumerated tag (tag type #2) and return the security
+ * attributes in @secattr.  Return zero on success, negatives values on
+ * failure.
+ *
+ */
+static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def,
+				  const unsigned char *tag,
+				  struct netlbl_lsm_secattr *secattr)
+{
+	int ret_val;
+	u8 tag_len = tag[1];
+	u32 level;
+
+	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
+	if (ret_val != 0)
+		return ret_val;
+	secattr->mls_lvl = level;
+	secattr->flags |= NETLBL_SECATTR_MLS_LVL;
+
+	if (tag_len > 4) {
+		secattr->mls_cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
+		if (secattr->mls_cat == NULL)
+			return -ENOMEM;
+
+		ret_val = cipso_v4_map_cat_enum_ntoh(doi_def,
+						     &tag[4],
+						     tag_len - 4,
+						     secattr);
+		if (ret_val != 0) {
+			netlbl_secattr_catmap_free(secattr->mls_cat);
+			return ret_val;
+		}
+
+		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
+	}
+
+	return 0;
+}
+
+/**
+ * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5)
+ * @doi_def: the DOI definition
+ * @secattr: the security attributes
+ * @buffer: the option buffer
+ * @buffer_len: length of buffer in bytes
+ *
+ * Description:
+ * Generate a CIPSO option using the ranged tag, tag type #5.  Returns the
+ * size of the tag on success, negative values on failure.
+ *
+ */
+static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def,
+			       const struct netlbl_lsm_secattr *secattr,
+			       unsigned char *buffer,
+			       u32 buffer_len)
+{
+	int ret_val;
+	u32 tag_len;
+	u32 level;
+
+	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
+		return -EPERM;
+
+	ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->mls_lvl, &level);
+	if (ret_val != 0)
+		return ret_val;
+
+	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
+		ret_val = cipso_v4_map_cat_rng_hton(doi_def,
+						    secattr,
+						    &buffer[4],
+						    buffer_len - 4);
+		if (ret_val < 0)
+			return ret_val;
+
+		tag_len = 4 + ret_val;
+	} else
+		tag_len = 4;
+
+	buffer[0] = 0x05;
+	buffer[1] = tag_len;
+	buffer[3] = level;
+
+	return tag_len;
+}
+
+/**
+ * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag
+ * @doi_def: the DOI definition
+ * @tag: the CIPSO tag
+ * @secattr: the security attributes
+ *
+ * Description:
+ * Parse a CIPSO ranged tag (tag type #5) and return the security attributes
+ * in @secattr.  Return zero on success, negatives values on failure.
+ *
+ */
+static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
+				 const unsigned char *tag,
+				 struct netlbl_lsm_secattr *secattr)
+{
+	int ret_val;
+	u8 tag_len = tag[1];
+	u32 level;
+
+	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
+	if (ret_val != 0)
+		return ret_val;
+	secattr->mls_lvl = level;
+	secattr->flags |= NETLBL_SECATTR_MLS_LVL;
+
+	if (tag_len > 4) {
+		secattr->mls_cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
+		if (secattr->mls_cat == NULL)
+			return -ENOMEM;
+
+		ret_val = cipso_v4_map_cat_rng_ntoh(doi_def,
+						    &tag[4],
+						    tag_len - 4,
+						    secattr);
+		if (ret_val != 0) {
+			netlbl_secattr_catmap_free(secattr->mls_cat);
+			return ret_val;
+		}
+
+		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
 	}
 
 	return 0;
@@ -1140,7 +1541,7 @@
 	}
 
 	rcu_read_lock();
-	doi_def = cipso_v4_doi_getdef(ntohl(*((u32 *)&opt[2])));
+	doi_def = cipso_v4_doi_search(ntohl(*((__be32 *)&opt[2])));
 	if (doi_def == NULL) {
 		err_offset = 2;
 		goto validate_return_locked;
@@ -1191,6 +1592,44 @@
 				}
 			}
 			break;
+		case CIPSO_V4_TAG_ENUM:
+			if (tag_len < 4) {
+				err_offset = opt_iter + 1;
+				goto validate_return_locked;
+			}
+
+			if (cipso_v4_map_lvl_valid(doi_def,
+						   tag[3]) < 0) {
+				err_offset = opt_iter + 3;
+				goto validate_return_locked;
+			}
+			if (tag_len > 4 &&
+			    cipso_v4_map_cat_enum_valid(doi_def,
+							&tag[4],
+							tag_len - 4) < 0) {
+				err_offset = opt_iter + 4;
+				goto validate_return_locked;
+			}
+			break;
+		case CIPSO_V4_TAG_RANGE:
+			if (tag_len < 4) {
+				err_offset = opt_iter + 1;
+				goto validate_return_locked;
+			}
+
+			if (cipso_v4_map_lvl_valid(doi_def,
+						   tag[3]) < 0) {
+				err_offset = opt_iter + 3;
+				goto validate_return_locked;
+			}
+			if (tag_len > 4 &&
+			    cipso_v4_map_cat_rng_valid(doi_def,
+						       &tag[4],
+						       tag_len - 4) < 0) {
+				err_offset = opt_iter + 4;
+				goto validate_return_locked;
+			}
+			break;
 		default:
 			err_offset = opt_iter;
 			goto validate_return_locked;
@@ -1265,7 +1704,7 @@
 {
 	int ret_val = -EPERM;
 	u32 iter;
-	unsigned char *buf = NULL;
+	unsigned char *buf;
 	u32 buf_len = 0;
 	u32 opt_len;
 	struct ip_options *opt = NULL;
@@ -1281,17 +1720,40 @@
 	if (sk == NULL)
 		return 0;
 
+	/* We allocate the maximum CIPSO option size here so we are probably
+	 * being a little wasteful, but it makes our life _much_ easier later
+	 * on and after all we are only talking about 40 bytes. */
+	buf_len = CIPSO_V4_OPT_LEN_MAX;
+	buf = kmalloc(buf_len, GFP_ATOMIC);
+	if (buf == NULL) {
+		ret_val = -ENOMEM;
+		goto socket_setattr_failure;
+	}
+
 	/* XXX - This code assumes only one tag per CIPSO option which isn't
 	 * really a good assumption to make but since we only support the MAC
 	 * tags right now it is a safe assumption. */
 	iter = 0;
 	do {
+		memset(buf, 0, buf_len);
 		switch (doi_def->tags[iter]) {
 		case CIPSO_V4_TAG_RBITMAP:
 			ret_val = cipso_v4_gentag_rbm(doi_def,
-						      secattr,
-						      &buf,
-						      &buf_len);
+						   secattr,
+						   &buf[CIPSO_V4_HDR_LEN],
+						   buf_len - CIPSO_V4_HDR_LEN);
+			break;
+		case CIPSO_V4_TAG_ENUM:
+			ret_val = cipso_v4_gentag_enum(doi_def,
+						   secattr,
+						   &buf[CIPSO_V4_HDR_LEN],
+						   buf_len - CIPSO_V4_HDR_LEN);
+			break;
+		case CIPSO_V4_TAG_RANGE:
+			ret_val = cipso_v4_gentag_rng(doi_def,
+						   secattr,
+						   &buf[CIPSO_V4_HDR_LEN],
+						   buf_len - CIPSO_V4_HDR_LEN);
 			break;
 		default:
 			ret_val = -EPERM;
@@ -1299,11 +1761,13 @@
 		}
 
 		iter++;
-	} while (ret_val != 0 &&
+	} while (ret_val < 0 &&
 		 iter < CIPSO_V4_TAG_MAXCNT &&
 		 doi_def->tags[iter] != CIPSO_V4_TAG_INVALID);
-	if (ret_val != 0)
+	if (ret_val < 0)
 		goto socket_setattr_failure;
+	cipso_v4_gentag_hdr(doi_def, buf, ret_val);
+	buf_len = CIPSO_V4_HDR_LEN + ret_val;
 
 	/* We can't use ip_options_get() directly because it makes a call to
 	 * ip_options_get_alloc() which allocates memory with GFP_KERNEL and
@@ -1370,19 +1834,33 @@
 	if (ret_val == 0)
 		return ret_val;
 
-	doi = ntohl(*(u32 *)&cipso_ptr[2]);
+	doi = ntohl(*(__be32 *)&cipso_ptr[2]);
 	rcu_read_lock();
-	doi_def = cipso_v4_doi_getdef(doi);
+	doi_def = cipso_v4_doi_search(doi);
 	if (doi_def == NULL) {
 		rcu_read_unlock();
 		return -ENOMSG;
 	}
+
+	/* XXX - This code assumes only one tag per CIPSO option which isn't
+	 * really a good assumption to make but since we only support the MAC
+	 * tags right now it is a safe assumption. */
 	switch (cipso_ptr[6]) {
 	case CIPSO_V4_TAG_RBITMAP:
 		ret_val = cipso_v4_parsetag_rbm(doi_def,
 						&cipso_ptr[6],
 						secattr);
 		break;
+	case CIPSO_V4_TAG_ENUM:
+		ret_val = cipso_v4_parsetag_enum(doi_def,
+						 &cipso_ptr[6],
+						 secattr);
+		break;
+	case CIPSO_V4_TAG_RANGE:
+		ret_val = cipso_v4_parsetag_rng(doi_def,
+						&cipso_ptr[6],
+						secattr);
+		break;
 	}
 	rcu_read_unlock();
 
@@ -1430,23 +1908,30 @@
 	u32 doi;
 	struct cipso_v4_doi *doi_def;
 
-	if (!CIPSO_V4_OPTEXIST(skb))
-		return -ENOMSG;
 	cipso_ptr = CIPSO_V4_OPTPTR(skb);
 	if (cipso_v4_cache_check(cipso_ptr, cipso_ptr[1], secattr) == 0)
 		return 0;
 
-	doi = ntohl(*(u32 *)&cipso_ptr[2]);
+	doi = ntohl(*(__be32 *)&cipso_ptr[2]);
 	rcu_read_lock();
-	doi_def = cipso_v4_doi_getdef(doi);
+	doi_def = cipso_v4_doi_search(doi);
 	if (doi_def == NULL)
 		goto skbuff_getattr_return;
+
+	/* XXX - This code assumes only one tag per CIPSO option which isn't
+	 * really a good assumption to make but since we only support the MAC
+	 * tags right now it is a safe assumption. */
 	switch (cipso_ptr[6]) {
 	case CIPSO_V4_TAG_RBITMAP:
 		ret_val = cipso_v4_parsetag_rbm(doi_def,
 						&cipso_ptr[6],
 						secattr);
 		break;
+	case CIPSO_V4_TAG_ENUM:
+		ret_val = cipso_v4_parsetag_enum(doi_def,
+						 &cipso_ptr[6],
+						 secattr);
+		break;
 	}
 
 skbuff_getattr_return:
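
For reference, the option that cipso_v4_gentag_hdr() and the cipso_v4_gentag_*() helpers above build has the following byte layout (offsets taken from the code in this hunk):

/*
 *  buf[0]     IPOPT_CIPSO (134)
 *  buf[1]     CIPSO_V4_HDR_LEN + tag_len   (total option length)
 *  buf[2..5]  DOI, 32 bits, network byte order
 *  buf[6]     tag type: 0x01 rbitmap, 0x02 enumerated, 0x05 ranged
 *  buf[7]     tag_len (4 plus the encoded category bytes)
 *  buf[8]     alignment octet, left zero
 *  buf[9]     mapped sensitivity level
 *  buf[10..]  categories in the tag's own encoding
 *
 * The whole option must fit in the 40 bytes of IPv4 option space,
 * which is why the buffer is sized to CIPSO_V4_OPT_LEN_MAX.
 */
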
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 7602c79..2fd8991 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -577,20 +577,20 @@
  *	Determine a default network mask, based on the IP address.
  */
 
-static __inline__ int inet_abc_len(u32 addr)
+static __inline__ int inet_abc_len(__be32 addr)
 {
 	int rc = -1;	/* Something else, probably a multicast. */
 
   	if (ZERONET(addr))
   		rc = 0;
 	else {
-		addr = ntohl(addr);
+		__u32 haddr = ntohl(addr);
 
-		if (IN_CLASSA(addr))
+		if (IN_CLASSA(haddr))
 			rc = 8;
-		else if (IN_CLASSB(addr))
+		else if (IN_CLASSB(haddr))
 			rc = 16;
-		else if (IN_CLASSC(addr))
+		else if (IN_CLASSC(haddr))
 			rc = 24;
 	}
 
@@ -1120,6 +1120,16 @@
 	.notifier_call =inetdev_event,
 };
 
+static inline size_t inet_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
+	       + nla_total_size(4) /* IFA_ADDRESS */
+	       + nla_total_size(4) /* IFA_LOCAL */
+	       + nla_total_size(4) /* IFA_BROADCAST */
+	       + nla_total_size(4) /* IFA_ANYCAST */
+	       + nla_total_size(IFNAMSIZ); /* IFA_LABEL */
+}
+
 static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
 			    u32 pid, u32 seq, int event, unsigned int flags)
 {
@@ -1208,15 +1218,13 @@
 	u32 seq = nlh ? nlh->nlmsg_seq : 0;
 	int err = -ENOBUFS;
 
-	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
 	if (skb == NULL)
 		goto errout;
 
 	err = inet_fill_ifaddr(skb, ifa, pid, seq, event, 0);
-	if (err < 0) {
-		kfree_skb(skb);
-		goto errout;
-	}
+	/* failure implies BUG in inet_nlmsg_size() */
+	BUG_ON(err < 0);
 
 	err = rtnl_notify(skb, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
 errout:
@@ -1556,12 +1564,12 @@
 {
 	int i;
 	struct net_device *dev = in_dev ? in_dev->dev : NULL;
-	struct devinet_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
+	struct devinet_sysctl_table *t = kmemdup(&devinet_sysctl, sizeof(*t),
+						 GFP_KERNEL);
 	char *dev_name = NULL;
 
 	if (!t)
 		return;
-	memcpy(t, &devinet_sysctl, sizeof(*t));
 	for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
 		t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
 		t->devinet_vars[i].de = NULL;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index b5c205b..f2c6776 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -67,7 +67,7 @@
 	if (x->encap) {
 		struct xfrm_encap_tmpl *encap = x->encap;
 		struct udphdr *uh;
-		u32 *udpdata32;
+		__be32 *udpdata32;
 
 		uh = (struct udphdr *)esph;
 		uh->source = encap->encap_sport;
@@ -81,7 +81,7 @@
 			esph = (struct ip_esp_hdr *)(uh + 1);
 			break;
 		case UDP_ENCAP_ESPINUDP_NON_IKE:
-			udpdata32 = (u32 *)(uh + 1);
+			udpdata32 = (__be32 *)(uh + 1);
 			udpdata32[0] = udpdata32[1] = 0;
 			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
 			break;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index af0190d..d47b72a 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -768,8 +768,8 @@
 {
 	
 	struct fib_result       res;
-	struct flowi            fl = { .nl_u = { .ip4_u = { .daddr = frn->fl_addr, 
-							    .fwmark = frn->fl_fwmark,
+	struct flowi            fl = { .mark = frn->fl_mark,
+				       .nl_u = { .ip4_u = { .daddr = frn->fl_addr,
 							    .tos = frn->fl_tos,
 							    .scope = frn->fl_scope } } };
 	if (tb) {
@@ -811,7 +811,6 @@
 	
 	pid = nlh->nlmsg_pid;           /*pid of sending process */
 	NETLINK_CB(skb).pid = 0;         /* from kernel */
-	NETLINK_CB(skb).dst_pid = pid;
 	NETLINK_CB(skb).dst_group = 0;  /* unicast */
 	netlink_unicast(sk, skb, pid, MSG_DONTWAIT);
 }    
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 107bb6c..648f47c 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -45,8 +45,8 @@
 
 #include "fib_lookup.h"
 
-static kmem_cache_t *fn_hash_kmem __read_mostly;
-static kmem_cache_t *fn_alias_kmem __read_mostly;
+static struct kmem_cache *fn_hash_kmem __read_mostly;
+static struct kmem_cache *fn_alias_kmem __read_mostly;
 
 struct fib_node {
 	struct hlist_node	fn_hash;
@@ -485,13 +485,13 @@
 		goto out;
 
 	err = -ENOBUFS;
-	new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
 	if (new_fa == NULL)
 		goto out;
 
 	new_f = NULL;
 	if (!f) {
-		new_f = kmem_cache_alloc(fn_hash_kmem, SLAB_KERNEL);
+		new_f = kmem_cache_alloc(fn_hash_kmem, GFP_KERNEL);
 		if (new_f == NULL)
 			goto out_free_new_fa;
 
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 0852b9c..b837c33 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -44,10 +44,6 @@
 	__be32			srcmask;
 	__be32			dst;
 	__be32			dstmask;
-#ifdef CONFIG_IP_ROUTE_FWMARK
-	u32			fwmark;
-	u32			fwmask;
-#endif
 #ifdef CONFIG_NET_CLS_ROUTE
 	u32			tclassid;
 #endif
@@ -160,11 +156,6 @@
 	if (r->tos && (r->tos != fl->fl4_tos))
 		return 0;
 
-#ifdef CONFIG_IP_ROUTE_FWMARK
-	if ((r->fwmark ^ fl->fl4_fwmark) & r->fwmask)
-		return 0;
-#endif
-
 	return 1;
 }
 
@@ -179,14 +170,10 @@
 }
 
 static struct nla_policy fib4_rule_policy[FRA_MAX+1] __read_mostly = {
-	[FRA_IFNAME]	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
-	[FRA_PRIORITY]	= { .type = NLA_U32 },
+	FRA_GENERIC_POLICY,
 	[FRA_SRC]	= { .type = NLA_U32 },
 	[FRA_DST]	= { .type = NLA_U32 },
-	[FRA_FWMARK]	= { .type = NLA_U32 },
-	[FRA_FWMASK]	= { .type = NLA_U32 },
 	[FRA_FLOW]	= { .type = NLA_U32 },
-	[FRA_TABLE]	= { .type = NLA_U32 },
 };
 
 static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
@@ -220,20 +207,6 @@
 	if (tb[FRA_DST])
 		rule4->dst = nla_get_be32(tb[FRA_DST]);
 
-#ifdef CONFIG_IP_ROUTE_FWMARK
-	if (tb[FRA_FWMARK]) {
-		rule4->fwmark = nla_get_u32(tb[FRA_FWMARK]);
-		if (rule4->fwmark)
-			/* compatibility: if the mark value is non-zero all bits
-			 * are compared unless a mask is explicitly specified.
-			 */
-			rule4->fwmask = 0xFFFFFFFF;
-	}
-
-	if (tb[FRA_FWMASK])
-		rule4->fwmask = nla_get_u32(tb[FRA_FWMASK]);
-#endif
-
 #ifdef CONFIG_NET_CLS_ROUTE
 	if (tb[FRA_FLOW])
 		rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
@@ -264,14 +237,6 @@
 	if (frh->tos && (rule4->tos != frh->tos))
 		return 0;
 
-#ifdef CONFIG_IP_ROUTE_FWMARK
-	if (tb[FRA_FWMARK] && (rule4->fwmark != nla_get_u32(tb[FRA_FWMARK])))
-		return 0;
-
-	if (tb[FRA_FWMASK] && (rule4->fwmask != nla_get_u32(tb[FRA_FWMASK])))
-		return 0;
-#endif
-
 #ifdef CONFIG_NET_CLS_ROUTE
 	if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW])))
 		return 0;
@@ -296,14 +261,6 @@
 	frh->src_len = rule4->src_len;
 	frh->tos = rule4->tos;
 
-#ifdef CONFIG_IP_ROUTE_FWMARK
-	if (rule4->fwmark)
-		NLA_PUT_U32(skb, FRA_FWMARK, rule4->fwmark);
-
-	if (rule4->fwmask || rule4->fwmark)
-		NLA_PUT_U32(skb, FRA_FWMASK, rule4->fwmask);
-#endif
-
 	if (rule4->dst_len)
 		NLA_PUT_BE32(skb, FRA_DST, rule4->dst);
 
@@ -342,6 +299,13 @@
 	return 0;
 }
 
+static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule)
+{
+	return nla_total_size(4) /* dst */
+	       + nla_total_size(4) /* src */
+	       + nla_total_size(4); /* flow */
+}
+
 static struct fib_rules_ops fib4_rules_ops = {
 	.family		= AF_INET,
 	.rule_size	= sizeof(struct fib4_rule),
@@ -351,6 +315,7 @@
 	.compare	= fib4_rule_compare,
 	.fill		= fib4_rule_fill,
 	.default_pref	= fib4_rule_default_pref,
+	.nlmsg_payload	= fib4_rule_nlmsg_payload,
 	.nlgroup	= RTNLGRP_IPV4_RULE,
 	.policy		= fib4_rule_policy,
 	.rules_list	= &fib4_rules,
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 884d176..e63b8a9 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -273,25 +273,49 @@
 	return -1;
 }
 
+static inline size_t fib_nlmsg_size(struct fib_info *fi)
+{
+	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
+			 + nla_total_size(4) /* RTA_TABLE */
+			 + nla_total_size(4) /* RTA_DST */
+			 + nla_total_size(4) /* RTA_PRIORITY */
+			 + nla_total_size(4); /* RTA_PREFSRC */
+
+	/* space for nested metrics */
+	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
+
+	if (fi->fib_nhs) {
+		/* Also handles the special case fib_nhs == 1 */
+
+		/* each nexthop is packed in an attribute */
+		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));
+
+		/* may contain flow and gateway attribute */
+		nhsize += 2 * nla_total_size(4);
+
+		/* all nexthops are packed in a nested attribute */
+		payload += nla_total_size(fi->fib_nhs * nhsize);
+	}
+
+	return payload;
+}
+
 void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
 	       int dst_len, u32 tb_id, struct nl_info *info)
 {
 	struct sk_buff *skb;
-	int payload = sizeof(struct rtmsg) + 256;
 	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
 	int err = -ENOBUFS;
 
-	skb = nlmsg_new(nlmsg_total_size(payload), GFP_KERNEL);
+	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
 	if (skb == NULL)
 		goto errout;
 
 	err = fib_dump_info(skb, info->pid, seq, event, tb_id,
 			    fa->fa_type, fa->fa_scope, key, dst_len,
 			    fa->fa_tos, fa->fa_info, 0);
-	if (err < 0) {
-		kfree_skb(skb);
-		goto errout;
-	}
+	/* failure implies BUG in fib_nlmsg_size() */
+	BUG_ON(err < 0);
 
 	err = rtnl_notify(skb, info->pid, RTNLGRP_IPV4_ROUTE,
 			  info->nlh, GFP_KERNEL);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index d17990e..cfb249c 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -172,7 +172,7 @@
 static struct tnode *halve(struct trie *t, struct tnode *tn);
 static void tnode_free(struct tnode *tn);
 
-static kmem_cache_t *fn_alias_kmem __read_mostly;
+static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct trie *trie_local = NULL, *trie_main = NULL;
 
 
@@ -1187,7 +1187,7 @@
 			u8 state;
 
 			err = -ENOBUFS;
-			new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+			new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
 			if (new_fa == NULL)
 				goto out;
 
@@ -1232,7 +1232,7 @@
 		goto out;
 
 	err = -ENOBUFS;
-	new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
 	if (new_fa == NULL)
 		goto out;
 
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index b39a37a..40cf0d0 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -332,7 +332,7 @@
 			  struct sk_buff *skb)
 {
 	struct icmp_bxm *icmp_param = (struct icmp_bxm *)from;
-	unsigned int csum;
+	__wsum csum;
 
 	csum = skb_copy_and_csum_bits(icmp_param->skb,
 				      icmp_param->offset + offset,
@@ -356,7 +356,7 @@
 		ip_flush_pending_frames(icmp_socket->sk);
 	else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
 		struct icmphdr *icmph = skb->h.icmph;
-		unsigned int csum = 0;
+		__wsum csum = 0;
 		struct sk_buff *skb1;
 
 		skb_queue_walk(&icmp_socket->sk->sk_write_queue, skb1) {
@@ -931,7 +931,7 @@
 
 	switch (skb->ip_summed) {
 	case CHECKSUM_COMPLETE:
-		if (!(u16)csum_fold(skb->csum))
+		if (!csum_fold(skb->csum))
 			break;
 		/* fall through */
 	case CHECKSUM_NONE:
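
These checksum hunks switch the partial sums to __wsum and drop the explicit (u16) cast because csum_fold() now returns a typed __sum16. For reference, a plain userspace sketch of the fold itself, with uint32_t/uint16_t standing in for the kernel's annotated types:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit partial one's-complement sum down to the final 16-bit
 * checksum; two rounds suffice because the second addition cannot carry. */
static uint16_t fold_csum(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* a sum equivalent to 0xffff folds to 0, i.e. "checksum verified" */
	printf("0x%04x\n", fold_csum(0x0001fffe));
	return 0;
}

A result of zero from the fold means the packet checksummed correctly, which is exactly what the CHECKSUM_COMPLETE branches above test for.
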
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6eee716..0017ccb 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -932,7 +932,7 @@
 
 	switch (skb->ip_summed) {
 	case CHECKSUM_COMPLETE:
-		if (!(u16)csum_fold(skb->csum))
+		if (!csum_fold(skb->csum))
 			break;
 		/* fall through */
 	case CHECKSUM_NONE:
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 96bbe2a..9d68837 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -343,7 +343,7 @@
 EXPORT_SYMBOL_GPL(inet_csk_route_req);
 
 static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
-				 const u32 rnd, const u16 synq_hsize)
+				 const u32 rnd, const u32 synq_hsize)
 {
 	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
 }
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 244c4f4..8c79c8a 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -27,11 +27,11 @@
  * Allocate and initialize a new local port bind bucket.
  * The bindhash mutex for snum's hash chain must be held here.
  */
-struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
+struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
 						 struct inet_bind_hashbucket *head,
 						 const unsigned short snum)
 {
-	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, SLAB_ATOMIC);
+	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
 
 	if (tb != NULL) {
 		tb->port      = snum;
@@ -45,7 +45,7 @@
 /*
  * Caller must hold hashbucket lock for this tb with local BH disabled
  */
-void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb)
+void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
 {
 	if (hlist_empty(&tb->owners)) {
 		__hlist_del(&tb->node);
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index cdd8053..9f414e3 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -91,7 +91,7 @@
 {
 	struct inet_timewait_sock *tw =
 		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
-				 SLAB_ATOMIC);
+				 GFP_ATOMIC);
 	if (tw != NULL) {
 		const struct inet_sock *inet = inet_sk(sk);
 
@@ -178,7 +178,6 @@
 	need_timer = 0;
 	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
 		twdr->thread_slots |= (1 << twdr->slot);
-		mb();
 		schedule_work(&twdr->twkill_work);
 		need_timer = 1;
 	} else {
@@ -197,9 +196,10 @@
 
 extern void twkill_slots_invalid(void);
 
-void inet_twdr_twkill_work(void *data)
+void inet_twdr_twkill_work(struct work_struct *work)
 {
-	struct inet_timewait_death_row *twdr = data;
+	struct inet_timewait_death_row *twdr =
+		container_of(work, struct inet_timewait_death_row, twkill_work);
 	int i;
 
 	if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
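
inet_twdr_twkill_work() now follows the reworked workqueue convention: the handler is handed the struct work_struct itself and recovers its context with container_of() instead of receiving a void * cookie. A self-contained sketch of that recovery step, with simplified stand-in types (the kernel macro additionally type-checks the member):

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(); the kernel version also type-checks the member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {
	int pending;
};

struct death_row {
	int slot;
	struct work_item twkill_work;	/* embedded, as in the kernel struct */
};

static void twkill_handler(struct work_item *work)
{
	/* recover the enclosing structure from the embedded member */
	struct death_row *twdr = container_of(work, struct death_row, twkill_work);

	printf("servicing slot %d\n", twdr->slot);
}

int main(void)
{
	struct death_row dr = { .slot = 3 };

	twkill_handler(&dr.twkill_work);	/* prints "servicing slot 3" */
	return 0;
}

The same conversion shows up below in ipvs, where defense_work becomes a DECLARE_DELAYED_WORK and its handler likewise takes a struct work_struct *.
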
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index f072f38..711eb6d 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -73,7 +73,7 @@
 /* Exported for inet_getid inline function.  */
 DEFINE_SPINLOCK(inet_peer_idlock);
 
-static kmem_cache_t *peer_cachep __read_mostly;
+static struct kmem_cache *peer_cachep __read_mostly;
 
 #define node_height(x) x->avl_height
 static struct inet_peer peer_fake_node = {
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d5b5dec..476cb60 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -144,7 +144,7 @@
  */
 
 #define HASH_SIZE  16
-#define HASH(addr) ((addr^(addr>>4))&0xF)
+#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
 
 static struct ip_tunnel *tunnels[4][HASH_SIZE];
 
@@ -157,7 +157,7 @@
 
 /* Given src, dst and key, find appropriate for input tunnel. */
 
-static struct ip_tunnel * ipgre_tunnel_lookup(u32 remote, u32 local, u32 key)
+static struct ip_tunnel * ipgre_tunnel_lookup(__be32 remote, __be32 local, __be32 key)
 {
 	unsigned h0 = HASH(remote);
 	unsigned h1 = HASH(key);
@@ -194,9 +194,9 @@
 
 static struct ip_tunnel **ipgre_bucket(struct ip_tunnel *t)
 {
-	u32 remote = t->parms.iph.daddr;
-	u32 local = t->parms.iph.saddr;
-	u32 key = t->parms.i_key;
+	__be32 remote = t->parms.iph.daddr;
+	__be32 local = t->parms.iph.saddr;
+	__be32 key = t->parms.i_key;
 	unsigned h = HASH(key);
 	int prio = 0;
 
@@ -236,9 +236,9 @@
 
 static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int create)
 {
-	u32 remote = parms->iph.daddr;
-	u32 local = parms->iph.saddr;
-	u32 key = parms->i_key;
+	__be32 remote = parms->iph.daddr;
+	__be32 local = parms->iph.saddr;
+	__be32 key = parms->i_key;
 	struct ip_tunnel *t, **tp, *nt;
 	struct net_device *dev;
 	unsigned h = HASH(key);
@@ -319,12 +319,12 @@
  */
 
 	struct iphdr *iph = (struct iphdr*)skb->data;
-	u16	     *p = (u16*)(skb->data+(iph->ihl<<2));
+	__be16	     *p = (__be16*)(skb->data+(iph->ihl<<2));
 	int grehlen = (iph->ihl<<2) + 4;
 	int type = skb->h.icmph->type;
 	int code = skb->h.icmph->code;
 	struct ip_tunnel *t;
-	u16 flags;
+	__be16 flags;
 
 	flags = p[0];
 	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
@@ -370,7 +370,7 @@
 	}
 
 	read_lock(&ipgre_lock);
-	t = ipgre_tunnel_lookup(iph->daddr, iph->saddr, (flags&GRE_KEY) ? *(((u32*)p) + (grehlen>>2) - 1) : 0);
+	t = ipgre_tunnel_lookup(iph->daddr, iph->saddr, (flags&GRE_KEY) ? *(((__be32*)p) + (grehlen>>2) - 1) : 0);
 	if (t == NULL || t->parms.iph.daddr == 0 || MULTICAST(t->parms.iph.daddr))
 		goto out;
 
@@ -388,14 +388,14 @@
 #else
 	struct iphdr *iph = (struct iphdr*)dp;
 	struct iphdr *eiph;
-	u16	     *p = (u16*)(dp+(iph->ihl<<2));
+	__be16	     *p = (__be16*)(dp+(iph->ihl<<2));
 	int type = skb->h.icmph->type;
 	int code = skb->h.icmph->code;
 	int rel_type = 0;
 	int rel_code = 0;
 	__be32 rel_info = 0;
 	__u32 n = 0;
-	u16 flags;
+	__be16 flags;
 	int grehlen = (iph->ihl<<2) + 4;
 	struct sk_buff *skb2;
 	struct flowi fl;
@@ -556,9 +556,9 @@
 {
 	struct iphdr *iph;
 	u8     *h;
-	u16    flags;
-	u16    csum = 0;
-	u32    key = 0;
+	__be16    flags;
+	__sum16   csum = 0;
+	__be32 key = 0;
 	u32    seqno = 0;
 	struct ip_tunnel *tunnel;
 	int    offset = 4;
@@ -568,7 +568,7 @@
 
 	iph = skb->nh.iph;
 	h = skb->data;
-	flags = *(u16*)h;
+	flags = *(__be16*)h;
 
 	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
 		/* - Version must be 0.
@@ -580,7 +580,7 @@
 		if (flags&GRE_CSUM) {
 			switch (skb->ip_summed) {
 			case CHECKSUM_COMPLETE:
-				csum = (u16)csum_fold(skb->csum);
+				csum = csum_fold(skb->csum);
 				if (!csum)
 					break;
 				/* fall through */
@@ -592,11 +592,11 @@
 			offset += 4;
 		}
 		if (flags&GRE_KEY) {
-			key = *(u32*)(h + offset);
+			key = *(__be32*)(h + offset);
 			offset += 4;
 		}
 		if (flags&GRE_SEQ) {
-			seqno = ntohl(*(u32*)(h + offset));
+			seqno = ntohl(*(__be32*)(h + offset));
 			offset += 4;
 		}
 	}
@@ -605,7 +605,7 @@
 	if ((tunnel = ipgre_tunnel_lookup(iph->saddr, iph->daddr, key)) != NULL) {
 		secpath_reset(skb);
 
-		skb->protocol = *(u16*)(h + 2);
+		skb->protocol = *(__be16*)(h + 2);
 		/* WCCP version 1 and 2 protocol decoding.
 		 * - Change protocol to IP
 		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
@@ -673,13 +673,13 @@
 	struct iphdr  *old_iph = skb->nh.iph;
 	struct iphdr  *tiph;
 	u8     tos;
-	u16    df;
+	__be16 df;
 	struct rtable *rt;     			/* Route to the other host */
 	struct net_device *tdev;			/* Device to other host */
 	struct iphdr  *iph;			/* Our new IP header */
 	int    max_headroom;			/* The extra header space needed */
 	int    gre_hlen;
-	u32    dst;
+	__be32 dst;
 	int    mtu;
 
 	if (tunnel->recursion++) {
@@ -860,11 +860,11 @@
 			iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
 	}
 
-	((u16*)(iph+1))[0] = tunnel->parms.o_flags;
-	((u16*)(iph+1))[1] = skb->protocol;
+	((__be16*)(iph+1))[0] = tunnel->parms.o_flags;
+	((__be16*)(iph+1))[1] = skb->protocol;
 
 	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
-		u32 *ptr = (u32*)(((u8*)iph) + tunnel->hlen - 4);
+		__be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
 
 		if (tunnel->parms.o_flags&GRE_SEQ) {
 			++tunnel->o_seqno;
@@ -877,7 +877,7 @@
 		}
 		if (tunnel->parms.o_flags&GRE_CSUM) {
 			*ptr = 0;
-			*(__u16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
+			*(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
 		}
 	}
 
@@ -1068,7 +1068,7 @@
 {
 	struct ip_tunnel *t = netdev_priv(dev);
 	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
-	u16 *p = (u16*)(iph+1);
+	__be16 *p = (__be16*)(iph+1);
 
 	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
 	p[0]		= t->parms.o_flags;
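
The GRE conversions above are pure annotation work: __be32/__be16 tell sparse which quantities are network byte order, and __force casts mark the few places, such as the tunnel HASH() macro, where mixing them with host-order arithmetic is intentional. A rough userspace approximation of how those annotations behave, assuming the usual __CHECKER__ convention; with an ordinary compiler the attributes compile away and the typedefs are plain integers:

#include <stdint.h>

#ifdef __CHECKER__				/* defined only when sparse runs */
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef uint16_t __bitwise be16;	/* stand-ins for the kernel's __be16/__be32 */
typedef uint32_t __bitwise be32;

/* Deliberate byte-order-agnostic use of a big-endian value, as in the tunnel
 * HASH() macros: hashing does not care about byte order, so the conversion
 * back to a plain integer is forced explicitly. */
#define TUNNEL_HASH(addr) \
	((((__force uint32_t)(addr)) ^ ((__force uint32_t)(addr) >> 4)) & 0xF)

int main(void)
{
	be32 daddr = (__force be32)0x0a00002aU;	/* arbitrary demo value */

	return (int)TUNNEL_HASH(daddr);
}
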
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index fc195a4..a35209d 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -53,6 +53,7 @@
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <linux/errno.h>
+#include <linux/highmem.h>
 
 #include <linux/socket.h>
 #include <linux/sockios.h>
@@ -288,9 +289,8 @@
 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
-int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
+int ip_queue_xmit(struct sk_buff *skb, struct sock *sk, int ipfragok)
 {
-	struct sock *sk = skb->sk;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_options *opt = inet->opt;
 	struct rtable *rt;
@@ -342,7 +342,7 @@
 
 	/* OK, we know where to send it, allocate and build IP header. */
 	iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
-	*((__u16 *)iph)	= htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
+	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
 	iph->tot_len = htons(skb->len);
 	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
 		iph->frag_off = htons(IP_DF);
@@ -386,6 +386,7 @@
 	dst_release(to->dst);
 	to->dst = dst_clone(from->dst);
 	to->dev = from->dev;
+	to->mark = from->mark;
 
 	/* Copy the flags to each fragment. */
 	IPCB(to)->flags = IPCB(from)->flags;
@@ -394,7 +395,6 @@
 	to->tc_index = from->tc_index;
 #endif
 #ifdef CONFIG_NETFILTER
-	to->nfmark = from->nfmark;
 	/* Connection association is same as pre-frag packet */
 	nf_conntrack_put(to->nfct);
 	to->nfct = from->nfct;
@@ -683,7 +683,7 @@
 		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
 			return -EFAULT;
 	} else {
-		unsigned int csum = 0;
+		__wsum csum = 0;
 		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
 			return -EFAULT;
 		skb->csum = csum_block_add(skb->csum, csum, odd);
@@ -691,11 +691,11 @@
 	return 0;
 }
 
-static inline unsigned int
+static inline __wsum
 csum_page(struct page *page, int offset, int copy)
 {
 	char *kaddr;
-	unsigned int csum;
+	__wsum csum;
 	kaddr = kmap(page);
 	csum = csum_partial(kaddr + offset, copy, 0);
 	kunmap(page);
@@ -1167,7 +1167,7 @@
 		}
 
 		if (skb->ip_summed == CHECKSUM_NONE) {
-			unsigned int csum;
+			__wsum csum;
 			csum = csum_page(page, offset, len);
 			skb->csum = csum_block_add(skb->csum, csum, skb->len);
 		}
@@ -1315,7 +1315,7 @@
 static int ip_reply_glue_bits(void *dptr, char *to, int offset, 
 			      int len, int odd, struct sk_buff *skb)
 {
-	unsigned int csum;
+	__wsum csum;
 
 	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
 	skb->csum = csum_block_add(skb->csum, csum, odd);
@@ -1385,7 +1385,7 @@
 		       &ipc, rt, MSG_DONTWAIT);
 	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
 		if (arg->csumoffset >= 0)
-			*((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
+			*((__sum16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
 		skb->ip_summed = CHECKSUM_NONE;
 		ip_push_pending_frames(sk);
 	}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 4b13295..57d4bae 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -355,7 +355,7 @@
 	sin = (struct sockaddr_in *)msg->msg_name;
 	if (sin) {
 		sin->sin_family = AF_INET;
-		sin->sin_addr.s_addr = *(u32*)(skb->nh.raw + serr->addr_offset);
+		sin->sin_addr.s_addr = *(__be32*)(skb->nh.raw + serr->addr_offset);
 		sin->sin_port = serr->port;
 		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
 	}
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 955a07a..afa60b9 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -101,6 +101,7 @@
 #define CONF_NAMESERVERS_MAX   3       /* Maximum number of nameservers  
                                            - '3' from resolv.h */
 
+#define NONE __constant_htonl(INADDR_NONE)
 
 /*
  * Public IP configuration
@@ -129,19 +130,19 @@
 
 static int ic_host_name_set __initdata = 0;	/* Host name set by us? */
 
-u32 ic_myaddr = INADDR_NONE;		/* My IP address */
-static u32 ic_netmask = INADDR_NONE;	/* Netmask for local subnet */
-u32 ic_gateway = INADDR_NONE;	/* Gateway IP address */
+__be32 ic_myaddr = NONE;		/* My IP address */
+static __be32 ic_netmask = NONE;	/* Netmask for local subnet */
+__be32 ic_gateway = NONE;	/* Gateway IP address */
 
-u32 ic_servaddr = INADDR_NONE;	/* Boot server IP address */
+__be32 ic_servaddr = NONE;	/* Boot server IP address */
 
-u32 root_server_addr = INADDR_NONE;	/* Address of NFS server */
+__be32 root_server_addr = NONE;	/* Address of NFS server */
 u8 root_server_path[256] = { 0, };	/* Path to mount as root */
 
 /* Persistent data: */
 
 static int ic_proto_used;			/* Protocol used, if any */
-static u32 ic_nameservers[CONF_NAMESERVERS_MAX]; /* DNS Server IP addresses */
+static __be32 ic_nameservers[CONF_NAMESERVERS_MAX]; /* DNS Server IP addresses */
 static u8 ic_domain[64];		/* DNS (not NIS) domain name */
 
 /*
@@ -172,7 +173,7 @@
 	struct net_device *dev;
 	unsigned short flags;
 	short able;
-	u32 xid;
+	__be32 xid;
 };
 
 static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */
@@ -223,7 +224,7 @@
 			d->flags = oflags;
 			d->able = able;
 			if (able & IC_BOOTP)
-				get_random_bytes(&d->xid, sizeof(u32));
+				get_random_bytes(&d->xid, sizeof(__be32));
 			else
 				d->xid = 0;
 			ic_proto_have_if |= able;
@@ -269,7 +270,7 @@
  */
 
 static inline void
-set_sockaddr(struct sockaddr_in *sin, u32 addr, u16 port)
+set_sockaddr(struct sockaddr_in *sin, __be32 addr, __be16 port)
 {
 	sin->sin_family = AF_INET;
 	sin->sin_addr.s_addr = addr;
@@ -332,7 +333,7 @@
 {
 	/* No need to setup device routes, only the default route... */
 
-	if (ic_gateway != INADDR_NONE) {
+	if (ic_gateway != NONE) {
 		struct rtentry rm;
 		int err;
 
@@ -368,10 +369,10 @@
 	if (!ic_host_name_set)
 		sprintf(init_utsname()->nodename, "%u.%u.%u.%u", NIPQUAD(ic_myaddr));
 
-	if (root_server_addr == INADDR_NONE)
+	if (root_server_addr == NONE)
 		root_server_addr = ic_servaddr;
 
-	if (ic_netmask == INADDR_NONE) {
+	if (ic_netmask == NONE) {
 		if (IN_CLASSA(ntohl(ic_myaddr)))
 			ic_netmask = htonl(IN_CLASSA_NET);
 		else if (IN_CLASSB(ntohl(ic_myaddr)))
@@ -420,7 +421,7 @@
 {
 	struct arphdr *rarp;
 	unsigned char *rarp_ptr;
-	u32 sip, tip;
+	__be32 sip, tip;
 	unsigned char *sha, *tha;		/* s for "source", t for "target" */
 	struct ic_device *d;
 
@@ -485,12 +486,12 @@
 		goto drop_unlock;
 
 	/* Discard packets which are not from specified server. */
-	if (ic_servaddr != INADDR_NONE && ic_servaddr != sip)
+	if (ic_servaddr != NONE && ic_servaddr != sip)
 		goto drop_unlock;
 
 	/* We have a winner! */
 	ic_dev = dev;
-	if (ic_myaddr == INADDR_NONE)
+	if (ic_myaddr == NONE)
 		ic_myaddr = tip;
 	ic_servaddr = sip;
 	ic_got_reply = IC_RARP;
@@ -530,13 +531,13 @@
 	u8 htype;		/* HW address type */
 	u8 hlen;		/* HW address length */
 	u8 hops;		/* Used only by gateways */
-	u32 xid;		/* Transaction ID */
-	u16 secs;		/* Seconds since we started */
-	u16 flags;		/* Just what it says */
-	u32 client_ip;		/* Client's IP address if known */
-	u32 your_ip;		/* Assigned IP address */
-	u32 server_ip;		/* (Next, e.g. NFS) Server's IP address */
-	u32 relay_ip;		/* IP address of BOOTP relay */
+	__be32 xid;		/* Transaction ID */
+	__be16 secs;		/* Seconds since we started */
+	__be16 flags;		/* Just what it says */
+	__be32 client_ip;		/* Client's IP address if known */
+	__be32 your_ip;		/* Assigned IP address */
+	__be32 server_ip;		/* (Next, e.g. NFS) Server's IP address */
+	__be32 relay_ip;		/* IP address of BOOTP relay */
 	u8 hw_addr[16];		/* Client's HW address */
 	u8 serv_name[64];	/* Server host name */
 	u8 boot_file[128];	/* Name of boot file */
@@ -576,7 +577,7 @@
 static void __init
 ic_dhcp_init_options(u8 *options)
 {
-	u8 mt = ((ic_servaddr == INADDR_NONE)
+	u8 mt = ((ic_servaddr == NONE)
 		 ? DHCPDISCOVER : DHCPREQUEST);
 	u8 *e = options;
 
@@ -666,7 +667,7 @@
 	int i;
 
 	for (i = 0; i < CONF_NAMESERVERS_MAX; i++)
-		ic_nameservers[i] = INADDR_NONE;
+		ic_nameservers[i] = NONE;
 
 	dev_add_pack(&bootp_packet_type);
 }
@@ -708,7 +709,7 @@
 	h->frag_off = htons(IP_DF);
 	h->ttl = 64;
 	h->protocol = IPPROTO_UDP;
-	h->daddr = INADDR_BROADCAST;
+	h->daddr = htonl(INADDR_BROADCAST);
 	h->check = ip_fast_csum((unsigned char *) h, h->ihl);
 
 	/* Construct UDP header */
@@ -730,8 +731,8 @@
 		b->htype = dev->type; /* can cause undefined behavior */
 	}
 	b->hlen = dev->addr_len;
-	b->your_ip = INADDR_NONE;
-	b->server_ip = INADDR_NONE;
+	b->your_ip = NONE;
+	b->server_ip = NONE;
 	memcpy(b->hw_addr, dev->dev_addr, dev->addr_len);
 	b->secs = htons(jiffies_diff / HZ);
 	b->xid = d->xid;
@@ -788,11 +789,11 @@
 
 	switch (*ext++) {
 		case 1:		/* Subnet mask */
-			if (ic_netmask == INADDR_NONE)
+			if (ic_netmask == NONE)
 				memcpy(&ic_netmask, ext+1, 4);
 			break;
 		case 3:		/* Default gateway */
-			if (ic_gateway == INADDR_NONE)
+			if (ic_gateway == NONE)
 				memcpy(&ic_gateway, ext+1, 4);
 			break;
 		case 6:		/* DNS server */
@@ -800,7 +801,7 @@
 			if (servers > CONF_NAMESERVERS_MAX)
 				servers = CONF_NAMESERVERS_MAX;
 			for (i = 0; i < servers; i++) {
-				if (ic_nameservers[i] == INADDR_NONE)
+				if (ic_nameservers[i] == NONE)
 					memcpy(&ic_nameservers[i], ext+1+4*i, 4);
 			}
 			break;
@@ -917,7 +918,7 @@
 
 #ifdef IPCONFIG_DHCP
 		if (ic_proto_enabled & IC_USE_DHCP) {
-			u32 server_id = INADDR_NONE;
+			__be32 server_id = NONE;
 			int mt = 0;
 
 			ext = &b->exten[4];
@@ -949,7 +950,7 @@
 				/* While in the process of accepting one offer,
 				 * ignore all others.
 				 */
-				if (ic_myaddr != INADDR_NONE)
+				if (ic_myaddr != NONE)
 					goto drop_unlock;
 
 				/* Let's accept that offer. */
@@ -965,7 +966,7 @@
 				 * precedence over the bootp header one if
 				 * they are different.
 				 */
-				if ((server_id != INADDR_NONE) &&
+				if ((server_id != NONE) &&
 				    (b->server_ip != server_id))
 					b->server_ip = ic_servaddr;
 				break;
@@ -979,8 +980,8 @@
 
 			default:
 				/* Urque.  Forget it*/
-				ic_myaddr = INADDR_NONE;
-				ic_servaddr = INADDR_NONE;
+				ic_myaddr = NONE;
+				ic_servaddr = NONE;
 				goto drop_unlock;
 			};
 
@@ -1004,9 +1005,9 @@
 	ic_dev = dev;
 	ic_myaddr = b->your_ip;
 	ic_servaddr = b->server_ip;
-	if (ic_gateway == INADDR_NONE && b->relay_ip)
+	if (ic_gateway == NONE && b->relay_ip)
 		ic_gateway = b->relay_ip;
-	if (ic_nameservers[0] == INADDR_NONE)
+	if (ic_nameservers[0] == NONE)
 		ic_nameservers[0] = ic_servaddr;
 	ic_got_reply = IC_BOOTP;
 
@@ -1150,7 +1151,7 @@
 #endif
 
 	if (!ic_got_reply) {
-		ic_myaddr = INADDR_NONE;
+		ic_myaddr = NONE;
 		return -1;
 	}
 
@@ -1182,12 +1183,12 @@
 		seq_printf(seq,
 			   "domain %s\n", ic_domain);
 	for (i = 0; i < CONF_NAMESERVERS_MAX; i++) {
-		if (ic_nameservers[i] != INADDR_NONE)
+		if (ic_nameservers[i] != NONE)
 			seq_printf(seq,
 				   "nameserver %u.%u.%u.%u\n",
 				   NIPQUAD(ic_nameservers[i]));
 	}
-	if (ic_servaddr != INADDR_NONE)
+	if (ic_servaddr != NONE)
 		seq_printf(seq,
 			   "bootserver %u.%u.%u.%u\n",
 			   NIPQUAD(ic_servaddr));
@@ -1213,9 +1214,9 @@
  *  need to have root_server_addr set _before_ IPConfig gets called as it
  *  can override it.
  */
-u32 __init root_nfs_parse_addr(char *name)
+__be32 __init root_nfs_parse_addr(char *name)
 {
-	u32 addr;
+	__be32 addr;
 	int octets = 0;
 	char *cp, *cq;
 
@@ -1237,7 +1238,7 @@
 		addr = in_aton(name);
 		memmove(name, cp, strlen(cp) + 1);
 	} else
-		addr = INADDR_NONE;
+		addr = NONE;
 
 	return addr;
 }
@@ -1248,7 +1249,7 @@
 
 static int __init ip_auto_config(void)
 {
-	u32 addr;
+	__be32 addr;
 
 #ifdef CONFIG_PROC_FS
 	proc_net_fops_create("pnp", S_IRUGO, &pnp_seq_fops);
@@ -1277,11 +1278,11 @@
 	 * interfaces and no default was set), use BOOTP or RARP to get the
 	 * missing values.
 	 */
-	if (ic_myaddr == INADDR_NONE ||
+	if (ic_myaddr == NONE ||
 #ifdef CONFIG_ROOT_NFS
 	    (MAJOR(ROOT_DEV) == UNNAMED_MAJOR
-	     && root_server_addr == INADDR_NONE
-	     && ic_servaddr == INADDR_NONE) ||
+	     && root_server_addr == NONE
+	     && ic_servaddr == NONE) ||
 #endif
 	    ic_first_dev->next) {
 #ifdef IPCONFIG_DYNAMIC
@@ -1334,7 +1335,7 @@
 	}
 
 	addr = root_nfs_parse_addr(root_server_path);
-	if (root_server_addr == INADDR_NONE)
+	if (root_server_addr == NONE)
 		root_server_addr = addr;
 
 	/*
@@ -1461,19 +1462,19 @@
 			switch (num) {
 			case 0:
 				if ((ic_myaddr = in_aton(ip)) == INADDR_ANY)
-					ic_myaddr = INADDR_NONE;
+					ic_myaddr = NONE;
 				break;
 			case 1:
 				if ((ic_servaddr = in_aton(ip)) == INADDR_ANY)
-					ic_servaddr = INADDR_NONE;
+					ic_servaddr = NONE;
 				break;
 			case 2:
 				if ((ic_gateway = in_aton(ip)) == INADDR_ANY)
-					ic_gateway = INADDR_NONE;
+					ic_gateway = NONE;
 				break;
 			case 3:
 				if ((ic_netmask = in_aton(ip)) == INADDR_ANY)
-					ic_netmask = INADDR_NONE;
+					ic_netmask = NONE;
 				break;
 			case 4:
 				if ((dp = strchr(ip, '.'))) {
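
The ipconfig churn above is again about types rather than values: INADDR_NONE is all-ones, so the NONE macro (__constant_htonl(INADDR_NONE)) yields the same bit pattern but with the __be32 type the newly annotated address variables expect. A quick userspace check of that invariant:

#include <assert.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	/* 0xffffffff reads the same in either byte order, so the conversion
	 * only matters to the type checker, never to the bits on the wire */
	assert(htonl(INADDR_NONE) == INADDR_NONE);
	return 0;
}
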
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 0c45565..9d719d6 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -118,7 +118,7 @@
 #include <net/xfrm.h>
 
 #define HASH_SIZE  16
-#define HASH(addr) ((addr^(addr>>4))&0xF)
+#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
 
 static int ipip_fb_tunnel_init(struct net_device *dev);
 static int ipip_tunnel_init(struct net_device *dev);
@@ -134,7 +134,7 @@
 
 static DEFINE_RWLOCK(ipip_lock);
 
-static struct ip_tunnel * ipip_tunnel_lookup(u32 remote, u32 local)
+static struct ip_tunnel * ipip_tunnel_lookup(__be32 remote, __be32 local)
 {
 	unsigned h0 = HASH(remote);
 	unsigned h1 = HASH(local);
@@ -160,8 +160,8 @@
 
 static struct ip_tunnel **ipip_bucket(struct ip_tunnel *t)
 {
-	u32 remote = t->parms.iph.daddr;
-	u32 local = t->parms.iph.saddr;
+	__be32 remote = t->parms.iph.daddr;
+	__be32 local = t->parms.iph.saddr;
 	unsigned h = 0;
 	int prio = 0;
 
@@ -203,8 +203,8 @@
 
 static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int create)
 {
-	u32 remote = parms->iph.daddr;
-	u32 local = parms->iph.saddr;
+	__be32 remote = parms->iph.daddr;
+	__be32 local = parms->iph.saddr;
 	struct ip_tunnel *t, **tp, *nt;
 	struct net_device *dev;
 	unsigned h = 0;
@@ -519,13 +519,13 @@
 	struct net_device_stats *stats = &tunnel->stat;
 	struct iphdr  *tiph = &tunnel->parms.iph;
 	u8     tos = tunnel->parms.iph.tos;
-	u16    df = tiph->frag_off;
+	__be16 df = tiph->frag_off;
 	struct rtable *rt;     			/* Route to the other host */
 	struct net_device *tdev;			/* Device to other host */
 	struct iphdr  *old_iph = skb->nh.iph;
 	struct iphdr  *iph;			/* Our new IP header */
 	int    max_headroom;			/* The extra header space needed */
-	u32    dst = tiph->daddr;
+	__be32 dst = tiph->daddr;
 	int    mtu;
 
 	if (tunnel->recursion++) {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 97cfa97..ecb5422 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -105,7 +105,7 @@
    In this case data path is free of exclusive locks at all.
  */
 
-static kmem_cache_t *mrt_cachep __read_mostly;
+static struct kmem_cache *mrt_cachep __read_mostly;
 
 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
@@ -1493,7 +1493,7 @@
         if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
 	    (pim->flags&PIM_NULL_REGISTER) ||
 	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && 
-	     (u16)csum_fold(skb_checksum(skb, 0, skb->len, 0)))) 
+	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
 		goto drop;
 
 	/* check if the inner packet is destined to mcast group */
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index e775233..6c40899 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -80,10 +80,9 @@
 	if (!pp->unregister_app)
 		return -EOPNOTSUPP;
 
-	inc = kmalloc(sizeof(struct ip_vs_app), GFP_KERNEL);
+	inc = kmemdup(app, sizeof(*inc), GFP_KERNEL);
 	if (!inc)
 		return -ENOMEM;
-	memcpy(inc, app, sizeof(*inc));
 	INIT_LIST_HEAD(&inc->p_list);
 	INIT_LIST_HEAD(&inc->incs_list);
 	inc->app = app;
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 8832eb5..8086787 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -44,7 +44,7 @@
 static struct list_head *ip_vs_conn_tab;
 
 /*  SLAB cache for IPVS connections */
-static kmem_cache_t *ip_vs_conn_cachep __read_mostly;
+static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
 
 /*  counter for current IPVS connections */
 static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index 1445bb4..3425752 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -536,9 +536,9 @@
 	return NF_STOP;
 }
 
-u16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
+__sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
 {
-	return (u16) csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
+	return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
 }
 
 static inline struct sk_buff *
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index f261616..9b93338 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -221,10 +221,10 @@
  *	Timer for checking the defense
  */
 #define DEFENSE_TIMER_PERIOD	1*HZ
-static void defense_work_handler(void *data);
-static DECLARE_WORK(defense_work, defense_work_handler, NULL);
+static void defense_work_handler(struct work_struct *work);
+static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
 
-static void defense_work_handler(void *data)
+static void defense_work_handler(struct work_struct *work)
 {
 	update_defense_level();
 	if (atomic_read(&ip_vs_dropentry))
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index 524751e..a4385a2 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -45,6 +45,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
+#include <linux/jiffies.h>
 
 /* for sysctl */
 #include <linux/fs.h>
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index 0899019..fe1af5d 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -43,6 +43,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
+#include <linux/jiffies.h>
 
 /* for sysctl */
 #include <linux/fs.h>
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index c4528b5..e844ddb 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -118,13 +118,7 @@
 int *
 ip_vs_create_timeout_table(int *table, int size)
 {
-	int *t;
-
-	t = kmalloc(size, GFP_ATOMIC);
-	if (t == NULL)
-		return NULL;
-	memcpy(t, table, size);
-	return t;
+	return kmemdup(table, size, GFP_ATOMIC);
 }
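
ip_vs_create_timeout_table() is reduced to a single kmemdup() call, and the ip_vs_app and devinet hunks make the same substitution for their GFP_KERNEL copies. A hedged userspace equivalent of the helper, built on malloc()/memcpy() with an illustrative name:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace stand-in for kmemdup(src, len, gfp): allocate len bytes and copy
 * src into them, returning NULL if the allocation fails. */
static void *dup_mem(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	int timeouts[] = { 30, 60, 120 };
	int *copy = dup_mem(timeouts, sizeof(timeouts));

	if (copy)
		printf("%d %d %d\n", copy[0], copy[1], copy[2]);
	free(copy);
	return 0;
}

Collapsing allocate-and-copy into one call also keeps the length used for the allocation and the length used for the copy from drifting apart.
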
 
 
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c
index 6ff05c3..16a9ebe 100644
--- a/net/ipv4/ipvs/ip_vs_proto_tcp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c
@@ -84,7 +84,7 @@
 	}
 
 	if (th->syn &&
-	    (svc = ip_vs_service_get(skb->nfmark, skb->nh.iph->protocol,
+	    (svc = ip_vs_service_get(skb->mark, skb->nh.iph->protocol,
 				     skb->nh.iph->daddr, th->dest))) {
 		if (ip_vs_todrop()) {
 			/*
@@ -116,9 +116,9 @@
 		     __be16 oldport, __be16 newport)
 {
 	tcph->check =
-		ip_vs_check_diff(~oldip, newip,
-				 ip_vs_check_diff(oldport ^ htons(0xFFFF),
-						  newport, tcph->check));
+		csum_fold(ip_vs_check_diff4(oldip, newip,
+				 ip_vs_check_diff2(oldport, newport,
+						~csum_unfold(tcph->check))));
 }
 
 
@@ -490,16 +490,18 @@
 static struct list_head tcp_apps[TCP_APP_TAB_SIZE];
 static DEFINE_SPINLOCK(tcp_app_lock);
 
-static inline __u16 tcp_app_hashkey(__u16 port)
+static inline __u16 tcp_app_hashkey(__be16 port)
 {
-	return ((port >> TCP_APP_TAB_BITS) ^ port) & TCP_APP_TAB_MASK;
+	return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port)
+		& TCP_APP_TAB_MASK;
 }
 
 
 static int tcp_register_app(struct ip_vs_app *inc)
 {
 	struct ip_vs_app *i;
-	__u16 hash, port = inc->port;
+	__u16 hash;
+	__be16 port = inc->port;
 	int ret = 0;
 
 	hash = tcp_app_hashkey(port);
diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c
index 691c8b6..03f0a41 100644
--- a/net/ipv4/ipvs/ip_vs_proto_udp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_udp.c
@@ -89,7 +89,7 @@
 		return 0;
 	}
 
-	if ((svc = ip_vs_service_get(skb->nfmark, skb->nh.iph->protocol,
+	if ((svc = ip_vs_service_get(skb->mark, skb->nh.iph->protocol,
 				     skb->nh.iph->daddr, uh->dest))) {
 		if (ip_vs_todrop()) {
 			/*
@@ -121,11 +121,11 @@
 		     __be16 oldport, __be16 newport)
 {
 	uhdr->check =
-		ip_vs_check_diff(~oldip, newip,
-				 ip_vs_check_diff(oldport ^ htons(0xFFFF),
-						  newport, uhdr->check));
+		csum_fold(ip_vs_check_diff4(oldip, newip,
+				 ip_vs_check_diff2(oldport, newport,
+					~csum_unfold(uhdr->check))));
 	if (!uhdr->check)
-		uhdr->check = -1;
+		uhdr->check = CSUM_MANGLED_0;
 }
 
 static int
@@ -173,7 +173,7 @@
 						cp->protocol,
 						(*pskb)->csum);
 		if (udph->check == 0)
-			udph->check = -1;
+			udph->check = CSUM_MANGLED_0;
 		IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
 			  pp->name, udph->check,
 			  (char*)&(udph->check) - (char*)udph);
@@ -228,7 +228,7 @@
 						cp->protocol,
 						(*pskb)->csum);
 		if (udph->check == 0)
-			udph->check = -1;
+			udph->check = CSUM_MANGLED_0;
 		(*pskb)->ip_summed = CHECKSUM_UNNECESSARY;
 	}
 	return 1;
@@ -282,16 +282,18 @@
 static struct list_head udp_apps[UDP_APP_TAB_SIZE];
 static DEFINE_SPINLOCK(udp_app_lock);
 
-static inline __u16 udp_app_hashkey(__u16 port)
+static inline __u16 udp_app_hashkey(__be16 port)
 {
-	return ((port >> UDP_APP_TAB_BITS) ^ port) & UDP_APP_TAB_MASK;
+	return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port)
+		& UDP_APP_TAB_MASK;
 }
 
 
 static int udp_register_app(struct ip_vs_app *inc)
 {
 	struct ip_vs_app *i;
-	__u16 hash, port = inc->port;
+	__u16 hash;
+	__be16 port = inc->port;
 	int ret = 0;
 
 	hash = udp_app_hashkey(port);
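
The checksum-update helpers in the TCP and UDP hunks above are rewritten as incremental one's-complement updates: un-fold the stored check, push the old and new address and port values through the difference helpers, then fold the result back down. A plain C sketch of that arithmetic for a single 16-bit field change; the function name is illustrative, not a kernel export:

#include <stdint.h>
#include <stdio.h>

/* Incremental one's-complement update (RFC 1624 style): recompute a 16-bit
 * checksum after one 16-bit field changes from old_val to new_val,
 * i.e. HC' = ~(~HC + ~m + m'). */
static uint16_t csum_update16(uint16_t check, uint16_t old_val, uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check + (uint16_t)~old_val + new_val;

	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries back in */
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* e.g. a NAT port rewrite from 8080 to 80 under some existing checksum */
	printf("0x%04x\n", csum_update16(0x1c46, 8080, 80));
	return 0;
}

The UDP side additionally maps a computed zero to CSUM_MANGLED_0 (all ones), since a zero UDP checksum on the wire means "no checksum present".
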
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index e2005c6..a689660 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -27,9 +27,7 @@
 		fl.nl_u.ip4_u.saddr = iph->saddr;
 		fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
 		fl.oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0;
-#ifdef CONFIG_IP_ROUTE_FWMARK
-		fl.nl_u.ip4_u.fwmark = (*pskb)->nfmark;
-#endif
+		fl.mark = (*pskb)->mark;
 		if (ip_route_output_key(&rt, &fl) != 0)
 			return -1;
 
@@ -164,17 +162,17 @@
 	return 0;
 }
 
-unsigned int nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
 			    unsigned int dataoff, u_int8_t protocol)
 {
 	struct iphdr *iph = skb->nh.iph;
-	unsigned int csum = 0;
+	__sum16 csum = 0;
 
 	switch (skb->ip_summed) {
 	case CHECKSUM_COMPLETE:
 		if (hook != NF_IP_PRE_ROUTING && hook != NF_IP_LOCAL_IN)
 			break;
-		if ((protocol == 0 && !(u16)csum_fold(skb->csum)) ||
+		if ((protocol == 0 && !csum_fold(skb->csum)) ||
 		    !csum_tcpudp_magic(iph->saddr, iph->daddr,
 			    	       skb->len - dataoff, protocol,
 				       skb->csum)) {
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index d88c292..363df99 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -6,7 +6,7 @@
 	depends on INET && NETFILTER
 
 config NF_CONNTRACK_IPV4
-	tristate "IPv4 support for new connection tracking (EXPERIMENTAL)"
+	tristate "IPv4 connection tracking support (required for NAT) (EXPERIMENTAL)"
 	depends on EXPERIMENTAL && NF_CONNTRACK
 	---help---
 	  Connection tracking keeps a record of what packets have passed
@@ -19,21 +19,18 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NF_CONNTRACK_PROC_COMPAT
+	bool "proc/sysctl compatibility with old connection tracking"
+	depends on NF_CONNTRACK_IPV4
+	default y
+	help
+	  This option enables /proc and sysctl compatibility with the old
+	  layer 3 dependent connection tracking. This is needed to keep
+	  old programs that have not been adapted to the new names working.
+
+	  If unsure, say Y.
+
 # connection tracking, helpers and protocols
-config IP_NF_CONNTRACK
-	tristate "Connection tracking (required for masq/NAT)"
-	---help---
-	  Connection tracking keeps a record of what packets have passed
-	  through your machine, in order to figure out how they are related
-	  into connections.
-
-	  This is required to do Masquerading or other kinds of Network
-	  Address Translation (except for Fast NAT).  It can also be used to
-	  enhance packet filtering (see `Connection state match support'
-	  below).
-
-	  To compile it as a module, choose M here.  If unsure, say N.
-
 config IP_NF_CT_ACCT
 	bool "Connection tracking flow accounting"
 	depends on IP_NF_CONNTRACK
@@ -315,20 +312,6 @@
 	  If you want to compile it as a module, say M here and read
 	  <file:Documentation/modules.txt>.  If unsure, say `N'.
 
-config IP_NF_MATCH_HASHLIMIT
-	tristate  'hashlimit match support'
-	depends on IP_NF_IPTABLES
-	help
-	  This option adds a new iptables `hashlimit' match.  
-
-	  As opposed to `limit', this match dynamically creates a hash table
-	  of limit buckets, based on your selection of source/destination
-	  ip addresses and/or ports.
-
-	  It enables you to express policies like `10kpps for any given
-	  destination IP' or `500pps from any given source IP'  with a single
-	  IPtables rule.
-
 # `filter', generic and specific targets
 config IP_NF_FILTER
 	tristate "Packet filtering"
@@ -404,7 +387,7 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
-# NAT + specific targets
+# NAT + specific targets: ip_conntrack
 config IP_NF_NAT
 	tristate "Full NAT"
 	depends on IP_NF_IPTABLES && IP_NF_CONNTRACK
@@ -415,14 +398,30 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+# NAT + specific targets: nf_conntrack
+config NF_NAT
+	tristate "Full NAT"
+	depends on IP_NF_IPTABLES && NF_CONNTRACK
+	help
+	  The Full NAT option allows masquerading, port forwarding and other
+	  forms of full Network Address Port Translation.  It is controlled by
+	  the `nat' table in iptables: see the man page for iptables(8).
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config IP_NF_NAT_NEEDED
 	bool
-	depends on IP_NF_NAT != n
+	depends on IP_NF_NAT
+	default y
+
+config NF_NAT_NEEDED
+	bool
+	depends on NF_NAT
 	default y
 
 config IP_NF_TARGET_MASQUERADE
 	tristate "MASQUERADE target support"
-	depends on IP_NF_NAT
+	depends on (NF_NAT || IP_NF_NAT)
 	help
 	  Masquerading is a special case of NAT: all outgoing connections are
 	  changed to seem to come from a particular interface's address, and
@@ -434,7 +433,7 @@
 
 config IP_NF_TARGET_REDIRECT
 	tristate "REDIRECT target support"
-	depends on IP_NF_NAT
+	depends on (NF_NAT || IP_NF_NAT)
 	help
 	  REDIRECT is a special case of NAT: all incoming connections are
 	  mapped onto the incoming interface's address, causing the packets to
@@ -445,7 +444,7 @@
 
 config IP_NF_TARGET_NETMAP
 	tristate "NETMAP target support"
-	depends on IP_NF_NAT
+	depends on (NF_NAT || IP_NF_NAT)
 	help
 	  NETMAP is an implementation of static 1:1 NAT mapping of network
 	  addresses. It maps the network address part, while keeping the host
@@ -456,7 +455,7 @@
 
 config IP_NF_TARGET_SAME
 	tristate "SAME target support"
-	depends on IP_NF_NAT
+	depends on (NF_NAT || IP_NF_NAT)
 	help
 	  This option adds a `SAME' target, which works like the standard SNAT
 	  target, but attempts to give clients the same IP for all connections.
@@ -478,19 +477,52 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NF_NAT_SNMP_BASIC
+	tristate "Basic SNMP-ALG support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && NF_NAT
+	---help---
+
+	  This module implements an Application Layer Gateway (ALG) for
+	  SNMP payloads.  In conjunction with NAT, it allows a network
+	  management system to access multiple private networks with
+	  conflicting addresses.  It works by modifying IP addresses
+	  inside SNMP payloads to match IP-layer NAT mapping.
+
+	  This is the "basic" form of SNMP-ALG, as described in RFC 2962.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+# If they want FTP, set to $CONFIG_IP_NF_NAT (m or y),
+# or $CONFIG_IP_NF_FTP (m or y), whichever is weaker.
+# From kconfig-language.txt:
+#
+#           <expr> '&&' <expr>                   (6)
+#
+# (6) Returns the result of min(/expr/, /expr/).
+config NF_NAT_PROTO_GRE
+	tristate
+	depends on NF_NAT && NF_CT_PROTO_GRE
+
+config IP_NF_NAT_FTP
+	tristate
+	depends on IP_NF_IPTABLES && IP_NF_CONNTRACK && IP_NF_NAT
+	default IP_NF_NAT && IP_NF_FTP
+
+config NF_NAT_FTP
+	tristate
+	depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
+	default NF_NAT && NF_CONNTRACK_FTP
+
 config IP_NF_NAT_IRC
 	tristate
 	depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n
 	default IP_NF_NAT if IP_NF_IRC=y
 	default m if IP_NF_IRC=m
 
-# If they want FTP, set to $CONFIG_IP_NF_NAT (m or y), 
-# or $CONFIG_IP_NF_FTP (m or y), whichever is weaker.  Argh.
-config IP_NF_NAT_FTP
+config NF_NAT_IRC
 	tristate
-	depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n
-	default IP_NF_NAT if IP_NF_FTP=y
-	default m if IP_NF_FTP=m
+	depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
+	default NF_NAT && NF_CONNTRACK_IRC
 
 config IP_NF_NAT_TFTP
 	tristate
@@ -498,30 +530,56 @@
 	default IP_NF_NAT if IP_NF_TFTP=y
 	default m if IP_NF_TFTP=m
 
+config NF_NAT_TFTP
+	tristate
+	depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
+	default NF_NAT && NF_CONNTRACK_TFTP
+
 config IP_NF_NAT_AMANDA
 	tristate
 	depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n
 	default IP_NF_NAT if IP_NF_AMANDA=y
 	default m if IP_NF_AMANDA=m
 
+config NF_NAT_AMANDA
+	tristate
+	depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
+	default NF_NAT && NF_CONNTRACK_AMANDA
+
 config IP_NF_NAT_PPTP
 	tristate
 	depends on IP_NF_NAT!=n && IP_NF_PPTP!=n
 	default IP_NF_NAT if IP_NF_PPTP=y
 	default m if IP_NF_PPTP=m
 
+config NF_NAT_PPTP
+	tristate
+	depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
+	default NF_NAT && NF_CONNTRACK_PPTP
+	select NF_NAT_PROTO_GRE
+
 config IP_NF_NAT_H323
 	tristate
 	depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n
 	default IP_NF_NAT if IP_NF_H323=y
 	default m if IP_NF_H323=m
 
+config NF_NAT_H323
+	tristate
+	depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
+	default NF_NAT && NF_CONNTRACK_H323
+
 config IP_NF_NAT_SIP
 	tristate
 	depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n
 	default IP_NF_NAT if IP_NF_SIP=y
 	default m if IP_NF_SIP=m
 
+config NF_NAT_SIP
+	tristate
+	depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
+	default NF_NAT && NF_CONNTRACK_SIP
+
 # mangle + specific targets
 config IP_NF_MANGLE
 	tristate "Packet mangling"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 09aaed1..15e741a 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -5,17 +5,23 @@
 # objects for the standalone - connection tracking / NAT
 ip_conntrack-objs	:= ip_conntrack_standalone.o ip_conntrack_core.o ip_conntrack_proto_generic.o ip_conntrack_proto_tcp.o ip_conntrack_proto_udp.o ip_conntrack_proto_icmp.o
 ip_nat-objs	:= ip_nat_core.o ip_nat_helper.o ip_nat_proto_unknown.o ip_nat_proto_tcp.o ip_nat_proto_udp.o ip_nat_proto_icmp.o
+nf_nat-objs	:= nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
+ifneq ($(CONFIG_NF_NAT),)
+iptable_nat-objs	:= nf_nat_rule.o nf_nat_standalone.o
+else
 iptable_nat-objs	:= ip_nat_rule.o ip_nat_standalone.o
+endif
 
 ip_conntrack_pptp-objs	:= ip_conntrack_helper_pptp.o ip_conntrack_proto_gre.o
 ip_nat_pptp-objs	:= ip_nat_helper_pptp.o ip_nat_proto_gre.o
 
-ip_conntrack_h323-objs := ip_conntrack_helper_h323.o ip_conntrack_helper_h323_asn1.o
+ip_conntrack_h323-objs := ip_conntrack_helper_h323.o ../../netfilter/nf_conntrack_h323_asn1.o
 ip_nat_h323-objs := ip_nat_helper_h323.o
 
 # connection tracking
 obj-$(CONFIG_IP_NF_CONNTRACK) += ip_conntrack.o
 obj-$(CONFIG_IP_NF_NAT) += ip_nat.o
+obj-$(CONFIG_NF_NAT) += nf_nat.o
 
 # conntrack netlink interface
 obj-$(CONFIG_IP_NF_CONNTRACK_NETLINK) += ip_conntrack_netlink.o
@@ -34,7 +40,7 @@
 obj-$(CONFIG_IP_NF_SIP) += ip_conntrack_sip.o
 obj-$(CONFIG_IP_NF_NETBIOS_NS) += ip_conntrack_netbios_ns.o
 
-# NAT helpers 
+# NAT helpers (ip_conntrack)
 obj-$(CONFIG_IP_NF_NAT_H323) += ip_nat_h323.o
 obj-$(CONFIG_IP_NF_NAT_PPTP) += ip_nat_pptp.o
 obj-$(CONFIG_IP_NF_NAT_AMANDA) += ip_nat_amanda.o
@@ -43,6 +49,19 @@
 obj-$(CONFIG_IP_NF_NAT_IRC) += ip_nat_irc.o
 obj-$(CONFIG_IP_NF_NAT_SIP) += ip_nat_sip.o
 
+# NAT helpers (nf_conntrack)
+obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_amanda.o
+obj-$(CONFIG_NF_NAT_FTP) += nf_nat_ftp.o
+obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o
+obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o
+obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o
+obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o
+obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
+obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
+
+# NAT protocols (nf_nat)
+obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
+
 # generic IP tables 
 obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
 
@@ -50,10 +69,10 @@
 obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
 obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o
 obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o
+obj-$(CONFIG_NF_NAT) += iptable_nat.o
 obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
 
 # matches
-obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o
 obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
 obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o
 obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
@@ -89,6 +108,11 @@
 
 # objects for l3 independent conntrack
 nf_conntrack_ipv4-objs  :=  nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o
+ifeq ($(CONFIG_NF_CONNTRACK_PROC_COMPAT),y)
+ifeq ($(CONFIG_PROC_FS),y)
+nf_conntrack_ipv4-objs	+= nf_conntrack_l3proto_ipv4_compat.o
+endif
+endif
 
 # l3 independent conntrack
 obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 413c2d0..71b76ad 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -375,6 +375,13 @@
 			    && unconditional(&e->arp)) {
 				unsigned int oldpos, size;
 
+				if (t->verdict < -NF_MAX_VERDICT - 1) {
+					duprintf("mark_source_chains: bad "
+						"negative verdict (%i)\n",
+								t->verdict);
+					return 0;
+				}
+
 				/* Return: backtrack through the last
 				 * big jump.
 				 */
@@ -404,6 +411,14 @@
 				if (strcmp(t->target.u.user.name,
 					   ARPT_STANDARD_TARGET) == 0
 				    && newpos >= 0) {
+					if (newpos > newinfo->size -
+						sizeof(struct arpt_entry)) {
+						duprintf("mark_source_chains: "
+							"bad verdict (%i)\n",
+								newpos);
+						return 0;
+					}
+
 					/* This a jump; chase it. */
 					duprintf("Jump rule %u -> %u\n",
 						 pos, newpos);
@@ -426,8 +441,6 @@
 static inline int standard_check(const struct arpt_entry_target *t,
 				 unsigned int max_offset)
 {
-	struct arpt_standard_target *targ = (void *)t;
-
 	/* Check standard info. */
 	if (t->u.target_size
 	    != ARPT_ALIGN(sizeof(struct arpt_standard_target))) {
@@ -437,18 +450,6 @@
 		return 0;
 	}
 
-	if (targ->verdict >= 0
-	    && targ->verdict > max_offset - sizeof(struct arpt_entry)) {
-		duprintf("arpt_standard_check: bad verdict (%i)\n",
-			 targ->verdict);
-		return 0;
-	}
-
-	if (targ->verdict < -NF_MAX_VERDICT - 1) {
-		duprintf("arpt_standard_check: bad negative verdict (%i)\n",
-			 targ->verdict);
-		return 0;
-	}
 	return 1;
 }
 
@@ -627,18 +628,20 @@
 		}
 	}
 
+	if (!mark_source_chains(newinfo, valid_hooks, entry0)) {
+		duprintf("Looping hook\n");
+		return -ELOOP;
+	}
+
 	/* Finally, each sanity check must pass */
 	i = 0;
 	ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size,
 				 check_entry, name, size, &i);
 
-	if (ret != 0)
-		goto cleanup;
-
-	ret = -ELOOP;
-	if (!mark_source_chains(newinfo, valid_hooks, entry0)) {
-		duprintf("Looping hook\n");
-		goto cleanup;
+	if (ret != 0) {
+		ARPT_ENTRY_ITERATE(entry0, newinfo->size,
+				cleanup_entry, &i);
+		return ret;
 	}
 
 	/* And one copy for every other CPU */
@@ -647,9 +650,6 @@
 			memcpy(newinfo->entries[i], entry0, newinfo->size);
 	}
 
-	return 0;
-cleanup:
-	ARPT_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i);
 	return ret;
 }
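
The arp_tables rework folds the verdict sanity checks into mark_source_chains(), so a bogus negative verdict or a jump past the end of the rule blob is rejected while the chains are walked, and standard_check() no longer duplicates that work. A simplified userspace sketch of those two bounds checks, with a made-up limit standing in for NF_MAX_VERDICT:

#include <stdio.h>

#define MAX_VERDICT	5	/* stand-in for the kernel's NF_MAX_VERDICT */

/* Accept a standard-target verdict only if a negative value stays within the
 * builtin verdict range and a positive value (a jump offset) stays inside a
 * rule blob of blob_size bytes whose entries are at least entry_size bytes. */
static int verdict_ok(int verdict, unsigned int blob_size, unsigned int entry_size)
{
	if (verdict < 0)
		return verdict >= -MAX_VERDICT - 1;

	return (unsigned int)verdict <= blob_size - entry_size;
}

int main(void)
{
	printf("%d %d %d\n",
	       verdict_ok(-2, 1024, 32),	/* builtin verdict: ok      */
	       verdict_ok(-9, 1024, 32),	/* bogus negative: rejected */
	       verdict_ok(2048, 1024, 32));	/* jump past blob: rejected */
	return 0;
}
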
 
diff --git a/net/ipv4/netfilter/ip_conntrack_amanda.c b/net/ipv4/netfilter/ip_conntrack_amanda.c
index 6c7383a..ad246ba 100644
--- a/net/ipv4/netfilter/ip_conntrack_amanda.c
+++ b/net/ipv4/netfilter/ip_conntrack_amanda.c
@@ -92,6 +92,7 @@
 	char pbuf[sizeof("65535")], *tmp;
 	u_int16_t port, len;
 	int ret = NF_ACCEPT;
+	typeof(ip_nat_amanda_hook) ip_nat_amanda;
 
 	/* Only look at packets from the Amanda server */
 	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
@@ -161,9 +162,11 @@
 		exp->mask.dst.protonum = 0xFF;
 		exp->mask.dst.u.tcp.port = htons(0xFFFF);
 
-		if (ip_nat_amanda_hook)
-			ret = ip_nat_amanda_hook(pskb, ctinfo, off - dataoff,
-						 len, exp);
+		/* RCU read locked by nf_hook_slow */
+		ip_nat_amanda = rcu_dereference(ip_nat_amanda_hook);
+		if (ip_nat_amanda)
+			ret = ip_nat_amanda(pskb, ctinfo, off - dataoff,
+					    len, exp);
 		else if (ip_conntrack_expect_related(exp) != 0)
 			ret = NF_DROP;
 		ip_conntrack_expect_put(exp);
@@ -180,7 +183,7 @@
 	.help = help,
 	.name = "amanda",
 
-	.tuple = { .src = { .u = { __constant_htons(10080) } },
+	.tuple = { .src = { .u = { .udp = {.port = __constant_htons(10080) } } },
 		   .dst = { .protonum = IPPROTO_UDP },
 	},
 	.mask = { .src = { .u = { 0xFFFF } },
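
The amanda helper above (and the ftp and h323 helpers that follow) now reads the NAT hook pointer once through rcu_dereference() into a typeof()-declared local, then tests and calls that copy, so the hook cannot change between the NULL check and the indirect call. A userspace approximation of the same discipline, using a C11 acquire load where the kernel relies on RCU; the names are illustrative:

#include <stdatomic.h>
#include <stddef.h>

typedef int (*nat_hook_fn)(void *pkt);

/* Published by the NAT module when it loads, cleared again when it unloads. */
static _Atomic(nat_hook_fn) nat_hook;

static int helper(void *pkt)
{
	/* one load, one decision: the pointer cannot change between the NULL
	 * test and the indirect call on this path */
	nat_hook_fn hook = atomic_load_explicit(&nat_hook, memory_order_acquire);

	if (hook)
		return hook(pkt);
	return 0;			/* accept by default when NAT is absent */
}

int main(void)
{
	return helper(NULL);	/* no hook registered: falls through to accept */
}
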
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 8b848aa..8556a4f 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -40,9 +40,6 @@
 
 /* ip_conntrack_lock protects the main hash table, protocol/helper/expected
    registrations, conntrack timers*/
-#define ASSERT_READ_LOCK(x)
-#define ASSERT_WRITE_LOCK(x)
-
 #include <linux/netfilter_ipv4/ip_conntrack.h>
 #include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
 #include <linux/netfilter_ipv4/ip_conntrack_helper.h>
@@ -68,8 +65,8 @@
 unsigned int ip_conntrack_htable_size __read_mostly = 0;
 int ip_conntrack_max __read_mostly;
 struct list_head *ip_conntrack_hash __read_mostly;
-static kmem_cache_t *ip_conntrack_cachep __read_mostly;
-static kmem_cache_t *ip_conntrack_expect_cachep __read_mostly;
+static struct kmem_cache *ip_conntrack_cachep __read_mostly;
+static struct kmem_cache *ip_conntrack_expect_cachep __read_mostly;
 struct ip_conntrack ip_conntrack_untracked;
 unsigned int ip_ct_log_invalid __read_mostly;
 static LIST_HEAD(unconfirmed);
@@ -201,7 +198,6 @@
 /* ip_conntrack_expect helper functions */
 void ip_ct_unlink_expect(struct ip_conntrack_expect *exp)
 {
-	ASSERT_WRITE_LOCK(&ip_conntrack_lock);
 	IP_NF_ASSERT(!timer_pending(&exp->timeout));
 	list_del(&exp->list);
 	CONNTRACK_STAT_INC(expect_delete);
@@ -233,7 +229,7 @@
 
 /* Just find a expectation corresponding to a tuple. */
 struct ip_conntrack_expect *
-ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple)
+ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple)
 {
 	struct ip_conntrack_expect *i;
 	
@@ -294,7 +290,6 @@
 clean_from_lists(struct ip_conntrack *ct)
 {
 	DEBUGP("clean_from_lists(%p)\n", ct);
-	ASSERT_WRITE_LOCK(&ip_conntrack_lock);
 	list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
 	list_del(&ct->tuplehash[IP_CT_DIR_REPLY].list);
 
@@ -373,7 +368,6 @@
 	struct ip_conntrack_tuple_hash *h;
 	unsigned int hash = hash_conntrack(tuple);
 
-	ASSERT_READ_LOCK(&ip_conntrack_lock);
 	list_for_each_entry(h, &ip_conntrack_hash[hash], list) {
 		if (tuplehash_to_ctrack(h) != ignored_conntrack &&
 		    ip_ct_tuple_equal(tuple, &h->tuple)) {
diff --git a/net/ipv4/netfilter/ip_conntrack_ftp.c b/net/ipv4/netfilter/ip_conntrack_ftp.c
index 93dcf96..0410c99 100644
--- a/net/ipv4/netfilter/ip_conntrack_ftp.c
+++ b/net/ipv4/netfilter/ip_conntrack_ftp.c
@@ -310,6 +310,7 @@
 	struct ip_conntrack_expect *exp;
 	unsigned int i;
 	int found = 0, ends_in_nl;
+	typeof(ip_nat_ftp_hook) ip_nat_ftp;
 
 	/* Until there's been traffic both ways, don't look in packets. */
 	if (ctinfo != IP_CT_ESTABLISHED
@@ -433,9 +434,10 @@
 
 	/* Now, NAT might want to mangle the packet, and register the
 	 * (possibly changed) expectation itself. */
-	if (ip_nat_ftp_hook)
-		ret = ip_nat_ftp_hook(pskb, ctinfo, search[dir][i].ftptype,
-				      matchoff, matchlen, exp, &seq);
+	ip_nat_ftp = rcu_dereference(ip_nat_ftp_hook);
+	if (ip_nat_ftp)
+		ret = ip_nat_ftp(pskb, ctinfo, search[dir][i].ftptype,
+				 matchoff, matchlen, exp, &seq);
 	else {
 		/* Can't expect this?  Best to drop packet now. */
 		if (ip_conntrack_expect_related(exp) != 0)
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
index 6cb9070..aabfe1c 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
@@ -237,6 +237,7 @@
 	u_int16_t rtp_port;
 	struct ip_conntrack_expect *rtp_exp;
 	struct ip_conntrack_expect *rtcp_exp;
+	typeof(nat_rtp_rtcp_hook) nat_rtp_rtcp;
 
 	/* Read RTP or RTCP address */
 	if (!get_h245_addr(*data, addr, &ip, &port) ||
@@ -279,11 +280,11 @@
 	rtcp_exp->flags = 0;
 
 	if (ct->tuplehash[dir].tuple.src.ip !=
-	    ct->tuplehash[!dir].tuple.dst.ip && nat_rtp_rtcp_hook) {
+	    ct->tuplehash[!dir].tuple.dst.ip &&
+	    (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook))) {
 		/* NAT needed */
-		ret = nat_rtp_rtcp_hook(pskb, ct, ctinfo, data, dataoff,
-					addr, port, rtp_port, rtp_exp,
-					rtcp_exp);
+		ret = nat_rtp_rtcp(pskb, ct, ctinfo, data, dataoff,
+				   addr, port, rtp_port, rtp_exp, rtcp_exp);
 	} else {		/* Conntrack only */
 		rtp_exp->expectfn = NULL;
 		rtcp_exp->expectfn = NULL;
@@ -328,6 +329,7 @@
 	__be32 ip;
 	u_int16_t port;
 	struct ip_conntrack_expect *exp = NULL;
+	typeof(nat_t120_hook) nat_t120;
 
 	/* Read T.120 address */
 	if (!get_h245_addr(*data, addr, &ip, &port) ||
@@ -350,10 +352,11 @@
 	exp->flags = IP_CT_EXPECT_PERMANENT;	/* Accept multiple channels */
 
 	if (ct->tuplehash[dir].tuple.src.ip !=
-	    ct->tuplehash[!dir].tuple.dst.ip && nat_t120_hook) {
+	    ct->tuplehash[!dir].tuple.dst.ip &&
+	    (nat_t120 = rcu_dereference(nat_t120_hook))) {
 		/* NAT needed */
-		ret = nat_t120_hook(pskb, ct, ctinfo, data, dataoff, addr,
-				    port, exp);
+		ret = nat_t120(pskb, ct, ctinfo, data, dataoff, addr,
+			       port, exp);
 	} else {		/* Conntrack only */
 		exp->expectfn = NULL;
 		if (ip_conntrack_expect_related(exp) == 0) {
@@ -651,6 +654,7 @@
 	__be32 ip;
 	u_int16_t port;
 	struct ip_conntrack_expect *exp = NULL;
+	typeof(nat_h245_hook) nat_h245;
 
 	/* Read h245Address */
 	if (!get_h225_addr(*data, addr, &ip, &port) ||
@@ -673,10 +677,11 @@
 	exp->flags = 0;
 
 	if (ct->tuplehash[dir].tuple.src.ip !=
-	    ct->tuplehash[!dir].tuple.dst.ip && nat_h245_hook) {
+	    ct->tuplehash[!dir].tuple.dst.ip &&
+	    (nat_h245 = rcu_dereference(nat_h245_hook))) {
 		/* NAT needed */
-		ret = nat_h245_hook(pskb, ct, ctinfo, data, dataoff, addr,
-				    port, exp);
+		ret = nat_h245(pskb, ct, ctinfo, data, dataoff, addr,
+			       port, exp);
 	} else {		/* Conntrack only */
 		exp->expectfn = ip_conntrack_h245_expect;
 
@@ -712,6 +717,7 @@
 	__be32 ip;
 	u_int16_t port;
 	struct ip_conntrack_expect *exp = NULL;
+	typeof(nat_callforwarding_hook) nat_callforwarding;
 
 	/* Read alternativeAddress */
 	if (!get_h225_addr(*data, addr, &ip, &port) || port == 0)
@@ -759,10 +765,11 @@
 	exp->flags = 0;
 
 	if (ct->tuplehash[dir].tuple.src.ip !=
-	    ct->tuplehash[!dir].tuple.dst.ip && nat_callforwarding_hook) {
+	    ct->tuplehash[!dir].tuple.dst.ip &&
+	    (nat_callforwarding = rcu_dereference(nat_callforwarding_hook))) {
 		/* Need NAT */
-		ret = nat_callforwarding_hook(pskb, ct, ctinfo, data, dataoff,
-					      addr, port, exp);
+		ret = nat_callforwarding(pskb, ct, ctinfo, data, dataoff,
+					 addr, port, exp);
 	} else {		/* Conntrack only */
 		exp->expectfn = ip_conntrack_q931_expect;
 
@@ -793,6 +800,7 @@
 	int i;
 	__be32 ip;
 	u_int16_t port;
+	typeof(set_h225_addr_hook) set_h225_addr;
 
 	DEBUGP("ip_ct_q931: Setup\n");
 
@@ -803,8 +811,10 @@
 			return -1;
 	}
 
+	set_h225_addr = rcu_dereference(set_h225_addr_hook);
+
 	if ((setup->options & eSetup_UUIE_destCallSignalAddress) &&
-	    (set_h225_addr_hook) &&
+	    (set_h225_addr) &&
 	    get_h225_addr(*data, &setup->destCallSignalAddress, &ip, &port) &&
 	    ip != ct->tuplehash[!dir].tuple.src.ip) {
 		DEBUGP("ip_ct_q931: set destCallSignalAddress "
@@ -812,17 +822,17 @@
 		       NIPQUAD(ip), port,
 		       NIPQUAD(ct->tuplehash[!dir].tuple.src.ip),
 		       ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port));
-		ret = set_h225_addr_hook(pskb, data, dataoff,
-					 &setup->destCallSignalAddress,
-					 ct->tuplehash[!dir].tuple.src.ip,
-					 ntohs(ct->tuplehash[!dir].tuple.src.
-					       u.tcp.port));
+		ret = set_h225_addr(pskb, data, dataoff,
+				    &setup->destCallSignalAddress,
+				    ct->tuplehash[!dir].tuple.src.ip,
+				    ntohs(ct->tuplehash[!dir].tuple.src.
+					  u.tcp.port));
 		if (ret < 0)
 			return -1;
 	}
 
 	if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) &&
-	    (set_h225_addr_hook) &&
+	    (set_h225_addr) &&
 	    get_h225_addr(*data, &setup->sourceCallSignalAddress, &ip, &port)
 	    && ip != ct->tuplehash[!dir].tuple.dst.ip) {
 		DEBUGP("ip_ct_q931: set sourceCallSignalAddress "
@@ -830,11 +840,11 @@
 		       NIPQUAD(ip), port,
 		       NIPQUAD(ct->tuplehash[!dir].tuple.dst.ip),
 		       ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port));
-		ret = set_h225_addr_hook(pskb, data, dataoff,
-					 &setup->sourceCallSignalAddress,
-					 ct->tuplehash[!dir].tuple.dst.ip,
-					 ntohs(ct->tuplehash[!dir].tuple.dst.
-					       u.tcp.port));
+		ret = set_h225_addr(pskb, data, dataoff,
+				    &setup->sourceCallSignalAddress,
+				    ct->tuplehash[!dir].tuple.dst.ip,
+				    ntohs(ct->tuplehash[!dir].tuple.dst.
+					  u.tcp.port));
 		if (ret < 0)
 			return -1;
 	}
@@ -1153,7 +1163,7 @@
 	.me = THIS_MODULE,
 	.max_expected = H323_RTP_CHANNEL_MAX * 4 + 4 /* T.120 and H.245 */ ,
 	.timeout = 240,
-	.tuple = {.src = {.u = {__constant_htons(Q931_PORT)}},
+	.tuple = {.src = {.u = {.tcp = {.port = __constant_htons(Q931_PORT)}}},
 		  .dst = {.protonum = IPPROTO_TCP}},
 	.mask = {.src = {.u = {0xFFFF}},
 		 .dst = {.protonum = 0xFF}},
@@ -1231,6 +1241,7 @@
 	__be32 ip;
 	u_int16_t port;
 	struct ip_conntrack_expect *exp;
+	typeof(nat_q931_hook) nat_q931;
 
 	/* Look for the first related address */
 	for (i = 0; i < count; i++) {
@@ -1258,9 +1269,9 @@
 	exp->mask.dst.protonum = 0xFF;
 	exp->flags = IP_CT_EXPECT_PERMANENT;	/* Accept multiple calls */
 
-	if (nat_q931_hook) {	/* Need NAT */
-		ret = nat_q931_hook(pskb, ct, ctinfo, data, addr, i,
-				    port, exp);
+	nat_q931 = rcu_dereference(nat_q931_hook);
+	if (nat_q931) {	/* Need NAT */
+		ret = nat_q931(pskb, ct, ctinfo, data, addr, i, port, exp);
 	} else {		/* Conntrack only */
 		exp->expectfn = ip_conntrack_q931_expect;
 
@@ -1288,11 +1299,14 @@
 		       enum ip_conntrack_info ctinfo,
 		       unsigned char **data, GatekeeperRequest * grq)
 {
+	typeof(set_ras_addr_hook) set_ras_addr;
+
 	DEBUGP("ip_ct_ras: GRQ\n");
 
-	if (set_ras_addr_hook)	/* NATed */
-		return set_ras_addr_hook(pskb, ct, ctinfo, data,
-					 &grq->rasAddress, 1);
+	set_ras_addr = rcu_dereference(set_ras_addr_hook);
+	if (set_ras_addr)	/* NATed */
+		return set_ras_addr(pskb, ct, ctinfo, data,
+				    &grq->rasAddress, 1);
 	return 0;
 }
 
@@ -1362,6 +1376,7 @@
 {
 	struct ip_ct_h323_master *info = &ct->help.ct_h323_info;
 	int ret;
+	typeof(set_ras_addr_hook) set_ras_addr;
 
 	DEBUGP("ip_ct_ras: RRQ\n");
 
@@ -1371,10 +1386,11 @@
 	if (ret < 0)
 		return -1;
 
-	if (set_ras_addr_hook) {
-		ret = set_ras_addr_hook(pskb, ct, ctinfo, data,
-					rrq->rasAddress.item,
-					rrq->rasAddress.count);
+	set_ras_addr = rcu_dereference(set_ras_addr_hook);
+	if (set_ras_addr) {
+		ret = set_ras_addr(pskb, ct, ctinfo, data,
+				   rrq->rasAddress.item,
+				   rrq->rasAddress.count);
 		if (ret < 0)
 			return -1;
 	}
@@ -1397,13 +1413,15 @@
 	int dir = CTINFO2DIR(ctinfo);
 	int ret;
 	struct ip_conntrack_expect *exp;
+	typeof(set_sig_addr_hook) set_sig_addr;
 
 	DEBUGP("ip_ct_ras: RCF\n");
 
-	if (set_sig_addr_hook) {
-		ret = set_sig_addr_hook(pskb, ct, ctinfo, data,
-					rcf->callSignalAddress.item,
-					rcf->callSignalAddress.count);
+	set_sig_addr = rcu_dereference(set_sig_addr_hook);
+	if (set_sig_addr) {
+		ret = set_sig_addr(pskb, ct, ctinfo, data,
+				   rcf->callSignalAddress.item,
+				   rcf->callSignalAddress.count);
 		if (ret < 0)
 			return -1;
 	}
@@ -1448,13 +1466,15 @@
 	struct ip_ct_h323_master *info = &ct->help.ct_h323_info;
 	int dir = CTINFO2DIR(ctinfo);
 	int ret;
+	typeof(set_sig_addr_hook) set_sig_addr;
 
 	DEBUGP("ip_ct_ras: URQ\n");
 
-	if (set_sig_addr_hook) {
-		ret = set_sig_addr_hook(pskb, ct, ctinfo, data,
-					urq->callSignalAddress.item,
-					urq->callSignalAddress.count);
+	set_sig_addr = rcu_dereference(set_sig_addr_hook);
+	if (set_sig_addr) {
+		ret = set_sig_addr(pskb, ct, ctinfo, data,
+				   urq->callSignalAddress.item,
+				   urq->callSignalAddress.count);
 		if (ret < 0)
 			return -1;
 	}
@@ -1479,28 +1499,30 @@
 	int dir = CTINFO2DIR(ctinfo);
 	__be32 ip;
 	u_int16_t port;
+	typeof(set_h225_addr_hook) set_h225_addr;
 
 	DEBUGP("ip_ct_ras: ARQ\n");
 
+	set_h225_addr = rcu_dereference(set_h225_addr_hook);
 	if ((arq->options & eAdmissionRequest_destCallSignalAddress) &&
 	    get_h225_addr(*data, &arq->destCallSignalAddress, &ip, &port) &&
 	    ip == ct->tuplehash[dir].tuple.src.ip &&
-	    port == info->sig_port[dir] && set_h225_addr_hook) {
+	    port == info->sig_port[dir] && set_h225_addr) {
 		/* Answering ARQ */
-		return set_h225_addr_hook(pskb, data, 0,
-					  &arq->destCallSignalAddress,
-					  ct->tuplehash[!dir].tuple.dst.ip,
-					  info->sig_port[!dir]);
+		return set_h225_addr(pskb, data, 0,
+				     &arq->destCallSignalAddress,
+				     ct->tuplehash[!dir].tuple.dst.ip,
+				     info->sig_port[!dir]);
 	}
 
 	if ((arq->options & eAdmissionRequest_srcCallSignalAddress) &&
 	    get_h225_addr(*data, &arq->srcCallSignalAddress, &ip, &port) &&
-	    ip == ct->tuplehash[dir].tuple.src.ip && set_h225_addr_hook) {
+	    ip == ct->tuplehash[dir].tuple.src.ip && set_h225_addr) {
 		/* Calling ARQ */
-		return set_h225_addr_hook(pskb, data, 0,
-					  &arq->srcCallSignalAddress,
-					  ct->tuplehash[!dir].tuple.dst.ip,
-					  port);
+		return set_h225_addr(pskb, data, 0,
+				     &arq->srcCallSignalAddress,
+				     ct->tuplehash[!dir].tuple.dst.ip,
+				     port);
 	}
 
 	return 0;
@@ -1516,6 +1538,7 @@
 	__be32 ip;
 	u_int16_t port;
 	struct ip_conntrack_expect *exp;
+	typeof(set_sig_addr_hook) set_sig_addr;
 
 	DEBUGP("ip_ct_ras: ACF\n");
 
@@ -1523,10 +1546,10 @@
 		return 0;
 
 	if (ip == ct->tuplehash[dir].tuple.dst.ip) {	/* Answering ACF */
-		if (set_sig_addr_hook)
-			return set_sig_addr_hook(pskb, ct, ctinfo, data,
-						 &acf->destCallSignalAddress,
-						 1);
+		set_sig_addr = rcu_dereference(set_sig_addr_hook);
+		if (set_sig_addr)
+			return set_sig_addr(pskb, ct, ctinfo, data,
+					    &acf->destCallSignalAddress, 1);
 		return 0;
 	}
 
@@ -1566,11 +1589,14 @@
 		       enum ip_conntrack_info ctinfo,
 		       unsigned char **data, LocationRequest * lrq)
 {
+	typeof(set_ras_addr_hook) set_ras_addr;
+
 	DEBUGP("ip_ct_ras: LRQ\n");
 
-	if (set_ras_addr_hook)
-		return set_ras_addr_hook(pskb, ct, ctinfo, data,
-					 &lrq->replyAddress, 1);
+	set_ras_addr = rcu_dereference(set_ras_addr_hook);
+	if (set_ras_addr)
+		return set_ras_addr(pskb, ct, ctinfo, data,
+				    &lrq->replyAddress, 1);
 	return 0;
 }
 
@@ -1629,20 +1655,24 @@
 		       unsigned char **data, InfoRequestResponse * irr)
 {
 	int ret;
+	typeof(set_ras_addr_hook) set_ras_addr;
+	typeof(set_sig_addr_hook) set_sig_addr;
 
 	DEBUGP("ip_ct_ras: IRR\n");
 
-	if (set_ras_addr_hook) {
-		ret = set_ras_addr_hook(pskb, ct, ctinfo, data,
-					&irr->rasAddress, 1);
+	set_ras_addr = rcu_dereference(set_ras_addr_hook);
+	if (set_ras_addr) {
+		ret = set_ras_addr(pskb, ct, ctinfo, data,
+				   &irr->rasAddress, 1);
 		if (ret < 0)
 			return -1;
 	}
 
-	if (set_sig_addr_hook) {
-		ret = set_sig_addr_hook(pskb, ct, ctinfo, data,
-					irr->callSignalAddress.item,
-					irr->callSignalAddress.count);
+	set_sig_addr = rcu_dereference(set_sig_addr_hook);
+	if (set_sig_addr) {
+		ret = set_sig_addr(pskb, ct, ctinfo, data,
+				   irr->callSignalAddress.item,
+				   irr->callSignalAddress.count);
 		if (ret < 0)
 			return -1;
 	}
@@ -1746,7 +1776,7 @@
 	.me = THIS_MODULE,
 	.max_expected = 32,
 	.timeout = 240,
-	.tuple = {.src = {.u = {__constant_htons(RAS_PORT)}},
+	.tuple = {.src = {.u = {.tcp = {.port = __constant_htons(RAS_PORT)}}},
 		  .dst = {.protonum = IPPROTO_UDP}},
 	.mask = {.src = {.u = {0xFFFE}},
 		 .dst = {.protonum = 0xFF}},
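
The tuple initializer changes in the helper definitions above (Q931_PORT, RAS_PORT, and the amanda and SNMP ports elsewhere in this patch) all fix the same latent fragility: a brace initializer without a designator fills in whichever member happens to come first in the union, which is only correct while that member aliases the port. Naming the member keeps the initializer valid if the union is ever reordered. A minimal sketch with a stand-in union (example_manip_proto is illustrative, not the real ip_conntrack type):

union example_manip_proto {
	__be16 all;
	struct { __be16 port; } tcp;
	struct { __be16 port; } udp;
};

/* fragile: initializes .all, the first member, and relies on aliasing */
union example_manip_proto a = { __constant_htons(10080) };

/* explicit: still correct if members are added or reordered */
union example_manip_proto b = { .udp = { .port = __constant_htons(10080) } };
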
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
index a2af5e0..4d19373 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
@@ -124,6 +124,8 @@
 static void pptp_expectfn(struct ip_conntrack *ct,
 			 struct ip_conntrack_expect *exp)
 {
+	typeof(ip_nat_pptp_hook_expectfn) ip_nat_pptp_expectfn;
+
 	DEBUGP("increasing timeouts\n");
 
 	/* increase timeout of GRE data channel conntrack entry */
@@ -133,7 +135,9 @@
 	/* Can you see how rusty this code is, compared with the pre-2.6.11
 	 * one? That's what happened to my shiny newnat of 2002 ;( -HW */
 
-	if (!ip_nat_pptp_hook_expectfn) {
+	rcu_read_lock();
+	ip_nat_pptp_expectfn = rcu_dereference(ip_nat_pptp_hook_expectfn);
+	if (!ip_nat_pptp_expectfn) {
 		struct ip_conntrack_tuple inv_t;
 		struct ip_conntrack_expect *exp_other;
 
@@ -142,7 +146,7 @@
 		DEBUGP("trying to unexpect other dir: ");
 		DUMP_TUPLE(&inv_t);
 
-		exp_other = ip_conntrack_expect_find(&inv_t);
+		exp_other = ip_conntrack_expect_find_get(&inv_t);
 		if (exp_other) {
 			/* delete other expectation.  */
 			DEBUGP("found\n");
@@ -153,8 +157,9 @@
 		}
 	} else {
 		/* we need more than simple inversion */
-		ip_nat_pptp_hook_expectfn(ct, exp);
+		ip_nat_pptp_expectfn(ct, exp);
 	}
+	rcu_read_unlock();
 }
 
 static int destroy_sibling_or_exp(const struct ip_conntrack_tuple *t)
@@ -176,7 +181,7 @@
 		ip_conntrack_put(sibling);
 		return 1;
 	} else {
-		exp = ip_conntrack_expect_find(t);
+		exp = ip_conntrack_expect_find_get(t);
 		if (exp) {
 			DEBUGP("unexpect_related of expect %p\n", exp);
 			ip_conntrack_unexpect_related(exp);
@@ -226,6 +231,7 @@
 {
 	struct ip_conntrack_expect *exp_orig, *exp_reply;
 	int ret = 1;
+	typeof(ip_nat_pptp_hook_exp_gre) ip_nat_pptp_exp_gre;
 
 	exp_orig = ip_conntrack_expect_alloc(ct);
 	if (exp_orig == NULL)
@@ -262,8 +268,9 @@
 	exp_reply->tuple.dst.u.gre.key = peer_callid;
 	exp_reply->tuple.dst.protonum = IPPROTO_GRE;
 
-	if (ip_nat_pptp_hook_exp_gre)
-		ip_nat_pptp_hook_exp_gre(exp_orig, exp_reply);
+	ip_nat_pptp_exp_gre = rcu_dereference(ip_nat_pptp_hook_exp_gre);
+	if (ip_nat_pptp_exp_gre)
+		ip_nat_pptp_exp_gre(exp_orig, exp_reply);
 	if (ip_conntrack_expect_related(exp_orig) != 0)
 		goto out_put_both;
 	if (ip_conntrack_expect_related(exp_reply) != 0)
@@ -303,6 +310,7 @@
 	struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
 	u_int16_t msg;
 	__be16 cid = 0, pcid = 0;
+	typeof(ip_nat_pptp_hook_inbound) ip_nat_pptp_inbound;
 
 	msg = ntohs(ctlh->messageType);
 	DEBUGP("inbound control message %s\n", pptp_msg_name[msg]);
@@ -402,9 +410,9 @@
 		goto invalid;
 	}
 
-	if (ip_nat_pptp_hook_inbound)
-		return ip_nat_pptp_hook_inbound(pskb, ct, ctinfo, ctlh,
-						pptpReq);
+	ip_nat_pptp_inbound = rcu_dereference(ip_nat_pptp_hook_inbound);
+	if (ip_nat_pptp_inbound)
+		return ip_nat_pptp_inbound(pskb, ct, ctinfo, ctlh, pptpReq);
 	return NF_ACCEPT;
 
 invalid:
@@ -427,6 +435,7 @@
 	struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
 	u_int16_t msg;
 	__be16 cid = 0, pcid = 0;
+	typeof(ip_nat_pptp_hook_outbound) ip_nat_pptp_outbound;
 
 	msg = ntohs(ctlh->messageType);
 	DEBUGP("outbound control message %s\n", pptp_msg_name[msg]);
@@ -492,9 +501,9 @@
 		goto invalid;
 	}
 
-	if (ip_nat_pptp_hook_outbound)
-		return ip_nat_pptp_hook_outbound(pskb, ct, ctinfo, ctlh,
-						 pptpReq);
+	ip_nat_pptp_outbound = rcu_dereference(ip_nat_pptp_hook_outbound);
+	if (ip_nat_pptp_outbound)
+		return ip_nat_pptp_outbound(pskb, ct, ctinfo, ctlh, pptpReq);
 	return NF_ACCEPT;
 
 invalid:
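
Unlike the packet helpers, pptp_expectfn() is not called from the nf_hook_slow() path, so it cannot rely on an RCU read-side critical section already being held; the hunk above therefore brackets the hook dereference itself. The general shape, reduced to a sketch:

typeof(ip_nat_pptp_hook_expectfn) expectfn;

rcu_read_lock();
expectfn = rcu_dereference(ip_nat_pptp_hook_expectfn);
if (expectfn)
	expectfn(ct, exp);	/* NAT module handles the expectation */
rcu_read_unlock();
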
diff --git a/net/ipv4/netfilter/ip_conntrack_irc.c b/net/ipv4/netfilter/ip_conntrack_irc.c
index 75f7c3d..91832ec 100644
--- a/net/ipv4/netfilter/ip_conntrack_irc.c
+++ b/net/ipv4/netfilter/ip_conntrack_irc.c
@@ -114,6 +114,7 @@
 	u_int16_t dcc_port;
 	int i, ret = NF_ACCEPT;
 	char *addr_beg_p, *addr_end_p;
+	typeof(ip_nat_irc_hook) ip_nat_irc;
 
 	DEBUGP("entered\n");
 
@@ -222,11 +223,12 @@
 					{ .tcp = { htons(0xFFFF) } }, 0xFF }});
 			exp->expectfn = NULL;
 			exp->flags = 0;
-			if (ip_nat_irc_hook)
-				ret = ip_nat_irc_hook(pskb, ctinfo, 
-						      addr_beg_p - ib_ptr,
-						      addr_end_p - addr_beg_p,
-						      exp);
+			ip_nat_irc = rcu_dereference(ip_nat_irc_hook);
+			if (ip_nat_irc)
+				ret = ip_nat_irc(pskb, ctinfo,
+						 addr_beg_p - ib_ptr,
+						 addr_end_p - addr_beg_p,
+						 exp);
 			else if (ip_conntrack_expect_related(exp) != 0)
 				ret = NF_DROP;
 			ip_conntrack_expect_put(exp);
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index 55f0ae6..5fcf91d 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -320,8 +320,6 @@
 	} else if (events & (IPCT_NEW | IPCT_RELATED)) {
 		type = IPCTNL_MSG_CT_NEW;
 		flags = NLM_F_CREATE|NLM_F_EXCL;
-		/* dump everything */
-		events = ~0UL;
 		group = NFNLGRP_CONNTRACK_NEW;
 	} else if (events & (IPCT_STATUS | IPCT_PROTOINFO)) {
 		type = IPCTNL_MSG_CT_NEW;
@@ -356,28 +354,35 @@
 	if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0)
 		goto nfattr_failure;
 	NFA_NEST_END(skb, nest_parms);
-	
-	/* NAT stuff is now a status flag */
-	if ((events & IPCT_STATUS || events & IPCT_NATINFO)
-	    && ctnetlink_dump_status(skb, ct) < 0)
-		goto nfattr_failure;
-	if (events & IPCT_REFRESH
-	    && ctnetlink_dump_timeout(skb, ct) < 0)
-		goto nfattr_failure;
-	if (events & IPCT_PROTOINFO
-	    && ctnetlink_dump_protoinfo(skb, ct) < 0)
-		goto nfattr_failure;
-	if (events & IPCT_HELPINFO
-	    && ctnetlink_dump_helpinfo(skb, ct) < 0)
-		goto nfattr_failure;
 
-	if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
-	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
-		goto nfattr_failure;
+	if (events & IPCT_DESTROY) {
+		if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
+		    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
+			goto nfattr_failure;
+	} else {
+		if (ctnetlink_dump_status(skb, ct) < 0)
+			goto nfattr_failure;
 
-	if (events & IPCT_MARK
-	    && ctnetlink_dump_mark(skb, ct) < 0)
-		goto nfattr_failure;
+		if (ctnetlink_dump_timeout(skb, ct) < 0)
+			goto nfattr_failure;
+
+		if (events & IPCT_PROTOINFO
+		    && ctnetlink_dump_protoinfo(skb, ct) < 0)
+		    	goto nfattr_failure;
+
+		if ((events & IPCT_HELPER || ct->helper)
+		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
+		    	goto nfattr_failure;
+
+		if ((events & IPCT_MARK || ct->mark)
+		    && ctnetlink_dump_mark(skb, ct) < 0)
+		    	goto nfattr_failure;
+
+		if (events & IPCT_COUNTER_FILLING &&
+		    (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
+		     ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0))
+			goto nfattr_failure;
+	}
 
 	nlh->nlmsg_len = skb->tail - b;
 	nfnetlink_send(skb, 0, group, 0);
@@ -743,7 +748,6 @@
 		ip_conntrack_put(ct);
 		return -ENOMEM;
 	}
-	NETLINK_CB(skb2).dst_pid = NETLINK_CB(skb).pid;
 
 	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 
 				  IPCTNL_MSG_CT_NEW, 1, ct);
@@ -946,9 +950,11 @@
 	ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
 	ct->status |= IPS_CONFIRMED;
 
-	err = ctnetlink_change_status(ct, cda);
-	if (err < 0)
-		goto err;
+	if (cda[CTA_STATUS-1]) {
+		err = ctnetlink_change_status(ct, cda);
+		if (err < 0)
+			goto err;
+	}
 
 	if (cda[CTA_PROTOINFO-1]) {
 		err = ctnetlink_change_protoinfo(ct, cda);
@@ -1257,7 +1263,7 @@
 	if (err < 0)
 		return err;
 
-	exp = ip_conntrack_expect_find(&tuple);
+	exp = ip_conntrack_expect_find_get(&tuple);
 	if (!exp)
 		return -ENOENT;
 
@@ -1273,8 +1279,7 @@
 	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (!skb2)
 		goto out;
-	NETLINK_CB(skb2).dst_pid = NETLINK_CB(skb).pid;
-	
+
 	err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, 
 				      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW,
 				      1, exp);
@@ -1311,7 +1316,7 @@
 			return err;
 
 		/* bump usage count to 2 */
-		exp = ip_conntrack_expect_find(&tuple);
+		exp = ip_conntrack_expect_find_get(&tuple);
 		if (!exp)
 			return -ENOENT;
 
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_gre.c b/net/ipv4/netfilter/ip_conntrack_proto_gre.c
index 5fe026f..ac1c49e 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_gre.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_gre.c
@@ -34,8 +34,6 @@
 #include <linux/interrupt.h>
 
 static DEFINE_RWLOCK(ip_ct_gre_lock);
-#define ASSERT_READ_LOCK(x)
-#define ASSERT_WRITE_LOCK(x)
 
 #include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
 #include <linux/netfilter_ipv4/ip_conntrack_helper.h>
diff --git a/net/ipv4/netfilter/ip_conntrack_sip.c b/net/ipv4/netfilter/ip_conntrack_sip.c
index f4f7599..3a26d63 100644
--- a/net/ipv4/netfilter/ip_conntrack_sip.c
+++ b/net/ipv4/netfilter/ip_conntrack_sip.c
@@ -52,20 +52,56 @@
 				const char *dptr);
 EXPORT_SYMBOL_GPL(ip_nat_sdp_hook);
 
-int ct_sip_get_info(const char *dptr, size_t dlen,
-				unsigned int *matchoff,
-				unsigned int *matchlen,
-				struct sip_header_nfo *hnfo);
-EXPORT_SYMBOL_GPL(ct_sip_get_info);
-
-
 static int digits_len(const char *dptr, const char *limit, int *shift);
 static int epaddr_len(const char *dptr, const char *limit, int *shift);
 static int skp_digits_len(const char *dptr, const char *limit, int *shift);
 static int skp_epaddr_len(const char *dptr, const char *limit, int *shift);
 
-struct sip_header_nfo ct_sip_hdrs[] = {
-	{ 	/* Via header */
+struct sip_header_nfo {
+	const char	*lname;
+	const char	*sname;
+	const char	*ln_str;
+	size_t		lnlen;
+	size_t		snlen;
+	size_t		ln_strlen;
+	int		case_sensitive;
+	int		(*match_len)(const char *, const char *, int *);
+};
+
+static struct sip_header_nfo ct_sip_hdrs[] = {
+	[POS_REG_REQ_URI] = { 	/* SIP REGISTER request URI */
+		.lname		= "sip:",
+		.lnlen		= sizeof("sip:") - 1,
+		.ln_str		= ":",
+		.ln_strlen	= sizeof(":") - 1,
+		.match_len	= epaddr_len
+	},
+	[POS_REQ_URI] = { 	/* SIP request URI */
+		.lname		= "sip:",
+		.lnlen		= sizeof("sip:") - 1,
+		.ln_str		= "@",
+		.ln_strlen	= sizeof("@") - 1,
+		.match_len	= epaddr_len
+	},
+	[POS_FROM] = {		/* SIP From header */
+		.lname		= "From:",
+		.lnlen		= sizeof("From:") - 1,
+		.sname		= "\r\nf:",
+		.snlen		= sizeof("\r\nf:") - 1,
+		.ln_str		= "sip:",
+		.ln_strlen	= sizeof("sip:") - 1,
+		.match_len	= skp_epaddr_len,
+	},
+	[POS_TO] = {		/* SIP To header */
+		.lname		= "To:",
+		.lnlen		= sizeof("To:") - 1,
+		.sname		= "\r\nt:",
+		.snlen		= sizeof("\r\nt:") - 1,
+		.ln_str		= "sip:",
+		.ln_strlen	= sizeof("sip:") - 1,
+		.match_len	= skp_epaddr_len,
+	},
+	[POS_VIA] = { 		/* SIP Via header */
 		.lname		= "Via:",
 		.lnlen		= sizeof("Via:") - 1,
 		.sname		= "\r\nv:",
@@ -74,7 +110,7 @@
 		.ln_strlen	= sizeof("UDP ") - 1,
 		.match_len	= epaddr_len,
 	},
-	{ 	/* Contact header */
+	[POS_CONTACT] = { 	/* SIP Contact header */
 		.lname		= "Contact:",
 		.lnlen		= sizeof("Contact:") - 1,
 		.sname		= "\r\nm:",
@@ -83,7 +119,7 @@
 		.ln_strlen	= sizeof("sip:") - 1,
 		.match_len	= skp_epaddr_len
 	},
-	{ 	/* Content length header */
+	[POS_CONTENT] = { 	/* SIP Content length header */
 		.lname		= "Content-Length:",
 		.lnlen		= sizeof("Content-Length:") - 1,
 		.sname		= "\r\nl:",
@@ -92,7 +128,8 @@
 		.ln_strlen	= sizeof(":") - 1,
 		.match_len	= skp_digits_len
 	},
-	{	/* SDP media info */
+	[POS_MEDIA] = {		/* SDP media info */
+		.case_sensitive	= 1,
 		.lname		= "\nm=",
 		.lnlen		= sizeof("\nm=") - 1,
 		.sname		= "\rm=",
@@ -101,7 +138,8 @@
 		.ln_strlen	= sizeof("audio ") - 1,
 		.match_len	= digits_len
 	},
-	{ 	/* SDP owner address*/
+	[POS_OWNER] = { 	/* SDP owner address*/
+		.case_sensitive	= 1,
 		.lname		= "\no=",
 		.lnlen		= sizeof("\no=") - 1,
 		.sname		= "\ro=",
@@ -110,7 +148,8 @@
 		.ln_strlen	= sizeof("IN IP4 ") - 1,
 		.match_len	= epaddr_len
 	},
-	{ 	/* SDP connection info */
+	[POS_CONNECTION] = { 	/* SDP connection info */
+		.case_sensitive	= 1,
 		.lname		= "\nc=",
 		.lnlen		= sizeof("\nc=") - 1,
 		.sname		= "\rc=",
@@ -119,16 +158,8 @@
 		.ln_strlen	= sizeof("IN IP4 ") - 1,
 		.match_len	= epaddr_len
 	},
-	{ 	/* Requests headers */
-		.lname		= "sip:",
-		.lnlen		= sizeof("sip:") - 1,
-		.sname		= "sip:",
-		.snlen		= sizeof("sip:") - 1, /* yes, i know.. ;) */
-		.ln_str		= "@",
-		.ln_strlen	= sizeof("@") - 1,
-		.match_len	= epaddr_len
-	},
-	{ 	/* SDP version header */
+	[POS_SDP_HEADER] = { 	/* SDP version header */
+		.case_sensitive	= 1,
 		.lname		= "\nv=",
 		.lnlen		= sizeof("\nv=") - 1,
 		.sname		= "\rv=",
@@ -138,7 +169,6 @@
 		.match_len	= digits_len
 	}
 };
-EXPORT_SYMBOL_GPL(ct_sip_hdrs);
 
 /* get line length until first CR or LF seen. */
 int ct_sip_lnlen(const char *line, const char *limit)
@@ -159,13 +189,19 @@
 
 /* Linear string search, case sensitive. */
 const char *ct_sip_search(const char *needle, const char *haystack,
-                          size_t needle_len, size_t haystack_len)
+			  size_t needle_len, size_t haystack_len,
+			  int case_sensitive)
 {
 	const char *limit = haystack + (haystack_len - needle_len);
 
 	while (haystack <= limit) {
-		if (memcmp(haystack, needle, needle_len) == 0)
-			return haystack;
+		if (case_sensitive) {
+			if (strncmp(haystack, needle, needle_len) == 0)
+				return haystack;
+		} else {
+			if (strnicmp(haystack, needle, needle_len) == 0)
+				return haystack;
+		}
 		haystack++;
 	}
 	return NULL;
@@ -263,8 +299,9 @@
 int ct_sip_get_info(const char *dptr, size_t dlen,
 		    unsigned int *matchoff,
 		    unsigned int *matchlen,
-		    struct sip_header_nfo *hnfo)
+		    enum sip_header_pos pos)
 {
+	struct sip_header_nfo *hnfo = &ct_sip_hdrs[pos];
 	const char *limit, *aux, *k = dptr;
 	int shift = 0;
 
@@ -272,12 +309,14 @@
 
 	while (dptr <= limit) {
 		if ((strncmp(dptr, hnfo->lname, hnfo->lnlen) != 0) &&
-		    (strncmp(dptr, hnfo->sname, hnfo->snlen) != 0)) {
+		    (hnfo->sname == NULL ||
+		     strncmp(dptr, hnfo->sname, hnfo->snlen) != 0)) {
 			dptr++;
 			continue;
 		}
 		aux = ct_sip_search(hnfo->ln_str, dptr, hnfo->ln_strlen,
-		                    ct_sip_lnlen(dptr, limit));
+		                    ct_sip_lnlen(dptr, limit),
+				    hnfo->case_sensitive);
 		if (!aux) {
 			DEBUGP("'%s' not found in '%s'.\n", hnfo->ln_str,
 			       hnfo->lname);
@@ -298,6 +337,7 @@
 	DEBUGP("%s header not found.\n", hnfo->lname);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ct_sip_get_info);
 
 static int set_expected_rtp(struct sk_buff **pskb,
 			    struct ip_conntrack *ct,
@@ -308,6 +348,7 @@
 	struct ip_conntrack_expect *exp;
 	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
 	int ret;
+	typeof(ip_nat_sdp_hook) ip_nat_sdp;
 
 	exp = ip_conntrack_expect_alloc(ct);
 	if (exp == NULL)
@@ -328,8 +369,9 @@
 	exp->expectfn = NULL;
 	exp->flags = 0;
 
-	if (ip_nat_sdp_hook)
-		ret = ip_nat_sdp_hook(pskb, ctinfo, exp, dptr);
+	ip_nat_sdp = rcu_dereference(ip_nat_sdp_hook);
+	if (ip_nat_sdp)
+		ret = ip_nat_sdp(pskb, ctinfo, exp, dptr);
 	else {
 		if (ip_conntrack_expect_related(exp) != 0)
 			ret = NF_DROP;
@@ -351,6 +393,7 @@
 	int matchoff, matchlen;
 	__be32 ipaddr;
 	u_int16_t port;
+	typeof(ip_nat_sip_hook) ip_nat_sip;
 
 	/* No Data ? */
 	dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
@@ -368,8 +411,9 @@
 		goto out;
 	}
 
-	if (ip_nat_sip_hook) {
-		if (!ip_nat_sip_hook(pskb, ctinfo, ct, &dptr)) {
+	ip_nat_sip = rcu_dereference(ip_nat_sip_hook);
+	if (ip_nat_sip) {
+		if (!ip_nat_sip(pskb, ctinfo, ct, &dptr)) {
 			ret = NF_DROP;
 			goto out;
 		}
@@ -389,7 +433,7 @@
 	}
 	/* Get ip and port address from SDP packet. */
 	if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen,
-	                    &ct_sip_hdrs[POS_CONNECTION]) > 0) {
+	                    POS_CONNECTION) > 0) {
 
 		/* We'll drop only if there are parse problems. */
 		if (parse_ipaddr(dptr + matchoff, NULL, &ipaddr,
@@ -398,7 +442,7 @@
 			goto out;
 		}
 		if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen,
-		                    &ct_sip_hdrs[POS_MEDIA]) > 0) {
+		                    POS_MEDIA) > 0) {
 
 			port = simple_strtoul(dptr + matchoff, NULL, 10);
 			if (port < 1024) {
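
The case_sensitive flag added to ct_sip_hdrs[] and ct_sip_search() separates the two grammars the helper parses: SIP tokens such as the "sip:" scheme may appear in any case, so the in-line search falls back to strnicmp(), while the SDP field lines ("v=", "o=", "c=", "m=") keep exact matching, which is why only the SDP entries set .case_sensitive. A condensed restatement of what ct_sip_search() now does:

static const char *search(const char *needle, const char *haystack,
			  size_t nlen, size_t hlen, int case_sensitive)
{
	const char *limit = haystack + (hlen - nlen);

	for (; haystack <= limit; haystack++)
		if (case_sensitive ? strncmp(haystack, needle, nlen) == 0
				   : strnicmp(haystack, needle, nlen) == 0)
			return haystack;
	return NULL;
}
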
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index 0213575..86efb54 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -28,9 +28,6 @@
 #include <net/ip.h>
 #include <net/route.h>
 
-#define ASSERT_READ_LOCK(x)
-#define ASSERT_WRITE_LOCK(x)
-
 #include <linux/netfilter_ipv4/ip_conntrack.h>
 #include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
 #include <linux/netfilter_ipv4/ip_conntrack_core.h>
@@ -139,7 +136,6 @@
 	const struct ip_conntrack *conntrack = tuplehash_to_ctrack(hash);
 	struct ip_conntrack_protocol *proto;
 
-	ASSERT_READ_LOCK(&ip_conntrack_lock);
 	IP_NF_ASSERT(conntrack);
 
 	/* we only want to print DIR_ORIGINAL */
@@ -926,7 +922,7 @@
 EXPORT_SYMBOL(ip_conntrack_expect_alloc);
 EXPORT_SYMBOL(ip_conntrack_expect_put);
 EXPORT_SYMBOL_GPL(__ip_conntrack_expect_find);
-EXPORT_SYMBOL_GPL(ip_conntrack_expect_find);
+EXPORT_SYMBOL_GPL(ip_conntrack_expect_find_get);
 EXPORT_SYMBOL(ip_conntrack_expect_related);
 EXPORT_SYMBOL(ip_conntrack_unexpect_related);
 EXPORT_SYMBOL_GPL(ip_conntrack_expect_list);
diff --git a/net/ipv4/netfilter/ip_conntrack_tftp.c b/net/ipv4/netfilter/ip_conntrack_tftp.c
index fe0b634..ef56de2 100644
--- a/net/ipv4/netfilter/ip_conntrack_tftp.c
+++ b/net/ipv4/netfilter/ip_conntrack_tftp.c
@@ -50,6 +50,7 @@
 	struct tftphdr _tftph, *tfh;
 	struct ip_conntrack_expect *exp;
 	unsigned int ret = NF_ACCEPT;
+	typeof(ip_nat_tftp_hook) ip_nat_tftp;
 
 	tfh = skb_header_pointer(*pskb,
 				 (*pskb)->nh.iph->ihl*4+sizeof(struct udphdr),
@@ -81,8 +82,9 @@
 		DEBUGP("expect: ");
 		DUMP_TUPLE(&exp->tuple);
 		DUMP_TUPLE(&exp->mask);
-		if (ip_nat_tftp_hook)
-			ret = ip_nat_tftp_hook(pskb, ctinfo, exp);
+		ip_nat_tftp = rcu_dereference(ip_nat_tftp_hook);
+		if (ip_nat_tftp)
+			ret = ip_nat_tftp(pskb, ctinfo, exp);
 		else if (ip_conntrack_expect_related(exp) != 0)
 			ret = NF_DROP;
 		ip_conntrack_expect_put(exp);
diff --git a/net/ipv4/netfilter/ip_nat_amanda.c b/net/ipv4/netfilter/ip_nat_amanda.c
index 3a88871..85df1a9 100644
--- a/net/ipv4/netfilter/ip_nat_amanda.c
+++ b/net/ipv4/netfilter/ip_nat_amanda.c
@@ -70,15 +70,14 @@
 
 static void __exit ip_nat_amanda_fini(void)
 {
-	ip_nat_amanda_hook = NULL;
-	/* Make sure noone calls it, meanwhile. */
-	synchronize_net();
+	rcu_assign_pointer(ip_nat_amanda_hook, NULL);
+	synchronize_rcu();
 }
 
 static int __init ip_nat_amanda_init(void)
 {
-	BUG_ON(ip_nat_amanda_hook);
-	ip_nat_amanda_hook = help;
+	BUG_ON(rcu_dereference(ip_nat_amanda_hook));
+	rcu_assign_pointer(ip_nat_amanda_hook, help);
 	return 0;
 }
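
ip_nat_amanda is the simplest example of the writer side of the same conversion: module init publishes the helper with rcu_assign_pointer(), and module exit clears the pointer and then waits in synchronize_rcu() so that any reader still executing through an old rcu_dereference()d value finishes before the module text is freed. The pattern in isolation (hook and helper names are again hypothetical):

static int __init example_nat_init(void)
{
	BUG_ON(rcu_dereference(nat_example_hook));
	rcu_assign_pointer(nat_example_hook, example_help);
	return 0;
}

static void __exit example_nat_fini(void)
{
	rcu_assign_pointer(nat_example_hook, NULL);
	synchronize_rcu();	/* wait for in-flight readers */
}
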
 
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
index 4b6260a..9d1a517 100644
--- a/net/ipv4/netfilter/ip_nat_core.c
+++ b/net/ipv4/netfilter/ip_nat_core.c
@@ -362,12 +362,10 @@
 	iph = (void *)(*pskb)->data + iphdroff;
 
 	if (maniptype == IP_NAT_MANIP_SRC) {
-		iph->check = nf_csum_update(~iph->saddr, target->src.ip,
-					    iph->check);
+		nf_csum_replace4(&iph->check, iph->saddr, target->src.ip);
 		iph->saddr = target->src.ip;
 	} else {
-		iph->check = nf_csum_update(~iph->daddr, target->dst.ip,
-					    iph->check);
+		nf_csum_replace4(&iph->check, iph->daddr, target->dst.ip);
 		iph->daddr = target->dst.ip;
 	}
 	return 1;
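
The nf_csum_replace4() conversion in ip_nat_core.c drops the open-coded ~old/new pair that used to be passed to nf_csum_update(): the helper performs an RFC 1624 style incremental update, folding the complemented old word and the new word into the existing one's-complement sum. Roughly what it does, sketched with the generic checksum primitives (an illustration, not necessarily the exact implementation in this tree):

static inline void csum_replace4_sketch(__sum16 *sum, __be32 from, __be32 to)
{
	__be32 diff[] = { ~from, to };

	*sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
				      ~csum_unfold(*sum)));
}
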
diff --git a/net/ipv4/netfilter/ip_nat_ftp.c b/net/ipv4/netfilter/ip_nat_ftp.c
index a71c233..913960e 100644
--- a/net/ipv4/netfilter/ip_nat_ftp.c
+++ b/net/ipv4/netfilter/ip_nat_ftp.c
@@ -156,15 +156,14 @@
 
 static void __exit ip_nat_ftp_fini(void)
 {
-	ip_nat_ftp_hook = NULL;
-	/* Make sure noone calls it, meanwhile. */
-	synchronize_net();
+	rcu_assign_pointer(ip_nat_ftp_hook, NULL);
+	synchronize_rcu();
 }
 
 static int __init ip_nat_ftp_init(void)
 {
-	BUG_ON(ip_nat_ftp_hook);
-	ip_nat_ftp_hook = ip_nat_ftp;
+	BUG_ON(rcu_dereference(ip_nat_ftp_hook));
+	rcu_assign_pointer(ip_nat_ftp_hook, ip_nat_ftp);
 	return 0;
 }
 
diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c
index 3bf8584..ee80feb 100644
--- a/net/ipv4/netfilter/ip_nat_helper.c
+++ b/net/ipv4/netfilter/ip_nat_helper.c
@@ -188,10 +188,8 @@
 					   csum_partial((char *)tcph,
 					   		datalen, 0));
 	} else
-		tcph->check = nf_proto_csum_update(*pskb,
-						   htons(oldlen) ^ htons(0xFFFF),
-						   htons(datalen),
-						   tcph->check, 1);
+		nf_proto_csum_replace2(&tcph->check, *pskb,
+					htons(oldlen), htons(datalen), 1);
 
 	if (rep_len != match_len) {
 		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
@@ -264,12 +262,10 @@
 		                                csum_partial((char *)udph,
 		                                             datalen, 0));
 		if (!udph->check)
-			udph->check = -1;
+			udph->check = CSUM_MANGLED_0;
 	} else
-		udph->check = nf_proto_csum_update(*pskb,
-						   htons(oldlen) ^ htons(0xFFFF),
-						   htons(datalen),
-						   udph->check, 1);
+		nf_proto_csum_replace2(&udph->check, *pskb,
+					htons(oldlen), htons(datalen), 1);
 	return 1;
 }
 EXPORT_SYMBOL(ip_nat_mangle_udp_packet);
@@ -307,14 +303,10 @@
 			ntohl(sack->start_seq), new_start_seq,
 			ntohl(sack->end_seq), new_end_seq);
 
-		tcph->check = nf_proto_csum_update(skb,
-						   ~sack->start_seq,
-						   new_start_seq,
-						   tcph->check, 0);
-		tcph->check = nf_proto_csum_update(skb,
-						   ~sack->end_seq,
-						   new_end_seq,
-						   tcph->check, 0);
+		nf_proto_csum_replace4(&tcph->check, skb,
+					sack->start_seq, new_start_seq, 0);
+		nf_proto_csum_replace4(&tcph->check, skb,
+					sack->end_seq, new_end_seq, 0);
 		sack->start_seq = new_start_seq;
 		sack->end_seq = new_end_seq;
 		sackoff += sizeof(*sack);
@@ -397,10 +389,8 @@
 	else
 		newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before);
 
-	tcph->check = nf_proto_csum_update(*pskb, ~tcph->seq, newseq,
-					   tcph->check, 0);
-	tcph->check = nf_proto_csum_update(*pskb, ~tcph->ack_seq, newack,
-					   tcph->check, 0);
+	nf_proto_csum_replace4(&tcph->check, *pskb, tcph->seq, newseq, 0);
+	nf_proto_csum_replace4(&tcph->check, *pskb, tcph->ack_seq, newack, 0);
 
 	DEBUGP("Adjusting sequence number from %u->%u, ack from %u->%u\n",
 		ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
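
The CSUM_MANGLED_0 substitution above encodes a UDP quirk: an all-zero checksum on the wire means "no checksum computed", so a freshly calculated checksum that folds to zero has to be sent as 0xFFFF instead (the two values are equivalent in one's-complement arithmetic). A sketch of the full-recalculation branch, assuming saddr, daddr and datalen are already in scope:

udph->check = 0;
udph->check = csum_tcpudp_magic(saddr, daddr, datalen, IPPROTO_UDP,
				csum_partial((char *)udph, datalen, 0));
if (!udph->check)
	udph->check = CSUM_MANGLED_0;	/* 0 would read as "no checksum" */
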
diff --git a/net/ipv4/netfilter/ip_nat_helper_h323.c b/net/ipv4/netfilter/ip_nat_helper_h323.c
index 4a7d344..bdc99ef 100644
--- a/net/ipv4/netfilter/ip_nat_helper_h323.c
+++ b/net/ipv4/netfilter/ip_nat_helper_h323.c
@@ -563,25 +563,25 @@
 /****************************************************************************/
 static int __init init(void)
 {
-	BUG_ON(set_h245_addr_hook != NULL);
-	BUG_ON(set_h225_addr_hook != NULL);
-	BUG_ON(set_sig_addr_hook != NULL);
-	BUG_ON(set_ras_addr_hook != NULL);
-	BUG_ON(nat_rtp_rtcp_hook != NULL);
-	BUG_ON(nat_t120_hook != NULL);
-	BUG_ON(nat_h245_hook != NULL);
-	BUG_ON(nat_callforwarding_hook != NULL);
-	BUG_ON(nat_q931_hook != NULL);
+	BUG_ON(rcu_dereference(set_h245_addr_hook) != NULL);
+	BUG_ON(rcu_dereference(set_h225_addr_hook) != NULL);
+	BUG_ON(rcu_dereference(set_sig_addr_hook) != NULL);
+	BUG_ON(rcu_dereference(set_ras_addr_hook) != NULL);
+	BUG_ON(rcu_dereference(nat_rtp_rtcp_hook) != NULL);
+	BUG_ON(rcu_dereference(nat_t120_hook) != NULL);
+	BUG_ON(rcu_dereference(nat_h245_hook) != NULL);
+	BUG_ON(rcu_dereference(nat_callforwarding_hook) != NULL);
+	BUG_ON(rcu_dereference(nat_q931_hook) != NULL);
 
-	set_h245_addr_hook = set_h245_addr;
-	set_h225_addr_hook = set_h225_addr;
-	set_sig_addr_hook = set_sig_addr;
-	set_ras_addr_hook = set_ras_addr;
-	nat_rtp_rtcp_hook = nat_rtp_rtcp;
-	nat_t120_hook = nat_t120;
-	nat_h245_hook = nat_h245;
-	nat_callforwarding_hook = nat_callforwarding;
-	nat_q931_hook = nat_q931;
+	rcu_assign_pointer(set_h245_addr_hook, set_h245_addr);
+	rcu_assign_pointer(set_h225_addr_hook, set_h225_addr);
+	rcu_assign_pointer(set_sig_addr_hook, set_sig_addr);
+	rcu_assign_pointer(set_ras_addr_hook, set_ras_addr);
+	rcu_assign_pointer(nat_rtp_rtcp_hook, nat_rtp_rtcp);
+	rcu_assign_pointer(nat_t120_hook, nat_t120);
+	rcu_assign_pointer(nat_h245_hook, nat_h245);
+	rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding);
+	rcu_assign_pointer(nat_q931_hook, nat_q931);
 
 	DEBUGP("ip_nat_h323: init success\n");
 	return 0;
@@ -590,16 +590,16 @@
 /****************************************************************************/
 static void __exit fini(void)
 {
-	set_h245_addr_hook = NULL;
-	set_h225_addr_hook = NULL;
-	set_sig_addr_hook = NULL;
-	set_ras_addr_hook = NULL;
-	nat_rtp_rtcp_hook = NULL;
-	nat_t120_hook = NULL;
-	nat_h245_hook = NULL;
-	nat_callforwarding_hook = NULL;
-	nat_q931_hook = NULL;
-	synchronize_net();
+	rcu_assign_pointer(set_h245_addr_hook, NULL);
+	rcu_assign_pointer(set_h225_addr_hook, NULL);
+	rcu_assign_pointer(set_sig_addr_hook, NULL);
+	rcu_assign_pointer(set_ras_addr_hook, NULL);
+	rcu_assign_pointer(nat_rtp_rtcp_hook, NULL);
+	rcu_assign_pointer(nat_t120_hook, NULL);
+	rcu_assign_pointer(nat_h245_hook, NULL);
+	rcu_assign_pointer(nat_callforwarding_hook, NULL);
+	rcu_assign_pointer(nat_q931_hook, NULL);
+	synchronize_rcu();
 }
 
 /****************************************************************************/
diff --git a/net/ipv4/netfilter/ip_nat_helper_pptp.c b/net/ipv4/netfilter/ip_nat_helper_pptp.c
index 329fdcd..ec957bb 100644
--- a/net/ipv4/netfilter/ip_nat_helper_pptp.c
+++ b/net/ipv4/netfilter/ip_nat_helper_pptp.c
@@ -101,7 +101,7 @@
 
 	DEBUGP("trying to unexpect other dir: ");
 	DUMP_TUPLE(&t);
-	other_exp = ip_conntrack_expect_find(&t);
+	other_exp = ip_conntrack_expect_find_get(&t);
 	if (other_exp) {
 		ip_conntrack_unexpect_related(other_exp);
 		ip_conntrack_expect_put(other_exp);
@@ -315,17 +315,17 @@
 	if (ret < 0)
 		return ret;
 
-	BUG_ON(ip_nat_pptp_hook_outbound);
-	ip_nat_pptp_hook_outbound = &pptp_outbound_pkt;
+	BUG_ON(rcu_dereference(ip_nat_pptp_hook_outbound));
+	rcu_assign_pointer(ip_nat_pptp_hook_outbound, pptp_outbound_pkt);
 
-	BUG_ON(ip_nat_pptp_hook_inbound);
-	ip_nat_pptp_hook_inbound = &pptp_inbound_pkt;
+	BUG_ON(rcu_dereference(ip_nat_pptp_hook_inbound));
+	rcu_assign_pointer(ip_nat_pptp_hook_inbound, pptp_inbound_pkt);
 
-	BUG_ON(ip_nat_pptp_hook_exp_gre);
-	ip_nat_pptp_hook_exp_gre = &pptp_exp_gre;
+	BUG_ON(rcu_dereference(ip_nat_pptp_hook_exp_gre));
+	rcu_assign_pointer(ip_nat_pptp_hook_exp_gre, pptp_exp_gre);
 
-	BUG_ON(ip_nat_pptp_hook_expectfn);
-	ip_nat_pptp_hook_expectfn = &pptp_nat_expected;
+	BUG_ON(rcu_dereference(ip_nat_pptp_hook_expectfn));
+	rcu_assign_pointer(ip_nat_pptp_hook_expectfn, pptp_nat_expected);
 
 	printk("ip_nat_pptp version %s loaded\n", IP_NAT_PPTP_VERSION);
 	return 0;
@@ -335,14 +335,13 @@
 {
 	DEBUGP("cleanup_module\n" );
 
-	ip_nat_pptp_hook_expectfn = NULL;
-	ip_nat_pptp_hook_exp_gre = NULL;
-	ip_nat_pptp_hook_inbound = NULL;
-	ip_nat_pptp_hook_outbound = NULL;
+	rcu_assign_pointer(ip_nat_pptp_hook_expectfn, NULL);
+	rcu_assign_pointer(ip_nat_pptp_hook_exp_gre, NULL);
+	rcu_assign_pointer(ip_nat_pptp_hook_inbound, NULL);
+	rcu_assign_pointer(ip_nat_pptp_hook_outbound, NULL);
+	synchronize_rcu();
 
 	ip_nat_proto_gre_fini();
-	/* Make sure noone calls it, meanwhile */
-	synchronize_net();
 
 	printk("ip_nat_pptp version %s unloaded\n", IP_NAT_PPTP_VERSION);
 }
diff --git a/net/ipv4/netfilter/ip_nat_irc.c b/net/ipv4/netfilter/ip_nat_irc.c
index a767123..feb26b4 100644
--- a/net/ipv4/netfilter/ip_nat_irc.c
+++ b/net/ipv4/netfilter/ip_nat_irc.c
@@ -98,15 +98,14 @@
 
 static void __exit ip_nat_irc_fini(void)
 {
-	ip_nat_irc_hook = NULL;
-	/* Make sure noone calls it, meanwhile. */
-	synchronize_net();
+	rcu_assign_pointer(ip_nat_irc_hook, NULL);
+	synchronize_rcu();
 }
 
 static int __init ip_nat_irc_init(void)
 {
-	BUG_ON(ip_nat_irc_hook);
-	ip_nat_irc_hook = help;
+	BUG_ON(rcu_dereference(ip_nat_irc_hook));
+	rcu_assign_pointer(ip_nat_irc_hook, help);
 	return 0;
 }
 
diff --git a/net/ipv4/netfilter/ip_nat_proto_gre.c b/net/ipv4/netfilter/ip_nat_proto_gre.c
index bf91f93..9581020 100644
--- a/net/ipv4/netfilter/ip_nat_proto_gre.c
+++ b/net/ipv4/netfilter/ip_nat_proto_gre.c
@@ -129,11 +129,9 @@
 			}
 			if (greh->csum) {
 				/* FIXME: Never tested this code... */
-				*(gre_csum(greh)) =
-					nf_proto_csum_update(*pskb,
-							~*(gre_key(greh)),
-							tuple->dst.u.gre.key,
-							*(gre_csum(greh)), 0);
+				nf_proto_csum_replace4(gre_csum(greh), *pskb,
+							*(gre_key(greh)),
+							tuple->dst.u.gre.key, 0);
 			}
 			*(gre_key(greh)) = tuple->dst.u.gre.key;
 			break;
diff --git a/net/ipv4/netfilter/ip_nat_proto_icmp.c b/net/ipv4/netfilter/ip_nat_proto_icmp.c
index 3f6efc1..fb716ed 100644
--- a/net/ipv4/netfilter/ip_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_icmp.c
@@ -24,8 +24,8 @@
 	      const union ip_conntrack_manip_proto *min,
 	      const union ip_conntrack_manip_proto *max)
 {
-	return (tuple->src.u.icmp.id >= min->icmp.id
-		&& tuple->src.u.icmp.id <= max->icmp.id);
+	return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
+	       ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
 }
 
 static int
@@ -66,10 +66,8 @@
 		return 0;
 
 	hdr = (struct icmphdr *)((*pskb)->data + hdroff);
-	hdr->checksum = nf_proto_csum_update(*pskb,
-					     hdr->un.echo.id ^ htons(0xFFFF),
-					     tuple->src.u.icmp.id,
-					     hdr->checksum, 0);
+	nf_proto_csum_replace2(&hdr->checksum, *pskb,
+			       hdr->un.echo.id, tuple->src.u.icmp.id, 0);
 	hdr->un.echo.id = tuple->src.u.icmp.id;
 	return 1;
 }
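
The in_range() fix for ICMP ids above is purely a byte-order problem: the id fields are __be16, and comparing big-endian values as plain integers orders them incorrectly on little-endian hosts, so range checks silently break. Converting both sides to host order restores the intended ordering; a minimal sketch:

static int id_in_range(__be16 id, __be16 min, __be16 max)
{
	/* compare in host byte order, not wire order */
	return ntohs(id) >= ntohs(min) && ntohs(id) <= ntohs(max);
}
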
diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
index 12deb13..b586d18 100644
--- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
@@ -129,9 +129,8 @@
 	if (hdrsize < sizeof(*hdr))
 		return 1;
 
-	hdr->check = nf_proto_csum_update(*pskb, ~oldip, newip, hdr->check, 1);
-	hdr->check = nf_proto_csum_update(*pskb, oldport ^ htons(0xFFFF), newport,
-					  hdr->check, 0);
+	nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1);
+	nf_proto_csum_replace2(&hdr->check, *pskb, oldport, newport, 0);
 	return 1;
 }
 
diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
index 4bbec77..5ced087 100644
--- a/net/ipv4/netfilter/ip_nat_proto_udp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
@@ -115,13 +115,10 @@
 	}
 
 	if (hdr->check || (*pskb)->ip_summed == CHECKSUM_PARTIAL) {
-		hdr->check = nf_proto_csum_update(*pskb, ~oldip, newip,
-						  hdr->check, 1);
-		hdr->check = nf_proto_csum_update(*pskb,
-						  *portptr ^ htons(0xFFFF), newport,
-						  hdr->check, 0);
+		nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1);
+		nf_proto_csum_replace2(&hdr->check, *pskb, *portptr, newport, 0);
 		if (!hdr->check)
-			hdr->check = -1;
+			hdr->check = CSUM_MANGLED_0;
 	}
 	*portptr = newport;
 	return 1;
diff --git a/net/ipv4/netfilter/ip_nat_sip.c b/net/ipv4/netfilter/ip_nat_sip.c
index 71fc273..6223abc 100644
--- a/net/ipv4/netfilter/ip_nat_sip.c
+++ b/net/ipv4/netfilter/ip_nat_sip.c
@@ -29,18 +29,123 @@
 #define DEBUGP(format, args...)
 #endif
 
-extern struct sip_header_nfo ct_sip_hdrs[];
+struct addr_map {
+	struct {
+		char		src[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
+		char		dst[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
+		unsigned int	srclen, srciplen;
+		unsigned int	dstlen, dstiplen;
+	} addr[IP_CT_DIR_MAX];
+};
+
+static void addr_map_init(struct ip_conntrack *ct, struct addr_map *map)
+{
+	struct ip_conntrack_tuple *t;
+	enum ip_conntrack_dir dir;
+	unsigned int n;
+
+	for (dir = 0; dir < IP_CT_DIR_MAX; dir++) {
+		t = &ct->tuplehash[dir].tuple;
+
+		n = sprintf(map->addr[dir].src, "%u.%u.%u.%u",
+			    NIPQUAD(t->src.ip));
+		map->addr[dir].srciplen = n;
+		n += sprintf(map->addr[dir].src + n, ":%u",
+			     ntohs(t->src.u.udp.port));
+		map->addr[dir].srclen = n;
+
+		n = sprintf(map->addr[dir].dst, "%u.%u.%u.%u",
+			    NIPQUAD(t->dst.ip));
+		map->addr[dir].dstiplen = n;
+		n += sprintf(map->addr[dir].dst + n, ":%u",
+			     ntohs(t->dst.u.udp.port));
+		map->addr[dir].dstlen = n;
+	}
+}
+
+static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo,
+			struct ip_conntrack *ct, const char **dptr, size_t dlen,
+			enum sip_header_pos pos, struct addr_map *map)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	unsigned int matchlen, matchoff, addrlen;
+	char *addr;
+
+	if (ct_sip_get_info(*dptr, dlen, &matchoff, &matchlen, pos) <= 0)
+		return 1;
+
+	if ((matchlen == map->addr[dir].srciplen ||
+	     matchlen == map->addr[dir].srclen) &&
+	    memcmp(*dptr + matchoff, map->addr[dir].src, matchlen) == 0) {
+		addr    = map->addr[!dir].dst;
+		addrlen = map->addr[!dir].dstlen;
+	} else if ((matchlen == map->addr[dir].dstiplen ||
+		    matchlen == map->addr[dir].dstlen) &&
+		   memcmp(*dptr + matchoff, map->addr[dir].dst, matchlen) == 0) {
+		addr    = map->addr[!dir].src;
+		addrlen = map->addr[!dir].srclen;
+	} else
+		return 1;
+
+	if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo,
+	                              matchoff, matchlen, addr, addrlen))
+		return 0;
+	*dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
+	return 1;
+
+}
+
+static unsigned int ip_nat_sip(struct sk_buff **pskb,
+			       enum ip_conntrack_info ctinfo,
+			       struct ip_conntrack *ct,
+			       const char **dptr)
+{
+	enum sip_header_pos pos;
+	struct addr_map map;
+	int dataoff, datalen;
+
+	dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
+	datalen = (*pskb)->len - dataoff;
+	if (datalen < sizeof("SIP/2.0") - 1)
+		return NF_DROP;
+
+	addr_map_init(ct, &map);
+
+	/* Basic rules: requests and responses. */
+	if (strncmp(*dptr, "SIP/2.0", sizeof("SIP/2.0") - 1) != 0) {
+		/* 10.2: Constructing the REGISTER Request:
+		 *
+		 * The "userinfo" and "@" components of the SIP URI MUST NOT
+		 * be present.
+		 */
+		if (datalen >= sizeof("REGISTER") - 1 &&
+		    strncmp(*dptr, "REGISTER", sizeof("REGISTER") - 1) == 0)
+			pos = POS_REG_REQ_URI;
+		else
+			pos = POS_REQ_URI;
+
+		if (!map_sip_addr(pskb, ctinfo, ct, dptr, datalen, pos, &map))
+			return NF_DROP;
+	}
+
+	if (!map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_FROM, &map) ||
+	    !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_TO, &map) ||
+	    !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_VIA, &map) ||
+	    !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_CONTACT, &map))
+		return NF_DROP;
+	return NF_ACCEPT;
+}
 
 static unsigned int mangle_sip_packet(struct sk_buff **pskb,
 				      enum ip_conntrack_info ctinfo,
 				      struct ip_conntrack *ct,
 				      const char **dptr, size_t dlen,
 				      char *buffer, int bufflen,
-				      struct sip_header_nfo *hnfo)
+				      enum sip_header_pos pos)
 {
 	unsigned int matchlen, matchoff;
 
-	if (ct_sip_get_info(*dptr, dlen, &matchoff, &matchlen, hnfo) <= 0)
+	if (ct_sip_get_info(*dptr, dlen, &matchoff, &matchlen, pos) <= 0)
 		return 0;
 
 	if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo,
@@ -52,77 +157,6 @@
 	return 1;
 }
 
-static unsigned int ip_nat_sip(struct sk_buff **pskb,
-			       enum ip_conntrack_info ctinfo,
-			       struct ip_conntrack *ct,
-			       const char **dptr)
-{
-	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-	char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
-	unsigned int bufflen, dataoff;
-	__be32 ip;
-	__be16 port;
-
-	dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
-
-	ip   = ct->tuplehash[!dir].tuple.dst.ip;
-	port = ct->tuplehash[!dir].tuple.dst.u.udp.port;
-	bufflen = sprintf(buffer, "%u.%u.%u.%u:%u", NIPQUAD(ip), ntohs(port));
-
-	/* short packet ? */
-	if (((*pskb)->len - dataoff) < (sizeof("SIP/2.0") - 1))
-		return 0;
-
-	/* Basic rules: requests and responses. */
-	if (memcmp(*dptr, "SIP/2.0", sizeof("SIP/2.0") - 1) == 0) {
-		const char *aux;
-
-		if ((ctinfo) < IP_CT_IS_REPLY) {
-			mangle_sip_packet(pskb, ctinfo, ct, dptr,
-			                  (*pskb)->len - dataoff,
-			                  buffer, bufflen,
-			                  &ct_sip_hdrs[POS_CONTACT]);
-			return 1;
-		}
-
-		if (!mangle_sip_packet(pskb, ctinfo, ct, dptr,
-				       (*pskb)->len - dataoff,
-		                       buffer, bufflen, &ct_sip_hdrs[POS_VIA]))
-			return 0;
-
-		/* This search should ignore case, but later.. */
-		aux = ct_sip_search("CSeq:", *dptr, sizeof("CSeq:") - 1,
-		                    (*pskb)->len - dataoff);
-		if (!aux)
-			return 0;
-
-		if (!ct_sip_search("REGISTER", aux, sizeof("REGISTER"),
-		    ct_sip_lnlen(aux, *dptr + (*pskb)->len - dataoff)))
-			return 1;
-
-		return mangle_sip_packet(pskb, ctinfo, ct, dptr,
-					 (*pskb)->len - dataoff,
-		                         buffer, bufflen,
-					 &ct_sip_hdrs[POS_CONTACT]);
-	}
-	if ((ctinfo) < IP_CT_IS_REPLY) {
-		if (!mangle_sip_packet(pskb, ctinfo, ct, dptr,
-				       (*pskb)->len - dataoff,
-		                       buffer, bufflen, &ct_sip_hdrs[POS_VIA]))
-			return 0;
-
-		/* Mangle Contact if exists only. - watch udp_nat_mangle()! */
-		mangle_sip_packet(pskb, ctinfo, ct, dptr, (*pskb)->len - dataoff,
-		                  buffer, bufflen, &ct_sip_hdrs[POS_CONTACT]);
-		return 1;
-	}
-	/* This mangle requests headers. */
-	return mangle_sip_packet(pskb, ctinfo, ct, dptr,
-	                         ct_sip_lnlen(*dptr,
-				              *dptr + (*pskb)->len - dataoff),
-	                         buffer, bufflen, &ct_sip_hdrs[POS_REQ_HEADER]);
-}
-
 static int mangle_content_len(struct sk_buff **pskb,
 			      enum ip_conntrack_info ctinfo,
 			      struct ip_conntrack *ct,
@@ -136,7 +170,7 @@
 
 	/* Get actual SDP length */
 	if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff,
-	                    &matchlen, &ct_sip_hdrs[POS_SDP_HEADER]) > 0) {
+	                    &matchlen, POS_SDP_HEADER) > 0) {
 
 		/* since ct_sip_get_info() gives us a pointer passing 'v='
 		   we need to add 2 bytes in this count. */
@@ -144,7 +178,7 @@
 
 		/* Now, update SDP length */
 		if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff,
-		                    &matchlen, &ct_sip_hdrs[POS_CONTENT]) > 0) {
+		                    &matchlen, POS_CONTENT) > 0) {
 
 			bufflen = sprintf(buffer, "%u", c_len);
 
@@ -170,17 +204,17 @@
 	/* Mangle owner and contact info. */
 	bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip));
 	if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
-	                       buffer, bufflen, &ct_sip_hdrs[POS_OWNER]))
+	                       buffer, bufflen, POS_OWNER))
 		return 0;
 
 	if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
-	                       buffer, bufflen, &ct_sip_hdrs[POS_CONNECTION]))
+	                       buffer, bufflen, POS_CONNECTION))
 		return 0;
 
 	/* Mangle media port. */
 	bufflen = sprintf(buffer, "%u", port);
 	if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
-	                       buffer, bufflen, &ct_sip_hdrs[POS_MEDIA]))
+	                       buffer, bufflen, POS_MEDIA))
 		return 0;
 
 	return mangle_content_len(pskb, ctinfo, ct, dptr);
@@ -230,18 +264,17 @@
 
 static void __exit fini(void)
 {
-	ip_nat_sip_hook = NULL;
-	ip_nat_sdp_hook = NULL;
-	/* Make sure noone calls it, meanwhile. */
-	synchronize_net();
+	rcu_assign_pointer(ip_nat_sip_hook, NULL);
+	rcu_assign_pointer(ip_nat_sdp_hook, NULL);
+	synchronize_rcu();
 }
 
 static int __init init(void)
 {
-	BUG_ON(ip_nat_sip_hook);
-	BUG_ON(ip_nat_sdp_hook);
-	ip_nat_sip_hook = ip_nat_sip;
-	ip_nat_sdp_hook = ip_nat_sdp;
+	BUG_ON(rcu_dereference(ip_nat_sip_hook));
+	BUG_ON(rcu_dereference(ip_nat_sdp_hook));
+	rcu_assign_pointer(ip_nat_sip_hook, ip_nat_sip);
+	rcu_assign_pointer(ip_nat_sdp_hook, ip_nat_sdp);
 	return 0;
 }
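
The rewritten ip_nat_sip() above works textually: addr_map_init() renders both conntrack tuple directions as "a.b.c.d:port" strings, and map_sip_addr() replaces whichever of them a header contains with the opposite direction's counterpart. A worked example with hypothetical addresses for a phone behind source NAT (10.0.0.2 calling 192.0.2.1, NAT external address 198.51.100.7):

	tuple[ORIGINAL]:  src 10.0.0.2:5060    dst 192.0.2.1:5060
	tuple[REPLY]:     src 192.0.2.1:5060   dst 198.51.100.7:5060

	addr[ORIGINAL].src = "10.0.0.2:5060"    addr[ORIGINAL].dst = "192.0.2.1:5060"
	addr[REPLY].src    = "192.0.2.1:5060"   addr[REPLY].dst    = "198.51.100.7:5060"

On an outbound request, a Via:, From: or Contact: value matching "10.0.0.2:5060" (the ORIGINAL source) is rewritten to "198.51.100.7:5060" (the REPLY destination); the bare-IP forms are covered by the separate srciplen/dstiplen lengths.
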
 
diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c
index 168f45f..c3d9f3b 100644
--- a/net/ipv4/netfilter/ip_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/ip_nat_snmp_basic.c
@@ -64,7 +64,7 @@
 
 #define SNMP_PORT 161
 #define SNMP_TRAP_PORT 162
-#define NOCT1(n) (u_int8_t )((n) & 0xff)
+#define NOCT1(n) (*(u8 *)n)
 
 static int debug;
 static DEFINE_SPINLOCK(snmp_lock);
@@ -613,7 +613,7 @@
 static inline void mangle_address(unsigned char *begin,
                                   unsigned char *addr,
                                   const struct oct1_map *map,
-                                  u_int16_t *check);
+                                  __sum16 *check);
 struct snmp_cnv
 {
 	unsigned int class;
@@ -873,38 +873,24 @@
  * Fast checksum update for possibly oddly-aligned UDP byte, from the
  * code example in the draft.
  */
-static void fast_csum(unsigned char *csum,
+static void fast_csum(__sum16 *csum,
                       const unsigned char *optr,
                       const unsigned char *nptr,
-                      int odd)
+                      int offset)
 {
-	long x, old, new;
-	
-	x = csum[0] * 256 + csum[1];
-	
-	x =~ x & 0xFFFF;
-	
-	if (odd) old = optr[0] * 256;
-	else old = optr[0];
-	
-	x -= old & 0xFFFF;
-	if (x <= 0) {
-		x--;
-		x &= 0xFFFF;
+	unsigned char s[4];
+
+	if (offset & 1) {
+		s[0] = s[2] = 0;
+		s[1] = ~*optr;
+		s[3] = *nptr;
+	} else {
+		s[1] = s[3] = 0;
+		s[0] = ~*optr;
+		s[2] = *nptr;
 	}
-	
-	if (odd) new = nptr[0] * 256;
-	else new = nptr[0];
-	
-	x += new & 0xFFFF;
-	if (x & 0x10000) {
-		x++;
-		x &= 0xFFFF;
-	}
-	
-	x =~ x & 0xFFFF;
-	csum[0] = x / 256;
-	csum[1] = x & 0xFF;
+
+	*csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
 }
 
 /* 
@@ -915,9 +901,9 @@
 static inline void mangle_address(unsigned char *begin,
                                   unsigned char *addr,
                                   const struct oct1_map *map,
-                                  u_int16_t *check)
+                                  __sum16 *check)
 {
-	if (map->from == NOCT1(*addr)) {
+	if (map->from == NOCT1(addr)) {
 		u_int32_t old;
 		
 		if (debug)
@@ -927,11 +913,8 @@
 		
 		/* Update UDP checksum if being used */
 		if (*check) {
-			unsigned char odd = !((addr - begin) % 2);
-			
-			fast_csum((unsigned char *)check,
-			          &map->from, &map->to, odd);
-			          
+			fast_csum(check,
+			          &map->from, &map->to, addr - begin);
 		}
 		
 		if (debug)
@@ -943,7 +926,7 @@
 static unsigned char snmp_trap_decode(struct asn1_ctx *ctx,
                                       struct snmp_v1_trap *trap,
                                       const struct oct1_map *map,
-                                      u_int16_t *check)
+                                      __sum16 *check)
 {
 	unsigned int cls, con, tag, len;
 	unsigned char *end;
@@ -1037,7 +1020,7 @@
 static int snmp_parse_mangle(unsigned char *msg,
                              u_int16_t len,
                              const struct oct1_map *map,
-                             u_int16_t *check)
+                             __sum16 *check)
 {
 	unsigned char *eoc, *end;
 	unsigned int cls, con, tag, vers, pdutype;
@@ -1223,12 +1206,12 @@
 	 */
 	if (dir == IP_CT_DIR_ORIGINAL) {
 		/* SNAT traps */
-		map.from = NOCT1(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip);
-		map.to = NOCT1(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip);
+		map.from = NOCT1(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip);
+		map.to = NOCT1(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip);
 	} else {
 		/* DNAT replies */
-		map.from = NOCT1(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip);
-		map.to = NOCT1(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip);
+		map.from = NOCT1(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip);
+		map.to = NOCT1(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip);
 	}
 	
 	if (map.from == map.to)
@@ -1294,11 +1277,11 @@
 	.help = help,
 	.name = "snmp",
 
-	.tuple = { .src = { .u = { __constant_htons(SNMP_PORT) } },
-		   .dst = { .protonum = IPPROTO_UDP },
+	.tuple = {.src = {.u = {.udp = {.port = __constant_htons(SNMP_PORT)}}},
+		  .dst = {.protonum = IPPROTO_UDP},
 	},
-	.mask = { .src = { .u = { 0xFFFF } },
-		 .dst = { .protonum = 0xFF },
+	.mask = {.src = {.u = {0xFFFF}},
+		 .dst = {.protonum = 0xFF},
 	},
 };
 
@@ -1309,11 +1292,11 @@
 	.help = help,
 	.name = "snmp_trap",
 
-	.tuple = { .src = { .u = { __constant_htons(SNMP_TRAP_PORT) } },
-		   .dst = { .protonum = IPPROTO_UDP },
+	.tuple = {.src = {.u = {.udp = {.port = __constant_htons(SNMP_TRAP_PORT)}}},
+		  .dst = {.protonum = IPPROTO_UDP},
 	},
-	.mask = { .src = { .u = { 0xFFFF } },
-		 .dst = { .protonum = 0xFF },
+	.mask = {.src = {.u = {0xFFFF}},
+		 .dst = {.protonum = 0xFF},
 	},
 };
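The helper structures above now use fully designated initializers, naming the union member (.udp.port) instead of relying on the positional layout of the union. A self-contained illustration of that C99 idiom on a made-up miniature of the tuple layout (mini_tuple is hypothetical, not the real ip_conntrack_tuple):

#include <stdio.h>
#include <arpa/inet.h>

struct mini_tuple {
	struct {
		union {
			struct { unsigned short port; } udp;
			struct { unsigned short port; } tcp;
			unsigned short all;
		} u;
	} src;
	struct { unsigned char protonum; } dst;
};

int main(void)
{
	struct mini_tuple t = {
		.src = { .u = { .udp = { .port = htons(161) } } },
		.dst = { .protonum = 17 },	/* IPPROTO_UDP */
	};
	printf("port=%u proto=%u\n", ntohs(t.src.u.udp.port), t.dst.protonum);
	return 0;
}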
 
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index d85d2de..ad66328 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -44,12 +44,6 @@
 #define DEBUGP(format, args...)
 #endif
 
-#define HOOKNAME(hooknum) ((hooknum) == NF_IP_POST_ROUTING ? "POST_ROUTING"  \
-			   : ((hooknum) == NF_IP_PRE_ROUTING ? "PRE_ROUTING" \
-			      : ((hooknum) == NF_IP_LOCAL_OUT ? "LOCAL_OUT"  \
-			         : ((hooknum) == NF_IP_LOCAL_IN ? "LOCAL_IN"  \
-				    : "*ERROR*")))
-
 #ifdef CONFIG_XFRM
 static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
 {
diff --git a/net/ipv4/netfilter/ip_nat_tftp.c b/net/ipv4/netfilter/ip_nat_tftp.c
index 94a7801..6047935 100644
--- a/net/ipv4/netfilter/ip_nat_tftp.c
+++ b/net/ipv4/netfilter/ip_nat_tftp.c
@@ -55,15 +55,14 @@
 
 static void __exit ip_nat_tftp_fini(void)
 {
-	ip_nat_tftp_hook = NULL;
-	/* Make sure noone calls it, meanwhile. */
-	synchronize_net();
+	rcu_assign_pointer(ip_nat_tftp_hook, NULL);
+	synchronize_rcu();
 }
 
 static int __init ip_nat_tftp_init(void)
 {
-	BUG_ON(ip_nat_tftp_hook);
-	ip_nat_tftp_hook = help;
+	BUG_ON(rcu_dereference(ip_nat_tftp_hook));
+	rcu_assign_pointer(ip_nat_tftp_hook, help);
 	return 0;
 }
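With the hook now published via rcu_assign_pointer() and retired with synchronize_rcu(), callers are expected to pick it up inside an RCU read-side critical section. A sketch of that caller side, assuming kernel context and the hook signature used by the conntrack TFTP helper (ret, pskb, ctinfo and exp come from the surrounding helper; this is not compilable on its own):

	typeof(ip_nat_tftp_hook) hook;

	rcu_read_lock();
	hook = rcu_dereference(ip_nat_tftp_hook);	/* pairs with rcu_assign_pointer() */
	if (hook)
		ret = hook(pskb, ctinfo, exp);		/* NAT module loaded: let it mangle the packet */
	rcu_read_unlock();				/* a concurrent synchronize_rcu() may now complete */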
 
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 97556cc..cd520df 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -243,7 +243,7 @@
 	pmsg->data_len        = data_len;
 	pmsg->timestamp_sec   = entry->skb->tstamp.off_sec;
 	pmsg->timestamp_usec  = entry->skb->tstamp.off_usec;
-	pmsg->mark            = entry->skb->nfmark;
+	pmsg->mark            = entry->skb->mark;
 	pmsg->hook            = entry->info->hook;
 	pmsg->hw_protocol     = entry->skb->protocol;
 	
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 8a45543..0ff2956 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -401,6 +401,13 @@
 			    && unconditional(&e->ip)) {
 				unsigned int oldpos, size;
 
+				if (t->verdict < -NF_MAX_VERDICT - 1) {
+					duprintf("mark_source_chains: bad "
+						"negative verdict (%i)\n",
+								t->verdict);
+					return 0;
+				}
+
 				/* Return: backtrack through the last
 				   big jump. */
 				do {
@@ -438,6 +445,13 @@
 				if (strcmp(t->target.u.user.name,
 					   IPT_STANDARD_TARGET) == 0
 				    && newpos >= 0) {
+					if (newpos > newinfo->size -
+						sizeof(struct ipt_entry)) {
+						duprintf("mark_source_chains: "
+							"bad verdict (%i)\n",
+								newpos);
+						return 0;
+					}
 					/* This a jump; chase it. */
 					duprintf("Jump rule %u -> %u\n",
 						 pos, newpos);
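Both added checks enforce the same invariant from opposite ends: a standard-target verdict is either a small negative built-in verdict or a non-negative jump offset that must land inside the rule blob. A standalone sketch of that rule (NF_MAX_VERDICT_SKETCH and the sizes are stand-ins, not the kernel constants):

#include <stdbool.h>
#include <stddef.h>

#define NF_MAX_VERDICT_SKETCH 5	/* assumption: placeholder for NF_MAX_VERDICT */

/* Mirrors the two duprintf()-and-return-0 paths added above. */
static bool verdict_is_sane(int verdict, size_t blob_size, size_t entry_size)
{
	if (verdict < 0)
		return verdict >= -NF_MAX_VERDICT_SKETCH - 1;	/* built-in verdicts only */
	return (size_t)verdict <= blob_size - entry_size;	/* jump must stay inside the blob */
}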
@@ -470,27 +484,6 @@
 }
 
 static inline int
-standard_check(const struct ipt_entry_target *t,
-	       unsigned int max_offset)
-{
-	struct ipt_standard_target *targ = (void *)t;
-
-	/* Check standard info. */
-	if (targ->verdict >= 0
-	    && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
-		duprintf("ipt_standard_check: bad verdict (%i)\n",
-			 targ->verdict);
-		return 0;
-	}
-	if (targ->verdict < -NF_MAX_VERDICT - 1) {
-		duprintf("ipt_standard_check: bad negative verdict (%i)\n",
-			 targ->verdict);
-		return 0;
-	}
-	return 1;
-}
-
-static inline int
 check_match(struct ipt_entry_match *m,
 	    const char *name,
 	    const struct ipt_ip *ip,
@@ -576,12 +569,7 @@
 	if (ret)
 		goto err;
 
-	if (t->u.kernel.target == &ipt_standard_target) {
-		if (!standard_check(t, size)) {
-			ret = -EINVAL;
-			goto err;
-		}
-	} else if (t->u.kernel.target->checkentry
+	if (t->u.kernel.target->checkentry
 		   && !t->u.kernel.target->checkentry(name, e, target, t->data,
 						      e->comefrom)) {
 		duprintf("ip_tables: check failed for `%s'.\n",
@@ -718,17 +706,19 @@
 		}
 	}
 
+	if (!mark_source_chains(newinfo, valid_hooks, entry0))
+		return -ELOOP;
+
 	/* Finally, each sanity check must pass */
 	i = 0;
 	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
 				check_entry, name, size, &i);
 
-	if (ret != 0)
-		goto cleanup;
-
-	ret = -ELOOP;
-	if (!mark_source_chains(newinfo, valid_hooks, entry0))
-		goto cleanup;
+	if (ret != 0) {
+		IPT_ENTRY_ITERATE(entry0, newinfo->size,
+				cleanup_entry, &i);
+		return ret;
+	}
 
 	/* And one copy for every other CPU */
 	for_each_possible_cpu(i) {
@@ -736,9 +726,6 @@
 			memcpy(newinfo->entries[i], entry0, newinfo->size);
 	}
 
-	return 0;
-cleanup:
-	IPT_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i);
 	return ret;
 }
 
@@ -1529,25 +1516,8 @@
 	void **dstptr, compat_uint_t *size, const char *name,
 	const struct ipt_ip *ip, unsigned int hookmask)
 {
-	struct ipt_entry_match *dm;
-	struct ipt_match *match;
-	int ret;
-
-	dm = (struct ipt_entry_match *)*dstptr;
-	match = m->u.kernel.match;
 	xt_compat_match_from_user(m, dstptr, size);
-
-	ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
-			     name, hookmask, ip->proto,
-			     ip->invflags & IPT_INV_PROTO);
-	if (!ret && m->u.kernel.match->checkentry
-	    && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
-					      hookmask)) {
-		duprintf("ip_tables: check failed for `%s'.\n",
-			 m->u.kernel.match->name);
-		ret = -EINVAL;
-	}
-	return ret;
+	return 0;
 }
 
 static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
@@ -1569,7 +1539,7 @@
 	ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
 			name, &de->ip, de->comefrom);
 	if (ret)
-		goto err;
+		return ret;
 	de->target_offset = e->target_offset - (origsize - *size);
 	t = ipt_get_target(e);
 	target = t->u.kernel.target;
@@ -1582,31 +1552,62 @@
 		if ((unsigned char *)de - base < newinfo->underflow[h])
 			newinfo->underflow[h] -= origsize - *size;
 	}
+	return ret;
+}
 
-	t = ipt_get_target(de);
+static inline int compat_check_match(struct ipt_entry_match *m, const char *name,
+				const struct ipt_ip *ip, unsigned int hookmask)
+{
+	struct ipt_match *match;
+	int ret;
+
+	match = m->u.kernel.match;
+	ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
+			     name, hookmask, ip->proto,
+			     ip->invflags & IPT_INV_PROTO);
+	if (!ret && m->u.kernel.match->checkentry
+	    && !m->u.kernel.match->checkentry(name, ip, match, m->data,
+					      hookmask)) {
+		duprintf("ip_tables: compat: check failed for `%s'.\n",
+			 m->u.kernel.match->name);
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static inline int compat_check_target(struct ipt_entry *e, const char *name)
+{
+ 	struct ipt_entry_target *t;
+ 	struct ipt_target *target;
+ 	int ret;
+
+	t = ipt_get_target(e);
 	target = t->u.kernel.target;
 	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
 			      name, e->comefrom, e->ip.proto,
 			      e->ip.invflags & IPT_INV_PROTO);
-	if (ret)
-		goto err;
-
-	ret = -EINVAL;
-	if (t->u.kernel.target == &ipt_standard_target) {
-		if (!standard_check(t, *size))
-			goto err;
-	} else if (t->u.kernel.target->checkentry
-		   && !t->u.kernel.target->checkentry(name, de, target,
-						      t->data, de->comefrom)) {
+	if (!ret && t->u.kernel.target->checkentry
+		   && !t->u.kernel.target->checkentry(name, e, target,
+						      t->data, e->comefrom)) {
 		duprintf("ip_tables: compat: check failed for `%s'.\n",
 			 t->u.kernel.target->name);
-		goto err;
+		ret = -EINVAL;
 	}
-	ret = 0;
-err:
 	return ret;
 }
 
+static inline int compat_check_entry(struct ipt_entry *e, const char *name)
+{
+	int ret;
+
+	ret = IPT_MATCH_ITERATE(e, compat_check_match, name, &e->ip,
+								e->comefrom);
+	if (ret)
+		return ret;
+
+	return compat_check_target(e, name);
+}
+
 static int
 translate_compat_table(const char *name,
 		unsigned int valid_hooks,
@@ -1695,6 +1696,11 @@
 	if (!mark_source_chains(newinfo, valid_hooks, entry1))
 		goto free_newinfo;
 
+	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
+									name);
+	if (ret)
+		goto free_newinfo;
+
 	/* And one copy for every other CPU */
 	for_each_possible_cpu(i)
 		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 7a29d6e..0983650 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -40,8 +40,6 @@
 #define DEBUGP
 #endif
 
-#define ASSERT_READ_LOCK(x)
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
 MODULE_DESCRIPTION("iptables target for CLUSTERIP");
@@ -123,7 +121,6 @@
 {
 	struct list_head *pos;
 
-	ASSERT_READ_LOCK(&clusterip_lock);
 	list_for_each(pos, &clusterip_configs) {
 		struct clusterip_config *c = list_entry(pos, 
 					struct clusterip_config, list);
@@ -170,7 +167,6 @@
 			struct net_device *dev)
 {
 	struct clusterip_config *c;
-	char buffer[16];
 
 	c = kzalloc(sizeof(*c), GFP_ATOMIC);
 	if (!c)
@@ -187,12 +183,17 @@
 	atomic_set(&c->entries, 1);
 
 #ifdef CONFIG_PROC_FS
-	/* create proc dir entry */
-	sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip));
-	c->pde = create_proc_entry(buffer, S_IWUSR|S_IRUSR, clusterip_procdir);
-	if (!c->pde) {
-		kfree(c);
-		return NULL;
+	{
+		char buffer[16];
+
+		/* create proc dir entry */
+		sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip));
+		c->pde = create_proc_entry(buffer, S_IWUSR|S_IRUSR,
+					   clusterip_procdir);
+		if (!c->pde) {
+			kfree(c);
+			return NULL;
+		}
 	}
 	c->pde->proc_fops = &clusterip_proc_fops;
 	c->pde->data = c;
@@ -205,6 +206,7 @@
 	return c;
 }
 
+#ifdef CONFIG_PROC_FS
 static int
 clusterip_add_node(struct clusterip_config *c, u_int16_t nodenum)
 {
@@ -232,6 +234,7 @@
 
 	return 1;
 }
+#endif
 
 static inline u_int32_t
 clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config)
@@ -737,8 +740,10 @@
 		CLUSTERIP_VERSION);
 	return 0;
 
+#ifdef CONFIG_PROC_FS
 cleanup_hook:
 	nf_unregister_hook(&cip_arp_ops);
+#endif /* CONFIG_PROC_FS */
 cleanup_target:
 	ipt_unregister_target(&clusterip_tgt);
 	return ret;
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index 1aa4517..b55d670 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -28,17 +28,16 @@
 set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
 {
 	struct iphdr *iph = (*pskb)->nh.iph;
-	u_int16_t oldtos;
 
 	if ((iph->tos & IPT_ECN_IP_MASK) != (einfo->ip_ect & IPT_ECN_IP_MASK)) {
+		__u8 oldtos;
 		if (!skb_make_writable(pskb, sizeof(struct iphdr)))
 			return 0;
 		iph = (*pskb)->nh.iph;
 		oldtos = iph->tos;
 		iph->tos &= ~IPT_ECN_IP_MASK;
 		iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK);
-		iph->check = nf_csum_update(htons(oldtos) ^ htons(0xFFFF),
-					    htons(iph->tos), iph->check);
+		nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos));
 	} 
 	return 1;
 }
@@ -72,10 +71,8 @@
 	if (einfo->operation & IPT_ECN_OP_SET_CWR)
 		tcph->cwr = einfo->proto.tcp.cwr;
 
-	tcph->check = nf_proto_csum_update((*pskb),
-					   oldval ^ htons(0xFFFF),
-					   ((__be16 *)tcph)[6],
-					   tcph->check, 0);
+	nf_proto_csum_replace2(&tcph->check, *pskb,
+				oldval, ((__be16 *)tcph)[6], 0);
 	return 1;
 }
 
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 7dc820d..c96de16 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -171,11 +171,15 @@
 		}
 		break;
 	}
-	case IPPROTO_UDP: {
+	case IPPROTO_UDP:
+	case IPPROTO_UDPLITE: {
 		struct udphdr _udph, *uh;
 
-		/* Max length: 10 "PROTO=UDP " */
-		printk("PROTO=UDP ");
+		if (ih->protocol == IPPROTO_UDP)
+			/* Max length: 10 "PROTO=UDP "     */
+			printk("PROTO=UDP " );
+		else	/* Max length: 14 "PROTO=UDPLITE " */
+			printk("PROTO=UDPLITE ");
 
 		if (ntohs(ih->frag_off) & IP_OFFSET)
 			break;
@@ -341,6 +345,7 @@
 	/* IP:      40+46+6+11+127 = 230 */
 	/* TCP:     10+max(25,20+30+13+9+32+11+127) = 252 */
 	/* UDP:     10+max(25,20) = 35 */
+	/* UDPLITE: 14+max(25,20) = 39 */
 	/* ICMP:    11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
 	/* ESP:     10+max(25)+15 = 50 */
 	/* AH:      9+max(25)+15 = 49 */
@@ -425,13 +430,8 @@
 	li.u.log.level = loginfo->level;
 	li.u.log.logflags = loginfo->logflags;
 
-	if (loginfo->logflags & IPT_LOG_NFLOG)
-		nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
-		              "%s", loginfo->prefix);
-	else
-		ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
-		               loginfo->prefix);
-
+	ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
+	               loginfo->prefix);
 	return IPT_CONTINUE;
 }
 
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 3dbfcfa..28b9233 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -2,7 +2,7 @@
    (depending on route). */
 
 /* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -20,7 +20,11 @@
 #include <net/checksum.h>
 #include <net/route.h>
 #include <linux/netfilter_ipv4.h>
+#ifdef CONFIG_NF_NAT_NEEDED
+#include <net/netfilter/nf_nat_rule.h>
+#else
 #include <linux/netfilter_ipv4/ip_nat_rule.h>
+#endif
 #include <linux/netfilter_ipv4/ip_tables.h>
 
 MODULE_LICENSE("GPL");
@@ -65,23 +69,33 @@
 		  const struct xt_target *target,
 		  const void *targinfo)
 {
+#ifdef CONFIG_NF_NAT_NEEDED
+	struct nf_conn_nat *nat;
+#endif
 	struct ip_conntrack *ct;
 	enum ip_conntrack_info ctinfo;
-	const struct ip_nat_multi_range_compat *mr;
 	struct ip_nat_range newrange;
+	const struct ip_nat_multi_range_compat *mr;
 	struct rtable *rt;
 	__be32 newsrc;
 
 	IP_NF_ASSERT(hooknum == NF_IP_POST_ROUTING);
 
 	ct = ip_conntrack_get(*pskb, &ctinfo);
+#ifdef CONFIG_NF_NAT_NEEDED
+	nat = nfct_nat(ct);
+#endif
 	IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED
 	                    || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
 
 	/* Source address is 0.0.0.0 - locally generated packet that is
 	 * probably not supposed to be masqueraded.
 	 */
+#ifdef CONFIG_NF_NAT_NEEDED
+	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0)
+#else
 	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip == 0)
+#endif
 		return NF_ACCEPT;
 
 	mr = targinfo;
@@ -93,7 +107,11 @@
 	}
 
 	write_lock_bh(&masq_lock);
+#ifdef CONFIG_NF_NAT_NEEDED
+	nat->masq_index = out->ifindex;
+#else
 	ct->nat.masq_index = out->ifindex;
+#endif
 	write_unlock_bh(&masq_lock);
 
 	/* Transfer from original range. */
@@ -109,10 +127,17 @@
 static inline int
 device_cmp(struct ip_conntrack *i, void *ifindex)
 {
+#ifdef CONFIG_NF_NAT_NEEDED
+	struct nf_conn_nat *nat = nfct_nat(i);
+#endif
 	int ret;
 
 	read_lock_bh(&masq_lock);
+#ifdef CONFIG_NF_NAT_NEEDED
+	ret = (nat->masq_index == (int)(long)ifindex);
+#else
 	ret = (i->nat.masq_index == (int)(long)ifindex);
+#endif
 	read_unlock_bh(&masq_lock);
 
 	return ret;
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
index 58a88f2..9390e90 100644
--- a/net/ipv4/netfilter/ipt_NETMAP.c
+++ b/net/ipv4/netfilter/ipt_NETMAP.c
@@ -15,7 +15,11 @@
 #include <linux/netdevice.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv4.h>
+#ifdef CONFIG_NF_NAT_NEEDED
+#include <net/netfilter/nf_nat_rule.h>
+#else
 #include <linux/netfilter_ipv4/ip_nat_rule.h>
+#endif
 
 #define MODULENAME "NETMAP"
 MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
index c0dcfe9..462eceb 100644
--- a/net/ipv4/netfilter/ipt_REDIRECT.c
+++ b/net/ipv4/netfilter/ipt_REDIRECT.c
@@ -1,6 +1,6 @@
 /* Redirect.  Simple mapping which alters dst to a local IP address. */
 /* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -18,7 +18,11 @@
 #include <net/protocol.h>
 #include <net/checksum.h>
 #include <linux/netfilter_ipv4.h>
+#ifdef CONFIG_NF_NAT_NEEDED
+#include <net/netfilter/nf_nat_rule.h>
+#else
 #include <linux/netfilter_ipv4/ip_nat_rule.h>
+#endif
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 264763a..f0319e5 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -76,7 +76,7 @@
 
 	/* This packet will not be the same as the other: clear nf fields */
 	nf_reset(nskb);
-	nskb->nfmark = 0;
+	nskb->mark = 0;
 	skb_init_secmark(nskb);
 
 	tcph = (struct tcphdr *)((u_int32_t*)nskb->nh.iph + nskb->nh.iph->ihl);
diff --git a/net/ipv4/netfilter/ipt_SAME.c b/net/ipv4/netfilter/ipt_SAME.c
index b38b133..3dcf294 100644
--- a/net/ipv4/netfilter/ipt_SAME.c
+++ b/net/ipv4/netfilter/ipt_SAME.c
@@ -34,7 +34,11 @@
 #include <net/protocol.h>
 #include <net/checksum.h>
 #include <linux/netfilter_ipv4.h>
+#ifdef CONFIG_NF_NAT_NEEDED
+#include <net/netfilter/nf_nat_rule.h>
+#else
 #include <linux/netfilter_ipv4/ip_nat_rule.h>
+#endif
 #include <linux/netfilter_ipv4/ipt_SAME.h>
 
 MODULE_LICENSE("GPL");
@@ -152,11 +156,17 @@
 	   Here we calculate the index in same->iparray which
 	   holds the ipaddress we should use */
 	
+#ifdef CONFIG_NF_NAT_NEEDED
+	tmpip = ntohl(t->src.u3.ip);
+
+	if (!(same->info & IPT_SAME_NODST))
+		tmpip += ntohl(t->dst.u3.ip);
+#else
 	tmpip = ntohl(t->src.ip);
 
 	if (!(same->info & IPT_SAME_NODST))
 		tmpip += ntohl(t->dst.ip);
-	
+#endif
 	aindex = tmpip % same->ipnum;
 
 	new_ip = htonl(same->iparray[aindex]);
diff --git a/net/ipv4/netfilter/ipt_TCPMSS.c b/net/ipv4/netfilter/ipt_TCPMSS.c
index 108b6b7..93eb5c3 100644
--- a/net/ipv4/netfilter/ipt_TCPMSS.c
+++ b/net/ipv4/netfilter/ipt_TCPMSS.c
@@ -97,10 +97,8 @@
 			opt[i+2] = (newmss & 0xff00) >> 8;
 			opt[i+3] = (newmss & 0x00ff);
 
-			tcph->check = nf_proto_csum_update(*pskb,
-							   htons(oldmss)^htons(0xFFFF),
-							   htons(newmss),
-							   tcph->check, 0);
+			nf_proto_csum_replace2(&tcph->check, *pskb,
+						htons(oldmss), htons(newmss), 0);
 			return IPT_CONTINUE;
 		}
 	}
@@ -126,28 +124,22 @@
  	opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
 	memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
 
-	tcph->check = nf_proto_csum_update(*pskb,
-					   htons(tcplen) ^ htons(0xFFFF),
-				           htons(tcplen + TCPOLEN_MSS),
-					   tcph->check, 1);
+	nf_proto_csum_replace2(&tcph->check, *pskb,
+				htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
 	opt[0] = TCPOPT_MSS;
 	opt[1] = TCPOLEN_MSS;
 	opt[2] = (newmss & 0xff00) >> 8;
 	opt[3] = (newmss & 0x00ff);
 
-	tcph->check = nf_proto_csum_update(*pskb, htonl(~0), *((__be32 *)opt),
-					   tcph->check, 0);
+	nf_proto_csum_replace4(&tcph->check, *pskb, 0, *((__be32 *)opt), 0);
 
 	oldval = ((__be16 *)tcph)[6];
 	tcph->doff += TCPOLEN_MSS/4;
-	tcph->check = nf_proto_csum_update(*pskb,
-					   oldval ^ htons(0xFFFF),
-					   ((__be16 *)tcph)[6],
-					   tcph->check, 0);
+	nf_proto_csum_replace2(&tcph->check, *pskb,
+				oldval, ((__be16 *)tcph)[6], 0);
 
 	newtotlen = htons(ntohs(iph->tot_len) + TCPOLEN_MSS);
-	iph->check = nf_csum_update(iph->tot_len ^ htons(0xFFFF),
-				    newtotlen, iph->check);
+	nf_csum_replace2(&iph->check, iph->tot_len, newtotlen);
 	iph->tot_len = newtotlen;
 	return IPT_CONTINUE;
 }
diff --git a/net/ipv4/netfilter/ipt_TOS.c b/net/ipv4/netfilter/ipt_TOS.c
index 83b80b3..18e74ac 100644
--- a/net/ipv4/netfilter/ipt_TOS.c
+++ b/net/ipv4/netfilter/ipt_TOS.c
@@ -30,16 +30,15 @@
 {
 	const struct ipt_tos_target_info *tosinfo = targinfo;
 	struct iphdr *iph = (*pskb)->nh.iph;
-	u_int16_t oldtos;
 
 	if ((iph->tos & IPTOS_TOS_MASK) != tosinfo->tos) {
+		__u8 oldtos;
 		if (!skb_make_writable(pskb, sizeof(struct iphdr)))
 			return NF_DROP;
 		iph = (*pskb)->nh.iph;
 		oldtos = iph->tos;
 		iph->tos = (iph->tos & IPTOS_PREC_MASK) | tosinfo->tos;
-		iph->check = nf_csum_update(htons(oldtos) ^ htons(0xFFFF),
-					    htons(iph->tos), iph->check);
+		nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos));
 	}
 	return IPT_CONTINUE;
 }
diff --git a/net/ipv4/netfilter/ipt_TTL.c b/net/ipv4/netfilter/ipt_TTL.c
index ac9517d..fffe5ca 100644
--- a/net/ipv4/netfilter/ipt_TTL.c
+++ b/net/ipv4/netfilter/ipt_TTL.c
@@ -54,9 +54,8 @@
 	}
 
 	if (new_ttl != iph->ttl) {
-		iph->check = nf_csum_update(htons((iph->ttl << 8)) ^ htons(0xFFFF),
-					    htons(new_ttl << 8),
-					    iph->check);
+		nf_csum_replace2(&iph->check, htons(iph->ttl << 8),
+					      htons(new_ttl << 8));
 		iph->ttl = new_ttl;
 	}
 
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 2b104ea..dbd3478 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -239,7 +239,7 @@
 	pm->data_len = copy_len;
 	pm->timestamp_sec = skb->tstamp.off_sec;
 	pm->timestamp_usec = skb->tstamp.off_usec;
-	pm->mark = skb->nfmark;
+	pm->mark = skb->mark;
 	pm->hook = hooknum;
 	if (prefix != NULL)
 		strncpy(pm->prefix, prefix, sizeof(pm->prefix));
diff --git a/net/ipv4/netfilter/ipt_hashlimit.c b/net/ipv4/netfilter/ipt_hashlimit.c
deleted file mode 100644
index 33ccdbf..0000000
--- a/net/ipv4/netfilter/ipt_hashlimit.c
+++ /dev/null
@@ -1,733 +0,0 @@
-/* iptables match extension to limit the number of packets per second
- * seperately for each hashbucket (sourceip/sourceport/dstip/dstport)
- *
- * (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
- *
- * $Id: ipt_hashlimit.c 3244 2004-10-20 16:24:29Z laforge@netfilter.org $
- *
- * Development of this code was funded by Astaro AG, http://www.astaro.com/
- *
- * based on ipt_limit.c by:
- * Jérôme de Vivie	<devivie@info.enserb.u-bordeaux.fr>
- * Hervé Eychenne	<eychenne@info.enserb.u-bordeaux.fr>
- * Rusty Russell	<rusty@rustcorp.com.au>
- *
- * The general idea is to create a hash table for every dstip and have a
- * seperate limit counter per tuple.  This way you can do something like 'limit
- * the number of syn packets for each of my internal addresses.
- *
- * Ideally this would just be implemented as a general 'hash' match, which would
- * allow us to attach any iptables target to it's hash buckets.  But this is
- * not possible in the current iptables architecture.  As always, pkttables for
- * 2.7.x will help ;)
- */
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/random.h>
-#include <linux/jhash.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/list.h>
-
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_hashlimit.h>
-
-/* FIXME: this is just for IP_NF_ASSERRT */
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/mutex.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_DESCRIPTION("iptables match for limiting per hash-bucket");
-
-/* need to declare this at the top */
-static struct proc_dir_entry *hashlimit_procdir;
-static struct file_operations dl_file_ops;
-
-/* hash table crap */
-
-struct dsthash_dst {
-	__be32 src_ip;
-	__be32 dst_ip;
-	/* ports have to be consecutive !!! */
-	__be16 src_port;
-	__be16 dst_port;
-};
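The "ports have to be consecutive" comment is what lets the match routine below fill both ports with a single skb_header_pointer() read: TCP, UDP, SCTP and DCCP all begin with the source port followed by the destination port. A tiny userspace illustration of the same trick (the header bytes are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

struct ports { uint16_t src_port, dst_port; };	/* must stay adjacent, as above */

int main(void)
{
	/* First bytes of a UDP header: sport 5353, dport 53, length, checksum. */
	uint8_t udp_hdr[8] = { 0x14, 0xe9, 0x00, 0x35, 0x00, 0x08, 0x00, 0x00 };
	struct ports p;

	memcpy(&p, udp_hdr, sizeof(p));		/* the skb_header_pointer() analogue */
	printf("src=%u dst=%u\n", ntohs(p.src_port), ntohs(p.dst_port));
	return 0;
}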
-
-struct dsthash_ent {
-	/* static / read-only parts in the beginning */
-	struct hlist_node node;
-	struct dsthash_dst dst;
-
-	/* modified structure members in the end */
-	unsigned long expires;		/* precalculated expiry time */
-	struct {
-		unsigned long prev;	/* last modification */
-		u_int32_t credit;
-		u_int32_t credit_cap, cost;
-	} rateinfo;
-};
-
-struct ipt_hashlimit_htable {
-	struct hlist_node node;		/* global list of all htables */
-	atomic_t use;
-
-	struct hashlimit_cfg cfg;	/* config */
-
-	/* used internally */
-	spinlock_t lock;		/* lock for list_head */
-	u_int32_t rnd;			/* random seed for hash */
-	int rnd_initialized;
-	struct timer_list timer;	/* timer for gc */
-	atomic_t count;			/* number entries in table */
-
-	/* seq_file stuff */
-	struct proc_dir_entry *pde;
-
-	struct hlist_head hash[0];	/* hashtable itself */
-};
-
-static DEFINE_SPINLOCK(hashlimit_lock);	/* protects htables list */
-static DEFINE_MUTEX(hlimit_mutex);	/* additional checkentry protection */
-static HLIST_HEAD(hashlimit_htables);
-static kmem_cache_t *hashlimit_cachep __read_mostly;
-
-static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b)
-{
-	return (ent->dst.dst_ip == b->dst_ip 
-		&& ent->dst.dst_port == b->dst_port
-		&& ent->dst.src_port == b->src_port
-		&& ent->dst.src_ip == b->src_ip);
-}
-
-static inline u_int32_t
-hash_dst(const struct ipt_hashlimit_htable *ht, const struct dsthash_dst *dst)
-{
-	return (jhash_3words((__force u32)dst->dst_ip,
-			    ((__force u32)dst->dst_port<<16 |
-			     (__force u32)dst->src_port),
-			     (__force u32)dst->src_ip, ht->rnd) % ht->cfg.size);
-}
-
-static inline struct dsthash_ent *
-__dsthash_find(const struct ipt_hashlimit_htable *ht, struct dsthash_dst *dst)
-{
-	struct dsthash_ent *ent;
-	struct hlist_node *pos;
-	u_int32_t hash = hash_dst(ht, dst);
-
-	if (!hlist_empty(&ht->hash[hash]))
-		hlist_for_each_entry(ent, pos, &ht->hash[hash], node) {
-			if (dst_cmp(ent, dst)) {
-				return ent;
-			}
-		}
-	
-	return NULL;
-}
-
-/* allocate dsthash_ent, initialize dst, put in htable and lock it */
-static struct dsthash_ent *
-__dsthash_alloc_init(struct ipt_hashlimit_htable *ht, struct dsthash_dst *dst)
-{
-	struct dsthash_ent *ent;
-
-	/* initialize hash with random val at the time we allocate
-	 * the first hashtable entry */
-	if (!ht->rnd_initialized) {
-		get_random_bytes(&ht->rnd, 4);
-		ht->rnd_initialized = 1;
-	}
-
-	if (ht->cfg.max &&
-	    atomic_read(&ht->count) >= ht->cfg.max) {
-		/* FIXME: do something. question is what.. */
-		if (net_ratelimit())
-			printk(KERN_WARNING 
-				"ipt_hashlimit: max count of %u reached\n", 
-				ht->cfg.max);
-		return NULL;
-	}
-
-	ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
-	if (!ent) {
-		if (net_ratelimit())
-			printk(KERN_ERR 
-				"ipt_hashlimit: can't allocate dsthash_ent\n");
-		return NULL;
-	}
-
-	atomic_inc(&ht->count);
-
-	ent->dst.dst_ip = dst->dst_ip;
-	ent->dst.dst_port = dst->dst_port;
-	ent->dst.src_ip = dst->src_ip;
-	ent->dst.src_port = dst->src_port;
-
-	hlist_add_head(&ent->node, &ht->hash[hash_dst(ht, dst)]);
-
-	return ent;
-}
-
-static inline void 
-__dsthash_free(struct ipt_hashlimit_htable *ht, struct dsthash_ent *ent)
-{
-	hlist_del(&ent->node);
-	kmem_cache_free(hashlimit_cachep, ent);
-	atomic_dec(&ht->count);
-}
-static void htable_gc(unsigned long htlong);
-
-static int htable_create(struct ipt_hashlimit_info *minfo)
-{
-	int i;
-	unsigned int size;
-	struct ipt_hashlimit_htable *hinfo;
-
-	if (minfo->cfg.size)
-		size = minfo->cfg.size;
-	else {
-		size = (((num_physpages << PAGE_SHIFT) / 16384)
-			 / sizeof(struct list_head));
-		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
-			size = 8192;
-		if (size < 16)
-			size = 16;
-	}
-	/* FIXME: don't use vmalloc() here or anywhere else -HW */
-	hinfo = vmalloc(sizeof(struct ipt_hashlimit_htable)
-			+ (sizeof(struct list_head) * size));
-	if (!hinfo) {
-		printk(KERN_ERR "ipt_hashlimit: Unable to create hashtable\n");
-		return -1;
-	}
-	minfo->hinfo = hinfo;
-
-	/* copy match config into hashtable config */
-	memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg));
-	hinfo->cfg.size = size;
-	if (!hinfo->cfg.max)
-		hinfo->cfg.max = 8 * hinfo->cfg.size;
-	else if (hinfo->cfg.max < hinfo->cfg.size)
-		hinfo->cfg.max = hinfo->cfg.size;
-
-	for (i = 0; i < hinfo->cfg.size; i++)
-		INIT_HLIST_HEAD(&hinfo->hash[i]);
-
-	atomic_set(&hinfo->count, 0);
-	atomic_set(&hinfo->use, 1);
-	hinfo->rnd_initialized = 0;
-	spin_lock_init(&hinfo->lock);
-	hinfo->pde = create_proc_entry(minfo->name, 0, hashlimit_procdir);
-	if (!hinfo->pde) {
-		vfree(hinfo);
-		return -1;
-	}
-	hinfo->pde->proc_fops = &dl_file_ops;
-	hinfo->pde->data = hinfo;
-
-	init_timer(&hinfo->timer);
-	hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
-	hinfo->timer.data = (unsigned long )hinfo;
-	hinfo->timer.function = htable_gc;
-	add_timer(&hinfo->timer);
-
-	spin_lock_bh(&hashlimit_lock);
-	hlist_add_head(&hinfo->node, &hashlimit_htables);
-	spin_unlock_bh(&hashlimit_lock);
-
-	return 0;
-}
-
-static int select_all(struct ipt_hashlimit_htable *ht, struct dsthash_ent *he)
-{
-	return 1;
-}
-
-static int select_gc(struct ipt_hashlimit_htable *ht, struct dsthash_ent *he)
-{
-	return (jiffies >= he->expires);
-}
-
-static void htable_selective_cleanup(struct ipt_hashlimit_htable *ht,
-		 		int (*select)(struct ipt_hashlimit_htable *ht, 
-					      struct dsthash_ent *he))
-{
-	int i;
-
-	IP_NF_ASSERT(ht->cfg.size && ht->cfg.max);
-
-	/* lock hash table and iterate over it */
-	spin_lock_bh(&ht->lock);
-	for (i = 0; i < ht->cfg.size; i++) {
-		struct dsthash_ent *dh;
-		struct hlist_node *pos, *n;
-		hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) {
-			if ((*select)(ht, dh))
-				__dsthash_free(ht, dh);
-		}
-	}
-	spin_unlock_bh(&ht->lock);
-}
-
-/* hash table garbage collector, run by timer */
-static void htable_gc(unsigned long htlong)
-{
-	struct ipt_hashlimit_htable *ht = (struct ipt_hashlimit_htable *)htlong;
-
-	htable_selective_cleanup(ht, select_gc);
-
-	/* re-add the timer accordingly */
-	ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval);
-	add_timer(&ht->timer);
-}
-
-static void htable_destroy(struct ipt_hashlimit_htable *hinfo)
-{
-	/* remove timer, if it is pending */
-	if (timer_pending(&hinfo->timer))
-		del_timer(&hinfo->timer);
-
-	/* remove proc entry */
-	remove_proc_entry(hinfo->pde->name, hashlimit_procdir);
-
-	htable_selective_cleanup(hinfo, select_all);
-	vfree(hinfo);
-}
-
-static struct ipt_hashlimit_htable *htable_find_get(char *name)
-{
-	struct ipt_hashlimit_htable *hinfo;
-	struct hlist_node *pos;
-
-	spin_lock_bh(&hashlimit_lock);
-	hlist_for_each_entry(hinfo, pos, &hashlimit_htables, node) {
-		if (!strcmp(name, hinfo->pde->name)) {
-			atomic_inc(&hinfo->use);
-			spin_unlock_bh(&hashlimit_lock);
-			return hinfo;
-		}
-	}
-	spin_unlock_bh(&hashlimit_lock);
-
-	return NULL;
-}
-
-static void htable_put(struct ipt_hashlimit_htable *hinfo)
-{
-	if (atomic_dec_and_test(&hinfo->use)) {
-		spin_lock_bh(&hashlimit_lock);
-		hlist_del(&hinfo->node);
-		spin_unlock_bh(&hashlimit_lock);
-		htable_destroy(hinfo);
-	}
-}
-
-
-/* The algorithm used is the Simple Token Bucket Filter (TBF)
- * see net/sched/sch_tbf.c in the linux source tree
- */
-
-/* Rusty: This is my (non-mathematically-inclined) understanding of
-   this algorithm.  The `average rate' in jiffies becomes your initial
-   amount of credit `credit' and the most credit you can ever have
-   `credit_cap'.  The `peak rate' becomes the cost of passing the
-   test, `cost'.
-
-   `prev' tracks the last packet hit: you gain one credit per jiffy.
-   If you get credit balance more than this, the extra credit is
-   discarded.  Every time the match passes, you lose `cost' credits;
-   if you don't have that many, the test fails.
-
-   See Alexey's formal explanation in net/sched/sch_tbf.c.
-
-   To get the maximum range, we multiply by this factor (ie. you get N
-   credits per jiffy).  We want to allow a rate as low as 1 per day
-   (slowest userspace tool allows), which means
-   CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
-*/
-#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))
-
-/* Repeated shift and or gives us all 1s, final shift and add 1 gives
- * us the power of 2 below the theoretical max, so GCC simply does a
- * shift. */
-#define _POW2_BELOW2(x) ((x)|((x)>>1))
-#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
-#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
-#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
-#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
-#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)
-
-#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
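Worked through with an assumed HZ of 1000 (the real value depends on the kernel configuration): MAX_CPJ = 0xFFFFFFFF / 86,400,000 = 49, and POW2_BELOW32(49) rounds that down to the power of two below it, giving 32 credits per jiffy. The macros can be exercised verbatim in userspace:

#include <stdio.h>

#define HZ 1000			/* assumption for this example only */
#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))

#define _POW2_BELOW2(x) ((x)|((x)>>1))
#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)

int main(void)
{
	/* Prints "MAX_CPJ=49 CREDITS_PER_JIFFY=32" with HZ=1000. */
	printf("MAX_CPJ=%u CREDITS_PER_JIFFY=%u\n",
	       (unsigned)MAX_CPJ, (unsigned)POW2_BELOW32(MAX_CPJ));
	return 0;
}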
-
-/* Precision saver. */
-static inline u_int32_t
-user2credits(u_int32_t user)
-{
-	/* If multiplying would overflow... */
-	if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
-		/* Divide first. */
-		return (user / IPT_HASHLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
-
-	return (user * HZ * CREDITS_PER_JIFFY) / IPT_HASHLIMIT_SCALE;
-}
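With the HZ=1000, CREDITS_PER_JIFFY=32 figures from the example above, the fast path applies whenever user <= 0xFFFFFFFF / 32000 (roughly 134,000); larger values divide by the scale factor first so the intermediate product cannot overflow 32 bits, trading away sub-scale precision. A standalone sketch of the same divide-first-or-multiply-first choice (SCALE and the HZ/credit figures are assumptions carried over from above):

#include <stdint.h>
#include <stdio.h>

#define SCALE 10000u		/* assumption: stand-in for IPT_HASHLIMIT_SCALE */
#define HZ_X_CPJ (1000u * 32u)	/* HZ * CREDITS_PER_JIFFY from the HZ=1000 example */

static uint32_t user2credits_sketch(uint32_t user)
{
	if (user > 0xFFFFFFFFu / HZ_X_CPJ)		/* multiplying would overflow... */
		return (user / SCALE) * HZ_X_CPJ;	/* ...so divide first, losing precision */
	return (user * HZ_X_CPJ) / SCALE;		/* otherwise multiply first, divide last */
}

int main(void)
{
	/* 10000 stays on the precise path (32000 credits); 500000 takes the
	 * divide-first path (500000/10000 * 32000 = 1600000 credits). */
	printf("%u %u\n", user2credits_sketch(10000), user2credits_sketch(500000));
	return 0;
}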
-
-static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
-{
-	dh->rateinfo.credit += (now - xchg(&dh->rateinfo.prev, now)) 
-					* CREDITS_PER_JIFFY;
-	if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
-		dh->rateinfo.credit = dh->rateinfo.credit_cap;
-}
-
-static int
-hashlimit_match(const struct sk_buff *skb,
-		const struct net_device *in,
-		const struct net_device *out,
-		const struct xt_match *match,
-		const void *matchinfo,
-		int offset,
-		unsigned int protoff,
-		int *hotdrop)
-{
-	struct ipt_hashlimit_info *r = 
-		((struct ipt_hashlimit_info *)matchinfo)->u.master;
-	struct ipt_hashlimit_htable *hinfo = r->hinfo;
-	unsigned long now = jiffies;
-	struct dsthash_ent *dh;
-	struct dsthash_dst dst;
-
-	/* build 'dst' according to hinfo->cfg and current packet */
-	memset(&dst, 0, sizeof(dst));
-	if (hinfo->cfg.mode & IPT_HASHLIMIT_HASH_DIP)
-		dst.dst_ip = skb->nh.iph->daddr;
-	if (hinfo->cfg.mode & IPT_HASHLIMIT_HASH_SIP)
-		dst.src_ip = skb->nh.iph->saddr;
-	if (hinfo->cfg.mode & IPT_HASHLIMIT_HASH_DPT
-	    ||hinfo->cfg.mode & IPT_HASHLIMIT_HASH_SPT) {
-		__be16 _ports[2], *ports;
-
-		switch (skb->nh.iph->protocol) {
-		case IPPROTO_TCP:
-		case IPPROTO_UDP:
-		case IPPROTO_SCTP:
-		case IPPROTO_DCCP:
-			ports = skb_header_pointer(skb, skb->nh.iph->ihl*4,
-						   sizeof(_ports), &_ports);
-			break;
-		default:
-			_ports[0] = _ports[1] = 0;
-			ports = _ports;
-			break;
-		}
-		if (!ports) {
-			/* We've been asked to examine this packet, and we
-		 	  can't.  Hence, no choice but to drop. */
-			*hotdrop = 1;
-			return 0;
-		}
-		if (hinfo->cfg.mode & IPT_HASHLIMIT_HASH_SPT)
-			dst.src_port = ports[0];
-		if (hinfo->cfg.mode & IPT_HASHLIMIT_HASH_DPT)
-			dst.dst_port = ports[1];
-	} 
-
-	spin_lock_bh(&hinfo->lock);
-	dh = __dsthash_find(hinfo, &dst);
-	if (!dh) {
-		dh = __dsthash_alloc_init(hinfo, &dst);
-
-		if (!dh) {
-			/* enomem... don't match == DROP */
-			if (net_ratelimit())
-				printk(KERN_ERR "%s: ENOMEM\n", __FUNCTION__);
-			spin_unlock_bh(&hinfo->lock);
-			return 0;
-		}
-
-		dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
-
-		dh->rateinfo.prev = jiffies;
-		dh->rateinfo.credit = user2credits(hinfo->cfg.avg * 
-							hinfo->cfg.burst);
-		dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg * 
-							hinfo->cfg.burst);
-		dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
-	} else {
-		/* update expiration timeout */
-		dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
-		rateinfo_recalc(dh, now);
-	}
-
-	if (dh->rateinfo.credit >= dh->rateinfo.cost) {
-		/* We're underlimit. */
-		dh->rateinfo.credit -= dh->rateinfo.cost;
-		spin_unlock_bh(&hinfo->lock);
-		return 1;
-	}
-
-       	spin_unlock_bh(&hinfo->lock);
-
-	/* default case: we're overlimit, thus don't match */
-	return 0;
-}
-
-static int
-hashlimit_checkentry(const char *tablename,
-		     const void *inf,
-		     const struct xt_match *match,
-		     void *matchinfo,
-		     unsigned int hook_mask)
-{
-	struct ipt_hashlimit_info *r = matchinfo;
-
-	/* Check for overflow. */
-	if (r->cfg.burst == 0
-	    || user2credits(r->cfg.avg * r->cfg.burst) < 
-	    				user2credits(r->cfg.avg)) {
-		printk(KERN_ERR "ipt_hashlimit: Overflow, try lower: %u/%u\n",
-		       r->cfg.avg, r->cfg.burst);
-		return 0;
-	}
-
-	if (r->cfg.mode == 0 
-	    || r->cfg.mode > (IPT_HASHLIMIT_HASH_DPT
-		          |IPT_HASHLIMIT_HASH_DIP
-			  |IPT_HASHLIMIT_HASH_SIP
-			  |IPT_HASHLIMIT_HASH_SPT))
-		return 0;
-
-	if (!r->cfg.gc_interval)
-		return 0;
-	
-	if (!r->cfg.expire)
-		return 0;
-
-	if (r->name[sizeof(r->name) - 1] != '\0')
-		return 0;
-
-	/* This is the best we've got: We cannot release and re-grab lock,
-	 * since checkentry() is called before ip_tables.c grabs ipt_mutex.  
-	 * We also cannot grab the hashtable spinlock, since htable_create will 
-	 * call vmalloc, and that can sleep.  And we cannot just re-search
-	 * the list of htable's in htable_create(), since then we would
-	 * create duplicate proc files. -HW */
-	mutex_lock(&hlimit_mutex);
-	r->hinfo = htable_find_get(r->name);
-	if (!r->hinfo && (htable_create(r) != 0)) {
-		mutex_unlock(&hlimit_mutex);
-		return 0;
-	}
-	mutex_unlock(&hlimit_mutex);
-
-	/* Ugly hack: For SMP, we only want to use one set */
-	r->u.master = r;
-
-	return 1;
-}
-
-static void
-hashlimit_destroy(const struct xt_match *match, void *matchinfo)
-{
-	struct ipt_hashlimit_info *r = matchinfo;
-
-	htable_put(r->hinfo);
-}
-
-#ifdef CONFIG_COMPAT
-struct compat_ipt_hashlimit_info {
-	char name[IFNAMSIZ];
-	struct hashlimit_cfg cfg;
-	compat_uptr_t hinfo;
-	compat_uptr_t master;
-};
-
-static void compat_from_user(void *dst, void *src)
-{
-	int off = offsetof(struct compat_ipt_hashlimit_info, hinfo);
-
-	memcpy(dst, src, off);
-	memset(dst + off, 0, sizeof(struct compat_ipt_hashlimit_info) - off);
-}
-
-static int compat_to_user(void __user *dst, void *src)
-{
-	int off = offsetof(struct compat_ipt_hashlimit_info, hinfo);
-
-	return copy_to_user(dst, src, off) ? -EFAULT : 0;
-}
-#endif
-
-static struct ipt_match ipt_hashlimit = {
-	.name		= "hashlimit",
-	.match		= hashlimit_match,
-	.matchsize	= sizeof(struct ipt_hashlimit_info),
-#ifdef CONFIG_COMPAT
-	.compatsize	= sizeof(struct compat_ipt_hashlimit_info),
-	.compat_from_user = compat_from_user,
-	.compat_to_user	= compat_to_user,
-#endif
-	.checkentry	= hashlimit_checkentry,
-	.destroy	= hashlimit_destroy,
-	.me		= THIS_MODULE
-};
-
-/* PROC stuff */
-
-static void *dl_seq_start(struct seq_file *s, loff_t *pos)
-{
-	struct proc_dir_entry *pde = s->private;
-	struct ipt_hashlimit_htable *htable = pde->data;
-	unsigned int *bucket;
-
-	spin_lock_bh(&htable->lock);
-	if (*pos >= htable->cfg.size)
-		return NULL;
-
-	bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
-	if (!bucket)
-		return ERR_PTR(-ENOMEM);
-
-	*bucket = *pos;
-	return bucket;
-}
-
-static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
-	struct proc_dir_entry *pde = s->private;
-	struct ipt_hashlimit_htable *htable = pde->data;
-	unsigned int *bucket = (unsigned int *)v;
-
-	*pos = ++(*bucket);
-	if (*pos >= htable->cfg.size) {
-		kfree(v);
-		return NULL;
-	}
-	return bucket;
-}
-
-static void dl_seq_stop(struct seq_file *s, void *v)
-{
-	struct proc_dir_entry *pde = s->private;
-	struct ipt_hashlimit_htable *htable = pde->data;
-	unsigned int *bucket = (unsigned int *)v;
-
-	kfree(bucket);
-
-	spin_unlock_bh(&htable->lock);
-}
-
-static inline int dl_seq_real_show(struct dsthash_ent *ent, struct seq_file *s)
-{
-	/* recalculate to show accurate numbers */
-	rateinfo_recalc(ent, jiffies);
-
-	return seq_printf(s, "%ld %u.%u.%u.%u:%u->%u.%u.%u.%u:%u %u %u %u\n",
-			(long)(ent->expires - jiffies)/HZ,
-			NIPQUAD(ent->dst.src_ip), ntohs(ent->dst.src_port),
-			NIPQUAD(ent->dst.dst_ip), ntohs(ent->dst.dst_port),
-			ent->rateinfo.credit, ent->rateinfo.credit_cap,
-			ent->rateinfo.cost);
-}
-
-static int dl_seq_show(struct seq_file *s, void *v)
-{
-	struct proc_dir_entry *pde = s->private;
-	struct ipt_hashlimit_htable *htable = pde->data;
-	unsigned int *bucket = (unsigned int *)v;
-	struct dsthash_ent *ent;
-	struct hlist_node *pos;
-
-	if (!hlist_empty(&htable->hash[*bucket]))
-		hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node) {
-			if (dl_seq_real_show(ent, s)) {
-				/* buffer was filled and unable to print that tuple */
-				return 1;
-			}
-		}
-	
-	return 0;
-}
-
-static struct seq_operations dl_seq_ops = {
-	.start = dl_seq_start,
-	.next  = dl_seq_next,
-	.stop  = dl_seq_stop,
-	.show  = dl_seq_show
-};
-
-static int dl_proc_open(struct inode *inode, struct file *file)
-{
-	int ret = seq_open(file, &dl_seq_ops);
-
-	if (!ret) {
-		struct seq_file *sf = file->private_data;
-		sf->private = PDE(inode);
-	}
-	return ret;
-}
-
-static struct file_operations dl_file_ops = {
-	.owner   = THIS_MODULE,
-	.open    = dl_proc_open,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.release = seq_release
-};
-
-static int init_or_fini(int fini)
-{
-	int ret = 0;
-
-	if (fini)
-		goto cleanup;
-
-	if (ipt_register_match(&ipt_hashlimit)) {
-		ret = -EINVAL;
-		goto cleanup_nothing;
-	}
-
-	hashlimit_cachep = kmem_cache_create("ipt_hashlimit",
-					    sizeof(struct dsthash_ent), 0,
-					    0, NULL, NULL);
-	if (!hashlimit_cachep) {
-		printk(KERN_ERR "Unable to create ipt_hashlimit slab cache\n");
-		ret = -ENOMEM;
-		goto cleanup_unreg_match;
-	}
-
-	hashlimit_procdir = proc_mkdir("ipt_hashlimit", proc_net);
-	if (!hashlimit_procdir) {
-		printk(KERN_ERR "Unable to create proc dir entry\n");
-		ret = -ENOMEM;
-		goto cleanup_free_slab;
-	}
-
-	return ret;
-
-cleanup:
-	remove_proc_entry("ipt_hashlimit", proc_net);
-cleanup_free_slab:
-	kmem_cache_destroy(hashlimit_cachep);
-cleanup_unreg_match:
-	ipt_unregister_match(&ipt_hashlimit);
-cleanup_nothing:
-	return ret;
-	
-}
-
-static int __init ipt_hashlimit_init(void)
-{
-	return init_or_fini(0);
-}
-
-static void __exit ipt_hashlimit_fini(void)
-{
-	init_or_fini(1);
-}
-
-module_init(ipt_hashlimit_init);
-module_exit(ipt_hashlimit_fini);
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index b91f358..af29398 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -132,7 +132,7 @@
 	unsigned int ret;
 	u_int8_t tos;
 	__be32 saddr, daddr;
-	unsigned long nfmark;
+	u_int32_t mark;
 
 	/* root is playing with raw sockets. */
 	if ((*pskb)->len < sizeof(struct iphdr)
@@ -143,7 +143,7 @@
 	}
 
 	/* Save things which could affect route */
-	nfmark = (*pskb)->nfmark;
+	mark = (*pskb)->mark;
 	saddr = (*pskb)->nh.iph->saddr;
 	daddr = (*pskb)->nh.iph->daddr;
 	tos = (*pskb)->nh.iph->tos;
@@ -153,9 +153,7 @@
 	if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE
 	    && ((*pskb)->nh.iph->saddr != saddr
 		|| (*pskb)->nh.iph->daddr != daddr
-#ifdef CONFIG_IP_ROUTE_FWMARK
-		|| (*pskb)->nfmark != nfmark
-#endif
+		|| (*pskb)->mark != mark
 		|| (*pskb)->nh.iph->tos != tos))
 		if (ip_route_me_harder(pskb, RTN_UNSPEC))
 			ret = NF_DROP;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 0af803d..471b638 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -27,7 +27,7 @@
 #include <linux/netfilter_ipv4.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_helper.h>
-#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
@@ -38,12 +38,10 @@
 #define DEBUGP(format, args...)
 #endif
 
-DECLARE_PER_CPU(struct nf_conntrack_stat, nf_conntrack_stat);
-
 static int ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
 			     struct nf_conntrack_tuple *tuple)
 {
-	u_int32_t _addrs[2], *ap;
+	__be32 _addrs[2], *ap;
 	ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr),
 				sizeof(u_int32_t) * 2, _addrs);
 	if (ap == NULL)
@@ -113,10 +111,12 @@
 	return NF_ACCEPT;
 }
 
-int nat_module_is_loaded = 0;
+int nf_nat_module_is_loaded = 0;
+EXPORT_SYMBOL_GPL(nf_nat_module_is_loaded);
+
 static u_int32_t ipv4_get_features(const struct nf_conntrack_tuple *tuple)
 {
-	if (nat_module_is_loaded)
+	if (nf_nat_module_is_loaded)
 		return NF_CT_F_NAT;
 
 	return NF_CT_F_BASIC;
@@ -268,43 +268,59 @@
 	},
 };
 
-#ifdef CONFIG_SYSCTL
-/* From nf_conntrack_proto_icmp.c */
-extern unsigned int nf_ct_icmp_timeout;
-static struct ctl_table_header *nf_ct_ipv4_sysctl_header;
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+static int log_invalid_proto_min = 0;
+static int log_invalid_proto_max = 255;
 
-static ctl_table nf_ct_sysctl_table[] = {
+static ctl_table ip_ct_sysctl_table[] = {
 	{
-		.ctl_name	= NET_NF_CONNTRACK_ICMP_TIMEOUT,
-		.procname	= "nf_conntrack_icmp_timeout",
-		.data		= &nf_ct_icmp_timeout,
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_MAX,
+		.procname	= "ip_conntrack_max",
+		.data		= &nf_conntrack_max,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_COUNT,
+		.procname	= "ip_conntrack_count",
+		.data		= &nf_conntrack_count,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_BUCKETS,
+		.procname	= "ip_conntrack_buckets",
+		.data		= &nf_conntrack_htable_size,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0444,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_CHECKSUM,
+		.procname	= "ip_conntrack_checksum",
+		.data		= &nf_conntrack_checksum,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_LOG_INVALID,
+		.procname	= "ip_conntrack_log_invalid",
+		.data		= &nf_ct_log_invalid,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &log_invalid_proto_min,
+		.extra2		= &log_invalid_proto_max,
 	},
-        { .ctl_name = 0 }
-};
-
-static ctl_table nf_ct_netfilter_table[] = {
 	{
-		.ctl_name       = NET_NETFILTER,
-		.procname       = "netfilter",
-		.mode           = 0555,
-		.child          = nf_ct_sysctl_table,
-	},
-	{ .ctl_name = 0 }
+		.ctl_name	= 0
+	}
 };
-
-static ctl_table nf_ct_net_table[] = {
-	{
-		.ctl_name       = CTL_NET,
-		.procname       = "net",
-		.mode           = 0555,
-		.child          = nf_ct_netfilter_table,
-	},
-	{ .ctl_name = 0 }
-};
-#endif
+#endif /* CONFIG_SYSCTL && CONFIG_NF_CONNTRACK_PROC_COMPAT */
 
 /* Fast function for those who don't want to parse /proc (and I don't
    blame them). */
@@ -396,10 +412,8 @@
 	if (nfattr_bad_size(tb, CTA_IP_MAX, cta_min_ip))
 		return -EINVAL;
 
-	t->src.u3.ip =
-		*(u_int32_t *)NFA_DATA(tb[CTA_IP_V4_SRC-1]);
-	t->dst.u3.ip =
-		*(u_int32_t *)NFA_DATA(tb[CTA_IP_V4_DST-1]);
+	t->src.u3.ip = *(__be32 *)NFA_DATA(tb[CTA_IP_V4_SRC-1]);
+	t->dst.u3.ip = *(__be32 *)NFA_DATA(tb[CTA_IP_V4_DST-1]);
 
 	return 0;
 }
@@ -426,14 +440,15 @@
 	.tuple_to_nfattr = ipv4_tuple_to_nfattr,
 	.nfattr_to_tuple = ipv4_nfattr_to_tuple,
 #endif
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+	.ctl_table_path  = nf_net_ipv4_netfilter_sysctl_path,
+	.ctl_table	 = ip_ct_sysctl_table,
+#endif
 	.me		 = THIS_MODULE,
 };
 
-extern struct nf_conntrack_protocol nf_conntrack_protocol_tcp4;
-extern struct nf_conntrack_protocol nf_conntrack_protocol_udp4;
-extern struct nf_conntrack_protocol nf_conntrack_protocol_icmp;
-
 MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
+MODULE_ALIAS("ip_conntrack");
 MODULE_LICENSE("GPL");
 
 static int __init nf_conntrack_l3proto_ipv4_init(void)
@@ -448,19 +463,19 @@
 		return ret;
 	}
 
-	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_tcp4);
+	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp4);
 	if (ret < 0) {
 		printk("nf_conntrack_ipv4: can't register tcp.\n");
 		goto cleanup_sockopt;
 	}
 
-	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_udp4);
+	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp4);
 	if (ret < 0) {
 		printk("nf_conntrack_ipv4: can't register udp.\n");
 		goto cleanup_tcp;
 	}
 
-	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_icmp);
+	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmp);
 	if (ret < 0) {
 		printk("nf_conntrack_ipv4: can't register icmp.\n");
 		goto cleanup_udp;
@@ -478,28 +493,24 @@
 		printk("nf_conntrack_ipv4: can't register hooks.\n");
 		goto cleanup_ipv4;
 	}
-#ifdef CONFIG_SYSCTL
-	nf_ct_ipv4_sysctl_header = register_sysctl_table(nf_ct_net_table, 0);
-	if (nf_ct_ipv4_sysctl_header == NULL) {
-		printk("nf_conntrack: can't register to sysctl.\n");
-		ret = -ENOMEM;
+#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+	ret = nf_conntrack_ipv4_compat_init();
+	if (ret < 0)
 		goto cleanup_hooks;
-	}
 #endif
 	return ret;
-
-#ifdef CONFIG_SYSCTL
+#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
  cleanup_hooks:
-	nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
+ 	nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
 #endif
  cleanup_ipv4:
 	nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
  cleanup_icmp:
-	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_icmp);
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp);
  cleanup_udp:
-	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_udp4);
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4);
  cleanup_tcp:
-	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp4);
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4);
  cleanup_sockopt:
 	nf_unregister_sockopt(&so_getorigdst);
 	return ret;
@@ -508,18 +519,16 @@
 static void __exit nf_conntrack_l3proto_ipv4_fini(void)
 {
 	synchronize_net();
-#ifdef CONFIG_SYSCTL
- 	unregister_sysctl_table(nf_ct_ipv4_sysctl_header);
+#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+	nf_conntrack_ipv4_compat_fini();
 #endif
 	nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
 	nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
-	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_icmp);
-	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_udp4);
-	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp4);
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp);
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4);
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4);
 	nf_unregister_sockopt(&so_getorigdst);
 }
 
 module_init(nf_conntrack_l3proto_ipv4_init);
 module_exit(nf_conntrack_l3proto_ipv4_fini);
-
-EXPORT_SYMBOL(nf_ct_ipv4_gather_frags);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
new file mode 100644
index 0000000..3b31bc6
--- /dev/null
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -0,0 +1,412 @@
+/* ip_conntrack proc compat - based on ip_conntrack_standalone.c
+ *
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/percpu.h>
+
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+#ifdef CONFIG_NF_CT_ACCT
+static unsigned int
+seq_print_counters(struct seq_file *s,
+		   const struct ip_conntrack_counter *counter)
+{
+	return seq_printf(s, "packets=%llu bytes=%llu ",
+			  (unsigned long long)counter->packets,
+			  (unsigned long long)counter->bytes);
+}
+#else
+#define seq_print_counters(x, y)	0
+#endif
+
+struct ct_iter_state {
+	unsigned int bucket;
+};
+
+static struct list_head *ct_get_first(struct seq_file *seq)
+{
+	struct ct_iter_state *st = seq->private;
+
+	for (st->bucket = 0;
+	     st->bucket < nf_conntrack_htable_size;
+	     st->bucket++) {
+		if (!list_empty(&nf_conntrack_hash[st->bucket]))
+			return nf_conntrack_hash[st->bucket].next;
+	}
+	return NULL;
+}
+
+static struct list_head *ct_get_next(struct seq_file *seq, struct list_head *head)
+{
+	struct ct_iter_state *st = seq->private;
+
+	head = head->next;
+	while (head == &nf_conntrack_hash[st->bucket]) {
+		if (++st->bucket >= nf_conntrack_htable_size)
+			return NULL;
+		head = nf_conntrack_hash[st->bucket].next;
+	}
+	return head;
+}
+
+static struct list_head *ct_get_idx(struct seq_file *seq, loff_t pos)
+{
+	struct list_head *head = ct_get_first(seq);
+
+	if (head)
+		while (pos && (head = ct_get_next(seq, head)))
+			pos--;
+	return pos ? NULL : head;
+}
+
+static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	read_lock_bh(&nf_conntrack_lock);
+	return ct_get_idx(seq, *pos);
+}
+
+static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return ct_get_next(s, v);
+}
+
+static void ct_seq_stop(struct seq_file *s, void *v)
+{
+	read_unlock_bh(&nf_conntrack_lock);
+}
+
+static int ct_seq_show(struct seq_file *s, void *v)
+{
+	const struct nf_conntrack_tuple_hash *hash = v;
+	const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
+	struct nf_conntrack_l3proto *l3proto;
+	struct nf_conntrack_l4proto *l4proto;
+
+	NF_CT_ASSERT(ct);
+
+	/* we only want to print DIR_ORIGINAL */
+	if (NF_CT_DIRECTION(hash))
+		return 0;
+	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num != AF_INET)
+		return 0;
+
+	l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL]
+				       .tuple.src.l3num);
+	NF_CT_ASSERT(l3proto);
+	l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL]
+				       .tuple.src.l3num,
+				       ct->tuplehash[IP_CT_DIR_ORIGINAL]
+				       .tuple.dst.protonum);
+	NF_CT_ASSERT(l4proto);
+
+	if (seq_printf(s, "%-8s %u %ld ",
+		      l4proto->name,
+		      ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum,
+		      timer_pending(&ct->timeout)
+		      ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
+		return -ENOSPC;
+
+	if (l3proto->print_conntrack(s, ct))
+		return -ENOSPC;
+
+	if (l4proto->print_conntrack(s, ct))
+		return -ENOSPC;
+
+	if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+			l3proto, l4proto))
+		return -ENOSPC;
+
+ 	if (seq_print_counters(s, &ct->counters[IP_CT_DIR_ORIGINAL]))
+		return -ENOSPC;
+
+	if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
+		if (seq_printf(s, "[UNREPLIED] "))
+			return -ENOSPC;
+
+	if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+			l3proto, l4proto))
+		return -ENOSPC;
+
+ 	if (seq_print_counters(s, &ct->counters[IP_CT_DIR_REPLY]))
+		return -ENOSPC;
+
+	if (test_bit(IPS_ASSURED_BIT, &ct->status))
+		if (seq_printf(s, "[ASSURED] "))
+			return -ENOSPC;
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+	if (seq_printf(s, "mark=%u ", ct->mark))
+		return -ENOSPC;
+#endif
+
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+	if (seq_printf(s, "secmark=%u ", ct->secmark))
+		return -ENOSPC;
+#endif
+
+	if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
+		return -ENOSPC;
+
+	return 0;
+}
+
+static struct seq_operations ct_seq_ops = {
+	.start = ct_seq_start,
+	.next  = ct_seq_next,
+	.stop  = ct_seq_stop,
+	.show  = ct_seq_show
+};
+
+static int ct_open(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq;
+	struct ct_iter_state *st;
+	int ret;
+
+	st = kmalloc(sizeof(struct ct_iter_state), GFP_KERNEL);
+	if (st == NULL)
+		return -ENOMEM;
+	ret = seq_open(file, &ct_seq_ops);
+	if (ret)
+		goto out_free;
+	seq          = file->private_data;
+	seq->private = st;
+	memset(st, 0, sizeof(struct ct_iter_state));
+	return ret;
+out_free:
+	kfree(st);
+	return ret;
+}
+
+static struct file_operations ct_file_ops = {
+	.owner   = THIS_MODULE,
+	.open    = ct_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private,
+};
+
+/* expects */
+static void *exp_seq_start(struct seq_file *s, loff_t *pos)
+{
+	struct list_head *e = &nf_conntrack_expect_list;
+	loff_t i;
+
+	/* strange seq_file api calls stop even if we fail,
+	 * thus we need to grab lock since stop unlocks */
+	read_lock_bh(&nf_conntrack_lock);
+
+	if (list_empty(e))
+		return NULL;
+
+	for (i = 0; i <= *pos; i++) {
+		e = e->next;
+		if (e == &nf_conntrack_expect_list)
+			return NULL;
+	}
+	return e;
+}
+
+static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct list_head *e = v;
+
+	++*pos;
+	e = e->next;
+
+	if (e == &nf_conntrack_expect_list)
+		return NULL;
+
+	return e;
+}
+
+static void exp_seq_stop(struct seq_file *s, void *v)
+{
+	read_unlock_bh(&nf_conntrack_lock);
+}
+
+static int exp_seq_show(struct seq_file *s, void *v)
+{
+	struct nf_conntrack_expect *exp = v;
+
+	if (exp->tuple.src.l3num != AF_INET)
+		return 0;
+
+	if (exp->timeout.function)
+		seq_printf(s, "%ld ", timer_pending(&exp->timeout)
+			   ? (long)(exp->timeout.expires - jiffies)/HZ : 0);
+	else
+		seq_printf(s, "- ");
+
+	seq_printf(s, "proto=%u ", exp->tuple.dst.protonum);
+
+	print_tuple(s, &exp->tuple,
+		    __nf_ct_l3proto_find(exp->tuple.src.l3num),
+		    __nf_ct_l4proto_find(exp->tuple.src.l3num,
+		    			 exp->tuple.dst.protonum));
+	return seq_putc(s, '\n');
+}
+
+static struct seq_operations exp_seq_ops = {
+	.start = exp_seq_start,
+	.next = exp_seq_next,
+	.stop = exp_seq_stop,
+	.show = exp_seq_show
+};
+
+static int exp_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &exp_seq_ops);
+}
+
+static struct file_operations ip_exp_file_ops = {
+	.owner   = THIS_MODULE,
+	.open    = exp_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release
+};
+
+static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	int cpu;
+
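+	/* *pos == 0 yields the header line; positions 1..NR_CPUS map to
+	 * possible CPUs, with *pos left one past the CPU just returned. */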
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
+	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+		if (!cpu_possible(cpu))
+			continue;
+		*pos = cpu+1;
+		return &per_cpu(nf_conntrack_stat, cpu);
+	}
+
+	return NULL;
+}
+
+static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	int cpu;
+
+	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+		if (!cpu_possible(cpu))
+			continue;
+		*pos = cpu+1;
+		return &per_cpu(nf_conntrack_stat, cpu);
+	}
+
+	return NULL;
+}
+
+static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int ct_cpu_seq_show(struct seq_file *seq, void *v)
+{
+	unsigned int nr_conntracks = atomic_read(&nf_conntrack_count);
+	struct ip_conntrack_stat *st = v;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_printf(seq, "entries  searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error  expect_new expect_create expect_delete\n");
+		return 0;
+	}
+
+	seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
+			"%08x %08x %08x %08x %08x  %08x %08x %08x \n",
+		   nr_conntracks,
+		   st->searched,
+		   st->found,
+		   st->new,
+		   st->invalid,
+		   st->ignore,
+		   st->delete,
+		   st->delete_list,
+		   st->insert,
+		   st->insert_failed,
+		   st->drop,
+		   st->early_drop,
+		   st->error,
+
+		   st->expect_new,
+		   st->expect_create,
+		   st->expect_delete
+		);
+	return 0;
+}
+
+static struct seq_operations ct_cpu_seq_ops = {
+	.start  = ct_cpu_seq_start,
+	.next   = ct_cpu_seq_next,
+	.stop   = ct_cpu_seq_stop,
+	.show   = ct_cpu_seq_show,
+};
+
+static int ct_cpu_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &ct_cpu_seq_ops);
+}
+
+static struct file_operations ct_cpu_seq_fops = {
+	.owner   = THIS_MODULE,
+	.open    = ct_cpu_seq_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private,
+};
+
+int __init nf_conntrack_ipv4_compat_init(void)
+{
+	struct proc_dir_entry *proc, *proc_exp, *proc_stat;
+
+	proc = proc_net_fops_create("ip_conntrack", 0440, &ct_file_ops);
+	if (!proc)
+		goto err1;
+
+	proc_exp = proc_net_fops_create("ip_conntrack_expect", 0440,
+					&ip_exp_file_ops);
+	if (!proc_exp)
+		goto err2;
+
+	proc_stat = create_proc_entry("ip_conntrack", S_IRUGO, proc_net_stat);
+	if (!proc_stat)
+		goto err3;
+
+	proc_stat->proc_fops = &ct_cpu_seq_fops;
+	proc_stat->owner = THIS_MODULE;
+
+	return 0;
+
+err3:
+	proc_net_remove("ip_conntrack_expect");
+err2:
+	proc_net_remove("ip_conntrack");
+err1:
+	return -ENOMEM;
+}
+
+void __exit nf_conntrack_ipv4_compat_fini(void)
+{
+	remove_proc_entry("ip_conntrack", proc_net_stat);
+	proc_net_remove("ip_conntrack_expect");
+	proc_net_remove("ip_conntrack");
+}
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 790f00d..db9e7c4 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -22,10 +22,10 @@
 #include <net/checksum.h>
 #include <linux/netfilter_ipv4.h>
 #include <net/netfilter/nf_conntrack_tuple.h>
-#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_core.h>
 
-unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ;
+static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ;
 
 #if 0
 #define DEBUGP printk
@@ -152,7 +152,7 @@
 		struct icmphdr icmp;
 		struct iphdr ip;
 	} _in, *inside;
-	struct nf_conntrack_protocol *innerproto;
+	struct nf_conntrack_l4proto *innerproto;
 	struct nf_conntrack_tuple_hash *h;
 	int dataoff;
 
@@ -170,7 +170,7 @@
 		return -NF_ACCEPT;
 	}
 
-	innerproto = __nf_ct_proto_find(PF_INET, inside->ip.protocol);
+	innerproto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);
 	dataoff = skb->nh.iph->ihl*4 + sizeof(inside->icmp);
 	/* Are they talking about one of our connections? */
 	if (!nf_ct_get_tuple(skb, dataoff, dataoff + inside->ip.ihl*4, PF_INET,
@@ -311,7 +311,7 @@
 	tuple->dst.u.icmp.code =
 			*(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]);
 	tuple->src.u.icmp.id =
-			*(u_int16_t *)NFA_DATA(tb[CTA_PROTO_ICMP_ID-1]);
+			*(__be16 *)NFA_DATA(tb[CTA_PROTO_ICMP_ID-1]);
 
 	if (tuple->dst.u.icmp.type >= sizeof(invmap)
 	    || !invmap[tuple->dst.u.icmp.type])
@@ -321,11 +321,42 @@
 }
 #endif
 
-struct nf_conntrack_protocol nf_conntrack_protocol_icmp =
+#ifdef CONFIG_SYSCTL
+static struct ctl_table_header *icmp_sysctl_header;
+static struct ctl_table icmp_sysctl_table[] = {
+	{
+		.ctl_name	= NET_NF_CONNTRACK_ICMP_TIMEOUT,
+		.procname	= "nf_conntrack_icmp_timeout",
+		.data		= &nf_ct_icmp_timeout,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name = 0
+	}
+};
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+static struct ctl_table icmp_compat_sysctl_table[] = {
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT,
+		.procname	= "ip_conntrack_icmp_timeout",
+		.data		= &nf_ct_icmp_timeout,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name = 0
+	}
+};
+#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
+#endif /* CONFIG_SYSCTL */
+
+struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
 {
-	.list			= { NULL, NULL },
 	.l3proto		= PF_INET,
-	.proto			= IPPROTO_ICMP,
+	.l4proto		= IPPROTO_ICMP,
 	.name			= "icmp",
 	.pkt_to_tuple		= icmp_pkt_to_tuple,
 	.invert_tuple		= icmp_invert_tuple,
@@ -341,6 +372,12 @@
 	.tuple_to_nfattr	= icmp_tuple_to_nfattr,
 	.nfattr_to_tuple	= icmp_nfattr_to_tuple,
 #endif
+#ifdef CONFIG_SYSCTL
+	.ctl_table_header	= &icmp_sysctl_header,
+	.ctl_table		= icmp_sysctl_table,
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+	.ctl_compat_table	= icmp_compat_sysctl_table,
+#endif
+#endif
 };
-
-EXPORT_SYMBOL(nf_conntrack_protocol_icmp);
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_icmp);
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
new file mode 100644
index 0000000..0f17098
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -0,0 +1,78 @@
+/* Amanda extension for TCP NAT alteration.
+ * (C) 2002 by Brian J. Murrell <netfilter@interlinx.bc.ca>
+ * based on a copy of HW's ip_nat_irc.c as well as other modules
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/udp.h>
+
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <linux/netfilter/nf_conntrack_amanda.h>
+
+MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>");
+MODULE_DESCRIPTION("Amanda NAT helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ip_nat_amanda");
+
+static unsigned int help(struct sk_buff **pskb,
+			 enum ip_conntrack_info ctinfo,
+			 unsigned int matchoff,
+			 unsigned int matchlen,
+			 struct nf_conntrack_expect *exp)
+{
+	char buffer[sizeof("65535")];
+	u_int16_t port;
+	unsigned int ret;
+
+	/* Connection comes from client. */
+	exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
+	exp->dir = IP_CT_DIR_ORIGINAL;
+
+	/* When you see the packet, we need to NAT it the same as
+	 * this one (ie. same IP: it will be TCP and the master is UDP). */
+	exp->expectfn = nf_nat_follow_master;
+
+	/* Try to get same port: if not, try to change it. */
+	for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
+		exp->tuple.dst.u.tcp.port = htons(port);
+		if (nf_conntrack_expect_related(exp) == 0)
+			break;
+	}
+
+	if (port == 0)
+		return NF_DROP;
+
+	sprintf(buffer, "%u", port);
+	ret = nf_nat_mangle_udp_packet(pskb, exp->master, ctinfo,
+				       matchoff, matchlen,
+				       buffer, strlen(buffer));
+	if (ret != NF_ACCEPT)
+		nf_conntrack_unexpect_related(exp);
+	return ret;
+}
+
+static void __exit nf_nat_amanda_fini(void)
+{
+	rcu_assign_pointer(nf_nat_amanda_hook, NULL);
+	synchronize_rcu();
+}
+
+static int __init nf_nat_amanda_init(void)
+{
+	BUG_ON(rcu_dereference(nf_nat_amanda_hook));
+	rcu_assign_pointer(nf_nat_amanda_hook, help);
+	return 0;
+}
+
+module_init(nf_nat_amanda_init);
+module_exit(nf_nat_amanda_fini);
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
new file mode 100644
index 0000000..86a9227
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -0,0 +1,647 @@
+/* NAT for netfilter; shared with compatibility layer. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <net/checksum.h>
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/tcp.h>  /* For tcp_prot in getorigdst */
+#include <linux/icmp.h>
+#include <linux/udp.h>
+#include <linux/jhash.h>
+
+#include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+static DEFINE_RWLOCK(nf_nat_lock);
+
+static struct nf_conntrack_l3proto *l3proto = NULL;
+
+/* Calculated at init based on memory size */
+static unsigned int nf_nat_htable_size;
+
+static struct list_head *bysource;
+
+#define MAX_IP_NAT_PROTO 256
+static struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO];
+
+static inline struct nf_nat_protocol *
+__nf_nat_proto_find(u_int8_t protonum)
+{
+	return nf_nat_protos[protonum];
+}
+
+struct nf_nat_protocol *
+nf_nat_proto_find_get(u_int8_t protonum)
+{
+	struct nf_nat_protocol *p;
+
+	/* we need to disable preemption to make sure 'p' doesn't get
+	 * removed until we've grabbed the reference */
+	preempt_disable();
+	p = __nf_nat_proto_find(protonum);
+	if (!try_module_get(p->me))
+		p = &nf_nat_unknown_protocol;
+	preempt_enable();
+
+	return p;
+}
+EXPORT_SYMBOL_GPL(nf_nat_proto_find_get);
+
+void
+nf_nat_proto_put(struct nf_nat_protocol *p)
+{
+	module_put(p->me);
+}
+EXPORT_SYMBOL_GPL(nf_nat_proto_put);
+
+/* We keep an extra hash for each conntrack, for fast searching. */
+static inline unsigned int
+hash_by_src(const struct nf_conntrack_tuple *tuple)
+{
+	/* Original src, to ensure we map it consistently if poss. */
+	return jhash_3words((__force u32)tuple->src.u3.ip, tuple->src.u.all,
+			    tuple->dst.protonum, 0) % nf_nat_htable_size;
+}
+
+/* No one is using the conntrack by the time this is called. */
+static void nf_nat_cleanup_conntrack(struct nf_conn *conn)
+{
+	struct nf_conn_nat *nat;
+	if (!(conn->status & IPS_NAT_DONE_MASK))
+		return;
+
+	nat = nfct_nat(conn);
+	write_lock_bh(&nf_nat_lock);
+	list_del(&nat->info.bysource);
+	write_unlock_bh(&nf_nat_lock);
+}
+
+/* Is this tuple already taken? (not by us) */
+int
+nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
+		  const struct nf_conn *ignored_conntrack)
+{
+	/* Connection tracking doesn't keep track of outgoing tuples; only
+	   incoming ones.  NAT means they don't have a fixed mapping,
+	   so we invert the tuple and look for the incoming reply.
+
+	   We could keep a separate hash if this proves too slow. */
+	struct nf_conntrack_tuple reply;
+
+	nf_ct_invert_tuplepr(&reply, tuple);
+	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
+}
+EXPORT_SYMBOL(nf_nat_used_tuple);
+
+/* If we source map this tuple so reply looks like reply_tuple, will
+ * that meet the constraints of range. */
+static int
+in_range(const struct nf_conntrack_tuple *tuple,
+	 const struct nf_nat_range *range)
+{
+	struct nf_nat_protocol *proto;
+
+	proto = __nf_nat_proto_find(tuple->dst.protonum);
+	/* If we are supposed to map IPs, then we must be in the
+	   range specified, otherwise let this drag us onto a new src IP. */
+	if (range->flags & IP_NAT_RANGE_MAP_IPS) {
+		if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
+		    ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
+			return 0;
+	}
+
+	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
+	    proto->in_range(tuple, IP_NAT_MANIP_SRC,
+			    &range->min, &range->max))
+		return 1;
+
+	return 0;
+}
+
+static inline int
+same_src(const struct nf_conn *ct,
+	 const struct nf_conntrack_tuple *tuple)
+{
+	const struct nf_conntrack_tuple *t;
+
+	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+	return (t->dst.protonum == tuple->dst.protonum &&
+		t->src.u3.ip == tuple->src.u3.ip &&
+		t->src.u.all == tuple->src.u.all);
+}
+
+/* Only called for SRC manip */
+static int
+find_appropriate_src(const struct nf_conntrack_tuple *tuple,
+		     struct nf_conntrack_tuple *result,
+		     const struct nf_nat_range *range)
+{
+	unsigned int h = hash_by_src(tuple);
+	struct nf_conn_nat *nat;
+	struct nf_conn *ct;
+
+	read_lock_bh(&nf_nat_lock);
+	list_for_each_entry(nat, &bysource[h], info.bysource) {
+		ct = (struct nf_conn *)((char *)nat - offsetof(struct nf_conn, data));
+		if (same_src(ct, tuple)) {
+			/* Copy source part from reply tuple. */
+			nf_ct_invert_tuplepr(result,
+				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+			result->dst = tuple->dst;
+
+			if (in_range(result, range)) {
+				read_unlock_bh(&nf_nat_lock);
+				return 1;
+			}
+		}
+	}
+	read_unlock_bh(&nf_nat_lock);
+	return 0;
+}
+
+/* For [FUTURE] fragmentation handling, we want the least-used
+   src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
+   if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
+   1-65535, we don't do pro-rata allocation based on ports; we choose
+   the ip with the lowest src-ip/dst-ip/proto usage.
+*/
+static void
+find_best_ips_proto(struct nf_conntrack_tuple *tuple,
+		    const struct nf_nat_range *range,
+		    const struct nf_conn *ct,
+		    enum nf_nat_manip_type maniptype)
+{
+	__be32 *var_ipp;
+	/* Host order */
+	u_int32_t minip, maxip, j;
+
+	/* No IP mapping?  Do nothing. */
+	if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
+		return;
+
+	if (maniptype == IP_NAT_MANIP_SRC)
+		var_ipp = &tuple->src.u3.ip;
+	else
+		var_ipp = &tuple->dst.u3.ip;
+
+	/* Fast path: only one choice. */
+	if (range->min_ip == range->max_ip) {
+		*var_ipp = range->min_ip;
+		return;
+	}
+
+	/* Hashing source and destination IPs gives a fairly even
+	 * spread in practice (if there are a small number of IPs
+	 * involved, there usually aren't that many connections
+	 * anyway).  The consistency means that servers see the same
+	 * client coming from the same IP (some Internet Banking sites
+	 * like this), even across reboots. */
+	minip = ntohl(range->min_ip);
+	maxip = ntohl(range->max_ip);
+	j = jhash_2words((__force u32)tuple->src.u3.ip,
+			 (__force u32)tuple->dst.u3.ip, 0);
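+	/* Fold the hash into the inclusive [min_ip, max_ip] range. */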
+	*var_ipp = htonl(minip + j % (maxip - minip + 1));
+}
+
+/* Manipulate the tuple into the range given.  For NF_IP_POST_ROUTING,
+ * we change the source to map into the range.  For NF_IP_PRE_ROUTING
+ * and NF_IP_LOCAL_OUT, we change the destination to map into the
+ * range.  It might not be possible to get a unique tuple, but we try.
+ * At worst (or if we race), we will end up with a final duplicate in
+ * __ip_conntrack_confirm and drop the packet. */
+static void
+get_unique_tuple(struct nf_conntrack_tuple *tuple,
+		 const struct nf_conntrack_tuple *orig_tuple,
+		 const struct nf_nat_range *range,
+		 struct nf_conn *ct,
+		 enum nf_nat_manip_type maniptype)
+{
+	struct nf_nat_protocol *proto;
+
+	/* 1) If this srcip/proto/src-proto-part is currently mapped,
+	   and that same mapping gives a unique tuple within the given
+	   range, use that.
+
+	   This is only required for source (ie. NAT/masq) mappings.
+	   So far, we don't do local source mappings, so multiple
+	   manips are not an issue.  */
+	if (maniptype == IP_NAT_MANIP_SRC) {
+		if (find_appropriate_src(orig_tuple, tuple, range)) {
+			DEBUGP("get_unique_tuple: Found current src map\n");
+			if (!nf_nat_used_tuple(tuple, ct))
+				return;
+		}
+	}
+
+	/* 2) Select the least-used IP/proto combination in the given
+	   range. */
+	*tuple = *orig_tuple;
+	find_best_ips_proto(tuple, range, ct, maniptype);
+
+	/* 3) The per-protocol part of the manip is made to map into
+	   the range to make a unique tuple. */
+
+	proto = nf_nat_proto_find_get(orig_tuple->dst.protonum);
+
+	/* Only bother mapping if it's not already in range and unique */
+	if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
+	     proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
+	    !nf_nat_used_tuple(tuple, ct)) {
+		nf_nat_proto_put(proto);
+		return;
+	}
+
+	/* Last chance: get the protocol to try to obtain a unique tuple. */
+	proto->unique_tuple(tuple, range, maniptype, ct);
+
+	nf_nat_proto_put(proto);
+}
+
+unsigned int
+nf_nat_setup_info(struct nf_conn *ct,
+		  const struct nf_nat_range *range,
+		  unsigned int hooknum)
+{
+	struct nf_conntrack_tuple curr_tuple, new_tuple;
+	struct nf_conn_nat *nat = nfct_nat(ct);
+	struct nf_nat_info *info = &nat->info;
+	int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);
+	enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
+
+	NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING ||
+		     hooknum == NF_IP_POST_ROUTING ||
+		     hooknum == NF_IP_LOCAL_IN ||
+		     hooknum == NF_IP_LOCAL_OUT);
+	BUG_ON(nf_nat_initialized(ct, maniptype));
+
+	/* What we've got will look like inverse of reply. Normally
+	   this is what is in the conntrack, except for prior
+	   manipulations (future optimization: if num_manips == 0,
+	   orig_tp =
+	   conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
+	nf_ct_invert_tuplepr(&curr_tuple,
+			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+
+	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);
+
+	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
+		struct nf_conntrack_tuple reply;
+
+		/* Alter conntrack table so will recognize replies. */
+		nf_ct_invert_tuplepr(&reply, &new_tuple);
+		nf_conntrack_alter_reply(ct, &reply);
+
+		/* Non-atomic: we own this at the moment. */
+		if (maniptype == IP_NAT_MANIP_SRC)
+			ct->status |= IPS_SRC_NAT;
+		else
+			ct->status |= IPS_DST_NAT;
+	}
+
+	/* Place in source hash if this is the first time. */
+	if (have_to_hash) {
+		unsigned int srchash;
+
+		srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+		write_lock_bh(&nf_nat_lock);
+		list_add(&info->bysource, &bysource[srchash]);
+		write_unlock_bh(&nf_nat_lock);
+	}
+
+	/* It's done. */
+	if (maniptype == IP_NAT_MANIP_DST)
+		set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
+	else
+		set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+
+	return NF_ACCEPT;
+}
+EXPORT_SYMBOL(nf_nat_setup_info);
+
+/* Returns true if succeeded. */
+static int
+manip_pkt(u_int16_t proto,
+	  struct sk_buff **pskb,
+	  unsigned int iphdroff,
+	  const struct nf_conntrack_tuple *target,
+	  enum nf_nat_manip_type maniptype)
+{
+	struct iphdr *iph;
+	struct nf_nat_protocol *p;
+
+	if (!skb_make_writable(pskb, iphdroff + sizeof(*iph)))
+		return 0;
+
+	iph = (void *)(*pskb)->data + iphdroff;
+
+	/* Manipulate the protocol part. */
+	p = nf_nat_proto_find_get(proto);
+	if (!p->manip_pkt(pskb, iphdroff, target, maniptype)) {
+		nf_nat_proto_put(p);
+		return 0;
+	}
+	nf_nat_proto_put(p);
+
+	iph = (void *)(*pskb)->data + iphdroff;
+
+	if (maniptype == IP_NAT_MANIP_SRC) {
+		nf_csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
+		iph->saddr = target->src.u3.ip;
+	} else {
+		nf_csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
+		iph->daddr = target->dst.u3.ip;
+	}
+	return 1;
+}
+
+/* Do packet manipulations according to nf_nat_setup_info. */
+unsigned int nf_nat_packet(struct nf_conn *ct,
+			   enum ip_conntrack_info ctinfo,
+			   unsigned int hooknum,
+			   struct sk_buff **pskb)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	unsigned long statusbit;
+	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
+
+	if (mtype == IP_NAT_MANIP_SRC)
+		statusbit = IPS_SRC_NAT;
+	else
+		statusbit = IPS_DST_NAT;
+
+	/* Invert if this is reply dir. */
+	if (dir == IP_CT_DIR_REPLY)
+		statusbit ^= IPS_NAT_MASK;
+
+	/* Non-atomic: these bits don't change. */
+	if (ct->status & statusbit) {
+		struct nf_conntrack_tuple target;
+
+		/* We are aiming to look like inverse of other direction. */
+		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
+
+		if (!manip_pkt(target.dst.protonum, pskb, 0, &target, mtype))
+			return NF_DROP;
+	}
+	return NF_ACCEPT;
+}
+EXPORT_SYMBOL_GPL(nf_nat_packet);
+
+/* Dir is direction ICMP is coming from (opposite to packet it contains) */
+int nf_nat_icmp_reply_translation(struct nf_conn *ct,
+				  enum ip_conntrack_info ctinfo,
+				  unsigned int hooknum,
+				  struct sk_buff **pskb)
+{
+	struct {
+		struct icmphdr icmp;
+		struct iphdr ip;
+	} *inside;
+	struct nf_conntrack_tuple inner, target;
+	int hdrlen = (*pskb)->nh.iph->ihl * 4;
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	unsigned long statusbit;
+	enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
+
+	if (!skb_make_writable(pskb, hdrlen + sizeof(*inside)))
+		return 0;
+
+	inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
+
+	/* We're actually going to mangle it beyond trivial checksum
+	   adjustment, so make sure the current checksum is correct. */
+	if (nf_ip_checksum(*pskb, hooknum, hdrlen, 0))
+		return 0;
+
+	/* Must be RELATED */
+	NF_CT_ASSERT((*pskb)->nfctinfo == IP_CT_RELATED ||
+		     (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);
+
+	/* Redirects on non-null nats must be dropped, else they'll
+	   start talking to each other without our translation, and be
+	   confused... --RR */
+	if (inside->icmp.type == ICMP_REDIRECT) {
+		/* If NAT isn't finished, assume it and drop. */
+		if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
+			return 0;
+
+		if (ct->status & IPS_NAT_MASK)
+			return 0;
+	}
+
+	DEBUGP("icmp_reply_translation: translating error %p manp %u dir %s\n",
+	       *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");
+
+	if (!nf_ct_get_tuple(*pskb,
+			     (*pskb)->nh.iph->ihl*4 + sizeof(struct icmphdr),
+			     (*pskb)->nh.iph->ihl*4 +
+	                     sizeof(struct icmphdr) + inside->ip.ihl*4,
+	                     (u_int16_t)AF_INET,
+	                     inside->ip.protocol,
+	                     &inner,
+	                     l3proto,
+			     __nf_ct_l4proto_find((u_int16_t)PF_INET,
+			     			  inside->ip.protocol)))
+		return 0;
+
+	/* Change inner back to look like incoming packet.  We do the
+	   opposite manip on this hook to normal, because it might not
+	   pass all hooks (locally-generated ICMP).  Consider incoming
+	   packet: PREROUTING (DST manip), routing produces ICMP, goes
+	   through POSTROUTING (which must correct the DST manip). */
+	if (!manip_pkt(inside->ip.protocol, pskb,
+		       (*pskb)->nh.iph->ihl*4 + sizeof(inside->icmp),
+		       &ct->tuplehash[!dir].tuple,
+		       !manip))
+		return 0;
+
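+	/* Unless the device will finish the checksum later (CHECKSUM_PARTIAL),
+	 * recompute the inner ICMP checksum over the mangled payload. */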
+	if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
+		/* Reload "inside": manip_pkt may have reallocated the skb data. */
+		inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
+		inside->icmp.checksum = 0;
+		inside->icmp.checksum =
+			csum_fold(skb_checksum(*pskb, hdrlen,
+					       (*pskb)->len - hdrlen, 0));
+	}
+
+	/* Change outer to look the reply to an incoming packet
+	 * (proto 0 means don't invert per-proto part). */
+	if (manip == IP_NAT_MANIP_SRC)
+		statusbit = IPS_SRC_NAT;
+	else
+		statusbit = IPS_DST_NAT;
+
+	/* Invert if this is reply dir. */
+	if (dir == IP_CT_DIR_REPLY)
+		statusbit ^= IPS_NAT_MASK;
+
+	if (ct->status & statusbit) {
+		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
+		if (!manip_pkt(0, pskb, 0, &target, manip))
+			return 0;
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
+
+/* Protocol registration. */
+int nf_nat_protocol_register(struct nf_nat_protocol *proto)
+{
+	int ret = 0;
+
+	write_lock_bh(&nf_nat_lock);
+	if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
+		ret = -EBUSY;
+		goto out;
+	}
+	nf_nat_protos[proto->protonum] = proto;
+ out:
+	write_unlock_bh(&nf_nat_lock);
+	return ret;
+}
+EXPORT_SYMBOL(nf_nat_protocol_register);
+
+/* No one stores the protocol anywhere; simply delete it. */
+void nf_nat_protocol_unregister(struct nf_nat_protocol *proto)
+{
+	write_lock_bh(&nf_nat_lock);
+	nf_nat_protos[proto->protonum] = &nf_nat_unknown_protocol;
+	write_unlock_bh(&nf_nat_lock);
+
+	/* Someone could be still looking at the proto in a bh. */
+	synchronize_net();
+}
+EXPORT_SYMBOL(nf_nat_protocol_unregister);
+
+#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
+    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+int
+nf_nat_port_range_to_nfattr(struct sk_buff *skb,
+			    const struct nf_nat_range *range)
+{
+	NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16),
+		&range->min.tcp.port);
+	NFA_PUT(skb, CTA_PROTONAT_PORT_MAX, sizeof(__be16),
+		&range->max.tcp.port);
+
+	return 0;
+
+nfattr_failure:
+	return -1;
+}
+EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nfattr);
+
+int
+nf_nat_port_nfattr_to_range(struct nfattr *tb[], struct nf_nat_range *range)
+{
+	int ret = 0;
+
+	/* we have to return whether we actually parsed something or not */
+
+	if (tb[CTA_PROTONAT_PORT_MIN-1]) {
+		ret = 1;
+		range->min.tcp.port =
+			*(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]);
+	}
+
+	if (!tb[CTA_PROTONAT_PORT_MAX-1]) {
+		if (ret)
+			range->max.tcp.port = range->min.tcp.port;
+	} else {
+		ret = 1;
+		range->max.tcp.port =
+			*(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nf_nat_port_nfattr_to_range);
+#endif
+
+static int __init nf_nat_init(void)
+{
+	size_t i;
+
+	/* Leave them the same for the moment. */
+	nf_nat_htable_size = nf_conntrack_htable_size;
+
+	/* vmalloc the bysource hash table */
+	bysource = vmalloc(sizeof(struct list_head) * nf_nat_htable_size);
+	if (!bysource)
+		return -ENOMEM;
+
+	/* Sew in builtin protocols. */
+	write_lock_bh(&nf_nat_lock);
+	for (i = 0; i < MAX_IP_NAT_PROTO; i++)
+		nf_nat_protos[i] = &nf_nat_unknown_protocol;
+	nf_nat_protos[IPPROTO_TCP] = &nf_nat_protocol_tcp;
+	nf_nat_protos[IPPROTO_UDP] = &nf_nat_protocol_udp;
+	nf_nat_protos[IPPROTO_ICMP] = &nf_nat_protocol_icmp;
+	write_unlock_bh(&nf_nat_lock);
+
+	for (i = 0; i < nf_nat_htable_size; i++) {
+		INIT_LIST_HEAD(&bysource[i]);
+	}
+
+	/* FIXME: Man, this is a hack.  <SIGH> */
+	NF_CT_ASSERT(nf_conntrack_destroyed == NULL);
+	nf_conntrack_destroyed = &nf_nat_cleanup_conntrack;
+
+	/* Initialize fake conntrack so that NAT will skip it */
+	nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
+
+	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
+	return 0;
+}
+
+/* Clear NAT section of all conntracks, in case we're loaded again. */
+static int clean_nat(struct nf_conn *i, void *data)
+{
+	struct nf_conn_nat *nat = nfct_nat(i);
+
+	if (!nat)
+		return 0;
+	memset(nat, 0, sizeof(*nat));
+	i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
+	return 0;
+}
+
+static void __exit nf_nat_cleanup(void)
+{
+	nf_ct_iterate_cleanup(&clean_nat, NULL);
+	nf_conntrack_destroyed = NULL;
+	vfree(bysource);
+	nf_ct_l3proto_put(l3proto);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(nf_nat_init);
+module_exit(nf_nat_cleanup);
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c
new file mode 100644
index 0000000..751b598
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_ftp.c
@@ -0,0 +1,179 @@
+/* FTP extension for TCP NAT alteration. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <linux/netfilter/nf_conntrack_ftp.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
+MODULE_DESCRIPTION("ftp NAT helper");
+MODULE_ALIAS("ip_nat_ftp");
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+/* FIXME: Time out? --RR */
+
+static int
+mangle_rfc959_packet(struct sk_buff **pskb,
+		     __be32 newip,
+		     u_int16_t port,
+		     unsigned int matchoff,
+		     unsigned int matchlen,
+		     struct nf_conn *ct,
+		     enum ip_conntrack_info ctinfo,
+		     u32 *seq)
+{
+	char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")];
+
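+	/* RFC 959 PORT/PASV arguments are h1,h2,h3,h4,p1,p2: four address
+	 * bytes followed by the port split into high and low bytes. */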
+	sprintf(buffer, "%u,%u,%u,%u,%u,%u",
+		NIPQUAD(newip), port>>8, port&0xFF);
+
+	DEBUGP("calling nf_nat_mangle_tcp_packet\n");
+
+	*seq += strlen(buffer) - matchlen;
+	return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
+					matchlen, buffer, strlen(buffer));
+}
+
+/* |1|132.235.1.2|6275| */
+static int
+mangle_eprt_packet(struct sk_buff **pskb,
+		   __be32 newip,
+		   u_int16_t port,
+		   unsigned int matchoff,
+		   unsigned int matchlen,
+		   struct nf_conn *ct,
+		   enum ip_conntrack_info ctinfo,
+		   u32 *seq)
+{
+	char buffer[sizeof("|1|255.255.255.255|65535|")];
+
+	sprintf(buffer, "|1|%u.%u.%u.%u|%u|", NIPQUAD(newip), port);
+
+	DEBUGP("calling nf_nat_mangle_tcp_packet\n");
+
+	*seq += strlen(buffer) - matchlen;
+	return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
+					matchlen, buffer, strlen(buffer));
+}
+
+/* |1|132.235.1.2|6275| */
+static int
+mangle_epsv_packet(struct sk_buff **pskb,
+		   __be32 newip,
+		   u_int16_t port,
+		   unsigned int matchoff,
+		   unsigned int matchlen,
+		   struct nf_conn *ct,
+		   enum ip_conntrack_info ctinfo,
+		   u32 *seq)
+{
+	char buffer[sizeof("|||65535|")];
+
+	sprintf(buffer, "|||%u|", port);
+
+	DEBUGP("calling nf_nat_mangle_tcp_packet\n");
+
+	*seq += strlen(buffer) - matchlen;
+	return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
+					matchlen, buffer, strlen(buffer));
+}
+
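+/* Dispatch table mapping the FTP command type found by the conntrack
+ * helper to the matching packet mangler. */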
+static int (*mangle[])(struct sk_buff **, __be32, u_int16_t,
+		       unsigned int, unsigned int, struct nf_conn *,
+		       enum ip_conntrack_info, u32 *seq)
+= {
+	[NF_CT_FTP_PORT] = mangle_rfc959_packet,
+	[NF_CT_FTP_PASV] = mangle_rfc959_packet,
+	[NF_CT_FTP_EPRT] = mangle_eprt_packet,
+	[NF_CT_FTP_EPSV] = mangle_epsv_packet
+};
+
+/* So, this packet has hit the connection tracking matching code.
+   Mangle it, and change the expectation to match the new version. */
+static unsigned int nf_nat_ftp(struct sk_buff **pskb,
+			       enum ip_conntrack_info ctinfo,
+			       enum nf_ct_ftp_type type,
+			       unsigned int matchoff,
+			       unsigned int matchlen,
+			       struct nf_conntrack_expect *exp,
+			       u32 *seq)
+{
+	__be32 newip;
+	u_int16_t port;
+	int dir = CTINFO2DIR(ctinfo);
+	struct nf_conn *ct = exp->master;
+
+	DEBUGP("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen);
+
+	/* Connection will come from wherever this packet goes, hence !dir */
+	newip = ct->tuplehash[!dir].tuple.dst.u3.ip;
+	exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
+	exp->dir = !dir;
+
+	/* When you see the packet, we need to NAT it the same as
+	 * this one. */
+	exp->expectfn = nf_nat_follow_master;
+
+	/* Try to get same port: if not, try to change it. */
+	for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
+		exp->tuple.dst.u.tcp.port = htons(port);
+		if (nf_conntrack_expect_related(exp) == 0)
+			break;
+	}
+
+	if (port == 0)
+		return NF_DROP;
+
+	if (!mangle[type](pskb, newip, port, matchoff, matchlen, ct, ctinfo,
+			  seq)) {
+		nf_conntrack_unexpect_related(exp);
+		return NF_DROP;
+	}
+	return NF_ACCEPT;
+}
+
+static void __exit nf_nat_ftp_fini(void)
+{
+	rcu_assign_pointer(nf_nat_ftp_hook, NULL);
+	synchronize_rcu();
+}
+
+static int __init nf_nat_ftp_init(void)
+{
+	BUG_ON(rcu_dereference(nf_nat_ftp_hook));
+	rcu_assign_pointer(nf_nat_ftp_hook, nf_nat_ftp);
+	return 0;
+}
+
+/* Prior to 2.6.11, we had a ports param.  No longer, but don't break users. */
+static int warn_set(const char *val, struct kernel_param *kp)
+{
+	printk(KERN_INFO KBUILD_MODNAME
+	       ": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n");
+	return 0;
+}
+module_param_call(ports, warn_set, NULL, NULL, 0);
+
+module_init(nf_nat_ftp_init);
+module_exit(nf_nat_ftp_fini);
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
new file mode 100644
index 0000000..fb9ab01
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -0,0 +1,596 @@
+/*
+ * H.323 extension for NAT alteration.
+ *
+ * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
+ *
+ * This source code is licensed under General Public License version 2.
+ *
+ * Based on the 'brute force' H.323 NAT module by
+ * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
+
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <linux/netfilter/nf_conntrack_h323.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+/****************************************************************************/
+static int set_addr(struct sk_buff **pskb,
+		    unsigned char **data, int dataoff,
+		    unsigned int addroff, __be32 ip, __be16 port)
+{
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct = ip_conntrack_get(*pskb, &ctinfo);
+	struct {
+		__be32 ip;
+		__be16 port;
+	} __attribute__ ((__packed__)) buf;
+	struct tcphdr _tcph, *th;
+
+	buf.ip = ip;
+	buf.port = port;
+	addroff += dataoff;
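+	/* addroff is now relative to the start of the TCP/UDP payload,
+	 * as the mangle helpers below expect. */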
+
+	if ((*pskb)->nh.iph->protocol == IPPROTO_TCP) {
+		if (!nf_nat_mangle_tcp_packet(pskb, ct, ctinfo,
+					      addroff, sizeof(buf),
+					      (char *) &buf, sizeof(buf))) {
+			if (net_ratelimit())
+				printk("nf_nat_h323: nf_nat_mangle_tcp_packet"
+				       " error\n");
+			return -1;
+		}
+
+		/* Relocate data pointer */
+		th = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl * 4,
+					sizeof(_tcph), &_tcph);
+		if (th == NULL)
+			return -1;
+		*data = (*pskb)->data + (*pskb)->nh.iph->ihl * 4 +
+		    th->doff * 4 + dataoff;
+	} else {
+		if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo,
+					      addroff, sizeof(buf),
+					      (char *) &buf, sizeof(buf))) {
+			if (net_ratelimit())
+				printk("nf_nat_h323: nf_nat_mangle_udp_packet"
+				       " error\n");
+			return -1;
+		}
+		/* nf_nat_mangle_udp_packet uses skb_make_writable() to copy
+		 * or pull everything in a linear buffer, so we can safely
+		 * use the skb pointers now */
+		*data = (*pskb)->data + (*pskb)->nh.iph->ihl * 4 +
+		    sizeof(struct udphdr);
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int set_h225_addr(struct sk_buff **pskb,
+			 unsigned char **data, int dataoff,
+			 TransportAddress *taddr,
+			 union nf_conntrack_address *addr, __be16 port)
+{
+	return set_addr(pskb, data, dataoff, taddr->ipAddress.ip,
+			addr->ip, port);
+}
+
+/****************************************************************************/
+static int set_h245_addr(struct sk_buff **pskb,
+			 unsigned char **data, int dataoff,
+			 H245_TransportAddress *taddr,
+			 union nf_conntrack_address *addr, __be16 port)
+{
+	return set_addr(pskb, data, dataoff,
+			taddr->unicastAddress.iPAddress.network,
+			addr->ip, port);
+}
+
+/****************************************************************************/
+static int set_sig_addr(struct sk_buff **pskb, struct nf_conn *ct,
+			enum ip_conntrack_info ctinfo,
+			unsigned char **data,
+			TransportAddress *taddr, int count)
+{
+	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	int dir = CTINFO2DIR(ctinfo);
+	int i;
+	__be16 port;
+	union nf_conntrack_address addr;
+
+	for (i = 0; i < count; i++) {
+		if (get_h225_addr(ct, *data, &taddr[i], &addr, &port)) {
+			if (addr.ip == ct->tuplehash[dir].tuple.src.u3.ip &&
+			    port == info->sig_port[dir]) {
+				/* GW->GK */
+
+				/* Fix for Gnomemeeting */
+				if (i > 0 &&
+				    get_h225_addr(ct, *data, &taddr[0],
+						  &addr, &port) &&
+				    (ntohl(addr.ip) & 0xff000000) == 0x7f000000)
+					i = 0;
+
+				DEBUGP
+				    ("nf_nat_ras: set signal address "
+				     "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
+				     NIPQUAD(addr.ip), port,
+				     NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip),
+				     info->sig_port[!dir]);
+				return set_h225_addr(pskb, data, 0, &taddr[i],
+						     &ct->tuplehash[!dir].
+						     tuple.dst.u3,
+						     info->sig_port[!dir]);
+			} else if (addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip &&
+				   port == info->sig_port[dir]) {
+				/* GK->GW */
+				DEBUGP
+				    ("nf_nat_ras: set signal address "
+				     "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
+				     NIPQUAD(addr.ip), port,
+				     NIPQUAD(ct->tuplehash[!dir].tuple.src.u3.ip),
+				     info->sig_port[!dir]);
+				return set_h225_addr(pskb, data, 0, &taddr[i],
+						     &ct->tuplehash[!dir].
+						     tuple.src.u3,
+						     info->sig_port[!dir]);
+			}
+		}
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int set_ras_addr(struct sk_buff **pskb, struct nf_conn *ct,
+			enum ip_conntrack_info ctinfo,
+			unsigned char **data,
+			TransportAddress *taddr, int count)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	int i;
+	__be16 port;
+	union nf_conntrack_address addr;
+
+	for (i = 0; i < count; i++) {
+		if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) &&
+		    addr.ip == ct->tuplehash[dir].tuple.src.u3.ip &&
+		    port == ct->tuplehash[dir].tuple.src.u.udp.port) {
+			DEBUGP("nf_nat_ras: set rasAddress "
+			       "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
+			       NIPQUAD(addr.ip), ntohs(port),
+			       NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip),
+			       ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.
+				     port));
+			return set_h225_addr(pskb, data, 0, &taddr[i],
+					     &ct->tuplehash[!dir].tuple.dst.u3,
+					     ct->tuplehash[!dir].tuple.
+								dst.u.udp.port);
+		}
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int nat_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct,
+			enum ip_conntrack_info ctinfo,
+			unsigned char **data, int dataoff,
+			H245_TransportAddress *taddr,
+			__be16 port, __be16 rtp_port,
+			struct nf_conntrack_expect *rtp_exp,
+			struct nf_conntrack_expect *rtcp_exp)
+{
+	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	int dir = CTINFO2DIR(ctinfo);
+	int i;
+	u_int16_t nated_port;
+
+	/* Set expectations for NAT */
+	rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port;
+	rtp_exp->expectfn = nf_nat_follow_master;
+	rtp_exp->dir = !dir;
+	rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port;
+	rtcp_exp->expectfn = nf_nat_follow_master;
+	rtcp_exp->dir = !dir;
+
+	/* Lookup existing expects */
+	for (i = 0; i < H323_RTP_CHANNEL_MAX; i++) {
+		if (info->rtp_port[i][dir] == rtp_port) {
+			/* Expected */
+
+			/* Use allocated ports first. This will refresh
+			 * the expects */
+			rtp_exp->tuple.dst.u.udp.port = info->rtp_port[i][dir];
+			rtcp_exp->tuple.dst.u.udp.port =
+			    htons(ntohs(info->rtp_port[i][dir]) + 1);
+			break;
+		} else if (info->rtp_port[i][dir] == 0) {
+			/* Not expected */
+			break;
+		}
+	}
+
+	/* Run out of expectations */
+	if (i >= H323_RTP_CHANNEL_MAX) {
+		if (net_ratelimit())
+			printk("nf_nat_h323: out of expectations\n");
+		return 0;
+	}
+
+	/* Try to get a pair of ports. */
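+	/* RTCP conventionally uses the RTP port + 1, so probe the ports in
+	 * adjacent pairs. */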
+	for (nated_port = ntohs(rtp_exp->tuple.dst.u.udp.port);
+	     nated_port != 0; nated_port += 2) {
+		rtp_exp->tuple.dst.u.udp.port = htons(nated_port);
+		if (nf_conntrack_expect_related(rtp_exp) == 0) {
+			rtcp_exp->tuple.dst.u.udp.port =
+			    htons(nated_port + 1);
+			if (nf_conntrack_expect_related(rtcp_exp) == 0)
+				break;
+			nf_conntrack_unexpect_related(rtp_exp);
+		}
+	}
+
+	if (nated_port == 0) {	/* No port available */
+		if (net_ratelimit())
+			printk("nf_nat_h323: out of RTP ports\n");
+		return 0;
+	}
+
+	/* Modify signal */
+	if (set_h245_addr(pskb, data, dataoff, taddr,
+			  &ct->tuplehash[!dir].tuple.dst.u3,
+			  htons((port & htons(1)) ? nated_port + 1 :
+			  			    nated_port)) == 0) {
+		/* Save ports */
+		info->rtp_port[i][dir] = rtp_port;
+		info->rtp_port[i][!dir] = htons(nated_port);
+	} else {
+		nf_conntrack_unexpect_related(rtp_exp);
+		nf_conntrack_unexpect_related(rtcp_exp);
+		return -1;
+	}
+
+	/* Success */
+	DEBUGP("nf_nat_h323: expect RTP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
+	       NIPQUAD(rtp_exp->tuple.src.u3.ip),
+	       ntohs(rtp_exp->tuple.src.u.udp.port),
+	       NIPQUAD(rtp_exp->tuple.dst.u3.ip),
+	       ntohs(rtp_exp->tuple.dst.u.udp.port));
+	DEBUGP("nf_nat_h323: expect RTCP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
+	       NIPQUAD(rtcp_exp->tuple.src.u3.ip),
+	       ntohs(rtcp_exp->tuple.src.u.udp.port),
+	       NIPQUAD(rtcp_exp->tuple.dst.u3.ip),
+	       ntohs(rtcp_exp->tuple.dst.u.udp.port));
+
+	return 0;
+}
+
+/****************************************************************************/
+static int nat_t120(struct sk_buff **pskb, struct nf_conn *ct,
+		    enum ip_conntrack_info ctinfo,
+		    unsigned char **data, int dataoff,
+		    H245_TransportAddress *taddr, __be16 port,
+		    struct nf_conntrack_expect *exp)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	u_int16_t nated_port = ntohs(port);
+
+	/* Set expectations for NAT */
+	exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
+	exp->expectfn = nf_nat_follow_master;
+	exp->dir = !dir;
+
+	/* Try to get same port: if not, try to change it. */
+	for (; nated_port != 0; nated_port++) {
+		exp->tuple.dst.u.tcp.port = htons(nated_port);
+		if (nf_conntrack_expect_related(exp) == 0)
+			break;
+	}
+
+	if (nated_port == 0) {	/* No port available */
+		if (net_ratelimit())
+			printk("nf_nat_h323: out of TCP ports\n");
+		return 0;
+	}
+
+	/* Modify signal */
+	if (set_h245_addr(pskb, data, dataoff, taddr,
+			  &ct->tuplehash[!dir].tuple.dst.u3,
+			  htons(nated_port)) < 0) {
+		nf_conntrack_unexpect_related(exp);
+		return -1;
+	}
+
+	DEBUGP("nf_nat_h323: expect T.120 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
+	       NIPQUAD(exp->tuple.src.u3.ip), ntohs(exp->tuple.src.u.tcp.port),
+	       NIPQUAD(exp->tuple.dst.u3.ip), ntohs(exp->tuple.dst.u.tcp.port));
+
+	return 0;
+}
+
+/****************************************************************************/
+static int nat_h245(struct sk_buff **pskb, struct nf_conn *ct,
+		    enum ip_conntrack_info ctinfo,
+		    unsigned char **data, int dataoff,
+		    TransportAddress *taddr, __be16 port,
+		    struct nf_conntrack_expect *exp)
+{
+	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	int dir = CTINFO2DIR(ctinfo);
+	u_int16_t nated_port = ntohs(port);
+
+	/* Set expectations for NAT */
+	exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
+	exp->expectfn = nf_nat_follow_master;
+	exp->dir = !dir;
+
+	/* Check existing expects */
+	if (info->sig_port[dir] == port)
+		nated_port = ntohs(info->sig_port[!dir]);
+
+	/* Try to get same port: if not, try to change it. */
+	for (; nated_port != 0; nated_port++) {
+		exp->tuple.dst.u.tcp.port = htons(nated_port);
+		if (nf_conntrack_expect_related(exp) == 0)
+			break;
+	}
+
+	if (nated_port == 0) {	/* No port available */
+		if (net_ratelimit())
+			printk("nf_nat_q931: out of TCP ports\n");
+		return 0;
+	}
+
+	/* Modify signal */
+	if (set_h225_addr(pskb, data, dataoff, taddr,
+			  &ct->tuplehash[!dir].tuple.dst.u3,
+			  htons(nated_port)) == 0) {
+		/* Save ports */
+		info->sig_port[dir] = port;
+		info->sig_port[!dir] = htons(nated_port);
+	} else {
+		nf_conntrack_unexpect_related(exp);
+		return -1;
+	}
+
+	DEBUGP("nf_nat_q931: expect H.245 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
+	       NIPQUAD(exp->tuple.src.u3.ip), ntohs(exp->tuple.src.u.tcp.port),
+	       NIPQUAD(exp->tuple.dst.u3.ip), ntohs(exp->tuple.dst.u.tcp.port));
+
+	return 0;
+}
+
+/****************************************************************************
+ * This conntrack expect function replaces nf_conntrack_q931_expect()
+ * which was set by nf_conntrack_h323.c.
+ ****************************************************************************/
+static void ip_nat_q931_expect(struct nf_conn *new,
+			       struct nf_conntrack_expect *this)
+{
+	struct nf_nat_range range;
+
+	if (this->tuple.src.u3.ip != 0) {	/* Only accept calls from GK */
+		nf_nat_follow_master(new, this);
+		return;
+	}
+
+	/* This must be a fresh one. */
+	BUG_ON(new->status & IPS_NAT_DONE_MASK);
+
+	/* Change src to where master sends to */
+	range.flags = IP_NAT_RANGE_MAP_IPS;
+	range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip;
+
+	/* hook doesn't matter, but it has to do source manip */
+	nf_nat_setup_info(new, &range, NF_IP_POST_ROUTING);
+
+	/* For DST manip, map port here to where it's expected. */
+	range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+	range.min = range.max = this->saved_proto;
+	range.min_ip = range.max_ip =
+	    new->master->tuplehash[!this->dir].tuple.src.u3.ip;
+
+	/* hook doesn't matter, but it has to do destination manip */
+	nf_nat_setup_info(new, &range, NF_IP_PRE_ROUTING);
+}
+
+/****************************************************************************/
+static int nat_q931(struct sk_buff **pskb, struct nf_conn *ct,
+		    enum ip_conntrack_info ctinfo,
+		    unsigned char **data, TransportAddress *taddr, int idx,
+		    __be16 port, struct nf_conntrack_expect *exp)
+{
+	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	int dir = CTINFO2DIR(ctinfo);
+	u_int16_t nated_port = ntohs(port);
+	union nf_conntrack_address addr;
+
+	/* Set expectations for NAT */
+	exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
+	exp->expectfn = ip_nat_q931_expect;
+	exp->dir = !dir;
+
+	/* Check existing expects */
+	if (info->sig_port[dir] == port)
+		nated_port = ntohs(info->sig_port[!dir]);
+
+	/* Try to get same port: if not, try to change it. */
+	for (; nated_port != 0; nated_port++) {
+		exp->tuple.dst.u.tcp.port = htons(nated_port);
+		if (nf_conntrack_expect_related(exp) == 0)
+			break;
+	}
+
+	if (nated_port == 0) {	/* No port available */
+		if (net_ratelimit())
+			printk("nf_nat_ras: out of TCP ports\n");
+		return 0;
+	}
+
+	/* Modify signal */
+	if (set_h225_addr(pskb, data, 0, &taddr[idx],
+			  &ct->tuplehash[!dir].tuple.dst.u3,
+			  htons(nated_port)) == 0) {
+		/* Save ports */
+		info->sig_port[dir] = port;
+		info->sig_port[!dir] = htons(nated_port);
+
+		/* Fix for Gnomemeeting */
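+		/* (a 127.x.x.x address in taddr[0] is rewritten as well) */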
+		if (idx > 0 &&
+		    get_h225_addr(ct, *data, &taddr[0], &addr, &port) &&
+		    (ntohl(addr.ip) & 0xff000000) == 0x7f000000) {
+			set_h225_addr_hook(pskb, data, 0, &taddr[0],
+					   &ct->tuplehash[!dir].tuple.dst.u3,
+					   info->sig_port[!dir]);
+		}
+	} else {
+		nf_conntrack_unexpect_related(exp);
+		return -1;
+	}
+
+	/* Success */
+	DEBUGP("nf_nat_ras: expect Q.931 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
+	       NIPQUAD(exp->tuple.src.u3.ip), ntohs(exp->tuple.src.u.tcp.port),
+	       NIPQUAD(exp->tuple.dst.u3.ip), ntohs(exp->tuple.dst.u.tcp.port));
+
+	return 0;
+}
+
+/****************************************************************************/
+static void ip_nat_callforwarding_expect(struct nf_conn *new,
+					 struct nf_conntrack_expect *this)
+{
+	struct nf_nat_range range;
+
+	/* This must be a fresh one. */
+	BUG_ON(new->status & IPS_NAT_DONE_MASK);
+
+	/* Change src to where master sends to */
+	range.flags = IP_NAT_RANGE_MAP_IPS;
+	range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip;
+
+	/* hook doesn't matter, but it has to do source manip */
+	nf_nat_setup_info(new, &range, NF_IP_POST_ROUTING);
+
+	/* For DST manip, map port here to where it's expected. */
+	range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+	range.min = range.max = this->saved_proto;
+	range.min_ip = range.max_ip = this->saved_ip;
+
+	/* hook doesn't matter, but it has to do destination manip */
+	nf_nat_setup_info(new, &range, NF_IP_PRE_ROUTING);
+}
+
+/****************************************************************************/
+static int nat_callforwarding(struct sk_buff **pskb, struct nf_conn *ct,
+			      enum ip_conntrack_info ctinfo,
+			      unsigned char **data, int dataoff,
+			      TransportAddress *taddr, __be16 port,
+			      struct nf_conntrack_expect *exp)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	u_int16_t nated_port;
+
+	/* Set expectations for NAT */
+	exp->saved_ip = exp->tuple.dst.u3.ip;
+	exp->tuple.dst.u3.ip = ct->tuplehash[!dir].tuple.dst.u3.ip;
+	exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
+	exp->expectfn = ip_nat_callforwarding_expect;
+	exp->dir = !dir;
+
+	/* Try to get same port: if not, try to change it. */
+	for (nated_port = ntohs(port); nated_port != 0; nated_port++) {
+		exp->tuple.dst.u.tcp.port = htons(nated_port);
+		if (nf_conntrack_expect_related(exp) == 0)
+			break;
+	}
+
+	if (nated_port == 0) {	/* No port available */
+		if (net_ratelimit())
+			printk("nf_nat_q931: out of TCP ports\n");
+		return 0;
+	}
+
+	/* Modify signal */
+	if (set_h225_addr(pskb, data, dataoff, taddr,
+			  &ct->tuplehash[!dir].tuple.dst.u3,
+			  htons(nated_port)) < 0) {
+		nf_conntrack_unexpect_related(exp);
+		return -1;
+	}
+
+	/* Success */
+	DEBUGP("nf_nat_q931: expect Call Forwarding "
+	       "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n",
+	       NIPQUAD(exp->tuple.src.u3.ip), ntohs(exp->tuple.src.u.tcp.port),
+	       NIPQUAD(exp->tuple.dst.u3.ip), ntohs(exp->tuple.dst.u.tcp.port));
+
+	return 0;
+}
+
+/****************************************************************************/
+static int __init init(void)
+{
+	BUG_ON(rcu_dereference(set_h245_addr_hook) != NULL);
+	BUG_ON(rcu_dereference(set_h225_addr_hook) != NULL);
+	BUG_ON(rcu_dereference(set_sig_addr_hook) != NULL);
+	BUG_ON(rcu_dereference(set_ras_addr_hook) != NULL);
+	BUG_ON(rcu_dereference(nat_rtp_rtcp_hook) != NULL);
+	BUG_ON(rcu_dereference(nat_t120_hook) != NULL);
+	BUG_ON(rcu_dereference(nat_h245_hook) != NULL);
+	BUG_ON(rcu_dereference(nat_callforwarding_hook) != NULL);
+	BUG_ON(rcu_dereference(nat_q931_hook) != NULL);
+
+	rcu_assign_pointer(set_h245_addr_hook, set_h245_addr);
+	rcu_assign_pointer(set_h225_addr_hook, set_h225_addr);
+	rcu_assign_pointer(set_sig_addr_hook, set_sig_addr);
+	rcu_assign_pointer(set_ras_addr_hook, set_ras_addr);
+	rcu_assign_pointer(nat_rtp_rtcp_hook, nat_rtp_rtcp);
+	rcu_assign_pointer(nat_t120_hook, nat_t120);
+	rcu_assign_pointer(nat_h245_hook, nat_h245);
+	rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding);
+	rcu_assign_pointer(nat_q931_hook, nat_q931);
+
+	DEBUGP("nf_nat_h323: init success\n");
+	return 0;
+}
+
+/****************************************************************************/
+static void __exit fini(void)
+{
+	rcu_assign_pointer(set_h245_addr_hook, NULL);
+	rcu_assign_pointer(set_h225_addr_hook, NULL);
+	rcu_assign_pointer(set_sig_addr_hook, NULL);
+	rcu_assign_pointer(set_ras_addr_hook, NULL);
+	rcu_assign_pointer(nat_rtp_rtcp_hook, NULL);
+	rcu_assign_pointer(nat_t120_hook, NULL);
+	rcu_assign_pointer(nat_h245_hook, NULL);
+	rcu_assign_pointer(nat_callforwarding_hook, NULL);
+	rcu_assign_pointer(nat_q931_hook, NULL);
+	synchronize_rcu();
+}
+
+/****************************************************************************/
+module_init(init);
+module_exit(fini);
+
+MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>");
+MODULE_DESCRIPTION("H.323 NAT helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ip_nat_h323");
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
new file mode 100644
index 0000000..98fbfc8
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -0,0 +1,433 @@
+/* ip_nat_helper.c - generic support functions for NAT helpers
+ *
+ * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
+ * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/checksum.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_helper.h>
+
+#if 0
+#define DEBUGP printk
+#define DUMP_OFFSET(x)	printk("offset_before=%d, offset_after=%d, correction_pos=%u\n", x->offset_before, x->offset_after, x->correction_pos);
+#else
+#define DEBUGP(format, args...)
+#define DUMP_OFFSET(x)
+#endif
+
+static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
+
+/* Setup TCP sequence correction given this change at this sequence */
+static inline void
+adjust_tcp_sequence(u32 seq,
+		    int sizediff,
+		    struct nf_conn *ct,
+		    enum ip_conntrack_info ctinfo)
+{
+	int dir;
+	struct nf_nat_seq *this_way, *other_way;
+	struct nf_conn_nat *nat = nfct_nat(ct);
+
+	DEBUGP("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
+		seq, sizediff);
+
+	dir = CTINFO2DIR(ctinfo);
+
+	this_way = &nat->info.seq[dir];
+	other_way = &nat->info.seq[!dir];
+
+	DEBUGP("nf_nat_resize_packet: Seq_offset before: ");
+	DUMP_OFFSET(this_way);
+
+	spin_lock_bh(&nf_nat_seqofs_lock);
+
+	/* SYN adjust. If it's uninitialized, or this is after last
+	 * correction, record it: we don't handle more than one
+	 * adjustment in the window, but do deal with common case of a
+	 * retransmit */
+	if (this_way->offset_before == this_way->offset_after ||
+	    before(this_way->correction_pos, seq)) {
+		   this_way->correction_pos = seq;
+		   this_way->offset_before = this_way->offset_after;
+		   this_way->offset_after += sizediff;
+	}
+	spin_unlock_bh(&nf_nat_seqofs_lock);
+
+	DEBUGP("nf_nat_resize_packet: Seq_offset after: ");
+	DUMP_OFFSET(this_way);
+}
+
+/* Frobs data inside this packet, which is linear. */
+static void mangle_contents(struct sk_buff *skb,
+			    unsigned int dataoff,
+			    unsigned int match_offset,
+			    unsigned int match_len,
+			    const char *rep_buffer,
+			    unsigned int rep_len)
+{
+	unsigned char *data;
+
+	BUG_ON(skb_is_nonlinear(skb));
+	data = (unsigned char *)skb->nh.iph + dataoff;
+
+	/* move post-replacement */
+	memmove(data + match_offset + rep_len,
+		data + match_offset + match_len,
+		skb->tail - (data + match_offset + match_len));
+
+	/* insert data from buffer */
+	memcpy(data + match_offset, rep_buffer, rep_len);
+
+	/* update skb info */
+	if (rep_len > match_len) {
+		DEBUGP("nf_nat_mangle_packet: Extending packet by "
+		       "%u from %u bytes\n", rep_len - match_len,
+		       skb->len);
+		skb_put(skb, rep_len - match_len);
+	} else {
+		DEBUGP("nf_nat_mangle_packet: Shrinking packet by "
+		       "%u from %u bytes\n", match_len - rep_len,
+		       skb->len);
+		__skb_trim(skb, skb->len + rep_len - match_len);
+	}
+
+	/* fix IP hdr checksum information */
+	skb->nh.iph->tot_len = htons(skb->len);
+	ip_send_check(skb->nh.iph);
+}
+
+/* Unusual, but possible case. */
+static int enlarge_skb(struct sk_buff **pskb, unsigned int extra)
+{
+	struct sk_buff *nskb;
+
+	if ((*pskb)->len + extra > 65535)
+		return 0;
+
+	nskb = skb_copy_expand(*pskb, skb_headroom(*pskb), extra, GFP_ATOMIC);
+	if (!nskb)
+		return 0;
+
+	/* Transfer socket to new skb. */
+	if ((*pskb)->sk)
+		skb_set_owner_w(nskb, (*pskb)->sk);
+	kfree_skb(*pskb);
+	*pskb = nskb;
+	return 1;
+}
+
+/* Generic function for mangling variable-length address changes inside
+ * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
+ * command in FTP).
+ *
+ * Takes care of all the nasty sequence number changes, checksumming,
+ * skb enlargement, ...
+ */
+int
+nf_nat_mangle_tcp_packet(struct sk_buff **pskb,
+			 struct nf_conn *ct,
+			 enum ip_conntrack_info ctinfo,
+			 unsigned int match_offset,
+			 unsigned int match_len,
+			 const char *rep_buffer,
+			 unsigned int rep_len)
+{
+	struct iphdr *iph;
+	struct tcphdr *tcph;
+	int oldlen, datalen;
+
+	if (!skb_make_writable(pskb, (*pskb)->len))
+		return 0;
+
+	if (rep_len > match_len &&
+	    rep_len - match_len > skb_tailroom(*pskb) &&
+	    !enlarge_skb(pskb, rep_len - match_len))
+		return 0;
+
+	SKB_LINEAR_ASSERT(*pskb);
+
+	iph = (*pskb)->nh.iph;
+	tcph = (void *)iph + iph->ihl*4;
+
+	oldlen = (*pskb)->len - iph->ihl*4;
+	mangle_contents(*pskb, iph->ihl*4 + tcph->doff*4,
+			match_offset, match_len, rep_buffer, rep_len);
+
+	datalen = (*pskb)->len - iph->ihl*4;
+	if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
+		tcph->check = 0;
+		tcph->check = tcp_v4_check(tcph, datalen,
+					   iph->saddr, iph->daddr,
+					   csum_partial((char *)tcph,
+					   		datalen, 0));
+	} else
+		nf_proto_csum_replace2(&tcph->check, *pskb,
+				       htons(oldlen), htons(datalen), 1);
+
+	if (rep_len != match_len) {
+		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
+		adjust_tcp_sequence(ntohl(tcph->seq),
+				    (int)rep_len - (int)match_len,
+				    ct, ctinfo);
+		/* Tell TCP window tracking about seq change */
+		nf_conntrack_tcp_update(*pskb, (*pskb)->nh.iph->ihl*4,
+					ct, CTINFO2DIR(ctinfo));
+	}
+	return 1;
+}
+EXPORT_SYMBOL(nf_nat_mangle_tcp_packet);
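+
+/* A minimal (illustrative) use from a helper, assuming matchoff/matchlen
+ * delimit an address string in the payload and newip/port hold the NATed
+ * values:
+ *
+ *	char buf[sizeof("255.255.255.255:65535")];
+ *	int len = sprintf(buf, "%u.%u.%u.%u:%u", NIPQUAD(newip), port);
+ *
+ *	if (!nf_nat_mangle_tcp_packet(pskb, ct, ctinfo,
+ *				      matchoff, matchlen, buf, len))
+ *		return NF_DROP;
+ */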
+
+/* Generic function for mangling variable-length address changes inside
+ * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
+ * command in the Amanda protocol)
+ *
+ * Takes care of all the nasty sequence number changes, checksumming,
+ * skb enlargement, ...
+ *
+ * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
+ *       should be fairly easy to do.
+ */
+int
+nf_nat_mangle_udp_packet(struct sk_buff **pskb,
+			 struct nf_conn *ct,
+			 enum ip_conntrack_info ctinfo,
+			 unsigned int match_offset,
+			 unsigned int match_len,
+			 const char *rep_buffer,
+			 unsigned int rep_len)
+{
+	struct iphdr *iph;
+	struct udphdr *udph;
+	int datalen, oldlen;
+
+	/* UDP helpers might accidentally mangle the wrong packet */
+	iph = (*pskb)->nh.iph;
+	if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) +
+	                       match_offset + match_len)
+		return 0;
+
+	if (!skb_make_writable(pskb, (*pskb)->len))
+		return 0;
+
+	if (rep_len > match_len &&
+	    rep_len - match_len > skb_tailroom(*pskb) &&
+	    !enlarge_skb(pskb, rep_len - match_len))
+		return 0;
+
+	iph = (*pskb)->nh.iph;
+	udph = (void *)iph + iph->ihl*4;
+
+	oldlen = (*pskb)->len - iph->ihl*4;
+	mangle_contents(*pskb, iph->ihl*4 + sizeof(*udph),
+			match_offset, match_len, rep_buffer, rep_len);
+
+	/* update the length of the UDP packet */
+	datalen = (*pskb)->len - iph->ihl*4;
+	udph->len = htons(datalen);
+
+	/* fix udp checksum if udp checksum was previously calculated */
+	if (!udph->check && (*pskb)->ip_summed != CHECKSUM_PARTIAL)
+		return 1;
+
+	if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
+		udph->check = 0;
+		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+		                                datalen, IPPROTO_UDP,
+		                                csum_partial((char *)udph,
+		                                             datalen, 0));
+		if (!udph->check)
+			udph->check = CSUM_MANGLED_0;
+	} else
+		nf_proto_csum_replace2(&udph->check, *pskb,
+				       htons(oldlen), htons(datalen), 1);
+
+	return 1;
+}
+EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
+
+/* Adjust one found SACK option including checksum correction */
+static void
+sack_adjust(struct sk_buff *skb,
+	    struct tcphdr *tcph,
+	    unsigned int sackoff,
+	    unsigned int sackend,
+	    struct nf_nat_seq *natseq)
+{
+	while (sackoff < sackend) {
+		struct tcp_sack_block_wire *sack;
+		__be32 new_start_seq, new_end_seq;
+
+		sack = (void *)skb->data + sackoff;
+		if (after(ntohl(sack->start_seq) - natseq->offset_before,
+			  natseq->correction_pos))
+			new_start_seq = htonl(ntohl(sack->start_seq)
+					- natseq->offset_after);
+		else
+			new_start_seq = htonl(ntohl(sack->start_seq)
+					- natseq->offset_before);
+
+		if (after(ntohl(sack->end_seq) - natseq->offset_before,
+			  natseq->correction_pos))
+			new_end_seq = htonl(ntohl(sack->end_seq)
+				      - natseq->offset_after);
+		else
+			new_end_seq = htonl(ntohl(sack->end_seq)
+				      - natseq->offset_before);
+
+		DEBUGP("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
+			ntohl(sack->start_seq), new_start_seq,
+			ntohl(sack->end_seq), new_end_seq);
+
+		nf_proto_csum_replace4(&tcph->check, skb,
+				       sack->start_seq, new_start_seq, 0);
+		nf_proto_csum_replace4(&tcph->check, skb,
+				       sack->end_seq, new_end_seq, 0);
+		sack->start_seq = new_start_seq;
+		sack->end_seq = new_end_seq;
+		sackoff += sizeof(*sack);
+	}
+}
+
+/* TCP SACK sequence number adjustment */
+static inline unsigned int
+nf_nat_sack_adjust(struct sk_buff **pskb,
+		   struct tcphdr *tcph,
+		   struct nf_conn *ct,
+		   enum ip_conntrack_info ctinfo)
+{
+	unsigned int dir, optoff, optend;
+	struct nf_conn_nat *nat = nfct_nat(ct);
+
+	optoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct tcphdr);
+	optend = (*pskb)->nh.iph->ihl*4 + tcph->doff*4;
+
+	if (!skb_make_writable(pskb, optend))
+		return 0;
+
+	dir = CTINFO2DIR(ctinfo);
+
+	while (optoff < optend) {
+		/* Usually: option, length. */
+		unsigned char *op = (*pskb)->data + optoff;
+
+		switch (op[0]) {
+		case TCPOPT_EOL:
+			return 1;
+		case TCPOPT_NOP:
+			optoff++;
+			continue;
+		default:
+			/* no partial options */
+			if (optoff + 1 == optend ||
+			    optoff + op[1] > optend ||
+			    op[1] < 2)
+				return 0;
+			if (op[0] == TCPOPT_SACK &&
+			    op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
+			    ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
+				sack_adjust(*pskb, tcph, optoff+2,
+					    optoff+op[1],
+					    &nat->info.seq[!dir]);
+			optoff += op[1];
+		}
+	}
+	return 1;
+}
+
+/* TCP sequence number adjustment.  Returns 1 on success, 0 on failure */
+int
+nf_nat_seq_adjust(struct sk_buff **pskb,
+		  struct nf_conn *ct,
+		  enum ip_conntrack_info ctinfo)
+{
+	struct tcphdr *tcph;
+	int dir;
+	__be32 newseq, newack;
+	struct nf_conn_nat *nat = nfct_nat(ct);
+	struct nf_nat_seq *this_way, *other_way;
+
+	dir = CTINFO2DIR(ctinfo);
+
+	this_way = &nat->info.seq[dir];
+	other_way = &nat->info.seq[!dir];
+
+	if (!skb_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph)))
+		return 0;
+
+	tcph = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
+	if (after(ntohl(tcph->seq), this_way->correction_pos))
+		newseq = htonl(ntohl(tcph->seq) + this_way->offset_after);
+	else
+		newseq = htonl(ntohl(tcph->seq) + this_way->offset_before);
+
+	if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
+		  other_way->correction_pos))
+		newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after);
+	else
+		newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before);
+
+	nf_proto_csum_replace4(&tcph->check, *pskb, tcph->seq, newseq, 0);
+	nf_proto_csum_replace4(&tcph->check, *pskb, tcph->ack_seq, newack, 0);
+
+	DEBUGP("Adjusting sequence number from %u->%u, ack from %u->%u\n",
+		ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
+		ntohl(newack));
+
+	tcph->seq = newseq;
+	tcph->ack_seq = newack;
+
+	if (!nf_nat_sack_adjust(pskb, tcph, ct, ctinfo))
+		return 0;
+
+	nf_conntrack_tcp_update(*pskb, (*pskb)->nh.iph->ihl*4, ct, dir);
+
+	return 1;
+}
+EXPORT_SYMBOL(nf_nat_seq_adjust);
+
+/* Setup NAT on this expected conntrack so it follows master. */
+/* If we fail to get a free NAT slot, we'll get dropped on confirm */
+void nf_nat_follow_master(struct nf_conn *ct,
+			  struct nf_conntrack_expect *exp)
+{
+	struct nf_nat_range range;
+
+	/* This must be a fresh one. */
+	BUG_ON(ct->status & IPS_NAT_DONE_MASK);
+
+	/* Change src to where master sends to */
+	range.flags = IP_NAT_RANGE_MAP_IPS;
+	range.min_ip = range.max_ip
+		= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
+	/* hook doesn't matter, but it has to do source manip */
+	nf_nat_setup_info(ct, &range, NF_IP_POST_ROUTING);
+
+	/* For DST manip, map port here to where it's expected. */
+	range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+	range.min = range.max = exp->saved_proto;
+	range.min_ip = range.max_ip
+		= ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
+	/* hook doesn't matter, but it has to do destination manip */
+	nf_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING);
+}
+EXPORT_SYMBOL(nf_nat_follow_master);
diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c
new file mode 100644
index 0000000..9b8c0da
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_irc.c
@@ -0,0 +1,101 @@
+/* IRC extension for TCP NAT alteration.
+ *
+ * (C) 2000-2001 by Harald Welte <laforge@gnumonks.org>
+ * (C) 2004 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
+ * based on a copy of RR's ip_nat_ftp.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/tcp.h>
+#include <linux/kernel.h>
+
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <linux/netfilter/nf_conntrack_irc.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
+MODULE_DESCRIPTION("IRC (DCC) NAT helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ip_nat_irc");
+
+static unsigned int help(struct sk_buff **pskb,
+			 enum ip_conntrack_info ctinfo,
+			 unsigned int matchoff,
+			 unsigned int matchlen,
+			 struct nf_conntrack_expect *exp)
+{
+	char buffer[sizeof("4294967295 65535")];
+	u_int32_t ip;
+	u_int16_t port;
+	unsigned int ret;
+
+	DEBUGP("IRC_NAT: info match at offset %u, len %u\n",
+	       matchoff, matchlen);
+
+	/* Reply comes from server. */
+	exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
+	exp->dir = IP_CT_DIR_REPLY;
+	exp->expectfn = nf_nat_follow_master;
+
+	/* Try to get same port: if not, try to change it. */
+	for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
+		exp->tuple.dst.u.tcp.port = htons(port);
+		if (nf_conntrack_expect_related(exp) == 0)
+			break;
+	}
+
+	if (port == 0)
+		return NF_DROP;
+
+	ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip);
+	sprintf(buffer, "%u %u", ip, port);
+	DEBUGP("nf_nat_irc: inserting '%s' == %u.%u.%u.%u, port %u\n",
+	       buffer, NIPQUAD(ip), port);
+
+	ret = nf_nat_mangle_tcp_packet(pskb, exp->master, ctinfo,
+				       matchoff, matchlen, buffer,
+				       strlen(buffer));
+	if (ret != NF_ACCEPT)
+		nf_conntrack_unexpect_related(exp);
+	return ret;
+}
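+
+/* Illustration: for a DCC offer the matched "<ip> <port>" pair is replaced
+ * with the reply-direction address as a decimal 32-bit value, e.g.
+ * 192.0.2.1 port 35000 becomes the string "3221225985 35000".
+ */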
+
+static void __exit nf_nat_irc_fini(void)
+{
+	rcu_assign_pointer(nf_nat_irc_hook, NULL);
+	synchronize_rcu();
+}
+
+static int __init nf_nat_irc_init(void)
+{
+	BUG_ON(rcu_dereference(nf_nat_irc_hook));
+	rcu_assign_pointer(nf_nat_irc_hook, help);
+	return 0;
+}
+
+/* Prior to 2.6.11, we had a ports param.  No longer, but don't break users. */
+static int warn_set(const char *val, struct kernel_param *kp)
+{
+	printk(KERN_INFO KBUILD_MODNAME
+	       ": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n");
+	return 0;
+}
+module_param_call(ports, warn_set, NULL, NULL, 0);
+
+module_init(nf_nat_irc_init);
+module_exit(nf_nat_irc_fini);
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
new file mode 100644
index 0000000..0ae45b7
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -0,0 +1,315 @@
+/*
+ * nf_nat_pptp.c
+ *
+ * NAT support for PPTP (Point to Point Tunneling Protocol).
+ * PPTP is a protocol for creating virtual private networks.
+ * It is a specification defined by Microsoft and some vendors
+ * working with Microsoft.  PPTP is built on top of a modified
+ * version of the Internet Generic Routing Encapsulation Protocol.
+ * GRE is defined in RFC 1701 and RFC 1702.  Documentation of
+ * PPTP can be found in RFC 2637
+ *
+ * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ *
+ * TODO: - NAT to a unique tuple, not to TCP source port
+ * 	   (needs netfilter tuple reservation)
+ */
+
+#include <linux/module.h>
+#include <linux/tcp.h>
+
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <linux/netfilter/nf_conntrack_proto_gre.h>
+#include <linux/netfilter/nf_conntrack_pptp.h>
+
+#define NF_NAT_PPTP_VERSION "3.0"
+
+#define REQ_CID(req, off)		(*(__be16 *)((char *)(req) + (off)))
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
+MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP");
+MODULE_ALIAS("ip_nat_pptp");
+
+#if 0
+extern const char *pptp_msg_name[];
+#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \
+				       __FUNCTION__, ## args)
+#else
+#define DEBUGP(format, args...)
+#endif
+
+static void pptp_nat_expected(struct nf_conn *ct,
+			      struct nf_conntrack_expect *exp)
+{
+	struct nf_conn *master = ct->master;
+	struct nf_conntrack_expect *other_exp;
+	struct nf_conntrack_tuple t;
+	struct nf_ct_pptp_master *ct_pptp_info;
+	struct nf_nat_pptp *nat_pptp_info;
+	struct ip_nat_range range;
+
+	ct_pptp_info = &nfct_help(master)->help.ct_pptp_info;
+	nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info;
+
+	/* And here goes the grand finale of corrosion... */
+	if (exp->dir == IP_CT_DIR_ORIGINAL) {
+		DEBUGP("we are PNS->PAC\n");
+		/* therefore, build tuple for PAC->PNS */
+		t.src.l3num = AF_INET;
+		t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip;
+		t.src.u.gre.key = ct_pptp_info->pac_call_id;
+		t.dst.u3.ip = master->tuplehash[!exp->dir].tuple.dst.u3.ip;
+		t.dst.u.gre.key = ct_pptp_info->pns_call_id;
+		t.dst.protonum = IPPROTO_GRE;
+	} else {
+		DEBUGP("we are PAC->PNS\n");
+		/* build tuple for PNS->PAC */
+		t.src.l3num = AF_INET;
+		t.src.u3.ip = master->tuplehash[exp->dir].tuple.src.u3.ip;
+		t.src.u.gre.key = nat_pptp_info->pns_call_id;
+		t.dst.u3.ip = master->tuplehash[exp->dir].tuple.dst.u3.ip;
+		t.dst.u.gre.key = nat_pptp_info->pac_call_id;
+		t.dst.protonum = IPPROTO_GRE;
+	}
+
+	DEBUGP("trying to unexpect other dir: ");
+	NF_CT_DUMP_TUPLE(&t);
+	other_exp = nf_conntrack_expect_find_get(&t);
+	if (other_exp) {
+		nf_conntrack_unexpect_related(other_exp);
+		nf_conntrack_expect_put(other_exp);
+		DEBUGP("success\n");
+	} else {
+		DEBUGP("not found!\n");
+	}
+
+	/* This must be a fresh one. */
+	BUG_ON(ct->status & IPS_NAT_DONE_MASK);
+
+	/* Change src to where master sends to */
+	range.flags = IP_NAT_RANGE_MAP_IPS;
+	range.min_ip = range.max_ip
+		= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
+	if (exp->dir == IP_CT_DIR_ORIGINAL) {
+		range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+		range.min = range.max = exp->saved_proto;
+	}
+	/* hook doesn't matter, but it has to do source manip */
+	nf_nat_setup_info(ct, &range, NF_IP_POST_ROUTING);
+
+	/* For DST manip, map port here to where it's expected. */
+	range.flags = IP_NAT_RANGE_MAP_IPS;
+	range.min_ip = range.max_ip
+		= ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
+	if (exp->dir == IP_CT_DIR_REPLY) {
+		range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+		range.min = range.max = exp->saved_proto;
+	}
+	/* hook doesn't matter, but it has to do destination manip */
+	nf_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING);
+}
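+
+/* As in nf_nat_follow_master(), the expected GRE connection gets a source
+ * manip followed by a destination manip; the saved call ID is only forced
+ * on the source manip for ORIGINAL-direction expectations and on the
+ * destination manip for REPLY-direction ones.
+ */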
+
+/* outbound packets == from PNS to PAC */
+static int
+pptp_outbound_pkt(struct sk_buff **pskb,
+		  struct nf_conn *ct,
+		  enum ip_conntrack_info ctinfo,
+		  struct PptpControlHeader *ctlh,
+		  union pptp_ctrl_union *pptpReq)
+
+{
+	struct nf_ct_pptp_master *ct_pptp_info;
+	struct nf_nat_pptp *nat_pptp_info;
+	u_int16_t msg;
+	__be16 new_callid;
+	unsigned int cid_off;
+
+	ct_pptp_info  = &nfct_help(ct)->help.ct_pptp_info;
+	nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;
+
+	new_callid = ct_pptp_info->pns_call_id;
+
+	switch (msg = ntohs(ctlh->messageType)) {
+	case PPTP_OUT_CALL_REQUEST:
+		cid_off = offsetof(union pptp_ctrl_union, ocreq.callID);
+		/* FIXME: ideally we would want to reserve a call ID
+		 * here.  current netfilter NAT core is not able to do
+		 * this :( For now we use TCP source port. This breaks
+		 * multiple calls within one control session */
+
+		/* save original call ID in nat_info */
+		nat_pptp_info->pns_call_id = ct_pptp_info->pns_call_id;
+
+		/* don't use tcph->source since we are at a DSTmanip
+		 * hook (e.g. PREROUTING) and pkt is not mangled yet */
+		new_callid = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port;
+
+		/* save new call ID in ct info */
+		ct_pptp_info->pns_call_id = new_callid;
+		break;
+	case PPTP_IN_CALL_REPLY:
+		cid_off = offsetof(union pptp_ctrl_union, icack.callID);
+		break;
+	case PPTP_CALL_CLEAR_REQUEST:
+		cid_off = offsetof(union pptp_ctrl_union, clrreq.callID);
+		break;
+	default:
+		DEBUGP("unknown outbound packet 0x%04x:%s\n", msg,
+		      (msg <= PPTP_MSG_MAX)?
+		      pptp_msg_name[msg]:pptp_msg_name[0]);
+		/* fall through */
+	case PPTP_SET_LINK_INFO:
+		/* only need to NAT in case PAC is behind NAT box */
+	case PPTP_START_SESSION_REQUEST:
+	case PPTP_START_SESSION_REPLY:
+	case PPTP_STOP_SESSION_REQUEST:
+	case PPTP_STOP_SESSION_REPLY:
+	case PPTP_ECHO_REQUEST:
+	case PPTP_ECHO_REPLY:
+		/* no need to alter packet */
+		return NF_ACCEPT;
+	}
+
+	/* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass
+	 * down to here */
+	DEBUGP("altering call id from 0x%04x to 0x%04x\n",
+		ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid));
+
+	/* mangle packet */
+	if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo,
+	                             cid_off + sizeof(struct pptp_pkt_hdr) +
+	                             sizeof(struct PptpControlHeader),
+	                             sizeof(new_callid), (char *)&new_callid,
+	                             sizeof(new_callid)) == 0)
+		return NF_DROP;
+	return NF_ACCEPT;
+}
+
+static void
+pptp_exp_gre(struct nf_conntrack_expect *expect_orig,
+	     struct nf_conntrack_expect *expect_reply)
+{
+	struct nf_conn *ct = expect_orig->master;
+	struct nf_ct_pptp_master *ct_pptp_info;
+	struct nf_nat_pptp *nat_pptp_info;
+
+	ct_pptp_info  = &nfct_help(ct)->help.ct_pptp_info;
+	nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;
+
+	/* save original PAC call ID in nat_info */
+	nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id;
+
+	/* alter expectation for PNS->PAC direction */
+	expect_orig->saved_proto.gre.key = ct_pptp_info->pns_call_id;
+	expect_orig->tuple.src.u.gre.key = nat_pptp_info->pns_call_id;
+	expect_orig->tuple.dst.u.gre.key = ct_pptp_info->pac_call_id;
+	expect_orig->dir = IP_CT_DIR_ORIGINAL;
+
+	/* alter expectation for PAC->PNS direction */
+	expect_reply->saved_proto.gre.key = nat_pptp_info->pns_call_id;
+	expect_reply->tuple.src.u.gre.key = nat_pptp_info->pac_call_id;
+	expect_reply->tuple.dst.u.gre.key = ct_pptp_info->pns_call_id;
+	expect_reply->dir = IP_CT_DIR_REPLY;
+}
+
+/* inbound packets == from PAC to PNS */
+static int
+pptp_inbound_pkt(struct sk_buff **pskb,
+		 struct nf_conn *ct,
+		 enum ip_conntrack_info ctinfo,
+		 struct PptpControlHeader *ctlh,
+		 union pptp_ctrl_union *pptpReq)
+{
+	struct nf_nat_pptp *nat_pptp_info;
+	u_int16_t msg;
+	__be16 new_pcid;
+	unsigned int pcid_off;
+
+	nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;
+	new_pcid = nat_pptp_info->pns_call_id;
+
+	switch (msg = ntohs(ctlh->messageType)) {
+	case PPTP_OUT_CALL_REPLY:
+		pcid_off = offsetof(union pptp_ctrl_union, ocack.peersCallID);
+		break;
+	case PPTP_IN_CALL_CONNECT:
+		pcid_off = offsetof(union pptp_ctrl_union, iccon.peersCallID);
+		break;
+	case PPTP_IN_CALL_REQUEST:
+		/* only need to nat in case PAC is behind NAT box */
+		return NF_ACCEPT;
+	case PPTP_WAN_ERROR_NOTIFY:
+		pcid_off = offsetof(union pptp_ctrl_union, wanerr.peersCallID);
+		break;
+	case PPTP_CALL_DISCONNECT_NOTIFY:
+		pcid_off = offsetof(union pptp_ctrl_union, disc.callID);
+		break;
+	case PPTP_SET_LINK_INFO:
+		pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID);
+		break;
+	default:
+		DEBUGP("unknown inbound packet %s\n", (msg <= PPTP_MSG_MAX)?
+			pptp_msg_name[msg]:pptp_msg_name[0]);
+		/* fall through */
+	case PPTP_START_SESSION_REQUEST:
+	case PPTP_START_SESSION_REPLY:
+	case PPTP_STOP_SESSION_REQUEST:
+	case PPTP_STOP_SESSION_REPLY:
+	case PPTP_ECHO_REQUEST:
+	case PPTP_ECHO_REPLY:
+		/* no need to alter packet */
+		return NF_ACCEPT;
+	}
+
+	/* only OUT_CALL_REPLY, IN_CALL_CONNECT, IN_CALL_REQUEST,
+	 * WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */
+
+	/* mangle packet */
+	DEBUGP("altering peer call id from 0x%04x to 0x%04x\n",
+		ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid));
+
+	if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo,
+	                             pcid_off + sizeof(struct pptp_pkt_hdr) +
+				     sizeof(struct PptpControlHeader),
+				     sizeof(new_pcid), (char *)&new_pcid,
+				     sizeof(new_pcid)) == 0)
+		return NF_DROP;
+	return NF_ACCEPT;
+}
+
+static int __init nf_nat_helper_pptp_init(void)
+{
+	nf_nat_need_gre();
+
+	BUG_ON(rcu_dereference(nf_nat_pptp_hook_outbound));
+	rcu_assign_pointer(nf_nat_pptp_hook_outbound, pptp_outbound_pkt);
+
+	BUG_ON(rcu_dereference(nf_nat_pptp_hook_inbound));
+	rcu_assign_pointer(nf_nat_pptp_hook_inbound, pptp_inbound_pkt);
+
+	BUG_ON(rcu_dereference(nf_nat_pptp_hook_exp_gre));
+	rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, pptp_exp_gre);
+
+	BUG_ON(rcu_dereference(nf_nat_pptp_hook_expectfn));
+	rcu_assign_pointer(nf_nat_pptp_hook_expectfn, pptp_nat_expected);
+	return 0;
+}
+
+static void __exit nf_nat_helper_pptp_fini(void)
+{
+	rcu_assign_pointer(nf_nat_pptp_hook_expectfn, NULL);
+	rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, NULL);
+	rcu_assign_pointer(nf_nat_pptp_hook_inbound, NULL);
+	rcu_assign_pointer(nf_nat_pptp_hook_outbound, NULL);
+	synchronize_rcu();
+}
+
+module_init(nf_nat_helper_pptp_init);
+module_exit(nf_nat_helper_pptp_fini);
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c
new file mode 100644
index 0000000..d3de579
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_proto_gre.c
@@ -0,0 +1,179 @@
+/*
+ * nf_nat_proto_gre.c
+ *
+ * NAT protocol helper module for GRE.
+ *
+ * GRE is a generic encapsulation protocol, which is generally not very
+ * well suited for NAT, as it has no protocol-specific part such as port
+ * numbers.
+ *
+ * It has an optional key field, which may help us distinguish two
+ * connections between the same two hosts.
+ *
+ * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784
+ *
+ * PPTP is built on top of a modified version of GRE, and has a mandatory
+ * field called "CallID", which serves us for the same purpose as the key
+ * field in plain GRE.
+ *
+ * Documentation about PPTP can be found in RFC 2637
+ *
+ * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_nat_protocol.h>
+#include <linux/netfilter/nf_conntrack_proto_gre.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
+MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
+
+#if 0
+#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \
+				       __FUNCTION__, ## args)
+#else
+#define DEBUGP(x, args...)
+#endif
+
+/* is key in given range between min and max */
+static int
+gre_in_range(const struct nf_conntrack_tuple *tuple,
+	     enum nf_nat_manip_type maniptype,
+	     const union nf_conntrack_man_proto *min,
+	     const union nf_conntrack_man_proto *max)
+{
+	__be16 key;
+
+	if (maniptype == IP_NAT_MANIP_SRC)
+		key = tuple->src.u.gre.key;
+	else
+		key = tuple->dst.u.gre.key;
+
+	return ntohs(key) >= ntohs(min->gre.key) &&
+	       ntohs(key) <= ntohs(max->gre.key);
+}
+
+/* generate unique tuple ... */
+static int
+gre_unique_tuple(struct nf_conntrack_tuple *tuple,
+		 const struct nf_nat_range *range,
+		 enum nf_nat_manip_type maniptype,
+		 const struct nf_conn *conntrack)
+{
+	static u_int16_t key;
+	__be16 *keyptr;
+	unsigned int min, i, range_size;
+
+	if (maniptype == IP_NAT_MANIP_SRC)
+		keyptr = &tuple->src.u.gre.key;
+	else
+		keyptr = &tuple->dst.u.gre.key;
+
+	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
+		DEBUGP("%p: NATing GRE PPTP\n", conntrack);
+		min = 1;
+		range_size = 0xffff;
+	} else {
+		min = ntohs(range->min.gre.key);
+		range_size = ntohs(range->max.gre.key) - min + 1;
+	}
+
+	DEBUGP("min = %u, range_size = %u\n", min, range_size);
+
+	for (i = 0; i < range_size; i++, key++) {
+		*keyptr = htons(min + key % range_size);
+		if (!nf_nat_used_tuple(tuple, conntrack))
+			return 1;
+	}
+
+	DEBUGP("%p: no NAT mapping\n", conntrack);
+	return 0;
+}
+
+/* manipulate a GRE packet according to maniptype */
+static int
+gre_manip_pkt(struct sk_buff **pskb, unsigned int iphdroff,
+	      const struct nf_conntrack_tuple *tuple,
+	      enum nf_nat_manip_type maniptype)
+{
+	struct gre_hdr *greh;
+	struct gre_hdr_pptp *pgreh;
+	struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff);
+	unsigned int hdroff = iphdroff + iph->ihl * 4;
+
+	/* pgreh includes two optional 32bit fields which are not required
+	 * to be there.  That's where the magic '8' comes from */
+	if (!skb_make_writable(pskb, hdroff + sizeof(*pgreh) - 8))
+		return 0;
+
+	greh = (void *)(*pskb)->data + hdroff;
+	pgreh = (struct gre_hdr_pptp *)greh;
+
+	/* we only have destination manip of a packet, since 'source key'
+	 * is not present in the packet itself */
+	if (maniptype != IP_NAT_MANIP_DST)
+		return 1;
+	switch (greh->version) {
+	case 0:
+		if (!greh->key) {
+			DEBUGP("can't nat GRE w/o key\n");
+			break;
+		}
+		if (greh->csum) {
+			/* FIXME: Never tested this code... */
+			nf_proto_csum_replace4(gre_csum(greh), *pskb,
+					       *(gre_key(greh)),
+					       tuple->dst.u.gre.key, 0);
+		}
+		*(gre_key(greh)) = tuple->dst.u.gre.key;
+		break;
+	case GRE_VERSION_PPTP:
+		DEBUGP("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key));
+		pgreh->call_id = tuple->dst.u.gre.key;
+		break;
+	default:
+		DEBUGP("can't nat unknown GRE version\n");
+		return 0;
+	}
+	return 1;
+}
+
+static struct nf_nat_protocol gre __read_mostly = {
+	.name			= "GRE",
+	.protonum		= IPPROTO_GRE,
+	.manip_pkt		= gre_manip_pkt,
+	.in_range		= gre_in_range,
+	.unique_tuple		= gre_unique_tuple,
+#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
+    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+	.range_to_nfattr	= nf_nat_port_range_to_nfattr,
+	.nfattr_to_range	= nf_nat_port_nfattr_to_range,
+#endif
+};
+
+int __init nf_nat_proto_gre_init(void)
+{
+	return nf_nat_protocol_register(&gre);
+}
+
+void __exit nf_nat_proto_gre_fini(void)
+{
+	nf_nat_protocol_unregister(&gre);
+}
+
+module_init(nf_nat_proto_gre_init);
+module_exit(nf_nat_proto_gre_fini);
+
+void nf_nat_need_gre(void)
+{
+	return;
+}
+EXPORT_SYMBOL_GPL(nf_nat_need_gre);
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
new file mode 100644
index 0000000..dcfd772
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c
@@ -0,0 +1,86 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/ip.h>
+#include <linux/icmp.h>
+
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_nat_protocol.h>
+
+static int
+icmp_in_range(const struct nf_conntrack_tuple *tuple,
+	      enum nf_nat_manip_type maniptype,
+	      const union nf_conntrack_man_proto *min,
+	      const union nf_conntrack_man_proto *max)
+{
+	return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
+	       ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
+}
+
+static int
+icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
+		  const struct nf_nat_range *range,
+		  enum nf_nat_manip_type maniptype,
+		  const struct nf_conn *ct)
+{
+	static u_int16_t id;
+	unsigned int range_size;
+	unsigned int i;
+
+	range_size = ntohs(range->max.icmp.id) - ntohs(range->min.icmp.id) + 1;
+	/* If no range specified... */
+	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED))
+		range_size = 0xFFFF;
+
+	for (i = 0; i < range_size; i++, id++) {
+		tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
+		                             (id % range_size));
+		if (!nf_nat_used_tuple(tuple, ct))
+			return 1;
+	}
+	return 0;
+}
+
+static int
+icmp_manip_pkt(struct sk_buff **pskb,
+	       unsigned int iphdroff,
+	       const struct nf_conntrack_tuple *tuple,
+	       enum nf_nat_manip_type maniptype)
+{
+	struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff);
+	struct icmphdr *hdr;
+	unsigned int hdroff = iphdroff + iph->ihl*4;
+
+	if (!skb_make_writable(pskb, hdroff + sizeof(*hdr)))
+		return 0;
+
+	hdr = (struct icmphdr *)((*pskb)->data + hdroff);
+	nf_proto_csum_replace2(&hdr->checksum, *pskb,
+			       hdr->un.echo.id, tuple->src.u.icmp.id, 0);
+	hdr->un.echo.id = tuple->src.u.icmp.id;
+	return 1;
+}
+
+struct nf_nat_protocol nf_nat_protocol_icmp = {
+	.name			= "ICMP",
+	.protonum		= IPPROTO_ICMP,
+	.me			= THIS_MODULE,
+	.manip_pkt		= icmp_manip_pkt,
+	.in_range		= icmp_in_range,
+	.unique_tuple		= icmp_unique_tuple,
+#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
+    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+	.range_to_nfattr	= nf_nat_port_range_to_nfattr,
+	.nfattr_to_range	= nf_nat_port_nfattr_to_range,
+#endif
+};
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c
new file mode 100644
index 0000000..7e26a7e
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c
@@ -0,0 +1,148 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink_conntrack.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_core.h>
+
+static int
+tcp_in_range(const struct nf_conntrack_tuple *tuple,
+	     enum nf_nat_manip_type maniptype,
+	     const union nf_conntrack_man_proto *min,
+	     const union nf_conntrack_man_proto *max)
+{
+	__be16 port;
+
+	if (maniptype == IP_NAT_MANIP_SRC)
+		port = tuple->src.u.tcp.port;
+	else
+		port = tuple->dst.u.tcp.port;
+
+	return ntohs(port) >= ntohs(min->tcp.port) &&
+	       ntohs(port) <= ntohs(max->tcp.port);
+}
+
+static int
+tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
+		 const struct nf_nat_range *range,
+		 enum nf_nat_manip_type maniptype,
+		 const struct nf_conn *ct)
+{
+	static u_int16_t port;
+	__be16 *portptr;
+	unsigned int range_size, min, i;
+
+	if (maniptype == IP_NAT_MANIP_SRC)
+		portptr = &tuple->src.u.tcp.port;
+	else
+		portptr = &tuple->dst.u.tcp.port;
+
+	/* If no range specified... */
+	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
+		/* If it's dst rewrite, can't change port */
+		if (maniptype == IP_NAT_MANIP_DST)
+			return 0;
+
+		/* Map privileged onto privileged. */
+		if (ntohs(*portptr) < 1024) {
+			/* Loose convention: >> 512 is credential passing */
+			if (ntohs(*portptr)<512) {
+				min = 1;
+				range_size = 511 - min + 1;
+			} else {
+				min = 600;
+				range_size = 1023 - min + 1;
+			}
+		} else {
+			min = 1024;
+			range_size = 65535 - 1024 + 1;
+		}
+	} else {
+		min = ntohs(range->min.tcp.port);
+		range_size = ntohs(range->max.tcp.port) - min + 1;
+	}
+
+	for (i = 0; i < range_size; i++, port++) {
+		*portptr = htons(min + port % range_size);
+		if (!nf_nat_used_tuple(tuple, ct))
+			return 1;
+	}
+	return 0;
+}
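+
+/* Illustration: with no explicit range, a source port below 512 is remapped
+ * within 1-511, one in 512-1023 within 600-1023, and anything else within
+ * 1024-65535; the static 'port' counter only rotates the starting candidate
+ * so consecutive connections don't all probe the same port first.
+ */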
+
+static int
+tcp_manip_pkt(struct sk_buff **pskb,
+	      unsigned int iphdroff,
+	      const struct nf_conntrack_tuple *tuple,
+	      enum nf_nat_manip_type maniptype)
+{
+	struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff);
+	struct tcphdr *hdr;
+	unsigned int hdroff = iphdroff + iph->ihl*4;
+	__be32 oldip, newip;
+	__be16 *portptr, newport, oldport;
+	int hdrsize = 8; /* TCP connection tracking guarantees this much */
+
+	/* this could be an inner header returned in an ICMP packet; in such
+	   cases we cannot update the checksum field since it is outside of
+	   the 8 bytes of transport layer headers we are guaranteed */
+	if ((*pskb)->len >= hdroff + sizeof(struct tcphdr))
+		hdrsize = sizeof(struct tcphdr);
+
+	if (!skb_make_writable(pskb, hdroff + hdrsize))
+		return 0;
+
+	iph = (struct iphdr *)((*pskb)->data + iphdroff);
+	hdr = (struct tcphdr *)((*pskb)->data + hdroff);
+
+	if (maniptype == IP_NAT_MANIP_SRC) {
+		/* Get rid of src ip and src pt */
+		oldip = iph->saddr;
+		newip = tuple->src.u3.ip;
+		newport = tuple->src.u.tcp.port;
+		portptr = &hdr->source;
+	} else {
+		/* Get rid of dst ip and dst pt */
+		oldip = iph->daddr;
+		newip = tuple->dst.u3.ip;
+		newport = tuple->dst.u.tcp.port;
+		portptr = &hdr->dest;
+	}
+
+	oldport = *portptr;
+	*portptr = newport;
+
+	if (hdrsize < sizeof(*hdr))
+		return 1;
+
+	nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1);
+	nf_proto_csum_replace2(&hdr->check, *pskb, oldport, newport, 0);
+	return 1;
+}
+
+struct nf_nat_protocol nf_nat_protocol_tcp = {
+	.name			= "TCP",
+	.protonum		= IPPROTO_TCP,
+	.me			= THIS_MODULE,
+	.manip_pkt		= tcp_manip_pkt,
+	.in_range		= tcp_in_range,
+	.unique_tuple		= tcp_unique_tuple,
+#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
+    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+	.range_to_nfattr	= nf_nat_port_range_to_nfattr,
+	.nfattr_to_range	= nf_nat_port_nfattr_to_range,
+#endif
+};
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c
new file mode 100644
index 0000000..ab0ce4c
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_proto_udp.c
@@ -0,0 +1,138 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_nat_protocol.h>
+
+static int
+udp_in_range(const struct nf_conntrack_tuple *tuple,
+	     enum nf_nat_manip_type maniptype,
+	     const union nf_conntrack_man_proto *min,
+	     const union nf_conntrack_man_proto *max)
+{
+	__be16 port;
+
+	if (maniptype == IP_NAT_MANIP_SRC)
+		port = tuple->src.u.udp.port;
+	else
+		port = tuple->dst.u.udp.port;
+
+	return ntohs(port) >= ntohs(min->udp.port) &&
+	       ntohs(port) <= ntohs(max->udp.port);
+}
+
+static int
+udp_unique_tuple(struct nf_conntrack_tuple *tuple,
+		 const struct nf_nat_range *range,
+		 enum nf_nat_manip_type maniptype,
+		 const struct nf_conn *ct)
+{
+	static u_int16_t port;
+	__be16 *portptr;
+	unsigned int range_size, min, i;
+
+	if (maniptype == IP_NAT_MANIP_SRC)
+		portptr = &tuple->src.u.udp.port;
+	else
+		portptr = &tuple->dst.u.udp.port;
+
+	/* If no range specified... */
+	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
+		/* If it's dst rewrite, can't change port */
+		if (maniptype == IP_NAT_MANIP_DST)
+			return 0;
+
+		if (ntohs(*portptr) < 1024) {
+			/* Loose convention: >> 512 is credential passing */
+			if (ntohs(*portptr)<512) {
+				min = 1;
+				range_size = 511 - min + 1;
+			} else {
+				min = 600;
+				range_size = 1023 - min + 1;
+			}
+		} else {
+			min = 1024;
+			range_size = 65535 - 1024 + 1;
+		}
+	} else {
+		min = ntohs(range->min.udp.port);
+		range_size = ntohs(range->max.udp.port) - min + 1;
+	}
+
+	for (i = 0; i < range_size; i++, port++) {
+		*portptr = htons(min + port % range_size);
+		if (!nf_nat_used_tuple(tuple, ct))
+			return 1;
+	}
+	return 0;
+}
+
+static int
+udp_manip_pkt(struct sk_buff **pskb,
+	      unsigned int iphdroff,
+	      const struct nf_conntrack_tuple *tuple,
+	      enum nf_nat_manip_type maniptype)
+{
+	struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff);
+	struct udphdr *hdr;
+	unsigned int hdroff = iphdroff + iph->ihl*4;
+	__be32 oldip, newip;
+	__be16 *portptr, newport;
+
+	if (!skb_make_writable(pskb, hdroff + sizeof(*hdr)))
+		return 0;
+
+	iph = (struct iphdr *)((*pskb)->data + iphdroff);
+	hdr = (struct udphdr *)((*pskb)->data + hdroff);
+
+	if (maniptype == IP_NAT_MANIP_SRC) {
+		/* Get rid of src ip and src pt */
+		oldip = iph->saddr;
+		newip = tuple->src.u3.ip;
+		newport = tuple->src.u.udp.port;
+		portptr = &hdr->source;
+	} else {
+		/* Get rid of dst ip and dst pt */
+		oldip = iph->daddr;
+		newip = tuple->dst.u3.ip;
+		newport = tuple->dst.u.udp.port;
+		portptr = &hdr->dest;
+	}
+	if (hdr->check || (*pskb)->ip_summed == CHECKSUM_PARTIAL) {
+		nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1);
+		nf_proto_csum_replace2(&hdr->check, *pskb, *portptr, newport,
+				       0);
+		if (!hdr->check)
+			hdr->check = CSUM_MANGLED_0;
+	}
+	*portptr = newport;
+	return 1;
+}
+
+struct nf_nat_protocol nf_nat_protocol_udp = {
+	.name			= "UDP",
+	.protonum		= IPPROTO_UDP,
+	.me			= THIS_MODULE,
+	.manip_pkt		= udp_manip_pkt,
+	.in_range		= udp_in_range,
+	.unique_tuple		= udp_unique_tuple,
+#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
+    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+	.range_to_nfattr	= nf_nat_port_range_to_nfattr,
+	.nfattr_to_range	= nf_nat_port_nfattr_to_range,
+#endif
+};
diff --git a/net/ipv4/netfilter/nf_nat_proto_unknown.c b/net/ipv4/netfilter/nf_nat_proto_unknown.c
new file mode 100644
index 0000000..f50d020
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_proto_unknown.c
@@ -0,0 +1,54 @@
+/* The "unknown" protocol.  This is what is used for protocols we
+ * don't understand.  It's returned by ip_ct_find_proto().
+ */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_nat_protocol.h>
+
+static int unknown_in_range(const struct nf_conntrack_tuple *tuple,
+			    enum nf_nat_manip_type manip_type,
+			    const union nf_conntrack_man_proto *min,
+			    const union nf_conntrack_man_proto *max)
+{
+	return 1;
+}
+
+static int unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
+				const struct nf_nat_range *range,
+				enum nf_nat_manip_type maniptype,
+				const struct nf_conn *ct)
+{
+	/* Sorry: we can't help you; if it's not unique, we can't frob
+	   anything. */
+	return 0;
+}
+
+static int
+unknown_manip_pkt(struct sk_buff **pskb,
+		  unsigned int iphdroff,
+		  const struct nf_conntrack_tuple *tuple,
+		  enum nf_nat_manip_type maniptype)
+{
+	return 1;
+}
+
+struct nf_nat_protocol nf_nat_unknown_protocol = {
+	.name			= "unknown",
+	/* .me isn't set: getting a ref to this cannot fail. */
+	.manip_pkt		= unknown_manip_pkt,
+	.in_range		= unknown_in_range,
+	.unique_tuple		= unknown_unique_tuple,
+};
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
new file mode 100644
index 0000000..b868ee0
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -0,0 +1,343 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Everything about the rules for NAT. */
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/skbuff.h>
+#include <linux/proc_fs.h>
+#include <net/checksum.h>
+#include <net/route.h>
+#include <linux/bitops.h>
+
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_rule.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+#define NAT_VALID_HOOKS ((1<<NF_IP_PRE_ROUTING) | (1<<NF_IP_POST_ROUTING) | (1<<NF_IP_LOCAL_OUT))
+
+static struct
+{
+	struct ipt_replace repl;
+	struct ipt_standard entries[3];
+	struct ipt_error term;
+} nat_initial_table __initdata = {
+	.repl = {
+		.name = "nat",
+		.valid_hooks = NAT_VALID_HOOKS,
+		.num_entries = 4,
+		.size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
+		.hook_entry = {
+			[NF_IP_PRE_ROUTING] = 0,
+			[NF_IP_POST_ROUTING] = sizeof(struct ipt_standard),
+			[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 },
+		.underflow = {
+			[NF_IP_PRE_ROUTING] = 0,
+			[NF_IP_POST_ROUTING] = sizeof(struct ipt_standard),
+			[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 },
+	},
+	.entries = {
+		/* PRE_ROUTING */
+		{
+			.entry = {
+	    			.target_offset = sizeof(struct ipt_entry),
+	    			.next_offset = sizeof(struct ipt_standard),
+			},
+			.target = {
+				.target = {
+					.u = {
+						.target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)),
+					},
+				},
+				.verdict = -NF_ACCEPT - 1,
+			},
+		},
+		/* POST_ROUTING */
+		{
+			.entry = {
+	    			.target_offset = sizeof(struct ipt_entry),
+	    			.next_offset = sizeof(struct ipt_standard),
+			},
+			.target = {
+				.target = {
+					.u = {
+						.target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)),
+					},
+				},
+				.verdict = -NF_ACCEPT - 1,
+			},
+		},
+		/* LOCAL_OUT */
+		{
+			.entry = {
+	    			.target_offset = sizeof(struct ipt_entry),
+	    			.next_offset = sizeof(struct ipt_standard),
+			},
+			.target = {
+				.target = {
+					.u = {
+						.target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)),
+					},
+				},
+				.verdict = -NF_ACCEPT - 1,
+			},
+		},
+	},
+	/* ERROR */
+	.term = {
+		.entry = {
+			.target_offset = sizeof(struct ipt_entry),
+			.next_offset = sizeof(struct ipt_error),
+		},
+		.target = {
+			.target = {
+				.u = {
+					.user = {
+						.target_size = IPT_ALIGN(sizeof(struct ipt_error_target)),
+						.name = IPT_ERROR_TARGET,
+					},
+				},
+			},
+			.errorname = "ERROR",
+		},
+	}
+};
+
+static struct ipt_table nat_table = {
+	.name		= "nat",
+	.valid_hooks	= NAT_VALID_HOOKS,
+	.lock		= RW_LOCK_UNLOCKED,
+	.me		= THIS_MODULE,
+	.af		= AF_INET,
+};
+
+/* Source NAT */
+static unsigned int ipt_snat_target(struct sk_buff **pskb,
+				    const struct net_device *in,
+				    const struct net_device *out,
+				    unsigned int hooknum,
+				    const struct xt_target *target,
+				    const void *targinfo)
+{
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+	const struct nf_nat_multi_range_compat *mr = targinfo;
+
+	NF_CT_ASSERT(hooknum == NF_IP_POST_ROUTING);
+
+	ct = nf_ct_get(*pskb, &ctinfo);
+
+	/* Connection must be valid and new. */
+	NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
+	                    ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
+	NF_CT_ASSERT(out);
+
+	return nf_nat_setup_info(ct, &mr->range[0], hooknum);
+}
+
+/* Before 2.6.11 we did implicit source NAT if required. Warn about change. */
+static void warn_if_extra_mangle(__be32 dstip, __be32 srcip)
+{
+	static int warned = 0;
+	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dstip } } };
+	struct rtable *rt;
+
+	if (ip_route_output_key(&rt, &fl) != 0)
+		return;
+
+	if (rt->rt_src != srcip && !warned) {
+		printk("NAT: no longer support implicit source local NAT\n");
+		printk("NAT: packet src %u.%u.%u.%u -> dst %u.%u.%u.%u\n",
+		       NIPQUAD(srcip), NIPQUAD(dstip));
+		warned = 1;
+	}
+	ip_rt_put(rt);
+}
+
+static unsigned int ipt_dnat_target(struct sk_buff **pskb,
+				    const struct net_device *in,
+				    const struct net_device *out,
+				    unsigned int hooknum,
+				    const struct xt_target *target,
+				    const void *targinfo)
+{
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+	const struct nf_nat_multi_range_compat *mr = targinfo;
+
+	NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING ||
+		     hooknum == NF_IP_LOCAL_OUT);
+
+	ct = nf_ct_get(*pskb, &ctinfo);
+
+	/* Connection must be valid and new. */
+	NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
+
+	if (hooknum == NF_IP_LOCAL_OUT &&
+	    mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)
+		warn_if_extra_mangle((*pskb)->nh.iph->daddr,
+				     mr->range[0].min_ip);
+
+	return nf_nat_setup_info(ct, &mr->range[0], hooknum);
+}
+
+static int ipt_snat_checkentry(const char *tablename,
+			       const void *entry,
+			       const struct xt_target *target,
+			       void *targinfo,
+			       unsigned int hook_mask)
+{
+	struct nf_nat_multi_range_compat *mr = targinfo;
+
+	/* Must be a valid range */
+	if (mr->rangesize != 1) {
+		printk("SNAT: multiple ranges no longer supported\n");
+		return 0;
+	}
+	return 1;
+}
+
+static int ipt_dnat_checkentry(const char *tablename,
+			       const void *entry,
+			       const struct xt_target *target,
+			       void *targinfo,
+			       unsigned int hook_mask)
+{
+	struct nf_nat_multi_range_compat *mr = targinfo;
+
+	/* Must be a valid range */
+	if (mr->rangesize != 1) {
+		printk("DNAT: multiple ranges no longer supported\n");
+		return 0;
+	}
+	return 1;
+}
+
+inline unsigned int
+alloc_null_binding(struct nf_conn *ct,
+		   struct nf_nat_info *info,
+		   unsigned int hooknum)
+{
+	/* Force range to this IP; let proto decide mapping for
+	   per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
+	   Use reply in case it's already been mangled (eg local packet).
+	*/
+	__be32 ip
+		= (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
+		   ? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip
+		   : ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
+	struct nf_nat_range range
+		= { IP_NAT_RANGE_MAP_IPS, ip, ip, { 0 }, { 0 } };
+
+	DEBUGP("Allocating NULL binding for %p (%u.%u.%u.%u)\n",
+	       ct, NIPQUAD(ip));
+	return nf_nat_setup_info(ct, &range, hooknum);
+}
+
+unsigned int
+alloc_null_binding_confirmed(struct nf_conn *ct,
+                             struct nf_nat_info *info,
+                             unsigned int hooknum)
+{
+	__be32 ip
+		= (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
+		   ? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip
+		   : ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
+	u_int16_t all
+		= (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
+		   ? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.all
+		   : ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.all);
+	struct nf_nat_range range
+		= { IP_NAT_RANGE_MAP_IPS, ip, ip, { all }, { all } };
+
+	DEBUGP("Allocating NULL binding for confirmed %p (%u.%u.%u.%u)\n",
+	       ct, NIPQUAD(ip));
+	return nf_nat_setup_info(ct, &range, hooknum);
+}
+
+int nf_nat_rule_find(struct sk_buff **pskb,
+		     unsigned int hooknum,
+		     const struct net_device *in,
+		     const struct net_device *out,
+		     struct nf_conn *ct,
+		     struct nf_nat_info *info)
+{
+	int ret;
+
+	ret = ipt_do_table(pskb, hooknum, in, out, &nat_table);
+
+	if (ret == NF_ACCEPT) {
+		if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
+			/* NULL mapping */
+			ret = alloc_null_binding(ct, info, hooknum);
+	}
+	return ret;
+}
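+
+/* Note: packets that traverse the "nat" table without hitting an SNAT/DNAT
+ * rule still get a null binding above, so every connection seen by the NAT
+ * hooks ends up with some binding set up.
+ */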
+
+static struct ipt_target ipt_snat_reg = {
+	.name		= "SNAT",
+	.target		= ipt_snat_target,
+	.targetsize	= sizeof(struct nf_nat_multi_range_compat),
+	.table		= "nat",
+	.hooks		= 1 << NF_IP_POST_ROUTING,
+	.checkentry	= ipt_snat_checkentry,
+	.family		= AF_INET,
+};
+
+static struct xt_target ipt_dnat_reg = {
+	.name		= "DNAT",
+	.target		= ipt_dnat_target,
+	.targetsize	= sizeof(struct nf_nat_multi_range_compat),
+	.table		= "nat",
+	.hooks		= (1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_OUT),
+	.checkentry	= ipt_dnat_checkentry,
+	.family		= AF_INET,
+};
+
+int __init nf_nat_rule_init(void)
+{
+	int ret;
+
+	ret = ipt_register_table(&nat_table, &nat_initial_table.repl);
+	if (ret != 0)
+		return ret;
+	ret = xt_register_target(&ipt_snat_reg);
+	if (ret != 0)
+		goto unregister_table;
+
+	ret = xt_register_target(&ipt_dnat_reg);
+	if (ret != 0)
+		goto unregister_snat;
+
+	return ret;
+
+ unregister_snat:
+	xt_unregister_target(&ipt_snat_reg);
+ unregister_table:
+	ipt_unregister_table(&nat_table);
+
+	return ret;
+}
+
+void nf_nat_rule_cleanup(void)
+{
+	xt_unregister_target(&ipt_dnat_reg);
+	xt_unregister_target(&ipt_snat_reg);
+	ipt_unregister_table(&nat_table);
+}
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
new file mode 100644
index 0000000..3d524b9
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -0,0 +1,283 @@
+/* SIP extension for UDP NAT alteration.
+ *
+ * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
+ * based on RR's ip_nat_ftp.c and other modules.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <linux/netfilter/nf_conntrack_sip.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>");
+MODULE_DESCRIPTION("SIP NAT helper");
+MODULE_ALIAS("ip_nat_sip");
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+struct addr_map {
+	struct {
+		char		src[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
+		char		dst[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
+		unsigned int	srclen, srciplen;
+		unsigned int	dstlen, dstiplen;
+	} addr[IP_CT_DIR_MAX];
+};
+
+static void addr_map_init(struct nf_conn *ct, struct addr_map *map)
+{
+	struct nf_conntrack_tuple *t;
+	enum ip_conntrack_dir dir;
+	unsigned int n;
+
+	for (dir = 0; dir < IP_CT_DIR_MAX; dir++) {
+		t = &ct->tuplehash[dir].tuple;
+
+		n = sprintf(map->addr[dir].src, "%u.%u.%u.%u",
+			    NIPQUAD(t->src.u3.ip));
+		map->addr[dir].srciplen = n;
+		n += sprintf(map->addr[dir].src + n, ":%u",
+			     ntohs(t->src.u.udp.port));
+		map->addr[dir].srclen = n;
+
+		n = sprintf(map->addr[dir].dst, "%u.%u.%u.%u",
+			    NIPQUAD(t->dst.u3.ip));
+		map->addr[dir].dstiplen = n;
+		n += sprintf(map->addr[dir].dst + n, ":%u",
+			     ntohs(t->dst.u.udp.port));
+		map->addr[dir].dstlen = n;
+	}
+}
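+
+/* Illustration: for a NATed call the ORIGINAL direction might hold
+ * src = "192.168.1.2:5060", dst = "198.51.100.1:5060", while the REPLY
+ * direction holds the post-NAT pair; map_sip_addr() below replaces a
+ * matched address with its counterpart from the opposite direction.
+ */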
+
+static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo,
+			struct nf_conn *ct, const char **dptr, size_t dlen,
+			enum sip_header_pos pos, struct addr_map *map)
+{
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	unsigned int matchlen, matchoff, addrlen;
+	char *addr;
+
+	if (ct_sip_get_info(ct, *dptr, dlen, &matchoff, &matchlen, pos) <= 0)
+		return 1;
+
+	if ((matchlen == map->addr[dir].srciplen ||
+	     matchlen == map->addr[dir].srclen) &&
+	    memcmp(*dptr + matchoff, map->addr[dir].src, matchlen) == 0) {
+		addr    = map->addr[!dir].dst;
+		addrlen = map->addr[!dir].dstlen;
+	} else if ((matchlen == map->addr[dir].dstiplen ||
+		    matchlen == map->addr[dir].dstlen) &&
+		   memcmp(*dptr + matchoff, map->addr[dir].dst, matchlen) == 0) {
+		addr    = map->addr[!dir].src;
+		addrlen = map->addr[!dir].srclen;
+	} else
+		return 1;
+
+	if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo,
+	                              matchoff, matchlen, addr, addrlen))
+		return 0;
+	*dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
+	return 1;
+
+}
+
+static unsigned int ip_nat_sip(struct sk_buff **pskb,
+			       enum ip_conntrack_info ctinfo,
+			       struct nf_conn *ct,
+			       const char **dptr)
+{
+	enum sip_header_pos pos;
+	struct addr_map map;
+	int dataoff, datalen;
+
+	dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
+	datalen = (*pskb)->len - dataoff;
+	if (datalen < sizeof("SIP/2.0") - 1)
+		return NF_DROP;
+
+	addr_map_init(ct, &map);
+
+	/* Basic rules: requests and responses. */
+	if (strncmp(*dptr, "SIP/2.0", sizeof("SIP/2.0") - 1) != 0) {
+		/* 10.2: Constructing the REGISTER Request:
+		 *
+		 * The "userinfo" and "@" components of the SIP URI MUST NOT
+		 * be present.
+		 */
+		if (datalen >= sizeof("REGISTER") - 1 &&
+		    strncmp(*dptr, "REGISTER", sizeof("REGISTER") - 1) == 0)
+			pos = POS_REG_REQ_URI;
+		else
+			pos = POS_REQ_URI;
+
+		if (!map_sip_addr(pskb, ctinfo, ct, dptr, datalen, pos, &map))
+			return NF_DROP;
+	}
+
+	if (!map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_FROM, &map) ||
+	    !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_TO, &map) ||
+	    !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_VIA, &map) ||
+	    !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_CONTACT, &map))
+		return NF_DROP;
+	return NF_ACCEPT;
+}
+
+static unsigned int mangle_sip_packet(struct sk_buff **pskb,
+				      enum ip_conntrack_info ctinfo,
+				      struct nf_conn *ct,
+				      const char **dptr, size_t dlen,
+				      char *buffer, int bufflen,
+				      enum sip_header_pos pos)
+{
+	unsigned int matchlen, matchoff;
+
+	if (ct_sip_get_info(ct, *dptr, dlen, &matchoff, &matchlen, pos) <= 0)
+		return 0;
+
+	if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo,
+	                              matchoff, matchlen, buffer, bufflen))
+		return 0;
+
+	/* We need to reload this. Thanks Patrick. */
+	*dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
+	return 1;
+}
+
+static int mangle_content_len(struct sk_buff **pskb,
+			      enum ip_conntrack_info ctinfo,
+			      struct nf_conn *ct,
+			      const char *dptr)
+{
+	unsigned int dataoff, matchoff, matchlen;
+	char buffer[sizeof("65536")];
+	int bufflen;
+
+	dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
+
+	/* Get the actual SDP length */
+	if (ct_sip_get_info(ct, dptr, (*pskb)->len - dataoff, &matchoff,
+	                    &matchlen, POS_SDP_HEADER) > 0) {
+
+		/* since ct_sip_get_info() gives us a pointer past 'v=',
+		   we need to add 2 bytes to this count. */
+		int c_len = (*pskb)->len - dataoff - matchoff + 2;
+
+		/* Now, update SDP length */
+		if (ct_sip_get_info(ct, dptr, (*pskb)->len - dataoff, &matchoff,
+		                    &matchlen, POS_CONTENT) > 0) {
+
+			bufflen = sprintf(buffer, "%u", c_len);
+			return nf_nat_mangle_udp_packet(pskb, ct, ctinfo,
+							matchoff, matchlen,
+							buffer, bufflen);
+		}
+	}
+	return 0;
+}
+
+static unsigned int mangle_sdp(struct sk_buff **pskb,
+			       enum ip_conntrack_info ctinfo,
+			       struct nf_conn *ct,
+			       __be32 newip, u_int16_t port,
+			       const char *dptr)
+{
+	char buffer[sizeof("nnn.nnn.nnn.nnn")];
+	unsigned int dataoff, bufflen;
+
+	dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
+
+	/* Mangle owner and contact info. */
+	bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip));
+	if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
+	                       buffer, bufflen, POS_OWNER_IP4))
+		return 0;
+
+	if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
+	                       buffer, bufflen, POS_CONNECTION_IP4))
+		return 0;
+
+	/* Mangle media port. */
+	bufflen = sprintf(buffer, "%u", port);
+	if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff,
+	                       buffer, bufflen, POS_MEDIA))
+		return 0;
+
+	return mangle_content_len(pskb, ctinfo, ct, dptr);
+}
+
+/* So, this packet has hit the connection tracking matching code.
+   Mangle it, and change the expectation to match the new version. */
+static unsigned int ip_nat_sdp(struct sk_buff **pskb,
+			       enum ip_conntrack_info ctinfo,
+			       struct nf_conntrack_expect *exp,
+			       const char *dptr)
+{
+	struct nf_conn *ct = exp->master;
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	__be32 newip;
+	u_int16_t port;
+
+	DEBUGP("ip_nat_sdp():\n");
+
+	/* Connection will come from reply */
+	newip = ct->tuplehash[!dir].tuple.dst.u3.ip;
+
+	exp->tuple.dst.u3.ip = newip;
+	exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port;
+	exp->dir = !dir;
+
+	/* When we see the expected packet, NAT it the same way as
+	   this one. */
+	exp->expectfn = nf_nat_follow_master;
+
+	/* Try to get same port: if not, try to change it. */
+	for (port = ntohs(exp->saved_proto.udp.port); port != 0; port++) {
+		exp->tuple.dst.u.udp.port = htons(port);
+		if (nf_conntrack_expect_related(exp) == 0)
+			break;
+	}
+
+	if (port == 0)
+		return NF_DROP;
+
+	if (!mangle_sdp(pskb, ctinfo, ct, newip, port, dptr)) {
+		nf_conntrack_unexpect_related(exp);
+		return NF_DROP;
+	}
+	return NF_ACCEPT;
+}
+
+static void __exit nf_nat_sip_fini(void)
+{
+	rcu_assign_pointer(nf_nat_sip_hook, NULL);
+	rcu_assign_pointer(nf_nat_sdp_hook, NULL);
+	synchronize_rcu();
+}
+
+static int __init nf_nat_sip_init(void)
+{
+	BUG_ON(rcu_dereference(nf_nat_sip_hook));
+	BUG_ON(rcu_dereference(nf_nat_sdp_hook));
+	rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip);
+	rcu_assign_pointer(nf_nat_sdp_hook, ip_nat_sdp);
+	return 0;
+}
+
+module_init(nf_nat_sip_init);
+module_exit(nf_nat_sip_fini);
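
For reference, a user-space sketch (not kernel code; the helper names below are made up for the demo) of the string handling that addr_map_init() performs above: each tuple endpoint is rendered once as a bare dotted-quad and once as "ip:port", and map_sip_addr() later memcmp()s those prebuilt strings against the SIP payload, replacing a match with the string for the other direction.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical user-space mirror of addr_map_init(): render one endpoint
 * as "a.b.c.d" and "a.b.c.d:port", remembering both lengths so a later
 * memcmp() against the SIP payload can match either form. */
struct sip_addr_str {
	char buf[sizeof("255.255.255.255:65535")];
	int iplen;	/* length of the bare IP form */
	int len;	/* length including ":port"   */
};

static void render_endpoint(struct sip_addr_str *s, uint32_t ip, uint16_t port)
{
	const uint8_t *b = (const uint8_t *)&ip;	/* ip is in network byte order */

	s->iplen = sprintf(s->buf, "%u.%u.%u.%u", b[0], b[1], b[2], b[3]);
	s->len = s->iplen + sprintf(s->buf + s->iplen, ":%u", ntohs(port));
}

int main(void)
{
	struct sip_addr_str a;

	render_endpoint(&a, inet_addr("192.168.0.10"), htons(5060));
	printf("%.*s / %.*s\n", a.iplen, a.buf, a.len, a.buf);
	return 0;
}
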
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
new file mode 100644
index 0000000..f12528f
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -0,0 +1,1332 @@
+/*
+ * nf_nat_snmp_basic.c
+ *
+ * Basic SNMP Application Layer Gateway
+ *
+ * This IP NAT module is intended for use with SNMP network
+ * discovery and monitoring applications where target networks use
+ * conflicting private address realms.
+ *
+ * Static NAT is used to remap the networks from the view of the network
+ * management system at the IP layer, and this module remaps some application
+ * layer addresses to match.
+ *
+ * The simplest form of ALG is performed, where only tagged IP addresses
+ * are modified.  The module does not need to be MIB aware and only scans
+ * messages at the ASN.1/BER level.
+ *
+ * Currently, only SNMPv1 and SNMPv2 are supported.
+ *
+ * More information on ALG and associated issues can be found in
+ * RFC 2962
+ *
+ * The ASN.1/BER parsing code is derived from the gxsnmp package by Gregory
+ * McLean & Jochen Friedrich, stripped down for use in the kernel.
+ *
+ * Copyright (c) 2000 RP Internet (www.rpi.net.au).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ * Author: James Morris <jmorris@intercode.com.au>
+ *
+ * Updates:
+ * 2000-08-06: Convert to new helper API (Harald Welte).
+ *
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <net/checksum.h>
+#include <net/udp.h>
+
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_nat_helper.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
+MODULE_DESCRIPTION("Basic SNMP Application Layer Gateway");
+MODULE_ALIAS("ip_nat_snmp_basic");
+
+#define SNMP_PORT 161
+#define SNMP_TRAP_PORT 162
+#define NOCT1(n) (*(u8 *)n)
+
+static int debug;
+static DEFINE_SPINLOCK(snmp_lock);
+
+/*
+ * Application layer address mapping mimics the NAT mapping, but
+ * only for the first octet in this case (a more flexible system
+ * can be implemented if needed).
+ */
+struct oct1_map
+{
+	u_int8_t from;
+	u_int8_t to;
+};
+
+
+/*****************************************************************************
+ *
+ * Basic ASN.1 decoding routines (gxsnmp author Dirk Wisse)
+ *
+ *****************************************************************************/
+
+/* Class */
+#define ASN1_UNI	0	/* Universal */
+#define ASN1_APL	1	/* Application */
+#define ASN1_CTX	2	/* Context */
+#define ASN1_PRV	3	/* Private */
+
+/* Tag */
+#define ASN1_EOC	0	/* End Of Contents */
+#define ASN1_BOL	1	/* Boolean */
+#define ASN1_INT	2	/* Integer */
+#define ASN1_BTS	3	/* Bit String */
+#define ASN1_OTS	4	/* Octet String */
+#define ASN1_NUL	5	/* Null */
+#define ASN1_OJI	6	/* Object Identifier  */
+#define ASN1_OJD	7	/* Object Description */
+#define ASN1_EXT	8	/* External */
+#define ASN1_SEQ	16	/* Sequence */
+#define ASN1_SET	17	/* Set */
+#define ASN1_NUMSTR	18	/* Numerical String */
+#define ASN1_PRNSTR	19	/* Printable String */
+#define ASN1_TEXSTR	20	/* Teletext String */
+#define ASN1_VIDSTR	21	/* Video String */
+#define ASN1_IA5STR	22	/* IA5 String */
+#define ASN1_UNITIM	23	/* Universal Time */
+#define ASN1_GENTIM	24	/* General Time */
+#define ASN1_GRASTR	25	/* Graphical String */
+#define ASN1_VISSTR	26	/* Visible String */
+#define ASN1_GENSTR	27	/* General String */
+
+/* Primitive / Constructed methods*/
+#define ASN1_PRI	0	/* Primitive */
+#define ASN1_CON	1	/* Constructed */
+
+/*
+ * Error codes.
+ */
+#define ASN1_ERR_NOERROR		0
+#define ASN1_ERR_DEC_EMPTY		2
+#define ASN1_ERR_DEC_EOC_MISMATCH	3
+#define ASN1_ERR_DEC_LENGTH_MISMATCH	4
+#define ASN1_ERR_DEC_BADVALUE		5
+
+/*
+ * ASN.1 context.
+ */
+struct asn1_ctx
+{
+	int error;			/* Error condition */
+	unsigned char *pointer;		/* Octet just to be decoded */
+	unsigned char *begin;		/* First octet */
+	unsigned char *end;		/* Octet after last octet */
+};
+
+/*
+ * Octet string (not null terminated)
+ */
+struct asn1_octstr
+{
+	unsigned char *data;
+	unsigned int len;
+};
+
+static void asn1_open(struct asn1_ctx *ctx,
+                      unsigned char *buf,
+                      unsigned int len)
+{
+	ctx->begin = buf;
+	ctx->end = buf + len;
+	ctx->pointer = buf;
+	ctx->error = ASN1_ERR_NOERROR;
+}
+
+static unsigned char asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch)
+{
+	if (ctx->pointer >= ctx->end) {
+		ctx->error = ASN1_ERR_DEC_EMPTY;
+		return 0;
+	}
+	*ch = *(ctx->pointer)++;
+	return 1;
+}
+
+static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
+{
+	unsigned char ch;
+
+	*tag = 0;
+
+	do
+	{
+		if (!asn1_octet_decode(ctx, &ch))
+			return 0;
+		*tag <<= 7;
+		*tag |= ch & 0x7F;
+	} while ((ch & 0x80) == 0x80);
+	return 1;
+}
+
+static unsigned char asn1_id_decode(struct asn1_ctx *ctx,
+                                    unsigned int *cls,
+                                    unsigned int *con,
+                                    unsigned int *tag)
+{
+	unsigned char ch;
+
+	if (!asn1_octet_decode(ctx, &ch))
+		return 0;
+
+	*cls = (ch & 0xC0) >> 6;
+	*con = (ch & 0x20) >> 5;
+	*tag = (ch & 0x1F);
+
+	if (*tag == 0x1F) {
+		if (!asn1_tag_decode(ctx, tag))
+			return 0;
+	}
+	return 1;
+}
+
+static unsigned char asn1_length_decode(struct asn1_ctx *ctx,
+                                        unsigned int *def,
+                                        unsigned int *len)
+{
+	unsigned char ch, cnt;
+
+	if (!asn1_octet_decode(ctx, &ch))
+		return 0;
+
+	if (ch == 0x80)
+		*def = 0;
+	else {
+		*def = 1;
+
+		if (ch < 0x80)
+			*len = ch;
+		else {
+			cnt = (unsigned char) (ch & 0x7F);
+			*len = 0;
+
+			while (cnt > 0) {
+				if (!asn1_octet_decode(ctx, &ch))
+					return 0;
+				*len <<= 8;
+				*len |= ch;
+				cnt--;
+			}
+		}
+	}
+	return 1;
+}
+
+static unsigned char asn1_header_decode(struct asn1_ctx *ctx,
+                                        unsigned char **eoc,
+                                        unsigned int *cls,
+                                        unsigned int *con,
+                                        unsigned int *tag)
+{
+	unsigned int def, len;
+
+	if (!asn1_id_decode(ctx, cls, con, tag))
+		return 0;
+
+	def = len = 0;
+	if (!asn1_length_decode(ctx, &def, &len))
+		return 0;
+
+	if (def)
+		*eoc = ctx->pointer + len;
+	else
+		*eoc = NULL;
+	return 1;
+}
+
+static unsigned char asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc)
+{
+	unsigned char ch;
+
+	if (eoc == 0) {
+		if (!asn1_octet_decode(ctx, &ch))
+			return 0;
+
+		if (ch != 0x00) {
+			ctx->error = ASN1_ERR_DEC_EOC_MISMATCH;
+			return 0;
+		}
+
+		if (!asn1_octet_decode(ctx, &ch))
+			return 0;
+
+		if (ch != 0x00) {
+			ctx->error = ASN1_ERR_DEC_EOC_MISMATCH;
+			return 0;
+		}
+		return 1;
+	} else {
+		if (ctx->pointer != eoc) {
+			ctx->error = ASN1_ERR_DEC_LENGTH_MISMATCH;
+			return 0;
+		}
+		return 1;
+	}
+}
+
+static unsigned char asn1_null_decode(struct asn1_ctx *ctx, unsigned char *eoc)
+{
+	ctx->pointer = eoc;
+	return 1;
+}
+
+static unsigned char asn1_long_decode(struct asn1_ctx *ctx,
+                                      unsigned char *eoc,
+                                      long *integer)
+{
+	unsigned char ch;
+	unsigned int  len;
+
+	if (!asn1_octet_decode(ctx, &ch))
+		return 0;
+
+	*integer = (signed char) ch;
+	len = 1;
+
+	while (ctx->pointer < eoc) {
+		if (++len > sizeof (long)) {
+			ctx->error = ASN1_ERR_DEC_BADVALUE;
+			return 0;
+		}
+
+		if (!asn1_octet_decode(ctx, &ch))
+			return 0;
+
+		*integer <<= 8;
+		*integer |= ch;
+	}
+	return 1;
+}
+
+static unsigned char asn1_uint_decode(struct asn1_ctx *ctx,
+                                      unsigned char *eoc,
+                                      unsigned int *integer)
+{
+	unsigned char ch;
+	unsigned int  len;
+
+	if (!asn1_octet_decode(ctx, &ch))
+		return 0;
+
+	*integer = ch;
+	if (ch == 0) len = 0;
+	else len = 1;
+
+	while (ctx->pointer < eoc) {
+		if (++len > sizeof (unsigned int)) {
+			ctx->error = ASN1_ERR_DEC_BADVALUE;
+			return 0;
+		}
+
+		if (!asn1_octet_decode(ctx, &ch))
+			return 0;
+
+		*integer <<= 8;
+		*integer |= ch;
+	}
+	return 1;
+}
+
+static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx,
+                                       unsigned char *eoc,
+                                       unsigned long *integer)
+{
+	unsigned char ch;
+	unsigned int  len;
+
+	if (!asn1_octet_decode(ctx, &ch))
+		return 0;
+
+	*integer = ch;
+	if (ch == 0) len = 0;
+	else len = 1;
+
+	while (ctx->pointer < eoc) {
+		if (++len > sizeof (unsigned long)) {
+			ctx->error = ASN1_ERR_DEC_BADVALUE;
+			return 0;
+		}
+
+		if (!asn1_octet_decode(ctx, &ch))
+			return 0;
+
+		*integer <<= 8;
+		*integer |= ch;
+	}
+	return 1;
+}
+
+static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
+                                        unsigned char *eoc,
+                                        unsigned char **octets,
+                                        unsigned int *len)
+{
+	unsigned char *ptr;
+
+	*len = 0;
+
+	*octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
+	if (*octets == NULL) {
+		if (net_ratelimit())
+			printk("OOM in bsalg (%d)\n", __LINE__);
+		return 0;
+	}
+
+	ptr = *octets;
+	while (ctx->pointer < eoc) {
+		if (!asn1_octet_decode(ctx, (unsigned char *)ptr++)) {
+			kfree(*octets);
+			*octets = NULL;
+			return 0;
+		}
+		(*len)++;
+	}
+	return 1;
+}
+
+static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
+                                       unsigned long *subid)
+{
+	unsigned char ch;
+
+	*subid = 0;
+
+	do {
+		if (!asn1_octet_decode(ctx, &ch))
+			return 0;
+
+		*subid <<= 7;
+		*subid |= ch & 0x7F;
+	} while ((ch & 0x80) == 0x80);
+	return 1;
+}
+
+static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
+                                     unsigned char *eoc,
+                                     unsigned long **oid,
+                                     unsigned int *len)
+{
+	unsigned long subid;
+	unsigned int  size;
+	unsigned long *optr;
+
+	size = eoc - ctx->pointer + 1;
+	*oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
+	if (*oid == NULL) {
+		if (net_ratelimit())
+			printk("OOM in bsalg (%d)\n", __LINE__);
+		return 0;
+	}
+
+	optr = *oid;
+
+	if (!asn1_subid_decode(ctx, &subid)) {
+		kfree(*oid);
+		*oid = NULL;
+		return 0;
+	}
+
+	if (subid < 40) {
+		optr [0] = 0;
+		optr [1] = subid;
+	} else if (subid < 80) {
+		optr [0] = 1;
+		optr [1] = subid - 40;
+	} else {
+		optr [0] = 2;
+		optr [1] = subid - 80;
+	}
+
+	*len = 2;
+	optr += 2;
+
+	while (ctx->pointer < eoc) {
+		if (++(*len) > size) {
+			ctx->error = ASN1_ERR_DEC_BADVALUE;
+			kfree(*oid);
+			*oid = NULL;
+			return 0;
+		}
+
+		if (!asn1_subid_decode(ctx, optr++)) {
+			kfree(*oid);
+			*oid = NULL;
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/*****************************************************************************
+ *
+ * SNMP decoding routines (gxsnmp author Dirk Wisse)
+ *
+ *****************************************************************************/
+
+/* SNMP Versions */
+#define SNMP_V1				0
+#define SNMP_V2C			1
+#define SNMP_V2				2
+#define SNMP_V3				3
+
+/* Default Sizes */
+#define SNMP_SIZE_COMM			256
+#define SNMP_SIZE_OBJECTID		128
+#define SNMP_SIZE_BUFCHR		256
+#define SNMP_SIZE_BUFINT		128
+#define SNMP_SIZE_SMALLOBJECTID		16
+
+/* Requests */
+#define SNMP_PDU_GET			0
+#define SNMP_PDU_NEXT			1
+#define SNMP_PDU_RESPONSE		2
+#define SNMP_PDU_SET			3
+#define SNMP_PDU_TRAP1			4
+#define SNMP_PDU_BULK			5
+#define SNMP_PDU_INFORM			6
+#define SNMP_PDU_TRAP2			7
+
+/* Errors */
+#define SNMP_NOERROR			0
+#define SNMP_TOOBIG			1
+#define SNMP_NOSUCHNAME			2
+#define SNMP_BADVALUE			3
+#define SNMP_READONLY			4
+#define SNMP_GENERROR			5
+#define SNMP_NOACCESS			6
+#define SNMP_WRONGTYPE			7
+#define SNMP_WRONGLENGTH		8
+#define SNMP_WRONGENCODING		9
+#define SNMP_WRONGVALUE			10
+#define SNMP_NOCREATION			11
+#define SNMP_INCONSISTENTVALUE		12
+#define SNMP_RESOURCEUNAVAILABLE	13
+#define SNMP_COMMITFAILED		14
+#define SNMP_UNDOFAILED			15
+#define SNMP_AUTHORIZATIONERROR		16
+#define SNMP_NOTWRITABLE		17
+#define SNMP_INCONSISTENTNAME		18
+
+/* General SNMP V1 Traps */
+#define SNMP_TRAP_COLDSTART		0
+#define SNMP_TRAP_WARMSTART		1
+#define SNMP_TRAP_LINKDOWN		2
+#define SNMP_TRAP_LINKUP		3
+#define SNMP_TRAP_AUTFAILURE		4
+#define SNMP_TRAP_EQPNEIGHBORLOSS	5
+#define SNMP_TRAP_ENTSPECIFIC		6
+
+/* SNMPv1 Types */
+#define SNMP_NULL                0
+#define SNMP_INTEGER             1    /* l  */
+#define SNMP_OCTETSTR            2    /* c  */
+#define SNMP_DISPLAYSTR          2    /* c  */
+#define SNMP_OBJECTID            3    /* ul */
+#define SNMP_IPADDR              4    /* uc */
+#define SNMP_COUNTER             5    /* ul */
+#define SNMP_GAUGE               6    /* ul */
+#define SNMP_TIMETICKS           7    /* ul */
+#define SNMP_OPAQUE              8    /* c  */
+
+/* Additional SNMPv2 Types */
+#define SNMP_UINTEGER            5    /* ul */
+#define SNMP_BITSTR              9    /* uc */
+#define SNMP_NSAP               10    /* uc */
+#define SNMP_COUNTER64          11    /* ul */
+#define SNMP_NOSUCHOBJECT       12
+#define SNMP_NOSUCHINSTANCE     13
+#define SNMP_ENDOFMIBVIEW       14
+
+union snmp_syntax
+{
+	unsigned char uc[0];	/* 8 bit unsigned */
+	char c[0];		/* 8 bit signed */
+	unsigned long ul[0];	/* 32 bit unsigned */
+	long l[0];		/* 32 bit signed */
+};
+
+struct snmp_object
+{
+	unsigned long *id;
+	unsigned int id_len;
+	unsigned short type;
+	unsigned int syntax_len;
+	union snmp_syntax syntax;
+};
+
+struct snmp_request
+{
+	unsigned long id;
+	unsigned int error_status;
+	unsigned int error_index;
+};
+
+struct snmp_v1_trap
+{
+	unsigned long *id;
+	unsigned int id_len;
+	unsigned long ip_address;	/* pointer  */
+	unsigned int general;
+	unsigned int specific;
+	unsigned long time;
+};
+
+/* SNMP types */
+#define SNMP_IPA    0
+#define SNMP_CNT    1
+#define SNMP_GGE    2
+#define SNMP_TIT    3
+#define SNMP_OPQ    4
+#define SNMP_C64    6
+
+/* SNMP errors */
+#define SERR_NSO    0
+#define SERR_NSI    1
+#define SERR_EOM    2
+
+static inline void mangle_address(unsigned char *begin,
+                                  unsigned char *addr,
+                                  const struct oct1_map *map,
+                                  __sum16 *check);
+struct snmp_cnv
+{
+	unsigned int class;
+	unsigned int tag;
+	int syntax;
+};
+
+static struct snmp_cnv snmp_conv [] =
+{
+	{ASN1_UNI, ASN1_NUL, SNMP_NULL},
+	{ASN1_UNI, ASN1_INT, SNMP_INTEGER},
+	{ASN1_UNI, ASN1_OTS, SNMP_OCTETSTR},
+	{ASN1_UNI, ASN1_OTS, SNMP_DISPLAYSTR},
+	{ASN1_UNI, ASN1_OJI, SNMP_OBJECTID},
+	{ASN1_APL, SNMP_IPA, SNMP_IPADDR},
+	{ASN1_APL, SNMP_CNT, SNMP_COUNTER},	/* Counter32 */
+	{ASN1_APL, SNMP_GGE, SNMP_GAUGE},	/* Gauge32 == Unsigned32  */
+	{ASN1_APL, SNMP_TIT, SNMP_TIMETICKS},
+	{ASN1_APL, SNMP_OPQ, SNMP_OPAQUE},
+
+	/* SNMPv2 data types and errors */
+	{ASN1_UNI, ASN1_BTS, SNMP_BITSTR},
+	{ASN1_APL, SNMP_C64, SNMP_COUNTER64},
+	{ASN1_CTX, SERR_NSO, SNMP_NOSUCHOBJECT},
+	{ASN1_CTX, SERR_NSI, SNMP_NOSUCHINSTANCE},
+	{ASN1_CTX, SERR_EOM, SNMP_ENDOFMIBVIEW},
+	{0,       0,       -1}
+};
+
+static unsigned char snmp_tag_cls2syntax(unsigned int tag,
+                                         unsigned int cls,
+                                         unsigned short *syntax)
+{
+	struct snmp_cnv *cnv;
+
+	cnv = snmp_conv;
+
+	while (cnv->syntax != -1) {
+		if (cnv->tag == tag && cnv->class == cls) {
+			*syntax = cnv->syntax;
+			return 1;
+		}
+		cnv++;
+	}
+	return 0;
+}
+
+static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
+                                        struct snmp_object **obj)
+{
+	unsigned int cls, con, tag, len, idlen;
+	unsigned short type;
+	unsigned char *eoc, *end, *p;
+	unsigned long *lp, *id;
+	unsigned long ul;
+	long l;
+
+	*obj = NULL;
+	id = NULL;
+
+	if (!asn1_header_decode(ctx, &eoc, &cls, &con, &tag))
+		return 0;
+
+	if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
+		return 0;
+
+	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
+		return 0;
+
+	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI)
+		return 0;
+
+	if (!asn1_oid_decode(ctx, end, &id, &idlen))
+		return 0;
+
+	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) {
+		kfree(id);
+		return 0;
+	}
+
+	if (con != ASN1_PRI) {
+		kfree(id);
+		return 0;
+	}
+
+	type = 0;
+	if (!snmp_tag_cls2syntax(tag, cls, &type)) {
+		kfree(id);
+		return 0;
+	}
+
+	l = 0;
+	switch (type) {
+		case SNMP_INTEGER:
+			len = sizeof(long);
+			if (!asn1_long_decode(ctx, end, &l)) {
+				kfree(id);
+				return 0;
+			}
+			*obj = kmalloc(sizeof(struct snmp_object) + len,
+			               GFP_ATOMIC);
+			if (*obj == NULL) {
+				kfree(id);
+				if (net_ratelimit())
+					printk("OOM in bsalg (%d)\n", __LINE__);
+				return 0;
+			}
+			(*obj)->syntax.l[0] = l;
+			break;
+		case SNMP_OCTETSTR:
+		case SNMP_OPAQUE:
+			if (!asn1_octets_decode(ctx, end, &p, &len)) {
+				kfree(id);
+				return 0;
+			}
+			*obj = kmalloc(sizeof(struct snmp_object) + len,
+			               GFP_ATOMIC);
+			if (*obj == NULL) {
+				kfree(id);
+				if (net_ratelimit())
+					printk("OOM in bsalg (%d)\n", __LINE__);
+				return 0;
+			}
+			memcpy((*obj)->syntax.c, p, len);
+			kfree(p);
+			break;
+		case SNMP_NULL:
+		case SNMP_NOSUCHOBJECT:
+		case SNMP_NOSUCHINSTANCE:
+		case SNMP_ENDOFMIBVIEW:
+			len = 0;
+			*obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
+			if (*obj == NULL) {
+				kfree(id);
+				if (net_ratelimit())
+					printk("OOM in bsalg (%d)\n", __LINE__);
+				return 0;
+			}
+			if (!asn1_null_decode(ctx, end)) {
+				kfree(id);
+				kfree(*obj);
+				*obj = NULL;
+				return 0;
+			}
+			break;
+		case SNMP_OBJECTID:
+			if (!asn1_oid_decode(ctx, end, (unsigned long **)&lp, &len)) {
+				kfree(id);
+				return 0;
+			}
+			len *= sizeof(unsigned long);
+			*obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
+			if (*obj == NULL) {
+				kfree(lp);
+				kfree(id);
+				if (net_ratelimit())
+					printk("OOM in bsalg (%d)\n", __LINE__);
+				return 0;
+			}
+			memcpy((*obj)->syntax.ul, lp, len);
+			kfree(lp);
+			break;
+		case SNMP_IPADDR:
+			if (!asn1_octets_decode(ctx, end, &p, &len)) {
+				kfree(id);
+				return 0;
+			}
+			if (len != 4) {
+				kfree(p);
+				kfree(id);
+				return 0;
+			}
+			*obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
+			if (*obj == NULL) {
+				kfree(p);
+				kfree(id);
+				if (net_ratelimit())
+					printk("OOM in bsalg (%d)\n", __LINE__);
+				return 0;
+			}
+			memcpy((*obj)->syntax.uc, p, len);
+			kfree(p);
+			break;
+		case SNMP_COUNTER:
+		case SNMP_GAUGE:
+		case SNMP_TIMETICKS:
+			len = sizeof(unsigned long);
+			if (!asn1_ulong_decode(ctx, end, &ul)) {
+				kfree(id);
+				return 0;
+			}
+			*obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
+			if (*obj == NULL) {
+				kfree(id);
+				if (net_ratelimit())
+					printk("OOM in bsalg (%d)\n", __LINE__);
+				return 0;
+			}
+			(*obj)->syntax.ul[0] = ul;
+			break;
+		default:
+			kfree(id);
+			return 0;
+	}
+
+	(*obj)->syntax_len = len;
+	(*obj)->type = type;
+	(*obj)->id = id;
+	(*obj)->id_len = idlen;
+
+	if (!asn1_eoc_decode(ctx, eoc)) {
+		kfree(id);
+		kfree(*obj);
+		*obj = NULL;
+		return 0;
+	}
+	return 1;
+}
+
+static unsigned char snmp_request_decode(struct asn1_ctx *ctx,
+                                         struct snmp_request *request)
+{
+	unsigned int cls, con, tag;
+	unsigned char *end;
+
+	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
+		return 0;
+
+	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
+		return 0;
+
+	if (!asn1_ulong_decode(ctx, end, &request->id))
+		return 0;
+
+	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
+		return 0;
+
+	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
+		return 0;
+
+	if (!asn1_uint_decode(ctx, end, &request->error_status))
+		return 0;
+
+	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
+		return 0;
+
+	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
+		return 0;
+
+	if (!asn1_uint_decode(ctx, end, &request->error_index))
+		return 0;
+
+	return 1;
+}
+
+/*
+ * Fast checksum update for possibly oddly-aligned UDP byte, from the
+ * code example in the draft.
+ */
+static void fast_csum(__sum16 *csum,
+                      const unsigned char *optr,
+                      const unsigned char *nptr,
+                      int offset)
+{
+	unsigned char s[4];
+
+	if (offset & 1) {
+		s[0] = s[2] = 0;
+		s[1] = ~*optr;
+		s[3] = *nptr;
+	} else {
+		s[1] = s[3] = 0;
+		s[0] = ~*optr;
+		s[2] = *nptr;
+	}
+
+	*csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
+}
+
+/*
+ * Mangle IP address.
+ * 	- begin points to the start of the SNMP message
+ *      - addr points to the start of the address
+ */
+static inline void mangle_address(unsigned char *begin,
+                                  unsigned char *addr,
+                                  const struct oct1_map *map,
+                                  __sum16 *check)
+{
+	if (map->from == NOCT1(addr)) {
+		u_int32_t old;
+
+		if (debug)
+			memcpy(&old, (unsigned char *)addr, sizeof(old));
+
+		*addr = map->to;
+
+		/* Update UDP checksum if being used */
+		if (*check) {
+			fast_csum(check,
+			          &map->from, &map->to, addr - begin);
+
+		}
+
+		if (debug)
+			printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to "
+			       "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr));
+	}
+}
+
+static unsigned char snmp_trap_decode(struct asn1_ctx *ctx,
+                                      struct snmp_v1_trap *trap,
+                                      const struct oct1_map *map,
+                                      __sum16 *check)
+{
+	unsigned int cls, con, tag, len;
+	unsigned char *end;
+
+	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
+		return 0;
+
+	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI)
+		return 0;
+
+	if (!asn1_oid_decode(ctx, end, &trap->id, &trap->id_len))
+		return 0;
+
+	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
+		goto err_id_free;
+
+	if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_IPA) ||
+	      (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_OTS)))
+		goto err_id_free;
+
+	if (!asn1_octets_decode(ctx, end, (unsigned char **)&trap->ip_address, &len))
+		goto err_id_free;
+
+	/* IPv4 only */
+	if (len != 4)
+		goto err_addr_free;
+
+	mangle_address(ctx->begin, ctx->pointer - 4, map, check);
+
+	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
+		goto err_addr_free;
+
+	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
+		goto err_addr_free;
+
+	if (!asn1_uint_decode(ctx, end, &trap->general))
+		goto err_addr_free;
+
+	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
+		goto err_addr_free;
+
+	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
+		goto err_addr_free;
+
+	if (!asn1_uint_decode(ctx, end, &trap->specific))
+		goto err_addr_free;
+
+	if (!asn1_header_decode(ctx, &end, &cls, &con, &tag))
+		goto err_addr_free;
+
+	if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_TIT) ||
+	      (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_INT)))
+		goto err_addr_free;
+
+	if (!asn1_ulong_decode(ctx, end, &trap->time))
+		goto err_addr_free;
+
+	return 1;
+
+err_addr_free:
+	kfree((unsigned long *)trap->ip_address);
+
+err_id_free:
+	kfree(trap->id);
+
+	return 0;
+}
+
+/*****************************************************************************
+ *
+ * Misc. routines
+ *
+ *****************************************************************************/
+
+static void hex_dump(unsigned char *buf, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len; i++) {
+		if (i && !(i % 16))
+			printk("\n");
+		printk("%02x ", *(buf + i));
+	}
+	printk("\n");
+}
+
+/*
+ * Parse and mangle SNMP message according to mapping.
+ * (And this is the fucking 'basic' method).
+ */
+static int snmp_parse_mangle(unsigned char *msg,
+                             u_int16_t len,
+                             const struct oct1_map *map,
+                             __sum16 *check)
+{
+	unsigned char *eoc, *end;
+	unsigned int cls, con, tag, vers, pdutype;
+	struct asn1_ctx ctx;
+	struct asn1_octstr comm;
+	struct snmp_object **obj;
+
+	if (debug > 1)
+		hex_dump(msg, len);
+
+	asn1_open(&ctx, msg, len);
+
+	/*
+	 * Start of SNMP message.
+	 */
+	if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag))
+		return 0;
+	if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
+		return 0;
+
+	/*
+	 * Version 1 or 2 handled.
+	 */
+	if (!asn1_header_decode(&ctx, &end, &cls, &con, &tag))
+		return 0;
+	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT)
+		return 0;
+	if (!asn1_uint_decode (&ctx, end, &vers))
+		return 0;
+	if (debug > 1)
+		printk(KERN_DEBUG "bsalg: snmp version: %u\n", vers + 1);
+	if (vers > 1)
+		return 1;
+
+	/*
+	 * Community.
+	 */
+	if (!asn1_header_decode (&ctx, &end, &cls, &con, &tag))
+		return 0;
+	if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OTS)
+		return 0;
+	if (!asn1_octets_decode(&ctx, end, &comm.data, &comm.len))
+		return 0;
+	if (debug > 1) {
+		unsigned int i;
+
+		printk(KERN_DEBUG "bsalg: community: ");
+		for (i = 0; i < comm.len; i++)
+			printk("%c", comm.data[i]);
+		printk("\n");
+	}
+	kfree(comm.data);
+
+	/*
+	 * PDU type
+	 */
+	if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &pdutype))
+		return 0;
+	if (cls != ASN1_CTX || con != ASN1_CON)
+		return 0;
+	if (debug > 1) {
+		unsigned char *pdus[] = {
+			[SNMP_PDU_GET] = "get",
+			[SNMP_PDU_NEXT] = "get-next",
+			[SNMP_PDU_RESPONSE] = "response",
+			[SNMP_PDU_SET] = "set",
+			[SNMP_PDU_TRAP1] = "trapv1",
+			[SNMP_PDU_BULK] = "bulk",
+			[SNMP_PDU_INFORM] = "inform",
+			[SNMP_PDU_TRAP2] = "trapv2"
+		};
+
+		if (pdutype > SNMP_PDU_TRAP2)
+			printk(KERN_DEBUG "bsalg: bad pdu type %u\n", pdutype);
+		else
+			printk(KERN_DEBUG "bsalg: pdu: %s\n", pdus[pdutype]);
+	}
+	if (pdutype != SNMP_PDU_RESPONSE &&
+	    pdutype != SNMP_PDU_TRAP1 && pdutype != SNMP_PDU_TRAP2)
+		return 1;
+
+	/*
+	 * Request header or v1 trap
+	 */
+	if (pdutype == SNMP_PDU_TRAP1) {
+		struct snmp_v1_trap trap;
+		unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check);
+
+		if (ret) {
+			kfree(trap.id);
+			kfree((unsigned long *)trap.ip_address);
+		} else
+			return ret;
+
+	} else {
+		struct snmp_request req;
+
+		if (!snmp_request_decode(&ctx, &req))
+			return 0;
+
+		if (debug > 1)
+			printk(KERN_DEBUG "bsalg: request: id=0x%lx error_status=%u "
+			"error_index=%u\n", req.id, req.error_status,
+			req.error_index);
+	}
+
+	/*
+	 * Loop through objects, look for IP addresses to mangle.
+	 */
+	if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag))
+		return 0;
+
+	if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
+		return 0;
+
+	obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
+	if (obj == NULL) {
+		if (net_ratelimit())
+			printk(KERN_WARNING "OOM in bsalg(%d)\n", __LINE__);
+		return 0;
+	}
+
+	while (!asn1_eoc_decode(&ctx, eoc)) {
+		unsigned int i;
+
+		if (!snmp_object_decode(&ctx, obj)) {
+			if (*obj) {
+				kfree((*obj)->id);
+				kfree(*obj);
+			}
+			kfree(obj);
+			return 0;
+		}
+
+		if (debug > 1) {
+			printk(KERN_DEBUG "bsalg: object: ");
+			for (i = 0; i < (*obj)->id_len; i++) {
+				if (i > 0)
+					printk(".");
+				printk("%lu", (*obj)->id[i]);
+			}
+			printk(": type=%u\n", (*obj)->type);
+
+		}
+
+		if ((*obj)->type == SNMP_IPADDR)
+			mangle_address(ctx.begin, ctx.pointer - 4 , map, check);
+
+		kfree((*obj)->id);
+		kfree(*obj);
+	}
+	kfree(obj);
+
+	if (!asn1_eoc_decode(&ctx, eoc))
+		return 0;
+
+	return 1;
+}
+
+/*****************************************************************************
+ *
+ * NAT routines.
+ *
+ *****************************************************************************/
+
+/*
+ * SNMP translation routine.
+ */
+static int snmp_translate(struct nf_conn *ct,
+                          enum ip_conntrack_info ctinfo,
+                          struct sk_buff **pskb)
+{
+	struct iphdr *iph = (*pskb)->nh.iph;
+	struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
+	u_int16_t udplen = ntohs(udph->len);
+	u_int16_t paylen = udplen - sizeof(struct udphdr);
+	int dir = CTINFO2DIR(ctinfo);
+	struct oct1_map map;
+
+	/*
+	 * Determine mapping for application layer addresses based
+	 * on NAT manipulations for the packet.
+	 */
+	if (dir == IP_CT_DIR_ORIGINAL) {
+		/* SNAT traps */
+		map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip);
+		map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip);
+	} else {
+		/* DNAT replies */
+		map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip);
+		map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip);
+	}
+
+	if (map.from == map.to)
+		return NF_ACCEPT;
+
+	if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr),
+	                       paylen, &map, &udph->check)) {
+		if (net_ratelimit())
+			printk(KERN_WARNING "bsalg: parser failed\n");
+		return NF_DROP;
+	}
+	return NF_ACCEPT;
+}
+
+/* We don't actually set up expectations, just adjust internal IP
+ * addresses if this is being NATted */
+static int help(struct sk_buff **pskb, unsigned int protoff,
+		struct nf_conn *ct,
+		enum ip_conntrack_info ctinfo)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	unsigned int ret;
+	struct iphdr *iph = (*pskb)->nh.iph;
+	struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl);
+
+	/* SNMP replies and originating SNMP traps get mangled */
+	if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
+		return NF_ACCEPT;
+	if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL)
+		return NF_ACCEPT;
+
+	/* No NAT? */
+	if (!(ct->status & IPS_NAT_MASK))
+		return NF_ACCEPT;
+
+	/*
+	 * Make sure the packet length is ok.  So far, we were only guaranteed
+	 * to have a valid length IP header plus 8 bytes, which means we have
+	 * enough room for a UDP header.  Just verify the UDP length field so we
+	 * can mess around with the payload.
+	 */
+	if (ntohs(udph->len) != (*pskb)->len - (iph->ihl << 2)) {
+		 if (net_ratelimit())
+			 printk(KERN_WARNING "SNMP: dropping malformed packet "
+				"src=%u.%u.%u.%u dst=%u.%u.%u.%u\n",
+				NIPQUAD(iph->saddr), NIPQUAD(iph->daddr));
+		 return NF_DROP;
+	}
+
+	if (!skb_make_writable(pskb, (*pskb)->len))
+		return NF_DROP;
+
+	spin_lock_bh(&snmp_lock);
+	ret = snmp_translate(ct, ctinfo, pskb);
+	spin_unlock_bh(&snmp_lock);
+	return ret;
+}
+
+static struct nf_conntrack_helper snmp_helper __read_mostly = {
+	.max_expected		= 0,
+	.timeout		= 180,
+	.me			= THIS_MODULE,
+	.help			= help,
+	.name			= "snmp",
+	.tuple.src.l3num	= AF_INET,
+	.tuple.src.u.udp.port	= __constant_htons(SNMP_PORT),
+	.tuple.dst.protonum	= IPPROTO_UDP,
+	.mask.src.l3num		= 0xFFFF,
+	.mask.src.u.udp.port	= __constant_htons(0xFFFF),
+	.mask.dst.protonum	= 0xFF,
+};
+
+static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
+	.max_expected		= 0,
+	.timeout		= 180,
+	.me			= THIS_MODULE,
+	.help			= help,
+	.name			= "snmp_trap",
+	.tuple.src.l3num	= AF_INET,
+	.tuple.src.u.udp.port	= __constant_htons(SNMP_TRAP_PORT),
+	.tuple.dst.protonum	= IPPROTO_UDP,
+	.mask.src.l3num		= 0xFFFF,
+	.mask.src.u.udp.port	= __constant_htons(0xFFFF),
+	.mask.dst.protonum	= 0xFF,
+};
+
+/*****************************************************************************
+ *
+ * Module stuff.
+ *
+ *****************************************************************************/
+
+static int __init nf_nat_snmp_basic_init(void)
+{
+	int ret = 0;
+
+	ret = nf_conntrack_helper_register(&snmp_helper);
+	if (ret < 0)
+		return ret;
+	ret = nf_conntrack_helper_register(&snmp_trap_helper);
+	if (ret < 0) {
+		nf_conntrack_helper_unregister(&snmp_helper);
+		return ret;
+	}
+	return ret;
+}
+
+static void __exit nf_nat_snmp_basic_fini(void)
+{
+	nf_conntrack_helper_unregister(&snmp_helper);
+	nf_conntrack_helper_unregister(&snmp_trap_helper);
+}
+
+module_init(nf_nat_snmp_basic_init);
+module_exit(nf_nat_snmp_basic_fini);
+
+module_param(debug, int, 0600);
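
The single-byte rewrite done by mangle_address()/fast_csum() above relies on the standard incremental Internet-checksum update (RFC 1624 style): fold the ones'-complement of the old 16-bit word and the new word into the existing sum instead of re-summing the whole datagram. A standalone sketch of that arithmetic, assuming nothing beyond the C library (the helper names are invented for the demo); the assert shows the incremental fixup matches a full recompute.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Ones'-complement checksum over a buffer, treating it as big-endian words. */
static uint16_t csum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (buf[i] << 8) | buf[i + 1];
	if (len & 1)
		sum += buf[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* Incremental update when one byte at 'offset' changes from oldb to newb,
 * mirroring the even/odd-offset handling in fast_csum(). */
static uint16_t csum_update_byte(uint16_t check, size_t offset,
				 uint8_t oldb, uint8_t newb)
{
	uint32_t sum = (uint16_t)~check;	/* undo the final complement */
	uint16_t oldw, neww;

	if (offset & 1) {			/* low byte of its word  */
		oldw = oldb;
		neww = newb;
	} else {				/* high byte of its word */
		oldw = oldb << 8;
		neww = newb << 8;
	}
	sum += (uint16_t)~oldw;			/* subtract old word */
	sum += neww;				/* add new word      */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t pkt[8] = { 10, 1, 2, 3, 0x11, 0x22, 0x33, 0x44 };
	uint16_t before = csum(pkt, sizeof(pkt));
	uint8_t oldb = pkt[0];

	pkt[0] = 192;	/* remap the first octet, as the oct1_map does */

	assert(csum_update_byte(before, 0, oldb, pkt[0]) == csum(pkt, sizeof(pkt)));
	printf("checksum 0x%04x -> 0x%04x\n", before, csum(pkt, sizeof(pkt)));
	return 0;
}
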
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
new file mode 100644
index 0000000..730a7a4
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -0,0 +1,406 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/icmp.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/proc_fs.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include <linux/spinlock.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+#define HOOKNAME(hooknum) ((hooknum) == NF_IP_POST_ROUTING ? "POST_ROUTING"  \
+			   : ((hooknum) == NF_IP_PRE_ROUTING ? "PRE_ROUTING" \
+			      : ((hooknum) == NF_IP_LOCAL_OUT ? "LOCAL_OUT"  \
+			         : ((hooknum) == NF_IP_LOCAL_IN ? "LOCAL_IN"  \
+				    : "*ERROR*")))
+
+#ifdef CONFIG_XFRM
+static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
+{
+	struct nf_conn *ct;
+	struct nf_conntrack_tuple *t;
+	enum ip_conntrack_info ctinfo;
+	enum ip_conntrack_dir dir;
+	unsigned long statusbit;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (ct == NULL)
+		return;
+	dir = CTINFO2DIR(ctinfo);
+	t = &ct->tuplehash[dir].tuple;
+
+	if (dir == IP_CT_DIR_ORIGINAL)
+		statusbit = IPS_DST_NAT;
+	else
+		statusbit = IPS_SRC_NAT;
+
+	if (ct->status & statusbit) {
+		fl->fl4_dst = t->dst.u3.ip;
+		if (t->dst.protonum == IPPROTO_TCP ||
+		    t->dst.protonum == IPPROTO_UDP)
+			fl->fl_ip_dport = t->dst.u.tcp.port;
+	}
+
+	statusbit ^= IPS_NAT_MASK;
+
+	if (ct->status & statusbit) {
+		fl->fl4_src = t->src.u3.ip;
+		if (t->dst.protonum == IPPROTO_TCP ||
+		    t->dst.protonum == IPPROTO_UDP)
+			fl->fl_ip_sport = t->src.u.tcp.port;
+	}
+}
+#endif
+
+static unsigned int
+nf_nat_fn(unsigned int hooknum,
+	  struct sk_buff **pskb,
+	  const struct net_device *in,
+	  const struct net_device *out,
+	  int (*okfn)(struct sk_buff *))
+{
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn_nat *nat;
+	struct nf_nat_info *info;
+	/* maniptype == SRC for postrouting. */
+	enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
+
+	/* We never see fragments: conntrack defrags on pre-routing
+	   and local-out, and nf_nat_out protects post-routing. */
+	NF_CT_ASSERT(!((*pskb)->nh.iph->frag_off
+		       & htons(IP_MF|IP_OFFSET)));
+
+	ct = nf_ct_get(*pskb, &ctinfo);
+	/* Can't track?  It's not due to stress, or conntrack would
+	   have dropped it.  Hence it's the user's responsibility to
+	   packet filter it out, or implement conntrack/NAT for that
+	   protocol. 8) --RR */
+	if (!ct) {
+		/* Exception: ICMP redirect to new connection (not in
+                   hash table yet).  We must not let this through, in
+                   case we're doing NAT to the same network. */
+		if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) {
+			struct icmphdr _hdr, *hp;
+
+			hp = skb_header_pointer(*pskb,
+						(*pskb)->nh.iph->ihl*4,
+						sizeof(_hdr), &_hdr);
+			if (hp != NULL &&
+			    hp->type == ICMP_REDIRECT)
+				return NF_DROP;
+		}
+		return NF_ACCEPT;
+	}
+
+	/* Don't try to NAT if this packet is not conntracked */
+	if (ct == &nf_conntrack_untracked)
+		return NF_ACCEPT;
+
+	nat = nfct_nat(ct);
+	if (!nat)
+		return NF_DROP;
+
+	switch (ctinfo) {
+	case IP_CT_RELATED:
+	case IP_CT_RELATED+IP_CT_IS_REPLY:
+		if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) {
+			if (!nf_nat_icmp_reply_translation(ct, ctinfo,
+							   hooknum, pskb))
+				return NF_DROP;
+			else
+				return NF_ACCEPT;
+		}
+		/* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
+	case IP_CT_NEW:
+		info = &nat->info;
+
+		/* Seen it before?  This can happen for loopback, retrans,
+		   or local packets. */
+		if (!nf_nat_initialized(ct, maniptype)) {
+			unsigned int ret;
+
+			if (unlikely(nf_ct_is_confirmed(ct)))
+				/* NAT module was loaded late */
+				ret = alloc_null_binding_confirmed(ct, info,
+				                                   hooknum);
+			else if (hooknum == NF_IP_LOCAL_IN)
+				/* LOCAL_IN hook doesn't have a chain!  */
+				ret = alloc_null_binding(ct, info, hooknum);
+			else
+				ret = nf_nat_rule_find(pskb, hooknum, in, out,
+						       ct, info);
+
+			if (ret != NF_ACCEPT) {
+				return ret;
+			}
+		} else
+			DEBUGP("Already setup manip %s for ct %p\n",
+			       maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST",
+			       ct);
+		break;
+
+	default:
+		/* ESTABLISHED */
+		NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
+			     ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY));
+		info = &nat->info;
+	}
+
+	NF_CT_ASSERT(info);
+	return nf_nat_packet(ct, ctinfo, hooknum, pskb);
+}
+
+static unsigned int
+nf_nat_in(unsigned int hooknum,
+          struct sk_buff **pskb,
+          const struct net_device *in,
+          const struct net_device *out,
+          int (*okfn)(struct sk_buff *))
+{
+	unsigned int ret;
+	__be32 daddr = (*pskb)->nh.iph->daddr;
+
+	ret = nf_nat_fn(hooknum, pskb, in, out, okfn);
+	if (ret != NF_DROP && ret != NF_STOLEN &&
+	    daddr != (*pskb)->nh.iph->daddr) {
+		dst_release((*pskb)->dst);
+		(*pskb)->dst = NULL;
+	}
+	return ret;
+}
+
+static unsigned int
+nf_nat_out(unsigned int hooknum,
+	   struct sk_buff **pskb,
+	   const struct net_device *in,
+	   const struct net_device *out,
+	   int (*okfn)(struct sk_buff *))
+{
+#ifdef CONFIG_XFRM
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+#endif
+	unsigned int ret;
+
+	/* root is playing with raw sockets. */
+	if ((*pskb)->len < sizeof(struct iphdr) ||
+	    (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr))
+		return NF_ACCEPT;
+
+	ret = nf_nat_fn(hooknum, pskb, in, out, okfn);
+#ifdef CONFIG_XFRM
+	if (ret != NF_DROP && ret != NF_STOLEN &&
+	    (ct = nf_ct_get(*pskb, &ctinfo)) != NULL) {
+		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+		if (ct->tuplehash[dir].tuple.src.u3.ip !=
+		    ct->tuplehash[!dir].tuple.dst.u3.ip
+		    || ct->tuplehash[dir].tuple.src.u.all !=
+		       ct->tuplehash[!dir].tuple.dst.u.all
+		    )
+			return ip_xfrm_me_harder(pskb) == 0 ? ret : NF_DROP;
+	}
+#endif
+	return ret;
+}
+
+static unsigned int
+nf_nat_local_fn(unsigned int hooknum,
+		struct sk_buff **pskb,
+		const struct net_device *in,
+		const struct net_device *out,
+		int (*okfn)(struct sk_buff *))
+{
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+	unsigned int ret;
+
+	/* root is playing with raw sockets. */
+	if ((*pskb)->len < sizeof(struct iphdr) ||
+	    (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr))
+		return NF_ACCEPT;
+
+	ret = nf_nat_fn(hooknum, pskb, in, out, okfn);
+	if (ret != NF_DROP && ret != NF_STOLEN &&
+	    (ct = nf_ct_get(*pskb, &ctinfo)) != NULL) {
+		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+		if (ct->tuplehash[dir].tuple.dst.u3.ip !=
+		    ct->tuplehash[!dir].tuple.src.u3.ip
+#ifdef CONFIG_XFRM
+		    || ct->tuplehash[dir].tuple.dst.u.all !=
+		       ct->tuplehash[!dir].tuple.src.u.all
+#endif
+		    )
+			if (ip_route_me_harder(pskb, RTN_UNSPEC))
+				ret = NF_DROP;
+	}
+	return ret;
+}
+
+static unsigned int
+nf_nat_adjust(unsigned int hooknum,
+	      struct sk_buff **pskb,
+	      const struct net_device *in,
+	      const struct net_device *out,
+	      int (*okfn)(struct sk_buff *))
+{
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+
+	ct = nf_ct_get(*pskb, &ctinfo);
+	if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
+	        DEBUGP("nf_nat_standalone: adjusting sequence number\n");
+	        if (!nf_nat_seq_adjust(pskb, ct, ctinfo))
+	                return NF_DROP;
+	}
+	return NF_ACCEPT;
+}
+
+/* We must be after connection tracking and before packet filtering. */
+
+static struct nf_hook_ops nf_nat_ops[] = {
+	/* Before packet filtering, change destination */
+	{
+		.hook		= nf_nat_in,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET,
+		.hooknum	= NF_IP_PRE_ROUTING,
+		.priority	= NF_IP_PRI_NAT_DST,
+	},
+	/* After packet filtering, change source */
+	{
+		.hook		= nf_nat_out,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET,
+		.hooknum	= NF_IP_POST_ROUTING,
+		.priority	= NF_IP_PRI_NAT_SRC,
+	},
+	/* After conntrack, adjust sequence number */
+	{
+		.hook		= nf_nat_adjust,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET,
+		.hooknum	= NF_IP_POST_ROUTING,
+		.priority	= NF_IP_PRI_NAT_SEQ_ADJUST,
+	},
+	/* Before packet filtering, change destination */
+	{
+		.hook		= nf_nat_local_fn,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET,
+		.hooknum	= NF_IP_LOCAL_OUT,
+		.priority	= NF_IP_PRI_NAT_DST,
+	},
+	/* After packet filtering, change source */
+	{
+		.hook		= nf_nat_fn,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET,
+		.hooknum	= NF_IP_LOCAL_IN,
+		.priority	= NF_IP_PRI_NAT_SRC,
+	},
+	/* After conntrack, adjust sequence number */
+	{
+		.hook		= nf_nat_adjust,
+		.owner		= THIS_MODULE,
+		.pf		= PF_INET,
+		.hooknum	= NF_IP_LOCAL_IN,
+		.priority	= NF_IP_PRI_NAT_SEQ_ADJUST,
+	},
+};
+
+static int __init nf_nat_standalone_init(void)
+{
+	int size, ret = 0;
+
+	need_conntrack();
+
+	size = ALIGN(sizeof(struct nf_conn), __alignof__(struct nf_conn_nat)) +
+	       sizeof(struct nf_conn_nat);
+	ret = nf_conntrack_register_cache(NF_CT_F_NAT, "nf_nat:base", size);
+	if (ret < 0) {
+		printk(KERN_ERR "nf_nat_init: Unable to create slab cache\n");
+		return ret;
+	}
+
+	size = ALIGN(size, __alignof__(struct nf_conn_help)) +
+	       sizeof(struct nf_conn_help);
+	ret = nf_conntrack_register_cache(NF_CT_F_NAT|NF_CT_F_HELP,
+					  "nf_nat:help", size);
+	if (ret < 0) {
+		printk(KERN_ERR "nf_nat_init: Unable to create slab cache\n");
+		goto cleanup_register_cache;
+	}
+#ifdef CONFIG_XFRM
+	BUG_ON(ip_nat_decode_session != NULL);
+	ip_nat_decode_session = nat_decode_session;
+#endif
+	ret = nf_nat_rule_init();
+	if (ret < 0) {
+		printk("nf_nat_init: can't setup rules.\n");
+		goto cleanup_decode_session;
+	}
+	ret = nf_register_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
+	if (ret < 0) {
+		printk("nf_nat_init: can't register hooks.\n");
+		goto cleanup_rule_init;
+	}
+	nf_nat_module_is_loaded = 1;
+	return ret;
+
+ cleanup_rule_init:
+	nf_nat_rule_cleanup();
+ cleanup_decode_session:
+#ifdef CONFIG_XFRM
+	ip_nat_decode_session = NULL;
+	synchronize_net();
+#endif
+	nf_conntrack_unregister_cache(NF_CT_F_NAT|NF_CT_F_HELP);
+ cleanup_register_cache:
+	nf_conntrack_unregister_cache(NF_CT_F_NAT);
+	return ret;
+}
+
+static void __exit nf_nat_standalone_fini(void)
+{
+	nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
+	nf_nat_rule_cleanup();
+	nf_nat_module_is_loaded = 0;
+#ifdef CONFIG_XFRM
+	ip_nat_decode_session = NULL;
+	synchronize_net();
+#endif
+	/* Conntrack caches are unregistered in nf_conntrack_cleanup */
+}
+
+module_init(nf_nat_standalone_init);
+module_exit(nf_nat_standalone_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ip_nat");
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
new file mode 100644
index 0000000..2566b79
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_tftp.c
@@ -0,0 +1,52 @@
+/* (C) 2001-2002 Magnus Boden <mb@ozaba.mine.nu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/udp.h>
+
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <linux/netfilter/nf_conntrack_tftp.h>
+
+MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>");
+MODULE_DESCRIPTION("TFTP NAT helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ip_nat_tftp");
+
+static unsigned int help(struct sk_buff **pskb,
+			 enum ip_conntrack_info ctinfo,
+			 struct nf_conntrack_expect *exp)
+{
+	struct nf_conn *ct = exp->master;
+
+	exp->saved_proto.udp.port
+		= ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port;
+	exp->dir = IP_CT_DIR_REPLY;
+	exp->expectfn = nf_nat_follow_master;
+	if (nf_conntrack_expect_related(exp) != 0)
+		return NF_DROP;
+	return NF_ACCEPT;
+}
+
+static void __exit nf_nat_tftp_fini(void)
+{
+	rcu_assign_pointer(nf_nat_tftp_hook, NULL);
+	synchronize_rcu();
+}
+
+static int __init nf_nat_tftp_init(void)
+{
+	BUG_ON(rcu_dereference(nf_nat_tftp_hook));
+	rcu_assign_pointer(nf_nat_tftp_hook, help);
+	return 0;
+}
+
+module_init(nf_nat_tftp_init);
+module_exit(nf_nat_tftp_fini);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 9c6cbe3..cd873da 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -38,6 +38,7 @@
 #include <net/protocol.h>
 #include <net/tcp.h>
 #include <net/udp.h>
+#include <net/udplite.h>
 #include <linux/inetdevice.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -66,6 +67,7 @@
 		   tcp_death_row.tw_count, atomic_read(&tcp_sockets_allocated),
 		   atomic_read(&tcp_memory_allocated));
 	seq_printf(seq, "UDP: inuse %d\n", fold_prot_inuse(&udp_prot));
+	seq_printf(seq, "UDPLITE: inuse %d\n", fold_prot_inuse(&udplite_prot));
 	seq_printf(seq, "RAW: inuse %d\n", fold_prot_inuse(&raw_prot));
 	seq_printf(seq,  "FRAG: inuse %d memory %d\n", ip_frag_nqueues,
 		   atomic_read(&ip_frag_mem));
@@ -304,6 +306,17 @@
 			   fold_field((void **) udp_statistics, 
 				      snmp4_udp_list[i].entry));
 
+	/* the UDP and UDP-Lite MIBs are the same */
+	seq_puts(seq, "\nUdpLite:");
+	for (i = 0; snmp4_udp_list[i].name != NULL; i++)
+		seq_printf(seq, " %s", snmp4_udp_list[i].name);
+
+	seq_puts(seq, "\nUdpLite:");
+	for (i = 0; snmp4_udp_list[i].name != NULL; i++)
+		seq_printf(seq, " %lu",
+			   fold_field((void **) udplite_statistics,
+				      snmp4_udp_list[i].entry)     );
+
 	seq_putc(seq, '\n');
 	return 0;
 }
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 5c31dea..a6c63bb 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -854,8 +854,8 @@
 static __inline__ char *get_raw_sock(struct sock *sp, char *tmpbuf, int i)
 {
 	struct inet_sock *inet = inet_sk(sp);
-	unsigned int dest = inet->daddr,
-		     src = inet->rcv_saddr;
+	__be32 dest = inet->daddr,
+	       src = inet->rcv_saddr;
 	__u16 destp = 0,
 	      srcp  = inet->num;
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 925ee4d..11c1671 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -566,11 +566,9 @@
 
 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 {
-	return ((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
-		(fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) |
-#ifdef CONFIG_IP_ROUTE_FWMARK
-		(fl1->nl_u.ip4_u.fwmark ^ fl2->nl_u.ip4_u.fwmark) |
-#endif
+	return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
+		(fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
+		(fl1->mark ^ fl2->mark) |
 		(*(u16 *)&fl1->nl_u.ip4_u.tos ^
 		 *(u16 *)&fl2->nl_u.ip4_u.tos) |
 		(fl1->oif ^ fl2->oif) |
@@ -1643,9 +1641,7 @@
 	rth->fl.fl4_dst	= daddr;
 	rth->rt_dst	= daddr;
 	rth->fl.fl4_tos	= tos;
-#ifdef CONFIG_IP_ROUTE_FWMARK
-	rth->fl.fl4_fwmark= skb->nfmark;
-#endif
+	rth->fl.mark    = skb->mark;
 	rth->fl.fl4_src	= saddr;
 	rth->rt_src	= saddr;
 #ifdef CONFIG_NET_CLS_ROUTE
@@ -1784,14 +1780,12 @@
 #endif
 	if (in_dev->cnf.no_policy)
 		rth->u.dst.flags |= DST_NOPOLICY;
-	if (in_dev->cnf.no_xfrm)
+	if (out_dev->cnf.no_xfrm)
 		rth->u.dst.flags |= DST_NOXFRM;
 	rth->fl.fl4_dst	= daddr;
 	rth->rt_dst	= daddr;
 	rth->fl.fl4_tos	= tos;
-#ifdef CONFIG_IP_ROUTE_FWMARK
-	rth->fl.fl4_fwmark= skb->nfmark;
-#endif
+	rth->fl.mark    = skb->mark;
 	rth->fl.fl4_src	= saddr;
 	rth->rt_src	= saddr;
 	rth->rt_gateway	= daddr;
@@ -1920,10 +1914,8 @@
 					.saddr = saddr,
 					.tos = tos,
 					.scope = RT_SCOPE_UNIVERSE,
-#ifdef CONFIG_IP_ROUTE_FWMARK
-					.fwmark = skb->nfmark
-#endif
 				      } },
+			    .mark = skb->mark,
 			    .iif = dev->ifindex };
 	unsigned	flags = 0;
 	u32		itag = 0;
@@ -2034,9 +2026,7 @@
 	rth->fl.fl4_dst	= daddr;
 	rth->rt_dst	= daddr;
 	rth->fl.fl4_tos	= tos;
-#ifdef CONFIG_IP_ROUTE_FWMARK
-	rth->fl.fl4_fwmark= skb->nfmark;
-#endif
+	rth->fl.mark    = skb->mark;
 	rth->fl.fl4_src	= saddr;
 	rth->rt_src	= saddr;
 #ifdef CONFIG_NET_CLS_ROUTE
@@ -2113,9 +2103,7 @@
 		    rth->fl.fl4_src == saddr &&
 		    rth->fl.iif == iif &&
 		    rth->fl.oif == 0 &&
-#ifdef CONFIG_IP_ROUTE_FWMARK
-		    rth->fl.fl4_fwmark == skb->nfmark &&
-#endif
+		    rth->fl.mark == skb->mark &&
 		    rth->fl.fl4_tos == tos) {
 			rth->u.dst.lastuse = jiffies;
 			dst_hold(&rth->u.dst);
@@ -2239,9 +2227,7 @@
 	rth->fl.fl4_tos	= tos;
 	rth->fl.fl4_src	= oldflp->fl4_src;
 	rth->fl.oif	= oldflp->oif;
-#ifdef CONFIG_IP_ROUTE_FWMARK
-	rth->fl.fl4_fwmark= oldflp->fl4_fwmark;
-#endif
+	rth->fl.mark    = oldflp->mark;
 	rth->rt_dst	= fl->fl4_dst;
 	rth->rt_src	= fl->fl4_src;
 	rth->rt_iif	= oldflp->oif ? : dev_out->ifindex;
@@ -2385,10 +2371,8 @@
 					.scope = ((tos & RTO_ONLINK) ?
 						  RT_SCOPE_LINK :
 						  RT_SCOPE_UNIVERSE),
-#ifdef CONFIG_IP_ROUTE_FWMARK
-					.fwmark = oldflp->fl4_fwmark
-#endif
 				      } },
+			    .mark = oldflp->mark,
 			    .iif = loopback_dev.ifindex,
 			    .oif = oldflp->oif };
 	struct fib_result res;
@@ -2583,9 +2567,7 @@
 		    rth->fl.fl4_src == flp->fl4_src &&
 		    rth->fl.iif == 0 &&
 		    rth->fl.oif == flp->oif &&
-#ifdef CONFIG_IP_ROUTE_FWMARK
-		    rth->fl.fl4_fwmark == flp->fl4_fwmark &&
-#endif
+		    rth->fl.mark == flp->mark &&
 		    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
 			    (IPTOS_RT_MASK | RTO_ONLINK))) {
 
@@ -2647,7 +2629,8 @@
 	struct rtable *rt = (struct rtable*)skb->dst;
 	struct rtmsg *r;
 	struct nlmsghdr *nlh;
-	struct rta_cacheinfo ci;
+	long expires;
+	u32 id = 0, ts = 0, tsage = 0, error;
 
 	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
 	if (nlh == NULL)
@@ -2694,20 +2677,13 @@
 	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
 		goto nla_put_failure;
 
-	ci.rta_lastuse	= jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
-	ci.rta_used	= rt->u.dst.__use;
-	ci.rta_clntref	= atomic_read(&rt->u.dst.__refcnt);
-	if (rt->u.dst.expires)
-		ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
-	else
-		ci.rta_expires = 0;
-	ci.rta_error	= rt->u.dst.error;
-	ci.rta_id	= ci.rta_ts = ci.rta_tsage = 0;
+	error = rt->u.dst.error;
+	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
 	if (rt->peer) {
-		ci.rta_id = rt->peer->ip_id_count;
+		id = rt->peer->ip_id_count;
 		if (rt->peer->tcp_ts_stamp) {
-			ci.rta_ts = rt->peer->tcp_ts;
-			ci.rta_tsage = xtime.tv_sec - rt->peer->tcp_ts_stamp;
+			ts = rt->peer->tcp_ts;
+			tsage = xtime.tv_sec - rt->peer->tcp_ts_stamp;
 		}
 	}
 
@@ -2726,7 +2702,7 @@
 				} else {
 					if (err == -EMSGSIZE)
 						goto nla_put_failure;
-					ci.rta_error = err;
+					error = err;
 				}
 			}
 		} else
@@ -2734,7 +2710,9 @@
 			NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
 	}
 
-	NLA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
+	if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
+			       expires, error) < 0)
+		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
 
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 661e0a4..6b19530 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -35,23 +35,23 @@
 #define COOKIEBITS 24	/* Upper bits store count */
 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
 
-static u32 cookie_hash(u32 saddr, u32 daddr, u32 sport, u32 dport,
+static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
 		       u32 count, int c)
 {
 	__u32 tmp[16 + 5 + SHA_WORKSPACE_WORDS];
 
 	memcpy(tmp + 3, syncookie_secret[c], sizeof(syncookie_secret[c]));
-	tmp[0] = saddr;
-	tmp[1] = daddr;
-	tmp[2] = (sport << 16) + dport;
+	tmp[0] = (__force u32)saddr;
+	tmp[1] = (__force u32)daddr;
+	tmp[2] = ((__force u32)sport << 16) + (__force u32)dport;
 	tmp[3] = count;
 	sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5);
 
 	return tmp[17];
 }
 
-static __u32 secure_tcp_syn_cookie(__u32 saddr, __u32 daddr, __u16 sport,
-				   __u16 dport, __u32 sseq, __u32 count,
+static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
+				   __be16 dport, __u32 sseq, __u32 count,
 				   __u32 data)
 {
 	/*
@@ -80,8 +80,8 @@
  * "maxdiff" if the current (passed-in) "count".  The return value
  * is (__u32)-1 if this test fails.
  */
-static __u32 check_tcp_syn_cookie(__u32 cookie, __u32 saddr, __u32 daddr,
-				  __u16 sport, __u16 dport, __u32 sseq,
+static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
+				  __be16 sport, __be16 dport, __u32 sseq,
 				  __u32 count, __u32 maxdiff)
 {
 	__u32 diff;
@@ -220,7 +220,7 @@
 	}
 	ireq = inet_rsk(req);
 	treq = tcp_rsk(req);
-	treq->rcv_isn		= htonl(skb->h.th->seq) - 1;
+	treq->rcv_isn		= ntohl(skb->h.th->seq) - 1;
 	treq->snt_isn		= cookie; 
 	req->mss		= mss;
  	ireq->rmt_port		= skb->h.th->source;
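
The syncookie conversions above are type annotations only: __be32/__be16 mark
network-byte-order values for sparse, and the __force casts feed their raw bits
into the hash words unchanged.  A minimal kernel-context sketch of the pattern
(the helper name is illustrative, not part of the patch):

static inline u32 fold_ports(__be16 sport, __be16 dport)
{
	/* The digest covers the wire-order bits as-is; only the sparse
	 * type changes, no byte swapping happens here. */
	return ((__force u32)sport << 16) + (__force u32)dport;
}
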
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 15061b3..dfcf47f 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -129,6 +129,67 @@
 	return ret;
 }
 
+static int proc_tcp_available_congestion_control(ctl_table *ctl,
+						 int write, struct file * filp,
+						 void __user *buffer, size_t *lenp,
+						 loff_t *ppos)
+{
+	ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
+	int ret;
+
+	tbl.data = kmalloc(tbl.maxlen, GFP_USER);
+	if (!tbl.data)
+		return -ENOMEM;
+	tcp_get_available_congestion_control(tbl.data, TCP_CA_BUF_MAX);
+	ret = proc_dostring(&tbl, write, filp, buffer, lenp, ppos);
+	kfree(tbl.data);
+	return ret;
+}
+
+static int proc_allowed_congestion_control(ctl_table *ctl,
+					   int write, struct file * filp,
+					   void __user *buffer, size_t *lenp,
+					   loff_t *ppos)
+{
+	ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
+	int ret;
+
+	tbl.data = kmalloc(tbl.maxlen, GFP_USER);
+	if (!tbl.data)
+		return -ENOMEM;
+
+	tcp_get_allowed_congestion_control(tbl.data, tbl.maxlen);
+	ret = proc_dostring(&tbl, write, filp, buffer, lenp, ppos);
+	if (write && ret == 0)
+		ret = tcp_set_allowed_congestion_control(tbl.data);
+	kfree(tbl.data);
+	return ret;
+}
+
+static int strategy_allowed_congestion_control(ctl_table *table, int __user *name,
+					       int nlen, void __user *oldval,
+					       size_t __user *oldlenp,
+					       void __user *newval, size_t newlen,
+					       void **context)
+{
+	ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
+	int ret;
+
+	tbl.data = kmalloc(tbl.maxlen, GFP_USER);
+	if (!tbl.data)
+		return -ENOMEM;
+
+	tcp_get_available_congestion_control(tbl.data, tbl.maxlen);
+	ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen,
+			    context);
+	if (ret == 0 && newval && newlen)
+		ret = tcp_set_allowed_congestion_control(tbl.data);
+	kfree(tbl.data);
+
+	return ret;
+
+}
+
 ctl_table ipv4_table[] = {
         {
 		.ctl_name	= NET_IPV4_TCP_TIMESTAMPS,
@@ -731,6 +792,21 @@
 		.proc_handler	= &proc_dointvec,
 	},
 #endif /* CONFIG_NETLABEL */
+	{
+		.ctl_name	= NET_TCP_AVAIL_CONG_CONTROL,
+		.procname	= "tcp_available_congestion_control",
+		.maxlen		= TCP_CA_BUF_MAX,
+		.mode		= 0444,
+		.proc_handler   = &proc_tcp_available_congestion_control,
+	},
+	{
+		.ctl_name	= NET_TCP_ALLOWED_CONG_CONTROL,
+		.procname	= "tcp_allowed_congestion_control",
+		.maxlen		= TCP_CA_BUF_MAX,
+		.mode		= 0644,
+		.proc_handler   = &proc_allowed_congestion_control,
+		.strategy	= &strategy_allowed_congestion_control,
+	},
 	{ .ctl_name = 0 }
 };
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c05e8ed..090c690 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -258,6 +258,7 @@
 #include <linux/bootmem.h>
 #include <linux/cache.h>
 #include <linux/err.h>
+#include <linux/crypto.h>
 
 #include <net/icmp.h>
 #include <net/tcp.h>
@@ -462,11 +463,12 @@
 static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
 			      struct sk_buff *skb)
 {
-	skb->csum = 0;
-	TCP_SKB_CB(skb)->seq = tp->write_seq;
-	TCP_SKB_CB(skb)->end_seq = tp->write_seq;
-	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
-	TCP_SKB_CB(skb)->sacked = 0;
+	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
+
+	skb->csum    = 0;
+	tcb->seq     = tcb->end_seq = tp->write_seq;
+	tcb->flags   = TCPCB_FLAG_ACK;
+	tcb->sacked  = 0;
 	skb_header_release(skb);
 	__skb_queue_tail(&sk->sk_write_queue, skb);
 	sk_charge_skb(sk, skb);
@@ -1942,6 +1944,13 @@
 		}
 		break;
 
+#ifdef CONFIG_TCP_MD5SIG
+	case TCP_MD5SIG:
+		/* Read the IP->Key mappings from userspace */
+		err = tp->af_specific->md5_parse(sk, optval, optlen);
+		break;
+#endif
+
 	default:
 		err = -ENOPROTOOPT;
 		break;
@@ -2154,7 +2163,7 @@
 	struct tcphdr *th;
 	unsigned thlen;
 	unsigned int seq;
-	unsigned int delta;
+	__be32 delta;
 	unsigned int oldlen;
 	unsigned int len;
 
@@ -2207,7 +2216,8 @@
 	do {
 		th->fin = th->psh = 0;
 
-		th->check = ~csum_fold(th->check + delta);
+		th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
+				       (__force u32)delta));
 		if (skb->ip_summed != CHECKSUM_PARTIAL)
 			th->check = csum_fold(csum_partial(skb->h.raw, thlen,
 							   skb->csum));
@@ -2221,7 +2231,8 @@
 	} while (skb->next);
 
 	delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
-	th->check = ~csum_fold(th->check + delta);
+	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
+				(__force u32)delta));
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		th->check = csum_fold(csum_partial(skb->h.raw, thlen,
 						   skb->csum));
@@ -2231,6 +2242,135 @@
 }
 EXPORT_SYMBOL(tcp_tso_segment);
 
+#ifdef CONFIG_TCP_MD5SIG
+static unsigned long tcp_md5sig_users;
+static struct tcp_md5sig_pool **tcp_md5sig_pool;
+static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
+
+static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
+{
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
+		if (p) {
+			if (p->md5_desc.tfm)
+				crypto_free_hash(p->md5_desc.tfm);
+			kfree(p);
+			p = NULL;
+		}
+	}
+	free_percpu(pool);
+}
+
+void tcp_free_md5sig_pool(void)
+{
+	struct tcp_md5sig_pool **pool = NULL;
+
+	spin_lock(&tcp_md5sig_pool_lock);
+	if (--tcp_md5sig_users == 0) {
+		pool = tcp_md5sig_pool;
+		tcp_md5sig_pool = NULL;
+	}
+	spin_unlock(&tcp_md5sig_pool_lock);
+	if (pool)
+		__tcp_free_md5sig_pool(pool);
+}
+
+EXPORT_SYMBOL(tcp_free_md5sig_pool);
+
+static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
+{
+	int cpu;
+	struct tcp_md5sig_pool **pool;
+
+	pool = alloc_percpu(struct tcp_md5sig_pool *);
+	if (!pool)
+		return NULL;
+
+	for_each_possible_cpu(cpu) {
+		struct tcp_md5sig_pool *p;
+		struct crypto_hash *hash;
+
+		p = kzalloc(sizeof(*p), GFP_KERNEL);
+		if (!p)
+			goto out_free;
+		*per_cpu_ptr(pool, cpu) = p;
+
+		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+		if (!hash || IS_ERR(hash))
+			goto out_free;
+
+		p->md5_desc.tfm = hash;
+	}
+	return pool;
+out_free:
+	__tcp_free_md5sig_pool(pool);
+	return NULL;
+}
+
+struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
+{
+	struct tcp_md5sig_pool **pool;
+	int alloc = 0;
+
+retry:
+	spin_lock(&tcp_md5sig_pool_lock);
+	pool = tcp_md5sig_pool;
+	if (tcp_md5sig_users++ == 0) {
+		alloc = 1;
+		spin_unlock(&tcp_md5sig_pool_lock);
+	} else if (!pool) {
+		tcp_md5sig_users--;
+		spin_unlock(&tcp_md5sig_pool_lock);
+		cpu_relax();
+		goto retry;
+	} else
+		spin_unlock(&tcp_md5sig_pool_lock);
+
+	if (alloc) {
+		/* we cannot hold spinlock here because this may sleep. */
+		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
+		spin_lock(&tcp_md5sig_pool_lock);
+		if (!p) {
+			tcp_md5sig_users--;
+			spin_unlock(&tcp_md5sig_pool_lock);
+			return NULL;
+		}
+		pool = tcp_md5sig_pool;
+		if (pool) {
+			/* oops, it has already been assigned. */
+			spin_unlock(&tcp_md5sig_pool_lock);
+			__tcp_free_md5sig_pool(p);
+		} else {
+			tcp_md5sig_pool = pool = p;
+			spin_unlock(&tcp_md5sig_pool_lock);
+		}
+	}
+	return pool;
+}
+
+EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
+
+struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
+{
+	struct tcp_md5sig_pool **p;
+	spin_lock(&tcp_md5sig_pool_lock);
+	p = tcp_md5sig_pool;
+	if (p)
+		tcp_md5sig_users++;
+	spin_unlock(&tcp_md5sig_pool_lock);
+	return (p ? *per_cpu_ptr(p, cpu) : NULL);
+}
+
+EXPORT_SYMBOL(__tcp_get_md5sig_pool);
+
+void __tcp_put_md5sig_pool(void) {
+	__tcp_free_md5sig_pool(tcp_md5sig_pool);
+}
+
+EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+#endif
+
 extern void __skb_cb_too_small_for_tcp(int, int);
 extern struct tcp_congestion_ops tcp_reno;
 
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 1e2982f..5ca7723 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -113,7 +113,7 @@
 	spin_lock(&tcp_cong_list_lock);
 	ca = tcp_ca_find(name);
 #ifdef CONFIG_KMOD
-	if (!ca) {
+	if (!ca && capable(CAP_SYS_MODULE)) {
 		spin_unlock(&tcp_cong_list_lock);
 
 		request_module("tcp_%s", name);
@@ -123,6 +123,7 @@
 #endif
 
 	if (ca) {
+		ca->non_restricted = 1;	/* default is always allowed */
 		list_move(&ca->list, &tcp_cong_list);
 		ret = 0;
 	}
@@ -139,6 +140,22 @@
 late_initcall(tcp_congestion_default);
 
 
+/* Build string with list of available congestion control values */
+void tcp_get_available_congestion_control(char *buf, size_t maxlen)
+{
+	struct tcp_congestion_ops *ca;
+	size_t offs = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
+		offs += snprintf(buf + offs, maxlen - offs,
+				 "%s%s",
+				 offs == 0 ? "" : " ", ca->name);
+
+	}
+	rcu_read_unlock();
+}
+
 /* Get current default congestion control */
 void tcp_get_default_congestion_control(char *name)
 {
@@ -152,6 +169,64 @@
 	rcu_read_unlock();
 }
 
+/* Build list of non-restricted congestion control values */
+void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
+{
+	struct tcp_congestion_ops *ca;
+	size_t offs = 0;
+
+	*buf = '\0';
+	rcu_read_lock();
+	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
+		if (!ca->non_restricted)
+			continue;
+		offs += snprintf(buf + offs, maxlen - offs,
+				 "%s%s",
+				 offs == 0 ? "" : " ", ca->name);
+
+	}
+	rcu_read_unlock();
+}
+
+/* Change list of non-restricted congestion control */
+int tcp_set_allowed_congestion_control(char *val)
+{
+	struct tcp_congestion_ops *ca;
+	char *clone, *name;
+	int ret = 0;
+
+	clone = kstrdup(val, GFP_USER);
+	if (!clone)
+		return -ENOMEM;
+
+	spin_lock(&tcp_cong_list_lock);
+	/* pass 1 check for bad entries */
+	while ((name = strsep(&clone, " ")) && *name) {
+		ca = tcp_ca_find(name);
+		if (!ca) {
+			ret = -ENOENT;
+			goto out;
+		}
+	}
+
+	/* pass 2 clear */
+	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
+		ca->non_restricted = 0;
+
+	/* pass 3 mark as allowed */
+	while ((name = strsep(&val, " ")) && *name) {
+		ca = tcp_ca_find(name);
+		WARN_ON(!ca);
+		if (ca)
+			ca->non_restricted = 1;
+	}
+out:
+	spin_unlock(&tcp_cong_list_lock);
+
+	return ret;
+}
+
+
 /* Change congestion control for socket */
 int tcp_set_congestion_control(struct sock *sk, const char *name)
 {
@@ -161,12 +236,25 @@
 
 	rcu_read_lock();
 	ca = tcp_ca_find(name);
+	/* no change asking for existing value */
 	if (ca == icsk->icsk_ca_ops)
 		goto out;
 
+#ifdef CONFIG_KMOD
+	/* not found attempt to autoload module */
+	if (!ca && capable(CAP_SYS_MODULE)) {
+		rcu_read_unlock();
+		request_module("tcp_%s", name);
+		rcu_read_lock();
+		ca = tcp_ca_find(name);
+	}
+#endif
 	if (!ca)
 		err = -ENOENT;
 
+	else if (!(ca->non_restricted || capable(CAP_NET_ADMIN)))
+		err = -EPERM;
+
 	else if (!try_module_get(ca->owner))
 		err = -EBUSY;
 
@@ -268,6 +356,7 @@
 
 struct tcp_congestion_ops tcp_reno = {
 	.name		= "reno",
+	.non_restricted = 1,
 	.owner		= THIS_MODULE,
 	.ssthresh	= tcp_reno_ssthresh,
 	.cong_avoid	= tcp_reno_cong_avoid,
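
With the allow-list above, a per-socket choice now succeeds only for modules
marked non_restricted or for callers with CAP_NET_ADMIN.  A hedged userspace
sketch using the pre-existing TCP_CONGESTION option; "vegas" stands in for any
loaded-but-restricted module, and the fallback define mirrors the kernel value:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef TCP_CONGESTION
#define TCP_CONGESTION 13	/* from linux/tcp.h */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	const char name[] = "vegas";

	/* Unprivileged callers now get EPERM unless the module appears in
	 * tcp_allowed_congestion_control. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name,
		       strlen(name)) < 0)
		perror("TCP_CONGESTION");
	return 0;
}
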
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 283be3c..753987a 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -26,12 +26,12 @@
 	u32	alpha;		/* Fixed point arith, << 7 */
 	u8	beta;           /* Fixed point arith, << 7 */
 	u8	modeswitch;     /* Delay modeswitch until we had at least one congestion event */
-	u32	last_cong;	/* Time since last congestion event end */
-	u32	undo_last_cong;
 	u16	pkts_acked;
 	u32	packetcount;
 	u32	minRTT;
 	u32	maxRTT;
+	u32	last_cong;	/* Time since last congestion event end */
+	u32	undo_last_cong;
 
 	u32	undo_maxRTT;
 	u32	undo_old_maxB;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cf06acc..c701f6a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2677,6 +2677,14 @@
 					   opt_rx->sack_ok) {
 						TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
 					}
+#ifdef CONFIG_TCP_MD5SIG
+				case TCPOPT_MD5SIG:
+					/*
+					 * The MD5 Hash has already been
+					 * checked (see tcp_v{4,6}_do_rcv()).
+					 */
+					break;
+#endif
 	  			};
 	  			ptr+=opsize-2;
 	  			length-=opsize;
@@ -3782,9 +3790,9 @@
 	return err;
 }
 
-static int __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
+static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
 {
-	int result;
+	__sum16 result;
 
 	if (sock_owned_by_user(sk)) {
 		local_bh_enable();
@@ -4227,9 +4235,11 @@
 		 * Change state from SYN-SENT only after copied_seq
 		 * is initialized. */
 		tp->copied_seq = tp->rcv_nxt;
-		mb();
+		smp_mb();
 		tcp_set_state(sk, TCP_ESTABLISHED);
 
+		security_inet_conn_established(sk, skb);
+
 		/* Make sure socket is routed, for correct metrics.  */
 		icsk->icsk_af_ops->rebuild_header(sk);
 
@@ -4473,7 +4483,7 @@
 		case TCP_SYN_RECV:
 			if (acceptable) {
 				tp->copied_seq = tp->rcv_nxt;
-				mb();
+				smp_mb();
 				tcp_set_state(sk, TCP_ESTABLISHED);
 				sk->sk_state_change(sk);
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 22ef8bd..a1222d6 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -78,6 +78,9 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+
 int sysctl_tcp_tw_reuse __read_mostly;
 int sysctl_tcp_low_latency __read_mostly;
 
@@ -89,10 +92,19 @@
 
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
 
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
+						   __be32 addr);
+static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+				   __be32 saddr, __be32 daddr,
+				   struct tcphdr *th, int protocol,
+				   int tcplen);
+#endif
+
 struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
-	.lhash_lock	= __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
-	.lhash_users	= ATOMIC_INIT(0),
-	.lhash_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
+	.lhash_lock  = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
+	.lhash_users = ATOMIC_INIT(0),
+	.lhash_wait  = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
 };
 
 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
@@ -111,7 +123,7 @@
 	inet_unhash(&tcp_hashinfo, sk);
 }
 
-static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
+static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
 {
 	return secure_tcp_sequence_number(skb->nh.iph->daddr,
 					  skb->nh.iph->saddr,
@@ -205,13 +217,14 @@
 	if (tcp_death_row.sysctl_tw_recycle &&
 	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
 		struct inet_peer *peer = rt_get_peer(rt);
-
-		/* VJ's idea. We save last timestamp seen from
-		 * the destination in peer table, when entering state TIME-WAIT
-		 * and initialize rx_opt.ts_recent from it, when trying new connection.
+		/*
+		 * VJ's idea. We save last timestamp seen from
+		 * the destination in peer table, when entering state
+	 * TIME-WAIT and initialize rx_opt.ts_recent from it,
+		 * when trying new connection.
 		 */
-
-		if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
+		if (peer != NULL &&
+		    peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
 			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
 			tp->rx_opt.ts_recent = peer->tcp_ts;
 		}
@@ -236,7 +249,8 @@
 	if (err)
 		goto failure;
 
-	err = ip_route_newports(&rt, IPPROTO_TCP, inet->sport, inet->dport, sk);
+	err = ip_route_newports(&rt, IPPROTO_TCP,
+				inet->sport, inet->dport, sk);
 	if (err)
 		goto failure;
 
@@ -260,7 +274,10 @@
 	return 0;
 
 failure:
-	/* This unhashes the socket and releases the local port, if necessary. */
+	/*
+	 * This unhashes the socket and releases the local port,
+	 * if necessary.
+	 */
 	tcp_set_state(sk, TCP_CLOSE);
 	ip_rt_put(rt);
 	sk->sk_route_caps = 0;
@@ -485,8 +502,9 @@
 	struct tcphdr *th = skb->h.th;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
-		skb->csum = offsetof(struct tcphdr, check);
+		th->check = ~tcp_v4_check(th, len,
+					  inet->saddr, inet->daddr, 0);
+		skb->csum_offset = offsetof(struct tcphdr, check);
 	} else {
 		th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
 					 csum_partial((char *)th,
@@ -508,7 +526,7 @@
 
 	th->check = 0;
 	th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
-	skb->csum = offsetof(struct tcphdr, check);
+	skb->csum_offset = offsetof(struct tcphdr, check);
 	skb->ip_summed = CHECKSUM_PARTIAL;
 	return 0;
 }
@@ -526,11 +544,19 @@
  *	Exception: precedence violation. We do not implement it in any case.
  */
 
-static void tcp_v4_send_reset(struct sk_buff *skb)
+static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcphdr *th = skb->h.th;
-	struct tcphdr rth;
+	struct {
+		struct tcphdr th;
+#ifdef CONFIG_TCP_MD5SIG
+		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
+#endif
+	} rep;
 	struct ip_reply_arg arg;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *key;
+#endif
 
 	/* Never send a reset in response to a reset. */
 	if (th->rst)
@@ -540,29 +566,49 @@
 		return;
 
 	/* Swap the send and the receive. */
-	memset(&rth, 0, sizeof(struct tcphdr));
-	rth.dest   = th->source;
-	rth.source = th->dest;
-	rth.doff   = sizeof(struct tcphdr) / 4;
-	rth.rst    = 1;
+	memset(&rep, 0, sizeof(rep));
+	rep.th.dest   = th->source;
+	rep.th.source = th->dest;
+	rep.th.doff   = sizeof(struct tcphdr) / 4;
+	rep.th.rst    = 1;
 
 	if (th->ack) {
-		rth.seq = th->ack_seq;
+		rep.th.seq = th->ack_seq;
 	} else {
-		rth.ack = 1;
-		rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
-				    skb->len - (th->doff << 2));
+		rep.th.ack = 1;
+		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
+				       skb->len - (th->doff << 2));
 	}
 
-	memset(&arg, 0, sizeof arg);
-	arg.iov[0].iov_base = (unsigned char *)&rth;
-	arg.iov[0].iov_len  = sizeof rth;
+	memset(&arg, 0, sizeof(arg));
+	arg.iov[0].iov_base = (unsigned char *)&rep;
+	arg.iov[0].iov_len  = sizeof(rep.th);
+
+#ifdef CONFIG_TCP_MD5SIG
+	key = sk ? tcp_v4_md5_do_lookup(sk, skb->nh.iph->daddr) : NULL;
+	if (key) {
+		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
+				   (TCPOPT_NOP << 16) |
+				   (TCPOPT_MD5SIG << 8) |
+				   TCPOLEN_MD5SIG);
+		/* Update length and the length the header thinks exists */
+		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
+		rep.th.doff = arg.iov[0].iov_len / 4;
+
+		tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1],
+					key,
+					skb->nh.iph->daddr,
+					skb->nh.iph->saddr,
+					&rep.th, IPPROTO_TCP,
+					arg.iov[0].iov_len);
+	}
+#endif
 	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
-				      skb->nh.iph->saddr, /*XXX*/
+				      skb->nh.iph->saddr, /* XXX */
 				      sizeof(struct tcphdr), IPPROTO_TCP, 0);
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 
-	ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
+	ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
 
 	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
 	TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
@@ -572,28 +618,37 @@
    outside socket context is ugly, certainly. What can I do?
  */
 
-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
+			    struct sk_buff *skb, u32 seq, u32 ack,
 			    u32 win, u32 ts)
 {
 	struct tcphdr *th = skb->h.th;
 	struct {
 		struct tcphdr th;
-		u32 tsopt[TCPOLEN_TSTAMP_ALIGNED >> 2];
+		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
+#ifdef CONFIG_TCP_MD5SIG
+			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
+#endif
+			];
 	} rep;
 	struct ip_reply_arg arg;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *key;
+	struct tcp_md5sig_key tw_key;
+#endif
 
 	memset(&rep.th, 0, sizeof(struct tcphdr));
-	memset(&arg, 0, sizeof arg);
+	memset(&arg, 0, sizeof(arg));
 
 	arg.iov[0].iov_base = (unsigned char *)&rep;
 	arg.iov[0].iov_len  = sizeof(rep.th);
 	if (ts) {
-		rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
-				     (TCPOPT_TIMESTAMP << 8) |
-				     TCPOLEN_TIMESTAMP);
-		rep.tsopt[1] = htonl(tcp_time_stamp);
-		rep.tsopt[2] = htonl(ts);
-		arg.iov[0].iov_len = sizeof(rep);
+		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+				   (TCPOPT_TIMESTAMP << 8) |
+				   TCPOLEN_TIMESTAMP);
+		rep.opt[1] = htonl(tcp_time_stamp);
+		rep.opt[2] = htonl(ts);
+		arg.iov[0].iov_len = TCPOLEN_TSTAMP_ALIGNED;
 	}
 
 	/* Swap the send and the receive. */
@@ -605,8 +660,44 @@
 	rep.th.ack     = 1;
 	rep.th.window  = htons(win);
 
+#ifdef CONFIG_TCP_MD5SIG
+	/*
+	 * The SKB holds an incoming packet, but may not have a valid ->sk
+	 * pointer. This is especially the case when we're dealing with a
+	 * TIME_WAIT ack, because the sk structure is long gone, and only
+	 * the tcp_timewait_sock remains. So the md5 key is stashed in that
+	 * structure, and we use it in preference.  I believe that (twsk ||
+	 * skb->sk) holds true, but we program defensively.
+	 */
+	if (!twsk && skb->sk) {
+		key = tcp_v4_md5_do_lookup(skb->sk, skb->nh.iph->daddr);
+	} else if (twsk && twsk->tw_md5_keylen) {
+		tw_key.key = twsk->tw_md5_key;
+		tw_key.keylen = twsk->tw_md5_keylen;
+		key = &tw_key;
+	} else
+		key = NULL;
+
+	if (key) {
+		int offset = (ts) ? 3 : 0;
+
+		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
+					  (TCPOPT_NOP << 16) |
+					  (TCPOPT_MD5SIG << 8) |
+					  TCPOLEN_MD5SIG);
+		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
+		rep.th.doff = arg.iov[0].iov_len/4;
+
+		tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset],
+					key,
+					skb->nh.iph->daddr,
+					skb->nh.iph->saddr,
+					&rep.th, IPPROTO_TCP,
+					arg.iov[0].iov_len);
+	}
+#endif
 	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
-				      skb->nh.iph->saddr, /*XXX*/
+				      skb->nh.iph->saddr, /* XXX */
 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 
@@ -618,17 +709,20 @@
 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 {
 	struct inet_timewait_sock *tw = inet_twsk(sk);
-	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
-			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent);
+	tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+			tcptw->tw_ts_recent);
 
 	inet_twsk_put(tw);
 }
 
-static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
+static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
+				  struct request_sock *req)
 {
-	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
+	tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1,
+			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
 			req->ts_recent);
 }
 
@@ -662,8 +756,7 @@
 		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
 					    ireq->rmt_addr,
 					    ireq->opt);
-		if (err == NET_XMIT_CN)
-			err = 0;
+		err = net_xmit_eval(err);
 	}
 
 out:
@@ -715,7 +808,423 @@
 	return dopt;
 }
 
-struct request_sock_ops tcp_request_sock_ops = {
+#ifdef CONFIG_TCP_MD5SIG
+/*
+ * RFC2385 MD5 checksumming requires a mapping of
+ * IP address->MD5 Key.
+ * We need to maintain these in the sk structure.
+ */
+
+/* Find the Key structure for an address.  */
+static struct tcp_md5sig_key *
+			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int i;
+
+	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
+		return NULL;
+	for (i = 0; i < tp->md5sig_info->entries4; i++) {
+		if (tp->md5sig_info->keys4[i].addr == addr)
+			return (struct tcp_md5sig_key *)
+						&tp->md5sig_info->keys4[i];
+	}
+	return NULL;
+}
+
+struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
+					 struct sock *addr_sk)
+{
+	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr);
+}
+
+EXPORT_SYMBOL(tcp_v4_md5_lookup);
+
+static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
+						      struct request_sock *req)
+{
+	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
+}
+
+/* This can be called on a newly created socket, from other files */
+int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
+		      u8 *newkey, u8 newkeylen)
+{
+	/* Add Key to the list */
+	struct tcp4_md5sig_key *key;
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcp4_md5sig_key *keys;
+
+	key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr);
+	if (key) {
+		/* Pre-existing entry - just update that one. */
+		kfree(key->key);
+		key->key = newkey;
+		key->keylen = newkeylen;
+	} else {
+		struct tcp_md5sig_info *md5sig;
+
+		if (!tp->md5sig_info) {
+			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
+						  GFP_ATOMIC);
+			if (!tp->md5sig_info) {
+				kfree(newkey);
+				return -ENOMEM;
+			}
+		}
+		if (tcp_alloc_md5sig_pool() == NULL) {
+			kfree(newkey);
+			return -ENOMEM;
+		}
+		md5sig = tp->md5sig_info;
+
+		if (md5sig->alloced4 == md5sig->entries4) {
+			keys = kmalloc((sizeof(*keys) *
+				        (md5sig->entries4 + 1)), GFP_ATOMIC);
+			if (!keys) {
+				kfree(newkey);
+				tcp_free_md5sig_pool();
+				return -ENOMEM;
+			}
+
+			if (md5sig->entries4)
+				memcpy(keys, md5sig->keys4,
+				       sizeof(*keys) * md5sig->entries4);
+
+			/* Free old key list, and reference new one */
+			if (md5sig->keys4)
+				kfree(md5sig->keys4);
+			md5sig->keys4 = keys;
+			md5sig->alloced4++;
+		}
+		md5sig->entries4++;
+		md5sig->keys4[md5sig->entries4 - 1].addr   = addr;
+		md5sig->keys4[md5sig->entries4 - 1].key    = newkey;
+		md5sig->keys4[md5sig->entries4 - 1].keylen = newkeylen;
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(tcp_v4_md5_do_add);
+
+static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
+			       u8 *newkey, u8 newkeylen)
+{
+	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr,
+				 newkey, newkeylen);
+}
+
+int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int i;
+
+	for (i = 0; i < tp->md5sig_info->entries4; i++) {
+		if (tp->md5sig_info->keys4[i].addr == addr) {
+			/* Free the key */
+			kfree(tp->md5sig_info->keys4[i].key);
+			tp->md5sig_info->entries4--;
+
+			if (tp->md5sig_info->entries4 == 0) {
+				kfree(tp->md5sig_info->keys4);
+				tp->md5sig_info->keys4 = NULL;
+			} else if (tp->md5sig_info->entries4 != i) {
+				/* Need to do some manipulation */
+				memcpy(&tp->md5sig_info->keys4[i],
+				       &tp->md5sig_info->keys4[i+1],
+				       (tp->md5sig_info->entries4 - i) *
+				        sizeof(struct tcp4_md5sig_key));
+			}
+			tcp_free_md5sig_pool();
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+EXPORT_SYMBOL(tcp_v4_md5_do_del);
+
+static void tcp_v4_clear_md5_list(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	/* Free each key, then the set of keys,
+	 * the crypto element, and then decrement our
+	 * hold on the last resort crypto.
+	 */
+	if (tp->md5sig_info->entries4) {
+		int i;
+		for (i = 0; i < tp->md5sig_info->entries4; i++)
+			kfree(tp->md5sig_info->keys4[i].key);
+		tp->md5sig_info->entries4 = 0;
+		tcp_free_md5sig_pool();
+	}
+	if (tp->md5sig_info->keys4) {
+		kfree(tp->md5sig_info->keys4);
+		tp->md5sig_info->keys4 = NULL;
+		tp->md5sig_info->alloced4  = 0;
+	}
+}
+
+static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
+				 int optlen)
+{
+	struct tcp_md5sig cmd;
+	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
+	u8 *newkey;
+
+	if (optlen < sizeof(cmd))
+		return -EINVAL;
+
+	if (copy_from_user(&cmd, optval, sizeof(cmd)))
+		return -EFAULT;
+
+	if (sin->sin_family != AF_INET)
+		return -EINVAL;
+
+	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
+		if (!tcp_sk(sk)->md5sig_info)
+			return -ENOENT;
+		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
+	}
+
+	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
+		return -EINVAL;
+
+	if (!tcp_sk(sk)->md5sig_info) {
+		struct tcp_sock *tp = tcp_sk(sk);
+		struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL);
+
+		if (!p)
+			return -EINVAL;
+
+		tp->md5sig_info = p;
+
+	}
+
+	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
+	if (!newkey)
+		return -ENOMEM;
+	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
+				 newkey, cmd.tcpm_keylen);
+}
+
+static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+				   __be32 saddr, __be32 daddr,
+				   struct tcphdr *th, int protocol,
+				   int tcplen)
+{
+	struct scatterlist sg[4];
+	__u16 data_len;
+	int block = 0;
+	__sum16 old_checksum;
+	struct tcp_md5sig_pool *hp;
+	struct tcp4_pseudohdr *bp;
+	struct hash_desc *desc;
+	int err;
+	unsigned int nbytes = 0;
+
+	/*
+	 * Okay, so RFC2385 is turned on for this connection,
+	 * so we need to generate the MD5 hash for the packet now.
+	 */
+
+	hp = tcp_get_md5sig_pool();
+	if (!hp)
+		goto clear_hash_noput;
+
+	bp = &hp->md5_blk.ip4;
+	desc = &hp->md5_desc;
+
+	/*
+	 * 1. the TCP pseudo-header (in the order: source IP address,
+	 * destination IP address, zero-padded protocol number, and
+	 * segment length)
+	 */
+	bp->saddr = saddr;
+	bp->daddr = daddr;
+	bp->pad = 0;
+	bp->protocol = protocol;
+	bp->len = htons(tcplen);
+	sg_set_buf(&sg[block++], bp, sizeof(*bp));
+	nbytes += sizeof(*bp);
+
+	/* 2. the TCP header, excluding options, and assuming a
+	 * checksum of zero.
+	 */
+	old_checksum = th->check;
+	th->check = 0;
+	sg_set_buf(&sg[block++], th, sizeof(struct tcphdr));
+	nbytes += sizeof(struct tcphdr);
+
+	/* 3. the TCP segment data (if any) */
+	data_len = tcplen - (th->doff << 2);
+	if (data_len > 0) {
+		unsigned char *data = (unsigned char *)th + (th->doff << 2);
+		sg_set_buf(&sg[block++], data, data_len);
+		nbytes += data_len;
+	}
+
+	/* 4. an independently-specified key or password, known to both
+	 * TCPs and presumably connection-specific
+	 */
+	sg_set_buf(&sg[block++], key->key, key->keylen);
+	nbytes += key->keylen;
+
+	/* Now store the Hash into the packet */
+	err = crypto_hash_init(desc);
+	if (err)
+		goto clear_hash;
+	err = crypto_hash_update(desc, sg, nbytes);
+	if (err)
+		goto clear_hash;
+	err = crypto_hash_final(desc, md5_hash);
+	if (err)
+		goto clear_hash;
+
+	/* Reset header, and free up the crypto */
+	tcp_put_md5sig_pool();
+	th->check = old_checksum;
+
+out:
+	return 0;
+clear_hash:
+	tcp_put_md5sig_pool();
+clear_hash_noput:
+	memset(md5_hash, 0, 16);
+	goto out;
+}
+
+int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+			 struct sock *sk,
+			 struct dst_entry *dst,
+			 struct request_sock *req,
+			 struct tcphdr *th, int protocol,
+			 int tcplen)
+{
+	__be32 saddr, daddr;
+
+	if (sk) {
+		saddr = inet_sk(sk)->saddr;
+		daddr = inet_sk(sk)->daddr;
+	} else {
+		struct rtable *rt = (struct rtable *)dst;
+		BUG_ON(!rt);
+		saddr = rt->rt_src;
+		daddr = rt->rt_dst;
+	}
+	return tcp_v4_do_calc_md5_hash(md5_hash, key,
+				       saddr, daddr,
+				       th, protocol, tcplen);
+}
+
+EXPORT_SYMBOL(tcp_v4_calc_md5_hash);
+
+static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
+{
+	/*
+	 * This gets called for each TCP segment that arrives
+	 * so we want to be efficient.
+	 * We have 3 drop cases:
+	 * o No MD5 hash and one expected.
+	 * o MD5 hash and we're not expecting one.
+	 * o MD5 hash and it's wrong.
+	 */
+	__u8 *hash_location = NULL;
+	struct tcp_md5sig_key *hash_expected;
+	struct iphdr *iph = skb->nh.iph;
+	struct tcphdr *th = skb->h.th;
+	int length = (th->doff << 2) - sizeof(struct tcphdr);
+	int genhash;
+	unsigned char *ptr;
+	unsigned char newhash[16];
+
+	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
+
+	/*
+	 * If the TCP option length is less than the TCP_MD5SIG
+	 * option length, then we can shortcut
+	 */
+	if (length < TCPOLEN_MD5SIG) {
+		if (hash_expected)
+			return 1;
+		else
+			return 0;
+	}
+
+	/* Okay, we can't shortcut - we have to grub through the options */
+	ptr = (unsigned char *)(th + 1);
+	while (length > 0) {
+		int opcode = *ptr++;
+		int opsize;
+
+		switch (opcode) {
+		case TCPOPT_EOL:
+			goto done_opts;
+		case TCPOPT_NOP:
+			length--;
+			continue;
+		default:
+			opsize = *ptr++;
+			if (opsize < 2)
+				goto done_opts;
+			if (opsize > length)
+				goto done_opts;
+
+			if (opcode == TCPOPT_MD5SIG) {
+				hash_location = ptr;
+				goto done_opts;
+			}
+		}
+		ptr += opsize-2;
+		length -= opsize;
+	}
+done_opts:
+	/* We've parsed the options - do we have a hash? */
+	if (!hash_expected && !hash_location)
+		return 0;
+
+	if (hash_expected && !hash_location) {
+		LIMIT_NETDEBUG(KERN_INFO "MD5 Hash NOT expected but found "
+			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
+			       NIPQUAD(iph->saddr), ntohs(th->source),
+			       NIPQUAD(iph->daddr), ntohs(th->dest));
+		return 1;
+	}
+
+	if (!hash_expected && hash_location) {
+		LIMIT_NETDEBUG(KERN_INFO "MD5 Hash NOT expected but found "
+			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
+			       NIPQUAD(iph->saddr), ntohs(th->source),
+			       NIPQUAD(iph->daddr), ntohs(th->dest));
+		return 1;
+	}
+
+	/* Okay, so this is hash_expected and hash_location -
+	 * so we need to calculate the checksum.
+	 */
+	genhash = tcp_v4_do_calc_md5_hash(newhash,
+					  hash_expected,
+					  iph->saddr, iph->daddr,
+					  th, sk->sk_protocol,
+					  skb->len);
+
+	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+		if (net_ratelimit()) {
+			printk(KERN_INFO "MD5 Hash failed for "
+			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n",
+			       NIPQUAD(iph->saddr), ntohs(th->source),
+			       NIPQUAD(iph->daddr), ntohs(th->dest),
+			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
+		}
+		return 1;
+	}
+	return 0;
+}
+
+#endif
+
+struct request_sock_ops tcp_request_sock_ops __read_mostly = {
 	.family		=	PF_INET,
 	.obj_size	=	sizeof(struct tcp_request_sock),
 	.rtx_syn_ack	=	tcp_v4_send_synack,
@@ -724,9 +1233,16 @@
 	.send_reset	=	tcp_v4_send_reset,
 };
 
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
+};
+#endif
+
 static struct timewait_sock_ops tcp_timewait_sock_ops = {
 	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
 	.twsk_unique	= tcp_twsk_unique,
+	.twsk_destructor= tcp_twsk_destructor,
 };
 
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
@@ -774,6 +1290,10 @@
 	if (!req)
 		goto drop;
 
+#ifdef CONFIG_TCP_MD5SIG
+	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
+#endif
+
 	tcp_clear_options(&tmp_opt);
 	tmp_opt.mss_clamp = 536;
 	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;
@@ -859,7 +1379,7 @@
 			goto drop_and_free;
 		}
 
-		isn = tcp_v4_init_sequence(sk, skb);
+		isn = tcp_v4_init_sequence(skb);
 	}
 	tcp_rsk(req)->snt_isn = isn;
 
@@ -892,6 +1412,9 @@
 	struct inet_sock *newinet;
 	struct tcp_sock *newtp;
 	struct sock *newsk;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *key;
+#endif
 
 	if (sk_acceptq_is_full(sk))
 		goto exit_overflow;
@@ -926,6 +1449,22 @@
 	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
 	tcp_initialize_rcv_mss(newsk);
 
+#ifdef CONFIG_TCP_MD5SIG
+	/* Copy over the MD5 key from the original socket */
+	if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
+		/*
+		 * We're using one, so create a matching key
+		 * on the newsk structure. If we fail to get
+		 * memory, then we end up not copying the key
+		 * across. Shucks.
+		 */
+		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
+		if (newkey != NULL)
+			tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
+					  newkey, key->keylen);
+	}
+#endif
+
 	__inet_hash(&tcp_hashinfo, newsk, 0);
 	__inet_inherit_port(&tcp_hashinfo, sk, newsk);
 
@@ -971,7 +1510,7 @@
 	return sk;
 }
 
-static int tcp_v4_checksum_init(struct sk_buff *skb)
+static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
 {
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
 		if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
@@ -1001,10 +1540,24 @@
  */
 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 {
+	struct sock *rsk;
+#ifdef CONFIG_TCP_MD5SIG
+	/*
+	 * We really want to reject the packet as early as possible
+	 * if:
+	 *  o We're expecting an MD5'd packet and there is no MD5 tcp option
+	 *  o There is an MD5 option and we're not expecting one
+	 */
+	if (tcp_v4_inbound_md5_hash(sk, skb))
+		goto discard;
+#endif
+
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 		TCP_CHECK_TIMER(sk);
-		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
+		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) {
+			rsk = sk;
 			goto reset;
+		}
 		TCP_CHECK_TIMER(sk);
 		return 0;
 	}
@@ -1018,20 +1571,24 @@
 			goto discard;
 
 		if (nsk != sk) {
-			if (tcp_child_process(sk, nsk, skb))
+			if (tcp_child_process(sk, nsk, skb)) {
+				rsk = nsk;
 				goto reset;
+			}
 			return 0;
 		}
 	}
 
 	TCP_CHECK_TIMER(sk);
-	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
+	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) {
+		rsk = sk;
 		goto reset;
+	}
 	TCP_CHECK_TIMER(sk);
 	return 0;
 
 reset:
-	tcp_v4_send_reset(skb);
+	tcp_v4_send_reset(rsk, skb);
 discard:
 	kfree_skb(skb);
 	/* Be careful here. If this function gets more complicated and
@@ -1140,7 +1697,7 @@
 bad_packet:
 		TCP_INC_STATS_BH(TCP_MIB_INERRS);
 	} else {
-		tcp_v4_send_reset(skb);
+		tcp_v4_send_reset(NULL, skb);
 	}
 
 discard_it:
@@ -1263,6 +1820,15 @@
 #endif
 };
 
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
+	.md5_lookup		= tcp_v4_md5_lookup,
+	.calc_md5_hash		= tcp_v4_calc_md5_hash,
+	.md5_add		= tcp_v4_md5_add_func,
+	.md5_parse		= tcp_v4_parse_md5_keys,
+};
+#endif
+
 /* NOTE: A lot of things set to zero explicitly by call to
  *       sk_alloc() so need not be done here.
  */
@@ -1302,6 +1868,9 @@
 
 	icsk->icsk_af_ops = &ipv4_specific;
 	icsk->icsk_sync_mss = tcp_sync_mss;
+#ifdef CONFIG_TCP_MD5SIG
+	tp->af_specific = &tcp_sock_ipv4_specific;
+#endif
 
 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
@@ -1325,6 +1894,15 @@
 	/* Cleans up our, hopefully empty, out_of_order_queue. */
   	__skb_queue_purge(&tp->out_of_order_queue);
 
+#ifdef CONFIG_TCP_MD5SIG
+	/* Clean up the MD5 key list, if any */
+	if (tp->md5sig_info) {
+		tcp_v4_clear_md5_list(sk);
+		kfree(tp->md5sig_info);
+		tp->md5sig_info = NULL;
+	}
+#endif
+
 #ifdef CONFIG_NET_DMA
 	/* Cleans up our sk_async_wait_queue */
   	__skb_queue_purge(&sk->sk_async_wait_queue);
@@ -1385,7 +1963,7 @@
 	if (st->state == TCP_SEQ_STATE_OPENREQ) {
 		struct request_sock *req = cur;
 
-	       	icsk = inet_csk(st->syn_wait_sk);
+		icsk = inet_csk(st->syn_wait_sk);
 		req = req->dl_next;
 		while (1) {
 			while (req) {
@@ -1395,7 +1973,7 @@
 				}
 				req = req->dl_next;
 			}
-			if (++st->sbucket >= TCP_SYNQ_HSIZE)
+			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
 				break;
 get_req:
 			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
@@ -1543,7 +2121,7 @@
 	while (rc && pos) {
 		rc = established_get_next(seq, rc);
 		--pos;
-	}		
+	}
 	return rc;
 }
 
@@ -1672,7 +2250,7 @@
 	afinfo->seq_fops->read		= seq_read;
 	afinfo->seq_fops->llseek	= seq_lseek;
 	afinfo->seq_fops->release	= seq_release_private;
-	
+
 	p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
 	if (p)
 		p->data = afinfo;
@@ -1686,7 +2264,7 @@
 	if (!afinfo)
 		return;
 	proc_net_remove(afinfo->name);
-	memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); 
+	memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
 }
 
 static void get_openreq4(struct sock *sk, struct request_sock *req,
@@ -1721,8 +2299,8 @@
 	struct tcp_sock *tp = tcp_sk(sp);
 	const struct inet_connection_sock *icsk = inet_csk(sp);
 	struct inet_sock *inet = inet_sk(sp);
-	unsigned int dest = inet->daddr;
-	unsigned int src = inet->rcv_saddr;
+	__be32 dest = inet->daddr;
+	__be32 src = inet->rcv_saddr;
 	__u16 destp = ntohs(inet->dport);
 	__u16 srcp = ntohs(inet->sport);
 
@@ -1744,7 +2322,8 @@
 			"%08X %5d %8d %lu %d %p %u %u %u %u %d",
 		i, src, srcp, dest, destp, sp->sk_state,
 		tp->write_seq - tp->snd_una,
-		(sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
+		sp->sk_state == TCP_LISTEN ? sp->sk_ack_backlog :
+					     (tp->rcv_nxt - tp->copied_seq),
 		timer_active,
 		jiffies_to_clock_t(timer_expires - jiffies),
 		icsk->icsk_retransmits,
@@ -1759,7 +2338,8 @@
 		tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
 }
 
-static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i)
+static void get_timewait4_sock(struct inet_timewait_sock *tw,
+			       char *tmpbuf, int i)
 {
 	__be32 dest, src;
 	__u16 destp, srcp;
@@ -1872,7 +2452,8 @@
 
 void __init tcp_v4_init(struct net_proto_family *ops)
 {
-	if (inet_csk_ctl_sock_create(&tcp_socket, PF_INET, SOCK_RAW, IPPROTO_TCP) < 0)
+	if (inet_csk_ctl_sock_create(&tcp_socket, PF_INET, SOCK_RAW,
+				     IPPROTO_TCP) < 0)
 		panic("Failed to create the TCP control socket.\n");
 }
 
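
A hedged userspace sketch of installing a key through the TCP_MD5SIG option
that tcp_v4_parse_md5_keys() above accepts.  It assumes the updated
<linux/tcp.h> (which carries struct tcp_md5sig and the TCP_MD5SIG number) is
visible to userspace; the peer address and key are placeholders:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <linux/tcp.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct tcp_md5sig md5;
	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;

	memset(&md5, 0, sizeof(md5));
	peer->sin_family = AF_INET;
	inet_pton(AF_INET, "192.0.2.1", &peer->sin_addr);	/* example BGP peer */
	md5.tcpm_keylen = strlen("s3cret");
	memcpy(md5.tcpm_key, "s3cret", md5.tcpm_keylen);

	/* Install before connect()/listen() so the SYN already carries the
	 * option; a zero tcpm_keylen deletes the mapping again. */
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
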
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 0163d98..4a3889d 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -45,8 +45,7 @@
 	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
 					    (unsigned long)&tcp_death_row),
 	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
-					     inet_twdr_twkill_work,
-					     &tcp_death_row),
+					     inet_twdr_twkill_work),
 /* Short-time timewait calendar */
 
 	.twcal_hand	= -1,
@@ -306,6 +305,28 @@
 			tw->tw_ipv6only = np->ipv6only;
 		}
 #endif
+
+#ifdef CONFIG_TCP_MD5SIG
+		/*
+		 * The timewait bucket does not have the key DB from the
+		 * sock structure. We just make a quick copy of the
+		 * md5 key being used (if indeed we are using one)
+		 * so the timewait ack generating code has the key.
+		 */
+		do {
+			struct tcp_md5sig_key *key;
+			memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
+			tcptw->tw_md5_keylen = 0;
+			key = tp->af_specific->md5_lookup(sk, sk);
+			if (key != NULL) {
+				memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
+				tcptw->tw_md5_keylen = key->keylen;
+				if (tcp_alloc_md5sig_pool() == NULL)
+					BUG();
+			}
+		} while(0);
+#endif
+
 		/* Linkage updates. */
 		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 
@@ -329,14 +350,24 @@
 		 * socket up.  We've got bigger problems than
 		 * non-graceful socket closings.
 		 */
-		if (net_ratelimit())
-			printk(KERN_INFO "TCP: time wait bucket table overflow\n");
+		LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n");
 	}
 
 	tcp_update_metrics(sk);
 	tcp_done(sk);
 }
 
+void tcp_twsk_destructor(struct sock *sk)
+{
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_timewait_sock *twsk = tcp_twsk(sk);
+	if (twsk->tw_md5_keylen)
+		tcp_put_md5sig_pool();
+#endif
+}
+
+EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
+
 /* This is not only more efficient than what we used to do, it eliminates
  * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
  *
@@ -435,6 +466,11 @@
 			newtp->rx_opt.ts_recent_stamp = 0;
 			newtp->tcp_header_len = sizeof(struct tcphdr);
 		}
+#ifdef CONFIG_TCP_MD5SIG
+		newtp->md5sig_info = NULL;	/*XXX*/
+		if (newtp->af_specific->md5_lookup(sk, newsk))
+			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
 		if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
 			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
 		newtp->rx_opt.mss_clamp = req->mss;
@@ -455,7 +491,7 @@
 			   struct request_sock **prev)
 {
 	struct tcphdr *th = skb->h.th;
-	u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
+	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
 	int paws_reject = 0;
 	struct tcp_options_received tmp_opt;
 	struct sock *child;
@@ -617,6 +653,30 @@
 								 req, NULL);
 		if (child == NULL)
 			goto listen_overflow;
+#ifdef CONFIG_TCP_MD5SIG
+		else {
+			/* Copy over the MD5 key from the original socket */
+			struct tcp_md5sig_key *key;
+			struct tcp_sock *tp = tcp_sk(sk);
+			key = tp->af_specific->md5_lookup(sk, child);
+			if (key != NULL) {
+				/*
+				 * We're using one, so create a matching key on the
+				 * newsk structure. If we fail to get memory then we
+				 * end up not copying the key across. Shucks.
+				 */
+				char *newkey = kmemdup(key->key, key->keylen,
+						       GFP_ATOMIC);
+				if (newkey) {
+					if (!tcp_alloc_md5sig_pool())
+						BUG();
+					tp->af_specific->md5_add(child, child,
+								 newkey,
+								 key->keylen);
+				}
+			}
+		}
+#endif
 
 		inet_csk_reqsk_queue_unlink(sk, req, prev);
 		inet_csk_reqsk_queue_removed(sk, req);
@@ -633,7 +693,7 @@
 	embryonic_reset:
 		NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
 		if (!(flg & TCP_FLAG_RST))
-			req->rsk_ops->send_reset(skb);
+			req->rsk_ops->send_reset(sk, skb);
 
 		inet_csk_reqsk_queue_drop(sk, req, prev);
 		return NULL;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ca40615..32c1a97 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -270,7 +270,7 @@
 }
 
 static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
-					 __u32 tstamp)
+					 __u32 tstamp, __u8 **md5_hash)
 {
 	if (tp->rx_opt.tstamp_ok) {
 		*ptr++ = htonl((TCPOPT_NOP << 24) |
@@ -298,16 +298,29 @@
 			tp->rx_opt.eff_sacks--;
 		}
 	}
+#ifdef CONFIG_TCP_MD5SIG
+	if (md5_hash) {
+		*ptr++ = htonl((TCPOPT_NOP << 24) |
+			       (TCPOPT_NOP << 16) |
+			       (TCPOPT_MD5SIG << 8) |
+			       TCPOLEN_MD5SIG);
+		*md5_hash = (__u8 *)ptr;
+	}
+#endif
 }
 
 /* Construct a tcp options header for a SYN or SYN_ACK packet.
  * If this is every changed make sure to change the definition of
  * MAX_SYN_SIZE to match the new maximum number of options that you
  * can generate.
+ *
+ * Note - that with the RFC2385 TCP option, we make room for the
+ * 16 byte MD5 hash. This will be filled in later, so the pointer for the
+ * location to be filled is passed back up.
  */
 static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
 				  int offer_wscale, int wscale, __u32 tstamp,
-				  __u32 ts_recent)
+				  __u32 ts_recent, __u8 **md5_hash)
 {
 	/* We always get an MSS option.
 	 * The option bytes which will be seen in normal data
@@ -346,6 +359,20 @@
 			       (TCPOPT_WINDOW << 16) |
 			       (TCPOLEN_WINDOW << 8) |
 			       (wscale));
+#ifdef CONFIG_TCP_MD5SIG
+	/*
+	 * If MD5 is enabled, then we set the option, and include the size
+	 * (always 18). The actual MD5 hash is added just before the
+	 * packet is sent.
+	 */
+	if (md5_hash) {
+		*ptr++ = htonl((TCPOPT_NOP << 24) |
+			       (TCPOPT_NOP << 16) |
+			       (TCPOPT_MD5SIG << 8) |
+			       TCPOLEN_MD5SIG);
+		*md5_hash = (__u8 *) ptr;
+	}
+#endif
 }
 
 /* This routine actually transmits TCP packets queued in by
@@ -366,6 +393,10 @@
 	struct tcp_sock *tp;
 	struct tcp_skb_cb *tcb;
 	int tcp_header_size;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *md5;
+	__u8 *md5_hash_location;
+#endif
 	struct tcphdr *th;
 	int sysctl_flags;
 	int err;
@@ -424,9 +455,18 @@
 	if (tcp_packets_in_flight(tp) == 0)
 		tcp_ca_event(sk, CA_EVENT_TX_START);
 
+#ifdef CONFIG_TCP_MD5SIG
+	/*
+	 * Are we doing MD5 on this segment? If so - make
+	 * room for it.
+	 */
+	md5 = tp->af_specific->md5_lookup(sk, sk);
+	if (md5)
+		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
+#endif
+
 	th = (struct tcphdr *) skb_push(skb, tcp_header_size);
 	skb->h.th = th;
-	skb_set_owner_w(skb, sk);
 
 	/* Build TCP header and checksum it. */
 	th->source		= inet->sport;
@@ -461,13 +501,34 @@
 				      (sysctl_flags & SYSCTL_FLAG_WSCALE),
 				      tp->rx_opt.rcv_wscale,
 				      tcb->when,
-				      tp->rx_opt.ts_recent);
+				      tp->rx_opt.ts_recent,
+
+#ifdef CONFIG_TCP_MD5SIG
+				      md5 ? &md5_hash_location :
+#endif
+				      NULL);
 	} else {
 		tcp_build_and_update_options((__be32 *)(th + 1),
-					     tp, tcb->when);
+					     tp, tcb->when,
+#ifdef CONFIG_TCP_MD5SIG
+					     md5 ? &md5_hash_location :
+#endif
+					     NULL);
 		TCP_ECN_send(sk, tp, skb, tcp_header_size);
 	}
 
+#ifdef CONFIG_TCP_MD5SIG
+	/* Calculate the MD5 hash, as we have all we need now */
+	if (md5) {
+		tp->af_specific->calc_md5_hash(md5_hash_location,
+					       md5,
+					       sk, NULL, NULL,
+					       skb->h.th,
+					       sk->sk_protocol,
+					       skb->len);
+	}
+#endif
+
 	icsk->icsk_af_ops->send_check(sk, skb->len, skb);
 
 	if (likely(tcb->flags & TCPCB_FLAG_ACK))
@@ -479,19 +540,13 @@
 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
 		TCP_INC_STATS(TCP_MIB_OUTSEGS);
 
-	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
+	err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0);
 	if (likely(err <= 0))
 		return err;
 
 	tcp_enter_cwr(sk);
 
-	/* NET_XMIT_CN is special. It does not guarantee,
-	 * that this packet is lost. It tells that device
-	 * is about to start to drop packets or already
-	 * drops some packets of the same priority and
-	 * invokes us to send less aggressively.
-	 */
-	return err == NET_XMIT_CN ? 0 : err;
+	return net_xmit_eval(err);
 
 #undef SYSCTL_FLAG_TSTAMPS
 #undef SYSCTL_FLAG_WSCALE
@@ -847,6 +902,11 @@
 		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
 			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (tp->af_specific->md5_lookup(sk, sk))
+		mss_now -= TCPOLEN_MD5SIG_ALIGNED;
+#endif
+
 	xmit_size_goal = mss_now;
 
 	if (doing_tso) {
@@ -2040,6 +2100,10 @@
 	struct tcphdr *th;
 	int tcp_header_size;
 	struct sk_buff *skb;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *md5;
+	__u8 *md5_hash_location;
+#endif
 
 	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
 	if (skb == NULL)
@@ -2055,6 +2119,13 @@
 			   (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
 			   /* SACK_PERM is in the place of NOP NOP of TS */
 			   ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
+
+#ifdef CONFIG_TCP_MD5SIG
+	/* Are we doing MD5 on this segment? If so - make room for it */
+	md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
+	if (md5)
+		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
+#endif
 	skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
 
 	memset(th, 0, sizeof(struct tcphdr));
@@ -2092,11 +2163,29 @@
 	tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
 			      ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
 			      TCP_SKB_CB(skb)->when,
-			      req->ts_recent);
+			      req->ts_recent,
+			      (
+#ifdef CONFIG_TCP_MD5SIG
+			       md5 ? &md5_hash_location :
+#endif
+			       NULL)
+			      );
 
 	skb->csum = 0;
 	th->doff = (tcp_header_size >> 2);
 	TCP_INC_STATS(TCP_MIB_OUTSEGS);
+
+#ifdef CONFIG_TCP_MD5SIG
+	/* Okay, we have all we need - do the md5 hash if needed */
+	if (md5) {
+		tp->af_specific->calc_md5_hash(md5_hash_location,
+					       md5,
+					       NULL, dst, req,
+					       skb->h.th, sk->sk_protocol,
+					       skb->len);
+	}
+#endif
+
 	return skb;
 }
 
@@ -2115,6 +2204,11 @@
 	tp->tcp_header_len = sizeof(struct tcphdr) +
 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
+		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
+
 	/* If user gave his TCP_MAXSEG, record it to clamp */
 	if (tp->rx_opt.user_mss)
 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
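
For reference, the aligned block that tcp_syn_build_options() and
tcp_build_and_update_options() reserve above is 20 bytes on the wire: two NOPs,
kind 19, length 18, then a digest written just before transmit.  A hedged
illustration of that layout (this struct is not part of the patch):

struct tcp_md5_opt_wire {
	__u8 nop1;		/* TCPOPT_NOP (0x01) */
	__u8 nop2;		/* TCPOPT_NOP (0x01) */
	__u8 kind;		/* TCPOPT_MD5SIG (19) */
	__u8 len;		/* TCPOLEN_MD5SIG (18) */
	__u8 digest[16];	/* filled via calc_md5_hash() before xmit */
};
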
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index fb09ade..3355c276 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -297,7 +297,7 @@
 		if (net_ratelimit()) {
 			struct inet_sock *inet = inet_sk(sk);
 			printk(KERN_DEBUG "TCP: Treason uncloaked! Peer %u.%u.%u.%u:%u/%u shrinks window %u:%u. Repaired.\n",
-			       NIPQUAD(inet->daddr), htons(inet->dport),
+			       NIPQUAD(inet->daddr), ntohs(inet->dport),
 			       inet->num, tp->snd_una, tp->snd_nxt);
 		}
 #endif
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index a3b7aa0..ddc4bcc 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -42,8 +42,8 @@
  * with V_PARAM_SHIFT bits to the right of the binary point.
  */
 #define V_PARAM_SHIFT 1
-static int alpha = 1<<V_PARAM_SHIFT;
-static int beta  = 3<<V_PARAM_SHIFT;
+static int alpha = 2<<V_PARAM_SHIFT;
+static int beta  = 4<<V_PARAM_SHIFT;
 static int gamma = 1<<V_PARAM_SHIFT;
 
 module_param(alpha, int, 0644);
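
The Vegas parameters are stored in fixed point with V_PARAM_SHIFT (one)
fractional bit, so the new defaults decode to 2 and 4 packets, up from 1 and 3.
A small decoding helper, for illustration only:

static inline int vegas_param_to_packets(int fixed_point)
{
	/* alpha = 2 << V_PARAM_SHIFT  ->  (4 >> 1) = 2 packets
	 * beta  = 4 << V_PARAM_SHIFT  ->  (8 >> 1) = 4 packets */
	return fixed_point >> V_PARAM_SHIFT;
}
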
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9e1bd37..035915f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -92,22 +92,16 @@
 #include <linux/timer.h>
 #include <linux/mm.h>
 #include <linux/inet.h>
-#include <linux/ipv6.h>
 #include <linux/netdevice.h>
-#include <net/snmp.h>
-#include <net/ip.h>
 #include <net/tcp_states.h>
-#include <net/protocol.h>
 #include <linux/skbuff.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
-#include <net/sock.h>
-#include <net/udp.h>
 #include <net/icmp.h>
 #include <net/route.h>
-#include <net/inet_common.h>
 #include <net/checksum.h>
 #include <net/xfrm.h>
+#include "udp_impl.h"
 
 /*
  *	Snmp MIB for the UDP layer
@@ -120,26 +114,30 @@
 
 static int udp_port_rover;
 
-static inline int udp_lport_inuse(u16 num)
+static inline int __udp_lib_lport_inuse(__u16 num, struct hlist_head udptable[])
 {
 	struct sock *sk;
 	struct hlist_node *node;
 
-	sk_for_each(sk, node, &udp_hash[num & (UDP_HTABLE_SIZE - 1)])
+	sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)])
 		if (inet_sk(sk)->num == num)
 			return 1;
 	return 0;
 }
 
 /**
- *  udp_get_port  -  common port lookup for IPv4 and IPv6
+ *  __udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
  *
  *  @sk:          socket struct in question
  *  @snum:        port number to look up
+ *  @udptable:    hash list table, must be of UDP_HTABLE_SIZE
+ *  @port_rover:  pointer to record of last unallocated port
  *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
  */
-int udp_get_port(struct sock *sk, unsigned short snum,
-		 int (*saddr_cmp)(const struct sock *sk1, const struct sock *sk2))
+int __udp_lib_get_port(struct sock *sk, unsigned short snum,
+		       struct hlist_head udptable[], int *port_rover,
+		       int (*saddr_comp)(const struct sock *sk1,
+					 const struct sock *sk2 )    )
 {
 	struct hlist_node *node;
 	struct hlist_head *head;
@@ -150,15 +148,15 @@
 	if (snum == 0) {
 		int best_size_so_far, best, result, i;
 
-		if (udp_port_rover > sysctl_local_port_range[1] ||
-		    udp_port_rover < sysctl_local_port_range[0])
-			udp_port_rover = sysctl_local_port_range[0];
+		if (*port_rover > sysctl_local_port_range[1] ||
+		    *port_rover < sysctl_local_port_range[0])
+			*port_rover = sysctl_local_port_range[0];
 		best_size_so_far = 32767;
-		best = result = udp_port_rover;
+		best = result = *port_rover;
 		for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
 			int size;
 
-			head = &udp_hash[result & (UDP_HTABLE_SIZE - 1)];
+			head = &udptable[result & (UDP_HTABLE_SIZE - 1)];
 			if (hlist_empty(head)) {
 				if (result > sysctl_local_port_range[1])
 					result = sysctl_local_port_range[0] +
@@ -179,15 +177,15 @@
 				result = sysctl_local_port_range[0]
 					+ ((result - sysctl_local_port_range[0]) &
 					   (UDP_HTABLE_SIZE - 1));
-			if (!udp_lport_inuse(result))
+			if (! __udp_lib_lport_inuse(result, udptable))
 				break;
 		}
 		if (i >= (1 << 16) / UDP_HTABLE_SIZE)
 			goto fail;
 gotit:
-		udp_port_rover = snum = result;
+		*port_rover = snum = result;
 	} else {
-		head = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
+		head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
 
 		sk_for_each(sk2, node, head)
 			if (inet_sk(sk2)->num == snum                        &&
@@ -195,12 +193,12 @@
 			    (!sk2->sk_reuse        || !sk->sk_reuse)         &&
 			    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
 			     || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
-			    (*saddr_cmp)(sk, sk2)                              )
+			    (*saddr_comp)(sk, sk2)                             )
 				goto fail;
 	}
 	inet_sk(sk)->num = snum;
 	if (sk_unhashed(sk)) {
-		head = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
+		head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
 		sk_add_node(sk, head);
 		sock_prot_inc_use(sk->sk_prot);
 	}
@@ -210,7 +208,13 @@
 	return error;
 }
 
-static inline int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
+__inline__ int udp_get_port(struct sock *sk, unsigned short snum,
+			int (*scmp)(const struct sock *, const struct sock *))
+{
+	return  __udp_lib_get_port(sk, snum, udp_hash, &udp_port_rover, scmp);
+}
+
+inline int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
 {
 	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
 
@@ -224,34 +228,20 @@
 	return udp_get_port(sk, snum, ipv4_rcv_saddr_equal);
 }
 
-
-static void udp_v4_hash(struct sock *sk)
-{
-	BUG();
-}
-
-static void udp_v4_unhash(struct sock *sk)
-{
-	write_lock_bh(&udp_hash_lock);
-	if (sk_del_node_init(sk)) {
-		inet_sk(sk)->num = 0;
-		sock_prot_dec_use(sk->sk_prot);
-	}
-	write_unlock_bh(&udp_hash_lock);
-}
-
 /* UDP is nearly always wildcarded out the wazoo, so it makes no sense to try
  * harder than this. -DaveM
  */
-static struct sock *udp_v4_lookup_longway(__be32 saddr, __be16 sport,
-					  __be32 daddr, __be16 dport, int dif)
+static struct sock *__udp4_lib_lookup(__be32 saddr, __be16 sport,
+				      __be32 daddr, __be16 dport,
+				      int dif, struct hlist_head udptable[])
 {
 	struct sock *sk, *result = NULL;
 	struct hlist_node *node;
 	unsigned short hnum = ntohs(dport);
 	int badness = -1;
 
-	sk_for_each(sk, node, &udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]) {
+	read_lock(&udp_hash_lock);
+	sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
 		struct inet_sock *inet = inet_sk(sk);
 
 		if (inet->num == hnum && !ipv6_only_sock(sk)) {
@@ -285,20 +275,10 @@
 			}
 		}
 	}
-	return result;
-}
-
-static __inline__ struct sock *udp_v4_lookup(__be32 saddr, __be16 sport,
-					     __be32 daddr, __be16 dport, int dif)
-{
-	struct sock *sk;
-
-	read_lock(&udp_hash_lock);
-	sk = udp_v4_lookup_longway(saddr, sport, daddr, dport, dif);
-	if (sk)
-		sock_hold(sk);
+	if (result)
+		sock_hold(result);
 	read_unlock(&udp_hash_lock);
-	return sk;
+	return result;
 }
 
 static inline struct sock *udp_v4_mcast_next(struct sock *sk,
@@ -340,7 +320,7 @@
  * to find the appropriate port.
  */
 
-void udp_err(struct sk_buff *skb, u32 info)
+void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
 {
 	struct inet_sock *inet;
 	struct iphdr *iph = (struct iphdr*)skb->data;
@@ -351,7 +331,8 @@
 	int harderr;
 	int err;
 
-	sk = udp_v4_lookup(iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex);
+	sk = __udp4_lib_lookup(iph->daddr, uh->dest, iph->saddr, uh->source,
+			       skb->dev->ifindex, udptable		    );
 	if (sk == NULL) {
 		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
     	  	return;	/* No socket for error */
@@ -405,6 +386,11 @@
 	sock_put(sk);
 }
 
+__inline__ void udp_err(struct sk_buff *skb, u32 info)
+{
+	return __udp4_lib_err(skb, info, udp_hash);
+}
+
 /*
  * Throw away all pending data and cancel the corking. Socket is locked.
  */
@@ -419,16 +405,58 @@
 	}
 }
 
+/**
+ * 	udp4_hwcsum_outgoing  -  handle outgoing HW checksumming
+ * 	@sk: 	socket we are sending on
+ * 	@skb: 	sk_buff containing the filled-in UDP header
+ * 	        (checksum field must be zeroed out)
+ * 	@src: 	source IP address (for the pseudo-header)
+ * 	@dst: 	destination IP address (for the pseudo-header)
+ * 	@len: 	length of UDP packet (header plus payload)
+ */
+static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
+				 __be32 src, __be32 dst, int len      )
+{
+	unsigned int offset;
+	struct udphdr *uh = skb->h.uh;
+	__wsum csum = 0;
+
+	if (skb_queue_len(&sk->sk_write_queue) == 1) {
+		/*
+		 * Only one fragment on the socket.
+		 */
+		skb->csum_offset = offsetof(struct udphdr, check);
+		uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
+	} else {
+		/*
+		 * HW checksumming cannot be used, as there are two or more
+		 * fragments on the socket, so the checksums of all sk_buffs
+		 * have to be added together here.
+		 */
+		offset = skb->h.raw - skb->data;
+		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+
+		skb->ip_summed = CHECKSUM_NONE;
+
+		skb_queue_walk(&sk->sk_write_queue, skb) {
+			csum = csum_add(csum, skb->csum);
+		}
+
+		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
+		if (uh->check == 0)
+			uh->check = CSUM_MANGLED_0;
+	}
+}
+
 /*
  * Push out all pending data as one UDP datagram. Socket is locked.
  */
-static int udp_push_pending_frames(struct sock *sk, struct udp_sock *up)
+static int udp_push_pending_frames(struct sock *sk)
 {
+	struct udp_sock  *up = udp_sk(sk);
 	struct inet_sock *inet = inet_sk(sk);
 	struct flowi *fl = &inet->cork.fl;
 	struct sk_buff *skb;
 	struct udphdr *uh;
 	int err = 0;
+	__wsum csum = 0;
 
 	/* Grab the skbuff where UDP header space exists. */
 	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
@@ -443,52 +471,28 @@
 	uh->len = htons(up->len);
 	uh->check = 0;
 
-	if (sk->sk_no_check == UDP_CSUM_NOXMIT) {
+	if (up->pcflag)  				 /*     UDP-Lite      */
+		csum  = udplite_csum_outgoing(sk, skb);
+
+	else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {   /* UDP csum disabled */
+
 		skb->ip_summed = CHECKSUM_NONE;
 		goto send;
-	}
 
-	if (skb_queue_len(&sk->sk_write_queue) == 1) {
-		/*
-		 * Only one fragment on the socket.
-		 */
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			skb->csum = offsetof(struct udphdr, check);
-			uh->check = ~csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst,
-					up->len, IPPROTO_UDP, 0);
-		} else {
-			skb->csum = csum_partial((char *)uh,
-					sizeof(struct udphdr), skb->csum);
-			uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst,
-					up->len, IPPROTO_UDP, skb->csum);
-			if (uh->check == 0)
-				uh->check = -1;
-		}
-	} else {
-		unsigned int csum = 0;
-		/*
-		 * HW-checksum won't work as there are two or more 
-		 * fragments on the socket so that all csums of sk_buffs
-		 * should be together.
-		 */
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			int offset = (unsigned char *)uh - skb->data;
-			skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
 
-			skb->ip_summed = CHECKSUM_NONE;
-		} else {
-			skb->csum = csum_partial((char *)uh,
-					sizeof(struct udphdr), skb->csum);
-		}
+		udp4_hwcsum_outgoing(sk, skb, fl->fl4_src,fl->fl4_dst, up->len);
+		goto send;
 
-		skb_queue_walk(&sk->sk_write_queue, skb) {
-			csum = csum_add(csum, skb->csum);
-		}
-		uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst,
-				up->len, IPPROTO_UDP, csum);
-		if (uh->check == 0)
-			uh->check = -1;
-	}
+	} else						 /*   `normal' UDP    */
+		csum = udp_csum_outgoing(sk, skb);
+
+	/* add protocol-dependent pseudo-header */
+	uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
+				      sk->sk_protocol, csum             );
+	if (uh->check == 0)
+		uh->check = CSUM_MANGLED_0;
+
 send:
 	err = ip_push_pending_frames(sk);
 out:
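
A side note on the CSUM_MANGLED_0 substitution used above: in UDP, a transmitted checksum of zero means "the sender did not compute a checksum", so a checksum that genuinely folds to zero has to go out as its one's-complement equivalent 0xFFFF. A minimal userspace illustration of that rule (fold_csum() and udp_tx_check() are hypothetical names, not part of this patch):

	#include <stdint.h>

	/* Fold a 32-bit partial sum into a 16-bit one's-complement checksum. */
	static uint16_t fold_csum(uint32_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	/* RFC 768: 0 on the wire is reserved for "no checksum computed",
	 * so a result of 0 is transmitted as 0xFFFF (CSUM_MANGLED_0). */
	static uint16_t udp_tx_check(uint32_t sum)
	{
		uint16_t check = fold_csum(sum);

		return check ? check : 0xffff;
	}
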
@@ -497,12 +501,6 @@
 	return err;
 }
 
-
-static unsigned short udp_check(struct udphdr *uh, int len, __be32 saddr, __be32 daddr, unsigned long base)
-{
-	return(csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base));
-}
-
 int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		size_t len)
 {
@@ -516,8 +514,9 @@
 	__be32 daddr, faddr, saddr;
 	__be16 dport;
 	u8  tos;
-	int err;
+	int err, is_udplite = up->pcflag;
 	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
+	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
 
 	if (len > 0xFFFF)
 		return -EMSGSIZE;
@@ -622,7 +621,7 @@
 					      { .daddr = faddr,
 						.saddr = saddr,
 						.tos = tos } },
-				    .proto = IPPROTO_UDP,
+				    .proto = sk->sk_protocol,
 				    .uli_u = { .ports =
 					       { .sport = inet->sport,
 						 .dport = dport } } };
@@ -668,13 +667,14 @@
 
 do_append_data:
 	up->len += ulen;
-	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen, 
-			sizeof(struct udphdr), &ipc, rt, 
+	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
+	err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
+			sizeof(struct udphdr), &ipc, rt,
 			corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
 	if (err)
 		udp_flush_pending_frames(sk);
 	else if (!corkreq)
-		err = udp_push_pending_frames(sk, up);
+		err = udp_push_pending_frames(sk);
 	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
 		up->pending = 0;
 	release_sock(sk);
@@ -684,7 +684,7 @@
 	if (free)
 		kfree(ipc.opt);
 	if (!err) {
-		UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS);
+		UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
 		return len;
 	}
 	/*
@@ -695,7 +695,7 @@
 	 * seems like overkill.
 	 */
 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
-		UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS);
+		UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite);
 	}
 	return err;
 
@@ -707,8 +707,8 @@
 	goto out;
 }
 
-static int udp_sendpage(struct sock *sk, struct page *page, int offset,
-			size_t size, int flags)
+int udp_sendpage(struct sock *sk, struct page *page, int offset,
+		 size_t size, int flags)
 {
 	struct udp_sock *up = udp_sk(sk);
 	int ret;
@@ -747,7 +747,7 @@
 
 	up->len += size;
 	if (!(up->corkflag || (flags&MSG_MORE)))
-		ret = udp_push_pending_frames(sk, up);
+		ret = udp_push_pending_frames(sk);
 	if (!ret)
 		ret = size;
 out:
@@ -795,29 +795,18 @@
 	return(0);
 }
 
-static __inline__ int __udp_checksum_complete(struct sk_buff *skb)
-{
-	return __skb_checksum_complete(skb);
-}
-
-static __inline__ int udp_checksum_complete(struct sk_buff *skb)
-{
-	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
-		__udp_checksum_complete(skb);
-}
-
 /*
  * 	This should be easy, if there is something there we
  * 	return it, otherwise we block.
  */
 
-static int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-		       size_t len, int noblock, int flags, int *addr_len)
+int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+	        size_t len, int noblock, int flags, int *addr_len)
 {
 	struct inet_sock *inet = inet_sk(sk);
   	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
   	struct sk_buff *skb;
-  	int copied, err;
+	int copied, err, copy_only, is_udplite = IS_UDPLITE(sk);
 
 	/*
 	 *	Check any passed addresses
@@ -839,15 +828,25 @@
 		msg->msg_flags |= MSG_TRUNC;
 	}
 
-	if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
-		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
-					      copied);
-	} else if (msg->msg_flags&MSG_TRUNC) {
-		if (__udp_checksum_complete(skb))
+	/*
+	 * 	Decide whether to checksum and/or copy data.
+	 *
+	 * 	UDP:      checksum may have been computed in HW,
+	 * 	          (re-)compute it if message is truncated.
+	 * 	UDP-Lite: the checksum must always be verified, no HW support.
+	 */
+	copy_only = (skb->ip_summed==CHECKSUM_UNNECESSARY);
+
+	if (is_udplite  ||  (!copy_only  &&  msg->msg_flags&MSG_TRUNC)) {
+		if (__udp_lib_checksum_complete(skb))
 			goto csum_copy_err;
-		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
-					      copied);
-	} else {
+		copy_only = 1;
+	}
+
+	if (copy_only)
+		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
+					      msg->msg_iov, copied       );
+	else {
 		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
 
 		if (err == -EINVAL)
@@ -880,7 +879,7 @@
   	return err;
 
 csum_copy_err:
-	UDP_INC_STATS_BH(UDP_MIB_INERRORS);
+	UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
 
 	skb_kill_datagram(sk, skb, flags);
 
@@ -912,11 +911,6 @@
 	return 0;
 }
 
-static void udp_close(struct sock *sk, long timeout)
-{
-	sk_common_release(sk);
-}
-
 /* return:
 * 	1  if the UDP system should process it
  *	0  if we should drop this packet
@@ -1022,7 +1016,7 @@
  * Note that in the success and error cases, the skb is assumed to
  * have either been requeued or freed.
  */
-static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
+int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 {
 	struct udp_sock *up = udp_sk(sk);
 	int rc;
@@ -1030,10 +1024,8 @@
 	/*
 	 *	Charge it to the socket, dropping if the queue is full.
 	 */
-	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
-		kfree_skb(skb);
-		return -1;
-	}
+	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
+		goto drop;
 	nf_reset(skb);
 
 	if (up->encap_type) {
@@ -1057,31 +1049,68 @@
 		if (ret < 0) {
 			/* process the ESP packet */
 			ret = xfrm4_rcv_encap(skb, up->encap_type);
-			UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS);
+			UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
 			return -ret;
 		}
 		/* FALLTHROUGH -- it's a UDP Packet */
 	}
 
-	if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
-		if (__udp_checksum_complete(skb)) {
-			UDP_INC_STATS_BH(UDP_MIB_INERRORS);
-			kfree_skb(skb);
-			return -1;
+	/*
+	 * 	UDP-Lite specific tests, ignored on UDP sockets
+	 */
+	if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
+
+		/*
+		 * MIB statistics other than incrementing the error count are
+		 * disabled for the following two types of errors: these depend
+		 * on the application settings, not on the functioning of the
+		 * protocol stack as such.
+		 *
+		 * RFC 3828 here recommends (sec 3.3): "There should also be a
+		 * way ... to ... at least let the receiving application block
+		 * delivery of packets with coverage values less than a value
+		 * provided by the application."
+		 */
+		if (up->pcrlen == 0) {          /* full coverage was set  */
+			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
+				"%d while full coverage %d requested\n",
+				UDP_SKB_CB(skb)->cscov, skb->len);
+			goto drop;
 		}
+		/* The next case involves violating the minimum coverage requested
+		 * by the receiver. This is subtle: if the receiver wants x and x is
+		 * greater than the buffer size/MTU, then the receiver will keep
+		 * complaining that it wants x while the sender emits packets of
+		 * smaller size y. Therefore the ...()->partial_cov test above is
+		 * essential.
+		 */
+		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
+			LIMIT_NETDEBUG(KERN_WARNING
+				"UDPLITE: coverage %d too small, need min %d\n",
+				UDP_SKB_CB(skb)->cscov, up->pcrlen);
+			goto drop;
+		}
+	}
+
+	if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
+		if (__udp_lib_checksum_complete(skb))
+			goto drop;
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
 
 	if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
 		/* Note that an ENOMEM error is charged twice */
 		if (rc == -ENOMEM)
-			UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS);
-		UDP_INC_STATS_BH(UDP_MIB_INERRORS);
-		kfree_skb(skb);
-		return -1;
+			UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, up->pcflag);
+		goto drop;
 	}
-	UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS);
+
+	UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
 	return 0;
+
+drop:
+	UDP_INC_STATS_BH(UDP_MIB_INERRORS, up->pcflag);
+	kfree_skb(skb);
+	return -1;
 }
 
 /*
@@ -1090,14 +1119,16 @@
  *	Note: called only from the BH handler context,
  *	so we don't need to lock the hashes.
  */
-static int udp_v4_mcast_deliver(struct sk_buff *skb, struct udphdr *uh,
-				 __be32 saddr, __be32 daddr)
+static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
+				    struct udphdr  *uh,
+				    __be32 saddr, __be32 daddr,
+				    struct hlist_head udptable[])
 {
 	struct sock *sk;
 	int dif;
 
 	read_lock(&udp_hash_lock);
-	sk = sk_head(&udp_hash[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
+	sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
 	dif = skb->dev->ifindex;
 	sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
 	if (sk) {
@@ -1131,65 +1162,75 @@
 * Otherwise, csum completion requires checksumming the packet body,
  * including udp header and folding it to skb->csum.
  */
-static void udp_checksum_init(struct sk_buff *skb, struct udphdr *uh,
-			     unsigned short ulen, __be32 saddr, __be32 daddr)
+static inline void udp4_csum_init(struct sk_buff *skb, struct udphdr *uh)
 {
 	if (uh->check == 0) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	} else if (skb->ip_summed == CHECKSUM_COMPLETE) {
-		if (!udp_check(uh, ulen, saddr, daddr, skb->csum))
+	       if (!csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr,
+				      skb->len, IPPROTO_UDP, skb->csum       ))
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
 	if (skb->ip_summed != CHECKSUM_UNNECESSARY)
-		skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
+		skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr,
+					       skb->nh.iph->daddr,
+					       skb->len, IPPROTO_UDP, 0);
 	/* Probably, we should checksum udp header (it should be in cache
 	 * in any case) and data in tiny packets (< rx copybreak).
 	 */
+
+	/* UDP = UDP-Lite with a non-partial checksum coverage */
+	UDP_SKB_CB(skb)->partial_cov = 0;
 }
 
 /*
  *	All we need to do is get the socket, and then do a checksum. 
  */
  
-int udp_rcv(struct sk_buff *skb)
+int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
+		   int is_udplite)
 {
   	struct sock *sk;
-  	struct udphdr *uh;
+  	struct udphdr *uh = skb->h.uh;
 	unsigned short ulen;
 	struct rtable *rt = (struct rtable*)skb->dst;
 	__be32 saddr = skb->nh.iph->saddr;
 	__be32 daddr = skb->nh.iph->daddr;
-	int len = skb->len;
 
 	/*
-	 *	Validate the packet and the UDP length.
+	 *  Validate the packet.
 	 */
 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
-		goto no_header;
-
-	uh = skb->h.uh;
+		goto drop;		/* No space for header. */
 
 	ulen = ntohs(uh->len);
-
-	if (ulen > len || ulen < sizeof(*uh))
+	if (ulen > skb->len)
 		goto short_packet;
 
-	if (pskb_trim_rcsum(skb, ulen))
-		goto short_packet;
+	if (!is_udplite) {		/* UDP validates ulen. */
 
-	udp_checksum_init(skb, uh, ulen, saddr, daddr);
+		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
+			goto short_packet;
+
+		udp4_csum_init(skb, uh);
+
+	} else {			/* UDP-Lite validates cscov. */
+		if (udplite4_csum_init(skb, uh))
+			goto csum_error;
+	}
 
 	if(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
-		return udp_v4_mcast_deliver(skb, uh, saddr, daddr);
+		return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);
 
-	sk = udp_v4_lookup(saddr, uh->source, daddr, uh->dest, skb->dev->ifindex);
+	sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest,
+			       skb->dev->ifindex, udptable        );
 
 	if (sk != NULL) {
 		int ret = udp_queue_rcv_skb(sk, skb);
 		sock_put(sk);
 
 		/* a return value > 0 means to resubmit the input, but
-		 * it it wants the return to be -protocol, or 0
+		 * it wants the return to be -protocol, or 0
 		 */
 		if (ret > 0)
 			return -ret;
@@ -1201,10 +1242,10 @@
 	nf_reset(skb);
 
 	/* No socket. Drop packet silently, if checksum is wrong */
-	if (udp_checksum_complete(skb))
+	if (udp_lib_checksum_complete(skb))
 		goto csum_error;
 
-	UDP_INC_STATS_BH(UDP_MIB_NOPORTS);
+	UDP_INC_STATS_BH(UDP_MIB_NOPORTS, is_udplite);
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
 	/*
@@ -1215,36 +1256,40 @@
 	return(0);
 
 short_packet:
-	LIMIT_NETDEBUG(KERN_DEBUG "UDP: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
+	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
+		       is_udplite? "-Lite" : "",
 		       NIPQUAD(saddr),
 		       ntohs(uh->source),
 		       ulen,
-		       len,
+		       skb->len,
 		       NIPQUAD(daddr),
 		       ntohs(uh->dest));
-no_header:
-	UDP_INC_STATS_BH(UDP_MIB_INERRORS);
-	kfree_skb(skb);
-	return(0);
+	goto drop;
 
 csum_error:
 	/* 
 	 * RFC1122: OK.  Discards the bad packet silently (as far as 
 	 * the network is concerned, anyway) as per 4.1.3.4 (MUST). 
 	 */
-	LIMIT_NETDEBUG(KERN_DEBUG "UDP: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
+	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
+		       is_udplite? "-Lite" : "",
 		       NIPQUAD(saddr),
 		       ntohs(uh->source),
 		       NIPQUAD(daddr),
 		       ntohs(uh->dest),
 		       ulen);
 drop:
-	UDP_INC_STATS_BH(UDP_MIB_INERRORS);
+	UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
 	kfree_skb(skb);
 	return(0);
 }
 
-static int udp_destroy_sock(struct sock *sk)
+__inline__ int udp_rcv(struct sk_buff *skb)
+{
+	return __udp4_lib_rcv(skb, udp_hash, 0);
+}
+
+int udp_destroy_sock(struct sock *sk)
 {
 	lock_sock(sk);
 	udp_flush_pending_frames(sk);
@@ -1255,8 +1300,9 @@
 /*
  *	Socket option code for UDP
  */
-static int do_udp_setsockopt(struct sock *sk, int level, int optname,
-			  char __user *optval, int optlen)
+int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+		       char __user *optval, int optlen,
+		       int (*push_pending_frames)(struct sock *))
 {
 	struct udp_sock *up = udp_sk(sk);
 	int val;
@@ -1275,7 +1321,7 @@
 		} else {
 			up->corkflag = 0;
 			lock_sock(sk);
-			udp_push_pending_frames(sk, up);
+			(*push_pending_frames)(sk);
 			release_sock(sk);
 		}
 		break;
@@ -1293,6 +1339,32 @@
 		}
 		break;
 
+	/*
+	 * 	UDP-Lite's partial checksum coverage (RFC 3828).
+	 */
+	/* The sender sets the actual checksum coverage length via this option.
+	 * The case coverage > packet length is handled by the send module. */
+	case UDPLITE_SEND_CSCOV:
+		if (!up->pcflag)         /* Option not available on UDP sockets */
+			return -ENOPROTOOPT;
+		if (val != 0 && val < 8) /* Illegal coverage: use minimum (8) */
+			val = 8;
+		up->pcslen = val;
+		up->pcflag |= UDPLITE_SEND_CC;
+		break;
+
+	/* The receiver specifies a minimum checksum coverage value. To make
+	 * sense, this should be set to at least 8 (as done below). If zero is
+	 * used, this again means full checksum coverage.                     */
+	case UDPLITE_RECV_CSCOV:
+		if (!up->pcflag)         /* Option not available on UDP sockets */
+			return -ENOPROTOOPT;
+		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
+			val = 8;
+		up->pcrlen = val;
+		up->pcflag |= UDPLITE_RECV_CC;
+		break;
+
 	default:
 		err = -ENOPROTOOPT;
 		break;
@@ -1301,26 +1373,28 @@
 	return err;
 }
 
-static int udp_setsockopt(struct sock *sk, int level, int optname,
-			  char __user *optval, int optlen)
+int udp_setsockopt(struct sock *sk, int level, int optname,
+		   char __user *optval, int optlen)
 {
-	if (level != SOL_UDP)
-		return ip_setsockopt(sk, level, optname, optval, optlen);
-	return do_udp_setsockopt(sk, level, optname, optval, optlen);
+	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
+		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
+					  udp_push_pending_frames);
+	return ip_setsockopt(sk, level, optname, optval, optlen);
 }
 
 #ifdef CONFIG_COMPAT
-static int compat_udp_setsockopt(struct sock *sk, int level, int optname,
-				 char __user *optval, int optlen)
+int compat_udp_setsockopt(struct sock *sk, int level, int optname,
+			  char __user *optval, int optlen)
 {
-	if (level != SOL_UDP)
-		return compat_ip_setsockopt(sk, level, optname, optval, optlen);
-	return do_udp_setsockopt(sk, level, optname, optval, optlen);
+	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
+		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
+					  udp_push_pending_frames);
+	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
 }
 #endif
 
-static int do_udp_getsockopt(struct sock *sk, int level, int optname,
-			  char __user *optval, int __user *optlen)
+int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+		       char __user *optval, int __user *optlen)
 {
 	struct udp_sock *up = udp_sk(sk);
 	int val, len;
@@ -1342,6 +1416,16 @@
 		val = up->encap_type;
 		break;
 
+	/* The following two cannot be changed on UDP sockets; there the return
+	 * is always 0 (which corresponds to the full checksum coverage of UDP). */
+	case UDPLITE_SEND_CSCOV:
+		val = up->pcslen;
+		break;
+
+	case UDPLITE_RECV_CSCOV:
+		val = up->pcrlen;
+		break;
+
 	default:
 		return -ENOPROTOOPT;
 	};
@@ -1353,21 +1437,21 @@
   	return 0;
 }
 
-static int udp_getsockopt(struct sock *sk, int level, int optname,
-			  char __user *optval, int __user *optlen)
+int udp_getsockopt(struct sock *sk, int level, int optname,
+		   char __user *optval, int __user *optlen)
 {
-	if (level != SOL_UDP)
-		return ip_getsockopt(sk, level, optname, optval, optlen);
-	return do_udp_getsockopt(sk, level, optname, optval, optlen);
+	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
+		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
+	return ip_getsockopt(sk, level, optname, optval, optlen);
 }
 
 #ifdef CONFIG_COMPAT
-static int compat_udp_getsockopt(struct sock *sk, int level, int optname,
+int compat_udp_getsockopt(struct sock *sk, int level, int optname,
 				 char __user *optval, int __user *optlen)
 {
-	if (level != SOL_UDP)
-		return compat_ip_getsockopt(sk, level, optname, optval, optlen);
-	return do_udp_getsockopt(sk, level, optname, optval, optlen);
+	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
+		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
+	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
 }
 #endif
 /**
@@ -1387,7 +1471,8 @@
 {
 	unsigned int mask = datagram_poll(file, sock, wait);
 	struct sock *sk = sock->sk;
-	
+	int 	is_lite = IS_UDPLITE(sk);
+
 	/* Check for false positives due to checksum errors */
 	if ( (mask & POLLRDNORM) &&
 	     !(file->f_flags & O_NONBLOCK) &&
@@ -1397,8 +1482,8 @@
 
 		spin_lock_bh(&rcvq->lock);
 		while ((skb = skb_peek(rcvq)) != NULL) {
-			if (udp_checksum_complete(skb)) {
-				UDP_INC_STATS_BH(UDP_MIB_INERRORS);
+			if (udp_lib_checksum_complete(skb)) {
+				UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite);
 				__skb_unlink(skb, rcvq);
 				kfree_skb(skb);
 			} else {
@@ -1420,7 +1505,7 @@
 struct proto udp_prot = {
  	.name		   = "UDP",
 	.owner		   = THIS_MODULE,
-	.close		   = udp_close,
+	.close		   = udp_lib_close,
 	.connect	   = ip4_datagram_connect,
 	.disconnect	   = udp_disconnect,
 	.ioctl		   = udp_ioctl,
@@ -1431,8 +1516,8 @@
 	.recvmsg	   = udp_recvmsg,
 	.sendpage	   = udp_sendpage,
 	.backlog_rcv	   = udp_queue_rcv_skb,
-	.hash		   = udp_v4_hash,
-	.unhash		   = udp_v4_unhash,
+	.hash		   = udp_lib_hash,
+	.unhash		   = udp_lib_unhash,
 	.get_port	   = udp_v4_get_port,
 	.obj_size	   = sizeof(struct udp_sock),
 #ifdef CONFIG_COMPAT
@@ -1451,7 +1536,7 @@
 
 	for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
 		struct hlist_node *node;
-		sk_for_each(sk, node, &udp_hash[state->bucket]) {
+		sk_for_each(sk, node, state->hashtable + state->bucket) {
 			if (sk->sk_family == state->family)
 				goto found;
 		}
@@ -1472,7 +1557,7 @@
 	} while (sk && sk->sk_family != state->family);
 
 	if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
-		sk = sk_head(&udp_hash[state->bucket]);
+		sk = sk_head(state->hashtable + state->bucket);
 		goto try_again;
 	}
 	return sk;
@@ -1522,6 +1607,7 @@
 	if (!s)
 		goto out;
 	s->family		= afinfo->family;
+	s->hashtable		= afinfo->hashtable;
 	s->seq_ops.start	= udp_seq_start;
 	s->seq_ops.next		= udp_seq_next;
 	s->seq_ops.show		= afinfo->seq_show;
@@ -1588,7 +1674,7 @@
 		atomic_read(&sp->sk_refcnt), sp);
 }
 
-static int udp4_seq_show(struct seq_file *seq, void *v)
+int udp4_seq_show(struct seq_file *seq, void *v)
 {
 	if (v == SEQ_START_TOKEN)
 		seq_printf(seq, "%-127s\n",
@@ -1611,6 +1697,7 @@
 	.owner		= THIS_MODULE,
 	.name		= "udp",
 	.family		= AF_INET,
+	.hashtable	= udp_hash,
 	.seq_show	= udp4_seq_show,
 	.seq_fops	= &udp4_seq_fops,
 };
@@ -1633,6 +1720,8 @@
 EXPORT_SYMBOL(udp_get_port);
 EXPORT_SYMBOL(udp_prot);
 EXPORT_SYMBOL(udp_sendmsg);
+EXPORT_SYMBOL(udp_lib_getsockopt);
+EXPORT_SYMBOL(udp_lib_setsockopt);
 EXPORT_SYMBOL(udp_poll);
 
 #ifdef CONFIG_PROC_FS
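
For context on the UDPLITE_SEND_CSCOV/UDPLITE_RECV_CSCOV cases wired up in udp_lib_setsockopt() above, here is a rough sketch of how an application could use partial checksum coverage once this series is in place. Whether the userspace headers already export IPPROTO_UDPLITE, SOL_UDPLITE and the two option names is an assumption of this sketch, not something this diff guarantees:

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/udp.h>		/* UDPLITE_{SEND,RECV}_CSCOV, if exported */

	static int open_udplite_socket(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
		int cscov = 20;		/* checksum covers only the first 20 octets */

		if (fd < 0)
			return -1;

		/* sender-side coverage length (UDPLITE_SEND_CSCOV case above) */
		setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cscov, sizeof(cscov));

		/* minimum coverage accepted on receive (UDPLITE_RECV_CSCOV case
		 * above); nonzero values below 8 are clamped to 8 by the kernel */
		setsockopt(fd, SOL_UDPLITE, UDPLITE_RECV_CSCOV, &cscov, sizeof(cscov));

		return fd;
	}
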
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
new file mode 100644
index 0000000..f6f4277
--- /dev/null
+++ b/net/ipv4/udp_impl.h
@@ -0,0 +1,38 @@
+#ifndef _UDP4_IMPL_H
+#define _UDP4_IMPL_H
+#include <net/udp.h>
+#include <net/udplite.h>
+#include <net/protocol.h>
+#include <net/inet_common.h>
+
+extern int  	__udp4_lib_rcv(struct sk_buff *, struct hlist_head [], int );
+extern void 	__udp4_lib_err(struct sk_buff *, u32, struct hlist_head []);
+
+extern int	__udp_lib_get_port(struct sock *sk, unsigned short snum,
+				   struct hlist_head udptable[], int *port_rover,
+		       	       	   int (*)(const struct sock*,const struct sock*));
+extern int	ipv4_rcv_saddr_equal(const struct sock *, const struct sock *);
+
+
+extern int	udp_setsockopt(struct sock *sk, int level, int optname,
+			       char __user *optval, int optlen);
+extern int	udp_getsockopt(struct sock *sk, int level, int optname,
+			       char __user *optval, int __user *optlen);
+
+#ifdef CONFIG_COMPAT
+extern int	compat_udp_setsockopt(struct sock *sk, int level, int optname,
+				      char __user *optval, int optlen);
+extern int	compat_udp_getsockopt(struct sock *sk, int level, int optname,
+				      char __user *optval, int __user *optlen);
+#endif
+extern int	udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+			    size_t len, int noblock, int flags, int *addr_len);
+extern int	udp_sendpage(struct sock *sk, struct page *page, int offset,
+			     size_t size, int flags);
+extern int	udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
+extern int	udp_destroy_sock(struct sock *sk);
+
+#ifdef CONFIG_PROC_FS
+extern int	udp4_seq_show(struct seq_file *seq, void *v);
+#endif
+#endif	/* _UDP4_IMPL_H */
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
new file mode 100644
index 0000000..b28fe1e
--- /dev/null
+++ b/net/ipv4/udplite.c
@@ -0,0 +1,119 @@
+/*
+ *  UDPLITE     An implementation of the UDP-Lite protocol (RFC 3828).
+ *
+ *  Version:    $Id: udplite.c,v 1.25 2006/10/19 07:22:36 gerrit Exp $
+ *
+ *  Authors:    Gerrit Renker       <gerrit@erg.abdn.ac.uk>
+ *
+ *  Changes:
+ *  Fixes:
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ */
+#include "udp_impl.h"
+DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics)	__read_mostly;
+
+struct hlist_head 	udplite_hash[UDP_HTABLE_SIZE];
+static int		udplite_port_rover;
+
+int udplite_get_port(struct sock *sk, unsigned short p,
+		     int (*c)(const struct sock *, const struct sock *))
+{
+	return  __udp_lib_get_port(sk, p, udplite_hash, &udplite_port_rover, c);
+}
+
+static int udplite_v4_get_port(struct sock *sk, unsigned short snum)
+{
+	return udplite_get_port(sk, snum, ipv4_rcv_saddr_equal);
+}
+
+static int udplite_rcv(struct sk_buff *skb)
+{
+	return __udp4_lib_rcv(skb, udplite_hash, 1);
+}
+
+static void udplite_err(struct sk_buff *skb, u32 info)
+{
+	return __udp4_lib_err(skb, info, udplite_hash);
+}
+
+static	struct net_protocol udplite_protocol = {
+	.handler	= udplite_rcv,
+	.err_handler	= udplite_err,
+	.no_policy	= 1,
+};
+
+struct proto 	udplite_prot = {
+	.name		   = "UDP-Lite",
+	.owner		   = THIS_MODULE,
+	.close		   = udp_lib_close,
+	.connect	   = ip4_datagram_connect,
+	.disconnect	   = udp_disconnect,
+	.ioctl		   = udp_ioctl,
+	.init		   = udplite_sk_init,
+	.destroy	   = udp_destroy_sock,
+	.setsockopt	   = udp_setsockopt,
+	.getsockopt	   = udp_getsockopt,
+	.sendmsg	   = udp_sendmsg,
+	.recvmsg	   = udp_recvmsg,
+	.sendpage	   = udp_sendpage,
+	.backlog_rcv	   = udp_queue_rcv_skb,
+	.hash		   = udp_lib_hash,
+	.unhash		   = udp_lib_unhash,
+	.get_port	   = udplite_v4_get_port,
+	.obj_size	   = sizeof(struct udp_sock),
+#ifdef CONFIG_COMPAT
+	.compat_setsockopt = compat_udp_setsockopt,
+	.compat_getsockopt = compat_udp_getsockopt,
+#endif
+};
+
+static struct inet_protosw udplite4_protosw = {
+	.type		=  SOCK_DGRAM,
+	.protocol	=  IPPROTO_UDPLITE,
+	.prot		=  &udplite_prot,
+	.ops		=  &inet_dgram_ops,
+	.capability	= -1,
+	.no_check	=  0,		/* must checksum (RFC 3828) */
+	.flags		=  INET_PROTOSW_PERMANENT,
+};
+
+#ifdef CONFIG_PROC_FS
+static struct file_operations udplite4_seq_fops;
+static struct udp_seq_afinfo udplite4_seq_afinfo = {
+	.owner		= THIS_MODULE,
+	.name		= "udplite",
+	.family		= AF_INET,
+	.hashtable	= udplite_hash,
+	.seq_show	= udp4_seq_show,
+	.seq_fops	= &udplite4_seq_fops,
+};
+#endif
+
+void __init udplite4_register(void)
+{
+	if (proto_register(&udplite_prot, 1))
+		goto out_register_err;
+
+	if (inet_add_protocol(&udplite_protocol, IPPROTO_UDPLITE) < 0)
+		goto out_unregister_proto;
+
+	inet_register_protosw(&udplite4_protosw);
+
+#ifdef CONFIG_PROC_FS
+	if (udp_proc_register(&udplite4_seq_afinfo)) /* udplite4_proc_init() */
+		printk(KERN_ERR "%s: Cannot register /proc!\n", __FUNCTION__);
+#endif
+	return;
+
+out_unregister_proto:
+	proto_unregister(&udplite_prot);
+out_register_err:
+	printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __FUNCTION__);
+}
+
+EXPORT_SYMBOL(udplite_hash);
+EXPORT_SYMBOL(udplite_prot);
+EXPORT_SYMBOL(udplite_get_port);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 1bed0cd..fb9f69c 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -72,8 +72,8 @@
 	struct dst_entry *dst, *dst_prev;
 	struct rtable *rt0 = (struct rtable*)(*dst_p);
 	struct rtable *rt = rt0;
-	u32 remote = fl->fl4_dst;
-	u32 local  = fl->fl4_src;
+	__be32 remote = fl->fl4_dst;
+	__be32 local  = fl->fl4_src;
 	struct flowi fl_tunnel = {
 		.nl_u = {
 			.ip4_u = {
@@ -199,11 +199,12 @@
 	if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
 		switch (iph->protocol) {
 		case IPPROTO_UDP:
+		case IPPROTO_UDPLITE:
 		case IPPROTO_TCP:
 		case IPPROTO_SCTP:
 		case IPPROTO_DCCP:
 			if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
-				u16 *ports = (u16 *)xprth;
+				__be16 *ports = (__be16 *)xprth;
 
 				fl->fl_ip_sport = ports[0];
 				fl->fl_ip_dport = ports[1];
@@ -273,6 +274,8 @@
 
 	if (likely(xdst->u.rt.idev))
 		in_dev_put(xdst->u.rt.idev);
+	if (likely(xdst->u.rt.peer))
+		inet_putpeer(xdst->u.rt.peer);
 	xfrm_dst_destroy(xdst);
 }
 
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 6e48f52..deb4101 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -196,10 +196,3 @@
 
 	  If unsure, say N.
 
-config IPV6_ROUTE_FWMARK
-	bool "IPv6: use netfilter MARK value as routing key"
-	depends on IPV6_MULTIPLE_TABLES && NETFILTER
-	---help---
-	  If you say Y here, you will be able to specify different routes for
-	  packets with different mark values (see iptables(8), MARK target).
-
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index addcc01..8bacda1 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -5,8 +5,8 @@
 obj-$(CONFIG_IPV6) += ipv6.o
 
 ipv6-objs :=	af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
-		route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o raw.o \
-		protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
+		route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \
+		raw.o protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
 		exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \
 		ip6_flowlabel.o ipv6_syms.o inet6_connection_sock.o
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b312a5f..a5e8d20 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -232,7 +232,7 @@
 
 int __ipv6_addr_type(const struct in6_addr *addr)
 {
-	u32 st;
+	__be32 st;
 
 	st = addr->s6_addr32[0];
 
@@ -1164,7 +1164,7 @@
 int ipv6_get_saddr(struct dst_entry *dst,
 		   struct in6_addr *daddr, struct in6_addr *saddr)
 {
-	return ipv6_dev_get_saddr(dst ? ((struct rt6_info *)dst)->rt6i_idev->dev : NULL, daddr, saddr);
+	return ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL, daddr, saddr);
 }
 
 
@@ -3098,10 +3098,9 @@
 
 static inline int inet6_ifaddr_msgsize(void)
 {
-	return nlmsg_total_size(sizeof(struct ifaddrmsg) +
-				nla_total_size(16) +
-				nla_total_size(sizeof(struct ifa_cacheinfo)) +
-				128);
+	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
+	       + nla_total_size(16) /* IFA_ADDRESS */
+	       + nla_total_size(sizeof(struct ifa_cacheinfo));
 }
 
 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
@@ -3329,10 +3328,8 @@
 
 	err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid,
 				nlh->nlmsg_seq, RTM_NEWADDR, 0);
-	if (err < 0) {
-		kfree_skb(skb);
-		goto errout_ifa;
-	}
+	/* failure implies BUG in inet6_ifaddr_msgsize() */
+	BUG_ON(err < 0);
 
 	err = rtnl_unicast(skb, NETLINK_CB(in_skb).pid);
 errout_ifa:
@@ -3351,10 +3348,8 @@
 		goto errout;
 
 	err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
-	if (err < 0) {
-		kfree_skb(skb);
-		goto errout;
-	}
+	/* failure implies BUG in inet6_ifaddr_msgsize() */
+	BUG_ON(err < 0);
 
 	err = rtnl_notify(skb, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
 errout:
@@ -3365,6 +3360,8 @@
 static void inline ipv6_store_devconf(struct ipv6_devconf *cnf,
 				__s32 *array, int bytes)
 {
+	BUG_ON(bytes < (DEVCONF_MAX * 4));
+
 	memset(array, 0, bytes);
 	array[DEVCONF_FORWARDING] = cnf->forwarding;
 	array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
@@ -3397,80 +3394,76 @@
 	array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
 }
 
-/* Maximum length of ifinfomsg attributes */
-#define INET6_IFINFO_RTA_SPACE \
-		RTA_SPACE(IFNAMSIZ) /* IFNAME */ + \
-		RTA_SPACE(MAX_ADDR_LEN) /* ADDRESS */ +	\
-		RTA_SPACE(sizeof(u32)) /* MTU */ + \
-		RTA_SPACE(sizeof(int)) /* LINK */ + \
-		RTA_SPACE(0) /* PROTINFO */ + \
-		RTA_SPACE(sizeof(u32)) /* FLAGS */ + \
-		RTA_SPACE(sizeof(struct ifla_cacheinfo)) /* CACHEINFO */ + \
-		RTA_SPACE(sizeof(__s32[DEVCONF_MAX])) /* CONF */
+static inline size_t inet6_if_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+	       + nla_total_size(4) /* IFLA_MTU */
+	       + nla_total_size(4) /* IFLA_LINK */
+	       + nla_total_size( /* IFLA_PROTINFO */
+			nla_total_size(4) /* IFLA_INET6_FLAGS */
+			+ nla_total_size(sizeof(struct ifla_cacheinfo))
+			+ nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
+		 );
+}
 
 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, 
 			     u32 pid, u32 seq, int event, unsigned int flags)
 {
-	struct net_device	*dev = idev->dev;
-	__s32			*array = NULL;
-	struct ifinfomsg	*r;
-	struct nlmsghdr 	*nlh;
-	unsigned char		*b = skb->tail;
-	struct rtattr		*subattr;
-	__u32			mtu = dev->mtu;
-	struct ifla_cacheinfo	ci;
+	struct net_device *dev = idev->dev;
+	struct nlattr *conf;
+	struct ifinfomsg *hdr;
+	struct nlmsghdr *nlh;
+	void *protoinfo;
+	struct ifla_cacheinfo ci;
 
-	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
-	r = NLMSG_DATA(nlh);
-	r->ifi_family = AF_INET6;
-	r->__ifi_pad = 0;
-	r->ifi_type = dev->type;
-	r->ifi_index = dev->ifindex;
-	r->ifi_flags = dev_get_flags(dev);
-	r->ifi_change = 0;
+	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
+	if (nlh == NULL)
+		return -ENOBUFS;
 
-	RTA_PUT(skb, IFLA_IFNAME, strlen(dev->name)+1, dev->name);
+	hdr = nlmsg_data(nlh);
+	hdr->ifi_family = AF_INET6;
+	hdr->__ifi_pad = 0;
+	hdr->ifi_type = dev->type;
+	hdr->ifi_index = dev->ifindex;
+	hdr->ifi_flags = dev_get_flags(dev);
+	hdr->ifi_change = 0;
+
+	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
 
 	if (dev->addr_len)
-		RTA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
+		NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
 
-	RTA_PUT(skb, IFLA_MTU, sizeof(mtu), &mtu);
+	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
 	if (dev->ifindex != dev->iflink)
-		RTA_PUT(skb, IFLA_LINK, sizeof(int), &dev->iflink);
-			
-	subattr = (struct rtattr*)skb->tail;
+		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
 
-	RTA_PUT(skb, IFLA_PROTINFO, 0, NULL);
+	protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
+	if (protoinfo == NULL)
+		goto nla_put_failure;
 
-	/* return the device flags */
-	RTA_PUT(skb, IFLA_INET6_FLAGS, sizeof(__u32), &idev->if_flags);
+	NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags);
 
-	/* return interface cacheinfo */
 	ci.max_reasm_len = IPV6_MAXPLEN;
 	ci.tstamp = (__u32)(TIME_DELTA(idev->tstamp, INITIAL_JIFFIES) / HZ * 100
 		    + TIME_DELTA(idev->tstamp, INITIAL_JIFFIES) % HZ * 100 / HZ);
 	ci.reachable_time = idev->nd_parms->reachable_time;
 	ci.retrans_time = idev->nd_parms->retrans_time;
-	RTA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
-	
-	/* return the device sysctl params */
-	if ((array = kmalloc(DEVCONF_MAX * sizeof(*array), GFP_ATOMIC)) == NULL)
-		goto rtattr_failure;
-	ipv6_store_devconf(&idev->cnf, array, DEVCONF_MAX * sizeof(*array));
-	RTA_PUT(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(*array), array);
+	NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
+
+	conf = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
+	if (conf == NULL)
+		goto nla_put_failure;
+	ipv6_store_devconf(&idev->cnf, nla_data(conf), nla_len(conf));
 
 	/* XXX - Statistics/MC not implemented */
-	subattr->rta_len = skb->tail - (u8*)subattr;
 
-	nlh->nlmsg_len = skb->tail - b;
-	kfree(array);
-	return skb->len;
+	nla_nest_end(skb, protoinfo);
+	return nlmsg_end(skb, nlh);
 
-nlmsg_failure:
-rtattr_failure:
-	kfree(array);
-	skb_trim(skb, b - skb->data);
-	return -1;
+nla_put_failure:
+	return nlmsg_cancel(skb, nlh);
 }
 
 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
@@ -3501,18 +3494,15 @@
 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
 {
 	struct sk_buff *skb;
-	int payload = sizeof(struct ifinfomsg) + INET6_IFINFO_RTA_SPACE;
 	int err = -ENOBUFS;
 	
-	skb = nlmsg_new(nlmsg_total_size(payload), GFP_ATOMIC);
+	skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
 	if (skb == NULL)
 		goto errout;
 
 	err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
-	if (err < 0) {
-		kfree_skb(skb);
-		goto errout;
-	}
+	/* failure implies BUG in inet6_if_nlmsg_size() */
+	BUG_ON(err < 0);
 
 	err = rtnl_notify(skb, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
 errout:
@@ -3520,22 +3510,26 @@
 		rtnl_set_sk_err(RTNLGRP_IPV6_IFADDR, err);
 }
 
-/* Maximum length of prefix_cacheinfo attributes */
-#define INET6_PREFIX_RTA_SPACE \
-		RTA_SPACE(sizeof(((struct prefix_info *)NULL)->prefix)) /* ADDRESS */ + \
-		RTA_SPACE(sizeof(struct prefix_cacheinfo)) /* CACHEINFO */
+static inline size_t inet6_prefix_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct prefixmsg))
+	       + nla_total_size(sizeof(struct in6_addr))
+	       + nla_total_size(sizeof(struct prefix_cacheinfo));
+}
 
 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
-			struct prefix_info *pinfo, u32 pid, u32 seq, 
-			int event, unsigned int flags)
+			     struct prefix_info *pinfo, u32 pid, u32 seq,
+			     int event, unsigned int flags)
 {
-	struct prefixmsg	*pmsg;
-	struct nlmsghdr 	*nlh;
-	unsigned char		*b = skb->tail;
+	struct prefixmsg *pmsg;
+	struct nlmsghdr *nlh;
 	struct prefix_cacheinfo	ci;
 
-	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*pmsg), flags);
-	pmsg = NLMSG_DATA(nlh);
+	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*pmsg), flags);
+	if (nlh == NULL)
+		return -ENOBUFS;
+
+	pmsg = nlmsg_data(nlh);
 	pmsg->prefix_family = AF_INET6;
 	pmsg->prefix_pad1 = 0;
 	pmsg->prefix_pad2 = 0;
@@ -3543,44 +3537,37 @@
 	pmsg->prefix_len = pinfo->prefix_len;
 	pmsg->prefix_type = pinfo->type;
 	pmsg->prefix_pad3 = 0;
-	
 	pmsg->prefix_flags = 0;
 	if (pinfo->onlink)
 		pmsg->prefix_flags |= IF_PREFIX_ONLINK;
 	if (pinfo->autoconf)
 		pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
 
-	RTA_PUT(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix);
+	NLA_PUT(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix);
 
 	ci.preferred_time = ntohl(pinfo->prefered);
 	ci.valid_time = ntohl(pinfo->valid);
-	RTA_PUT(skb, PREFIX_CACHEINFO, sizeof(ci), &ci);
+	NLA_PUT(skb, PREFIX_CACHEINFO, sizeof(ci), &ci);
 
-	nlh->nlmsg_len = skb->tail - b;
-	return skb->len;
+	return nlmsg_end(skb, nlh);
 
-nlmsg_failure:
-rtattr_failure:
-	skb_trim(skb, b - skb->data);
-	return -1;
+nla_put_failure:
+	return nlmsg_cancel(skb, nlh);
 }
 
 static void inet6_prefix_notify(int event, struct inet6_dev *idev, 
 			 struct prefix_info *pinfo)
 {
 	struct sk_buff *skb;
-	int payload = sizeof(struct prefixmsg) + INET6_PREFIX_RTA_SPACE;
 	int err = -ENOBUFS;
 
-	skb = nlmsg_new(nlmsg_total_size(payload), GFP_ATOMIC);
+	skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
 	if (skb == NULL)
 		goto errout;
 
 	err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
-	if (err < 0) {
-		kfree_skb(skb);
-		goto errout;
-	}
+	/* failure implies BUG in inet6_prefix_nlmsg_size() */
+	BUG_ON(err < 0);
 
 	err = rtnl_notify(skb, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
 errout:
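
The BUG_ON(err < 0) conversions in this file rely on a simple invariant: every attribute the fill function emits must have a matching term in the corresponding *_nlmsg_size() helper, so the skb returned by nlmsg_new() can never run out of tailroom and a negative fill return really is a programming error. A minimal sketch of that pairing, with names (example_nlmsg_size(), example_fill()) that are illustrative only and not part of the patch:

	static inline size_t example_nlmsg_size(void)
	{
		return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		       + nla_total_size(4);		/* IFLA_MTU */
	}

	static int example_fill(struct sk_buff *skb, u32 pid, u32 seq, u32 mtu)
	{
		struct nlmsghdr *nlh;

		nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK,
				sizeof(struct ifinfomsg), 0);
		if (nlh == NULL)
			return -ENOBUFS;

		/* ifinfomsg payload left unset for brevity; every NLA_PUT here
		 * must be accounted for in example_nlmsg_size() */
		NLA_PUT_U32(skb, IFLA_MTU, mtu);
		return nlmsg_end(skb, nlh);

	nla_put_failure:
		return nlmsg_cancel(skb, nlh);
	}
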
@@ -3982,10 +3969,9 @@
 	struct addrconf_sysctl_table *t;
 	char *dev_name = NULL;
 
-	t = kmalloc(sizeof(*t), GFP_KERNEL);
+	t = kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL);
 	if (t == NULL)
 		return;
-	memcpy(t, &addrconf_sysctl, sizeof(*t));
 	for (i=0; t->addrconf_vars[i].data; i++) {
 		t->addrconf_vars[i].data += (char*)p - (char*)&ipv6_devconf;
 		t->addrconf_vars[i].de = NULL;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 858cae2..e5cd83b 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -49,6 +49,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/udp.h>
+#include <net/udplite.h>
 #include <net/tcp.h>
 #include <net/ipip.h>
 #include <net/protocol.h>
@@ -221,7 +222,7 @@
 		 * the user to assign a number at socket
 		 * creation time automatically shares.
 		 */
-		inet->sport = ntohs(inet->num);
+		inet->sport = htons(inet->num);
 		sk->sk_prot->hash(sk);
 	}
 	if (sk->sk_prot->init) {
@@ -341,7 +342,7 @@
 		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
 	if (snum)
 		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
-	inet->sport = ntohs(inet->num);
+	inet->sport = htons(inet->num);
 	inet->dport = 0;
 	inet->daddr = 0;
 out:
@@ -678,7 +679,7 @@
 	if (np->rxopt.all) {
 		if ((opt->hop && (np->rxopt.bits.hopopts ||
 				  np->rxopt.bits.ohopopts)) ||
-		    ((IPV6_FLOWINFO_MASK & *(u32*)skb->nh.raw) &&
+		    ((IPV6_FLOWINFO_MASK & *(__be32*)skb->nh.raw) &&
 		     np->rxopt.bits.rxflow) ||
 		    (opt->srcrt && (np->rxopt.bits.srcrt ||
 		     np->rxopt.bits.osrcrt)) ||
@@ -719,10 +720,8 @@
 {
 	if (ptr == NULL)
 		return;
-	if (ptr[0])
-		free_percpu(ptr[0]);
-	if (ptr[1])
-		free_percpu(ptr[1]);
+	free_percpu(ptr[0]);
+	free_percpu(ptr[1]);
 	ptr[0] = ptr[1] = NULL;
 }
 
@@ -737,8 +736,13 @@
 	if (snmp6_mib_init((void **)udp_stats_in6, sizeof (struct udp_mib),
 			   __alignof__(struct udp_mib)) < 0)
 		goto err_udp_mib;
+	if (snmp6_mib_init((void **)udplite_stats_in6, sizeof (struct udp_mib),
+			   __alignof__(struct udp_mib)) < 0)
+		goto err_udplite_mib;
 	return 0;
 
+err_udplite_mib:
+	snmp6_mib_free((void **)udp_stats_in6);
 err_udp_mib:
 	snmp6_mib_free((void **)icmpv6_statistics);
 err_icmp_mib:
@@ -753,6 +757,7 @@
 	snmp6_mib_free((void **)ipv6_statistics);
 	snmp6_mib_free((void **)icmpv6_statistics);
 	snmp6_mib_free((void **)udp_stats_in6);
+	snmp6_mib_free((void **)udplite_stats_in6);
 }
 
 static int __init inet6_init(void)
@@ -780,10 +785,14 @@
 	if (err)
 		goto out_unregister_tcp_proto;
 
-	err = proto_register(&rawv6_prot, 1);
+	err = proto_register(&udplitev6_prot, 1);
 	if (err)
 		goto out_unregister_udp_proto;
 
+	err = proto_register(&rawv6_prot, 1);
+	if (err)
+		goto out_unregister_udplite_proto;
+
 
 	/* Register the socket-side information for inet6_create.  */
 	for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
@@ -837,6 +846,8 @@
 		goto proc_tcp6_fail;
 	if (udp6_proc_init())
 		goto proc_udp6_fail;
+	if (udplite6_proc_init())
+		goto proc_udplite6_fail;
 	if (ipv6_misc_proc_init())
 		goto proc_misc6_fail;
 
@@ -862,6 +873,7 @@
 
 	/* Init v6 transport protocols. */
 	udpv6_init();
+	udplitev6_init();
 	tcpv6_init();
 
 	ipv6_packet_init();
@@ -879,6 +891,8 @@
 proc_anycast6_fail:
 	ipv6_misc_proc_exit();
 proc_misc6_fail:
+	udplite6_proc_exit();
+proc_udplite6_fail:
 	udp6_proc_exit();
 proc_udp6_fail:
 	tcp6_proc_exit();
@@ -902,6 +916,8 @@
 	sock_unregister(PF_INET6);
 out_unregister_raw_proto:
 	proto_unregister(&rawv6_prot);
+out_unregister_udplite_proto:
+	proto_unregister(&udplitev6_prot);
 out_unregister_udp_proto:
 	proto_unregister(&udpv6_prot);
 out_unregister_tcp_proto:
@@ -919,6 +935,7 @@
 	ac6_proc_exit();
  	ipv6_misc_proc_exit();
  	udp6_proc_exit();
+ 	udplite6_proc_exit();
  	tcp6_proc_exit();
  	raw6_proc_exit();
 #endif
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index b0d83e8..12c5a4d 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -354,10 +354,9 @@
 	if (!pskb_may_pull(skb, ah_hlen))
 		goto out;
 
-	tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC);
+	tmp_hdr = kmemdup(skb->nh.raw, hdr_len, GFP_ATOMIC);
 	if (!tmp_hdr)
 		goto out;
-	memcpy(tmp_hdr, skb->nh.raw, hdr_len);
 	if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len, XFRM_POLICY_IN))
 		goto free_out;
 	skb->nh.ipv6h->priority    = 0;
@@ -397,7 +396,7 @@
 }
 
 static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 
-                    int type, int code, int offset, __u32 info)
+                    int type, int code, int offset, __be32 info)
 {
 	struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
 	struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 7206747..5c94fea 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -207,7 +207,7 @@
 }
 
 void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, 
-		     u16 port, u32 info, u8 *payload)
+		     __be16 port, u32 info, u8 *payload)
 {
 	struct ipv6_pinfo *np  = inet6_sk(sk);
 	struct icmp6hdr *icmph = (struct icmp6hdr *)skb->h.raw;
@@ -318,13 +318,13 @@
 			ipv6_addr_copy(&sin->sin6_addr,
 			  (struct in6_addr *)(skb->nh.raw + serr->addr_offset));
 			if (np->sndflow)
-				sin->sin6_flowinfo = *(u32*)(skb->nh.raw + serr->addr_offset - 24) & IPV6_FLOWINFO_MASK;
+				sin->sin6_flowinfo = *(__be32*)(skb->nh.raw + serr->addr_offset - 24) & IPV6_FLOWINFO_MASK;
 			if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
 				sin->sin6_scope_id = IP6CB(skb)->iif;
 		} else {
 			ipv6_addr_set(&sin->sin6_addr, 0, 0,
 				      htonl(0xffff),
-				      *(u32*)(skb->nh.raw + serr->addr_offset));
+				      *(__be32*)(skb->nh.raw + serr->addr_offset));
 		}
 	}
 
@@ -397,12 +397,12 @@
 	}
 
 	if (np->rxopt.bits.rxtclass) {
-		int tclass = (ntohl(*(u32 *)skb->nh.ipv6h) >> 20) & 0xff;
+		int tclass = (ntohl(*(__be32 *)skb->nh.ipv6h) >> 20) & 0xff;
 		put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass);
 	}
 
-	if (np->rxopt.bits.rxflow && (*(u32*)skb->nh.raw & IPV6_FLOWINFO_MASK)) {
-		u32 flowinfo = *(u32*)skb->nh.raw & IPV6_FLOWINFO_MASK;
+	if (np->rxopt.bits.rxflow && (*(__be32*)skb->nh.raw & IPV6_FLOWINFO_MASK)) {
+		__be32 flowinfo = *(__be32*)skb->nh.raw & IPV6_FLOWINFO_MASK;
 		put_cmsg(msg, SOL_IPV6, IPV6_FLOWINFO, sizeof(flowinfo), &flowinfo);
 	}
 
@@ -560,12 +560,12 @@
 			}
 
 			if (fl->fl6_flowlabel&IPV6_FLOWINFO_MASK) {
-				if ((fl->fl6_flowlabel^*(u32 *)CMSG_DATA(cmsg))&~IPV6_FLOWINFO_MASK) {
+				if ((fl->fl6_flowlabel^*(__be32 *)CMSG_DATA(cmsg))&~IPV6_FLOWINFO_MASK) {
 					err = -EINVAL;
 					goto exit_f;
 				}
 			}
-			fl->fl6_flowlabel = IPV6_FLOWINFO_MASK & *(u32 *)CMSG_DATA(cmsg);
+			fl->fl6_flowlabel = IPV6_FLOWINFO_MASK & *(__be32 *)CMSG_DATA(cmsg);
 			break;
 
 		case IPV6_2292HOPOPTS:
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index e78680a..25dcf69 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -256,7 +256,7 @@
 }
 
 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-                     int type, int code, int offset, __u32 info)
+                     int type, int code, int offset, __be32 info)
 {
 	struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
 	struct ipv6_esp_hdr *esph = (struct ipv6_esp_hdr*)(skb->data+offset);
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 88c96b1..0711f92 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -284,10 +284,12 @@
 #ifdef CONFIG_IPV6_MIP6
 	__u16 dstbuf;
 #endif
+	struct dst_entry *dst;
 
 	if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
 	    !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
-		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+				 IPSTATS_MIB_INHDRERRORS);
 		kfree_skb(skb);
 		return -1;
 	}
@@ -298,7 +300,9 @@
 	dstbuf = opt->dst1;
 #endif
 
+	dst = dst_clone(skb->dst);
 	if (ip6_parse_tlv(tlvprocdestopt_lst, skbp)) {
+		dst_release(dst);
 		skb = *skbp;
 		skb->h.raw += ((skb->h.raw[1]+1)<<3);
 		opt = IP6CB(skb);
@@ -310,7 +314,8 @@
 		return 1;
 	}
 
-	IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
+	dst_release(dst);
 	return -1;
 }
 
@@ -365,7 +370,8 @@
 
 	if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
 	    !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
-		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+				 IPSTATS_MIB_INHDRERRORS);
 		kfree_skb(skb);
 		return -1;
 	}
@@ -374,7 +380,8 @@
 
 	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr) ||
 	    skb->pkt_type != PACKET_HOST) {
-		IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
+		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+				 IPSTATS_MIB_INADDRERRORS);
 		kfree_skb(skb);
 		return -1;
 	}
@@ -388,7 +395,8 @@
 			 * processed by own
 			 */
 			if (!addr) {
-				IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
+				IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+						 IPSTATS_MIB_INADDRERRORS);
 				kfree_skb(skb);
 				return -1;
 			}
@@ -410,7 +418,8 @@
 	switch (hdr->type) {
 	case IPV6_SRCRT_TYPE_0:
 		if (hdr->hdrlen & 0x01) {
-			IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+					 IPSTATS_MIB_INHDRERRORS);
 			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->hdrlen) - skb->nh.raw);
 			return -1;
 		}
@@ -419,14 +428,16 @@
 	case IPV6_SRCRT_TYPE_2:
 		/* Silently discard invalid RTH type 2 */
 		if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
-			IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+					 IPSTATS_MIB_INHDRERRORS);
 			kfree_skb(skb);
 			return -1;
 		}
 		break;
 #endif
 	default:
-		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+				 IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw);
 		return -1;
 	}
@@ -439,7 +450,8 @@
 	n = hdr->hdrlen >> 1;
 
 	if (hdr->segments_left > n) {
-		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+				 IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->segments_left) - skb->nh.raw);
 		return -1;
 	}
@@ -449,12 +461,14 @@
 	 */
 	if (skb_cloned(skb)) {
 		struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
-		kfree_skb(skb);
 		/* the copy is a forwarded packet */
 		if (skb2 == NULL) {
-			IP6_INC_STATS_BH(IPSTATS_MIB_OUTDISCARDS);	
+			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+					 IPSTATS_MIB_OUTDISCARDS);
+			kfree_skb(skb);
 			return -1;
 		}
+		kfree_skb(skb);
 		*skbp = skb = skb2;
 		opt = IP6CB(skb2);
 		hdr = (struct ipv6_rt_hdr *) skb2->h.raw;
@@ -475,12 +489,14 @@
 		if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
 				     (xfrm_address_t *)&skb->nh.ipv6h->saddr,
 				     IPPROTO_ROUTING) < 0) {
-			IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
+			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+					 IPSTATS_MIB_INADDRERRORS);
 			kfree_skb(skb);
 			return -1;
 		}
 		if (!ipv6_chk_home_addr(addr)) {
-			IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
+			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+					 IPSTATS_MIB_INADDRERRORS);
 			kfree_skb(skb);
 			return -1;
 		}
@@ -491,7 +507,8 @@
 	}
 
 	if (ipv6_addr_is_multicast(addr)) {
-		IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
+		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+				 IPSTATS_MIB_INADDRERRORS);
 		kfree_skb(skb);
 		return -1;
 	}
@@ -510,7 +527,8 @@
 
 	if (skb->dst->dev->flags&IFF_LOOPBACK) {
 		if (skb->nh.ipv6h->hop_limit <= 1) {
-			IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+					 IPSTATS_MIB_INHDRERRORS);
 			icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
 				    0, skb->dev);
 			kfree_skb(skb);
@@ -632,24 +650,25 @@
 	if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) {
 		LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
 		               skb->nh.raw[optoff+1]);
-		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+				 IPSTATS_MIB_INHDRERRORS);
 		goto drop;
 	}
 
-	pkt_len = ntohl(*(u32*)(skb->nh.raw+optoff+2));
+	pkt_len = ntohl(*(__be32*)(skb->nh.raw+optoff+2));
 	if (pkt_len <= IPV6_MAXPLEN) {
-		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
 		return 0;
 	}
 	if (skb->nh.ipv6h->payload_len) {
-		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
 		return 0;
 	}
 
 	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
-		IP6_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS);
+		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INTRUNCATEDPKTS);
 		goto drop;
 	}
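
The skb_cloned() hunk above also changes the failure ordering: the original skb is now released only after skb_copy() has been checked, so the OUTDISCARDS bump can still dereference skb->dst. Below is a minimal userspace sketch of that ordering with plain malloc()/free() stand-ins; buf_dup(), make_private_copy() and the counter are illustrative names, not kernel interfaces.

/*
 * Sketch only: release the original buffer after the replacement has
 * been checked, so the failure path can still account for it.  Plain
 * malloc()/free() stand-ins, not skb code.
 */
#include <stdlib.h>
#include <string.h>

static unsigned long outdiscards;	/* stand-in for the per-device counter */

static char *buf_dup(const char *old, size_t len)
{
	char *copy = malloc(len);

	if (copy)
		memcpy(copy, old, len);
	return copy;
}

static int make_private_copy(char **bufp, size_t len)
{
	char *copy = buf_dup(*bufp, len);

	if (copy == NULL) {
		outdiscards++;		/* *bufp is still valid here */
		free(*bufp);
		*bufp = NULL;
		return -1;
	}
	free(*bufp);			/* only now drop the original */
	*bufp = copy;
	return 0;
}

int main(void)
{
	char *buf = buf_dup("example payload", 16);
	int err;

	if (!buf)
		return 1;
	err = make_private_copy(&buf, 16);
	free(buf);			/* free(NULL) is fine on the failure path */
	return err ? 1 : 0;
}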
 
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 315bc1fbe..21cbbbd 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -77,7 +77,7 @@
 		if (hp == NULL)
 			return -1;
 		if (nexthdr == NEXTHDR_FRAGMENT) {
-			unsigned short _frag_off, *fp;
+			__be16 _frag_off, *fp;
 			fp = skb_header_pointer(skb,
 						start+offsetof(struct frag_hdr,
 							       frag_off),
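
The _frag_off change above is part of the tree-wide endianness annotation work: fields that carry network-byte-order values are typed __be16/__be32 so sparse can flag host/network mix-ups when endian checking is enabled. The sketch below spells out the on-wire convention for the fragment-offset word; struct frag_hdr here is just the RFC 2460 layout written in plain C, and read_be16() is an illustrative helper, not a kernel function.

/*
 * Sketch: the IPv6 fragment header stores "fragment offset" in the top
 * 13 bits of a 16-bit network-order field.  Reading it byte by byte
 * avoids caring about host endianness at all.
 */
#include <stdint.h>
#include <stdio.h>

struct frag_hdr {			/* on-wire layout */
	uint8_t  nexthdr;
	uint8_t  reserved;
	uint8_t  frag_off[2];		/* big-endian: offset(13) | res(2) | M(1) */
	uint8_t  identification[4];
};

static uint16_t read_be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

int main(void)
{
	/* offset 1504 bytes = 188 eight-byte units, more-fragments bit set */
	struct frag_hdr fh = { .nexthdr = 58, .frag_off = { 0x05, 0xE1 } };
	uint16_t raw = read_be16(fh.frag_off);

	printf("offset=%u bytes, more_fragments=%u\n",
	       raw & 0xFFF8u, raw & 1u);
	return 0;
}
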
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 1896ecb..0862809 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -25,10 +25,6 @@
 	struct fib_rule		common;
 	struct rt6key		src;
 	struct rt6key		dst;
-#ifdef CONFIG_IPV6_ROUTE_FWMARK
-	u32			fwmark;
-	u32			fwmask;
-#endif
 	u8			tclass;
 };
 
@@ -67,7 +63,7 @@
 		fib_rule_put(arg.rule);
 
 	if (arg.result)
-		return (struct dst_entry *) arg.result;
+		return arg.result;
 
 	dst_hold(&ip6_null_entry.u.dst);
 	return &ip6_null_entry.u.dst;
@@ -130,22 +126,13 @@
 	if (r->tclass && r->tclass != ((ntohl(fl->fl6_flowlabel) >> 20) & 0xff))
 		return 0;
 
-#ifdef CONFIG_IPV6_ROUTE_FWMARK
-	if ((r->fwmark ^ fl->fl6_fwmark) & r->fwmask)
-		return 0;
-#endif
-
 	return 1;
 }
 
 static struct nla_policy fib6_rule_policy[FRA_MAX+1] __read_mostly = {
-	[FRA_IFNAME]	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
-	[FRA_PRIORITY]	= { .type = NLA_U32 },
+	FRA_GENERIC_POLICY,
 	[FRA_SRC]	= { .len = sizeof(struct in6_addr) },
 	[FRA_DST]	= { .len = sizeof(struct in6_addr) },
-	[FRA_FWMARK]	= { .type = NLA_U32 },
-	[FRA_FWMASK]	= { .type = NLA_U32 },
-	[FRA_TABLE]	= { .type = NLA_U32 },
 };
 
 static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
@@ -155,8 +142,7 @@
 	int err = -EINVAL;
 	struct fib6_rule *rule6 = (struct fib6_rule *) rule;
 
-	if (frh->src_len > 128 || frh->dst_len > 128 ||
-	    (frh->tos & ~IPV6_FLOWINFO_MASK))
+	if (frh->src_len > 128 || frh->dst_len > 128)
 		goto errout;
 
 	if (rule->action == FR_ACT_TO_TBL) {
@@ -177,23 +163,6 @@
 		nla_memcpy(&rule6->dst.addr, tb[FRA_DST],
 			   sizeof(struct in6_addr));
 
-#ifdef CONFIG_IPV6_ROUTE_FWMARK
-	if (tb[FRA_FWMARK]) {
-		rule6->fwmark = nla_get_u32(tb[FRA_FWMARK]);
-		if (rule6->fwmark) {
-			/*
-			 * if the mark value is non-zero,
-			 * all bits are compared by default
-			 * unless a mask is explicitly specified.
-			 */
-			rule6->fwmask = 0xFFFFFFFF;
-		}
-	}
-
-	if (tb[FRA_FWMASK])
-		rule6->fwmask = nla_get_u32(tb[FRA_FWMASK]);
-#endif
-
 	rule6->src.plen = frh->src_len;
 	rule6->dst.plen = frh->dst_len;
 	rule6->tclass = frh->tos;
@@ -225,14 +194,6 @@
 	    nla_memcmp(tb[FRA_DST], &rule6->dst.addr, sizeof(struct in6_addr)))
 		return 0;
 
-#ifdef CONFIG_IPV6_ROUTE_FWMARK
-	if (tb[FRA_FWMARK] && (rule6->fwmark != nla_get_u32(tb[FRA_FWMARK])))
-		return 0;
-
-	if (tb[FRA_FWMASK] && (rule6->fwmask != nla_get_u32(tb[FRA_FWMASK])))
-		return 0;
-#endif
-
 	return 1;
 }
 
@@ -254,14 +215,6 @@
 		NLA_PUT(skb, FRA_SRC, sizeof(struct in6_addr),
 			&rule6->src.addr);
 
-#ifdef CONFIG_IPV6_ROUTE_FWMARK
-	if (rule6->fwmark)
-		NLA_PUT_U32(skb, FRA_FWMARK, rule6->fwmark);
-
-	if (rule6->fwmask || rule6->fwmark)
-		NLA_PUT_U32(skb, FRA_FWMASK, rule6->fwmask);
-#endif
-
 	return 0;
 
 nla_put_failure:
@@ -278,6 +231,12 @@
 	return 0x3FFF;
 }
 
+static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
+{
+	return nla_total_size(16) /* dst */
+	       + nla_total_size(16); /* src */
+}
+
 static struct fib_rules_ops fib6_rules_ops = {
 	.family			= AF_INET6,
 	.rule_size		= sizeof(struct fib6_rule),
@@ -287,6 +246,7 @@
 	.compare		= fib6_rule_compare,
 	.fill			= fib6_rule_fill,
 	.default_pref		= fib6_rule_default_pref,
+	.nlmsg_payload		= fib6_rule_nlmsg_payload,
 	.nlgroup		= RTNLGRP_IPV6_RULE,
 	.policy			= fib6_rule_policy,
 	.rules_list		= &fib6_rules,
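
The new fib6_rule_nlmsg_payload() callback above lets the fib_rules core size rule dump messages for the two IPv6 address attributes instead of guessing. nla_total_size() accounts for one attribute header plus the payload, rounded to 4-byte alignment; the sketch below redoes that arithmetic in userspace (the two macros mirror the generic netlink attribute layout and are written out here only for illustration).

/* Sketch: how much message space two 16-byte IPv6 address attributes need. */
#include <stdio.h>

#define NLA_HDRLEN	4			/* attribute header: 16-bit len + 16-bit type */
#define NLA_ALIGN(len)	(((len) + 3) & ~3)	/* attributes are 4-byte aligned */

static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	int dst = nla_total_size(16);	/* FRA_DST: struct in6_addr */
	int src = nla_total_size(16);	/* FRA_SRC: struct in6_addr */

	printf("per attribute: %d bytes, both: %d bytes\n", dst, dst + src);
	/* prints: per attribute: 20 bytes, both: 40 bytes */
	return 0;
}
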
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 4ec8760..3dcc4b7 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -177,7 +177,8 @@
 	 */
 	dst = ip6_route_output(sk, fl);
 	if (dst->error) {
-		IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
+		IP6_INC_STATS(ip6_dst_idev(dst),
+			      IPSTATS_MIB_OUTNOROUTES);
 	} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
 		res = 1;
 	} else {
@@ -233,7 +234,7 @@
 						      len, fl->proto,
 						      skb->csum);
 	} else {
-		u32 tmp_csum = 0;
+		__wsum tmp_csum = 0;
 
 		skb_queue_walk(&sk->sk_write_queue, skb) {
 			tmp_csum = csum_add(tmp_csum, skb->csum);
@@ -241,13 +242,11 @@
 
 		tmp_csum = csum_partial((char *)icmp6h,
 					sizeof(struct icmp6hdr), tmp_csum);
-		tmp_csum = csum_ipv6_magic(&fl->fl6_src,
-					   &fl->fl6_dst,
-					   len, fl->proto, tmp_csum);
-		icmp6h->icmp6_cksum = tmp_csum;
+		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
+						      &fl->fl6_dst,
+						      len, fl->proto,
+						      tmp_csum);
 	}
-	if (icmp6h->icmp6_cksum == 0)
-		icmp6h->icmp6_cksum = -1;
 	ip6_push_pending_frames(sk);
 out:
 	return err;
@@ -263,7 +262,7 @@
 {
 	struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
 	struct sk_buff *org_skb = msg->skb;
-	__u32 csum = 0;
+	__wsum csum = 0;
 
 	csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
 				      to, len, csum);
@@ -555,7 +554,7 @@
 	icmpv6_xmit_unlock();
 }
 
-static void icmpv6_notify(struct sk_buff *skb, int type, int code, u32 info)
+static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
 {
 	struct in6_addr *saddr, *daddr;
 	struct inet6_protocol *ipprot;
@@ -637,8 +636,8 @@
 			break;
 		/* fall through */
 	case CHECKSUM_NONE:
-		skb->csum = ~csum_ipv6_magic(saddr, daddr, skb->len,
-					     IPPROTO_ICMPV6, 0);
+		skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
+					     IPPROTO_ICMPV6, 0));
 		if (__skb_checksum_complete(skb)) {
 			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [" NIP6_FMT " > " NIP6_FMT "]\n",
 				       NIP6(*saddr), NIP6(*daddr));
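
The icmp.c hunks above are checksum-annotation changes: running sums stay in __wsum (an unfolded 32-bit one's-complement accumulator) and only the final header field holds the folded, inverted __sum16 that csum_ipv6_magic() returns. A userspace sketch of the fold step, fed with the well-known IPv4 header checksum example; csum_add_sketch()/csum_fold_sketch() are illustrative helpers, not the arch-optimized kernel routines.

/*
 * Sketch: folding a 32-bit one's-complement accumulator (__wsum in the
 * kernel) down to the 16-bit value stored in the header (__sum16).
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t csum_add_sketch(uint32_t sum, uint16_t word)
{
	return sum + word;
}

static uint16_t csum_fold_sketch(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries back in ... */
	sum = (sum & 0xffff) + (sum >> 16);	/* ... possibly twice */
	return (uint16_t)~sum;			/* final complement */
}

int main(void)
{
	/* the classic IPv4 header example, checksum field omitted */
	const uint16_t words[] = { 0x4500, 0x0073, 0x0000, 0x4000,
				   0x4011, 0xc0a8, 0x0001, 0xc0a8, 0x00c7 };
	uint32_t sum = 0;

	for (unsigned i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		sum = csum_add_sketch(sum, words[i]);

	printf("folded checksum = 0x%04x\n", csum_fold_sketch(sum));
	/* prints 0xb861, the expected header checksum */
	return 0;
}
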
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 827f41d..c700302 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -52,20 +52,20 @@
 /*
  * request_sock (formerly open request) hash tables.
  */
-static u32 inet6_synq_hash(const struct in6_addr *raddr, const u16 rport,
+static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
 			   const u32 rnd, const u16 synq_hsize)
 {
-	u32 a = raddr->s6_addr32[0];
-	u32 b = raddr->s6_addr32[1];
-	u32 c = raddr->s6_addr32[2];
+	u32 a = (__force u32)raddr->s6_addr32[0];
+	u32 b = (__force u32)raddr->s6_addr32[1];
+	u32 c = (__force u32)raddr->s6_addr32[2];
 
 	a += JHASH_GOLDEN_RATIO;
 	b += JHASH_GOLDEN_RATIO;
 	c += rnd;
 	__jhash_mix(a, b, c);
 
-	a += raddr->s6_addr32[3];
-	b += (u32)rport;
+	a += (__force u32)raddr->s6_addr32[3];
+	b += (__force u32)rport;
 	__jhash_mix(a, b, c);
 
 	return c & (synq_hsize - 1);
@@ -73,7 +73,7 @@
 
 struct request_sock *inet6_csk_search_req(const struct sock *sk,
 					  struct request_sock ***prevp,
-					  const __u16 rport,
+					  const __be16 rport,
 					  const struct in6_addr *raddr,
 					  const struct in6_addr *laddr,
 					  const int iif)
@@ -139,9 +139,8 @@
 
 EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
 
-int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
+int inet6_csk_xmit(struct sk_buff *skb, struct sock *sk, int ipfragok)
 {
-	struct sock *sk = skb->sk;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct flowi fl;
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 8accd1f..b7e5bae 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -57,7 +57,7 @@
  */
 struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo,
 					   const struct in6_addr *saddr,
-					   const u16 sport,
+					   const __be16 sport,
 					   const struct in6_addr *daddr,
 					   const u16 hnum,
 					   const int dif)
@@ -146,8 +146,8 @@
 EXPORT_SYMBOL_GPL(inet6_lookup_listener);
 
 struct sock *inet6_lookup(struct inet_hashinfo *hashinfo,
-			  const struct in6_addr *saddr, const u16 sport,
-			  const struct in6_addr *daddr, const u16 dport,
+			  const struct in6_addr *saddr, const __be16 sport,
+			  const struct in6_addr *daddr, const __be16 dport,
 			  const int dif)
 {
 	struct sock *sk;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index f98ca30..96d8310 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -50,7 +50,7 @@
 
 struct rt6_statistics	rt6_stats;
 
-static kmem_cache_t * fib6_node_kmem __read_mostly;
+static struct kmem_cache * fib6_node_kmem __read_mostly;
 
 enum fib_walk_state_t
 {
@@ -139,9 +139,9 @@
  *	test bit
  */
 
-static __inline__ int addr_bit_set(void *token, int fn_bit)
+static __inline__ __be32 addr_bit_set(void *token, int fn_bit)
 {
-	__u32 *addr = token;
+	__be32 *addr = token;
 
 	return htonl(1 << ((~fn_bit)&0x1F)) & addr[fn_bit>>5];
 }
@@ -150,7 +150,7 @@
 {
 	struct fib6_node *fn;
 
-	if ((fn = kmem_cache_alloc(fib6_node_kmem, SLAB_ATOMIC)) != NULL)
+	if ((fn = kmem_cache_alloc(fib6_node_kmem, GFP_ATOMIC)) != NULL)
 		memset(fn, 0, sizeof(struct fib6_node));
 
 	return fn;
@@ -434,7 +434,7 @@
 	struct fib6_node *pn = NULL;
 	struct rt6key *key;
 	int	bit;
-       	int	dir = 0;
+       	__be32	dir = 0;
 	__u32	sernum = fib6_new_sernum();
 
 	RT6_TRACE("fib6_add_1\n");
@@ -829,7 +829,7 @@
 					struct lookup_args *args)
 {
 	struct fib6_node *fn;
-	int dir;
+	__be32 dir;
 
 	if (unlikely(args->offset == 0))
 		return NULL;
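
addr_bit_set() above now advertises that it returns a big-endian-masked word (__be32): the trie numbers bits from the most significant bit of the address, builds the mask in host order and byte-swaps the mask rather than the stored address word. A small standalone version of the same test; addr_bit_set_sketch() is an illustrative rewrite, not the kernel function.

/*
 * Sketch: testing bit <n> of an IPv6 address kept as four big-endian
 * 32-bit words, counting bit 0 as the most significant bit of the
 * address (the FIB trie's convention).
 */
#include <arpa/inet.h>		/* htonl() */
#include <stdint.h>
#include <stdio.h>

static uint32_t addr_bit_set_sketch(const uint32_t *addr_be, int fn_bit)
{
	/* build the mask in host order, then convert the mask, not the word */
	return htonl(1u << ((~fn_bit) & 0x1F)) & addr_be[fn_bit >> 5];
}

int main(void)
{
	/* 2001:db8::1 as four big-endian words */
	uint32_t addr[4] = { htonl(0x20010db8), 0, 0, htonl(0x00000001) };

	printf("bit   0: %s\n", addr_bit_set_sketch(addr,   0) ? "set" : "clear");
	printf("bit   2: %s\n", addr_bit_set_sketch(addr,   2) ? "set" : "clear");
	printf("bit 127: %s\n", addr_bit_set_sketch(addr, 127) ? "set" : "clear");
	return 0;
}
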
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 6d4533b..624fae2 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -61,7 +61,7 @@
 static DEFINE_RWLOCK(ip6_sk_fl_lock);
 
 
-static __inline__ struct ip6_flowlabel * __fl_lookup(u32 label)
+static __inline__ struct ip6_flowlabel * __fl_lookup(__be32 label)
 {
 	struct ip6_flowlabel *fl;
 
@@ -72,7 +72,7 @@
 	return NULL;
 }
 
-static struct ip6_flowlabel * fl_lookup(u32 label)
+static struct ip6_flowlabel * fl_lookup(__be32 label)
 {
 	struct ip6_flowlabel *fl;
 
@@ -153,7 +153,7 @@
 	write_unlock(&ip6_fl_lock);
 }
 
-static int fl_intern(struct ip6_flowlabel *fl, __u32 label)
+static int fl_intern(struct ip6_flowlabel *fl, __be32 label)
 {
 	fl->label = label & IPV6_FLOWLABEL_MASK;
 
@@ -182,7 +182,7 @@
 
 /* Socket flowlabel lists */
 
-struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, u32 label)
+struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, __be32 label)
 {
 	struct ipv6_fl_socklist *sfl;
 	struct ipv6_pinfo *np = inet6_sk(sk);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 6b8e6d7..ad0b8ab 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -60,14 +60,22 @@
 {
 	struct ipv6hdr *hdr;
 	u32 		pkt_len;
+	struct inet6_dev *idev;
 
-	if (skb->pkt_type == PACKET_OTHERHOST)
-		goto drop;
+	if (skb->pkt_type == PACKET_OTHERHOST) {
+		kfree_skb(skb);
+		return 0;
+	}
 
-	IP6_INC_STATS_BH(IPSTATS_MIB_INRECEIVES);
+	rcu_read_lock();
+
+	idev = __in6_dev_get(skb->dev);
+
+	IP6_INC_STATS_BH(idev, IPSTATS_MIB_INRECEIVES);
 
 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
-		IP6_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
+		IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS);
+		rcu_read_unlock();
 		goto out;
 	}
 
@@ -84,7 +92,7 @@
 	 * arrived via the sending interface (ethX), because of the
 	 * nature of scoping architecture. --yoshfuji
 	 */
-	IP6CB(skb)->iif = skb->dst ? ((struct rt6_info *)skb->dst)->rt6i_idev->dev->ifindex : dev->ifindex;
+	IP6CB(skb)->iif = skb->dst ? ip6_dst_idev(skb->dst)->dev->ifindex : dev->ifindex;
 
 	if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
 		goto err;
@@ -104,7 +112,7 @@
 		if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
 			goto truncated;
 		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) {
-			IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+			IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS);
 			goto drop;
 		}
 		hdr = skb->nh.ipv6h;
@@ -112,17 +120,21 @@
 
 	if (hdr->nexthdr == NEXTHDR_HOP) {
 		if (ipv6_parse_hopopts(&skb) < 0) {
-			IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+			IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS);
+			rcu_read_unlock();
 			return 0;
 		}
 	}
 
+	rcu_read_unlock();
+
 	return NF_HOOK(PF_INET6,NF_IP6_PRE_ROUTING, skb, dev, NULL, ip6_rcv_finish);
 truncated:
-	IP6_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS);
+	IP6_INC_STATS_BH(idev, IPSTATS_MIB_INTRUNCATEDPKTS);
 err:
-	IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+	IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS);
 drop:
+	rcu_read_unlock();
 	kfree_skb(skb);
 out:
 	return 0;
@@ -140,6 +152,7 @@
 	unsigned int nhoff;
 	int nexthdr;
 	u8 hash;
+	struct inet6_dev *idev;
 
 	/*
 	 *	Parse extension headers
@@ -147,6 +160,7 @@
 
 	rcu_read_lock();
 resubmit:
+	idev = ip6_dst_idev(skb->dst);
 	if (!pskb_pull(skb, skb->h.raw - skb->data))
 		goto discard;
 	nhoff = IP6CB(skb)->nhoff;
@@ -185,24 +199,24 @@
 		if (ret > 0)
 			goto resubmit;
 		else if (ret == 0)
-			IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
+			IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDELIVERS);
 	} else {
 		if (!raw_sk) {
 			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-				IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
+				IP6_INC_STATS_BH(idev, IPSTATS_MIB_INUNKNOWNPROTOS);
 				icmpv6_send(skb, ICMPV6_PARAMPROB,
 				            ICMPV6_UNK_NEXTHDR, nhoff,
 				            skb->dev);
 			}
 		} else
-			IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
+			IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDELIVERS);
 		kfree_skb(skb);
 	}
 	rcu_read_unlock();
 	return 0;
 
 discard:
-	IP6_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
+	IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS);
 	rcu_read_unlock();
 	kfree_skb(skb);
 	return 0;
@@ -219,7 +233,7 @@
 	struct ipv6hdr *hdr;
 	int deliver;
 
-	IP6_INC_STATS_BH(IPSTATS_MIB_INMCASTPKTS);
+	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCASTPKTS);
 
 	hdr = skb->nh.ipv6h;
 	deliver = likely(!(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI))) ||
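
ip6_input.c, like the rest of this series, moves the IP6_INC_STATS*() macros from a single global MIB to a per-interface one looked up via __in6_dev_get(skb->dev) or ip6_dst_idev(skb->dst), with rcu_read_lock() held across the lookup and use. The sketch below shows only the bookkeeping idea, one counter array per device resolved from the route; the struct layouts and the simplified ip6_dst_idev()/ip6_inc_stats() helpers are stand-ins, not the kernel definitions.

/*
 * Sketch only: one counter array per interface instead of one global
 * array.  struct inet6_dev / struct dst_entry here are simplified
 * stand-ins for the kernel structures of the same names.
 */
#include <stdio.h>

enum { IPSTATS_MIB_INRECEIVES, IPSTATS_MIB_INHDRERRORS, __IPSTATS_MIB_MAX };

struct inet6_dev {
	const char *name;
	unsigned long mib[__IPSTATS_MIB_MAX];
};

struct dst_entry {
	struct inet6_dev *idev;		/* interface the route resolved to */
};

static struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
{
	return dst ? dst->idev : NULL;
}

static void ip6_inc_stats(struct inet6_dev *idev, int field)
{
	if (idev)			/* a route may carry no interface */
		idev->mib[field]++;
}

int main(void)
{
	struct inet6_dev eth0 = { .name = "eth0" };
	struct dst_entry dst = { .idev = &eth0 };

	ip6_inc_stats(ip6_dst_idev(&dst), IPSTATS_MIB_INHDRERRORS);
	printf("%s INHDRERRORS=%lu\n", eth0.name,
	       eth0.mib[IPSTATS_MIB_INHDRERRORS]);
	return 0;
}
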
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 6671691..e9212c7 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -88,7 +88,7 @@
 	} else if (dst->neighbour)
 		return dst->neighbour->output(skb);
 
-	IP6_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
+	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 	kfree_skb(skb);
 	return -EINVAL;
 
@@ -118,6 +118,7 @@
 
 	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
 		struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;
+		struct inet6_dev *idev = ip6_dst_idev(skb->dst);
 
 		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
 		    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
@@ -133,13 +134,13 @@
 					ip6_dev_loopback_xmit);
 
 			if (skb->nh.ipv6h->hop_limit == 0) {
-				IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+				IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
 				kfree_skb(skb);
 				return 0;
 			}
 		}
 
-		IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
+		IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS);
 	}
 
 	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb,NULL, skb->dev,ip6_output_finish);
@@ -182,12 +183,14 @@
 
 		if (skb_headroom(skb) < head_room) {
 			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
-			kfree_skb(skb);
-			skb = skb2;
-			if (skb == NULL) {	
-				IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+			if (skb2 == NULL) {
+				IP6_INC_STATS(ip6_dst_idev(skb->dst),
+					      IPSTATS_MIB_OUTDISCARDS);
+				kfree_skb(skb);
 				return -ENOBUFS;
 			}
+			kfree_skb(skb);
+			skb = skb2;
 			if (sk)
 				skb_set_owner_w(skb, sk);
 		}
@@ -217,7 +220,7 @@
 	if (tclass < 0)
 		tclass = 0;
 
-	*(u32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;
+	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;
 
 	hdr->payload_len = htons(seg_len);
 	hdr->nexthdr = proto;
@@ -230,7 +233,8 @@
 
 	mtu = dst_mtu(dst);
 	if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
-		IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+		IP6_INC_STATS(ip6_dst_idev(skb->dst),
+			      IPSTATS_MIB_OUTREQUESTS);
 		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
 				dst_output);
 	}
@@ -239,7 +243,7 @@
 		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
 	skb->dev = dst->dev;
 	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
-	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
 	kfree_skb(skb);
 	return -EMSGSIZE;
 }
@@ -267,7 +271,7 @@
 	hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
 	skb->nh.ipv6h = hdr;
 
-	*(u32*)hdr = htonl(0x60000000);
+	*(__be32*)hdr = htonl(0x60000000);
 
 	hdr->payload_len = htons(len);
 	hdr->nexthdr = proto;
@@ -373,7 +377,7 @@
 		goto error;
 
 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
-		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
+		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
 		goto drop;
 	}
 
@@ -406,7 +410,7 @@
 		skb->dev = dst->dev;
 		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
 			    0, skb->dev);
-		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
 
 		kfree_skb(skb);
 		return -ETIMEDOUT;
@@ -419,13 +423,13 @@
 		if (proxied > 0)
 			return ip6_input(skb);
 		else if (proxied < 0) {
-			IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
+			IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
 			goto drop;
 		}
 	}
 
 	if (!xfrm6_route_forward(skb)) {
-		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
+		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
 		goto drop;
 	}
 	dst = skb->dst;
@@ -464,14 +468,14 @@
 		/* Again, force OUTPUT device used as source address */
 		skb->dev = dst->dev;
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
-		IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS);
-		IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
+		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
+		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
 		kfree_skb(skb);
 		return -EMSGSIZE;
 	}
 
 	if (skb_cow(skb, dst->dev->hard_header_len)) {
-		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
 		goto drop;
 	}
 
@@ -481,11 +485,11 @@
  
 	hdr->hop_limit--;
 
-	IP6_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
 	return NF_HOOK(PF_INET6,NF_IP6_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish);
 
 error:
-	IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
+	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
 drop:
 	kfree_skb(skb);
 	return -EINVAL;
@@ -499,12 +503,12 @@
 	dst_release(to->dst);
 	to->dst = dst_clone(from->dst);
 	to->dev = from->dev;
+	to->mark = from->mark;
 
 #ifdef CONFIG_NET_SCHED
 	to->tc_index = from->tc_index;
 #endif
 #ifdef CONFIG_NETFILTER
-	to->nfmark = from->nfmark;
 	/* Connection association is same as pre-frag packet */
 	nf_conntrack_put(to->nfct);
 	to->nfct = from->nfct;
@@ -571,7 +575,7 @@
 	struct ipv6hdr *tmp_hdr;
 	struct frag_hdr *fh;
 	unsigned int mtu, hlen, left, len;
-	u32 frag_id = 0;
+	__be32 frag_id = 0;
 	int ptr, offset = 0, err=0;
 	u8 *prevhdr, nexthdr = 0;
 
@@ -620,14 +624,13 @@
 		skb_shinfo(skb)->frag_list = NULL;
 		/* BUILD HEADER */
 
-		tmp_hdr = kmalloc(hlen, GFP_ATOMIC);
+		*prevhdr = NEXTHDR_FRAGMENT;
+		tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC);
 		if (!tmp_hdr) {
-			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+			IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
 			return -ENOMEM;
 		}
 
-		*prevhdr = NEXTHDR_FRAGMENT;
-		memcpy(tmp_hdr, skb->nh.raw, hlen);
 		__skb_pull(skb, hlen);
 		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
 		skb->nh.raw = __skb_push(skb, hlen);
@@ -643,7 +646,8 @@
 		skb->data_len = first_len - skb_headlen(skb);
 		skb->len = first_len;
 		skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));
- 
+
+		dst_hold(&rt->u.dst);
 
 		for (;;) {
 			/* Prepare header of the next frame,
@@ -667,7 +671,7 @@
 			
 			err = output(skb);
 			if(!err)
-				IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);
+				IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGCREATES);
 
 			if (err || !frag)
 				break;
@@ -680,7 +684,8 @@
 		kfree(tmp_hdr);
 
 		if (err == 0) {
-			IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
+			IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGOKS);
+			dst_release(&rt->u.dst);
 			return 0;
 		}
 
@@ -690,7 +695,8 @@
 			frag = skb;
 		}
 
-		IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+		IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGFAILS);
+		dst_release(&rt->u.dst);
 		return err;
 	}
 
@@ -723,7 +729,8 @@
 
 		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
 			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
-			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
+			IP6_INC_STATS(ip6_dst_idev(skb->dst),
+				      IPSTATS_MIB_FRAGFAILS);
 			err = -ENOMEM;
 			goto fail;
 		}
@@ -784,15 +791,17 @@
 		if (err)
 			goto fail;
 
-		IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);
+		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGCREATES);
 	}
+	IP6_INC_STATS(ip6_dst_idev(skb->dst),
+		      IPSTATS_MIB_FRAGOKS);
 	kfree_skb(skb);
-	IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
 	return err;
 
 fail:
+	IP6_INC_STATS(ip6_dst_idev(skb->dst),
+		      IPSTATS_MIB_FRAGFAILS);
 	kfree_skb(skb); 
-	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
 	return err;
 }
 
@@ -1265,7 +1274,7 @@
 	return 0;
 error:
 	inet->cork.length -= length;
-	IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
 	return err;
 }
 
@@ -1311,7 +1320,7 @@
 
 	skb->nh.ipv6h = hdr = (struct ipv6hdr*) skb_push(skb, sizeof(struct ipv6hdr));
 	
-	*(u32*)hdr = fl->fl6_flowlabel |
+	*(__be32*)hdr = fl->fl6_flowlabel |
 		     htonl(0x60000000 | ((int)np->cork.tclass << 20));
 
 	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
@@ -1326,7 +1335,7 @@
 	skb->priority = sk->sk_priority;
 
 	skb->dst = dst_clone(&rt->u.dst);
-	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);	
+	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
 	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
 	if (err) {
 		if (err > 0)
@@ -1357,7 +1366,8 @@
 	struct sk_buff *skb;
 
 	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
-		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+		IP6_INC_STATS(ip6_dst_idev(skb->dst),
+			      IPSTATS_MIB_OUTDISCARDS);
 		kfree_skb(skb);
 	}
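
In ip6_fragment() above, the kmalloc()+memcpy() pair becomes a single kmemdup() call and the routing entry is pinned with dst_hold()/dst_release() around the fast-path loop. A userspace analogue of the allocate-and-copy helper; memdup_sketch() is illustrative, and the header bytes are arbitrary example data.

/* Sketch: a userspace analogue of kmemdup(), allocate and copy in one step. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup_sketch(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	/* first 8 bytes of an IPv6 header: version 6, nexthdr 59, hop limit 64 */
	const unsigned char hdr[8] = { 0x60, 0, 0, 0, 0, 0, 59, 64 };
	unsigned char *copy = memdup_sketch(hdr, sizeof(hdr));

	if (!copy)
		return 1;
	printf("first byte of copy: 0x%02x\n", copy[0]);
	free(copy);
	return 0;
}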
 
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index b9f4029..8d91834 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -66,7 +66,7 @@
 
 #define HASH_SIZE  32
 
-#define HASH(addr) (((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \
+#define HASH(addr) ((__force u32)((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \
 	             (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
                     (HASH_SIZE - 1))
 
@@ -215,11 +215,10 @@
  *   Create tunnel matching given parameters.
  * 
  * Return: 
- *   0 on success
+ *   created tunnel or NULL
  **/
 
-static int
-ip6_tnl_create(struct ip6_tnl_parm *p, struct ip6_tnl **pt)
+static struct ip6_tnl *ip6_tnl_create(struct ip6_tnl_parm *p)
 {
 	struct net_device *dev;
 	struct ip6_tnl *t;
@@ -236,11 +235,11 @@
 				break;
 		}
 		if (i == IP6_TNL_MAX) 
-			return -ENOBUFS;
+			goto failed;
 	}
 	dev = alloc_netdev(sizeof (*t), name, ip6ip6_tnl_dev_setup);
 	if (dev == NULL)
-		return -ENOMEM;
+		goto failed;
 
 	t = netdev_priv(dev);
 	dev->init = ip6ip6_tnl_dev_init;
@@ -248,13 +247,13 @@
 
 	if ((err = register_netdevice(dev)) < 0) {
 		free_netdev(dev);
-		return err;
+		goto failed;
 	}
 	dev_hold(dev);
-
 	ip6ip6_tnl_link(t);
-	*pt = t;
-	return 0;
+	return t;
+failed:
+	return NULL;
 }
 
 /**
@@ -268,32 +267,23 @@
  *   tunnel device is created and registered for use.
  *
  * Return:
- *   0 if tunnel located or created,
- *   -EINVAL if parameters incorrect,
- *   -ENODEV if no matching tunnel available
+ *   matching tunnel or NULL
  **/
 
-static int
-ip6ip6_tnl_locate(struct ip6_tnl_parm *p, struct ip6_tnl **pt, int create)
+static struct ip6_tnl *ip6ip6_tnl_locate(struct ip6_tnl_parm *p, int create)
 {
 	struct in6_addr *remote = &p->raddr;
 	struct in6_addr *local = &p->laddr;
 	struct ip6_tnl *t;
 
-	if (p->proto != IPPROTO_IPV6)
-		return -EINVAL;
-
 	for (t = *ip6ip6_bucket(p); t; t = t->next) {
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
-		    ipv6_addr_equal(remote, &t->parms.raddr)) {
-			*pt = t;
-			return (create ? -EEXIST : 0);
-		}
+		    ipv6_addr_equal(remote, &t->parms.raddr))
+			return t;
 	}
 	if (!create)
-		return -ENODEV;
-	
-	return ip6_tnl_create(p, pt);
+		return NULL;
+	return ip6_tnl_create(p);
 }
 
 /**
@@ -391,7 +381,7 @@
 
 static int
 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-	   int type, int code, int offset, __u32 info)
+	   int type, int code, int offset, __be32 info)
 {
 	struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data;
 	struct ip6_tnl *t;
@@ -434,12 +424,9 @@
 		}
 		break;
 	case ICMPV6_PARAMPROB:
-		/* ignore if parameter problem not caused by a tunnel
-		   encapsulation limit sub-option */
-		if (code != ICMPV6_HDR_FIELD) {
-			break;
-		}
-		teli = parse_tlv_tnl_enc_lim(skb, skb->data);
+		teli = 0;
+		if (code == ICMPV6_HDR_FIELD)
+			teli = parse_tlv_tnl_enc_lim(skb, skb->data);
 
 		if (teli && teli == ntohl(info) - 2) {
 			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
@@ -451,6 +438,10 @@
 					       "tunnel!\n", t->parms.name);
 				rel_msg = 1;
 			}
+		} else if (net_ratelimit()) {
+			printk(KERN_WARNING
+			       "%s: Recipient unable to parse tunneled "
+			       "packet!\n", t->parms.name);
 		}
 		break;
 	case ICMPV6_PKT_TOOBIG:
@@ -470,6 +461,7 @@
 	if (rel_msg &&  pskb_may_pull(skb, offset + sizeof (*ipv6h))) {
 		struct rt6_info *rt;
 		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
 		if (!skb2)
 			goto out;
 
@@ -504,6 +496,27 @@
 	if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
 		IP6_ECN_set_ce(inner_iph);
 }
+static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
+{
+	struct ip6_tnl_parm *p = &t->parms;
+	int ret = 0;
+
+	if (p->flags & IP6_TNL_F_CAP_RCV) {
+    		struct net_device *ldev = NULL;
+
+		if (p->link)
+			ldev = dev_get_by_index(p->link);
+
+		if ((ipv6_addr_is_multicast(&p->laddr) ||
+		     likely(ipv6_chk_addr(&p->laddr, ldev, 0))) &&
+		    likely(!ipv6_chk_addr(&p->raddr, NULL, 0)))
+			ret = 1;
+
+		if (ldev)
+			dev_put(ldev);
+	}
+	return ret;
+}
 
 /**
  * ip6ip6_rcv - decapsulate IPv6 packet and retransmit it locally
@@ -528,7 +541,7 @@
 			goto discard;
 		}
 
-		if (!(t->parms.flags & IP6_TNL_F_CAP_RCV)) {
+		if (!ip6_tnl_rcv_ctl(t)) {
 			t->stat.rx_dropped++;
 			read_unlock(&ip6ip6_lock);
 			goto discard;
@@ -560,31 +573,23 @@
 	return 0;
 }
 
-static inline struct ipv6_txoptions *create_tel(__u8 encap_limit)
+struct ipv6_tel_txoption {
+	struct ipv6_txoptions ops;
+	__u8 dst_opt[8];
+};
+
+static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
 {
-	struct ipv6_tlv_tnl_enc_lim *tel;
-	struct ipv6_txoptions *opt;
-	__u8 *raw;
+	memset(opt, 0, sizeof(struct ipv6_tel_txoption));
 
-	int opt_len = sizeof(*opt) + 8;
+	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
+	opt->dst_opt[3] = 1;
+	opt->dst_opt[4] = encap_limit;
+	opt->dst_opt[5] = IPV6_TLV_PADN;
+	opt->dst_opt[6] = 1;
 
-	if (!(opt = kzalloc(opt_len, GFP_ATOMIC))) {
-		return NULL;
-	}
-	opt->tot_len = opt_len;
-	opt->dst0opt = (struct ipv6_opt_hdr *) (opt + 1);
-	opt->opt_nflen = 8;
-
-	tel = (struct ipv6_tlv_tnl_enc_lim *) (opt->dst0opt + 1);
-	tel->type = IPV6_TLV_TNL_ENCAP_LIMIT;
-	tel->length = 1;
-	tel->encap_limit = encap_limit;
-
-	raw = (__u8 *) opt->dst0opt;
-	raw[5] = IPV6_TLV_PADN;
-	raw[6] = 1;
-
-	return opt;
+	opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
+	opt->ops.opt_nflen = 8;
 }
 
 /**
@@ -607,6 +612,34 @@
 	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
 }
 
+static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
+{
+	struct ip6_tnl_parm *p = &t->parms;
+	int ret = 0;
+
+ 	if (p->flags & IP6_TNL_F_CAP_XMIT) {
+		struct net_device *ldev = NULL;
+
+		if (p->link)
+			ldev = dev_get_by_index(p->link);
+
+		if (unlikely(!ipv6_chk_addr(&p->laddr, ldev, 0)))
+			printk(KERN_WARNING
+			       "%s xmit: Local address not yet configured!\n",
+			       p->name);
+		else if (!ipv6_addr_is_multicast(&p->raddr) &&
+			 unlikely(ipv6_chk_addr(&p->raddr, NULL, 0)))
+			printk(KERN_WARNING
+			       "%s xmit: Routing loop! "
+			       "Remote address found on this node!\n",
+			       p->name);
+		else
+			ret = 1;
+		if (ldev)
+			dev_put(ldev);
+	}
+	return ret;
+}
 /**
  * ip6ip6_tnl_xmit - encapsulate packet and send 
  *   @skb: the outgoing socket buffer
@@ -626,8 +659,8 @@
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct net_device_stats *stats = &t->stat;
 	struct ipv6hdr *ipv6h = skb->nh.ipv6h;
-	struct ipv6_txoptions *opt = NULL;
 	int encap_limit = -1;
+	struct ipv6_tel_txoption opt;
 	__u16 offset;
 	struct flowi fl;
 	struct dst_entry *dst;
@@ -644,10 +677,9 @@
 		goto tx_err;
 	}
 	if (skb->protocol != htons(ETH_P_IPV6) ||
-	    !(t->parms.flags & IP6_TNL_F_CAP_XMIT) ||
-	    ip6ip6_tnl_addr_conflict(t, ipv6h)) {
+	    !ip6_tnl_xmit_ctl(t) || ip6ip6_tnl_addr_conflict(t, ipv6h))
 		goto tx_err;
-	}
+
 	if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) {
 		struct ipv6_tlv_tnl_enc_lim *tel;
 		tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset];
@@ -657,20 +689,17 @@
 			goto tx_err;
 		}
 		encap_limit = tel->encap_limit - 1;
-	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
+	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 		encap_limit = t->parms.encap_limit;
-	}
+
 	memcpy(&fl, &t->fl, sizeof (fl));
 	proto = fl.proto;
 
 	dsfield = ipv6_get_dsfield(ipv6h);
 	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
-		fl.fl6_flowlabel |= (*(__u32 *) ipv6h & IPV6_TCLASS_MASK);
+		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
 	if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
-		fl.fl6_flowlabel |= (*(__u32 *) ipv6h & IPV6_FLOWLABEL_MASK);
-
-	if (encap_limit >= 0 && (opt = create_tel(encap_limit)) == NULL)
-		goto tx_err;
+		fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
 
 	if ((dst = ip6_tnl_dst_check(t)) != NULL)
 		dst_hold(dst);
@@ -692,7 +721,7 @@
 		goto tx_err_dst_release;
 	}
 	mtu = dst_mtu(dst) - sizeof (*ipv6h);
-	if (opt) {
+	if (encap_limit >= 0) {
 		max_headroom += 8;
 		mtu -= 8;
 	}
@@ -730,12 +759,13 @@
 
 	skb->h.raw = skb->nh.raw;
 
-	if (opt)
-		ipv6_push_nfrag_opts(skb, opt, &proto, NULL);
-
+	if (encap_limit >= 0) {
+		init_tel_txopt(&opt, encap_limit);
+		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
+	}
 	skb->nh.raw = skb_push(skb, sizeof(struct ipv6hdr));
 	ipv6h = skb->nh.ipv6h;
-	*(u32*)ipv6h = fl.fl6_flowlabel | htonl(0x60000000);
+	*(__be32*)ipv6h = fl.fl6_flowlabel | htonl(0x60000000);
 	dsfield = INET_ECN_encapsulate(0, dsfield);
 	ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
 	ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
@@ -748,7 +778,7 @@
 	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, 
 		      skb->dst->dev, dst_output);
 
-	if (err == NET_XMIT_SUCCESS || err == NET_XMIT_CN) {
+	if (net_xmit_eval(err) == 0) {
 		stats->tx_bytes += pkt_len;
 		stats->tx_packets++;
 	} else {
@@ -756,9 +786,6 @@
 		stats->tx_aborted_errors++;
 	}
 	ip6_tnl_dst_store(t, dst);
-
-	kfree(opt);
-
 	t->recursion--;
 	return 0;
 tx_err_link_failure:
@@ -766,7 +793,6 @@
 	dst_link_failure(skb);
 tx_err_dst_release:
 	dst_release(dst);
-	kfree(opt);
 tx_err:
 	stats->tx_errors++;
 	stats->tx_dropped++;
@@ -778,39 +804,19 @@
 static void ip6_tnl_set_cap(struct ip6_tnl *t)
 {
 	struct ip6_tnl_parm *p = &t->parms;
-	struct in6_addr *laddr = &p->laddr;
-	struct in6_addr *raddr = &p->raddr;
-	int ltype = ipv6_addr_type(laddr);
-	int rtype = ipv6_addr_type(raddr);
+	int ltype = ipv6_addr_type(&p->laddr);
+	int rtype = ipv6_addr_type(&p->raddr);
 
 	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV);
 
-	if (ltype != IPV6_ADDR_ANY && rtype != IPV6_ADDR_ANY &&
-	    ((ltype|rtype) &
-	     (IPV6_ADDR_UNICAST|
-	      IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL|
-	      IPV6_ADDR_MAPPED|IPV6_ADDR_RESERVED)) == IPV6_ADDR_UNICAST) {
-		struct net_device *ldev = NULL;
-		int l_ok = 1;
-		int r_ok = 1;
-
-		if (p->link)
-			ldev = dev_get_by_index(p->link);
-		
-		if (ltype&IPV6_ADDR_UNICAST && !ipv6_chk_addr(laddr, ldev, 0))
-			l_ok = 0;
-		
-		if (rtype&IPV6_ADDR_UNICAST && ipv6_chk_addr(raddr, NULL, 0))
-			r_ok = 0;
-		
-		if (l_ok && r_ok) {
-			if (ltype&IPV6_ADDR_UNICAST)
-				p->flags |= IP6_TNL_F_CAP_XMIT;
-			if (rtype&IPV6_ADDR_UNICAST)
-				p->flags |= IP6_TNL_F_CAP_RCV;
-		}
-		if (ldev)
-			dev_put(ldev);
+	if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
+	    rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
+	    !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
+	    (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
+		if (ltype&IPV6_ADDR_UNICAST)
+			p->flags |= IP6_TNL_F_CAP_XMIT;
+		if (rtype&IPV6_ADDR_UNICAST)
+			p->flags |= IP6_TNL_F_CAP_RCV;
 	}
 }
 
@@ -844,8 +850,11 @@
 	dev->iflink = p->link;
 
 	if (p->flags & IP6_TNL_F_CAP_XMIT) {
+		int strict = (ipv6_addr_type(&p->raddr) &
+			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
+
 		struct rt6_info *rt = rt6_lookup(&p->raddr, &p->laddr,
-						 p->link, 0);
+						 p->link, strict);
 
 		if (rt == NULL)
 			return;
@@ -920,26 +929,20 @@
 ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	int err = 0;
-	int create;
 	struct ip6_tnl_parm p;
 	struct ip6_tnl *t = NULL;
 
 	switch (cmd) {
 	case SIOCGETTUNNEL:
 		if (dev == ip6ip6_fb_tnl_dev) {
-			if (copy_from_user(&p,
-					   ifr->ifr_ifru.ifru_data,
-					   sizeof (p))) {
+			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) {
 				err = -EFAULT;
 				break;
 			}
-			if ((err = ip6ip6_tnl_locate(&p, &t, 0)) == -ENODEV)
-				t = netdev_priv(dev);
-			else if (err)
-				break;
-		} else
+			t = ip6ip6_tnl_locate(&p, 0);
+		}
+		if (t == NULL)
 			t = netdev_priv(dev);
-
 		memcpy(&p, &t->parms, sizeof (p));
 		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
 			err = -EFAULT;
@@ -948,35 +951,36 @@
 	case SIOCADDTUNNEL:
 	case SIOCCHGTUNNEL:
 		err = -EPERM;
-		create = (cmd == SIOCADDTUNNEL);
 		if (!capable(CAP_NET_ADMIN))
 			break;
-		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) {
-			err = -EFAULT;
+		err = -EFAULT;
+		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
 			break;
-		}
-		if (!create && dev != ip6ip6_fb_tnl_dev) {
-			t = netdev_priv(dev);
-		}
-		if (!t && (err = ip6ip6_tnl_locate(&p, &t, create))) {
+		err = -EINVAL;
+		if (p.proto != IPPROTO_IPV6)
 			break;
-		}
-		if (cmd == SIOCCHGTUNNEL) {
-			if (t->dev != dev) {
-				err = -EEXIST;
-				break;
-			}
+		t = ip6ip6_tnl_locate(&p, cmd == SIOCADDTUNNEL);
+		if (dev != ip6ip6_fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
+			if (t != NULL) {
+				if (t->dev != dev) {
+					err = -EEXIST;
+					break;
+				}
+			} else
+				t = netdev_priv(dev);
+
 			ip6ip6_tnl_unlink(t);
 			err = ip6ip6_tnl_change(t, &p);
 			ip6ip6_tnl_link(t);
 			netdev_state_change(dev);
 		}
-		if (copy_to_user(ifr->ifr_ifru.ifru_data,
-				 &t->parms, sizeof (p))) {
-			err = -EFAULT;
-		} else {
+		if (t) {
 			err = 0;
-		}
+			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof (p)))
+				err = -EFAULT;
+
+		} else
+			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
 		break;
 	case SIOCDELTUNNEL:
 		err = -EPERM;
@@ -984,22 +988,18 @@
 			break;
 
 		if (dev == ip6ip6_fb_tnl_dev) {
-			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
-					   sizeof (p))) {
-				err = -EFAULT;
+			err = -EFAULT;
+			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
 				break;
-			}
-			err = ip6ip6_tnl_locate(&p, &t, 0);
-			if (err)
+			err = -ENOENT;
+			if ((t = ip6ip6_tnl_locate(&p, 0)) == NULL)
 				break;
-			if (t == netdev_priv(ip6ip6_fb_tnl_dev)) {
-				err = -EPERM;
+			err = -EPERM;
+			if (t->dev == ip6ip6_fb_tnl_dev)
 				break;
-			}
-		} else {
-			t = netdev_priv(dev);
+			dev = t->dev;
 		}
-		err = unregister_netdevice(t->dev);
+		err = unregister_netdevice(dev);
 		break;
 	default:
 		err = -EINVAL;
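
The ip6_tunnel.c changes above drop the kzalloc()'d ipv6_txoptions built by create_tel() in favour of an on-stack struct ipv6_tel_txoption whose eight-byte dst_opt[] holds a Tunnel Encapsulation Limit destination option (type 4, RFC 2473) padded with PadN. The sketch below lays out those eight bytes; bytes 0 and 1 are left zero for the option-pushing code to fill, exactly as in init_tel_txopt().

/*
 * Sketch: the 8-byte destination-options body, a Tunnel Encapsulation
 * Limit TLV (type 4, length 1) followed by PadN padding.
 */
#include <stdio.h>

#define IPV6_TLV_PADN			1
#define IPV6_TLV_TNL_ENCAP_LIMIT	4

static void build_tel_opt(unsigned char dst_opt[8], unsigned char encap_limit)
{
	dst_opt[0] = 0;				/* next header, filled later */
	dst_opt[1] = 0;				/* hdr ext len: (8 / 8) - 1 = 0 */
	dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;	/* option type 4 */
	dst_opt[3] = 1;				/* option data length */
	dst_opt[4] = encap_limit;		/* remaining nested tunnels */
	dst_opt[5] = IPV6_TLV_PADN;		/* PadN to reach 8 bytes */
	dst_opt[6] = 1;				/* one byte of padding follows */
	dst_opt[7] = 0;
}

int main(void)
{
	unsigned char opt[8];

	build_tel_opt(opt, 3);
	for (int i = 0; i < 8; i++)
		printf("%02x ", opt[i]);
	printf("\n");	/* prints: 00 00 04 01 03 01 01 00 */
	return 0;
}
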
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 71f59f1..511730b 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -176,7 +176,7 @@
 }
 
 static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-		                int type, int code, int offset, __u32 info)
+		                int type, int code, int offset, __be32 info)
 {
 	__be32 spi;
 	struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index de6b919..1eafcfc 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -51,6 +51,7 @@
 #include <net/inet_common.h>
 #include <net/tcp.h>
 #include <net/udp.h>
+#include <net/udplite.h>
 #include <net/xfrm.h>
 
 #include <asm/uaccess.h>
@@ -239,6 +240,7 @@
 			struct sk_buff *pktopt;
 
 			if (sk->sk_protocol != IPPROTO_UDP &&
+			    sk->sk_protocol != IPPROTO_UDPLITE &&
 			    sk->sk_protocol != IPPROTO_TCP)
 				break;
 
@@ -276,11 +278,15 @@
 				sk->sk_family = PF_INET;
 				tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
 			} else {
+				struct proto *prot = &udp_prot;
+
+				if (sk->sk_protocol == IPPROTO_UDPLITE)
+					prot = &udplite_prot;
 				local_bh_disable();
 				sock_prot_dec_use(sk->sk_prot);
-				sock_prot_inc_use(&udp_prot);
+				sock_prot_inc_use(prot);
 				local_bh_enable();
-				sk->sk_prot = &udp_prot;
+				sk->sk_prot = prot;
 				sk->sk_socket->ops = &inet_dgram_ops;
 				sk->sk_family = PF_INET;
 			}
@@ -813,6 +819,7 @@
 	switch (optname) {
 	case IPV6_ADDRFORM:
 		if (sk->sk_protocol != IPPROTO_UDP &&
+		    sk->sk_protocol != IPPROTO_UDPLITE &&
 		    sk->sk_protocol != IPPROTO_TCP)
 			return -EINVAL;
 		if (sk->sk_state != TCP_ESTABLISHED)
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 3b114e3..a1c231a 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -83,7 +83,7 @@
 struct mld2_grec {
 	__u8		grec_type;
 	__u8		grec_auxwords;
-	__u16		grec_nsrcs;
+	__be16		grec_nsrcs;
 	struct in6_addr	grec_mca;
 	struct in6_addr	grec_src[0];
 };
@@ -91,18 +91,18 @@
 struct mld2_report {
 	__u8	type;
 	__u8	resv1;
-	__u16	csum;
-	__u16	resv2;
-	__u16	ngrec;
+	__sum16	csum;
+	__be16	resv2;
+	__be16	ngrec;
 	struct mld2_grec grec[0];
 };
 
 struct mld2_query {
 	__u8 type;
 	__u8 code;
-	__u16 csum;
-	__u16 mrc;
-	__u16 resv1;
+	__sum16 csum;
+	__be16 mrc;
+	__be16 resv1;
 	struct in6_addr mca;
 #if defined(__LITTLE_ENDIAN_BITFIELD)
 	__u8 qrv:3,
@@ -116,7 +116,7 @@
 #error "Please fix <asm/byteorder.h>"
 #endif
 	__u8 qqic;
-	__u16 nsrcs;
+	__be16 nsrcs;
 	struct in6_addr srcs[0];
 };
 
@@ -1465,7 +1465,7 @@
 	struct inet6_dev *idev = in6_dev_get(skb->dev);
 	int err;
 
-	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+	IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS);
 	payload_len = skb->tail - (unsigned char *)skb->nh.ipv6h -
 		sizeof(struct ipv6hdr);
 	mldlen = skb->tail - skb->h.raw;
@@ -1477,9 +1477,9 @@
 		mld_dev_queue_xmit);
 	if (!err) {
 		ICMP6_INC_STATS(idev,ICMP6_MIB_OUTMSGS);
-		IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
+		IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS);
 	} else
-		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+		IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
 
 	if (likely(idev != NULL))
 		in6_dev_put(idev);
@@ -1763,7 +1763,10 @@
 		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
 		     IPV6_TLV_PADN, 0 };
 
-	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+	rcu_read_lock();
+	IP6_INC_STATS(__in6_dev_get(dev),
+		      IPSTATS_MIB_OUTREQUESTS);
+	rcu_read_unlock();
 	snd_addr = addr;
 	if (type == ICMPV6_MGM_REDUCTION) {
 		snd_addr = &all_routers;
@@ -1777,7 +1780,10 @@
 	skb = sock_alloc_send_skb(sk, LL_RESERVED_SPACE(dev) + full_len, 1, &err);
 
 	if (skb == NULL) {
-		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+		rcu_read_lock();
+		IP6_INC_STATS(__in6_dev_get(dev),
+			      IPSTATS_MIB_OUTDISCARDS);
+		rcu_read_unlock();
 		return;
 	}
 
@@ -1816,9 +1822,9 @@
 		else
 			ICMP6_INC_STATS(idev, ICMP6_MIB_OUTGROUPMEMBRESPONSES);
 		ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
-		IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
+		IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS);
 	} else
-		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+		IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
 
 	if (likely(idev != NULL))
 		in6_dev_put(idev);
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 7ccdc8f..be7dd7d 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -262,10 +262,10 @@
 	sel.proto = fl->proto;
 	sel.dport = xfrm_flowi_dport(fl);
 	if (sel.dport)
-		sel.dport_mask = ~((__u16)0);
+		sel.dport_mask = htons(~0);
 	sel.sport = xfrm_flowi_sport(fl);
 	if (sel.sport)
-		sel.sport_mask = ~((__u16)0);
+		sel.sport_mask = htons(~0);
 	sel.ifindex = fl->oif;
 
 	err = km_report(IPPROTO_DSTOPTS, &sel,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 73eb8c3..56ea928 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -472,7 +472,9 @@
 			inc_opt = 0;
 	}
 
-	skb = sock_alloc_send_skb(sk, MAX_HEADER + len + LL_RESERVED_SPACE(dev),
+	skb = sock_alloc_send_skb(sk,
+				  (MAX_HEADER + sizeof(struct ipv6hdr) +
+				   len + LL_RESERVED_SPACE(dev)),
 				  1, &err);
 
 	if (skb == NULL) {
@@ -513,7 +515,7 @@
 
 	skb->dst = dst;
 	idev = in6_dev_get(dst->dev);
-	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+	IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS);
 	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output);
 	if (!err) {
 		ICMP6_INC_STATS(idev, ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS);
@@ -561,7 +563,9 @@
 	if (send_llinfo)
 		len += ndisc_opt_addr_space(dev);
 
-	skb = sock_alloc_send_skb(sk, MAX_HEADER + len + LL_RESERVED_SPACE(dev),
+	skb = sock_alloc_send_skb(sk,
+				  (MAX_HEADER + sizeof(struct ipv6hdr) +
+				   len + LL_RESERVED_SPACE(dev)),
 				  1, &err);
 	if (skb == NULL) {
 		ND_PRINTK0(KERN_ERR
@@ -597,7 +601,7 @@
 	/* send it! */
 	skb->dst = dst;
 	idev = in6_dev_get(dst->dev);
-	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+	IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS);
 	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output);
 	if (!err) {
 		ICMP6_INC_STATS(idev, ICMP6_MIB_OUTNEIGHBORSOLICITS);
@@ -636,7 +640,9 @@
 	if (dev->addr_len)
 		len += ndisc_opt_addr_space(dev);
 
-        skb = sock_alloc_send_skb(sk, MAX_HEADER + len + LL_RESERVED_SPACE(dev),
+        skb = sock_alloc_send_skb(sk,
+				  (MAX_HEADER + sizeof(struct ipv6hdr) +
+				   len + LL_RESERVED_SPACE(dev)),
 				  1, &err);
 	if (skb == NULL) {
 		ND_PRINTK0(KERN_ERR
@@ -670,7 +676,7 @@
 	/* send it! */
 	skb->dst = dst;
 	idev = in6_dev_get(dst->dev);
-	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);	
+	IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS);
 	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output);
 	if (!err) {
 		ICMP6_INC_STATS(idev, ICMP6_MIB_OUTROUTERSOLICITS);
@@ -1261,10 +1267,11 @@
 	}
 
 	if (ndopts.nd_opts_mtu) {
+		__be32 n;
 		u32 mtu;
 
-		memcpy(&mtu, ((u8*)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu));
-		mtu = ntohl(mtu);
+		memcpy(&n, ((u8*)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu));
+		mtu = ntohl(n);
 
 		if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) {
 			ND_PRINTK2(KERN_WARNING
@@ -1446,7 +1453,9 @@
 	rd_len &= ~0x7;
 	len += rd_len;
 
-	buff = sock_alloc_send_skb(sk, MAX_HEADER + len + LL_RESERVED_SPACE(dev),
+	buff = sock_alloc_send_skb(sk,
+				   (MAX_HEADER + sizeof(struct ipv6hdr) +
+				    len + LL_RESERVED_SPACE(dev)),
 				   1, &err);
 	if (buff == NULL) {
 		ND_PRINTK0(KERN_ERR
@@ -1504,7 +1513,7 @@
 
 	buff->dst = dst;
 	idev = in6_dev_get(dst->dev);
-	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+	IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS);
 	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, buff, NULL, dst->dev, dst_output);
 	if (!err) {
 		ICMP6_INC_STATS(idev, ICMP6_MIB_OUTREDIRECTS);
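
The ndisc allocations above now budget sock_alloc_send_skb() explicitly for MAX_HEADER plus the IPv6 header plus the ICMPv6 payload plus the link-layer reserve, so the headroom no longer depends on MAX_HEADER happening to cover the IPv6 header as well. A toy budget for a neighbour solicitation; all constants except the 40-byte IPv6 header length are placeholders, not the real MAX_HEADER or LL_RESERVED_SPACE values.

/* Sketch: budgeting an ndisc skb allocation; the _SKETCH constants are placeholders. */
#include <stdio.h>

#define MAX_HEADER_SKETCH	48	/* worst-case stacked hard headers (placeholder) */
#define IPV6_HDR_LEN		40	/* sizeof(struct ipv6hdr) */
#define LL_RESERVE_SKETCH	16	/* link-layer reserve (placeholder) */

static unsigned int ndisc_alloc_size(unsigned int icmp_len)
{
	return MAX_HEADER_SKETCH + IPV6_HDR_LEN + icmp_len + LL_RESERVE_SKETCH;
}

int main(void)
{
	/* neighbour solicitation: icmp6hdr(8) + target(16) + src lladdr option(8) */
	printf("allocate %u bytes\n", ndisc_alloc_size(8 + 16 + 8));
	return 0;
}
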
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 580b1ab..f6294e5 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -31,7 +31,7 @@
 #endif
 
 	if (dst->error) {
-		IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
+		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 		LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
 		dst_release(dst);
 		return -EINVAL;
@@ -80,11 +80,11 @@
 	return 0;
 }
 
-unsigned int nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
 			     unsigned int dataoff, u_int8_t protocol)
 {
 	struct ipv6hdr *ip6h = skb->nh.ipv6h;
-	unsigned int csum = 0;
+	__sum16 csum = 0;
 
 	switch (skb->ip_summed) {
 	case CHECKSUM_COMPLETE:
@@ -100,12 +100,13 @@
 		}
 		/* fall through */
 	case CHECKSUM_NONE:
-		skb->csum = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+		skb->csum = ~csum_unfold(
+				csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
 					     skb->len - dataoff,
 					     protocol,
 					     csum_sub(0,
 						      skb_checksum(skb, 0,
-							           dataoff, 0)));
+							           dataoff, 0))));
 		csum = __skb_checksum_complete(skb);
 	}
 	return csum;
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index d7c45a9..fc3e5eb 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -6,7 +6,7 @@
 	depends on INET && IPV6 && NETFILTER && EXPERIMENTAL
 
 config NF_CONNTRACK_IPV6
-	tristate "IPv6 support for new connection tracking (EXPERIMENTAL)"
+	tristate "IPv6 connection tracking support (EXPERIMENTAL)"
 	depends on EXPERIMENTAL && NF_CONNTRACK
 	---help---
 	  Connection tracking keeps a record of what packets have passed
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 9fec832..d4d9f18 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -241,7 +241,7 @@
 	pmsg->data_len        = data_len;
 	pmsg->timestamp_sec   = entry->skb->tstamp.off_sec;
 	pmsg->timestamp_usec  = entry->skb->tstamp.off_usec;
-	pmsg->mark            = entry->skb->nfmark;
+	pmsg->mark            = entry->skb->mark;
 	pmsg->hook            = entry->info->hook;
 	pmsg->hw_protocol     = entry->skb->protocol;
 	
@@ -620,6 +620,7 @@
 	{ .ctl_name = 0 }
 };
 
+#ifdef CONFIG_PROC_FS
 static int
 ipq_get_info(char *buffer, char **start, off_t offset, int length)
 {
@@ -653,6 +654,7 @@
 		len = 0;
 	return len;
 }
+#endif /* CONFIG_PROC_FS */
 
 static struct nf_queue_handler nfqh = {
 	.name	= "ip6_queue",
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 204e021..4eec4b3 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -440,6 +440,13 @@
 			    && unconditional(&e->ipv6)) {
 				unsigned int oldpos, size;
 
+				if (t->verdict < -NF_MAX_VERDICT - 1) {
+					duprintf("mark_source_chains: bad "
+						"negative verdict (%i)\n",
+								t->verdict);
+					return 0;
+				}
+
 				/* Return: backtrack through the last
 				   big jump. */
 				do {
@@ -477,6 +484,13 @@
 				if (strcmp(t->target.u.user.name,
 					   IP6T_STANDARD_TARGET) == 0
 				    && newpos >= 0) {
+					if (newpos > newinfo->size -
+						sizeof(struct ip6t_entry)) {
+						duprintf("mark_source_chains: "
+							"bad verdict (%i)\n",
+								newpos);
+						return 0;
+					}
 					/* This a jump; chase it. */
 					duprintf("Jump rule %u -> %u\n",
 						 pos, newpos);
@@ -509,27 +523,6 @@
 }
 
 static inline int
-standard_check(const struct ip6t_entry_target *t,
-	       unsigned int max_offset)
-{
-	struct ip6t_standard_target *targ = (void *)t;
-
-	/* Check standard info. */
-	if (targ->verdict >= 0
-	    && targ->verdict > max_offset - sizeof(struct ip6t_entry)) {
-		duprintf("ip6t_standard_check: bad verdict (%i)\n",
-			 targ->verdict);
-		return 0;
-	}
-	if (targ->verdict < -NF_MAX_VERDICT - 1) {
-		duprintf("ip6t_standard_check: bad negative verdict (%i)\n",
-			 targ->verdict);
-		return 0;
-	}
-	return 1;
-}
-
-static inline int
 check_match(struct ip6t_entry_match *m,
 	    const char *name,
 	    const struct ip6t_ip6 *ipv6,
@@ -616,12 +609,7 @@
 	if (ret)
 		goto err;
 
-	if (t->u.kernel.target == &ip6t_standard_target) {
-		if (!standard_check(t, size)) {
-			ret = -EINVAL;
-			goto err;
-		}
-	} else if (t->u.kernel.target->checkentry
+	if (t->u.kernel.target->checkentry
 		   && !t->u.kernel.target->checkentry(name, e, target, t->data,
 						      e->comefrom)) {
 		duprintf("ip_tables: check failed for `%s'.\n",
@@ -758,17 +746,19 @@
 		}
 	}
 
+	if (!mark_source_chains(newinfo, valid_hooks, entry0))
+		return -ELOOP;
+
 	/* Finally, each sanity check must pass */
 	i = 0;
 	ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
 				check_entry, name, size, &i);
 
-	if (ret != 0)
-		goto cleanup;
-
-	ret = -ELOOP;
-	if (!mark_source_chains(newinfo, valid_hooks, entry0))
-		goto cleanup;
+	if (ret != 0) {
+		IP6T_ENTRY_ITERATE(entry0, newinfo->size,
+				   cleanup_entry, &i);
+		return ret;
+	}
 
 	/* And one copy for every other CPU */
 	for_each_possible_cpu(i) {
@@ -777,9 +767,6 @@
 	}
 
 	return 0;
-cleanup:
-	IP6T_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i);
-	return ret;
 }
 
 /* Gets counters. */
@@ -1481,7 +1468,8 @@
 		if (hp == NULL)
 			return -EBADMSG;
 		if (nexthdr == NEXTHDR_FRAGMENT) {
-			unsigned short _frag_off, *fp;
+			unsigned short _frag_off;
+			__be16 *fp;
 			fp = skb_header_pointer(skb,
 						start+offsetof(struct frag_hdr,
 							       frag_off),
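
The ip6_tables.c hunks above fold the old standard_check() verdict tests into mark_source_chains(), rejecting rules whose jump target would land past the end of the table or whose negative verdict is out of range, and they run mark_source_chains() before the per-entry checks so a failed check cleans up correctly. A toy version of the jump bound; ENTRY_SIZE is a made-up stand-in for sizeof(struct ip6t_entry).

/* Sketch: a jump target must leave room for one complete entry in the table. */
#include <stdio.h>

#define ENTRY_SIZE	112	/* illustrative, not the real entry size */

static int jump_target_ok(unsigned int newpos, unsigned int table_size)
{
	return newpos <= table_size - ENTRY_SIZE;
}

int main(void)
{
	unsigned int table_size = 4 * ENTRY_SIZE;

	printf("jump to %u: %s\n", 2 * ENTRY_SIZE,
	       jump_target_ok(2 * ENTRY_SIZE, table_size) ? "ok" : "rejected");
	printf("jump to %u: %s\n", table_size,
	       jump_target_ok(table_size, table_size) ? "ok" : "rejected");
	return 0;
}
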
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 0cf537d..33b1faa 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -69,9 +69,9 @@
 	/* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
 	printk("LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
 	       ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
-	       (ntohl(*(u_int32_t *)ih) & 0x0ff00000) >> 20,
+	       (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
 	       ih->hop_limit,
-	       (ntohl(*(u_int32_t *)ih) & 0x000fffff));
+	       (ntohl(*(__be32 *)ih) & 0x000fffff));
 
 	fragment = 0;
 	ptr = ip6hoff + sizeof(struct ipv6hdr);
@@ -270,11 +270,15 @@
 		}
 		break;
 	}
-	case IPPROTO_UDP: {
+	case IPPROTO_UDP:
+	case IPPROTO_UDPLITE: {
 		struct udphdr _udph, *uh;
 
-		/* Max length: 10 "PROTO=UDP " */
-		printk("PROTO=UDP ");
+		if (currenthdr == IPPROTO_UDP)
+			/* Max length: 10 "PROTO=UDP "     */
+			printk("PROTO=UDP " );
+		else	/* Max length: 14 "PROTO=UDPLITE " */
+			printk("PROTO=UDPLITE ");
 
 		if (fragment)
 			break;
@@ -436,13 +440,8 @@
 	li.u.log.level = loginfo->level;
 	li.u.log.logflags = loginfo->logflags;
 
-	if (loginfo->logflags & IP6T_LOG_NFLOG)
-		nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
-		              "%s", loginfo->prefix);
-	else
-		ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
-		                loginfo->prefix);
-
+	ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
+	                loginfo->prefix);
 	return IP6T_CONTINUE;
 }
 
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 386ea26..6250e86 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -149,11 +149,10 @@
 		   int (*okfn)(struct sk_buff *))
 {
 
-	unsigned long nfmark;
 	unsigned int ret;
 	struct in6_addr saddr, daddr;
 	u_int8_t hop_limit;
-	u_int32_t flowlabel;
+	u_int32_t flowlabel, mark;
 
 #if 0
 	/* root is playing with raw sockets. */
@@ -165,10 +164,10 @@
 	}
 #endif
 
-	/* save source/dest address, nfmark, hoplimit, flowlabel, priority,  */
+	/* save source/dest address, mark, hoplimit, flowlabel, priority,  */
 	memcpy(&saddr, &(*pskb)->nh.ipv6h->saddr, sizeof(saddr));
 	memcpy(&daddr, &(*pskb)->nh.ipv6h->daddr, sizeof(daddr));
-	nfmark = (*pskb)->nfmark;
+	mark = (*pskb)->mark;
 	hop_limit = (*pskb)->nh.ipv6h->hop_limit;
 
 	/* flowlabel and prio (includes version, which shouldn't change either */
@@ -179,7 +178,7 @@
 	if (ret != NF_DROP && ret != NF_STOLEN 
 		&& (memcmp(&(*pskb)->nh.ipv6h->saddr, &saddr, sizeof(saddr))
 		    || memcmp(&(*pskb)->nh.ipv6h->daddr, &daddr, sizeof(daddr))
-		    || (*pskb)->nfmark != nfmark
+		    || (*pskb)->mark != mark
 		    || (*pskb)->nh.ipv6h->hop_limit != hop_limit))
 		return ip6_route_me_harder(*pskb) == 0 ? ret : NF_DROP;
 
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index e5e53ff..a20615f 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -33,7 +33,7 @@
 #include <linux/netfilter_ipv6.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_helper.h>
-#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
 #include <net/netfilter/nf_conntrack_core.h>
 
@@ -43,8 +43,6 @@
 #define DEBUGP(format, args...)
 #endif
 
-DECLARE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
-
 static int ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
 			     struct nf_conntrack_tuple *tuple)
 {
@@ -211,11 +209,6 @@
 	return nf_conntrack_confirm(pskb);
 }
 
-extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb);
-extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
-			       struct net_device *in,
-			       struct net_device *out,
-			       int (*okfn)(struct sk_buff *));
 static unsigned int ipv6_defrag(unsigned int hooknum,
 				struct sk_buff **pskb,
 				const struct net_device *in,
@@ -331,26 +324,7 @@
 };
 
 #ifdef CONFIG_SYSCTL
-
-/* From nf_conntrack_proto_icmpv6.c */
-extern unsigned int nf_ct_icmpv6_timeout;
-
-/* From nf_conntrack_reasm.c */
-extern unsigned int nf_ct_frag6_timeout;
-extern unsigned int nf_ct_frag6_low_thresh;
-extern unsigned int nf_ct_frag6_high_thresh;
-
-static struct ctl_table_header *nf_ct_ipv6_sysctl_header;
-
-static ctl_table nf_ct_sysctl_table[] = {
-	{
-		.ctl_name	= NET_NF_CONNTRACK_ICMPV6_TIMEOUT,
-		.procname	= "nf_conntrack_icmpv6_timeout",
-		.data		= &nf_ct_icmpv6_timeout,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-	},
+static ctl_table nf_ct_ipv6_sysctl_table[] = {
 	{
 		.ctl_name	= NET_NF_CONNTRACK_FRAG6_TIMEOUT,
 		.procname	= "nf_conntrack_frag6_timeout",
@@ -377,26 +351,6 @@
 	},
         { .ctl_name = 0 }
 };
-
-static ctl_table nf_ct_netfilter_table[] = {
-	{
-		.ctl_name	= NET_NETFILTER,
-		.procname	= "netfilter",
-		.mode		= 0555,
-		.child		= nf_ct_sysctl_table,
-	},
-	{ .ctl_name = 0 }
-};
-
-static ctl_table nf_ct_net_table[] = {
-	{
-		.ctl_name	= CTL_NET,
-		.procname	= "net",
-		.mode		= 0555,
-		.child		= nf_ct_netfilter_table,
-	},
-	{ .ctl_name = 0 }
-};
 #endif
 
 #if defined(CONFIG_NF_CT_NETLINK) || \
@@ -454,16 +408,14 @@
 	.tuple_to_nfattr	= ipv6_tuple_to_nfattr,
 	.nfattr_to_tuple	= ipv6_nfattr_to_tuple,
 #endif
+#ifdef CONFIG_SYSCTL
+	.ctl_table_path		= nf_net_netfilter_sysctl_path,
+	.ctl_table		= nf_ct_ipv6_sysctl_table,
+#endif
 	.get_features		= ipv6_get_features,
 	.me			= THIS_MODULE,
 };
 
-extern struct nf_conntrack_protocol nf_conntrack_protocol_tcp6;
-extern struct nf_conntrack_protocol nf_conntrack_protocol_udp6;
-extern struct nf_conntrack_protocol nf_conntrack_protocol_icmpv6;
-extern int nf_ct_frag6_init(void);
-extern void nf_ct_frag6_cleanup(void);
-
 MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6));
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>");
@@ -479,19 +431,19 @@
 		printk("nf_conntrack_ipv6: can't initialize frag6.\n");
 		return ret;
 	}
-	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_tcp6);
+	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp6);
 	if (ret < 0) {
 		printk("nf_conntrack_ipv6: can't register tcp.\n");
 		goto cleanup_frag6;
 	}
 
-	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_udp6);
+	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp6);
 	if (ret < 0) {
 		printk("nf_conntrack_ipv6: can't register udp.\n");
 		goto cleanup_tcp;
 	}
 
-	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_icmpv6);
+	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmpv6);
 	if (ret < 0) {
 		printk("nf_conntrack_ipv6: can't register icmpv6.\n");
 		goto cleanup_udp;
@@ -510,28 +462,16 @@
 		       "hook.\n");
 		goto cleanup_ipv6;
 	}
-#ifdef CONFIG_SYSCTL
-	nf_ct_ipv6_sysctl_header = register_sysctl_table(nf_ct_net_table, 0);
-	if (nf_ct_ipv6_sysctl_header == NULL) {
-		printk("nf_conntrack: can't register to sysctl.\n");
-		ret = -ENOMEM;
-		goto cleanup_hooks;
-	}
-#endif
 	return ret;
 
-#ifdef CONFIG_SYSCTL
- cleanup_hooks:
-	nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops));
-#endif
  cleanup_ipv6:
 	nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6);
  cleanup_icmpv6:
-	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_icmpv6);
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmpv6);
  cleanup_udp:
-	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_udp6);
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp6);
  cleanup_tcp:
-	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp6);
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp6);
  cleanup_frag6:
 	nf_ct_frag6_cleanup();
 	return ret;
@@ -540,14 +480,11 @@
 static void __exit nf_conntrack_l3proto_ipv6_fini(void)
 {
 	synchronize_net();
-#ifdef CONFIG_SYSCTL
- 	unregister_sysctl_table(nf_ct_ipv6_sysctl_header);
-#endif
 	nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops));
 	nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6);
-	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_icmpv6);
-	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_udp6);
-	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp6);
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmpv6);
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp6);
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp6);
 	nf_ct_frag6_cleanup();
 }
 
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 34d4472..3905cac 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -29,11 +29,11 @@
 #include <linux/seq_file.h>
 #include <linux/netfilter_ipv6.h>
 #include <net/netfilter/nf_conntrack_tuple.h>
-#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
 
-unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
+static unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
 
 #if 0
 #define DEBUGP printk
@@ -142,9 +142,6 @@
 	return 1;
 }
 
-extern int
-nf_ct_ipv6_skip_exthdr(struct sk_buff *skb, int start, u8 *nexthdrp, int len);
-extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
 static int
 icmpv6_error_message(struct sk_buff *skb,
 		     unsigned int icmp6off,
@@ -155,7 +152,7 @@
 	struct nf_conntrack_tuple_hash *h;
 	struct icmp6hdr _hdr, *hp;
 	unsigned int inip6off;
-	struct nf_conntrack_protocol *inproto;
+	struct nf_conntrack_l4proto *inproto;
 	u_int8_t inprotonum;
 	unsigned int inprotoff;
 
@@ -185,7 +182,7 @@
 		return -NF_ACCEPT;
 	}
 
-	inproto = __nf_ct_proto_find(PF_INET6, inprotonum);
+	inproto = __nf_ct_l4proto_find(PF_INET6, inprotonum);
 
 	/* Are they talking about one of our connections? */
 	if (!nf_ct_get_tuple(skb, inip6off, inprotoff, PF_INET6, inprotonum,
@@ -290,7 +287,7 @@
 	tuple->dst.u.icmp.code =
 			*(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMPV6_CODE-1]);
 	tuple->src.u.icmp.id =
-			*(u_int16_t *)NFA_DATA(tb[CTA_PROTO_ICMPV6_ID-1]);
+			*(__be16 *)NFA_DATA(tb[CTA_PROTO_ICMPV6_ID-1]);
 
 	if (tuple->dst.u.icmp.type < 128
 	    || tuple->dst.u.icmp.type - 128 >= sizeof(invmap)
@@ -301,10 +298,27 @@
 }
 #endif
 
-struct nf_conntrack_protocol nf_conntrack_protocol_icmpv6 =
+#ifdef CONFIG_SYSCTL
+static struct ctl_table_header *icmpv6_sysctl_header;
+static struct ctl_table icmpv6_sysctl_table[] = {
+	{
+		.ctl_name	= NET_NF_CONNTRACK_ICMPV6_TIMEOUT,
+		.procname	= "nf_conntrack_icmpv6_timeout",
+		.data		= &nf_ct_icmpv6_timeout,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= 0
+	}
+};
+#endif /* CONFIG_SYSCTL */
+
+struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
 {
 	.l3proto		= PF_INET6,
-	.proto			= IPPROTO_ICMPV6,
+	.l4proto		= IPPROTO_ICMPV6,
 	.name			= "icmpv6",
 	.pkt_to_tuple		= icmpv6_pkt_to_tuple,
 	.invert_tuple		= icmpv6_invert_tuple,
@@ -318,6 +332,10 @@
 	.tuple_to_nfattr	= icmpv6_tuple_to_nfattr,
 	.nfattr_to_tuple	= icmpv6_nfattr_to_tuple,
 #endif
+#ifdef CONFIG_SYSCTL
+	.ctl_table_header	= &icmpv6_sysctl_header,
+	.ctl_table		= icmpv6_sysctl_table,
+#endif
 };
 
-EXPORT_SYMBOL(nf_conntrack_protocol_icmpv6);
+EXPORT_SYMBOL(nf_conntrack_l4proto_icmpv6);
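
The two files above move conntrack's ICMPv6 sysctl out of the shared nf_ct_net_table: each protocol now describes its own table and hands it to the conntrack core for registration (the l3proto via .ctl_table_path/.ctl_table, the l4proto via .ctl_table_header/.ctl_table). A minimal protocol-side sketch of that declaration follows; apart from those struct fields, which are taken from the hunks above, every name in it is hypothetical.

static unsigned int nf_ct_example_timeout __read_mostly = 30 * HZ;

#ifdef CONFIG_SYSCTL
static struct ctl_table_header *example_sysctl_header;
static struct ctl_table example_sysctl_table[] = {
	{
		/* a real entry also carries a binary .ctl_name id, as above */
		.procname	= "nf_conntrack_example_timeout",
		.data		= &nf_ct_example_timeout,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
	},
	{ .ctl_name = 0 }
};
#endif

/* wired into the protocol definition exactly as in the hunks above:
 *	.ctl_table_header = &example_sysctl_header,
 *	.ctl_table        = example_sysctl_table,
 */
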
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index bf93c1e..37e5fca 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -72,7 +72,7 @@
 	struct hlist_node	list;
 	struct list_head	lru_list;	/* lru list member	*/
 
-	__u32			id;		/* fragment id		*/
+	__be32			id;		/* fragment id		*/
 	struct in6_addr		saddr;
 	struct in6_addr		daddr;
 
@@ -115,28 +115,28 @@
 	write_unlock(&nf_ct_frag6_lock);
 }
 
-static unsigned int ip6qhashfn(u32 id, struct in6_addr *saddr,
+static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
 			       struct in6_addr *daddr)
 {
 	u32 a, b, c;
 
-	a = saddr->s6_addr32[0];
-	b = saddr->s6_addr32[1];
-	c = saddr->s6_addr32[2];
+	a = (__force u32)saddr->s6_addr32[0];
+	b = (__force u32)saddr->s6_addr32[1];
+	c = (__force u32)saddr->s6_addr32[2];
 
 	a += JHASH_GOLDEN_RATIO;
 	b += JHASH_GOLDEN_RATIO;
 	c += nf_ct_frag6_hash_rnd;
 	__jhash_mix(a, b, c);
 
-	a += saddr->s6_addr32[3];
-	b += daddr->s6_addr32[0];
-	c += daddr->s6_addr32[1];
+	a += (__force u32)saddr->s6_addr32[3];
+	b += (__force u32)daddr->s6_addr32[0];
+	c += (__force u32)daddr->s6_addr32[1];
 	__jhash_mix(a, b, c);
 
-	a += daddr->s6_addr32[2];
-	b += daddr->s6_addr32[3];
-	c += id;
+	a += (__force u32)daddr->s6_addr32[2];
+	b += (__force u32)daddr->s6_addr32[3];
+	c += (__force u32)id;
 	__jhash_mix(a, b, c);
 
 	return c & (FRAG6Q_HASHSZ - 1);
@@ -338,7 +338,7 @@
 
 
 static struct nf_ct_frag6_queue *
-nf_ct_frag6_create(unsigned int hash, u32 id, struct in6_addr *src,				   struct in6_addr *dst)
+nf_ct_frag6_create(unsigned int hash, __be32 id, struct in6_addr *src,				   struct in6_addr *dst)
 {
 	struct nf_ct_frag6_queue *fq;
 
@@ -366,7 +366,7 @@
 }
 
 static __inline__ struct nf_ct_frag6_queue *
-fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
+fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
 {
 	struct nf_ct_frag6_queue *fq;
 	struct hlist_node *n;
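
The u32 -> __be32 conversions above are sparse annotations: fragment ids and address words stay in network byte order, and each place they feed host-order hash arithmetic gets an explicit __force cast. A stand-alone sketch of the pattern, with the kernel-only types stubbed so it compiles outside the tree:

#include <stdint.h>
#include <stdio.h>

#define __force			/* sparse-only annotation, no effect here */
typedef uint32_t __be32;	/* wire-order value, stubbed for this sketch */

#define JHASH_GOLDEN_RATIO	0x9e3779b9

/* mix a network-order id into a host-order hash; the cast records that the
 * byte order is deliberately ignored, so sparse stops warning about it */
static uint32_t frag_hash(__be32 id, uint32_t rnd)
{
	return ((__force uint32_t)id + JHASH_GOLDEN_RATIO) ^ rnd;
}

int main(void)
{
	printf("%08x\n", frag_hash((__be32)1, 0x12345678));
	return 0;
}
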
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index efee7a6..35249d8 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -49,6 +49,8 @@
 		       fold_prot_inuse(&tcpv6_prot));
 	seq_printf(seq, "UDP6: inuse %d\n",
 		       fold_prot_inuse(&udpv6_prot));
+	seq_printf(seq, "UDPLITE6: inuse %d\n",
+		        fold_prot_inuse(&udplitev6_prot));
 	seq_printf(seq, "RAW6: inuse %d\n",
 		       fold_prot_inuse(&rawv6_prot));
 	seq_printf(seq, "FRAG6: inuse %d memory %d\n",
@@ -133,6 +135,14 @@
 	SNMP_MIB_SENTINEL
 };
 
+static struct snmp_mib snmp6_udplite6_list[] = {
+	SNMP_MIB_ITEM("UdpLite6InDatagrams", UDP_MIB_INDATAGRAMS),
+	SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS),
+	SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS),
+	SNMP_MIB_ITEM("UdpLite6OutDatagrams", UDP_MIB_OUTDATAGRAMS),
+	SNMP_MIB_SENTINEL
+};
+
 static unsigned long
 fold_field(void *mib[], int offt)
 {
@@ -161,11 +171,13 @@
 
 	if (idev) {
 		seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex);
+		snmp6_seq_show_item(seq, (void **)idev->stats.ipv6, snmp6_ipstats_list);
 		snmp6_seq_show_item(seq, (void **)idev->stats.icmpv6, snmp6_icmp6_list);
 	} else {
 		snmp6_seq_show_item(seq, (void **)ipv6_statistics, snmp6_ipstats_list);
 		snmp6_seq_show_item(seq, (void **)icmpv6_statistics, snmp6_icmp6_list);
 		snmp6_seq_show_item(seq, (void **)udp_stats_in6, snmp6_udp6_list);
+		snmp6_seq_show_item(seq, (void **)udplite_stats_in6, snmp6_udplite6_list);
 	}
 	return 0;
 }
@@ -281,6 +293,9 @@
 	if (!idev || !idev->dev)
 		return -EINVAL;
 
+	if (snmp6_mib_init((void **)idev->stats.ipv6, sizeof(struct ipstats_mib),
+			   __alignof__(struct ipstats_mib)) < 0)
+		goto err_ip;
 	if (snmp6_mib_init((void **)idev->stats.icmpv6, sizeof(struct icmpv6_mib),
 			   __alignof__(struct icmpv6_mib)) < 0)
 		goto err_icmp;
@@ -288,12 +303,15 @@
 	return 0;
 
 err_icmp:
+	snmp6_mib_free((void **)idev->stats.ipv6);
+err_ip:
 	return err;
 }
 
 int snmp6_free_dev(struct inet6_dev *idev)
 {
 	snmp6_mib_free((void **)idev->stats.icmpv6);
+	snmp6_mib_free((void **)idev->stats.ipv6);
 	return 0;
 }
 
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index d6dedc4..4ae1b19 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -220,7 +220,7 @@
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
-	__u32 v4addr = 0;
+	__be32 v4addr = 0;
 	int addr_type;
 	int err;
 
@@ -290,7 +290,7 @@
 
 void rawv6_err(struct sock *sk, struct sk_buff *skb,
 	       struct inet6_skb_parm *opt,
-	       int type, int code, int offset, u32 info)
+	       int type, int code, int offset, __be32 info)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -370,9 +370,9 @@
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
 	if (skb->ip_summed != CHECKSUM_UNNECESSARY)
-		skb->csum = ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
+		skb->csum = ~csum_unfold(csum_ipv6_magic(&skb->nh.ipv6h->saddr,
 					     &skb->nh.ipv6h->daddr,
-					     skb->len, inet->num, 0);
+					     skb->len, inet->num, 0));
 
 	if (inet->hdrincl) {
 		if (skb_checksum_complete(skb)) {
@@ -479,8 +479,8 @@
 	int offset;
 	int len;
 	int total_len;
-	u32 tmp_csum;
-	u16 csum;
+	__wsum tmp_csum;
+	__sum16 csum;
 
 	if (!rp->checksum)
 		goto send;
@@ -530,16 +530,15 @@
 
 	/* in case cksum was not initialized */
 	if (unlikely(csum))
-		tmp_csum = csum_sub(tmp_csum, csum);
+		tmp_csum = csum_sub(tmp_csum, csum_unfold(csum));
 
-	tmp_csum = csum_ipv6_magic(&fl->fl6_src,
+	csum = csum_ipv6_magic(&fl->fl6_src,
 				   &fl->fl6_dst,
 				   total_len, fl->proto, tmp_csum);
 
-	if (tmp_csum == 0)
-		tmp_csum = -1;
+	if (csum == 0 && fl->proto == IPPROTO_UDP)
+		csum = CSUM_MANGLED_0;
 
-	csum = tmp_csum;
 	if (skb_store_bits(skb, offset, &csum, 2))
 		BUG();
 
@@ -586,7 +585,7 @@
 	if (err)
 		goto error_fault;
 
-	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);		
+	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
 	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
 		      dst_output);
 	if (err > 0)
@@ -600,7 +599,7 @@
 	err = -EFAULT;
 	kfree_skb(skb);
 error:
-	IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
+	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
 	return err; 
 }
 
@@ -855,7 +854,8 @@
 	}
 done:
 	dst_release(dst);
-	release_sock(sk);
+	if (!inet->hdrincl)
+		release_sock(sk);
 out:	
 	fl6_sock_release(flowlabel);
 	return err<0?err:len;
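
The rawv6_push_pending_frames() change above now stores the folded 16-bit result of csum_ipv6_magic() directly and, for UDP, maps an all-zero result to CSUM_MANGLED_0: a transmitted UDP checksum of zero means "no checksum", so a genuinely zero sum has to go on the wire as 0xffff. A user-space sketch of that final step (helper names here are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* fold a 32-bit one's-complement accumulator down to the 16-bit value
 * actually stored in the header (kernel: csum_fold()) */
static uint16_t csum_fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* UDP only: 0 in the checksum field means "checksum absent", so a real
 * zero result is sent as 0xffff (kernel: CSUM_MANGLED_0) */
static uint16_t udp_finish_csum(uint32_t sum)
{
	uint16_t folded = csum_fold32(sum);
	return folded ? folded : 0xffff;
}

int main(void)
{
	printf("%04x\n", udp_finish_csum(0xffffffff));	/* prints ffff */
	return 0;
}
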
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index f39bbed..6f9a904 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -47,6 +47,7 @@
 #include <net/snmp.h>
 
 #include <net/ipv6.h>
+#include <net/ip6_route.h>
 #include <net/protocol.h>
 #include <net/transp_v6.h>
 #include <net/rawv6.h>
@@ -76,7 +77,7 @@
 	struct hlist_node	list;
 	struct list_head lru_list;		/* lru list member	*/
 
-	__u32			id;		/* fragment id		*/
+	__be32			id;		/* fragment id		*/
 	struct in6_addr		saddr;
 	struct in6_addr		daddr;
 
@@ -124,28 +125,28 @@
  * callers should be careful not to use the hash value outside the ipfrag_lock
  * as doing so could race with ipfrag_hash_rnd being recalculated.
  */
-static unsigned int ip6qhashfn(u32 id, struct in6_addr *saddr,
+static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
 			       struct in6_addr *daddr)
 {
 	u32 a, b, c;
 
-	a = saddr->s6_addr32[0];
-	b = saddr->s6_addr32[1];
-	c = saddr->s6_addr32[2];
+	a = (__force u32)saddr->s6_addr32[0];
+	b = (__force u32)saddr->s6_addr32[1];
+	c = (__force u32)saddr->s6_addr32[2];
 
 	a += JHASH_GOLDEN_RATIO;
 	b += JHASH_GOLDEN_RATIO;
 	c += ip6_frag_hash_rnd;
 	__jhash_mix(a, b, c);
 
-	a += saddr->s6_addr32[3];
-	b += daddr->s6_addr32[0];
-	c += daddr->s6_addr32[1];
+	a += (__force u32)saddr->s6_addr32[3];
+	b += (__force u32)daddr->s6_addr32[0];
+	c += (__force u32)daddr->s6_addr32[1];
 	__jhash_mix(a, b, c);
 
-	a += daddr->s6_addr32[2];
-	b += daddr->s6_addr32[3];
-	c += id;
+	a += (__force u32)daddr->s6_addr32[2];
+	b += (__force u32)daddr->s6_addr32[3];
+	c += (__force u32)id;
 	__jhash_mix(a, b, c);
 
 	return c & (IP6Q_HASHSZ - 1);
@@ -257,7 +258,7 @@
 	}
 }
 
-static void ip6_evictor(void)
+static void ip6_evictor(struct inet6_dev *idev)
 {
 	struct frag_queue *fq;
 	struct list_head *tmp;
@@ -284,14 +285,14 @@
 		spin_unlock(&fq->lock);
 
 		fq_put(fq, &work);
-		IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+		IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
 	}
 }
 
 static void ip6_frag_expire(unsigned long data)
 {
 	struct frag_queue *fq = (struct frag_queue *) data;
-	struct net_device *dev;
+	struct net_device *dev = NULL;
 
 	spin_lock(&fq->lock);
 
@@ -300,17 +301,19 @@
 
 	fq_kill(fq);
 
-	IP6_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
-	IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+	dev = dev_get_by_index(fq->iif);
+	if (!dev)
+		goto out;
+
+	rcu_read_lock();
+	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
+	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+	rcu_read_unlock();
 
 	/* Don't send error if the first segment did not arrive. */
 	if (!(fq->last_in&FIRST_IN) || !fq->fragments)
 		goto out;
 
-	dev = dev_get_by_index(fq->iif);
-	if (!dev)
-		goto out;
-
 	/*
 	   But use as source device on which LAST ARRIVED
 	   segment was received. And do not use fq->dev
@@ -318,8 +321,9 @@
 	 */
 	fq->fragments->dev = dev;
 	icmpv6_send(fq->fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
-	dev_put(dev);
 out:
+	if (dev)
+		dev_put(dev);
 	spin_unlock(&fq->lock);
 	fq_put(fq, NULL);
 }
@@ -366,7 +370,8 @@
 
 
 static struct frag_queue *
-ip6_frag_create(u32 id, struct in6_addr *src, struct in6_addr *dst)
+ip6_frag_create(__be32 id, struct in6_addr *src, struct in6_addr *dst,
+		struct inet6_dev *idev)
 {
 	struct frag_queue *fq;
 
@@ -386,12 +391,13 @@
 	return ip6_frag_intern(fq);
 
 oom:
-	IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+	IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
 	return NULL;
 }
 
 static __inline__ struct frag_queue *
-fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
+fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
+	struct inet6_dev *idev)
 {
 	struct frag_queue *fq;
 	struct hlist_node *n;
@@ -410,7 +416,7 @@
 	}
 	read_unlock(&ip6_frag_lock);
 
-	return ip6_frag_create(id, src, dst);
+	return ip6_frag_create(id, src, dst, idev);
 }
 
 
@@ -428,7 +434,8 @@
 			((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));
 
 	if ((unsigned int)end > IPV6_MAXPLEN) {
-		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+				 IPSTATS_MIB_INHDRERRORS);
  		icmpv6_param_prob(skb,ICMPV6_HDR_FIELD, (u8*)&fhdr->frag_off - skb->nh.raw);
  		return;
 	}
@@ -455,7 +462,8 @@
 			/* RFC2460 says always send parameter problem in
 			 * this case. -DaveM
 			 */
-			IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
+			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
+					 IPSTATS_MIB_INHDRERRORS);
 			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, 
 					  offsetof(struct ipv6hdr, payload_len));
 			return;
@@ -571,7 +579,7 @@
 	return;
 
 err:
-	IP6_INC_STATS(IPSTATS_MIB_REASMFAILS);
+	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
 	kfree_skb(skb);
 }
 
@@ -665,7 +673,9 @@
 	if (head->ip_summed == CHECKSUM_COMPLETE)
 		head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum);
 
-	IP6_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
+	rcu_read_lock();
+	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
+	rcu_read_unlock();
 	fq->fragments = NULL;
 	return 1;
 
@@ -677,7 +687,9 @@
 	if (net_ratelimit())
 		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
 out_fail:
-	IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+	rcu_read_lock();
+	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+	rcu_read_unlock();
 	return -1;
 }
 
@@ -691,16 +703,16 @@
 
 	hdr = skb->nh.ipv6h;
 
-	IP6_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
+	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS);
 
 	/* Jumbo payload inhibits frag. header */
 	if (hdr->payload_len==0) {
-		IP6_INC_STATS(IPSTATS_MIB_INHDRERRORS);
+		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
 		return -1;
 	}
 	if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+sizeof(struct frag_hdr))) {
-		IP6_INC_STATS(IPSTATS_MIB_INHDRERRORS);
+		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
 		return -1;
 	}
@@ -711,16 +723,17 @@
 	if (!(fhdr->frag_off & htons(0xFFF9))) {
 		/* It is not a fragmented frame */
 		skb->h.raw += sizeof(struct frag_hdr);
-		IP6_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
+		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS);
 
 		IP6CB(skb)->nhoff = (u8*)fhdr - skb->nh.raw;
 		return 1;
 	}
 
 	if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh)
-		ip6_evictor();
+		ip6_evictor(ip6_dst_idev(skb->dst));
 
-	if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr)) != NULL) {
+	if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr,
+			  ip6_dst_idev(skb->dst))) != NULL) {
 		int ret = -1;
 
 		spin_lock(&fq->lock);
@@ -736,7 +749,7 @@
 		return ret;
 	}
 
-	IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
+	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
 	kfree_skb(skb);
 	return -1;
 }
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b39ae99..9f80518 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -440,7 +440,7 @@
 	if (pref == ICMPV6_ROUTER_PREF_INVALID)
 		pref = ICMPV6_ROUTER_PREF_MEDIUM;
 
-	lifetime = htonl(rinfo->lifetime);
+	lifetime = ntohl(rinfo->lifetime);
 	if (lifetime == 0xffffffff) {
 		/* infinity */
 	} else if (lifetime > 0x7fffffff/HZ) {
@@ -711,12 +711,10 @@
 			.ip6_u = {
 				.daddr = iph->daddr,
 				.saddr = iph->saddr,
-#ifdef CONFIG_IPV6_ROUTE_FWMARK
-				.fwmark = skb->nfmark,
-#endif
-				.flowlabel = (* (u32 *) iph)&IPV6_FLOWINFO_MASK,
+				.flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
 			},
 		},
+ 		.mark = skb->mark,
 		.proto = iph->nexthdr,
 	};
 
@@ -942,7 +940,7 @@
 	fib6_force_start_gc();
 
 out:
-	return (struct dst_entry *)rt;
+	return &rt->u.dst;
 }
 
 int ndisc_dst_gc(int *more)
@@ -1225,7 +1223,7 @@
 	if (idev)
 		in6_dev_put(idev);
 	if (rt)
-		dst_free((struct dst_entry *) rt);
+		dst_free(&rt->u.dst);
 	return err;
 }
 
@@ -1751,9 +1749,9 @@
 {
 	int type = ipv6_addr_type(&skb->nh.ipv6h->daddr);
 	if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED)
-		IP6_INC_STATS(IPSTATS_MIB_INADDRERRORS);
+		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INADDRERRORS);
 
-	IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
+	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_OUTNOROUTES);
 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0, skb->dev);
 	kfree_skb(skb);
 	return 0;
@@ -1824,7 +1822,7 @@
 		rt->rt6i_flags |= RTF_LOCAL;
 	rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
 	if (rt->rt6i_nexthop == NULL) {
-		dst_free((struct dst_entry *) rt);
+		dst_free(&rt->u.dst);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -2008,6 +2006,20 @@
 	return ip6_route_add(&cfg);
 }
 
+static inline size_t rt6_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct rtmsg))
+	       + nla_total_size(16) /* RTA_SRC */
+	       + nla_total_size(16) /* RTA_DST */
+	       + nla_total_size(16) /* RTA_GATEWAY */
+	       + nla_total_size(16) /* RTA_PREFSRC */
+	       + nla_total_size(4) /* RTA_TABLE */
+	       + nla_total_size(4) /* RTA_IIF */
+	       + nla_total_size(4) /* RTA_OIF */
+	       + nla_total_size(4) /* RTA_PRIORITY */
+	       + nla_total_size(sizeof(struct rta_cacheinfo));
+}
+
 static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
 			 struct in6_addr *dst, struct in6_addr *src,
 			 int iif, int type, u32 pid, u32 seq,
@@ -2015,7 +2027,7 @@
 {
 	struct rtmsg *rtm;
 	struct nlmsghdr *nlh;
-	struct rta_cacheinfo ci;
+	long expires;
 	u32 table;
 
 	if (prefix) {	/* user wants prefix routes only */
@@ -2089,18 +2101,11 @@
 		NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
 
 	NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
-	ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
-	if (rt->rt6i_expires)
-		ci.rta_expires = jiffies_to_clock_t(rt->rt6i_expires - jiffies);
-	else
-		ci.rta_expires = 0;
-	ci.rta_used = rt->u.dst.__use;
-	ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
-	ci.rta_error = rt->u.dst.error;
-	ci.rta_id = 0;
-	ci.rta_ts = 0;
-	ci.rta_tsage = 0;
-	NLA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
+
+	expires = rt->rt6i_expires ? rt->rt6i_expires - jiffies : 0;
+	if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0,
+			       expires, rt->u.dst.error) < 0)
+		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
 
@@ -2202,7 +2207,6 @@
 	struct sk_buff *skb;
 	u32 pid = 0, seq = 0;
 	struct nlmsghdr *nlh = NULL;
-	int payload = sizeof(struct rtmsg) + 256;
 	int err = -ENOBUFS;
 
 	if (info) {
@@ -2212,15 +2216,13 @@
 			seq = nlh->nlmsg_seq;
 	}
 
-	skb = nlmsg_new(nlmsg_total_size(payload), gfp_any());
+	skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
 	if (skb == NULL)
 		goto errout;
 
 	err = rt6_fill_node(skb, rt, NULL, NULL, 0, event, pid, seq, 0, 0);
-	if (err < 0) {
-		kfree_skb(skb);
-		goto errout;
-	}
+	/* failure implies BUG in rt6_nlmsg_size() */
+	BUG_ON(err < 0);
 
 	err = rtnl_notify(skb, pid, RTNLGRP_IPV6_ROUTE, nlh, gfp_any());
 errout:
@@ -2248,7 +2250,6 @@
 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
 {
 	struct rt6_proc_arg *arg = (struct rt6_proc_arg *) p_arg;
-	int i;
 
 	if (arg->skip < arg->offset / RT6_INFO_LEN) {
 		arg->skip++;
@@ -2258,38 +2259,28 @@
 	if (arg->len >= arg->length)
 		return 0;
 
-	for (i=0; i<16; i++) {
-		sprintf(arg->buffer + arg->len, "%02x",
-			rt->rt6i_dst.addr.s6_addr[i]);
-		arg->len += 2;
-	}
-	arg->len += sprintf(arg->buffer + arg->len, " %02x ",
+	arg->len += sprintf(arg->buffer + arg->len,
+			    NIP6_SEQFMT " %02x ",
+			    NIP6(rt->rt6i_dst.addr),
 			    rt->rt6i_dst.plen);
 
 #ifdef CONFIG_IPV6_SUBTREES
-	for (i=0; i<16; i++) {
-		sprintf(arg->buffer + arg->len, "%02x",
-			rt->rt6i_src.addr.s6_addr[i]);
-		arg->len += 2;
-	}
-	arg->len += sprintf(arg->buffer + arg->len, " %02x ",
+	arg->len += sprintf(arg->buffer + arg->len,
+			    NIP6_SEQFMT " %02x ",
+			    NIP6(rt->rt6i_src.addr),
 			    rt->rt6i_src.plen);
 #else
-	sprintf(arg->buffer + arg->len,
-		"00000000000000000000000000000000 00 ");
-	arg->len += 36;
+	arg->len += sprintf(arg->buffer + arg->len,
+			    "00000000000000000000000000000000 00 ");
 #endif
 
 	if (rt->rt6i_nexthop) {
-		for (i=0; i<16; i++) {
-			sprintf(arg->buffer + arg->len, "%02x",
-				rt->rt6i_nexthop->primary_key[i]);
-			arg->len += 2;
-		}
+		arg->len += sprintf(arg->buffer + arg->len,
+				    NIP6_SEQFMT,
+				    NIP6(*((struct in6_addr *)rt->rt6i_nexthop->primary_key)));
 	} else {
-		sprintf(arg->buffer + arg->len,
-			"00000000000000000000000000000000");
-		arg->len += 32;
+		arg->len += sprintf(arg->buffer + arg->len,
+				    "00000000000000000000000000000000");
 	}
 	arg->len += sprintf(arg->buffer + arg->len,
 			    " %08x %08x %08x %08x %8s\n",
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index be699f8..77b7b09 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -60,7 +60,7 @@
  */
 
 #define HASH_SIZE  16
-#define HASH(addr) ((addr^(addr>>4))&0xF)
+#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
 
 static int ipip6_fb_tunnel_init(struct net_device *dev);
 static int ipip6_tunnel_init(struct net_device *dev);
@@ -76,7 +76,7 @@
 
 static DEFINE_RWLOCK(ipip6_lock);
 
-static struct ip_tunnel * ipip6_tunnel_lookup(u32 remote, u32 local)
+static struct ip_tunnel * ipip6_tunnel_lookup(__be32 remote, __be32 local)
 {
 	unsigned h0 = HASH(remote);
 	unsigned h1 = HASH(local);
@@ -102,8 +102,8 @@
 
 static struct ip_tunnel ** ipip6_bucket(struct ip_tunnel *t)
 {
-	u32 remote = t->parms.iph.daddr;
-	u32 local = t->parms.iph.saddr;
+	__be32 remote = t->parms.iph.daddr;
+	__be32 local = t->parms.iph.saddr;
 	unsigned h = 0;
 	int prio = 0;
 
@@ -144,8 +144,8 @@
 
 static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int create)
 {
-	u32 remote = parms->iph.daddr;
-	u32 local = parms->iph.saddr;
+	__be32 remote = parms->iph.daddr;
+	__be32 local = parms->iph.saddr;
 	struct ip_tunnel *t, **tp, *nt;
 	struct net_device *dev;
 	unsigned h = 0;
@@ -405,9 +405,9 @@
 /* Returns the embedded IPv4 address if the IPv6 address
    comes from 6to4 (RFC 3056) addr space */
 
-static inline u32 try_6to4(struct in6_addr *v6dst)
+static inline __be32 try_6to4(struct in6_addr *v6dst)
 {
-	u32 dst = 0;
+	__be32 dst = 0;
 
 	if (v6dst->s6_addr16[0] == htons(0x2002)) {
 	        /* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */
@@ -432,7 +432,7 @@
 	struct net_device *tdev;			/* Device to other host */
 	struct iphdr  *iph;			/* Our new IP header */
 	int    max_headroom;			/* The extra header space needed */
-	u32    dst = tiph->daddr;
+	__be32 dst = tiph->daddr;
 	int    mtu;
 	struct in6_addr *addr6;	
 	int addr_type;
@@ -809,7 +809,7 @@
 	}
 }
 
-void __exit sit_cleanup(void)
+static void __exit sit_cleanup(void)
 {
 	inet_del_protocol(&sit_protocol, IPPROTO_IPV6);
 
@@ -819,7 +819,7 @@
 	rtnl_unlock();
 }
 
-int __init sit_init(void)
+static int __init sit_init(void)
 {
 	int err;
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4c2a7c0..c25e930 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -66,10 +66,13 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+
 /* Socket used for sending RSTs and ACKs */
 static struct socket *tcp6_socket;
 
-static void	tcp_v6_send_reset(struct sk_buff *skb);
+static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
 static void	tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
 static void	tcp_v6_send_check(struct sock *sk, int len, 
 				  struct sk_buff *skb);
@@ -78,6 +81,10 @@
 
 static struct inet_connection_sock_af_ops ipv6_mapped;
 static struct inet_connection_sock_af_ops ipv6_specific;
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
+static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
+#endif
 
 static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 {
@@ -98,27 +105,20 @@
 	}
 }
 
-static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
+static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
 				   struct in6_addr *saddr, 
 				   struct in6_addr *daddr, 
-				   unsigned long base)
+				   __wsum base)
 {
 	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
 }
 
-static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
+static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
 {
-	if (skb->protocol == htons(ETH_P_IPV6)) {
-		return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
-						    skb->nh.ipv6h->saddr.s6_addr32,
-						    skb->h.th->dest,
-						    skb->h.th->source);
-	} else {
-		return secure_tcp_sequence_number(skb->nh.iph->daddr,
-						  skb->nh.iph->saddr,
-						  skb->h.th->dest,
-						  skb->h.th->source);
-	}
+	return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
+					    skb->nh.ipv6h->saddr.s6_addr32,
+					    skb->h.th->dest,
+					    skb->h.th->source);
 }
 
 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, 
@@ -215,6 +215,9 @@
 
 		icsk->icsk_af_ops = &ipv6_mapped;
 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
+#ifdef CONFIG_TCP_MD5SIG
+		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
+#endif
 
 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 
@@ -222,6 +225,9 @@
 			icsk->icsk_ext_hdr_len = exthdrlen;
 			icsk->icsk_af_ops = &ipv6_specific;
 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
+#ifdef CONFIG_TCP_MD5SIG
+			tp->af_specific = &tcp_sock_ipv6_specific;
+#endif
 			goto failure;
 		} else {
 			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
@@ -310,7 +316,7 @@
 }
 
 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-		int type, int code, int offset, __u32 info)
+		int type, int code, int offset, __be32 info)
 {
 	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
@@ -509,8 +515,7 @@
 
 		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
 		err = ip6_xmit(sk, skb, &fl, opt, 0);
-		if (err == NET_XMIT_CN)
-			err = 0;
+		err = net_xmit_eval(err);
 	}
 
 done:
@@ -526,7 +531,396 @@
 		kfree_skb(inet6_rsk(req)->pktopts);
 }
 
-static struct request_sock_ops tcp6_request_sock_ops = {
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
+						   struct in6_addr *addr)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int i;
+
+	BUG_ON(tp == NULL);
+
+	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
+		return NULL;
+
+	for (i = 0; i < tp->md5sig_info->entries6; i++) {
+		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
+			return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i];
+	}
+	return NULL;
+}
+
+static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
+						struct sock *addr_sk)
+{
+	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
+}
+
+static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
+						      struct request_sock *req)
+{
+	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
+}
+
+static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
+			     char *newkey, u8 newkeylen)
+{
+	/* Add key to the list */
+	struct tcp6_md5sig_key *key;
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcp6_md5sig_key *keys;
+
+	key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
+	if (key) {
+		/* modify existing entry - just update that one */
+		kfree(key->key);
+		key->key = newkey;
+		key->keylen = newkeylen;
+	} else {
+		/* reallocate new list if current one is full. */
+		if (!tp->md5sig_info) {
+			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
+			if (!tp->md5sig_info) {
+				kfree(newkey);
+				return -ENOMEM;
+			}
+		}
+		tcp_alloc_md5sig_pool();
+		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
+			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
+				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
+
+			if (!keys) {
+				tcp_free_md5sig_pool();
+				kfree(newkey);
+				return -ENOMEM;
+			}
+
+			if (tp->md5sig_info->entries6)
+				memmove(keys, tp->md5sig_info->keys6,
+					(sizeof (tp->md5sig_info->keys6[0]) *
+					 tp->md5sig_info->entries6));
+
+			kfree(tp->md5sig_info->keys6);
+			tp->md5sig_info->keys6 = keys;
+			tp->md5sig_info->alloced6++;
+		}
+
+		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
+			       peer);
+		tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey;
+		tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen;
+
+		tp->md5sig_info->entries6++;
+	}
+	return 0;
+}
+
+static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
+			       u8 *newkey, __u8 newkeylen)
+{
+	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
+				 newkey, newkeylen);
+}
+
+static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int i;
+
+	for (i = 0; i < tp->md5sig_info->entries6; i++) {
+		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
+			/* Free the key */
+			kfree(tp->md5sig_info->keys6[i].key);
+			tp->md5sig_info->entries6--;
+
+			if (tp->md5sig_info->entries6 == 0) {
+				kfree(tp->md5sig_info->keys6);
+				tp->md5sig_info->keys6 = NULL;
+
+				tcp_free_md5sig_pool();
+
+				return 0;
+			} else {
+				/* shrink the database */
+				if (tp->md5sig_info->entries6 != i)
+					memmove(&tp->md5sig_info->keys6[i],
+						&tp->md5sig_info->keys6[i+1],
+						(tp->md5sig_info->entries6 - i)
+						* sizeof (tp->md5sig_info->keys6[0]));
+			}
+		}
+	}
+	return -ENOENT;
+}
+
+static void tcp_v6_clear_md5_list (struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int i;
+
+	if (tp->md5sig_info->entries6) {
+		for (i = 0; i < tp->md5sig_info->entries6; i++)
+			kfree(tp->md5sig_info->keys6[i].key);
+		tp->md5sig_info->entries6 = 0;
+		tcp_free_md5sig_pool();
+	}
+
+	kfree(tp->md5sig_info->keys6);
+	tp->md5sig_info->keys6 = NULL;
+	tp->md5sig_info->alloced6 = 0;
+
+	if (tp->md5sig_info->entries4) {
+		for (i = 0; i < tp->md5sig_info->entries4; i++)
+			kfree(tp->md5sig_info->keys4[i].key);
+		tp->md5sig_info->entries4 = 0;
+		tcp_free_md5sig_pool();
+	}
+
+	kfree(tp->md5sig_info->keys4);
+	tp->md5sig_info->keys4 = NULL;
+	tp->md5sig_info->alloced4 = 0;
+}
+
+static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
+				  int optlen)
+{
+	struct tcp_md5sig cmd;
+	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
+	u8 *newkey;
+
+	if (optlen < sizeof(cmd))
+		return -EINVAL;
+
+	if (copy_from_user(&cmd, optval, sizeof(cmd)))
+		return -EFAULT;
+
+	if (sin6->sin6_family != AF_INET6)
+		return -EINVAL;
+
+	if (!cmd.tcpm_keylen) {
+		if (!tcp_sk(sk)->md5sig_info)
+			return -ENOENT;
+		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED)
+			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
+		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
+	}
+
+	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
+		return -EINVAL;
+
+	if (!tcp_sk(sk)->md5sig_info) {
+		struct tcp_sock *tp = tcp_sk(sk);
+		struct tcp_md5sig_info *p;
+
+		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
+		if (!p)
+			return -ENOMEM;
+
+		tp->md5sig_info = p;
+	}
+
+	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
+	if (!newkey)
+		return -ENOMEM;
+	if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) {
+		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
+					 newkey, cmd.tcpm_keylen);
+	}
+	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
+}
+
+static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+				   struct in6_addr *saddr,
+				   struct in6_addr *daddr,
+				   struct tcphdr *th, int protocol,
+				   int tcplen)
+{
+	struct scatterlist sg[4];
+	__u16 data_len;
+	int block = 0;
+	__sum16 cksum;
+	struct tcp_md5sig_pool *hp;
+	struct tcp6_pseudohdr *bp;
+	struct hash_desc *desc;
+	int err;
+	unsigned int nbytes = 0;
+
+	hp = tcp_get_md5sig_pool();
+	if (!hp) {
+		printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__);
+		goto clear_hash_noput;
+	}
+	bp = &hp->md5_blk.ip6;
+	desc = &hp->md5_desc;
+
+	/* 1. TCP pseudo-header (RFC2460) */
+	ipv6_addr_copy(&bp->saddr, saddr);
+	ipv6_addr_copy(&bp->daddr, daddr);
+	bp->len = htonl(tcplen);
+	bp->protocol = htonl(protocol);
+
+	sg_set_buf(&sg[block++], bp, sizeof(*bp));
+	nbytes += sizeof(*bp);
+
+	/* 2. TCP header, excluding options */
+	cksum = th->check;
+	th->check = 0;
+	sg_set_buf(&sg[block++], th, sizeof(*th));
+	nbytes += sizeof(*th);
+
+	/* 3. TCP segment data (if any) */
+	data_len = tcplen - (th->doff << 2);
+	if (data_len > 0) {
+		u8 *data = (u8 *)th + (th->doff << 2);
+		sg_set_buf(&sg[block++], data, data_len);
+		nbytes += data_len;
+	}
+
+	/* 4. shared key */
+	sg_set_buf(&sg[block++], key->key, key->keylen);
+	nbytes += key->keylen;
+
+	/* Now store the hash into the packet */
+	err = crypto_hash_init(desc);
+	if (err) {
+		printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__);
+		goto clear_hash;
+	}
+	err = crypto_hash_update(desc, sg, nbytes);
+	if (err) {
+		printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__);
+		goto clear_hash;
+	}
+	err = crypto_hash_final(desc, md5_hash);
+	if (err) {
+		printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__);
+		goto clear_hash;
+	}
+
+	/* Reset header, and free up the crypto */
+	tcp_put_md5sig_pool();
+	th->check = cksum;
+out:
+	return 0;
+clear_hash:
+	tcp_put_md5sig_pool();
+clear_hash_noput:
+	memset(md5_hash, 0, 16);
+	goto out;
+}
+
+static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+				struct sock *sk,
+				struct dst_entry *dst,
+				struct request_sock *req,
+				struct tcphdr *th, int protocol,
+				int tcplen)
+{
+	struct in6_addr *saddr, *daddr;
+
+	if (sk) {
+		saddr = &inet6_sk(sk)->saddr;
+		daddr = &inet6_sk(sk)->daddr;
+	} else {
+		saddr = &inet6_rsk(req)->loc_addr;
+		daddr = &inet6_rsk(req)->rmt_addr;
+	}
+	return tcp_v6_do_calc_md5_hash(md5_hash, key,
+				       saddr, daddr,
+				       th, protocol, tcplen);
+}
+
+static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
+{
+	__u8 *hash_location = NULL;
+	struct tcp_md5sig_key *hash_expected;
+	struct ipv6hdr *ip6h = skb->nh.ipv6h;
+	struct tcphdr *th = skb->h.th;
+	int length = (th->doff << 2) - sizeof (*th);
+	int genhash;
+	u8 *ptr;
+	u8 newhash[16];
+
+	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
+
+	/* If the TCP option is too short, we can short cut */
+	if (length < TCPOLEN_MD5SIG)
+		return hash_expected ? 1 : 0;
+
+	/* parse options */
+	ptr = (u8*)(th + 1);
+	while (length > 0) {
+		int opcode = *ptr++;
+		int opsize;
+
+		switch(opcode) {
+		case TCPOPT_EOL:
+			goto done_opts;
+		case TCPOPT_NOP:
+			length--;
+			continue;
+		default:
+			opsize = *ptr++;
+			if (opsize < 2 || opsize > length)
+				goto done_opts;
+			if (opcode == TCPOPT_MD5SIG) {
+				hash_location = ptr;
+				goto done_opts;
+			}
+		}
+		ptr += opsize - 2;
+		length -= opsize;
+	}
+
+done_opts:
+	/* do we have a hash as expected? */
+	if (!hash_expected) {
+		if (!hash_location)
+			return 0;
+		if (net_ratelimit()) {
+			printk(KERN_INFO "MD5 Hash NOT expected but found "
+			       "(" NIP6_FMT ", %u)->"
+			       "(" NIP6_FMT ", %u)\n",
+			       NIP6(ip6h->saddr), ntohs(th->source),
+			       NIP6(ip6h->daddr), ntohs(th->dest));
+		}
+		return 1;
+	}
+
+	if (!hash_location) {
+		if (net_ratelimit()) {
+			printk(KERN_INFO "MD5 Hash expected but NOT found "
+			       "(" NIP6_FMT ", %u)->"
+			       "(" NIP6_FMT ", %u)\n",
+			       NIP6(ip6h->saddr), ntohs(th->source),
+			       NIP6(ip6h->daddr), ntohs(th->dest));
+		}
+		return 1;
+	}
+
+	/* check the signature */
+	genhash = tcp_v6_do_calc_md5_hash(newhash,
+					  hash_expected,
+					  &ip6h->saddr, &ip6h->daddr,
+					  th, sk->sk_protocol,
+					  skb->len);
+	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+		if (net_ratelimit()) {
+			printk(KERN_INFO "MD5 Hash %s for "
+			       "(" NIP6_FMT ", %u)->"
+			       "(" NIP6_FMT ", %u)\n",
+			       genhash ? "failed" : "mismatch",
+			       NIP6(ip6h->saddr), ntohs(th->source),
+			       NIP6(ip6h->daddr), ntohs(th->dest));
+		}
+		return 1;
+	}
+	return 0;
+}
+#endif
+
+static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 	.family		=	AF_INET6,
 	.obj_size	=	sizeof(struct tcp6_request_sock),
 	.rtx_syn_ack	=	tcp_v6_send_synack,
@@ -535,9 +929,16 @@
 	.send_reset	=	tcp_v6_send_reset
 };
 
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
+};
+#endif
+
 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
 	.twsk_unique	= tcp_twsk_unique,
+	.twsk_destructor= tcp_twsk_destructor,
 };
 
 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
@@ -547,7 +948,7 @@
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,  0);
-		skb->csum = offsetof(struct tcphdr, check);
+		skb->csum_offset = offsetof(struct tcphdr, check);
 	} else {
 		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 
 					    csum_partial((char *)th, th->doff<<2, 
@@ -569,16 +970,20 @@
 	th->check = 0;
 	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
 				     IPPROTO_TCP, 0);
-	skb->csum = offsetof(struct tcphdr, check);
+	skb->csum_offset = offsetof(struct tcphdr, check);
 	skb->ip_summed = CHECKSUM_PARTIAL;
 	return 0;
 }
 
-static void tcp_v6_send_reset(struct sk_buff *skb)
+static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcphdr *th = skb->h.th, *t1; 
 	struct sk_buff *buff;
 	struct flowi fl;
+	int tot_len = sizeof(*th);
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *key;
+#endif
 
 	if (th->rst)
 		return;
@@ -586,25 +991,35 @@
 	if (!ipv6_unicast_destination(skb))
 		return; 
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (sk)
+		key = tcp_v6_md5_do_lookup(sk, &skb->nh.ipv6h->daddr);
+	else
+		key = NULL;
+
+	if (key)
+		tot_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
+
 	/*
 	 * We need to grab some memory, and put together an RST,
 	 * and then put it into the queue to be sent.
 	 */
 
-	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
+	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 			 GFP_ATOMIC);
 	if (buff == NULL) 
 	  	return;
 
-	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));
+	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
 
-	t1 = (struct tcphdr *) skb_push(buff,sizeof(struct tcphdr));
+	t1 = (struct tcphdr *) skb_push(buff, tot_len);
 
 	/* Swap the send and the receive. */
 	memset(t1, 0, sizeof(*t1));
 	t1->dest = th->source;
 	t1->source = th->dest;
-	t1->doff = sizeof(*t1)/4;
+	t1->doff = tot_len / 4;
 	t1->rst = 1;
   
 	if(th->ack) {
@@ -615,6 +1030,22 @@
 				    + skb->len - (th->doff<<2));
 	}
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (key) {
+		__be32 *opt = (__be32*)(t1 + 1);
+		opt[0] = htonl((TCPOPT_NOP << 24) |
+			       (TCPOPT_NOP << 16) |
+			       (TCPOPT_MD5SIG << 8) |
+			       TCPOLEN_MD5SIG);
+		tcp_v6_do_calc_md5_hash((__u8*)&opt[1],
+					key,
+					&skb->nh.ipv6h->daddr,
+					&skb->nh.ipv6h->saddr,
+					t1, IPPROTO_TCP,
+					tot_len);
+	}
+#endif
+
 	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
 
 	memset(&fl, 0, sizeof(fl));
@@ -645,15 +1076,37 @@
 	kfree_skb(buff);
 }
 
-static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
+static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
+			    struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
 {
 	struct tcphdr *th = skb->h.th, *t1;
 	struct sk_buff *buff;
 	struct flowi fl;
 	int tot_len = sizeof(struct tcphdr);
+	__be32 *topt;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *key;
+	struct tcp_md5sig_key tw_key;
+#endif
+
+#ifdef CONFIG_TCP_MD5SIG
+	if (!tw && skb->sk) {
+		key = tcp_v6_md5_do_lookup(skb->sk, &skb->nh.ipv6h->daddr);
+	} else if (tw && tw->tw_md5_keylen) {
+		tw_key.key = tw->tw_md5_key;
+		tw_key.keylen = tw->tw_md5_keylen;
+		key = &tw_key;
+	} else {
+		key = NULL;
+	}
+#endif
 
 	if (ts)
 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
+#ifdef CONFIG_TCP_MD5SIG
+	if (key)
+		tot_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
 
 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 			 GFP_ATOMIC);
@@ -673,15 +1126,29 @@
 	t1->ack_seq = htonl(ack);
 	t1->ack = 1;
 	t1->window = htons(win);
+
+	topt = (__be32 *)(t1 + 1);
 	
 	if (ts) {
-		u32 *ptr = (u32*)(t1 + 1);
-		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
-			       (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
-		*ptr++ = htonl(tcp_time_stamp);
-		*ptr = htonl(ts);
+		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
+		*topt++ = htonl(tcp_time_stamp);
+		*topt = htonl(ts);
 	}
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (key) {
+		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
+		tcp_v6_do_calc_md5_hash((__u8 *)topt,
+					key,
+					&skb->nh.ipv6h->daddr,
+					&skb->nh.ipv6h->saddr,
+					t1, IPPROTO_TCP,
+					tot_len);
+	}
+#endif
+
 	buff->csum = csum_partial((char *)t1, tot_len, 0);
 
 	memset(&fl, 0, sizeof(fl));
@@ -712,9 +1179,9 @@
 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 {
 	struct inet_timewait_sock *tw = inet_twsk(sk);
-	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+	tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 			tcptw->tw_ts_recent);
 
@@ -723,7 +1190,7 @@
 
 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
 {
-	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
+	tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
 }
 
 
@@ -794,6 +1261,10 @@
 	if (req == NULL)
 		goto drop;
 
+#ifdef CONFIG_TCP_MD5SIG
+	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
+#endif
+
 	tcp_clear_options(&tmp_opt);
 	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 	tmp_opt.user_mss = tp->rx_opt.user_mss;
@@ -822,7 +1293,7 @@
 		treq->iif = inet6_iif(skb);
 
 	if (isn == 0) 
-		isn = tcp_v6_init_sequence(sk,skb);
+		isn = tcp_v6_init_sequence(skb);
 
 	tcp_rsk(req)->snt_isn = isn;
 
@@ -852,6 +1323,9 @@
 	struct tcp_sock *newtp;
 	struct sock *newsk;
 	struct ipv6_txoptions *opt;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *key;
+#endif
 
 	if (skb->protocol == htons(ETH_P_IP)) {
 		/*
@@ -882,6 +1356,10 @@
 
 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
+#ifdef CONFIG_TCP_MD5SIG
+		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
+#endif
+
 		newnp->pktoptions  = NULL;
 		newnp->opt	   = NULL;
 		newnp->mcast_oif   = inet6_iif(skb);
@@ -1016,6 +1494,21 @@
 
 	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
 
+#ifdef CONFIG_TCP_MD5SIG
+	/* Copy over the MD5 key from the original socket */
+	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
+		/* We're using one, so create a matching key
+		 * on the newsk structure. If we fail to get
+		 * memory, then we end up not copying the key
+		 * across. Shucks.
+		 */
+		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
+		if (newkey != NULL)
+			tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
+					  newkey, key->keylen);
+	}
+#endif
+
 	__inet6_hash(&tcp_hashinfo, newsk);
 	inet_inherit_port(&tcp_hashinfo, sk, newsk);
 
@@ -1031,7 +1524,7 @@
 	return NULL;
 }
 
-static int tcp_v6_checksum_init(struct sk_buff *skb)
+static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
 {
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
 		if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
@@ -1041,8 +1534,8 @@
 		}
 	}
 
-	skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
-				  &skb->nh.ipv6h->daddr, 0);
+	skb->csum = ~csum_unfold(tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
+				  &skb->nh.ipv6h->daddr, 0));
 
 	if (skb->len <= 76) {
 		return __skb_checksum_complete(skb);
@@ -1075,6 +1568,11 @@
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_do_rcv(sk, skb);
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (tcp_v6_inbound_md5_hash (sk, skb))
+		goto discard;
+#endif
+
 	if (sk_filter(sk, skb))
 		goto discard;
 
@@ -1140,7 +1638,7 @@
 	return 0;
 
 reset:
-	tcp_v6_send_reset(skb);
+	tcp_v6_send_reset(sk, skb);
 discard:
 	if (opt_skb)
 		__kfree_skb(opt_skb);
@@ -1265,7 +1763,7 @@
 bad_packet:
 		TCP_INC_STATS_BH(TCP_MIB_INERRS);
 	} else {
-		tcp_v6_send_reset(skb);
+		tcp_v6_send_reset(NULL, skb);
 	}
 
 discard_it:
@@ -1344,6 +1842,15 @@
 #endif
 };
 
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
+	.md5_lookup	=	tcp_v6_md5_lookup,
+	.calc_md5_hash	=	tcp_v6_calc_md5_hash,
+	.md5_add	=	tcp_v6_md5_add_func,
+	.md5_parse	=	tcp_v6_parse_md5_keys,
+};
+#endif
+
 /*
  *	TCP over IPv4 via INET6 API
  */
@@ -1366,6 +1873,15 @@
 #endif
 };
 
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
+	.md5_lookup	=	tcp_v4_md5_lookup,
+	.calc_md5_hash	=	tcp_v4_calc_md5_hash,
+	.md5_add	=	tcp_v6_md5_add_func,
+	.md5_parse	=	tcp_v6_parse_md5_keys,
+};
+#endif
+
 /* NOTE: A lot of things set to zero explicitly by call to
  *       sk_alloc() so need not be done here.
  */
@@ -1405,6 +1921,10 @@
 	sk->sk_write_space = sk_stream_write_space;
 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
+#ifdef CONFIG_TCP_MD5SIG
+	tp->af_specific = &tcp_sock_ipv6_specific;
+#endif
+
 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
@@ -1415,6 +1935,11 @@
 
 static int tcp_v6_destroy_sock(struct sock *sk)
 {
+#ifdef CONFIG_TCP_MD5SIG
+	/* Clean up the MD5 key list */
+	if (tcp_sk(sk)->md5sig_info)
+		tcp_v6_clear_md5_list(sk);
+#endif
 	tcp_v4_destroy_sock(sk);
 	return inet6_destroy_sock(sk);
 }
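
The RST/ACK paths above reserve TCPOLEN_MD5SIG_ALIGNED (20) extra header bytes and emit the MD5 signature option padded with two NOPs, so the 18-byte option keeps the header a multiple of four; the 16-byte digest then fills the remaining four words. A small sketch of how that first option word is laid out, using the standard RFC 2385 / kernel constants:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define TCPOPT_NOP		1
#define TCPOPT_MD5SIG		19	/* RFC 2385 option kind */
#define TCPOLEN_MD5SIG		18	/* kind + len + 16-byte digest */

/* NOP, NOP, kind, length -- the word written just after the TCP header,
 * as in tcp_v6_send_reset()/tcp_v6_send_ack() above */
static uint32_t md5sig_option_word(void)
{
	return htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
		     (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
}

int main(void)
{
	printf("%08x\n", ntohl(md5sig_option_word()));	/* 01011312 */
	return 0;
}
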
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 0ef9a35..918d07d 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -104,7 +104,7 @@
 }
 
 static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-			int type, int code, int offset, __u32 info)
+			int type, int code, int offset, __be32 info)
 {
 	struct xfrm6_tunnel *handler;
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index c83f23e..f52a5c3 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -38,26 +38,18 @@
 #include <linux/skbuff.h>
 #include <asm/uaccess.h>
 
-#include <net/sock.h>
-#include <net/snmp.h>
-
-#include <net/ipv6.h>
 #include <net/ndisc.h>
 #include <net/protocol.h>
 #include <net/transp_v6.h>
 #include <net/ip6_route.h>
-#include <net/addrconf.h>
-#include <net/ip.h>
-#include <net/udp.h>
 #include <net/raw.h>
-#include <net/inet_common.h>
 #include <net/tcp_states.h>
-
 #include <net/ip6_checksum.h>
 #include <net/xfrm.h>
 
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include "udp_impl.h"
 
 DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly;
 
@@ -66,23 +58,9 @@
 	return udp_get_port(sk, snum, ipv6_rcv_saddr_equal);
 }
 
-static void udp_v6_hash(struct sock *sk)
-{
-	BUG();
-}
-
-static void udp_v6_unhash(struct sock *sk)
-{
- 	write_lock_bh(&udp_hash_lock);
-	if (sk_del_node_init(sk)) {
-		inet_sk(sk)->num = 0;
-		sock_prot_dec_use(sk->sk_prot);
-	}
-	write_unlock_bh(&udp_hash_lock);
-}
-
-static struct sock *udp_v6_lookup(struct in6_addr *saddr, u16 sport,
-				  struct in6_addr *daddr, u16 dport, int dif)
+static struct sock *__udp6_lib_lookup(struct in6_addr *saddr, __be16 sport,
+				      struct in6_addr *daddr, __be16 dport,
+				      int dif, struct hlist_head udptable[])
 {
 	struct sock *sk, *result = NULL;
 	struct hlist_node *node;
@@ -90,7 +68,7 @@
 	int badness = -1;
 
  	read_lock(&udp_hash_lock);
-	sk_for_each(sk, node, &udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]) {
+	sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
 		struct inet_sock *inet = inet_sk(sk);
 
 		if (inet->num == hnum && sk->sk_family == PF_INET6) {
@@ -132,20 +110,11 @@
 }
 
 /*
- *
- */
-
-static void udpv6_close(struct sock *sk, long timeout)
-{
-	sk_common_release(sk);
-}
-
-/*
  * 	This should be easy, if there is something there we
  * 	return it, otherwise we block.
  */
 
-static int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, 
+int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 		  struct msghdr *msg, size_t len,
 		  int noblock, int flags, int *addr_len)
 {
@@ -153,7 +122,7 @@
 	struct inet_sock *inet = inet_sk(sk);
   	struct sk_buff *skb;
 	size_t copied;
-  	int err;
+	int err, copy_only, is_udplite = IS_UDPLITE(sk);
 
   	if (addr_len)
   		*addr_len=sizeof(struct sockaddr_in6);
@@ -172,15 +141,21 @@
   		msg->msg_flags |= MSG_TRUNC;
   	}
 
-	if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
-		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
-					      copied);
-	} else if (msg->msg_flags&MSG_TRUNC) {
-		if (__skb_checksum_complete(skb))
+	/*
+	 * 	Decide whether to checksum and/or copy data.
+	 */
+	copy_only = (skb->ip_summed==CHECKSUM_UNNECESSARY);
+
+	if (is_udplite  ||  (!copy_only  &&  msg->msg_flags&MSG_TRUNC)) {
+		if (__udp_lib_checksum_complete(skb))
 			goto csum_copy_err;
-		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
-					      copied);
-	} else {
+		copy_only = 1;
+	}
+
+	if (copy_only)
+		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
+					      msg->msg_iov, copied       );
+	else {
 		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
 		if (err == -EINVAL)
 			goto csum_copy_err;
@@ -231,14 +206,15 @@
 	skb_kill_datagram(sk, skb, flags);
 
 	if (flags & MSG_DONTWAIT) {
-		UDP6_INC_STATS_USER(UDP_MIB_INERRORS);
+		UDP6_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite);
 		return -EAGAIN;
 	}
 	goto try_again;
 }
 
-static void udpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-	       int type, int code, int offset, __u32 info)
+void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+		    int type, int code, int offset, __be32 info,
+		    struct hlist_head udptable[]                    )
 {
 	struct ipv6_pinfo *np;
 	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
@@ -248,8 +224,8 @@
 	struct sock *sk;
 	int err;
 
-	sk = udp_v6_lookup(daddr, uh->dest, saddr, uh->source, inet6_iif(skb));
-   
+	sk = __udp6_lib_lookup(daddr, uh->dest,
+			       saddr, uh->source, inet6_iif(skb), udptable);
 	if (sk == NULL)
 		return;
 
@@ -270,36 +246,60 @@
 	sock_put(sk);
 }
 
-static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
+static __inline__ void udpv6_err(struct sk_buff *skb,
+				 struct inet6_skb_parm *opt, int type,
+				 int code, int offset, __be32 info     )
 {
+	return __udp6_lib_err(skb, opt, type, code, offset, info, udp_hash);
+}
+
+int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
+{
+	struct udp_sock *up = udp_sk(sk);
 	int rc;
 
-	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
-		kfree_skb(skb);
-		return -1;
+	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
+		goto drop;
+
+	/*
+	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
+	 */
+	if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
+
+		if (up->pcrlen == 0) {          /* full coverage was set  */
+			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: partial coverage"
+				" %d while full coverage %d requested\n",
+				UDP_SKB_CB(skb)->cscov, skb->len);
+			goto drop;
+		}
+		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
+			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: coverage %d "
+						    "too small, need min %d\n",
+				       UDP_SKB_CB(skb)->cscov, up->pcrlen);
+			goto drop;
+		}
 	}
 
-	if (skb_checksum_complete(skb)) {
-		UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
-		kfree_skb(skb);
-		return 0;
-	}
+	if (udp_lib_checksum_complete(skb))
+		goto drop;
 
 	if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
 		/* Note that an ENOMEM error is charged twice */
 		if (rc == -ENOMEM)
-			UDP6_INC_STATS_BH(UDP_MIB_RCVBUFERRORS);
-		UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
-		kfree_skb(skb);
-		return 0;
+			UDP6_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, up->pcflag);
+		goto drop;
 	}
-	UDP6_INC_STATS_BH(UDP_MIB_INDATAGRAMS);
+	UDP6_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
 	return 0;
+drop:
+	UDP6_INC_STATS_BH(UDP_MIB_INERRORS, up->pcflag);
+	kfree_skb(skb);
+	return -1;
 }
 
 static struct sock *udp_v6_mcast_next(struct sock *sk,
-				      u16 loc_port, struct in6_addr *loc_addr,
-				      u16 rmt_port, struct in6_addr *rmt_addr,
+				      __be16 loc_port, struct in6_addr *loc_addr,
+				      __be16 rmt_port, struct in6_addr *rmt_addr,
 				      int dif)
 {
 	struct hlist_node *node;
@@ -338,15 +338,15 @@
  * Note: called only from the BH handler context,
  * so we don't need to lock the hashes.
  */
-static void udpv6_mcast_deliver(struct udphdr *uh,
-				struct in6_addr *saddr, struct in6_addr *daddr,
-				struct sk_buff *skb)
+static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr,
+		           struct in6_addr *daddr, struct hlist_head udptable[])
 {
 	struct sock *sk, *sk2;
+	const struct udphdr *uh = skb->h.uh;
 	int dif;
 
 	read_lock(&udp_hash_lock);
-	sk = sk_head(&udp_hash[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
+	sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
 	dif = inet6_iif(skb);
 	sk = udp_v6_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
 	if (!sk) {
@@ -364,9 +364,35 @@
 	udpv6_queue_rcv_skb(sk, skb);
 out:
 	read_unlock(&udp_hash_lock);
+	return 0;
 }
 
-static int udpv6_rcv(struct sk_buff **pskb)
+static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh)
+
+{
+	if (uh->check == 0) {
+		/* RFC 2460 section 8.1 says that we SHOULD log
+		   this error. Well, it is reasonable.
+		 */
+		LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0\n");
+		return 1;
+	}
+	if (skb->ip_summed == CHECKSUM_COMPLETE &&
+	    !csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
+		    	     skb->len, IPPROTO_UDP, skb->csum             ))
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+		skb->csum = ~csum_unfold(csum_ipv6_magic(&skb->nh.ipv6h->saddr,
+							 &skb->nh.ipv6h->daddr,
+							 skb->len, IPPROTO_UDP,
+							 0));
+
+	return (UDP_SKB_CB(skb)->partial_cov = 0);
+}
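udp6_csum_init() either marks the skb CHECKSUM_UNNECESSARY (the pseudo-header sum already verified it) or stores a seed in skb->csum for later full verification. As a stand-alone sketch of the folding step performed by csum_fold() on the 32-bit one's-complement accumulator (plain C, not the kernel helper itself):

	/* Fold a 32-bit one's-complement accumulator into the final 16-bit
	 * Internet checksum: add the carries back in, then complement. */
	static unsigned short fold_csum(unsigned int sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* first fold           */
		sum = (sum & 0xffff) + (sum >> 16);	/* absorb a last carry  */
		return (unsigned short)~sum;
	}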
+
+int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[],
+		   int is_udplite)
 {
 	struct sk_buff *skb = *pskb;
 	struct sock *sk;
@@ -383,44 +409,39 @@
 	uh = skb->h.uh;
 
 	ulen = ntohs(uh->len);
-
-	/* Check for jumbo payload */
-	if (ulen == 0)
-		ulen = skb->len;
-
-	if (ulen > skb->len || ulen < sizeof(*uh))
+	if (ulen > skb->len)
 		goto short_packet;
 
-	if (uh->check == 0) {
-		/* RFC 2460 section 8.1 says that we SHOULD log
-		   this error. Well, it is reasonable.
-		 */
-		LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0\n");
-		goto discard;
-	}
+	if(! is_udplite ) {		/* UDP validates ulen. */
 
-	if (ulen < skb->len) {
-		if (pskb_trim_rcsum(skb, ulen))
+		/* Check for jumbo payload */
+		if (ulen == 0)
+			ulen = skb->len;
+
+		if (ulen < sizeof(*uh))
+			goto short_packet;
+
+		if (ulen < skb->len) {
+			if (pskb_trim_rcsum(skb, ulen))
+				goto short_packet;
+			saddr = &skb->nh.ipv6h->saddr;
+			daddr = &skb->nh.ipv6h->daddr;
+			uh = skb->h.uh;
+		}
+
+		if (udp6_csum_init(skb, uh))
 			goto discard;
-		saddr = &skb->nh.ipv6h->saddr;
-		daddr = &skb->nh.ipv6h->daddr;
-		uh = skb->h.uh;
+
+	} else 	{			/* UDP-Lite validates cscov. */
+		if (udplite6_csum_init(skb, uh))
+			goto discard;
 	}
 
-	if (skb->ip_summed == CHECKSUM_COMPLETE &&
-	    !csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, skb->csum))
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-	if (skb->ip_summed != CHECKSUM_UNNECESSARY)
-		skb->csum = ~csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, 0);
-
 	/* 
 	 *	Multicast receive code 
 	 */
-	if (ipv6_addr_is_multicast(daddr)) {
-		udpv6_mcast_deliver(uh, saddr, daddr, skb);
-		return 0;
-	}
+	if (ipv6_addr_is_multicast(daddr))
+		return __udp6_lib_mcast_deliver(skb, saddr, daddr, udptable);
 
 	/* Unicast */
 
@@ -428,15 +449,16 @@
 	 * check socket cache ... must talk to Alan about his plans
 	 * for sock caches... i'll skip this for now.
 	 */
-	sk = udp_v6_lookup(saddr, uh->source, daddr, uh->dest, inet6_iif(skb));
+	sk = __udp6_lib_lookup(saddr, uh->source,
+			       daddr, uh->dest, inet6_iif(skb), udptable);
 
 	if (sk == NULL) {
 		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
 			goto discard;
 
-		if (skb_checksum_complete(skb))
+		if (udp_lib_checksum_complete(skb))
 			goto discard;
-		UDP6_INC_STATS_BH(UDP_MIB_NOPORTS);
+		UDP6_INC_STATS_BH(UDP_MIB_NOPORTS, is_udplite);
 
 		icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
 
@@ -451,14 +473,20 @@
 	return(0);
 
 short_packet:	
-	if (net_ratelimit())
-		printk(KERN_DEBUG "UDP: short packet: %d/%u\n", ulen, skb->len);
+	LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: %d/%u\n",
+		       is_udplite? "-Lite" : "",  ulen, skb->len);
 
 discard:
-	UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
+	UDP6_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
 	kfree_skb(skb);
 	return(0);	
 }
+
+static __inline__ int udpv6_rcv(struct sk_buff **pskb)
+{
+	return __udp6_lib_rcv(pskb, udp_hash, 0);
+}
+
 /*
  * Throw away all pending data and cancel the corking. Socket is locked.
  */
@@ -477,13 +505,15 @@
  *	Sending
  */
 
-static int udp_v6_push_pending_frames(struct sock *sk, struct udp_sock *up)
+static int udp_v6_push_pending_frames(struct sock *sk)
 {
 	struct sk_buff *skb;
 	struct udphdr *uh;
+	struct udp_sock  *up = udp_sk(sk);
 	struct inet_sock *inet = inet_sk(sk);
 	struct flowi *fl = &inet->cork.fl;
 	int err = 0;
+	__wsum csum = 0;
 
 	/* Grab the skbuff where UDP header space exists. */
 	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
@@ -498,35 +528,17 @@
 	uh->len = htons(up->len);
 	uh->check = 0;
 
-	if (sk->sk_no_check == UDP_CSUM_NOXMIT) {
-		skb->ip_summed = CHECKSUM_NONE;
-		goto send;
-	}
+	if (up->pcflag)
+		csum = udplite_csum_outgoing(sk, skb);
+	 else
+		csum = udp_csum_outgoing(sk, skb);
 
-	if (skb_queue_len(&sk->sk_write_queue) == 1) {
-		skb->csum = csum_partial((char *)uh,
-				sizeof(struct udphdr), skb->csum);
-		uh->check = csum_ipv6_magic(&fl->fl6_src,
-					    &fl->fl6_dst,
-					    up->len, fl->proto, skb->csum);
-	} else {
-		u32 tmp_csum = 0;
-
-		skb_queue_walk(&sk->sk_write_queue, skb) {
-			tmp_csum = csum_add(tmp_csum, skb->csum);
-		}
-		tmp_csum = csum_partial((char *)uh,
-				sizeof(struct udphdr), tmp_csum);
-                tmp_csum = csum_ipv6_magic(&fl->fl6_src,
-					   &fl->fl6_dst,
-					   up->len, fl->proto, tmp_csum);
-                uh->check = tmp_csum;
-
-	}
+	/* add protocol-dependent pseudo-header */
+	uh->check = csum_ipv6_magic(&fl->fl6_src, &fl->fl6_dst,
+				    up->len, fl->proto, csum   );
 	if (uh->check == 0)
-		uh->check = -1;
+		uh->check = CSUM_MANGLED_0;
 
-send:
 	err = ip6_push_pending_frames(sk);
 out:
 	up->len = 0;
@@ -534,7 +546,7 @@
 	return err;
 }
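The CSUM_MANGLED_0 substitution above encodes the usual UDP rule: a checksum that computes to 0x0000 must not be sent as zero, because an all-zero checksum field on the wire means "no checksum computed" (and is invalid over IPv6 altogether). In one's-complement arithmetic 0x0000 and 0xFFFF represent the same value, so the sender transmits 0xFFFF instead. A stand-alone sketch of the rule (illustrative, not kernel code):

	/* A folded result of 0x0000 goes on the wire as 0xFFFF, its
	 * one's-complement equivalent, so 0x0000 keeps meaning "no checksum". */
	static unsigned short udp_wire_check(unsigned short folded)
	{
		return folded == 0 ? 0xFFFF : folded;
	}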
 
-static int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, 
+int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 		  struct msghdr *msg, size_t len)
 {
 	struct ipv6_txoptions opt_space;
@@ -554,6 +566,8 @@
 	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
 	int err;
 	int connected = 0;
+	int is_udplite = up->pcflag;
+	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
 
 	/* destination address check */
 	if (sin6) {
@@ -694,7 +708,7 @@
 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
 	opt = ipv6_fixup_options(&opt_space, opt);
 
-	fl.proto = IPPROTO_UDP;
+	fl.proto = sk->sk_protocol;
 	ipv6_addr_copy(&fl.fl6_dst, daddr);
 	if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
 		ipv6_addr_copy(&fl.fl6_src, &np->saddr);
@@ -761,14 +775,15 @@
 
 do_append_data:
 	up->len += ulen;
-	err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen,
+	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
+	err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
 		sizeof(struct udphdr), hlimit, tclass, opt, &fl,
 		(struct rt6_info*)dst,
 		corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
 	if (err)
 		udp_v6_flush_pending_frames(sk);
 	else if (!corkreq)
-		err = udp_v6_push_pending_frames(sk, up);
+		err = udp_v6_push_pending_frames(sk);
 	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
 		up->pending = 0;
 
@@ -793,7 +808,7 @@
 out:
 	fl6_sock_release(flowlabel);
 	if (!err) {
-		UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS);
+		UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
 		return len;
 	}
 	/*
@@ -804,7 +819,7 @@
 	 * seems like overkill.
 	 */
 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
-		UDP6_INC_STATS_USER(UDP_MIB_SNDBUFERRORS);
+		UDP6_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite);
 	}
 	return err;
 
@@ -816,7 +831,7 @@
 	goto out;
 }
 
-static int udpv6_destroy_sock(struct sock *sk)
+int udpv6_destroy_sock(struct sock *sk)
 {
 	lock_sock(sk);
 	udp_v6_flush_pending_frames(sk);
@@ -830,119 +845,41 @@
 /*
  *	Socket option code for UDP
  */
-static int do_udpv6_setsockopt(struct sock *sk, int level, int optname,
-			  char __user *optval, int optlen)
+int udpv6_setsockopt(struct sock *sk, int level, int optname,
+		     char __user *optval, int optlen)
 {
-	struct udp_sock *up = udp_sk(sk);
-	int val;
-	int err = 0;
-
-	if(optlen<sizeof(int))
-		return -EINVAL;
-
-	if (get_user(val, (int __user *)optval))
-		return -EFAULT;
-
-	switch(optname) {
-	case UDP_CORK:
-		if (val != 0) {
-			up->corkflag = 1;
-		} else {
-			up->corkflag = 0;
-			lock_sock(sk);
-			udp_v6_push_pending_frames(sk, up);
-			release_sock(sk);
-		}
-		break;
-		
-	case UDP_ENCAP:
-		switch (val) {
-		case 0:
-			up->encap_type = val;
-			break;
-		default:
-			err = -ENOPROTOOPT;
-			break;
-		}
-		break;
-
-	default:
-		err = -ENOPROTOOPT;
-		break;
-	};
-
-	return err;
-}
-
-static int udpv6_setsockopt(struct sock *sk, int level, int optname,
-			  char __user *optval, int optlen)
-{
-	if (level != SOL_UDP)
-		return ipv6_setsockopt(sk, level, optname, optval, optlen);
-	return do_udpv6_setsockopt(sk, level, optname, optval, optlen);
+	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
+		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
+					  udp_v6_push_pending_frames);
+	return ipv6_setsockopt(sk, level, optname, optval, optlen);
 }
 
 #ifdef CONFIG_COMPAT
-static int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
-				   char __user *optval, int optlen)
+int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
+			    char __user *optval, int optlen)
 {
-	if (level != SOL_UDP)
-		return compat_ipv6_setsockopt(sk, level, optname,
-					      optval, optlen);
-	return do_udpv6_setsockopt(sk, level, optname, optval, optlen);
+	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
+		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
+					  udp_v6_push_pending_frames);
+	return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
 }
 #endif
 
-static int do_udpv6_getsockopt(struct sock *sk, int level, int optname,
-			  char __user *optval, int __user *optlen)
+int udpv6_getsockopt(struct sock *sk, int level, int optname,
+		     char __user *optval, int __user *optlen)
 {
-	struct udp_sock *up = udp_sk(sk);
-	int val, len;
-
-	if(get_user(len,optlen))
-		return -EFAULT;
-
-	len = min_t(unsigned int, len, sizeof(int));
-	
-	if(len < 0)
-		return -EINVAL;
-
-	switch(optname) {
-	case UDP_CORK:
-		val = up->corkflag;
-		break;
-
-	case UDP_ENCAP:
-		val = up->encap_type;
-		break;
-
-	default:
-		return -ENOPROTOOPT;
-	};
-
-  	if(put_user(len, optlen))
-  		return -EFAULT;
-	if(copy_to_user(optval, &val,len))
-		return -EFAULT;
-  	return 0;
-}
-
-static int udpv6_getsockopt(struct sock *sk, int level, int optname,
-			  char __user *optval, int __user *optlen)
-{
-	if (level != SOL_UDP)
-		return ipv6_getsockopt(sk, level, optname, optval, optlen);
-	return do_udpv6_getsockopt(sk, level, optname, optval, optlen);
+	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
+		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
+	return ipv6_getsockopt(sk, level, optname, optval, optlen);
 }
 
 #ifdef CONFIG_COMPAT
-static int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
-				   char __user *optval, int __user *optlen)
+int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
+			    char __user *optval, int __user *optlen)
 {
-	if (level != SOL_UDP)
-		return compat_ipv6_getsockopt(sk, level, optname,
-					      optval, optlen);
-	return do_udpv6_getsockopt(sk, level, optname, optval, optlen);
+	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
+		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
+	return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
 }
 #endif
 
@@ -983,7 +920,7 @@
 		   atomic_read(&sp->sk_refcnt), sp);
 }
 
-static int udp6_seq_show(struct seq_file *seq, void *v)
+int udp6_seq_show(struct seq_file *seq, void *v)
 {
 	if (v == SEQ_START_TOKEN)
 		seq_printf(seq,
@@ -1002,6 +939,7 @@
 	.owner		= THIS_MODULE,
 	.name		= "udp6",
 	.family		= AF_INET6,
+	.hashtable	= udp_hash,
 	.seq_show	= udp6_seq_show,
 	.seq_fops	= &udp6_seq_fops,
 };
@@ -1021,7 +959,7 @@
 struct proto udpv6_prot = {
 	.name		   = "UDPv6",
 	.owner		   = THIS_MODULE,
-	.close		   = udpv6_close,
+	.close		   = udp_lib_close,
 	.connect	   = ip6_datagram_connect,
 	.disconnect	   = udp_disconnect,
 	.ioctl		   = udp_ioctl,
@@ -1031,8 +969,8 @@
 	.sendmsg	   = udpv6_sendmsg,
 	.recvmsg	   = udpv6_recvmsg,
 	.backlog_rcv	   = udpv6_queue_rcv_skb,
-	.hash		   = udp_v6_hash,
-	.unhash		   = udp_v6_unhash,
+	.hash		   = udp_lib_hash,
+	.unhash		   = udp_lib_unhash,
 	.get_port	   = udp_v6_get_port,
 	.obj_size	   = sizeof(struct udp6_sock),
 #ifdef CONFIG_COMPAT
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
new file mode 100644
index 0000000..ec98788
--- /dev/null
+++ b/net/ipv6/udp_impl.h
@@ -0,0 +1,34 @@
+#ifndef _UDP6_IMPL_H
+#define _UDP6_IMPL_H
+#include <net/udp.h>
+#include <net/udplite.h>
+#include <net/protocol.h>
+#include <net/addrconf.h>
+#include <net/inet_common.h>
+
+extern int  	__udp6_lib_rcv(struct sk_buff **, struct hlist_head [], int );
+extern void 	__udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *,
+			       int , int , int , __be32 , struct hlist_head []);
+
+extern int	udpv6_getsockopt(struct sock *sk, int level, int optname,
+		     		 char __user *optval, int __user *optlen);
+extern int	udpv6_setsockopt(struct sock *sk, int level, int optname,
+			  	 char __user *optval, int optlen);
+#ifdef CONFIG_COMPAT
+extern int	compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
+					char __user *optval, int optlen);
+extern int	compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
+				       char __user *optval, int __user *optlen);
+#endif
+extern int	udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
+			      struct msghdr *msg, size_t len);
+extern int	udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
+			      struct msghdr *msg, size_t len,
+		  	      int noblock, int flags, int *addr_len);
+extern int	udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
+extern int	udpv6_destroy_sock(struct sock *sk);
+
+#ifdef CONFIG_PROC_FS
+extern int	udp6_seq_show(struct seq_file *seq, void *v);
+#endif
+#endif	/* _UDP6_IMPL_H */
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
new file mode 100644
index 0000000..629f971
--- /dev/null
+++ b/net/ipv6/udplite.c
@@ -0,0 +1,105 @@
+/*
+ *  UDPLITEv6   An implementation of the UDP-Lite protocol over IPv6.
+ *              See also net/ipv4/udplite.c
+ *
+ *  Version:    $Id: udplite.c,v 1.9 2006/10/19 08:28:10 gerrit Exp $
+ *
+ *  Authors:    Gerrit Renker       <gerrit@erg.abdn.ac.uk>
+ *
+ *  Changes:
+ *  Fixes:
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ */
+#include "udp_impl.h"
+
+DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6) __read_mostly;
+
+static int udplitev6_rcv(struct sk_buff **pskb)
+{
+	return __udp6_lib_rcv(pskb, udplite_hash, 1);
+}
+
+static void udplitev6_err(struct sk_buff *skb,
+			  struct inet6_skb_parm *opt,
+			  int type, int code, int offset, __be32 info)
+{
+	return __udp6_lib_err(skb, opt, type, code, offset, info, udplite_hash);
+}
+
+static struct inet6_protocol udplitev6_protocol = {
+	.handler	=	udplitev6_rcv,
+	.err_handler	=	udplitev6_err,
+	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
+};
+
+static int udplite_v6_get_port(struct sock *sk, unsigned short snum)
+{
+	return udplite_get_port(sk, snum, ipv6_rcv_saddr_equal);
+}
+
+struct proto udplitev6_prot = {
+	.name		   = "UDPLITEv6",
+	.owner		   = THIS_MODULE,
+	.close		   = udp_lib_close,
+	.connect	   = ip6_datagram_connect,
+	.disconnect	   = udp_disconnect,
+	.ioctl		   = udp_ioctl,
+	.init		   = udplite_sk_init,
+	.destroy	   = udpv6_destroy_sock,
+	.setsockopt	   = udpv6_setsockopt,
+	.getsockopt	   = udpv6_getsockopt,
+	.sendmsg	   = udpv6_sendmsg,
+	.recvmsg	   = udpv6_recvmsg,
+	.backlog_rcv	   = udpv6_queue_rcv_skb,
+	.hash		   = udp_lib_hash,
+	.unhash		   = udp_lib_unhash,
+	.get_port	   = udplite_v6_get_port,
+	.obj_size	   = sizeof(struct udp6_sock),
+#ifdef CONFIG_COMPAT
+	.compat_setsockopt = compat_udpv6_setsockopt,
+	.compat_getsockopt = compat_udpv6_getsockopt,
+#endif
+};
+
+static struct inet_protosw udplite6_protosw = {
+	.type		= SOCK_DGRAM,
+	.protocol	= IPPROTO_UDPLITE,
+	.prot		= &udplitev6_prot,
+	.ops		= &inet6_dgram_ops,
+	.capability	= -1,
+	.no_check	= 0,
+	.flags		= INET_PROTOSW_PERMANENT,
+};
+
+void __init udplitev6_init(void)
+{
+	if (inet6_add_protocol(&udplitev6_protocol, IPPROTO_UDPLITE) < 0)
+		printk(KERN_ERR "%s: Could not register.\n", __FUNCTION__);
+
+	inet6_register_protosw(&udplite6_protosw);
+}
+
+#ifdef CONFIG_PROC_FS
+static struct file_operations udplite6_seq_fops;
+static struct udp_seq_afinfo udplite6_seq_afinfo = {
+	.owner		= THIS_MODULE,
+	.name		= "udplite6",
+	.family		= AF_INET6,
+	.hashtable	= udplite_hash,
+	.seq_show	= udp6_seq_show,
+	.seq_fops	= &udplite6_seq_fops,
+};
+
+int __init udplite6_proc_init(void)
+{
+	return udp_proc_register(&udplite6_seq_afinfo);
+}
+
+void udplite6_proc_exit(void)
+{
+	udp_proc_unregister(&udplite6_seq_afinfo);
+}
+#endif
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index d400f8f..8dffd4d 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -274,11 +274,12 @@
 			break;
 
 		case IPPROTO_UDP:
+		case IPPROTO_UDPLITE:
 		case IPPROTO_TCP:
 		case IPPROTO_SCTP:
 		case IPPROTO_DCCP:
 			if (pskb_may_pull(skb, skb->nh.raw + offset + 4 - skb->data)) {
-				u16 *ports = (u16 *)exthdr;
+				__be16 *ports = (__be16 *)exthdr;
 
 				fl->fl_ip_sport = ports[0];
 				fl->fl_ip_dport = ports[1];
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 7931e4f..12e426b 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -50,7 +50,7 @@
 #define XFRM6_TUNNEL_SPI_MIN	1
 #define XFRM6_TUNNEL_SPI_MAX	0xffffffff
 
-static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly;
+static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
 
 #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
 #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
@@ -62,7 +62,7 @@
 {
 	unsigned h;
 
-	h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3];
+	h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]);
 	h ^= h >> 16;
 	h ^= h >> 8;
 	h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;
@@ -126,7 +126,7 @@
 	return NULL;
 }
 
-u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
+__be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
 {
 	struct xfrm6_tunnel_spi *x6spi;
 	u32 spi;
@@ -180,7 +180,7 @@
 	spi = 0;
 	goto out;
 alloc_spi:
-	x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC);
+	x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
 	if (!x6spi)
 		goto out;
 
@@ -196,7 +196,7 @@
 	return spi;
 }
 
-u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
+__be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
 {
 	struct xfrm6_tunnel_spi *x6spi;
 	u32 spi;
@@ -265,7 +265,7 @@
 }
 
 static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-			    int type, int code, int offset, __u32 info)
+			    int type, int code, int offset, __be32 info)
 {
 	/* xfrm6_tunnel native err handling */
 	switch (type) {
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
index 3fefc82..89fd2a2 100644
--- a/net/irda/discovery.c
+++ b/net/irda/discovery.c
@@ -32,6 +32,7 @@
 
 #include <linux/string.h>
 #include <linux/socket.h>
+#include <linux/fs.h>
 #include <linux/seq_file.h>
 
 #include <net/irda/irda.h>
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index d50a020..262bda8 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -61,7 +61,7 @@
 static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch);
 static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout);
 static void ircomm_tty_hangup(struct tty_struct *tty);
-static void ircomm_tty_do_softint(void *private_);
+static void ircomm_tty_do_softint(struct work_struct *work);
 static void ircomm_tty_shutdown(struct ircomm_tty_cb *self);
 static void ircomm_tty_stop(struct tty_struct *tty);
 
@@ -389,7 +389,7 @@
 		self->flow = FLOW_STOP;
 
 		self->line = line;
-		INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self);
+		INIT_WORK(&self->tqueue, ircomm_tty_do_softint);
 		self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED;
 		self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED;
 		self->close_delay = 5*HZ/10;
@@ -594,15 +594,16 @@
 }
 
 /*
- * Function ircomm_tty_do_softint (private_)
+ * Function ircomm_tty_do_softint (work)
  *
 *    We use this routine to give the write wakeup to the user at a
 *    safe time (as fast as possible after writes have completed). This 
  *    can be compared to the Tx interrupt.
  */
-static void ircomm_tty_do_softint(void *private_)
+static void ircomm_tty_do_softint(struct work_struct *work)
 {
-	struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_;
+	struct ircomm_tty_cb *self =
+		container_of(work, struct ircomm_tty_cb, tqueue);
 	struct tty_struct *tty;
 	unsigned long flags;
 	struct sk_buff *skb, *ctrl_skb;
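The ircomm_tty conversion above follows the 2.6.20-era workqueue API change: work handlers take a struct work_struct * and recover their owning object with container_of() instead of receiving a void * cookie. A minimal sketch of the pattern (illustrative types, not from this driver):

	#include <linux/workqueue.h>

	struct my_dev {
		int pending;
		struct work_struct wq;
	};

	static void my_dev_work(struct work_struct *work)
	{
		/* recover the containing object from the embedded work item */
		struct my_dev *dev = container_of(work, struct my_dev, wq);

		dev->pending = 0;
	}

	/* setup, e.g. in probe():  INIT_WORK(&dev->wq, my_dev_work);
	 * later:                   schedule_work(&dev->wq);           */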
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 415cf4e..8f1c6d6 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -27,6 +27,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/skbuff.h>
+#include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
@@ -172,7 +173,7 @@
 
 	IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
 
-	self = kmalloc(sizeof(struct iriap_cb), GFP_ATOMIC);
+	self = kzalloc(sizeof(*self), GFP_ATOMIC);
 	if (!self) {
 		IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
 		return NULL;
@@ -181,7 +182,6 @@
 	/*
 	 *  Initialize instance
 	 */
-	memset(self, 0, sizeof(struct iriap_cb));
 
 	self->magic = IAS_MAGIC;
 	self->mode = mode;
@@ -451,12 +451,12 @@
 	n = 2;
 
 	/* Get length, MSB first */
-	len = be16_to_cpu(get_unaligned((__u16 *)(fp+n))); n += 2;
+	len = be16_to_cpu(get_unaligned((__be16 *)(fp+n))); n += 2;
 
 	IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len);
 
 	/* Get object ID, MSB first */
-	obj_id = be16_to_cpu(get_unaligned((__u16 *)(fp+n))); n += 2;
+	obj_id = be16_to_cpu(get_unaligned((__be16 *)(fp+n))); n += 2;
 
 	type = fp[n++];
 	IRDA_DEBUG(4, "%s(), Value type = %d\n", __FUNCTION__, type);
@@ -506,7 +506,7 @@
 		value = irias_new_string_value(fp+n);
 		break;
 	case IAS_OCT_SEQ:
-		value_len = be16_to_cpu(get_unaligned((__u16 *)(fp+n)));
+		value_len = be16_to_cpu(get_unaligned((__be16 *)(fp+n)));
 		n += 2;
 
 		/* Will truncate to IAS_MAX_OCTET_STRING bytes */
@@ -544,7 +544,7 @@
 {
 	struct sk_buff *tx_skb;
 	int n;
-	__u32 tmp_be32;
+	__be32 tmp_be32;
 	__be16 tmp_be16;
 	__u8 *fp;
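The iriap.c hunks above re-type the unaligned reads as __be16 before be16_to_cpu(). In portable terms, be16_to_cpu(get_unaligned((__be16 *)p)) simply extracts a big-endian 16-bit field that may sit at any byte offset; a plain-C equivalent (illustrative only, no kernel helpers assumed):

	/* Read a 16-bit big-endian value from an arbitrarily aligned buffer. */
	static unsigned int read_be16(const unsigned char *p)
	{
		return ((unsigned int)p[0] << 8) | p[1];
	}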
 
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index 56292ab..b1ee99a 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -501,13 +501,12 @@
 		len = IAS_MAX_OCTET_STRING;
 	value->len = len;
 
-	value->t.oct_seq = kmalloc(len, GFP_ATOMIC);
+	value->t.oct_seq = kmemdup(octseq, len, GFP_ATOMIC);
 	if (value->t.oct_seq == NULL){
 		IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
 		kfree(value);
 		return NULL;
 	}
-	memcpy(value->t.oct_seq, octseq , len);
 	return value;
 }
 
@@ -522,7 +521,6 @@
 	}
 
 	value->type = IAS_MISSING;
-	value->len = 0;
 
 	return value;
 }
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 9b962f2..2bb04ac 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -995,7 +995,7 @@
 {
 	__u8 *frame;
 	__u8 param_len;
-	__u16 tmp_le; /* Temporary value in little endian format */
+	__le16 tmp_le; /* Temporary value in little endian format */
 	int n=0;
 	
 	if (skb == NULL) {
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index fede837..7e5d12a 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -641,15 +641,13 @@
 	}
 
 	/* Allocate a new instance */
-	new = kmalloc(sizeof(struct lsap_cb), GFP_ATOMIC);
+	new = kmemdup(orig, sizeof(*new), GFP_ATOMIC);
 	if (!new)  {
 		IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __FUNCTION__);
 		spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock,
 				       flags);
 		return NULL;
 	}
-	/* Dup */
-	memcpy(new, orig, sizeof(struct lsap_cb));
 	/* new->lap = orig->lap; => done in the memcpy() */
 	/* new->slsap_sel = orig->slsap_sel; => done in the memcpy() */
 	new->conn_skb = NULL;
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index 1ba8c71..1d26cd3 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -356,14 +356,13 @@
 	/*
 	 * Allocate new hashbin
 	 */
-	hashbin = kmalloc( sizeof(hashbin_t), GFP_ATOMIC);
+	hashbin = kzalloc(sizeof(*hashbin), GFP_ATOMIC);
 	if (!hashbin)
 		return NULL;
 
 	/*
 	 * Initialize structure
 	 */
-	memset(hashbin, 0, sizeof(hashbin_t));
 	hashbin->hb_type = type;
 	hashbin->magic = HB_MAGIC;
 	//hashbin->hb_current = NULL;
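Several of the IrDA hunks above (iriap.c, irias_object.c, irlmp.c and this one) replace open-coded kmalloc()+memset() or kmalloc()+memcpy() pairs with kzalloc() and kmemdup(). Their behaviour can be sketched in plain C as follows (user-space stand-ins only; the kernel versions additionally take a gfp flags argument):

	#include <stdlib.h>
	#include <string.h>

	/* roughly what kzalloc(n, gfp) provides: a zeroed allocation */
	static void *zalloc_sketch(size_t n)
	{
		return calloc(1, n);
	}

	/* roughly what kmemdup(src, n, gfp) provides: allocate and copy */
	static void *memdup_sketch(const void *src, size_t n)
	{
		void *p = malloc(n);

		if (p)
			memcpy(p, src, n);
		return p;
	}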
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 3c2e70b..03504f3 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -26,6 +26,7 @@
 
 #include <linux/skbuff.h>
 #include <linux/init.h>
+#include <linux/fs.h>
 #include <linux/seq_file.h>
 
 #include <asm/byteorder.h>
@@ -1099,7 +1100,7 @@
 			return -ENOMEM;
 
 		/* Reserve space for MUX_CONTROL and LAP header */
-		skb_reserve(tx_skb, TTP_MAX_HEADER);
+		skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
 	} else {
 		tx_skb = userdata;
 		/*
@@ -1147,7 +1148,7 @@
 		frame[3] = 0x02; /* Value length */
 
 		put_unaligned(cpu_to_be16((__u16) max_sdu_size),
-			      (__u16 *)(frame+4));
+			      (__be16 *)(frame+4));
 	} else {
 		/* Insert plain TTP header */
 		frame = skb_push(tx_skb, TTP_HEADER);
@@ -1348,7 +1349,7 @@
 			return -ENOMEM;
 
 		/* Reserve space for MUX_CONTROL and LAP header */
-		skb_reserve(tx_skb, TTP_MAX_HEADER);
+		skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
 	} else {
 		tx_skb = userdata;
 		/*
@@ -1394,7 +1395,7 @@
 		frame[3] = 0x02; /* Value length */
 
 		put_unaligned(cpu_to_be16((__u16) max_sdu_size),
-			      (__u16 *)(frame+4));
+			      (__be16 *)(frame+4));
 	} else {
 		/* Insert TTP header */
 		frame = skb_push(tx_skb, TTP_HEADER);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 20ff7cc..5dd5094 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -27,6 +27,7 @@
 #include <linux/proc_fs.h>
 #include <linux/init.h>
 #include <net/xfrm.h>
+#include <linux/audit.h>
 
 #include <net/sock.h>
 
@@ -1420,6 +1421,9 @@
 	else
 		err = xfrm_state_update(x);
 
+	xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
+		       AUDIT_MAC_IPSEC_ADDSA, err ? 0 : 1, NULL, x);
+
 	if (err < 0) {
 		x->km.state = XFRM_STATE_DEAD;
 		__xfrm_state_put(x);
@@ -1460,8 +1464,12 @@
 		err = -EPERM;
 		goto out;
 	}
-	
+
 	err = xfrm_state_delete(x);
+
+	xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
+		       AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);
+
 	if (err < 0)
 		goto out;
 
@@ -1637,12 +1645,15 @@
 {
 	unsigned proto;
 	struct km_event c;
+	struct xfrm_audit audit_info;
 
 	proto = pfkey_satype2proto(hdr->sadb_msg_satype);
 	if (proto == 0)
 		return -EINVAL;
 
-	xfrm_state_flush(proto);
+	audit_info.loginuid = audit_get_loginuid(current->audit_context);
+	audit_info.secid = 0;
+	xfrm_state_flush(proto, &audit_info);
 	c.data.proto = proto;
 	c.seq = hdr->sadb_msg_seq;
 	c.pid = hdr->sadb_msg_pid;
@@ -1767,11 +1778,11 @@
 
 	/* addresses present only in tunnel mode */
 	if (t->mode == XFRM_MODE_TUNNEL) {
-		switch (xp->family) {
+		struct sockaddr *sa;
+		sa = (struct sockaddr *)(rq+1);
+		switch(sa->sa_family) {
 		case AF_INET:
-			sin = (void*)(rq+1);
-			if (sin->sin_family != AF_INET)
-				return -EINVAL;
+			sin = (struct sockaddr_in*)sa;
 			t->saddr.a4 = sin->sin_addr.s_addr;
 			sin++;
 			if (sin->sin_family != AF_INET)
@@ -1780,9 +1791,7 @@
 			break;
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 		case AF_INET6:
-			sin6 = (void *)(rq+1);
-			if (sin6->sin6_family != AF_INET6)
-				return -EINVAL;
+			sin6 = (struct sockaddr_in6*)sa;
 			memcpy(t->saddr.a6, &sin6->sin6_addr, sizeof(struct in6_addr));
 			sin6++;
 			if (sin6->sin6_family != AF_INET6)
@@ -1793,7 +1802,10 @@
 		default:
 			return -EINVAL;
 		}
-	}
+		t->encap_family = sa->sa_family;
+	} else
+		t->encap_family = xp->family;
+
 	/* No way to set this via kame pfkey */
 	t->aalgos = t->ealgos = t->calgos = ~0;
 	xp->xfrm_nr++;
@@ -1830,18 +1842,25 @@
 
 static int pfkey_xfrm_policy2msg_size(struct xfrm_policy *xp)
 {
+	struct xfrm_tmpl *t;
 	int sockaddr_size = pfkey_sockaddr_size(xp->family);
-	int socklen = (xp->family == AF_INET ?
-		       sizeof(struct sockaddr_in) :
-		       sizeof(struct sockaddr_in6));
+	int socklen = 0;
+	int i;
+
+	for (i=0; i<xp->xfrm_nr; i++) {
+		t = xp->xfrm_vec + i;
+		socklen += (t->encap_family == AF_INET ?
+			    sizeof(struct sockaddr_in) :
+			    sizeof(struct sockaddr_in6));
+	}
 
 	return sizeof(struct sadb_msg) +
 		(sizeof(struct sadb_lifetime) * 3) +
 		(sizeof(struct sadb_address) * 2) + 
 		(sockaddr_size * 2) +
 		sizeof(struct sadb_x_policy) +
-		(xp->xfrm_nr * (sizeof(struct sadb_x_ipsecrequest) +
-				(socklen * 2))) +
+		(xp->xfrm_nr * sizeof(struct sadb_x_ipsecrequest)) +
+		(socklen * 2) +
 		pfkey_xfrm_policy2sec_ctx_size(xp);
 }
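As a worked example of the sizing change above (assuming the usual Linux layouts of 16 bytes for struct sockaddr_in and 28 bytes for struct sockaddr_in6): a policy carrying one IPv4 tunnel template and one IPv6 tunnel template accumulates socklen = 16 + 28 = 44, so the message reserves socklen * 2 = 88 bytes for the templates' source and destination endpoints, instead of multiplying one family-wide size by xfrm_nr.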
 
@@ -1999,7 +2018,9 @@
 
 		req_size = sizeof(struct sadb_x_ipsecrequest);
 		if (t->mode == XFRM_MODE_TUNNEL)
-			req_size += 2*socklen;
+			req_size += ((t->encap_family == AF_INET ?
+		       		     sizeof(struct sockaddr_in) :
+		       		     sizeof(struct sockaddr_in6)) * 2);
 		else
 			size -= 2*socklen;
 		rq = (void*)skb_put(skb, req_size);
@@ -2015,7 +2036,7 @@
 			rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE;
 		rq->sadb_x_ipsecrequest_reqid = t->reqid;
 		if (t->mode == XFRM_MODE_TUNNEL) {
-			switch (xp->family) {
+			switch (t->encap_family) {
 			case AF_INET:
 				sin = (void*)(rq+1);
 				sin->sin_family = AF_INET;
@@ -2195,6 +2216,9 @@
 	err = xfrm_policy_insert(pol->sadb_x_policy_dir-1, xp,
 				 hdr->sadb_msg_type != SADB_X_SPDUPDATE);
 
+	xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
+		       AUDIT_MAC_IPSEC_ADDSPD, err ? 0 : 1, xp, NULL);
+
 	if (err)
 		goto out;
 
@@ -2272,6 +2296,10 @@
 	xp = xfrm_policy_bysel_ctx(XFRM_POLICY_TYPE_MAIN, pol->sadb_x_policy_dir-1,
 				   &sel, tmp.security, 1);
 	security_xfrm_policy_free(&tmp);
+
+	xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
+		       AUDIT_MAC_IPSEC_DELSPD, (xp) ? 1 : 0, xp, NULL);
+
 	if (xp == NULL)
 		return -ENOENT;
 
@@ -2406,8 +2434,11 @@
 static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
 {
 	struct km_event c;
+	struct xfrm_audit audit_info;
 
-	xfrm_policy_flush(XFRM_POLICY_TYPE_MAIN);
+	audit_info.loginuid = audit_get_loginuid(current->audit_context);
+	audit_info.secid = 0;
+	xfrm_policy_flush(XFRM_POLICY_TYPE_MAIN, &audit_info);
 	c.data.type = XFRM_POLICY_TYPE_MAIN;
 	c.event = XFRM_MSG_FLUSHPOLICY;
 	c.pid = hdr->sadb_msg_pid;
@@ -2938,7 +2969,7 @@
 	return NULL;
 }
 
-static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
+static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
 {
 	struct sk_buff *skb;
 	struct sadb_msg *hdr;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 2652ead..190bb3e 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -64,7 +64,7 @@
  *
  *	Given an ARP header type return the corresponding ethernet protocol.
  */
-static inline u16 llc_proto_type(u16 arphrd)
+static inline __be16 llc_proto_type(u16 arphrd)
 {
 	return arphrd == ARPHRD_IEEE802_TR ?
 		         htons(ETH_P_TR_802_2) : htons(ETH_P_802_2);
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 94d2368..db82aff 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -115,8 +115,8 @@
 	skb->h.raw += llc_len;
 	skb_pull(skb, llc_len);
 	if (skb->protocol == htons(ETH_P_802_2)) {
-		u16 pdulen = eth_hdr(skb)->h_proto,
-		    data_size = ntohs(pdulen) - llc_len;
+		__be16 pdulen = eth_hdr(skb)->h_proto;
+		u16 data_size = ntohs(pdulen) - llc_len;
 
 		if (unlikely(pskb_trim_rcsum(skb, data_size)))
 			return 0;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index f619c65..3a66878 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -25,19 +25,57 @@
 	  and is also scheduled to replace the old syslog-based ipt_LOG
 	  and ip6t_LOG modules.
 
-config NF_CONNTRACK
-	tristate "Layer 3 Independent Connection tracking (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && IP_NF_CONNTRACK=n
-	default n
-	---help---
+config NF_CONNTRACK_ENABLED
+	tristate "Netfilter connection tracking support"
+	help
 	  Connection tracking keeps a record of what packets have passed
 	  through your machine, in order to figure out how they are related
 	  into connections.
 
+	  This is required to do Masquerading or other kinds of Network
+	  Address Translation (except for Fast NAT).  It can also be used to
+	  enhance packet filtering (see `Connection state match support'
+	  below).
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+choice
+	prompt "Netfilter connection tracking support"
+	depends on NF_CONNTRACK_ENABLED
+
+config NF_CONNTRACK_SUPPORT
+	bool "Layer 3 Independent Connection tracking (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
 	  Layer 3 independent connection tracking is an experimental scheme
 	  which generalizes ip_conntrack to support other layer 3 protocols.
 
-	  To compile it as a module, choose M here.  If unsure, say N.
+	  This is required to do Masquerading or other kinds of Network
+	  Address Translation (except for Fast NAT).  It can also be used to
+	  enhance packet filtering (see `Connection state match support'
+	  below).
+
+config IP_NF_CONNTRACK_SUPPORT
+	bool "Layer 3 Dependent Connection tracking (OBSOLETE)"
+	help
+	  The old, Layer 3 dependent ip_conntrack subsystem of netfilter.
+
+	  This is required to do Masquerading or other kinds of Network
+	  Address Translation (except for Fast NAT).  It can also be used to
+	  enhance packet filtering (see `Connection state match support'
+	  below).
+
+endchoice
+
+config NF_CONNTRACK
+	tristate
+	default m if NF_CONNTRACK_SUPPORT && NF_CONNTRACK_ENABLED=m
+	default y if NF_CONNTRACK_SUPPORT && NF_CONNTRACK_ENABLED=y
+
+config IP_NF_CONNTRACK
+	tristate
+	default m if IP_NF_CONNTRACK_SUPPORT && NF_CONNTRACK_ENABLED=m
+	default y if IP_NF_CONNTRACK_SUPPORT && NF_CONNTRACK_ENABLED=y
 
 config NF_CT_ACCT
 	bool "Connection tracking flow accounting"
@@ -82,8 +120,12 @@
 
 	  If unsure, say `N'.
 
+config NF_CT_PROTO_GRE
+	tristate
+	depends on EXPERIMENTAL && NF_CONNTRACK
+
 config NF_CT_PROTO_SCTP
-	tristate 'SCTP protocol on new connection tracking support (EXPERIMENTAL)'
+	tristate 'SCTP protocol connection tracking support (EXPERIMENTAL)'
 	depends on EXPERIMENTAL && NF_CONNTRACK
 	default n
 	help
@@ -93,8 +135,23 @@
 	  If you want to compile it as a module, say M here and read
 	  Documentation/modules.txt.  If unsure, say `N'.
 
+config NF_CONNTRACK_AMANDA
+	tristate "Amanda backup protocol support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && NF_CONNTRACK
+	select TEXTSEARCH
+	select TEXTSEARCH_KMP
+	help
+	  If you are running the Amanda backup package <http://www.amanda.org/>
+	  on this machine or machines that will be MASQUERADED through this
+	  machine, then you may want to enable this feature.  This allows the
+	  connection tracking and NAT code to permit the sub-channels that
+	  Amanda requires for communication of the backup data, messages and
+	  index.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config NF_CONNTRACK_FTP
-	tristate "FTP support on new connection tracking (EXPERIMENTAL)"
+	tristate "FTP protocol support (EXPERIMENTAL)"
 	depends on EXPERIMENTAL && NF_CONNTRACK
 	help
 	  Tracking FTP connections is problematic: special helpers are
@@ -107,6 +164,101 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NF_CONNTRACK_H323
+	tristate "H.323 protocol support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && NF_CONNTRACK
+	help
+	  H.323 is a VoIP signalling protocol from ITU-T. As one of the most
+	  important VoIP protocols, it is widely used by voice hardware and
+	  software including voice gateways, IP phones, Netmeeting, OpenPhone,
+	  Gnomemeeting, etc.
+
+	  With this module you can support H.323 on a connection tracking/NAT
+	  firewall.
+
+	  This module supports RAS, Fast Start, H.245 Tunnelling, Call
+	  Forwarding, RTP/RTCP and T.120 based audio, video, fax, chat,
+	  whiteboard, file transfer, etc. For more information, please
+	  visit http://nath323.sourceforge.net/.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config NF_CONNTRACK_IRC
+	tristate "IRC protocol support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && NF_CONNTRACK
+	help
+	  There is a commonly-used extension to IRC called
+	  Direct Client-to-Client Protocol (DCC).  This enables users to send
+	  files to each other, and also chat to each other without the need
+	  of a server.  DCC Sending is used anywhere you send files over IRC,
+	  and DCC Chat is most commonly used by Eggdrop bots.  If you are
+	  using NAT, this extension will enable you to send files and initiate
+	  chats.  Note that you do NOT need this extension to get files or
+	  have others initiate chats, or to do anything else in IRC.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config NF_CONNTRACK_NETBIOS_NS
+	tristate "NetBIOS name service protocol support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && NF_CONNTRACK
+	help
+	  NetBIOS name service requests are sent as broadcast messages from an
+	  unprivileged port and responded to with unicast messages to the
+	      same port. This makes them hard to firewall properly because connection
+	  tracking doesn't deal with broadcasts. This helper tracks locally
+	  originating NetBIOS name service requests and the corresponding
+	  responses. It relies on correct IP address configuration, specifically
+	  netmask and broadcast address. When properly configured, the output
+	  of "ip address show" should look similar to this:
+
+	  $ ip -4 address show eth0
+	  4: eth0: <BROADCAST,MULTICAST,UP> mtu 1500 qdisc pfifo_fast qlen 1000
+	      inet 172.16.2.252/24 brd 172.16.2.255 scope global eth0
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config NF_CONNTRACK_PPTP
+	tristate "PPTP protocol support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && NF_CONNTRACK
+	select NF_CT_PROTO_GRE
+	help
+	  This module adds support for PPTP (Point to Point Tunnelling
+	  Protocol, RFC2637) connection tracking and NAT.
+
+	  If you are running PPTP sessions over a stateful firewall or NAT
+	  box, you may want to enable this feature.
+
+	  Please note that not all PPTP modes of operation are supported yet.
+	  Specifically these limitations exist:
+	    - Blindly assumes that control connections are always established
+	      in PNS->PAC direction. This is a violation of RFC2637.
+	    - Only supports a single call within each session
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config NF_CONNTRACK_SIP
+	tristate "SIP protocol support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && NF_CONNTRACK
+	help
+	  SIP is an application-layer control protocol that can establish,
+	  modify, and terminate multimedia sessions (conferences) such as
+	  Internet telephony calls. With the ip_conntrack_sip and
+	  the nf_nat_sip modules you can support the protocol on a connection
+	  tracking/NATing firewall.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config NF_CONNTRACK_TFTP
+	tristate "TFTP protocol support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && NF_CONNTRACK
+	help
+	  TFTP connection tracking helper; it may be required depending
+	  on how restrictive your ruleset is.
+	  If you are using a tftp client behind -j SNAT or -j MASQUERADING
+	  you will need this.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config NF_CT_NETLINK
 	tristate 'Connection tracking netlink interface (EXPERIMENTAL)'
 	depends on EXPERIMENTAL && NF_CONNTRACK && NETFILTER_NETLINK
@@ -184,6 +336,17 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_TARGET_NFLOG
+	tristate '"NFLOG" target support'
+	depends on NETFILTER_XTABLES
+	help
+	  This option enables the NFLOG target, which allows logging
+	  messages through the netfilter logging API. That API can use
+	  either the old LOG target, the old ULOG target or nfnetlink_log
+	  as its backend.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_TARGET_NOTRACK
 	tristate  '"NOTRACK" target support'
 	depends on NETFILTER_XTABLES
@@ -464,5 +627,19 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_MATCH_HASHLIMIT
+	tristate '"hashlimit" match support'
+	depends on NETFILTER_XTABLES
+	help
+	  This option adds a `hashlimit' match.
+
+	  As opposed to `limit', this match dynamically creates a hash table
+	  of limit buckets, based on your selection of source/destination
+	  addresses and/or ports.
+
+	  It enables you to express policies like `10kpps for any given
+	  destination address' or `500pps from any given source address'
+	  with a single rule.
+
 endmenu
 
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index a74be49..5dc5574 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,7 +1,10 @@
 netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
-nf_conntrack-objs	:= nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o
+
+nf_conntrack-y	:= nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o
+nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
 
 obj-$(CONFIG_NETFILTER) = netfilter.o
+obj-$(CONFIG_SYSCTL) += nf_sysctl.o
 
 obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
 obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
@@ -11,13 +14,23 @@
 obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o
 
 # SCTP protocol connection tracking
+obj-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
 obj-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
 
 # netlink interface for nf_conntrack
 obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
 
 # connection tracking helpers
+nf_conntrack_h323-objs := nf_conntrack_h323_main.o nf_conntrack_h323_asn1.o
+
+obj-$(CONFIG_NF_CONNTRACK_AMANDA) += nf_conntrack_amanda.o
 obj-$(CONFIG_NF_CONNTRACK_FTP) += nf_conntrack_ftp.o
+obj-$(CONFIG_NF_CONNTRACK_H323) += nf_conntrack_h323.o
+obj-$(CONFIG_NF_CONNTRACK_IRC) += nf_conntrack_irc.o
+obj-$(CONFIG_NF_CONNTRACK_NETBIOS_NS) += nf_conntrack_netbios_ns.o
+obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_conntrack_pptp.o
+obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
+obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o
 
 # generic X tables 
 obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
@@ -28,6 +41,7 @@
 obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
@@ -56,3 +70,4 @@
 obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index d80b935..291b8c6 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -28,7 +28,7 @@
 
 static DEFINE_SPINLOCK(afinfo_lock);
 
-struct nf_afinfo *nf_afinfo[NPROTO];
+struct nf_afinfo *nf_afinfo[NPROTO] __read_mostly;
 EXPORT_SYMBOL(nf_afinfo);
 
 int nf_register_afinfo(struct nf_afinfo *afinfo)
@@ -54,7 +54,7 @@
  * of skbuffs queued for userspace, and not deregister a hook unless
  * this is zero, but that sucks.  Now, we simply check when the
  * packets come back: if the hook is gone, the packet is discarded. */
-struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
+struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS] __read_mostly;
 EXPORT_SYMBOL(nf_hooks);
 static DEFINE_SPINLOCK(nf_hook_lock);
 
@@ -222,28 +222,21 @@
 }
 EXPORT_SYMBOL(skb_make_writable);
 
-u_int16_t nf_csum_update(u_int32_t oldval, u_int32_t newval, u_int32_t csum)
+void nf_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
+			    __be32 from, __be32 to, int pseudohdr)
 {
-	u_int32_t diff[] = { oldval, newval };
-
-	return csum_fold(csum_partial((char *)diff, sizeof(diff), ~csum));
-}
-EXPORT_SYMBOL(nf_csum_update);
-
-u_int16_t nf_proto_csum_update(struct sk_buff *skb,
-			       u_int32_t oldval, u_int32_t newval,
-			       u_int16_t csum, int pseudohdr)
-{
+	__be32 diff[] = { ~from, to };
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
-		csum = nf_csum_update(oldval, newval, csum);
+		*sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
+				~csum_unfold(*sum)));
 		if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
-			skb->csum = nf_csum_update(oldval, newval, skb->csum);
+			skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+						~skb->csum);
 	} else if (pseudohdr)
-		csum = ~nf_csum_update(oldval, newval, ~csum);
-
-	return csum;
+		*sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff),
+				csum_unfold(*sum)));
 }
-EXPORT_SYMBOL(nf_proto_csum_update);
+EXPORT_SYMBOL(nf_proto_csum_replace4);
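nf_proto_csum_replace4() is an incremental checksum update in the style of RFC 1624: rather than re-summing the packet it feeds the pair { ~from, to } into the existing folded checksum. A self-contained user-space sketch of the same arithmetic (illustrative only; it is not the kernel helper and ignores the CHECKSUM_PARTIAL/pseudohdr handling above):

	/* RFC 1624-style update of a 16-bit Internet checksum when one 32-bit
	 * field changes from 'from' to 'to' (values in host order for
	 * simplicity). */
	static unsigned short csum_replace4(unsigned short check,
					    unsigned int from, unsigned int to)
	{
		unsigned long sum = (unsigned short)~check;

		sum += (~from >> 16) + (~from & 0xffff);	/* remove old value */
		sum += (to   >> 16) + (to   & 0xffff);		/* add new value    */

		while (sum >> 16)				/* fold carries back */
			sum = (sum & 0xffff) + (sum >> 16);

		return (unsigned short)~sum;
	}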
 
 /* This does not belong here, but locally generated errors need it if connection
    tracking in use: without this, connection may not be in hash table, and hence
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
new file mode 100644
index 0000000..b8869ea
--- /dev/null
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -0,0 +1,238 @@
+/* Amanda extension for IP connection tracking
+ *
+ * (C) 2002 by Brian J. Murrell <netfilter@interlinx.bc.ca>
+ * based on HW's ip_conntrack_irc.c as well as other modules
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/textsearch.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/netfilter.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/netfilter/nf_conntrack_amanda.h>
+
+static unsigned int master_timeout __read_mostly = 300;
+static char *ts_algo = "kmp";
+
+MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>");
+MODULE_DESCRIPTION("Amanda connection tracking module");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ip_conntrack_amanda");
+
+module_param(master_timeout, uint, 0600);
+MODULE_PARM_DESC(master_timeout, "timeout for the master connection");
+module_param(ts_algo, charp, 0400);
+MODULE_PARM_DESC(ts_algo, "textsearch algorithm to use (default kmp)");
+
+unsigned int (*nf_nat_amanda_hook)(struct sk_buff **pskb,
+				   enum ip_conntrack_info ctinfo,
+				   unsigned int matchoff,
+				   unsigned int matchlen,
+				   struct nf_conntrack_expect *exp)
+				   __read_mostly;
+EXPORT_SYMBOL_GPL(nf_nat_amanda_hook);
+
+enum amanda_strings {
+	SEARCH_CONNECT,
+	SEARCH_NEWLINE,
+	SEARCH_DATA,
+	SEARCH_MESG,
+	SEARCH_INDEX,
+};
+
+static struct {
+	char			*string;
+	size_t			len;
+	struct ts_config	*ts;
+} search[] __read_mostly = {
+	[SEARCH_CONNECT] = {
+		.string	= "CONNECT ",
+		.len	= 8,
+	},
+	[SEARCH_NEWLINE] = {
+		.string	= "\n",
+		.len	= 1,
+	},
+	[SEARCH_DATA] = {
+		.string	= "DATA ",
+		.len	= 5,
+	},
+	[SEARCH_MESG] = {
+		.string	= "MESG ",
+		.len	= 5,
+	},
+	[SEARCH_INDEX] = {
+		.string = "INDEX ",
+		.len	= 6,
+	},
+};
+
+static int amanda_help(struct sk_buff **pskb,
+		       unsigned int protoff,
+		       struct nf_conn *ct,
+		       enum ip_conntrack_info ctinfo)
+{
+	struct ts_state ts;
+	struct nf_conntrack_expect *exp;
+	struct nf_conntrack_tuple *tuple;
+	unsigned int dataoff, start, stop, off, i;
+	char pbuf[sizeof("65535")], *tmp;
+	u_int16_t len;
+	__be16 port;
+	int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+	int ret = NF_ACCEPT;
+	typeof(nf_nat_amanda_hook) nf_nat_amanda;
+
+	/* Only look at packets from the Amanda server */
+	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
+		return NF_ACCEPT;
+
+	/* increase the UDP timeout of the master connection as replies from
+	 * Amanda clients to the server can be quite delayed */
+	nf_ct_refresh(ct, *pskb, master_timeout * HZ);
+
+	/* No data? */
+	dataoff = protoff + sizeof(struct udphdr);
+	if (dataoff >= (*pskb)->len) {
+		if (net_ratelimit())
+			printk("amanda_help: skblen = %u\n", (*pskb)->len);
+		return NF_ACCEPT;
+	}
+
+	memset(&ts, 0, sizeof(ts));
+	start = skb_find_text(*pskb, dataoff, (*pskb)->len,
+			      search[SEARCH_CONNECT].ts, &ts);
+	if (start == UINT_MAX)
+		goto out;
+	start += dataoff + search[SEARCH_CONNECT].len;
+
+	memset(&ts, 0, sizeof(ts));
+	stop = skb_find_text(*pskb, start, (*pskb)->len,
+			     search[SEARCH_NEWLINE].ts, &ts);
+	if (stop == UINT_MAX)
+		goto out;
+	stop += start;
+
+	for (i = SEARCH_DATA; i <= SEARCH_INDEX; i++) {
+		memset(&ts, 0, sizeof(ts));
+		off = skb_find_text(*pskb, start, stop, search[i].ts, &ts);
+		if (off == UINT_MAX)
+			continue;
+		off += start + search[i].len;
+
+		len = min_t(unsigned int, sizeof(pbuf) - 1, stop - off);
+		if (skb_copy_bits(*pskb, off, pbuf, len))
+			break;
+		pbuf[len] = '\0';
+
+		port = htons(simple_strtoul(pbuf, &tmp, 10));
+		len = tmp - pbuf;
+		if (port == 0 || len > 5)
+			break;
+
+		exp = nf_conntrack_expect_alloc(ct);
+		if (exp == NULL) {
+			ret = NF_DROP;
+			goto out;
+		}
+		tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+		nf_conntrack_expect_init(exp, family,
+					 &tuple->src.u3, &tuple->dst.u3,
+					 IPPROTO_TCP, NULL, &port);
+
+		nf_nat_amanda = rcu_dereference(nf_nat_amanda_hook);
+		if (nf_nat_amanda && ct->status & IPS_NAT_MASK)
+			ret = nf_nat_amanda(pskb, ctinfo, off - dataoff,
+					    len, exp);
+		else if (nf_conntrack_expect_related(exp) != 0)
+			ret = NF_DROP;
+		nf_conntrack_expect_put(exp);
+	}
+
+out:
+	return ret;
+}
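For orientation, the search[] strings above mean the helper looks for a reply line of roughly this shape (illustrative only; the port numbers are made up and the exact Amanda message format is not spelled out in this patch):

	CONNECT DATA 50168 MESG 50169 INDEX 50170

For each DATA/MESG/INDEX keyword found before the newline it parses the decimal port that follows and registers a TCP expectation (or hands off to the NAT helper) so the corresponding sub-channel is let through.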
+
+static struct nf_conntrack_helper amanda_helper[2] __read_mostly = {
+	{
+		.name			= "amanda",
+		.max_expected		= 3,
+		.timeout		= 180,
+		.me			= THIS_MODULE,
+		.help			= amanda_help,
+		.tuple.src.l3num	= AF_INET,
+		.tuple.src.u.udp.port	= __constant_htons(10080),
+		.tuple.dst.protonum	= IPPROTO_UDP,
+		.mask.src.l3num		= 0xFFFF,
+		.mask.src.u.udp.port	= __constant_htons(0xFFFF),
+		.mask.dst.protonum	= 0xFF,
+	},
+	{
+		.name			= "amanda",
+		.max_expected		= 3,
+		.timeout		= 180,
+		.me			= THIS_MODULE,
+		.help			= amanda_help,
+		.tuple.src.l3num	= AF_INET6,
+		.tuple.src.u.udp.port	= __constant_htons(10080),
+		.tuple.dst.protonum	= IPPROTO_UDP,
+		.mask.src.l3num		= 0xFFFF,
+		.mask.src.u.udp.port	= __constant_htons(0xFFFF),
+		.mask.dst.protonum	= 0xFF,
+	},
+};
+
+static void __exit nf_conntrack_amanda_fini(void)
+{
+	int i;
+
+	nf_conntrack_helper_unregister(&amanda_helper[0]);
+	nf_conntrack_helper_unregister(&amanda_helper[1]);
+	for (i = 0; i < ARRAY_SIZE(search); i++)
+		textsearch_destroy(search[i].ts);
+}
+
+static int __init nf_conntrack_amanda_init(void)
+{
+	int ret, i;
+
+	ret = -ENOMEM;
+	for (i = 0; i < ARRAY_SIZE(search); i++) {
+		search[i].ts = textsearch_prepare(ts_algo, search[i].string,
+						  search[i].len,
+						  GFP_KERNEL, TS_AUTOLOAD);
+		if (search[i].ts == NULL)
+			goto err1;
+	}
+	ret = nf_conntrack_helper_register(&amanda_helper[0]);
+	if (ret < 0)
+		goto err1;
+	ret = nf_conntrack_helper_register(&amanda_helper[1]);
+	if (ret < 0)
+		goto err2;
+	return 0;
+
+err2:
+	nf_conntrack_helper_unregister(&amanda_helper[0]);
+err1:
+	for (; i >= 0; i--) {
+		if (search[i].ts)
+			textsearch_destroy(search[i].ts);
+	}
+	return ret;
+}
+
+module_init(nf_conntrack_amanda_init);
+module_exit(nf_conntrack_amanda_fini);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index de0567b..9b02ec4 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -46,15 +46,12 @@
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/socket.h>
-
-/* This rwlock protects the main hash table, protocol/helper/expected
-   registrations, conntrack timers*/
-#define ASSERT_READ_LOCK(x)
-#define ASSERT_WRITE_LOCK(x)
+#include <linux/mm.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
-#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_expect.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_core.h>
 
@@ -67,92 +64,32 @@
 #endif
 
 DEFINE_RWLOCK(nf_conntrack_lock);
+EXPORT_SYMBOL_GPL(nf_conntrack_lock);
 
 /* nf_conntrack_standalone needs this */
 atomic_t nf_conntrack_count = ATOMIC_INIT(0);
+EXPORT_SYMBOL_GPL(nf_conntrack_count);
 
-void (*nf_conntrack_destroyed)(struct nf_conn *conntrack) = NULL;
-LIST_HEAD(nf_conntrack_expect_list);
-struct nf_conntrack_protocol **nf_ct_protos[PF_MAX] __read_mostly;
-struct nf_conntrack_l3proto *nf_ct_l3protos[PF_MAX] __read_mostly;
-static LIST_HEAD(helpers);
-unsigned int nf_conntrack_htable_size __read_mostly = 0;
+void (*nf_conntrack_destroyed)(struct nf_conn *conntrack);
+EXPORT_SYMBOL_GPL(nf_conntrack_destroyed);
+
+unsigned int nf_conntrack_htable_size __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
+
 int nf_conntrack_max __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_max);
+
 struct list_head *nf_conntrack_hash __read_mostly;
-static kmem_cache_t *nf_conntrack_expect_cachep __read_mostly;
-struct nf_conn nf_conntrack_untracked;
+EXPORT_SYMBOL_GPL(nf_conntrack_hash);
+
+struct nf_conn nf_conntrack_untracked __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
+
 unsigned int nf_ct_log_invalid __read_mostly;
-static LIST_HEAD(unconfirmed);
+LIST_HEAD(unconfirmed);
 static int nf_conntrack_vmalloc __read_mostly;
 
 static unsigned int nf_conntrack_next_id;
-static unsigned int nf_conntrack_expect_next_id;
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain);
-ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain);
-
-DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
-
-/* deliver cached events and clear cache entry - must be called with locally
- * disabled softirqs */
-static inline void
-__nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
-{
-	DEBUGP("ecache: delivering events for %p\n", ecache->ct);
-	if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct)
-	    && ecache->events)
-		atomic_notifier_call_chain(&nf_conntrack_chain, ecache->events,
-				    ecache->ct);
-
-	ecache->events = 0;
-	nf_ct_put(ecache->ct);
-	ecache->ct = NULL;
-}
-
-/* Deliver all cached events for a particular conntrack. This is called
- * by code prior to async packet handling for freeing the skb */
-void nf_ct_deliver_cached_events(const struct nf_conn *ct)
-{
-	struct nf_conntrack_ecache *ecache;
-
-	local_bh_disable();
-	ecache = &__get_cpu_var(nf_conntrack_ecache);
-	if (ecache->ct == ct)
-		__nf_ct_deliver_cached_events(ecache);
-	local_bh_enable();
-}
-
-/* Deliver cached events for old pending events, if current conntrack != old */
-void __nf_ct_event_cache_init(struct nf_conn *ct)
-{
-	struct nf_conntrack_ecache *ecache;
-	
-	/* take care of delivering potentially old events */
-	ecache = &__get_cpu_var(nf_conntrack_ecache);
-	BUG_ON(ecache->ct == ct);
-	if (ecache->ct)
-		__nf_ct_deliver_cached_events(ecache);
-	/* initialize for this conntrack/packet */
-	ecache->ct = ct;
-	nf_conntrack_get(&ct->ct_general);
-}
-
-/* flush the event cache - touches other CPU's data and must not be called
- * while packets are still passing through the code */
-static void nf_ct_event_cache_flush(void)
-{
-	struct nf_conntrack_ecache *ecache;
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		ecache = &per_cpu(nf_conntrack_ecache, cpu);
-		if (ecache->ct)
-			nf_ct_put(ecache->ct);
-	}
-}
-#else
-static inline void nf_ct_event_cache_flush(void) {}
-#endif /* CONFIG_NF_CONNTRACK_EVENTS */
 
 DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);
@@ -171,7 +108,7 @@
 	size_t size;
 
 	/* slab cache pointer */
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 
 	/* allocated slab cache + modules which uses this slab cache */
 	int use;
@@ -184,85 +121,6 @@
 /* This avoids calling kmem_cache_create() with same name simultaneously */
 static DEFINE_MUTEX(nf_ct_cache_mutex);
 
-extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;
-struct nf_conntrack_protocol *
-__nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol)
-{
-	if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
-		return &nf_conntrack_generic_protocol;
-
-	return nf_ct_protos[l3proto][protocol];
-}
-
-/* this is guaranteed to always return a valid protocol helper, since
- * it falls back to generic_protocol */
-struct nf_conntrack_protocol *
-nf_ct_proto_find_get(u_int16_t l3proto, u_int8_t protocol)
-{
-	struct nf_conntrack_protocol *p;
-
-	preempt_disable();
-	p = __nf_ct_proto_find(l3proto, protocol);
-	if (!try_module_get(p->me))
-		p = &nf_conntrack_generic_protocol;
-	preempt_enable();
-	
-	return p;
-}
-
-void nf_ct_proto_put(struct nf_conntrack_protocol *p)
-{
-	module_put(p->me);
-}
-
-struct nf_conntrack_l3proto *
-nf_ct_l3proto_find_get(u_int16_t l3proto)
-{
-	struct nf_conntrack_l3proto *p;
-
-	preempt_disable();
-	p = __nf_ct_l3proto_find(l3proto);
-	if (!try_module_get(p->me))
-		p = &nf_conntrack_generic_l3proto;
-	preempt_enable();
-
-	return p;
-}
-
-void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p)
-{
-	module_put(p->me);
-}
-
-int
-nf_ct_l3proto_try_module_get(unsigned short l3proto)
-{
-	int ret;
-	struct nf_conntrack_l3proto *p;
-
-retry:	p = nf_ct_l3proto_find_get(l3proto);
-	if (p == &nf_conntrack_generic_l3proto) {
-		ret = request_module("nf_conntrack-%d", l3proto);
-		if (!ret)
-			goto retry;
-
-		return -EPROTOTYPE;
-	}
-
-	return 0;
-}
-
-void nf_ct_l3proto_module_put(unsigned short l3proto)
-{
-	struct nf_conntrack_l3proto *p;
-
-	preempt_disable();
-	p = __nf_ct_l3proto_find(l3proto);
-	preempt_enable();
-
-	module_put(p->me);
-}
-
 static int nf_conntrack_hash_rnd_initted;
 static unsigned int nf_conntrack_hash_rnd;
 
@@ -289,7 +147,7 @@
 {
 	int ret = 0;
 	char *cache_name;
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 
 	DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n",
 	       features, name, size);
@@ -363,11 +221,12 @@
 	mutex_unlock(&nf_ct_cache_mutex);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_register_cache);
 
 /* FIXME: In the current, only nf_conntrack_cleanup() can call this function. */
 void nf_conntrack_unregister_cache(u_int32_t features)
 {
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 	char *name;
 
 	/*
@@ -397,6 +256,7 @@
 
 	mutex_unlock(&nf_ct_cache_mutex);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_unregister_cache);
 
 int
 nf_ct_get_tuple(const struct sk_buff *skb,
@@ -406,7 +266,7 @@
 		u_int8_t protonum,
 		struct nf_conntrack_tuple *tuple,
 		const struct nf_conntrack_l3proto *l3proto,
-		const struct nf_conntrack_protocol *protocol)
+		const struct nf_conntrack_l4proto *l4proto)
 {
 	NF_CT_TUPLE_U_BLANK(tuple);
 
@@ -417,14 +277,15 @@
 	tuple->dst.protonum = protonum;
 	tuple->dst.dir = IP_CT_DIR_ORIGINAL;
 
-	return protocol->pkt_to_tuple(skb, dataoff, tuple);
+	return l4proto->pkt_to_tuple(skb, dataoff, tuple);
 }
+EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
 
 int
 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
 		   const struct nf_conntrack_tuple *orig,
 		   const struct nf_conntrack_l3proto *l3proto,
-		   const struct nf_conntrack_protocol *protocol)
+		   const struct nf_conntrack_l4proto *l4proto)
 {
 	NF_CT_TUPLE_U_BLANK(inverse);
 
@@ -435,111 +296,14 @@
 	inverse->dst.dir = !orig->dst.dir;
 
 	inverse->dst.protonum = orig->dst.protonum;
-	return protocol->invert_tuple(inverse, orig);
+	return l4proto->invert_tuple(inverse, orig);
 }
-
-/* nf_conntrack_expect helper functions */
-void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
-{
-	struct nf_conn_help *master_help = nfct_help(exp->master);
-
-	NF_CT_ASSERT(master_help);
-	ASSERT_WRITE_LOCK(&nf_conntrack_lock);
-	NF_CT_ASSERT(!timer_pending(&exp->timeout));
-
-	list_del(&exp->list);
-	NF_CT_STAT_INC(expect_delete);
-	master_help->expecting--;
-	nf_conntrack_expect_put(exp);
-}
-
-static void expectation_timed_out(unsigned long ul_expect)
-{
-	struct nf_conntrack_expect *exp = (void *)ul_expect;
-
-	write_lock_bh(&nf_conntrack_lock);
-	nf_ct_unlink_expect(exp);
-	write_unlock_bh(&nf_conntrack_lock);
-	nf_conntrack_expect_put(exp);
-}
-
-struct nf_conntrack_expect *
-__nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple)
-{
-	struct nf_conntrack_expect *i;
-	
-	list_for_each_entry(i, &nf_conntrack_expect_list, list) {
-		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
-			return i;
-	}
-	return NULL;
-}
-
-/* Just find a expectation corresponding to a tuple. */
-struct nf_conntrack_expect *
-nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple)
-{
-	struct nf_conntrack_expect *i;
-	
-	read_lock_bh(&nf_conntrack_lock);
-	i = __nf_conntrack_expect_find(tuple);
-	if (i)
-		atomic_inc(&i->use);
-	read_unlock_bh(&nf_conntrack_lock);
-
-	return i;
-}
-
-/* If an expectation for this connection is found, it gets delete from
- * global list then returned. */
-static struct nf_conntrack_expect *
-find_expectation(const struct nf_conntrack_tuple *tuple)
-{
-	struct nf_conntrack_expect *i;
-
-	list_for_each_entry(i, &nf_conntrack_expect_list, list) {
-	/* If master is not in hash table yet (ie. packet hasn't left
-	   this machine yet), how can other end know about expected?
-	   Hence these are not the droids you are looking for (if
-	   master ct never got confirmed, we'd hold a reference to it
-	   and weird things would happen to future packets). */
-		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)
-		    && nf_ct_is_confirmed(i->master)) {
-			if (i->flags & NF_CT_EXPECT_PERMANENT) {
-				atomic_inc(&i->use);
-				return i;
-			} else if (del_timer(&i->timeout)) {
-				nf_ct_unlink_expect(i);
-				return i;
-			}
-		}
-	}
-	return NULL;
-}
-
-/* delete all expectations for this conntrack */
-void nf_ct_remove_expectations(struct nf_conn *ct)
-{
-	struct nf_conntrack_expect *i, *tmp;
-	struct nf_conn_help *help = nfct_help(ct);
-
-	/* Optimization: most connection never expect any others. */
-	if (!help || help->expecting == 0)
-		return;
-
-	list_for_each_entry_safe(i, tmp, &nf_conntrack_expect_list, list) {
-		if (i->master == ct && del_timer(&i->timeout)) {
-			nf_ct_unlink_expect(i);
-			nf_conntrack_expect_put(i);
- 		}
-	}
-}
+EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
 
 static void
 clean_from_lists(struct nf_conn *ct)
 {
 	DEBUGP("clean_from_lists(%p)\n", ct);
-	ASSERT_WRITE_LOCK(&nf_conntrack_lock);
 	list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
 	list_del(&ct->tuplehash[IP_CT_DIR_REPLY].list);
 
@@ -551,8 +315,9 @@
 destroy_conntrack(struct nf_conntrack *nfct)
 {
 	struct nf_conn *ct = (struct nf_conn *)nfct;
+	struct nf_conn_help *help = nfct_help(ct);
 	struct nf_conntrack_l3proto *l3proto;
-	struct nf_conntrack_protocol *proto;
+	struct nf_conntrack_l4proto *l4proto;
 
 	DEBUGP("destroy_conntrack(%p)\n", ct);
 	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
@@ -561,6 +326,9 @@
 	nf_conntrack_event(IPCT_DESTROY, ct);
 	set_bit(IPS_DYING_BIT, &ct->status);
 
+	if (help && help->helper && help->helper->destroy)
+		help->helper->destroy(ct);
+
 	/* To make sure we don't get any weird locking issues here:
 	 * destroy_conntrack() MUST NOT be called with a write lock
 	 * to nf_conntrack_lock!!! -HW */
@@ -568,9 +336,9 @@
 	if (l3proto && l3proto->destroy)
 		l3proto->destroy(ct);
 
-	proto = __nf_ct_proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
-	if (proto && proto->destroy)
-		proto->destroy(ct);
+	l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
+	if (l4proto && l4proto->destroy)
+		l4proto->destroy(ct);
 
 	if (nf_conntrack_destroyed)
 		nf_conntrack_destroyed(ct);
@@ -618,7 +386,6 @@
 	struct nf_conntrack_tuple_hash *h;
 	unsigned int hash = hash_conntrack(tuple);
 
-	ASSERT_READ_LOCK(&nf_conntrack_lock);
 	list_for_each_entry(h, &nf_conntrack_hash[hash], list) {
 		if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
 		    nf_ct_tuple_equal(tuple, &h->tuple)) {
@@ -630,6 +397,7 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(__nf_conntrack_find);
 
 /* Find a connection corresponding to a tuple. */
 struct nf_conntrack_tuple_hash *
@@ -646,6 +414,7 @@
 
 	return h;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
 
 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 				       unsigned int hash,
@@ -669,6 +438,7 @@
 	__nf_conntrack_hash_insert(ct, hash, repl_hash);
 	write_unlock_bh(&nf_conntrack_lock);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);
 
 /* Confirm a connection given skb; places it in hash table */
 int
@@ -746,6 +516,7 @@
 	write_unlock_bh(&nf_conntrack_lock);
 	return NF_DROP;
 }
+EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
 
 /* Returns true if a connection correspondings to the tuple (required
    for NAT). */
@@ -761,6 +532,7 @@
 
 	return h != NULL;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
 
 /* There's a small race here where we may free a just-assured
    connection.  Too bad: we're in trouble anyway. */
@@ -794,53 +566,13 @@
 	return dropped;
 }
 
-static struct nf_conntrack_helper *
-__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
-{
-	struct nf_conntrack_helper *h;
-
-	list_for_each_entry(h, &helpers, list) {
-		if (nf_ct_tuple_mask_cmp(tuple, &h->tuple, &h->mask))
-			return h;
-	}
-	return NULL;
-}
-
-struct nf_conntrack_helper *
-nf_ct_helper_find_get( const struct nf_conntrack_tuple *tuple)
-{
-	struct nf_conntrack_helper *helper;
-
-	/* need nf_conntrack_lock to assure that helper exists until
-	 * try_module_get() is called */
-	read_lock_bh(&nf_conntrack_lock);
-
-	helper = __nf_ct_helper_find(tuple);
-	if (helper) {
-		/* need to increase module usage count to assure helper will
-		 * not go away while the caller is e.g. busy putting a
-		 * conntrack in the hash that uses the helper */
-		if (!try_module_get(helper->me))
-			helper = NULL;
-	}
-
-	read_unlock_bh(&nf_conntrack_lock);
-
-	return helper;
-}
-
-void nf_ct_helper_put(struct nf_conntrack_helper *helper)
-{
-	module_put(helper->me);
-}
-
 static struct nf_conn *
 __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
 		     const struct nf_conntrack_tuple *repl,
-		     const struct nf_conntrack_l3proto *l3proto)
+		     const struct nf_conntrack_l3proto *l3proto,
+		     u_int32_t features)
 {
 	struct nf_conn *conntrack = NULL;
-	u_int32_t features = 0;
 	struct nf_conntrack_helper *helper;
 
 	if (unlikely(!nf_conntrack_hash_rnd_initted)) {
@@ -866,12 +598,13 @@
 	}
 
 	/*  find features needed by this conntrack. */
-	features = l3proto->get_features(orig);
+	features |= l3proto->get_features(orig);
 
 	/* FIXME: protect helper list per RCU */
 	read_lock_bh(&nf_conntrack_lock);
 	helper = __nf_ct_helper_find(repl);
-	if (helper)
+	/* NAT might want to assign a helper later */
+	if (helper || features & NF_CT_F_NAT)
 		features |= NF_CT_F_HELP;
 	read_unlock_bh(&nf_conntrack_lock);
 
@@ -916,8 +649,9 @@
 	struct nf_conntrack_l3proto *l3proto;
 
 	l3proto = __nf_ct_l3proto_find(orig->src.l3num);
-	return __nf_conntrack_alloc(orig, repl, l3proto);
+	return __nf_conntrack_alloc(orig, repl, l3proto, 0);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
 
 void nf_conntrack_free(struct nf_conn *conntrack)
 {
@@ -928,32 +662,40 @@
 	kmem_cache_free(nf_ct_cache[features].cachep, conntrack);
 	atomic_dec(&nf_conntrack_count);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_free);
 
 /* Allocate a new conntrack: we return -ENOMEM if classification
    failed due to stress.  Otherwise it really is unclassifiable. */
 static struct nf_conntrack_tuple_hash *
 init_conntrack(const struct nf_conntrack_tuple *tuple,
 	       struct nf_conntrack_l3proto *l3proto,
-	       struct nf_conntrack_protocol *protocol,
+	       struct nf_conntrack_l4proto *l4proto,
 	       struct sk_buff *skb,
 	       unsigned int dataoff)
 {
 	struct nf_conn *conntrack;
 	struct nf_conntrack_tuple repl_tuple;
 	struct nf_conntrack_expect *exp;
+	u_int32_t features = 0;
 
-	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, protocol)) {
+	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
 		DEBUGP("Can't invert tuple.\n");
 		return NULL;
 	}
 
-	conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto);
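+	/* If an expectation with a helper attached already exists for this
+	 * tuple, the new conntrack will need helper storage, so request the
+	 * NF_CT_F_HELP feature up front. */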
+	read_lock_bh(&nf_conntrack_lock);
+	exp = __nf_conntrack_expect_find(tuple);
+	if (exp && exp->helper)
+		features = NF_CT_F_HELP;
+	read_unlock_bh(&nf_conntrack_lock);
+
+	conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto, features);
 	if (conntrack == NULL || IS_ERR(conntrack)) {
 		DEBUGP("Can't allocate conntrack.\n");
 		return (struct nf_conntrack_tuple_hash *)conntrack;
 	}
 
-	if (!protocol->new(conntrack, skb, dataoff)) {
+	if (!l4proto->new(conntrack, skb, dataoff)) {
 		nf_conntrack_free(conntrack);
 		DEBUGP("init conntrack: can't track with proto module\n");
 		return NULL;
@@ -968,6 +710,8 @@
 		/* Welcome, Mr. Bond.  We've been expecting you... */
 		__set_bit(IPS_EXPECTED_BIT, &conntrack->status);
 		conntrack->master = exp->master;
+		if (exp->helper)
+			nfct_help(conntrack)->helper = exp->helper;
 #ifdef CONFIG_NF_CONNTRACK_MARK
 		conntrack->mark = exp->master->mark;
 #endif
@@ -1005,7 +749,7 @@
 		  u_int16_t l3num,
 		  u_int8_t protonum,
 		  struct nf_conntrack_l3proto *l3proto,
-		  struct nf_conntrack_protocol *proto,
+		  struct nf_conntrack_l4proto *l4proto,
 		  int *set_reply,
 		  enum ip_conntrack_info *ctinfo)
 {
@@ -1015,7 +759,7 @@
 
 	if (!nf_ct_get_tuple(skb, (unsigned int)(skb->nh.raw - skb->data),
 			     dataoff, l3num, protonum, &tuple, l3proto,
-			     proto)) {
+			     l4proto)) {
 		DEBUGP("resolve_normal_ct: Can't get tuple\n");
 		return NULL;
 	}
@@ -1023,7 +767,7 @@
 	/* look for tuple match */
 	h = nf_conntrack_find_get(&tuple, NULL);
 	if (!h) {
-		h = init_conntrack(&tuple, l3proto, proto, skb, dataoff);
+		h = init_conntrack(&tuple, l3proto, l4proto, skb, dataoff);
 		if (!h)
 			return NULL;
 		if (IS_ERR(h))
@@ -1061,7 +805,7 @@
 	struct nf_conn *ct;
 	enum ip_conntrack_info ctinfo;
 	struct nf_conntrack_l3proto *l3proto;
-	struct nf_conntrack_protocol *proto;
+	struct nf_conntrack_l4proto *l4proto;
 	unsigned int dataoff;
 	u_int8_t protonum;
 	int set_reply = 0;
@@ -1079,19 +823,19 @@
 		return -ret;
 	}
 
-	proto = __nf_ct_proto_find((u_int16_t)pf, protonum);
+	l4proto = __nf_ct_l4proto_find((u_int16_t)pf, protonum);
 
 	/* It may be an special packet, error, unclean...
 	 * inverse of the return code tells to the netfilter
 	 * core what to do with the packet. */
-	if (proto->error != NULL &&
-	    (ret = proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
+	if (l4proto->error != NULL &&
+	    (ret = l4proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
 		NF_CT_STAT_INC(error);
 		NF_CT_STAT_INC(invalid);
 		return -ret;
 	}
 
-	ct = resolve_normal_ct(*pskb, dataoff, pf, protonum, l3proto, proto,
+	ct = resolve_normal_ct(*pskb, dataoff, pf, protonum, l3proto, l4proto,
 			       &set_reply, &ctinfo);
 	if (!ct) {
 		/* Not valid part of a connection */
@@ -1107,7 +851,7 @@
 
 	NF_CT_ASSERT((*pskb)->nfct);
 
-	ret = proto->packet(ct, *pskb, dataoff, ctinfo, pf, hooknum);
+	ret = l4proto->packet(ct, *pskb, dataoff, ctinfo, pf, hooknum);
 	if (ret < 0) {
 		/* Invalid: inverse of the return code tells
 		 * the netfilter core what to do */
@@ -1123,255 +867,38 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_in);
 
 int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
 			 const struct nf_conntrack_tuple *orig)
 {
 	return nf_ct_invert_tuple(inverse, orig,
 				  __nf_ct_l3proto_find(orig->src.l3num),
-				  __nf_ct_proto_find(orig->src.l3num,
+				  __nf_ct_l4proto_find(orig->src.l3num,
 						     orig->dst.protonum));
 }
+EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
 
-/* Would two expected things clash? */
-static inline int expect_clash(const struct nf_conntrack_expect *a,
-			       const struct nf_conntrack_expect *b)
+/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
+   implicitly racy: see __nf_conntrack_confirm */
+void nf_conntrack_alter_reply(struct nf_conn *ct,
+			      const struct nf_conntrack_tuple *newreply)
 {
-	/* Part covered by intersection of masks must be unequal,
-	   otherwise they clash */
-	struct nf_conntrack_tuple intersect_mask;
-	int count;
-
-	intersect_mask.src.l3num = a->mask.src.l3num & b->mask.src.l3num;
-	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
-	intersect_mask.dst.u.all = a->mask.dst.u.all & b->mask.dst.u.all;
-	intersect_mask.dst.protonum = a->mask.dst.protonum
-					& b->mask.dst.protonum;
-
-	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
-		intersect_mask.src.u3.all[count] =
-			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
-	}
-
-	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
-		intersect_mask.dst.u3.all[count] =
-			a->mask.dst.u3.all[count] & b->mask.dst.u3.all[count];
-	}
-
-	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
-}
-
-static inline int expect_matches(const struct nf_conntrack_expect *a,
-				 const struct nf_conntrack_expect *b)
-{
-	return a->master == b->master
-		&& nf_ct_tuple_equal(&a->tuple, &b->tuple)
-		&& nf_ct_tuple_equal(&a->mask, &b->mask);
-}
-
-/* Generally a bad idea to call this: could have matched already. */
-void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp)
-{
-	struct nf_conntrack_expect *i;
-
-	write_lock_bh(&nf_conntrack_lock);
-	/* choose the the oldest expectation to evict */
-	list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
-		if (expect_matches(i, exp) && del_timer(&i->timeout)) {
-			nf_ct_unlink_expect(i);
-			write_unlock_bh(&nf_conntrack_lock);
-			nf_conntrack_expect_put(i);
-			return;
-		}
-	}
-	write_unlock_bh(&nf_conntrack_lock);
-}
-
-/* We don't increase the master conntrack refcount for non-fulfilled
- * conntracks. During the conntrack destruction, the expectations are
- * always killed before the conntrack itself */
-struct nf_conntrack_expect *nf_conntrack_expect_alloc(struct nf_conn *me)
-{
-	struct nf_conntrack_expect *new;
-
-	new = kmem_cache_alloc(nf_conntrack_expect_cachep, GFP_ATOMIC);
-	if (!new) {
-		DEBUGP("expect_related: OOM allocating expect\n");
-		return NULL;
-	}
-	new->master = me;
-	atomic_set(&new->use, 1);
-	return new;
-}
-
-void nf_conntrack_expect_put(struct nf_conntrack_expect *exp)
-{
-	if (atomic_dec_and_test(&exp->use))
-		kmem_cache_free(nf_conntrack_expect_cachep, exp);
-}
-
-static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp)
-{
-	struct nf_conn_help *master_help = nfct_help(exp->master);
-
-	atomic_inc(&exp->use);
-	master_help->expecting++;
-	list_add(&exp->list, &nf_conntrack_expect_list);
-
-	init_timer(&exp->timeout);
-	exp->timeout.data = (unsigned long)exp;
-	exp->timeout.function = expectation_timed_out;
-	exp->timeout.expires = jiffies + master_help->helper->timeout * HZ;
-	add_timer(&exp->timeout);
-
-	exp->id = ++nf_conntrack_expect_next_id;
-	atomic_inc(&exp->use);
-	NF_CT_STAT_INC(expect_create);
-}
-
-/* Race with expectations being used means we could have none to find; OK. */
-static void evict_oldest_expect(struct nf_conn *master)
-{
-	struct nf_conntrack_expect *i;
-
-	list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
-		if (i->master == master) {
-			if (del_timer(&i->timeout)) {
-				nf_ct_unlink_expect(i);
-				nf_conntrack_expect_put(i);
-			}
-			break;
-		}
-	}
-}
-
-static inline int refresh_timer(struct nf_conntrack_expect *i)
-{
-	struct nf_conn_help *master_help = nfct_help(i->master);
-
-	if (!del_timer(&i->timeout))
-		return 0;
-
-	i->timeout.expires = jiffies + master_help->helper->timeout*HZ;
-	add_timer(&i->timeout);
-	return 1;
-}
-
-int nf_conntrack_expect_related(struct nf_conntrack_expect *expect)
-{
-	struct nf_conntrack_expect *i;
-	struct nf_conn *master = expect->master;
-	struct nf_conn_help *master_help = nfct_help(master);
-	int ret;
-
-	NF_CT_ASSERT(master_help);
-
-	DEBUGP("nf_conntrack_expect_related %p\n", related_to);
-	DEBUGP("tuple: "); NF_CT_DUMP_TUPLE(&expect->tuple);
-	DEBUGP("mask:  "); NF_CT_DUMP_TUPLE(&expect->mask);
-
-	write_lock_bh(&nf_conntrack_lock);
-	list_for_each_entry(i, &nf_conntrack_expect_list, list) {
-		if (expect_matches(i, expect)) {
-			/* Refresh timer: if it's dying, ignore.. */
-			if (refresh_timer(i)) {
-				ret = 0;
-				goto out;
-			}
-		} else if (expect_clash(i, expect)) {
-			ret = -EBUSY;
-			goto out;
-		}
-	}
-	/* Will be over limit? */
-	if (master_help->helper->max_expected &&
-	    master_help->expecting >= master_help->helper->max_expected)
-		evict_oldest_expect(master);
-
-	nf_conntrack_expect_insert(expect);
-	nf_conntrack_expect_event(IPEXP_NEW, expect);
-	ret = 0;
-out:
-	write_unlock_bh(&nf_conntrack_lock);
-	return ret;
-}
-
-int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
-{
-	int ret;
-	BUG_ON(me->timeout == 0);
-
-	ret = nf_conntrack_register_cache(NF_CT_F_HELP, "nf_conntrack:help",
-					  sizeof(struct nf_conn)
-					  + sizeof(struct nf_conn_help)
-					  + __alignof__(struct nf_conn_help));
-	if (ret < 0) {
-		printk(KERN_ERR "nf_conntrack_helper_reigster: Unable to create slab cache for conntracks\n");
-		return ret;
-	}
-	write_lock_bh(&nf_conntrack_lock);
-	list_add(&me->list, &helpers);
-	write_unlock_bh(&nf_conntrack_lock);
-
-	return 0;
-}
-
-struct nf_conntrack_helper *
-__nf_conntrack_helper_find_byname(const char *name)
-{
-	struct nf_conntrack_helper *h;
-
-	list_for_each_entry(h, &helpers, list) {
-		if (!strcmp(h->name, name))
-			return h;
-	}
-
-	return NULL;
-}
-
-static inline void unhelp(struct nf_conntrack_tuple_hash *i,
-			  const struct nf_conntrack_helper *me)
-{
-	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i);
 	struct nf_conn_help *help = nfct_help(ct);
 
-	if (help && help->helper == me) {
-		nf_conntrack_event(IPCT_HELPER, ct);
-		help->helper = NULL;
-	}
-}
-
-void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
-{
-	unsigned int i;
-	struct nf_conntrack_tuple_hash *h;
-	struct nf_conntrack_expect *exp, *tmp;
-
-	/* Need write lock here, to delete helper. */
 	write_lock_bh(&nf_conntrack_lock);
-	list_del(&me->list);
+	/* Should be unconfirmed, so not in hash table yet */
+	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
 
-	/* Get rid of expectations */
-	list_for_each_entry_safe(exp, tmp, &nf_conntrack_expect_list, list) {
-		struct nf_conn_help *help = nfct_help(exp->master);
-		if (help->helper == me && del_timer(&exp->timeout)) {
-			nf_ct_unlink_expect(exp);
-			nf_conntrack_expect_put(exp);
-		}
-	}
+	DEBUGP("Altering reply tuple of %p to ", ct);
+	NF_CT_DUMP_TUPLE(newreply);
 
-	/* Get rid of expecteds, set helpers to NULL. */
-	list_for_each_entry(h, &unconfirmed, list)
-		unhelp(h, me);
-	for (i = 0; i < nf_conntrack_htable_size; i++) {
-		list_for_each_entry(h, &nf_conntrack_hash[i], list)
-			unhelp(h, me);
-	}
+	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
+	if (!ct->master && help && help->expecting == 0)
+		help->helper = __nf_ct_helper_find(newreply);
 	write_unlock_bh(&nf_conntrack_lock);
-
-	/* Someone could be still looking at the helper in a bh. */
-	synchronize_net();
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
 
 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
 void __nf_ct_refresh_acct(struct nf_conn *ct,
@@ -1398,9 +925,14 @@
 		ct->timeout.expires = extra_jiffies;
 		event = IPCT_REFRESH;
 	} else {
-		/* Need del_timer for race avoidance (may already be dying). */
-		if (del_timer(&ct->timeout)) {
-			ct->timeout.expires = jiffies + extra_jiffies;
+		unsigned long newtime = jiffies + extra_jiffies;
+
+		/* Only update the timeout if the new timeout is at least
+		   HZ jiffies from the old timeout. Need del_timer for race
+		   avoidance (may already be dying). */
+		if (newtime - ct->timeout.expires >= HZ
+		    && del_timer(&ct->timeout)) {
+			ct->timeout.expires = newtime;
 			add_timer(&ct->timeout);
 			event = IPCT_REFRESH;
 		}
@@ -1411,9 +943,10 @@
 		ct->counters[CTINFO2DIR(ctinfo)].packets++;
 		ct->counters[CTINFO2DIR(ctinfo)].bytes +=
 			skb->len - (unsigned int)(skb->nh.raw - skb->data);
-	if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
-	    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
-		event |= IPCT_COUNTER_FILLING;
+
+		if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
+		    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
+			event |= IPCT_COUNTER_FILLING;
 	}
 #endif
 
@@ -1423,6 +956,7 @@
 	if (event)
 		nf_conntrack_event_cache(event, skb);
 }
+EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
 
 #if defined(CONFIG_NF_CT_NETLINK) || \
     defined(CONFIG_NF_CT_NETLINK_MODULE)
@@ -1447,6 +981,7 @@
 nfattr_failure:
 	return -1;
 }
+EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nfattr);
 
 static const size_t cta_min_proto[CTA_PROTO_MAX] = {
 	[CTA_PROTO_SRC_PORT-1]  = sizeof(u_int16_t),
@@ -1462,13 +997,12 @@
 	if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
 		return -EINVAL;
 
-	t->src.u.tcp.port =
-		*(u_int16_t *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]);
-	t->dst.u.tcp.port =
-		*(u_int16_t *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]);
+	t->src.u.tcp.port = *(__be16 *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]);
+	t->dst.u.tcp.port = *(__be16 *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]);
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(nf_ct_port_nfattr_to_tuple);
 #endif
 
 /* Used by ipt_REJECT and ip6t_REJECT. */
@@ -1489,6 +1023,7 @@
 	nskb->nfctinfo = ctinfo;
 	nf_conntrack_get(nskb->nfct);
 }
+EXPORT_SYMBOL_GPL(__nf_conntrack_attach);
 
 static inline int
 do_iter(const struct nf_conntrack_tuple_hash *i,
@@ -1542,6 +1077,7 @@
 		nf_ct_put(ct);
 	}
 }
+EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
 
 static int kill_all(struct nf_conn *i, void *data)
 {
@@ -1557,10 +1093,11 @@
 			   get_order(sizeof(struct list_head) * size));
 }
 
-void nf_conntrack_flush()
+void nf_conntrack_flush(void)
 {
 	nf_ct_iterate_cleanup(kill_all, NULL);
 }
+EXPORT_SYMBOL_GPL(nf_conntrack_flush);
 
 /* Mishearing the voices in his head, our hero wonders how he's
    supposed to kill the mall. */
@@ -1598,6 +1135,8 @@
 	free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
 			    nf_conntrack_htable_size);
 
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_generic);
+
 	/* free l3proto protocol tables */
 	for (i = 0; i < PF_MAX; i++)
 		if (nf_ct_protos[i]) {
@@ -1723,10 +1262,14 @@
 		goto err_free_conntrack_slab;
 	}
 
+	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_generic);
+	if (ret < 0)
+		goto out_free_expect_slab;
+
 	/* Don't NEED lock here, but good form anyway. */
 	write_lock_bh(&nf_conntrack_lock);
-        for (i = 0; i < PF_MAX; i++)
-		nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto;
+        for (i = 0; i < AF_MAX; i++)
+		nf_ct_l3protos[i] = &nf_conntrack_l3proto_generic;
         write_unlock_bh(&nf_conntrack_lock);
 
 	/* For use by REJECT target */
@@ -1740,6 +1283,8 @@
 
 	return ret;
 
+out_free_expect_slab:
+	kmem_cache_destroy(nf_conntrack_expect_cachep);
 err_free_conntrack_slab:
 	nf_conntrack_unregister_cache(NF_CT_F_BASIC);
 err_free_hash:
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
new file mode 100644
index 0000000..1a223e0
--- /dev/null
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -0,0 +1,93 @@
+/* Event cache for netfilter. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <linux/stddef.h>
+#include <linux/err.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+
+ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain);
+EXPORT_SYMBOL_GPL(nf_conntrack_chain);
+
+ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain);
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_chain);
+
+DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
+EXPORT_PER_CPU_SYMBOL_GPL(nf_conntrack_ecache);
+
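+/* Conntrack events are batched in a per-CPU cache (at most one pending
+ * conntrack per CPU) and delivered through the notifier chains above; the
+ * cache must only be touched with softirqs disabled on the local CPU. */
+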
+/* deliver cached events and clear cache entry - must be called with locally
+ * disabled softirqs */
+static inline void
+__nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
+{
+	if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct)
+	    && ecache->events)
+		atomic_notifier_call_chain(&nf_conntrack_chain, ecache->events,
+				    ecache->ct);
+
+	ecache->events = 0;
+	nf_ct_put(ecache->ct);
+	ecache->ct = NULL;
+}
+
+/* Deliver all cached events for a particular conntrack. This is called
+ * by code prior to async packet handling for freeing the skb */
+void nf_ct_deliver_cached_events(const struct nf_conn *ct)
+{
+	struct nf_conntrack_ecache *ecache;
+
+	local_bh_disable();
+	ecache = &__get_cpu_var(nf_conntrack_ecache);
+	if (ecache->ct == ct)
+		__nf_ct_deliver_cached_events(ecache);
+	local_bh_enable();
+}
+EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
+
+/* Deliver cached events for old pending events, if current conntrack != old */
+void __nf_ct_event_cache_init(struct nf_conn *ct)
+{
+	struct nf_conntrack_ecache *ecache;
+
+	/* take care of delivering potentially old events */
+	ecache = &__get_cpu_var(nf_conntrack_ecache);
+	BUG_ON(ecache->ct == ct);
+	if (ecache->ct)
+		__nf_ct_deliver_cached_events(ecache);
+	/* initialize for this conntrack/packet */
+	ecache->ct = ct;
+	nf_conntrack_get(&ct->ct_general);
+}
+EXPORT_SYMBOL_GPL(__nf_ct_event_cache_init);
+
+/* flush the event cache - touches other CPU's data and must not be called
+ * while packets are still passing through the code */
+void nf_ct_event_cache_flush(void)
+{
+	struct nf_conntrack_ecache *ecache;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		ecache = &per_cpu(nf_conntrack_ecache, cpu);
+		if (ecache->ct)
+			nf_ct_put(ecache->ct);
+	}
+}
+
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
new file mode 100644
index 0000000..9cbf926
--- /dev/null
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -0,0 +1,445 @@
+/* Expectation handling for nf_conntrack. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/skbuff.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/percpu.h>
+#include <linux/kernel.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+
+LIST_HEAD(nf_conntrack_expect_list);
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_list);
+
+struct kmem_cache *nf_conntrack_expect_cachep __read_mostly;
+static unsigned int nf_conntrack_expect_next_id;
+
+/* nf_conntrack_expect helper functions */
+void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
+{
+	struct nf_conn_help *master_help = nfct_help(exp->master);
+
+	NF_CT_ASSERT(master_help);
+	NF_CT_ASSERT(!timer_pending(&exp->timeout));
+
+	list_del(&exp->list);
+	NF_CT_STAT_INC(expect_delete);
+	master_help->expecting--;
+	nf_conntrack_expect_put(exp);
+}
+EXPORT_SYMBOL_GPL(nf_ct_unlink_expect);
+
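+/* Timer callback: the expectation expired without being matched, so unlink
+ * it under the conntrack lock and drop the reference held by the timer. */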
+static void expectation_timed_out(unsigned long ul_expect)
+{
+	struct nf_conntrack_expect *exp = (void *)ul_expect;
+
+	write_lock_bh(&nf_conntrack_lock);
+	nf_ct_unlink_expect(exp);
+	write_unlock_bh(&nf_conntrack_lock);
+	nf_conntrack_expect_put(exp);
+}
+
+struct nf_conntrack_expect *
+__nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple)
+{
+	struct nf_conntrack_expect *i;
+
+	list_for_each_entry(i, &nf_conntrack_expect_list, list) {
+		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
+			return i;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(__nf_conntrack_expect_find);
+
+/* Just find an expectation corresponding to a tuple. */
+struct nf_conntrack_expect *
+nf_conntrack_expect_find_get(const struct nf_conntrack_tuple *tuple)
+{
+	struct nf_conntrack_expect *i;
+
+	read_lock_bh(&nf_conntrack_lock);
+	i = __nf_conntrack_expect_find(tuple);
+	if (i)
+		atomic_inc(&i->use);
+	read_unlock_bh(&nf_conntrack_lock);
+
+	return i;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_find_get);
+
+/* If an expectation for this connection is found, it is deleted from the
+ * global list and then returned. */
+struct nf_conntrack_expect *
+find_expectation(const struct nf_conntrack_tuple *tuple)
+{
+	struct nf_conntrack_expect *exp;
+
+	exp = __nf_conntrack_expect_find(tuple);
+	if (!exp)
+		return NULL;
+
+	/* If master is not in hash table yet (ie. packet hasn't left
+	   this machine yet), how can other end know about expected?
+	   Hence these are not the droids you are looking for (if
+	   master ct never got confirmed, we'd hold a reference to it
+	   and weird things would happen to future packets). */
+	if (!nf_ct_is_confirmed(exp->master))
+		return NULL;
+
+	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
+		atomic_inc(&exp->use);
+		return exp;
+	} else if (del_timer(&exp->timeout)) {
+		nf_ct_unlink_expect(exp);
+		return exp;
+	}
+
+	return NULL;
+}
+
+/* delete all expectations for this conntrack */
+void nf_ct_remove_expectations(struct nf_conn *ct)
+{
+	struct nf_conntrack_expect *i, *tmp;
+	struct nf_conn_help *help = nfct_help(ct);
+
+	/* Optimization: most connections never expect any others. */
+	if (!help || help->expecting == 0)
+		return;
+
+	list_for_each_entry_safe(i, tmp, &nf_conntrack_expect_list, list) {
+		if (i->master == ct && del_timer(&i->timeout)) {
+			nf_ct_unlink_expect(i);
+			nf_conntrack_expect_put(i);
+ 		}
+	}
+}
+EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);
+
+/* Would two expected things clash? */
+static inline int expect_clash(const struct nf_conntrack_expect *a,
+			       const struct nf_conntrack_expect *b)
+{
+	/* Part covered by intersection of masks must be unequal,
+	   otherwise they clash */
+	struct nf_conntrack_tuple intersect_mask;
+	int count;
+
+	intersect_mask.src.l3num = a->mask.src.l3num & b->mask.src.l3num;
+	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
+	intersect_mask.dst.u.all = a->mask.dst.u.all & b->mask.dst.u.all;
+	intersect_mask.dst.protonum = a->mask.dst.protonum
+					& b->mask.dst.protonum;
+
+	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
+		intersect_mask.src.u3.all[count] =
+			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
+	}
+
+	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
+		intersect_mask.dst.u3.all[count] =
+			a->mask.dst.u3.all[count] & b->mask.dst.u3.all[count];
+	}
+
+	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
+}
+
+static inline int expect_matches(const struct nf_conntrack_expect *a,
+				 const struct nf_conntrack_expect *b)
+{
+	return a->master == b->master
+		&& nf_ct_tuple_equal(&a->tuple, &b->tuple)
+		&& nf_ct_tuple_equal(&a->mask, &b->mask);
+}
+
+/* Generally a bad idea to call this: could have matched already. */
+void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp)
+{
+	struct nf_conntrack_expect *i;
+
+	write_lock_bh(&nf_conntrack_lock);
+	/* choose the oldest expectation to evict */
+	list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
+		if (expect_matches(i, exp) && del_timer(&i->timeout)) {
+			nf_ct_unlink_expect(i);
+			write_unlock_bh(&nf_conntrack_lock);
+			nf_conntrack_expect_put(i);
+			return;
+		}
+	}
+	write_unlock_bh(&nf_conntrack_lock);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_unexpect_related);
+
+/* We don't increase the master conntrack refcount for non-fulfilled
+ * conntracks. During the conntrack destruction, the expectations are
+ * always killed before the conntrack itself */
+struct nf_conntrack_expect *nf_conntrack_expect_alloc(struct nf_conn *me)
+{
+	struct nf_conntrack_expect *new;
+
+	new = kmem_cache_alloc(nf_conntrack_expect_cachep, GFP_ATOMIC);
+	if (!new)
+		return NULL;
+
+	new->master = me;
+	atomic_set(&new->use, 1);
+	return new;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_alloc);
+
+void nf_conntrack_expect_init(struct nf_conntrack_expect *exp, int family,
+			      union nf_conntrack_address *saddr,
+			      union nf_conntrack_address *daddr,
+			      u_int8_t proto, __be16 *src, __be16 *dst)
+{
+	int len;
+
+	if (family == AF_INET)
+		len = 4;
+	else
+		len = 16;
+
+	exp->flags = 0;
+	exp->expectfn = NULL;
+	exp->helper = NULL;
+	exp->tuple.src.l3num = family;
+	exp->tuple.dst.protonum = proto;
+	exp->mask.src.l3num = 0xFFFF;
+	exp->mask.dst.protonum = 0xFF;
+
+	if (saddr) {
+		memcpy(&exp->tuple.src.u3, saddr, len);
+		if (sizeof(exp->tuple.src.u3) > len)
+			/* address needs to be cleared for nf_ct_tuple_equal */
+			memset((void *)&exp->tuple.src.u3 + len, 0x00,
+			       sizeof(exp->tuple.src.u3) - len);
+		memset(&exp->mask.src.u3, 0xFF, len);
+		if (sizeof(exp->mask.src.u3) > len)
+			memset((void *)&exp->mask.src.u3 + len, 0x00,
+			       sizeof(exp->mask.src.u3) - len);
+	} else {
+		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
+		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
+	}
+
+	if (daddr) {
+		memcpy(&exp->tuple.dst.u3, daddr, len);
+		if (sizeof(exp->tuple.dst.u3) > len)
+			/* address needs to be cleared for nf_ct_tuple_equal */
+			memset((void *)&exp->tuple.dst.u3 + len, 0x00,
+			       sizeof(exp->tuple.dst.u3) - len);
+		memset(&exp->mask.dst.u3, 0xFF, len);
+		if (sizeof(exp->mask.dst.u3) > len)
+			memset((void *)&exp->mask.dst.u3 + len, 0x00,
+			       sizeof(exp->mask.dst.u3) - len);
+	} else {
+		memset(&exp->tuple.dst.u3, 0x00, sizeof(exp->tuple.dst.u3));
+		memset(&exp->mask.dst.u3, 0x00, sizeof(exp->mask.dst.u3));
+	}
+
+	if (src) {
+		exp->tuple.src.u.all = (__force u16)*src;
+		exp->mask.src.u.all = 0xFFFF;
+	} else {
+		exp->tuple.src.u.all = 0;
+		exp->mask.src.u.all = 0;
+	}
+
+	if (dst) {
+		exp->tuple.dst.u.all = (__force u16)*dst;
+		exp->mask.dst.u.all = 0xFFFF;
+	} else {
+		exp->tuple.dst.u.all = 0;
+		exp->mask.dst.u.all = 0;
+	}
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_init);
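+
+/* Illustrative usage sketch for the allocation/init helpers above (mirrors
+ * the RTP/RTCP and FTP helpers; "dir" and "port" (__be16) come from the
+ * calling helper's own parsing of the payload):
+ *
+ *	exp = nf_conntrack_expect_alloc(ct);
+ *	if (exp == NULL)
+ *		return NF_DROP;
+ *	nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num,
+ *				 &ct->tuplehash[!dir].tuple.src.u3,
+ *				 &ct->tuplehash[!dir].tuple.dst.u3,
+ *				 IPPROTO_TCP, NULL, &port);
+ *	if (nf_conntrack_expect_related(exp) != 0)
+ *		ret = NF_DROP;
+ *	nf_conntrack_expect_put(exp);
+ */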
+
+void nf_conntrack_expect_put(struct nf_conntrack_expect *exp)
+{
+	if (atomic_dec_and_test(&exp->use))
+		kmem_cache_free(nf_conntrack_expect_cachep, exp);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_put);
+
+static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp)
+{
+	struct nf_conn_help *master_help = nfct_help(exp->master);
+
+	atomic_inc(&exp->use);
+	master_help->expecting++;
+	list_add(&exp->list, &nf_conntrack_expect_list);
+
+	init_timer(&exp->timeout);
+	exp->timeout.data = (unsigned long)exp;
+	exp->timeout.function = expectation_timed_out;
+	exp->timeout.expires = jiffies + master_help->helper->timeout * HZ;
+	add_timer(&exp->timeout);
+
+	exp->id = ++nf_conntrack_expect_next_id;
+	atomic_inc(&exp->use);
+	NF_CT_STAT_INC(expect_create);
+}
+
+/* Race with expectations being used means we could have none to find; OK. */
+static void evict_oldest_expect(struct nf_conn *master)
+{
+	struct nf_conntrack_expect *i;
+
+	list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
+		if (i->master == master) {
+			if (del_timer(&i->timeout)) {
+				nf_ct_unlink_expect(i);
+				nf_conntrack_expect_put(i);
+			}
+			break;
+		}
+	}
+}
+
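+/* Refresh an existing expectation's timeout; returns 0 if the timer had
+ * already fired (the expectation is dying) and must not be reused. */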
+static inline int refresh_timer(struct nf_conntrack_expect *i)
+{
+	struct nf_conn_help *master_help = nfct_help(i->master);
+
+	if (!del_timer(&i->timeout))
+		return 0;
+
+	i->timeout.expires = jiffies + master_help->helper->timeout*HZ;
+	add_timer(&i->timeout);
+	return 1;
+}
+
+int nf_conntrack_expect_related(struct nf_conntrack_expect *expect)
+{
+	struct nf_conntrack_expect *i;
+	struct nf_conn *master = expect->master;
+	struct nf_conn_help *master_help = nfct_help(master);
+	int ret;
+
+	NF_CT_ASSERT(master_help);
+
+	write_lock_bh(&nf_conntrack_lock);
+	list_for_each_entry(i, &nf_conntrack_expect_list, list) {
+		if (expect_matches(i, expect)) {
+			/* Refresh timer: if it's dying, ignore. */
+			if (refresh_timer(i)) {
+				ret = 0;
+				goto out;
+			}
+		} else if (expect_clash(i, expect)) {
+			ret = -EBUSY;
+			goto out;
+		}
+	}
+	/* Will be over limit? */
+	if (master_help->helper->max_expected &&
+	    master_help->expecting >= master_help->helper->max_expected)
+		evict_oldest_expect(master);
+
+	nf_conntrack_expect_insert(expect);
+	nf_conntrack_expect_event(IPEXP_NEW, expect);
+	ret = 0;
+out:
+	write_unlock_bh(&nf_conntrack_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_related);
+
+#ifdef CONFIG_PROC_FS
+static void *exp_seq_start(struct seq_file *s, loff_t *pos)
+{
+	struct list_head *e = &nf_conntrack_expect_list;
+	loff_t i;
+
+	/* The seq_file API calls stop() even if start() fails, so we must
+	 * take the lock here because stop() unlocks it. */
+	read_lock_bh(&nf_conntrack_lock);
+
+	if (list_empty(e))
+		return NULL;
+
+	for (i = 0; i <= *pos; i++) {
+		e = e->next;
+		if (e == &nf_conntrack_expect_list)
+			return NULL;
+	}
+	return e;
+}
+
+static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct list_head *e = v;
+
+	++*pos;
+	e = e->next;
+
+	if (e == &nf_conntrack_expect_list)
+		return NULL;
+
+	return e;
+}
+
+static void exp_seq_stop(struct seq_file *s, void *v)
+{
+	read_unlock_bh(&nf_conntrack_lock);
+}
+
+static int exp_seq_show(struct seq_file *s, void *v)
+{
+	struct nf_conntrack_expect *expect = v;
+
+	if (expect->timeout.function)
+		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
+			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
+	else
+		seq_printf(s, "- ");
+	seq_printf(s, "l3proto = %u proto=%u ",
+		   expect->tuple.src.l3num,
+		   expect->tuple.dst.protonum);
+	print_tuple(s, &expect->tuple,
+		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
+		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
+				       expect->tuple.dst.protonum));
+	return seq_putc(s, '\n');
+}
+
+static struct seq_operations exp_seq_ops = {
+	.start = exp_seq_start,
+	.next = exp_seq_next,
+	.stop = exp_seq_stop,
+	.show = exp_seq_show
+};
+
+static int exp_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &exp_seq_ops);
+}
+
+struct file_operations exp_file_ops = {
+	.owner   = THIS_MODULE,
+	.open    = exp_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release
+};
+#endif /* CONFIG_PROC_FS */
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 0c17a5b..92a9471 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -26,12 +26,15 @@
 #include <net/tcp.h>
 
 #include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <linux/netfilter/nf_conntrack_ftp.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
 MODULE_DESCRIPTION("ftp connection tracking helper");
+MODULE_ALIAS("ip_conntrack_ftp");
 
 /* This is slow, but it's simple. --RR */
 static char *ftp_buffer;
@@ -48,7 +51,7 @@
 
 unsigned int (*nf_nat_ftp_hook)(struct sk_buff **pskb,
 				enum ip_conntrack_info ctinfo,
-				enum ip_ct_ftp_type type,
+				enum nf_ct_ftp_type type,
 				unsigned int matchoff,
 				unsigned int matchlen,
 				struct nf_conntrack_expect *exp,
@@ -71,7 +74,7 @@
 	size_t plen;
 	char skip;
 	char term;
-	enum ip_ct_ftp_type ftptype;
+	enum nf_ct_ftp_type ftptype;
 	int (*getnum)(const char *, size_t, struct nf_conntrack_man *, char);
 } search[IP_CT_DIR_MAX][2] = {
 	[IP_CT_DIR_ORIGINAL] = {
@@ -80,7 +83,7 @@
 			.plen		= sizeof("PORT") - 1,
 			.skip		= ' ',
 			.term		= '\r',
-			.ftptype	= IP_CT_FTP_PORT,
+			.ftptype	= NF_CT_FTP_PORT,
 			.getnum		= try_rfc959,
 		},
 		{
@@ -88,7 +91,7 @@
 			.plen		= sizeof("EPRT") - 1,
 			.skip		= ' ',
 			.term		= '\r',
-			.ftptype	= IP_CT_FTP_EPRT,
+			.ftptype	= NF_CT_FTP_EPRT,
 			.getnum		= try_eprt,
 		},
 	},
@@ -98,7 +101,7 @@
 			.plen		= sizeof("227 ") - 1,
 			.skip		= '(',
 			.term		= ')',
-			.ftptype	= IP_CT_FTP_PASV,
+			.ftptype	= NF_CT_FTP_PASV,
 			.getnum		= try_rfc959,
 		},
 		{
@@ -106,7 +109,7 @@
 			.plen		= sizeof("229 ") - 1,
 			.skip		= '(',
 			.term		= ')',
-			.ftptype	= IP_CT_FTP_EPSV,
+			.ftptype	= NF_CT_FTP_EPSV,
 			.getnum		= try_epsv_response,
 		},
 	},
@@ -171,7 +174,7 @@
 
 /* Grab port: number up to delimiter */
 static int get_port(const char *data, int start, size_t dlen, char delim,
-		    u_int16_t *port)
+		    __be16 *port)
 {
 	u_int16_t tmp_port = 0;
 	int i;
@@ -317,7 +320,7 @@
 }
 
 /* Look up to see if we're just after a \n. */
-static int find_nl_seq(u32 seq, const struct ip_ct_ftp_master *info, int dir)
+static int find_nl_seq(u32 seq, const struct nf_ct_ftp_master *info, int dir)
 {
 	unsigned int i;
 
@@ -328,7 +331,7 @@
 }
 
 /* We don't update if it's older than what we have. */
-static void update_nl_seq(u32 nl_seq, struct ip_ct_ftp_master *info, int dir,
+static void update_nl_seq(u32 nl_seq, struct nf_ct_ftp_master *info, int dir,
 			  struct sk_buff *skb)
 {
 	unsigned int i, oldest = NUM_SEQ_TO_REMEMBER;
@@ -364,12 +367,12 @@
 	u32 seq;
 	int dir = CTINFO2DIR(ctinfo);
 	unsigned int matchlen, matchoff;
-	struct ip_ct_ftp_master *ct_ftp_info = &nfct_help(ct)->help.ct_ftp_info;
+	struct nf_ct_ftp_master *ct_ftp_info = &nfct_help(ct)->help.ct_ftp_info;
 	struct nf_conntrack_expect *exp;
 	struct nf_conntrack_man cmd = {};
-
 	unsigned int i;
 	int found = 0, ends_in_nl;
+	typeof(nf_nat_ftp_hook) nf_nat_ftp;
 
 	/* Until there's been traffic both ways, don't look in packets. */
 	if (ctinfo != IP_CT_ESTABLISHED
@@ -500,12 +503,12 @@
 			       .u = { .tcp = { 0 }},
 			     },
 		      .dst = { .protonum = 0xFF,
-			       .u = { .tcp = { 0xFFFF }},
+			       .u = { .tcp = { __constant_htons(0xFFFF) }},
 			     },
 		    };
 	if (cmd.l3num == PF_INET) {
-		exp->mask.src.u3.ip = 0xFFFFFFFF;
-		exp->mask.dst.u3.ip = 0xFFFFFFFF;
+		exp->mask.src.u3.ip = htonl(0xFFFFFFFF);
+		exp->mask.dst.u3.ip = htonl(0xFFFFFFFF);
 	} else {
 		memset(exp->mask.src.u3.ip6, 0xFF,
 		       sizeof(exp->mask.src.u3.ip6));
@@ -514,13 +517,15 @@
 	}
 
 	exp->expectfn = NULL;
+	exp->helper = NULL;
 	exp->flags = 0;
 
 	/* Now, NAT might want to mangle the packet, and register the
 	 * (possibly changed) expectation itself. */
-	if (nf_nat_ftp_hook)
-		ret = nf_nat_ftp_hook(pskb, ctinfo, search[dir][i].ftptype,
-				      matchoff, matchlen, exp, &seq);
+	nf_nat_ftp = rcu_dereference(nf_nat_ftp_hook);
+	if (nf_nat_ftp && ct->status & IPS_NAT_MASK)
+		ret = nf_nat_ftp(pskb, ctinfo, search[dir][i].ftptype,
+				 matchoff, matchlen, exp, &seq);
 	else {
 		/* Can't expect this?  Best to drop packet now. */
 		if (nf_conntrack_expect_related(exp) != 0)
@@ -584,7 +589,8 @@
 		for (j = 0; j < 2; j++) {
 			ftp[i][j].tuple.src.u.tcp.port = htons(ports[i]);
 			ftp[i][j].tuple.dst.protonum = IPPROTO_TCP;
-			ftp[i][j].mask.src.u.tcp.port = 0xFFFF;
+			ftp[i][j].mask.src.l3num = 0xFFFF;
+			ftp[i][j].mask.src.u.tcp.port = htons(0xFFFF);
 			ftp[i][j].mask.dst.protonum = 0xFF;
 			ftp[i][j].max_expected = 1;
 			ftp[i][j].timeout = 5 * 60;	/* 5 Minutes */
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
similarity index 98%
rename from net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c
rename to net/netfilter/nf_conntrack_h323_asn1.c
index 26dfeca..f6fad71 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -15,7 +15,7 @@
 #else
 #include <stdio.h>
 #endif
-#include <linux/netfilter_ipv4/ip_conntrack_helper_h323_asn1.h>
+#include <linux/netfilter/nf_conntrack_h323_asn1.h>
 
 /* Trace Flag */
 #ifndef H323_TRACE
@@ -144,7 +144,7 @@
 /****************************************************************************
  * H.323 Types
  ****************************************************************************/
-#include "ip_conntrack_helper_h323_types.c"
+#include "nf_conntrack_h323_types.c"
 
 /****************************************************************************
  * Functions
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
new file mode 100644
index 0000000..6d85689
--- /dev/null
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -0,0 +1,1856 @@
+/*
+ * H.323 connection tracking helper
+ *
+ * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
+ *
+ * This source code is licensed under General Public License version 2.
+ *
+ * Based on the 'brute force' H.323 connection tracking module by
+ * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * For more information, please see http://nath323.sourceforge.net/
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ctype.h>
+#include <linux/inet.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <net/route.h>
+#include <net/ip6_route.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/netfilter/nf_conntrack_h323.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+/* Parameters */
+static unsigned int default_rrq_ttl __read_mostly = 300;
+module_param(default_rrq_ttl, uint, 0600);
+MODULE_PARM_DESC(default_rrq_ttl, "use this TTL if it's missing in RRQ");
+
+static int gkrouted_only __read_mostly = 1;
+module_param(gkrouted_only, int, 0600);
+MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper");
+
+static int callforward_filter __read_mostly = 1;
+module_param(callforward_filter, bool, 0600);
+MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations "
+		                     "if both endpoints are on different sides "
+				     "(determined by routing information)");
+
+/* Hooks for NAT */
+int (*set_h245_addr_hook) (struct sk_buff **pskb,
+			   unsigned char **data, int dataoff,
+			   H245_TransportAddress *taddr,
+			   union nf_conntrack_address *addr, __be16 port)
+			   __read_mostly;
+int (*set_h225_addr_hook) (struct sk_buff **pskb,
+			   unsigned char **data, int dataoff,
+			   TransportAddress *taddr,
+			   union nf_conntrack_address *addr, __be16 port)
+			   __read_mostly;
+int (*set_sig_addr_hook) (struct sk_buff **pskb,
+			  struct nf_conn *ct,
+			  enum ip_conntrack_info ctinfo,
+			  unsigned char **data,
+			  TransportAddress *taddr, int count) __read_mostly;
+int (*set_ras_addr_hook) (struct sk_buff **pskb,
+			  struct nf_conn *ct,
+			  enum ip_conntrack_info ctinfo,
+			  unsigned char **data,
+			  TransportAddress *taddr, int count) __read_mostly;
+int (*nat_rtp_rtcp_hook) (struct sk_buff **pskb,
+			  struct nf_conn *ct,
+			  enum ip_conntrack_info ctinfo,
+			  unsigned char **data, int dataoff,
+			  H245_TransportAddress *taddr,
+			  __be16 port, __be16 rtp_port,
+			  struct nf_conntrack_expect *rtp_exp,
+			  struct nf_conntrack_expect *rtcp_exp) __read_mostly;
+int (*nat_t120_hook) (struct sk_buff **pskb,
+		      struct nf_conn *ct,
+		      enum ip_conntrack_info ctinfo,
+		      unsigned char **data, int dataoff,
+		      H245_TransportAddress *taddr, __be16 port,
+		      struct nf_conntrack_expect *exp) __read_mostly;
+int (*nat_h245_hook) (struct sk_buff **pskb,
+		      struct nf_conn *ct,
+		      enum ip_conntrack_info ctinfo,
+		      unsigned char **data, int dataoff,
+		      TransportAddress *taddr, __be16 port,
+		      struct nf_conntrack_expect *exp) __read_mostly;
+int (*nat_callforwarding_hook) (struct sk_buff **pskb,
+				struct nf_conn *ct,
+				enum ip_conntrack_info ctinfo,
+				unsigned char **data, int dataoff,
+				TransportAddress *taddr, __be16 port,
+				struct nf_conntrack_expect *exp) __read_mostly;
+int (*nat_q931_hook) (struct sk_buff **pskb,
+		      struct nf_conn *ct,
+		      enum ip_conntrack_info ctinfo,
+		      unsigned char **data, TransportAddress *taddr, int idx,
+		      __be16 port, struct nf_conntrack_expect *exp)
+		      __read_mostly;
+
+static DEFINE_SPINLOCK(nf_h323_lock);
+static char *h323_buffer;
+
+static struct nf_conntrack_helper nf_conntrack_helper_h245;
+static struct nf_conntrack_helper nf_conntrack_helper_q931[];
+static struct nf_conntrack_helper nf_conntrack_helper_ras[];
+
+/****************************************************************************/
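+/* Pull the next TPKT (RFC 1006 framing: 03 00 <16-bit length>) out of the
+ * TCP stream; returns 1 with *data pointing at the encapsulated payload, or
+ * 0 when this segment holds no (further) complete TPKT. */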
+static int get_tpkt_data(struct sk_buff **pskb, unsigned int protoff,
+			 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			 unsigned char **data, int *datalen, int *dataoff)
+{
+	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	int dir = CTINFO2DIR(ctinfo);
+	struct tcphdr _tcph, *th;
+	int tcpdatalen;
+	int tcpdataoff;
+	unsigned char *tpkt;
+	int tpktlen;
+	int tpktoff;
+
+	/* Get TCP header */
+	th = skb_header_pointer(*pskb, protoff, sizeof(_tcph), &_tcph);
+	if (th == NULL)
+		return 0;
+
+	/* Get TCP data offset */
+	tcpdataoff = protoff + th->doff * 4;
+
+	/* Get TCP data length */
+	tcpdatalen = (*pskb)->len - tcpdataoff;
+	if (tcpdatalen <= 0)	/* No TCP data */
+		goto clear_out;
+
+	if (*data == NULL) {	/* first TPKT */
+		/* Get first TPKT pointer */
+		tpkt = skb_header_pointer(*pskb, tcpdataoff, tcpdatalen,
+					  h323_buffer);
+		BUG_ON(tpkt == NULL);
+
+		/* Validate TPKT identifier */
+		if (tcpdatalen < 4 || tpkt[0] != 0x03 || tpkt[1] != 0) {
+			/* Netmeeting sends TPKT header and data separately */
+			if (info->tpkt_len[dir] > 0) {
+				DEBUGP("nf_ct_h323: previous packet "
+				       "indicated separate TPKT data of %hu "
+				       "bytes\n", info->tpkt_len[dir]);
+				if (info->tpkt_len[dir] <= tcpdatalen) {
+					/* Yes, there was a TPKT header
+					 * received */
+					*data = tpkt;
+					*datalen = info->tpkt_len[dir];
+					*dataoff = 0;
+					goto out;
+				}
+
+				/* Fragmented TPKT */
+				if (net_ratelimit())
+					printk("nf_ct_h323: "
+					       "fragmented TPKT\n");
+				goto clear_out;
+			}
+
+			/* It is not even a TPKT */
+			return 0;
+		}
+		tpktoff = 0;
+	} else {		/* Next TPKT */
+		tpktoff = *dataoff + *datalen;
+		tcpdatalen -= tpktoff;
+		if (tcpdatalen <= 4)	/* No more TPKT */
+			goto clear_out;
+		tpkt = *data + *datalen;
+
+		/* Validate TPKT identifier */
+		if (tpkt[0] != 0x03 || tpkt[1] != 0)
+			goto clear_out;
+	}
+
+	/* Validate TPKT length */
+	tpktlen = tpkt[2] * 256 + tpkt[3];
+	if (tpktlen < 4)
+		goto clear_out;
+	if (tpktlen > tcpdatalen) {
+		if (tcpdatalen == 4) {	/* Separate TPKT header */
+			/* Netmeeting sends TPKT header and data separately */
+			DEBUGP("nf_ct_h323: separate TPKT header indicates "
+			       "there will be TPKT data of %hu bytes\n",
+			       tpktlen - 4);
+			info->tpkt_len[dir] = tpktlen - 4;
+			return 0;
+		}
+
+		if (net_ratelimit())
+			printk("nf_ct_h323: incomplete TPKT (fragmented?)\n");
+		goto clear_out;
+	}
+
+	/* This is the encapsulated data */
+	*data = tpkt + 4;
+	*datalen = tpktlen - 4;
+	*dataoff = tpktoff + 4;
+
+      out:
+	/* Clear TPKT length */
+	info->tpkt_len[dir] = 0;
+	return 1;
+
+      clear_out:
+	info->tpkt_len[dir] = 0;
+	return 0;
+}
+
+/****************************************************************************/
+static int get_h245_addr(struct nf_conn *ct, unsigned char *data,
+			 H245_TransportAddress *taddr,
+			 union nf_conntrack_address *addr, __be16 *port)
+{
+	unsigned char *p;
+	int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+	int len;
+
+	if (taddr->choice != eH245_TransportAddress_unicastAddress)
+		return 0;
+
+	switch (taddr->unicastAddress.choice) {
+	case eUnicastAddress_iPAddress:
+		if (family != AF_INET)
+			return 0;
+		p = data + taddr->unicastAddress.iPAddress.network;
+		len = 4;
+		break;
+	case eUnicastAddress_iP6Address:
+		if (family != AF_INET6)
+			return 0;
+		p = data + taddr->unicastAddress.iP6Address.network;
+		len = 16;
+		break;
+	default:
+		return 0;
+	}
+
+	memcpy(addr, p, len);
+	memset((void *)addr + len, 0, sizeof(*addr) - len);
+	memcpy(port, p + len, sizeof(__be16));
+
+	return 1;
+}
+
+/****************************************************************************/
+static int expect_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct,
+			   enum ip_conntrack_info ctinfo,
+			   unsigned char **data, int dataoff,
+			   H245_TransportAddress *taddr)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	int ret = 0;
+	__be16 port;
+	__be16 rtp_port, rtcp_port;
+	union nf_conntrack_address addr;
+	struct nf_conntrack_expect *rtp_exp;
+	struct nf_conntrack_expect *rtcp_exp;
+	typeof(nat_rtp_rtcp_hook) nat_rtp_rtcp;
+
+	/* Read RTP or RTCP address */
+	if (!get_h245_addr(ct, *data, taddr, &addr, &port) ||
+	    memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) ||
+	    port == 0)
+		return 0;
+
+	/* RTP port is even */
+	port &= htons(~1);
+	rtp_port = port;
+	rtcp_port = htons(ntohs(port) + 1);
+
+	/* Create expect for RTP */
+	if ((rtp_exp = nf_conntrack_expect_alloc(ct)) == NULL)
+		return -1;
+	nf_conntrack_expect_init(rtp_exp, ct->tuplehash[!dir].tuple.src.l3num,
+				 &ct->tuplehash[!dir].tuple.src.u3,
+				 &ct->tuplehash[!dir].tuple.dst.u3,
+				 IPPROTO_UDP, NULL, &rtp_port);
+
+	/* Create expect for RTCP */
+	if ((rtcp_exp = nf_conntrack_expect_alloc(ct)) == NULL) {
+		nf_conntrack_expect_put(rtp_exp);
+		return -1;
+	}
+	nf_conntrack_expect_init(rtcp_exp, ct->tuplehash[!dir].tuple.src.l3num,
+				 &ct->tuplehash[!dir].tuple.src.u3,
+				 &ct->tuplehash[!dir].tuple.dst.u3,
+				 IPPROTO_UDP, NULL, &rtcp_port);
+
+	if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
+	   	   &ct->tuplehash[!dir].tuple.dst.u3,
+		   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
+		   (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook)) &&
+		   ct->status & IPS_NAT_MASK) {
+		/* NAT needed */
+		ret = nat_rtp_rtcp(pskb, ct, ctinfo, data, dataoff,
+				   taddr, port, rtp_port, rtp_exp, rtcp_exp);
+	} else {		/* Conntrack only */
+		if (nf_conntrack_expect_related(rtp_exp) == 0) {
+			if (nf_conntrack_expect_related(rtcp_exp) == 0) {
+				DEBUGP("nf_ct_h323: expect RTP ");
+				NF_CT_DUMP_TUPLE(&rtp_exp->tuple);
+				DEBUGP("nf_ct_h323: expect RTCP ");
+				NF_CT_DUMP_TUPLE(&rtcp_exp->tuple);
+			} else {
+				nf_conntrack_unexpect_related(rtp_exp);
+				ret = -1;
+			}
+		} else
+			ret = -1;
+	}
+
+	nf_conntrack_expect_put(rtp_exp);
+	nf_conntrack_expect_put(rtcp_exp);
+
+	return ret;
+}
+
+/****************************************************************************/
+static int expect_t120(struct sk_buff **pskb,
+		       struct nf_conn *ct,
+		       enum ip_conntrack_info ctinfo,
+		       unsigned char **data, int dataoff,
+		       H245_TransportAddress *taddr)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	int ret = 0;
+	__be16 port;
+	union nf_conntrack_address addr;
+	struct nf_conntrack_expect *exp;
+	typeof(nat_t120_hook) nat_t120;
+
+	/* Read T.120 address */
+	if (!get_h245_addr(ct, *data, taddr, &addr, &port) ||
+	    memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) ||
+	    port == 0)
+		return 0;
+
+	/* Create expect for T.120 connections */
+	if ((exp = nf_conntrack_expect_alloc(ct)) == NULL)
+		return -1;
+	nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num,
+				 &ct->tuplehash[!dir].tuple.src.u3,
+				 &ct->tuplehash[!dir].tuple.dst.u3,
+				 IPPROTO_TCP, NULL, &port);
+	exp->flags = NF_CT_EXPECT_PERMANENT;	/* Accept multiple channels */
+
+	if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
+		   &ct->tuplehash[!dir].tuple.dst.u3,
+		   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
+	    (nat_t120 = rcu_dereference(nat_t120_hook)) &&
+	    ct->status & IPS_NAT_MASK) {
+		/* NAT needed */
+		ret = nat_t120(pskb, ct, ctinfo, data, dataoff, taddr,
+			       port, exp);
+	} else {		/* Conntrack only */
+		if (nf_conntrack_expect_related(exp) == 0) {
+			DEBUGP("nf_ct_h323: expect T.120 ");
+			NF_CT_DUMP_TUPLE(&exp->tuple);
+		} else
+			ret = -1;
+	}
+
+	nf_conntrack_expect_put(exp);
+
+	return ret;
+}
+
+/****************************************************************************/
+static int process_h245_channel(struct sk_buff **pskb,
+				struct nf_conn *ct,
+				enum ip_conntrack_info ctinfo,
+				unsigned char **data, int dataoff,
+				H2250LogicalChannelParameters *channel)
+{
+	int ret;
+
+	if (channel->options & eH2250LogicalChannelParameters_mediaChannel) {
+		/* RTP */
+		ret = expect_rtp_rtcp(pskb, ct, ctinfo, data, dataoff,
+				      &channel->mediaChannel);
+		if (ret < 0)
+			return -1;
+	}
+
+	if (channel->
+	    options & eH2250LogicalChannelParameters_mediaControlChannel) {
+		/* RTCP */
+		ret = expect_rtp_rtcp(pskb, ct, ctinfo, data, dataoff,
+				      &channel->mediaControlChannel);
+		if (ret < 0)
+			return -1;
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int process_olc(struct sk_buff **pskb, struct nf_conn *ct,
+		       enum ip_conntrack_info ctinfo,
+		       unsigned char **data, int dataoff,
+		       OpenLogicalChannel *olc)
+{
+	int ret;
+
+	DEBUGP("nf_ct_h323: OpenLogicalChannel\n");
+
+	if (olc->forwardLogicalChannelParameters.multiplexParameters.choice ==
+	    eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)
+	{
+		ret = process_h245_channel(pskb, ct, ctinfo, data, dataoff,
+					   &olc->
+					   forwardLogicalChannelParameters.
+					   multiplexParameters.
+					   h2250LogicalChannelParameters);
+		if (ret < 0)
+			return -1;
+	}
+
+	if ((olc->options &
+	     eOpenLogicalChannel_reverseLogicalChannelParameters) &&
+	    (olc->reverseLogicalChannelParameters.options &
+	     eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters)
+	    && (olc->reverseLogicalChannelParameters.multiplexParameters.
+		choice ==
+		eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters))
+	{
+		ret =
+		    process_h245_channel(pskb, ct, ctinfo, data, dataoff,
+					 &olc->
+					 reverseLogicalChannelParameters.
+					 multiplexParameters.
+					 h2250LogicalChannelParameters);
+		if (ret < 0)
+			return -1;
+	}
+
+	if ((olc->options & eOpenLogicalChannel_separateStack) &&
+	    olc->forwardLogicalChannelParameters.dataType.choice ==
+	    eDataType_data &&
+	    olc->forwardLogicalChannelParameters.dataType.data.application.
+	    choice == eDataApplicationCapability_application_t120 &&
+	    olc->forwardLogicalChannelParameters.dataType.data.application.
+	    t120.choice == eDataProtocolCapability_separateLANStack &&
+	    olc->separateStack.networkAddress.choice ==
+	    eNetworkAccessParameters_networkAddress_localAreaAddress) {
+		ret = expect_t120(pskb, ct, ctinfo, data, dataoff,
+				  &olc->separateStack.networkAddress.
+				  localAreaAddress);
+		if (ret < 0)
+			return -1;
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int process_olca(struct sk_buff **pskb, struct nf_conn *ct,
+			enum ip_conntrack_info ctinfo,
+			unsigned char **data, int dataoff,
+			OpenLogicalChannelAck *olca)
+{
+	H2250LogicalChannelAckParameters *ack;
+	int ret;
+
+	DEBUGP("nf_ct_h323: OpenLogicalChannelAck\n");
+
+	if ((olca->options &
+	     eOpenLogicalChannelAck_reverseLogicalChannelParameters) &&
+	    (olca->reverseLogicalChannelParameters.options &
+	     eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters)
+	    && (olca->reverseLogicalChannelParameters.multiplexParameters.
+		choice ==
+		eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters))
+	{
+		ret = process_h245_channel(pskb, ct, ctinfo, data, dataoff,
+					   &olca->
+					   reverseLogicalChannelParameters.
+					   multiplexParameters.
+					   h2250LogicalChannelParameters);
+		if (ret < 0)
+			return -1;
+	}
+
+	if ((olca->options &
+	     eOpenLogicalChannelAck_forwardMultiplexAckParameters) &&
+	    (olca->forwardMultiplexAckParameters.choice ==
+	     eOpenLogicalChannelAck_forwardMultiplexAckParameters_h2250LogicalChannelAckParameters))
+	{
+		ack = &olca->forwardMultiplexAckParameters.
+		    h2250LogicalChannelAckParameters;
+		if (ack->options &
+		    eH2250LogicalChannelAckParameters_mediaChannel) {
+			/* RTP */
+			ret = expect_rtp_rtcp(pskb, ct, ctinfo, data, dataoff,
+					      &ack->mediaChannel);
+			if (ret < 0)
+				return -1;
+		}
+
+		if (ack->options &
+		    eH2250LogicalChannelAckParameters_mediaControlChannel) {
+			/* RTCP */
+			ret = expect_rtp_rtcp(pskb, ct, ctinfo, data, dataoff,
+					      &ack->mediaControlChannel);
+			if (ret < 0)
+				return -1;
+		}
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int process_h245(struct sk_buff **pskb, struct nf_conn *ct,
+			enum ip_conntrack_info ctinfo,
+			unsigned char **data, int dataoff,
+			MultimediaSystemControlMessage *mscm)
+{
+	switch (mscm->choice) {
+	case eMultimediaSystemControlMessage_request:
+		if (mscm->request.choice ==
+		    eRequestMessage_openLogicalChannel) {
+			return process_olc(pskb, ct, ctinfo, data, dataoff,
+					   &mscm->request.openLogicalChannel);
+		}
+		DEBUGP("nf_ct_h323: H.245 Request %d\n",
+		       mscm->request.choice);
+		break;
+	case eMultimediaSystemControlMessage_response:
+		if (mscm->response.choice ==
+		    eResponseMessage_openLogicalChannelAck) {
+			return process_olca(pskb, ct, ctinfo, data, dataoff,
+					    &mscm->response.
+					    openLogicalChannelAck);
+		}
+		DEBUGP("nf_ct_h323: H.245 Response %d\n",
+		       mscm->response.choice);
+		break;
+	default:
+		DEBUGP("nf_ct_h323: H.245 signal %d\n", mscm->choice);
+		break;
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int h245_help(struct sk_buff **pskb, unsigned int protoff,
+		     struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
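+	/* The decoded message lives in static storage; nf_h323_lock (taken
+	 * below) serialises all users of it and of h323_buffer. */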
+	static MultimediaSystemControlMessage mscm;
+	unsigned char *data = NULL;
+	int datalen;
+	int dataoff;
+	int ret;
+
+	/* Until there's been traffic both ways, don't look in packets. */
+	if (ctinfo != IP_CT_ESTABLISHED &&
+	    ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
+		return NF_ACCEPT;
+	}
+	DEBUGP("nf_ct_h245: skblen = %u\n", (*pskb)->len);
+
+	spin_lock_bh(&nf_h323_lock);
+
+	/* Process each TPKT */
+	while (get_tpkt_data(pskb, protoff, ct, ctinfo,
+			     &data, &datalen, &dataoff)) {
+		DEBUGP("nf_ct_h245: TPKT len=%d ", datalen);
+		NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
+
+		/* Decode H.245 signal */
+		ret = DecodeMultimediaSystemControlMessage(data, datalen,
+							   &mscm);
+		if (ret < 0) {
+			if (net_ratelimit())
+				printk("nf_ct_h245: decoding error: %s\n",
+				       ret == H323_ERROR_BOUND ?
+				       "out of bound" : "out of range");
+			/* We don't drop on decoding errors */
+			break;
+		}
+
+		/* Process H.245 signal */
+		if (process_h245(pskb, ct, ctinfo, &data, dataoff, &mscm) < 0)
+			goto drop;
+	}
+
+	spin_unlock_bh(&nf_h323_lock);
+	return NF_ACCEPT;
+
+      drop:
+	spin_unlock_bh(&nf_h323_lock);
+	if (net_ratelimit())
+		printk("nf_ct_h245: packet dropped\n");
+	return NF_DROP;
+}
+
+/****************************************************************************/
+static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = {
+	.name			= "H.245",
+	.me			= THIS_MODULE,
+	.max_expected		= H323_RTP_CHANNEL_MAX * 4 + 2 /* T.120 */,
+	.timeout		= 240,
+	.tuple.dst.protonum	= IPPROTO_UDP,
+	.mask.src.u.udp.port	= __constant_htons(0xFFFF),
+	.mask.dst.protonum	= 0xFF,
+	.help			= h245_help
+};
+
+/****************************************************************************/
+int get_h225_addr(struct nf_conn *ct, unsigned char *data,
+		  TransportAddress *taddr,
+		  union nf_conntrack_address *addr, __be16 *port)
+{
+	unsigned char *p;
+	int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+	int len;
+
+	switch (taddr->choice) {
+	case eTransportAddress_ipAddress:
+		if (family != AF_INET)
+			return 0;
+		p = data + taddr->ipAddress.ip;
+		len = 4;
+		break;
+	case eTransportAddress_ip6Address:
+		if (family != AF_INET6)
+			return 0;
+		p = data + taddr->ip6Address.ip6;
+		len = 16;
+		break;
+	default:
+		return 0;
+	}
+
+	memcpy(addr, p, len);
+	memset((void *)addr + len, 0, sizeof(*addr) - len);
+	memcpy(port, p + len, sizeof(__be16));
+
+	return 1;
+}
+
+/****************************************************************************/
+static int expect_h245(struct sk_buff **pskb, struct nf_conn *ct,
+		       enum ip_conntrack_info ctinfo,
+		       unsigned char **data, int dataoff,
+		       TransportAddress *taddr)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	int ret = 0;
+	__be16 port;
+	union nf_conntrack_address addr;
+	struct nf_conntrack_expect *exp;
+	typeof(nat_h245_hook) nat_h245;
+
+	/* Read h245Address */
+	if (!get_h225_addr(ct, *data, taddr, &addr, &port) ||
+	    memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) ||
+	    port == 0)
+		return 0;
+
+	/* Create expect for h245 connection */
+	if ((exp = nf_conntrack_expect_alloc(ct)) == NULL)
+		return -1;
+	nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num,
+				 &ct->tuplehash[!dir].tuple.src.u3,
+				 &ct->tuplehash[!dir].tuple.dst.u3,
+				 IPPROTO_TCP, NULL, &port);
+	exp->helper = &nf_conntrack_helper_h245;
+
+	if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
+		   &ct->tuplehash[!dir].tuple.dst.u3,
+		   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
+	    (nat_h245 = rcu_dereference(nat_h245_hook)) &&
+	    ct->status & IPS_NAT_MASK) {
+		/* NAT needed */
+		ret = nat_h245(pskb, ct, ctinfo, data, dataoff, taddr,
+			       port, exp);
+	} else {		/* Conntrack only */
+		if (nf_conntrack_expect_related(exp) == 0) {
+			DEBUGP("nf_ct_q931: expect H.245 ");
+			NF_CT_DUMP_TUPLE(&exp->tuple);
+		} else
+			ret = -1;
+	}
+
+	nf_conntrack_expect_put(exp);
+
+	return ret;
+}
+
+/* If the calling party is on the same side as the forward-to party,
+ * we don't need to track the second call */
+static int callforward_do_filter(union nf_conntrack_address *src,
+				 union nf_conntrack_address *dst,
+				 int family)
+{
+	struct flowi fl1, fl2;
+	int ret = 0;
+
+	memset(&fl1, 0, sizeof(fl1));
+	memset(&fl2, 0, sizeof(fl2));
+
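+	/* The parties count as being on the same side when the routes to both
+	 * of them use the same gateway and the same output device. */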
+	switch (family) {
+	case AF_INET: {
+		struct rtable *rt1, *rt2;
+
+		fl1.fl4_dst = src->ip;
+		fl2.fl4_dst = dst->ip;
+		if (ip_route_output_key(&rt1, &fl1) == 0) {
+			if (ip_route_output_key(&rt2, &fl2) == 0) {
+				if (rt1->rt_gateway == rt2->rt_gateway &&
+				    rt1->u.dst.dev  == rt2->u.dst.dev)
+					ret = 1;
+				dst_release(&rt2->u.dst);
+			}
+			dst_release(&rt1->u.dst);
+		}
+		break;
+	}
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	case AF_INET6: {
+		struct rt6_info *rt1, *rt2;
+
+		memcpy(&fl1.fl6_dst, src, sizeof(fl1.fl6_dst));
+		memcpy(&fl2.fl6_dst, dst, sizeof(fl2.fl6_dst));
+		rt1 = (struct rt6_info *)ip6_route_output(NULL, &fl1);
+		if (rt1) {
+			rt2 = (struct rt6_info *)ip6_route_output(NULL, &fl2);
+			if (rt2) {
+				if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
+				 	    sizeof(rt1->rt6i_gateway)) &&
+				    rt1->u.dst.dev == rt2->u.dst.dev)
+					ret = 1;
+				dst_release(&rt2->u.dst);
+			}
+			dst_release(&rt1->u.dst);
+		}
+		break;
+	}
+#endif
+	}
+	return ret;
+
+}
+
+/****************************************************************************/
+static int expect_callforwarding(struct sk_buff **pskb,
+				 struct nf_conn *ct,
+				 enum ip_conntrack_info ctinfo,
+				 unsigned char **data, int dataoff,
+				 TransportAddress *taddr)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	int ret = 0;
+	__be16 port;
+	union nf_conntrack_address addr;
+	struct nf_conntrack_expect *exp;
+	typeof(nat_callforwarding_hook) nat_callforwarding;
+
+	/* Read alternativeAddress */
+	if (!get_h225_addr(ct, *data, taddr, &addr, &port) || port == 0)
+		return 0;
+
+	/* If the calling party is on the same side as the forward-to party,
+	 * we don't need to track the second call */
+	if (callforward_filter &&
+	    callforward_do_filter(&addr, &ct->tuplehash[!dir].tuple.src.u3,
+	    			  ct->tuplehash[!dir].tuple.src.l3num)) {
+		DEBUGP("nf_ct_q931: Call Forwarding not tracked\n");
+		return 0;
+	}
+
+	/* Create expect for the second call leg */
+	if ((exp = nf_conntrack_expect_alloc(ct)) == NULL)
+		return -1;
+	nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num,
+				 &ct->tuplehash[!dir].tuple.src.u3, &addr,
+				 IPPROTO_TCP, NULL, &port);
+	exp->helper = nf_conntrack_helper_q931;
+
+	if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
+		   &ct->tuplehash[!dir].tuple.dst.u3,
+		   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
+	    (nat_callforwarding = rcu_dereference(nat_callforwarding_hook)) &&
+	    ct->status & IPS_NAT_MASK) {
+		/* Need NAT */
+		ret = nat_callforwarding(pskb, ct, ctinfo, data, dataoff,
+					 taddr, port, exp);
+	} else {		/* Conntrack only */
+		if (nf_conntrack_expect_related(exp) == 0) {
+			DEBUGP("nf_ct_q931: expect Call Forwarding ");
+			NF_CT_DUMP_TUPLE(&exp->tuple);
+		} else
+			ret = -1;
+	}
+
+	nf_conntrack_expect_put(exp);
+
+	return ret;
+}
+
+/****************************************************************************/
+static int process_setup(struct sk_buff **pskb, struct nf_conn *ct,
+			 enum ip_conntrack_info ctinfo,
+			 unsigned char **data, int dataoff,
+			 Setup_UUIE *setup)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	int ret;
+	int i;
+	__be16 port;
+	union nf_conntrack_address addr;
+	typeof(set_h225_addr_hook) set_h225_addr;
+
+	DEBUGP("nf_ct_q931: Setup\n");
+
+	if (setup->options & eSetup_UUIE_h245Address) {
+		ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
+				  &setup->h245Address);
+		if (ret < 0)
+			return -1;
+	}
+
+	set_h225_addr = rcu_dereference(set_h225_addr_hook);
+	if ((setup->options & eSetup_UUIE_destCallSignalAddress) &&
+	    (set_h225_addr) && ct->status & IPS_NAT_MASK &&
+	    get_h225_addr(ct, *data, &setup->destCallSignalAddress,
+	    		  &addr, &port) &&
+	    memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) {
+		DEBUGP("nf_ct_q931: set destCallSignalAddress "
+		       NIP6_FMT ":%hu->" NIP6_FMT ":%hu\n",
+		       NIP6(*(struct in6_addr *)&addr), ntohs(port),
+		       NIP6(*(struct in6_addr *)&ct->tuplehash[!dir].tuple.src.u3),
+		       ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port));
+		ret = set_h225_addr(pskb, data, dataoff,
+				    &setup->destCallSignalAddress,
+				    &ct->tuplehash[!dir].tuple.src.u3,
+				    ct->tuplehash[!dir].tuple.src.u.tcp.port);
+		if (ret < 0)
+			return -1;
+	}
+
+	if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) &&
+	    (set_h225_addr) && ct->status & IPS_NAT_MASK &&
+	    get_h225_addr(ct, *data, &setup->sourceCallSignalAddress,
+	    		  &addr, &port) &&
+	    memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) {
+		DEBUGP("nf_ct_q931: set sourceCallSignalAddress "
+		       NIP6_FMT ":%hu->" NIP6_FMT ":%hu\n",
+		       NIP6(*(struct in6_addr *)&addr), ntohs(port),
+		       NIP6(*(struct in6_addr *)&ct->tuplehash[!dir].tuple.dst.u3),
+		       ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port));
+		ret = set_h225_addr(pskb, data, dataoff,
+				    &setup->sourceCallSignalAddress,
+				    &ct->tuplehash[!dir].tuple.dst.u3,
+				    ct->tuplehash[!dir].tuple.dst.u.tcp.port);
+		if (ret < 0)
+			return -1;
+	}
+
+	if (setup->options & eSetup_UUIE_fastStart) {
+		for (i = 0; i < setup->fastStart.count; i++) {
+			ret = process_olc(pskb, ct, ctinfo, data, dataoff,
+					  &setup->fastStart.item[i]);
+			if (ret < 0)
+				return -1;
+		}
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int process_callproceeding(struct sk_buff **pskb,
+				  struct nf_conn *ct,
+				  enum ip_conntrack_info ctinfo,
+				  unsigned char **data, int dataoff,
+				  CallProceeding_UUIE *callproc)
+{
+	int ret;
+	int i;
+
+	DEBUGP("nf_ct_q931: CallProceeding\n");
+
+	if (callproc->options & eCallProceeding_UUIE_h245Address) {
+		ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
+				  &callproc->h245Address);
+		if (ret < 0)
+			return -1;
+	}
+
+	if (callproc->options & eCallProceeding_UUIE_fastStart) {
+		for (i = 0; i < callproc->fastStart.count; i++) {
+			ret = process_olc(pskb, ct, ctinfo, data, dataoff,
+					  &callproc->fastStart.item[i]);
+			if (ret < 0)
+				return -1;
+		}
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int process_connect(struct sk_buff **pskb, struct nf_conn *ct,
+			   enum ip_conntrack_info ctinfo,
+			   unsigned char **data, int dataoff,
+			   Connect_UUIE *connect)
+{
+	int ret;
+	int i;
+
+	DEBUGP("nf_ct_q931: Connect\n");
+
+	if (connect->options & eConnect_UUIE_h245Address) {
+		ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
+				  &connect->h245Address);
+		if (ret < 0)
+			return -1;
+	}
+
+	if (connect->options & eConnect_UUIE_fastStart) {
+		for (i = 0; i < connect->fastStart.count; i++) {
+			ret = process_olc(pskb, ct, ctinfo, data, dataoff,
+					  &connect->fastStart.item[i]);
+			if (ret < 0)
+				return -1;
+		}
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int process_alerting(struct sk_buff **pskb, struct nf_conn *ct,
+			    enum ip_conntrack_info ctinfo,
+			    unsigned char **data, int dataoff,
+			    Alerting_UUIE *alert)
+{
+	int ret;
+	int i;
+
+	DEBUGP("nf_ct_q931: Alerting\n");
+
+	if (alert->options & eAlerting_UUIE_h245Address) {
+		ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
+				  &alert->h245Address);
+		if (ret < 0)
+			return -1;
+	}
+
+	if (alert->options & eAlerting_UUIE_fastStart) {
+		for (i = 0; i < alert->fastStart.count; i++) {
+			ret = process_olc(pskb, ct, ctinfo, data, dataoff,
+					  &alert->fastStart.item[i]);
+			if (ret < 0)
+				return -1;
+		}
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int process_information(struct sk_buff **pskb,
+			       struct nf_conn *ct,
+			       enum ip_conntrack_info ctinfo,
+			       unsigned char **data, int dataoff,
+			       Information_UUIE *info)
+{
+	int ret;
+	int i;
+
+	DEBUGP("nf_ct_q931: Information\n");
+
+	if (info->options & eInformation_UUIE_fastStart) {
+		for (i = 0; i < info->fastStart.count; i++) {
+			ret = process_olc(pskb, ct, ctinfo, data, dataoff,
+					  &info->fastStart.item[i]);
+			if (ret < 0)
+				return -1;
+		}
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int process_facility(struct sk_buff **pskb, struct nf_conn *ct,
+			    enum ip_conntrack_info ctinfo,
+			    unsigned char **data, int dataoff,
+			    Facility_UUIE *facility)
+{
+	int ret;
+	int i;
+
+	DEBUGP("nf_ct_q931: Facility\n");
+
+	if (facility->reason.choice == eFacilityReason_callForwarded) {
+		if (facility->options & eFacility_UUIE_alternativeAddress)
+			return expect_callforwarding(pskb, ct, ctinfo, data,
+						     dataoff,
+						     &facility->
+						     alternativeAddress);
+		return 0;
+	}
+
+	if (facility->options & eFacility_UUIE_h245Address) {
+		ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
+				  &facility->h245Address);
+		if (ret < 0)
+			return -1;
+	}
+
+	if (facility->options & eFacility_UUIE_fastStart) {
+		for (i = 0; i < facility->fastStart.count; i++) {
+			ret = process_olc(pskb, ct, ctinfo, data, dataoff,
+					  &facility->fastStart.item[i]);
+			if (ret < 0)
+				return -1;
+		}
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int process_progress(struct sk_buff **pskb, struct nf_conn *ct,
+			    enum ip_conntrack_info ctinfo,
+			    unsigned char **data, int dataoff,
+			    Progress_UUIE *progress)
+{
+	int ret;
+	int i;
+
+	DEBUGP("nf_ct_q931: Progress\n");
+
+	if (progress->options & eProgress_UUIE_h245Address) {
+		ret = expect_h245(pskb, ct, ctinfo, data, dataoff,
+				  &progress->h245Address);
+		if (ret < 0)
+			return -1;
+	}
+
+	if (progress->options & eProgress_UUIE_fastStart) {
+		for (i = 0; i < progress->fastStart.count; i++) {
+			ret = process_olc(pskb, ct, ctinfo, data, dataoff,
+					  &progress->fastStart.item[i]);
+			if (ret < 0)
+				return -1;
+		}
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int process_q931(struct sk_buff **pskb, struct nf_conn *ct,
+			enum ip_conntrack_info ctinfo,
+			unsigned char **data, int dataoff, Q931 *q931)
+{
+	H323_UU_PDU *pdu = &q931->UUIE.h323_uu_pdu;
+	int i;
+	int ret = 0;
+
+	switch (pdu->h323_message_body.choice) {
+	case eH323_UU_PDU_h323_message_body_setup:
+		ret = process_setup(pskb, ct, ctinfo, data, dataoff,
+				    &pdu->h323_message_body.setup);
+		break;
+	case eH323_UU_PDU_h323_message_body_callProceeding:
+		ret = process_callproceeding(pskb, ct, ctinfo, data, dataoff,
+					     &pdu->h323_message_body.
+					     callProceeding);
+		break;
+	case eH323_UU_PDU_h323_message_body_connect:
+		ret = process_connect(pskb, ct, ctinfo, data, dataoff,
+				      &pdu->h323_message_body.connect);
+		break;
+	case eH323_UU_PDU_h323_message_body_alerting:
+		ret = process_alerting(pskb, ct, ctinfo, data, dataoff,
+				       &pdu->h323_message_body.alerting);
+		break;
+	case eH323_UU_PDU_h323_message_body_information:
+		ret = process_information(pskb, ct, ctinfo, data, dataoff,
+					  &pdu->h323_message_body.
+					  information);
+		break;
+	case eH323_UU_PDU_h323_message_body_facility:
+		ret = process_facility(pskb, ct, ctinfo, data, dataoff,
+				       &pdu->h323_message_body.facility);
+		break;
+	case eH323_UU_PDU_h323_message_body_progress:
+		ret = process_progress(pskb, ct, ctinfo, data, dataoff,
+				       &pdu->h323_message_body.progress);
+		break;
+	default:
+		DEBUGP("nf_ct_q931: Q.931 signal %d\n",
+		       pdu->h323_message_body.choice);
+		break;
+	}
+
+	if (ret < 0)
+		return -1;
+
+	if (pdu->options & eH323_UU_PDU_h245Control) {
+		for (i = 0; i < pdu->h245Control.count; i++) {
+			ret = process_h245(pskb, ct, ctinfo, data, dataoff,
+					   &pdu->h245Control.item[i]);
+			if (ret < 0)
+				return -1;
+		}
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int q931_help(struct sk_buff **pskb, unsigned int protoff,
+		     struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+	static Q931 q931;
+	unsigned char *data = NULL;
+	int datalen;
+	int dataoff;
+	int ret;
+
+	/* Until there's been traffic both ways, don't look in packets. */
+	if (ctinfo != IP_CT_ESTABLISHED &&
+	    ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
+		return NF_ACCEPT;
+	}
+	DEBUGP("nf_ct_q931: skblen = %u\n", (*pskb)->len);
+
+	spin_lock_bh(&nf_h323_lock);
+
+	/* Process each TPKT */
+	while (get_tpkt_data(pskb, protoff, ct, ctinfo,
+			     &data, &datalen, &dataoff)) {
+		DEBUGP("nf_ct_q931: TPKT len=%d ", datalen);
+		NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
+
+		/* Decode Q.931 signal */
+		ret = DecodeQ931(data, datalen, &q931);
+		if (ret < 0) {
+			if (net_ratelimit())
+				printk("nf_ct_q931: decoding error: %s\n",
+				       ret == H323_ERROR_BOUND ?
+				       "out of bound" : "out of range");
+			/* We don't drop on decoding errors */
+			break;
+		}
+
+		/* Process Q.931 signal */
+		if (process_q931(pskb, ct, ctinfo, &data, dataoff, &q931) < 0)
+			goto drop;
+	}
+
+	spin_unlock_bh(&nf_h323_lock);
+	return NF_ACCEPT;
+
+      drop:
+	spin_unlock_bh(&nf_h323_lock);
+	if (net_ratelimit())
+		printk("nf_ct_q931: packet dropped\n");
+	return NF_DROP;
+}
+
+/****************************************************************************/
+static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
+	{
+		.name			= "Q.931",
+		.me			= THIS_MODULE,
+					  /* T.120 and H.245 */
+		.max_expected		= H323_RTP_CHANNEL_MAX * 4 + 4,
+		.timeout		= 240,
+		.tuple.src.l3num	= AF_INET,
+		.tuple.src.u.tcp.port	= __constant_htons(Q931_PORT),
+		.tuple.dst.protonum	= IPPROTO_TCP,
+		.mask.src.l3num		= 0xFFFF,
+		.mask.src.u.tcp.port	= __constant_htons(0xFFFF),
+		.mask.dst.protonum	= 0xFF,
+		.help			= q931_help
+	},
+	{
+		.name			= "Q.931",
+		.me			= THIS_MODULE,
+					  /* T.120 and H.245 */
+		.max_expected		= H323_RTP_CHANNEL_MAX * 4 + 4,
+		.timeout		= 240,
+		.tuple.src.l3num	= AF_INET6,
+		.tuple.src.u.tcp.port	= __constant_htons(Q931_PORT),
+		.tuple.dst.protonum	= IPPROTO_TCP,
+		.mask.src.l3num		= 0xFFFF,
+		.mask.src.u.tcp.port	= __constant_htons(0xFFFF),
+		.mask.dst.protonum	= 0xFF,
+		.help			= q931_help
+	},
+};
+
+/****************************************************************************/
+static unsigned char *get_udp_data(struct sk_buff **pskb, unsigned int protoff,
+				   int *datalen)
+{
+	struct udphdr _uh, *uh;
+	int dataoff;
+
+	uh = skb_header_pointer(*pskb, protoff, sizeof(_uh), &_uh);
+	if (uh == NULL)
+		return NULL;
+	dataoff = protoff + sizeof(_uh);
+	if (dataoff >= (*pskb)->len)
+		return NULL;
+	*datalen = (*pskb)->len - dataoff;
+	return skb_header_pointer(*pskb, dataoff, *datalen, h323_buffer);
+}
+
+/****************************************************************************/
+static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
+					       union nf_conntrack_address *addr,
+					       __be16 port)
+{
+	struct nf_conntrack_expect *exp;
+	struct nf_conntrack_tuple tuple;
+
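+	/* Build a tuple with a wildcarded source and check below that the
+	 * expectation found belongs to this master conntrack. */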
+	memset(&tuple.src.u3, 0, sizeof(tuple.src.u3));
+	tuple.src.u.tcp.port = 0;
+	memcpy(&tuple.dst.u3, addr, sizeof(tuple.dst.u3));
+	tuple.dst.u.tcp.port = port;
+	tuple.dst.protonum = IPPROTO_TCP;
+
+	exp = __nf_conntrack_expect_find(&tuple);
+	if (exp && exp->master == ct)
+		return exp;
+	return NULL;
+}
+
+/****************************************************************************/
+static int set_expect_timeout(struct nf_conntrack_expect *exp,
+			      unsigned timeout)
+{
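+	/* If del_timer() fails the expectation has already expired and must
+	 * not be re-armed here. */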
+	if (!exp || !del_timer(&exp->timeout))
+		return 0;
+
+	exp->timeout.expires = jiffies + timeout * HZ;
+	add_timer(&exp->timeout);
+
+	return 1;
+}
+
+/****************************************************************************/
+static int expect_q931(struct sk_buff **pskb, struct nf_conn *ct,
+		       enum ip_conntrack_info ctinfo,
+		       unsigned char **data,
+		       TransportAddress *taddr, int count)
+{
+	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	int dir = CTINFO2DIR(ctinfo);
+	int ret = 0;
+	int i;
+	__be16 port;
+	union nf_conntrack_address addr;
+	struct nf_conntrack_expect *exp;
+	typeof(nat_q931_hook) nat_q931;
+
+	/* Look for the first related address */
+	for (i = 0; i < count; i++) {
+		if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) &&
+		    memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3,
+		    	   sizeof(addr)) == 0 && port != 0)
+			break;
+	}
+
+	if (i >= count)		/* Not found */
+		return 0;
+
+	/* Create expect for Q.931 */
+	if ((exp = nf_conntrack_expect_alloc(ct)) == NULL)
+		return -1;
+	nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num,
+				 gkrouted_only ? /* only accept calls from GK? */
+				 	&ct->tuplehash[!dir].tuple.src.u3 :
+					NULL,
+				 &ct->tuplehash[!dir].tuple.dst.u3,
+				 IPPROTO_TCP, NULL, &port);
+	exp->helper = nf_conntrack_helper_q931;
+	exp->flags = NF_CT_EXPECT_PERMANENT;	/* Accept multiple calls */
+
+	nat_q931 = rcu_dereference(nat_q931_hook);
+	if (nat_q931 && ct->status & IPS_NAT_MASK) {	/* Need NAT */
+		ret = nat_q931(pskb, ct, ctinfo, data, taddr, i, port, exp);
+	} else {		/* Conntrack only */
+		if (nf_conntrack_expect_related(exp) == 0) {
+			DEBUGP("nf_ct_ras: expect Q.931 ");
+			NF_CT_DUMP_TUPLE(&exp->tuple);
+
+			/* Save port for looking up expect in processing RCF */
+			info->sig_port[dir] = port;
+		} else
+			ret = -1;
+	}
+
+	nf_conntrack_expect_put(exp);
+
+	return ret;
+}
+
+/****************************************************************************/
+static int process_grq(struct sk_buff **pskb, struct nf_conn *ct,
+		       enum ip_conntrack_info ctinfo,
+		       unsigned char **data, GatekeeperRequest *grq)
+{
+	typeof(set_ras_addr_hook) set_ras_addr;
+
+	DEBUGP("nf_ct_ras: GRQ\n");
+
+	set_ras_addr = rcu_dereference(set_ras_addr_hook);
+	if (set_ras_addr && ct->status & IPS_NAT_MASK)	/* NATed */
+		return set_ras_addr(pskb, ct, ctinfo, data,
+				    &grq->rasAddress, 1);
+	return 0;
+}
+
+/****************************************************************************/
+static int process_gcf(struct sk_buff **pskb, struct nf_conn *ct,
+		       enum ip_conntrack_info ctinfo,
+		       unsigned char **data, GatekeeperConfirm *gcf)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	int ret = 0;
+	__be16 port;
+	union nf_conntrack_address addr;
+	struct nf_conntrack_expect *exp;
+
+	DEBUGP("nf_ct_ras: GCF\n");
+
+	if (!get_h225_addr(ct, *data, &gcf->rasAddress, &addr, &port))
+		return 0;
+
+	/* Registration port is the same as discovery port */
+	if (!memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
+	    port == ct->tuplehash[dir].tuple.src.u.udp.port)
+		return 0;
+
+	/* Avoid RAS expectation loops. A GCF is never expected. */
+	if (test_bit(IPS_EXPECTED_BIT, &ct->status))
+		return 0;
+
+	/* Need new expect */
+	if ((exp = nf_conntrack_expect_alloc(ct)) == NULL)
+		return -1;
+	nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num,
+				 &ct->tuplehash[!dir].tuple.src.u3, &addr,
+				 IPPROTO_UDP, NULL, &port);
+	exp->helper = nf_conntrack_helper_ras;
+
+	if (nf_conntrack_expect_related(exp) == 0) {
+		DEBUGP("nf_ct_ras: expect RAS ");
+		NF_CT_DUMP_TUPLE(&exp->tuple);
+	} else
+		ret = -1;
+
+	nf_conntrack_expect_put(exp);
+
+	return ret;
+}
+
+/****************************************************************************/
+static int process_rrq(struct sk_buff **pskb, struct nf_conn *ct,
+		       enum ip_conntrack_info ctinfo,
+		       unsigned char **data, RegistrationRequest *rrq)
+{
+	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	int ret;
+	typeof(set_ras_addr_hook) set_ras_addr;
+
+	DEBUGP("nf_ct_ras: RRQ\n");
+
+	ret = expect_q931(pskb, ct, ctinfo, data,
+			  rrq->callSignalAddress.item,
+			  rrq->callSignalAddress.count);
+	if (ret < 0)
+		return -1;
+
+	set_ras_addr = rcu_dereference(set_ras_addr_hook);
+	if (set_ras_addr && ct->status & IPS_NAT_MASK) {
+		ret = set_ras_addr(pskb, ct, ctinfo, data,
+				   rrq->rasAddress.item,
+				   rrq->rasAddress.count);
+		if (ret < 0)
+			return -1;
+	}
+
+	if (rrq->options & eRegistrationRequest_timeToLive) {
+		DEBUGP("nf_ct_ras: RRQ TTL = %u seconds\n", rrq->timeToLive);
+		info->timeout = rrq->timeToLive;
+	} else
+		info->timeout = default_rrq_ttl;
+
+	return 0;
+}
+
+/****************************************************************************/
+static int process_rcf(struct sk_buff **pskb, struct nf_conn *ct,
+		       enum ip_conntrack_info ctinfo,
+		       unsigned char **data, RegistrationConfirm *rcf)
+{
+	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	int dir = CTINFO2DIR(ctinfo);
+	int ret;
+	struct nf_conntrack_expect *exp;
+	typeof(set_sig_addr_hook) set_sig_addr;
+
+	DEBUGP("nf_ct_ras: RCF\n");
+
+	set_sig_addr = rcu_dereference(set_sig_addr_hook);
+	if (set_sig_addr && ct->status & IPS_NAT_MASK) {
+		ret = set_sig_addr(pskb, ct, ctinfo, data,
+					rcf->callSignalAddress.item,
+					rcf->callSignalAddress.count);
+		if (ret < 0)
+			return -1;
+	}
+
+	if (rcf->options & eRegistrationConfirm_timeToLive) {
+		DEBUGP("nf_ct_ras: RCF TTL = %u seconds\n", rcf->timeToLive);
+		info->timeout = rcf->timeToLive;
+	}
+
+	if (info->timeout > 0) {
+		DEBUGP("nf_ct_ras: set RAS connection timeout to "
+		       "%u seconds\n", info->timeout);
+		nf_ct_refresh(ct, *pskb, info->timeout * HZ);
+
+		/* Set expect timeout */
+		read_lock_bh(&nf_conntrack_lock);
+		exp = find_expect(ct, &ct->tuplehash[dir].tuple.dst.u3,
+				  info->sig_port[!dir]);
+		if (exp) {
+			DEBUGP("nf_ct_ras: set Q.931 expect "
+			       "timeout to %u seconds for",
+			       info->timeout);
+			NF_CT_DUMP_TUPLE(&exp->tuple);
+			set_expect_timeout(exp, info->timeout);
+		}
+		read_unlock_bh(&nf_conntrack_lock);
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int process_urq(struct sk_buff **pskb, struct nf_conn *ct,
+		       enum ip_conntrack_info ctinfo,
+		       unsigned char **data, UnregistrationRequest *urq)
+{
+	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	int dir = CTINFO2DIR(ctinfo);
+	int ret;
+	typeof(set_sig_addr_hook) set_sig_addr;
+
+	DEBUGP("nf_ct_ras: URQ\n");
+
+	set_sig_addr = rcu_dereference(set_sig_addr_hook);
+	if (set_sig_addr && ct->status & IPS_NAT_MASK) {
+		ret = set_sig_addr(pskb, ct, ctinfo, data,
+				   urq->callSignalAddress.item,
+				   urq->callSignalAddress.count);
+		if (ret < 0)
+			return -1;
+	}
+
+	/* Clear old expect */
+	nf_ct_remove_expectations(ct);
+	info->sig_port[dir] = 0;
+	info->sig_port[!dir] = 0;
+
+	/* Give it 30 seconds for UCF or URJ */
+	nf_ct_refresh(ct, *pskb, 30 * HZ);
+
+	return 0;
+}
+
+/****************************************************************************/
+static int process_arq(struct sk_buff **pskb, struct nf_conn *ct,
+		       enum ip_conntrack_info ctinfo,
+		       unsigned char **data, AdmissionRequest *arq)
+{
+	struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+	int dir = CTINFO2DIR(ctinfo);
+	__be16 port;
+	union nf_conntrack_address addr;
+	typeof(set_h225_addr_hook) set_h225_addr;
+
+	DEBUGP("nf_ct_ras: ARQ\n");
+
+	set_h225_addr = rcu_dereference(set_h225_addr_hook);
+	if ((arq->options & eAdmissionRequest_destCallSignalAddress) &&
+	    get_h225_addr(ct, *data, &arq->destCallSignalAddress,
+	    		  &addr, &port) &&
+	    !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
+	    port == info->sig_port[dir] &&
+	    set_h225_addr && ct->status & IPS_NAT_MASK) {
+		/* Answering ARQ */
+		return set_h225_addr(pskb, data, 0,
+				     &arq->destCallSignalAddress,
+				     &ct->tuplehash[!dir].tuple.dst.u3,
+				     info->sig_port[!dir]);
+	}
+
+	if ((arq->options & eAdmissionRequest_srcCallSignalAddress) &&
+	    get_h225_addr(ct, *data, &arq->srcCallSignalAddress,
+	    		  &addr, &port) &&
+	    !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
+	    set_h225_addr && ct->status & IPS_NAT_MASK) {
+		/* Calling ARQ */
+		return set_h225_addr(pskb, data, 0,
+				     &arq->srcCallSignalAddress,
+				     &ct->tuplehash[!dir].tuple.dst.u3,
+				     port);
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int process_acf(struct sk_buff **pskb, struct nf_conn *ct,
+		       enum ip_conntrack_info ctinfo,
+		       unsigned char **data, AdmissionConfirm *acf)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	int ret = 0;
+	__be16 port;
+	union nf_conntrack_address addr;
+	struct nf_conntrack_expect *exp;
+	typeof(set_sig_addr_hook) set_sig_addr;
+
+	DEBUGP("nf_ct_ras: ACF\n");
+
+	if (!get_h225_addr(ct, *data, &acf->destCallSignalAddress,
+			   &addr, &port))
+		return 0;
+
+	if (!memcmp(&addr, &ct->tuplehash[dir].tuple.dst.u3, sizeof(addr))) {
+		/* Answering ACF */
+		set_sig_addr = rcu_dereference(set_sig_addr_hook);
+		if (set_sig_addr && ct->status & IPS_NAT_MASK)
+			return set_sig_addr(pskb, ct, ctinfo, data,
+					    &acf->destCallSignalAddress, 1);
+		return 0;
+	}
+
+	/* Need new expect */
+	if ((exp = nf_conntrack_expect_alloc(ct)) == NULL)
+		return -1;
+	nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num,
+				 &ct->tuplehash[!dir].tuple.src.u3, &addr,
+				 IPPROTO_TCP, NULL, &port);
+	exp->flags = NF_CT_EXPECT_PERMANENT;
+	exp->helper = nf_conntrack_helper_q931;
+
+	if (nf_conntrack_expect_related(exp) == 0) {
+		DEBUGP("nf_ct_ras: expect Q.931 ");
+		NF_CT_DUMP_TUPLE(&exp->tuple);
+	} else
+		ret = -1;
+
+	nf_conntrack_expect_put(exp);
+
+	return ret;
+}
+
+/****************************************************************************/
+static int process_lrq(struct sk_buff **pskb, struct nf_conn *ct,
+		       enum ip_conntrack_info ctinfo,
+		       unsigned char **data, LocationRequest *lrq)
+{
+	typeof(set_ras_addr_hook) set_ras_addr;
+
+	DEBUGP("nf_ct_ras: LRQ\n");
+
+	set_ras_addr = rcu_dereference(set_ras_addr_hook);
+	if (set_ras_addr && ct->status & IPS_NAT_MASK)
+		return set_ras_addr(pskb, ct, ctinfo, data,
+				    &lrq->replyAddress, 1);
+	return 0;
+}
+
+/****************************************************************************/
+static int process_lcf(struct sk_buff **pskb, struct nf_conn *ct,
+		       enum ip_conntrack_info ctinfo,
+		       unsigned char **data, LocationConfirm *lcf)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	int ret = 0;
+	__be16 port;
+	union nf_conntrack_address addr;
+	struct nf_conntrack_expect *exp;
+
+	DEBUGP("nf_ct_ras: LCF\n");
+
+	if (!get_h225_addr(ct, *data, &lcf->callSignalAddress,
+			   &addr, &port))
+		return 0;
+
+	/* Need new expect for call signal */
+	if ((exp = nf_conntrack_expect_alloc(ct)) == NULL)
+		return -1;
+	nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num,
+				 &ct->tuplehash[!dir].tuple.src.u3, &addr,
+				 IPPROTO_TCP, NULL, &port);
+	exp->flags = NF_CT_EXPECT_PERMANENT;
+	exp->helper = nf_conntrack_helper_q931;
+
+	if (nf_conntrack_expect_related(exp) == 0) {
+		DEBUGP("nf_ct_ras: expect Q.931 ");
+		NF_CT_DUMP_TUPLE(&exp->tuple);
+	} else
+		ret = -1;
+
+	nf_conntrack_expect_put(exp);
+
+	/* Ignore rasAddress */
+
+	return ret;
+}
+
+/****************************************************************************/
+static int process_irr(struct sk_buff **pskb, struct nf_conn *ct,
+		       enum ip_conntrack_info ctinfo,
+		       unsigned char **data, InfoRequestResponse *irr)
+{
+	int ret;
+	typeof(set_ras_addr_hook) set_ras_addr;
+	typeof(set_sig_addr_hook) set_sig_addr;
+
+	DEBUGP("nf_ct_ras: IRR\n");
+
+	set_ras_addr = rcu_dereference(set_ras_addr_hook);
+	if (set_ras_addr && ct->status & IPS_NAT_MASK) {
+		ret = set_ras_addr(pskb, ct, ctinfo, data,
+				   &irr->rasAddress, 1);
+		if (ret < 0)
+			return -1;
+	}
+
+	set_sig_addr = rcu_dereference(set_sig_addr_hook);
+	if (set_sig_addr && ct->status & IPS_NAT_MASK) {
+		ret = set_sig_addr(pskb, ct, ctinfo, data,
+					irr->callSignalAddress.item,
+					irr->callSignalAddress.count);
+		if (ret < 0)
+			return -1;
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int process_ras(struct sk_buff **pskb, struct nf_conn *ct,
+		       enum ip_conntrack_info ctinfo,
+		       unsigned char **data, RasMessage *ras)
+{
+	switch (ras->choice) {
+	case eRasMessage_gatekeeperRequest:
+		return process_grq(pskb, ct, ctinfo, data,
+				   &ras->gatekeeperRequest);
+	case eRasMessage_gatekeeperConfirm:
+		return process_gcf(pskb, ct, ctinfo, data,
+				   &ras->gatekeeperConfirm);
+	case eRasMessage_registrationRequest:
+		return process_rrq(pskb, ct, ctinfo, data,
+				   &ras->registrationRequest);
+	case eRasMessage_registrationConfirm:
+		return process_rcf(pskb, ct, ctinfo, data,
+				   &ras->registrationConfirm);
+	case eRasMessage_unregistrationRequest:
+		return process_urq(pskb, ct, ctinfo, data,
+				   &ras->unregistrationRequest);
+	case eRasMessage_admissionRequest:
+		return process_arq(pskb, ct, ctinfo, data,
+				   &ras->admissionRequest);
+	case eRasMessage_admissionConfirm:
+		return process_acf(pskb, ct, ctinfo, data,
+				   &ras->admissionConfirm);
+	case eRasMessage_locationRequest:
+		return process_lrq(pskb, ct, ctinfo, data,
+				   &ras->locationRequest);
+	case eRasMessage_locationConfirm:
+		return process_lcf(pskb, ct, ctinfo, data,
+				   &ras->locationConfirm);
+	case eRasMessage_infoRequestResponse:
+		return process_irr(pskb, ct, ctinfo, data,
+				   &ras->infoRequestResponse);
+	default:
+		DEBUGP("nf_ct_ras: RAS message %d\n", ras->choice);
+		break;
+	}
+
+	return 0;
+}
+
+/****************************************************************************/
+static int ras_help(struct sk_buff **pskb, unsigned int protoff,
+		    struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+	static RasMessage ras;
+	unsigned char *data;
+	int datalen = 0;
+	int ret;
+
+	DEBUGP("nf_ct_ras: skblen = %u\n", (*pskb)->len);
+
+	spin_lock_bh(&nf_h323_lock);
+
+	/* Get UDP data */
+	data = get_udp_data(pskb, protoff, &datalen);
+	if (data == NULL)
+		goto accept;
+	DEBUGP("nf_ct_ras: RAS message len=%d ", datalen);
+	NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
+
+	/* Decode RAS message */
+	ret = DecodeRasMessage(data, datalen, &ras);
+	if (ret < 0) {
+		if (net_ratelimit())
+			printk("nf_ct_ras: decoding error: %s\n",
+			       ret == H323_ERROR_BOUND ?
+			       "out of bound" : "out of range");
+		goto accept;
+	}
+
+	/* Process RAS message */
+	if (process_ras(pskb, ct, ctinfo, &data, &ras) < 0)
+		goto drop;
+
+      accept:
+	spin_unlock_bh(&nf_h323_lock);
+	return NF_ACCEPT;
+
+      drop:
+	spin_unlock_bh(&nf_h323_lock);
+	if (net_ratelimit())
+		printk("nf_ct_ras: packet dropped\n");
+	return NF_DROP;
+}
+
+/****************************************************************************/
+static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
+	{
+		.name			= "RAS",
+		.me			= THIS_MODULE,
+		.max_expected		= 32,
+		.timeout		= 240,
+		.tuple.src.l3num	= AF_INET,
+		.tuple.src.u.udp.port	= __constant_htons(RAS_PORT),
+		.tuple.dst.protonum	= IPPROTO_UDP,
+		.mask.src.l3num		= 0xFFFF,
+		.mask.src.u.udp.port	= __constant_htons(0xFFFF),
+		.mask.dst.protonum	= 0xFF,
+		.help			= ras_help,
+	},
+	{
+		.name			= "RAS",
+		.me			= THIS_MODULE,
+		.max_expected		= 32,
+		.timeout		= 240,
+		.tuple.src.l3num	= AF_INET6,
+		.tuple.src.u.udp.port	= __constant_htons(RAS_PORT),
+		.tuple.dst.protonum	= IPPROTO_UDP,
+		.mask.src.l3num		= 0xFFFF,
+		.mask.src.u.udp.port	= __constant_htons(0xFFFF),
+		.mask.dst.protonum	= 0xFF,
+		.help			= ras_help,
+	},
+};
+
+/****************************************************************************/
+static void __exit nf_conntrack_h323_fini(void)
+{
+	nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[1]);
+	nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[0]);
+	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[1]);
+	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[0]);
+	kfree(h323_buffer);
+	DEBUGP("nf_ct_h323: fini\n");
+}
+
+/****************************************************************************/
+static int __init nf_conntrack_h323_init(void)
+{
+	int ret;
+
+	h323_buffer = kmalloc(65536, GFP_KERNEL);
+	if (!h323_buffer)
+		return -ENOMEM;
+	ret = nf_conntrack_helper_register(&nf_conntrack_helper_q931[0]);
+	if (ret < 0)
+		goto err1;
+	ret = nf_conntrack_helper_register(&nf_conntrack_helper_q931[1]);
+	if (ret < 0)
+		goto err2;
+	ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[0]);
+	if (ret < 0)
+		goto err3;
+	ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[1]);
+	if (ret < 0)
+		goto err4;
+	DEBUGP("nf_ct_h323: init success\n");
+	return 0;
+
+err4:
+	nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[0]);
+err3:
+	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[1]);
+err2:
+	nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[0]);
+err1:
+	return ret;
+}
+
+/****************************************************************************/
+module_init(nf_conntrack_h323_init);
+module_exit(nf_conntrack_h323_fini);
+
+EXPORT_SYMBOL_GPL(get_h225_addr);
+EXPORT_SYMBOL_GPL(set_h245_addr_hook);
+EXPORT_SYMBOL_GPL(set_h225_addr_hook);
+EXPORT_SYMBOL_GPL(set_sig_addr_hook);
+EXPORT_SYMBOL_GPL(set_ras_addr_hook);
+EXPORT_SYMBOL_GPL(nat_rtp_rtcp_hook);
+EXPORT_SYMBOL_GPL(nat_t120_hook);
+EXPORT_SYMBOL_GPL(nat_h245_hook);
+EXPORT_SYMBOL_GPL(nat_callforwarding_hook);
+EXPORT_SYMBOL_GPL(nat_q931_hook);
+
+MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>");
+MODULE_DESCRIPTION("H.323 connection tracking helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ip_conntrack_h323");
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323_types.c b/net/netfilter/nf_conntrack_h323_types.c
similarity index 99%
rename from net/ipv4/netfilter/ip_conntrack_helper_h323_types.c
rename to net/netfilter/nf_conntrack_h323_types.c
index 4b35961..4c6f8b3 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323_types.c
+++ b/net/netfilter/nf_conntrack_h323_types.c
@@ -36,7 +36,8 @@
 };
 
 static field_t _TransportAddress_ip6Address[] = {	/* SEQUENCE */
-	{FNAME("ip") OCTSTR, FIXD, 16, 0, SKIP, 0, NULL},
+	{FNAME("ip") OCTSTR, FIXD, 16, 0, DECODE,
+	 offsetof(TransportAddress_ip6Address, ip6), NULL},
 	{FNAME("port") INT, WORD, 0, 0, SKIP, 0, NULL},
 };
 
@@ -65,8 +66,8 @@
 	 _TransportAddress_ipSourceRoute},
 	{FNAME("ipxAddress") SEQ, 0, 3, 3, SKIP, 0,
 	 _TransportAddress_ipxAddress},
-	{FNAME("ip6Address") SEQ, 0, 2, 2, SKIP | EXT, 0,
-	 _TransportAddress_ip6Address},
+	{FNAME("ip6Address") SEQ, 0, 2, 2, DECODE | EXT,
+	offsetof(TransportAddress, ip6Address), _TransportAddress_ip6Address},
 	{FNAME("netBios") OCTSTR, FIXD, 16, 0, SKIP, 0, NULL},
 	{FNAME("nsap") OCTSTR, 5, 1, 0, SKIP, 0, NULL},
 	{FNAME("nonStandardAddress") SEQ, 0, 2, 2, SKIP, 0,
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
new file mode 100644
index 0000000..0743be4
--- /dev/null
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -0,0 +1,155 @@
+/* Helper handling for netfilter. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_core.h>
+
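+/* List of registered helpers, protected by nf_conntrack_lock. */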
+static __read_mostly LIST_HEAD(helpers);
+
+struct nf_conntrack_helper *
+__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
+{
+	struct nf_conntrack_helper *h;
+
+	list_for_each_entry(h, &helpers, list) {
+		if (nf_ct_tuple_mask_cmp(tuple, &h->tuple, &h->mask))
+			return h;
+	}
+	return NULL;
+}
+
+struct nf_conntrack_helper *
+nf_ct_helper_find_get(const struct nf_conntrack_tuple *tuple)
+{
+	struct nf_conntrack_helper *helper;
+
+	/* need nf_conntrack_lock to ensure that the helper exists until
+	 * try_module_get() is called */
+	read_lock_bh(&nf_conntrack_lock);
+
+	helper = __nf_ct_helper_find(tuple);
+	if (helper) {
+		/* need to increase the module usage count to ensure the helper
+		 * does not go away while the caller is e.g. busy putting a
+		 * conntrack that uses the helper into the hash */
+		if (!try_module_get(helper->me))
+			helper = NULL;
+	}
+
+	read_unlock_bh(&nf_conntrack_lock);
+
+	return helper;
+}
+EXPORT_SYMBOL_GPL(nf_ct_helper_find_get);
+
+void nf_ct_helper_put(struct nf_conntrack_helper *helper)
+{
+	module_put(helper->me);
+}
+EXPORT_SYMBOL_GPL(nf_ct_helper_put);
+
+struct nf_conntrack_helper *
+__nf_conntrack_helper_find_byname(const char *name)
+{
+	struct nf_conntrack_helper *h;
+
+	list_for_each_entry(h, &helpers, list) {
+		if (!strcmp(h->name, name))
+			return h;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find_byname);
+
+static inline int unhelp(struct nf_conntrack_tuple_hash *i,
+			 const struct nf_conntrack_helper *me)
+{
+	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i);
+	struct nf_conn_help *help = nfct_help(ct);
+
+	if (help && help->helper == me) {
+		nf_conntrack_event(IPCT_HELPER, ct);
+		help->helper = NULL;
+	}
+	return 0;
+}
+
+int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
+{
+	int size, ret;
+
+	BUG_ON(me->timeout == 0);
+
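+	/* Conntracks using a helper carry a struct nf_conn_help placed right
+	 * after struct nf_conn, so the cache must have room for both. */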
+	size = ALIGN(sizeof(struct nf_conn), __alignof__(struct nf_conn_help)) +
+	       sizeof(struct nf_conn_help);
+	ret = nf_conntrack_register_cache(NF_CT_F_HELP, "nf_conntrack:help",
+					  size);
+	if (ret < 0) {
+		printk(KERN_ERR "nf_conntrack_helper_register: Unable to create slab cache for conntracks\n");
+		return ret;
+	}
+	write_lock_bh(&nf_conntrack_lock);
+	list_add(&me->list, &helpers);
+	write_unlock_bh(&nf_conntrack_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_helper_register);
+
+void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
+{
+	unsigned int i;
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conntrack_expect *exp, *tmp;
+
+	/* Need write lock here, to delete helper. */
+	write_lock_bh(&nf_conntrack_lock);
+	list_del(&me->list);
+
+	/* Get rid of expectations */
+	list_for_each_entry_safe(exp, tmp, &nf_conntrack_expect_list, list) {
+		struct nf_conn_help *help = nfct_help(exp->master);
+		if ((help->helper == me || exp->helper == me) &&
+		    del_timer(&exp->timeout)) {
+			nf_ct_unlink_expect(exp);
+			nf_conntrack_expect_put(exp);
+		}
+	}
+
+	/* Clear the helper pointer on unconfirmed and hashed conntracks. */
+	list_for_each_entry(h, &unconfirmed, list)
+		unhelp(h, me);
+	for (i = 0; i < nf_conntrack_htable_size; i++) {
+		list_for_each_entry(h, &nf_conntrack_hash[i], list)
+			unhelp(h, me);
+	}
+	write_unlock_bh(&nf_conntrack_lock);
+
+	/* Someone could be still looking at the helper in a bh. */
+	synchronize_net();
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
new file mode 100644
index 0000000..ed01db6
--- /dev/null
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -0,0 +1,281 @@
+/* IRC extension for IP connection tracking, Version 1.21
+ * (C) 2000-2002 by Harald Welte <laforge@gnumonks.org>
+ * based on RR's ip_conntrack_ftp.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/netfilter.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/netfilter/nf_conntrack_irc.h>
+
+#define MAX_PORTS 8
+static unsigned short ports[MAX_PORTS];
+static int ports_c;
+static unsigned int max_dcc_channels = 8;
+static unsigned int dcc_timeout __read_mostly = 300;
+/* This is slow, but it's simple. --RR */
+static char *irc_buffer;
+static DEFINE_SPINLOCK(irc_buffer_lock);
+
+unsigned int (*nf_nat_irc_hook)(struct sk_buff **pskb,
+				enum ip_conntrack_info ctinfo,
+				unsigned int matchoff,
+				unsigned int matchlen,
+				struct nf_conntrack_expect *exp) __read_mostly;
+EXPORT_SYMBOL_GPL(nf_nat_irc_hook);
+
+MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+MODULE_DESCRIPTION("IRC (DCC) connection tracking helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ip_conntrack_irc");
+
+module_param_array(ports, ushort, &ports_c, 0400);
+MODULE_PARM_DESC(ports, "port numbers of IRC servers");
+module_param(max_dcc_channels, uint, 0400);
+MODULE_PARM_DESC(max_dcc_channels, "max number of expected DCC channels per "
+				   "IRC session");
+module_param(dcc_timeout, uint, 0400);
+MODULE_PARM_DESC(dcc_timeout, "timeout for unestablished DCC channels");
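+
+/*
+ * Illustrative usage of the parameters above (the values are only an
+ * example, not defaults):
+ *   modprobe nf_conntrack_irc ports=6667,6668 max_dcc_channels=16 dcc_timeout=300
+ */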
+
+static const char *dccprotos[] = {
+	"SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT "
+};
+
+#define MINMATCHLEN	5
+
+#if 0
+#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s:" format, \
+                                       __FILE__, __FUNCTION__ , ## args)
+#else
+#define DEBUGP(format, args...)
+#endif
+
+/* tries to get the ip_addr and port out of a dcc command
+ * return value: -1 on failure, 0 on success
+ *	data		pointer to first byte of DCC command data
+ *	data_end	pointer to last byte of dcc command data
+ *	ip		returns parsed ip of dcc command
+ *	port		returns parsed port of dcc command
+ *	ad_beg_p	returns pointer to first byte of addr data
+ *	ad_end_p	returns pointer to last byte of addr data
+ */
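+/* For illustration only: a complete request looks roughly like
+ *	"\1DCC SEND somefile 3232235777 6881\1\n"
+ * (3232235777 is 192.168.1.1 as a decimal 32-bit value). parse_dcc() is handed
+ * the part after the "SEND "/"CHAT " keyword, skips the first argument and
+ * then reads the decimal ip and port. */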
+static int parse_dcc(char *data, char *data_end, u_int32_t *ip,
+		     u_int16_t *port, char **ad_beg_p, char **ad_end_p)
+{
+	/* at least 12: "AAAAAAAA P\1\n" */
+	while (*data++ != ' ')
+		if (data > data_end - 12)
+			return -1;
+
+	*ad_beg_p = data;
+	*ip = simple_strtoul(data, &data, 10);
+
+	/* skip blanks between ip and port */
+	while (*data == ' ') {
+		if (data >= data_end)
+			return -1;
+		data++;
+	}
+
+	*port = simple_strtoul(data, &data, 10);
+	*ad_end_p = data;
+
+	return 0;
+}
+
+static int help(struct sk_buff **pskb, unsigned int protoff,
+		struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+	unsigned int dataoff;
+	struct tcphdr _tcph, *th;
+	char *data, *data_limit, *ib_ptr;
+	int dir = CTINFO2DIR(ctinfo);
+	struct nf_conntrack_expect *exp;
+	struct nf_conntrack_tuple *tuple;
+	u_int32_t dcc_ip;
+	u_int16_t dcc_port;
+	__be16 port;
+	int i, ret = NF_ACCEPT;
+	char *addr_beg_p, *addr_end_p;
+	typeof(nf_nat_irc_hook) nf_nat_irc;
+
+	/* If packet is coming from IRC server */
+	if (dir == IP_CT_DIR_REPLY)
+		return NF_ACCEPT;
+
+	/* Until there's been traffic both ways, don't look in packets. */
+	if (ctinfo != IP_CT_ESTABLISHED &&
+	    ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
+		return NF_ACCEPT;
+
+	/* Not a full tcp header? */
+	th = skb_header_pointer(*pskb, protoff, sizeof(_tcph), &_tcph);
+	if (th == NULL)
+		return NF_ACCEPT;
+
+	/* No data? */
+	dataoff = protoff + th->doff*4;
+	if (dataoff >= (*pskb)->len)
+		return NF_ACCEPT;
+
+	spin_lock_bh(&irc_buffer_lock);
+	ib_ptr = skb_header_pointer(*pskb, dataoff, (*pskb)->len - dataoff,
+				    irc_buffer);
+	BUG_ON(ib_ptr == NULL);
+
+	data = ib_ptr;
+	data_limit = ib_ptr + (*pskb)->len - dataoff;
+
+	/* A minimal DCC line, "\1DCC SEND t AAAAAAAA P\1\n", is 24 bytes:
+	 * 5 ("\1DCC ") + MINMATCHLEN ("SEND ") + 14 ("t AAAAAAAA P\1\n"),
+	 * hence the (19 + MINMATCHLEN) bound below. */
+	while (data < data_limit - (19 + MINMATCHLEN)) {
+		if (memcmp(data, "\1DCC ", 5)) {
+			data++;
+			continue;
+		}
+		data += 5;
+		/* we have at least (19+MINMATCHLEN)-5 bytes valid data left */
+
+		DEBUGP("DCC found in master %u.%u.%u.%u:%u %u.%u.%u.%u:%u...\n",
+			NIPQUAD(iph->saddr), ntohs(th->source),
+			NIPQUAD(iph->daddr), ntohs(th->dest));
+
+		for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
+			if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) {
+				/* no match */
+				continue;
+			}
+			data += strlen(dccprotos[i]);
+			DEBUGP("DCC %s detected\n", dccprotos[i]);
+
+			/* we have at least
+			 * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
+			 * data left (== 14/13 bytes) */
+			if (parse_dcc((char *)data, data_limit, &dcc_ip,
+				       &dcc_port, &addr_beg_p, &addr_end_p)) {
+				DEBUGP("unable to parse dcc command\n");
+				continue;
+			}
+			DEBUGP("DCC bound ip/port: %u.%u.%u.%u:%u\n",
+				HIPQUAD(dcc_ip), dcc_port);
+
+			/* dcc_ip can be the internal OR external (NAT'ed) IP */
+			tuple = &ct->tuplehash[dir].tuple;
+			if (tuple->src.u3.ip != htonl(dcc_ip) &&
+			    tuple->dst.u3.ip != htonl(dcc_ip)) {
+				if (net_ratelimit())
+					printk(KERN_WARNING
+						"Forged DCC command from "
+						"%u.%u.%u.%u: %u.%u.%u.%u:%u\n",
+						NIPQUAD(tuple->src.u3.ip),
+						HIPQUAD(dcc_ip), dcc_port);
+				continue;
+			}
+
+			exp = nf_conntrack_expect_alloc(ct);
+			if (exp == NULL) {
+				ret = NF_DROP;
+				goto out;
+			}
+			tuple = &ct->tuplehash[!dir].tuple;
+			port = htons(dcc_port);
+			nf_conntrack_expect_init(exp, tuple->src.l3num,
+						 NULL, &tuple->dst.u3,
+						 IPPROTO_TCP, NULL, &port);
+
+			nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
+			if (nf_nat_irc && ct->status & IPS_NAT_MASK)
+				ret = nf_nat_irc(pskb, ctinfo,
+						 addr_beg_p - ib_ptr,
+						 addr_end_p - addr_beg_p,
+						 exp);
+			else if (nf_conntrack_expect_related(exp) != 0)
+				ret = NF_DROP;
+			nf_conntrack_expect_put(exp);
+			goto out;
+		}
+	}
+ out:
+	spin_unlock_bh(&irc_buffer_lock);
+	return ret;
+}
+
+static struct nf_conntrack_helper irc[MAX_PORTS] __read_mostly;
+static char irc_names[MAX_PORTS][sizeof("irc-65535")] __read_mostly;
+
+static void nf_conntrack_irc_fini(void);
+
+static int __init nf_conntrack_irc_init(void)
+{
+	int i, ret;
+	char *tmpname;
+
+	if (max_dcc_channels < 1) {
+		printk("nf_ct_irc: max_dcc_channels must not be zero\n");
+		return -EINVAL;
+	}
+
+	irc_buffer = kmalloc(65536, GFP_KERNEL);
+	if (!irc_buffer)
+		return -ENOMEM;
+
+	/* If no port given, default to standard irc port */
+	if (ports_c == 0)
+		ports[ports_c++] = IRC_PORT;
+
+	for (i = 0; i < ports_c; i++) {
+		irc[i].tuple.src.l3num = AF_INET;
+		irc[i].tuple.src.u.tcp.port = htons(ports[i]);
+		irc[i].tuple.dst.protonum = IPPROTO_TCP;
+		irc[i].mask.src.l3num = 0xFFFF;
+		irc[i].mask.src.u.tcp.port = htons(0xFFFF);
+		irc[i].mask.dst.protonum = 0xFF;
+		irc[i].max_expected = max_dcc_channels;
+		irc[i].timeout = dcc_timeout;
+		irc[i].me = THIS_MODULE;
+		irc[i].help = help;
+
+		tmpname = &irc_names[i][0];
+		if (ports[i] == IRC_PORT)
+			sprintf(tmpname, "irc");
+		else
+			sprintf(tmpname, "irc-%u", ports[i]);
+		irc[i].name = tmpname;
+
+		ret = nf_conntrack_helper_register(&irc[i]);
+		if (ret) {
+			printk("nf_ct_irc: failed to register helper "
+			       "for pf: %u port: %u\n",
+			       irc[i].tuple.src.l3num, ports[i]);
+			nf_conntrack_irc_fini();
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/* This function is intentionally _NOT_ defined as __exit, because
+ * it is needed by the init function */
+static void nf_conntrack_irc_fini(void)
+{
+	int i;
+
+	for (i = 0; i < ports_c; i++)
+		nf_conntrack_helper_unregister(&irc[i]);
+	kfree(irc_buffer);
+}
+
+module_init(nf_conntrack_irc_init);
+module_exit(nf_conntrack_irc_fini);
diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c
index 21e0bc9..a3d31c3 100644
--- a/net/netfilter/nf_conntrack_l3proto_generic.c
+++ b/net/netfilter/nf_conntrack_l3proto_generic.c
@@ -26,7 +26,7 @@
 
 #include <linux/netfilter_ipv4.h>
 #include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
@@ -37,8 +37,6 @@
 #define DEBUGP(format, args...)
 #endif
 
-DECLARE_PER_CPU(struct nf_conntrack_stat, nf_conntrack_stat);
-
 static int generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
 				struct nf_conntrack_tuple *tuple)
 {
@@ -84,7 +82,7 @@
 	return NF_CT_F_BASIC;
 }
 
-struct nf_conntrack_l3proto nf_conntrack_generic_l3proto = {
+struct nf_conntrack_l3proto nf_conntrack_l3proto_generic = {
 	.l3proto	 = PF_UNSPEC,
 	.name		 = "unknown",
 	.pkt_to_tuple	 = generic_pkt_to_tuple,
@@ -94,3 +92,4 @@
 	.prepare	 = generic_prepare,
 	.get_features	 = generic_get_features,
 };
+EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_generic);
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c
new file mode 100644
index 0000000..a5b234e
--- /dev/null
+++ b/net/netfilter/nf_conntrack_netbios_ns.c
@@ -0,0 +1,126 @@
+/*
+ *      NetBIOS name service broadcast connection tracking helper
+ *
+ *      (c) 2005 Patrick McHardy <kaber@trash.net>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+/*
+ *      This helper tracks locally originating NetBIOS name service
+ *      requests by issuing permanent expectations (valid until
+ *      timing out) matching all reply connections from the
+ *      destination network. The only NetBIOS specific thing is
+ *      actually the port number.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/if_addr.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <net/route.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+#define NMBD_PORT	137
+
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("NetBIOS name service broadcast connection tracking helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ip_conntrack_netbios_ns");
+
+static unsigned int timeout __read_mostly = 3;
+module_param(timeout, uint, 0400);
+MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
+
+static int help(struct sk_buff **pskb, unsigned int protoff,
+                struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+	struct nf_conntrack_expect *exp;
+	struct iphdr *iph = (*pskb)->nh.iph;
+	struct rtable *rt = (struct rtable *)(*pskb)->dst;
+	struct in_device *in_dev;
+	__be32 mask = 0;
+
+	/* we're only interested in locally generated packets */
+	if ((*pskb)->sk == NULL)
+		goto out;
+	if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
+		goto out;
+	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+		goto out;
+
+	rcu_read_lock();
+	in_dev = __in_dev_get_rcu(rt->u.dst.dev);
+	if (in_dev != NULL) {
+		for_primary_ifa(in_dev) {
+			if (ifa->ifa_broadcast == iph->daddr) {
+				mask = ifa->ifa_mask;
+				break;
+			}
+		} endfor_ifa(in_dev);
+	}
+	rcu_read_unlock();
+
+	if (mask == 0)
+		goto out;
+
+	exp = nf_conntrack_expect_alloc(ct);
+	if (exp == NULL)
+		goto out;
+
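+	/* Expect UDP replies from port 137 of any host within the broadcast
+	 * subnet (mask taken from the outgoing interface), sent back to the
+	 * original requester. */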
+	exp->tuple                = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+	exp->tuple.src.u.udp.port = htons(NMBD_PORT);
+
+	exp->mask.src.u3.ip       = mask;
+	exp->mask.src.u.udp.port  = htons(0xFFFF);
+	exp->mask.dst.u3.ip       = htonl(0xFFFFFFFF);
+	exp->mask.dst.u.udp.port  = htons(0xFFFF);
+	exp->mask.dst.protonum    = 0xFF;
+
+	exp->expectfn             = NULL;
+	exp->flags                = NF_CT_EXPECT_PERMANENT;
+
+	nf_conntrack_expect_related(exp);
+	nf_conntrack_expect_put(exp);
+
+	nf_ct_refresh(ct, *pskb, timeout * HZ);
+out:
+	return NF_ACCEPT;
+}
+
+static struct nf_conntrack_helper helper __read_mostly = {
+	.name			= "netbios-ns",
+	.tuple.src.l3num	= AF_INET,
+	.tuple.src.u.udp.port	= __constant_htons(NMBD_PORT),
+	.tuple.dst.protonum	= IPPROTO_UDP,
+	.mask.src.l3num		= 0xFFFF,
+	.mask.src.u.udp.port	= __constant_htons(0xFFFF),
+	.mask.dst.protonum	= 0xFF,
+	.max_expected		= 1,
+	.me			= THIS_MODULE,
+	.help			= help,
+};
+
+static int __init nf_conntrack_netbios_ns_init(void)
+{
+	helper.timeout = timeout;
+	return nf_conntrack_helper_register(&helper);
+}
+
+static void __exit nf_conntrack_netbios_ns_fini(void)
+{
+	nf_conntrack_helper_unregister(&helper);
+}
+
+module_init(nf_conntrack_netbios_ns_init);
+module_exit(nf_conntrack_netbios_ns_fini);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index ab67c2b..bd1d2de 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -35,10 +35,15 @@
 #include <linux/netfilter.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_expect.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
-#include <net/netfilter/nf_conntrack_protocol.h>
-#include <linux/netfilter_ipv4/ip_nat_protocol.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#ifdef CONFIG_NF_NAT_NEEDED
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_protocol.h>
+#endif
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -50,15 +55,15 @@
 static inline int
 ctnetlink_dump_tuples_proto(struct sk_buff *skb, 
 			    const struct nf_conntrack_tuple *tuple,
-			    struct nf_conntrack_protocol *proto)
+			    struct nf_conntrack_l4proto *l4proto)
 {
 	int ret = 0;
 	struct nfattr *nest_parms = NFA_NEST(skb, CTA_TUPLE_PROTO);
 
 	NFA_PUT(skb, CTA_PROTO_NUM, sizeof(u_int8_t), &tuple->dst.protonum);
 
-	if (likely(proto->tuple_to_nfattr))
-		ret = proto->tuple_to_nfattr(skb, tuple);
+	if (likely(l4proto->tuple_to_nfattr))
+		ret = l4proto->tuple_to_nfattr(skb, tuple);
 	
 	NFA_NEST_END(skb, nest_parms);
 
@@ -93,7 +98,7 @@
 {
 	int ret;
 	struct nf_conntrack_l3proto *l3proto;
-	struct nf_conntrack_protocol *proto;
+	struct nf_conntrack_l4proto *l4proto;
 
 	l3proto = nf_ct_l3proto_find_get(tuple->src.l3num);
 	ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);
@@ -102,9 +107,9 @@
 	if (unlikely(ret < 0))
 		return ret;
 
-	proto = nf_ct_proto_find_get(tuple->src.l3num, tuple->dst.protonum);
-	ret = ctnetlink_dump_tuples_proto(skb, tuple, proto);
-	nf_ct_proto_put(proto);
+	l4proto = nf_ct_l4proto_find_get(tuple->src.l3num, tuple->dst.protonum);
+	ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
+	nf_ct_l4proto_put(l4proto);
 
 	return ret;
 }
@@ -112,7 +117,7 @@
 static inline int
 ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
 {
-	u_int32_t status = htonl((u_int32_t) ct->status);
+	__be32 status = htonl((u_int32_t) ct->status);
 	NFA_PUT(skb, CTA_STATUS, sizeof(status), &status);
 	return 0;
 
@@ -124,7 +129,7 @@
 ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
 {
 	long timeout_l = ct->timeout.expires - jiffies;
-	u_int32_t timeout;
+	__be32 timeout;
 
 	if (timeout_l < 0)
 		timeout = 0;
@@ -141,27 +146,27 @@
 static inline int
 ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct nf_conn *ct)
 {
-	struct nf_conntrack_protocol *proto = nf_ct_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
+	struct nf_conntrack_l4proto *l4proto = nf_ct_l4proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
 	struct nfattr *nest_proto;
 	int ret;
 
-	if (!proto->to_nfattr) {
-		nf_ct_proto_put(proto);
+	if (!l4proto->to_nfattr) {
+		nf_ct_l4proto_put(l4proto);
 		return 0;
 	}
 	
 	nest_proto = NFA_NEST(skb, CTA_PROTOINFO);
 
-	ret = proto->to_nfattr(skb, nest_proto, ct);
+	ret = l4proto->to_nfattr(skb, nest_proto, ct);
 
-	nf_ct_proto_put(proto);
+	nf_ct_l4proto_put(l4proto);
 
 	NFA_NEST_END(skb, nest_proto);
 
 	return ret;
 
 nfattr_failure:
-	nf_ct_proto_put(proto);
+	nf_ct_l4proto_put(l4proto);
 	return -1;
 }
 
@@ -195,7 +200,7 @@
 {
 	enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
 	struct nfattr *nest_count = NFA_NEST(skb, type);
-	u_int32_t tmp;
+	__be32 tmp;
 
 	tmp = htonl(ct->counters[dir].packets);
 	NFA_PUT(skb, CTA_COUNTERS32_PACKETS, sizeof(u_int32_t), &tmp);
@@ -218,7 +223,7 @@
 static inline int
 ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
 {
-	u_int32_t mark = htonl(ct->mark);
+	__be32 mark = htonl(ct->mark);
 
 	NFA_PUT(skb, CTA_MARK, sizeof(u_int32_t), &mark);
 	return 0;
@@ -233,7 +238,7 @@
 static inline int
 ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
 {
-	u_int32_t id = htonl(ct->id);
+	__be32 id = htonl(ct->id);
 	NFA_PUT(skb, CTA_ID, sizeof(u_int32_t), &id);
 	return 0;
 
@@ -244,7 +249,7 @@
 static inline int
 ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
 {
-	u_int32_t use = htonl(atomic_read(&ct->ct_general.use));
+	__be32 use = htonl(atomic_read(&ct->ct_general.use));
 	
 	NFA_PUT(skb, CTA_USE, sizeof(u_int32_t), &use);
 	return 0;
@@ -330,8 +335,6 @@
 	} else  if (events & (IPCT_NEW | IPCT_RELATED)) {
 		type = IPCTNL_MSG_CT_NEW;
 		flags = NLM_F_CREATE|NLM_F_EXCL;
-		/* dump everything */
-		events = ~0UL;
 		group = NFNLGRP_CONNTRACK_NEW;
 	} else  if (events & (IPCT_STATUS | IPCT_PROTOINFO)) {
 		type = IPCTNL_MSG_CT_NEW;
@@ -366,28 +369,35 @@
 	if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0)
 		goto nfattr_failure;
 	NFA_NEST_END(skb, nest_parms);
-	
-	/* NAT stuff is now a status flag */
-	if ((events & IPCT_STATUS || events & IPCT_NATINFO)
-	    && ctnetlink_dump_status(skb, ct) < 0)
-		goto nfattr_failure;
-	if (events & IPCT_REFRESH
-	    && ctnetlink_dump_timeout(skb, ct) < 0)
-		goto nfattr_failure;
-	if (events & IPCT_PROTOINFO
-	    && ctnetlink_dump_protoinfo(skb, ct) < 0)
-		goto nfattr_failure;
-	if (events & IPCT_HELPINFO
-	    && ctnetlink_dump_helpinfo(skb, ct) < 0)
-		goto nfattr_failure;
 
-	if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
-	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
-		goto nfattr_failure;
+	if (events & IPCT_DESTROY) {
+		if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
+		    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
+			goto nfattr_failure;
+	} else {
+		if (ctnetlink_dump_status(skb, ct) < 0)
+			goto nfattr_failure;
 
-	if (events & IPCT_MARK
-	    && ctnetlink_dump_mark(skb, ct) < 0)
-		goto nfattr_failure;
+		if (ctnetlink_dump_timeout(skb, ct) < 0)
+			goto nfattr_failure;
+
+		if (events & IPCT_PROTOINFO
+		    && ctnetlink_dump_protoinfo(skb, ct) < 0)
+		    	goto nfattr_failure;
+
+		if ((events & IPCT_HELPER || nfct_help(ct))
+		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
+		    	goto nfattr_failure;
+
+		if ((events & IPCT_MARK || ct->mark)
+		    && ctnetlink_dump_mark(skb, ct) < 0)
+		    	goto nfattr_failure;
+
+		if (events & IPCT_COUNTER_FILLING &&
+		    (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
+		     ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0))
+			goto nfattr_failure;
+	}
 
 	nlh->nlmsg_len = skb->tail - b;
 	nfnetlink_send(skb, 0, group, 0);
@@ -424,7 +434,7 @@
 restart:
 		list_for_each_prev(i, &nf_conntrack_hash[cb->args[0]]) {
 			h = (struct nf_conntrack_tuple_hash *) i;
-			if (DIRECTION(h) != IP_CT_DIR_ORIGINAL)
+			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 				continue;
 			ct = nf_ct_tuplehash_to_ctrack(h);
 			/* Dump entries of a given L3 protocol number.
@@ -492,7 +502,7 @@
 			    struct nf_conntrack_tuple *tuple)
 {
 	struct nfattr *tb[CTA_PROTO_MAX];
-	struct nf_conntrack_protocol *proto;
+	struct nf_conntrack_l4proto *l4proto;
 	int ret = 0;
 
 	nfattr_parse_nested(tb, CTA_PROTO_MAX, attr);
@@ -504,12 +514,12 @@
 		return -EINVAL;
 	tuple->dst.protonum = *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_NUM-1]);
 
-	proto = nf_ct_proto_find_get(tuple->src.l3num, tuple->dst.protonum);
+	l4proto = nf_ct_l4proto_find_get(tuple->src.l3num, tuple->dst.protonum);
 
-	if (likely(proto->nfattr_to_tuple))
-		ret = proto->nfattr_to_tuple(tb, tuple);
+	if (likely(l4proto->nfattr_to_tuple))
+		ret = l4proto->nfattr_to_tuple(tb, tuple);
 
-	nf_ct_proto_put(proto);
+	nf_ct_l4proto_put(l4proto);
 	
 	return ret;
 }
@@ -550,28 +560,28 @@
 	return 0;
 }
 
-#ifdef CONFIG_IP_NF_NAT_NEEDED
+#ifdef CONFIG_NF_NAT_NEEDED
 static const size_t cta_min_protonat[CTA_PROTONAT_MAX] = {
 	[CTA_PROTONAT_PORT_MIN-1]       = sizeof(u_int16_t),
 	[CTA_PROTONAT_PORT_MAX-1]       = sizeof(u_int16_t),
 };
 
-static int ctnetlink_parse_nat_proto(struct nfattr *attr,
+static int nfnetlink_parse_nat_proto(struct nfattr *attr,
 				     const struct nf_conn *ct,
-				     struct ip_nat_range *range)
+				     struct nf_nat_range *range)
 {
 	struct nfattr *tb[CTA_PROTONAT_MAX];
-	struct ip_nat_protocol *npt;
+	struct nf_nat_protocol *npt;
 
 	nfattr_parse_nested(tb, CTA_PROTONAT_MAX, attr);
 
 	if (nfattr_bad_size(tb, CTA_PROTONAT_MAX, cta_min_protonat))
 		return -EINVAL;
 
-	npt = ip_nat_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
+	npt = nf_nat_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
 
 	if (!npt->nfattr_to_range) {
-		ip_nat_proto_put(npt);
+		nf_nat_proto_put(npt);
 		return 0;
 	}
 
@@ -579,7 +589,7 @@
 	if (npt->nfattr_to_range(tb, range) > 0)
 		range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
 
-	ip_nat_proto_put(npt);
+	nf_nat_proto_put(npt);
 
 	return 0;
 }
@@ -590,8 +600,8 @@
 };
 
 static inline int
-ctnetlink_parse_nat(struct nfattr *nat,
-		    const struct nf_conn *ct, struct ip_nat_range *range)
+nfnetlink_parse_nat(struct nfattr *nat,
+		    const struct nf_conn *ct, struct nf_nat_range *range)
 {
 	struct nfattr *tb[CTA_NAT_MAX];
 	int err;
@@ -604,12 +614,12 @@
 		return -EINVAL;
 
 	if (tb[CTA_NAT_MINIP-1])
-		range->min_ip = *(u_int32_t *)NFA_DATA(tb[CTA_NAT_MINIP-1]);
+		range->min_ip = *(__be32 *)NFA_DATA(tb[CTA_NAT_MINIP-1]);
 
 	if (!tb[CTA_NAT_MAXIP-1])
 		range->max_ip = range->min_ip;
 	else
-		range->max_ip = *(u_int32_t *)NFA_DATA(tb[CTA_NAT_MAXIP-1]);
+		range->max_ip = *(__be32 *)NFA_DATA(tb[CTA_NAT_MAXIP-1]);
 
 	if (range->min_ip)
 		range->flags |= IP_NAT_RANGE_MAP_IPS;
@@ -617,7 +627,7 @@
 	if (!tb[CTA_NAT_PROTO-1])
 		return 0;
 
-	err = ctnetlink_parse_nat_proto(tb[CTA_NAT_PROTO-1], ct, range);
+	err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO-1], ct, range);
 	if (err < 0)
 		return err;
 
@@ -682,7 +692,7 @@
 	ct = nf_ct_tuplehash_to_ctrack(h);
 	
 	if (cda[CTA_ID-1]) {
-		u_int32_t id = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_ID-1]));
+		u_int32_t id = ntohl(*(__be32 *)NFA_DATA(cda[CTA_ID-1]));
 		if (ct->id != id) {
 			nf_ct_put(ct);
 			return -ENOENT;
@@ -752,7 +762,6 @@
 		nf_ct_put(ct);
 		return -ENOMEM;
 	}
-	NETLINK_CB(skb2).dst_pid = NETLINK_CB(skb).pid;
 
 	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 
 				  IPCTNL_MSG_CT_NEW, 1, ct);
@@ -776,7 +785,7 @@
 ctnetlink_change_status(struct nf_conn *ct, struct nfattr *cda[])
 {
 	unsigned long d;
-	unsigned status = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_STATUS-1]));
+	unsigned int status = ntohl(*(__be32 *)NFA_DATA(cda[CTA_STATUS-1]));
 	d = ct->status ^ status;
 
 	if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
@@ -793,35 +802,35 @@
 		return -EINVAL;
 
 	if (cda[CTA_NAT_SRC-1] || cda[CTA_NAT_DST-1]) {
-#ifndef CONFIG_IP_NF_NAT_NEEDED
+#ifndef CONFIG_NF_NAT_NEEDED
 		return -EINVAL;
 #else
-		struct ip_nat_range range;
+		struct nf_nat_range range;
 
 		if (cda[CTA_NAT_DST-1]) {
-			if (ctnetlink_parse_nat(cda[CTA_NAT_DST-1], ct,
+			if (nfnetlink_parse_nat(cda[CTA_NAT_DST-1], ct,
 						&range) < 0)
 				return -EINVAL;
-			if (ip_nat_initialized(ct,
+			if (nf_nat_initialized(ct,
 					       HOOK2MANIP(NF_IP_PRE_ROUTING)))
 				return -EEXIST;
-			ip_nat_setup_info(ct, &range, hooknum);
+			nf_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING);
 		}
 		if (cda[CTA_NAT_SRC-1]) {
-			if (ctnetlink_parse_nat(cda[CTA_NAT_SRC-1], ct,
+			if (nfnetlink_parse_nat(cda[CTA_NAT_SRC-1], ct,
 						&range) < 0)
 				return -EINVAL;
-			if (ip_nat_initialized(ct,
+			if (nf_nat_initialized(ct,
 					       HOOK2MANIP(NF_IP_POST_ROUTING)))
 				return -EEXIST;
-			ip_nat_setup_info(ct, &range, hooknum);
+			nf_nat_setup_info(ct, &range, NF_IP_POST_ROUTING);
 		}
 #endif
 	}
 
 	/* Be careful here, modifying NAT bits can screw up things,
 	 * so don't let users modify them directly if they don't pass
-	 * ip_nat_range. */
+	 * nf_nat_range. */
 	ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
 	return 0;
 }
@@ -875,7 +884,7 @@
 static inline int
 ctnetlink_change_timeout(struct nf_conn *ct, struct nfattr *cda[])
 {
-	u_int32_t timeout = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_TIMEOUT-1]));
+	u_int32_t timeout = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1]));
 	
 	if (!del_timer(&ct->timeout))
 		return -ETIME;
@@ -890,18 +899,18 @@
 ctnetlink_change_protoinfo(struct nf_conn *ct, struct nfattr *cda[])
 {
 	struct nfattr *tb[CTA_PROTOINFO_MAX], *attr = cda[CTA_PROTOINFO-1];
-	struct nf_conntrack_protocol *proto;
+	struct nf_conntrack_l4proto *l4proto;
 	u_int16_t npt = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
 	u_int16_t l3num = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
 	int err = 0;
 
 	nfattr_parse_nested(tb, CTA_PROTOINFO_MAX, attr);
 
-	proto = nf_ct_proto_find_get(l3num, npt);
+	l4proto = nf_ct_l4proto_find_get(l3num, npt);
 
-	if (proto->from_nfattr)
-		err = proto->from_nfattr(tb, ct);
-	nf_ct_proto_put(proto); 
+	if (l4proto->from_nfattr)
+		err = l4proto->from_nfattr(tb, ct);
+	nf_ct_l4proto_put(l4proto);
 
 	return err;
 }
@@ -937,7 +946,7 @@
 
 #if defined(CONFIG_NF_CONNTRACK_MARK)
 	if (cda[CTA_MARK-1])
-		ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1]));
+		ct->mark = ntohl(*(__be32 *)NFA_DATA(cda[CTA_MARK-1]));
 #endif
 
 	return 0;
@@ -958,14 +967,16 @@
 
 	if (!cda[CTA_TIMEOUT-1])
 		goto err;
-	ct->timeout.expires = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_TIMEOUT-1]));
+	ct->timeout.expires = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1]));
 
 	ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
 	ct->status |= IPS_CONFIRMED;
 
-	err = ctnetlink_change_status(ct, cda);
-	if (err < 0)
-		goto err;
+	if (cda[CTA_STATUS-1]) {
+		err = ctnetlink_change_status(ct, cda);
+		if (err < 0)
+			goto err;
+	}
 
 	if (cda[CTA_PROTOINFO-1]) {
 		err = ctnetlink_change_protoinfo(ct, cda);
@@ -975,7 +986,7 @@
 
 #if defined(CONFIG_NF_CONNTRACK_MARK)
 	if (cda[CTA_MARK-1])
-		ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1]));
+		ct->mark = ntohl(*(__be32 *)NFA_DATA(cda[CTA_MARK-1]));
 #endif
 
 	help = nfct_help(ct);
@@ -1081,7 +1092,7 @@
 {
 	int ret;
 	struct nf_conntrack_l3proto *l3proto;
-	struct nf_conntrack_protocol *proto;
+	struct nf_conntrack_l4proto *l4proto;
 	struct nfattr *nest_parms = NFA_NEST(skb, CTA_EXPECT_MASK);
 
 	l3proto = nf_ct_l3proto_find_get(tuple->src.l3num);
@@ -1091,9 +1102,9 @@
 	if (unlikely(ret < 0))
 		goto nfattr_failure;
 
-	proto = nf_ct_proto_find_get(tuple->src.l3num, tuple->dst.protonum);
-	ret = ctnetlink_dump_tuples_proto(skb, mask, proto);
-	nf_ct_proto_put(proto);
+	l4proto = nf_ct_l4proto_find_get(tuple->src.l3num, tuple->dst.protonum);
+	ret = ctnetlink_dump_tuples_proto(skb, mask, l4proto);
+	nf_ct_l4proto_put(l4proto);
 	if (unlikely(ret < 0))
 		goto nfattr_failure;
 
@@ -1110,8 +1121,8 @@
                           const struct nf_conntrack_expect *exp)
 {
 	struct nf_conn *master = exp->master;
-	u_int32_t timeout = htonl((exp->timeout.expires - jiffies) / HZ);
-	u_int32_t id = htonl(exp->id);
+	__be32 timeout = htonl((exp->timeout.expires - jiffies) / HZ);
+	__be32 id = htonl(exp->id);
 
 	if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
 		goto nfattr_failure;
@@ -1284,12 +1295,12 @@
 	if (err < 0)
 		return err;
 
-	exp = nf_conntrack_expect_find(&tuple);
+	exp = nf_conntrack_expect_find_get(&tuple);
 	if (!exp)
 		return -ENOENT;
 
 	if (cda[CTA_EXPECT_ID-1]) {
-		u_int32_t id = *(u_int32_t *)NFA_DATA(cda[CTA_EXPECT_ID-1]);
+		__be32 id = *(__be32 *)NFA_DATA(cda[CTA_EXPECT_ID-1]);
 		if (exp->id != ntohl(id)) {
 			nf_conntrack_expect_put(exp);
 			return -ENOENT;
@@ -1300,8 +1311,7 @@
 	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (!skb2)
 		goto out;
-	NETLINK_CB(skb2).dst_pid = NETLINK_CB(skb).pid;
-	
+
 	err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, 
 				      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW,
 				      1, exp);
@@ -1340,13 +1350,12 @@
 			return err;
 
 		/* bump usage count to 2 */
-		exp = nf_conntrack_expect_find(&tuple);
+		exp = nf_conntrack_expect_find_get(&tuple);
 		if (!exp)
 			return -ENOENT;
 
 		if (cda[CTA_EXPECT_ID-1]) {
-			u_int32_t id = 
-				*(u_int32_t *)NFA_DATA(cda[CTA_EXPECT_ID-1]);
+			__be32 id = *(__be32 *)NFA_DATA(cda[CTA_EXPECT_ID-1]);
 			if (exp->id != ntohl(id)) {
 				nf_conntrack_expect_put(exp);
 				return -ENOENT;
@@ -1442,6 +1451,7 @@
 	exp->expectfn = NULL;
 	exp->flags = 0;
 	exp->master = ct;
+	exp->helper = NULL;
 	memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
 	memcpy(&exp->mask, &mask, sizeof(struct nf_conntrack_tuple));
 
@@ -1538,6 +1548,7 @@
 	.cb				= ctnl_exp_cb,
 };
 
+MODULE_ALIAS("ip_conntrack_netlink");
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
 
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
new file mode 100644
index 0000000..f0ff00e
--- /dev/null
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -0,0 +1,607 @@
+/*
+ * Connection tracking support for PPTP (Point to Point Tunneling Protocol).
+ * PPTP is a protocol for creating virtual private networks.
+ * It is a specification defined by Microsoft and some vendors
+ * working with Microsoft.  PPTP is built on top of a modified
+ * version of the Internet Generic Routing Encapsulation Protocol.
+ * GRE is defined in RFC 1701 and RFC 1702.  Documentation of
+ * PPTP can be found in RFC 2637.
+ *
+ * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ *
+ * Limitations:
+ * 	 - We blindly assume that control connections are always
+ * 	   established in PNS->PAC direction.  This is a violation
+ * 	   of RFC 2637
+ * 	 - We can only support a single call within each session
+ * TODO:
+ *	 - testing of incoming PPTP calls
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/netfilter/nf_conntrack_proto_gre.h>
+#include <linux/netfilter/nf_conntrack_pptp.h>
+
+#define NF_CT_PPTP_VERSION "3.1"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
+MODULE_DESCRIPTION("Netfilter connection tracking helper module for PPTP");
+MODULE_ALIAS("ip_conntrack_pptp");
+
+static DEFINE_SPINLOCK(nf_pptp_lock);
+
+int
+(*nf_nat_pptp_hook_outbound)(struct sk_buff **pskb,
+			     struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			     struct PptpControlHeader *ctlh,
+			     union pptp_ctrl_union *pptpReq) __read_mostly;
+EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_outbound);
+
+int
+(*nf_nat_pptp_hook_inbound)(struct sk_buff **pskb,
+			    struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			    struct PptpControlHeader *ctlh,
+			    union pptp_ctrl_union *pptpReq) __read_mostly;
+EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_inbound);
+
+void
+(*nf_nat_pptp_hook_exp_gre)(struct nf_conntrack_expect *expect_orig,
+			    struct nf_conntrack_expect *expect_reply)
+			    __read_mostly;
+EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_exp_gre);
+
+void
+(*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct,
+			     struct nf_conntrack_expect *exp) __read_mostly;
+EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn);
+
+#if 0
+/* PptpControlMessageType names */
+const char *pptp_msg_name[] = {
+	"UNKNOWN_MESSAGE",
+	"START_SESSION_REQUEST",
+	"START_SESSION_REPLY",
+	"STOP_SESSION_REQUEST",
+	"STOP_SESSION_REPLY",
+	"ECHO_REQUEST",
+	"ECHO_REPLY",
+	"OUT_CALL_REQUEST",
+	"OUT_CALL_REPLY",
+	"IN_CALL_REQUEST",
+	"IN_CALL_REPLY",
+	"IN_CALL_CONNECT",
+	"CALL_CLEAR_REQUEST",
+	"CALL_DISCONNECT_NOTIFY",
+	"WAN_ERROR_NOTIFY",
+	"SET_LINK_INFO"
+};
+EXPORT_SYMBOL(pptp_msg_name);
+#define DEBUGP(format, args...)	printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args)
+#else
+#define DEBUGP(format, args...)
+#endif
+
+#define SECS *HZ
+#define MINS * 60 SECS
+#define HOURS * 60 MINS
+
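+/* The unit macros above expand multiplicatively: (10 MINS) below becomes
+ * (10 * 60 * HZ) jiffies and (5 HOURS) becomes (5 * 60 * 60 * HZ). */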
+#define PPTP_GRE_TIMEOUT 		(10 MINS)
+#define PPTP_GRE_STREAM_TIMEOUT 	(5 HOURS)
+
+static void pptp_expectfn(struct nf_conn *ct,
+			 struct nf_conntrack_expect *exp)
+{
+	typeof(nf_nat_pptp_hook_expectfn) nf_nat_pptp_expectfn;
+	DEBUGP("increasing timeouts\n");
+
+	/* increase timeout of GRE data channel conntrack entry */
+	ct->proto.gre.timeout	     = PPTP_GRE_TIMEOUT;
+	ct->proto.gre.stream_timeout = PPTP_GRE_STREAM_TIMEOUT;
+
+	/* Can you see how rusty this code is, compared with the pre-2.6.11
+	 * one? That's what happened to my shiny newnat of 2002 ;( -HW */
+
+	rcu_read_lock();
+	nf_nat_pptp_expectfn = rcu_dereference(nf_nat_pptp_hook_expectfn);
+	if (nf_nat_pptp_expectfn && ct->status & IPS_NAT_MASK)
+		nf_nat_pptp_expectfn(ct, exp);
+	else {
+		struct nf_conntrack_tuple inv_t;
+		struct nf_conntrack_expect *exp_other;
+
+		/* obviously this tuple inversion only works until you do NAT */
+		nf_ct_invert_tuplepr(&inv_t, &exp->tuple);
+		DEBUGP("trying to unexpect other dir: ");
+		NF_CT_DUMP_TUPLE(&inv_t);
+
+		exp_other = nf_conntrack_expect_find_get(&inv_t);
+		if (exp_other) {
+			/* delete other expectation.  */
+			DEBUGP("found\n");
+			nf_conntrack_unexpect_related(exp_other);
+			nf_conntrack_expect_put(exp_other);
+		} else {
+			DEBUGP("not found\n");
+		}
+	}
+	rcu_read_unlock();
+}
+
+static int destroy_sibling_or_exp(const struct nf_conntrack_tuple *t)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conntrack_expect *exp;
+	struct nf_conn *sibling;
+
+	DEBUGP("trying to timeout ct or exp for tuple ");
+	NF_CT_DUMP_TUPLE(t);
+
+	h = nf_conntrack_find_get(t, NULL);
+	if (h)  {
+		sibling = nf_ct_tuplehash_to_ctrack(h);
+		DEBUGP("setting timeout of conntrack %p to 0\n", sibling);
+		sibling->proto.gre.timeout	  = 0;
+		sibling->proto.gre.stream_timeout = 0;
+		if (del_timer(&sibling->timeout))
+			sibling->timeout.function((unsigned long)sibling);
+		nf_ct_put(sibling);
+		return 1;
+	} else {
+		exp = nf_conntrack_expect_find_get(t);
+		if (exp) {
+			DEBUGP("unexpect_related of expect %p\n", exp);
+			nf_conntrack_unexpect_related(exp);
+			nf_conntrack_expect_put(exp);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/* timeout GRE data connections */
+static void pptp_destroy_siblings(struct nf_conn *ct)
+{
+	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_conntrack_tuple t;
+
+	nf_ct_gre_keymap_destroy(ct);
+
+	/* try original (pns->pac) tuple */
+	memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t));
+	t.dst.protonum = IPPROTO_GRE;
+	t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id;
+	t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id;
+	if (!destroy_sibling_or_exp(&t))
+		DEBUGP("failed to timeout original pns->pac ct/exp\n");
+
+	/* try reply (pac->pns) tuple */
+	memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t));
+	t.dst.protonum = IPPROTO_GRE;
+	t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id;
+	t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id;
+	if (!destroy_sibling_or_exp(&t))
+		DEBUGP("failed to timeout reply pac->pns ct/exp\n");
+}
+
+/* expect GRE connections (PNS->PAC and PAC->PNS direction) */
+static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid)
+{
+	struct nf_conntrack_expect *exp_orig, *exp_reply;
+	enum ip_conntrack_dir dir;
+	int ret = 1;
+	typeof(nf_nat_pptp_hook_exp_gre) nf_nat_pptp_exp_gre;
+
+	exp_orig = nf_conntrack_expect_alloc(ct);
+	if (exp_orig == NULL)
+		goto out;
+
+	exp_reply = nf_conntrack_expect_alloc(ct);
+	if (exp_reply == NULL)
+		goto out_put_orig;
+
+	/* original direction, PNS->PAC */
+	dir = IP_CT_DIR_ORIGINAL;
+	nf_conntrack_expect_init(exp_orig, ct->tuplehash[dir].tuple.src.l3num,
+				 &ct->tuplehash[dir].tuple.src.u3,
+				 &ct->tuplehash[dir].tuple.dst.u3,
+				 IPPROTO_GRE, &peer_callid, &callid);
+	exp_orig->expectfn = pptp_expectfn;
+
+	/* reply direction, PAC->PNS */
+	dir = IP_CT_DIR_REPLY;
+	nf_conntrack_expect_init(exp_reply, ct->tuplehash[dir].tuple.src.l3num,
+				 &ct->tuplehash[dir].tuple.src.u3,
+				 &ct->tuplehash[dir].tuple.dst.u3,
+				 IPPROTO_GRE, &callid, &peer_callid);
+	exp_reply->expectfn = pptp_expectfn;
+
+	nf_nat_pptp_exp_gre = rcu_dereference(nf_nat_pptp_hook_exp_gre);
+	if (nf_nat_pptp_exp_gre && ct->status & IPS_NAT_MASK)
+		nf_nat_pptp_exp_gre(exp_orig, exp_reply);
+	if (nf_conntrack_expect_related(exp_orig) != 0)
+		goto out_put_both;
+	if (nf_conntrack_expect_related(exp_reply) != 0)
+		goto out_unexpect_orig;
+
+	/* Add GRE keymap entries */
+	if (nf_ct_gre_keymap_add(ct, IP_CT_DIR_ORIGINAL, &exp_orig->tuple) != 0)
+		goto out_unexpect_both;
+	if (nf_ct_gre_keymap_add(ct, IP_CT_DIR_REPLY, &exp_reply->tuple) != 0) {
+		nf_ct_gre_keymap_destroy(ct);
+		goto out_unexpect_both;
+	}
+	ret = 0;
+
+out_put_both:
+	nf_conntrack_expect_put(exp_reply);
+out_put_orig:
+	nf_conntrack_expect_put(exp_orig);
+out:
+	return ret;
+
+out_unexpect_both:
+	nf_conntrack_unexpect_related(exp_reply);
+out_unexpect_orig:
+	nf_conntrack_unexpect_related(exp_orig);
+	goto out_put_both;
+}
+
+static inline int
+pptp_inbound_pkt(struct sk_buff **pskb,
+		 struct PptpControlHeader *ctlh,
+		 union pptp_ctrl_union *pptpReq,
+		 unsigned int reqlen,
+		 struct nf_conn *ct,
+		 enum ip_conntrack_info ctinfo)
+{
+	struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info;
+	u_int16_t msg;
+	__be16 cid = 0, pcid = 0;
+	typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound;
+
+	msg = ntohs(ctlh->messageType);
+	DEBUGP("inbound control message %s\n", pptp_msg_name[msg]);
+
+	switch (msg) {
+	case PPTP_START_SESSION_REPLY:
+		/* server confirms new control session */
+		if (info->sstate < PPTP_SESSION_REQUESTED)
+			goto invalid;
+		if (pptpReq->srep.resultCode == PPTP_START_OK)
+			info->sstate = PPTP_SESSION_CONFIRMED;
+		else
+			info->sstate = PPTP_SESSION_ERROR;
+		break;
+
+	case PPTP_STOP_SESSION_REPLY:
+		/* server confirms end of control session */
+		if (info->sstate > PPTP_SESSION_STOPREQ)
+			goto invalid;
+		if (pptpReq->strep.resultCode == PPTP_STOP_OK)
+			info->sstate = PPTP_SESSION_NONE;
+		else
+			info->sstate = PPTP_SESSION_ERROR;
+		break;
+
+	case PPTP_OUT_CALL_REPLY:
+		/* server accepted call, we now expect GRE frames */
+		if (info->sstate != PPTP_SESSION_CONFIRMED)
+			goto invalid;
+		if (info->cstate != PPTP_CALL_OUT_REQ &&
+		    info->cstate != PPTP_CALL_OUT_CONF)
+			goto invalid;
+
+		cid = pptpReq->ocack.callID;
+		pcid = pptpReq->ocack.peersCallID;
+		if (info->pns_call_id != pcid)
+			goto invalid;
+		DEBUGP("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg],
+			ntohs(cid), ntohs(pcid));
+
+		if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) {
+			info->cstate = PPTP_CALL_OUT_CONF;
+			info->pac_call_id = cid;
+			exp_gre(ct, cid, pcid);
+		} else
+			info->cstate = PPTP_CALL_NONE;
+		break;
+
+	case PPTP_IN_CALL_REQUEST:
+		/* server tells us about incoming call request */
+		if (info->sstate != PPTP_SESSION_CONFIRMED)
+			goto invalid;
+
+		cid = pptpReq->icreq.callID;
+		DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
+		info->cstate = PPTP_CALL_IN_REQ;
+		info->pac_call_id = cid;
+		break;
+
+	case PPTP_IN_CALL_CONNECT:
+		/* server tells us about incoming call established */
+		if (info->sstate != PPTP_SESSION_CONFIRMED)
+			goto invalid;
+		if (info->cstate != PPTP_CALL_IN_REP &&
+		    info->cstate != PPTP_CALL_IN_CONF)
+			goto invalid;
+
+		pcid = pptpReq->iccon.peersCallID;
+		cid = info->pac_call_id;
+
+		if (info->pns_call_id != pcid)
+			goto invalid;
+
+		DEBUGP("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid));
+		info->cstate = PPTP_CALL_IN_CONF;
+
+		/* we expect a GRE connection from PAC to PNS */
+		exp_gre(ct, cid, pcid);
+		break;
+
+	case PPTP_CALL_DISCONNECT_NOTIFY:
+		/* server confirms disconnect */
+		cid = pptpReq->disc.callID;
+		DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
+		info->cstate = PPTP_CALL_NONE;
+
+		/* untrack this call id, unexpect GRE packets */
+		pptp_destroy_siblings(ct);
+		break;
+
+	case PPTP_WAN_ERROR_NOTIFY:
+	case PPTP_ECHO_REQUEST:
+	case PPTP_ECHO_REPLY:
+		/* I don't have to explain these ;) */
+		break;
+
+	default:
+		goto invalid;
+	}
+
+	nf_nat_pptp_inbound = rcu_dereference(nf_nat_pptp_hook_inbound);
+	if (nf_nat_pptp_inbound && ct->status & IPS_NAT_MASK)
+		return nf_nat_pptp_inbound(pskb, ct, ctinfo, ctlh, pptpReq);
+	return NF_ACCEPT;
+
+invalid:
+	DEBUGP("invalid %s: type=%d cid=%u pcid=%u "
+	       "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
+	       msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
+	       msg, ntohs(cid), ntohs(pcid),  info->cstate, info->sstate,
+	       ntohs(info->pns_call_id), ntohs(info->pac_call_id));
+	return NF_ACCEPT;
+}
+
+static inline int
+pptp_outbound_pkt(struct sk_buff **pskb,
+		  struct PptpControlHeader *ctlh,
+		  union pptp_ctrl_union *pptpReq,
+		  unsigned int reqlen,
+		  struct nf_conn *ct,
+		  enum ip_conntrack_info ctinfo)
+{
+	struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info;
+	u_int16_t msg;
+	__be16 cid = 0, pcid = 0;
+	typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound;
+
+	msg = ntohs(ctlh->messageType);
+	DEBUGP("outbound control message %s\n", pptp_msg_name[msg]);
+
+	switch (msg) {
+	case PPTP_START_SESSION_REQUEST:
+		/* client requests for new control session */
+		if (info->sstate != PPTP_SESSION_NONE)
+			goto invalid;
+		info->sstate = PPTP_SESSION_REQUESTED;
+		break;
+
+	case PPTP_STOP_SESSION_REQUEST:
+		/* client requests end of control session */
+		info->sstate = PPTP_SESSION_STOPREQ;
+		break;
+
+	case PPTP_OUT_CALL_REQUEST:
+		/* client initiating connection to server */
+		if (info->sstate != PPTP_SESSION_CONFIRMED)
+			goto invalid;
+		info->cstate = PPTP_CALL_OUT_REQ;
+		/* track PNS call id */
+		cid = pptpReq->ocreq.callID;
+		DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
+		info->pns_call_id = cid;
+		break;
+
+	case PPTP_IN_CALL_REPLY:
+		/* client answers incoming call */
+		if (info->cstate != PPTP_CALL_IN_REQ &&
+		    info->cstate != PPTP_CALL_IN_REP)
+			goto invalid;
+
+		cid = pptpReq->icack.callID;
+		pcid = pptpReq->icack.peersCallID;
+		if (info->pac_call_id != pcid)
+			goto invalid;
+		DEBUGP("%s, CID=%X PCID=%X\n", pptp_msg_name[msg],
+		       ntohs(cid), ntohs(pcid));
+
+		if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) {
+			/* part two of the three-way handshake */
+			info->cstate = PPTP_CALL_IN_REP;
+			info->pns_call_id = cid;
+		} else
+			info->cstate = PPTP_CALL_NONE;
+		break;
+
+	case PPTP_CALL_CLEAR_REQUEST:
+		/* client requests hangup of call */
+		if (info->sstate != PPTP_SESSION_CONFIRMED)
+			goto invalid;
+		/* FUTURE: iterate over all calls and check if the
+		 * call ID is valid.  We don't do this without newnat,
+		 * because we only know about the last call. */
+		info->cstate = PPTP_CALL_CLEAR_REQ;
+		break;
+
+	case PPTP_SET_LINK_INFO:
+	case PPTP_ECHO_REQUEST:
+	case PPTP_ECHO_REPLY:
+		/* I don't have to explain these ;) */
+		break;
+
+	default:
+		goto invalid;
+	}
+
+	nf_nat_pptp_outbound = rcu_dereference(nf_nat_pptp_hook_outbound);
+	if (nf_nat_pptp_outbound && ct->status & IPS_NAT_MASK)
+		return nf_nat_pptp_outbound(pskb, ct, ctinfo, ctlh, pptpReq);
+	return NF_ACCEPT;
+
+invalid:
+	DEBUGP("invalid %s: type=%d cid=%u pcid=%u "
+	       "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
+	       msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
+	       msg, ntohs(cid), ntohs(pcid),  info->cstate, info->sstate,
+	       ntohs(info->pns_call_id), ntohs(info->pac_call_id));
+	return NF_ACCEPT;
+}
+
+static const unsigned int pptp_msg_size[] = {
+	[PPTP_START_SESSION_REQUEST]  = sizeof(struct PptpStartSessionRequest),
+	[PPTP_START_SESSION_REPLY]    = sizeof(struct PptpStartSessionReply),
+	[PPTP_STOP_SESSION_REQUEST]   = sizeof(struct PptpStopSessionRequest),
+	[PPTP_STOP_SESSION_REPLY]     = sizeof(struct PptpStopSessionReply),
+	[PPTP_OUT_CALL_REQUEST]       = sizeof(struct PptpOutCallRequest),
+	[PPTP_OUT_CALL_REPLY]	      = sizeof(struct PptpOutCallReply),
+	[PPTP_IN_CALL_REQUEST]	      = sizeof(struct PptpInCallRequest),
+	[PPTP_IN_CALL_REPLY]	      = sizeof(struct PptpInCallReply),
+	[PPTP_IN_CALL_CONNECT]	      = sizeof(struct PptpInCallConnected),
+	[PPTP_CALL_CLEAR_REQUEST]     = sizeof(struct PptpClearCallRequest),
+	[PPTP_CALL_DISCONNECT_NOTIFY] = sizeof(struct PptpCallDisconnectNotify),
+	[PPTP_WAN_ERROR_NOTIFY]	      = sizeof(struct PptpWanErrorNotify),
+	[PPTP_SET_LINK_INFO]	      = sizeof(struct PptpSetLinkInfo),
+};
+
+/* track caller id inside control connection, call expect_related */
+static int
+conntrack_pptp_help(struct sk_buff **pskb, unsigned int protoff,
+		    struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+
+{
+	int dir = CTINFO2DIR(ctinfo);
+	struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info;
+	struct tcphdr _tcph, *tcph;
+	struct pptp_pkt_hdr _pptph, *pptph;
+	struct PptpControlHeader _ctlh, *ctlh;
+	union pptp_ctrl_union _pptpReq, *pptpReq;
+	unsigned int tcplen = (*pskb)->len - protoff;
+	unsigned int datalen, reqlen, nexthdr_off;
+	int oldsstate, oldcstate;
+	int ret;
+	u_int16_t msg;
+
+	/* don't do any tracking before tcp handshake complete */
+	if (ctinfo != IP_CT_ESTABLISHED &&
+	    ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
+		return NF_ACCEPT;
+
+	nexthdr_off = protoff;
+	tcph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_tcph), &_tcph);
+	BUG_ON(!tcph);
+	nexthdr_off += tcph->doff * 4;
+	datalen = tcplen - tcph->doff * 4;
+
+	pptph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_pptph), &_pptph);
+	if (!pptph) {
+		DEBUGP("no full PPTP header, can't track\n");
+		return NF_ACCEPT;
+	}
+	nexthdr_off += sizeof(_pptph);
+	datalen -= sizeof(_pptph);
+
+	/* if it's not a control message we can't do anything with it */
+	if (ntohs(pptph->packetType) != PPTP_PACKET_CONTROL ||
+	    ntohl(pptph->magicCookie) != PPTP_MAGIC_COOKIE) {
+		DEBUGP("not a control packet\n");
+		return NF_ACCEPT;
+	}
+
+	ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh);
+	if (!ctlh)
+		return NF_ACCEPT;
+	nexthdr_off += sizeof(_ctlh);
+	datalen -= sizeof(_ctlh);
+
+	reqlen = datalen;
+	msg = ntohs(ctlh->messageType);
+	if (msg > 0 && msg <= PPTP_MSG_MAX && reqlen < pptp_msg_size[msg])
+		return NF_ACCEPT;
+	if (reqlen > sizeof(*pptpReq))
+		reqlen = sizeof(*pptpReq);
+
+	pptpReq = skb_header_pointer(*pskb, nexthdr_off, reqlen, &_pptpReq);
+	if (!pptpReq)
+		return NF_ACCEPT;
+
+	oldsstate = info->sstate;
+	oldcstate = info->cstate;
+
+	spin_lock_bh(&nf_pptp_lock);
+
+	/* FIXME: We just blindly assume that the control connection is always
+	 * established from PNS->PAC.  However, the RFC makes no such guarantee. */
+	if (dir == IP_CT_DIR_ORIGINAL)
+		/* client -> server (PNS -> PAC) */
+		ret = pptp_outbound_pkt(pskb, ctlh, pptpReq, reqlen, ct,
+					ctinfo);
+	else
+		/* server -> client (PAC -> PNS) */
+		ret = pptp_inbound_pkt(pskb, ctlh, pptpReq, reqlen, ct,
+				       ctinfo);
+	DEBUGP("sstate: %d->%d, cstate: %d->%d\n",
+		oldsstate, info->sstate, oldcstate, info->cstate);
+	spin_unlock_bh(&nf_pptp_lock);
+
+	return ret;
+}
+
+/* control protocol helper */
+static struct nf_conntrack_helper pptp __read_mostly = {
+	.name			= "pptp",
+	.me			= THIS_MODULE,
+	.max_expected		= 2,
+	.timeout		= 5 * 60,
+	.tuple.src.l3num	= AF_INET,
+	.tuple.src.u.tcp.port	= __constant_htons(PPTP_CONTROL_PORT),
+	.tuple.dst.protonum	= IPPROTO_TCP,
+	.mask.src.l3num		= 0xffff,
+	.mask.src.u.tcp.port	= __constant_htons(0xffff),
+	.mask.dst.protonum	= 0xff,
+	.help			= conntrack_pptp_help,
+	.destroy		= pptp_destroy_siblings,
+};
+
+static int __init nf_conntrack_pptp_init(void)
+{
+	return nf_conntrack_helper_register(&pptp);
+}
+
+static void __exit nf_conntrack_pptp_fini(void)
+{
+	nf_conntrack_helper_unregister(&pptp);
+	nf_ct_gre_keymap_flush();
+}
+
+module_init(nf_conntrack_pptp_init);
+module_exit(nf_conntrack_pptp_fini);
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
new file mode 100644
index 0000000..1a61b72
--- /dev/null
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -0,0 +1,410 @@
+/* L3/L4 protocol support for nf_conntrack. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <linux/stddef.h>
+#include <linux/err.h>
+#include <linux/percpu.h>
+#include <linux/moduleparam.h>
+#include <linux/notifier.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_core.h>
+
+struct nf_conntrack_l4proto **nf_ct_protos[PF_MAX] __read_mostly;
+struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX] __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ct_l3protos);
+
+#ifdef CONFIG_SYSCTL
+static DEFINE_MUTEX(nf_ct_proto_sysctl_mutex);
+
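+/* Register a sysctl table on first use and keep an optional use count so
+ * several callers can share one table; nf_ct_unregister_sysctl() tears it
+ * down again only when the last user is gone. */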
+static int
+nf_ct_register_sysctl(struct ctl_table_header **header, struct ctl_table *path,
+		      struct ctl_table *table, unsigned int *users)
+{
+	if (*header == NULL) {
+		*header = nf_register_sysctl_table(path, table);
+		if (*header == NULL)
+			return -ENOMEM;
+	}
+	if (users != NULL)
+		(*users)++;
+	return 0;
+}
+
+static void
+nf_ct_unregister_sysctl(struct ctl_table_header **header,
+			struct ctl_table *table, unsigned int *users)
+{
+	if (users != NULL && --*users > 0)
+		return;
+	nf_unregister_sysctl_table(*header, table);
+	*header = NULL;
+}
+#endif
+
+struct nf_conntrack_l4proto *
+__nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto)
+{
+	if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
+		return &nf_conntrack_l4proto_generic;
+
+	return nf_ct_protos[l3proto][l4proto];
+}
+EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find);
+
+/* This is guaranteed to return a valid protocol helper, since it falls
+ * back to nf_conntrack_l4proto_generic. */
+struct nf_conntrack_l4proto *
+nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t l4proto)
+{
+	struct nf_conntrack_l4proto *p;
+
+	preempt_disable();
+	p = __nf_ct_l4proto_find(l3proto, l4proto);
+	if (!try_module_get(p->me))
+		p = &nf_conntrack_l4proto_generic;
+	preempt_enable();
+
+	return p;
+}
+EXPORT_SYMBOL_GPL(nf_ct_l4proto_find_get);
+
+void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p)
+{
+	module_put(p->me);
+}
+EXPORT_SYMBOL_GPL(nf_ct_l4proto_put);
+
+struct nf_conntrack_l3proto *
+nf_ct_l3proto_find_get(u_int16_t l3proto)
+{
+	struct nf_conntrack_l3proto *p;
+
+	preempt_disable();
+	p = __nf_ct_l3proto_find(l3proto);
+	if (!try_module_get(p->me))
+		p = &nf_conntrack_l3proto_generic;
+	preempt_enable();
+
+	return p;
+}
+EXPORT_SYMBOL_GPL(nf_ct_l3proto_find_get);
+
+void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p)
+{
+	module_put(p->me);
+}
+EXPORT_SYMBOL_GPL(nf_ct_l3proto_put);
+
+int
+nf_ct_l3proto_try_module_get(unsigned short l3proto)
+{
+	int ret;
+	struct nf_conntrack_l3proto *p;
+
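+	/* Finding only the generic protocol means no AF-specific tracker is
+	 * loaded yet; request the "nf_conntrack-<l3proto>" module and retry. */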
+retry:	p = nf_ct_l3proto_find_get(l3proto);
+	if (p == &nf_conntrack_l3proto_generic) {
+		ret = request_module("nf_conntrack-%d", l3proto);
+		if (!ret)
+			goto retry;
+
+		return -EPROTOTYPE;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nf_ct_l3proto_try_module_get);
+
+void nf_ct_l3proto_module_put(unsigned short l3proto)
+{
+	struct nf_conntrack_l3proto *p;
+
+	preempt_disable();
+	p = __nf_ct_l3proto_find(l3proto);
+	preempt_enable();
+
+	module_put(p->me);
+}
+EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put);
+
+static int kill_l3proto(struct nf_conn *i, void *data)
+{
+	return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num ==
+			((struct nf_conntrack_l3proto *)data)->l3proto);
+}
+
+static int kill_l4proto(struct nf_conn *i, void *data)
+{
+	struct nf_conntrack_l4proto *l4proto;
+	l4proto = (struct nf_conntrack_l4proto *)data;
+	return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum ==
+			l4proto->l4proto) &&
+	       (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num ==
+			l4proto->l3proto);
+}
+
+static int nf_ct_l3proto_register_sysctl(struct nf_conntrack_l3proto *l3proto)
+{
+	int err = 0;
+
+#ifdef CONFIG_SYSCTL
+	mutex_lock(&nf_ct_proto_sysctl_mutex);
+	if (l3proto->ctl_table != NULL) {
+		err = nf_ct_register_sysctl(&l3proto->ctl_table_header,
+					    l3proto->ctl_table_path,
+					    l3proto->ctl_table, NULL);
+	}
+	mutex_unlock(&nf_ct_proto_sysctl_mutex);
+#endif
+	return err;
+}
+
+static void nf_ct_l3proto_unregister_sysctl(struct nf_conntrack_l3proto *l3proto)
+{
+#ifdef CONFIG_SYSCTL
+	mutex_lock(&nf_ct_proto_sysctl_mutex);
+	if (l3proto->ctl_table_header != NULL)
+		nf_ct_unregister_sysctl(&l3proto->ctl_table_header,
+					l3proto->ctl_table, NULL);
+	mutex_unlock(&nf_ct_proto_sysctl_mutex);
+#endif
+}
+
+int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
+{
+	int ret = 0;
+
+	if (proto->l3proto >= AF_MAX) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	write_lock_bh(&nf_conntrack_lock);
+	if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_l3proto_generic) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+	nf_ct_l3protos[proto->l3proto] = proto;
+	write_unlock_bh(&nf_conntrack_lock);
+
+	ret = nf_ct_l3proto_register_sysctl(proto);
+	if (ret < 0)
+		nf_conntrack_l3proto_unregister(proto);
+	return ret;
+
+out_unlock:
+	write_unlock_bh(&nf_conntrack_lock);
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register);
+
+int nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
+{
+	int ret = 0;
+
+	if (proto->l3proto >= AF_MAX) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	write_lock_bh(&nf_conntrack_lock);
+	if (nf_ct_l3protos[proto->l3proto] != proto) {
+		write_unlock_bh(&nf_conntrack_lock);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	nf_ct_l3protos[proto->l3proto] = &nf_conntrack_l3proto_generic;
+	write_unlock_bh(&nf_conntrack_lock);
+
+	nf_ct_l3proto_unregister_sysctl(proto);
+
+	/* Somebody could still be looking at the proto in a bh. */
+	synchronize_net();
+
+	/* Remove all conntrack entries for this protocol */
+	nf_ct_iterate_cleanup(kill_l3proto, proto);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister);
+
+static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto)
+{
+	int err = 0;
+
+#ifdef CONFIG_SYSCTL
+	mutex_lock(&nf_ct_proto_sysctl_mutex);
+	if (l4proto->ctl_table != NULL) {
+		err = nf_ct_register_sysctl(l4proto->ctl_table_header,
+					    nf_net_netfilter_sysctl_path,
+					    l4proto->ctl_table,
+					    l4proto->ctl_table_users);
+		if (err < 0)
+			goto out;
+	}
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+	if (l4proto->ctl_compat_table != NULL) {
+		err = nf_ct_register_sysctl(&l4proto->ctl_compat_table_header,
+					    nf_net_ipv4_netfilter_sysctl_path,
+					    l4proto->ctl_compat_table, NULL);
+		if (err == 0)
+			goto out;
+		nf_ct_unregister_sysctl(l4proto->ctl_table_header,
+					l4proto->ctl_table,
+					l4proto->ctl_table_users);
+	}
+#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
+out:
+	mutex_unlock(&nf_ct_proto_sysctl_mutex);
+#endif /* CONFIG_SYSCTL */
+	return err;
+}
+
+static void nf_ct_l4proto_unregister_sysctl(struct nf_conntrack_l4proto *l4proto)
+{
+#ifdef CONFIG_SYSCTL
+	mutex_lock(&nf_ct_proto_sysctl_mutex);
+	if (l4proto->ctl_table_header != NULL &&
+	    *l4proto->ctl_table_header != NULL)
+		nf_ct_unregister_sysctl(l4proto->ctl_table_header,
+					l4proto->ctl_table,
+					l4proto->ctl_table_users);
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+	if (l4proto->ctl_compat_table_header != NULL)
+		nf_ct_unregister_sysctl(&l4proto->ctl_compat_table_header,
+					l4proto->ctl_compat_table, NULL);
+#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
+	mutex_unlock(&nf_ct_proto_sysctl_mutex);
+#endif /* CONFIG_SYSCTL */
+}
+
+/* FIXME: Allow NULL functions and sub in pointers to generic for
+   them. --RR */
+int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
+{
+	int ret = 0;
+
+	if (l4proto->l3proto >= PF_MAX) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (l4proto == &nf_conntrack_l4proto_generic)
+		return nf_ct_l4proto_register_sysctl(l4proto);
+
+retry:
+	write_lock_bh(&nf_conntrack_lock);
+	if (nf_ct_protos[l4proto->l3proto]) {
+		if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto]
+				!= &nf_conntrack_l4proto_generic) {
+			ret = -EBUSY;
+			goto out_unlock;
+		}
+	} else {
+		/* The l3proto may be loaded later. */
+		struct nf_conntrack_l4proto **proto_array;
+		int i;
+
+		write_unlock_bh(&nf_conntrack_lock);
+
+		proto_array = (struct nf_conntrack_l4proto **)
+				kmalloc(MAX_NF_CT_PROTO *
+					 sizeof(struct nf_conntrack_l4proto *),
+					GFP_KERNEL);
+		if (proto_array == NULL) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		for (i = 0; i < MAX_NF_CT_PROTO; i++)
+			proto_array[i] = &nf_conntrack_l4proto_generic;
+
+		write_lock_bh(&nf_conntrack_lock);
+		if (nf_ct_protos[l4proto->l3proto]) {
+			/* bad timing, but no problem */
+			write_unlock_bh(&nf_conntrack_lock);
+			kfree(proto_array);
+		} else {
+			nf_ct_protos[l4proto->l3proto] = proto_array;
+			write_unlock_bh(&nf_conntrack_lock);
+		}
+
+		/*
+		 * Retry at most once: the array is never freed until
+		 * nf_conntrack.ko is unloaded.
+		 */
+		goto retry;
+	}
+
+	nf_ct_protos[l4proto->l3proto][l4proto->l4proto] = l4proto;
+	write_unlock_bh(&nf_conntrack_lock);
+
+	ret = nf_ct_l4proto_register_sysctl(l4proto);
+	if (ret < 0)
+		nf_conntrack_l4proto_unregister(l4proto);
+	return ret;
+
+out_unlock:
+	write_unlock_bh(&nf_conntrack_lock);
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register);
+
+int nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
+{
+	int ret = 0;
+
+	if (l4proto->l3proto >= PF_MAX) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (l4proto == &nf_conntrack_l4proto_generic) {
+		nf_ct_l4proto_unregister_sysctl(l4proto);
+		goto out;
+	}
+
+	write_lock_bh(&nf_conntrack_lock);
+	if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto]
+	    != l4proto) {
+		write_unlock_bh(&nf_conntrack_lock);
+		ret = -EBUSY;
+		goto out;
+	}
+	nf_ct_protos[l4proto->l3proto][l4proto->l4proto]
+		= &nf_conntrack_l4proto_generic;
+	write_unlock_bh(&nf_conntrack_lock);
+
+	nf_ct_l4proto_unregister_sysctl(l4proto);
+
+	/* Somebody could still be looking at the proto in bh. */
+	synchronize_net();
+
+	/* Remove all conntrack entries for this protocol */
+	nf_ct_iterate_cleanup(kill_l4proto, l4proto);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);
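
The registration path above fills the per-l3proto array of l4proto pointers lazily: the array is allocated outside nf_conntrack_lock, the slot is re-checked under the lock, and the copy is discarded if another registrant won the race; every unused slot points at nf_conntrack_l4proto_generic so lookups never see NULL. The following is a minimal userspace sketch of that double-checked pattern, not kernel code: a pthread rwlock stands in for nf_conntrack_lock, malloc() for kmalloc(), and all names and array sizes are invented for illustration.

/*
 * Simplified userspace model of nf_conntrack_l4proto_register()'s lazy
 * per-l3proto array allocation.  Names and sizes are illustrative only.
 */
#include <pthread.h>
#include <stdlib.h>

#define NPROTO 256

struct l4proto { const char *name; };

static struct l4proto generic = { .name = "generic" };
static struct l4proto **protos[32];	/* indexed by l3proto, assumed < 32 */
static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

static int register_l4(int l3, int l4, struct l4proto *p)
{
	struct l4proto **array;
	int i;

retry:
	pthread_rwlock_wrlock(&lock);
	if (protos[l3]) {
		if (protos[l3][l4] != &generic) {	/* slot already taken */
			pthread_rwlock_unlock(&lock);
			return -1;
		}
	} else {
		/* Allocate outside the lock, then re-check under it. */
		pthread_rwlock_unlock(&lock);
		array = malloc(NPROTO * sizeof(*array));
		if (!array)
			return -1;
		for (i = 0; i < NPROTO; i++)
			array[i] = &generic;		/* no NULL slots */

		pthread_rwlock_wrlock(&lock);
		if (protos[l3])
			free(array);			/* lost the race */
		else
			protos[l3] = array;
		pthread_rwlock_unlock(&lock);
		goto retry;				/* loops at most once */
	}
	protos[l3][l4] = p;
	pthread_rwlock_unlock(&lock);
	return 0;
}

int main(void)
{
	static struct l4proto tcp = { .name = "tcp" };

	return register_l4(2 /* AF_INET */, 6 /* IPPROTO_TCP */, &tcp);
}
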
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 26408bb..6990253 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -15,9 +15,9 @@
 #include <linux/sched.h>
 #include <linux/timer.h>
 #include <linux/netfilter.h>
-#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
 
-unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
+static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
 
 static int generic_pkt_to_tuple(const struct sk_buff *skb,
 				unsigned int dataoff,
@@ -71,10 +71,42 @@
 	return 1;
 }
 
-struct nf_conntrack_protocol nf_conntrack_generic_protocol =
+#ifdef CONFIG_SYSCTL
+static struct ctl_table_header *generic_sysctl_header;
+static struct ctl_table generic_sysctl_table[] = {
+	{
+		.ctl_name	= NET_NF_CONNTRACK_GENERIC_TIMEOUT,
+		.procname	= "nf_conntrack_generic_timeout",
+		.data		= &nf_ct_generic_timeout,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= 0
+	}
+};
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+static struct ctl_table generic_compat_sysctl_table[] = {
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT,
+		.procname	= "ip_conntrack_generic_timeout",
+		.data		= &nf_ct_generic_timeout,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= 0
+	}
+};
+#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
+#endif /* CONFIG_SYSCTL */
+
+struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
 {
 	.l3proto		= PF_UNSPEC,
-	.proto			= 0,
+	.l4proto		= 0,
 	.name			= "unknown",
 	.pkt_to_tuple		= generic_pkt_to_tuple,
 	.invert_tuple		= generic_invert_tuple,
@@ -82,4 +114,11 @@
 	.print_conntrack	= generic_print_conntrack,
 	.packet			= packet,
 	.new			= new,
+#ifdef CONFIG_SYSCTL
+	.ctl_table_header	= &generic_sysctl_header,
+	.ctl_table		= generic_sysctl_table,
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+	.ctl_compat_table	= generic_compat_sysctl_table,
+#endif
+#endif
 };
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
new file mode 100644
index 0000000..ac193ce
--- /dev/null
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -0,0 +1,305 @@
+/*
+ * ip_conntrack_proto_gre.c - Version 3.0
+ *
+ * Connection tracking protocol helper module for GRE.
+ *
+ * GRE is a generic encapsulation protocol, which is generally not very
+ * suited for NAT, as it has no protocol-specific part such as port numbers.
+ *
+ * It has an optional key field, which may help us distinguish two
+ * connections between the same two hosts.
+ *
+ * GRE is defined in RFC 1701 and RFC 1702, as well as in RFC 2784.
+ *
+ * PPTP is built on top of a modified version of GRE, and has a mandatory
+ * field called "CallID", which serves the same purpose for us as the key
+ * field in plain GRE.
+ *
+ * Documentation about PPTP can be found in RFC 2637
+ *
+ * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <linux/netfilter/nf_conntrack_proto_gre.h>
+#include <linux/netfilter/nf_conntrack_pptp.h>
+
+#define GRE_TIMEOUT		(30 * HZ)
+#define GRE_STREAM_TIMEOUT	(180 * HZ)
+
+#if 0
+#define DEBUGP(format, args...)	printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args)
+#else
+#define DEBUGP(x, args...)
+#endif
+
+static DEFINE_RWLOCK(nf_ct_gre_lock);
+static LIST_HEAD(gre_keymap_list);
+
+void nf_ct_gre_keymap_flush(void)
+{
+	struct list_head *pos, *n;
+
+	write_lock_bh(&nf_ct_gre_lock);
+	list_for_each_safe(pos, n, &gre_keymap_list) {
+		list_del(pos);
+		kfree(pos);
+	}
+	write_unlock_bh(&nf_ct_gre_lock);
+}
+EXPORT_SYMBOL(nf_ct_gre_keymap_flush);
+
+static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km,
+				const struct nf_conntrack_tuple *t)
+{
+	return km->tuple.src.l3num == t->src.l3num &&
+	       !memcmp(&km->tuple.src.u3, &t->src.u3, sizeof(t->src.u3)) &&
+	       !memcmp(&km->tuple.dst.u3, &t->dst.u3, sizeof(t->dst.u3)) &&
+	       km->tuple.dst.protonum == t->dst.protonum &&
+	       km->tuple.dst.u.all == t->dst.u.all;
+}
+
+/* look up the source key for a given tuple */
+static __be16 gre_keymap_lookup(struct nf_conntrack_tuple *t)
+{
+	struct nf_ct_gre_keymap *km;
+	__be16 key = 0;
+
+	read_lock_bh(&nf_ct_gre_lock);
+	list_for_each_entry(km, &gre_keymap_list, list) {
+		if (gre_key_cmpfn(km, t)) {
+			key = km->tuple.src.u.gre.key;
+			break;
+		}
+	}
+	read_unlock_bh(&nf_ct_gre_lock);
+
+	DEBUGP("lookup src key 0x%x for ", key);
+	NF_CT_DUMP_TUPLE(t);
+
+	return key;
+}
+
+/* add a single keymap entry, associate with specified master ct */
+int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
+			 struct nf_conntrack_tuple *t)
+{
+	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_ct_gre_keymap **kmp, *km;
+
+	BUG_ON(strcmp(help->helper->name, "pptp"));
+	kmp = &help->help.ct_pptp_info.keymap[dir];
+	if (*kmp) {
+		/* check whether it's a retransmission */
+		list_for_each_entry(km, &gre_keymap_list, list) {
+			if (gre_key_cmpfn(km, t) && km == *kmp)
+				return 0;
+		}
+		DEBUGP("trying to override keymap_%s for ct %p\n",
+			dir == IP_CT_DIR_REPLY ? "reply" : "orig", ct);
+		return -EEXIST;
+	}
+
+	km = kmalloc(sizeof(*km), GFP_ATOMIC);
+	if (!km)
+		return -ENOMEM;
+	memcpy(&km->tuple, t, sizeof(*t));
+	*kmp = km;
+
+	DEBUGP("adding new entry %p: ", km);
+	NF_CT_DUMP_TUPLE(&km->tuple);
+
+	write_lock_bh(&nf_ct_gre_lock);
+	list_add_tail(&km->list, &gre_keymap_list);
+	write_unlock_bh(&nf_ct_gre_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_add);
+
+/* destroy the keymap entries associated with specified master ct */
+void nf_ct_gre_keymap_destroy(struct nf_conn *ct)
+{
+	struct nf_conn_help *help = nfct_help(ct);
+	enum ip_conntrack_dir dir;
+
+	DEBUGP("entering for ct %p\n", ct);
+	BUG_ON(strcmp(help->helper->name, "pptp"));
+
+	write_lock_bh(&nf_ct_gre_lock);
+	for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) {
+		if (help->help.ct_pptp_info.keymap[dir]) {
+			DEBUGP("removing %p from list\n",
+				help->help.ct_pptp_info.keymap[dir]);
+			list_del(&help->help.ct_pptp_info.keymap[dir]->list);
+			kfree(help->help.ct_pptp_info.keymap[dir]);
+			help->help.ct_pptp_info.keymap[dir] = NULL;
+		}
+	}
+	write_unlock_bh(&nf_ct_gre_lock);
+}
+EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_destroy);
+
+/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */
+
+/* invert gre part of tuple */
+static int gre_invert_tuple(struct nf_conntrack_tuple *tuple,
+			    const struct nf_conntrack_tuple *orig)
+{
+	tuple->dst.u.gre.key = orig->src.u.gre.key;
+	tuple->src.u.gre.key = orig->dst.u.gre.key;
+	return 1;
+}
+
+/* gre hdr info to tuple */
+static int gre_pkt_to_tuple(const struct sk_buff *skb,
+			   unsigned int dataoff,
+			   struct nf_conntrack_tuple *tuple)
+{
+	struct gre_hdr_pptp _pgrehdr, *pgrehdr;
+	__be16 srckey;
+	struct gre_hdr _grehdr, *grehdr;
+
+	/* first only delinearize old RFC1701 GRE header */
+	grehdr = skb_header_pointer(skb, dataoff, sizeof(_grehdr), &_grehdr);
+	if (!grehdr || grehdr->version != GRE_VERSION_PPTP) {
+		/* try to behave like "nf_conntrack_proto_generic" */
+		tuple->src.u.all = 0;
+		tuple->dst.u.all = 0;
+		return 1;
+	}
+
+	/* PPTP header is variable length, only need up to the call_id field */
+	pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr);
+	if (!pgrehdr)
+		return 1;
+
+	if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) {
+		DEBUGP("GRE_VERSION_PPTP but unknown proto\n");
+		return 0;
+	}
+
+	tuple->dst.u.gre.key = pgrehdr->call_id;
+	srckey = gre_keymap_lookup(tuple);
+	tuple->src.u.gre.key = srckey;
+
+	return 1;
+}
+
+/* print gre part of tuple */
+static int gre_print_tuple(struct seq_file *s,
+			   const struct nf_conntrack_tuple *tuple)
+{
+	return seq_printf(s, "srckey=0x%x dstkey=0x%x ",
+			  ntohs(tuple->src.u.gre.key),
+			  ntohs(tuple->dst.u.gre.key));
+}
+
+/* print private data for conntrack */
+static int gre_print_conntrack(struct seq_file *s,
+			       const struct nf_conn *ct)
+{
+	return seq_printf(s, "timeout=%u, stream_timeout=%u ",
+			  (ct->proto.gre.timeout / HZ),
+			  (ct->proto.gre.stream_timeout / HZ));
+}
+
+/* Returns verdict for packet, and may modify conntrack */
+static int gre_packet(struct nf_conn *ct,
+		      const struct sk_buff *skb,
+		      unsigned int dataoff,
+		      enum ip_conntrack_info ctinfo,
+		      int pf,
+		      unsigned int hooknum)
+{
+	/* If we've seen traffic both ways, this is a GRE connection.
+	 * Extend timeout. */
+	if (ct->status & IPS_SEEN_REPLY) {
+		nf_ct_refresh_acct(ct, ctinfo, skb,
+				   ct->proto.gre.stream_timeout);
+		/* Also, more likely to be important, and not a probe. */
+		set_bit(IPS_ASSURED_BIT, &ct->status);
+		nf_conntrack_event_cache(IPCT_STATUS, skb);
+	} else
+		nf_ct_refresh_acct(ct, ctinfo, skb,
+				   ct->proto.gre.timeout);
+
+	return NF_ACCEPT;
+}
+
+/* Called when a new connection for this protocol is found. */
+static int gre_new(struct nf_conn *ct, const struct sk_buff *skb,
+		   unsigned int dataoff)
+{
+	DEBUGP(": ");
+	NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+
+	/* initialize to sane values.  Ideally a conntrack helper
+	 * (e.g. in the case of PPTP) will increase them */
+	ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT;
+	ct->proto.gre.timeout = GRE_TIMEOUT;
+
+	return 1;
+}
+
+/* Called when a conntrack entry has already been removed from the hashes
+ * and is about to be deleted from memory */
+static void gre_destroy(struct nf_conn *ct)
+{
+	struct nf_conn *master = ct->master;
+	DEBUGP(" entering\n");
+
+	if (!master)
+		DEBUGP("no master !?!\n");
+	else
+		nf_ct_gre_keymap_destroy(master);
+}
+
+/* protocol helper struct */
+static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
+	.l3proto	 = AF_INET,
+	.l4proto	 = IPPROTO_GRE,
+	.name		 = "gre",
+	.pkt_to_tuple	 = gre_pkt_to_tuple,
+	.invert_tuple	 = gre_invert_tuple,
+	.print_tuple	 = gre_print_tuple,
+	.print_conntrack = gre_print_conntrack,
+	.packet		 = gre_packet,
+	.new		 = gre_new,
+	.destroy	 = gre_destroy,
+	.me 		 = THIS_MODULE,
+#if defined(CONFIG_NF_CONNTRACK_NETLINK) || \
+    defined(CONFIG_NF_CONNTRACK_NETLINK_MODULE)
+	.tuple_to_nfattr = nf_ct_port_tuple_to_nfattr,
+	.nfattr_to_tuple = nf_ct_port_nfattr_to_tuple,
+#endif
+};
+
+static int __init nf_ct_proto_gre_init(void)
+{
+	return nf_conntrack_l4proto_register(&nf_conntrack_l4proto_gre4);
+}
+
+static void nf_ct_proto_gre_fini(void)
+{
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4);
+	nf_ct_gre_keymap_flush();
+}
+
+module_init(nf_ct_proto_gre_init);
+module_exit(nf_ct_proto_gre_fini);
+
+MODULE_LICENSE("GPL");
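
GRE carries no port numbers, so the tracker keys PPTP-style GRE flows on the 16-bit call-id: the destination key is read straight from the packet, while the source key has to be recovered from a keymap that the PPTP helper records for each expected direction. Below is a small userspace model of that keymap lookup, kept deliberately simple: a singly linked list of (address pair, call-id) entries; the struct layout and function names are invented for the sketch and do not match the kernel's nf_ct_gre_keymap.

/*
 * Userspace sketch of the GRE keymap idea: map "who is talking to whom"
 * back to the source call-id the PPTP helper recorded.
 */
#include <stdint.h>
#include <stdlib.h>

struct keymap {
	uint32_t src_ip, dst_ip;	/* stand-in for the full conntrack tuple */
	uint16_t call_id;		/* source key recorded by the helper */
	struct keymap *next;
};

static struct keymap *keymap_list;

static uint16_t keymap_lookup(uint32_t src_ip, uint32_t dst_ip)
{
	struct keymap *km;

	for (km = keymap_list; km; km = km->next)
		if (km->src_ip == src_ip && km->dst_ip == dst_ip)
			return km->call_id;
	return 0;			/* unknown flow: behave like "generic" */
}

static int keymap_add(uint32_t src_ip, uint32_t dst_ip, uint16_t call_id)
{
	struct keymap *km = malloc(sizeof(*km));

	if (!km)
		return -1;
	km->src_ip = src_ip;
	km->dst_ip = dst_ip;
	km->call_id = call_id;
	km->next = keymap_list;
	keymap_list = km;
	return 0;
}

int main(void)
{
	keymap_add(0x0a000001, 0x0a000002, 0x4d2);	/* helper saw call-id 1234 */
	return keymap_lookup(0x0a000001, 0x0a000002) == 0x4d2 ? 0 : 1;
}
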
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index af56877..76e2636 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -32,7 +32,8 @@
 #include <linux/interrupt.h>
 
 #include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
 
 #if 0
 #define DEBUGP(format, ...) printk(format, ## __VA_ARGS__)
@@ -216,7 +217,7 @@
 for (offset = dataoff + sizeof(sctp_sctphdr_t), count = 0;		\
 	offset < skb->len &&						\
 	(sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch));	\
-	offset += (htons(sch->length) + 3) & ~3, count++)
+	offset += (ntohs(sch->length) + 3) & ~3, count++)
 
 /* Some validity checks to make sure the chunks are fine */
 static int do_basic_checks(struct nf_conn *conntrack,
@@ -508,36 +509,10 @@
 	return 1;
 }
 
-struct nf_conntrack_protocol nf_conntrack_protocol_sctp4 = { 
-	.l3proto	 = PF_INET,
-	.proto 		 = IPPROTO_SCTP, 
-	.name 		 = "sctp",
-	.pkt_to_tuple 	 = sctp_pkt_to_tuple, 
-	.invert_tuple 	 = sctp_invert_tuple, 
-	.print_tuple 	 = sctp_print_tuple, 
-	.print_conntrack = sctp_print_conntrack,
-	.packet 	 = sctp_packet, 
-	.new 		 = sctp_new, 
-	.destroy 	 = NULL, 
-	.me 		 = THIS_MODULE 
-};
-
-struct nf_conntrack_protocol nf_conntrack_protocol_sctp6 = { 
-	.l3proto	 = PF_INET6,
-	.proto 		 = IPPROTO_SCTP, 
-	.name 		 = "sctp",
-	.pkt_to_tuple 	 = sctp_pkt_to_tuple, 
-	.invert_tuple 	 = sctp_invert_tuple, 
-	.print_tuple 	 = sctp_print_tuple, 
-	.print_conntrack = sctp_print_conntrack,
-	.packet 	 = sctp_packet, 
-	.new 		 = sctp_new, 
-	.destroy 	 = NULL, 
-	.me 		 = THIS_MODULE 
-};
-
 #ifdef CONFIG_SYSCTL
-static ctl_table nf_ct_sysctl_table[] = {
+static unsigned int sctp_sysctl_table_users;
+static struct ctl_table_header *sctp_sysctl_header;
+static struct ctl_table sctp_sysctl_table[] = {
 	{
 		.ctl_name	= NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED,
 		.procname	= "nf_conntrack_sctp_timeout_closed",
@@ -594,63 +569,134 @@
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec_jiffies,
 	},
-	{ .ctl_name = 0 }
-};
-
-static ctl_table nf_ct_netfilter_table[] = {
 	{
-		.ctl_name	= NET_NETFILTER,
-		.procname	= "netfilter",
-		.mode		= 0555,
-		.child		= nf_ct_sysctl_table,
-	},
-	{ .ctl_name = 0 }
+		.ctl_name = 0
+	}
 };
 
-static ctl_table nf_ct_net_table[] = {
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+static struct ctl_table sctp_compat_sysctl_table[] = {
 	{
-		.ctl_name	= CTL_NET,
-		.procname	= "net",
-		.mode		= 0555, 
-		.child		= nf_ct_netfilter_table,
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED,
+		.procname	= "ip_conntrack_sctp_timeout_closed",
+		.data		= &nf_ct_sctp_timeout_closed,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
 	},
-	{ .ctl_name = 0 }
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT,
+		.procname	= "ip_conntrack_sctp_timeout_cookie_wait",
+		.data		= &nf_ct_sctp_timeout_cookie_wait,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED,
+		.procname	= "ip_conntrack_sctp_timeout_cookie_echoed",
+		.data		= &nf_ct_sctp_timeout_cookie_echoed,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED,
+		.procname	= "ip_conntrack_sctp_timeout_established",
+		.data		= &nf_ct_sctp_timeout_established,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT,
+		.procname	= "ip_conntrack_sctp_timeout_shutdown_sent",
+		.data		= &nf_ct_sctp_timeout_shutdown_sent,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD,
+		.procname	= "ip_conntrack_sctp_timeout_shutdown_recd",
+		.data		= &nf_ct_sctp_timeout_shutdown_recd,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT,
+		.procname	= "ip_conntrack_sctp_timeout_shutdown_ack_sent",
+		.data		= &nf_ct_sctp_timeout_shutdown_ack_sent,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name = 0
+	}
 };
-
-static struct ctl_table_header *nf_ct_sysctl_header;
+#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
 #endif
 
+struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
+	.l3proto		= PF_INET,
+	.l4proto 		= IPPROTO_SCTP,
+	.name 			= "sctp",
+	.pkt_to_tuple 		= sctp_pkt_to_tuple,
+	.invert_tuple 		= sctp_invert_tuple,
+	.print_tuple 		= sctp_print_tuple,
+	.print_conntrack	= sctp_print_conntrack,
+	.packet 		= sctp_packet,
+	.new 			= sctp_new,
+	.me 			= THIS_MODULE,
+#ifdef CONFIG_SYSCTL
+	.ctl_table_users	= &sctp_sysctl_table_users,
+	.ctl_table_header	= &sctp_sysctl_header,
+	.ctl_table		= sctp_sysctl_table,
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+	.ctl_compat_table	= sctp_compat_sysctl_table,
+#endif
+#endif
+};
+
+struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
+	.l3proto		= PF_INET6,
+	.l4proto 		= IPPROTO_SCTP,
+	.name 			= "sctp",
+	.pkt_to_tuple 		= sctp_pkt_to_tuple,
+	.invert_tuple 		= sctp_invert_tuple,
+	.print_tuple 		= sctp_print_tuple,
+	.print_conntrack	= sctp_print_conntrack,
+	.packet 		= sctp_packet,
+	.new 			= sctp_new,
+	.me 			= THIS_MODULE,
+#ifdef CONFIG_SYSCTL
+	.ctl_table_users	= &sctp_sysctl_table_users,
+	.ctl_table_header	= &sctp_sysctl_header,
+	.ctl_table		= sctp_sysctl_table,
+#endif
+};
+
 int __init nf_conntrack_proto_sctp_init(void)
 {
 	int ret;
 
-	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_sctp4);
+	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp4);
 	if (ret) {
-		printk("nf_conntrack_proto_sctp4: protocol register failed\n");
+		printk("nf_conntrack_l4proto_sctp4: protocol register failed\n");
 		goto out;
 	}
-	ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_sctp6);
+	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp6);
 	if (ret) {
-		printk("nf_conntrack_proto_sctp6: protocol register failed\n");
+		printk("nf_conntrack_l4proto_sctp6: protocol register failed\n");
 		goto cleanup_sctp4;
 	}
 
-#ifdef CONFIG_SYSCTL
-	nf_ct_sysctl_header = register_sysctl_table(nf_ct_net_table, 0);
-	if (nf_ct_sysctl_header == NULL) {
-		printk("nf_conntrack_proto_sctp: can't register to sysctl.\n");
-		goto cleanup;
-	}
-#endif
-
 	return ret;
 
-#ifdef CONFIG_SYSCTL
- cleanup:
-	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_sctp6);
-#endif
  cleanup_sctp4:
-	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_sctp4);
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
  out:
 	DEBUGP("SCTP conntrack module loading %s\n", 
 					ret ? "failed": "succeeded");
@@ -659,11 +705,8 @@
 
 void __exit nf_conntrack_proto_sctp_fini(void)
 {
-	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_sctp6);
-	nf_conntrack_protocol_unregister(&nf_conntrack_protocol_sctp4);
-#ifdef CONFIG_SYSCTL
- 	unregister_sysctl_table(nf_ct_sysctl_header);
-#endif
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp6);
+	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
 	DEBUGP("SCTP conntrack module unloaded\n");
 }
 
@@ -673,3 +716,4 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kiran Kumar Immidi");
 MODULE_DESCRIPTION("Netfilter connection tracking protocol helper for SCTP");
+MODULE_ALIAS("ip_conntrack_proto_sctp");
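
A note on the chunk-walking change above: sch->length is a 16-bit big-endian field, and ntohs() is the conversion from network to host order that the offset arithmetic wants. On both endiannesses htons() happens to compute the same bytes, so behaviour is unchanged; the switch makes the direction of the conversion explicit and keeps the __be16 annotations consistent. A standalone check of the advance, with a made-up chunk length:

/* Standalone check of the corrected chunk-offset advance:
 * convert the on-wire length with ntohs(), then round up to 4 bytes. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t wire_len = htons(7);		/* as it would appear on the wire */
	unsigned int offset = 0;

	offset += (ntohs(wire_len) + 3) & ~3;	/* advance to the next chunk */
	printf("next chunk at offset %u\n", offset);	/* prints 8 */
	return 0;
}
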
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 238bbb5..626b001 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -42,7 +42,8 @@
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter_ipv6.h>
 #include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
 
 #if 0
 #define DEBUGP printk
@@ -92,22 +93,22 @@
 #define HOURS * 60 MINS
 #define DAYS * 24 HOURS
 
-unsigned int nf_ct_tcp_timeout_syn_sent __read_mostly =      2 MINS;
-unsigned int nf_ct_tcp_timeout_syn_recv __read_mostly =     60 SECS;
-unsigned int nf_ct_tcp_timeout_established __read_mostly =   5 DAYS;
-unsigned int nf_ct_tcp_timeout_fin_wait __read_mostly =      2 MINS;
-unsigned int nf_ct_tcp_timeout_close_wait __read_mostly =   60 SECS;
-unsigned int nf_ct_tcp_timeout_last_ack __read_mostly =     30 SECS;
-unsigned int nf_ct_tcp_timeout_time_wait __read_mostly =     2 MINS;
-unsigned int nf_ct_tcp_timeout_close __read_mostly =        10 SECS;
+static unsigned int nf_ct_tcp_timeout_syn_sent __read_mostly =      2 MINS;
+static unsigned int nf_ct_tcp_timeout_syn_recv __read_mostly =     60 SECS;
+static unsigned int nf_ct_tcp_timeout_established __read_mostly =   5 DAYS;
+static unsigned int nf_ct_tcp_timeout_fin_wait __read_mostly =      2 MINS;
+static unsigned int nf_ct_tcp_timeout_close_wait __read_mostly =   60 SECS;
+static unsigned int nf_ct_tcp_timeout_last_ack __read_mostly =     30 SECS;
+static unsigned int nf_ct_tcp_timeout_time_wait __read_mostly =     2 MINS;
+static unsigned int nf_ct_tcp_timeout_close __read_mostly =        10 SECS;
 
 /* RFC1122 says the R2 limit should be at least 100 seconds.
    Linux uses 15 packets as limit, which corresponds 
    to ~13-30min depending on RTO. */
-unsigned int nf_ct_tcp_timeout_max_retrans __read_mostly =   5 MINS;
+static unsigned int nf_ct_tcp_timeout_max_retrans __read_mostly =   5 MINS;
  
-static unsigned int * tcp_timeouts[]
-= { NULL,                              /* TCP_CONNTRACK_NONE */
+static unsigned int * tcp_timeouts[] = {
+    NULL,                              /* TCP_CONNTRACK_NONE */
     &nf_ct_tcp_timeout_syn_sent,       /* TCP_CONNTRACK_SYN_SENT, */
     &nf_ct_tcp_timeout_syn_recv,       /* TCP_CONNTRACK_SYN_RECV, */
     &nf_ct_tcp_timeout_established,    /* TCP_CONNTRACK_ESTABLISHED, */
@@ -473,8 +474,8 @@
 
 	/* Fast path for timestamp-only option */
 	if (length == TCPOLEN_TSTAMP_ALIGNED*4
-	    && *(__u32 *)ptr ==
-	        __constant_ntohl((TCPOPT_NOP << 24) 
+	    && *(__be32 *)ptr ==
+	        __constant_htonl((TCPOPT_NOP << 24)
 	        		 | (TCPOPT_NOP << 16)
 	        		 | (TCPOPT_TIMESTAMP << 8)
 	        		 | TCPOLEN_TIMESTAMP))
@@ -505,9 +506,7 @@
 			    	for (i = 0;
 			    	     i < (opsize - TCPOLEN_SACK_BASE);
 			    	     i += TCPOLEN_SACK_PERBLOCK) {
-					memcpy(&tmp, (__u32 *)(ptr + i) + 1,
-					       sizeof(__u32));
-					tmp = ntohl(tmp);
+				     	tmp = ntohl(*((__be32 *)(ptr+i)+1));
 
 					if (after(tmp, *sack))
 						*sack = tmp;
@@ -731,7 +730,7 @@
 	return res;
 }
 
-#ifdef CONFIG_IP_NF_NAT_NEEDED
+#ifdef CONFIG_NF_NAT_NEEDED
 /* Update sender->td_end after NAT successfully mangled the packet */
 /* Caller must linearize skb at tcp header. */
 void nf_conntrack_tcp_update(struct sk_buff *skb,
@@ -763,7 +762,7 @@
 		receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
 		receiver->td_scale);
 }
- 
+EXPORT_SYMBOL_GPL(nf_conntrack_tcp_update);
 #endif
 
 #define	TH_FIN	0x01
@@ -1167,11 +1166,221 @@
 	return 0;
 }
 #endif
-  
-struct nf_conntrack_protocol nf_conntrack_protocol_tcp4 =
+
+#ifdef CONFIG_SYSCTL
+static unsigned int tcp_sysctl_table_users;
+static struct ctl_table_header *tcp_sysctl_header;
+static struct ctl_table tcp_sysctl_table[] = {
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT,
+		.procname	= "nf_conntrack_tcp_timeout_syn_sent",
+		.data		= &nf_ct_tcp_timeout_syn_sent,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV,
+		.procname	= "nf_conntrack_tcp_timeout_syn_recv",
+		.data		= &nf_ct_tcp_timeout_syn_recv,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED,
+		.procname	= "nf_conntrack_tcp_timeout_established",
+		.data		= &nf_ct_tcp_timeout_established,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT,
+		.procname	= "nf_conntrack_tcp_timeout_fin_wait",
+		.data		= &nf_ct_tcp_timeout_fin_wait,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT,
+		.procname	= "nf_conntrack_tcp_timeout_close_wait",
+		.data		= &nf_ct_tcp_timeout_close_wait,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK,
+		.procname	= "nf_conntrack_tcp_timeout_last_ack",
+		.data		= &nf_ct_tcp_timeout_last_ack,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT,
+		.procname	= "nf_conntrack_tcp_timeout_time_wait",
+		.data		= &nf_ct_tcp_timeout_time_wait,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE,
+		.procname	= "nf_conntrack_tcp_timeout_close",
+		.data		= &nf_ct_tcp_timeout_close,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS,
+		.procname	= "nf_conntrack_tcp_timeout_max_retrans",
+		.data		= &nf_ct_tcp_timeout_max_retrans,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_LOOSE,
+		.procname	= "nf_conntrack_tcp_loose",
+		.data		= &nf_ct_tcp_loose,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_BE_LIBERAL,
+		.procname       = "nf_conntrack_tcp_be_liberal",
+		.data           = &nf_ct_tcp_be_liberal,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = &proc_dointvec,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_TCP_MAX_RETRANS,
+		.procname	= "nf_conntrack_tcp_max_retrans",
+		.data		= &nf_ct_tcp_max_retrans,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= 0
+	}
+};
+
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+static struct ctl_table tcp_compat_sysctl_table[] = {
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT,
+		.procname	= "ip_conntrack_tcp_timeout_syn_sent",
+		.data		= &nf_ct_tcp_timeout_syn_sent,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV,
+		.procname	= "ip_conntrack_tcp_timeout_syn_recv",
+		.data		= &nf_ct_tcp_timeout_syn_recv,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED,
+		.procname	= "ip_conntrack_tcp_timeout_established",
+		.data		= &nf_ct_tcp_timeout_established,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT,
+		.procname	= "ip_conntrack_tcp_timeout_fin_wait",
+		.data		= &nf_ct_tcp_timeout_fin_wait,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT,
+		.procname	= "ip_conntrack_tcp_timeout_close_wait",
+		.data		= &nf_ct_tcp_timeout_close_wait,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK,
+		.procname	= "ip_conntrack_tcp_timeout_last_ack",
+		.data		= &nf_ct_tcp_timeout_last_ack,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT,
+		.procname	= "ip_conntrack_tcp_timeout_time_wait",
+		.data		= &nf_ct_tcp_timeout_time_wait,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE,
+		.procname	= "ip_conntrack_tcp_timeout_close",
+		.data		= &nf_ct_tcp_timeout_close,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS,
+		.procname	= "ip_conntrack_tcp_timeout_max_retrans",
+		.data		= &nf_ct_tcp_timeout_max_retrans,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_TCP_LOOSE,
+		.procname	= "ip_conntrack_tcp_loose",
+		.data		= &nf_ct_tcp_loose,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL,
+		.procname	= "ip_conntrack_tcp_be_liberal",
+		.data		= &nf_ct_tcp_be_liberal,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS,
+		.procname	= "ip_conntrack_tcp_max_retrans",
+		.data		= &nf_ct_tcp_max_retrans,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= 0
+	}
+};
+#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
+#endif /* CONFIG_SYSCTL */
+
+struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
 {
 	.l3proto		= PF_INET,
-	.proto 			= IPPROTO_TCP,
+	.l4proto 		= IPPROTO_TCP,
 	.name 			= "tcp",
 	.pkt_to_tuple 		= tcp_pkt_to_tuple,
 	.invert_tuple 		= tcp_invert_tuple,
@@ -1187,12 +1396,21 @@
 	.tuple_to_nfattr	= nf_ct_port_tuple_to_nfattr,
 	.nfattr_to_tuple	= nf_ct_port_nfattr_to_tuple,
 #endif
+#ifdef CONFIG_SYSCTL
+	.ctl_table_users	= &tcp_sysctl_table_users,
+	.ctl_table_header	= &tcp_sysctl_header,
+	.ctl_table		= tcp_sysctl_table,
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+	.ctl_compat_table	= tcp_compat_sysctl_table,
+#endif
+#endif
 };
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4);
 
-struct nf_conntrack_protocol nf_conntrack_protocol_tcp6 =
+struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
 {
 	.l3proto		= PF_INET6,
-	.proto 			= IPPROTO_TCP,
+	.l4proto 		= IPPROTO_TCP,
 	.name 			= "tcp",
 	.pkt_to_tuple 		= tcp_pkt_to_tuple,
 	.invert_tuple 		= tcp_invert_tuple,
@@ -1208,7 +1426,10 @@
 	.tuple_to_nfattr	= nf_ct_port_tuple_to_nfattr,
 	.nfattr_to_tuple	= nf_ct_port_nfattr_to_tuple,
 #endif
+#ifdef CONFIG_SYSCTL
+	.ctl_table_users	= &tcp_sysctl_table_users,
+	.ctl_table_header	= &tcp_sysctl_header,
+	.ctl_table		= tcp_sysctl_table,
+#endif
 };
-
-EXPORT_SYMBOL(nf_conntrack_protocol_tcp4);
-EXPORT_SYMBOL(nf_conntrack_protocol_tcp6);
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp6);
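
The timestamp fast path above compares the four option bytes NOP, NOP, TIMESTAMP, TCPOLEN_TIMESTAMP as a single 32-bit word: the constant is assembled in host order and converted once at compile time, and the packet side is read as a raw network-order __be32. Since htonl() and ntohl() perform the same byte swap, the switch from __constant_ntohl() to __constant_htonl() does not change the generated code; it expresses the intended direction of the conversion. A userspace illustration of the comparison, using the standard option values (NOP = 1, TIMESTAMP = 8, option length 10):

/* Userspace illustration of the timestamp-only fast path: compare the raw
 * network-order word against a constant built in host order and converted
 * once with htonl(). */
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define TCPOPT_NOP        1
#define TCPOPT_TIMESTAMP  8
#define TCPOLEN_TIMESTAMP 10

int main(void)
{
	unsigned char opts[4] = { TCPOPT_NOP, TCPOPT_NOP,
				  TCPOPT_TIMESTAMP, TCPOLEN_TIMESTAMP };
	uint32_t word;

	memcpy(&word, opts, sizeof(word));	/* raw network-order bytes */

	if (word == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
		printf("timestamp-only option block\n");
	return 0;
}
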
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index d28981c..e49cd25 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -22,13 +22,15 @@
 #include <linux/ipv6.h>
 #include <net/ip6_checksum.h>
 #include <net/checksum.h>
+
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter_ipv6.h>
-#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
 
-unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ;
-unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ;
+static unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ;
+static unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ;
 
 static int udp_pkt_to_tuple(const struct sk_buff *skb,
 			     unsigned int dataoff,
@@ -146,10 +148,59 @@
 	return NF_ACCEPT;
 }
 
-struct nf_conntrack_protocol nf_conntrack_protocol_udp4 =
+#ifdef CONFIG_SYSCTL
+static unsigned int udp_sysctl_table_users;
+static struct ctl_table_header *udp_sysctl_header;
+static struct ctl_table udp_sysctl_table[] = {
+	{
+		.ctl_name	= NET_NF_CONNTRACK_UDP_TIMEOUT,
+		.procname	= "nf_conntrack_udp_timeout",
+		.data		= &nf_ct_udp_timeout,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM,
+		.procname	= "nf_conntrack_udp_timeout_stream",
+		.data		= &nf_ct_udp_timeout_stream,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= 0
+	}
+};
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+static struct ctl_table udp_compat_sysctl_table[] = {
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT,
+		.procname	= "ip_conntrack_udp_timeout",
+		.data		= &nf_ct_udp_timeout,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM,
+		.procname	= "ip_conntrack_udp_timeout_stream",
+		.data		= &nf_ct_udp_timeout_stream,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_jiffies,
+	},
+	{
+		.ctl_name	= 0
+	}
+};
+#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
+#endif /* CONFIG_SYSCTL */
+
+struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
 {
 	.l3proto		= PF_INET,
-	.proto			= IPPROTO_UDP,
+	.l4proto		= IPPROTO_UDP,
 	.name			= "udp",
 	.pkt_to_tuple		= udp_pkt_to_tuple,
 	.invert_tuple		= udp_invert_tuple,
@@ -163,12 +214,21 @@
 	.tuple_to_nfattr	= nf_ct_port_tuple_to_nfattr,
 	.nfattr_to_tuple	= nf_ct_port_nfattr_to_tuple,
 #endif
+#ifdef CONFIG_SYSCTL
+	.ctl_table_users	= &udp_sysctl_table_users,
+	.ctl_table_header	= &udp_sysctl_header,
+	.ctl_table		= udp_sysctl_table,
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+	.ctl_compat_table	= udp_compat_sysctl_table,
+#endif
+#endif
 };
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4);
 
-struct nf_conntrack_protocol nf_conntrack_protocol_udp6 =
+struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
 {
 	.l3proto		= PF_INET6,
-	.proto			= IPPROTO_UDP,
+	.l4proto		= IPPROTO_UDP,
 	.name			= "udp",
 	.pkt_to_tuple		= udp_pkt_to_tuple,
 	.invert_tuple		= udp_invert_tuple,
@@ -182,7 +242,10 @@
 	.tuple_to_nfattr	= nf_ct_port_tuple_to_nfattr,
 	.nfattr_to_tuple	= nf_ct_port_nfattr_to_tuple,
 #endif
+#ifdef CONFIG_SYSCTL
+	.ctl_table_users	= &udp_sysctl_table_users,
+	.ctl_table_header	= &udp_sysctl_header,
+	.ctl_table		= udp_sysctl_table,
+#endif
 };
-
-EXPORT_SYMBOL(nf_conntrack_protocol_udp4);
-EXPORT_SYMBOL(nf_conntrack_protocol_udp6);
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6);
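
The IPv4 and IPv6 UDP trackers above point at the same ctl_table and ctl_table_header and share one ctl_table_users counter; the idea, as far as the registration helpers earlier in this patch suggest, is that whichever protocol registers first creates the /proc/sys/net/netfilter entries and the table is only removed when the last user unregisters. A tiny userspace sketch of that first-in/last-out refcounting; register_table() and unregister_table() are placeholders, not real sysctl calls:

/* Sketch of the shared-table refcounting idea: first user registers,
 * last user unregisters. */
#include <stdio.h>

static int users;			/* plays the role of *ctl_table_users */

static void register_table(void)	{ printf("sysctl table registered\n"); }
static void unregister_table(void)	{ printf("sysctl table removed\n"); }

static void get_table(void)
{
	if (users++ == 0)
		register_table();
}

static void put_table(void)
{
	if (--users == 0)
		unregister_table();
}

int main(void)
{
	get_table();	/* udp4 registers: table created */
	get_table();	/* udp6 reuses it */
	put_table();	/* udp6 gone: table stays */
	put_table();	/* udp4 gone: table removed */
	return 0;
}
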
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
new file mode 100644
index 0000000..eb2a241
--- /dev/null
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -0,0 +1,531 @@
+/* SIP extension for IP connection tracking.
+ *
+ * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
+ * based on RR's ip_conntrack_ftp.c and other modules.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/skbuff.h>
+#include <linux/inet.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/netfilter.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/netfilter/nf_conntrack_sip.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>");
+MODULE_DESCRIPTION("SIP connection tracking helper");
+MODULE_ALIAS("ip_conntrack_sip");
+
+#define MAX_PORTS	8
+static unsigned short ports[MAX_PORTS];
+static int ports_c;
+module_param_array(ports, ushort, &ports_c, 0400);
+MODULE_PARM_DESC(ports, "port numbers of SIP servers");
+
+static unsigned int sip_timeout __read_mostly = SIP_TIMEOUT;
+module_param(sip_timeout, uint, 0600);
+MODULE_PARM_DESC(sip_timeout, "timeout for the master SIP session");
+
+unsigned int (*nf_nat_sip_hook)(struct sk_buff **pskb,
+				enum ip_conntrack_info ctinfo,
+				struct nf_conn *ct,
+				const char **dptr) __read_mostly;
+EXPORT_SYMBOL_GPL(nf_nat_sip_hook);
+
+unsigned int (*nf_nat_sdp_hook)(struct sk_buff **pskb,
+				enum ip_conntrack_info ctinfo,
+				struct nf_conntrack_expect *exp,
+				const char *dptr) __read_mostly;
+EXPORT_SYMBOL_GPL(nf_nat_sdp_hook);
+
+static int digits_len(struct nf_conn *, const char *, const char *, int *);
+static int epaddr_len(struct nf_conn *, const char *, const char *, int *);
+static int skp_digits_len(struct nf_conn *, const char *, const char *, int *);
+static int skp_epaddr_len(struct nf_conn *, const char *, const char *, int *);
+
+struct sip_header_nfo {
+	const char	*lname;
+	const char	*sname;
+	const char	*ln_str;
+	size_t		lnlen;
+	size_t		snlen;
+	size_t		ln_strlen;
+	int		case_sensitive;
+	int		(*match_len)(struct nf_conn *, const char *,
+				     const char *, int *);
+};
+
+static const struct sip_header_nfo ct_sip_hdrs[] = {
+	[POS_REG_REQ_URI] = { 	/* SIP REGISTER request URI */
+		.lname		= "sip:",
+		.lnlen		= sizeof("sip:") - 1,
+		.ln_str		= ":",
+		.ln_strlen	= sizeof(":") - 1,
+		.match_len	= epaddr_len,
+	},
+	[POS_REQ_URI] = { 	/* SIP request URI */
+		.lname		= "sip:",
+		.lnlen		= sizeof("sip:") - 1,
+		.ln_str		= "@",
+		.ln_strlen	= sizeof("@") - 1,
+		.match_len	= epaddr_len,
+	},
+	[POS_FROM] = {		/* SIP From header */
+		.lname		= "From:",
+		.lnlen		= sizeof("From:") - 1,
+		.sname		= "\r\nf:",
+		.snlen		= sizeof("\r\nf:") - 1,
+		.ln_str		= "sip:",
+		.ln_strlen	= sizeof("sip:") - 1,
+		.match_len	= skp_epaddr_len,
+	},
+	[POS_TO] = {		/* SIP To header */
+		.lname		= "To:",
+		.lnlen		= sizeof("To:") - 1,
+		.sname		= "\r\nt:",
+		.snlen		= sizeof("\r\nt:") - 1,
+		.ln_str		= "sip:",
+		.ln_strlen	= sizeof("sip:") - 1,
+		.match_len	= skp_epaddr_len
+	},
+	[POS_VIA] = { 		/* SIP Via header */
+		.lname		= "Via:",
+		.lnlen		= sizeof("Via:") - 1,
+		.sname		= "\r\nv:",
+		.snlen		= sizeof("\r\nv:") - 1, /* rfc3261 "\r\n" */
+		.ln_str		= "UDP ",
+		.ln_strlen	= sizeof("UDP ") - 1,
+		.match_len	= epaddr_len,
+	},
+	[POS_CONTACT] = { 	/* SIP Contact header */
+		.lname		= "Contact:",
+		.lnlen		= sizeof("Contact:") - 1,
+		.sname		= "\r\nm:",
+		.snlen		= sizeof("\r\nm:") - 1,
+		.ln_str		= "sip:",
+		.ln_strlen	= sizeof("sip:") - 1,
+		.match_len	= skp_epaddr_len
+	},
+	[POS_CONTENT] = { 	/* SIP Content length header */
+		.lname		= "Content-Length:",
+		.lnlen		= sizeof("Content-Length:") - 1,
+		.sname		= "\r\nl:",
+		.snlen		= sizeof("\r\nl:") - 1,
+		.ln_str		= ":",
+		.ln_strlen	= sizeof(":") - 1,
+		.match_len	= skp_digits_len
+	},
+	[POS_MEDIA] = {		/* SDP media info */
+		.case_sensitive	= 1,
+		.lname		= "\nm=",
+		.lnlen		= sizeof("\nm=") - 1,
+		.sname		= "\rm=",
+		.snlen		= sizeof("\rm=") - 1,
+		.ln_str		= "audio ",
+		.ln_strlen	= sizeof("audio ") - 1,
+		.match_len	= digits_len
+	},
+	[POS_OWNER_IP4] = {	/* SDP owner address*/
+		.case_sensitive	= 1,
+		.lname		= "\no=",
+		.lnlen		= sizeof("\no=") - 1,
+		.sname		= "\ro=",
+		.snlen		= sizeof("\ro=") - 1,
+		.ln_str		= "IN IP4 ",
+		.ln_strlen	= sizeof("IN IP4 ") - 1,
+		.match_len	= epaddr_len
+	},
+	[POS_CONNECTION_IP4] = {/* SDP connection info */
+		.case_sensitive	= 1,
+		.lname		= "\nc=",
+		.lnlen		= sizeof("\nc=") - 1,
+		.sname		= "\rc=",
+		.snlen		= sizeof("\rc=") - 1,
+		.ln_str		= "IN IP4 ",
+		.ln_strlen	= sizeof("IN IP4 ") - 1,
+		.match_len	= epaddr_len
+	},
+	[POS_OWNER_IP6] = {	/* SDP owner address*/
+		.case_sensitive	= 1,
+		.lname		= "\no=",
+		.lnlen		= sizeof("\no=") - 1,
+		.sname		= "\ro=",
+		.snlen		= sizeof("\ro=") - 1,
+		.ln_str		= "IN IP6 ",
+		.ln_strlen	= sizeof("IN IP6 ") - 1,
+		.match_len	= epaddr_len
+	},
+	[POS_CONNECTION_IP6] = {/* SDP connection info */
+		.case_sensitive	= 1,
+		.lname		= "\nc=",
+		.lnlen		= sizeof("\nc=") - 1,
+		.sname		= "\rc=",
+		.snlen		= sizeof("\rc=") - 1,
+		.ln_str		= "IN IP6 ",
+		.ln_strlen	= sizeof("IN IP6 ") - 1,
+		.match_len	= epaddr_len
+	},
+	[POS_SDP_HEADER] = { 	/* SDP version header */
+		.case_sensitive	= 1,
+		.lname		= "\nv=",
+		.lnlen		= sizeof("\nv=") - 1,
+		.sname		= "\rv=",
+		.snlen		= sizeof("\rv=") - 1,
+		.ln_str		= "=",
+		.ln_strlen	= sizeof("=") - 1,
+		.match_len	= digits_len
+	}
+};
+
+/* Get the line length up to the first CR or LF seen. */
+int ct_sip_lnlen(const char *line, const char *limit)
+{
+	const char *k = line;
+
+	while ((line <= limit) && (*line == '\r' || *line == '\n'))
+		line++;
+
+	while (line <= limit) {
+		if (*line == '\r' || *line == '\n')
+			break;
+		line++;
+	}
+	return line - k;
+}
+EXPORT_SYMBOL_GPL(ct_sip_lnlen);
+
+/* Linear string search; case sensitivity is selected by the last argument. */
+const char *ct_sip_search(const char *needle, const char *haystack,
+			  size_t needle_len, size_t haystack_len,
+			  int case_sensitive)
+{
+	const char *limit = haystack + (haystack_len - needle_len);
+
+	while (haystack <= limit) {
+		if (case_sensitive) {
+			if (strncmp(haystack, needle, needle_len) == 0)
+				return haystack;
+		} else {
+			if (strnicmp(haystack, needle, needle_len) == 0)
+				return haystack;
+		}
+		haystack++;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(ct_sip_search);
+
+static int digits_len(struct nf_conn *ct, const char *dptr,
+		      const char *limit, int *shift)
+{
+	int len = 0;
+	while (dptr <= limit && isdigit(*dptr)) {
+		dptr++;
+		len++;
+	}
+	return len;
+}
+
+/* Get the digits length, skipping blank spaces. */
+static int skp_digits_len(struct nf_conn *ct, const char *dptr,
+			  const char *limit, int *shift)
+{
+	for (; dptr <= limit && *dptr == ' '; dptr++)
+		(*shift)++;
+
+	return digits_len(ct, dptr, limit, shift);
+}
+
+static int parse_addr(struct nf_conn *ct, const char *cp, const char **endp,
+		      union nf_conntrack_address *addr, const char *limit)
+{
+	const char *end;
+	int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+	int ret = 0;
+
+	switch (family) {
+	case AF_INET:
+		ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end);
+		break;
+	case AF_INET6:
+		ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end);
+		break;
+	default:
+		BUG();
+	}
+
+	if (ret == 0 || end == cp)
+		return 0;
+	if (endp)
+		*endp = end;
+	return 1;
+}
+
+/* Skip over an IP address (and an optional port); return its length. */
+static int epaddr_len(struct nf_conn *ct, const char *dptr,
+		      const char *limit, int *shift)
+{
+	union nf_conntrack_address addr;
+	const char *aux = dptr;
+
+	if (!parse_addr(ct, dptr, &dptr, &addr, limit)) {
+		DEBUGP("ip: %s parse failed.!\n", dptr);
+		return 0;
+	}
+
+	/* Port number */
+	if (*dptr == ':') {
+		dptr++;
+		dptr += digits_len(ct, dptr, limit, shift);
+	}
+	return dptr - aux;
+}
+
+/* Get the address length, skipping any user info. */
+static int skp_epaddr_len(struct nf_conn *ct, const char *dptr,
+			  const char *limit, int *shift)
+{
+	int s = *shift;
+
+	for (; dptr <= limit && *dptr != '@'; dptr++)
+		(*shift)++;
+
+	if (*dptr == '@') {
+		dptr++;
+		(*shift)++;
+	} else
+		*shift = s;
+
+	return epaddr_len(ct, dptr, limit, shift);
+}
+
+/* Returns 1 on a match, 0 if not found, -1 on a parse error. */
+int ct_sip_get_info(struct nf_conn *ct,
+		    const char *dptr, size_t dlen,
+		    unsigned int *matchoff,
+		    unsigned int *matchlen,
+		    enum sip_header_pos pos)
+{
+	const struct sip_header_nfo *hnfo = &ct_sip_hdrs[pos];
+	const char *limit, *aux, *k = dptr;
+	int shift = 0;
+
+	limit = dptr + (dlen - hnfo->lnlen);
+
+	while (dptr <= limit) {
+		if ((strncmp(dptr, hnfo->lname, hnfo->lnlen) != 0) &&
+		    (strncmp(dptr, hnfo->sname, hnfo->snlen) != 0)) {
+			dptr++;
+			continue;
+		}
+		aux = ct_sip_search(hnfo->ln_str, dptr, hnfo->ln_strlen,
+		                    ct_sip_lnlen(dptr, limit),
+				    hnfo->case_sensitive);
+		if (!aux) {
+			DEBUGP("'%s' not found in '%s'.\n", hnfo->ln_str,
+			       hnfo->lname);
+			return -1;
+		}
+		aux += hnfo->ln_strlen;
+
+		*matchlen = hnfo->match_len(ct, aux, limit, &shift);
+		if (!*matchlen)
+			return -1;
+
+		*matchoff = (aux - k) + shift;
+
+		DEBUGP("%s match succeeded! - len: %u\n", hnfo->lname,
+		       *matchlen);
+		return 1;
+	}
+	DEBUGP("%s header not found.\n", hnfo->lname);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ct_sip_get_info);
+
+static int set_expected_rtp(struct sk_buff **pskb,
+			    struct nf_conn *ct,
+			    enum ip_conntrack_info ctinfo,
+			    union nf_conntrack_address *addr,
+			    __be16 port,
+			    const char *dptr)
+{
+	struct nf_conntrack_expect *exp;
+	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+	int family = ct->tuplehash[!dir].tuple.src.l3num;
+	int ret;
+	typeof(nf_nat_sdp_hook) nf_nat_sdp;
+
+	exp = nf_conntrack_expect_alloc(ct);
+	if (exp == NULL)
+		return NF_DROP;
+	nf_conntrack_expect_init(exp, family,
+				 &ct->tuplehash[!dir].tuple.src.u3, addr,
+				 IPPROTO_UDP, NULL, &port);
+
+	nf_nat_sdp = rcu_dereference(nf_nat_sdp_hook);
+	if (nf_nat_sdp && ct->status & IPS_NAT_MASK)
+		ret = nf_nat_sdp(pskb, ctinfo, exp, dptr);
+	else {
+		if (nf_conntrack_expect_related(exp) != 0)
+			ret = NF_DROP;
+		else
+			ret = NF_ACCEPT;
+	}
+	nf_conntrack_expect_put(exp);
+
+	return ret;
+}
+
+static int sip_help(struct sk_buff **pskb,
+		    unsigned int protoff,
+		    struct nf_conn *ct,
+		    enum ip_conntrack_info ctinfo)
+{
+	int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+	union nf_conntrack_address addr;
+	unsigned int dataoff, datalen;
+	const char *dptr;
+	int ret = NF_ACCEPT;
+	int matchoff, matchlen;
+	u_int16_t port;
+	enum sip_header_pos pos;
+	typeof(nf_nat_sip_hook) nf_nat_sip;
+
+	/* No Data ? */
+	dataoff = protoff + sizeof(struct udphdr);
+	if (dataoff >= (*pskb)->len)
+		return NF_ACCEPT;
+
+	nf_ct_refresh(ct, *pskb, sip_timeout * HZ);
+
+	if (!skb_is_nonlinear(*pskb))
+		dptr = (*pskb)->data + dataoff;
+	else {
+		DEBUGP("Copy of skbuff not supported yet.\n");
+		goto out;
+	}
+
+	nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
+	if (nf_nat_sip && ct->status & IPS_NAT_MASK) {
+		if (!nf_nat_sip(pskb, ctinfo, ct, &dptr)) {
+			ret = NF_DROP;
+			goto out;
+		}
+	}
+
+	datalen = (*pskb)->len - dataoff;
+	if (datalen < sizeof("SIP/2.0 200") - 1)
+		goto out;
+
+	/* RTP info only in some SDP pkts */
+	if (memcmp(dptr, "INVITE", sizeof("INVITE") - 1) != 0 &&
+	    memcmp(dptr, "SIP/2.0 200", sizeof("SIP/2.0 200") - 1) != 0) {
+		goto out;
+	}
+	/* Get address and port from SDP packet. */
+	pos = family == AF_INET ? POS_CONNECTION_IP4 : POS_CONNECTION_IP6;
+	if (ct_sip_get_info(ct, dptr, datalen, &matchoff, &matchlen, pos) > 0) {
+
+		/* We'll drop only if there are parse problems. */
+		if (!parse_addr(ct, dptr + matchoff, NULL, &addr,
+			        dptr + datalen)) {
+			ret = NF_DROP;
+			goto out;
+		}
+		if (ct_sip_get_info(ct, dptr, datalen, &matchoff, &matchlen,
+		                    POS_MEDIA) > 0) {
+
+			port = simple_strtoul(dptr + matchoff, NULL, 10);
+			if (port < 1024) {
+				ret = NF_DROP;
+				goto out;
+			}
+			ret = set_expected_rtp(pskb, ct, ctinfo, &addr,
+					       htons(port), dptr);
+		}
+	}
+out:
+	return ret;
+}
+
+static struct nf_conntrack_helper sip[MAX_PORTS][2] __read_mostly;
+static char sip_names[MAX_PORTS][2][sizeof("sip-65535")] __read_mostly;
+
+static void nf_conntrack_sip_fini(void)
+{
+	int i, j;
+
+	for (i = 0; i < ports_c; i++) {
+		for (j = 0; j < 2; j++) {
+			if (sip[i][j].me == NULL)
+				continue;
+			nf_conntrack_helper_unregister(&sip[i][j]);
+		}
+	}
+}
+
+static int __init nf_conntrack_sip_init(void)
+{
+	int i, j, ret;
+	char *tmpname;
+
+	if (ports_c == 0)
+		ports[ports_c++] = SIP_PORT;
+
+	for (i = 0; i < ports_c; i++) {
+		memset(&sip[i], 0, sizeof(sip[i]));
+
+		sip[i][0].tuple.src.l3num = AF_INET;
+		sip[i][1].tuple.src.l3num = AF_INET6;
+		for (j = 0; j < 2; j++) {
+			sip[i][j].tuple.dst.protonum = IPPROTO_UDP;
+			sip[i][j].tuple.src.u.udp.port = htons(ports[i]);
+			sip[i][j].mask.src.l3num = 0xFFFF;
+			sip[i][j].mask.src.u.udp.port = htons(0xFFFF);
+			sip[i][j].mask.dst.protonum = 0xFF;
+			sip[i][j].max_expected = 2;
+			sip[i][j].timeout = 3 * 60; /* 3 minutes */
+			sip[i][j].me = THIS_MODULE;
+			sip[i][j].help = sip_help;
+
+			tmpname = &sip_names[i][j][0];
+			if (ports[i] == SIP_PORT)
+				sprintf(tmpname, "sip");
+			else
+				sprintf(tmpname, "sip-%u", i);
+			sip[i][j].name = tmpname;
+
+			DEBUGP("port #%u: %u\n", i, ports[i]);
+
+			ret = nf_conntrack_helper_register(&sip[i][j]);
+			if (ret) {
+				printk("nf_ct_sip: failed to register helper "
+				       "for pf: %u port: %u\n",
+				       sip[i][j].tuple.src.l3num, ports[i]);
+				nf_conntrack_sip_fini();
+				return ret;
+			}
+		}
+	}
+	return 0;
+}
+
+module_init(nf_conntrack_sip_init);
+module_exit(nf_conntrack_sip_fini);
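
ct_sip_get_info() above works in two steps: scan the message for a header by its long or compact name, then search within that line for the marker string (":", "sip:", "IN IP4 ", ...) and let the per-header match_len callback measure the value, returning the offset and length that a NAT helper can later rewrite. The following is a minimal userspace model of that scan for a Content-Length header only; the helper name and the handling of the compact "l:" form are simplified for the sketch.

/* Minimal userspace model of the header scan used by ct_sip_get_info():
 * locate the header name, find the marker inside that line, then measure
 * the digits that follow.  Only Content-Length is modelled here. */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int find_content_length(const char *msg, size_t *off, size_t *len)
{
	const char *p = strstr(msg, "Content-Length:");

	if (!p)
		p = strstr(msg, "\r\nl:");	/* compact form */
	if (!p)
		return 0;

	p = strchr(p, ':') + 1;			/* the ":" marker */
	while (*p == ' ')			/* skp_digits_len() step */
		p++;

	*off = p - msg;
	*len = 0;
	while (isdigit((unsigned char)p[*len]))
		(*len)++;
	return *len ? 1 : -1;
}

int main(void)
{
	const char *msg = "INVITE sip:bob@example.org SIP/2.0\r\n"
			  "Content-Length: 142\r\n\r\n";
	size_t off, len;

	if (find_content_length(msg, &off, &len) > 0)
		printf("value at offset %zu, length %zu\n", off, len);
	return 0;
}
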
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 5954f67..f1cb60f 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -29,13 +29,11 @@
 #include <linux/sysctl.h>
 #endif
 
-#define ASSERT_READ_LOCK(x)
-#define ASSERT_WRITE_LOCK(x)
-
 #include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
-#include <net/netfilter/nf_conntrack_protocol.h>
 #include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_expect.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 
 #if 0
@@ -46,33 +44,15 @@
 
 MODULE_LICENSE("GPL");
 
-extern atomic_t nf_conntrack_count;
-DECLARE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
-
-static int kill_l3proto(struct nf_conn *i, void *data)
-{
-	return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num == 
-			((struct nf_conntrack_l3proto *)data)->l3proto);
-}
-
-static int kill_proto(struct nf_conn *i, void *data)
-{
-	struct nf_conntrack_protocol *proto;
-	proto = (struct nf_conntrack_protocol *)data;
-	return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum == 
-			proto->proto) &&
-	       (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num ==
-			proto->l3proto);
-}
-
 #ifdef CONFIG_PROC_FS
-static int
+int
 print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
 	    struct nf_conntrack_l3proto *l3proto,
-	    struct nf_conntrack_protocol *proto)
+	    struct nf_conntrack_l4proto *l4proto)
 {
-	return l3proto->print_tuple(s, tuple) || proto->print_tuple(s, tuple);
+	return l3proto->print_tuple(s, tuple) || l4proto->print_tuple(s, tuple);
 }
+EXPORT_SYMBOL_GPL(print_tuple);
 
 #ifdef CONFIG_NF_CT_ACCT
 static unsigned int
@@ -150,9 +130,8 @@
 	const struct nf_conntrack_tuple_hash *hash = v;
 	const struct nf_conn *conntrack = nf_ct_tuplehash_to_ctrack(hash);
 	struct nf_conntrack_l3proto *l3proto;
-	struct nf_conntrack_protocol *proto;
+	struct nf_conntrack_l4proto *l4proto;
 
-	ASSERT_READ_LOCK(&nf_conntrack_lock);
 	NF_CT_ASSERT(conntrack);
 
 	/* we only want to print DIR_ORIGINAL */
@@ -163,16 +142,16 @@
 				       .tuple.src.l3num);
 
 	NF_CT_ASSERT(l3proto);
-	proto = __nf_ct_proto_find(conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
+	l4proto = __nf_ct_l4proto_find(conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
 				   .tuple.src.l3num,
 				   conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
 				   .tuple.dst.protonum);
-	NF_CT_ASSERT(proto);
+	NF_CT_ASSERT(l4proto);
 
 	if (seq_printf(s, "%-8s %u %-8s %u %ld ",
 		       l3proto->name,
 		       conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num,
-		       proto->name,
+		       l4proto->name,
 		       conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum,
 		       timer_pending(&conntrack->timeout)
 		       ? (long)(conntrack->timeout.expires - jiffies)/HZ : 0) != 0)
@@ -181,11 +160,11 @@
 	if (l3proto->print_conntrack(s, conntrack))
 		return -ENOSPC;
 
-	if (proto->print_conntrack(s, conntrack))
+	if (l4proto->print_conntrack(s, conntrack))
 		return -ENOSPC;
 
 	if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
-			l3proto, proto))
+			l3proto, l4proto))
 		return -ENOSPC;
 
 	if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_ORIGINAL]))
@@ -196,7 +175,7 @@
 			return -ENOSPC;
 
 	if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_REPLY].tuple,
-			l3proto, proto))
+			l3proto, l4proto))
 		return -ENOSPC;
 
 	if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_REPLY]))
@@ -258,84 +237,6 @@
 	.release = seq_release_private,
 };
 
-/* expects */
-static void *exp_seq_start(struct seq_file *s, loff_t *pos)
-{
-	struct list_head *e = &nf_conntrack_expect_list;
-	loff_t i;
-
-	/* strange seq_file api calls stop even if we fail,
-	 * thus we need to grab lock since stop unlocks */
-	read_lock_bh(&nf_conntrack_lock);
-
-	if (list_empty(e))
-		return NULL;
-
-	for (i = 0; i <= *pos; i++) {
-		e = e->next;
-		if (e == &nf_conntrack_expect_list)
-			return NULL;
-	}
-	return e;
-}
-
-static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
-	struct list_head *e = v;
-
-	++*pos;
-	e = e->next;
-
-	if (e == &nf_conntrack_expect_list)
-		return NULL;
-
-	return e;
-}
-
-static void exp_seq_stop(struct seq_file *s, void *v)
-{
-	read_unlock_bh(&nf_conntrack_lock);
-}
-
-static int exp_seq_show(struct seq_file *s, void *v)
-{
-	struct nf_conntrack_expect *expect = v;
-
-	if (expect->timeout.function)
-		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
-			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
-	else
-		seq_printf(s, "- ");
-	seq_printf(s, "l3proto = %u proto=%u ",
-		   expect->tuple.src.l3num,
-		   expect->tuple.dst.protonum);
-	print_tuple(s, &expect->tuple,
-		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
-		    __nf_ct_proto_find(expect->tuple.src.l3num,
-				       expect->tuple.dst.protonum));
-	return seq_putc(s, '\n');
-}
-
-static struct seq_operations exp_seq_ops = {
-	.start = exp_seq_start,
-	.next = exp_seq_next,
-	.stop = exp_seq_stop,
-	.show = exp_seq_show
-};
-
-static int exp_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &exp_seq_ops);
-}
-
-static struct file_operations exp_file_ops = {
-	.owner   = THIS_MODULE,
-	.open    = exp_open,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.release = seq_release
-};
-
 static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	int cpu;
@@ -428,34 +329,9 @@
 /* Sysctl support */
 
 int nf_conntrack_checksum __read_mostly = 1;
+EXPORT_SYMBOL_GPL(nf_conntrack_checksum);
 
 #ifdef CONFIG_SYSCTL
-
-/* From nf_conntrack_core.c */
-extern int nf_conntrack_max;
-extern unsigned int nf_conntrack_htable_size;
-
-/* From nf_conntrack_proto_tcp.c */
-extern unsigned int nf_ct_tcp_timeout_syn_sent;
-extern unsigned int nf_ct_tcp_timeout_syn_recv;
-extern unsigned int nf_ct_tcp_timeout_established;
-extern unsigned int nf_ct_tcp_timeout_fin_wait;
-extern unsigned int nf_ct_tcp_timeout_close_wait;
-extern unsigned int nf_ct_tcp_timeout_last_ack;
-extern unsigned int nf_ct_tcp_timeout_time_wait;
-extern unsigned int nf_ct_tcp_timeout_close;
-extern unsigned int nf_ct_tcp_timeout_max_retrans;
-extern int nf_ct_tcp_loose;
-extern int nf_ct_tcp_be_liberal;
-extern int nf_ct_tcp_max_retrans;
-
-/* From nf_conntrack_proto_udp.c */
-extern unsigned int nf_ct_udp_timeout;
-extern unsigned int nf_ct_udp_timeout_stream;
-
-/* From nf_conntrack_proto_generic.c */
-extern unsigned int nf_ct_generic_timeout;
-
 /* Log invalid packets of a given protocol */
 static int log_invalid_proto_min = 0;
 static int log_invalid_proto_max = 255;
@@ -496,94 +372,6 @@
 		.proc_handler	= &proc_dointvec,
 	},
 	{
-		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT,
-		.procname	= "nf_conntrack_tcp_timeout_syn_sent",
-		.data		= &nf_ct_tcp_timeout_syn_sent,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-	},
-	{
-		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV,
-		.procname	= "nf_conntrack_tcp_timeout_syn_recv",
-		.data		= &nf_ct_tcp_timeout_syn_recv,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-	},
-	{
-		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED,
-		.procname	= "nf_conntrack_tcp_timeout_established",
-		.data		= &nf_ct_tcp_timeout_established,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-	},
-	{
-		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT,
-		.procname	= "nf_conntrack_tcp_timeout_fin_wait",
-		.data		= &nf_ct_tcp_timeout_fin_wait,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-	},
-	{
-		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT,
-		.procname	= "nf_conntrack_tcp_timeout_close_wait",
-		.data		= &nf_ct_tcp_timeout_close_wait,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-	},
-	{
-		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK,
-		.procname	= "nf_conntrack_tcp_timeout_last_ack",
-		.data		= &nf_ct_tcp_timeout_last_ack,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-	},
-	{
-		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT,
-		.procname	= "nf_conntrack_tcp_timeout_time_wait",
-		.data		= &nf_ct_tcp_timeout_time_wait,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-	},
-	{
-		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE,
-		.procname	= "nf_conntrack_tcp_timeout_close",
-		.data		= &nf_ct_tcp_timeout_close,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-	},
-	{
-		.ctl_name	= NET_NF_CONNTRACK_UDP_TIMEOUT,
-		.procname	= "nf_conntrack_udp_timeout",
-		.data		= &nf_ct_udp_timeout,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-	},
-	{
-		.ctl_name	= NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM,
-		.procname	= "nf_conntrack_udp_timeout_stream",
-		.data		= &nf_ct_udp_timeout_stream,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-	},
-	{
-		.ctl_name	= NET_NF_CONNTRACK_GENERIC_TIMEOUT,
-		.procname	= "nf_conntrack_generic_timeout",
-		.data		= &nf_ct_generic_timeout,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-	},
-	{
 		.ctl_name	= NET_NF_CONNTRACK_LOG_INVALID,
 		.procname	= "nf_conntrack_log_invalid",
 		.data		= &nf_ct_log_invalid,
@@ -594,38 +382,6 @@
 		.extra1		= &log_invalid_proto_min,
 		.extra2		= &log_invalid_proto_max,
 	},
-	{
-		.ctl_name	= NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS,
-		.procname	= "nf_conntrack_tcp_timeout_max_retrans",
-		.data		= &nf_ct_tcp_timeout_max_retrans,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-	},
-	{
-		.ctl_name	= NET_NF_CONNTRACK_TCP_LOOSE,
-		.procname	= "nf_conntrack_tcp_loose",
-		.data		= &nf_ct_tcp_loose,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
-	{
-		.ctl_name	= NET_NF_CONNTRACK_TCP_BE_LIBERAL,
-		.procname       = "nf_conntrack_tcp_be_liberal",
-		.data           = &nf_ct_tcp_be_liberal,
-		.maxlen         = sizeof(unsigned int),
-		.mode           = 0644,
-		.proc_handler   = &proc_dointvec,
-	},
-	{
-		.ctl_name	= NET_NF_CONNTRACK_TCP_MAX_RETRANS,
-		.procname	= "nf_conntrack_tcp_max_retrans",
-		.data		= &nf_ct_tcp_max_retrans,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
 
 	{ .ctl_name = 0 }
 };
@@ -659,109 +415,9 @@
 	},
 	{ .ctl_name = 0 }
 };
-EXPORT_SYMBOL(nf_ct_log_invalid);
+EXPORT_SYMBOL_GPL(nf_ct_log_invalid);
 #endif /* CONFIG_SYSCTL */
 
-int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
-{
-	int ret = 0;
-
-	write_lock_bh(&nf_conntrack_lock);
-	if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_generic_l3proto) {
-		ret = -EBUSY;
-		goto out;
-	}
-	nf_ct_l3protos[proto->l3proto] = proto;
-out:
-	write_unlock_bh(&nf_conntrack_lock);
-
-	return ret;
-}
-
-void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
-{
-	write_lock_bh(&nf_conntrack_lock);
-	nf_ct_l3protos[proto->l3proto] = &nf_conntrack_generic_l3proto;
-	write_unlock_bh(&nf_conntrack_lock);
-	
-	/* Somebody could be still looking at the proto in bh. */
-	synchronize_net();
-
-	/* Remove all contrack entries for this protocol */
-	nf_ct_iterate_cleanup(kill_l3proto, proto);
-}
-
-/* FIXME: Allow NULL functions and sub in pointers to generic for
-   them. --RR */
-int nf_conntrack_protocol_register(struct nf_conntrack_protocol *proto)
-{
-	int ret = 0;
-
-retry:
-	write_lock_bh(&nf_conntrack_lock);
-	if (nf_ct_protos[proto->l3proto]) {
-		if (nf_ct_protos[proto->l3proto][proto->proto]
-				!= &nf_conntrack_generic_protocol) {
-			ret = -EBUSY;
-			goto out_unlock;
-		}
-	} else {
-		/* l3proto may be loaded latter. */
-		struct nf_conntrack_protocol **proto_array;
-		int i;
-
-		write_unlock_bh(&nf_conntrack_lock);
-
-		proto_array = (struct nf_conntrack_protocol **)
-				kmalloc(MAX_NF_CT_PROTO *
-					 sizeof(struct nf_conntrack_protocol *),
-					GFP_KERNEL);
-		if (proto_array == NULL) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		for (i = 0; i < MAX_NF_CT_PROTO; i++)
-			proto_array[i] = &nf_conntrack_generic_protocol;
-
-		write_lock_bh(&nf_conntrack_lock);
-		if (nf_ct_protos[proto->l3proto]) {
-			/* bad timing, but no problem */
-			write_unlock_bh(&nf_conntrack_lock);
-			kfree(proto_array);
-		} else {
-			nf_ct_protos[proto->l3proto] = proto_array;
-			write_unlock_bh(&nf_conntrack_lock);
-		}
-
-		/*
-		 * Just once because array is never freed until unloading
-		 * nf_conntrack.ko
-		 */
-		goto retry;
-	}
-
-	nf_ct_protos[proto->l3proto][proto->proto] = proto;
-
-out_unlock:
-	write_unlock_bh(&nf_conntrack_lock);
-out:
-	return ret;
-}
-
-void nf_conntrack_protocol_unregister(struct nf_conntrack_protocol *proto)
-{
-	write_lock_bh(&nf_conntrack_lock);
-	nf_ct_protos[proto->l3proto][proto->proto]
-		= &nf_conntrack_generic_protocol;
-	write_unlock_bh(&nf_conntrack_lock);
-	
-	/* Somebody could be still looking at the proto in bh. */
-	synchronize_net();
-
-	/* Remove all contrack entries for this protocol */
-	nf_ct_iterate_cleanup(kill_proto, proto);
-}
-
 static int __init nf_conntrack_standalone_init(void)
 {
 #ifdef CONFIG_PROC_FS
@@ -834,70 +490,4 @@
 void need_conntrack(void)
 {
 }
-
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-EXPORT_SYMBOL_GPL(nf_conntrack_chain);
-EXPORT_SYMBOL_GPL(nf_conntrack_expect_chain);
-EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
-EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
-EXPORT_SYMBOL_GPL(__nf_ct_event_cache_init);
-EXPORT_PER_CPU_SYMBOL_GPL(nf_conntrack_ecache);
-EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
-#endif
-EXPORT_SYMBOL(nf_ct_l3proto_try_module_get);
-EXPORT_SYMBOL(nf_ct_l3proto_module_put);
-EXPORT_SYMBOL(nf_conntrack_l3proto_register);
-EXPORT_SYMBOL(nf_conntrack_l3proto_unregister);
-EXPORT_SYMBOL(nf_conntrack_protocol_register);
-EXPORT_SYMBOL(nf_conntrack_protocol_unregister);
-EXPORT_SYMBOL(nf_ct_invert_tuplepr);
-EXPORT_SYMBOL(nf_conntrack_destroyed);
-EXPORT_SYMBOL(need_conntrack);
-EXPORT_SYMBOL(nf_conntrack_helper_register);
-EXPORT_SYMBOL(nf_conntrack_helper_unregister);
-EXPORT_SYMBOL(nf_ct_iterate_cleanup);
-EXPORT_SYMBOL(__nf_ct_refresh_acct);
-EXPORT_SYMBOL(nf_ct_protos);
-EXPORT_SYMBOL(__nf_ct_proto_find);
-EXPORT_SYMBOL(nf_ct_proto_find_get);
-EXPORT_SYMBOL(nf_ct_proto_put);
-EXPORT_SYMBOL(nf_ct_l3proto_find_get);
-EXPORT_SYMBOL(nf_ct_l3proto_put);
-EXPORT_SYMBOL(nf_ct_l3protos);
-EXPORT_SYMBOL_GPL(nf_conntrack_checksum);
-EXPORT_SYMBOL(nf_conntrack_expect_alloc);
-EXPORT_SYMBOL(nf_conntrack_expect_put);
-EXPORT_SYMBOL(nf_conntrack_expect_related);
-EXPORT_SYMBOL(nf_conntrack_unexpect_related);
-EXPORT_SYMBOL(nf_conntrack_tuple_taken);
-EXPORT_SYMBOL(nf_conntrack_htable_size);
-EXPORT_SYMBOL(nf_conntrack_lock);
-EXPORT_SYMBOL(nf_conntrack_hash);
-EXPORT_SYMBOL(nf_conntrack_untracked);
-EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
-#ifdef CONFIG_IP_NF_NAT_NEEDED
-EXPORT_SYMBOL(nf_conntrack_tcp_update);
-#endif
-EXPORT_SYMBOL(__nf_conntrack_confirm);
-EXPORT_SYMBOL(nf_ct_get_tuple);
-EXPORT_SYMBOL(nf_ct_invert_tuple);
-EXPORT_SYMBOL(nf_conntrack_in);
-EXPORT_SYMBOL(__nf_conntrack_attach);
-EXPORT_SYMBOL(nf_conntrack_alloc);
-EXPORT_SYMBOL(nf_conntrack_free);
-EXPORT_SYMBOL(nf_conntrack_flush);
-EXPORT_SYMBOL(nf_ct_remove_expectations);
-EXPORT_SYMBOL(nf_ct_helper_find_get);
-EXPORT_SYMBOL(nf_ct_helper_put);
-EXPORT_SYMBOL(__nf_conntrack_helper_find_byname);
-EXPORT_SYMBOL(__nf_conntrack_find);
-EXPORT_SYMBOL(nf_ct_unlink_expect);
-EXPORT_SYMBOL(nf_conntrack_hash_insert);
-EXPORT_SYMBOL(__nf_conntrack_expect_find);
-EXPORT_SYMBOL(nf_conntrack_expect_find);
-EXPORT_SYMBOL(nf_conntrack_expect_list);
-#if defined(CONFIG_NF_CT_NETLINK) || \
-    defined(CONFIG_NF_CT_NETLINK_MODULE)
-EXPORT_SYMBOL(nf_ct_port_tuple_to_nfattr);
-EXPORT_SYMBOL(nf_ct_port_nfattr_to_tuple);
-#endif
+EXPORT_SYMBOL_GPL(need_conntrack);
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c
new file mode 100644
index 0000000..f5bffe2
--- /dev/null
+++ b/net/netfilter/nf_conntrack_tftp.c
@@ -0,0 +1,160 @@
+/* (C) 2001-2002 Magnus Boden <mb@ozaba.mine.nu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/netfilter.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/netfilter/nf_conntrack_tftp.h>
+
+MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>");
+MODULE_DESCRIPTION("TFTP connection tracking helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ip_conntrack_tftp");
+
+#define MAX_PORTS 8
+static unsigned short ports[MAX_PORTS];
+static int ports_c;
+module_param_array(ports, ushort, &ports_c, 0400);
+MODULE_PARM_DESC(ports, "Port numbers of TFTP servers");
+
+#if 0
+#define DEBUGP(format, args...) printk("%s:%s:" format, \
+                                       __FILE__, __FUNCTION__ , ## args)
+#else
+#define DEBUGP(format, args...)
+#endif
+
+unsigned int (*nf_nat_tftp_hook)(struct sk_buff **pskb,
+				 enum ip_conntrack_info ctinfo,
+				 struct nf_conntrack_expect *exp) __read_mostly;
+EXPORT_SYMBOL_GPL(nf_nat_tftp_hook);
+
+static int tftp_help(struct sk_buff **pskb,
+		     unsigned int protoff,
+		     struct nf_conn *ct,
+		     enum ip_conntrack_info ctinfo)
+{
+	struct tftphdr _tftph, *tfh;
+	struct nf_conntrack_expect *exp;
+	struct nf_conntrack_tuple *tuple;
+	unsigned int ret = NF_ACCEPT;
+	int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+	typeof(nf_nat_tftp_hook) nf_nat_tftp;
+
+	tfh = skb_header_pointer(*pskb, protoff + sizeof(struct udphdr),
+				 sizeof(_tftph), &_tftph);
+	if (tfh == NULL)
+		return NF_ACCEPT;
+
+	switch (ntohs(tfh->opcode)) {
+	case TFTP_OPCODE_READ:
+	case TFTP_OPCODE_WRITE:
+		/* RRQ and WRQ work the same way */
+		DEBUGP("");
+		NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+		NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+
+		exp = nf_conntrack_expect_alloc(ct);
+		if (exp == NULL)
+			return NF_DROP;
+		tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+		nf_conntrack_expect_init(exp, family,
+					 &tuple->src.u3, &tuple->dst.u3,
+					 IPPROTO_UDP,
+					 NULL, &tuple->dst.u.udp.port);
+
+		DEBUGP("expect: ");
+		NF_CT_DUMP_TUPLE(&exp->tuple);
+		NF_CT_DUMP_TUPLE(&exp->mask);
+
+		nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook);
+		if (nf_nat_tftp && ct->status & IPS_NAT_MASK)
+			ret = nf_nat_tftp(pskb, ctinfo, exp);
+		else if (nf_conntrack_expect_related(exp) != 0)
+			ret = NF_DROP;
+		nf_conntrack_expect_put(exp);
+		break;
+	case TFTP_OPCODE_DATA:
+	case TFTP_OPCODE_ACK:
+		DEBUGP("Data/ACK opcode\n");
+		break;
+	case TFTP_OPCODE_ERROR:
+		DEBUGP("Error opcode\n");
+		break;
+	default:
+		DEBUGP("Unknown opcode\n");
+	}
+	return ret;
+}
+
+static struct nf_conntrack_helper tftp[MAX_PORTS][2] __read_mostly;
+static char tftp_names[MAX_PORTS][2][sizeof("tftp-65535")] __read_mostly;
+
+static void nf_conntrack_tftp_fini(void)
+{
+	int i, j;
+
+	for (i = 0; i < ports_c; i++) {
+		for (j = 0; j < 2; j++)
+			nf_conntrack_helper_unregister(&tftp[i][j]);
+	}
+}
+
+static int __init nf_conntrack_tftp_init(void)
+{
+	int i, j, ret;
+	char *tmpname;
+
+	if (ports_c == 0)
+		ports[ports_c++] = TFTP_PORT;
+
+	for (i = 0; i < ports_c; i++) {
+		memset(&tftp[i], 0, sizeof(tftp[i]));
+
+		tftp[i][0].tuple.src.l3num = AF_INET;
+		tftp[i][1].tuple.src.l3num = AF_INET6;
+		for (j = 0; j < 2; j++) {
+			tftp[i][j].tuple.dst.protonum = IPPROTO_UDP;
+			tftp[i][j].tuple.src.u.udp.port = htons(ports[i]);
+			tftp[i][j].mask.src.l3num = 0xFFFF;
+			tftp[i][j].mask.dst.protonum = 0xFF;
+			tftp[i][j].mask.src.u.udp.port = htons(0xFFFF);
+			tftp[i][j].max_expected = 1;
+			tftp[i][j].timeout = 5 * 60; /* 5 minutes */
+			tftp[i][j].me = THIS_MODULE;
+			tftp[i][j].help = tftp_help;
+
+			tmpname = &tftp_names[i][j][0];
+			if (ports[i] == TFTP_PORT)
+				sprintf(tmpname, "tftp");
+			else
+				sprintf(tmpname, "tftp-%u", i);
+			tftp[i][j].name = tmpname;
+
+			ret = nf_conntrack_helper_register(&tftp[i][j]);
+			if (ret) {
+				printk("nf_ct_tftp: failed to register helper "
+				       "for pf: %u port: %u\n",
+					tftp[i][j].tuple.src.l3num, ports[i]);
+				nf_conntrack_tftp_fini();
+				return ret;
+			}
+		}
+	}
+	return 0;
+}
+
+module_init(nf_conntrack_tftp_init);
+module_exit(nf_conntrack_tftp_fini);
diff --git a/net/netfilter/nf_sysctl.c b/net/netfilter/nf_sysctl.c
new file mode 100644
index 0000000..06ddddb
--- /dev/null
+++ b/net/netfilter/nf_sysctl.c
@@ -0,0 +1,134 @@
+/* nf_sysctl.c	netfilter sysctl registration/unregistration
+ *
+ * Copyright (c) 2006 Patrick McHardy <kaber@trash.net>
+ */
+#include <linux/module.h>
+#include <linux/sysctl.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+static void
+path_free(struct ctl_table *path, struct ctl_table *table)
+{
+	struct ctl_table *t, *next;
+
+	for (t = path; t != NULL && t != table; t = next) {
+		next = t->child;
+		kfree(t);
+	}
+}
+
+static struct ctl_table *
+path_dup(struct ctl_table *path, struct ctl_table *table)
+{
+	struct ctl_table *t, *last = NULL, *tmp;
+
+	for (t = path; t != NULL; t = t->child) {
+		/* twice the size since path elements are terminated by an
+		 * empty element */
+		tmp = kmemdup(t, 2 * sizeof(*t), GFP_KERNEL);
+		if (tmp == NULL) {
+			if (last != NULL)
+				path_free(path, table);
+			return NULL;
+		}
+
+		if (last != NULL)
+			last->child = tmp;
+		else
+			path = tmp;
+		last = tmp;
+	}
+
+	if (last != NULL)
+		last->child = table;
+	else
+		path = table;
+
+	return path;
+}
+
+struct ctl_table_header *
+nf_register_sysctl_table(struct ctl_table *path, struct ctl_table *table)
+{
+	struct ctl_table_header *header;
+
+	path = path_dup(path, table);
+	if (path == NULL)
+		return NULL;
+	header = register_sysctl_table(path, 0);
+	if (header == NULL)
+		path_free(path, table);
+	return header;
+}
+EXPORT_SYMBOL_GPL(nf_register_sysctl_table);
+
+void
+nf_unregister_sysctl_table(struct ctl_table_header *header,
+			   struct ctl_table *table)
+{
+	struct ctl_table *path = header->ctl_table;
+
+	unregister_sysctl_table(header);
+	path_free(path, table);
+}
+EXPORT_SYMBOL_GPL(nf_unregister_sysctl_table);
+
+/* net/netfilter */
+static struct ctl_table nf_net_netfilter_table[] = {
+	{
+		.ctl_name	= NET_NETFILTER,
+		.procname	= "netfilter",
+		.mode		= 0555,
+	},
+	{
+		.ctl_name	= 0
+	}
+};
+struct ctl_table nf_net_netfilter_sysctl_path[] = {
+	{
+		.ctl_name	= CTL_NET,
+		.procname	= "net",
+		.mode		= 0555,
+		.child		= nf_net_netfilter_table,
+	},
+	{
+		.ctl_name	= 0
+	}
+};
+EXPORT_SYMBOL_GPL(nf_net_netfilter_sysctl_path);
+
+/* net/ipv4/netfilter */
+static struct ctl_table nf_net_ipv4_netfilter_table[] = {
+	{
+		.ctl_name	= NET_IPV4_NETFILTER,
+		.procname	= "netfilter",
+		.mode		= 0555,
+	},
+	{
+		.ctl_name	= 0
+	}
+};
+static struct ctl_table nf_net_ipv4_table[] = {
+	{
+		.ctl_name	= NET_IPV4,
+		.procname	= "ipv4",
+		.mode		= 0555,
+		.child		= nf_net_ipv4_netfilter_table,
+	},
+	{
+		.ctl_name	= 0
+	}
+};
+struct ctl_table nf_net_ipv4_netfilter_sysctl_path[] = {
+	{
+		.ctl_name	= CTL_NET,
+		.procname	= "net",
+		.mode		= 0555,
+		.child		= nf_net_ipv4_table,
+	},
+	{
+		.ctl_name	= 0
+	}
+};
+EXPORT_SYMBOL_GPL(nf_net_ipv4_netfilter_sysctl_path);
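
For context on the helpers just added: path_dup() clones the shared directory chain ("net", "net/netfilter") and splices the caller's leaf table onto its tail, so several modules can hang entries under the same directories without owning them. Below is a minimal sketch of how a module might use nf_register_sysctl_table() with the exported nf_net_netfilter_sysctl_path; the sysctl name "nf_example", its ctl_name value and the extern declaration of the path array are assumptions made for illustration and are not part of this patch.

#include <linux/module.h>
#include <linux/sysctl.h>

static int nf_example_value = 1;

static struct ctl_table nf_example_table[] = {
	{
		.ctl_name	= 1,	/* hypothetical binary sysctl id */
		.procname	= "nf_example",
		.data		= &nf_example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static struct ctl_table_header *nf_example_header;

static int __init nf_example_init(void)
{
	/* shows up as /proc/sys/net/netfilter/nf_example */
	nf_example_header = nf_register_sysctl_table(nf_net_netfilter_sysctl_path,
						     nf_example_table);
	return nf_example_header ? 0 : -ENOMEM;
}

static void __exit nf_example_exit(void)
{
	nf_unregister_sysctl_table(nf_example_header, nf_example_table);
}

module_init(nf_example_init);
module_exit(nf_example_exit);

On unregister, path_free() walks the duplicated directory nodes up to (but not including) the caller's table, which is why the same table pointer is passed back in.
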
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 1e5207b..d1505dd 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -408,13 +408,13 @@
 			const struct net_device *indev,
 			const struct net_device *outdev,
 			const struct nf_loginfo *li,
-			const char *prefix)
+			const char *prefix, unsigned int plen)
 {
 	unsigned char *old_tail;
 	struct nfulnl_msg_packet_hdr pmsg;
 	struct nlmsghdr *nlh;
 	struct nfgenmsg *nfmsg;
-	u_int32_t tmp_uint;
+	__be32 tmp_uint;
 
 	UDEBUG("entered\n");
 		
@@ -432,12 +432,8 @@
 
 	NFA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);
 
-	if (prefix) {
-		int slen = strlen(prefix);
-		if (slen > NFULNL_PREFIXLEN)
-			slen = NFULNL_PREFIXLEN;
-		NFA_PUT(inst->skb, NFULA_PREFIX, slen, prefix);
-	}
+	if (prefix)
+		NFA_PUT(inst->skb, NFULA_PREFIX, plen, prefix);
 
 	if (indev) {
 		tmp_uint = htonl(indev->ifindex);
@@ -501,18 +497,16 @@
 #endif
 	}
 
-	if (skb->nfmark) {
-		tmp_uint = htonl(skb->nfmark);
+	if (skb->mark) {
+		tmp_uint = htonl(skb->mark);
 		NFA_PUT(inst->skb, NFULA_MARK, sizeof(tmp_uint), &tmp_uint);
 	}
 
 	if (indev && skb->dev && skb->dev->hard_header_parse) {
 		struct nfulnl_msg_packet_hw phw;
-
-		phw.hw_addrlen = 
-			skb->dev->hard_header_parse((struct sk_buff *)skb, 
+		int len = skb->dev->hard_header_parse((struct sk_buff *)skb,
 						    phw.hw_addr);
-		phw.hw_addrlen = htons(phw.hw_addrlen);
+		phw.hw_addrlen = htons(len);
 		NFA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
 	}
 
@@ -529,7 +523,7 @@
 	if (skb->sk) {
 		read_lock_bh(&skb->sk->sk_callback_lock);
 		if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
-			u_int32_t uid = htonl(skb->sk->sk_socket->file->f_uid);
+			__be32 uid = htonl(skb->sk->sk_socket->file->f_uid);
 			/* need to unlock here since NFA_PUT may goto */
 			read_unlock_bh(&skb->sk->sk_callback_lock);
 			NFA_PUT(inst->skb, NFULA_UID, sizeof(uid), &uid);
@@ -603,6 +597,7 @@
 	const struct nf_loginfo *li;
 	unsigned int qthreshold;
 	unsigned int nlbufsiz;
+	unsigned int plen;
 
 	if (li_user && li_user->type == NF_LOG_TYPE_ULOG) 
 		li = li_user;
@@ -618,6 +613,10 @@
 		return;
 	}
 
+	plen = 0;
+	if (prefix)
+		plen = strlen(prefix);
+
 	/* all macros expand to constant values at compile time */
 	/* FIXME: do we want to make the size calculation conditional based on
 	 * what is actually present?  way more branches and checks, but more
@@ -632,7 +631,7 @@
 #endif
 		+ NFA_SPACE(sizeof(u_int32_t))	/* mark */
 		+ NFA_SPACE(sizeof(u_int32_t))	/* uid */
-		+ NFA_SPACE(NFULNL_PREFIXLEN)	/* prefix */
+		+ NFA_SPACE(plen)		/* prefix */
 		+ NFA_SPACE(sizeof(struct nfulnl_msg_packet_hw))
 		+ NFA_SPACE(sizeof(struct nfulnl_msg_packet_timestamp));
 
@@ -703,7 +702,7 @@
 	inst->qlen++;
 
 	__build_packet_message(inst, skb, data_len, pf,
-				hooknum, in, out, li, prefix);
+				hooknum, in, out, li, prefix, plen);
 
 	/* timer_pending always called within inst->lock, so there
 	 * is no chance of a race here */
@@ -882,15 +881,15 @@
 	}
 
 	if (nfula[NFULA_CFG_TIMEOUT-1]) {
-		u_int32_t timeout = 
-			*(u_int32_t *)NFA_DATA(nfula[NFULA_CFG_TIMEOUT-1]);
+		__be32 timeout =
+			*(__be32 *)NFA_DATA(nfula[NFULA_CFG_TIMEOUT-1]);
 
 		nfulnl_set_timeout(inst, ntohl(timeout));
 	}
 
 	if (nfula[NFULA_CFG_NLBUFSIZ-1]) {
-		u_int32_t nlbufsiz = 
-			*(u_int32_t *)NFA_DATA(nfula[NFULA_CFG_NLBUFSIZ-1]);
+		__be32 nlbufsiz =
+			*(__be32 *)NFA_DATA(nfula[NFULA_CFG_NLBUFSIZ-1]);
 
 		nfulnl_set_nlbufsiz(inst, ntohl(nlbufsiz));
 	}
@@ -903,8 +902,8 @@
 	}
 
 	if (nfula[NFULA_CFG_FLAGS-1]) {
-		u_int16_t flags =
-			*(u_int16_t *)NFA_DATA(nfula[NFULA_CFG_FLAGS-1]);
+		__be16 flags =
+			*(__be16 *)NFA_DATA(nfula[NFULA_CFG_FLAGS-1]);
 		nfulnl_set_flags(inst, ntohs(flags));
 	}
 
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index e815a9a..a88a017 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -349,7 +349,7 @@
 	struct sk_buff *entskb = entry->skb;
 	struct net_device *indev;
 	struct net_device *outdev;
-	unsigned int tmp_uint;
+	__be32 tmp_uint;
 
 	QDEBUG("entered\n");
 
@@ -480,8 +480,8 @@
 #endif
 	}
 
-	if (entskb->nfmark) {
-		tmp_uint = htonl(entskb->nfmark);
+	if (entskb->mark) {
+		tmp_uint = htonl(entskb->mark);
 		NFA_PUT(skb, NFQA_MARK, sizeof(u_int32_t), &tmp_uint);
 	}
 
@@ -489,10 +489,9 @@
 	    && entskb->dev->hard_header_parse) {
 		struct nfqnl_msg_packet_hw phw;
 
-		phw.hw_addrlen =
-			entskb->dev->hard_header_parse(entskb,
+		int len = entskb->dev->hard_header_parse(entskb,
 			                                   phw.hw_addr);
-		phw.hw_addrlen = htons(phw.hw_addrlen);
+		phw.hw_addrlen = htons(len);
 		NFA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
 	}
 
@@ -835,8 +834,8 @@
 	}
 
 	if (nfqa[NFQA_MARK-1])
-		entry->skb->nfmark = ntohl(*(u_int32_t *)
-		                           NFA_DATA(nfqa[NFQA_MARK-1]));
+		entry->skb->mark = ntohl(*(__be32 *)
+		                         NFA_DATA(nfqa[NFQA_MARK-1]));
 		
 	issue_verdict(entry, verdict);
 	instance_put(queue);
@@ -948,6 +947,14 @@
 				ntohl(params->copy_range));
 	}
 
+	if (nfqa[NFQA_CFG_QUEUE_MAXLEN-1]) {
+		__be32 *queue_maxlen;
+		queue_maxlen = NFA_DATA(nfqa[NFQA_CFG_QUEUE_MAXLEN-1]);
+		spin_lock_bh(&queue->lock);
+		queue->queue_maxlen = ntohl(*queue_maxlen);
+		spin_unlock_bh(&queue->lock);
+	}
+
 out_put:
 	instance_put(queue);
 	return ret;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 58522fc..8996584 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -21,6 +21,7 @@
 #include <linux/string.h>
 #include <linux/vmalloc.h>
 #include <linux/mutex.h>
+#include <linux/mm.h>
 
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter_arp.h>
diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c
index c01524f..b554823 100644
--- a/net/netfilter/xt_CONNMARK.c
+++ b/net/netfilter/xt_CONNMARK.c
@@ -31,6 +31,9 @@
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_CONNMARK.h>
 #include <net/netfilter/nf_conntrack_compat.h>
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#include <net/netfilter/nf_conntrack_ecache.h>
+#endif
 
 static unsigned int
 target(struct sk_buff **pskb,
@@ -42,7 +45,7 @@
 {
 	const struct xt_connmark_target_info *markinfo = targinfo;
 	u_int32_t diff;
-	u_int32_t nfmark;
+	u_int32_t mark;
 	u_int32_t newmark;
 	u_int32_t ctinfo;
 	u_int32_t *ctmark = nf_ct_get_mark(*pskb, &ctinfo);
@@ -62,7 +65,7 @@
 			break;
 		case XT_CONNMARK_SAVE:
 			newmark = (*ctmark & ~markinfo->mask) |
-				  ((*pskb)->nfmark & markinfo->mask);
+				  ((*pskb)->mark & markinfo->mask);
 			if (*ctmark != newmark) {
 				*ctmark = newmark;
 #if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
@@ -73,10 +76,10 @@
 			}
 			break;
 		case XT_CONNMARK_RESTORE:
-			nfmark = (*pskb)->nfmark;
-			diff = (*ctmark ^ nfmark) & markinfo->mask;
+			mark = (*pskb)->mark;
+			diff = (*ctmark ^ mark) & markinfo->mask;
 			if (diff != 0)
-				(*pskb)->nfmark = nfmark ^ diff;
+				(*pskb)->mark = mark ^ diff;
 			break;
 		}
 	}
diff --git a/net/netfilter/xt_MARK.c b/net/netfilter/xt_MARK.c
index c6e860a..0b48547 100644
--- a/net/netfilter/xt_MARK.c
+++ b/net/netfilter/xt_MARK.c
@@ -31,8 +31,8 @@
 {
 	const struct xt_mark_target_info *markinfo = targinfo;
 
-	if((*pskb)->nfmark != markinfo->mark)
-		(*pskb)->nfmark = markinfo->mark;
+	if((*pskb)->mark != markinfo->mark)
+		(*pskb)->mark = markinfo->mark;
 
 	return XT_CONTINUE;
 }
@@ -54,16 +54,16 @@
 		break;
 		
 	case XT_MARK_AND:
-		mark = (*pskb)->nfmark & markinfo->mark;
+		mark = (*pskb)->mark & markinfo->mark;
 		break;
 		
 	case XT_MARK_OR:
-		mark = (*pskb)->nfmark | markinfo->mark;
+		mark = (*pskb)->mark | markinfo->mark;
 		break;
 	}
 
-	if((*pskb)->nfmark != mark)
-		(*pskb)->nfmark = mark;
+	if((*pskb)->mark != mark)
+		(*pskb)->mark = mark;
 
 	return XT_CONTINUE;
 }
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
new file mode 100644
index 0000000..901ed7a
--- /dev/null
+++ b/net/netfilter/xt_NFLOG.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2006 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_NFLOG.h>
+
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("x_tables NFLOG target");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_NFLOG");
+MODULE_ALIAS("ip6t_NFLOG");
+
+static unsigned int
+nflog_target(struct sk_buff **pskb,
+	     const struct net_device *in, const struct net_device *out,
+	     unsigned int hooknum, const struct xt_target *target,
+	     const void *targinfo)
+{
+	const struct xt_nflog_info *info = targinfo;
+	struct nf_loginfo li;
+
+	li.type		     = NF_LOG_TYPE_ULOG;
+	li.u.ulog.copy_len   = info->len;
+	li.u.ulog.group	     = info->group;
+	li.u.ulog.qthreshold = info->threshold;
+
+	nf_log_packet(target->family, hooknum, *pskb, in, out, &li,
+		      "%s", info->prefix);
+	return XT_CONTINUE;
+}
+
+static int
+nflog_checkentry(const char *tablename, const void *entry,
+		 const struct xt_target *target, void *targetinfo,
+		 unsigned int hookmask)
+{
+	struct xt_nflog_info *info = targetinfo;
+
+	if (info->flags & ~XT_NFLOG_MASK)
+		return 0;
+	if (info->prefix[sizeof(info->prefix) - 1] != '\0')
+		return 0;
+	return 1;
+}
+
+static struct xt_target xt_nflog_target[] = {
+	{
+		.name		= "NFLOG",
+		.family		= AF_INET,
+		.checkentry	= nflog_checkentry,
+		.target		= nflog_target,
+		.targetsize	= sizeof(struct xt_nflog_info),
+		.me		= THIS_MODULE,
+	},
+	{
+		.name		= "NFLOG",
+		.family		= AF_INET6,
+		.checkentry	= nflog_checkentry,
+		.target		= nflog_target,
+		.targetsize	= sizeof(struct xt_nflog_info),
+		.me		= THIS_MODULE,
+	},
+};
+
+static int __init xt_nflog_init(void)
+{
+	return xt_register_targets(xt_nflog_target,
+				   ARRAY_SIZE(xt_nflog_target));
+}
+
+static void __exit xt_nflog_fini(void)
+{
+	xt_unregister_targets(xt_nflog_target, ARRAY_SIZE(xt_nflog_target));
+}
+
+module_init(xt_nflog_init);
+module_exit(xt_nflog_fini);
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
new file mode 100644
index 0000000..a5a6e19
--- /dev/null
+++ b/net/netfilter/xt_hashlimit.c
@@ -0,0 +1,773 @@
+/* iptables match extension to limit the number of packets per second
+ * separately for each hashbucket (sourceip/sourceport/dstip/dstport)
+ *
+ * (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
+ *
+ * $Id: ipt_hashlimit.c 3244 2004-10-20 16:24:29Z laforge@netfilter.org $
+ *
+ * Development of this code was funded by Astaro AG, http://www.astaro.com/
+ */
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/mm.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter/xt_hashlimit.h>
+#include <linux/mutex.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+MODULE_DESCRIPTION("iptables match for limiting per hash-bucket");
+MODULE_ALIAS("ipt_hashlimit");
+MODULE_ALIAS("ip6t_hashlimit");
+
+/* need to declare this at the top */
+static struct proc_dir_entry *hashlimit_procdir4;
+static struct proc_dir_entry *hashlimit_procdir6;
+static struct file_operations dl_file_ops;
+
+/* hash table crap */
+struct dsthash_dst {
+	union {
+		struct {
+			__be32 src;
+			__be32 dst;
+		} ip;
+		struct {
+			__be32 src[4];
+			__be32 dst[4];
+		} ip6;
+	} addr;
+	__be16 src_port;
+	__be16 dst_port;
+};
+
+struct dsthash_ent {
+	/* static / read-only parts in the beginning */
+	struct hlist_node node;
+	struct dsthash_dst dst;
+
+	/* modified structure members in the end */
+	unsigned long expires;		/* precalculated expiry time */
+	struct {
+		unsigned long prev;	/* last modification */
+		u_int32_t credit;
+		u_int32_t credit_cap, cost;
+	} rateinfo;
+};
+
+struct xt_hashlimit_htable {
+	struct hlist_node node;		/* global list of all htables */
+	atomic_t use;
+	int family;
+
+	struct hashlimit_cfg cfg;	/* config */
+
+	/* used internally */
+	spinlock_t lock;		/* lock for list_head */
+	u_int32_t rnd;			/* random seed for hash */
+	int rnd_initialized;
+	unsigned int count;		/* number entries in table */
+	struct timer_list timer;	/* timer for gc */
+
+	/* seq_file stuff */
+	struct proc_dir_entry *pde;
+
+	struct hlist_head hash[0];	/* hashtable itself */
+};
+
+static DEFINE_SPINLOCK(hashlimit_lock);	/* protects htables list */
+static DEFINE_MUTEX(hlimit_mutex);	/* additional checkentry protection */
+static HLIST_HEAD(hashlimit_htables);
+static struct kmem_cache *hashlimit_cachep __read_mostly;
+
+static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b)
+{
+	return !memcmp(&ent->dst, b, sizeof(ent->dst));
+}
+
+static u_int32_t
+hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst)
+{
+	return jhash(dst, sizeof(*dst), ht->rnd) % ht->cfg.size;
+}
+
+static struct dsthash_ent *
+dsthash_find(const struct xt_hashlimit_htable *ht, struct dsthash_dst *dst)
+{
+	struct dsthash_ent *ent;
+	struct hlist_node *pos;
+	u_int32_t hash = hash_dst(ht, dst);
+
+	if (!hlist_empty(&ht->hash[hash])) {
+		hlist_for_each_entry(ent, pos, &ht->hash[hash], node)
+			if (dst_cmp(ent, dst))
+				return ent;
+	}
+	return NULL;
+}
+
+/* allocate dsthash_ent, initialize dst, put in htable and lock it */
+static struct dsthash_ent *
+dsthash_alloc_init(struct xt_hashlimit_htable *ht, struct dsthash_dst *dst)
+{
+	struct dsthash_ent *ent;
+
+	/* initialize hash with random val at the time we allocate
+	 * the first hashtable entry */
+	if (!ht->rnd_initialized) {
+		get_random_bytes(&ht->rnd, 4);
+		ht->rnd_initialized = 1;
+	}
+
+	if (ht->cfg.max && ht->count >= ht->cfg.max) {
+		/* FIXME: do something. question is what.. */
+		if (net_ratelimit())
+			printk(KERN_WARNING
+				"xt_hashlimit: max count of %u reached\n",
+				ht->cfg.max);
+		return NULL;
+	}
+
+	ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
+	if (!ent) {
+		if (net_ratelimit())
+			printk(KERN_ERR
+				"xt_hashlimit: can't allocate dsthash_ent\n");
+		return NULL;
+	}
+	memcpy(&ent->dst, dst, sizeof(ent->dst));
+
+	hlist_add_head(&ent->node, &ht->hash[hash_dst(ht, dst)]);
+	ht->count++;
+	return ent;
+}
+
+static inline void
+dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
+{
+	hlist_del(&ent->node);
+	kmem_cache_free(hashlimit_cachep, ent);
+	ht->count--;
+}
+static void htable_gc(unsigned long htlong);
+
+static int htable_create(struct xt_hashlimit_info *minfo, int family)
+{
+	struct xt_hashlimit_htable *hinfo;
+	unsigned int size;
+	unsigned int i;
+
+	if (minfo->cfg.size)
+		size = minfo->cfg.size;
+	else {
+		size = ((num_physpages << PAGE_SHIFT) / 16384) /
+		       sizeof(struct list_head);
+		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+			size = 8192;
+		if (size < 16)
+			size = 16;
+	}
+	/* FIXME: don't use vmalloc() here or anywhere else -HW */
+	hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
+			sizeof(struct list_head) * size);
+	if (!hinfo) {
+		printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n");
+		return -1;
+	}
+	minfo->hinfo = hinfo;
+
+	/* copy match config into hashtable config */
+	memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg));
+	hinfo->cfg.size = size;
+	if (!hinfo->cfg.max)
+		hinfo->cfg.max = 8 * hinfo->cfg.size;
+	else if (hinfo->cfg.max < hinfo->cfg.size)
+		hinfo->cfg.max = hinfo->cfg.size;
+
+	for (i = 0; i < hinfo->cfg.size; i++)
+		INIT_HLIST_HEAD(&hinfo->hash[i]);
+
+	atomic_set(&hinfo->use, 1);
+	hinfo->count = 0;
+	hinfo->family = family;
+	hinfo->rnd_initialized = 0;
+	spin_lock_init(&hinfo->lock);
+	hinfo->pde = create_proc_entry(minfo->name, 0,
+				       family == AF_INET ? hashlimit_procdir4 :
+				       			   hashlimit_procdir6);
+	if (!hinfo->pde) {
+		vfree(hinfo);
+		return -1;
+	}
+	hinfo->pde->proc_fops = &dl_file_ops;
+	hinfo->pde->data = hinfo;
+
+	init_timer(&hinfo->timer);
+	hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
+	hinfo->timer.data = (unsigned long )hinfo;
+	hinfo->timer.function = htable_gc;
+	add_timer(&hinfo->timer);
+
+	spin_lock_bh(&hashlimit_lock);
+	hlist_add_head(&hinfo->node, &hashlimit_htables);
+	spin_unlock_bh(&hashlimit_lock);
+
+	return 0;
+}
+
+static int select_all(struct xt_hashlimit_htable *ht, struct dsthash_ent *he)
+{
+	return 1;
+}
+
+static int select_gc(struct xt_hashlimit_htable *ht, struct dsthash_ent *he)
+{
+	return (jiffies >= he->expires);
+}
+
+static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
+		 		int (*select)(struct xt_hashlimit_htable *ht,
+					      struct dsthash_ent *he))
+{
+	unsigned int i;
+
+	/* lock hash table and iterate over it */
+	spin_lock_bh(&ht->lock);
+	for (i = 0; i < ht->cfg.size; i++) {
+		struct dsthash_ent *dh;
+		struct hlist_node *pos, *n;
+		hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) {
+			if ((*select)(ht, dh))
+				dsthash_free(ht, dh);
+		}
+	}
+	spin_unlock_bh(&ht->lock);
+}
+
+/* hash table garbage collector, run by timer */
+static void htable_gc(unsigned long htlong)
+{
+	struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong;
+
+	htable_selective_cleanup(ht, select_gc);
+
+	/* re-add the timer accordingly */
+	ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval);
+	add_timer(&ht->timer);
+}
+
+static void htable_destroy(struct xt_hashlimit_htable *hinfo)
+{
+	/* remove timer, if it is pending */
+	if (timer_pending(&hinfo->timer))
+		del_timer(&hinfo->timer);
+
+	/* remove proc entry */
+	remove_proc_entry(hinfo->pde->name,
+			  hinfo->family == AF_INET ? hashlimit_procdir4 :
+			  			     hashlimit_procdir6);
+	htable_selective_cleanup(hinfo, select_all);
+	vfree(hinfo);
+}
+
+static struct xt_hashlimit_htable *htable_find_get(char *name, int family)
+{
+	struct xt_hashlimit_htable *hinfo;
+	struct hlist_node *pos;
+
+	spin_lock_bh(&hashlimit_lock);
+	hlist_for_each_entry(hinfo, pos, &hashlimit_htables, node) {
+		if (!strcmp(name, hinfo->pde->name) &&
+		    hinfo->family == family) {
+			atomic_inc(&hinfo->use);
+			spin_unlock_bh(&hashlimit_lock);
+			return hinfo;
+		}
+	}
+	spin_unlock_bh(&hashlimit_lock);
+	return NULL;
+}
+
+static void htable_put(struct xt_hashlimit_htable *hinfo)
+{
+	if (atomic_dec_and_test(&hinfo->use)) {
+		spin_lock_bh(&hashlimit_lock);
+		hlist_del(&hinfo->node);
+		spin_unlock_bh(&hashlimit_lock);
+		htable_destroy(hinfo);
+	}
+}
+
+/* The algorithm used is the Simple Token Bucket Filter (TBF)
+ * see net/sched/sch_tbf.c in the linux source tree
+ */
+
+/* Rusty: This is my (non-mathematically-inclined) understanding of
+   this algorithm.  The `average rate' in jiffies becomes your initial
+   amount of credit `credit' and the most credit you can ever have
+   `credit_cap'.  The `peak rate' becomes the cost of passing the
+   test, `cost'.
+
+   `prev' tracks the last packet hit: you gain one credit per jiffy.
+   If you get credit balance more than this, the extra credit is
+   discarded.  Every time the match passes, you lose `cost' credits;
+   if you don't have that many, the test fails.
+
+   See Alexey's formal explanation in net/sched/sch_tbf.c.
+
+   To get the maximum range, we multiply by this factor (ie. you get N
+   credits per jiffy).  We want to allow a rate as low as 1 per day
+   (slowest userspace tool allows), which means
+   CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
+*/
+#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))
+
+/* Repeated shift and or gives us all 1s, final shift and add 1 gives
+ * us the power of 2 below the theoretical max, so GCC simply does a
+ * shift. */
+#define _POW2_BELOW2(x) ((x)|((x)>>1))
+#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
+#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
+#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
+#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
+#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)
+
+#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
+
+/* Precision saver. */
+static inline u_int32_t
+user2credits(u_int32_t user)
+{
+	/* If multiplying would overflow... */
+	if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
+		/* Divide first. */
+		return (user / XT_HASHLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
+
+	return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
+}
+
+static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
+{
+	dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY;
+	if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
+		dh->rateinfo.credit = dh->rateinfo.credit_cap;
+	dh->rateinfo.prev = now;
+}
+
+static int
+hashlimit_init_dst(struct xt_hashlimit_htable *hinfo, struct dsthash_dst *dst,
+		   const struct sk_buff *skb, unsigned int protoff)
+{
+	__be16 _ports[2], *ports;
+	int nexthdr;
+
+	memset(dst, 0, sizeof(*dst));
+
+	switch (hinfo->family) {
+	case AF_INET:
+		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
+			dst->addr.ip.dst = skb->nh.iph->daddr;
+		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
+			dst->addr.ip.src = skb->nh.iph->saddr;
+
+		if (!(hinfo->cfg.mode &
+		      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
+			return 0;
+		nexthdr = skb->nh.iph->protocol;
+		break;
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+	case AF_INET6:
+		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
+			memcpy(&dst->addr.ip6.dst, &skb->nh.ipv6h->daddr,
+			       sizeof(dst->addr.ip6.dst));
+		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
+			memcpy(&dst->addr.ip6.src, &skb->nh.ipv6h->saddr,
+			       sizeof(dst->addr.ip6.src));
+
+		if (!(hinfo->cfg.mode &
+		      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
+			return 0;
+		nexthdr = ipv6_find_hdr(skb, &protoff, -1, NULL);
+		if (nexthdr < 0)
+			return -1;
+		break;
+#endif
+	default:
+		BUG();
+		return 0;
+	}
+
+	switch (nexthdr) {
+	case IPPROTO_TCP:
+	case IPPROTO_UDP:
+	case IPPROTO_SCTP:
+	case IPPROTO_DCCP:
+		ports = skb_header_pointer(skb, protoff, sizeof(_ports),
+					   &_ports);
+		break;
+	default:
+		_ports[0] = _ports[1] = 0;
+		ports = _ports;
+		break;
+	}
+	if (!ports)
+		return -1;
+	if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SPT)
+		dst->src_port = ports[0];
+	if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DPT)
+		dst->dst_port = ports[1];
+	return 0;
+}
+
+static int
+hashlimit_match(const struct sk_buff *skb,
+		const struct net_device *in,
+		const struct net_device *out,
+		const struct xt_match *match,
+		const void *matchinfo,
+		int offset,
+		unsigned int protoff,
+		int *hotdrop)
+{
+	struct xt_hashlimit_info *r =
+		((struct xt_hashlimit_info *)matchinfo)->u.master;
+	struct xt_hashlimit_htable *hinfo = r->hinfo;
+	unsigned long now = jiffies;
+	struct dsthash_ent *dh;
+	struct dsthash_dst dst;
+
+	if (hashlimit_init_dst(hinfo, &dst, skb, protoff) < 0)
+		goto hotdrop;
+
+	spin_lock_bh(&hinfo->lock);
+	dh = dsthash_find(hinfo, &dst);
+	if (!dh) {
+		dh = dsthash_alloc_init(hinfo, &dst);
+		if (!dh) {
+			spin_unlock_bh(&hinfo->lock);
+			goto hotdrop;
+		}
+
+		dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
+		dh->rateinfo.prev = jiffies;
+		dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
+						   hinfo->cfg.burst);
+		dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
+						       hinfo->cfg.burst);
+		dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
+	} else {
+		/* update expiration timeout */
+		dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
+		rateinfo_recalc(dh, now);
+	}
+
+	if (dh->rateinfo.credit >= dh->rateinfo.cost) {
+		/* We're underlimit. */
+		dh->rateinfo.credit -= dh->rateinfo.cost;
+		spin_unlock_bh(&hinfo->lock);
+		return 1;
+	}
+
+       	spin_unlock_bh(&hinfo->lock);
+
+	/* default case: we're overlimit, thus don't match */
+	return 0;
+
+hotdrop:
+	*hotdrop = 1;
+	return 0;
+}
+
+static int
+hashlimit_checkentry(const char *tablename,
+		     const void *inf,
+		     const struct xt_match *match,
+		     void *matchinfo,
+		     unsigned int hook_mask)
+{
+	struct xt_hashlimit_info *r = matchinfo;
+
+	/* Check for overflow. */
+	if (r->cfg.burst == 0 ||
+	    user2credits(r->cfg.avg * r->cfg.burst) < user2credits(r->cfg.avg)) {
+		printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n",
+		       r->cfg.avg, r->cfg.burst);
+		return 0;
+	}
+	if (r->cfg.mode == 0 ||
+	    r->cfg.mode > (XT_HASHLIMIT_HASH_DPT |
+			   XT_HASHLIMIT_HASH_DIP |
+			   XT_HASHLIMIT_HASH_SIP |
+			   XT_HASHLIMIT_HASH_SPT))
+		return 0;
+	if (!r->cfg.gc_interval)
+		return 0;
+	if (!r->cfg.expire)
+		return 0;
+	if (r->name[sizeof(r->name) - 1] != '\0')
+		return 0;
+
+	/* This is the best we've got: We cannot release and re-grab lock,
+	 * since checkentry() is called before x_tables.c grabs xt_mutex.
+	 * We also cannot grab the hashtable spinlock, since htable_create will
+	 * call vmalloc, and that can sleep.  And we cannot just re-search
+	 * the list of htable's in htable_create(), since then we would
+	 * create duplicate proc files. -HW */
+	mutex_lock(&hlimit_mutex);
+	r->hinfo = htable_find_get(r->name, match->family);
+	if (!r->hinfo && htable_create(r, match->family) != 0) {
+		mutex_unlock(&hlimit_mutex);
+		return 0;
+	}
+	mutex_unlock(&hlimit_mutex);
+
+	/* Ugly hack: For SMP, we only want to use one set */
+	r->u.master = r;
+	return 1;
+}
+
+static void
+hashlimit_destroy(const struct xt_match *match, void *matchinfo)
+{
+	struct xt_hashlimit_info *r = matchinfo;
+
+	htable_put(r->hinfo);
+}
+
+#ifdef CONFIG_COMPAT
+struct compat_xt_hashlimit_info {
+	char name[IFNAMSIZ];
+	struct hashlimit_cfg cfg;
+	compat_uptr_t hinfo;
+	compat_uptr_t master;
+};
+
+static void compat_from_user(void *dst, void *src)
+{
+	int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
+
+	memcpy(dst, src, off);
+	memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off);
+}
+
+static int compat_to_user(void __user *dst, void *src)
+{
+	int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
+
+	return copy_to_user(dst, src, off) ? -EFAULT : 0;
+}
+#endif
+
+static struct xt_match xt_hashlimit[] = {
+	{
+		.name		= "hashlimit",
+		.family		= AF_INET,
+		.match		= hashlimit_match,
+		.matchsize	= sizeof(struct xt_hashlimit_info),
+#ifdef CONFIG_COMPAT
+		.compatsize	= sizeof(struct compat_xt_hashlimit_info),
+		.compat_from_user = compat_from_user,
+		.compat_to_user	= compat_to_user,
+#endif
+		.checkentry	= hashlimit_checkentry,
+		.destroy	= hashlimit_destroy,
+		.me		= THIS_MODULE
+	},
+	{
+		.name		= "hashlimit",
+		.family		= AF_INET6,
+		.match		= hashlimit_match,
+		.matchsize	= sizeof(struct xt_hashlimit_info),
+#ifdef CONFIG_COMPAT
+		.compatsize	= sizeof(struct compat_xt_hashlimit_info),
+		.compat_from_user = compat_from_user,
+		.compat_to_user	= compat_to_user,
+#endif
+		.checkentry	= hashlimit_checkentry,
+		.destroy	= hashlimit_destroy,
+		.me		= THIS_MODULE
+	},
+};
+
+/* PROC stuff */
+static void *dl_seq_start(struct seq_file *s, loff_t *pos)
+{
+	struct proc_dir_entry *pde = s->private;
+	struct xt_hashlimit_htable *htable = pde->data;
+	unsigned int *bucket;
+
+	spin_lock_bh(&htable->lock);
+	if (*pos >= htable->cfg.size)
+		return NULL;
+
+	bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
+	if (!bucket)
+		return ERR_PTR(-ENOMEM);
+
+	*bucket = *pos;
+	return bucket;
+}
+
+static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct proc_dir_entry *pde = s->private;
+	struct xt_hashlimit_htable *htable = pde->data;
+	unsigned int *bucket = (unsigned int *)v;
+
+	*pos = ++(*bucket);
+	if (*pos >= htable->cfg.size) {
+		kfree(v);
+		return NULL;
+	}
+	return bucket;
+}
+
+static void dl_seq_stop(struct seq_file *s, void *v)
+{
+	struct proc_dir_entry *pde = s->private;
+	struct xt_hashlimit_htable *htable = pde->data;
+	unsigned int *bucket = (unsigned int *)v;
+
+	kfree(bucket);
+	spin_unlock_bh(&htable->lock);
+}
+
+static int dl_seq_real_show(struct dsthash_ent *ent, int family,
+				   struct seq_file *s)
+{
+	/* recalculate to show accurate numbers */
+	rateinfo_recalc(ent, jiffies);
+
+	switch (family) {
+	case AF_INET:
+		return seq_printf(s, "%ld %u.%u.%u.%u:%u->"
+				     "%u.%u.%u.%u:%u %u %u %u\n",
+				 (long)(ent->expires - jiffies)/HZ,
+				 NIPQUAD(ent->dst.addr.ip.src),
+				 ntohs(ent->dst.src_port),
+				 NIPQUAD(ent->dst.addr.ip.dst),
+				 ntohs(ent->dst.dst_port),
+				 ent->rateinfo.credit, ent->rateinfo.credit_cap,
+				 ent->rateinfo.cost);
+	case AF_INET6:
+		return seq_printf(s, "%ld " NIP6_FMT ":%u->"
+				     NIP6_FMT ":%u %u %u %u\n",
+				 (long)(ent->expires - jiffies)/HZ,
+				 NIP6(*(struct in6_addr *)&ent->dst.addr.ip6.src),
+				 ntohs(ent->dst.src_port),
+				 NIP6(*(struct in6_addr *)&ent->dst.addr.ip6.dst),
+				 ntohs(ent->dst.dst_port),
+				 ent->rateinfo.credit, ent->rateinfo.credit_cap,
+				 ent->rateinfo.cost);
+	default:
+		BUG();
+		return 0;
+	}
+}
+
+static int dl_seq_show(struct seq_file *s, void *v)
+{
+	struct proc_dir_entry *pde = s->private;
+	struct xt_hashlimit_htable *htable = pde->data;
+	unsigned int *bucket = (unsigned int *)v;
+	struct dsthash_ent *ent;
+	struct hlist_node *pos;
+
+	if (!hlist_empty(&htable->hash[*bucket])) {
+		hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node)
+			if (dl_seq_real_show(ent, htable->family, s))
+				return 1;
+	}
+	return 0;
+}
+
+static struct seq_operations dl_seq_ops = {
+	.start = dl_seq_start,
+	.next  = dl_seq_next,
+	.stop  = dl_seq_stop,
+	.show  = dl_seq_show
+};
+
+static int dl_proc_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open(file, &dl_seq_ops);
+
+	if (!ret) {
+		struct seq_file *sf = file->private_data;
+		sf->private = PDE(inode);
+	}
+	return ret;
+}
+
+static struct file_operations dl_file_ops = {
+	.owner   = THIS_MODULE,
+	.open    = dl_proc_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release
+};
+
+static int __init xt_hashlimit_init(void)
+{
+	int err;
+
+	err = xt_register_matches(xt_hashlimit, ARRAY_SIZE(xt_hashlimit));
+	if (err < 0)
+		goto err1;
+
+	err = -ENOMEM;
+	hashlimit_cachep = kmem_cache_create("xt_hashlimit",
+					    sizeof(struct dsthash_ent), 0, 0,
+					    NULL, NULL);
+	if (!hashlimit_cachep) {
+		printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n");
+		goto err2;
+	}
+	hashlimit_procdir4 = proc_mkdir("ipt_hashlimit", proc_net);
+	if (!hashlimit_procdir4) {
+		printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
+				"entry\n");
+		goto err3;
+	}
+	hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", proc_net);
+	if (!hashlimit_procdir6) {
+		printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
+				"entry\n");
+		goto err4;
+	}
+	return 0;
+err4:
+	remove_proc_entry("ipt_hashlimit", proc_net);
+err3:
+	kmem_cache_destroy(hashlimit_cachep);
+err2:
+	xt_unregister_matches(xt_hashlimit, ARRAY_SIZE(xt_hashlimit));
+err1:
+	return err;
+
+}
+
+static void __exit xt_hashlimit_fini(void)
+{
+	remove_proc_entry("ipt_hashlimit", proc_net);
+	remove_proc_entry("ip6t_hashlimit", proc_net);
+	kmem_cache_destroy(hashlimit_cachep);
+	xt_unregister_matches(xt_hashlimit, ARRAY_SIZE(xt_hashlimit));
+}
+
+module_init(xt_hashlimit_init);
+module_exit(xt_hashlimit_fini);
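
The token-bucket comment in xt_hashlimit.c above can be sanity-checked outside the kernel. This throwaway userspace sketch copies the credit macros and user2credits() from the file and works one example; HZ=1000, XT_HASHLIMIT_SCALE=10000 and the "5 packets per second" encoding of avg are assumptions for illustration, not values taken from this patch.

#include <stdio.h>
#include <stdint.h>

#define HZ 1000				/* assumed tick rate */
#define XT_HASHLIMIT_SCALE 10000	/* assumed scale factor */

#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))

#define _POW2_BELOW2(x) ((x)|((x)>>1))
#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)

#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)

static uint32_t user2credits(uint32_t user)
{
	/* same divide-first-on-overflow logic as the kernel helper */
	if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
		return (user / XT_HASHLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
	return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
}

int main(void)
{
	/* "5 packets per second" with a burst of 10: avg is assumed to be
	 * the per-packet interval in 1/XT_HASHLIMIT_SCALE second units */
	uint32_t avg = XT_HASHLIMIT_SCALE / 5, burst = 10;
	uint32_t credit = user2credits(avg * burst);	/* initial credit / cap */
	uint32_t cost   = user2credits(avg);		/* cost of one packet   */

	printf("credits/jiffy=%u cost=%u initial=%u\n",
	       (unsigned)CREDITS_PER_JIFFY, (unsigned)cost, (unsigned)credit);
	/* each matching packet spends `cost`; credit grows by
	 * CREDITS_PER_JIFFY per elapsed jiffy, capped at the initial value,
	 * which is exactly what rateinfo_recalc() does in the match */
	return 0;
}

With these assumed values the bucket starts with enough credit for the burst and then refills at the configured average rate, matching the behaviour described in Rusty's comment.
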
diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c
index 934dddf..dfa1ee6 100644
--- a/net/netfilter/xt_mark.c
+++ b/net/netfilter/xt_mark.c
@@ -31,7 +31,7 @@
 {
 	const struct xt_mark_info *info = matchinfo;
 
-	return ((skb->nfmark & info->mask) == info->mark) ^ info->invert;
+	return ((skb->mark & info->mask) == info->mark) ^ info->invert;
 }
 
 static int
diff --git a/net/netfilter/xt_multiport.c b/net/netfilter/xt_multiport.c
index d3aefd3..1602086 100644
--- a/net/netfilter/xt_multiport.c
+++ b/net/netfilter/xt_multiport.c
@@ -1,5 +1,5 @@
-/* Kernel module to match one of a list of TCP/UDP/SCTP/DCCP ports: ports are in
-   the same place so we can treat them as equal. */
+/* Kernel module to match one of a list of TCP/UDP(-Lite)/SCTP/DCCP ports:
+   ports are in the same place so we can treat them as equal. */
 
 /* (C) 1999-2001 Paul `Rusty' Russell
  * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
@@ -104,7 +104,7 @@
       unsigned int protoff,
       int *hotdrop)
 {
-	u16 _ports[2], *pptr;
+	__be16 _ports[2], *pptr;
 	const struct xt_multiport *multiinfo = matchinfo;
 
 	if (offset)
@@ -135,7 +135,7 @@
 	 unsigned int protoff,
 	 int *hotdrop)
 {
-	u16 _ports[2], *pptr;
+	__be16 _ports[2], *pptr;
 	const struct xt_multiport_v1 *multiinfo = matchinfo;
 
 	if (offset)
@@ -162,6 +162,7 @@
 {
 	/* Must specify supported protocol, no unknown flags or bad count */
 	return (proto == IPPROTO_TCP || proto == IPPROTO_UDP
+		|| proto == IPPROTO_UDPLITE
 		|| proto == IPPROTO_SCTP || proto == IPPROTO_DCCP)
 		&& !(ip_invflags & XT_INV_PROTO)
 		&& (match_flags == XT_MULTIPORT_SOURCE
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index 7956aca..71bf036 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -71,7 +71,7 @@
 		duprintf("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d\tflags: %x\n", 
 				++i, offset, sch->type, htons(sch->length), sch->flags);
 
-		offset += (htons(sch->length) + 3) & ~3;
+		offset += (ntohs(sch->length) + 3) & ~3;
 
 		duprintf("skb->len: %d\toffset: %d\n", skb->len, offset);
 
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c
index e76a68e..46414b5 100644
--- a/net/netfilter/xt_tcpudp.c
+++ b/net/netfilter/xt_tcpudp.c
@@ -10,7 +10,7 @@
 #include <linux/netfilter_ipv4/ip_tables.h>
 #include <linux/netfilter_ipv6/ip6_tables.h>
 
-MODULE_DESCRIPTION("x_tables match for TCP and UDP, supports IPv4 and IPv6");
+MODULE_DESCRIPTION("x_tables match for TCP and UDP(-Lite), supports IPv4 and IPv6");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("xt_tcp");
 MODULE_ALIAS("xt_udp");
@@ -234,6 +234,24 @@
 		.proto		= IPPROTO_UDP,
 		.me		= THIS_MODULE,
 	},
+	{
+		.name		= "udplite",
+		.family		= AF_INET,
+		.checkentry	= udp_checkentry,
+		.match		= udp_match,
+		.matchsize	= sizeof(struct xt_udp),
+		.proto		= IPPROTO_UDPLITE,
+		.me		= THIS_MODULE,
+	},
+	{
+		.name		= "udplite",
+		.family		= AF_INET6,
+		.checkentry	= udp_checkentry,
+		.match		= udp_match,
+		.matchsize	= sizeof(struct xt_udp),
+		.proto		= IPPROTO_UDPLITE,
+		.me		= THIS_MODULE,
+	},
 };
 
 static int __init xt_tcpudp_init(void)
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index a6ce1d6..743b057 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -407,12 +407,14 @@
 
 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_ADD,
 					      &audit_info);
-	audit_log_format(audit_buf,
-			 " cipso_doi=%u cipso_type=%s res=%u",
-			 doi,
-			 type_str,
-			 ret_val == 0 ? 1 : 0);
-	audit_log_end(audit_buf);
+	if (audit_buf != NULL) {
+		audit_log_format(audit_buf,
+				 " cipso_doi=%u cipso_type=%s res=%u",
+				 doi,
+				 type_str,
+				 ret_val == 0 ? 1 : 0);
+		audit_log_end(audit_buf);
+	}
 
 	return ret_val;
 }
@@ -452,17 +454,13 @@
 	}
 
 list_start:
-	ans_skb = nlmsg_new(NLMSG_GOODSIZE * nlsze_mult, GFP_KERNEL);
+	ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE * nlsze_mult, GFP_KERNEL);
 	if (ans_skb == NULL) {
 		ret_val = -ENOMEM;
 		goto list_failure;
 	}
-	data = netlbl_netlink_hdr_put(ans_skb,
-				      info->snd_pid,
-				      info->snd_seq,
-				      netlbl_cipsov4_gnl_family.id,
-				      0,
-				      NLBL_CIPSOV4_C_LIST);
+	data = genlmsg_put_reply(ans_skb, info, &netlbl_cipsov4_gnl_family,
+				 0, NLBL_CIPSOV4_C_LIST);
 	if (data == NULL) {
 		ret_val = -ENOMEM;
 		goto list_failure;
@@ -568,7 +566,7 @@
 
 	genlmsg_end(ans_skb, data);
 
-	ret_val = genlmsg_unicast(ans_skb, info->snd_pid);
+	ret_val = genlmsg_reply(ans_skb, info);
 	if (ret_val != 0)
 		goto list_failure;
 
@@ -607,12 +605,9 @@
 	struct netlbl_cipsov4_doiwalk_arg *cb_arg = arg;
 	void *data;
 
-	data = netlbl_netlink_hdr_put(cb_arg->skb,
-				      NETLINK_CB(cb_arg->nl_cb->skb).pid,
-				      cb_arg->seq,
-				      netlbl_cipsov4_gnl_family.id,
-				      NLM_F_MULTI,
-				      NLBL_CIPSOV4_C_LISTALL);
+	data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).pid,
+			   cb_arg->seq, &netlbl_cipsov4_gnl_family,
+			   NLM_F_MULTI, NLBL_CIPSOV4_C_LISTALL);
 	if (data == NULL)
 		goto listall_cb_failure;
 
@@ -687,11 +682,13 @@
 
 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_DEL,
 					      &audit_info);
-	audit_log_format(audit_buf,
-			 " cipso_doi=%u res=%u",
-			 doi,
-			 ret_val == 0 ? 1 : 0);
-	audit_log_end(audit_buf);
+	if (audit_buf != NULL) {
+		audit_log_format(audit_buf,
+				 " cipso_doi=%u res=%u",
+				 doi,
+				 ret_val == 0 ? 1 : 0);
+		audit_log_end(audit_buf);
+	}
 
 	return ret_val;
 }
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index af4371d..f46a0ae 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -202,7 +202,6 @@
 	int ret_val;
 	u32 bkt;
 	struct audit_buffer *audit_buf;
-	char *audit_domain;
 
 	switch (entry->type) {
 	case NETLBL_NLTYPE_UNLABELED:
@@ -243,24 +242,24 @@
 	} else
 		ret_val = -EINVAL;
 
-	if (entry->domain != NULL)
-		audit_domain = entry->domain;
-	else
-		audit_domain = "(default)";
 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info);
-	audit_log_format(audit_buf, " nlbl_domain=%s", audit_domain);
-	switch (entry->type) {
-	case NETLBL_NLTYPE_UNLABELED:
-		audit_log_format(audit_buf, " nlbl_protocol=unlbl");
-		break;
-	case NETLBL_NLTYPE_CIPSOV4:
+	if (audit_buf != NULL) {
 		audit_log_format(audit_buf,
-				 " nlbl_protocol=cipsov4 cipso_doi=%u",
-				 entry->type_def.cipsov4->doi);
-		break;
+				 " nlbl_domain=%s",
+				 entry->domain ? entry->domain : "(default)");
+		switch (entry->type) {
+		case NETLBL_NLTYPE_UNLABELED:
+			audit_log_format(audit_buf, " nlbl_protocol=unlbl");
+			break;
+		case NETLBL_NLTYPE_CIPSOV4:
+			audit_log_format(audit_buf,
+					 " nlbl_protocol=cipsov4 cipso_doi=%u",
+					 entry->type_def.cipsov4->doi);
+			break;
+		}
+		audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
+		audit_log_end(audit_buf);
 	}
-	audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
-	audit_log_end(audit_buf);
 
 	rcu_read_unlock();
 
@@ -310,7 +309,6 @@
 	int ret_val = -ENOENT;
 	struct netlbl_dom_map *entry;
 	struct audit_buffer *audit_buf;
-	char *audit_domain;
 
 	rcu_read_lock();
 	if (domain != NULL)
@@ -348,16 +346,14 @@
 		spin_unlock(&netlbl_domhsh_def_lock);
 	}
 
-	if (entry->domain != NULL)
-		audit_domain = entry->domain;
-	else
-		audit_domain = "(default)";
 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info);
-	audit_log_format(audit_buf,
-			 " nlbl_domain=%s res=%u",
-			 audit_domain,
-			 ret_val == 0 ? 1 : 0);
-	audit_log_end(audit_buf);
+	if (audit_buf != NULL) {
+		audit_log_format(audit_buf,
+				 " nlbl_domain=%s res=%u",
+				 entry->domain ? entry->domain : "(default)",
+				 ret_val == 0 ? 1 : 0);
+		audit_log_end(audit_buf);
+	}
 
 	if (ret_val == 0)
 		call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
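
The domainhash and CIPSO hunks above all apply one pattern: netlbl_audit_start_common() can now return NULL (the common helper bails out when auditing is disabled, see the netlabel_user.c hunk later in this series), so each caller wraps the audit_log_format()/audit_log_end() pair in a NULL check instead of logging unconditionally. A minimal sketch of the guarded form, where audit_ctx, domain and ret_val stand in for whatever the caller already has in scope:

	struct audit_buffer *audit_buf;

	audit_buf = audit_log_start(audit_ctx, GFP_ATOMIC, AUDIT_MAC_MAP_ADD);
	if (audit_buf != NULL) {
		audit_log_format(audit_buf, " nlbl_domain=%s res=%u",
				 domain ? domain : "(default)",
				 ret_val == 0 ? 1 : 0);
		audit_log_end(audit_buf);
	}
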
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index ff97110..e03a328 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -40,6 +40,207 @@
 #include "netlabel_user.h"
 
 /*
+ * Security Attribute Functions
+ */
+
+/**
+ * netlbl_secattr_catmap_walk - Walk a LSM secattr catmap looking for a bit
+ * @catmap: the category bitmap
+ * @offset: the offset to start searching at, in bits
+ *
+ * Description:
+ * This function walks a LSM secattr category bitmap starting at @offset and
+ * returns the spot of the first set bit or -ENOENT if no bits are set.
+ *
+ */
+int netlbl_secattr_catmap_walk(struct netlbl_lsm_secattr_catmap *catmap,
+			       u32 offset)
+{
+	struct netlbl_lsm_secattr_catmap *iter = catmap;
+	u32 node_idx;
+	u32 node_bit;
+	NETLBL_CATMAP_MAPTYPE bitmap;
+
+	if (offset > iter->startbit) {
+		while (offset >= (iter->startbit + NETLBL_CATMAP_SIZE)) {
+			iter = iter->next;
+			if (iter == NULL)
+				return -ENOENT;
+		}
+		node_idx = (offset - iter->startbit) / NETLBL_CATMAP_MAPSIZE;
+		node_bit = offset - iter->startbit -
+			   (NETLBL_CATMAP_MAPSIZE * node_idx);
+	} else {
+		node_idx = 0;
+		node_bit = 0;
+	}
+	bitmap = iter->bitmap[node_idx] >> node_bit;
+
+	for (;;) {
+		if (bitmap != 0) {
+			while ((bitmap & NETLBL_CATMAP_BIT) == 0) {
+				bitmap >>= 1;
+				node_bit++;
+			}
+			return iter->startbit +
+				(NETLBL_CATMAP_MAPSIZE * node_idx) + node_bit;
+		}
+		if (++node_idx >= NETLBL_CATMAP_MAPCNT) {
+			if (iter->next != NULL) {
+				iter = iter->next;
+				node_idx = 0;
+			} else
+				return -ENOENT;
+		}
+		bitmap = iter->bitmap[node_idx];
+		node_bit = 0;
+	}
+
+	return -ENOENT;
+}
+
+/**
+ * netlbl_secattr_catmap_walk_rng - Find the end of a string of set bits
+ * @catmap: the category bitmap
+ * @offset: the offset to start searching at, in bits
+ *
+ * Description:
+ * This function walks a LSM secattr category bitmap starting at @offset and
+ * returns the spot of the first cleared bit or -ENOENT if the offset is past
+ * the end of the bitmap.
+ *
+ */
+int netlbl_secattr_catmap_walk_rng(struct netlbl_lsm_secattr_catmap *catmap,
+				   u32 offset)
+{
+	struct netlbl_lsm_secattr_catmap *iter = catmap;
+	u32 node_idx;
+	u32 node_bit;
+	NETLBL_CATMAP_MAPTYPE bitmask;
+	NETLBL_CATMAP_MAPTYPE bitmap;
+
+	if (offset > iter->startbit) {
+		while (offset >= (iter->startbit + NETLBL_CATMAP_SIZE)) {
+			iter = iter->next;
+			if (iter == NULL)
+				return -ENOENT;
+		}
+		node_idx = (offset - iter->startbit) / NETLBL_CATMAP_MAPSIZE;
+		node_bit = offset - iter->startbit -
+			   (NETLBL_CATMAP_MAPSIZE * node_idx);
+	} else {
+		node_idx = 0;
+		node_bit = 0;
+	}
+	bitmask = NETLBL_CATMAP_BIT << node_bit;
+
+	for (;;) {
+		bitmap = iter->bitmap[node_idx];
+		while (bitmask != 0 && (bitmap & bitmask) != 0) {
+			bitmask <<= 1;
+			node_bit++;
+		}
+
+		if (bitmask != 0)
+			return iter->startbit +
+				(NETLBL_CATMAP_MAPSIZE * node_idx) +
+				node_bit - 1;
+		else if (++node_idx >= NETLBL_CATMAP_MAPCNT) {
+			if (iter->next == NULL)
+				return iter->startbit + NETLBL_CATMAP_SIZE - 1;
+			iter = iter->next;
+			node_idx = 0;
+		}
+		bitmask = NETLBL_CATMAP_BIT;
+		node_bit = 0;
+	}
+
+	return -ENOENT;
+}
+
+/**
+ * netlbl_secattr_catmap_setbit - Set a bit in a LSM secattr catmap
+ * @catmap: the category bitmap
+ * @bit: the bit to set
+ * @flags: memory allocation flags
+ *
+ * Description:
+ * Set the bit specified by @bit in @catmap.  Returns zero on success,
+ * negative values on failure.
+ *
+ */
+int netlbl_secattr_catmap_setbit(struct netlbl_lsm_secattr_catmap *catmap,
+				 u32 bit,
+				 gfp_t flags)
+{
+	struct netlbl_lsm_secattr_catmap *iter = catmap;
+	u32 node_bit;
+	u32 node_idx;
+
+	while (iter->next != NULL &&
+	       bit >= (iter->startbit + NETLBL_CATMAP_SIZE))
+		iter = iter->next;
+	if (bit >= (iter->startbit + NETLBL_CATMAP_SIZE)) {
+		iter->next = netlbl_secattr_catmap_alloc(flags);
+		if (iter->next == NULL)
+			return -ENOMEM;
+		iter = iter->next;
+		iter->startbit = bit & ~(NETLBL_CATMAP_SIZE - 1);
+	}
+
+	/* gcc always rounds to zero when doing integer division */
+	node_idx = (bit - iter->startbit) / NETLBL_CATMAP_MAPSIZE;
+	node_bit = bit - iter->startbit - (NETLBL_CATMAP_MAPSIZE * node_idx);
+	iter->bitmap[node_idx] |= NETLBL_CATMAP_BIT << node_bit;
+
+	return 0;
+}
+
+/**
+ * netlbl_secattr_catmap_setrng - Set a range of bits in a LSM secattr catmap
+ * @catmap: the category bitmap
+ * @start: the starting bit
+ * @end: the last bit in the string
+ * @flags: memory allocation flags
+ *
+ * Description:
+ * Set a range of bits, starting at @start and ending with @end.  Returns zero
+ * on success, negative values on failure.
+ *
+ */
+int netlbl_secattr_catmap_setrng(struct netlbl_lsm_secattr_catmap *catmap,
+				 u32 start,
+				 u32 end,
+				 gfp_t flags)
+{
+	int ret_val = 0;
+	struct netlbl_lsm_secattr_catmap *iter = catmap;
+	u32 iter_max_spot;
+	u32 spot;
+
+	/* XXX - This could probably be made a bit faster by combining writes
+	 * to the catmap instead of setting a single bit each time, but for
+	 * right now skipping to the start of the range in the catmap should
+	 * be a nice improvement over calling the individual setbit function
+	 * repeatedly from a loop. */
+
+	while (iter->next != NULL &&
+	       start >= (iter->startbit + NETLBL_CATMAP_SIZE))
+		iter = iter->next;
+	iter_max_spot = iter->startbit + NETLBL_CATMAP_SIZE;
+
+	for (spot = start; spot <= end && ret_val == 0; spot++) {
+		if (spot >= iter_max_spot && iter->next != NULL) {
+			iter = iter->next;
+			iter_max_spot = iter->startbit + NETLBL_CATMAP_SIZE;
+		}
+		ret_val = netlbl_secattr_catmap_setbit(iter, spot, GFP_ATOMIC);
+	}
+
+	return ret_val;
+}
+
+/*
  * LSM Functions
  */
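
The four catmap helpers added above are meant to be used together: netlbl_secattr_catmap_walk() finds the next set category bit at or after an offset, and netlbl_secattr_catmap_walk_rng() finds the last bit of that contiguous run. A hedged sketch of how a caller might enumerate the set ranges in a category bitmap (handle_range() is a hypothetical consumer, not part of this patch):

	int start;
	int end;

	start = netlbl_secattr_catmap_walk(catmap, 0);
	while (start >= 0) {
		end = netlbl_secattr_catmap_walk_rng(catmap, start);
		if (end < 0)
			break;
		handle_range(start, end);	/* hypothetical */
		start = netlbl_secattr_catmap_walk(catmap, end + 1);
	}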
 
@@ -62,6 +263,9 @@
 	int ret_val = -ENOENT;
 	struct netlbl_dom_map *dom_entry;
 
+	if ((secattr->flags & NETLBL_SECATTR_DOMAIN) == 0)
+		return -ENOENT;
+
 	rcu_read_lock();
 	dom_entry = netlbl_domhsh_getentry(secattr->domain);
 	if (dom_entry == NULL)
@@ -146,10 +350,8 @@
 int netlbl_skbuff_getattr(const struct sk_buff *skb,
 			  struct netlbl_lsm_secattr *secattr)
 {
-	int ret_val;
-
-	ret_val = cipso_v4_skbuff_getattr(skb, secattr);
-	if (ret_val == 0)
+	if (CIPSO_V4_OPTEXIST(skb) &&
+	    cipso_v4_skbuff_getattr(skb, secattr) == 0)
 		return 0;
 
 	return netlbl_unlabel_getattr(secattr);
@@ -200,7 +402,7 @@
 int netlbl_cache_add(const struct sk_buff *skb,
 		     const struct netlbl_lsm_secattr *secattr)
 {
-	if (secattr->cache == NULL)
+	if ((secattr->flags & NETLBL_SECATTR_CACHE) == 0)
 		return -ENOMSG;
 
 	if (CIPSO_V4_OPTEXIST(skb))
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index 53c9079a..e8c80f3 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -188,12 +188,9 @@
 	struct netlbl_domhsh_walk_arg *cb_arg = arg;
 	void *data;
 
-	data = netlbl_netlink_hdr_put(cb_arg->skb,
-				      NETLINK_CB(cb_arg->nl_cb->skb).pid,
-				      cb_arg->seq,
-				      netlbl_mgmt_gnl_family.id,
-				      NLM_F_MULTI,
-				      NLBL_MGMT_C_LISTALL);
+	data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).pid,
+			   cb_arg->seq, &netlbl_mgmt_gnl_family,
+			   NLM_F_MULTI, NLBL_MGMT_C_LISTALL);
 	if (data == NULL)
 		goto listall_cb_failure;
 
@@ -356,15 +353,11 @@
 	void *data;
 	struct netlbl_dom_map *entry;
 
-	ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (ans_skb == NULL)
 		return -ENOMEM;
-	data = netlbl_netlink_hdr_put(ans_skb,
-				      info->snd_pid,
-				      info->snd_seq,
-				      netlbl_mgmt_gnl_family.id,
-				      0,
-				      NLBL_MGMT_C_LISTDEF);
+	data = genlmsg_put_reply(ans_skb, info, &netlbl_mgmt_gnl_family,
+				 0, NLBL_MGMT_C_LISTDEF);
 	if (data == NULL)
 		goto listdef_failure;
 
@@ -390,7 +383,7 @@
 
 	genlmsg_end(ans_skb, data);
 
-	ret_val = genlmsg_unicast(ans_skb, info->snd_pid);
+	ret_val = genlmsg_reply(ans_skb, info);
 	if (ret_val != 0)
 		goto listdef_failure;
 	return 0;
@@ -422,12 +415,9 @@
 	int ret_val = -ENOMEM;
 	void *data;
 
-	data = netlbl_netlink_hdr_put(skb,
-				      NETLINK_CB(cb->skb).pid,
-				      cb->nlh->nlmsg_seq,
-				      netlbl_mgmt_gnl_family.id,
-				      NLM_F_MULTI,
-				      NLBL_MGMT_C_PROTOCOLS);
+	data = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+			   &netlbl_mgmt_gnl_family, NLM_F_MULTI,
+			   NLBL_MGMT_C_PROTOCOLS);
 	if (data == NULL)
 		goto protocols_cb_failure;
 
@@ -492,15 +482,11 @@
 	struct sk_buff *ans_skb = NULL;
 	void *data;
 
-	ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (ans_skb == NULL)
 		return -ENOMEM;
-	data = netlbl_netlink_hdr_put(ans_skb,
-				      info->snd_pid,
-				      info->snd_seq,
-				      netlbl_mgmt_gnl_family.id,
-				      0,
-				      NLBL_MGMT_C_VERSION);
+	data = genlmsg_put_reply(ans_skb, info, &netlbl_mgmt_gnl_family,
+				 0, NLBL_MGMT_C_VERSION);
 	if (data == NULL)
 		goto version_failure;
 
@@ -512,7 +498,7 @@
 
 	genlmsg_end(ans_skb, data);
 
-	ret_val = genlmsg_unicast(ans_skb, info->snd_pid);
+	ret_val = genlmsg_reply(ans_skb, info);
 	if (ret_val != 0)
 		goto version_failure;
 	return 0;
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 1833ad2..5bc3718 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -35,6 +35,7 @@
 #include <linux/socket.h>
 #include <linux/string.h>
 #include <linux/skbuff.h>
+#include <linux/audit.h>
 #include <net/sock.h>
 #include <net/netlink.h>
 #include <net/genetlink.h>
@@ -47,7 +48,8 @@
 #include "netlabel_unlabeled.h"
 
 /* Accept unlabeled packets flag */
-static atomic_t netlabel_unlabel_accept_flg = ATOMIC_INIT(0);
+static DEFINE_SPINLOCK(netlabel_unlabel_acceptflg_lock);
+static u8 netlabel_unlabel_acceptflg = 0;
 
 /* NetLabel Generic NETLINK CIPSOv4 family */
 static struct genl_family netlbl_unlabel_gnl_family = {
@@ -82,13 +84,20 @@
 	struct audit_buffer *audit_buf;
 	u8 old_val;
 
-	old_val = atomic_read(&netlabel_unlabel_accept_flg);
-	atomic_set(&netlabel_unlabel_accept_flg, value);
+	rcu_read_lock();
+	old_val = netlabel_unlabel_acceptflg;
+	spin_lock(&netlabel_unlabel_acceptflg_lock);
+	netlabel_unlabel_acceptflg = value;
+	spin_unlock(&netlabel_unlabel_acceptflg_lock);
+	rcu_read_unlock();
 
 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_ALLOW,
 					      audit_info);
-	audit_log_format(audit_buf, " unlbl_accept=%u old=%u", value, old_val);
-	audit_log_end(audit_buf);
+	if (audit_buf != NULL) {
+		audit_log_format(audit_buf,
+				 " unlbl_accept=%u old=%u", value, old_val);
+		audit_log_end(audit_buf);
+	}
 }
 
 /*
@@ -138,29 +147,27 @@
 	struct sk_buff *ans_skb;
 	void *data;
 
-	ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (ans_skb == NULL)
 		goto list_failure;
-	data = netlbl_netlink_hdr_put(ans_skb,
-				      info->snd_pid,
-				      info->snd_seq,
-				      netlbl_unlabel_gnl_family.id,
-				      0,
-				      NLBL_UNLABEL_C_LIST);
+	data = genlmsg_put_reply(ans_skb, info, &netlbl_unlabel_gnl_family,
+				 0, NLBL_UNLABEL_C_LIST);
 	if (data == NULL) {
 		ret_val = -ENOMEM;
 		goto list_failure;
 	}
 
+	rcu_read_lock();
 	ret_val = nla_put_u8(ans_skb,
 			     NLBL_UNLABEL_A_ACPTFLG,
-			     atomic_read(&netlabel_unlabel_accept_flg));
+			     netlabel_unlabel_acceptflg);
+	rcu_read_unlock();
 	if (ret_val != 0)
 		goto list_failure;
 
 	genlmsg_end(ans_skb, data);
 
-	ret_val = genlmsg_unicast(ans_skb, info->snd_pid);
+	ret_val = genlmsg_reply(ans_skb, info);
 	if (ret_val != 0)
 		goto list_failure;
 	return 0;
@@ -240,10 +247,17 @@
  */
 int netlbl_unlabel_getattr(struct netlbl_lsm_secattr *secattr)
 {
-	if (atomic_read(&netlabel_unlabel_accept_flg) == 1)
-		return netlbl_secattr_init(secattr);
+	int ret_val;
 
-	return -ENOMSG;
+	rcu_read_lock();
+	if (netlabel_unlabel_acceptflg == 1) {
+		netlbl_secattr_init(secattr);
+		ret_val = 0;
+	} else
+		ret_val = -ENOMSG;
+	rcu_read_unlock();
+
+	return ret_val;
 }
 
 /**
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c
index 98a4163..42f12bd 100644
--- a/net/netlabel/netlabel_user.c
+++ b/net/netlabel/netlabel_user.c
@@ -46,6 +46,10 @@
 #include "netlabel_cipso_v4.h"
 #include "netlabel_user.h"
 
+/* do not do any auditing if audit_enabled == 0, see kernel/audit.c for
+ * details */
+extern int audit_enabled;
+
 /*
  * NetLabel NETLINK Setup Functions
  */
@@ -101,6 +105,9 @@
 	char *secctx;
 	u32 secctx_len;
 
+	if (audit_enabled == 0)
+		return NULL;
+
 	audit_buf = audit_log_start(audit_ctx, GFP_ATOMIC, type);
 	if (audit_buf == NULL)
 		return NULL;
diff --git a/net/netlabel/netlabel_user.h b/net/netlabel/netlabel_user.h
index 47967ef..6d7f4ab 100644
--- a/net/netlabel/netlabel_user.h
+++ b/net/netlabel/netlabel_user.h
@@ -42,37 +42,6 @@
 /* NetLabel NETLINK helper functions */
 
 /**
- * netlbl_netlink_hdr_put - Write the NETLINK buffers into a sk_buff
- * @skb: the packet
- * @pid: the PID of the receipient
- * @seq: the sequence number
- * @type: the generic NETLINK message family type
- * @cmd: command
- *
- * Description:
- * Write both a NETLINK nlmsghdr structure and a Generic NETLINK genlmsghdr
- * struct to the packet.  Returns a pointer to the start of the payload buffer
- * on success or NULL on failure.
- *
- */
-static inline void *netlbl_netlink_hdr_put(struct sk_buff *skb,
-					   u32 pid,
-					   u32 seq,
-					   int type,
-					   int flags,
-					   u8 cmd)
-{
-	return genlmsg_put(skb,
-			   pid,
-			   seq,
-			   type,
-			   0,
-			   flags,
-			   cmd,
-			   NETLBL_PROTO_VERSION);
-}
-
-/**
  * netlbl_netlink_auditinfo - Fetch the audit information from a NETLINK msg
  * @skb: the packet
  * @audit_info: NetLabel audit information
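
The same conversion runs through all of the NetLabel userspace handlers above: the private netlbl_netlink_hdr_put() wrapper is dropped in favour of genlmsg_put()/genlmsg_put_reply(), and replies are sent with genlmsg_reply() instead of an explicit genlmsg_unicast() to info->snd_pid. A hedged sketch of the resulting doit-handler shape (my_gnl_family, MY_CMD_LIST and MY_ATTR_FLAG are placeholder names, not part of the patch):

	struct sk_buff *ans_skb;
	void *data;
	int ret_val = -ENOMEM;

	ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (ans_skb == NULL)
		return -ENOMEM;
	data = genlmsg_put_reply(ans_skb, info, &my_gnl_family,
				 0, MY_CMD_LIST);
	if (data == NULL)
		goto failure;

	ret_val = nla_put_u8(ans_skb, MY_ATTR_FLAG, 1);
	if (ret_val != 0)
		goto failure;

	genlmsg_end(ans_skb, data);
	return genlmsg_reply(ans_skb, info);

failure:
	kfree_skb(ans_skb);
	return ret_val;
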
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index d527c89..3baafb1 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1148,12 +1148,11 @@
 	if (len > sk->sk_sndbuf - 32)
 		goto out;
 	err = -ENOBUFS;
-	skb = nlmsg_new(len, GFP_KERNEL);
+	skb = alloc_skb(len, GFP_KERNEL);
 	if (skb==NULL)
 		goto out;
 
 	NETLINK_CB(skb).pid	= nlk->pid;
-	NETLINK_CB(skb).dst_pid = dst_pid;
 	NETLINK_CB(skb).dst_group = dst_group;
 	NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
 	selinux_get_task_sid(current, &(NETLINK_CB(skb).sid));
@@ -1435,14 +1434,13 @@
 	struct sk_buff *skb;
 	struct nlmsghdr *rep;
 	struct nlmsgerr *errmsg;
-	int size;
+	size_t payload = sizeof(*errmsg);
 
-	if (err == 0)
-		size = nlmsg_total_size(sizeof(*errmsg));
-	else
-		size = nlmsg_total_size(sizeof(*errmsg) + nlmsg_len(nlh));
+	/* error messages get the original request appended */
+	if (err)
+		payload += nlmsg_len(nlh);
 
-	skb = nlmsg_new(size, GFP_KERNEL);
+	skb = nlmsg_new(payload, GFP_KERNEL);
 	if (!skb) {
 		struct sock *sk;
 
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 49bc2db..548e4e6 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -143,6 +143,13 @@
 		goto errout;
 	}
 
+	if (ops->dumpit)
+		ops->flags |= GENL_CMD_CAP_DUMP;
+	if (ops->doit)
+		ops->flags |= GENL_CMD_CAP_DO;
+	if (ops->policy)
+		ops->flags |= GENL_CMD_CAP_HASPOL;
+
 	genl_lock();
 	list_add_tail(&ops->ops_list, &family->ops_list);
 	genl_unlock();
@@ -331,7 +338,7 @@
 		}
 
 		*errp = err = netlink_dump_start(genl_sock, skb, nlh,
-						 ops->dumpit, NULL);
+						 ops->dumpit, ops->done);
 		if (err == 0)
 			skb_pull(skb, min(NLMSG_ALIGN(nlh->nlmsg_len),
 					  skb->len));
@@ -384,16 +391,19 @@
  * Controller
  **************************************************************************/
 
+static struct genl_family genl_ctrl = {
+	.id = GENL_ID_CTRL,
+	.name = "nlctrl",
+	.version = 0x2,
+	.maxattr = CTRL_ATTR_MAX,
+};
+
 static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
 			  u32 flags, struct sk_buff *skb, u8 cmd)
 {
-	struct nlattr *nla_ops;
-	struct genl_ops *ops;
 	void *hdr;
-	int idx = 1;
 
-	hdr = genlmsg_put(skb, pid, seq, GENL_ID_CTRL, 0, flags, cmd,
-			  family->version);
+	hdr = genlmsg_put(skb, pid, seq, &genl_ctrl, flags, cmd);
 	if (hdr == NULL)
 		return -1;
 
@@ -403,34 +413,31 @@
 	NLA_PUT_U32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize);
 	NLA_PUT_U32(skb, CTRL_ATTR_MAXATTR, family->maxattr);
 
-	nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
-	if (nla_ops == NULL)
-		goto nla_put_failure;
+	if (!list_empty(&family->ops_list)) {
+		struct nlattr *nla_ops;
+		struct genl_ops *ops;
+		int idx = 1;
 
-	list_for_each_entry(ops, &family->ops_list, ops_list) {
-		struct nlattr *nest;
-
-		nest = nla_nest_start(skb, idx++);
-		if (nest == NULL)
+		nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
+		if (nla_ops == NULL)
 			goto nla_put_failure;
 
-		NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd);
-		NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags);
+		list_for_each_entry(ops, &family->ops_list, ops_list) {
+			struct nlattr *nest;
 
-		if (ops->policy)
-			NLA_PUT_FLAG(skb, CTRL_ATTR_OP_POLICY);
+			nest = nla_nest_start(skb, idx++);
+			if (nest == NULL)
+				goto nla_put_failure;
 
-		if (ops->doit)
-			NLA_PUT_FLAG(skb, CTRL_ATTR_OP_DOIT);
+			NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd);
+			NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags);
 
-		if (ops->dumpit)
-			NLA_PUT_FLAG(skb, CTRL_ATTR_OP_DUMPIT);
+			nla_nest_end(skb, nest);
+		}
 
-		nla_nest_end(skb, nest);
+		nla_nest_end(skb, nla_ops);
 	}
 
-	nla_nest_end(skb, nla_ops);
-
 	return genlmsg_end(skb, hdr);
 
 nla_put_failure:
@@ -480,7 +487,7 @@
 	struct sk_buff *skb;
 	int err;
 
-	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (skb == NULL)
 		return ERR_PTR(-ENOBUFS);
 
@@ -529,7 +536,7 @@
 		goto errout;
 	}
 
-	err = genlmsg_unicast(msg, info->snd_pid);
+	err = genlmsg_reply(msg, info);
 errout:
 	return err;
 }
@@ -562,13 +569,6 @@
 	.policy		= ctrl_policy,
 };
 
-static struct genl_family genl_ctrl = {
-	.id = GENL_ID_CTRL,
-	.name = "nlctrl",
-	.version = 0x1,
-	.maxattr = CTRL_ATTR_MAX,
-};
-
 static int __init genl_init(void)
 {
 	int i, err;
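
Two behavioural notes on the genetlink.c hunk: an operation's GENL_CMD_CAP_DO/GENL_CMD_CAP_DUMP/GENL_CMD_CAP_HASPOL flags are now derived from the callbacks it supplies rather than set by the registering family, and dump requests pass ops->done through to netlink_dump_start(). A sketch of what a registration looks like after this change (all names here are hypothetical):

	static struct genl_ops my_ops = {
		.cmd	= MY_CMD_LIST,
		.doit	= my_list_doit,
		.dumpit	= my_list_dumpit,
		.done	= my_list_done,
		.policy	= my_policy,
		/* GENL_CMD_CAP_DO, _DUMP and _HASPOL are filled in by
		 * genl_register_ops() from the fields above */
	};

	err = genl_register_ops(&my_gnl_family, &my_ops);
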
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index c11737f..0096105 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -155,14 +155,15 @@
 		atomic_set(&nr_neigh->refcount, 1);
 
 		if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
-			if ((nr_neigh->digipeat = kmalloc(sizeof(*ax25_digi), GFP_KERNEL)) == NULL) {
+			nr_neigh->digipeat = kmemdup(ax25_digi,
+						     sizeof(*ax25_digi),
+						     GFP_KERNEL);
+			if (nr_neigh->digipeat == NULL) {
 				kfree(nr_neigh);
 				if (nr_node)
 					nr_node_put(nr_node);
 				return -ENOMEM;
 			}
-			memcpy(nr_neigh->digipeat, ax25_digi,
-					sizeof(*ax25_digi));
 		}
 
 		spin_lock_bh(&nr_neigh_list_lock);
@@ -432,11 +433,12 @@
 	atomic_set(&nr_neigh->refcount, 1);
 
 	if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
-		if ((nr_neigh->digipeat = kmalloc(sizeof(*ax25_digi), GFP_KERNEL)) == NULL) {
+		nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),
+					     GFP_KERNEL);
+		if (nr_neigh->digipeat == NULL) {
 			kfree(nr_neigh);
 			return -ENOMEM;
 		}
-		memcpy(nr_neigh->digipeat, ax25_digi, sizeof(*ax25_digi));
 	}
 
 	spin_lock_bh(&nr_neigh_list_lock);
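
Several hunks in this section (nr_route.c above, plus act_ipt.c, act_simple.c, em_meta.c, em_nbyte.c and ematch.c below) are the same mechanical conversion: a kmalloc() immediately followed by a memcpy() of the whole buffer collapses into a single kmemdup() call. The shape of the conversion, with src and len standing in for the caller's specifics:

	/* before */
	dst = kmalloc(len, GFP_KERNEL);
	if (dst == NULL)
		return -ENOMEM;
	memcpy(dst, src, len);

	/* after */
	dst = kmemdup(src, len, GFP_KERNEL);
	if (dst == NULL)
		return -ENOMEM;
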
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f4ccb90..da73e8a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -71,6 +71,7 @@
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
 #include <asm/page.h>
+#include <asm/cacheflush.h>
 #include <asm/io.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -201,7 +202,7 @@
 	spinlock_t		bind_lock;
 	char			running;	/* prot_hook is attached*/
 	int			ifindex;	/* bound device		*/
-	unsigned short		num;
+	__be16			num;
 #ifdef CONFIG_PACKET_MULTICAST
 	struct packet_mclist	*mclist;
 #endif
@@ -331,7 +332,7 @@
 	struct sockaddr_pkt *saddr=(struct sockaddr_pkt *)msg->msg_name;
 	struct sk_buff *skb;
 	struct net_device *dev;
-	unsigned short proto=0;
+	__be16 proto=0;
 	int err;
 	
 	/*
@@ -659,7 +660,7 @@
 	sll->sll_ifindex = dev->ifindex;
 
 	h->tp_status = status;
-	mb();
+	smp_mb();
 
 	{
 		struct page *p_start, *p_end;
@@ -704,7 +705,7 @@
 	struct sockaddr_ll *saddr=(struct sockaddr_ll *)msg->msg_name;
 	struct sk_buff *skb;
 	struct net_device *dev;
-	unsigned short proto;
+	__be16 proto;
 	unsigned char *addr;
 	int ifindex, err, reserve = 0;
 
@@ -858,7 +859,7 @@
  *	Attach a packet hook.
  */
 
-static int packet_do_bind(struct sock *sk, struct net_device *dev, int protocol)
+static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
 {
 	struct packet_sock *po = pkt_sk(sk);
 	/*
@@ -983,6 +984,7 @@
 {
 	struct sock *sk;
 	struct packet_sock *po;
+	__be16 proto = (__force __be16)protocol; /* weird, but documented */
 	int err;
 
 	if (!capable(CAP_NET_RAW))
@@ -1010,7 +1012,7 @@
 
 	po = pkt_sk(sk);
 	sk->sk_family = PF_PACKET;
-	po->num = protocol;
+	po->num = proto;
 
 	sk->sk_destruct = packet_sock_destruct;
 	atomic_inc(&packet_socks_nr);
@@ -1027,8 +1029,8 @@
 #endif
 	po->prot_hook.af_packet_priv = sk;
 
-	if (protocol) {
-		po->prot_hook.type = protocol;
+	if (proto) {
+		po->prot_hook.type = proto;
 		dev_add_pack(&po->prot_hook);
 		sock_hold(sk);
 		po->running = 1;
@@ -1624,7 +1626,8 @@
 {
 	char **pg_vec = NULL;
 	struct packet_sock *po = pkt_sk(sk);
-	int was_running, num, order = 0;
+	int was_running, order = 0;
+	__be16 num;
 	int err = 0;
 	
 	if (req->tp_block_nr) {
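
The af_packet.c changes are largely sparse endianness annotations: protocol numbers that travel on the wire become __be16 so that sparse can flag accidental host-order/network-order mixes, and the one untyped value (the integer protocol argument to socket(2), which per the in-line comment is documented to carry a network-order value) is converted with an explicit __force cast. A small illustration of the annotations involved, assuming linux/if_ether.h is available:

	__be16 proto = htons(ETH_P_IP);		/* network byte order */
	unsigned short host = ntohs(proto);	/* host byte order again */

	/* the socket() protocol argument arrives as a plain int, so the
	 * conversion to the annotated type is made explicit for sparse: */
	__be16 num = (__force __be16)protocol;
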
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index a22542f..7252344 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -396,7 +396,7 @@
 int rose_add_loopback_node(rose_address *address)
 {
 	struct rose_node *rose_node;
-	unsigned int err = 0;
+	int err = 0;
 
 	spin_lock_bh(&rose_node_list_lock);
 
@@ -432,7 +432,7 @@
 out:
 	spin_unlock_bh(&rose_node_list_lock);
 
-	return 0;
+	return err;
 }
 
 /*
diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c
index dada34a..49effd9 100644
--- a/net/rxrpc/krxiod.c
+++ b/net/rxrpc/krxiod.c
@@ -13,6 +13,7 @@
 #include <linux/completion.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/freezer.h>
 #include <rxrpc/krxiod.h>
 #include <rxrpc/transport.h>
 #include <rxrpc/peer.h>
diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c
index cea4eb5..3ab0f77 100644
--- a/net/rxrpc/krxsecd.c
+++ b/net/rxrpc/krxsecd.c
@@ -27,6 +27,7 @@
 #include <rxrpc/call.h>
 #include <linux/udp.h>
 #include <linux/ip.h>
+#include <linux/freezer.h>
 #include <net/sock.h>
 #include "internal.h"
 
diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c
index 3e74669..9a9b613 100644
--- a/net/rxrpc/krxtimod.c
+++ b/net/rxrpc/krxtimod.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
+#include <linux/freezer.h>
 #include <rxrpc/rxrpc.h>
 #include <rxrpc/krxtimod.h>
 #include <asm/errno.h>
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c
index 94b2e2f..4268b38 100644
--- a/net/rxrpc/transport.c
+++ b/net/rxrpc/transport.c
@@ -31,7 +31,6 @@
 #endif
 #include <linux/errqueue.h>
 #include <asm/uaccess.h>
-#include <asm/checksum.h>
 #include "internal.h"
 
 struct errormsg {
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 8298ea9..f4544dd 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -6,6 +6,7 @@
 
 config NET_SCHED
 	bool "QoS and/or fair queueing"
+	select NET_SCH_FIFO
 	---help---
 	  When the kernel has several packets to send out over a network
 	  device, it has to decide which ones to send first, which ones to
@@ -40,6 +41,9 @@
 	  The available schedulers are listed in the following questions; you
 	  can say Y to as many as you like. If unsure, say N now.
 
+config NET_SCH_FIFO
+	bool
+
 if NET_SCHED
 
 choice
@@ -320,7 +324,7 @@
 
 config CLS_U32_MARK
 	bool "Netfilter marks support"
-	depends on NET_CLS_U32 && NETFILTER
+	depends on NET_CLS_U32
 	---help---
 	  Say Y here to be able to use netfilter marks as u32 key.
 
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 0f06aec..ff2d6e5 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -4,7 +4,7 @@
 
 obj-y	:= sch_generic.o
 
-obj-$(CONFIG_NET_SCHED)		+= sch_api.o sch_fifo.o sch_blackhole.o
+obj-$(CONFIG_NET_SCHED)		+= sch_api.o sch_blackhole.o
 obj-$(CONFIG_NET_CLS)		+= cls_api.o
 obj-$(CONFIG_NET_CLS_ACT)	+= act_api.o
 obj-$(CONFIG_NET_ACT_POLICE)	+= act_police.o
@@ -14,6 +14,7 @@
 obj-$(CONFIG_NET_ACT_IPT)	+= act_ipt.o
 obj-$(CONFIG_NET_ACT_PEDIT)	+= act_pedit.o
 obj-$(CONFIG_NET_ACT_SIMP)	+= act_simple.o
+obj-$(CONFIG_NET_SCH_FIFO)	+= sch_fifo.o
 obj-$(CONFIG_NET_SCH_CBQ)	+= sch_cbq.o
 obj-$(CONFIG_NET_SCH_HTB)	+= sch_htb.o
 obj-$(CONFIG_NET_SCH_HPFQ)	+= sch_hpfq.o
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 6cff566..85de7ef 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -48,14 +48,14 @@
 #ifdef CONFIG_GACT_PROB
 static int gact_net_rand(struct tcf_gact *gact)
 {
-	if (net_random() % gact->tcfg_pval)
+	if (!gact->tcfg_pval || net_random() % gact->tcfg_pval)
 		return gact->tcf_action;
 	return gact->tcfg_paction;
 }
 
 static int gact_determ(struct tcf_gact *gact)
 {
-	if (gact->tcf_bstats.packets % gact->tcfg_pval)
+	if (!gact->tcfg_pval || gact->tcf_bstats.packets % gact->tcfg_pval)
 		return gact->tcf_action;
 	return gact->tcfg_paction;
 }
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index d8c9310..a960806 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -156,10 +156,9 @@
 	    rtattr_strlcpy(tname, tb[TCA_IPT_TABLE-1], IFNAMSIZ) >= IFNAMSIZ)
 		strcpy(tname, "mangle");
 
-	t = kmalloc(td->u.target_size, GFP_KERNEL);
+	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
 	if (unlikely(!t))
 		goto err2;
-	memcpy(t, td, td->u.target_size);
 
 	if ((err = ipt_init_target(t, tname, hook)) < 0)
 		goto err3;
@@ -256,13 +255,12 @@
 	** for foolproof you need to not assume this
 	*/
 
-	t = kmalloc(ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
+	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
 	if (unlikely(!t))
 		goto rtattr_failure;
 
 	c.bindcnt = ipt->tcf_bindcnt - bind;
 	c.refcnt = ipt->tcf_refcnt - ref;
-	memcpy(t, ipt->tcfi_t, ipt->tcfi_t->u.user.target_size);
 	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);
 
 	RTA_PUT(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index fed47b6..af68e1e 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -46,6 +46,18 @@
 	.lock	=	&police_lock,
 };
 
+/* old policer structure from before tc actions */
+struct tc_police_compat
+{
+	u32			index;
+	int			action;
+	u32			limit;
+	u32			burst;
+	u32			mtu;
+	struct tc_ratespec	rate;
+	struct tc_ratespec	peakrate;
+};
+
 /* Each policer is serialized by its individual spinlock */
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -131,12 +143,15 @@
 	struct tc_police *parm;
 	struct tcf_police *police;
 	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
+	int size;
 
 	if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
 		return -EINVAL;
 
-	if (tb[TCA_POLICE_TBF-1] == NULL ||
-	    RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]) != sizeof(*parm))
+	if (tb[TCA_POLICE_TBF-1] == NULL)
+		return -EINVAL;
+	size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
+	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
 		return -EINVAL;
 	parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
 
@@ -415,12 +430,15 @@
 	struct tcf_police *police;
 	struct rtattr *tb[TCA_POLICE_MAX];
 	struct tc_police *parm;
+	int size;
 
 	if (rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
 		return NULL;
 
-	if (tb[TCA_POLICE_TBF-1] == NULL ||
-	    RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]) != sizeof(*parm))
+	if (tb[TCA_POLICE_TBF-1] == NULL)
+		return NULL;
+	size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
+	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
 		return NULL;
 
 	parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
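
The act_police.c change accepts configuration from older tc binaries: blobs presumably built before the tc action rework carry the shorter struct tc_police_compat layout, so the attribute payload is now accepted if it matches either structure size instead of insisting on sizeof(struct tc_police). The validation in both places boils down to:

	size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
	if (size != sizeof(struct tc_police) &&
	    size != sizeof(struct tc_police_compat))
		return -EINVAL;
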
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 901571a..5fe8085 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -71,11 +71,10 @@
 
 static int alloc_defdata(struct tcf_defact *d, u32 datalen, void *defdata)
 {
-	d->tcfd_defdata = kmalloc(datalen, GFP_KERNEL);
+	d->tcfd_defdata = kmemdup(defdata, datalen, GFP_KERNEL);
 	if (unlikely(!d->tcfd_defdata))
 		return -ENOMEM;
 	d->tcfd_datalen = datalen;
-	memcpy(d->tcfd_defdata, defdata, datalen);
 	return 0;
 }
 
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 37a1840..edb8fc9 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -217,7 +217,7 @@
 		/* Create new proto tcf */
 
 		err = -ENOBUFS;
-		if ((tp = kmalloc(sizeof(*tp), GFP_KERNEL)) == NULL)
+		if ((tp = kzalloc(sizeof(*tp), GFP_KERNEL)) == NULL)
 			goto errout;
 		err = -EINVAL;
 		tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND-1]);
@@ -247,7 +247,6 @@
 			kfree(tp);
 			goto errout;
 		}
-		memset(tp, 0, sizeof(*tp));
 		tp->ops = tp_ops;
 		tp->protocol = protocol;
 		tp->prio = nprio ? : tcf_auto_prio(*back);
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index e54acc6..c797d6a 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -101,13 +101,10 @@
 	struct fw_head *head = (struct fw_head*)tp->root;
 	struct fw_filter *f;
 	int r;
-#ifdef CONFIG_NETFILTER
-	u32 id = skb->nfmark & head->mask;
-#else
-	u32 id = 0;
-#endif
+	u32 id = skb->mark;
 
 	if (head != NULL) {
+		id &= head->mask;
 		for (f=head->ht[fw_hash(id)]; f; f=f->next) {
 			if (f->id == id) {
 				*res = f->res;
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 6e230ec..587b9ad 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -77,7 +77,7 @@
 struct rsvp_session
 {
 	struct rsvp_session	*next;
-	u32			dst[RSVP_DST_LEN];
+	__be32			dst[RSVP_DST_LEN];
 	struct tc_rsvp_gpi 	dpi;
 	u8			protocol;
 	u8			tunnelid;
@@ -89,7 +89,7 @@
 struct rsvp_filter
 {
 	struct rsvp_filter	*next;
-	u32			src[RSVP_DST_LEN];
+	__be32			src[RSVP_DST_LEN];
 	struct tc_rsvp_gpi	spi;
 	u8			tunnelhdr;
 
@@ -100,17 +100,17 @@
 	struct rsvp_session	*sess;
 };
 
-static __inline__ unsigned hash_dst(u32 *dst, u8 protocol, u8 tunnelid)
+static __inline__ unsigned hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
 {
-	unsigned h = dst[RSVP_DST_LEN-1];
+	unsigned h = (__force __u32)dst[RSVP_DST_LEN-1];
 	h ^= h>>16;
 	h ^= h>>8;
 	return (h ^ protocol ^ tunnelid) & 0xFF;
 }
 
-static __inline__ unsigned hash_src(u32 *src)
+static __inline__ unsigned hash_src(__be32 *src)
 {
-	unsigned h = src[RSVP_DST_LEN-1];
+	unsigned h = (__force __u32)src[RSVP_DST_LEN-1];
 	h ^= h>>16;
 	h ^= h>>8;
 	h ^= h>>4;
@@ -138,7 +138,7 @@
 	struct rsvp_session *s;
 	struct rsvp_filter *f;
 	unsigned h1, h2;
-	u32 *dst, *src;
+	__be32 *dst, *src;
 	u8 protocol;
 	u8 tunnelid = 0;
 	u8 *xprt;
@@ -410,7 +410,7 @@
 	struct rtattr *tb[TCA_RSVP_MAX];
 	struct tcf_exts e;
 	unsigned h1, h2;
-	u32 *dst;
+	__be32 *dst;
 	int err;
 
 	if (opt == NULL)
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 0a6cfa0..8b51948 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -143,7 +143,7 @@
 #endif
 
 #ifdef CONFIG_CLS_U32_MARK
-		if ((skb->nfmark & n->mark.mask) != n->mark.val) {
+		if ((skb->mark & n->mark.mask) != n->mark.val) {
 			n = n->next;
 			goto next_knode;
 		} else {
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 61e3b74..45d47d3 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -208,13 +208,9 @@
  * Netfilter
  **************************************************************************/
 
-META_COLLECTOR(int_nfmark)
+META_COLLECTOR(int_mark)
 {
-#ifdef CONFIG_NETFILTER
-	dst->value = skb->nfmark;
-#else
-	dst->value = 0;
-#endif
+	dst->value = skb->mark;
 }
 
 /**************************************************************************
@@ -490,7 +486,7 @@
 		[META_ID(PKTLEN)]		= META_FUNC(int_pktlen),
 		[META_ID(DATALEN)]		= META_FUNC(int_datalen),
 		[META_ID(MACLEN)]		= META_FUNC(int_maclen),
-		[META_ID(NFMARK)]		= META_FUNC(int_nfmark),
+		[META_ID(NFMARK)]		= META_FUNC(int_mark),
 		[META_ID(TCINDEX)]		= META_FUNC(int_tcindex),
 		[META_ID(RTCLASSID)]		= META_FUNC(int_rtclassid),
 		[META_ID(RTIIF)]		= META_FUNC(int_rtiif),
@@ -550,10 +546,9 @@
 {
 	int len = RTA_PAYLOAD(rta);
 
-	dst->val = (unsigned long) kmalloc(len, GFP_KERNEL);
+	dst->val = (unsigned long)kmemdup(RTA_DATA(rta), len, GFP_KERNEL);
 	if (dst->val == 0UL)
 		return -ENOMEM;
-	memcpy((void *) dst->val, RTA_DATA(rta), len);
 	dst->len = len;
 	return 0;
 }
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index cc80bab..005db40 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -34,12 +34,10 @@
 		return -EINVAL;
 
 	em->datalen = sizeof(*nbyte) + nbyte->len;
-	em->data = (unsigned long) kmalloc(em->datalen, GFP_KERNEL);
+	em->data = (unsigned long)kmemdup(data, em->datalen, GFP_KERNEL);
 	if (em->data == 0UL)
 		return -ENOBUFS;
 
-	memcpy((void *) em->data, data, em->datalen);
-
 	return 0;
 }
 
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 0fd0768..8f8a16d 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -251,12 +251,11 @@
 					goto errout;
 				em->data = *(u32 *) data;
 			} else {
-				void *v = kmalloc(data_len, GFP_KERNEL);
+				void *v = kmemdup(data, data_len, GFP_KERNEL);
 				if (v == NULL) {
 					err = -ENOBUFS;
 					goto errout;
 				}
-				memcpy(v, data, data_len);
 				em->data = (unsigned long) v;
 			}
 		}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 0b64892..65825f4 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -191,19 +191,25 @@
    (root qdisc, all its children, children of children etc.)
  */
 
+static struct Qdisc *__qdisc_lookup(struct net_device *dev, u32 handle)
+{
+	struct Qdisc *q;
+
+	list_for_each_entry(q, &dev->qdisc_list, list) {
+		if (q->handle == handle)
+			return q;
+	}
+	return NULL;
+}
+
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
 	struct Qdisc *q;
 
 	read_lock(&qdisc_tree_lock);
-	list_for_each_entry(q, &dev->qdisc_list, list) {
-		if (q->handle == handle) {
-			read_unlock(&qdisc_tree_lock);
-			return q;
-		}
-	}
+	q = __qdisc_lookup(dev, handle);
 	read_unlock(&qdisc_tree_lock);
-	return NULL;
+	return q;
 }
 
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
@@ -348,6 +354,26 @@
 	return oqdisc;
 }
 
+void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
+{
+	struct Qdisc_class_ops *cops;
+	unsigned long cl;
+	u32 parentid;
+
+	if (n == 0)
+		return;
+	while ((parentid = sch->parent)) {
+		sch = __qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
+		cops = sch->ops->cl_ops;
+		if (cops->qlen_notify) {
+			cl = cops->get(sch, parentid);
+			cops->qlen_notify(sch, cl);
+			cops->put(sch, cl);
+		}
+		sch->q.qlen -= n;
+	}
+}
+EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
 
 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
    to device "dev".
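
The new qdisc_tree_decrease_qlen() helper walks from a qdisc up to the root via the parent handles now recorded by qdisc_create_dflt(), invoking each parent class's qlen_notify() callback and subtracting the dropped packets from every ancestor's q.qlen. The hunks that follow replace ad-hoc sch->q.qlen adjustments with this helper; the common graft pattern when a child qdisc is swapped now reads:

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
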
@@ -1112,7 +1138,7 @@
 	struct tcf_result *res)
 {
 	int err = 0;
-	u32 protocol = skb->protocol;
+	__be16 protocol = skb->protocol;
 #ifdef CONFIG_NET_CLS_ACT
 	struct tcf_proto *otp = tp;
 reclassify:
@@ -1277,7 +1303,6 @@
 
 subsys_initcall(pktsched_init);
 
-EXPORT_SYMBOL(qdisc_lookup);
 EXPORT_SYMBOL(qdisc_get_rtab);
 EXPORT_SYMBOL(qdisc_put_rtab);
 EXPORT_SYMBOL(register_qdisc);
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index dbf44da..edc7bb0 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -316,7 +316,7 @@
 	}
 	memset(flow,0,sizeof(*flow));
 	flow->filter_list = NULL;
-	if (!(flow->q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops)))
+	if (!(flow->q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops,classid)))
 		flow->q = &noop_qdisc;
 	DPRINTK("atm_tc_change: qdisc %p\n",flow->q);
 	flow->sock = sock;
@@ -576,7 +576,8 @@
 
 	DPRINTK("atm_tc_init(sch %p,[qdisc %p],opt %p)\n",sch,p,opt);
 	p->flows = &p->link;
-	if(!(p->link.q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops)))
+	if(!(p->link.q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops,
+					   sch->handle)))
 		p->link.q = &noop_qdisc;
 	DPRINTK("atm_tc_init: link (%p) qdisc %p\n",&p->link,p->link.q);
 	p->link.filter_list = NULL;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index bac881b..ba82dfa 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1429,7 +1429,8 @@
 	q->link.sibling = &q->link;
 	q->link.classid = sch->handle;
 	q->link.qdisc = sch;
-	if (!(q->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
+	if (!(q->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+					    sch->handle)))
 		q->link.q = &noop_qdisc;
 
 	q->link.priority = TC_CBQ_MAXPRIO-1;
@@ -1674,7 +1675,8 @@
 
 	if (cl) {
 		if (new == NULL) {
-			if ((new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)) == NULL)
+			if ((new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+						     cl->classid)) == NULL)
 				return -ENOBUFS;
 		} else {
 #ifdef CONFIG_NET_CLS_POLICE
@@ -1685,7 +1687,7 @@
 		sch_tree_lock(sch);
 		*old = cl->q;
 		cl->q = new;
-		sch->q.qlen -= (*old)->q.qlen;
+		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 		qdisc_reset(*old);
 		sch_tree_unlock(sch);
 
@@ -1932,7 +1934,7 @@
 	cl->R_tab = rtab;
 	rtab = NULL;
 	cl->refcnt = 1;
-	if (!(cl->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
+	if (!(cl->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid)))
 		cl->q = &noop_qdisc;
 	cl->classid = classid;
 	cl->tparent = parent;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 11c8a21..d542181 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -88,15 +88,16 @@
 		sch, p, new, old);
 
 	if (new == NULL) {
-		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+					sch->handle);
 		if (new == NULL)
 			new = &noop_qdisc;
 	}
 
 	sch_tree_lock(sch);
 	*old = xchg(&p->q, new);
+	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 	qdisc_reset(*old);
-	sch->q.qlen = 0;
 	sch_tree_unlock(sch);
 
         return 0;
@@ -307,7 +308,7 @@
 			if (p->mask[index] != 0xff || p->value[index])
 				printk(KERN_WARNING "dsmark_dequeue: "
 				       "unsupported protocol %d\n",
-				       htons(skb->protocol));
+				       ntohs(skb->protocol));
 			break;
 	};
 
@@ -387,7 +388,7 @@
 	p->default_index = default_index;
 	p->set_tc_index = RTA_GET_FLAG(tb[TCA_DSMARK_SET_TC_INDEX-1]);
 
-	p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+	p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, sch->handle);
 	if (p->q == NULL)
 		p->q = &noop_qdisc;
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 88c6a99..bc116bd 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -450,13 +450,15 @@
 	return ERR_PTR(-err);
 }
 
-struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
+struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
+				 unsigned int parentid)
 {
 	struct Qdisc *sch;
 	
 	sch = qdisc_alloc(dev, ops);
 	if (IS_ERR(sch))
 		goto errout;
+	sch->parent = parentid;
 
 	if (!ops->init || ops->init(sch, NULL) == 0)
 		return sch;
@@ -520,7 +522,8 @@
 	if (dev->qdisc_sleeping == &noop_qdisc) {
 		struct Qdisc *qdisc;
 		if (dev->tx_queue_len) {
-			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops);
+			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops,
+						  TC_H_ROOT);
 			if (qdisc == NULL) {
 				printk(KERN_INFO "%s: activation failed\n", dev->name);
 				return;
@@ -606,13 +609,10 @@
 	qdisc_unlock_tree(dev);
 }
 
-EXPORT_SYMBOL(__netdev_watchdog_up);
 EXPORT_SYMBOL(netif_carrier_on);
 EXPORT_SYMBOL(netif_carrier_off);
 EXPORT_SYMBOL(noop_qdisc);
-EXPORT_SYMBOL(noop_qdisc_ops);
 EXPORT_SYMBOL(qdisc_create_dflt);
-EXPORT_SYMBOL(qdisc_alloc);
 EXPORT_SYMBOL(qdisc_destroy);
 EXPORT_SYMBOL(qdisc_reset);
 EXPORT_SYMBOL(qdisc_lock_tree);
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 6a6735a..6eefa69 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -946,6 +946,7 @@
 	if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
 		if (net_ratelimit())
 			printk("qdisc_peek_len: failed to requeue\n");
+		qdisc_tree_decrease_qlen(sch, 1);
 		return 0;
 	}
 	return len;
@@ -957,11 +958,7 @@
 	unsigned int len = cl->qdisc->q.qlen;
 
 	qdisc_reset(cl->qdisc);
-	if (len > 0) {
-		update_vf(cl, 0, 0);
-		set_passive(cl);
-		sch->q.qlen -= len;
-	}
+	qdisc_tree_decrease_qlen(cl->qdisc, len);
 }
 
 static void
@@ -1138,7 +1135,7 @@
 	cl->classid   = classid;
 	cl->sched     = q;
 	cl->cl_parent = parent;
-	cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+	cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
 	if (cl->qdisc == NULL)
 		cl->qdisc = &noop_qdisc;
 	cl->stats_lock = &sch->dev->queue_lock;
@@ -1271,7 +1268,8 @@
 	if (cl->level > 0)
 		return -EINVAL;
 	if (new == NULL) {
-		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+					cl->classid);
 		if (new == NULL)
 			new = &noop_qdisc;
 	}
@@ -1294,6 +1292,17 @@
 	return NULL;
 }
 
+static void
+hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
+{
+	struct hfsc_class *cl = (struct hfsc_class *)arg;
+
+	if (cl->qdisc->q.qlen == 0) {
+		update_vf(cl, 0, 0);
+		set_passive(cl);
+	}
+}
+
 static unsigned long
 hfsc_get_class(struct Qdisc *sch, u32 classid)
 {
@@ -1514,7 +1523,8 @@
 	q->root.refcnt  = 1;
 	q->root.classid = sch->handle;
 	q->root.sched   = q;
-	q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+	q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+					  sch->handle);
 	if (q->root.qdisc == NULL)
 		q->root.qdisc = &noop_qdisc;
 	q->root.stats_lock = &sch->dev->queue_lock;
@@ -1777,6 +1787,7 @@
 	.delete		= hfsc_delete_class,
 	.graft		= hfsc_graft_class,
 	.leaf		= hfsc_class_leaf,
+	.qlen_notify	= hfsc_qlen_notify,
 	.get		= hfsc_get_class,
 	.put		= hfsc_put_class,
 	.bind_tcf	= hfsc_bind_tcf,
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 4b52fa7..215e68c 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1223,17 +1223,14 @@
 	struct htb_class *cl = (struct htb_class *)arg;
 
 	if (cl && !cl->level) {
-		if (new == NULL && (new = qdisc_create_dflt(sch->dev,
-							    &pfifo_qdisc_ops))
+		if (new == NULL &&
+		    (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+		    			     cl->classid))
 		    == NULL)
 			return -ENOBUFS;
 		sch_tree_lock(sch);
 		if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
-			if (cl->prio_activity)
-				htb_deactivate(qdisc_priv(sch), cl);
-
-			/* TODO: is it correct ? Why CBQ doesn't do it ? */
-			sch->q.qlen -= (*old)->q.qlen;
+			qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 			qdisc_reset(*old);
 		}
 		sch_tree_unlock(sch);
@@ -1248,6 +1245,14 @@
 	return (cl && !cl->level) ? cl->un.leaf.q : NULL;
 }
 
+static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
+{
+	struct htb_class *cl = (struct htb_class *)arg;
+
+	if (cl->un.leaf.q->q.qlen == 0)
+		htb_deactivate(qdisc_priv(sch), cl);
+}
+
 static unsigned long htb_get(struct Qdisc *sch, u32 classid)
 {
 	struct htb_class *cl = htb_find(classid, sch);
@@ -1269,9 +1274,9 @@
 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 {
 	struct htb_sched *q = qdisc_priv(sch);
+
 	if (!cl->level) {
 		BUG_TRAP(cl->un.leaf.q);
-		sch->q.qlen -= cl->un.leaf.q->q.qlen;
 		qdisc_destroy(cl->un.leaf.q);
 	}
 	qdisc_put_rtab(cl->rate);
@@ -1322,6 +1327,7 @@
 {
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = (struct htb_class *)arg;
+	unsigned int qlen;
 
 	// TODO: why don't allow to delete subtree ? references ? does
 	// tc subsys quarantee us that in htb_destroy it holds no class
@@ -1334,6 +1340,12 @@
 	/* delete from hash and active; remainder in destroy_class */
 	hlist_del_init(&cl->hlist);
 
+	if (!cl->level) {
+		qlen = cl->un.leaf.q->q.qlen;
+		qdisc_reset(cl->un.leaf.q);
+		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
+	}
+
 	if (cl->prio_activity)
 		htb_deactivate(q, cl);
 
@@ -1410,11 +1422,14 @@
 		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
 		   so that can't be used inside of sch_tree_lock
 		   -- thanks to Karlis Peisenieks */
-		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
 		sch_tree_lock(sch);
 		if (parent && !parent->level) {
+			unsigned int qlen = parent->un.leaf.q->q.qlen;
+
 			/* turn parent into inner node */
-			sch->q.qlen -= parent->un.leaf.q->q.qlen;
+			qdisc_reset(parent->un.leaf.q);
+			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
 			qdisc_destroy(parent->un.leaf.q);
 			if (parent->prio_activity)
 				htb_deactivate(q, parent);
@@ -1562,6 +1577,7 @@
 static struct Qdisc_class_ops htb_class_ops = {
 	.graft		=	htb_graft,
 	.leaf		=	htb_leaf,
+	.qlen_notify	=	htb_qlen_notify,
 	.get		=	htb_get,
 	.put		=	htb_put,
 	.change		=	htb_change_class,
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 0441876..79542af 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -287,13 +287,10 @@
 			psched_tdiff_t delay = PSCHED_TDIFF(cb->time_to_send, now);
 
 			if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
+				qdisc_tree_decrease_qlen(q->qdisc, 1);
 				sch->qstats.drops++;
-
-				/* After this qlen is confused */
 				printk(KERN_ERR "netem: queue discpline %s could not requeue\n",
 				       q->qdisc->ops->id);
-
-				sch->q.qlen--;
 			}
 
 			mod_timer(&q->timer, jiffies + PSCHED_US2JIFFIE(delay));
@@ -574,7 +571,8 @@
 	q->timer.function = netem_watchdog;
 	q->timer.data = (unsigned long) sch;
 
-	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops);
+	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
+				     TC_H_MAKE(sch->handle, 1));
 	if (!q->qdisc) {
 		pr_debug("netem: qdisc create failed\n");
 		return -ENOMEM;
@@ -661,8 +659,8 @@
 
 	sch_tree_lock(sch);
 	*old = xchg(&q->qdisc, new);
+	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 	qdisc_reset(*old);
-	sch->q.qlen = 0;
 	sch_tree_unlock(sch);
 
 	return 0;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index a5fa03c..2567b4c 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -222,21 +222,27 @@
 
 	for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
 		struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
-		if (child != &noop_qdisc)
+		if (child != &noop_qdisc) {
+			qdisc_tree_decrease_qlen(child, child->q.qlen);
 			qdisc_destroy(child);
+		}
 	}
 	sch_tree_unlock(sch);
 
 	for (i=0; i<q->bands; i++) {
 		if (q->queues[i] == &noop_qdisc) {
 			struct Qdisc *child;
-			child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+			child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+						  TC_H_MAKE(sch->handle, i + 1));
 			if (child) {
 				sch_tree_lock(sch);
 				child = xchg(&q->queues[i], child);
 
-				if (child != &noop_qdisc)
+				if (child != &noop_qdisc) {
+					qdisc_tree_decrease_qlen(child,
+								 child->q.qlen);
 					qdisc_destroy(child);
+				}
 				sch_tree_unlock(sch);
 			}
 		}
@@ -294,7 +300,7 @@
 	sch_tree_lock(sch);
 	*old = q->queues[band];
 	q->queues[band] = new;
-	sch->q.qlen -= (*old)->q.qlen;
+	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 	qdisc_reset(*old);
 	sch_tree_unlock(sch);
 
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index d65cadd..acddad0 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -175,12 +175,14 @@
 	qdisc_destroy(q->qdisc);
 }
 
-static struct Qdisc *red_create_dflt(struct net_device *dev, u32 limit)
+static struct Qdisc *red_create_dflt(struct Qdisc *sch, u32 limit)
 {
-	struct Qdisc *q = qdisc_create_dflt(dev, &bfifo_qdisc_ops);
+	struct Qdisc *q;
 	struct rtattr *rta;
 	int ret;
 
+	q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
+			      TC_H_MAKE(sch->handle, 1));
 	if (q) {
 		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)),
 		              GFP_KERNEL);
@@ -219,7 +221,7 @@
 	ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);
 
 	if (ctl->limit > 0) {
-		child = red_create_dflt(sch->dev, ctl->limit);
+		child = red_create_dflt(sch, ctl->limit);
 		if (child == NULL)
 			return -ENOMEM;
 	}
@@ -227,8 +229,10 @@
 	sch_tree_lock(sch);
 	q->flags = ctl->flags;
 	q->limit = ctl->limit;
-	if (child)
+	if (child) {
+		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
 		qdisc_destroy(xchg(&q->qdisc, child));
+	}
 
 	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
 				 ctl->Plog, ctl->Scell_log,
@@ -306,8 +310,8 @@
 
 	sch_tree_lock(sch);
 	*old = xchg(&q->qdisc, new);
+	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 	qdisc_reset(*old);
-	sch->q.qlen = 0;
 	sch_tree_unlock(sch);
 	return 0;
 }
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d0d6e59..459cda2 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -393,6 +393,7 @@
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	struct tc_sfq_qopt *ctl = RTA_DATA(opt);
+	unsigned int qlen;
 
 	if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
 		return -EINVAL;
@@ -403,8 +404,10 @@
 	if (ctl->limit)
 		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH);
 
+	qlen = sch->q.qlen;
 	while (sch->q.qlen >= q->limit-1)
 		sfq_drop(sch);
+	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
 
 	del_timer(&q->perturb_timer);
 	if (q->perturb_period) {
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index d9a5d29..ed9b6d9 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -250,7 +250,7 @@
 
 		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
 			/* When requeue fails skb is dropped */
-			sch->q.qlen--;
+			qdisc_tree_decrease_qlen(q->qdisc, 1);
 			sch->qstats.drops++;
 		}
 
@@ -273,12 +273,14 @@
 	del_timer(&q->wd_timer);
 }
 
-static struct Qdisc *tbf_create_dflt_qdisc(struct net_device *dev, u32 limit)
+static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit)
 {
-	struct Qdisc *q = qdisc_create_dflt(dev, &bfifo_qdisc_ops);
+	struct Qdisc *q;
         struct rtattr *rta;
 	int ret;
 
+	q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
+			      TC_H_MAKE(sch->handle, 1));
 	if (q) {
 		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
 		if (rta) {
@@ -341,13 +343,15 @@
 		goto done;
 
 	if (qopt->limit > 0) {
-		if ((child = tbf_create_dflt_qdisc(sch->dev, qopt->limit)) == NULL)
+		if ((child = tbf_create_dflt_qdisc(sch, qopt->limit)) == NULL)
 			goto done;
 	}
 
 	sch_tree_lock(sch);
-	if (child)
+	if (child) {
+		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
 		qdisc_destroy(xchg(&q->qdisc, child));
+	}
 	q->limit = qopt->limit;
 	q->mtu = qopt->mtu;
 	q->max_size = max_size;
@@ -449,8 +453,8 @@
 
 	sch_tree_lock(sch);
 	*old = xchg(&q->qdisc, new);
+	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
 	qdisc_reset(*old);
-	sch->q.qlen = 0;
 	sch_tree_unlock(sch);
 
 	return 0;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index ed0445f..ad0057d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -61,7 +61,7 @@
 #include <net/sctp/sm.h>
 
 /* Forward declarations for internal functions. */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc);
+static void sctp_assoc_bh_rcv(struct work_struct *work);
 
 
 /* 1st Level Abstractions. */
@@ -269,9 +269,7 @@
 
 	/* Create an input queue.  */
 	sctp_inq_init(&asoc->base.inqueue);
-	sctp_inq_set_th_handler(&asoc->base.inqueue,
-				    (void (*)(void *))sctp_assoc_bh_rcv,
-				    asoc);
+	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
 
 	/* Create an output queue.  */
 	sctp_outq_init(asoc, &asoc->outqueue);
@@ -488,7 +486,7 @@
 				 " port: %d\n",
 				 asoc,
 				 (&peer->ipaddr),
-				 peer->ipaddr.v4.sin_port);
+				 ntohs(peer->ipaddr.v4.sin_port));
 
 	/* If we are to remove the current retran_path, update it
 	 * to the next peer before removing this peer from the list.
@@ -537,13 +535,13 @@
 	sp = sctp_sk(asoc->base.sk);
 
 	/* AF_INET and AF_INET6 share common port field. */
-	port = addr->v4.sin_port;
+	port = ntohs(addr->v4.sin_port);
 
 	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
 				 " port: %d state:%d\n",
 				 asoc,
 				 addr,
-				 addr->v4.sin_port,
+				 port,
 				 peer_state);
 
 	/* Set the port if it has not been set yet.  */
@@ -709,6 +707,7 @@
 	struct sctp_transport *first;
 	struct sctp_transport *second;
 	struct sctp_ulpevent *event;
+	struct sockaddr_storage addr;
 	struct list_head *pos;
 	int spc_state = 0;
 
@@ -731,8 +730,9 @@
 	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
 	 * user.
 	 */
-	event = sctp_ulpevent_make_peer_addr_change(asoc,
-				(struct sockaddr_storage *) &transport->ipaddr,
+	memset(&addr, 0, sizeof(struct sockaddr_storage));
+	memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len);
+	event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
 				0, spc_state, error, GFP_ATOMIC);
 	if (event)
 		sctp_ulpq_tail_event(&asoc->ulpq, event);
@@ -868,7 +868,7 @@
 	struct list_head *entry, *pos;
 	struct sctp_transport *transport;
 	struct sctp_chunk *chunk;
-	__u32 key = htonl(tsn);
+	__be32 key = htonl(tsn);
 
 	match = NULL;
 
@@ -926,8 +926,8 @@
 
 	sctp_read_lock(&asoc->base.addr_lock);
 
-	if ((asoc->base.bind_addr.port == laddr->v4.sin_port) &&
-	    (asoc->peer.port == paddr->v4.sin_port)) {
+	if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
+	    (htons(asoc->peer.port) == paddr->v4.sin_port)) {
 		transport = sctp_assoc_lookup_paddr(asoc, paddr);
 		if (!transport)
 			goto out;
@@ -944,8 +944,11 @@
 }
 
 /* Do delayed input processing.  This is scheduled by sctp_rcv(). */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc)
+static void sctp_assoc_bh_rcv(struct work_struct *work)
 {
+	struct sctp_association *asoc =
+		container_of(work, struct sctp_association,
+			     base.inqueue.immediate);
 	struct sctp_endpoint *ep;
 	struct sctp_chunk *chunk;
 	struct sock *sk;
@@ -1135,7 +1138,7 @@
 				 " port: %d\n",
 				 asoc,
 				 (&t->ipaddr),
-				 t->ipaddr.v4.sin_port);
+				 ntohs(t->ipaddr.v4.sin_port));
 }
 
 /* Choose the transport for sending a INIT packet.  */
@@ -1160,7 +1163,7 @@
 				 " port: %d\n",
 				 asoc,
 				 (&t->ipaddr),
-				 t->ipaddr.v4.sin_port);
+				 ntohs(t->ipaddr.v4.sin_port));
 
 	return t;
 }
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 2b9c12a..0099415 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -161,7 +161,7 @@
 	 * Both v4 and v6 have the port at the same offset.
 	 */
 	if (!addr->a.v4.sin_port)
-		addr->a.v4.sin_port = bp->port;
+		addr->a.v4.sin_port = htons(bp->port);
 
 	addr->use_as_src = use_as_src;
 
@@ -275,7 +275,7 @@
 			break;
 		}
 
-		af->from_addr_param(&addr, rawaddr, port, 0);
+		af->from_addr_param(&addr, rawaddr, htons(port), 0);
 		retval = sctp_add_bind_addr(bp, &addr, 1, gfp);
 		if (retval) {
 			/* Can't finish building the list, clean up. */
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 9b6b394..1297569 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -61,7 +61,7 @@
 #include <net/sctp/sm.h>
 
 /* Forward declarations for internal helpers. */
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep);
+static void sctp_endpoint_bh_rcv(struct work_struct *work);
 
 /*
  * Initialize the base fields of the endpoint structure.
@@ -72,6 +72,10 @@
 {
 	memset(ep, 0, sizeof(struct sctp_endpoint));
 
+	ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
+	if (!ep->digest)
+		return NULL;
+
 	/* Initialize the base structure. */
 	/* What type of endpoint are we?  */
 	ep->base.type = SCTP_EP_TYPE_SOCKET;
@@ -85,8 +89,7 @@
 	sctp_inq_init(&ep->base.inqueue);
 
 	/* Set its top-half handler */
-	sctp_inq_set_th_handler(&ep->base.inqueue,
-				(void (*)(void *))sctp_endpoint_bh_rcv, ep);
+	sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
 
 	/* Initialize the bind addr area */
 	sctp_bind_addr_init(&ep->base.bind_addr, 0);
@@ -182,6 +185,9 @@
 	/* Free up the HMAC transform. */
 	crypto_free_hash(sctp_sk(ep->base.sk)->hmac);
 
+	/* Free the digest buffer */
+	kfree(ep->digest);
+
 	/* Cleanup. */
 	sctp_inq_free(&ep->base.inqueue);
 	sctp_bind_addr_free(&ep->base.bind_addr);
@@ -223,7 +229,7 @@
 	struct sctp_endpoint *retval;
 
 	sctp_read_lock(&ep->base.addr_lock);
-	if (ep->base.bind_addr.port == laddr->v4.sin_port) {
+	if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) {
 		if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
 					 sctp_sk(ep->base.sk))) {
 			retval = ep;
@@ -251,7 +257,7 @@
 	struct sctp_association *asoc;
 	struct list_head *pos;
 
-	rport = paddr->v4.sin_port;
+	rport = ntohs(paddr->v4.sin_port);
 
 	list_for_each(pos, &ep->asocs) {
 		asoc = list_entry(pos, struct sctp_association, asocs);
@@ -311,8 +317,11 @@
 /* Do delayed input processing.  This is scheduled by sctp_rcv().
  * This may be called on BH or task time.
  */
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep)
+static void sctp_endpoint_bh_rcv(struct work_struct *work)
 {
+	struct sctp_endpoint *ep =
+		container_of(work, struct sctp_endpoint,
+			     base.inqueue.immediate);
 	struct sctp_association *asoc;
 	struct sock *sk;
 	struct sctp_transport *transport;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 6d82f40..3311187 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -726,7 +726,7 @@
 	struct sctp_endpoint *ep;
 	int hash;
 
-	hash = sctp_ep_hashfn(laddr->v4.sin_port);
+	hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port));
 	head = &sctp_ep_hashtable[hash];
 	read_lock(&head->lock);
 	for (epb = head->chain; epb; epb = epb->next) {
@@ -830,7 +830,7 @@
 	/* Optimize here for direct hit, only listening connections can
 	 * have wildcards anyways.
 	 */
-	hash = sctp_assoc_hashfn(local->v4.sin_port, peer->v4.sin_port);
+	hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port));
 	head = &sctp_assoc_hashtable[hash];
 	read_lock(&head->lock);
 	for (epb = head->chain; epb; epb = epb->next) {
@@ -957,7 +957,7 @@
 		if (!af)
 			continue;
 
-		af->from_addr_param(paddr, params.addr, ntohs(sh->source), 0);
+		af->from_addr_param(paddr, params.addr, sh->source, 0);
 
 		asoc = __sctp_lookup_association(laddr, paddr, &transport);
 		if (asoc)
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index cf6deed..71b0746 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -54,7 +54,7 @@
 	queue->in_progress = NULL;
 
 	/* Create a task for delivering data.  */
-	INIT_WORK(&queue->immediate, NULL, NULL);
+	INIT_WORK(&queue->immediate, NULL);
 
 	queue->malloced = 0;
 }
@@ -97,7 +97,7 @@
 	 * on the BH related data structures.
 	 */
 	list_add_tail(&chunk->list, &q->in_chunk_list);
-	q->immediate.func(q->immediate.data);
+	q->immediate.func(&q->immediate);
 }
 
 /* Extract a chunk from an SCTP inqueue.
@@ -205,9 +205,8 @@
  * The intent is that this routine will pull stuff out of the
  * inqueue and process it.
  */
-void sctp_inq_set_th_handler(struct sctp_inq *q,
-				 void (*callback)(void *), void *arg)
+void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
 {
-	INIT_WORK(&q->immediate, callback, arg);
+	INIT_WORK(&q->immediate, callback);
 }
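
The inqueue, endpoint, and association hunks above are all one conversion to the updated workqueue API: INIT_WORK() now takes just the work item and a work_func_t, the handler receives a pointer to that work item instead of an opaque data argument, and the enclosing object is recovered with container_of() on the embedded member. A stand-alone userspace mock of the pattern (the structs and names below are illustrative stand-ins, not the kernel's types):

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's work item machinery. */
struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

struct work_struct {
	work_func_t func;
};

#define INIT_WORK(w, f)		((w)->func = (f))
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative containers, shaped like an inqueue embedded in an endpoint. */
struct inq {
	struct work_struct immediate;
	int pending;
};

struct endpoint {
	struct inq inqueue;
	const char *name;
};

/* New-style handler: gets the work item, recovers its container. */
static void endpoint_bh_rcv(struct work_struct *work)
{
	struct endpoint *ep =
		container_of(work, struct endpoint, inqueue.immediate);

	printf("%s: %d chunk(s) queued\n", ep->name, ep->inqueue.pending);
}

int main(void)
{
	struct endpoint ep = { .inqueue = { .pending = 3 }, .name = "ep0" };

	/* Registration, as sctp_inq_set_th_handler() now does. */
	INIT_WORK(&ep.inqueue.immediate, endpoint_bh_rcv);

	/* Dispatch, as sctp_inq_push() now does. */
	ep.inqueue.immediate.func(&ep.inqueue.immediate);
	return 0;
}
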
 
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 78071c6..3c3e560 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -84,7 +84,7 @@
 
 /* ICMP error handler. */
 SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-			     int type, int code, int offset, __u32 info)
+			     int type, int code, int offset, __be32 info)
 {
 	struct inet6_dev *idev;
 	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
@@ -170,8 +170,6 @@
 		fl.oif = transport->saddr.v6.sin6_scope_id;
 	else
 		fl.oif = sk->sk_bound_dev_if;
-	fl.fl_ip_sport = inet_sk(sk)->sport;
-	fl.fl_ip_dport = transport->ipaddr.v6.sin6_port;
 
 	if (np->opt && np->opt->srcrt) {
 		struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
@@ -239,7 +237,7 @@
 	int i, j;
 
 	for (i = 0; i < 4 ; i++) {
-		__u32 a1xora2;
+		__be32 a1xora2;
 
 		a1xora2 = a1->s6_addr32[i] ^ a2->s6_addr32[i];
 
@@ -350,7 +348,7 @@
 			     int is_saddr)
 {
 	void *from;
-	__u16 *port;
+	__be16 *port;
 	struct sctphdr *sh;
 
 	port = &addr->v6.sin6_port;
@@ -360,10 +358,10 @@
 
 	sh = (struct sctphdr *) skb->h.raw;
 	if (is_saddr) {
-		*port  = ntohs(sh->source);
+		*port  = sh->source;
 		from = &skb->nh.ipv6h->saddr;
 	} else {
-		*port = ntohs(sh->dest);
+		*port = sh->dest;
 		from = &skb->nh.ipv6h->daddr;
 	}
 	ipv6_addr_copy(&addr->v6.sin6_addr, from);
@@ -373,7 +371,7 @@
 static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
 {
 	addr->v6.sin6_family = AF_INET6;
-	addr->v6.sin6_port = inet_sk(sk)->num;
+	addr->v6.sin6_port = 0;
 	addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr;
 }
 
@@ -407,7 +405,7 @@
 /* Initialize a sctp_addr from an address parameter. */
 static void sctp_v6_from_addr_param(union sctp_addr *addr,
 				    union sctp_addr_param *param,
-				    __u16 port, int iif)
+				    __be16 port, int iif)
 {
 	addr->v6.sin6_family = AF_INET6;
 	addr->v6.sin6_port = port;
@@ -425,7 +423,7 @@
 	int length = sizeof(sctp_ipv6addr_param_t);
 
 	param->v6.param_hdr.type = SCTP_PARAM_IPV6_ADDRESS;
-	param->v6.param_hdr.length = ntohs(length);
+	param->v6.param_hdr.length = htons(length);
 	ipv6_addr_copy(&param->v6.addr, &addr->v6.sin6_addr);
 
 	return length;
@@ -433,7 +431,7 @@
 
 /* Initialize a sctp_addr from a dst_entry. */
 static void sctp_v6_dst_saddr(union sctp_addr *addr, struct dst_entry *dst,
-			      unsigned short port)
+			      __be16 port)
 {
 	struct rt6_info *rt = (struct rt6_info *)dst;
 	addr->sa.sa_family = AF_INET6;
@@ -480,7 +478,7 @@
 }
 
 /* Initialize addr struct to INADDR_ANY. */
-static void sctp_v6_inaddr_any(union sctp_addr *addr, unsigned short port)
+static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port)
 {
 	memset(addr, 0x00, sizeof(union sctp_addr));
 	addr->v6.sin6_family = AF_INET6;
@@ -855,7 +853,7 @@
  * Returns number of addresses supported.
  */
 static int sctp_inet6_supported_addrs(const struct sctp_sock *opt,
-				      __u16 *types)
+				      __be16 *types)
 {
 	types[0] = SCTP_PARAM_IPV4_ADDRESS;
 	types[1] = SCTP_PARAM_IPV6_ADDRESS;
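
The __u16/__u32 to __be16/__be32 signature changes in this file (and throughout the series) don't alter generated code; they mark which values are in network byte order so that sparse, run with its endianness checks enabled, flags any mixing of host- and network-order quantities that lacks an explicit htons()/ntohs(). A rough userspace approximation of the annotation, assuming sparse is available (a normal compiler just sees plain unsigned shorts):

/* Build normally with gcc, or check with:  sparse -Wbitwise thisfile.c
 * (sparse defines __CHECKER__ itself; flag spelling may vary by version). */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned short __bitwise be16;	/* stand-in for the kernel's __be16 */

/* Toy htons(): swap unconditionally (the real one is a no-op on big-endian),
 * then force-cast into the annotated type. */
static be16 to_be16(unsigned short v)
{
	return (__force be16)(unsigned short)((v >> 8) | (v << 8));
}

int main(void)
{
	be16 port;

	port = to_be16(1234);	/* OK: explicit conversion              */
	port = 1234;		/* sparse warns: different base types   */
	return (int)(__force unsigned short)port;
}
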
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 7395824..fba567a 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1065,7 +1065,7 @@
 	 * A) Initialize the cacc_saw_newack to 0 for all destination
 	 * addresses.
 	 */
-	if (sack->num_gap_ack_blocks > 0 &&
+	if (sack->num_gap_ack_blocks &&
 	    primary->cacc.changeover_active) {
 		list_for_each(pos, transport_list) {
 			transport = list_entry(pos, struct sctp_transport,
@@ -1632,7 +1632,7 @@
 }
 
 static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
-				    int nskips, __u16 stream)
+				    int nskips, __be16 stream)
 {
 	int i;
 
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 7f49e76..b3493bd 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -160,7 +160,7 @@
 
 	list_for_each(pos, &epb->bind_addr.address_list) {
 		laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
-		addr = (union sctp_addr *)&laddr->a;
+		addr = &laddr->a;
 		af = sctp_get_af_specific(addr->sa.sa_family);
 		if (primary && af->cmp_addr(addr, primary)) {
 			seq_printf(seq, "*");
@@ -177,10 +177,10 @@
 	union sctp_addr *addr, *primary;
 	struct sctp_af *af;
 
-	primary = &(assoc->peer.primary_addr);
+	primary = &assoc->peer.primary_addr;
 	list_for_each(pos, &assoc->peer.transport_addr_list) {
 		transport = list_entry(pos, struct sctp_transport, transports);
-		addr = (union sctp_addr *)&transport->ipaddr;
+		addr = &transport->ipaddr;
 		af = sctp_get_af_specific(addr->sa.sa_family);
 		if (af->cmp_addr(addr, primary)) {
 			seq_printf(seq, "*");
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 5b4f82f..f2ba861 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -79,8 +79,8 @@
 static struct sctp_af *sctp_af_v4_specific;
 static struct sctp_af *sctp_af_v6_specific;
 
-kmem_cache_t *sctp_chunk_cachep __read_mostly;
-kmem_cache_t *sctp_bucket_cachep __read_mostly;
+struct kmem_cache *sctp_chunk_cachep __read_mostly;
+struct kmem_cache *sctp_bucket_cachep __read_mostly;
 
 /* Return the address of the control sock. */
 struct sock *sctp_get_ctl_sock(void)
@@ -251,7 +251,7 @@
 			     int is_saddr)
 {
 	void *from;
-	__u16 *port;
+	__be16 *port;
 	struct sctphdr *sh;
 
 	port = &addr->v4.sin_port;
@@ -259,10 +259,10 @@
 
 	sh = (struct sctphdr *) skb->h.raw;
 	if (is_saddr) {
-		*port  = ntohs(sh->source);
+		*port  = sh->source;
 		from = &skb->nh.iph->saddr;
 	} else {
-		*port = ntohs(sh->dest);
+		*port = sh->dest;
 		from = &skb->nh.iph->daddr;
 	}
 	memcpy(&addr->v4.sin_addr.s_addr, from, sizeof(struct in_addr));
@@ -272,7 +272,7 @@
 static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk)
 {
 	addr->v4.sin_family = AF_INET;
-	addr->v4.sin_port = inet_sk(sk)->num;
+	addr->v4.sin_port = 0;
 	addr->v4.sin_addr.s_addr = inet_sk(sk)->rcv_saddr;
 }
 
@@ -291,7 +291,7 @@
 /* Initialize a sctp_addr from an address parameter. */
 static void sctp_v4_from_addr_param(union sctp_addr *addr,
 				    union sctp_addr_param *param,
-				    __u16 port, int iif)
+				    __be16 port, int iif)
 {
 	addr->v4.sin_family = AF_INET;
 	addr->v4.sin_port = port;
@@ -307,7 +307,7 @@
 	int length = sizeof(sctp_ipv4addr_param_t);
 
 	param->v4.param_hdr.type = SCTP_PARAM_IPV4_ADDRESS;
-	param->v4.param_hdr.length = ntohs(length);
+	param->v4.param_hdr.length = htons(length);
 	param->v4.addr.s_addr = addr->v4.sin_addr.s_addr;	
 
 	return length;
@@ -315,7 +315,7 @@
 
 /* Initialize a sctp_addr from a dst_entry. */
 static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct dst_entry *dst,
-			      unsigned short port)
+			      __be16 port)
 {
 	struct rtable *rt = (struct rtable *)dst;
 	saddr->v4.sin_family = AF_INET;
@@ -338,7 +338,7 @@
 }
 
 /* Initialize addr struct to INADDR_ANY. */
-static void sctp_v4_inaddr_any(union sctp_addr *addr, unsigned short port)
+static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port)
 {
 	addr->v4.sin_family = AF_INET;
 	addr->v4.sin_addr.s_addr = INADDR_ANY;
@@ -481,7 +481,7 @@
 					   list);
 			if (!laddr->use_as_src)
 				continue;
-			sctp_v4_dst_saddr(&dst_saddr, dst, bp->port);
+			sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port));
 			if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
 				goto out_unlock;
 		}
@@ -538,7 +538,7 @@
 
 	if (rt) {
 		saddr->v4.sin_family = AF_INET;
-		saddr->v4.sin_port = asoc->base.bind_addr.port;  
+		saddr->v4.sin_port = htons(asoc->base.bind_addr.port);
 		saddr->v4.sin_addr.s_addr = rt->rt_src; 
 	}
 }
@@ -791,7 +791,7 @@
  * chunks.  Returns number of addresses supported.
  */
 static int sctp_inet_supported_addrs(const struct sctp_sock *opt,
-				     __u16 *types)
+				     __be16 *types)
 {
 	types[0] = SCTP_PARAM_IPV4_ADDRESS;
 	return 1;
@@ -808,7 +808,7 @@
 			  NIPQUAD(((struct rtable *)skb->dst)->rt_dst));
 
 	SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
-	return ip_queue_xmit(skb, ipfragok);
+	return ip_queue_xmit(skb, skb->sk, ipfragok);
 }
 
 static struct sctp_af sctp_ipv4_specific;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 507dff7..30927d3 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -65,7 +65,7 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
-extern kmem_cache_t *sctp_chunk_cachep;
+extern struct kmem_cache *sctp_chunk_cachep;
 
 SCTP_STATIC
 struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
@@ -111,7 +111,7 @@
  * provided chunk, as most cause codes will be embedded inside an
  * abort chunk.
  */
-void  sctp_init_cause(struct sctp_chunk *chunk, __u16 cause_code,
+void  sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
 		      const void *payload, size_t paylen)
 {
 	sctp_errhdr_t err;
@@ -183,7 +183,7 @@
 	int num_types, addrs_len = 0;
 	struct sctp_sock *sp;
 	sctp_supported_addrs_param_t sat;
-	__u16 types[2];
+	__be16 types[2];
 	sctp_adaption_ind_param_t aiparam;
 
 	/* RFC 2960 3.3.2 Initiation (INIT) (1)
@@ -775,7 +775,7 @@
 	const struct sctp_chunk *chunk, __u32 tsn)
 {
 	struct sctp_chunk *retval;
-	__u32 payload;
+	__be32 payload;
 
 	retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t)
 				 + sizeof(tsn));
@@ -951,7 +951,7 @@
 /* Create an Operation Error chunk.  */
 struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
 				 const struct sctp_chunk *chunk,
-				 __u16 cause_code, const void *payload,
+				 __be16 cause_code, const void *payload,
 				 size_t paylen)
 {
 	struct sctp_chunk *retval;
@@ -979,7 +979,7 @@
 {
 	struct sctp_chunk *retval;
 
-	retval = kmem_cache_alloc(sctp_chunk_cachep, SLAB_ATOMIC);
+	retval = kmem_cache_alloc(sctp_chunk_cachep, GFP_ATOMIC);
 
 	if (!retval)
 		goto nodata;
@@ -1190,15 +1190,14 @@
 	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
 		ssn = 0;
 	} else {
-		sid = htons(chunk->subh.data_hdr->stream);
+		sid = ntohs(chunk->subh.data_hdr->stream);
 		if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
 			ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid);
 		else
 			ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid);
-		ssn = htons(ssn);
 	}
 
-	chunk->subh.data_hdr->ssn = ssn;
+	chunk->subh.data_hdr->ssn = htons(ssn);
 	chunk->has_ssn = 1;
 }
 
@@ -1280,15 +1279,13 @@
 			- (bodysize % SCTP_COOKIE_MULTIPLE);
 	*cookie_len = headersize + bodysize;
 
-	retval = kmalloc(*cookie_len, GFP_ATOMIC);
-
-	if (!retval)
-		goto nodata;
-
 	/* Clear this memory since we are sending this data structure
 	 * out on the network.
 	 */
-	memset(retval, 0x00, *cookie_len);
+	retval = kzalloc(*cookie_len, GFP_ATOMIC);
+	if (!retval)
+		goto nodata;
+
 	cookie = (struct sctp_signed_cookie *) retval->body;
 
 	/* Set up the parameter header.  */
@@ -1438,7 +1435,7 @@
 		goto fail;
 	}
 
-	if (ntohs(chunk->sctp_hdr->source) != bear_cookie->peer_addr.v4.sin_port ||
+	if (chunk->sctp_hdr->source != bear_cookie->peer_addr.v4.sin_port ||
 	    ntohs(chunk->sctp_hdr->dest) != bear_cookie->my_port) {
 		*error = -SCTP_IERROR_BAD_PORTS;
 		goto fail;
@@ -1473,10 +1470,10 @@
 			suseconds_t usecs = (tv.tv_sec -
 				bear_cookie->expiration.tv_sec) * 1000000L +
 				tv.tv_usec - bear_cookie->expiration.tv_usec;
+			__be32 n = htonl(usecs);
 
-			usecs = htonl(usecs);
 			sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
-					&usecs, sizeof(usecs));
+					&n, sizeof(n));
 			*error = -SCTP_IERROR_STALE_COOKIE;
 		} else
 			*error = -SCTP_IERROR_NOMEM;
@@ -1539,8 +1536,8 @@
  ********************************************************************/
 
 struct __sctp_missing {
-	__u32 num_missing;
-	__u16 type;
+	__be32 num_missing;
+	__be16 type;
 }  __attribute__((packed));
 
 /*
@@ -1852,9 +1849,10 @@
 	 * added as the primary transport.  The source address seems to
 	 * be a better choice than any of the embedded addresses.
 	 */
-	if (peer_addr)
+	if (peer_addr) {
 		if(!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
 			goto nomem;
+	}
 
 	/* Process the initialization parameters.  */
 
@@ -1910,10 +1908,9 @@
 	/* Copy cookie in case we need to resend COOKIE-ECHO. */
 	cookie = asoc->peer.cookie;
 	if (cookie) {
-		asoc->peer.cookie = kmalloc(asoc->peer.cookie_len, gfp);
+		asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp);
 		if (!asoc->peer.cookie)
 			goto clean_up;
-		memcpy(asoc->peer.cookie, cookie, asoc->peer.cookie_len);
 	}
 
 	/* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily
@@ -2027,7 +2024,7 @@
 		/* Fall through. */
 	case SCTP_PARAM_IPV4_ADDRESS:
 		af = sctp_get_af_specific(param_type2af(param.p->type));
-		af->from_addr_param(&addr, param.addr, asoc->peer.port, 0);
+		af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0);
 		scope = sctp_scope(peer_addr);
 		if (sctp_in_scope(&addr, scope))
 			if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
@@ -2230,7 +2227,7 @@
 					      union sctp_addr	      *laddr,
 					      struct sockaddr	      *addrs,
 					      int		      addrcnt,
-					      __u16		      flags)
+					      __be16		      flags)
 {
 	sctp_addip_param_t	param;
 	struct sctp_chunk	*retval;
@@ -2363,14 +2360,14 @@
 }
 
 /* Add response parameters to an ASCONF_ACK chunk. */
-static void sctp_add_asconf_response(struct sctp_chunk *chunk, __u32 crr_id,
-			      __u16 err_code, sctp_addip_param_t *asconf_param)
+static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id,
+			      __be16 err_code, sctp_addip_param_t *asconf_param)
 {
 	sctp_addip_param_t 	ack_param;
 	sctp_errhdr_t		err_param;
 	int			asconf_param_len = 0;
 	int			err_param_len = 0;
-	__u16			response_type;
+	__be16			response_type;
 
 	if (SCTP_ERROR_NO_ERROR == err_code) {
 		response_type = SCTP_PARAM_SUCCESS_REPORT;
@@ -2404,7 +2401,7 @@
 }
 
 /* Process a asconf parameter. */
-static __u16 sctp_process_asconf_param(struct sctp_association *asoc,
+static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
 				       struct sctp_chunk *asconf,
 				       sctp_addip_param_t *asconf_param)
 {
@@ -2413,7 +2410,7 @@
 	union sctp_addr	addr;
 	struct list_head *pos;
 	union sctp_addr_param *addr_param;
-				 
+
 	addr_param = (union sctp_addr_param *)
 			((void *)asconf_param + sizeof(sctp_addip_param_t));
 
@@ -2421,7 +2418,7 @@
 	if (unlikely(!af))
 		return SCTP_ERROR_INV_PARAM;
 
-	af->from_addr_param(&addr, addr_param, asoc->peer.port, 0);
+	af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0);
 	switch (asconf_param->param_hdr.type) {
 	case SCTP_PARAM_ADD_IP:
 		/* ADDIP 4.3 D9) If an endpoint receives an ADD IP address
@@ -2487,7 +2484,7 @@
 	sctp_addip_param_t	*asconf_param;
 	struct sctp_chunk	*asconf_ack;
 
-	__u16	err_code;
+	__be16	err_code;
 	int	length = 0;
 	int	chunk_len = asconf->skb->len;
 	__u32	serial;
@@ -2586,7 +2583,7 @@
 
 	/* We have checked the packet before, so we do not check again.	*/
 	af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type));
-	af->from_addr_param(&addr, addr_param, bp->port, 0);
+	af->from_addr_param(&addr, addr_param, htons(bp->port), 0);
 
 	switch (asconf_param->param_hdr.type) {
 	case SCTP_PARAM_ADD_IP:
@@ -2630,7 +2627,7 @@
  * All TLVs after the failed response are considered unsuccessful unless a
  * specific success indication is present for the parameter.
  */
-static __u16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
+static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
 				      sctp_addip_param_t *asconf_param,
 				      int no_err)
 {
@@ -2638,7 +2635,7 @@
 	sctp_errhdr_t		*err_param;
 	int			length;
 	int			asconf_ack_len = asconf_ack->skb->len;
-	__u16			err_code;
+	__be16			err_code;
 
 	if (no_err)
 		err_code = SCTP_ERROR_NO_ERROR;
@@ -2694,7 +2691,7 @@
 	int	all_param_pass = 0;
 	int	no_err = 1;
 	int	retval = 0;
-	__u16	err_code = SCTP_ERROR_NO_ERROR;
+	__be16	err_code = SCTP_ERROR_NO_ERROR;
 
 	/* Skip the chunkhdr and addiphdr from the last asconf sent and store
 	 * a pointer to address parameter.
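
Two of the hunks above replace an open-coded kmalloc()+memset(0) with kzalloc() and a kmalloc()+memcpy() with kmemdup(); both helpers allocate and initialize in one step and return NULL on failure, so the error paths stay the same. A userspace analogue of what the two helpers do (using malloc()/calloc() in place of the kernel allocator):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Roughly what kzalloc() provides: a zeroed allocation, or NULL. */
static void *zalloc(size_t len)
{
	return calloc(1, len);
}

/* Roughly what kmemdup() provides: a copy of an existing buffer, or NULL. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char cookie[] = "opaque-cookie-bytes";
	char *zeroed = zalloc(32);
	char *copy = memdup(cookie, sizeof(cookie));

	if (!zeroed || !copy)
		return 1;
	printf("zeroed[0]=%d copy=\"%s\"\n", zeroed[0], copy);
	free(zeroed);
	free(copy);
	return 0;
}
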
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 9c10bde..7bbc615 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -442,7 +442,7 @@
 					 " transport IP: port:%d failed.\n",
 					 asoc,
 					 (&transport->ipaddr),
-					 transport->ipaddr.v4.sin_port);
+					 ntohs(transport->ipaddr.v4.sin_port));
 		sctp_assoc_control_transport(asoc, transport,
 					     SCTP_TRANSPORT_DOWN,
 					     SCTP_FAILED_THRESHOLD);
@@ -1360,12 +1360,12 @@
 			break;
 
 		case SCTP_CMD_INIT_FAILED:
-			sctp_cmd_init_failed(commands, asoc, cmd->obj.u32);
+			sctp_cmd_init_failed(commands, asoc, cmd->obj.err);
 			break;
 
 		case SCTP_CMD_ASSOC_FAILED:
 			sctp_cmd_assoc_failed(commands, asoc, event_type,
-					      subtype, chunk, cmd->obj.u32);
+					      subtype, chunk, cmd->obj.err);
 			break;
 
 		case SCTP_CMD_INIT_COUNTER_INC:
@@ -1420,7 +1420,7 @@
 
 		case SCTP_CMD_PROCESS_CTSN:
 			/* Dummy up a SACK for processing. */
-			sackh.cum_tsn_ack = cmd->obj.u32;
+			sackh.cum_tsn_ack = cmd->obj.be32;
 			sackh.a_rwnd = 0;
 			sackh.num_gap_ack_blocks = 0;
 			sackh.num_dup_tsns = 0;
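
The side-effect engine passes each command argument through a union, so the constructor macro used when a command is queued has to match the member read back here when it is executed. The hunks above switch the dispatch side to cmd->obj.err and cmd->obj.be32, and the sm_statefuns.c changes that follow switch the queueing side to the matching SCTP_PERR()/SCTP_BE32() constructors instead of SCTP_U32(). A much-simplified sketch of the pattern (the real sctp_arg_t has many more members; the names here are illustrative):

#include <stdio.h>

typedef unsigned short be16;	/* stand-in for the kernel's __be16 */
typedef unsigned int   be32;	/* stand-in for the kernel's __be32 */

/* Simplified command argument: one union member per constructor macro. */
typedef union {
	unsigned int u32;
	be16 err;	/* peer error code, network byte order */
	be32 be32;	/* e.g. a cumulative TSN ack, network byte order */
} arg_t;

#define ARG_U32(v)	((arg_t){ .u32  = (v) })
#define ARG_PERR(v)	((arg_t){ .err  = (v) })
#define ARG_BE32(v)	((arg_t){ .be32 = (v) })

/* The consumer reads .err, so the producer must queue with ARG_PERR(). */
static void run_assoc_failed(arg_t arg)
{
	printf("association failed, peer error 0x%04x\n", (unsigned int)arg.err);
}

int main(void)
{
	run_assoc_failed(ARG_PERR(0x0001));
	return 0;
}
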
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 1c42fe9..27cc444 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -93,7 +93,7 @@
 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
 
 static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
-					   __u16 error, int sk_err,
+					   __be16 error, int sk_err,
 					   const struct sctp_association *asoc,
 					   struct sctp_transport *transport);
 
@@ -443,7 +443,7 @@
 	__u32 init_tag;
 	struct sctp_chunk *err_chunk;
 	struct sctp_packet *packet;
-	__u16 error;
+	sctp_error_t error;
 
 	if (!sctp_vtag_verify(chunk, asoc))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -886,7 +886,7 @@
 				SCTP_ERROR(ETIMEDOUT));
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-				SCTP_U32(SCTP_ERROR_NO_ERROR));
+				SCTP_PERR(SCTP_ERROR_NO_ERROR));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
 		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -2138,7 +2138,7 @@
 		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
 				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
-				SCTP_U32(SCTP_ERROR_STALE_COOKIE));
+				SCTP_PERR(SCTP_ERROR_STALE_COOKIE));
 		return SCTP_DISPOSITION_DELETE_TCB;
 	}
 
@@ -2158,7 +2158,7 @@
 	 * to give ample time to retransmit the new cookie and thus
 	 * yield a higher probability of success on the reattempt.
 	 */
-	stale = ntohl(*(suseconds_t *)((u8 *)err + sizeof(sctp_errhdr_t)));
+	stale = ntohl(*(__be32 *)((u8 *)err + sizeof(sctp_errhdr_t)));
 	stale = (stale * 2) / 1000;
 
 	bht.param_hdr.type = SCTP_PARAM_COOKIE_PRESERVATIVE;
@@ -2250,7 +2250,7 @@
 {
 	struct sctp_chunk *chunk = arg;
 	unsigned len;
-	__u16 error = SCTP_ERROR_NO_ERROR;
+	__be16 error = SCTP_ERROR_NO_ERROR;
 
 	if (!sctp_vtag_verify_either(chunk, asoc))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -2275,7 +2275,7 @@
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
  	/* ASSOC_FAILED will DELETE_TCB. */
-	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error));
+	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error));
 	SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
 	SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
 
@@ -2295,7 +2295,7 @@
 {
 	struct sctp_chunk *chunk = arg;
 	unsigned len;
-	__u16 error = SCTP_ERROR_NO_ERROR;
+	__be16 error = SCTP_ERROR_NO_ERROR;
 
 	if (!sctp_vtag_verify_either(chunk, asoc))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -2357,7 +2357,7 @@
  * This is common code called by several sctp_sf_*_abort() functions above.
  */
 static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
-					   __u16 error, int sk_err,
+					   __be16 error, int sk_err,
 					   const struct sctp_association *asoc,
 					   struct sctp_transport *transport)
 {
@@ -2370,7 +2370,7 @@
 	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
 	/* CMD_INIT_FAILED will DELETE_TCB. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
-			SCTP_U32(error));
+			SCTP_PERR(error));
 	return SCTP_DISPOSITION_ABORT;
 }
 
@@ -2466,7 +2466,7 @@
 	 *    received by the SHUTDOWN sender.
 	 */
 	sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN,
-			SCTP_U32(chunk->subh.shutdown_hdr->cum_tsn_ack));
+			SCTP_BE32(chunk->subh.shutdown_hdr->cum_tsn_ack));
 
 out:
 	return disposition;
@@ -2545,6 +2545,7 @@
 {
 	sctp_cwrhdr_t *cwr;
 	struct sctp_chunk *chunk = arg;
+	u32 lowest_tsn;
 
 	if (!sctp_vtag_verify(chunk, asoc))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -2556,14 +2557,14 @@
 	cwr = (sctp_cwrhdr_t *) chunk->skb->data;
 	skb_pull(chunk->skb, sizeof(sctp_cwrhdr_t));
 
-	cwr->lowest_tsn = ntohl(cwr->lowest_tsn);
+	lowest_tsn = ntohl(cwr->lowest_tsn);
 
 	/* Does this CWR ack the last sent congestion notification? */
-	if (TSN_lte(asoc->last_ecne_tsn, cwr->lowest_tsn)) {
+	if (TSN_lte(asoc->last_ecne_tsn, lowest_tsn)) {
 		/* Stop sending ECNE. */
 		sctp_add_cmd_sf(commands,
 				SCTP_CMD_ECN_CWR,
-				SCTP_U32(cwr->lowest_tsn));
+				SCTP_U32(lowest_tsn));
 	}
 	return SCTP_DISPOSITION_CONSUME;
 }
@@ -3360,7 +3361,7 @@
 		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
 			        SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-				SCTP_U32(SCTP_ERROR_ASCONF_ACK));
+				SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
 		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
 		return SCTP_DISPOSITION_ABORT;
@@ -3388,7 +3389,7 @@
 		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
 			        SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-				SCTP_U32(SCTP_ERROR_ASCONF_ACK));
+				SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
 		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
 		return SCTP_DISPOSITION_ABORT;
@@ -3743,12 +3744,12 @@
 		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
 				SCTP_ERROR(ECONNREFUSED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
-				SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
+				SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
 	} else {
 		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
 			        SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-				SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
+				SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
 		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
 	}
 
@@ -4062,7 +4063,7 @@
 			SCTP_ERROR(ECONNABORTED));
 	/* Delete the established association. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-			SCTP_U32(SCTP_ERROR_USER_ABORT));
+			SCTP_PERR(SCTP_ERROR_USER_ABORT));
 
 	SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
 	SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
@@ -4199,7 +4200,7 @@
 			SCTP_ERROR(ECONNREFUSED));
 	/* Delete the established association. */
 	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
-			SCTP_U32(SCTP_ERROR_USER_ABORT));
+			SCTP_PERR(SCTP_ERROR_USER_ABORT));
 
 	return retval;
 }
@@ -4571,7 +4572,7 @@
 				SCTP_ERROR(ETIMEDOUT));
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-				SCTP_U32(SCTP_ERROR_NO_ERROR));
+				SCTP_PERR(SCTP_ERROR_NO_ERROR));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
 		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -4693,7 +4694,7 @@
 		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
 				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
-				SCTP_U32(SCTP_ERROR_NO_ERROR));
+				SCTP_PERR(SCTP_ERROR_NO_ERROR));
 		return SCTP_DISPOSITION_DELETE_TCB;
 	}
 
@@ -4745,7 +4746,7 @@
 		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
 				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
-				SCTP_U32(SCTP_ERROR_NO_ERROR));
+				SCTP_PERR(SCTP_ERROR_NO_ERROR));
 		return SCTP_DISPOSITION_DELETE_TCB;
 	}
 
@@ -4781,7 +4782,7 @@
 				SCTP_ERROR(ETIMEDOUT));
 		/* Note:  CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-				SCTP_U32(SCTP_ERROR_NO_ERROR));
+				SCTP_PERR(SCTP_ERROR_NO_ERROR));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
 		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -4859,7 +4860,7 @@
 		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
 				SCTP_ERROR(ETIMEDOUT));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-				SCTP_U32(SCTP_ERROR_NO_ERROR));
+				SCTP_PERR(SCTP_ERROR_NO_ERROR));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
 		SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
 		return SCTP_DISPOSITION_ABORT;
@@ -4915,7 +4916,7 @@
 	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
 			SCTP_ERROR(ETIMEDOUT));
 	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-			SCTP_U32(SCTP_ERROR_NO_ERROR));
+			SCTP_PERR(SCTP_ERROR_NO_ERROR));
 
 	return SCTP_DISPOSITION_DELETE_TCB;
 nomem:
@@ -5365,7 +5366,7 @@
 		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
 				SCTP_ERROR(ECONNABORTED));
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-				SCTP_U32(SCTP_ERROR_NO_DATA));
+				SCTP_PERR(SCTP_ERROR_NO_DATA));
 		SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
 		SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
 		return SCTP_IERROR_NO_DATA;
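
The sm_statetable.c diff that follows collapses every state-table cell of the form {.fn = f, .name = "f"} into TYPE_SCTP_FUNC(f), using the preprocessor's # operator to derive the name string from the function token so the two can no longer drift apart. A self-contained illustration of the stringizing trick:

#include <stdio.h>

struct entry {
	void (*fn)(void);
	const char *name;
};

/* One token supplies both the function pointer and its printable name. */
#define ENTRY(func)	{ .fn = func, .name = #func }

static void handle_data(void)   { puts("  handling DATA"); }
static void discard_chunk(void) { puts("  discarding"); }

static const struct entry table[] = {
	ENTRY(handle_data),
	ENTRY(discard_chunk),
};

int main(void)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		printf("%s:\n", table[i].name);
		table[i].fn();
	}
	return 0;
}
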
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 8bcca56..733dd87 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -104,325 +104,322 @@
 	};
 }
 
+#define TYPE_SCTP_FUNC(func) {.fn = func, .name = #func}
+
 #define TYPE_SCTP_DATA { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \
+	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_eat_data_6_2, .name = "sctp_sf_eat_data_6_2"}, \
+	TYPE_SCTP_FUNC(sctp_sf_eat_data_6_2), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_eat_data_6_2, .name = "sctp_sf_eat_data_6_2"}, \
+	TYPE_SCTP_FUNC(sctp_sf_eat_data_6_2), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_eat_data_fast_4_4, .name = "sctp_sf_eat_data_fast_4_4"}, \
+	TYPE_SCTP_FUNC(sctp_sf_eat_data_fast_4_4), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 } /* TYPE_SCTP_DATA */
 
 #define TYPE_SCTP_INIT { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_do_5_1B_init, .name = "sctp_sf_do_5_1B_init"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_1B_init), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_do_5_2_1_siminit, .name = "sctp_sf_do_5_2_1_siminit"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_2_1_siminit), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_do_5_2_1_siminit, .name = "sctp_sf_do_5_2_1_siminit"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_2_1_siminit), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_do_5_2_2_dupinit, .name = "sctp_sf_do_5_2_2_dupinit"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_do_5_2_2_dupinit, .name = "sctp_sf_do_5_2_2_dupinit"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_do_5_2_2_dupinit, .name = "sctp_sf_do_5_2_2_dupinit"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_do_5_2_2_dupinit, .name = "sctp_sf_do_5_2_2_dupinit"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_do_9_2_reshutack, .name = "sctp_sf_do_9_2_reshutack"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_9_2_reshutack), \
 } /* TYPE_SCTP_INIT */
 
 #define TYPE_SCTP_INIT_ACK { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_do_5_1C_ack, .name = "sctp_sf_do_5_1C_ack"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_1C_ack), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 } /* TYPE_SCTP_INIT_ACK */
 
 #define TYPE_SCTP_SACK { \
 	/*  SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \
+	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_eat_sack_6_2, .name = "sctp_sf_eat_sack_6_2"}, \
+	TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_eat_sack_6_2, .name = "sctp_sf_eat_sack_6_2"}, \
+	TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_eat_sack_6_2, .name = "sctp_sf_eat_sack_6_2"}, \
+	TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_eat_sack_6_2, .name = "sctp_sf_eat_sack_6_2"}, \
+	TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 } /* TYPE_SCTP_SACK */
 
 #define TYPE_SCTP_HEARTBEAT { \
 	/*  SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \
+	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \
+	TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \
+	TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \
+	TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \
+	TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \
+	TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
 	/* This should not happen, but we are nice.  */ \
-	{.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \
+	TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \
 } /* TYPE_SCTP_HEARTBEAT */
 
 #define TYPE_SCTP_HEARTBEAT_ACK { \
 	/*  SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \
+	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_violation, .name = "sctp_sf_violation"}, \
+	TYPE_SCTP_FUNC(sctp_sf_violation), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_backbeat_8_3, .name = "sctp_sf_backbeat_8_3"}, \
+	TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_backbeat_8_3, .name = "sctp_sf_backbeat_8_3"}, \
+	TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_backbeat_8_3, .name = "sctp_sf_backbeat_8_3"}, \
+	TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_backbeat_8_3, .name = "sctp_sf_backbeat_8_3"}, \
+	TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 } /* TYPE_SCTP_HEARTBEAT_ACK */
 
 #define TYPE_SCTP_ABORT { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_pdiscard, .name = "sctp_sf_pdiscard"}, \
+	TYPE_SCTP_FUNC(sctp_sf_pdiscard), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_cookie_wait_abort, .name = "sctp_sf_cookie_wait_abort"}, \
+	TYPE_SCTP_FUNC(sctp_sf_cookie_wait_abort), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_cookie_echoed_abort, \
-	 .name = "sctp_sf_cookie_echoed_abort"}, \
+	TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_abort), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_do_9_1_abort, .name = "sctp_sf_do_9_1_abort"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_9_1_abort), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_shutdown_pending_abort, \
-	.name = "sctp_sf_shutdown_pending_abort"}, \
+	TYPE_SCTP_FUNC(sctp_sf_shutdown_pending_abort), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_shutdown_sent_abort, \
-	.name = "sctp_sf_shutdown_sent_abort"}, \
+	TYPE_SCTP_FUNC(sctp_sf_shutdown_sent_abort), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_do_9_1_abort, .name = "sctp_sf_do_9_1_abort"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_9_1_abort), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_shutdown_ack_sent_abort, \
-	.name = "sctp_sf_shutdown_ack_sent_abort"}, \
+	TYPE_SCTP_FUNC(sctp_sf_shutdown_ack_sent_abort), \
 } /* TYPE_SCTP_ABORT */
 
 #define TYPE_SCTP_SHUTDOWN { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \
+	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_do_9_2_shutdown, .name = "sctp_sf_do_9_2_shutdown"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_do_9_2_shutdown_ack, \
-	 .name = "sctp_sf_do_9_2_shutdown_ack"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown_ack), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 } /* TYPE_SCTP_SHUTDOWN */
 
 #define TYPE_SCTP_SHUTDOWN_ACK { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_do_8_5_1_E_sa, .name = "sctp_sf_do_8_5_1_E_sa"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_8_5_1_E_sa), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_do_8_5_1_E_sa, .name = "sctp_sf_do_8_5_1_E_sa"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_8_5_1_E_sa), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_violation, .name = "sctp_sf_violation"}, \
+	TYPE_SCTP_FUNC(sctp_sf_violation), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_violation, .name = "sctp_sf_violation"}, \
+	TYPE_SCTP_FUNC(sctp_sf_violation), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_do_9_2_final, .name = "sctp_sf_do_9_2_final"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_9_2_final), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_violation, .name = "sctp_sf_violation"}, \
+	TYPE_SCTP_FUNC(sctp_sf_violation), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_do_9_2_final, .name = "sctp_sf_do_9_2_final"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_9_2_final), \
 } /* TYPE_SCTP_SHUTDOWN_ACK */
 
 #define TYPE_SCTP_ERROR { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \
+	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_cookie_echoed_err, .name = "sctp_sf_cookie_echoed_err"}, \
+	TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_err), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_operr_notify, .name = "sctp_sf_operr_notify"}, \
+	TYPE_SCTP_FUNC(sctp_sf_operr_notify), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_operr_notify, .name = "sctp_sf_operr_notify"}, \
+	TYPE_SCTP_FUNC(sctp_sf_operr_notify), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_operr_notify, .name = "sctp_sf_operr_notify"}, \
+	TYPE_SCTP_FUNC(sctp_sf_operr_notify), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 } /* TYPE_SCTP_ERROR */
 
 #define TYPE_SCTP_COOKIE_ECHO { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_do_5_1D_ce, .name = "sctp_sf_do_5_1D_ce"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_1D_ce), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \
 } /* TYPE_SCTP_COOKIE_ECHO */
 
 #define TYPE_SCTP_COOKIE_ACK { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_do_5_1E_ca, .name = "sctp_sf_do_5_1E_ca"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_5_1E_ca), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 } /* TYPE_SCTP_COOKIE_ACK */
 
 #define TYPE_SCTP_ECN_ECNE { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_ecne), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_ecne), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_ecne), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_ecne), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_ecne), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 } /* TYPE_SCTP_ECN_ECNE */
 
 #define TYPE_SCTP_ECN_CWR { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_do_ecn_cwr, .name = "sctp_sf_do_ecn_cwr"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_ecn_cwr), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_do_ecn_cwr, .name = "sctp_sf_do_ecn_cwr"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_ecn_cwr), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_do_ecn_cwr, .name = "sctp_sf_do_ecn_cwr"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_ecn_cwr), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 } /* TYPE_SCTP_ECN_CWR */
 
 #define TYPE_SCTP_SHUTDOWN_COMPLETE { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_do_4_C, .name = "sctp_sf_do_4_C"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_4_C), \
 } /* TYPE_SCTP_SHUTDOWN_COMPLETE */
 
 /* The primary index for this table is the chunk type.
@@ -450,44 +447,44 @@
 
 #define TYPE_SCTP_ASCONF { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_do_asconf, .name = "sctp_sf_do_asconf"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_asconf), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 } /* TYPE_SCTP_ASCONF */
 
 #define TYPE_SCTP_ASCONF_ACK { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_do_asconf_ack, .name = "sctp_sf_do_asconf_ack"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_asconf_ack), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 } /* TYPE_SCTP_ASCONF_ACK */
 
 /* The primary index for this table is the chunk type.
@@ -500,23 +497,23 @@
 
 #define TYPE_SCTP_FWD_TSN { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \
+	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_eat_fwd_tsn, .name = "sctp_sf_eat_fwd_tsn"}, \
+	TYPE_SCTP_FUNC(sctp_sf_eat_fwd_tsn), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_eat_fwd_tsn, .name = "sctp_sf_eat_fwd_tsn"}, \
+	TYPE_SCTP_FUNC(sctp_sf_eat_fwd_tsn), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_eat_fwd_tsn_fast, .name = "sctp_sf_eat_fwd_tsn_fast"}, \
+	TYPE_SCTP_FUNC(sctp_sf_eat_fwd_tsn_fast), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
+	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 } /* TYPE_SCTP_FWD_TSN */
 
 /* The primary index for this table is the chunk type.
@@ -529,167 +526,150 @@
 static const sctp_sm_table_entry_t
 chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
 	/* SCTP_STATE_EMPTY */
-	{.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"},
+	TYPE_SCTP_FUNC(sctp_sf_ootb),
 	/* SCTP_STATE_CLOSED */
-	{.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"},
+	TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8),
 	/* SCTP_STATE_COOKIE_WAIT */
-	{.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"},
+	TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
 	/* SCTP_STATE_COOKIE_ECHOED */
-	{.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"},
+	TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
 	/* SCTP_STATE_ESTABLISHED */
-	{.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"},
+	TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
 	/* SCTP_STATE_SHUTDOWN_PENDING */
-	{.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"},
+	TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
 	/* SCTP_STATE_SHUTDOWN_SENT */
-	{.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"},
+	TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */
-	{.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"},
+	TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */
-	{.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"},
+	TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
 };	/* chunk unknown */
 
 
 #define TYPE_SCTP_PRIMITIVE_ASSOCIATE  { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_do_prm_asoc, .name = "sctp_sf_do_prm_asoc"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_prm_asoc), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \
+	TYPE_SCTP_FUNC(sctp_sf_not_impl), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \
+	TYPE_SCTP_FUNC(sctp_sf_not_impl), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \
+	TYPE_SCTP_FUNC(sctp_sf_not_impl), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \
+	TYPE_SCTP_FUNC(sctp_sf_not_impl), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \
+	TYPE_SCTP_FUNC(sctp_sf_not_impl), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \
+	TYPE_SCTP_FUNC(sctp_sf_not_impl), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \
+	TYPE_SCTP_FUNC(sctp_sf_not_impl), \
 } /* TYPE_SCTP_PRIMITIVE_ASSOCIATE */
 
 #define TYPE_SCTP_PRIMITIVE_SHUTDOWN  { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \
+	TYPE_SCTP_FUNC(sctp_sf_error_closed), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_cookie_wait_prm_shutdown, \
-	 .name = "sctp_sf_cookie_wait_prm_shutdown"}, \
+	TYPE_SCTP_FUNC(sctp_sf_cookie_wait_prm_shutdown), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_cookie_echoed_prm_shutdown, \
-	 .name = "sctp_sf_cookie_echoed_prm_shutdown"},\
+	TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_prm_shutdown),\
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_do_9_2_prm_shutdown, \
-	 .name = "sctp_sf_do_9_2_prm_shutdown"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_9_2_prm_shutdown), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_ignore_primitive, .name = "sctp_sf_ignore_primitive"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_ignore_primitive, .name = "sctp_sf_ignore_primitive"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_ignore_primitive, .name = "sctp_sf_ignore_primitive"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_ignore_primitive, .name = "sctp_sf_ignore_primitive"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \
 } /* TYPE_SCTP_PRIMITIVE_SHUTDOWN */
 
 #define TYPE_SCTP_PRIMITIVE_ABORT  { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \
+	TYPE_SCTP_FUNC(sctp_sf_error_closed), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_cookie_wait_prm_abort, \
-	.name = "sctp_sf_cookie_wait_prm_abort"}, \
+	TYPE_SCTP_FUNC(sctp_sf_cookie_wait_prm_abort), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_cookie_echoed_prm_abort, \
-	.name = "sctp_sf_cookie_echoed_prm_abort"}, \
+	TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_prm_abort), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_do_9_1_prm_abort, \
-	.name = "sctp_sf_do_9_1_prm_abort"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_9_1_prm_abort), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_shutdown_pending_prm_abort, \
-	.name = "sctp_sf_shutdown_pending_prm_abort"}, \
+	TYPE_SCTP_FUNC(sctp_sf_shutdown_pending_prm_abort), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_shutdown_sent_prm_abort, \
-	.name = "sctp_sf_shutdown_sent_prm_abort"}, \
+	TYPE_SCTP_FUNC(sctp_sf_shutdown_sent_prm_abort), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_do_9_1_prm_abort, \
-	.name = "sctp_sf_do_9_1_prm_abort"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_9_1_prm_abort), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_shutdown_ack_sent_prm_abort, \
-	.name = "sctp_sf_shutdown_ack_sent_prm_abort"}, \
+	TYPE_SCTP_FUNC(sctp_sf_shutdown_ack_sent_prm_abort), \
 } /* TYPE_SCTP_PRIMITIVE_ABORT */
 
 #define TYPE_SCTP_PRIMITIVE_SEND  { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \
+	TYPE_SCTP_FUNC(sctp_sf_error_closed), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_do_prm_send, .name = "sctp_sf_do_prm_send"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_prm_send), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_do_prm_send, .name = "sctp_sf_do_prm_send"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_prm_send), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_do_prm_send, .name = "sctp_sf_do_prm_send"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_prm_send), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \
+	TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \
+	TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \
+	TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \
+	TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
 } /* TYPE_SCTP_PRIMITIVE_SEND */
 
 #define TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT  { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \
+	TYPE_SCTP_FUNC(sctp_sf_error_closed), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_do_prm_requestheartbeat,		      \
-	 .name = "sctp_sf_do_prm_requestheartbeat"},          \
+	TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat),          \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_do_prm_requestheartbeat,		      \
-	 .name = "sctp_sf_do_prm_requestheartbeat"},          \
+	TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat),          \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_do_prm_requestheartbeat,		      \
-	 .name = "sctp_sf_do_prm_requestheartbeat"},          \
+	TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat),          \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_do_prm_requestheartbeat,		      \
-	 .name = "sctp_sf_do_prm_requestheartbeat"},          \
+	TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat),          \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_do_prm_requestheartbeat,		      \
-	 .name = "sctp_sf_do_prm_requestheartbeat"},          \
+	TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat),          \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_do_prm_requestheartbeat,		      \
-	 .name = "sctp_sf_do_prm_requestheartbeat"},          \
+	TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat),          \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_do_prm_requestheartbeat,		      \
-	 .name = "sctp_sf_do_prm_requestheartbeat"},          \
+	TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat),          \
 } /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */
 
 #define TYPE_SCTP_PRIMITIVE_ASCONF { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \
+	TYPE_SCTP_FUNC(sctp_sf_error_closed), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \
+	TYPE_SCTP_FUNC(sctp_sf_error_closed), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \
+	TYPE_SCTP_FUNC(sctp_sf_error_closed), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_do_prm_asconf, .name = "sctp_sf_do_prm_asconf"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \
+	TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \
+	TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \
+	TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \
+	TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
 } /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */
 
 /* The primary index for this table is the primitive type.
@@ -706,47 +686,44 @@
 
 #define TYPE_SCTP_OTHER_NO_PENDING_TSN  { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_do_9_2_start_shutdown, \
-	 .name = "sctp_do_9_2_start_shutdown"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_9_2_start_shutdown), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_do_9_2_shutdown_ack, \
-	 .name = "sctp_sf_do_9_2_shutdown_ack"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown_ack), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
 }
 
 #define TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH  { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_cookie_wait_icmp_abort, \
-	 .name = "sctp_sf_cookie_wait_icmp_abort"}, \
+	TYPE_SCTP_FUNC(sctp_sf_cookie_wait_icmp_abort), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \
+	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
 }
 
 static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES] = {
@@ -756,215 +733,212 @@
 
 #define TYPE_SCTP_EVENT_TIMEOUT_NONE { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_t1_cookie_timer_expire, \
-	 .name = "sctp_sf_t1_cookie_timer_expire"}, \
+	TYPE_SCTP_FUNC(sctp_sf_t1_cookie_timer_expire), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T1_INIT { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_t1_init_timer_expire, \
-	 .name = "sctp_sf_t1_init_timer_expire"}, \
+	TYPE_SCTP_FUNC(sctp_sf_t1_init_timer_expire), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_t2_timer_expire, .name = "sctp_sf_t2_timer_expire"}, \
+	TYPE_SCTP_FUNC(sctp_sf_t2_timer_expire), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_t2_timer_expire, .name = "sctp_sf_t2_timer_expire"}, \
+	TYPE_SCTP_FUNC(sctp_sf_t2_timer_expire), \
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T3_RTX { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_do_6_3_3_rtx, .name = "sctp_sf_do_6_3_3_rtx"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_do_6_3_3_rtx, .name = "sctp_sf_do_6_3_3_rtx"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_do_6_3_3_rtx, .name = "sctp_sf_do_6_3_3_rtx"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_do_6_3_3_rtx, .name = "sctp_sf_do_6_3_3_rtx"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T4_RTO { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_t4_timer_expire, .name = "sctp_sf_t4_timer_expire"}, \
+	TYPE_SCTP_FUNC(sctp_sf_t4_timer_expire), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_t5_timer_expire, .name = "sctp_sf_t5_timer_expire"}, \
+	TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_t5_timer_expire, .name = "sctp_sf_t5_timer_expire"}, \
+	TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_sendbeat_8_3, .name = "sctp_sf_sendbeat_8_3"}, \
+	TYPE_SCTP_FUNC(sctp_sf_sendbeat_8_3), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_sendbeat_8_3, .name = "sctp_sf_sendbeat_8_3"}, \
+	TYPE_SCTP_FUNC(sctp_sf_sendbeat_8_3), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_sendbeat_8_3, .name = "sctp_sf_sendbeat_8_3"}, \
+	TYPE_SCTP_FUNC(sctp_sf_sendbeat_8_3), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_SACK { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
+	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_do_6_2_sack, .name = "sctp_sf_do_6_2_sack"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_6_2_sack), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_do_6_2_sack, .name = "sctp_sf_do_6_2_sack"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_6_2_sack), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_do_6_2_sack, .name = "sctp_sf_do_6_2_sack"}, \
+	TYPE_SCTP_FUNC(sctp_sf_do_6_2_sack), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE { \
 	/* SCTP_STATE_EMPTY */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_CLOSED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_ECHOED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	{.fn = sctp_sf_autoclose_timer_expire, \
-	 .name = "sctp_sf_autoclose_timer_expire"}, \
+	TYPE_SCTP_FUNC(sctp_sf_autoclose_timer_expire), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
-	{.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \
+	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 }
 
 static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = {
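
The hunks above replace every open-coded { .fn = x, .name = "x" } initializer with a single TYPE_SCTP_FUNC(x) invocation. The macro's definition is not part of these hunks; presumably it stringizes its argument so the diagnostic name can never drift out of sync with the function pointer. A minimal, self-contained sketch of that pattern follows; the struct sm_entry type, the demo state functions and the assumed macro expansion are stand-ins, not the kernel's definitions:

	#include <stdio.h>

	/* Stand-in for the kernel's sctp_sm_table_entry_t; the .fn/.name
	 * pair mirrors the initializers being replaced above. */
	struct sm_entry {
		int (*fn)(void);
		const char *name;
	};

	/* Assumed shape of TYPE_SCTP_FUNC: stringize the function so the
	 * printable name always matches the function pointer. */
	#define TYPE_SCTP_FUNC(func)	{ .fn = func, .name = #func }

	static int sctp_sf_bug(void)          { return -1; }
	static int sctp_sf_timer_ignore(void) { return 0; }

	static const struct sm_entry demo_table[] = {
		TYPE_SCTP_FUNC(sctp_sf_bug),
		TYPE_SCTP_FUNC(sctp_sf_timer_ignore),
	};

	int main(void)
	{
		for (size_t i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++)
			printf("%s -> %d\n", demo_table[i].name, demo_table[i].fn());
		return 0;
	}

One token now names both the handler and its printable name, so the state tables above lose hundreds of duplicated string literals without losing debuggability.
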
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 935bc91..1e8132b 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -107,7 +107,7 @@
 			      struct sctp_association *, sctp_socket_type_t);
 static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
 
-extern kmem_cache_t *sctp_bucket_cachep;
+extern struct kmem_cache *sctp_bucket_cachep;
 
 /* Get the sndbuf space available at the time on the association.  */
 static inline int sctp_wspace(struct sctp_association *asoc)
@@ -229,11 +229,9 @@
 	struct sctp_transport *transport;
 	union sctp_addr *laddr = (union sctp_addr *)addr;
 
-	laddr->v4.sin_port = ntohs(laddr->v4.sin_port);
 	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
-					       (union sctp_addr *)addr,
+					       laddr,
 					       &transport);
-	laddr->v4.sin_port = htons(laddr->v4.sin_port);
 
 	if (!addr_asoc)
 		return NULL;
@@ -368,9 +366,7 @@
 	sctp_write_lock(&ep->base.addr_lock);
 
 	/* Use GFP_ATOMIC since BHs are disabled.  */
-	addr->v4.sin_port = ntohs(addr->v4.sin_port);
 	ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC);
-	addr->v4.sin_port = htons(addr->v4.sin_port);
 	sctp_write_unlock(&ep->base.addr_lock);
 	sctp_local_bh_enable();
 
@@ -572,7 +568,6 @@
 			addr = (union sctp_addr *)addr_buf;
 			af = sctp_get_af_specific(addr->v4.sin_family);
 			memcpy(&saveaddr, addr, af->sockaddr_len);
-			saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
 			retval = sctp_add_bind_addr(bp, &saveaddr, 0,
 						    GFP_ATOMIC);
 			addr_buf += af->sockaddr_len;
@@ -607,9 +602,8 @@
 	int cnt;
 	struct sctp_bind_addr *bp = &ep->base.bind_addr;
 	int retval = 0;
-	union sctp_addr saveaddr;
 	void *addr_buf;
-	struct sockaddr *sa_addr;
+	union sctp_addr *sa_addr;
 	struct sctp_af *af;
 
 	SCTP_DEBUG_PRINTK("sctp_bindx_rem (sk: %p, addrs: %p, addrcnt: %d)\n",
@@ -627,19 +621,13 @@
 			goto err_bindx_rem;
 		}
 
-		/* The list may contain either IPv4 or IPv6 address;
-		 * determine the address length to copy the address to
-		 * saveaddr. 
-		 */
-		sa_addr = (struct sockaddr *)addr_buf;
-		af = sctp_get_af_specific(sa_addr->sa_family);
+		sa_addr = (union sctp_addr *)addr_buf;
+		af = sctp_get_af_specific(sa_addr->sa.sa_family);
 		if (!af) {
 			retval = -EINVAL;
 			goto err_bindx_rem;
 		}
-		memcpy(&saveaddr, sa_addr, af->sockaddr_len); 
-		saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
-		if (saveaddr.v4.sin_port != bp->port) {
+		if (sa_addr->v4.sin_port != htons(bp->port)) {
 			retval = -EINVAL;
 			goto err_bindx_rem;
 		}
@@ -654,7 +642,7 @@
 		sctp_local_bh_disable();
 		sctp_write_lock(&ep->base.addr_lock);
 
-		retval = sctp_del_bind_addr(bp, &saveaddr);
+		retval = sctp_del_bind_addr(bp, sa_addr);
 
 		sctp_write_unlock(&ep->base.addr_lock);
 		sctp_local_bh_enable();
@@ -693,7 +681,6 @@
 	struct sctp_bind_addr	*bp;
 	struct sctp_chunk	*chunk;
 	union sctp_addr		*laddr;
-	union sctp_addr		saveaddr;
 	void			*addr_buf;
 	struct sctp_af		*af;
 	struct list_head	*pos, *pos1;
@@ -773,13 +760,11 @@
 		for (i = 0; i < addrcnt; i++) {
 			laddr = (union sctp_addr *)addr_buf;
 			af = sctp_get_af_specific(laddr->v4.sin_family);
-			memcpy(&saveaddr, laddr, af->sockaddr_len);
-			saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
 			list_for_each(pos1, &bp->address_list) {
 				saddr = list_entry(pos1,
 						   struct sctp_sockaddr_entry,
 						   list);
-				if (sctp_cmp_addr_exact(&saddr->a, &saveaddr))
+				if (sctp_cmp_addr_exact(&saddr->a, laddr))
 					saddr->use_as_src = 0;
 			}
 			addr_buf += af->sockaddr_len;
@@ -979,7 +964,7 @@
 	int err = 0;
 	int addrcnt = 0;
 	int walk_size = 0;
-	struct sockaddr *sa_addr;
+	union sctp_addr *sa_addr;
 	void *addr_buf;
 
 	sp = sctp_sk(sk);
@@ -999,8 +984,8 @@
 	/* Walk through the addrs buffer and count the number of addresses. */
 	addr_buf = kaddrs;
 	while (walk_size < addrs_size) {
-		sa_addr = (struct sockaddr *)addr_buf;
-		af = sctp_get_af_specific(sa_addr->sa_family);
+		sa_addr = (union sctp_addr *)addr_buf;
+		af = sctp_get_af_specific(sa_addr->sa.sa_family);
 
 		/* If the address family is not supported or if this address
 		 * causes the address buffer to overflow return EINVAL.
@@ -1010,18 +995,16 @@
 			goto out_free;
 		}
 
-		err = sctp_verify_addr(sk, (union sctp_addr *)sa_addr,
-				       af->sockaddr_len);
+		err = sctp_verify_addr(sk, sa_addr, af->sockaddr_len);
 		if (err)
 			goto out_free;
 
 		memcpy(&to, sa_addr, af->sockaddr_len);
-		to.v4.sin_port = ntohs(to.v4.sin_port);
 
 		/* Check if there already is a matching association on the
 		 * endpoint (other than the one created here).
 		 */
-		asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
+		asoc2 = sctp_endpoint_lookup_assoc(ep, sa_addr, &transport);
 		if (asoc2 && asoc2 != asoc) {
 			if (asoc2->state >= SCTP_STATE_ESTABLISHED)
 				err = -EISCONN;
@@ -1034,7 +1017,7 @@
 		 * make sure that there is no peeled-off association matching
 		 * the peer address even on another socket.
 		 */
-		if (sctp_endpoint_is_peeled_off(ep, &to)) {
+		if (sctp_endpoint_is_peeled_off(ep, sa_addr)) {
 			err = -EADDRNOTAVAIL;
 			goto out_free;
 		}
@@ -1065,7 +1048,7 @@
 				}
 			}
 
-			scope = sctp_scope(&to);
+			scope = sctp_scope(sa_addr);
 			asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
 			if (!asoc) {
 				err = -ENOMEM;
@@ -1074,7 +1057,7 @@
 		}
 
 		/* Prime the peer's transport structures.  */
-		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
+		transport = sctp_assoc_add_peer(asoc, sa_addr, GFP_KERNEL,
 						SCTP_UNKNOWN);
 		if (!transport) {
 			err = -ENOMEM;
@@ -1427,11 +1410,6 @@
 		if (msg_namelen > sizeof(to))
 			msg_namelen = sizeof(to);
 		memcpy(&to, msg->msg_name, msg_namelen);
-		SCTP_DEBUG_PRINTK("Just memcpy'd. msg_name is "
-				  "0x%x:%u.\n",
-				  to.v4.sin_addr.s_addr, to.v4.sin_port);
-
-		to.v4.sin_port = ntohs(to.v4.sin_port);
 		msg_name = msg->msg_name;
 	}
 
@@ -3217,8 +3195,8 @@
 	status.sstat_outstrms = asoc->c.sinit_num_ostreams;
 	status.sstat_fragmentation_point = asoc->frag_point;
 	status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
-	memcpy(&status.sstat_primary.spinfo_address,
-	       &(transport->ipaddr), sizeof(union sctp_addr));
+	memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
+			transport->af_specific->sockaddr_len);
 	/* Map ipv4 address into v4-mapped-on-v6 address.  */
 	sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
 		(union sctp_addr *)&status.sstat_primary.spinfo_address);
@@ -3770,7 +3748,6 @@
 		memcpy(&temp, &from->ipaddr, sizeof(temp));
 		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
 		addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
-		temp.v4.sin_port = htons(temp.v4.sin_port);
 		if (copy_to_user(to, &temp, addrlen))
 			return -EFAULT;
 		to += addrlen ;
@@ -3821,7 +3798,6 @@
 		addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
 		if(space_left < addrlen)
 			return -ENOMEM;
-		temp.v4.sin_port = htons(temp.v4.sin_port);
 		if (copy_to_user(to, &temp, addrlen))
 			return -EFAULT;
 		to += addrlen;
@@ -3889,7 +3865,7 @@
 						  struct sctp_sockaddr_entry,
 						  list);
 				if ((PF_INET == sk->sk_family) && 
-				    (AF_INET6 == addr->a.sa.sa_family))	
+				    (AF_INET6 == addr->a.sa.sa_family))
 					continue;
 				cnt++;
 			}
@@ -3933,7 +3909,6 @@
 		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
 								&temp);
 		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
-		temp.v4.sin_port = htons(port);
 		if (copy_to_user(to, &temp, addrlen)) {
 			sctp_spin_unlock_irqrestore(&sctp_local_addr_lock,
 						    flags);
@@ -3970,7 +3945,6 @@
 		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
 		if(space_left<addrlen)
 			return -ENOMEM;
-		temp.v4.sin_port = htons(port);
 		if (copy_to_user(*to, &temp, addrlen)) {
 			sctp_spin_unlock_irqrestore(&sctp_local_addr_lock,
 						    flags);
@@ -4055,7 +4029,6 @@
 		memcpy(&temp, &addr->a, sizeof(temp));
 		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
 		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
-		temp.v4.sin_port = htons(temp.v4.sin_port);
 		if (copy_to_user(to, &temp, addrlen)) {
 			err = -EFAULT;
 			goto unlock;
@@ -4146,7 +4119,6 @@
 		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
 		if(space_left < addrlen)
 			return -ENOMEM; /*fixme: right error?*/
-		temp.v4.sin_port = htons(temp.v4.sin_port);
 		if (copy_to_user(to, &temp, addrlen)) {
 			err = -EFAULT;
 			goto unlock;
@@ -4194,12 +4166,8 @@
 	if (!asoc->peer.primary_path)
 		return -ENOTCONN;
 	
-	asoc->peer.primary_path->ipaddr.v4.sin_port =
-		htons(asoc->peer.primary_path->ipaddr.v4.sin_port);
 	memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
-	       sizeof(union sctp_addr));
-	asoc->peer.primary_path->ipaddr.v4.sin_port =
-		ntohs(asoc->peer.primary_path->ipaddr.v4.sin_port);
+		asoc->peer.primary_path->af_specific->sockaddr_len);
 
 	sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp,
 			(union sctp_addr *)&prim.ssp_addr);
@@ -4645,9 +4613,7 @@
 	unsigned short snum;
 	int ret;
 
-	/* NOTE:  Remember to put this back to net order. */
-	addr->v4.sin_port = ntohs(addr->v4.sin_port);
-	snum = addr->v4.sin_port;
+	snum = ntohs(addr->v4.sin_port);
 
 	SCTP_DEBUG_PRINTK("sctp_get_port() begins, snum=%d\n", snum);
 	sctp_local_bh_disable();
@@ -4784,7 +4750,6 @@
 
 fail:
 	sctp_local_bh_enable();
-	addr->v4.sin_port = htons(addr->v4.sin_port);
 	return ret;
 }
 
@@ -5024,7 +4989,7 @@
 {
 	struct sctp_bind_bucket *pp;
 
-	pp = kmem_cache_alloc(sctp_bucket_cachep, SLAB_ATOMIC);
+	pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
 	SCTP_DBG_OBJCNT_INC(bind_bucket);
 	if (pp) {
 		pp->port = snum;
@@ -5083,7 +5048,7 @@
 {
 	union sctp_addr autoaddr;
 	struct sctp_af *af;
-	unsigned short port;
+	__be16 port;
 
 	/* Initialize a local sockaddr structure to INADDR_ANY. */
 	af = sctp_sk(sk)->pf->af;
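
The net/sctp/socket.c hunks all follow one rule: sockaddr ports now stay in network byte order end to end, so the ntohs()/htons() round-trips disappear and the remaining conversion happens exactly once, either htons() on the host-order side of a comparison (bp->port) or ntohs() at the single point a host-order value such as snum is needed. A userspace illustration of the two idioms; the port number and variable names are invented for the example:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>

	int main(void)
	{
		struct sockaddr_in sa = { .sin_family = AF_INET,
					  .sin_port   = htons(9899) };	/* wire order, as received */
		unsigned short bound_port = 9899;			/* host order, like bp->port */

		/* Old style: flip the stored port to host order, compare, flip it back.
		 * New style: leave sa.sin_port alone, convert the host-order side once. */
		if (sa.sin_port == htons(bound_port))
			printf("ports match\n");

		/* Where a host-order value is genuinely needed (e.g. snum), convert on read. */
		unsigned short snum = ntohs(sa.sin_port);
		printf("snum=%u\n", (unsigned)snum);
		return 0;
	}

Keeping the stored value in one representation is also what lets sparse check the __be16 annotations added elsewhere in this diff (the port and cause fields).
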
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index ac4fae1..42d9498 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -401,13 +401,14 @@
 
 	/* Refresh the gap ack information. */
 	if (sctp_tsnmap_has_gap(map)) {
+		__u16 start, end;
 		sctp_tsnmap_iter_init(map, &iter);
 		while (sctp_tsnmap_next_gap_ack(map, &iter,
-						&map->gabs[gabs].start,
-						&map->gabs[gabs].end)) {
+						&start,
+						&end)) {
 
-			map->gabs[gabs].start = htons(map->gabs[gabs].start);
-			map->gabs[gabs].end = htons(map->gabs[gabs].end);
+			map->gabs[gabs].start = htons(start);
+			map->gabs[gabs].end = htons(end);
 			gabs++;
 			if (gabs >= SCTP_MAX_GABS)
 				break;
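
The tsnmap.c hunk applies the same rule to the gap ack blocks: the iterator now works on local host-order __u16 values, and a single htons() is done when each result is stored into the on-the-wire gabs[] array. A small runnable sketch of the idiom, with a simplified block structure standing in for the kernel's gap ack block:

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	/* Simplified stand-in for the SCTP gap ack block; both fields are
	 * carried on the wire in network byte order. */
	struct gab {
		uint16_t start;
		uint16_t end;
	};

	int main(void)
	{
		struct gab gabs[4];
		int ngabs = 0;

		/* Iterate in host order... */
		for (uint16_t start = 1, end = 3; ngabs < 2; start += 10, end += 10) {
			/* ...and convert exactly once when filling the wire structure. */
			gabs[ngabs].start = htons(start);
			gabs[ngabs].end   = htons(end);
			ngabs++;
		}

		printf("first block (host order): %u-%u\n",
		       (unsigned)ntohs(gabs[0].start), (unsigned)ntohs(gabs[0].end));
		return 0;
	}
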
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index a015283..e255a70 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -351,7 +351,7 @@
 	struct sctp_remote_error *sre;
 	struct sk_buff *skb;
 	sctp_errhdr_t *ch;
-	__u16 cause;
+	__be16 cause;
 	int elen;
 
 	ch = (sctp_errhdr_t *)(chunk->skb->data);
diff --git a/net/socket.c b/net/socket.c
index 6c9b9b3..29ea1de 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -77,7 +77,6 @@
 #include <linux/cache.h>
 #include <linux/module.h>
 #include <linux/highmem.h>
-#include <linux/divert.h>
 #include <linux/mount.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
@@ -231,13 +230,13 @@
 
 #define SOCKFS_MAGIC 0x534F434B
 
-static kmem_cache_t *sock_inode_cachep __read_mostly;
+static struct kmem_cache *sock_inode_cachep __read_mostly;
 
 static struct inode *sock_alloc_inode(struct super_block *sb)
 {
 	struct socket_alloc *ei;
 
-	ei = kmem_cache_alloc(sock_inode_cachep, SLAB_KERNEL);
+	ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	init_waitqueue_head(&ei->socket.wait);
@@ -258,7 +257,7 @@
 			container_of(inode, struct socket_alloc, vfs_inode));
 }
 
-static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
 	struct socket_alloc *ei = (struct socket_alloc *)foo;
 
@@ -306,7 +305,14 @@
 
 static int sockfs_delete_dentry(struct dentry *dentry)
 {
-	return 1;
+	/*
+	 * At creation time, we pretended this dentry was hashed
+	 * (by clearing the DCACHE_UNHASHED bit in d_flags).
+	 * At delete time, we restore the truth: not hashed
+	 * (so that dput() can proceed correctly).
+	 */
+	dentry->d_flags |= DCACHE_UNHASHED;
+	return 0;
 }
 static struct dentry_operations sockfs_dentry_operations = {
 	.d_delete = sockfs_delete_dentry,
@@ -354,14 +360,20 @@
 
 	this.len = sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino);
 	this.name = name;
-	this.hash = SOCK_INODE(sock)->i_ino;
+	this.hash = 0;
 
 	file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this);
 	if (unlikely(!file->f_dentry))
 		return -ENOMEM;
 
 	file->f_dentry->d_op = &sockfs_dentry_operations;
-	d_add(file->f_dentry, SOCK_INODE(sock));
+	/*
+	 * We don't want to push this dentry into the global dentry hash table.
+	 * We pretend the dentry is already hashed, by unsetting DCACHE_UNHASHED.
+	 * This permits a working /proc/$pid/fd/XXX on sockets.
+	 */
+	file->f_dentry->d_flags &= ~DCACHE_UNHASHED;
+	d_instantiate(file->f_dentry, SOCK_INODE(sock));
 	file->f_vfsmnt = mntget(sock_mnt);
 	file->f_mapping = file->f_dentry->d_inode->i_mapping;
 
@@ -852,11 +864,6 @@
 				err = vlan_ioctl_hook(argp);
 			mutex_unlock(&vlan_ioctl_mutex);
 			break;
-		case SIOCGIFDIVERT:
-		case SIOCSIFDIVERT:
-			/* Convert this to call through a hook */
-			err = divert_ioctl(cmd, argp);
-			break;
 		case SIOCADDDLCI:
 		case SIOCDELDLCI:
 			err = -ENOPKG;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index b36b946..a02ecc1 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -68,7 +68,7 @@
 #define GSS_CRED_SLACK		1024		/* XXX: unused */
 /* length of a krb5 verifier (48), plus data added before arguments when
  * using integrity (two 4-byte integers): */
-#define GSS_VERF_SLACK		56
+#define GSS_VERF_SLACK		100
 
 /* XXX this define must match the gssd define
 * as it is passed to gssd to signal the use of
@@ -94,46 +94,6 @@
 static void gss_destroy_ctx(struct gss_cl_ctx *);
 static struct rpc_pipe_ops gss_upcall_ops;
 
-void
-print_hexl(u32 *p, u_int length, u_int offset)
-{
-	u_int i, j, jm;
-	u8 c, *cp;
-	
-	dprintk("RPC: print_hexl: length %d\n",length);
-	dprintk("\n");
-	cp = (u8 *) p;
-	
-	for (i = 0; i < length; i += 0x10) {
-		dprintk("  %04x: ", (u_int)(i + offset));
-		jm = length - i;
-		jm = jm > 16 ? 16 : jm;
-		
-		for (j = 0; j < jm; j++) {
-			if ((j % 2) == 1)
-				dprintk("%02x ", (u_int)cp[i+j]);
-			else
-				dprintk("%02x", (u_int)cp[i+j]);
-		}
-		for (; j < 16; j++) {
-			if ((j % 2) == 1)
-				dprintk("   ");
-			else
-				dprintk("  ");
-		}
-		dprintk(" ");
-		
-		for (j = 0; j < jm; j++) {
-			c = cp[i+j];
-			c = isprint(c) ? c : '.';
-			dprintk("%c", c);
-		}
-		dprintk("\n");
-	}
-}
-
-EXPORT_SYMBOL(print_hexl);
-
 static inline struct gss_cl_ctx *
 gss_get_ctx(struct gss_cl_ctx *ctx)
 {
@@ -198,11 +158,10 @@
 	q = (const void *)((const char *)p + len);
 	if (unlikely(q > end || q < p))
 		return ERR_PTR(-EFAULT);
-	dest->data = kmalloc(len, GFP_KERNEL);
+	dest->data = kmemdup(p, len, GFP_KERNEL);
 	if (unlikely(dest->data == NULL))
 		return ERR_PTR(-ENOMEM);
 	dest->len = len;
-	memcpy(dest->data, p, len);
 	return q;
 }
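
The simple_get_netobj()/simple_get_bytes() duplications in this file (and in gss_krb5_mech.c and gss_spkm3_mech.c below) switch from a kmalloc() followed by memcpy() to a single kmemdup() call with the same GFP flags. kmemdup() is kernel-only; the memdup() helper below is a userspace stand-in used purely to show the shape of the change:

	#include <stdlib.h>
	#include <string.h>
	#include <stdio.h>

	/* Userspace stand-in for the kernel's kmemdup(src, len, gfp). */
	static void *memdup(const void *src, size_t len)
	{
		void *dst = malloc(len);

		if (dst)
			memcpy(dst, src, len);
		return dst;
	}

	int main(void)
	{
		const char blob[] = "netobj payload";
		/* Before: data = malloc(len); if (!data) ...; memcpy(data, p, len);
		 * After:  data = memdup(p, len); if (!data) ...;                    */
		char *copy = memdup(blob, sizeof(blob));

		if (!copy)
			return 1;
		printf("%s\n", copy);
		free(copy);
		return 0;
	}
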
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index e11a40b..d926cda 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -43,6 +43,7 @@
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/sunrpc/gss_krb5.h>
+#include <linux/sunrpc/xdr.h>
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY        RPCDBG_AUTH
@@ -61,9 +62,6 @@
 	u8 local_iv[16] = {0};
 	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
-	dprintk("RPC:      krb5_encrypt: input data:\n");
-	print_hexl((u32 *)in, length, 0);
-
 	if (length % crypto_blkcipher_blocksize(tfm) != 0)
 		goto out;
 
@@ -80,12 +78,9 @@
 	sg_set_buf(sg, out, length);
 
 	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
-
-	dprintk("RPC:      krb5_encrypt: output data:\n");
-	print_hexl((u32 *)out, length, 0);
 out:
 	dprintk("RPC:      krb5_encrypt returns %d\n",ret);
-	return(ret);
+	return ret;
 }
 
 EXPORT_SYMBOL(krb5_encrypt);
@@ -103,9 +98,6 @@
 	u8 local_iv[16] = {0};
 	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
-	dprintk("RPC:      krb5_decrypt: input data:\n");
-	print_hexl((u32 *)in, length, 0);
-
 	if (length % crypto_blkcipher_blocksize(tfm) != 0)
 		goto out;
 
@@ -121,83 +113,14 @@
 	sg_set_buf(sg, out, length);
 
 	ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
-
-	dprintk("RPC:      krb5_decrypt: output_data:\n");
-	print_hexl((u32 *)out, length, 0);
 out:
 	dprintk("RPC:      gss_k5decrypt returns %d\n",ret);
-	return(ret);
+	return ret;
 }
 
 EXPORT_SYMBOL(krb5_decrypt);
 
 static int
-process_xdr_buf(struct xdr_buf *buf, int offset, int len,
-		int (*actor)(struct scatterlist *, void *), void *data)
-{
-	int i, page_len, thislen, page_offset, ret = 0;
-	struct scatterlist	sg[1];
-
-	if (offset >= buf->head[0].iov_len) {
-		offset -= buf->head[0].iov_len;
-	} else {
-		thislen = buf->head[0].iov_len - offset;
-		if (thislen > len)
-			thislen = len;
-		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
-		ret = actor(sg, data);
-		if (ret)
-			goto out;
-		offset = 0;
-		len -= thislen;
-	}
-	if (len == 0)
-		goto out;
-
-	if (offset >= buf->page_len) {
-		offset -= buf->page_len;
-	} else {
-		page_len = buf->page_len - offset;
-		if (page_len > len)
-			page_len = len;
-		len -= page_len;
-		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
-		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
-		thislen = PAGE_CACHE_SIZE - page_offset;
-		do {
-			if (thislen > page_len)
-				thislen = page_len;
-			sg->page = buf->pages[i];
-			sg->offset = page_offset;
-			sg->length = thislen;
-			ret = actor(sg, data);
-			if (ret)
-				goto out;
-			page_len -= thislen;
-			i++;
-			page_offset = 0;
-			thislen = PAGE_CACHE_SIZE;
-		} while (page_len != 0);
-		offset = 0;
-	}
-	if (len == 0)
-		goto out;
-
-	if (offset < buf->tail[0].iov_len) {
-		thislen = buf->tail[0].iov_len - offset;
-		if (thislen > len)
-			thislen = len;
-		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
-		ret = actor(sg, data);
-		len -= thislen;
-	}
-	if (len != 0)
-		ret = -EINVAL;
-out:
-	return ret;
-}
-
-static int
 checksummer(struct scatterlist *sg, void *data)
 {
 	struct hash_desc *desc = data;
@@ -207,23 +130,13 @@
 
 /* checksum the plaintext data and hdrlen bytes of the token header */
 s32
-make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
+make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
 		   int body_offset, struct xdr_netobj *cksum)
 {
-	char                            *cksumname;
 	struct hash_desc                desc; /* XXX add to ctx? */
 	struct scatterlist              sg[1];
 	int err;
 
-	switch (cksumtype) {
-		case CKSUMTYPE_RSA_MD5:
-			cksumname = "md5";
-			break;
-		default:
-			dprintk("RPC:      krb5_make_checksum:"
-				" unsupported checksum %d", cksumtype);
-			return GSS_S_FAILURE;
-	}
 	desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(desc.tfm))
 		return GSS_S_FAILURE;
@@ -237,7 +150,7 @@
 	err = crypto_hash_update(&desc, sg, hdrlen);
 	if (err)
 		goto out;
-	err = process_xdr_buf(body, body_offset, body->len - body_offset,
+	err = xdr_process_buf(body, body_offset, body->len - body_offset,
 			      checksummer, &desc);
 	if (err)
 		goto out;
@@ -335,7 +248,7 @@
 	desc.fragno = 0;
 	desc.fraglen = 0;
 
-	ret = process_xdr_buf(buf, offset, buf->len - offset, encryptor, &desc);
+	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
 	return ret;
 }
 
@@ -401,7 +314,7 @@
 	desc.desc.flags = 0;
 	desc.fragno = 0;
 	desc.fraglen = 0;
-	return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc);
+	return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
 }
 
 EXPORT_SYMBOL(gss_decrypt_xdr_buf);
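
Two things happen in gss_krb5_crypto.c: the private process_xdr_buf() walker is deleted in favour of the shared xdr_process_buf() helper (hence the new include of linux/sunrpc/xdr.h), and make_checksum() now takes the hash name, "md5", instead of a CKSUMTYPE_* constant it would have to translate itself. The callback-over-segments idea is the interesting part; here is a reduced, runnable userspace sketch in which the segment type, helper and actor are all invented for illustration:

	#include <stdio.h>

	/* A buffer split into discontiguous segments, loosely modelled on
	 * the head/pages/tail layout of struct xdr_buf. */
	struct seg { const char *data; size_t len; };

	/* Walk every segment and hand it to 'actor', the way xdr_process_buf()
	 * feeds scatterlist chunks to a checksum or cipher callback. */
	static int for_each_segment(const struct seg *segs, int nsegs,
				    int (*actor)(const char *, size_t, void *), void *priv)
	{
		for (int i = 0; i < nsegs; i++) {
			int ret = actor(segs[i].data, segs[i].len, priv);
			if (ret)
				return ret;
		}
		return 0;
	}

	static int count_bytes(const char *data, size_t len, void *priv)
	{
		*(size_t *)priv += len;
		(void)data;
		return 0;
	}

	int main(void)
	{
		struct seg segs[] = { { "head", 4 }, { "pages", 5 }, { "tail", 4 } };
		size_t total = 0;

		for_each_segment(segs, 3, count_bytes, &total);
		printf("total=%zu\n", total);
		return 0;
	}
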
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 325e72e..05d4bee 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -70,10 +70,9 @@
 	q = (const void *)((const char *)p + len);
 	if (unlikely(q > end || q < p))
 		return ERR_PTR(-EFAULT);
-	res->data = kmalloc(len, GFP_KERNEL);
+	res->data = kmemdup(p, len, GFP_KERNEL);
 	if (unlikely(res->data == NULL))
 		return ERR_PTR(-ENOMEM);
-	memcpy(res->data, p, len);
 	res->len = len;
 	return q;
 }
@@ -130,6 +129,7 @@
 {
 	const void *end = (const void *)((const char *)p + len);
 	struct	krb5_ctx *ctx;
+	int tmp;
 
 	if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
 		goto out_err;
@@ -137,17 +137,22 @@
 	p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
 	if (IS_ERR(p))
 		goto out_err_free_ctx;
-	p = simple_get_bytes(p, end, &ctx->seed_init, sizeof(ctx->seed_init));
+	/* The downcall format was designed before we completely understood
+	 * the uses of the context fields; it therefore includes some fields
+	 * we only give minimal sanity-checking, and some we ignore
+	 * completely (like the next twenty bytes): */
+	if (unlikely(p + 20 > end || p + 20 < p))
+		goto out_err_free_ctx;
+	p += 20;
+	p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
 	if (IS_ERR(p))
 		goto out_err_free_ctx;
-	p = simple_get_bytes(p, end, ctx->seed, sizeof(ctx->seed));
+	if (tmp != SGN_ALG_DES_MAC_MD5)
+		goto out_err_free_ctx;
+	p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
 	if (IS_ERR(p))
 		goto out_err_free_ctx;
-	p = simple_get_bytes(p, end, &ctx->signalg, sizeof(ctx->signalg));
-	if (IS_ERR(p))
-		goto out_err_free_ctx;
-	p = simple_get_bytes(p, end, &ctx->sealalg, sizeof(ctx->sealalg));
-	if (IS_ERR(p))
+	if (tmp != SEAL_ALG_DES)
 		goto out_err_free_ctx;
 	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
 	if (IS_ERR(p))
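
The context-import path above no longer stores seed_init/seed, signalg and sealalg in the krb5 context; it bounds-checks and skips the 20 unused bytes, then insists the downcall carries exactly SGN_ALG_DES_MAC_MD5 and SEAL_ALG_DES. A sketch of that skip-then-validate parsing style; the constants, blob layout and import_ctx() below are illustrative only, not the kernel's downcall format:

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	#define DEMO_SGN_ALG	0	/* illustrative stand-in for the kernel's  */
					/* SGN_ALG_DES_MAC_MD5 constant            */

	static int import_ctx(const unsigned char *p, size_t len)
	{
		const unsigned char *end = p + len;
		int32_t tmp;

		/* Skip fields we no longer use, but still bounds-check the skip. */
		if (p + 20 > end || p + 20 < p)
			return -1;
		p += 20;

		/* Accept only the single supported algorithm instead of storing it. */
		if (p + sizeof(tmp) > end)
			return -1;
		memcpy(&tmp, p, sizeof(tmp));
		p += sizeof(tmp);
		if (tmp != DEMO_SGN_ALG)
			return -1;

		return 0;
	}

	int main(void)
	{
		unsigned char blob[24] = { 0 };	/* 20 ignored bytes + one algorithm id */

		printf("import: %s\n", import_ctx(blob, sizeof(blob)) ? "rejected" : "ok");
		return 0;
	}
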
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index 08601ee..d0bb506 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -77,7 +77,6 @@
 		struct xdr_netobj *token)
 {
 	struct krb5_ctx		*ctx = gss_ctx->internal_ctx_id;
-	s32			checksum_type;
 	char			cksumdata[16];
 	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
 	unsigned char		*ptr, *krb5_hdr, *msg_start;
@@ -88,21 +87,6 @@
 
 	now = get_seconds();
 
-	switch (ctx->signalg) {
-		case SGN_ALG_DES_MAC_MD5:
-			checksum_type = CKSUMTYPE_RSA_MD5;
-			break;
-		default:
-			dprintk("RPC:      gss_krb5_seal: ctx->signalg %d not"
-				" supported\n", ctx->signalg);
-			goto out_err;
-	}
-	if (ctx->sealalg != SEAL_ALG_NONE && ctx->sealalg != SEAL_ALG_DES) {
-		dprintk("RPC:      gss_krb5_seal: ctx->sealalg %d not supported\n",
-			ctx->sealalg);
-		goto out_err;
-	}
-
 	token->len = g_token_size(&ctx->mech_used, 22);
 
 	ptr = token->data;
@@ -115,37 +99,26 @@
 	krb5_hdr = ptr - 2;
 	msg_start = krb5_hdr + 24;
 
-	*(__be16 *)(krb5_hdr + 2) = htons(ctx->signalg);
+	*(__be16 *)(krb5_hdr + 2) = htons(SGN_ALG_DES_MAC_MD5);
 	memset(krb5_hdr + 4, 0xff, 4);
 
-	if (make_checksum(checksum_type, krb5_hdr, 8, text, 0, &md5cksum))
-			goto out_err;
+	if (make_checksum("md5", krb5_hdr, 8, text, 0, &md5cksum))
+		return GSS_S_FAILURE;
 
-	switch (ctx->signalg) {
-	case SGN_ALG_DES_MAC_MD5:
-		if (krb5_encrypt(ctx->seq, NULL, md5cksum.data,
-				  md5cksum.data, md5cksum.len))
-			goto out_err;
-		memcpy(krb5_hdr + 16,
-		       md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
-		       KRB5_CKSUM_LENGTH);
+	if (krb5_encrypt(ctx->seq, NULL, md5cksum.data,
+			  md5cksum.data, md5cksum.len))
+		return GSS_S_FAILURE;
 
-		dprintk("RPC:      make_seal_token: cksum data: \n");
-		print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0);
-		break;
-	default:
-		BUG();
-	}
+	memcpy(krb5_hdr + 16, md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
+	       KRB5_CKSUM_LENGTH);
 
 	spin_lock(&krb5_seq_lock);
 	seq_send = ctx->seq_send++;
 	spin_unlock(&krb5_seq_lock);
 
-	if ((krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff,
-			       seq_send, krb5_hdr + 16, krb5_hdr + 8)))
-		goto out_err;
+	if (krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff,
+			       seq_send, krb5_hdr + 16, krb5_hdr + 8))
+		return GSS_S_FAILURE;
 
-	return ((ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE);
-out_err:
-	return GSS_S_FAILURE;
+	return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
 }
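
In the seal path the sequence counter is sampled into the local seq_send while krb5_seq_lock is held, and it is that snapshot which gets folded into the token; reading ctx->seq_send again outside the lock would reintroduce the race the lock exists to close (the wrap path further down already uses the local). The idiom, reduced to a runnable pthread example:

	/* build with: cc -pthread */
	#include <stdio.h>
	#include <stdint.h>
	#include <pthread.h>

	static pthread_mutex_t seq_lock = PTHREAD_MUTEX_INITIALIZER;
	static uint32_t seq_send;

	/* Take the per-context sequence number atomically, then use only the
	 * local snapshot afterwards (mirroring ctx->seq_send++ under krb5_seq_lock). */
	static uint32_t next_seq(void)
	{
		uint32_t snapshot;

		pthread_mutex_lock(&seq_lock);
		snapshot = seq_send++;
		pthread_mutex_unlock(&seq_lock);
		return snapshot;
	}

	int main(void)
	{
		printf("token 1 uses seq %u\n", next_seq());
		printf("token 2 uses seq %u\n", next_seq());
		return 0;
	}
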
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index 0828cf6..87f8977 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -78,7 +78,6 @@
 	struct krb5_ctx		*ctx = gss_ctx->internal_ctx_id;
 	int			signalg;
 	int			sealalg;
-	s32			checksum_type;
 	char			cksumdata[16];
 	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
 	s32			now;
@@ -86,96 +85,54 @@
 	s32			seqnum;
 	unsigned char		*ptr = (unsigned char *)read_token->data;
 	int			bodysize;
-	u32			ret = GSS_S_DEFECTIVE_TOKEN;
 
 	dprintk("RPC:      krb5_read_token\n");
 
 	if (g_verify_token_header(&ctx->mech_used, &bodysize, &ptr,
 					read_token->len))
-		goto out;
+		return GSS_S_DEFECTIVE_TOKEN;
 
 	if ((*ptr++ != ((KG_TOK_MIC_MSG>>8)&0xff)) ||
 	    (*ptr++ != ( KG_TOK_MIC_MSG    &0xff))   )
-		goto out;
+		return GSS_S_DEFECTIVE_TOKEN;
 
 	/* XXX sanity-check bodysize?? */
 
-	/* get the sign and seal algorithms */
-
 	signalg = ptr[0] + (ptr[1] << 8);
-	sealalg = ptr[2] + (ptr[3] << 8);
+	if (signalg != SGN_ALG_DES_MAC_MD5)
+		return GSS_S_DEFECTIVE_TOKEN;
 
-	/* Sanity checks */
+	sealalg = ptr[2] + (ptr[3] << 8);
+	if (sealalg != SEAL_ALG_NONE)
+		return GSS_S_DEFECTIVE_TOKEN;
 
 	if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
-		goto out;
+		return GSS_S_DEFECTIVE_TOKEN;
 
-	if (sealalg != 0xffff)
-		goto out;
+	if (make_checksum("md5", ptr - 2, 8, message_buffer, 0, &md5cksum))
+		return GSS_S_FAILURE;
 
-	/* there are several mappings of seal algorithms to sign algorithms,
-	   but few enough that we can try them all. */
+	if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, md5cksum.data, 16))
+		return GSS_S_FAILURE;
 
-	if ((ctx->sealalg == SEAL_ALG_NONE && signalg > 1) ||
-	    (ctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) ||
-	    (ctx->sealalg == SEAL_ALG_DES3KD &&
-	     signalg != SGN_ALG_HMAC_SHA1_DES3_KD))
-		goto out;
-
-	/* compute the checksum of the message */
-
-	/* initialize the the cksum */
-	switch (signalg) {
-	case SGN_ALG_DES_MAC_MD5:
-		checksum_type = CKSUMTYPE_RSA_MD5;
-		break;
-	default:
-		ret = GSS_S_DEFECTIVE_TOKEN;
-		goto out;
-	}
-
-	switch (signalg) {
-	case SGN_ALG_DES_MAC_MD5:
-		ret = make_checksum(checksum_type, ptr - 2, 8,
-					 message_buffer, 0, &md5cksum);
-		if (ret)
-			goto out;
-
-		ret = krb5_encrypt(ctx->seq, NULL, md5cksum.data,
-				   md5cksum.data, 16);
-		if (ret)
-			goto out;
-
-		if (memcmp(md5cksum.data + 8, ptr + 14, 8)) {
-			ret = GSS_S_BAD_SIG;
-			goto out;
-		}
-		break;
-	default:
-		ret = GSS_S_DEFECTIVE_TOKEN;
-		goto out;
-	}
+	if (memcmp(md5cksum.data + 8, ptr + 14, 8))
+		return GSS_S_BAD_SIG;
 
 	/* it got through unscathed.  Make sure the context is unexpired */
 
 	now = get_seconds();
 
-	ret = GSS_S_CONTEXT_EXPIRED;
 	if (now > ctx->endtime)
-		goto out;
+		return GSS_S_CONTEXT_EXPIRED;
 
 	/* do sequencing checks */
 
-	ret = GSS_S_BAD_SIG;
-	if ((ret = krb5_get_seq_num(ctx->seq, ptr + 14, ptr + 6, &direction,
-				    &seqnum)))
-		goto out;
+	if (krb5_get_seq_num(ctx->seq, ptr + 14, ptr + 6, &direction, &seqnum))
+		return GSS_S_FAILURE;
 
 	if ((ctx->initiate && direction != 0xff) ||
 	    (!ctx->initiate && direction != 0))
-		goto out;
+		return GSS_S_BAD_SIG;
 
-	ret = GSS_S_COMPLETE;
-out:
-	return ret;
+	return GSS_S_COMPLETE;
 }
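
The krb5 token verification path above (and the wrap/unwrap pair below) drops the ret-plus-goto-out error ladder: each failed check now returns its GSS status code on the spot, which removes the shared out: label and the easy-to-miss resets of ret between checks. A toy illustration of the shape of that transformation; the status names and checks are made up:

	#include <stdio.h>

	enum { OK = 0, E_TOKEN = 1, E_SIG = 2, E_EXPIRED = 3 };

	/* After the cleanup: every failed check returns its status directly,
	 * instead of loading 'ret' and jumping to a common 'out:' label. */
	static int verify(int header_ok, int sig_ok, int unexpired)
	{
		if (!header_ok)
			return E_TOKEN;
		if (!sig_ok)
			return E_SIG;
		if (!unexpired)
			return E_EXPIRED;
		return OK;
	}

	int main(void)
	{
		printf("good token -> %d\n", verify(1, 1, 1));
		printf("bad signature -> %d\n", verify(1, 0, 1));
		return 0;
	}
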
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index cc45c16..fe25b3d 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -57,9 +57,9 @@
 					>>PAGE_CACHE_SHIFT;
 		int offset = (buf->page_base + len - 1)
 					& (PAGE_CACHE_SIZE - 1);
-		ptr = kmap_atomic(buf->pages[last], KM_SKB_SUNRPC_DATA);
+		ptr = kmap_atomic(buf->pages[last], KM_USER0);
 		pad = *(ptr + offset);
-		kunmap_atomic(ptr, KM_SKB_SUNRPC_DATA);
+		kunmap_atomic(ptr, KM_USER0);
 		goto out;
 	} else
 		len -= buf->page_len;
@@ -120,7 +120,6 @@
 		struct xdr_buf *buf, struct page **pages)
 {
 	struct krb5_ctx		*kctx = ctx->internal_ctx_id;
-	s32			checksum_type;
 	char			cksumdata[16];
 	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
 	int			blocksize = 0, plainlen;
@@ -134,21 +133,6 @@
 
 	now = get_seconds();
 
-	switch (kctx->signalg) {
-		case SGN_ALG_DES_MAC_MD5:
-			checksum_type = CKSUMTYPE_RSA_MD5;
-			break;
-		default:
-			dprintk("RPC:      gss_krb5_seal: kctx->signalg %d not"
-				" supported\n", kctx->signalg);
-			goto out_err;
-	}
-	if (kctx->sealalg != SEAL_ALG_NONE && kctx->sealalg != SEAL_ALG_DES) {
-		dprintk("RPC:      gss_krb5_seal: kctx->sealalg %d not supported\n",
-			kctx->sealalg);
-		goto out_err;
-	}
-
 	blocksize = crypto_blkcipher_blocksize(kctx->enc);
 	gss_krb5_add_padding(buf, offset, blocksize);
 	BUG_ON((buf->len - offset) % blocksize);
@@ -175,37 +159,27 @@
 	/* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
 	krb5_hdr = ptr - 2;
 	msg_start = krb5_hdr + 24;
-	/* XXXJBF: */ BUG_ON(buf->head[0].iov_base + offset + headlen != msg_start + blocksize);
 
-	*(__be16 *)(krb5_hdr + 2) = htons(kctx->signalg);
+	*(__be16 *)(krb5_hdr + 2) = htons(SGN_ALG_DES_MAC_MD5);
 	memset(krb5_hdr + 4, 0xff, 4);
-	*(__be16 *)(krb5_hdr + 4) = htons(kctx->sealalg);
+	*(__be16 *)(krb5_hdr + 4) = htons(SEAL_ALG_DES);
 
 	make_confounder(msg_start, blocksize);
 
 	/* XXXJBF: UGH!: */
 	tmp_pages = buf->pages;
 	buf->pages = pages;
-	if (make_checksum(checksum_type, krb5_hdr, 8, buf,
+	if (make_checksum("md5", krb5_hdr, 8, buf,
 				offset + headlen - blocksize, &md5cksum))
-		goto out_err;
+		return GSS_S_FAILURE;
 	buf->pages = tmp_pages;
 
-	switch (kctx->signalg) {
-	case SGN_ALG_DES_MAC_MD5:
-		if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
-				  md5cksum.data, md5cksum.len))
-			goto out_err;
-		memcpy(krb5_hdr + 16,
-		       md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
-		       KRB5_CKSUM_LENGTH);
-
-		dprintk("RPC:      make_seal_token: cksum data: \n");
-		print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0);
-		break;
-	default:
-		BUG();
-	}
+	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
+			  md5cksum.data, md5cksum.len))
+		return GSS_S_FAILURE;
+	memcpy(krb5_hdr + 16,
+	       md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
+	       KRB5_CKSUM_LENGTH);
 
 	spin_lock(&krb5_seq_lock);
 	seq_send = kctx->seq_send++;
@@ -215,15 +189,13 @@
 	 * and encrypt at the same time: */
 	if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
 			       seq_send, krb5_hdr + 16, krb5_hdr + 8)))
-		goto out_err;
+		return GSS_S_FAILURE;
 
 	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
 									pages))
-		goto out_err;
+		return GSS_S_FAILURE;
 
-	return ((kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE);
-out_err:
-	return GSS_S_FAILURE;
+	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
 }
 
 u32
@@ -232,7 +204,6 @@
 	struct krb5_ctx		*kctx = ctx->internal_ctx_id;
 	int			signalg;
 	int			sealalg;
-	s32			checksum_type;
 	char			cksumdata[16];
 	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
 	s32			now;
@@ -240,7 +211,6 @@
 	s32			seqnum;
 	unsigned char		*ptr;
 	int			bodysize;
-	u32			ret = GSS_S_DEFECTIVE_TOKEN;
 	void			*data_start, *orig_start;
 	int			data_len;
 	int			blocksize;
@@ -250,98 +220,58 @@
 	ptr = (u8 *)buf->head[0].iov_base + offset;
 	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
 					buf->len - offset))
-		goto out;
+		return GSS_S_DEFECTIVE_TOKEN;
 
 	if ((*ptr++ != ((KG_TOK_WRAP_MSG>>8)&0xff)) ||
 	    (*ptr++ !=  (KG_TOK_WRAP_MSG    &0xff))   )
-		goto out;
+		return GSS_S_DEFECTIVE_TOKEN;
 
 	/* XXX sanity-check bodysize?? */
 
 	/* get the sign and seal algorithms */
 
 	signalg = ptr[0] + (ptr[1] << 8);
-	sealalg = ptr[2] + (ptr[3] << 8);
+	if (signalg != SGN_ALG_DES_MAC_MD5)
+		return GSS_S_DEFECTIVE_TOKEN;
 
-	/* Sanity checks */
+	sealalg = ptr[2] + (ptr[3] << 8);
+	if (sealalg != SEAL_ALG_DES)
+		return GSS_S_DEFECTIVE_TOKEN;
 
 	if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
-		goto out;
-
-	if (sealalg == 0xffff)
-		goto out;
-
-	/* in the current spec, there is only one valid seal algorithm per
-	   key type, so a simple comparison is ok */
-
-	if (sealalg != kctx->sealalg)
-		goto out;
-
-	/* there are several mappings of seal algorithms to sign algorithms,
-	   but few enough that we can try them all. */
-
-	if ((kctx->sealalg == SEAL_ALG_NONE && signalg > 1) ||
-	    (kctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) ||
-	    (kctx->sealalg == SEAL_ALG_DES3KD &&
-	     signalg != SGN_ALG_HMAC_SHA1_DES3_KD))
-		goto out;
+		return GSS_S_DEFECTIVE_TOKEN;
 
 	if (gss_decrypt_xdr_buf(kctx->enc, buf,
 			ptr + 22 - (unsigned char *)buf->head[0].iov_base))
-		goto out;
+		return GSS_S_DEFECTIVE_TOKEN;
 
-	/* compute the checksum of the message */
+	if (make_checksum("md5", ptr - 2, 8, buf,
+		 ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
+		return GSS_S_FAILURE;
 
-	/* initialize the the cksum */
-	switch (signalg) {
-	case SGN_ALG_DES_MAC_MD5:
-		checksum_type = CKSUMTYPE_RSA_MD5;
-		break;
-	default:
-		ret = GSS_S_DEFECTIVE_TOKEN;
-		goto out;
-	}
+	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
+			   md5cksum.data, md5cksum.len))
+		return GSS_S_FAILURE;
 
-	switch (signalg) {
-	case SGN_ALG_DES_MAC_MD5:
-		ret = make_checksum(checksum_type, ptr - 2, 8, buf,
-			 ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum);
-		if (ret)
-			goto out;
-
-		ret = krb5_encrypt(kctx->seq, NULL, md5cksum.data,
-				   md5cksum.data, md5cksum.len);
-		if (ret)
-			goto out;
-
-		if (memcmp(md5cksum.data + 8, ptr + 14, 8)) {
-			ret = GSS_S_BAD_SIG;
-			goto out;
-		}
-		break;
-	default:
-		ret = GSS_S_DEFECTIVE_TOKEN;
-		goto out;
-	}
+	if (memcmp(md5cksum.data + 8, ptr + 14, 8))
+		return GSS_S_BAD_SIG;
 
 	/* it got through unscathed.  Make sure the context is unexpired */
 
 	now = get_seconds();
 
-	ret = GSS_S_CONTEXT_EXPIRED;
 	if (now > kctx->endtime)
-		goto out;
+		return GSS_S_CONTEXT_EXPIRED;
 
 	/* do sequencing checks */
 
-	ret = GSS_S_BAD_SIG;
-	if ((ret = krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
-				    &seqnum)))
-		goto out;
+	if (krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
+				    &seqnum))
+		return GSS_S_BAD_SIG;
 
 	if ((kctx->initiate && direction != 0xff) ||
 	    (!kctx->initiate && direction != 0))
-		goto out;
+		return GSS_S_BAD_SIG;
 
 	/* Copy the data back to the right position.  XXX: Would probably be
 	 * better to copy and encrypt at the same time. */
@@ -354,11 +284,8 @@
 	buf->head[0].iov_len -= (data_start - orig_start);
 	buf->len -= (data_start - orig_start);
 
-	ret = GSS_S_DEFECTIVE_TOKEN;
 	if (gss_krb5_remove_padding(buf, blocksize))
-		goto out;
+		return GSS_S_DEFECTIVE_TOKEN;
 
-	ret = GSS_S_COMPLETE;
-out:
-	return ret;
+	return GSS_S_COMPLETE;
 }
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index bdedf45..4146507 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -76,140 +76,79 @@
 	q = (const void *)((const char *)p + len);
 	if (unlikely(q > end || q < p))
 		return ERR_PTR(-EFAULT);
-	res->data = kmalloc(len, GFP_KERNEL);
+	res->data = kmemdup(p, len, GFP_KERNEL);
 	if (unlikely(res->data == NULL))
 		return ERR_PTR(-ENOMEM);
-	memcpy(res->data, p, len);
 	return q;
 }
 
-static inline const void *
-get_key(const void *p, const void *end, struct crypto_blkcipher **res,
-	int *resalg)
-{
-	struct xdr_netobj	key = { 0 };
-	int			setkey = 0;
-	char			*alg_name;
-
-	p = simple_get_bytes(p, end, resalg, sizeof(*resalg));
-	if (IS_ERR(p))
-		goto out_err;
-	p = simple_get_netobj(p, end, &key);
-	if (IS_ERR(p))
-		goto out_err;
-
-	switch (*resalg) {
-		case NID_des_cbc:
-			alg_name = "cbc(des)";
-			setkey = 1;
-			break;
-		case NID_cast5_cbc:
-			/* XXXX here in name only, not used */
-			alg_name = "cbc(cast5)";
-			setkey = 0; /* XXX will need to set to 1 */
-			break;
-		case NID_md5:
-			if (key.len == 0) {
-				dprintk("RPC: SPKM3 get_key: NID_md5 zero Key length\n");
-			}
-			alg_name = "md5";
-			setkey = 0;
-			break;
-		default:
-			dprintk("gss_spkm3_mech: unsupported algorithm %d\n", *resalg);
-			goto out_err_free_key;
-	}
-	*res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(*res)) {
-		printk("gss_spkm3_mech: unable to initialize crypto algorthm %s\n", alg_name);
-		*res = NULL;
-		goto out_err_free_key;
-	}
-	if (setkey) {
-		if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
-			printk("gss_spkm3_mech: error setting key for crypto algorthm %s\n", alg_name);
-			goto out_err_free_tfm;
-		}
-	}
-
-	if(key.len > 0)
-		kfree(key.data);
-	return p;
-
-out_err_free_tfm:
-	crypto_free_blkcipher(*res);
-out_err_free_key:
-	if(key.len > 0)
-		kfree(key.data);
-	p = ERR_PTR(-EINVAL);
-out_err:
-	return p;
-}
-
 static int
 gss_import_sec_context_spkm3(const void *p, size_t len,
 				struct gss_ctx *ctx_id)
 {
 	const void *end = (const void *)((const char *)p + len);
 	struct	spkm3_ctx *ctx;
+	int	version;
 
 	if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
 		goto out_err;
 
+	p = simple_get_bytes(p, end, &version, sizeof(version));
+	if (IS_ERR(p))
+		goto out_err_free_ctx;
+	if (version != 1) {
+		dprintk("RPC: unknown spkm3 token format: obsolete nfs-utils?\n");
+		goto out_err_free_ctx;
+	}
+
 	p = simple_get_netobj(p, end, &ctx->ctx_id);
 	if (IS_ERR(p))
 		goto out_err_free_ctx;
 
-	p = simple_get_bytes(p, end, &ctx->qop, sizeof(ctx->qop));
+	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
 	if (IS_ERR(p))
 		goto out_err_free_ctx_id;
 
 	p = simple_get_netobj(p, end, &ctx->mech_used);
 	if (IS_ERR(p))
-		goto out_err_free_mech;
+		goto out_err_free_ctx_id;
 
 	p = simple_get_bytes(p, end, &ctx->ret_flags, sizeof(ctx->ret_flags));
 	if (IS_ERR(p))
 		goto out_err_free_mech;
 
-	p = simple_get_bytes(p, end, &ctx->req_flags, sizeof(ctx->req_flags));
+	p = simple_get_netobj(p, end, &ctx->conf_alg);
 	if (IS_ERR(p))
 		goto out_err_free_mech;
 
-	p = simple_get_netobj(p, end, &ctx->share_key);
+	p = simple_get_netobj(p, end, &ctx->derived_conf_key);
 	if (IS_ERR(p))
-		goto out_err_free_s_key;
+		goto out_err_free_conf_alg;
 
-	p = get_key(p, end, &ctx->derived_conf_key, &ctx->conf_alg);
+	p = simple_get_netobj(p, end, &ctx->intg_alg);
 	if (IS_ERR(p))
-		goto out_err_free_s_key;
+		goto out_err_free_conf_key;
 
-	p = get_key(p, end, &ctx->derived_integ_key, &ctx->intg_alg);
+	p = simple_get_netobj(p, end, &ctx->derived_integ_key);
 	if (IS_ERR(p))
-		goto out_err_free_key1;
-
-	p = simple_get_bytes(p, end, &ctx->keyestb_alg, sizeof(ctx->keyestb_alg));
-	if (IS_ERR(p))
-		goto out_err_free_key2;
-
-	p = simple_get_bytes(p, end, &ctx->owf_alg, sizeof(ctx->owf_alg));
-	if (IS_ERR(p))
-		goto out_err_free_key2;
+		goto out_err_free_intg_alg;
 
 	if (p != end)
-		goto out_err_free_key2;
+		goto out_err_free_intg_key;
 
 	ctx_id->internal_ctx_id = ctx;
 
 	dprintk("Successfully imported new spkm context.\n");
 	return 0;
 
-out_err_free_key2:
-	crypto_free_blkcipher(ctx->derived_integ_key);
-out_err_free_key1:
-	crypto_free_blkcipher(ctx->derived_conf_key);
-out_err_free_s_key:
-	kfree(ctx->share_key.data);
+out_err_free_intg_key:
+	kfree(ctx->derived_integ_key.data);
+out_err_free_intg_alg:
+	kfree(ctx->intg_alg.data);
+out_err_free_conf_key:
+	kfree(ctx->derived_conf_key.data);
+out_err_free_conf_alg:
+	kfree(ctx->conf_alg.data);
 out_err_free_mech:
 	kfree(ctx->mech_used.data);
 out_err_free_ctx_id:
@@ -221,13 +160,16 @@
 }
 
 static void
-gss_delete_sec_context_spkm3(void *internal_ctx) {
+gss_delete_sec_context_spkm3(void *internal_ctx)
+{
 	struct spkm3_ctx *sctx = internal_ctx;
 
-	crypto_free_blkcipher(sctx->derived_integ_key);
-	crypto_free_blkcipher(sctx->derived_conf_key);
-	kfree(sctx->share_key.data);
+	kfree(sctx->derived_integ_key.data);
+	kfree(sctx->intg_alg.data);
+	kfree(sctx->derived_conf_key.data);
+	kfree(sctx->conf_alg.data);
 	kfree(sctx->mech_used.data);
+	kfree(sctx->ctx_id.data);
 	kfree(sctx);
 }
 
@@ -239,7 +181,6 @@
 	u32 maj_stat = 0;
 	struct spkm3_ctx *sctx = ctx->internal_ctx_id;
 
-	dprintk("RPC: gss_verify_mic_spkm3 calling spkm3_read_token\n");
 	maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK);
 
 	dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat);
@@ -254,10 +195,9 @@
 	u32 err = 0;
 	struct spkm3_ctx *sctx = ctx->internal_ctx_id;
 
-	dprintk("RPC: gss_get_mic_spkm3\n");
-
 	err = spkm3_make_token(sctx, message_buffer,
-			      message_token, SPKM_MIC_TOK);
+				message_token, SPKM_MIC_TOK);
+	dprintk("RPC: gss_get_mic_spkm3 returning %d\n", err);
 	return err;
 }
 
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
index 18c7862..b179d58 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c
@@ -39,11 +39,17 @@
 #include <linux/sunrpc/gss_spkm3.h>
 #include <linux/random.h>
 #include <linux/crypto.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+#include <linux/sunrpc/xdr.h>
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY        RPCDBG_AUTH
 #endif
 
+const struct xdr_netobj hmac_md5_oid = { 8, "\x2B\x06\x01\x05\x05\x08\x01\x01"};
+const struct xdr_netobj cast5_cbc_oid = {9, "\x2A\x86\x48\x86\xF6\x7D\x07\x42\x0A"};
+
 /*
  * spkm3_make_token()
  *
@@ -66,29 +72,23 @@
 	int			ctxelen = 0, ctxzbit = 0;
 	int			md5elen = 0, md5zbit = 0;
 
-	dprintk("RPC: spkm3_make_token\n");
-
 	now = jiffies;
 
 	if (ctx->ctx_id.len != 16) {
 		dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n",
-			ctx->ctx_id.len);
+				ctx->ctx_id.len);
 		goto out_err;
 	}
-		
-	switch (ctx->intg_alg) {
-		case NID_md5:
-			checksum_type = CKSUMTYPE_RSA_MD5;
-			break;
-		default:
-			dprintk("RPC: gss_spkm3_seal: ctx->signalg %d not"
-				" supported\n", ctx->intg_alg);
-			goto out_err;
-	}
-	/* XXX since we don't support WRAP, perhaps we don't care... */
-	if (ctx->conf_alg != NID_cast5_cbc) {
-		dprintk("RPC: gss_spkm3_seal: ctx->sealalg %d not supported\n",
-			ctx->conf_alg);
+
+	if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) {
+		dprintk("RPC: gss_spkm3_seal: unsupported I-ALG algorithm."
+				"only support hmac-md5 I-ALG.\n");
+		goto out_err;
+	} else
+		checksum_type = CKSUMTYPE_HMAC_MD5;
+
+	if (!g_OID_equal(&ctx->conf_alg, &cast5_cbc_oid)) {
+		dprintk("RPC: gss_spkm3_seal: unsupported C-ALG algorithm\n");
 		goto out_err;
 	}
 
@@ -96,10 +96,10 @@
 		/* Calculate checksum over the mic-header */
 		asn1_bitstring_len(&ctx->ctx_id, &ctxelen, &ctxzbit);
 		spkm3_mic_header(&mic_hdr.data, &mic_hdr.len, ctx->ctx_id.data,
-		                         ctxelen, ctxzbit);
-
-		if (make_checksum(checksum_type, mic_hdr.data, mic_hdr.len, 
-		                             text, 0, &md5cksum))
+				ctxelen, ctxzbit);
+		if (make_spkm3_checksum(checksum_type, &ctx->derived_integ_key,
+					(char *)mic_hdr.data, mic_hdr.len,
+					text, 0, &md5cksum))
 			goto out_err;
 
 		asn1_bitstring_len(&md5cksum, &md5elen, &md5zbit);
@@ -121,7 +121,66 @@
 
 	return  GSS_S_COMPLETE;
 out_err:
+	if (md5cksum.data)
+		kfree(md5cksum.data);
+
 	token->data = NULL;
 	token->len = 0;
 	return GSS_S_FAILURE;
 }
+
+static int
+spkm3_checksummer(struct scatterlist *sg, void *data)
+{
+	struct hash_desc *desc = data;
+
+	return crypto_hash_update(desc, sg, sg->length);
+}
+
+/* checksum the plaintext data and hdrlen bytes of the token header */
+s32
+make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header,
+		    unsigned int hdrlen, struct xdr_buf *body,
+		    unsigned int body_offset, struct xdr_netobj *cksum)
+{
+	char				*cksumname;
+	struct hash_desc		desc; /* XXX add to ctx? */
+	struct scatterlist		sg[1];
+	int err;
+
+	switch (cksumtype) {
+		case CKSUMTYPE_HMAC_MD5:
+			cksumname = "md5";
+			break;
+		default:
+			dprintk("RPC:      spkm3_make_checksum:"
+					" unsupported checksum %d", cksumtype);
+			return GSS_S_FAILURE;
+	}
+
+	if (key->data == NULL || key->len <= 0) return GSS_S_FAILURE;
+
+	desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(desc.tfm))
+		return GSS_S_FAILURE;
+	cksum->len = crypto_hash_digestsize(desc.tfm);
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	err = crypto_hash_setkey(desc.tfm, key->data, key->len);
+	if (err)
+		goto out;
+
+	sg_set_buf(sg, header, hdrlen);
+	crypto_hash_update(&desc, sg, 1);
+
+	xdr_process_buf(body, body_offset, body->len - body_offset,
+			spkm3_checksummer, &desc);
+	crypto_hash_final(&desc, cksum->data);
+
+out:
+	crypto_free_hash(desc.tfm);
+
+	return err ? GSS_S_FAILURE : 0;
+}
+
+EXPORT_SYMBOL(make_spkm3_checksum);
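
The new make_spkm3_checksum() above switches SPKM3 from an unkeyed MD5 digest to a keyed hash driven through the 2.6-era crypto_hash interface: allocate a tfm, set the key, feed scatterlists, read out the digest. Below is a minimal self-contained sketch of that interface for a flat buffer, using the hmac(md5) template and purely illustrative names; it is not the SUNRPC helper itself, which walks an xdr_buf via xdr_process_buf() instead.

	#include <linux/crypto.h>
	#include <linux/scatterlist.h>
	#include <linux/err.h>

	/* Hedged sketch: one-shot keyed digest over a contiguous buffer. */
	static int hmac_md5_digest(u8 *key, unsigned int keylen,
				   u8 *data, unsigned int len, u8 *out)
	{
		struct hash_desc desc;
		struct scatterlist sg[1];
		int err;

		desc.tfm = crypto_alloc_hash("hmac(md5)", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(desc.tfm))
			return PTR_ERR(desc.tfm);
		desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

		err = crypto_hash_setkey(desc.tfm, key, keylen);
		if (err == 0) {
			sg_set_buf(sg, data, len);
			err = crypto_hash_digest(&desc, sg, len, out);
		}
		crypto_free_hash(desc.tfm);
		return err;
	}
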
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index 854a983..35188b6 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -172,10 +172,10 @@
 	*(u8 *)hptr++ = zbit;
 	memcpy(hptr, ctxdata, elen);
 	hptr += elen;
-	*hdrlen = hptr - top; 
+	*hdrlen = hptr - top;
 }
-		
-/* 
+
+/*
  * spkm3_mic_innercontext_token()
  *
  * *tokp points to the beginning of the SPKM_MIC token  described 
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
index 8537f58..e54581c 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
@@ -54,70 +54,70 @@
 		struct xdr_buf *message_buffer, /* signbuf */
 		int toktype)
 {
+	s32			checksum_type;
 	s32			code;
 	struct xdr_netobj	wire_cksum = {.len =0, .data = NULL};
 	char			cksumdata[16];
 	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
 	unsigned char		*ptr = (unsigned char *)read_token->data;
-	unsigned char           *cksum;
+	unsigned char		*cksum;
 	int			bodysize, md5elen;
 	int			mic_hdrlen;
 	u32			ret = GSS_S_DEFECTIVE_TOKEN;
 
-	dprintk("RPC: spkm3_read_token read_token->len %d\n", read_token->len);
-
 	if (g_verify_token_header((struct xdr_netobj *) &ctx->mech_used,
 					&bodysize, &ptr, read_token->len))
 		goto out;
 
 	/* decode the token */
 
-	if (toktype == SPKM_MIC_TOK) {
-
-		if ((ret = spkm3_verify_mic_token(&ptr, &mic_hdrlen, &cksum))) 
-			goto out;
-
-		if (*cksum++ != 0x03) {
-			dprintk("RPC: spkm3_read_token BAD checksum type\n");
-			goto out;
-		}
-		md5elen = *cksum++; 
-		cksum++; 	/* move past the zbit */
-	
-		if(!decode_asn1_bitstring(&wire_cksum, cksum, md5elen - 1, 16))
-			goto out;
-
-		/* HARD CODED FOR MD5 */
-
-		/* compute the checksum of the message.
-		*  ptr + 2 = start of header piece of checksum
-		*  mic_hdrlen + 2 = length of header piece of checksum
-		*/
-		ret = GSS_S_DEFECTIVE_TOKEN;
-		code = make_checksum(CKSUMTYPE_RSA_MD5, ptr + 2, 
-					mic_hdrlen + 2, 
-		                        message_buffer, 0, &md5cksum);
-
-		if (code)
-			goto out;
-
-		dprintk("RPC: spkm3_read_token: digest wire_cksum.len %d:\n", 
-			wire_cksum.len);
-		dprintk("          md5cksum.data\n");
-		print_hexl((u32 *) md5cksum.data, 16, 0);
-		dprintk("          cksum.data:\n");
-		print_hexl((u32 *) wire_cksum.data, wire_cksum.len, 0);
-
-		ret = GSS_S_BAD_SIG;
-		code = memcmp(md5cksum.data, wire_cksum.data, wire_cksum.len);
-		if (code)
-			goto out;
-
-	} else { 
-		dprintk("RPC: BAD or UNSUPPORTED SPKM3 token type: %d\n",toktype);
+	if (toktype != SPKM_MIC_TOK) {
+		dprintk("RPC: BAD SPKM3 token type: %d\n", toktype);
 		goto out;
 	}
 
+	if ((ret = spkm3_verify_mic_token(&ptr, &mic_hdrlen, &cksum)))
+		goto out;
+
+	if (*cksum++ != 0x03) {
+		dprintk("RPC: spkm3_read_token BAD checksum type\n");
+		goto out;
+	}
+	md5elen = *cksum++;
+	cksum++; 	/* move past the zbit */
+
+	if (!decode_asn1_bitstring(&wire_cksum, cksum, md5elen - 1, 16))
+		goto out;
+
+	/* HARD CODED FOR MD5 */
+
+	/* compute the checksum of the message.
+	 * ptr + 2 = start of header piece of checksum
+	 * mic_hdrlen + 2 = length of header piece of checksum
+	 */
+	ret = GSS_S_DEFECTIVE_TOKEN;
+	if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) {
+		dprintk("RPC: gss_spkm3_seal: unsupported I-ALG algorithm\n");
+		goto out;
+	}
+
+	checksum_type = CKSUMTYPE_HMAC_MD5;
+
+	code = make_spkm3_checksum(checksum_type,
+		&ctx->derived_integ_key, ptr + 2, mic_hdrlen + 2,
+		message_buffer, 0, &md5cksum);
+
+	if (code)
+		goto out;
+
+	ret = GSS_S_BAD_SIG;
+	code = memcmp(md5cksum.data, wire_cksum.data, wire_cksum.len);
+	if (code) {
+		dprintk("RPC: bad MIC checksum\n");
+		goto out;
+	}
+
+
 	/* XXX: need to add expiration and sequencing */
 	ret = GSS_S_COMPLETE;
 out:
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 1f0f079..700353b 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -113,9 +113,7 @@
 static int dup_to_netobj(struct xdr_netobj *dst, char *src, int len)
 {
 	dst->len = len;
-	dst->data = (len ? kmalloc(len, GFP_KERNEL) : NULL);
-	if (dst->data)
-		memcpy(dst->data, src, len);
+	dst->data = (len ? kmemdup(src, len, GFP_KERNEL) : NULL);
 	if (len && !dst->data)
 		return -ENOMEM;
 	return 0;
@@ -756,10 +754,9 @@
 	if (!new)
 		goto out;
 	kref_init(&new->h.ref);
-	new->h.name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+	new->h.name = kstrdup(name, GFP_KERNEL);
 	if (!new->h.name)
 		goto out_free_dom;
-	strcpy(new->h.name, name);
 	new->h.flavour = &svcauthops_gss;
 	new->pseudoflavor = pseudoflavor;
 
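
dup_to_netobj() and the rpcsec_gss domain setup above now rely on kmemdup() and kstrdup() instead of the open-coded allocate-then-copy sequences. A minimal sketch of the equivalence, with hypothetical names and assuming only <linux/slab.h>, <linux/string.h>, and <linux/errno.h>:

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	/* Hedged sketch (hypothetical helper): the two idioms collapsed above. */
	static int copy_blob_and_name(const void *blob, size_t len, const char *name,
				      void **blob_copy, char **name_copy)
	{
		*blob_copy = kmemdup(blob, len, GFP_KERNEL);	/* was kmalloc + memcpy */
		if (!*blob_copy)
			return -ENOMEM;
		*name_copy = kstrdup(name, GFP_KERNEL);		/* was kmalloc + strcpy */
		if (!*name_copy) {
			kfree(*blob_copy);
			*blob_copy = NULL;
			return -ENOMEM;
		}
		return 0;
	}
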
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 00cb388..d96fd46 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -284,8 +284,8 @@
 static struct file_operations content_file_operations;
 static struct file_operations cache_flush_operations;
 
-static void do_cache_clean(void *data);
-static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
+static void do_cache_clean(struct work_struct *work);
+static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);
 
 void cache_register(struct cache_detail *cd)
 {
@@ -337,7 +337,7 @@
 	spin_unlock(&cache_list_lock);
 
 	/* start the cleaning process */
-	schedule_work(&cache_cleaner);
+	schedule_delayed_work(&cache_cleaner, 0);
 }
 
 int cache_unregister(struct cache_detail *cd)
@@ -461,7 +461,7 @@
 /*
  * We want to regularly clean the cache, so we need to schedule some work ...
  */
-static void do_cache_clean(void *data)
+static void do_cache_clean(struct work_struct *work)
 {
 	int delay = 5;
 	if (cache_clean() == -1)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 78696f2..aba528b 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -27,6 +27,7 @@
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/smp_lock.h>
 #include <linux/utsname.h>
 #include <linux/workqueue.h>
 
@@ -141,6 +142,10 @@
 	clnt->cl_vers     = version->number;
 	clnt->cl_stats    = program->stats;
 	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
+	err = -ENOMEM;
+	if (clnt->cl_metrics == NULL)
+		goto out_no_stats;
+	clnt->cl_program  = program;
 
 	if (!xprt_bound(clnt->cl_xprt))
 		clnt->cl_autobind = 1;
@@ -173,6 +178,8 @@
 		rpc_put_mount();
 	}
 out_no_path:
+	rpc_free_iostats(clnt->cl_metrics);
+out_no_stats:
 	if (clnt->cl_server != clnt->cl_inline_name)
 		kfree(clnt->cl_server);
 	kfree(clnt);
@@ -252,13 +259,19 @@
 rpc_clone_client(struct rpc_clnt *clnt)
 {
 	struct rpc_clnt *new;
+	int err = -ENOMEM;
 
-	new = kmalloc(sizeof(*new), GFP_KERNEL);
+	new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
 	if (!new)
 		goto out_no_clnt;
-	memcpy(new, clnt, sizeof(*new));
 	atomic_set(&new->cl_count, 1);
 	atomic_set(&new->cl_users, 0);
+	new->cl_metrics = rpc_alloc_iostats(clnt);
+	if (new->cl_metrics == NULL)
+		goto out_no_stats;
+	err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
+	if (err != 0)
+		goto out_no_path;
 	new->cl_parent = clnt;
 	atomic_inc(&clnt->cl_count);
 	new->cl_xprt = xprt_get(clnt->cl_xprt);
@@ -266,16 +279,17 @@
 	new->cl_autobind = 0;
 	new->cl_oneshot = 0;
 	new->cl_dead = 0;
-	if (!IS_ERR(new->cl_dentry))
-		dget(new->cl_dentry);
 	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
 	if (new->cl_auth)
 		atomic_inc(&new->cl_auth->au_count);
-	new->cl_metrics = rpc_alloc_iostats(clnt);
 	return new;
+out_no_path:
+	rpc_free_iostats(new->cl_metrics);
+out_no_stats:
+	kfree(new);
 out_no_clnt:
-	printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
-	return ERR_PTR(-ENOMEM);
+	dprintk("RPC: %s returned error %d\n", __FUNCTION__, err);
+	return ERR_PTR(err);
 }
 
 /*
@@ -328,16 +342,14 @@
 		rpcauth_destroy(clnt->cl_auth);
 		clnt->cl_auth = NULL;
 	}
-	if (clnt->cl_parent != clnt) {
-		if (!IS_ERR(clnt->cl_dentry))
-			dput(clnt->cl_dentry);
-		rpc_destroy_client(clnt->cl_parent);
-		goto out_free;
-	}
 	if (!IS_ERR(clnt->cl_dentry)) {
 		rpc_rmdir(clnt->cl_dentry);
 		rpc_put_mount();
 	}
+	if (clnt->cl_parent != clnt) {
+		rpc_destroy_client(clnt->cl_parent);
+		goto out_free;
+	}
 	if (clnt->cl_server != clnt->cl_inline_name)
 		kfree(clnt->cl_server);
 out_free:
@@ -467,10 +479,9 @@
 
 	BUG_ON(flags & RPC_TASK_ASYNC);
 
-	status = -ENOMEM;
 	task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL);
 	if (task == NULL)
-		goto out;
+		return -ENOMEM;
 
 	/* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
 	rpc_task_sigmask(task, &oldset);
@@ -479,15 +490,17 @@
 
 	/* Set up the call info struct and execute the task */
 	status = task->tk_status;
-	if (status == 0) {
-		atomic_inc(&task->tk_count);
-		status = rpc_execute(task);
-		if (status == 0)
-			status = task->tk_status;
+	if (status != 0) {
+		rpc_release_task(task);
+		goto out;
 	}
-	rpc_restore_sigmask(&oldset);
-	rpc_release_task(task);
+	atomic_inc(&task->tk_count);
+	status = rpc_execute(task);
+	if (status == 0)
+		status = task->tk_status;
+	rpc_put_task(task);
 out:
+	rpc_restore_sigmask(&oldset);
 	return status;
 }
 
@@ -529,8 +542,7 @@
 	rpc_restore_sigmask(&oldset);		
 	return status;
 out_release:
-	if (tk_ops->rpc_release != NULL)
-		tk_ops->rpc_release(data);
+	rpc_release_calldata(tk_ops, data);
 	return status;
 }
 
@@ -582,7 +594,11 @@
 char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format)
 {
 	struct rpc_xprt *xprt = clnt->cl_xprt;
-	return xprt->ops->print_addr(xprt, format);
+
+	if (xprt->address_strings[format] != NULL)
+		return xprt->address_strings[format];
+	else
+		return "unprintable";
 }
 EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
 
@@ -812,8 +828,10 @@
 	if (encode == NULL)
 		return;
 
+	lock_kernel();
 	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
 			task->tk_msg.rpc_argp);
+	unlock_kernel();
 	if (task->tk_status == -ENOMEM) {
 		/* XXX: Is this sane? */
 		rpc_delay(task, 3*HZ);
@@ -1144,9 +1162,12 @@
 
 	task->tk_action = rpc_exit_task;
 
-	if (decode)
+	if (decode) {
+		lock_kernel();
 		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
 						      task->tk_msg.rpc_resp);
+		unlock_kernel();
+	}
 	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
 					task->tk_status);
 	return;
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c
index e52afab..3946ec3 100644
--- a/net/sunrpc/pmap_clnt.c
+++ b/net/sunrpc/pmap_clnt.c
@@ -101,14 +101,14 @@
 	/* Autobind on cloned rpc clients is discouraged */
 	BUG_ON(clnt->cl_parent != clnt);
 
+	status = -EACCES;		/* tell caller to check again */
+	if (xprt_test_and_set_binding(xprt))
+		goto bailout_nowake;
+
 	/* Put self on queue before sending rpcbind request, in case
 	 * pmap_getport_done completes before we return from rpc_run_task */
 	rpc_sleep_on(&xprt->binding, task, NULL, NULL);
 
-	status = -EACCES;		/* tell caller to check again */
-	if (xprt_test_and_set_binding(xprt))
-		goto bailout_nofree;
-
 	/* Someone else may have bound if we slept */
 	status = 0;
 	if (xprt_bound(xprt))
@@ -134,7 +134,7 @@
 	child = rpc_run_task(pmap_clnt, RPC_TASK_ASYNC, &pmap_getport_ops, map);
 	if (IS_ERR(child))
 		goto bailout;
-	rpc_release_task(child);
+	rpc_put_task(child);
 
 	task->tk_xprt->stat.bind_count++;
 	return;
@@ -143,8 +143,9 @@
 	pmap_map_free(map);
 	xprt_put(xprt);
 bailout_nofree:
-	task->tk_status = status;
 	pmap_wake_portmap_waiters(xprt, status);
+bailout_nowake:
+	task->tk_status = status;
 }
 
 #ifdef CONFIG_ROOT_NFS
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9a0b41a..19703aa 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -33,7 +33,7 @@
 static struct file_system_type rpc_pipe_fs_type;
 
 
-static kmem_cache_t *rpc_inode_cachep __read_mostly;
+static struct kmem_cache *rpc_inode_cachep __read_mostly;
 
 #define RPC_UPCALL_TIMEOUT (30*HZ)
 
@@ -54,10 +54,11 @@
 }
 
 static void
-rpc_timeout_upcall_queue(void *data)
+rpc_timeout_upcall_queue(struct work_struct *work)
 {
 	LIST_HEAD(free_list);
-	struct rpc_inode *rpci = (struct rpc_inode *)data;
+	struct rpc_inode *rpci =
+		container_of(work, struct rpc_inode, queue_timeout.work);
 	struct inode *inode = &rpci->vfs_inode;
 	void (*destroy_msg)(struct rpc_pipe_msg *);
 
@@ -142,7 +143,7 @@
 rpc_alloc_inode(struct super_block *sb)
 {
 	struct rpc_inode *rpci;
-	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, SLAB_KERNEL);
+	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
 	if (!rpci)
 		return NULL;
 	return &rpci->vfs_inode;
@@ -823,7 +824,7 @@
 };
 
 static void
-init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct rpc_inode *rpci = (struct rpc_inode *) foo;
 
@@ -837,7 +838,8 @@
 		INIT_LIST_HEAD(&rpci->pipe);
 		rpci->pipelen = 0;
 		init_waitqueue_head(&rpci->waitq);
-		INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
+		INIT_DELAYED_WORK(&rpci->queue_timeout,
+				    rpc_timeout_upcall_queue);
 		rpci->ops = NULL;
 	}
 }
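
rpc_timeout_upcall_queue() above shows the 2.6.20 workqueue conversion: the callback now receives a struct work_struct * and recovers its context with container_of() on the embedded member (here queue_timeout.work) rather than taking a void *data argument. A minimal sketch of the pattern with a hypothetical structure, not SUNRPC code:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	/* Hedged sketch (hypothetical struct): new-style delayed work. */
	struct foo {
		int			pending;
		struct delayed_work	dwork;
	};

	static void foo_worker(struct work_struct *work)
	{
		/* Only the work_struct is passed in; the owning object is
		 * recovered through the .work member of the delayed_work. */
		struct foo *foo = container_of(work, struct foo, dwork.work);

		foo->pending = 0;
	}

	static void foo_setup(struct foo *foo)
	{
		foo->pending = 1;
		INIT_DELAYED_WORK(&foo->dwork, foo_worker);
		schedule_delayed_work(&foo->dwork, 5 * HZ);	/* run in ~5s */
	}
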
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index a1ab4ee..79bc4cd 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -34,14 +34,14 @@
 #define RPC_BUFFER_MAXSIZE	(2048)
 #define RPC_BUFFER_POOLSIZE	(8)
 #define RPC_TASK_POOLSIZE	(8)
-static kmem_cache_t	*rpc_task_slabp __read_mostly;
-static kmem_cache_t	*rpc_buffer_slabp __read_mostly;
+static struct kmem_cache	*rpc_task_slabp __read_mostly;
+static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
 static mempool_t	*rpc_task_mempool __read_mostly;
 static mempool_t	*rpc_buffer_mempool __read_mostly;
 
 static void			__rpc_default_timer(struct rpc_task *task);
 static void			rpciod_killall(void);
-static void			rpc_async_schedule(void *);
+static void			rpc_async_schedule(struct work_struct *);
 
 /*
  * RPC tasks sit here while waiting for conditions to improve.
@@ -266,12 +266,28 @@
 	return 0;
 }
 
+static void rpc_set_active(struct rpc_task *task)
+{
+	if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
+		return;
+	spin_lock(&rpc_sched_lock);
+#ifdef RPC_DEBUG
+	task->tk_magic = RPC_TASK_MAGIC_ID;
+	task->tk_pid = rpc_task_id++;
+#endif
+	/* Add to global list of all tasks */
+	list_add_tail(&task->tk_task, &all_tasks);
+	spin_unlock(&rpc_sched_lock);
+}
+
 /*
  * Mark an RPC call as having completed by clearing the 'active' bit
  */
-static inline void rpc_mark_complete_task(struct rpc_task *task)
+static void rpc_mark_complete_task(struct rpc_task *task)
 {
-	rpc_clear_active(task);
+	smp_mb__before_clear_bit();
+	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
+	smp_mb__after_clear_bit();
 	wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
 }
 
@@ -295,17 +311,19 @@
  */
 static void rpc_make_runnable(struct rpc_task *task)
 {
-	int do_ret;
-
 	BUG_ON(task->tk_timeout_fn);
-	do_ret = rpc_test_and_set_running(task);
 	rpc_clear_queued(task);
-	if (do_ret)
+	if (rpc_test_and_set_running(task))
 		return;
+	/* We might have raced */
+	if (RPC_IS_QUEUED(task)) {
+		rpc_clear_running(task);
+		return;
+	}
 	if (RPC_IS_ASYNC(task)) {
 		int status;
 
-		INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
+		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
 		status = queue_work(task->tk_workqueue, &task->u.tk_work);
 		if (status < 0) {
 			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
@@ -333,9 +351,6 @@
 		return;
 	}
 
-	/* Mark the task as being activated if so needed */
-	rpc_set_active(task);
-
 	__rpc_add_wait_queue(q, task);
 
 	BUG_ON(task->tk_callback != NULL);
@@ -346,6 +361,9 @@
 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 				rpc_action action, rpc_action timer)
 {
+	/* Mark the task as being activated if so needed */
+	rpc_set_active(task);
+
 	/*
 	 * Protect the queue operations.
 	 */
@@ -409,16 +427,19 @@
  */
 void rpc_wake_up_task(struct rpc_task *task)
 {
+	rcu_read_lock_bh();
 	if (rpc_start_wakeup(task)) {
 		if (RPC_IS_QUEUED(task)) {
 			struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;
 
-			spin_lock_bh(&queue->lock);
+			/* Note: we're already in a bh-safe context */
+			spin_lock(&queue->lock);
 			__rpc_do_wake_up_task(task);
-			spin_unlock_bh(&queue->lock);
+			spin_unlock(&queue->lock);
 		}
 		rpc_finish_wakeup(task);
 	}
+	rcu_read_unlock_bh();
 }
 
 /*
@@ -481,14 +502,16 @@
 	struct rpc_task	*task = NULL;
 
 	dprintk("RPC:      wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
-	spin_lock_bh(&queue->lock);
+	rcu_read_lock_bh();
+	spin_lock(&queue->lock);
 	if (RPC_IS_PRIORITY(queue))
 		task = __rpc_wake_up_next_priority(queue);
 	else {
 		task_for_first(task, &queue->tasks[0])
 			__rpc_wake_up_task(task);
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
+	rcu_read_unlock_bh();
 
 	return task;
 }
@@ -504,7 +527,8 @@
 	struct rpc_task *task, *next;
 	struct list_head *head;
 
-	spin_lock_bh(&queue->lock);
+	rcu_read_lock_bh();
+	spin_lock(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
@@ -513,7 +537,8 @@
 			break;
 		head--;
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
+	rcu_read_unlock_bh();
 }
 
 /**
@@ -528,7 +553,8 @@
 	struct rpc_task *task, *next;
 	struct list_head *head;
 
-	spin_lock_bh(&queue->lock);
+	rcu_read_lock_bh();
+	spin_lock(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
@@ -539,7 +565,8 @@
 			break;
 		head--;
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
+	rcu_read_unlock_bh();
 }
 
 static void __rpc_atrun(struct rpc_task *task)
@@ -561,7 +588,9 @@
  */
 static void rpc_prepare_task(struct rpc_task *task)
 {
+	lock_kernel();
 	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
+	unlock_kernel();
 }
 
 /*
@@ -571,7 +600,9 @@
 {
 	task->tk_action = NULL;
 	if (task->tk_ops->rpc_call_done != NULL) {
+		lock_kernel();
 		task->tk_ops->rpc_call_done(task, task->tk_calldata);
+		unlock_kernel();
 		if (task->tk_action != NULL) {
 			WARN_ON(RPC_ASSASSINATED(task));
 			/* Always release the RPC slot and buffer memory */
@@ -581,6 +612,15 @@
 }
 EXPORT_SYMBOL(rpc_exit_task);
 
+void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
+{
+	if (ops->rpc_release != NULL) {
+		lock_kernel();
+		ops->rpc_release(calldata);
+		unlock_kernel();
+	}
+}
+
 /*
  * This is the RPC `scheduler' (or rather, the finite state machine).
  */
@@ -615,9 +655,7 @@
 			 */
 			save_callback=task->tk_callback;
 			task->tk_callback=NULL;
-			lock_kernel();
 			save_callback(task);
-			unlock_kernel();
 		}
 
 		/*
@@ -628,9 +666,7 @@
 		if (!RPC_IS_QUEUED(task)) {
 			if (task->tk_action == NULL)
 				break;
-			lock_kernel();
 			task->tk_action(task);
-			unlock_kernel();
 		}
 
 		/*
@@ -671,8 +707,6 @@
 	}
 
 	dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status);
-	/* Wake up anyone who is waiting for task completion */
-	rpc_mark_complete_task(task);
 	/* Release all resources associated with the task */
 	rpc_release_task(task);
 	return status;
@@ -695,9 +729,9 @@
 	return __rpc_execute(task);
 }
 
-static void rpc_async_schedule(void *arg)
+static void rpc_async_schedule(struct work_struct *work)
 {
-	__rpc_execute((struct rpc_task *)arg);
+	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
 }
 
 /**
@@ -786,15 +820,6 @@
 			task->tk_flags |= RPC_TASK_NOINTR;
 	}
 
-#ifdef RPC_DEBUG
-	task->tk_magic = RPC_TASK_MAGIC_ID;
-	task->tk_pid = rpc_task_id++;
-#endif
-	/* Add to global list of all tasks */
-	spin_lock(&rpc_sched_lock);
-	list_add_tail(&task->tk_task, &all_tasks);
-	spin_unlock(&rpc_sched_lock);
-
 	BUG_ON(task->tk_ops == NULL);
 
 	/* starting timestamp */
@@ -810,8 +835,9 @@
 	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
 }
 
-static void rpc_free_task(struct rpc_task *task)
+static void rpc_free_task(struct rcu_head *rcu)
 {
+	struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
 	dprintk("RPC: %4d freeing task\n", task->tk_pid);
 	mempool_free(task, rpc_task_mempool);
 }
@@ -847,16 +873,34 @@
 	goto out;
 }
 
-void rpc_release_task(struct rpc_task *task)
+
+void rpc_put_task(struct rpc_task *task)
 {
 	const struct rpc_call_ops *tk_ops = task->tk_ops;
 	void *calldata = task->tk_calldata;
 
+	if (!atomic_dec_and_test(&task->tk_count))
+		return;
+	/* Release resources */
+	if (task->tk_rqstp)
+		xprt_release(task);
+	if (task->tk_msg.rpc_cred)
+		rpcauth_unbindcred(task);
+	if (task->tk_client) {
+		rpc_release_client(task->tk_client);
+		task->tk_client = NULL;
+	}
+	if (task->tk_flags & RPC_TASK_DYNAMIC)
+		call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
+	rpc_release_calldata(tk_ops, calldata);
+}
+EXPORT_SYMBOL(rpc_put_task);
+
+void rpc_release_task(struct rpc_task *task)
+{
 #ifdef RPC_DEBUG
 	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
 #endif
-	if (!atomic_dec_and_test(&task->tk_count))
-		return;
 	dprintk("RPC: %4d release task\n", task->tk_pid);
 
 	/* Remove from global task list */
@@ -869,23 +913,13 @@
 	/* Synchronously delete any running timer */
 	rpc_delete_timer(task);
 
-	/* Release resources */
-	if (task->tk_rqstp)
-		xprt_release(task);
-	if (task->tk_msg.rpc_cred)
-		rpcauth_unbindcred(task);
-	if (task->tk_client) {
-		rpc_release_client(task->tk_client);
-		task->tk_client = NULL;
-	}
-
 #ifdef RPC_DEBUG
 	task->tk_magic = 0;
 #endif
-	if (task->tk_flags & RPC_TASK_DYNAMIC)
-		rpc_free_task(task);
-	if (tk_ops->rpc_release)
-		tk_ops->rpc_release(calldata);
+	/* Wake up anyone who is waiting for task completion */
+	rpc_mark_complete_task(task);
+
+	rpc_put_task(task);
 }
 
 /**
@@ -902,8 +936,7 @@
 	struct rpc_task *task;
 	task = rpc_new_task(clnt, flags, ops, data);
 	if (task == NULL) {
-		if (ops->rpc_release != NULL)
-			ops->rpc_release(data);
+		rpc_release_calldata(ops, data);
 		return ERR_PTR(-ENOMEM);
 	}
 	atomic_inc(&task->tk_count);
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 6f17527..634885b 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -16,7 +16,7 @@
 
 
 /**
- * skb_read_bits - copy some data bits from skb to internal buffer
+ * xdr_skb_read_bits - copy some data bits from skb to internal buffer
  * @desc: sk_buff copy helper
  * @to: copy destination
  * @len: number of bytes to copy
@@ -24,11 +24,11 @@
  * Possibly called several times to iterate over an sk_buff and copy
  * data out of it.
  */
-static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len)
+size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
 {
 	if (len > desc->count)
 		len = desc->count;
-	if (skb_copy_bits(desc->skb, desc->offset, to, len))
+	if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
 		return 0;
 	desc->count -= len;
 	desc->offset += len;
@@ -36,16 +36,17 @@
 }
 
 /**
- * skb_read_and_csum_bits - copy and checksum from skb to buffer
+ * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
  * @desc: sk_buff copy helper
  * @to: copy destination
  * @len: number of bytes to copy
  *
  * Same as skb_read_bits, but calculate a checksum at the same time.
  */
-static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len)
+static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
 {
-	unsigned int	csum2, pos;
+	unsigned int pos;
+	__wsum csum2;
 
 	if (len > desc->count)
 		len = desc->count;
@@ -65,7 +66,7 @@
  * @copy_actor: virtual method for copying data
  *
  */
-ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, skb_reader_t *desc, skb_read_actor_t copy_actor)
+ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
 {
 	struct page	**ppage = xdr->pages;
 	unsigned int	len, pglen = xdr->page_len;
@@ -147,7 +148,7 @@
  */
 int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 {
-	skb_reader_t	desc;
+	struct xdr_skb_reader	desc;
 
 	desc.skb = skb;
 	desc.offset = sizeof(struct udphdr);
@@ -157,22 +158,22 @@
 		goto no_checksum;
 
 	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
-	if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0)
+	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
 		return -1;
 	if (desc.offset != skb->len) {
-		unsigned int csum2;
+		__wsum csum2;
 		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
 		desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
 	}
 	if (desc.count)
 		return -1;
-	if ((unsigned short)csum_fold(desc.csum))
+	if (csum_fold(desc.csum))
 		return -1;
 	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
 		netdev_rx_csum_fault(skb->dev);
 	return 0;
 no_checksum:
-	if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0)
+	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
 		return -1;
 	if (desc.count)
 		return -1;
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 192dff5..d85fdde 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -33,7 +33,6 @@
 EXPORT_SYMBOL(rpciod_up);
 EXPORT_SYMBOL(rpc_new_task);
 EXPORT_SYMBOL(rpc_wake_up_status);
-EXPORT_SYMBOL(rpc_release_task);
 
 /* RPC client functions */
 EXPORT_SYMBOL(rpc_clone_client);
@@ -139,6 +138,8 @@
 extern int register_rpc_pipefs(void);
 extern void unregister_rpc_pipefs(void);
 extern struct cache_detail ip_map_cache;
+extern int init_socket_xprt(void);
+extern void cleanup_socket_xprt(void);
 
 static int __init
 init_sunrpc(void)
@@ -156,6 +157,7 @@
 	rpc_proc_init();
 #endif
 	cache_register(&ip_map_cache);
+	init_socket_xprt();
 out:
 	return err;
 }
@@ -163,6 +165,7 @@
 static void __exit
 cleanup_sunrpc(void)
 {
+	cleanup_socket_xprt();
 	unregister_rpc_pipefs();
 	rpc_destroy_mempool();
 	if (cache_unregister(&ip_map_cache))
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index ee9bb15..c7bb5f7 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -119,7 +119,8 @@
 #define	DN_HASHMASK	(DN_HASHMAX-1)
 
 static struct hlist_head	auth_domain_table[DN_HASHMAX];
-static spinlock_t	auth_domain_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t	auth_domain_lock =
+	__SPIN_LOCK_UNLOCKED(auth_domain_lock);
 
 void auth_domain_put(struct auth_domain *dom)
 {
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index e1bd933..a0a953a 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -101,9 +101,9 @@
  * IP addresses in reverse-endian (i.e. on a little-endian machine).
  * So use a trivial but reliable hash instead
  */
-static inline int hash_ip(unsigned long ip)
+static inline int hash_ip(__be32 ip)
 {
-	int hash = ip ^ (ip>>16);
+	int hash = (__force u32)ip ^ ((__force u32)ip>>16);
 	return (hash ^ (hash>>8)) & 0xff;
 }
 #endif
@@ -284,7 +284,7 @@
 	ip.m_addr = addr;
 	ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h,
 				 hash_str(class, IP_HASHBITS) ^
-				 hash_ip((unsigned long)addr.s_addr));
+				 hash_ip(addr.s_addr));
 
 	if (ch)
 		return container_of(ch, struct ip_map, h);
@@ -313,7 +313,7 @@
 	ch = sunrpc_cache_update(&ip_map_cache,
 				 &ip.h, &ipm->h,
 				 hash_str(ipm->m_class, IP_HASHBITS) ^
-				 hash_ip((unsigned long)ipm->m_addr.s_addr));
+				 hash_ip(ipm->m_addr.s_addr));
 	if (!ch)
 		return -ENOMEM;
 	cache_put(ch, &ip_map_cache);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 64ca1f6..99f54fb 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -32,6 +32,7 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/file.h>
+#include <linux/freezer.h>
 #include <net/sock.h>
 #include <net/checksum.h>
 #include <net/ip.h>
@@ -84,6 +85,35 @@
  */
 static int svc_conn_age_period = 6*60;
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key svc_key[2];
+static struct lock_class_key svc_slock_key[2];
+
+static inline void svc_reclassify_socket(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	BUG_ON(sk->sk_lock.owner != NULL);
+	switch (sk->sk_family) {
+	case AF_INET:
+		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
+		    &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
+		break;
+
+	case AF_INET6:
+		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
+		    &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
+		break;
+
+	default:
+		BUG();
+	}
+}
+#else
+static inline void svc_reclassify_socket(struct socket *sock)
+{
+}
+#endif
+
 /*
  * Queue up an idle server thread.  Must have pool->sp_lock held.
  * Note: this is really a stack rather than a queue, so that we only
@@ -1556,6 +1586,8 @@
 	if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
 		return error;
 
+	svc_reclassify_socket(sock);
+
 	if (type == SOCK_STREAM)
 		sock->sk->sk_reuse = 1; /* allow address reuse */
 	error = kernel_bind(sock, (struct sockaddr *) sin,
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index d89b048..82b2752 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -18,7 +18,6 @@
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/sched.h>
 #include <linux/sunrpc/stats.h>
-#include <linux/sunrpc/xprt.h>
 
 /*
  * Declare the debug flags here
@@ -119,11 +118,6 @@
 }
 
 
-static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
-static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
-static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
-static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
-
 static ctl_table debug_table[] = {
 	{
 		.ctl_name	= CTL_RPCDEBUG,
@@ -157,50 +151,6 @@
 		.mode		= 0644,
 		.proc_handler	= &proc_dodebug
 	}, 
-	{
-		.ctl_name	= CTL_SLOTTABLE_UDP,
-		.procname	= "udp_slot_table_entries",
-		.data		= &xprt_udp_slot_table_entries,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
-		.strategy	= &sysctl_intvec,
-		.extra1		= &min_slot_table_size,
-		.extra2		= &max_slot_table_size
-	},
-	{
-		.ctl_name	= CTL_SLOTTABLE_TCP,
-		.procname	= "tcp_slot_table_entries",
-		.data		= &xprt_tcp_slot_table_entries,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
-		.strategy	= &sysctl_intvec,
-		.extra1		= &min_slot_table_size,
-		.extra2		= &max_slot_table_size
-	},
-	{
-		.ctl_name	= CTL_MIN_RESVPORT,
-		.procname	= "min_resvport",
-		.data		= &xprt_min_resvport,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
-		.strategy	= &sysctl_intvec,
-		.extra1		= &xprt_min_resvport_limit,
-		.extra2		= &xprt_max_resvport_limit
-	},
-	{
-		.ctl_name	= CTL_MAX_RESVPORT,
-		.procname	= "max_resvport",
-		.data		= &xprt_max_resvport,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
-		.strategy	= &sysctl_intvec,
-		.extra1		= &xprt_min_resvport_limit,
-		.extra2		= &xprt_max_resvport_limit
-	},
 	{ .ctl_name = 0 }
 };
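
The transport tunables removed here reappear later in this patch in net/sunrpc/xprtsock.c, registered as a second table under /proc/sys/sunrpc; register_sysctl_table() merges the entries of every table registered for the same directory. A minimal sketch of that registration pattern, with a hypothetical knob and binary id, using the two-argument register_sysctl_table() of this kernel era:

	#include <linux/sysctl.h>
	#include <linux/errno.h>

	#define CTL_MY_KNOB 99	/* hypothetical id; real knobs need a reserved number */

	static unsigned int my_knob = 1;

	/* Hedged sketch: a second table for the existing /proc/sys/sunrpc dir. */
	static ctl_table my_table[] = {
		{
			.ctl_name	= CTL_MY_KNOB,
			.procname	= "my_knob",
			.data		= &my_knob,
			.maxlen		= sizeof(unsigned int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{ .ctl_name = 0 }
	};

	static ctl_table my_root[] = {
		{
			.ctl_name	= CTL_SUNRPC,
			.procname	= "sunrpc",
			.mode		= 0555,
			.child		= my_table,
		},
		{ .ctl_name = 0 }
	};

	static struct ctl_table_header *my_header;

	static int my_sysctl_init(void)
	{
		my_header = register_sysctl_table(my_root, 0);
		return my_header ? 0 : -ENOMEM;
	}

	static void my_sysctl_exit(void)
	{
		if (my_header)
			unregister_sysctl_table(my_header);
	}
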
 
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 9022eb8..a0af250 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -640,41 +640,30 @@
 	buf->buflen = buf->len = iov->iov_len;
 }
 
-/* Sets subiov to the intersection of iov with the buffer of length len
- * starting base bytes after iov.  Indicates empty intersection by setting
- * length of subiov to zero.  Decrements len by length of subiov, sets base
- * to zero (or decrements it by length of iov if subiov is empty). */
-static void
-iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len)
-{
-	if (*base > iov->iov_len) {
-		subiov->iov_base = NULL;
-		subiov->iov_len = 0;
-		*base -= iov->iov_len;
-	} else {
-		subiov->iov_base = iov->iov_base + *base;
-		subiov->iov_len = min(*len, (int)iov->iov_len - *base);
-		*base = 0;
-	}
-	*len -= subiov->iov_len; 
-}
-
 /* Sets subbuf to the portion of buf of length len beginning base bytes
  * from the start of buf. Returns -1 if base of length are out of bounds. */
 int
 xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
-			int base, int len)
+			unsigned int base, unsigned int len)
 {
-	int i;
-
 	subbuf->buflen = subbuf->len = len;
-	iov_subsegment(buf->head, subbuf->head, &base, &len);
+	if (base < buf->head[0].iov_len) {
+		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
+		subbuf->head[0].iov_len = min_t(unsigned int, len,
+						buf->head[0].iov_len - base);
+		len -= subbuf->head[0].iov_len;
+		base = 0;
+	} else {
+		subbuf->head[0].iov_base = NULL;
+		subbuf->head[0].iov_len = 0;
+		base -= buf->head[0].iov_len;
+	}
 
 	if (base < buf->page_len) {
-		i = (base + buf->page_base) >> PAGE_CACHE_SHIFT;
-		subbuf->pages = &buf->pages[i];
-		subbuf->page_base = (base + buf->page_base) & ~PAGE_CACHE_MASK;
-		subbuf->page_len = min((int)buf->page_len - base, len);
+		subbuf->page_len = min(buf->page_len - base, len);
+		base += buf->page_base;
+		subbuf->page_base = base & ~PAGE_CACHE_MASK;
+		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
 		len -= subbuf->page_len;
 		base = 0;
 	} else {
@@ -682,66 +671,85 @@
 		subbuf->page_len = 0;
 	}
 
-	iov_subsegment(buf->tail, subbuf->tail, &base, &len);
+	if (base < buf->tail[0].iov_len) {
+		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
+		subbuf->tail[0].iov_len = min_t(unsigned int, len,
+						buf->tail[0].iov_len - base);
+		len -= subbuf->tail[0].iov_len;
+		base = 0;
+	} else {
+		subbuf->tail[0].iov_base = NULL;
+		subbuf->tail[0].iov_len = 0;
+		base -= buf->tail[0].iov_len;
+	}
+
 	if (base || len)
 		return -1;
 	return 0;
 }
 
-/* obj is assumed to point to allocated memory of size at least len: */
-int
-read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
+static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
 {
-	struct xdr_buf subbuf;
-	int this_len;
-	int status;
+	unsigned int this_len;
 
-	status = xdr_buf_subsegment(buf, &subbuf, base, len);
-	if (status)
-		goto out;
-	this_len = min(len, (int)subbuf.head[0].iov_len);
-	memcpy(obj, subbuf.head[0].iov_base, this_len);
+	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
+	memcpy(obj, subbuf->head[0].iov_base, this_len);
 	len -= this_len;
 	obj += this_len;
-	this_len = min(len, (int)subbuf.page_len);
+	this_len = min_t(unsigned int, len, subbuf->page_len);
 	if (this_len)
-		_copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len);
+		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
 	len -= this_len;
 	obj += this_len;
-	this_len = min(len, (int)subbuf.tail[0].iov_len);
-	memcpy(obj, subbuf.tail[0].iov_base, this_len);
-out:
-	return status;
+	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
+	memcpy(obj, subbuf->tail[0].iov_base, this_len);
 }
 
 /* obj is assumed to point to allocated memory of size at least len: */
-int
-write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
+int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
 {
 	struct xdr_buf subbuf;
-	int this_len;
 	int status;
 
 	status = xdr_buf_subsegment(buf, &subbuf, base, len);
-	if (status)
-		goto out;
-	this_len = min(len, (int)subbuf.head[0].iov_len);
-	memcpy(subbuf.head[0].iov_base, obj, this_len);
+	if (status != 0)
+		return status;
+	__read_bytes_from_xdr_buf(&subbuf, obj, len);
+	return 0;
+}
+
+static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
+{
+	unsigned int this_len;
+
+	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
+	memcpy(subbuf->head[0].iov_base, obj, this_len);
 	len -= this_len;
 	obj += this_len;
-	this_len = min(len, (int)subbuf.page_len);
+	this_len = min_t(unsigned int, len, subbuf->page_len);
 	if (this_len)
-		_copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len);
+		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
 	len -= this_len;
 	obj += this_len;
-	this_len = min(len, (int)subbuf.tail[0].iov_len);
-	memcpy(subbuf.tail[0].iov_base, obj, this_len);
-out:
-	return status;
+	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
+	memcpy(subbuf->tail[0].iov_base, obj, this_len);
+}
+
+/* obj is assumed to point to allocated memory of size at least len: */
+int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
+{
+	struct xdr_buf subbuf;
+	int status;
+
+	status = xdr_buf_subsegment(buf, &subbuf, base, len);
+	if (status != 0)
+		return status;
+	__write_bytes_to_xdr_buf(&subbuf, obj, len);
+	return 0;
 }
 
 int
-xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
+xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
 {
 	__be32	raw;
 	int	status;
@@ -754,7 +762,7 @@
 }
 
 int
-xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
+xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
 {
 	__be32	raw = htonl(obj);
 
@@ -765,44 +773,37 @@
  * entirely in the head or the tail, set object to point to it; otherwise
  * try to find space for it at the end of the tail, copy it there, and
  * set obj to point to it. */
-int
-xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset)
+int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
 {
-	u32	tail_offset = buf->head[0].iov_len + buf->page_len;
-	u32	obj_end_offset;
+	struct xdr_buf subbuf;
 
 	if (xdr_decode_word(buf, offset, &obj->len))
-		goto out;
-	obj_end_offset = offset + 4 + obj->len;
+		return -EFAULT;
+	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
+		return -EFAULT;
 
-	if (obj_end_offset <= buf->head[0].iov_len) {
-		/* The obj is contained entirely in the head: */
-		obj->data = buf->head[0].iov_base + offset + 4;
-	} else if (offset + 4 >= tail_offset) {
-		if (obj_end_offset - tail_offset
-				> buf->tail[0].iov_len)
-			goto out;
-		/* The obj is contained entirely in the tail: */
-		obj->data = buf->tail[0].iov_base
-			+ offset - tail_offset + 4;
-	} else {
-		/* use end of tail as storage for obj:
-		 * (We don't copy to the beginning because then we'd have
-		 * to worry about doing a potentially overlapping copy.
-		 * This assumes the object is at most half the length of the
-		 * tail.) */
-		if (obj->len > buf->tail[0].iov_len)
-			goto out;
-		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len - 
-				obj->len;
-		if (read_bytes_from_xdr_buf(buf, offset + 4,
-					obj->data, obj->len))
-			goto out;
+	/* Is the obj contained entirely in the head? */
+	obj->data = subbuf.head[0].iov_base;
+	if (subbuf.head[0].iov_len == obj->len)
+		return 0;
+	/* ..or is the obj contained entirely in the tail? */
+	obj->data = subbuf.tail[0].iov_base;
+	if (subbuf.tail[0].iov_len == obj->len)
+		return 0;
 
-	}
+	/* use end of tail as storage for obj:
+	 * (We don't copy to the beginning because then we'd have
+	 * to worry about doing a potentially overlapping copy.
+	 * This assumes the object is at most half the length of the
+	 * tail.) */
+	if (obj->len > buf->buflen - buf->len)
+		return -ENOMEM;
+	if (buf->tail[0].iov_len != 0)
+		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
+	else
+		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
+	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
 	return 0;
-out:
-	return -1;
 }
 
 /* Returns 0 on success, or else a negative error code. */
@@ -1020,3 +1021,71 @@
 
 	return xdr_xcode_array2(buf, base, desc, 1);
 }
+
+int
+xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
+                int (*actor)(struct scatterlist *, void *), void *data)
+{
+	int i, ret = 0;
+	unsigned page_len, thislen, page_offset;
+	struct scatterlist      sg[1];
+
+	if (offset >= buf->head[0].iov_len) {
+		offset -= buf->head[0].iov_len;
+	} else {
+		thislen = buf->head[0].iov_len - offset;
+		if (thislen > len)
+			thislen = len;
+		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
+		ret = actor(sg, data);
+		if (ret)
+			goto out;
+		offset = 0;
+		len -= thislen;
+	}
+	if (len == 0)
+		goto out;
+
+	if (offset >= buf->page_len) {
+		offset -= buf->page_len;
+	} else {
+		page_len = buf->page_len - offset;
+		if (page_len > len)
+			page_len = len;
+		len -= page_len;
+		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
+		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
+		thislen = PAGE_CACHE_SIZE - page_offset;
+		do {
+			if (thislen > page_len)
+				thislen = page_len;
+			sg->page = buf->pages[i];
+			sg->offset = page_offset;
+			sg->length = thislen;
+			ret = actor(sg, data);
+			if (ret)
+				goto out;
+			page_len -= thislen;
+			i++;
+			page_offset = 0;
+			thislen = PAGE_CACHE_SIZE;
+		} while (page_len != 0);
+		offset = 0;
+	}
+	if (len == 0)
+		goto out;
+	if (offset < buf->tail[0].iov_len) {
+		thislen = buf->tail[0].iov_len - offset;
+		if (thislen > len)
+			thislen = len;
+		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
+		ret = actor(sg, data);
+		len -= thislen;
+	}
+	if (len != 0)
+		ret = -EINVAL;
+out:
+	return ret;
+}
+EXPORT_SYMBOL(xdr_process_buf);
+
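The xdr_process_buf() added above walks the head, page, and tail sections of an xdr_buf and hands each contiguous piece to the actor callback as a one-entry scatterlist, aborting on the first non-zero return. A minimal sketch of an actor with a hypothetical name (the patch's real user is spkm3_checksummer() in gss_spkm3_seal.c), assuming the prototype is exported via <linux/sunrpc/xdr.h>:

	#include <linux/scatterlist.h>
	#include <linux/sunrpc/xdr.h>

	/* Hedged sketch (hypothetical actor): total up the bytes presented. */
	static int count_bytes_actor(struct scatterlist *sg, void *data)
	{
		unsigned int *total = data;

		*total += sg->length;
		return 0;		/* non-zero would abort the walk */
	}

	/* usage:
	 *	unsigned int n = 0;
	 *	xdr_process_buf(buf, 0, buf->len, count_bytes_actor, &n);
	 */
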
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 8085747..7a3999f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -459,7 +459,6 @@
 		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
 			req->rq_timeout = to->to_maxval;
 		req->rq_retries++;
-		pprintk("RPC: %lu retrans\n", jiffies);
 	} else {
 		req->rq_timeout = to->to_initval;
 		req->rq_retries = 0;
@@ -468,7 +467,6 @@
 		spin_lock_bh(&xprt->transport_lock);
 		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
 		spin_unlock_bh(&xprt->transport_lock);
-		pprintk("RPC: %lu timeout\n", jiffies);
 		status = -ETIMEDOUT;
 	}
 
@@ -479,9 +477,10 @@
 	return status;
 }
 
-static void xprt_autoclose(void *args)
+static void xprt_autoclose(struct work_struct *work)
 {
-	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
+	struct rpc_xprt *xprt =
+		container_of(work, struct rpc_xprt, task_cleanup);
 
 	xprt_disconnect(xprt);
 	xprt->ops->close(xprt);
@@ -891,39 +890,25 @@
  */
 struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t size, struct rpc_timeout *to)
 {
-	int result;
 	struct rpc_xprt	*xprt;
 	struct rpc_rqst	*req;
 
-	if ((xprt = kzalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) {
-		dprintk("RPC:      xprt_create_transport: no memory\n");
-		return ERR_PTR(-ENOMEM);
-	}
-	if (size <= sizeof(xprt->addr)) {
-		memcpy(&xprt->addr, ap, size);
-		xprt->addrlen = size;
-	} else {
-		kfree(xprt);
-		dprintk("RPC:      xprt_create_transport: address too large\n");
-		return ERR_PTR(-EBADF);
-	}
-
 	switch (proto) {
 	case IPPROTO_UDP:
-		result = xs_setup_udp(xprt, to);
+		xprt = xs_setup_udp(ap, size, to);
 		break;
 	case IPPROTO_TCP:
-		result = xs_setup_tcp(xprt, to);
+		xprt = xs_setup_tcp(ap, size, to);
 		break;
 	default:
 		printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n",
 				proto);
 		return ERR_PTR(-EIO);
 	}
-	if (result) {
-		kfree(xprt);
-		dprintk("RPC:      xprt_create_transport: failed, %d\n", result);
-		return ERR_PTR(result);
+	if (IS_ERR(xprt)) {
+		dprintk("RPC:      xprt_create_transport: failed, %ld\n",
+				-PTR_ERR(xprt));
+		return xprt;
 	}
 
 	kref_init(&xprt->kref);
@@ -932,7 +917,7 @@
 
 	INIT_LIST_HEAD(&xprt->free);
 	INIT_LIST_HEAD(&xprt->recv);
-	INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt);
+	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
 	init_timer(&xprt->timer);
 	xprt->timer.function = xprt_init_autodisconnect;
 	xprt->timer.data = (unsigned long) xprt;
@@ -969,8 +954,11 @@
 	dprintk("RPC:      destroying transport %p\n", xprt);
 	xprt->shutdown = 1;
 	del_timer_sync(&xprt->timer);
+
+	/*
+	 * Tear down transport state and free the rpc_xprt
+	 */
 	xprt->ops->destroy(xprt);
-	kfree(xprt);
 }
 
 /**
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 757fc91..49cabff 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -46,6 +46,92 @@
 unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
 
 /*
+ * We can register our own files under /proc/sys/sunrpc by
+ * calling register_sysctl_table() again.  The files in that
+ * directory become the union of all files registered there.
+ *
+ * We simply need to make sure that we don't collide with
+ * someone else's file names!
+ */
+
+#ifdef RPC_DEBUG
+
+static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
+static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
+static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
+static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
+
+static struct ctl_table_header *sunrpc_table_header;
+
+/*
+ * FIXME: changing the UDP slot table size should also resize the UDP
+ *        socket buffers for existing UDP transports
+ */
+static ctl_table xs_tunables_table[] = {
+	{
+		.ctl_name	= CTL_SLOTTABLE_UDP,
+		.procname	= "udp_slot_table_entries",
+		.data		= &xprt_udp_slot_table_entries,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &min_slot_table_size,
+		.extra2		= &max_slot_table_size
+	},
+	{
+		.ctl_name	= CTL_SLOTTABLE_TCP,
+		.procname	= "tcp_slot_table_entries",
+		.data		= &xprt_tcp_slot_table_entries,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &min_slot_table_size,
+		.extra2		= &max_slot_table_size
+	},
+	{
+		.ctl_name	= CTL_MIN_RESVPORT,
+		.procname	= "min_resvport",
+		.data		= &xprt_min_resvport,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &xprt_min_resvport_limit,
+		.extra2		= &xprt_max_resvport_limit
+	},
+	{
+		.ctl_name	= CTL_MAX_RESVPORT,
+		.procname	= "max_resvport",
+		.data		= &xprt_max_resvport,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &xprt_min_resvport_limit,
+		.extra2		= &xprt_max_resvport_limit
+	},
+	{
+		.ctl_name = 0,
+	},
+};
+
+static ctl_table sunrpc_table[] = {
+	{
+		.ctl_name	= CTL_SUNRPC,
+		.procname	= "sunrpc",
+		.mode		= 0555,
+		.child		= xs_tunables_table
+	},
+	{
+		.ctl_name = 0,
+	},
+};
+
+#endif
+
+/*
  * How many times to try sending a request on a socket before waiting
  * for the socket buffer to clear.
  */
@@ -125,6 +211,55 @@
 }
 #endif
 
+struct sock_xprt {
+	struct rpc_xprt		xprt;
+
+	/*
+	 * Network layer
+	 */
+	struct socket *		sock;
+	struct sock *		inet;
+
+	/*
+	 * State of TCP reply receive
+	 */
+	__be32			tcp_fraghdr,
+				tcp_xid;
+
+	u32			tcp_offset,
+				tcp_reclen;
+
+	unsigned long		tcp_copied,
+				tcp_flags;
+
+	/*
+	 * Connection of transports
+	 */
+	struct delayed_work	connect_worker;
+	unsigned short		port;
+
+	/*
+	 * UDP socket buffer size parameters
+	 */
+	size_t			rcvsize,
+				sndsize;
+
+	/*
+	 * Saved socket callback addresses
+	 */
+	void			(*old_data_ready)(struct sock *, int);
+	void			(*old_state_change)(struct sock *);
+	void			(*old_write_space)(struct sock *);
+};
+
+/*
+ * TCP receive state flags
+ */
+#define TCP_RCV_LAST_FRAG	(1UL << 0)
+#define TCP_RCV_COPY_FRAGHDR	(1UL << 1)
+#define TCP_RCV_COPY_XID	(1UL << 2)
+#define TCP_RCV_COPY_DATA	(1UL << 3)
+
 static void xs_format_peer_addresses(struct rpc_xprt *xprt)
 {
 	struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr;
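
The new struct sock_xprt embeds the generic rpc_xprt and keeps all socket-specific state private to this file; every method that is handed the generic pointer converts it back with container_of(). The shape of that pattern, with illustrative names only:

#include <linux/kernel.h>
#include <linux/slab.h>

struct generic_xprt {
	size_t			addrlen;
};

struct private_xprt {
	struct generic_xprt	xprt;	/* generic part handed out to callers */
	unsigned short		port;	/* private, socket-level state */
};

static struct generic_xprt *private_xprt_alloc(void)
{
	struct private_xprt *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (priv == NULL)
		return NULL;
	priv->port = 1023;
	return &priv->xprt;	/* callers only ever see the embedded generic part */
}

static unsigned short private_xprt_port(struct generic_xprt *xprt)
{
	/* Walk back from the generic member to its container. */
	struct private_xprt *priv = container_of(xprt, struct private_xprt, xprt);

	return priv->port;
}
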
@@ -168,37 +303,52 @@
 
 #define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)
 
-static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len)
+static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
 {
-	struct kvec iov = {
-		.iov_base	= xdr->head[0].iov_base + base,
-		.iov_len	= len - base,
-	};
 	struct msghdr msg = {
 		.msg_name	= addr,
 		.msg_namelen	= addrlen,
-		.msg_flags	= XS_SENDMSG_FLAGS,
+		.msg_flags	= XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
+	};
+	struct kvec iov = {
+		.iov_base	= vec->iov_base + base,
+		.iov_len	= vec->iov_len - base,
 	};
 
-	if (xdr->len > len)
-		msg.msg_flags |= MSG_MORE;
-
-	if (likely(iov.iov_len))
+	if (iov.iov_len != 0)
 		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
 	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
 }
 
-static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len)
+static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more)
 {
-	struct kvec iov = {
-		.iov_base	= xdr->tail[0].iov_base + base,
-		.iov_len	= len - base,
-	};
-	struct msghdr msg = {
-		.msg_flags	= XS_SENDMSG_FLAGS,
-	};
+	struct page **ppage;
+	unsigned int remainder;
+	int err, sent = 0;
 
-	return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
+	remainder = xdr->page_len - base;
+	base += xdr->page_base;
+	ppage = xdr->pages + (base >> PAGE_SHIFT);
+	base &= ~PAGE_MASK;
+	for(;;) {
+		unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
+		int flags = XS_SENDMSG_FLAGS;
+
+		remainder -= len;
+		if (remainder != 0 || more)
+			flags |= MSG_MORE;
+		err = sock->ops->sendpage(sock, *ppage, base, len, flags);
+		if (remainder == 0 || err != len)
+			break;
+		sent += err;
+		ppage++;
+		base = 0;
+	}
+	if (sent == 0)
+		return err;
+	if (err > 0)
+		sent += err;
+	return sent;
 }
 
 /**
@@ -210,76 +360,51 @@
  * @base: starting position in the buffer
  *
  */
-static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
+static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
 {
-	struct page **ppage = xdr->pages;
-	unsigned int len, pglen = xdr->page_len;
-	int err, ret = 0;
+	unsigned int remainder = xdr->len - base;
+	int err, sent = 0;
 
 	if (unlikely(!sock))
 		return -ENOTCONN;
 
 	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+	if (base != 0) {
+		addr = NULL;
+		addrlen = 0;
+	}
 
-	len = xdr->head[0].iov_len;
-	if (base < len || (addr != NULL && base == 0)) {
-		err = xs_send_head(sock, addr, addrlen, xdr, base, len);
-		if (ret == 0)
-			ret = err;
-		else if (err > 0)
-			ret += err;
-		if (err != (len - base))
+	if (base < xdr->head[0].iov_len || addr != NULL) {
+		unsigned int len = xdr->head[0].iov_len - base;
+		remainder -= len;
+		err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
+		if (remainder == 0 || err != len)
 			goto out;
+		sent += err;
 		base = 0;
 	} else
-		base -= len;
+		base -= xdr->head[0].iov_len;
 
-	if (unlikely(pglen == 0))
-		goto copy_tail;
-	if (unlikely(base >= pglen)) {
-		base -= pglen;
-		goto copy_tail;
-	}
-	if (base || xdr->page_base) {
-		pglen -= base;
-		base += xdr->page_base;
-		ppage += base >> PAGE_CACHE_SHIFT;
-		base &= ~PAGE_CACHE_MASK;
-	}
-
-	do {
-		int flags = XS_SENDMSG_FLAGS;
-
-		len = PAGE_CACHE_SIZE;
-		if (base)
-			len -= base;
-		if (pglen < len)
-			len = pglen;
-
-		if (pglen != len || xdr->tail[0].iov_len != 0)
-			flags |= MSG_MORE;
-
-		err = kernel_sendpage(sock, *ppage, base, len, flags);
-		if (ret == 0)
-			ret = err;
-		else if (err > 0)
-			ret += err;
-		if (err != len)
+	if (base < xdr->page_len) {
+		unsigned int len = xdr->page_len - base;
+		remainder -= len;
+		err = xs_send_pagedata(sock, xdr, base, remainder != 0);
+		if (remainder == 0 || err != len)
 			goto out;
+		sent += err;
 		base = 0;
-		ppage++;
-	} while ((pglen -= len) != 0);
-copy_tail:
-	len = xdr->tail[0].iov_len;
-	if (base < len) {
-		err = xs_send_tail(sock, xdr, base, len);
-		if (ret == 0)
-			ret = err;
-		else if (err > 0)
-			ret += err;
-	}
+	} else
+		base -= xdr->page_len;
+
+	if (base >= xdr->tail[0].iov_len)
+		return sent;
+	err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
 out:
-	return ret;
+	if (sent == 0)
+		return err;
+	if (err > 0)
+		sent += err;
+	return sent;
 }
 
 /**
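
The rewritten xs_sendpages() above walks the three regions of an xdr_buf (head kvec, page data, tail kvec), subtracting each region's length from the resume offset and setting MSG_MORE whenever more data still follows. A plain userspace sketch of that offset bookkeeping, using illustrative types rather than the kernel structures:

#include <stdio.h>
#include <stddef.h>

struct demo_buf {
	size_t head_len;
	size_t page_len;
	size_t tail_len;
};

static void demo_locate(const struct demo_buf *buf, size_t base)
{
	if (base < buf->head_len) {
		printf("resume in head at %zu, %zu bytes left overall\n", base,
		       buf->head_len + buf->page_len + buf->tail_len - base);
		return;
	}
	base -= buf->head_len;

	if (base < buf->page_len) {
		printf("resume in page data at %zu\n", base);
		return;
	}
	base -= buf->page_len;

	if (base < buf->tail_len)
		printf("resume in tail at %zu\n", base);
	else
		printf("nothing left to send\n");
}

int main(void)
{
	struct demo_buf buf = { .head_len = 100, .page_len = 4096, .tail_len = 20 };

	/* A partial send of 150 bytes resumes 50 bytes into the page data. */
	demo_locate(&buf, 150);
	return 0;
}
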
@@ -291,19 +416,20 @@
 {
 	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_xprt *xprt = req->rq_xprt;
+	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 
 	dprintk("RPC: %4d xmit incomplete (%u left of %u)\n",
 			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
 			req->rq_slen);
 
-	if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
+	if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
 		/* Protect against races with write_space */
 		spin_lock_bh(&xprt->transport_lock);
 
 		/* Don't race with disconnect */
 		if (!xprt_connected(xprt))
 			task->tk_status = -ENOTCONN;
-		else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags))
+		else if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
 			xprt_wait_for_buffer_space(task);
 
 		spin_unlock_bh(&xprt->transport_lock);
@@ -327,6 +453,7 @@
 {
 	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_xprt *xprt = req->rq_xprt;
+	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 	struct xdr_buf *xdr = &req->rq_snd_buf;
 	int status;
 
@@ -335,8 +462,10 @@
 				req->rq_svec->iov_len);
 
 	req->rq_xtime = jiffies;
-	status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr,
-				xprt->addrlen, xdr, req->rq_bytes_sent);
+	status = xs_sendpages(transport->sock,
+			      (struct sockaddr *) &xprt->addr,
+			      xprt->addrlen, xdr,
+			      req->rq_bytes_sent);
 
 	dprintk("RPC:      xs_udp_send_request(%u) = %d\n",
 			xdr->len - req->rq_bytes_sent, status);
@@ -392,6 +521,7 @@
 {
 	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_xprt *xprt = req->rq_xprt;
+	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 	struct xdr_buf *xdr = &req->rq_snd_buf;
 	int status, retry = 0;
 
@@ -406,8 +536,8 @@
 	 * called sendmsg(). */
 	while (1) {
 		req->rq_xtime = jiffies;
-		status = xs_sendpages(xprt->sock, NULL, 0, xdr,
-						req->rq_bytes_sent);
+		status = xs_sendpages(transport->sock,
+					NULL, 0, xdr, req->rq_bytes_sent);
 
 		dprintk("RPC:      xs_tcp_send_request(%u) = %d\n",
 				xdr->len - req->rq_bytes_sent, status);
@@ -485,8 +615,9 @@
  */
 static void xs_close(struct rpc_xprt *xprt)
 {
-	struct socket *sock = xprt->sock;
-	struct sock *sk = xprt->inet;
+	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+	struct socket *sock = transport->sock;
+	struct sock *sk = transport->inet;
 
 	if (!sk)
 		goto clear_close_wait;
@@ -494,13 +625,13 @@
 	dprintk("RPC:      xs_close xprt %p\n", xprt);
 
 	write_lock_bh(&sk->sk_callback_lock);
-	xprt->inet = NULL;
-	xprt->sock = NULL;
+	transport->inet = NULL;
+	transport->sock = NULL;
 
 	sk->sk_user_data = NULL;
-	sk->sk_data_ready = xprt->old_data_ready;
-	sk->sk_state_change = xprt->old_state_change;
-	sk->sk_write_space = xprt->old_write_space;
+	sk->sk_data_ready = transport->old_data_ready;
+	sk->sk_state_change = transport->old_state_change;
+	sk->sk_write_space = transport->old_write_space;
 	write_unlock_bh(&sk->sk_callback_lock);
 
 	sk->sk_no_check = 0;
@@ -519,15 +650,18 @@
  */
 static void xs_destroy(struct rpc_xprt *xprt)
 {
+	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+
 	dprintk("RPC:      xs_destroy xprt %p\n", xprt);
 
-	cancel_delayed_work(&xprt->connect_worker);
+	cancel_delayed_work(&transport->connect_worker);
 	flush_scheduled_work();
 
 	xprt_disconnect(xprt);
 	xs_close(xprt);
 	xs_free_peer_addresses(xprt);
 	kfree(xprt->slot);
+	kfree(xprt);
 }
 
 static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
@@ -603,91 +737,75 @@
 	read_unlock(&sk->sk_callback_lock);
 }
 
-static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
+static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
 {
-	if (len > desc->count)
-		len = desc->count;
-	if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
-		dprintk("RPC:      failed to copy %zu bytes from skb. %zu bytes remain\n",
-				len, desc->count);
-		return 0;
-	}
-	desc->offset += len;
-	desc->count -= len;
-	dprintk("RPC:      copied %zu bytes from skb. %zu bytes remain\n",
-			len, desc->count);
-	return len;
-}
-
-static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
-{
+	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 	size_t len, used;
 	char *p;
 
-	p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
-	len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
-	used = xs_tcp_copy_data(desc, p, len);
-	xprt->tcp_offset += used;
+	p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
+	len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
+	used = xdr_skb_read_bits(desc, p, len);
+	transport->tcp_offset += used;
 	if (used != len)
 		return;
 
-	xprt->tcp_reclen = ntohl(xprt->tcp_recm);
-	if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
-		xprt->tcp_flags |= XPRT_LAST_FRAG;
+	transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
+	if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
+		transport->tcp_flags |= TCP_RCV_LAST_FRAG;
 	else
-		xprt->tcp_flags &= ~XPRT_LAST_FRAG;
-	xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
+		transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
+	transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
 
-	xprt->tcp_flags &= ~XPRT_COPY_RECM;
-	xprt->tcp_offset = 0;
+	transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
+	transport->tcp_offset = 0;
 
 	/* Sanity check of the record length */
-	if (unlikely(xprt->tcp_reclen < 4)) {
+	if (unlikely(transport->tcp_reclen < 4)) {
 		dprintk("RPC:      invalid TCP record fragment length\n");
 		xprt_disconnect(xprt);
 		return;
 	}
 	dprintk("RPC:      reading TCP record fragment of length %d\n",
-			xprt->tcp_reclen);
+			transport->tcp_reclen);
 }
 
-static void xs_tcp_check_recm(struct rpc_xprt *xprt)
+static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
 {
-	dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
-			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
-	if (xprt->tcp_offset == xprt->tcp_reclen) {
-		xprt->tcp_flags |= XPRT_COPY_RECM;
-		xprt->tcp_offset = 0;
-		if (xprt->tcp_flags & XPRT_LAST_FRAG) {
-			xprt->tcp_flags &= ~XPRT_COPY_DATA;
-			xprt->tcp_flags |= XPRT_COPY_XID;
-			xprt->tcp_copied = 0;
+	if (transport->tcp_offset == transport->tcp_reclen) {
+		transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
+		transport->tcp_offset = 0;
+		if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
+			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
+			transport->tcp_flags |= TCP_RCV_COPY_XID;
+			transport->tcp_copied = 0;
 		}
 	}
 }
 
-static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
+static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
 {
 	size_t len, used;
 	char *p;
 
-	len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
+	len = sizeof(transport->tcp_xid) - transport->tcp_offset;
 	dprintk("RPC:      reading XID (%Zu bytes)\n", len);
-	p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
-	used = xs_tcp_copy_data(desc, p, len);
-	xprt->tcp_offset += used;
+	p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
+	used = xdr_skb_read_bits(desc, p, len);
+	transport->tcp_offset += used;
 	if (used != len)
 		return;
-	xprt->tcp_flags &= ~XPRT_COPY_XID;
-	xprt->tcp_flags |= XPRT_COPY_DATA;
-	xprt->tcp_copied = 4;
+	transport->tcp_flags &= ~TCP_RCV_COPY_XID;
+	transport->tcp_flags |= TCP_RCV_COPY_DATA;
+	transport->tcp_copied = 4;
 	dprintk("RPC:      reading reply for XID %08x\n",
-						ntohl(xprt->tcp_xid));
-	xs_tcp_check_recm(xprt);
+			ntohl(transport->tcp_xid));
+	xs_tcp_check_fraghdr(transport);
 }
 
-static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
+static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
 {
+	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 	struct rpc_rqst *req;
 	struct xdr_buf *rcvbuf;
 	size_t len;
@@ -695,116 +813,118 @@
 
 	/* Find and lock the request corresponding to this xid */
 	spin_lock(&xprt->transport_lock);
-	req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
+	req = xprt_lookup_rqst(xprt, transport->tcp_xid);
 	if (!req) {
-		xprt->tcp_flags &= ~XPRT_COPY_DATA;
+		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
 		dprintk("RPC:      XID %08x request not found!\n",
-				ntohl(xprt->tcp_xid));
+				ntohl(transport->tcp_xid));
 		spin_unlock(&xprt->transport_lock);
 		return;
 	}
 
 	rcvbuf = &req->rq_private_buf;
 	len = desc->count;
-	if (len > xprt->tcp_reclen - xprt->tcp_offset) {
-		skb_reader_t my_desc;
+	if (len > transport->tcp_reclen - transport->tcp_offset) {
+		struct xdr_skb_reader my_desc;
 
-		len = xprt->tcp_reclen - xprt->tcp_offset;
+		len = transport->tcp_reclen - transport->tcp_offset;
 		memcpy(&my_desc, desc, sizeof(my_desc));
 		my_desc.count = len;
-		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
-					  &my_desc, xs_tcp_copy_data);
+		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
+					  &my_desc, xdr_skb_read_bits);
 		desc->count -= r;
 		desc->offset += r;
 	} else
-		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
-					  desc, xs_tcp_copy_data);
+		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
+					  desc, xdr_skb_read_bits);
 
 	if (r > 0) {
-		xprt->tcp_copied += r;
-		xprt->tcp_offset += r;
+		transport->tcp_copied += r;
+		transport->tcp_offset += r;
 	}
 	if (r != len) {
 		/* Error when copying to the receive buffer,
 		 * usually because we weren't able to allocate
 		 * additional buffer pages. All we can do now
-		 * is turn off XPRT_COPY_DATA, so the request
+		 * is turn off TCP_RCV_COPY_DATA, so the request
 		 * will not receive any additional updates,
 		 * and time out.
 		 * Any remaining data from this record will
 		 * be discarded.
 		 */
-		xprt->tcp_flags &= ~XPRT_COPY_DATA;
+		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
 		dprintk("RPC:      XID %08x truncated request\n",
-				ntohl(xprt->tcp_xid));
+				ntohl(transport->tcp_xid));
 		dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
-				xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
+				xprt, transport->tcp_copied, transport->tcp_offset,
+					transport->tcp_reclen);
 		goto out;
 	}
 
 	dprintk("RPC:      XID %08x read %Zd bytes\n",
-			ntohl(xprt->tcp_xid), r);
+			ntohl(transport->tcp_xid), r);
 	dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
-			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
+			xprt, transport->tcp_copied, transport->tcp_offset,
+				transport->tcp_reclen);
 
-	if (xprt->tcp_copied == req->rq_private_buf.buflen)
-		xprt->tcp_flags &= ~XPRT_COPY_DATA;
-	else if (xprt->tcp_offset == xprt->tcp_reclen) {
-		if (xprt->tcp_flags & XPRT_LAST_FRAG)
-			xprt->tcp_flags &= ~XPRT_COPY_DATA;
+	if (transport->tcp_copied == req->rq_private_buf.buflen)
+		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
+	else if (transport->tcp_offset == transport->tcp_reclen) {
+		if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
+			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
 	}
 
 out:
-	if (!(xprt->tcp_flags & XPRT_COPY_DATA))
-		xprt_complete_rqst(req->rq_task, xprt->tcp_copied);
+	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
+		xprt_complete_rqst(req->rq_task, transport->tcp_copied);
 	spin_unlock(&xprt->transport_lock);
-	xs_tcp_check_recm(xprt);
+	xs_tcp_check_fraghdr(transport);
 }
 
-static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
+static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
 {
 	size_t len;
 
-	len = xprt->tcp_reclen - xprt->tcp_offset;
+	len = transport->tcp_reclen - transport->tcp_offset;
 	if (len > desc->count)
 		len = desc->count;
 	desc->count -= len;
 	desc->offset += len;
-	xprt->tcp_offset += len;
+	transport->tcp_offset += len;
 	dprintk("RPC:      discarded %Zu bytes\n", len);
-	xs_tcp_check_recm(xprt);
+	xs_tcp_check_fraghdr(transport);
 }
 
 static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
 {
 	struct rpc_xprt *xprt = rd_desc->arg.data;
-	skb_reader_t desc = {
+	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+	struct xdr_skb_reader desc = {
 		.skb	= skb,
 		.offset	= offset,
 		.count	= len,
-		.csum	= 0
 	};
 
 	dprintk("RPC:      xs_tcp_data_recv started\n");
 	do {
 		/* Read in a new fragment marker if necessary */
 		/* Can we ever really expect to get completely empty fragments? */
-		if (xprt->tcp_flags & XPRT_COPY_RECM) {
+		if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
 			xs_tcp_read_fraghdr(xprt, &desc);
 			continue;
 		}
 		/* Read in the xid if necessary */
-		if (xprt->tcp_flags & XPRT_COPY_XID) {
-			xs_tcp_read_xid(xprt, &desc);
+		if (transport->tcp_flags & TCP_RCV_COPY_XID) {
+			xs_tcp_read_xid(transport, &desc);
 			continue;
 		}
 		/* Read in the request data */
-		if (xprt->tcp_flags & XPRT_COPY_DATA) {
+		if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
 			xs_tcp_read_request(xprt, &desc);
 			continue;
 		}
 		/* Skip over any trailing bytes on short reads */
-		xs_tcp_read_discard(xprt, &desc);
+		xs_tcp_read_discard(transport, &desc);
 	} while (desc.count);
 	dprintk("RPC:      xs_tcp_data_recv done\n");
 	return len - desc.count;
@@ -858,11 +978,16 @@
 	case TCP_ESTABLISHED:
 		spin_lock_bh(&xprt->transport_lock);
 		if (!xprt_test_and_set_connected(xprt)) {
+			struct sock_xprt *transport = container_of(xprt,
+					struct sock_xprt, xprt);
+
 			/* Reset TCP record info */
-			xprt->tcp_offset = 0;
-			xprt->tcp_reclen = 0;
-			xprt->tcp_copied = 0;
-			xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
+			transport->tcp_offset = 0;
+			transport->tcp_reclen = 0;
+			transport->tcp_copied = 0;
+			transport->tcp_flags =
+				TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
+
 			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
 			xprt_wake_pending_tasks(xprt, 0);
 		}
@@ -951,15 +1076,16 @@
 
 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
 {
-	struct sock *sk = xprt->inet;
+	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+	struct sock *sk = transport->inet;
 
-	if (xprt->rcvsize) {
+	if (transport->rcvsize) {
 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
-		sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs *  2;
+		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
 	}
-	if (xprt->sndsize) {
+	if (transport->sndsize) {
 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-		sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
+		sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
 		sk->sk_write_space(sk);
 	}
 }
@@ -974,12 +1100,14 @@
  */
 static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
 {
-	xprt->sndsize = 0;
+	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+
+	transport->sndsize = 0;
 	if (sndsize)
-		xprt->sndsize = sndsize + 1024;
-	xprt->rcvsize = 0;
+		transport->sndsize = sndsize + 1024;
+	transport->rcvsize = 0;
 	if (rcvsize)
-		xprt->rcvsize = rcvsize + 1024;
+		transport->rcvsize = rcvsize + 1024;
 
 	xs_udp_do_set_buffer_size(xprt);
 }
@@ -1003,19 +1131,6 @@
 }
 
 /**
- * xs_print_peer_address - format an IPv4 address for printing
- * @xprt: generic transport
- * @format: flags field indicating which parts of the address to render
- */
-static char *xs_print_peer_address(struct rpc_xprt *xprt, enum rpc_display_format_t format)
-{
-	if (xprt->address_strings[format] != NULL)
-		return xprt->address_strings[format];
-	else
-		return "unprintable";
-}
-
-/**
  * xs_set_port - reset the port number in the remote endpoint address
  * @xprt: generic transport
  * @port: new port number
@@ -1030,20 +1145,20 @@
 	sap->sin_port = htons(port);
 }
 
-static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
+static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock)
 {
 	struct sockaddr_in myaddr = {
 		.sin_family = AF_INET,
 	};
 	int err;
-	unsigned short port = xprt->port;
+	unsigned short port = transport->port;
 
 	do {
 		myaddr.sin_port = htons(port);
 		err = kernel_bind(sock, (struct sockaddr *) &myaddr,
 						sizeof(myaddr));
 		if (err == 0) {
-			xprt->port = port;
+			transport->port = port;
 			dprintk("RPC:      xs_bindresvport bound to port %u\n",
 					port);
 			return 0;
@@ -1052,22 +1167,53 @@
 			port = xprt_max_resvport;
 		else
 			port--;
-	} while (err == -EADDRINUSE && port != xprt->port);
+	} while (err == -EADDRINUSE && port != transport->port);
 
 	dprintk("RPC:      can't bind to reserved port (%d).\n", -err);
 	return err;
 }
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key xs_key[2];
+static struct lock_class_key xs_slock_key[2];
+
+static inline void xs_reclassify_socket(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	BUG_ON(sk->sk_lock.owner != NULL);
+	switch (sk->sk_family) {
+	case AF_INET:
+		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFS",
+			&xs_slock_key[0], "sk_lock-AF_INET-NFS", &xs_key[0]);
+		break;
+
+	case AF_INET6:
+		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFS",
+			&xs_slock_key[1], "sk_lock-AF_INET6-NFS", &xs_key[1]);
+		break;
+
+	default:
+		BUG();
+	}
+}
+#else
+static inline void xs_reclassify_socket(struct socket *sock)
+{
+}
+#endif
+
 /**
  * xs_udp_connect_worker - set up a UDP socket
- * @args: RPC transport to connect
+ * @work: RPC transport to connect
  *
  * Invoked by a work queue tasklet.
  */
-static void xs_udp_connect_worker(void *args)
+static void xs_udp_connect_worker(struct work_struct *work)
 {
-	struct rpc_xprt *xprt = (struct rpc_xprt *) args;
-	struct socket *sock = xprt->sock;
+	struct sock_xprt *transport =
+		container_of(work, struct sock_xprt, connect_worker.work);
+	struct rpc_xprt *xprt = &transport->xprt;
+	struct socket *sock = transport->sock;
 	int err, status = -EIO;
 
 	if (xprt->shutdown || !xprt_bound(xprt))
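
xs_reclassify_socket() above gives the RPC client's kernel-created sockets their own lockdep classes so they are not conflated with ordinary user sockets of the same family. Under CONFIG_DEBUG_LOCK_ALLOC, the same idea for any in-kernel socket user might look like this (names are made up; the call must happen before the socket lock is ever taken):

#include <linux/kernel.h>
#include <linux/net.h>
#include <net/sock.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key demo_key;
static struct lock_class_key demo_slock_key;

static void demo_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;

	/* Only safe while nobody has locked the socket yet. */
	BUG_ON(sk->sk_lock.owner != NULL);
	sock_lock_init_class_and_name(sk,
			"slock-AF_INET-DEMO", &demo_slock_key,
			"sk_lock-AF_INET-DEMO", &demo_key);
}
#else
static inline void demo_reclassify_socket(struct socket *sock)
{
}
#endif
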
@@ -1080,24 +1226,25 @@
 		dprintk("RPC:      can't create UDP transport socket (%d).\n", -err);
 		goto out;
 	}
+	xs_reclassify_socket(sock);
 
-	if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
+	if (xprt->resvport && xs_bindresvport(transport, sock) < 0) {
 		sock_release(sock);
 		goto out;
 	}
 
 	dprintk("RPC:      worker connecting xprt %p to address: %s\n",
-			xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL));
+			xprt, xprt->address_strings[RPC_DISPLAY_ALL]);
 
-	if (!xprt->inet) {
+	if (!transport->inet) {
 		struct sock *sk = sock->sk;
 
 		write_lock_bh(&sk->sk_callback_lock);
 
 		sk->sk_user_data = xprt;
-		xprt->old_data_ready = sk->sk_data_ready;
-		xprt->old_state_change = sk->sk_state_change;
-		xprt->old_write_space = sk->sk_write_space;
+		transport->old_data_ready = sk->sk_data_ready;
+		transport->old_state_change = sk->sk_state_change;
+		transport->old_write_space = sk->sk_write_space;
 		sk->sk_data_ready = xs_udp_data_ready;
 		sk->sk_write_space = xs_udp_write_space;
 		sk->sk_no_check = UDP_CSUM_NORCV;
@@ -1106,8 +1253,8 @@
 		xprt_set_connected(xprt);
 
 		/* Reset to new socket */
-		xprt->sock = sock;
-		xprt->inet = sk;
+		transport->sock = sock;
+		transport->inet = sk;
 
 		write_unlock_bh(&sk->sk_callback_lock);
 	}
@@ -1125,7 +1272,7 @@
 static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
 {
 	int result;
-	struct socket *sock = xprt->sock;
+	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 	struct sockaddr any;
 
 	dprintk("RPC:      disconnecting xprt %p to reuse port\n", xprt);
@@ -1136,7 +1283,7 @@
 	 */
 	memset(&any, 0, sizeof(any));
 	any.sa_family = AF_UNSPEC;
-	result = kernel_connect(sock, &any, sizeof(any), 0);
+	result = kernel_connect(transport->sock, &any, sizeof(any), 0);
 	if (result)
 		dprintk("RPC:      AF_UNSPEC connect return code %d\n",
 				result);
@@ -1144,27 +1291,30 @@
 
 /**
  * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
- * @args: RPC transport to connect
+ * @work: RPC transport to connect
  *
  * Invoked by a work queue tasklet.
  */
-static void xs_tcp_connect_worker(void *args)
+static void xs_tcp_connect_worker(struct work_struct *work)
 {
-	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
-	struct socket *sock = xprt->sock;
+	struct sock_xprt *transport =
+		container_of(work, struct sock_xprt, connect_worker.work);
+	struct rpc_xprt *xprt = &transport->xprt;
+	struct socket *sock = transport->sock;
 	int err, status = -EIO;
 
 	if (xprt->shutdown || !xprt_bound(xprt))
 		goto out;
 
-	if (!xprt->sock) {
+	if (!sock) {
 		/* start from scratch */
 		if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) {
 			dprintk("RPC:      can't create TCP transport socket (%d).\n", -err);
 			goto out;
 		}
+		xs_reclassify_socket(sock);
 
-		if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
+		if (xprt->resvport && xs_bindresvport(transport, sock) < 0) {
 			sock_release(sock);
 			goto out;
 		}
@@ -1173,17 +1323,17 @@
 		xs_tcp_reuse_connection(xprt);
 
 	dprintk("RPC:      worker connecting xprt %p to address: %s\n",
-			xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL));
+			xprt, xprt->address_strings[RPC_DISPLAY_ALL]);
 
-	if (!xprt->inet) {
+	if (!transport->inet) {
 		struct sock *sk = sock->sk;
 
 		write_lock_bh(&sk->sk_callback_lock);
 
 		sk->sk_user_data = xprt;
-		xprt->old_data_ready = sk->sk_data_ready;
-		xprt->old_state_change = sk->sk_state_change;
-		xprt->old_write_space = sk->sk_write_space;
+		transport->old_data_ready = sk->sk_data_ready;
+		transport->old_state_change = sk->sk_state_change;
+		transport->old_write_space = sk->sk_write_space;
 		sk->sk_data_ready = xs_tcp_data_ready;
 		sk->sk_state_change = xs_tcp_state_change;
 		sk->sk_write_space = xs_tcp_write_space;
@@ -1198,8 +1348,8 @@
 		xprt_clear_connected(xprt);
 
 		/* Reset to new socket */
-		xprt->sock = sock;
-		xprt->inet = sk;
+		transport->sock = sock;
+		transport->inet = sk;
 
 		write_unlock_bh(&sk->sk_callback_lock);
 	}
@@ -1248,21 +1398,22 @@
 static void xs_connect(struct rpc_task *task)
 {
 	struct rpc_xprt *xprt = task->tk_xprt;
+	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 
 	if (xprt_test_and_set_connecting(xprt))
 		return;
 
-	if (xprt->sock != NULL) {
+	if (transport->sock != NULL) {
 		dprintk("RPC:      xs_connect delayed xprt %p for %lu seconds\n",
 				xprt, xprt->reestablish_timeout / HZ);
-		schedule_delayed_work(&xprt->connect_worker,
+		schedule_delayed_work(&transport->connect_worker,
 					xprt->reestablish_timeout);
 		xprt->reestablish_timeout <<= 1;
 		if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
 			xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
 	} else {
 		dprintk("RPC:      xs_connect scheduled xprt %p\n", xprt);
-		schedule_work(&xprt->connect_worker);
+		schedule_delayed_work(&transport->connect_worker, 0);
 
 		/* flush_scheduled_work can sleep... */
 		if (!RPC_IS_ASYNC(task))
@@ -1278,8 +1429,10 @@
  */
 static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
 {
+	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+
 	seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n",
-			xprt->port,
+			transport->port,
 			xprt->stat.bind_count,
 			xprt->stat.sends,
 			xprt->stat.recvs,
@@ -1296,13 +1449,14 @@
  */
 static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
 {
+	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 	long idle_time = 0;
 
 	if (xprt_connected(xprt))
 		idle_time = (long)(jiffies - xprt->last_used) / HZ;
 
 	seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n",
-			xprt->port,
+			transport->port,
 			xprt->stat.bind_count,
 			xprt->stat.connect_count,
 			xprt->stat.connect_time,
@@ -1316,7 +1470,6 @@
 
 static struct rpc_xprt_ops xs_udp_ops = {
 	.set_buffer_size	= xs_udp_set_buffer_size,
-	.print_addr		= xs_print_peer_address,
 	.reserve_xprt		= xprt_reserve_xprt_cong,
 	.release_xprt		= xprt_release_xprt_cong,
 	.rpcbind		= rpc_getport,
@@ -1334,7 +1487,6 @@
 };
 
 static struct rpc_xprt_ops xs_tcp_ops = {
-	.print_addr		= xs_print_peer_address,
 	.reserve_xprt		= xprt_reserve_xprt,
 	.release_xprt		= xs_tcp_release_xprt,
 	.rpcbind		= rpc_getport,
@@ -1349,33 +1501,64 @@
 	.print_stats		= xs_tcp_print_stats,
 };
 
+static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, unsigned int slot_table_size)
+{
+	struct rpc_xprt *xprt;
+	struct sock_xprt *new;
+
+	if (addrlen > sizeof(xprt->addr)) {
+		dprintk("RPC:      xs_setup_xprt: address too large\n");
+		return ERR_PTR(-EBADF);
+	}
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (new == NULL) {
+		dprintk("RPC:      xs_setup_xprt: couldn't allocate rpc_xprt\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	xprt = &new->xprt;
+
+	xprt->max_reqs = slot_table_size;
+	xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL);
+	if (xprt->slot == NULL) {
+		kfree(xprt);
+		dprintk("RPC:      xs_setup_xprt: couldn't allocate slot table\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	memcpy(&xprt->addr, addr, addrlen);
+	xprt->addrlen = addrlen;
+	new->port = xs_get_random_port();
+
+	return xprt;
+}
+
 /**
  * xs_setup_udp - Set up transport to use a UDP socket
- * @xprt: transport to set up
+ * @addr: address of remote server
+ * @addrlen: length of address in bytes
  * @to:   timeout parameters
  *
  */
-int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
+struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to)
 {
-	size_t slot_table_size;
-	struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr;
+	struct rpc_xprt *xprt;
+	struct sock_xprt *transport;
 
-	xprt->max_reqs = xprt_udp_slot_table_entries;
-	slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
-	xprt->slot = kzalloc(slot_table_size, GFP_KERNEL);
-	if (xprt->slot == NULL)
-		return -ENOMEM;
+	xprt = xs_setup_xprt(addr, addrlen, xprt_udp_slot_table_entries);
+	if (IS_ERR(xprt))
+		return xprt;
+	transport = container_of(xprt, struct sock_xprt, xprt);
 
-	if (ntohs(addr->sin_port) != 0)
+	if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0)
 		xprt_set_bound(xprt);
-	xprt->port = xs_get_random_port();
 
 	xprt->prot = IPPROTO_UDP;
 	xprt->tsh_size = 0;
 	/* XXX: header size can vary due to auth type, IPv6, etc. */
 	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
 
-	INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_connect_worker);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_UDP_CONN_TO;
 	xprt->reestablish_timeout = XS_UDP_REEST_TO;
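
xs_setup_xprt() now reports failure through the returned pointer itself, and xs_setup_udp()/xs_setup_tcp() simply hand that pointer back when IS_ERR() is true. A compact sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() convention, with a hypothetical allocation helper:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_xprt {
	int proto;
};

static struct demo_xprt *demo_setup(size_t addrlen, size_t maxlen)
{
	struct demo_xprt *xprt;

	if (addrlen > maxlen)
		return ERR_PTR(-EBADF);		/* error encoded in the pointer */

	xprt = kzalloc(sizeof(*xprt), GFP_KERNEL);
	if (xprt == NULL)
		return ERR_PTR(-ENOMEM);
	return xprt;
}

static int demo_caller(void)
{
	struct demo_xprt *xprt = demo_setup(16, 128);

	if (IS_ERR(xprt))
		return PTR_ERR(xprt);		/* recover the negative errno */
	kfree(xprt);
	return 0;
}
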
@@ -1390,37 +1573,36 @@
 
 	xs_format_peer_addresses(xprt);
 	dprintk("RPC:      set up transport to address %s\n",
-			xs_print_peer_address(xprt, RPC_DISPLAY_ALL));
+			xprt->address_strings[RPC_DISPLAY_ALL]);
 
-	return 0;
+	return xprt;
 }
 
 /**
  * xs_setup_tcp - Set up transport to use a TCP socket
- * @xprt: transport to set up
+ * @addr: address of remote server
+ * @addrlen: length of address in bytes
  * @to: timeout parameters
  *
  */
-int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
+struct rpc_xprt *xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to)
 {
-	size_t slot_table_size;
-	struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr;
+	struct rpc_xprt *xprt;
+	struct sock_xprt *transport;
 
-	xprt->max_reqs = xprt_tcp_slot_table_entries;
-	slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
-	xprt->slot = kzalloc(slot_table_size, GFP_KERNEL);
-	if (xprt->slot == NULL)
-		return -ENOMEM;
+	xprt = xs_setup_xprt(addr, addrlen, xprt_tcp_slot_table_entries);
+	if (IS_ERR(xprt))
+		return xprt;
+	transport = container_of(xprt, struct sock_xprt, xprt);
 
-	if (ntohs(addr->sin_port) != 0)
+	if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0)
 		xprt_set_bound(xprt);
-	xprt->port = xs_get_random_port();
 
 	xprt->prot = IPPROTO_TCP;
 	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
 	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
 
-	INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+	INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker);
 	xprt->bind_timeout = XS_BIND_TO;
 	xprt->connect_timeout = XS_TCP_CONN_TO;
 	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
@@ -1435,7 +1617,40 @@
 
 	xs_format_peer_addresses(xprt);
 	dprintk("RPC:      set up transport to address %s\n",
-			xs_print_peer_address(xprt, RPC_DISPLAY_ALL));
+			xprt->address_strings[RPC_DISPLAY_ALL]);
+
+	return xprt;
+}
+
+/**
+ * init_socket_xprt - set up xprtsock's sysctls
+ *
+ */
+int init_socket_xprt(void)
+{
+#ifdef RPC_DEBUG
+	if (!sunrpc_table_header) {
+		sunrpc_table_header = register_sysctl_table(sunrpc_table, 1);
+#ifdef CONFIG_PROC_FS
+		if (sunrpc_table[0].de)
+			sunrpc_table[0].de->owner = THIS_MODULE;
+#endif
+	}
+#endif
 
 	return 0;
 }
+
+/**
+ * cleanup_socket_xprt - remove xprtsock's sysctls
+ *
+ */
+void cleanup_socket_xprt(void)
+{
+#ifdef RPC_DEBUG
+	if (sunrpc_table_header) {
+		unregister_sysctl_table(sunrpc_table_header);
+		sunrpc_table_header = NULL;
+	}
+#endif
+}
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 1bb7570..730c5c4 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -774,8 +774,8 @@
 
 int tipc_bclink_init(void)
 {
-	bcbearer = kmalloc(sizeof(*bcbearer), GFP_ATOMIC);
-	bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC);
+	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
+	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
 	if (!bcbearer || !bclink) {
  nomem:
 	 	warn("Multicast link creation failed, no memory\n");
@@ -786,14 +786,12 @@
 		return -ENOMEM;
 	}
 
-	memset(bcbearer, 0, sizeof(struct bcbearer));
 	INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
 	bcbearer->bearer.media = &bcbearer->media;
 	bcbearer->media.send_msg = tipc_bcbearer_send;
 	sprintf(bcbearer->media.name, "tipc-multicast");
 
 	bcl = &bclink->link;
-	memset(bclink, 0, sizeof(struct bclink));
 	INIT_LIST_HEAD(&bcl->waiting_ports);
 	bcl->next_out_no = 1;
 	spin_lock_init(&bclink->node.lock);
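
The TIPC hunks in this series replace the kmalloc()-then-memset() pairs with kzalloc(), which returns already-zeroed memory; behaviour is unchanged. Side by side, using an illustrative structure:

#include <linux/slab.h>
#include <linux/string.h>

struct demo_link {
	int window;
};

static struct demo_link *demo_alloc_old(void)
{
	struct demo_link *l = kmalloc(sizeof(*l), GFP_ATOMIC);

	if (l)
		memset(l, 0, sizeof(*l));	/* explicit zeroing */
	return l;
}

static struct demo_link *demo_alloc_new(void)
{
	/* One call, same zero-initialised result. */
	return kzalloc(sizeof(struct demo_link), GFP_ATOMIC);
}
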
diff --git a/net/tipc/config.c b/net/tipc/config.c
index ed1351e..458a2c4 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -107,7 +107,7 @@
 struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value)
 {
 	struct sk_buff *buf;
-	u32 value_net;
+	__be32 value_net;
 
 	buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(value)));
 	if (buf) {
@@ -284,8 +284,7 @@
 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
 		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
-	addr = *(u32 *)TLV_DATA(req_tlv_area);
-	addr = ntohl(addr);
+	addr = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
 	if (addr == tipc_own_addr)
 		return tipc_cfg_reply_none();
 	if (!tipc_addr_node_valid(addr))
@@ -319,8 +318,7 @@
 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
 		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
-	value = *(u32 *)TLV_DATA(req_tlv_area);
-	value = ntohl(value);
+	value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
 	tipc_remote_management = (value != 0);
 	return tipc_cfg_reply_none();
 }
@@ -332,8 +330,7 @@
 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
 		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
-	value = *(u32 *)TLV_DATA(req_tlv_area);
-	value = ntohl(value);
+	value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
 	if (value != delimit(value, 1, 65535))
 		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
 						   " (max publications must be 1-65535)");
@@ -348,8 +345,7 @@
 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
 		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
-	value = *(u32 *)TLV_DATA(req_tlv_area);
-	value = ntohl(value);
+	value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
 	if (value != delimit(value, 1, 65535))
 		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
 						   " (max subscriptions must be 1-65535");
@@ -363,8 +359,7 @@
 
 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
 		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-	value = *(u32 *)TLV_DATA(req_tlv_area);
-	value = ntohl(value);
+	value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
 	if (value == tipc_max_ports)
 		return tipc_cfg_reply_none();
 	if (value != delimit(value, 127, 65535))
@@ -383,8 +378,7 @@
 
 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
 		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-	value = *(u32 *)TLV_DATA(req_tlv_area);
-	value = ntohl(value);
+	value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
 	if (value == tipc_max_zones)
 		return tipc_cfg_reply_none();
 	if (value != delimit(value, 1, 255))
@@ -403,8 +397,7 @@
 
 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
 		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-	value = *(u32 *)TLV_DATA(req_tlv_area);
-	value = ntohl(value);
+	value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
 	if (value != delimit(value, 1, 1))
 		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
 						   " (max clusters fixed at 1)");
@@ -417,8 +410,7 @@
 
 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
 		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-	value = *(u32 *)TLV_DATA(req_tlv_area);
-	value = ntohl(value);
+	value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
 	if (value == tipc_max_nodes)
 		return tipc_cfg_reply_none();
 	if (value != delimit(value, 8, 2047))
@@ -437,8 +429,7 @@
 
 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
 		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-	value = *(u32 *)TLV_DATA(req_tlv_area);
-	value = ntohl(value);
+	value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
 	if (value != 0)
 		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
 						   " (max secondary nodes fixed at 0)");
@@ -451,8 +442,7 @@
 
 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
 		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-	value = *(u32 *)TLV_DATA(req_tlv_area);
-	value = ntohl(value);
+	value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
 	if (value == tipc_net_id)
 		return tipc_cfg_reply_none();
 	if (value != delimit(value, 1, 9999))
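
The config.c conversions above are endianness annotations for sparse: TLV payloads arrive in network byte order, so they are read as __be32 and converted exactly once. A minimal sketch of the annotated read:

#include <linux/types.h>
#include <asm/byteorder.h>

static u32 demo_read_tlv_u32(const void *tlv_data)
{
	/* The wire value is big-endian; typing it __be32 lets sparse catch
	 * any use of the raw, unconverted representation. */
	__be32 raw = *(const __be32 *)tlv_data;

	return ntohl(raw);
}
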
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
index d8af4c2..627f99b7 100644
--- a/net/tipc/dbg.c
+++ b/net/tipc/dbg.c
@@ -393,8 +393,7 @@
 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
 		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
-	value = *(u32 *)TLV_DATA(req_tlv_area);
-	value = ntohl(value);
+	value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
 	if (value != delimit(value, 0, 32768))
 		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
 						   " (log size must be 0-32768)");
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index ae6ddf0..eb80778 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -42,7 +42,7 @@
 	unsigned long data;
 };
 
-static kmem_cache_t *tipc_queue_item_cache;
+static struct kmem_cache *tipc_queue_item_cache;
 static struct list_head signal_queue_head;
 static DEFINE_SPINLOCK(qitem_lock);
 static int handler_enabled = 0;
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 03bd659..7bf87cb 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -66,11 +66,11 @@
  */
 
 struct distr_item {
-	u32 type;
-	u32 lower;
-	u32 upper;
-	u32 ref;
-	u32 key;
+	__be32 type;
+	__be32 lower;
+	__be32 upper;
+	__be32 ref;
+	__be32 key;
 };
 
 /**
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 886bda5..4111a31 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -60,7 +60,7 @@
 	struct node *n_ptr;
         struct node **curr_node;
 
-	n_ptr = kmalloc(sizeof(*n_ptr),GFP_ATOMIC);
+	n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC);
 	if (!n_ptr) {
 		warn("Node creation failed, no memory\n");
 		return NULL;
@@ -75,7 +75,6 @@
 		return NULL;
 	}
 		
-	memset(n_ptr, 0, sizeof(*n_ptr));
 	n_ptr->addr = addr;
                 spin_lock_init(&n_ptr->lock);
 	INIT_LIST_HEAD(&n_ptr->nsub);
@@ -597,8 +596,7 @@
 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
 		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
-	domain = *(u32 *)TLV_DATA(req_tlv_area);
-	domain = ntohl(domain);
+	domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
 	if (!tipc_addr_domain_valid(domain))
 		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
 						   " (network address)");
@@ -642,8 +640,7 @@
 	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
 		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
-	domain = *(u32 *)TLV_DATA(req_tlv_area);
-	domain = ntohl(domain);
+	domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
 	if (!tipc_addr_domain_valid(domain))
 		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
 						   " (network address)");
@@ -664,8 +661,7 @@
 
 	/* Add TLV for broadcast link */
 
-        link_info.dest = tipc_own_addr & 0xfffff00;
-	link_info.dest = htonl(link_info.dest);
+        link_info.dest = htonl(tipc_own_addr & 0xfffff00);
         link_info.up = htonl(1);
         sprintf(link_info.str, tipc_bclink_name);
 	tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 7a918f1..ddade73 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -350,7 +350,7 @@
 
 	/* Allocate subscription object */
 
-	sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
+	sub = kzalloc(sizeof(*sub), GFP_ATOMIC);
 	if (!sub) {
 		warn("Subscription rejected, no memory\n");
 		subscr_terminate(subscriber);
@@ -359,7 +359,6 @@
 
 	/* Initialize subscription object */
 
-	memset(sub, 0, sizeof(*sub));
 	sub->seq.type = htohl(s->seq.type, subscriber->swap);
 	sub->seq.lower = htohl(s->seq.lower, subscriber->swap);
 	sub->seq.upper = htohl(s->seq.upper, subscriber->swap);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index b43a278..2f208c7 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -151,8 +151,9 @@
  *    each socket state is protected by separate rwlock.
  */
 
-static inline unsigned unix_hash_fold(unsigned hash)
+static inline unsigned unix_hash_fold(__wsum n)
 {
+	unsigned hash = (__force unsigned)n;
 	hash ^= hash>>16;
 	hash ^= hash>>8;
 	return hash&(UNIX_HASH_SIZE-1);
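
The af_unix change keeps the checksum type (__wsum) intact right up to the point where it is intentionally reinterpreted; the __force cast marks that reinterpretation as deliberate, since the value only seeds a hash-bucket index. A stand-alone version of the fold:

#include <linux/types.h>

#define DEMO_HASH_SIZE 256

static unsigned int demo_hash_fold(__wsum csum)
{
	/* Deliberate reinterpretation: byte order does not matter for
	 * bucket selection, so the sparse warning is silenced explicitly. */
	unsigned int hash = (__force unsigned int)csum;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	return hash & (DEMO_HASH_SIZE - 1);
}
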
diff --git a/net/wanrouter/af_wanpipe.c b/net/wanrouter/af_wanpipe.c
index 6f39faa..c205973 100644
--- a/net/wanrouter/af_wanpipe.c
+++ b/net/wanrouter/af_wanpipe.c
@@ -13,7 +13,7 @@
 * Due Credit:
 *               Wanpipe socket layer is based on Packet and 
 *               the X25 socket layers. The above sockets were 
-*               used for the specific use of Sangoma Technoloiges 
+*               used for the specific use of Sangoma Technologies 
 *               API programs. 
 *               Packet socket Authors: Ross Biro, Fred N. van Kempen and 
 *                                      Alan Cox.
@@ -23,7 +23,7 @@
 * Apr 25, 2000  Nenad Corbic     o Added the ability to send zero length packets.
 * Mar 13, 2000  Nenad Corbic	 o Added a tx buffer check via ioctl call.
 * Mar 06, 2000  Nenad Corbic     o Fixed the corrupt sock lcn problem.
-*                                  Server and client applicaton can run
+*                                  Server and client application can run
 *                                  simultaneously without conflicts.
 * Feb 29, 2000  Nenad Corbic     o Added support for PVC protocols, such as
 *                                  CHDLC, Frame Relay and HDLC API.
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 9479659..769cdd6 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -3,7 +3,7 @@
 *
 *		This module is completely hardware-independent and provides
 *		the following common services for the WAN Link Drivers:
-*		 o WAN device managenment (registering, unregistering)
+*		 o WAN device management (registering, unregistering)
 *		 o Network interface management
 *		 o Physical connection management (dial-up, incoming calls)
 *		 o Logical connection management (switched virtual circuits)
@@ -62,63 +62,6 @@
 
 #define KMEM_SAFETYZONE 8
 
-/***********FOR DEBUGGING PURPOSES*********************************************
-static void * dbg_kmalloc(unsigned int size, int prio, int line) {
-	int i = 0;
-	void * v = kmalloc(size+sizeof(unsigned int)+2*KMEM_SAFETYZONE*8,prio);
-	char * c1 = v;
-	c1 += sizeof(unsigned int);
-	*((unsigned int *)v) = size;
-
-	for (i = 0; i < KMEM_SAFETYZONE; i++) {
-		c1[0] = 'D'; c1[1] = 'E'; c1[2] = 'A'; c1[3] = 'D';
-		c1[4] = 'B'; c1[5] = 'E'; c1[6] = 'E'; c1[7] = 'F';
-		c1 += 8;
-	}
-	c1 += size;
-	for (i = 0; i < KMEM_SAFETYZONE; i++) {
-		c1[0] = 'M'; c1[1] = 'U'; c1[2] = 'N'; c1[3] = 'G';
-		c1[4] = 'W'; c1[5] = 'A'; c1[6] = 'L'; c1[7] = 'L';
-		c1 += 8;
-	}
-	v = ((char *)v) + sizeof(unsigned int) + KMEM_SAFETYZONE*8;
-	printk(KERN_INFO "line %d  kmalloc(%d,%d) = %p\n",line,size,prio,v);
-	return v;
-}
-static void dbg_kfree(void * v, int line) {
-	unsigned int * sp = (unsigned int *)(((char *)v) - (sizeof(unsigned int) + KMEM_SAFETYZONE*8));
-	unsigned int size = *sp;
-	char * c1 = ((char *)v) - KMEM_SAFETYZONE*8;
-	int i = 0;
-	for (i = 0; i < KMEM_SAFETYZONE; i++) {
-		if (   c1[0] != 'D' || c1[1] != 'E' || c1[2] != 'A' || c1[3] != 'D'
-		    || c1[4] != 'B' || c1[5] != 'E' || c1[6] != 'E' || c1[7] != 'F') {
-			printk(KERN_INFO "kmalloced block at %p has been corrupted (underrun)!\n",v);
-			printk(KERN_INFO " %4x: %2x %2x %2x %2x %2x %2x %2x %2x\n", i*8,
-			                c1[0],c1[1],c1[2],c1[3],c1[4],c1[5],c1[6],c1[7] );
-		}
-		c1 += 8;
-	}
-	c1 += size;
-	for (i = 0; i < KMEM_SAFETYZONE; i++) {
-		if (   c1[0] != 'M' || c1[1] != 'U' || c1[2] != 'N' || c1[3] != 'G'
-		    || c1[4] != 'W' || c1[5] != 'A' || c1[6] != 'L' || c1[7] != 'L'
-		   ) {
-			printk(KERN_INFO "kmalloced block at %p has been corrupted (overrun):\n",v);
-			printk(KERN_INFO " %4x: %2x %2x %2x %2x %2x %2x %2x %2x\n", i*8,
-			                c1[0],c1[1],c1[2],c1[3],c1[4],c1[5],c1[6],c1[7] );
-		}
-		c1 += 8;
-	}
-	printk(KERN_INFO "line %d  kfree(%p)\n",line,v);
-	v = ((char *)v) - (sizeof(unsigned int) + KMEM_SAFETYZONE*8);
-	kfree(v);
-}
-
-#define kmalloc(x,y) dbg_kmalloc(x,y,__LINE__)
-#define kfree(x) dbg_kfree(x,__LINE__)
-*****************************************************************************/
-
 /*
  * 	Function Prototypes
  */
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 5a0dbeb..6b381fc03 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -119,6 +119,23 @@
 		.sadb_alg_maxbits = 160
 	}
 },
+{
+	.name = "xcbc(aes)",
+
+	.uinfo = {
+		.auth = {
+			.icv_truncbits = 96,
+			.icv_fullbits = 128,
+		}
+	},
+
+	.desc = {
+		.sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC,
+		.sadb_alg_ivlen = 0,
+		.sadb_alg_minbits = 128,
+		.sadb_alg_maxbits = 128
+	}
+},
 };
 
 static struct xfrm_algo_desc ealg_list[] = {
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index e8198a2..414f890 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -12,7 +12,7 @@
 #include <net/ip.h>
 #include <net/xfrm.h>
 
-static kmem_cache_t *secpath_cachep __read_mostly;
+static struct kmem_cache *secpath_cachep __read_mostly;
 
 void __secpath_destroy(struct sec_path *sp)
 {
@@ -27,7 +27,7 @@
 {
 	struct sec_path *sp;
 
-	sp = kmem_cache_alloc(secpath_cachep, SLAB_ATOMIC);
+	sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
 	if (!sp)
 		return NULL;
 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 7736b23..bebd40e 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -25,6 +25,7 @@
 #include <linux/cache.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
+#include <linux/audit.h>
 
 #include "xfrm_hash.h"
 
@@ -39,7 +40,7 @@
 static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
 static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
 
-static kmem_cache_t *xfrm_dst_cache __read_mostly;
+static struct kmem_cache *xfrm_dst_cache __read_mostly;
 
 static struct work_struct xfrm_policy_gc_work;
 static HLIST_HEAD(xfrm_policy_gc_list);
@@ -50,6 +51,40 @@
 static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family);
 static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo);
 
+static inline int
+__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
+{
+	return  addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
+		addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
+		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
+		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
+		(fl->proto == sel->proto || !sel->proto) &&
+		(fl->oif == sel->ifindex || !sel->ifindex);
+}
+
+static inline int
+__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
+{
+	return  addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
+		addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
+		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
+		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
+		(fl->proto == sel->proto || !sel->proto) &&
+		(fl->oif == sel->ifindex || !sel->ifindex);
+}
+
+int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
+		    unsigned short family)
+{
+	switch (family) {
+	case AF_INET:
+		return __xfrm4_selector_match(sel, fl);
+	case AF_INET6:
+		return __xfrm6_selector_match(sel, fl);
+	}
+	return 0;
+}
+
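
xfrm_selector_match() above reduces to a handful of mask tests: an address prefix comparison plus port checks in which an all-zero mask means "match anything". Those two tests in isolation, as illustrative helpers for IPv4 only (not the kernel's addr_match()):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Zero mask matches any port; a full mask demands an exact match. */
static int demo_port_match(__be16 flow_port, __be16 sel_port, __be16 sel_mask)
{
	return !((flow_port ^ sel_port) & sel_mask);
}

/* Compare the leading prefixlen bits of two IPv4 addresses. */
static int demo_prefix_match(__be32 a, __be32 b, unsigned int prefixlen)
{
	__be32 mask = prefixlen ? htonl(~0u << (32 - prefixlen)) : 0;

	return !((a ^ b) & mask);
}
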
 int xfrm_register_type(struct xfrm_type *type, unsigned short family)
 {
 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
@@ -358,7 +393,7 @@
 	xfrm_pol_put(policy);
 }
 
-static void xfrm_policy_gc_task(void *data)
+static void xfrm_policy_gc_task(struct work_struct *work)
 {
 	struct xfrm_policy *policy;
 	struct hlist_node *entry, *tmp;
@@ -546,7 +581,7 @@
 
 static DEFINE_MUTEX(hash_resize_mutex);
 
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
 {
 	int dir, total;
 
@@ -563,7 +598,7 @@
 	mutex_unlock(&hash_resize_mutex);
 }
 
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
 
 /* Generate new index... KAME seems to generate them ordered by cost
  * of an absolute inpredictability of ordering of rules. This will not pass. */
@@ -770,7 +805,7 @@
 }
 EXPORT_SYMBOL(xfrm_policy_byid);
 
-void xfrm_policy_flush(u8 type)
+void xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
 {
 	int dir;
 
@@ -790,6 +825,9 @@
 			hlist_del(&pol->byidx);
 			write_unlock_bh(&xfrm_policy_lock);
 
+			xfrm_audit_log(audit_info->loginuid, audit_info->secid,
+				       AUDIT_MAC_IPSEC_DELSPD, 1, pol, NULL);
+
 			xfrm_policy_kill(pol);
 			killed++;
 
@@ -808,6 +846,11 @@
 				hlist_del(&pol->byidx);
 				write_unlock_bh(&xfrm_policy_lock);
 
+				xfrm_audit_log(audit_info->loginuid,
+					       audit_info->secid,
+					       AUDIT_MAC_IPSEC_DELSPD, 1,
+					       pol, NULL);
+
 				xfrm_policy_kill(pol);
 				killed++;
 
@@ -826,33 +869,12 @@
 int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*),
 		     void *data)
 {
-	struct xfrm_policy *pol;
+	struct xfrm_policy *pol, *last = NULL;
 	struct hlist_node *entry;
-	int dir, count, error;
+	int dir, last_dir = 0, count, error;
 
 	read_lock_bh(&xfrm_policy_lock);
 	count = 0;
-	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
-		struct hlist_head *table = xfrm_policy_bydst[dir].table;
-		int i;
-
-		hlist_for_each_entry(pol, entry,
-				     &xfrm_policy_inexact[dir], bydst) {
-			if (pol->type == type)
-				count++;
-		}
-		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
-			hlist_for_each_entry(pol, entry, table + i, bydst) {
-				if (pol->type == type)
-					count++;
-			}
-		}
-	}
-
-	if (count == 0) {
-		error = -ENOENT;
-		goto out;
-	}
 
 	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
 		struct hlist_head *table = xfrm_policy_bydst[dir].table;
@@ -862,21 +884,37 @@
 				     &xfrm_policy_inexact[dir], bydst) {
 			if (pol->type != type)
 				continue;
-			error = func(pol, dir % XFRM_POLICY_MAX, --count, data);
-			if (error)
-				goto out;
+			if (last) {
+				error = func(last, last_dir % XFRM_POLICY_MAX,
+					     count, data);
+				if (error)
+					goto out;
+			}
+			last = pol;
+			last_dir = dir;
+			count++;
 		}
 		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
 			hlist_for_each_entry(pol, entry, table + i, bydst) {
 				if (pol->type != type)
 					continue;
-				error = func(pol, dir % XFRM_POLICY_MAX, --count, data);
-				if (error)
-					goto out;
+				if (last) {
+					error = func(last, last_dir % XFRM_POLICY_MAX,
+						     count, data);
+					if (error)
+						goto out;
+				}
+				last = pol;
+				last_dir = dir;
+				count++;
 			}
 		}
 	}
-	error = 0;
+	if (count == 0) {
+		error = -ENOENT;
+		goto out;
+	}
+	error = func(last, last_dir % XFRM_POLICY_MAX, 0, data);
 out:
 	read_unlock_bh(&xfrm_policy_lock);
 	return error;
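
The reworked xfrm_policy_walk() no longer pre-counts matching entries; instead it reports each policy one step late, so that by the time the final entry reaches the callback the walker knows it is last and can hand it index 0. The control-flow pattern on its own, in plain illustrative C:

#include <stdio.h>

static int report(int value, int index)
{
	printf("entry %d (index %d)\n", value, index);
	return 0;
}

static int walk(const int *items, int n)
{
	const int *last = NULL;
	int i, count = 0, err;

	for (i = 0; i < n; i++) {
		if (last) {
			/* Report the previous entry now that we know it
			 * was not the final one. */
			err = report(*last, count);
			if (err)
				return err;
		}
		last = &items[i];
		count++;
	}
	if (count == 0)
		return -1;		/* nothing matched */
	/* The deferred, final entry is flagged with index 0. */
	return report(*last, 0);
}

int main(void)
{
	int items[] = { 10, 20, 30 };

	return walk(items, 3);
}
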
@@ -1177,6 +1215,7 @@
 		if (tmpl->mode == XFRM_MODE_TUNNEL) {
 			remote = &tmpl->id.daddr;
 			local = &tmpl->saddr;
+			family = tmpl->encap_family;
 			if (xfrm_addr_any(local, family)) {
 				error = xfrm_get_saddr(&tmp, remote, family);
 				if (error)
@@ -1894,7 +1933,8 @@
 
 		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
 			return 0;
-		if (fl && !security_xfrm_flow_state_match(fl, dst->xfrm, pol))
+		if (fl && pol &&
+		    !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
 			return 0;
 		if (dst->xfrm->km.state != XFRM_STATE_VALID)
 			return 0;
@@ -1946,6 +1986,117 @@
 
 EXPORT_SYMBOL(xfrm_bundle_ok);
 
+#ifdef CONFIG_AUDITSYSCALL
+/* Audit addition and deletion of SAs and ipsec policy */
+
+void xfrm_audit_log(uid_t auid, u32 sid, int type, int result,
+		    struct xfrm_policy *xp, struct xfrm_state *x)
+{
+
+	char *secctx;
+	u32 secctx_len;
+	struct xfrm_sec_ctx *sctx = NULL;
+	struct audit_buffer *audit_buf;
+	int family;
+	extern int audit_enabled;
+
+	if (audit_enabled == 0)
+		return;
+
+	audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, type);
+	if (audit_buf == NULL)
+		return;
+
+	switch(type) {
+	case AUDIT_MAC_IPSEC_ADDSA:
+		audit_log_format(audit_buf, "SAD add: auid=%u", auid);
+		break;
+	case AUDIT_MAC_IPSEC_DELSA:
+		audit_log_format(audit_buf, "SAD delete: auid=%u", auid);
+		break;
+	case AUDIT_MAC_IPSEC_ADDSPD:
+		audit_log_format(audit_buf, "SPD add: auid=%u", auid);
+		break;
+	case AUDIT_MAC_IPSEC_DELSPD:
+		audit_log_format(audit_buf, "SPD delete: auid=%u", auid);
+		break;
+	default:
+		return;
+	}
+
+	if (sid != 0 &&
+		security_secid_to_secctx(sid, &secctx, &secctx_len) == 0)
+		audit_log_format(audit_buf, " subj=%s", secctx);
+	else
+		audit_log_task_context(audit_buf);
+
+	if (xp) {
+		family = xp->selector.family;
+		if (xp->security)
+			sctx = xp->security;
+	} else {
+		family = x->props.family;
+		if (x->security)
+			sctx = x->security;
+	}
+
+	if (sctx)
+		audit_log_format(audit_buf,
+				" sec_alg=%u sec_doi=%u sec_obj=%s",
+				sctx->ctx_alg, sctx->ctx_doi, sctx->ctx_str);
+
+	switch(family) {
+	case AF_INET:
+		{
+			struct in_addr saddr, daddr;
+			if (xp) {
+				saddr.s_addr = xp->selector.saddr.a4;
+				daddr.s_addr = xp->selector.daddr.a4;
+			} else {
+				saddr.s_addr = x->props.saddr.a4;
+				daddr.s_addr = x->id.daddr.a4;
+			}
+			audit_log_format(audit_buf,
+					 " src=%u.%u.%u.%u dst=%u.%u.%u.%u",
+					 NIPQUAD(saddr), NIPQUAD(daddr));
+		}
+			break;
+	case AF_INET6:
+		{
+			struct in6_addr saddr6, daddr6;
+			if (xp) {
+				memcpy(&saddr6, xp->selector.saddr.a6,
+					sizeof(struct in6_addr));
+				memcpy(&daddr6, xp->selector.daddr.a6,
+					sizeof(struct in6_addr));
+			} else {
+				memcpy(&saddr6, x->props.saddr.a6,
+					sizeof(struct in6_addr));
+				memcpy(&daddr6, x->id.daddr.a6,
+					sizeof(struct in6_addr));
+			}
+			audit_log_format(audit_buf,
+					 " src=" NIP6_FMT " dst=" NIP6_FMT,
+					 NIP6(saddr6), NIP6(daddr6));
+		}
+		break;
+	}
+
+	if (x)
+		audit_log_format(audit_buf, " spi=%lu(0x%lx) protocol=%s",
+				(unsigned long)ntohl(x->id.spi),
+				(unsigned long)ntohl(x->id.spi),
+				x->id.proto == IPPROTO_AH ? "AH" :
+				(x->id.proto == IPPROTO_ESP ?
+				"ESP" : "IPCOMP"));
+
+	audit_log_format(audit_buf, " res=%u", result);
+	audit_log_end(audit_buf);
+}
+
+EXPORT_SYMBOL(xfrm_audit_log);
+#endif /* CONFIG_AUDITSYSCALL */
+
 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
 {
 	int err = 0;
@@ -2080,7 +2231,7 @@
 			panic("XFRM: failed to allocate bydst hash\n");
 	}
 
-	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
+	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
 	register_netdevice_notifier(&xfrm_dev_notifier);
 }
 
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 899de9e..fdb08d9 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/cache.h>
 #include <asm/uaccess.h>
+#include <linux/audit.h>
 
 #include "xfrm_hash.h"
 
@@ -115,7 +116,7 @@
 
 static DEFINE_MUTEX(hash_resize_mutex);
 
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
 {
 	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
 	unsigned long nsize, osize;
@@ -168,7 +169,7 @@
 	mutex_unlock(&hash_resize_mutex);
 }
 
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
 
 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
 EXPORT_SYMBOL(km_waitq);
@@ -207,7 +208,7 @@
 	kfree(x);
 }
 
-static void xfrm_state_gc_task(void *data)
+static void xfrm_state_gc_task(struct work_struct *data)
 {
 	struct xfrm_state *x;
 	struct hlist_node *entry, *tmp;
@@ -238,6 +239,7 @@
 	unsigned long now = (unsigned long)xtime.tv_sec;
 	long next = LONG_MAX;
 	int warn = 0;
+	int err = 0;
 
 	spin_lock(&x->lock);
 	if (x->km.state == XFRM_STATE_DEAD)
@@ -295,9 +297,14 @@
 		next = 2;
 		goto resched;
 	}
-	if (!__xfrm_state_delete(x) && x->id.spi)
+
+	err = __xfrm_state_delete(x);
+	if (!err && x->id.spi)
 		km_state_expired(x, 1, 0);
 
+	xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
+		       AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);
+
 out:
 	spin_unlock(&x->lock);
 }
@@ -384,9 +391,10 @@
 }
 EXPORT_SYMBOL(xfrm_state_delete);
 
-void xfrm_state_flush(u8 proto)
+void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
 {
 	int i;
+	int err = 0;
 
 	spin_lock_bh(&xfrm_state_lock);
 	for (i = 0; i <= xfrm_state_hmask; i++) {
@@ -399,7 +407,11 @@
 				xfrm_state_hold(x);
 				spin_unlock_bh(&xfrm_state_lock);
 
-				xfrm_state_delete(x);
+				err = xfrm_state_delete(x);
+				xfrm_audit_log(audit_info->loginuid,
+					       audit_info->secid,
+					       AUDIT_MAC_IPSEC_DELSA,
+					       err ? 0 : 1, NULL, x);
 				xfrm_state_put(x);
 
 				spin_lock_bh(&xfrm_state_lock);
@@ -1099,7 +1111,7 @@
 		    void *data)
 {
 	int i;
-	struct xfrm_state *x;
+	struct xfrm_state *x, *last = NULL;
 	struct hlist_node *entry;
 	int count = 0;
 	int err = 0;
@@ -1107,24 +1119,22 @@
 	spin_lock_bh(&xfrm_state_lock);
 	for (i = 0; i <= xfrm_state_hmask; i++) {
 		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
-			if (xfrm_id_proto_match(x->id.proto, proto))
-				count++;
+			if (!xfrm_id_proto_match(x->id.proto, proto))
+				continue;
+			if (last) {
+				err = func(last, count, data);
+				if (err)
+					goto out;
+			}
+			last = x;
+			count++;
 		}
 	}
 	if (count == 0) {
 		err = -ENOENT;
 		goto out;
 	}
-
-	for (i = 0; i <= xfrm_state_hmask; i++) {
-		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
-			if (!xfrm_id_proto_match(x->id.proto, proto))
-				continue;
-			err = func(x, --count, data);
-			if (err)
-				goto out;
-		}
-	}
+	err = func(last, 0, data);
 out:
 	spin_unlock_bh(&xfrm_state_lock);
 	return err;
@@ -1304,7 +1314,7 @@
 }
 EXPORT_SYMBOL(km_query);
 
-int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
+int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
 {
 	int err = -EINVAL;
 	struct xfrm_mgr *km;
@@ -1568,6 +1578,6 @@
 		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
 	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
 
-	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
+	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
 }
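
The INIT_WORK()/DECLARE_WORK() conversions in this series (xfrm_policy_gc_work above, xfrm_hash_work and xfrm_state_gc_work here, key_cleanup_task in security/keys/key.c below) follow the workqueue API change: the callback now receives the struct work_struct pointer itself instead of an opaque void *, and a callback that needs per-object state recovers its container with container_of(). The static work items in these files simply ignore the argument. A small user-space sketch of the container_of() idiom, with made-up types standing in for the kernel ones:

/*
 * container_of() idiom: the callback gets a pointer to the embedded
 * member and recovers the enclosing object from it.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work {
	void (*fn)(struct work *w);
};

struct frobnicator {			/* made-up containing object */
	int state;
	struct work gc_work;		/* embedded work item */
};

static void gc_task(struct work *w)
{
	struct frobnicator *f = container_of(w, struct frobnicator, gc_work);

	printf("gc for object with state %d\n", f->state);
}

int main(void)
{
	struct frobnicator f = { .state = 42, .gc_work = { .fn = gc_task } };

	f.gc_work.fn(&f.gc_work);	/* what the workqueue core would do */
	return 0;
}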
 
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 2ee14f8..e5372b1 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -31,6 +31,7 @@
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #include <linux/in6.h>
 #endif
+#include <linux/audit.h>
 
 static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
 {
@@ -244,11 +245,10 @@
 	*props = algo->desc.sadb_alg_id;
 
 	len = sizeof(*ualg) + (ualg->alg_key_len + 7U) / 8;
-	p = kmalloc(len, GFP_KERNEL);
+	p = kmemdup(ualg, len, GFP_KERNEL);
 	if (!p)
 		return -ENOMEM;
 
-	memcpy(p, ualg, len);
 	strcpy(p->alg_name, algo->name);
 	*algpp = p;
 	return 0;
@@ -263,11 +263,10 @@
 		return 0;
 
 	uencap = RTA_DATA(rta);
-	p = kmalloc(sizeof(*p), GFP_KERNEL);
+	p = kmemdup(uencap, sizeof(*p), GFP_KERNEL);
 	if (!p)
 		return -ENOMEM;
 
-	memcpy(p, uencap, sizeof(*p));
 	*encapp = p;
 	return 0;
 }
@@ -305,11 +304,10 @@
 		return 0;
 
 	uaddrp = RTA_DATA(rta);
-	p = kmalloc(sizeof(*p), GFP_KERNEL);
+	p = kmemdup(uaddrp, sizeof(*p), GFP_KERNEL);
 	if (!p)
 		return -ENOMEM;
 
-	memcpy(p, uaddrp, sizeof(*p));
 	*addrpp = p;
 	return 0;
 }
@@ -457,6 +455,9 @@
 	else
 		err = xfrm_state_update(x);
 
+	xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
+		       AUDIT_MAC_IPSEC_ADDSA, err ? 0 : 1, NULL, x);
+
 	if (err < 0) {
 		x->km.state = XFRM_STATE_DEAD;
 		__xfrm_state_put(x);
@@ -526,6 +527,10 @@
 	}
 
 	err = xfrm_state_delete(x);
+
+	xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
+		       AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);
+
 	if (err < 0)
 		goto out;
 
@@ -653,7 +658,6 @@
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
 	info.in_skb = in_skb;
 	info.out_skb = skb;
 	info.nlmsg_seq = seq;
@@ -773,7 +777,7 @@
 	return err;
 }
 
-static int verify_policy_dir(__u8 dir)
+static int verify_policy_dir(u8 dir)
 {
 	switch (dir) {
 	case XFRM_POLICY_IN:
@@ -788,7 +792,7 @@
 	return 0;
 }
 
-static int verify_policy_type(__u8 type)
+static int verify_policy_type(u8 type)
 {
 	switch (type) {
 	case XFRM_POLICY_TYPE_MAIN:
@@ -875,22 +879,57 @@
 		t->aalgos = ut->aalgos;
 		t->ealgos = ut->ealgos;
 		t->calgos = ut->calgos;
+		t->encap_family = ut->family;
 	}
 }
 
+static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
+{
+	int i;
+
+	if (nr > XFRM_MAX_DEPTH)
+		return -EINVAL;
+
+	for (i = 0; i < nr; i++) {
+		/* We never validated the ut->family value, so many
+		 * applications simply leave it at zero.  The check was
+		 * never made and ut->family was ignored because all
+		 * templates could be assumed to have the same family as
+		 * the policy itself.  Now that we will have ipv4-in-ipv6
+		 * and ipv6-in-ipv4 tunnels, this is no longer true.
+		 */
+		if (!ut[i].family)
+			ut[i].family = family;
+
+		switch (ut[i].family) {
+		case AF_INET:
+			break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+		case AF_INET6:
+			break;
+#endif
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct rtattr **xfrma)
 {
 	struct rtattr *rt = xfrma[XFRMA_TMPL-1];
-	struct xfrm_user_tmpl *utmpl;
-	int nr;
 
 	if (!rt) {
 		pol->xfrm_nr = 0;
 	} else {
-		nr = (rt->rta_len - sizeof(*rt)) / sizeof(*utmpl);
+		struct xfrm_user_tmpl *utmpl = RTA_DATA(rt);
+		int nr = (rt->rta_len - sizeof(*rt)) / sizeof(*utmpl);
+		int err;
 
-		if (nr > XFRM_MAX_DEPTH)
-			return -EINVAL;
+		err = validate_tmpl(nr, utmpl, pol->family);
+		if (err)
+			return err;
 
 		copy_templates(pol, RTA_DATA(rt), nr);
 	}
@@ -901,7 +940,7 @@
 {
 	struct rtattr *rt = xfrma[XFRMA_POLICY_TYPE-1];
 	struct xfrm_userpolicy_type *upt;
-	__u8 type = XFRM_POLICY_TYPE_MAIN;
+	u8 type = XFRM_POLICY_TYPE_MAIN;
 	int err;
 
 	if (rt) {
@@ -999,6 +1038,9 @@
 	 * a type XFRM_MSG_UPDPOLICY - JHS */
 	excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
 	err = xfrm_policy_insert(p->dir, xp, excl);
+	xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
+		       AUDIT_MAC_IPSEC_ADDSPD, err ? 0 : 1, xp, NULL);
+
 	if (err) {
 		security_xfrm_policy_free(xp);
 		kfree(xp);
@@ -1028,7 +1070,7 @@
 		struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
 
 		memcpy(&up->id, &kp->id, sizeof(up->id));
-		up->family = xp->family;
+		up->family = kp->encap_family;
 		memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
 		up->reqid = kp->reqid;
 		up->mode = kp->mode;
@@ -1083,12 +1125,12 @@
 }
 
 #ifdef CONFIG_XFRM_SUB_POLICY
-static int copy_to_user_policy_type(struct xfrm_policy *xp, struct sk_buff *skb)
+static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
 {
 	struct xfrm_userpolicy_type upt;
 
 	memset(&upt, 0, sizeof(upt));
-	upt.type = xp->type;
+	upt.type = type;
 
 	RTA_PUT(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
 
@@ -1099,7 +1141,7 @@
 }
 
 #else
-static inline int copy_to_user_policy_type(struct xfrm_policy *xp, struct sk_buff *skb)
+static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
 {
 	return 0;
 }
@@ -1128,7 +1170,7 @@
 		goto nlmsg_failure;
 	if (copy_to_user_sec_ctx(xp, skb))
 		goto nlmsg_failure;
-	if (copy_to_user_policy_type(xp, skb) < 0)
+	if (copy_to_user_policy_type(xp->type, skb) < 0)
 		goto nlmsg_failure;
 
 	nlh->nlmsg_len = skb->tail - b;
@@ -1171,7 +1213,6 @@
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
 	info.in_skb = in_skb;
 	info.out_skb = skb;
 	info.nlmsg_seq = seq;
@@ -1190,7 +1231,7 @@
 {
 	struct xfrm_policy *xp;
 	struct xfrm_userpolicy_id *p;
-	__u8 type = XFRM_POLICY_TYPE_MAIN;
+	u8 type = XFRM_POLICY_TYPE_MAIN;
 	int err;
 	struct km_event c;
 	int delete;
@@ -1227,6 +1268,10 @@
 		xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security, delete);
 		security_xfrm_policy_free(&tmp);
 	}
+	if (delete)
+		xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
+			       AUDIT_MAC_IPSEC_DELSPD, (xp) ? 1 : 0, xp, NULL);
+
 	if (xp == NULL)
 		return -ENOENT;
 
@@ -1261,8 +1306,11 @@
 {
 	struct km_event c;
 	struct xfrm_usersa_flush *p = NLMSG_DATA(nlh);
+	struct xfrm_audit audit_info;
 
-	xfrm_state_flush(p->proto);
+	audit_info.loginuid = NETLINK_CB(skb).loginuid;
+	audit_info.secid = NETLINK_CB(skb).sid;
+	xfrm_state_flush(p->proto, &audit_info);
 	c.data.proto = p->proto;
 	c.event = nlh->nlmsg_type;
 	c.seq = nlh->nlmsg_seq;
@@ -1284,10 +1332,12 @@
 	id = NLMSG_DATA(nlh);
 	nlh->nlmsg_flags = 0;
 
-	id->sa_id.daddr = x->id.daddr;
+	memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
 	id->sa_id.spi = x->id.spi;
 	id->sa_id.family = x->props.family;
 	id->sa_id.proto = x->id.proto;
+	memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
+	id->reqid = x->props.reqid;
 	id->flags = c->data.aevent;
 
 	RTA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
@@ -1408,14 +1458,17 @@
 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
 {
 	struct km_event c;
-	__u8 type = XFRM_POLICY_TYPE_MAIN;
+	u8 type = XFRM_POLICY_TYPE_MAIN;
 	int err;
+	struct xfrm_audit audit_info;
 
 	err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma);
 	if (err)
 		return err;
 
-	xfrm_policy_flush(type);
+	audit_info.loginuid = NETLINK_CB(skb).loginuid;
+	audit_info.secid = NETLINK_CB(skb).sid;
+	xfrm_policy_flush(type, &audit_info);
 	c.data.type = type;
 	c.event = nlh->nlmsg_type;
 	c.seq = nlh->nlmsg_seq;
@@ -1429,7 +1482,7 @@
 	struct xfrm_policy *xp;
 	struct xfrm_user_polexpire *up = NLMSG_DATA(nlh);
 	struct xfrm_userpolicy_info *p = &up->pol;
-	__u8 type = XFRM_POLICY_TYPE_MAIN;
+	u8 type = XFRM_POLICY_TYPE_MAIN;
 	int err = -ENOENT;
 
 	err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma);
@@ -1470,6 +1523,9 @@
 	err = 0;
 	if (up->hard) {
 		xfrm_policy_delete(xp, p->dir);
+		xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
+				AUDIT_MAC_IPSEC_DELSPD, 1, xp, NULL);
+
 	} else {
 		// reset the timers here?
 		printk("Dont know what to do with soft policy expire\n");
@@ -1501,8 +1557,11 @@
 		goto out;
 	km_state_expired(x, ue->hard, current->pid);
 
-	if (ue->hard)
+	if (ue->hard) {
 		__xfrm_state_delete(x);
+		xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
+			       AUDIT_MAC_IPSEC_DELSA, 1, NULL, x);
+	}
 out:
 	spin_unlock_bh(&x->lock);
 	xfrm_state_put(x);
@@ -1531,7 +1590,8 @@
 	}
 
 	/*   build an XP */
-	xp = xfrm_policy_construct(&ua->policy, (struct rtattr **) xfrma, &err);        if (!xp) {
+	xp = xfrm_policy_construct(&ua->policy, (struct rtattr **) xfrma, &err);
+	if (!xp) {
 		kfree(x);
 		return err;
 	}
@@ -1908,7 +1968,7 @@
 		goto nlmsg_failure;
 	if (copy_to_user_state_sec_ctx(x, skb))
 		goto nlmsg_failure;
-	if (copy_to_user_policy_type(xp, skb) < 0)
+	if (copy_to_user_policy_type(xp->type, skb) < 0)
 		goto nlmsg_failure;
 
 	nlh->nlmsg_len = skb->tail - b;
@@ -1980,7 +2040,7 @@
 		return NULL;
 
 	nr = ((len - sizeof(*p)) / sizeof(*ut));
-	if (nr > XFRM_MAX_DEPTH)
+	if (validate_tmpl(nr, ut, p->sel.family))
 		return NULL;
 
 	if (p->dir > XFRM_POLICY_OUT)
@@ -2018,7 +2078,7 @@
 		goto nlmsg_failure;
 	if (copy_to_user_sec_ctx(xp, skb))
 		goto nlmsg_failure;
-	if (copy_to_user_policy_type(xp, skb) < 0)
+	if (copy_to_user_policy_type(xp->type, skb) < 0)
 		goto nlmsg_failure;
 	upe->hard = !!hard;
 
@@ -2097,7 +2157,7 @@
 	copy_to_user_policy(xp, p, dir);
 	if (copy_to_user_tmpl(xp, skb) < 0)
 		goto nlmsg_failure;
-	if (copy_to_user_policy_type(xp, skb) < 0)
+	if (copy_to_user_policy_type(xp->type, skb) < 0)
 		goto nlmsg_failure;
 
 	nlh->nlmsg_len = skb->tail - b;
@@ -2118,7 +2178,6 @@
 	unsigned char *b;
 	int len = 0;
 #ifdef CONFIG_XFRM_SUB_POLICY
-	struct xfrm_userpolicy_type upt;
 	len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
 #endif
 	len += NLMSG_LENGTH(0);
@@ -2131,12 +2190,8 @@
 
 	nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0);
 	nlh->nlmsg_flags = 0;
-
-#ifdef CONFIG_XFRM_SUB_POLICY
-	memset(&upt, 0, sizeof(upt));
-	upt.type = c->data.type;
-	RTA_PUT(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
-#endif
+	if (copy_to_user_policy_type(c->data.type, skb) < 0)
+		goto nlmsg_failure;
 
 	nlh->nlmsg_len = skb->tail - b;
 
@@ -2144,9 +2199,6 @@
 	return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
 
 nlmsg_failure:
-#ifdef CONFIG_XFRM_SUB_POLICY
-rtattr_failure:
-#endif
 	kfree_skb(skb);
 	return -1;
 }
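
Several of the attribute-copy helpers above move from kmalloc() plus memcpy() to a single kmemdup() call; behaviour is unchanged, the allocate-and-copy step is just expressed in one call. A user-space sketch of the same shape, where memdup() is a local stand-in for the kernel's kmemdup(), not a libc function:

/* Duplicate a buffer in one call instead of malloc() + memcpy(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char key[] = "example-algo-key";	/* made-up payload */
	char *copy = memdup(key, sizeof(key));

	if (!copy)
		return 1;
	puts(copy);
	free(copy);
	return 0;
}
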
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 22d281c..f359b73 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -43,7 +43,7 @@
 
 static struct sym_entry *table;
 static unsigned int table_size, table_cnt;
-static unsigned long long _stext, _etext, _sinittext, _einittext, _sextratext, _eextratext;
+static unsigned long long _text, _stext, _etext, _sinittext, _einittext, _sextratext, _eextratext;
 static int all_symbols = 0;
 static char symbol_prefix_char = '\0';
 
@@ -91,7 +91,9 @@
 		sym++;
 
 	/* Ignore most absolute/undefined (?) symbols. */
-	if (strcmp(sym, "_stext") == 0)
+	if (strcmp(sym, "_text") == 0)
+		_text = s->addr;
+	else if (strcmp(sym, "_stext") == 0)
 		_stext = s->addr;
 	else if (strcmp(sym, "_etext") == 0)
 		_etext = s->addr;
@@ -265,9 +267,25 @@
 
 	printf(".data\n");
 
+	/* Keep the symbols relocatable by emitting each address as an
+	 * offset from '_text'.  The symbol names themselves cannot be
+	 * used to construct normal symbol references, as the list of
+	 * symbols contains symbols that are declared static and are
+	 * private to their .o files.  This prevents .tmp_kallsyms.o or
+	 * any other object from referencing them.
+	 */
 	output_label("kallsyms_addresses");
 	for (i = 0; i < table_cnt; i++) {
-		printf("\tPTR\t%#llx\n", table[i].addr);
+		if (toupper(table[i].sym[0]) != 'A') {
+			if (_text <= table[i].addr)
+				printf("\tPTR\t_text + %#llx\n",
+					table[i].addr - _text);
+			else
+				printf("\tPTR\t_text - %#llx\n",
+					_text - table[i].addr);
+		} else {
+			printf("\tPTR\t%#llx\n", table[i].addr);
+		}
 	}
 	printf("\n");
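
The change above emits every non-absolute symbol as an offset from '_text' rather than as a raw address, so the generated table stays valid if the image is relocated; absolute symbols are still emitted verbatim. A stand-alone user-space sketch of the emission logic with made-up addresses (in the real tool the type character is taken from the stored symbol string):

/* Emit symbol addresses relative to _text, absolute symbols as-is. */
#include <stdio.h>
#include <ctype.h>

struct sym { unsigned long long addr; char type; };

int main(void)
{
	unsigned long long _text = 0xc0100000ULL;	/* made-up base */
	struct sym table[] = {
		{ 0xc0100000ULL, 'T' },		/* _text itself         */
		{ 0xc01234a0ULL, 't' },		/* ordinary text symbol */
		{ 0x00000abcULL, 'A' },		/* absolute symbol      */
	};

	for (int i = 0; i < 3; i++) {
		if (toupper(table[i].type) != 'A') {
			if (_text <= table[i].addr)
				printf("\tPTR\t_text + %#llx\n",
				       table[i].addr - _text);
			else
				printf("\tPTR\t_text - %#llx\n",
				       _text - table[i].addr);
		} else {
			printf("\tPTR\t%#llx\n", table[i].addr);
		}
	}
	return 0;
}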
 
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index 338bdea..f5628c5 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -798,7 +798,7 @@
 			QAction *action;
 
 			headerPopup = new QPopupMenu(this);
-			action = new QAction("Show Name", 0, this);
+			action = new QAction(NULL, "Show Name", 0, this);
 			  action->setToggleAction(TRUE);
 			  connect(action, SIGNAL(toggled(bool)),
 				  parent(), SLOT(setShowName(bool)));
@@ -806,7 +806,7 @@
 				  action, SLOT(setOn(bool)));
 			  action->setOn(showName);
 			  action->addTo(headerPopup);
-			action = new QAction("Show Range", 0, this);
+			action = new QAction(NULL, "Show Range", 0, this);
 			  action->setToggleAction(TRUE);
 			  connect(action, SIGNAL(toggled(bool)),
 				  parent(), SLOT(setShowRange(bool)));
@@ -814,7 +814,7 @@
 				  action, SLOT(setOn(bool)));
 			  action->setOn(showRange);
 			  action->addTo(headerPopup);
-			action = new QAction("Show Data", 0, this);
+			action = new QAction(NULL, "Show Data", 0, this);
 			  action->setToggleAction(TRUE);
 			  connect(action, SIGNAL(toggled(bool)),
 				  parent(), SLOT(setShowData(bool)));
@@ -1161,7 +1161,7 @@
 QPopupMenu* ConfigInfoView::createPopupMenu(const QPoint& pos)
 {
 	QPopupMenu* popup = Parent::createPopupMenu(pos);
-	QAction* action = new QAction("Show Debug Info", 0, popup);
+	QAction* action = new QAction(NULL,"Show Debug Info", 0, popup);
 	  action->setToggleAction(TRUE);
 	  connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
 	  connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool)));
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 187f5de..df3b272 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1430,7 +1430,7 @@
 	    # corresponding data structures "correctly". Catch it later in
 	    # output_* subs.
 	    push_parameter($arg, "", $file);
-	} elsif ($arg =~ m/\(/) {
+	} elsif ($arg =~ m/\(.*\*/) {
 	    # pointer-to-function
 	    $arg =~ tr/#/,/;
 	    $arg =~ m/[^\(]+\(\*([^\)]+)\)/;
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 2e11416..ac0a582 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -911,6 +911,7 @@
 		".toc1",  /* used by ppc64 */
 		".stab",
 		".rodata",
+		".parainstructions",
 		".text.lock",
 		"__bug_table", /* used by powerpc for BUG() */
 		".pci_fixup_header",
@@ -931,6 +932,7 @@
 		".altinstructions",
 		".eh_frame",
 		".debug",
+		".parainstructions",
 		NULL
 	};
 	/* part of section name */
diff --git a/scripts/ver_linux b/scripts/ver_linux
index 84999f6..72876df 100755
--- a/scripts/ver_linux
+++ b/scripts/ver_linux
@@ -48,6 +48,8 @@
 xfs_db -V 2>&1 | grep version | awk \
 'NR==1{print "xfsprogs              ", $3}'
 
+pccardctl -V 2>&1| grep pcmciautils | awk '{print "pcmciautils           ", $2}'
+
 cardmgr -V 2>&1| grep version | awk \
 'NR==1{print "pcmcia-cs             ", $3}'
 
@@ -87,10 +89,16 @@
 loadkeys -V 2>&1 | awk \
 '(NR==1 && ($2 ~ /console-tools/)) {print "Console-tools         ", $3}'
 
+oprofiled --version 2>&1 | awk \
+'(NR==1 && ($2 == "oprofile")) {print "oprofile              ", $3}'
+
 expr --v 2>&1 | awk 'NR==1{print "Sh-utils              ", $NF}'
 
 udevinfo -V 2>&1 | grep version | awk '{print "udev                  ", $3}'
 
+iwconfig --version 2>&1 | awk \
+'(NR==1 && ($3 == "version")) {print "wireless-tools        ",$4}'
+
 if [ -e /proc/modules ]; then
     X=`cat /proc/modules | sed -e "s/ .*$//"`
     echo "Modules Loaded         "$X
diff --git a/security/dummy.c b/security/dummy.c
index 43874c1..558795b 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -828,6 +828,11 @@
 {
 }
 
+static inline void dummy_inet_conn_established(struct sock *sk,
+			struct sk_buff *skb)
+{
+}
+
 static inline void dummy_req_classify_flow(const struct request_sock *req,
 			struct flowi *fl)
 {
@@ -836,7 +841,7 @@
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 static int dummy_xfrm_policy_alloc_security(struct xfrm_policy *xp,
-		struct xfrm_user_sec_ctx *sec_ctx, struct sock *sk)
+		struct xfrm_user_sec_ctx *sec_ctx)
 {
 	return 0;
 }
@@ -856,7 +861,7 @@
 }
 
 static int dummy_xfrm_state_alloc_security(struct xfrm_state *x,
-	struct xfrm_user_sec_ctx *sec_ctx, struct xfrm_sec_ctx *pol, u32 secid)
+	struct xfrm_user_sec_ctx *sec_ctx, u32 secid)
 {
 	return 0;
 }
@@ -881,12 +886,6 @@
 	return 1;
 }
 
-static int dummy_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm,
-				struct xfrm_policy *xp)
-{
-	return 1;
-}
-
 static int dummy_xfrm_decode_session(struct sk_buff *skb, u32 *fl, int ckall)
 {
 	return 0;
@@ -1108,6 +1107,7 @@
 	set_to_dummy_if_null(ops, sock_graft);
 	set_to_dummy_if_null(ops, inet_conn_request);
 	set_to_dummy_if_null(ops, inet_csk_clone);
+	set_to_dummy_if_null(ops, inet_conn_established);
 	set_to_dummy_if_null(ops, req_classify_flow);
  #endif	/* CONFIG_SECURITY_NETWORK */
 #ifdef  CONFIG_SECURITY_NETWORK_XFRM
@@ -1120,7 +1120,6 @@
 	set_to_dummy_if_null(ops, xfrm_state_delete_security);
 	set_to_dummy_if_null(ops, xfrm_policy_lookup);
 	set_to_dummy_if_null(ops, xfrm_state_pol_flow_match);
-	set_to_dummy_if_null(ops, xfrm_flow_state_match);
 	set_to_dummy_if_null(ops, xfrm_decode_session);
 #endif	/* CONFIG_SECURITY_NETWORK_XFRM */
 #ifdef CONFIG_KEYS
diff --git a/security/keys/key.c b/security/keys/key.c
index 80de8c3..ac9326c 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -20,7 +20,7 @@
 #include <linux/err.h>
 #include "internal.h"
 
-static kmem_cache_t	*key_jar;
+static struct kmem_cache	*key_jar;
 struct rb_root		key_serial_tree; /* tree of keys indexed by serial */
 DEFINE_SPINLOCK(key_serial_lock);
 
@@ -30,8 +30,8 @@
 static LIST_HEAD(key_types_list);
 static DECLARE_RWSEM(key_types_sem);
 
-static void key_cleanup(void *data);
-static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL);
+static void key_cleanup(struct work_struct *work);
+static DECLARE_WORK(key_cleanup_task, key_cleanup);
 
 /* we serialise key instantiation and link */
 DECLARE_RWSEM(key_construction_sem);
@@ -285,16 +285,14 @@
 	}
 
 	/* allocate and initialise the key and its description */
-	key = kmem_cache_alloc(key_jar, SLAB_KERNEL);
+	key = kmem_cache_alloc(key_jar, GFP_KERNEL);
 	if (!key)
 		goto no_memory_2;
 
 	if (desc) {
-		key->description = kmalloc(desclen, GFP_KERNEL);
+		key->description = kmemdup(desc, desclen, GFP_KERNEL);
 		if (!key->description)
 			goto no_memory_3;
-
-		memcpy(key->description, desc, desclen);
 	}
 
 	atomic_set(&key->usage, 1);
@@ -552,7 +550,7 @@
  * do cleaning up in process context so that we don't have to disable
  * interrupts all over the place
  */
-static void key_cleanup(void *data)
+static void key_cleanup(struct work_struct *work)
 {
 	struct rb_node *_n;
 	struct key *key;
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index e8d02ac..ad45ce7 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -706,12 +706,10 @@
 				BUG_ON(size > PAGE_SIZE);
 
 				ret = -ENOMEM;
-				nklist = kmalloc(size, GFP_KERNEL);
+				nklist = kmemdup(klist, size, GFP_KERNEL);
 				if (!nklist)
 					goto error2;
 
-				memcpy(nklist, klist, size);
-
 				/* replace matched key */
 				atomic_inc(&key->usage);
 				nklist->keys[loop] = key;
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 32150cf..b6f8680 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -27,7 +27,7 @@
 struct key_user root_key_user = {
 	.usage		= ATOMIC_INIT(3),
 	.consq		= LIST_HEAD_INIT(root_key_user.consq),
-	.lock		= SPIN_LOCK_UNLOCKED,
+	.lock		= __SPIN_LOCK_UNLOCKED(root_key_user.lock),
 	.nkeys		= ATOMIC_INIT(2),
 	.nikeys		= ATOMIC_INIT(2),
 	.uid		= 0,
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index a300702..e7c0b5e 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -32,12 +32,7 @@
 #include "avc.h"
 #include "avc_ss.h"
 
-static const struct av_perm_to_string
-{
-  u16 tclass;
-  u32 value;
-  const char *name;
-} av_perm_to_string[] = {
+static const struct av_perm_to_string av_perm_to_string[] = {
 #define S_(c, v, s) { c, v, s },
 #include "av_perm_to_string.h"
 #undef S_
@@ -57,17 +52,21 @@
 #undef TE_
 #undef S_
 
-static const struct av_inherit
-{
-    u16 tclass;
-    const char **common_pts;
-    u32 common_base;
-} av_inherit[] = {
+static const struct av_inherit av_inherit[] = {
 #define S_(c, i, b) { c, common_##i##_perm_to_string, b },
 #include "av_inherit.h"
 #undef S_
 };
 
+const struct selinux_class_perm selinux_class_perm = {
+	av_perm_to_string,
+	ARRAY_SIZE(av_perm_to_string),
+	class_to_string,
+	ARRAY_SIZE(class_to_string),
+	av_inherit,
+	ARRAY_SIZE(av_inherit)
+};
+
 #define AVC_CACHE_SLOTS			512
 #define AVC_DEF_CACHE_THRESHOLD		512
 #define AVC_CACHE_RECLAIM		16
@@ -125,7 +124,7 @@
 
 static struct avc_cache avc_cache;
 static struct avc_callback_node *avc_callbacks;
-static kmem_cache_t *avc_node_cachep;
+static struct kmem_cache *avc_node_cachep;
 
 static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
 {
@@ -333,7 +332,7 @@
 {
 	struct avc_node *node;
 
-	node = kmem_cache_alloc(avc_node_cachep, SLAB_ATOMIC);
+	node = kmem_cache_alloc(avc_node_cachep, GFP_ATOMIC);
 	if (!node)
 		goto out;
 
@@ -497,7 +496,7 @@
 		audit_log_format(ab, " %s=%d", name2, ntohs(port));
 }
 
-static inline void avc_print_ipv4_addr(struct audit_buffer *ab, u32 addr,
+static inline void avc_print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
 				       __be16 port, char *name1, char *name2)
 {
 	if (addr)
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 28ee187..44e9cd4 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -58,6 +58,7 @@
 #include <linux/netlink.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
+#include <linux/dccp.h>
 #include <linux/quota.h>
 #include <linux/un.h>		/* for Unix socket types */
 #include <net/af_unix.h>	/* for Unix socket types */
@@ -123,7 +124,7 @@
 static LIST_HEAD(superblock_security_head);
 static DEFINE_SPINLOCK(sb_security_lock);
 
-static kmem_cache_t *sel_inode_cache;
+static struct kmem_cache *sel_inode_cache;
 
 /* Return security context for a given sid or just the context 
    length if the buffer is null or length is 0 */
@@ -180,7 +181,7 @@
 	struct task_security_struct *tsec = current->security;
 	struct inode_security_struct *isec;
 
-	isec = kmem_cache_alloc(sel_inode_cache, SLAB_KERNEL);
+	isec = kmem_cache_alloc(sel_inode_cache, GFP_KERNEL);
 	if (!isec)
 		return -ENOMEM;
 
@@ -751,6 +752,8 @@
 				return SECCLASS_UDP_SOCKET;
 			else
 				return SECCLASS_RAWIP_SOCKET;
+		case SOCK_DCCP:
+			return SECCLASS_DCCP_SOCKET;
 		default:
 			return SECCLASS_RAWIP_SOCKET;
 		}
@@ -2889,7 +2892,8 @@
 }
 
 /* Returns error only if unable to parse addresses */
-static int selinux_parse_skb_ipv4(struct sk_buff *skb, struct avc_audit_data *ad)
+static int selinux_parse_skb_ipv4(struct sk_buff *skb,
+			struct avc_audit_data *ad, u8 *proto)
 {
 	int offset, ihlen, ret = -EINVAL;
 	struct iphdr _iph, *ih;
@@ -2907,6 +2911,9 @@
 	ad->u.net.v4info.daddr = ih->daddr;
 	ret = 0;
 
+	if (proto)
+		*proto = ih->protocol;
+
 	switch (ih->protocol) {
         case IPPROTO_TCP: {
         	struct tcphdr _tcph, *th;
@@ -2940,6 +2947,22 @@
         	break;
         }
 
+	case IPPROTO_DCCP: {
+		struct dccp_hdr _dccph, *dh;
+
+		if (ntohs(ih->frag_off) & IP_OFFSET)
+			break;
+
+		offset += ihlen;
+		dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
+		if (dh == NULL)
+			break;
+
+		ad->u.net.sport = dh->dccph_sport;
+		ad->u.net.dport = dh->dccph_dport;
+		break;
+        }
+
         default:
         	break;
         }
@@ -2950,7 +2973,8 @@
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 
 /* Returns error only if unable to parse addresses */
-static int selinux_parse_skb_ipv6(struct sk_buff *skb, struct avc_audit_data *ad)
+static int selinux_parse_skb_ipv6(struct sk_buff *skb,
+			struct avc_audit_data *ad, u8 *proto)
 {
 	u8 nexthdr;
 	int ret = -EINVAL, offset;
@@ -2971,6 +2995,9 @@
 	if (offset < 0)
 		goto out;
 
+	if (proto)
+		*proto = nexthdr;
+
 	switch (nexthdr) {
 	case IPPROTO_TCP: {
         	struct tcphdr _tcph, *th;
@@ -2996,6 +3023,18 @@
 		break;
 	}
 
+	case IPPROTO_DCCP: {
+		struct dccp_hdr _dccph, *dh;
+
+		dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
+		if (dh == NULL)
+			break;
+
+		ad->u.net.sport = dh->dccph_sport;
+		ad->u.net.dport = dh->dccph_dport;
+		break;
+        }
+
 	/* includes fragments */
 	default:
 		break;
@@ -3007,13 +3046,13 @@
 #endif /* IPV6 */
 
 static int selinux_parse_skb(struct sk_buff *skb, struct avc_audit_data *ad,
-			     char **addrp, int *len, int src)
+			     char **addrp, int *len, int src, u8 *proto)
 {
 	int ret = 0;
 
 	switch (ad->u.net.family) {
 	case PF_INET:
-		ret = selinux_parse_skb_ipv4(skb, ad);
+		ret = selinux_parse_skb_ipv4(skb, ad, proto);
 		if (ret || !addrp)
 			break;
 		*len = 4;
@@ -3023,7 +3062,7 @@
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	case PF_INET6:
-		ret = selinux_parse_skb_ipv6(skb, ad);
+		ret = selinux_parse_skb_ipv6(skb, ad, proto);
 		if (ret || !addrp)
 			break;
 		*len = 16;
@@ -3101,9 +3140,7 @@
 	if (sock->sk) {
 		sksec = sock->sk->sk_security;
 		sksec->sid = isec->sid;
-		err = selinux_netlbl_socket_post_create(sock,
-							family,
-							isec->sid);
+		err = selinux_netlbl_socket_post_create(sock);
 	}
 
 	return err;
@@ -3180,7 +3217,11 @@
 		case SECCLASS_UDP_SOCKET:
 			node_perm = UDP_SOCKET__NODE_BIND;
 			break;
-			
+
+		case SECCLASS_DCCP_SOCKET:
+			node_perm = DCCP_SOCKET__NODE_BIND;
+			break;
+
 		default:
 			node_perm = RAWIP_SOCKET__NODE_BIND;
 			break;
@@ -3218,16 +3259,17 @@
 		return err;
 
 	/*
-	 * If a TCP socket, check name_connect permission for the port.
+	 * If a TCP or DCCP socket, check name_connect permission for the port.
 	 */
 	isec = SOCK_INODE(sock)->i_security;
-	if (isec->sclass == SECCLASS_TCP_SOCKET) {
+	if (isec->sclass == SECCLASS_TCP_SOCKET ||
+	    isec->sclass == SECCLASS_DCCP_SOCKET) {
 		struct sock *sk = sock->sk;
 		struct avc_audit_data ad;
 		struct sockaddr_in *addr4 = NULL;
 		struct sockaddr_in6 *addr6 = NULL;
 		unsigned short snum;
-		u32 sid;
+		u32 sid, perm;
 
 		if (sk->sk_family == PF_INET) {
 			addr4 = (struct sockaddr_in *)address;
@@ -3246,11 +3288,13 @@
 		if (err)
 			goto out;
 
+		perm = (isec->sclass == SECCLASS_TCP_SOCKET) ?
+		       TCP_SOCKET__NAME_CONNECT : DCCP_SOCKET__NAME_CONNECT;
+
 		AVC_AUDIT_DATA_INIT(&ad,NET);
 		ad.u.net.dport = htons(snum);
 		ad.u.net.family = sk->sk_family;
-		err = avc_has_perm(isec->sid, sid, isec->sclass,
-				   TCP_SOCKET__NAME_CONNECT, &ad);
+		err = avc_has_perm(isec->sid, sid, isec->sclass, perm, &ad);
 		if (err)
 			goto out;
 	}
@@ -3438,7 +3482,13 @@
 		node_perm = NODE__TCP_RECV;
 		recv_perm = TCP_SOCKET__RECV_MSG;
 		break;
-	
+
+	case SECCLASS_DCCP_SOCKET:
+		netif_perm = NETIF__DCCP_RECV;
+		node_perm = NODE__DCCP_RECV;
+		recv_perm = DCCP_SOCKET__RECV_MSG;
+		break;
+
 	default:
 		netif_perm = NETIF__RAWIP_RECV;
 		node_perm = NODE__RAWIP_RECV;
@@ -3487,14 +3537,14 @@
 		goto out;
 
 	/* Handle mapped IPv4 packets arriving via IPv6 sockets */
-	if (family == PF_INET6 && skb->protocol == ntohs(ETH_P_IP))
+	if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
 		family = PF_INET;
 
 	AVC_AUDIT_DATA_INIT(&ad, NET);
 	ad.u.net.netif = skb->dev ? skb->dev->name : "[unknown]";
 	ad.u.net.family = family;
 
-	err = selinux_parse_skb(skb, &ad, &addrp, &len, 1);
+	err = selinux_parse_skb(skb, &ad, &addrp, &len, 1, NULL);
 	if (err)
 		goto out;
 
@@ -3524,25 +3574,16 @@
 	u32 scontext_len;
 	struct sk_security_struct *ssec;
 	struct inode_security_struct *isec;
-	u32 peer_sid = 0;
+	u32 peer_sid = SECSID_NULL;
 
 	isec = SOCK_INODE(sock)->i_security;
 
-	/* if UNIX_STREAM check peer_sid, if TCP check dst for labelled sa */
-	if (isec->sclass == SECCLASS_UNIX_STREAM_SOCKET) {
+	if (isec->sclass == SECCLASS_UNIX_STREAM_SOCKET ||
+	    isec->sclass == SECCLASS_TCP_SOCKET) {
 		ssec = sock->sk->sk_security;
 		peer_sid = ssec->peer_sid;
 	}
-	else if (isec->sclass == SECCLASS_TCP_SOCKET) {
-		peer_sid = selinux_netlbl_socket_getpeersec_stream(sock);
-		if (peer_sid == SECSID_NULL)
-			peer_sid = selinux_socket_getpeer_stream(sock->sk);
-		if (peer_sid == SECSID_NULL) {
-			err = -ENOPROTOOPT;
-			goto out;
-		}
-	}
-	else {
+	if (peer_sid == SECSID_NULL) {
 		err = -ENOPROTOOPT;
 		goto out;
 	}
@@ -3574,13 +3615,12 @@
 	u32 peer_secid = SECSID_NULL;
 	int err = 0;
 
-	if (sock && (sock->sk->sk_family == PF_UNIX))
+	if (sock && sock->sk->sk_family == PF_UNIX)
 		selinux_get_inode_sid(SOCK_INODE(sock), &peer_secid);
-	else if (skb) {
-		peer_secid = selinux_netlbl_socket_getpeersec_dgram(skb);
-		if (peer_secid == SECSID_NULL)
-			peer_secid = selinux_socket_getpeer_dgram(skb);
-	}
+	else if (skb)
+		security_skb_extlbl_sid(skb,
+					SECINITSID_UNLABELED,
+					&peer_secid);
 
 	if (peer_secid == SECSID_NULL)
 		err = -EINVAL;
@@ -3607,7 +3647,7 @@
 	newssec->sid = ssec->sid;
 	newssec->peer_sid = ssec->peer_sid;
 
-	selinux_netlbl_sk_clone_security(ssec, newssec);
+	selinux_netlbl_sk_security_clone(ssec, newssec);
 }
 
 static void selinux_sk_getsecid(struct sock *sk, u32 *secid)
@@ -3641,17 +3681,10 @@
 	u32 newsid;
 	u32 peersid;
 
-	newsid = selinux_netlbl_inet_conn_request(skb, sksec->sid);
-	if (newsid != SECSID_NULL) {
-		req->secid = newsid;
-		return 0;
-	}
-
-	err = selinux_xfrm_decode_session(skb, &peersid, 0);
-	BUG_ON(err);
-
+	security_skb_extlbl_sid(skb, SECINITSID_UNLABELED, &peersid);
 	if (peersid == SECSID_NULL) {
 		req->secid = sksec->sid;
+		req->peer_secid = SECSID_NULL;
 		return 0;
 	}
 
@@ -3660,6 +3693,7 @@
 		return err;
 
 	req->secid = newsid;
+	req->peer_secid = peersid;
 	return 0;
 }
 
@@ -3669,12 +3703,23 @@
 	struct sk_security_struct *newsksec = newsk->sk_security;
 
 	newsksec->sid = req->secid;
+	newsksec->peer_sid = req->peer_secid;
 	/* NOTE: Ideally, we should also get the isec->sid for the
 	   new socket in sync, but we don't have the isec available yet.
 	   So we will wait until sock_graft to do it, by which
 	   time it will have been created and available. */
 
-	selinux_netlbl_sk_security_init(newsksec, req->rsk_ops->family);
+	/* We don't need to take any sort of lock here as we are the only
+	 * thread with access to newsksec */
+	selinux_netlbl_sk_security_reset(newsksec, req->rsk_ops->family);
+}
+
+static void selinux_inet_conn_established(struct sock *sk,
+				struct sk_buff *skb)
+{
+	struct sk_security_struct *sksec = sk->sk_security;
+
+	security_skb_extlbl_sid(skb, SECINITSID_UNLABELED, &sksec->peer_sid);
 }
 
 static void selinux_req_classify_flow(const struct request_sock *req,
@@ -3757,7 +3802,13 @@
 		node_perm = NODE__TCP_SEND;
 		send_perm = TCP_SOCKET__SEND_MSG;
 		break;
-	
+
+	case SECCLASS_DCCP_SOCKET:
+		netif_perm = NETIF__DCCP_SEND;
+		node_perm = NODE__DCCP_SEND;
+		send_perm = DCCP_SOCKET__SEND_MSG;
+		break;
+
 	default:
 		netif_perm = NETIF__RAWIP_SEND;
 		node_perm = NODE__RAWIP_SEND;
@@ -3808,6 +3859,7 @@
 	struct avc_audit_data ad;
 	struct net_device *dev = (struct net_device *)out;
 	struct sk_security_struct *sksec;
+	u8 proto;
 
 	sk = skb->sk;
 	if (!sk)
@@ -3819,7 +3871,7 @@
 	ad.u.net.netif = dev->name;
 	ad.u.net.family = family;
 
-	err = selinux_parse_skb(skb, &ad, &addrp, &len, 0);
+	err = selinux_parse_skb(skb, &ad, &addrp, &len, 0, &proto);
 	if (err)
 		goto out;
 
@@ -3833,7 +3885,7 @@
 	if (err)
 		goto out;
 
-	err = selinux_xfrm_postroute_last(sksec->sid, skb, &ad);
+	err = selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto);
 out:
 	return err ? NF_DROP : NF_ACCEPT;
 }
@@ -4739,6 +4791,7 @@
 	.sock_graft =			selinux_sock_graft,
 	.inet_conn_request =		selinux_inet_conn_request,
 	.inet_csk_clone =		selinux_inet_csk_clone,
+	.inet_conn_established =	selinux_inet_conn_established,
 	.req_classify_flow =		selinux_req_classify_flow,
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -4751,7 +4804,6 @@
 	.xfrm_state_delete_security =	selinux_xfrm_state_delete,
 	.xfrm_policy_lookup = 		selinux_xfrm_policy_lookup,
 	.xfrm_state_pol_flow_match =	selinux_xfrm_state_pol_flow_match,
-	.xfrm_flow_state_match =	selinux_xfrm_flow_state_match,
 	.xfrm_decode_session =		selinux_xfrm_decode_session,
 #endif
 
diff --git a/security/selinux/include/av_inherit.h b/security/selinux/include/av_inherit.h
index a68fdd5..8377a4b 100644
--- a/security/selinux/include/av_inherit.h
+++ b/security/selinux/include/av_inherit.h
@@ -30,3 +30,4 @@
    S_(SECCLASS_NETLINK_DNRT_SOCKET, socket, 0x00400000UL)
    S_(SECCLASS_NETLINK_KOBJECT_UEVENT_SOCKET, socket, 0x00400000UL)
    S_(SECCLASS_APPLETALK_SOCKET, socket, 0x00400000UL)
+   S_(SECCLASS_DCCP_SOCKET, socket, 0x00400000UL)
diff --git a/security/selinux/include/av_perm_to_string.h b/security/selinux/include/av_perm_to_string.h
index 09fc8a2..ad9fb2d 100644
--- a/security/selinux/include/av_perm_to_string.h
+++ b/security/selinux/include/av_perm_to_string.h
@@ -35,12 +35,16 @@
    S_(SECCLASS_NODE, NODE__RAWIP_RECV, "rawip_recv")
    S_(SECCLASS_NODE, NODE__RAWIP_SEND, "rawip_send")
    S_(SECCLASS_NODE, NODE__ENFORCE_DEST, "enforce_dest")
+   S_(SECCLASS_NODE, NODE__DCCP_RECV, "dccp_recv")
+   S_(SECCLASS_NODE, NODE__DCCP_SEND, "dccp_send")
    S_(SECCLASS_NETIF, NETIF__TCP_RECV, "tcp_recv")
    S_(SECCLASS_NETIF, NETIF__TCP_SEND, "tcp_send")
    S_(SECCLASS_NETIF, NETIF__UDP_RECV, "udp_recv")
    S_(SECCLASS_NETIF, NETIF__UDP_SEND, "udp_send")
    S_(SECCLASS_NETIF, NETIF__RAWIP_RECV, "rawip_recv")
    S_(SECCLASS_NETIF, NETIF__RAWIP_SEND, "rawip_send")
+   S_(SECCLASS_NETIF, NETIF__DCCP_RECV, "dccp_recv")
+   S_(SECCLASS_NETIF, NETIF__DCCP_SEND, "dccp_send")
    S_(SECCLASS_UNIX_STREAM_SOCKET, UNIX_STREAM_SOCKET__CONNECTTO, "connectto")
    S_(SECCLASS_UNIX_STREAM_SOCKET, UNIX_STREAM_SOCKET__NEWCONN, "newconn")
    S_(SECCLASS_UNIX_STREAM_SOCKET, UNIX_STREAM_SOCKET__ACCEPTFROM, "acceptfrom")
@@ -252,3 +256,7 @@
    S_(SECCLASS_KEY, KEY__LINK, "link")
    S_(SECCLASS_KEY, KEY__SETATTR, "setattr")
    S_(SECCLASS_KEY, KEY__CREATE, "create")
+   S_(SECCLASS_CONTEXT, CONTEXT__TRANSLATE, "translate")
+   S_(SECCLASS_CONTEXT, CONTEXT__CONTAINS, "contains")
+   S_(SECCLASS_DCCP_SOCKET, DCCP_SOCKET__NODE_BIND, "node_bind")
+   S_(SECCLASS_DCCP_SOCKET, DCCP_SOCKET__NAME_CONNECT, "name_connect")
diff --git a/security/selinux/include/av_permissions.h b/security/selinux/include/av_permissions.h
index 81f4f52..2de4b5f 100644
--- a/security/selinux/include/av_permissions.h
+++ b/security/selinux/include/av_permissions.h
@@ -312,6 +312,8 @@
 #define NODE__RAWIP_RECV                          0x00000010UL
 #define NODE__RAWIP_SEND                          0x00000020UL
 #define NODE__ENFORCE_DEST                        0x00000040UL
+#define NODE__DCCP_RECV                           0x00000080UL
+#define NODE__DCCP_SEND                           0x00000100UL
 
 #define NETIF__TCP_RECV                           0x00000001UL
 #define NETIF__TCP_SEND                           0x00000002UL
@@ -319,6 +321,8 @@
 #define NETIF__UDP_SEND                           0x00000008UL
 #define NETIF__RAWIP_RECV                         0x00000010UL
 #define NETIF__RAWIP_SEND                         0x00000020UL
+#define NETIF__DCCP_RECV                          0x00000040UL
+#define NETIF__DCCP_SEND                          0x00000080UL
 
 #define NETLINK_SOCKET__IOCTL                     0x00000001UL
 #define NETLINK_SOCKET__READ                      0x00000002UL
@@ -970,3 +974,31 @@
 #define KEY__LINK                                 0x00000010UL
 #define KEY__SETATTR                              0x00000020UL
 #define KEY__CREATE                               0x00000040UL
+
+#define CONTEXT__TRANSLATE                        0x00000001UL
+#define CONTEXT__CONTAINS                         0x00000002UL
+
+#define DCCP_SOCKET__IOCTL                        0x00000001UL
+#define DCCP_SOCKET__READ                         0x00000002UL
+#define DCCP_SOCKET__WRITE                        0x00000004UL
+#define DCCP_SOCKET__CREATE                       0x00000008UL
+#define DCCP_SOCKET__GETATTR                      0x00000010UL
+#define DCCP_SOCKET__SETATTR                      0x00000020UL
+#define DCCP_SOCKET__LOCK                         0x00000040UL
+#define DCCP_SOCKET__RELABELFROM                  0x00000080UL
+#define DCCP_SOCKET__RELABELTO                    0x00000100UL
+#define DCCP_SOCKET__APPEND                       0x00000200UL
+#define DCCP_SOCKET__BIND                         0x00000400UL
+#define DCCP_SOCKET__CONNECT                      0x00000800UL
+#define DCCP_SOCKET__LISTEN                       0x00001000UL
+#define DCCP_SOCKET__ACCEPT                       0x00002000UL
+#define DCCP_SOCKET__GETOPT                       0x00004000UL
+#define DCCP_SOCKET__SETOPT                       0x00008000UL
+#define DCCP_SOCKET__SHUTDOWN                     0x00010000UL
+#define DCCP_SOCKET__RECVFROM                     0x00020000UL
+#define DCCP_SOCKET__SENDTO                       0x00040000UL
+#define DCCP_SOCKET__RECV_MSG                     0x00080000UL
+#define DCCP_SOCKET__SEND_MSG                     0x00100000UL
+#define DCCP_SOCKET__NAME_BIND                    0x00200000UL
+#define DCCP_SOCKET__NODE_BIND                    0x00400000UL
+#define DCCP_SOCKET__NAME_CONNECT                 0x00800000UL
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 960ef18..6ed10c3 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -54,12 +54,12 @@
 			char *netif;
 			struct sock *sk;
 			u16 family;
-			u16 dport;
-			u16 sport;
+			__be16 dport;
+			__be16 sport;
 			union {
 				struct {
-					u32 daddr;
-					u32 saddr;
+					__be32 daddr;
+					__be32 saddr;
 				} v4;
 				struct {
 					struct in6_addr daddr;
diff --git a/security/selinux/include/avc_ss.h b/security/selinux/include/avc_ss.h
index 450a283..ff869e8 100644
--- a/security/selinux/include/avc_ss.h
+++ b/security/selinux/include/avc_ss.h
@@ -10,5 +10,29 @@
 
 int avc_ss_reset(u32 seqno);
 
+struct av_perm_to_string
+{
+	u16 tclass;
+	u32 value;
+	const char *name;
+};
+
+struct av_inherit
+{
+	u16 tclass;
+	const char **common_pts;
+	u32 common_base;
+};
+
+struct selinux_class_perm
+{
+	const struct av_perm_to_string *av_perm_to_string;
+	u32 av_pts_len;
+	const char **class_to_string;
+	u32 cts_len;
+	const struct av_inherit *av_inherit;
+	u32 av_inherit_len;
+};
+
 #endif /* _SELINUX_AVC_SS_H_ */
 
diff --git a/security/selinux/include/class_to_string.h b/security/selinux/include/class_to_string.h
index 24303b6..9f3ebb1 100644
--- a/security/selinux/include/class_to_string.h
+++ b/security/selinux/include/class_to_string.h
@@ -61,3 +61,5 @@
     S_("appletalk_socket")
     S_("packet")
     S_("key")
+    S_("context")
+    S_("dccp_socket")
diff --git a/security/selinux/include/flask.h b/security/selinux/include/flask.h
index 95887ae..67cef37 100644
--- a/security/selinux/include/flask.h
+++ b/security/selinux/include/flask.h
@@ -63,6 +63,8 @@
 #define SECCLASS_APPLETALK_SOCKET                        56
 #define SECCLASS_PACKET                                  57
 #define SECCLASS_KEY                                     58
+#define SECCLASS_CONTEXT                                 59
+#define SECCLASS_DCCP_SOCKET                             60
 
 /*
  * Security identifier indices for initial entities
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index ef2267f..91b88f0 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -23,6 +23,7 @@
 #include <linux/fs.h>
 #include <linux/binfmts.h>
 #include <linux/in.h>
+#include <linux/spinlock.h>
 #include "flask.h"
 #include "avc.h"
 
@@ -108,6 +109,7 @@
 		NLBL_REQUIRE,
 		NLBL_LABELED,
 	} nlbl_state;
+	spinlock_t nlbl_lock;		/* protects nlbl_state */
 #endif
 };
 
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 1ef7917..210eec7 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -34,6 +34,8 @@
 #define POLICYDB_VERSION_MAX	POLICYDB_VERSION_RANGETRANS
 #endif
 
+struct sk_buff;
+
 extern int selinux_enabled;
 extern int selinux_mls_enabled;
 
@@ -80,6 +82,8 @@
 int security_node_sid(u16 domain, void *addr, u32 addrlen,
 	u32 *out_sid);
 
+void security_skb_extlbl_sid(struct sk_buff *skb, u32 base_sid, u32 *sid);
+
 int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
                                  u16 tclass);
 
diff --git a/security/selinux/include/selinux_netlabel.h b/security/selinux/include/selinux_netlabel.h
index 9de10cc..2a732c9 100644
--- a/security/selinux/include/selinux_netlabel.h
+++ b/security/selinux/include/selinux_netlabel.h
@@ -38,19 +38,17 @@
 
 #ifdef CONFIG_NETLABEL
 void selinux_netlbl_cache_invalidate(void);
-int selinux_netlbl_socket_post_create(struct socket *sock,
-				      int sock_family,
-				      u32 sid);
+int selinux_netlbl_skbuff_getsid(struct sk_buff *skb, u32 base_sid, u32 *sid);
+int selinux_netlbl_socket_post_create(struct socket *sock);
 void selinux_netlbl_sock_graft(struct sock *sk, struct socket *sock);
-u32 selinux_netlbl_inet_conn_request(struct sk_buff *skb, u32 sock_sid);
 int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
 				struct sk_buff *skb,
 				struct avc_audit_data *ad);
-u32 selinux_netlbl_socket_getpeersec_stream(struct socket *sock);
-u32 selinux_netlbl_socket_getpeersec_dgram(struct sk_buff *skb);
+void selinux_netlbl_sk_security_reset(struct sk_security_struct *ssec,
+				      int family);
 void selinux_netlbl_sk_security_init(struct sk_security_struct *ssec,
 				     int family);
-void selinux_netlbl_sk_clone_security(struct sk_security_struct *ssec,
+void selinux_netlbl_sk_security_clone(struct sk_security_struct *ssec,
 				      struct sk_security_struct *newssec);
 int selinux_netlbl_inode_permission(struct inode *inode, int mask);
 int selinux_netlbl_socket_setsockopt(struct socket *sock,
@@ -62,9 +60,15 @@
 	return;
 }
 
-static inline int selinux_netlbl_socket_post_create(struct socket *sock,
-						    int sock_family,
-						    u32 sid)
+static inline int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
+					       u32 base_sid,
+					       u32 *sid)
+{
+	*sid = SECSID_NULL;
+	return 0;
+}
+
+static inline int selinux_netlbl_socket_post_create(struct socket *sock)
 {
 	return 0;
 }
@@ -75,12 +79,6 @@
 	return;
 }
 
-static inline u32 selinux_netlbl_inet_conn_request(struct sk_buff *skb,
-						   u32 sock_sid)
-{
-	return SECSID_NULL;
-}
-
 static inline int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
 					      struct sk_buff *skb,
 					      struct avc_audit_data *ad)
@@ -88,14 +86,11 @@
 	return 0;
 }
 
-static inline u32 selinux_netlbl_socket_getpeersec_stream(struct socket *sock)
+static inline void selinux_netlbl_sk_security_reset(
+					       struct sk_security_struct *ssec,
+					       int family)
 {
-	return SECSID_NULL;
-}
-
-static inline u32 selinux_netlbl_socket_getpeersec_dgram(struct sk_buff *skb)
-{
-	return SECSID_NULL;
+	return;
 }
 
 static inline void selinux_netlbl_sk_security_init(
@@ -105,7 +100,7 @@
 	return;
 }
 
-static inline void selinux_netlbl_sk_clone_security(
+static inline void selinux_netlbl_sk_security_clone(
 	                                   struct sk_security_struct *ssec,
 					   struct sk_security_struct *newssec)
 {
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 526b280..161eb57 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -8,20 +8,17 @@
 #define _SELINUX_XFRM_H_
 
 int selinux_xfrm_policy_alloc(struct xfrm_policy *xp,
-		struct xfrm_user_sec_ctx *sec_ctx, struct sock *sk);
+		struct xfrm_user_sec_ctx *sec_ctx);
 int selinux_xfrm_policy_clone(struct xfrm_policy *old, struct xfrm_policy *new);
 void selinux_xfrm_policy_free(struct xfrm_policy *xp);
 int selinux_xfrm_policy_delete(struct xfrm_policy *xp);
 int selinux_xfrm_state_alloc(struct xfrm_state *x,
-	struct xfrm_user_sec_ctx *sec_ctx, struct xfrm_sec_ctx *pol, u32 secid);
+	struct xfrm_user_sec_ctx *sec_ctx, u32 secid);
 void selinux_xfrm_state_free(struct xfrm_state *x);
 int selinux_xfrm_state_delete(struct xfrm_state *x);
 int selinux_xfrm_policy_lookup(struct xfrm_policy *xp, u32 fl_secid, u8 dir);
 int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
 			struct xfrm_policy *xp, struct flowi *fl);
-int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm,
-			struct xfrm_policy *xp);
-
 
 /*
  * Extract the security blob from the sock (it's actually on the socket)
@@ -38,9 +35,7 @@
 int selinux_xfrm_sock_rcv_skb(u32 sid, struct sk_buff *skb,
 			struct avc_audit_data *ad);
 int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
-			struct avc_audit_data *ad);
-u32 selinux_socket_getpeer_stream(struct sock *sk);
-u32 selinux_socket_getpeer_dgram(struct sk_buff *skb);
+			struct avc_audit_data *ad, u8 proto);
 int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
 #else
 static inline int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
@@ -50,20 +45,11 @@
 }
 
 static inline int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
-			struct avc_audit_data *ad)
+			struct avc_audit_data *ad, u8 proto)
 {
 	return 0;
 }
 
-static inline int selinux_socket_getpeer_stream(struct sock *sk)
-{
-	return SECSID_NULL;
-}
-
-static inline int selinux_socket_getpeer_dgram(struct sk_buff *skb)
-{
-	return SECSID_NULL;
-}
 static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
 {
 	*sid = SECSID_NULL;
@@ -71,4 +57,10 @@
 }
 #endif
 
+static inline void selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid)
+{
+	int err = selinux_xfrm_decode_session(skb, sid, 0);
+	BUG_ON(err);
+}
+
 #endif /* _SELINUX_XFRM_H_ */
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index b8f4d25..ccfe875 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -60,7 +60,6 @@
 	{ RTM_DELACTION,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
 	{ RTM_GETACTION,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
 	{ RTM_NEWPREFIX,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
-	{ RTM_GETPREFIX,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
 	{ RTM_GETMULTICAST,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
 	{ RTM_GETANYCAST,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
 	{ RTM_GETNEIGHTBL,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index d049c7a..ebb993c 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -28,7 +28,7 @@
  (keyp->source_type << 9)) & \
  AVTAB_HASH_MASK)
 
-static kmem_cache_t *avtab_node_cachep;
+static struct kmem_cache *avtab_node_cachep;
 
 static struct avtab_node*
 avtab_insert_node(struct avtab *h, int hvalue,
@@ -36,7 +36,7 @@
 		  struct avtab_key *key, struct avtab_datum *datum)
 {
 	struct avtab_node * newnode;
-	newnode = kmem_cache_alloc(avtab_node_cachep, SLAB_KERNEL);
+	newnode = kmem_cache_alloc(avtab_node_cachep, GFP_KERNEL);
 	if (newnode == NULL)
 		return NULL;
 	memset(newnode, 0, sizeof(struct avtab_node));
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index d539346..ce492a6 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -6,7 +6,7 @@
 /*
  * Updated: Hewlett-Packard <paul.moore@hp.com>
  *
- *      Added ebitmap_export() and ebitmap_import()
+ *      Added support to import/export the NetLabel category bitmap
  *
  * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
  */
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
+#include <net/netlabel.h>
 #include "ebitmap.h"
 #include "policydb.h"
 
@@ -67,141 +68,120 @@
 	return 0;
 }
 
+#ifdef CONFIG_NETLABEL
 /**
- * ebitmap_export - Export an ebitmap to a unsigned char bitmap string
- * @src: the ebitmap to export
- * @dst: the resulting bitmap string
- * @dst_len: length of dst in bytes
+ * ebitmap_netlbl_export - Export an ebitmap into a NetLabel category bitmap
+ * @ebmap: the ebitmap to export
+ * @catmap: the NetLabel category bitmap
  *
  * Description:
- * Allocate a buffer at least src->highbit bits long and export the extensible
- * bitmap into the buffer.  The bitmap string will be in little endian format,
- * i.e. LSB first.  The value returned in dst_len may not the true size of the
- * buffer as the length of the buffer is rounded up to a multiple of MAPTYPE.
- * The caller must free the buffer when finished. Returns zero on success,
- * negative values on failure.
+ * Export a SELinux extensible bitmap into a NetLabel category bitmap.
+ * Returns zero on success, negative values on error.
  *
  */
-int ebitmap_export(const struct ebitmap *src,
-		   unsigned char **dst,
-		   size_t *dst_len)
+int ebitmap_netlbl_export(struct ebitmap *ebmap,
+			  struct netlbl_lsm_secattr_catmap **catmap)
 {
-	size_t bitmap_len;
-	unsigned char *bitmap;
-	struct ebitmap_node *iter_node;
-	MAPTYPE node_val;
-	size_t bitmap_byte;
-	unsigned char bitmask;
+	struct ebitmap_node *e_iter = ebmap->node;
+	struct netlbl_lsm_secattr_catmap *c_iter;
+	u32 cmap_idx;
 
-	if (src->highbit == 0) {
-		*dst = NULL;
-		*dst_len = 0;
+	/* This function is much simpler because SELinux's MAPTYPE happens
+	 * to be the same as NetLabel's NETLBL_CATMAP_MAPTYPE; if MAPTYPE is
+	 * ever changed from a u64, this function will most likely need to
+	 * be changed as well.  It's not ideal, but the tradeoff in terms
+	 * of neatness and speed is worth it. */
+
+	if (e_iter == NULL) {
+		*catmap = NULL;
 		return 0;
 	}
 
-	bitmap_len = src->highbit / 8;
-	if (src->highbit % 7)
-		bitmap_len += 1;
-
-	bitmap = kzalloc((bitmap_len & ~(sizeof(MAPTYPE) - 1)) +
-			 sizeof(MAPTYPE),
-			 GFP_ATOMIC);
-	if (bitmap == NULL)
+	c_iter = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
+	if (c_iter == NULL)
 		return -ENOMEM;
+	*catmap = c_iter;
+	c_iter->startbit = e_iter->startbit & ~(NETLBL_CATMAP_SIZE - 1);
 
-	iter_node = src->node;
-	do {
-		bitmap_byte = iter_node->startbit / 8;
-		bitmask = 0x80;
-		node_val = iter_node->map;
-		do {
-			if (bitmask == 0) {
-				bitmap_byte++;
-				bitmask = 0x80;
-			}
-			if (node_val & (MAPTYPE)0x01)
-				bitmap[bitmap_byte] |= bitmask;
-			node_val >>= 1;
-			bitmask >>= 1;
-		} while (node_val > 0);
-		iter_node = iter_node->next;
-	} while (iter_node);
+	while (e_iter != NULL) {
+		if (e_iter->startbit >=
+		    (c_iter->startbit + NETLBL_CATMAP_SIZE)) {
+			c_iter->next = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
+			if (c_iter->next == NULL)
+				goto netlbl_export_failure;
+			c_iter = c_iter->next;
+			c_iter->startbit = e_iter->startbit &
+				           ~(NETLBL_CATMAP_SIZE - 1);
+		}
+		cmap_idx = (e_iter->startbit - c_iter->startbit) /
+			   NETLBL_CATMAP_MAPSIZE;
+		c_iter->bitmap[cmap_idx] = e_iter->map;
+		e_iter = e_iter->next;
+	}
 
-	*dst = bitmap;
-	*dst_len = bitmap_len;
 	return 0;
+
+netlbl_export_failure:
+	netlbl_secattr_catmap_free(*catmap);
+	return -ENOMEM;
 }
 
 /**
- * ebitmap_import - Import an unsigned char bitmap string into an ebitmap
- * @src: the bitmap string
- * @src_len: the bitmap length in bytes
- * @dst: the empty ebitmap
+ * ebitmap_netlbl_import - Import a NetLabel category bitmap into an ebitmap
+ * @ebmap: the ebitmap to import into
+ * @catmap: the NetLabel category bitmap
  *
  * Description:
- * This function takes a little endian bitmap string in src and imports it into
- * the ebitmap pointed to by dst.  Returns zero on success, negative values on
- * failure.
+ * Import a NetLabel category bitmap into a SELinux extensible bitmap.
+ * Returns zero on success, negative values on error.
  *
  */
-int ebitmap_import(const unsigned char *src,
-		   size_t src_len,
-		   struct ebitmap *dst)
+int ebitmap_netlbl_import(struct ebitmap *ebmap,
+			  struct netlbl_lsm_secattr_catmap *catmap)
 {
-	size_t src_off = 0;
-	size_t node_limit;
-	struct ebitmap_node *node_new;
-	struct ebitmap_node *node_last = NULL;
-	u32 i_byte;
-	u32 i_bit;
-	unsigned char src_byte;
+	struct ebitmap_node *e_iter = NULL;
+	struct ebitmap_node *emap_prev = NULL;
+	struct netlbl_lsm_secattr_catmap *c_iter = catmap;
+	u32 c_idx;
 
-	while (src_off < src_len) {
-		if (src_len - src_off >= sizeof(MAPTYPE)) {
-			if (*(MAPTYPE *)&src[src_off] == 0) {
-				src_off += sizeof(MAPTYPE);
+	/* This function is much simpler because SELinux's MAPTYPE happens to
+	 * be the same as NetLabel's NETLBL_CATMAP_MAPTYPE; if MAPTYPE is ever
+	 * changed from a u64 this function will most likely need to be changed
+	 * as well.  It's not ideal, but the tradeoff in terms of neatness and
+	 * speed is worth it. */
+
+	do {
+		for (c_idx = 0; c_idx < NETLBL_CATMAP_MAPCNT; c_idx++) {
+			if (c_iter->bitmap[c_idx] == 0)
 				continue;
-			}
-			node_limit = sizeof(MAPTYPE);
-		} else {
-			for (src_byte = 0, i_byte = src_off;
-			     i_byte < src_len && src_byte == 0;
-			     i_byte++)
-				src_byte |= src[i_byte];
-			if (src_byte == 0)
-				break;
-			node_limit = src_len - src_off;
-		}
 
-		node_new = kzalloc(sizeof(*node_new), GFP_ATOMIC);
-		if (unlikely(node_new == NULL)) {
-			ebitmap_destroy(dst);
-			return -ENOMEM;
-		}
-		node_new->startbit = src_off * 8;
-		for (i_byte = 0; i_byte < node_limit; i_byte++) {
-			src_byte = src[src_off++];
-			for (i_bit = i_byte * 8; src_byte != 0; i_bit++) {
-				if (src_byte & 0x80)
-					node_new->map |= MAPBIT << i_bit;
-				src_byte <<= 1;
-			}
-		}
+			e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC);
+			if (e_iter == NULL)
+				goto netlbl_import_failure;
+			if (emap_prev == NULL)
+				ebmap->node = e_iter;
+			else
+				emap_prev->next = e_iter;
+			emap_prev = e_iter;
 
-		if (node_last != NULL)
-			node_last->next = node_new;
-		else
-			dst->node = node_new;
-		node_last = node_new;
-	}
-
-	if (likely(node_last != NULL))
-		dst->highbit = node_last->startbit + MAPSIZE;
+			e_iter->startbit = c_iter->startbit +
+				           NETLBL_CATMAP_MAPSIZE * c_idx;
+			e_iter->map = c_iter->bitmap[c_idx];
+		}
+		c_iter = c_iter->next;
+	} while (c_iter != NULL);
+	if (e_iter != NULL)
+		ebmap->highbit = e_iter->startbit + MAPSIZE;
 	else
-		ebitmap_init(dst);
+		ebitmap_destroy(ebmap);
 
 	return 0;
+
+netlbl_import_failure:
+	ebitmap_destroy(ebmap);
+	return -ENOMEM;
 }
+#endif /* CONFIG_NETLABEL */
 
 int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
 {
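[Illustrative aside, not from the patch: the export/import pair above works because an ebitmap node's 64-bit payload drops straight into one slot of a NetLabel catmap block, so only the index arithmetic matters.  The standalone C sketch below models that arithmetic; the DEMO_* constants merely stand in for NETLBL_CATMAP_MAPSIZE/NETLBL_CATMAP_SIZE and the sample values are invented.]

#include <stdio.h>
#include <stdint.h>

#define DEMO_MAPSIZE	64U			/* bits per u64 map word */
#define DEMO_MAPCNT	4U			/* words per catmap block (assumed) */
#define DEMO_CATSIZE	(DEMO_MAPSIZE * DEMO_MAPCNT)

int main(void)
{
	uint32_t startbit = 320;	/* ebitmap nodes start on 64-bit boundaries */
	uint64_t map = 0x5;		/* node payload: categories 320 and 322 set */
	uint32_t block_start = startbit & ~(DEMO_CATSIZE - 1);
	uint32_t slot = (startbit - block_start) / DEMO_MAPSIZE;

	/* ebitmap_netlbl_export() does essentially this once per node */
	printf("node @%u -> catmap block @%u, bitmap[%u] = %#llx\n",
	       startbit, block_start, slot, (unsigned long long)map);
	return 0;
}

[Import runs the same mapping in reverse: every non-zero bitmap[] word becomes a new ebitmap node whose startbit is block_start + slot * 64.]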
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index da2d465..1270e34 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -14,6 +14,8 @@
 #ifndef _SS_EBITMAP_H_
 #define _SS_EBITMAP_H_
 
+#include <net/netlabel.h>
+
 #define MAPTYPE u64			/* portion of bitmap in each node */
 #define MAPSIZE (sizeof(MAPTYPE) * 8)	/* number of bits in node bitmap */
 #define MAPBIT  1ULL			/* a bit in the node bitmap */
@@ -69,16 +71,28 @@
 
 int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
 int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
-int ebitmap_export(const struct ebitmap *src,
-		   unsigned char **dst,
-		   size_t *dst_len);
-int ebitmap_import(const unsigned char *src,
-		   size_t src_len,
-		   struct ebitmap *dst);
 int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2);
 int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
 int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
 void ebitmap_destroy(struct ebitmap *e);
 int ebitmap_read(struct ebitmap *e, void *fp);
 
+#ifdef CONFIG_NETLABEL
+int ebitmap_netlbl_export(struct ebitmap *ebmap,
+			  struct netlbl_lsm_secattr_catmap **catmap);
+int ebitmap_netlbl_import(struct ebitmap *ebmap,
+			  struct netlbl_lsm_secattr_catmap *catmap);
+#else
+static inline int ebitmap_netlbl_export(struct ebitmap *ebmap,
+				struct netlbl_lsm_secattr_catmap **catmap)
+{
+	return -ENOMEM;
+}
+static inline int ebitmap_netlbl_import(struct ebitmap *ebmap,
+				struct netlbl_lsm_secattr_catmap *catmap)
+{
+	return -ENOMEM;
+}
+#endif
+
 #endif	/* _SS_EBITMAP_H_ */
diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c
index 24e5ec9..77b530c 100644
--- a/security/selinux/ss/hashtab.c
+++ b/security/selinux/ss/hashtab.c
@@ -8,8 +8,8 @@
 #include <linux/errno.h>
 #include "hashtab.h"
 
-struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, void *key),
-                               int (*keycmp)(struct hashtab *h, void *key1, void *key2),
+struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key),
+                               int (*keycmp)(struct hashtab *h, const void *key1, const void *key2),
                                u32 size)
 {
 	struct hashtab *p;
@@ -71,7 +71,7 @@
 	return 0;
 }
 
-void *hashtab_search(struct hashtab *h, void *key)
+void *hashtab_search(struct hashtab *h, const void *key)
 {
 	u32 hvalue;
 	struct hashtab_node *cur;
diff --git a/security/selinux/ss/hashtab.h b/security/selinux/ss/hashtab.h
index 4cc8581..7e2ff3e 100644
--- a/security/selinux/ss/hashtab.h
+++ b/security/selinux/ss/hashtab.h
@@ -22,9 +22,9 @@
 	struct hashtab_node **htable;	/* hash table */
 	u32 size;			/* number of slots in hash table */
 	u32 nel;			/* number of elements in hash table */
-	u32 (*hash_value)(struct hashtab *h, void *key);
+	u32 (*hash_value)(struct hashtab *h, const void *key);
 					/* hash function */
-	int (*keycmp)(struct hashtab *h, void *key1, void *key2);
+	int (*keycmp)(struct hashtab *h, const void *key1, const void *key2);
 					/* key comparison function */
 };
 
@@ -39,8 +39,8 @@
  * Returns NULL if insufficent space is available or
  * the new hash table otherwise.
  */
-struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, void *key),
-                               int (*keycmp)(struct hashtab *h, void *key1, void *key2),
+struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key),
+                               int (*keycmp)(struct hashtab *h, const void *key1, const void *key2),
                                u32 size);
 
 /*
@@ -59,7 +59,7 @@
  * Returns NULL if no entry has the specified key or
  * the datum of the entry otherwise.
  */
-void *hashtab_search(struct hashtab *h, void *k);
+void *hashtab_search(struct hashtab *h, const void *k);
 
 /*
  * Destroys the specified hash table.
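[Illustrative aside, not from the patch: the const-qualified callback signatures above simply promise that lookups never modify the key.  A userspace sketch of callbacks with that shape; the struct hashtab argument is dropped and the demo_ names are invented.]

#include <stdio.h>
#include <string.h>

typedef unsigned int (*demo_hash_fn)(const void *key);
typedef int (*demo_cmp_fn)(const void *key1, const void *key2);

static unsigned int demo_symhash(const void *key)
{
	const char *p;
	unsigned int val = 0;

	for (p = key; *p; p++)
		val = (val << 4 | (val >> 28)) ^ (unsigned char)*p;
	return val & 511;		/* pretend the table has 512 slots */
}

static int demo_symcmp(const void *key1, const void *key2)
{
	return strcmp(key1, key2);	/* keys stay read-only throughout */
}

int main(void)
{
	demo_hash_fn hash = demo_symhash;
	demo_cmp_fn cmp = demo_symcmp;

	printf("hash(\"file\") = %u, equal = %d\n",
	       hash("file"), cmp("file", "file") == 0);
	return 0;
}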
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 2cca8e2..b4f682d 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -13,7 +13,7 @@
 /*
  * Updated: Hewlett-Packard <paul.moore@hp.com>
  *
- *      Added support to import/export the MLS label
+ *      Added support to import/export the MLS label to/from NetLabel
  *
  * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
  */
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/errno.h>
+#include <net/netlabel.h>
 #include "sidtab.h"
 #include "mls.h"
 #include "policydb.h"
@@ -571,163 +572,108 @@
 	return -EINVAL;
 }
 
+#ifdef CONFIG_NETLABEL
 /**
- * mls_export_lvl - Export the MLS sensitivity levels
+ * mls_export_netlbl_lvl - Export the MLS sensitivity levels to NetLabel
  * @context: the security context
- * @low: the low sensitivity level
- * @high: the high sensitivity level
+ * @secattr: the NetLabel security attributes
  *
  * Description:
- * Given the security context copy the low MLS sensitivity level into lvl_low
- * and the high sensitivity level in lvl_high.  The MLS levels are only
- * exported if the pointers are not NULL, if they are NULL then that level is
- * not exported.
+ * Given the security context, copy the low MLS sensitivity level into the
+ * NetLabel MLS sensitivity level field.
  *
  */
-void mls_export_lvl(const struct context *context, u32 *low, u32 *high)
+void mls_export_netlbl_lvl(struct context *context,
+			   struct netlbl_lsm_secattr *secattr)
 {
 	if (!selinux_mls_enabled)
 		return;
 
-	if (low != NULL)
-		*low = context->range.level[0].sens - 1;
-	if (high != NULL)
-		*high = context->range.level[1].sens - 1;
+	secattr->mls_lvl = context->range.level[0].sens - 1;
+	secattr->flags |= NETLBL_SECATTR_MLS_LVL;
 }
 
 /**
- * mls_import_lvl - Import the MLS sensitivity levels
+ * mls_import_netlbl_lvl - Import the NetLabel MLS sensitivity levels
  * @context: the security context
- * @low: the low sensitivity level
- * @high: the high sensitivity level
+ * @secattr: the NetLabel security attributes
  *
  * Description:
- * Given the security context and the two sensitivty levels, set the MLS levels
- * in the context according the two given as parameters.  Returns zero on
- * success, negative values on failure.
+ * Given the security context and the NetLabel security attributes, copy the
+ * NetLabel MLS sensitivity level into the context.
  *
  */
-void mls_import_lvl(struct context *context, u32 low, u32 high)
+void mls_import_netlbl_lvl(struct context *context,
+			   struct netlbl_lsm_secattr *secattr)
 {
 	if (!selinux_mls_enabled)
 		return;
 
-	context->range.level[0].sens = low + 1;
-	context->range.level[1].sens = high + 1;
+	context->range.level[0].sens = secattr->mls_lvl + 1;
+	context->range.level[1].sens = context->range.level[0].sens;
 }
 
 /**
- * mls_export_cat - Export the MLS categories
+ * mls_export_netlbl_cat - Export the MLS categories to NetLabel
  * @context: the security context
- * @low: the low category
- * @low_len: length of the cat_low bitmap in bytes
- * @high: the high category
- * @high_len: length of the cat_high bitmap in bytes
+ * @secattr: the NetLabel security attributes
  *
  * Description:
- * Given the security context export the low MLS category bitmap into cat_low
- * and the high category bitmap into cat_high.  The MLS categories are only
- * exported if the pointers are not NULL, if they are NULL then that level is
- * not exported.  The caller is responsibile for freeing the memory when
- * finished.  Returns zero on success, negative values on failure.
+ * Given the security context, copy the low MLS categories into the NetLabel
+ * MLS category field.  Returns zero on success, negative values on failure.
  *
  */
-int mls_export_cat(const struct context *context,
-		   unsigned char **low,
-		   size_t *low_len,
-		   unsigned char **high,
-		   size_t *high_len)
+int mls_export_netlbl_cat(struct context *context,
+			  struct netlbl_lsm_secattr *secattr)
 {
-	int rc = -EPERM;
+	int rc;
 
-	if (!selinux_mls_enabled) {
-		*low = NULL;
-		*low_len = 0;
-		*high = NULL;
-		*high_len = 0;
+	if (!selinux_mls_enabled)
 		return 0;
-	}
 
-	if (low != NULL) {
-		rc = ebitmap_export(&context->range.level[0].cat,
-				    low,
-				    low_len);
-		if (rc != 0)
-			goto export_cat_failure;
-	}
-	if (high != NULL) {
-		rc = ebitmap_export(&context->range.level[1].cat,
-				    high,
-				    high_len);
-		if (rc != 0)
-			goto export_cat_failure;
-	}
+	rc = ebitmap_netlbl_export(&context->range.level[0].cat,
+				   &secattr->mls_cat);
+	if (rc == 0 && secattr->mls_cat != NULL)
+		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
 
-	return 0;
-
-export_cat_failure:
-	if (low != NULL) {
-		kfree(*low);
-		*low = NULL;
-		*low_len = 0;
-	}
-	if (high != NULL) {
-		kfree(*high);
-		*high = NULL;
-		*high_len = 0;
-	}
 	return rc;
 }
 
 /**
- * mls_import_cat - Import the MLS categories
+ * mls_import_netlbl_cat - Import the MLS categories from NetLabel
  * @context: the security context
- * @low: the low category
- * @low_len: length of the cat_low bitmap in bytes
- * @high: the high category
- * @high_len: length of the cat_high bitmap in bytes
+ * @secattr: the NetLabel security attributes
  *
  * Description:
- * Given the security context and the two category bitmap strings import the
- * categories into the security context.  The MLS categories are only imported
- * if the pointers are not NULL, if they are NULL they are skipped.  Returns
- * zero on success, negative values on failure.
+ * Copy the NetLabel security attributes into the SELinux context; since the
+ * NetLabel security attribute only contains a single MLS category, use it for
+ * both the low and high categories of the context.  Returns zero on success,
+ * negative values on failure.
  *
  */
-int mls_import_cat(struct context *context,
-		   const unsigned char *low,
-		   size_t low_len,
-		   const unsigned char *high,
-		   size_t high_len)
+int mls_import_netlbl_cat(struct context *context,
+			  struct netlbl_lsm_secattr *secattr)
 {
-	int rc = -EPERM;
+	int rc;
 
 	if (!selinux_mls_enabled)
 		return 0;
 
-	if (low != NULL) {
-		rc = ebitmap_import(low,
-				    low_len,
-				    &context->range.level[0].cat);
-		if (rc != 0)
-			goto import_cat_failure;
-	}
-	if (high != NULL) {
-		if (high == low)
-			rc = ebitmap_cpy(&context->range.level[1].cat,
-					 &context->range.level[0].cat);
-		else
-			rc = ebitmap_import(high,
-					    high_len,
-					    &context->range.level[1].cat);
-		if (rc != 0)
-			goto import_cat_failure;
-	}
+	rc = ebitmap_netlbl_import(&context->range.level[0].cat,
+				   secattr->mls_cat);
+	if (rc != 0)
+		goto import_netlbl_cat_failure;
+
+	rc = ebitmap_cpy(&context->range.level[1].cat,
+			 &context->range.level[0].cat);
+	if (rc != 0)
+		goto import_netlbl_cat_failure;
 
 	return 0;
 
-import_cat_failure:
+import_netlbl_cat_failure:
 	ebitmap_destroy(&context->range.level[0].cat);
 	ebitmap_destroy(&context->range.level[1].cat);
 	return rc;
 }
+#endif /* CONFIG_NETLABEL */
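[Illustrative aside, not from the patch: mls_export_netlbl_lvl() and mls_import_netlbl_lvl() only shuffle an off-by-one, since policy sensitivity values are 1-based while the NetLabel/CIPSO level is 0-based.  A throwaway sketch with made-up numbers:]

#include <stdio.h>

int main(void)
{
	unsigned int sens = 3;			/* context->range.level[0].sens, 1-based */
	unsigned int mls_lvl = sens - 1;	/* what lands in secattr->mls_lvl */

	/* import reverses the shift and uses the result for both low and high */
	printf("sens %u -> mls_lvl %u -> sens %u (low == high)\n",
	       sens, mls_lvl, mls_lvl + 1);
	return 0;
}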
diff --git a/security/selinux/ss/mls.h b/security/selinux/ss/mls.h
index df6032c..661d6fc 100644
--- a/security/selinux/ss/mls.h
+++ b/security/selinux/ss/mls.h
@@ -13,7 +13,7 @@
 /*
  * Updated: Hewlett-Packard <paul.moore@hp.com>
  *
- *      Added support to import/export the MLS label
+ *      Added support to import/export the MLS label to/from NetLabel
  *
  * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
  */
@@ -69,19 +69,37 @@
 int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
                          struct context *usercon);
 
-void mls_export_lvl(const struct context *context, u32 *low, u32 *high);
-void mls_import_lvl(struct context *context, u32 low, u32 high);
-
-int mls_export_cat(const struct context *context,
-		   unsigned char **low,
-		   size_t *low_len,
-		   unsigned char **high,
-		   size_t *high_len);
-int mls_import_cat(struct context *context,
-		   const unsigned char *low,
-		   size_t low_len,
-		   const unsigned char *high,
-		   size_t high_len);
+#ifdef CONFIG_NETLABEL
+void mls_export_netlbl_lvl(struct context *context,
+			   struct netlbl_lsm_secattr *secattr);
+void mls_import_netlbl_lvl(struct context *context,
+			   struct netlbl_lsm_secattr *secattr);
+int mls_export_netlbl_cat(struct context *context,
+			  struct netlbl_lsm_secattr *secattr);
+int mls_import_netlbl_cat(struct context *context,
+			  struct netlbl_lsm_secattr *secattr);
+#else
+static inline void mls_export_netlbl_lvl(struct context *context,
+					 struct netlbl_lsm_secattr *secattr)
+{
+	return;
+}
+static inline void mls_import_netlbl_lvl(struct context *context,
+					 struct netlbl_lsm_secattr *secattr)
+{
+	return;
+}
+static inline int mls_export_netlbl_cat(struct context *context,
+					struct netlbl_lsm_secattr *secattr)
+{
+	return -ENOMEM;
+}
+static inline int mls_import_netlbl_cat(struct context *context,
+					struct netlbl_lsm_secattr *secattr)
+{
+	return -ENOMEM;
+}
+#endif
 
 #endif	/* _SS_MLS_H */
 
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index ba48961..cd79c63 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -468,7 +468,7 @@
 	return 0;
 }
 
-static int class_destroy(void *key, void *datum, void *p)
+static int cls_destroy(void *key, void *datum, void *p)
 {
 	struct class_datum *cladatum;
 	struct constraint_node *constraint, *ctemp;
@@ -566,7 +566,7 @@
 static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) =
 {
 	common_destroy,
-	class_destroy,
+	cls_destroy,
 	role_destroy,
 	type_destroy,
 	user_destroy,
@@ -1124,7 +1124,7 @@
 out:
 	return rc;
 bad:
-	class_destroy(key, cladatum, NULL);
+	cls_destroy(key, cladatum, NULL);
 	goto out;
 }
 
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index bfe1227..bdb7070 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -17,9 +17,13 @@
  *
  *      Added support for NetLabel
  *
+ * Updated: Chad Sellers <csellers@tresys.com>
+ *
+ *  Added validation of kernel classes and permissions
+ *
  * Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
  * Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
- * Copyright (C) 2003 - 2004 Tresys Technology, LLC
+ * Copyright (C) 2003 - 2004, 2006 Tresys Technology, LLC
  * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
  *	This program is free software; you can redistribute it and/or modify
  *  	it under the terms of the GNU General Public License as published by
@@ -29,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/spinlock.h>
+#include <linux/rcupdate.h>
 #include <linux/errno.h>
 #include <linux/in.h>
 #include <linux/sched.h>
@@ -49,10 +54,17 @@
 #include "mls.h"
 #include "objsec.h"
 #include "selinux_netlabel.h"
+#include "xfrm.h"
+#include "ebitmap.h"
 
 extern void selnl_notify_policyload(u32 seqno);
 unsigned int policydb_loaded_version;
 
+/*
+ * This is defined in avc.c
+ */
+extern const struct selinux_class_perm selinux_class_perm;
+
 static DEFINE_RWLOCK(policy_rwlock);
 #define POLICY_RDLOCK read_lock(&policy_rwlock)
 #define POLICY_WRLOCK write_lock_irq(&policy_rwlock)
@@ -1019,86 +1031,112 @@
 }
 
 /*
- * Verify that each permission that is defined under the
- * existing policy is still defined with the same value
- * in the new policy.
+ * Verify that each kernel-defined class and permission that appears in the
+ * policy matches the kernel's own definition.
  */
-static int validate_perm(void *key, void *datum, void *p)
+static int validate_classes(struct policydb *p)
 {
-	struct hashtab *h;
-	struct perm_datum *perdatum, *perdatum2;
-	int rc = 0;
+	int i, j;
+	struct class_datum *cladatum;
+	struct perm_datum *perdatum;
+	u32 nprim, tmp, common_pts_len, perm_val, pol_val;
+	u16 class_val;
+	const struct selinux_class_perm *kdefs = &selinux_class_perm;
+	const char *def_class, *def_perm, *pol_class;
+	struct symtab *perms;
 
-
-	h = p;
-	perdatum = datum;
-
-	perdatum2 = hashtab_search(h, key);
-	if (!perdatum2) {
-		printk(KERN_ERR "security:  permission %s disappeared",
-		       (char *)key);
-		rc = -ENOENT;
-		goto out;
-	}
-	if (perdatum->value != perdatum2->value) {
-		printk(KERN_ERR "security:  the value of permission %s changed",
-		       (char *)key);
-		rc = -EINVAL;
-	}
-out:
-	return rc;
-}
-
-/*
- * Verify that each class that is defined under the
- * existing policy is still defined with the same
- * attributes in the new policy.
- */
-static int validate_class(void *key, void *datum, void *p)
-{
-	struct policydb *newp;
-	struct class_datum *cladatum, *cladatum2;
-	int rc;
-
-	newp = p;
-	cladatum = datum;
-
-	cladatum2 = hashtab_search(newp->p_classes.table, key);
-	if (!cladatum2) {
-		printk(KERN_ERR "security:  class %s disappeared\n",
-		       (char *)key);
-		rc = -ENOENT;
-		goto out;
-	}
-	if (cladatum->value != cladatum2->value) {
-		printk(KERN_ERR "security:  the value of class %s changed\n",
-		       (char *)key);
-		rc = -EINVAL;
-		goto out;
-	}
-	if ((cladatum->comdatum && !cladatum2->comdatum) ||
-	    (!cladatum->comdatum && cladatum2->comdatum)) {
-		printk(KERN_ERR "security:  the inherits clause for the access "
-		       "vector definition for class %s changed\n", (char *)key);
-		rc = -EINVAL;
-		goto out;
-	}
-	if (cladatum->comdatum) {
-		rc = hashtab_map(cladatum->comdatum->permissions.table, validate_perm,
-		                 cladatum2->comdatum->permissions.table);
-		if (rc) {
-			printk(" in the access vector definition for class "
-			       "%s\n", (char *)key);
-			goto out;
+	for (i = 1; i < kdefs->cts_len; i++) {
+		def_class = kdefs->class_to_string[i];
+		if (i > p->p_classes.nprim) {
+			printk(KERN_INFO
+			       "security:  class %s not defined in policy\n",
+			       def_class);
+			continue;
+		}
+		pol_class = p->p_class_val_to_name[i-1];
+		if (strcmp(pol_class, def_class)) {
+			printk(KERN_ERR
+			       "security:  class %d is incorrect, found %s but should be %s\n",
+			       i, pol_class, def_class);
+			return -EINVAL;
 		}
 	}
-	rc = hashtab_map(cladatum->permissions.table, validate_perm,
-	                 cladatum2->permissions.table);
-	if (rc)
-		printk(" in access vector definition for class %s\n",
-		       (char *)key);
-out:
-	return rc;
+	for (i = 0; i < kdefs->av_pts_len; i++) {
+		class_val = kdefs->av_perm_to_string[i].tclass;
+		perm_val = kdefs->av_perm_to_string[i].value;
+		def_perm = kdefs->av_perm_to_string[i].name;
+		if (class_val > p->p_classes.nprim)
+			continue;
+		pol_class = p->p_class_val_to_name[class_val-1];
+		cladatum = hashtab_search(p->p_classes.table, pol_class);
+		BUG_ON(!cladatum);
+		perms = &cladatum->permissions;
+		nprim = 1 << (perms->nprim - 1);
+		if (perm_val > nprim) {
+			printk(KERN_INFO
+			       "security:  permission %s in class %s not defined in policy\n",
+			       def_perm, pol_class);
+			continue;
+		}
+		perdatum = hashtab_search(perms->table, def_perm);
+		if (perdatum == NULL) {
+			printk(KERN_ERR
+			       "security:  permission %s in class %s not found in policy\n",
+			       def_perm, pol_class);
+			return -EINVAL;
+		}
+		pol_val = 1 << (perdatum->value - 1);
+		if (pol_val != perm_val) {
+			printk(KERN_ERR
+			       "security:  permission %s in class %s has incorrect value\n",
+			       def_perm, pol_class);
+			return -EINVAL;
+		}
+	}
+	for (i = 0; i < kdefs->av_inherit_len; i++) {
+		class_val = kdefs->av_inherit[i].tclass;
+		if (class_val > p->p_classes.nprim)
+			continue;
+		pol_class = p->p_class_val_to_name[class_val-1];
+		cladatum = hashtab_search(p->p_classes.table, pol_class);
+		BUG_ON(!cladatum);
+		if (!cladatum->comdatum) {
+			printk(KERN_ERR
+			       "security:  class %s should have an inherits clause but does not\n",
+			       pol_class);
+			return -EINVAL;
+		}
+		tmp = kdefs->av_inherit[i].common_base;
+		common_pts_len = 0;
+		while (!(tmp & 0x01)) {
+			common_pts_len++;
+			tmp >>= 1;
+		}
+		perms = &cladatum->comdatum->permissions;
+		for (j = 0; j < common_pts_len; j++) {
+			def_perm = kdefs->av_inherit[i].common_pts[j];
+			if (j >= perms->nprim) {
+				printk(KERN_INFO
+				       "security:  permission %s in class %s not defined in policy\n",
+				       def_perm, pol_class);
+				continue;
+			}
+			perdatum = hashtab_search(perms->table, def_perm);
+			if (perdatum == NULL) {
+				printk(KERN_ERR
+				       "security:  permission %s in class %s not found in policy\n",
+				       def_perm, pol_class);
+				return -EINVAL;
+			}
+			if (perdatum->value != j + 1) {
+				printk(KERN_ERR
+				       "security:  permission %s in class %s has incorrect value\n",
+				       def_perm, pol_class);
+				return -EINVAL;
+			}
+		}
+	}
+	return 0;
 }
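[Illustrative aside, not from the patch: the value checks above work because the kernel's av_perm_to_string entries carry access-vector bit masks while the policy stores 1-based permission indices, hence the 1 << (value - 1) conversions in validate_classes().  A tiny standalone sketch with invented numbers:]

#include <stdio.h>

int main(void)
{
	unsigned int policy_value = 3;			/* perm_datum->value (1-based)   */
	unsigned int av_bit = 1u << (policy_value - 1);	/* matching access-vector bit    */
	unsigned int nprim = 5;				/* permissions defined by policy */
	unsigned int highest = 1u << (nprim - 1);	/* highest defined bit           */

	printf("policy value %u <-> av bit %#x\n", policy_value, av_bit);
	printf("kernel bit 0x10 defined in the policy? %s\n",
	       0x10u > highest ? "no" : "yes");
	return 0;
}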
 
 /* Clone the SID into the new SID table. */
@@ -1243,6 +1281,16 @@
 			avtab_cache_destroy();
 			return -EINVAL;
 		}
+		/* Verify that the kernel defined classes are correct. */
+		if (validate_classes(&policydb)) {
+			printk(KERN_ERR
+			       "security:  the definition of a class is incorrect\n");
+			LOAD_UNLOCK;
+			sidtab_destroy(&sidtab);
+			policydb_destroy(&policydb);
+			avtab_cache_destroy();
+			return -EINVAL;
+		}
 		policydb_loaded_version = policydb.policyvers;
 		ss_initialized = 1;
 		seqno = ++latest_granting;
@@ -1265,10 +1313,10 @@
 
 	sidtab_init(&newsidtab);
 
-	/* Verify that the existing classes did not change. */
-	if (hashtab_map(policydb.p_classes.table, validate_class, &newpolicydb)) {
-		printk(KERN_ERR "security:  the definition of an existing "
-		       "class changed\n");
+	/* Verify that the kernel defined classes are correct. */
+	if (validate_classes(&newpolicydb)) {
+		printk(KERN_ERR
+		       "security:  the definition of a class is incorrect\n");
 		rc = -EINVAL;
 		goto err;
 	}
@@ -2145,6 +2193,32 @@
 	aurule_callback = callback;
 }
 
+/**
+ * security_skb_extlbl_sid - Determine the external label of a packet
+ * @skb: the packet
+ * @base_sid: the SELinux SID to use as a context for MLS only external labels
+ * @sid: the packet's SID
+ *
+ * Description:
+ * Check the various forms of external packet labeling and determine
+ * the external SID for the packet.
+ *
+ */
+void security_skb_extlbl_sid(struct sk_buff *skb, u32 base_sid, u32 *sid)
+{
+	u32 xfrm_sid;
+	u32 nlbl_sid;
+
+	selinux_skb_xfrm_sid(skb, &xfrm_sid);
+	if (selinux_netlbl_skbuff_getsid(skb,
+					 (xfrm_sid == SECSID_NULL ?
+					  base_sid : xfrm_sid),
+					 &nlbl_sid) != 0)
+		nlbl_sid = SECSID_NULL;
+
+	*sid = (nlbl_sid == SECSID_NULL ? xfrm_sid : nlbl_sid);
+}
+
 #ifdef CONFIG_NETLABEL
 /*
  * This is the structure we store inside the NetLabel cache block.
@@ -2209,8 +2283,6 @@
 	cache = kzalloc(sizeof(*cache),	GFP_ATOMIC);
 	if (cache == NULL)
 		goto netlbl_cache_add_return;
-	secattr.cache->free = selinux_netlbl_cache_free;
-	secattr.cache->data = (void *)cache;
 
 	cache->type = NETLBL_CACHE_T_MLS;
 	if (ebitmap_cpy(&cache->data.mls_label.level[0].cat,
@@ -2223,6 +2295,10 @@
 	cache->data.mls_label.level[0].sens = ctx->range.level[0].sens;
 	cache->data.mls_label.level[1].sens = ctx->range.level[0].sens;
 
+	secattr.cache->free = selinux_netlbl_cache_free;
+	secattr.cache->data = (void *)cache;
+	secattr.flags = NETLBL_SECATTR_CACHE;
+
 	netlbl_cache_add(skb, &secattr);
 
 netlbl_cache_add_return:
@@ -2268,7 +2344,7 @@
 
 	POLICY_RDLOCK;
 
-	if (secattr->cache) {
+	if (secattr->flags & NETLBL_SECATTR_CACHE) {
 		cache = NETLBL_CACHE(secattr->cache->data);
 		switch (cache->type) {
 		case NETLBL_CACHE_T_SID:
@@ -2301,7 +2377,7 @@
 		default:
 			goto netlbl_secattr_to_sid_return;
 		}
-	} else if (secattr->mls_lvl_vld) {
+	} else if (secattr->flags & NETLBL_SECATTR_MLS_LVL) {
 		ctx = sidtab_search(&sidtab, base_sid);
 		if (ctx == NULL)
 			goto netlbl_secattr_to_sid_return;
@@ -2309,13 +2385,10 @@
 		ctx_new.user = ctx->user;
 		ctx_new.role = ctx->role;
 		ctx_new.type = ctx->type;
-		mls_import_lvl(&ctx_new, secattr->mls_lvl, secattr->mls_lvl);
-		if (secattr->mls_cat) {
-			if (mls_import_cat(&ctx_new,
-					   secattr->mls_cat,
-					   secattr->mls_cat_len,
-					   NULL,
-					   0) != 0)
+		mls_import_netlbl_lvl(&ctx_new, secattr);
+		if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
+			if (ebitmap_netlbl_import(&ctx_new.range.level[0].cat,
+						  secattr->mls_cat) != 0)
 				goto netlbl_secattr_to_sid_return;
 			ctx_new.range.level[1].cat.highbit =
 				ctx_new.range.level[0].cat.highbit;
@@ -2360,20 +2433,20 @@
  * assign to the packet.  Returns zero on success, negative values on failure.
  *
  */
-static int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
-					u32 base_sid,
-					u32 *sid)
+int selinux_netlbl_skbuff_getsid(struct sk_buff *skb, u32 base_sid, u32 *sid)
 {
 	int rc;
 	struct netlbl_lsm_secattr secattr;
 
 	netlbl_secattr_init(&secattr);
 	rc = netlbl_skbuff_getattr(skb, &secattr);
-	if (rc == 0)
+	if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE)
 		rc = selinux_netlbl_secattr_to_sid(skb,
 						   &secattr,
 						   base_sid,
 						   sid);
+	else
+		*sid = SECSID_NULL;
 	netlbl_secattr_destroy(&secattr);
 
 	return rc;
@@ -2386,7 +2459,9 @@
  *
  * Description:
  * Attempt to label a socket using the NetLabel mechanism using the given
- * SID.  Returns zero values on success, negative values on failure.
+ * SID.  Returns zero values on success, negative values on failure.  The
+ * caller is responsible for calling rcu_read_lock() before calling this
+ * function and rcu_read_unlock() after this function returns.
  *
  */
 static int selinux_netlbl_socket_setsid(struct socket *sock, u32 sid)
@@ -2409,19 +2484,18 @@
 
 	secattr.domain = kstrdup(policydb.p_type_val_to_name[ctx->type - 1],
 				 GFP_ATOMIC);
-	mls_export_lvl(ctx, &secattr.mls_lvl, NULL);
-	secattr.mls_lvl_vld = 1;
-	rc = mls_export_cat(ctx,
-			    &secattr.mls_cat,
-			    &secattr.mls_cat_len,
-			    NULL,
-			    NULL);
+	secattr.flags |= NETLBL_SECATTR_DOMAIN;
+	mls_export_netlbl_lvl(ctx, &secattr);
+	rc = mls_export_netlbl_cat(ctx, &secattr);
 	if (rc != 0)
 		goto netlbl_socket_setsid_return;
 
 	rc = netlbl_socket_setattr(sock, &secattr);
-	if (rc == 0)
+	if (rc == 0) {
+		spin_lock(&sksec->nlbl_lock);
 		sksec->nlbl_state = NLBL_LABELED;
+		spin_unlock(&sksec->nlbl_lock);
+	}
 
 netlbl_socket_setsid_return:
 	POLICY_RDUNLOCK;
@@ -2430,6 +2504,25 @@
 }
 
 /**
+ * selinux_netlbl_sk_security_reset - Reset the NetLabel fields
+ * @ssec: the sk_security_struct
+ * @family: the socket family
+ *
+ * Description:
+ * Called when the NetLabel state of a sk_security_struct needs to be reset.
+ * The caller is responsible for all the NetLabel sk_security_struct locking.
+ *
+ */
+void selinux_netlbl_sk_security_reset(struct sk_security_struct *ssec,
+				      int family)
+{
+	if (family == PF_INET)
+		ssec->nlbl_state = NLBL_REQUIRE;
+	else
+		ssec->nlbl_state = NLBL_UNSET;
+}
+
+/**
  * selinux_netlbl_sk_security_init - Setup the NetLabel fields
  * @ssec: the sk_security_struct
  * @family: the socket family
@@ -2442,14 +2535,13 @@
 void selinux_netlbl_sk_security_init(struct sk_security_struct *ssec,
 				     int family)
 {
-        if (family == PF_INET)
-		ssec->nlbl_state = NLBL_REQUIRE;
-	else
-		ssec->nlbl_state = NLBL_UNSET;
+	/* No locking needed, we are the only one who has access to ssec */
+	selinux_netlbl_sk_security_reset(ssec, family);
+	spin_lock_init(&ssec->nlbl_lock);
 }
 
 /**
- * selinux_netlbl_sk_clone_security - Copy the NetLabel fields
+ * selinux_netlbl_sk_security_clone - Copy the NetLabel fields
  * @ssec: the original sk_security_struct
  * @newssec: the cloned sk_security_struct
  *
@@ -2458,41 +2550,41 @@
  * @newssec.
  *
  */
-void selinux_netlbl_sk_clone_security(struct sk_security_struct *ssec,
+void selinux_netlbl_sk_security_clone(struct sk_security_struct *ssec,
 				      struct sk_security_struct *newssec)
 {
+	/* We don't need to take newssec->nlbl_lock because we are the only
+	 * thread with access to newssec, but we do need to take the RCU read
+	 * lock as other threads could have access to ssec */
+	rcu_read_lock();
+	selinux_netlbl_sk_security_reset(newssec, ssec->sk->sk_family);
 	newssec->sclass = ssec->sclass;
-	if (ssec->nlbl_state != NLBL_UNSET)
-		newssec->nlbl_state = NLBL_REQUIRE;
-	else
-		newssec->nlbl_state = NLBL_UNSET;
+	rcu_read_unlock();
 }
 
 /**
  * selinux_netlbl_socket_post_create - Label a socket using NetLabel
  * @sock: the socket to label
- * @sock_family: the socket family
- * @sid: the SID to use
  *
  * Description:
  * Attempt to label a socket using the NetLabel mechanism using the given
  * SID.  Returns zero values on success, negative values on failure.
  *
  */
-int selinux_netlbl_socket_post_create(struct socket *sock,
-				      int sock_family,
-				      u32 sid)
+int selinux_netlbl_socket_post_create(struct socket *sock)
 {
+	int rc = 0;
 	struct inode_security_struct *isec = SOCK_INODE(sock)->i_security;
 	struct sk_security_struct *sksec = sock->sk->sk_security;
 
 	sksec->sclass = isec->sclass;
 
-	if (sock_family != PF_INET)
-		return 0;
+	rcu_read_lock();
+	if (sksec->nlbl_state == NLBL_REQUIRE)
+		rc = selinux_netlbl_socket_setsid(sock, sksec->sid);
+	rcu_read_unlock();
 
-	sksec->nlbl_state = NLBL_REQUIRE;
-	return selinux_netlbl_socket_setsid(sock, sid);
+	return rc;
 }
 
 /**
@@ -2514,11 +2606,16 @@
 
 	sksec->sclass = isec->sclass;
 
-	if (sk->sk_family != PF_INET)
+	rcu_read_lock();
+
+	if (sksec->nlbl_state != NLBL_REQUIRE) {
+		rcu_read_unlock();
 		return;
+	}
 
 	netlbl_secattr_init(&secattr);
 	if (netlbl_sock_getattr(sk, &secattr) == 0 &&
+	    secattr.flags != NETLBL_SECATTR_NONE &&
 	    selinux_netlbl_secattr_to_sid(NULL,
 					  &secattr,
 					  SECINITSID_UNLABELED,
@@ -2526,35 +2623,12 @@
 		sksec->peer_sid = nlbl_peer_sid;
 	netlbl_secattr_destroy(&secattr);
 
-	sksec->nlbl_state = NLBL_REQUIRE;
-
 	/* Try to set the NetLabel on the socket to save time later, if we fail
 	 * here we will pick up the pieces in later calls to
 	 * selinux_netlbl_inode_permission(). */
 	selinux_netlbl_socket_setsid(sock, sksec->sid);
-}
 
-/**
- * selinux_netlbl_inet_conn_request - Handle a new connection request
- * @skb: the packet
- * @sock_sid: the SID of the parent socket
- *
- * Description:
- * If present, use the security attributes of the packet in @skb and the
- * parent sock's SID to arrive at a SID for the new child sock.  Returns the
- * SID of the connection or SECSID_NULL on failure.
- *
- */
-u32 selinux_netlbl_inet_conn_request(struct sk_buff *skb, u32 sock_sid)
-{
-	int rc;
-	u32 peer_sid;
-
-	rc = selinux_netlbl_skbuff_getsid(skb, sock_sid, &peer_sid);
-	if (rc != 0)
-		return SECSID_NULL;
-
-	return peer_sid;
+	rcu_read_unlock();
 }
 
 /**
@@ -2572,25 +2646,24 @@
 int selinux_netlbl_inode_permission(struct inode *inode, int mask)
 {
 	int rc;
-	struct inode_security_struct *isec;
 	struct sk_security_struct *sksec;
 	struct socket *sock;
 
-	if (!S_ISSOCK(inode->i_mode))
+	if (!S_ISSOCK(inode->i_mode) ||
+	    ((mask & (MAY_WRITE | MAY_APPEND)) == 0))
 		return 0;
-
 	sock = SOCKET_I(inode);
-	isec = inode->i_security;
 	sksec = sock->sk->sk_security;
-	mutex_lock(&isec->lock);
-	if (unlikely(sksec->nlbl_state == NLBL_REQUIRE &&
-		     (mask & (MAY_WRITE | MAY_APPEND)))) {
-		lock_sock(sock->sk);
-		rc = selinux_netlbl_socket_setsid(sock, sksec->sid);
-		release_sock(sock->sk);
-	} else
-		rc = 0;
-	mutex_unlock(&isec->lock);
+
+	rcu_read_lock();
+	if (sksec->nlbl_state != NLBL_REQUIRE) {
+		rcu_read_unlock();
+		return 0;
+	}
+	lock_sock(sock->sk);
+	rc = selinux_netlbl_socket_setsid(sock, sksec->sid);
+	release_sock(sock->sk);
+	rcu_read_unlock();
 
 	return rc;
 }
@@ -2648,42 +2721,6 @@
 }
 
 /**
- * selinux_netlbl_socket_getpeersec_stream - Return the connected peer's SID
- * @sock: the socket
- *
- * Description:
- * Examine @sock to find the connected peer's SID.  Returns the SID on success
- * or SECSID_NULL on error.
- *
- */
-u32 selinux_netlbl_socket_getpeersec_stream(struct socket *sock)
-{
-	struct sk_security_struct *sksec = sock->sk->sk_security;
-	return sksec->peer_sid;
-}
-
-/**
- * selinux_netlbl_socket_getpeersec_dgram - Return the SID of a NetLabel packet
- * @skb: the packet
- *
- * Description:
- * Examine @skb to find the SID assigned to it by NetLabel.  Returns the SID on
- * success, SECSID_NULL on error.
- *
- */
-u32 selinux_netlbl_socket_getpeersec_dgram(struct sk_buff *skb)
-{
-	int peer_sid;
-
-	if (selinux_netlbl_skbuff_getsid(skb,
-					 SECINITSID_UNLABELED,
-					 &peer_sid) != 0)
-		return SECSID_NULL;
-
-	return peer_sid;
-}
-
-/**
  * selinux_netlbl_socket_setsockopt - Do not allow users to remove a NetLabel
  * @sock: the socket
  * @level: the socket level or protocol
@@ -2701,21 +2738,19 @@
 				     int optname)
 {
 	int rc = 0;
-	struct inode *inode = SOCK_INODE(sock);
 	struct sk_security_struct *sksec = sock->sk->sk_security;
-	struct inode_security_struct *isec = inode->i_security;
 	struct netlbl_lsm_secattr secattr;
 
-	mutex_lock(&isec->lock);
+	rcu_read_lock();
 	if (level == IPPROTO_IP && optname == IP_OPTIONS &&
 	    sksec->nlbl_state == NLBL_LABELED) {
 		netlbl_secattr_init(&secattr);
 		rc = netlbl_socket_getattr(sock, &secattr);
-		if (rc == 0 && (secattr.cache || secattr.mls_lvl_vld))
+		if (rc == 0 && secattr.flags != NETLBL_SECATTR_NONE)
 			rc = -EACCES;
 		netlbl_secattr_destroy(&secattr);
 	}
-	mutex_unlock(&isec->lock);
+	rcu_read_unlock();
 
 	return rc;
 }
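[Illustrative aside, not from the patch: the conversions in this file hinge on the secattr.flags bit mask replacing the old cache/mls_lvl_vld validity fields.  The runnable sketch below mimics that dispatch; the DEMO_SECATTR_* values are invented and only model the real NETLBL_SECATTR_* constants.]

#include <stdio.h>

#define DEMO_SECATTR_NONE	0x00u
#define DEMO_SECATTR_CACHE	0x01u
#define DEMO_SECATTR_MLS_LVL	0x02u
#define DEMO_SECATTR_MLS_CAT	0x04u

int main(void)
{
	unsigned int flags = DEMO_SECATTR_MLS_LVL | DEMO_SECATTR_MLS_CAT;

	if (flags == DEMO_SECATTR_NONE)
		printf("no usable label, fall back to SECSID_NULL\n");
	else if (flags & DEMO_SECATTR_CACHE)
		printf("short-circuit via the cached SID\n");
	else if (flags & DEMO_SECATTR_MLS_LVL)
		printf("build a context from the level%s\n",
		       (flags & DEMO_SECATTR_MLS_CAT) ? " and categories" : "");
	return 0;
}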
diff --git a/security/selinux/ss/symtab.c b/security/selinux/ss/symtab.c
index 24a10d3..837658a 100644
--- a/security/selinux/ss/symtab.c
+++ b/security/selinux/ss/symtab.c
@@ -9,9 +9,9 @@
 #include <linux/errno.h>
 #include "symtab.h"
 
-static unsigned int symhash(struct hashtab *h, void *key)
+static unsigned int symhash(struct hashtab *h, const void *key)
 {
-	char *p, *keyp;
+	const char *p, *keyp;
 	unsigned int size;
 	unsigned int val;
 
@@ -23,9 +23,9 @@
 	return val & (h->size - 1);
 }
 
-static int symcmp(struct hashtab *h, void *key1, void *key2)
+static int symcmp(struct hashtab *h, const void *key1, const void *key2)
 {
-	char *keyp1, *keyp2;
+	const char *keyp1, *keyp2;
 
 	keyp1 = key1;
 	keyp2 = key2;
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 675b995..bd8d1ef 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -115,76 +115,46 @@
 			struct flowi *fl)
 {
 	u32 state_sid;
-	u32 pol_sid;
-	int err;
+	int rc;
 
-	if (xp->security) {
-		if (!x->security)
-			/* unlabeled SA and labeled policy can't match */
-			return 0;
-		else
-			state_sid = x->security->ctx_sid;
-		pol_sid = xp->security->ctx_sid;
-	} else
+	if (!xp->security)
 		if (x->security)
 			/* unlabeled policy and labeled SA can't match */
 			return 0;
 		else
 			/* unlabeled policy and unlabeled SA match all flows */
 			return 1;
+	else
+		if (!x->security)
+			/* unlabeled SA and labeled policy can't match */
+			return 0;
+		else
+			if (!selinux_authorizable_xfrm(x))
+				/* Not a SELinux-labeled SA */
+				return 0;
 
-	err = avc_has_perm(state_sid, pol_sid, SECCLASS_ASSOCIATION,
-			  ASSOCIATION__POLMATCH,
-			  NULL);
+	state_sid = x->security->ctx_sid;
 
-	if (err)
+	if (fl->secid != state_sid)
 		return 0;
 
-	err = avc_has_perm(fl->secid, state_sid, SECCLASS_ASSOCIATION,
+	rc = avc_has_perm(fl->secid, state_sid, SECCLASS_ASSOCIATION,
 			  ASSOCIATION__SENDTO,
 			  NULL)? 0:1;
 
-	return err;
-}
-
-/*
- * LSM hook implementation that authorizes that a particular outgoing flow
- * can use a given security association.
- */
-
-int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm,
-				  struct xfrm_policy *xp)
-{
-	int rc = 0;
-	u32 sel_sid = SECINITSID_UNLABELED;
-	struct xfrm_sec_ctx *ctx;
-
-	if (!xp->security)
-		if (!xfrm->security)
-			return 1;
-		else
-			return 0;
-	else
-		if (!xfrm->security)
-			return 0;
-
-	/* Context sid is either set to label or ANY_ASSOC */
-	if ((ctx = xfrm->security)) {
-		if (!selinux_authorizable_ctx(ctx))
-			return 0;
-
-		sel_sid = ctx->ctx_sid;
-	}
-
-	rc = avc_has_perm(fl->secid, sel_sid, SECCLASS_ASSOCIATION,
-			  ASSOCIATION__SENDTO,
-			  NULL)? 0:1;
+	/*
+	 * We don't need a separate SA vs. policy polmatch check
+	 * since the SA is now of the same label as the flow and
+	 * a flow vs. policy polmatch check has already happened
+	 * in selinux_xfrm_policy_lookup() above.
+	 */
 
 	return rc;
 }
 
 /*
- * LSM hook implementation that determines the sid for the session.
+ * LSM hook implementation that checks and/or returns the xfrm sid for the
+ * incoming packet.
  */
 
 int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
@@ -226,16 +196,15 @@
  * CTX does not have a meaningful value on input
  */
 static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp,
-	struct xfrm_user_sec_ctx *uctx, struct xfrm_sec_ctx *pol, u32 sid)
+	struct xfrm_user_sec_ctx *uctx, u32 sid)
 {
 	int rc = 0;
 	struct task_security_struct *tsec = current->security;
 	struct xfrm_sec_ctx *ctx = NULL;
 	char *ctx_str = NULL;
 	u32 str_len;
-	u32 ctx_sid;
 
-	BUG_ON(uctx && pol);
+	BUG_ON(uctx && sid);
 
 	if (!uctx)
 		goto not_from_user;
@@ -279,15 +248,7 @@
 	return rc;
 
 not_from_user:
-	if (pol) {
-		rc = security_sid_mls_copy(pol->ctx_sid, sid, &ctx_sid);
-		if (rc)
-			goto out;
-	}
-	else
-		ctx_sid = sid;
-
-	rc = security_sid_to_context(ctx_sid, &ctx_str, &str_len);
+	rc = security_sid_to_context(sid, &ctx_str, &str_len);
 	if (rc)
 		goto out;
 
@@ -302,7 +263,7 @@
 
 	ctx->ctx_doi = XFRM_SC_DOI_LSM;
 	ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
-	ctx->ctx_sid = ctx_sid;
+	ctx->ctx_sid = sid;
 	ctx->ctx_len = str_len;
 	memcpy(ctx->ctx_str,
 	       ctx_str,
@@ -323,22 +284,14 @@
  * xfrm_policy.
  */
 int selinux_xfrm_policy_alloc(struct xfrm_policy *xp,
-		struct xfrm_user_sec_ctx *uctx, struct sock *sk)
+		struct xfrm_user_sec_ctx *uctx)
 {
 	int err;
-	u32 sid;
 
 	BUG_ON(!xp);
-	BUG_ON(uctx && sk);
+	BUG_ON(!uctx);
 
-	if (sk) {
-		struct sk_security_struct *ssec = sk->sk_security;
-		sid = ssec->sid;
-	}
-	else
-		sid = SECSID_NULL;
-
-	err = selinux_xfrm_sec_ctx_alloc(&xp->security, uctx, NULL, sid);
+	err = selinux_xfrm_sec_ctx_alloc(&xp->security, uctx, 0);
 	return err;
 }
 
@@ -399,13 +352,13 @@
  * xfrm_state.
  */
 int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uctx,
-		struct xfrm_sec_ctx *pol, u32 secid)
+		u32 secid)
 {
 	int err;
 
 	BUG_ON(!x);
 
-	err = selinux_xfrm_sec_ctx_alloc(&x->security, uctx, pol, secid);
+	err = selinux_xfrm_sec_ctx_alloc(&x->security, uctx, secid);
 	return err;
 }
 
@@ -419,74 +372,6 @@
 		kfree(ctx);
 }
 
-/*
- * SELinux internal function to retrieve the context of a connected
- * (sk->sk_state == TCP_ESTABLISHED) TCP socket based on its security
- * association used to connect to the remote socket.
- *
- * Retrieve via getsockopt SO_PEERSEC.
- */
-u32 selinux_socket_getpeer_stream(struct sock *sk)
-{
-	struct dst_entry *dst, *dst_test;
-	u32 peer_sid = SECSID_NULL;
-
-	if (sk->sk_state != TCP_ESTABLISHED)
-		goto out;
-
-	dst = sk_dst_get(sk);
-	if (!dst)
-		goto out;
-
- 	for (dst_test = dst; dst_test != 0;
-      	     dst_test = dst_test->child) {
-		struct xfrm_state *x = dst_test->xfrm;
-
- 		if (x && selinux_authorizable_xfrm(x)) {
-	 	 	struct xfrm_sec_ctx *ctx = x->security;
-			peer_sid = ctx->ctx_sid;
-			break;
-		}
-	}
-	dst_release(dst);
-
-out:
-	return peer_sid;
-}
-
-/*
- * SELinux internal function to retrieve the context of a UDP packet
- * based on its security association used to connect to the remote socket.
- *
- * Retrieve via setsockopt IP_PASSSEC and recvmsg with control message
- * type SCM_SECURITY.
- */
-u32 selinux_socket_getpeer_dgram(struct sk_buff *skb)
-{
-	struct sec_path *sp;
-
-	if (skb == NULL)
-		return SECSID_NULL;
-
-	if (skb->sk->sk_protocol != IPPROTO_UDP)
-		return SECSID_NULL;
-
-	sp = skb->sp;
-	if (sp) {
-		int i;
-
-		for (i = sp->len-1; i >= 0; i--) {
-			struct xfrm_state *x = sp->xvec[i];
-			if (selinux_authorizable_xfrm(x)) {
-				struct xfrm_sec_ctx *ctx = x->security;
-				return ctx->ctx_sid;
-			}
-		}
-	}
-
-	return SECSID_NULL;
-}
-
  /*
   * LSM hook implementation that authorizes deletion of labeled SAs.
   */
@@ -532,6 +417,13 @@
 		}
 	}
 
+	/*
+	 * This check, even when there's no association involved, is
+	 * intended, according to Trent Jaeger, to make sure a
+	 * process can't engage in non-ipsec communication unless
+	 * explicitly allowed by policy.
+	 */
+
 	rc = avc_has_perm(isec_sid, sel_sid, SECCLASS_ASSOCIATION,
 			  ASSOCIATION__RECVFROM, ad);
 
@@ -543,10 +435,10 @@
  * If we have no security association, then we need to determine
  * whether the socket is allowed to send to an unlabelled destination.
  * If we do have a authorizable security association, then it has already been
- * checked in xfrm_policy_lookup hook.
+ * checked in the selinux_xfrm_state_pol_flow_match hook above.
  */
 int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
-					struct avc_audit_data *ad)
+					struct avc_audit_data *ad, u8 proto)
 {
 	struct dst_entry *dst;
 	int rc = 0;
@@ -565,6 +457,27 @@
 		}
 	}
 
+	switch (proto) {
+	case IPPROTO_AH:
+	case IPPROTO_ESP:
+	case IPPROTO_COMP:
+		/*
+		 * We should have already seen this packet once before
+		 * it underwent xfrm(s). No need to subject it to the
+		 * unlabeled check.
+		 */
+		goto out;
+	default:
+		break;
+	}
+
+	/*
+	 * This check, even when there's no association involved, is
+	 * intended, according to Trent Jaeger, to make sure a
+	 * process can't engage in non-ipsec communication unless
+	 * explicitly allowed by policy.
+	 */
+
 	rc = avc_has_perm(isec_sid, SECINITSID_UNLABELED, SECCLASS_ASSOCIATION,
 			  ASSOCIATION__SENDTO, ad);
 out:
diff --git a/sound/Kconfig b/sound/Kconfig
index e0d791a..95949b6 100644
--- a/sound/Kconfig
+++ b/sound/Kconfig
@@ -64,11 +64,11 @@
 
 source "sound/mips/Kconfig"
 
-# the following will depenend on the order of config.
+# the following will depend on the order of config.
 # here assuming USB is defined before ALSA
 source "sound/usb/Kconfig"
 
-# the following will depenend on the order of config.
+# the following will depend on the order of config.
 # here assuming PCMCIA is defined before ALSA
 source "sound/pcmcia/Kconfig"
 
diff --git a/sound/aoa/aoa-gpio.h b/sound/aoa/aoa-gpio.h
index 3a61f31..ee64f5d 100644
--- a/sound/aoa/aoa-gpio.h
+++ b/sound/aoa/aoa-gpio.h
@@ -59,10 +59,10 @@
 };
 
 struct gpio_notification {
+	struct delayed_work work;
 	notify_func_t notify;
 	void *data;
 	void *gpio_private;
-	struct work_struct work;
 	struct mutex mutex;
 };
 
diff --git a/sound/aoa/core/snd-aoa-gpio-feature.c b/sound/aoa/core/snd-aoa-gpio-feature.c
index 40eb47e..2b03bc7 100644
--- a/sound/aoa/core/snd-aoa-gpio-feature.c
+++ b/sound/aoa/core/snd-aoa-gpio-feature.c
@@ -195,9 +195,10 @@
 	ftr_gpio_set_lineout(rt, (s>>2)&1);
 }
 
-static void ftr_handle_notify(void *data)
+static void ftr_handle_notify(struct work_struct *work)
 {
-	struct gpio_notification *notif = data;
+	struct gpio_notification *notif =
+		container_of(work, struct gpio_notification, work.work);
 
 	mutex_lock(&notif->mutex);
 	if (notif->notify)
@@ -253,12 +254,9 @@
 
 	ftr_gpio_all_amps_off(rt);
 	rt->implementation_private = 0;
-	INIT_WORK(&rt->headphone_notify.work, ftr_handle_notify,
-		  &rt->headphone_notify);
-	INIT_WORK(&rt->line_in_notify.work, ftr_handle_notify,
-		  &rt->line_in_notify);
-	INIT_WORK(&rt->line_out_notify.work, ftr_handle_notify,
-		  &rt->line_out_notify);
+	INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify);
+	INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify);
+	INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify);
 	mutex_init(&rt->headphone_notify.mutex);
 	mutex_init(&rt->line_in_notify.mutex);
 	mutex_init(&rt->line_out_notify.mutex);
@@ -287,7 +285,7 @@
 {
 	struct gpio_notification *notif = data;
 
-	schedule_work(&notif->work);
+	schedule_delayed_work(&notif->work, 0);
 
 	return IRQ_HANDLED;
 }
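[Illustrative aside, not from the patch: the INIT_DELAYED_WORK() conversions here and in snd-aoa-gpio-pmf.c and ak4114.c all rely on the same idiom, where the handler receives the embedded work item and walks back to its container.  A userspace model of that pointer arithmetic; the layout and names are illustrative only.]

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_work { int pending; };
struct demo_delayed_work { struct demo_work work; };

struct demo_notification {
	struct demo_delayed_work work;		/* like gpio_notification.work */
	const char *name;
};

static void demo_handler(struct demo_work *w)
{
	struct demo_notification *notif =
		container_of(w, struct demo_notification, work.work);

	printf("notify for %s\n", notif->name);
}

int main(void)
{
	struct demo_notification headphone = { .name = "headphone" };

	/* roughly what the workqueue does after schedule_delayed_work() */
	demo_handler(&headphone.work.work);
	return 0;
}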
diff --git a/sound/aoa/core/snd-aoa-gpio-pmf.c b/sound/aoa/core/snd-aoa-gpio-pmf.c
index 2836c32..5ca2220 100644
--- a/sound/aoa/core/snd-aoa-gpio-pmf.c
+++ b/sound/aoa/core/snd-aoa-gpio-pmf.c
@@ -69,9 +69,10 @@
 	pmf_gpio_set_lineout(rt, (s>>2)&1);
 }
 
-static void pmf_handle_notify(void *data)
+static void pmf_handle_notify(struct work_struct *work)
 {
-	struct gpio_notification *notif = data;
+	struct gpio_notification *notif =
+		container_of(work, struct gpio_notification, work.work);
 
 	mutex_lock(&notif->mutex);
 	if (notif->notify)
@@ -83,12 +84,9 @@
 {
 	pmf_gpio_all_amps_off(rt);
 	rt->implementation_private = 0;
-	INIT_WORK(&rt->headphone_notify.work, pmf_handle_notify,
-		  &rt->headphone_notify);
-	INIT_WORK(&rt->line_in_notify.work, pmf_handle_notify,
-		  &rt->line_in_notify);
-	INIT_WORK(&rt->line_out_notify.work, pmf_handle_notify,
-		  &rt->line_out_notify);
+	INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify);
+	INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify);
+	INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify);
 	mutex_init(&rt->headphone_notify.mutex);
 	mutex_init(&rt->line_in_notify.mutex);
 	mutex_init(&rt->line_out_notify.mutex);
@@ -129,7 +127,7 @@
 {
 	struct gpio_notification *notif = data;
 
-	schedule_work(&notif->work);
+	schedule_delayed_work(&notif->work, 0);
 }
 
 static int pmf_set_notify(struct gpio_runtime *rt,
diff --git a/sound/arm/sa11xx-uda1341.c b/sound/arm/sa11xx-uda1341.c
index c79a9af..c7e1b26 100644
--- a/sound/arm/sa11xx-uda1341.c
+++ b/sound/arm/sa11xx-uda1341.c
@@ -125,7 +125,7 @@
 #else
 	dma_regs_t *dma_regs;	/* points to our DMA registers */
 #endif
-	int active:1;		/* we are using this stream for transfer now */
+	unsigned int active:1;	/* we are using this stream for transfer now */
 	int period;		/* current transfer period */
 	int periods;		/* current count of periods registerd in the DMA engine */
 	int tx_spin;		/* are we recoding - flag used to do DMA trans. for sync */
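[Illustrative aside, not from the patch: the small fix above matters because a plain "int" bit-field is signed on most compilers, so a 1-bit field can only hold 0 and -1 and a test like "active == 1" never succeeds.  A quick demonstration:]

#include <stdio.h>

struct demo_stream {
	int active_signed:1;			/* the old declaration */
	unsigned int active_unsigned:1;		/* the fixed one */
};

int main(void)
{
	struct demo_stream s = { 0, 0 };

	s.active_signed = 1;	/* stored as -1 when signed; some compilers warn here */
	s.active_unsigned = 1;

	printf("signed:   %d (== 1? %s)\n",
	       s.active_signed, s.active_signed == 1 ? "yes" : "no");
	printf("unsigned: %u\n", s.active_unsigned);
	return 0;
}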
diff --git a/sound/core/init.c b/sound/core/init.c
index 3058d62..6152a75 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -361,6 +361,8 @@
 		snd_printk(KERN_WARNING "unable to free card info\n");
 		/* Not fatal error */
 	}
+	if (card->dev)
+		device_unregister(card->dev);
 	kfree(card);
 	return 0;
 }
@@ -495,6 +497,12 @@
 	int err;
 
 	snd_assert(card != NULL, return -EINVAL);
+	if (!card->dev) {
+		card->dev = device_create(sound_class, card->parent, 0,
+					  "card%i", card->number);
+		if (IS_ERR(card->dev))
+			card->dev = NULL;
+	}
 	if ((err = snd_device_register_all(card)) < 0)
 		return err;
 	mutex_lock(&snd_card_mutex);
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index fbbbcd2..5ac6e19 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -910,7 +910,8 @@
 	substream->pstr->substream_opened--;
 }
 
-static ssize_t show_pcm_class(struct class_device *class_device, char *buf)
+static ssize_t show_pcm_class(struct device *dev,
+			      struct device_attribute *attr, char *buf)
 {
 	struct snd_pcm *pcm;
 	const char *str;
@@ -921,7 +922,7 @@
 		[SNDRV_PCM_CLASS_DIGITIZER] = "digitizer",
 	};
 
-	if (! (pcm = class_get_devdata(class_device)) ||
+	if (! (pcm = dev_get_drvdata(dev)) ||
 	    pcm->dev_class > SNDRV_PCM_CLASS_LAST)
 		str = "none";
 	else
@@ -929,7 +930,7 @@
         return snprintf(buf, PAGE_SIZE, "%s\n", str);
 }
 
-static struct class_device_attribute pcm_attrs =
+static struct device_attribute pcm_attrs =
 	__ATTR(pcm_class, S_IRUGO, show_pcm_class, NULL);
 
 static int snd_pcm_dev_register(struct snd_device *device)
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 66e24b5..6ea67b1 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3027,7 +3027,7 @@
 	struct page * page;
 	
 	if (substream == NULL)
-		return NOPAGE_OOM;
+		return NOPAGE_SIGBUS;
 	runtime = substream->runtime;
 	page = virt_to_page(runtime->status);
 	get_page(page);
@@ -3070,7 +3070,7 @@
 	struct page * page;
 	
 	if (substream == NULL)
-		return NOPAGE_OOM;
+		return NOPAGE_SIGBUS;
 	runtime = substream->runtime;
 	page = virt_to_page(runtime->control);
 	get_page(page);
@@ -3131,18 +3131,18 @@
 	size_t dma_bytes;
 	
 	if (substream == NULL)
-		return NOPAGE_OOM;
+		return NOPAGE_SIGBUS;
 	runtime = substream->runtime;
 	offset = area->vm_pgoff << PAGE_SHIFT;
 	offset += address - area->vm_start;
-	snd_assert((offset % PAGE_SIZE) == 0, return NOPAGE_OOM);
+	snd_assert((offset % PAGE_SIZE) == 0, return NOPAGE_SIGBUS);
 	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
 	if (offset > dma_bytes - PAGE_SIZE)
 		return NOPAGE_SIGBUS;
 	if (substream->ops->page) {
 		page = substream->ops->page(substream, offset);
 		if (! page)
-			return NOPAGE_OOM;
+			return NOPAGE_OOM; /* XXX: is this really due to OOM? */
 	} else {
 		vaddr = runtime->dma_area + offset;
 		page = virt_to_page(vaddr);
diff --git a/sound/core/sound.c b/sound/core/sound.c
index efa476c..2827420 100644
--- a/sound/core/sound.c
+++ b/sound/core/sound.c
@@ -61,9 +61,6 @@
 static struct snd_minor *snd_minors[SNDRV_OS_MINORS];
 static DEFINE_MUTEX(sound_mutex);
 
-extern struct class *sound_class;
-
-
 #ifdef CONFIG_KMOD
 
 /**
@@ -268,11 +265,10 @@
 	snd_minors[minor] = preg;
 	if (card)
 		device = card->dev;
-	preg->class_dev = class_device_create(sound_class, NULL,
-					      MKDEV(major, minor),
-					      device, "%s", name);
-	if (preg->class_dev)
-		class_set_devdata(preg->class_dev, private_data);
+	preg->dev = device_create(sound_class, device, MKDEV(major, minor),
+				  "%s", name);
+	if (preg->dev)
+		dev_set_drvdata(preg->dev, private_data);
 
 	mutex_unlock(&sound_mutex);
 	return 0;
@@ -320,7 +316,7 @@
 		return -EINVAL;
 	}
 
-	class_device_destroy(sound_class, MKDEV(major, minor));
+	device_destroy(sound_class, MKDEV(major, minor));
 
 	kfree(snd_minors[minor]);
 	snd_minors[minor] = NULL;
@@ -331,15 +327,15 @@
 EXPORT_SYMBOL(snd_unregister_device);
 
 int snd_add_device_sysfs_file(int type, struct snd_card *card, int dev,
-			      const struct class_device_attribute *attr)
+			      struct device_attribute *attr)
 {
 	int minor, ret = -EINVAL;
-	struct class_device *cdev;
+	struct device *d;
 
 	mutex_lock(&sound_mutex);
 	minor = find_snd_minor(type, card, dev);
-	if (minor >= 0 && (cdev = snd_minors[minor]->class_dev) != NULL)
-		ret = class_device_create_file(cdev, attr);
+	if (minor >= 0 && (d = snd_minors[minor]->dev) != NULL)
+		ret = device_create_file(d, attr);
 	mutex_unlock(&sound_mutex);
 	return ret;
 
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index 12ffffc..d2f2c50 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -35,7 +35,7 @@
 
 #define AK4114_ADDR			0x00 /* fixed address */
 
-static void ak4114_stats(void *);
+static void ak4114_stats(struct work_struct *work);
 
 static void reg_write(struct ak4114 *ak4114, unsigned char reg, unsigned char val)
 {
@@ -158,7 +158,7 @@
 	reg_write(chip, AK4114_REG_PWRDN, old | AK4114_RST | AK4114_PWN);
 	/* bring up statistics / event queing */
 	chip->init = 0;
-	INIT_WORK(&chip->work, ak4114_stats, chip);
+	INIT_DELAYED_WORK(&chip->work, ak4114_stats);
 	queue_delayed_work(chip->workqueue, &chip->work, HZ / 10);
 }
 
@@ -561,9 +561,9 @@
 	return res;
 }
 
-static void ak4114_stats(void *data)
+static void ak4114_stats(struct work_struct *work)
 {
-	struct ak4114 *chip = (struct ak4114 *)data;
+	struct ak4114 *chip = container_of(work, struct ak4114, work.work);
 
 	if (chip->init)
 		return;
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index cc2b9ab..a0588c2 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -5,20 +5,6 @@
 #
 # Prompt user for primary drivers.
 
-config OSS_OBSOLETE_DRIVER
-	bool "Obsolete OSS drivers"
-	depends on SOUND_PRIME
-	help
-	  This option enables support for obsolete OSS drivers that
-	  are scheduled for removal in the near future since there
-	  are ALSA drivers for the same hardware.
-
-	  Please contact Adrian Bunk <bunk@stusta.de> if you had to
-	  say Y here because your soundcard is not properly supported
-	  by ALSA.
-
-	  If unsure, say N.
-
 config SOUND_BT878
 	tristate "BT878 audio dma"
 	depends on SOUND_PRIME && PCI
@@ -35,40 +21,6 @@
 	  To compile this driver as a module, choose M here: the module will
 	  be called btaudio.
 
-config SOUND_EMU10K1
-	tristate "Creative SBLive! (EMU10K1)"
-	depends on SOUND_PRIME && PCI && OSS_OBSOLETE_DRIVER
-	---help---
-	  Say Y or M if you have a PCI sound card using the EMU10K1 chipset,
-	  such as the Creative SBLive!, SB PCI512 or Emu-APS.
-
-	  For more information on this driver and the degree of support for
-	  the different card models please check:
-
-		<http://sourceforge.net/projects/emu10k1/>
-
-	  It is now possible to load dsp microcode patches into the EMU10K1
-	  chip.  These patches are used to implement real time sound
-	  processing effects which include for example: signal routing,
-	  bass/treble control, AC3 passthrough, ...
-	  Userspace tools to create new patches and load/unload them can be
-	  found in the emu-tools package at the above URL.
-
-config MIDI_EMU10K1
-	bool "Creative SBLive! MIDI (EXPERIMENTAL)"
-	depends on SOUND_EMU10K1 && EXPERIMENTAL && ISA_DMA_API
-	help
-	  Say Y if you want to be able to use the OSS /dev/sequencer
-	  interface.  This code is still experimental.
-
-config SOUND_FUSION
-	tristate "Crystal SoundFusion (CS4280/461x)"
-	depends on SOUND_PRIME && PCI && OSS_OBSOLETE_DRIVER
-	help
-	  This module drives the Crystal SoundFusion devices (CS4280/46xx
-	  series) when wired as native sound drivers with AC97 codecs.  If
-	  this driver does not work try the CS4232 driver.
-
 config SOUND_BCM_CS4297A
 	tristate "Crystal Sound CS4297a (for Swarm)"
 	depends on SOUND_PRIME && SIBYTE_SWARM
@@ -448,47 +400,6 @@
 
 	  Say Y unless you have 16MB or more RAM or a PCI sound card.
 
-config SOUND_AD1816
-	tristate "AD1816(A) based cards (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && SOUND_OSS && OSS_OBSOLETE_DRIVER
-	help
-	  Say M here if you have a sound card based on the Analog Devices
-	  AD1816(A) chip.
-
-	  If you compile the driver into the kernel, you have to add
-	  "ad1816=<io>,<irq>,<dma>,<dma2>" to the kernel command line.
-
-config SOUND_AD1889
-	tristate "AD1889 based cards (AD1819 codec) (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && SOUND_OSS && PCI && OSS_OBSOLETE_DRIVER
-	help
-	  Say M here if you have a sound card based on the Analog Devices
-	  AD1889 chip.
-
-config SOUND_ADLIB
-	tristate "Adlib Cards"
-	depends on SOUND_OSS && OSS_OBSOLETE_DRIVER
-	help
-	  Includes ASB 64 4D. Information on programming AdLib cards is
-	  available at <http://www.itsnet.com/home/ldragon/Specs/adlib.html>.
-
-config SOUND_ACI_MIXER
-	tristate "ACI mixer (miroSOUND PCM1-pro/PCM12/PCM20)"
-	depends on SOUND_OSS && OSS_OBSOLETE_DRIVER
-	---help---
-	  ACI (Audio Command Interface) is a protocol used to communicate with
-	  the microcontroller on some sound cards produced by miro and
-	  Cardinal Technologies.  The main function of the ACI is to control
-	  the mixer and to get a product identification.
-
-	  This VoxWare ACI driver currently supports the ACI functions on the
-	  miroSOUND PCM1-pro, PCM12 and PCM20 radio. On the PCM20 radio, ACI
-	  also controls the radio tuner. This is supported in the video4linux
-	  miropcm20 driver (say M or Y here and go back to "Multimedia
-	  devices" -> "Radio Adapters").
-
-	  This driver is also available as a module and will be called aci.
-
 config SOUND_CS4232
 	tristate "Crystal CS4232 based (PnP) cards"
 	depends on SOUND_OSS
@@ -594,18 +505,6 @@
 	  If you compile the driver into the kernel, you have to add
 	  "mpu401=<io>,<irq>" to the kernel command line.
 
-config SOUND_NM256
-	tristate "NM256AV/NM256ZX audio support"
-	depends on SOUND_OSS && OSS_OBSOLETE_DRIVER
-	help
-	  Say M here to include audio support for the NeoMagic 256AV/256ZX
-	  chipsets. These are the audio chipsets found in the Sony
-	  Z505S/SX/DX, some Sony F-series, and the Dell Latitude CPi and CPt
-	  laptops. It includes support for an AC97-compatible mixer and an
-	  apparently proprietary sound engine.
-
-	  See <file:Documentation/sound/oss/NM256> for further information.
-
 config SOUND_PAS
 	tristate "ProAudioSpectrum 16 support"
 	depends on SOUND_OSS
@@ -714,20 +613,6 @@
 
 	  If unsure, say Y.
 
-config SOUND_OPL3SA2
-	tristate "Yamaha OPL3-SA2 and SA3 based PnP cards"
-	depends on SOUND_OSS && OSS_OBSOLETE_DRIVER
-	help
-	  Say Y or M if you have a card based on one of these Yamaha sound
-	  chipsets or the "SAx", which is actually a SA3. Read
-	  <file:Documentation/sound/oss/OPL3-SA2> for more information on
-	  configuring these cards.
-
-	  If you compile the driver into the kernel and do not also
-	  configure in the optional ISA PnP support, you will have to add
-	  "opl3sa2=<io>,<irq>,<dma>,<dma2>,<mssio>,<mpuio>" to the kernel
-	  command line.
-
 config SOUND_UART6850
 	tristate "6850 UART support"
 	depends on SOUND_OSS
diff --git a/sound/oss/btaudio.c b/sound/oss/btaudio.c
index 6ad3841..ad7210a 100644
--- a/sound/oss/btaudio.c
+++ b/sound/oss/btaudio.c
@@ -1020,6 +1020,7 @@
  fail2:
         free_irq(bta->irq,bta);	
  fail1:
+	iounmap(bta->mmio);
 	kfree(bta);
  fail0:
 	release_mem_region(pci_resource_start(pci_dev,0),
@@ -1051,6 +1052,7 @@
         free_irq(bta->irq,bta);
 	release_mem_region(pci_resource_start(pci_dev,0),
 			   pci_resource_len(pci_dev,0));
+	iounmap(bta->mmio);
 
 	/* remove from linked list */
 	if (bta == btaudios) {
diff --git a/sound/oss/cs46xx.c b/sound/oss/cs46xx.c
index 6e3c41f..147c8a9 100644
--- a/sound/oss/cs46xx.c
+++ b/sound/oss/cs46xx.c
@@ -91,6 +91,7 @@
 #include <linux/poll.h>
 #include <linux/ac97_codec.h>
 #include <linux/mutex.h>
+#include <linux/mm.h>
 
 #include <asm/io.h>
 #include <asm/dma.h>
@@ -779,7 +780,7 @@
 		rate = 48000 / 9;
 
 	/*
-	 *  We can not capture at at rate greater than the Input Rate (48000).
+	 *  We cannot capture at a rate greater than the Input Rate (48000).
 	 *  Return an error if an attempt is made to stray outside that limit.
 	 */
 	if (rate > 48000)
@@ -4754,8 +4755,8 @@
 	mdelay(5 * cs_laptop_wait);		/* Shouldn't be needed ?? */
 	
 /*
-* If we are resuming under 2.2.x then we can not schedule a timeout.
-* so, just spin the CPU.
+* If we are resuming under 2.2.x then we cannot schedule a timeout,
+* so just spin the CPU.
 */
 	if (card->pm.flags & CS46XX_PM_IDLE) {
 	/*
diff --git a/sound/oss/dmabuf.c b/sound/oss/dmabuf.c
index b256c04..eaf6997 100644
--- a/sound/oss/dmabuf.c
+++ b/sound/oss/dmabuf.c
@@ -25,6 +25,7 @@
 #define BE_CONSERVATIVE
 #define SAMPLE_ROUNDUP 0
 
+#include <linux/mm.h>
 #include "sound_config.h"
 
 #define DMAP_FREE_ON_CLOSE      0
diff --git a/sound/oss/emu10k1/audio.c b/sound/oss/emu10k1/audio.c
index cde4d59..49f902f 100644
--- a/sound/oss/emu10k1/audio.c
+++ b/sound/oss/emu10k1/audio.c
@@ -36,6 +36,7 @@
 #include <linux/bitops.h>
 #include <asm/io.h>
 #include <linux/sched.h>
+#include <linux/mm.h>
 #include <linux/smp_lock.h>
 
 #include "hwaccess.h"
@@ -110,9 +111,15 @@
 
 		if ((bytestocopy >= wiinst->buffer.fragment_size)
 		    || (bytestocopy >= count)) {
+			int rc;
+
 			bytestocopy = min_t(u32, bytestocopy, count);
 
-			emu10k1_wavein_xferdata(wiinst, (u8 __user *)buffer, &bytestocopy);
+			rc = emu10k1_wavein_xferdata(wiinst,
+						     (u8 __user *)buffer,
+						     &bytestocopy);
+			if (rc)
+				return rc;
 
 			count -= bytestocopy;
 			buffer += bytestocopy;
diff --git a/sound/oss/emu10k1/cardwi.c b/sound/oss/emu10k1/cardwi.c
index 8bbf44b..060d1be 100644
--- a/sound/oss/emu10k1/cardwi.c
+++ b/sound/oss/emu10k1/cardwi.c
@@ -304,11 +304,12 @@
 	}
 }
 
-static void copy_block(u8 __user *dst, u8 * src, u32 str, u32 len, u8 cov)
+static int copy_block(u8 __user *dst, u8 * src, u32 str, u32 len, u8 cov)
 {
-	if (cov == 1)
-		__copy_to_user(dst, src + str, len);
-	else {
+	if (cov == 1) {
+		if (__copy_to_user(dst, src + str, len))
+			return -EFAULT;
+	} else {
 		u8 byte;
 		u32 i;
 
@@ -316,22 +317,26 @@
 
 		for (i = 0; i < len; i++) {
 			byte = src[2 * i] ^ 0x80;
-			__copy_to_user(dst + i, &byte, 1);
+			if (__copy_to_user(dst + i, &byte, 1))
+				return -EFAULT;
 		}
 	}
+
+	return 0;
 }
 
-void emu10k1_wavein_xferdata(struct wiinst *wiinst, u8 __user *data, u32 * size)
+int emu10k1_wavein_xferdata(struct wiinst *wiinst, u8 __user *data, u32 * size)
 {
 	struct wavein_buffer *buffer = &wiinst->buffer;
 	u32 sizetocopy, sizetocopy_now, start;
 	unsigned long flags;
+	int ret;
 
 	sizetocopy = min_t(u32, buffer->size, *size);
 	*size = sizetocopy;
 
 	if (!sizetocopy)
-		return;
+		return 0;
 
 	spin_lock_irqsave(&wiinst->lock, flags);
 	start = buffer->pos;
@@ -345,11 +350,17 @@
 	if (sizetocopy > sizetocopy_now) {
 		sizetocopy -= sizetocopy_now;
 
-		copy_block(data, buffer->addr, start, sizetocopy_now, buffer->cov);
-		copy_block(data + sizetocopy_now, buffer->addr, 0, sizetocopy, buffer->cov);
+		ret = copy_block(data, buffer->addr, start, sizetocopy_now,
+				 buffer->cov);
+		if (ret == 0)
+			ret = copy_block(data + sizetocopy_now, buffer->addr, 0,
+					 sizetocopy, buffer->cov);
 	} else {
-		copy_block(data, buffer->addr, start, sizetocopy, buffer->cov);
+		ret = copy_block(data, buffer->addr, start, sizetocopy,
+				 buffer->cov);
 	}
+
+	return ret;
 }
 
 void emu10k1_wavein_update(struct emu10k1_card *card, struct wiinst *wiinst)
diff --git a/sound/oss/emu10k1/cardwi.h b/sound/oss/emu10k1/cardwi.h
index 15cfb9b..e82029b 100644
--- a/sound/oss/emu10k1/cardwi.h
+++ b/sound/oss/emu10k1/cardwi.h
@@ -83,7 +83,7 @@
 void emu10k1_wavein_start(struct emu10k1_wavedevice *);
 void emu10k1_wavein_stop(struct emu10k1_wavedevice *);
 void emu10k1_wavein_getxfersize(struct wiinst *, u32 *);
-void emu10k1_wavein_xferdata(struct wiinst *, u8 __user *, u32 *);
+int emu10k1_wavein_xferdata(struct wiinst *, u8 __user *, u32 *);
 int emu10k1_wavein_setformat(struct emu10k1_wavedevice *, struct wave_format *);
 void emu10k1_wavein_update(struct emu10k1_card *, struct wiinst *);
 
diff --git a/sound/oss/emu10k1/passthrough.c b/sound/oss/emu10k1/passthrough.c
index 4e3baca..6d21d43 100644
--- a/sound/oss/emu10k1/passthrough.c
+++ b/sound/oss/emu10k1/passthrough.c
@@ -162,12 +162,15 @@
 
 		DPD(3, "prepend size %d, prepending %d bytes\n", pt->prepend_size, needed);
 		if (count < needed) {
-			copy_from_user(pt->buf + pt->prepend_size, buffer, count);
+			if (copy_from_user(pt->buf + pt->prepend_size,
+					   buffer, count))
+				return -EFAULT;
 			pt->prepend_size += count;
 			DPD(3, "prepend size now %d\n", pt->prepend_size);
 			return count;
 		}
-		copy_from_user(pt->buf + pt->prepend_size, buffer, needed);
+		if (copy_from_user(pt->buf + pt->prepend_size, buffer, needed))
+			return -EFAULT;
 		r = pt_putblock(wave_dev, (u16 *) pt->buf, nonblock);
 		if (r)
 			return r;
@@ -178,7 +181,8 @@
 	blocks_copied = 0;
 	while (blocks > 0) {
 		u16 __user *bufptr = (u16 __user *) buffer + (bytes_copied/2);
-		copy_from_user(pt->buf, bufptr, PT_BLOCKSIZE);
+		if (copy_from_user(pt->buf, bufptr, PT_BLOCKSIZE))
+			return -EFAULT;
 		r = pt_putblock(wave_dev, (u16 *)pt->buf, nonblock);
 		if (r) {
 			if (bytes_copied)
@@ -193,7 +197,8 @@
 	i = count - bytes_copied;
 	if (i) {
 		pt->prepend_size = i;
-		copy_from_user(pt->buf, buffer + bytes_copied, i);
+		if (copy_from_user(pt->buf, buffer + bytes_copied, i))
+			return -EFAULT;
 		bytes_copied += i;
 		DPD(3, "filling prepend buffer with %d bytes", i);
 	}
diff --git a/sound/oss/es1371.c b/sound/oss/es1371.c
index ddf6b0a..cc282a0 100644
--- a/sound/oss/es1371.c
+++ b/sound/oss/es1371.c
@@ -130,6 +130,7 @@
 #include <linux/wait.h>
 #include <linux/dma-mapping.h>
 #include <linux/mutex.h>
+#include <linux/mm.h>
 
 #include <asm/io.h>
 #include <asm/page.h>
diff --git a/sound/oss/i810_audio.c b/sound/oss/i810_audio.c
index 240cc79..c3c8a72 100644
--- a/sound/oss/i810_audio.c
+++ b/sound/oss/i810_audio.c
@@ -101,6 +101,7 @@
 #include <linux/ac97_codec.h>
 #include <linux/bitops.h>
 #include <linux/mutex.h>
+#include <linux/mm.h>
 
 #include <asm/uaccess.h>
 
diff --git a/sound/oss/soundcard.c b/sound/oss/soundcard.c
index 2344d09..8fb8e7f 100644
--- a/sound/oss/soundcard.c
+++ b/sound/oss/soundcard.c
@@ -42,6 +42,7 @@
 #include <linux/proc_fs.h>
 #include <linux/smp_lock.h>
 #include <linux/module.h>
+#include <linux/mm.h>
 
 /*
  * This ought to be moved into include/asm/dma.h
@@ -557,17 +558,17 @@
 	sound_dmap_flag = (dmabuf > 0 ? 1 : 0);
 
 	for (i = 0; i < sizeof (dev_list) / sizeof *dev_list; i++) {
-		class_device_create(sound_class, NULL,
-				    MKDEV(SOUND_MAJOR, dev_list[i].minor),
-				    NULL, "%s", dev_list[i].name);
+		device_create(sound_class, NULL,
+			      MKDEV(SOUND_MAJOR, dev_list[i].minor),
+			      "%s", dev_list[i].name);
 
 		if (!dev_list[i].num)
 			continue;
 
 		for (j = 1; j < *dev_list[i].num; j++)
-			class_device_create(sound_class, NULL,
-					    MKDEV(SOUND_MAJOR, dev_list[i].minor + (j*0x10)),
-					    NULL, "%s%d", dev_list[i].name, j);
+			device_create(sound_class, NULL,
+				      MKDEV(SOUND_MAJOR, dev_list[i].minor + (j*0x10)),
+				      "%s%d", dev_list[i].name, j);
 	}
 
 	if (sound_nblocks >= 1024)
@@ -581,11 +582,11 @@
 	int i, j;
 
 	for (i = 0; i < sizeof (dev_list) / sizeof *dev_list; i++) {
-		class_device_destroy(sound_class, MKDEV(SOUND_MAJOR, dev_list[i].minor));
+		device_destroy(sound_class, MKDEV(SOUND_MAJOR, dev_list[i].minor));
 		if (!dev_list[i].num)
 			continue;
 		for (j = 1; j < *dev_list[i].num; j++)
-			class_device_destroy(sound_class, MKDEV(SOUND_MAJOR, dev_list[i].minor + (j*0x10)));
+			device_destroy(sound_class, MKDEV(SOUND_MAJOR, dev_list[i].minor + (j*0x10)));
 	}
 	
 	unregister_sound_special(1);
diff --git a/sound/oss/sscape.c b/sound/oss/sscape.c
index 51f2fa6..30c36d1 100644
--- a/sound/oss/sscape.c
+++ b/sound/oss/sscape.c
@@ -39,6 +39,7 @@
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/proc_fs.h>
+#include <linux/mm.h>
 #include <linux/spinlock.h>
 
 #include "coproc.h"
diff --git a/sound/oss/trident.c b/sound/oss/trident.c
index 7a363a1..6b1f8c9 100644
--- a/sound/oss/trident.c
+++ b/sound/oss/trident.c
@@ -216,6 +216,7 @@
 #include <linux/gameport.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
+#include <linux/mm.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
diff --git a/sound/oss/via82cxxx_audio.c b/sound/oss/via82cxxx_audio.c
index 17837d4..c96cc8c 100644
--- a/sound/oss/via82cxxx_audio.c
+++ b/sound/oss/via82cxxx_audio.c
@@ -2120,8 +2120,8 @@
 		return NOPAGE_SIGBUS; /* Disallow mremap */
 	}
         if (!card) {
-		DPRINTK ("EXIT, returning NOPAGE_OOM\n");
-		return NOPAGE_OOM;	/* Nothing allocated */
+		DPRINTK ("EXIT, returning NOPAGE_SIGBUS\n");
+		return NOPAGE_SIGBUS;	/* Nothing allocated */
 	}
 
 	pgoff = vma->vm_pgoff + ((address - vma->vm_start) >> PAGE_SHIFT);
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 6577b23..7abcb10 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -1927,9 +1927,10 @@
 static struct snd_ac97_build_ops null_build_ops;
 
 #ifdef CONFIG_SND_AC97_POWER_SAVE
-static void do_update_power(void *data)
+static void do_update_power(struct work_struct *work)
 {
-	update_power_regs(data);
+	update_power_regs(
+		container_of(work, struct snd_ac97, power_work.work));
 }
 #endif
 
@@ -1989,7 +1990,7 @@
 	mutex_init(&ac97->page_mutex);
 #ifdef CONFIG_SND_AC97_POWER_SAVE
 	ac97->power_workq = create_workqueue("ac97");
-	INIT_WORK(&ac97->power_work, do_update_power, ac97);
+	INIT_DELAYED_WORK(&ac97->power_work, do_update_power);
 #endif
 
 #ifdef CONFIG_PCI
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 9c3d7ac..71482c1 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -272,10 +272,11 @@
 /*
  * process queued unsolicited events
  */
-static void process_unsol_events(void *data)
+static void process_unsol_events(struct work_struct *work)
 {
-	struct hda_bus *bus = data;
-	struct hda_bus_unsolicited *unsol = bus->unsol;
+	struct hda_bus_unsolicited *unsol =
+		container_of(work, struct hda_bus_unsolicited, work);
+	struct hda_bus *bus = unsol->bus;
 	struct hda_codec *codec;
 	unsigned int rp, caddr, res;
 
@@ -314,7 +315,8 @@
 		kfree(unsol);
 		return -ENOMEM;
 	}
-	INIT_WORK(&unsol->work, process_unsol_events, bus);
+	INIT_WORK(&unsol->work, process_unsol_events);
+	unsol->bus = bus;
 	bus->unsol = unsol;
 	return 0;
 }
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index f9416c3..9ca1baf 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -206,6 +206,7 @@
 	/* workqueue */
 	struct workqueue_struct *workq;
 	struct work_struct work;
+	struct hda_bus *bus;
 };
 
 /*
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c
index fd3590f..2d40cc7 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c
@@ -219,35 +219,15 @@
 static int pdacf_config(struct pcmcia_device *link)
 {
 	struct snd_pdacf *pdacf = link->priv;
-	tuple_t tuple;
-	cisparse_t *parse = NULL;
-	u_short buf[32];
 	int last_fn, last_ret;
 
 	snd_printdd(KERN_DEBUG "pdacf_config called\n");
-	parse = kmalloc(sizeof(*parse), GFP_KERNEL);
-	if (! parse) {
-		snd_printk(KERN_ERR "pdacf_config: cannot allocate\n");
-		return -ENOMEM;
-	}
-	tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
-	tuple.Attributes = 0;
-	tuple.TupleData = (cisdata_t *)buf;
-	tuple.TupleDataMax = sizeof(buf);
-	tuple.TupleOffset = 0;
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
-	link->conf.ConfigBase = parse->config.base;
 	link->conf.ConfigIndex = 0x5;
 
 	CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
 	CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
 	CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
 
-	kfree(parse);
-
 	if (snd_pdacf_assign_resources(pdacf, link->io.BasePort1, link->irq.AssignedIRQ) < 0)
 		goto failed;
 
@@ -255,7 +235,6 @@
 	return 0;
 
 cs_failed:
-	kfree(parse);
 	cs_error(link, last_fn, last_ret);
 failed:
 	pcmcia_disable_device(link);
@@ -299,7 +278,8 @@
  * Module entry points
  */
 static struct pcmcia_device_id snd_pdacf_ids[] = {
-	PCMCIA_DEVICE_MANF_CARD(0x015d, 0x4c45),
+	/* this is too general PCMCIA_DEVICE_MANF_CARD(0x015d, 0x4c45), */
+	PCMCIA_DEVICE_PROD_ID12("Core Sound","PDAudio-CF",0x396d19d2,0x71717b49),
 	PCMCIA_DEVICE_NULL
 };
 MODULE_DEVICE_TABLE(pcmcia, snd_pdacf_ids);
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c
index 3089fcc..d7df59e 100644
--- a/sound/pcmcia/vx/vxpocket.c
+++ b/sound/pcmcia/vx/vxpocket.c
@@ -217,34 +217,12 @@
 {
 	struct vx_core *chip = link->priv;
 	struct snd_vxpocket *vxp = (struct snd_vxpocket *)chip;
-	tuple_t tuple;
-	cisparse_t *parse;
-	u_short buf[32];
 	int last_fn, last_ret;
 
 	snd_printdd(KERN_DEBUG "vxpocket_config called\n");
-	parse = kmalloc(sizeof(*parse), GFP_KERNEL);
-	if (! parse) {
-		snd_printk(KERN_ERR "vx: cannot allocate\n");
-		return -ENOMEM;
-	}
-	tuple.Attributes = 0;
-	tuple.TupleData = (cisdata_t *)buf;
-	tuple.TupleDataMax = sizeof(buf);
-	tuple.TupleOffset = 0;
-	tuple.DesiredTuple = CISTPL_CONFIG;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
-	link->conf.ConfigBase = parse->config.base;
-	link->conf.Present = parse->config.rmask[0];
 
 	/* redefine hardware record according to the VERSION1 string */
-	tuple.DesiredTuple = CISTPL_VERS_1;
-	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
-	if (! strcmp(parse->version_1.str + parse->version_1.ofs[1], "VX-POCKET")) {
+	if (!strcmp(link->prod_id[1], "VX-POCKET")) {
 		snd_printdd("VX-pocket is detected\n");
 	} else {
 		snd_printdd("VX-pocket 440 is detected\n");
@@ -265,14 +243,12 @@
 		goto failed;
 
 	link->dev_node = &vxp->node;
-	kfree(parse);
 	return 0;
 
 cs_failed:
 	cs_error(link, last_fn, last_ret);
 failed:
 	pcmcia_disable_device(link);
-	kfree(parse);
 	return -ENODEV;
 }
 
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index 2fbe1d1..8f074c7 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -942,10 +942,11 @@
 }
 
 static struct work_struct device_change;
+static struct snd_pmac *device_change_chip;
 
-static void device_change_handler(void *self)
+static void device_change_handler(struct work_struct *work)
 {
-	struct snd_pmac *chip = self;
+	struct snd_pmac *chip = device_change_chip;
 	struct pmac_tumbler *mix;
 	int headphone, lineout;
 
@@ -1417,7 +1418,8 @@
 	chip->resume = tumbler_resume;
 #endif
 
-	INIT_WORK(&device_change, device_change_handler, (void *)chip);
+	INIT_WORK(&device_change, device_change_handler);
+	device_change_chip = chip;
 
 #ifdef PMAC_SUPPORT_AUTOMUTE
 	if ((mix->headphone_irq >=0 || mix->lineout_irq >= 0)
diff --git a/sound/sound_core.c b/sound/sound_core.c
index 5322c50..8f1ced4 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -170,8 +170,8 @@
 	else
 		sprintf(s->name, "sound/%s%d", name, r / SOUND_STEP);
 
-	class_device_create(sound_class, NULL, MKDEV(SOUND_MAJOR, s->unit_minor),
-			    dev, s->name+6);
+	device_create(sound_class, dev, MKDEV(SOUND_MAJOR, s->unit_minor),
+		      s->name+6);
 	return r;
 
  fail:
@@ -193,7 +193,7 @@
 	p = __sound_remove_unit(list, unit);
 	spin_unlock(&sound_loader_lock);
 	if (p) {
-		class_device_destroy(sound_class, MKDEV(SOUND_MAJOR, p->unit_minor));
+		device_destroy(sound_class, MKDEV(SOUND_MAJOR, p->unit_minor));
 		kfree(p);
 	}
 }
diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
index b7c5e59..24f5a26 100644
--- a/sound/usb/usbmidi.c
+++ b/sound/usb/usbmidi.c
@@ -981,7 +981,7 @@
 			if (umidi->usb_protocol_ops->finish_out_endpoint)
 				umidi->usb_protocol_ops->finish_out_endpoint(ep->out);
 		}
-		if (ep->in && ep->in->urb)
+		if (ep->in)
 			usb_kill_urb(ep->in->urb);
 	}
 }
diff --git a/sound/usb/usbmixer.c b/sound/usb/usbmixer.c
index 1024c17..e74eb1b 100644
--- a/sound/usb/usbmixer.c
+++ b/sound/usb/usbmixer.c
@@ -1620,8 +1620,7 @@
 		kfree(mixer->urb->transfer_buffer);
 		usb_free_urb(mixer->urb);
 	}
-	if (mixer->rc_urb)
-		usb_free_urb(mixer->rc_urb);
+	usb_free_urb(mixer->rc_urb);
 	kfree(mixer->rc_setup_packet);
 	kfree(mixer);
 }
@@ -2056,8 +2055,6 @@
 	struct usb_mixer_interface *mixer;
 	
 	mixer = list_entry(p, struct usb_mixer_interface, list);
-	if (mixer->urb)
-		usb_kill_urb(mixer->urb);
-	if (mixer->rc_urb)
-		usb_kill_urb(mixer->rc_urb);
+	usb_kill_urb(mixer->urb);
+	usb_kill_urb(mixer->rc_urb);
 }
diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
index 4b52d18..b76b3dd 100644
--- a/sound/usb/usx2y/usX2Yhwdep.c
+++ b/sound/usb/usx2y/usX2Yhwdep.c
@@ -48,7 +48,7 @@
 	
 	offset = area->vm_pgoff << PAGE_SHIFT;
 	offset += address - area->vm_start;
-	snd_assert((offset % PAGE_SIZE) == 0, return NOPAGE_OOM);
+	snd_assert((offset % PAGE_SIZE) == 0, return NOPAGE_SIGBUS);
 	vaddr = (char*)((struct usX2Ydev *)area->vm_private_data)->us428ctls_sharedmem + offset;
 	page = virt_to_page(vaddr);
 	get_page(page);